From f7b716f95c38dafa8e62a656f6eb1d2923338c5c Mon Sep 17 00:00:00 2001 From: Content Bot Date: Sun, 23 Mar 2025 12:52:33 +0000 Subject: [PATCH 01/18] ServiceNow: Apply ruff Format --- .../ServiceNowEventCollector.py | 43 +- .../ServiceNowEventCollector_test.py | 156 +- .../ServiceNow_CMDB/ServiceNow_CMDB.py | 459 +-- .../ServiceNow_CMDB/ServiceNow_CMDB_test.py | 85 +- .../test_data/response_constants.py | 188 +- .../test_data/result_constants.py | 201 +- .../ServiceNow_IAM/ServiceNow_IAM.py | 269 +- .../ServiceNow_IAM/ServiceNow_IAM_test.py | 275 +- .../Integrations/ServiceNowv2/ServiceNowv2.py | 2726 +++++++------- .../ServiceNowv2/ServiceNowv2_test.py | 3097 ++++++++++------ .../test_data/created_ticket_context.py | 71 +- .../test_data/response_constants.py | 3164 ++++++++--------- .../test_data/result_constants.py | 906 +++-- Packs/ServiceNow/ReleaseNotes/2_7_9.md | 36 + .../ServiceNowAddComment.py | 56 +- .../ServiceNowAddComment_test.py | 39 +- .../ServiceNowCreateIncident.py | 122 +- .../ServiceNowCreateIncident_test.py | 57 +- .../ServiceNowIncidentStatus.py | 46 +- .../ServiceNowQueryIncident.py | 109 +- .../ServiceNowQueryIncident_test.py | 62 +- .../ServiceNowTroubleshoot.py | 78 +- .../ServiceNowTroubleshoot_test.py | 145 +- .../ServiceNowUpdateIncident.py | 142 +- .../ServiceNowUpdateIncident_test.py | 81 +- Packs/ServiceNow/pack_metadata.json | 2 +- 26 files changed, 6989 insertions(+), 5626 deletions(-) create mode 100644 Packs/ServiceNow/ReleaseNotes/2_7_9.md diff --git a/Packs/ServiceNow/Integrations/ServiceNowEventCollector/ServiceNowEventCollector.py b/Packs/ServiceNow/Integrations/ServiceNowEventCollector/ServiceNowEventCollector.py index 81949a329a48..2a5e026ffdce 100644 --- a/Packs/ServiceNow/Integrations/ServiceNowEventCollector/ServiceNowEventCollector.py +++ b/Packs/ServiceNow/Integrations/ServiceNowEventCollector/ServiceNowEventCollector.py @@ -1,7 +1,6 @@ import demistomock as demisto -from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import - import urllib3 +from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import # Disable insecure warnings urllib3.disable_warnings() @@ -19,8 +18,19 @@ class Client: - def __init__(self, use_oauth, credentials, client_id, client_secret, url, verify, proxy, api_server_url, fetch_limit_audit, - fetch_limit_syslog): + def __init__( + self, + use_oauth, + credentials, + client_id, + client_secret, + url, + verify, + proxy, + api_server_url, + fetch_limit_audit, + fetch_limit_syslog, + ): self.sn_client = ServiceNowClient( credentials=credentials, use_oauth=use_oauth, @@ -72,14 +82,15 @@ def handle_log_types(event_types_to_fetch: list) -> list: If an event type title is not found, an exception is raised. 
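Example (illustrative): handle_log_types(["Audit"]) returns [AUDIT], while an unrecognized title such as "Foo" raises a DemistoException listing the valid titles.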
""" log_types = [] - VALID_EVENT_TITLES = ['Audit', 'Syslog Transactions'] - titles_to_types = {'Audit': AUDIT, 'Syslog Transactions': SYSLOG_TRANSACTIONS} + VALID_EVENT_TITLES = ["Audit", "Syslog Transactions"] + titles_to_types = {"Audit": AUDIT, "Syslog Transactions": SYSLOG_TRANSACTIONS} for type_title in event_types_to_fetch: if log_type := titles_to_types.get(type_title): log_types.append(log_type) else: raise DemistoException( - f"'{type_title}' is not valid event type, please select from the following list: {VALID_EVENT_TITLES}") + f"'{type_title}' is not valid event type, please select from the following list: {VALID_EVENT_TITLES}" + ) return log_types @@ -207,7 +218,7 @@ def get_events_command(client: Client, args: dict, log_type: str, last_run: dict Returns: Sign on logs from Workday. """ - types_to_titles = {AUDIT: 'Audit', SYSLOG_TRANSACTIONS: 'Syslog Transactions'} + types_to_titles = {AUDIT: "Audit", SYSLOG_TRANSACTIONS: "Syslog Transactions"} all_events = [] if arg_from := args.get("from_date"): from_date = arg_from @@ -219,8 +230,12 @@ def get_events_command(client: Client, args: dict, log_type: str, last_run: dict logs = client.search_events(from_time=from_date, log_type=log_type, limit=limit, offset=offset) add_time_field(logs, log_type) demisto.debug(f"Got a total of {len(logs)} {log_type} events created after {from_date}") - hr = tableToMarkdown(name=f'{types_to_titles[log_type]} Events', t=logs, removeNull=True, - headerTransform=lambda x: string_to_table_header(camel_case_to_underscore(x))) + hr = tableToMarkdown( + name=f"{types_to_titles[log_type]} Events", + t=logs, + removeNull=True, + headerTransform=lambda x: string_to_table_header(camel_case_to_underscore(x)), + ) all_events.extend(logs) return all_events, CommandResults(readable_output=hr) @@ -296,7 +311,7 @@ def main() -> None: # pragma: no cover password = credentials.get("password") max_fetch_audit = arg_to_number(params.get("max_fetch")) or 10000 max_fetch_syslog = arg_to_number(params.get("max_fetch_syslog_transactions")) or 10000 - event_types_to_fetch = argToList(params.get('event_types_to_fetch', ['Audit'])) + event_types_to_fetch = argToList(params.get("event_types_to_fetch", ["Audit"])) log_types = handle_log_types(event_types_to_fetch) version = params.get("api_version") @@ -320,7 +335,7 @@ def main() -> None: # pragma: no cover proxy=proxy, api_server_url=api_server_url, fetch_limit_audit=max_fetch_audit, - fetch_limit_syslog=max_fetch_syslog + fetch_limit_syslog=max_fetch_syslog, ) last_run = demisto.getLastRun() if client.sn_client.use_oauth and not get_integration_context().get("refresh_token", None): @@ -354,8 +369,8 @@ def main() -> None: # pragma: no cover # Log exceptions and return errors except Exception as e: - demisto.info(f"here {str(e)}") - return_error(f"Failed to execute {demisto.command()} command.\nError:\n{str(e)}") + demisto.info(f"here {e!s}") + return_error(f"Failed to execute {demisto.command()} command.\nError:\n{e!s}") from ServiceNowApiModule import * # noqa: E402 diff --git a/Packs/ServiceNow/Integrations/ServiceNowEventCollector/ServiceNowEventCollector_test.py b/Packs/ServiceNow/Integrations/ServiceNowEventCollector/ServiceNowEventCollector_test.py index ef1c0f0cfa6a..e4df106e390d 100644 --- a/Packs/ServiceNow/Integrations/ServiceNowEventCollector/ServiceNowEventCollector_test.py +++ b/Packs/ServiceNow/Integrations/ServiceNowEventCollector/ServiceNowEventCollector_test.py @@ -1,12 +1,26 @@ import json from datetime import datetime, timedelta -import 
ServiceNowEventCollector + import pytest -from ServiceNowEventCollector import ( - Client, LOGS_DATE_FORMAT, get_events_command, fetch_events_command, process_and_filter_events, get_limit, - SYSLOG_TRANSACTIONS, AUDIT, add_time_field, DATE_FORMAT, initialize_from_date, update_last_run, handle_log_types, - LAST_FETCH_TIME, PREVIOUS_RUN_IDS) +import ServiceNowEventCollector from CommonServerPython import DemistoException +from ServiceNowEventCollector import ( + AUDIT, + DATE_FORMAT, + LAST_FETCH_TIME, + LOGS_DATE_FORMAT, + PREVIOUS_RUN_IDS, + SYSLOG_TRANSACTIONS, + Client, + add_time_field, + fetch_events_command, + get_events_command, + get_limit, + handle_log_types, + initialize_from_date, + process_and_filter_events, + update_last_run, +) def util_load_json(path): @@ -28,7 +42,7 @@ def create_client(self): proxy=False, api_server_url=f"{self.base_url}/api/now", fetch_limit_audit=10, - fetch_limit_syslog=10 + fetch_limit_syslog=10, ) @staticmethod @@ -84,11 +98,7 @@ def test_get_events_command_standard(self, mocker): log_type = AUDIT mock_logs = [{"event_id": 1, "timestamp": "2023-01-01 01:00:00"}] - http_responses = mocker.patch.object( - Client, - "search_events", - return_value=mock_logs - ) + http_responses = mocker.patch.object(Client, "search_events", return_value=mock_logs) mocker.patch("ServiceNowEventCollector.add_time_field", return_value="") all_events, command_results = get_events_command(self.client, args, log_type, last_run) @@ -167,16 +177,11 @@ def test_get_events_command_with_last_run(self, mocker): log_type = AUDIT mock_logs = [{"event_id": 2, "timestamp": "2023-01-01 02:00:00"}] - http_responses = mocker.patch.object( - Client, - "search_events", - return_value=mock_logs - ) + http_responses = mocker.patch.object(Client, "search_events", return_value=mock_logs) mocker.patch("ServiceNowEventCollector.add_time_field", return_value="") mock_initialize_from_date = mocker.patch( - "ServiceNowEventCollector.initialize_from_date", - wraps=ServiceNowEventCollector.initialize_from_date + "ServiceNowEventCollector.initialize_from_date", wraps=ServiceNowEventCollector.initialize_from_date ) all_events, command_results = get_events_command(self.client, args, log_type, last_run) @@ -211,15 +216,19 @@ def test_fetch_events_command_standard(self, mocker): mocker.patch("ServiceNowEventCollector.initialize_from_date", return_value="2023-01-01T00:00:00Z") mocker.patch.object(self.client, "search_events", return_value=mock_events) mock_process_and_filter = mocker.patch( - "ServiceNowEventCollector.process_and_filter_events", return_value=(mock_events, {"1"})) - mocker.patch("ServiceNowEventCollector.update_last_run", return_value={ - "audit": {"previous_run_ids": ["1"], "last_fetch_time": "2023-01-01 01:00:00"}}) + "ServiceNowEventCollector.process_and_filter_events", return_value=(mock_events, {"1"}) + ) + mocker.patch( + "ServiceNowEventCollector.update_last_run", + return_value={"audit": {"previous_run_ids": ["1"], "last_fetch_time": "2023-01-01 01:00:00"}}, + ) collected_events, updated_last_run = fetch_events_command(self.client, last_run, log_types) assert collected_events == mock_events mock_process_and_filter.assert_called_once_with( - events=mock_events, previous_run_ids=set(), from_date="2023-01-01T00:00:00Z", log_type="audit") + events=mock_events, previous_run_ids=set(), from_date="2023-01-01T00:00:00Z", log_type="audit" + ) assert updated_last_run["audit"]["last_fetch_time"] == "2023-01-01 01:00:00" def test_fetch_events_command_no_new_events(self, mocker): @@ -263,11 +272,14 
@@ def test_fetch_events_command_multiple_log_types(self, mocker): mock_audit_events = [{"event_id": 1, "sys_created_on": "2023-01-01 01:00:00"}] mock_syslog_events = [{"event_id": 2, "sys_created_on": "2023-01-01T02:00:00Z"}] - mocker.patch("ServiceNowEventCollector.initialize_from_date", - side_effect=["2023-01-01T00:00:00Z", "2023-01-01T00:00:00Z"]) + mocker.patch( + "ServiceNowEventCollector.initialize_from_date", side_effect=["2023-01-01T00:00:00Z", "2023-01-01T00:00:00Z"] + ) mocker.patch.object(self.client, "search_events", side_effect=[mock_audit_events, mock_syslog_events]) - mocker.patch("ServiceNowEventCollector.process_and_filter_events", - side_effect=[(mock_audit_events, {"1"}), (mock_syslog_events, {"2"})]) + mocker.patch( + "ServiceNowEventCollector.process_and_filter_events", + side_effect=[(mock_audit_events, {"1"}), (mock_syslog_events, {"2"})], + ) collected_events, updated_last_run = fetch_events_command(self.client, last_run, log_types) @@ -306,10 +318,7 @@ def test_process_and_filter_events_standard_case(): - Validates that all events are added to unique_events, and previous_run_ids are updated. """ - events = [ - {"sys_id": "1", "sys_created_on": "2023-01-01 01:00:00"}, - {"sys_id": "2", "sys_created_on": "2023-01-01 02:00:00"} - ] + events = [{"sys_id": "1", "sys_created_on": "2023-01-01 01:00:00"}, {"sys_id": "2", "sys_created_on": "2023-01-01 02:00:00"}] from_date = "2023-01-01 00:00:00" log_type = "audit" @@ -331,10 +340,7 @@ def test_process_and_filter_events_duplicate_event(): Then: - Validates that duplicate events are excluded from unique_events and not added to previous_run_ids. """ - events = [ - {"sys_id": "1", "sys_created_on": "2023-01-01 01:00:00"}, - {"sys_id": "1", "sys_created_on": "2023-01-01 01:30:00"} - ] + events = [{"sys_id": "1", "sys_created_on": "2023-01-01 01:00:00"}, {"sys_id": "1", "sys_created_on": "2023-01-01 01:30:00"}] previous_run_ids = {"1"} from_date = "2023-01-01 00:00:00" log_type = "audit" @@ -356,10 +362,7 @@ def test_process_and_filter_events_with_same_time(): Then: - Validates that all events are added to previous_run_ids, but only one copy is in unique_events. """ - events = [ - {"sys_id": "1", "sys_created_on": "2023-01-01 01:00:00"}, - {"sys_id": "2", "sys_created_on": "2023-01-01 01:00:00"} - ] + events = [{"sys_id": "1", "sys_created_on": "2023-01-01 01:00:00"}, {"sys_id": "2", "sys_created_on": "2023-01-01 01:00:00"}] from_date = "2023-01-01 01:00:00" log_type = "audit" @@ -380,10 +383,7 @@ def test_process_and_filter_events_after_from_date(): Then: - Validates that all events are added to unique_events and previous_run_ids is reset after finding new events. """ - events = [ - {"sys_id": "3", "sys_created_on": "2023-01-01 02:00:00"}, - {"sys_id": "4", "sys_created_on": "2023-01-01 02:00:00"} - ] + events = [{"sys_id": "3", "sys_created_on": "2023-01-01 02:00:00"}, {"sys_id": "4", "sys_created_on": "2023-01-01 02:00:00"}] from_date = "2023-01-01 01:00:00" log_type = "audit" @@ -426,10 +426,7 @@ def test_process_and_filter_events_log_type_assignment(): Then: - Validates that each event has the correct 'source_log_type' value. 
""" - events = [ - {"sys_id": "5", "sys_created_on": "2023-01-01 02:00:00"}, - {"sys_id": "6", "sys_created_on": "2023-01-01 03:00:00"} - ] + events = [{"sys_id": "5", "sys_created_on": "2023-01-01 02:00:00"}, {"sys_id": "6", "sys_created_on": "2023-01-01 03:00:00"}] from_date = "2023-01-01 00:00:00" log_type = "audit" @@ -449,9 +446,7 @@ def test_process_and_filter_events_handles_event_time_formatting(): Then: - Validates that each event has a correctly formatted '_time' field. """ - events = [ - {"sys_id": "7", "sys_created_on": "2023-01-01 02:00:00"} - ] + events = [{"sys_id": "7", "sys_created_on": "2023-01-01 02:00:00"}] from_date = "2023-01-01 00:00:00" log_type = "audit" expected_time_format = "2023-01-01T02:00:00Z" @@ -484,7 +479,7 @@ def test_get_limit_with_args(): proxy=False, api_server_url="https://test.com/api/now", fetch_limit_audit=300, - fetch_limit_syslog=400 + fetch_limit_syslog=400, ) limit = get_limit(args, client) @@ -517,7 +512,7 @@ def test_get_limit_with_client_default(): proxy=False, api_server_url="https://test.com/api/now", fetch_limit_audit=300, - fetch_limit_syslog=400 + fetch_limit_syslog=400, ) limit = get_limit(args, client) @@ -549,7 +544,7 @@ def test_get_limit_with_no_args_or_client_default(): proxy=False, api_server_url="https://test.com/api/now", fetch_limit_audit=None, - fetch_limit_syslog=400 + fetch_limit_syslog=400, ) limit = get_limit(args, client) @@ -568,10 +563,7 @@ def test_add_time_field_standard_case(): - Ensures each event has a correctly formatted '_time' field. - Ensures each event has the specified 'source_log_type'. """ - events = [ - {"sys_created_on": "2023-01-01 12:00:00", "sys_id": "1"}, - {"sys_created_on": "2023-01-02 15:30:00", "sys_id": "2"} - ] + events = [{"sys_created_on": "2023-01-01 12:00:00", "sys_id": "1"}, {"sys_created_on": "2023-01-02 15:30:00", "sys_id": "2"}] log_type = "audit" result = add_time_field(events, log_type) @@ -633,7 +625,7 @@ def test_add_time_field_partial_valid_dates(): """ events = [ {"sys_created_on": "2023-01-01T12:00:00Z", "sys_id": "1"}, - {"sys_created_on": "2023/01/02 15:30:00", "sys_id": "2"} # incorrect format + {"sys_created_on": "2023/01/02 15:30:00", "sys_id": "2"}, # incorrect format ] log_type = "audit" @@ -652,9 +644,7 @@ def test_add_time_field_no_sys_created_on_field(): Then: - Expects a KeyError as 'sys_created_on' is missing in the event. """ - events = [ - {"sys_id": "1"} - ] + events = [{"sys_id": "1"}] log_type = "audit" with pytest.raises(KeyError): @@ -672,10 +662,7 @@ def test_initialize_from_date_with_existing_timestamp(): Then: - Returns the existing last_fetch_time for the log_type. 
""" - last_run = { - "last_fetch_time": "2023-01-01T00:00:00Z", - "last_fetch_time_syslog": "2023-01-02T00:00:00Z" - } + last_run = {"last_fetch_time": "2023-01-01T00:00:00Z", "last_fetch_time_syslog": "2023-01-02T00:00:00Z"} log_type = "audit" result = initialize_from_date(last_run, log_type) @@ -699,8 +686,9 @@ def test_initialize_from_date_without_existing_timestamp(): result = initialize_from_date(last_run, log_type) expected_time = (datetime.utcnow() - timedelta(minutes=1)).strftime(LOGS_DATE_FORMAT) - assert abs(datetime.strptime(result, LOGS_DATE_FORMAT) - - datetime.strptime(expected_time, LOGS_DATE_FORMAT)) < timedelta(seconds=5) + assert abs(datetime.strptime(result, LOGS_DATE_FORMAT) - datetime.strptime(expected_time, LOGS_DATE_FORMAT)) < timedelta( + seconds=5 + ) def test_initialize_from_date_with_different_log_type(): @@ -714,15 +702,14 @@ def test_initialize_from_date_with_different_log_type(): Then: - Returns a default timestamp set to one minute before the current UTC time. """ - last_run = { - "syslog transactions": {"last_fetch_time": "2023-01-02T00:00:00Z"} - } + last_run = {"syslog transactions": {"last_fetch_time": "2023-01-02T00:00:00Z"}} log_type = "audit" result = initialize_from_date(last_run, log_type) expected_time = (datetime.utcnow() - timedelta(minutes=1)).strftime(LOGS_DATE_FORMAT) - assert abs(datetime.strptime(result, LOGS_DATE_FORMAT) - - datetime.strptime(expected_time, LOGS_DATE_FORMAT)) < timedelta(seconds=5) + assert abs(datetime.strptime(result, LOGS_DATE_FORMAT) - datetime.strptime(expected_time, LOGS_DATE_FORMAT)) < timedelta( + seconds=5 + ) def test_initialize_from_date_missing_last_fetch_key(): @@ -736,16 +723,15 @@ def test_initialize_from_date_missing_last_fetch_key(): Then: - Returns a default timestamp set to one minute before the current UTC time. """ - last_run = { - "audit": {"some_other_field": "some_value"} - } + last_run = {"audit": {"some_other_field": "some_value"}} log_type = "audit" result = initialize_from_date(last_run, log_type) expected_time = (datetime.utcnow() - timedelta(minutes=1)).strftime(LOGS_DATE_FORMAT) - assert abs(datetime.strptime(result, LOGS_DATE_FORMAT) - - datetime.strptime(expected_time, LOGS_DATE_FORMAT)) < timedelta(seconds=5) + assert abs(datetime.strptime(result, LOGS_DATE_FORMAT) - datetime.strptime(expected_time, LOGS_DATE_FORMAT)) < timedelta( + seconds=5 + ) def test_update_existing_log_type(): @@ -759,9 +745,7 @@ def test_update_existing_log_type(): Then: - Updates the existing log type entry with new last fetch time and previous run IDs. """ - last_run = { - "last_fetch_time": "2023-01-01T00:00:00Z", "previous_run_ids": ["id1", "id2"] - } + last_run = {"last_fetch_time": "2023-01-01T00:00:00Z", "previous_run_ids": ["id1", "id2"]} log_type = "audit" last_event_time = "2023-01-02T00:00:00Z" previous_run_ids = ["id3", "id4"] @@ -783,9 +767,7 @@ def test_update_new_log_type(): Then: - Adds the new log type entry with the specified last fetch time and previous run IDs. """ - last_run = { - "last_fetch_time": "2023-01-01T00:00:00Z", "previous_run_ids": ["id1", "id2"] - } + last_run = {"last_fetch_time": "2023-01-01T00:00:00Z", "previous_run_ids": ["id1", "id2"]} log_type = "syslog transactions" last_event_time = "2023-01-02T00:00:00Z" previous_run_ids = ["id5", "id6"] @@ -808,9 +790,7 @@ def test_update_empty_previous_run_ids(): Then: - Updates the log type entry in last_run with an empty previous_run_ids list. 
""" - last_run = { - "last_fetch_time": "2023-01-01T00:00:00Z", "previous_run_ids": ["id1", "id2"] - } + last_run = {"last_fetch_time": "2023-01-01T00:00:00Z", "previous_run_ids": ["id1", "id2"]} log_type = "audit" last_event_time = "2023-01-02T00:00:00Z" previous_run_ids = [] @@ -855,8 +835,10 @@ def test_update_multiple_log_types(): - Correctly updates each log type entry with its respective last fetch time and previous run IDs. """ last_run = { - "last_fetch_time": "2023-01-01T00:00:00Z", "previous_run_ids": ["id1", "id2"], - "last_fetch_time_syslog": "2023-01-01T00:00:00Z", "previous_run_ids_syslog": ["id3", "id4"] + "last_fetch_time": "2023-01-01T00:00:00Z", + "previous_run_ids": ["id1", "id2"], + "last_fetch_time_syslog": "2023-01-01T00:00:00Z", + "previous_run_ids_syslog": ["id3", "id4"], } # Update audit logs diff --git a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py index 3538ee2c41ea..d36887946a1a 100644 --- a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py +++ b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py @@ -1,26 +1,25 @@ import demistomock as demisto from CommonServerPython import * + from CommonServerUserPython import * -'''IMPORTS''' -import urllib3 -from typing import Any -from _collections import defaultdict +"""IMPORTS""" import ast +from _collections import defaultdict from operator import itemgetter +from typing import Any + +import urllib3 # Disable insecure warnings urllib3.disable_warnings() -''' CONSTANTS ''' -API_VERSION = '/api/now/cmdb/instance/' -CREAT_RECORD_DATA_FIELDS = ['attributes', 'inbound_relations', 'outbound_relations', 'source'] -UPDATE_RECORD_DATA_FIELDS = ['attributes', 'source'] -ADD_RELATION_DATA_FIELDS = ['inbound_relations', 'outbound_relations', 'source'] -FIELD_TO_OUTPUT = { - 'inbound_relations': 'Inbound Relations', - 'outbound_relations': 'Outbound Relations' -} +""" CONSTANTS """ +API_VERSION = "/api/now/cmdb/instance/" +CREAT_RECORD_DATA_FIELDS = ["attributes", "inbound_relations", "outbound_relations", "source"] +UPDATE_RECORD_DATA_FIELDS = ["attributes", "source"] +ADD_RELATION_DATA_FIELDS = ["inbound_relations", "outbound_relations", "source"] +FIELD_TO_OUTPUT = {"inbound_relations": "Inbound Relations", "outbound_relations": "Outbound Relations"} class Client: @@ -32,8 +31,16 @@ class Client: Most calls use _http_request() that handles proxy, SSL verification, etc. """ - def __init__(self, credentials: dict, use_oauth: bool = False, client_id: str = '', client_secret: str = '', - url: str = '', verify: bool = False, proxy: bool = False): + def __init__( + self, + credentials: dict, + use_oauth: bool = False, + client_id: str = "", + client_secret: str = "", + url: str = "", + verify: bool = False, + proxy: bool = False, + ): """ Args: - credentials: the username and password given by the user. @@ -46,39 +53,43 @@ def __init__(self, credentials: dict, use_oauth: bool = False, client_id: str = - headers: The request headers, for example: {'Accept`: `application/json`}. Can be None. - use_oauth: a flag indicating whether the user wants to use OAuth 2.0 or basic authorization. 
""" - headers = { - 'Content-Type': 'application/json', - 'Accept': 'application/json' - } + headers = {"Content-Type": "application/json", "Accept": "application/json"} self.use_oauth = use_oauth - self.snow_client: ServiceNowClient = ServiceNowClient(credentials=credentials, use_oauth=use_oauth, - client_id=client_id, client_secret=client_secret, - url=url, verify=verify, proxy=proxy, headers=headers) + self.snow_client: ServiceNowClient = ServiceNowClient( + credentials=credentials, + use_oauth=use_oauth, + client_id=client_id, + client_secret=client_secret, + url=url, + verify=verify, + proxy=proxy, + headers=headers, + ) def records_list(self, class_name, params=None): - return self.snow_client.http_request(method='GET', url_suffix=f'{API_VERSION}{class_name}', params=params) + return self.snow_client.http_request(method="GET", url_suffix=f"{API_VERSION}{class_name}", params=params) def get_record(self, class_name, sys_id, params=None): - url_suffix = f'{API_VERSION}{class_name}/{sys_id}' - return self.snow_client.http_request(method='GET', url_suffix=url_suffix, params=params) + url_suffix = f"{API_VERSION}{class_name}/{sys_id}" + return self.snow_client.http_request(method="GET", url_suffix=url_suffix, params=params) def create_record(self, class_name, data, params=None): - return self.snow_client.http_request(method='POST', url_suffix=f'{API_VERSION}{class_name}', params=params, data=data) + return self.snow_client.http_request(method="POST", url_suffix=f"{API_VERSION}{class_name}", params=params, data=data) def update_record(self, class_name, sys_id, data, params=None): - url_suffix = f'{API_VERSION}{class_name}/{sys_id}' - return self.snow_client.http_request(method='PATCH', url_suffix=url_suffix, params=params, data=data) + url_suffix = f"{API_VERSION}{class_name}/{sys_id}" + return self.snow_client.http_request(method="PATCH", url_suffix=url_suffix, params=params, data=data) def add_relation(self, class_name, sys_id, data, params=None): - url_suffix = f'{API_VERSION}{class_name}/{sys_id}/relation' - return self.snow_client.http_request(method='POST', url_suffix=url_suffix, params=params, data=data) + url_suffix = f"{API_VERSION}{class_name}/{sys_id}/relation" + return self.snow_client.http_request(method="POST", url_suffix=url_suffix, params=params, data=data) def delete_relation(self, class_name, sys_id, rel_sys_id, params=None): - url_suffix = f'{API_VERSION}{class_name}/{sys_id}/relation/{rel_sys_id}' - return self.snow_client.http_request(method='DELETE', url_suffix=url_suffix, params=params) + url_suffix = f"{API_VERSION}{class_name}/{sys_id}/relation/{rel_sys_id}" + return self.snow_client.http_request(method="DELETE", url_suffix=url_suffix, params=params) -''' HELPER FUNCTIONS ''' +""" HELPER FUNCTIONS """ def create_request_data(data_fields: list, args: dict) -> dict: @@ -95,28 +106,30 @@ def create_request_data(data_fields: list, args: dict) -> dict: """ data = {} for field in data_fields: - if field == 'source': + if field == "source": data[field] = args.get(field) - elif field == 'attributes': # 'attributes' input should be of the form key1=value1,key2=value2... + elif field == "attributes": # 'attributes' input should be of the form key1=value1,key2=value2... 
val = args.get(field) if val: try: attributes_dict = {} - attributes_input = val.split(',') + attributes_input = val.split(",") for attribute in attributes_input: - pair = attribute.split('=') + pair = attribute.split("=") attributes_dict[pair[0]] = pair[1] data[field] = attributes_dict except Exception: - raise Exception('Illegal input. Input format should be "key=value". Multiple values can be filled, ' - 'separated by a comma.') + raise Exception( + 'Illegal input. Input format should be "key=value". Multiple values can be filled, ' + "separated by a comma." + ) else: # other fields should be converted to dict/list val = args.get(field) if val: try: data[field] = ast.literal_eval(val) except Exception: - raise Exception('Illegal input. Please see the argument description for the correct input format.') + raise Exception("Illegal input. Please see the argument description for the correct input format.") return data @@ -133,12 +146,12 @@ def create_record_context(class_name: str, sys_id: str, result: dict) -> dict: A dictionary representing the context output for the record. """ context = { - 'ServiceNowCMDB.Record(val.ID===obj.ID)': { - 'Class': class_name, - 'SysID': sys_id, - 'Attributes': result.get('attributes', {}), - 'InboundRelations': result.get('inbound_relations', []), - 'OutboundRelations': result.get('outbound_relations', []), + "ServiceNowCMDB.Record(val.ID===obj.ID)": { + "Class": class_name, + "SysID": sys_id, + "Attributes": result.get("attributes", {}), + "InboundRelations": result.get("inbound_relations", []), + "OutboundRelations": result.get("outbound_relations", []), } } return context @@ -158,35 +171,37 @@ def create_human_readable(title: str, result: dict, fields: str) -> str: Return: A string representing the markdown output that should be displayed in the war room. 
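The markdown begins with the given title, followed by an "Attributes" table and, when relations are present, "Inbound Relations" and "Outbound Relations" tables.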
""" - md = f'{title}\n' + md = f"{title}\n" attributes_outputs = {} if fields: - for field in fields.split(','): - if result.get('attributes', {}).get(field): - attributes_outputs[string_to_context_key(field)] = result.get('attributes', {}).get(field) + for field in fields.split(","): + if result.get("attributes", {}).get(field): + attributes_outputs[string_to_context_key(field)] = result.get("attributes", {}).get(field) else: attributes_outputs = { - 'SysID': result.get('attributes', {}).get('sys_id'), - 'Name': result.get('attributes', {}).get('name') + "SysID": result.get("attributes", {}).get("sys_id"), + "Name": result.get("attributes", {}).get("name"), } - md += tableToMarkdown('Attributes', t=attributes_outputs, removeNull=True) + md += tableToMarkdown("Attributes", t=attributes_outputs, removeNull=True) - for relation_type in ['inbound_relations', 'outbound_relations']: + for relation_type in ["inbound_relations", "outbound_relations"]: relations = result.get(relation_type) if relations: relation_output = { - 'SysID': list(map(itemgetter('sys_id'), relations)), - 'Target Display Value': list( - map(itemgetter('display_value'), list(map(itemgetter('target'), result.get(relation_type))))), # type: ignore - 'Type Display Value': list( - map(itemgetter('display_value'), list(map(itemgetter('type'), result.get(relation_type))))), # type: ignore + "SysID": list(map(itemgetter("sys_id"), relations)), + "Target Display Value": list( + map(itemgetter("display_value"), list(map(itemgetter("target"), result.get(relation_type)))) + ), # type: ignore + "Type Display Value": list( + map(itemgetter("display_value"), list(map(itemgetter("type"), result.get(relation_type)))) + ), # type: ignore } - md += f' {tableToMarkdown(FIELD_TO_OUTPUT.get(relation_type), t=relation_output)}' + md += f" {tableToMarkdown(FIELD_TO_OUTPUT.get(relation_type), t=relation_output)}" return md -''' COMMAND FUNCTIONS ''' +""" COMMAND FUNCTIONS """ def records_list_command(client: Client, args: dict) -> tuple[str, dict, Any]: @@ -201,27 +216,25 @@ def records_list_command(client: Client, args: dict) -> tuple[str, dict, Any]: Demisto Outputs. """ context: dict = defaultdict(list) - class_name = args.get('class') + class_name = args.get("class") params = {} - if args.get('query'): - params['sysparm_query'] = args.get('query') - if args.get('limit'): - params['sysparm_limit'] = args.get('limit') - if args.get('offset'): - params['sysparm_offset'] = args.get('offset') - - outputs = { - 'Class': class_name - } + if args.get("query"): + params["sysparm_query"] = args.get("query") + if args.get("limit"): + params["sysparm_limit"] = args.get("limit") + if args.get("offset"): + params["sysparm_offset"] = args.get("offset") + + outputs = {"Class": class_name} response = client.records_list(class_name=class_name, params=params) - result = response.get('result', {}) + result = response.get("result", {}) if result: - outputs['Records'] = result - human_readable = tableToMarkdown(f'Found {len(result)} records for class {class_name}:', t=result) + outputs["Records"] = result + human_readable = tableToMarkdown(f"Found {len(result)} records for class {class_name}:", t=result) else: - human_readable = f'Found no records for class {class_name}.' - context['ServiceNowCMDB(val.ID===obj.ID)'] = outputs + human_readable = f"Found no records for class {class_name}." 
+ context["ServiceNowCMDB(val.ID===obj.ID)"] = outputs return human_readable, context, response @@ -238,39 +251,36 @@ def get_record_command(client: Client, args: dict) -> tuple[str, dict, Any]: Demisto Outputs. """ context: dict = defaultdict(list) - class_name = args.get('class') - sys_id = args.get('sys_id') + class_name = args.get("class") + sys_id = args.get("sys_id") params: dict = {} - if args.get('fields'): - params['sysparm_fields'] = args.get('fields') + if args.get("fields"): + params["sysparm_fields"] = args.get("fields") # Verify that sys_id and name were added so they can be used in the output of the command: - if 'sys_id' not in params.get('sysparm_fields', ''): - params['sysparm_fields'] += ',sys_id' - if 'name' not in params.get('sysparm_fields', ''): - params['sysparm_fields'] += ',name' - if args.get('relation_limit'): - params['sysparm_relation_limit'] = args.get('relation_limit') - if args.get('relation_offset'): - params['sysparm_relation_offset'] = args.get('relation_offset') + if "sys_id" not in params.get("sysparm_fields", ""): + params["sysparm_fields"] += ",sys_id" + if "name" not in params.get("sysparm_fields", ""): + params["sysparm_fields"] += ",name" + if args.get("relation_limit"): + params["sysparm_relation_limit"] = args.get("relation_limit") + if args.get("relation_offset"): + params["sysparm_relation_offset"] = args.get("relation_offset") response = client.get_record(class_name=class_name, sys_id=sys_id, params=params) - result = response.get('result') + result = response.get("result") if result: - context['ServiceNowCMDB.Record(val.ID===obj.ID)'] = { - 'Class': class_name, - 'SysID': sys_id, - 'Attributes': result.get('attributes', {}), - 'InboundRelations': result.get('inbound_relations', []), - 'OutboundRelations': result.get('outbound_relations', []), + context["ServiceNowCMDB.Record(val.ID===obj.ID)"] = { + "Class": class_name, + "SysID": sys_id, + "Attributes": result.get("attributes", {}), + "InboundRelations": result.get("inbound_relations", []), + "OutboundRelations": result.get("outbound_relations", []), } - hr_title = f'### Found the following attributes and relations for record {sys_id}:' - human_readable = create_human_readable(hr_title, result, params.get('sysparm_fields', '')) + hr_title = f"### Found the following attributes and relations for record {sys_id}:" + human_readable = create_human_readable(hr_title, result, params.get("sysparm_fields", "")) else: - context['ServiceNowCMDB.Record(val.ID===obj.ID)'] = { - 'Class': class_name, - 'SysID': sys_id - } - human_readable = f'Found no attributes and relations for record {sys_id}.' + context["ServiceNowCMDB.Record(val.ID===obj.ID)"] = {"Class": class_name, "SysID": sys_id} + human_readable = f"Found no attributes and relations for record {sys_id}." return human_readable, context, response @@ -287,31 +297,31 @@ def create_record_command(client: Client, args: dict) -> tuple[str, dict, Any]: Demisto Outputs. 
""" context: dict = defaultdict(list) - class_name = args.get('class', '') + class_name = args.get("class", "") params: dict = {} - if args.get('fields'): - params['sysparm_fields'] = args.get('fields') + if args.get("fields"): + params["sysparm_fields"] = args.get("fields") # Verify that sys_id and name were added so they can be used in the output of the command: - if 'sys_id' not in params.get('sysparm_fields', ''): - params['sysparm_fields'] += ',sys_id' - if 'name' not in params.get('sysparm_fields', ''): - params['sysparm_fields'] += ',name' - if args.get('relation_limit'): - params['sysparm_relation_limit'] = args.get('relation_limit') - if args.get('relation_offset'): - params['sysparm_relation_offset'] = args.get('relation_offset') + if "sys_id" not in params.get("sysparm_fields", ""): + params["sysparm_fields"] += ",sys_id" + if "name" not in params.get("sysparm_fields", ""): + params["sysparm_fields"] += ",name" + if args.get("relation_limit"): + params["sysparm_relation_limit"] = args.get("relation_limit") + if args.get("relation_offset"): + params["sysparm_relation_offset"] = args.get("relation_offset") data = create_request_data(CREAT_RECORD_DATA_FIELDS, args) response = client.create_record(class_name=class_name, params=params, data=str(data)) - result = response.get('result') + result = response.get("result") if result: - sys_id = result.get('attributes', {}).get('sys_id') + sys_id = result.get("attributes", {}).get("sys_id") context = create_record_context(class_name, sys_id, result) - hr_title = f'### Record {sys_id} was created successfully.' - human_readable = create_human_readable(hr_title, result, params.get('sysparm_fields', '')) + hr_title = f"### Record {sys_id} was created successfully." + human_readable = create_human_readable(hr_title, result, params.get("sysparm_fields", "")) else: - human_readable = 'Failed to create a new record.' + human_readable = "Failed to create a new record." return human_readable, context, response @@ -328,31 +338,31 @@ def update_record_command(client: Client, args: dict) -> tuple[str, dict, Any]: Demisto Outputs. 
""" context: dict = defaultdict(list) - class_name = args.get('class', '') - sys_id = args.get('sys_id', '') + class_name = args.get("class", "") + sys_id = args.get("sys_id", "") params: dict = {} - if args.get('fields'): - params['sysparm_fields'] = args.get('fields') + if args.get("fields"): + params["sysparm_fields"] = args.get("fields") # Verify that sys_id and name were added so they can be used in the output of the command: - if 'sys_id' not in params.get('sysparm_fields', ''): - params['sysparm_fields'] += ',sys_id' - if 'name' not in params.get('sysparm_fields', ''): - params['sysparm_fields'] += ',name' - if args.get('relation_limit'): - params['sysparm_relation_limit'] = args.get('relation_limit') - if args.get('relation_offset'): - params['sysparm_relation_offset'] = args.get('relation_offset') + if "sys_id" not in params.get("sysparm_fields", ""): + params["sysparm_fields"] += ",sys_id" + if "name" not in params.get("sysparm_fields", ""): + params["sysparm_fields"] += ",name" + if args.get("relation_limit"): + params["sysparm_relation_limit"] = args.get("relation_limit") + if args.get("relation_offset"): + params["sysparm_relation_offset"] = args.get("relation_offset") data = create_request_data(UPDATE_RECORD_DATA_FIELDS, args) response = client.update_record(class_name=class_name, sys_id=sys_id, data=str(data), params=params) - result = response.get('result') + result = response.get("result") if result: context = create_record_context(class_name, sys_id, result) - hr_title = f'### Updated record {sys_id} successfully.' - human_readable = create_human_readable(hr_title, result, params.get('sysparm_fields', '')) + hr_title = f"### Updated record {sys_id} successfully." + human_readable = create_human_readable(hr_title, result, params.get("sysparm_fields", "")) else: - human_readable = f'Failed to update record {sys_id}.' + human_readable = f"Failed to update record {sys_id}." return human_readable, context, response @@ -369,31 +379,31 @@ def add_relation_command(client: Client, args: dict) -> tuple[str, dict, Any]: Demisto Outputs. 
""" context: dict = defaultdict(list) - class_name = args.get('class', '') - sys_id = args.get('sys_id', '') + class_name = args.get("class", "") + sys_id = args.get("sys_id", "") params: dict = {} - if args.get('fields'): - params['sysparm_fields'] = args.get('fields') + if args.get("fields"): + params["sysparm_fields"] = args.get("fields") # Verify that sys_id and name were added so they can be used in the output of the command: - if 'sys_id' not in params.get('sysparm_fields', ''): - params['sysparm_fields'] += ',sys_id' - if 'name' not in params.get('sysparm_fields', ''): - params['sysparm_fields'] += ',name' - if args.get('relation_limit'): - params['sysparm_relation_limit'] = args.get('relation_limit') - if args.get('relation_offset'): - params['sysparm_relation_offset'] = args.get('relation_offset') + if "sys_id" not in params.get("sysparm_fields", ""): + params["sysparm_fields"] += ",sys_id" + if "name" not in params.get("sysparm_fields", ""): + params["sysparm_fields"] += ",name" + if args.get("relation_limit"): + params["sysparm_relation_limit"] = args.get("relation_limit") + if args.get("relation_offset"): + params["sysparm_relation_offset"] = args.get("relation_offset") data = create_request_data(ADD_RELATION_DATA_FIELDS, args) response = client.add_relation(class_name=class_name, sys_id=sys_id, data=str(data), params=params) - result = response.get('result') + result = response.get("result") if result: context = create_record_context(class_name, sys_id, result) - hr_title = f'### New relations were added to {sys_id} record successfully.' - human_readable = create_human_readable(hr_title, result, params.get('sysparm_fields', '')) + hr_title = f"### New relations were added to {sys_id} record successfully." + human_readable = create_human_readable(hr_title, result, params.get("sysparm_fields", "")) else: - human_readable = f'Failed to add new relations to record {sys_id}.' + human_readable = f"Failed to add new relations to record {sys_id}." return human_readable, context, response @@ -410,30 +420,30 @@ def delete_relation_command(client: Client, args: dict) -> tuple[str, dict, Any] Demisto Outputs. 
""" context: dict = defaultdict(list) - class_name = args.get('class', '') - sys_id = args.get('sys_id', '') - rel_sys_id = args.get('relation_sys_id', '') + class_name = args.get("class", "") + sys_id = args.get("sys_id", "") + rel_sys_id = args.get("relation_sys_id", "") params: dict = {} - if args.get('fields'): - params['sysparm_fields'] = args.get('fields') + if args.get("fields"): + params["sysparm_fields"] = args.get("fields") # Verify that sys_id and name were added so they can be used in the output of the command: - if 'sys_id' not in params.get('sysparm_fields', ''): - params['sysparm_fields'] += ',sys_id' - if 'name' not in params.get('sysparm_fields', ''): - params['sysparm_fields'] += ',name' - if args.get('relation_limit'): - params['sysparm_relation_limit'] = args.get('relation_limit') - if args.get('relation_offset'): - params['sysparm_relation_offset'] = args.get('relation_offset') + if "sys_id" not in params.get("sysparm_fields", ""): + params["sysparm_fields"] += ",sys_id" + if "name" not in params.get("sysparm_fields", ""): + params["sysparm_fields"] += ",name" + if args.get("relation_limit"): + params["sysparm_relation_limit"] = args.get("relation_limit") + if args.get("relation_offset"): + params["sysparm_relation_offset"] = args.get("relation_offset") response = client.delete_relation(class_name=class_name, sys_id=sys_id, rel_sys_id=rel_sys_id, params=params) - result = response.get('result') + result = response.get("result") if result: context = create_record_context(class_name, sys_id, result) - hr_title = f'### Deleted relation {rel_sys_id} successfully from {sys_id} record.' - human_readable = create_human_readable(hr_title, result, params.get('sysparm_fields', '')) + hr_title = f"### Deleted relation {rel_sys_id} successfully from {sys_id} record." + human_readable = create_human_readable(hr_title, result, params.get("sysparm_fields", "")) else: - human_readable = f'Failed to delete relation {rel_sys_id} from record {sys_id}.' + human_readable = f"Failed to delete relation {rel_sys_id} from record {sys_id}." return human_readable, context, response @@ -453,14 +463,16 @@ def test_module(client: Client) -> str: """ # Notify the user that test button can't be used when using OAuth 2.0: if client.use_oauth: - return_error('Test button cannot be used when using OAuth 2.0. Please use the !servicenow-cmdb-oauth-login ' - 'command followed by the !servicenow-cmdb-oauth-test command to test the instance.') + return_error( + "Test button cannot be used when using OAuth 2.0. Please use the !servicenow-cmdb-oauth-login " + "command followed by the !servicenow-cmdb-oauth-test command to test the instance." + ) try: - client.records_list(class_name='cmdb_ci_linux_server') + client.records_list(class_name="cmdb_ci_linux_server") except Exception as e: raise e - return 'ok' + return "ok" def oauth_test_module(client: Client, *_) -> tuple[str, dict[Any, Any], dict[Any, Any]]: @@ -468,14 +480,16 @@ def oauth_test_module(client: Client, *_) -> tuple[str, dict[Any, Any], dict[Any Test the instance configurations when using OAuth authorization. 
""" if not client.use_oauth: - return_error('!servicenow-cmdb-oauth-test command should be used only when using OAuth 2.0 authorization.\n ' - 'Please select the `Use OAuth Login` checkbox in the instance configuration before running this ' - 'command.') + return_error( + "!servicenow-cmdb-oauth-test command should be used only when using OAuth 2.0 authorization.\n " + "Please select the `Use OAuth Login` checkbox in the instance configuration before running this " + "command." + ) try: - client.records_list(class_name='cmdb_ci_linux_server') + client.records_list(class_name="cmdb_ci_linux_server") except Exception as e: raise e - hr = '### Instance Configured Successfully.\n' + hr = "### Instance Configured Successfully.\n" return hr, {}, {} @@ -491,77 +505,88 @@ def login_command(client: Client, args: dict[str, Any]) -> tuple[str, dict[Any, """ # Verify that the user selected the `Use OAuth Login` checkbox: if not client.use_oauth: - return_error('!servicenow-cmdb-oauth-login command can be used only when using OAuth 2.0 authorization.\n ' - 'Please select the `Use OAuth Login` checkbox in the instance configuration before running this ' - 'command.') - - username = args.get('username', '') - password = args.get('password', '') + return_error( + "!servicenow-cmdb-oauth-login command can be used only when using OAuth 2.0 authorization.\n " + "Please select the `Use OAuth Login` checkbox in the instance configuration before running this " + "command." + ) + + username = args.get("username", "") + password = args.get("password", "") try: client.snow_client.login(username, password) - hr = '### Logged in successfully.\n A refresh token was saved to the integration context and will be ' \ - 'used to generate a new access token once the current one expires.' + hr = ( + "### Logged in successfully.\n A refresh token was saved to the integration context and will be " + "used to generate a new access token once the current one expires." + ) except Exception as e: - return_error(f'Failed to login. Please verify that the provided username and password are correct, and that you' - f' entered the correct client id and client secret in the instance configuration (see ? for' - f'correct usage when using OAuth).\n\n{e}') + return_error( + f"Failed to login. Please verify that the provided username and password are correct, and that you" + f" entered the correct client id and client secret in the instance configuration (see ? 
for" + f"correct usage when using OAuth).\n\n{e}" + ) return hr, {}, {} -''' MAIN FUNCTION ''' +""" MAIN FUNCTION """ def main() -> None: - """main function, parses params and runs command functions - """ + """main function, parses params and runs command functions""" params = demisto.params() - url = params.get('url', '') - verify = not params.get('insecure', False) - proxy = params.get('proxy', False) - client_id = client_secret = '' - credentials = params.get('credentials', {}) - use_oauth = params.get('use_oauth', False) + url = params.get("url", "") + verify = not params.get("insecure", False) + proxy = params.get("proxy", False) + client_id = client_secret = "" + credentials = params.get("credentials", {}) + use_oauth = params.get("use_oauth", False) if use_oauth: - client_id = credentials.get('identifier') - client_secret = credentials.get('password') - - client = Client(credentials=credentials, use_oauth=use_oauth, client_id=client_id, - client_secret=client_secret, url=url, verify=verify, proxy=proxy) + client_id = credentials.get("identifier") + client_secret = credentials.get("password") + + client = Client( + credentials=credentials, + use_oauth=use_oauth, + client_id=client_id, + client_secret=client_secret, + url=url, + verify=verify, + proxy=proxy, + ) commands = { - 'servicenow-cmdb-oauth-login': login_command, - 'servicenow-cmdb-oauth-test': oauth_test_module, - 'servicenow-cmdb-records-list': records_list_command, - 'servicenow-cmdb-record-get-by-id': get_record_command, - 'servicenow-cmdb-record-create': create_record_command, - 'servicenow-cmdb-record-update': update_record_command, - 'servicenow-cmdb-record-add-relations': add_relation_command, - 'servicenow-cmdb-record-delete-relations': delete_relation_command + "servicenow-cmdb-oauth-login": login_command, + "servicenow-cmdb-oauth-test": oauth_test_module, + "servicenow-cmdb-records-list": records_list_command, + "servicenow-cmdb-record-get-by-id": get_record_command, + "servicenow-cmdb-record-create": create_record_command, + "servicenow-cmdb-record-update": update_record_command, + "servicenow-cmdb-record-add-relations": add_relation_command, + "servicenow-cmdb-record-delete-relations": delete_relation_command, } command = demisto.command() - demisto.debug(f'Command being called is {command}') + demisto.debug(f"Command being called is {command}") try: - if demisto.command() == 'test-module': + if demisto.command() == "test-module": # This is the call made when pressing the integration Test button. 
result = test_module(client) return_results(result) elif command in commands: return_outputs(*commands[command](client, demisto.args())) # type: ignore else: - return_error('Command not found.') + return_error("Command not found.") # Log exceptions and return errors except Exception as e: - return_error(f'Failed to execute {command} command.\nError:\n{str(e)}') + return_error(f"Failed to execute {command} command.\nError:\n{e!s}") from ServiceNowApiModule import * # noqa: E402 - -''' ENTRY POINT ''' -if __name__ in ('__main__', '__builtin__', 'builtins'): +""" ENTRY POINT """ +if __name__ in ("__main__", "__builtin__", "builtins"): main() diff --git a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB_test.py b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB_test.py index 0883875ba37a..d3103c4a3375 100644 --- a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB_test.py +++ b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB_test.py @@ -1,19 +1,41 @@ import pytest -from ServiceNow_CMDB import Client, records_list_command, get_record_command, create_record_command, \ - update_record_command, add_relation_command, delete_relation_command -from test_data.result_constants import EXPECTED_RECORDS_LIST_NO_RECORDS, \ - EXPECTED_RECORDS_LIST_WITH_RECORDS, EXPECTED_GET_RECORD, EXPECTED_CREATE_RECORD, EXPECTED_UPDATE_RECORD, \ - EXPECTED_ADD_RELATION, EXPECTED_DELETE_RELATION -from test_data.response_constants import RECORDS_LIST_EMPTY_RESPONSE, RECORDS_LIST_RESPONSE_WITH_RECORDS, \ - GET_RECORD_RESPONSE, CREATE_RECORD_RESPONSE, UPDATE_RECORD_RESPONSE, \ - ADD_RELATION_RESPONSE, DELETE_RELATION_RESPONSE +from ServiceNow_CMDB import ( + Client, + add_relation_command, + create_record_command, + delete_relation_command, + get_record_command, + records_list_command, + update_record_command, +) from ServiceNowApiModule import ServiceNowClient +from test_data.response_constants import ( + ADD_RELATION_RESPONSE, + CREATE_RECORD_RESPONSE, + DELETE_RELATION_RESPONSE, + GET_RECORD_RESPONSE, + RECORDS_LIST_EMPTY_RESPONSE, + RECORDS_LIST_RESPONSE_WITH_RECORDS, + UPDATE_RECORD_RESPONSE, +) +from test_data.result_constants import ( + EXPECTED_ADD_RELATION, + EXPECTED_CREATE_RECORD, + EXPECTED_DELETE_RELATION, + EXPECTED_GET_RECORD, + EXPECTED_RECORDS_LIST_NO_RECORDS, + EXPECTED_RECORDS_LIST_WITH_RECORDS, + EXPECTED_UPDATE_RECORD, +) -@pytest.mark.parametrize('response, expected_result', [ - (RECORDS_LIST_RESPONSE_WITH_RECORDS, EXPECTED_RECORDS_LIST_WITH_RECORDS), - (RECORDS_LIST_EMPTY_RESPONSE, EXPECTED_RECORDS_LIST_NO_RECORDS) -]) +@pytest.mark.parametrize( + "response, expected_result", + [ + (RECORDS_LIST_RESPONSE_WITH_RECORDS, EXPECTED_RECORDS_LIST_WITH_RECORDS), + (RECORDS_LIST_EMPTY_RESPONSE, EXPECTED_RECORDS_LIST_NO_RECORDS), + ], +) def test_records_list_command(response, expected_result, mocker): """ Given: @@ -26,8 +48,8 @@ def test_records_list_command(response, expected_result, mocker): keys. In the second case, when no records are in the response, validate the context has only the `Class` key. 
""" client = Client({}) - mocker.patch.object(ServiceNowClient, 'http_request', return_value=response) - result = records_list_command(client, args={'class': 'test_class'}) + mocker.patch.object(ServiceNowClient, "http_request", return_value=response) + result = records_list_command(client, args={"class": "test_class"}) assert expected_result == result[1] @@ -42,8 +64,8 @@ def test_get_record_command(mocker): - Validate that the output context of the command contains all attributes and relations that were returned. """ client = Client(credentials={}) - mocker.patch.object(ServiceNowClient, 'http_request', return_value=GET_RECORD_RESPONSE) - result = get_record_command(client, args={'class': 'test_class', 'sys_id': 'record_id'}) + mocker.patch.object(ServiceNowClient, "http_request", return_value=GET_RECORD_RESPONSE) + result = get_record_command(client, args={"class": "test_class", "sys_id": "record_id"}) assert result[1] == EXPECTED_GET_RECORD @@ -59,8 +81,8 @@ def test_create_record_command(mocker): response, and that the inbound and outbound relations lists are empty. """ client = Client(credentials={}) - mocker.patch.object(ServiceNowClient, 'http_request', return_value=CREATE_RECORD_RESPONSE) - result = create_record_command(client, args={'class': 'test_class', 'attributes': 'name=Test Create Record'}) + mocker.patch.object(ServiceNowClient, "http_request", return_value=CREATE_RECORD_RESPONSE) + result = create_record_command(client, args={"class": "test_class", "attributes": "name=Test Create Record"}) assert result[1] == EXPECTED_CREATE_RECORD @@ -74,9 +96,10 @@ def test_update_record_command(mocker): - Validate that the context output was changed according to the new attributes. """ client = Client(credentials={}) - mocker.patch.object(ServiceNowClient, 'http_request', return_value=UPDATE_RECORD_RESPONSE) - result = update_record_command(client, args={'class': 'test_class', 'sys_id': 'record_id', - 'attributes': 'name=Test Create Record'}) + mocker.patch.object(ServiceNowClient, "http_request", return_value=UPDATE_RECORD_RESPONSE) + result = update_record_command( + client, args={"class": "test_class", "sys_id": "record_id", "attributes": "name=Test Create Record"} + ) assert result[1] == EXPECTED_UPDATE_RECORD @@ -91,12 +114,15 @@ def test_add_relation_command(mocker): - Validate that the `InboundRelations` key in the context output contains the added relation. """ client = Client(credentials={}) - mocker.patch.object(ServiceNowClient, 'http_request', return_value=ADD_RELATION_RESPONSE) - result = add_relation_command(client, args={ - 'class': 'test_class', - 'sys_id': 'record_id', - 'inbound_relations': "[{'type': 'relation_type', 'target':'target', 'sys_class_name':'class_name'}]" - }) + mocker.patch.object(ServiceNowClient, "http_request", return_value=ADD_RELATION_RESPONSE) + result = add_relation_command( + client, + args={ + "class": "test_class", + "sys_id": "record_id", + "inbound_relations": "[{'type': 'relation_type', 'target':'target', 'sys_class_name':'class_name'}]", + }, + ) assert result[1] == EXPECTED_ADD_RELATION @@ -111,7 +137,6 @@ def test_delete_relation_command(mocker): - Validate that the `InboundRelations` key in the context output is empty. 
""" client = Client(credentials={}) - mocker.patch.object(ServiceNowClient, 'http_request', return_value=DELETE_RELATION_RESPONSE) - result = delete_relation_command(client, args={'class': 'test_class', 'sys_id': 'record_id', - 'relation_sys_id': 'rel_id'}) + mocker.patch.object(ServiceNowClient, "http_request", return_value=DELETE_RELATION_RESPONSE) + result = delete_relation_command(client, args={"class": "test_class", "sys_id": "record_id", "relation_sys_id": "rel_id"}) assert result[1] == EXPECTED_DELETE_RELATION diff --git a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/test_data/response_constants.py b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/test_data/response_constants.py index 8bb36c08659c..0639f536638a 100644 --- a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/test_data/response_constants.py +++ b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/test_data/response_constants.py @@ -1,134 +1,120 @@ -HANDLE_SYSPARMS_ARGS = { - 'class': 'class_name', - 'limit': '3', - 'offset': '4', - 'query': 'query_example' -} +HANDLE_SYSPARMS_ARGS = {"class": "class_name", "limit": "3", "offset": "4", "query": "query_example"} -HANDLE_SYSPARMS_PARAMS = [ - [], - ['query'], - ['limit', 'offset'], - ['limit', 'offset', 'query'] -] +HANDLE_SYSPARMS_PARAMS = [[], ["query"], ["limit", "offset"], ["limit", "offset", "query"]] RECORDS_LIST_RESPONSE_WITH_RECORDS = { - 'result': [{ - 'sys_id': '0ad329e3db27901026fca015ca9619fb', - 'name': 'Test record 1' - }, { - 'sys_id': '2a41eb4e1b739810042611b4bd4bcb9d', - 'name': 'Test record 2' - }, { - 'sys_id': '38b05eb1db7f581026fca015ca96198a', - 'name': 'Test record 3' - }] + "result": [ + {"sys_id": "0ad329e3db27901026fca015ca9619fb", "name": "Test record 1"}, + {"sys_id": "2a41eb4e1b739810042611b4bd4bcb9d", "name": "Test record 2"}, + {"sys_id": "38b05eb1db7f581026fca015ca96198a", "name": "Test record 3"}, + ] } -RECORDS_LIST_EMPTY_RESPONSE = { - 'result': [] -} +RECORDS_LIST_EMPTY_RESPONSE = {"result": []} GET_RECORD_RESPONSE = { - 'result': { - 'outbound_relations': [{ - 'sys_id': 'out_rel_1', - 'type': { - 'display_value': 'Uses::Used by', - 'link': - 'https://test.service-now.com/api/now/table/cmdb_rel_type/cb5592603751200032ff8c00dfbe5d17', - 'value': 'cb5592603751200032ff8c00dfbe5d17' - }, - 'target': { - 'display_value': 'local hd', - 'link': - 'https://test.service-now.com/api/now/cmdb/instance/cmdb_ci/62cfa627c0a8010e01f01b87035ba803', - 'value': '62cfa627c0a8010e01f01b87035ba803' + "result": { + "outbound_relations": [ + { + "sys_id": "out_rel_1", + "type": { + "display_value": "Uses::Used by", + "link": "https://test.service-now.com/api/now/table/cmdb_rel_type/cb5592603751200032ff8c00dfbe5d17", + "value": "cb5592603751200032ff8c00dfbe5d17", + }, + "target": { + "display_value": "local hd", + "link": "https://test.service-now.com/api/now/cmdb/instance/cmdb_ci/62cfa627c0a8010e01f01b87035ba803", + "value": "62cfa627c0a8010e01f01b87035ba803", + }, } - }], - 'attributes': { - 'sys_class_name': 'test_class', - 'sys_created_on': '2020-10-05 11:04:20', - 'name': 'Test', - 'sys_id': 'record_id' + ], + "attributes": { + "sys_class_name": "test_class", + "sys_created_on": "2020-10-05 11:04:20", + "name": "Test", + "sys_id": "record_id", }, - 'inbound_relations': [{ - 'sys_id': '0a0afcb1db7b581026fca015ca9619f1', - 'type': { - 'display_value': 'Uses::Used by', - 'link': 'https://test.service-now.com/api/now/table/cmdb_rel_type/cb5592603751200032ff8c00dfbe5d17', - 'value': 'cb5592603751200032ff8c00dfbe5d17' - }, - 'target': { - 'display_value': 'CMS App FLX', 
- 'link': 'https://test.service-now.com/api/now/cmdb/instance/cmdb_ci' - '/829e953a0ad3370200af63483498b1ea', - 'value': '829e953a0ad3370200af63483498b1ea' + "inbound_relations": [ + { + "sys_id": "0a0afcb1db7b581026fca015ca9619f1", + "type": { + "display_value": "Uses::Used by", + "link": "https://test.service-now.com/api/now/table/cmdb_rel_type/cb5592603751200032ff8c00dfbe5d17", + "value": "cb5592603751200032ff8c00dfbe5d17", + }, + "target": { + "display_value": "CMS App FLX", + "link": "https://test.service-now.com/api/now/cmdb/instance/cmdb_ci" "/829e953a0ad3370200af63483498b1ea", + "value": "829e953a0ad3370200af63483498b1ea", + }, } - }] + ], } } CREATE_RECORD_RESPONSE = { - 'result': { - 'outbound_relations': [], - 'attributes': { - 'sys_class_name': 'test_class', - 'sys_created_on': '2020-11-11 12:24:59', - 'name': 'Test Create Record', - 'sys_id': 'record_id' + "result": { + "outbound_relations": [], + "attributes": { + "sys_class_name": "test_class", + "sys_created_on": "2020-11-11 12:24:59", + "name": "Test Create Record", + "sys_id": "record_id", }, - 'inbound_relations': [] + "inbound_relations": [], } } UPDATE_RECORD_RESPONSE = { - 'result': { - 'outbound_relations': [], - 'attributes': { - 'sys_class_name': 'test_class', - 'sys_created_on': '2020-11-11 12:24:59', - 'name': 'Update Name Test', - 'sys_id': 'record_id' + "result": { + "outbound_relations": [], + "attributes": { + "sys_class_name": "test_class", + "sys_created_on": "2020-11-11 12:24:59", + "name": "Update Name Test", + "sys_id": "record_id", }, - 'inbound_relations': [] + "inbound_relations": [], } } ADD_RELATION_RESPONSE = { - 'result': { - 'outbound_relations': [], - 'attributes': { - 'sys_class_name': 'test_class', - 'sys_created_on': '2020-11-11 12:24:59', - 'name': 'Add Relation Test', - 'sys_id': 'record_id' + "result": { + "outbound_relations": [], + "attributes": { + "sys_class_name": "test_class", + "sys_created_on": "2020-11-11 12:24:59", + "name": "Add Relation Test", + "sys_id": "record_id", }, - 'inbound_relations': [{ - 'sys_id': 'inbound_rel', - 'type': { - 'display_value': 'Uses::Used by', - 'link': 'https://test.service-now.com/api/now/table/cmdb_rel_type/cb5592603751200032ff8c00dfbe5d17', - 'value': 'cb5592603751200032ff8c00dfbe5d17' - }, - 'target': { - 'display_value': 'CMS App FLX', - 'link': 'https://test.service-now.com/api/now/cmdb/instance/cmdb_ci/829e953a0ad3370200af63483498b1ea', - 'value': '829e953a0ad3370200af63483498b1ea' + "inbound_relations": [ + { + "sys_id": "inbound_rel", + "type": { + "display_value": "Uses::Used by", + "link": "https://test.service-now.com/api/now/table/cmdb_rel_type/cb5592603751200032ff8c00dfbe5d17", + "value": "cb5592603751200032ff8c00dfbe5d17", + }, + "target": { + "display_value": "CMS App FLX", + "link": "https://test.service-now.com/api/now/cmdb/instance/cmdb_ci/829e953a0ad3370200af63483498b1ea", + "value": "829e953a0ad3370200af63483498b1ea", + }, } - }] + ], } } DELETE_RELATION_RESPONSE = { - 'result': { - 'outbound_relations': [], - 'attributes': { - 'sys_class_name': 'test_class', - 'sys_created_on': '2020-11-11 12:24:59', - 'name': 'Delete Relation Test', - 'sys_id': 'record_id' + "result": { + "outbound_relations": [], + "attributes": { + "sys_class_name": "test_class", + "sys_created_on": "2020-11-11 12:24:59", + "name": "Delete Relation Test", + "sys_id": "record_id", }, - 'inbound_relations': [] + "inbound_relations": [], } } diff --git a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/test_data/result_constants.py 
b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/test_data/result_constants.py index 3d43e3e83103..8ec8e3d880d4 100644 --- a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/test_data/result_constants.py +++ b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/test_data/result_constants.py @@ -1,136 +1,129 @@ EXPECTED_RECORDS_LIST_WITH_RECORDS = { - 'ServiceNowCMDB(val.ID===obj.ID)': { - 'Class': 'test_class', - 'Records': [{ - 'sys_id': '0ad329e3db27901026fca015ca9619fb', - 'name': 'Test record 1' - }, { - 'sys_id': '2a41eb4e1b739810042611b4bd4bcb9d', - 'name': 'Test record 2' - }, { - 'sys_id': '38b05eb1db7f581026fca015ca96198a', - 'name': 'Test record 3' - }] + "ServiceNowCMDB(val.ID===obj.ID)": { + "Class": "test_class", + "Records": [ + {"sys_id": "0ad329e3db27901026fca015ca9619fb", "name": "Test record 1"}, + {"sys_id": "2a41eb4e1b739810042611b4bd4bcb9d", "name": "Test record 2"}, + {"sys_id": "38b05eb1db7f581026fca015ca96198a", "name": "Test record 3"}, + ], } } -EXPECTED_RECORDS_LIST_NO_RECORDS = { - 'ServiceNowCMDB(val.ID===obj.ID)': { - 'Class': 'test_class' - } -} +EXPECTED_RECORDS_LIST_NO_RECORDS = {"ServiceNowCMDB(val.ID===obj.ID)": {"Class": "test_class"}} EXPECTED_GET_RECORD = { - 'ServiceNowCMDB.Record(val.ID===obj.ID)': { - 'Class': 'test_class', - 'SysID': 'record_id', - 'Attributes': { - 'sys_class_name': 'test_class', - 'sys_created_on': '2020-10-05 11:04:20', - 'name': 'Test', - 'sys_id': 'record_id' + "ServiceNowCMDB.Record(val.ID===obj.ID)": { + "Class": "test_class", + "SysID": "record_id", + "Attributes": { + "sys_class_name": "test_class", + "sys_created_on": "2020-10-05 11:04:20", + "name": "Test", + "sys_id": "record_id", }, - 'InboundRelations': [{ - 'sys_id': '0a0afcb1db7b581026fca015ca9619f1', - 'type': { - 'display_value': 'Uses::Used by', - 'link': 'https://test.service-now.com/api/now/table/cmdb_rel_type/cb5592603751200032ff8c00dfbe5d17', - 'value': 'cb5592603751200032ff8c00dfbe5d17' - }, - 'target': { - 'display_value': 'CMS App FLX', - 'link': 'https://test.service-now.com/api/now/cmdb/instance/cmdb_ci' - '/829e953a0ad3370200af63483498b1ea', - 'value': '829e953a0ad3370200af63483498b1ea' + "InboundRelations": [ + { + "sys_id": "0a0afcb1db7b581026fca015ca9619f1", + "type": { + "display_value": "Uses::Used by", + "link": "https://test.service-now.com/api/now/table/cmdb_rel_type/cb5592603751200032ff8c00dfbe5d17", + "value": "cb5592603751200032ff8c00dfbe5d17", + }, + "target": { + "display_value": "CMS App FLX", + "link": "https://test.service-now.com/api/now/cmdb/instance/cmdb_ci" "/829e953a0ad3370200af63483498b1ea", + "value": "829e953a0ad3370200af63483498b1ea", + }, } - }], - 'OutboundRelations': [{ - 'sys_id': 'out_rel_1', - 'type': { - 'display_value': 'Uses::Used by', - 'link': - 'https://test.service-now.com/api/now/table/cmdb_rel_type/cb5592603751200032ff8c00dfbe5d17', - 'value': 'cb5592603751200032ff8c00dfbe5d17' - }, - 'target': { - 'display_value': 'local hd', - 'link': - 'https://test.service-now.com/api/now/cmdb/instance/cmdb_ci/62cfa627c0a8010e01f01b87035ba803', - 'value': '62cfa627c0a8010e01f01b87035ba803' + ], + "OutboundRelations": [ + { + "sys_id": "out_rel_1", + "type": { + "display_value": "Uses::Used by", + "link": "https://test.service-now.com/api/now/table/cmdb_rel_type/cb5592603751200032ff8c00dfbe5d17", + "value": "cb5592603751200032ff8c00dfbe5d17", + }, + "target": { + "display_value": "local hd", + "link": "https://test.service-now.com/api/now/cmdb/instance/cmdb_ci/62cfa627c0a8010e01f01b87035ba803", + "value": 
"62cfa627c0a8010e01f01b87035ba803", + }, } - }] + ], } } EXPECTED_CREATE_RECORD = { - 'ServiceNowCMDB.Record(val.ID===obj.ID)': { - 'Class': 'test_class', - 'SysID': 'record_id', - 'Attributes': { - 'sys_class_name': 'test_class', - 'sys_created_on': '2020-11-11 12:24:59', - 'name': 'Test Create Record', - 'sys_id': 'record_id' + "ServiceNowCMDB.Record(val.ID===obj.ID)": { + "Class": "test_class", + "SysID": "record_id", + "Attributes": { + "sys_class_name": "test_class", + "sys_created_on": "2020-11-11 12:24:59", + "name": "Test Create Record", + "sys_id": "record_id", }, - 'InboundRelations': [], - 'OutboundRelations': [] + "InboundRelations": [], + "OutboundRelations": [], } } EXPECTED_UPDATE_RECORD = { - 'ServiceNowCMDB.Record(val.ID===obj.ID)': { - 'Class': 'test_class', - 'SysID': 'record_id', - 'Attributes': { - 'sys_class_name': 'test_class', - 'sys_created_on': '2020-11-11 12:24:59', - 'name': 'Update Name Test', - 'sys_id': 'record_id' + "ServiceNowCMDB.Record(val.ID===obj.ID)": { + "Class": "test_class", + "SysID": "record_id", + "Attributes": { + "sys_class_name": "test_class", + "sys_created_on": "2020-11-11 12:24:59", + "name": "Update Name Test", + "sys_id": "record_id", }, - 'InboundRelations': [], - 'OutboundRelations': [] + "InboundRelations": [], + "OutboundRelations": [], } } EXPECTED_ADD_RELATION = { - 'ServiceNowCMDB.Record(val.ID===obj.ID)': { - 'Class': 'test_class', - 'SysID': 'record_id', - 'Attributes': { - 'sys_class_name': 'test_class', - 'sys_created_on': '2020-11-11 12:24:59', - 'name': 'Add Relation Test', - 'sys_id': 'record_id' + "ServiceNowCMDB.Record(val.ID===obj.ID)": { + "Class": "test_class", + "SysID": "record_id", + "Attributes": { + "sys_class_name": "test_class", + "sys_created_on": "2020-11-11 12:24:59", + "name": "Add Relation Test", + "sys_id": "record_id", }, - 'InboundRelations': [{ - 'sys_id': 'inbound_rel', - 'type': { - 'display_value': 'Uses::Used by', - 'link': 'https://test.service-now.com/api/now/table/cmdb_rel_type/cb5592603751200032ff8c00dfbe5d17', - 'value': 'cb5592603751200032ff8c00dfbe5d17' - }, - 'target': { - 'display_value': 'CMS App FLX', - 'link': 'https://test.service-now.com/api/now/cmdb/instance/cmdb_ci' - '/829e953a0ad3370200af63483498b1ea', - 'value': '829e953a0ad3370200af63483498b1ea' + "InboundRelations": [ + { + "sys_id": "inbound_rel", + "type": { + "display_value": "Uses::Used by", + "link": "https://test.service-now.com/api/now/table/cmdb_rel_type/cb5592603751200032ff8c00dfbe5d17", + "value": "cb5592603751200032ff8c00dfbe5d17", + }, + "target": { + "display_value": "CMS App FLX", + "link": "https://test.service-now.com/api/now/cmdb/instance/cmdb_ci" "/829e953a0ad3370200af63483498b1ea", + "value": "829e953a0ad3370200af63483498b1ea", + }, } - }], - 'OutboundRelations': [] + ], + "OutboundRelations": [], } } EXPECTED_DELETE_RELATION = { - 'ServiceNowCMDB.Record(val.ID===obj.ID)': { - 'Class': 'test_class', - 'SysID': 'record_id', - 'Attributes': { - 'sys_class_name': 'test_class', - 'sys_created_on': '2020-11-11 12:24:59', - 'name': 'Delete Relation Test', - 'sys_id': 'record_id' + "ServiceNowCMDB.Record(val.ID===obj.ID)": { + "Class": "test_class", + "SysID": "record_id", + "Attributes": { + "sys_class_name": "test_class", + "sys_created_on": "2020-11-11 12:24:59", + "name": "Delete Relation Test", + "sys_id": "record_id", }, - 'InboundRelations': [], - 'OutboundRelations': [] + "InboundRelations": [], + "OutboundRelations": [], } } diff --git a/Packs/ServiceNow/Integrations/ServiceNow_IAM/ServiceNow_IAM.py 
b/Packs/ServiceNow/Integrations/ServiceNow_IAM/ServiceNow_IAM.py index 2213f7b88e8f..08e00014469f 100644 --- a/Packs/ServiceNow/Integrations/ServiceNow_IAM/ServiceNow_IAM.py +++ b/Packs/ServiceNow/Integrations/ServiceNow_IAM/ServiceNow_IAM.py @@ -1,19 +1,18 @@ -import demistomock as demisto # noqa: F401 -from CommonServerPython import * # noqa: F401 # noqa: F401 # noqa: F401 # noqa: F401 # noqa: F401 - - import traceback + +import demistomock as demisto # noqa: F401 import urllib3 +from CommonServerPython import * # noqa: F401 # Disable insecure warnings urllib3.disable_warnings() -IAM_GET_USER_ATTRIBUTES = ['id', 'user_name', 'email'] -'''CLIENT CLASS''' +IAM_GET_USER_ATTRIBUTES = ["id", "user_name", "email"] +"""CLIENT CLASS""" class Client(BaseClient): @@ -22,73 +21,56 @@ class Client(BaseClient): """ def test(self): - uri = '/table/sys_user?sysparm_limit=1' - self._http_request(method='GET', url_suffix=uri) + uri = "/table/sys_user?sysparm_limit=1" + self._http_request(method="GET", url_suffix=uri) def get_user(self, filter_name: str, filter_value: str): - uri = 'table/sys_user' - query_params = { - filter_name: filter_value - } - - res = self._http_request( - method='GET', - url_suffix=uri, - params=query_params - ) + uri = "table/sys_user" + query_params = {filter_name: filter_value} + + res = self._http_request(method="GET", url_suffix=uri, params=query_params) - if res and len(res.get('result', [])) > 0: - return res.get('result')[0] + if res and len(res.get("result", [])) > 0: + return res.get("result")[0] return None def create_user(self, user_data): - uri = 'table/sys_user' - res = self._http_request( - method='POST', - url_suffix=uri, - json_data=user_data - ) - return res.get('result') + uri = "table/sys_user" + res = self._http_request(method="POST", url_suffix=uri, json_data=user_data) + return res.get("result") def update_user(self, user_id, user_data): - uri = f'/table/sys_user/{user_id}' - res = self._http_request( - method='PATCH', - url_suffix=uri, - json_data=user_data - ) - return res.get('result') + uri = f"/table/sys_user/{user_id}" + res = self._http_request(method="PATCH", url_suffix=uri, json_data=user_data) + return res.get("result") def get_service_now_fields(self): service_now_fields = {} - uri = 'table/sys_dictionary?sysparm_query=name=sys_user' - res = self._http_request( - method='GET', - url_suffix=uri - ) + uri = "table/sys_dictionary?sysparm_query=name=sys_user" + res = self._http_request(method="GET", url_suffix=uri) - elements = res.get('result', []) + elements = res.get("result", []) for elem in elements: - if elem.get('element'): - field_name = elem.get('element') - description = elem.get('sys_name') + if elem.get("element"): + field_name = elem.get("element") + description = elem.get("sys_name") service_now_fields[field_name] = description return service_now_fields -'''HELPER FUNCTIONS''' +"""HELPER FUNCTIONS""" def handle_exception(user_profile, e, action): - """ Handles failed responses from ServiceNow API by setting the User Profile object with the results. + """Handles failed responses from ServiceNow API by setting the User Profile object with the results. Args: user_profile (IAMUserProfile): The User Profile object. e (Exception): The exception error. If DemistoException, holds the response json. action (IAMActions): An enum represents the current action (get, update, create, etc). 
""" - if e.__class__ is DemistoException and hasattr(e, 'res') and e.res is not None: + if e.__class__ is DemistoException and hasattr(e, "res") and e.res is not None: error_code = e.res.status_code try: resp = e.res.json() @@ -96,19 +78,16 @@ def handle_exception(user_profile, e, action): except ValueError: error_message = str(e) else: - error_code = '' + error_code = "" error_message = str(e) - user_profile.set_result(action=action, - success=False, - error_code=error_code, - error_message=error_message) + user_profile.set_result(action=action, success=False, error_code=error_code, error_message=error_message) demisto.error(traceback.format_exc()) def get_error_details(res): - """ Parses the error details retrieved from ServiceNow and outputs the resulted string. + """Parses the error details retrieved from ServiceNow and outputs the resulted string. Args: res (dict): The data retrieved from ServiceNow. @@ -116,17 +95,17 @@ def get_error_details(res): Returns: (str) The parsed error details. """ - message = res.get('error', {}).get('message') - details = res.get('error', {}).get('detail') - return f'{message}: {details}' + message = res.get("error", {}).get("message") + details = res.get("error", {}).get("detail") + return f"{message}: {details}" -'''COMMAND FUNCTIONS''' +"""COMMAND FUNCTIONS""" def test_module(client): client.test() - return_results('ok') + return_results("ok") def get_mapping_fields_command(client): @@ -140,28 +119,26 @@ def get_mapping_fields_command(client): def get_user_command(client, args, mapper_in, mapper_out): - user_profile = IAMUserProfile(user_profile=args.get('user-profile'), mapper=mapper_out, - incident_type=IAMUserProfile.UPDATE_INCIDENT_TYPE) + user_profile = IAMUserProfile( + user_profile=args.get("user-profile"), mapper=mapper_out, incident_type=IAMUserProfile.UPDATE_INCIDENT_TYPE + ) try: iam_attr, iam_attr_value = user_profile.get_first_available_iam_user_attr(IAM_GET_USER_ATTRIBUTES) - service_now_filter_name: str = 'sys_id' if iam_attr == 'id' else iam_attr + service_now_filter_name: str = "sys_id" if iam_attr == "id" else iam_attr service_now_user = client.get_user(service_now_filter_name, iam_attr_value) if not service_now_user: error_code, error_message = IAMErrors.USER_DOES_NOT_EXIST - user_profile.set_result(action=IAMActions.GET_USER, - success=False, - error_code=error_code, - error_message=error_message) + user_profile.set_result(action=IAMActions.GET_USER, success=False, error_code=error_code, error_message=error_message) else: user_profile.update_with_app_data(service_now_user, mapper_in) user_profile.set_result( action=IAMActions.GET_USER, success=True, - active=service_now_user.get('active') == 'true', - iden=service_now_user.get('sys_id'), - email=service_now_user.get('email'), - username=service_now_user.get('user_name'), - details=service_now_user + active=service_now_user.get("active") == "true", + iden=service_now_user.get("sys_id"), + email=service_now_user.get("email"), + username=service_now_user.get("user_name"), + details=service_now_user, ) except Exception as e: @@ -171,37 +148,34 @@ def get_user_command(client, args, mapper_in, mapper_out): def disable_user_command(client, args, is_command_enabled, mapper_out): - user_profile = IAMUserProfile(user_profile=args.get('user-profile'), mapper=mapper_out, - incident_type=IAMUserProfile.UPDATE_INCIDENT_TYPE) + user_profile = IAMUserProfile( + user_profile=args.get("user-profile"), mapper=mapper_out, incident_type=IAMUserProfile.UPDATE_INCIDENT_TYPE + ) if not is_command_enabled: 
- user_profile.set_result(action=IAMActions.DISABLE_USER, - skip=True, - skip_reason='Command is disabled.') + user_profile.set_result(action=IAMActions.DISABLE_USER, skip=True, skip_reason="Command is disabled.") else: try: iam_attr, iam_attr_value = user_profile.get_first_available_iam_user_attr(IAM_GET_USER_ATTRIBUTES) - service_now_filter_name: str = 'sys_id' if iam_attr == 'id' else iam_attr + service_now_filter_name: str = "sys_id" if iam_attr == "id" else iam_attr service_now_user = client.get_user(service_now_filter_name, iam_attr_value) if not service_now_user: _, error_message = IAMErrors.USER_DOES_NOT_EXIST - user_profile.set_result(action=IAMActions.DISABLE_USER, - skip=True, - skip_reason=error_message) + user_profile.set_result(action=IAMActions.DISABLE_USER, skip=True, skip_reason=error_message) else: - if service_now_user.get('active', 'true') == 'false': + if service_now_user.get("active", "true") == "false": user_profile.set_user_is_already_disabled(service_now_user) else: - user_id = service_now_user.get('sys_id') - user_data = {'active': False} + user_id = service_now_user.get("sys_id") + user_data = {"active": False} updated_user = client.update_user(user_id, user_data) user_profile.set_result( action=IAMActions.DISABLE_USER, success=True, active=False, - iden=updated_user.get('sys_id'), - email=updated_user.get('email'), - username=updated_user.get('user_name'), - details=updated_user + iden=updated_user.get("sys_id"), + email=updated_user.get("email"), + username=updated_user.get("user_name"), + details=updated_user, ) except Exception as e: @@ -211,35 +185,32 @@ def disable_user_command(client, args, is_command_enabled, mapper_out): def create_user_command(client, args, mapper_out, is_command_enabled, is_update_enabled, is_enable_enabled): - user_profile = IAMUserProfile(user_profile=args.get('user-profile'), mapper=mapper_out, - incident_type=IAMUserProfile.CREATE_INCIDENT_TYPE) + user_profile = IAMUserProfile( + user_profile=args.get("user-profile"), mapper=mapper_out, incident_type=IAMUserProfile.CREATE_INCIDENT_TYPE + ) if not is_command_enabled: - user_profile.set_result(action=IAMActions.CREATE_USER, - skip=True, - skip_reason='Command is disabled.') + user_profile.set_result(action=IAMActions.CREATE_USER, skip=True, skip_reason="Command is disabled.") else: try: iam_attr, iam_attr_value = user_profile.get_first_available_iam_user_attr(IAM_GET_USER_ATTRIBUTES) - service_now_filter_name: str = 'sys_id' if iam_attr == 'id' else iam_attr + service_now_filter_name: str = "sys_id" if iam_attr == "id" else iam_attr service_now_user = client.get_user(service_now_filter_name, iam_attr_value) if service_now_user: # if user exists, update it - user_profile = update_user_command(client, args, mapper_out, is_update_enabled, - is_enable_enabled, False, False) + user_profile = update_user_command(client, args, mapper_out, is_update_enabled, is_enable_enabled, False, False) else: - service_now_profile = user_profile.map_object(mapper_out, - incident_type=IAMUserProfile.CREATE_INCIDENT_TYPE) + service_now_profile = user_profile.map_object(mapper_out, incident_type=IAMUserProfile.CREATE_INCIDENT_TYPE) created_user = client.create_user(service_now_profile) user_profile.set_result( action=IAMActions.CREATE_USER, success=True, - active=created_user.get('active') == 'true', - iden=created_user.get('sys_id'), - email=created_user.get('email'), - username=created_user.get('user_name'), - details=created_user + active=created_user.get("active") == "true", + 
iden=created_user.get("sys_id"), + email=created_user.get("email"), + username=created_user.get("user_name"), + details=created_user, ) except Exception as e: @@ -248,48 +219,44 @@ def create_user_command(client, args, mapper_out, is_command_enabled, is_update_ return user_profile -def update_user_command(client, args, mapper_out, is_command_enabled, is_enable_enabled, - is_create_user_enabled, create_if_not_exists): - user_profile = IAMUserProfile(user_profile=args.get('user-profile'), mapper=mapper_out, - incident_type=IAMUserProfile.UPDATE_INCIDENT_TYPE) - allow_enable = args.get('allow-enable') == 'true' +def update_user_command( + client, args, mapper_out, is_command_enabled, is_enable_enabled, is_create_user_enabled, create_if_not_exists +): + user_profile = IAMUserProfile( + user_profile=args.get("user-profile"), mapper=mapper_out, incident_type=IAMUserProfile.UPDATE_INCIDENT_TYPE + ) + allow_enable = args.get("allow-enable") == "true" if not is_command_enabled: - user_profile.set_result(action=IAMActions.UPDATE_USER, - skip=True, - skip_reason='Command is disabled.') + user_profile.set_result(action=IAMActions.UPDATE_USER, skip=True, skip_reason="Command is disabled.") else: try: iam_attr, iam_attr_value = user_profile.get_first_available_iam_user_attr(IAM_GET_USER_ATTRIBUTES) - service_now_filter_name: str = 'sys_id' if iam_attr == 'id' else iam_attr + service_now_filter_name: str = "sys_id" if iam_attr == "id" else iam_attr service_now_user = client.get_user(service_now_filter_name, iam_attr_value) if service_now_user: - user_id = service_now_user.get('sys_id') - service_now_profile = user_profile.map_object(mapper_out, - incident_type=IAMUserProfile.UPDATE_INCIDENT_TYPE) + user_id = service_now_user.get("sys_id") + service_now_profile = user_profile.map_object(mapper_out, incident_type=IAMUserProfile.UPDATE_INCIDENT_TYPE) if allow_enable and is_enable_enabled: - service_now_profile['active'] = True - service_now_profile['locked_out'] = False + service_now_profile["active"] = True + service_now_profile["locked_out"] = False updated_user = client.update_user(user_id, service_now_profile) user_profile.set_result( action=IAMActions.UPDATE_USER, success=True, - active=updated_user.get('active') == 'true', - iden=updated_user.get('sys_id'), - email=updated_user.get('email'), - username=updated_user.get('user_name'), - details=updated_user + active=updated_user.get("active") == "true", + iden=updated_user.get("sys_id"), + email=updated_user.get("email"), + username=updated_user.get("user_name"), + details=updated_user, ) else: if create_if_not_exists: - user_profile = create_user_command(client, args, mapper_out, is_create_user_enabled, - False, False) + user_profile = create_user_command(client, args, mapper_out, is_create_user_enabled, False, False) else: _, error_message = IAMErrors.USER_DOES_NOT_EXIST - user_profile.set_result(action=IAMActions.UPDATE_USER, - skip=True, - skip_reason=error_message) + user_profile.set_result(action=IAMActions.UPDATE_USER, skip=True, skip_reason=error_message) except Exception as e: handle_exception(user_profile, e, IAMActions.UPDATE_USER) @@ -300,16 +267,16 @@ def update_user_command(client, args, mapper_out, is_command_enabled, is_enable_ def main(): user_profile = None params = demisto.params() - api_version = params.get('api_version', '') - base_url = urljoin(params['url'].strip('/'), '/api/now/') + api_version = params.get("api_version", "") + base_url = urljoin(params["url"].strip("/"), "/api/now/") if api_version: base_url += api_version - 
username = params.get('credentials', {}).get('identifier') - password = params.get('credentials', {}).get('password') - mapper_in = params.get('mapper_in') - mapper_out = params.get('mapper_out') - verify_certificate = not params.get('insecure', False) - proxy = params.get('proxy', False) + username = params.get("credentials", {}).get("identifier") + password = params.get("credentials", {}).get("password") + mapper_in = params.get("mapper_in") + mapper_out = params.get("mapper_out") + verify_certificate = not params.get("insecure", False) + proxy = params.get("proxy", False) command = demisto.command() args = demisto.args() @@ -319,52 +286,44 @@ def main(): is_update_enabled = demisto.params().get("update_user_enabled") create_if_not_exists = demisto.params().get("create_if_not_exists") - headers = { - 'Content-Type': 'application/json', - 'Accept': 'application/json' - } + headers = {"Content-Type": "application/json", "Accept": "application/json"} client = Client( - base_url=base_url, - verify=verify_certificate, - proxy=proxy, - headers=headers, - ok_codes=(200, 201), - auth=(username, password) + base_url=base_url, verify=verify_certificate, proxy=proxy, headers=headers, ok_codes=(200, 201), auth=(username, password) ) - demisto.debug(f'Command being called is {command}') + demisto.debug(f"Command being called is {command}") - if command == 'iam-get-user': + if command == "iam-get-user": user_profile = get_user_command(client, args, mapper_in, mapper_out) - elif command == 'iam-create-user': - user_profile = create_user_command(client, args, mapper_out, is_create_enabled, is_update_enabled, - is_enable_enabled) + elif command == "iam-create-user": + user_profile = create_user_command(client, args, mapper_out, is_create_enabled, is_update_enabled, is_enable_enabled) - elif command == 'iam-update-user': - user_profile = update_user_command(client, args, mapper_out, is_update_enabled, is_enable_enabled, - is_create_enabled, create_if_not_exists) + elif command == "iam-update-user": + user_profile = update_user_command( + client, args, mapper_out, is_update_enabled, is_enable_enabled, is_create_enabled, create_if_not_exists + ) - elif command == 'iam-disable-user': + elif command == "iam-disable-user": user_profile = disable_user_command(client, args, is_disable_enabled, mapper_out) if user_profile: return_results(user_profile) try: - if command == 'test-module': + if command == "test-module": test_module(client) - elif command == 'get-mapping-fields': + elif command == "get-mapping-fields": return_results(get_mapping_fields_command(client)) except Exception as e: # For any other integration command exception, return an error - return_error(f'Failed to execute {command} command. Error: {str(e)}') + return_error(f"Failed to execute {command} command. 
Error: {e!s}") from IAMApiModule import * # noqa E402 -if __name__ in ('__main__', '__builtin__', 'builtins'): +if __name__ in ("__main__", "__builtin__", "builtins"): main() diff --git a/Packs/ServiceNow/Integrations/ServiceNow_IAM/ServiceNow_IAM_test.py b/Packs/ServiceNow/Integrations/ServiceNow_IAM/ServiceNow_IAM_test.py index abb0c649fe21..6b19fce9b8bd 100644 --- a/Packs/ServiceNow/Integrations/ServiceNow_IAM/ServiceNow_IAM_test.py +++ b/Packs/ServiceNow/Integrations/ServiceNow_IAM/ServiceNow_IAM_test.py @@ -1,8 +1,14 @@ -from requests import Response, Session -from ServiceNow_IAM import Client, get_user_command, create_user_command, update_user_command, \ - disable_user_command, get_mapping_fields_command -from IAMApiModule import * import pytest +from IAMApiModule import * +from requests import Response, Session +from ServiceNow_IAM import ( + Client, + create_user_command, + disable_user_command, + get_mapping_fields_command, + get_user_command, + update_user_command, +) SERVICENOW_USER_OUTPUT = { "sys_id": "mock_id", @@ -10,7 +16,7 @@ "first_name": "mock_first_name", "last_name": "mock_last_name", "active": "true", - "email": "testdemisto2@paloaltonetworks.com" + "email": "testdemisto2@paloaltonetworks.com", } SERVICENOW_DISABLED_USER_OUTPUT = { @@ -19,10 +25,10 @@ "first_name": "mock_first_name", "last_name": "mock_last_name", "active": "false", - "email": "testdemisto2@paloaltonetworks.com" + "email": "testdemisto2@paloaltonetworks.com", } -BASE_URL = 'https://test.com' +BASE_URL = "https://test.com" def mock_client(): @@ -32,18 +38,27 @@ def mock_client(): def get_outputs_from_user_profile(user_profile): entry_context = user_profile.to_entry() - outputs = entry_context.get('Contents') + outputs = entry_context.get("Contents") return outputs -@pytest.mark.parametrize('args, mock_url', [({'user-profile': {'email': 'testdemisto2@paloaltonetworks.com'}}, - f'{BASE_URL}/table/sys_user?email=testdemisto2@paloaltonetworks.com'), - ({'user-profile': {'email': 'testdemisto2@paloaltonetworks.com', - 'user_name': 'mock_user_name'}}, - f'{BASE_URL}/table/sys_user?user_name=mock_user_name'), - ({'user-profile': {'email': 'testdemisto2@paloaltonetworks.com', - 'id': 'mock_id', 'user_name': 'mock_user_name'}}, - f'{BASE_URL}/table/sys_user?sys_id=mock_id')]) +@pytest.mark.parametrize( + "args, mock_url", + [ + ( + {"user-profile": {"email": "testdemisto2@paloaltonetworks.com"}}, + f"{BASE_URL}/table/sys_user?email=testdemisto2@paloaltonetworks.com", + ), + ( + {"user-profile": {"email": "testdemisto2@paloaltonetworks.com", "user_name": "mock_user_name"}}, + f"{BASE_URL}/table/sys_user?user_name=mock_user_name", + ), + ( + {"user-profile": {"email": "testdemisto2@paloaltonetworks.com", "id": "mock_id", "user_name": "mock_user_name"}}, + f"{BASE_URL}/table/sys_user?sys_id=mock_id", + ), + ], +) def test_get_user_command__existing_user(mocker, args, mock_url, requests_mock): """ Given: @@ -64,22 +79,19 @@ def test_get_user_command__existing_user(mocker, args, mock_url, requests_mock): Case c: Mocked URL querying by ID is called. 
""" client = mock_client() - requests_mock.get( - mock_url, - json={'result': [SERVICENOW_USER_OUTPUT]} - ) - mocker.patch.object(IAMUserProfile, 'update_with_app_data', return_value={}) + requests_mock.get(mock_url, json={"result": [SERVICENOW_USER_OUTPUT]}) + mocker.patch.object(IAMUserProfile, "update_with_app_data", return_value={}) - user_profile = get_user_command(client, args, 'mapper_in', 'mapper_out') + user_profile = get_user_command(client, args, "mapper_in", "mapper_out") outputs = get_outputs_from_user_profile(user_profile) - assert outputs.get('action') == IAMActions.GET_USER - assert outputs.get('success') is True - assert outputs.get('active') is True - assert outputs.get('id') == 'mock_id' - assert outputs.get('username') == 'mock_user_name' - assert outputs.get('details', {}).get('first_name') == 'mock_first_name' - assert outputs.get('details', {}).get('last_name') == 'mock_last_name' + assert outputs.get("action") == IAMActions.GET_USER + assert outputs.get("success") is True + assert outputs.get("active") is True + assert outputs.get("id") == "mock_id" + assert outputs.get("username") == "mock_user_name" + assert outputs.get("details", {}).get("first_name") == "mock_first_name" + assert outputs.get("details", {}).get("last_name") == "mock_last_name" def test_get_user_command__non_existing_user(mocker): @@ -94,17 +106,17 @@ def test_get_user_command__non_existing_user(mocker): - Ensure the resulted User Profile object holds information about an unsuccessful result. """ client = mock_client() - args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com'}} + args = {"user-profile": {"email": "testdemisto2@paloaltonetworks.com"}} - mocker.patch.object(client, 'get_user', return_value=None) + mocker.patch.object(client, "get_user", return_value=None) - user_profile = get_user_command(client, args, 'mapper_in', 'mapper_out') + user_profile = get_user_command(client, args, "mapper_in", "mapper_out") outputs = get_outputs_from_user_profile(user_profile) - assert outputs.get('action') == IAMActions.GET_USER - assert outputs.get('success') is False - assert outputs.get('errorCode') == IAMErrors.USER_DOES_NOT_EXIST[0] - assert outputs.get('errorMessage') == IAMErrors.USER_DOES_NOT_EXIST[1] + assert outputs.get("action") == IAMActions.GET_USER + assert outputs.get("success") is False + assert outputs.get("errorCode") == IAMErrors.USER_DOES_NOT_EXIST[0] + assert outputs.get("errorMessage") == IAMErrors.USER_DOES_NOT_EXIST[1] def test_get_user_command__bad_response(mocker): @@ -121,22 +133,22 @@ def test_get_user_command__bad_response(mocker): import demistomock as demisto client = mock_client() - args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com'}} + args = {"user-profile": {"email": "testdemisto2@paloaltonetworks.com"}} bad_response = Response() bad_response.status_code = 500 bad_response._content = b'{"error": {"detail": "details", "message": "message"}}' - mocker.patch.object(demisto, 'error') - mocker.patch.object(Session, 'request', return_value=bad_response) + mocker.patch.object(demisto, "error") + mocker.patch.object(Session, "request", return_value=bad_response) - user_profile = get_user_command(client, args, 'mapper_in', 'mapper_out') + user_profile = get_user_command(client, args, "mapper_in", "mapper_out") outputs = get_outputs_from_user_profile(user_profile) - assert outputs.get('action') == IAMActions.GET_USER - assert outputs.get('success') is False - assert outputs.get('errorCode') == 500 - assert outputs.get('errorMessage') == 'message: 
details' + assert outputs.get("action") == IAMActions.GET_USER + assert outputs.get("success") is False + assert outputs.get("errorCode") == 500 + assert outputs.get("errorMessage") == "message: details" def test_create_user_command__success(mocker): @@ -150,22 +162,23 @@ def test_create_user_command__success(mocker): - Ensure a User Profile object with the user data is returned """ client = mock_client() - args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com'}} + args = {"user-profile": {"email": "testdemisto2@paloaltonetworks.com"}} - mocker.patch.object(client, 'get_user', return_value=None) - mocker.patch.object(client, 'create_user', return_value=SERVICENOW_USER_OUTPUT) + mocker.patch.object(client, "get_user", return_value=None) + mocker.patch.object(client, "create_user", return_value=SERVICENOW_USER_OUTPUT) - user_profile = create_user_command(client, args, 'mapper_out', is_command_enabled=True, - is_update_enabled=False, is_enable_enabled=False) + user_profile = create_user_command( + client, args, "mapper_out", is_command_enabled=True, is_update_enabled=False, is_enable_enabled=False + ) outputs = get_outputs_from_user_profile(user_profile) - assert outputs.get('action') == IAMActions.CREATE_USER - assert outputs.get('success') is True - assert outputs.get('active') is True - assert outputs.get('id') == 'mock_id' - assert outputs.get('username') == 'mock_user_name' - assert outputs.get('details', {}).get('first_name') == 'mock_first_name' - assert outputs.get('details', {}).get('last_name') == 'mock_last_name' + assert outputs.get("action") == IAMActions.CREATE_USER + assert outputs.get("success") is True + assert outputs.get("active") is True + assert outputs.get("id") == "mock_id" + assert outputs.get("username") == "mock_user_name" + assert outputs.get("details", {}).get("first_name") == "mock_first_name" + assert outputs.get("details", {}).get("last_name") == "mock_last_name" def test_create_user_command__user_already_exists(mocker): @@ -181,22 +194,23 @@ def test_create_user_command__user_already_exists(mocker): - Ensure the command is considered successful and the user is still disabled """ client = mock_client() - args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com'}, 'allow-enable': 'false'} + args = {"user-profile": {"email": "testdemisto2@paloaltonetworks.com"}, "allow-enable": "false"} - mocker.patch.object(client, 'get_user', return_value=SERVICENOW_DISABLED_USER_OUTPUT) - mocker.patch.object(client, 'update_user', return_value=SERVICENOW_DISABLED_USER_OUTPUT) + mocker.patch.object(client, "get_user", return_value=SERVICENOW_DISABLED_USER_OUTPUT) + mocker.patch.object(client, "update_user", return_value=SERVICENOW_DISABLED_USER_OUTPUT) - user_profile = create_user_command(client, args, 'mapper_out', is_command_enabled=True, - is_update_enabled=True, is_enable_enabled=True) + user_profile = create_user_command( + client, args, "mapper_out", is_command_enabled=True, is_update_enabled=True, is_enable_enabled=True + ) outputs = get_outputs_from_user_profile(user_profile) - assert outputs.get('action') == IAMActions.UPDATE_USER - assert outputs.get('success') is True - assert outputs.get('active') is False - assert outputs.get('id') == 'mock_id' - assert outputs.get('username') == 'mock_user_name' - assert outputs.get('details', {}).get('first_name') == 'mock_first_name' - assert outputs.get('details', {}).get('last_name') == 'mock_last_name' + assert outputs.get("action") == IAMActions.UPDATE_USER + assert outputs.get("success") is True + 
assert outputs.get("active") is False + assert outputs.get("id") == "mock_id" + assert outputs.get("username") == "mock_user_name" + assert outputs.get("details", {}).get("first_name") == "mock_first_name" + assert outputs.get("details", {}).get("last_name") == "mock_last_name" def test_update_user_command__non_existing_user(mocker): @@ -214,22 +228,29 @@ def test_update_user_command__non_existing_user(mocker): - Ensure a User Profile object with the user data is returned """ client = mock_client() - args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com', 'givenname': 'mock_first_name'}} - - mocker.patch.object(client, 'get_user', return_value=None) - mocker.patch.object(client, 'create_user', return_value=SERVICENOW_USER_OUTPUT) - - user_profile = update_user_command(client, args, 'mapper_out', is_command_enabled=True, is_enable_enabled=False, - is_create_user_enabled=True, create_if_not_exists=True) + args = {"user-profile": {"email": "testdemisto2@paloaltonetworks.com", "givenname": "mock_first_name"}} + + mocker.patch.object(client, "get_user", return_value=None) + mocker.patch.object(client, "create_user", return_value=SERVICENOW_USER_OUTPUT) + + user_profile = update_user_command( + client, + args, + "mapper_out", + is_command_enabled=True, + is_enable_enabled=False, + is_create_user_enabled=True, + create_if_not_exists=True, + ) outputs = get_outputs_from_user_profile(user_profile) - assert outputs.get('action') == IAMActions.CREATE_USER - assert outputs.get('success') is True - assert outputs.get('active') is True - assert outputs.get('id') == 'mock_id' - assert outputs.get('username') == 'mock_user_name' - assert outputs.get('details', {}).get('first_name') == 'mock_first_name' - assert outputs.get('details', {}).get('last_name') == 'mock_last_name' + assert outputs.get("action") == IAMActions.CREATE_USER + assert outputs.get("success") is True + assert outputs.get("active") is True + assert outputs.get("id") == "mock_id" + assert outputs.get("username") == "mock_user_name" + assert outputs.get("details", {}).get("first_name") == "mock_first_name" + assert outputs.get("details", {}).get("last_name") == "mock_last_name" def test_update_user_command__command_is_disabled(mocker): @@ -244,20 +265,27 @@ def test_update_user_command__command_is_disabled(mocker): - Ensure the command is considered successful and skipped """ client = mock_client() - args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com', 'givenname': 'mock_first_name'}} - - mocker.patch.object(client, 'get_user', return_value=None) - mocker.patch.object(IAMUserProfile, 'map_object', return_value={}) - mocker.patch.object(client, 'update_user', return_value=SERVICENOW_USER_OUTPUT) - - user_profile = update_user_command(client, args, 'mapper_out', is_command_enabled=False, is_enable_enabled=False, - is_create_user_enabled=False, create_if_not_exists=False) + args = {"user-profile": {"email": "testdemisto2@paloaltonetworks.com", "givenname": "mock_first_name"}} + + mocker.patch.object(client, "get_user", return_value=None) + mocker.patch.object(IAMUserProfile, "map_object", return_value={}) + mocker.patch.object(client, "update_user", return_value=SERVICENOW_USER_OUTPUT) + + user_profile = update_user_command( + client, + args, + "mapper_out", + is_command_enabled=False, + is_enable_enabled=False, + is_create_user_enabled=False, + create_if_not_exists=False, + ) outputs = get_outputs_from_user_profile(user_profile) - assert outputs.get('action') == IAMActions.UPDATE_USER - assert 
outputs.get('success') is True - assert outputs.get('skipped') is True - assert outputs.get('reason') == 'Command is disabled.' + assert outputs.get("action") == IAMActions.UPDATE_USER + assert outputs.get("success") is True + assert outputs.get("skipped") is True + assert outputs.get("reason") == "Command is disabled." def test_update_user_command__allow_enable(mocker): @@ -273,23 +301,32 @@ def test_update_user_command__allow_enable(mocker): - Ensure the user is enabled at the end of the command execution. """ client = mock_client() - args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com', 'givenname': 'mock_first_name'}, - 'allow-enable': 'true'} - - mocker.patch.object(client, 'get_user', return_value=SERVICENOW_DISABLED_USER_OUTPUT) - mocker.patch.object(client, 'update_user', return_value=SERVICENOW_USER_OUTPUT) - - user_profile = update_user_command(client, args, 'mapper_out', is_command_enabled=True, is_enable_enabled=True, - is_create_user_enabled=False, create_if_not_exists=False) + args = { + "user-profile": {"email": "testdemisto2@paloaltonetworks.com", "givenname": "mock_first_name"}, + "allow-enable": "true", + } + + mocker.patch.object(client, "get_user", return_value=SERVICENOW_DISABLED_USER_OUTPUT) + mocker.patch.object(client, "update_user", return_value=SERVICENOW_USER_OUTPUT) + + user_profile = update_user_command( + client, + args, + "mapper_out", + is_command_enabled=True, + is_enable_enabled=True, + is_create_user_enabled=False, + create_if_not_exists=False, + ) outputs = get_outputs_from_user_profile(user_profile) - assert outputs.get('action') == IAMActions.UPDATE_USER - assert outputs.get('success') is True - assert outputs.get('active') is True - assert outputs.get('id') == 'mock_id' - assert outputs.get('username') == 'mock_user_name' - assert outputs.get('details', {}).get('first_name') == 'mock_first_name' - assert outputs.get('details', {}).get('last_name') == 'mock_last_name' + assert outputs.get("action") == IAMActions.UPDATE_USER + assert outputs.get("success") is True + assert outputs.get("active") is True + assert outputs.get("id") == "mock_id" + assert outputs.get("username") == "mock_user_name" + assert outputs.get("details", {}).get("first_name") == "mock_first_name" + assert outputs.get("details", {}).get("last_name") == "mock_last_name" def test_disable_user_command__non_existing_user(mocker): @@ -305,17 +342,17 @@ def test_disable_user_command__non_existing_user(mocker): - Ensure the command is considered successful and skipped """ client = mock_client() - args = {'user-profile': {'email': 'testdemisto2@paloaltonetworks.com'}} + args = {"user-profile": {"email": "testdemisto2@paloaltonetworks.com"}} - mocker.patch.object(client, 'get_user', return_value=None) + mocker.patch.object(client, "get_user", return_value=None) - user_profile = disable_user_command(client, args, is_command_enabled=True, mapper_out='mapper_out') + user_profile = disable_user_command(client, args, is_command_enabled=True, mapper_out="mapper_out") outputs = get_outputs_from_user_profile(user_profile) - assert outputs.get('action') == IAMActions.DISABLE_USER - assert outputs.get('success') is True - assert outputs.get('skipped') is True - assert outputs.get('reason') == IAMErrors.USER_DOES_NOT_EXIST[1] + assert outputs.get("action") == IAMActions.DISABLE_USER + assert outputs.get("success") is True + assert outputs.get("skipped") is True + assert outputs.get("reason") == IAMErrors.USER_DOES_NOT_EXIST[1] def test_get_mapping_fields_command(mocker): @@ -329,10 
+366,10 @@ def test_get_mapping_fields_command(mocker): - Ensure a GetMappingFieldsResponse object that contains the ServiceNow fields is returned """ client = mock_client() - mocker.patch.object(client, 'get_service_now_fields', return_value={'field1': 'desc1', 'field2': 'desc2'}) + mocker.patch.object(client, "get_service_now_fields", return_value={"field1": "desc1", "field2": "desc2"}) mapping_response = get_mapping_fields_command(client) mapping = mapping_response.extract_mapping() - assert mapping.get(IAMUserProfile.DEFAULT_INCIDENT_TYPE, {}).get('field1') == 'desc1' - assert mapping.get(IAMUserProfile.DEFAULT_INCIDENT_TYPE, {}).get('field2') == 'desc2' + assert mapping.get(IAMUserProfile.DEFAULT_INCIDENT_TYPE, {}).get("field1") == "desc1" + assert mapping.get(IAMUserProfile.DEFAULT_INCIDENT_TYPE, {}).get("field2") == "desc2" diff --git a/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2.py b/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2.py index c8547a4851f5..83c3bbe6aecf 100644 --- a/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2.py +++ b/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2.py @@ -1,158 +1,192 @@ -import demistomock as demisto # noqa: F401 -from CommonServerPython import * # noqa: F401 +import mimetypes import re from collections.abc import Callable, Iterable - -import mimetypes +import demistomock as demisto # noqa: F401 # disable insecure warnings import urllib3 +from CommonServerPython import * # noqa: F401 + urllib3.disable_warnings() -DEFAULT_FETCH_TIME = '10 minutes' +DEFAULT_FETCH_TIME = "10 minutes" -INCIDENT = 'incident' -SIR_INCIDENT = 'sn_si_incident' +INCIDENT = "incident" +SIR_INCIDENT = "sn_si_incident" -COMMAND_NOT_IMPLEMENTED_MSG = 'Command not implemented' +COMMAND_NOT_IMPLEMENTED_MSG = "Command not implemented" -DATE_FORMAT = '%Y-%m-%d %H:%M:%S' +DATE_FORMAT = "%Y-%m-%d %H:%M:%S" DATE_FORMAT_OPTIONS = { - 'MM-dd-yyyy': '%m-%d-%Y %H:%M:%S', - 'MM/dd/yyyy': '%m/%d/%Y %H:%M:%S', - 'dd/MM/yyyy': '%d/%m/%Y %H:%M:%S', - 'dd-MM-yyyy': '%d-%m-%Y %H:%M:%S', - 'dd.MM.yyyy': '%d.%m.%Y %H:%M:%S', - 'yyyy-MM-dd': '%Y-%m-%d %H:%M:%S', - 'mmm-dd-yyyy': '%b-%d-%Y %H:%M:%S' + "MM-dd-yyyy": "%m-%d-%Y %H:%M:%S", + "MM/dd/yyyy": "%m/%d/%Y %H:%M:%S", + "dd/MM/yyyy": "%d/%m/%Y %H:%M:%S", + "dd-MM-yyyy": "%d-%m-%Y %H:%M:%S", + "dd.MM.yyyy": "%d.%m.%Y %H:%M:%S", + "yyyy-MM-dd": "%Y-%m-%d %H:%M:%S", + "mmm-dd-yyyy": "%b-%d-%Y %H:%M:%S", } TICKET_STATES = { - 'incident': { - '1': '1 - New', - '2': '2 - In Progress', - '3': '3 - On Hold', - '4': '4 - Awaiting Caller', - '5': '5 - Awaiting Evidence', - '6': '6 - Resolved', - '7': '7 - Closed', - '8': '8 - Canceled' - }, - 'problem': { - '1': '1 - Open', - '2': '2 - Known Error', - '3': '3 - Pending Change', - '4': '4 - Closed/Resolved' + "incident": { + "1": "1 - New", + "2": "2 - In Progress", + "3": "3 - On Hold", + "4": "4 - Awaiting Caller", + "5": "5 - Awaiting Evidence", + "6": "6 - Resolved", + "7": "7 - Closed", + "8": "8 - Canceled", }, - 'change_request': { - '-5': '-5 - New', - '-4': '-4 - Assess', - '-3': '-3 - Authorize', - '-2': '-2 - Scheduled', - '-1': '-1 - Implement', - '0': '0 - Review', - '3': '3 - Closed', - '4': '4 - Canceled' + "problem": {"1": "1 - Open", "2": "2 - Known Error", "3": "3 - Pending Change", "4": "4 - Closed/Resolved"}, + "change_request": { + "-5": "-5 - New", + "-4": "-4 - Assess", + "-3": "-3 - Authorize", + "-2": "-2 - Scheduled", + "-1": "-1 - Implement", + "0": "0 - Review", + "3": "3 - Closed", + "4": "4 - Canceled", }, - 'sc_task': { - '-5': '-5 - 
Pending', - '1': '1 - Open', - '2': '2 - Work In Progress', - '3': '3 - Closed Complete', - '4': '4 - Closed Incomplete', - '7': '7 - Closed Skipped' + "sc_task": { + "-5": "-5 - Pending", + "1": "1 - Open", + "2": "2 - Work In Progress", + "3": "3 - Closed Complete", + "4": "4 - Closed Incomplete", + "7": "7 - Closed Skipped", }, - 'sc_request': { - '1': '1 - Approved', - '3': '3 - Closed', - '4': '4 - Rejected' - }, - SIR_INCIDENT: { - '3': 'Closed', - '7': 'Cancelled', - '10': 'Draft', - '16': 'Analysis', - '18': 'Contain', - '19': 'Eradicate' - } + "sc_request": {"1": "1 - Approved", "3": "3 - Closed", "4": "4 - Rejected"}, + SIR_INCIDENT: {"3": "Closed", "7": "Cancelled", "10": "Draft", "16": "Analysis", "18": "Contain", "19": "Eradicate"}, } -TICKET_TYPE_TO_CLOSED_STATE = {INCIDENT: '7', - 'problem': '4', - 'change_request': '3', - 'sc_task': '3', - 'sc_request': '3', - 'sc_req_item': '3', - SIR_INCIDENT: '3'} +TICKET_TYPE_TO_CLOSED_STATE = { + INCIDENT: "7", + "problem": "4", + "change_request": "3", + "sc_task": "3", + "sc_request": "3", + "sc_req_item": "3", + SIR_INCIDENT: "3", +} TICKET_APPROVAL = { - 'sc_req_item': { - 'waiting_for_approval': 'Waiting for approval', - 'approved': 'Approved', - 'requested': 'Requested', - 'rejected': 'Rejected', - 'not requested': 'Not Yet Requested' + "sc_req_item": { + "waiting_for_approval": "Waiting for approval", + "approved": "Approved", + "requested": "Requested", + "rejected": "Rejected", + "not requested": "Not Yet Requested", } } -TICKET_PRIORITY = { - '1': '1 - Critical', - '2': '2 - High', - '3': '3 - Moderate', - '4': '4 - Low', - '5': '5 - Planning' -} +TICKET_PRIORITY = {"1": "1 - Critical", "2": "2 - High", "3": "3 - Moderate", "4": "4 - Low", "5": "5 - Planning"} TICKET_IMPACT = { - '1': '1 - Enterprise', - '2': '2 - Region / Market', - '3': '3 - Ministry', - '4': '4 - Department / Function', - '5': '5 - Caregiver' -} - -BUSINESS_IMPACT = { - '1': '1 - Critical', - '2': '2 - High', - '3': '3 - Non-Critical' + "1": "1 - Enterprise", + "2": "2 - Region / Market", + "3": "3 - Ministry", + "4": "4 - Department / Function", + "5": "5 - Caregiver", } -SNOW_ARGS = ['active', 'activity_due', 'opened_at', 'short_description', 'additional_assignee_list', 'approval_history', - 'approval', 'approval_set', 'assigned_to', 'assignment_group', - 'business_duration', 'business_service', 'business_stc', 'change_type', 'category', 'caller', - 'calendar_duration', 'calendar_stc', 'caller_id', 'caused_by', 'close_code', 'close_notes', - 'closed_at', 'closed_by', 'cmdb_ci', 'comments', 'comments_and_work_notes', 'company', 'contact_type', - 'correlation_display', 'correlation_id', 'delivery_plan', 'delivery_task', 'description', 'due_date', - 'expected_start', 'follow_up', 'group_list', 'hold_reason', 'impact', 'incident_state', - 'knowledge', 'location', 'made_sla', 'notify', 'order', 'parent', 'parent_incident', 'priority', - 'problem_id', 'reassignment_count', 'reopen_count', 'resolved_at', 'resolved_by', 'rfc', - 'severity', 'sla_due', 'state', 'subcategory', 'sys_tags', 'sys_updated_by', 'sys_updated_on', - 'time_worked', 'title', 'type', 'urgency', 'user_input', 'watch_list', 'work_end', 'work_notes', - 'work_notes_list', 'work_start', 'business_criticality', 'risk_score'] - -SIR_OUT_FIELDS = ['attack_vector', 'affected_user', 'change_request', 'incident', 'parent_security_incident', - 'substate'] +BUSINESS_IMPACT = {"1": "1 - Critical", "2": "2 - High", "3": "3 - Non-Critical"} + +SNOW_ARGS = [ + "active", + "activity_due", + "opened_at", + 
"short_description", + "additional_assignee_list", + "approval_history", + "approval", + "approval_set", + "assigned_to", + "assignment_group", + "business_duration", + "business_service", + "business_stc", + "change_type", + "category", + "caller", + "calendar_duration", + "calendar_stc", + "caller_id", + "caused_by", + "close_code", + "close_notes", + "closed_at", + "closed_by", + "cmdb_ci", + "comments", + "comments_and_work_notes", + "company", + "contact_type", + "correlation_display", + "correlation_id", + "delivery_plan", + "delivery_task", + "description", + "due_date", + "expected_start", + "follow_up", + "group_list", + "hold_reason", + "impact", + "incident_state", + "knowledge", + "location", + "made_sla", + "notify", + "order", + "parent", + "parent_incident", + "priority", + "problem_id", + "reassignment_count", + "reopen_count", + "resolved_at", + "resolved_by", + "rfc", + "severity", + "sla_due", + "state", + "subcategory", + "sys_tags", + "sys_updated_by", + "sys_updated_on", + "time_worked", + "title", + "type", + "urgency", + "user_input", + "watch_list", + "work_end", + "work_notes", + "work_notes_list", + "work_start", + "business_criticality", + "risk_score", +] + +SIR_OUT_FIELDS = ["attack_vector", "affected_user", "change_request", "incident", "parent_security_incident", "substate"] # Every table in ServiceNow should have those fields DEFAULT_RECORD_FIELDS = { - 'sys_id': 'ID', - 'sys_updated_by': 'UpdatedBy', - 'sys_updated_on': 'UpdatedAt', - 'sys_created_by': 'CreatedBy', - 'sys_created_on': 'CreatedAt' + "sys_id": "ID", + "sys_updated_by": "UpdatedBy", + "sys_updated_on": "UpdatedAt", + "sys_created_by": "CreatedBy", + "sys_created_on": "CreatedAt", } -MIRROR_DIRECTION = { - 'None': None, - 'Incoming': 'In', - 'Outgoing': 'Out', - 'Incoming And Outgoing': 'Both' -} +MIRROR_DIRECTION = {"None": None, "Incoming": "In", "Outgoing": "Out", "Incoming And Outgoing": "Both"} def arg_to_timestamp(arg: Any, arg_name: str, required: bool = False) -> int: @@ -184,10 +218,10 @@ def arg_to_timestamp(arg: Any, arg_name: str, required: bool = False) -> int: # we use dateparser to handle strings either in ISO8601 format, or # relative time stamps. # For example: format 2019-10-23T00:00:00 or "3 days", etc - date = dateparser.parse(arg, settings={'TIMEZONE': 'UTC'}) + date = dateparser.parse(arg, settings={"TIMEZONE": "UTC"}) if date is None: # if d is None it means dateparser failed to parse it - raise ValueError(f'Invalid date: {arg_name}') + raise ValueError(f"Invalid date: {arg_name}") return int(date.timestamp()) if isinstance(arg, int | float): @@ -198,8 +232,8 @@ def arg_to_timestamp(arg: Any, arg_name: str, required: bool = False) -> int: def get_server_url(server_url: str) -> str: url = server_url - url = re.sub('/[/]+$/', '', url) - url = re.sub('/$', '', url) + url = re.sub("/[/]+$/", "", url) + url = re.sub("/$", "", url) return url @@ -213,23 +247,23 @@ def get_item_human_readable(data: dict) -> dict: item human readable. 
""" item = { - 'ID': data.get('sys_id', ''), - 'Name': data.get('name', ''), - 'Description': data.get('short_description', ''), - 'Price': data.get('price', ''), - 'Variables': [] + "ID": data.get("sys_id", ""), + "Name": data.get("name", ""), + "Description": data.get("short_description", ""), + "Price": data.get("price", ""), + "Variables": [], } - variables = data.get('variables') + variables = data.get("variables") if variables and isinstance(variables, list): for var in variables: if var: pretty_variables = { - 'Question': var.get('label', ''), - 'Type': var.get('display_type', ''), - 'Name': var.get('name', ''), - 'Mandatory': var.get('mandatory', '') + "Question": var.get("label", ""), + "Type": var.get("display_type", ""), + "Name": var.get("name", ""), + "Mandatory": var.get("mandatory", ""), } - item['Variables'].append(pretty_variables) + item["Variables"].append(pretty_variables) return item @@ -244,56 +278,56 @@ def create_ticket_context(data: dict, additional_fields: list | None = None) -> ticket context. """ context = { - 'ID': data.get('sys_id'), - 'Summary': data.get('short_description'), - 'Number': data.get('number'), - 'CreatedOn': data.get('sys_created_on'), - 'Active': data.get('active'), - 'AdditionalComments': data.get('comments'), - 'CloseCode': data.get('close_code'), - 'OpenedAt': data.get('opened_at') + "ID": data.get("sys_id"), + "Summary": data.get("short_description"), + "Number": data.get("number"), + "CreatedOn": data.get("sys_created_on"), + "Active": data.get("active"), + "AdditionalComments": data.get("comments"), + "CloseCode": data.get("close_code"), + "OpenedAt": data.get("opened_at"), } if additional_fields: for additional_field in additional_fields: if camelize_string(additional_field) not in context: # in case of a nested additional field (in the form of field1.field2) - nested_additional_field_list = additional_field.split('.') + nested_additional_field_list = additional_field.split(".") if value := dict_safe_get(data, nested_additional_field_list): context[additional_field] = value # These fields refer to records in the database, the value is their system ID. 
- closed_by = data.get('closed_by') + closed_by = data.get("closed_by") if closed_by: if isinstance(closed_by, dict): - context['ResolvedBy'] = closed_by.get('value', '') + context["ResolvedBy"] = closed_by.get("value", "") else: - context['ResolvedBy'] = closed_by - opened_by = data.get('opened_by') + context["ResolvedBy"] = closed_by + opened_by = data.get("opened_by") if opened_by: if isinstance(opened_by, dict): - context['OpenedBy'] = opened_by.get('value', '') - context['Creator'] = opened_by.get('value', '') + context["OpenedBy"] = opened_by.get("value", "") + context["Creator"] = opened_by.get("value", "") else: - context['OpenedBy'] = opened_by - context['Creator'] = opened_by - assigned_to = data.get('assigned_to') + context["OpenedBy"] = opened_by + context["Creator"] = opened_by + assigned_to = data.get("assigned_to") if assigned_to: if isinstance(assigned_to, dict): - context['Assignee'] = assigned_to.get('value', '') + context["Assignee"] = assigned_to.get("value", "") else: - context['Assignee'] = assigned_to + context["Assignee"] = assigned_to # Try to map fields - priority = data.get('priority') + priority = data.get("priority") if priority: if isinstance(priority, dict): - value = priority.get('value', '') - context['Priority'] = TICKET_PRIORITY.get(str(int(value)), str(int(value))) if value else '' + value = priority.get("value", "") + context["Priority"] = TICKET_PRIORITY.get(str(int(value)), str(int(value))) if value else "" else: - context['Priority'] = TICKET_PRIORITY.get(priority, priority) - state = data.get('state') + context["Priority"] = TICKET_PRIORITY.get(priority, priority) + state = data.get("state") if state: - context['State'] = state + context["State"] = state return createContext(context, removeNull=True) @@ -331,73 +365,69 @@ def get_ticket_human_readable(tickets, ticket_type: str, additional_fields: list if not isinstance(tickets, list): tickets = [tickets] - ticket_severity = { - '1': '1 - High', - '2': '2 - Medium', - '3': '3 - Low' - } + ticket_severity = {"1": "1 - High", "2": "2 - Medium", "3": "3 - Low"} result = [] for ticket in tickets: - hr = { - 'Number': ticket.get('number'), - 'System ID': ticket.get('sys_id'), - 'Created On': ticket.get('sys_created_on'), - 'Created By': ticket.get('sys_created_by'), - 'Active': ticket.get('active'), - 'Close Notes': ticket.get('close_notes'), - 'Close Code': ticket.get('close_code'), - 'Description': ticket.get('description'), - 'Opened At': ticket.get('opened_at'), - 'Due Date': ticket.get('due_date'), + "Number": ticket.get("number"), + "System ID": ticket.get("sys_id"), + "Created On": ticket.get("sys_created_on"), + "Created By": ticket.get("sys_created_by"), + "Active": ticket.get("active"), + "Close Notes": ticket.get("close_notes"), + "Close Code": ticket.get("close_code"), + "Description": ticket.get("description"), + "Opened At": ticket.get("opened_at"), + "Due Date": ticket.get("due_date"), # This field refers to a record in the database, the value is its system ID. 
- 'Resolved By': ticket.get('closed_by', {}).get('value') if isinstance(ticket.get('closed_by'), dict) - else ticket.get('closed_by'), - 'Resolved At': ticket.get('resolved_at'), - 'SLA Due': ticket.get('sla_due'), - 'Short Description': ticket.get('short_description'), - 'Additional Comments': ticket.get('comments') + "Resolved By": ticket.get("closed_by", {}).get("value") + if isinstance(ticket.get("closed_by"), dict) + else ticket.get("closed_by"), + "Resolved At": ticket.get("resolved_at"), + "SLA Due": ticket.get("sla_due"), + "Short Description": ticket.get("short_description"), + "Additional Comments": ticket.get("comments"), } # Try to map the fields - impact = ticket.get('impact', '') + impact = ticket.get("impact", "") if impact: - hr['Impact'] = ticket_severity.get(impact, impact) - urgency = ticket.get('urgency', '') + hr["Impact"] = ticket_severity.get(impact, impact) + urgency = ticket.get("urgency", "") if urgency: - hr['Urgency'] = ticket_severity.get(urgency, urgency) - severity = ticket.get('severity', '') + hr["Urgency"] = ticket_severity.get(urgency, urgency) + severity = ticket.get("severity", "") if severity: - hr['Severity'] = ticket_severity.get(severity, severity) - priority = ticket.get('priority', '') + hr["Severity"] = ticket_severity.get(severity, severity) + priority = ticket.get("priority", "") if priority: - hr['Priority'] = TICKET_PRIORITY.get(priority, priority) + hr["Priority"] = TICKET_PRIORITY.get(priority, priority) - state = ticket.get('state', '') + state = ticket.get("state", "") if state: mapped_state = state if ticket_type in TICKET_STATES: mapped_state = TICKET_STATES[ticket_type].get(state, mapped_state) - hr['State'] = mapped_state - approval = ticket.get('approval', '') + hr["State"] = mapped_state + approval = ticket.get("approval", "") if approval: mapped_approval = approval if ticket_type in TICKET_APPROVAL: - mapped_approval = TICKET_APPROVAL[ticket_type].get(ticket.get('approval'), mapped_approval) + mapped_approval = TICKET_APPROVAL[ticket_type].get(ticket.get("approval"), mapped_approval) # Approval will be added to the markdown only in the necessary ticket types - hr['Approval'] = mapped_approval + hr["Approval"] = mapped_approval if additional_fields: for additional_field in additional_fields: # in case of a nested additional field (in the form of field1.field2) - nested_additional_field_list = additional_field.split('.') + nested_additional_field_list = additional_field.split(".") hr[additional_field] = dict_safe_get(ticket, nested_additional_field_list) result.append(hr) return result -def get_ticket_fields(args: dict, template_name: dict = {}, ticket_type: str = '') -> dict: +def get_ticket_fields(args: dict, template_name: dict = {}, ticket_type: str = "") -> dict: """Inverse the keys and values of those dictionaries to map the arguments to their corresponding values in ServiceNow. @@ -409,11 +439,7 @@ def get_ticket_fields(args: dict, template_name: dict = {}, ticket_type: str = ' Returns: ticket fields. 
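
    Example (illustrative, assuming the integration's TICKET_PRIORITY map): an argument
    given as priority="1 - Critical" is mapped back to the numeric code "1" that the
    ServiceNow API expects.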
""" - ticket_severity = { - '1': '1 - High', - '2': '2 - Medium', - '3': '3 - Low' - } + ticket_severity = {"1": "1 - High", "2": "2 - Medium", "3": "3 - Low"} inv_severity = {v: k for k, v in ticket_severity.items()} inv_priority = {v: k for k, v in TICKET_PRIORITY.items()} @@ -422,14 +448,13 @@ def get_ticket_fields(args: dict, template_name: dict = {}, ticket_type: str = ' inv_states = {v: k for k, v in states.items()} if states else {} approval = TICKET_APPROVAL.get(ticket_type) inv_approval = {v: k for k, v in approval.items()} if approval else {} - fields_to_clear = argToList( - args.get('clear_fields', [])) # This argument will contain fields to allow their value empty + fields_to_clear = argToList(args.get("clear_fields", [])) # This argument will contain fields to allow their value empty # This is for updating null fields for update_remote_system function for example: assigned_to. for arg in args: if not args[arg]: fields_to_clear.append(arg) - demisto.debug(f'Fields to clear {fields_to_clear}') + demisto.debug(f"Fields to clear {fields_to_clear}") ticket_fields = {} for arg in SNOW_ARGS: @@ -441,18 +466,18 @@ def get_ticket_fields(args: dict, template_name: dict = {}, ticket_type: str = ' You can either set or clear the field value.") ticket_fields[arg] = "" elif input_arg: - if arg in ['impact', 'urgency', 'severity']: + if arg in ["impact", "urgency", "severity"]: ticket_fields[arg] = inv_severity.get(input_arg, input_arg) - elif arg == 'priority': + elif arg == "priority": ticket_fields[arg] = inv_priority.get(input_arg, input_arg) - elif arg == 'state': + elif arg == "state": ticket_fields[arg] = inv_states.get(input_arg, input_arg) - elif arg == 'approval': + elif arg == "approval": ticket_fields[arg] = inv_approval.get(input_arg, input_arg) - elif arg == 'change_type': + elif arg == "change_type": # this change is required in order to use type 'Standard' as well. - ticket_fields['type'] = input_arg - elif arg == 'business_criticality': + ticket_fields["type"] = input_arg + elif arg == "business_criticality": ticket_fields[arg] = inv_business_impact.get(input_arg, input_arg) else: ticket_fields[arg] = input_arg @@ -481,15 +506,15 @@ def generate_body(fields: dict = {}, custom_fields: dict = {}) -> dict: if custom_fields: for field in custom_fields: # custom fields begin with "u_" - if field.startswith('u_'): + if field.startswith("u_"): body[field] = custom_fields[field] else: - body['u_' + field] = custom_fields[field] + body["u_" + field] = custom_fields[field] return body -def split_fields(fields: str = '', delimiter: str = ';') -> dict: +def split_fields(fields: str = "", delimiter: str = ";") -> dict: """Split str fields of Demisto arguments to SNOW request fields by the char ';'. Args: @@ -501,12 +526,11 @@ def split_fields(fields: str = '', delimiter: str = ';') -> dict: dic_fields = {} if fields: - if '=' not in fields: - raise Exception( - f"The argument: {fields}.\nmust contain a '=' to specify the keys and values. e.g: key=val.") + if "=" not in fields: + raise Exception(f"The argument: {fields}.\nmust contain a '=' to specify the keys and values. e.g: key=val.") arr_fields = fields.split(delimiter) for f in arr_fields: - field = f.split('=', 1) # a field might include a '=' sign in the value. thus, splitting only once. + field = f.split("=", 1) # a field might include a '=' sign in the value. thus, splitting only once. 
if len(field) > 1: dic_fields[field[0]] = field[1] @@ -518,23 +542,23 @@ def split_notes(raw_notes, note_type, time_info): # The notes should be in this form: # '16/05/2023 15:49:56 - John Doe (Additional comments)\nsecond note first line\n\nsecond line\n\nthird # line\n\n2023-05-10 15:41:38 - פלוני אלמוני (Additional comments)\nfirst note first line\n\nsecond line\n\n - delimiter = r'([0-9]{1,4}(?:\/|-|\.)[0-9]{1,2}(?:\/|-|\.)[0-9]{1,4}.*\((?:Additional comments|Work notes)\))' + delimiter = r"([0-9]{1,4}(?:\/|-|\.)[0-9]{1,2}(?:\/|-|\.)[0-9]{1,4}.*\((?:Additional comments|Work notes)\))" notes_split = list(filter(None, re.split(delimiter, raw_notes))) for note_info, note_value in zip(notes_split[::2], notes_split[1::2]): created_on, _, created_by = note_info.partition(" - ") - created_by = created_by.split(' (')[0] + created_by = created_by.split(" (")[0] if not created_on or not created_by: - raise Exception(f'Failed to extract the required information from the following note: {note_info} - {note_value}') + raise Exception(f"Failed to extract the required information from the following note: {note_info} - {note_value}") # convert note creation time to UTC try: - display_date_format = time_info.get('display_date_format') - created_on = (created_on.replace('AM', '').replace('PM', '')).strip() - created_on_UTC = datetime.strptime(created_on, display_date_format) + time_info.get('timezone_offset') + display_date_format = time_info.get("display_date_format") + created_on = (created_on.replace("AM", "").replace("PM", "")).strip() + created_on_UTC = datetime.strptime(created_on, display_date_format) + time_info.get("timezone_offset") except ValueError as e: - raise Exception(f'Failed to convert {created_on} to a datetime object. Error: {e}') + raise Exception(f"Failed to convert {created_on} to a datetime object. Error: {e}") - if time_info.get('filter') and created_on_UTC < time_info.get('filter'): + if time_info.get("filter") and created_on_UTC < time_info.get("filter"): # If a time_filter was passed and the note was created before this time, do not return it. demisto.debug(f'Using time filter: {time_info.get("filter")}. Not including note: {note_info} - {note_value}.') continue @@ -542,7 +566,7 @@ def split_notes(raw_notes, note_type, time_info): "sys_created_on": created_on_UTC.strftime(DATE_FORMAT), "value": note_value.strip(), "sys_created_by": created_by, - "element": note_type + "element": note_type, } notes.append(note_dict) return notes @@ -563,17 +587,17 @@ def convert_to_notes_result(ticket: dict, time_info: dict) -> dict: return {} all_notes = [] - raw_comments = ticket.get('comments', '') + raw_comments = ticket.get("comments", "") if raw_comments: - comments = split_notes(raw_comments, 'comments', time_info=time_info) + comments = split_notes(raw_comments, "comments", time_info=time_info) all_notes.extend(comments) - raw_work_notes = ticket.get('work_notes', '') + raw_work_notes = ticket.get("work_notes", "") if raw_work_notes: - work_notes = split_notes(raw_work_notes, 'work_notes', time_info=time_info) + work_notes = split_notes(raw_work_notes, "work_notes", time_info=time_info) all_notes.extend(work_notes) - return {'result': all_notes} + return {"result": all_notes} class Client(BaseClient): @@ -581,11 +605,27 @@ class Client(BaseClient): Client to use in the ServiceNow integration. Overrides BaseClient. 
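
    When oauth_params are supplied, requests are authenticated through ServiceNowClient
    using OAuth2; otherwise basic username/password authentication is used (see __init__).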
""" - def __init__(self, server_url: str, sc_server_url: str, cr_server_url: str, username: str, - password: str, verify: bool, fetch_time: str, sysparm_query: str, - sysparm_limit: int, timestamp_field: str, ticket_type: str, get_attachments: bool, - incident_name: str, oauth_params: dict | None = None, version: str | None = None, look_back: int = 0, - use_display_value: bool = False, display_date_format: str = ''): + def __init__( + self, + server_url: str, + sc_server_url: str, + cr_server_url: str, + username: str, + password: str, + verify: bool, + fetch_time: str, + sysparm_query: str, + sysparm_limit: int, + timestamp_field: str, + ticket_type: str, + get_attachments: bool, + incident_name: str, + oauth_params: dict | None = None, + version: str | None = None, + look_back: int = 0, + use_display_value: bool = False, + display_date_format: str = "", + ): """ Args: @@ -614,7 +654,7 @@ def __init__(self, server_url: str, sc_server_url: str, cr_server_url: str, user self._verify = verify self._username = username self._password = password - self._proxies = handle_proxy(proxy_param_name='proxy', checkbox_default_value=False) + self._proxies = handle_proxy(proxy_param_name="proxy", checkbox_default_value=False) self.use_oauth = bool(oauth_params) self.fetch_time = fetch_time self.timestamp_field = timestamp_field @@ -628,23 +668,35 @@ def __init__(self, server_url: str, sc_server_url: str, cr_server_url: str, user self.use_display_value = use_display_value self.display_date_format = DATE_FORMAT_OPTIONS.get(display_date_format) if self.use_display_value: - assert self.display_date_format, 'A display date format must be selected in the instance configuration when ' \ - 'using the `Use Display Value` option.' + assert self.display_date_format, ( + "A display date format must be selected in the instance configuration when " + "using the `Use Display Value` option." + ) if self.use_oauth: # if user selected the `Use OAuth` checkbox, OAuth2 authentication should be used - self.snow_client: ServiceNowClient = ServiceNowClient(credentials=oauth_params.get('credentials', {}), - use_oauth=self.use_oauth, - client_id=oauth_params.get('client_id', ''), - client_secret=oauth_params.get('client_secret', ''), - url=oauth_params.get('url', ''), - verify=oauth_params.get('verify', False), - proxy=oauth_params.get('proxy', False), - headers=oauth_params.get('headers', '')) + self.snow_client: ServiceNowClient = ServiceNowClient( + credentials=oauth_params.get("credentials", {}), + use_oauth=self.use_oauth, + client_id=oauth_params.get("client_id", ""), + client_secret=oauth_params.get("client_secret", ""), + url=oauth_params.get("url", ""), + verify=oauth_params.get("verify", False), + proxy=oauth_params.get("proxy", False), + headers=oauth_params.get("headers", ""), + ) else: self._auth = (self._username, self._password) - def generic_request(self, method: str, path: str, body: Optional[dict] = None, headers: Optional[dict] = None, - sc_api: bool = False, cr_api: bool = False, custom_api: str = ""): + def generic_request( + self, + method: str, + path: str, + body: Optional[dict] = None, + headers: Optional[dict] = None, + sc_api: bool = False, + cr_api: bool = False, + custom_api: str = "", + ): """Generic request to ServiceNow api. 
Args: @@ -663,9 +715,20 @@ def generic_request(self, method: str, path: str, body: Optional[dict] = None, h """ return self.send_request(path, method, body, headers=headers, sc_api=sc_api, cr_api=cr_api, custom_api=custom_api) - def send_request(self, path: str, method: str = 'GET', body: dict | None = None, params: dict | None = None, - headers: dict | None = None, file=None, sc_api: bool = False, cr_api: bool = False, - get_attachments: bool = False, no_record_found_res: dict = {'result': []}, custom_api: str = ''): + def send_request( + self, + path: str, + method: str = "GET", + body: dict | None = None, + params: dict | None = None, + headers: dict | None = None, + file=None, + sc_api: bool = False, + cr_api: bool = False, + get_attachments: bool = False, + no_record_found_res: dict = {"result": []}, + custom_api: str = "", + ): """Generic request to ServiceNow. Args: @@ -689,94 +752,120 @@ def send_request(self, path: str, method: str = 'GET', body: dict | None = None, if custom_api: if not custom_api.startswith("/"): return_error("Argument custom_api must start with a leading forward slash '/'") - server_url = demisto.params().get('url') - url = f'{get_server_url(server_url)}{custom_api}{path}' + server_url = demisto.params().get("url") + url = f"{get_server_url(server_url)}{custom_api}{path}" elif sc_api: - url = f'{self._sc_server_url}{path}' + url = f"{self._sc_server_url}{path}" elif cr_api: - url = f'{self._cr_server_url}{path}' + url = f"{self._cr_server_url}{path}" else: - url = f'{self._base_url}{path}' + url = f"{self._base_url}{path}" if not headers: - headers = { - 'Accept': 'application/json', - 'Content-Type': 'application/json' - } + headers = {"Accept": "application/json", "Content-Type": "application/json"} # The attachments table does not support v2 api version if get_attachments: - url = url.replace('/v2', '/v1') + url = url.replace("/v2", "/v1") max_retries = 3 num_of_tries = 0 while num_of_tries < max_retries: if file: # Not supported in v2 - url = url.replace('/v2', '/v1') + url = url.replace("/v2", "/v1") try: - file_entry = file['id'] - file_name = file['name'] - file_path = demisto.getFilePath(file_entry)['path'] - with open(file_path, 'rb') as f: + file_entry = file["id"] + file_name = file["name"] + file_path = demisto.getFilePath(file_entry)["path"] + with open(file_path, "rb") as f: file_info = (file_name, f, self.get_content_type(file_name)) if self.use_oauth: access_token = self.snow_client.get_access_token() - headers.update({ - 'Authorization': f'Bearer {access_token}' - }) - res = requests.request(method, url, headers=headers, data=body, params=params, - files={'file': file_info}, verify=self._verify, proxies=self._proxies) + headers.update({"Authorization": f"Bearer {access_token}"}) + res = requests.request( + method, + url, + headers=headers, + data=body, + params=params, + files={"file": file_info}, + verify=self._verify, + proxies=self._proxies, + ) else: - res = requests.request(method, url, headers=headers, data=body, params=params, - files={'file': file_info}, auth=self._auth, - verify=self._verify, proxies=self._proxies) + res = requests.request( + method, + url, + headers=headers, + data=body, + params=params, + files={"file": file_info}, + auth=self._auth, + verify=self._verify, + proxies=self._proxies, + ) except Exception as err: - raise Exception('Failed to upload file - ' + str(err)) + raise Exception("Failed to upload file - " + str(err)) else: if self.use_oauth: access_token = self.snow_client.get_access_token() - headers.update({ - 
                        'Authorization': f'Bearer {access_token}'
-                    })
-                    res = requests.request(method, url, headers=headers, data=json.dumps(body) if body else {},
-                                           params=params, verify=self._verify, proxies=self._proxies)
+                headers.update({"Authorization": f"Bearer {access_token}"})
+                res = requests.request(
+                    method,
+                    url,
+                    headers=headers,
+                    data=json.dumps(body) if body else {},
+                    params=params,
+                    verify=self._verify,
+                    proxies=self._proxies,
+                )
             else:
-                res = requests.request(method, url, headers=headers, data=json.dumps(body) if body else {},
-                                       params=params, auth=self._auth, verify=self._verify, proxies=self._proxies)
+                res = requests.request(
+                    method,
+                    url,
+                    headers=headers,
+                    data=json.dumps(body) if body else {},
+                    params=params,
+                    auth=self._auth,
+                    verify=self._verify,
+                    proxies=self._proxies,
+                )

         if "Instance Hibernating page" in res.text:
             raise DemistoException(
                 "A connection was established but the instance is in hibernate mode.\n"
-                "Please wake your instance and try again.")
+                "Please wake your instance and try again."
+            )
         try:
             json_res = res.json()
         except Exception as err:
             if res.status_code == 201:
                 return "The ticket was successfully created."
             if not res.content:
-                return ''
-            raise Exception(f'Error parsing reply - {str(res.content)} - {str(err)}')
+                return ""
+            raise Exception(f"Error parsing reply - {res.content!s} - {err!s}")

-        if 'error' in json_res:
-            error = json_res.get('error', {})
+        if "error" in json_res:
+            error = json_res.get("error", {})
             if res.status_code == 401:
-                demisto.debug(f'Got status code 401 - {json_res}. Retrying ...')
+                demisto.debug(f"Got status code 401 - {json_res}. Retrying ...")
             else:
                 if isinstance(error, dict):
-                    message = json_res.get('error', {}).get('message')
-                    details = json_res.get('error', {}).get('detail')
-                    if message == 'No Record found':
+                    message = json_res.get("error", {}).get("message")
+                    details = json_res.get("error", {}).get("detail")
+                    if message == "No Record found":
                         return no_record_found_res
                     else:
-                        raise Exception(f'ServiceNow Error: {message}, details: {details}')
+                        raise Exception(f"ServiceNow Error: {message}, details: {details}")
                 else:
-                    raise Exception(f'ServiceNow Error: {error}')
+                    raise Exception(f"ServiceNow Error: {error}")

         if res.status_code < 200 or res.status_code >= 300:
             if res.status_code != 401 or num_of_tries == (max_retries - 1):
                 raise Exception(
-                    f'Got status code {str(res.status_code)} with url {url} with body {str(res.content)}'
-                    f' with headers {str(res.headers)}')
+                    f"Got status code {res.status_code!s} with url {url} with body {res.content!s}"
+                    f" with headers {res.headers!s}"
+                )
             else:
                 break
         num_of_tries += 1
@@ -797,9 +886,9 @@ def get_content_type(self, file_name):
         demisto.debug("file name was not supplied, uploading with general type")
     else:
         file_type, _ = mimetypes.guess_type(file_name)
-        return file_type or '*/*'
+        return file_type or "*/*"

-    def get_table_name(self, ticket_type: str = '') -> str:
+    def get_table_name(self, ticket_type: str = "") -> str:
         """Get the relevant table name from the client.
Args: @@ -820,18 +909,18 @@ def get_template(self, template_name: str) -> dict: Returns: the ticket template """ - query_params = {'sysparm_limit': 1, 'sysparm_query': f'name={template_name}'} + query_params = {"sysparm_limit": 1, "sysparm_query": f"name={template_name}"} - result = self.send_request('table/sys_template', 'GET', params=query_params) + result = self.send_request("table/sys_template", "GET", params=query_params) - if len(result['result']) == 0: + if len(result["result"]) == 0: raise ValueError("Incorrect template name.") - template = result['result'][0].get('template', '').split('^') + template = result["result"][0].get("template", "").split("^") dic_template = {} for i in range(len(template) - 1): - template_value = template[i].split('=') + template_value = template[i].split("=") if len(template_value) > 1: dic_template[template_value[0]] = template_value[1] @@ -847,10 +936,10 @@ def get_ticket_attachments(self, ticket_id: str, sys_created_on: Optional[str] = Returns: Response from API. """ - query = f'table_sys_id={ticket_id}' + query = f"table_sys_id={ticket_id}" if sys_created_on: - query += f'^sys_created_on>{sys_created_on}' - return self.send_request('attachment', 'GET', params={'sysparm_query': query}, get_attachments=True) + query += f"^sys_created_on>{sys_created_on}" + return self.send_request("attachment", "GET", params={"sysparm_query": query}, get_attachments=True) def get_ticket_attachment_entries(self, ticket_id: str, sys_created_on: Optional[str] = None) -> list: """Get ticket attachments, including file attachments @@ -865,32 +954,36 @@ def get_ticket_attachment_entries(self, ticket_id: str, sys_created_on: Optional """ entries = [] links = [] # type: List[tuple[str, str]] - headers = { - 'Accept': 'application/json', - 'Content-Type': 'application/json' - } + headers = {"Accept": "application/json", "Content-Type": "application/json"} attachments_res = self.get_ticket_attachments(ticket_id, sys_created_on) - if 'result' in attachments_res and len(attachments_res['result']) > 0: - attachments = attachments_res['result'] - links = [(attachment.get('download_link', ''), attachment.get('file_name', '')) - for attachment in attachments] + if "result" in attachments_res and len(attachments_res["result"]) > 0: + attachments = attachments_res["result"] + links = [(attachment.get("download_link", ""), attachment.get("file_name", "")) for attachment in attachments] for link in links: if self.use_oauth: access_token = self.snow_client.get_access_token() - headers.update({'Authorization': f'Bearer {access_token}'}) + headers.update({"Authorization": f"Bearer {access_token}"}) file_res = requests.get(link[0], headers=headers, verify=self._verify, proxies=self._proxies) else: - file_res = requests.get(link[0], auth=(self._username, self._password), verify=self._verify, - proxies=self._proxies) + file_res = requests.get( + link[0], auth=(self._username, self._password), verify=self._verify, proxies=self._proxies + ) if file_res is not None: entries.append(fileResult(link[1], file_res.content)) return entries - def get(self, table_name: str, record_id: str, custom_fields: dict = {}, number: str | None = None, - no_record_found_res: dict = {'result': []}, use_display_value: bool = False) -> dict: + def get( + self, + table_name: str, + record_id: str, + custom_fields: dict = {}, + number: str | None = None, + no_record_found_res: dict = {"result": []}, + use_display_value: bool = False, + ) -> dict: """Get a ticket by sending a GET request. 
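
        One of record_id, number, or custom_fields must be provided; e.g. (illustrative)
        get("incident", "", number="INC0000040") queries the incident table by ticket number.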
Args: @@ -905,26 +998,25 @@ def get(self, table_name: str, record_id: str, custom_fields: dict = {}, number: """ query_params = {} # type: Dict if record_id: - path = f'table/{table_name}/{record_id}' + path = f"table/{table_name}/{record_id}" elif number: - path = f'table/{table_name}' - query_params = { - 'number': number - } + path = f"table/{table_name}" + query_params = {"number": number} elif custom_fields: - path = f'table/{table_name}' + path = f"table/{table_name}" query_params = custom_fields else: # Only in cases where the table is of type ticket - raise ValueError('servicenow-get-ticket requires either ticket ID (sys_id) or ticket number.') + raise ValueError("servicenow-get-ticket requires either ticket ID (sys_id) or ticket number.") if use_display_value: - query_params['sysparm_display_value'] = "all" + query_params["sysparm_display_value"] = "all" - return self.send_request(path, 'GET', params=query_params, no_record_found_res=no_record_found_res) + return self.send_request(path, "GET", params=query_params, no_record_found_res=no_record_found_res) - def update(self, table_name: str, record_id: str, fields: dict = {}, custom_fields: dict = {}, - input_display_value: bool = False) -> dict: + def update( + self, table_name: str, record_id: str, fields: dict = {}, custom_fields: dict = {}, input_display_value: bool = False + ) -> dict: """Updates a ticket or a record by sending a PATCH request. Args: @@ -937,11 +1029,10 @@ def update(self, table_name: str, record_id: str, fields: dict = {}, custom_fiel Response from API. """ body = generate_body(fields, custom_fields) - query_params = {'sysparm_input_display_value': input_display_value} - return self.send_request(f'table/{table_name}/{record_id}', 'PATCH', params=query_params, body=body) + query_params = {"sysparm_input_display_value": input_display_value} + return self.send_request(f"table/{table_name}/{record_id}", "PATCH", params=query_params, body=body) - def create(self, table_name: str, fields: dict = {}, custom_fields: dict = {}, - input_display_value: bool = False): + def create(self, table_name: str, fields: dict = {}, custom_fields: dict = {}, input_display_value: bool = False): """Creates a ticket or a record by sending a POST request. Args: @@ -955,8 +1046,8 @@ def create(self, table_name: str, fields: dict = {}, custom_fields: dict = {}, Response from API. """ body = generate_body(fields, custom_fields) - query_params = {'sysparm_input_display_value': input_display_value} - return self.send_request(f'table/{table_name}', 'POST', params=query_params, body=body) + query_params = {"sysparm_input_display_value": input_display_value} + return self.send_request(f"table/{table_name}", "POST", params=query_params, body=body) def delete(self, table_name: str, record_id: str) -> dict: """Deletes a ticket or a record by sending a DELETE request. @@ -968,7 +1059,7 @@ def delete(self, table_name: str, record_id: str) -> dict: Returns: Response from API. """ - return self.send_request(f'table/{table_name}/{record_id}', 'DELETE') + return self.send_request(f"table/{table_name}/{record_id}", "DELETE") def add_link(self, ticket_id: str, ticket_type: str, key: str, link: str) -> dict: """Adds a link to a ticket by sending a PATCH request. @@ -982,7 +1073,7 @@ def add_link(self, ticket_id: str, ticket_type: str, key: str, link: str) -> dic Returns: Response from API. 
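
        Note: the link is written into the journal field selected by key (comments or
        work_notes), so it appears in the ticket's activity stream.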
""" - return self.send_request(f'table/{ticket_type}/{ticket_id}', 'PATCH', body={key: link}) + return self.send_request(f"table/{ticket_type}/{ticket_id}", "PATCH", body={key: link}) def add_comment(self, ticket_id: str, ticket_type: str, key: str, text: str) -> dict: """Adds a comment to a ticket by sending a PATCH request. @@ -996,7 +1087,7 @@ def add_comment(self, ticket_id: str, ticket_type: str, key: str, text: str) -> Returns: Response from API. """ - return self.send_request(f'table/{ticket_type}/{ticket_id}', 'PATCH', body={key: text}) + return self.send_request(f"table/{ticket_type}/{ticket_id}", "PATCH", body={key: text}) def upload_file(self, ticket_id: str, file_id: str, file_name: str, ticket_type: str) -> dict: """Adds a file to a ticket by sending a POST request. @@ -1010,14 +1101,15 @@ def upload_file(self, ticket_id: str, file_id: str, file_name: str, ticket_type: Returns: Response from API. """ - body = { - 'table_name': ticket_type, - 'table_sys_id': ticket_id, - 'file_name': file_name - } - - return self.send_request('attachment/upload', 'POST', headers={'Accept': 'application/json'}, - body=body, file={'id': file_id, 'name': file_name}) + body = {"table_name": ticket_type, "table_sys_id": ticket_id, "file_name": file_name} + + return self.send_request( + "attachment/upload", + "POST", + headers={"Accept": "application/json"}, + body=body, + file={"id": file_id, "name": file_name}, + ) def delete_attachment(self, attachment_file_id: str) -> dict: """Deletes an attachment file by sending a DELETE request. @@ -1028,7 +1120,7 @@ def delete_attachment(self, attachment_file_id: str) -> dict: Returns: Response from API. """ - return self.send_request(f'attachment/{attachment_file_id}', 'DELETE') + return self.send_request(f"attachment/{attachment_file_id}", "DELETE") def add_tag(self, ticket_id: str, tag_id: str, title: str, ticket_type: str) -> dict: """Adds a tag to a ticket by sending a POST request. @@ -1042,11 +1134,18 @@ def add_tag(self, ticket_id: str, tag_id: str, title: str, ticket_type: str) -> Returns: Response from API. """ - body = {'label': tag_id, 'table': ticket_type, 'table_key': ticket_id, 'title': title} - return self.send_request('/table/label_entry', 'POST', body=body) - - def query(self, table_name: str, sys_param_limit: str, sys_param_offset: str, sys_param_query: str, - system_params: dict = {}, sysparm_fields: Optional[str] = None) -> dict: + body = {"label": tag_id, "table": ticket_type, "table_key": ticket_id, "title": title} + return self.send_request("/table/label_entry", "POST", body=body) + + def query( + self, + table_name: str, + sys_param_limit: str, + sys_param_offset: str, + sys_param_query: str, + system_params: dict = {}, + sysparm_fields: Optional[str] = None, + ) -> dict: """Query records by sending a GET request. Args: @@ -1061,15 +1160,15 @@ def query(self, table_name: str, sys_param_limit: str, sys_param_offset: str, sy Response from API. 
""" - query_params = {'sysparm_limit': sys_param_limit, 'sysparm_offset': sys_param_offset} + query_params = {"sysparm_limit": sys_param_limit, "sysparm_offset": sys_param_offset} if sys_param_query: - query_params['sysparm_query'] = sys_param_query + query_params["sysparm_query"] = sys_param_query if system_params: query_params.update(system_params) if sysparm_fields: - query_params['sysparm_fields'] = sysparm_fields - demisto.debug(f'Running query records with the params: {query_params}') - return self.send_request(f'table/{table_name}', 'GET', params=query_params) + query_params["sysparm_fields"] = sysparm_fields + demisto.debug(f"Running query records with the params: {query_params}") + return self.send_request(f"table/{table_name}", "GET", params=query_params) def get_table_fields(self, table_name: str) -> dict: """Get table fields by sending a GET request. @@ -1080,7 +1179,7 @@ def get_table_fields(self, table_name: str) -> dict: Returns: Response from API. """ - return self.send_request(f'table/{table_name}?sysparm_limit=1', 'GET') + return self.send_request(f"table/{table_name}?sysparm_limit=1", "GET") def get_item_details(self, id_: str) -> dict: """Get item details from service catalog by sending a GET request to the Service Catalog API. @@ -1091,7 +1190,7 @@ def get_item_details(self, id_: str) -> dict: Returns: Response from API. """ - return self.send_request(f'servicecatalog/items/{id_}', 'GET', sc_api=True) + return self.send_request(f"servicecatalog/items/{id_}", "GET", sc_api=True) def create_item_order(self, id_: str, quantity: str, variables: dict = {}) -> dict: """Create item order in the service catalog by sending a POST request to the Service Catalog API. @@ -1104,8 +1203,8 @@ def create_item_order(self, id_: str, quantity: str, variables: dict = {}) -> di Returns: Response from API. """ - body = {'sysparm_quantity': quantity, 'variables': variables} - return self.send_request(f'servicecatalog/items/{id_}/order_now', 'POST', body=body, sc_api=True) + body = {"sysparm_quantity": quantity, "variables": variables} + return self.send_request(f"servicecatalog/items/{id_}/order_now", "POST", body=body, sc_api=True) def document_route_to_table_request(self, queue_id: str, document_table: str, document_id: str) -> dict: """Routes a document(ticket/incident) to a queue by sending a GET request. @@ -1118,8 +1217,8 @@ def document_route_to_table_request(self, queue_id: str, document_table: str, do Returns: Response from API. """ - body = {'document_sys_id': document_id, 'document_table': document_table} - return self.send_request(f'awa/queues/{queue_id}/work_item', 'POST', body=body) + body = {"document_sys_id": document_id, "document_table": document_table} + return self.send_request(f"awa/queues/{queue_id}/work_item", "POST", body=body) def create_co_from_template(self, template: str): """Creates a standard change request from template by sending a POST request. @@ -1129,8 +1228,7 @@ def create_co_from_template(self, template: str): Returns: Response from API. """ - return self.send_request(f'change/standard/{template}', 'POST', body={}, - cr_api=True) + return self.send_request(f"change/standard/{template}", "POST", body={}, cr_api=True) def get_co_tasks(self, sys_id: str) -> dict: """Get item details from service catalog by sending a GET request to the Change Request REST API. @@ -1141,7 +1239,7 @@ def get_co_tasks(self, sys_id: str) -> dict: Returns: Response from API. 
""" - return self.send_request(f'change/{sys_id}/task', 'GET', cr_api=True) + return self.send_request(f"change/{sys_id}/task", "GET", cr_api=True) def get_ticket_command(client: Client, args: dict): @@ -1154,54 +1252,72 @@ def get_ticket_command(client: Client, args: dict): Returns: Demisto Outputs. """ - ticket_type = client.get_table_name(str(args.get('ticket_type', ''))) - ticket_id = str(args.get('id', '')) - number = str(args.get('number', '')) - get_attachments = args.get('get_attachments', 'false') - fields_delimiter = args.get('fields_delimiter', ';') - custom_fields = split_fields(str(args.get('custom_fields', '')), fields_delimiter) - additional_fields = argToList(str(args.get('additional_fields', ''))) - - result = client.get(ticket_type, ticket_id, generate_body({}, custom_fields), - number, use_display_value=client.use_display_value) - if not result or 'result' not in result: - return 'Ticket was not found.' - - if isinstance(result['result'], list): - if len(result['result']) == 0: - return 'Ticket was not found.' - ticket = result['result'][0] + ticket_type = client.get_table_name(str(args.get("ticket_type", ""))) + ticket_id = str(args.get("id", "")) + number = str(args.get("number", "")) + get_attachments = args.get("get_attachments", "false") + fields_delimiter = args.get("fields_delimiter", ";") + custom_fields = split_fields(str(args.get("custom_fields", "")), fields_delimiter) + additional_fields = argToList(str(args.get("additional_fields", ""))) + + result = client.get( + ticket_type, ticket_id, generate_body({}, custom_fields), number, use_display_value=client.use_display_value + ) + if not result or "result" not in result: + return "Ticket was not found." + + if isinstance(result["result"], list): + if len(result["result"]) == 0: + return "Ticket was not found." 
+ ticket = result["result"][0] else: - ticket = result['result'] + ticket = result["result"] if client.use_display_value: ticket = format_incidents_response_with_display_values(ticket)[0] entries = [] # type: List[Dict] - if get_attachments.lower() != 'false': - entries = client.get_ticket_attachment_entries(ticket.get('sys_id')) + if get_attachments.lower() != "false": + entries = client.get_ticket_attachment_entries(ticket.get("sys_id")) hr = get_ticket_human_readable(ticket, ticket_type, additional_fields) context = get_ticket_context(ticket, additional_fields) - headers = ['System ID', 'Number', 'Impact', 'Urgency', 'Severity', 'Priority', 'State', 'Approval', - 'Created On', 'Created By', 'Active', 'Close Notes', 'Close Code', 'Description', 'Opened At', - 'Due Date', 'Resolved By', 'Resolved At', 'SLA Due', 'Short Description', 'Additional Comments'] + headers = [ + "System ID", + "Number", + "Impact", + "Urgency", + "Severity", + "Priority", + "State", + "Approval", + "Created On", + "Created By", + "Active", + "Close Notes", + "Close Code", + "Description", + "Opened At", + "Due Date", + "Resolved By", + "Resolved At", + "SLA Due", + "Short Description", + "Additional Comments", + ] if additional_fields: headers.extend(additional_fields) entry = { - 'Type': entryTypes['note'], - 'Contents': result, - 'ContentsFormat': formats['json'], - 'ReadableContentsFormat': formats['markdown'], - 'HumanReadable': tableToMarkdown('ServiceNow ticket', hr, headers=headers, removeNull=True), - 'EntryContext': { - 'Ticket(val.ID===obj.ID)': context, - 'ServiceNow.Ticket(val.ID===obj.ID)': context - }, - 'IgnoreAutoExtract': True + "Type": entryTypes["note"], + "Contents": result, + "ContentsFormat": formats["json"], + "ReadableContentsFormat": formats["markdown"], + "HumanReadable": tableToMarkdown("ServiceNow ticket", hr, headers=headers, removeNull=True), + "EntryContext": {"Ticket(val.ID===obj.ID)": context, "ServiceNow.Ticket(val.ID===obj.ID)": context}, + "IgnoreAutoExtract": True, } entries.append(entry) return entries @@ -1217,30 +1333,31 @@ def update_ticket_command(client: Client, args: dict) -> tuple[Any, dict, dict, Returns: Demisto Outputs. 
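
    Example (illustrative): args such as {"id": "<sys_id>", "urgency": "2 - Medium"} patch the
    ticket; empty argument values are treated as fields to clear.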
""" - fields_delimiter = args.get('fields_delimiter', ';') - custom_fields = split_fields(str(args.get('custom_fields', '')), fields_delimiter) - ticket_type_value = args.get('ticket_type') - demisto.debug(f'args(ticket_type): {ticket_type_value}') + fields_delimiter = args.get("fields_delimiter", ";") + custom_fields = split_fields(str(args.get("custom_fields", "")), fields_delimiter) + ticket_type_value = args.get("ticket_type") + demisto.debug(f"args(ticket_type): {ticket_type_value}") if not ticket_type_value: - ticket_type_value = demisto.params().get('ticket_type') - demisto.debug(f'Empty args(ticket_type), params(ticket_type): {ticket_type_value}') + ticket_type_value = demisto.params().get("ticket_type") + demisto.debug(f"Empty args(ticket_type), params(ticket_type): {ticket_type_value}") ticket_type = client.get_table_name(str(ticket_type_value)) - demisto.debug(f'Using ticket_type: {ticket_type}, from {ticket_type_value}') - ticket_id = str(args.get('id', '')) - additional_fields = split_fields(str(args.get('additional_fields', '')), fields_delimiter) + demisto.debug(f"Using ticket_type: {ticket_type}, from {ticket_type_value}") + ticket_id = str(args.get("id", "")) + additional_fields = split_fields(str(args.get("additional_fields", "")), fields_delimiter) additional_fields_keys = list(additional_fields.keys()) fields = get_ticket_fields(args, ticket_type=ticket_type) fields.update(additional_fields) - input_display_value = argToBoolean(args.get('input_display_value', 'false')) + input_display_value = argToBoolean(args.get("input_display_value", "false")) result = client.update(ticket_type, ticket_id, fields, custom_fields, input_display_value) - if not result or 'result' not in result: - raise Exception('Unable to retrieve response.') - ticket = result['result'] + if not result or "result" not in result: + raise Exception("Unable to retrieve response.") + ticket = result["result"] hr_ = get_ticket_human_readable(ticket, ticket_type, additional_fields_keys) - human_readable = tableToMarkdown(f'ServiceNow ticket updated successfully\nTicket type: {ticket_type}', - t=hr_, removeNull=True) + human_readable = tableToMarkdown( + f"ServiceNow ticket updated successfully\nTicket type: {ticket_type}", t=hr_, removeNull=True + ) # make the modified fields the user inserted as arguments show in the context if additional_fields: @@ -1248,7 +1365,7 @@ def update_ticket_command(client: Client, args: dict) -> tuple[Any, dict, dict, else: additional_fields_keys = list(args.keys()) - entry_context = {'ServiceNow.Ticket(val.ID===obj.ID)': get_ticket_context(ticket, additional_fields_keys)} + entry_context = {"ServiceNow.Ticket(val.ID===obj.ID)": get_ticket_context(ticket, additional_fields_keys)} return human_readable, entry_context, result, True @@ -1263,13 +1380,13 @@ def create_ticket_command(client: Client, args: dict) -> tuple[str, dict, dict, Returns: Demisto Outputs. 
""" - fields_delimiter = args.get('fields_delimiter', ';') - custom_fields = split_fields(str(args.get('custom_fields', '')), fields_delimiter) - template = args.get('template') - ticket_type = client.get_table_name(str(args.get('ticket_type', ''))) - additional_fields = split_fields(str(args.get('additional_fields', '')), fields_delimiter) + fields_delimiter = args.get("fields_delimiter", ";") + custom_fields = split_fields(str(args.get("custom_fields", "")), fields_delimiter) + template = args.get("template") + ticket_type = client.get_table_name(str(args.get("ticket_type", ""))) + additional_fields = split_fields(str(args.get("additional_fields", "")), fields_delimiter) additional_fields_keys = list(additional_fields.keys()) - input_display_value = argToBoolean(args.get('input_display_value', 'false')) + input_display_value = argToBoolean(args.get("input_display_value", "false")) if template: template = client.get_template(template) @@ -1279,20 +1396,39 @@ def create_ticket_command(client: Client, args: dict) -> tuple[str, dict, dict, result = client.create(ticket_type, fields, custom_fields, input_display_value) - if not result or 'result' not in result: - if 'successfully' in result: + if not result or "result" not in result: + if "successfully" in result: return result, {}, {}, True - raise Exception('Unable to retrieve response.') - ticket = result['result'] + raise Exception("Unable to retrieve response.") + ticket = result["result"] hr_ = get_ticket_human_readable(ticket, ticket_type, additional_fields_keys) - headers = ['System ID', 'Number', 'Impact', 'Urgency', 'Severity', 'Priority', 'State', 'Approval', - 'Created On', 'Created By', 'Active', 'Close Notes', 'Close Code', 'Description', 'Opened At', - 'Due Date', 'Resolved By', 'Resolved At', 'SLA Due', 'Short Description', 'Additional Comments'] + headers = [ + "System ID", + "Number", + "Impact", + "Urgency", + "Severity", + "Priority", + "State", + "Approval", + "Created On", + "Created By", + "Active", + "Close Notes", + "Close Code", + "Description", + "Opened At", + "Due Date", + "Resolved By", + "Resolved At", + "SLA Due", + "Short Description", + "Additional Comments", + ] if additional_fields: headers.extend(additional_fields_keys) - human_readable = tableToMarkdown('ServiceNow ticket was created successfully.', t=hr_, - headers=headers, removeNull=True) + human_readable = tableToMarkdown("ServiceNow ticket was created successfully.", t=hr_, headers=headers, removeNull=True) # make the modified fields the user inserted as arguments show in the context if additional_fields: @@ -1302,8 +1438,8 @@ def create_ticket_command(client: Client, args: dict) -> tuple[str, dict, dict, created_ticket_context = get_ticket_context(ticket, additional_fields_keys) entry_context = { - 'Ticket(val.ID===obj.ID)': created_ticket_context, - 'ServiceNow.Ticket(val.ID===obj.ID)': created_ticket_context + "Ticket(val.ID===obj.ID)": created_ticket_context, + "ServiceNow.Ticket(val.ID===obj.ID)": created_ticket_context, } return human_readable, entry_context, result, True @@ -1319,12 +1455,12 @@ def delete_ticket_command(client: Client, args: dict) -> tuple[str, dict, dict, Returns: Demisto Outputs. 
""" - ticket_id = str(args.get('id', '')) - ticket_type = client.get_table_name(str(args.get('ticket_type', ''))) + ticket_id = str(args.get("id", "")) + ticket_type = client.get_table_name(str(args.get("ticket_type", ""))) result = client.delete(ticket_type, ticket_id) - return f'Ticket with ID {ticket_id} was successfully deleted.', {}, result, True + return f"Ticket with ID {ticket_id} was successfully deleted.", {}, result, True def query_tickets_command(client: Client, args: dict) -> tuple[str, dict, dict, bool]: @@ -1337,32 +1473,48 @@ def query_tickets_command(client: Client, args: dict) -> tuple[str, dict, dict, Returns: Demisto Outputs. """ - sys_param_limit = args.get('limit', client.sys_param_limit) - sys_param_offset = args.get('offset', client.sys_param_offset) - sys_param_query = str(args.get('query', '')) - system_params = split_fields(args.get('system_params', '')) - additional_fields = argToList(str(args.get('additional_fields'))) + sys_param_limit = args.get("limit", client.sys_param_limit) + sys_param_offset = args.get("offset", client.sys_param_offset) + sys_param_query = str(args.get("query", "")) + system_params = split_fields(args.get("system_params", "")) + additional_fields = argToList(str(args.get("additional_fields"))) - ticket_type = client.get_table_name(str(args.get('ticket_type', ''))) + ticket_type = client.get_table_name(str(args.get("ticket_type", ""))) result = client.query(ticket_type, sys_param_limit, sys_param_offset, sys_param_query, system_params) - if not result or 'result' not in result or len(result['result']) == 0: - return 'No ServiceNow tickets matched the query.', {}, {}, True - tickets = result.get('result', {}) + if not result or "result" not in result or len(result["result"]) == 0: + return "No ServiceNow tickets matched the query.", {}, {}, True + tickets = result.get("result", {}) hr_ = get_ticket_human_readable(tickets, ticket_type, additional_fields) context = get_ticket_context(tickets, additional_fields) - headers = ['System ID', 'Number', 'Impact', 'Urgency', 'Severity', 'Priority', 'State', 'Created On', 'Created By', - 'Active', 'Close Notes', 'Close Code', 'Description', 'Opened At', 'Due Date', 'Resolved By', - 'Resolved At', 'SLA Due', 'Short Description', 'Additional Comments'] + headers = [ + "System ID", + "Number", + "Impact", + "Urgency", + "Severity", + "Priority", + "State", + "Created On", + "Created By", + "Active", + "Close Notes", + "Close Code", + "Description", + "Opened At", + "Due Date", + "Resolved By", + "Resolved At", + "SLA Due", + "Short Description", + "Additional Comments", + ] if additional_fields: headers.extend(additional_fields) - human_readable = tableToMarkdown('ServiceNow tickets', t=hr_, headers=headers, removeNull=True) - entry_context = { - 'Ticket(val.ID===obj.ID)': context, - 'ServiceNow.Ticket(val.ID===obj.ID)': context - } + human_readable = tableToMarkdown("ServiceNow tickets", t=hr_, headers=headers, removeNull=True) + entry_context = {"Ticket(val.ID===obj.ID)": context, "ServiceNow.Ticket(val.ID===obj.ID)": context} return human_readable, entry_context, result, True @@ -1377,24 +1529,42 @@ def add_link_command(client: Client, args: dict) -> tuple[str, dict, dict, bool] Returns: Demisto Outputs. 
""" - ticket_id = str(args.get('id', '')) - key = 'comments' if args.get('post-as-comment', 'false').lower() == 'true' else 'work_notes' - link_argument = str(args.get('link', '')) - text = args.get('text', link_argument) + ticket_id = str(args.get("id", "")) + key = "comments" if args.get("post-as-comment", "false").lower() == "true" else "work_notes" + link_argument = str(args.get("link", "")) + text = args.get("text", link_argument) link = f'[code]{text}[/code]' - ticket_type = client.get_table_name(str(args.get('ticket_type', ''))) + ticket_type = client.get_table_name(str(args.get("ticket_type", ""))) result = client.add_link(ticket_id, ticket_type, key, link) - if not result or 'result' not in result: - raise Exception('Unable to retrieve response.') - - headers = ['System ID', 'Number', 'Impact', 'Urgency', 'Severity', 'Priority', 'State', 'Created On', 'Created By', - 'Active', 'Close Notes', 'Close Code', 'Description', 'Opened At', 'Due Date', 'Resolved By', - 'Resolved At', 'SLA Due', 'Short Description', 'Additional Comments'] - hr_ = get_ticket_human_readable(result['result'], ticket_type) - human_readable = tableToMarkdown('Link successfully added to ServiceNow ticket', t=hr_, - headers=headers, removeNull=True) + if not result or "result" not in result: + raise Exception("Unable to retrieve response.") + + headers = [ + "System ID", + "Number", + "Impact", + "Urgency", + "Severity", + "Priority", + "State", + "Created On", + "Created By", + "Active", + "Close Notes", + "Close Code", + "Description", + "Opened At", + "Due Date", + "Resolved By", + "Resolved At", + "SLA Due", + "Short Description", + "Additional Comments", + ] + hr_ = get_ticket_human_readable(result["result"], ticket_type) + human_readable = tableToMarkdown("Link successfully added to ServiceNow ticket", t=hr_, headers=headers, removeNull=True) return human_readable, {}, result, True @@ -1409,23 +1579,40 @@ def add_comment_command(client: Client, args: dict) -> tuple[str, dict, dict, bo Returns: Demisto Outputs. 
""" - ticket_id = str(args.get('id', '')) - key = 'comments' if args.get('post-as-comment', 'false').lower() == 'true' else 'work_notes' - text = str(args.get('comment', '')) - ticket_type = client.get_table_name(str(args.get('ticket_type', ''))) + ticket_id = str(args.get("id", "")) + key = "comments" if args.get("post-as-comment", "false").lower() == "true" else "work_notes" + text = str(args.get("comment", "")) + ticket_type = client.get_table_name(str(args.get("ticket_type", ""))) result = client.add_comment(ticket_id, ticket_type, key, text) - if not result or 'result' not in result: - raise Exception('Unable to retrieve response.') - - headers = ['System ID', 'Number', 'Impact', 'Urgency', 'Severity', 'Priority', 'State', 'Created On', 'Created By', - 'Active', 'Close Notes', 'Close Code', - 'Description', 'Opened At', 'Due Date', 'Resolved By', 'Resolved At', 'SLA Due', 'Short Description', - 'Additional Comments'] - hr_ = get_ticket_human_readable(result['result'], ticket_type) - human_readable = tableToMarkdown('Comment successfully added to ServiceNow ticket', t=hr_, - headers=headers, removeNull=True) + if not result or "result" not in result: + raise Exception("Unable to retrieve response.") + + headers = [ + "System ID", + "Number", + "Impact", + "Urgency", + "Severity", + "Priority", + "State", + "Created On", + "Created By", + "Active", + "Close Notes", + "Close Code", + "Description", + "Opened At", + "Due Date", + "Resolved By", + "Resolved At", + "SLA Due", + "Short Description", + "Additional Comments", + ] + hr_ = get_ticket_human_readable(result["result"], ticket_type) + human_readable = tableToMarkdown("Comment successfully added to ServiceNow ticket", t=hr_, headers=headers, removeNull=True) return human_readable, {}, result, True @@ -1440,39 +1627,36 @@ def upload_file_command(client: Client, args: dict) -> tuple[str, dict, dict, bo Returns: Demisto Outputs. 
""" - ticket_type = client.get_table_name(str(args.get('ticket_type', ''))) - ticket_id = str(args.get('id', '')) - file_id = str(args.get('file_id', '')) + ticket_type = client.get_table_name(str(args.get("ticket_type", ""))) + ticket_id = str(args.get("id", "")) + file_id = str(args.get("file_id", "")) - file_name = args.get('file_name') + file_name = args.get("file_name") if not file_name: file_data = demisto.getFilePath(file_id) - file_name = file_data.get('name') + file_name = file_data.get("name") result = client.upload_file(ticket_id, file_id, file_name, ticket_type) - if not result or 'result' not in result or not result['result']: - raise Exception('Unable to upload file.') - uploaded_file_resp = result.get('result', {}) + if not result or "result" not in result or not result["result"]: + raise Exception("Unable to upload file.") + uploaded_file_resp = result.get("result", {}) hr_ = { - 'Filename': uploaded_file_resp.get('file_name'), - 'Download link': uploaded_file_resp.get('download_link'), - 'System ID': uploaded_file_resp.get('sys_id') + "Filename": uploaded_file_resp.get("file_name"), + "Download link": uploaded_file_resp.get("download_link"), + "System ID": uploaded_file_resp.get("sys_id"), } - human_readable = tableToMarkdown(f'File uploaded successfully to ticket {ticket_id}.', t=hr_) + human_readable = tableToMarkdown(f"File uploaded successfully to ticket {ticket_id}.", t=hr_) context = { - 'ID': ticket_id, - 'File': { - 'Filename': uploaded_file_resp.get('file_name'), - 'Link': uploaded_file_resp.get('download_link'), - 'SystemID': uploaded_file_resp.get('sys_id') - } - } - entry_context = { - 'ServiceNow.Ticket(val.ID===obj.ID)': context, - 'Ticket(val.ID===obj.ID)': context + "ID": ticket_id, + "File": { + "Filename": uploaded_file_resp.get("file_name"), + "Link": uploaded_file_resp.get("download_link"), + "SystemID": uploaded_file_resp.get("sys_id"), + }, } + entry_context = {"ServiceNow.Ticket(val.ID===obj.ID)": context, "Ticket(val.ID===obj.ID)": context} return human_readable, entry_context, result, True @@ -1494,11 +1678,11 @@ def delete_attachment_command(client: Client, args: dict) -> tuple[str, dict, di :raises DemistoException: Raised if no record is found for the provided attachment file ID. """ - attachment_file_id = str(args.get('file_sys_id', '')) + attachment_file_id = str(args.get("file_sys_id", "")) result = client.delete_attachment(attachment_file_id) if not result: # successful response is 204 (empty response) - return f'Attachment with Sys ID {attachment_file_id} was successfully deleted.', {}, result, True + return f"Attachment with Sys ID {attachment_file_id} was successfully deleted.", {}, result, True raise DemistoException("Error: No record found. Record doesn't exist or ACL restricts the record retrieval.") @@ -1512,12 +1696,12 @@ def get_attachment_command(client: Client, args: dict) -> list | CommandResults: Returns: Command results and file results. 
""" - sys_id = str(args.get('sys_id', '')) + sys_id = str(args.get("sys_id", "")) result = client.get_ticket_attachment_entries(sys_id) if result: - return [CommandResults(readable_output=f'Successfully retrieved attachments for ticket with sys id {sys_id}.'), result] - return CommandResults(readable_output=f'Ticket with sys id {sys_id} has no attachments to retrieve.') + return [CommandResults(readable_output=f"Successfully retrieved attachments for ticket with sys id {sys_id}."), result] + return CommandResults(readable_output=f"Ticket with sys id {sys_id} has no attachments to retrieve.") def add_tag_command(client: Client, args: dict) -> tuple[str, dict, dict, bool]: @@ -1530,34 +1714,38 @@ def add_tag_command(client: Client, args: dict) -> tuple[str, dict, dict, bool]: Returns: Demisto Outputs. """ - ticket_id = str(args.get('id', '')) - tag_id = str(args.get('tag_id', '')) - title = str(args.get('title', '')) - ticket_type = client.get_table_name(str(args.get('ticket_type', ''))) + ticket_id = str(args.get("id", "")) + tag_id = str(args.get("tag_id", "")) + title = str(args.get("title", "")) + ticket_type = client.get_table_name(str(args.get("ticket_type", ""))) result = client.add_tag(ticket_id, tag_id, title, ticket_type) - if not result or 'result' not in result: - raise Exception(f'Could not add tag {title} to ticket {ticket_id}.') + if not result or "result" not in result: + raise Exception(f"Could not add tag {title} to ticket {ticket_id}.") - added_tag_resp = result.get('result', {}) + added_tag_resp = result.get("result", {}) hr_ = { - 'Title': added_tag_resp.get('title'), - 'Ticket ID': added_tag_resp.get('id_display'), - 'Ticket Type': added_tag_resp.get('id_type'), - 'Tag ID': added_tag_resp.get('sys_id'), + "Title": added_tag_resp.get("title"), + "Ticket ID": added_tag_resp.get("id_display"), + "Ticket Type": added_tag_resp.get("id_type"), + "Tag ID": added_tag_resp.get("sys_id"), } - human_readable = tableToMarkdown(f'Tag {tag_id} was added successfully to ticket {ticket_id}.', t=hr_) + human_readable = tableToMarkdown(f"Tag {tag_id} was added successfully to ticket {ticket_id}.", t=hr_) context = { - 'ID': ticket_id, - 'TagTitle': added_tag_resp.get('title'), - 'TagID': added_tag_resp.get('sys_id'), + "ID": ticket_id, + "TagTitle": added_tag_resp.get("title"), + "TagID": added_tag_resp.get("sys_id"), } - entry_context = {'ServiceNow.Ticket(val.ID===obj.ID)': context} + entry_context = {"ServiceNow.Ticket(val.ID===obj.ID)": context} return human_readable, entry_context, result, True -def get_ticket_notes_command(client: Client, args: dict, params: dict,) -> list[CommandResults | dict]: +def get_ticket_notes_command( + client: Client, + args: dict, + params: dict, +) -> list[CommandResults | dict]: """Get the ticket's note. Args: @@ -1567,55 +1755,63 @@ def get_ticket_notes_command(client: Client, args: dict, params: dict,) -> list[ Returns: Demisto Outputs. 
""" - ticket_id = args.get('id') - sys_param_limit = args.get('limit', client.sys_param_limit) - sys_param_offset = args.get('offset', client.sys_param_offset) - add_as_entry = argToBoolean(args.get('add_as_entry', False)) + ticket_id = args.get("id") + sys_param_limit = args.get("limit", client.sys_param_limit) + sys_param_offset = args.get("offset", client.sys_param_offset) + add_as_entry = argToBoolean(args.get("add_as_entry", False)) - use_display_value = argToBoolean(args.get('use_display_value', client.use_display_value)) + use_display_value = argToBoolean(args.get("use_display_value", client.use_display_value)) return_results: list = [] if use_display_value: # make query using sysparm_display_value=all (requires less permissions) - assert client.display_date_format, 'A display date format must be selected in the instance configuration when' \ - ' retrieving notes using the display value option.' - ticket_type = client.get_table_name(str(args.get('ticket_type', client.ticket_type))) - path = f'table/{ticket_type}/{ticket_id}' - query_params = {'sysparm_limit': sys_param_limit, 'sysparm_offset': sys_param_offset, 'sysparm_display_value': 'all'} - response = client.send_request(path, 'GET', params=query_params).get('result', {}) + assert client.display_date_format, ( + "A display date format must be selected in the instance configuration when" + " retrieving notes using the display value option." + ) + ticket_type = client.get_table_name(str(args.get("ticket_type", client.ticket_type))) + path = f"table/{ticket_type}/{ticket_id}" + query_params = {"sysparm_limit": sys_param_limit, "sysparm_offset": sys_param_offset, "sysparm_display_value": "all"} + response = client.send_request(path, "GET", params=query_params).get("result", {}) timezone_offset = get_timezone_offset(response, client.display_date_format) format_response = format_incidents_response_with_display_values(response)[0] - result = convert_to_notes_result(format_response, time_info={ - 'display_date_format': client.display_date_format, 'timezone_offset': timezone_offset}) + result = convert_to_notes_result( + format_response, time_info={"display_date_format": client.display_date_format, "timezone_offset": timezone_offset} + ) else: - sys_param_query = f'element_id={ticket_id}^element=comments^ORelement=work_notes' - result = client.query('sys_journal_field', sys_param_limit, sys_param_offset, sys_param_query) + sys_param_query = f"element_id={ticket_id}^element=comments^ORelement=work_notes" + result = client.query("sys_journal_field", sys_param_limit, sys_param_offset, sys_param_query) - if not result or 'result' not in result: - return [CommandResults(raw_response=f'No comment found on ticket {ticket_id}.')] + if not result or "result" not in result: + return [CommandResults(raw_response=f"No comment found on ticket {ticket_id}.")] if add_as_entry: - return_results.extend(get_entries_for_notes(result['result'], params)) + return_results.extend(get_entries_for_notes(result["result"], params)) - headers = ['Value', 'CreatedOn', 'CreatedBy', 'Type'] + headers = ["Value", "CreatedOn", "CreatedBy", "Type"] - mapped_notes = [{ - 'Value': note.get('value'), - 'CreatedOn': note.get('sys_created_on'), - 'CreatedBy': note.get('sys_created_by'), - 'Type': 'Work Note' if note.get('element', '') == 'work_notes' else 'Comment' - } for note in result['result']] + mapped_notes = [ + { + "Value": note.get("value"), + "CreatedOn": note.get("sys_created_on"), + "CreatedBy": note.get("sys_created_by"), + "Type": "Work Note" if 
note.get("element", "") == "work_notes" else "Comment", + } + for note in result["result"] + ] if not mapped_notes: - return [CommandResults(raw_response=f'No comment found on ticket {ticket_id}.')] + return [CommandResults(raw_response=f"No comment found on ticket {ticket_id}.")] - ticket = { - 'ID': ticket_id, - 'Note': mapped_notes - } + ticket = {"ID": ticket_id, "Note": mapped_notes} - human_readable = tableToMarkdown(f'ServiceNow notes for ticket {ticket_id}', t=mapped_notes, headers=headers, - headerTransform=pascalToSpace, removeNull=True) + human_readable = tableToMarkdown( + f"ServiceNow notes for ticket {ticket_id}", + t=mapped_notes, + headers=headers, + headerTransform=pascalToSpace, + removeNull=True, + ) return_results.append( CommandResults( @@ -1623,7 +1819,7 @@ def get_ticket_notes_command(client: Client, args: dict, params: dict,) -> list[ outputs_key_field="ID", outputs=createContext(ticket, removeNull=True), readable_output=human_readable, - raw_response=result + raw_response=result, ) ) return return_results @@ -1632,32 +1828,34 @@ def get_ticket_notes_command(client: Client, args: dict, params: dict,) -> list[ def get_entries_for_notes(notes: list[dict], params) -> list[dict]: entries = [] for note in notes: - if 'Mirrored from Cortex XSOAR' not in note.get('value', ''): - comments_context = {'comments_and_work_notes': note.get('value')} + if "Mirrored from Cortex XSOAR" not in note.get("value", ""): + comments_context = {"comments_and_work_notes": note.get("value")} - if (tagsstr := note.get('tags', 'none')) == 'none': - if note.get('element') == 'comments': - tags = [params.get('comment_tag_from_servicenow', 'CommentFromServiceNow')] + if (tagsstr := note.get("tags", "none")) == "none": + if note.get("element") == "comments": + tags = [params.get("comment_tag_from_servicenow", "CommentFromServiceNow")] else: - tags = [params.get('work_notes_tag_from_servicenow', 'WorkNoteFromServiceNow')] + tags = [params.get("work_notes_tag_from_servicenow", "WorkNoteFromServiceNow")] else: - if str(note.get('element')) == 'comments': - tags = tagsstr + params.get('comment_tag_from_servicenow', 'CommentFromServiceNow') + if str(note.get("element")) == "comments": + tags = tagsstr + params.get("comment_tag_from_servicenow", "CommentFromServiceNow") tags = argToList(tags) else: - tags = tagsstr + params.get('work_notes_tag_from_servicenow', 'WorkNoteFromServiceNow') + tags = tagsstr + params.get("work_notes_tag_from_servicenow", "WorkNoteFromServiceNow") tags = argToList(tags) - entries.append({ - 'Type': note.get('type', 1), - 'Category': note.get('category'), - 'Contents': f"Type: {note.get('element')}\nCreated By: {note.get('sys_created_by')}\n" - f"Created On: {note.get('sys_created_on')}\n{note.get('value')}", - 'ContentsFormat': note.get('format'), - 'Tags': tags, - 'Note': True, - 'EntryContext': comments_context - }) + entries.append( + { + "Type": note.get("type", 1), + "Category": note.get("category"), + "Contents": f"Type: {note.get('element')}\nCreated By: {note.get('sys_created_by')}\n" + f"Created On: {note.get('sys_created_on')}\n{note.get('value')}", + "ContentsFormat": note.get("format"), + "Tags": tags, + "Note": True, + "EntryContext": comments_context, + } + ) return entries @@ -1672,40 +1870,40 @@ def get_record_command(client: Client, args: dict) -> tuple[str, dict, dict, boo Returns: Demisto Outputs. 
""" - table_name = str(args.get('table_name', '')) - record_id = str(args.get('id', '')) - fields = str(args.get('fields', '')) + table_name = str(args.get("table_name", "")) + record_id = str(args.get("id", "")) + fields = str(args.get("fields", "")) result = client.get(table_name, record_id) - if not result or 'result' not in result: - return f'ServiceNow record with ID {record_id} was not found.', {}, {}, True + if not result or "result" not in result: + return f"ServiceNow record with ID {record_id} was not found.", {}, {}, True - if isinstance(result['result'], list): - if len(result['result']) == 0: - return f'ServiceNow record with ID {record_id} was not found.', {}, result, True - record = result['result'][0] + if isinstance(result["result"], list): + if len(result["result"]) == 0: + return f"ServiceNow record with ID {record_id} was not found.", {}, result, True + record = result["result"][0] else: - record = result['result'] + record = result["result"] if fields: list_fields = argToList(fields) - if 'sys_id' not in list_fields: + if "sys_id" not in list_fields: # ID is added by default - list_fields.append('sys_id') + list_fields.append("sys_id") # filter the record for the required fields record = dict([kv_pair for kv_pair in list(record.items()) if kv_pair[0] in list_fields]) for k, v in record.items(): if isinstance(v, dict): # For objects that refer to a record in the database, take their value(system ID). - record[k] = v.get('value', record[k]) - record['ID'] = record.pop('sys_id') - human_readable = tableToMarkdown('ServiceNow record', record, removeNull=True) - entry_context = {'ServiceNow.Record(val.ID===obj.ID)': createContext(record)} + record[k] = v.get("value", record[k]) + record["ID"] = record.pop("sys_id") + human_readable = tableToMarkdown("ServiceNow record", record, removeNull=True) + entry_context = {"ServiceNow.Record(val.ID===obj.ID)": createContext(record)} else: mapped_record = {DEFAULT_RECORD_FIELDS[k]: record[k] for k in DEFAULT_RECORD_FIELDS if k in record} - human_readable = tableToMarkdown(f'ServiceNow record {record_id}', mapped_record, removeNull=True) - entry_context = {'ServiceNow.Record(val.ID===obj.ID)': createContext(mapped_record)} + human_readable = tableToMarkdown(f"ServiceNow record {record_id}", mapped_record, removeNull=True) + entry_context = {"ServiceNow.Record(val.ID===obj.ID)": createContext(mapped_record)} return human_readable, entry_context, result, True @@ -1720,11 +1918,11 @@ def create_record_command(client: Client, args: dict) -> tuple[Any, dict[Any, An Returns: Demisto Outputs. 
""" - table_name = str(args.get('table_name', '')) - fields_str = str(args.get('fields', '')) - custom_fields_str = str(args.get('custom_fields', '')) - input_display_value = argToBoolean(args.get('input_display_value', 'false')) - fields_delimiter = args.get('fields_delimiter', ';') + table_name = str(args.get("table_name", "")) + fields_str = str(args.get("fields", "")) + custom_fields_str = str(args.get("custom_fields", "")) + input_display_value = argToBoolean(args.get("input_display_value", "false")) + fields_delimiter = args.get("fields_delimiter", ";") fields = {} if fields_str: @@ -1735,14 +1933,14 @@ def create_record_command(client: Client, args: dict) -> tuple[Any, dict[Any, An result = client.create(table_name, fields, custom_fields, input_display_value) - if not result or 'result' not in result: - return 'Could not create record.', {}, {}, True + if not result or "result" not in result: + return "Could not create record.", {}, {}, True - record = result.get('result', {}) + record = result.get("result", {}) mapped_record = {DEFAULT_RECORD_FIELDS[k]: record[k] for k in DEFAULT_RECORD_FIELDS if k in record} - human_readable = tableToMarkdown('ServiceNow record created successfully', mapped_record, removeNull=True) - entry_context = {'ServiceNow.Record(val.ID===obj.ID)': createContext(mapped_record)} + human_readable = tableToMarkdown("ServiceNow record created successfully", mapped_record, removeNull=True) + entry_context = {"ServiceNow.Record(val.ID===obj.ID)": createContext(mapped_record)} return human_readable, entry_context, result, True @@ -1757,12 +1955,12 @@ def update_record_command(client: Client, args: dict) -> tuple[Any, dict[Any, An Returns: Demisto Outputs. """ - table_name = str(args.get('table_name', '')) - record_id = str(args.get('id', '')) - fields_str = str(args.get('fields', '')) - custom_fields_str = str(args.get('custom_fields', '')) - input_display_value = argToBoolean(args.get('input_display_value', 'false')) - fields_delimiter = args.get('fields_delimiter', ';') + table_name = str(args.get("table_name", "")) + record_id = str(args.get("id", "")) + fields_str = str(args.get("fields", "")) + custom_fields_str = str(args.get("custom_fields", "")) + input_display_value = argToBoolean(args.get("input_display_value", "false")) + fields_delimiter = args.get("fields_delimiter", ";") fields = get_ticket_fields(args, ticket_type=table_name) if fields_str: @@ -1774,14 +1972,15 @@ def update_record_command(client: Client, args: dict) -> tuple[Any, dict[Any, An result = client.update(table_name, record_id, fields, custom_fields, input_display_value) - if not result or 'result' not in result: - return 'Could not retrieve record.', {}, {}, True + if not result or "result" not in result: + return "Could not retrieve record.", {}, {}, True - record = result.get('result', {}) + record = result.get("result", {}) mapped_record = {DEFAULT_RECORD_FIELDS[k]: record[k] for k in DEFAULT_RECORD_FIELDS if k in record} - human_readable = tableToMarkdown(f'ServiceNow record with ID {record_id} updated successfully', - t=mapped_record, removeNull=True) - entry_context = {'ServiceNow.Record(val.ID===obj.ID)': createContext(mapped_record)} + human_readable = tableToMarkdown( + f"ServiceNow record with ID {record_id} updated successfully", t=mapped_record, removeNull=True + ) + entry_context = {"ServiceNow.Record(val.ID===obj.ID)": createContext(mapped_record)} return human_readable, entry_context, result, True @@ -1796,12 +1995,12 @@ def delete_record_command(client: Client, args: dict) 
-> tuple[str, dict[Any, An Returns: Demisto Outputs. """ - record_id = str(args.get('id', '')) - table_name = str(args.get('table_name', '')) + record_id = str(args.get("id", "")) + table_name = str(args.get("table_name", "")) result = client.delete(table_name, record_id) - return f'ServiceNow record with ID {record_id} was successfully deleted.', {}, result, True + return f"ServiceNow record with ID {record_id} was successfully deleted.", {}, result, True def query_table_command(client: Client, args: dict) -> tuple[str, dict, dict, bool]: @@ -1814,38 +2013,36 @@ def query_table_command(client: Client, args: dict) -> tuple[str, dict, dict, bo Returns: Demisto Outputs. """ - table_name = str(args.get('table_name', '')) - sys_param_limit = args.get('limit', client.sys_param_limit) - sys_param_query = str(args.get('query', '')) - system_params = split_fields(args.get('system_params', '')) - sys_param_offset = args.get('offset', client.sys_param_offset) - fields = args.get('fields') - if fields and 'sys_id' not in fields: - fields = f'{fields},sys_id' # ID is added by default - - result = client.query(table_name, sys_param_limit, sys_param_offset, sys_param_query, system_params, - sysparm_fields=fields) - if not result or 'result' not in result or len(result['result']) == 0: - return 'No results found', {}, {}, False - table_entries = result.get('result', {}) + table_name = str(args.get("table_name", "")) + sys_param_limit = args.get("limit", client.sys_param_limit) + sys_param_query = str(args.get("query", "")) + system_params = split_fields(args.get("system_params", "")) + sys_param_offset = args.get("offset", client.sys_param_offset) + fields = args.get("fields") + if fields and "sys_id" not in fields: + fields = f"{fields},sys_id" # ID is added by default + + result = client.query(table_name, sys_param_limit, sys_param_offset, sys_param_query, system_params, sysparm_fields=fields) + if not result or "result" not in result or len(result["result"]) == 0: + return "No results found", {}, {}, False + table_entries = result.get("result", {}) if fields: fields = argToList(fields) # Filter the records according to the given fields - records = [{k.replace('.', '_'): v for k, v in r.items() if k in fields} for r in table_entries] + records = [{k.replace(".", "_"): v for k, v in r.items() if k in fields} for r in table_entries] for record in records: - record['ID'] = record.pop('sys_id') + record["ID"] = record.pop("sys_id") for k, v in record.items(): if isinstance(v, dict): # For objects that refer to a record in the database, take their value (system ID). 
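# A minimal, self-contained sketch of the reference-field flattening this hunk
# reformats: the ServiceNow Table API returns reference fields as dicts that
# hold the referenced record's sys_id under "value". The sample record, ids,
# and link below are hypothetical, for illustration only.
sample_record = {
    "sys_id": "9d385017c611228701d22104cc95c371",
    "name": "web-server-01",
    "assigned_to": {"value": "46b87022a9fe1981", "link": "https://instance.example.com/api/now/table/sys_user/46b87022a9fe1981"},
}
record = dict(sample_record)
record["ID"] = record.pop("sys_id")
for k, v in record.items():
    if isinstance(v, dict):
        record[k] = v.get("value", v)  # keep only the referenced sys_id
print(record["assigned_to"])  # 46b87022a9fe1981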
- record[k] = v.get('value', v) - human_readable = tableToMarkdown('ServiceNow records', records, removeNull=True) - entry_context = {'ServiceNow.Record(val.ID===obj.ID)': createContext(records)} + record[k] = v.get("value", v) + human_readable = tableToMarkdown("ServiceNow records", records, removeNull=True) + entry_context = {"ServiceNow.Record(val.ID===obj.ID)": createContext(records)} else: - mapped_records = [{DEFAULT_RECORD_FIELDS[k]: r[k] for k in DEFAULT_RECORD_FIELDS if k in r} - for r in table_entries] - human_readable = tableToMarkdown('ServiceNow records', mapped_records, removeNull=True) - entry_context = {'ServiceNow.Record(val.ID===obj.ID)': createContext(mapped_records)} + mapped_records = [{DEFAULT_RECORD_FIELDS[k]: r[k] for k in DEFAULT_RECORD_FIELDS if k in r} for r in table_entries] + human_readable = tableToMarkdown("ServiceNow records", mapped_records, removeNull=True) + entry_context = {"ServiceNow.Record(val.ID===obj.ID)": createContext(mapped_records)} return human_readable, entry_context, result, False @@ -1860,64 +2057,81 @@ def query_computers_command(client: Client, args: dict) -> tuple[Any, dict[Any, Returns: Demisto Outputs. """ - table_name = 'cmdb_ci_computer' - computer_id = args.get('computer_id', None) - computer_name = args.get('computer_name', None) - asset_tag = args.get('asset_tag', None) - computer_query = args.get('query', {}) - offset = args.get('offset', client.sys_param_offset) - limit = args.get('limit', client.sys_param_limit) + table_name = "cmdb_ci_computer" + computer_id = args.get("computer_id", None) + computer_name = args.get("computer_name", None) + asset_tag = args.get("asset_tag", None) + computer_query = args.get("query", {}) + offset = args.get("offset", client.sys_param_offset) + limit = args.get("limit", client.sys_param_limit) if computer_id: result = client.get(table_name, computer_id) else: if computer_name: - computer_query = f'name={computer_name}' + computer_query = f"name={computer_name}" elif asset_tag: - computer_query = f'asset_tag={asset_tag}' + computer_query = f"asset_tag={asset_tag}" result = client.query(table_name, limit, offset, computer_query) - if not result or 'result' not in result: - return 'No computers found.', {}, {}, False + if not result or "result" not in result: + return "No computers found.", {}, {}, False - computers = result.get('result', {}) + computers = result.get("result", {}) if not isinstance(computers, list): computers = [computers] if len(computers) == 0: - return 'No computers found.', {}, {}, False + return "No computers found.", {}, {}, False computer_statuses = { - '1': 'In use', - '2': 'On order', - '3': 'On maintenance', - '6': 'In stock/In transit', - '7': 'Retired', - '100': 'Missing' + "1": "In use", + "2": "On order", + "3": "On maintenance", + "6": "In stock/In transit", + "7": "Retired", + "100": "Missing", } - mapped_computers = [{ - 'ID': computer.get('sys_id'), - 'AssetTag': computer.get('asset_tag'), - 'Name': computer.get('name'), - 'DisplayName': f"{computer.get('asset_tag', '')} - {computer.get('name', '')}", - 'SupportGroup': computer.get('support_group'), - 'OperatingSystem': computer.get('os'), - 'Company': computer.get('company', {}).get('value') - if isinstance(computer.get('company'), dict) else computer.get('company'), - 'AssignedTo': computer.get('assigned_to', {}).get('value') - if isinstance(computer.get('assigned_to'), dict) else computer.get('assigned_to'), - 'State': computer_statuses.get(computer.get('install_status', ''), computer.get('install_status')), - 
'Cost': f"{computer.get('cost', '').rstrip()} {computer.get('cost_cc', '').rstrip()}", - 'Comments': computer.get('comments') - } for computer in computers] - - headers = ['ID', 'AssetTag', 'Name', 'DisplayName', 'SupportGroup', 'OperatingSystem', 'Company', 'AssignedTo', - 'State', 'Cost', 'Comments'] - human_readable = tableToMarkdown('ServiceNow Computers', t=mapped_computers, headers=headers, - removeNull=True, headerTransform=pascalToSpace) - entry_context = {'ServiceNow.Computer(val.ID===obj.ID)': createContext(mapped_computers, removeNull=True)} + mapped_computers = [ + { + "ID": computer.get("sys_id"), + "AssetTag": computer.get("asset_tag"), + "Name": computer.get("name"), + "DisplayName": f"{computer.get('asset_tag', '')} - {computer.get('name', '')}", + "SupportGroup": computer.get("support_group"), + "OperatingSystem": computer.get("os"), + "Company": computer.get("company", {}).get("value") + if isinstance(computer.get("company"), dict) + else computer.get("company"), + "AssignedTo": computer.get("assigned_to", {}).get("value") + if isinstance(computer.get("assigned_to"), dict) + else computer.get("assigned_to"), + "State": computer_statuses.get(computer.get("install_status", ""), computer.get("install_status")), + "Cost": f"{computer.get('cost', '').rstrip()} {computer.get('cost_cc', '').rstrip()}", + "Comments": computer.get("comments"), + } + for computer in computers + ] + + headers = [ + "ID", + "AssetTag", + "Name", + "DisplayName", + "SupportGroup", + "OperatingSystem", + "Company", + "AssignedTo", + "State", + "Cost", + "Comments", + ] + human_readable = tableToMarkdown( + "ServiceNow Computers", t=mapped_computers, headers=headers, removeNull=True, headerTransform=pascalToSpace + ) + entry_context = {"ServiceNow.Computer(val.ID===obj.ID)": createContext(mapped_computers, removeNull=True)} return human_readable, entry_context, result, False @@ -1932,45 +2146,48 @@ def query_groups_command(client: Client, args: dict) -> tuple[Any, dict[Any, Any Returns: Demisto Outputs. 
""" - table_name = 'sys_user_group' - group_id = args.get('group_id') - group_name = args.get('group_name') - group_query = args.get('query', {}) - offset = args.get('offset', client.sys_param_offset) - limit = args.get('limit', client.sys_param_limit) + table_name = "sys_user_group" + group_id = args.get("group_id") + group_name = args.get("group_name") + group_query = args.get("query", {}) + offset = args.get("offset", client.sys_param_offset) + limit = args.get("limit", client.sys_param_limit) if group_id: result = client.get(table_name, group_id) else: if group_name: - group_query = f'name={group_name}' + group_query = f"name={group_name}" result = client.query(table_name, limit, offset, group_query) - if not result or 'result' not in result: - return 'No groups found.', {}, {}, False + if not result or "result" not in result: + return "No groups found.", {}, {}, False - groups = result.get('result', {}) + groups = result.get("result", {}) if not isinstance(groups, list): groups = [groups] if len(groups) == 0: - return 'No groups found.', {}, {}, False - - headers = ['ID', 'Description', 'Name', 'Active', 'Manager', 'Updated'] - - mapped_groups = [{ - 'ID': group.get('sys_id'), - 'Description': group.get('description'), - 'Name': group.get('name'), - 'Active': group.get('active'), - 'Manager': group.get('manager', {}).get('value') - if isinstance(group.get('manager'), dict) else group.get('manager'), - 'Updated': group.get('sys_updated_on'), - } for group in groups] + return "No groups found.", {}, {}, False + + headers = ["ID", "Description", "Name", "Active", "Manager", "Updated"] + + mapped_groups = [ + { + "ID": group.get("sys_id"), + "Description": group.get("description"), + "Name": group.get("name"), + "Active": group.get("active"), + "Manager": group.get("manager", {}).get("value") if isinstance(group.get("manager"), dict) else group.get("manager"), + "Updated": group.get("sys_updated_on"), + } + for group in groups + ] - human_readable = tableToMarkdown('ServiceNow Groups', t=mapped_groups, headers=headers, - removeNull=True, headerTransform=pascalToSpace) - entry_context = {'ServiceNow.Group(val.ID===obj.ID)': createContext(mapped_groups, removeNull=True)} + human_readable = tableToMarkdown( + "ServiceNow Groups", t=mapped_groups, headers=headers, removeNull=True, headerTransform=pascalToSpace + ) + entry_context = {"ServiceNow.Group(val.ID===obj.ID)": createContext(mapped_groups, removeNull=True)} return human_readable, entry_context, result, False @@ -1985,43 +2202,47 @@ def query_users_command(client: Client, args: dict) -> tuple[Any, dict[Any, Any] Returns: Demisto Outputs. 
""" - table_name = 'sys_user' - user_id = args.get('user_id') - user_name = args.get('user_name') - user_query = args.get('query', {}) - offset = args.get('offset', client.sys_param_offset) - limit = args.get('limit', client.sys_param_limit) + table_name = "sys_user" + user_id = args.get("user_id") + user_name = args.get("user_name") + user_query = args.get("query", {}) + offset = args.get("offset", client.sys_param_offset) + limit = args.get("limit", client.sys_param_limit) if user_id: result = client.get(table_name, user_id) else: if user_name: - user_query = f'user_name={user_name}' + user_query = f"user_name={user_name}" result = client.query(table_name, limit, offset, user_query) - if not result or 'result' not in result: - return 'No users found.', {}, {}, False + if not result or "result" not in result: + return "No users found.", {}, {}, False - users = result.get('result', {}) + users = result.get("result", {}) if not isinstance(users, list): users = [users] if len(users) == 0: - return 'No users found.', {}, {}, False - - mapped_users = [{ - 'ID': user.get('sys_id'), - 'Name': f"{user.get('first_name', '').rstrip()} {user.get('last_name', '').rstrip()}", - 'UserName': user.get('user_name'), - 'Email': user.get('email'), - 'Created': user.get('sys_created_on'), - 'Updated': user.get('sys_updated_on'), - } for user in users] - - headers = ['ID', 'Name', 'UserName', 'Email', 'Created', 'Updated'] - human_readable = tableToMarkdown('ServiceNow Users', t=mapped_users, headers=headers, removeNull=True, - headerTransform=pascalToSpace) - entry_context = {'ServiceNow.User(val.ID===obj.ID)': createContext(mapped_users, removeNull=True)} + return "No users found.", {}, {}, False + + mapped_users = [ + { + "ID": user.get("sys_id"), + "Name": f"{user.get('first_name', '').rstrip()} {user.get('last_name', '').rstrip()}", + "UserName": user.get("user_name"), + "Email": user.get("email"), + "Created": user.get("sys_created_on"), + "Updated": user.get("sys_updated_on"), + } + for user in users + ] + + headers = ["ID", "Name", "UserName", "Email", "Created", "Updated"] + human_readable = tableToMarkdown( + "ServiceNow Users", t=mapped_users, headers=headers, removeNull=True, headerTransform=pascalToSpace + ) + entry_context = {"ServiceNow.User(val.ID===obj.ID)": createContext(mapped_users, removeNull=True)} return human_readable, entry_context, result, False @@ -2036,20 +2257,20 @@ def list_table_fields_command(client: Client, args: dict) -> tuple[Any, dict[Any Returns: Demisto Outputs. 
""" - table_name = str(args.get('table_name', '')) + table_name = str(args.get("table_name", "")) result = client.get_table_fields(table_name) - if not result or 'result' not in result: - return 'Table was not found.', {}, {}, False + if not result or "result" not in result: + return "Table was not found.", {}, {}, False - if len(result['result']) == 0: - return 'Table contains no records.', {}, {}, False + if len(result["result"]) == 0: + return "Table contains no records.", {}, {}, False - fields = [{'Name': k} for k, v in result['result'][0].items()] + fields = [{"Name": k} for k, v in result["result"][0].items()] - human_readable = tableToMarkdown(f'ServiceNow Table fields - {table_name}', fields) - entry_context = {'ServiceNow.Field': createContext(fields)} + human_readable = tableToMarkdown(f"ServiceNow Table fields - {table_name}", fields) + entry_context = {"ServiceNow.Field": createContext(fields)} return human_readable, entry_context, result, False @@ -2064,30 +2285,29 @@ def get_table_name_command(client: Client, args: dict) -> tuple[Any, dict[Any, A Returns: Demisto Outputs. """ - label = args.get('label') - offset = args.get('offset', client.sys_param_offset) - limit = args.get('limit', client.sys_param_limit) - table_query = f'label={label}' + label = args.get("label") + offset = args.get("offset", client.sys_param_offset) + limit = args.get("limit", client.sys_param_limit) + table_query = f"label={label}" - result = client.query('sys_db_object', limit, offset, table_query) + result = client.query("sys_db_object", limit, offset, table_query) - if not result or 'result' not in result: - return 'Table was not found.', {}, {}, False - tables = result.get('result', {}) + if not result or "result" not in result: + return "Table was not found.", {}, {}, False + tables = result.get("result", {}) if len(tables) == 0: - return 'Table was not found.', {}, {}, False + return "Table was not found.", {}, {}, False - headers = ['ID', 'Name', 'SystemName'] + headers = ["ID", "Name", "SystemName"] - mapped_tables = [{ - 'ID': table.get('sys_id'), - 'Name': table.get('name'), - 'SystemName': table.get('sys_name') - } for table in tables] + mapped_tables = [ + {"ID": table.get("sys_id"), "Name": table.get("name"), "SystemName": table.get("sys_name")} for table in tables + ] - human_readable = tableToMarkdown(f'ServiceNow Tables for label - {label}', t=mapped_tables, - headers=headers, headerTransform=pascalToSpace) - entry_context = {'ServiceNow.Table(val.ID===obj.ID)': createContext(mapped_tables)} + human_readable = tableToMarkdown( + f"ServiceNow Tables for label - {label}", t=mapped_tables, headers=headers, headerTransform=pascalToSpace + ) + entry_context = {"ServiceNow.Table(val.ID===obj.ID)": createContext(mapped_tables)} return human_readable, entry_context, result, False @@ -2102,31 +2322,32 @@ def query_items_command(client: Client, args: dict) -> tuple[Any, dict[Any, Any] Returns: Demisto Outputs. 
""" - table_name = 'sc_cat_item' - limit = args.get('limit', client.sys_param_limit) - offset = args.get('offset', client.sys_param_offset) - name = str(args.get('name', '')) - items_query = f'nameLIKE{name}' if name else '' + table_name = "sc_cat_item" + limit = args.get("limit", client.sys_param_limit) + offset = args.get("offset", client.sys_param_offset) + name = str(args.get("name", "")) + items_query = f"nameLIKE{name}" if name else "" result = client.query(table_name, limit, offset, items_query) - if not result or 'result' not in result: - return 'No items were found.', {}, {}, True - items = result.get('result', {}) + if not result or "result" not in result: + return "No items were found.", {}, {}, True + items = result.get("result", {}) if not isinstance(items, list): items_list = [items] else: items_list = items if len(items_list) == 0: - return 'No items were found.', {}, {}, True + return "No items were found.", {}, {}, True mapped_items = [] for item in items_list: mapped_items.append(get_item_human_readable(item)) - headers = ['ID', 'Name', 'Price', 'Description'] - human_readable = tableToMarkdown('ServiceNow Catalog Items', mapped_items, headers=headers, - removeNull=True, headerTransform=pascalToSpace) - entry_context = {'ServiceNow.CatalogItem(val.ID===obj.ID)': createContext(mapped_items, removeNull=True)} + headers = ["ID", "Name", "Price", "Description"] + human_readable = tableToMarkdown( + "ServiceNow Catalog Items", mapped_items, headers=headers, removeNull=True, headerTransform=pascalToSpace + ) + entry_context = {"ServiceNow.CatalogItem(val.ID===obj.ID)": createContext(mapped_items, removeNull=True)} return human_readable, entry_context, result, True @@ -2141,20 +2362,29 @@ def get_item_details_command(client: Client, args: dict) -> tuple[Any, dict[Any, Returns: Demisto Outputs. """ - id_ = str(args.get('id', '')) + id_ = str(args.get("id", "")) result = client.get_item_details(id_) - if not result or 'result' not in result: - return 'Item was not found.', {}, {}, True - item = result.get('result', {}) + if not result or "result" not in result: + return "Item was not found.", {}, {}, True + item = result.get("result", {}) mapped_item = get_item_human_readable(item) - human_readable = tableToMarkdown('ServiceNow Catalog Item', t=mapped_item, headers=['ID', 'Name', 'Description'], - removeNull=True, headerTransform=pascalToSpace) - if mapped_item.get('Variables'): - human_readable += tableToMarkdown('Item Variables', t=mapped_item.get('Variables'), - headers=['Question', 'Type', 'Name', 'Mandatory'], - removeNull=True, headerTransform=pascalToSpace) - entry_context = {'ServiceNow.CatalogItem(val.ID===obj.ID)': createContext(mapped_item, removeNull=True)} + human_readable = tableToMarkdown( + "ServiceNow Catalog Item", + t=mapped_item, + headers=["ID", "Name", "Description"], + removeNull=True, + headerTransform=pascalToSpace, + ) + if mapped_item.get("Variables"): + human_readable += tableToMarkdown( + "Item Variables", + t=mapped_item.get("Variables"), + headers=["Question", "Type", "Name", "Mandatory"], + removeNull=True, + headerTransform=pascalToSpace, + ) + entry_context = {"ServiceNow.CatalogItem(val.ID===obj.ID)": createContext(mapped_item, removeNull=True)} return human_readable, entry_context, result, True @@ -2168,22 +2398,18 @@ def create_order_item_command(client: Client, args: dict) -> tuple[Any, dict[Any Returns: Demisto Outputs. 
""" - id_ = str(args.get('id', '')) - quantity = str(args.get('quantity', '1')) - variables = split_fields(str(args.get('variables', ''))) + id_ = str(args.get("id", "")) + quantity = str(args.get("quantity", "1")) + variables = split_fields(str(args.get("variables", ""))) result = client.create_item_order(id_, quantity, variables) - if not result or 'result' not in result: - return 'Order item was not created.', {}, {}, True - order_item = result.get('result', {}) + if not result or "result" not in result: + return "Order item was not created.", {}, {}, True + order_item = result.get("result", {}) - mapped_item = { - 'ID': order_item.get('sys_id'), - 'RequestNumber': order_item.get('request_number') - } - human_readable = tableToMarkdown('ServiceNow Order Request', mapped_item, - removeNull=True, headerTransform=pascalToSpace) - entry_context = {'ServiceNow.OrderRequest(val.ID===obj.ID)': createContext(mapped_item, removeNull=True)} + mapped_item = {"ID": order_item.get("sys_id"), "RequestNumber": order_item.get("request_number")} + human_readable = tableToMarkdown("ServiceNow Order Request", mapped_item, removeNull=True, headerTransform=pascalToSpace) + entry_context = {"ServiceNow.OrderRequest(val.ID===obj.ID)": createContext(mapped_item, removeNull=True)} return human_readable, entry_context, result, True @@ -2198,27 +2424,28 @@ def document_route_to_table(client: Client, args: dict) -> tuple[Any, dict[Any, Returns: Demisto Outputs. """ - queue_id = str(args.get('queue_id', '')) - document_table = str(args.get('document_table', '')) - document_id = str(args.get('document_id', '')) + queue_id = str(args.get("queue_id", "")) + document_table = str(args.get("document_table", "")) + document_id = str(args.get("document_id", "")) result = client.document_route_to_table_request(queue_id, document_table, document_id) - if not result or 'result' not in result: - return 'Route to table was not found.', {}, {}, True + if not result or "result" not in result: + return "Route to table was not found.", {}, {}, True - route = result.get('result', {}) + route = result.get("result", {}) context = { - 'DisplayName': route.get('display_name'), - 'DocumentID': route.get('document_id'), - 'DocumentTable': route.get('document_table'), - 'QueueID': route.get('queue'), - 'WorkItemID': route.get('sys_id') + "DisplayName": route.get("display_name"), + "DocumentID": route.get("document_id"), + "DocumentTable": route.get("document_table"), + "QueueID": route.get("queue"), + "WorkItemID": route.get("sys_id"), } - headers = ['DisplayName', 'DocumentID', 'DocumentTable', 'QueueID', 'WorkItemID'] - human_readable = tableToMarkdown('ServiceNow Queue', t=context, headers=headers, removeNull=True, - headerTransform=pascalToSpace) - entry_context = {'ServiceNow.WorkItem(val.WorkItemID===obj.WorkItemID)': createContext(context, removeNull=True)} + headers = ["DisplayName", "DocumentID", "DocumentTable", "QueueID", "WorkItemID"] + human_readable = tableToMarkdown( + "ServiceNow Queue", t=context, headers=headers, removeNull=True, headerTransform=pascalToSpace + ) + entry_context = {"ServiceNow.WorkItem(val.WorkItemID===obj.WorkItemID)": createContext(context, removeNull=True)} return human_readable, entry_context, result, True @@ -2229,15 +2456,12 @@ def get_ticket_file_attachments(client: Client, ticket: dict) -> list: """ file_names = [] if client.get_attachments: - file_entries = client.get_ticket_attachment_entries(ticket.get('sys_id', '')) + file_entries = client.get_ticket_attachment_entries(ticket.get("sys_id", 
"")) if isinstance(file_entries, list): for file_result in file_entries: - if file_result['Type'] == entryTypes['error']: - raise Exception(f"Error getting attachment: {str(file_result.get('Contents', ''))}") - file_names.append({ - 'path': file_result.get('FileID', ''), - 'name': file_result.get('File', '') - }) + if file_result["Type"] == entryTypes["error"]: + raise Exception(f"Error getting attachment: {file_result.get('Contents', '')!s}") + file_names.append({"path": file_result.get("FileID", ""), "name": file_result.get("File", "")}) return file_names @@ -2248,16 +2472,16 @@ def get_mirroring(): params = demisto.params() return { - 'mirror_direction': MIRROR_DIRECTION.get(params.get('mirror_direction')), - 'mirror_tags': [ - params.get('comment_tag'), # comment tag to service now - params.get('comment_tag_from_servicenow'), - params.get('file_tag'), # file tag to service now - params.get('file_tag_from_service_now'), - params.get('work_notes_tag'), # work not tag to service now - params.get('work_notes_tag_from_servicenow') + "mirror_direction": MIRROR_DIRECTION.get(params.get("mirror_direction")), + "mirror_tags": [ + params.get("comment_tag"), # comment tag to service now + params.get("comment_tag_from_servicenow"), + params.get("file_tag"), # file tag to service now + params.get("file_tag_from_service_now"), + params.get("work_notes_tag"), # work not tag to service now + params.get("work_notes_tag_from_servicenow"), ], - 'mirror_instance': demisto.integrationInstance() + "mirror_instance": demisto.integrationInstance(), } @@ -2322,35 +2546,35 @@ def fetch_incidents(client: Client) -> list: ) snow_time_as_date = datetime.strptime(start_snow_time, DATE_FORMAT) - fetch_limit = last_run.get('limit') or client.sys_param_limit + fetch_limit = last_run.get("limit") or client.sys_param_limit - query = '' + query = "" if client.sys_param_query: - query += f'{client.sys_param_query}^' + query += f"{client.sys_param_query}^" # get the tickets which occurred after the 'start_snow_time' - query += f'ORDERBY{client.timestamp_field}^{client.timestamp_field}>{start_snow_time}' + query += f"ORDERBY{client.timestamp_field}^{client.timestamp_field}>{start_snow_time}" if query: - query_params['sysparm_query'] = query - query_params['sysparm_limit'] = fetch_limit # type: ignore[assignment] + query_params["sysparm_query"] = query + query_params["sysparm_limit"] = fetch_limit # type: ignore[assignment] if client.use_display_value: - query_params['sysparm_display_value'] = "all" + query_params["sysparm_display_value"] = "all" demisto.debug(f"ServiceNowV2 - Last run: {json.dumps(last_run)}") - demisto.debug(f"ServiceNowV2 - Query sent to the server: {str(query_params)}") - tickets_response = client.send_request(f'table/{client.ticket_type}', 'GET', params=query_params).get('result', []) + demisto.debug(f"ServiceNowV2 - Query sent to the server: {query_params!s}") + tickets_response = client.send_request(f"table/{client.ticket_type}", "GET", params=query_params).get("result", []) count = 0 skipped_incidents = 0 # Map SNOW severity to Demisto severity for incident creation - severity_map = {'1': 3, '1 - High': 3, '2': 2, '2 - Medium': 2, '3': 1, '3 - Low': 1} + severity_map = {"1": 3, "1 - High": 3, "2": 2, "2 - Medium": 2, "3": 1, "3 - Low": 1} if client.use_display_value: tickets_response = format_incidents_response_with_display_values(incidents_res=tickets_response) # remove duplicate incidents which were already fetched tickets_response = filter_incidents_by_duplicates_and_limit( - 
incidents_res=tickets_response, last_run=last_run, fetch_limit=client.sys_param_limit, id_field='sys_id' + incidents_res=tickets_response, last_run=last_run, fetch_limit=client.sys_param_limit, id_field="sys_id" ) for ticket in tickets_response: @@ -2367,25 +2591,28 @@ def fetch_incidents(client: Client) -> list: skipped_incidents += 1 demisto.debug( f"ServiceNowV2 - -Skipping incident with sys_id={ticket.get('sys_id')} and date=" - f"{ticket.get(client.timestamp_field)} because its creation time is smaller than the last fetch.") + f"{ticket.get(client.timestamp_field)} because its creation time is smaller than the last fetch." + ) continue parse_dict_ticket_fields(client, ticket) except Exception as e: demisto.debug(f"Got the following error: {e}") - incidents.append({ - 'name': f"ServiceNow Incident {ticket.get(client.incident_name)}", - 'labels': [ - {'type': _type, 'value': value if isinstance(value, str) else json.dumps(value)} - for _type, value in ticket.items() - ], - 'details': json.dumps(ticket), - 'severity': severity_map.get(ticket.get('severity', ''), 0), - 'attachment': get_ticket_file_attachments(client=client, ticket=ticket), - 'occurred': ticket.get(client.timestamp_field), - 'sys_id': ticket.get('sys_id'), - 'rawJSON': json.dumps(ticket) - }) + incidents.append( + { + "name": f"ServiceNow Incident {ticket.get(client.incident_name)}", + "labels": [ + {"type": _type, "value": value if isinstance(value, str) else json.dumps(value)} + for _type, value in ticket.items() + ], + "details": json.dumps(ticket), + "severity": severity_map.get(ticket.get("severity", ""), 0), + "attachment": get_ticket_file_attachments(client=client, ticket=ticket), + "occurred": ticket.get(client.timestamp_field), + "sys_id": ticket.get("sys_id"), + "rawJSON": json.dumps(ticket), + } + ) count += 1 last_run = update_last_run_object( @@ -2395,9 +2622,9 @@ def fetch_incidents(client: Client) -> list: start_fetch_time=start_snow_time, end_fetch_time=end_snow_time, look_back=client.look_back, - created_time_field='occurred', - id_field='sys_id', - date_format=DATE_FORMAT + created_time_field="occurred", + id_field="sys_id", + date_format=DATE_FORMAT, ) demisto.debug(f"ServiceNowV2 - Last run after incidents fetching: {json.dumps(last_run)}") @@ -2407,8 +2634,8 @@ def fetch_incidents(client: Client) -> list: for ticket in incidents: # the occurred time requires to be in ISO format. 
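# Sketch of the occurred-time normalization in this hunk, assuming the
# module-level DATE_FORMAT is the usual "%Y-%m-%d %H:%M:%S" glide datetime
# format; the sample ticket value is hypothetical.
from datetime import datetime

DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
ticket = {"occurred": "2025-03-23 12:52:33"}
occurred = datetime.strptime(ticket["occurred"], DATE_FORMAT).isoformat()
ticket["occurred"] = f"{occurred}Z"
print(ticket["occurred"])  # 2025-03-23T12:52:33Z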
- occurred = datetime.strptime(ticket.get('occurred'), DATE_FORMAT).isoformat() # type: ignore[arg-type] - ticket['occurred'] = f"{occurred}Z" + occurred = datetime.strptime(ticket.get("occurred"), DATE_FORMAT).isoformat() # type: ignore[arg-type] + ticket["occurred"] = f"{occurred}Z" if demisto.params().get("mirror_notes_for_new_incidents", False): store_ids_for_first_mirroring(incidents) @@ -2438,12 +2665,12 @@ def test_instance(client: Client): """ # Validate fetch_time parameter is valid (if not, parse_date_range will raise the error message) parse_date_range(client.fetch_time, DATE_FORMAT) - params = {'sysparm_limit': 1, 'sysparm_query': 'active=true'} - result = client.send_request(f'table/{client.ticket_type}', params=params, method='GET') - if 'result' not in result: - raise Exception('ServiceNow error: ' + str(result)) - ticket = result.get('result') - if ticket and demisto.params().get('isFetch'): + params = {"sysparm_limit": 1, "sysparm_query": "active=true"} + result = client.send_request(f"table/{client.ticket_type}", params=params, method="GET") + if "result" not in result: + raise Exception("ServiceNow error: " + str(result)) + ticket = result.get("result") + if ticket and demisto.params().get("isFetch"): if isinstance(ticket, list): ticket = ticket[0] if client.timestamp_field not in ticket: @@ -2458,14 +2685,16 @@ def test_module(client: Client, *_) -> tuple[str, dict[Any, Any], dict[Any, Any] """ # Notify the user that test button can't be used when using OAuth 2.0: if client.use_oauth: - raise Exception('Test button cannot be used when using OAuth 2.0. Please use the !servicenow-oauth-login ' - 'command followed by the !servicenow-oauth-test command to test the instance.') + raise Exception( + "Test button cannot be used when using OAuth 2.0. Please use the !servicenow-oauth-login " + "command followed by the !servicenow-oauth-test command to test the instance." + ) - if client._version == 'v2' and client.get_attachments: - raise DemistoException('Retrieving incident attachments is not supported when using the V2 API.') + if client._version == "v2" and client.get_attachments: + raise DemistoException("Retrieving incident attachments is not supported when using the V2 API.") test_instance(client) - return 'ok', {}, {}, True + return "ok", {}, {}, True def oauth_test_module(client: Client, *_) -> tuple[str, dict[Any, Any], dict[Any, Any], bool]: @@ -2473,12 +2702,14 @@ def oauth_test_module(client: Client, *_) -> tuple[str, dict[Any, Any], dict[Any Test the instance configurations when using OAuth authentication. """ if not client.use_oauth: - raise Exception('!servicenow-oauth-test command should be used only when using OAuth 2.0 authorization.\n ' - 'Please select the `Use OAuth Login` checkbox in the instance configuration before running ' - 'this command.') + raise Exception( + "!servicenow-oauth-test command should be used only when using OAuth 2.0 authorization.\n " + "Please select the `Use OAuth Login` checkbox in the instance configuration before running " + "this command." 
+ ) test_instance(client) - hr = '### Instance Configured Successfully.\n' + hr = "### Instance Configured Successfully.\n" return hr, {}, {}, True @@ -2494,59 +2725,66 @@ def login_command(client: Client, args: dict[str, Any]) -> tuple[str, dict[Any, """ # Verify that the user checked the `Use OAuth` checkbox: if not client.use_oauth: - raise Exception('!servicenow-oauth-login command can be used only when using OAuth 2.0 authorization.\n Please ' - 'select the `Use OAuth Login` checkbox in the instance configuration before running this ' - 'command.') + raise Exception( + "!servicenow-oauth-login command can be used only when using OAuth 2.0 authorization.\n Please " + "select the `Use OAuth Login` checkbox in the instance configuration before running this " + "command." + ) - username = args.get('username', '') - password = args.get('password', '') + username = args.get("username", "") + password = args.get("password", "") try: client.snow_client.login(username, password) - hr = '### Logged in successfully.\n A refresh token was saved to the integration context. This token will be ' \ - 'used to generate a new access token once the current one expires.' + hr = ( + "### Logged in successfully.\n A refresh token was saved to the integration context. This token will be " + "used to generate a new access token once the current one expires." + ) except Exception as e: return_error( - f'Failed to login. Please verify that the provided username and password are correct, and that you ' - f'entered the correct client id and client secret in the instance configuration (see ? for' - f'correct usage when using OAuth).\n\n{e}') + f"Failed to login. Please verify that the provided username and password are correct, and that you " + f"entered the correct client id and client secret in the instance configuration (see ? 
for" + f"correct usage when using OAuth).\n\n{e}" + ) return hr, {}, {}, True def check_assigned_to_field(client: Client, assigned_to: dict) -> Optional[str]: if assigned_to: - user_result = client.get('sys_user', assigned_to.get('value'), # type: ignore[arg-type] - no_record_found_res={'result': {}}) - user = user_result.get('result', {}) + user_result = client.get( + "sys_user", + assigned_to.get("value"), # type: ignore[arg-type] + no_record_found_res={"result": {}}, + ) + user = user_result.get("result", {}) if user: - user_email = user.get('email') + user_email = user.get("email") return user_email else: demisto.debug(f'Could not assign user {assigned_to.get("value")} since it does not exist in ServiceNow') - return '' + return "" def parse_dict_ticket_fields(client: Client, ticket: dict) -> dict: - # Parse user dict to email - assigned_to = ticket.get('assigned_to', {}) - caller = ticket.get('caller_id', {}) - assignment_group = ticket.get('assignment_group', {}) + assigned_to = ticket.get("assigned_to", {}) + caller = ticket.get("caller_id", {}) + assignment_group = ticket.get("assignment_group", {}) if assignment_group: - group_result = client.get('sys_user_group', assignment_group.get('value'), no_record_found_res={'result': {}}) - group = group_result.get('result', {}) - group_name = group.get('name') - ticket['assignment_group'] = group_name + group_result = client.get("sys_user_group", assignment_group.get("value"), no_record_found_res={"result": {}}) + group = group_result.get("result", {}) + group_name = group.get("name") + ticket["assignment_group"] = group_name if ticket: user_assigned = check_assigned_to_field(client, assigned_to) - ticket['assigned_to'] = user_assigned + ticket["assigned_to"] = user_assigned if caller: - user_result = client.get('sys_user', caller.get('value'), no_record_found_res={'result': {}}) - user = user_result.get('result', {}) - user_email = user.get('email') - ticket['caller_id'] = user_email + user_result = client.get("sys_user", caller.get("value"), no_record_found_res={"result": {}}) + user = user_result.get("result", {}) + user_email = user.get("email") + ticket["caller_id"] = user_email return ticket @@ -2564,17 +2802,17 @@ def get_timezone_offset(ticket: dict, display_date_format: str): datetime.timedelta: The timezone offset between the SNOW instance and UTC. """ try: - local_time: str = ticket.get('sys_created_on', {}).get('display_value', '') + local_time: str = ticket.get("sys_created_on", {}).get("display_value", "") # With %H hour format, AM/PM is redundant info. - local_time = (local_time.replace('AM', '').replace('PM', '')).strip() + local_time = (local_time.replace("AM", "").replace("PM", "")).strip() local_time_dt = datetime.strptime(local_time, display_date_format) except Exception as e: - raise Exception(f'Failed to get the display value offset time. ERROR: {e}') + raise Exception(f"Failed to get the display value offset time. ERROR: {e}") try: - utc_time = ticket.get('sys_created_on', {}).get('value', '') + utc_time = ticket.get("sys_created_on", {}).get("value", "") utc_time = datetime.strptime(utc_time, DATE_FORMAT) except ValueError as e: - raise Exception(f'Failed to convert {utc_time} to datetime object. ERROR: {e}') + raise Exception(f"Failed to convert {utc_time} to datetime object. 
ERROR: {e}") offset = utc_time - local_time_dt return offset @@ -2592,30 +2830,26 @@ def get_remote_data_command(client: Client, args: dict[str, Any], params: dict) List[Dict[str, Any]]: first entry is the incident (which can be completely empty) and the new entries. """ - ticket_id = args.get('id', '') - demisto.debug(f'Getting update for remote {ticket_id}') - last_update = arg_to_timestamp( - arg=args.get('lastUpdate'), - arg_name='lastUpdate', - required=True - ) - demisto.debug(f'last_update is {last_update}') + ticket_id = args.get("id", "") + demisto.debug(f"Getting update for remote {ticket_id}") + last_update = arg_to_timestamp(arg=args.get("lastUpdate"), arg_name="lastUpdate", required=True) + demisto.debug(f"last_update is {last_update}") ticket_type = client.ticket_type result = client.get(ticket_type, ticket_id, use_display_value=client.use_display_value) is_new_ticket_id = is_new_incident(ticket_id) - if not result or 'result' not in result: - return f'Ticket {ticket_id=} was not found.' + if not result or "result" not in result: + return f"Ticket {ticket_id=} was not found." - if isinstance(result['result'], list): - if len(result['result']) == 0: - return 'Ticket was not found.' + if isinstance(result["result"], list): + if len(result["result"]) == 0: + return "Ticket was not found." - ticket = result['result'][0] + ticket = result["result"][0] else: - ticket = result['result'] + ticket = result["result"] if client.use_display_value and client.display_date_format: timezone_offset = get_timezone_offset(ticket, client.display_date_format) @@ -2624,22 +2858,18 @@ def get_remote_data_command(client: Client, args: dict[str, Any], params: dict) timezone_offset = None demisto.debug(f"not ({client.use_display_value=} and {client.display_date_format=}) setting {timezone_offset=}") - ticket_last_update = arg_to_timestamp( - arg=ticket.get('sys_updated_on'), - arg_name='sys_updated_on', - required=False - ) + ticket_last_update = arg_to_timestamp(arg=ticket.get("sys_updated_on"), arg_name="sys_updated_on", required=False) - demisto.debug(f'ticket_last_update of {ticket_id=} is {ticket_last_update}') - is_fetch = demisto.params().get('isFetch') + demisto.debug(f"ticket_last_update of {ticket_id=} is {ticket_last_update}") + is_fetch = demisto.params().get("isFetch") if is_fetch and last_update > ticket_last_update and not is_new_ticket_id: - demisto.debug(f'Nothing new in the ticket {ticket_id=}') + demisto.debug(f"Nothing new in the ticket {ticket_id=}") ticket = {} else: # in case we use SNOW just to mirror by setting the incident with mirror fields # is_fetch will be false, so we will update even the XSOAR incident will be updated then SNOW ticket. 
-        demisto.debug(f'ticket is updated: {ticket}')
+        demisto.debug(f"ticket is updated: {ticket}")
         parse_dict_ticket_fields(client, ticket)
@@ -2648,65 +2878,69 @@ def get_remote_data_command(client: Client, args: dict[str, Any], params: dict)
     file_entries = client.get_ticket_attachment_entries(ticket_id, datetime.fromtimestamp(last_update))  # type: ignore
     if file_entries:
         for file in file_entries:
-            if '_mirrored_from_xsoar' not in file.get('File'):
-                file['Tags'] = [params.get('file_tag_from_service_now')]
+            if "_mirrored_from_xsoar" not in file.get("File"):
+                file["Tags"] = [params.get("file_tag_from_service_now")]
             entries.append(file)

     if client.use_display_value:
         try:
-            time_info = {'display_date_format': client.display_date_format, 'timezone_offset': timezone_offset}
+            time_info = {"display_date_format": client.display_date_format, "timezone_offset": timezone_offset}
             if not is_new_ticket_id:
-                time_info.update({'filter': datetime.fromtimestamp(last_update)})
+                time_info.update({"filter": datetime.fromtimestamp(last_update)})
             comments_result = convert_to_notes_result(ticket, time_info)
         except Exception as e:
-            demisto.debug(f'Failed to retrieve notes using display value. Continuing without retrieving notes.\n Error: {e}')
-            comments_result = {'result': []}
+            demisto.debug(f"Failed to retrieve notes using display value. Continuing without retrieving notes.\n Error: {e}")
+            comments_result = {"result": []}
     else:
-        sys_param_limit = args.get('limit', client.sys_param_limit)
-        sys_param_offset = args.get('offset', client.sys_param_offset)
+        sys_param_limit = args.get("limit", client.sys_param_limit)
+        sys_param_offset = args.get("offset", client.sys_param_offset)

-        sys_param_query = f'element_id={ticket_id}^element=comments^ORelement=work_notes'
+        sys_param_query = f"element_id={ticket_id}^element=comments^ORelement=work_notes"
         if not is_new_ticket_id:  # for latest fetch run incidents do not filter by last_update
-            sys_param_query += f'^sys_created_on>{datetime.fromtimestamp(last_update)}'
+            sys_param_query += f"^sys_created_on>{datetime.fromtimestamp(last_update)}"

-        comments_result = client.query('sys_journal_field', sys_param_limit, sys_param_offset, sys_param_query)
-        demisto.debug(f'Comments result is {comments_result}')
+        comments_result = client.query("sys_journal_field", sys_param_limit, sys_param_offset, sys_param_query)
+        demisto.debug(f"Comments result is {comments_result}")

-    if not comments_result or 'result' not in comments_result:
-        demisto.debug(f'ServiceNowV2 - Pull result is {ticket}')
+    if not comments_result or "result" not in comments_result:
+        demisto.debug(f"ServiceNowV2 - Pull result is {ticket}")
         return [ticket] + entries

-    entries.extend(get_entries_for_notes(comments_result.get('result', []), params))
+    entries.extend(get_entries_for_notes(comments_result.get("result", []), params))

     # Handle closing ticket/incident in XSOAR
-    close_incident = params.get('close_incident')
-    if close_incident != 'None':
-        server_close_custom_state = params.get('server_close_custom_state', '')
-        server_custom_close_code = params.get('server_custom_close_code', '')
-        ticket_state = ticket.get('state', '')
-        ticket_close_code = ticket.get('close_code', '')
+    close_incident = params.get("close_incident")
+    if close_incident != "None":
+        server_close_custom_state = params.get("server_close_custom_state", "")
+        server_custom_close_code = params.get("server_custom_close_code", "")
+        ticket_state = ticket.get("state", "")
+        ticket_close_code = ticket.get("close_code", "")
         # The first condition is for closing the incident if the ticket's state is in the
         # `Mirrored XSOAR Ticket custom close state code` parameter, which is configured by the user in the
         # integration configuration.
-        if (ticket_state and ticket_state in server_close_custom_state) \
-                or (ticket_close_code and ticket_close_code in server_custom_close_code) \
-                or (ticket.get('closed_at') and close_incident == 'closed') \
-                or (ticket.get('resolved_at') and close_incident == 'resolved'):  # noqa: E127
-            demisto.debug(f'SNOW ticket changed state - should be closed in XSOAR: {ticket}')
-            entries.append({
-                'Type': EntryType.NOTE,
-                'Contents': {
-                    'dbotIncidentClose': True,
-                    'closeNotes': ticket.get("close_notes"),
-                    'closeReason': converts_close_code_or_state_to_close_reason(ticket_state, ticket_close_code,
-                                                                                server_close_custom_state,
-                                                                                server_custom_close_code)
-                },
-                'ContentsFormat': EntryFormat.JSON
-            })
-
-    demisto.debug(f'ServiceNowV2 - Pull result is {ticket=}, {entries=}')
+        if (
+            (ticket_state and ticket_state in server_close_custom_state)
+            or (ticket_close_code and ticket_close_code in server_custom_close_code)
+            or (ticket.get("closed_at") and close_incident == "closed")
+            or (ticket.get("resolved_at") and close_incident == "resolved")
+        ):  # noqa: E127
+            demisto.debug(f"SNOW ticket changed state - should be closed in XSOAR: {ticket}")
+            entries.append(
+                {
+                    "Type": EntryType.NOTE,
+                    "Contents": {
+                        "dbotIncidentClose": True,
+                        "closeNotes": ticket.get("close_notes"),
+                        "closeReason": converts_close_code_or_state_to_close_reason(
+                            ticket_state, ticket_close_code, server_close_custom_state, server_custom_close_code
+                        ),
+                    },
+                    "ContentsFormat": EntryFormat.JSON,
+                }
+            )
+
+    demisto.debug(f"ServiceNowV2 - Pull result is {ticket=}, {entries=}")

     return [ticket] + entries
@@ -2730,8 +2964,9 @@ def is_new_incident(ticket_id: str) -> bool:
     return ticket_id_in_last_fetch


-def converts_close_code_or_state_to_close_reason(ticket_state: str, ticket_close_code: str, server_close_custom_state: str,
-                                                 server_custom_close_code: str):
+def converts_close_code_or_state_to_close_reason(
+    ticket_state: str, ticket_close_code: str, server_close_custom_state: str, server_custom_close_code: str
+):
     """
     determine the XSOAR incident close reason based on the ServiceNow ticket close_code or state.
     if 'Mirrored XSOAR Ticket custom close resolution code' parameter is set, the function will try to use it to
@@ -2752,29 +2987,29 @@ def converts_close_code_or_state_to_close_reason(ticket_state: str, ticket_close
     # if custom close code parameter is set and ticket close code is returned from the SNOW incident
     if server_custom_close_code and ticket_close_code:
-        demisto.debug(f'trying to close XSOAR incident using custom resolution code: {server_custom_close_code}, with \
-                      received close code: {ticket_close_code}')
+        demisto.debug(f"trying to close XSOAR incident using custom resolution code: {server_custom_close_code}, with \
+                      received close code: {ticket_close_code}")
         # parse custom close code parameter into a dictionary of custom close codes and their names (label)
         server_close_custom_code_dict = dict(item.strip().split("=") for item in server_custom_close_code.split(","))
         # check if close code is in the parsed dictionary
         if close_code_label := server_close_custom_code_dict.get(ticket_close_code):
-            demisto.debug(f'incident closed using custom close code. Close Code: {ticket_close_code}, Label: {close_code_label}')
+            demisto.debug(f"incident closed using custom close code. Close Code: {ticket_close_code}, Label: {close_code_label}")
             return close_code_label

     # if custom state parameter is set and ticket state is returned from incident is not empty
     if server_close_custom_state and ticket_state:
-        demisto.debug(f'trying to close XSOAR incident using custom states: {server_close_custom_state}, with \
-                      received state code: {ticket_state}')
+        demisto.debug(f"trying to close XSOAR incident using custom states: {server_close_custom_state}, with \
+                      received state code: {ticket_state}")
        # parse custom state parameter into a dictionary of custom state codes and their names (label)
         server_close_custom_state_dict = dict(item.strip().split("=") for item in server_close_custom_state.split(","))
         # check if state code is in the parsed dictionary
         if state_label := server_close_custom_state_dict.get(ticket_state):
-            demisto.debug(f'incident closed using custom state. State Code: {ticket_state}, Label: {state_label}')
+            demisto.debug(f"incident closed using custom state. State Code: {ticket_state}, Label: {state_label}")
             return state_label

-    if ticket_state in ['6', '7']:  # default states for closed (6) and resolved (7)
-        demisto.debug(f'incident should be closed using default state. State Code: {ticket_state}')
-        return 'Resolved'
+    if ticket_state in ["6", "7"]:  # default states for closed (6) and resolved (7)
+        demisto.debug(f"incident should be closed using default state. State Code: {ticket_state}")
+        return "Resolved"

     demisto.debug(f'incident is closed using default close reason "Other". State Code: {ticket_state}')
-    return 'Other'
+    return "Other"


 def update_remote_system_command(client: Client, args: dict[str, Any], params: dict[str, Any]) -> str:
@@ -2795,90 +3030,95 @@ def update_remote_system_command(client: Client, args: dict[str, Any], params: d
     """
     parsed_args = UpdateRemoteSystemArgs(args)
     if parsed_args.delta:
-        demisto.debug(f'Got the following delta keys {str(list(parsed_args.delta.keys()))}')
+        demisto.debug(f"Got the following delta keys {list(parsed_args.delta.keys())!s}")

     ticket_type = client.ticket_type
     ticket_id = parsed_args.remote_incident_id
     closure_case = get_closure_case(params)
     demisto.debug(f"closure case= {closure_case}")
     is_custom_close = False
-    close_custom_state = params.get('close_custom_state', None)
+    close_custom_state = params.get("close_custom_state", None)
     demisto.debug(f"state will change to= {parsed_args.data.get('state')}")

     if parsed_args.incident_changed:
-        demisto.debug(f'Incident changed: {parsed_args.incident_changed}')
+        demisto.debug(f"Incident changed: {parsed_args.incident_changed}")
         if parsed_args.inc_status == IncidentStatus.DONE:
-            demisto.debug('Closing incident by closure case')
-            if closure_case and ticket_type in {'sc_task', 'sc_req_item', SIR_INCIDENT}:
-                parsed_args.data['state'] = '3'
+            demisto.debug("Closing incident by closure case")
+            if closure_case and ticket_type in {"sc_task", "sc_req_item", SIR_INCIDENT}:
+                parsed_args.data["state"] = "3"
             # These ticket types are closed by changing their state.
-            if closure_case == 'closed' and ticket_type == INCIDENT:
-                parsed_args.data['state'] = '7'  # Closing incident ticket.
-            elif closure_case == 'resolved' and ticket_type == INCIDENT:
-                parsed_args.data['state'] = '6'  # resolving incident ticket.
+            if closure_case == "closed" and ticket_type == INCIDENT:
+                parsed_args.data["state"] = "7"  # Closing incident ticket.
+            elif closure_case == "resolved" and ticket_type == INCIDENT:
+                parsed_args.data["state"] = "6"  # resolving incident ticket.
             if close_custom_state:  # Closing by custom state
-                demisto.debug(f'Closing by custom state = {close_custom_state}')
+                demisto.debug(f"Closing by custom state = {close_custom_state}")
                 is_custom_close = True
-                parsed_args.data['state'] = close_custom_state
+                parsed_args.data["state"] = close_custom_state

         fields = get_ticket_fields(parsed_args.data, ticket_type=ticket_type)
         demisto.debug(f"all fields= {fields}")
         if closure_case:
             # Convert the closing state to the right one if the ticket type is not incident in order to close the
             # ticket/incident via XSOAR
-            if parsed_args.data.get('state') == '7 - Closed' and not is_custom_close:
-                fields['state'] = TICKET_TYPE_TO_CLOSED_STATE[ticket_type]
+            if parsed_args.data.get("state") == "7 - Closed" and not is_custom_close:
+                fields["state"] = TICKET_TYPE_TO_CLOSED_STATE[ticket_type]

-            fields = {key: val for key, val in fields.items() if key != 'closed_at' and key != 'resolved_at'}
+            fields = {key: val for key, val in fields.items() if key != "closed_at" and key != "resolved_at"}

-        demisto.debug(f'Sending update request to server {ticket_type}, {ticket_id}, {fields}')
+        demisto.debug(f"Sending update request to server {ticket_type}, {ticket_id}, {fields}")
         result = client.update(ticket_type, ticket_id, fields)

         # Handle case of custom state doesn't exist, reverting to the original close state
-        if is_custom_close and demisto.get(result, 'result.state') != close_custom_state:
-            fields['state'] = TICKET_TYPE_TO_CLOSED_STATE[ticket_type]
-            demisto.debug(f'Given custom state doesn\'t exist - Sending second update request to server with '
-                          f'default closed state: {ticket_type}, {ticket_id}, {fields}')
+        if is_custom_close and demisto.get(result, "result.state") != close_custom_state:
+            fields["state"] = TICKET_TYPE_TO_CLOSED_STATE[ticket_type]
+            demisto.debug(
+                f"Given custom state doesn't exist - Sending second update request to server with "
+                f"default closed state: {ticket_type}, {ticket_id}, {fields}"
+            )
             result = client.update(ticket_type, ticket_id, fields)

-        demisto.info(f'Ticket Update result {result}')
+        demisto.info(f"Ticket Update result {result}")

     entries = parsed_args.entries
     if entries:
-        demisto.debug(f'New entries {entries}')
+        demisto.debug(f"New entries {entries}")
         for entry in entries:
             demisto.debug(f'Sending entry {entry.get("id")}, type: {entry.get("type")}')
             # Mirroring files as entries
-            if is_entry_type_mirror_supported(entry.get('type')):
-                path_res = demisto.getFilePath(entry.get('id'))
-                full_file_name = path_res.get('name')
+            if is_entry_type_mirror_supported(entry.get("type")):
+                path_res = demisto.getFilePath(entry.get("id"))
+                full_file_name = path_res.get("name")
                 file_name, file_extension = os.path.splitext(full_file_name)
                 if not file_extension:
-                    file_extension = ''
-                if params.get('file_tag_from_service_now') not in entry.get('tags', []):
+                    file_extension = ""
+                if params.get("file_tag_from_service_now") not in entry.get("tags", []):
                     try:
-                        client.upload_file(ticket_id, entry.get('id'), file_name + '_mirrored_from_xsoar' + file_extension,
-                                           ticket_type)
+                        client.upload_file(
+                            ticket_id, entry.get("id"), file_name + "_mirrored_from_xsoar" + file_extension, ticket_type
+                        )
                     except Exception as e:
                         demisto.error(f"An attempt to mirror a file has failed. entry_id={entry.get('id')}, {file_name=}\n{e}")
-                        text_for_snow_comment = "An attempt to mirror a file from Cortex XSOAR was failed." \
-                                                f"\nFile name: {file_name}\nError from integration: {e}"
-                        client.add_comment(ticket_id, ticket_type, 'comments', text_for_snow_comment)
+                        text_for_snow_comment = (
+                            "An attempt to mirror a file from Cortex XSOAR was failed."
+                            f"\nFile name: {file_name}\nError from integration: {e}"
+                        )
+                        client.add_comment(ticket_id, ticket_type, "comments", text_for_snow_comment)
             else:
                 # Mirroring comment and work notes as entries
-                tags = entry.get('tags', [])
-                key = ''
-                if params.get('work_notes_tag') in tags:
-                    key = 'work_notes'
-                elif params.get('comment_tag') in tags:
-                    key = 'comments'
+                tags = entry.get("tags", [])
+                key = ""
+                if params.get("work_notes_tag") in tags:
+                    key = "work_notes"
+                elif params.get("comment_tag") in tags:
+                    key = "comments"
                 # Sometimes user is an empty str, not None, therefore nothing is displayed in ServiceNow
-                user = entry.get('user', 'dbot') or 'dbot'
-                if str(entry.get('format')) == 'html':
-                    contents = str(entry.get('contents', ''))
+                user = entry.get("user", "dbot") or "dbot"
+                if str(entry.get("format")) == "html":
+                    contents = str(entry.get("contents", ""))
                     text = f"({user}): \n\n[code]{contents}\n\n[/code] Mirrored from Cortex XSOAR"
                 else:
-                    text = f"({user}): {str(entry.get('contents', ''))}\n\n Mirrored from Cortex XSOAR"
+                    text = f"({user}): {entry.get('contents', '')!s}\n\n Mirrored from Cortex XSOAR"
                 client.add_comment(ticket_id, ticket_type, key, text)

     return ticket_id
@@ -2892,23 +3132,28 @@ def get_closure_case(params: dict[str, Any]):
     Returns:
         None if no closure method is specified. otherwise returns (str) The right closure method.
     """
-    if params.get('close_ticket_multiple_options') != 'None':
-        return params.get('close_ticket_multiple_options')
-    elif params.get('close_ticket'):
-        return 'closed'
+    if params.get("close_ticket_multiple_options") != "None":
+        return params.get("close_ticket_multiple_options")
+    elif params.get("close_ticket"):
+        return "closed"
     else:
         return None


 def is_entry_type_mirror_supported(entry_type):
     """
-        Args:
-        entry_type (int)
-        Return:
-        True if the entry type supports mirroring otherwise False
-        """
-    supported_mirror_entries = [EntryType.FILE, EntryType.ENTRY_INFO_FILE, EntryType.IMAGE,
-                                EntryType.VIDEO_FILE, EntryType.STATIC_VIDEO_FILE]
+    Args:
+    entry_type (int)
+    Return:
+    True if the entry type supports mirroring otherwise False
+    """
+    supported_mirror_entries = [
+        EntryType.FILE,
+        EntryType.ENTRY_INFO_FILE,
+        EntryType.IMAGE,
+        EntryType.VIDEO_FILE,
+        EntryType.STATIC_VIDEO_FILE,
+    ]
     return entry_type in supported_mirror_entries
@@ -2937,36 +3182,34 @@ def get_mapping_fields_command(client: Client) -> GetMappingFieldsResponse:


 def get_modified_remote_data_command(
-        client: Client,
-        args: dict[str, str],
-        update_timestamp_field: str = 'sys_updated_on',
-        mirror_limit: str = '100',
+    client: Client,
+    args: dict[str, str],
+    update_timestamp_field: str = "sys_updated_on",
+    mirror_limit: str = "100",
 ) -> GetModifiedRemoteDataResponse:
     remote_args = GetModifiedRemoteDataArgs(args)
-    parsed_date = dateparser.parse(remote_args.last_update, settings={'TIMEZONE': 'UTC'})
-    assert parsed_date is not None, f'could not parse {remote_args.last_update}'
+    parsed_date = dateparser.parse(remote_args.last_update, settings={"TIMEZONE": "UTC"})
+    assert parsed_date is not None, f"could not parse {remote_args.last_update}"
     last_update = parsed_date.strftime(DATE_FORMAT)

-    demisto.debug(f'Running get-modified-remote-data command. Last update is: {last_update}')
+    demisto.debug(f"Running get-modified-remote-data command. Last update is: {last_update}")

     result = client.query(
         table_name=client.ticket_type,
         sys_param_limit=mirror_limit,
         sys_param_offset=str(client.sys_param_offset),
-        sys_param_query=f'{update_timestamp_field}>{last_update}',
-        sysparm_fields='sys_id',
+        sys_param_query=f"{update_timestamp_field}>{last_update}",
+        sysparm_fields="sys_id",
     )

     modified_records_ids = []

-    if result and (modified_records := result.get('result')):
-        modified_records_ids = [record.get('sys_id') for record in modified_records if 'sys_id' in record]
+    if result and (modified_records := result.get("result")):
+        modified_records_ids = [record.get("sys_id") for record in modified_records if "sys_id" in record]

     modified_records_ids = extend_with_new_incidents(modified_records_ids)
-    demisto.debug(f'ServiceNowV2 - returning the following incident ids: {modified_records_ids}')
-    return GetModifiedRemoteDataResponse(
-        modified_records_ids
-    )
+    demisto.debug(f"ServiceNowV2 - returning the following incident ids: {modified_records_ids}")
+    return GetModifiedRemoteDataResponse(modified_records_ids)
@@ -2988,7 +3231,7 @@ def extend_with_new_incidents(modified_records_ids: list) -> list:

 def add_custom_fields(params):
     global SNOW_ARGS
-    custom_fields = argToList(params.get('custom_fields'))
+    custom_fields = argToList(params.get("custom_fields"))
     SNOW_ARGS += custom_fields
@@ -3004,25 +3247,25 @@ def get_tasks_from_co_human_readable(data: dict, ticket_type: str) -> dict:
     :param ticket_type: ticket type
     """
     states = TICKET_STATES.get(ticket_type, {})
-    state = data.get('state', {}).get('value')
+    state = data.get("state", {}).get("value")
     item = {
-        'ID': data.get('sys_id', {}).get('value', ''),
-        'Name': data.get('number', {}).get('value', ''),
-        'Description': data.get('short_description', {}).get('value', ''),
-        'State': states.get(str(int(state)), str(int(state))),
-        'Variables': []
+        "ID": data.get("sys_id", {}).get("value", ""),
+        "Name": data.get("number", {}).get("value", ""),
+        "Description": data.get("short_description", {}).get("value", ""),
+        "State": states.get(str(int(state)), str(int(state))),
+        "Variables": [],
     }
-    variables = data.get('variables')
+    variables = data.get("variables")
     if variables and isinstance(variables, list):
         for var in variables:
             if var:
                 pretty_variables = {
-                    'Question': var.get('label', ''),
-                    'Type': var.get('display_type', ''),
-                    'Name': var.get('name', ''),
-                    'Mandatory': var.get('mandatory', '')
+                    "Question": var.get("label", ""),
+                    "Type": var.get("display_type", ""),
+                    "Name": var.get("name", ""),
+                    "Mandatory": var.get("mandatory", ""),
                 }
-                item['Variables'].append(pretty_variables)
+                item["Variables"].append(pretty_variables)
     return item
@@ -3036,40 +3279,30 @@ def get_tasks_for_co_command(client: Client, args: dict) -> CommandResults:
     Returns:
         Demisto Outputs.
""" - sys_id = str(args.get('id', '')) + sys_id = str(args.get("id", "")) result = client.get_co_tasks(sys_id) - if not result or 'result' not in result: - return CommandResults( - outputs_prefix="ServiceNow.Tasks", - readable_output='Item was not found.', - raw_response=result - ) - items = result.get('result', {}) + if not result or "result" not in result: + return CommandResults(outputs_prefix="ServiceNow.Tasks", readable_output="Item was not found.", raw_response=result) + items = result.get("result", {}) if not isinstance(items, list): items_list = [items] else: items_list = items if len(items_list) == 0: - return CommandResults( - outputs_prefix="ServiceNow.Tasks", - readable_output='No items were found.', - raw_response=result - ) + return CommandResults(outputs_prefix="ServiceNow.Tasks", readable_output="No items were found.", raw_response=result) mapped_items = [] for item in items_list: mapped_items.append(get_tasks_from_co_human_readable(item, client.ticket_type)) - headers = ['ID', 'Name', 'State', 'Description'] - human_readable = tableToMarkdown('ServiceNow Catalog Items', mapped_items, headers=headers, - removeNull=True, headerTransform=pascalToSpace) - entry_context = {'ServiceNow.Tasks(val.ID===obj.ID)': createContext(mapped_items, removeNull=True)} + headers = ["ID", "Name", "State", "Description"] + human_readable = tableToMarkdown( + "ServiceNow Catalog Items", mapped_items, headers=headers, removeNull=True, headerTransform=pascalToSpace + ) + entry_context = {"ServiceNow.Tasks(val.ID===obj.ID)": createContext(mapped_items, removeNull=True)} return CommandResults( - outputs_prefix="ServiceNow.Tasks", - outputs=entry_context, - readable_output=human_readable, - raw_response=result + outputs_prefix="ServiceNow.Tasks", outputs=entry_context, readable_output=human_readable, raw_response=result ) @@ -3084,27 +3317,45 @@ def create_co_from_template_command(client: Client, args: dict) -> CommandResult Demisto Outputs. 
""" - template = args.get('template', "") + template = args.get("template", "") result = client.create_co_from_template(template) - if not result or 'result' not in result: - raise Exception('Unable to retrieve response.') - ticket = result['result'] - human_readable_table = get_co_human_readable(ticket=ticket, ticket_type='change_request') - headers = ['System ID', 'Number', 'Impact', 'Urgency', 'Severity', 'Priority', 'State', 'Approval', - 'Created On', 'Created By', 'Active', 'Close Notes', 'Close Code', 'Description', 'Opened At', - 'Due Date', 'Resolved By', 'Resolved At', 'SLA Due', 'Short Description', 'Additional Comments'] - human_readable = tableToMarkdown('ServiceNow ticket was created successfully.', t=human_readable_table, - headers=headers, removeNull=True) + if not result or "result" not in result: + raise Exception("Unable to retrieve response.") + ticket = result["result"] + human_readable_table = get_co_human_readable(ticket=ticket, ticket_type="change_request") + headers = [ + "System ID", + "Number", + "Impact", + "Urgency", + "Severity", + "Priority", + "State", + "Approval", + "Created On", + "Created By", + "Active", + "Close Notes", + "Close Code", + "Description", + "Opened At", + "Due Date", + "Resolved By", + "Resolved At", + "SLA Due", + "Short Description", + "Additional Comments", + ] + human_readable = tableToMarkdown( + "ServiceNow ticket was created successfully.", t=human_readable_table, headers=headers, removeNull=True + ) created_ticket_context = get_ticket_context(ticket) entry_context = { - 'Ticket(val.ID===obj.ID)': created_ticket_context, - 'ServiceNow.Ticket(val.ID===obj.ID)': created_ticket_context + "Ticket(val.ID===obj.ID)": created_ticket_context, + "ServiceNow.Ticket(val.ID===obj.ID)": created_ticket_context, } return CommandResults( - outputs_prefix="ServiceNow.Ticket", - outputs=entry_context, - readable_output=human_readable, - raw_response=result + outputs_prefix="ServiceNow.Ticket", outputs=entry_context, readable_output=human_readable, raw_response=result ) @@ -3121,35 +3372,35 @@ def get_co_human_readable(ticket: dict, ticket_type: str, additional_fields: Ite """ states = TICKET_STATES.get(ticket_type, {}) - state = ticket.get('state', {}).get('value', '') - priority = ticket.get('priority', {}).get('value', '') + state = ticket.get("state", {}).get("value", "") + priority = ticket.get("priority", {}).get("value", "") item = { - 'System ID': ticket.get('sys_id', {}).get('value', ''), - 'Number': ticket.get('number', {}).get('value', ''), - 'Impact': TICKET_IMPACT.get(str(int(ticket.get('impact', {}).get('value', ''))), ''), - 'Business Impact': BUSINESS_IMPACT.get(str(ticket.get('business_criticality', {}).get('value', '')), ''), - 'Urgency': ticket.get('urgency', {}).get('display_value', ''), - 'Severity': ticket.get('severity', {}).get('value', ''), - 'Priority': TICKET_PRIORITY.get(str(int(priority)), str(int(priority))) if priority else '', - 'State': states.get(str(int(state)), str(int(state))), - 'Approval': ticket.get('approval_history', {}).get('value', ''), - 'Created On': ticket.get('sys_created_on', {}).get('value', ''), - 'Created By': ticket.get('sys_created_by', {}).get('value', ''), - 'Active': ticket.get('active', {}).get('value', ''), - 'Close Notes': ticket.get('close_notes', {}).get('value', ''), - 'Close Code': ticket.get('close_code', {}).get('value', ''), - 'Description': ticket.get('description', {}).get('value', ''), - 'Opened At': ticket.get('opened_at', {}).get('value', ''), - 'Due Date': ticket.get('due_date', 
{}).get('value', ''), - 'Resolved By': ticket.get('closed_by', {}).get('value', ''), - 'Resolved At': ticket.get('closed_at', {}).get('value', ''), - 'SLA Due': ticket.get('sla_due', {}).get('value', ''), - 'Short Description': ticket.get('short_description', {}).get('value', ''), - 'Additional Comments': ticket.get('comments', {}).get('value', '') + "System ID": ticket.get("sys_id", {}).get("value", ""), + "Number": ticket.get("number", {}).get("value", ""), + "Impact": TICKET_IMPACT.get(str(int(ticket.get("impact", {}).get("value", ""))), ""), + "Business Impact": BUSINESS_IMPACT.get(str(ticket.get("business_criticality", {}).get("value", "")), ""), + "Urgency": ticket.get("urgency", {}).get("display_value", ""), + "Severity": ticket.get("severity", {}).get("value", ""), + "Priority": TICKET_PRIORITY.get(str(int(priority)), str(int(priority))) if priority else "", + "State": states.get(str(int(state)), str(int(state))), + "Approval": ticket.get("approval_history", {}).get("value", ""), + "Created On": ticket.get("sys_created_on", {}).get("value", ""), + "Created By": ticket.get("sys_created_by", {}).get("value", ""), + "Active": ticket.get("active", {}).get("value", ""), + "Close Notes": ticket.get("close_notes", {}).get("value", ""), + "Close Code": ticket.get("close_code", {}).get("value", ""), + "Description": ticket.get("description", {}).get("value", ""), + "Opened At": ticket.get("opened_at", {}).get("value", ""), + "Due Date": ticket.get("due_date", {}).get("value", ""), + "Resolved By": ticket.get("closed_by", {}).get("value", ""), + "Resolved At": ticket.get("closed_at", {}).get("value", ""), + "SLA Due": ticket.get("sla_due", {}).get("value", ""), + "Short Description": ticket.get("short_description", {}).get("value", ""), + "Additional Comments": ticket.get("comments", {}).get("value", ""), } for field in additional_fields: - item.update({field: ticket.get(field, {}).get('value', '')}) + item.update({field: ticket.get(field, {}).get("value", "")}) return item @@ -3172,7 +3423,7 @@ def generic_api_call_command(client: Client, args: dict) -> Union[str, CommandRe method = str(args.get("method")) path = str(args.get("path")) headers = json.loads(str(args.get("headers", {}))) - custom_api = args.get('custom_api', '') + custom_api = args.get("custom_api", "") try: body: dict = json.loads(str(args.get("body", {}))) except ValueError: @@ -3184,8 +3435,9 @@ def generic_api_call_command(client: Client, args: dict) -> Union[str, CommandRe return f"{method} method not supported.\nTry something from {', '.join(methods)}" response = None - response = client.generic_request(method=method, path=path, body=body, headers=headers, - sc_api=sc_api, cr_api=cr_api, custom_api=custom_api) + response = client.generic_request( + method=method, path=path, body=body, headers=headers, sc_api=sc_api, cr_api=cr_api, custom_api=custom_api + ) if response is not None: resp = response @@ -3204,183 +3456,185 @@ def main(): PARSE AND VALIDATE INTEGRATION PARAMS """ command = demisto.command() - LOG(f'Executing command {command}') + LOG(f"Executing command {command}") params = demisto.params() args = demisto.args() - verify = not params.get('insecure', False) - use_oauth = params.get('use_oauth', False) + verify = not params.get("insecure", False) + use_oauth = params.get("use_oauth", False) oauth_params = {} if use_oauth: # if the `Use OAuth` checkbox was checked, client id & secret should be in the credentials fields - username = '' - password = '' - client_id = params.get('credentials', {}).get('identifier') 
-        client_secret = params.get('credentials', {}).get('password')
+        username = ""
+        password = ""
+        client_id = params.get("credentials", {}).get("identifier")
+        client_secret = params.get("credentials", {}).get("password")
         oauth_params = {
-            'credentials': {
-                'identifier': username,
-                'password': password
-            },
-            'client_id': client_id,
-            'client_secret': client_secret,
-            'url': params.get('url'),
-            'headers': {
-                'Content-Type': 'application/json',
-                'Accept': 'application/json'
-            },
-            'verify': verify,
-            'proxy': params.get('proxy'),
-            'use_oauth': use_oauth
+            "credentials": {"identifier": username, "password": password},
+            "client_id": client_id,
+            "client_secret": client_secret,
+            "url": params.get("url"),
+            "headers": {"Content-Type": "application/json", "Accept": "application/json"},
+            "verify": verify,
+            "proxy": params.get("proxy"),
+            "use_oauth": use_oauth,
         }
     else:  # use basic authentication
-        username = params.get('credentials', {}).get('identifier')
-        password = params.get('credentials', {}).get('password')
+        username = params.get("credentials", {}).get("identifier")
+        password = params.get("credentials", {}).get("password")

-    version = params.get('api_version')
+    version = params.get("api_version")

-    force_default_url = argToBoolean(args.get('force_default_url', 'false'))
+    force_default_url = argToBoolean(args.get("force_default_url", "false"))
     if version and not force_default_url:
-        api = f'/api/now/{version}/'
-        sc_api = f'/api/sn_sc/{version}/'
-        cr_api = f'/api/sn_chg_rest/{version}/'
+        api = f"/api/now/{version}/"
+        sc_api = f"/api/sn_sc/{version}/"
+        cr_api = f"/api/sn_chg_rest/{version}/"
     else:
         if force_default_url:
             """
            force_default_url is given as part of the arguments of the command servicenow-create-co-from-template,
            if True, then the request will not use the configured api version
            """
-            demisto.debug(f'{force_default_url=}, ignoring api {version=} configured in parameters')
+            demisto.debug(f"{force_default_url=}, ignoring api {version=} configured in parameters")
         # Either no API version configured, OR force_default_url=True
-        api = '/api/now/'
-        sc_api = '/api/sn_sc/'
-        cr_api = '/api/sn_chg_rest/'
-    server_url = params.get('url')
-    sc_server_url = f'{get_server_url(server_url)}{sc_api}'
-    cr_server_url = f'{get_server_url(server_url)}{cr_api}'
-    server_url = f'{get_server_url(server_url)}{api}'
-
-    fetch_time = (params.get('fetch_time') or DEFAULT_FETCH_TIME).strip()
-    sysparm_query = params.get('sysparm_query')
-    sysparm_limit = int(params.get('fetch_limit', 10))
-    timestamp_field = params.get('timestamp_field', 'opened_at')
-    ticket_type = params.get('ticket_type', INCIDENT)
-    incident_name = params.get('incident_name', 'number') or 'number'
-    get_attachments = params.get('get_attachments', False)
-    update_timestamp_field = params.get('update_timestamp_field', 'sys_updated_on') or 'sys_updated_on'
-    mirror_limit = params.get('mirror_limit', '100') or '100'
-    look_back = arg_to_number(params.get('look_back')) or 0
-    use_display_value = argToBoolean(params.get('use_display_value', False))
-    display_date_format = params.get('display_date_format', '')
+        api = "/api/now/"
+        sc_api = "/api/sn_sc/"
+        cr_api = "/api/sn_chg_rest/"
+    server_url = params.get("url")
+    sc_server_url = f"{get_server_url(server_url)}{sc_api}"
+    cr_server_url = f"{get_server_url(server_url)}{cr_api}"
+    server_url = f"{get_server_url(server_url)}{api}"
+
+    fetch_time = (params.get("fetch_time") or DEFAULT_FETCH_TIME).strip()
+    sysparm_query = params.get("sysparm_query")
+    sysparm_limit = int(params.get("fetch_limit", 10))
+    timestamp_field = params.get("timestamp_field", "opened_at")
+    ticket_type = params.get("ticket_type", INCIDENT)
+    incident_name = params.get("incident_name", "number") or "number"
+    get_attachments = params.get("get_attachments", False)
+    update_timestamp_field = params.get("update_timestamp_field", "sys_updated_on") or "sys_updated_on"
+    mirror_limit = params.get("mirror_limit", "100") or "100"
+    look_back = arg_to_number(params.get("look_back")) or 0
+    use_display_value = argToBoolean(params.get("use_display_value", False))
+    display_date_format = params.get("display_date_format", "")
     add_custom_fields(params)

-    file_tag_from_service_now, file_tag_to_service_now = (
-        params.get('file_tag_from_service_now'), params.get('file_tag')
-    )
+    file_tag_from_service_now, file_tag_to_service_now = (params.get("file_tag_from_service_now"), params.get("file_tag"))
     if file_tag_from_service_now == file_tag_to_service_now:
         raise Exception(
-            f'File Entry Tag To ServiceNow and File Entry Tag '
-            f'From ServiceNow cannot be the same name [{file_tag_from_service_now}].'
+            f"File Entry Tag To ServiceNow and File Entry Tag "
+            f"From ServiceNow cannot be the same name [{file_tag_from_service_now}]."
         )

-    comment_tag_from_servicenow, comment_tag = (
-        params.get('comment_tag_from_servicenow'), params.get('comment_tag')
-    )
+    comment_tag_from_servicenow, comment_tag = (params.get("comment_tag_from_servicenow"), params.get("comment_tag"))
     if comment_tag_from_servicenow == comment_tag:
         raise Exception(
-            f'Comment Entry Tag To ServiceNow and Comment Entry Tag '
-            f'From ServiceNow cannot be the same name [{comment_tag_from_servicenow}].'
+            f"Comment Entry Tag To ServiceNow and Comment Entry Tag "
+            f"From ServiceNow cannot be the same name [{comment_tag_from_servicenow}]."
         )

-    work_notes_tag_from_servicenow, work_notes_tag = (
-        params.get('work_notes_tag_from_servicenow'), params.get('work_notes_tag')
-    )
+    work_notes_tag_from_servicenow, work_notes_tag = (params.get("work_notes_tag_from_servicenow"), params.get("work_notes_tag"))
     if work_notes_tag_from_servicenow == work_notes_tag:
         raise Exception(
-            f'Work note Entry Tag To ServiceNow and Work Note Entry Tag '
-            f'From ServiceNow cannot be the same name [{work_notes_tag_from_servicenow}].'
+            f"Work note Entry Tag To ServiceNow and Work Note Entry Tag "
+            f"From ServiceNow cannot be the same name [{work_notes_tag_from_servicenow}]."
         )

     raise_exception = False
     try:
-        client = Client(server_url=server_url, sc_server_url=sc_server_url, cr_server_url=cr_server_url,
-                        username=username, password=password, verify=verify, fetch_time=fetch_time,
-                        sysparm_query=sysparm_query, sysparm_limit=sysparm_limit,
-                        timestamp_field=timestamp_field, ticket_type=ticket_type, get_attachments=get_attachments,
-                        incident_name=incident_name, oauth_params=oauth_params, version=version, look_back=look_back,
-                        use_display_value=use_display_value, display_date_format=display_date_format)
+        client = Client(
+            server_url=server_url,
+            sc_server_url=sc_server_url,
+            cr_server_url=cr_server_url,
+            username=username,
+            password=password,
+            verify=verify,
+            fetch_time=fetch_time,
+            sysparm_query=sysparm_query,
+            sysparm_limit=sysparm_limit,
+            timestamp_field=timestamp_field,
+            ticket_type=ticket_type,
+            get_attachments=get_attachments,
+            incident_name=incident_name,
+            oauth_params=oauth_params,
+            version=version,
+            look_back=look_back,
+            use_display_value=use_display_value,
+            display_date_format=display_date_format,
+        )
         commands: dict[str, Callable[[Client, dict[str, str]], tuple[str, dict[Any, Any], dict[Any, Any], bool]]] = {
-            'test-module': test_module,
-            'servicenow-oauth-test': oauth_test_module,
-            'servicenow-oauth-login': login_command,
-            'servicenow-update-ticket': update_ticket_command,
-            'servicenow-create-ticket': create_ticket_command,
-            'servicenow-delete-ticket': delete_ticket_command,
-            'servicenow-query-tickets': query_tickets_command,
-            'servicenow-add-link': add_link_command,
-            'servicenow-add-comment': add_comment_command,
-            'servicenow-upload-file': upload_file_command,
-            'servicenow-add-tag': add_tag_command,
-            'servicenow-get-record': get_record_command,
-            'servicenow-update-record': update_record_command,
-            'servicenow-create-record': create_record_command,
-            'servicenow-delete-record': delete_record_command,
-            'servicenow-query-table': query_table_command,
-            'servicenow-list-table-fields': list_table_fields_command,
-            'servicenow-query-computers': query_computers_command,
-            'servicenow-query-groups': query_groups_command,
-            'servicenow-query-users': query_users_command,
-            'servicenow-get-table-name': get_table_name_command,
-            'servicenow-query-items': query_items_command,
-            'servicenow-get-item-details': get_item_details_command,
-            'servicenow-create-item-order': create_order_item_command,
-            'servicenow-document-route-to-queue': document_route_to_table,
-            'servicenow-delete-file': delete_attachment_command,
+            "test-module": test_module,
+            "servicenow-oauth-test": oauth_test_module,
+            "servicenow-oauth-login": login_command,
+            "servicenow-update-ticket": update_ticket_command,
+            "servicenow-create-ticket": create_ticket_command,
+            "servicenow-delete-ticket": delete_ticket_command,
+            "servicenow-query-tickets": query_tickets_command,
+            "servicenow-add-link": add_link_command,
+            "servicenow-add-comment": add_comment_command,
+            "servicenow-upload-file": upload_file_command,
+            "servicenow-add-tag": add_tag_command,
+            "servicenow-get-record": get_record_command,
+            "servicenow-update-record": update_record_command,
+            "servicenow-create-record": create_record_command,
+            "servicenow-delete-record": delete_record_command,
+            "servicenow-query-table": query_table_command,
+            "servicenow-list-table-fields": list_table_fields_command,
+            "servicenow-query-computers": query_computers_command,
+            "servicenow-query-groups": query_groups_command,
+            "servicenow-query-users": query_users_command,
+            "servicenow-get-table-name": get_table_name_command,
+ "servicenow-query-items": query_items_command, + "servicenow-get-item-details": get_item_details_command, + "servicenow-create-item-order": create_order_item_command, + "servicenow-document-route-to-queue": document_route_to_table, + "servicenow-delete-file": delete_attachment_command, } - if command == 'fetch-incidents': + if command == "fetch-incidents": raise_exception = True incidents = fetch_incidents(client) demisto.incidents(incidents) - elif command == 'servicenow-get-ticket': + elif command == "servicenow-get-ticket": demisto.results(get_ticket_command(client, args)) elif command == "servicenow-generic-api-call": return_results(generic_api_call_command(client, args)) - elif command == 'get-remote-data': + elif command == "get-remote-data": return_results(get_remote_data_command(client, demisto.args(), demisto.params())) - elif command == 'update-remote-system': + elif command == "update-remote-system": return_results(update_remote_system_command(client, demisto.args(), demisto.params())) - elif demisto.command() == 'get-mapping-fields': + elif demisto.command() == "get-mapping-fields": return_results(get_mapping_fields_command(client)) - elif demisto.command() == 'get-modified-remote-data': + elif demisto.command() == "get-modified-remote-data": return_results(get_modified_remote_data_command(client, args, update_timestamp_field, mirror_limit)) - elif demisto.command() == 'servicenow-create-co-from-template': + elif demisto.command() == "servicenow-create-co-from-template": return_results(create_co_from_template_command(client, demisto.args())) - elif demisto.command() == 'servicenow-get-tasks-for-co': + elif demisto.command() == "servicenow-get-tasks-for-co": return_results(get_tasks_for_co_command(client, demisto.args())) - elif demisto.command() == 'servicenow-get-ticket-notes': + elif demisto.command() == "servicenow-get-ticket-notes": return_results(get_ticket_notes_command(client, args, params)) - elif demisto.command() == 'servicenow-get-ticket-attachments': + elif demisto.command() == "servicenow-get-ticket-attachments": return_results(get_attachment_command(client, args)) elif command in commands: md_, ec_, raw_response, ignore_auto_extract = commands[command](client, args) return_outputs(md_, ec_, raw_response, ignore_auto_extract=ignore_auto_extract) else: raise_exception = True - raise NotImplementedError(f'{COMMAND_NOT_IMPLEMENTED_MSG}: {demisto.command()}') + raise NotImplementedError(f"{COMMAND_NOT_IMPLEMENTED_MSG}: {demisto.command()}") except Exception as err: LOG(err) LOG.print_log() if not raise_exception: - return_error(f'Unexpected error: {str(err)}', error=traceback.format_exc()) + return_error(f"Unexpected error: {err!s}", error=traceback.format_exc()) else: raise from ServiceNowApiModule import * # noqa: E402 -if __name__ in ('__main__', '__builtin__', 'builtins'): +if __name__ in ("__main__", "__builtin__", "builtins"): main() diff --git a/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2_test.py b/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2_test.py index e061044d66d1..a454464b7d9c 100644 --- a/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2_test.py +++ b/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2_test.py @@ -1,61 +1,165 @@ +import json import re +from datetime import datetime, timedelta from unittest.mock import MagicMock +from urllib.parse import urlencode -from pytest_mock import MockerFixture -from requests_mock import MockerCore +import demistomock as demisto import pytest -import json -from datetime import 
datetime, timedelta -from freezegun import freeze_time -import ServiceNowv2 import requests +import ServiceNowv2 from CommonServerPython import CommandResults, DemistoException, EntryType -from ServiceNowv2 import get_server_url, get_ticket_context, get_ticket_human_readable, \ - generate_body, parse_dict_ticket_fields, split_fields, Client, update_ticket_command, create_ticket_command, \ - query_tickets_command, add_link_command, add_comment_command, upload_file_command, get_ticket_notes_command, \ - get_record_command, update_record_command, create_record_command, delete_record_command, query_table_command, \ - list_table_fields_command, query_computers_command, get_table_name_command, add_tag_command, query_items_command, \ - get_item_details_command, create_order_item_command, document_route_to_table, fetch_incidents, main, \ - get_mapping_fields_command, get_remote_data_command, update_remote_system_command, delete_ticket_command, \ - ServiceNowClient, oauth_test_module, login_command, get_modified_remote_data_command, \ - get_ticket_fields, check_assigned_to_field, generic_api_call_command, get_closure_case, get_timezone_offset, \ - converts_close_code_or_state_to_close_reason, split_notes, DATE_FORMAT, convert_to_notes_result, DATE_FORMAT_OPTIONS, \ - format_incidents_response_with_display_values, get_entries_for_notes, is_time_field, delete_attachment_command, \ - get_attachment_command, is_new_incident +from freezegun import freeze_time +from pytest_mock import MockerFixture +from requests_mock import MockerCore +from ServiceNowv2 import ( + DATE_FORMAT, + DATE_FORMAT_OPTIONS, + Client, + ServiceNowClient, + add_comment_command, + add_link_command, + add_tag_command, + check_assigned_to_field, + convert_to_notes_result, + converts_close_code_or_state_to_close_reason, + create_order_item_command, + create_record_command, + create_ticket_command, + delete_attachment_command, + delete_record_command, + delete_ticket_command, + document_route_to_table, + fetch_incidents, + format_incidents_response_with_display_values, + generate_body, + generic_api_call_command, + get_attachment_command, + get_closure_case, + get_entries_for_notes, + get_item_details_command, + get_mapping_fields_command, + get_modified_remote_data_command, + get_record_command, + get_remote_data_command, + get_server_url, + get_table_name_command, + get_ticket_context, + get_ticket_fields, + get_ticket_human_readable, + get_ticket_notes_command, + get_timezone_offset, + is_new_incident, + is_time_field, + list_table_fields_command, + login_command, + main, + oauth_test_module, + parse_dict_ticket_fields, + query_computers_command, + query_items_command, + query_table_command, + query_tickets_command, + split_fields, + split_notes, + update_record_command, + update_remote_system_command, + update_ticket_command, + upload_file_command, +) from ServiceNowv2 import test_module as module -from test_data.response_constants import RESPONSE_TICKET, RESPONSE_MULTIPLE_TICKET, RESPONSE_UPDATE_TICKET, \ - RESPONSE_UPDATE_TICKET_SC_REQ, RESPONSE_CREATE_TICKET, RESPONSE_CREATE_TICKET_WITH_OUT_JSON, RESPONSE_QUERY_TICKETS, \ - RESPONSE_ADD_LINK, RESPONSE_ADD_COMMENT, RESPONSE_UPLOAD_FILE, RESPONSE_GET_TICKET_NOTES, RESPONSE_GET_RECORD, \ - RESPONSE_UPDATE_RECORD, RESPONSE_CREATE_RECORD, RESPONSE_QUERY_TABLE, RESPONSE_LIST_TABLE_FIELDS, \ - RESPONSE_QUERY_COMPUTERS, RESPONSE_GET_TABLE_NAME, RESPONSE_UPDATE_TICKET_ADDITIONAL, \ - RESPONSE_QUERY_TABLE_SYS_PARAMS, RESPONSE_ADD_TAG, RESPONSE_QUERY_ITEMS, RESPONSE_ITEM_DETAILS, \ - 
RESPONSE_CREATE_ITEM_ORDER, RESPONSE_DOCUMENT_ROUTE, RESPONSE_FETCH, RESPONSE_FETCH_ATTACHMENTS_FILE, \ - RESPONSE_FETCH_ATTACHMENTS_TICKET, RESPONSE_TICKET_MIRROR, MIRROR_COMMENTS_RESPONSE, RESPONSE_MIRROR_FILE_ENTRY, \ - RESPONSE_ASSIGNMENT_GROUP, RESPONSE_MIRROR_FILE_ENTRY_FROM_XSOAR, MIRROR_COMMENTS_RESPONSE_FROM_XSOAR, \ - MIRROR_ENTRIES, RESPONSE_CLOSING_TICKET_MIRROR_CLOSED, RESPONSE_CLOSING_TICKET_MIRROR_RESOLVED, \ - RESPONSE_CLOSING_TICKET_MIRROR_CUSTOM, RESPONSE_TICKET_ASSIGNED, OAUTH_PARAMS, \ - RESPONSE_QUERY_TICKETS_EXCLUDE_REFERENCE_LINK, MIRROR_ENTRIES_WITH_EMPTY_USERNAME, USER_RESPONSE, \ - RESPONSE_GENERIC_TICKET, RESPONSE_COMMENTS_DISPLAY_VALUE_AFTER_FORMAT, RESPONSE_COMMENTS_DISPLAY_VALUE_NO_COMMENTS, \ - RESPONSE_COMMENTS_DISPLAY_VALUE, RESPONSE_FETCH_USE_DISPLAY_VALUE -from test_data.result_constants import EXPECTED_TICKET_CONTEXT, EXPECTED_MULTIPLE_TICKET_CONTEXT, \ - EXPECTED_TICKET_HR, EXPECTED_MULTIPLE_TICKET_HR, EXPECTED_UPDATE_TICKET, EXPECTED_UPDATE_TICKET_SC_REQ, \ - EXPECTED_CREATE_TICKET, EXPECTED_CREATE_TICKET_WITH_OUT_JSON, EXPECTED_QUERY_TICKETS, EXPECTED_ADD_LINK_HR, \ - EXPECTED_ADD_COMMENT_HR, EXPECTED_UPLOAD_FILE, EXPECTED_GET_TICKET_NOTES, EXPECTED_GET_RECORD, \ - EXPECTED_UPDATE_RECORD, EXPECTED_CREATE_RECORD, EXPECTED_QUERY_TABLE, EXPECTED_LIST_TABLE_FIELDS, \ - EXPECTED_QUERY_COMPUTERS, EXPECTED_GET_TABLE_NAME, EXPECTED_UPDATE_TICKET_ADDITIONAL, \ - EXPECTED_QUERY_TABLE_SYS_PARAMS, EXPECTED_ADD_TAG, EXPECTED_QUERY_ITEMS, EXPECTED_ITEM_DETAILS, \ - EXPECTED_CREATE_ITEM_ORDER, EXPECTED_DOCUMENT_ROUTE, EXPECTED_MAPPING, \ - EXPECTED_TICKET_CONTEXT_WITH_ADDITIONAL_FIELDS, EXPECTED_QUERY_TICKETS_EXCLUDE_REFERENCE_LINK, \ - EXPECTED_TICKET_CONTEXT_WITH_NESTED_ADDITIONAL_FIELDS, EXPECTED_GET_TICKET_NOTES_DISPLAY_VALUE -from test_data.created_ticket_context import CREATED_TICKET_CONTEXT_CREATE_CO_FROM_TEMPLATE_COMMAND, \ - CREATED_TICKET_CONTEXT_GET_TASKS_FOR_CO_COMMAND - -import demistomock as demisto -from urllib.parse import urlencode +from test_data.created_ticket_context import ( + CREATED_TICKET_CONTEXT_CREATE_CO_FROM_TEMPLATE_COMMAND, + CREATED_TICKET_CONTEXT_GET_TASKS_FOR_CO_COMMAND, +) +from test_data.response_constants import ( + MIRROR_COMMENTS_RESPONSE, + MIRROR_COMMENTS_RESPONSE_FROM_XSOAR, + MIRROR_ENTRIES, + MIRROR_ENTRIES_WITH_EMPTY_USERNAME, + OAUTH_PARAMS, + RESPONSE_ADD_COMMENT, + RESPONSE_ADD_LINK, + RESPONSE_ADD_TAG, + RESPONSE_ASSIGNMENT_GROUP, + RESPONSE_CLOSING_TICKET_MIRROR_CLOSED, + RESPONSE_CLOSING_TICKET_MIRROR_CUSTOM, + RESPONSE_CLOSING_TICKET_MIRROR_RESOLVED, + RESPONSE_COMMENTS_DISPLAY_VALUE, + RESPONSE_COMMENTS_DISPLAY_VALUE_AFTER_FORMAT, + RESPONSE_COMMENTS_DISPLAY_VALUE_NO_COMMENTS, + RESPONSE_CREATE_ITEM_ORDER, + RESPONSE_CREATE_RECORD, + RESPONSE_CREATE_TICKET, + RESPONSE_CREATE_TICKET_WITH_OUT_JSON, + RESPONSE_DOCUMENT_ROUTE, + RESPONSE_FETCH, + RESPONSE_FETCH_ATTACHMENTS_FILE, + RESPONSE_FETCH_ATTACHMENTS_TICKET, + RESPONSE_FETCH_USE_DISPLAY_VALUE, + RESPONSE_GENERIC_TICKET, + RESPONSE_GET_RECORD, + RESPONSE_GET_TABLE_NAME, + RESPONSE_GET_TICKET_NOTES, + RESPONSE_ITEM_DETAILS, + RESPONSE_LIST_TABLE_FIELDS, + RESPONSE_MIRROR_FILE_ENTRY, + RESPONSE_MIRROR_FILE_ENTRY_FROM_XSOAR, + RESPONSE_MULTIPLE_TICKET, + RESPONSE_QUERY_COMPUTERS, + RESPONSE_QUERY_ITEMS, + RESPONSE_QUERY_TABLE, + RESPONSE_QUERY_TABLE_SYS_PARAMS, + RESPONSE_QUERY_TICKETS, + RESPONSE_QUERY_TICKETS_EXCLUDE_REFERENCE_LINK, + RESPONSE_TICKET, + RESPONSE_TICKET_ASSIGNED, + RESPONSE_TICKET_MIRROR, + RESPONSE_UPDATE_RECORD, + RESPONSE_UPDATE_TICKET, + 
RESPONSE_UPDATE_TICKET_ADDITIONAL, + RESPONSE_UPDATE_TICKET_SC_REQ, + RESPONSE_UPLOAD_FILE, + USER_RESPONSE, +) +from test_data.result_constants import ( + EXPECTED_ADD_COMMENT_HR, + EXPECTED_ADD_LINK_HR, + EXPECTED_ADD_TAG, + EXPECTED_CREATE_ITEM_ORDER, + EXPECTED_CREATE_RECORD, + EXPECTED_CREATE_TICKET, + EXPECTED_CREATE_TICKET_WITH_OUT_JSON, + EXPECTED_DOCUMENT_ROUTE, + EXPECTED_GET_RECORD, + EXPECTED_GET_TABLE_NAME, + EXPECTED_GET_TICKET_NOTES, + EXPECTED_GET_TICKET_NOTES_DISPLAY_VALUE, + EXPECTED_ITEM_DETAILS, + EXPECTED_LIST_TABLE_FIELDS, + EXPECTED_MAPPING, + EXPECTED_MULTIPLE_TICKET_CONTEXT, + EXPECTED_MULTIPLE_TICKET_HR, + EXPECTED_QUERY_COMPUTERS, + EXPECTED_QUERY_ITEMS, + EXPECTED_QUERY_TABLE, + EXPECTED_QUERY_TABLE_SYS_PARAMS, + EXPECTED_QUERY_TICKETS, + EXPECTED_QUERY_TICKETS_EXCLUDE_REFERENCE_LINK, + EXPECTED_TICKET_CONTEXT, + EXPECTED_TICKET_CONTEXT_WITH_ADDITIONAL_FIELDS, + EXPECTED_TICKET_CONTEXT_WITH_NESTED_ADDITIONAL_FIELDS, + EXPECTED_TICKET_HR, + EXPECTED_UPDATE_RECORD, + EXPECTED_UPDATE_TICKET, + EXPECTED_UPDATE_TICKET_ADDITIONAL, + EXPECTED_UPDATE_TICKET_SC_REQ, + EXPECTED_UPLOAD_FILE, +) def util_load_json(path): - with open(path, encoding='utf-8') as f: + with open(path, encoding="utf-8") as f: return json.loads(f.read()) @@ -68,39 +172,32 @@ def test_force_default_url_arg(mocker: MockerFixture, requests_mock: MockerCore) Then - Validate that the api version configured as a parameter was not used in the API request """ - url = 'https://test.service-now.com' - api_endpoint = '/api/sn_chg_rest/change/standard/dummy_template' - api_version = '2' + url = "https://test.service-now.com" + api_endpoint = "/api/sn_chg_rest/change/standard/dummy_template" + api_version = "2" mocker.patch.object( demisto, - 'params', + "params", return_value={ - 'isFetch': True, - 'url': url, - 'credentials': { - 'identifier': 'identifier', - 'password': 'password', + "isFetch": True, + "url": url, + "credentials": { + "identifier": "identifier", + "password": "password", }, - 'api_version': api_version, # << We test overriding this value - 'incident_name': None, - 'file_tag_from_service_now': 'FromServiceNow', - 'file_tag_to_service_now': 'ToServiceNow', - 'comment_tag': 'comments', - 'comment_tag_from_servicenow': 'CommentFromServiceNow', - 'work_notes_tag': 'work_notes', - 'work_notes_tag_from_servicenow': 'WorkNoteFromServiceNow' - } - ) - mocker.patch.object( - demisto, - 'args', - return_value={ - 'template': 'dummy_template', - 'force_default_url': 'true' - } + "api_version": api_version, # << We test overriding this value + "incident_name": None, + "file_tag_from_service_now": "FromServiceNow", + "file_tag_to_service_now": "ToServiceNow", + "comment_tag": "comments", + "comment_tag_from_servicenow": "CommentFromServiceNow", + "work_notes_tag": "work_notes", + "work_notes_tag_from_servicenow": "WorkNoteFromServiceNow", + }, ) - mocker.patch.object(demisto, 'command', return_value='servicenow-create-co-from-template') - requests_mock.post(f'{url}{api_endpoint}', json=util_load_json('test_data/create_co_from_template_result.json')) + mocker.patch.object(demisto, "args", return_value={"template": "dummy_template", "force_default_url": "true"}) + mocker.patch.object(demisto, "command", return_value="servicenow-create-co-from-template") + requests_mock.post(f"{url}{api_endpoint}", json=util_load_json("test_data/create_co_from_template_result.json")) main() assert requests_mock.request_history[0].path == api_endpoint @@ -126,9 +223,7 @@ def test_get_ticket_context_additional_fields(): - 
validate that all the details of the ticket were updated, and all the updated keys are shown in the context with do duplicates. """ - assert get_ticket_context(RESPONSE_TICKET, - ['Summary', 'sys_created_by']) == \ - EXPECTED_TICKET_CONTEXT_WITH_ADDITIONAL_FIELDS + assert get_ticket_context(RESPONSE_TICKET, ["Summary", "sys_created_by"]) == EXPECTED_TICKET_CONTEXT_WITH_ADDITIONAL_FIELDS def test_get_ticket_context_nested_additional_fields(): @@ -141,43 +236,45 @@ def test_get_ticket_context_nested_additional_fields(): - validate that all the details of the ticket were updated, and all the updated keys are shown in the context with do duplicates. """ - assert get_ticket_context(RESPONSE_TICKET, - ['Summary', 'opened_by.link']) == \ - EXPECTED_TICKET_CONTEXT_WITH_NESTED_ADDITIONAL_FIELDS + assert ( + get_ticket_context(RESPONSE_TICKET, ["Summary", "opened_by.link"]) + == EXPECTED_TICKET_CONTEXT_WITH_NESTED_ADDITIONAL_FIELDS + ) def test_get_ticket_human_readable(): - assert get_ticket_human_readable(RESPONSE_TICKET, 'incident') == EXPECTED_TICKET_HR + assert get_ticket_human_readable(RESPONSE_TICKET, "incident") == EXPECTED_TICKET_HR - assert EXPECTED_MULTIPLE_TICKET_HR[0] in get_ticket_human_readable(RESPONSE_MULTIPLE_TICKET, 'incident') - assert EXPECTED_MULTIPLE_TICKET_HR[1] in get_ticket_human_readable(RESPONSE_MULTIPLE_TICKET, 'incident') + assert EXPECTED_MULTIPLE_TICKET_HR[0] in get_ticket_human_readable(RESPONSE_MULTIPLE_TICKET, "incident") + assert EXPECTED_MULTIPLE_TICKET_HR[1] in get_ticket_human_readable(RESPONSE_MULTIPLE_TICKET, "incident") def test_generate_body(): - fields = {'a_field': 'test'} - custom_fields = {'a_custom_field': 'test'} - expected_body = {'a_field': 'test', 'u_a_custom_field': 'test'} + fields = {"a_field": "test"} + custom_fields = {"a_custom_field": "test"} + expected_body = {"a_field": "test", "u_a_custom_field": "test"} assert expected_body == generate_body(fields, custom_fields) def test_split_fields(): - expected_dict_fields = {'a': 'b', 'c': 'd', 'e': ''} - assert expected_dict_fields == split_fields('a=b;c=d;e=') + expected_dict_fields = {"a": "b", "c": "d", "e": ""} + assert expected_dict_fields == split_fields("a=b;c=d;e=") - expected_custom_field = {'u_customfield': "Link text"} - assert expected_custom_field == split_fields("u_customfield=Link text") + expected_custom_field = {"u_customfield": "Link text"} + assert expected_custom_field == split_fields("u_customfield=Link text") expected_custom_sys_params = { - "sysparm_display_value": 'all', - "sysparm_exclude_reference_link": 'True', - "sysparm_query": 'number=TASK0000001' + "sysparm_display_value": "all", + "sysparm_exclude_reference_link": "True", + "sysparm_query": "number=TASK0000001", } assert expected_custom_sys_params == split_fields( - "sysparm_display_value=all;sysparm_exclude_reference_link=True;sysparm_query=number=TASK0000001") + "sysparm_display_value=all;sysparm_exclude_reference_link=True;sysparm_query=number=TASK0000001" + ) with pytest.raises(Exception) as err: - split_fields('a') + split_fields("a") assert "must contain a '=' to specify the keys and values" in str(err) @@ -191,14 +288,14 @@ def test_split_fields_with_special_delimiter(): Then - Validate the fields were created correctly """ - expected_dict_fields = {'a': 'b', 'c': 'd'} - assert expected_dict_fields == split_fields('a=b,c=d', ',') + expected_dict_fields = {"a": "b", "c": "d"} + assert expected_dict_fields == split_fields("a=b,c=d", ",") - expected_custom_field = {'u_customfield': "Link text<;/a>"} - assert 
expected_custom_field == split_fields("u_customfield=Link text<;/a>", ',') + expected_custom_field = {"u_customfield": "Link text<;/a>"} + assert expected_custom_field == split_fields("u_customfield=Link text<;/a>", ",") with pytest.raises(Exception) as e: - split_fields('a') + split_fields("a") assert "must contain a '=' to specify the keys and values" in str(e) @@ -214,36 +311,59 @@ def test_convert_to_notes_result(): # Note: the 'display_value' time is the local time of the SNOW instance, and the 'value' is in UTC. # The results returned for notes are expected to be in UTC time. - expected_result = {'result': [{'sys_created_on': '2022-11-21 21:50:34', - 'value': 'Second comment\n\n Mirrored from Cortex XSOAR', - 'sys_created_by': 'System Administrator', - 'element': 'comments' - }, - {'sys_created_on': '2022-11-21 20:45:37', - 'value': 'First comment', - 'sys_created_by': 'Test User', - 'element': 'comments' - }]} - assert convert_to_notes_result(RESPONSE_COMMENTS_DISPLAY_VALUE_AFTER_FORMAT, - time_info={'display_date_format': DATE_FORMAT, - 'timezone_offset': timedelta(minutes=-60)}) == expected_result + expected_result = { + "result": [ + { + "sys_created_on": "2022-11-21 21:50:34", + "value": "Second comment\n\n Mirrored from Cortex XSOAR", + "sys_created_by": "System Administrator", + "element": "comments", + }, + { + "sys_created_on": "2022-11-21 20:45:37", + "value": "First comment", + "sys_created_by": "Test User", + "element": "comments", + }, + ] + } + assert ( + convert_to_notes_result( + RESPONSE_COMMENTS_DISPLAY_VALUE_AFTER_FORMAT, + time_info={"display_date_format": DATE_FORMAT, "timezone_offset": timedelta(minutes=-60)}, + ) + == expected_result + ) # Filter comments by creation time (filter is given in UTC): - expected_result = {'result': [{'sys_created_on': '2022-11-21 21:50:34', - 'value': 'Second comment\n\n Mirrored from Cortex XSOAR', - 'sys_created_by': 'System Administrator', - 'element': 'comments' - }]} - assert convert_to_notes_result(RESPONSE_COMMENTS_DISPLAY_VALUE_AFTER_FORMAT, - time_info={'display_date_format': DATE_FORMAT, - 'filter': datetime.strptime('2022-11-21 21:44:37', DATE_FORMAT), - 'timezone_offset': timedelta(minutes=-60)}) == expected_result + expected_result = { + "result": [ + { + "sys_created_on": "2022-11-21 21:50:34", + "value": "Second comment\n\n Mirrored from Cortex XSOAR", + "sys_created_by": "System Administrator", + "element": "comments", + } + ] + } + assert ( + convert_to_notes_result( + RESPONSE_COMMENTS_DISPLAY_VALUE_AFTER_FORMAT, + time_info={ + "display_date_format": DATE_FORMAT, + "filter": datetime.strptime("2022-11-21 21:44:37", DATE_FORMAT), + "timezone_offset": timedelta(minutes=-60), + }, + ) + == expected_result + ) ticket_response = {} - assert convert_to_notes_result(ticket_response, time_info={'display_date_format': DATE_FORMAT}) == {} + assert convert_to_notes_result(ticket_response, time_info={"display_date_format": DATE_FORMAT}) == {} - assert convert_to_notes_result(RESPONSE_COMMENTS_DISPLAY_VALUE_NO_COMMENTS, - time_info={'display_date_format': DATE_FORMAT}) == {'result': []} + assert convert_to_notes_result( + RESPONSE_COMMENTS_DISPLAY_VALUE_NO_COMMENTS, time_info={"display_date_format": DATE_FORMAT} + ) == {"result": []} def test_split_notes(): @@ -262,57 +382,78 @@ def test_split_notes(): # timezone_offset is the difference between UTC and local time, e.g. offset = -60, means that local time is UTC+1. # The 'sys_created_on' time, returned by the command is normalized to UTC timezone. 
-    raw_notes = '2022-11-21 22:50:34 - System Administrator (Additional comments)\nSecond comment\n\n Mirrored from ' \
-                'Cortex XSOAR\n\n2022-11-21 21:45:37 - Test User (Additional comments)\nFirst comment\n\n'
-
-    time_info = {'timezone_offset': timedelta(minutes=0),
-                 'filter': datetime.strptime('2022-11-21 21:44:37', DATE_FORMAT),
-                 'display_date_format': DATE_FORMAT}
-    notes = split_notes(raw_notes, 'comments', time_info)
-    expected_notes = [{'sys_created_on': '2022-11-21 22:50:34',
-                       'value': 'Second comment\n\n Mirrored from Cortex XSOAR',
-                       'sys_created_by': 'System Administrator',
-                       'element': 'comments'
-                       },
-                      {'sys_created_on': '2022-11-21 21:45:37',
-                       'value': 'First comment',
-                       'sys_created_by': 'Test User',
-                       'element': 'comments'
-                       }]
+    raw_notes = (
+        "2022-11-21 22:50:34 - System Administrator (Additional comments)\nSecond comment\n\n Mirrored from "
+        "Cortex XSOAR\n\n2022-11-21 21:45:37 - Test User (Additional comments)\nFirst comment\n\n"
+    )
+
+    time_info = {
+        "timezone_offset": timedelta(minutes=0),
+        "filter": datetime.strptime("2022-11-21 21:44:37", DATE_FORMAT),
+        "display_date_format": DATE_FORMAT,
+    }
+    notes = split_notes(raw_notes, "comments", time_info)
+    expected_notes = [
+        {
+            "sys_created_on": "2022-11-21 22:50:34",
+            "value": "Second comment\n\n Mirrored from Cortex XSOAR",
+            "sys_created_by": "System Administrator",
+            "element": "comments",
+        },
+        {"sys_created_on": "2022-11-21 21:45:37", "value": "First comment", "sys_created_by": "Test User", "element": "comments"},
+    ]
     assert notes == expected_notes

-    raw_notes = '21/11/2022 22:50:34 - System Administrator (Additional comments)\nSecond comment\n\n Mirrored from ' \
-                'Cortex XSOAR\n\n21/11/2022 21:45:37 - Test User (Additional comments)\nFirst comment\n\n'
-    time_info = {'timezone_offset': timedelta(minutes=-60),
-                 'filter': datetime.strptime('2022-11-21 21:44:37', DATE_FORMAT),
-                 'display_date_format': DATE_FORMAT_OPTIONS.get('dd/MM/yyyy')}
-    notes = split_notes(raw_notes, 'comments', time_info)
-    expected_notes = [{'sys_created_on': '2022-11-21 21:50:34',
-                       'value': 'Second comment\n\n Mirrored from Cortex XSOAR',
-                       'sys_created_by': 'System Administrator',
-                       'element': 'comments'
-                       }]
+    raw_notes = (
+        "21/11/2022 22:50:34 - System Administrator (Additional comments)\nSecond comment\n\n Mirrored from "
+        "Cortex XSOAR\n\n21/11/2022 21:45:37 - Test User (Additional comments)\nFirst comment\n\n"
+    )
+    time_info = {
+        "timezone_offset": timedelta(minutes=-60),
+        "filter": datetime.strptime("2022-11-21 21:44:37", DATE_FORMAT),
+        "display_date_format": DATE_FORMAT_OPTIONS.get("dd/MM/yyyy"),
+    }
+    notes = split_notes(raw_notes, "comments", time_info)
+    expected_notes = [
+        {
+            "sys_created_on": "2022-11-21 21:50:34",
+            "value": "Second comment\n\n Mirrored from Cortex XSOAR",
+            "sys_created_by": "System Administrator",
+            "element": "comments",
+        }
+    ]
     assert notes == expected_notes

-    raw_notes = '21.11.2022 22:50:34 - System Administrator (Additional comments)\nSecond comment\n\n Mirrored from ' \
-                'Cortex XSOAR\n\n21.11.2022 21:45:37 - Test User (Additional comments)\nFirst comment\n\n'
-    time_info = {'timezone_offset': timedelta(minutes=-60),
-                 'filter': datetime.strptime('2022-11-21 21:44:37', DATE_FORMAT),
-                 'display_date_format': DATE_FORMAT_OPTIONS.get('dd.MM.yyyy')}
-    notes = split_notes(raw_notes, 'comments', time_info)
-    expected_notes = [{'sys_created_on': '2022-11-21 21:50:34',
-                       'value': 'Second comment\n\n Mirrored from Cortex XSOAR',
-                       'sys_created_by': 'System Administrator',
-                       'element': 'comments'
-                       }]
+    raw_notes = (
+        "21.11.2022 22:50:34 - System Administrator (Additional comments)\nSecond comment\n\n Mirrored from "
+        "Cortex XSOAR\n\n21.11.2022 21:45:37 - Test User (Additional comments)\nFirst comment\n\n"
+    )
+    time_info = {
+        "timezone_offset": timedelta(minutes=-60),
+        "filter": datetime.strptime("2022-11-21 21:44:37", DATE_FORMAT),
+        "display_date_format": DATE_FORMAT_OPTIONS.get("dd.MM.yyyy"),
+    }
+    notes = split_notes(raw_notes, "comments", time_info)
+    expected_notes = [
+        {
+            "sys_created_on": "2022-11-21 21:50:34",
+            "value": "Second comment\n\n Mirrored from Cortex XSOAR",
+            "sys_created_by": "System Administrator",
+            "element": "comments",
+        }
+    ]
     assert notes == expected_notes

-    raw_notes = '11-21-2022 22:50:34 - System Administrator (Additional comments)\nSecond comment\n\n Mirrored from ' \
-                'Cortex XSOAR\n\n11-21-2022 21:45:37 - Test User (Additional comments)\nFirst comment\n\n'
-    time_info = {'timezone_offset': timedelta(minutes=-120),
-                 'filter': datetime.strptime('2022-11-21 21:44:37', DATE_FORMAT),
-                 'display_date_format': DATE_FORMAT_OPTIONS.get('MM-dd-yyyy')}
-    notes = split_notes(raw_notes, 'comments', time_info)
+    raw_notes = (
+        "11-21-2022 22:50:34 - System Administrator (Additional comments)\nSecond comment\n\n Mirrored from "
+        "Cortex XSOAR\n\n11-21-2022 21:45:37 - Test User (Additional comments)\nFirst comment\n\n"
+    )
+    time_info = {
+        "timezone_offset": timedelta(minutes=-120),
+        "filter": datetime.strptime("2022-11-21 21:44:37", DATE_FORMAT),
+        "display_date_format": DATE_FORMAT_OPTIONS.get("MM-dd-yyyy"),
+    }
+    notes = split_notes(raw_notes, "comments", time_info)
     assert len(notes) == 0
@@ -325,36 +466,36 @@ def test_get_timezone_offset():
     Then:
     - Assert the offset between the UTC and the instance times are correct.
""" - full_response = {'sys_created_on': {'display_value': '2022-12-07 05:38:52', 'value': '2022-12-07 13:38:52'}} + full_response = {"sys_created_on": {"display_value": "2022-12-07 05:38:52", "value": "2022-12-07 13:38:52"}} offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT) assert offset == timedelta(minutes=480) - full_response = {'sys_created_on': {'display_value': '12-07-2022 15:47:34', 'value': '2022-12-07 13:47:34'}} - offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get('MM-dd-yyyy')) + full_response = {"sys_created_on": {"display_value": "12-07-2022 15:47:34", "value": "2022-12-07 13:47:34"}} + offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get("MM-dd-yyyy")) assert offset == timedelta(minutes=-120) - full_response = {'sys_created_on': {'display_value': '06/12/2022 23:38:52', 'value': '2022-12-07 09:38:52'}} - offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get('dd/MM/yyyy')) + full_response = {"sys_created_on": {"display_value": "06/12/2022 23:38:52", "value": "2022-12-07 09:38:52"}} + offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get("dd/MM/yyyy")) assert offset == timedelta(minutes=600) - full_response = {'sys_created_on': {'display_value': '06/12/2022 23:38:52 PM', 'value': '2022-12-07 09:38:52'}} - offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get('dd/MM/yyyy')) + full_response = {"sys_created_on": {"display_value": "06/12/2022 23:38:52 PM", "value": "2022-12-07 09:38:52"}} + offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get("dd/MM/yyyy")) assert offset == timedelta(minutes=600) - full_response = {'sys_created_on': {'display_value': '07.12.2022 0:38:52', 'value': '2022-12-06 19:38:52'}} - offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get('dd.MM.yyyy')) + full_response = {"sys_created_on": {"display_value": "07.12.2022 0:38:52", "value": "2022-12-06 19:38:52"}} + offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get("dd.MM.yyyy")) assert offset == timedelta(minutes=-300) - full_response = {'sys_created_on': {'display_value': 'Dec-07-2022 00:38:52', 'value': '2022-12-06 19:38:52'}} - offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get('mmm-dd-yyyy')) + full_response = {"sys_created_on": {"display_value": "Dec-07-2022 00:38:52", "value": "2022-12-06 19:38:52"}} + offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get("mmm-dd-yyyy")) assert offset == timedelta(minutes=-300) - full_response = {'sys_created_on': {'display_value': 'Dec-07-2022 00:38:52 AM', 'value': '2022-12-06 19:38:52'}} - offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get('mmm-dd-yyyy')) + full_response = {"sys_created_on": {"display_value": "Dec-07-2022 00:38:52 AM", "value": "2022-12-06 19:38:52"}} + offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get("mmm-dd-yyyy")) assert offset == timedelta(minutes=-300) - full_response = {'sys_created_on': {'display_value': 'Dec-07-2022 00:38:52 AM ', 'value': '2022-12-06 19:38:52'}} - offset = get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get('mmm-dd-yyyy')) + full_response = {"sys_created_on": {"display_value": "Dec-07-2022 00:38:52 AM ", "value": "2022-12-06 19:38:52"}} + offset = 
get_timezone_offset(full_response, display_date_format=DATE_FORMAT_OPTIONS.get("mmm-dd-yyyy")) assert offset == timedelta(minutes=-300) @@ -371,19 +512,31 @@ def test_get_ticket_notes_command_success(mocker): - Ensure the expected API call is made - Validate the expected CommandResults are returned """ - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name') - args = {'id': 'sys_id'} + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + ) + args = {"id": "sys_id"} - mock_send_request = mocker.patch.object(Client, 'send_request') + mock_send_request = mocker.patch.object(Client, "send_request") mock_send_request.return_value = RESPONSE_GET_TICKET_NOTES result = get_ticket_notes_command(client, args, {}) assert isinstance(result[0], CommandResults) assert mock_send_request.called assert len(result[0].raw_response.get("result")) == 5 - assert result[0].outputs_prefix == 'ServiceNow.Ticket' + assert result[0].outputs_prefix == "ServiceNow.Ticket" assert result[0].outputs == EXPECTED_GET_TICKET_NOTES @@ -400,20 +553,33 @@ def test_get_ticket_notes_command_use_display_value(mocker): - Ensure the expected API call is made - Validate the expected CommandResults are returned """ - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name', use_display_value=True, - display_date_format="yyyy-MM-dd") - args = {'id': 'sys_id'} + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + use_display_value=True, + display_date_format="yyyy-MM-dd", + ) + args = {"id": "sys_id"} - mock_send_request = mocker.patch.object(Client, 'send_request') + mock_send_request = mocker.patch.object(Client, "send_request") mock_send_request.return_value = RESPONSE_COMMENTS_DISPLAY_VALUE result = get_ticket_notes_command(client, args, {}) assert isinstance(result[0], CommandResults) assert mock_send_request.called assert len(result[0].raw_response.get("result")) == 2 - assert result[0].outputs_prefix == 'ServiceNow.Ticket' + assert result[0].outputs_prefix == "ServiceNow.Ticket" assert result[0].outputs == EXPECTED_GET_TICKET_NOTES_DISPLAY_VALUE @@ -430,13 +596,26 @@ def test_get_ticket_notes_command_use_display_value_no_comments(mocker): - Ensure the expected API call is made - Validate the expected CommandResults are returned """ - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name', use_display_value=True, - display_date_format="yyyy-MM-dd") - args = {'id': 'sys_id'} + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + use_display_value=True, + 
display_date_format="yyyy-MM-dd", + ) + args = {"id": "sys_id"} - mock_send_request = mocker.patch.object(Client, 'send_request') + mock_send_request = mocker.patch.object(Client, "send_request") mock_send_request.return_value = RESPONSE_COMMENTS_DISPLAY_VALUE_NO_COMMENTS result = get_ticket_notes_command(client, args, {}) @@ -445,32 +624,33 @@ def test_get_ticket_notes_command_use_display_value_no_comments(mocker): assert result[0].raw_response == "No comment found on ticket sys_id." -@pytest.mark.parametrize("notes, params, expected", [ - ( - [ - { - "value": "First comment", - "sys_created_by": "Test User", - "sys_created_on": "2022-11-21 20:45:37", - "element": "comments" - } - ], - { - "comment_tag_from_servicenow": "CommentFromServiceNow" - }, - [ - { - "Type": 1, - "Category": None, - "Contents": "Type: comments\nCreated By: Test User\nCreated On: 2022-11-21 20:45:37\nFirst comment", - "ContentsFormat": None, - "Tags": ["CommentFromServiceNow"], - "Note": True, - "EntryContext": {"comments_and_work_notes": "First comment"} - } - ] - ) -]) +@pytest.mark.parametrize( + "notes, params, expected", + [ + ( + [ + { + "value": "First comment", + "sys_created_by": "Test User", + "sys_created_on": "2022-11-21 20:45:37", + "element": "comments", + } + ], + {"comment_tag_from_servicenow": "CommentFromServiceNow"}, + [ + { + "Type": 1, + "Category": None, + "Contents": "Type: comments\nCreated By: Test User\nCreated On: 2022-11-21 20:45:37\nFirst comment", + "ContentsFormat": None, + "Tags": ["CommentFromServiceNow"], + "Note": True, + "EntryContext": {"comments_and_work_notes": "First comment"}, + } + ], + ) + ], +) def test_get_entries_for_notes_with_comment(notes, params, expected): """ Given @@ -484,51 +664,125 @@ def test_get_entries_for_notes_with_comment(notes, params, expected): assert get_entries_for_notes(notes, params) == expected -@pytest.mark.parametrize('command, args, response, expected_result, expected_auto_extract', [ - (update_ticket_command, {'id': '1234', 'impact': '2'}, RESPONSE_UPDATE_TICKET, EXPECTED_UPDATE_TICKET, True), - (update_ticket_command, {'id': '1234', 'ticket_type': 'sc_req_item', 'approval': 'requested'}, - RESPONSE_UPDATE_TICKET_SC_REQ, EXPECTED_UPDATE_TICKET_SC_REQ, True), - (update_ticket_command, {'id': '1234', 'severity': '3', 'additional_fields': "approval=rejected"}, - RESPONSE_UPDATE_TICKET_ADDITIONAL, EXPECTED_UPDATE_TICKET_ADDITIONAL, True), - (create_ticket_command, {'active': 'true', 'severity': "3", 'description': "creating a test ticket", - 'sla_due': "2020-10-10 10:10:11"}, RESPONSE_CREATE_TICKET, EXPECTED_CREATE_TICKET, True), - (create_ticket_command, {'active': 'true', 'severity': "3", 'description': "creating a test ticket", - 'sla_due': "2020-10-10 10:10:11"}, RESPONSE_CREATE_TICKET_WITH_OUT_JSON, - EXPECTED_CREATE_TICKET_WITH_OUT_JSON, True), - (query_tickets_command, {'limit': "3", 'query': "impact<2^short_descriptionISNOTEMPTY", 'ticket_type': "incident"}, - RESPONSE_QUERY_TICKETS, EXPECTED_QUERY_TICKETS, True), - (query_tickets_command, - {"ticket_type": "incident", "query": "number=INC0000001", "system_params": "sysparm_exclude_reference_link=true"}, - RESPONSE_QUERY_TICKETS_EXCLUDE_REFERENCE_LINK, EXPECTED_QUERY_TICKETS_EXCLUDE_REFERENCE_LINK, True), - (upload_file_command, {'id': "sys_id", 'file_id': "entry_id", 'file_name': 'test_file'}, RESPONSE_UPLOAD_FILE, - EXPECTED_UPLOAD_FILE, True), - (get_record_command, {'table_name': "alm_asset", 'id': "sys_id", 'fields': "asset_tag,display_name"}, - RESPONSE_GET_RECORD, 
EXPECTED_GET_RECORD, True), - (update_record_command, {'name': "alm_asset", 'id': "1234", 'custom_fields': "display_name=test4"}, - RESPONSE_UPDATE_RECORD, EXPECTED_UPDATE_RECORD, True), - (create_record_command, {'table_name': "alm_asset", 'fields': "asset_tag=P4325434;display_name=my_test_record"}, - RESPONSE_CREATE_RECORD, EXPECTED_CREATE_RECORD, True), - (query_table_command, {'table_name': "alm_asset", 'fields': "asset_tag,sys_updated_by,display_name", - 'query': "display_nameCONTAINSMacBook", 'limit': 3}, RESPONSE_QUERY_TABLE, - EXPECTED_QUERY_TABLE, False), - (query_table_command, { - 'table_name': "sc_task", 'system_params': - 'sysparm_display_value=all;sysparm_exclude_reference_link=True;sysparm_query=number=TASK0000001', - 'fields': "approval,state,escalation,number,description" - }, RESPONSE_QUERY_TABLE_SYS_PARAMS, EXPECTED_QUERY_TABLE_SYS_PARAMS, False), - (list_table_fields_command, {'table_name': "alm_asset"}, RESPONSE_LIST_TABLE_FIELDS, EXPECTED_LIST_TABLE_FIELDS, - False), - (query_computers_command, {'computer_id': '1234'}, RESPONSE_QUERY_COMPUTERS, EXPECTED_QUERY_COMPUTERS, False), - (get_table_name_command, {'label': "ACE"}, RESPONSE_GET_TABLE_NAME, EXPECTED_GET_TABLE_NAME, False), - (add_tag_command, {'id': "123", 'tag_id': '1234', 'title': 'title'}, RESPONSE_ADD_TAG, EXPECTED_ADD_TAG, True), - (query_items_command, {'name': "ipad", 'limit': '2'}, RESPONSE_QUERY_ITEMS, EXPECTED_QUERY_ITEMS, True), - (get_item_details_command, {'id': "1234"}, RESPONSE_ITEM_DETAILS, EXPECTED_ITEM_DETAILS, True), - (create_order_item_command, {'id': "1234", 'quantity': "3", - 'variables': "Additional_software_requirements=best_pc"}, - RESPONSE_CREATE_ITEM_ORDER, EXPECTED_CREATE_ITEM_ORDER, True), - (document_route_to_table, {'queue_id': 'queue_id', 'document_id': 'document_id'}, RESPONSE_DOCUMENT_ROUTE, - EXPECTED_DOCUMENT_ROUTE, True), -]) # noqa: E124 +@pytest.mark.parametrize( + "command, args, response, expected_result, expected_auto_extract", + [ + (update_ticket_command, {"id": "1234", "impact": "2"}, RESPONSE_UPDATE_TICKET, EXPECTED_UPDATE_TICKET, True), + ( + update_ticket_command, + {"id": "1234", "ticket_type": "sc_req_item", "approval": "requested"}, + RESPONSE_UPDATE_TICKET_SC_REQ, + EXPECTED_UPDATE_TICKET_SC_REQ, + True, + ), + ( + update_ticket_command, + {"id": "1234", "severity": "3", "additional_fields": "approval=rejected"}, + RESPONSE_UPDATE_TICKET_ADDITIONAL, + EXPECTED_UPDATE_TICKET_ADDITIONAL, + True, + ), + ( + create_ticket_command, + {"active": "true", "severity": "3", "description": "creating a test ticket", "sla_due": "2020-10-10 10:10:11"}, + RESPONSE_CREATE_TICKET, + EXPECTED_CREATE_TICKET, + True, + ), + ( + create_ticket_command, + {"active": "true", "severity": "3", "description": "creating a test ticket", "sla_due": "2020-10-10 10:10:11"}, + RESPONSE_CREATE_TICKET_WITH_OUT_JSON, + EXPECTED_CREATE_TICKET_WITH_OUT_JSON, + True, + ), + ( + query_tickets_command, + {"limit": "3", "query": "impact<2^short_descriptionISNOTEMPTY", "ticket_type": "incident"}, + RESPONSE_QUERY_TICKETS, + EXPECTED_QUERY_TICKETS, + True, + ), + ( + query_tickets_command, + {"ticket_type": "incident", "query": "number=INC0000001", "system_params": "sysparm_exclude_reference_link=true"}, + RESPONSE_QUERY_TICKETS_EXCLUDE_REFERENCE_LINK, + EXPECTED_QUERY_TICKETS_EXCLUDE_REFERENCE_LINK, + True, + ), + ( + upload_file_command, + {"id": "sys_id", "file_id": "entry_id", "file_name": "test_file"}, + RESPONSE_UPLOAD_FILE, + EXPECTED_UPLOAD_FILE, + True, + ), + ( + get_record_command, + 
{"table_name": "alm_asset", "id": "sys_id", "fields": "asset_tag,display_name"}, + RESPONSE_GET_RECORD, + EXPECTED_GET_RECORD, + True, + ), + ( + update_record_command, + {"name": "alm_asset", "id": "1234", "custom_fields": "display_name=test4"}, + RESPONSE_UPDATE_RECORD, + EXPECTED_UPDATE_RECORD, + True, + ), + ( + create_record_command, + {"table_name": "alm_asset", "fields": "asset_tag=P4325434;display_name=my_test_record"}, + RESPONSE_CREATE_RECORD, + EXPECTED_CREATE_RECORD, + True, + ), + ( + query_table_command, + { + "table_name": "alm_asset", + "fields": "asset_tag,sys_updated_by,display_name", + "query": "display_nameCONTAINSMacBook", + "limit": 3, + }, + RESPONSE_QUERY_TABLE, + EXPECTED_QUERY_TABLE, + False, + ), + ( + query_table_command, + { + "table_name": "sc_task", + "system_params": "sysparm_display_value=all;sysparm_exclude_reference_link=True;sysparm_query=number=TASK0000001", + "fields": "approval,state,escalation,number,description", + }, + RESPONSE_QUERY_TABLE_SYS_PARAMS, + EXPECTED_QUERY_TABLE_SYS_PARAMS, + False, + ), + (list_table_fields_command, {"table_name": "alm_asset"}, RESPONSE_LIST_TABLE_FIELDS, EXPECTED_LIST_TABLE_FIELDS, False), + (query_computers_command, {"computer_id": "1234"}, RESPONSE_QUERY_COMPUTERS, EXPECTED_QUERY_COMPUTERS, False), + (get_table_name_command, {"label": "ACE"}, RESPONSE_GET_TABLE_NAME, EXPECTED_GET_TABLE_NAME, False), + (add_tag_command, {"id": "123", "tag_id": "1234", "title": "title"}, RESPONSE_ADD_TAG, EXPECTED_ADD_TAG, True), + (query_items_command, {"name": "ipad", "limit": "2"}, RESPONSE_QUERY_ITEMS, EXPECTED_QUERY_ITEMS, True), + (get_item_details_command, {"id": "1234"}, RESPONSE_ITEM_DETAILS, EXPECTED_ITEM_DETAILS, True), + ( + create_order_item_command, + {"id": "1234", "quantity": "3", "variables": "Additional_software_requirements=best_pc"}, + RESPONSE_CREATE_ITEM_ORDER, + EXPECTED_CREATE_ITEM_ORDER, + True, + ), + ( + document_route_to_table, + {"queue_id": "queue_id", "document_id": "document_id"}, + RESPONSE_DOCUMENT_ROUTE, + EXPECTED_DOCUMENT_ROUTE, + True, + ), + ], +) # noqa: E124 def test_commands(command, args, response, expected_result, expected_auto_extract, mocker): """Unit test Given @@ -542,23 +796,49 @@ def test_commands(command, args, response, expected_result, expected_auto_extrac - create the context validate the entry context """ - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name', display_date_format='yyyy-MM-dd') - mocker.patch.object(client, 'send_request', return_value=response) + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + display_date_format="yyyy-MM-dd", + ) + mocker.patch.object(client, "send_request", return_value=response) result = command(client, args) assert expected_result == result[1] # entry context is found in the 2nd place in the result of the command assert expected_auto_extract == result[3] # ignore_auto_extract is in the 4th place in the result of the command -@pytest.mark.parametrize('command, args, response, expected_hr, expected_auto_extract', [ - (delete_ticket_command, {'id': '1234'}, {}, 'Ticket with ID 1234 was successfully deleted.', True), - (add_link_command, {'id': '1234', 'link': "http://www.demisto.com", 
'text': 'demsito_link'}, RESPONSE_ADD_LINK, - EXPECTED_ADD_LINK_HR, True), - (add_comment_command, {'id': "1234", 'comment': "Nice work!"}, RESPONSE_ADD_COMMENT, EXPECTED_ADD_COMMENT_HR, True), - (delete_record_command, {'table_name': "alm_asset", 'id': '1234'}, {}, - 'ServiceNow record with ID 1234 was successfully deleted.', True), -]) # noqa: E124 +@pytest.mark.parametrize( + "command, args, response, expected_hr, expected_auto_extract", + [ + (delete_ticket_command, {"id": "1234"}, {}, "Ticket with ID 1234 was successfully deleted.", True), + ( + add_link_command, + {"id": "1234", "link": "http://www.demisto.com", "text": "demsito_link"}, + RESPONSE_ADD_LINK, + EXPECTED_ADD_LINK_HR, + True, + ), + (add_comment_command, {"id": "1234", "comment": "Nice work!"}, RESPONSE_ADD_COMMENT, EXPECTED_ADD_COMMENT_HR, True), + ( + delete_record_command, + {"table_name": "alm_asset", "id": "1234"}, + {}, + "ServiceNow record with ID 1234 was successfully deleted.", + True, + ), + ], +) # noqa: E124 def test_no_ec_commands(command, args, response, expected_hr, expected_auto_extract, mocker): """Unit test Given @@ -572,37 +852,73 @@ def test_no_ec_commands(command, args, response, expected_hr, expected_auto_extr - create the context validate the human readable """ - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name') - mocker.patch.object(client, 'send_request', return_value=response) + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + ) + mocker.patch.object(client, "send_request", return_value=response) result = command(client, args) assert expected_hr in result[0] # HR is found in the 1st place in the result of the command assert expected_auto_extract == result[3] # ignore_auto_extract is in the 4th place in the result of the command def test_delete_attachment_command(mocker): - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name') + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + ) - mocker.patch.object(client, 'delete_attachment', return_value=None) + mocker.patch.object(client, "delete_attachment", return_value=None) result = delete_attachment_command(client=client, args={"file_sys_id": "1234"}) - assert 'Attachment with Sys ID 1234 was successfully deleted.' in result[0] + assert "Attachment with Sys ID 1234 was successfully deleted." 
in result[0] def test_delete_attachment_command_failed(mocker): - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name') + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + ) - mocker.patch.object(client, 'delete_attachment', return_value="Error") + mocker.patch.object(client, "delete_attachment", return_value="Error") with pytest.raises(DemistoException) as e: delete_attachment_command(client=client, args={"file_sys_id": "1234"}) assert "Error: No record found. Record doesn't exist or ACL restricts the record retrieval." in str(e) -@freeze_time('2022-05-01 12:52:29') +@freeze_time("2022-05-01 12:52:29") def test_fetch_incidents(mocker): """Unit test Given @@ -617,24 +933,34 @@ def test_fetch_incidents(mocker): - Validate The length of the results. - Ensure the incident sys IDs are stored in integration context for the first mirroring. """ - RESPONSE_FETCH['result'][0]['opened_at'] = (datetime.utcnow() - timedelta(minutes=15)).strftime('%Y-%m-%d %H:%M:%S') - RESPONSE_FETCH['result'][1]['opened_at'] = (datetime.utcnow() - timedelta(minutes=8)).strftime('%Y-%m-%d %H:%M:%S') - mocker.patch( - 'CommonServerPython.get_fetch_run_time_range', return_value=('2022-05-01 01:05:07', '2022-05-01 12:08:29') - ) - mocker.patch('ServiceNowv2.parse_dict_ticket_fields', return_value=RESPONSE_FETCH['result']) - mocker.patch.object(demisto, 'params', return_value={"mirror_notes_for_new_incidents": True}) - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', '2 days', 'sysparm_query', sysparm_limit=10, - timestamp_field='opened_at', ticket_type='incident', get_attachments=False, incident_name='number') - mocker.patch.object(client, 'send_request', return_value=RESPONSE_FETCH) + RESPONSE_FETCH["result"][0]["opened_at"] = (datetime.utcnow() - timedelta(minutes=15)).strftime("%Y-%m-%d %H:%M:%S") + RESPONSE_FETCH["result"][1]["opened_at"] = (datetime.utcnow() - timedelta(minutes=8)).strftime("%Y-%m-%d %H:%M:%S") + mocker.patch("CommonServerPython.get_fetch_run_time_range", return_value=("2022-05-01 01:05:07", "2022-05-01 12:08:29")) + mocker.patch("ServiceNowv2.parse_dict_ticket_fields", return_value=RESPONSE_FETCH["result"]) + mocker.patch.object(demisto, "params", return_value={"mirror_notes_for_new_incidents": True}) + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "2 days", + "sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="number", + ) + mocker.patch.object(client, "send_request", return_value=RESPONSE_FETCH) incidents = fetch_incidents(client) assert len(incidents) == 2 - assert incidents[0].get('name') == 'ServiceNow Incident INC0000040' + assert incidents[0].get("name") == "ServiceNow Incident INC0000040" assert demisto.getIntegrationContext()["last_fetched_incident_ids"] == ["sys_id1", "sys_id2"] -@freeze_time('2022-05-01 12:52:29') +@freeze_time("2022-05-01 12:52:29") def test_fetch_incidents_with_changed_fetch_limit(mocker): """Unit test Given @@ -648,24 +974,34 @@ def test_fetch_incidents_with_changed_fetch_limit(mocker): - run the fetch incidents 
command using the Client Validate The number of fetch_limit in the last_run """ - RESPONSE_FETCH['result'][0]['opened_at'] = (datetime.utcnow() - timedelta(minutes=15)).strftime('%Y-%m-%d %H:%M:%S') - RESPONSE_FETCH['result'][1]['opened_at'] = (datetime.utcnow() - timedelta(minutes=8)).strftime('%Y-%m-%d %H:%M:%S') - mocker.patch( - 'CommonServerPython.get_fetch_run_time_range', return_value=('2022-05-01 01:05:07', '2022-05-01 12:08:29') - ) - mocker.patch('ServiceNowv2.parse_dict_ticket_fields', return_value=RESPONSE_FETCH['result']) - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', '2 days', 'sysparm_query', sysparm_limit=20, - timestamp_field='opened_at', ticket_type='incident', get_attachments=False, incident_name='number') - mocker.patch.object(client, 'send_request', return_value=RESPONSE_FETCH) - mocker.patch.object(demisto, 'getLastRun', return_value={'limit': 10}) - set_last_run = mocker.patch.object(demisto, 'setLastRun') + RESPONSE_FETCH["result"][0]["opened_at"] = (datetime.utcnow() - timedelta(minutes=15)).strftime("%Y-%m-%d %H:%M:%S") + RESPONSE_FETCH["result"][1]["opened_at"] = (datetime.utcnow() - timedelta(minutes=8)).strftime("%Y-%m-%d %H:%M:%S") + mocker.patch("CommonServerPython.get_fetch_run_time_range", return_value=("2022-05-01 01:05:07", "2022-05-01 12:08:29")) + mocker.patch("ServiceNowv2.parse_dict_ticket_fields", return_value=RESPONSE_FETCH["result"]) + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "2 days", + "sysparm_query", + sysparm_limit=20, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="number", + ) + mocker.patch.object(client, "send_request", return_value=RESPONSE_FETCH) + mocker.patch.object(demisto, "getLastRun", return_value={"limit": 10}) + set_last_run = mocker.patch.object(demisto, "setLastRun") fetch_incidents(client) - assert set_last_run.call_args[0][0].get('limit') == 20 + assert set_last_run.call_args[0][0].get("limit") == 20 -@freeze_time('2022-05-01 12:52:29') +@freeze_time("2022-05-01 12:52:29") def test_fetch_incidents_with_attachments(mocker): """Unit test Given @@ -680,28 +1016,37 @@ def test_fetch_incidents_with_attachments(mocker): - run the fetch incidents command using the Client Validate The length of the results and the attachment content. 
""" - RESPONSE_FETCH_ATTACHMENTS_TICKET['result'][0]['opened_at'] = ( - datetime.utcnow() - timedelta(minutes=15) - ).strftime('%Y-%m-%d %H:%M:%S') - mocker.patch( - 'CommonServerPython.get_fetch_run_time_range', return_value=('2022-05-01 01:05:07', '2022-05-01 12:08:29') + RESPONSE_FETCH_ATTACHMENTS_TICKET["result"][0]["opened_at"] = (datetime.utcnow() - timedelta(minutes=15)).strftime( + "%Y-%m-%d %H:%M:%S" + ) + mocker.patch("CommonServerPython.get_fetch_run_time_range", return_value=("2022-05-01 01:05:07", "2022-05-01 12:08:29")) + mocker.patch("ServiceNowv2.parse_dict_ticket_fields", return_value=RESPONSE_FETCH["result"]) + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "2 days", + "sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=True, + incident_name="number", ) - mocker.patch('ServiceNowv2.parse_dict_ticket_fields', return_value=RESPONSE_FETCH['result']) - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', '2 days', 'sysparm_query', sysparm_limit=10, - timestamp_field='opened_at', ticket_type='incident', get_attachments=True, - incident_name='number') - mocker.patch.object(client, 'send_request', return_value=RESPONSE_FETCH_ATTACHMENTS_TICKET) - mocker.patch.object(client, 'get_ticket_attachment_entries', return_value=RESPONSE_FETCH_ATTACHMENTS_FILE) + mocker.patch.object(client, "send_request", return_value=RESPONSE_FETCH_ATTACHMENTS_TICKET) + mocker.patch.object(client, "get_ticket_attachment_entries", return_value=RESPONSE_FETCH_ATTACHMENTS_FILE) incidents = fetch_incidents(client) assert len(incidents) == 1 - assert incidents[0].get('attachment')[0]['name'] == 'wireframe' - assert incidents[0].get('attachment')[0]['path'] == 'file_id' + assert incidents[0].get("attachment")[0]["name"] == "wireframe" + assert incidents[0].get("attachment")[0]["path"] == "file_id" -@freeze_time('2022-05-01 12:52:29') +@freeze_time("2022-05-01 12:52:29") def test_fetch_incidents_with_incident_name(mocker): """Unit test Given @@ -715,19 +1060,28 @@ def test_fetch_incidents_with_incident_name(mocker): - run the fetch incidents command using the Client Validate The length of the results. 
""" - RESPONSE_FETCH['result'][0]['opened_at'] = (datetime.utcnow() - timedelta(minutes=15)).strftime('%Y-%m-%d %H:%M:%S') - RESPONSE_FETCH['result'][1]['opened_at'] = (datetime.utcnow() - timedelta(minutes=8)).strftime('%Y-%m-%d %H:%M:%S') - mocker.patch('ServiceNowv2.parse_dict_ticket_fields', return_value=RESPONSE_FETCH['result']) - mocker.patch( - 'CommonServerPython.get_fetch_run_time_range', return_value=('2022-05-01 01:05:07', '2022-05-01 12:08:29') - ) - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', '2 days', 'sysparm_query', sysparm_limit=10, - timestamp_field='opened_at', ticket_type='incident', - get_attachments=False, incident_name='description') - mocker.patch.object(client, 'send_request', return_value=RESPONSE_FETCH) + RESPONSE_FETCH["result"][0]["opened_at"] = (datetime.utcnow() - timedelta(minutes=15)).strftime("%Y-%m-%d %H:%M:%S") + RESPONSE_FETCH["result"][1]["opened_at"] = (datetime.utcnow() - timedelta(minutes=8)).strftime("%Y-%m-%d %H:%M:%S") + mocker.patch("ServiceNowv2.parse_dict_ticket_fields", return_value=RESPONSE_FETCH["result"]) + mocker.patch("CommonServerPython.get_fetch_run_time_range", return_value=("2022-05-01 01:05:07", "2022-05-01 12:08:29")) + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "2 days", + "sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + ) + mocker.patch.object(client, "send_request", return_value=RESPONSE_FETCH) incidents = fetch_incidents(client) - assert incidents[0].get('name') == 'ServiceNow Incident Unable to access Oregon mail server. Is it down?' + assert incidents[0].get("name") == "ServiceNow Incident Unable to access Oregon mail server. Is it down?" 
def start_freeze_time(timestamp): @@ -739,113 +1093,99 @@ def start_freeze_time(timestamp): class TestFetchIncidentsWithLookBack: LAST_RUN = {} - API_TIME_FORMAT = '%Y-%m-%d %H:%M:%S' - FREEZE_TIMESTAMP = '2022-05-01 12:52:29' + API_TIME_FORMAT = "%Y-%m-%d %H:%M:%S" + FREEZE_TIMESTAMP = "2022-05-01 12:52:29" def set_last_run(self, new_last_run): self.LAST_RUN = new_last_run @pytest.mark.parametrize( - 'start_incidents, phase2_incident, phase3_incident, look_back', + "start_incidents, phase2_incident, phase3_incident, look_back", [ ( { - 'result': [ + "result": [ { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=10) - ).strftime(API_TIME_FORMAT), - 'severity': '2', - 'number': '2', - 'sys_id': '2' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=10)).strftime(API_TIME_FORMAT), + "severity": "2", + "number": "2", + "sys_id": "2", }, { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=5) - ).strftime(API_TIME_FORMAT), - 'severity': '1', - 'number': '4', - 'sys_id': '4' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=5)).strftime(API_TIME_FORMAT), + "severity": "1", + "number": "4", + "sys_id": "4", }, { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=2) - ).strftime(API_TIME_FORMAT), - 'severity': '2', - 'number': '5', - 'sys_id': '5' - } + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=2)).strftime(API_TIME_FORMAT), + "severity": "2", + "number": "5", + "sys_id": "5", + }, ] }, { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=8) - ).strftime(API_TIME_FORMAT), - 'severity': '1', - 'number': '3', - 'sys_id': '3' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=8)).strftime(API_TIME_FORMAT), + "severity": "1", + "number": "3", + "sys_id": "3", }, { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=11) - ).strftime(API_TIME_FORMAT), - 'severity': '1', - 'number': '1', - 'sys_id': '1' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=11)).strftime(API_TIME_FORMAT), + "severity": "1", + "number": "1", + "sys_id": "1", }, - 15 + 15, ), ( { - 'result': [ + "result": [ { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=3, minutes=20) - ).strftime(API_TIME_FORMAT), - 'severity': '2', - 'number': '2', - 'sys_id': '2' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=3, minutes=20)).strftime( + API_TIME_FORMAT + ), + "severity": "2", + "number": "2", + "sys_id": "2", }, { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=2, minutes=26) - ).strftime(API_TIME_FORMAT), - 'severity': '1', - 'number': '4', - 'sys_id': '4' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=2, minutes=26)).strftime( + API_TIME_FORMAT + ), + "severity": "1", + "number": "4", + "sys_id": "4", }, { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=1, minutes=20) - ).strftime(API_TIME_FORMAT), - 'severity': '2', - 'number': '5', - 'sys_id': '5' - } + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=1, minutes=20)).strftime( + API_TIME_FORMAT + ), + "severity": "2", + "number": "5", + "sys_id": "5", + }, ] }, { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=2, minutes=45) - ).strftime(API_TIME_FORMAT), - 'severity': '1', - 'number': '3', - 'sys_id': '3' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=2, 
minutes=45)).strftime(API_TIME_FORMAT),
+                "severity": "1",
+                "number": "3",
+                "sys_id": "3",
             },
             {
-                'opened_at': (
-                    start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=3, minutes=50)
-                ).strftime(API_TIME_FORMAT),
-                'severity': '1',
-                'number': '1',
-                'sys_id': '1'
+                "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=3, minutes=50)).strftime(API_TIME_FORMAT),
+                "severity": "1",
+                "number": "1",
+                "sys_id": "1",
             },
-            1000
-        )
-    ]
+                1000,
+            ),
+        ],
     )
     def test_fetch_incidents_with_look_back_greater_than_zero(
-            self, mocker, start_incidents, phase2_incident, phase3_incident, look_back
+        self, mocker, start_incidents, phase2_incident, phase3_incident, look_back
     ):
         """
         Given
@@ -864,162 +1204,160 @@ def test_fetch_incidents_with_look_back_greater_than_zero(
         - make sure that incidents that were already fetched would not be fetched again.
         """
         client = Client(
-            server_url='', sc_server_url='', cr_server_url='', username='', password='', verify=False,
-            fetch_time='6 hours', sysparm_query='stateNOT IN6,7^assignment_group=123', sysparm_limit=10,
-            timestamp_field='opened_at', ticket_type='incident', get_attachments=False, incident_name='number',
-            look_back=look_back
+            server_url="",
+            sc_server_url="",
+            cr_server_url="",
+            username="",
+            password="",
+            verify=False,
+            fetch_time="6 hours",
+            sysparm_query="stateNOT IN6,7^assignment_group=123",
+            sysparm_limit=10,
+            timestamp_field="opened_at",
+            ticket_type="incident",
+            get_attachments=False,
+            incident_name="number",
+            look_back=look_back,
         )

         # reset last run
         self.LAST_RUN = {}
-        mocker.patch.object(demisto, 'getLastRun', return_value=self.LAST_RUN)
-        mocker.patch.object(demisto, 'setLastRun', side_effect=self.set_last_run)
+        mocker.patch.object(demisto, "getLastRun", return_value=self.LAST_RUN)
+        mocker.patch.object(demisto, "setLastRun", side_effect=self.set_last_run)

-        mocker.patch.object(client, 'send_request', return_value=start_incidents)
+        mocker.patch.object(client, "send_request", return_value=start_incidents)

         # first fetch
         tickets = fetch_incidents(client=client)
         assert len(tickets) == 3
-        for expected_incident_id, ticket in zip(['2', '4', '5'], tickets):
-            assert ticket.get('name') == f'ServiceNow Incident {expected_incident_id}'
+        for expected_incident_id, ticket in zip(["2", "4", "5"], tickets):
+            assert ticket.get("name") == f"ServiceNow Incident {expected_incident_id}"

         # second fetch preparation
-        start_incidents.get('result').append(phase2_incident)
+        start_incidents.get("result").append(phase2_incident)

         # second fetch
         tickets = fetch_incidents(client=client)
         assert len(tickets) == 1
-        assert tickets[0].get('name') == 'ServiceNow Incident 3'
+        assert tickets[0].get("name") == "ServiceNow Incident 3"

         # third fetch preparation
-        start_incidents.get('result').append(phase3_incident)
+        start_incidents.get("result").append(phase3_incident)

         # third fetch
         tickets = fetch_incidents(client=client)
         assert len(tickets) == 1
-        assert tickets[0].get('name') == 'ServiceNow Incident 1'
+        assert tickets[0].get("name") == "ServiceNow Incident 1"

         # fourth fetch
         tickets = fetch_incidents(client=client)
         assert len(tickets) == 0

     @pytest.mark.parametrize(
-        'incidents, phase2_incident, phase3_incident',
+        "incidents, phase2_incident, phase3_incident",
         [
             (
                 {
-                    'result': [
+                "result": [
                     {
-                        'opened_at': (
-                            start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=10)
-                        ).strftime(API_TIME_FORMAT),
-                        'severity': '2',
-                        'number': '1',
-                        'sys_id': '1'
+                    "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - 
timedelta(minutes=10)).strftime(API_TIME_FORMAT), + "severity": "2", + "number": "1", + "sys_id": "1", }, { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=8) - ).strftime(API_TIME_FORMAT), - 'severity': '1', - 'number': '2', - 'sys_id': '2' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=8)).strftime(API_TIME_FORMAT), + "severity": "1", + "number": "2", + "sys_id": "2", }, { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=7) - ).strftime(API_TIME_FORMAT), - 'severity': '2', - 'number': '3', - 'sys_id': '3' - } + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=7)).strftime(API_TIME_FORMAT), + "severity": "2", + "number": "3", + "sys_id": "3", + }, ] }, { - 'result': [ + "result": [ { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=5) - ).strftime(API_TIME_FORMAT), - 'severity': '1', - 'number': '4', - 'sys_id': '4' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=5)).strftime(API_TIME_FORMAT), + "severity": "1", + "number": "4", + "sys_id": "4", } ] }, { - 'result': [ + "result": [ { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=4) - ).strftime(API_TIME_FORMAT), - 'severity': '1', - 'number': '5', - 'sys_id': '5' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(minutes=4)).strftime(API_TIME_FORMAT), + "severity": "1", + "number": "5", + "sys_id": "5", } ] }, ), ( { - 'result': [ + "result": [ { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=8, minutes=51) - ).strftime(API_TIME_FORMAT), - 'severity': '2', - 'number': '1', - 'sys_id': '1' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=8, minutes=51)).strftime( + API_TIME_FORMAT + ), + "severity": "2", + "number": "1", + "sys_id": "1", }, { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=7, minutes=45) - ).strftime(API_TIME_FORMAT), - 'severity': '1', - 'number': '2', - 'sys_id': '2' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=7, minutes=45)).strftime( + API_TIME_FORMAT + ), + "severity": "1", + "number": "2", + "sys_id": "2", }, { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=7, minutes=44) - ).strftime(API_TIME_FORMAT), - 'severity': '2', - 'number': '3', - 'sys_id': '3' - } + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=7, minutes=44)).strftime( + API_TIME_FORMAT + ), + "severity": "2", + "number": "3", + "sys_id": "3", + }, ] }, { - 'result': [ + "result": [ { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=7, minutes=44) - ).strftime(API_TIME_FORMAT), - 'severity': '1', - 'number': '4', - 'sys_id': '4' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=7, minutes=44)).strftime( + API_TIME_FORMAT + ), + "severity": "1", + "number": "4", + "sys_id": "4", } ] }, { - 'result': [ + "result": [ { - 'opened_at': ( - start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=1, minutes=34) - ).strftime(API_TIME_FORMAT), - 'severity': '1', - 'number': '5', - 'sys_id': '5' + "opened_at": (start_freeze_time(FREEZE_TIMESTAMP) - timedelta(hours=1, minutes=34)).strftime( + API_TIME_FORMAT + ), + "severity": "1", + "number": "5", + "sys_id": "5", } ] - } - ) - ] + }, + ), + ], ) - def test_fetch_incidents_with_look_back_equals_zero( - self, mocker, incidents, phase2_incident, phase3_incident - ): + def test_fetch_incidents_with_look_back_equals_zero(self, mocker, incidents, 
phase2_incident, phase3_incident):
         """
         Given
         - fetch incidents parameters with any look back according to their opened time (normal fetch incidents).

@@ -1036,46 +1374,56 @@ def test_fetch_incidents_with_look_back_equals_zero(
         - fourth fetch - should fetch nothing as there are no new incidents that match the query
         """
         client = Client(
-            server_url='', sc_server_url='', cr_server_url='', username='', password='', verify=False,
-            fetch_time='12 hours', sysparm_query='stateNOT IN6,7^assignment_group=123', sysparm_limit=10,
-            timestamp_field='opened_at', ticket_type='incident', get_attachments=False, incident_name='number',
-            look_back=0
+            server_url="",
+            sc_server_url="",
+            cr_server_url="",
+            username="",
+            password="",
+            verify=False,
+            fetch_time="12 hours",
+            sysparm_query="stateNOT IN6,7^assignment_group=123",
+            sysparm_limit=10,
+            timestamp_field="opened_at",
+            ticket_type="incident",
+            get_attachments=False,
+            incident_name="number",
+            look_back=0,
         )

         # reset last fetch and tickets
         self.LAST_RUN = {}
-        mocker.patch.object(demisto, 'getLastRun', return_value=self.LAST_RUN)
-        mocker.patch.object(demisto, 'setLastRun', side_effect=self.set_last_run)
-        mocker.patch.object(client, 'send_request', return_value=incidents)
+        mocker.patch.object(demisto, "getLastRun", return_value=self.LAST_RUN)
+        mocker.patch.object(demisto, "setLastRun", side_effect=self.set_last_run)
+        mocker.patch.object(client, "send_request", return_value=incidents)

         # first fetch
         tickets = fetch_incidents(client=client)
         assert len(tickets) == 3
-        for expected_incident_id, ticket in zip(['1', '2', '3'], tickets):
-            assert ticket.get('name') == f'ServiceNow Incident {expected_incident_id}'
+        for expected_incident_id, ticket in zip(["1", "2", "3"], tickets):
+            assert ticket.get("name") == f"ServiceNow Incident {expected_incident_id}"

         # second fetch preparation
         incidents = phase2_incident
-        mocker.patch.object(client, 'send_request', return_value=incidents)
+        mocker.patch.object(client, "send_request", return_value=incidents)

         # second fetch
         tickets = fetch_incidents(client=client)
         assert len(tickets) == 1
-        assert tickets[0].get('name') == 'ServiceNow Incident 4'
+        assert tickets[0].get("name") == "ServiceNow Incident 4"

         # third fetch preparation
         incidents = phase3_incident
-        mocker.patch.object(client, 'send_request', return_value=incidents)
+        mocker.patch.object(client, "send_request", return_value=incidents)

         # third fetch
         tickets = fetch_incidents(client=client)
         assert len(tickets) == 1
-        assert tickets[0].get('name') == 'ServiceNow Incident 5'
+        assert tickets[0].get("name") == "ServiceNow Incident 5"

         # fourth fetch preparation
-        incidents = {'result': []}
-        mocker.patch.object(client, 'send_request', return_value=incidents)
+        incidents = {"result": []}
+        mocker.patch.object(client, "send_request", return_value=incidents)

         # fourth fetch
         tickets = fetch_incidents(client=client)
@@ -1093,43 +1441,36 @@ def test_incident_name_is_initialized(mocker, requests_mock):
     Then:
     - Verify expected exception is raised as default incident name value is not in response
     """
-    url = 'https://test.service-now.com'
+    url = "https://test.service-now.com"
     mocker.patch.object(
         demisto,
-        'params',
+        "params",
         return_value={
-            'isFetch': True,
-            'url': url,
-            'credentials': {
-                'identifier': 'identifier',
-                'password': 'password',
+            "isFetch": True,
+            "url": url,
+            "credentials": {
+                "identifier": "identifier",
+                "password": "password",
             },
-            'incident_name': None,
-            'file_tag_from_service_now': 'FromServiceNow',
-            'file_tag_to_service_now': 
'ToServiceNow', - 'comment_tag': 'comments', - 'comment_tag_from_servicenow': 'CommentFromServiceNow', - 'work_notes_tag': 'work_notes', - 'work_notes_tag_from_servicenow': 'WorkNoteFromServiceNow' - } + "incident_name": None, + "file_tag_from_service_now": "FromServiceNow", + "file_tag_to_service_now": "ToServiceNow", + "comment_tag": "comments", + "comment_tag_from_servicenow": "CommentFromServiceNow", + "work_notes_tag": "work_notes", + "work_notes_tag_from_servicenow": "WorkNoteFromServiceNow", + }, ) - mocker.patch.object(demisto, 'command', return_value='test-module') + mocker.patch.object(demisto, "command", return_value="test-module") def return_error_mock(message, error): raise - mocker.patch('ServiceNowv2.return_error', side_effect=return_error_mock) - requests_mock.get( - f'{url}/api/now/table/incident?sysparm_limit=1', - json={ - 'result': [{ - 'opened_at': 'sometime' - }] - } - ) + mocker.patch("ServiceNowv2.return_error", side_effect=return_error_mock) + requests_mock.get(f"{url}/api/now/table/incident?sysparm_limit=1", json={"result": [{"opened_at": "sometime"}]}) with pytest.raises(ValueError) as e: main() - assert str(e.value) == 'The field [number] does not exist in the ticket.' + assert str(e.value) == "The field [number] does not exist in the ticket." def test_file_tags_names_are_the_same_main_flow(mocker): @@ -1144,17 +1485,12 @@ def test_file_tags_names_are_the_same_main_flow(mocker): - make sure an exception is raised """ import ServiceNowv2 - mocker.patch.object( - demisto, - 'params', - return_value={'file_tag_from_service_now': 'ServiceNow', 'file_tag': 'ServiceNow'} - ) - mocker.patch.object(ServiceNowv2, 'get_server_url', return_value='test') + + mocker.patch.object(demisto, "params", return_value={"file_tag_from_service_now": "ServiceNow", "file_tag": "ServiceNow"}) + mocker.patch.object(ServiceNowv2, "get_server_url", return_value="test") with pytest.raises( - Exception, - match=re.escape( - 'File Entry Tag To ServiceNow and File Entry Tag From ServiceNow cannot be the same name [ServiceNow].' 
- ) + Exception, + match=re.escape("File Entry Tag To ServiceNow and File Entry Tag From ServiceNow cannot be the same name [ServiceNow]."), ): main() @@ -1171,35 +1507,49 @@ def test_not_authenticated_retry_positive(requests_mock, mocker): - Verify debug messages - Ensure the send_request function runs successfully without exceptions """ - mocker.patch.object(demisto, 'debug') - client = Client('http://server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name') - requests_mock.get('http://server_url', [ - { - 'status_code': 401, - 'json': { - 'error': {'message': 'User Not Authenticated', 'detail': 'Required to provide Auth information'}, - 'status': 'failure' - } - }, - { - 'status_code': 401, - 'json': { - 'error': {'message': 'User Not Authenticated', 'detail': 'Required to provide Auth information'}, - 'status': 'failure' - } - }, - { - 'status_code': 200, - 'json': {} - } - ]) - assert client.send_request('') == {} + mocker.patch.object(demisto, "debug") + client = Client( + "http://server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + ) + requests_mock.get( + "http://server_url", + [ + { + "status_code": 401, + "json": { + "error": {"message": "User Not Authenticated", "detail": "Required to provide Auth information"}, + "status": "failure", + }, + }, + { + "status_code": 401, + "json": { + "error": {"message": "User Not Authenticated", "detail": "Required to provide Auth information"}, + "status": "failure", + }, + }, + {"status_code": 200, "json": {}}, + ], + ) + assert client.send_request("") == {} assert demisto.debug.call_count == 2 debug = demisto.debug.call_args_list - expected_debug_msg = "Got status code 401 - {'error': {'message': 'User Not Authenticated', " \ - "'detail': 'Required to provide Auth information'}, 'status': 'failure'}. Retrying ..." + expected_debug_msg = ( + "Got status code 401 - {'error': {'message': 'User Not Authenticated', " + "'detail': 'Required to provide Auth information'}, 'status': 'failure'}. Retrying ..." 
+ ) assert debug[0][0][0] == expected_debug_msg assert debug[1][0][0] == expected_debug_msg @@ -1216,42 +1566,61 @@ def test_not_authenticated_retry_negative(requests_mock, mocker): - Verify debug messages - Ensure the send_request function fails and raises expected error message """ - mocker.patch.object(demisto, 'debug') - client = Client('http://server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name') - requests_mock.get('http://server_url', [ - { - 'status_code': 401, - 'json': { - 'error': {'message': 'User Not Authenticated', 'detail': 'Required to provide Auth information'}, - 'status': 'failure' - } - }, - { - 'status_code': 401, - 'json': { - 'error': {'message': 'User Not Authenticated', 'detail': 'Required to provide Auth information'}, - 'status': 'failure' - } - }, - { - 'status_code': 401, - 'json': { - 'error': {'message': 'User Not Authenticated', 'detail': 'Required to provide Auth information'}, - 'status': 'failure' - } - } - ]) + mocker.patch.object(demisto, "debug") + client = Client( + "http://server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + ) + requests_mock.get( + "http://server_url", + [ + { + "status_code": 401, + "json": { + "error": {"message": "User Not Authenticated", "detail": "Required to provide Auth information"}, + "status": "failure", + }, + }, + { + "status_code": 401, + "json": { + "error": {"message": "User Not Authenticated", "detail": "Required to provide Auth information"}, + "status": "failure", + }, + }, + { + "status_code": 401, + "json": { + "error": {"message": "User Not Authenticated", "detail": "Required to provide Auth information"}, + "status": "failure", + }, + }, + ], + ) with pytest.raises(Exception) as ex: - client.send_request('') - assert str(ex.value) == "Got status code 401 with url http://server_url with body b'{\"error\": {\"message\": " \ - "\"User Not Authenticated\", \"detail\": \"Required to provide Auth information\"}, " \ - "\"status\": \"failure\"}' with headers {}" + client.send_request("") + assert ( + str(ex.value) == 'Got status code 401 with url http://server_url with body b\'{"error": {"message": ' + '"User Not Authenticated", "detail": "Required to provide Auth information"}, ' + '"status": "failure"}\' with headers {}' + ) assert demisto.debug.call_count == 3 debug = demisto.debug.call_args_list - expected_debug_msg = "Got status code 401 - {'error': {'message': 'User Not Authenticated', " \ - "'detail': 'Required to provide Auth information'}, 'status': 'failure'}. Retrying ..." + expected_debug_msg = ( + "Got status code 401 - {'error': {'message': 'User Not Authenticated', " + "'detail': 'Required to provide Auth information'}, 'status': 'failure'}. Retrying ..." + ) assert debug[0][0][0] == expected_debug_msg assert debug[1][0][0] == expected_debug_msg assert debug[2][0][0] == expected_debug_msg @@ -1269,36 +1638,27 @@ def test_oauth_authentication(mocker, requests_mock): - Verify that oauth authorization flow is used by checking that the get_access_token is called. 
""" from unittest.mock import MagicMock - url = 'https://test.service-now.com' - mocker.patch.object(demisto, 'command', return_value='servicenow-oauth-test') - mocker.patch.object(ServiceNowClient, 'get_access_token') - requests_mock.get( - f'{url}/api/now/table/incident?sysparm_limit=1', - json={ - 'result': [{ - 'opened_at': 'sometime' - }] - } - ) + + url = "https://test.service-now.com" + mocker.patch.object(demisto, "command", return_value="servicenow-oauth-test") + mocker.patch.object(ServiceNowClient, "get_access_token") + requests_mock.get(f"{url}/api/now/table/incident?sysparm_limit=1", json={"result": [{"opened_at": "sometime"}]}) # Assert that get_access_token is called when `Use OAuth Login` checkbox is selected: mocker.patch.object( demisto, - 'params', + "params", return_value={ - 'url': url, - 'credentials': { - 'identifier': 'client_id', - 'password': 'client_secret' - }, - 'use_oauth': True, - 'file_tag_from_service_now': 'FromServiceNow', - 'file_tag': 'ForServiceNow', - 'comment_tag': 'comments', - 'comment_tag_from_servicenow': 'CommentFromServiceNow', - 'work_notes_tag': 'work_notes', - 'work_notes_tag_from_servicenow': 'WorkNoteFromServiceNow' - } + "url": url, + "credentials": {"identifier": "client_id", "password": "client_secret"}, + "use_oauth": True, + "file_tag_from_service_now": "FromServiceNow", + "file_tag": "ForServiceNow", + "comment_tag": "comments", + "comment_tag_from_servicenow": "CommentFromServiceNow", + "work_notes_tag": "work_notes", + "work_notes_tag_from_servicenow": "WorkNoteFromServiceNow", + }, ) ServiceNowClient.get_access_token = MagicMock() main() @@ -1323,21 +1683,46 @@ def test_test_module(mocker): (b) Validate that an error is returned, indicating that the `Test` button can't be used when using OAuth 2.0. 
""" - mocker.patch('ServiceNowv2.parse_date_range', return_value=("2019-02-23 08:14:21", 'never mind')) - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', 'verify', 'fetch_time', - 'sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type='incident', get_attachments=False, incident_name='description') - mocker.patch.object(client, 'send_request', return_value=RESPONSE_FETCH) + mocker.patch("ServiceNowv2.parse_date_range", return_value=("2019-02-23 08:14:21", "never mind")) + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + ) + mocker.patch.object(client, "send_request", return_value=RESPONSE_FETCH) result = module(client) - assert result[0] == 'ok' - - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', 'verify', 'fetch_time', - 'sysparm_query', sysparm_limit=10, timestamp_field='opened_at', ticket_type='incident', - get_attachments=False, incident_name='description', oauth_params=OAUTH_PARAMS) + assert result[0] == "ok" + + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + oauth_params=OAUTH_PARAMS, + ) with pytest.raises(Exception) as e: module(client) - assert 'Test button cannot be used when using OAuth 2.0' in str(e) + assert "Test button cannot be used when using OAuth 2.0" in str(e) def test_oauth_test_module(mocker): @@ -1354,20 +1739,45 @@ def test_oauth_test_module(mocker): - (a) validate that an error is returned, indicating that the function should be called when using OAuth only. - (b) Validate that the instance was configured successfully. """ - mocker.patch('ServiceNowv2.parse_date_range', return_value=("2019-02-23 08:14:21", 'never mind')) - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', 'verify', 'fetch_time', - 'sysparm_query', sysparm_limit=10, timestamp_field='opened_at', ticket_type='incident', - get_attachments=False, incident_name='description') + mocker.patch("ServiceNowv2.parse_date_range", return_value=("2019-02-23 08:14:21", "never mind")) + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + ) with pytest.raises(Exception) as e: oauth_test_module(client) - assert 'command should be used only when using OAuth 2.0 authorization.' in str(e) - - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', 'verify', 'fetch_time', - 'sysparm_query', sysparm_limit=10, timestamp_field='opened_at', ticket_type='incident', - get_attachments=False, incident_name='description', oauth_params=OAUTH_PARAMS) - mocker.patch.object(client, 'send_request', return_value=RESPONSE_FETCH) + assert "command should be used only when using OAuth 2.0 authorization." 
in str(e) + + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + oauth_params=OAUTH_PARAMS, + ) + mocker.patch.object(client, "send_request", return_value=RESPONSE_FETCH) result = oauth_test_module(client) - assert '### Instance Configured Successfully.' in result[0] + assert "### Instance Configured Successfully." in result[0] def test_oauth_login_command(mocker): @@ -1383,20 +1793,45 @@ def test_oauth_login_command(mocker): - (a) validate that an error is returned, indicating that the function should be called when using OAuth only. - (b) Validate that the login was successful. """ - mocker.patch('ServiceNowv2.ServiceNowClient.login') - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', 'verify', 'fetch_time', - 'sysparm_query', sysparm_limit=10, timestamp_field='opened_at', ticket_type='incident', - get_attachments=False, incident_name='description') + mocker.patch("ServiceNowv2.ServiceNowClient.login") + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + ) with pytest.raises(Exception) as e: - login_command(client, args={'username': 'username', 'password': 'password'}) - assert '!servicenow-oauth-login command can be used only when using OAuth 2.0 authorization' in str(e) - - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', 'verify', 'fetch_time', - 'sysparm_query', sysparm_limit=10, timestamp_field='opened_at', ticket_type='incident', - get_attachments=False, incident_name='description', oauth_params=OAUTH_PARAMS) - mocker.patch.object(client, 'send_request', return_value=RESPONSE_FETCH) - result = login_command(client, args={'username': 'username', 'password': 'password'}) - assert '### Logged in successfully.' in result[0] + login_command(client, args={"username": "username", "password": "password"}) + assert "!servicenow-oauth-login command can be used only when using OAuth 2.0 authorization" in str(e) + + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + oauth_params=OAUTH_PARAMS, + ) + mocker.patch.object(client, "send_request", return_value=RESPONSE_FETCH) + result = login_command(client, args={"username": "username", "password": "password"}) + assert "### Logged in successfully." 
in result[0] def test_sysparm_input_display_value(mocker, requests_mock): @@ -1412,30 +1847,49 @@ def test_sysparm_input_display_value(mocker, requests_mock): Validate that the sysparm_input_display_value parameter has the correct value """ - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', cr_server_url='cr_server_url', - username='username', password='password', verify=False, fetch_time='fetch_time', - sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type='incident', get_attachments=False, incident_name='description') + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + ) - mocker.patch.object(demisto, 'args', return_value={'input_display_value': 'true', - 'table_name': "alm_asset", - 'fields': "asset_tag=P4325434;display_name=my_test_record" - } - ) - requests_mock.post('https://server_url.com/table/alm_asset?sysparm_input_display_value=True', json={}) + mocker.patch.object( + demisto, + "args", + return_value={ + "input_display_value": "true", + "table_name": "alm_asset", + "fields": "asset_tag=P4325434;display_name=my_test_record", + }, + ) + requests_mock.post("https://server_url.com/table/alm_asset?sysparm_input_display_value=True", json={}) # will raise a requests_mock.exceptions.NoMockAddress if the url address will not be as given in the requests_mock create_record_command(client, demisto.args()) - assert requests_mock.request_history[0].method == 'POST' - - mocker.patch.object(demisto, 'args', return_value={'input_display_value': 'false', - 'table_name': "alm_asset", - 'fields': "asset_tag=P4325434;display_name=my_test_record" - } - ) - requests_mock.post('https://server_url.com/table/alm_asset?sysparm_input_display_value=False', json={}) + assert requests_mock.request_history[0].method == "POST" + + mocker.patch.object( + demisto, + "args", + return_value={ + "input_display_value": "false", + "table_name": "alm_asset", + "fields": "asset_tag=P4325434;display_name=my_test_record", + }, + ) + requests_mock.post("https://server_url.com/table/alm_asset?sysparm_input_display_value=False", json={}) # will raise a requests_mock.exceptions.NoMockAddress if the url address will not be as given in the requests_mock create_record_command(client, demisto.args()) - assert requests_mock.request_history[1].method == 'POST' + assert requests_mock.request_history[1].method == "POST" def test_get_mapping_fields(): @@ -1448,10 +1902,21 @@ def test_get_mapping_fields(): Then - the result fits the expected mapping. 
""" - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', cr_server_url='cr_server_url', - username='username', password='password', verify=False, fetch_time='fetch_time', - sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type='incident', get_attachments=False, incident_name='description') + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + ) res = get_mapping_fields_command(client) assert res.extract_mapping() == EXPECTED_MAPPING @@ -1468,24 +1933,34 @@ def test_get_remote_data(mocker): - The ticket was updated with the entries. """ - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', - cr_server_url='cr_server_url', username='username', - password='password', verify=False, fetch_time='fetch_time', - sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type='incident', get_attachments=False, incident_name='description') + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + ) - args = {'id': 'sys_id', 'lastUpdate': 0} + args = {"id": "sys_id", "lastUpdate": 0} params = {"file_tag_from_service_now": "FromServiceNow"} - mocker.patch.object(client, 'get', return_value=RESPONSE_TICKET_MIRROR) - mocker.patch.object(client, 'get_ticket_attachment_entries', return_value=RESPONSE_MIRROR_FILE_ENTRY) - mocker.patch.object(client, 'query', return_value=MIRROR_COMMENTS_RESPONSE) - mocker.patch.object(client, 'get', return_value=RESPONSE_ASSIGNMENT_GROUP) + mocker.patch.object(client, "get", return_value=RESPONSE_TICKET_MIRROR) + mocker.patch.object(client, "get_ticket_attachment_entries", return_value=RESPONSE_MIRROR_FILE_ENTRY) + mocker.patch.object(client, "query", return_value=MIRROR_COMMENTS_RESPONSE) + mocker.patch.object(client, "get", return_value=RESPONSE_ASSIGNMENT_GROUP) res = get_remote_data_command(client, args, params) - assert res[1]['Tags'] == ['FromServiceNow'] - assert res[1]['File'] == 'test.txt' - assert res[2]['Contents'] == 'Type: comments\nCreated By: admin\nCreated On: 2020-08-17 06:31:49\nThis is a comment' + assert res[1]["Tags"] == ["FromServiceNow"] + assert res[1]["File"] == "test.txt" + assert res[2]["Contents"] == "Type: comments\nCreated By: admin\nCreated On: 2020-08-17 06:31:49\nThis is a comment" def test_get_remote_data_last_fetched_incidents_entries(mocker): @@ -1498,23 +1973,33 @@ def test_get_remote_data_last_fetched_incidents_entries(mocker): Then - The ticket was updated with the entries even the lastUpdate is higher than modification time. 
""" - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', - cr_server_url='cr_server_url', username='username', - password='password', verify=False, fetch_time='fetch_time', - sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type='incident', get_attachments=False, incident_name='description') + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + ) - args = {'id': 'sys_id', 'lastUpdate': 9999999999} + args = {"id": "sys_id", "lastUpdate": 9999999999} params = {"file_tag_from_service_now": "FromServiceNow"} demisto.setIntegrationContext({"last_fetched_incident_ids": ["sys_id"]}) - mocker.patch.object(client, 'get', side_effect=[RESPONSE_TICKET_MIRROR, RESPONSE_ASSIGNMENT_GROUP]) - mocker.patch.object(client, 'get_ticket_attachment_entries', return_value=[]) - client_query_mocker = mocker.patch.object(client, 'query', return_value=MIRROR_COMMENTS_RESPONSE) + mocker.patch.object(client, "get", side_effect=[RESPONSE_TICKET_MIRROR, RESPONSE_ASSIGNMENT_GROUP]) + mocker.patch.object(client, "get_ticket_attachment_entries", return_value=[]) + client_query_mocker = mocker.patch.object(client, "query", return_value=MIRROR_COMMENTS_RESPONSE) res = get_remote_data_command(client, args, params) - assert 'sys_created_on' not in client_query_mocker.call_args[0][3] - assert res[1]['Contents'] == 'Type: comments\nCreated By: admin\nCreated On: 2020-08-17 06:31:49\nThis is a comment' + assert "sys_created_on" not in client_query_mocker.call_args[0][3] + assert res[1]["Contents"] == "Type: comments\nCreated By: admin\nCreated On: 2020-08-17 06:31:49\nThis is a comment" assert not demisto.getIntegrationContext()["last_fetched_incident_ids"] @@ -1528,23 +2013,33 @@ def test_get_remote_data_no_last_fetched_incidents(mocker): Then - The ticket is not updated with the entries. 
""" - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', - cr_server_url='cr_server_url', username='username', - password='password', verify=False, fetch_time='fetch_time', - sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type='incident', get_attachments=False, incident_name='description') + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + ) - args = {'id': 'sys_id', 'lastUpdate': 9999999999} + args = {"id": "sys_id", "lastUpdate": 9999999999} params = {"file_tag_from_service_now": "FromServiceNow"} demisto.setIntegrationContext({"last_fetched_incident_ids": []}) - mocker.patch.object(demisto, 'params', return_value={"isFetch": True}) - mocker.patch.object(client, 'get', side_effect=[RESPONSE_TICKET_MIRROR, RESPONSE_ASSIGNMENT_GROUP]) - mocker.patch.object(client, 'get_ticket_attachment_entries', return_value=[]) - client_query_mocker = mocker.patch.object(client, 'query', return_value={'result': []}) + mocker.patch.object(demisto, "params", return_value={"isFetch": True}) + mocker.patch.object(client, "get", side_effect=[RESPONSE_TICKET_MIRROR, RESPONSE_ASSIGNMENT_GROUP]) + mocker.patch.object(client, "get_ticket_attachment_entries", return_value=[]) + client_query_mocker = mocker.patch.object(client, "query", return_value={"result": []}) res = get_remote_data_command(client, args, params) - assert 'sys_created_on' in client_query_mocker.call_args[0][3] + assert "sys_created_on" in client_query_mocker.call_args[0][3] assert len(res) == 1 assert not res[0] @@ -1560,24 +2055,35 @@ def test_get_remote_data_last_fetched_incidents_use_display_value(mocker): Then - The ticket was updated with the entries even the lastUpdate is higher than modification time. 
""" - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', - cr_server_url='cr_server_url', username='username', - password='password', verify=False, fetch_time='fetch_time', - sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type='incident', get_attachments=False, incident_name='description', - use_display_value=True, display_date_format='yyyy-MM-dd') + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + use_display_value=True, + display_date_format="yyyy-MM-dd", + ) - args = {'id': 'sys_id', 'lastUpdate': 9999999999} + args = {"id": "sys_id", "lastUpdate": 9999999999} params = {"file_tag_from_service_now": "FromServiceNow"} demisto.setIntegrationContext({"last_fetched_incident_ids": ["sys_id"]}) - mocker.patch.object(client, 'get', side_effect=[RESPONSE_QUERY_TABLE_SYS_PARAMS, RESPONSE_ASSIGNMENT_GROUP]) - mocker.patch.object(client, 'get_ticket_attachment_entries', return_value=[]) - client_query_mocker = mocker.patch.object(ServiceNowv2, 'convert_to_notes_result', return_value=MIRROR_COMMENTS_RESPONSE) + mocker.patch.object(client, "get", side_effect=[RESPONSE_QUERY_TABLE_SYS_PARAMS, RESPONSE_ASSIGNMENT_GROUP]) + mocker.patch.object(client, "get_ticket_attachment_entries", return_value=[]) + client_query_mocker = mocker.patch.object(ServiceNowv2, "convert_to_notes_result", return_value=MIRROR_COMMENTS_RESPONSE) res = get_remote_data_command(client, args, params) - assert 'filter' not in client_query_mocker.call_args[0][1] - assert res[1]['Contents'] == 'Type: comments\nCreated By: admin\nCreated On: 2020-08-17 06:31:49\nThis is a comment' + assert "filter" not in client_query_mocker.call_args[0][1] + assert res[1]["Contents"] == "Type: comments\nCreated By: admin\nCreated On: 2020-08-17 06:31:49\nThis is a comment" assert not demisto.getIntegrationContext()["last_fetched_incident_ids"] @@ -1594,12 +2100,11 @@ def test_assigned_to_field_no_user(): class Client: def get(self, table, value, no_record_found_res): - return {'results': {}} + return {"results": {}} - assigned_to = {'link': 'https://test.service-now.com/api/now/table/sys_user/oscar@example.com', - 'value': 'oscar@example.com'} + assigned_to = {"link": "https://test.service-now.com/api/now/table/sys_user/oscar@example.com", "value": "oscar@example.com"} res = check_assigned_to_field(Client(), assigned_to) - assert res == '' + assert res == "" def test_assigned_to_field_user_exists(): @@ -1617,25 +2122,28 @@ class Client: def get(self, table, value, no_record_found_res): return USER_RESPONSE - assigned_to = {'link': 'https://test.service-now.com/api/now/table/sys_user/oscar@example.com', - 'value': 'oscar@example.com'} + assigned_to = {"link": "https://test.service-now.com/api/now/table/sys_user/oscar@example.com", "value": "oscar@example.com"} res = check_assigned_to_field(Client(), assigned_to) - assert res == 'oscar@example.com' + assert res == "oscar@example.com" -CLOSING_RESPONSE = {'dbotIncidentClose': True, 'closeNotes': 'Test', 'closeReason': 'Resolved'} -CLOSING_RESPONSE_CUSTOM = {'dbotIncidentClose': True, 'closeNotes': 'Test', 'closeReason': 'Test'} +CLOSING_RESPONSE = {"dbotIncidentClose": True, "closeNotes": "Test", 
"closeReason": "Resolved"} +CLOSING_RESPONSE_CUSTOM = {"dbotIncidentClose": True, "closeNotes": "Test", "closeReason": "Test"} -closed_ticket_state = (RESPONSE_CLOSING_TICKET_MIRROR_CLOSED, { - 'close_incident': 'closed'}, 'closed_at', CLOSING_RESPONSE) -resolved_ticket_state = (RESPONSE_CLOSING_TICKET_MIRROR_RESOLVED, { - 'close_incident': 'resolved'}, 'resolved_at', CLOSING_RESPONSE) -custom_ticket_state = (RESPONSE_CLOSING_TICKET_MIRROR_CUSTOM, - {'close_incident': 'closed', 'server_close_custom_state': '9=Test'}, '', CLOSING_RESPONSE_CUSTOM) +closed_ticket_state = (RESPONSE_CLOSING_TICKET_MIRROR_CLOSED, {"close_incident": "closed"}, "closed_at", CLOSING_RESPONSE) +resolved_ticket_state = (RESPONSE_CLOSING_TICKET_MIRROR_RESOLVED, {"close_incident": "resolved"}, "resolved_at", CLOSING_RESPONSE) +custom_ticket_state = ( + RESPONSE_CLOSING_TICKET_MIRROR_CUSTOM, + {"close_incident": "closed", "server_close_custom_state": "9=Test"}, + "", + CLOSING_RESPONSE_CUSTOM, +) -@pytest.mark.parametrize('response_closing_ticket_mirror, parameters, time, closing_response', - [closed_ticket_state, resolved_ticket_state, custom_ticket_state]) +@pytest.mark.parametrize( + "response_closing_ticket_mirror, parameters, time, closing_response", + [closed_ticket_state, resolved_ticket_state, custom_ticket_state], +) def test_get_remote_data_closing_incident(mocker, response_closing_ticket_mirror, parameters, time, closing_response): """ Given: @@ -1651,22 +2159,32 @@ def test_get_remote_data_closing_incident(mocker, response_closing_ticket_mirror - Closed notes exists. """ - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', - cr_server_url="cr_server_url", username='username', - password='password', verify=False, fetch_time='fetch_time', - sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type='sc_task', get_attachments=False, incident_name='description') + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="sc_task", + get_attachments=False, + incident_name="description", + ) - args = {'id': 'sys_id', 'lastUpdate': 0} + args = {"id": "sys_id", "lastUpdate": 0} params = parameters - mocker.patch.object(client, 'get', return_value=response_closing_ticket_mirror) - mocker.patch.object(client, 'get_ticket_attachment_entries', return_value=[]) - mocker.patch.object(client, 'query', return_value=MIRROR_COMMENTS_RESPONSE) + mocker.patch.object(client, "get", return_value=response_closing_ticket_mirror) + mocker.patch.object(client, "get_ticket_attachment_entries", return_value=[]) + mocker.patch.object(client, "query", return_value=MIRROR_COMMENTS_RESPONSE) res = get_remote_data_command(client, args, params) if time: assert time in res[0] - assert closing_response == res[2]['Contents'] + assert closing_response == res[2]["Contents"] def test_get_remote_data_closing_incident_with_different_closing_state(mocker): @@ -1683,21 +2201,31 @@ def test_get_remote_data_closing_incident_with_different_closing_state(mocker): - Validate that the incident does not get closed """ - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', - cr_server_url="cr_server_url", username='username', - password='password', verify=False, fetch_time='fetch_time', - 
sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type='sc_task', get_attachments=False, incident_name='description') + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="sc_task", + get_attachments=False, + incident_name="description", + ) - args = {'id': 'sys_id', 'lastUpdate': 0} - params = {'close_incident': 'closed', 'server_close_custom_state': '6=Design'} - mocker.patch.object(client, 'get', return_value=RESPONSE_CLOSING_TICKET_MIRROR_CUSTOM) - mocker.patch.object(client, 'get_ticket_attachment_entries', return_value=[]) - mocker.patch.object(client, 'query', return_value=MIRROR_COMMENTS_RESPONSE) + args = {"id": "sys_id", "lastUpdate": 0} + params = {"close_incident": "closed", "server_close_custom_state": "6=Design"} + mocker.patch.object(client, "get", return_value=RESPONSE_CLOSING_TICKET_MIRROR_CUSTOM) + mocker.patch.object(client, "get_ticket_attachment_entries", return_value=[]) + mocker.patch.object(client, "query", return_value=MIRROR_COMMENTS_RESPONSE) res = get_remote_data_command(client, args, params) assert len(res) == 2 # This means that the entry is of type Note, which does not indicate the closing of the incident - assert res[1].get('Note', False) is True + assert res[1].get("Note", False) is True def test_get_remote_data_no_attachment(mocker): @@ -1712,22 +2240,32 @@ def test_get_remote_data_no_attachment(mocker): - The ticket was updated with no attachment. """ - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', - cr_server_url="cr_server_url", username='username', - password='password', verify=False, fetch_time='fetch_time', - sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type='incident', get_attachments=False, incident_name='description') + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + ) - args = {'id': 'sys_id', 'lastUpdate': 0} + args = {"id": "sys_id", "lastUpdate": 0} params = {} - mocker.patch.object(client, 'get', return_value=RESPONSE_TICKET_MIRROR) - mocker.patch.object(client, 'get_ticket_attachments', return_value=[]) - mocker.patch.object(client, 'get_ticket_attachment_entries', return_value=[]) - mocker.patch.object(client, 'query', return_value=MIRROR_COMMENTS_RESPONSE) - mocker.patch.object(client, 'get', return_value=RESPONSE_ASSIGNMENT_GROUP) + mocker.patch.object(client, "get", return_value=RESPONSE_TICKET_MIRROR) + mocker.patch.object(client, "get_ticket_attachments", return_value=[]) + mocker.patch.object(client, "get_ticket_attachment_entries", return_value=[]) + mocker.patch.object(client, "query", return_value=MIRROR_COMMENTS_RESPONSE) + mocker.patch.object(client, "get", return_value=RESPONSE_ASSIGNMENT_GROUP) res = get_remote_data_command(client, args, params) - assert res[1]['Contents'] == 'Type: comments\nCreated By: admin\nCreated On: 2020-08-17 06:31:49\nThis is a comment' + assert res[1]["Contents"] == "Type: comments\nCreated By: 
admin\nCreated On: 2020-08-17 06:31:49\nThis is a comment"
     assert len(res) == 2
 
 
@@ -1744,35 +2282,45 @@ def test_get_remote_data_no_entries(mocker):
     - The checked entries were not returned.
 
     """
-    client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url',
-                    cr_server_url='cr_server_url', username='username',
-                    password='password', verify=False, fetch_time='fetch_time',
-                    sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at',
-                    ticket_type='incident', get_attachments=False, incident_name='description')
+    client = Client(
+        server_url="https://server_url.com/",
+        sc_server_url="sc_server_url",
+        cr_server_url="cr_server_url",
+        username="username",
+        password="password",
+        verify=False,
+        fetch_time="fetch_time",
+        sysparm_query="sysparm_query",
+        sysparm_limit=10,
+        timestamp_field="opened_at",
+        ticket_type="incident",
+        get_attachments=False,
+        incident_name="description",
+    )
 
-    args = {'id': 'sys_id', 'lastUpdate': 0}
+    args = {"id": "sys_id", "lastUpdate": 0}
     params = {}
-    mocker.patch.object(client, 'get', return_value=[RESPONSE_TICKET_MIRROR, RESPONSE_ASSIGNMENT_GROUP])
-    mocker.patch.object(client, 'get_ticket_attachment_entries', return_value=RESPONSE_MIRROR_FILE_ENTRY_FROM_XSOAR)
-    mocker.patch.object(client, 'query', return_value=MIRROR_COMMENTS_RESPONSE_FROM_XSOAR)
+    mocker.patch.object(client, "get", return_value=[RESPONSE_TICKET_MIRROR, RESPONSE_ASSIGNMENT_GROUP])
+    mocker.patch.object(client, "get_ticket_attachment_entries", return_value=RESPONSE_MIRROR_FILE_ENTRY_FROM_XSOAR)
+    mocker.patch.object(client, "query", return_value=MIRROR_COMMENTS_RESPONSE_FROM_XSOAR)
 
     res = get_remote_data_command(client, args, params)
-    assert 'This is a comment\n\n Mirrored from Cortex XSOAR' not in res
-    assert 'test_mirrored_from_xsoar.txt' not in res
+    assert "This is a comment\n\n Mirrored from Cortex XSOAR" not in res
+    assert "test_mirrored_from_xsoar.txt" not in res
 
 
 def upload_file_request(*args):
-    assert args[2] == 'test_mirrored_from_xsoar.txt'
-    return {'id': "sys_id", 'file_id': "entry_id", 'file_name': 'test.txt'}
+    assert args[2] == "test_mirrored_from_xsoar.txt"
+    return {"id": "sys_id", "file_id": "entry_id", "file_name": "test.txt"}
 
 
 def add_comment_request(*args):
-    assert args[3] == '(dbot): This is a comment\n\n Mirrored from Cortex XSOAR'
-    return {'id': "1234", 'comment': "This is a comment"}
+    assert args[3] == "(dbot): This is a comment\n\n Mirrored from Cortex XSOAR"
+    return {"id": "1234", "comment": "This is a comment"}
 
 
-@pytest.mark.parametrize('mirror_entries', [MIRROR_ENTRIES, MIRROR_ENTRIES_WITH_EMPTY_USERNAME])
+@pytest.mark.parametrize("mirror_entries", [MIRROR_ENTRIES, MIRROR_ENTRIES_WITH_EMPTY_USERNAME])
 def test_upload_entries_update_remote_system_command(mocker, mirror_entries):
     """
     Given:
@@ -1783,47 +2331,93 @@ def test_upload_entries_update_remote_system_command(mocker, mirror_entries):
     Then
     - The checked entries were sent as expected, with the suffix.
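+
+    For reference, the exact payloads the request mocks above assert on (the suffix is
+    the mirroring marker the integration appends):
+
+        upload_file: args[2] == "test_mirrored_from_xsoar.txt"
+        add_comment: args[3] == "(dbot): This is a comment\n\n Mirrored from Cortex XSOAR"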
""" - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', - cr_server_url='cr_server_url', username='username', - password='password', verify=False, fetch_time='fetch_time', - sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type='incident', get_attachments=False, incident_name='description') + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + ) params = {} - args = {'remoteId': '1234', 'data': {}, 'entries': mirror_entries, 'incidentChanged': False, 'delta': {}} - mocker.patch.object(client, 'upload_file', side_effect=upload_file_request) - mocker.patch.object(client, 'add_comment', side_effect=add_comment_request) + args = {"remoteId": "1234", "data": {}, "entries": mirror_entries, "incidentChanged": False, "delta": {}} + mocker.patch.object(client, "upload_file", side_effect=upload_file_request) + mocker.patch.object(client, "add_comment", side_effect=add_comment_request) update_remote_system_command(client, args, params) -TICKET_FIELDS = {'close_notes': 'This is closed', 'closed_at': '2020-10-29T13:19:07.345995+02:00', 'impact': '3', - 'priority': '4', 'resolved_at': '2020-10-29T13:19:07.345995+02:00', 'severity': '1 - Low', - 'short_description': 'Post parcel', 'sla_due': '0001-01-01T00:00:00Z', 'urgency': '3', 'state': '1', - 'work_start': '0001-01-01T00:00:00Z'} +TICKET_FIELDS = { + "close_notes": "This is closed", + "closed_at": "2020-10-29T13:19:07.345995+02:00", + "impact": "3", + "priority": "4", + "resolved_at": "2020-10-29T13:19:07.345995+02:00", + "severity": "1 - Low", + "short_description": "Post parcel", + "sla_due": "0001-01-01T00:00:00Z", + "urgency": "3", + "state": "1", + "work_start": "0001-01-01T00:00:00Z", +} def ticket_fields(*args, **kwargs): - state = '7' if kwargs.get('ticket_type') == 'incident' else '3' - assert args[0] == {'close_notes': 'This is closed', 'closed_at': '2020-10-29T13:19:07.345995+02:00', 'impact': '3', - 'priority': '4', 'resolved_at': '2020-10-29T13:19:07.345995+02:00', 'severity': '1 - Low', - 'short_description': 'Post parcel', 'sla_due': '0001-01-01T00:00:00Z', 'urgency': '3', 'state': state, - 'work_start': '0001-01-01T00:00:00Z'} + state = "7" if kwargs.get("ticket_type") == "incident" else "3" + assert args[0] == { + "close_notes": "This is closed", + "closed_at": "2020-10-29T13:19:07.345995+02:00", + "impact": "3", + "priority": "4", + "resolved_at": "2020-10-29T13:19:07.345995+02:00", + "severity": "1 - Low", + "short_description": "Post parcel", + "sla_due": "0001-01-01T00:00:00Z", + "urgency": "3", + "state": state, + "work_start": "0001-01-01T00:00:00Z", + } - return {'close_notes': 'This is closed', 'closed_at': '2020-10-29T13:19:07.345995+02:00', 'impact': '3', - 'priority': '4', 'resolved_at': '2020-10-29T13:19:07.345995+02:00', 'severity': '1 - Low', - 'short_description': 'Post parcel', 'sla_due': '0001-01-01T00:00:00Z', 'urgency': '3', 'state': '1', - 'work_start': '0001-01-01T00:00:00Z'} + return { + "close_notes": "This is closed", + "closed_at": "2020-10-29T13:19:07.345995+02:00", + "impact": "3", + "priority": "4", + "resolved_at": "2020-10-29T13:19:07.345995+02:00", + "severity": "1 - Low", + "short_description": "Post parcel", 
+ "sla_due": "0001-01-01T00:00:00Z", + "urgency": "3", + "state": "1", + "work_start": "0001-01-01T00:00:00Z", + } def update_ticket(*args): - state = '7' if 'incident' in args else '3' - return {'short_description': 'Post parcel', 'close_notes': 'This is closed', - 'closed_at': '2020-10-29T13:19:07.345995+02:00', 'impact': '3', 'priority': '4', - 'resolved_at': '2020-10-29T13:19:07.345995+02:00', 'severity': '1 - High - Low', - 'sla_due': '0001-01-01T00:00:00Z', 'state': state, 'urgency': '3', 'work_start': '0001-01-01T00:00:00Z'} + state = "7" if "incident" in args else "3" + return { + "short_description": "Post parcel", + "close_notes": "This is closed", + "closed_at": "2020-10-29T13:19:07.345995+02:00", + "impact": "3", + "priority": "4", + "resolved_at": "2020-10-29T13:19:07.345995+02:00", + "severity": "1 - High - Low", + "sla_due": "0001-01-01T00:00:00Z", + "state": state, + "urgency": "3", + "work_start": "0001-01-01T00:00:00Z", + } -@pytest.mark.parametrize('ticket_type', ['sc_task', 'sc_req_item', 'incident']) +@pytest.mark.parametrize("ticket_type", ["sc_task", "sc_req_item", "incident"]) def test_update_remote_data_sc_task_sc_req_item(mocker, ticket_type): """ Given: @@ -1838,23 +2432,35 @@ def test_update_remote_data_sc_task_sc_req_item(mocker, ticket_type): - The state is changed to 3 (closed) after update for sc_task and sc_req_item. - The state is changed to 7 (closed) after update for incident. """ - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', - cr_server_url='cr_server_url', username='username', - password='password', verify=False, fetch_time='fetch_time', - sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type=ticket_type, get_attachments=False, incident_name='description') - params = {'ticket_type': ticket_type, 'close_ticket_multiple_options': 'None', 'close_ticket': True} - args = {'remoteId': '1234', 'data': TICKET_FIELDS, 'entries': [], 'incidentChanged': True, 'delta': {}, - 'status': 2} - mocker.patch('ServiceNowv2.get_ticket_fields', side_effect=ticket_fields) - mocker.patch.object(client, 'update', side_effect=update_ticket) + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type=ticket_type, + get_attachments=False, + incident_name="description", + ) + params = {"ticket_type": ticket_type, "close_ticket_multiple_options": "None", "close_ticket": True} + args = {"remoteId": "1234", "data": TICKET_FIELDS, "entries": [], "incidentChanged": True, "delta": {}, "status": 2} + mocker.patch("ServiceNowv2.get_ticket_fields", side_effect=ticket_fields) + mocker.patch.object(client, "update", side_effect=update_ticket) update_remote_system_command(client, args, params) -@pytest.mark.parametrize('command, args', [ - (query_tickets_command, {'limit': "50", 'query': "assigned_to=123^active=true", 'ticket_type': "sc_task"}), - (query_table_command, {'limit': "50", 'query': "assigned_to=123^active=true", 'table_name': "sc_task"}) -]) +@pytest.mark.parametrize( + "command, args", + [ + (query_tickets_command, {"limit": "50", "query": "assigned_to=123^active=true", "ticket_type": "sc_task"}), + (query_table_command, {"limit": "50", "query": "assigned_to=123^active=true", "table_name": "sc_task"}), + ], +) def 
test_multiple_query_params(requests_mock, command, args): """ Given: @@ -1867,22 +2473,39 @@ def test_multiple_query_params(requests_mock, command, args): Then: - Verify the right request is called with '^' distinguishing different arguments. """ - url = 'https://test.service-now.com/api/now/v2/' - client = Client(url, 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name') - requests_mock.request('GET', f'{url}table/sc_task?sysparm_limit=50&sysparm_offset=0&' - 'sysparm_query=assigned_to%3D123^active%3Dtrue', - json=RESPONSE_TICKET_ASSIGNED) + url = "https://test.service-now.com/api/now/v2/" + client = Client( + url, + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + ) + requests_mock.request( + "GET", + f"{url}table/sc_task?sysparm_limit=50&sysparm_offset=0&" "sysparm_query=assigned_to%3D123^active%3Dtrue", + json=RESPONSE_TICKET_ASSIGNED, + ) human_readable, entry_context, result, bol = command(client, args) assert result == RESPONSE_TICKET_ASSIGNED -@pytest.mark.parametrize('api_response', [ - ({'result': []}), - ({'result': [{'sys_id': 'sys_id1'}, {'sys_id': 'sys_id2'}]}), -]) +@pytest.mark.parametrize( + "api_response", + [ + ({"result": []}), + ({"result": [{"sys_id": "sys_id1"}, {"sys_id": "sys_id2"}]}), + ], +) def test_get_modified_remote_data(requests_mock, mocker, api_response): """ Given: @@ -1896,35 +2519,46 @@ def test_get_modified_remote_data(requests_mock, mocker, api_response): - Case A: Ensure no record IDs returned - Case B: Ensure the 2 records IDs returned """ - mocker.patch.object(demisto, 'debug') - url = 'https://test.service-now.com/api/now/v2/' - client = Client(url, 'sc_server_url', 'cr_server_url', 'username', 'password', 'verify', 'fetch_time', - 'sysparm_query', 'sysparm_limit', 'timestamp_field', 'ticket_type', 'get_attachments', - 'incident_name') - last_update = '2020-11-18T13:16:52.005381+02:00' + mocker.patch.object(demisto, "debug") + url = "https://test.service-now.com/api/now/v2/" + client = Client( + url, + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + ) + last_update = "2020-11-18T13:16:52.005381+02:00" params = { - 'sysparm_limit': '100', - 'sysparm_offset': '0', - 'sysparm_query': 'sys_updated_on>2020-11-18 11:16:52', - 'sysparm_fields': 'sys_id', + "sysparm_limit": "100", + "sysparm_offset": "0", + "sysparm_query": "sys_updated_on>2020-11-18 11:16:52", + "sysparm_fields": "sys_id", } - requests_mock.request( - 'GET', - f'{url}table/ticket_type?{urlencode(params)}', - json=api_response - ) - result = get_modified_remote_data_command(client, {'lastUpdate': last_update}) + requests_mock.request("GET", f"{url}table/ticket_type?{urlencode(params)}", json=api_response) + result = get_modified_remote_data_command(client, {"lastUpdate": last_update}) - assert sorted(result.modified_incident_ids) == sorted([ - record.get('sys_id') for record in api_response.get('result') if 'sys_id' in record - ]) + assert sorted(result.modified_incident_ids) == sorted( + [record.get("sys_id") for record in api_response.get("result") if "sys_id" in record] + ) -@pytest.mark.parametrize('sys_created_on, expected', [ - 
(None, 'table_sys_id=id'),
-    ('', 'table_sys_id=id'),
-    ('2020-11-18 11:16:52', 'table_sys_id=id^sys_created_on>2020-11-18 11:16:52')
-])
+@pytest.mark.parametrize(
+    "sys_created_on, expected",
+    [
+        (None, "table_sys_id=id"),
+        ("", "table_sys_id=id"),
+        ("2020-11-18 11:16:52", "table_sys_id=id^sys_created_on>2020-11-18 11:16:52"),
+    ],
+)
 def test_get_ticket_attachments(mocker, sys_created_on, expected):
     """
     Given:
@@ -1938,26 +2572,44 @@ def test_get_ticket_attachments(mocker, sys_created_on, expected):
     - Case A+B: Ensure that the query parameters do not include ^sys_created_on>
     - Case C: Ensure that the query parameters include ^sys_created_on>
     """
-    client = Client("url", 'sc_server_url', 'cr_server_url', 'username', 'password', 'verify', 'fetch_time',
-                    'sysparm_query', 'sysparm_limit', 'timestamp_field', 'ticket_type', 'get_attachments',
-                    'incident_name')
-    mocker.patch.object(client, 'send_request', return_value=[])
+    client = Client(
+        "url",
+        "sc_server_url",
+        "cr_server_url",
+        "username",
+        "password",
+        "verify",
+        "fetch_time",
+        "sysparm_query",
+        "sysparm_limit",
+        "timestamp_field",
+        "ticket_type",
+        "get_attachments",
+        "incident_name",
+    )
+    mocker.patch.object(client, "send_request", return_value=[])
 
-    client.get_ticket_attachments('id', sys_created_on)
-    client.send_request.assert_called_with('attachment', 'GET', params={'sysparm_query': f'{expected}'}, get_attachments=True)
+    client.get_ticket_attachments("id", sys_created_on)
+    client.send_request.assert_called_with("attachment", "GET", params={"sysparm_query": f"{expected}"}, get_attachments=True)
 
 
-@pytest.mark.parametrize('args,expected_ticket_fields', [
-    ({'clear_fields': 'assigned_to,severity'}, {'assigned_to': '', 'severity': ''}),
-    ({'clear_fields': 'assigned_to,severity', 'assigned_to': 'assigned@to.com'}, {'assigned_to': '', 'severity': ''}),
-    ({}, {}),
-])
+@pytest.mark.parametrize(
+    "args,expected_ticket_fields",
+    [
+        ({"clear_fields": "assigned_to,severity"}, {"assigned_to": "", "severity": ""}),
+        ({"clear_fields": "assigned_to,severity", "assigned_to": "assigned@to.com"}, {"assigned_to": "", "severity": ""}),
+        ({}, {}),
+    ],
+)
 def test_clear_fields_in_get_ticket_fields(args, expected_ticket_fields):
-    if 'assigned_to' in args:
+    if "assigned_to" in args:
         with pytest.raises(DemistoException) as e:
             res = get_ticket_fields(args)
-        assert str(e.value) == "Could not set a value for the argument 'assigned_to' and add it to the clear_fields. \
+        assert (
+            str(e.value)
+            == "Could not set a value for the argument 'assigned_to' and add it to the clear_fields. \
 You can either set or clear the field value."
+        )
     else:
        res = get_ticket_fields(args)
        assert res == expected_ticket_fields
@@ -1972,14 +2624,24 @@ def test_clear_fields_for_update_remote_system():
     Then:
     - Validate that the empty fields exist in the returned fields.
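+
+    A minimal illustration, taken from the parametrized cases above:
+
+        get_ticket_fields({"clear_fields": "assigned_to,severity"})  # -> {"assigned_to": "", "severity": ""}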
""" - parsed_args_data = {'assigned_to': '', 'category': 'Software', 'description': '', 'impact': '3 - Low', - 'notify': '1 - Do Not Notify', 'priority': '5 - Planning', 'severity': '1 - High - Low', - 'short_description': 'Testing 3', 'sla_due': '0001-01-01T02:22:42+02:20', - 'state': '2 - In Progress', 'subcategory': '', 'urgency': '3 - Low', - 'work_start': '0001-01-01T02:22:42+02:20'} + parsed_args_data = { + "assigned_to": "", + "category": "Software", + "description": "", + "impact": "3 - Low", + "notify": "1 - Do Not Notify", + "priority": "5 - Planning", + "severity": "1 - High - Low", + "short_description": "Testing 3", + "sla_due": "0001-01-01T02:22:42+02:20", + "state": "2 - In Progress", + "subcategory": "", + "urgency": "3 - Low", + "work_start": "0001-01-01T02:22:42+02:20", + } res = get_ticket_fields(parsed_args_data) - assert 'assigned_to' in res + assert "assigned_to" in res def test_query_table_with_fields(mocker): @@ -1995,30 +2657,40 @@ def test_query_table_with_fields(mocker): """ # prepare - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', 'verify', 'fetch_time', - 'sysparm_query', 'sysparm_limit', 'timestamp_field', 'ticket_type', 'get_attachments', - 'incident_name') + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + ) - mocker.patch.object(client, 'send_request', return_value={ - "result": [ - { - "sys_id": "test_id", - "sys_updated_by": "test_updated_name", - "opened_by.name": "test_opened_name" - } - ]}) + mocker.patch.object( + client, + "send_request", + return_value={ + "result": [{"sys_id": "test_id", "sys_updated_by": "test_updated_name", "opened_by.name": "test_opened_name"}] + }, + ) fields = "sys_updated_by,opened_by.name" - fields_with_sys_id = f'{fields},sys_id' - args = {'table_name': "alm_asset", 'fields': fields, - 'query': "display_nameCONTAINSMacBook", 'limit': 3} + fields_with_sys_id = f"{fields},sys_id" + args = {"table_name": "alm_asset", "fields": fields, "query": "display_nameCONTAINSMacBook", "limit": 3} # run result = query_table_command(client, args) # validate - assert client.send_request.call_args[1]['params']['sysparm_fields'] == fields_with_sys_id + assert client.send_request.call_args[1]["params"]["sysparm_fields"] == fields_with_sys_id # validate that the '.' in the key was replaced to '_' - assert result[1]['ServiceNow.Record(val.ID===obj.ID)'][0]['opened_by_name'] == 'test_opened_name' + assert result[1]["ServiceNow.Record(val.ID===obj.ID)"][0]["opened_by_name"] == "test_opened_name" def test_create_co_from_template_command(mocker): @@ -2032,21 +2704,31 @@ def test_create_co_from_template_command(mocker): Then: - Validate the output is correct. 
""" - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', 'verify', 'fetch_time', - 'sysparm_query', 'sysparm_limit', 'timestamp_field', 'ticket_type', 'get_attachments', - 'incident_name') + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + ) args = {"template": "Add network switch to datacenter cabinet"} - mocker.patch.object(client, - 'send_request', - return_value=util_load_json('test_data/create_co_from_template_result.json')) + mocker.patch.object(client, "send_request", return_value=util_load_json("test_data/create_co_from_template_result.json")) result = ServiceNowv2.create_co_from_template_command(client, args) assert result.outputs_prefix == "ServiceNow.Ticket" assert result.outputs == { - 'Ticket(val.ID===obj.ID)': CREATED_TICKET_CONTEXT_CREATE_CO_FROM_TEMPLATE_COMMAND, - 'ServiceNow.Ticket(val.ID===obj.ID)': CREATED_TICKET_CONTEXT_CREATE_CO_FROM_TEMPLATE_COMMAND + "Ticket(val.ID===obj.ID)": CREATED_TICKET_CONTEXT_CREATE_CO_FROM_TEMPLATE_COMMAND, + "ServiceNow.Ticket(val.ID===obj.ID)": CREATED_TICKET_CONTEXT_CREATE_CO_FROM_TEMPLATE_COMMAND, } - assert result.raw_response == util_load_json('test_data/create_co_from_template_result.json') + assert result.raw_response == util_load_json("test_data/create_co_from_template_result.json") def test_get_tasks_for_co_command(mocker): @@ -2060,19 +2742,28 @@ def test_get_tasks_for_co_command(mocker): Then: - Validate the output is correct. """ - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', 'verify', 'fetch_time', - 'sysparm_query', 'sysparm_limit', 'timestamp_field', 'problem', 'get_attachments', 'incident_name') + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "problem", + "get_attachments", + "incident_name", + ) args = {"id": "a9e9c33dc61122760072455df62663d2"} - mocker.patch.object(client, - 'send_request', - return_value=util_load_json('test_data/get_tasks_for_co_command.json')) + mocker.patch.object(client, "send_request", return_value=util_load_json("test_data/get_tasks_for_co_command.json")) result = ServiceNowv2.get_tasks_for_co_command(client, args) assert result.outputs_prefix == "ServiceNow.Tasks" - assert result.outputs == { - 'ServiceNow.Tasks(val.ID===obj.ID)': CREATED_TICKET_CONTEXT_GET_TASKS_FOR_CO_COMMAND - } - assert result.raw_response == util_load_json('test_data/get_tasks_for_co_command.json') + assert result.outputs == {"ServiceNow.Tasks(val.ID===obj.ID)": CREATED_TICKET_CONTEXT_GET_TASKS_FOR_CO_COMMAND} + assert result.raw_response == util_load_json("test_data/get_tasks_for_co_command.json") def test_get_ticket_attachment_entries_with_oauth_token(mocker): @@ -2091,71 +2782,103 @@ def test_get_ticket_attachment_entries_with_oauth_token(mocker): - Verify that the 'requests.get' function's arguments are arguments of a call with OAuth 2.0 Authorization. 
""" # Preparations and mocking: - client = Client("url", 'sc_server_url', 'cr_server_url', 'username', 'password', 'verify', 'fetch_time', - 'sysparm_query', 'sysparm_limit', 'timestamp_field', 'ticket_type', 'get_attachments', - 'incident_name', oauth_params={'oauth_params': ''}) + client = Client( + "url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + oauth_params={"oauth_params": ""}, + ) - mock_res_for_get_ticket_attachments = \ - {'result': [ + mock_res_for_get_ticket_attachments = { + "result": [ { - 'file_name': 'attachment for test.txt', - 'download_link': 'https://ven03941.service-now.com/api/now/attachment/12b7ea411b15cd10042611b4bd4/file' - }]} + "file_name": "attachment for test.txt", + "download_link": "https://ven03941.service-now.com/api/now/attachment/12b7ea411b15cd10042611b4bd4/file", + } + ] + } - mock_res_for_get_access_token = 'access_token' + mock_res_for_get_access_token = "access_token" - mocker.patch.object(client, 'get_ticket_attachments', return_value=mock_res_for_get_ticket_attachments) - mocker.patch.object(client.snow_client, 'get_access_token', return_value=mock_res_for_get_access_token) - requests_get_mocker = mocker.patch('requests.get', return_value=None) + mocker.patch.object(client, "get_ticket_attachments", return_value=mock_res_for_get_ticket_attachments) + mocker.patch.object(client.snow_client, "get_access_token", return_value=mock_res_for_get_access_token) + requests_get_mocker = mocker.patch("requests.get", return_value=None) # Running get_ticket_attachment_entries function: - client.get_ticket_attachment_entries(ticket_id='id') + client.get_ticket_attachment_entries(ticket_id="id") # Validate Results are as expected: - assert requests_get_mocker.call_args.kwargs.get('auth') is None, \ - "When An OAuth 2.0 client is configured the 'auth' argument shouldn't be passed to 'requests.get' function" - assert requests_get_mocker.call_args.kwargs.get('headers').get('Authorization') == \ - f"Bearer {mock_res_for_get_access_token}", "When An OAuth 2.0 client is configured the 'Authorization'" \ - " Header argument should be passed to 'requests.get' function" + assert ( + requests_get_mocker.call_args.kwargs.get("auth") is None + ), "When An OAuth 2.0 client is configured the 'auth' argument shouldn't be passed to 'requests.get' function" + assert ( + requests_get_mocker.call_args.kwargs.get("headers").get("Authorization") == f"Bearer {mock_res_for_get_access_token}" + ), "When An OAuth 2.0 client is configured the 'Authorization'" " Header argument should be passed to 'requests.get' function" @pytest.mark.parametrize( - 'command, args, response', + "command, args, response", [ - (generic_api_call_command, - {"method": "GET", - "path": "table/sn_si_incident?sysparam_limit=1&sysparam_query=active=true^ORDERBYDESCnumber", - "body": {}, - "headers": {}, - }, - RESPONSE_GENERIC_TICKET), - (generic_api_call_command, - {"method": "GET", - "path": "/table/sn_si_incident?sysparam_limit=1&sysparam_query=active=true^ORDERBYDESCnumber", - "body": {}, - "headers": {}, - "custom_api": "/api/custom" - }, - RESPONSE_GENERIC_TICKET) - ]) + ( + generic_api_call_command, + { + "method": "GET", + "path": "table/sn_si_incident?sysparam_limit=1&sysparam_query=active=true^ORDERBYDESCnumber", + "body": {}, + "headers": {}, + }, + RESPONSE_GENERIC_TICKET, + ), + ( + generic_api_call_command, + { + "method": "GET", + 
"path": "/table/sn_si_incident?sysparam_limit=1&sysparam_query=active=true^ORDERBYDESCnumber", + "body": {}, + "headers": {}, + "custom_api": "/api/custom", + }, + RESPONSE_GENERIC_TICKET, + ), + ], +) def test_generic_api_call_command(command, args, response, mocker): """test case for `generic_api_call_command`""" - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name') + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + ) - mocker.patch.object(client, 'send_request', return_value=response) + mocker.patch.object(client, "send_request", return_value=response) result = command(client, args) assert result.outputs == response -@pytest.mark.parametrize('file_type , expected', - [(EntryType.FILE, True), - (3, True), - (EntryType.IMAGE, True), - (EntryType.NOTE, False), - (15, False)]) +@pytest.mark.parametrize( + "file_type , expected", [(EntryType.FILE, True), (3, True), (EntryType.IMAGE, True), (EntryType.NOTE, False), (15, False)] +) def test_is_entry_type_mirror_supported(file_type, expected): """ Given: @@ -2168,13 +2891,17 @@ def test_is_entry_type_mirror_supported(file_type, expected): assert ServiceNowv2.is_entry_type_mirror_supported(file_type) == expected -@pytest.mark.parametrize('params, expected', - [({'close_ticket_multiple_options': 'None', 'close_ticket': True}, 'closed'), - ({'close_ticket_multiple_options': 'None', 'close_ticket': False}, None), - ({'close_ticket_multiple_options': 'resolved', 'close_ticket': True}, 'resolved'), - ({'close_ticket_multiple_options': 'resolved', 'close_ticket': False}, 'resolved'), - ({'close_ticket_multiple_options': 'closed', 'close_ticket': True}, 'closed'), - ({'close_ticket_multiple_options': 'closed', 'close_ticket': False}, 'closed')]) +@pytest.mark.parametrize( + "params, expected", + [ + ({"close_ticket_multiple_options": "None", "close_ticket": True}, "closed"), + ({"close_ticket_multiple_options": "None", "close_ticket": False}, None), + ({"close_ticket_multiple_options": "resolved", "close_ticket": True}, "resolved"), + ({"close_ticket_multiple_options": "resolved", "close_ticket": False}, "resolved"), + ({"close_ticket_multiple_options": "closed", "close_ticket": True}, "closed"), + ({"close_ticket_multiple_options": "closed", "close_ticket": False}, "closed"), + ], +) def test_get_closure_case(params, expected): """ Given: @@ -2199,23 +2926,27 @@ def test_get_closure_case(params, expected): assert get_closure_case(params) == expected -@pytest.mark.parametrize('ticket_state, ticket_close_code, server_close_custom_state, server_close_custom_code, expected_res', - [('1', 'default close code', '', '', 'Other'), - ('7', 'default close code', '', '', 'Resolved'), - ('6', 'default close code', '', '', 'Resolved'), - ('10', 'default close code', '10=Test', '', 'Test'), - ('10', 'default close code', '10=Test,11=Test2', '', 'Test'), - # If builtin state was override by custom. - ('6', 'default close code', '6=Test', '', 'Test'), - ('corrupt_state', 'default close code', '', '', 'Other'), - ('corrupt_state', 'default close code', 'custom_state=Test', '', 'Other'), - ('6', 'default close code', 'custom_state=Test', '', 'Resolved'), - # custom close_code overwrites custom sate. 
- ('10', 'custom close code', '10=Test,11=Test2', 'custom close code=Custom,90=90 Custom', 'Custom'), - ('10', '90', '10=Test,11=Test2', '80=Custom, 90=90 Custom', '90 Custom'), - ]) -def test_converts_close_code_or_state_to_close_reason(ticket_state, ticket_close_code, server_close_custom_state, - server_close_custom_code, expected_res): +@pytest.mark.parametrize( + "ticket_state, ticket_close_code, server_close_custom_state, server_close_custom_code, expected_res", + [ + ("1", "default close code", "", "", "Other"), + ("7", "default close code", "", "", "Resolved"), + ("6", "default close code", "", "", "Resolved"), + ("10", "default close code", "10=Test", "", "Test"), + ("10", "default close code", "10=Test,11=Test2", "", "Test"), + # If builtin state was override by custom. + ("6", "default close code", "6=Test", "", "Test"), + ("corrupt_state", "default close code", "", "", "Other"), + ("corrupt_state", "default close code", "custom_state=Test", "", "Other"), + ("6", "default close code", "custom_state=Test", "", "Resolved"), + # custom close_code overwrites custom sate. + ("10", "custom close code", "10=Test,11=Test2", "custom close code=Custom,90=90 Custom", "Custom"), + ("10", "90", "10=Test,11=Test2", "80=Custom, 90=90 Custom", "90 Custom"), + ], +) +def test_converts_close_code_or_state_to_close_reason( + ticket_state, ticket_close_code, server_close_custom_state, server_close_custom_code, expected_res +): """ Given: - ticket_state: The state for the closed service now ticket @@ -2227,48 +2958,79 @@ def test_converts_close_code_or_state_to_close_reason(ticket_state, ticket_close Then: - return the matching XSOAR incident state. """ - assert converts_close_code_or_state_to_close_reason(ticket_state, ticket_close_code, server_close_custom_state, - server_close_custom_code) == expected_res + assert ( + converts_close_code_or_state_to_close_reason( + ticket_state, ticket_close_code, server_close_custom_state, server_close_custom_code + ) + == expected_res + ) def ticket_fields_mocker(*args, **kwargs): - state = '88' if kwargs.get('ticket_type') == 'incident' else '90' - fields = {'close_notes': 'This is closed', 'closed_at': '2020-10-29T13:19:07.345995+02:00', 'impact': '3', - 'priority': '4', 'resolved_at': '2020-10-29T13:19:07.345995+02:00', 'severity': '1 - Low', - 'short_description': 'Post parcel', 'sla_due': '0001-01-01T00:00:00Z', 'urgency': '3', 'state': state, - 'work_start': '0001-01-01T00:00:00Z'} + state = "88" if kwargs.get("ticket_type") == "incident" else "90" + fields = { + "close_notes": "This is closed", + "closed_at": "2020-10-29T13:19:07.345995+02:00", + "impact": "3", + "priority": "4", + "resolved_at": "2020-10-29T13:19:07.345995+02:00", + "severity": "1 - Low", + "short_description": "Post parcel", + "sla_due": "0001-01-01T00:00:00Z", + "urgency": "3", + "state": state, + "work_start": "0001-01-01T00:00:00Z", + } assert fields == args[0] return fields -@pytest.mark.parametrize('file_name , expected', - [('123.png', 'image/png'), - ('123.gif', 'image/gif'), - ('123.jpeg', 'image/jpeg'), - ('123.pdf', 'application/pdf'), - ('123', '*/*')]) +@pytest.mark.parametrize( + "file_name , expected", + [ + ("123.png", "image/png"), + ("123.gif", "image/gif"), + ("123.jpeg", "image/jpeg"), + ("123.pdf", "application/pdf"), + ("123", "*/*"), + ], +) def test_upload_file_types(file_name, expected): - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', - cr_server_url='cr_server_url', username='username', - password='password', verify=False, 
fetch_time='fetch_time', - sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - get_attachments=False, incident_name='description', ticket_type='incident') + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + get_attachments=False, + incident_name="description", + ticket_type="incident", + ) assert client.get_content_type(file_name) == expected -@pytest.mark.parametrize('ticket_type, ticket_state, close_custom_state, result_close_state, update_call_count', - [ - # case 1 - SIR ticket closed by custom state - ('sn_si_incident', '16', '90', '90', 1), - # case 2 - custom state doesn't exist, closed by default state code - '3' - ('sn_si_incident', '16', '90', '3', 2), - # case 3 - ticket closed by custom state - ('incident', '1', '88', '88', 1), - # case 4 - custom state doesn't exist, closed by default state code - '7' - ('incident', '1', '88', '7', 2), - ], ids=['case - 1', 'case - 2', 'case - 3', 'case - 4']) -def test_update_remote_data_custom_state(mocker, ticket_type, ticket_state, close_custom_state, result_close_state, - update_call_count): +@pytest.mark.parametrize( + "ticket_type, ticket_state, close_custom_state, result_close_state, update_call_count", + [ + # case 1 - SIR ticket closed by custom state + ("sn_si_incident", "16", "90", "90", 1), + # case 2 - custom state doesn't exist, closed by default state code - '3' + ("sn_si_incident", "16", "90", "3", 2), + # case 3 - ticket closed by custom state + ("incident", "1", "88", "88", 1), + # case 4 - custom state doesn't exist, closed by default state code - '7' + ("incident", "1", "88", "7", 2), + ], + ids=["case - 1", "case - 2", "case - 3", "case - 4"], +) +def test_update_remote_data_custom_state( + mocker, ticket_type, ticket_state, close_custom_state, result_close_state, update_call_count +): """ Given: - ServiceNow client @@ -2280,33 +3042,56 @@ def test_update_remote_data_custom_state(mocker, ticket_type, ticket_state, clos Then - The state is changed accordingly """ - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', - cr_server_url='cr_server_url', username='username', - password='password', verify=False, fetch_time='fetch_time', - sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type=ticket_type, get_attachments=False, incident_name='description') - params = {'ticket_type': ticket_type, 'close_ticket_multiple_options': 'None', 'close_ticket': True, - 'close_custom_state': close_custom_state} + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type=ticket_type, + get_attachments=False, + incident_name="description", + ) + params = { + "ticket_type": ticket_type, + "close_ticket_multiple_options": "None", + "close_ticket": True, + "close_custom_state": close_custom_state, + } - TICKET_FIELDS['state'] = ticket_state - args = {'remoteId': '1234', 'data': TICKET_FIELDS, 'entries': [], 'incidentChanged': True, 'delta': {}, - 'status': 2} + TICKET_FIELDS["state"] = ticket_state + args = {"remoteId": "1234", "data": TICKET_FIELDS, 
"entries": [], "incidentChanged": True, "delta": {}, "status": 2} def update_ticket_mocker(*args): # Represents only the response of the last call to client.update # In case the custom state doesn't exist - # in the first call will return the ticket's state as before (in case2 - '16', case4 - '1') - return {'result': {'short_description': 'Post parcel', 'close_notes': 'This is closed', - 'closed_at': '2020-10-29T13:19:07.345995+02:00', 'impact': '3', 'priority': '4', - 'resolved_at': '2020-10-29T13:19:07.345995+02:00', 'severity': '1 - High - Low', - 'sla_due': '0001-01-01T00:00:00Z', 'state': result_close_state, 'urgency': '3', - 'work_start': '0001-01-01T00:00:00Z'}} - - mocker.patch('ServiceNowv2.get_ticket_fields', side_effect=ticket_fields_mocker) - mocker_update = mocker.patch.object(client, 'update', side_effect=update_ticket_mocker) + return { + "result": { + "short_description": "Post parcel", + "close_notes": "This is closed", + "closed_at": "2020-10-29T13:19:07.345995+02:00", + "impact": "3", + "priority": "4", + "resolved_at": "2020-10-29T13:19:07.345995+02:00", + "severity": "1 - High - Low", + "sla_due": "0001-01-01T00:00:00Z", + "state": result_close_state, + "urgency": "3", + "work_start": "0001-01-01T00:00:00Z", + } + } + + mocker.patch("ServiceNowv2.get_ticket_fields", side_effect=ticket_fields_mocker) + mocker_update = mocker.patch.object(client, "update", side_effect=update_ticket_mocker) update_remote_system_command(client, args, params) # assert the state argument in the last call to client.update - assert mocker_update.call_args[0][2]['state'] == result_close_state + assert mocker_update.call_args[0][2]["state"] == result_close_state assert mocker_update.call_count == update_call_count @@ -2320,14 +3105,30 @@ def test_update_remote_data_upload_file_exception(mocker): Then - The invalid entry raised an exception and function has continued. """ - client = Client(server_url='https://server_url.com/', sc_server_url='sc_server_url', - cr_server_url='cr_server_url', username='username', - password='password', verify=False, fetch_time='fetch_time', - sysparm_query='sysparm_query', sysparm_limit=10, timestamp_field='opened_at', - ticket_type='incident', get_attachments=False, incident_name='description') + client = Client( + server_url="https://server_url.com/", + sc_server_url="sc_server_url", + cr_server_url="cr_server_url", + username="username", + password="password", + verify=False, + fetch_time="fetch_time", + sysparm_query="sysparm_query", + sysparm_limit=10, + timestamp_field="opened_at", + ticket_type="incident", + get_attachments=False, + incident_name="description", + ) params = {} - args = {'remoteId': '1234', 'data': {}, 'entries': [MIRROR_ENTRIES[0], MIRROR_ENTRIES[0]], 'incidentChanged': True, - 'delta': {}, 'status': 2} + args = { + "remoteId": "1234", + "data": {}, + "entries": [MIRROR_ENTRIES[0], MIRROR_ENTRIES[0]], + "incidentChanged": True, + "delta": {}, + "status": 2, + } def upload_file_mock(*args): raise Exception("ERROR!!!") @@ -2335,24 +3136,30 @@ def upload_file_mock(*args): def add_comment_mock(*args): assert "An attempt to mirror a file from Cortex XSOAR was failed." 
in args[3] - mocker.patch.object(client, 'update', side_effect=update_ticket) - mocker.patch.object(client, 'upload_file', side_effect=upload_file_mock) - mocker.patch.object(client, 'add_comment', side_effect=add_comment_mock) + mocker.patch.object(client, "update", side_effect=update_ticket) + mocker.patch.object(client, "upload_file", side_effect=upload_file_mock) + mocker.patch.object(client, "add_comment", side_effect=add_comment_mock) - demisto_mocker = mocker.patch.object(demisto, 'error') + demisto_mocker = mocker.patch.object(demisto, "error") res = update_remote_system_command(client, args, params) - assert demisto_mocker.call_args[0][0] == "An attempt to mirror a file has failed. entry_id=entry-id, " \ - "file_name='test'\nERROR!!!" - assert res == '1234' + assert ( + demisto_mocker.call_args[0][0] == "An attempt to mirror a file has failed. entry_id=entry-id, " + "file_name='test'\nERROR!!!" + ) + assert res == "1234" -@pytest.mark.parametrize('mock_json, expected_results', - [ - ({'error': 'invalid client.'}, 'ServiceNow Error: invalid client.'), - ({'error': {'message': 'invalid client', 'detail': 'the client you have entered is invalid.'}}, - 'ServiceNow Error: invalid client, details: the client you have entered is invalid.') - ]) +@pytest.mark.parametrize( + "mock_json, expected_results", + [ + ({"error": "invalid client."}, "ServiceNow Error: invalid client."), + ( + {"error": {"message": "invalid client", "detail": "the client you have entered is invalid."}}, + "ServiceNow Error: invalid client, details: the client you have entered is invalid.", + ), + ], +) def test_send_request_with_str_error_response(mocker, mock_json, expected_results): """ Given: @@ -2368,31 +3175,54 @@ def test_send_request_with_str_error_response(mocker, mock_json, expected_result - case 1: Shouldn't attempt to extract inner fields from the error field, only present the error value. - case 2: Should attempt to extract inner fields from the error field, present the parsed extracted error values. 
""" - client = Client('server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name', display_date_format='yyyy-MM-dd') + client = Client( + "server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + display_date_format="yyyy-MM-dd", + ) class MockResponse: def __init__(self, mock_json): - self.text = 'some text' + self.text = "some text" self.json_data = mock_json self.status_code = 400 def json(self): return self.json_data - mocker.patch.object(requests, 'request', return_value=MockResponse(mock_json)) + + mocker.patch.object(requests, "request", return_value=MockResponse(mock_json)) with pytest.raises(Exception) as e: - client.send_request(path='table') + client.send_request(path="table") assert str(e.value) == expected_results -@pytest.mark.parametrize('ticket, expected_ticket', - [ - ({}, {}), - ({"assigned_to": ""}, {"assigned_to": ""}), - ({"assigned_to": {'link': 'https://test.service-now.com/api/now/table/sys_user/oscar@example.com', - 'value': 'oscar@example.com'}}, {'assigned_to': 'oscar@example.com'}) - ]) +@pytest.mark.parametrize( + "ticket, expected_ticket", + [ + ({}, {}), + ({"assigned_to": ""}, {"assigned_to": ""}), + ( + { + "assigned_to": { + "link": "https://test.service-now.com/api/now/table/sys_user/oscar@example.com", + "value": "oscar@example.com", + } + }, + {"assigned_to": "oscar@example.com"}, + ), + ], +) def test_parse_dict_ticket_fields_empty_ticket(ticket, expected_ticket): """ Given: @@ -2410,9 +3240,11 @@ def test_parse_dict_ticket_fields_empty_ticket(ticket, expected_ticket): - case 2: Should add assigned_to field with an empty string as a value. - case 3: Should add assigned_to field with the user email as a value. 
""" + class Client: def get(self, table, value, no_record_found_res): return USER_RESPONSE + parse_dict_ticket_fields(Client(), ticket) # type: ignore assert ticket == expected_ticket @@ -2458,14 +3290,17 @@ def test_format_incidents_response_with_display_values_with_incidents(): assert result[1]["assignment_group"] == "" -@pytest.mark.parametrize("input_string, expected", [ - ("2023-02-15 10:30:45", True), - ("invalid", False), - ("15.02.2023 10:30:45", False), - ("a2023-02-15 10:30:45", False), - ("2023-02-15 10:30:45a", False), - ("2023-02-15 10:30:45 a", False), -]) +@pytest.mark.parametrize( + "input_string, expected", + [ + ("2023-02-15 10:30:45", True), + ("invalid", False), + ("15.02.2023 10:30:45", False), + ("a2023-02-15 10:30:45", False), + ("2023-02-15 10:30:45a", False), + ("2023-02-15 10:30:45 a", False), + ], +) def test_is_time_field(input_string, expected): """ Given: @@ -2480,29 +3315,29 @@ def test_is_time_field(input_string, expected): def test_get_attachment_command_success(): client = MagicMock() - args = {'sys_id': '12345'} + args = {"sys_id": "12345"} mock_attachments = [ - {'file_name': 'file1.txt', 'content': 'file1 content'}, - {'file_name': 'file2.txt', 'content': 'file2 content'} + {"file_name": "file1.txt", "content": "file1 content"}, + {"file_name": "file2.txt", "content": "file2 content"}, ] client.get_ticket_attachment_entries = MagicMock(return_value=mock_attachments) result = get_attachment_command(client, args) - client.get_ticket_attachment_entries.assert_called_once_with('12345') + client.get_ticket_attachment_entries.assert_called_once_with("12345") assert isinstance(result, list) assert isinstance(result[0], CommandResults) - assert result[0].readable_output == 'Successfully retrieved attachments for ticket with sys id 12345.' + assert result[0].readable_output == "Successfully retrieved attachments for ticket with sys id 12345." assert result[1] == mock_attachments def test_get_attachment_command_missing_sys_id(): client = MagicMock() - args = {'sys_id': '12345'} + args = {"sys_id": "12345"} mock_attachments = [] client.get_ticket_attachment_entries = MagicMock(return_value=mock_attachments) result = get_attachment_command(client, args) - client.get_ticket_attachment_entries.assert_called_once_with('12345') + client.get_ticket_attachment_entries.assert_called_once_with("12345") assert isinstance(result, CommandResults) - assert result.readable_output == 'Ticket with sys id 12345 has no attachments to retrieve.' + assert result.readable_output == "Ticket with sys id 12345 has no attachments to retrieve." 
def test_incident_id_in_last_fetched_updates_correctly(mocker): @@ -2514,15 +3349,18 @@ def test_incident_id_in_last_fetched_updates_correctly(mocker): Then: It should remove the id without modifying the existing integration context keys """ - mocker.patch.object(ServiceNowv2, 'get_integration_context', - return_value={"access_token": "token", "last_fetched_incident_ids": ['ABC123', 'XYZ789']}) - res = mocker.patch.object(ServiceNowv2, 'set_integration_context') + mocker.patch.object( + ServiceNowv2, + "get_integration_context", + return_value={"access_token": "token", "last_fetched_incident_ids": ["ABC123", "XYZ789"]}, + ) + res = mocker.patch.object(ServiceNowv2, "set_integration_context") # Executing the function with the incident id to be checked is_new_incident("XYZ789") # Setup verification context with wrapper to cover the whole integration context if necessary - expected_context = {"access_token": "token", "last_fetched_incident_ids": ['ABC123']} + expected_context = {"access_token": "token", "last_fetched_incident_ids": ["ABC123"]} # Verifying that set_integration_context was called with the correct new context res.assert_called_once_with(expected_context) @@ -2538,10 +3376,13 @@ def test_incident_id_not_in_last_fetched(mocker): It should not modify the integration context """ # Mock the get_integration_context to return some incident IDs which does not include the tested ID - mocker.patch.object(ServiceNowv2, 'get_integration_context', - return_value={"access_token": "token", "last_fetched_incident_ids": ['ABC123', 'XYZ789']}) + mocker.patch.object( + ServiceNowv2, + "get_integration_context", + return_value={"access_token": "token", "last_fetched_incident_ids": ["ABC123", "XYZ789"]}, + ) # Mock the set_integration_context to check it is not called - res = mocker.patch.object(ServiceNowv2, 'set_integration_context') + res = mocker.patch.object(ServiceNowv2, "set_integration_context") # Executing the function with an incident id that is not in the context's list is_new_incident("DEF456") diff --git a/Packs/ServiceNow/Integrations/ServiceNowv2/test_data/created_ticket_context.py b/Packs/ServiceNow/Integrations/ServiceNowv2/test_data/created_ticket_context.py index 1e799852401c..ab152d29f53b 100644 --- a/Packs/ServiceNow/Integrations/ServiceNowv2/test_data/created_ticket_context.py +++ b/Packs/ServiceNow/Integrations/ServiceNowv2/test_data/created_ticket_context.py @@ -1,25 +1,56 @@ CREATED_TICKET_CONTEXT_CREATE_CO_FROM_TEMPLATE_COMMAND = { - 'ID': {'display_value': '11ab5ccfdb784d505da401a3ca96190a', 'value': '11ab5ccfdb784d505da401a3ca96190a'}, - 'Summary': {'display_value': 'Add network switch to cabinet', 'value': 'Add network switch to cabinet'}, - 'Number': {'display_value': 'CHG0031889', 'value': 'CHG0031889'}, - 'CreatedOn': {'display_value': '06/01/2022 04:12:43', 'value': '2022-01-06 12:12:43', - 'display_value_internal': '2022-01-06 04:12:43'}, 'Active': {'display_value': 'true', 'value': True}, - 'AdditionalComments': {'display_value': '', 'value': ''}, 'CloseCode': {'display_value': '', 'value': ''}, - 'OpenedAt': {'display_value': '06/01/2022 04:12:43', 'value': '2022-01-06 12:12:43', - 'display_value_internal': '2022-01-06 04:12:43'}, 'OpenedBy': '46428af1db202c1026fca015ca961994', - 'Creator': '46428af1db202c1026fca015ca961994', 'Priority': '4 - Low', 'State': {'display_value': 'New', 'value': -5.0} + "ID": {"display_value": "11ab5ccfdb784d505da401a3ca96190a", "value": "11ab5ccfdb784d505da401a3ca96190a"}, + "Summary": {"display_value": "Add network switch to 
cabinet", "value": "Add network switch to cabinet"}, + "Number": {"display_value": "CHG0031889", "value": "CHG0031889"}, + "CreatedOn": { + "display_value": "06/01/2022 04:12:43", + "value": "2022-01-06 12:12:43", + "display_value_internal": "2022-01-06 04:12:43", + }, + "Active": {"display_value": "true", "value": True}, + "AdditionalComments": {"display_value": "", "value": ""}, + "CloseCode": {"display_value": "", "value": ""}, + "OpenedAt": { + "display_value": "06/01/2022 04:12:43", + "value": "2022-01-06 12:12:43", + "display_value_internal": "2022-01-06 04:12:43", + }, + "OpenedBy": "46428af1db202c1026fca015ca961994", + "Creator": "46428af1db202c1026fca015ca961994", + "Priority": "4 - Low", + "State": {"display_value": "New", "value": -5.0}, } CREATED_TICKET_CONTEXT_GET_TASKS_FOR_CO_COMMAND = [ - {'ID': 'a9f030cfc611227600f37bb8d031ab1b', 'Name': 'CTASK0010001', 'Description': 'Back-Up Database', 'State': '1 - Open'}, - {'ID': 'a9f0a2d2c6112276001c51b4e5715988', 'Name': 'CTASK0010002', 'Description': 'Take VmWare snapshot of Environment', - 'State': '1 - Open'}, - {'ID': 'a9f1ab97c611227600a920ad739997ea', 'Name': 'CTASK0010003', 'Description': 'Install Oracle 10g onto SD1', - 'State': '1 - Open'}, - {'ID': 'a9f2473bc61122760086a27688428a40', 'Name': 'CTASK0010004', 'Description': 'Configure Siebel Software for new DB', - 'State': '1 - Open'}, - {'ID': 'a9f2e5bdc61122760052c1250f7ac503', 'Name': 'CTASK0010005', 'Description': 'Preliminary System Testing', - 'State': '1 - Open'}, - {'ID': 'a9f3703ac611227601245c4f5fa75b6d', 'Name': 'CTASK0010006', 'Description': 'Multi-User Testing and QA', - 'State': '1 - Open'} + {"ID": "a9f030cfc611227600f37bb8d031ab1b", "Name": "CTASK0010001", "Description": "Back-Up Database", "State": "1 - Open"}, + { + "ID": "a9f0a2d2c6112276001c51b4e5715988", + "Name": "CTASK0010002", + "Description": "Take VmWare snapshot of Environment", + "State": "1 - Open", + }, + { + "ID": "a9f1ab97c611227600a920ad739997ea", + "Name": "CTASK0010003", + "Description": "Install Oracle 10g onto SD1", + "State": "1 - Open", + }, + { + "ID": "a9f2473bc61122760086a27688428a40", + "Name": "CTASK0010004", + "Description": "Configure Siebel Software for new DB", + "State": "1 - Open", + }, + { + "ID": "a9f2e5bdc61122760052c1250f7ac503", + "Name": "CTASK0010005", + "Description": "Preliminary System Testing", + "State": "1 - Open", + }, + { + "ID": "a9f3703ac611227601245c4f5fa75b6d", + "Name": "CTASK0010006", + "Description": "Multi-User Testing and QA", + "State": "1 - Open", + }, ] diff --git a/Packs/ServiceNow/Integrations/ServiceNowv2/test_data/response_constants.py b/Packs/ServiceNow/Integrations/ServiceNowv2/test_data/response_constants.py index b5ac743fc981..4875a5feed98 100644 --- a/Packs/ServiceNow/Integrations/ServiceNowv2/test_data/response_constants.py +++ b/Packs/ServiceNow/Integrations/ServiceNowv2/test_data/response_constants.py @@ -1,273 +1,836 @@ RESPONSE_TICKET = { - 'parent': '', 'made_sla': 'false', 'caused_by': '', 'watch_list': '', 'upon_reject': '', - 'sys_updated_on': '2020-04-02 14:03:31', - 'child_incidents': '', 'hold_reason': '', 'approval_history': '', 'number': 'INC0000039', 'resolved_by': '', - 'sys_updated_by': 'admin', - 'opened_by': {'link': 'demisto.com', 'value': 'test'}, - 'user_input': '', 'sys_created_on': '2019-09-05 00:42:29', - 'sys_domain': {'link': 'demisto.com', 'value': 'global'}, 'state': '1', 'sys_created_by': 'admin', - 'knowledge': 'false', 'order': '', 'calendar_stc': '', 'closed_at': '', - 'cmdb_ci': {'link': 'demisto.com', 
'value': 'test'}, 'delivery_plan': '', 'impact': '2', 'active': 'true', - 'work_notes_list': '', 'business_service': '', 'priority': '4', 'sys_domain_path': '/', 'rfc': '', - 'time_worked': '', 'expected_start': '', 'opened_at': '2019-09-05 00:41:01', 'business_duration': '', - 'group_list': '', 'work_end': '', 'caller_id': {'link': 'demisto.com', 'value': 'test'}, - 'reopened_time': '', 'resolved_at': '', 'approval_set': '', 'subcategory': '', 'work_notes': '', - 'short_description': 'Trouble getting to Oregon mail server', 'close_code': '', 'correlation_display': '', - 'delivery_task': '', 'work_start': '', 'assignment_group': {'link': 'demisto.com', 'value': 'test'}, - 'additional_assignee_list': '', 'business_stc': '', - 'description': 'Unable to access Oregon mail server. Is it down?', 'calendar_duration': '', 'close_notes': '', - 'notify': '1', 'sys_class_name': 'incident', 'closed_by': '', 'follow_up': '', 'parent_incident': '', - 'sys_id': 'sys_id', 'contact_type': 'phone', 'reopened_by': '', 'incident_state': '1', 'urgency': '3', - 'problem_id': '', 'company': {'link': 'demisto.com', 'value': 'test'}, 'reassignment_count': '', - 'u_custom_field_test': 'NYC', 'activity_due': '', 'assigned_to': '', 'severity': '3', 'comments': '', - 'approval': 'not requested', 'sla_due': '2019-09-26 00:41:01', 'comments_and_work_notes': '', 'due_date': '', - 'sys_mod_count': '8', 'reopen_count': '', 'sys_tags': '', 'escalation': '0', 'upon_approval': '', - 'correlation_id': '', 'location': {'link': 'demisto.com', 'value': 'test'}, 'category': 'network' + "parent": "", + "made_sla": "false", + "caused_by": "", + "watch_list": "", + "upon_reject": "", + "sys_updated_on": "2020-04-02 14:03:31", + "child_incidents": "", + "hold_reason": "", + "approval_history": "", + "number": "INC0000039", + "resolved_by": "", + "sys_updated_by": "admin", + "opened_by": {"link": "demisto.com", "value": "test"}, + "user_input": "", + "sys_created_on": "2019-09-05 00:42:29", + "sys_domain": {"link": "demisto.com", "value": "global"}, + "state": "1", + "sys_created_by": "admin", + "knowledge": "false", + "order": "", + "calendar_stc": "", + "closed_at": "", + "cmdb_ci": {"link": "demisto.com", "value": "test"}, + "delivery_plan": "", + "impact": "2", + "active": "true", + "work_notes_list": "", + "business_service": "", + "priority": "4", + "sys_domain_path": "/", + "rfc": "", + "time_worked": "", + "expected_start": "", + "opened_at": "2019-09-05 00:41:01", + "business_duration": "", + "group_list": "", + "work_end": "", + "caller_id": {"link": "demisto.com", "value": "test"}, + "reopened_time": "", + "resolved_at": "", + "approval_set": "", + "subcategory": "", + "work_notes": "", + "short_description": "Trouble getting to Oregon mail server", + "close_code": "", + "correlation_display": "", + "delivery_task": "", + "work_start": "", + "assignment_group": {"link": "demisto.com", "value": "test"}, + "additional_assignee_list": "", + "business_stc": "", + "description": "Unable to access Oregon mail server. 
Is it down?", + "calendar_duration": "", + "close_notes": "", + "notify": "1", + "sys_class_name": "incident", + "closed_by": "", + "follow_up": "", + "parent_incident": "", + "sys_id": "sys_id", + "contact_type": "phone", + "reopened_by": "", + "incident_state": "1", + "urgency": "3", + "problem_id": "", + "company": {"link": "demisto.com", "value": "test"}, + "reassignment_count": "", + "u_custom_field_test": "NYC", + "activity_due": "", + "assigned_to": "", + "severity": "3", + "comments": "", + "approval": "not requested", + "sla_due": "2019-09-26 00:41:01", + "comments_and_work_notes": "", + "due_date": "", + "sys_mod_count": "8", + "reopen_count": "", + "sys_tags": "", + "escalation": "0", + "upon_approval": "", + "correlation_id": "", + "location": {"link": "demisto.com", "value": "test"}, + "category": "network", } RESPONSE_TICKET_ASSIGNED = { - 'result': [ + "result": [ { - 'parent': '', 'made_sla': 'false', 'caused_by': '', 'watch_list': '', 'upon_reject': '', - 'sys_updated_on': '2020-04-02 14:03:31', - 'child_incidents': '', 'hold_reason': '', 'approval_history': '', 'number': 'INC0000039', 'resolved_by': '', - 'sys_updated_by': 'admin', - 'opened_by': {'link': 'demisto.com', 'value': 'test'}, - 'user_input': '', 'sys_created_on': '2019-09-05 00:42:29', - 'sys_domain': {'link': 'demisto.com', 'value': 'global'}, 'state': '1', 'sys_created_by': 'admin', - 'knowledge': 'false', 'order': '', 'calendar_stc': '', 'closed_at': '', - 'cmdb_ci': {'link': 'demisto.com', 'value': 'test'}, 'delivery_plan': '', 'impact': '2', 'active': 'true', - 'work_notes_list': '', 'business_service': '', 'priority': '4', 'sys_domain_path': '/', 'rfc': '', - 'time_worked': '', 'expected_start': '', 'opened_at': '2019-09-05 00:41:01', 'business_duration': '', - 'group_list': '', 'work_end': '', 'caller_id': {'link': 'demisto.com', 'value': 'test'}, - 'reopened_time': '', 'resolved_at': '', 'approval_set': '', 'subcategory': '', 'work_notes': '', - 'short_description': 'Trouble getting to Oregon mail server', 'close_code': '', 'correlation_display': '', - 'delivery_task': '', 'work_start': '', 'assignment_group': {'link': 'demisto.com', 'value': 'test'}, - 'additional_assignee_list': '', 'business_stc': '', - 'description': 'Unable to access Oregon mail server. 
Is it down?', 'calendar_duration': '', - 'close_notes': '', 'notify': '1', 'sys_class_name': 'incident', - 'closed_by': '', 'follow_up': '', 'parent_incident': '', - 'sys_id': 'sys_id', 'contact_type': 'phone', 'reopened_by': '', 'incident_state': '1', 'urgency': '3', - 'problem_id': '', 'company': {'link': 'demisto.com', 'value': 'test'}, 'reassignment_count': '', - 'u_custom_field_test': 'NYC', 'activity_due': '', 'assigned_to': {"link": "link", "value": "123"}, - 'severity': '3', 'comments': '', - 'approval': 'not requested', 'sla_due': '2019-09-26 00:41:01', - 'comments_and_work_notes': '', 'due_date': '', - 'sys_mod_count': '8', 'reopen_count': '', 'sys_tags': '', 'escalation': '0', 'upon_approval': '', - 'correlation_id': '', 'location': {'link': 'demisto.com', 'value': 'test'}, 'category': 'network' + "parent": "", + "made_sla": "false", + "caused_by": "", + "watch_list": "", + "upon_reject": "", + "sys_updated_on": "2020-04-02 14:03:31", + "child_incidents": "", + "hold_reason": "", + "approval_history": "", + "number": "INC0000039", + "resolved_by": "", + "sys_updated_by": "admin", + "opened_by": {"link": "demisto.com", "value": "test"}, + "user_input": "", + "sys_created_on": "2019-09-05 00:42:29", + "sys_domain": {"link": "demisto.com", "value": "global"}, + "state": "1", + "sys_created_by": "admin", + "knowledge": "false", + "order": "", + "calendar_stc": "", + "closed_at": "", + "cmdb_ci": {"link": "demisto.com", "value": "test"}, + "delivery_plan": "", + "impact": "2", + "active": "true", + "work_notes_list": "", + "business_service": "", + "priority": "4", + "sys_domain_path": "/", + "rfc": "", + "time_worked": "", + "expected_start": "", + "opened_at": "2019-09-05 00:41:01", + "business_duration": "", + "group_list": "", + "work_end": "", + "caller_id": {"link": "demisto.com", "value": "test"}, + "reopened_time": "", + "resolved_at": "", + "approval_set": "", + "subcategory": "", + "work_notes": "", + "short_description": "Trouble getting to Oregon mail server", + "close_code": "", + "correlation_display": "", + "delivery_task": "", + "work_start": "", + "assignment_group": {"link": "demisto.com", "value": "test"}, + "additional_assignee_list": "", + "business_stc": "", + "description": "Unable to access Oregon mail server. 
Is it down?", + "calendar_duration": "", + "close_notes": "", + "notify": "1", + "sys_class_name": "incident", + "closed_by": "", + "follow_up": "", + "parent_incident": "", + "sys_id": "sys_id", + "contact_type": "phone", + "reopened_by": "", + "incident_state": "1", + "urgency": "3", + "problem_id": "", + "company": {"link": "demisto.com", "value": "test"}, + "reassignment_count": "", + "u_custom_field_test": "NYC", + "activity_due": "", + "assigned_to": {"link": "link", "value": "123"}, + "severity": "3", + "comments": "", + "approval": "not requested", + "sla_due": "2019-09-26 00:41:01", + "comments_and_work_notes": "", + "due_date": "", + "sys_mod_count": "8", + "reopen_count": "", + "sys_tags": "", + "escalation": "0", + "upon_approval": "", + "correlation_id": "", + "location": {"link": "demisto.com", "value": "test"}, + "category": "network", } ] } -RESPONSE_TICKET_MIRROR = {'result': [{ - 'parent': '', 'made_sla': 'false', 'caused_by': '', 'watch_list': '', 'upon_reject': '', - 'sys_updated_on': '2020-04-02 14:03:31', - 'child_incidents': '', 'hold_reason': '', 'approval_history': '', 'number': 'INC0000039', 'resolved_by': '', - 'sys_updated_by': 'admin', - 'opened_by': {'link': 'demisto.com', 'value': 'test'}, - 'user_input': '', 'sys_created_on': '2019-09-05 00:42:29', - 'sys_domain': {'link': 'demisto.com', 'value': 'global'}, 'state': '1', 'sys_created_by': 'admin', - 'knowledge': 'false', 'order': '', 'calendar_stc': '', 'closed_at': '', - 'cmdb_ci': {'link': 'demisto.com', 'value': 'test'}, 'delivery_plan': '', 'impact': '2', 'active': 'true', - 'work_notes_list': '', 'business_service': '', 'priority': '4', 'sys_domain_path': '/', 'rfc': '', - 'time_worked': '', 'expected_start': '', 'opened_at': '2019-09-05 00:41:01', 'business_duration': '', - 'group_list': '', 'work_end': '', 'caller_id': '', - 'reopened_time': '', 'resolved_at': '', 'approval_set': '', 'subcategory': '', 'work_notes': '', - 'short_description': 'Trouble getting to Oregon mail server', 'close_code': '', 'correlation_display': '', - 'delivery_task': '', 'work_start': '', 'assignment_group': - {"link": "https://dev59633.service-now.com/api/now/table/sys_user_group/8a5055c9c61122780043563ef53438e3", - "value": "8a5055c9c61122780043563ef53438e3"}, - 'additional_assignee_list': '', 'business_stc': '', - 'description': 'Unable to access Oregon mail server. 
Is it down?', 'calendar_duration': '', 'close_notes': '', - 'notify': '1', 'sys_class_name': 'incident', 'closed_by': '', 'follow_up': '', 'parent_incident': '', - 'sys_id': 'sys_id', 'contact_type': 'phone', 'reopened_by': '', 'incident_state': '1', 'urgency': '3', - 'problem_id': '', 'company': {'link': 'demisto.com', 'value': 'test'}, 'reassignment_count': '', - 'u_custom_field_test': 'NYC', 'activity_due': '', 'assigned_to': '', 'severity': '3', 'comments': '', - 'approval': 'not requested', 'sla_due': '2019-09-26 00:41:01', 'comments_and_work_notes': '', 'due_date': '', - 'sys_mod_count': '8', 'reopen_count': '', 'sys_tags': '', 'escalation': '0', 'upon_approval': '', - 'correlation_id': '', 'location': {'link': 'demisto.com', 'value': 'test'}, 'category': 'network' -}] +RESPONSE_TICKET_MIRROR = { + "result": [ + { + "parent": "", + "made_sla": "false", + "caused_by": "", + "watch_list": "", + "upon_reject": "", + "sys_updated_on": "2020-04-02 14:03:31", + "child_incidents": "", + "hold_reason": "", + "approval_history": "", + "number": "INC0000039", + "resolved_by": "", + "sys_updated_by": "admin", + "opened_by": {"link": "demisto.com", "value": "test"}, + "user_input": "", + "sys_created_on": "2019-09-05 00:42:29", + "sys_domain": {"link": "demisto.com", "value": "global"}, + "state": "1", + "sys_created_by": "admin", + "knowledge": "false", + "order": "", + "calendar_stc": "", + "closed_at": "", + "cmdb_ci": {"link": "demisto.com", "value": "test"}, + "delivery_plan": "", + "impact": "2", + "active": "true", + "work_notes_list": "", + "business_service": "", + "priority": "4", + "sys_domain_path": "/", + "rfc": "", + "time_worked": "", + "expected_start": "", + "opened_at": "2019-09-05 00:41:01", + "business_duration": "", + "group_list": "", + "work_end": "", + "caller_id": "", + "reopened_time": "", + "resolved_at": "", + "approval_set": "", + "subcategory": "", + "work_notes": "", + "short_description": "Trouble getting to Oregon mail server", + "close_code": "", + "correlation_display": "", + "delivery_task": "", + "work_start": "", + "assignment_group": { + "link": "https://dev59633.service-now.com/api/now/table/sys_user_group/8a5055c9c61122780043563ef53438e3", + "value": "8a5055c9c61122780043563ef53438e3", + }, + "additional_assignee_list": "", + "business_stc": "", + "description": "Unable to access Oregon mail server. 
Is it down?", + "calendar_duration": "", + "close_notes": "", + "notify": "1", + "sys_class_name": "incident", + "closed_by": "", + "follow_up": "", + "parent_incident": "", + "sys_id": "sys_id", + "contact_type": "phone", + "reopened_by": "", + "incident_state": "1", + "urgency": "3", + "problem_id": "", + "company": {"link": "demisto.com", "value": "test"}, + "reassignment_count": "", + "u_custom_field_test": "NYC", + "activity_due": "", + "assigned_to": "", + "severity": "3", + "comments": "", + "approval": "not requested", + "sla_due": "2019-09-26 00:41:01", + "comments_and_work_notes": "", + "due_date": "", + "sys_mod_count": "8", + "reopen_count": "", + "sys_tags": "", + "escalation": "0", + "upon_approval": "", + "correlation_id": "", + "location": {"link": "demisto.com", "value": "test"}, + "category": "network", + } + ] } -USER_RESPONSE = { - 'result': {'first_name': 'Oscar', 'email': 'oscar@example.com'}} +USER_RESPONSE = {"result": {"first_name": "Oscar", "email": "oscar@example.com"}} -RESPONSE_CLOSING_TICKET_MIRROR_CLOSED = {'result': [{ - 'parent': '', 'made_sla': 'false', 'caused_by': '', 'watch_list': '', 'upon_reject': '', - 'sys_updated_on': '2020-04-02 14:03:31', - 'child_incidents': '', 'hold_reason': '', 'approval_history': '', 'number': 'INC0000039', 'resolved_by': '', - 'sys_updated_by': 'admin', - 'opened_by': {'link': 'demisto.com', 'value': 'test'}, - 'user_input': '', 'sys_created_on': '2019-09-05 00:42:29', - 'sys_domain': {'link': 'demisto.com', 'value': 'global'}, 'state': '6', 'sys_created_by': 'admin', - 'knowledge': 'false', 'order': '', 'calendar_stc': '', 'closed_at': '2019-09-10 00:41:01', - 'cmdb_ci': {'link': 'demisto.com', 'value': 'test'}, 'delivery_plan': '', 'impact': '2', 'active': 'true', - 'work_notes_list': '', 'business_service': '', 'priority': '4', 'sys_domain_path': '/', 'rfc': '', - 'time_worked': '', 'expected_start': '', 'opened_at': '2019-09-05 00:41:01', 'business_duration': '', - 'group_list': '', 'work_end': '', 'caller_id': '', - 'reopened_time': '', 'approval_set': '', 'subcategory': '', 'work_notes': '', - 'short_description': 'Trouble getting to Oregon mail server', 'close_code': '', 'correlation_display': '', - 'delivery_task': '', 'work_start': '', 'assignment_group': {}, - 'additional_assignee_list': '', 'business_stc': '', - 'description': 'Unable to access Oregon mail server. 
Is it down?', 'calendar_duration': '', 'close_notes': 'Test', - 'notify': '1', 'sys_class_name': 'incident', 'closed_by': '', 'follow_up': '', 'parent_incident': '', - 'sys_id': 'sys_id', 'contact_type': 'phone', 'reopened_by': '', 'incident_state': '1', 'urgency': '3', - 'problem_id': '', 'company': {'link': 'demisto.com', 'value': 'test'}, 'reassignment_count': '', - 'u_custom_field_test': 'NYC', 'activity_due': '', 'assigned_to': '', 'severity': '3', 'comments': '', - 'approval': 'not requested', 'sla_due': '2019-09-26 00:41:01', 'comments_and_work_notes': '', 'due_date': '', - 'sys_mod_count': '8', 'reopen_count': '', 'sys_tags': '', 'escalation': '0', 'upon_approval': '', - 'correlation_id': '', 'location': {'link': 'demisto.com', 'value': 'test'}, 'category': 'network' -}] +RESPONSE_CLOSING_TICKET_MIRROR_CLOSED = { + "result": [ + { + "parent": "", + "made_sla": "false", + "caused_by": "", + "watch_list": "", + "upon_reject": "", + "sys_updated_on": "2020-04-02 14:03:31", + "child_incidents": "", + "hold_reason": "", + "approval_history": "", + "number": "INC0000039", + "resolved_by": "", + "sys_updated_by": "admin", + "opened_by": {"link": "demisto.com", "value": "test"}, + "user_input": "", + "sys_created_on": "2019-09-05 00:42:29", + "sys_domain": {"link": "demisto.com", "value": "global"}, + "state": "6", + "sys_created_by": "admin", + "knowledge": "false", + "order": "", + "calendar_stc": "", + "closed_at": "2019-09-10 00:41:01", + "cmdb_ci": {"link": "demisto.com", "value": "test"}, + "delivery_plan": "", + "impact": "2", + "active": "true", + "work_notes_list": "", + "business_service": "", + "priority": "4", + "sys_domain_path": "/", + "rfc": "", + "time_worked": "", + "expected_start": "", + "opened_at": "2019-09-05 00:41:01", + "business_duration": "", + "group_list": "", + "work_end": "", + "caller_id": "", + "reopened_time": "", + "approval_set": "", + "subcategory": "", + "work_notes": "", + "short_description": "Trouble getting to Oregon mail server", + "close_code": "", + "correlation_display": "", + "delivery_task": "", + "work_start": "", + "assignment_group": {}, + "additional_assignee_list": "", + "business_stc": "", + "description": "Unable to access Oregon mail server. 
Is it down?", + "calendar_duration": "", + "close_notes": "Test", + "notify": "1", + "sys_class_name": "incident", + "closed_by": "", + "follow_up": "", + "parent_incident": "", + "sys_id": "sys_id", + "contact_type": "phone", + "reopened_by": "", + "incident_state": "1", + "urgency": "3", + "problem_id": "", + "company": {"link": "demisto.com", "value": "test"}, + "reassignment_count": "", + "u_custom_field_test": "NYC", + "activity_due": "", + "assigned_to": "", + "severity": "3", + "comments": "", + "approval": "not requested", + "sla_due": "2019-09-26 00:41:01", + "comments_and_work_notes": "", + "due_date": "", + "sys_mod_count": "8", + "reopen_count": "", + "sys_tags": "", + "escalation": "0", + "upon_approval": "", + "correlation_id": "", + "location": {"link": "demisto.com", "value": "test"}, + "category": "network", + } + ] } -RESPONSE_CLOSING_TICKET_MIRROR_RESOLVED = {'result': [{ - 'parent': '', 'made_sla': 'false', 'caused_by': '', 'watch_list': '', 'upon_reject': '', - 'sys_updated_on': '2020-04-02 14:03:31', - 'child_incidents': '', 'hold_reason': '', 'approval_history': '', 'number': 'INC0000039', 'resolved_by': '', - 'sys_updated_by': 'admin', - 'opened_by': {'link': 'demisto.com', 'value': 'test'}, - 'user_input': '', 'sys_created_on': '2019-09-05 00:42:29', - 'sys_domain': {'link': 'demisto.com', 'value': 'global'}, 'state': '6', 'sys_created_by': 'admin', - 'knowledge': 'false', 'order': '', 'calendar_stc': '', 'resolved_at': '2019-09-10 00:41:01', - 'cmdb_ci': {'link': 'demisto.com', 'value': 'test'}, 'delivery_plan': '', 'impact': '2', 'active': 'true', - 'work_notes_list': '', 'business_service': '', 'priority': '4', 'sys_domain_path': '/', 'rfc': '', - 'time_worked': '', 'expected_start': '', 'opened_at': '2019-09-05 00:41:01', 'business_duration': '', - 'group_list': '', 'work_end': '', 'caller_id': '', - 'reopened_time': '', 'approval_set': '', 'subcategory': '', 'work_notes': '', - 'short_description': 'Trouble getting to Oregon mail server', 'close_code': '', 'correlation_display': '', - 'delivery_task': '', 'work_start': '', 'assignment_group': {}, - 'additional_assignee_list': '', 'business_stc': '', - 'description': 'Unable to access Oregon mail server. 
Is it down?', 'calendar_duration': '', 'close_notes': 'Test', - 'notify': '1', 'sys_class_name': 'incident', 'closed_by': '', 'follow_up': '', 'parent_incident': '', - 'sys_id': 'sys_id', 'contact_type': 'phone', 'reopened_by': '', 'incident_state': '1', 'urgency': '3', - 'problem_id': '', 'company': {'link': 'demisto.com', 'value': 'test'}, 'reassignment_count': '', - 'u_custom_field_test': 'NYC', 'activity_due': '', 'assigned_to': '', 'severity': '3', 'comments': '', - 'approval': 'not requested', 'sla_due': '2019-09-26 00:41:01', 'comments_and_work_notes': '', 'due_date': '', - 'sys_mod_count': '8', 'reopen_count': '', 'sys_tags': '', 'escalation': '0', 'upon_approval': '', - 'correlation_id': '', 'location': {'link': 'demisto.com', 'value': 'test'}, 'category': 'network' -}] +RESPONSE_CLOSING_TICKET_MIRROR_RESOLVED = { + "result": [ + { + "parent": "", + "made_sla": "false", + "caused_by": "", + "watch_list": "", + "upon_reject": "", + "sys_updated_on": "2020-04-02 14:03:31", + "child_incidents": "", + "hold_reason": "", + "approval_history": "", + "number": "INC0000039", + "resolved_by": "", + "sys_updated_by": "admin", + "opened_by": {"link": "demisto.com", "value": "test"}, + "user_input": "", + "sys_created_on": "2019-09-05 00:42:29", + "sys_domain": {"link": "demisto.com", "value": "global"}, + "state": "6", + "sys_created_by": "admin", + "knowledge": "false", + "order": "", + "calendar_stc": "", + "resolved_at": "2019-09-10 00:41:01", + "cmdb_ci": {"link": "demisto.com", "value": "test"}, + "delivery_plan": "", + "impact": "2", + "active": "true", + "work_notes_list": "", + "business_service": "", + "priority": "4", + "sys_domain_path": "/", + "rfc": "", + "time_worked": "", + "expected_start": "", + "opened_at": "2019-09-05 00:41:01", + "business_duration": "", + "group_list": "", + "work_end": "", + "caller_id": "", + "reopened_time": "", + "approval_set": "", + "subcategory": "", + "work_notes": "", + "short_description": "Trouble getting to Oregon mail server", + "close_code": "", + "correlation_display": "", + "delivery_task": "", + "work_start": "", + "assignment_group": {}, + "additional_assignee_list": "", + "business_stc": "", + "description": "Unable to access Oregon mail server. 
Is it down?", + "calendar_duration": "", + "close_notes": "Test", + "notify": "1", + "sys_class_name": "incident", + "closed_by": "", + "follow_up": "", + "parent_incident": "", + "sys_id": "sys_id", + "contact_type": "phone", + "reopened_by": "", + "incident_state": "1", + "urgency": "3", + "problem_id": "", + "company": {"link": "demisto.com", "value": "test"}, + "reassignment_count": "", + "u_custom_field_test": "NYC", + "activity_due": "", + "assigned_to": "", + "severity": "3", + "comments": "", + "approval": "not requested", + "sla_due": "2019-09-26 00:41:01", + "comments_and_work_notes": "", + "due_date": "", + "sys_mod_count": "8", + "reopen_count": "", + "sys_tags": "", + "escalation": "0", + "upon_approval": "", + "correlation_id": "", + "location": {"link": "demisto.com", "value": "test"}, + "category": "network", + } + ] } -RESPONSE_CLOSING_TICKET_MIRROR_CUSTOM = {'result': [{ - 'parent': '', 'made_sla': 'false', 'caused_by': '', 'watch_list': '', 'upon_reject': '', - 'sys_updated_on': '2020-04-02 14:03:31', - 'child_incidents': '', 'hold_reason': '', 'approval_history': '', 'number': 'INC0000039', 'resolved_by': '', - 'sys_updated_by': 'admin', - 'opened_by': {'link': 'demisto.com', 'value': 'test'}, - 'user_input': '', 'sys_created_on': '2019-09-05 00:42:29', - 'sys_domain': {'link': 'demisto.com', 'value': 'global'}, 'state': '9', 'sys_created_by': 'admin', - 'knowledge': 'false', 'order': '', 'calendar_stc': '', - 'cmdb_ci': {'link': 'demisto.com', 'value': 'test'}, 'delivery_plan': '', 'impact': '2', 'active': 'true', - 'work_notes_list': '', 'business_service': '', 'priority': '4', 'sys_domain_path': '/', 'rfc': '', - 'time_worked': '', 'expected_start': '', 'opened_at': '2019-09-05 00:41:01', 'business_duration': '', - 'group_list': '', 'work_end': '', 'caller_id': '', - 'reopened_time': '', 'approval_set': '', 'subcategory': '', 'work_notes': '', - 'short_description': 'Trouble getting to Oregon mail server', 'close_code': '', 'correlation_display': '', - 'delivery_task': '', 'work_start': '', 'assignment_group': {}, - 'additional_assignee_list': '', 'business_stc': '', - 'description': 'Unable to access Oregon mail server. 
Is it down?', 'calendar_duration': '', 'close_notes': 'Test', - 'notify': '1', 'sys_class_name': 'incident', 'closed_by': '', 'follow_up': '', 'parent_incident': '', - 'sys_id': 'sys_id', 'contact_type': 'phone', 'reopened_by': '', 'incident_state': '1', 'urgency': '3', - 'problem_id': '', 'company': {'link': 'demisto.com', 'value': 'test'}, 'reassignment_count': '', - 'u_custom_field_test': 'NYC', 'activity_due': '', 'assigned_to': '', 'severity': '3', 'comments': '', - 'approval': 'not requested', 'sla_due': '2019-09-26 00:41:01', 'comments_and_work_notes': '', 'due_date': '', - 'sys_mod_count': '8', 'reopen_count': '', 'sys_tags': '', 'escalation': '0', 'upon_approval': '', - 'correlation_id': '', 'location': {'link': 'demisto.com', 'value': 'test'}, 'category': 'network' -}] +RESPONSE_CLOSING_TICKET_MIRROR_CUSTOM = { + "result": [ + { + "parent": "", + "made_sla": "false", + "caused_by": "", + "watch_list": "", + "upon_reject": "", + "sys_updated_on": "2020-04-02 14:03:31", + "child_incidents": "", + "hold_reason": "", + "approval_history": "", + "number": "INC0000039", + "resolved_by": "", + "sys_updated_by": "admin", + "opened_by": {"link": "demisto.com", "value": "test"}, + "user_input": "", + "sys_created_on": "2019-09-05 00:42:29", + "sys_domain": {"link": "demisto.com", "value": "global"}, + "state": "9", + "sys_created_by": "admin", + "knowledge": "false", + "order": "", + "calendar_stc": "", + "cmdb_ci": {"link": "demisto.com", "value": "test"}, + "delivery_plan": "", + "impact": "2", + "active": "true", + "work_notes_list": "", + "business_service": "", + "priority": "4", + "sys_domain_path": "/", + "rfc": "", + "time_worked": "", + "expected_start": "", + "opened_at": "2019-09-05 00:41:01", + "business_duration": "", + "group_list": "", + "work_end": "", + "caller_id": "", + "reopened_time": "", + "approval_set": "", + "subcategory": "", + "work_notes": "", + "short_description": "Trouble getting to Oregon mail server", + "close_code": "", + "correlation_display": "", + "delivery_task": "", + "work_start": "", + "assignment_group": {}, + "additional_assignee_list": "", + "business_stc": "", + "description": "Unable to access Oregon mail server. 
Is it down?", + "calendar_duration": "", + "close_notes": "Test", + "notify": "1", + "sys_class_name": "incident", + "closed_by": "", + "follow_up": "", + "parent_incident": "", + "sys_id": "sys_id", + "contact_type": "phone", + "reopened_by": "", + "incident_state": "1", + "urgency": "3", + "problem_id": "", + "company": {"link": "demisto.com", "value": "test"}, + "reassignment_count": "", + "u_custom_field_test": "NYC", + "activity_due": "", + "assigned_to": "", + "severity": "3", + "comments": "", + "approval": "not requested", + "sla_due": "2019-09-26 00:41:01", + "comments_and_work_notes": "", + "due_date": "", + "sys_mod_count": "8", + "reopen_count": "", + "sys_tags": "", + "escalation": "0", + "upon_approval": "", + "correlation_id": "", + "location": {"link": "demisto.com", "value": "test"}, + "category": "network", + } + ] } -RESPONSE_GET_ATTACHMENT = {'result': [{ - 'size_bytes': '17', 'file_name': 'test.txt', 'sys_mod_count': '1', 'average_image_color': '', 'image_width': '', - 'sys_updated_on': '2020-08-16 17:54:16', 'sys_tags': '', 'table_name': 'incident', - 'sys_id': 'd722a127dbe6101053482fb74896195d', 'image_height': '', 'sys_updated_by': 'system', - 'download_link': 'https://dev59633.service-now.com/api/now/attachment/d722a127dbe6101053482fb74896195d/file', - 'content_type': 'text/plain', 'sys_created_on': '2020-08-16 17:54:15', 'size_compressed': '35', - 'compressed': 'true', 'state': 'available', 'table_sys_id': 'e6b06163dbe6101053482fb74896194e', - 'chunk_size_bytes': '700000', 'hash': '1669594220a92d73d62727293e988b4213b5b4829de36c3afe43c9b4f3ddf35e', - 'sys_created_by': 'admin'}]} +RESPONSE_GET_ATTACHMENT = { + "result": [ + { + "size_bytes": "17", + "file_name": "test.txt", + "sys_mod_count": "1", + "average_image_color": "", + "image_width": "", + "sys_updated_on": "2020-08-16 17:54:16", + "sys_tags": "", + "table_name": "incident", + "sys_id": "d722a127dbe6101053482fb74896195d", + "image_height": "", + "sys_updated_by": "system", + "download_link": "https://dev59633.service-now.com/api/now/attachment/d722a127dbe6101053482fb74896195d/file", + "content_type": "text/plain", + "sys_created_on": "2020-08-16 17:54:15", + "size_compressed": "35", + "compressed": "true", + "state": "available", + "table_sys_id": "e6b06163dbe6101053482fb74896194e", + "chunk_size_bytes": "700000", + "hash": "1669594220a92d73d62727293e988b4213b5b4829de36c3afe43c9b4f3ddf35e", + "sys_created_by": "admin", + } + ] +} RESPONSE_MULTIPLE_TICKET = [ { - 'parent': '', 'made_sla': 'false', 'caused_by': '', 'watch_list': '', 'upon_reject': '', - 'sys_updated_on': '2020-04-02 14:03:31', - 'child_incidents': '', 'hold_reason': '', 'approval_history': '', 'number': 'INC0000040', 'resolved_by': '', - 'sys_updated_by': 'admin', - 'opened_by': {'link': 'demisto.com', 'value': 'test2'}, - 'user_input': '', 'sys_created_on': '2019-09-05 00:42:29', - 'sys_domain': {'link': 'demisto.com', 'value': 'global'}, 'state': '1', 'sys_created_by': 'admin', - 'knowledge': 'false', 'order': '', 'calendar_stc': '', 'closed_at': '', - 'cmdb_ci': {'link': 'demisto.com', 'value': 'test'}, 'delivery_plan': '', 'impact': '2', 'active': 'true', - 'work_notes_list': '', 'business_service': '', 'priority': '4', 'sys_domain_path': '/', 'rfc': '', - 'time_worked': '', 'expected_start': '', 'opened_at': '2019-09-05 00:41:01', 'business_duration': '', - 'group_list': '', 'work_end': '', 'caller_id': {'link': 'demisto.com', 'value': 'test'}, - 'reopened_time': '', 'resolved_at': '', 'approval_set': '', 'subcategory': '', 'work_notes': 
'', - 'short_description': 'Trouble getting to Oregon mail server', 'close_code': '', 'correlation_display': '', - 'delivery_task': '', 'work_start': '', 'assignment_group': {'link': 'demisto.com', 'value': 'test'}, - 'additional_assignee_list': '', 'business_stc': '', - 'description': 'Unable to access Oregon mail server. Is it down?', 'calendar_duration': '', 'close_notes': '', - 'notify': '1', 'sys_class_name': 'incident', 'closed_by': '', 'follow_up': '', 'parent_incident': '', - 'sys_id': 'sys_id', 'contact_type': 'phone', 'reopened_by': '', 'incident_state': '1', 'urgency': '3', - 'problem_id': '', 'company': {'link': 'demisto.com', 'value': 'test'}, 'reassignment_count': '', - 'u_custom_field_test': 'NYC', 'activity_due': '', 'assigned_to': '', 'severity': '3', 'comments': '', - 'approval': 'not requested', 'sla_due': '2019-09-26 00:41:01', 'comments_and_work_notes': '', 'due_date': '', - 'sys_mod_count': '8', 'reopen_count': '', 'sys_tags': '', 'escalation': '0', 'upon_approval': '', - 'correlation_id': '', 'location': {'link': 'demisto.com', 'value': 'test'}, 'category': 'network' + "parent": "", + "made_sla": "false", + "caused_by": "", + "watch_list": "", + "upon_reject": "", + "sys_updated_on": "2020-04-02 14:03:31", + "child_incidents": "", + "hold_reason": "", + "approval_history": "", + "number": "INC0000040", + "resolved_by": "", + "sys_updated_by": "admin", + "opened_by": {"link": "demisto.com", "value": "test2"}, + "user_input": "", + "sys_created_on": "2019-09-05 00:42:29", + "sys_domain": {"link": "demisto.com", "value": "global"}, + "state": "1", + "sys_created_by": "admin", + "knowledge": "false", + "order": "", + "calendar_stc": "", + "closed_at": "", + "cmdb_ci": {"link": "demisto.com", "value": "test"}, + "delivery_plan": "", + "impact": "2", + "active": "true", + "work_notes_list": "", + "business_service": "", + "priority": "4", + "sys_domain_path": "/", + "rfc": "", + "time_worked": "", + "expected_start": "", + "opened_at": "2019-09-05 00:41:01", + "business_duration": "", + "group_list": "", + "work_end": "", + "caller_id": {"link": "demisto.com", "value": "test"}, + "reopened_time": "", + "resolved_at": "", + "approval_set": "", + "subcategory": "", + "work_notes": "", + "short_description": "Trouble getting to Oregon mail server", + "close_code": "", + "correlation_display": "", + "delivery_task": "", + "work_start": "", + "assignment_group": {"link": "demisto.com", "value": "test"}, + "additional_assignee_list": "", + "business_stc": "", + "description": "Unable to access Oregon mail server. 
Is it down?", + "calendar_duration": "", + "close_notes": "", + "notify": "1", + "sys_class_name": "incident", + "closed_by": "", + "follow_up": "", + "parent_incident": "", + "sys_id": "sys_id", + "contact_type": "phone", + "reopened_by": "", + "incident_state": "1", + "urgency": "3", + "problem_id": "", + "company": {"link": "demisto.com", "value": "test"}, + "reassignment_count": "", + "u_custom_field_test": "NYC", + "activity_due": "", + "assigned_to": "", + "severity": "3", + "comments": "", + "approval": "not requested", + "sla_due": "2019-09-26 00:41:01", + "comments_and_work_notes": "", + "due_date": "", + "sys_mod_count": "8", + "reopen_count": "", + "sys_tags": "", + "escalation": "0", + "upon_approval": "", + "correlation_id": "", + "location": {"link": "demisto.com", "value": "test"}, + "category": "network", }, { - 'parent': '', 'made_sla': 'false', 'caused_by': '', 'watch_list': '', 'upon_reject': '', - 'sys_updated_on': '2020-04-02 14:03:31', - 'child_incidents': '', 'hold_reason': '', 'approval_history': '', 'number': 'INC0000039', 'resolved_by': '', - 'sys_updated_by': 'admin', - 'opened_by': {'link': 'demisto.com', 'value': 'test'}, - 'user_input': '', 'sys_created_on': '2019-09-05 00:42:29', - 'sys_domain': {'link': 'demisto.com', 'value': 'global'}, 'state': '1', 'sys_created_by': 'admin', - 'knowledge': 'false', 'order': '', 'calendar_stc': '', 'closed_at': '', - 'cmdb_ci': {'link': 'demisto.com', 'value': 'test'}, 'delivery_plan': '', 'impact': '2', 'active': 'true', - 'work_notes_list': '', 'business_service': '', 'priority': '4', 'sys_domain_path': '/', 'rfc': '', - 'time_worked': '', 'expected_start': '', 'opened_at': '2019-09-05 00:41:01', 'business_duration': '', - 'group_list': '', 'work_end': '', 'caller_id': {'link': 'demisto.com', 'value': 'test'}, - 'reopened_time': '', 'resolved_at': '', 'approval_set': '', 'subcategory': '', 'work_notes': '', - 'short_description': 'Trouble getting to Oregon mail server', 'close_code': '', 'correlation_display': '', - 'delivery_task': '', 'work_start': '', 'assignment_group': {'link': 'demisto.com', 'value': 'test'}, - 'additional_assignee_list': '', 'business_stc': '', - 'description': 'Unable to access Oregon mail server. 
Is it down?', 'calendar_duration': '', 'close_notes': '', - 'notify': '1', 'sys_class_name': 'incident', 'closed_by': '', 'follow_up': '', 'parent_incident': '', - 'sys_id': 'sys_id', 'contact_type': 'phone', 'reopened_by': '', 'incident_state': '1', 'urgency': '3', - 'problem_id': '', 'company': {'link': 'demisto.com', 'value': 'test'}, 'reassignment_count': '', - 'u_custom_field_test': 'NYC', 'activity_due': '', 'assigned_to': '', 'severity': '3', 'comments': '', - 'approval': 'not requested', 'sla_due': '2019-09-26 00:41:01', 'comments_and_work_notes': '', 'due_date': '', - 'sys_mod_count': '8', 'reopen_count': '', 'sys_tags': '', 'escalation': '0', 'upon_approval': '', - 'correlation_id': '', 'location': {'link': 'demisto.com', 'value': 'test'}, 'category': 'network' - } + "parent": "", + "made_sla": "false", + "caused_by": "", + "watch_list": "", + "upon_reject": "", + "sys_updated_on": "2020-04-02 14:03:31", + "child_incidents": "", + "hold_reason": "", + "approval_history": "", + "number": "INC0000039", + "resolved_by": "", + "sys_updated_by": "admin", + "opened_by": {"link": "demisto.com", "value": "test"}, + "user_input": "", + "sys_created_on": "2019-09-05 00:42:29", + "sys_domain": {"link": "demisto.com", "value": "global"}, + "state": "1", + "sys_created_by": "admin", + "knowledge": "false", + "order": "", + "calendar_stc": "", + "closed_at": "", + "cmdb_ci": {"link": "demisto.com", "value": "test"}, + "delivery_plan": "", + "impact": "2", + "active": "true", + "work_notes_list": "", + "business_service": "", + "priority": "4", + "sys_domain_path": "/", + "rfc": "", + "time_worked": "", + "expected_start": "", + "opened_at": "2019-09-05 00:41:01", + "business_duration": "", + "group_list": "", + "work_end": "", + "caller_id": {"link": "demisto.com", "value": "test"}, + "reopened_time": "", + "resolved_at": "", + "approval_set": "", + "subcategory": "", + "work_notes": "", + "short_description": "Trouble getting to Oregon mail server", + "close_code": "", + "correlation_display": "", + "delivery_task": "", + "work_start": "", + "assignment_group": {"link": "demisto.com", "value": "test"}, + "additional_assignee_list": "", + "business_stc": "", + "description": "Unable to access Oregon mail server. 
Is it down?", + "calendar_duration": "", + "close_notes": "", + "notify": "1", + "sys_class_name": "incident", + "closed_by": "", + "follow_up": "", + "parent_incident": "", + "sys_id": "sys_id", + "contact_type": "phone", + "reopened_by": "", + "incident_state": "1", + "urgency": "3", + "problem_id": "", + "company": {"link": "demisto.com", "value": "test"}, + "reassignment_count": "", + "u_custom_field_test": "NYC", + "activity_due": "", + "assigned_to": "", + "severity": "3", + "comments": "", + "approval": "not requested", + "sla_due": "2019-09-26 00:41:01", + "comments_and_work_notes": "", + "due_date": "", + "sys_mod_count": "8", + "reopen_count": "", + "sys_tags": "", + "escalation": "0", + "upon_approval": "", + "correlation_id": "", + "location": {"link": "demisto.com", "value": "test"}, + "category": "network", + }, ] RESPONSE_UPDATE_TICKET = { - 'result': { - 'parent': '', 'made_sla': 'false', 'caused_by': '', 'watch_list': '', 'upon_reject': '', - 'sys_updated_on': '2020-04-02 14:03:31', - 'child_incidents': '', 'hold_reason': '', 'approval_history': '', 'number': 'INC0000039', 'resolved_by': '', - 'sys_updated_by': 'admin', - 'opened_by': {'link': 'demisto.com', 'value': 'test'}, - 'user_input': '', 'sys_created_on': '2019-09-05 00:42:29', - 'sys_domain': {'link': 'demisto.com', 'value': 'global'}, 'state': '1', 'sys_created_by': 'admin', - 'knowledge': 'false', 'order': '', 'calendar_stc': '', 'closed_at': '', - 'cmdb_ci': {'link': 'demisto.com', 'value': 'test'}, 'delivery_plan': '', 'impact': '2', 'active': 'true', - 'work_notes_list': '', 'business_service': '', 'priority': '4', 'sys_domain_path': '/', 'rfc': '', - 'time_worked': '', 'expected_start': '', 'opened_at': '2019-09-05 00:41:01', 'business_duration': '', - 'group_list': '', 'work_end': '', 'caller_id': {'link': 'demisto.com', 'value': 'test'}, - 'reopened_time': '', 'resolved_at': '', 'approval_set': '', 'subcategory': '', 'work_notes': '', - 'short_description': 'Trouble getting to Oregon mail server', 'close_code': '', 'correlation_display': '', - 'delivery_task': '', 'work_start': '', 'assignment_group': {'link': 'demisto.com', 'value': 'test'}, - 'additional_assignee_list': '', 'business_stc': '', - 'description': 'Unable to access Oregon mail server. 
Is it down?', 'calendar_duration': '', 'close_notes': '', - 'notify': '1', 'sys_class_name': 'incident', 'closed_by': '', 'follow_up': '', 'parent_incident': '', - 'sys_id': 'sys_id', 'contact_type': 'phone', 'reopened_by': '', 'incident_state': '1', 'urgency': '3', - 'problem_id': '', 'company': {'link': 'demisto.com', 'value': 'test'}, 'reassignment_count': '', - 'u_custom_field_test': 'NYC', 'activity_due': '', 'assigned_to': '', 'severity': '3', 'comments': '', - 'approval': 'not requested', 'sla_due': '2019-09-26 00:41:01', 'comments_and_work_notes': '', 'due_date': '', - 'sys_mod_count': '8', 'reopen_count': '', 'sys_tags': '', 'escalation': '0', 'upon_approval': '', - 'correlation_id': '', 'location': {'link': 'demisto.com', 'value': 'test'}, 'category': 'network' + "result": { + "parent": "", + "made_sla": "false", + "caused_by": "", + "watch_list": "", + "upon_reject": "", + "sys_updated_on": "2020-04-02 14:03:31", + "child_incidents": "", + "hold_reason": "", + "approval_history": "", + "number": "INC0000039", + "resolved_by": "", + "sys_updated_by": "admin", + "opened_by": {"link": "demisto.com", "value": "test"}, + "user_input": "", + "sys_created_on": "2019-09-05 00:42:29", + "sys_domain": {"link": "demisto.com", "value": "global"}, + "state": "1", + "sys_created_by": "admin", + "knowledge": "false", + "order": "", + "calendar_stc": "", + "closed_at": "", + "cmdb_ci": {"link": "demisto.com", "value": "test"}, + "delivery_plan": "", + "impact": "2", + "active": "true", + "work_notes_list": "", + "business_service": "", + "priority": "4", + "sys_domain_path": "/", + "rfc": "", + "time_worked": "", + "expected_start": "", + "opened_at": "2019-09-05 00:41:01", + "business_duration": "", + "group_list": "", + "work_end": "", + "caller_id": {"link": "demisto.com", "value": "test"}, + "reopened_time": "", + "resolved_at": "", + "approval_set": "", + "subcategory": "", + "work_notes": "", + "short_description": "Trouble getting to Oregon mail server", + "close_code": "", + "correlation_display": "", + "delivery_task": "", + "work_start": "", + "assignment_group": {"link": "demisto.com", "value": "test"}, + "additional_assignee_list": "", + "business_stc": "", + "description": "Unable to access Oregon mail server. 
Is it down?", + "calendar_duration": "", + "close_notes": "", + "notify": "1", + "sys_class_name": "incident", + "closed_by": "", + "follow_up": "", + "parent_incident": "", + "sys_id": "sys_id", + "contact_type": "phone", + "reopened_by": "", + "incident_state": "1", + "urgency": "3", + "problem_id": "", + "company": {"link": "demisto.com", "value": "test"}, + "reassignment_count": "", + "u_custom_field_test": "NYC", + "activity_due": "", + "assigned_to": "", + "severity": "3", + "comments": "", + "approval": "not requested", + "sla_due": "2019-09-26 00:41:01", + "comments_and_work_notes": "", + "due_date": "", + "sys_mod_count": "8", + "reopen_count": "", + "sys_tags": "", + "escalation": "0", + "upon_approval": "", + "correlation_id": "", + "location": {"link": "demisto.com", "value": "test"}, + "category": "network", } } RESPONSE_CREATE_TICKET = { @@ -317,10 +880,7 @@ "notify": "1", "number": "INC0010007", "opened_at": "2020-04-06 13:04:44", - "opened_by": { - "link": "demisto.com", - "value": "test" - }, + "opened_by": {"link": "demisto.com", "value": "test"}, "order": "", "parent": "", "parent_incident": "", @@ -341,10 +901,7 @@ "sys_class_name": "incident", "sys_created_by": "admin", "sys_created_on": "2020-04-06 13:04:44", - "sys_domain": { - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "sys_id", "sys_mod_count": "0", @@ -361,7 +918,7 @@ "work_end": "", "work_notes": "", "work_notes_list": "", - "work_start": "" + "work_start": "", } } RESPONSE_CREATE_TICKET_WITH_OUT_JSON = "The ticket was successfully created." @@ -380,10 +937,7 @@ "business_duration": "", "business_service": "", "calendar_duration": "", - "cat_item": { - "link": "demisto.com", - "value": "admin" - }, + "cat_item": {"link": "demisto.com", "value": "admin"}, "close_notes": "", "closed_at": "", "closed_by": "", @@ -393,10 +947,7 @@ "company": "", "configuration_item": "", "contact_type": "", - "context": { - "link": "demisto.com", - "value": "admin" - }, + "context": {"link": "demisto.com", "value": "admin"}, "correlation_display": "", "correlation_id": "", "delivery_plan": "", @@ -414,10 +965,7 @@ "made_sla": "true", "number": "RITM0010028", "opened_at": "2020-04-16 15:33:00", - "opened_by": { - "link": "demisto.com", - "value": "admin" - }, + "opened_by": {"link": "demisto.com", "value": "admin"}, "order": "", "order_guide": "", "parent": "", @@ -427,10 +975,7 @@ "reassignment_count": "0", "recurring_frequency": "", "recurring_price": "0", - "request": { - "link": "demisto.com", - "value": "admin" - }, + "request": {"link": "demisto.com", "value": "admin"}, "sc_catalog": "", "short_description": "Microsoft Access", "sla_due": "", @@ -439,10 +984,7 @@ "sys_class_name": "sc_req_item", "sys_created_by": "admin", "sys_created_on": "2020-04-16 15:33:00", - "sys_domain": { - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "1234", "sys_mod_count": "2", @@ -458,7 +1000,7 @@ "work_end": "", "work_notes": "", "work_notes_list": "", - "work_start": "" + "work_start": "", } } RESPONSE_UPDATE_TICKET_ADDITIONAL = { @@ -470,19 +1012,13 @@ "approval_history": "", "approval_set": "", "assigned_to": "", - "assignment_group": { - "link": "demisto.com", - "value": "admin" - }, + "assignment_group": {"link": "demisto.com", "value": "admin"}, "business_duration": "", "business_service": "", "business_stc": "", "calendar_duration": "", 
"calendar_stc": "", - "caller_id": { - "link": "demisto.com", - "value": "admin" - }, + "caller_id": {"link": "demisto.com", "value": "admin"}, "category": "network", "caused_by": "", "child_incidents": "", @@ -490,16 +1026,10 @@ "close_notes": "", "closed_at": "", "closed_by": "", - "cmdb_ci": { - "link": "demisto.com", - "value": "admin" - }, + "cmdb_ci": {"link": "demisto.com", "value": "admin"}, "comments": "", "comments_and_work_notes": "", - "company": { - "link": "demisto.com", - "value": "admin" - }, + "company": {"link": "demisto.com", "value": "admin"}, "contact_type": "phone", "correlation_display": "", "correlation_id": "", @@ -515,18 +1045,12 @@ "impact": "3", "incident_state": "1", "knowledge": "false", - "location": { - "link": "demisto.com", - "value": "admin" - }, + "location": {"link": "demisto.com", "value": "admin"}, "made_sla": "false", "notify": "1", "number": "INC0000039", "opened_at": "2019-09-05 00:41:01", - "opened_by": { - "link": "demisto.com", - "value": "admin" - }, + "opened_by": {"link": "demisto.com", "value": "admin"}, "order": "", "parent": "", "parent_incident": "", @@ -547,10 +1071,7 @@ "sys_class_name": "incident", "sys_created_by": "admin", "sys_created_on": "2019-09-05 00:42:29", - "sys_domain": { - "link": "demisto.com", - "value": "admin" - }, + "sys_domain": {"link": "demisto.com", "value": "admin"}, "sys_domain_path": "/", "sys_id": "1234", "sys_mod_count": "15", @@ -567,7 +1088,7 @@ "work_end": "", "work_notes": "", "work_notes_list": "", - "work_start": "" + "work_start": "", } } RESPONSE_QUERY_TICKETS = { @@ -579,37 +1100,22 @@ "approval": "", "approval_history": "", "approval_set": "", - "assigned_to": { - "link": "demisto.com", - "value": "admin" - }, - "assignment_group": { - "link": "demisto.com", - "value": "admin" - }, + "assigned_to": {"link": "demisto.com", "value": "admin"}, + "assignment_group": {"link": "demisto.com", "value": "admin"}, "business_duration": "1970-01-22 21:46:21", "business_service": "", "business_stc": "1892781", "calendar_duration": "1970-04-02 20:46:21", "calendar_stc": "7937181", - "caller_id": { - "link": "demisto.com", - "value": "admin" - }, + "caller_id": {"link": "demisto.com", "value": "admin"}, "category": "network", "caused_by": "", "child_incidents": "", "close_code": "Closed/Resolved by Caller", "close_notes": "Closed before close notes were made mandatory\n\t\t", "closed_at": "2019-09-03 23:10:06", - "closed_by": { - "link": "demisto.com", - "value": "admin" - }, - "cmdb_ci": { - "link": "demisto.com", - "value": "admin" - }, + "closed_by": {"link": "demisto.com", "value": "admin"}, + "cmdb_ci": {"link": "demisto.com", "value": "admin"}, "comments": "", "comments_and_work_notes": "", "company": "", @@ -628,35 +1134,23 @@ "impact": "1", "incident_state": "7", "knowledge": "false", - "location": { - "link": "demisto.com", - "value": "admin" - }, + "location": {"link": "demisto.com", "value": "admin"}, "made_sla": "false", "notify": "1", "number": "INC0000001", "opened_at": "2019-09-02 23:09:51", - "opened_by": { - "link": "demisto.com", - "value": "admin" - }, + "opened_by": {"link": "demisto.com", "value": "admin"}, "order": "", "parent": "", "parent_incident": "", "priority": "1", - "problem_id": { - "link": "demisto.com", - "value": "admin" - }, + "problem_id": {"link": "demisto.com", "value": "admin"}, "reassignment_count": "1", "reopen_count": "", "reopened_by": "", "reopened_time": "", "resolved_at": "2019-12-03 19:56:12", - "resolved_by": { - "link": "demisto.com", - "value": "admin" - }, + 
"resolved_by": {"link": "demisto.com", "value": "admin"}, "rfc": "", "severity": "1", "short_description": "Can't read email", @@ -666,10 +1160,7 @@ "sys_class_name": "incident", "sys_created_by": "pat", "sys_created_on": "2018-04-03 18:24:13", - "sys_domain": { - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "sys_id", "sys_mod_count": "21", @@ -686,7 +1177,7 @@ "work_end": "", "work_notes": "", "work_notes_list": "", - "work_start": "" + "work_start": "", }, { "active": "true", @@ -695,23 +1186,14 @@ "approval": "", "approval_history": "", "approval_set": "", - "assigned_to": { - "link": "demisto.com", - "value": "admin" - }, - "assignment_group": { - "link": "demisto.com", - "value": "admin" - }, + "assigned_to": {"link": "demisto.com", "value": "admin"}, + "assignment_group": {"link": "demisto.com", "value": "admin"}, "business_duration": "", "business_service": "", "business_stc": "", "calendar_duration": "", "calendar_stc": "", - "caller_id": { - "link": "demisto.com", - "value": "admin" - }, + "caller_id": {"link": "demisto.com", "value": "admin"}, "category": "network", "caused_by": "", "child_incidents": "", @@ -719,10 +1201,7 @@ "close_notes": "", "closed_at": "", "closed_by": "", - "cmdb_ci": { - "link": "demisto.com", - "value": "admin" - }, + "cmdb_ci": {"link": "demisto.com", "value": "admin"}, "comments": "", "comments_and_work_notes": "", "company": "", @@ -741,26 +1220,17 @@ "impact": "1", "incident_state": "3", "knowledge": "false", - "location": { - "link": "demisto.com", - "value": "admin" - }, + "location": {"link": "demisto.com", "value": "admin"}, "made_sla": "false", "notify": "1", "number": "INC0000002", "opened_at": "2019-08-27 23:07:12", - "opened_by": { - "link": "demisto.com", - "value": "admin" - }, + "opened_by": {"link": "demisto.com", "value": "admin"}, "order": "", "parent": "", "parent_incident": "", "priority": "1", - "problem_id": { - "link": "demisto.com", - "value": "admin" - }, + "problem_id": {"link": "demisto.com", "value": "admin"}, "reassignment_count": "1", "reopen_count": "", "reopened_by": "", @@ -776,10 +1246,7 @@ "sys_class_name": "incident", "sys_created_by": "pat", "sys_created_on": "2018-03-23 22:30:06", - "sys_domain": { - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "sys_id", "sys_mod_count": "17", @@ -796,7 +1263,7 @@ "work_end": "", "work_notes": "", "work_notes_list": "", - "work_start": "" + "work_start": "", }, { "active": "true", @@ -805,23 +1272,14 @@ "approval": "", "approval_history": "", "approval_set": "", - "assigned_to": { - "link": "demisto.com", - "value": "admin" - }, - "assignment_group": { - "link": "demisto.com", - "value": "admin" - }, + "assigned_to": {"link": "demisto.com", "value": "admin"}, + "assignment_group": {"link": "demisto.com", "value": "admin"}, "business_duration": "", "business_service": "", "business_stc": "", "calendar_duration": "", "calendar_stc": "", - "caller_id": { - "link": "demisto.com", - "value": "admin" - }, + "caller_id": {"link": "demisto.com", "value": "admin"}, "category": "network", "caused_by": "", "child_incidents": "", @@ -832,10 +1290,7 @@ "cmdb_ci": "", "comments": "", "comments_and_work_notes": "", - "company": { - "link": "demisto.com", - "value": "admin" - }, + "company": {"link": "demisto.com", "value": "admin"}, "contact_type": "", "correlation_display": "", "correlation_id": "", @@ 
-851,18 +1306,12 @@ "impact": "1", "incident_state": "2", "knowledge": "false", - "location": { - "link": "demisto.com", - "value": "admin" - }, + "location": {"link": "demisto.com", "value": "admin"}, "made_sla": "false", "notify": "1", "number": "INC0000003", "opened_at": "2019-09-03 23:07:30", - "opened_by": { - "link": "demisto.com", - "value": "admin" - }, + "opened_by": {"link": "demisto.com", "value": "admin"}, "order": "", "parent": "", "parent_incident": "", @@ -883,10 +1332,7 @@ "sys_class_name": "incident", "sys_created_by": "admin", "sys_created_on": "2018-04-07 14:41:46", - "sys_domain": { - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "sys_id", "sys_mod_count": "12", @@ -903,8 +1349,8 @@ "work_end": "", "work_notes": "", "work_notes_list": "", - "work_start": "" - } + "work_start": "", + }, ] } RESPONSE_QUERY_TICKETS_EXCLUDE_REFERENCE_LINK = { @@ -997,7 +1443,7 @@ "upon_approval": "", "correlation_id": "", "location": "2617 South Robinson Avenue, Oklahoma City,OK", - "category": "Network" + "category": "Network", } ] } @@ -1009,43 +1455,25 @@ "approval": "", "approval_history": "", "approval_set": "", - "assigned_to": { - "link": "demisto.com", - "value": "admin" - }, - "assignment_group": { - "link": "demisto.com", - "value": "admin" - }, + "assigned_to": {"link": "demisto.com", "value": "admin"}, + "assignment_group": {"link": "demisto.com", "value": "admin"}, "business_duration": "1970-01-22 21:46:21", "business_service": "", "business_stc": "1892781", "calendar_duration": "1970-04-02 20:46:21", "calendar_stc": "7937181", - "caller_id": { - "link": "demisto.com", - "value": "admin" - }, + "caller_id": {"link": "demisto.com", "value": "admin"}, "category": "network", "caused_by": "", "child_incidents": "", "close_code": "Closed/Resolved by Caller", "close_notes": "Closed before close notes were made mandatory\n\t\t", "closed_at": "2019-09-03 23:10:06", - "closed_by": { - "link": "demisto.com", - "value": "admin" - }, - "cmdb_ci": { - "link": "demisto.com", - "value": "admin" - }, + "closed_by": {"link": "demisto.com", "value": "admin"}, + "cmdb_ci": {"link": "demisto.com", "value": "admin"}, "comments": "", "comments_and_work_notes": "", - "company": { - "link": "demisto.com", - "value": "admin" - }, + "company": {"link": "demisto.com", "value": "admin"}, "contact_type": "", "correlation_display": "", "correlation_id": "", @@ -1061,35 +1489,23 @@ "impact": "1", "incident_state": "7", "knowledge": "false", - "location": { - "link": "demisto.com", - "value": "admin" - }, + "location": {"link": "demisto.com", "value": "admin"}, "made_sla": "false", "notify": "1", "number": "INC0000001", "opened_at": "2019-09-02 23:09:51", - "opened_by": { - "link": "demisto.com", - "value": "admin" - }, + "opened_by": {"link": "demisto.com", "value": "admin"}, "order": "", "parent": "", "parent_incident": "", "priority": "1", - "problem_id": { - "link": "demisto.com", - "value": "admin" - }, + "problem_id": {"link": "demisto.com", "value": "admin"}, "reassignment_count": "1", "reopen_count": "", "reopened_by": "", "reopened_time": "", "resolved_at": "2019-12-03 19:56:12", - "resolved_by": { - "link": "demisto.com", - "value": "admin" - }, + "resolved_by": {"link": "demisto.com", "value": "admin"}, "rfc": "", "severity": "1", "short_description": "Can't read email", @@ -1099,10 +1515,7 @@ "sys_class_name": "incident", "sys_created_by": "pat", "sys_created_on": "2018-04-03 18:24:13", - "sys_domain": 
{ - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "sys_id", "sys_mod_count": "23", @@ -1119,7 +1532,7 @@ "work_end": "", "work_notes": "", "work_notes_list": "", - "work_start": "" + "work_start": "", } } RESPONSE_ADD_COMMENT = { @@ -1130,43 +1543,25 @@ "approval": "", "approval_history": "", "approval_set": "", - "assigned_to": { - "link": "demisto.com", - "value": "admin" - }, - "assignment_group": { - "link": "demisto.com", - "value": "admin" - }, + "assigned_to": {"link": "demisto.com", "value": "admin"}, + "assignment_group": {"link": "demisto.com", "value": "admin"}, "business_duration": "1970-01-22 21:46:21", "business_service": "", "business_stc": "1892781", "calendar_duration": "1970-04-02 20:46:21", "calendar_stc": "7937181", - "caller_id": { - "link": "demisto.com", - "value": "admin" - }, + "caller_id": {"link": "demisto.com", "value": "admin"}, "category": "network", "caused_by": "", "child_incidents": "", "close_code": "Closed/Resolved by Caller", "close_notes": "Closed before close notes were made mandatory\n\t\t", "closed_at": "2019-09-03 23:10:06", - "closed_by": { - "link": "demisto.com", - "value": "admin" - }, - "cmdb_ci": { - "link": "demisto.com", - "value": "admin" - }, + "closed_by": {"link": "demisto.com", "value": "admin"}, + "cmdb_ci": {"link": "demisto.com", "value": "admin"}, "comments": "", "comments_and_work_notes": "", - "company": { - "link": "demisto.com", - "value": "admin" - }, + "company": {"link": "demisto.com", "value": "admin"}, "contact_type": "", "correlation_display": "", "correlation_id": "", @@ -1182,35 +1577,23 @@ "impact": "1", "incident_state": "7", "knowledge": "false", - "location": { - "link": "demisto.com", - "value": "admin" - }, + "location": {"link": "demisto.com", "value": "admin"}, "made_sla": "false", "notify": "1", "number": "INC0000001", "opened_at": "2019-09-02 23:09:51", - "opened_by": { - "link": "demisto.com", - "value": "admin" - }, + "opened_by": {"link": "demisto.com", "value": "admin"}, "order": "", "parent": "", "parent_incident": "", "priority": "1", - "problem_id": { - "link": "demisto.com", - "value": "admin" - }, + "problem_id": {"link": "demisto.com", "value": "admin"}, "reassignment_count": "1", "reopen_count": "", "reopened_by": "", "reopened_time": "", "resolved_at": "2019-12-03 19:56:12", - "resolved_by": { - "link": "demisto.com", - "value": "admin" - }, + "resolved_by": {"link": "demisto.com", "value": "admin"}, "rfc": "", "severity": "1", "short_description": "Can't read email", @@ -1220,10 +1603,7 @@ "sys_class_name": "incident", "sys_created_by": "pat", "sys_created_on": "2018-04-03 18:24:13", - "sys_domain": { - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "sys_id", "sys_mod_count": "23", @@ -1240,7 +1620,7 @@ "work_end": "", "work_notes": "", "work_notes_list": "", - "work_start": "" + "work_start": "", } } RESPONSE_UPLOAD_FILE = { @@ -1264,7 +1644,7 @@ "sys_updated_by": "admin", "sys_updated_on": "2020-04-07 08:07:44", "table_name": "incident", - "table_sys_id": "system_id" + "table_sys_id": "system_id", } } RESPONSE_GET_TICKET_NOTES = { @@ -1277,8 +1657,8 @@ "sys_created_on": "2020-04-07 07:32:12", "sys_id": "sys_id", "sys_tags": "", - "value": "[code]\u003ca class=\"web\" target=\"_blank\" href=\"http://www.demisto.com\"" - " \u003edemsito_link\u003c/a\u003e[/code]" + "value": '[code]\u003ca class="web" 
target="_blank" href="http://www.demisto.com"' + " \u003edemsito_link\u003c/a\u003e[/code]", }, { "element": "work_notes", @@ -1288,8 +1668,8 @@ "sys_created_on": "2020-04-07 07:25:51", "sys_id": "sys_id", "sys_tags": "", - "value": "[code]\u003ca class=\"web\" target=\"_blank\" href=\"http://www.demisto.com\"" - " \u003edemsito_link\u003c/a\u003e[/code]" + "value": '[code]\u003ca class="web" target="_blank" href="http://www.demisto.com"' + " \u003edemsito_link\u003c/a\u003e[/code]", }, { "element": "work_notes", @@ -1299,7 +1679,7 @@ "sys_created_on": "2020-04-07 07:46:34", "sys_id": "sys_id", "sys_tags": "", - "value": "Nice work!" + "value": "Nice work!", }, { "element": "work_notes", @@ -1309,7 +1689,7 @@ "sys_created_on": "2020-04-07 07:46:25", "sys_id": "sys_id", "sys_tags": "", - "value": "Nice work!" + "value": "Nice work!", }, { "element": "work_notes", @@ -1319,9 +1699,9 @@ "sys_created_on": "2020-04-07 07:26:01", "sys_id": "d31a605ddb845010ebea8a18489619e2", "sys_tags": "", - "value": "[code]\u003ca class=\"web\" target=\"_blank\" href=\"http://www.demisto.com\"" - " \u003edemsito_link\u003c/a\u003e[/code]" - } + "value": '[code]\u003ca class="web" target="_blank" href="http://www.demisto.com"' + " \u003edemsito_link\u003c/a\u003e[/code]", + }, ] } RESPONSE_GET_RECORD = { @@ -1329,39 +1709,21 @@ "acquisition_method": "", "asset_tag": "P1000479", "assigned": "2017-10-31 07:00:00", - "assigned_to": { - "link": "demisto.com", - "value": "admin" - }, + "assigned_to": {"link": "demisto.com", "value": "admin"}, "beneficiary": "", "checked_in": "", "checked_out": "", - "ci": { - "link": "demisto.com", - "value": "admin" - }, + "ci": {"link": "demisto.com", "value": "admin"}, "comments": "", - "company": { - "link": "demisto.com", - "value": "admin" - }, + "company": {"link": "demisto.com", "value": "admin"}, "cost": "1799.99", - "cost_center": { - "link": "demisto.com", - "value": "admin" - }, + "cost_center": {"link": "demisto.com", "value": "admin"}, "delivery_date": "2017-04-20 07:00:00", - "department": { - "link": "demisto.com", - "value": "admin" - }, + "department": {"link": "demisto.com", "value": "admin"}, "depreciated_amount": "1023.64", - "depreciation": { - "link": "demisto.com", - "value": "admin" - }, + "depreciation": {"link": "demisto.com", "value": "admin"}, "depreciation_date": "2017-06-03 07:00:00", - "display_name": "P1000479 - Apple MacBook Pro 15\"", + "display_name": 'P1000479 - Apple MacBook Pro 15"', "disposal_reason": "", "due": "", "due_in": "", @@ -1372,19 +1734,10 @@ "invoice_number": "", "justification": "", "lease_id": "", - "location": { - "link": "demisto.com", - "value": "admin" - }, + "location": {"link": "demisto.com", "value": "admin"}, "managed_by": "", - "model": { - "link": "demisto.com", - "value": "admin" - }, - "model_category": { - "link": "demisto.com", - "value": "admin" - }, + "model": {"link": "demisto.com", "value": "admin"}, + "model_category": {"link": "demisto.com", "value": "admin"}, "old_status": "", "old_substatus": "", "order_date": "2017-03-27 07:00:00", @@ -1411,22 +1764,16 @@ "sys_class_name": "alm_hardware", "sys_created_by": "admin", "sys_created_on": "2019-02-23 08:14:21", - "sys_domain": { - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "sys_id", "sys_mod_count": "18", "sys_tags": "", "sys_updated_by": "system", "sys_updated_on": "2020-04-07 06:31:50", - "vendor": { - "link": "demisto.com", - "value": "admin" - }, + "vendor": 
{"link": "demisto.com", "value": "admin"}, "warranty_expiration": "2020-06-01", - "work_notes": "" + "work_notes": "", } } RESPONSE_UPDATE_RECORD = { @@ -1434,39 +1781,21 @@ "acquisition_method": "", "asset_tag": "P1000479", "assigned": "2017-10-31 07:00:00", - "assigned_to": { - "link": "demisto.com", - "value": "admin" - }, + "assigned_to": {"link": "demisto.com", "value": "admin"}, "beneficiary": "", "checked_in": "", "checked_out": "", - "ci": { - "link": "demisto.com", - "value": "admin" - }, + "ci": {"link": "demisto.com", "value": "admin"}, "comments": "", - "company": { - "link": "demisto.com", - "value": "admin" - }, + "company": {"link": "demisto.com", "value": "admin"}, "cost": "1799.99", - "cost_center": { - "link": "demisto.com", - "value": "admin" - }, + "cost_center": {"link": "demisto.com", "value": "admin"}, "delivery_date": "2017-04-20 07:00:00", - "department": { - "link": "demisto.com", - "value": "admin" - }, + "department": {"link": "demisto.com", "value": "admin"}, "depreciated_amount": "1023.64", - "depreciation": { - "link": "demisto.com", - "value": "admin" - }, + "depreciation": {"link": "demisto.com", "value": "admin"}, "depreciation_date": "2017-06-03 07:00:00", - "display_name": "P1000479 - Apple MacBook Pro 15\"", + "display_name": 'P1000479 - Apple MacBook Pro 15"', "disposal_reason": "", "due": "", "due_in": "", @@ -1477,19 +1806,10 @@ "invoice_number": "", "justification": "", "lease_id": "", - "location": { - "link": "demisto.com", - "value": "admin" - }, + "location": {"link": "demisto.com", "value": "admin"}, "managed_by": "", - "model": { - "link": "demisto.com", - "value": "admin" - }, - "model_category": { - "link": "demisto.com", - "value": "admin" - }, + "model": {"link": "demisto.com", "value": "admin"}, + "model_category": {"link": "demisto.com", "value": "admin"}, "old_status": "", "old_substatus": "", "order_date": "2017-03-27 07:00:00", @@ -1516,42 +1836,88 @@ "sys_class_name": "alm_hardware", "sys_created_by": "admin", "sys_created_on": "2019-02-23 08:14:21", - "sys_domain": { - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "sys_id", "sys_mod_count": "18", "sys_tags": "", "sys_updated_by": "system", "sys_updated_on": "2020-04-07 06:31:50", - "vendor": { - "link": "demisto.com", - "value": "admin" - }, + "vendor": {"link": "demisto.com", "value": "admin"}, "warranty_expiration": "2020-06-01", - "work_notes": "" + "work_notes": "", } } RESPONSE_CREATE_RECORD = { "result": { - 'parent': '', 'skip_sync': 'false', 'residual_date': '', 'residual': '0', - 'sys_updated_on': '2020-04-07 12:48:38', 'request_line': '', 'sys_updated_by': 'admin', - 'due_in': '', 'model_category': '', 'sys_created_on': '2020-04-07 12:48:38', - 'sys_domain': {'link': 'demisto.com', 'value': 'global'}, 'disposal_reason': '', 'model': '', - 'install_date': '', 'gl_account': '', 'invoice_number': '', 'sys_created_by': 'admin', - 'warranty_expiration': '', 'asset_tag': 'P4325434', 'depreciated_amount': '0', 'substatus': '', - 'pre_allocated': 'false', 'owned_by': '', 'checked_out': '', 'display_name': 'P4325434 -', - 'sys_domain_path': '/', 'delivery_date': '', 'retirement_date': '', 'beneficiary': '', - 'install_status': '1', 'cost_center': '', 'supported_by': '', 'assigned': '', 'purchase_date': '', - 'work_notes': '', 'managed_by': '', 'sys_class_name': 'alm_asset', 'sys_id': 'sys_id', 'po_number': '', - 'stockroom': '', 'checked_in': '', 'resale_price': '0', 'vendor': '', 
'company': '', 'retired': '', - 'justification': '', 'department': '', 'expenditure_type': '', 'depreciation': '', 'assigned_to': '', - 'depreciation_date': '', 'old_status': '', 'comments': '', 'cost': '0', 'quantity': '1', - 'acquisition_method': '', 'ci': '', 'sys_mod_count': '0', 'old_substatus': '', 'serial_number': '', - 'sys_tags': '', 'order_date': '', 'support_group': '', 'reserved_for': '', 'due': '', 'location': '', - 'lease_id': '', 'salvage_value': '0' + "parent": "", + "skip_sync": "false", + "residual_date": "", + "residual": "0", + "sys_updated_on": "2020-04-07 12:48:38", + "request_line": "", + "sys_updated_by": "admin", + "due_in": "", + "model_category": "", + "sys_created_on": "2020-04-07 12:48:38", + "sys_domain": {"link": "demisto.com", "value": "global"}, + "disposal_reason": "", + "model": "", + "install_date": "", + "gl_account": "", + "invoice_number": "", + "sys_created_by": "admin", + "warranty_expiration": "", + "asset_tag": "P4325434", + "depreciated_amount": "0", + "substatus": "", + "pre_allocated": "false", + "owned_by": "", + "checked_out": "", + "display_name": "P4325434 -", + "sys_domain_path": "/", + "delivery_date": "", + "retirement_date": "", + "beneficiary": "", + "install_status": "1", + "cost_center": "", + "supported_by": "", + "assigned": "", + "purchase_date": "", + "work_notes": "", + "managed_by": "", + "sys_class_name": "alm_asset", + "sys_id": "sys_id", + "po_number": "", + "stockroom": "", + "checked_in": "", + "resale_price": "0", + "vendor": "", + "company": "", + "retired": "", + "justification": "", + "department": "", + "expenditure_type": "", + "depreciation": "", + "assigned_to": "", + "depreciation_date": "", + "old_status": "", + "comments": "", + "cost": "0", + "quantity": "1", + "acquisition_method": "", + "ci": "", + "sys_mod_count": "0", + "old_substatus": "", + "serial_number": "", + "sys_tags": "", + "order_date": "", + "support_group": "", + "reserved_for": "", + "due": "", + "location": "", + "lease_id": "", + "salvage_value": "0", } } RESPONSE_QUERY_TABLE = { @@ -1560,39 +1926,21 @@ "acquisition_method": "", "asset_tag": "P1000807", "assigned": "2018-08-07 07:00:00", - "assigned_to": { - "link": "demisto.com", - "value": "admin" - }, + "assigned_to": {"link": "demisto.com", "value": "admin"}, "beneficiary": "", "checked_in": "", "checked_out": "", - "ci": { - "link": "demisto.com", - "value": "admin" - }, + "ci": {"link": "demisto.com", "value": "admin"}, "comments": "", - "company": { - "link": "demisto.com", - "value": "admin" - }, + "company": {"link": "demisto.com", "value": "admin"}, "cost": "2499.99", - "cost_center": { - "link": "demisto.com", - "value": "admin" - }, + "cost_center": {"link": "demisto.com", "value": "admin"}, "delivery_date": "2018-03-14 08:00:00", - "department": { - "link": "demisto.com", - "value": "admin" - }, + "department": {"link": "demisto.com", "value": "admin"}, "depreciated_amount": "934.59", - "depreciation": { - "link": "demisto.com", - "value": "admin" - }, + "depreciation": {"link": "demisto.com", "value": "admin"}, "depreciation_date": "2018-05-27 07:00:00", - "display_name": "P1000807 - Apple MacBook Pro 17\"", + "display_name": 'P1000807 - Apple MacBook Pro 17"', "disposal_reason": "", "due": "", "due_in": "", @@ -1603,19 +1951,10 @@ "invoice_number": "", "justification": "", "lease_id": "", - "location": { - "link": "demisto.com", - "value": "admin" - }, + "location": {"link": "demisto.com", "value": "admin"}, "managed_by": "", - "model": { - "link": "demisto.com", - "value": 
"admin" - }, - "model_category": { - "link": "demisto.com", - "value": "admin" - }, + "model": {"link": "demisto.com", "value": "admin"}, + "model_category": {"link": "demisto.com", "value": "admin"}, "old_status": "", "old_substatus": "", "order_date": "2018-02-22 08:00:00", @@ -1642,60 +1981,36 @@ "sys_class_name": "alm_hardware", "sys_created_by": "admin", "sys_created_on": "2019-02-23 08:14:09", - "sys_domain": { - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "sys_id2", "sys_mod_count": "20", "sys_tags": "", "sys_updated_by": "system", "sys_updated_on": "2020-04-09 06:20:19", - "vendor": { - "link": "demisto.com", - "value": "admin" - }, + "vendor": {"link": "demisto.com", "value": "admin"}, "warranty_expiration": "2021-05-25", - "work_notes": "" + "work_notes": "", }, { "acquisition_method": "", "asset_tag": "P1000637", "assigned": "2019-07-03 07:00:00", - "assigned_to": { - "link": "demisto.com", - "value": "admin" - }, + "assigned_to": {"link": "demisto.com", "value": "admin"}, "beneficiary": "", "checked_in": "", "checked_out": "", - "ci": { - "link": "demisto.com", - "value": "admin" - }, + "ci": {"link": "demisto.com", "value": "admin"}, "comments": "", - "company": { - "link": "demisto.com", - "value": "admin" - }, + "company": {"link": "demisto.com", "value": "admin"}, "cost": "1599.99", - "cost_center": { - "link": "demisto.com", - "value": "admin" - }, + "cost_center": {"link": "demisto.com", "value": "admin"}, "delivery_date": "2018-12-19 08:00:00", - "department": { - "link": "demisto.com", - "value": "admin" - }, + "department": {"link": "demisto.com", "value": "admin"}, "depreciated_amount": "389.71", - "depreciation": { - "link": "admin", - "value": "demisto.com" - }, + "depreciation": {"link": "admin", "value": "demisto.com"}, "depreciation_date": "2019-01-19 08:00:00", - "display_name": "P1000637 - Apple MacBook Air 13\"", + "display_name": 'P1000637 - Apple MacBook Air 13"', "disposal_reason": "", "due": "", "due_in": "", @@ -1706,19 +2021,10 @@ "invoice_number": "", "justification": "", "lease_id": "", - "location": { - "link": "demisto.com", - "value": "admin" - }, + "location": {"link": "demisto.com", "value": "admin"}, "managed_by": "", - "model": { - "link": "demisto.com", - "value": "admin" - }, - "model_category": { - "link": "demisto.com", - "value": "admin" - }, + "model": {"link": "demisto.com", "value": "admin"}, + "model_category": {"link": "demisto.com", "value": "admin"}, "old_status": "", "old_substatus": "", "order_date": "2018-11-24 08:00:00", @@ -1745,60 +2051,36 @@ "sys_class_name": "alm_hardware", "sys_created_by": "admin", "sys_created_on": "2019-02-23 08:13:36", - "sys_domain": { - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "sys_id3", "sys_mod_count": "20", "sys_tags": "", "sys_updated_by": "system", "sys_updated_on": "2020-04-09 06:20:19", - "vendor": { - "link": "demisto.com", - "value": "admin" - }, + "vendor": {"link": "demisto.com", "value": "admin"}, "warranty_expiration": "2022-01-17", - "work_notes": "" + "work_notes": "", }, { "acquisition_method": "", "asset_tag": "P1000412", "assigned": "2017-08-17 07:00:00", - "assigned_to": { - "link": "demisto.com", - "value": "admin" - }, + "assigned_to": {"link": "demisto.com", "value": "admin"}, "beneficiary": "", "checked_in": "", "checked_out": "", - "ci": { - "link": "demisto.com", - "value": "admin" 
- }, + "ci": {"link": "demisto.com", "value": "admin"}, "comments": "", - "company": { - "link": "demisto.com", - "value": "admin" - }, + "company": {"link": "demisto.com", "value": "admin"}, "cost": "2499.99", - "cost_center": { - "link": "demisto.com", - "value": "admin" - }, + "cost_center": {"link": "demisto.com", "value": "admin"}, "delivery_date": "2017-01-20 08:00:00", - "department": { - "link": "demisto.com", - "value": "admin" - }, + "department": {"link": "demisto.com", "value": "admin"}, "depreciated_amount": "1564.03", - "depreciation": { - "link": "demisto.com", - "value": "admin" - }, + "depreciation": {"link": "demisto.com", "value": "admin"}, "depreciation_date": "2017-02-20 08:00:00", - "display_name": "P1000412 - Apple MacBook Pro 17\"", + "display_name": 'P1000412 - Apple MacBook Pro 17"', "disposal_reason": "", "due": "", "due_in": "", @@ -1809,19 +2091,10 @@ "invoice_number": "", "justification": "", "lease_id": "", - "location": { - "link": "demisto.com", - "value": "admin" - }, + "location": {"link": "demisto.com", "value": "admin"}, "managed_by": "", - "model": { - "link": "demisto.com", - "value": "admin" - }, - "model_category": { - "link": "demisto.com", - "value": "admin" - }, + "model": {"link": "demisto.com", "value": "admin"}, + "model_category": {"link": "demisto.com", "value": "admin"}, "old_status": "", "old_substatus": "", "order_date": "2016-12-28 08:00:00", @@ -1848,308 +2121,98 @@ "sys_class_name": "alm_hardware", "sys_created_by": "admin", "sys_created_on": "2019-02-23 08:13:40", - "sys_domain": { - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "sys_id4", "sys_mod_count": "21", "sys_tags": "", "sys_updated_by": "system", "sys_updated_on": "2020-04-09 06:20:20", - "vendor": { - "link": "demisto.com", - "value": "admin" - }, + "vendor": {"link": "demisto.com", "value": "admin"}, "warranty_expiration": "2020-02-19", - "work_notes": "" - } + "work_notes": "", + }, ] } RESPONSE_QUERY_TABLE_SYS_PARAMS = { "result": [ { - "active": { - "display_value": "true", - "value": "true" - }, - "activity_due": { - "display_value": "UNKNOWN", - "value": "" - }, - "additional_assignee_list": { - "display_value": "", - "value": "" - }, - "approval": { - "display_value": "Not Yet Requested", - "value": "not requested" - }, - "approval_history": { - "display_value": "", - "value": "" - }, - "approval_set": { - "display_value": "", - "value": "" - }, - "assigned_to": { - "display_value": "", - "value": "" - }, - "assignment_group": { - "display_value": "Procurement", - "value": "1234" - }, - "business_duration": { - "display_value": "", - "value": "" - }, - "business_service": { - "display_value": "", - "value": "" - }, - "calendar_duration": { - "display_value": "", - "value": "" - }, - "calendar_stc": { - "display_value": "", - "value": "" - }, - "close_notes": { - "display_value": "", - "value": "" - }, - "closed_at": { - "display_value": "", - "value": "" - }, - "closed_by": { - "display_value": "", - "value": "" - }, - "cmdb_ci": { - "display_value": "", - "value": "" - }, - "comments": { - "display_value": "", - "value": "" - }, - "comments_and_work_notes": { - "display_value": "", - "value": "" - }, - "company": { - "display_value": "", - "value": "" - }, - "contact_type": { - "display_value": None, - "value": "" - }, - "contract": { - "display_value": "", - "value": "" - }, - "correlation_display": { - "display_value": "", - "value": "" - }, - "correlation_id": { - 
"display_value": "", - "value": "" - }, - "delivery_plan": { - "display_value": "", - "value": "" - }, - "delivery_task": { - "display_value": "", - "value": "" - }, + "active": {"display_value": "true", "value": "true"}, + "activity_due": {"display_value": "UNKNOWN", "value": ""}, + "additional_assignee_list": {"display_value": "", "value": ""}, + "approval": {"display_value": "Not Yet Requested", "value": "not requested"}, + "approval_history": {"display_value": "", "value": ""}, + "approval_set": {"display_value": "", "value": ""}, + "assigned_to": {"display_value": "", "value": ""}, + "assignment_group": {"display_value": "Procurement", "value": "1234"}, + "business_duration": {"display_value": "", "value": ""}, + "business_service": {"display_value": "", "value": ""}, + "calendar_duration": {"display_value": "", "value": ""}, + "calendar_stc": {"display_value": "", "value": ""}, + "close_notes": {"display_value": "", "value": ""}, + "closed_at": {"display_value": "", "value": ""}, + "closed_by": {"display_value": "", "value": ""}, + "cmdb_ci": {"display_value": "", "value": ""}, + "comments": {"display_value": "", "value": ""}, + "comments_and_work_notes": {"display_value": "", "value": ""}, + "company": {"display_value": "", "value": ""}, + "contact_type": {"display_value": None, "value": ""}, + "contract": {"display_value": "", "value": ""}, + "correlation_display": {"display_value": "", "value": ""}, + "correlation_id": {"display_value": "", "value": ""}, + "delivery_plan": {"display_value": "", "value": ""}, + "delivery_task": {"display_value": "", "value": ""}, "description": { "display_value": "Order from vendor or move from in-stock inventory\n\t\t", - "value": "Order from vendor or move from in-stock inventory\n\t\t" - }, - "due_date": { - "display_value": "2020-04-20 13:58:46", - "value": "2020-04-20 20:58:46" - }, - "escalation": { - "display_value": "Normal", - "value": "0" - }, - "expected_start": { - "display_value": "2020-04-20 13:58:46", - "value": "2020-04-20 20:58:46" - }, - "follow_up": { - "display_value": "", - "value": "" - }, - "group_list": { - "display_value": "", - "value": "" - }, - "impact": { - "display_value": "3 - Low", - "value": "3" - }, - "knowledge": { - "display_value": "false", - "value": "false" - }, - "location": { - "display_value": "", - "value": "" - }, - "made_sla": { - "display_value": "true", - "value": "true" - }, - "number": { - "display_value": "TASK0000001", - "value": "TASK0000001" - }, - "opened_at": { - "display_value": "2020-04-20 13:58:46", - "value": "2020-04-20 20:58:46" - }, - "opened_by": { - "display_value": "System Administrator", - "value": "1234" - }, - "order": { - "display_value": "", - "value": "" - }, - "parent": { - "display_value": "RITM0000001", - "value": "aeed229047801200e0ef563dbb9a71c2" - }, - "priority": { - "display_value": "4 - Low", - "value": "4" - }, - "reassignment_count": { - "display_value": "0", - "value": "0" - }, - "request": { - "display_value": "REQ0000001", - "value": "1234" - }, - "request_item": { - "display_value": "RITM0000001", - "value": "1234" - }, - "sc_catalog": { - "display_value": "", - "value": "" - }, - "service_offering": { - "display_value": "", - "value": "" - }, + "value": "Order from vendor or move from in-stock inventory\n\t\t", + }, + "due_date": {"display_value": "2020-04-20 13:58:46", "value": "2020-04-20 20:58:46"}, + "escalation": {"display_value": "Normal", "value": "0"}, + "expected_start": {"display_value": "2020-04-20 13:58:46", "value": "2020-04-20 20:58:46"}, + 
"follow_up": {"display_value": "", "value": ""}, + "group_list": {"display_value": "", "value": ""}, + "impact": {"display_value": "3 - Low", "value": "3"}, + "knowledge": {"display_value": "false", "value": "false"}, + "location": {"display_value": "", "value": ""}, + "made_sla": {"display_value": "true", "value": "true"}, + "number": {"display_value": "TASK0000001", "value": "TASK0000001"}, + "opened_at": {"display_value": "2020-04-20 13:58:46", "value": "2020-04-20 20:58:46"}, + "opened_by": {"display_value": "System Administrator", "value": "1234"}, + "order": {"display_value": "", "value": ""}, + "parent": {"display_value": "RITM0000001", "value": "aeed229047801200e0ef563dbb9a71c2"}, + "priority": {"display_value": "4 - Low", "value": "4"}, + "reassignment_count": {"display_value": "0", "value": "0"}, + "request": {"display_value": "REQ0000001", "value": "1234"}, + "request_item": {"display_value": "RITM0000001", "value": "1234"}, + "sc_catalog": {"display_value": "", "value": ""}, + "service_offering": {"display_value": "", "value": ""}, "short_description": { "display_value": "Order from vendor or move from in-stock inventory\n\t\t", - "value": "Order from vendor or move from in-stock inventory\n\t\t" - }, - "skills": { - "display_value": "", - "value": "" - }, - "sla_due": { - "display_value": "UNKNOWN", - "value": "" - }, - "state": { - "display_value": "Open", - "value": "1" - }, - "sys_class_name": { - "display_value": "Catalog Task", - "value": "sc_task" - }, - "sys_created_by": { - "display_value": "admin", - "value": "admin" - }, - "sys_created_on": { - "display_value": "2020-04-20 13:58:46", - "value": "2020-04-20 20:58:46" - }, - "sys_domain": { - "display_value": "global", - "value": "global" - }, - "sys_domain_path": { - "display_value": "/", - "value": "/" - }, - "sys_id": { - "display_value": "1234", - "value": "1234" - }, - "sys_mod_count": { - "display_value": "0", - "value": "0" - }, - "sys_tags": { - "display_value": "", - "value": "" - }, - "sys_updated_by": { - "display_value": "admin", - "value": "admin" - }, - "sys_updated_on": { - "display_value": "2020-04-20 13:58:46", - "value": "2020-04-20 20:58:46" - }, - "time_worked": { - "display_value": "", - "value": "" - }, - "upon_approval": { - "display_value": "Proceed to Next Task", - "value": "proceed" - }, - "upon_reject": { - "display_value": "Cancel all future Tasks", - "value": "cancel" - }, - "urgency": { - "display_value": "3 - Low", - "value": "3" - }, - "user_input": { - "display_value": "", - "value": "" - }, - "watch_list": { - "display_value": "", - "value": "" - }, - "work_end": { - "display_value": "", - "value": "" - }, - "work_notes": { - "display_value": "", - "value": "" - }, - "work_notes_list": { - "display_value": "", - "value": "" - }, - "work_start": { - "display_value": "", - "value": "" - } + "value": "Order from vendor or move from in-stock inventory\n\t\t", + }, + "skills": {"display_value": "", "value": ""}, + "sla_due": {"display_value": "UNKNOWN", "value": ""}, + "state": {"display_value": "Open", "value": "1"}, + "sys_class_name": {"display_value": "Catalog Task", "value": "sc_task"}, + "sys_created_by": {"display_value": "admin", "value": "admin"}, + "sys_created_on": {"display_value": "2020-04-20 13:58:46", "value": "2020-04-20 20:58:46"}, + "sys_domain": {"display_value": "global", "value": "global"}, + "sys_domain_path": {"display_value": "/", "value": "/"}, + "sys_id": {"display_value": "1234", "value": "1234"}, + "sys_mod_count": {"display_value": "0", "value": "0"}, + 
"sys_tags": {"display_value": "", "value": ""}, + "sys_updated_by": {"display_value": "admin", "value": "admin"}, + "sys_updated_on": {"display_value": "2020-04-20 13:58:46", "value": "2020-04-20 20:58:46"}, + "time_worked": {"display_value": "", "value": ""}, + "upon_approval": {"display_value": "Proceed to Next Task", "value": "proceed"}, + "upon_reject": {"display_value": "Cancel all future Tasks", "value": "cancel"}, + "urgency": {"display_value": "3 - Low", "value": "3"}, + "user_input": {"display_value": "", "value": ""}, + "watch_list": {"display_value": "", "value": ""}, + "work_end": {"display_value": "", "value": ""}, + "work_notes": {"display_value": "", "value": ""}, + "work_notes_list": {"display_value": "", "value": ""}, + "work_start": {"display_value": "", "value": ""}, } ] } @@ -2159,39 +2222,21 @@ "acquisition_method": "", "asset_tag": "P1000479", "assigned": "2017-10-31 07:00:00", - "assigned_to": { - "link": "demisto.com", - "value": "admin" - }, + "assigned_to": {"link": "demisto.com", "value": "admin"}, "beneficiary": "", "checked_in": "", "checked_out": "", - "ci": { - "link": "demisto.com", - "value": "admin" - }, + "ci": {"link": "demisto.com", "value": "admin"}, "comments": "", - "company": { - "link": "demisto.com", - "value": "admin" - }, + "company": {"link": "demisto.com", "value": "admin"}, "cost": "1799.99", - "cost_center": { - "link": "demisto.com", - "value": "admin" - }, + "cost_center": {"link": "demisto.com", "value": "admin"}, "delivery_date": "2017-04-20 07:00:00", - "department": { - "link": "demisto.com", - "value": "admin" - }, + "department": {"link": "demisto.com", "value": "admin"}, "depreciated_amount": "1025.61", - "depreciation": { - "link": "demisto.com", - "value": "admin" - }, + "depreciation": {"link": "demisto.com", "value": "admin"}, "depreciation_date": "2017-06-03 07:00:00", - "display_name": "P1000479 - Apple MacBook Pro 15\"", + "display_name": 'P1000479 - Apple MacBook Pro 15"', "disposal_reason": "", "due": "", "due_in": "", @@ -2202,19 +2247,10 @@ "invoice_number": "", "justification": "", "lease_id": "", - "location": { - "link": "demisto.com", - "value": "admin" - }, + "location": {"link": "demisto.com", "value": "admin"}, "managed_by": "", - "model": { - "link": "demisto.com", - "value": "admin" - }, - "model_category": { - "link": "demisto.com", - "value": "admin" - }, + "model": {"link": "demisto.com", "value": "admin"}, + "model_category": {"link": "demisto.com", "value": "admin"}, "old_status": "", "old_substatus": "", "order_date": "2017-03-27 07:00:00", @@ -2241,37 +2277,25 @@ "sys_class_name": "alm_hardware", "sys_created_by": "admin", "sys_created_on": "2019-02-23 08:14:21", - "sys_domain": { - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "00a96c0d3790200044e0bfc8bcbe5dc3", "sys_mod_count": "20", "sys_tags": "", "sys_updated_by": "system", "sys_updated_on": "2020-04-09 06:20:10", - "vendor": { - "link": "demisto.com", - "value": "admin" - }, + "vendor": {"link": "demisto.com", "value": "admin"}, "warranty_expiration": "2020-06-01", - "work_notes": "" + "work_notes": "", } ] } RESPONSE_QUERY_COMPUTERS = { "result": { - "asset": { - "link": "demisto.com", - "value": "a" - }, + "asset": {"link": "demisto.com", "value": "a"}, "asset_tag": "P1000357", "assigned": "2017-09-09 07:00:00", - "assigned_to": { - "link": "demisto.com", - "value": "admin" - }, + "assigned_to": {"link": "demisto.com", "value": "admin"}, 
"assignment_group": "", "attributes": "", "can_print": "false", @@ -2283,17 +2307,11 @@ "checked_in": "", "checked_out": "", "comments": "", - "company": { - "link": "demisto.com", - "value": "admin" - }, + "company": {"link": "demisto.com", "value": "admin"}, "correlation_id": "", "cost": "1329", "cost_cc": "USD", - "cost_center": { - "link": "demisto.com", - "value": "a" - }, + "cost_center": {"link": "demisto.com", "value": "a"}, "cpu_core_count": "1", "cpu_core_thread": "", "cpu_count": "1", @@ -2303,10 +2321,7 @@ "cpu_type": "", "default_gateway": "", "delivery_date": "2017-05-21 07:00:00", - "department": { - "link": "demisto.com", - "value": "a" - }, + "department": {"link": "demisto.com", "value": "a"}, "discovery_source": "", "disk_space": "", "dns_domain": "", @@ -2327,21 +2342,12 @@ "justification": "", "last_discovered": "", "lease_id": "", - "location": { - "link": "demisto.com", - "value": "a" - }, + "location": {"link": "demisto.com", "value": "a"}, "mac_address": "", "maintenance_schedule": "", "managed_by": "", - "manufacturer": { - "link": "demisto.com", - "value": "a" - }, - "model_id": { - "link": "demisto.com", - "value": "a" - }, + "manufacturer": {"link": "demisto.com", "value": "a"}, + "model_id": {"link": "demisto.com", "value": "a"}, "model_number": "", "monitor": "false", "name": "Precision T5500 Workstation", @@ -2369,10 +2375,7 @@ "sys_class_path": "/!!/!2/!(", "sys_created_by": "admin", "sys_created_on": "2012-02-18 08:14:45", - "sys_domain": { - "link": "demisto.com", - "value": "global" - }, + "sys_domain": {"link": "demisto.com", "value": "global"}, "sys_domain_path": "/", "sys_id": "1234", "sys_mod_count": "18", @@ -2380,12 +2383,9 @@ "sys_updated_by": "system", "sys_updated_on": "2020-04-09 06:20:21", "unverified": "false", - "vendor": { - "link": "demisto.com", - "value": "a" - }, + "vendor": {"link": "demisto.com", "value": "a"}, "virtual": "false", - "warranty_expiration": "2020-07-28" + "warranty_expiration": "2020-07-28", } } RESPONSE_GET_TABLE_NAME = { @@ -2406,10 +2406,7 @@ "name": "cmdb_ci_lb_ace", "number_ref": "", "read_access": "true", - "super_class": { - "link": "demisto.com", - "value": "admin" - }, + "super_class": {"link": "demisto.com", "value": "admin"}, "sys_class_code": "!.", "sys_class_name": "sys_db_object", "sys_class_path": "/!!/!2/!(/!!/!$/!.", @@ -2418,21 +2415,15 @@ "sys_id": "123", "sys_mod_count": "0", "sys_name": "CMDB CI Lb Ace", - "sys_package": { - "link": "demisto.com", - "value": "admin" - }, + "sys_package": {"link": "demisto.com", "value": "admin"}, "sys_policy": "", - "sys_scope": { - "link": "demisto.com", - "value": "global" - }, + "sys_scope": {"link": "demisto.com", "value": "global"}, "sys_update_name": "sys_db_object_547a54c1e86100107850694c2d9d2d3e", "sys_updated_by": "system", "sys_updated_on": "2019-12-04 05:02:05", "update_access": "true", "user_role": "", - "ws_access": "true" + "ws_access": "true", } ] } @@ -2442,10 +2433,7 @@ "comments": "", "id_display": "INC0000009", "id_type": "Incident", - "label": { - "link": "demisto.com", - "value": "admin" - }, + "label": {"link": "demisto.com", "value": "admin"}, "notify_onchange": "false", "notify_script": "", "notify_when": "", @@ -2462,7 +2450,7 @@ "table_key": "", "title": "title", "url": "incident.do?sys_id=1234\u0026sysparm_view=", - "view_name": "" + "view_name": "", } } RESPONSE_QUERY_ITEMS = { @@ -2471,10 +2459,7 @@ "active": "true", "availability": "on_both", "billable": "false", - "category": { - "link": "demisto.com", - "value": "admin" - }, + 
"category": {"link": "demisto.com", "value": "admin"}, "cost": "0", "custom_cart": "", "delivery_plan": "", @@ -2496,10 +2481,7 @@ "mobile_hide_price": "false", "mobile_picture": "", "mobile_picture_type": "use_desktop_picture", - "model": { - "link": "demisto.com", - "value": "admin" - }, + "model": {"link": "demisto.com", "value": "admin"}, "name": "Apple iPad 3", "no_attachment_v2": "false", "no_cart": "false", @@ -2534,15 +2516,9 @@ "sys_id": "123", "sys_mod_count": "17", "sys_name": "Apple iPad 3", - "sys_package": { - "link": "demisto.com", - "value": "admin" - }, + "sys_package": {"link": "demisto.com", "value": "admin"}, "sys_policy": "", - "sys_scope": { - "link": "demisto.com", - "value": "global" - }, + "sys_scope": {"link": "demisto.com", "value": "global"}, "sys_tags": "", "sys_update_name": "pc_hardware_cat_item_060f3afa3731300054b6a3549dbe5d3e", "sys_updated_by": "admin", @@ -2550,26 +2526,17 @@ "template": "", "type": "item", "use_sc_layout": "true", - "vendor": { - "link": "demisto.com", - "value": "admin" - }, + "vendor": {"link": "demisto.com", "value": "admin"}, "visible_bundle": "true", "visible_guide": "true", "visible_standalone": "true", - "workflow": { - "link": "demisto.com", - "value": "admin" - } + "workflow": {"link": "demisto.com", "value": "admin"}, }, { "active": "true", "availability": "on_desktop", "billable": "false", - "category": { - "link": "demisto.com", - "value": "admin" - }, + "category": {"link": "demisto.com", "value": "admin"}, "cost": "50", "custom_cart": "", "delivery_plan": "", @@ -2626,15 +2593,9 @@ "sys_id": "1234", "sys_mod_count": "12", "sys_name": "Belkin iPad Mini Case", - "sys_package": { - "link": "demisto.com", - "value": "admin" - }, + "sys_package": {"link": "demisto.com", "value": "admin"}, "sys_policy": "", - "sys_scope": { - "link": "demisto.com", - "value": "global" - }, + "sys_scope": {"link": "demisto.com", "value": "global"}, "sys_tags": "", "sys_update_name": "pc_hardware_cat_item_90af095bcd38798071a208d710d1b64f", "sys_updated_by": "admin", @@ -2646,43 +2607,23 @@ "visible_bundle": "true", "visible_guide": "true", "visible_standalone": "true", - "workflow": { - "link": "demisto.com", - "value": "admin" - } - } + "workflow": {"link": "demisto.com", "value": "admin"}, + }, ] } RESPONSE_ITEM_DETAILS = { "result": { - "catalogs": [ - { - "active": True, - "sys_id": "1234", - "title": "Service Catalog" - } - ], + "catalogs": [{"active": True, "sys_id": "1234", "title": "Service Catalog"}], "categories": [ { "active": True, - "category": { - "active": True, - "sys_id": "1234", - "title": "Mobiles" - }, + "category": {"active": True, "sys_id": "1234", "title": "Mobiles"}, "sys_id": "1234", - "title": "Hardware" - } - ], - "category": { - "sys_id": "1234", - "title": "Mobiles" - }, - "client_script": { - "onChange": [], - "onLoad": [], - "onSubmit": [] - }, + "title": "Hardware", + } + ], + "category": {"sys_id": "1234", "title": "Mobiles"}, + "client_script": {"onChange": [], "onLoad": [], "onSubmit": []}, "content_type": "", "data_lookup": [], "description": "desc", @@ -2722,7 +2663,7 @@ "short_description": "Hide allocated carrier", "source_table": "catalog_ui_policy", "sys_id": "sys_id", - "table": "(catalog)" + "table": "(catalog)", } ], "url": "", @@ -2730,11 +2671,7 @@ { "attributes": "edge_encryption_enabled=true", "choices": [ - { - "index": 0, - "label": "-- None --", - "value": "" - }, + {"index": 0, "label": "-- None --", "value": ""}, { "index": 1, "label": "AT \u0026 T Mobility", @@ -2742,7 +2679,7 @@ 
"price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "at_and_t_mobility" + "value": "at_and_t_mobility", }, { "index": 2, @@ -2751,7 +2688,7 @@ "price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "cricket_wireless" + "value": "cricket_wireless", }, { "index": 3, @@ -2760,7 +2697,7 @@ "price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "metropcs" + "value": "metropcs", }, { "index": 4, @@ -2769,7 +2706,7 @@ "price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "sprint_nextel" + "value": "sprint_nextel", }, { "index": 5, @@ -2778,7 +2715,7 @@ "price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "t_mobile_usa" + "value": "t_mobile_usa", }, { "index": 6, @@ -2787,7 +2724,7 @@ "price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "tracfone_wireless" + "value": "tracfone_wireless", }, { "index": 7, @@ -2796,7 +2733,7 @@ "price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "us_cellular" + "value": "us_cellular", }, { "index": 8, @@ -2805,8 +2742,8 @@ "price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "verizon_wireless" - } + "value": "verizon_wireless", + }, ], "display_type": "Select Box", "displayvalue": "", @@ -2820,16 +2757,12 @@ "read_only": False, "render_label": True, "type": 5, - "value": "" + "value": "", }, { "attributes": "edge_encryption_enabled=true", "choices": [ - { - "index": 0, - "label": "-- None --", - "value": "" - }, + {"index": 0, "label": "-- None --", "value": ""}, { "index": 1, "label": "500MB", @@ -2837,7 +2770,7 @@ "price_currency": "USD", "recurring_price": 1, "recurring_price_currency": "USD", - "value": "500MB" + "value": "500MB", }, { "index": 2, @@ -2846,8 +2779,8 @@ "price_currency": "USD", "recurring_price": 4, "recurring_price_currency": "USD", - "value": "unlimited" - } + "value": "unlimited", + }, ], "display_type": "Select Box", "displayvalue": "", @@ -2861,7 +2794,7 @@ "read_only": False, "render_label": True, "type": 5, - "value": "" + "value": "", }, { "attributes": "edge_encryption_enabled=true", @@ -2873,7 +2806,7 @@ "price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "twelve_months" + "value": "twelve_months", }, { "index": 1, @@ -2882,7 +2815,7 @@ "price_currency": "USD", "recurring_price": -2, "recurring_price_currency": "USD", - "value": "eighteen_months" + "value": "eighteen_months", }, { "index": 2, @@ -2891,8 +2824,8 @@ "price_currency": "USD", "recurring_price": -4, "recurring_price_currency": "USD", - "value": "twentyfour_months" - } + "value": "twentyfour_months", + }, ], "display_type": "Select Box", "displayvalue": "", @@ -2906,7 +2839,7 @@ "read_only": False, "render_label": True, "type": 5, - "value": "" + "value": "", }, { "attributes": "edge_encryption_enabled=true", @@ -2918,7 +2851,7 @@ "price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "black" + "value": "black", }, { "index": 1, @@ -2927,8 +2860,8 @@ "price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "slate" - } + "value": "slate", + }, ], "display_type": "Select Box", "displayvalue": "", @@ -2942,7 +2875,7 @@ "read_only": False, "render_label": True, "type": 5, - "value": "" + "value": "", }, { "attributes": "edge_encryption_enabled=true", @@ 
-2954,7 +2887,7 @@ "price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "sixtyfour" + "value": "sixtyfour", }, { "index": 1, @@ -2963,7 +2896,7 @@ "price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "thirtytwo" + "value": "thirtytwo", }, { "index": 2, @@ -2972,8 +2905,8 @@ "price_currency": "USD", "recurring_price": 0, "recurring_price_currency": "USD", - "value": "sixteen" - } + "value": "sixteen", + }, ], "display_type": "Select Box", "displayvalue": "64GB", @@ -2987,10 +2920,10 @@ "read_only": False, "render_label": True, "type": 5, - "value": "sixtyfour" - } + "value": "sixtyfour", + }, ], - "visible_standalone": True + "visible_standalone": True, } } RESPONSE_CREATE_ITEM_ORDER = { @@ -2999,7 +2932,7 @@ "request_id": "1234", "request_number": "REQ0010002", "sys_id": "12", - "table": "sc_request" + "table": "sc_request", } } RESPONSE_DOCUMENT_ROUTE = { @@ -3008,79 +2941,184 @@ "document_id": "document_id", "document_table": "incident", "queue": "queue_id", - "sys_id": "work_item_id" + "sys_id": "work_item_id", } } RESPONSE_FETCH = { - 'result': - [ - { - 'parent': '', 'made_sla': 'false', 'caused_by': '', 'watch_list': '', 'upon_reject': '', - 'sys_updated_on': '2020-04-02 14:03:31', - 'child_incidents': '', 'hold_reason': '', 'approval_history': '', 'number': 'INC0000040', - 'resolved_by': '', - 'sys_updated_by': 'admin', - 'opened_by': {'link': 'demisto.com', 'value': 'test2'}, - 'user_input': '', 'sys_created_on': '2019-09-05 00:42:29', - 'sys_domain': {'link': 'demisto.com', 'value': 'global'}, 'state': '1', 'sys_created_by': 'admin', - 'knowledge': 'false', 'order': '', 'calendar_stc': '', 'closed_at': '', - 'cmdb_ci': {'link': 'demisto.com', 'value': 'test'}, 'delivery_plan': '', 'impact': '2', - 'active': 'true', - 'work_notes_list': '', 'business_service': '', 'priority': '4', 'sys_domain_path': '/', 'rfc': '', - 'time_worked': '', 'expected_start': '', - 'opened_at': '', - 'business_duration': '', - 'group_list': '', 'work_end': '', 'caller_id': {'link': 'demisto.com', 'value': 'test'}, - 'reopened_time': '', 'resolved_at': '', 'approval_set': '', 'subcategory': '', 'work_notes': '', - 'short_description': 'Trouble getting to Oregon mail server', 'close_code': '', - 'correlation_display': '', - 'delivery_task': '', 'work_start': '', 'assignment_group': {'link': 'demisto.com', 'value': 'test'}, - 'additional_assignee_list': '', 'business_stc': '', - 'description': 'Unable to access Oregon mail server. 
Is it down?', 'calendar_duration': '', - 'close_notes': '', - 'notify': '1', 'sys_class_name': 'incident', 'closed_by': '', 'follow_up': '', 'parent_incident': '', - 'sys_id': 'sys_id1', 'contact_type': 'phone', 'reopened_by': '', 'incident_state': '1', 'urgency': '3', - 'problem_id': '', 'company': {'link': 'demisto.com', 'value': 'test'}, 'reassignment_count': '', - 'u_custom_field_test': 'NYC', 'activity_due': '', 'assigned_to': '', 'severity': '3', 'comments': '', - 'approval': 'not requested', 'sla_due': '2019-09-26 00:41:01', 'comments_and_work_notes': '', - 'due_date': '', - 'sys_mod_count': '8', 'reopen_count': '', 'sys_tags': '', 'escalation': '0', 'upon_approval': '', - 'correlation_id': '', 'location': {'link': 'demisto.com', 'value': 'test'}, 'category': 'network' - }, - { - 'parent': '', 'made_sla': 'false', 'caused_by': '', 'watch_list': '', 'upon_reject': '', - 'sys_updated_on': '2020-04-02 14:03:31', - 'child_incidents': '', 'hold_reason': '', 'approval_history': '', 'number': 'INC0000039', - 'resolved_by': '', - 'sys_updated_by': 'admin', - 'opened_by': {'link': 'demisto.com', 'value': 'test'}, - 'user_input': '', 'sys_created_on': '2019-09-05 00:42:29', - 'sys_domain': {'link': 'demisto.com', 'value': 'global'}, 'state': '1', 'sys_created_by': 'admin', - 'knowledge': 'false', 'order': '', 'calendar_stc': '', 'closed_at': '', - 'cmdb_ci': {'link': 'demisto.com', 'value': 'test'}, 'delivery_plan': '', 'impact': '2', - 'active': 'true', - 'work_notes_list': '', 'business_service': '', 'priority': '4', 'sys_domain_path': '/', 'rfc': '', - 'time_worked': '', 'expected_start': '', - 'opened_at': '', - 'business_duration': '', - 'group_list': '', 'work_end': '', 'caller_id': {'link': 'demisto.com', 'value': 'test'}, - 'reopened_time': '', 'resolved_at': '', 'approval_set': '', 'subcategory': '', 'work_notes': '', - 'short_description': 'Trouble getting to Oregon mail server', 'close_code': '', - 'correlation_display': '', - 'delivery_task': '', 'work_start': '', 'assignment_group': {'link': 'demisto.com', 'value': 'test'}, - 'additional_assignee_list': '', 'business_stc': '', - 'description': 'Unable to access Oregon mail server. 
Is it down?', 'calendar_duration': '', - 'close_notes': '', - 'notify': '1', 'sys_class_name': 'incident', 'closed_by': '', 'follow_up': '', 'parent_incident': '', - 'sys_id': 'sys_id2', 'contact_type': 'phone', 'reopened_by': '', 'incident_state': '1', 'urgency': '3', - 'problem_id': '', 'company': {'link': 'demisto.com', 'value': 'test'}, 'reassignment_count': '', - 'u_custom_field_test': 'NYC', 'activity_due': '', 'assigned_to': '', 'severity': '3', 'comments': '', - 'approval': 'not requested', 'sla_due': '2019-09-26 00:41:01', 'comments_and_work_notes': '', - 'due_date': '', - 'sys_mod_count': '8', 'reopen_count': '', 'sys_tags': '', 'escalation': '0', 'upon_approval': '', - 'correlation_id': '', 'location': {'link': 'demisto.com', 'value': 'test'}, 'category': 'network' - } - ] + "result": [ + { + "parent": "", + "made_sla": "false", + "caused_by": "", + "watch_list": "", + "upon_reject": "", + "sys_updated_on": "2020-04-02 14:03:31", + "child_incidents": "", + "hold_reason": "", + "approval_history": "", + "number": "INC0000040", + "resolved_by": "", + "sys_updated_by": "admin", + "opened_by": {"link": "demisto.com", "value": "test2"}, + "user_input": "", + "sys_created_on": "2019-09-05 00:42:29", + "sys_domain": {"link": "demisto.com", "value": "global"}, + "state": "1", + "sys_created_by": "admin", + "knowledge": "false", + "order": "", + "calendar_stc": "", + "closed_at": "", + "cmdb_ci": {"link": "demisto.com", "value": "test"}, + "delivery_plan": "", + "impact": "2", + "active": "true", + "work_notes_list": "", + "business_service": "", + "priority": "4", + "sys_domain_path": "/", + "rfc": "", + "time_worked": "", + "expected_start": "", + "opened_at": "", + "business_duration": "", + "group_list": "", + "work_end": "", + "caller_id": {"link": "demisto.com", "value": "test"}, + "reopened_time": "", + "resolved_at": "", + "approval_set": "", + "subcategory": "", + "work_notes": "", + "short_description": "Trouble getting to Oregon mail server", + "close_code": "", + "correlation_display": "", + "delivery_task": "", + "work_start": "", + "assignment_group": {"link": "demisto.com", "value": "test"}, + "additional_assignee_list": "", + "business_stc": "", + "description": "Unable to access Oregon mail server. 
Is it down?", + "calendar_duration": "", + "close_notes": "", + "notify": "1", + "sys_class_name": "incident", + "closed_by": "", + "follow_up": "", + "parent_incident": "", + "sys_id": "sys_id1", + "contact_type": "phone", + "reopened_by": "", + "incident_state": "1", + "urgency": "3", + "problem_id": "", + "company": {"link": "demisto.com", "value": "test"}, + "reassignment_count": "", + "u_custom_field_test": "NYC", + "activity_due": "", + "assigned_to": "", + "severity": "3", + "comments": "", + "approval": "not requested", + "sla_due": "2019-09-26 00:41:01", + "comments_and_work_notes": "", + "due_date": "", + "sys_mod_count": "8", + "reopen_count": "", + "sys_tags": "", + "escalation": "0", + "upon_approval": "", + "correlation_id": "", + "location": {"link": "demisto.com", "value": "test"}, + "category": "network", + }, + { + "parent": "", + "made_sla": "false", + "caused_by": "", + "watch_list": "", + "upon_reject": "", + "sys_updated_on": "2020-04-02 14:03:31", + "child_incidents": "", + "hold_reason": "", + "approval_history": "", + "number": "INC0000039", + "resolved_by": "", + "sys_updated_by": "admin", + "opened_by": {"link": "demisto.com", "value": "test"}, + "user_input": "", + "sys_created_on": "2019-09-05 00:42:29", + "sys_domain": {"link": "demisto.com", "value": "global"}, + "state": "1", + "sys_created_by": "admin", + "knowledge": "false", + "order": "", + "calendar_stc": "", + "closed_at": "", + "cmdb_ci": {"link": "demisto.com", "value": "test"}, + "delivery_plan": "", + "impact": "2", + "active": "true", + "work_notes_list": "", + "business_service": "", + "priority": "4", + "sys_domain_path": "/", + "rfc": "", + "time_worked": "", + "expected_start": "", + "opened_at": "", + "business_duration": "", + "group_list": "", + "work_end": "", + "caller_id": {"link": "demisto.com", "value": "test"}, + "reopened_time": "", + "resolved_at": "", + "approval_set": "", + "subcategory": "", + "work_notes": "", + "short_description": "Trouble getting to Oregon mail server", + "close_code": "", + "correlation_display": "", + "delivery_task": "", + "work_start": "", + "assignment_group": {"link": "demisto.com", "value": "test"}, + "additional_assignee_list": "", + "business_stc": "", + "description": "Unable to access Oregon mail server. 
Is it down?", + "calendar_duration": "", + "close_notes": "", + "notify": "1", + "sys_class_name": "incident", + "closed_by": "", + "follow_up": "", + "parent_incident": "", + "sys_id": "sys_id2", + "contact_type": "phone", + "reopened_by": "", + "incident_state": "1", + "urgency": "3", + "problem_id": "", + "company": {"link": "demisto.com", "value": "test"}, + "reassignment_count": "", + "u_custom_field_test": "NYC", + "activity_due": "", + "assigned_to": "", + "severity": "3", + "comments": "", + "approval": "not requested", + "sla_due": "2019-09-26 00:41:01", + "comments_and_work_notes": "", + "due_date": "", + "sys_mod_count": "8", + "reopen_count": "", + "sys_tags": "", + "escalation": "0", + "upon_approval": "", + "correlation_id": "", + "location": {"link": "demisto.com", "value": "test"}, + "category": "network", + }, + ] } RESPONSE_FETCH_ATTACHMENTS_TICKET = { "result": [ @@ -3093,23 +3131,23 @@ "approval_set": "", "assigned_to": { "link": "https://dev61466.service-now.com/api/now/table/sys_user/5137153cc611227c000bbd1bd8cd2007", - "value": "5137153cc611227c000bbd1bd8cd2007" + "value": "5137153cc611227c000bbd1bd8cd2007", }, "assignment_group": { "link": "https://dev61466.service-now.com/api/now/table/sys_user_group/287ebd7da9fe198100f92cc8d1d2154e", - "value": "287ebd7da9fe198100f92cc8d1d2154e" + "value": "287ebd7da9fe198100f92cc8d1d2154e", }, "business_duration": "1970-01-01 08:00:00", "business_service": { "link": "https://dev61466.service-now.com/api/now/table/cmdb_ci_service/27d32778c0a8000b00db970eeaa60f16", - "value": "27d32778c0a8000b00db970eeaa60f16" + "value": "27d32778c0a8000b00db970eeaa60f16", }, "business_stc": "28800", "calendar_duration": "1970-01-02 04:23:17", "calendar_stc": "102197", "caller_id": { "link": "https://dev61466.service-now.com/api/now/table/sys_user/681ccaf9c0a8016400b98a06818d57c7", - "value": "681ccaf9c0a8016400b98a06818d57c7" + "value": "681ccaf9c0a8016400b98a06818d57c7", }, "category": "inquiry", "caused_by": "", @@ -3119,17 +3157,17 @@ "closed_at": "2016-12-14 02:46:44", "closed_by": { "link": "https://dev61466.service-now.com/api/now/table/sys_user/681ccaf9c0a8016400b98a06818d57c7", - "value": "681ccaf9c0a8016400b98a06818d57c7" + "value": "681ccaf9c0a8016400b98a06818d57c7", }, "cmdb_ci": { "link": "https://dev61466.service-now.com/api/now/table/cmdb_ci/109562a3c611227500a7b7ff98cc0dc7", - "value": "109562a3c611227500a7b7ff98cc0dc7" + "value": "109562a3c611227500a7b7ff98cc0dc7", }, "comments": "", "comments_and_work_notes": "", "company": { "link": "https://dev61466.service-now.com/api/now/table/core_company/31bea3d53790200044e0bfc8bcbe5dec", - "value": "31bea3d53790200044e0bfc8bcbe5dec" + "value": "31bea3d53790200044e0bfc8bcbe5dec", }, "contact_type": "self-service", "contract": "", @@ -3151,10 +3189,10 @@ "made_sla": "true", "notify": "1", "number": "INC0000060", - "opened_at": '', + "opened_at": "", "opened_by": { "link": "https://dev61466.service-now.com/api/now/table/sys_user/681ccaf9c0a8016400b98a06818d57c7", - "value": "681ccaf9c0a8016400b98a06818d57c7" + "value": "681ccaf9c0a8016400b98a06818d57c7", }, "order": "", "parent": "", @@ -3168,7 +3206,7 @@ "resolved_at": "2016-12-13 21:43:14", "resolved_by": { "link": "https://dev61466.service-now.com/api/now/table/sys_user/5137153cc611227c000bbd1bd8cd2007", - "value": "5137153cc611227c000bbd1bd8cd2007" + "value": "5137153cc611227c000bbd1bd8cd2007", }, "rfc": "", "service_offering": "", @@ -3181,10 +3219,7 @@ "sys_class_name": "incident", "sys_created_by": "employee", "sys_created_on": 
"2016-12-12 15:19:57", - "sys_domain": { - "link": "https://dev61466.service-now.com/api/now/table/sys_user_group/global", - "value": "global" - }, + "sys_domain": {"link": "https://dev61466.service-now.com/api/now/table/sys_user_group/global", "value": "global"}, "sys_domain_path": "/", "sys_id": "1c741bd70b2322007518478d83673af3", "sys_mod_count": "16", @@ -3200,347 +3235,272 @@ "work_end": "", "work_notes": "", "work_notes_list": "", - "work_start": "" + "work_start": "", } ] } RESPONSE_FETCH_ATTACHMENTS_FILE = [ - {'Contents': '', 'ContentsFormat': 'text', 'Type': 3, 'File': 'wireframe', 'FileID': 'file_id'} + {"Contents": "", "ContentsFormat": "text", "Type": 3, "File": "wireframe", "FileID": "file_id"} ] RESPONSE_FETCH_USE_DISPLAY_VALUE = { "result": [ { - "upon_reject": { - "display_value": "Cancel all future Tasks", - "value": "cancel" - }, - "sys_updated_on": { - "display_value": "29.02.2024 15:09:46", - "value": "2024-02-29 13:09:46" - }, - "number": { - "display_value": "INC0011111", - "value": "INC0011111" - }, - "state": { - "display_value": "New", - "value": "1" - }, - "sys_created_by": { - "display_value": "test_xsoar", - "value": "test_xsoar" - }, - "knowledge": { - "display_value": "false", - "value": "false" - }, - "impact": { - "display_value": "3 - Low", - "value": "3" - }, - "active": { - "display_value": "true", - "value": "true" - }, - "priority": { - "display_value": "5 - Planning", - "value": "5" - }, + "upon_reject": {"display_value": "Cancel all future Tasks", "value": "cancel"}, + "sys_updated_on": {"display_value": "29.02.2024 15:09:46", "value": "2024-02-29 13:09:46"}, + "number": {"display_value": "INC0011111", "value": "INC0011111"}, + "state": {"display_value": "New", "value": "1"}, + "sys_created_by": {"display_value": "test_xsoar", "value": "test_xsoar"}, + "knowledge": {"display_value": "false", "value": "false"}, + "impact": {"display_value": "3 - Low", "value": "3"}, + "active": {"display_value": "true", "value": "true"}, + "priority": {"display_value": "5 - Planning", "value": "5"}, "assignment_group": { "display_value": "Test_group", "link": "https://test.service-now.com/api/now/table/sys_user_group/3434343434343434343434", - "value": "3434343434343434343434" - }, - "short_description": { - "display_value": "Test for use_display_value 1", - "value": "Test for use_display_value 1" - }, - "notify": { - "display_value": "Do Not Notify", - "value": "1" + "value": "3434343434343434343434", }, + "short_description": {"display_value": "Test for use_display_value 1", "value": "Test for use_display_value 1"}, + "notify": {"display_value": "Do Not Notify", "value": "1"}, "comments_and_work_notes": { "display_value": "29.02.2024 15:34:56 - Caller ID test (Work notes)\nTest work notes\n\n", - "value": "" - }, - "sys_class_name": { - "display_value": "Incident", - "value": "incident" - }, - "reassignment_count": { - "display_value": "0", - "value": "0" - }, - "sla_due": { - "display_value": "UNKNOWN", - "value": "" - }, - "u_src_ip": { - "display_value": "", - "value": "" - }, - "task_effective_number": { - "display_value": "INC0011111", - "value": "INC0011111" - }, - "sys_updated_by": { - "display_value": "test_xsoar", - "value": "test_xsoar" - }, + "value": "", + }, + "sys_class_name": {"display_value": "Incident", "value": "incident"}, + "reassignment_count": {"display_value": "0", "value": "0"}, + "sla_due": {"display_value": "UNKNOWN", "value": ""}, + "u_src_ip": {"display_value": "", "value": ""}, + "task_effective_number": {"display_value": "INC0011111", 
"value": "INC0011111"}, + "sys_updated_by": {"display_value": "test_xsoar", "value": "test_xsoar"}, "opened_by": { "display_value": "Test admin", "link": "https://test.service-now.com/api/now/table/sys_user/00001111000011110000111100001111", - "value": "00001111000011110000111100001111" - }, - "sys_created_on": { - "display_value": "29.02.2024 15:09:46", - "value": "2024-02-29 13:09:46" + "value": "00001111000011110000111100001111", }, + "sys_created_on": {"display_value": "29.02.2024 15:09:46", "value": "2024-02-29 13:09:46"}, "sys_domain": { "display_value": "global", "link": "https://test.service-now.com/api/now/table/sys_user_group/global", - "value": "global" - }, - "opened_at": { - "display_value": "29.02.2024 15:08:46", - "value": "2024-02-29 13:08:46" + "value": "global", }, + "opened_at": {"display_value": "29.02.2024 15:08:46", "value": "2024-02-29 13:08:46"}, "caller_id": { "display_value": "Caller ID test", "link": "https://test.service-now.com/api/now/table/sys_user/11111111111111111111111111111", - "value": "11111111111111111111111111111" - }, - "subcategory": { - "display_value": None, - "value": "" + "value": "11111111111111111111111111111", }, + "subcategory": {"display_value": None, "value": ""}, "work_notes": { "display_value": "29.02.2024 15:34:56 - Caller ID test (Work notes)\nTest work notes\n\n", - "value": "" - }, - "sys_id": { - "display_value": "12121212121212121212121212", - "value": "12121212121212121212121212" - }, - "incident_state": { - "display_value": "New", - "value": "1" - }, - "approval": { - "display_value": "Not Yet Requested", - "value": "not requested" + "value": "", }, + "sys_id": {"display_value": "12121212121212121212121212", "value": "12121212121212121212121212"}, + "incident_state": {"display_value": "New", "value": "1"}, + "approval": {"display_value": "Not Yet Requested", "value": "not requested"}, }, { - "upon_reject": { - "display_value": "Cancel all future Tasks", - "value": "cancel" - }, - "sys_updated_on": { - "display_value": "29.02.2024 13:08:44", - "value": "2024-02-29 11:08:44" - }, - "number": { - "display_value": "INC0022222", - "value": "INC0022222" - }, - "state": { - "display_value": "New", - "value": "1" - }, - "sys_created_by": { - "display_value": "test_xsoar", - "value": "test_xsoar" - }, - "knowledge": { - "display_value": "false", - "value": "false" - }, - "impact": { - "display_value": "3 - Low", - "value": "3" - }, - "active": { - "display_value": "true", - "value": "true" - }, - "priority": { - "display_value": "5 - Planning", - "value": "5" - }, - "assignment_group": { - "display_value": "", - "value": "" - }, - "short_description": { - "display_value": "Test for use_display_value 2", - "value": "Test for use_display_value 2" - }, - "notify": { - "display_value": "Do Not Notify", - "value": "1" - }, - "comments_and_work_notes": { - "display_value": "", - "value": "" - }, - "sys_class_name": { - "display_value": "Incident", - "value": "incident" - }, - "reassignment_count": { - "display_value": "0", - "value": "0" - }, - "sla_due": { - "display_value": "UNKNOWN", - "value": "" - }, - "u_src_ip": { - "display_value": "", - "value": "" - }, - "task_effective_number": { - "display_value": "INC0022222", - "value": "INC0022222" - }, - "sys_updated_by": { - "display_value": "test_xsoar", - "value": "test_xsoar" - }, + "upon_reject": {"display_value": "Cancel all future Tasks", "value": "cancel"}, + "sys_updated_on": {"display_value": "29.02.2024 13:08:44", "value": "2024-02-29 11:08:44"}, + "number": {"display_value": 
"INC0022222", "value": "INC0022222"}, + "state": {"display_value": "New", "value": "1"}, + "sys_created_by": {"display_value": "test_xsoar", "value": "test_xsoar"}, + "knowledge": {"display_value": "false", "value": "false"}, + "impact": {"display_value": "3 - Low", "value": "3"}, + "active": {"display_value": "true", "value": "true"}, + "priority": {"display_value": "5 - Planning", "value": "5"}, + "assignment_group": {"display_value": "", "value": ""}, + "short_description": {"display_value": "Test for use_display_value 2", "value": "Test for use_display_value 2"}, + "notify": {"display_value": "Do Not Notify", "value": "1"}, + "comments_and_work_notes": {"display_value": "", "value": ""}, + "sys_class_name": {"display_value": "Incident", "value": "incident"}, + "reassignment_count": {"display_value": "0", "value": "0"}, + "sla_due": {"display_value": "UNKNOWN", "value": ""}, + "u_src_ip": {"display_value": "", "value": ""}, + "task_effective_number": {"display_value": "INC0022222", "value": "INC0022222"}, + "sys_updated_by": {"display_value": "test_xsoar", "value": "test_xsoar"}, "opened_by": { "display_value": "Test admin", "link": "https://test.service-now.com/api/now/table/sys_user/00001111000011110000111100001111", - "value": "00001111000011110000111100001111" - }, - "sys_created_on": { - "display_value": "29.02.2024 13:08:44", - "value": "2024-02-29 11:08:44" + "value": "00001111000011110000111100001111", }, + "sys_created_on": {"display_value": "29.02.2024 13:08:44", "value": "2024-02-29 11:08:44"}, "sys_domain": { "display_value": "global", "link": "https://test.service-now.com/api/now/table/sys_user_group/global", - "value": "global" - }, - "opened_at": { - "display_value": "29.02.2024 13:07:48", - "value": "2024-02-29 11:07:48" + "value": "global", }, + "opened_at": {"display_value": "29.02.2024 13:07:48", "value": "2024-02-29 11:07:48"}, "caller_id": { "display_value": "Caller ID test", "link": "https://test.service-now.com/api/now/table/sys_user/11111111111111111111111111111", - "value": "11111111111111111111111111111" - }, - "subcategory": { - "display_value": None, - "value": "" - }, - "work_notes": { - "display_value": "", - "value": "" - }, - "sys_id": { - "display_value": "2121212121212121212121", - "value": "2121212121212121212121" - }, - "incident_state": { - "display_value": "New", - "value": "1" - }, - "approval": { - "display_value": "Not Yet Requested", - "value": "not requested" + "value": "11111111111111111111111111111", }, - } + "subcategory": {"display_value": None, "value": ""}, + "work_notes": {"display_value": "", "value": ""}, + "sys_id": {"display_value": "2121212121212121212121", "value": "2121212121212121212121"}, + "incident_state": {"display_value": "New", "value": "1"}, + "approval": {"display_value": "Not Yet Requested", "value": "not requested"}, + }, ] } -MIRROR_COMMENTS_RESPONSE = {'result': [{ - 'sys_id': '549fc0bfdbaa101053482fb748961941', 'sys_created_on': '2020-08-17 06:31:49', 'name': 'incident', - 'element_id': '9bf0f1afdbe6101053482fb748961908', 'sys_tags': '', 'value': 'This is a comment', - 'sys_created_by': 'admin', 'element': 'comments'}]} +MIRROR_COMMENTS_RESPONSE = { + "result": [ + { + "sys_id": "549fc0bfdbaa101053482fb748961941", + "sys_created_on": "2020-08-17 06:31:49", + "name": "incident", + "element_id": "9bf0f1afdbe6101053482fb748961908", + "sys_tags": "", + "value": "This is a comment", + "sys_created_by": "admin", + "element": "comments", + } + ] +} RESPONSE_COMMENTS_DISPLAY_VALUE_AFTER_FORMAT = { - 'sys_created_on': 
'2022-11-21 19:59:49', - 'sys_created_by': 'admin', - 'sys_id': '123456789', - 'urgency': '3 - Low', - 'severity': '3 - Low', - 'comments': '2022-11-21 22:50:34 - System Administrator (Additional comments)\nSecond comment' - '\n\n Mirrored from Cortex XSOAR\n\n' - '2022-11-21 21:45:37 - Test User (Additional comments)\nFirst comment\n\n' + "sys_created_on": "2022-11-21 19:59:49", + "sys_created_by": "admin", + "sys_id": "123456789", + "urgency": "3 - Low", + "severity": "3 - Low", + "comments": "2022-11-21 22:50:34 - System Administrator (Additional comments)\nSecond comment" + "\n\n Mirrored from Cortex XSOAR\n\n" + "2022-11-21 21:45:37 - Test User (Additional comments)\nFirst comment\n\n", } RESPONSE_COMMENTS_DISPLAY_VALUE = { - 'result': {'sys_created_on': {'display_value': '2022-11-21 20:59:49', 'value': '2022-11-21 19:59:49'}, - 'sys_created_by': {'display_value': 'admin', 'value': 'admin'}, - 'sys_id': {'display_value': '123456789', 'value': '123456789'}, - 'urgency': {'display_value': '3 - Low', 'value': '3'}, - 'severity': {'display_value': '3 - Low', 'value': '3'}, - 'comments': {'display_value': - '2022-11-21 22:50:34 - System Administrator (Additional comments)\nSecond comment' - '\n\n Mirrored from Cortex XSOAR\n\n' - '2022-11-21 21:45:37 - Test User (Additional comments)\nFirst comment\n\n', - 'value': ''}}} + "result": { + "sys_created_on": {"display_value": "2022-11-21 20:59:49", "value": "2022-11-21 19:59:49"}, + "sys_created_by": {"display_value": "admin", "value": "admin"}, + "sys_id": {"display_value": "123456789", "value": "123456789"}, + "urgency": {"display_value": "3 - Low", "value": "3"}, + "severity": {"display_value": "3 - Low", "value": "3"}, + "comments": { + "display_value": "2022-11-21 22:50:34 - System Administrator (Additional comments)\nSecond comment" + "\n\n Mirrored from Cortex XSOAR\n\n" + "2022-11-21 21:45:37 - Test User (Additional comments)\nFirst comment\n\n", + "value": "", + }, + } +} RESPONSE_COMMENTS_DISPLAY_VALUE_NO_COMMENTS = { - 'result': {'sys_created_on': {'display_value': '2022-11-21 09:59:49', 'value': '2022-11-21 08:59:49'}, - 'sys_created_by': {'display_value': 'admin', 'value': 'admin'}, - 'sys_id': {'display_value': '123456789', 'value': '123456789'}, - 'urgency': {'display_value': '3 - Low', 'value': '3'}, - 'severity': {'display_value': '3 - Low', 'value': '3'}, - 'comments': {'display_value': '', 'value': ''}}} + "result": { + "sys_created_on": {"display_value": "2022-11-21 09:59:49", "value": "2022-11-21 08:59:49"}, + "sys_created_by": {"display_value": "admin", "value": "admin"}, + "sys_id": {"display_value": "123456789", "value": "123456789"}, + "urgency": {"display_value": "3 - Low", "value": "3"}, + "severity": {"display_value": "3 - Low", "value": "3"}, + "comments": {"display_value": "", "value": ""}, + } +} RESPONSE_MIRROR_FILE_ENTRY = [ - {'Contents': '', 'ContentsFormat': 'text', 'Type': 3, 'File': 'test.txt', - 'FileID': '20eb1079-d6c3-47cf-81bf-95968f93f6d3'}] + {"Contents": "", "ContentsFormat": "text", "Type": 3, "File": "test.txt", "FileID": "20eb1079-d6c3-47cf-81bf-95968f93f6d3"} +] -RESPONSE_ASSIGNMENT_GROUP = {'result': {'parent': '', 'manager': - {'link': 'https://dev59633.service-now.com/api/now/table/sys_user/b6b364e253131300e321ddeeff7b121b', - 'value': 'b6b364e253131300e321ddeeff7b121b'}, 'roles': '', 'sys_mod_count': '0', 'active': 'true', - 'description': '', 'source': '', 'sys_updated_on': '2020-07-11 09:50:32', - 'sys_tags': '', 'type': '', 'sys_id': '679434f053231300e321ddeeff7b12d8', - 'sys_updated_by': 
'admin', 'cost_center': '', 'default_assignee': '', - 'sys_created_on': '2020-07-11 09:50:32', 'name': 'Help Desk', - 'exclude_manager': 'false', 'email': '', 'include_members': 'false', - 'sys_created_by': 'admin'}} +RESPONSE_ASSIGNMENT_GROUP = { + "result": { + "parent": "", + "manager": { + "link": "https://dev59633.service-now.com/api/now/table/sys_user/b6b364e253131300e321ddeeff7b121b", + "value": "b6b364e253131300e321ddeeff7b121b", + }, + "roles": "", + "sys_mod_count": "0", + "active": "true", + "description": "", + "source": "", + "sys_updated_on": "2020-07-11 09:50:32", + "sys_tags": "", + "type": "", + "sys_id": "679434f053231300e321ddeeff7b12d8", + "sys_updated_by": "admin", + "cost_center": "", + "default_assignee": "", + "sys_created_on": "2020-07-11 09:50:32", + "name": "Help Desk", + "exclude_manager": "false", + "email": "", + "include_members": "false", + "sys_created_by": "admin", + } +} -RESPONSE_MIRROR_FILE_ENTRY_FROM_XSOAR = [{'Contents': '', 'ContentsFormat': 'text', 'Type': 3, - 'File': 'test_mirrored_from_xsoar.txt', - 'FileID': '20eb1079-d6c3-47cf-81bf-95968f93f6d3'}] +RESPONSE_MIRROR_FILE_ENTRY_FROM_XSOAR = [ + { + "Contents": "", + "ContentsFormat": "text", + "Type": 3, + "File": "test_mirrored_from_xsoar.txt", + "FileID": "20eb1079-d6c3-47cf-81bf-95968f93f6d3", + } +] -MIRROR_COMMENTS_RESPONSE_FROM_XSOAR = {'result': [{ - 'sys_id': '549fc0bfdbaa101053482fb748961941', 'sys_created_on': '2020-08-17 06:31:49', 'name': 'incident', - 'element_id': '9bf0f1afdbe6101053482fb748961908', 'sys_tags': '', - 'value': 'This is a comment\n\n Mirrored from Cortex XSOAR', 'sys_created_by': 'admin', 'element': 'comments'}]} +MIRROR_COMMENTS_RESPONSE_FROM_XSOAR = { + "result": [ + { + "sys_id": "549fc0bfdbaa101053482fb748961941", + "sys_created_on": "2020-08-17 06:31:49", + "name": "incident", + "element_id": "9bf0f1afdbe6101053482fb748961908", + "sys_tags": "", + "value": "This is a comment\n\n Mirrored from Cortex XSOAR", + "sys_created_by": "admin", + "element": "comments", + } + ] +} MIRROR_ENTRIES = [ - {'Contents': '', 'ContentsFormat': 'text', 'type': 3, 'File': 'test.txt', - 'FileID': '20eb1079-d6c3-47cf-81bf-95968f93f6d3', 'id': 'entry-id'}, - {'type': None, 'category': None, 'contents': 'This is a comment', 'contentsFormat': None, - 'tags': ['comments', 'work_notes'], 'note': True}] + { + "Contents": "", + "ContentsFormat": "text", + "type": 3, + "File": "test.txt", + "FileID": "20eb1079-d6c3-47cf-81bf-95968f93f6d3", + "id": "entry-id", + }, + { + "type": None, + "category": None, + "contents": "This is a comment", + "contentsFormat": None, + "tags": ["comments", "work_notes"], + "note": True, + }, +] MIRROR_ENTRIES_WITH_EMPTY_USERNAME = [ - {'Contents': '', 'ContentsFormat': 'text', 'type': 3, 'File': 'test.txt', - 'FileID': '20eb1079-d6c3-47cf-81bf-95968f93f6d3', 'user': ''}, - {'type': None, 'category': None, 'contents': 'This is a comment', 'contentsFormat': None, - 'tags': ['comments', 'work_notes'], 'note': True, 'user': ''}] + { + "Contents": "", + "ContentsFormat": "text", + "type": 3, + "File": "test.txt", + "FileID": "20eb1079-d6c3-47cf-81bf-95968f93f6d3", + "user": "", + }, + { + "type": None, + "category": None, + "contents": "This is a comment", + "contentsFormat": None, + "tags": ["comments", "work_notes"], + "note": True, + "user": "", + }, +] OAUTH_PARAMS = { - 'insecure': False, - 'credentials': { - 'identifier': 'user1', - 'password:': '12345' - }, - 'proxy': False, - 'client_id': 'client_id', - 'client_secret': 'client_secret', - 'use_oauth': True + 
"insecure": False, + "credentials": {"identifier": "user1", "password:": "12345"}, + "proxy": False, + "client_id": "client_id", + "client_secret": "client_secret", + "use_oauth": True, } -RESPONSE_GENERIC_TICKET = { - 'sys_created_by': 'svc.SecOps_SplunkPhantom', - 'sys_class_name': 'sn_si_incident' -} +RESPONSE_GENERIC_TICKET = {"sys_created_by": "svc.SecOps_SplunkPhantom", "sys_class_name": "sn_si_incident"} diff --git a/Packs/ServiceNow/Integrations/ServiceNowv2/test_data/result_constants.py b/Packs/ServiceNow/Integrations/ServiceNowv2/test_data/result_constants.py index a9552de70328..de356151d50f 100644 --- a/Packs/ServiceNow/Integrations/ServiceNowv2/test_data/result_constants.py +++ b/Packs/ServiceNow/Integrations/ServiceNowv2/test_data/result_constants.py @@ -1,464 +1,644 @@ EXPECTED_TICKET_CONTEXT = { - 'Active': 'true', - 'CreatedOn': '2019-09-05 00:42:29', - 'Creator': 'test', - 'ID': 'sys_id', - 'Number': 'INC0000039', - 'OpenedAt': '2019-09-05 00:41:01', - 'OpenedBy': 'test', - 'Priority': '4 - Low', - 'State': '1', - 'Summary': 'Trouble getting to Oregon mail server' + "Active": "true", + "CreatedOn": "2019-09-05 00:42:29", + "Creator": "test", + "ID": "sys_id", + "Number": "INC0000039", + "OpenedAt": "2019-09-05 00:41:01", + "OpenedBy": "test", + "Priority": "4 - Low", + "State": "1", + "Summary": "Trouble getting to Oregon mail server", } EXPECTED_TICKET_CONTEXT_WITH_ADDITIONAL_FIELDS = { - 'Active': 'true', - 'CreatedOn': '2019-09-05 00:42:29', - 'Creator': 'test', - 'ID': 'sys_id', - 'Number': 'INC0000039', - 'OpenedAt': '2019-09-05 00:41:01', - 'OpenedBy': 'test', - 'Priority': '4 - Low', - 'State': '1', - 'Summary': 'Trouble getting to Oregon mail server', - 'sys_created_by': 'admin' + "Active": "true", + "CreatedOn": "2019-09-05 00:42:29", + "Creator": "test", + "ID": "sys_id", + "Number": "INC0000039", + "OpenedAt": "2019-09-05 00:41:01", + "OpenedBy": "test", + "Priority": "4 - Low", + "State": "1", + "Summary": "Trouble getting to Oregon mail server", + "sys_created_by": "admin", } EXPECTED_TICKET_CONTEXT_WITH_NESTED_ADDITIONAL_FIELDS = { - 'Active': 'true', - 'CreatedOn': '2019-09-05 00:42:29', - 'Creator': 'test', - 'ID': 'sys_id', - 'Number': 'INC0000039', - 'OpenedAt': '2019-09-05 00:41:01', - 'OpenedBy': 'test', - 'Priority': '4 - Low', - 'State': '1', - 'Summary': 'Trouble getting to Oregon mail server', - 'opened_by': {'link': 'demisto.com'} + "Active": "true", + "CreatedOn": "2019-09-05 00:42:29", + "Creator": "test", + "ID": "sys_id", + "Number": "INC0000039", + "OpenedAt": "2019-09-05 00:41:01", + "OpenedBy": "test", + "Priority": "4 - Low", + "State": "1", + "Summary": "Trouble getting to Oregon mail server", + "opened_by": {"link": "demisto.com"}, } EXPECTED_MULTIPLE_TICKET_CONTEXT = [ { - 'Active': 'true', - 'CreatedOn': '2019-09-05 00:42:29', - 'Creator': 'test2', - 'ID': 'sys_id', - 'Number': 'INC0000040', - 'OpenedAt': '2019-09-05 00:41:01', - 'OpenedBy': 'test2', - 'Priority': '4 - Low', - 'State': '1', - 'Summary': 'Trouble getting to Oregon mail server' + "Active": "true", + "CreatedOn": "2019-09-05 00:42:29", + "Creator": "test2", + "ID": "sys_id", + "Number": "INC0000040", + "OpenedAt": "2019-09-05 00:41:01", + "OpenedBy": "test2", + "Priority": "4 - Low", + "State": "1", + "Summary": "Trouble getting to Oregon mail server", }, { - 'Active': 'true', - 'CreatedOn': '2019-09-05 00:42:29', - 'Creator': 'test', - 'ID': 'sys_id', - 'Number': 'INC0000039', - 'OpenedAt': '2019-09-05 00:41:01', - 'OpenedBy': 'test', - 'Priority': '4 - Low', - 'State': 
'1', - 'Summary': 'Trouble getting to Oregon mail server' - } + "Active": "true", + "CreatedOn": "2019-09-05 00:42:29", + "Creator": "test", + "ID": "sys_id", + "Number": "INC0000039", + "OpenedAt": "2019-09-05 00:41:01", + "OpenedBy": "test", + "Priority": "4 - Low", + "State": "1", + "Summary": "Trouble getting to Oregon mail server", + }, ] EXPECTED_TICKET_HR = [ { - 'Active': 'true', - 'Additional Comments': '', - 'Close Code': '', - 'Close Notes': '', - 'Created By': 'admin', - 'Created On': '2019-09-05 00:42:29', - 'Description': 'Unable to access Oregon mail server. Is it down?', - 'Due Date': '', - 'Impact': '2 - Medium', - 'Number': 'INC0000039', - 'Opened At': '2019-09-05 00:41:01', - 'Priority': '4 - Low', - 'Resolved At': '', - 'Resolved By': '', - 'SLA Due': '2019-09-26 00:41:01', - 'Severity': '3 - Low', - 'Short Description': 'Trouble getting to Oregon mail server', - 'State': '1 - New', - 'System ID': 'sys_id', - 'Urgency': '3 - Low' + "Active": "true", + "Additional Comments": "", + "Close Code": "", + "Close Notes": "", + "Created By": "admin", + "Created On": "2019-09-05 00:42:29", + "Description": "Unable to access Oregon mail server. Is it down?", + "Due Date": "", + "Impact": "2 - Medium", + "Number": "INC0000039", + "Opened At": "2019-09-05 00:41:01", + "Priority": "4 - Low", + "Resolved At": "", + "Resolved By": "", + "SLA Due": "2019-09-26 00:41:01", + "Severity": "3 - Low", + "Short Description": "Trouble getting to Oregon mail server", + "State": "1 - New", + "System ID": "sys_id", + "Urgency": "3 - Low", } ] EXPECTED_MULTIPLE_TICKET_HR = [ { - 'Active': 'true', - 'Additional Comments': '', - 'Close Code': '', - 'Close Notes': '', - 'Created By': 'admin', - 'Created On': '2019-09-05 00:42:29', - 'Description': 'Unable to access Oregon mail server. Is it down?', - 'Due Date': '', - 'Impact': '2 - Medium', - 'Number': 'INC0000040', - 'Opened At': '2019-09-05 00:41:01', - 'Priority': '4 - Low', - 'Resolved At': '', - 'Resolved By': '', - 'SLA Due': '2019-09-26 00:41:01', - 'Severity': '3 - Low', - 'Short Description': 'Trouble getting to Oregon mail server', - 'State': '1 - New', - 'System ID': 'sys_id', - 'Urgency': '3 - Low' + "Active": "true", + "Additional Comments": "", + "Close Code": "", + "Close Notes": "", + "Created By": "admin", + "Created On": "2019-09-05 00:42:29", + "Description": "Unable to access Oregon mail server. Is it down?", + "Due Date": "", + "Impact": "2 - Medium", + "Number": "INC0000040", + "Opened At": "2019-09-05 00:41:01", + "Priority": "4 - Low", + "Resolved At": "", + "Resolved By": "", + "SLA Due": "2019-09-26 00:41:01", + "Severity": "3 - Low", + "Short Description": "Trouble getting to Oregon mail server", + "State": "1 - New", + "System ID": "sys_id", + "Urgency": "3 - Low", }, { - 'Active': 'true', - 'Additional Comments': '', - 'Close Code': '', - 'Close Notes': '', - 'Created By': 'admin', - 'Created On': '2019-09-05 00:42:29', - 'Description': 'Unable to access Oregon mail server. 
Is it down?', - 'Due Date': '', - 'Impact': '2 - Medium', - 'Number': 'INC0000040', - 'Opened At': '2019-09-05 00:41:01', - 'Priority': '4 - Low', - 'Resolved At': '', - 'Resolved By': '', - 'SLA Due': '2019-09-26 00:41:01', - 'Severity': '3 - Low', - 'Short Description': 'Trouble getting to Oregon mail server', - 'State': '1 - New', - 'System ID': 'sys_id', - 'Urgency': '3 - Low' - } + "Active": "true", + "Additional Comments": "", + "Close Code": "", + "Close Notes": "", + "Created By": "admin", + "Created On": "2019-09-05 00:42:29", + "Description": "Unable to access Oregon mail server. Is it down?", + "Due Date": "", + "Impact": "2 - Medium", + "Number": "INC0000040", + "Opened At": "2019-09-05 00:41:01", + "Priority": "4 - Low", + "Resolved At": "", + "Resolved By": "", + "SLA Due": "2019-09-26 00:41:01", + "Severity": "3 - Low", + "Short Description": "Trouble getting to Oregon mail server", + "State": "1 - New", + "System ID": "sys_id", + "Urgency": "3 - Low", + }, ] EXPECTED_UPDATE_TICKET = { - 'ServiceNow.Ticket(val.ID===obj.ID)': { - 'ID': 'sys_id', 'Summary': 'Trouble getting to Oregon mail server', - 'Number': 'INC0000039', 'CreatedOn': '2019-09-05 00:42:29', 'Active': 'true', 'OpenedAt': '2019-09-05 00:41:01', - 'OpenedBy': 'test', 'Creator': 'test', - 'Priority': '4 - Low', 'State': '1', 'impact': '2' + "ServiceNow.Ticket(val.ID===obj.ID)": { + "ID": "sys_id", + "Summary": "Trouble getting to Oregon mail server", + "Number": "INC0000039", + "CreatedOn": "2019-09-05 00:42:29", + "Active": "true", + "OpenedAt": "2019-09-05 00:41:01", + "OpenedBy": "test", + "Creator": "test", + "Priority": "4 - Low", + "State": "1", + "impact": "2", } } EXPECTED_UPDATE_TICKET_SC_REQ = { - 'ServiceNow.Ticket(val.ID===obj.ID)': { - 'ID': '1234', 'Summary': 'Microsoft Access', 'Number': 'RITM0010028', 'CreatedOn': '2020-04-16 15:33:00', - 'Active': 'true', 'OpenedAt': '2020-04-16 15:33:00', 'OpenedBy': 'admin', - 'Creator': 'admin', 'Priority': '4 - Low', 'State': '1', 'approval': 'requested' + "ServiceNow.Ticket(val.ID===obj.ID)": { + "ID": "1234", + "Summary": "Microsoft Access", + "Number": "RITM0010028", + "CreatedOn": "2020-04-16 15:33:00", + "Active": "true", + "OpenedAt": "2020-04-16 15:33:00", + "OpenedBy": "admin", + "Creator": "admin", + "Priority": "4 - Low", + "State": "1", + "approval": "requested", } } EXPECTED_UPDATE_TICKET_ADDITIONAL = { - 'ServiceNow.Ticket(val.ID===obj.ID)': { - 'ID': '1234', 'Summary': 'Trouble getting to Oregon mail server', 'Number': 'INC0000039', - 'CreatedOn': '2019-09-05 00:42:29', 'Active': 'true', 'OpenedAt': '2019-09-05 00:41:01', - 'OpenedBy': 'admin', 'Creator': 'admin', - 'Priority': '5 - Planning', 'State': '1', 'severity': '3', 'approval': 'rejected' + "ServiceNow.Ticket(val.ID===obj.ID)": { + "ID": "1234", + "Summary": "Trouble getting to Oregon mail server", + "Number": "INC0000039", + "CreatedOn": "2019-09-05 00:42:29", + "Active": "true", + "OpenedAt": "2019-09-05 00:41:01", + "OpenedBy": "admin", + "Creator": "admin", + "Priority": "5 - Planning", + "State": "1", + "severity": "3", + "approval": "rejected", } } EXPECTED_CREATE_TICKET = { - 'Ticket(val.ID===obj.ID)': { - 'ID': 'sys_id', 'Number': 'INC0010007', 'CreatedOn': '2020-04-06 13:04:44', - 'Active': 'true', 'OpenedAt': '2020-04-06 13:04:44', 'OpenedBy': 'test', - 'Creator': 'test', 'Priority': '5 - Planning', 'State': '1', 'severity': '3', 'sla_due': '2020-10-10 10:10:11', - "description": "creating a test ticket" + "Ticket(val.ID===obj.ID)": { + "ID": "sys_id", + "Number": "INC0010007", 
+ "CreatedOn": "2020-04-06 13:04:44", + "Active": "true", + "OpenedAt": "2020-04-06 13:04:44", + "OpenedBy": "test", + "Creator": "test", + "Priority": "5 - Planning", + "State": "1", + "severity": "3", + "sla_due": "2020-10-10 10:10:11", + "description": "creating a test ticket", + }, + "ServiceNow.Ticket(val.ID===obj.ID)": { + "ID": "sys_id", + "Number": "INC0010007", + "CreatedOn": "2020-04-06 13:04:44", + "Active": "true", + "OpenedAt": "2020-04-06 13:04:44", + "OpenedBy": "test", + "Creator": "test", + "Priority": "5 - Planning", + "State": "1", + "severity": "3", + "sla_due": "2020-10-10 10:10:11", + "description": "creating a test ticket", }, - 'ServiceNow.Ticket(val.ID===obj.ID)': { - 'ID': 'sys_id', 'Number': 'INC0010007', 'CreatedOn': '2020-04-06 13:04:44', - 'Active': 'true', 'OpenedAt': '2020-04-06 13:04:44', 'OpenedBy': 'test', - 'Creator': 'test', 'Priority': '5 - Planning', 'State': '1', 'severity': '3', 'sla_due': '2020-10-10 10:10:11', - "description": "creating a test ticket" - } } EXPECTED_CREATE_TICKET_WITH_OUT_JSON = {} EXPECTED_QUERY_TICKETS = { - 'Ticket(val.ID===obj.ID)': [ + "Ticket(val.ID===obj.ID)": [ { - 'ID': 'sys_id', 'Summary': "Can't read email", 'Number': 'INC0000001', - 'CreatedOn': '2018-04-03 18:24:13', 'Active': 'false', 'CloseCode': 'Closed/Resolved by Caller', - 'OpenedAt': '2019-09-02 23:09:51', 'ResolvedBy': 'admin', 'OpenedBy': 'admin', 'Creator': 'admin', - 'Assignee': 'admin', 'Priority': '1 - Critical', 'State': '7' + "ID": "sys_id", + "Summary": "Can't read email", + "Number": "INC0000001", + "CreatedOn": "2018-04-03 18:24:13", + "Active": "false", + "CloseCode": "Closed/Resolved by Caller", + "OpenedAt": "2019-09-02 23:09:51", + "ResolvedBy": "admin", + "OpenedBy": "admin", + "Creator": "admin", + "Assignee": "admin", + "Priority": "1 - Critical", + "State": "7", }, { - 'ID': 'sys_id', 'Summary': 'Network file shares access issue', 'Number': 'INC0000002', - 'CreatedOn': '2018-03-23 22:30:06', 'Active': 'true', 'OpenedAt': '2019-08-27 23:07:12', - 'OpenedBy': 'admin', 'Creator': 'admin', 'Assignee': 'admin', 'Priority': '1 - Critical', 'State': '3' + "ID": "sys_id", + "Summary": "Network file shares access issue", + "Number": "INC0000002", + "CreatedOn": "2018-03-23 22:30:06", + "Active": "true", + "OpenedAt": "2019-08-27 23:07:12", + "OpenedBy": "admin", + "Creator": "admin", + "Assignee": "admin", + "Priority": "1 - Critical", + "State": "3", + }, + { + "ID": "sys_id", + "Summary": "Wireless access is down in my area", + "Number": "INC0000003", + "CreatedOn": "2018-04-07 14:41:46", + "Active": "true", + "OpenedAt": "2019-09-03 23:07:30", + "OpenedBy": "admin", + "Creator": "admin", + "Assignee": "admin", + "Priority": "1 - Critical", + "State": "2", }, - {'ID': 'sys_id', 'Summary': 'Wireless access is down in my area', 'Number': 'INC0000003', - 'CreatedOn': '2018-04-07 14:41:46', 'Active': 'true', 'OpenedAt': '2019-09-03 23:07:30', - 'OpenedBy': 'admin', 'Creator': 'admin', 'Assignee': 'admin', 'Priority': '1 - Critical', 'State': '2' - } ], - 'ServiceNow.Ticket(val.ID===obj.ID)': [ + "ServiceNow.Ticket(val.ID===obj.ID)": [ { - 'ID': 'sys_id', 'Summary': "Can't read email", 'Number': 'INC0000001', 'CreatedOn': '2018-04-03 18:24:13', - 'Active': 'false', 'CloseCode': 'Closed/Resolved by Caller', 'OpenedAt': '2019-09-02 23:09:51', - 'ResolvedBy': 'admin', 'OpenedBy': 'admin', 'Creator': 'admin', 'Assignee': 'admin', - 'Priority': '1 - Critical', 'State': '7' + "ID": "sys_id", + "Summary": "Can't read email", + "Number": "INC0000001", + 
"CreatedOn": "2018-04-03 18:24:13", + "Active": "false", + "CloseCode": "Closed/Resolved by Caller", + "OpenedAt": "2019-09-02 23:09:51", + "ResolvedBy": "admin", + "OpenedBy": "admin", + "Creator": "admin", + "Assignee": "admin", + "Priority": "1 - Critical", + "State": "7", }, - {'ID': 'sys_id', 'Summary': 'Network file shares access issue', 'Number': 'INC0000002', - 'CreatedOn': '2018-03-23 22:30:06', 'Active': 'true', 'OpenedAt': '2019-08-27 23:07:12', - 'OpenedBy': 'admin', 'Creator': 'admin', 'Assignee': 'admin', 'Priority': '1 - Critical', 'State': '3' - }, - {'ID': 'sys_id', 'Summary': 'Wireless access is down in my area', 'Number': 'INC0000003', - 'CreatedOn': '2018-04-07 14:41:46', 'Active': 'true', 'OpenedAt': '2019-09-03 23:07:30', 'OpenedBy': 'admin', - 'Creator': 'admin', 'Assignee': 'admin', 'Priority': '1 - Critical', 'State': '2' - } - ] + { + "ID": "sys_id", + "Summary": "Network file shares access issue", + "Number": "INC0000002", + "CreatedOn": "2018-03-23 22:30:06", + "Active": "true", + "OpenedAt": "2019-08-27 23:07:12", + "OpenedBy": "admin", + "Creator": "admin", + "Assignee": "admin", + "Priority": "1 - Critical", + "State": "3", + }, + { + "ID": "sys_id", + "Summary": "Wireless access is down in my area", + "Number": "INC0000003", + "CreatedOn": "2018-04-07 14:41:46", + "Active": "true", + "OpenedAt": "2019-09-03 23:07:30", + "OpenedBy": "admin", + "Creator": "admin", + "Assignee": "admin", + "Priority": "1 - Critical", + "State": "2", + }, + ], } EXPECTED_QUERY_TICKETS_EXCLUDE_REFERENCE_LINK = { - 'Ticket(val.ID===obj.ID)': [ + "Ticket(val.ID===obj.ID)": [ { - 'ID': '9c573169c611228700193229fff72400', 'Summary': "Can't read email", 'Number': 'INC0000001', - 'CreatedOn': '06/12/2018 10:24:13', 'Active': 'false', 'CloseCode': 'Closed/Resolved by Caller', - 'OpenedAt': '06/05/2020 16:09:51', 'ResolvedBy': 'Don Goodliffe', 'OpenedBy': 'Joe Employee', - 'Creator': 'Joe Employee', 'Assignee': 'Charlie Whitherspoon', 'Priority': '1 - Critical', 'State': 'Closed' + "ID": "9c573169c611228700193229fff72400", + "Summary": "Can't read email", + "Number": "INC0000001", + "CreatedOn": "06/12/2018 10:24:13", + "Active": "false", + "CloseCode": "Closed/Resolved by Caller", + "OpenedAt": "06/05/2020 16:09:51", + "ResolvedBy": "Don Goodliffe", + "OpenedBy": "Joe Employee", + "Creator": "Joe Employee", + "Assignee": "Charlie Whitherspoon", + "Priority": "1 - Critical", + "State": "Closed", } ], - 'ServiceNow.Ticket(val.ID===obj.ID)': [ + "ServiceNow.Ticket(val.ID===obj.ID)": [ { - 'ID': '9c573169c611228700193229fff72400', - 'Summary': "Can't read email", 'Number': 'INC0000001', - 'CreatedOn': '06/12/2018 10:24:13', 'Active': 'false', - 'CloseCode': 'Closed/Resolved by Caller', - 'OpenedAt': '06/05/2020 16:09:51', 'ResolvedBy': 'Don Goodliffe', - 'OpenedBy': 'Joe Employee', 'Creator': 'Joe Employee', - 'Assignee': 'Charlie Whitherspoon', 'Priority': '1 - Critical', - 'State': 'Closed' + "ID": "9c573169c611228700193229fff72400", + "Summary": "Can't read email", + "Number": "INC0000001", + "CreatedOn": "06/12/2018 10:24:13", + "Active": "false", + "CloseCode": "Closed/Resolved by Caller", + "OpenedAt": "06/05/2020 16:09:51", + "ResolvedBy": "Don Goodliffe", + "OpenedBy": "Joe Employee", + "Creator": "Joe Employee", + "Assignee": "Charlie Whitherspoon", + "Priority": "1 - Critical", + "State": "Closed", } - ] + ], } -EXPECTED_ADD_LINK_HR = '### Link successfully added to ServiceNow ticket' -EXPECTED_ADD_COMMENT_HR = '### Comment successfully added to ServiceNow ticket' 
+EXPECTED_ADD_LINK_HR = "### Link successfully added to ServiceNow ticket" +EXPECTED_ADD_COMMENT_HR = "### Comment successfully added to ServiceNow ticket" EXPECTED_UPLOAD_FILE = { - 'ServiceNow.Ticket(val.ID===obj.ID)': { - 'ID': 'sys_id', 'File': {'Filename': 'test_file', 'Link': 'test_link', 'SystemID': 'system_id'} + "ServiceNow.Ticket(val.ID===obj.ID)": { + "ID": "sys_id", + "File": {"Filename": "test_file", "Link": "test_link", "SystemID": "system_id"}, }, - 'Ticket(val.ID===obj.ID)': { - 'ID': 'sys_id', 'File': {'Filename': 'test_file', 'Link': 'test_link', 'SystemID': 'system_id'} - } + "Ticket(val.ID===obj.ID)": {"ID": "sys_id", "File": {"Filename": "test_file", "Link": "test_link", "SystemID": "system_id"}}, } EXPECTED_GET_TICKET_NOTES = { - 'ID': 'sys_id', 'Note': [ - {'Value': '[code]demsito_link[/code]', - 'CreatedOn': '2020-04-07 07:32:12', 'CreatedBy': 'admin', 'Type': 'Work Note'}, - {'Value': '[code]demsito_link[/code]', - 'CreatedOn': '2020-04-07 07:25:51', 'CreatedBy': 'admin', 'Type': 'Work Note'}, - {'Value': 'Nice work!', 'CreatedOn': '2020-04-07 07:46:34', 'CreatedBy': 'admin', 'Type': 'Work Note'}, - {'Value': 'Nice work!', 'CreatedOn': '2020-04-07 07:46:25', 'CreatedBy': 'admin', 'Type': 'Work Note'}, - {'Value': '[code]demsito_link[/code]', - 'CreatedOn': '2020-04-07 07:26:01', 'CreatedBy': 'admin', 'Type': 'Work Note'}] + "ID": "sys_id", + "Note": [ + { + "Value": '[code]demsito_link[/code]', + "CreatedOn": "2020-04-07 07:32:12", + "CreatedBy": "admin", + "Type": "Work Note", + }, + { + "Value": '[code]demsito_link[/code]', + "CreatedOn": "2020-04-07 07:25:51", + "CreatedBy": "admin", + "Type": "Work Note", + }, + {"Value": "Nice work!", "CreatedOn": "2020-04-07 07:46:34", "CreatedBy": "admin", "Type": "Work Note"}, + {"Value": "Nice work!", "CreatedOn": "2020-04-07 07:46:25", "CreatedBy": "admin", "Type": "Work Note"}, + { + "Value": '[code]demsito_link[/code]', + "CreatedOn": "2020-04-07 07:26:01", + "CreatedBy": "admin", + "Type": "Work Note", + }, + ], } EXPECTED_GET_TICKET_NOTES_DISPLAY_VALUE = { - 'ID': 'sys_id', 'Note': [ - {'Value': 'Second comment\n\n Mirrored from Cortex XSOAR', 'CreatedOn': '2022-11-21 21:50:34', - 'CreatedBy': 'System Administrator', 'Type': 'Comment'}, - {'Value': 'First comment', 'CreatedOn': '2022-11-21 20:45:37', 'CreatedBy': 'Test User', 'Type': 'Comment'}] + "ID": "sys_id", + "Note": [ + { + "Value": "Second comment\n\n Mirrored from Cortex XSOAR", + "CreatedOn": "2022-11-21 21:50:34", + "CreatedBy": "System Administrator", + "Type": "Comment", + }, + {"Value": "First comment", "CreatedOn": "2022-11-21 20:45:37", "CreatedBy": "Test User", "Type": "Comment"}, + ], } EXPECTED_GET_RECORD = { - 'ServiceNow.Record(val.ID===obj.ID)': { - 'asset_tag': 'P1000479', 'display_name': 'P1000479 - Apple MacBook Pro 15"', 'ID': 'sys_id' + "ServiceNow.Record(val.ID===obj.ID)": { + "asset_tag": "P1000479", + "display_name": 'P1000479 - Apple MacBook Pro 15"', + "ID": "sys_id", } } EXPECTED_UPDATE_RECORD = { - 'ServiceNow.Record(val.ID===obj.ID)': { - 'ID': 'sys_id', 'UpdatedBy': 'system', 'UpdatedAt': '2020-04-07 06:31:50', 'CreatedBy': 'admin', - 'CreatedAt': '2019-02-23 08:14:21' + "ServiceNow.Record(val.ID===obj.ID)": { + "ID": "sys_id", + "UpdatedBy": "system", + "UpdatedAt": "2020-04-07 06:31:50", + "CreatedBy": "admin", + "CreatedAt": "2019-02-23 08:14:21", } } EXPECTED_CREATE_RECORD = { - 'ServiceNow.Record(val.ID===obj.ID)': { - 'ID': 'sys_id', 'UpdatedBy': 'admin', 'UpdatedAt': '2020-04-07 12:48:38', 'CreatedBy': 'admin', - 'CreatedAt': 
'2020-04-07 12:48:38' + "ServiceNow.Record(val.ID===obj.ID)": { + "ID": "sys_id", + "UpdatedBy": "admin", + "UpdatedAt": "2020-04-07 12:48:38", + "CreatedBy": "admin", + "CreatedAt": "2020-04-07 12:48:38", } } EXPECTED_QUERY_TABLE = { - 'ServiceNow.Record(val.ID===obj.ID)': [ + "ServiceNow.Record(val.ID===obj.ID)": [ { - 'sys_updated_by': 'system', 'asset_tag': 'P1000807', 'display_name': 'P1000807 - Apple MacBook Pro 17"', - 'ID': 'sys_id2' + "sys_updated_by": "system", + "asset_tag": "P1000807", + "display_name": 'P1000807 - Apple MacBook Pro 17"', + "ID": "sys_id2", }, { - 'sys_updated_by': 'system', 'asset_tag': 'P1000637', 'display_name': 'P1000637 - Apple MacBook Air 13"', - 'ID': 'sys_id3' + "sys_updated_by": "system", + "asset_tag": "P1000637", + "display_name": 'P1000637 - Apple MacBook Air 13"', + "ID": "sys_id3", }, { - 'sys_updated_by': 'system', 'asset_tag': 'P1000412', 'display_name': - 'P1000412 - Apple MacBook Pro 17"', 'ID': 'sys_id4' - } + "sys_updated_by": "system", + "asset_tag": "P1000412", + "display_name": 'P1000412 - Apple MacBook Pro 17"', + "ID": "sys_id4", + }, ] } EXPECTED_QUERY_TABLE_SYS_PARAMS = { - 'ServiceNow.Record(val.ID===obj.ID)': [ + "ServiceNow.Record(val.ID===obj.ID)": [ { - 'number': 'TASK0000001', 'state': '1', - 'description': 'Order from vendor or move from in-stock inventory\n\t\t', - 'approval': 'not requested', 'escalation': '0', 'ID': '1234' + "number": "TASK0000001", + "state": "1", + "description": "Order from vendor or move from in-stock inventory\n\t\t", + "approval": "not requested", + "escalation": "0", + "ID": "1234", } ] } EXPECTED_LIST_TABLE_FIELDS = { - 'ServiceNow.Field': [ - {'Name': 'acquisition_method'}, {'Name': 'asset_tag'}, {'Name': 'assigned'}, {'Name': 'assigned_to'}, - {'Name': 'beneficiary'}, {'Name': 'checked_in'}, {'Name': 'checked_out'}, {'Name': 'ci'}, {'Name': 'comments'}, - {'Name': 'company'}, {'Name': 'cost'}, {'Name': 'cost_center'}, {'Name': 'delivery_date'}, - {'Name': 'department'}, {'Name': 'depreciated_amount'}, {'Name': 'depreciation'}, {'Name': 'depreciation_date'}, - {'Name': 'display_name'}, {'Name': 'disposal_reason'}, {'Name': 'due'}, {'Name': 'due_in'}, - {'Name': 'expenditure_type'}, {'Name': 'gl_account'}, {'Name': 'install_date'}, {'Name': 'install_status'}, - {'Name': 'invoice_number'}, {'Name': 'justification'}, {'Name': 'lease_id'}, {'Name': 'location'}, - {'Name': 'managed_by'}, {'Name': 'model'}, {'Name': 'model_category'}, {'Name': 'old_status'}, - {'Name': 'old_substatus'}, {'Name': 'order_date'}, {'Name': 'owned_by'}, {'Name': 'parent'}, - {'Name': 'po_number'}, {'Name': 'pre_allocated'}, {'Name': 'purchase_date'}, {'Name': 'quantity'}, - {'Name': 'request_line'}, {'Name': 'resale_price'}, {'Name': 'reserved_for'}, {'Name': 'residual'}, - {'Name': 'residual_date'}, {'Name': 'retired'}, {'Name': 'retirement_date'}, {'Name': 'salvage_value'}, - {'Name': 'serial_number'}, {'Name': 'skip_sync'}, {'Name': 'stockroom'}, {'Name': 'substatus'}, - {'Name': 'support_group'}, {'Name': 'supported_by'}, {'Name': 'sys_class_name'}, {'Name': 'sys_created_by'}, - {'Name': 'sys_created_on'}, {'Name': 'sys_domain'}, {'Name': 'sys_domain_path'}, {'Name': 'sys_id'}, - {'Name': 'sys_mod_count'}, {'Name': 'sys_tags'}, {'Name': 'sys_updated_by'}, {'Name': 'sys_updated_on'}, - {'Name': 'vendor'}, {'Name': 'warranty_expiration'}, {'Name': 'work_notes'} + "ServiceNow.Field": [ + {"Name": "acquisition_method"}, + {"Name": "asset_tag"}, + {"Name": "assigned"}, + {"Name": "assigned_to"}, + {"Name": "beneficiary"}, + 
{"Name": "checked_in"}, + {"Name": "checked_out"}, + {"Name": "ci"}, + {"Name": "comments"}, + {"Name": "company"}, + {"Name": "cost"}, + {"Name": "cost_center"}, + {"Name": "delivery_date"}, + {"Name": "department"}, + {"Name": "depreciated_amount"}, + {"Name": "depreciation"}, + {"Name": "depreciation_date"}, + {"Name": "display_name"}, + {"Name": "disposal_reason"}, + {"Name": "due"}, + {"Name": "due_in"}, + {"Name": "expenditure_type"}, + {"Name": "gl_account"}, + {"Name": "install_date"}, + {"Name": "install_status"}, + {"Name": "invoice_number"}, + {"Name": "justification"}, + {"Name": "lease_id"}, + {"Name": "location"}, + {"Name": "managed_by"}, + {"Name": "model"}, + {"Name": "model_category"}, + {"Name": "old_status"}, + {"Name": "old_substatus"}, + {"Name": "order_date"}, + {"Name": "owned_by"}, + {"Name": "parent"}, + {"Name": "po_number"}, + {"Name": "pre_allocated"}, + {"Name": "purchase_date"}, + {"Name": "quantity"}, + {"Name": "request_line"}, + {"Name": "resale_price"}, + {"Name": "reserved_for"}, + {"Name": "residual"}, + {"Name": "residual_date"}, + {"Name": "retired"}, + {"Name": "retirement_date"}, + {"Name": "salvage_value"}, + {"Name": "serial_number"}, + {"Name": "skip_sync"}, + {"Name": "stockroom"}, + {"Name": "substatus"}, + {"Name": "support_group"}, + {"Name": "supported_by"}, + {"Name": "sys_class_name"}, + {"Name": "sys_created_by"}, + {"Name": "sys_created_on"}, + {"Name": "sys_domain"}, + {"Name": "sys_domain_path"}, + {"Name": "sys_id"}, + {"Name": "sys_mod_count"}, + {"Name": "sys_tags"}, + {"Name": "sys_updated_by"}, + {"Name": "sys_updated_on"}, + {"Name": "vendor"}, + {"Name": "warranty_expiration"}, + {"Name": "work_notes"}, ] } EXPECTED_QUERY_COMPUTERS = { - 'ServiceNow.Computer(val.ID===obj.ID)': [ + "ServiceNow.Computer(val.ID===obj.ID)": [ { - 'ID': '1234', 'AssetTag': 'P1000357', 'Name': 'Precision T5500 Workstation', - 'DisplayName': 'P1000357 - Precision T5500 Workstation', 'OperatingSystem': 'Windows XP Professional', - 'Company': 'admin', 'AssignedTo': 'admin', 'State': 'In use', 'Cost': '1329 USD' + "ID": "1234", + "AssetTag": "P1000357", + "Name": "Precision T5500 Workstation", + "DisplayName": "P1000357 - Precision T5500 Workstation", + "OperatingSystem": "Windows XP Professional", + "Company": "admin", + "AssignedTo": "admin", + "State": "In use", + "Cost": "1329 USD", } ] } EXPECTED_GET_TABLE_NAME = { - 'ServiceNow.Table(val.ID===obj.ID)': [ - { - 'ID': '123', 'Name': 'cmdb_ci_lb_ace', 'SystemName': 'CMDB CI Lb Ace' - } - ] -} -EXPECTED_ADD_TAG = { - 'ServiceNow.Ticket(val.ID===obj.ID)': { - 'ID': '123', 'TagTitle': 'title', 'TagID': '1234' - } + "ServiceNow.Table(val.ID===obj.ID)": [{"ID": "123", "Name": "cmdb_ci_lb_ace", "SystemName": "CMDB CI Lb Ace"}] } +EXPECTED_ADD_TAG = {"ServiceNow.Ticket(val.ID===obj.ID)": {"ID": "123", "TagTitle": "title", "TagID": "1234"}} EXPECTED_QUERY_ITEMS = { - 'ServiceNow.CatalogItem(val.ID===obj.ID)': - [ - { - 'ID': '123', - 'Name': 'Apple iPad 3', - 'Description': 'Apple iPad 3', - 'Price': '600' - }, - { - 'ID': '1234', - 'Name': 'Belkin iPad Mini Case', - 'Description': 'Belkin iPad Mini 2 Case', - 'Price': '50' - } - ] + "ServiceNow.CatalogItem(val.ID===obj.ID)": [ + {"ID": "123", "Name": "Apple iPad 3", "Description": "Apple iPad 3", "Price": "600"}, + {"ID": "1234", "Name": "Belkin iPad Mini Case", "Description": "Belkin iPad Mini 2 Case", "Price": "50"}, + ] } EXPECTED_ITEM_DETAILS = { - 'ServiceNow.CatalogItem(val.ID===obj.ID)': { - 'ID': '1234', 'Name': 'Apple iPhone 5', 'Description': 'Apple 
iPhone 5', 'Price': '$599.99', - 'Variables': [ - {'Question': 'Allocated carrier', 'Type': 'Select Box', 'Name': 'carrier', 'Mandatory': False}, - {'Question': 'Monthly data allowance', 'Type': 'Select Box', 'Name': 'data_plan', 'Mandatory': False}, - {'Question': 'Contract duration', 'Type': 'Select Box', 'Name': 'duration', 'Mandatory': False}, - {'Question': 'Color', 'Type': 'Select Box', 'Name': 'color', 'Mandatory': False}, - {'Question': 'Storage', 'Type': 'Select Box', 'Name': 'storage', 'Mandatory': False} - ] - } -} -EXPECTED_CREATE_ITEM_ORDER = { - 'ServiceNow.OrderRequest(val.ID===obj.ID)': { - 'ID': '12', 'RequestNumber': 'REQ0010002' + "ServiceNow.CatalogItem(val.ID===obj.ID)": { + "ID": "1234", + "Name": "Apple iPhone 5", + "Description": "Apple iPhone 5", + "Price": "$599.99", + "Variables": [ + {"Question": "Allocated carrier", "Type": "Select Box", "Name": "carrier", "Mandatory": False}, + {"Question": "Monthly data allowance", "Type": "Select Box", "Name": "data_plan", "Mandatory": False}, + {"Question": "Contract duration", "Type": "Select Box", "Name": "duration", "Mandatory": False}, + {"Question": "Color", "Type": "Select Box", "Name": "color", "Mandatory": False}, + {"Question": "Storage", "Type": "Select Box", "Name": "storage", "Mandatory": False}, + ], } } +EXPECTED_CREATE_ITEM_ORDER = {"ServiceNow.OrderRequest(val.ID===obj.ID)": {"ID": "12", "RequestNumber": "REQ0010002"}} EXPECTED_DOCUMENT_ROUTE = { - 'ServiceNow.WorkItem(val.WorkItemID===obj.WorkItemID)': - { - 'DisplayName': 'Incident: INC0000060', 'DocumentID': 'document_id', 'DocumentTable': 'incident', - 'QueueID': 'queue_id', 'WorkItemID': 'work_item_id' - } + "ServiceNow.WorkItem(val.WorkItemID===obj.WorkItemID)": { + "DisplayName": "Incident: INC0000060", + "DocumentID": "document_id", + "DocumentTable": "incident", + "QueueID": "queue_id", + "WorkItemID": "work_item_id", + } } EXPECTED_MAPPING = { - 'incident': - { - 'active': '', - 'activity_due': '', - 'opened_at': '', - 'short_description': '', - 'additional_assignee_list': '', - 'approval_history': '', - 'approval': '', - 'approval_set': '', - 'assigned_to': '', - 'assignment_group': '', - 'business_duration': '', - 'business_service': '', - 'business_stc': '', - 'change_type': '', - 'category': '', - 'caller': '', - 'calendar_duration': '', - 'calendar_stc': '', - 'caller_id': '', - 'caused_by': '', - 'close_code': '', - 'close_notes': '', - 'closed_at': '', - 'closed_by': '', - 'cmdb_ci': '', - 'comments': '', - 'comments_and_work_notes': '', - 'company': '', - 'contact_type': '', - 'correlation_display': '', - 'correlation_id': '', - 'delivery_plan': '', - 'delivery_task': '', - 'description': '', - 'due_date': '', - 'expected_start': '', - 'follow_up': '', - 'group_list': '', - 'hold_reason': '', - 'impact': '', - 'incident_state': '', - 'knowledge': '', - 'location': '', - 'made_sla': '', - 'notify': '', - 'order': '', - 'parent': '', - 'parent_incident': '', - 'priority': '', - 'problem_id': '', - 'reassignment_count': '', - 'reopen_count': '', - 'resolved_at': '', - 'resolved_by': '', - 'rfc': '', - 'severity': '', - 'sla_due': '', - 'state': '', - 'subcategory': '', - 'sys_tags': '', - 'sys_updated_by': '', - 'sys_updated_on': '', - 'time_worked': '', - 'title': '', - 'type': '', - 'urgency': '', - 'user_input': '', - 'watch_list': '', - 'work_end': '', - 'work_notes': '', - 'work_notes_list': '', - 'work_start': '', - 'business_criticality': '', - 'risk_score': '' - } + "incident": { + "active": "", + "activity_due": "", + "opened_at": "", 
+ "short_description": "", + "additional_assignee_list": "", + "approval_history": "", + "approval": "", + "approval_set": "", + "assigned_to": "", + "assignment_group": "", + "business_duration": "", + "business_service": "", + "business_stc": "", + "change_type": "", + "category": "", + "caller": "", + "calendar_duration": "", + "calendar_stc": "", + "caller_id": "", + "caused_by": "", + "close_code": "", + "close_notes": "", + "closed_at": "", + "closed_by": "", + "cmdb_ci": "", + "comments": "", + "comments_and_work_notes": "", + "company": "", + "contact_type": "", + "correlation_display": "", + "correlation_id": "", + "delivery_plan": "", + "delivery_task": "", + "description": "", + "due_date": "", + "expected_start": "", + "follow_up": "", + "group_list": "", + "hold_reason": "", + "impact": "", + "incident_state": "", + "knowledge": "", + "location": "", + "made_sla": "", + "notify": "", + "order": "", + "parent": "", + "parent_incident": "", + "priority": "", + "problem_id": "", + "reassignment_count": "", + "reopen_count": "", + "resolved_at": "", + "resolved_by": "", + "rfc": "", + "severity": "", + "sla_due": "", + "state": "", + "subcategory": "", + "sys_tags": "", + "sys_updated_by": "", + "sys_updated_on": "", + "time_worked": "", + "title": "", + "type": "", + "urgency": "", + "user_input": "", + "watch_list": "", + "work_end": "", + "work_notes": "", + "work_notes_list": "", + "work_start": "", + "business_criticality": "", + "risk_score": "", + } } diff --git a/Packs/ServiceNow/ReleaseNotes/2_7_9.md b/Packs/ServiceNow/ReleaseNotes/2_7_9.md new file mode 100644 index 000000000000..2d39cf46e15c --- /dev/null +++ b/Packs/ServiceNow/ReleaseNotes/2_7_9.md @@ -0,0 +1,36 @@ + +#### Integrations + +##### ServiceNow Event Collector + +- Metadata and documentation improvements. +##### ServiceNow v2 + +- Metadata and documentation improvements. +##### ServiceNow IAM + +- Metadata and documentation improvements. +##### ServiceNow CMDB + +- Metadata and documentation improvements. + +#### Scripts + +##### ServiceNowCreateIncident + +- Metadata and documentation improvements. +##### ServiceNowUpdateIncident + +- Metadata and documentation improvements. +##### ServiceNowQueryIncident + +- Metadata and documentation improvements. +##### ServiceNowIncidentStatus + +- Metadata and documentation improvements. +##### ServiceNowTroubleshoot + +- Metadata and documentation improvements. +##### ServiceNowAddComment + +- Metadata and documentation improvements. 
diff --git a/Packs/ServiceNow/Scripts/ServiceNowAddComment/ServiceNowAddComment.py b/Packs/ServiceNow/Scripts/ServiceNowAddComment/ServiceNowAddComment.py index 3f6201f66d61..82633d0978de 100644 --- a/Packs/ServiceNow/Scripts/ServiceNowAddComment/ServiceNowAddComment.py +++ b/Packs/ServiceNow/Scripts/ServiceNowAddComment/ServiceNowAddComment.py @@ -3,50 +3,50 @@ def update_comment_or_worknote(args: Dict[str, Any]) -> CommandResults: - ticket_id = args.get('ticket_id', 'none') - note = args.get('note') - tag = args.get('tag') - table_name = args.get('table_name') - using = args.get('instance_name') + ticket_id = args.get("ticket_id", "none") + note = args.get("note") + tag = args.get("tag") + table_name = args.get("table_name") + using = args.get("instance_name") command_args = {} - if ticket_id == 'none': - ticket_id = demisto.incident()['CustomFields'].get('servicenowticketid') + if ticket_id == "none": + ticket_id = demisto.incident()["CustomFields"].get("servicenowticketid") - command_args['id'] = ticket_id - demisto.debug(f'Using ticket_type: {table_name}') + command_args["id"] = ticket_id + demisto.debug(f"Using ticket_type: {table_name}") if table_name: - command_args['ticket_type'] = table_name - if tag == 'comment': - command_args['comments'] = note + command_args["ticket_type"] = table_name + if tag == "comment": + command_args["comments"] = note else: - command_args['work_notes'] = note - command_args['using'] = using + command_args["work_notes"] = note + command_args["using"] = using try: - demisto.debug(f'Calling servicenow-update-ticket, {command_args=}') + demisto.debug(f"Calling servicenow-update-ticket, {command_args=}") command_res = demisto.executeCommand("servicenow-update-ticket", command_args) - demisto.debug(f'After calling servicenow-update-ticket, {command_res=}') + demisto.debug(f"After calling servicenow-update-ticket, {command_res=}") resp = command_res[0] if isError(resp): - raise Exception(resp['Contents']) + raise Exception(resp["Contents"]) else: - if 'result' not in resp['Contents'] or not resp['Contents']['result']: + if "result" not in resp["Contents"] or not resp["Contents"]["result"]: message = "Empty result. Please check your input. e.g. the ticket_id, or table_name" demisto.info(message) return_error(message) - result = resp['Contents']['result'] + result = resp["Contents"]["result"] output_results = {} - output_results['Ticket ID'] = result['sys_id'] - output_results['Ticket Updated on'] = result['sys_updated_on'] - output_results['Ticket Updated by'] = result['sys_updated_by'] - output_results['Ticket Number'] = result['number'] - output_results['Table'] = result['sys_class_name'] - output_results['Ticket Created by'] = result['sys_created_by'] - output_results['Ticket Created on'] = result['sys_created_on'] + output_results["Ticket ID"] = result["sys_id"] + output_results["Ticket Updated on"] = result["sys_updated_on"] + output_results["Ticket Updated by"] = result["sys_updated_by"] + output_results["Ticket Number"] = result["number"] + output_results["Table"] = result["sys_class_name"] + output_results["Ticket Created by"] = result["sys_created_by"] + output_results["Ticket Created on"] = result["sys_created_on"] md = tableToMarkdown("ServiceNow Comment Added", [output_results]) @@ -62,8 +62,8 @@ def main(): return_results(res) except Exception as ex2: - return_error(f'Failed to execute ServiceNowAddComment. Error: {str(ex2)}') + return_error(f"Failed to execute ServiceNowAddComment. 
Error: {ex2!s}") -if __name__ in ["__builtin__", "builtins", '__main__']: +if __name__ in ["__builtin__", "builtins", "__main__"]: main() diff --git a/Packs/ServiceNow/Scripts/ServiceNowAddComment/ServiceNowAddComment_test.py b/Packs/ServiceNow/Scripts/ServiceNowAddComment/ServiceNowAddComment_test.py index 5ac76d8f98d3..7163b78b1a0d 100644 --- a/Packs/ServiceNow/Scripts/ServiceNowAddComment/ServiceNowAddComment_test.py +++ b/Packs/ServiceNow/Scripts/ServiceNowAddComment/ServiceNowAddComment_test.py @@ -1,17 +1,30 @@ def test_update_comment_or_worknote(mocker): # test update_comment_or_worknote function - from ServiceNowAddComment import update_comment_or_worknote, demisto import ServiceNowAddComment - mocker.patch.object(demisto, 'executeCommand', return_value=[{'Contents': {'result': {'sys_id': '1', - 'sys_updated_on': '2', - 'sys_updated_by': '3', - 'number': '4', - 'sys_class_name': '5', - 'sys_created_by': '6', - 'sys_created_on': '7'}}}]) - mocker.patch.object(ServiceNowAddComment, 'isError', return_value=False) - update_comment_or_worknote({'ticket_id': '1', 'note': 'test'}) + from ServiceNowAddComment import demisto, update_comment_or_worknote + + mocker.patch.object( + demisto, + "executeCommand", + return_value=[ + { + "Contents": { + "result": { + "sys_id": "1", + "sys_updated_on": "2", + "sys_updated_by": "3", + "number": "4", + "sys_class_name": "5", + "sys_created_by": "6", + "sys_created_on": "7", + } + } + } + ], + ) + mocker.patch.object(ServiceNowAddComment, "isError", return_value=False) + update_comment_or_worknote({"ticket_id": "1", "note": "test"}) assert demisto.executeCommand.call_count == 1 - assert demisto.executeCommand.call_args[0][0] == 'servicenow-update-ticket' - assert demisto.executeCommand.call_args[0][1]['id'] == '1' - assert demisto.executeCommand.call_args[0][1]['work_notes'] == 'test' + assert demisto.executeCommand.call_args[0][0] == "servicenow-update-ticket" + assert demisto.executeCommand.call_args[0][1]["id"] == "1" + assert demisto.executeCommand.call_args[0][1]["work_notes"] == "test" diff --git a/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident.py b/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident.py index ae7c279f65a3..2c38e9cca618 100644 --- a/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident.py +++ b/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident.py @@ -14,11 +14,7 @@ """ Mapping of severity display names to their corresponding values in the API """ -TICKET_SEVERITY = { - '1 - High': '1', - '2 - Medium': '2', - '3 - Low': '3' -} +TICKET_SEVERITY = {"1 - High": "1", "2 - Medium": "2", "3 - Low": "3"} """ Function to use the query command to retrieve records from the users table. 
@@ -26,37 +22,37 @@ def get_user(query): - using = demisto.args().get('using') + using = demisto.args().get("using") user_args = { - 'table_name': 'sys_user', - 'query': query, - 'using': using, + "table_name": "sys_user", + "query": query, + "using": using, } - user_result = demisto.executeCommand('servicenow-query-table', user_args)[0] - user_data = demisto.get(user_result, 'Contents') + user_result = demisto.executeCommand("servicenow-query-table", user_args)[0] + user_data = demisto.get(user_result, "Contents") if not user_data: - return_error('Could not get the contents from the command result: ' + json.dumps(user_result)) + return_error("Could not get the contents from the command result: " + json.dumps(user_result)) if not isinstance(user_data, dict): # In case of string result, e.g "No incidents found" - demisto.results('User not found') + demisto.results("User not found") sys.exit(0) - user = user_data['result'] + user = user_data["result"] if not user or len(user) == 0: - demisto.results('User not found') + demisto.results("User not found") sys.exit(0) return user def get_user_id(user_name): - user_name = user_name.split(' ') - query = 'first_name={}^last_name={}'.format(user_name[0], user_name[1]) + user_name = user_name.split(" ") + query = f"first_name={user_name[0]}^last_name={user_name[1]}" user = get_user(query) - return user[0]['sys_id'] + return user[0]["sys_id"] """ @@ -65,65 +61,60 @@ def get_user_id(user_name): def get_group(query): - using = demisto.args().get('using') + using = demisto.args().get("using") group_args = { - 'table_name': 'sys_user_group', - 'query': query, - 'using': using, + "table_name": "sys_user_group", + "query": query, + "using": using, } - group_result = demisto.executeCommand('servicenow-query-table', group_args)[0] - group_data = demisto.get(group_result, 'Contents') + group_result = demisto.executeCommand("servicenow-query-table", group_args)[0] + group_data = demisto.get(group_result, "Contents") if not group_data: - return_error('Could not get the contents from the command result: ' + json.dumps(group_result)) + return_error("Could not get the contents from the command result: " + json.dumps(group_result)) if not isinstance(group_data, dict): # In case of string result, e.g "No incidents found" - demisto.results('Group not found') + demisto.results("Group not found") sys.exit(0) - group = group_data['result'] + group = group_data["result"] if not group or len(group) == 0: - demisto.results('Group not found') + demisto.results("Group not found") sys.exit(0) return group def get_group_id(group_name): - query = 'name=' + group_name + query = "name=" + group_name group = get_group(query) - return group[0]['sys_id'] + return group[0]["sys_id"] def main(): """ The table name is required by the API. To acquire the table name, use the servicenow-get-table-name command. """ - command_args = { - 'table_name': 'incident' - } + command_args = {"table_name": "incident"} """ These record fields(columns) are mapped from their names in ServiceNow to your choice of field names. To view all fields for a given table, use the servicenow-list-fields command. The ID field must be included to manage unique context entries. """ - fields_to_map = { - 'sys_id': 'ID', - 'number': 'Number' - } + fields_to_map = {"sys_id": "ID", "number": "Number"} """ For each field in the arguments, you need to check if it was provided and apply any operations required (e.g, get a user id from a user name) to send them to the API. 
""" - incident_severity = demisto.args().get('severity') - group_name = demisto.args().get('assigned_group') - user_name = demisto.args().get('assignee') - description = demisto.args().get('description') - using = demisto.args().get('using') + incident_severity = demisto.args().get("severity") + group_name = demisto.args().get("assigned_group") + user_name = demisto.args().get("assignee") + description = demisto.args().get("description") + using = demisto.args().get("using") user_id = None group_id = None @@ -144,52 +135,51 @@ def main(): fields = [] if incident_severity: - fields.append('severity' + '=' + TICKET_SEVERITY[incident_severity]) + fields.append("severity" + "=" + TICKET_SEVERITY[incident_severity]) if user_id: - fields.append('assigned_to' + '=' + user_id) + fields.append("assigned_to" + "=" + user_id) if description: - fields.append('short_description' + '=' + description) + fields.append("short_description" + "=" + description) if group_id: - fields.append('assignment_group' + '=' + group_id) + fields.append("assignment_group" + "=" + group_id) - command_args['fields'] = ';'.join(fields) - command_args['using'] = using + command_args["fields"] = ";".join(fields) + command_args["using"] = using - command_res = demisto.executeCommand('servicenow-create-record', command_args) + command_res = demisto.executeCommand("servicenow-create-record", command_args) result = {} try: entry = command_res[0] if isError(entry): - return_error(entry['Contents']) + return_error(entry["Contents"]) else: - record_data = demisto.get(entry, 'Contents') + record_data = demisto.get(entry, "Contents") if not record_data: - return_error('Could not get the contents from the command result: ' + json.dumps(entry)) + return_error("Could not get the contents from the command result: " + json.dumps(entry)) if not isinstance(record_data, dict): # In case of string result, e.g "No incidents found" result = record_data else: # Get the actual record - record = record_data['result'] + record = record_data["result"] # Map fields according to fields_to_map that were defined earlier mapped_record = dict( - (fields_to_map[key], value) for (key, value) in - list([k_v for k_v in list(record.items()) if k_v[0] in list(fields_to_map.keys())]) + (fields_to_map[key], value) + for (key, value) in list([k_v for k_v in list(record.items()) if k_v[0] in list(fields_to_map.keys())]) ) - display_headers = ['ID', 'Number'] + display_headers = ["ID", "Number"] # Output entry result = { - 'Type': entryTypes['note'], - 'Contents': record_data, - 'ContentsFormat': formats['json'], - 'ReadableContentsFormat': formats['markdown'], - 'HumanReadable': tableToMarkdown('Incident successfully created', mapped_record, - headers=display_headers, removeNull=True), - 'EntryContext': { - 'ServiceNow.Incident(val.ID===obj.ID)': createContext(mapped_record) - } + "Type": entryTypes["note"], + "Contents": record_data, + "ContentsFormat": formats["json"], + "ReadableContentsFormat": formats["markdown"], + "HumanReadable": tableToMarkdown( + "Incident successfully created", mapped_record, headers=display_headers, removeNull=True + ), + "EntryContext": {"ServiceNow.Incident(val.ID===obj.ID)": createContext(mapped_record)}, } except Exception as ex: @@ -198,5 +188,5 @@ def main(): demisto.results(result) -if __name__ in ["__builtin__", "builtins", '__main__']: +if __name__ in ["__builtin__", "builtins", "__main__"]: main() diff --git a/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident_test.py 
b/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident_test.py index b360aea30c4e..05521f2daffa 100644 --- a/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident_test.py +++ b/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident_test.py @@ -13,11 +13,12 @@ def test_get_user(mocker): - make sure the incident result key is being retrieved """ from ServiceNowCreateIncident import get_user - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'get', return_value={'result': 'test'}) - user = get_user('query') - assert user == 'test' + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value={"result": "test"}) + + user = get_user("query") + assert user == "test" def test_get_user_id(mocker): @@ -32,10 +33,11 @@ def test_get_user_id(mocker): - make sure that the incident sys ID is retrieved. """ from ServiceNowCreateIncident import get_user_id - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'get', return_value={'result': [{'sys_id': '123'}]}) - incident_sys_id = get_user_id('user1 user2') - assert incident_sys_id == '123' + + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value={"result": [{"sys_id": "123"}]}) + incident_sys_id = get_user_id("user1 user2") + assert incident_sys_id == "123" def test_get_group(mocker): @@ -50,11 +52,12 @@ def test_get_group(mocker): - make sure the incident result key is being retrieved """ from ServiceNowCreateIncident import get_group - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'get', return_value={'result': 'test'}) - user = get_group('query') - assert user == 'test' + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value={"result": "test"}) + + user = get_group("query") + assert user == "test" def test_get_group_id(mocker): @@ -69,10 +72,11 @@ def test_get_group_id(mocker): - make sure that the incident sys ID is retrieved. """ from ServiceNowCreateIncident import get_group_id - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'get', return_value={'result': [{'sys_id': '123'}]}) - incident_sys_id = get_group_id('user1 user2') - assert incident_sys_id == '123' + + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value={"result": [{"sys_id": "123"}]}) + incident_sys_id = get_group_id("user1 user2") + assert incident_sys_id == "123" def test_main_flow_success(mocker): @@ -87,19 +91,20 @@ def test_main_flow_success(mocker): - make sure the main flow gets executed without errors. 
""" from ServiceNowCreateIncident import main - mocked_data = {'result': {'number': 'INC0021211', 'sys_id': '123'}} - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'get', return_value=mocked_data) - result = mocker.patch('demistomock.results') + mocked_data = {"result": {"number": "INC0021211", "sys_id": "123"}} + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value=mocked_data) + + result = mocker.patch("demistomock.results") main() assert result.call_args.args[0] == { - 'Type': 1, 'Contents': { - 'result': {'number': 'INC0021211', 'sys_id': '123'} - }, - 'ContentsFormat': 'json', 'ReadableContentsFormat': 'markdown', - 'HumanReadable': '### Incident successfully created\n|ID|Number|\n|---|---|\n| 123 | INC0021211 |\n', - 'EntryContext': {'ServiceNow.Incident(val.ID===obj.ID)': {'Number': 'INC0021211', 'ID': '123'}} + "Type": 1, + "Contents": {"result": {"number": "INC0021211", "sys_id": "123"}}, + "ContentsFormat": "json", + "ReadableContentsFormat": "markdown", + "HumanReadable": "### Incident successfully created\n|ID|Number|\n|---|---|\n| 123 | INC0021211 |\n", + "EntryContext": {"ServiceNow.Incident(val.ID===obj.ID)": {"Number": "INC0021211", "ID": "123"}}, } diff --git a/Packs/ServiceNow/Scripts/ServiceNowIncidentStatus/ServiceNowIncidentStatus.py b/Packs/ServiceNow/Scripts/ServiceNowIncidentStatus/ServiceNowIncidentStatus.py index 3dab09c9f188..b968bf46122c 100644 --- a/Packs/ServiceNow/Scripts/ServiceNowIncidentStatus/ServiceNowIncidentStatus.py +++ b/Packs/ServiceNow/Scripts/ServiceNowIncidentStatus/ServiceNowIncidentStatus.py @@ -2,43 +2,39 @@ from CommonServerPython import * # noqa: F401 COLORS = { - '1 - New': '#00CD33', # (success green) - '2 - In Progress': '#7995D4', # (royal blue) - '3 - On Hold': '#FF9000', # (warning orange) - '4 - Awaiting Caller': '#FF9000', # (warning orange) - '5 - Awaiting Evidence': '#FF9000', # (warning orange) - '6 - Resolved': '#89A5C1', # (polo) - '7 - Closed': '#9AA0A3', # (natural grey) - '8 - Canceled': '#FF1744' # (alert-red) + "1 - New": "#00CD33", # (success green) + "2 - In Progress": "#7995D4", # (royal blue) + "3 - On Hold": "#FF9000", # (warning orange) + "4 - Awaiting Caller": "#FF9000", # (warning orange) + "5 - Awaiting Evidence": "#FF9000", # (warning orange) + "6 - Resolved": "#89A5C1", # (polo) + "7 - Closed": "#9AA0A3", # (natural grey) + "8 - Canceled": "#FF1744", # (alert-red) } TEXT = { - '1 - New': 'New', - '2 - In Progress': 'In Progress', - '3 - On Hold': 'On-Hold', - '4 - Awaiting Caller': 'Awaiting Caller', - '5 - Awaiting Evidence': 'Awaiting Evidence', - '6 - Resolved': 'Resolved', - '7 - Closed': 'Closed', - '8 - Canceled': 'Canceled' + "1 - New": "New", + "2 - In Progress": "In Progress", + "3 - On Hold": "On-Hold", + "4 - Awaiting Caller": "Awaiting Caller", + "5 - Awaiting Evidence": "Awaiting Evidence", + "6 - Resolved": "Resolved", + "7 - Closed": "Closed", + "8 - Canceled": "Canceled", } incident = demisto.incidents() -service_now_state = (incident[0].get('CustomFields', {}).get('servicenowstate')) +service_now_state = incident[0].get("CustomFields", {}).get("servicenowstate") try: text_color = COLORS[service_now_state] text_content = TEXT[service_now_state] except Exception as e: - demisto.debug(f'SnowIncidentStatus debug - state is: {service_now_state}\n{e}') - text_color = '#000000' - text_content = 'Pending Update' + demisto.debug(f"SnowIncidentStatus debug - state is: {service_now_state}\n{e}") + text_color = "#000000" + 
text_content = "Pending Update" html = f"

{text_content}

" -demisto.results({ - 'ContentsFormat': formats['html'], - 'Type': entryTypes['note'], - 'Contents': html -}) +demisto.results({"ContentsFormat": formats["html"], "Type": entryTypes["note"], "Contents": html}) diff --git a/Packs/ServiceNow/Scripts/ServiceNowQueryIncident/ServiceNowQueryIncident.py b/Packs/ServiceNow/Scripts/ServiceNowQueryIncident/ServiceNowQueryIncident.py index d450f50cba9b..067e76dd4128 100644 --- a/Packs/ServiceNow/Scripts/ServiceNowQueryIncident/ServiceNowQueryIncident.py +++ b/Packs/ServiceNow/Scripts/ServiceNowQueryIncident/ServiceNowQueryIncident.py @@ -14,13 +14,7 @@ """ Mapping of priority values to their corresponding display in the UI """ -INCIDENT_PRIORITY = { - '1': '1 - Critical', - '2': '2 - High', - '3': '3 - Moderate', - '4': '4 - Low', - '5': '5 - Planning' -} +INCIDENT_PRIORITY = {"1": "1 - Critical", "2": "2 - High", "3": "3 - Moderate", "4": "4 - Low", "5": "5 - Planning"} """ Function to use the query command to retrieve records from the users table. @@ -28,43 +22,40 @@ def get_user(query): - user_args = { - 'table_name': 'sys_user', - 'query': query - } + user_args = {"table_name": "sys_user", "query": query} - user_result = demisto.executeCommand('servicenow-query-table', user_args)[0] - user_data = demisto.get(user_result, 'Contents') + user_result = demisto.executeCommand("servicenow-query-table", user_args)[0] + user_data = demisto.get(user_result, "Contents") if not user_data: - return_error('Could not get the contents from the command result: ' + json.dumps(user_result)) + return_error("Could not get the contents from the command result: " + json.dumps(user_result)) if not isinstance(user_data, dict): # In case of string result, e.g "No incidents found" - demisto.results('User not found') + demisto.results("User not found") sys.exit(0) - user = user_data['result'] + user = user_data["result"] if not user or len(user) == 0: - demisto.results('User not found') + demisto.results("User not found") sys.exit(0) return user def get_user_id(user_name): - user_name = user_name.split(' ') - query = f'first_name={user_name[0]}^last_name={user_name[1]}' + user_name = user_name.split(" ") + query = f"first_name={user_name[0]}^last_name={user_name[1]}" user = get_user(query) - return user[0]['sys_id'] + return user[0]["sys_id"] def get_user_name(user_id): - query = 'id=' + user_id + query = "id=" + user_id user = get_user(query) - return '{} {}'.format(user[0]['first_name'], user[0]['last_name']) + return "{} {}".format(user[0]["first_name"], user[0]["last_name"]) def main(): @@ -75,28 +66,25 @@ def main(): The ID field must be included to manage unique context entries. """ fields_to_map = { - 'sys_id': 'ID', - 'priority': 'Priority', - 'opened_by': 'Caller', - 'number': 'Number', - 'short_description': 'Description' + "sys_id": "ID", + "priority": "Priority", + "opened_by": "Caller", + "number": "Number", + "short_description": "Description", } """ The table name is required by the API. To acquire the table name, use the servicenow-get-table-name command. """ - command_args = { - 'table_name': 'incident', - 'fields': list(fields_to_map.keys()) - } + command_args = {"table_name": "incident", "fields": list(fields_to_map.keys())} """ For each field in the arguments, you need to check if it was provided and apply any operations required (e.g, get a user id from a user name) to send them to the API. 
""" - incident_id = demisto.args().get('id') - incident_number = demisto.args().get('number') - user_name = demisto.args().get('assignee') + incident_id = demisto.args().get("id") + incident_number = demisto.args().get("number") + user_name = demisto.args().get("assignee") user_id = None if user_name: @@ -107,60 +95,61 @@ def main(): Set up the query according to the arguments and execute the command """ if incident_id: - query = 'id=' + incident_id + query = "id=" + incident_id elif incident_number: - query = 'number=' + incident_number + query = "number=" + incident_number elif user_id: - query = 'assigned_to=' + user_id + query = "assigned_to=" + user_id else: query = "" demisto.debug(f"No incident_id,incident_number or user_id. {query=}") - command_args['query'] = query + command_args["query"] = query - command_res = demisto.executeCommand('servicenow-query-table', command_args) + command_res = demisto.executeCommand("servicenow-query-table", command_args) result = {} try: entry = command_res[0] if isError(entry): - return_error(entry['Contents']) + return_error(entry["Contents"]) else: - record_data = demisto.get(entry, 'Contents') + record_data = demisto.get(entry, "Contents") if not record_data: - return_error('Could not get the contents from the command result: ' + json.dumps(entry)) + return_error("Could not get the contents from the command result: " + json.dumps(entry)) if not isinstance(record_data, dict): # In case of string result, e.g "No incidents found" result = record_data else: # Get the actual records - records = record_data['result'] + records = record_data["result"] # Map fields according to fields_to_map that were defined earlier mapped_records = [ - {fields_to_map[key]: value for (key, value) in - [k_v for k_v in list(r.items()) if k_v[0] in list(fields_to_map.keys())]} + { + fields_to_map[key]: value + for (key, value) in [k_v for k_v in list(r.items()) if k_v[0] in list(fields_to_map.keys())] + } for r in records ] for mr in mapped_records: # Query the user table to get the name of the caller - if mr.get('Caller'): - mr['Caller'] = get_user_name(mr['Caller'].get('value')) + if mr.get("Caller"): + mr["Caller"] = get_user_name(mr["Caller"].get("value")) # Map the priority - if mr.get('Priority'): - mr['Priority'] = INCIDENT_PRIORITY.get(mr['Priority'], mr['Priority']) - display_headers = ['ID', 'Number', 'Priority', 'Description', 'Caller'] + if mr.get("Priority"): + mr["Priority"] = INCIDENT_PRIORITY.get(mr["Priority"], mr["Priority"]) + display_headers = ["ID", "Number", "Priority", "Description", "Caller"] # Output entry result = { - 'Type': entryTypes['note'], - 'Contents': record_data, - 'ContentsFormat': formats['json'], - 'ReadableContentsFormat': formats['markdown'], - 'HumanReadable': tableToMarkdown('ServiceNow Incidents', mapped_records, headers=display_headers, - removeNull=True), - 'EntryContext': { - 'ServiceNow.Incident(val.ID===obj.ID)': createContext(mapped_records) - } + "Type": entryTypes["note"], + "Contents": record_data, + "ContentsFormat": formats["json"], + "ReadableContentsFormat": formats["markdown"], + "HumanReadable": tableToMarkdown( + "ServiceNow Incidents", mapped_records, headers=display_headers, removeNull=True + ), + "EntryContext": {"ServiceNow.Incident(val.ID===obj.ID)": createContext(mapped_records)}, } except Exception as ex: @@ -169,5 +158,5 @@ def main(): demisto.results(result) -if __name__ in ["__builtin__", "builtins", '__main__']: +if __name__ in ["__builtin__", "builtins", "__main__"]: main() diff --git 
a/Packs/ServiceNow/Scripts/ServiceNowQueryIncident/ServiceNowQueryIncident_test.py b/Packs/ServiceNow/Scripts/ServiceNowQueryIncident/ServiceNowQueryIncident_test.py index 6beb81fe1263..362afb039cb0 100644 --- a/Packs/ServiceNow/Scripts/ServiceNowQueryIncident/ServiceNowQueryIncident_test.py +++ b/Packs/ServiceNow/Scripts/ServiceNowQueryIncident/ServiceNowQueryIncident_test.py @@ -13,11 +13,12 @@ def test_get_user(mocker): - make sure the incident result key is being retrieved """ from ServiceNowQueryIncident import get_user - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'get', return_value={'result': 'test'}) - user = get_user('query') - assert user == 'test' + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value={"result": "test"}) + + user = get_user("query") + assert user == "test" def test_get_user_id(mocker): @@ -32,10 +33,11 @@ def test_get_user_id(mocker): - make sure that the incident sys ID is retrieved. """ from ServiceNowQueryIncident import get_user_id - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'get', return_value={'result': [{'sys_id': '123'}]}) - incident_sys_id = get_user_id('user1 user2') - assert incident_sys_id == '123' + + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value={"result": [{"sys_id": "123"}]}) + incident_sys_id = get_user_id("user1 user2") + assert incident_sys_id == "123" def test_get_username(mocker): @@ -50,13 +52,12 @@ def test_get_username(mocker): - make sure that the first name and last name are being returned. """ from ServiceNowQueryIncident import get_user_name - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object( - demisto, 'get', return_value={'result': [{'first_name': 'first-name', 'last_name': 'last-name'}]} - ) - result = get_user_name('123') - assert result == 'first-name last-name' + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value={"result": [{"first_name": "first-name", "last_name": "last-name"}]}) + + result = get_user_name("123") + assert result == "first-name last-name" def test_main_flow_success(mocker): @@ -71,26 +72,25 @@ def test_main_flow_success(mocker): - make sure the main flow gets executed without errors. 
""" from ServiceNowQueryIncident import main - args_mock = {'number': 'INC0021211'} - mocked_command_data = {'result': [{'number': 'INC0021211', 'sys_id': '123', 'priority': '1'}]} - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'args', return_value=args_mock) - mocker.patch.object(demisto, 'get', return_value=mocked_command_data) - result = mocker.patch('demistomock.results') + args_mock = {"number": "INC0021211"} + mocked_command_data = {"result": [{"number": "INC0021211", "sys_id": "123", "priority": "1"}]} + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "args", return_value=args_mock) + mocker.patch.object(demisto, "get", return_value=mocked_command_data) + + result = mocker.patch("demistomock.results") main() assert result.call_args.args[0] == { - 'Type': 1, - 'Contents': {'result': [{'number': 'INC0021211', 'sys_id': '123', 'priority': '1'}]}, - 'ContentsFormat': 'json', - 'ReadableContentsFormat': 'markdown', - 'HumanReadable': '### ServiceNow Incidents\n|ID|Number|Priority|\n|---|---|---|' - '\n| 123 | INC0021211 | 1 - Critical |\n', - 'EntryContext': { - 'ServiceNow.Incident(val.ID===obj.ID)': [ - {'Number': 'INC0021211', 'ID': '123', 'Priority': '1 - Critical'} - ] - } + "Type": 1, + "Contents": {"result": [{"number": "INC0021211", "sys_id": "123", "priority": "1"}]}, + "ContentsFormat": "json", + "ReadableContentsFormat": "markdown", + "HumanReadable": "### ServiceNow Incidents\n|ID|Number|Priority|\n|---|---|---|" + "\n| 123 | INC0021211 | 1 - Critical |\n", + "EntryContext": { + "ServiceNow.Incident(val.ID===obj.ID)": [{"Number": "INC0021211", "ID": "123", "Priority": "1 - Critical"}] + }, } diff --git a/Packs/ServiceNow/Scripts/ServiceNowTroubleshoot/ServiceNowTroubleshoot.py b/Packs/ServiceNow/Scripts/ServiceNowTroubleshoot/ServiceNowTroubleshoot.py index 3420a3f59e44..b3b8ae3bf7c1 100644 --- a/Packs/ServiceNow/Scripts/ServiceNowTroubleshoot/ServiceNowTroubleshoot.py +++ b/Packs/ServiceNow/Scripts/ServiceNowTroubleshoot/ServiceNowTroubleshoot.py @@ -1,14 +1,17 @@ -import demistomock as demisto -from typing import Any -from CommonServerPython import * import json from collections import defaultdict +from typing import Any -INTEGRATION = 'ServiceNow v2' -NOTE_INCIDENTS = ("### Note: The active incidents, created 30 days ago and listed in the tables for both enabled and" - " disabled instances, are still being mirrored.\n ### If the issue is no longer relevant and does not" - " require further attention, it is recommended to close related incidents promptly to prevent" - " potential system overload.") +import demistomock as demisto +from CommonServerPython import * + +INTEGRATION = "ServiceNow v2" +NOTE_INCIDENTS = ( + "### Note: The active incidents, created 30 days ago and listed in the tables for both enabled and" + " disabled instances, are still being mirrored.\n ### If the issue is no longer relevant and does not" + " require further attention, it is recommended to close related incidents promptly to prevent" + " potential system overload." +) def http_request_wrapper(method: str, url: str, body: dict | None = None): @@ -24,11 +27,11 @@ def http_request_wrapper(method: str, url: str, body: dict | None = None): dict: Parsed JSON response or an empty dictionary if parsing fails. 
""" http_result = demisto.internalHttpRequest(method, url, body=json.dumps(body)) - http_result_body_raw_response = cast(dict, http_result).get('body', '{}') + http_result_body_raw_response = cast(dict, http_result).get("body", "{}") try: http_result_body_response = json.loads(http_result_body_raw_response) except json.JSONDecodeError as e: - raise DemistoException(f'Unable to load response {http_result_body_raw_response}: {str(e)}') + raise DemistoException(f"Unable to load response {http_result_body_raw_response}: {e!s}") return http_result_body_response @@ -39,12 +42,12 @@ def get_integrations_details() -> dict[str, Any]: :return: A Dictionary containing the details of the integrations and their health status. :rtype: Dict[str, Any] """ - integrations_search_response = http_request_wrapper(method='POST', url='settings/integration/search') - instances, health = integrations_search_response.get('instances', {}), integrations_search_response.get('health', {}) + integrations_search_response = http_request_wrapper(method="POST", url="settings/integration/search") + instances, health = integrations_search_response.get("instances", {}), integrations_search_response.get("health", {}) instances_health = {} for instance in instances: - instance_name = instance.get('name') - if instance.get('brand') == INTEGRATION: + instance_name = instance.get("name") + if instance.get("brand") == INTEGRATION: instances_health[instance_name] = instance if instance_name in health: instances_health[instance_name].update({"health": health[instance_name]}) @@ -64,11 +67,13 @@ def filter_instances_data(instances_data: dict[str, Any]) -> tuple[dict, list]: filtered_data = {} disabled_instances = [] for instance_name, data in instances_data.items(): - if data.get('enabled') == 'true': + if data.get("enabled") == "true": filtered_data[instance_name] = data continue - if (int(data.get('health', {}).get('incidentsPulled', 0)) > 0 - and data.get('configvalues', {}).get('mirror_direction', '') != 'None'): + if ( + int(data.get("health", {}).get("incidentsPulled", 0)) > 0 + and data.get("configvalues", {}).get("mirror_direction", "") != "None" + ): disabled_instances.append(instance_name) else: filtered_data[instance_name] = data @@ -87,15 +92,11 @@ def categorize_active_incidents(disabled_instances: list[str]) -> tuple[dict, di :return: A Tuple containing the active incidents for enabled instances and for disabled instances. 
:rtype: Tuple[Dict, list] """ - query = { - 'filter': { - 'query': f'sourceBrand: "{INTEGRATION}" and status: Active and created: >="30 days ago"' - } - } - incidents_response = http_request_wrapper(method='POST', url='incidents/search', body=query) + query = {"filter": {"query": f'sourceBrand: "{INTEGRATION}" and status: Active and created: >="30 days ago"'}} + incidents_response = http_request_wrapper(method="POST", url="incidents/search", body=query) categorized_incidents: dict[str, Any] = {"enabled": defaultdict(list), "disabled": defaultdict(list)} - for incident in incidents_response.get('data', {}): + for incident in incidents_response.get("data", {}): source_instance = incident.get("sourceInstance") incident_name = incident.get("name") @@ -119,10 +120,7 @@ def parse_disabled_instances(disabled_incidents_instances: dict[str, Any]) -> st :rtype: ``str`` """ markdown_data = [ - {'Instance': instance, - "Total": len(incidents), - "Active incidents more than created 30 days ago": "\n".join(incidents - )} + {"Instance": instance, "Total": len(incidents), "Active incidents more than created 30 days ago": "\n".join(incidents)} for instance, incidents in disabled_incidents_instances.items() ] return tableToMarkdown( @@ -147,19 +145,18 @@ def parse_enabled_instances(enabled_instances_health: dict[str, Any], enabled_in human_readable_dict = [] for instance_name, instance_data in enabled_instances_health.items(): filtered_data = { - 'Instance Name': instance_name, - 'Size In Bytes': instance_data.get('sizeInBytes', ''), - 'Number of Incidents Pulled in Last Fetch': instance_data.get('health', {}).get('incidentsPulled', ''), - 'Last Pull Time': instance_data.get('health').get('lastPullTime', ''), - 'Query': instance_data.get('configvalues').get('sysparm_query', ''), - 'Last Error': instance_data.get('health').get('lastError', ''), + "Instance Name": instance_name, + "Size In Bytes": instance_data.get("sizeInBytes", ""), + "Number of Incidents Pulled in Last Fetch": instance_data.get("health", {}).get("incidentsPulled", ""), + "Last Pull Time": instance_data.get("health").get("lastPullTime", ""), + "Query": instance_data.get("configvalues").get("sysparm_query", ""), + "Last Error": instance_data.get("health").get("lastError", ""), } if instance_name in enabled_incidents_instances: filtered_data["Names of Active Incidents Created 30 days ago"] = enabled_incidents_instances[instance_name] filtered_data["Total Active Incidents Created 30 days ago"] = len(enabled_incidents_instances[instance_name]) human_readable_dict.append(filtered_data) - return tableToMarkdown(name="Enabled Instances Health Information", t=human_readable_dict, - removeNull=True) + return tableToMarkdown(name="Enabled Instances Health Information", t=human_readable_dict, removeNull=True) def main(): @@ -169,12 +166,13 @@ def main(): enabled_incidents_instances, disabled_incidents_instances = categorize_active_incidents(disabled_instances) disabled_instances_hr = parse_disabled_instances(disabled_incidents_instances) enabled_instances_hr = parse_enabled_instances(enabled_instances_health, enabled_incidents_instances) - return_results(CommandResults( - readable_output=f'{enabled_instances_hr} \n --- \n {disabled_instances_hr}\n{NOTE_INCIDENTS}')) + return_results( + CommandResults(readable_output=f"{enabled_instances_hr} \n --- \n {disabled_instances_hr}\n{NOTE_INCIDENTS}") + ) except Exception as e: - return_error(f'Failed to execute ServiceNowTroubleshoot. 
Error: {str(e)}') + return_error(f"Failed to execute ServiceNowTroubleshoot. Error: {e!s}") -if __name__ in ["__builtin__", "builtins", '__main__']: +if __name__ in ["__builtin__", "builtins", "__main__"]: main() diff --git a/Packs/ServiceNow/Scripts/ServiceNowTroubleshoot/ServiceNowTroubleshoot_test.py b/Packs/ServiceNow/Scripts/ServiceNowTroubleshoot/ServiceNowTroubleshoot_test.py index 328bc2792e69..3e26078f5c24 100644 --- a/Packs/ServiceNow/Scripts/ServiceNowTroubleshoot/ServiceNowTroubleshoot_test.py +++ b/Packs/ServiceNow/Scripts/ServiceNowTroubleshoot/ServiceNowTroubleshoot_test.py @@ -1,14 +1,20 @@ import json -from ServiceNowTroubleshoot import (get_integrations_details, filter_instances_data, categorize_active_incidents, - parse_disabled_instances, parse_enabled_instances) + import demistomock as demisto +from ServiceNowTroubleshoot import ( + categorize_active_incidents, + filter_instances_data, + get_integrations_details, + parse_disabled_instances, + parse_enabled_instances, +) def util_load_json(path): with open(path, encoding="utf-8") as f: res = json.loads(f.read()) - if 'body' in res: - res['body'] = json.dumps(res.get('body')) + if "body" in res: + res["body"] = json.dumps(res.get("body")) return res @@ -24,12 +30,12 @@ def test_get_integrations_details(mocker): - Each integration entry should contain a 'health' field. """ http_response = util_load_json("test_data/setting_integration_search_http_response.json") - mocker.patch.object(demisto, 'internalHttpRequest', return_value=http_response) + mocker.patch.object(demisto, "internalHttpRequest", return_value=http_response) res = get_integrations_details() assert len(res) == 2 - assert list(res.keys()) == ['ServiceNow v2_instance_2', 'ServiceNow v2_instance_1'] - assert 'health' in res['ServiceNow v2_instance_2'] - assert 'health' in res['ServiceNow v2_instance_1'] + assert list(res.keys()) == ["ServiceNow v2_instance_2", "ServiceNow v2_instance_1"] + assert "health" in res["ServiceNow v2_instance_2"] + assert "health" in res["ServiceNow v2_instance_1"] def test_filter_instances_data(): @@ -46,8 +52,8 @@ def test_filter_instances_data(): """ instances_health = util_load_json("test_data/filtering_instances_data.json") filtered_data, disabled_instances = filter_instances_data(instances_health) - assert 'ServiceNow v2_instance_2' in filtered_data - assert 'ServiceNow v2_instance_1' in disabled_instances + assert "ServiceNow v2_instance_2" in filtered_data + assert "ServiceNow v2_instance_1" in disabled_instances def test_categorize_active_incidents(mocker): @@ -60,12 +66,12 @@ def test_categorize_active_incidents(mocker): Then: - Incidents should be correctly grouped under enabled and disabled instances. 
""" - disabled_instances = ['ServiceNow v2_instance_1'] + disabled_instances = ["ServiceNow v2_instance_1"] http_response = util_load_json("test_data/incidents_search_http_response.json") - mocker.patch.object(demisto, 'internalHttpRequest', return_value=http_response) + mocker.patch.object(demisto, "internalHttpRequest", return_value=http_response) res_enabled_incidents_instances, res_disabled_incidents_instances = categorize_active_incidents(disabled_instances) - assert res_enabled_incidents_instances == {'ServiceNow v2_instance_2': ['ServiceNow Incident INC0011111']} - assert res_disabled_incidents_instances == {'ServiceNow v2_instance_1': ['ServiceNow Incident INC0022222']} + assert res_enabled_incidents_instances == {"ServiceNow v2_instance_2": ["ServiceNow Incident INC0011111"]} + assert res_disabled_incidents_instances == {"ServiceNow v2_instance_1": ["ServiceNow Incident INC0022222"]} def test_parse_disabled_instances(): @@ -78,11 +84,13 @@ def test_parse_disabled_instances(): - The function should return a formatted markdown string listing disabled instances and their active incidents. """ - disabled_incidents_instances = {'ServiceNow v2_instance_1': ['ServiceNow Incident INC0022222']} + disabled_incidents_instances = {"ServiceNow v2_instance_1": ["ServiceNow Incident INC0022222"]} res = parse_disabled_instances(disabled_incidents_instances) - expected_result = ('### Disabled instances with active incidents created more than 30 days ago\n' - '|Active incidents more than created 30 days ago|Instance|Total|\n|---|---|---|\n|' - ' ServiceNow Incident INC0022222 | ServiceNow v2_instance_1 | 1 |\n') + expected_result = ( + "### Disabled instances with active incidents created more than 30 days ago\n" + "|Active incidents more than created 30 days ago|Instance|Total|\n|---|---|---|\n|" + " ServiceNow Incident INC0022222 | ServiceNow v2_instance_1 | 1 |\n" + ) assert res == expected_result @@ -96,37 +104,78 @@ def test_parse_enabled_instances(): - It should return a formatted markdown string listing disabled instances and their active incidents. 
""" enabled_instances_health = { - 'ServiceNow v2_instance_2': { - 'id': '11111', 'name': 'ServiceNow v2_instance_2', 'version': 5, 'sequenceNumber': 453233, 'primaryTerm': 1, - 'modified': '2025-01-27T09:14:18.207475895Z', 'sizeInBytes': 3066, 'enabled': 'true', - 'configvalues': {'api_version': 'None', 'close_custom_state': 'None', 'close_incident': 'None', - 'close_ticket': 'False', 'close_ticket_multiple_options': 'None', 'comment_tag': 'comments', - 'comment_tag_from_servicenow': 'CommentFromServiceNow', 'custom_fields': 'None', - 'display_date_format': 'None', 'fetch_limit': '10', 'fetch_time': '1 year', - 'file_tag': 'ForServiceNow', 'file_tag_from_service_now': 'FromServiceNow', - 'get_attachments': 'False', 'incidentFetchInterval': '1', 'incidentType': 'None', - 'incident_name': 'number', 'insecure': 'False', 'isFetch': 'True', 'look_back': '0', - 'mirror_direction': 'None', 'mirror_limit': '100', 'mirror_notes_for_new_incidents': 'False', - 'proxy': 'False', 'server_close_custom_state': 'None', 'server_custom_close_code': 'None', - 'sysparm_query': 'stateNOT IN6,7', 'ticket_type': 'incident', 'timestamp_field': 'opened_at', - 'update_timestamp_field': 'sys_updated_on', 'url': 'https://url_dummy/', - 'use_display_value': 'False', 'use_oauth': 'False', 'work_notes_tag': 'work_notes', - 'work_notes_tag_from_servicenow': 'WorkNoteFromServiceNow'}, - 'brand': 'ServiceNow v2', 'category': 'Case Management', - 'health': {'id': 'ServiceNow v2.ServiceNow v2_instance_2', 'version': 16, - 'sequenceNumber': 454401, 'primaryTerm': 1, 'modified': '2025-01-27T09:26:49.458264357Z', - 'sizeInBytes': 516, 'sortValues': ['5994'], 'brand': 'ServiceNow v2', - 'instance': 'ServiceNow v2_instance_2', 'incidentsPulled': 10, 'incidentsDropped': 0, - 'lastPullTime': '2025-01-27T09:26:45.226409678Z', 'lastError': '' - } + "ServiceNow v2_instance_2": { + "id": "11111", + "name": "ServiceNow v2_instance_2", + "version": 5, + "sequenceNumber": 453233, + "primaryTerm": 1, + "modified": "2025-01-27T09:14:18.207475895Z", + "sizeInBytes": 3066, + "enabled": "true", + "configvalues": { + "api_version": "None", + "close_custom_state": "None", + "close_incident": "None", + "close_ticket": "False", + "close_ticket_multiple_options": "None", + "comment_tag": "comments", + "comment_tag_from_servicenow": "CommentFromServiceNow", + "custom_fields": "None", + "display_date_format": "None", + "fetch_limit": "10", + "fetch_time": "1 year", + "file_tag": "ForServiceNow", + "file_tag_from_service_now": "FromServiceNow", + "get_attachments": "False", + "incidentFetchInterval": "1", + "incidentType": "None", + "incident_name": "number", + "insecure": "False", + "isFetch": "True", + "look_back": "0", + "mirror_direction": "None", + "mirror_limit": "100", + "mirror_notes_for_new_incidents": "False", + "proxy": "False", + "server_close_custom_state": "None", + "server_custom_close_code": "None", + "sysparm_query": "stateNOT IN6,7", + "ticket_type": "incident", + "timestamp_field": "opened_at", + "update_timestamp_field": "sys_updated_on", + "url": "https://url_dummy/", + "use_display_value": "False", + "use_oauth": "False", + "work_notes_tag": "work_notes", + "work_notes_tag_from_servicenow": "WorkNoteFromServiceNow", + }, + "brand": "ServiceNow v2", + "category": "Case Management", + "health": { + "id": "ServiceNow v2.ServiceNow v2_instance_2", + "version": 16, + "sequenceNumber": 454401, + "primaryTerm": 1, + "modified": "2025-01-27T09:26:49.458264357Z", + "sizeInBytes": 516, + "sortValues": ["5994"], + "brand": "ServiceNow v2", 
+ "instance": "ServiceNow v2_instance_2", + "incidentsPulled": 10, + "incidentsDropped": 0, + "lastPullTime": "2025-01-27T09:26:45.226409678Z", + "lastError": "", + }, } } - enabled_incidents_instances = {'ServiceNow v2_instance_2': ['ServiceNow Incident INC0011111']} - expected = ('### Enabled Instances Health Information\n' - '|Instance Name|Last Pull Time|Names of Active Incidents Created 30 days ago|' - 'Number of Incidents Pulled in Last Fetch|Query|Size In Bytes|Total Active Incidents Created 30 days ago|\n' - '|---|---|---|---|---|---|---|\n| ServiceNow v2_instance_2 | 2025-01-27T09:26:45.226409678Z |' - ' ServiceNow Incident INC0011111 | 10 | stateNOT IN6,7 | 3066 | 1 |\n' - ) + enabled_incidents_instances = {"ServiceNow v2_instance_2": ["ServiceNow Incident INC0011111"]} + expected = ( + "### Enabled Instances Health Information\n" + "|Instance Name|Last Pull Time|Names of Active Incidents Created 30 days ago|" + "Number of Incidents Pulled in Last Fetch|Query|Size In Bytes|Total Active Incidents Created 30 days ago|\n" + "|---|---|---|---|---|---|---|\n| ServiceNow v2_instance_2 | 2025-01-27T09:26:45.226409678Z |" + " ServiceNow Incident INC0011111 | 10 | stateNOT IN6,7 | 3066 | 1 |\n" + ) res = parse_enabled_instances(enabled_instances_health, enabled_incidents_instances) assert res == expected diff --git a/Packs/ServiceNow/Scripts/ServiceNowUpdateIncident/ServiceNowUpdateIncident.py b/Packs/ServiceNow/Scripts/ServiceNowUpdateIncident/ServiceNowUpdateIncident.py index 471063e02410..70afec1c676d 100644 --- a/Packs/ServiceNow/Scripts/ServiceNowUpdateIncident/ServiceNowUpdateIncident.py +++ b/Packs/ServiceNow/Scripts/ServiceNowUpdateIncident/ServiceNowUpdateIncident.py @@ -14,11 +14,7 @@ """ Mapping of severity display names to their corresponding values in the API """ -TICKET_SEVERITY = { - '1 - High': '1', - '2 - Medium': '2', - '3 - Low': '3' -} +TICKET_SEVERITY = {"1 - High": "1", "2 - Medium": "2", "3 - Low": "3"} """ Function to use the query command to retrieve an incident by a query. 
@@ -26,36 +22,36 @@ def get_incident(query): - using = demisto.args().get('using') + using = demisto.args().get("using") incident_args = { - 'table_name': 'incident', - 'query': query, - 'using': using, + "table_name": "incident", + "query": query, + "using": using, } - incident_result = demisto.executeCommand('servicenow-query-table', incident_args)[0] - incident_data = demisto.get(incident_result, 'Contents') + incident_result = demisto.executeCommand("servicenow-query-table", incident_args)[0] + incident_data = demisto.get(incident_result, "Contents") if not incident_data: - return_error('Could not get the contents from the command result: ' + json.dumps(incident_result)) + return_error("Could not get the contents from the command result: " + json.dumps(incident_result)) if not isinstance(incident_data, dict): # In case of string result, e.g "No incidents found" - demisto.results('Incident not found') + demisto.results("Incident not found") sys.exit(0) - incident = incident_data['result'] + incident = incident_data["result"] if not incident or len(incident) == 0: - demisto.results('Incident not found') + demisto.results("Incident not found") sys.exit(0) return incident def get_incident_id(incident_number): - query = 'number=' + incident_number + query = "number=" + incident_number incident = get_incident(query) - return incident[0]['sys_id'] + return incident[0]["sys_id"] """ @@ -64,37 +60,37 @@ def get_incident_id(incident_number): def get_user(query): - using = demisto.args().get('using') + using = demisto.args().get("using") user_args = { - 'table_name': 'sys_user', - 'query': query, - 'using': using, + "table_name": "sys_user", + "query": query, + "using": using, } - user_result = demisto.executeCommand('servicenow-query-table', user_args)[0] - user_data = demisto.get(user_result, 'Contents') + user_result = demisto.executeCommand("servicenow-query-table", user_args)[0] + user_data = demisto.get(user_result, "Contents") if not user_data: - return_error('Could not get the contents from the command result: ' + json.dumps(user_result)) + return_error("Could not get the contents from the command result: " + json.dumps(user_result)) if not isinstance(user_data, dict): # In case of string result, e.g "No incidents found" - demisto.results('User not found') + demisto.results("User not found") sys.exit(0) - user = user_data['result'] + user = user_data["result"] if not user or len(user) == 0: - demisto.results('User not found') + demisto.results("User not found") sys.exit(0) return user def get_user_id(user_name): - user_name = user_name.split(' ') - query = 'first_name={}^last_name={}'.format(user_name[0], user_name[1]) + user_name = user_name.split(" ") + query = f"first_name={user_name[0]}^last_name={user_name[1]}" user = get_user(query) - return user[0]['sys_id'] + return user[0]["sys_id"] """ @@ -103,58 +99,56 @@ def get_user_id(user_name): def get_group(query): - using = demisto.args().get('using') + using = demisto.args().get("using") group_args = { - 'table_name': 'sys_user_group', - 'query': query, - 'using': using, + "table_name": "sys_user_group", + "query": query, + "using": using, } - group_result = demisto.executeCommand('servicenow-query-table', group_args)[0] - group_data = demisto.get(group_result, 'Contents') + group_result = demisto.executeCommand("servicenow-query-table", group_args)[0] + group_data = demisto.get(group_result, "Contents") if not group_data: - return_error('Could not get the contents from the command result: ' + json.dumps(group_result)) + 
return_error("Could not get the contents from the command result: " + json.dumps(group_result)) if not isinstance(group_data, dict): # In case of string result, e.g "No incidents found" - demisto.results('Group not found') + demisto.results("Group not found") sys.exit(0) - group = group_data['result'] + group = group_data["result"] if not group or len(group) == 0: - demisto.results('Group not found') + demisto.results("Group not found") sys.exit(0) return group def get_group_id(group_name): - query = 'name=' + group_name + query = "name=" + group_name group = get_group(query) - return group[0]['sys_id'] + return group[0]["sys_id"] def main(): """ The table name is required by the API. To acquire the table name, use the servicenow-get-table-name command. """ - command_args = { - 'table_name': 'incident' - } + command_args = {"table_name": "incident"} """ For each field in the arguments, you need to check if it was provided and apply any operations required (e.g, get a user id from a user name) to send them to the API. """ - incident_id = demisto.args().get('id') - incident_number = demisto.args().get('number') - incident_severity = demisto.args().get('severity') - description = demisto.args().get('description') - group_name = demisto.args().get('assigned_group') - user_name = demisto.args().get('assignee') - using = demisto.args().get('using') + incident_id = demisto.args().get("id") + incident_number = demisto.args().get("number") + incident_severity = demisto.args().get("severity") + description = demisto.args().get("description") + group_name = demisto.args().get("assigned_group") + user_name = demisto.args().get("assignee") + using = demisto.args().get("using") user_id = None group_id = None @@ -176,53 +170,51 @@ def main(): fields = [] if incident_id: - command_args['id'] = incident_id + command_args["id"] = incident_id elif incident_number: # Query the incident table to get the system ID of the incident - command_args['id'] = get_incident_id(incident_number) + command_args["id"] = get_incident_id(incident_number) else: - raise ValueError('Incident ID or number must be ') + raise ValueError("Incident ID or number must be ") if incident_severity: - fields.append('severity' + '=' + TICKET_SEVERITY[incident_severity]) + fields.append("severity" + "=" + TICKET_SEVERITY[incident_severity]) if user_id: - fields.append('assigned_to' + '=' + user_id) + fields.append("assigned_to" + "=" + user_id) if group_id: - fields.append('assignment_group' + '=' + group_id) + fields.append("assignment_group" + "=" + group_id) if description: - fields.append('short_description' + '=' + description) + fields.append("short_description" + "=" + description) - command_args['fields'] = ';'.join(fields) - command_args['using'] = using + command_args["fields"] = ";".join(fields) + command_args["using"] = using command_res = demisto.executeCommand("servicenow-update-record", command_args) result = {} try: entry = command_res[0] if isError(entry): - return_error(entry['Contents']) + return_error(entry["Contents"]) else: - record_data = demisto.get(entry, 'Contents') + record_data = demisto.get(entry, "Contents") if not record_data: - return_error('Could not get the contents from the command result: ' + json.dumps(entry)) + return_error("Could not get the contents from the command result: " + json.dumps(entry)) if not isinstance(record_data, dict): # In case of string result, e.g "No incidents found" result = record_data else: # Get the actual record - record = record_data['result'] + record = record_data["result"] # 
Map the ID - mapped_record = {'ID': record['sys_id']} + mapped_record = {"ID": record["sys_id"]} # Output entry result = { - 'Type': entryTypes['note'], - 'Contents': record_data, - 'ContentsFormat': formats['json'], - 'ReadableContentsFormat': formats['markdown'], - 'HumanReadable': 'Incident with ID ' + mapped_record['ID'] + ' successfully updated', - 'EntryContext': { - 'ServiceNow.Incident(val.ID===obj.ID)': createContext(mapped_record) - } + "Type": entryTypes["note"], + "Contents": record_data, + "ContentsFormat": formats["json"], + "ReadableContentsFormat": formats["markdown"], + "HumanReadable": "Incident with ID " + mapped_record["ID"] + " successfully updated", + "EntryContext": {"ServiceNow.Incident(val.ID===obj.ID)": createContext(mapped_record)}, } except Exception as ex: @@ -231,5 +223,5 @@ def main(): demisto.results(result) -if __name__ in ["__builtin__", "builtins", '__main__']: +if __name__ in ["__builtin__", "builtins", "__main__"]: main() diff --git a/Packs/ServiceNow/Scripts/ServiceNowUpdateIncident/ServiceNowUpdateIncident_test.py b/Packs/ServiceNow/Scripts/ServiceNowUpdateIncident/ServiceNowUpdateIncident_test.py index d2a3c533490f..8505179d6923 100644 --- a/Packs/ServiceNow/Scripts/ServiceNowUpdateIncident/ServiceNowUpdateIncident_test.py +++ b/Packs/ServiceNow/Scripts/ServiceNowUpdateIncident/ServiceNowUpdateIncident_test.py @@ -13,11 +13,12 @@ def test_get_incident(mocker): - make sure the incident result key is being retrieved """ from ServiceNowUpdateIncident import get_incident - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'get', return_value={'result': 'test'}) - result = get_incident('query') - assert result == 'test' + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value={"result": "test"}) + + result = get_incident("query") + assert result == "test" def test_get_incident_id(mocker): @@ -32,10 +33,11 @@ def test_get_incident_id(mocker): - make sure that the incident sys ID is retrieved. """ from ServiceNowUpdateIncident import get_incident_id - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'get', return_value={'result': [{'sys_id': '123'}]}) - incident_sys_id = get_incident_id('123') - assert incident_sys_id == '123' + + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value={"result": [{"sys_id": "123"}]}) + incident_sys_id = get_incident_id("123") + assert incident_sys_id == "123" def test_get_user(mocker): @@ -50,11 +52,12 @@ def test_get_user(mocker): - make sure the incident result key is being retrieved """ from ServiceNowUpdateIncident import get_user - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'get', return_value={'result': 'test'}) - user = get_user('query') - assert user == 'test' + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value={"result": "test"}) + + user = get_user("query") + assert user == "test" def test_get_user_id(mocker): @@ -69,10 +72,11 @@ def test_get_user_id(mocker): - make sure that the incident sys ID is retrieved. 
""" from ServiceNowUpdateIncident import get_user_id - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'get', return_value={'result': [{'sys_id': '123'}]}) - incident_sys_id = get_user_id('user1 user2') - assert incident_sys_id == '123' + + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value={"result": [{"sys_id": "123"}]}) + incident_sys_id = get_user_id("user1 user2") + assert incident_sys_id == "123" def test_get_group(mocker): @@ -87,11 +91,12 @@ def test_get_group(mocker): - make sure the incident result key is being retrieved """ from ServiceNowUpdateIncident import get_group - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'get', return_value={'result': 'test'}) - user = get_group('query') - assert user == 'test' + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value={"result": "test"}) + + user = get_group("query") + assert user == "test" def test_get_group_id(mocker): @@ -106,10 +111,11 @@ def test_get_group_id(mocker): - make sure that the incident sys ID is retrieved. """ from ServiceNowUpdateIncident import get_group_id - mocker.patch.object(demisto, 'executeCommand') - mocker.patch.object(demisto, 'get', return_value={'result': [{'sys_id': '123'}]}) - incident_sys_id = get_group_id('user1 user2') - assert incident_sys_id == '123' + + mocker.patch.object(demisto, "executeCommand") + mocker.patch.object(demisto, "get", return_value={"result": [{"sys_id": "123"}]}) + incident_sys_id = get_group_id("user1 user2") + assert incident_sys_id == "123" def test_main_flow_success(mocker): @@ -124,22 +130,23 @@ def test_main_flow_success(mocker): - make sure the main flow gets executed without errors. 
""" from ServiceNowUpdateIncident import main - args_mock = {'number': 'INC0021211', 'query': 'description=hello'} - mocked_command_data = {'result': {'number': 'INC0021211', 'sys_id': '123'}} - mocker.patch.object(demisto, 'executeCommand') - mocker.patch('ServiceNowUpdateIncident.get_incident_id', return_value='123') - mocker.patch.object(demisto, 'args', return_value=args_mock) - mocker.patch.object(demisto, 'get', return_value=mocked_command_data) - result = mocker.patch('demistomock.results') + args_mock = {"number": "INC0021211", "query": "description=hello"} + mocked_command_data = {"result": {"number": "INC0021211", "sys_id": "123"}} + mocker.patch.object(demisto, "executeCommand") + mocker.patch("ServiceNowUpdateIncident.get_incident_id", return_value="123") + mocker.patch.object(demisto, "args", return_value=args_mock) + mocker.patch.object(demisto, "get", return_value=mocked_command_data) + + result = mocker.patch("demistomock.results") main() assert result.call_args.args[0] == { - 'Type': 1, - 'Contents': {'result': {'number': 'INC0021211', 'sys_id': '123'}}, - 'ContentsFormat': 'json', - 'ReadableContentsFormat': 'markdown', - 'HumanReadable': 'Incident with ID 123 successfully updated', - 'EntryContext': {'ServiceNow.Incident(val.ID===obj.ID)': {'ID': '123'}} + "Type": 1, + "Contents": {"result": {"number": "INC0021211", "sys_id": "123"}}, + "ContentsFormat": "json", + "ReadableContentsFormat": "markdown", + "HumanReadable": "Incident with ID 123 successfully updated", + "EntryContext": {"ServiceNow.Incident(val.ID===obj.ID)": {"ID": "123"}}, } diff --git a/Packs/ServiceNow/pack_metadata.json b/Packs/ServiceNow/pack_metadata.json index e603fdb8d799..87208edf7e48 100644 --- a/Packs/ServiceNow/pack_metadata.json +++ b/Packs/ServiceNow/pack_metadata.json @@ -2,7 +2,7 @@ "name": "ServiceNow", "description": "Use The ServiceNow IT Service Management (ITSM) solution to modernize the way you manage and deliver services to your users.", "support": "xsoar", - "currentVersion": "2.7.8", + "currentVersion": "2.7.9", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", From 9ae3d3deacc2b6fcdd37ec9fca7ee0a370ad6b2b Mon Sep 17 00:00:00 2001 From: Content Bot Date: Sun, 23 Mar 2025 12:52:36 +0000 Subject: [PATCH 02/18] FireEye-Detection-on-Demand: Apply ruff Format --- .../FireEye-Detection-on-Demand.py | 312 +++++++----------- .../ReleaseNotes/1_0_7.md | 6 + .../pack_metadata.json | 2 +- 3 files changed, 133 insertions(+), 187 deletions(-) create mode 100644 Packs/FireEye-Detection-on-Demand/ReleaseNotes/1_0_7.md diff --git a/Packs/FireEye-Detection-on-Demand/Integrations/FireEye-Detection-on-Demand/FireEye-Detection-on-Demand.py b/Packs/FireEye-Detection-on-Demand/Integrations/FireEye-Detection-on-Demand/FireEye-Detection-on-Demand.py index 139ea126cd33..12ae9d25996e 100644 --- a/Packs/FireEye-Detection-on-Demand/Integrations/FireEye-Detection-on-Demand/FireEye-Detection-on-Demand.py +++ b/Packs/FireEye-Detection-on-Demand/Integrations/FireEye-Detection-on-Demand/FireEye-Detection-on-Demand.py @@ -1,20 +1,20 @@ -import demistomock as demisto # noqa: F401 -from CommonServerPython import * # noqa: F401 from typing import Any import dateparser +import demistomock as demisto # noqa: F401 import urllib3 +from CommonServerPython import * # noqa: F401 # Disable insecure warnings urllib3.disable_warnings() -''' CONSTANTS ''' +""" CONSTANTS """ -DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' +DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ" -''' CLIENT CLASS ''' +""" CLIENT CLASS """ 
class Client(BaseClient): @@ -28,63 +28,35 @@ class Client(BaseClient): """ def get_file_reputation(self, file: str) -> dict[str, Any]: - return self._http_request( - method='GET', - url_suffix=f'/hashes/{file}' - ) + return self._http_request(method="GET", url_suffix=f"/hashes/{file}") def get_health(self) -> dict[str, Any]: - return self._http_request( - method='GET', - url_suffix='/health' - ) + return self._http_request(method="GET", url_suffix="/health") def submit_file(self, files: dict[str, Any], data: dict[str, Any]) -> dict[str, Any]: - return self._http_request( - method='POST', - url_suffix='/files', - files=files, - data=data - ) + return self._http_request(method="POST", url_suffix="/files", files=files, data=data) def submit_urls(self, data: dict[str, Any]) -> dict[str, Any]: - return self._http_request( - method='POST', - url_suffix='/urls', - files=data, - data=None - ) + return self._http_request(method="POST", url_suffix="/urls", files=data, data=None) def get_report_url(self, report_id: str, expiration: int) -> dict[str, Any]: - return self._http_request( - method='GET', - url_suffix=f'/presigned-url/{report_id}', - params={ - 'expiry': expiration - } - ) + return self._http_request(method="GET", url_suffix=f"/presigned-url/{report_id}", params={"expiry": expiration}) def report_status(self, report_id: str, extended: str) -> dict[str, Any]: - return self._http_request( - method='GET', - url_suffix=f'/reports/{report_id}', - params={ - 'extended': extended - } - ) + return self._http_request(method="GET", url_suffix=f"/reports/{report_id}", params={"extended": extended}) def report_artifact(self, report_id: str, artifact_type: str) -> dict[str, Any]: return self._http_request( - method='GET', - url_suffix=f'/artifacts/{report_id}', + method="GET", + url_suffix=f"/artifacts/{report_id}", params={ - 'type': artifact_type, + "type": artifact_type, }, - resp_type='content' + resp_type="content", ) -''' HELPER FUNCTIONS ''' +""" HELPER FUNCTIONS """ def convert_to_demisto_severity(severity: str) -> int: @@ -92,10 +64,10 @@ def convert_to_demisto_severity(severity: str) -> int: # might be required in your integration, so a dedicated function is # recommended. This mapping should also be documented. return { - 'Low': 1, # low severity - 'Medium': 2, # medium severity - 'High': 3, # high severity - 'Critical': 4 # critical severity + "Low": 1, # low severity + "Medium": 2, # medium severity + "High": 3, # high severity + "Critical": 4, # critical severity }[severity] @@ -126,10 +98,10 @@ def arg_to_timestamp(arg: Any, arg_name: str, required: bool = False) -> int | N # we use dateparser to handle strings either in ISO8601 format, or # relative time stamps. 
# For example: format 2019-10-23T00:00:00 or "3 days", etc - date = dateparser.parse(arg, settings={'TIMEZONE': 'UTC'}) + date = dateparser.parse(arg, settings={"TIMEZONE": "UTC"}) if date is None: # if d is None it means dateparser failed to parse it - raise ValueError(f'Invalid date: {arg_name}') + raise ValueError(f"Invalid date: {arg_name}") return int(date.timestamp()) if isinstance(arg, int | float): @@ -138,7 +110,7 @@ def arg_to_timestamp(arg: Any, arg_name: str, required: bool = False) -> int | N raise ValueError(f'Invalid date: "{arg_name}"') -''' COMMAND FUNCTIONS ''' +""" COMMAND FUNCTIONS """ def test_module(client: Client) -> str: @@ -155,23 +127,22 @@ def test_module(client: Client) -> str: # client.get_health() except DemistoException as e: - if 'Forbidden' in str(e): - return 'Authorization Error: make sure API Key is correctly set' + if "Forbidden" in str(e): + return "Authorization Error: make sure API Key is correctly set" else: raise e - return 'ok' + return "ok" def get_hashes_command(client: Client, args: dict[str, Any]) -> tuple[str, dict, Any]: - - hashes = argToList(args.get('md5_hashes')) + hashes = argToList(args.get("md5_hashes")) if len(hashes) == 0: - raise ValueError('hash(es) not specified') + raise ValueError("hash(es) not specified") for hash in hashes: if md5Regex.match(hash): continue - raise Exception('Invalid hash. Only MD5 is supported.') + raise Exception("Invalid hash. Only MD5 is supported.") dbot_score_list: list[dict[str, Any]] = [] file_standard_list: list[dict[str, Any]] = [] @@ -179,107 +150,87 @@ def get_hashes_command(client: Client, args: dict[str, Any]) -> tuple[str, dict, for hash in hashes: file_data = client.get_file_reputation(hash) - file_data['MD5'] = file_data['md5'] - del file_data['md5'] + file_data["MD5"] = file_data["md5"] + del file_data["md5"] # demisto.results(file_data) - engines = file_data.get('engine_results', {}) + engines = file_data.get("engine_results", {}) for key in engines: - if engines[key].get('sha256'): - file_data['SHA256'] = engines[key].get('sha256') - del engines[key]['sha256'] + if engines[key].get("sha256"): + file_data["SHA256"] = engines[key].get("sha256") + del engines[key]["sha256"] # If the outer `is_malicious` is set to True, assume the score should be bad # Otherwise, default to unknown unless at least one engine has returned a verdict besides `not_found` - if file_data['is_malicious']: + if file_data["is_malicious"]: score = 3 # bad else: score = 0 # unknown for key in engines: - verdict = engines[key].get('verdict', 'not_found') + verdict = engines[key].get("verdict", "not_found") if verdict != "not_found" and verdict != "malicious": score = 1 # good break - dbot_score = { - 'Indicator': hash, - 'Vendor': 'FireEye DoD', - 'Type': 'file', - 'Score': score - } + dbot_score = {"Indicator": hash, "Vendor": "FireEye DoD", "Type": "file", "Score": score} file_standard_context = { - 'MD5': hash, + "MD5": hash, } if score == 3: # if score is bad must add DBotScore Vendor and Description - file_standard_context['Malicious'] = { - 'Vendor': 'FireEye DoD' - } + file_standard_context["Malicious"] = {"Vendor": "FireEye DoD"} filedata = {} - filedata['FireEyeDoD'] = file_data - filedata['MD5'] = file_data['MD5'] - del filedata['FireEyeDoD']['MD5'] - if file_data.get('SHA256'): - dbot_score_sha256 = { - 'Indicator': file_data.get('SHA256'), - 'Vendor': 'FireEye DoD', - 'Type': 'file', - 'Score': score - } + filedata["FireEyeDoD"] = file_data + filedata["MD5"] = file_data["MD5"] + del 
filedata["FireEyeDoD"]["MD5"] + if file_data.get("SHA256"): + dbot_score_sha256 = {"Indicator": file_data.get("SHA256"), "Vendor": "FireEye DoD", "Type": "file", "Score": score} dbot_score_list.append(dbot_score_sha256) - filedata['SHA256'] = file_data['SHA256'] - file_standard_context['SHA256'] = file_data['SHA256'] - del filedata['FireEyeDoD']['SHA256'] + filedata["SHA256"] = file_data["SHA256"] + file_standard_context["SHA256"] = file_data["SHA256"] + del filedata["FireEyeDoD"]["SHA256"] file_standard_list.append(file_standard_context) dbot_score_list.append(dbot_score) file_data_list.append(filedata) outputs = { - 'DBotScore(val.Vendor == obj.Vendor && val.Indicator == obj.Indicator)': dbot_score_list, - outputPaths['file']: file_standard_list, - 'File(val.MD5 == obj.MD5 || val.SHA256 == obj.SHA256)': file_data_list + "DBotScore(val.Vendor == obj.Vendor && val.Indicator == obj.Indicator)": dbot_score_list, + outputPaths["file"]: file_standard_list, + "File(val.MD5 == obj.MD5 || val.SHA256 == obj.SHA256)": file_data_list, } - readable_output = tableToMarkdown('FireEye DoD Results', file_standard_list, headers=["MD5", "SHA256", "Malicious"]) + readable_output = tableToMarkdown("FireEye DoD Results", file_standard_list, headers=["MD5", "SHA256", "Malicious"]) - return ( - readable_output, - outputs, - file_data_list - ) + return (readable_output, outputs, file_data_list) def generate_report_url(client: Client, args: dict[str, Any]) -> tuple[str, dict, dict]: - report_id = str(args.get('report_id')) - expiration = arg_to_int(arg=args.get('expiration'), arg_name='expiration', required=True) + report_id = str(args.get("report_id")) + expiration = arg_to_int(arg=args.get("expiration"), arg_name="expiration", required=True) if expiration: if expiration < 1 or expiration > 8760: - raise ValueError('Expiration must be between 1 and 8760 hours.') + raise ValueError("Expiration must be between 1 and 8760 hours.") else: - raise ValueError('Expiration not specified or not a number.') + raise ValueError("Expiration not specified or not a number.") report = client.get_report_url(report_id=report_id, expiration=expiration) - presigned_report_url = report.get('presigned_report_url') + presigned_report_url = report.get("presigned_report_url") - readable_output = f'Report {report_id} is available [here]({presigned_report_url})' + readable_output = f"Report {report_id} is available [here]({presigned_report_url})" - return ( - readable_output, - {}, - report - ) + return (readable_output, {}, report) def submit_file_command(client: Client, args: dict[str, Any]) -> tuple[str, dict, dict]: - entry_id = demisto.args().get('entryID') + entry_id = demisto.args().get("entryID") file_entry = demisto.getFilePath(entry_id) # .get('path') - file_name = file_entry['name'] - file_path = file_entry['path'] - files = {'file': (file_name, open(file_path, 'rb'))} + file_name = file_entry["name"] + file_path = file_entry["path"] + files = {"file": (file_name, open(file_path, "rb"))} # Optional parameters to send along with the file - optional_params = ['password', 'param', 'screenshot', 'video', 'fileExtraction', 'memoryDump', 'pcap'] + optional_params = ["password", "param", "screenshot", "video", "fileExtraction", "memoryDump", "pcap"] data = {} for param in optional_params: value = demisto.args().get(param) @@ -288,93 +239,88 @@ def submit_file_command(client: Client, args: dict[str, Any]) -> tuple[str, dict scan = client.submit_file(files=files, data=data) - scan['filename'] = file_name - del scan['status'] - 
scan['overall_status'] = 'RUNNING'
+    scan["filename"] = file_name
+    del scan["status"]
+    scan["overall_status"] = "RUNNING"
 
-    report_id = scan.get('report_id')
+    report_id = scan.get("report_id")
     readable_output = (
-        f'Started analysis of {file_name} with FireEye Detection on Demand.'
-        f'Results will be published to report id: {report_id}'
-    )
-    outputs = {
-        'FireEyeDoD.Scan(val.report_id == obj.report_id)': scan
-    }
-    return (
-        readable_output,
-        outputs,
-        scan
+        f"Started analysis of {file_name} with FireEye Detection on Demand. "
+        f"Results will be published to report id: {report_id}"
     )
+    outputs = {"FireEyeDoD.Scan(val.report_id == obj.report_id)": scan}
+    return (readable_output, outputs, scan)
 
 
 def submit_urls_command(client: Client, args: dict[str, Any]) -> tuple[str, dict, dict]:
-    urls = argToList(args.get('urls'))
+    urls = argToList(args.get("urls"))
     if len(urls) == 0:
-        raise ValueError('hash(es) not specified')
+        raise ValueError("URL(s) not specified")
 
     # Format the URLs into a string list, which the API understands
     formatted_urls = "[" + ",".join([url.replace(url, f'"{url}"') for url in urls]) + "]"
-    data = {'urls': formatted_urls}
+    data = {"urls": formatted_urls}
 
     scan = client.submit_urls(data=data)
-    del scan['status']
-    scan['overall_status'] = 'RUNNING'
+    del scan["status"]
+    scan["overall_status"] = "RUNNING"
 
-    report_id = scan.get('report_id')
+    report_id = scan.get("report_id")
     readable_output = (
-        f'Started analysis of {urls} with FireEye Detection on Demand.'
-        f'Results will be published to report id: {report_id}'
-    )
-    outputs = {
-        'FireEyeDoD.Scan(val.report_id == obj.report_id)': scan
-    }
-    return (
-        readable_output,
-        outputs,
-        scan
+        f"Started analysis of {urls} with FireEye Detection on Demand. " f"Results will be published to report id: {report_id}"
     )
+    outputs = {"FireEyeDoD.Scan(val.report_id == obj.report_id)": scan}
+    return (readable_output, outputs, scan)
 
 
 def get_reports_command(client: Client, args: dict[str, Any]) -> tuple[str, dict, Any]:
-    report_id_list = argToList(args.get('report_ids', []))
-    extended = args.get('extended_report', "False")
-    screenshot = args.get('get_screenshot', "false")
-    artifact = args.get('get_artifact', "")
+    report_id_list = argToList(args.get("report_ids", []))
+    extended = args.get("extended_report", "False")
+    screenshot = args.get("get_screenshot", "false")
+    artifact = args.get("get_artifact", "")
 
     if len(report_id_list) == 0:
-        raise ValueError('report_id(s) not specified')
+        raise ValueError("report_id(s) not specified")
 
     report_list: list[dict[str, Any]] = []
     for report_id in report_id_list:
         report = client.report_status(report_id=report_id, extended=extended)
         if screenshot.lower() == "true":
             screenshot = client.report_artifact(report_id=report_id, artifact_type="screenshot")
-            stored_img = fileResult('screenshot.gif', screenshot)
-            demisto.results({'Type': entryTypes['image'], 'ContentsFormat': formats['text'],
-                             'File': stored_img['File'], 'FileID': stored_img['FileID'], 'Contents': ''})
+            stored_img = fileResult("screenshot.gif", screenshot)
+            demisto.results(
+                {
+                    "Type": entryTypes["image"],
+                    "ContentsFormat": formats["text"],
+                    "File": stored_img["File"],
+                    "FileID": stored_img["FileID"],
+                    "Contents": "",
+                }
+            )
 
         if artifact != "":
             artifacts = client.report_artifact(report_id=report_id, artifact_type=artifact)
-            stored_artifacts = fileResult('artifacts.zip', artifacts)
-            demisto.results({'Type': entryTypes['file'], 'ContentsFormat': formats['text'],
-                             'File': stored_artifacts['File'], 'FileID': 
stored_artifacts['FileID'], 'Contents': ''}) + stored_artifacts = fileResult("artifacts.zip", artifacts) + demisto.results( + { + "Type": entryTypes["file"], + "ContentsFormat": formats["text"], + "File": stored_artifacts["File"], + "FileID": stored_artifacts["FileID"], + "Contents": "", + } + ) report_list.append(report) - readable_output = tableToMarkdown('Scan status', report_list) - outputs = { - 'FireEyeDoD.Scan(val.report_id == obj.report_id)': report_list - } - return ( - readable_output, - outputs, - report_list - ) + readable_output = tableToMarkdown("Scan status", report_list) + outputs = {"FireEyeDoD.Scan(val.report_id == obj.report_id)": report_list} + return (readable_output, outputs, report_list) -''' MAIN FUNCTION ''' +""" MAIN FUNCTION """ def main() -> None: @@ -384,19 +330,19 @@ def main() -> None: :rtype: """ - api_key = demisto.params().get('apikey') + api_key = demisto.params().get("apikey") # get the service API url - base_url = demisto.params()['url'] + base_url = demisto.params()["url"] # if your Client class inherits from BaseClient, SSL verification is # handled out of the box by it, just pass ``verify_certificate`` to # the Client constructor - verify_certificate = not demisto.params().get('insecure', False) + verify_certificate = not demisto.params().get("insecure", False) # if your Client class inherits from BaseClient, system proxy is handled # out of the box by it, just pass ``proxy`` to the Client constructor - proxy = demisto.params().get('proxy', False) + proxy = demisto.params().get("proxy", False) # INTEGRATION DEVELOPER TIP # You can use functions such as ``demisto.debug()``, ``demisto.info()``, @@ -404,35 +350,29 @@ def main() -> None: # level on the server configuration # See: https://xsoar.pan.dev/docs/integrations/code-conventions#logging - demisto.debug(f'Command being called is {demisto.command()}') + demisto.debug(f"Command being called is {demisto.command()}") try: - headers = { - 'feye-auth-key': f'{api_key}' - } - client = Client( - base_url=base_url, - verify=verify_certificate, - headers=headers, - proxy=proxy) + headers = {"feye-auth-key": f"{api_key}"} + client = Client(base_url=base_url, verify=verify_certificate, headers=headers, proxy=proxy) - if demisto.command() == 'test-module': + if demisto.command() == "test-module": # This is the call made when pressing the integration Test button. 
result = test_module(client) demisto.results(result) - elif demisto.command() == 'fireeye-dod-get-hashes': + elif demisto.command() == "fireeye-dod-get-hashes": return_outputs(*get_hashes_command(client, demisto.args())) - elif demisto.command() == 'fireeye-dod-get-reports': + elif demisto.command() == "fireeye-dod-get-reports": return_outputs(*get_reports_command(client, demisto.args())) - elif demisto.command() == 'fireeye-dod-submit-file': + elif demisto.command() == "fireeye-dod-submit-file": return_outputs(*submit_file_command(client, demisto.args())) - elif demisto.command() == 'fireeye-dod-submit-urls': + elif demisto.command() == "fireeye-dod-submit-urls": return_outputs(*submit_urls_command(client, demisto.args())) - elif demisto.command() == 'fireeye-dod-get-report-url': + elif demisto.command() == "fireeye-dod-get-report-url": return_outputs(*generate_report_url(client, demisto.args())) # Log exceptions and return errors @@ -442,8 +382,8 @@ def main() -> None: # return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') -''' ENTRY POINT ''' +""" ENTRY POINT """ -if __name__ in ('__main__', '__builtin__', 'builtins'): +if __name__ in ("__main__", "__builtin__", "builtins"): main() diff --git a/Packs/FireEye-Detection-on-Demand/ReleaseNotes/1_0_7.md b/Packs/FireEye-Detection-on-Demand/ReleaseNotes/1_0_7.md new file mode 100644 index 000000000000..845b312e1332 --- /dev/null +++ b/Packs/FireEye-Detection-on-Demand/ReleaseNotes/1_0_7.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### FireEye Detection on Demand + +- Metadata and documentation improvements. diff --git a/Packs/FireEye-Detection-on-Demand/pack_metadata.json b/Packs/FireEye-Detection-on-Demand/pack_metadata.json index 3e726b0352d4..6610f281506b 100644 --- a/Packs/FireEye-Detection-on-Demand/pack_metadata.json +++ b/Packs/FireEye-Detection-on-Demand/pack_metadata.json @@ -2,7 +2,7 @@ "name": "FireEye Detection on Demand", "description": "Detonate files, hashes, and URLs using FireEye Detection on Demand", "support": "partner", - "currentVersion": "1.0.6", + "currentVersion": "1.0.7", "author": "FireEye Inc.", "githubUser": [ "mckibbenc", From 9a5ed4de47aa527cde44400f50f3b84ec7d53e80 Mon Sep 17 00:00:00 2001 From: Content Bot Date: Sun, 23 Mar 2025 12:52:39 +0000 Subject: [PATCH 03/18] OpenPhish: Apply ruff Format --- .../Integrations/OpenPhish_v2/OpenPhish_v2.py | 86 +++++++++---------- .../OpenPhish_v2/OpenPhish_v2_test.py | 58 ++++++------- .../OpenPhish_v2/test_data/api_raw.py | 18 ++-- Packs/OpenPhish/ReleaseNotes/2_0_20.md | 6 ++ Packs/OpenPhish/pack_metadata.json | 2 +- 5 files changed, 85 insertions(+), 85 deletions(-) create mode 100644 Packs/OpenPhish/ReleaseNotes/2_0_20.md diff --git a/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2.py b/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2.py index 8c7727bd673a..a1e107b77c80 100644 --- a/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2.py +++ b/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2.py @@ -1,17 +1,19 @@ from CommonServerPython import * + from CommonServerUserPython import * -''' IMPORTS ''' +""" IMPORTS """ -import urllib3 import traceback +import urllib3 + # Disable insecure warnings urllib3.disable_warnings() -''' CONSTANTS ''' +""" CONSTANTS """ -DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' +DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ" class Error(Exception): @@ -30,7 +32,6 @@ def __init__(self, message): class Client(BaseClient): - def __init__(self, url: str, use_ssl: bool, use_proxy: bool, fetch_interval_hours: float = 1): 
super().__init__(url, verify=use_ssl, proxy=use_proxy) self.fetch_interval_hours = fetch_interval_hours @@ -40,7 +41,7 @@ def http_request(self, name, resp_type=None): initiates a http request to openphish """ data = self._http_request( - method='GET', + method="GET", url_suffix=name, resp_type=resp_type, ) @@ -55,16 +56,15 @@ def _save_urls_to_instance(client: Client): """ try: # gets the urls from api and formats them - data = client.http_request('feed.txt', resp_type='text') + data = client.http_request("feed.txt", resp_type="text") data = data.splitlines() data = list(map(remove_backslash, data)) - context = {"list": data, - "timestamp": date_to_timestamp(datetime.now(), DATE_FORMAT)} + context = {"list": data, "timestamp": date_to_timestamp(datetime.now(), DATE_FORMAT)} set_integration_context(context) except NotFoundError as e: - raise Exception(f'Check server URL - {e.message}') + raise Exception(f"Check server URL - {e.message}") def _is_reload_needed(client: Client, data: dict) -> bool: @@ -77,11 +77,11 @@ def _is_reload_needed(client: Client, data: dict) -> bool: or if the memory is empty, Otherwise False. """ - if not data or not data.get('timestamp') or not data.get('list'): + if not data or not data.get("timestamp") or not data.get("list"): return True now = datetime.now() - if data.get('timestamp') <= date_to_timestamp(now - timedelta(hours=client.fetch_interval_hours)): + if data.get("timestamp") <= date_to_timestamp(now - timedelta(hours=client.fetch_interval_hours)): return True return False @@ -112,7 +112,7 @@ def remove_backslash(url: str) -> str: """ url.strip() - if url.endswith('/'): + if url.endswith("/"): return url[:-1] return url @@ -127,13 +127,13 @@ def url_command(client: Client, **kwargs) -> List[CommandResults]: if not data: raise DemistoException("Data was not saved correctly to the integration context.") - url_list_from_user = argToList(kwargs.get('url')) - urls_in_db = data.get('list', []) + url_list_from_user = argToList(kwargs.get("url")) + urls_in_db = data.get("list", []) for url in url_list_from_user: url_fixed = remove_backslash(url) if url_fixed in urls_in_db: dbotscore = Common.DBotScore.BAD - desc = 'Match found in OpenPhish database' + desc = "Match found in OpenPhish database" markdown = f"#### Found matches for given URL {url}\n" else: dbotscore = Common.DBotScore.NONE @@ -141,33 +141,31 @@ def url_command(client: Client, **kwargs) -> List[CommandResults]: markdown = f"#### No matches for URL {url}\n" dbot = Common.DBotScore( - url, DBotScoreType.URL, - 'OpenPhish', dbotscore, desc, - reliability=demisto.params().get('integrationReliability') + url, DBotScoreType.URL, "OpenPhish", dbotscore, desc, reliability=demisto.params().get("integrationReliability") ) url_object = Common.URL(url, dbot) - command_results.append(CommandResults( - indicator=url_object, - readable_output=markdown, - )) + command_results.append( + CommandResults( + indicator=url_object, + readable_output=markdown, + ) + ) return command_results def reload_command(client: Client, **kwargs) -> CommandResults: _save_urls_to_instance(client) - return CommandResults(readable_output='Database was updated successfully to the integration context.') + return CommandResults(readable_output="Database was updated successfully to the integration context.") def status_command(client: Client, **kwargs) -> CommandResults: data = get_integration_context() md = "OpenPhish Database Status\n" - if data and data.get('list', None): - md += f"Total **{str(len(data.get('list')))}** URLs loaded.\n" - 
load_time = timestamp_to_datestring(data.get('timestamp'), - "%a %b %d %Y %H:%M:%S (UTC)", - is_utc=True) + if data and data.get("list", None): + md += f"Total **{len(data.get('list'))!s}** URLs loaded.\n" + load_time = timestamp_to_datestring(data.get("timestamp"), "%a %b %d %Y %H:%M:%S (UTC)", is_utc=True) md += f"Last load time **{load_time}**\n" else: md += "Database not loaded.\n" @@ -177,35 +175,36 @@ def status_command(client: Client, **kwargs) -> CommandResults: def main(): """ - PARSE AND VALIDATE INTEGRATION PARAMS + PARSE AND VALIDATE INTEGRATION PARAMS """ - demisto.debug(f'Command being called is {demisto.command()}') + demisto.debug(f"Command being called is {demisto.command()}") # get the service API url base_url = "http://openphish.com" https_base_url = "https://openphish.com" commands = { - 'url': url_command, - 'openphish-reload': reload_command, - 'openphish-status': status_command, + "url": url_command, + "openphish-reload": reload_command, + "openphish-status": status_command, } user_params = demisto.params() - hours_to_refresh = user_params.get('fetchIntervalHours', '1') + hours_to_refresh = user_params.get("fetchIntervalHours", "1") try: hours_to_refresh = float(hours_to_refresh) - use_ssl = not user_params.get('insecure', False) - use_proxy = user_params.get('proxy', False) - use_https = user_params.get('https', False) + use_ssl = not user_params.get("insecure", False) + use_proxy = user_params.get("proxy", False) + use_https = user_params.get("https", False) client = Client( url=https_base_url if use_https else base_url, use_ssl=use_ssl, use_proxy=use_proxy, - fetch_interval_hours=hours_to_refresh) + fetch_interval_hours=hours_to_refresh, + ) command = demisto.command() - if command == 'test-module': + if command == "test-module": # This is the call made when pressing the integration Test button. result = test_module(client) return_results(result) @@ -214,11 +213,10 @@ def main(): # Log exceptions except ValueError: - return_error('Invalid parameter was given as database refresh interval.') + return_error("Invalid parameter was given as database refresh interval.") except Exception as e: - return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)} \n ' - f'tracback: {traceback.format_exc()}') + return_error(f"Failed to execute {demisto.command()} command. 
Error: {e!s} \n " f"traceback: {traceback.format_exc()}")
 
 
-if __name__ in ('__main__', '__builtin__', 'builtins'):
+if __name__ in ("__main__", "__builtin__", "builtins"):
     main()
diff --git a/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py b/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py
index 58d3790c8107..41f5adcec723 100644
--- a/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py
+++ b/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py
@@ -1,16 +1,17 @@
 from datetime import datetime
-import pytest
-import OpenPhish_v2
+
 import demistomock as demisto
+import OpenPhish_v2
+import pytest
+from freezegun import freeze_time
 from OpenPhish_v2 import (
     Client,
     _is_reload_needed,
-    remove_backslash,
     reload_command,
+    remove_backslash,
     status_command,
     url_command,
 )
-from freezegun import freeze_time
 from test_data.api_raw import RAW_DATA
 
 MOCK_URL = "http://openphish.com"
@@ -77,12 +78,12 @@
         True,
     ),
 ]
-INTEGRATION_NAME = 'OpenPhish'
+INTEGRATION_NAME = "OpenPhish"
 
 
 @pytest.fixture(autouse=True)
 def handle_calling_context(mocker):
-    mocker.patch.object(demisto, 'callingContext', {'context': {'IntegrationBrand': INTEGRATION_NAME}})
+    mocker.patch.object(demisto, "callingContext", {"context": {"IntegrationBrand": INTEGRATION_NAME}})
 
 
 @pytest.mark.parametrize("client,data,output", RELOADED_DATA)
@@ -108,46 +109,39 @@ def test_is_reload_needed(mocker, client, data, output):
@pytest.mark.parametrize("url, expected_result", LINKS)
 def test_remove_backslash(url: str, expected_result: str):
     """
-        Given:
-        - string representing url
+    Given:
+    - string representing url
 
-        When:
-        - saving data from to the integration context or checking a specific url
+    When:
+    - saving data to the integration context or checking a specific url
 
-        Then:
-        - checks the url format is without a backslash as last character
+    Then:
+    - checks the url format is without a backslash as last character
 
-        """
+    """
     assert remove_backslash(url) == expected_result
 
 
 def test_reload_command(mocker):
     """
-        When:
-        - reloading data from to the api to integration context
+    When:
+    - reloading data from the api to the integration context
 
-        Then:
-        - checks if the reloading finished successfully
+    Then:
+    - checks if the reloading finished successfully
 
-        """
+    """
     mock_data_from_api = RAW_DATA
     mocker.patch.object(Client, "http_request", return_value=mock_data_from_api)
     mocker.patch.object(demisto, "setIntegrationContext")
-    client = Client(
-        url=MOCK_URL, use_ssl=False, use_proxy=False, fetch_interval_hours=1
-    )
+    client = Client(url=MOCK_URL, use_ssl=False, use_proxy=False, fetch_interval_hours=1)
     status = reload_command(client)
-    assert (
-        status.readable_output
-        == "Database was updated successfully to the integration context."
-    )
+    assert status.readable_output == "Database was updated successfully to the integration context."
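# A worked example of the reload gate these tests exercise (times illustrative):
# with fetch_interval_hours=1 and "now" frozen at 2020-10-01T06:00:00Z, a stored
# "timestamp" at or before 2020-10-01T05:00:00Z makes _is_reload_needed() return
# True, as does a cached context missing the "timestamp" or "list" key entirely.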
STANDARD_NOT_LOADED_MSG = "OpenPhish Database Status\nDatabase not loaded.\n" STANDARD_4_LOADED_MSG = ( - "OpenPhish Database Status\n" - "Total **4** URLs loaded.\n" - "Last load time **Thu Oct 01 2020 06:00:00 (UTC)**\n" + "OpenPhish Database Status\n" "Total **4** URLs loaded.\n" "Last load time **Thu Oct 01 2020 06:00:00 (UTC)**\n" ) CONTEXT_MOCK_WITH_STATUS = [ ({}, STANDARD_NOT_LOADED_MSG), # case no data in memory @@ -316,7 +310,9 @@ def test_url_command(mocker, url, context, expected_results): """ mocker.patch.object( - demisto, "getIntegrationContext", return_value=context, + demisto, + "getIntegrationContext", + return_value=context, ) mocker.patch.object(OpenPhish_v2, "_is_reload_needed", return_value=False) client = Client(MOCK_URL, True, False, 1) @@ -324,7 +320,5 @@ def test_url_command(mocker, url, context, expected_results): assert len(results) >= 1 for i in range(len(results)): output = results[i].to_context().get("EntryContext", {}) - assert output.get( - "URL(val.Data && val.Data == obj.Data)", [] - ) == expected_results[i].get("URL") + assert output.get("URL(val.Data && val.Data == obj.Data)", []) == expected_results[i].get("URL") assert output.get(DBOT_KEY, []) == expected_results[i].get("DBOTSCORE") diff --git a/Packs/OpenPhish/Integrations/OpenPhish_v2/test_data/api_raw.py b/Packs/OpenPhish/Integrations/OpenPhish_v2/test_data/api_raw.py index 734dfd9cc498..c21d88230395 100644 --- a/Packs/OpenPhish/Integrations/OpenPhish_v2/test_data/api_raw.py +++ b/Packs/OpenPhish/Integrations/OpenPhish_v2/test_data/api_raw.py @@ -1,8 +1,10 @@ -RAW_DATA = 'https://cnannord.com/paypal/firebasecloud/83792/htmjrtfgdsaopjdnbhhdmmdgrhehnndnmmmbvvbnmn' \ - 'dmnbnnbbmnm/service/paypal\nhttp://payameghdir.ir/cxxc/Owa/\nhttps://fxsearchdesk.net/Client' \ - '/tang/step4.html\nhttps://fxsearchdesk.net/Client/tang/step3.html\nhttps://fxsearchdesk.net/' \ - 'Client/tang/step2.html\nhttp://fxsearchdesk.net/Client/tang/step2.html\n' \ - 'http://fxsearchdesk.net/Client/tang/step3.html\nhttp://fxsearchdesk.net/Client/tang/step4.html\n' \ - 'https://fxsearchdesk.net/Client/tang\nhttp://fxsearchdesk.net/Client/tang/\n' \ - 'http://fxsearchdesk.net/Client/tang\nhttp://revisepayee.com/admin\n' \ - 'http://hmrc.resolutionfix.com/\nhttps://hmrc.resolutionfix.com/refund/details' +RAW_DATA = ( + "https://cnannord.com/paypal/firebasecloud/83792/htmjrtfgdsaopjdnbhhdmmdgrhehnndnmmmbvvbnmn" + "dmnbnnbbmnm/service/paypal\nhttp://payameghdir.ir/cxxc/Owa/\nhttps://fxsearchdesk.net/Client" + "/tang/step4.html\nhttps://fxsearchdesk.net/Client/tang/step3.html\nhttps://fxsearchdesk.net/" + "Client/tang/step2.html\nhttp://fxsearchdesk.net/Client/tang/step2.html\n" + "http://fxsearchdesk.net/Client/tang/step3.html\nhttp://fxsearchdesk.net/Client/tang/step4.html\n" + "https://fxsearchdesk.net/Client/tang\nhttp://fxsearchdesk.net/Client/tang/\n" + "http://fxsearchdesk.net/Client/tang\nhttp://revisepayee.com/admin\n" + "http://hmrc.resolutionfix.com/\nhttps://hmrc.resolutionfix.com/refund/details" +) diff --git a/Packs/OpenPhish/ReleaseNotes/2_0_20.md b/Packs/OpenPhish/ReleaseNotes/2_0_20.md new file mode 100644 index 000000000000..2f7d2f514613 --- /dev/null +++ b/Packs/OpenPhish/ReleaseNotes/2_0_20.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### OpenPhish v2 + +- Metadata and documentation improvements. 
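For reference, the reputation lookup at the core of the url command above reduces
to the following sketch (names as in OpenPhish_v2.py; the integration-context
handling is simplified):

    urls_in_db = get_integration_context().get("list", [])
    url_fixed = remove_backslash(url)
    score = Common.DBotScore.BAD if url_fixed in urls_in_db else Common.DBotScore.NONE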
diff --git a/Packs/OpenPhish/pack_metadata.json b/Packs/OpenPhish/pack_metadata.json index 82a5390c3934..e07ade54977d 100644 --- a/Packs/OpenPhish/pack_metadata.json +++ b/Packs/OpenPhish/pack_metadata.json @@ -2,7 +2,7 @@ "name": "OpenPhish", "description": "OpenPhish uses proprietary Artificial Intelligence algorithms to automatically identify zero-day phishing sites and provide comprehensive, actionable, real-time threat intelligence.", "support": "xsoar", - "currentVersion": "2.0.19", + "currentVersion": "2.0.20", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", From c04583dfd1de28bd14825210b82f123328b78f1b Mon Sep 17 00:00:00 2001 From: Content Bot Date: Sun, 23 Mar 2025 12:52:43 +0000 Subject: [PATCH 04/18] ProofpointThreatResponse: Apply ruff Format --- .../ProofpointThreatResponse.py | 735 +++++++++--------- .../ProofpointThreatResponse_test.py | 560 +++++++------ .../ProofpointThreatResponseEventCollector.py | 233 +++--- ...fpointThreatResponseEventCollector_test.py | 39 +- .../ReleaseNotes/2_0_24.md | 9 + .../pack_metadata.json | 2 +- 6 files changed, 763 insertions(+), 815 deletions(-) create mode 100644 Packs/ProofpointThreatResponse/ReleaseNotes/2_0_24.md diff --git a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py index e97e990594b7..59c3ff115423 100644 --- a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py +++ b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py @@ -1,148 +1,130 @@ -import demistomock as demisto # noqa: F401 -from CommonServerPython import * # noqa: F401 import json -import requests from datetime import date, timedelta import dateparser - +import demistomock as demisto # noqa: F401 +import requests import urllib3 +from CommonServerPython import * # noqa: F401 urllib3.disable_warnings() -''' GLOBAL VARS ''' -TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ' -BASE_URL = demisto.params().get('url') -if BASE_URL and BASE_URL[-1] != '/': - BASE_URL += '/' -API_KEY = demisto.params().get('credentials', {}).get('password') or demisto.params().get('apikey') -VERIFY_CERTIFICATE = not demisto.params().get('insecure') +""" GLOBAL VARS """ +TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" +BASE_URL = demisto.params().get("url") +if BASE_URL and BASE_URL[-1] != "/": + BASE_URL += "/" +API_KEY = demisto.params().get("credentials", {}).get("password") or demisto.params().get("apikey") +VERIFY_CERTIFICATE = not demisto.params().get("insecure") # How many time before the first fetch to retrieve incidents -FIRST_FETCH, _ = parse_date_range(demisto.params().get('first_fetch', '12 hours') or '12 hours', - date_format=TIME_FORMAT) +FIRST_FETCH, _ = parse_date_range(demisto.params().get("first_fetch", "12 hours") or "12 hours", date_format=TIME_FORMAT) -''' COMMAND FUNCTIONS ''' +""" COMMAND FUNCTIONS """ def get_list(list_id): - fullurl = BASE_URL + f'api/lists/{list_id}/members.json' - res = requests.get( - fullurl, - headers={ - 'Content-Type': 'application/json', - 'Authorization': API_KEY - }, - verify=VERIFY_CERTIFICATE - ) + fullurl = BASE_URL + f"api/lists/{list_id}/members.json" + res = requests.get(fullurl, headers={"Content-Type": "application/json", "Authorization": API_KEY}, verify=VERIFY_CERTIFICATE) if res.status_code < 200 or res.status_code >= 300: - return_error(f'Get list failed. 
URL: {fullurl}, StatusCode: {res.status_code}') + return_error(f"Get list failed. URL: {fullurl}, StatusCode: {res.status_code}") return res.json() def get_list_command(): - ''' Retrieves all indicators of a the given list ID in Threat Response ''' - list_id = demisto.args().get('list-id') + """Retrieves all indicators of a the given list ID in Threat Response""" + list_id = demisto.args().get("list-id") list_items = get_list(list_id) - demisto.results({'list': list_items}) + demisto.results({"list": list_items}) def add_to_list(list_id, indicator, comment, expiration): - fullurl = BASE_URL + f'api/lists/{list_id}/members.json' + fullurl = BASE_URL + f"api/lists/{list_id}/members.json" - indicator = { - 'member': indicator - } + indicator = {"member": indicator} if comment: - indicator['description'] = comment + indicator["description"] = comment if expiration: - indicator['expiration'] = expiration + indicator["expiration"] = expiration - res = requests.post( - fullurl, - headers={ - 'Authorization': API_KEY - }, - verify=VERIFY_CERTIFICATE, - json=indicator - ) + res = requests.post(fullurl, headers={"Authorization": API_KEY}, verify=VERIFY_CERTIFICATE, json=indicator) if res.status_code < 200 or res.status_code >= 300: - return_error(f'Add to list failed. URL: {fullurl}, Request Body: {json.dumps(indicator)}') + return_error(f"Add to list failed. URL: {fullurl}, Request Body: {json.dumps(indicator)}") return res.json() def add_to_list_command(): - ''' Adds given indicators to the given list ID in Threat Response ''' - list_id = demisto.args().get('list-id') - indicators = argToList(demisto.args().get('indicator')) - comment = demisto.args().get('comment') - expiration = demisto.args().get('expiration') + """Adds given indicators to the given list ID in Threat Response""" + list_id = demisto.args().get("list-id") + indicators = argToList(demisto.args().get("indicator")) + comment = demisto.args().get("comment") + expiration = demisto.args().get("expiration") - message = '' + message = "" for indicator in indicators: add_to_list(list_id, indicator, comment, expiration) - message += f'{indicator} added successfully to {list_id}\n' + message += f"{indicator} added successfully to {list_id}\n" demisto.results(message) def block_ip_command(): - ''' Adds given IPs to the relevant blacklist in Threat Response ''' - list_id = demisto.params().get('blacklist_ip', demisto.args().get('blacklist_ip')) - ips = argToList(demisto.args().get('ip')) - expiration = demisto.args().get('expiration') + """Adds given IPs to the relevant blacklist in Threat Response""" + list_id = demisto.params().get("blacklist_ip", demisto.args().get("blacklist_ip")) + ips = argToList(demisto.args().get("ip")) + expiration = demisto.args().get("expiration") - message = '' + message = "" for ip in ips: add_to_list(list_id, ip, None, expiration) - message += f'{ip} added successfully to block_ip list\n' + message += f"{ip} added successfully to block_ip list\n" demisto.results(message) def block_domain_command(): - ''' Adds given domains to the relevant blacklist in Threat Response ''' - list_id = demisto.params().get('blacklist_domain', demisto.args().get('blacklist_domain')) - domains = argToList(demisto.args().get('domain')) - expiration = demisto.args().get('expiration') + """Adds given domains to the relevant blacklist in Threat Response""" + list_id = demisto.params().get("blacklist_domain", demisto.args().get("blacklist_domain")) + domains = argToList(demisto.args().get("domain")) + expiration = 
demisto.args().get("expiration") - message = '' + message = "" for domain in domains: add_to_list(list_id, domain, None, expiration) - message += f'{domain} added successfully to block_domain list\n' + message += f"{domain} added successfully to block_domain list\n" demisto.results(message) def block_url_command(): - ''' Adds given URLs to the relevant blacklist in Threat Response ''' - list_id = demisto.params().get('blacklist_url', demisto.args().get('blacklist_url')) - urls = argToList(demisto.args().get('url')) - expiration = demisto.args().get('expiration') + """Adds given URLs to the relevant blacklist in Threat Response""" + list_id = demisto.params().get("blacklist_url", demisto.args().get("blacklist_url")) + urls = argToList(demisto.args().get("url")) + expiration = demisto.args().get("expiration") - message = '' + message = "" for url in urls: add_to_list(list_id, url, None, expiration) - message += f'{url} added successfully to block_url list\n' + message += f"{url} added successfully to block_url list\n" demisto.results(message) def block_hash_command(): - ''' Adds given hashes to the relevant blacklist in Threat Response ''' - list_id = demisto.params().get('blacklist_hash', demisto.args().get('blacklist_hash')) - hashes = argToList(demisto.args().get('hash')) - expiration = demisto.args().get('expiration') + """Adds given hashes to the relevant blacklist in Threat Response""" + list_id = demisto.params().get("blacklist_hash", demisto.args().get("blacklist_hash")) + hashes = argToList(demisto.args().get("hash")) + expiration = demisto.args().get("expiration") - message = '' + message = "" for h in hashes: add_to_list(list_id, h, None, expiration) - message += f'{h} added successfully to block_hash list\n' + message += f"{h} added successfully to block_hash list\n" demisto.results(message) @@ -151,7 +133,7 @@ def search_indicators(list_id, indicator_filter): list_indicators = get_list(list_id) found_items = [] for item in list_indicators: - item_indicator = demisto.get(item, 'host.host') + item_indicator = demisto.get(item, "host.host") if item_indicator and indicator_filter in item_indicator: found_items.append(item) @@ -159,39 +141,33 @@ def search_indicators(list_id, indicator_filter): def search_indicator_command(): - ''' Retrieves indicators of a list, using a filter ''' - list_id = demisto.args().get('list-id') - indicator_filter = demisto.args().get('filter') + """Retrieves indicators of a list, using a filter""" + list_id = demisto.args().get("list-id") + indicator_filter = demisto.args().get("filter") found = search_indicators(list_id, indicator_filter) - demisto.results({'indicators': found}) + demisto.results({"indicators": found}) def delete_indicator(list_id, indicator_filter): indicator = search_indicators(list_id, indicator_filter) if len(indicator) == 0: - return_error(f'{indicator_filter} not exists in {list_id}') + return_error(f"{indicator_filter} not exists in {list_id}") - indicator_id = indicator.get('id') # pylint: disable=E1101 - fullurl = BASE_URL + f'api/lists/{list_id}/members/{indicator_id}.json' - res = requests.delete( - fullurl, - headers={ - 'Authorization': API_KEY - }, - verify=VERIFY_CERTIFICATE - ) + indicator_id = indicator.get("id") # pylint: disable=E1101 + fullurl = BASE_URL + f"api/lists/{list_id}/members/{indicator_id}.json" + res = requests.delete(fullurl, headers={"Authorization": API_KEY}, verify=VERIFY_CERTIFICATE) if res.status_code < 200 or res.status_code >= 300: - return_error(f'Delete indicator failed. 
URL: {fullurl}, StatusCode: {res.status_code}') + return_error(f"Delete indicator failed. URL: {fullurl}, StatusCode: {res.status_code}") def delete_indicator_command(): - ''' Deletes an indicator from a list ''' - list_id = demisto.args().get('list-id') - indicator = demisto.args().get('indicator') + """Deletes an indicator from a list""" + list_id = demisto.args().get("list-id") + indicator = demisto.args().get("indicator") delete_indicator(list_id, indicator) - demisto.results(f'{list_id} deleted successfully from list {indicator}') + demisto.results(f"{list_id} deleted successfully from list {indicator}") def test(): @@ -201,15 +177,10 @@ def test(): 'ok' if test passed, anything else will fail the test. """ integration_params = demisto.params() - if integration_params.get('isFetch') and not integration_params.get('states'): + if integration_params.get("isFetch") and not integration_params.get("states"): raise DemistoException("Missing argument - You must provide at least one incident state.") - get_incidents_request( - { - 'created_after': date.today(), - 'state': 'open' - } - ) - demisto.results('ok') + get_incidents_request({"created_after": date.today(), "state": "open"}) + demisto.results("ok") # TRAP API @@ -223,8 +194,8 @@ def create_incident_field_context(incident): list. The parsed incident fields list """ incident_field_values = {} - for incident_field in incident.get('incident_field_values', []): - incident_field_values[incident_field['name'].replace(" ", "_")] = incident_field['value'] + for incident_field in incident.get("incident_field_values", []): + incident_field_values[incident_field["name"].replace(" ", "_")] = incident_field["value"] return incident_field_values @@ -239,28 +210,26 @@ def get_emails_context(event): list. The parsed emails list from the event """ emails_context = [] - for email in event.get('emails', []): + for email in event.get("emails", []): email_obj = { - 'sender': email.get('sender', {}).get('email'), - 'recipient': email.get('recipient', {}).get('email'), - 'subject': email.get('subject'), - 'message_id': email.get('messageId'), - 'body': email.get('body'), - 'body_type': email.get('bodyType'), - 'headers': email.get('headers'), - 'urls': email.get('urls'), - 'sender_vap': email.get('sender', {}).get('vap'), - 'recipient_vap': email.get('recipient', {}).get('vap'), - 'attachments': email.get('attachments'), + "sender": email.get("sender", {}).get("email"), + "recipient": email.get("recipient", {}).get("email"), + "subject": email.get("subject"), + "message_id": email.get("messageId"), + "body": email.get("body"), + "body_type": email.get("bodyType"), + "headers": email.get("headers"), + "urls": email.get("urls"), + "sender_vap": email.get("sender", {}).get("vap"), + "recipient_vap": email.get("recipient", {}).get("vap"), + "attachments": email.get("attachments"), } - message_delivery_time = email.get('messageDeliveryTime', {}) + message_delivery_time = email.get("messageDeliveryTime", {}) if message_delivery_time and isinstance(message_delivery_time, dict): - email_obj['message_delivery_time'] = message_delivery_time.get('millis') + email_obj["message_delivery_time"] = message_delivery_time.get("millis") elif message_delivery_time and isinstance(message_delivery_time, str): - email_obj['message_delivery_time'] = message_delivery_time - emails_context.append( - assign_params(**email_obj) - ) + email_obj["message_delivery_time"] = message_delivery_time + emails_context.append(assign_params(**email_obj)) return emails_context @@ -276,11 +245,11 @@ 
def create_incidents_context(incidents_list): """ context = list(incidents_list) for incident in context: - incident['incident_field_values'] = create_incident_field_context(incident) + incident["incident_field_values"] = create_incident_field_context(incident) - if incident.get('events'): - for event in incident['events']: - event['emails'] = get_emails_context(event) + if incident.get("events"): + for event in incident["events"]: + event["emails"] = get_emails_context(event) return context @@ -296,67 +265,76 @@ def create_incidents_human_readable(human_readable_message, incidents_list): str. The incidents human readable in markdown format """ human_readable = [] - human_readable_headers = ['ID', 'Created At', 'Type', 'Summary', 'Score', 'Event Count', 'Assignee', - 'Successful Quarantines', 'Failed Quarantines', 'Pending Quarantines'] + human_readable_headers = [ + "ID", + "Created At", + "Type", + "Summary", + "Score", + "Event Count", + "Assignee", + "Successful Quarantines", + "Failed Quarantines", + "Pending Quarantines", + ] for incident in incidents_list: - human_readable.append({ - 'Created At': incident.get('created_at'), - 'ID': incident.get('id'), - 'Type': incident.get('type'), - 'Summary': incident.get('summary'), - 'Score': incident.get('score'), - 'Event Count': incident.get('event_count'), - 'Assignee': incident.get('assignee'), - 'Successful Quarantines': incident.get('successful_quarantine'), - 'Failed Quarantines': incident.get('failed_quarantines'), - 'Pending Quarantines': incident.get('pending_quarantines') - }) + human_readable.append( + { + "Created At": incident.get("created_at"), + "ID": incident.get("id"), + "Type": incident.get("type"), + "Summary": incident.get("summary"), + "Score": incident.get("score"), + "Event Count": incident.get("event_count"), + "Assignee": incident.get("assignee"), + "Successful Quarantines": incident.get("successful_quarantine"), + "Failed Quarantines": incident.get("failed_quarantines"), + "Pending Quarantines": incident.get("pending_quarantines"), + } + ) return tableToMarkdown(human_readable_message, human_readable, human_readable_headers, removeNull=True) def list_incidents_command(): - """ Retrieves incidents from ProofPoint API """ + """Retrieves incidents from ProofPoint API""" args = demisto.args() - limit = int(args.pop('limit')) + limit = int(args.pop("limit")) incidents_list = get_incidents_request(args) incidents_list = incidents_list[:limit] - human_readable = create_incidents_human_readable('List Incidents Results:', incidents_list) + human_readable = create_incidents_human_readable("List Incidents Results:", incidents_list) context = create_incidents_context(incidents_list) - return_outputs(human_readable, {'ProofPointTRAP.Incident(val.id === obj.id)': context}, incidents_list) + return_outputs(human_readable, {"ProofPointTRAP.Incident(val.id === obj.id)": context}, incidents_list) def get_incident_command(): """ - Retrieves a single incident from ProofPoint API + Retrieves a single incident from ProofPoint API """ args = demisto.args() - incident_id = args.pop('incident_id') - expand_events = args.get('expand_events') - fullurl = BASE_URL + f'api/incidents/{incident_id}.json' + incident_id = args.pop("incident_id") + expand_events = args.get("expand_events") + fullurl = BASE_URL + f"api/incidents/{incident_id}.json" incident_data = requests.get( fullurl, - headers={ - 'Content-Type': 'application/json', - 'Authorization': API_KEY - }, + headers={"Content-Type": "application/json", "Authorization": API_KEY}, params={ - 
'expand_events': expand_events, + "expand_events": expand_events, }, verify=VERIFY_CERTIFICATE, ) if incident_data.status_code < 200 or incident_data.status_code >= 300: - return_error(f'Get incident failed. URL: {fullurl}, StatusCode: {incident_data.status_code}') + return_error(f"Get incident failed. URL: {fullurl}, StatusCode: {incident_data.status_code}") incident_data = incident_data.json() - human_readable = create_incidents_human_readable('Incident Results:', [incident_data]) + human_readable = create_incidents_human_readable("Incident Results:", [incident_data]) context = create_incidents_context([incident_data]) - return_outputs(human_readable, {'ProofPointTRAP.Incident(val.id === obj.id)': context}, incident_data) + return_outputs(human_readable, {"ProofPointTRAP.Incident(val.id === obj.id)": context}, incident_data) def pass_sources_list_filter(incident, sources_list): @@ -372,7 +350,7 @@ def pass_sources_list_filter(incident, sources_list): if len(sources_list) == 0: return True - return any(source in incident.get('event_sources') for source in sources_list) + return any(source in incident.get("event_sources") for source in sources_list) def pass_abuse_disposition_filter(incident, abuse_disposition_values): @@ -388,8 +366,8 @@ def pass_abuse_disposition_filter(incident, abuse_disposition_values): if len(abuse_disposition_values) == 0: return True - for incident_field in incident.get('incident_field_values', []): - if incident_field['name'] == 'Abuse Disposition' and incident_field['value'] in abuse_disposition_values: + for incident_field in incident.get("incident_field_values", []): + if incident_field["name"] == "Abuse Disposition" and incident_field["value"] in abuse_disposition_values: return True return False @@ -406,15 +384,14 @@ def filter_incidents(incidents_list): """ filtered_incidents_list = [] params = demisto.params() - sources_list = argToList(params.get('event_sources')) - abuse_disposition_values = argToList(params.get('abuse_disposition')) + sources_list = argToList(params.get("event_sources")) + abuse_disposition_values = argToList(params.get("abuse_disposition")) if not sources_list and not abuse_disposition_values: return incidents_list for incident in incidents_list: - if pass_sources_list_filter(incident, sources_list) and pass_abuse_disposition_filter(incident, - abuse_disposition_values): + if pass_sources_list_filter(incident, sources_list) and pass_abuse_disposition_filter(incident, abuse_disposition_values): filtered_incidents_list.append(incident) return filtered_incidents_list @@ -429,23 +406,19 @@ def get_incidents_request(params): Returns: list. The incidents returned from the API call """ - fullurl = BASE_URL + 'api/incidents' + fullurl = BASE_URL + "api/incidents" incidents_list = requests.get( - fullurl, - headers={ - 'Content-Type': 'application/json', - 'Authorization': API_KEY - }, - params=params, - verify=VERIFY_CERTIFICATE + fullurl, headers={"Content-Type": "application/json", "Authorization": API_KEY}, params=params, verify=VERIFY_CERTIFICATE ) if incidents_list.status_code < 200 or incidents_list.status_code >= 300: if incidents_list.status_code == 502 or incidents_list.status_code == 504: - return_error('The operation failed. There is a possibility you are trying to get too many incidents.\n' - 'You may consider adding a filter argument to the command.\n' - f'URL: {fullurl}, StatusCode: {incidents_list.status_code}') + return_error( + "The operation failed. 
There is a possibility you are trying to get too many incidents.\n" + "You may consider adding a filter argument to the command.\n" + f"URL: {fullurl}, StatusCode: {incidents_list.status_code}" + ) else: - return_error(f'The operation failed. URL: {fullurl}, StatusCode: {incidents_list.status_code}') + return_error(f"The operation failed. URL: {fullurl}, StatusCode: {incidents_list.status_code}") return incidents_list.json() @@ -457,20 +430,22 @@ def get_time_delta(fetch_delta): Returns: The time delta. """ - fetch_delta_split = fetch_delta.strip().split(' ') + fetch_delta_split = fetch_delta.strip().split(" ") if len(fetch_delta_split) != 2: - raise Exception( - 'The fetch_delta is invalid. Please make sure to insert both the number and the unit of the fetch delta.') + raise Exception("The fetch_delta is invalid. Please make sure to insert both the number and the unit of the fetch delta.") unit = fetch_delta_split[1].lower() number = int(fetch_delta_split[0]) - if unit not in ['minute', 'minutes', - 'hour', 'hours', - ]: + if unit not in [ + "minute", + "minutes", + "hour", + "hours", + ]: raise Exception('The unit of fetch_delta is invalid. Possible values are "minutes" or "hours".') - if 'hour' in unit: + if "hour" in unit: time_delta = timedelta(hours=number) # batch by hours else: time_delta = timedelta(minutes=number) # batch by minutes @@ -491,8 +466,8 @@ def get_new_incidents(request_params, last_fetched_id): """ incidents = get_incidents_request(request_params) filtered_incidents_list = filter_incidents(incidents) - ordered_incidents = sorted(filtered_incidents_list, key=lambda k: (k['created_at'], k['id'])) - return list(filter(lambda incident: int(incident.get('id')) > last_fetched_id, ordered_incidents)) + ordered_incidents = sorted(filtered_incidents_list, key=lambda k: (k["created_at"], k["id"])) + return list(filter(lambda incident: int(incident.get("id")) > last_fetched_id, ordered_incidents)) def get_incidents_batch_by_time_request(params): @@ -508,33 +483,38 @@ def get_incidents_batch_by_time_request(params): """ incidents_list = [] # type:list - fetch_delta = params.get('fetch_delta', '6 hours') - fetch_limit = int(params.get('fetch_limit', '50')) - last_fetched_id = int(params.get('last_fetched_id', '0')) + fetch_delta = params.get("fetch_delta", "6 hours") + fetch_limit = int(params.get("fetch_limit", "50")) + last_fetched_id = int(params.get("last_fetched_id", "0")) current_time = datetime.now() time_delta = get_time_delta(fetch_delta) - created_after = datetime.strptime(params.get('created_after'), TIME_FORMAT) + created_after = datetime.strptime(params.get("created_after"), TIME_FORMAT) created_before = created_after + time_delta request_params = { - 'state': params.get('state'), - 'created_after': created_after.isoformat().split('.')[0] + 'Z', - 'created_before': created_before.isoformat().split('.')[0] + 'Z' + "state": params.get("state"), + "created_after": created_after.isoformat().split(".")[0] + "Z", + "created_before": created_before.isoformat().split(".")[0] + "Z", } - if message_id := params.get('message_id'): # used in search quarantine - request_params['message_id'] = message_id + if message_id := params.get("message_id"): # used in search quarantine + request_params["message_id"] = message_id # while loop relevant for fetching old incidents while created_before < current_time and len(incidents_list) < fetch_limit: demisto.debug( "PTR: Entered the batch loop , with fetch_limit {} and incidents list {} and incident length {} " "with created_after {} and 
created_before {}.".format( - str(fetch_limit), str([incident.get('id') for incident in incidents_list]), str(len(incidents_list)), - str(request_params['created_after']), str(request_params['created_before']))) + str(fetch_limit), + str([incident.get("id") for incident in incidents_list]), + str(len(incidents_list)), + str(request_params["created_after"]), + str(request_params["created_before"]), + ) + ) new_incidents = get_new_incidents(request_params, last_fetched_id) incidents_list.extend(new_incidents) @@ -544,20 +524,22 @@ def get_incidents_batch_by_time_request(params): created_before = created_before + time_delta # updating params according to the new times - request_params['created_after'] = created_after.isoformat().split('.')[0] + 'Z' - request_params['created_before'] = created_before.isoformat().split('.')[0] + 'Z' - demisto.debug(f"PTR: End of the current batch loop with {str(len(incidents_list))} incidents") + request_params["created_after"] = created_after.isoformat().split(".")[0] + "Z" + request_params["created_before"] = created_before.isoformat().split(".")[0] + "Z" + demisto.debug(f"PTR: End of the current batch loop with {len(incidents_list)!s} incidents") # fetching the last batch when created_before is bigger then current time = fetching new incidents if len(incidents_list) < fetch_limit: # fetching the last batch - request_params['created_before'] = current_time.isoformat().split('.')[0] + 'Z' + request_params["created_before"] = current_time.isoformat().split(".")[0] + "Z" new_incidents = get_new_incidents(request_params, last_fetched_id) incidents_list.extend(new_incidents) demisto.debug( "PTR: Finished the last batch, with fetch_limit {} and incidents list {} and incident length {}".format( - str(fetch_limit), str([incident.get('id') for incident in incidents_list]), str(len(incidents_list)))) + str(fetch_limit), str([incident.get("id") for incident in incidents_list]), str(len(incidents_list)) + ) + ) incidents_list_limit = incidents_list[:fetch_limit] return incidents_list_limit @@ -565,57 +547,57 @@ def get_incidents_batch_by_time_request(params): def fetch_incidents_command(): """ - Fetches incidents from the ProofPoint API. + Fetches incidents from the ProofPoint API. 
""" integration_params = demisto.params() - last_fetch = demisto.getLastRun().get('last_fetch', {}) - last_fetched_id = demisto.getLastRun().get('last_fetched_incident_id', {}) + last_fetch = demisto.getLastRun().get("last_fetch", {}) + last_fetched_id = demisto.getLastRun().get("last_fetched_incident_id", {}) - fetch_delta = integration_params.get('fetch_delta', '6 hours') - fetch_limit = integration_params.get('fetch_limit', '50') + fetch_delta = integration_params.get("fetch_delta", "6 hours") + fetch_limit = integration_params.get("fetch_limit", "50") - incidents_states = integration_params.get('states') + incidents_states = integration_params.get("states") for state in incidents_states: if not last_fetch.get(state): last_fetch[state] = FIRST_FETCH for state in incidents_states: if not last_fetched_id.get(state): - last_fetched_id[state] = '0' + last_fetched_id[state] = "0" incidents = [] for state in incidents_states: request_params = { - 'created_after': last_fetch[state], - 'last_fetched_id': last_fetched_id[state], - 'fetch_delta': fetch_delta, - 'state': state, - 'fetch_limit': fetch_limit + "created_after": last_fetch[state], + "last_fetched_id": last_fetched_id[state], + "fetch_delta": fetch_delta, + "state": state, + "fetch_limit": fetch_limit, } id = last_fetched_id[state] incidents_list = get_incidents_batch_by_time_request(request_params) for incident in incidents_list: - id = incident.get('id') - inc = { - 'name': f'ProofPoint_TRAP - ID {id}', - 'rawJSON': json.dumps(incident), - 'occurred': incident['created_at'] - } + id = incident.get("id") + inc = {"name": f"ProofPoint_TRAP - ID {id}", "rawJSON": json.dumps(incident), "occurred": incident["created_at"]} incidents.append(inc) if incidents: - last_fetch_time = incidents[-1]['occurred'] - last_fetch[state] = \ - (datetime.strptime(last_fetch_time, TIME_FORMAT) - timedelta(minutes=1)).isoformat().split('.')[0] + 'Z' + last_fetch_time = incidents[-1]["occurred"] + last_fetch[state] = (datetime.strptime(last_fetch_time, TIME_FORMAT) - timedelta(minutes=1)).isoformat().split(".")[ + 0 + ] + "Z" last_fetched_id[state] = id - demisto.debug("PTR: End of current fetch function with last_fetch {} and last_fetched_id {}".format(str(last_fetch), str( - last_fetched_id))) + demisto.debug( + "PTR: End of current fetch function with last_fetch {} and last_fetched_id {}".format( + str(last_fetch), str(last_fetched_id) + ) + ) - demisto.setLastRun({'last_fetch': last_fetch}) - demisto.setLastRun({'last_fetched_incident_id': last_fetched_id}) + demisto.setLastRun({"last_fetch": last_fetch}) + demisto.setLastRun({"last_fetched_incident_id": last_fetched_id}) - demisto.info(f'extracted {len(incidents)} incidents') + demisto.info(f"extracted {len(incidents)} incidents") demisto.incidents(incidents) @@ -630,85 +612,74 @@ def create_add_comment_human_readable(incident): str. 
The command human readable in markdown format """ human_readable = [] - human_readable_headers = ['Incident ID', 'Created At', 'Details', 'Comments Summary', 'Action ID'] - incident_id = incident.get('incident_id') - human_readable.append({ - 'Created At': incident.get('created_at'), - 'Incident ID': incident_id, - 'Details': incident.get('detail'), - 'Comments Summary': incident.get('summary'), - 'Action ID': incident.get('id') - }) + human_readable_headers = ["Incident ID", "Created At", "Details", "Comments Summary", "Action ID"] + incident_id = incident.get("incident_id") + human_readable.append( + { + "Created At": incident.get("created_at"), + "Incident ID": incident_id, + "Details": incident.get("detail"), + "Comments Summary": incident.get("summary"), + "Action ID": incident.get("id"), + } + ) - return tableToMarkdown(f'Comments added successfully to incident:{incident_id}', human_readable, - human_readable_headers, removeNull=True) + return tableToMarkdown( + f"Comments added successfully to incident:{incident_id}", human_readable, human_readable_headers, removeNull=True + ) def add_comment_to_incident_command(): """ - Adds comments to an incident by incident ID + Adds comments to an incident by incident ID """ args = demisto.args() - incident_id = args.get('incident_id') - comments_to_add = args.get('comments') - details = args.get('details') - request_body = { - "summary": comments_to_add, - "detail": details - } + incident_id = args.get("incident_id") + comments_to_add = args.get("comments") + details = args.get("details") + request_body = {"summary": comments_to_add, "detail": details} - fullurl = BASE_URL + f'api/incidents/{incident_id}/comments.json' + fullurl = BASE_URL + f"api/incidents/{incident_id}/comments.json" incident_data = requests.post( fullurl, - headers={ - 'Content-Type': 'application/json', - 'Authorization': API_KEY - }, + headers={"Content-Type": "application/json", "Authorization": API_KEY}, json=request_body, - verify=VERIFY_CERTIFICATE + verify=VERIFY_CERTIFICATE, ) if incident_data.status_code < 200 or incident_data.status_code >= 300: - return_error(f'Add comment to incident command failed. URL: {fullurl}, ' - f'StatusCode: {incident_data.status_code}') + return_error(f"Add comment to incident command failed. 
URL: {fullurl}, " f"StatusCode: {incident_data.status_code}")
 
     incident_data = incident_data.json()
     human_readable = create_add_comment_human_readable(incident_data)
-    return_outputs(human_readable,
-                   {'ProofPointTRAP.IncidentComment(val.incident_id === obj.incident_id)': incident_data},
-                   incident_data)
+    return_outputs(
+        human_readable, {"ProofPointTRAP.IncidentComment(val.incident_id === obj.incident_id)": incident_data}, incident_data
+    )
 
 
 def add_user_to_incident_command():
     """
-        Adds user to an incident by incident ID
+    Adds user to an incident by incident ID
     """
     args = demisto.args()
-    incident_id = args.get('incident_id')
-    attackers = argToList(args.get('attackers'))
-    targets = argToList(args.get('targets'))
-    request_body = {
-        "targets": targets,
-        "attackers": attackers
-    }
+    incident_id = args.get("incident_id")
+    attackers = argToList(args.get("attackers"))
+    targets = argToList(args.get("targets"))
+    request_body = {"targets": targets, "attackers": attackers}
 
-    fullurl = BASE_URL + 'api/incidents/{incident_id}/users.json'
+    fullurl = BASE_URL + "api/incidents/{incident_id}/users.json"
     incident_data = requests.post(
         fullurl,
-        headers={
-            'Content-Type': 'application/json',
-            'Authorization': API_KEY
-        },
+        headers={"Content-Type": "application/json", "Authorization": API_KEY},
         json=request_body,
-        verify=VERIFY_CERTIFICATE
+        verify=VERIFY_CERTIFICATE,
    )
 
     if incident_data.status_code < 200 or incident_data.status_code >= 300:
-        return_error(f'Add comment to incident command failed. URL: {fullurl}, '
-                     f'StatusCode: {incident_data.status_code}')
+        return_error(f"Add comment to incident command failed. URL: {fullurl}, " f"StatusCode: {incident_data.status_code}")
 
-    return_outputs(f'The user was added successfully to incident {incident_id}', {}, {})
+    return_outputs(f"The user was added successfully to incident {incident_id}", {}, {})
 
 
 def parse_json_argument(argument_string_value, argument_name):
@@ -724,8 +695,7 @@ def parse_json_argument(argument_string_value, argument_name):
 
 
 def prepare_ingest_alert_request_body(args):
-    json_arguments = ['attacker', 'cnc_host', 'detector', 'email', 'forensics_hosts', 'target', 'threat_info',
-                      'custom_fields']
+    json_arguments = ["attacker", "cnc_host", "detector", "email", "forensics_hosts", "target", "threat_info", "custom_fields"]
     request_body = {}  # type: dict
     for argument_name, argument_value in args.items():
         if argument_name in json_arguments:
@@ -739,64 +709,52 @@ def ingest_alert_command():
     """
-        Ingest an alert into Threat Response.
+    Ingest an alert into Threat Response.
     """
     args = demisto.args()
-    json_source_id = args.pop('post_url_id', demisto.params().get('post_url_id'))
+    json_source_id = args.pop("post_url_id", demisto.params().get("post_url_id"))
     if not json_source_id:
-        return_error("To ingest alert into TRAP, you mast specify a post_url_id,"
                     "either as an argument or as an integration parameter.")
+        return_error(
+            "To ingest alert into TRAP, you must specify a post_url_id, " "either as an argument or as an integration parameter."
+ ) request_body = prepare_ingest_alert_request_body(assign_params(**args)) - fullurl = BASE_URL + f'threat/json_event/events/{json_source_id}' + fullurl = BASE_URL + f"threat/json_event/events/{json_source_id}" alert_data = requests.post( - fullurl, - headers={ - 'Content-Type': 'application/json' - }, - json=request_body, - verify=VERIFY_CERTIFICATE + fullurl, headers={"Content-Type": "application/json"}, json=request_body, verify=VERIFY_CERTIFICATE ) if alert_data.status_code < 200 or alert_data.status_code >= 300: - return_error(f'Failed to ingest the alert into TRAP. URL: {fullurl}, ' - f'StatusCode: {alert_data.status_code}') + return_error(f"Failed to ingest the alert into TRAP. URL: {fullurl}, " f"StatusCode: {alert_data.status_code}") - return_outputs('The alert was successfully ingested to TRAP', {}, {}) + return_outputs("The alert was successfully ingested to TRAP", {}, {}) def close_incident_command(): args = demisto.args() - incident_id = args.get('incident_id') - details = args.get('details') - summary = args.get('summary') - request_body = { - "summary": summary, - "detail": details - } + incident_id = args.get("incident_id") + details = args.get("details") + summary = args.get("summary") + request_body = {"summary": summary, "detail": details} - fullurl = BASE_URL + f'api/incidents/{incident_id}/close.json' + fullurl = BASE_URL + f"api/incidents/{incident_id}/close.json" incident_data = requests.post( fullurl, - headers={ - 'Content-Type': 'application/json', - 'Authorization': API_KEY - }, + headers={"Content-Type": "application/json", "Authorization": API_KEY}, json=request_body, - verify=VERIFY_CERTIFICATE + verify=VERIFY_CERTIFICATE, ) if incident_data.status_code < 200 or incident_data.status_code >= 300: - return_error(f'Incident closure failed. URL: {fullurl}, ' - f'StatusCode: {incident_data.status_code}') + return_error(f"Incident closure failed. 
URL: {fullurl}, " f"StatusCode: {incident_data.status_code}") - return_outputs(f'The incident {incident_id} was successfully closed', {}, {}) + return_outputs(f"The incident {incident_id} was successfully closed", {}, {}) def search_quarantine(): args = demisto.args() - arg_time = dateparser.parse(args.get('time')) + arg_time = dateparser.parse(args.get("time")) emailTAPtime = 0 if isinstance(arg_time, datetime): emailTAPtime = int(arg_time.timestamp()) @@ -804,183 +762,190 @@ def search_quarantine(): return_error("Timestamp was bad") lstAlert = [] - mid = args.get('message_id') - recipient = args.get('recipient') - limit_quarantine_occurred_time = argToBoolean(args.get('limit_quarantine_occurred_time', 'True')) - quarantine_limit = arg_to_number(args.get('quarantine_limit', 120)) - fetch_delta = arg_to_number(args.get('fetch_delta', 6)) + mid = args.get("message_id") + recipient = args.get("recipient") + limit_quarantine_occurred_time = argToBoolean(args.get("limit_quarantine_occurred_time", "True")) + quarantine_limit = arg_to_number(args.get("quarantine_limit", 120)) + fetch_delta = arg_to_number(args.get("fetch_delta", 6)) request_params = { - 'created_after': datetime.strftime(arg_time - get_time_delta('1 hour'), TIME_FORMAT), # for safety - 'fetch_delta': f'{fetch_delta} hours', - 'fetch_limit': '50', - 'message_id': mid + "created_after": datetime.strftime(arg_time - get_time_delta("1 hour"), TIME_FORMAT), # for safety + "fetch_delta": f"{fetch_delta} hours", + "fetch_limit": "50", + "message_id": mid, } incidents_list = get_incidents_batch_by_time_request(request_params) - found = {'email': False, 'mid': False, 'quarantine': False} + found = {"email": False, "mid": False, "quarantine": False} resQ = [] # Collecting emails inside alert to find those with same recipient and messageId for incident in incidents_list: - for alert in incident.get('events'): + for alert in incident.get("events"): demisto.debug(f'New alert being processed with Alertid = {alert.get("id")}') - for email in alert.get('emails'): + for email in alert.get("emails"): demisto.debug(f'New email being processed with messageid {email.get("messageId")}') - message_delivery_time = email.get('messageDeliveryTime', {}) - demisto.debug(f'PTR: Got {message_delivery_time=} with type {type(message_delivery_time)}.') + message_delivery_time = email.get("messageDeliveryTime", {}) + demisto.debug(f"PTR: Got {message_delivery_time=} with type {type(message_delivery_time)}.") if message_delivery_time and isinstance(message_delivery_time, dict): - message_delivery_time = message_delivery_time.get('millis') + message_delivery_time = message_delivery_time.get("millis") elif message_delivery_time and isinstance(message_delivery_time, str): message_delivery_time = dateparser.parse(message_delivery_time) if message_delivery_time: message_delivery_time = int(message_delivery_time.timestamp() * 1000) else: - demisto.info(f'PTR: Could not parse time of incident {incident.get("id")}, got ' - f'{message_delivery_time=}') + demisto.info( + f'PTR: Could not parse time of incident {incident.get("id")}, got ' f'{message_delivery_time=}' + ) continue - if email.get('messageId') == mid and email.get('recipient').get('email') == recipient and message_delivery_time: - found['mid'] = True - demisto.debug('PTR: Found the email, adding the alert') + if email.get("messageId") == mid and email.get("recipient").get("email") == recipient and message_delivery_time: + found["mid"] = True + demisto.debug("PTR: Found the email, adding the alert") 
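+                        # messageDeliveryTime arrives as epoch milliseconds (see get_emails_context),
+                        # while emailTAPtime above was derived in epoch seconds, so the value is
+                        # divided by 1000 below before the two timestamps are compared.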
emailTRAPtimestamp = int(message_delivery_time / 1000) if emailTAPtime == emailTRAPtimestamp: demisto.debug(f'PTR: Adding the alert with id {alert.get("id")}') - found['email'] = True - lstAlert.append({ - 'incidentid': incident.get('id'), - 'alertid': alert.get('id'), - 'alerttime': alert.get('received'), - 'incidenttime': incident.get('created_at'), - 'messageId': mid, - 'quarantine_results': incident.get('quarantine_results') - }) + found["email"] = True + lstAlert.append( + { + "incidentid": incident.get("id"), + "alertid": alert.get("id"), + "alerttime": alert.get("received"), + "incidenttime": incident.get("created_at"), + "messageId": mid, + "quarantine_results": incident.get("quarantine_results"), + } + ) else: - demisto.debug(f'PTR: Alert id {alert.get("id")} found but not added to lstAlert list as ' - f'{emailTAPtime=} did not match {emailTRAPtimestamp=}') + demisto.debug( + f'PTR: Alert id {alert.get("id")} found but not added to lstAlert list as ' + f'{emailTAPtime=} did not match {emailTRAPtimestamp=}' + ) else: - demisto.debug(f'PTR: skipped message with ID {email.get("messageId")} and recipient' - f' {email.get("recipient").get("email")}. As it did not match {mid=} and' - f' recipient={email.get("recipient").get("email")}') + demisto.debug( + f'PTR: skipped message with ID {email.get("messageId")} and recipient' + f' {email.get("recipient").get("email")}. As it did not match {mid=} and' + f' recipient={email.get("recipient").get("email")}' + ) quarantineFoundcpt = 0 # Go though the alert list, and check the quarantine results: for alert in lstAlert: - for quarantine in alert.get('quarantine_results'): - if quarantine.get('messageId') == mid and quarantine.get('recipient') == recipient: - found['quarantine'] = True + for quarantine in alert.get("quarantine_results"): + if quarantine.get("messageId") == mid and quarantine.get("recipient") == recipient: + found["quarantine"] = True tsquarantine = dateparser.parse(quarantine.get("startTime")) tsalert = dateparser.parse(alert.get("alerttime")) if isinstance(tsquarantine, datetime) and isinstance(tsalert, datetime): diff = (tsquarantine - tsalert).total_seconds() # Append alerts if limit_quarantine_occurred_time=False # else checks diff is less then quarantine_limit - if ((not limit_quarantine_occurred_time) - or (quarantine_limit and 0 < diff < quarantine_limit)): - resQ.append({ - 'quarantine': quarantine, - 'alert': { - 'id': alert.get('alertid'), - 'time': alert.get('alerttime') - }, - 'incident': { - 'id': alert.get('incidentid'), - 'time': alert.get('incidenttime') + if (not limit_quarantine_occurred_time) or (quarantine_limit and 0 < diff < quarantine_limit): + resQ.append( + { + "quarantine": quarantine, + "alert": {"id": alert.get("alertid"), "time": alert.get("alerttime")}, + "incident": {"id": alert.get("incidentid"), "time": alert.get("incidenttime")}, } - }) + ) else: quarantineFoundcpt += 1 demisto.debug( f'PTR: Quarantine found for {quarantine.get("messageId")} but not returned as it did not meet filter' f' requirements. limit_quarantine_occurred_time = {limit_quarantine_occurred_time} with type ' - f'{type(limit_quarantine_occurred_time)}. diff = {diff}, quarantine_limit = {quarantine_limit}') + f'{type(limit_quarantine_occurred_time)}. 
diff = {diff}, quarantine_limit = {quarantine_limit}' + ) else: demisto.debug(f"PTR: Failed to parse timestamp of incident: {alert=} {quarantine=}.") if quarantineFoundcpt > 0: return CommandResults( - readable_output=(f"{mid} Message ID matches to {quarantineFoundcpt} emails quarantined, but time between alert " - f"received and the quarantine starting exceeded the quarantine_limit provided")) - if not found['mid']: + readable_output=( + f"{mid} Message ID matches to {quarantineFoundcpt} emails quarantined, but time between alert " + f"received and the quarantine starting exceeded the quarantine_limit provided" + ) + ) + if not found["mid"]: return CommandResults(readable_output=f"Message ID {mid} not found in TRAP incidents") - midtxt = f'{mid} Message ID found in TRAP alerts,' - if not found['email']: + midtxt = f"{mid} Message ID found in TRAP alerts," + if not found["email"]: return CommandResults( - readable_output=f"{midtxt} but timestamp between email delivery time and time given as argument doesn't match") - elif not found['quarantine']: + readable_output=f"{midtxt} but timestamp between email delivery time and time given as argument doesn't match" + ) + elif not found["quarantine"]: demisto.debug("PTR: " + "\n".join([json.dumps(alt, indent=4) for alt in lstAlert])) return CommandResults(f"{midtxt} but not in the quarantine list meaning that email has not be quarantined.") return CommandResults( - outputs_prefix='ProofPointTRAP.Quarantine', + outputs_prefix="ProofPointTRAP.Quarantine", outputs=resQ, readable_output=tableToMarkdown("Quarantine Result", resQ), - raw_response=resQ + raw_response=resQ, ) -''' EXECUTION CODE ''' +""" EXECUTION CODE """ def main(): - handle_proxy(demisto.params().get('proxy')) + handle_proxy(demisto.params().get("proxy")) command = demisto.command() - demisto.info(f'Command being called is {command}') + demisto.info(f"Command being called is {command}") - if command == 'test-module': + if command == "test-module": test() - elif command == 'fetch-incidents': + elif command == "fetch-incidents": fetch_incidents_command() - elif command == 'proofpoint-tr-get-list': + elif command == "proofpoint-tr-get-list": get_list_command() - elif command == 'proofpoint-tr-add-to-list': + elif command == "proofpoint-tr-add-to-list": add_to_list_command() - elif command == 'proofpoint-tr-block-ip': + elif command == "proofpoint-tr-block-ip": block_ip_command() - elif command == 'proofpoint-tr-block-domain': + elif command == "proofpoint-tr-block-domain": block_domain_command() - elif command == 'proofpoint-tr-block-url': + elif command == "proofpoint-tr-block-url": block_url_command() - elif command == 'proofpoint-tr-block-hash': + elif command == "proofpoint-tr-block-hash": block_hash_command() - elif command == 'proofpoint-tr-delete-indicator': + elif command == "proofpoint-tr-delete-indicator": delete_indicator_command() - elif command == 'proofpoint-tr-search-indicator': + elif command == "proofpoint-tr-search-indicator": search_indicator_command() - elif command == 'proofpoint-tr-list-incidents': + elif command == "proofpoint-tr-list-incidents": list_incidents_command() - elif command == 'proofpoint-tr-get-incident': + elif command == "proofpoint-tr-get-incident": get_incident_command() - elif command == 'proofpoint-tr-update-incident-comment': + elif command == "proofpoint-tr-update-incident-comment": add_comment_to_incident_command() - elif command == 'proofpoint-tr-add-user-to-incident': + elif command == "proofpoint-tr-add-user-to-incident": 
add_user_to_incident_command() - elif command == 'proofpoint-tr-ingest-alert': + elif command == "proofpoint-tr-ingest-alert": ingest_alert_command() - elif command == 'proofpoint-tr-close-incident': + elif command == "proofpoint-tr-close-incident": close_incident_command() - elif command == 'proofpoint-tr-verify-quarantine': + elif command == "proofpoint-tr-verify-quarantine": return_results(search_quarantine()) -if __name__ == '__builtin__' or __name__ == 'builtins': +if __name__ == "__builtin__" or __name__ == "builtins": main() diff --git a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse_test.py b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse_test.py index 98400b1bd8fd..fc9f4f6ee024 100644 --- a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse_test.py +++ b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse_test.py @@ -1,18 +1,23 @@ import copy import pytest - from CommonServerPython import * -from ProofpointThreatResponse import (create_incident_field_context, - filter_incidents, get_emails_context, - get_incident_command, - get_incidents_batch_by_time_request, - get_new_incidents, get_time_delta, - pass_abuse_disposition_filter, - pass_sources_list_filter, - prepare_ingest_alert_request_body, - close_incident_command, - search_quarantine, list_incidents_command, search_indicator_command) +from ProofpointThreatResponse import ( + close_incident_command, + create_incident_field_context, + filter_incidents, + get_emails_context, + get_incident_command, + get_incidents_batch_by_time_request, + get_new_incidents, + get_time_delta, + list_incidents_command, + pass_abuse_disposition_filter, + pass_sources_list_filter, + prepare_ingest_alert_request_body, + search_indicator_command, + search_quarantine, +) MOCK_INCIDENT_1 = { "id": 1, @@ -23,39 +28,21 @@ "state": "Open", "created_at": "2018-05-26T21:07:17Z", "event_count": 3, - "event_sources": [ - "Proofpoint TAP" - ], - "users": [ - "" - ], + "event_sources": ["Proofpoint TAP"], + "users": [""], "assignee": "Unassigned", "team": "Unassigned", "hosts": { - "attacker": [ - "" - ], + "attacker": [""], "forensics": [ "", - ] + ], }, "incident_field_values": [ - { - "name": "Attack Vector", - "value": "Email" - }, - { - "name": "Classification", - "value": "Spam" - }, - { - "name": "Severity", - "value": "Critical" - }, - { - "name": "Abuse Disposition", - "value": "Unknown" - } + {"name": "Attack Vector", "value": "Email"}, + {"name": "Classification", "value": "Spam"}, + {"name": "Severity", "value": "Critical"}, + {"name": "Abuse Disposition", "value": "Unknown"}, ], "events": [ { @@ -71,27 +58,19 @@ "malwareName": "", "emails": [ { - "sender": { - "email": "test" - }, - "recipient": { - "email": "test" - }, + "sender": {"email": "test"}, + "recipient": {"email": "test"}, "subject": "test", "messageId": "test", "messageDeliveryTime": { - "chronology": { - "zone": { - "id": "UTC" - } - }, + "chronology": {"zone": {"id": "UTC"}}, "millis": 1544640072000, }, "abuseCopy": "false", "body": "test", - 'bodyType': "test", - 'headers': "test", - 'urls': "test" + "bodyType": "test", + "headers": "test", + "urls": "test", } ], } @@ -99,33 +78,31 @@ "quarantine_results": [], "successful_quarantines": 0, "failed_quarantines": 0, - "pending_quarantines": 0 + "pending_quarantines": 0, } MOCK_INCIDENT_2 = copy.deepcopy(MOCK_INCIDENT_1) 
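+# MOCK_INCIDENT_2 swaps the dict-style messageDeliveryTime for a plain string,
+# exercising the isinstance(message_delivery_time, str) branch of get_emails_context.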
-MOCK_INCIDENT_2['events'][0]['emails'][0]['messageDeliveryTime'] = 'messageDeliveryTime' +MOCK_INCIDENT_2["events"][0]["emails"][0]["messageDeliveryTime"] = "messageDeliveryTime" INCIDENT_FIELD_CONTEXT = { "Attack_Vector": "Email", "Classification": "Spam", "Severity": "Critical", - "Abuse_Disposition": "Unknown" + "Abuse_Disposition": "Unknown", } -INCIDENT_FIELD_INPUT = [ - (MOCK_INCIDENT_1, INCIDENT_FIELD_CONTEXT) -] +INCIDENT_FIELD_INPUT = [(MOCK_INCIDENT_1, INCIDENT_FIELD_CONTEXT)] def get_fetch_data(): - with open('./test_data/raw_response.json') as f: + with open("./test_data/raw_response.json") as f: file = json.loads(f.read()) - return file.get('result') + return file.get("result") FETCH_RESPONSE = get_fetch_data() -@pytest.mark.parametrize('incident, answer', INCIDENT_FIELD_INPUT) +@pytest.mark.parametrize("incident, answer", INCIDENT_FIELD_INPUT) def test_get_incident_field_context(incident, answer): incident_field_values = create_incident_field_context(incident) assert incident_field_values == answer @@ -133,24 +110,22 @@ def test_get_incident_field_context(incident, answer): EMAIL_RESULT = [ { - 'sender': "test", - 'recipient': "test", - 'subject': "test", - 'message_id': "test", - 'message_delivery_time': 1544640072000, - 'body': "test", - 'body_type': "test", - 'headers': "test", - 'urls': "test" + "sender": "test", + "recipient": "test", + "subject": "test", + "message_id": "test", + "message_delivery_time": 1544640072000, + "body": "test", + "body_type": "test", + "headers": "test", + "urls": "test", } ] -EMAILS_CONTEXT_INPUT = [ - (MOCK_INCIDENT_1['events'][0], EMAIL_RESULT) -] +EMAILS_CONTEXT_INPUT = [(MOCK_INCIDENT_1["events"][0], EMAIL_RESULT)] -@pytest.mark.parametrize('event, answer', EMAILS_CONTEXT_INPUT) +@pytest.mark.parametrize("event, answer", EMAILS_CONTEXT_INPUT) def test_get_emails_context(event, answer): emails_context = get_emails_context(event) assert emails_context == answer @@ -160,56 +135,53 @@ def test_get_emails_context(event, answer): (["Proofpoint TAP"], True), ([], True), (["No such source"], False), - (["No such source", "Proofpoint TAP"], True) + (["No such source", "Proofpoint TAP"], True), ] -@pytest.mark.parametrize('sources_list, expected_answer', SOURCE_LIST_INPUT) +@pytest.mark.parametrize("sources_list, expected_answer", SOURCE_LIST_INPUT) def test_pass_sources_list_filter(sources_list, expected_answer): result = pass_sources_list_filter(MOCK_INCIDENT_1, sources_list) assert result == expected_answer -ABUSE_DISPOSITION_INPUT = [ - (["Unknown"], True), - ([], True), - (["No such value"], False), - (["No such value", "Unknown"], True) -] +ABUSE_DISPOSITION_INPUT = [(["Unknown"], True), ([], True), (["No such value"], False), (["No such value", "Unknown"], True)] -@pytest.mark.parametrize('abuse_dispotion_values, expected_answer', ABUSE_DISPOSITION_INPUT) +@pytest.mark.parametrize("abuse_dispotion_values, expected_answer", ABUSE_DISPOSITION_INPUT) def test_pass_abuse_disposition_filter(abuse_dispotion_values, expected_answer): result = pass_abuse_disposition_filter(MOCK_INCIDENT_1, abuse_dispotion_values) assert result == expected_answer -DEMISTO_PARAMS = [({'event_sources': "No such source, Proofpoint TAP", 'abuse_disposition': "No such value, Unknown"}, - [MOCK_INCIDENT_1]), ({'event_sources': "", 'abuse_disposition': ""}, [MOCK_INCIDENT_1]), - ({'event_sources': "No such source", 'abuse_disposition': "No such value, Unknown"}, []), - ({'event_sources': "No such source, Proofpoint TAP", 'abuse_disposition': "No such value"}, []), - 
({'event_sources': "No such source", 'abuse_disposition': "No such value"}, [])] +DEMISTO_PARAMS = [ + ({"event_sources": "No such source, Proofpoint TAP", "abuse_disposition": "No such value, Unknown"}, [MOCK_INCIDENT_1]), + ({"event_sources": "", "abuse_disposition": ""}, [MOCK_INCIDENT_1]), + ({"event_sources": "No such source", "abuse_disposition": "No such value, Unknown"}, []), + ({"event_sources": "No such source, Proofpoint TAP", "abuse_disposition": "No such value"}, []), + ({"event_sources": "No such source", "abuse_disposition": "No such value"}, []), +] -@pytest.mark.parametrize('demisto_params, expected_answer', DEMISTO_PARAMS) +@pytest.mark.parametrize("demisto_params, expected_answer", DEMISTO_PARAMS) def test_filter_incidents(mocker, demisto_params, expected_answer): - mocker.patch.object(demisto, 'params', return_value=demisto_params) + mocker.patch.object(demisto, "params", return_value=demisto_params) filtered_incidents = filter_incidents([MOCK_INCIDENT_1]) assert filtered_incidents == expected_answer INGEST_ALERT_ARGS = { - "attacker": "{\"attacker\":{\"key\":\"value\"}}", - "cnc_host": "{\"cnc_host\":{\"key\":\"value\"}}", - "detector": "{\"detector\":{\"key\":\"value\"}}", - "email": "{\"email\":{\"key\":\"value\"}}", - "forensics_hosts": "{\"forensics_hosts\":{\"key\":\"value\"}}", - "target": "{\"target\":{\"key\":\"value\"}}", - "threat_info": "{\"threat_info\":{\"key\":\"value\"}}", - "custom_fields": "{\"custom_fields\":{\"key\":\"value\"}}", + "attacker": '{"attacker":{"key":"value"}}', + "cnc_host": '{"cnc_host":{"key":"value"}}', + "detector": '{"detector":{"key":"value"}}', + "email": '{"email":{"key":"value"}}', + "forensics_hosts": '{"forensics_hosts":{"key":"value"}}', + "target": '{"target":{"key":"value"}}', + "threat_info": '{"threat_info":{"key":"value"}}', + "custom_fields": '{"custom_fields":{"key":"value"}}', "post_url_id": "value", "json_version": "value", - "summary": "value" + "summary": "value", } EXPECTED_RESULT = { @@ -223,7 +195,7 @@ def test_filter_incidents(mocker, demisto_params, expected_answer): "custom_fields": {"key": "value"}, "post_url_id": "value", "json_version": "value", - "summary": "value" + "summary": "value", } @@ -234,112 +206,103 @@ def test_prepare_ingest_alert_request_body(): def test_fetch_incidents_limit_exceed(mocker): """ - Given - - a dict of params given to the function which is gathered originally from demisto.params() - The dict includes the relevant params for the fetch e.g. fetch_delta, fetch_limit, created_after, state. - - response of the api - When - - a single iteration of the fetch is activated with a fetch limit set to 5 - Then - - validate that the number or incidents that is returned is equal to the limit when the api returned more. - """ - params = { - 'fetch_delta': '6 hours', - 'fetch_limit': ' 5', - 'created_after': '2021-03-30T11:44:24Z', - 'state': 'closed' - } - mocker.patch('ProofpointThreatResponse.get_incidents_request', return_value=FETCH_RESPONSE) + Given + - a dict of params given to the function which is gathered originally from demisto.params() + The dict includes the relevant params for the fetch e.g. fetch_delta, fetch_limit, created_after, state. + - response of the api + When + - a single iteration of the fetch is activated with a fetch limit set to 5 + Then + - validate that the number or incidents that is returned is equal to the limit when the api returned more. 
+    """
+    params = {"fetch_delta": "6 hours", "fetch_limit": " 5", "created_after": "2021-03-30T11:44:24Z", "state": "closed"}
+    mocker.patch("ProofpointThreatResponse.get_incidents_request", return_value=FETCH_RESPONSE)
     incidents_list = get_incidents_batch_by_time_request(params)
     assert len(incidents_list) == 5
 
 
 def test_fetch_incidents_with_same_created_time(mocker):
     """
-    Given
-    - a dict of params given to the function which is gathered originally from demisto.params()
-     The dict includes the relevant params for the fetch e.g. fetch_delta, fetch_limit, created_after, state and
-     last_fetched_id.
-    - response of the api
-    When
-    - when a fetch occurs and the last fetched incident has exactly the same time of the next incident.
-    Then
-    - validate that only one of the incidents appear as to the fetch limit.
-    - validate that the next incident whose time is exactly the same is brought in the next fetch loop.
-    ( e.g. 3057 and 3058)
-    """
+    Given
+    - a dict of params given to the function which is gathered originally from demisto.params()
+     The dict includes the relevant params for the fetch e.g. fetch_delta, fetch_limit, created_after, state and
+     last_fetched_id.
+    - response of the api
+    When
+    - a fetch occurs and the last fetched incident has exactly the same time as the next incident.
+    Then
+    - validate that only one of the incidents appears, according to the fetch limit.
+    - validate that the next incident, whose time is exactly the same, is brought in the next fetch loop.
+    ( e.g. 3057 and 3058)
+    """
     expected_ids_to_fetch_first = [3055, 3056, 3057]
     expected_ids_to_fetch_second = [3058, 3059, 3060]
-    params = {
-        'fetch_delta': '2 hours',
-        'fetch_limit': '3',
-        'created_after': '2021-03-30T10:44:24Z',
-        'state': 'closed'
-    }
+    params = {"fetch_delta": "2 hours", "fetch_limit": "3", "created_after": "2021-03-30T10:44:24Z", "state": "closed"}
 
-    mocker.patch('ProofpointThreatResponse.get_incidents_request', return_value=FETCH_RESPONSE)
+    mocker.patch("ProofpointThreatResponse.get_incidents_request", return_value=FETCH_RESPONSE)
     new_fetched_first = get_incidents_batch_by_time_request(params)
     for incident in new_fetched_first:
-        assert incident.get('id') in expected_ids_to_fetch_first
+        assert incident.get("id") in expected_ids_to_fetch_first
 
     params = {
-        'fetch_delta': '2 hour',
-        'fetch_limit': '3',
-        'created_after': '2021-03-30T11:21:24Z',
-        'last_fetched_id': '3057',
-        'state': 'closed'
+        "fetch_delta": "2 hour",
+        "fetch_limit": "3",
+        "created_after": "2021-03-30T11:21:24Z",
+        "last_fetched_id": "3057",
+        "state": "closed",
     }
     new_fetched_second = get_incidents_batch_by_time_request(params)
     for incident in new_fetched_second:
-        assert incident.get('id') in expected_ids_to_fetch_second
+        assert incident.get("id") in expected_ids_to_fetch_second
 
 
 def test_get_new_incidents(mocker):
     """
-    Given
-    - a dict of request_params to the api.
-    - The last fetched incident id.
-    When
-    - Get new incidents is called during the fetch process.
-    Then
-    - validate that the number of expected incidents return.
-    - validate that all of the returned incident have a bigger id then the last fetched incident.
+    Given
+    - a dict of request_params to the api.
+    - The last fetched incident id.
+    When
+    - Get new incidents is called during the fetch process.
+    Then
+    - validate that the expected number of incidents is returned.
+    - validate that all of the returned incidents have a bigger id than the last fetched incident.
+    """
     last_incident_fetched = 3057
     request_params = {
-        'state': 'closed',
-        'created_after': '2021-03-30T10:21:24Z',
-        'created_before': '2021-03-31T11:21:24Z',
+        "state": "closed",
+        "created_after": "2021-03-30T10:21:24Z",
+        "created_before": "2021-03-31T11:21:24Z",
     }
-    mocker.patch('ProofpointThreatResponse.get_incidents_request', return_value=FETCH_RESPONSE)
+    mocker.patch("ProofpointThreatResponse.get_incidents_request", return_value=FETCH_RESPONSE)
     new_incidents = get_new_incidents(request_params, last_incident_fetched)
     assert len(new_incidents) == 14
     for incident in new_incidents:
-        assert incident.get('id') > 3057
+        assert incident.get("id") > 3057
 
 
 def test_get_time_delta():
     """
-    Given
-    - input to the get_time_delta function which is valid and invalid
-    When
-    - run the get_time_delta function.
-    Then
-    - validate that on invalid input such as days or no units relevant errors are raised.
-    - validate that on valid inputs the return value is as expected.
-    """
-    time_delta = get_time_delta('1 minute')
-    assert str(time_delta) == '0:01:00'
-    time_delta = get_time_delta('2 hours')
-    assert str(time_delta) == '2:00:00'
+    Given
+    - input to the get_time_delta function which is valid and invalid
+    When
+    - run the get_time_delta function.
+    Then
+    - validate that on invalid input, such as days or a missing unit, a relevant error is raised.
+    - validate that on valid inputs the return value is as expected.
+    """
+    time_delta = get_time_delta("1 minute")
+    assert str(time_delta) == "0:01:00"
+    time_delta = get_time_delta("2 hours")
+    assert str(time_delta) == "2:00:00"
     try:
-        get_time_delta('2')
+        get_time_delta("2")
     except Exception as ex:
-        assert 'The fetch_delta is invalid. Please make sure to insert both the number and the unit of the fetch delta.' in str(
-            ex)
+        assert "The fetch_delta is invalid. Please make sure to insert both the number and the unit of the fetch delta." in str(
+            ex
+        )
     try:
-        get_time_delta('2 days')
+        get_time_delta("2 days")
     except Exception as ex:
        assert 'The unit of fetch_delta is invalid. 
Possible values are "minutes" or "hours' in str(ex) @@ -355,17 +318,15 @@ def test_get_incident_command(mocker, requests_mock): Then: - Ensure expected fields ('attachments', 'sender_vap', 'recipient_vap') are populated to the context data """ - base_url = 'https://server_url/' - requests_mock.get(f'{base_url}api/incidents/3064.json', json=FETCH_RESPONSE[0]) - mocker.patch.object(demisto, 'results') - mocker.patch('ProofpointThreatResponse.BASE_URL', base_url) - mocker.patch.object(demisto, 'args', return_value={ - 'incident_id': '3064' - }) + base_url = "https://server_url/" + requests_mock.get(f"{base_url}api/incidents/3064.json", json=FETCH_RESPONSE[0]) + mocker.patch.object(demisto, "results") + mocker.patch("ProofpointThreatResponse.BASE_URL", base_url) + mocker.patch.object(demisto, "args", return_value={"incident_id": "3064"}) get_incident_command() results = demisto.results.call_args[0][0] - emails = results['EntryContext']['ProofPointTRAP.Incident(val.id === obj.id)'][0]['events'][0]['emails'][0].keys() - assert {'attachments', 'sender_vap', 'recipient_vap'}.issubset(set(emails)) + emails = results["EntryContext"]["ProofPointTRAP.Incident(val.id === obj.id)"][0]["events"][0]["emails"][0].keys() + assert {"attachments", "sender_vap", "recipient_vap"}.issubset(set(emails)) def test_get_incident_command_expand_events_false(mocker, requests_mock): @@ -381,21 +342,25 @@ def test_get_incident_command_expand_events_false(mocker, requests_mock): - Ensure events field is not returned - Ensure event_ids field is populated as expected """ - base_url = 'https://server_url/' - with open('./test_data/incident_expand_events_false.json') as f: + base_url = "https://server_url/" + with open("./test_data/incident_expand_events_false.json") as f: incident = json.loads(f.read()) - requests_mock.get(f'{base_url}api/incidents/3064.json?expand_events=false', json=incident) - mocker.patch.object(demisto, 'results') - mocker.patch('ProofpointThreatResponse.BASE_URL', base_url) - mocker.patch.object(demisto, 'args', return_value={ - 'incident_id': '3064', - 'expand_events': 'false', - }) + requests_mock.get(f"{base_url}api/incidents/3064.json?expand_events=false", json=incident) + mocker.patch.object(demisto, "results") + mocker.patch("ProofpointThreatResponse.BASE_URL", base_url) + mocker.patch.object( + demisto, + "args", + return_value={ + "incident_id": "3064", + "expand_events": "false", + }, + ) get_incident_command() results = demisto.results.call_args[0][0] - incident_result = results['EntryContext']['ProofPointTRAP.Incident(val.id === obj.id)'][0] - assert not incident_result['events'] - assert incident_result['event_ids'] + incident_result = results["EntryContext"]["ProofPointTRAP.Incident(val.id === obj.id)"][0] + assert not incident_result["events"] + assert incident_result["event_ids"] def test_close_incident_command(mocker, requests_mock): @@ -409,18 +374,14 @@ def test_close_incident_command(mocker, requests_mock): Then: - Ensure output is success message """ - base_url = 'https://server_url/' - requests_mock.post(f'{base_url}api/incidents/3064/close.json') - mocker.patch.object(demisto, 'results') - mocker.patch('ProofpointThreatResponse.BASE_URL', base_url) - mocker.patch.object(demisto, 'args', return_value={ - 'incident_id': '3064', - "summary": "summary", - "details": "details" - }) + base_url = "https://server_url/" + requests_mock.post(f"{base_url}api/incidents/3064/close.json") + mocker.patch.object(demisto, "results") + mocker.patch("ProofpointThreatResponse.BASE_URL", base_url) + 
mocker.patch.object(demisto, "args", return_value={"incident_id": "3064", "summary": "summary", "details": "details"}) close_incident_command() results = demisto.results.call_args[0][0] - assert 'success' in results['HumanReadable'] + assert "success" in results["HumanReadable"] def test_search_quarantine_command(mocker, requests_mock): @@ -434,19 +395,23 @@ def test_search_quarantine_command(mocker, requests_mock): Then: - Ensure output is success message (at least one success). """ - base_url = 'https://server_url/' - with open('./test_data/incidents.json') as f: + base_url = "https://server_url/" + with open("./test_data/incidents.json") as f: incident = json.loads(f.read()) - requests_mock.get(f'{base_url}api/incidents', json=incident) - mocker.patch('ProofpointThreatResponse.BASE_URL', base_url) - mocker.patch.object(demisto, 'args', return_value={ - 'message_id': "", - "recipient": "sabrina.test@test.com", - "time": "2021-03-30T11:17:39Z" - }) + requests_mock.get(f"{base_url}api/incidents", json=incident) + mocker.patch("ProofpointThreatResponse.BASE_URL", base_url) + mocker.patch.object( + demisto, + "args", + return_value={ + "message_id": "", + "recipient": "sabrina.test@test.com", + "time": "2021-03-30T11:17:39Z", + }, + ) res = search_quarantine() - quarantines_res = [x.get('quarantine').get('status') for x in res.outputs] - assert 'successful' in quarantines_res + quarantines_res = [x.get("quarantine").get("status") for x in res.outputs] + assert "successful" in quarantines_res def test_search_quarantine_command_with_str_messageDeliveryTime(mocker, requests_mock): @@ -460,22 +425,24 @@ def test_search_quarantine_command_with_str_messageDeliveryTime(mocker, requests Then: - Ensure output is success message (at least one success). """ - base_url = 'https://server_url/' - with open('./test_data/incident_str_messageDeliveryTime.json') as f: + base_url = "https://server_url/" + with open("./test_data/incident_str_messageDeliveryTime.json") as f: incident = json.loads(f.read()) - requests_mock.get(f'{base_url}api/incidents', json=incident) - mocker.patch('ProofpointThreatResponse.BASE_URL', base_url) - mocker.patch('ProofpointThreatResponse.get_incidents_batch_by_time_request', return_value=incident) - - mocker.patch.object(demisto, 'args', return_value={ - 'message_id': "", - "recipient": "sabrina.test@test.com", - "time": "2021-03-30T11:17:39Z" - }) + requests_mock.get(f"{base_url}api/incidents", json=incident) + mocker.patch("ProofpointThreatResponse.BASE_URL", base_url) + mocker.patch("ProofpointThreatResponse.get_incidents_batch_by_time_request", return_value=incident) + + mocker.patch.object( + demisto, + "args", + return_value={"message_id": "", "recipient": "sabrina.test@test.com", "time": "2021-03-30T11:17:39Z"}, + ) res = search_quarantine() - assert res.outputs_prefix == ' Message ID found in TRAP alerts, ' \ - 'but not in the quarantine list meaning that email has not be quarantined.' + assert ( + res.outputs_prefix == " Message ID found in TRAP alerts, " + "but not in the quarantine list meaning that email has not be quarantined." + ) def test_list_incidents_command(mocker, requests_mock): @@ -489,24 +456,32 @@ def test_list_incidents_command(mocker, requests_mock): Then: - Ensure output generated successfully without errors. 
""" - base_url = 'https://server_url/' - requests_mock.get(f'{base_url}api/incidents', json=[MOCK_INCIDENT_1, MOCK_INCIDENT_2]) - mocker.patch('ProofpointThreatResponse.BASE_URL', base_url) - mocker.patch.object(demisto, 'args', return_value={'limit': 2}) - results = mocker.patch.object(demisto, 'results') + base_url = "https://server_url/" + requests_mock.get(f"{base_url}api/incidents", json=[MOCK_INCIDENT_1, MOCK_INCIDENT_2]) + mocker.patch("ProofpointThreatResponse.BASE_URL", base_url) + mocker.patch.object(demisto, "args", return_value={"limit": 2}) + results = mocker.patch.object(demisto, "results") list_incidents_command() - incidents = results.call_args[0][0]['Contents'] + incidents = results.call_args[0][0]["Contents"] assert len(incidents) == 2 -@pytest.mark.parametrize('list_id_to_search, filter_to_apply, indicators_to_return, expected_result', [ - ('1', '1.1.1.1', [{"host": {"host": "1.1.1.1"}}, {"host": {"host": "2.2.2.2"}}], [{"host": {"host": "1.1.1.1"}}]), - ('1', '', [{"host": {"host": "1.1.1.1"}}, {"host": {"host": "2.2.2.2"}}], - [{"host": {"host": "1.1.1.1"}}, {"host": {"host": "2.2.2.2"}}]), - ('1', '', [{}], []), -]) -def test_search_indicator_command(mocker, requests_mock, list_id_to_search, filter_to_apply, indicators_to_return, - expected_result): +@pytest.mark.parametrize( + "list_id_to_search, filter_to_apply, indicators_to_return, expected_result", + [ + ("1", "1.1.1.1", [{"host": {"host": "1.1.1.1"}}, {"host": {"host": "2.2.2.2"}}], [{"host": {"host": "1.1.1.1"}}]), + ( + "1", + "", + [{"host": {"host": "1.1.1.1"}}, {"host": {"host": "2.2.2.2"}}], + [{"host": {"host": "1.1.1.1"}}, {"host": {"host": "2.2.2.2"}}], + ), + ("1", "", [{}], []), + ], +) +def test_search_indicator_command( + mocker, requests_mock, list_id_to_search, filter_to_apply, indicators_to_return, expected_result +): """ Given: - Case A: List id = 1, and filter is 1.1.1.1 @@ -523,13 +498,13 @@ def test_search_indicator_command(mocker, requests_mock, list_id_to_search, filt - Case B: Ensure the list is not filtered and both 1.1.1.1 and 2.2.2.2 are returned. 
- Case C: Ensure the logic is working, and an empty list is returnd """ - base_url = 'https://server_url/' - requests_mock.get(f'{base_url}api/lists/{list_id_to_search}/members.json', json=indicators_to_return) - mocker.patch('ProofpointThreatResponse.BASE_URL', base_url) - mocker.patch.object(demisto, 'args', return_value={'list-id': list_id_to_search, 'filter': filter_to_apply}) - results = mocker.patch.object(demisto, 'results') + base_url = "https://server_url/" + requests_mock.get(f"{base_url}api/lists/{list_id_to_search}/members.json", json=indicators_to_return) + mocker.patch("ProofpointThreatResponse.BASE_URL", base_url) + mocker.patch.object(demisto, "args", return_value={"list-id": list_id_to_search, "filter": filter_to_apply}) + results = mocker.patch.object(demisto, "results") search_indicator_command() - indicators = results.call_args[0][0]['indicators'] + indicators = results.call_args[0][0]["indicators"] assert indicators == expected_result @@ -544,22 +519,24 @@ def test_search_quarantine_command_mismatch_time(mocker, requests_mock): Then: - test fails on time mismatch """ - base_url = 'https://server_url/' - with open('./test_data/incident_str_messageDeliveryTime.json') as f: + base_url = "https://server_url/" + with open("./test_data/incident_str_messageDeliveryTime.json") as f: incident = json.loads(f.read()) - requests_mock.get(f'{base_url}api/incidents', json=incident) - mocker.patch('ProofpointThreatResponse.BASE_URL', base_url) - mocker.patch('ProofpointThreatResponse.get_incidents_batch_by_time_request', return_value=incident) - - mocker.patch.object(demisto, 'args', return_value={ - 'message_id': "", - "recipient": "sabrina.test@test.com", - "time": "2021-04-30T11:17:39Z" - }) + requests_mock.get(f"{base_url}api/incidents", json=incident) + mocker.patch("ProofpointThreatResponse.BASE_URL", base_url) + mocker.patch("ProofpointThreatResponse.get_incidents_batch_by_time_request", return_value=incident) + + mocker.patch.object( + demisto, + "args", + return_value={"message_id": "", "recipient": "sabrina.test@test.com", "time": "2021-04-30T11:17:39Z"}, + ) res = search_quarantine() - assert res.readable_output == (" Message ID found in TRAP alerts, but timestamp between email delivery time " - "and time given as argument doesn't match") + assert res.readable_output == ( + " Message ID found in TRAP alerts, but timestamp between email delivery time " + "and time given as argument doesn't match" + ) def test_search_quarantine_command_with_incident_far_from_alert_time_fail(mocker, requests_mock): @@ -573,22 +550,24 @@ def test_search_quarantine_command_with_incident_far_from_alert_time_fail(mocker Then: - test fails on time mismatch """ - base_url = 'https://server_url/' - with open('./test_data/incident_email_manually_quarantined.json') as f: + base_url = "https://server_url/" + with open("./test_data/incident_email_manually_quarantined.json") as f: incident = json.loads(f.read()) - requests_mock.get(f'{base_url}api/incidents', json=incident) - mocker.patch('ProofpointThreatResponse.BASE_URL', base_url) - mocker.patch('ProofpointThreatResponse.get_incidents_batch_by_time_request', return_value=incident) - - mocker.patch.object(demisto, 'args', return_value={ - 'message_id': "", - "recipient": "sabrina.test@test.com", - "time": "2021-04-30T11:17:39Z" - }) + requests_mock.get(f"{base_url}api/incidents", json=incident) + mocker.patch("ProofpointThreatResponse.BASE_URL", base_url) + mocker.patch("ProofpointThreatResponse.get_incidents_batch_by_time_request", 
return_value=incident)
+
+    mocker.patch.object(
+        demisto,
+        "args",
+        return_value={"message_id": "", "recipient": "sabrina.test@test.com", "time": "2021-04-30T11:17:39Z"},
+    )
     res = search_quarantine()
-    assert res.readable_output == (' Message ID matches to 1 emails quarantined, but time between alert received '
-                                   'and the quarantine starting exceeded the quarantine_limit provided')
+    assert res.readable_output == (
+        " Message ID matches to 1 emails quarantined, but time between alert received "
+        "and the quarantine starting exceeded the quarantine_limit provided"
+    )
 
 
 def test_search_quarantine_command_with_incident_far_from_alert_time_succeed(mocker, requests_mock):
@@ -602,22 +581,27 @@ def test_search_quarantine_command_with_incident_far_from_alert_time_succeed(moc
     Then:
         - test succeed
     """
-    base_url = 'https://server_url/'
-    with open('./test_data/incident_email_manually_quarantined.json') as f:
+    base_url = "https://server_url/"
+    with open("./test_data/incident_email_manually_quarantined.json") as f:
         incident = json.loads(f.read())
-    requests_mock.get(f'{base_url}api/incidents', json=incident)
-    mocker.patch('ProofpointThreatResponse.BASE_URL', base_url)
-    mocker.patch('ProofpointThreatResponse.get_incidents_batch_by_time_request', return_value=incident)
-
-    mocker.patch.object(demisto, 'args', return_value={
-        'message_id': "",
-        "recipient": "sabrina.test@test.com",
-        "time": "2021-04-30T11:17:39Z",
-        "quarantine_limit": "2665996"
-
-    })
+    requests_mock.get(f"{base_url}api/incidents", json=incident)
+    mocker.patch("ProofpointThreatResponse.BASE_URL", base_url)
+    mocker.patch("ProofpointThreatResponse.get_incidents_batch_by_time_request", return_value=incident)
+
+    mocker.patch.object(
+        demisto,
+        "args",
+        return_value={
+            "message_id": "",
+            "recipient": "sabrina.test@test.com",
+            "time": "2021-04-30T11:17:39Z",
+            "quarantine_limit": "2665996",
+        },
+    )
     res = search_quarantine()
-    assert res.readable_output == ("### Quarantine Result\n|alert|incident|quarantine|\n|---|---|---|\n| id: 9225<br>time: "
-                                   "2021-03-30T11:44:24Z | id: 3065<br>time: 2021-03-30T11:44:24Z | messageId: <br>recipient: sabrina.test@test.com<br>startTime: 2021-04-30T08:17:39Z |\n")
+    assert res.readable_output == (
+        "### Quarantine Result\n|alert|incident|quarantine|\n|---|---|---|\n| id: 9225<br>time: "
+        "2021-03-30T11:44:24Z | id: 3065<br>time: 2021-03-30T11:44:24Z | messageId: <br>recipient: sabrina.test@test.com<br>
startTime: 2021-04-30T08:17:39Z |\n" + ) diff --git a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponseEventCollector/ProofpointThreatResponseEventCollector.py b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponseEventCollector/ProofpointThreatResponseEventCollector.py index d8a19de5d46e..fed5b516f9b7 100644 --- a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponseEventCollector/ProofpointThreatResponseEventCollector.py +++ b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponseEventCollector/ProofpointThreatResponseEventCollector.py @@ -1,16 +1,17 @@ -import demistomock as demisto # noqa: F401 -from CommonServerPython import * # noqa: F401 import copy from datetime import timedelta +import demistomock as demisto # noqa: F401 + # Disable insecure warnings import urllib3 +from CommonServerPython import * # noqa: F401 urllib3.disable_warnings() -TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ' -PRODUCT = 'threat_response' -VENDOR = 'proofpoint' +TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" +PRODUCT = "threat_response" +VENDOR = "proofpoint" class Client(BaseClient): @@ -29,8 +30,8 @@ def get_incidents_request(self, query_params): list. The incidents returned from the API call """ raw_response = self._http_request( - method='GET', - url_suffix='api/incidents', + method="GET", + url_suffix="api/incidents", params=query_params, ) return raw_response @@ -42,13 +43,10 @@ def test_module(client, first_fetch): Returns: 'ok' if test passed, anything else will fail the test. """ - query_params = { - 'created_after': first_fetch, - 'state': 'open' - } + query_params = {"created_after": first_fetch, "state": "open"} client.get_incidents_request(query_params) - return 'ok' + return "ok" def create_incidents_human_readable(human_readable_message, incidents_list): @@ -62,35 +60,47 @@ def create_incidents_human_readable(human_readable_message, incidents_list): str. 
The incidents human readable in markdown format """ human_readable = [] - human_readable_headers = ['ID', 'Created At', 'Type', 'Summary', 'Score', 'Event Count', 'Assignee', - 'Successful Quarantines', 'Failed Quarantines', 'Pending Quarantines'] + human_readable_headers = [ + "ID", + "Created At", + "Type", + "Summary", + "Score", + "Event Count", + "Assignee", + "Successful Quarantines", + "Failed Quarantines", + "Pending Quarantines", + ] for incident in incidents_list: - human_readable.append({ - 'Created At': incident.get('created_at'), - 'ID': incident.get('id'), - 'State': incident.get('state'), - 'Type': incident.get('type'), - 'Summary': incident.get('summary'), - 'Score': incident.get('score'), - 'Event Count': incident.get('event_count'), - 'Assignee': incident.get('assignee'), - 'Successful Quarantines': incident.get('successful_quarantine'), - 'Failed Quarantines': incident.get('failed_quarantines'), - 'Pending Quarantines': incident.get('pending_quarantines') - }) + human_readable.append( + { + "Created At": incident.get("created_at"), + "ID": incident.get("id"), + "State": incident.get("state"), + "Type": incident.get("type"), + "Summary": incident.get("summary"), + "Score": incident.get("score"), + "Event Count": incident.get("event_count"), + "Assignee": incident.get("assignee"), + "Successful Quarantines": incident.get("successful_quarantine"), + "Failed Quarantines": incident.get("failed_quarantines"), + "Pending Quarantines": incident.get("pending_quarantines"), + } + ) return tableToMarkdown(human_readable_message, human_readable, human_readable_headers, removeNull=True) def list_incidents_command(client, args): - """ Retrieves incidents from ProofPoint API """ - limit = arg_to_number(args.pop('limit')) + """Retrieves incidents from ProofPoint API""" + limit = arg_to_number(args.pop("limit")) raw_response = client.get_incidents_request(args) incidents_list = raw_response[:limit] events = get_events_from_incidents(incidents_list) - human_readable = create_incidents_human_readable('List Incidents Results:', events) + human_readable = create_incidents_human_readable("List Incidents Results:", events) return events, human_readable, raw_response @@ -108,7 +118,7 @@ def pass_sources_list_filter(incident, sources_list): if len(sources_list) == 0: return True - return any(source in incident.get('event_sources') for source in sources_list) + return any(source in incident.get("event_sources") for source in sources_list) def pass_abuse_disposition_filter(incident, abuse_disposition_values): @@ -124,8 +134,8 @@ def pass_abuse_disposition_filter(incident, abuse_disposition_values): if len(abuse_disposition_values) == 0: return True - for incident_field in incident.get('incident_field_values', []): - if incident_field['name'] == 'Abuse Disposition' and incident_field['value'] in abuse_disposition_values: + for incident_field in incident.get("incident_field_values", []): + if incident_field["name"] == "Abuse Disposition" and incident_field["value"] in abuse_disposition_values: return True return False @@ -142,15 +152,14 @@ def filter_incidents(incidents_list): """ filtered_incidents_list = [] params = demisto.params() - sources_list = argToList(params.get('event_sources')) - abuse_disposition_values = argToList(params.get('abuse_disposition')) + sources_list = argToList(params.get("event_sources")) + abuse_disposition_values = argToList(params.get("abuse_disposition")) if not sources_list and not abuse_disposition_values: return incidents_list for incident in incidents_list: - if 
pass_sources_list_filter(incident, sources_list) and pass_abuse_disposition_filter(incident, - abuse_disposition_values): + if pass_sources_list_filter(incident, sources_list) and pass_abuse_disposition_filter(incident, abuse_disposition_values): filtered_incidents_list.append(incident) return filtered_incidents_list @@ -163,20 +172,22 @@ def get_time_delta(fetch_delta): Returns: The time delta. """ - fetch_delta_split = fetch_delta.strip().split(' ') + fetch_delta_split = fetch_delta.strip().split(" ") if len(fetch_delta_split) != 2: - raise Exception( - 'The fetch_delta is invalid. Please make sure to insert both the number and the unit of the fetch delta.') + raise Exception("The fetch_delta is invalid. Please make sure to insert both the number and the unit of the fetch delta.") unit = fetch_delta_split[1].lower() number = int(fetch_delta_split[0]) - if unit not in ['minute', 'minutes', - 'hour', 'hours', - ]: + if unit not in [ + "minute", + "minutes", + "hour", + "hours", + ]: raise Exception('The unit of fetch_delta is invalid. Possible values are "minutes" or "hours".') - if 'hour' in unit: + if "hour" in unit: time_delta = timedelta(hours=number) # batch by hours else: time_delta = timedelta(minutes=number) # batch by minutes @@ -197,8 +208,8 @@ def get_new_incidents(client, request_params, last_fetched_id): """ incidents = client.get_incidents_request(request_params) filtered_incidents_list = filter_incidents(incidents) - ordered_incidents = sorted(filtered_incidents_list, key=lambda k: (k['created_at'], k['id'])) - return list(filter(lambda incident: int(incident.get('id')) > last_fetched_id, ordered_incidents)) + ordered_incidents = sorted(filtered_incidents_list, key=lambda k: (k["created_at"], k["id"])) + return list(filter(lambda incident: int(incident.get("id")) > last_fetched_id, ordered_incidents)) def get_incidents_batch_by_time_request(client, params): @@ -214,21 +225,21 @@ def get_incidents_batch_by_time_request(client, params): """ incidents_list = [] # type:list - fetch_delta = params.get('fetch_delta', '6 hours') - fetch_limit = arg_to_number(params.get('fetch_limit', '100')) - last_fetched_id = arg_to_number(params.get('last_fetched_id', '0')) + fetch_delta = params.get("fetch_delta", "6 hours") + fetch_limit = arg_to_number(params.get("fetch_limit", "100")) + last_fetched_id = arg_to_number(params.get("last_fetched_id", "0")) current_time = datetime.now() time_delta = get_time_delta(fetch_delta) - created_after = datetime.strptime(params.get('created_after'), TIME_FORMAT) + created_after = datetime.strptime(params.get("created_after"), TIME_FORMAT) created_before = created_after + time_delta request_params = { - 'state': params.get('state'), - 'created_after': created_after.isoformat().split('.')[0] + 'Z', - 'created_before': created_before.isoformat().split('.')[0] + 'Z' + "state": params.get("state"), + "created_after": created_after.isoformat().split(".")[0] + "Z", + "created_before": created_before.isoformat().split(".")[0] + "Z", } # while loop relevant for fetching old incidents @@ -237,7 +248,8 @@ def get_incidents_batch_by_time_request(client, params): f"Entered the batch loop , with fetch_limit {fetch_limit} and events list " f"{[incident.get('id') for incident in incidents_list]} and event length {len(incidents_list)} " f"with created_after {request_params['created_after']} and " - f"created_before {request_params['created_before']}") + f"created_before {request_params['created_before']}" + ) new_incidents = get_new_incidents(client, request_params, 
last_fetched_id) incidents_list.extend(new_incidents) @@ -247,20 +259,21 @@ def get_incidents_batch_by_time_request(client, params): created_before = created_before + time_delta # updating params according to the new times - request_params['created_after'] = created_after.isoformat().split('.')[0] + 'Z' - request_params['created_before'] = created_before.isoformat().split('.')[0] + 'Z' - demisto.debug(f"End of the current batch loop with {str(len(incidents_list))} events") + request_params["created_after"] = created_after.isoformat().split(".")[0] + "Z" + request_params["created_before"] = created_before.isoformat().split(".")[0] + "Z" + demisto.debug(f"End of the current batch loop with {len(incidents_list)!s} events") # fetching the last batch when created_before is bigger then current time = fetching new events if len(incidents_list) < fetch_limit: # type: ignore[operator] # fetching the last batch - request_params['created_before'] = current_time.isoformat().split('.')[0] + 'Z' + request_params["created_before"] = current_time.isoformat().split(".")[0] + "Z" new_incidents = get_new_incidents(client, request_params, last_fetched_id) incidents_list.extend(new_incidents) demisto.debug( f"Finished the last batch, with fetch_limit {fetch_limit} and events list:" - f" {[incident.get('id') for incident in incidents_list]} and event length {len(incidents_list)}") + f" {[incident.get('id') for incident in incidents_list]} and event length {len(incidents_list)}" + ) incidents_list_limit = incidents_list[:fetch_limit] return incidents_list_limit @@ -268,46 +281,43 @@ def get_incidents_batch_by_time_request(client, params): def fetch_events_command(client, first_fetch, last_run, fetch_limit, fetch_delta, incidents_states): """ - Fetches incidents from the ProofPoint API. + Fetches incidents from the ProofPoint API. 
""" - last_fetch = last_run.get('last_fetch', {}) - last_fetched_id = last_run.get('last_fetched_incident_id', {}) + last_fetch = last_run.get("last_fetch", {}) + last_fetched_id = last_run.get("last_fetched_incident_id", {}) for state in incidents_states: if not last_fetch.get(state): last_fetch[state] = first_fetch if not last_fetched_id.get(state): - last_fetched_id[state] = '0' + last_fetched_id[state] = "0" incidents = [] for state in incidents_states: request_params = { - 'created_after': last_fetch[state], - 'last_fetched_id': last_fetched_id[state], - 'fetch_delta': fetch_delta, - 'state': state, - 'fetch_limit': fetch_limit + "created_after": last_fetch[state], + "last_fetched_id": last_fetched_id[state], + "fetch_delta": fetch_delta, + "state": state, + "fetch_limit": fetch_limit, } id = last_fetched_id[state] incidents_list = get_incidents_batch_by_time_request(client, request_params) incidents.extend(incidents_list) if incidents_list: - id = incidents_list[-1].get('id') - last_fetch_time = incidents_list[-1]['created_at'] - last_fetch[state] = \ - (datetime.strptime(last_fetch_time, TIME_FORMAT) - timedelta(minutes=1)).isoformat().split('.')[0] + 'Z' + id = incidents_list[-1].get("id") + last_fetch_time = incidents_list[-1]["created_at"] + last_fetch[state] = (datetime.strptime(last_fetch_time, TIME_FORMAT) - timedelta(minutes=1)).isoformat().split(".")[ + 0 + ] + "Z" last_fetched_id[state] = id - demisto.debug(f"End of current fetch function with last_fetch {str(last_fetch)} and last_fetched_id" - f" {str(last_fetched_id)}") + demisto.debug(f"End of current fetch function with last_fetch {last_fetch!s} and last_fetched_id" f" {last_fetched_id!s}") - last_run = { - 'last_fetch': last_fetch, - 'last_fetched_incident_id': last_fetched_id - } + last_run = {"last_fetch": last_fetch, "last_fetched_incident_id": last_fetched_id} - demisto.debug(f'Fetched {len(incidents)} events') + demisto.debug(f"Fetched {len(incidents)} events") events = get_events_from_incidents(incidents) return events, last_run @@ -341,45 +351,40 @@ def get_events_from_incidents(incidents): """ fetched_events = [] for incident in incidents: - if events := incident.get('events'): + if events := incident.get("events"): for event in events: new_incident = copy.deepcopy(incident) - del new_incident['events'] - new_incident['event'] = event + del new_incident["events"] + new_incident["event"] = event fetched_events.append(new_incident) else: - del incident['events'] - incident['event'] = {} + del incident["events"] + incident["event"] = {} fetched_events.append(incident) return fetched_events def main(): # pragma: no cover - """main function, parses params and runs command functions - """ + """main function, parses params and runs command functions""" args = demisto.args() command = demisto.command() params = demisto.params() - api_key = params.get('credentials', {}).get('password') - base_url = params.get('url') - verify_certificate = not params.get('insecure', False) - proxy = params.get('proxy', False) + api_key = params.get("credentials", {}).get("password") + base_url = params.get("url") + verify_certificate = not params.get("insecure", False) + proxy = params.get("proxy", False) # How many time before the first fetch to retrieve incidents - first_fetch, _ = parse_date_range(params.get('first_fetch', '3 days') or '3 days', - date_format=TIME_FORMAT) - fetch_limit = params.get('fetch_limit', '100') - fetch_delta = params.get('fetch_delta', '6 hours') - incidents_states = argToList(params.get('states', ['new', 
'open', 'assigned', 'closed', 'ignored'])) + first_fetch, _ = parse_date_range(params.get("first_fetch", "3 days") or "3 days", date_format=TIME_FORMAT) + fetch_limit = params.get("fetch_limit", "100") + fetch_delta = params.get("fetch_delta", "6 hours") + incidents_states = argToList(params.get("states", ["new", "open", "assigned", "closed", "ignored"])) - demisto.debug(f'Command being called is {command}') + demisto.debug(f"Command being called is {command}") try: - headers = { - 'Content-Type': 'application/json', - 'Authorization': api_key - } + headers = {"Content-Type": "application/json", "Authorization": api_key} client = Client( base_url=base_url, verify=verify_certificate, @@ -387,24 +392,20 @@ def main(): # pragma: no cover proxy=proxy, ) - if command == 'test-module': + if command == "test-module": return_results(test_module(client, first_fetch)) - elif command == 'proofpoint-trap-get-events': - should_push_events = args.pop('should_push_events') + elif command == "proofpoint-trap-get-events": + should_push_events = args.pop("should_push_events") events, human_readable, raw_response = list_incidents_command(client, args) results = CommandResults(raw_response=raw_response, readable_output=human_readable) return_results(results) if argToBoolean(should_push_events): - send_events_to_xsiam( - events, - VENDOR, - PRODUCT - ) + send_events_to_xsiam(events, VENDOR, PRODUCT) - elif command == 'fetch-events': + elif command == "fetch-events": last_run = demisto.getLastRun() - demisto.debug(f'last_run before fetch_events_command {last_run=}') + demisto.debug(f"last_run before fetch_events_command {last_run=}") events, last_run = fetch_events_command( client, first_fetch, @@ -414,19 +415,15 @@ def main(): # pragma: no cover incidents_states, ) - send_events_to_xsiam( - events, - VENDOR, - PRODUCT - ) + send_events_to_xsiam(events, VENDOR, PRODUCT) demisto.debug(f'Fetched event ids: {[event.get("id") for event in events]}') - demisto.debug(f'last_run after fetch_events_command {last_run=}') + demisto.debug(f"last_run after fetch_events_command {last_run=}") demisto.setLastRun(last_run) # Log exceptions and return errors except Exception as e: - return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') + return_error(f"Failed to execute {demisto.command()} command.\nError:\n{e!s}") -if __name__ == '__builtin__' or __name__ == 'builtins': +if __name__ == "__builtin__" or __name__ == "builtins": main() diff --git a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponseEventCollector/ProofpointThreatResponseEventCollector_test.py b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponseEventCollector/ProofpointThreatResponseEventCollector_test.py index 6386272ae830..5c441d3b65f4 100644 --- a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponseEventCollector/ProofpointThreatResponseEventCollector_test.py +++ b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponseEventCollector/ProofpointThreatResponseEventCollector_test.py @@ -1,5 +1,5 @@ from CommonServerPython import * -from ProofpointThreatResponseEventCollector import fetch_events_command, TIME_FORMAT, Client, list_incidents_command +from ProofpointThreatResponseEventCollector import TIME_FORMAT, Client, fetch_events_command, list_incidents_command def test_fetch_events_command(requests_mock): @@ -13,21 +13,17 @@ def test_fetch_events_command(requests_mock): Then: - Ensure last-fetch id is 2 """ - base_url = 'https://server_url/' - with 
open('./test_data/raw_response.json') as f: + base_url = "https://server_url/" + with open("./test_data/raw_response.json") as f: incidents = json.loads(f.read()) - with open('./test_data/expected_result.json') as f: + with open("./test_data/expected_result.json") as f: expected_result = json.loads(f.read()) - requests_mock.get(f'{base_url}api/incidents', json=incidents) - client = Client(base_url=base_url, - verify=True, - headers={}, - proxy=False) - first_fetch, _ = parse_date_range('2 hours', date_format=TIME_FORMAT) - events, last_fetch = fetch_events_command(client=client, first_fetch=first_fetch, last_run={}, - fetch_limit='100', - fetch_delta='6 hours', - incidents_states=['open']) + requests_mock.get(f"{base_url}api/incidents", json=incidents) + client = Client(base_url=base_url, verify=True, headers={}, proxy=False) + first_fetch, _ = parse_date_range("2 hours", date_format=TIME_FORMAT) + events, last_fetch = fetch_events_command( + client=client, first_fetch=first_fetch, last_run={}, fetch_limit="100", fetch_delta="6 hours", incidents_states=["open"] + ) assert events == expected_result @@ -42,14 +38,11 @@ def test_list_incidents_command(requests_mock): Then: - Ensure List Incidents Results in human-readable. """ - base_url = 'https://server_url/' - with open('./test_data/raw_response.json') as f: + base_url = "https://server_url/" + with open("./test_data/raw_response.json") as f: incidents = json.loads(f.read()) - requests_mock.get(f'{base_url}api/incidents', json=incidents) - client = Client(base_url=base_url, - verify=True, - headers={}, - proxy=False) - args = {'limit': 2} + requests_mock.get(f"{base_url}api/incidents", json=incidents) + client = Client(base_url=base_url, verify=True, headers={}, proxy=False) + args = {"limit": 2} incidents, human_readable, raw_response = list_incidents_command(client, args) - assert 'List Incidents Results:' in human_readable + assert "List Incidents Results:" in human_readable diff --git a/Packs/ProofpointThreatResponse/ReleaseNotes/2_0_24.md b/Packs/ProofpointThreatResponse/ReleaseNotes/2_0_24.md new file mode 100644 index 000000000000..c4c576073e0a --- /dev/null +++ b/Packs/ProofpointThreatResponse/ReleaseNotes/2_0_24.md @@ -0,0 +1,9 @@ + +#### Integrations + +##### Proofpoint Threat Response Event Collector + +- Metadata and documentation improvements. +##### Proofpoint Threat Response (Beta) + +- Metadata and documentation improvements. 
diff --git a/Packs/ProofpointThreatResponse/pack_metadata.json b/Packs/ProofpointThreatResponse/pack_metadata.json index e9f7a3b77fb2..1ca8ded98208 100644 --- a/Packs/ProofpointThreatResponse/pack_metadata.json +++ b/Packs/ProofpointThreatResponse/pack_metadata.json @@ -2,7 +2,7 @@ "name": "Proofpoint Threat Response", "description": "Use the Proofpoint Threat Response integration to orchestrate and automate incident response.", "support": "xsoar", - "currentVersion": "2.0.23", + "currentVersion": "2.0.24", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", From 0eab5ed4cd48956a2c7572a6496e731057eeb49e Mon Sep 17 00:00:00 2001 From: Content Bot Date: Sun, 23 Mar 2025 12:52:46 +0000 Subject: [PATCH 05/18] SplunkPy: Apply ruff Format --- .../Integrations/SplunkPy/SplunkPy.py | 2527 +++++++------ .../Integrations/SplunkPy/SplunkPy_test.py | 3302 ++++++++++------- Packs/SplunkPy/ReleaseNotes/3_2_6.md | 24 + .../SplunkAddComment/SplunkAddComment.py | 14 +- .../SplunkAddComment/SplunkAddComment_test.py | 7 +- .../SplunkConvertCommentsToTable.py | 19 +- .../SplunkConvertCommentsToTable_test.py | 9 +- .../SplunkShowAsset/SplunkShowAsset.py | 29 +- .../SplunkShowDrilldown.py | 35 +- .../SplunkShowDrilldown_test.py | 307 +- .../SplunkShowIdentity/SplunkShowIdentity.py | 29 +- Packs/SplunkPy/pack_metadata.json | 2 +- 12 files changed, 3709 insertions(+), 2595 deletions(-) create mode 100644 Packs/SplunkPy/ReleaseNotes/3_2_6.md diff --git a/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy.py b/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy.py index 1961799d5638..15384c731593 100644 --- a/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy.py +++ b/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy.py @@ -1,112 +1,112 @@ -import demistomock as demisto # noqa: F401 -from CommonServerPython import * # noqa: F401 import hashlib import io import json import re from datetime import datetime, timedelta + import dateparser +import demistomock as demisto # noqa: F401 import pytz import requests - -from splunklib import client -from splunklib import results -from splunklib.data import Record +from CommonServerPython import * # noqa: F401 +from splunklib import client, results from splunklib.binding import AuthenticationError, HTTPError, namespace - +from splunklib.data import Record INTEGRATION_LOG = "Splunk- " -OUTPUT_MODE_JSON = 'json' # type of response from splunk-sdk query (json/csv/xml) +OUTPUT_MODE_JSON = "json" # type of response from splunk-sdk query (json/csv/xml) INDEXES_REGEX = r"""["'][\s]*index[\s]*["'][\s]*:[\s]*["']([^"']+)["']""" # Define utf8 as default encoding params = demisto.params() SPLUNK_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" -DEFAULT_ASSET_ENRICH_TABLES = 'asset_lookup_by_str,asset_lookup_by_cidr' -DEFAULT_IDENTITY_ENRICH_TABLE = 'identity_lookup_expanded' -VERIFY_CERTIFICATE = not bool(params.get('unsecure')) -FETCH_LIMIT = int(params.get('fetch_limit')) if params.get('fetch_limit') else 50 +DEFAULT_ASSET_ENRICH_TABLES = "asset_lookup_by_str,asset_lookup_by_cidr" +DEFAULT_IDENTITY_ENRICH_TABLE = "identity_lookup_expanded" +VERIFY_CERTIFICATE = not bool(params.get("unsecure")) +FETCH_LIMIT = int(params.get("fetch_limit")) if params.get("fetch_limit") else 50 FETCH_LIMIT = max(min(200, FETCH_LIMIT), 1) MIRROR_LIMIT = 1000 -PROBLEMATIC_CHARACTERS = ['.', '(', ')', '[', ']'] -REPLACE_WITH = '_' -REPLACE_FLAG = params.get('replaceKeys', False) -FETCH_TIME = params.get('fetch_time') +PROBLEMATIC_CHARACTERS = [".", "(", ")", "[", "]"] +REPLACE_WITH = "_" +REPLACE_FLAG = 
params.get("replaceKeys", False) +FETCH_TIME = params.get("fetch_time") PROXIES = handle_proxy() -TIME_UNIT_TO_MINUTES = {'minute': 1, 'hour': 60, 'day': 24 * 60, 'week': 7 * 24 * 60, 'month': 30 * 24 * 60, - 'year': 365 * 24 * 60} +TIME_UNIT_TO_MINUTES = { + "minute": 1, + "hour": 60, + "day": 24 * 60, + "week": 7 * 24 * 60, + "month": 30 * 24 * 60, + "year": 365 * 24 * 60, +} DEFAULT_DISPOSITIONS = { - 'True Positive - Suspicious Activity': 'disposition:1', - 'Benign Positive - Suspicious But Expected': 'disposition:2', - 'False Positive - Incorrect Analytic Logic': 'disposition:3', - 'False Positive - Inaccurate Data': 'disposition:4', - 'Other': 'disposition:5', - 'Undetermined': 'disposition:6' + "True Positive - Suspicious Activity": "disposition:1", + "Benign Positive - Suspicious But Expected": "disposition:2", + "False Positive - Incorrect Analytic Logic": "disposition:3", + "False Positive - Inaccurate Data": "disposition:4", + "Other": "disposition:5", + "Undetermined": "disposition:6", } # =========== Mirroring Mechanism Globals =========== -MIRROR_DIRECTION = { - 'None': None, - 'Incoming': 'In', - 'Outgoing': 'Out', - 'Incoming And Outgoing': 'Both' -} -OUTGOING_MIRRORED_FIELDS = ['comment', 'status', 'owner', 'urgency', 'reviewer', 'disposition'] +MIRROR_DIRECTION = {"None": None, "Incoming": "In", "Outgoing": "Out", "Incoming And Outgoing": "Both"} +OUTGOING_MIRRORED_FIELDS = ["comment", "status", "owner", "urgency", "reviewer", "disposition"] # =========== Enrichment Mechanism Globals =========== -ENABLED_ENRICHMENTS = params.get('enabled_enrichments', []) - -DRILLDOWN_ENRICHMENT = 'Drilldown' -ASSET_ENRICHMENT = 'Asset' -IDENTITY_ENRICHMENT = 'Identity' -SUBMITTED_NOTABLES = 'submitted_notables' -EVENT_ID = 'event_id' -RULE_ID = 'rule_id' -JOB_CREATION_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f' -NOT_YET_SUBMITTED_NOTABLES = 'not_yet_submitted_notables' +ENABLED_ENRICHMENTS = params.get("enabled_enrichments", []) + +DRILLDOWN_ENRICHMENT = "Drilldown" +ASSET_ENRICHMENT = "Asset" +IDENTITY_ENRICHMENT = "Identity" +SUBMITTED_NOTABLES = "submitted_notables" +EVENT_ID = "event_id" +RULE_ID = "rule_id" +JOB_CREATION_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" +NOT_YET_SUBMITTED_NOTABLES = "not_yet_submitted_notables" INFO_MIN_TIME = "info_min_time" INFO_MAX_TIME = "info_max_time" -INCIDENTS = 'incidents' -MIRRORED_ENRICHING_NOTABLES = 'MIRRORED_ENRICHING_NOTABLES' -DUMMY = 'dummy' -NOTABLE = 'notable' -ENRICHMENTS = 'enrichments' +INCIDENTS = "incidents" +MIRRORED_ENRICHING_NOTABLES = "MIRRORED_ENRICHING_NOTABLES" +DUMMY = "dummy" +NOTABLE = "notable" +ENRICHMENTS = "enrichments" MAX_HANDLE_NOTABLES = 20 MAX_SUBMIT_NOTABLES = 30 -CACHE = 'cache' -STATUS = 'status' -DATA = 'data' -TYPE = 'type' -ID = 'id' -CREATION_TIME = 'creation_time' -QUERY_NAME = 'query_name' -QUERY_SEARCH = 'query_search' -INCIDENT_CREATED = 'incident_created' +CACHE = "cache" +STATUS = "status" +DATA = "data" +TYPE = "type" +ID = "id" +CREATION_TIME = "creation_time" +QUERY_NAME = "query_name" +QUERY_SEARCH = "query_search" +INCIDENT_CREATED = "incident_created" DRILLDOWN_REGEX = r'([^\s\$]+)\s*=\s*"?(\$[^\s\$\\]+\$)"?|"?(\$[^\s\$\\]+\$)"?' 
ENRICHMENT_TYPE_TO_ENRICHMENT_STATUS = { - DRILLDOWN_ENRICHMENT: 'successful_drilldown_enrichment', - ASSET_ENRICHMENT: 'successful_asset_enrichment', - IDENTITY_ENRICHMENT: 'successful_identity_enrichment' + DRILLDOWN_ENRICHMENT: "successful_drilldown_enrichment", + ASSET_ENRICHMENT: "successful_asset_enrichment", + IDENTITY_ENRICHMENT: "successful_identity_enrichment", } -COMMENT_MIRRORED_FROM_XSOAR = 'Mirrored from Cortex XSOAR' -USER_RELATED_FIELDS = ['user', 'src_user'] +COMMENT_MIRRORED_FROM_XSOAR = "Mirrored from Cortex XSOAR" +USER_RELATED_FIELDS = ["user", "src_user"] # =========== Not Missing Events Mechanism Globals =========== -CUSTOM_ID = 'custom_id' -OCCURRED = 'occurred' -INDEX_TIME = 'index_time' -TIME_IS_MISSING = 'time_is_missing' +CUSTOM_ID = "custom_id" +OCCURRED = "occurred" +INDEX_TIME = "index_time" +TIME_IS_MISSING = "time_is_missing" # =========== Enrich User Mechanism ============ class UserMappingObject: def __init__( - self, service: client.Service, + self, + service: client.Service, should_map_user: bool, - table_name: str = 'splunk_xsoar_users', - xsoar_user_column_name: str = 'xsoar_user', - splunk_user_column_name: str = 'splunk_user' + table_name: str = "splunk_xsoar_users", + xsoar_user_column_name: str = "xsoar_user", + splunk_user_column_name: str = "splunk_user", ): self.service = service self.should_map = should_map_user @@ -116,71 +116,71 @@ def __init__( self._kvstore_data: list[dict[str, Any]] = [] def _get_record(self, col: str, value_to_search: str): - """ Gets the records with the value found in the relevant column. """ + """Gets the records with the value found in the relevant column.""" if not self._kvstore_data: - demisto.debug('UserMapping: kvstore data empty, initialize it') + demisto.debug("UserMapping: kvstore data empty, initialize it") kvstore: client.KVStoreCollection = self.service.kvstore[self.table_name] self._kvstore_data = kvstore.data.query() - demisto.debug(f'UserMapping: {self._kvstore_data=}') + demisto.debug(f"UserMapping: {self._kvstore_data=}") return filter(lambda row: row.get(col) == value_to_search, self._kvstore_data) def get_xsoar_user_by_splunk(self, splunk_user): - record = list(self._get_record(self.splunk_user_column_name, splunk_user)) if not record: - demisto.error( f"UserMapping: Could not find xsoar user matching splunk's {splunk_user}. " - f"Consider adding it to the {self.table_name} lookup.") - return '' + f"Consider adding it to the {self.table_name} lookup." + ) + return "" # assuming username is unique, so only one record is returned. xsoar_user = record[0].get(self.xsoar_user_column_name) if not xsoar_user: demisto.error( - f"UserMapping: Xsoar user matching splunk's {splunk_user} is empty. Fix the record in {self.table_name} lookup.") - return '' + f"UserMapping: Xsoar user matching splunk's {splunk_user} is empty. Fix the record in {self.table_name} lookup." + ) + return "" return xsoar_user def get_splunk_user_by_xsoar(self, xsoar_user, map_missing=True): - record = list(self._get_record(self.xsoar_user_column_name, xsoar_user)) if not record: demisto.error( f"UserMapping: Could not find splunk user matching xsoar's {xsoar_user}. " - f"Consider adding it to the {self.table_name} lookup.") - return 'unassigned' if map_missing else None + f"Consider adding it to the {self.table_name} lookup." + ) + return "unassigned" if map_missing else None # assuming username is unique, so only one record is returned. 
splunk_user = record[0].get(self.splunk_user_column_name) if not splunk_user: demisto.error( - f"UserMapping: Splunk user matching Xsoar's {xsoar_user} is empty. Fix the record in {self.table_name} lookup.") - return 'unassigned' if map_missing else None + f"UserMapping: Splunk user matching Xsoar's {xsoar_user} is empty. Fix the record in {self.table_name} lookup." + ) + return "unassigned" if map_missing else None return splunk_user def get_splunk_user_by_xsoar_command(self, args): - xsoar_users = argToList(args.get('xsoar_username')) - map_missing = argToBoolean(args.get('map_missing', True)) + xsoar_users = argToList(args.get("xsoar_username")) + map_missing = argToBoolean(args.get("map_missing", True)) outputs = [] for user in xsoar_users: splunk_user = self.get_splunk_user_by_xsoar(user, map_missing=map_missing) if user else None outputs.append( - {'XsoarUser': user, - 'SplunkUser': splunk_user or 'Could not map splunk user, Check logs for more info.'}) + {"XsoarUser": user, "SplunkUser": splunk_user or "Could not map splunk user, Check logs for more info."} + ) return CommandResults( outputs=outputs, - outputs_prefix='Splunk.UserMapping', - readable_output=tableToMarkdown('Xsoar-Splunk Username Mapping', outputs, - headers=['XsoarUser', 'SplunkUser']) + outputs_prefix="Splunk.UserMapping", + readable_output=tableToMarkdown("Xsoar-Splunk Username Mapping", outputs, headers=["XsoarUser", "SplunkUser"]), ) def update_xsoar_user_in_notables(self, notables_data): @@ -192,7 +192,7 @@ def update_xsoar_user_in_notables(self, notables_data): if self.should_map: demisto.debug("UserMapping: instance configured to map Splunk user to XSOAR users, trying to map.") for notable_data in notables_data: - if splunk_user := notable_data.get('owner'): + if splunk_user := notable_data.get("owner"): xsoar_user = self.get_xsoar_user_by_splunk(splunk_user) notable_data["owner"] = xsoar_user demisto.debug( @@ -217,8 +217,8 @@ class SplunkGetModifiedRemoteDataResponse(GetModifiedRemoteDataResponse): def __init__(self, modified_notables_data, entries): self.modified_notables_data = modified_notables_data self.entries = entries - extensive_log(f'mirror-in: updated notables: {self.modified_notables_data}') - extensive_log(f'mirror-in: updated entries: {self.entries}') + extensive_log(f"mirror-in: updated notables: {self.modified_notables_data}") + extensive_log(f"mirror-in: updated entries: {self.entries}") def to_entry(self): """Convert data to entries. @@ -228,18 +228,20 @@ def to_entry(self): """ return [ { - 'EntryContext': {'mirrorRemoteId': data[RULE_ID]}, - 'Contents': data, - 'Type': EntryType.NOTE, - 'ContentsFormat': EntryFormat.JSON} + "EntryContext": {"mirrorRemoteId": data[RULE_ID]}, + "Contents": data, + "Type": EntryType.NOTE, + "ContentsFormat": EntryFormat.JSON, + } for data in self.modified_notables_data ] + self.entries + # =========== Regular Fetch Mechanism =========== def splunk_time_to_datetime(incident_ocurred_time): - incident_time_without_timezone = incident_ocurred_time.split('.')[0] + incident_time_without_timezone = incident_ocurred_time.split(".")[0] return datetime.strptime(incident_time_without_timezone, SPLUNK_TIME_FORMAT) @@ -270,32 +272,33 @@ def create_incident_custom_id(incident: dict[str, Any]): str: The custom incident ID. 
""" incident_raw_data = json.loads(incident["rawJSON"]) - fields_to_add = ['_cd', 'index', '_time', '_indextime', '_raw'] - fields_supplied_by_user = demisto.params().get('unique_id_fields', '') + fields_to_add = ["_cd", "index", "_time", "_indextime", "_raw"] + fields_supplied_by_user = demisto.params().get("unique_id_fields", "") fields_supplied_by_user = fields_supplied_by_user or "" - fields_to_add.extend(fields_supplied_by_user.split(',')) + fields_to_add.extend(fields_supplied_by_user.split(",")) - incident_custom_id = '___' + incident_custom_id = "___" for field_name in fields_to_add: if field_name in incident_raw_data: - incident_custom_id += f'{field_name}___{incident_raw_data[field_name]}' + incident_custom_id += f"{field_name}___{incident_raw_data[field_name]}" elif field_name in incident: - incident_custom_id += f'{field_name}___{incident[field_name]}' + incident_custom_id += f"{field_name}___{incident[field_name]}" - extensive_log(f'[SplunkPy] ID after all fields were added: {incident_custom_id}') + extensive_log(f"[SplunkPy] ID after all fields were added: {incident_custom_id}") - unique_id = hashlib.md5(incident_custom_id.encode('utf-8')).hexdigest() # nosec # guardrails-disable-line - extensive_log(f'[SplunkPy] Found incident ID is: {unique_id}') + unique_id = hashlib.md5(incident_custom_id.encode("utf-8")).hexdigest() # nosec # guardrails-disable-line + extensive_log(f"[SplunkPy] Found incident ID is: {unique_id}") return unique_id def extensive_log(message): - if demisto.params().get('extensive_logs', False): + if demisto.params().get("extensive_logs", False): demisto.debug(message) -def remove_irrelevant_incident_ids(last_run_fetched_ids: dict[str, dict[str, str]], window_start_time: str, - window_end_time: str) -> dict[str, Any]: +def remove_irrelevant_incident_ids( + last_run_fetched_ids: dict[str, dict[str, str]], window_start_time: str, window_end_time: str +) -> dict[str, Any]: """Remove all the IDs of the fetched incidents that are no longer in the fetch window, to prevent our last run object from becoming too large. 
@@ -309,36 +312,38 @@ def remove_irrelevant_incident_ids(last_run_fetched_ids: dict[str, dict[str, str """ new_last_run_fetched_ids: dict[str, dict[str, str]] = {} window_start_datetime = datetime.strptime(window_start_time, SPLUNK_TIME_FORMAT) - demisto.debug(f'Beginning to filter irrelevant IDs with respect to window {window_start_time} - {window_end_time}') + demisto.debug(f"Beginning to filter irrelevant IDs with respect to window {window_start_time} - {window_end_time}") for incident_id, incident_occurred_time in last_run_fetched_ids.items(): # We divided the handling of the last fetched IDs since we changed the handling of them # The first implementation caused IDs to be removed from the cache, even though they were still relevant # The second implementation now only removes the cached IDs that are not relevant to the fetch window - extensive_log(f'[SplunkPy] Checking if {incident_id} is relevant to fetch window') + extensive_log(f"[SplunkPy] Checking if {incident_id} is relevant to fetch window") if isinstance(incident_occurred_time, dict): # To handle last fetched IDs # Last fetched IDs hold the occurred time that they were seen, which is basically the end time of the fetch window # they were fetched in, and will be deleted from the last fetched IDs once they pass the fetch window - incident_window_end_datetime = datetime.strptime(incident_occurred_time.get('occurred_time', ''), SPLUNK_TIME_FORMAT) + incident_window_end_datetime = datetime.strptime(incident_occurred_time.get("occurred_time", ""), SPLUNK_TIME_FORMAT) if incident_window_end_datetime >= window_start_datetime: # We keep the incident, since it is still in the fetch window - extensive_log(f'[SplunkPy] Keeping {incident_id} as part of the last fetched IDs.' - f' {incident_window_end_datetime=}') + extensive_log( + f"[SplunkPy] Keeping {incident_id} as part of the last fetched IDs." f" {incident_window_end_datetime=}" + ) new_last_run_fetched_ids[incident_id] = incident_occurred_time else: - extensive_log(f'[SplunkPy] Removing {incident_id} from the last fetched IDs. {incident_window_end_datetime=}') + extensive_log(f"[SplunkPy] Removing {incident_id} from the last fetched IDs. {incident_window_end_datetime=}") else: # To handle last fetched IDs before version 3_1_20 # Last fetched IDs held the epoch time of their appearance, they will now hold the # new format, with an occurred time equal to the end of the window - extensive_log(f'[SplunkPy] {incident_id} was saved using old implementation,' - f' with value {incident_occurred_time}, keeping') - new_last_run_fetched_ids[incident_id] = {'occurred_time': window_end_time} + extensive_log( + f"[SplunkPy] {incident_id} was saved using old implementation," f" with value {incident_occurred_time}, keeping" + ) + new_last_run_fetched_ids[incident_id] = {"occurred_time": window_end_time} return new_last_run_fetched_ids def enforce_look_behind_time(last_run_time, now, look_behind_time): - """ Verifies that the start time of the fetch is at X minutes before + """Verifies that the start time of the fetch is at X minutes before the end time, X being the number of minutes specified in the look_behind parameter. The reason this is needed is to ensure that events that have a significant difference between their index time and occurrence time in Splunk are still fetched and are not missed. 
@@ -356,19 +361,17 @@ def enforce_look_behind_time(last_run_time, now, look_behind_time): now_datetime = datetime.strptime(now, SPLUNK_TIME_FORMAT) if now_datetime - last_run_datetime < timedelta(minutes=look_behind_time): time_before_given_look_behind_datetime = now_datetime - timedelta(minutes=look_behind_time) - return datetime.strftime( - time_before_given_look_behind_datetime, SPLUNK_TIME_FORMAT - ) + return datetime.strftime(time_before_given_look_behind_datetime, SPLUNK_TIME_FORMAT) return last_run_time def get_fetch_start_times(params, service, last_run_earliest_time, occurence_time_look_behind): current_time_for_fetch = datetime.utcnow() - if timezone_ := params.get('timezone'): + if timezone_ := params.get("timezone"): current_time_for_fetch = current_time_for_fetch + timedelta(minutes=int(timezone_)) now = current_time_for_fetch.strftime(SPLUNK_TIME_FORMAT) - if params.get('useSplunkTime'): + if params.get("useSplunkTime"): now = get_current_splunk_time(service) current_time_in_splunk = datetime.strptime(now, SPLUNK_TIME_FORMAT) current_time_for_fetch = current_time_in_splunk @@ -377,7 +380,7 @@ def get_fetch_start_times(params, service, last_run_earliest_time, occurence_tim fetch_time_in_minutes = parse_time_to_minutes() start_time_for_fetch = current_time_for_fetch - timedelta(minutes=fetch_time_in_minutes) last_run_earliest_time = start_time_for_fetch.strftime(SPLUNK_TIME_FORMAT) - extensive_log(f'[SplunkPy] SplunkPy last run is None. Last run earliest time is: {last_run_earliest_time}') + extensive_log(f"[SplunkPy] SplunkPy last run is None. Last run earliest time is: {last_run_earliest_time}") occured_start_time = enforce_look_behind_time(last_run_earliest_time, now, occurence_time_look_behind) @@ -388,44 +391,50 @@ def build_fetch_kwargs(params, occured_start_time, latest_time, search_offset): occurred_start_time_fieldname = params.get("earliest_occurrence_time_fieldname", "earliest_time") occurred_end_time_fieldname = params.get("latest_occurrence_time_fieldname", "latest_time") - extensive_log(f'[SplunkPy] occurred_start_time_fieldname: {occurred_start_time_fieldname}') - extensive_log(f'[SplunkPy] occured_start_time: {occured_start_time}') + extensive_log(f"[SplunkPy] occurred_start_time_fieldname: {occurred_start_time_fieldname}") + extensive_log(f"[SplunkPy] occured_start_time: {occured_start_time}") return { occurred_start_time_fieldname: occured_start_time, occurred_end_time_fieldname: latest_time, "count": FETCH_LIMIT, - 'offset': search_offset, + "offset": search_offset, "output_mode": OUTPUT_MODE_JSON, } def build_fetch_query(params): - fetch_query = params['fetchQuery'] + fetch_query = params["fetchQuery"] - if (extract_fields := params.get('extractFields')): - for field in extract_fields.split(','): + if extract_fields := params.get("extractFields"): + for field in extract_fields.split(","): field_trimmed = field.strip() - fetch_query = f'{fetch_query} | eval {field_trimmed}={field_trimmed}' + fetch_query = f"{fetch_query} | eval {field_trimmed}={field_trimmed}" return fetch_query -def fetch_notables(service: client.Service, mapper: UserMappingObject, comment_tag_to_splunk: str, comment_tag_from_splunk: str, - cache_object: "Cache" = None, enrich_notables=False): +def fetch_notables( + service: client.Service, + mapper: UserMappingObject, + comment_tag_to_splunk: str, + comment_tag_from_splunk: str, + cache_object: "Cache" = None, + enrich_notables=False, +): last_run_data = demisto.getLastRun() params = demisto.params() if not last_run_data: - 
extensive_log('[SplunkPy] SplunkPy first run') + extensive_log("[SplunkPy] SplunkPy first run") - last_run_earliest_time = last_run_data and last_run_data.get('time') - last_run_latest_time = last_run_data and last_run_data.get('latest_time') - extensive_log(f'[SplunkPy] SplunkPy last run is:\n {last_run_data}') + last_run_earliest_time = last_run_data and last_run_data.get("time") + last_run_latest_time = last_run_data and last_run_data.get("latest_time") + extensive_log(f"[SplunkPy] SplunkPy last run is:\n {last_run_data}") - search_offset = last_run_data.get('offset', 0) + search_offset = last_run_data.get("offset", 0) - occurred_look_behind = int(params.get('occurrence_look_behind', 15) or 15) - extensive_log(f'[SplunkPy] occurrence look behind is: {occurred_look_behind}') + occurred_look_behind = int(params.get("occurrence_look_behind", 15) or 15) + extensive_log(f"[SplunkPy] occurrence look behind is: {occurred_look_behind}") occured_start_time, now = get_fetch_start_times(params, service, last_run_earliest_time, occurred_look_behind) @@ -433,37 +442,37 @@ def fetch_notables(service: client.Service, mapper: UserMappingObject, comment_t latest_time = last_run_latest_time or now kwargs_oneshot = build_fetch_kwargs(params, occured_start_time, latest_time, search_offset) fetch_query = build_fetch_query(params) - last_run_fetched_ids: dict[str, Any] = last_run_data.get('found_incidents_ids', {}) - if late_indexed_pagination := last_run_data.get('late_indexed_pagination'): + last_run_fetched_ids: dict[str, Any] = last_run_data.get("found_incidents_ids", {}) + if late_indexed_pagination := last_run_data.get("late_indexed_pagination"): # This is for handling the case when events get indexed late, and inserted in pages # that we have already went through window = f'{kwargs_oneshot.get("earliest_time")}-{kwargs_oneshot.get("latest_time")}' - demisto.debug(f'[SplunkPy] additional fetch for the window {window} to check for late indexed incidents') + demisto.debug(f"[SplunkPy] additional fetch for the window {window} to check for late indexed incidents") if last_run_fetched_ids: ids_to_exclude = [f'"{fetched_id}"' for fetched_id in last_run_fetched_ids] exclude_id_where = f'where not event_id in ({",".join(ids_to_exclude)})' - fetch_query = f'{fetch_query} | {exclude_id_where}' - kwargs_oneshot['offset'] = 0 + fetch_query = f"{fetch_query} | {exclude_id_where}" + kwargs_oneshot["offset"] = 0 - demisto.debug(f'[SplunkPy] fetch query = {fetch_query}') - demisto.debug(f'[SplunkPy] oneshot query args = {kwargs_oneshot}') + demisto.debug(f"[SplunkPy] fetch query = {fetch_query}") + demisto.debug(f"[SplunkPy] oneshot query args = {kwargs_oneshot}") oneshotsearch_results = service.jobs.oneshot(fetch_query, **kwargs_oneshot) reader = results.JSONResultsReader(oneshotsearch_results) - error_message = '' + error_message = "" incidents = [] notables = [] incident_ids_to_add = [] num_of_dropped = 0 for item in reader: if handle_message(item): - if 'Error' in str(item.message) or 'error' in str(item.message): - error_message = f'{error_message}\n{item.message}' + if "Error" in str(item.message) or "error" in str(item.message): + error_message = f"{error_message}\n{item.message}" continue - extensive_log(f'[SplunkPy] Incident data before parsing to notable: {item}') + extensive_log(f"[SplunkPy] Incident data before parsing to notable: {item}") notable_incident = Notable(data=item) inc = notable_incident.to_incident(mapper, comment_tag_to_splunk, comment_tag_from_splunk) - extensive_log(f'[SplunkPy] Incident 
data after parsing to notable: {inc}') + extensive_log(f"[SplunkPy] Incident data after parsing to notable: {inc}") custom_inc_id = create_incident_custom_id(inc) incident_id = notable_incident.id or custom_inc_id @@ -471,26 +480,25 @@ def fetch_notables(service: client.Service, mapper: UserMappingObject, comment_t incident_ids_to_add.append(incident_id) incidents.append(inc) notables.append(notable_incident) - extensive_log(f'[SplunkPy] - Fetched incident {incident_id} to be created.') + extensive_log(f"[SplunkPy] - Fetched incident {incident_id} to be created.") else: num_of_dropped += 1 - extensive_log(f'[SplunkPy] - Dropped incident {incident_id} due to duplication.') + extensive_log(f"[SplunkPy] - Dropped incident {incident_id} due to duplication.") if error_message and not incident_ids_to_add: - raise DemistoException(f'Failed to fetch incidents, check the provided query in Splunk web search - {error_message}') - extensive_log(f'[SplunkPy] Size of last_run_fetched_ids before adding new IDs: {len(last_run_fetched_ids)}') + raise DemistoException(f"Failed to fetch incidents, check the provided query in Splunk web search - {error_message}") + extensive_log(f"[SplunkPy] Size of last_run_fetched_ids before adding new IDs: {len(last_run_fetched_ids)}") for incident_id in incident_ids_to_add: - last_run_fetched_ids[incident_id] = {'occurred_time': latest_time} - extensive_log(f'[SplunkPy] Size of last_run_fetched_ids after adding new IDs: {len(last_run_fetched_ids)}') + last_run_fetched_ids[incident_id] = {"occurred_time": latest_time} + extensive_log(f"[SplunkPy] Size of last_run_fetched_ids after adding new IDs: {len(last_run_fetched_ids)}") # New way to remove IDs last_run_fetched_ids = remove_irrelevant_incident_ids(last_run_fetched_ids, occured_start_time, latest_time) - extensive_log('[SplunkPy] Size of last_run_fetched_ids after ' - f'removing old IDs: {len(last_run_fetched_ids)}') - extensive_log(f'[SplunkPy] SplunkPy - incidents fetched on last run = {last_run_fetched_ids}') + extensive_log("[SplunkPy] Size of last_run_fetched_ids after " f"removing old IDs: {len(last_run_fetched_ids)}") + extensive_log(f"[SplunkPy] SplunkPy - incidents fetched on last run = {last_run_fetched_ids}") - demisto.debug(f'SplunkPy - total number of new incidents found is: {len(incidents)}') - demisto.debug(f'SplunkPy - total number of dropped incidents is: {num_of_dropped}') + demisto.debug(f"SplunkPy - total number of new incidents found is: {len(incidents)}") + demisto.debug(f"SplunkPy - total number of dropped incidents is: {num_of_dropped}") if not enrich_notables or not cache_object: demisto.incidents(incidents) @@ -505,39 +513,47 @@ def fetch_notables(service: client.Service, mapper: UserMappingObject, comment_t # We didn't get any new incidents or got less than limit, # so the next run's earliest time will be the latest_time from this iteration if (len(incidents) + num_of_dropped) < FETCH_LIMIT: - demisto.debug(f'[SplunkPy] Number of fetched incidents = {len(incidents)}, dropped = {num_of_dropped}. Sum is less' - f' than {FETCH_LIMIT=}. Starting new fetch') + demisto.debug( + f"[SplunkPy] Number of fetched incidents = {len(incidents)}, dropped = {num_of_dropped}. Sum is less" + f" than {FETCH_LIMIT=}. 
Starting new fetch" + ) next_run_earliest_time = latest_time new_last_run = { - 'time': next_run_earliest_time, - 'latest_time': None, - 'offset': 0, - 'found_incidents_ids': last_run_fetched_ids + "time": next_run_earliest_time, + "latest_time": None, + "offset": 0, + "found_incidents_ids": last_run_fetched_ids, } # we get limit notables from splunk # we should fetch the entire queue with offset - so set the offset, time and latest_time for the next run else: - demisto.debug(f'[SplunkPy] Number of fetched incidents = {len(incidents)}, dropped = {num_of_dropped}. Sum is' - f' equal/greater than {FETCH_LIMIT=}. Continue pagination') + demisto.debug( + f"[SplunkPy] Number of fetched incidents = {len(incidents)}, dropped = {num_of_dropped}. Sum is" + f" equal/greater than {FETCH_LIMIT=}. Continue pagination" + ) new_last_run = { - 'time': occured_start_time, - 'latest_time': latest_time, - 'offset': search_offset + FETCH_LIMIT, - 'found_incidents_ids': last_run_fetched_ids + "time": occured_start_time, + "latest_time": latest_time, + "offset": search_offset + FETCH_LIMIT, + "found_incidents_ids": last_run_fetched_ids, } - new_last_run['late_indexed_pagination'] = False + new_last_run["late_indexed_pagination"] = False # Need to fetch again this "window" to be sure no "late" indexed events are missed - if num_of_dropped >= FETCH_LIMIT and '`notable`' in fetch_query: + if num_of_dropped >= FETCH_LIMIT and "`notable`" in fetch_query: demisto.debug('Need to fetch this "window" again to make sure no "late" indexed events are missed') - new_last_run['late_indexed_pagination'] = True + new_last_run["late_indexed_pagination"] = True # If we are in the process of checking late indexed events, and len(fetch_incidents) == FETCH_LIMIT, # that means we need to continue the process of checking late indexed events if len(incidents) == FETCH_LIMIT and late_indexed_pagination: - demisto.debug(f'Number of valid incidents equals {FETCH_LIMIT=}, and current fetch checked for late indexed events.' - ' Continue checking for late events') - new_last_run['late_indexed_pagination'] = True - demisto.debug(f'SplunkPy set last run - {new_last_run["time"]=}, {new_last_run["latest_time"]=}, {new_last_run["offset"]=}' - f', late_indexed_pagination={new_last_run.get("late_indexed_pagination")}') + demisto.debug( + f"Number of valid incidents equals {FETCH_LIMIT=}, and current fetch checked for late indexed events." + " Continue checking for late events" + ) + new_last_run["late_indexed_pagination"] = True + demisto.debug( + f'SplunkPy set last run - {new_last_run["time"]=}, {new_last_run["latest_time"]=}, {new_last_run["offset"]=}' + f', late_indexed_pagination={new_last_run.get("late_indexed_pagination")}' + ) last_run_data.update(new_last_run) demisto.setLastRun(last_run_data) @@ -550,17 +566,22 @@ def fetch_incidents(service: client.Service, mapper: UserMappingObject, comment_ # In "Pull from instance" in Classification & Mapping the last run object is empty, integration context # will not be empty because of the enrichment mechanism. 
In regular enriched fetch, we use dummy data # in the last run object to avoid entering this case - demisto.debug('running fetch_incidents_for_mapping') + demisto.debug("running fetch_incidents_for_mapping") fetch_incidents_for_mapping(integration_context) else: - demisto.debug('running run_enrichment_mechanism') + demisto.debug("running run_enrichment_mechanism") run_enrichment_mechanism(service, integration_context, mapper, comment_tag_to_splunk, comment_tag_from_splunk) else: - demisto.debug('enrichments not enabled running fetch_notables') - - fetch_notables(service=service, enrich_notables=False, mapper=mapper, comment_tag_to_splunk=comment_tag_to_splunk, - comment_tag_from_splunk=comment_tag_from_splunk) + demisto.debug("enrichments not enabled running fetch_notables") + + fetch_notables( + service=service, + enrich_notables=False, + mapper=mapper, + comment_tag_to_splunk=comment_tag_to_splunk, + comment_tag_from_splunk=comment_tag_from_splunk, + ) # =========== Regular Fetch Mechanism =========== @@ -568,8 +589,9 @@ def fetch_incidents(service: client.Service, mapper: UserMappingObject, comment_ # =========== Enriching Fetch Mechanism =========== + class Enrichment: - """ A class to represent an Enrichment. Each notable has 3 possible enrichment types: Drilldown, Asset & Identity + """A class to represent an Enrichment. Each notable has 3 possible enrichment types: Drilldown, Asset & Identity Attributes: type (str): The enrichment type. Possible values are: Drilldown, Asset & Identity. @@ -580,14 +602,16 @@ class Enrichment: query_name (str): The enrichment's query name. query_search (str): The enrichment's query search. """ - FAILED = 'Enrichment failed' - EXCEEDED_TIMEOUT = 'Enrichment exceed the given timeout' - IN_PROGRESS = 'Enrichment is in progress' - SUCCESSFUL = 'Enrichment successfully handled' + + FAILED = "Enrichment failed" + EXCEEDED_TIMEOUT = "Enrichment exceed the given timeout" + IN_PROGRESS = "Enrichment is in progress" + SUCCESSFUL = "Enrichment successfully handled" HANDLED = (EXCEEDED_TIMEOUT, FAILED, SUCCESSFUL) - def __init__(self, enrichment_type, status=None, enrichment_id=None, data=None, creation_time=None, - query_name=None, query_search=None): + def __init__( + self, enrichment_type, status=None, enrichment_id=None, data=None, creation_time=None, query_name=None, query_search=None + ): self.type = enrichment_type self.id = enrichment_id self.data = data or [] @@ -598,7 +622,7 @@ def __init__(self, enrichment_type, status=None, enrichment_id=None, data=None, @classmethod def from_job(cls, enrichment_type, job: client.Job, query_name=None, query_search=None): - """ Creates an Enrichment object from Splunk Job object + """Creates an Enrichment object from Splunk Job object Args: enrichment_type (str): The enrichment type @@ -610,14 +634,15 @@ def from_job(cls, enrichment_type, job: client.Job, query_name=None, query_searc The created enrichment (Enrichment) """ if job: - return cls(enrichment_type=enrichment_type, enrichment_id=job["sid"], - query_name=query_name, query_search=query_search) + return cls( + enrichment_type=enrichment_type, enrichment_id=job["sid"], query_name=query_name, query_search=query_search + ) else: return cls(enrichment_type=enrichment_type, status=Enrichment.FAILED) @classmethod def from_json(cls, enrichment_dict): - """ Deserialization method. + """Deserialization method. Args: enrichment_dict (dict): The enrichment dict in JSON format. 
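The status lifecycle encoded by this class is easier to see outside the diff. A minimal, self-contained sketch (simplified to type, status and data; the real class also tracks id, creation_time, query_name and query_search, and pulls job ids from splunklib):

import json

# Status strings mirroring the Enrichment constants above; HANDLED marks terminal states.
IN_PROGRESS = "Enrichment is in progress"
SUCCESSFUL = "Enrichment successfully handled"
FAILED = "Enrichment failed"
EXCEEDED_TIMEOUT = "Enrichment exceed the given timeout"
HANDLED = (EXCEEDED_TIMEOUT, FAILED, SUCCESSFUL)


class EnrichmentSketch:
    def __init__(self, enrichment_type, status=None, data=None):
        self.type = enrichment_type
        self.status = status or IN_PROGRESS  # a freshly submitted enrichment is in progress
        self.data = data or []

    def to_json(self):
        return {"type": self.type, "status": self.status, "data": self.data}

    @classmethod
    def from_json(cls, d):
        return cls(d.get("type"), d.get("status"), d.get("data"))


# The enriching fetch mechanism serializes enrichments into the integration
# context between runs; the round trip must preserve the status.
e = EnrichmentSketch("Drilldown")
restored = EnrichmentSketch.from_json(json.loads(json.dumps(e.to_json())))
assert restored.status == IN_PROGRESS and restored.status not in HANDLED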
@@ -633,12 +658,12 @@ def from_json(cls, enrichment_dict): enrichment_id=enrichment_dict.get(ID), creation_time=enrichment_dict.get(CREATION_TIME), query_name=enrichment_dict.get(QUERY_NAME), - query_search=enrichment_dict.get(QUERY_SEARCH) + query_search=enrichment_dict.get(QUERY_SEARCH), ) class Notable: - """ A class to represent a notable. + """A class to represent a notable. Attributes: data (dict): The notable data. @@ -651,14 +676,23 @@ class Notable: index_time (str): The time the notable have been indexed. """ - def __init__(self, data, enrichments=None, notable_id=None, occurred=None, custom_id=None, index_time=None, - time_is_missing=None, incident_created=None): + def __init__( + self, + data, + enrichments=None, + notable_id=None, + occurred=None, + custom_id=None, + index_time=None, + time_is_missing=None, + incident_created=None, + ): self.data = data self.id = notable_id or self.get_id() self.enrichments = enrichments or [] self.incident_created = incident_created or False self.time_is_missing = time_is_missing or False - self.index_time = index_time or self.data.get('_indextime') + self.index_time = index_time or self.data.get("_indextime") self.occurred = occurred or self.get_occurred() self.custom_id = custom_id or self.create_custom_id() @@ -666,70 +700,70 @@ def get_id(self): if EVENT_ID in self.data: return self.data[EVENT_ID] if ENABLED_ENRICHMENTS: - raise Exception('When using the enrichment mechanism, an event_id field is needed, and thus, ' - 'one must use a fetch query of the following format: search `notable` .......\n' - 'Please re-edit the fetchQuery parameter in the integration configuration, reset ' - 'the fetch mechanism using the splunk-reset-enriching-fetch-mechanism command and ' - 'run the fetch again.') + raise Exception( + "When using the enrichment mechanism, an event_id field is needed, and thus, " + "one must use a fetch query of the following format: search `notable` .......\n" + "Please re-edit the fetchQuery parameter in the integration configuration, reset " + "the fetch mechanism using the splunk-reset-enriching-fetch-mechanism command and " + "run the fetch again." 
+ ) else: return None @staticmethod - def create_incident(notable_data, occurred, mapper: UserMappingObject, comment_tag_to_splunk: str, - comment_tag_from_splunk: str): - rule_title, rule_name = '', '' + def create_incident( + notable_data, occurred, mapper: UserMappingObject, comment_tag_to_splunk: str, comment_tag_from_splunk: str + ): + rule_title, rule_name = "", "" params = demisto.params() - if demisto.get(notable_data, 'rule_title'): - rule_title = notable_data['rule_title'] - if demisto.get(notable_data, 'rule_name'): - rule_name = notable_data['rule_name'] + if demisto.get(notable_data, "rule_title"): + rule_title = notable_data["rule_title"] + if demisto.get(notable_data, "rule_name"): + rule_name = notable_data["rule_name"] incident: dict[str, Any] = {"name": f"{rule_title} : {rule_name}"} - if demisto.get(notable_data, 'urgency'): - incident["severity"] = severity_to_level(notable_data['urgency']) - if demisto.get(notable_data, 'rule_description'): + if demisto.get(notable_data, "urgency"): + incident["severity"] = severity_to_level(notable_data["urgency"]) + if demisto.get(notable_data, "rule_description"): incident["details"] = notable_data["rule_description"] - if ( - notable_data.get("owner") - and mapper.should_map - and (owner := mapper.get_xsoar_user_by_splunk(notable_data["owner"])) - ): + if notable_data.get("owner") and mapper.should_map and (owner := mapper.get_xsoar_user_by_splunk(notable_data["owner"])): incident["owner"] = owner incident["occurred"] = occurred notable_data = parse_notable(notable_data) - notable_data.update({ - 'mirror_instance': demisto.integrationInstance(), - 'mirror_direction': MIRROR_DIRECTION.get(params.get('mirror_direction')), - 'mirror_tags': [comment_tag_from_splunk, comment_tag_to_splunk] - }) + notable_data.update( + { + "mirror_instance": demisto.integrationInstance(), + "mirror_direction": MIRROR_DIRECTION.get(params.get("mirror_direction")), + "mirror_tags": [comment_tag_from_splunk, comment_tag_to_splunk], + } + ) comment_entries = [] labels = [] - if params.get('parseNotableEventsRaw'): - for key, value in rawToDict(notable_data['_raw']).items(): + if params.get("parseNotableEventsRaw"): + for key, value in rawToDict(notable_data["_raw"]).items(): if not isinstance(value, str): value = convert_to_str(value) - labels.append({'type': key, 'value': value}) - if demisto.get(notable_data, 'security_domain'): - labels.append({'type': 'security_domain', 'value': notable_data["security_domain"]}) - if demisto.get(notable_data, 'comment'): - comments = argToList(notable_data.get('comment', [])) + labels.append({"type": key, "value": value}) + if demisto.get(notable_data, "security_domain"): + labels.append({"type": "security_domain", "value": notable_data["security_domain"]}) + if demisto.get(notable_data, "comment"): + comments = argToList(notable_data.get("comment", [])) demisto.debug(f"data to update comment= {comments}") for comment in comments: # Creating a comment - comment_entries.append({ - 'Comment': comment}) - labels.append({'type': 'SplunkComments', 'value': str(comment_entries)}) - incident['labels'] = labels + comment_entries.append({"Comment": comment}) + labels.append({"type": "SplunkComments", "value": str(comment_entries)}) + incident["labels"] = labels if notable_data.get(EVENT_ID): - incident['dbotMirrorId'] = notable_data.get(EVENT_ID) - notable_data['SplunkComments'] = comment_entries + incident["dbotMirrorId"] = notable_data.get(EVENT_ID) + notable_data["SplunkComments"] = comment_entries incident["rawJSON"] = 
json.dumps(notable_data) - incident['SplunkComments'] = comment_entries + incident["SplunkComments"] = comment_entries return incident def to_incident(self, mapper: UserMappingObject, comment_tag_to_splunk: str, comment_tag_from_splunk: str): - """ Gathers all data from all notable's enrichments and return an incident """ + """Gathers all data from all notable's enrichments and return an incident""" self.incident_created = True total_drilldown_searches = self.drilldown_searches_counter() @@ -739,8 +773,12 @@ def to_incident(self, mapper: UserMappingObject, comment_tag_to_splunk: str, com # A notable can have more than one drilldown search enrichment, in that case we keep the searches results in # a list of dictionaries - each dict contains the query detail and the search results of a drilldown search - drilldown_enrichment_details = {"query_name": e.query_name, "query_search": e.query_search, - "query_results": e.data, "enrichment_status": e.status} + drilldown_enrichment_details = { + "query_name": e.query_name, + "query_search": e.query_search, + "query_results": e.data, + "enrichment_status": e.status, + } if not self.data.get(e.type): # first drilldown enrichment result to add - initiate the list self.data[e.type] = [drilldown_enrichment_details] @@ -748,20 +786,25 @@ def to_incident(self, mapper: UserMappingObject, comment_tag_to_splunk: str, com else: # there are previous drilldown enrichments in the notable's data self.data[e.type].append(drilldown_enrichment_details) - if not self.data.get('successful_drilldown_enrichment'): + if not self.data.get("successful_drilldown_enrichment"): # Drilldown enrichment is successful if at least one drilldown search was successful - self.data['successful_drilldown_enrichment'] = e.status == Enrichment.SUCCESSFUL + self.data["successful_drilldown_enrichment"] = e.status == Enrichment.SUCCESSFUL else: # asset enrichment, identity enrichment or a single drilldown enrichment # (return a list to maintain Backwards compatibility) self.data[e.type] = e.data self.data[ENRICHMENT_TYPE_TO_ENRICHMENT_STATUS[e.type]] = e.status == Enrichment.SUCCESSFUL - return self.create_incident(self.data, self.occurred, mapper=mapper, comment_tag_to_splunk=comment_tag_to_splunk, - comment_tag_from_splunk=comment_tag_from_splunk) + return self.create_incident( + self.data, + self.occurred, + mapper=mapper, + comment_tag_to_splunk=comment_tag_to_splunk, + comment_tag_from_splunk=comment_tag_from_splunk, + ) def drilldown_searches_counter(self): - """ Counts the drilldown searches of a notable """ + """Counts the drilldown searches of a notable""" drilldown_search_cnt = 0 for e in self.enrichments: @@ -771,10 +814,11 @@ def drilldown_searches_counter(self): return drilldown_search_cnt def submitted(self) -> bool: - """ Returns an indicator on whether any of the notable's enrichments was submitted or not """ + """Returns an indicator on whether any of the notable's enrichments was submitted or not""" notable_enrichment_types = {e.type for e in self.enrichments} return any(enrichment.status == Enrichment.IN_PROGRESS for enrichment in self.enrichments) and len( - notable_enrichment_types) == len(ENABLED_ENRICHMENTS) + notable_enrichment_types + ) == len(ENABLED_ENRICHMENTS) # Explanation of the conditions: # 1. First condition - if any of the notable's enrichments is 'in progress', it means that it was submitted to splunk. @@ -787,18 +831,20 @@ def submitted(self) -> bool: # have more than one enrichment object - in a case of multiple drilldown searches enrichment). 
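# Illustration of the conditions above (hypothetical values, not part of the
# original code), assuming ENABLED_ENRICHMENTS = ["Drilldown", "Asset"]:
#
#   enrichments = [Enrichment("Drilldown", status=Enrichment.IN_PROGRESS)]
#   submitted() -> False  # no "Asset" enrichment was ever created
#
#   enrichments = [Enrichment("Drilldown", status=Enrichment.FAILED),
#                  Enrichment("Asset", status=Enrichment.IN_PROGRESS)]
#   submitted() -> True   # a job is still running and every enabled type is present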
     def failed_to_submit(self):
-        """ Returns an indicator on whether all notable's enrichments were failed to submit or not """
+        """Returns an indicator on whether all of the notable's enrichments failed to submit or not"""
         notable_enrichment_types = {e.type for e in self.enrichments}
         return all(enrichment.status == Enrichment.FAILED for enrichment in self.enrichments) and len(
-            notable_enrichment_types) == len(ENABLED_ENRICHMENTS)
+            notable_enrichment_types
+        ) == len(ENABLED_ENRICHMENTS)
 
     def handled(self):
-        """ Returns an indicator on whether all notable's enrichments were handled or not """
+        """Returns an indicator on whether all of the notable's enrichments were handled or not"""
         return all(enrichment.status in Enrichment.HANDLED for enrichment in self.enrichments) or any(
-            enrichment.status == Enrichment.EXCEEDED_TIMEOUT for enrichment in self.enrichments)
+            enrichment.status == Enrichment.EXCEEDED_TIMEOUT for enrichment in self.enrichments
+        )
 
     def get_submitted_enrichments(self):
-        """ Returns indicators on whether each enrichment was submitted/failed or not initiated """
+        """Returns indicators on whether each enrichment was submitted/failed or was not initiated"""
        submitted_drilldown, submitted_asset, submitted_identity = False, False, False
 
         for enrichment in self.enrichments:
@@ -812,35 +858,35 @@ def get_submitted_enrichments(self):
         return submitted_drilldown, submitted_asset, submitted_identity
 
     def get_occurred(self):
-        """ Returns the occurred time, if not exists in data, returns the current fetch time """
-        if '_time' in self.data:
-            notable_occurred = self.data['_time']
+        """Returns the occurred time; if it does not exist in the data, returns the current fetch time"""
+        if "_time" in self.data:
+            notable_occurred = self.data["_time"]
         else:
             # Use-cases where fetching non-notables from Splunk
-            notable_occurred = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.0+00:00')
+            notable_occurred = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.0+00:00")
             self.time_is_missing = True
-            demisto.debug(f'\n\n occurred time in else: {notable_occurred} \n\n')
+            demisto.debug(f"\n\n occurred time in else: {notable_occurred} \n\n")
 
         return notable_occurred
 
     def create_custom_id(self):
-        """ Generates a custom ID for a given notable """
+        """Generates a custom ID for a given notable"""
         if self.id:
             return self.id
 
-        notable_raw_data = self.data.get('_raw', '')
-        raw_hash = hashlib.md5(notable_raw_data.encode('utf-8')).hexdigest()  # nosec  # guardrails-disable-line
+        notable_raw_data = self.data.get("_raw", "")
+        raw_hash = hashlib.md5(notable_raw_data.encode("utf-8")).hexdigest()  # nosec  # guardrails-disable-line
 
         if self.time_is_missing and self.index_time:
-            notable_custom_id = f'{self.index_time}_{raw_hash}'  # index_time stays in epoch to differentiate
-            demisto.debug('Creating notable custom id using the index time')
+            notable_custom_id = f"{self.index_time}_{raw_hash}"  # index_time stays in epoch to differentiate
+            demisto.debug("Creating notable custom id using the index time")
         else:
-            notable_custom_id = f'{self.occurred}_{raw_hash}'
+            notable_custom_id = f"{self.occurred}_{raw_hash}"
 
         return notable_custom_id
 
     def is_enrichment_process_exceeding_timeout(self, enrichment_timeout):
-        """ Checks whether an enrichment process has exceeded timeout or not
+        """Checks whether an enrichment process has exceeded the timeout or not
 
         Args:
             enrichment_timeout (int): The timeout for the enrichment process
@@ -861,7 +907,7 @@ def is_enrichment_process_exceeding_timeout(self, enrichment_timeout):
 
     @classmethod
     def from_json(cls, notable_dict):
-        
""" Deserialization method. + """Deserialization method. Args: notable_dict: The notable dict in JSON format. @@ -877,12 +923,12 @@ def from_json(cls, notable_dict): occurred=notable_dict.get(OCCURRED), time_is_missing=notable_dict.get(TIME_IS_MISSING), index_time=notable_dict.get(INDEX_TIME), - incident_created=notable_dict.get(INCIDENT_CREATED) + incident_created=notable_dict.get(INCIDENT_CREATED), ) class Cache: - """ A class to represent the cache for the enriching fetch mechanism. + """A class to represent the cache for the enriching fetch mechanism. Attributes: not_yet_submitted_notables (list): The list of all notables that were fetched but not yet submitted. @@ -900,7 +946,7 @@ def done_handling(self): return not self.submitted_notables def organize(self): - """ This function is designated to handle unexpected behaviors in the enrichment mechanism. + """This function is designated to handle unexpected behaviors in the enrichment mechanism. E.g. Connection error, instance disabling, etc... It re-organizes the cache object to the correct state of the mechanism when the exception was caught. If there are notables that were handled but the mechanism didn't create an incident for them, it returns them. @@ -936,7 +982,7 @@ def organize(self): @classmethod def from_json(cls, cache_dict): - """ Deserialization method. + """Deserialization method. Args: cache_dict: The cache dict in JSON format. @@ -946,7 +992,7 @@ def from_json(cls, cache_dict): """ return cls( not_yet_submitted_notables=list(map(Notable.from_json, cache_dict.get(NOT_YET_SUBMITTED_NOTABLES, []))), - submitted_notables=list(map(Notable.from_json, cache_dict.get(SUBMITTED_NOTABLES, []))) + submitted_notables=list(map(Notable.from_json, cache_dict.get(SUBMITTED_NOTABLES, []))), ) @classmethod @@ -960,7 +1006,7 @@ def dump_to_integration_context(self): def get_fields_query_part(notable_data, prefix, fields, raw_dict=None, add_backslash=False): - """ Given the fields to search for in the notables and the prefix, creates the query part for splunk search. + """Given the fields to search for in the notables and the prefix, creates the query part for splunk search. For example: if fields are ["user"], and the value of the "user" fields in the notable is ["u1", "u2"], and the prefix is "identity", the function returns: (identity="u1" OR identity="u2") @@ -974,12 +1020,12 @@ def get_fields_query_part(notable_data, prefix, fields, raw_dict=None, add_backs Returns: The query part """ if not raw_dict: - raw_dict = rawToDict(notable_data.get('_raw', '')) + raw_dict = rawToDict(notable_data.get("_raw", "")) raw_list: list = [] for field in fields: raw_list += argToList(notable_data.get(field, "")) + argToList(raw_dict.get(field, "")) if add_backslash: - raw_list = [item.replace('\\', '\\\\') for item in raw_list] + raw_list = [item.replace("\\", "\\\\") for item in raw_list] raw_list = [f"""{prefix}="{item.strip('"')}\"""" for item in raw_list] if not raw_list: @@ -991,7 +1037,7 @@ def get_fields_query_part(notable_data, prefix, fields, raw_dict=None, add_backs def get_notable_field_and_value(raw_field, notable_data, raw=None): - """ Gets the value by the name of the raw_field. We don't search for equivalence because raw field + """Gets the value by the name of the raw_field. We don't search for equivalence because raw field can be "threat_match_field|s" while the field is "threat_match_field". 
Args: @@ -1003,19 +1049,19 @@ def get_notable_field_and_value(raw_field, notable_data, raw=None): """ if not raw: - raw = rawToDict(notable_data.get('_raw', '')) + raw = rawToDict(notable_data.get("_raw", "")) for field in notable_data: if field in raw_field: return field, notable_data[field] for field in raw: if field in raw_field: return field, raw[field] - demisto.error(f'Field {raw_field} was not found in the notable.') + demisto.error(f"Field {raw_field} was not found in the notable.") return "", "" def build_drilldown_search(notable_data, search, raw_dict, is_query_name=False): - """ Replaces all needed fields in a drilldown search query, or a search query name + """Replaces all needed fields in a drilldown search query, or a search query name Args: notable_data (dict): The notable data search (str): The drilldown search query @@ -1030,11 +1076,11 @@ def build_drilldown_search(notable_data, search, raw_dict, is_query_name=False): for match in re.finditer(DRILLDOWN_REGEX, search): groups = match.groups() prefix = groups[0] - raw_field = (groups[1] or groups[2]).strip('$') + raw_field = (groups[1] or groups[2]).strip("$") field, replacement = get_notable_field_and_value(raw_field, notable_data, raw_dict) if not field and not replacement: if not is_query_name: - demisto.error(f'Failed building drilldown search query. Field {raw_field} was not found in the notable.') + demisto.error(f"Failed building drilldown search query. Field {raw_field} was not found in the notable.") return "" if prefix: @@ -1048,7 +1094,7 @@ def build_drilldown_search(notable_data, search, raw_dict, is_query_name=False): start = match.end() searchable_search.append(search[start:]) # Handling the tail of the query - parsed_query = ''.join(searchable_search) + parsed_query = "".join(searchable_search) demisto.debug(f"Parsed query is: {parsed_query}") @@ -1056,7 +1102,7 @@ def build_drilldown_search(notable_data, search, raw_dict, is_query_name=False): def get_drilldown_timeframe(notable_data, raw) -> tuple[str, str]: - """ Sets the drilldown search timeframe data. + """Sets the drilldown search timeframe data. Args: notable_data (dict): The notable @@ -1086,7 +1132,7 @@ def get_drilldown_timeframe(notable_data, raw) -> tuple[str, str]: def escape_invalid_chars_in_drilldown_json(drilldown_search): - """ Goes over the drilldown search, and replace the unescaped or invalid chars. + """Goes over the drilldown search, and replace the unescaped or invalid chars. Args: drilldown_search (str): The drilldown search. @@ -1095,21 +1141,21 @@ def escape_invalid_chars_in_drilldown_json(drilldown_search): str: The escaped drilldown search. """ # escape the " of string from the form of 'some_key="value"' which the " char are invalid in json value - for unescaped_val in re.findall(r'(?<==)\"[^\"]*\"', drilldown_search): + for unescaped_val in re.findall(r"(?<==)\"[^\"]*\"", drilldown_search): escaped_val = unescaped_val.replace('"', '\\"') drilldown_search = drilldown_search.replace(unescaped_val, escaped_val) # replace the new line (\n) with in the IN (...) 
condition with ',' # Splunk replace the value of some multiline fields to the value which contain \n # due to the 'expandtoken' macro - for multiline_val in re.findall(r'(?<=in|IN)\s*\([^\)]*\n[^\)]*\)', drilldown_search): - csv_val = multiline_val.replace('\n', ',') + for multiline_val in re.findall(r"(?<=in|IN)\s*\([^\)]*\n[^\)]*\)", drilldown_search): + csv_val = multiline_val.replace("\n", ",") drilldown_search = drilldown_search.replace(multiline_val, csv_val) return drilldown_search def parse_drilldown_searches(drilldown_searches: list) -> list[dict]: - """ Goes over the drilldown searches list, parses each drilldown search and converts it to a python dictionary. + """Goes over the drilldown searches list, parses each drilldown search and converts it to a python dictionary. Args: drilldown_searches (list): The list of the drilldown searches. @@ -1130,14 +1176,16 @@ def parse_drilldown_searches(drilldown_searches: list) -> list[dict]: else: searches.append(search) except json.JSONDecodeError as e: - demisto.error(f"Caught an exception while parsing a drilldown search object." - f"Drilldown search is: {drilldown_search}, Original Error is: {str(e)}") + demisto.error( + f"Caught an exception while parsing a drilldown search object." + f"Drilldown search is: {drilldown_search}, Original Error is: {e!s}" + ) return searches def get_drilldown_searches(notable_data): - """ Extract the drilldown_searches from the notable_data. + """Extract the drilldown_searches from the notable_data. It can be a list of objects, a single object or a simple string that contains the query. Args: @@ -1164,7 +1212,7 @@ def get_drilldown_searches(notable_data): def drilldown_enrichment(service: client.Service, notable_data, num_enrichment_events) -> list[tuple[str, str, client.Job]]: - """ Performs a drilldown enrichment. + """Performs a drilldown enrichment. If the notable has multiple drilldown searches, enriches all the drilldown searches. Args: @@ -1181,12 +1229,12 @@ def drilldown_enrichment(service: client.Service, notable_data, num_enrichment_e raw_dict = rawToDict(notable_data.get("_raw", "")) total_searches = len(searches) - demisto.debug(f'Notable {notable_data[EVENT_ID]} has {total_searches} drilldown searches to enrich') + demisto.debug(f"Notable {notable_data[EVENT_ID]} has {total_searches} drilldown searches to enrich") for i in range(total_searches): # Iterates over the drilldown searches of the given notable to enrich each one of them search = searches[i] - demisto.debug(f'Enriches drilldown search number {i+1} out of {total_searches} for notable {notable_data[EVENT_ID]}') + demisto.debug(f"Enriches drilldown search number {i+1} out of {total_searches} for notable {notable_data[EVENT_ID]}") if isinstance(search, dict): query_name = search.get("name", "") @@ -1204,25 +1252,23 @@ def drilldown_enrichment(service: client.Service, notable_data, num_enrichment_e parsed_query_name = build_drilldown_search(notable_data, query_name, raw_dict, True) if not parsed_query_name: # if parsing failed - keep original unparsed name demisto.debug( - f'Failed parsing drilldown search query name, using the original ' - f'un-parsed query name instead: {query_name}.') + f"Failed parsing drilldown search query name, using the original " + f"un-parsed query name instead: {query_name}." 
+ ) parsed_query_name = query_name except Exception as e: - demisto.error( - f"Caught an exception while parsing the query name, using the original query name instead: {str(e)}") + demisto.error(f"Caught an exception while parsing the query name, using the original query name instead: {e!s}") parsed_query_name = query_name - if searchable_query := build_drilldown_search( - notable_data, query_search, raw_dict - ): + if searchable_query := build_drilldown_search(notable_data, query_search, raw_dict): demisto.debug(f"Search Query was build successfully for notable {notable_data[EVENT_ID]}") if earliest_offset and latest_offset: kwargs = {"max_count": num_enrichment_events, "exec_mode": "normal"} if latest_offset: - kwargs['latest_time'] = latest_offset + kwargs["latest_time"] = latest_offset if earliest_offset: - kwargs['earliest_time'] = earliest_offset + kwargs["earliest_time"] = earliest_offset query = build_search_query({"query": searchable_query}) demisto.debug(f"Drilldown query for notable {notable_data[EVENT_ID]} is: {query}") try: @@ -1230,9 +1276,9 @@ def drilldown_enrichment(service: client.Service, notable_data, num_enrichment_e jobs_and_queries.append((parsed_query_name, query, job)) except Exception as e: - demisto.error(f"Caught an exception in drilldown_enrichment function: {str(e)}") + demisto.error(f"Caught an exception in drilldown_enrichment function: {e!s}") else: - demisto.debug(f'Failed getting the drilldown timeframe for notable {notable_data[EVENT_ID]}') + demisto.debug(f"Failed getting the drilldown timeframe for notable {notable_data[EVENT_ID]}") jobs_and_queries.append((None, None, None)) else: demisto.debug( @@ -1248,7 +1294,7 @@ def drilldown_enrichment(service: client.Service, notable_data, num_enrichment_e def identity_enrichment(service: client.Service, notable_data, num_enrichment_events) -> client.Job: - """ Performs an identity enrichment. + """Performs an identity enrichment. Args: service (splunklib.client.Service): Splunk service object @@ -1265,24 +1311,24 @@ def identity_enrichment(service: client.Service, notable_data, num_enrichment_ev fields=USER_RELATED_FIELDS, add_backslash=True, ): - tables = argToList(demisto.params().get('identity_enrich_lookup_tables', DEFAULT_IDENTITY_ENRICH_TABLE)) - query = '' + tables = argToList(demisto.params().get("identity_enrich_lookup_tables", DEFAULT_IDENTITY_ENRICH_TABLE)) + query = "" for table in tables: - query += f'| inputlookup {table} where {users}' + query += f"| inputlookup {table} where {users}" demisto.debug(f"Identity query for notable {notable_data[EVENT_ID]}: {query}") try: kwargs = {"max_count": num_enrichment_events, "exec_mode": "normal"} job = service.jobs.create(query, **kwargs) except Exception as e: - demisto.error(f"Caught an exception in identity_enrichment function: {str(e)}") + demisto.error(f"Caught an exception in identity_enrichment function: {e!s}") else: - demisto.debug(f'No users were found in notable. {error_msg}') + demisto.debug(f"No users were found in notable. {error_msg}") return job def asset_enrichment(service: client.Service, notable_data, num_enrichment_events) -> client.Job: - """ Performs an asset enrichment. + """Performs an asset enrichment. 
Args: service (splunklib.client.Service): Splunk service object @@ -1298,27 +1344,27 @@ def asset_enrichment(service: client.Service, notable_data, num_enrichment_event prefix="asset", fields=["src", "dest", "src_ip", "dst_ip"], ): - tables = argToList(demisto.params().get('asset_enrich_lookup_tables', DEFAULT_ASSET_ENRICH_TABLES)) + tables = argToList(demisto.params().get("asset_enrich_lookup_tables", DEFAULT_ASSET_ENRICH_TABLES)) - query = '' + query = "" for table in tables: - query += f'| inputlookup append=T {table} where {assets}' - query += '| rename _key as asset_id | stats values(*) as * by asset_id' + query += f"| inputlookup append=T {table} where {assets}" + query += "| rename _key as asset_id | stats values(*) as * by asset_id" demisto.debug(f"Asset query for notable {notable_data[EVENT_ID]}: {query}") try: kwargs = {"max_count": num_enrichment_events, "exec_mode": "normal"} job = service.jobs.create(query, **kwargs) except Exception as e: - demisto.error(f"Caught an exception in asset_enrichment function: {str(e)}") + demisto.error(f"Caught an exception in asset_enrichment function: {e!s}") else: - demisto.debug(f'No assets were found in notable. {error_msg}') + demisto.debug(f"No assets were found in notable. {error_msg}") return job def handle_submitted_notables(service: client.Service, cache_object: Cache) -> list[Notable]: - """ Handles submitted notables. For each submitted notable, tries to retrieve its results, if results aren't ready, + """Handles submitted notables. For each submitted notable, tries to retrieve its results, if results aren't ready, it moves to the next submitted notable. Args: @@ -1329,16 +1375,14 @@ def handle_submitted_notables(service: client.Service, cache_object: Cache) -> l handled_notables (list[Notable]): The handled Notables """ handled_notables = [] - if not (enrichment_timeout := arg_to_number(str(demisto.params().get('enrichment_timeout', '5')))): + if not (enrichment_timeout := arg_to_number(str(demisto.params().get("enrichment_timeout", "5")))): enrichment_timeout = 5 notables = cache_object.submitted_notables total = len(notables) demisto.debug(f"Trying to handle {len(notables[:MAX_HANDLE_NOTABLES])}/{total} open enrichments") for notable in notables[:MAX_HANDLE_NOTABLES]: - if handle_submitted_notable( - service, notable, enrichment_timeout - ): + if handle_submitted_notable(service, notable, enrichment_timeout): handled_notables.append(notable) cache_object.submitted_notables = [n for n in notables if n not in handled_notables] @@ -1349,7 +1393,7 @@ def handle_submitted_notables(service: client.Service, cache_object: Cache) -> l def handle_submitted_notable(service: client.Service, notable: Notable, enrichment_timeout: int) -> bool: - """ Handles submitted notable. If enrichment process timeout has reached, creates an incident. + """Handles submitted notable. If enrichment process timeout has reached, creates an incident. 
Args: service (splunklib.client.Service): Splunk service object @@ -1368,25 +1412,26 @@ def handle_submitted_notable(service: client.Service, notable: Notable, enrichme try: job = client.Job(service=service, sid=enrichment.id) if job.is_done(): - demisto.debug(f'Handling {enrichment.id=} of {enrichment.type=} for notable {notable.id}') + demisto.debug(f"Handling {enrichment.id=} of {enrichment.type=} for notable {notable.id}") for item in results.JSONResultsReader(job.results(output_mode=OUTPUT_MODE_JSON)): if handle_message(item): continue enrichment.data.append(item) enrichment.status = Enrichment.SUCCESSFUL - demisto.debug(f'{enrichment.id=} of {enrichment.type=} for notable {notable.id} status is successful ' - f'{len(enrichment.data)=}') + demisto.debug( + f"{enrichment.id=} of {enrichment.type=} for notable {notable.id} status is successful " + f"{len(enrichment.data)=}" + ) else: - demisto.debug(f'{enrichment.id=} of {enrichment.type=} for notable {notable.id} is still not done') + demisto.debug(f"{enrichment.id=} of {enrichment.type=} for notable {notable.id} is still not done") except Exception as e: - demisto.error( f"Caught an exception while retrieving {enrichment.id=} of {enrichment.type=}\ - results for notable {notable.id}: {str(e)}" + results for notable {notable.id}: {e!s}" ) enrichment.status = Enrichment.FAILED - demisto.error(f'{enrichment.id=} of {enrichment.type=} for notable {notable.id} was failed.') + demisto.error(f"{enrichment.id=} of {enrichment.type=} for notable {notable.id} was failed.") if notable.handled(): task_status = True @@ -1405,7 +1450,7 @@ def handle_submitted_notable(service: client.Service, notable: Notable, enrichme def submit_notables(service: client.Service, cache_object: Cache) -> tuple[list[Notable], list[Notable]]: - """ Submits fetched notables to Splunk for an enrichment. + """Submits fetched notables to Splunk for an enrichment. 
Args: service (splunklib.client.Service): Splunk service object @@ -1415,39 +1460,37 @@ def submit_notables(service: client.Service, cache_object: Cache) -> tuple[list[ tuple[list[Notable], list[Notable]]: failed_notables, submitted_notables """ failed_notables, submitted_notables = [], [] - num_enrichment_events = arg_to_number(str(demisto.params().get('num_enrichment_events', '20'))) + num_enrichment_events = arg_to_number(str(demisto.params().get("num_enrichment_events", "20"))) notables = cache_object.not_yet_submitted_notables total = len(notables) if notables: - demisto.debug(f'Enriching {len(notables[:MAX_SUBMIT_NOTABLES])}/{total} fetched notables') + demisto.debug(f"Enriching {len(notables[:MAX_SUBMIT_NOTABLES])}/{total} fetched notables") for notable in notables[:MAX_SUBMIT_NOTABLES]: - if submit_notable( - service, notable, num_enrichment_events - ): + if submit_notable(service, notable, num_enrichment_events): cache_object.submitted_notables.append(notable) submitted_notables.append(notable) - demisto.debug(f'Submitted enrichment request to Splunk for notable {notable.id}') + demisto.debug(f"Submitted enrichment request to Splunk for notable {notable.id}") else: failed_notables.append(notable) - demisto.debug(f'Incident will be created from notable {notable.id} as each enrichment submission failed') + demisto.debug(f"Incident will be created from notable {notable.id} as each enrichment submission failed") cache_object.not_yet_submitted_notables = [n for n in notables if n not in submitted_notables + failed_notables] if submitted_notables: - demisto.debug(f'Submitted {len(submitted_notables)}/{total} notables successfully.') + demisto.debug(f"Submitted {len(submitted_notables)}/{total} notables successfully.") if failed_notables: demisto.debug( - f'The following {len(failed_notables)} notables failed the enrichment process: \ + f"The following {len(failed_notables)} notables failed the enrichment process: \ {[notable.id for notable in failed_notables]}, \ - creating incidents without enrichment.' + creating incidents without enrichment." ) return failed_notables, submitted_notables def submit_notable(service: client.Service, notable: Notable, num_enrichment_events) -> bool: - """ Submits fetched notable to Splunk for an Enrichment. Three enrichments possible: Drilldown, Asset & Identity. + """Submits fetched notable to Splunk for an Enrichment. Three enrichments possible: Drilldown, Asset & Identity. If all enrichment type executions were unsuccessful, creates a regular incident, Otherwise updates the integration context for the next fetch to handle the submitted notable. 
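Restating the decision that the next hunk reformats, as a runnable sketch (start_job is a hypothetical stand-in for the service.jobs.create calls; the names here are illustrative, not the integration's API):

def submit_all(enabled_types, start_job):
    """Start one enrichment job per enabled type and record a per-type status.

    start_job(enrichment_type) returns a job id, or None when submission fails.
    The caller waits for results only if at least one submission succeeded;
    if every enabled type failed, a regular un-enriched incident is created.
    """
    enrichments = [
        {"type": t, "status": "in progress" if start_job(t) else "failed"}
        for t in enabled_types
    ]
    return enrichments, any(e["status"] == "in progress" for e in enrichments)


# A notable whose every submission fails falls back to a plain incident.
_, wait_for_results = submit_all(["Drilldown", "Asset", "Identity"], lambda t: None)
assert wait_for_results is False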
@@ -1465,8 +1508,10 @@ def submit_notable(service: client.Service, notable: Notable, num_enrichment_eve jobs_and_queries = drilldown_enrichment(service, notable.data, num_enrichment_events) for job_and_query in jobs_and_queries: notable.enrichments.append( - Enrichment.from_job(DRILLDOWN_ENRICHMENT, job=job_and_query[2], - query_name=job_and_query[0], query_search=job_and_query[1])) + Enrichment.from_job( + DRILLDOWN_ENRICHMENT, job=job_and_query[2], query_name=job_and_query[0], query_search=job_and_query[1] + ) + ) if ASSET_ENRICHMENT in ENABLED_ENRICHMENTS and not submitted_asset: job = asset_enrichment(service, notable.data, num_enrichment_events) notable.enrichments.append(Enrichment.from_job(ASSET_ENRICHMENT, job)) @@ -1478,10 +1523,7 @@ def submit_notable(service: client.Service, notable: Notable, num_enrichment_eve def create_incidents_from_notables( - notables_to_be_created: list[Notable], - mapper: UserMappingObject, - comment_tag_to_splunk: str, - comment_tag_from_splunk: str + notables_to_be_created: list[Notable], mapper: UserMappingObject, comment_tag_to_splunk: str, comment_tag_from_splunk: str ): """Create the actual incident from the handled Notables in addition, taking in account the data from the integration_context (from mirror-in process) @@ -1503,11 +1545,10 @@ def create_incidents_from_notables( if is_mirror_in_enabled(): integration_context = get_integration_context() mirrored_in_notables = integration_context.get(MIRRORED_ENRICHING_NOTABLES, {}) - demisto.debug(f'found {len(mirrored_in_notables)} enriched notables updated in mirror-in') - demisto.debug(f'{mirrored_in_notables=}') + demisto.debug(f"found {len(mirrored_in_notables)} enriched notables updated in mirror-in") + demisto.debug(f"{mirrored_in_notables=}") for notable in notables_to_be_created: - # in case the Notable was updated in Splunk between the time of fetch and create incident, # we need to take the updated delta. if notable.id in mirrored_in_notables: @@ -1523,12 +1564,13 @@ def create_incidents_from_notables( def is_mirror_in_enabled(): params = demisto.params() - return MIRROR_DIRECTION.get(params.get('mirror_direction', '')) in ['Both', 'In'] + return MIRROR_DIRECTION.get(params.get("mirror_direction", "")) in ["Both", "In"] -def run_enrichment_mechanism(service: client.Service, integration_context, mapper: UserMappingObject, - comment_tag_to_splunk, comment_tag_from_splunk): - """ Execute the enriching fetch mechanism +def run_enrichment_mechanism( + service: client.Service, integration_context, mapper: UserMappingObject, comment_tag_to_splunk, comment_tag_from_splunk +): + """Execute the enriching fetch mechanism 1. We first handle submitted notables that have not been handled in the last fetch run 2. If we finished handling and submitting all fetched notables, we fetch new notables 3. 
After we finish to fetch new notables or if we have left notables that have not been submitted, we submit @@ -1545,20 +1587,26 @@ def run_enrichment_mechanism(service: client.Service, integration_context, mappe try: handled_notables = handle_submitted_notables(service, cache_object) if cache_object.done_submitting() and cache_object.done_handling(): - fetch_notables(service=service, cache_object=cache_object, enrich_notables=True, mapper=mapper, - comment_tag_to_splunk=comment_tag_to_splunk, - comment_tag_from_splunk=comment_tag_from_splunk) + fetch_notables( + service=service, + cache_object=cache_object, + enrich_notables=True, + mapper=mapper, + comment_tag_to_splunk=comment_tag_to_splunk, + comment_tag_from_splunk=comment_tag_from_splunk, + ) if is_mirror_in_enabled(): # if mirror-in enabled, we need to store in cache the fetched notables ASAP, # as they need to be able to update by the mirror in process - demisto.debug('dumping the cache object direct after fetch as mirror-in enabled') + demisto.debug("dumping the cache object direct after fetch as mirror-in enabled") cache_object.dump_to_integration_context() failed_notables, _ = submit_notables(service, cache_object) - incidents = create_incidents_from_notables(handled_notables + failed_notables, - mapper, comment_tag_to_splunk, comment_tag_from_splunk) + incidents = create_incidents_from_notables( + handled_notables + failed_notables, mapper, comment_tag_to_splunk, comment_tag_from_splunk + ) except Exception as e: - err = f'Caught an exception while executing the enriching fetch mechanism. Additional Info: {str(e)}' + err = f"Caught an exception while executing the enriching fetch mechanism. Additional Info: {e!s}" demisto.error(err) # we throw exception only if there is no incident to create if not incidents: @@ -1568,13 +1616,15 @@ def run_enrichment_mechanism(service: client.Service, integration_context, mappe store_incidents_for_mapping(incidents) handled_but_not_created_incidents = cache_object.organize() cache_object.dump_to_integration_context() - incidents += [notable.to_incident(mapper, comment_tag_to_splunk, comment_tag_from_splunk) - for notable in handled_but_not_created_incidents] + incidents += [ + notable.to_incident(mapper, comment_tag_to_splunk, comment_tag_from_splunk) + for notable in handled_but_not_created_incidents + ] demisto.incidents(incidents) def store_incidents_for_mapping(incidents): - """ Stores ready incidents in integration context to allow the mapping to pull the incidents from the instance. + """Stores ready incidents in integration context to allow the mapping to pull the incidents from the instance. We store at most 20 incidents. 
Args: @@ -1587,19 +1637,18 @@ def store_incidents_for_mapping(incidents): def fetch_incidents_for_mapping(integration_context): - """ Gets the stored incidents to the "Pull from instance" in Classification & Mapping (In case of enriched fetch) + """Gets the stored incidents to the "Pull from instance" in Classification & Mapping (In case of enriched fetch) Args: integration_context (dict): The integration context """ incidents = integration_context.get(INCIDENTS, []) - demisto.debug( - f'Retrieving {len(incidents)} incidents for "Pull from instance" in Classification & Mapping.') + demisto.debug(f'Retrieving {len(incidents)} incidents for "Pull from instance" in Classification & Mapping.') demisto.incidents(incidents) def reset_enriching_fetch_mechanism(): - """ Resets all the fields regarding the enriching fetch mechanism & the last run object """ + """Resets all the fields regarding the enriching fetch mechanism & the last run object""" integration_context = get_integration_context() for field in (INCIDENTS, CACHE, MIRRORED_ENRICHING_NOTABLES): @@ -1615,25 +1664,26 @@ def reset_enriching_fetch_mechanism(): # =========== Mirroring Mechanism =========== + def get_last_update_in_splunk_time(last_update): - """ Transforms the time to the corresponding time on the Splunk server + """Transforms the time to the corresponding time on the Splunk server Args: last_update (str): The time to be transformed, E.g 2021-02-09T16:41:30.589575+02:00 Returns (int): The corresponding timestamp on the Splunk server """ - last_update_utc_datetime = dateparser.parse(last_update, settings={'TIMEZONE': 'UTC'}) + last_update_utc_datetime = dateparser.parse(last_update, settings={"TIMEZONE": "UTC"}) if not last_update_utc_datetime: - raise Exception(f'Could not parse the last update time: {last_update}') + raise Exception(f"Could not parse the last update time: {last_update}") params = demisto.params() try: - splunk_timezone = int(params['timezone']) + splunk_timezone = int(params["timezone"]) except (KeyError, ValueError, TypeError) as e: raise Exception( - 'Cannot mirror incidents when timezone is not configured. Please enter the ' - 'timezone of the Splunk server being used in the integration configuration.' + "Cannot mirror incidents when timezone is not configured. Please enter the " + "timezone of the Splunk server being used in the integration configuration." 
) from e dt = last_update_utc_datetime + timedelta(minutes=splunk_timezone) @@ -1646,32 +1696,35 @@ def get_comments_data(service: client.Service, notable_id: str, comment_tag_from comment_tag_from_splunk (str): _description_ """ notes = [] - search = '|`incident_review` ' \ - '| eval last_modified_timestamp=_time ' \ - f'| where rule_id="{notable_id}" ' \ - f'| where last_modified_timestamp>{last_update_splunk_timestamp} ' \ - '| fields - time ' \ - - demisto.debug(f'Performing get-comments-data command with query: {search}') + search = ( + "|`incident_review` " + "| eval last_modified_timestamp=_time " + f'| where rule_id="{notable_id}" ' + f"| where last_modified_timestamp>{last_update_splunk_timestamp} " + "| fields - time " + ) + demisto.debug(f"Performing get-comments-data command with query: {search}") for item in results.JSONResultsReader(service.jobs.oneshot(search, output_mode=OUTPUT_MODE_JSON)): - demisto.debug(f'item: {item}') + demisto.debug(f"item: {item}") if handle_message(item): continue updated_notable = parse_notable(item, to_dict=True) - demisto.debug(f'updated_notable: {updated_notable}') - comment = updated_notable.get('comment', '') + demisto.debug(f"updated_notable: {updated_notable}") + comment = updated_notable.get("comment", "") if comment and COMMENT_MIRRORED_FROM_XSOAR not in comment: # Creating a note - notes.append({ - 'Type': EntryType.NOTE, - 'Contents': comment, - 'ContentsFormat': EntryFormat.TEXT, - 'Tags': [comment_tag_from_splunk], # The list of tags to add to the entry - 'Note': True, - }) - demisto.debug(f'Update new comment-{comment}') - demisto.debug(f'notes={notes}') + notes.append( + { + "Type": EntryType.NOTE, + "Contents": comment, + "ContentsFormat": EntryFormat.TEXT, + "Tags": [comment_tag_from_splunk], # The list of tags to add to the entry + "Note": True, + } + ) + demisto.debug(f"Update new comment-{comment}") + demisto.debug(f"notes={notes}") return notes @@ -1685,11 +1738,9 @@ def handle_enriching_notables(modified_notables: dict[str, dict]): integration_context = get_integration_context() cache_object = Cache.load_from_integration_context(integration_context) if enriching_notables := (cache_object.submitted_notables + cache_object.not_yet_submitted_notables): - enriched_and_changed = [ - notable for notable in enriching_notables if notable.id in modified_notables - ] + enriched_and_changed = [notable for notable in enriching_notables if notable.id in modified_notables] if enriched_and_changed: - demisto.debug(f'mirror-in: found {len(enriched_and_changed)} submitted notables, updating delta in cache.') + demisto.debug(f"mirror-in: found {len(enriched_and_changed)} submitted notables, updating delta in cache.") delta_map = integration_context.get(MIRRORED_ENRICHING_NOTABLES, {}) for notable in enriched_and_changed: updated_notable = modified_notables[notable.id] @@ -1700,43 +1751,55 @@ def handle_enriching_notables(modified_notables: dict[str, dict]): del modified_notables[notable.id] integration_context[MIRRORED_ENRICHING_NOTABLES] = delta_map - extensive_log(f'delta map after mirror update: {delta_map}') + extensive_log(f"delta map after mirror update: {delta_map}") set_integration_context(integration_context) - demisto.debug(f'mirror-in: delta updated for the enriching notables - {[n.id for n in enriched_and_changed]}') + demisto.debug(f"mirror-in: delta updated for the enriching notables - {[n.id for n in enriched_and_changed]}") else: - demisto.debug('mirror-in: enriching notables was not updated in remote.') + 
demisto.debug("mirror-in: enriching notables was not updated in remote.") else: - demisto.debug('mirror-in: no enriching notables found.') + demisto.debug("mirror-in: no enriching notables found.") except Exception as e: - demisto.error(f'mirror-in: failed to check for enriching notables, {e}') + demisto.error(f"mirror-in: failed to check for enriching notables, {e}") def handle_closed_notable(notable, notable_id, close_extra_labels, close_end_statuses, entries): - if notable.get('status_label'): - status_label = notable['status_label'] - - if status_label == "Closed" or (status_label in close_extra_labels) \ - or (close_end_statuses and argToBoolean(notable.get('status_end', 'false'))): - demisto.info(f'mirror-in: closing incident related to notable {notable_id} with status_label: {status_label}') - entries.append({ - 'EntryContext': {'mirrorRemoteId': notable_id}, - 'Type': EntryType.NOTE, - 'Contents': { - 'dbotIncidentClose': True, - 'closeReason': f'Notable event was closed on Splunk with status \"{status_label}\".' - }, - 'ContentsFormat': EntryFormat.JSON - }) + if notable.get("status_label"): + status_label = notable["status_label"] + + if ( + status_label == "Closed" + or (status_label in close_extra_labels) + or (close_end_statuses and argToBoolean(notable.get("status_end", "false"))) + ): + demisto.info(f"mirror-in: closing incident related to notable {notable_id} with status_label: {status_label}") + entries.append( + { + "EntryContext": {"mirrorRemoteId": notable_id}, + "Type": EntryType.NOTE, + "Contents": { + "dbotIncidentClose": True, + "closeReason": f'Notable event was closed on Splunk with status "{status_label}".', + }, + "ContentsFormat": EntryFormat.JSON, + } + ) else: - demisto.debug('"status_label" key could not be found on the returned data, ' - f'skipping closure mirror for notable {notable_id}.') + demisto.debug( + '"status_label" key could not be found on the returned data, ' f"skipping closure mirror for notable {notable_id}." 
+ ) -def get_modified_remote_data_command(service: client.Service, args: dict, - close_incident: bool, close_end_statuses: bool, close_extra_labels: list[str], - mapper: UserMappingObject, comment_tag_from_splunk: str): - """ Gets the list of the notables data that have change since a given time +def get_modified_remote_data_command( + service: client.Service, + args: dict, + close_incident: bool, + close_end_statuses: bool, + close_extra_labels: list[str], + mapper: UserMappingObject, + comment_tag_from_splunk: str, +): + """Gets the list of the notables data that have change since a given time Args: service (splunklib.client.Service): Splunk service object @@ -1755,41 +1818,45 @@ def get_modified_remote_data_command(service: client.Service, args: dict, entries: list[dict] = [] remote_args = GetModifiedRemoteDataArgs(args) last_update_splunk_timestamp = get_last_update_in_splunk_time(remote_args.last_update) - incident_review_search = '|`incident_review` ' \ - '| eval last_modified_timestamp=_time ' \ - f'| where last_modified_timestamp>{last_update_splunk_timestamp} ' \ - '| fields - _time,time ' \ - '| expandtoken' - demisto.debug(f'mirror-in: performing `incident_review` search with query: {incident_review_search}.') - for item in results.JSONResultsReader(service.jobs.oneshot( - query=incident_review_search, count=MIRROR_LIMIT, output_mode=OUTPUT_MODE_JSON - )): + incident_review_search = ( + "|`incident_review` " + "| eval last_modified_timestamp=_time " + f"| where last_modified_timestamp>{last_update_splunk_timestamp} " + "| fields - _time,time " + "| expandtoken" + ) + demisto.debug(f"mirror-in: performing `incident_review` search with query: {incident_review_search}.") + for item in results.JSONResultsReader( + service.jobs.oneshot(query=incident_review_search, count=MIRROR_LIMIT, output_mode=OUTPUT_MODE_JSON) + ): if handle_message(item): continue updated_notable = parse_notable(item, to_dict=True) - notable_id = updated_notable['rule_id'] # in the `incident_review` macro - the ID are in the rule_id key + notable_id = updated_notable["rule_id"] # in the `incident_review` macro - the ID are in the rule_id key modified_notables_map[notable_id] = updated_notable if close_incident: handle_closed_notable(updated_notable, notable_id, close_extra_labels, close_end_statuses, entries) - if (comment := updated_notable.get('comment')) and COMMENT_MIRRORED_FROM_XSOAR not in comment: + if (comment := updated_notable.get("comment")) and COMMENT_MIRRORED_FROM_XSOAR not in comment: # comment, here in the `incident_review` macro results, hold only the updated comment # Creating a note - entries.append({ - 'EntryContext': {'mirrorRemoteId': notable_id}, - 'Type': EntryType.NOTE, - 'Contents': comment, - 'ContentsFormat': EntryFormat.TEXT, - 'Tags': [comment_tag_from_splunk], # The list of tags to add to the entry - 'Note': True, - }) + entries.append( + { + "EntryContext": {"mirrorRemoteId": notable_id}, + "Type": EntryType.NOTE, + "Contents": comment, + "ContentsFormat": EntryFormat.TEXT, + "Tags": [comment_tag_from_splunk], # The list of tags to add to the entry + "Note": True, + } + ) if modified_notables_map: notable_ids_with_quotes = [f'"{notable_id}"' for notable_id in modified_notables_map] notable_search = f'search `notable` | where {EVENT_ID} in ({",".join(notable_ids_with_quotes)}) | expandtoken' - kwargs = {'query': notable_search, 'earliest_time': '-3d', 'count': MIRROR_LIMIT, 'output_mode': OUTPUT_MODE_JSON} - demisto.debug(f'mirror-in: performing `notable` search with the kwargs: 
{kwargs}')
+    demisto.debug(f"mirror-in: performing `notable` search with the kwargs: {kwargs}")
     for item in results.JSONResultsReader(service.jobs.oneshot(**kwargs)):
         if handle_message(item):
             continue
@@ -1798,9 +1865,9 @@ def get_modified_remote_data_command(service: client.Service, args: dict,
         if modified_notables_map.get(notable_id):
             modified_notables_map[notable_id] |= updated_notable
             # comment in the `notable` macro, hold all the comments for an notable
-            if comment := updated_notable.get('comment'):
+            if comment := updated_notable.get("comment"):
                 comments = comment if isinstance(comment, list) else [comment]
-                modified_notables_map[notable_id]['SplunkComments'] = [{'Comment': comment} for comment in comments]
+                modified_notables_map[notable_id]["SplunkComments"] = [{"Comment": comment} for comment in comments]
                 demisto.debug(f'Updated comment for {notable_id}: {modified_notables_map[notable_id]["SplunkComments"]}')
 
     mapper.update_xsoar_user_in_notables(modified_notables_map.values())
@@ -1808,18 +1875,18 @@ def get_modified_remote_data_command(service: client.Service, args: dict,
     if ENABLED_ENRICHMENTS:
         handle_enriching_notables(modified_notables_map)
 
-        demisto.debug(f'mirror-in: updated notable ids: {list(modified_notables_map.keys())}')
+        demisto.debug(f"mirror-in: updated notable ids: {list(modified_notables_map.keys())}")
     else:
-        demisto.debug(f'mirror-in: no notables was changed since {last_update_splunk_timestamp}')
+        demisto.debug(f"mirror-in: no notables were changed since {last_update_splunk_timestamp}")
 
     if len(modified_notables_map) >= MIRROR_LIMIT:
-        demisto.info(f'mirror-in: the number of mirrored notables reach the limit of: {MIRROR_LIMIT}')
+        demisto.info(f"mirror-in: the number of mirrored notables reached the limit of: {MIRROR_LIMIT}")
 
     res = SplunkGetModifiedRemoteDataResponse(modified_notables_data=modified_notables_map.values(), entries=entries)
     return_results(res)
 
 
 def update_remote_system_command(args, params, service: client.Service, auth_token, mapper, comment_tag_to_splunk):
-    """ Pushes changes in XSOAR incident into the corresponding notable event in Splunk Server. 
Args: args (dict): Demisto args @@ -1840,71 +1907,80 @@ def update_remote_system_command(args, params, service: client.Service, auth_tok demisto.debug(f"mirroring args: entries:{parsed_args.entries} delta:{parsed_args.delta}") if parsed_args.incident_changed and delta: demisto.debug( - f'Got the following delta keys {list(delta.keys())} to update incident corresponding to notable {notable_id}' + f"Got the following delta keys {list(delta.keys())} to update incident corresponding to notable {notable_id}" ) changed_data: dict[str, Any] = {field: None for field in OUTGOING_MIRRORED_FIELDS} for field in delta: - if field == 'owner' and params.get('userMapping', False): + if field == "owner" and params.get("userMapping", False): new_owner = mapper.get_splunk_user_by_xsoar(delta["owner"]) if mapper.should_map else None if new_owner: - changed_data['owner'] = new_owner + changed_data["owner"] = new_owner else: - demisto.error('New owner was not found while userMapping is enabled.') + demisto.error("New owner was not found while userMapping is enabled.") elif field in OUTGOING_MIRRORED_FIELDS: changed_data[field] = delta[field] # Close notable if relevant - if parsed_args.inc_status == IncidentStatus.DONE and params.get('close_notable'): - demisto.debug(f'Closing notable {notable_id}') - changed_data['status'] = '5' + if parsed_args.inc_status == IncidentStatus.DONE and params.get("close_notable"): + demisto.debug(f"Closing notable {notable_id}") + changed_data["status"] = "5" if any(changed_data.values()): - demisto.debug(f'Sending update request to Splunk for notable {notable_id}, data: {changed_data}') + demisto.debug(f"Sending update request to Splunk for notable {notable_id}, data: {changed_data}") try: session_key = None if auth_token else get_auth_session_key(service) response_info = update_notable_events( - baseurl=base_url, comment=changed_data['comment'], status=changed_data['status'], - urgency=changed_data['urgency'], owner=changed_data['owner'], eventIDs=[notable_id], - disposition=changed_data.get('disposition'), auth_token=auth_token, sessionKey=session_key + baseurl=base_url, + comment=changed_data["comment"], + status=changed_data["status"], + urgency=changed_data["urgency"], + owner=changed_data["owner"], + eventIDs=[notable_id], + disposition=changed_data.get("disposition"), + auth_token=auth_token, + sessionKey=session_key, ) - if 'success' not in response_info or not response_info['success']: - demisto.error(f'Failed updating notable {notable_id}: {str(response_info)}') + if "success" not in response_info or not response_info["success"]: + demisto.error(f"Failed updating notable {notable_id}: {response_info!s}") else: - demisto.debug( - f"update-remote-system for notable {notable_id}: {response_info.get('message')}" - ) + demisto.debug(f"update-remote-system for notable {notable_id}: {response_info.get('message')}") except Exception as e: demisto.error( - f'Error in Splunk outgoing mirror for incident corresponding to notable {notable_id}. Error message: {str(e)}' + f"Error in Splunk outgoing mirror for incident corresponding to notable {notable_id}. 
Error message: {e!s}" ) else: demisto.debug(f"Didn't find changed data to update incident corresponding to notable {notable_id}") else: - demisto.debug(f'Incident corresponding to notable {notable_id} was not changed.') + demisto.debug(f"Incident corresponding to notable {notable_id} was not changed.") if entries: for entry in entries: - entry_tags = entry.get('tags', []) - demisto.debug(f'Got the entry tags: {entry_tags}') + entry_tags = entry.get("tags", []) + demisto.debug(f"Got the entry tags: {entry_tags}") if comment_tag_to_splunk in entry_tags: - demisto.debug('Add new comment') + demisto.debug("Add new comment") comment_body = f'{entry.get("contents", "")}\n {COMMENT_MIRRORED_FROM_XSOAR}' try: session_key = get_auth_session_key(service) if not auth_token else None response_info = update_notable_events( - baseurl=base_url, comment=comment_body, auth_token=auth_token, sessionKey=session_key, - eventIDs=[notable_id]) - if 'success' not in response_info or not response_info['success']: - demisto.error(f'Failed updating notable {notable_id}: {str(response_info)}') + baseurl=base_url, + comment=comment_body, + auth_token=auth_token, + sessionKey=session_key, + eventIDs=[notable_id], + ) + if "success" not in response_info or not response_info["success"]: + demisto.error(f"Failed updating notable {notable_id}: {response_info!s}") else: - demisto.debug('update-remote-system for notable {}: {}' - .format(notable_id, response_info.get('message'))) + demisto.debug("update-remote-system for notable {}: {}".format(notable_id, response_info.get("message"))) except Exception as e: - demisto.error(f'Error in Splunk outgoing mirror for incident corresponding to notable {notable_id}. ' - f'Error message: {str(e)}') + demisto.error( + f"Error in Splunk outgoing mirror for incident corresponding to notable {notable_id}. " + f"Error message: {e!s}" + ) return notable_id @@ -1913,6 +1989,7 @@ def update_remote_system_command(args, params, service: client.Service, auth_tok # =========== Mapping Mechanism =========== + def create_mapping_dict(total_parsed_results, type_field): """ Create a {'field_name': 'fields_properties'} dict to be used as mapping schemas. 
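For reviewers tracing the reformatted outgoing-mirror path above: once `update_remote_system_command` has collected `changed_data`, every outgoing change funnels through a single `update_notable_events` call. A minimal sketch of that call under stated assumptions follows; the base URL and the notable event ID are illustrative placeholders, not values taken from this patch.

    # Minimal sketch: close one notable and attach a comment, mirroring what
    # update_remote_system_command does after building changed_data.
    session_key = get_auth_session_key(service)  # used when no auth_token is configured
    response_info = update_notable_events(
        baseurl="https://splunk.example.com:8089/",  # assumed Splunk ES base URL (trailing slash expected)
        comment="Closed from XSOAR",
        status="5",  # "5" is the Closed status used by the close_notable branch above
        urgency=None,
        owner=None,
        eventIDs=["A1B2C3D4@@notable@@deadbeef"],  # hypothetical notable event ID
        disposition=None,
        auth_token=None,
        sessionKey=session_key,
    )
    if "success" not in response_info or not response_info["success"]:
        demisto.error(f"Failed updating notable: {response_info!s}")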
@@ -1922,28 +1999,29 @@ def create_mapping_dict(total_parsed_results, type_field): """ types_map = {} for result in total_parsed_results: - raw_json = json.loads(result.get('rawJSON', "{}")) - if event_type_name := raw_json.get(type_field, ''): + raw_json = json.loads(result.get("rawJSON", "{}")) + if event_type_name := raw_json.get(type_field, ""): types_map[event_type_name] = raw_json return types_map -def get_mapping_fields_command(service: client.Service, mapper, params: dict, comment_tag_to_splunk: str, - comment_tag_from_splunk: str): +def get_mapping_fields_command( + service: client.Service, mapper, params: dict, comment_tag_to_splunk: str, comment_tag_from_splunk: str +): # Create the query to get unique objects # The logic is identical to the 'fetch_incidents' command - type_field = params.get('type_field', 'source') + type_field = params.get("type_field", "source") total_parsed_results = [] - search_offset = demisto.getLastRun().get('offset', 0) + search_offset = demisto.getLastRun().get("offset", 0) current_time_for_fetch = datetime.utcnow() - if (timezone_ := params.get('timezone')): + if timezone_ := params.get("timezone"): current_time_for_fetch = current_time_for_fetch + timedelta(minutes=int(timezone_)) now = current_time_for_fetch.strftime(SPLUNK_TIME_FORMAT) - if params.get('useSplunkTime'): + if params.get("useSplunkTime"): now = get_current_splunk_time(service) current_time_in_splunk = datetime.strptime(now, SPLUNK_TIME_FORMAT) current_time_for_fetch = current_time_in_splunk @@ -1953,23 +2031,21 @@ def get_mapping_fields_command(service: client.Service, mapper, params: dict, co last_run = start_time_for_fetch.strftime(SPLUNK_TIME_FORMAT) kwargs_oneshot = { - 'earliest_time': last_run, - 'latest_time': now, - 'count': FETCH_LIMIT, - 'offset': search_offset, - 'output_mode': OUTPUT_MODE_JSON, + "earliest_time": last_run, + "latest_time": now, + "count": FETCH_LIMIT, + "offset": search_offset, + "output_mode": OUTPUT_MODE_JSON, } - searchquery_oneshot = params['fetchQuery'] + searchquery_oneshot = params["fetchQuery"] - if (extractFields := params.get('extractFields')): - for field in extractFields.split(','): + if extractFields := params.get("extractFields"): + for field in extractFields.split(","): field_trimmed = field.strip() - searchquery_oneshot = ( - f'{searchquery_oneshot} | eval {field_trimmed}={field_trimmed}' - ) + searchquery_oneshot = f"{searchquery_oneshot} | eval {field_trimmed}={field_trimmed}" - searchquery_oneshot = f'{searchquery_oneshot} | dedup {type_field}' + searchquery_oneshot = f"{searchquery_oneshot} | dedup {type_field}" oneshotsearch_results = service.jobs.oneshot(searchquery_oneshot, **kwargs_oneshot) reader = results.JSONResultsReader(oneshotsearch_results) for item in reader: @@ -1985,158 +2061,474 @@ def get_mapping_fields_command(service: client.Service, mapper, params: dict, co def get_cim_mapping_field_command(): notable = { - 'rule_name': 'string', 'rule_title': 'string', 'security_domain': 'string', 'index': 'string', - 'rule_description': 'string', 'risk_score': 'string', 'host': 'string', - 'host_risk_object_type': 'string', 'dest_risk_object_type': 'string', 'dest_risk_score': 'string', - 'splunk_server': 'string', '_sourcetype': 'string', '_indextime': 'string', '_time': 'string', - 'src_risk_object_type': 'string', 'src_risk_score': 'string', '_raw': 'string', 'urgency': 'string', - 'owner': 'string', 'info_min_time': 'string', 'info_max_time': 'string', 'comment': 'string', - 'reviewer': 'string', 'rule_id': 'string', 'action': 
'string', 'app': 'string', - 'authentication_method': 'string', 'authentication_service': 'string', 'bugtraq': 'string', - 'bytes': 'string', 'bytes_in': 'string', 'bytes_out': 'string', 'category': 'string', 'cert': 'string', - 'change': 'string', 'change_type': 'string', 'command': 'string', 'comments': 'string', - 'cookie': 'string', 'creation_time': 'string', 'cve': 'string', 'cvss': 'string', 'date': 'string', - 'description': 'string', 'dest': 'string', 'dest_bunit': 'string', 'dest_category': 'string', - 'dest_dns': 'string', 'dest_interface': 'string', 'dest_ip': 'string', 'dest_ip_range': 'string', - 'dest_mac': 'string', 'dest_nt_domain': 'string', 'dest_nt_host': 'string', 'dest_port': 'string', - 'dest_priority': 'string', 'dest_translated_ip': 'string', 'dest_translated_port': 'string', - 'dest_type': 'string', 'dest_zone': 'string', 'direction': 'string', 'dlp_type': 'string', - 'dns': 'string', 'duration': 'string', 'dvc': 'string', 'dvc_bunit': 'string', 'dvc_category': 'string', - 'dvc_ip': 'string', 'dvc_mac': 'string', 'dvc_priority': 'string', 'dvc_zone': 'string', - 'file_hash': 'string', 'file_name': 'string', 'file_path': 'string', 'file_size': 'string', - 'http_content_type': 'string', 'http_method': 'string', 'http_referrer': 'string', - 'http_referrer_domain': 'string', 'http_user_agent': 'string', 'icmp_code': 'string', - 'icmp_type': 'string', 'id': 'string', 'ids_type': 'string', 'incident': 'string', 'ip': 'string', - 'mac': 'string', 'message_id': 'string', 'message_info': 'string', 'message_priority': 'string', - 'message_type': 'string', 'mitre_technique_id': 'string', 'msft': 'string', 'mskb': 'string', - 'name': 'string', 'orig_dest': 'string', 'orig_recipient': 'string', 'orig_src': 'string', - 'os': 'string', 'packets': 'string', 'packets_in': 'string', 'packets_out': 'string', - 'parent_process': 'string', 'parent_process_id': 'string', 'parent_process_name': 'string', - 'parent_process_path': 'string', 'password': 'string', 'payload': 'string', 'payload_type': 'string', - 'priority': 'string', 'problem': 'string', 'process': 'string', 'process_hash': 'string', - 'process_id': 'string', 'process_name': 'string', 'process_path': 'string', 'product_version': 'string', - 'protocol': 'string', 'protocol_version': 'string', 'query': 'string', 'query_count': 'string', - 'query_type': 'string', 'reason': 'string', 'recipient': 'string', 'recipient_count': 'string', - 'recipient_domain': 'string', 'recipient_status': 'string', 'record_type': 'string', - 'registry_hive': 'string', 'registry_key_name': 'string', 'registry_path': 'string', - 'registry_value_data': 'string', 'registry_value_name': 'string', 'registry_value_text': 'string', - 'registry_value_type': 'string', 'request_sent_time': 'string', 'request_payload': 'string', - 'request_payload_type': 'string', 'response_code': 'string', 'response_payload_type': 'string', - 'response_received_time': 'string', 'response_time': 'string', 'result': 'string', - 'return_addr': 'string', 'rule': 'string', 'rule_action': 'string', 'sender': 'string', - 'service': 'string', 'service_hash': 'string', 'service_id': 'string', 'service_name': 'string', - 'service_path': 'string', 'session_id': 'string', 'sessions': 'string', 'severity': 'string', - 'severity_id': 'string', 'sid': 'string', 'signature': 'string', 'signature_id': 'string', - 'signature_version': 'string', 'site': 'string', 'size': 'string', 'source': 'string', - 'sourcetype': 'string', 'src': 'string', 'src_bunit': 'string', 'src_category': 'string', - 
'src_dns': 'string', 'src_interface': 'string', 'src_ip': 'string', 'src_ip_range': 'string', - 'src_mac': 'string', 'src_nt_domain': 'string', 'src_nt_host': 'string', 'src_port': 'string', - 'src_priority': 'string', 'src_translated_ip': 'string', 'src_translated_port': 'string', - 'src_type': 'string', 'src_user': 'string', 'src_user_bunit': 'string', 'src_user_category': 'string', - 'src_user_domain': 'string', 'src_user_id': 'string', 'src_user_priority': 'string', - 'src_user_role': 'string', 'src_user_type': 'string', 'src_zone': 'string', 'state': 'string', - 'status': 'string', 'status_code': 'string', 'status_description': 'string', 'subject': 'string', - 'tag': 'string', 'ticket_id': 'string', 'time': 'string', 'time_submitted': 'string', - 'transport': 'string', 'transport_dest_port': 'string', 'type': 'string', 'uri': 'string', - 'uri_path': 'string', 'uri_query': 'string', 'url': 'string', 'url_domain': 'string', - 'url_length': 'string', 'user': 'string', 'user_agent': 'string', 'user_bunit': 'string', - 'user_category': 'string', 'user_id': 'string', 'user_priority': 'string', 'user_role': 'string', - 'user_type': 'string', 'vendor_account': 'string', 'vendor_product': 'string', 'vlan': 'string', - 'xdelay': 'string', 'xref': 'string' + "rule_name": "string", + "rule_title": "string", + "security_domain": "string", + "index": "string", + "rule_description": "string", + "risk_score": "string", + "host": "string", + "host_risk_object_type": "string", + "dest_risk_object_type": "string", + "dest_risk_score": "string", + "splunk_server": "string", + "_sourcetype": "string", + "_indextime": "string", + "_time": "string", + "src_risk_object_type": "string", + "src_risk_score": "string", + "_raw": "string", + "urgency": "string", + "owner": "string", + "info_min_time": "string", + "info_max_time": "string", + "comment": "string", + "reviewer": "string", + "rule_id": "string", + "action": "string", + "app": "string", + "authentication_method": "string", + "authentication_service": "string", + "bugtraq": "string", + "bytes": "string", + "bytes_in": "string", + "bytes_out": "string", + "category": "string", + "cert": "string", + "change": "string", + "change_type": "string", + "command": "string", + "comments": "string", + "cookie": "string", + "creation_time": "string", + "cve": "string", + "cvss": "string", + "date": "string", + "description": "string", + "dest": "string", + "dest_bunit": "string", + "dest_category": "string", + "dest_dns": "string", + "dest_interface": "string", + "dest_ip": "string", + "dest_ip_range": "string", + "dest_mac": "string", + "dest_nt_domain": "string", + "dest_nt_host": "string", + "dest_port": "string", + "dest_priority": "string", + "dest_translated_ip": "string", + "dest_translated_port": "string", + "dest_type": "string", + "dest_zone": "string", + "direction": "string", + "dlp_type": "string", + "dns": "string", + "duration": "string", + "dvc": "string", + "dvc_bunit": "string", + "dvc_category": "string", + "dvc_ip": "string", + "dvc_mac": "string", + "dvc_priority": "string", + "dvc_zone": "string", + "file_hash": "string", + "file_name": "string", + "file_path": "string", + "file_size": "string", + "http_content_type": "string", + "http_method": "string", + "http_referrer": "string", + "http_referrer_domain": "string", + "http_user_agent": "string", + "icmp_code": "string", + "icmp_type": "string", + "id": "string", + "ids_type": "string", + "incident": "string", + "ip": "string", + "mac": "string", + "message_id": "string", + "message_info": 
"string", + "message_priority": "string", + "message_type": "string", + "mitre_technique_id": "string", + "msft": "string", + "mskb": "string", + "name": "string", + "orig_dest": "string", + "orig_recipient": "string", + "orig_src": "string", + "os": "string", + "packets": "string", + "packets_in": "string", + "packets_out": "string", + "parent_process": "string", + "parent_process_id": "string", + "parent_process_name": "string", + "parent_process_path": "string", + "password": "string", + "payload": "string", + "payload_type": "string", + "priority": "string", + "problem": "string", + "process": "string", + "process_hash": "string", + "process_id": "string", + "process_name": "string", + "process_path": "string", + "product_version": "string", + "protocol": "string", + "protocol_version": "string", + "query": "string", + "query_count": "string", + "query_type": "string", + "reason": "string", + "recipient": "string", + "recipient_count": "string", + "recipient_domain": "string", + "recipient_status": "string", + "record_type": "string", + "registry_hive": "string", + "registry_key_name": "string", + "registry_path": "string", + "registry_value_data": "string", + "registry_value_name": "string", + "registry_value_text": "string", + "registry_value_type": "string", + "request_sent_time": "string", + "request_payload": "string", + "request_payload_type": "string", + "response_code": "string", + "response_payload_type": "string", + "response_received_time": "string", + "response_time": "string", + "result": "string", + "return_addr": "string", + "rule": "string", + "rule_action": "string", + "sender": "string", + "service": "string", + "service_hash": "string", + "service_id": "string", + "service_name": "string", + "service_path": "string", + "session_id": "string", + "sessions": "string", + "severity": "string", + "severity_id": "string", + "sid": "string", + "signature": "string", + "signature_id": "string", + "signature_version": "string", + "site": "string", + "size": "string", + "source": "string", + "sourcetype": "string", + "src": "string", + "src_bunit": "string", + "src_category": "string", + "src_dns": "string", + "src_interface": "string", + "src_ip": "string", + "src_ip_range": "string", + "src_mac": "string", + "src_nt_domain": "string", + "src_nt_host": "string", + "src_port": "string", + "src_priority": "string", + "src_translated_ip": "string", + "src_translated_port": "string", + "src_type": "string", + "src_user": "string", + "src_user_bunit": "string", + "src_user_category": "string", + "src_user_domain": "string", + "src_user_id": "string", + "src_user_priority": "string", + "src_user_role": "string", + "src_user_type": "string", + "src_zone": "string", + "state": "string", + "status": "string", + "status_code": "string", + "status_description": "string", + "subject": "string", + "tag": "string", + "ticket_id": "string", + "time": "string", + "time_submitted": "string", + "transport": "string", + "transport_dest_port": "string", + "type": "string", + "uri": "string", + "uri_path": "string", + "uri_query": "string", + "url": "string", + "url_domain": "string", + "url_length": "string", + "user": "string", + "user_agent": "string", + "user_bunit": "string", + "user_category": "string", + "user_id": "string", + "user_priority": "string", + "user_role": "string", + "user_type": "string", + "vendor_account": "string", + "vendor_product": "string", + "vlan": "string", + "xdelay": "string", + "xref": "string", } drilldown = { - 'Drilldown': { - 'action': 'string', 'app': 
'string', 'authentication_method': 'string', - 'authentication_service': 'string', 'bugtraq': 'string', 'bytes': 'string', - 'bytes_in': 'string', 'bytes_out': 'string', 'category': 'string', 'cert': 'string', - 'change': 'string', 'change_type': 'string', 'command': 'string', 'comments': 'string', - 'cookie': 'string', 'creation_time': 'string', 'cve': 'string', 'cvss': 'string', - 'date': 'string', 'description': 'string', 'dest': 'string', 'dest_bunit': 'string', - 'dest_category': 'string', 'dest_dns': 'string', 'dest_interface': 'string', - 'dest_ip': 'string', 'dest_ip_range': 'string', 'dest_mac': 'string', - 'dest_nt_domain': 'string', 'dest_nt_host': 'string', 'dest_port': 'string', - 'dest_priority': 'string', 'dest_translated_ip': 'string', - 'dest_translated_port': 'string', 'dest_type': 'string', 'dest_zone': 'string', - 'direction': 'string', 'dlp_type': 'string', 'dns': 'string', 'duration': 'string', - 'dvc': 'string', 'dvc_bunit': 'string', 'dvc_category': 'string', 'dvc_ip': 'string', - 'dvc_mac': 'string', 'dvc_priority': 'string', 'dvc_zone': 'string', - 'file_hash': 'string', 'file_name': 'string', 'file_path': 'string', - 'file_size': 'string', 'http_content_type': 'string', 'http_method': 'string', - 'http_referrer': 'string', 'http_referrer_domain': 'string', 'http_user_agent': 'string', - 'icmp_code': 'string', 'icmp_type': 'string', 'id': 'string', 'ids_type': 'string', - 'incident': 'string', 'ip': 'string', 'mac': 'string', 'message_id': 'string', - 'message_info': 'string', 'message_priority': 'string', 'message_type': 'string', - 'mitre_technique_id': 'string', 'msft': 'string', 'mskb': 'string', 'name': 'string', - 'orig_dest': 'string', 'orig_recipient': 'string', 'orig_src': 'string', 'os': 'string', - 'packets': 'string', 'packets_in': 'string', 'packets_out': 'string', - 'parent_process': 'string', 'parent_process_id': 'string', - 'parent_process_name': 'string', 'parent_process_path': 'string', 'password': 'string', - 'payload': 'string', 'payload_type': 'string', 'priority': 'string', 'problem': 'string', - 'process': 'string', 'process_hash': 'string', 'process_id': 'string', - 'process_name': 'string', 'process_path': 'string', 'product_version': 'string', - 'protocol': 'string', 'protocol_version': 'string', 'query': 'string', - 'query_count': 'string', 'query_type': 'string', 'reason': 'string', - 'recipient': 'string', 'recipient_count': 'string', 'recipient_domain': 'string', - 'recipient_status': 'string', 'record_type': 'string', 'registry_hive': 'string', - 'registry_key_name': 'string', 'registry_path': 'string', - 'registry_value_data': 'string', 'registry_value_name': 'string', - 'registry_value_text': 'string', 'registry_value_type': 'string', - 'request_payload': 'string', 'request_payload_type': 'string', - 'request_sent_time': 'string', 'response_code': 'string', - 'response_payload_type': 'string', 'response_received_time': 'string', - 'response_time': 'string', 'result': 'string', 'return_addr': 'string', 'rule': 'string', - 'rule_action': 'string', 'sender': 'string', 'service': 'string', - 'service_hash': 'string', 'service_id': 'string', 'service_name': 'string', - 'service_path': 'string', 'session_id': 'string', 'sessions': 'string', - 'severity': 'string', 'severity_id': 'string', 'sid': 'string', 'signature': 'string', - 'signature_id': 'string', 'signature_version': 'string', 'site': 'string', - 'size': 'string', 'source': 'string', 'sourcetype': 'string', 'src': 'string', - 'src_bunit': 'string', 'src_category': 'string', 
'src_dns': 'string', - 'src_interface': 'string', 'src_ip': 'string', 'src_ip_range': 'string', - 'src_mac': 'string', 'src_nt_domain': 'string', 'src_nt_host': 'string', - 'src_port': 'string', 'src_priority': 'string', 'src_translated_ip': 'string', - 'src_translated_port': 'string', 'src_type': 'string', 'src_user': 'string', - 'src_user_bunit': 'string', 'src_user_category': 'string', 'src_user_domain': 'string', - 'src_user_id': 'string', 'src_user_priority': 'string', 'src_user_role': 'string', - 'src_user_type': 'string', 'src_zone': 'string', 'state': 'string', 'status': 'string', - 'status_code': 'string', 'subject': 'string', 'tag': 'string', 'ticket_id': 'string', - 'time': 'string', 'time_submitted': 'string', 'transport': 'string', - 'transport_dest_port': 'string', 'type': 'string', 'uri': 'string', 'uri_path': 'string', - 'uri_query': 'string', 'url': 'string', 'url_domain': 'string', 'url_length': 'string', - 'user': 'string', 'user_agent': 'string', 'user_bunit': 'string', - 'user_category': 'string', 'user_id': 'string', 'user_priority': 'string', - 'user_role': 'string', 'user_type': 'string', 'vendor_account': 'string', - 'vendor_product': 'string', 'vlan': 'string', 'xdelay': 'string', 'xref': 'string' + "Drilldown": { + "action": "string", + "app": "string", + "authentication_method": "string", + "authentication_service": "string", + "bugtraq": "string", + "bytes": "string", + "bytes_in": "string", + "bytes_out": "string", + "category": "string", + "cert": "string", + "change": "string", + "change_type": "string", + "command": "string", + "comments": "string", + "cookie": "string", + "creation_time": "string", + "cve": "string", + "cvss": "string", + "date": "string", + "description": "string", + "dest": "string", + "dest_bunit": "string", + "dest_category": "string", + "dest_dns": "string", + "dest_interface": "string", + "dest_ip": "string", + "dest_ip_range": "string", + "dest_mac": "string", + "dest_nt_domain": "string", + "dest_nt_host": "string", + "dest_port": "string", + "dest_priority": "string", + "dest_translated_ip": "string", + "dest_translated_port": "string", + "dest_type": "string", + "dest_zone": "string", + "direction": "string", + "dlp_type": "string", + "dns": "string", + "duration": "string", + "dvc": "string", + "dvc_bunit": "string", + "dvc_category": "string", + "dvc_ip": "string", + "dvc_mac": "string", + "dvc_priority": "string", + "dvc_zone": "string", + "file_hash": "string", + "file_name": "string", + "file_path": "string", + "file_size": "string", + "http_content_type": "string", + "http_method": "string", + "http_referrer": "string", + "http_referrer_domain": "string", + "http_user_agent": "string", + "icmp_code": "string", + "icmp_type": "string", + "id": "string", + "ids_type": "string", + "incident": "string", + "ip": "string", + "mac": "string", + "message_id": "string", + "message_info": "string", + "message_priority": "string", + "message_type": "string", + "mitre_technique_id": "string", + "msft": "string", + "mskb": "string", + "name": "string", + "orig_dest": "string", + "orig_recipient": "string", + "orig_src": "string", + "os": "string", + "packets": "string", + "packets_in": "string", + "packets_out": "string", + "parent_process": "string", + "parent_process_id": "string", + "parent_process_name": "string", + "parent_process_path": "string", + "password": "string", + "payload": "string", + "payload_type": "string", + "priority": "string", + "problem": "string", + "process": "string", + "process_hash": "string", + 
"process_id": "string", + "process_name": "string", + "process_path": "string", + "product_version": "string", + "protocol": "string", + "protocol_version": "string", + "query": "string", + "query_count": "string", + "query_type": "string", + "reason": "string", + "recipient": "string", + "recipient_count": "string", + "recipient_domain": "string", + "recipient_status": "string", + "record_type": "string", + "registry_hive": "string", + "registry_key_name": "string", + "registry_path": "string", + "registry_value_data": "string", + "registry_value_name": "string", + "registry_value_text": "string", + "registry_value_type": "string", + "request_payload": "string", + "request_payload_type": "string", + "request_sent_time": "string", + "response_code": "string", + "response_payload_type": "string", + "response_received_time": "string", + "response_time": "string", + "result": "string", + "return_addr": "string", + "rule": "string", + "rule_action": "string", + "sender": "string", + "service": "string", + "service_hash": "string", + "service_id": "string", + "service_name": "string", + "service_path": "string", + "session_id": "string", + "sessions": "string", + "severity": "string", + "severity_id": "string", + "sid": "string", + "signature": "string", + "signature_id": "string", + "signature_version": "string", + "site": "string", + "size": "string", + "source": "string", + "sourcetype": "string", + "src": "string", + "src_bunit": "string", + "src_category": "string", + "src_dns": "string", + "src_interface": "string", + "src_ip": "string", + "src_ip_range": "string", + "src_mac": "string", + "src_nt_domain": "string", + "src_nt_host": "string", + "src_port": "string", + "src_priority": "string", + "src_translated_ip": "string", + "src_translated_port": "string", + "src_type": "string", + "src_user": "string", + "src_user_bunit": "string", + "src_user_category": "string", + "src_user_domain": "string", + "src_user_id": "string", + "src_user_priority": "string", + "src_user_role": "string", + "src_user_type": "string", + "src_zone": "string", + "state": "string", + "status": "string", + "status_code": "string", + "subject": "string", + "tag": "string", + "ticket_id": "string", + "time": "string", + "time_submitted": "string", + "transport": "string", + "transport_dest_port": "string", + "type": "string", + "uri": "string", + "uri_path": "string", + "uri_query": "string", + "url": "string", + "url_domain": "string", + "url_length": "string", + "user": "string", + "user_agent": "string", + "user_bunit": "string", + "user_category": "string", + "user_id": "string", + "user_priority": "string", + "user_role": "string", + "user_type": "string", + "vendor_account": "string", + "vendor_product": "string", + "vlan": "string", + "xdelay": "string", + "xref": "string", } } asset = { - 'Asset': { - 'asset': 'string', 'asset_id': 'string', 'asset_tag': 'string', 'bunit': 'string', - 'category': 'string', 'city': 'string', 'country': 'string', 'dns': 'string', - 'ip': 'string', 'is_expected': 'string', 'lat': 'string', 'long': 'string', 'mac': 'string', - 'nt_host': 'string', 'owner': 'string', 'pci_domain': 'string', 'priority': 'string', - 'requires_av': 'string' + "Asset": { + "asset": "string", + "asset_id": "string", + "asset_tag": "string", + "bunit": "string", + "category": "string", + "city": "string", + "country": "string", + "dns": "string", + "ip": "string", + "is_expected": "string", + "lat": "string", + "long": "string", + "mac": "string", + "nt_host": "string", + "owner": "string", + 
"pci_domain": "string", + "priority": "string", + "requires_av": "string", } } identity = { - 'Identity': { - 'bunit': 'string', 'category': 'string', 'email': 'string', 'endDate': 'string', 'first': 'string', - 'identity': 'string', 'identity_tag': 'string', 'last': 'string', 'managedBy': 'string', - 'nick': 'string', 'phone': 'string', 'prefix': 'string', 'priority': 'string', - 'startDate': 'string', 'suffix': 'string', 'watchlist': 'string', 'work_city': 'string', - 'work_lat': 'string', 'work_long': 'string' + "Identity": { + "bunit": "string", + "category": "string", + "email": "string", + "endDate": "string", + "first": "string", + "identity": "string", + "identity_tag": "string", + "last": "string", + "managedBy": "string", + "nick": "string", + "phone": "string", + "prefix": "string", + "priority": "string", + "startDate": "string", + "suffix": "string", + "watchlist": "string", + "work_city": "string", + "work_lat": "string", + "work_long": "string", } } - return { - 'Notable Data': notable, - 'Drilldown Data': drilldown, - 'Asset Data': asset, - 'Identity Data': identity - } + return {"Notable Data": notable, "Drilldown Data": drilldown, "Asset Data": asset, "Identity Data": identity} # =========== Mapping Mechanism =========== @@ -2144,8 +2536,9 @@ def get_cim_mapping_field_command(): # =========== Integration Functions & Classes =========== + class ResponseReaderWrapper(io.RawIOBase): - """ This class was supplied as a solution for a bug in Splunk causing the search to run slowly.""" + """This class was supplied as a solution for a bug in Splunk causing the search to run slowly.""" def __init__(self, responseReader): self.responseReader = responseReader @@ -2171,9 +2564,14 @@ def readinto(self, b): def get_current_splunk_time(splunk_service: client.Service): t = datetime.utcnow() - timedelta(days=3) time = t.strftime(SPLUNK_TIME_FORMAT) - kwargs_oneshot = {'count': 1, 'earliest_time': time, 'output_mode': OUTPUT_MODE_JSON, } - searchquery_oneshot = '| gentimes start=-1 | eval clock = strftime(time(), "%Y-%m-%dT%H:%M:%S")' \ - ' | sort 1 -_time | table clock' + kwargs_oneshot = { + "count": 1, + "earliest_time": time, + "output_mode": OUTPUT_MODE_JSON, + } + searchquery_oneshot = ( + '| gentimes start=-1 | eval clock = strftime(time(), "%Y-%m-%dT%H:%M:%S")' " | sort 1 -_time | table clock" + ) oneshotsearch_results = splunk_service.jobs.oneshot(searchquery_oneshot, **kwargs_oneshot) @@ -2184,16 +2582,16 @@ def get_current_splunk_time(splunk_service: client.Service): if handle_message(item): continue - raise ValueError('Error: Could not fetch Splunk time') + raise ValueError("Error: Could not fetch Splunk time") def quote_group(text): - """ A function that splits groups of key value pairs. - Taking into consideration key values pairs with nested quotes. + """A function that splits groups of key value pairs. + Taking into consideration key values pairs with nested quotes. """ def clean(t): - return t.strip().rstrip(',') + return t.strip().rstrip(",") # Return strings that aren't key-valued, as is. if len(text.strip()) < 3 or "=" not in text: @@ -2202,7 +2600,7 @@ def clean(t): # Remove prefix & suffix wrapping quotes if present around all the text # For example a text could be: # "a="123"", we want it to be: a="123" - text = re.sub(r'^\"([\s\S]+\")\"$', r'\1', text) + text = re.sub(r"^\"([\s\S]+\")\"$", r"\1", text) # Some of the texts don't end with a comma so we add it to make sure # everything acts the same. @@ -2229,7 +2627,7 @@ def clean(t): # with a comma. 
We also check for quotes for this case: # a="b=nested_value_without_a_wrapping_quote", as we want to # wrap 'nested_value_without_a_wrapping_quote' with quotes. - text = re.sub(r'([^\"\,]+?=)([^\"]+?)(,|\")', r'\1"\2"\3', text) + text = re.sub(r"([^\"\,]+?=)([^\"]+?)(,|\")", r'\1"\2"\3', text) # The basic idea here is to check that every key value ends with a `",` # Assuming that there are even number of quotes before @@ -2239,7 +2637,6 @@ def clean(t): lindex = 0 groups = [] while rindex < len(text): - # For every quote we increment the quote counter # (to preserve context on the opening/closed quotes) if text[rindex] == '"': @@ -2284,7 +2681,7 @@ def rawToDict(raw): result = json.loads(raw) except ValueError: if '"message"' in raw: - raw = raw.replace('"', '').strip('{').strip('}') + raw = raw.replace('"', "").strip("{").strip("}") key_val_arr = raw.split(",") for key_val in key_val_arr: single_key_val = key_val.split(":", 1) @@ -2301,12 +2698,12 @@ def rawToDict(raw): # we append `, ` to the end of the string to catch the last value groups = quote_group(raw) for g in groups: - key_value = g.replace('"', '').strip() - if key_value == '': + key_value = g.replace('"', "").strip() + if key_value == "": continue - if '=' in key_value: - key_and_val = key_value.split('=', 1) + if "=" in key_value: + key_and_val = key_value.split("=", 1) if key_and_val[0] not in result: result[key_and_val[0]] = key_and_val[1] else: @@ -2320,11 +2717,21 @@ def rawToDict(raw): # Converts to an str def convert_to_str(obj): - return obj.encode('utf-8') if isinstance(obj, str) else str(obj) - - -def update_notable_events(baseurl, comment, status=None, urgency=None, owner=None, eventIDs=None, - disposition=None, searchID=None, auth_token=None, sessionKey=None): + return obj.encode("utf-8") if isinstance(obj, str) else str(obj) + + +def update_notable_events( + baseurl, + comment, + status=None, + urgency=None, + owner=None, + eventIDs=None, + disposition=None, + searchID=None, + auth_token=None, + sessionKey=None, +): """ Update some notable events. 
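To make the quoting rules above concrete, here is roughly how the reformatted `rawToDict` handles a raw notable string that mixes quoted and unquoted values, including a nested `key=value` inside a quoted value. The input line and the expected output are illustrative, not taken from this patch:

    raw = 'src="10.0.0.1", signature="user=admin logged in", count=3'
    parsed = rawToDict(raw)
    # Expected shape (illustrative): quotes are stripped and each pair is split
    # on the first "=", so the nested "=" inside signature survives intact:
    # {'src': '10.0.0.1', 'signature': 'user=admin logged in', 'count': '3'}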
@@ -2349,36 +2756,34 @@ def update_notable_events(baseurl, comment, status=None, urgency=None, owner=Non raise Exception("Either eventIDs of a searchID must be provided (or both)") # These the arguments to the REST handler - args = {'comment': comment} + args = {"comment": comment} if status is not None: - args['status'] = status + args["status"] = status if urgency is not None: - args['urgency'] = urgency + args["urgency"] = urgency if owner is not None: - args['newOwner'] = owner + args["newOwner"] = owner # Provide the list of event IDs that you want to change: if eventIDs is not None: - args['ruleUIDs'] = eventIDs + args["ruleUIDs"] = eventIDs if disposition: - args['disposition'] = disposition + args["disposition"] = disposition # If you want to manipulate the notable events returned by a search then include the search ID if searchID is not None: - args['searchID'] = searchID + args["searchID"] = searchID - auth_header = ( - {"Authorization": f"Bearer {auth_token}"} if auth_token else {"Authorization": sessionKey} - ) + auth_header = {"Authorization": f"Bearer {auth_token}"} if auth_token else {"Authorization": sessionKey} - args['output_mode'] = OUTPUT_MODE_JSON + args["output_mode"] = OUTPUT_MODE_JSON mod_notables = requests.post( - f'{baseurl}services/notable_update', + f"{baseurl}services/notable_update", data=args, headers=auth_header, verify=VERIFY_CERTIFICATE, @@ -2389,20 +2794,20 @@ def update_notable_events(baseurl, comment, status=None, urgency=None, owner=Non def severity_to_level(severity: str | None) -> int | float: match severity: - case 'informational': + case "informational": return 0.5 - case 'critical': + case "critical": return 4 - case 'high': + case "high": return 3 - case 'medium': + case "medium": return 2 case _: return 1 def parse_notable(notable, to_dict=False): - """ Parses the notable + """Parses the notable Args: notable (OrderedDict): The notable @@ -2417,34 +2822,27 @@ def parse_notable(notable, to_dict=False): # so we go over the fields, and check if the key equals the value and set the value to be empty string if key == val: demisto.debug( - f'Found notable event raw field [{key}] with key that equals the value - replacing the value with empty string' + f"Found notable event raw field [{key}] with key that equals the value - replacing the value with empty string" ) - notable[key] = '' + notable[key] = "" return dict(notable) if to_dict else notable def requests_handler(url, message, **kwargs): - method = message['method'].lower() - data = message.get('body', '') if method == 'post' else None - headers = dict(message.get('headers', [])) + method = message["method"].lower() + data = message.get("body", "") if method == "post" else None + headers = dict(message.get("headers", [])) try: - response = requests.request( - method, - url, - data=data, - headers=headers, - verify=VERIFY_CERTIFICATE, - **kwargs - ) + response = requests.request(method, url, data=data, headers=headers, verify=VERIFY_CERTIFICATE, **kwargs) except requests.exceptions.HTTPError as e: # Propagate HTTP errors via the returned response message response = e.response - demisto.debug(f'Got exception while using requests handler - {str(e)}') + demisto.debug(f"Got exception while using requests handler - {e!s}") return { - 'status': response.status_code, - 'reason': response.reason, - 'headers': list(response.headers.items()), - 'body': io.BytesIO(response.content) + "status": response.status_code, + "reason": response.reason, + "headers": list(response.headers.items()), + "body": 
io.BytesIO(response.content), } @@ -2455,22 +2853,22 @@ def build_search_kwargs(args, polling=False): kwargs_normalsearch: dict[str, Any] = { "earliest_time": time_str, } - if demisto.get(args, 'earliest_time'): - kwargs_normalsearch['earliest_time'] = args['earliest_time'] - if demisto.get(args, 'latest_time'): - kwargs_normalsearch['latest_time'] = args['latest_time'] - if demisto.get(args, 'app'): - kwargs_normalsearch['app'] = args['app'] - if argToBoolean(demisto.get(args, 'fast_mode')): - kwargs_normalsearch['adhoc_search_level'] = "fast" - kwargs_normalsearch['exec_mode'] = "normal" if polling else "blocking" + if demisto.get(args, "earliest_time"): + kwargs_normalsearch["earliest_time"] = args["earliest_time"] + if demisto.get(args, "latest_time"): + kwargs_normalsearch["latest_time"] = args["latest_time"] + if demisto.get(args, "app"): + kwargs_normalsearch["app"] = args["app"] + if argToBoolean(demisto.get(args, "fast_mode")): + kwargs_normalsearch["adhoc_search_level"] = "fast" + kwargs_normalsearch["exec_mode"] = "normal" if polling else "blocking" return kwargs_normalsearch def build_search_query(args): - query = args['query'] - if not query.startswith('search') and not query.startswith('Search') and not query.startswith('|'): - query = f'search {query}' + query = args["query"] + if not query.startswith("search") and not query.startswith("Search") and not query.startswith("|"): + query = f"search {query}" return query @@ -2479,18 +2877,17 @@ def create_entry_context(args: dict, parsed_search_results, dbot_scores, status_ dbot_ec = {} number_of_results = len(parsed_search_results) - if args.get('update_context', "true") == "true": - ec['Splunk.Result'] = parsed_search_results + if args.get("update_context", "true") == "true": + ec["Splunk.Result"] = parsed_search_results if len(dbot_scores) > 0: - dbot_ec['DBotScore'] = dbot_scores + dbot_ec["DBotScore"] = dbot_scores if status_res: - ec['Splunk.JobStatus(val.SID && val.SID === obj.SID)'] = { - **status_res.outputs, 'TotalResults': number_of_results} + ec["Splunk.JobStatus(val.SID && val.SID === obj.SID)"] = {**status_res.outputs, "TotalResults": number_of_results} if job_id and not status_res: - status = 'DONE' if (number_of_results > 0) else 'NO RESULTS' - ec['Splunk.JobStatus(val.SID && val.SID === obj.SID)'] = [{'SID': job_id, - 'TotalResults': number_of_results, - 'Status': status}] + status = "DONE" if (number_of_results > 0) else "NO RESULTS" + ec["Splunk.JobStatus(val.SID && val.SID === obj.SID)"] = [ + {"SID": job_id, "TotalResults": number_of_results, "Status": status} + ] return ec, dbot_ec @@ -2498,12 +2895,7 @@ def schedule_polling_command(command: str, args: dict, interval_in_secs: int) -> """ Returns a ScheduledCommand object which contain the needed arguments for schedule the polling command. """ - return ScheduledCommand( - command=command, - next_run_in_seconds=interval_in_secs, - args=args, - timeout_in_seconds=600 - ) + return ScheduledCommand(command=command, next_run_in_seconds=interval_in_secs, args=args, timeout_in_seconds=600) def build_search_human_readable(args: dict, parsed_search_results, sid) -> str: @@ -2512,19 +2904,13 @@ def build_search_human_readable(args: dict, parsed_search_results, sid) -> str: if not isinstance(parsed_search_results[0], dict): headers = "results" else: - query = args.get('query', '') - table_args = re.findall(' table (?P[^|]*)', query) - rename_args = re.findall(' rename (?P[^|]*)', query) + query = args.get("query", "") + table_args = re.findall(" table (?P
[^|]*)", query) + rename_args = re.findall(" rename (?P[^|]*)", query) chosen_fields: list = [] for arg_string in table_args: - chosen_fields.extend( - field.strip('"') - for field in re.findall( - r'((?:".*?")|(?:[^\s,]+))', arg_string - ) - if field - ) + chosen_fields.extend(field.strip('"') for field in re.findall(r'((?:".*?")|(?:[^\s,]+))', arg_string) if field) rename_dict = {} for arg_string in rename_args: for field in re.findall(r'((?:".*?")|(?:[^\s,]+))( AS )((?:".*?")|(?:[^\s,]+))', arg_string): @@ -2536,20 +2922,19 @@ def build_search_human_readable(args: dict, parsed_search_results, sid) -> str: headers = update_headers_from_field_names(parsed_search_results, chosen_fields) - query = args['query'].replace('`', r'\`') - hr_headline = 'Splunk Search results for query:\n' + query = args["query"].replace("`", r"\`") + hr_headline = "Splunk Search results for query:\n" if sid: - hr_headline += f'sid: {str(sid)}' + hr_headline += f"sid: {sid!s}" return tableToMarkdown(hr_headline, parsed_search_results, headers) def update_headers_from_field_names(search_result, chosen_fields): - headers: list = [] search_result_keys: set = set().union(*(list(d.keys()) for d in search_result)) for field in chosen_fields: - if field[-1] == '*': - temp_field = field.replace('*', '.*') + if field[-1] == "*": + temp_field = field.replace("*", ".*") headers.extend(key for key in search_result_keys if re.search(temp_field, key)) elif field in search_result_keys: headers.append(field) @@ -2561,7 +2946,7 @@ def get_current_results_batch(search_job: client.Job, batch_size: int, results_o current_batch_kwargs = { "count": batch_size, "offset": results_offset, - 'output_mode': OUTPUT_MODE_JSON, + "output_mode": OUTPUT_MODE_JSON, } return search_job.results(**current_batch_kwargs) @@ -2576,11 +2961,12 @@ def parse_batch_of_results(current_batch_of_results, max_results_to_add, app): continue elif isinstance(item, dict): - if demisto.get(item, 'host'): - batch_dbot_scores.append({'Indicator': item['host'], 'Type': 'hostname', - 'Vendor': 'Splunk', 'Score': 0, 'isTypedIndicator': True}) + if demisto.get(item, "host"): + batch_dbot_scores.append( + {"Indicator": item["host"], "Type": "hostname", "Vendor": "Splunk", "Score": 0, "isTypedIndicator": True} + ) if app: - item['app'] = app + item["app"] = app # Normal events are returned as dicts parsed_batch_results.append(item) @@ -2603,16 +2989,16 @@ def raise_error_for_failed_job(job): """ err_msg = None try: - if job and job['dispatchState'] == 'FAILED': - messages = job['messages'] - for err_type in ['fatal', 'error']: + if job and job["dispatchState"] == "FAILED": + messages = job["messages"] + for err_type in ["fatal", "error"]: if messages.get(err_type): - err_msg = ','.join(messages[err_type]) + err_msg = ",".join(messages[err_type]) break except Exception: pass if err_msg: - raise DemistoException(f'Failed to run the search in Splunk: {err_msg}') + raise DemistoException(f"Failed to run the search in Splunk: {err_msg}") def splunk_search_command(service: client.Service, args: dict) -> CommandResults | list[CommandResults]: @@ -2621,24 +3007,24 @@ def splunk_search_command(service: client.Service, args: dict) -> CommandResults search_kwargs = build_search_kwargs(args, polling) job_sid = args.get("sid") search_job = None - interval_in_secs = int(args.get('interval_in_seconds', 30)) + interval_in_secs = int(args.get("interval_in_seconds", 30)) if not job_sid or not polling: # create a new job to search the query. 
search_job = service.jobs.create(query, **search_kwargs) job_sid = search_job["sid"] - args['sid'] = job_sid + args["sid"] = job_sid raise_error_for_failed_job(search_job) status_cmd_result: CommandResults | None = None if polling: status_cmd_result = splunk_job_status(service, args) assert status_cmd_result # if polling is true, status_cmd_result should not be None - status = status_cmd_result.outputs['Status'] # type: ignore[index] - if status.lower() != 'done': + status = status_cmd_result.outputs["Status"] # type: ignore[index] + if status.lower() != "done": # Job is still running, schedule the next run of the command. scheduled_command = schedule_polling_command("splunk-search", args, interval_in_secs) status_cmd_result.scheduled_command = scheduled_command - status_cmd_result.readable_output = 'Job is still running, it may take a little while...' + status_cmd_result.readable_output = "Job is still running, it may take a little while..." return status_cmd_result else: # Get the job by its SID. @@ -2661,52 +3047,54 @@ def splunk_search_command(service: client.Service, args: dict) -> CommandResults ): current_batch_of_results = get_current_results_batch(search_job, batch_size, results_offset) max_results_to_add = results_limit - len(total_parsed_results) - parsed_batch_results, batch_dbot_scores = parse_batch_of_results(current_batch_of_results, max_results_to_add, - search_kwargs.get('app', '')) + parsed_batch_results, batch_dbot_scores = parse_batch_of_results( + current_batch_of_results, max_results_to_add, search_kwargs.get("app", "") + ) total_parsed_results.extend(parsed_batch_results) dbot_scores.extend(batch_dbot_scores) results_offset += batch_size entry_context_splunk_search, entry_context_dbot_score = create_entry_context( - args, total_parsed_results, dbot_scores, status_cmd_result, str(job_sid)) + args, total_parsed_results, dbot_scores, status_cmd_result, str(job_sid) + ) human_readable = build_search_human_readable(args, total_parsed_results, str(job_sid)) - results = [CommandResults( - outputs=entry_context_splunk_search, - raw_response=total_parsed_results, - readable_output=human_readable - )] - dbot_table_headers = ['Indicator', 'Type', 'Vendor', 'Score', 'isTypedIndicator'] + results = [ + CommandResults(outputs=entry_context_splunk_search, raw_response=total_parsed_results, readable_output=human_readable) + ] + dbot_table_headers = ["Indicator", "Type", "Vendor", "Score", "isTypedIndicator"] if entry_context_dbot_score: - results.append(CommandResults( - outputs=entry_context_dbot_score, - readable_output=tableToMarkdown("DBot Score", entry_context_dbot_score['DBotScore'], headers=dbot_table_headers))) + results.append( + CommandResults( + outputs=entry_context_dbot_score, + readable_output=tableToMarkdown("DBot Score", entry_context_dbot_score["DBotScore"], headers=dbot_table_headers), + ) + ) return results def splunk_job_create_command(service: client.Service, args: dict): - app = args.get('app', '') + app = args.get("app", "") query = build_search_query(args) - search_kwargs = { - "exec_mode": "normal", - "app": app - } + search_kwargs = {"exec_mode": "normal", "app": app} search_job = service.jobs.create(query, **search_kwargs) - return_results(CommandResults( - outputs_prefix='Splunk', - readable_output=f"Splunk Job created with SID: {search_job.sid}", - outputs={'Job': search_job.sid} - )) + return_results( + CommandResults( + outputs_prefix="Splunk", + readable_output=f"Splunk Job created with SID: {search_job.sid}", + outputs={"Job": search_job.sid}, + ) + 
)


 def splunk_results_command(service: client.Service, args: dict):
     res = []
-    sid = args.get('sid', '')
-    limit = int(args.get('limit', '100'))
+    sid = args.get("sid", "")
+    limit = int(args.get("limit", "100"))
     try:
         job = service.job(sid)
     except HTTPError as error:
-        msg = error.message if hasattr(error, 'message') else str(error)
+        msg = error.message if hasattr(error, "message") else str(error)
         if error.status == 404:
             return f"Found no job for sid: {sid}"
         else:
@@ -2718,10 +3106,12 @@ def splunk_results_command(service: client.Service, args: dict):
         elif isinstance(result, dict):
             # Normal events are returned as dicts
             res.append(result)
-    return_results(CommandResults(
-        raw_response=json.dumps(res),
-        content_format=EntryFormat.JSON,
-    ))
+    return_results(
+        CommandResults(
+            raw_response=json.dumps(res),
+            content_format=EntryFormat.JSON,
+        )
+    )


 def parse_time_to_minutes():
@@ -2729,22 +3119,21 @@
     Calculate how much time to fetch back in minutes
     Returns (int): Time to fetch back in minutes
     """
-    number_of_times, time_unit = FETCH_TIME.split(' ')
+    number_of_times, time_unit = FETCH_TIME.split(" ")
     if str(number_of_times).isdigit():
         number_of_times = int(number_of_times)
     else:
-        return_error("Error: Invalid fetch time, need to be a positive integer with the time unit afterwards"
-                     " e.g '2 months, 4 days'.")
+        return_error(
+            "Error: Invalid fetch time, must be a positive integer with the time unit afterwards," " e.g. '2 months, 4 days'."
+        )
     # If the user input contains a plural of a time unit, for example 'hours', we remove the 's' as it doesn't
     # impact the minutes in that time unit
-    if time_unit[-1] == 's':
+    if time_unit[-1] == "s":
         time_unit = time_unit[:-1]
-    if time_unit_value_in_minutes := TIME_UNIT_TO_MINUTES.get(
-        time_unit.lower()
-    ):
+    if time_unit_value_in_minutes := TIME_UNIT_TO_MINUTES.get(time_unit.lower()):
         return number_of_times * time_unit_value_in_minutes
-    return_error('Error: Invalid time unit.')
+    return_error("Error: Invalid time unit.")
     return None


@@ -2752,26 +3141,28 @@
 def splunk_get_indexes_command(service: client.Service):
     indexes = service.indexes
     indexesNames = []
     for index in indexes:
-        index_json = {'name': index.name, 'count': index["totalEventCount"]}
+        index_json = {"name": index.name, "count": index["totalEventCount"]}
         indexesNames.append(index_json)
-    return_results(CommandResults(
-        content_format=EntryFormat.JSON,
-        raw_response=json.dumps(indexesNames),
-        readable_output=tableToMarkdown("Splunk Indexes names", indexesNames, '')
-    ))
+    return_results(
+        CommandResults(
+            content_format=EntryFormat.JSON,
+            raw_response=json.dumps(indexesNames),
+            readable_output=tableToMarkdown("Splunk Indexes names", indexesNames, ""),
+        )
+    )


 def splunk_submit_event_command(service: client.Service, args: dict):
     try:
-        index = service.indexes[args['index']]
+        index = service.indexes[args["index"]]
     except KeyError:
         return_error(f'Found no Splunk index: {args["index"]}')
     else:
-        data = args['data']
-        data_formatted = data.encode('utf8')
-        r = index.submit(data_formatted, sourcetype=args['sourcetype'], host=args['host'])
-        return_results(f'Event was created in Splunk index: {r.name}')
+        data = args["data"]
+        data_formatted = data.encode("utf8")
+        r = index.submit(data_formatted, sourcetype=args["sourcetype"], host=args["host"])
+        return_results(f"Event was created in Splunk index: {r.name}")


 def validate_indexes(indexes, service):
@@ -2796,7 +3187,7 @@
     """
     get_file_path_res =
demisto.getFilePath(entry_id) file_path = get_file_path_res["path"] - with open(file_path, encoding='utf-8') as file_data: + with open(file_path, encoding="utf-8") as file_data: return file_data.read() @@ -2819,8 +3210,8 @@ def parse_fields(fields): try: parsed_fields = json.loads(fields) except Exception: - demisto.debug('Fields provided are not valid JSON; treating as a single field') - parsed_fields = {'fields': fields} + demisto.debug("Fields provided are not valid JSON; treating as a single field") + parsed_fields = {"fields": fields} return parsed_fields return None @@ -2856,52 +3247,46 @@ def splunk_submit_event_hec( request_channel: str | None, batch_event_data: str | None, entry_id: int | None, - service + service, ): if hec_token is None: - raise Exception('The HEC Token was not provided') + raise Exception("The HEC Token was not provided") if batch_event_data: events = batch_event_data elif entry_id: - demisto.debug(f'{INTEGRATION_LOG} - loading events data from file with {entry_id=}') + demisto.debug(f"{INTEGRATION_LOG} - loading events data from file with {entry_id=}") events = get_events_from_file(entry_id) else: parsed_fields = parse_fields(fields) events = assign_params( - event=event, - host=host, - fields=parsed_fields, - index=index, - sourcetype=source_type, - source=source, - time=time_ + event=event, host=host, fields=parsed_fields, index=index, sourcetype=source_type, source=source, time=time_ ) indexes = extract_indexes(events) if not validate_indexes(indexes, service): - raise DemistoException('Index name does not exist in your splunk instance') + raise DemistoException("Index name does not exist in your splunk instance") demisto.debug("All indexes are valid, sending events to Splunk.") headers = { - 'Authorization': f'Splunk {hec_token}', - 'Content-Type': 'application/json', + "Authorization": f"Splunk {hec_token}", + "Content-Type": "application/json", } if request_channel: - headers['X-Splunk-Request-Channel'] = request_channel + headers["X-Splunk-Request-Channel"] = request_channel - data = '' + data = "" if entry_id or batch_event_data: data = events else: data = json.dumps(events) return requests.post( - f'{baseurl}/services/collector/event', + f"{baseurl}/services/collector/event", data=data, headers=headers, verify=VERIFY_CERTIFICATE, @@ -2909,101 +3294,112 @@ def splunk_submit_event_hec( def splunk_submit_event_hec_command(params: dict, service, args: dict): - hec_token = params.get('cred_hec_token', {}).get('password') or params.get('hec_token') - baseurl = params.get('hec_url') + hec_token = params.get("cred_hec_token", {}).get("password") or params.get("hec_token") + baseurl = params.get("hec_url") if baseurl is None: - raise Exception('The HEC URL was not provided.') - - event = args.get('event') - host = args.get('host') - fields = args.get('fields') - index = args.get('index') - source_type = args.get('source_type') - source = args.get('source') - time_ = args.get('time') - request_channel = args.get('request_channel') - batch_event_data = args.get('batch_event_data') - entry_id = args.get('entry_id') + raise Exception("The HEC URL was not provided.") + + event = args.get("event") + host = args.get("host") + fields = args.get("fields") + index = args.get("index") + source_type = args.get("source_type") + source = args.get("source") + time_ = args.get("time") + request_channel = args.get("request_channel") + batch_event_data = args.get("batch_event_data") + entry_id = args.get("entry_id") if not event and not batch_event_data and not entry_id: - 
raise DemistoException("Invalid input: Please specify one of the following arguments: `event`, " - "`batch_event_data`, or `entry_id`.") + raise DemistoException( + "Invalid input: Please specify one of the following arguments: `event`, " "`batch_event_data`, or `entry_id`." + ) - response_info = splunk_submit_event_hec(hec_token, baseurl, event, fields, host, index, source_type, source, time_, - request_channel, batch_event_data, entry_id, service) + response_info = splunk_submit_event_hec( + hec_token, + baseurl, + event, + fields, + host, + index, + source_type, + source, + time_, + request_channel, + batch_event_data, + entry_id, + service, + ) - if 'Success' not in response_info.text: + if "Success" not in response_info.text: return_error(f"Could not send event to Splunk {response_info.text}") else: - response_dict = json.loads(response_info.text - ) - if response_dict and 'ackId' in response_dict: + response_dict = json.loads(response_info.text) + if response_dict and "ackId" in response_dict: return_results(f"The events were sent successfully to Splunk. AckID: {response_dict['ackId']}") else: - return_results('The events were sent successfully to Splunk.') + return_results("The events were sent successfully to Splunk.") def splunk_edit_notable_event_command(base_url: str, token: str, auth_token: str | None, args: dict) -> None: session_key = None if auth_token else token event_ids = None - if args.get('eventIDs'): - event_ids_str = args['eventIDs'] + if args.get("eventIDs"): + event_ids_str = args["eventIDs"] event_ids = event_ids_str.split(",") - status = int(args['status']) if args.get('status') else None + status = int(args["status"]) if args.get("status") else None # Map the label to the disposition id - disposition = args.get('disposition', '') + disposition = args.get("disposition", "") if disposition and disposition in DEFAULT_DISPOSITIONS: disposition = DEFAULT_DISPOSITIONS[disposition] - response_info = update_notable_events(baseurl=base_url, - comment=args.get('comment'), status=status, - urgency=args.get('urgency'), - owner=args.get('owner'), eventIDs=event_ids, - disposition=disposition, - auth_token=auth_token, sessionKey=session_key) + response_info = update_notable_events( + baseurl=base_url, + comment=args.get("comment"), + status=status, + urgency=args.get("urgency"), + owner=args.get("owner"), + eventIDs=event_ids, + disposition=disposition, + auth_token=auth_token, + sessionKey=session_key, + ) - if 'success' not in response_info or not response_info['success']: - return_error(f'Could not update notable events: {args.get("eventIDs", "")}: {str(response_info)}') + if "success" not in response_info or not response_info["success"]: + return_error(f'Could not update notable events: {args.get("eventIDs", "")}: {response_info!s}') else: return_results(f'Splunk ES Notable events: {response_info.get("message")}') def splunk_job_status(service: client.Service, args: dict) -> CommandResults | None: - sid = args.get('sid') + sid = args.get("sid") try: job = service.job(sid) except HTTPError as error: - if str(error) == 'HTTP 404 Not Found -- Unknown sid.': + if str(error) == "HTTP 404 Not Found -- Unknown sid.": return CommandResults(readable_output=f"Not found job for SID: {sid}") else: return_error(error) # pylint: disable=no-member return None else: - status = job.state.content.get('dispatchState') - entry_context = { - 'SID': sid, - 'Status': status - } - human_readable = tableToMarkdown('Splunk Job Status', entry_context) + status = 
job.state.content.get("dispatchState") + entry_context = {"SID": sid, "Status": status} + human_readable = tableToMarkdown("Splunk Job Status", entry_context) return CommandResults( - outputs=entry_context, - readable_output=human_readable, - outputs_prefix="Splunk.JobStatus", - outputs_key_field="SID" + outputs=entry_context, readable_output=human_readable, outputs_prefix="Splunk.JobStatus", outputs_key_field="SID" ) def splunk_parse_raw_command(args: dict): - raw = args.get('raw', '') + raw = args.get("raw", "") rawDict = rawToDict(raw) - return_results(CommandResults( - outputs_prefix='Splunk.Raw.Parsed', - raw_response=json.dumps(rawDict), - outputs=rawDict, - content_format=EntryFormat.JSON - )) + return_results( + CommandResults( + outputs_prefix="Splunk.Raw.Parsed", raw_response=json.dumps(rawDict), outputs=rawDict, content_format=EntryFormat.JSON + ) + ) def test_module(service: client.Service, params: dict) -> None: @@ -3011,42 +3407,43 @@ def test_module(service: client.Service, params: dict) -> None: # validate connection service.info() except AuthenticationError: - return_error('Authentication error, please validate your credentials.') + return_error("Authentication error, please validate your credentials.") # validate fetch - if params.get('isFetch'): + if params.get("isFetch"): t = datetime.utcnow() - timedelta(hours=1) time = t.strftime(SPLUNK_TIME_FORMAT) - kwargs = {'count': 1, 'earliest_time': time, 'output_mode': OUTPUT_MODE_JSON} - query = params['fetchQuery'] + kwargs = {"count": 1, "earliest_time": time, "output_mode": OUTPUT_MODE_JSON} + query = params["fetchQuery"] try: - if MIRROR_DIRECTION.get(params.get('mirror_direction', '')) and not params.get('timezone'): - return_error('Cannot mirror incidents when timezone is not configured. Please enter the ' - 'timezone of the Splunk server being used in the integration configuration.') + if MIRROR_DIRECTION.get(params.get("mirror_direction", "")) and not params.get("timezone"): + return_error( + "Cannot mirror incidents when timezone is not configured. Please enter the " + "timezone of the Splunk server being used in the integration configuration." + ) for item in results.JSONResultsReader(service.jobs.oneshot(query, **kwargs)): if isinstance(item, results.Message): continue if EVENT_ID not in item: - if MIRROR_DIRECTION.get(params.get('mirror_direction', '')): - return_error('Cannot mirror incidents if fetch query does not use the `notable` macro.') + if MIRROR_DIRECTION.get(params.get("mirror_direction", "")): + return_error("Cannot mirror incidents if fetch query does not use the `notable` macro.") if ENABLED_ENRICHMENTS: - return_error('When using the enrichment mechanism, an event_id field is needed, and thus, ' - 'one must use a fetch query of the following format: search `notable` .......\n' - 'Please re-edit the fetchQuery parameter in the integration configuration, reset ' - 'the fetch mechanism using the splunk-reset-enriching-fetch-mechanism command and ' - 'run the fetch again.') + return_error( + "When using the enrichment mechanism, an event_id field is needed, and thus, " + "one must use a fetch query of the following format: search `notable` .......\n" + "Please re-edit the fetchQuery parameter in the integration configuration, reset " + "the fetch mechanism using the splunk-reset-enriching-fetch-mechanism command and " + "run the fetch again." 
+ ) except HTTPError as error: return_error(str(error)) - if params.get('hec_url'): - headers = { - 'Content-Type': 'application/json' - } + if params.get("hec_url"): + headers = {"Content-Type": "application/json"} try: - requests.get(params.get('hec_url', '') + '/services/collector/health', headers=headers, - verify=VERIFY_CERTIFICATE) + requests.get(params.get("hec_url", "") + "/services/collector/health", headers=headers, verify=VERIFY_CERTIFICATE) except Exception as e: return_error("Could not connect to HEC server. Make sure URL and token are correct.", e) @@ -3065,9 +3462,9 @@ def replace_keys(data): def kv_store_collection_create(service: client.Service, args: dict) -> CommandResults: try: - service.kvstore.create(args['kv_store_name']) + service.kvstore.create(args["kv_store_name"]) except HTTPError as error: - if error.status == 409 and error.reason == 'Conflict': + if error.status == 409 and error.reason == "Conflict": raise DemistoException( f"KV store collection {service.namespace['app']} already exists.", ) from error @@ -3079,133 +3476,120 @@ def kv_store_collection_create(service: client.Service, args: dict) -> CommandRe def kv_store_collection_config(service: client.Service, args: dict) -> CommandResults: - app = service.namespace['app'] - kv_store_collection_name = args['kv_store_collection_name'] - kv_store_fields = args['kv_store_fields'].split(',') + app = service.namespace["app"] + kv_store_collection_name = args["kv_store_collection_name"] + kv_store_fields = args["kv_store_fields"].split(",") for key_val in kv_store_fields: try: - _key, val = key_val.split('=', 1) + _key, val = key_val.split("=", 1) except ValueError: - return_error(f'error when trying to parse {key_val} you possibly forgot to add the field type.') + return_error(f"error when trying to parse {key_val} you possibly forgot to add the field type.") else: - if _key.startswith('index.'): - service.kvstore[kv_store_collection_name].update_index(_key.replace('index.', ''), val) + if _key.startswith("index."): + service.kvstore[kv_store_collection_name].update_index(_key.replace("index.", ""), val) else: - service.kvstore[kv_store_collection_name].update_field(_key.replace('field.', ''), val) - return CommandResults( - readable_output=f"KV store collection {app} configured successfully" - ) + service.kvstore[kv_store_collection_name].update_field(_key.replace("field.", ""), val) + return CommandResults(readable_output=f"KV store collection {app} configured successfully") def kv_store_collection_create_transform(service: client.Service, args: dict) -> CommandResults: - collection_name = args['kv_store_collection_name'] - fields = args.get('supported_fields') + collection_name = args["kv_store_collection_name"] + fields = args.get("supported_fields") if not fields: kv_store = service.kvstore[collection_name] default_keys = get_keys_and_types(kv_store).keys() if not default_keys: - raise DemistoException('Please provide supported_fields or run first splunk-kv-store-collection-config') - default_keys = (key.replace('field.', '').replace('index.', '') for key in default_keys) + raise DemistoException("Please provide supported_fields or run first splunk-kv-store-collection-config") + default_keys = (key.replace("field.", "").replace("index.", "") for key in default_keys) fields = f"_key,{','.join(default_keys)}" transforms = service.confs["transforms"] - params = { - "external_type": "kvstore", - "collection": collection_name, - "namespace": service.namespace, - "fields_list": fields - } + params = 
{"external_type": "kvstore", "collection": collection_name, "namespace": service.namespace, "fields_list": fields} transforms.create(name=collection_name, **params) - return CommandResults( - readable_output=f"KV store collection transforms {collection_name} created successfully" - ) + return CommandResults(readable_output=f"KV store collection transforms {collection_name} created successfully") def batch_kv_upload(kv_data_service_client: client.KVStoreCollectionData, json_data: str) -> dict: - if json_data.startswith('[') and json_data.endswith(']'): + if json_data.startswith("[") and json_data.endswith("]"): record: Record = kv_data_service_client._post( - 'batch_save', headers=client.KVStoreCollectionData.JSON_HEADER, body=json_data.encode('utf-8')) + "batch_save", headers=client.KVStoreCollectionData.JSON_HEADER, body=json_data.encode("utf-8") + ) return dict(record.items()) - elif json_data.startswith('{') and json_data.endswith('}'): - return kv_data_service_client.insert(json_data.encode('utf-8')) + elif json_data.startswith("{") and json_data.endswith("}"): + return kv_data_service_client.insert(json_data.encode("utf-8")) else: - raise DemistoException('kv_store_data argument should be in json format. ' - '(e.g. {"key": "value"} or [{"key": "value"}, {"key": "value"}]') + raise DemistoException( + "kv_store_data argument should be in json format. " '(e.g. {"key": "value"} or [{"key": "value"}, {"key": "value"}]' + ) def kv_store_collection_add_entries(service: client.Service, args: dict) -> None: - kv_store_data = args.get('kv_store_data', '') - kv_store_collection_name = args['kv_store_collection_name'] - indicator_path = args.get('indicator_path') + kv_store_data = args.get("kv_store_data", "") + kv_store_collection_name = args["kv_store_collection_name"] + indicator_path = args.get("indicator_path") batch_kv_upload(service.kvstore[kv_store_collection_name].data, kv_store_data) indicators_timeline = None if indicator_path: kv_store_data = json.loads(kv_store_data) - indicators = extract_indicator(indicator_path, - kv_store_data if isinstance(kv_store_data, list) else [kv_store_data]) + indicators = extract_indicator(indicator_path, kv_store_data if isinstance(kv_store_data, list) else [kv_store_data]) indicators_timeline = IndicatorsTimeline( indicators=indicators, - category='Integration Update', - message=f'Indicator added to {kv_store_collection_name} store in Splunk' + category="Integration Update", + message=f"Indicator added to {kv_store_collection_name} store in Splunk", ) - return_results(CommandResults( - readable_output=f"Data added to {kv_store_collection_name}", - indicators_timeline=indicators_timeline - )) + return_results( + CommandResults(readable_output=f"Data added to {kv_store_collection_name}", indicators_timeline=indicators_timeline) + ) def kv_store_collections_list(service: client.Service) -> None: - app_name = service.namespace['app'] + app_name = service.namespace["app"] names = [x.name for x in service.kvstore.iter()] - readable_output = "list of collection names {}\n| name |\n| --- |\n|{}|".format(app_name, '|\n|'.join(names)) - return_results(CommandResults( - outputs_prefix='Splunk.CollectionList', - outputs=names, - readable_output=readable_output, - raw_response=names - )) + readable_output = "list of collection names {}\n| name |\n| --- |\n|{}|".format(app_name, "|\n|".join(names)) + return_results( + CommandResults(outputs_prefix="Splunk.CollectionList", outputs=names, readable_output=readable_output, raw_response=names) + ) def 
kv_store_collection_data_delete(service: client.Service, args: dict) -> None: - kv_store_collection_name = args['kv_store_collection_name'].split(',') + kv_store_collection_name = args["kv_store_collection_name"].split(",") for store in kv_store_collection_name: service.kvstore[store].data.delete() return_results(f"The values of the {args['kv_store_collection_name']} were deleted successfully") def kv_store_collection_delete(service: client.Service, args: dict) -> CommandResults: - kv_store_names = args['kv_store_name'] - for store in kv_store_names.split(','): + kv_store_names = args["kv_store_name"] + for store in kv_store_names.split(","): service.kvstore[store].delete() - return CommandResults(readable_output=f'The following KV store {kv_store_names} were deleted successfully.') + return CommandResults(readable_output=f"The following KV store {kv_store_names} were deleted successfully.") def build_kv_store_query(kv_store: client.KVStoreCollection, args: dict): - if 'key' in args and 'value' in args: - _type = get_key_type(kv_store, args['key']) - args['value'] = _type(args['value']) if _type else args['value'] - return json.dumps({args['key']: args['value']}) - elif 'limit' in args: - return {'limit': args['limit']} + if "key" in args and "value" in args: + _type = get_key_type(kv_store, args["key"]) + args["value"] = _type(args["value"]) if _type else args["value"] + return json.dumps({args["key"]: args["value"]}) + elif "limit" in args: + return {"limit": args["limit"]} else: - return args.get('query', '{}') + return args.get("query", "{}") def kv_store_collection_data(service: client.Service, args: dict) -> None: - stores = args['kv_store_collection_name'].split(',') + stores = args["kv_store_collection_name"].split(",") for i, store_res in enumerate(get_store_data(service)): store = service.kvstore[stores[i]] if store_res: - readable_output = tableToMarkdown(name=f"list of collection values {store.name}", - t=store_res) + readable_output = tableToMarkdown(name=f"list of collection values {store.name}", t=store_res) return_results( CommandResults( - outputs_prefix='Splunk.KVstoreData', + outputs_prefix="Splunk.KVstoreData", outputs={store.name: store_res}, readable_output=readable_output, - raw_response=store_res + raw_response=store_res, ) ) else: @@ -3213,45 +3597,42 @@ def kv_store_collection_data(service: client.Service, args: dict) -> None: def kv_store_collection_delete_entry(service: client.Service, args: dict) -> None: - store_name = args['kv_store_collection_name'] - indicator_path = args.get('indicator_path') + store_name = args["kv_store_collection_name"] + indicator_path = args.get("indicator_path") store: client.KVStoreCollection = service.kvstore[store_name] query = build_kv_store_query(store, args) store_res = next(get_store_data(service)) indicators = extract_indicator(indicator_path, store_res) if indicator_path else [] store.data.delete(query=query) - indicators_timeline = IndicatorsTimeline( - indicators=indicators, - category='Integration Update', - message=f'Indicator deleted from {store_name} store in Splunk' - ) if indicators else None - return_results(CommandResults( - readable_output=f'The values of the {store_name} were deleted successfully', - indicators_timeline=indicators_timeline - )) + indicators_timeline = ( + IndicatorsTimeline( + indicators=indicators, category="Integration Update", message=f"Indicator deleted from {store_name} store in Splunk" + ) + if indicators + else None + ) + return_results( + CommandResults( + readable_output=f"The values of 
the {store_name} were deleted successfully", indicators_timeline=indicators_timeline + ) + ) def check_error(service: client.Service, args: dict) -> None: - app = args.get('app_name') - store_name = args.get('kv_store_collection_name') + app = args.get("app_name") + store_name = args.get("kv_store_collection_name") if app not in service.apps: - raise DemistoException('app not found') + raise DemistoException("app not found") elif store_name and store_name not in service.kvstore: - raise DemistoException('KV Store not found') + raise DemistoException("KV Store not found") def get_key_type(kv_store: client.KVStoreCollection, _key: str): keys_and_types = get_keys_and_types(kv_store) - types = { - 'number': float, - 'string': str, - 'cidr': str, - 'boolean': bool, - 'time': str - } - index = f'index.{_key}' - field = f'field.{_key}' - val_type = keys_and_types.get(field) or keys_and_types.get(index) or '' + types = {"number": float, "string": str, "cidr": str, "boolean": bool, "time": str} + index = f"index.{_key}" + field = f"field.{_key}" + val_type = keys_and_types.get(field) or keys_and_types.get(index) or "" return types.get(val_type) @@ -3265,25 +3646,23 @@ def get_keys_and_types(kv_store: client.KVStoreCollection) -> dict[str, str]: def get_kv_store_config(kv_store: client.KVStoreCollection) -> str: keys = get_keys_and_types(kv_store) - readable = [f'#### configuration for {kv_store.name} store', - '| field name | type |', - '| --- | --- |'] - readable.extend(f'| {_key} | {val} |' for _key, val in keys.items()) - return '\n'.join(readable) + readable = [f"#### configuration for {kv_store.name} store", "| field name | type |", "| --- | --- |"] + readable.extend(f"| {_key} | {val} |" for _key, val in keys.items()) + return "\n".join(readable) def get_auth_session_key(service: client.Service) -> str: """ Get the session key or token for POST request based on whether the Splunk basic auth are true or not """ - return service and service.basic and service._auth_headers[0][1] or service.token + return (service and service.basic and service._auth_headers[0][1]) or service.token def extract_indicator(indicator_path: str, _dict_objects: list[dict]) -> list[str]: indicators = [] - indicator_paths = indicator_path.split('.') + indicator_paths = indicator_path.split(".") for indicator_obj in _dict_objects: - indicator = '' + indicator = "" for path in indicator_paths: indicator = indicator_obj.get(path, {}) indicators.append(str(indicator)) @@ -3292,13 +3671,13 @@ def extract_indicator(indicator_path: str, _dict_objects: list[dict]) -> list[st def get_store_data(service: client.Service): args = demisto.args() - stores = args['kv_store_collection_name'].split(',') + stores = args["kv_store_collection_name"].split(",") for store in stores: kvstore: client.KVStoreCollection = service.kvstore[store] query = build_kv_store_query(kvstore, args) if isinstance(query, str): - query = {'query': query} + query = {"query": query} yield kvstore.data.query(**query) @@ -3308,14 +3687,14 @@ def get_connection_args(params: dict) -> dict: Returns: connection args """ - app = params.get('app', '-') + app = params.get("app", "-") return { - 'host': params['host'].replace('https://', '').rstrip('/'), - 'port': params['port'], - 'app': app or "-", - 'verify': VERIFY_CERTIFICATE, - 'retries': 3, - 'retryDelay': 3, + "host": params["host"].replace("https://", "").rstrip("/"), + "port": params["port"], + "app": app or "-", + "verify": VERIFY_CERTIFICATE, + "retries": 3, + "retryDelay": 3, } @@ -3343,123 +3722,129 @@ def 
main(): # pragma: no cover params = demisto.params() args = demisto.args() - if command == 'splunk-parse-raw': + if command == "splunk-parse-raw": splunk_parse_raw_command(args) sys.exit(0) service = None - proxy = argToBoolean(params.get('proxy', False)) + proxy = argToBoolean(params.get("proxy", False)) connection_args = get_connection_args(params) auth_token = None - username = params['authentication']['identifier'] - password = params['authentication']['password'] - if username == '_token': - connection_args['splunkToken'] = password + username = params["authentication"]["identifier"] + password = params["authentication"]["password"] + if username == "_token": + connection_args["splunkToken"] = password auth_token = password else: - if '@_basic' in username: - username = username.split('@_basic')[0] - connection_args['basic'] = True - connection_args['username'] = username - connection_args['password'] = password - connection_args['autologin'] = True + if "@_basic" in username: + username = username.split("@_basic")[0] + connection_args["basic"] = True + connection_args["username"] = username + connection_args["password"] = password + connection_args["autologin"] = True if proxy: handle_proxy() - comment_tag_to_splunk = params.get('comment_tag_to_splunk', 'FROM XSOAR') - comment_tag_from_splunk = params.get('comment_tag_from_splunk', 'FROM SPLUNK') + comment_tag_to_splunk = params.get("comment_tag_to_splunk", "FROM XSOAR") + comment_tag_from_splunk = params.get("comment_tag_from_splunk", "FROM SPLUNK") if comment_tag_to_splunk == comment_tag_from_splunk: - raise DemistoException('Comment Tag to Splunk and Comment Tag ' - 'from Splunk cannot have the same value.') + raise DemistoException("Comment Tag to Splunk and Comment Tag " "from Splunk cannot have the same value.") - connection_args['handler'] = requests_handler + connection_args["handler"] = requests_handler if (service := client.connect(**connection_args)) is None: demisto.error("Could not connect to SplunkPy") - mapper = UserMappingObject(service, params.get('userMapping'), params.get('user_map_lookup_name'), - params.get('xsoar_user_field'), params.get('splunk_user_field')) + mapper = UserMappingObject( + service, + params.get("userMapping"), + params.get("user_map_lookup_name"), + params.get("xsoar_user_field"), + params.get("splunk_user_field"), + ) # The command command holds the command sent from the user. 
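# --- Illustrative aside (editor's sketch, not part of this format-only patch) ---
# The if/elif chain below routes `command` to its handler. For the handlers that
# share the (service, args) signature and return results, a minimal table-driven
# equivalent could look like the following; `COMMAND_MAP` and `dispatch` are
# hypothetical names introduced here for illustration only.
COMMAND_MAP = {
    "splunk-search": splunk_search_command,
    "splunk-job-status": splunk_job_status,
}

def dispatch(command: str, service, args: dict) -> None:
    # Look the handler up once instead of walking an if/elif ladder.
    if handler := COMMAND_MAP.get(command):
        return_results(handler(service, args))
    else:
        raise NotImplementedError(f"Command not implemented: {command}")
# --- End aside ---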
- if command == 'test-module': + if command == "test-module": test_module(service, params) - return_results('ok') - elif command == 'splunk-reset-enriching-fetch-mechanism': + return_results("ok") + elif command == "splunk-reset-enriching-fetch-mechanism": reset_enriching_fetch_mechanism() - elif command == 'splunk-search': + elif command == "splunk-search": return_results(splunk_search_command(service, args)) - elif command == 'splunk-job-create': + elif command == "splunk-job-create": splunk_job_create_command(service, args) - elif command == 'splunk-results': + elif command == "splunk-results": splunk_results_command(service, args) - elif command == 'splunk-get-indexes': + elif command == "splunk-get-indexes": splunk_get_indexes_command(service) - elif command == 'fetch-incidents': - demisto.info('########### FETCH #############') + elif command == "fetch-incidents": + demisto.info("########### FETCH #############") fetch_incidents(service, mapper, comment_tag_to_splunk, comment_tag_from_splunk) - elif command == 'splunk-submit-event': + elif command == "splunk-submit-event": splunk_submit_event_command(service, args) - elif command == 'splunk-notable-event-edit': + elif command == "splunk-notable-event-edit": base_url = f"https://{connection_args['host']}:{connection_args['port']}/" token = get_auth_session_key(service) splunk_edit_notable_event_command(base_url, token, auth_token, args) - elif command == 'splunk-submit-event-hec': + elif command == "splunk-submit-event-hec": splunk_submit_event_hec_command(params, service, args) - elif command == 'splunk-job-status': + elif command == "splunk-job-status": return_results(splunk_job_status(service, args)) - elif command.startswith('splunk-kv-') and service is not None: - app = args.get('app_name', 'search') - service.namespace = namespace(app=app, owner='nobody', sharing='app') + elif command.startswith("splunk-kv-") and service is not None: + app = args.get("app_name", "search") + service.namespace = namespace(app=app, owner="nobody", sharing="app") check_error(service, args) - if command == 'splunk-kv-store-collection-create': + if command == "splunk-kv-store-collection-create": return_results(kv_store_collection_create(service, args)) - elif command == 'splunk-kv-store-collection-config': + elif command == "splunk-kv-store-collection-config": return_results(kv_store_collection_config(service, args)) - elif command == 'splunk-kv-store-collection-create-transform': + elif command == "splunk-kv-store-collection-create-transform": return_results(kv_store_collection_create_transform(service, args)) - elif command == 'splunk-kv-store-collection-delete': + elif command == "splunk-kv-store-collection-delete": return_results(kv_store_collection_delete(service, args)) - elif command == 'splunk-kv-store-collections-list': + elif command == "splunk-kv-store-collections-list": kv_store_collections_list(service) - elif command == 'splunk-kv-store-collection-add-entries': + elif command == "splunk-kv-store-collection-add-entries": kv_store_collection_add_entries(service, args) - elif command in ['splunk-kv-store-collection-data-list', - 'splunk-kv-store-collection-search-entry']: + elif command in ["splunk-kv-store-collection-data-list", "splunk-kv-store-collection-search-entry"]: kv_store_collection_data(service, args) - elif command == 'splunk-kv-store-collection-data-delete': + elif command == "splunk-kv-store-collection-data-delete": kv_store_collection_data_delete(service, args) - elif command == 'splunk-kv-store-collection-delete-entry': + 
elif command == "splunk-kv-store-collection-delete-entry": kv_store_collection_delete_entry(service, args) - elif command == 'get-mapping-fields': - if argToBoolean(params.get('use_cim', False)): + elif command == "get-mapping-fields": + if argToBoolean(params.get("use_cim", False)): return_results(get_cim_mapping_field_command()) else: return_results(get_mapping_fields_command(service, mapper, params, comment_tag_to_splunk, comment_tag_from_splunk)) - elif command == 'get-remote-data': - raise NotImplementedError(f'the {command} command is not implemented, use get-modified-remote-data instead.') - elif command == 'get-modified-remote-data': - demisto.info('########### MIRROR IN #############') + elif command == "get-remote-data": + raise NotImplementedError(f"the {command} command is not implemented, use get-modified-remote-data instead.") + elif command == "get-modified-remote-data": + demisto.info("########### MIRROR IN #############") try: - get_modified_remote_data_command(service=service, args=args, - close_incident=params.get('close_incident'), - close_end_statuses=params.get('close_end_status_statuses'), - close_extra_labels=argToList(params.get('close_extra_labels', '')), - mapper=mapper, - comment_tag_from_splunk=comment_tag_from_splunk) + get_modified_remote_data_command( + service=service, + args=args, + close_incident=params.get("close_incident"), + close_end_statuses=params.get("close_end_status_statuses"), + close_extra_labels=argToList(params.get("close_extra_labels", "")), + mapper=mapper, + comment_tag_from_splunk=comment_tag_from_splunk, + ) except Exception as e: return_error(f"An error occurred during the Mirror In - in get_modified_remote_data_command: {e}") - elif command == 'update-remote-system': - demisto.info('########### MIRROR OUT #############') + elif command == "update-remote-system": + demisto.info("########### MIRROR OUT #############") return_results(update_remote_system_command(args, params, service, auth_token, mapper, comment_tag_to_splunk)) - elif command == 'splunk-get-username-by-xsoar-user': + elif command == "splunk-get-username-by-xsoar-user": return_results(mapper.get_splunk_user_by_xsoar_command(args)) else: - raise NotImplementedError(f'Command not implemented: {command}') + raise NotImplementedError(f"Command not implemented: {command}") -if __name__ in ['__main__', '__builtin__', 'builtins']: +if __name__ in ["__main__", "__builtin__", "builtins"]: main() diff --git a/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py b/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py index f4b698146353..4e6953324098 100644 --- a/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py +++ b/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py @@ -1,100 +1,105 @@ -import demistomock as demisto -from CommonServerPython import * - -import pytest -from copy import deepcopy from collections import namedtuple +from copy import deepcopy +from unittest.mock import MagicMock, patch -from splunklib.binding import AuthenticationError -from splunklib import client -from splunklib import results +import demistomock as demisto +import pytest import SplunkPy as splunk +from CommonServerPython import * from pytest_mock import MockerFixture -from unittest.mock import MagicMock, patch +from splunklib import client, results +from splunklib.binding import AuthenticationError +RETURN_ERROR_TARGET = "SplunkPy.return_error" -RETURN_ERROR_TARGET = 'SplunkPy.return_error' - -DICT_RAW_RESPONSE = '"1528755951, url="https://test.url.com", search_name="NG_SIEM_UC25- High number 
of hits against ' \ - 'unknown website from same subnet", action="allowed", dest="bb.bbb.bb.bbb , cc.ccc.ccc.cc , ' \ - 'xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", distinct_hosts="5", ' \ - 'first_3_octets="1.1.1", first_time="06/11/18 17:34:07 , 06/11/18 17:37:55 , 06/11/18 17:41:28 , ' \ - '06/11/18 17:42:05 , 06/11/18 17:42:38", info_max_time="+Infinity", info_min_time="0.000", ' \ - 'src="xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", u_category="unknown", ' \ - 'user="xyz\\a1234 , xyz\\b5678 , xyz\\c91011 , xyz\\d121314 , unknown", website="2.2.2.2""' - -DICT_RAW_RESPONSE_WITH_MESSAGE_ID = '"1528755951, message-id="1", url="https://test.url.com", ' \ - 'search_name="NG_SIEM_UC25- High number of hits against ' \ - 'unknown website from same subnet", action="allowed", dest="bb.bbb.bb.bbb , ' \ - 'cc.ccc.ccc.cc , xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", ' \ - 'distinct_hosts="5", ' \ - 'first_3_octets="1.1.1", first_time="06/11/18 17:34:07 , ' \ - '06/11/18 17:37:55 , 06/11/18 17:41:28 , ' \ - '06/11/18 17:42:05 , 06/11/18 17:42:38", info_max_time="+Infinity", info_min_time="0.000", ' \ - 'src="xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", u_category="unknown", ' \ - 'user="xyz\\a1234 , xyz\\b5678 , xyz\\c91011 , xyz\\d121314 , unknown", website="2.2.2.2""' - -LIST_RAW = 'Feb 13 09:02:55 1,2020/02/13 09:02:55,001606001116,THREAT,url,' \ - '1,2020/02/13 09:02:55,10.1.1.1,1.2.3.4,0.0.0.0,0.0.0.0,rule1,jordy,,web-browsing,vsys1,trust,untrust,' \ - 'ethernet1/2,ethernet1/1,forwardAll,2020/02/13 09:02:55,59460,1,62889,80,0,0,0x208000,tcp,alert,' \ - '"ushship.com/xed/config.bin",(9999),not-resolved,informational,client-to-server,' \ - '0,0x0,1.1.22.22-5.6.7.8,United States,0,text/html' - -RAW_WITH_MESSAGE = '{"@timestamp":"2019-10-15T13:30:08.578-04:00","message":"{"TimeStamp":"2019-10-15 13:30:08",' \ - '"CATEGORY_1":"CONTACT","ASSOCIATEOID":"G2N2TJETBRAAX68V","HOST":' \ - '"step-up-authentication-api.gslb.es.oneadp.com","SCOPE[4]":"PiSvcsProvider\/payroll","SCOPE[19]":' \ - '"\/api\/events\/core\/v1\/user-status","CONTEXT":"\/smsstepup","FLOW":"API","X-REAL-IP":' \ - '"2.2.2.2","PRODUCT_CODE":"WFNPortal","X-FORWARDED-PROTO":"http","ERROR_ID":"4008",' \ - '"SCOPE[23]":"\/security\/notification-communication-response-value.accept","REQ_URL":' \ - '"http:\/\/step-up-authentication-api.gslb.es.blabla.com\/smsstepup\/events\/core\/v1\/step-up-' \ - 'user-authorization-request.evaluate","SCOPE[35]":"autopay\/payroll\/v1\/cafeteria-plan-' \ - 'configurations\/{configurationItemID}","SCOPE_MATCHED":"Y","SCOPE[43]":"communication\/n' \ - 'otification-message-template.add","SCOPE[11]":"\/ISIJWSUserSecurity","SCOPE[27]":"autopay\/events' \ - '\/payroll\/v1\/earning-configuration.add","ORGOID":"G2SY6MR3ATKA232T","SCOPE[8]":"\/' \ - 'ISIJWSAssociatesService","SCOPE[39]":"autopay\/payroll\/v1\/earning-configurations",' \ - '"SETUP_SELF":"N","SCOPE[47]":"communication\/notification.publish","SCOPE[15]":"' \ - '\/OrganizationSoftPurge","X-FORWARDED-HOST":"step-up-authentication-api.gslb.es.blabla.com",' \ - '"ADP-MESSAGEID":"a1d57ed2-1fe6-4800-be7a-26cd89bhello","CNAME":"JRJG INC","CONTENT-LENGTH":' \ - '"584","SCOPE[31]":"autopay\/events\/payroll\/v1\/earning-configuration.remove","CID":"BSTAR00044"' \ - ',"ACTOR_UID":"ABinters@BSTAR00044","SECURE_API_MODE":"HTTPS_SECURE","X-REQUEST-ID":' \ - '"2473a981bef27bc8444e510adc12234a","SCOPE[1]":"AVSSCP\/Docstash\/Download","SCOPE[18]":' \ - 
'"\/api\/events\/core\/v1\/product-role.assign","BLOCK_SESSION":"Y","CONSUMER_ID":' \ - '"ab2e715e-41c4-43d6-bff7-fc2d713hello","SCOPE[34]":"autopay\/payroll\/v1\/cafeteria-plan-' \ - 'configurations","SCOPE[46]":"communication\/notification-message-template.remove","MODULE":' \ - '"STEPUP_API","SCOPE[9]":"\/ISIJWSClientService","SCOPE[10]":"\/ISIJWSJobsService","SCOPE[22]":' \ - '"\/api\/person-account-registration","SCOPE[38]":"autopay\/payroll\/v1\/deposit-configurations",' \ - '"SUBJECT_ORGOID":"G2SY6MR3ATKA232T","SCOPE[5]":"\/Associate","SCOPE[14]":"\/Organization",' \ - '"SCOPE[26]":"WFNSvcsProvider\/payrollPi","EVENT_ID":"9ea87118-5679-5b0e-a67f-1abd8ccabcde",' \ - '"SCOPE[30]":"autopay\/events\/payroll\/v1\/earning-configuration.payroll-accumulators.modify",' \ - '"X-FORWARDED-PORT":"80","SCOPE[42]":"autopay\/payroll\/v1\/worker-employment-records","JTI":' \ - '"867b6d06-47cf-40ab-8dd7-bd0d57babcde","X-DOMAIN":"secure.api.es.abc.com","SOR_CODE":' \ - '"WFNPortal","SCOPE[29]":"autopay\/events\/payroll\/v1\/earning-configuration.configuration' \ - '-tags.modify","SCOPE[2]":"AVSSCP\/Docstash\/Get","OUTPUT_TYPE":"FAIL","ERR_MSG":"BLOCK_SESSION",' \ - '"TRANS_ID":"3AF-D30-7CTTCQ","SCOPE[45]":"communication\/notification-message-template.read",' \ - '"USE_HISTORY":"Y","SCHEME":"http","SCOPE[13]":"\/ISIJWSUsersService","SCOPE[21]":"\/api\/person",' \ - '"SCOPE[33]":"autopay\/events\/payroll\/v1\/worker-insurable-payments.modify","X-FORWARDED-FOR":' \ - '"8.8.8.8, 10.10.10.10, 1.2.3.4, 5.6.7.8","SCOPE[17]":"\/api\/core\/v1\/organization",' \ - '"SCOPE[25]":"\/step-up-user-authorization.initiate","SCOPE[6]":"\/Associate\/PIC","SCOPE[37]":' \ - '"autopay\/payroll\/v1\/cafeteria-plan-configurations\/{configurationItemID}\/' \ - 'payroll-item-configurations\/{payrollItemID}","FLOW_TYPE":"REST","SCOPE[41]":' \ - '"autopay\/payroll\/v1\/payroll-output","CONSUMERAPPOID":"WFNPortal","RESOURCE":' \ - '"\/events\/core\/v1\/step-up-user-authorization-request.evaluate","USER-AGENT":' \ - '"Apache-HttpClient\/4.5.5 (Java\/10.0.1)","SCOPE[3]":"AVSSCP\/Docstash\/List",' \ - '"SUB_CATEGORY_1":"worker.businessCommunication.email.change","TIME":"9","X-SCHEME":' \ - '"http","ADP-CONVERSATIONID":"stY46PpweABoT5JX04CZGCeBbX8=","SCOPE[12]":' \ - '"\/ISIJWSUserSecurityService","SCOPE[24]":"\/step-up-user-authorization-request.evaluate",' \ - '"SCOPE[32]":"autopay\/events\/payroll\/v1\/retro-pay-request.add","SCOPE[44]":' \ - '"communication\/notification-message-template.change","ACTION":"POST","SCOPE[7]":' \ - '"\/AssociateSoftPurge","SCOPE[16]":"\/api\/authentication","X-ORIGINAL-URI":' \ - '"\/smsstepup\/events\/core\/v1\/step-up-user-authorization-request.evaluate","SCOPE[28]":' \ - '"autopay\/events\/payroll\/v1\/earning-configuration.change","SCOPE[36]":' \ - '"autopay\/payroll\/v1\/cafeteria-plan-configurations\/{configurationItemID}\/payroll-item' \ - '-configurations","SESSION_ID":"f50be909-9e4f-408d-bf77-68499012bc35","SCOPE[20]":' \ - '"\/api\/events\/core\/v1\/user.provision","SUBJECT_AOID":"G370XX6XYCABCDE",' \ - '"X-ORIGINAL-FORWARDED-FOR":"1.1.1.1, 3.3.3.3, 4.4.4.4","SCOPE[40]":' \ - '"autopay\/payroll\/v1\/employer-details"}","TXID":"3AF-D30-ABCDEF","ADP-MessageID":' \ - '"a1d57ed2-1fe6-4800-be7a-26cd89bf686d","SESSIONID":"stY46PpweFToT5JX04CZGMeCvP8=","ORGOID":' \ - '"G2SY6MR3ATKA232T","AOID":"G2N2TJETBRAAXAAA","MSGID":"a1d57ed2-1fe6-0000-be7a-26cd89bf686d"}' +DICT_RAW_RESPONSE = ( + '"1528755951, url="https://test.url.com", search_name="NG_SIEM_UC25- High number of hits against ' + 'unknown 
website from same subnet", action="allowed", dest="bb.bbb.bb.bbb , cc.ccc.ccc.cc , ' + 'xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", distinct_hosts="5", ' + 'first_3_octets="1.1.1", first_time="06/11/18 17:34:07 , 06/11/18 17:37:55 , 06/11/18 17:41:28 , ' + '06/11/18 17:42:05 , 06/11/18 17:42:38", info_max_time="+Infinity", info_min_time="0.000", ' + 'src="xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", u_category="unknown", ' + 'user="xyz\\a1234 , xyz\\b5678 , xyz\\c91011 , xyz\\d121314 , unknown", website="2.2.2.2""' +) + +DICT_RAW_RESPONSE_WITH_MESSAGE_ID = ( + '"1528755951, message-id="1", url="https://test.url.com", ' + 'search_name="NG_SIEM_UC25- High number of hits against ' + 'unknown website from same subnet", action="allowed", dest="bb.bbb.bb.bbb , ' + 'cc.ccc.ccc.cc , xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", ' + 'distinct_hosts="5", ' + 'first_3_octets="1.1.1", first_time="06/11/18 17:34:07 , ' + "06/11/18 17:37:55 , 06/11/18 17:41:28 , " + '06/11/18 17:42:05 , 06/11/18 17:42:38", info_max_time="+Infinity", info_min_time="0.000", ' + 'src="xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", u_category="unknown", ' + 'user="xyz\\a1234 , xyz\\b5678 , xyz\\c91011 , xyz\\d121314 , unknown", website="2.2.2.2""' +) + +LIST_RAW = ( + "Feb 13 09:02:55 1,2020/02/13 09:02:55,001606001116,THREAT,url," + "1,2020/02/13 09:02:55,10.1.1.1,1.2.3.4,0.0.0.0,0.0.0.0,rule1,jordy,,web-browsing,vsys1,trust,untrust," + "ethernet1/2,ethernet1/1,forwardAll,2020/02/13 09:02:55,59460,1,62889,80,0,0,0x208000,tcp,alert," + '"ushship.com/xed/config.bin",(9999),not-resolved,informational,client-to-server,' + "0,0x0,1.1.22.22-5.6.7.8,United States,0,text/html" +) + +RAW_WITH_MESSAGE = ( + '{"@timestamp":"2019-10-15T13:30:08.578-04:00","message":"{"TimeStamp":"2019-10-15 13:30:08",' + '"CATEGORY_1":"CONTACT","ASSOCIATEOID":"G2N2TJETBRAAX68V","HOST":' + '"step-up-authentication-api.gslb.es.oneadp.com","SCOPE[4]":"PiSvcsProvider\/payroll","SCOPE[19]":' + '"\/api\/events\/core\/v1\/user-status","CONTEXT":"\/smsstepup","FLOW":"API","X-REAL-IP":' + '"2.2.2.2","PRODUCT_CODE":"WFNPortal","X-FORWARDED-PROTO":"http","ERROR_ID":"4008",' + '"SCOPE[23]":"\/security\/notification-communication-response-value.accept","REQ_URL":' + '"http:\/\/step-up-authentication-api.gslb.es.blabla.com\/smsstepup\/events\/core\/v1\/step-up-' + 'user-authorization-request.evaluate","SCOPE[35]":"autopay\/payroll\/v1\/cafeteria-plan-' + 'configurations\/{configurationItemID}","SCOPE_MATCHED":"Y","SCOPE[43]":"communication\/n' + 'otification-message-template.add","SCOPE[11]":"\/ISIJWSUserSecurity","SCOPE[27]":"autopay\/events' + '\/payroll\/v1\/earning-configuration.add","ORGOID":"G2SY6MR3ATKA232T","SCOPE[8]":"\/' + 'ISIJWSAssociatesService","SCOPE[39]":"autopay\/payroll\/v1\/earning-configurations",' + '"SETUP_SELF":"N","SCOPE[47]":"communication\/notification.publish","SCOPE[15]":"' + '\/OrganizationSoftPurge","X-FORWARDED-HOST":"step-up-authentication-api.gslb.es.blabla.com",' + '"ADP-MESSAGEID":"a1d57ed2-1fe6-4800-be7a-26cd89bhello","CNAME":"JRJG INC","CONTENT-LENGTH":' + '"584","SCOPE[31]":"autopay\/events\/payroll\/v1\/earning-configuration.remove","CID":"BSTAR00044"' + ',"ACTOR_UID":"ABinters@BSTAR00044","SECURE_API_MODE":"HTTPS_SECURE","X-REQUEST-ID":' + '"2473a981bef27bc8444e510adc12234a","SCOPE[1]":"AVSSCP\/Docstash\/Download","SCOPE[18]":' + '"\/api\/events\/core\/v1\/product-role.assign","BLOCK_SESSION":"Y","CONSUMER_ID":' + 
'"ab2e715e-41c4-43d6-bff7-fc2d713hello","SCOPE[34]":"autopay\/payroll\/v1\/cafeteria-plan-' + 'configurations","SCOPE[46]":"communication\/notification-message-template.remove","MODULE":' + '"STEPUP_API","SCOPE[9]":"\/ISIJWSClientService","SCOPE[10]":"\/ISIJWSJobsService","SCOPE[22]":' + '"\/api\/person-account-registration","SCOPE[38]":"autopay\/payroll\/v1\/deposit-configurations",' + '"SUBJECT_ORGOID":"G2SY6MR3ATKA232T","SCOPE[5]":"\/Associate","SCOPE[14]":"\/Organization",' + '"SCOPE[26]":"WFNSvcsProvider\/payrollPi","EVENT_ID":"9ea87118-5679-5b0e-a67f-1abd8ccabcde",' + '"SCOPE[30]":"autopay\/events\/payroll\/v1\/earning-configuration.payroll-accumulators.modify",' + '"X-FORWARDED-PORT":"80","SCOPE[42]":"autopay\/payroll\/v1\/worker-employment-records","JTI":' + '"867b6d06-47cf-40ab-8dd7-bd0d57babcde","X-DOMAIN":"secure.api.es.abc.com","SOR_CODE":' + '"WFNPortal","SCOPE[29]":"autopay\/events\/payroll\/v1\/earning-configuration.configuration' + '-tags.modify","SCOPE[2]":"AVSSCP\/Docstash\/Get","OUTPUT_TYPE":"FAIL","ERR_MSG":"BLOCK_SESSION",' + '"TRANS_ID":"3AF-D30-7CTTCQ","SCOPE[45]":"communication\/notification-message-template.read",' + '"USE_HISTORY":"Y","SCHEME":"http","SCOPE[13]":"\/ISIJWSUsersService","SCOPE[21]":"\/api\/person",' + '"SCOPE[33]":"autopay\/events\/payroll\/v1\/worker-insurable-payments.modify","X-FORWARDED-FOR":' + '"8.8.8.8, 10.10.10.10, 1.2.3.4, 5.6.7.8","SCOPE[17]":"\/api\/core\/v1\/organization",' + '"SCOPE[25]":"\/step-up-user-authorization.initiate","SCOPE[6]":"\/Associate\/PIC","SCOPE[37]":' + '"autopay\/payroll\/v1\/cafeteria-plan-configurations\/{configurationItemID}\/' + 'payroll-item-configurations\/{payrollItemID}","FLOW_TYPE":"REST","SCOPE[41]":' + '"autopay\/payroll\/v1\/payroll-output","CONSUMERAPPOID":"WFNPortal","RESOURCE":' + '"\/events\/core\/v1\/step-up-user-authorization-request.evaluate","USER-AGENT":' + '"Apache-HttpClient\/4.5.5 (Java\/10.0.1)","SCOPE[3]":"AVSSCP\/Docstash\/List",' + '"SUB_CATEGORY_1":"worker.businessCommunication.email.change","TIME":"9","X-SCHEME":' + '"http","ADP-CONVERSATIONID":"stY46PpweABoT5JX04CZGCeBbX8=","SCOPE[12]":' + '"\/ISIJWSUserSecurityService","SCOPE[24]":"\/step-up-user-authorization-request.evaluate",' + '"SCOPE[32]":"autopay\/events\/payroll\/v1\/retro-pay-request.add","SCOPE[44]":' + '"communication\/notification-message-template.change","ACTION":"POST","SCOPE[7]":' + '"\/AssociateSoftPurge","SCOPE[16]":"\/api\/authentication","X-ORIGINAL-URI":' + '"\/smsstepup\/events\/core\/v1\/step-up-user-authorization-request.evaluate","SCOPE[28]":' + '"autopay\/events\/payroll\/v1\/earning-configuration.change","SCOPE[36]":' + '"autopay\/payroll\/v1\/cafeteria-plan-configurations\/{configurationItemID}\/payroll-item' + '-configurations","SESSION_ID":"f50be909-9e4f-408d-bf77-68499012bc35","SCOPE[20]":' + '"\/api\/events\/core\/v1\/user.provision","SUBJECT_AOID":"G370XX6XYCABCDE",' + '"X-ORIGINAL-FORWARDED-FOR":"1.1.1.1, 3.3.3.3, 4.4.4.4","SCOPE[40]":' + '"autopay\/payroll\/v1\/employer-details"}","TXID":"3AF-D30-ABCDEF","ADP-MessageID":' + '"a1d57ed2-1fe6-4800-be7a-26cd89bf686d","SESSIONID":"stY46PpweFToT5JX04CZGMeCvP8=","ORGOID":' + '"G2SY6MR3ATKA232T","AOID":"G2N2TJETBRAAXAAA","MSGID":"a1d57ed2-1fe6-0000-be7a-26cd89bf686d"}' +) SAMPLE_RESPONSE = [ results.Message("INFO-TEST", "test message"), @@ -249,49 +254,51 @@ EXPECTED = { "action": "allowed", "dest": "bb.bbb.bb.bbb , cc.ccc.ccc.cc , xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", - "distinct_hosts": '5', + "distinct_hosts": "5", "first_3_octets": 
"1.1.1", "first_time": "06/11/18 17:34:07 , 06/11/18 17:37:55 , 06/11/18 17:41:28 , 06/11/18 17:42:05 , 06/11/18 17:42:38", "info_max_time": "+Infinity", - "info_min_time": '0.000', + "info_min_time": "0.000", "search_name": "NG_SIEM_UC25- High number of hits against unknown website from same subnet", "src": "xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", "u_category": "unknown", "user": "xyz\\a1234 , xyz\\b5678 , xyz\\c91011 , xyz\\d121314 , unknown", "website": "2.2.2.2", - "url": "https://test.url.com" + "url": "https://test.url.com", } EXPECTED_WITH_MESSAGE_ID = { "message-id": "1", "action": "allowed", "dest": "bb.bbb.bb.bbb , cc.ccc.ccc.cc , xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", - "distinct_hosts": '5', + "distinct_hosts": "5", "first_3_octets": "1.1.1", "first_time": "06/11/18 17:34:07 , 06/11/18 17:37:55 , 06/11/18 17:41:28 , 06/11/18 17:42:05 , 06/11/18 17:42:38", "info_max_time": "+Infinity", - "info_min_time": '0.000', + "info_min_time": "0.000", "search_name": "NG_SIEM_UC25- High number of hits against unknown website from same subnet", "src": "xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", "u_category": "unknown", "user": "xyz\\a1234 , xyz\\b5678 , xyz\\c91011 , xyz\\d121314 , unknown", "website": "2.2.2.2", - "url": "https://test.url.com" + "url": "https://test.url.com", } URL_TESTING_IN = '"url="https://test.com?key=val"' -URL_TESTING_OUT = {'url': 'https://test.com?key=val'} +URL_TESTING_OUT = {"url": "https://test.com?key=val"} # checking a case where the last character for each value was cut -RESPONSE = 'NAS-IP-Address=2.2.2.2, NAS-Port=50222, NAS-Identifier=de-wilm-251littl-idf3b-s2, NAS-Port-Type=' \ - 'Ethernet, NAS-Port-Id=GigabitEthernet2/0/05' +RESPONSE = ( + "NAS-IP-Address=2.2.2.2, NAS-Port=50222, NAS-Identifier=de-wilm-251littl-idf3b-s2, NAS-Port-Type=" + "Ethernet, NAS-Port-Id=GigabitEthernet2/0/05" +) POSITIVE = { "NAS-IP-Address": "2.2.2.2", "NAS-Identifier": "de-wilm-251littl-idf3b-s2", "NAS-Port": "50222", "NAS-Port-Id": "GigabitEthernet2/0/05", - "NAS-Port-Type": "Ethernet" + "NAS-Port-Type": "Ethernet", } # testing the ValueError and json sections @@ -303,15 +310,15 @@ class Jobs: def __init__(self, status, service): self.oneshot = lambda x, **kwargs: x - state = namedtuple('state', 'content') - self.state = state(content={'dispatchState': str(status)}) + state = namedtuple("state", "content") + self.state = state(content={"dispatchState": str(status)}) self.service = service def __getitem__(self, arg): return 0 def create(self, query, **kwargs): - job = client.Job(sid='123456', service=self.service, **kwargs) + job = client.Job(sid="123456", service=self.service, **kwargs) job.resultCount = 0 job._state = self.state return job @@ -322,11 +329,11 @@ def __init__(self, status): self.jobs = Jobs(status, self) self.status = status self.disable_v2_api = False - self.namespace = {'app': 'test', 'owner': 'test', 'sharing': 'global'} + self.namespace = {"app": "test", "owner": "test", "sharing": "global"} self._abspath = lambda x, **kwargs: x def get(self, path_segment, owner=None, app=None, headers=None, sharing=None, **query): - return {'status': '200', 'body': 'test', 'headers': {'content-type': 'application/json'}, 'reason': 'OK'} + return {"status": "200", "body": "test", "headers": {"content-type": "application/json"}, "reason": "OK"} def job(self, sid): return self.jobs @@ -338,15 +345,14 @@ def test_raw_to_dict(): response_with_message = splunk.rawToDict(DICT_RAW_RESPONSE_WITH_MESSAGE_ID) 
list_response = splunk.rawToDict(LIST_RAW) raw_message = splunk.rawToDict(RAW_WITH_MESSAGE) - empty = splunk.rawToDict('') + empty = splunk.rawToDict("") url_test = splunk.rawToDict(URL_TESTING_IN) character_check = splunk.rawToDict(RESPONSE) assert response == EXPECTED assert response_with_message == EXPECTED_WITH_MESSAGE_ID assert list_response == {} - assert raw_message.get('SCOPE[29]') == 'autopay\/events\/payroll\/v1\/earning-configuration.configuration-tags' \ - '.modify' + assert raw_message.get("SCOPE[29]") == "autopay\/events\/payroll\/v1\/earning-configuration.configuration-tags" ".modify" assert isinstance(raw_message, dict) assert empty == {} assert url_test == URL_TESTING_OUT @@ -354,83 +360,84 @@ def test_raw_to_dict(): assert splunk.rawToDict(RAW_JSON) == RAW_JSON_AND_STANDARD_OUTPUT assert splunk.rawToDict(RAW_STANDARD) == RAW_JSON_AND_STANDARD_OUTPUT - assert splunk.rawToDict('drilldown_search="key IN ("test1","test2")') == { - 'drilldown_search': 'key IN (test1,test2)'} - assert splunk.rawToDict('123456, sample_account="sample1", ' - 'sample_account="sample2", sample_account="sample3",' - ' distinct_count_ac="5"') == {'sample_account': 'sample1, sample2, sample3', - 'distinct_count_ac': '5'} - - -@pytest.mark.parametrize('text, output', [ - ('', ['']), - ('"",', ['"",']), - # a value shouldn't do anything special - ('woopwoop', ['woopwoop']), - # a normal key value without quotes - ('abc=123', ['abc="123"']), - # add a comma at the end - ('abc=123,', ['abc="123"']), - # a normal key value with quotes - ('cbd="123"', ['cbd="123"']), - # check all wrapped with quotes removed - ('"abc="123""', ['abc="123"']), - # we need to remove 111 at the start. - ('111, cbd="123"', ['cbd="123"']), - # Testing with/without quotes and/or spaces: - ('abc=123,cbd=123', ['abc="123"', 'cbd="123"']), - ('abc=123,cbd="123"', ['abc="123"', 'cbd="123"']), - ('abc="123",cbd=123', ['abc="123"', 'cbd="123"']), - ('abc="123",cbd="123"', ['abc="123"', 'cbd="123"']), - ('abc=123, cbd=123', ['abc="123"', 'cbd="123"']), - ('abc=123, cbd="123"', ['abc="123"', 'cbd="123"']), - ('cbd="123", abc=123', ['abc="123"', 'cbd="123"']), - ('cbd="123",abc=123', ['abc="123"', 'cbd="123"']), - # Continue testing quotes with more values: - ('xyz=321,cbd=123,abc=123', ['xyz="321"', 'abc="123"', 'cbd="123"']), - ('xyz=321,cbd="123",abc=123', ['xyz="321"', 'abc="123"', 'cbd="123"']), - ('xyz="321",cbd="123",abc=123', ['xyz="321"', 'abc="123"', 'cbd="123"']), - ('xyz="321",cbd="123",abc="123"', ['xyz="321"', 'abc="123"', 'cbd="123"']), - # Testing nested quotes (the main reason for quote_group): - # Try to remove the start 111. 
- ('111, cbd="a="123""', ['cbd="a="123""']), - ('cbd="a="123""', ['cbd="a="123""']), - ('cbd="a="123", b=321"', ['cbd="a="123", b="321""']), - ('cbd="a=123, b=321"', ['cbd="a="123", b="321""']), - ('cbd="a=123, b="321""', ['cbd="a="123", b="321""']), - ('cbd="a="123", b="321""', ['cbd="a="123", b="321""']), - ('cbd="a=123, b=321"', ['cbd="a="123", b="321""']), - ('xyz=123, cbd="a="123", b=321"', ['xyz="123"', 'cbd="a="123", b="321""']), - ('xyz="123", cbd="a="123", b="321""', ['xyz="123"', 'cbd="a="123", b="321""']), - ('xyz="123", cbd="a="123", b="321"", qqq=2', ['xyz="123"', 'cbd="a="123", b="321""', 'qqq="2"']), - ('xyz="123", cbd="a="123", b="321"", qqq="2"', ['xyz="123"', 'cbd="a="123", b="321""', 'qqq="2"']), -]) + assert splunk.rawToDict('drilldown_search="key IN ("test1","test2")') == {"drilldown_search": "key IN (test1,test2)"} + assert splunk.rawToDict( + '123456, sample_account="sample1", ' 'sample_account="sample2", sample_account="sample3",' ' distinct_count_ac="5"' + ) == {"sample_account": "sample1, sample2, sample3", "distinct_count_ac": "5"} + + +@pytest.mark.parametrize( + "text, output", + [ + ("", [""]), + ('"",', ['"",']), + # a value shouldn't do anything special + ("woopwoop", ["woopwoop"]), + # a normal key value without quotes + ("abc=123", ['abc="123"']), + # add a comma at the end + ("abc=123,", ['abc="123"']), + # a normal key value with quotes + ('cbd="123"', ['cbd="123"']), + # check all wrapped with quotes removed + ('"abc="123""', ['abc="123"']), + # we need to remove 111 at the start. + ('111, cbd="123"', ['cbd="123"']), + # Testing with/without quotes and/or spaces: + ("abc=123,cbd=123", ['abc="123"', 'cbd="123"']), + ('abc=123,cbd="123"', ['abc="123"', 'cbd="123"']), + ('abc="123",cbd=123', ['abc="123"', 'cbd="123"']), + ('abc="123",cbd="123"', ['abc="123"', 'cbd="123"']), + ("abc=123, cbd=123", ['abc="123"', 'cbd="123"']), + ('abc=123, cbd="123"', ['abc="123"', 'cbd="123"']), + ('cbd="123", abc=123', ['abc="123"', 'cbd="123"']), + ('cbd="123",abc=123', ['abc="123"', 'cbd="123"']), + # Continue testing quotes with more values: + ("xyz=321,cbd=123,abc=123", ['xyz="321"', 'abc="123"', 'cbd="123"']), + ('xyz=321,cbd="123",abc=123', ['xyz="321"', 'abc="123"', 'cbd="123"']), + ('xyz="321",cbd="123",abc=123', ['xyz="321"', 'abc="123"', 'cbd="123"']), + ('xyz="321",cbd="123",abc="123"', ['xyz="321"', 'abc="123"', 'cbd="123"']), + # Testing nested quotes (the main reason for quote_group): + # Try to remove the start 111. 
+ ('111, cbd="a="123""', ['cbd="a="123""']), + ('cbd="a="123""', ['cbd="a="123""']), + ('cbd="a="123", b=321"', ['cbd="a="123", b="321""']), + ('cbd="a=123, b=321"', ['cbd="a="123", b="321""']), + ('cbd="a=123, b="321""', ['cbd="a="123", b="321""']), + ('cbd="a="123", b="321""', ['cbd="a="123", b="321""']), + ('cbd="a=123, b=321"', ['cbd="a="123", b="321""']), + ('xyz=123, cbd="a="123", b=321"', ['xyz="123"', 'cbd="a="123", b="321""']), + ('xyz="123", cbd="a="123", b="321""', ['xyz="123"', 'cbd="a="123", b="321""']), + ('xyz="123", cbd="a="123", b="321"", qqq=2', ['xyz="123"', 'cbd="a="123", b="321""', 'qqq="2"']), + ('xyz="123", cbd="a="123", b="321"", qqq="2"', ['xyz="123"', 'cbd="a="123", b="321""', 'qqq="2"']), + ], +) def test_quote_group(text, output): assert sorted(splunk.quote_group(text)) == sorted(output) data_test_replace_keys = [ ({}, {}), - ({'test': 'test'}, {'test': 'test'}), - ({'test.': 'test.'}, {'test_': 'test.'}), - ({'te.st': 'te.st'}, {'te_st': 'te.st'}), - ({'te[st': 'te[st'}, {'te_st': 'te[st'}), - ({'te]st': 'te]st'}, {'te_st': 'te]st'}), - ({'te)st': 'te)st'}, {'te_st': 'te)st'}), - ({'te(st': 'te(st'}, {'te_st': 'te(st'}), - ('', ''), - (None, None) + ({"test": "test"}, {"test": "test"}), + ({"test.": "test."}, {"test_": "test."}), + ({"te.st": "te.st"}, {"te_st": "te.st"}), + ({"te[st": "te[st"}, {"te_st": "te[st"}), + ({"te]st": "te]st"}, {"te_st": "te]st"}), + ({"te)st": "te)st"}, {"te_st": "te)st"}), + ({"te(st": "te(st"}, {"te_st": "te(st"}), + ("", ""), + (None, None), ] -@pytest.mark.parametrize('dict_in, dict_out', data_test_replace_keys) +@pytest.mark.parametrize("dict_in, dict_out", data_test_replace_keys) def test_replace_keys(dict_in, dict_out): out = splunk.replace_keys(deepcopy(dict_in)) - assert out == dict_out, f'replace_keys({dict_in}) got: {out} instead: {dict_out}' + assert out == dict_out, f"replace_keys({dict_in}) got: {out} instead: {dict_out}" def test_parse_time_to_minutes_no_error(): - splunk.FETCH_TIME = '3 hours' + splunk.FETCH_TIME = "3 hours" res = splunk.parse_time_to_minutes() assert res == 180 @@ -438,11 +445,13 @@ def test_parse_time_to_minutes_no_error(): def test_parse_time_to_minutes_invalid_time_integer(mocker): return_error_mock = mocker.patch(RETURN_ERROR_TARGET) - splunk.FETCH_TIME = 'abc hours' + splunk.FETCH_TIME = "abc hours" splunk.parse_time_to_minutes() err_msg = return_error_mock.call_args[0][0] - assert err_msg == "Error: Invalid fetch time, need to be a positive integer with the time unit afterwards " \ - "e.g '2 months, 4 days'." + assert ( + err_msg == "Error: Invalid fetch time, need to be a positive integer with the time unit afterwards " + "e.g '2 months, 4 days'." + ) def test_splunk_submit_event_hec_command(mocker): @@ -466,7 +475,7 @@ def check_request_channel(args: dict): args: A dict of args. Returns: A MockResRequestChannel with the correct text value. 
""" - if args.get('request_channel'): + if args.get("request_channel"): return MockResRequestChannel('{"text":"Success","code":0,"ackId":1}') else: return MockResRequestChannel('{"text":"Data channel is missing","code":10}') @@ -488,9 +497,8 @@ def test_splunk_submit_event_hec_command_request_channel(mocker): """ args = {"request_channel": "11111111-1111-1111-1111-111111111111", "entry_id": "some_entry"} mocker.patch.object(splunk, "splunk_submit_event_hec", return_value=check_request_channel(args)) - moc = mocker.patch.object(demisto, 'results') - splunk.splunk_submit_event_hec_command(params={"hec_url": "mock_url"}, - args=args, service=Service) + moc = mocker.patch.object(demisto, "results") + splunk.splunk_submit_event_hec_command(params={"hec_url": "mock_url"}, args=args, service=Service) readable_output = moc.call_args[0][0] assert readable_output == "The events were sent successfully to Splunk. AckID: 1" @@ -508,8 +516,7 @@ def test_splunk_submit_event_hec_command_without_request_channel(mocker): mocker.patch.object(splunk, "splunk_submit_event_hec", return_value=check_request_channel(args)) return_error_mock = mocker.patch(RETURN_ERROR_TARGET) - splunk.splunk_submit_event_hec_command(params={"hec_url": "mock_url"}, - args=args, service=Service) + splunk.splunk_submit_event_hec_command(params={"hec_url": "mock_url"}, args=args, service=Service) err_msg = return_error_mock.call_args[0][0] assert err_msg == 'Could not send event to Splunk {"text":"Data channel is missing","code":10}' @@ -517,86 +524,60 @@ def test_splunk_submit_event_hec_command_without_request_channel(mocker): def test_parse_time_to_minutes_invalid_time_unit(mocker): return_error_mock = mocker.patch(RETURN_ERROR_TARGET) - splunk.FETCH_TIME = '3 hoursss' + splunk.FETCH_TIME = "3 hoursss" splunk.parse_time_to_minutes() err_msg = return_error_mock.call_args[0][0] - assert err_msg == 'Error: Invalid time unit.' + assert err_msg == "Error: Invalid time unit." 
SEARCH_RESULT = [ - { - "But": { - "This": "is" - }, - "Very": "Unique" - }, - { - "Something": "regular", - "But": { - "This": "is" - }, - "Very": "Unique" - }, - { - "Something": "natural", - "But": { - "This": "is a very very" - }, - "Very": "Unique and awesome" - } -] -REGULAR_ALL_CHOSEN_FIELDS = [ - "Something", - "But", - "Very" -] -REGULAR_CHOSEN_FIELDS_SUBSET = [ - "Something", - "Very" -] -REGEX_CHOSEN_FIELDS_SUBSET = [ - "Some*", - "Very" + {"But": {"This": "is"}, "Very": "Unique"}, + {"Something": "regular", "But": {"This": "is"}, "Very": "Unique"}, + {"Something": "natural", "But": {"This": "is a very very"}, "Very": "Unique and awesome"}, ] +REGULAR_ALL_CHOSEN_FIELDS = ["Something", "But", "Very"] +REGULAR_CHOSEN_FIELDS_SUBSET = ["Something", "Very"] +REGEX_CHOSEN_FIELDS_SUBSET = ["Some*", "Very"] -NON_EXISTING_FIELDS = [ - "SDFAFSD", - "ASBLFKDJK" -] +NON_EXISTING_FIELDS = ["SDFAFSD", "ASBLFKDJK"] -@pytest.mark.parametrize('search_result, chosen_fields, expected_result', [ - (SEARCH_RESULT, REGULAR_ALL_CHOSEN_FIELDS, REGULAR_ALL_CHOSEN_FIELDS), - (SEARCH_RESULT, REGULAR_CHOSEN_FIELDS_SUBSET, REGULAR_CHOSEN_FIELDS_SUBSET), - (SEARCH_RESULT, REGEX_CHOSEN_FIELDS_SUBSET, REGULAR_CHOSEN_FIELDS_SUBSET), - (SEARCH_RESULT, NON_EXISTING_FIELDS, []), -]) +@pytest.mark.parametrize( + "search_result, chosen_fields, expected_result", + [ + (SEARCH_RESULT, REGULAR_ALL_CHOSEN_FIELDS, REGULAR_ALL_CHOSEN_FIELDS), + (SEARCH_RESULT, REGULAR_CHOSEN_FIELDS_SUBSET, REGULAR_CHOSEN_FIELDS_SUBSET), + (SEARCH_RESULT, REGEX_CHOSEN_FIELDS_SUBSET, REGULAR_CHOSEN_FIELDS_SUBSET), + (SEARCH_RESULT, NON_EXISTING_FIELDS, []), + ], +) def test_commands(search_result, chosen_fields, expected_result): from SplunkPy import update_headers_from_field_names + headers = update_headers_from_field_names(search_result, chosen_fields) assert expected_result == headers -APPS = ['app'] -STORES = ['store'] +APPS = ["app"] +STORES = ["store"] EMPTY_CASE = {} STORE_WITHOUT_APP = {"kv_store_collection_name": "test"} -JUST_APP_NAME = {'app_name': 'app'} # happens in splunk-kv-store-collections-list command -CREATE_COMMAND = {'app_name': 'app', 'kv_store_name': 'not_store'} -CORRECT = {'app_name': 'app', 'kv_store_collection_name': 'store'} -INCORRECT_STORE_NAME = {'app_name': 'app', 'kv_store_collection_name': 'not_store'} +JUST_APP_NAME = {"app_name": "app"} # happens in splunk-kv-store-collections-list command +CREATE_COMMAND = {"app_name": "app", "kv_store_name": "not_store"} +CORRECT = {"app_name": "app", "kv_store_collection_name": "store"} +INCORRECT_STORE_NAME = {"app_name": "app", "kv_store_collection_name": "not_store"} data_test_check_error = [ - (EMPTY_CASE, 'app not found'), - (STORE_WITHOUT_APP, 'app not found'), - (JUST_APP_NAME, 'empty'), - (CREATE_COMMAND, 'empty'), - (CORRECT, 'empty'), - (INCORRECT_STORE_NAME, 'KV Store not found'), + (EMPTY_CASE, "app not found"), + (STORE_WITHOUT_APP, "app not found"), + (JUST_APP_NAME, "empty"), + (CREATE_COMMAND, "empty"), + (CORRECT, "empty"), + (INCORRECT_STORE_NAME, "KV Store not found"), ] -@pytest.mark.parametrize('args, out_error', data_test_check_error) +@pytest.mark.parametrize("args, out_error", data_test_check_error) def test_check_error(args, out_error): class Service: def __init__(self): @@ -605,12 +586,10 @@ def __init__(self): try: splunk.check_error(Service(), args) - raise splunk.DemistoException('empty') + raise splunk.DemistoException("empty") except splunk.DemistoException as error: output = str(error) - assert ( - output == out_error - ), 
f'check_error(service, {args})\n\treturns: {output}\n\tinstead: {out_error}' + assert output == out_error, f"check_error(service, {args})\n\treturns: {output}\n\tinstead: {out_error}" EMPTY_CASE = {} @@ -618,28 +597,26 @@ def __init__(self): WITH_ALL_PARAMS = {"key": "demisto", "value": "is awesome", "limit": 1, "query": "test"} STANDARD_KEY_VAL = {"key": "demisto", "value": "is awesome"} KEY_AND_LIMIT = {"key": "key", "limit": 1} -KEY_AND_QUERY = {"key": "key", "query": 'test_query'} -QUERY = {"query": 'test_query'} -QUERY_AND_VALUE = {"query": 'test_query', "value": "awesome"} +KEY_AND_QUERY = {"key": "key", "query": "test_query"} +QUERY = {"query": "test_query"} +QUERY_AND_VALUE = {"query": "test_query", "value": "awesome"} data_test_build_kv_store_query = [ (EMPTY_CASE, str(EMPTY_CASE)), (JUST_KEY, str(EMPTY_CASE)), (STANDARD_KEY_VAL, '{"demisto": "is awesome"}'), (WITH_ALL_PARAMS, '{"demisto": "is awesome"}'), (KEY_AND_LIMIT, {"limit": 1}), - (KEY_AND_QUERY, 'test_query'), - (QUERY, 'test_query'), - (QUERY_AND_VALUE, 'test_query'), + (KEY_AND_QUERY, "test_query"), + (QUERY, "test_query"), + (QUERY_AND_VALUE, "test_query"), ] -@pytest.mark.parametrize('args, expected_query', data_test_build_kv_store_query) +@pytest.mark.parametrize("args, expected_query", data_test_build_kv_store_query) def test_build_kv_store_query(args, expected_query, mocker): - mocker.patch('SplunkPy.get_key_type', return_value=None) + mocker.patch("SplunkPy.get_key_type", return_value=None) output = splunk.build_kv_store_query(None, args) - assert ( - output == expected_query - ), f'build_kv_store_query({args})\n\treturns: {output}\n\tinstead: {expected_query}' + assert output == expected_query, f"build_kv_store_query({args})\n\treturns: {output}\n\tinstead: {expected_query}" data_test_build_kv_store_query_with_key_val = [ @@ -649,46 +626,46 @@ def test_build_kv_store_query(args, expected_query, mocker): ] -@pytest.mark.parametrize('args, _type, expected_query', data_test_build_kv_store_query_with_key_val) +@pytest.mark.parametrize("args, _type, expected_query", data_test_build_kv_store_query_with_key_val) def test_build_kv_store_query_with_key_val(args, _type, expected_query, mocker): - mocker.patch('SplunkPy.get_key_type', return_value=_type) + mocker.patch("SplunkPy.get_key_type", return_value=_type) output = splunk.build_kv_store_query(None, args) - assert output == expected_query, f'build_kv_store_query({args})\n\treturns: {output}\n\tinstead: {expected_query}' + assert output == expected_query, f"build_kv_store_query({args})\n\treturns: {output}\n\tinstead: {expected_query}" test_test_get_key_type = [ - ({'field.key': 'number'}, float), - ({'field.key': 'string'}, str), - ({'field.key': 'cidr'}, str), - ({'field.key': 'boolean'}, bool), - ({'field.key': 'empty'}, None), - ({'field.key': 'time'}, str), + ({"field.key": "number"}, float), + ({"field.key": "string"}, str), + ({"field.key": "cidr"}, str), + ({"field.key": "boolean"}, bool), + ({"field.key": "empty"}, None), + ({"field.key": "time"}, str), ] - @pytest.mark.parametrize('keys_and_types, expected_type', test_test_get_key_type) + @pytest.mark.parametrize("keys_and_types, expected_type", test_test_get_key_type) def test_get_key_type(keys_and_types, expected_type, mocker): - mocker.patch('SplunkPy.get_keys_and_types', return_value=keys_and_types) + mocker.patch("SplunkPy.get_keys_and_types", return_value=keys_and_types) - output = splunk.get_key_type(None, 'key') - assert output == expected_type, f'get_key_type(kv_store, key)\n\treturns: 
{output}\n\tinstead: {expected_type}' + output = splunk.get_key_type(None, "key") + assert output == expected_type, f"get_key_type(kv_store, key)\n\treturns: {output}\n\tinstead: {expected_type}" EMPTY_CASE = {} -WITHOUT_FIELD = {'empty': 'number'} -STRING_FIELD = {'field.test': 'string'} -NUMBER_FIELD = {'field.test': 'number'} -INDEX = {'index.test': 'string'} -MIXED = {'field.test': 'string', 'empty': 'field'} +WITHOUT_FIELD = {"empty": "number"} +STRING_FIELD = {"field.test": "string"} +NUMBER_FIELD = {"field.test": "number"} +INDEX = {"index.test": "string"} +MIXED = {"field.test": "string", "empty": "field"} data_test_get_keys_and_types = [ (EMPTY_CASE, EMPTY_CASE), (WITHOUT_FIELD, EMPTY_CASE), - (STRING_FIELD, {'field.test': 'string'}), - (NUMBER_FIELD, {'field.test': 'number'}), - (INDEX, {'index.test': 'string'}), - (MIXED, {'field.test': 'string'}), + (STRING_FIELD, {"field.test": "string"}), + (NUMBER_FIELD, {"field.test": "number"}), + (INDEX, {"index.test": "string"}), + (MIXED, {"field.test": "string"}), ] -@pytest.mark.parametrize('raw_keys, expected_keys', data_test_get_keys_and_types) +@pytest.mark.parametrize("raw_keys, expected_keys", data_test_get_keys_and_types) def test_get_keys_and_types(raw_keys, expected_keys): class KVMock: def __init__(self): @@ -698,38 +675,31 @@ def content(self): return raw_keys output = splunk.get_keys_and_types(KVMock()) - assert ( - output == expected_keys - ), f'get_keys_and_types(kv_store)\n\treturns: {output}\n\tinstead: {expected_keys}' + assert output == expected_keys, f"get_keys_and_types(kv_store)\n\treturns: {output}\n\tinstead: {expected_keys}" -START_OUTPUT = ( - '#### configuration for name store\n| field name | type |\n| --- | --- |' -) -EMPTY_OUTPUT = '' -STANDARD_CASE = {'field.test': 'number'} -STANDARD_OUTPUT = '\n| field.test | number |' -data_test_get_kv_store_config = [ - ({}, EMPTY_OUTPUT), - (STANDARD_CASE, STANDARD_OUTPUT) -] +START_OUTPUT = "#### configuration for name store\n| field name | type |\n| --- | --- |" +EMPTY_OUTPUT = "" +STANDARD_CASE = {"field.test": "number"} +STANDARD_OUTPUT = "\n| field.test | number |" +data_test_get_kv_store_config = [({}, EMPTY_OUTPUT), (STANDARD_CASE, STANDARD_OUTPUT)] -@pytest.mark.parametrize('fields, expected_output', data_test_get_kv_store_config) +@pytest.mark.parametrize("fields, expected_output", data_test_get_kv_store_config) def test_get_kv_store_config(fields, expected_output, mocker): class Name: def __init__(self): - self.name = 'name' + self.name = "name" - mocker.patch('SplunkPy.get_keys_and_types', return_value=fields) + mocker.patch("SplunkPy.get_keys_and_types", return_value=fields) output = splunk.get_kv_store_config(Name()) - expected_output = f'{START_OUTPUT}{expected_output}' + expected_output = f"{START_OUTPUT}{expected_output}" assert output == expected_output class TestFetchRemovingIrrelevantIncidents: - notable1 = {'status': '5', 'event_id': '3'} - notable2 = {'status': '6', 'event_id': '4'} + notable1 = {"status": "5", "event_id": "3"} + notable2 = {"status": "6", "event_id": "4"} # In order to mock the service.jobs.oneshot() call in the fetch_notables function, we need to create # the following two classes @@ -754,22 +724,26 @@ def test_backwards_compatible(self, mocker: MockerFixture): """ from SplunkPy import UserMappingObject - mocker.patch.object(demisto, 'setLastRun') - mock_last_run = {'time': '2024-02-12T10:00:00', 'latest_time': '2024-02-19T10:00:00', - 'found_incidents_ids': {'1': 1700497516}} - mock_params = {'fetchQuery': '`notable` is cool', 
'fetch_limit': 2} - mocker.patch('demistomock.getLastRun', return_value=mock_last_run) - mocker.patch('demistomock.params', return_value=mock_params) - mocker.patch('splunklib.results.JSONResultsReader', return_value=[self.notable1, - self.notable2]) + mocker.patch.object(demisto, "setLastRun") + mock_last_run = { + "time": "2024-02-12T10:00:00", + "latest_time": "2024-02-19T10:00:00", + "found_incidents_ids": {"1": 1700497516}, + } + mock_params = {"fetchQuery": "`notable` is cool", "fetch_limit": 2} + mocker.patch("demistomock.getLastRun", return_value=mock_last_run) + mocker.patch("demistomock.params", return_value=mock_params) + mocker.patch("splunklib.results.JSONResultsReader", return_value=[self.notable1, self.notable2]) service = self.Service() - set_last_run_mocker = mocker.patch('demistomock.setLastRun') + set_last_run_mocker = mocker.patch("demistomock.setLastRun") mapper = UserMappingObject(service, False) - splunk.fetch_incidents(service, mapper, 'from_xsoar', 'from_splunk') - last_fetched_ids = set_last_run_mocker.call_args_list[0][0][0]['found_incidents_ids'] - assert last_fetched_ids == {'1': {'occurred_time': '2024-02-19T10:00:00'}, - '3': {'occurred_time': '2024-02-19T10:00:00'}, - '4': {'occurred_time': '2024-02-19T10:00:00'}} + splunk.fetch_incidents(service, mapper, "from_xsoar", "from_splunk") + last_fetched_ids = set_last_run_mocker.call_args_list[0][0][0]["found_incidents_ids"] + assert last_fetched_ids == { + "1": {"occurred_time": "2024-02-19T10:00:00"}, + "3": {"occurred_time": "2024-02-19T10:00:00"}, + "4": {"occurred_time": "2024-02-19T10:00:00"}, + } def test_remove_irrelevant_fetched_incident_ids(self, mocker: MockerFixture): """ @@ -784,28 +758,31 @@ def test_remove_irrelevant_fetched_incident_ids(self, mocker: MockerFixture): """ from SplunkPy import UserMappingObject - mocker.patch.object(demisto, 'setLastRun') - mock_last_run = {'time': '2024-02-12T10:00:00', 'latest_time': '2024-02-19T10:00:00', - 'found_incidents_ids': {'1': {'occurred_time': '2024-02-12T09:59:59'}, - '2': {'occurred_time': '2024-02-18T10:00:00'}}} - mock_params = {'fetchQuery': '`notable` is cool', 'fetch_limit': 2} - mocker.patch('demistomock.getLastRun', return_value=mock_last_run) - mocker.patch('demistomock.params', return_value=mock_params) - mocker.patch('splunklib.results.JSONResultsReader', return_value=[self.notable1, - self.notable2]) + mocker.patch.object(demisto, "setLastRun") + mock_last_run = { + "time": "2024-02-12T10:00:00", + "latest_time": "2024-02-19T10:00:00", + "found_incidents_ids": {"1": {"occurred_time": "2024-02-12T09:59:59"}, "2": {"occurred_time": "2024-02-18T10:00:00"}}, + } + mock_params = {"fetchQuery": "`notable` is cool", "fetch_limit": 2} + mocker.patch("demistomock.getLastRun", return_value=mock_last_run) + mocker.patch("demistomock.params", return_value=mock_params) + mocker.patch("splunklib.results.JSONResultsReader", return_value=[self.notable1, self.notable2]) service = self.Service() - set_last_run_mocker = mocker.patch('demistomock.setLastRun') + set_last_run_mocker = mocker.patch("demistomock.setLastRun") mapper = UserMappingObject(service, False) - splunk.fetch_incidents(service, mapper, 'from_xsoar', 'from_splunk') - last_fetched_ids = set_last_run_mocker.call_args_list[0][0][0]['found_incidents_ids'] - assert last_fetched_ids == {'2': {'occurred_time': '2024-02-18T10:00:00'}, - '3': {'occurred_time': '2024-02-19T10:00:00'}, - '4': {'occurred_time': '2024-02-19T10:00:00'}} + splunk.fetch_incidents(service, mapper, "from_xsoar", "from_splunk") 
+ last_fetched_ids = set_last_run_mocker.call_args_list[0][0][0]["found_incidents_ids"] + assert last_fetched_ids == { + "2": {"occurred_time": "2024-02-18T10:00:00"}, + "3": {"occurred_time": "2024-02-19T10:00:00"}, + "4": {"occurred_time": "2024-02-19T10:00:00"}, + } class TestFetchForLateIndexedEvents: - notable1 = {'status': '5', 'event_id': 'id_1'} - notable2 = {'status': '6', 'event_id': 'id_2'} + notable1 = {"status": "5", "event_id": "id_1"} + notable2 = {"status": "6", "event_id": "id_2"} # In order to mock the service.jobs.oneshot() call in the fetch_notables function, we need to create # the following two classes @@ -834,19 +811,23 @@ def test_fetch_query_and_oneshot_args(self, mocker: MockerFixture): - Make sure that the offset of the fetch query is set to 0 """ from SplunkPy import UserMappingObject - mocker.patch.object(demisto, 'setLastRun') - mock_last_run = {'time': '2018-10-24T14:13:20', 'late_indexed_pagination': True, - 'found_incidents_ids': {'1234': 1700497516, '5678': 1700497516}} - mock_params = {'fetchQuery': 'something'} - mocker.patch('demistomock.getLastRun', return_value=mock_last_run) - mocker.patch('demistomock.params', return_value=mock_params) - mocker.patch('splunklib.results.JSONResultsReader', return_value=[self.notable1]) + + mocker.patch.object(demisto, "setLastRun") + mock_last_run = { + "time": "2018-10-24T14:13:20", + "late_indexed_pagination": True, + "found_incidents_ids": {"1234": 1700497516, "5678": 1700497516}, + } + mock_params = {"fetchQuery": "something"} + mocker.patch("demistomock.getLastRun", return_value=mock_last_run) + mocker.patch("demistomock.params", return_value=mock_params) + mocker.patch("splunklib.results.JSONResultsReader", return_value=[self.notable1]) service = self.Service() - oneshot_mocker = mocker.patch.object(service.jobs, 'oneshot', side_effect=service.jobs.oneshot) + oneshot_mocker = mocker.patch.object(service.jobs, "oneshot", side_effect=service.jobs.oneshot) mapper = UserMappingObject(service, False) - splunk.fetch_incidents(service, mapper, 'from_xsoar', 'from_splunk') + splunk.fetch_incidents(service, mapper, "from_xsoar", "from_splunk") assert oneshot_mocker.call_args_list[0][0][0] == 'something | where not event_id in ("1234","5678")' - assert oneshot_mocker.call_args_list[0][1]['offset'] == 0 + assert oneshot_mocker.call_args_list[0][1]["offset"] == 0 # If (num_of_dropped == FETCH_LIMIT and '`notable`' in fetch_query), then late_indexed_pagination should be set to True def test_first_condition_for_late_indexed_pagination(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): @@ -863,21 +844,20 @@ def test_first_condition_for_late_indexed_pagination(self, mocker: MockerFixture - Make sure that the key "late_indexed_pagination" in last run object is set to True """ from SplunkPy import UserMappingObject + # MonkeyPatch can be used to patch global variables - monkeypatch.setattr(splunk, 'FETCH_LIMIT', 2) - mocker.patch.object(demisto, 'setLastRun') - mock_last_run = {'time': '2018-10-24T14:13:20', - 'found_incidents_ids': {'id_1': 1700497516, 'id_2': 1700497516}} - mock_params = {'fetchQuery': '`notable` is cool', 'fetch_limit': 2} - mocker.patch('demistomock.getLastRun', return_value=mock_last_run) - mocker.patch('demistomock.params', return_value=mock_params) - mocker.patch('splunklib.results.JSONResultsReader', return_value=[self.notable1, - self.notable2]) - set_last_run_mocker = mocker.patch('demistomock.setLastRun') + monkeypatch.setattr(splunk, "FETCH_LIMIT", 2) + mocker.patch.object(demisto, 
"setLastRun") + mock_last_run = {"time": "2018-10-24T14:13:20", "found_incidents_ids": {"id_1": 1700497516, "id_2": 1700497516}} + mock_params = {"fetchQuery": "`notable` is cool", "fetch_limit": 2} + mocker.patch("demistomock.getLastRun", return_value=mock_last_run) + mocker.patch("demistomock.params", return_value=mock_params) + mocker.patch("splunklib.results.JSONResultsReader", return_value=[self.notable1, self.notable2]) + set_last_run_mocker = mocker.patch("demistomock.setLastRun") service = self.Service() mapper = UserMappingObject(service, False) - splunk.fetch_incidents(service, mapper, 'from_xsoar', 'from_splunk') - assert set_last_run_mocker.call_args_list[0][0][0]['late_indexed_pagination'] is True + splunk.fetch_incidents(service, mapper, "from_xsoar", "from_splunk") + assert set_last_run_mocker.call_args_list[0][0][0]["late_indexed_pagination"] is True # If (len(incidents) == FETCH_LIMIT and late_indexed_pagination), then late_indexed_pagination should be set to True def test_second_condition_for_late_indexed_pagination(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): @@ -894,21 +874,24 @@ def test_second_condition_for_late_indexed_pagination(self, mocker: MockerFixtur - Make sure that the key "late_indexed_pagination" in last run object is set to True """ from SplunkPy import UserMappingObject + # MonkeyPatch can be used to patch global variables - monkeypatch.setattr(splunk, 'FETCH_LIMIT', 2) - mocker.patch.object(demisto, 'setLastRun') - mock_last_run = {'time': '2018-10-24T14:13:20', 'late_indexed_pagination': True, - 'found_incidents_ids': {'1234': 1700497516, '5678': 1700497516}} - mock_params = {'fetchQuery': '`notable` is cool', 'fetch_limit': 2} - mocker.patch('demistomock.getLastRun', return_value=mock_last_run) - mocker.patch('demistomock.params', return_value=mock_params) - mocker.patch('splunklib.results.JSONResultsReader', return_value=[self.notable1, - self.notable2]) - set_last_run_mocker = mocker.patch('demistomock.setLastRun') + monkeypatch.setattr(splunk, "FETCH_LIMIT", 2) + mocker.patch.object(demisto, "setLastRun") + mock_last_run = { + "time": "2018-10-24T14:13:20", + "late_indexed_pagination": True, + "found_incidents_ids": {"1234": 1700497516, "5678": 1700497516}, + } + mock_params = {"fetchQuery": "`notable` is cool", "fetch_limit": 2} + mocker.patch("demistomock.getLastRun", return_value=mock_last_run) + mocker.patch("demistomock.params", return_value=mock_params) + mocker.patch("splunklib.results.JSONResultsReader", return_value=[self.notable1, self.notable2]) + set_last_run_mocker = mocker.patch("demistomock.setLastRun") service = self.Service() mapper = UserMappingObject(service, False) - splunk.fetch_incidents(service, mapper, 'from_xsoar', 'from_splunk') - assert set_last_run_mocker.call_args_list[0][0][0]['late_indexed_pagination'] is True + splunk.fetch_incidents(service, mapper, "from_xsoar", "from_splunk") + assert set_last_run_mocker.call_args_list[0][0][0]["late_indexed_pagination"] is True def test_fetch_incidents(mocker): @@ -925,50 +908,41 @@ def test_fetch_incidents(mocker): - make sure that the owner is not part of the incident response """ from SplunkPy import UserMappingObject - mocker.patch.object(demisto, 'incidents') - mocker.patch.object(demisto, 'setLastRun') - mock_last_run = {'time': '2018-10-24T14:13:20'} - mock_params = {'fetchQuery': "something"} - mocker.patch('demistomock.getLastRun', return_value=mock_last_run) - mocker.patch('demistomock.params', return_value=mock_params) - service = 
mocker.patch('splunklib.client.connect', return_value=None) - mocker.patch('splunklib.results.JSONResultsReader', return_value=deepcopy(SAMPLE_RESPONSE)) + + mocker.patch.object(demisto, "incidents") + mocker.patch.object(demisto, "setLastRun") + mock_last_run = {"time": "2018-10-24T14:13:20"} + mock_params = {"fetchQuery": "something"} + mocker.patch("demistomock.getLastRun", return_value=mock_last_run) + mocker.patch("demistomock.params", return_value=mock_params) + service = mocker.patch("splunklib.client.connect", return_value=None) + mocker.patch("splunklib.results.JSONResultsReader", return_value=deepcopy(SAMPLE_RESPONSE)) mapper = UserMappingObject(service, False) - splunk.fetch_incidents(service, mapper, 'from_xsoar', 'from_splunk') + splunk.fetch_incidents(service, mapper, "from_xsoar", "from_splunk") incidents = demisto.incidents.call_args[0][0] assert demisto.incidents.call_count == 1 assert len(incidents) == 2 - assert incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - " \ - "Recurring Malware Infection - Rule" - assert not incidents[0].get('owner') + assert ( + incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - " "Recurring Malware Infection - Rule" + ) + assert not incidents[0].get("owner") SPLUNK_RESULTS = [ { - "rawJSON": - '{"source": "This is the alert type", "field_name1": "field_val1", "field_name2": "field_val2"}', + "rawJSON": '{"source": "This is the alert type", "field_name1": "field_val1", "field_name2": "field_val2"}', "details": "Endpoint - High Or Critical Priority Host With Malware - Rule", - "labels": [ - { - "type": "security_domain", - "value": "Endpoint - High Or Critical Priority Host With Malware - Rule" - } - ], + "labels": [{"type": "security_domain", "value": "Endpoint - High Or Critical Priority Host With Malware - Rule"}], } ] EXPECTED_OUTPUT = { - 'This is the alert type': { - "source": "This is the alert type", - "field_name1": "field_val1", - "field_name2": "field_val2" - } - + "This is the alert type": {"source": "This is the alert type", "field_name1": "field_val1", "field_name2": "field_val2"} } def test_create_mapping_dict(): - mapping_dict = splunk.create_mapping_dict(SPLUNK_RESULTS, type_field='source') + mapping_dict = splunk.create_mapping_dict(SPLUNK_RESULTS, type_field="source") assert mapping_dict == EXPECTED_OUTPUT @@ -985,50 +959,54 @@ def test_fetch_notables(mocker): - make sure the incident response is valid. 
- make sure that the owner is not part of the incident response """ - mocker.patch.object(splunk.client.Job, 'is_done', return_value=True) - mocker.patch.object(splunk.client.Job, 'results', return_value=None) - mocker.patch.object(splunk, 'ENABLED_ENRICHMENTS', [splunk.ASSET_ENRICHMENT, - splunk.DRILLDOWN_ENRICHMENT, splunk.IDENTITY_ENRICHMENT]) - mocker.patch.object(demisto, 'incidents') - mocker.patch.object(demisto, 'setLastRun') - mock_last_run = {'time': '2018-10-24T14:13:20'} - mock_params = {'fetchQuery': "something"} - mocker.patch('demistomock.getLastRun', return_value=mock_last_run) - mocker.patch('demistomock.params', return_value=mock_params) - service = Service('DONE') - mocker.patch('splunklib.results.JSONResultsReader', return_value=deepcopy(SAMPLE_RESPONSE)) + mocker.patch.object(splunk.client.Job, "is_done", return_value=True) + mocker.patch.object(splunk.client.Job, "results", return_value=None) + mocker.patch.object( + splunk, "ENABLED_ENRICHMENTS", [splunk.ASSET_ENRICHMENT, splunk.DRILLDOWN_ENRICHMENT, splunk.IDENTITY_ENRICHMENT] + ) + mocker.patch.object(demisto, "incidents") + mocker.patch.object(demisto, "setLastRun") + mock_last_run = {"time": "2018-10-24T14:13:20"} + mock_params = {"fetchQuery": "something"} + mocker.patch("demistomock.getLastRun", return_value=mock_last_run) + mocker.patch("demistomock.params", return_value=mock_params) + service = Service("DONE") + mocker.patch("splunklib.results.JSONResultsReader", return_value=deepcopy(SAMPLE_RESPONSE)) mapper = splunk.UserMappingObject(service, False) - splunk.fetch_incidents(service, mapper=mapper, comment_tag_to_splunk='comment_tag_to_splunk', - comment_tag_from_splunk='comment_tag_from_splunk') + splunk.fetch_incidents( + service, mapper=mapper, comment_tag_to_splunk="comment_tag_to_splunk", comment_tag_from_splunk="comment_tag_from_splunk" + ) cache_object = splunk.Cache.load_from_integration_context(get_integration_context()) assert cache_object.submitted_notables notable = cache_object.submitted_notables[0] - incident_from_cache = notable.to_incident(mapper, 'comment_tag_to_splunk', 'comment_tag_from_splunk') + incident_from_cache = notable.to_incident(mapper, "comment_tag_to_splunk", "comment_tag_from_splunk") incidents = demisto.incidents.call_args[0][0] assert demisto.incidents.call_count == 1 assert len(incidents) == 0 - assert incident_from_cache["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - " \ - "Recurring Malware Infection - Rule" - assert not incident_from_cache.get('owner') + assert ( + incident_from_cache["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - " + "Recurring Malware Infection - Rule" + ) + assert not incident_from_cache.get("owner") # now call second time to make sure that the incident fetched - splunk.fetch_incidents(service, mapper=mapper, comment_tag_to_splunk='comment_tag_to_splunk', - comment_tag_from_splunk='comment_tag_from_splunk') + splunk.fetch_incidents( + service, mapper=mapper, comment_tag_to_splunk="comment_tag_to_splunk", comment_tag_from_splunk="comment_tag_from_splunk" + ) incidents = demisto.incidents.call_args[0][0] assert len(incidents) == 2 - assert incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - " \ - "Recurring Malware Infection - Rule" - assert not incidents[0].get('owner') + assert ( + incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - " "Recurring Malware Infection - Rule" + ) + assert not incidents[0].get("owner") """ ========== Enriching 
Fetch Mechanism Tests ========== """ -@pytest.mark.parametrize('integration_context, output', [ - ({splunk.INCIDENTS: ['incident']}, ['incident']), - ({splunk.INCIDENTS: []}, []), - ({}, []) -]) +@pytest.mark.parametrize( + "integration_context, output", [({splunk.INCIDENTS: ["incident"]}, ["incident"]), ({splunk.INCIDENTS: []}, []), ({}, [])] +) def test_fetch_incidents_for_mapping(integration_context, output, mocker): """ Scenario: When a user configures a mapper using Fetch from Instance when the enrichment mechanism is working, @@ -1045,8 +1023,8 @@ def test_fetch_incidents_for_mapping(integration_context, output, mocker): Then: - Return the expected result """ - mocker.patch.object(demisto, 'info') - mocker.patch.object(demisto, 'incidents') + mocker.patch.object(demisto, "info") + mocker.patch.object(demisto, "incidents") splunk.fetch_incidents_for_mapping(integration_context) assert demisto.incidents.call_count == 1 assert demisto.incidents.call_args[0][0] == output @@ -1065,15 +1043,11 @@ def test_reset_enriching_fetch_mechanism(mocker): Then: - Check that the integration context does not contain this fields """ - integration_context = { - splunk.CACHE: "cache_string", - splunk.INCIDENTS: ['i1', 'i2'], - 'wow': 'wow' - } - mocker.patch('SplunkPy.get_integration_context', return_value=integration_context) - mocker.patch('SplunkPy.set_integration_context') + integration_context = {splunk.CACHE: "cache_string", splunk.INCIDENTS: ["i1", "i2"], "wow": "wow"} + mocker.patch("SplunkPy.get_integration_context", return_value=integration_context) + mocker.patch("SplunkPy.set_integration_context") splunk.reset_enriching_fetch_mechanism() - assert integration_context == {'wow': 'wow'} + assert integration_context == {"wow": "wow"} @pytest.mark.parametrize( @@ -1088,8 +1062,7 @@ def test_reset_enriching_fetch_mechanism(mocker): ), ], ) -def test_is_enrichment_exceeding_timeout(mocker, drilldown_creation_time, asset_creation_time, enrichment_timeout, - output): +def test_is_enrichment_exceeding_timeout(mocker, drilldown_creation_time, asset_creation_time, enrichment_timeout, output): """ Scenario: When one of the notable's enrichments is exceeding the timeout, we want to create an incident with all the data gathered so far. 
@@ -1104,22 +1077,18 @@ def test_is_enrichment_exceeding_timeout(mocker, drilldown_creation_time, asset_ Then: - Return the expected result """ - mocker.patch.object(splunk, 'ENABLED_ENRICHMENTS', - return_value=[splunk.DRILLDOWN_ENRICHMENT, splunk.ASSET_ENRICHMENT]) - notable = splunk.Notable({splunk.EVENT_ID: 'id'}) + mocker.patch.object(splunk, "ENABLED_ENRICHMENTS", return_value=[splunk.DRILLDOWN_ENRICHMENT, splunk.ASSET_ENRICHMENT]) + notable = splunk.Notable({splunk.EVENT_ID: "id"}) notable.enrichments.append(splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, creation_time=drilldown_creation_time)) notable.enrichments.append(splunk.Enrichment(splunk.ASSET_ENRICHMENT, creation_time=asset_creation_time)) assert notable.is_enrichment_process_exceeding_timeout(enrichment_timeout) is output -INCIDENT_1 = {'name': 'incident1', 'rawJSON': json.dumps({})} -INCIDENT_2 = {'name': 'incident2', 'rawJSON': json.dumps({})} +INCIDENT_1 = {"name": "incident1", "rawJSON": json.dumps({})} +INCIDENT_2 = {"name": "incident2", "rawJSON": json.dumps({})} -@pytest.mark.parametrize('incidents, output', [ - ([], []), - ([INCIDENT_1, INCIDENT_2], [INCIDENT_1, INCIDENT_2]) -]) +@pytest.mark.parametrize("incidents, output", [([], []), ([INCIDENT_1, INCIDENT_2], [INCIDENT_1, INCIDENT_2])]) def test_store_incidents_for_mapping(incidents, output): """ Scenario: Store ready incidents in integration context, to be retrieved by a user configuring a mapper @@ -1140,13 +1109,27 @@ def test_store_incidents_for_mapping(incidents, output): assert splunk.get_integration_context().get(splunk.INCIDENTS, []) == output -@pytest.mark.parametrize('notable_data, raw, earliest, latest', [ - ({}, {}, "", ""), - ({"drilldown_earliest": f"${splunk.INFO_MIN_TIME}$", - "drilldown_latest": f"${splunk.INFO_MAX_TIME}$"}, - {splunk.INFO_MIN_TIME: '1', splunk.INFO_MAX_TIME: '2'}, '1', '2'), - ({"drilldown_earliest": '1', "drilldown_latest": '2', }, {}, '1', '2') -]) +@pytest.mark.parametrize( + "notable_data, raw, earliest, latest", + [ + ({}, {}, "", ""), + ( + {"drilldown_earliest": f"${splunk.INFO_MIN_TIME}$", "drilldown_latest": f"${splunk.INFO_MAX_TIME}$"}, + {splunk.INFO_MIN_TIME: "1", splunk.INFO_MAX_TIME: "2"}, + "1", + "2", + ), + ( + { + "drilldown_earliest": "1", + "drilldown_latest": "2", + }, + {}, + "1", + "2", + ), + ], +) def test_get_drilldown_timeframe(notable_data, raw, earliest, latest, mocker): """ Scenario: Trying to get the drilldown's timeframe from the notable's data @@ -1162,18 +1145,21 @@ def test_get_drilldown_timeframe(notable_data, raw, earliest, latest, mocker): Then: - Return the expected result """ - mocker.patch.object(demisto, 'info') + mocker.patch.object(demisto, "info") earliest_offset, latest_offset = splunk.get_drilldown_timeframe(notable_data, raw) assert earliest_offset == earliest assert latest_offset == latest -@pytest.mark.parametrize('raw_field, notable_data, expected_field, expected_value', [ - ('field|s', {'field': '1'}, 'field', '1'), - ('field', {'field': '1'}, 'field', '1'), - ('field|s', {'_raw': 'field=1, value=2'}, 'field', '1'), - ('x', {'y': '2'}, '', '') -]) +@pytest.mark.parametrize( + "raw_field, notable_data, expected_field, expected_value", + [ + ("field|s", {"field": "1"}, "field", "1"), + ("field", {"field": "1"}, "field", "1"), + ("field|s", {"_raw": "field=1, value=2"}, "field", "1"), + ("x", {"y": "2"}, "", ""), + ], +) def test_get_notable_field_and_value(raw_field, notable_data, expected_field, expected_value, mocker): """ Scenario: When building the drilldown search query, we 
search for the field in the raw search query @@ -1192,44 +1178,69 @@ def test_get_notable_field_and_value(raw_field, notable_data, expected_field, ex Then: - Return the expected result """ - mocker.patch.object(demisto, 'error') + mocker.patch.object(demisto, "error") field, value = splunk.get_notable_field_and_value(raw_field, notable_data) assert field == expected_field assert value == expected_value -@pytest.mark.parametrize('notable_data, search, raw, is_query_name, expected_search', [ - ({'a': '1', '_raw': 'c=3'}, 'search a=$a|s$ c=$c$ suffix', {'c': '3'}, False, 'search a="1" c="3" suffix'), - ({'a': ['1', '2'], 'b': '3'}, 'search a=$a|s$ b=$b|s$ suffix', {}, False, 'search (a="1" OR a="2") b="3" suffix'), - ({'a': '1', '_raw': 'b=3', 'event_id': '123'}, 'search a=$a|s$ c=$c$ suffix', {'b': '3'}, False, ''), - ({"signature": "Backdoor.test"}, "View related '$signature$' events for $dest$", {"dest": "ACME-test-005"}, True, - "View related 'Backdoor.test' events for ACME-test-005"), - ({}, 'View all wineventlogs involving user="$user$"', {'user': "test"}, True, - 'View all wineventlogs involving user="test"'), - ({}, 'Test query name', {}, True, 'Test query name'), - ({'user': 'test\crusher'}, 'index="test" | where user = $user|s$', {}, False, - 'index="test" | where user="test\\\\crusher"'), - ({'user': 'test\crusher'}, 'index="test" | where user = "$user|s$"', {}, False, - 'index="test" | where user="test\\\\crusher"'), - ({'countryNameA': '"test\country"', 'countryNameB': '""'}, - 'search countryA="$countryNameA|s$" countryB=$countryNameB|s$', {}, False, - 'search countryA="test\country" countryB=""'), - ({'test': 'test_user'}, - 'search countryA=\$this is a test\$', {}, False, - 'search countryA=\$this is a test\$'), -], ids=[ - "search query fields in notables data and raw data", - "search query fields in notable data more than one value", - "search query fields don't exist in notable data and raw data", - "query name fields in notables data and raw data", - "query name fields in raw data", - "query name without fields to replace", - "search query with a user field that contains a backslash", - "search query with a user field that is surrounded by quotation marks and contains a backslash", - "search query fields in notable data more than one value, with one empty value", - "search query with $ as part of the search - no need to replace" - -]) +@pytest.mark.parametrize( + "notable_data, search, raw, is_query_name, expected_search", + [ + ({"a": "1", "_raw": "c=3"}, "search a=$a|s$ c=$c$ suffix", {"c": "3"}, False, 'search a="1" c="3" suffix'), + ({"a": ["1", "2"], "b": "3"}, "search a=$a|s$ b=$b|s$ suffix", {}, False, 'search (a="1" OR a="2") b="3" suffix'), + ({"a": "1", "_raw": "b=3", "event_id": "123"}, "search a=$a|s$ c=$c$ suffix", {"b": "3"}, False, ""), + ( + {"signature": "Backdoor.test"}, + "View related '$signature$' events for $dest$", + {"dest": "ACME-test-005"}, + True, + "View related 'Backdoor.test' events for ACME-test-005", + ), + ( + {}, + 'View all wineventlogs involving user="$user$"', + {"user": "test"}, + True, + 'View all wineventlogs involving user="test"', + ), + ({}, "Test query name", {}, True, "Test query name"), + ( + {"user": "test\crusher"}, + 'index="test" | where user = $user|s$', + {}, + False, + 'index="test" | where user="test\\\\crusher"', + ), + ( + {"user": "test\crusher"}, + 'index="test" | where user = "$user|s$"', + {}, + False, + 'index="test" | where user="test\\\\crusher"', + ), + ( + {"countryNameA": '"test\country"', "countryNameB": 
'""'}, + 'search countryA="$countryNameA|s$" countryB=$countryNameB|s$', + {}, + False, + 'search countryA="test\country" countryB=""', + ), + ({"test": "test_user"}, "search countryA=\$this is a test\$", {}, False, "search countryA=\$this is a test\$"), + ], + ids=[ + "search query fields in notables data and raw data", + "search query fields in notable data more than one value", + "search query fields don't exist in notable data and raw data", + "query name fields in notables data and raw data", + "query name fields in raw data", + "query name without fields to replace", + "search query with a user field that contains a backslash", + "search query with a user field that is surrounded by quotation marks and contains a backslash", + "search query fields in notable data more than one value, with one empty value", + "search query with $ as part of the search - no need to replace", + ], +) def test_build_drilldown_search(notable_data, search, raw, is_query_name, expected_search, mocker): """ Scenario: When building the drilldown search query, we replace every field in between "$" sign with its @@ -1252,19 +1263,26 @@ def test_build_drilldown_search(notable_data, search, raw, is_query_name, expect Then: - Return the expected result """ - mocker.patch.object(demisto, 'error') - mocker.patch.object(demisto, 'params', return_value={}) + mocker.patch.object(demisto, "error") + mocker.patch.object(demisto, "params", return_value={}) parsed_query = splunk.build_drilldown_search(notable_data, search, raw, is_query_name) assert parsed_query == expected_search -@pytest.mark.parametrize('notable_data, prefix, fields, query_part', [ - ({'user': ['u1', 'u2']}, 'identity', ['user'], '(identity="u1" OR identity="u2")'), - ({'_raw': '1233, user=u1'}, 'user', ['user'], 'user="u1"'), - ({'user': ['u1', 'u2'], '_raw': '1321, src_user=u3'}, 'user', ['user', 'src_user'], - '(user="u1" OR user="u2" OR user="u3")'), - ({}, 'prefix', ['field'], '') -]) +@pytest.mark.parametrize( + "notable_data, prefix, fields, query_part", + [ + ({"user": ["u1", "u2"]}, "identity", ["user"], '(identity="u1" OR identity="u2")'), + ({"_raw": "1233, user=u1"}, "user", ["user"], 'user="u1"'), + ( + {"user": ["u1", "u2"], "_raw": "1321, src_user=u3"}, + "user", + ["user", "src_user"], + '(user="u1" OR user="u2" OR user="u3")', + ), + ({}, "prefix", ["field"], ""), + ], +) def test_get_fields_query_part(notable_data, prefix, fields, query_part): """ Scenario: When building an enrichment search query, we search for values in the notable's data / notable's raw data @@ -1285,21 +1303,40 @@ def test_get_fields_query_part(notable_data, prefix, fields, query_part): assert splunk.get_fields_query_part(notable_data, prefix, fields) == query_part -@pytest.mark.parametrize('enrichments, expected_result', [ - ([splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1'), - splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='2'), - splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='3')], 3), - ([splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1'), - splunk.Enrichment(splunk.ASSET_ENRICHMENT, enrichment_id='2'), - splunk.Enrichment(splunk.IDENTITY_ENRICHMENT, enrichment_id='3')], 1), - ([splunk.Enrichment(splunk.ASSET_ENRICHMENT, enrichment_id='1'), - splunk.Enrichment(splunk.ASSET_ENRICHMENT, enrichment_id='2'), - splunk.Enrichment(splunk.IDENTITY_ENRICHMENT, enrichment_id='3')], 0) -], ids=[ - "A Notable with 3 drilldown enrichments", - "A Notable with 1 drilldown enrichment, 1 asset enrichment and 1 
identity enrichment", - "A Notable with 2 asset enrichments and 1 identity enrichment" -]) +@pytest.mark.parametrize( + "enrichments, expected_result", + [ + ( + [ + splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id="1"), + splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id="2"), + splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id="3"), + ], + 3, + ), + ( + [ + splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id="1"), + splunk.Enrichment(splunk.ASSET_ENRICHMENT, enrichment_id="2"), + splunk.Enrichment(splunk.IDENTITY_ENRICHMENT, enrichment_id="3"), + ], + 1, + ), + ( + [ + splunk.Enrichment(splunk.ASSET_ENRICHMENT, enrichment_id="1"), + splunk.Enrichment(splunk.ASSET_ENRICHMENT, enrichment_id="2"), + splunk.Enrichment(splunk.IDENTITY_ENRICHMENT, enrichment_id="3"), + ], + 0, + ), + ], + ids=[ + "A Notable with 3 drilldown enrichments", + "A Notable with 1 drilldown enrichment, 1 asset enrichment and 1 identity enrichment", + "A Notable with 2 asset enrichments and 1 identity enrichment", + ], +) def test_drilldown_searches_counter(enrichments, expected_result): """ Tests the drilldown searches enrichment counter. @@ -1315,44 +1352,117 @@ def test_drilldown_searches_counter(enrichments, expected_result): Then: - Return the expected result - number of drilldown enrichments. """ - notable = splunk.Notable({}, notable_id='id', enrichments=enrichments) + notable = splunk.Notable({}, notable_id="id", enrichments=enrichments) assert notable.drilldown_searches_counter() == expected_result -@pytest.mark.parametrize('enrichments, expected_data', [ - ([splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.SUCCESSFUL, - query_name='query_name1', query_search='query_search1', data=[{'result1': 'a'}, {'result2': 'b'}]), - splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='2', status=splunk.Enrichment.SUCCESSFUL, - query_name='query_name2', query_search='query_search2', data=[{'result1': 'c'}, {'result2': 'd'}]), - splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='3', status=splunk.Enrichment.SUCCESSFUL, - query_name='query_name3', query_search='query_search3', data=[{'result1': 'e'}, {'result2': 'f'}])], - [{'query_name': 'query_name1', 'query_search': 'query_search1', 'query_results': [{'result1': 'a'}, {'result2': 'b'}], - 'enrichment_status': splunk.Enrichment.SUCCESSFUL}, - {'query_name': 'query_name2', 'query_search': 'query_search2', 'query_results': [{'result1': 'c'}, {'result2': 'd'}], - 'enrichment_status': splunk.Enrichment.SUCCESSFUL}, - {'query_name': 'query_name3', 'query_search': 'query_search3', 'query_results': [{'result1': 'e'}, {'result2': 'f'}], - 'enrichment_status': splunk.Enrichment.SUCCESSFUL}] - ), - ([splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.SUCCESSFUL, - query_name='query_name1', query_search='query_search1', data=[{'result1': 'a'}, {'result2': 'b'}]), - splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='2', status=splunk.Enrichment.SUCCESSFUL, - query_name='query_name2', query_search='query_search2', data=[{'result1': 'c'}, {'result2': 'd'}])], - [{'query_name': 'query_name1', 'query_search': 'query_search1', 'query_results': [{'result1': 'a'}, {'result2': 'b'}], - 'enrichment_status': splunk.Enrichment.SUCCESSFUL}, - {'query_name': 'query_name2', 'query_search': 'query_search2', 'query_results': [{'result1': 'c'}, {'result2': 'd'}], - 'enrichment_status': splunk.Enrichment.SUCCESSFUL}] - ), - 
([splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.SUCCESSFUL, - query_name='query_name1', query_search='query_search1', data=[{'result1': 'a'}, {'result2': 'b'}])], - [{'result1': 'a'}, {'result2': 'b'}] - ), - ([], None) -], ids=[ - "A Notable with 3 drilldown enrichments, 1 asset enrichment and 1 identity enrichment", - "A Notable with 2 drilldown enrichment, 1 asset enrichment and 1 identity enrichment", - "A Notable with 1 drilldown enrichment, 1 asset enrichment and 1 identity enrichment", - "A Notable without drilldown enrichments, 1 asset enrichments and 1 identity enrichment" -]) +@pytest.mark.parametrize( + "enrichments, expected_data", + [ + ( + [ + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.SUCCESSFUL, + query_name="query_name1", + query_search="query_search1", + data=[{"result1": "a"}, {"result2": "b"}], + ), + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="2", + status=splunk.Enrichment.SUCCESSFUL, + query_name="query_name2", + query_search="query_search2", + data=[{"result1": "c"}, {"result2": "d"}], + ), + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="3", + status=splunk.Enrichment.SUCCESSFUL, + query_name="query_name3", + query_search="query_search3", + data=[{"result1": "e"}, {"result2": "f"}], + ), + ], + [ + { + "query_name": "query_name1", + "query_search": "query_search1", + "query_results": [{"result1": "a"}, {"result2": "b"}], + "enrichment_status": splunk.Enrichment.SUCCESSFUL, + }, + { + "query_name": "query_name2", + "query_search": "query_search2", + "query_results": [{"result1": "c"}, {"result2": "d"}], + "enrichment_status": splunk.Enrichment.SUCCESSFUL, + }, + { + "query_name": "query_name3", + "query_search": "query_search3", + "query_results": [{"result1": "e"}, {"result2": "f"}], + "enrichment_status": splunk.Enrichment.SUCCESSFUL, + }, + ], + ), + ( + [ + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.SUCCESSFUL, + query_name="query_name1", + query_search="query_search1", + data=[{"result1": "a"}, {"result2": "b"}], + ), + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="2", + status=splunk.Enrichment.SUCCESSFUL, + query_name="query_name2", + query_search="query_search2", + data=[{"result1": "c"}, {"result2": "d"}], + ), + ], + [ + { + "query_name": "query_name1", + "query_search": "query_search1", + "query_results": [{"result1": "a"}, {"result2": "b"}], + "enrichment_status": splunk.Enrichment.SUCCESSFUL, + }, + { + "query_name": "query_name2", + "query_search": "query_search2", + "query_results": [{"result1": "c"}, {"result2": "d"}], + "enrichment_status": splunk.Enrichment.SUCCESSFUL, + }, + ], + ), + ( + [ + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.SUCCESSFUL, + query_name="query_name1", + query_search="query_search1", + data=[{"result1": "a"}, {"result2": "b"}], + ) + ], + [{"result1": "a"}, {"result2": "b"}], + ), + ([], None), + ], + ids=[ + "A Notable with 3 drilldown enrichments, 1 asset enrichment and 1 identity enrichment", + "A Notable with 2 drilldown enrichment, 1 asset enrichment and 1 identity enrichment", + "A Notable with 1 drilldown enrichment, 1 asset enrichment and 1 identity enrichment", + "A Notable without drilldown enrichments, 1 asset enrichments and 1 identity enrichment", + ], +) def test_to_incident_notable_enrichments_data(enrichments, expected_data): """ Tests the 
logic of the Notable.to_incident() function, regarding the results data of multiple drilldown enrichments. @@ -1374,85 +1484,222 @@ def test_to_incident_notable_enrichments_data(enrichments, expected_data): 4. No 'Drilldown' key in the notables data. """ - notable = splunk.Notable({}, notable_id='id', enrichments=enrichments) + notable = splunk.Notable({}, notable_id="id", enrichments=enrichments) enrichments_to_add = [ - splunk.Enrichment(splunk.ASSET_ENRICHMENT, enrichment_id='111', status=splunk.Enrichment.SUCCESSFUL, - data=[{'result1': 'a'}, {'result2': 'b'}]), - splunk.Enrichment(splunk.IDENTITY_ENRICHMENT, enrichment_id='222', status=splunk.Enrichment.FAILED, - data=[{'result1': 'a'}, {'result2': 'b'}]) + splunk.Enrichment( + splunk.ASSET_ENRICHMENT, + enrichment_id="111", + status=splunk.Enrichment.SUCCESSFUL, + data=[{"result1": "a"}, {"result2": "b"}], + ), + splunk.Enrichment( + splunk.IDENTITY_ENRICHMENT, + enrichment_id="222", + status=splunk.Enrichment.FAILED, + data=[{"result1": "a"}, {"result2": "b"}], + ), ] notable.enrichments.extend(enrichments_to_add) - service = Service('DONE') + service = Service("DONE") mapper = splunk.UserMappingObject(service, False) - notable.to_incident(mapper, 'comment_tag_to_splunk', 'comment_tag_from_splunk') + notable.to_incident(mapper, "comment_tag_to_splunk", "comment_tag_from_splunk") - assert notable.data.get(splunk.ASSET_ENRICHMENT) == [{'result1': 'a'}, {'result2': 'b'}] - assert notable.data.get(splunk.IDENTITY_ENRICHMENT) == [{'result1': 'a'}, {'result2': 'b'}] + assert notable.data.get(splunk.ASSET_ENRICHMENT) == [{"result1": "a"}, {"result2": "b"}] + assert notable.data.get(splunk.IDENTITY_ENRICHMENT) == [{"result1": "a"}, {"result2": "b"}] assert notable.data.get(splunk.DRILLDOWN_ENRICHMENT) == expected_data -@pytest.mark.parametrize('enrichments, enrichment_type, expected_stauts_result', [ - ([splunk.Enrichment(splunk.ASSET_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.SUCCESSFUL, - data=[{'result1': 'a'}, {'result2': 'b'}])], splunk.ASSET_ENRICHMENT, True - ), - ([splunk.Enrichment(splunk.ASSET_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.FAILED, - data=[{'result1': 'a'}, {'result2': 'b'}])], splunk.ASSET_ENRICHMENT, False - ), - ([splunk.Enrichment(splunk.IDENTITY_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.SUCCESSFUL, - data=[{'result1': 'a'}, {'result2': 'b'}])], splunk.IDENTITY_ENRICHMENT, True - ), - ([splunk.Enrichment(splunk.IDENTITY_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.FAILED, - data=[{'result1': 'a'}, {'result2': 'b'}])], splunk.IDENTITY_ENRICHMENT, False - ), - ([splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.SUCCESSFUL, - query_name='query_name1', query_search='query_search1', data=[{'result1': 'a'}, {'result2': 'b'}])], - splunk.DRILLDOWN_ENRICHMENT, True - ), - ([splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.FAILED, - query_name='query_name1', query_search='query_search1', data=[{'result1': 'a'}, {'result2': 'b'}])], - splunk.DRILLDOWN_ENRICHMENT, False - ), - ([splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.SUCCESSFUL, - data=[{'result1': 'a'}, {'result2': 'b'}]), - splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.FAILED, - data=[{'result1': 'a'}, {'result2': 'b'}])], splunk.DRILLDOWN_ENRICHMENT, True - ), - ([splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', 
status=splunk.Enrichment.FAILED, - data=[{'result1': 'a'}, {'result2': 'b'}]), - splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.SUCCESSFUL, - data=[{'result1': 'a'}, {'result2': 'b'}])], splunk.DRILLDOWN_ENRICHMENT, True - ), - ([splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.FAILED, - data=[{'result1': 'a'}, {'result2': 'b'}]), - splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.FAILED, - data=[{'result1': 'a'}, {'result2': 'b'}])], splunk.DRILLDOWN_ENRICHMENT, False - ), - ([splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.SUCCESSFUL, - data=[{'result1': 'a'}, {'result2': 'b'}]), - splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.SUCCESSFUL, - data=[{'result1': 'a'}, {'result2': 'b'}])], splunk.DRILLDOWN_ENRICHMENT, True - ), - ([splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.FAILED, - data=[{'result1': 'a'}, {'result2': 'b'}]), - splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.SUCCESSFUL, - data=[{'result1': 'a'}, {'result2': 'b'}]), - splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, enrichment_id='1', status=splunk.Enrichment.FAILED, - data=[{'result1': 'a'}, {'result2': 'b'}])], splunk.DRILLDOWN_ENRICHMENT, True - ) -], ids=[ - "A Notable with 1 successful Asset enrichment", - "A Notable with 1 failed Asset enrichment", - "A Notable with 1 successful Identity enrichment", - "A Notable with 1 failed Identity enrichment", - "A Notable with 1 successful Drilldown enrichment", - "A Notable with 1 failed Drilldown enrichment", - "A Notable with 1 successful Drilldown enrichment and 1 failed drilldown enrichment (the first is successful)", - "A Notable with 1 successful Drilldown enrichment and 1 failed drilldown enrichment (the second is successful)", - "A Notable with 2 Drilldown enrichments [failed, failed]", - "A Notable with 2 Drilldown enrichments [successful, successful]", - "A Notable with 3 Drilldown enrichments [failed, successful, failed]" -]) +@pytest.mark.parametrize( + "enrichments, enrichment_type, expected_stauts_result", + [ + ( + [ + splunk.Enrichment( + splunk.ASSET_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.SUCCESSFUL, + data=[{"result1": "a"}, {"result2": "b"}], + ) + ], + splunk.ASSET_ENRICHMENT, + True, + ), + ( + [ + splunk.Enrichment( + splunk.ASSET_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.FAILED, + data=[{"result1": "a"}, {"result2": "b"}], + ) + ], + splunk.ASSET_ENRICHMENT, + False, + ), + ( + [ + splunk.Enrichment( + splunk.IDENTITY_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.SUCCESSFUL, + data=[{"result1": "a"}, {"result2": "b"}], + ) + ], + splunk.IDENTITY_ENRICHMENT, + True, + ), + ( + [ + splunk.Enrichment( + splunk.IDENTITY_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.FAILED, + data=[{"result1": "a"}, {"result2": "b"}], + ) + ], + splunk.IDENTITY_ENRICHMENT, + False, + ), + ( + [ + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.SUCCESSFUL, + query_name="query_name1", + query_search="query_search1", + data=[{"result1": "a"}, {"result2": "b"}], + ) + ], + splunk.DRILLDOWN_ENRICHMENT, + True, + ), + ( + [ + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.FAILED, + query_name="query_name1", + 
query_search="query_search1", + data=[{"result1": "a"}, {"result2": "b"}], + ) + ], + splunk.DRILLDOWN_ENRICHMENT, + False, + ), + ( + [ + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.SUCCESSFUL, + data=[{"result1": "a"}, {"result2": "b"}], + ), + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.FAILED, + data=[{"result1": "a"}, {"result2": "b"}], + ), + ], + splunk.DRILLDOWN_ENRICHMENT, + True, + ), + ( + [ + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.FAILED, + data=[{"result1": "a"}, {"result2": "b"}], + ), + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.SUCCESSFUL, + data=[{"result1": "a"}, {"result2": "b"}], + ), + ], + splunk.DRILLDOWN_ENRICHMENT, + True, + ), + ( + [ + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.FAILED, + data=[{"result1": "a"}, {"result2": "b"}], + ), + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.FAILED, + data=[{"result1": "a"}, {"result2": "b"}], + ), + ], + splunk.DRILLDOWN_ENRICHMENT, + False, + ), + ( + [ + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.SUCCESSFUL, + data=[{"result1": "a"}, {"result2": "b"}], + ), + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.SUCCESSFUL, + data=[{"result1": "a"}, {"result2": "b"}], + ), + ], + splunk.DRILLDOWN_ENRICHMENT, + True, + ), + ( + [ + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.FAILED, + data=[{"result1": "a"}, {"result2": "b"}], + ), + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.SUCCESSFUL, + data=[{"result1": "a"}, {"result2": "b"}], + ), + splunk.Enrichment( + splunk.DRILLDOWN_ENRICHMENT, + enrichment_id="1", + status=splunk.Enrichment.FAILED, + data=[{"result1": "a"}, {"result2": "b"}], + ), + ], + splunk.DRILLDOWN_ENRICHMENT, + True, + ), + ], + ids=[ + "A Notable with 1 successful Asset enrichment", + "A Notable with 1 failed Asset enrichment", + "A Notable with 1 successful Identity enrichment", + "A Notable with 1 failed Identity enrichment", + "A Notable with 1 successful Drilldown enrichment", + "A Notable with 1 failed Drilldown enrichment", + "A Notable with 1 successful Drilldown enrichment and 1 failed drilldown enrichment (the first is successful)", + "A Notable with 1 successful Drilldown enrichment and 1 failed drilldown enrichment (the second is successful)", + "A Notable with 2 Drilldown enrichments [failed, failed]", + "A Notable with 2 Drilldown enrichments [successful, successful]", + "A Notable with 3 Drilldown enrichments [failed, successful, failed]", + ], +) def test_to_incident_notable_enrichments_status(enrichments, enrichment_type, expected_stauts_result): """ Tests the logic of the Notable.to_incident() function, regarding the statuses of enrichments. @@ -1491,10 +1738,10 @@ def test_to_incident_notable_enrichments_status(enrichments, enrichment_type, ex 11. Drilldown Enrichment status is: successful_drilldown_enrichment = True. 
""" - notable = splunk.Notable({}, notable_id='id', enrichments=enrichments) - service = Service('DONE') + notable = splunk.Notable({}, notable_id="id", enrichments=enrichments) + service = Service("DONE") mapper = splunk.UserMappingObject(service, False) - notable.to_incident(mapper, 'comment_tag_to_splunk', 'comment_tag_from_splunk') + notable.to_incident(mapper, "comment_tag_to_splunk", "comment_tag_from_splunk") assert notable.data[splunk.ENRICHMENT_TYPE_TO_ENRICHMENT_STATUS[enrichment_type]] == expected_stauts_result @@ -1510,40 +1757,47 @@ def test_parse_drilldown_searches(): Then: - Verify that the search data was parsed into a python dictionary as expected. """ - searches = ["{\"name\":\"View related '$signature$' events for $dest$\",\"search\":\"| from datamodel:\\\"Malware\\\"." - "\\\"Malware_Attacks\\\" | search dest=$dest|s$ signature=$signature|s$\",\"earliest\":17145" - "63300,\"latest\":1715168700}", - "{\"name\":\"View related '$category$' events for $signature$\",\"search\":\"| from datamodel:\\\"Malw" - "are\\\".\\\"Malware_Attacks\\\" \\n| fields category, dest, signature | search dest=$dest|s$ signature=" - "$signature|s$\",\"earliest\":1714563300,\"latest\":1715168700}" - ] + searches = [ + '{"name":"View related \'$signature$\' events for $dest$","search":"| from datamodel:\\"Malware\\".' + '\\"Malware_Attacks\\" | search dest=$dest|s$ signature=$signature|s$","earliest":17145' + '63300,"latest":1715168700}', + '{"name":"View related \'$category$\' events for $signature$","search":"| from datamodel:\\"Malw' + 'are\\".\\"Malware_Attacks\\" \\n| fields category, dest, signature | search dest=$dest|s$ signature=' + '$signature|s$","earliest":1714563300,"latest":1715168700}', + ] parsed_searches = splunk.parse_drilldown_searches(searches) for search in parsed_searches: assert isinstance(search, dict) assert parsed_searches == [ - {'name': "View related '$signature$' events for $dest$", - 'search': '| from datamodel:"Malware"."Malware_Attacks" | search dest=$dest|s$ signature=$signature|s$', - 'earliest': 1714563300, - 'latest': 1715168700 - }, - {'name': "View related '$category$' events for $signature$", - 'search': '| from datamodel:"Malware"."Malware_Attacks" \n| fields category, dest, signature | search dest=$dest|s$ ' - 'signature=$signature|s$', - 'earliest': 1714563300, - 'latest': 1715168700 - } + { + "name": "View related '$signature$' events for $dest$", + "search": '| from datamodel:"Malware"."Malware_Attacks" | search dest=$dest|s$ signature=$signature|s$', + "earliest": 1714563300, + "latest": 1715168700, + }, + { + "name": "View related '$category$' events for $signature$", + "search": '| from datamodel:"Malware"."Malware_Attacks" \n| fields category, dest, signature | search dest=$dest|s$ ' + "signature=$signature|s$", + "earliest": 1714563300, + "latest": 1715168700, + }, ] -@pytest.mark.parametrize('notable_data, expected_call_count', [ - ({'event_id': 'test_id', 'drilldown_search': 'test_search', 'drilldown_searches': ['test_search1', 'test_search2']}, 0), - ({'event_id': 'test_id', 'drilldown_search': '', 'drilldown_searches': ['test_search1', 'test_search2']}, 1), - ({'event_id': 'test_id', 'drilldown_searches': ['test_search1', 'test_search2']}, 1) -], ids=[ - "A notable data with both 'drilldown_search' and 'drilldown_searches' keys with values", - "A notable data with both 'drilldown_search' and 'drilldown_searches' keys but 'drilldown_search' has no value", - "A notable data with 'drilldown_searches' key only" -]) +@pytest.mark.parametrize( + 
"notable_data, expected_call_count", + [ + ({"event_id": "test_id", "drilldown_search": "test_search", "drilldown_searches": ["test_search1", "test_search2"]}, 0), + ({"event_id": "test_id", "drilldown_search": "", "drilldown_searches": ["test_search1", "test_search2"]}, 1), + ({"event_id": "test_id", "drilldown_searches": ["test_search1", "test_search2"]}, 1), + ], + ids=[ + "A notable data with both 'drilldown_search' and 'drilldown_searches' keys with values", + "A notable data with both 'drilldown_search' and 'drilldown_searches' keys but 'drilldown_search' has no value", + "A notable data with 'drilldown_searches' key only", + ], +) def test_drilldown_enrichment_main_condition(mocker, notable_data, expected_call_count): """ Tests the logic of the first (main) condition in the drilldown_enrichment() function. @@ -1567,28 +1821,32 @@ def test_drilldown_enrichment_main_condition(mocker, notable_data, expected_call 3. The value of the 'drilldown_searches' key is taken, and therefore we call the parse_drilldown_searches function. """ - mock_parse_drilldown_searches = mocker.patch('SplunkPy.parse_drilldown_searches', return_value=[]) - service = Service('DONE') + mock_parse_drilldown_searches = mocker.patch("SplunkPy.parse_drilldown_searches", return_value=[]) + service = Service("DONE") splunk.drilldown_enrichment(service, notable_data, 5) assert mock_parse_drilldown_searches.call_count == expected_call_count -@pytest.mark.parametrize('notable_data, expected_call_count', [ - ({'event_id': 'test_id', 'drilldown_search': 'test_search', 'drilldown_searches': [{}], '_raw': "{'test':1}"}, 1), - ({'event_id': 'test_id', - 'drilldown_searches': - ["{\"name\":\"View related '$signature$' events for $dest$\",\"search\":\"| from datamodel:\\\"Malware\\\".\\\"Malwa" - "re_Attacks\\\" | search dest=$dest|s$ signature=$signature|s$\",\"earliest\":1714563300,\"latest\":1715168700}", - "{\"name\":\"View related '$category$' events for $signature$\",\"search\":\"| from datamodel:\\\"Malware\\\".\\\"M" - "alware_Attacks\\\" \\n| fields category, dest, signature | search dest=$dest|s$ signature=$signature|s$\",\"ear" - "liest\":1714563300,\"latest\":1715168700}" - ] - }, - 0) -], ids=[ - "A notable data with one drilldown search", - "A notable data with multiple drilldown searches" -]) +@pytest.mark.parametrize( + "notable_data, expected_call_count", + [ + ({"event_id": "test_id", "drilldown_search": "test_search", "drilldown_searches": [{}], "_raw": "{'test':1}"}, 1), + ( + { + "event_id": "test_id", + "drilldown_searches": [ + '{"name":"View related \'$signature$\' events for $dest$","search":"| from datamodel:\\"Malware\\".\\"Malwa' + 're_Attacks\\" | search dest=$dest|s$ signature=$signature|s$","earliest":1714563300,"latest":1715168700}', + '{"name":"View related \'$category$\' events for $signature$","search":"| from datamodel:\\"Malware\\".\\"M' + 'alware_Attacks\\" \\n| fields category, dest, signature | search dest=$dest|s$ signature=$signature|s$","ear' + 'liest":1714563300,"latest":1715168700}', + ], + }, + 0, + ), + ], + ids=["A notable data with one drilldown search", "A notable data with multiple drilldown searches"], +) def test_drilldown_enrichment_get_timeframe(mocker, notable_data, expected_call_count): """ Tests that in a case of one drildown search we extract the search timeframe from the notable data by calling the @@ -1610,49 +1868,80 @@ def test_drilldown_enrichment_get_timeframe(mocker, notable_data, expected_call_ 2. 
The timeframe is determined according to fields of each drilldown search data dict. """ - mock_get_drilldown_timeframe = mocker.patch('SplunkPy.get_drilldown_timeframe', return_value=("", "")) - mocker.patch('SplunkPy.build_drilldown_search', return_value='') - service = Service('DONE') + mock_get_drilldown_timeframe = mocker.patch("SplunkPy.get_drilldown_timeframe", return_value=("", "")) + mocker.patch("SplunkPy.build_drilldown_search", return_value="") + service = Service("DONE") splunk.drilldown_enrichment(service, notable_data, 5) assert mock_get_drilldown_timeframe.call_count == expected_call_count -@pytest.mark.parametrize('notable_data, expected_result', [ - ({'event_id': 'test_id', 'drilldown_name': 'View all login attempts by system $src$', - 'drilldown_search': "| from datamodel:\"Authentication\".\"Authentication\" | search src=$src|s$", - 'drilldown_searches': "{\"name\":\"View all login attempts by system $src$\",\"search\":\"| from datamodel:\\\"Authent" - "ication\\\".\\\"Authentication\\\" | search src=$src|s$\",\"earliest\":1715040000," - "\"latest\":1715126400}", - '_raw': "src=\'test_src\'", "drilldown_latest": "1715126400.000000000", "drilldown_earliest": "1715040000.000000000"}, - [ - ("View all login attempts by system 'test_src'", - '| from datamodel:"Authentication"."Authentication" | search src="\'test_src\'"')]), - - ({'event_id': 'test_id2', 'drilldown_searches': - ["{\"name\":\"View all login attempts by system $src$\",\"search\":\"| from datamodel:\\\"Authentication\\\".\\\"Authe" - "ntication\\\" | search src=$src|s$\",\"earliest\":1715040000,\"latest\":1715126400}", - "{\"name\":\"View all test involving user=\\\"$user$\\\"\",\"search\":\"index=\\\"test\\\"\\n| where " - "user = $user|s$\",\"earliest\":1716955500,\"latest\":1716959400}"], - '_raw': "src=\'test_src\', user='test_user'"}, - [("View all login attempts by system 'test_src'", - '| from datamodel:"Authentication"."Authentication" | search src="\'test_src\'"'), - ('View all test involving user="\'test_user\'"', - 'search index="test"\n| where user="\'test_user\'"')]), - ({'event_id': 'test_id3', 'drilldown_searches': - ["{\"name\":\"View all login attempts by system $src$\",\"search\":\"| from datamodel:\\\"Authentication\\\".\\\"Authe" - "ntication\\\" | search src=$src|s$\",\"earliest_offset\":1715040000,\"latest_offset\":1715126400}", - "{\"name\":\"View all test involving user=\\\"$user$\\\"\",\"search\":\"index=\\\"test\\\"\\n| where " - "user = $user|s$\",\"earliest_offset\":1716955500,\"latest_offset\":1716959400}"], - '_raw': "src=\'test_src\', user='test_user'"}, - [("View all login attempts by system 'test_src'", - '| from datamodel:"Authentication"."Authentication" | search src="\'test_src\'"'), - ('View all test involving user="\'test_user\'"', - 'search index="test"\n| where user="\'test_user\'"')]), -], ids=[ - "A notable data with one drilldown search enrichment", - "A notable data with two drilldown searches which contained the earlies in 'earliest' key ", - "A notable data with two drilldown searches which contained the earlies in 'earliest_offset' key " -]) +@pytest.mark.parametrize( + "notable_data, expected_result", + [ + ( + { + "event_id": "test_id", + "drilldown_name": "View all login attempts by system $src$", + "drilldown_search": '| from datamodel:"Authentication"."Authentication" | search src=$src|s$', + "drilldown_searches": '{"name":"View all login attempts by system $src$","search":"| from datamodel:\\"Authent' + 'ication\\".\\"Authentication\\" | search 
src=$src|s$","earliest":1715040000,' + '"latest":1715126400}', + "_raw": "src='test_src'", + "drilldown_latest": "1715126400.000000000", + "drilldown_earliest": "1715040000.000000000", + }, + [ + ( + "View all login attempts by system 'test_src'", + '| from datamodel:"Authentication"."Authentication" | search src="\'test_src\'"', + ) + ], + ), + ( + { + "event_id": "test_id2", + "drilldown_searches": [ + '{"name":"View all login attempts by system $src$","search":"| from datamodel:\\"Authentication\\".\\"Authe' + 'ntication\\" | search src=$src|s$","earliest":1715040000,"latest":1715126400}', + '{"name":"View all test involving user=\\"$user$\\"","search":"index=\\"test\\"\\n| where ' + 'user = $user|s$","earliest":1716955500,"latest":1716959400}', + ], + "_raw": "src='test_src', user='test_user'", + }, + [ + ( + "View all login attempts by system 'test_src'", + '| from datamodel:"Authentication"."Authentication" | search src="\'test_src\'"', + ), + ("View all test involving user=\"'test_user'\"", 'search index="test"\n| where user="\'test_user\'"'), + ], + ), + ( + { + "event_id": "test_id3", + "drilldown_searches": [ + '{"name":"View all login attempts by system $src$","search":"| from datamodel:\\"Authentication\\".\\"Authe' + 'ntication\\" | search src=$src|s$","earliest_offset":1715040000,"latest_offset":1715126400}', + '{"name":"View all test involving user=\\"$user$\\"","search":"index=\\"test\\"\\n| where ' + 'user = $user|s$","earliest_offset":1716955500,"latest_offset":1716959400}', + ], + "_raw": "src='test_src', user='test_user'", + }, + [ + ( + "View all login attempts by system 'test_src'", + '| from datamodel:"Authentication"."Authentication" | search src="\'test_src\'"', + ), + ("View all test involving user=\"'test_user'\"", 'search index="test"\n| where user="\'test_user\'"'), + ], + ), + ], + ids=[ + "A notable data with one drilldown search enrichment", + "A notable data with two drilldown searches which contained the earlies in 'earliest' key ", + "A notable data with two drilldown searches which contained the earlies in 'earliest_offset' key ", + ], +) def test_drilldown_enrichment(notable_data, expected_result): """ Tests the logic of the drilldown_enrichment function. 
@@ -1670,7 +1959,8 @@ def test_drilldown_enrichment(notable_data, expected_result): """ from splunklib import client - service = Service('DONE') + + service = Service("DONE") jobs_and_queries = splunk.drilldown_enrichment(service, notable_data, 5) for i in range(len(jobs_and_queries)): job_and_queries = jobs_and_queries[i] @@ -1679,42 +1969,67 @@ def test_drilldown_enrichment(notable_data, expected_result): assert isinstance(job_and_queries[2], client.Job) -@pytest.mark.parametrize('notable_data, debug_log_message', [ - ({'event_id': 'test_id'}, 'drill-down was not properly configured for notable test_id'), - - ({'event_id': 'test_id', 'drilldown_name': 'View all login attempts by system $src$', - 'drilldown_search': "| from datamodel:\"Authentication\".\"Authentication\" | search src=$src|s$", - '_raw': "src=\'test_src\'", "drilldown_latest": "", "drilldown_earliest": ""}, - 'Failed getting the drilldown timeframe for notable test_id'), - - ({'event_id': 'test_id', 'drilldown_name': 'View all login attempts by system $src$', - 'drilldown_search': "| from datamodel:\"Authentication\".\"Authentication\" | search src=$src|s$", '_raw': "", - "drilldown_latest": "00101", "drilldown_earliest": "00001"}, - "Couldn't build search query for notable test_id with the following drilldown search "), - - ({'event_id': 'test_id', - 'drilldown_searches': [ - "{\"name\":\"View all login attempts by system $src$\",\"search\":\"| from datamodel:\\\"Authentica" - "tion\\\".\\\"Authentication\\\" | search src=$src|s$\",\"earliest\":\"\",\"latest\":\"\"}", - "{\"name\":\"View all test involving user=\\\"$user$\\\"\",\"search\":\"index=\\\"test\\\"\\n| where user =" - "$user|s$\",\"earliest\":\"\",\"latest\":\"\"}"], - '_raw': "src=\'test_src\', user='test_user'"}, - 'Failed getting the drilldown timeframe for notable test_id'), - - ({'event_id': 'test_id', - 'drilldown_searches': - ["{\"name\":\"View all login attempts by system $src$\",\"search\":\"| from datamodel:\\\"Authentic" - "ation\\\".\\\"Authentication\\\" | search src=$src|s$\",\"earliest\":\"\",\"latest\":\"\"}", - "{\"name\":\"View all test involving user=\\\"$user$\\\"\",\"search\":\"index=\\\"test\\\"\\n| where user =" - "$user|s$\",\"earliest\":\"\",\"latest\":\"\"}"], '_raw': ""}, - "Couldn't build search query for notable test_id with the following drilldown search"), -], ids=[ - "A notable data without drilldown enrichment data", - "A notable data with a single drilldown enrichment without search timeframe data", - "A notable data with a single drilldown enrichment with an invalid search query", - "A notable data with multiple drilldown enrichments without search timeframe data", - "A notable data with multiple drilldown enrichments with invalid search queries" -]) +@pytest.mark.parametrize( + "notable_data, debug_log_message", + [ + ({"event_id": "test_id"}, "drill-down was not properly configured for notable test_id"), + ( + { + "event_id": "test_id", + "drilldown_name": "View all login attempts by system $src$", + "drilldown_search": '| from datamodel:"Authentication"."Authentication" | search src=$src|s$', + "_raw": "src='test_src'", + "drilldown_latest": "", + "drilldown_earliest": "", + }, + "Failed getting the drilldown timeframe for notable test_id", + ), + ( + { + "event_id": "test_id", + "drilldown_name": "View all login attempts by system $src$", + "drilldown_search": '| from datamodel:"Authentication"."Authentication" | search src=$src|s$', + "_raw": "", + "drilldown_latest": "00101", + "drilldown_earliest": "00001", + }, + 
"Couldn't build search query for notable test_id with the following drilldown search ", + ), + ( + { + "event_id": "test_id", + "drilldown_searches": [ + '{"name":"View all login attempts by system $src$","search":"| from datamodel:\\"Authentica' + 'tion\\".\\"Authentication\\" | search src=$src|s$","earliest":"","latest":""}', + '{"name":"View all test involving user=\\"$user$\\"","search":"index=\\"test\\"\\n| where user =' + '$user|s$","earliest":"","latest":""}', + ], + "_raw": "src='test_src', user='test_user'", + }, + "Failed getting the drilldown timeframe for notable test_id", + ), + ( + { + "event_id": "test_id", + "drilldown_searches": [ + '{"name":"View all login attempts by system $src$","search":"| from datamodel:\\"Authentic' + 'ation\\".\\"Authentication\\" | search src=$src|s$","earliest":"","latest":""}', + '{"name":"View all test involving user=\\"$user$\\"","search":"index=\\"test\\"\\n| where user =' + '$user|s$","earliest":"","latest":""}', + ], + "_raw": "", + }, + "Couldn't build search query for notable test_id with the following drilldown search", + ), + ], + ids=[ + "A notable data without drilldown enrichment data", + "A notable data with a single drilldown enrichment without search timeframe data", + "A notable data with a single drilldown enrichment with an invalid search query", + "A notable data with multiple drilldown enrichments without search timeframe data", + "A notable data with multiple drilldown enrichments with invalid search queries", + ], +) def test_drilldown_enrichment_no_enrichement_cases(mocker, notable_data, debug_log_message): """ Tests the logic of the drilldown_enrichment function when for some reason the enrichments raw data is invalid. @@ -1733,9 +2048,9 @@ def test_drilldown_enrichment_no_enrichement_cases(mocker, notable_data, debug_l - Verify that the returned value is a tuple of None values as expected. 
""" - debug_log = mocker.patch.object(demisto, 'debug') - mocker.patch.object(demisto, 'error') - service = Service('DONE') + debug_log = mocker.patch.object(demisto, "debug") + mocker.patch.object(demisto, "error") + service = Service("DONE") jobs_and_queries = splunk.drilldown_enrichment(service, notable_data, 5) for i in range(len(jobs_and_queries)): assert jobs_and_queries[i] == (None, None, None) @@ -1745,14 +2060,17 @@ def test_drilldown_enrichment_no_enrichement_cases(mocker, notable_data, debug_l """ ========== Mirroring Mechanism Tests ========== """ -@pytest.mark.parametrize('last_update, demisto_params, splunk_time_timestamp', [ - ('2021-02-22T18:39:47.753+00:00', {'timezone': '0'}, 1614019187.753), - ('2021-02-22T18:39:47.753+02:00', {'timezone': '+120'}, 1614019187.753), - ('2021-02-22T20:39:47.753+02:00', {'timezone': '0'}, 1614019187.753), - ('2021-02-09T16:41:30.589575+02:00', {}, '') -]) +@pytest.mark.parametrize( + "last_update, demisto_params, splunk_time_timestamp", + [ + ("2021-02-22T18:39:47.753+00:00", {"timezone": "0"}, 1614019187.753), + ("2021-02-22T18:39:47.753+02:00", {"timezone": "+120"}, 1614019187.753), + ("2021-02-22T20:39:47.753+02:00", {"timezone": "0"}, 1614019187.753), + ("2021-02-09T16:41:30.589575+02:00", {}, ""), + ], +) def test_get_last_update_in_splunk_time(last_update, demisto_params, splunk_time_timestamp, mocker): - """ Tests the conversion of the Demisto server time into timestamp in Splunk Server time + """Tests the conversion of the Demisto server time into timestamp in Splunk Server time Given: - The last update time in the Demisto server @@ -1763,12 +2081,12 @@ def test_get_last_update_in_splunk_time(last_update, demisto_params, splunk_time - Conversion is correct - An Exception is raised in case that Splunk Server timezone is not specified in Demisto params """ - mocker.patch.object(demisto, 'params', return_value=demisto_params) + mocker.patch.object(demisto, "params", return_value=demisto_params) if demisto_params: assert splunk.get_last_update_in_splunk_time(last_update) == splunk_time_timestamp else: - error_msg = 'Cannot mirror incidents when timezone is not configured. Please enter the ' - 'timezone of the Splunk server being used in the integration configuration.' + error_msg = "Cannot mirror incidents when timezone is not configured. Please enter the " + "timezone of the Splunk server being used in the integration configuration." 
with pytest.raises(Exception, match=error_msg): splunk.get_last_update_in_splunk_time(last_update) @@ -1902,29 +2220,36 @@ def test_get_last_update_in_splunk_time(last_update, demisto_params, splunk_time ), ], ) -def test_get_modified_remote_data_command_close_incident(mocker, notable_data: list[results.Message | dict], - func_call_kwargs: dict, expected_closure_data: dict): +def test_get_modified_remote_data_command_close_incident( + mocker, notable_data: list[results.Message | dict], func_call_kwargs: dict, expected_closure_data: dict +): class Jobs: def oneshot(self, **kwargs): - assert kwargs['output_mode'] == splunk.OUTPUT_MODE_JSON + assert kwargs["output_mode"] == splunk.OUTPUT_MODE_JSON return notable_data class Service: def __init__(self): self.jobs = Jobs() - expected_entries = {'EntryContext': {'mirrorRemoteId': 'id'}, 'Type': EntryType.NOTE, 'ContentsFormat': EntryFormat.JSON} - args = {'lastUpdate': '2021-02-09T16:41:30.589575+02:00', 'id': 'id'} - mocker.patch.object(demisto, 'params', return_value={'timezone': '0'}) - mocker.patch.object(demisto, 'debug') - mocker.patch.object(demisto, 'info') - mocker.patch('SplunkPy.results.JSONResultsReader', return_value=notable_data) - mocker.patch.object(demisto, 'results') + + expected_entries = {"EntryContext": {"mirrorRemoteId": "id"}, "Type": EntryType.NOTE, "ContentsFormat": EntryFormat.JSON} + args = {"lastUpdate": "2021-02-09T16:41:30.589575+02:00", "id": "id"} + mocker.patch.object(demisto, "params", return_value={"timezone": "0"}) + mocker.patch.object(demisto, "debug") + mocker.patch.object(demisto, "info") + mocker.patch("SplunkPy.results.JSONResultsReader", return_value=notable_data) + mocker.patch.object(demisto, "results") service = Service() - splunk.get_modified_remote_data_command(service, args, mapper=splunk.UserMappingObject(service, False), - comment_tag_from_splunk='comment_tag_from_splunk', **func_call_kwargs) + splunk.get_modified_remote_data_command( + service, + args, + mapper=splunk.UserMappingObject(service, False), + comment_tag_from_splunk="comment_tag_from_splunk", + **func_call_kwargs, + ) results = demisto.results.call_args[0][0] - expected_entries['Contents'] = notable_data[1] + expected_entries["Contents"] = notable_data[1] expected_results = [expected_entries] if expected_closure_data: @@ -1947,7 +2272,7 @@ def test_get_remote_data_command_with_message(mocker): Returns: None """ - service = mocker.patch.object(client, 'Service') + service = mocker.patch.object(client, "Service") mocker.patch.object(demisto, "info") mocker.patch.object(demisto, "params", return_value={"timezone": "0"}) func_call_kwargs = { @@ -1956,12 +2281,10 @@ def test_get_remote_data_command_with_message(mocker): "close_end_statuses": True, "close_extra_labels": ["Custom"], "mapper": splunk.UserMappingObject(service, False), - "comment_tag_from_splunk": 'from_splunk' + "comment_tag_from_splunk": "from_splunk", } - mocker.patch( - "SplunkPy.results.JSONResultsReader", return_value=[results.Message("INFO-test", "test message")] - ) + mocker.patch("SplunkPy.results.JSONResultsReader", return_value=[results.Message("INFO-test", "test message")]) splunk.get_modified_remote_data_command(service, **func_call_kwargs) assert demisto.info.call_args[0][0] == "Splunk-SDK message: test message" @@ -1974,16 +2297,16 @@ def test_fetch_with_error_in_message(mocker): Then - assert DemistoException is raised """ - mock_params = {'fetchQuery': "something", "parseNotableEventsRaw": True} - mocker.patch('demistomock.getLastRun', return_value={'time': 
'2018-10-24T14:13:20'}) - mocker.patch('demistomock.params', return_value=mock_params) - mocker.patch('splunklib.results.JSONResultsReader', return_value=[results.Message("FATAL", "Error")]) + mock_params = {"fetchQuery": "something", "parseNotableEventsRaw": True} + mocker.patch("demistomock.getLastRun", return_value={"time": "2018-10-24T14:13:20"}) + mocker.patch("demistomock.params", return_value=mock_params) + mocker.patch("splunklib.results.JSONResultsReader", return_value=[results.Message("FATAL", "Error")]) # run - service = mocker.patch('splunklib.client.connect') + service = mocker.patch("splunklib.client.connect") with pytest.raises(DemistoException) as e: splunk.fetch_incidents(service, None, None, None) - assert 'Failed to fetch incidents, check the provided query in Splunk web search' in e.value.message + assert "Failed to fetch incidents, check the provided query in Splunk web search" in e.value.message def test_get_modified_remote_data_command_add_comment(mocker): @@ -1998,29 +2321,29 @@ def test_get_modified_remote_data_command_add_comment(mocker): - ensure the event was updated """ - test_id = 'test_event_id' - notable_data = {'status_label': 'New', 'rule_id': test_id, 'event_id': test_id, 'status_end': 'false', - 'comment': 'new comment from splunk', 'reviewer': 'admin', - 'review_time': '1612881691.589575'} - entry_tempale = { - 'EntryContext': {'mirrorRemoteId': test_id}, - 'Type': 1 + test_id = "test_event_id" + notable_data = { + "status_label": "New", + "rule_id": test_id, + "event_id": test_id, + "status_end": "false", + "comment": "new comment from splunk", + "reviewer": "admin", + "review_time": "1612881691.589575", } + entry_tempale = {"EntryContext": {"mirrorRemoteId": test_id}, "Type": 1} expected_comment_entry = entry_tempale | { - 'Contents': 'new comment from splunk', - 'ContentsFormat': 'text', - 'Tags': ['from_splunk'], - 'Note': True - } - expected_notable_entry = entry_tempale | { - 'Contents': notable_data, - 'ContentsFormat': 'json' + "Contents": "new comment from splunk", + "ContentsFormat": "text", + "Tags": ["from_splunk"], + "Note": True, } + expected_notable_entry = entry_tempale | {"Contents": notable_data, "ContentsFormat": "json"} - mocker.patch.object(demisto, 'params', return_value={'timezone': '0'}) - mocker.patch('SplunkPy.results.JSONResultsReader', return_value=[notable_data]) - mocker.patch.object(demisto, 'results') - service = mocker.patch.object(client, 'Service') + mocker.patch.object(demisto, "params", return_value={"timezone": "0"}) + mocker.patch("SplunkPy.results.JSONResultsReader", return_value=[notable_data]) + mocker.patch.object(demisto, "results") + service = mocker.patch.object(client, "Service") func_call_kwargs = { "args": {"lastUpdate": "2021-02-09T16:41:30.589575+02:00", "id": "id"}, @@ -2028,11 +2351,11 @@ def test_get_modified_remote_data_command_add_comment(mocker): "close_end_statuses": True, "close_extra_labels": ["Custom"], "mapper": splunk.UserMappingObject(service, False), - "comment_tag_from_splunk": 'from_splunk' + "comment_tag_from_splunk": "from_splunk", } splunk.get_modified_remote_data_command(service, **func_call_kwargs) results = demisto.results.call_args[0][0][0] - notable_data.update({'SplunkComments': [{'Comment': 'new comment from splunk'}]}) + notable_data.update({"SplunkComments": [{"Comment": "new comment from splunk"}]}) note_results = demisto.results.call_args[0][0][1] assert demisto.results.call_count == 1 @@ -2041,21 +2364,21 @@ def test_get_modified_remote_data_command_add_comment(mocker): 
def test_get_modified_remote_data_command(mocker): - updated_incidet_review = {'rule_id': 'id', 'event_id': 'id'} - service = mocker.patch.object(client, 'Service') + updated_incidet_review = {"rule_id": "id", "event_id": "id"} + service = mocker.patch.object(client, "Service") func_call_kwargs = { "args": {"lastUpdate": "2021-02-09T16:41:30.589575+02:00", "id": "id"}, "close_incident": True, "close_end_statuses": True, "close_extra_labels": ["Custom"], "mapper": splunk.UserMappingObject(service, False), - "comment_tag_from_splunk": 'from_splunk' + "comment_tag_from_splunk": "from_splunk", } - mocker.patch.object(demisto, 'params', return_value={'timezone': '0'}) - mocker.patch('SplunkPy.results.JSONResultsReader', return_value=[updated_incidet_review]) - mocker.patch.object(demisto, 'results') + mocker.patch.object(demisto, "params", return_value={"timezone": "0"}) + mocker.patch("SplunkPy.results.JSONResultsReader", return_value=[updated_incidet_review]) + mocker.patch.object(demisto, "results") splunk.get_modified_remote_data_command(service, **func_call_kwargs) - results = demisto.results.call_args[0][0][0]['Contents'] + results = demisto.results.call_args[0][0][0]["Contents"] assert demisto.results.call_count == 1 assert results == updated_incidet_review @@ -2072,222 +2395,546 @@ def test_edit_notable_event__failed_to_update(mocker, requests_mock): Then - ensure the error message parsed correctly and returned to the user """ - test_base_url = 'https://test.url.com:8089/' - test_token = 'token12345' - test_args = { - 'eventIDs': 'ID100', - 'owner': 'dbot' - } - mocker.patch.object(splunk, 'return_error') + test_base_url = "https://test.url.com:8089/" + test_token = "token12345" + test_args = {"eventIDs": "ID100", "owner": "dbot"} + mocker.patch.object(splunk, "return_error") - requests_mock.post(f'{test_base_url}services/notable_update', json='ValueError: Invalid owner value.') + requests_mock.post(f"{test_base_url}services/notable_update", json="ValueError: Invalid owner value.") - splunk.splunk_edit_notable_event_command( - base_url=test_base_url, - token=test_token, - auth_token=None, - args=test_args - ) + splunk.splunk_edit_notable_event_command(base_url=test_base_url, token=test_token, auth_token=None, args=test_args) assert splunk.return_error.call_count == 1 error_message = splunk.return_error.call_args[0][0] - assert error_message == 'Could not update notable events: ID100: ValueError: Invalid owner value.' - - -@pytest.mark.parametrize('args, params, call_count, success', [ - ({'delta': {'status': '2'}, 'remoteId': '12345', 'status': 2, 'incidentChanged': True}, - {'host': 'ec.com', 'port': '8089', 'authentication': {'identifier': 'i', 'password': 'p'}}, 4, True), - ({'delta': {'status': '2'}, 'remoteId': '12345', 'status': 2, 'incidentChanged': True}, - {'host': 'ec.com', 'port': '8089', 'authentication': {'identifier': 'i', 'password': 'p'}}, 3, False), - ({'delta': {'status': '2'}, 'remoteId': '12345', 'status': 2, 'incidentChanged': True}, - {'host': 'ec.com', 'port': '8089', 'authentication': {'identifier': 'i', 'password': 'p'}, 'close_notable': True}, - 5, True) -]) + assert error_message == "Could not update notable events: ID100: ValueError: Invalid owner value." 
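+
+# The parametrized test below drives update_remote_system_command through three
+# setups (a successful notable update, a failed update, and an update with
+# close_notable enabled); the expected demisto.debug call counts (4, 3 and 5)
+# tell those paths apart. A minimal standalone sketch of the Splunk REST wiring,
+# assuming only the two endpoints the test itself mocks:
+#
+#     import requests_mock
+#
+#     base_url = "https://ec.com:8089/"
+#     with requests_mock.Mocker() as m:
+#         # login endpoint returning the session key used to authenticate
+#         m.post(f"{base_url}services/auth/login", json={"sessionKey": "session_key"})
+#         # notable update endpoint; "success" selects the pass or fail branch
+#         m.post(f"{base_url}services/notable_update", json={"success": True, "message": "wow"})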
+ + +@pytest.mark.parametrize( + "args, params, call_count, success", + [ + ( + {"delta": {"status": "2"}, "remoteId": "12345", "status": 2, "incidentChanged": True}, + {"host": "ec.com", "port": "8089", "authentication": {"identifier": "i", "password": "p"}}, + 4, + True, + ), + ( + {"delta": {"status": "2"}, "remoteId": "12345", "status": 2, "incidentChanged": True}, + {"host": "ec.com", "port": "8089", "authentication": {"identifier": "i", "password": "p"}}, + 3, + False, + ), + ( + {"delta": {"status": "2"}, "remoteId": "12345", "status": 2, "incidentChanged": True}, + {"host": "ec.com", "port": "8089", "authentication": {"identifier": "i", "password": "p"}, "close_notable": True}, + 5, + True, + ), + ], +) def test_update_remote_system(args, params, call_count, success, mocker, requests_mock): class Service: def __init__(self): - self.token = 'fake_token' + self.token = "fake_token" self.basic = True - self._auth_headers = [('Authentication', self.token)] + self._auth_headers = [("Authentication", self.token)] - mocker.patch.object(demisto, 'info') - mocker.patch.object(demisto, 'debug') - base_url = 'https://' + params['host'] + ':' + params['port'] + '/' - requests_mock.post( - f'{base_url}services/auth/login', json={'sessionKey': 'session_key'} - ) + mocker.patch.object(demisto, "info") + mocker.patch.object(demisto, "debug") + base_url = "https://" + params["host"] + ":" + params["port"] + "/" + requests_mock.post(f"{base_url}services/auth/login", json={"sessionKey": "session_key"}) requests_mock.post( - f'{base_url}services/notable_update', - json={'success': success, 'message': 'wow'}, + f"{base_url}services/notable_update", + json={"success": success, "message": "wow"}, ) if not success: - mocker.patch.object(demisto, 'error') + mocker.patch.object(demisto, "error") service = Service() mapper = splunk.UserMappingObject(service, False) - assert splunk.update_remote_system_command(args, params, service, None, mapper=mapper, - comment_tag_to_splunk='comment_tag_to_splunk') == args['remoteId'] + assert ( + splunk.update_remote_system_command( + args, params, service, None, mapper=mapper, comment_tag_to_splunk="comment_tag_to_splunk" + ) + == args["remoteId"] + ) assert demisto.debug.call_count == call_count if not success: assert demisto.error.call_count == 1 NOTABLE = { - 'rule_name': 'string', 'rule_title': 'string', 'security_domain': 'string', 'index': 'string', - 'rule_description': 'string', 'risk_score': 'string', 'host': 'string', - 'host_risk_object_type': 'string', 'dest_risk_object_type': 'string', 'dest_risk_score': 'string', - 'splunk_server': 'string', '_sourcetype': 'string', '_indextime': 'string', '_time': 'string', - 'src_risk_object_type': 'string', 'src_risk_score': 'string', '_raw': 'string', 'urgency': 'string', - 'owner': 'string', 'info_min_time': 'string', 'info_max_time': 'string', 'comment': 'string', - 'reviewer': 'string', 'rule_id': 'string', 'action': 'string', 'app': 'string', - 'authentication_method': 'string', 'authentication_service': 'string', 'bugtraq': 'string', - 'bytes': 'string', 'bytes_in': 'string', 'bytes_out': 'string', 'category': 'string', 'cert': 'string', - 'change': 'string', 'change_type': 'string', 'command': 'string', 'comments': 'string', - 'cookie': 'string', 'creation_time': 'string', 'cve': 'string', 'cvss': 'string', 'date': 'string', - 'description': 'string', 'dest': 'string', 'dest_bunit': 'string', 'dest_category': 'string', - 'dest_dns': 'string', 'dest_interface': 'string', 'dest_ip': 'string', 'dest_ip_range': 
'string', - 'dest_mac': 'string', 'dest_nt_domain': 'string', 'dest_nt_host': 'string', 'dest_port': 'string', - 'dest_priority': 'string', 'dest_translated_ip': 'string', 'dest_translated_port': 'string', - 'dest_type': 'string', 'dest_zone': 'string', 'direction': 'string', 'dlp_type': 'string', - 'dns': 'string', 'duration': 'string', 'dvc': 'string', 'dvc_bunit': 'string', 'dvc_category': 'string', - 'dvc_ip': 'string', 'dvc_mac': 'string', 'dvc_priority': 'string', 'dvc_zone': 'string', - 'file_hash': 'string', 'file_name': 'string', 'file_path': 'string', 'file_size': 'string', - 'http_content_type': 'string', 'http_method': 'string', 'http_referrer': 'string', - 'http_referrer_domain': 'string', 'http_user_agent': 'string', 'icmp_code': 'string', - 'icmp_type': 'string', 'id': 'string', 'ids_type': 'string', 'incident': 'string', 'ip': 'string', - 'mac': 'string', 'message_id': 'string', 'message_info': 'string', 'message_priority': 'string', - 'message_type': 'string', 'mitre_technique_id': 'string', 'msft': 'string', 'mskb': 'string', - 'name': 'string', 'orig_dest': 'string', 'orig_recipient': 'string', 'orig_src': 'string', - 'os': 'string', 'packets': 'string', 'packets_in': 'string', 'packets_out': 'string', - 'parent_process': 'string', 'parent_process_id': 'string', 'parent_process_name': 'string', - 'parent_process_path': 'string', 'password': 'string', 'payload': 'string', 'payload_type': 'string', - 'priority': 'string', 'problem': 'string', 'process': 'string', 'process_hash': 'string', - 'process_id': 'string', 'process_name': 'string', 'process_path': 'string', 'product_version': 'string', - 'protocol': 'string', 'protocol_version': 'string', 'query': 'string', 'query_count': 'string', - 'query_type': 'string', 'reason': 'string', 'recipient': 'string', 'recipient_count': 'string', - 'recipient_domain': 'string', 'recipient_status': 'string', 'record_type': 'string', - 'registry_hive': 'string', 'registry_key_name': 'string', 'registry_path': 'string', - 'registry_value_data': 'string', 'registry_value_name': 'string', 'registry_value_text': 'string', - 'registry_value_type': 'string', 'request_sent_time': 'string', 'request_payload': 'string', - 'request_payload_type': 'string', 'response_code': 'string', 'response_payload_type': 'string', - 'response_received_time': 'string', 'response_time': 'string', 'result': 'string', - 'return_addr': 'string', 'rule': 'string', 'rule_action': 'string', 'sender': 'string', - 'service': 'string', 'service_hash': 'string', 'service_id': 'string', 'service_name': 'string', - 'service_path': 'string', 'session_id': 'string', 'sessions': 'string', 'severity': 'string', - 'severity_id': 'string', 'sid': 'string', 'signature': 'string', 'signature_id': 'string', - 'signature_version': 'string', 'site': 'string', 'size': 'string', 'source': 'string', - 'sourcetype': 'string', 'src': 'string', 'src_bunit': 'string', 'src_category': 'string', - 'src_dns': 'string', 'src_interface': 'string', 'src_ip': 'string', 'src_ip_range': 'string', - 'src_mac': 'string', 'src_nt_domain': 'string', 'src_nt_host': 'string', 'src_port': 'string', - 'src_priority': 'string', 'src_translated_ip': 'string', 'src_translated_port': 'string', - 'src_type': 'string', 'src_user': 'string', 'src_user_bunit': 'string', 'src_user_category': 'string', - 'src_user_domain': 'string', 'src_user_id': 'string', 'src_user_priority': 'string', - 'src_user_role': 'string', 'src_user_type': 'string', 'src_zone': 'string', 'state': 'string', - 'status': 'string', 
'status_code': 'string', 'status_description': 'string', 'subject': 'string', - 'tag': 'string', 'ticket_id': 'string', 'time': 'string', 'time_submitted': 'string', - 'transport': 'string', 'transport_dest_port': 'string', 'type': 'string', 'uri': 'string', - 'uri_path': 'string', 'uri_query': 'string', 'url': 'string', 'url_domain': 'string', - 'url_length': 'string', 'user': 'string', 'user_agent': 'string', 'user_bunit': 'string', - 'user_category': 'string', 'user_id': 'string', 'user_priority': 'string', 'user_role': 'string', - 'user_type': 'string', 'vendor_account': 'string', 'vendor_product': 'string', 'vlan': 'string', - 'xdelay': 'string', 'xref': 'string' + "rule_name": "string", + "rule_title": "string", + "security_domain": "string", + "index": "string", + "rule_description": "string", + "risk_score": "string", + "host": "string", + "host_risk_object_type": "string", + "dest_risk_object_type": "string", + "dest_risk_score": "string", + "splunk_server": "string", + "_sourcetype": "string", + "_indextime": "string", + "_time": "string", + "src_risk_object_type": "string", + "src_risk_score": "string", + "_raw": "string", + "urgency": "string", + "owner": "string", + "info_min_time": "string", + "info_max_time": "string", + "comment": "string", + "reviewer": "string", + "rule_id": "string", + "action": "string", + "app": "string", + "authentication_method": "string", + "authentication_service": "string", + "bugtraq": "string", + "bytes": "string", + "bytes_in": "string", + "bytes_out": "string", + "category": "string", + "cert": "string", + "change": "string", + "change_type": "string", + "command": "string", + "comments": "string", + "cookie": "string", + "creation_time": "string", + "cve": "string", + "cvss": "string", + "date": "string", + "description": "string", + "dest": "string", + "dest_bunit": "string", + "dest_category": "string", + "dest_dns": "string", + "dest_interface": "string", + "dest_ip": "string", + "dest_ip_range": "string", + "dest_mac": "string", + "dest_nt_domain": "string", + "dest_nt_host": "string", + "dest_port": "string", + "dest_priority": "string", + "dest_translated_ip": "string", + "dest_translated_port": "string", + "dest_type": "string", + "dest_zone": "string", + "direction": "string", + "dlp_type": "string", + "dns": "string", + "duration": "string", + "dvc": "string", + "dvc_bunit": "string", + "dvc_category": "string", + "dvc_ip": "string", + "dvc_mac": "string", + "dvc_priority": "string", + "dvc_zone": "string", + "file_hash": "string", + "file_name": "string", + "file_path": "string", + "file_size": "string", + "http_content_type": "string", + "http_method": "string", + "http_referrer": "string", + "http_referrer_domain": "string", + "http_user_agent": "string", + "icmp_code": "string", + "icmp_type": "string", + "id": "string", + "ids_type": "string", + "incident": "string", + "ip": "string", + "mac": "string", + "message_id": "string", + "message_info": "string", + "message_priority": "string", + "message_type": "string", + "mitre_technique_id": "string", + "msft": "string", + "mskb": "string", + "name": "string", + "orig_dest": "string", + "orig_recipient": "string", + "orig_src": "string", + "os": "string", + "packets": "string", + "packets_in": "string", + "packets_out": "string", + "parent_process": "string", + "parent_process_id": "string", + "parent_process_name": "string", + "parent_process_path": "string", + "password": "string", + "payload": "string", + "payload_type": "string", + "priority": "string", + "problem": "string", + 
"process": "string", + "process_hash": "string", + "process_id": "string", + "process_name": "string", + "process_path": "string", + "product_version": "string", + "protocol": "string", + "protocol_version": "string", + "query": "string", + "query_count": "string", + "query_type": "string", + "reason": "string", + "recipient": "string", + "recipient_count": "string", + "recipient_domain": "string", + "recipient_status": "string", + "record_type": "string", + "registry_hive": "string", + "registry_key_name": "string", + "registry_path": "string", + "registry_value_data": "string", + "registry_value_name": "string", + "registry_value_text": "string", + "registry_value_type": "string", + "request_sent_time": "string", + "request_payload": "string", + "request_payload_type": "string", + "response_code": "string", + "response_payload_type": "string", + "response_received_time": "string", + "response_time": "string", + "result": "string", + "return_addr": "string", + "rule": "string", + "rule_action": "string", + "sender": "string", + "service": "string", + "service_hash": "string", + "service_id": "string", + "service_name": "string", + "service_path": "string", + "session_id": "string", + "sessions": "string", + "severity": "string", + "severity_id": "string", + "sid": "string", + "signature": "string", + "signature_id": "string", + "signature_version": "string", + "site": "string", + "size": "string", + "source": "string", + "sourcetype": "string", + "src": "string", + "src_bunit": "string", + "src_category": "string", + "src_dns": "string", + "src_interface": "string", + "src_ip": "string", + "src_ip_range": "string", + "src_mac": "string", + "src_nt_domain": "string", + "src_nt_host": "string", + "src_port": "string", + "src_priority": "string", + "src_translated_ip": "string", + "src_translated_port": "string", + "src_type": "string", + "src_user": "string", + "src_user_bunit": "string", + "src_user_category": "string", + "src_user_domain": "string", + "src_user_id": "string", + "src_user_priority": "string", + "src_user_role": "string", + "src_user_type": "string", + "src_zone": "string", + "state": "string", + "status": "string", + "status_code": "string", + "status_description": "string", + "subject": "string", + "tag": "string", + "ticket_id": "string", + "time": "string", + "time_submitted": "string", + "transport": "string", + "transport_dest_port": "string", + "type": "string", + "uri": "string", + "uri_path": "string", + "uri_query": "string", + "url": "string", + "url_domain": "string", + "url_length": "string", + "user": "string", + "user_agent": "string", + "user_bunit": "string", + "user_category": "string", + "user_id": "string", + "user_priority": "string", + "user_role": "string", + "user_type": "string", + "vendor_account": "string", + "vendor_product": "string", + "vlan": "string", + "xdelay": "string", + "xref": "string", } DRILLDOWN = { - 'Drilldown': { - 'action': 'string', 'app': 'string', 'authentication_method': 'string', - 'authentication_service': 'string', 'bugtraq': 'string', 'bytes': 'string', - 'bytes_in': 'string', 'bytes_out': 'string', 'category': 'string', 'cert': 'string', - 'change': 'string', 'change_type': 'string', 'command': 'string', 'comments': 'string', - 'cookie': 'string', 'creation_time': 'string', 'cve': 'string', 'cvss': 'string', - 'date': 'string', 'description': 'string', 'dest': 'string', 'dest_bunit': 'string', - 'dest_category': 'string', 'dest_dns': 'string', 'dest_interface': 'string', - 'dest_ip': 'string', 'dest_ip_range': 'string', 
'dest_mac': 'string', - 'dest_nt_domain': 'string', 'dest_nt_host': 'string', 'dest_port': 'string', - 'dest_priority': 'string', 'dest_translated_ip': 'string', - 'dest_translated_port': 'string', 'dest_type': 'string', 'dest_zone': 'string', - 'direction': 'string', 'dlp_type': 'string', 'dns': 'string', 'duration': 'string', - 'dvc': 'string', 'dvc_bunit': 'string', 'dvc_category': 'string', 'dvc_ip': 'string', - 'dvc_mac': 'string', 'dvc_priority': 'string', 'dvc_zone': 'string', - 'file_hash': 'string', 'file_name': 'string', 'file_path': 'string', - 'file_size': 'string', 'http_content_type': 'string', 'http_method': 'string', - 'http_referrer': 'string', 'http_referrer_domain': 'string', 'http_user_agent': 'string', - 'icmp_code': 'string', 'icmp_type': 'string', 'id': 'string', 'ids_type': 'string', - 'incident': 'string', 'ip': 'string', 'mac': 'string', 'message_id': 'string', - 'message_info': 'string', 'message_priority': 'string', 'message_type': 'string', - 'mitre_technique_id': 'string', 'msft': 'string', 'mskb': 'string', 'name': 'string', - 'orig_dest': 'string', 'orig_recipient': 'string', 'orig_src': 'string', 'os': 'string', - 'packets': 'string', 'packets_in': 'string', 'packets_out': 'string', - 'parent_process': 'string', 'parent_process_id': 'string', - 'parent_process_name': 'string', 'parent_process_path': 'string', 'password': 'string', - 'payload': 'string', 'payload_type': 'string', 'priority': 'string', 'problem': 'string', - 'process': 'string', 'process_hash': 'string', 'process_id': 'string', - 'process_name': 'string', 'process_path': 'string', 'product_version': 'string', - 'protocol': 'string', 'protocol_version': 'string', 'query': 'string', - 'query_count': 'string', 'query_type': 'string', 'reason': 'string', - 'recipient': 'string', 'recipient_count': 'string', 'recipient_domain': 'string', - 'recipient_status': 'string', 'record_type': 'string', 'registry_hive': 'string', - 'registry_key_name': 'string', 'registry_path': 'string', - 'registry_value_data': 'string', 'registry_value_name': 'string', - 'registry_value_text': 'string', 'registry_value_type': 'string', - 'request_payload': 'string', 'request_payload_type': 'string', - 'request_sent_time': 'string', 'response_code': 'string', - 'response_payload_type': 'string', 'response_received_time': 'string', - 'response_time': 'string', 'result': 'string', 'return_addr': 'string', 'rule': 'string', - 'rule_action': 'string', 'sender': 'string', 'service': 'string', - 'service_hash': 'string', 'service_id': 'string', 'service_name': 'string', - 'service_path': 'string', 'session_id': 'string', 'sessions': 'string', - 'severity': 'string', 'severity_id': 'string', 'sid': 'string', 'signature': 'string', - 'signature_id': 'string', 'signature_version': 'string', 'site': 'string', - 'size': 'string', 'source': 'string', 'sourcetype': 'string', 'src': 'string', - 'src_bunit': 'string', 'src_category': 'string', 'src_dns': 'string', - 'src_interface': 'string', 'src_ip': 'string', 'src_ip_range': 'string', - 'src_mac': 'string', 'src_nt_domain': 'string', 'src_nt_host': 'string', - 'src_port': 'string', 'src_priority': 'string', 'src_translated_ip': 'string', - 'src_translated_port': 'string', 'src_type': 'string', 'src_user': 'string', - 'src_user_bunit': 'string', 'src_user_category': 'string', 'src_user_domain': 'string', - 'src_user_id': 'string', 'src_user_priority': 'string', 'src_user_role': 'string', - 'src_user_type': 'string', 'src_zone': 'string', 'state': 'string', 'status': 'string', - 
'status_code': 'string', 'subject': 'string', 'tag': 'string', 'ticket_id': 'string', - 'time': 'string', 'time_submitted': 'string', 'transport': 'string', - 'transport_dest_port': 'string', 'type': 'string', 'uri': 'string', 'uri_path': 'string', - 'uri_query': 'string', 'url': 'string', 'url_domain': 'string', 'url_length': 'string', - 'user': 'string', 'user_agent': 'string', 'user_bunit': 'string', - 'user_category': 'string', 'user_id': 'string', 'user_priority': 'string', - 'user_role': 'string', 'user_type': 'string', 'vendor_account': 'string', - 'vendor_product': 'string', 'vlan': 'string', 'xdelay': 'string', 'xref': 'string' + "Drilldown": { + "action": "string", + "app": "string", + "authentication_method": "string", + "authentication_service": "string", + "bugtraq": "string", + "bytes": "string", + "bytes_in": "string", + "bytes_out": "string", + "category": "string", + "cert": "string", + "change": "string", + "change_type": "string", + "command": "string", + "comments": "string", + "cookie": "string", + "creation_time": "string", + "cve": "string", + "cvss": "string", + "date": "string", + "description": "string", + "dest": "string", + "dest_bunit": "string", + "dest_category": "string", + "dest_dns": "string", + "dest_interface": "string", + "dest_ip": "string", + "dest_ip_range": "string", + "dest_mac": "string", + "dest_nt_domain": "string", + "dest_nt_host": "string", + "dest_port": "string", + "dest_priority": "string", + "dest_translated_ip": "string", + "dest_translated_port": "string", + "dest_type": "string", + "dest_zone": "string", + "direction": "string", + "dlp_type": "string", + "dns": "string", + "duration": "string", + "dvc": "string", + "dvc_bunit": "string", + "dvc_category": "string", + "dvc_ip": "string", + "dvc_mac": "string", + "dvc_priority": "string", + "dvc_zone": "string", + "file_hash": "string", + "file_name": "string", + "file_path": "string", + "file_size": "string", + "http_content_type": "string", + "http_method": "string", + "http_referrer": "string", + "http_referrer_domain": "string", + "http_user_agent": "string", + "icmp_code": "string", + "icmp_type": "string", + "id": "string", + "ids_type": "string", + "incident": "string", + "ip": "string", + "mac": "string", + "message_id": "string", + "message_info": "string", + "message_priority": "string", + "message_type": "string", + "mitre_technique_id": "string", + "msft": "string", + "mskb": "string", + "name": "string", + "orig_dest": "string", + "orig_recipient": "string", + "orig_src": "string", + "os": "string", + "packets": "string", + "packets_in": "string", + "packets_out": "string", + "parent_process": "string", + "parent_process_id": "string", + "parent_process_name": "string", + "parent_process_path": "string", + "password": "string", + "payload": "string", + "payload_type": "string", + "priority": "string", + "problem": "string", + "process": "string", + "process_hash": "string", + "process_id": "string", + "process_name": "string", + "process_path": "string", + "product_version": "string", + "protocol": "string", + "protocol_version": "string", + "query": "string", + "query_count": "string", + "query_type": "string", + "reason": "string", + "recipient": "string", + "recipient_count": "string", + "recipient_domain": "string", + "recipient_status": "string", + "record_type": "string", + "registry_hive": "string", + "registry_key_name": "string", + "registry_path": "string", + "registry_value_data": "string", + "registry_value_name": "string", + "registry_value_text": "string", + 
"registry_value_type": "string", + "request_payload": "string", + "request_payload_type": "string", + "request_sent_time": "string", + "response_code": "string", + "response_payload_type": "string", + "response_received_time": "string", + "response_time": "string", + "result": "string", + "return_addr": "string", + "rule": "string", + "rule_action": "string", + "sender": "string", + "service": "string", + "service_hash": "string", + "service_id": "string", + "service_name": "string", + "service_path": "string", + "session_id": "string", + "sessions": "string", + "severity": "string", + "severity_id": "string", + "sid": "string", + "signature": "string", + "signature_id": "string", + "signature_version": "string", + "site": "string", + "size": "string", + "source": "string", + "sourcetype": "string", + "src": "string", + "src_bunit": "string", + "src_category": "string", + "src_dns": "string", + "src_interface": "string", + "src_ip": "string", + "src_ip_range": "string", + "src_mac": "string", + "src_nt_domain": "string", + "src_nt_host": "string", + "src_port": "string", + "src_priority": "string", + "src_translated_ip": "string", + "src_translated_port": "string", + "src_type": "string", + "src_user": "string", + "src_user_bunit": "string", + "src_user_category": "string", + "src_user_domain": "string", + "src_user_id": "string", + "src_user_priority": "string", + "src_user_role": "string", + "src_user_type": "string", + "src_zone": "string", + "state": "string", + "status": "string", + "status_code": "string", + "subject": "string", + "tag": "string", + "ticket_id": "string", + "time": "string", + "time_submitted": "string", + "transport": "string", + "transport_dest_port": "string", + "type": "string", + "uri": "string", + "uri_path": "string", + "uri_query": "string", + "url": "string", + "url_domain": "string", + "url_length": "string", + "user": "string", + "user_agent": "string", + "user_bunit": "string", + "user_category": "string", + "user_id": "string", + "user_priority": "string", + "user_role": "string", + "user_type": "string", + "vendor_account": "string", + "vendor_product": "string", + "vlan": "string", + "xdelay": "string", + "xref": "string", } } ASSET = { - 'Asset': { - 'asset': 'string', 'asset_id': 'string', 'asset_tag': 'string', 'bunit': 'string', - 'category': 'string', 'city': 'string', 'country': 'string', 'dns': 'string', - 'ip': 'string', 'is_expected': 'string', 'lat': 'string', 'long': 'string', 'mac': 'string', - 'nt_host': 'string', 'owner': 'string', 'pci_domain': 'string', 'priority': 'string', - 'requires_av': 'string' + "Asset": { + "asset": "string", + "asset_id": "string", + "asset_tag": "string", + "bunit": "string", + "category": "string", + "city": "string", + "country": "string", + "dns": "string", + "ip": "string", + "is_expected": "string", + "lat": "string", + "long": "string", + "mac": "string", + "nt_host": "string", + "owner": "string", + "pci_domain": "string", + "priority": "string", + "requires_av": "string", } } IDENTITY = { - 'Identity': { - 'bunit': 'string', 'category': 'string', 'email': 'string', 'endDate': 'string', 'first': 'string', - 'identity': 'string', 'identity_tag': 'string', 'last': 'string', 'managedBy': 'string', - 'nick': 'string', 'phone': 'string', 'prefix': 'string', 'priority': 'string', - 'startDate': 'string', 'suffix': 'string', 'watchlist': 'string', 'work_city': 'string', - 'work_lat': 'string', 'work_long': 'string' + "Identity": { + "bunit": "string", + "category": "string", + "email": "string", + "endDate": 
"string", + "first": "string", + "identity": "string", + "identity_tag": "string", + "last": "string", + "managedBy": "string", + "nick": "string", + "phone": "string", + "prefix": "string", + "priority": "string", + "startDate": "string", + "suffix": "string", + "watchlist": "string", + "work_city": "string", + "work_lat": "string", + "work_long": "string", } } def test_get_cim_mapping_field_command(mocker): - """ Scenario: When the mapping is based on Splunk CIM. """ + """Scenario: When the mapping is based on Splunk CIM.""" fields = splunk.get_cim_mapping_field_command() - assert fields == { - 'Notable Data': NOTABLE, - 'Drilldown Data': DRILLDOWN, - 'Asset Data': ASSET, - 'Identity Data': IDENTITY - } + assert fields == {"Notable Data": NOTABLE, "Drilldown Data": DRILLDOWN, "Asset Data": ASSET, "Identity Data": IDENTITY} def test_build_search_human_readable(mocker): @@ -2304,22 +2951,43 @@ def test_build_search_human_readable(mocker): * support commas and spaces inside header values (if surrounded with parenthesis) * rename headers """ - func_patch = mocker.patch('SplunkPy.update_headers_from_field_names') + func_patch = mocker.patch("SplunkPy.update_headers_from_field_names") results = [ - {'ID': 1, 'Header with space': 'h1', 'header3': 1, 'header_without_space': '1234', - 'old_header_1': '1', 'old_header_2': '2'}, - {'ID': 2, 'Header with space': 'h2', 'header3': 2, 'header_without_space': '1234', - 'old_header_1': '1', 'old_header_2': '2'}, + { + "ID": 1, + "Header with space": "h1", + "header3": 1, + "header_without_space": "1234", + "old_header_1": "1", + "old_header_2": "2", + }, + { + "ID": 2, + "Header with space": "h2", + "header3": 2, + "header_without_space": "1234", + "old_header_1": "1", + "old_header_2": "2", + }, ] args = { - 'query': 'something | table ID "Header with space" header3 header_without_space ' - 'comma,separated "Single,Header,with,Commas" old_header_1 old_header_2 | something else' - ' | rename old_header_1 AS new_header_1 old_header_2 AS new_header_2' + "query": 'something | table ID "Header with space" header3 header_without_space ' + 'comma,separated "Single,Header,with,Commas" old_header_1 old_header_2 | something else' + " | rename old_header_1 AS new_header_1 old_header_2 AS new_header_2" } - expected_headers = ['ID', 'Header with space', 'header3', 'header_without_space', - 'comma', 'separated', 'Single,Header,with,Commas', 'new_header_1', 'new_header_2'] + expected_headers = [ + "ID", + "Header with space", + "header3", + "header_without_space", + "comma", + "separated", + "Single,Header,with,Commas", + "new_header_1", + "new_header_2", + ] - splunk.build_search_human_readable(args, results, sid='123456') + splunk.build_search_human_readable(args, results, sid="123456") headers = func_patch.call_args[0][1] assert headers == expected_headers @@ -2336,17 +3004,16 @@ def test_build_search_human_readable_multi_table_in_query(mocker): Test headers are calculated correctly: * all expected header exist without duplications """ - args = { - "query": " table header_1, header_2 | stats state_1, state_2 | table header_1, header_2, header_3, header_4"} + args = {"query": " table header_1, header_2 | stats state_1, state_2 | table header_1, header_2, header_3, header_4"} results = [ - {'header_1': 'val_1', 'header_2': 'val_2', 'header_3': 'val_3', 'header_4': 'val_4'}, + {"header_1": "val_1", "header_2": "val_2", "header_3": "val_3", "header_4": "val_4"}, ] expected_headers_hr = "|header_1|header_2|header_3|header_4|\n|---|---|---|---|" - hr = 
splunk.build_search_human_readable(args, results, sid='123456') + hr = splunk.build_search_human_readable(args, results, sid="123456") assert expected_headers_hr in hr -@pytest.mark.parametrize('polling, fast_mode', [(False, True), (True, True)]) +@pytest.mark.parametrize("polling, fast_mode", [(False, True), (True, True)]) def test_build_search_kwargs(polling, fast_mode): """ Given: @@ -2358,25 +3025,28 @@ def test_build_search_kwargs(polling, fast_mode): Then: Ensure the query kwargs as expected. """ - args = {'earliest_time': '2021-11-23T10:10:10', 'latest_time': '2021-11-23T10:10:20', 'app': 'test_app', - 'fast_mode': fast_mode, 'polling': polling} + args = { + "earliest_time": "2021-11-23T10:10:10", + "latest_time": "2021-11-23T10:10:20", + "app": "test_app", + "fast_mode": fast_mode, + "polling": polling, + } kwargs_normalsearch = splunk.build_search_kwargs(args, polling) for field in args: - if field == 'polling': - assert 'exec_mode' in kwargs_normalsearch + if field == "polling": + assert "exec_mode" in kwargs_normalsearch if polling: - assert kwargs_normalsearch['exec_mode'] == 'normal' + assert kwargs_normalsearch["exec_mode"] == "normal" else: - assert kwargs_normalsearch['exec_mode'] == 'blocking' - elif field == 'fast_mode' and fast_mode: - assert kwargs_normalsearch['adhoc_search_level'] == 'fast' + assert kwargs_normalsearch["exec_mode"] == "blocking" + elif field == "fast_mode" and fast_mode: + assert kwargs_normalsearch["adhoc_search_level"] == "fast" else: assert field in kwargs_normalsearch -@pytest.mark.parametrize('polling,status', [ - (False, 'DONE'), (True, 'DONE'), (True, 'RUNNING') -]) +@pytest.mark.parametrize("polling,status", [(False, "DONE"), (True, "DONE"), (True, "RUNNING")]) def test_splunk_search_command(mocker, polling, status): """ Given: @@ -2397,23 +3067,21 @@ def test_splunk_search_command(mocker, polling, status): "polling": polling, } - mocker.patch.object(ScheduledCommand, 'raise_error_if_not_supported') + mocker.patch.object(ScheduledCommand, "raise_error_if_not_supported") search_result = splunk.splunk_search_command(Service(status), mock_args) search_result = search_result if isinstance(search_result, CommandResults) else search_result[0] if search_result.scheduled_command: - assert search_result.outputs['Status'] == status - assert search_result.scheduled_command._args['sid'] == '123456' + assert search_result.outputs["Status"] == status + assert search_result.scheduled_command._args["sid"] == "123456" else: - assert search_result.outputs['Splunk.Result'] == [] - assert search_result.readable_output == '### Splunk Search results for query:\n' \ - 'sid: 123456\n**No entries.**\n' + assert search_result.outputs["Splunk.Result"] == [] + assert search_result.readable_output == "### Splunk Search results for query:\n" "sid: 123456\n**No entries.**\n" -@pytest.mark.parametrize('messages,expected_msg', [ - ({'fatal': ['fatal msg']}, 'fatal msg'), - ({'error': ['error msg']}, 'error msg') -]) +@pytest.mark.parametrize( + "messages,expected_msg", [({"fatal": ["fatal msg"]}, "fatal msg"), ({"error": ["error msg"]}, "error msg")] +) def test_err_in_splunk_search(mocker, messages, expected_msg): """ Given: @@ -2432,15 +3100,14 @@ def test_err_in_splunk_search(mocker, messages, expected_msg): "fast_mode": "false", } service = Service(status="FAILED") - service.jobs.state.content['messages'] = messages + service.jobs.state.content["messages"] = messages with pytest.raises(DemistoException) as e: splunk.splunk_search_command(service, mock_args) - assert 
f'Failed to run the search in Splunk: {expected_msg}' in str(e) + assert f"Failed to run the search in Splunk: {expected_msg}" in str(e) @pytest.mark.parametrize( - argnames='credentials', - argvalues=[{'username': 'test', 'password': 'test'}, {'splunkToken': 'token', 'password': 'test'}] + argnames="credentials", argvalues=[{"username": "test", "password": "test"}, {"splunkToken": "token", "password": "test"}] ) def test_module_test(mocker, credentials): """ @@ -2454,8 +3121,8 @@ def test_module_test(mocker, credentials): - Validate the info method was called """ # prepare - mocker.patch.object(client.Service, 'info') - mocker.patch.object(client.Service, 'login') + mocker.patch.object(client.Service, "info") + mocker.patch.object(client.Service, "login") service = client.Service(**credentials) # run @@ -2466,8 +3133,7 @@ def test_module_test(mocker, credentials): @pytest.mark.parametrize( - argnames='credentials', - argvalues=[{'username': 'test', 'password': 'test'}, {'splunkToken': 'token', 'password': 'test'}] + argnames="credentials", argvalues=[{"username": "test", "password": "test"}, {"splunkToken": "token", "password": "test"}] ) def test_module__exception_raised(mocker, credentials): """ @@ -2485,9 +3151,9 @@ def test_module__exception_raised(mocker, credentials): def exception_raiser(): raise AuthenticationError - mocker.patch.object(AuthenticationError, '__init__', return_value=None) - mocker.patch.object(client.Service, 'info', side_effect=exception_raiser) - mocker.patch.object(client.Service, 'login') + mocker.patch.object(AuthenticationError, "__init__", return_value=None) + mocker.patch.object(client.Service, "info", side_effect=exception_raiser) + mocker.patch.object(client.Service, "login") return_error_mock = mocker.patch(RETURN_ERROR_TARGET) service = client.Service(**credentials) @@ -2496,7 +3162,7 @@ def exception_raiser(): splunk.test_module(service, {}) # validate - assert return_error_mock.call_args[0][0] == 'Authentication error, please validate your credentials.' + assert return_error_mock.call_args[0][0] == "Authentication error, please validate your credentials." 
def test_module_hec_url(mocker): @@ -2511,17 +3177,17 @@ def test_module_hec_url(mocker): - Validate that the request.get was called with the expected args """ # prepare - mocker.patch.object(client.Service, 'info') - mocker.patch.object(client.Service, 'login') - mocker.patch.object(requests, 'get') + mocker.patch.object(client.Service, "info") + mocker.patch.object(client.Service, "login") + mocker.patch.object(requests, "get") - service = client.Service(username='test', password='test') + service = client.Service(username="test", password="test") # run - splunk.test_module(service, {'hec_url': 'test_hec_url'}) + splunk.test_module(service, {"hec_url": "test_hec_url"}) # validate - assert requests.get.call_args[0][0] == 'test_hec_url/services/collector/health' + assert requests.get.call_args[0][0] == "test_hec_url/services/collector/health" def test_module_message_object(mocker): @@ -2537,10 +3203,10 @@ def test_module_message_object(mocker): """ # prepare message = results.Message("DEBUG", "There's something in that variable...") - mocker.patch('splunklib.results.JSONResultsReader', return_value=[message]) - service = mocker.patch('splunklib.client.connect', return_value=None) + mocker.patch("splunklib.results.JSONResultsReader", return_value=[message]) + service = mocker.patch("splunklib.client.connect", return_value=None) # run - splunk.test_module(service, {'isFetch': True, 'fetchQuery': 'something'}) + splunk.test_module(service, {"isFetch": True, "fetchQuery": "something"}) # validate assert service.info.call_count == 1 @@ -2558,41 +3224,33 @@ def test_labels_with_non_str_values(mocker): - Validate the Labels created in the incident are well formatted to avoid server errors on json.Unmarshal """ from SplunkPy import UserMappingObject + # prepare raw = { "message": "Authentication of user via Radius", - "actor_obj": { - "id": "test", - "type": "User", - "alternateId": "test", - "displayName": "test" - }, - "actor_list": [{ - "id": "test", - "type": "User", - "alternateId": "test", - "displayName": "test" - }], + "actor_obj": {"id": "test", "type": "User", "alternateId": "test", "displayName": "test"}, + "actor_list": [{"id": "test", "type": "User", "alternateId": "test", "displayName": "test"}], "actor_tuple": ("id", "test"), "num_val": 100, "bool_val": False, - "float_val": 100.0 + "float_val": 100.0, } mocked_response: list[results.Message | dict] = deepcopy(SAMPLE_RESPONSE) - mocked_response[1]['_raw'] = json.dumps(raw) - mock_last_run = {'time': '2018-10-24T14:13:20'} - mock_params = {'fetchQuery': "something", "parseNotableEventsRaw": True} - mocker.patch.object(demisto, 'incidents') - mocker.patch.object(demisto, 'setLastRun') - mocker.patch('demistomock.getLastRun', return_value=mock_last_run) - mocker.patch('demistomock.params', return_value=mock_params) - mocker.patch('splunklib.results.JSONResultsReader', return_value=mocked_response) + mocked_response[1]["_raw"] = json.dumps(raw) + mock_last_run = {"time": "2018-10-24T14:13:20"} + mock_params = {"fetchQuery": "something", "parseNotableEventsRaw": True} + mocker.patch.object(demisto, "incidents") + mocker.patch.object(demisto, "setLastRun") + mocker.patch("demistomock.getLastRun", return_value=mock_last_run) + mocker.patch("demistomock.params", return_value=mock_params) + mocker.patch("splunklib.results.JSONResultsReader", return_value=mocked_response) # run - service = mocker.patch('splunklib.client.connect', return_value=None) + service = mocker.patch("splunklib.client.connect", return_value=None) mapper = 
UserMappingObject(service, False) - splunk.fetch_incidents(service, mapper, comment_tag_to_splunk='comment_tag_to_splunk', - comment_tag_from_splunk='comment_tag_from_splunk') + splunk.fetch_incidents( + service, mapper, comment_tag_to_splunk="comment_tag_to_splunk", comment_tag_from_splunk="comment_tag_from_splunk" + ) incidents = demisto.incidents.call_args[0][0] # validate @@ -2600,7 +3258,7 @@ def test_labels_with_non_str_values(mocker): assert len(incidents) == 2 labels = incidents[0]["labels"] assert len(labels) >= 7 - assert all(isinstance(label['value'], str) for label in labels) + assert all(isinstance(label["value"], str) for label in labels) def test_empty_string_as_app_param_value(mocker): @@ -2615,21 +3273,24 @@ def test_empty_string_as_app_param_value(mocker): - Validate that the value of the 'app' key in connection_args is '-' """ # prepare - mock_params = {'app': '', 'host': '111', 'port': '111'} + mock_params = {"app": "", "host": "111", "port": "111"} # run connection_args = splunk.get_connection_args(mock_params) # validate - assert connection_args.get('app') == '-' + assert connection_args.get("app") == "-" -@pytest.mark.parametrize(argnames='host, expected_host', argvalues=[ - ('8.8.8.8', '8.8.8.8'), - ('8.8.8.8/', '8.8.8.8'), - ('https://www.test.com', 'www.test.com'), - ('https://www.test.com/', 'www.test.com'), -]) +@pytest.mark.parametrize( + argnames="host, expected_host", + argvalues=[ + ("8.8.8.8", "8.8.8.8"), + ("8.8.8.8/", "8.8.8.8"), + ("https://www.test.com", "www.test.com"), + ("https://www.test.com/", "www.test.com"), + ], +) def test_host_param(host, expected_host): """ Given: @@ -2639,28 +3300,35 @@ def test_host_param(host, expected_host): Then: - Ensure the host is as expected """ - params = {'host': host, 'port': '111'} + params = {"host": host, "port": "111"} - actuall_host = splunk.get_connection_args(params)['host'] + actuall_host = splunk.get_connection_args(params)["host"] assert actuall_host == expected_host -OWNER_MAPPING = [{'xsoar_user': 'test_xsoar', 'splunk_user': 'test_splunk', 'wait': True}, - {'xsoar_user': 'test_not_full', 'splunk_user': '', 'wait': True}, - {'xsoar_user': '', 'splunk_user': 'test_not_full', 'wait': True}, ] +OWNER_MAPPING = [ + {"xsoar_user": "test_xsoar", "splunk_user": "test_splunk", "wait": True}, + {"xsoar_user": "test_not_full", "splunk_user": "", "wait": True}, + {"xsoar_user": "", "splunk_user": "test_not_full", "wait": True}, +] MAPPER_CASES_XSOAR_TO_SPLUNK = [ - ('', 'unassigned', - 'UserMapping: Could not find splunk user matching xsoar\'s . Consider adding it to the splunk_xsoar_users lookup.'), - ('not_in_table', 'unassigned', - 'UserMapping: Could not find splunk user matching xsoar\'s not_in_table. ' - 'Consider adding it to the splunk_xsoar_users lookup.') - + ( + "", + "unassigned", + "UserMapping: Could not find splunk user matching xsoar's . Consider adding it to the splunk_xsoar_users lookup.", + ), + ( + "not_in_table", + "unassigned", + "UserMapping: Could not find splunk user matching xsoar's not_in_table. 
" + "Consider adding it to the splunk_xsoar_users lookup.", + ), ] -@pytest.mark.parametrize('xsoar_name, expected_splunk, expected_msg', MAPPER_CASES_XSOAR_TO_SPLUNK) +@pytest.mark.parametrize("xsoar_name, expected_splunk, expected_msg", MAPPER_CASES_XSOAR_TO_SPLUNK) def test_owner_mapping_mechanism_xsoar_to_splunk(mocker, xsoar_name, expected_splunk, expected_msg): """ Given: @@ -2676,12 +3344,12 @@ def test_owner_mapping_mechanism_xsoar_to_splunk(mocker, xsoar_name, expected_sp def mocked_get_record(col, value_to_search): return filter(lambda x: x[col] == value_to_search, OWNER_MAPPING[:-1]) - service = mocker.patch('splunklib.client.connect', return_value=None) - mapper = splunk.UserMappingObject(service, True, table_name='splunk_xsoar_users', - xsoar_user_column_name='xsoar_user', - splunk_user_column_name='splunk_user') - mocker.patch.object(mapper, '_get_record', side_effect=mocked_get_record) - error_mock = mocker.patch.object(demisto, 'error') + service = mocker.patch("splunklib.client.connect", return_value=None) + mapper = splunk.UserMappingObject( + service, True, table_name="splunk_xsoar_users", xsoar_user_column_name="xsoar_user", splunk_user_column_name="splunk_user" + ) + mocker.patch.object(mapper, "_get_record", side_effect=mocked_get_record) + error_mock = mocker.patch.object(demisto, "error") s_user = mapper.get_splunk_user_by_xsoar(xsoar_name) assert s_user == expected_splunk if error_mock.called: @@ -2689,19 +3357,27 @@ def mocked_get_record(col, value_to_search): MAPPER_CASES_SPLUNK_TO_XSOAR = [ - ('test_splunk', 'test_xsoar', None), - ('test_not_full', '', - "UserMapping: Xsoar user matching splunk's test_not_full is empty. Fix the record in splunk_xsoar_users lookup."), - ('unassigned', '', - "UserMapping: Could not find xsoar user matching splunk's unassigned. Consider adding it to the splunk_xsoar_users lookup."), - ('not_in_table', '', - "UserMapping: Could not find xsoar user matching splunk's not_in_table. " - "Consider adding it to the splunk_xsoar_users lookup.") - + ("test_splunk", "test_xsoar", None), + ( + "test_not_full", + "", + "UserMapping: Xsoar user matching splunk's test_not_full is empty. Fix the record in splunk_xsoar_users lookup.", + ), + ( + "unassigned", + "", + "UserMapping: Could not find xsoar user matching splunk's unassigned. Consider adding it to the splunk_xsoar_users lookup.", + ), + ( + "not_in_table", + "", + "UserMapping: Could not find xsoar user matching splunk's not_in_table. 
" + "Consider adding it to the splunk_xsoar_users lookup.", + ), ] -@pytest.mark.parametrize('splunk_name, expected_xsoar, expected_msg', MAPPER_CASES_SPLUNK_TO_XSOAR) +@pytest.mark.parametrize("splunk_name, expected_xsoar, expected_msg", MAPPER_CASES_SPLUNK_TO_XSOAR) def test_owner_mapping_mechanism_splunk_to_xsoar(mocker, splunk_name, expected_xsoar, expected_msg): """ Given: @@ -2717,12 +3393,12 @@ def test_owner_mapping_mechanism_splunk_to_xsoar(mocker, splunk_name, expected_x def mocked_get_record(col, value_to_search): return filter(lambda x: x[col] == value_to_search, OWNER_MAPPING) - service = mocker.patch('splunklib.client.connect', return_value=None) - mapper = splunk.UserMappingObject(service, True, table_name='splunk_xsoar_users', - xsoar_user_column_name='xsoar_user', - splunk_user_column_name='splunk_user') - mocker.patch.object(mapper, '_get_record', side_effect=mocked_get_record) - error_mock = mocker.patch.object(demisto, 'error') + service = mocker.patch("splunklib.client.connect", return_value=None) + mapper = splunk.UserMappingObject( + service, True, table_name="splunk_xsoar_users", xsoar_user_column_name="xsoar_user", splunk_user_column_name="splunk_user" + ) + mocker.patch.object(mapper, "_get_record", side_effect=mocked_get_record) + error_mock = mocker.patch.object(demisto, "error") s_user = mapper.get_xsoar_user_by_splunk(splunk_name) assert s_user == expected_xsoar if error_mock.called: @@ -2730,29 +3406,44 @@ def mocked_get_record(col, value_to_search): COMMAND_CASES = [ - ({'xsoar_username': 'test_xsoar'}, # case normal single username was provided - [{'SplunkUser': 'test_splunk', 'XsoarUser': 'test_xsoar'}]), - ({'xsoar_username': 'test_xsoar, Non existing'}, # case normal multiple usernames were provided - [{'SplunkUser': 'test_splunk', 'XsoarUser': 'test_xsoar'}, - {'SplunkUser': 'unassigned', 'XsoarUser': 'Non existing'}]), - ({'xsoar_username': 'Non Existing,'}, # case normal&empty multiple usernames were provided - [{'SplunkUser': 'unassigned', 'XsoarUser': 'Non Existing'}, - {'SplunkUser': 'Could not map splunk user, Check logs for more info.', 'XsoarUser': ''}]), - ({'xsoar_username': ['test_xsoar', 'Non existing']}, # case normal&missing multiple usernames were provided - [{'SplunkUser': 'test_splunk', 'XsoarUser': 'test_xsoar'}, - {'SplunkUser': 'unassigned', 'XsoarUser': 'Non existing'}]), - ({'xsoar_username': ['test_xsoar', 'Non existing'], 'map_missing': False}, - # case normal & missing multiple usernames were provided without missing's mapping activated - [{'SplunkUser': 'test_splunk', 'XsoarUser': 'test_xsoar'}, - {'SplunkUser': 'Could not map splunk user, Check logs for more info.', 'XsoarUser': 'Non existing'}]), - ({'xsoar_username': 'Non Existing,', 'map_missing': False}, # case missing&empty multiple usernames were provided - [{'SplunkUser': 'Could not map splunk user, Check logs for more info.', 'XsoarUser': 'Non Existing'}, - {'SplunkUser': 'Could not map splunk user, Check logs for more info.', 'XsoarUser': ''}] - ), + ( + {"xsoar_username": "test_xsoar"}, # case normal single username was provided + [{"SplunkUser": "test_splunk", "XsoarUser": "test_xsoar"}], + ), + ( + {"xsoar_username": "test_xsoar, Non existing"}, # case normal multiple usernames were provided + [{"SplunkUser": "test_splunk", "XsoarUser": "test_xsoar"}, {"SplunkUser": "unassigned", "XsoarUser": "Non existing"}], + ), + ( + {"xsoar_username": "Non Existing,"}, # case normal&empty multiple usernames were provided + [ + {"SplunkUser": "unassigned", "XsoarUser": "Non 
Existing"}, + {"SplunkUser": "Could not map splunk user, Check logs for more info.", "XsoarUser": ""}, + ], + ), + ( + {"xsoar_username": ["test_xsoar", "Non existing"]}, # case normal&missing multiple usernames were provided + [{"SplunkUser": "test_splunk", "XsoarUser": "test_xsoar"}, {"SplunkUser": "unassigned", "XsoarUser": "Non existing"}], + ), + ( + {"xsoar_username": ["test_xsoar", "Non existing"], "map_missing": False}, + # case normal & missing multiple usernames were provided without missing's mapping activated + [ + {"SplunkUser": "test_splunk", "XsoarUser": "test_xsoar"}, + {"SplunkUser": "Could not map splunk user, Check logs for more info.", "XsoarUser": "Non existing"}, + ], + ), + ( + {"xsoar_username": "Non Existing,", "map_missing": False}, # case missing&empty multiple usernames were provided + [ + {"SplunkUser": "Could not map splunk user, Check logs for more info.", "XsoarUser": "Non Existing"}, + {"SplunkUser": "Could not map splunk user, Check logs for more info.", "XsoarUser": ""}, + ], + ), ] -@pytest.mark.parametrize('xsoar_names, expected_outputs', COMMAND_CASES) +@pytest.mark.parametrize("xsoar_names, expected_outputs", COMMAND_CASES) def test_get_splunk_user_by_xsoar_command(mocker, xsoar_names, expected_outputs): """ Given: a list of xsoar users @@ -2763,21 +3454,22 @@ def test_get_splunk_user_by_xsoar_command(mocker, xsoar_names, expected_outputs) def mocked_get_record(col, value_to_search): return filter(lambda x: x[col] == value_to_search, OWNER_MAPPING[:-1]) - service = mocker.patch('splunklib.client.connect', return_value=None) + service = mocker.patch("splunklib.client.connect", return_value=None) - mapper = splunk.UserMappingObject(service, True, table_name='splunk_xsoar_users', - xsoar_user_column_name='xsoar_user', - splunk_user_column_name='splunk_user') + mapper = splunk.UserMappingObject( + service, True, table_name="splunk_xsoar_users", xsoar_user_column_name="xsoar_user", splunk_user_column_name="splunk_user" + ) # Ignoring logging pytest error - mocker.patch.object(demisto, 'error') - mocker.patch.object(mapper, '_get_record', side_effect=mocked_get_record) + mocker.patch.object(demisto, "error") + mocker.patch.object(mapper, "_get_record", side_effect=mocked_get_record) res = mapper.get_splunk_user_by_xsoar_command(xsoar_names) assert res.outputs == expected_outputs -@pytest.mark.parametrize(argnames='username, expected_username, basic_auth', argvalues=[ - ('test_user', 'test_user', False), - ('test@_basic', 'test', True)]) +@pytest.mark.parametrize( + argnames="username, expected_username, basic_auth", + argvalues=[("test_user", "test_user", False), ("test@_basic", "test", True)], +) def test_basic_authentication_param(mocker, username, expected_username, basic_auth): """ Given: - the username contain '@_basic' suffix @@ -2786,30 +3478,30 @@ def test_basic_authentication_param(mocker, username, expected_username, basic_a """ mocked_params = { - 'host': 'test_host', - 'port': '8089', - 'proxy': 'false', - 'authentication': { - 'identifier': username, - 'password': 'test_password' - } + "host": "test_host", + "port": "8089", + "proxy": "false", + "authentication": {"identifier": username, "password": "test_password"}, } - mocker.patch.object(client, 'connect') - mocker.patch.object(demisto, 'params', return_value=mocked_params) - mocker.patch.object(demisto, 'command', return_value='not_impl_command') + mocker.patch.object(client, "connect") + mocker.patch.object(demisto, "params", return_value=mocked_params) + mocker.patch.object(demisto, 
"command", return_value="not_impl_command") with pytest.raises(NotImplementedError): splunk.main() - assert client.connect.call_args[1]['username'] == expected_username - assert ('basic' in client.connect.call_args[1]) == basic_auth + assert client.connect.call_args[1]["username"] == expected_username + assert ("basic" in client.connect.call_args[1]) == basic_auth -@pytest.mark.parametrize(argnames='host, expected_base_url', argvalues=[ - ('8.8.8.8', 'https://8.8.8.8:8089/'), - ('https://www.test.com', 'https://www.test.com:8089/'), - ('http://www.test.com', 'https://http://www.test.com:8089/'), # we don't want to silently replace http with https -]) +@pytest.mark.parametrize( + argnames="host, expected_base_url", + argvalues=[ + ("8.8.8.8", "https://8.8.8.8:8089/"), + ("https://www.test.com", "https://www.test.com:8089/"), + ("http://www.test.com", "https://http://www.test.com:8089/"), # we don't want to silently replace http with https + ], +) def test_base_url(mocker, host, expected_base_url): """ Given: - Different host values @@ -2818,30 +3510,23 @@ def test_base_url(mocker, host, expected_base_url): """ mocked_params = { - 'host': host, - 'port': '8089', - 'proxy': 'false', - 'authentication': { - 'identifier': 'username', - 'password': 'test_password' - } + "host": host, + "port": "8089", + "proxy": "false", + "authentication": {"identifier": "username", "password": "test_password"}, } - mocker.patch.object(demisto, 'command', return_value='splunk-notable-event-edit') - mocker.patch.object(demisto, 'params', return_value=mocked_params) - mocker.patch.object(client, 'connect') + mocker.patch.object(demisto, "command", return_value="splunk-notable-event-edit") + mocker.patch.object(demisto, "params", return_value=mocked_params) + mocker.patch.object(client, "connect") - cmd = mocker.patch.object(splunk, 'splunk_edit_notable_event_command') + cmd = mocker.patch.object(splunk, "splunk_edit_notable_event_command") splunk.main() assert cmd.call_args[0][0] == expected_base_url @pytest.mark.parametrize( - 'item, expected', - [ - ({'message': 'Test message'}, False), - (results.Message('INFO', 'Test message'), True) - ] + "item, expected", [({"message": "Test message"}, False), (results.Message("INFO", "Test message"), True)] ) def test_handle_message(item: dict | results.Message, expected: bool): """ @@ -2859,35 +3544,33 @@ def test_single_drilldown_searches(mocker): """ drilldown_searches = json.dumps( - { - "name": "test drilldown", - "search": "| from datamodel: test", - "earliest": 1719218100, - "latest": 1719823500 - } + {"name": "test drilldown", "search": "| from datamodel: test", "earliest": 1719218100, "latest": 1719823500} ) - mocker.patch.object(demisto, 'error') - mocker.patch.object(splunk, 'build_drilldown_search', return_value=None) + mocker.patch.object(demisto, "error") + mocker.patch.object(splunk, "build_drilldown_search", return_value=None) splunk.drilldown_enrichment( - service=None, - notable_data={'drilldown_searches': drilldown_searches, 'event_id': 'test_id'}, - num_enrichment_events=1) + service=None, notable_data={"drilldown_searches": drilldown_searches, "event_id": "test_id"}, num_enrichment_events=1 + ) - assert demisto.error.call_count == 0, 'Something was wrong in the drilldown_enrichment process' + assert demisto.error.call_count == 0, "Something was wrong in the drilldown_enrichment process" @pytest.mark.parametrize( - 'drilldown_data, expected', - [({'drilldown_search': 'test'}, ['test']), - ({'drilldown_searches': '{"search_1":"test_1"}'}, [{'search_1': 
'test_1'}]), - ({'drilldown_searches': ['{"search_1":"test_1"}', '{"search_2":"test_2"}']}, - [{'search_1': 'test_1'}, {'search_2': 'test_2'}]), - ({'drilldown_searches': '[{"search_1":"test_1"}]'}, - [{'search_1': 'test_1'}]), - ({'drilldown_searches': '[{"search_1":"test_1"}, {"search_2":"test_2"}]'}, - [{'search_1': 'test_1'}, {'search_2': 'test_2'}]) - ] + "drilldown_data, expected", + [ + ({"drilldown_search": "test"}, ["test"]), + ({"drilldown_searches": '{"search_1":"test_1"}'}, [{"search_1": "test_1"}]), + ( + {"drilldown_searches": ['{"search_1":"test_1"}', '{"search_2":"test_2"}']}, + [{"search_1": "test_1"}, {"search_2": "test_2"}], + ), + ({"drilldown_searches": '[{"search_1":"test_1"}]'}, [{"search_1": "test_1"}]), + ( + {"drilldown_searches": '[{"search_1":"test_1"}, {"search_2":"test_2"}]'}, + [{"search_1": "test_1"}, {"search_2": "test_2"}], + ), + ], ) def test_get_drilldown_searches(drilldown_data, expected): """ @@ -2904,10 +3587,14 @@ def test_get_drilldown_searches(drilldown_data, expected): assert splunk.get_drilldown_searches(drilldown_data) == expected -@pytest.mark.parametrize('drilldown_search, expected_res', - [('{"name":"test", "query":"|key="the value""}', 'key="the value"'), - ('{"name":"test", "query":"|key in (line_1\nline_2)"}', 'key in (line_1,line_2)'), - ('{"name":"test", "query":"search a=$a|s$ c=$c$ suffix"}', 'search a=$a|s$ c=$c$ suffix')]) +@pytest.mark.parametrize( + "drilldown_search, expected_res", + [ + ('{"name":"test", "query":"|key="the value""}', 'key="the value"'), + ('{"name":"test", "query":"|key in (line_1\nline_2)"}', "key in (line_1,line_2)"), + ('{"name":"test", "query":"search a=$a|s$ c=$c$ suffix"}', "search a=$a|s$ c=$c$ suffix"), + ], +) def test_escape_invalid_chars_in_drilldown_json(drilldown_search, expected_res): """ Scenario: When extracting the drilldown search query which are a json string, @@ -2928,7 +3615,7 @@ def test_escape_invalid_chars_in_drilldown_json(drilldown_search, expected_res): res = splunk.escape_invalid_chars_in_drilldown_json(drilldown_search) - assert expected_res in json.loads(res)['query'] + assert expected_res in json.loads(res)["query"] # Define minimal classes to simulate the service and index behavior @@ -2947,13 +3634,11 @@ def __init__(self, indexes): [ # Test case: All indexes exist in the service (["index1", "index2"], ["index1", "index2", "index3"], True), - # Test case: Some indexes do not exist in the service (["index1", "index4"], ["index1", "index2", "index3"], False), - # Test case: Empty input indexes list ([], ["index1", "index2", "index3"], True), - ] + ], ) def test_validate_indexes(given_indexes, service_indexes, expected): """ @@ -2963,6 +3648,7 @@ def test_validate_indexes(given_indexes, service_indexes, expected): otherwise, it returns `False`. 
""" from SplunkPy import validate_indexes + service = ServiceIndex(service_indexes) # Assert that the function returns the expected result assert validate_indexes(given_indexes, service) == expected @@ -2973,16 +3659,13 @@ def test_validate_indexes(given_indexes, service_indexes, expected): [ # Valid JSON input ('{"key": "value"}', {"key": "value"}), - # Valid JSON with multiple key-value pairs ('{"key1": "value1", "key2": 2}', {"key1": "value1", "key2": 2}), - # Invalid JSON input (non-JSON string) ("not a json string", {"fields": "not a json string"}), - # Another invalid JSON input (partially structured JSON) ("{'key': 'value'}", {"fields": "{'key': 'value'}"}), - ] + ], ) def test_parse_fields(fields, expected): """ @@ -2992,20 +3675,41 @@ def test_parse_fields(fields, expected): the function returns a dictionary with a single key-value pair, where the entire input string is the key. """ from SplunkPy import parse_fields + result = parse_fields(fields) assert result == expected -@pytest.mark.parametrize("event, batch_event_data, entry_id, expected_data", [ - ("Somthing happened", None, None, '{"event": "Somthing happened", "fields": {"field1": "value1"}, "index": "main"}'), - (None, "{'event': 'some event', 'index': 'some index'} {'event': 'some event', 'index': 'some index'}", None, - "{'event': 'some event', 'index': 'some index'} {'event': 'some event', 'index': 'some index'}"), # Batch event data - (None, None, "some entry_id", - "{'event': 'some event', 'index': 'some index'} {'event': 'some event', 'index': 'some index'}"), - (None, """{'event': "some event's", 'index': 'some index'} {'event': 'some event', 'index': 'some index'}""", None, - """{'event': "some event's", 'index': 'some index'} {'event': 'some event', 'index': 'some index'}"""), # with ' - (None, None, "some entry_id", "{'event': 'some event', 'index': 'some index'} {'event': 'some event', 'index': 'some index'}") -]) +@pytest.mark.parametrize( + "event, batch_event_data, entry_id, expected_data", + [ + ("Somthing happened", None, None, '{"event": "Somthing happened", "fields": {"field1": "value1"}, "index": "main"}'), + ( + None, + "{'event': 'some event', 'index': 'some index'} {'event': 'some event', 'index': 'some index'}", + None, + "{'event': 'some event', 'index': 'some index'} {'event': 'some event', 'index': 'some index'}", + ), # Batch event data + ( + None, + None, + "some entry_id", + "{'event': 'some event', 'index': 'some index'} {'event': 'some event', 'index': 'some index'}", + ), + ( + None, + """{'event': "some event's", 'index': 'some index'} {'event': 'some event', 'index': 'some index'}""", + None, + """{'event': "some event's", 'index': 'some index'} {'event': 'some event', 'index': 'some index'}""", + ), # with ' + ( + None, + None, + "some entry_id", + "{'event': 'some event', 'index': 'some index'} {'event': 'some event', 'index': 'some index'}", + ), + ], +) @patch("requests.post") @patch("SplunkPy.get_events_from_file") @patch("SplunkPy.extract_indexes") @@ -3020,7 +3724,7 @@ def test_splunk_submit_event_hec( event, batch_event_data, entry_id, - expected_data + expected_data, ): """ Given: Different types of event submission (single event, batch event, entry_id). @@ -3028,6 +3732,7 @@ def test_splunk_submit_event_hec( Then: Ensure a POST request is sent with the correct data and headers. 
""" from SplunkPy import splunk_submit_event_hec + # Arrange hec_token = "valid_token" baseurl = "https://splunk.example.com" @@ -3040,15 +3745,16 @@ def test_splunk_submit_event_hec( if event: # Single event - mock_extract_indexes.return_value = ['some index'] + mock_extract_indexes.return_value = ["some index"] elif batch_event_data: # Batch event data - mock_extract_indexes.return_value = ['some index1', 'some index2'] + mock_extract_indexes.return_value = ["some index1", "some index2"] elif entry_id: # Entry ID - mock_get_events_from_file.return_value =\ + mock_get_events_from_file.return_value = ( "{'event': 'some event', 'index': 'some index'} {'event': 'some event', 'index': 'some index'}" - mock_extract_indexes.return_value = ['some index1', 'some index2'] + ) + mock_extract_indexes.return_value = ["some index1", "some index2"] # Act splunk_submit_event_hec( @@ -3080,70 +3786,83 @@ def test_splunk_submit_event_hec( def test_splunk_submit_event_hec_command_no_required_arguments(): - """ Given: none of these arguments: 'entry_id', 'event', 'batch_event_data' - When: Runing splunk-submit-event-hec command - Then: An exception is thrown + """Given: none of these arguments: 'entry_id', 'event', 'batch_event_data' + When: Runing splunk-submit-event-hec command + Then: An exception is thrown """ from SplunkPy import splunk_submit_event_hec_command - with pytest.raises(DemistoException, - match=r"Invalid input: Please specify one of the following arguments: `event`, " - r"`batch_event_data`, or `entry_id`."): - splunk_submit_event_hec_command({'hec_url': 'hec_url'}, None, {}) - - -@pytest.mark.parametrize("events, expected_result", [ - ("{'index': 'index1', 'event': 'Something happend '} {'index': 'index 2', 'event': 'Something's happend'}", - ['index1', 'index 2']), - ({'index': 'index1', 'value': '123'}, ['index1']), - ("{'event': 'value'}", []), - ('{"index": "index: 3", "event": "Something happend"}, {"index": "index: 3", "event": "Something happend"}', - ['index: 3', 'index: 3']), - ("{'key': 'value'}, {'key': 'value'}", []), - ("""{"index": "index_3", "event": "Something` happend"}, {"index": "index-4", "event": "Something' happend"}""", - ['index_3', 'index-4']), -]) + + with pytest.raises( + DemistoException, + match=r"Invalid input: Please specify one of the following arguments: `event`, " r"`batch_event_data`, or `entry_id`.", + ): + splunk_submit_event_hec_command({"hec_url": "hec_url"}, None, {}) + + +@pytest.mark.parametrize( + "events, expected_result", + [ + ( + "{'index': 'index1', 'event': 'Something happend '} {'index': 'index 2', 'event': 'Something's happend'}", + ["index1", "index 2"], + ), + ({"index": "index1", "value": "123"}, ["index1"]), + ("{'event': 'value'}", []), + ( + '{"index": "index: 3", "event": "Something happend"}, {"index": "index: 3", "event": "Something happend"}', + ["index: 3", "index: 3"], + ), + ("{'key': 'value'}, {'key': 'value'}", []), + ( + """{"index": "index_3", "event": "Something` happend"}, {"index": "index-4", "event": "Something' happend"}""", + ["index_3", "index-4"], + ), + ], +) def test_extract_indexes(events, expected_result): from SplunkPy import extract_indexes + assert extract_indexes(events) == expected_result -@pytest.mark.parametrize(argnames='should_map_user', argvalues=[True, False]) +@pytest.mark.parametrize(argnames="should_map_user", argvalues=[True, False]) def test_get_modified_remote_data_command_with_user_mapping(mocker, should_map_user): - """ Given: - - Different values for the splunk.UserMappingObject.should_map 
arguments
-        and `notable` query response without 'owner' key
-        When:
-        - Runing test_get_modified_remote_data_command
-        Then:
-        - Verify the correct owner are returned.
+    """Given:
+    - Different values for the splunk.UserMappingObject.should_map arguments
+    and `notable` query response without 'owner' key
+    When:
+    - Running test_get_modified_remote_data_command
+    Then:
+    - Verify the correct owner is returned.
     """
     notable_without_owner = deepcopy(SAMPLE_RESPONSE[2])
-    del notable_without_owner['owner']
+    del notable_without_owner["owner"]
 
-    mapped_user = 'mapped_splunk_user'
+    mapped_user = "mapped_splunk_user"
 
-    mocker.patch.object(demisto, 'results')
-    mocker.patch.object(demisto, 'params', return_value={'timezone': '0'})
-    mocker.patch.object(splunk.UserMappingObject, 'get_xsoar_user_by_splunk', return_value=mapped_user)
-    mocker.patch('SplunkPy.results.JSONResultsReader', side_effect=lambda res: res)
-    mocked_service = mocker.patch('SplunkPy.client.Service')
-    mocked_service.jobs.oneshot = \
-        lambda query, **kwargs: [SAMPLE_INCIDENT_REVIEW_RESPONSE[0]] if '`incident_review`' in query else [notable_without_owner]
+    mocker.patch.object(demisto, "results")
+    mocker.patch.object(demisto, "params", return_value={"timezone": "0"})
+    mocker.patch.object(splunk.UserMappingObject, "get_xsoar_user_by_splunk", return_value=mapped_user)
+    mocker.patch("SplunkPy.results.JSONResultsReader", side_effect=lambda res: res)
+    mocked_service = mocker.patch("SplunkPy.client.Service")
+    mocked_service.jobs.oneshot = (
+        lambda query, **kwargs: [SAMPLE_INCIDENT_REVIEW_RESPONSE[0]] if "`incident_review`" in query else [notable_without_owner]
+    )
 
     splunk.get_modified_remote_data_command(
         mocked_service,
-        args={'lastUpdate': '2021-02-09T16:41:30.589575+02:00'},
+        args={"lastUpdate": "2021-02-09T16:41:30.589575+02:00"},
         mapper=splunk.UserMappingObject(mocked_service, should_map_user),
-        comment_tag_from_splunk='comment_tag_from_splunk',
+        comment_tag_from_splunk="comment_tag_from_splunk",
         close_incident=True,
         close_end_statuses=False,
-        close_extra_labels=[]
+        close_extra_labels=[],
     )
 
-    contents = demisto.results.call_args[0][0][0]['Contents']
-    expected_owner = mapped_user if should_map_user else SAMPLE_INCIDENT_REVIEW_RESPONSE[0]['owner']
-    assert len(contents['SplunkComments']) == 3
-    assert contents['owner'] == expected_owner
+    contents = demisto.results.call_args[0][0][0]["Contents"]
+    expected_owner = mapped_user if should_map_user else SAMPLE_INCIDENT_REVIEW_RESPONSE[0]["owner"]
+    assert len(contents["SplunkComments"]) == 3
+    assert contents["owner"] == expected_owner
 
 
 def test_mirror_in_with_enrichment_enabled(mocker):
@@ -3159,40 +3878,42 @@ def test_mirror_in_with_enrichment_enabled(mocker):
     # create an integration context in order to simulate the context in a normal run. 
integration_context = { splunk.CACHE: json.dumps( - {splunk.SUBMITTED_NOTABLES: [splunk.Notable(SAMPLE_RESPONSE[2])]}, default=lambda obj: obj.__dict__), + {splunk.SUBMITTED_NOTABLES: [splunk.Notable(SAMPLE_RESPONSE[2])]}, default=lambda obj: obj.__dict__ + ), } - mocker.patch('SplunkPy.set_integration_context') - mocker.patch('SplunkPy.get_integration_context', return_value=integration_context) - mocker.patch.object(demisto, 'params', return_value={'timezone': '0'}) - mocker.patch.object(splunk, 'ENABLED_ENRICHMENTS', new=[splunk.DRILLDOWN_ENRICHMENT]) - mocker.patch('SplunkPy.results.JSONResultsReader', side_effect=lambda res: res) - mocker.patch.object(splunk.UserMappingObject, 'get_xsoar_user_by_splunk', return_value='after_mirror_owner') - mocked_service = mocker.patch('SplunkPy.client.Service') + mocker.patch("SplunkPy.set_integration_context") + mocker.patch("SplunkPy.get_integration_context", return_value=integration_context) + mocker.patch.object(demisto, "params", return_value={"timezone": "0"}) + mocker.patch.object(splunk, "ENABLED_ENRICHMENTS", new=[splunk.DRILLDOWN_ENRICHMENT]) + mocker.patch("SplunkPy.results.JSONResultsReader", side_effect=lambda res: res) + mocker.patch.object(splunk.UserMappingObject, "get_xsoar_user_by_splunk", return_value="after_mirror_owner") + mocked_service = mocker.patch("SplunkPy.client.Service") # the get_modified_remote_data send two queries to Splunk, # the first one used the `incident_review` macro and the second used the `notable` - notable_delta = {'status_label': 'after_mirror_status', 'urgency': 'after_mirror_urgency'} + notable_delta = {"status_label": "after_mirror_status", "urgency": "after_mirror_urgency"} updated_notable = SAMPLE_RESPONSE[2] | notable_delta - mocked_service.jobs.oneshot = \ - lambda query, **kwargs: [SAMPLE_INCIDENT_REVIEW_RESPONSE[0]] if '`incident_review`' in query else [updated_notable] + mocked_service.jobs.oneshot = ( + lambda query, **kwargs: [SAMPLE_INCIDENT_REVIEW_RESPONSE[0]] if "`incident_review`" in query else [updated_notable] + ) splunk.get_modified_remote_data_command( mocked_service, - args={'lastUpdate': '2021-02-09T16:41:30.589575+02:00'}, + args={"lastUpdate": "2021-02-09T16:41:30.589575+02:00"}, mapper=splunk.UserMappingObject(mocked_service, True), - comment_tag_from_splunk='comment_tag_from_splunk', + comment_tag_from_splunk="comment_tag_from_splunk", close_incident=True, close_end_statuses=False, - close_extra_labels=[] + close_extra_labels=[], ) - notable_id = SAMPLE_RESPONSE[2]['event_id'] + notable_id = SAMPLE_RESPONSE[2]["event_id"] mirrored_enriching_natables = splunk.set_integration_context.call_args[0][0][splunk.MIRRORED_ENRICHING_NOTABLES] actual_mirrored_notable_delta = mirrored_enriching_natables[notable_id] # 'SplunkComments' is calculated and added in the mirror process and not returned from Splunk - assert 'SplunkComments' not in updated_notable - assert len(actual_mirrored_notable_delta['SplunkComments']) == 3 - assert actual_mirrored_notable_delta['owner'] == 'after_mirror_owner' + assert "SplunkComments" not in updated_notable + assert len(actual_mirrored_notable_delta["SplunkComments"]) == 3 + assert actual_mirrored_notable_delta["owner"] == "after_mirror_owner" assert all(actual_mirrored_notable_delta[k] == v for k, v in notable_delta.items()) @@ -3205,24 +3926,27 @@ def test_user_mapping_used_cache(mocker): Then: - Validate that the function use cache to store the mapped values and called only once. 
""" - mocker.patch.object(demisto, 'error') - mocked_service = mocker.patch('SplunkPy.client.Service') + mocker.patch.object(demisto, "error") + mocked_service = mocker.patch("SplunkPy.client.Service") mapper = splunk.UserMappingObject(mocked_service, True) for _ in range(5): - mapper.get_xsoar_user_by_splunk('test_splunk_user') + mapper.get_xsoar_user_by_splunk("test_splunk_user") assert mocked_service.kvstore.__getitem__().data.query.call_count == 1 -@pytest.mark.parametrize("query, expected_query", [ - ("search index=_internal", "search index=_internal"), - ("| inputlookup some_lookup", "| inputlookup some_lookup"), - ("index=_internal", "search index=_internal") -]) +@pytest.mark.parametrize( + "query, expected_query", + [ + ("search index=_internal", "search index=_internal"), + ("| inputlookup some_lookup", "| inputlookup some_lookup"), + ("index=_internal", "search index=_internal"), + ], +) def test_splunk_job_create_command(mocker, query, expected_query): - mocked_service = mocker.patch('SplunkPy.client.Service') + mocked_service = mocker.patch("SplunkPy.client.Service") mocked_create_job = MagicMock() mocked_service.jobs.create = mocked_create_job - mocker.patch('SplunkPy.return_results') - args = {'query': query} + mocker.patch("SplunkPy.return_results") + args = {"query": query} splunk.splunk_job_create_command(mocked_service, args) mocked_create_job.assert_called_once_with(expected_query, exec_mode="normal", app="") diff --git a/Packs/SplunkPy/ReleaseNotes/3_2_6.md b/Packs/SplunkPy/ReleaseNotes/3_2_6.md new file mode 100644 index 000000000000..f3e2615b5a7f --- /dev/null +++ b/Packs/SplunkPy/ReleaseNotes/3_2_6.md @@ -0,0 +1,24 @@ + +#### Integrations + +##### SplunkPy + +- Metadata and documentation improvements. + +#### Scripts + +##### SplunkShowDrilldown + +- Metadata and documentation improvements. +##### SplunkShowAsset + +- Metadata and documentation improvements. +##### SplunkConvertCommentsToTable + +- Metadata and documentation improvements. +##### SplunkAddComment + +- Metadata and documentation improvements. +##### SplunkShowIdentity + +- Metadata and documentation improvements. diff --git a/Packs/SplunkPy/Scripts/SplunkAddComment/SplunkAddComment.py b/Packs/SplunkPy/Scripts/SplunkAddComment/SplunkAddComment.py index c8edef5b92dc..27ada6fa6219 100644 --- a/Packs/SplunkPy/Scripts/SplunkAddComment/SplunkAddComment.py +++ b/Packs/SplunkPy/Scripts/SplunkAddComment/SplunkAddComment.py @@ -4,23 +4,21 @@ def add_comment(args: Dict[str, Any]) -> CommandResults: demisto.debug("adding comment") - tags = argToList(args.get('tags', 'FROM XSOAR')) - comment_body = args.get('comment', '') + tags = argToList(args.get("tags", "FROM XSOAR")) + comment_body = args.get("comment", "") - return CommandResults( - readable_output=comment_body, mark_as_note=True, tags=tags - ) + return CommandResults(readable_output=comment_body, mark_as_note=True, tags=tags) def main(): # pragma: no cover try: - demisto.debug('SplunkAddComment is being called') + demisto.debug("SplunkAddComment is being called") res = add_comment(demisto.args()) return_results(res) except Exception as ex: - return_error(f'Failed to execute SplunkAddComment. Error: {str(ex)}') + return_error(f"Failed to execute SplunkAddComment. 
Error: {ex!s}") -if __name__ in ["__builtin__", "builtins", '__main__']: +if __name__ in ["__builtin__", "builtins", "__main__"]: main() diff --git a/Packs/SplunkPy/Scripts/SplunkAddComment/SplunkAddComment_test.py b/Packs/SplunkPy/Scripts/SplunkAddComment/SplunkAddComment_test.py index c25221d23c30..54732fad3f15 100644 --- a/Packs/SplunkPy/Scripts/SplunkAddComment/SplunkAddComment_test.py +++ b/Packs/SplunkPy/Scripts/SplunkAddComment/SplunkAddComment_test.py @@ -3,8 +3,9 @@ def test_add_comment_as_note(): adding a comment as a note. """ from SplunkAddComment import add_comment - result = add_comment({'comment': 'New comment', 'tags': 'comment tag to splunk'}) - assert result.readable_output == 'New comment' - assert result.tags == ['comment tag to splunk'] + result = add_comment({"comment": "New comment", "tags": "comment tag to splunk"}) + + assert result.readable_output == "New comment" + assert result.tags == ["comment tag to splunk"] assert result.mark_as_note diff --git a/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable.py b/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable.py index 62cf416238a6..6070bbe585e8 100644 --- a/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable.py +++ b/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable.py @@ -6,25 +6,22 @@ def main(): incident = demisto.incident() splunkComments = [] if not incident: - raise ValueError("Error - demisto.incident() expected to return current incident " - "from context but returned None") - fields = incident.get('CustomFields', []) + raise ValueError("Error - demisto.incident() expected to return current incident " "from context but returned None") + fields = incident.get("CustomFields", []) if fields: - splunkComments_str = fields.get('splunkcomments', []) + splunkComments_str = fields.get("splunkcomments", []) for data in splunkComments_str: parsed_data = json.loads(data) splunkComments.append(parsed_data) if not splunkComments: - return CommandResults(readable_output='No comments were found in the notable') + return CommandResults(readable_output="No comments were found in the notable") - markdown = tableToMarkdown("", splunkComments, headers=['Comment']) - return CommandResults( - readable_output=markdown - ) + markdown = tableToMarkdown("", splunkComments, headers=["Comment"]) + return CommandResults(readable_output=markdown) -if __name__ in ('__main__', '__builtin__', 'builtins'): +if __name__ in ("__main__", "__builtin__", "builtins"): try: return_results(main()) except Exception as e: - return_error(f'Got an error while parsing Splunk events: {e}', error=e) + return_error(f"Got an error while parsing Splunk events: {e}", error=e) diff --git a/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable_test.py b/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable_test.py index 7277941907be..eb80895fd047 100644 --- a/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable_test.py +++ b/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable_test.py @@ -1,8 +1,6 @@ import SplunkConvertCommentsToTable -EXPECTED_TABLE = ('|Comment|\n' - '|---|\n' - '| new comment |\n') +EXPECTED_TABLE = "|Comment|\n" "|---|\n" "| new comment |\n" def test_convert_to_table(mocker): @@ -14,9 +12,8 @@ def test_convert_to_table(mocker): Then: - Validate the table is created correctly """ - incident = {'CustomFields': 
{'splunkcomments': [ - '{"Comment":"new comment"}']}} - mocker.patch('demistomock.incident', return_value=incident) + incident = {"CustomFields": {"splunkcomments": ['{"Comment":"new comment"}']}} + mocker.patch("demistomock.incident", return_value=incident) result = SplunkConvertCommentsToTable.main() assert result.readable_output == EXPECTED_TABLE diff --git a/Packs/SplunkPy/Scripts/SplunkShowAsset/SplunkShowAsset.py b/Packs/SplunkPy/Scripts/SplunkShowAsset/SplunkShowAsset.py index 32e2d1dcfdce..3810bc99b023 100644 --- a/Packs/SplunkPy/Scripts/SplunkShowAsset/SplunkShowAsset.py +++ b/Packs/SplunkPy/Scripts/SplunkShowAsset/SplunkShowAsset.py @@ -1,34 +1,33 @@ +import json + import demistomock as demisto # noqa: F401 from CommonServerPython import * # noqa: F401 -import json - def main(): asset_results = [] incident = demisto.incident() if not incident: - raise ValueError("Error - demisto.incident() expected to return current incident " - "from context but returned None") - labels = incident.get('labels', []) + raise ValueError("Error - demisto.incident() expected to return current incident " "from context but returned None") + labels = incident.get("labels", []) for label in labels: - if label.get('type') == 'successful_asset_enrichment': - is_successful = label.get('value') - if is_successful == 'false': - return CommandResults(readable_output='Asset enrichment failed.') - if label.get('type') == 'Asset': - asset_results = json.loads(label.get('value', [])) + if label.get("type") == "successful_asset_enrichment": + is_successful = label.get("value") + if is_successful == "false": + return CommandResults(readable_output="Asset enrichment failed.") + if label.get("type") == "Asset": + asset_results = json.loads(label.get("value", [])) if not asset_results: - return CommandResults(readable_output='No assets were found in the notable') + return CommandResults(readable_output="No assets were found in the notable") markdown = tableToMarkdown("", asset_results, headers=asset_results[0].keys()) - return {'ContentsFormat': formats['markdown'], 'Type': entryTypes['note'], 'Contents': markdown} + return {"ContentsFormat": formats["markdown"], "Type": entryTypes["note"], "Contents": markdown} -if __name__ in ('__main__', '__builtin__', 'builtins'): +if __name__ in ("__main__", "__builtin__", "builtins"): try: return_results(main()) except Exception as e: - return_error(f'Got an error while parsing Splunk events: {e}', error=e) + return_error(f"Got an error while parsing Splunk events: {e}", error=e) diff --git a/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown.py b/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown.py index 0a824f5e7a99..e4e542d56880 100644 --- a/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown.py +++ b/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown.py @@ -6,34 +6,35 @@ def main(): drilldown_results = [] incident = demisto.incident() if not incident: - raise ValueError("Error - demisto.incident() expected to return current incident " - "from context but returned None") + raise ValueError("Error - demisto.incident() expected to return current incident " "from context but returned None") - labels = incident.get('labels', []) + labels = incident.get("labels", []) for label in labels: - if label.get('type') == 'successful_drilldown_enrichment': - is_successful = label.get('value') - if is_successful == 'false': - return CommandResults(readable_output='Drilldown enrichment failed.') - if label.get('type') == 'Drilldown': + if 
label.get("type") == "successful_drilldown_enrichment": + is_successful = label.get("value") + if is_successful == "false": + return CommandResults(readable_output="Drilldown enrichment failed.") + if label.get("type") == "Drilldown": try: - drilldown_results = json.loads(label.get('value', [])) + drilldown_results = json.loads(label.get("value", [])) except Exception as e: - raise ValueError(f'Drilldown is not in a valid JSON structure:\n{e}') + raise ValueError(f"Drilldown is not in a valid JSON structure:\n{e}") if not drilldown_results: - return CommandResults(readable_output='Drilldown was not configured for notable.') + return CommandResults(readable_output="Drilldown was not configured for notable.") if isinstance(drilldown_results, list): - if 'query_name' in drilldown_results[0]: + if "query_name" in drilldown_results[0]: # Get drilldown results of multiple drilldown searches markdown = "#### Drilldown Searches Results\n" for drilldown in drilldown_results: - markdown += f"**Query Name:** {drilldown.get('query_name','')}\n\n **Query"\ + markdown += ( + f"**Query Name:** {drilldown.get('query_name','')}\n\n **Query" f"Search:**\n{drilldown.get('query_search','')}\n\n **Results:**\n" + ) - if drilldown.get('enrichment_status') == 'Enrichment failed': + if drilldown.get("enrichment_status") == "Enrichment failed": markdown += "\nDrilldown enrichment failed." elif results := drilldown.get("query_results", []): @@ -51,11 +52,11 @@ def main(): else: markdown = tableToMarkdown("", drilldown_results) - return {'ContentsFormat': formats['markdown'], 'Type': entryTypes['note'], 'Contents': markdown} + return {"ContentsFormat": formats["markdown"], "Type": entryTypes["note"], "Contents": markdown} -if __name__ in ('__main__', '__builtin__', 'builtins'): +if __name__ in ("__main__", "__builtin__", "builtins"): try: return_results(main()) except Exception as e: - return_error(f'Got an error while parsing Splunk events: {e}', error=e) + return_error(f"Got an error while parsing Splunk events: {e}", error=e) diff --git a/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown_test.py b/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown_test.py index 4e6cad282454..77a705d1077f 100644 --- a/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown_test.py +++ b/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown_test.py @@ -1,4 +1,5 @@ import json + import SplunkShowDrilldown from pytest import raises @@ -129,80 +130,76 @@ def test_incident_multiple_drilldown_search_results(mocker): Verifies that the output returned is correct """ drilldown = [ - {"query_name": "query_name1", - "query_search": "query_search1", - "enrichment_status": "Enrichment successfully handled", - "query_results": [ - { - "_bkt": "main~Test1", - "_cd": "524:1111111", - "_indextime": "1715859867", - "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 1.1.1.1,Computer name: Test1", - "_serial": "0", - "_si": ["ip-1-1-1-1", "main"], - "_sourcetype": "test1", - "_time": "2024-05-16T11:26:32.000+00:00", - "category": "Other", - "dest": "Test_dest1", - "signature": "test_signature1", - }, - { - "_bkt": "main~Test2", - "_cd": "524:2222222", - "_indextime": "1715859867", - "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 2.2.2.2,Computer name: Test2", - "_serial": "0", - "_si": ["ip-2-2-2-2", "main"], - "_sourcetype": "test2", - "_time": "2024-05-16T11:26:32.000+00:00", - "category": "Other", - "dest": "Test_dest2", - "signature": "test_signature2", - }, - ], - - }, - {"query_name": 
"query_name2", - "query_search": "query_search2", - "enrichment_status": "Enrichment successfully handled", - "query_results": [ - { - "_bkt": "main~Test3", - "_cd": "524:1111111", - "_indextime": "1715859867", - "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 1.1.1.1,Computer name: Test1", - "_serial": "0", - "_si": ["ip-1-1-1-1", "main"], - "_sourcetype": "test1", - "_time": "2024-05-16T11:26:32.000+00:00", - "category": "Other", - "dest": "Test_dest1", - "signature": "test_signature3", - }, - { - "_bkt": "main~Test4", - "_cd": "524:2222222", - "_indextime": "1715859867", - "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 2.2.2.2,Computer name: Test2", - "_serial": "0", - "_si": ["ip-2-2-2-2", "main"], - "_sourcetype": "test2", - "_time": "2024-05-16T11:26:32.000+00:00", - "category": "Other", - "dest": "Test_dest2", - "signature": "test_signature4", - }, - ], - } + { + "query_name": "query_name1", + "query_search": "query_search1", + "enrichment_status": "Enrichment successfully handled", + "query_results": [ + { + "_bkt": "main~Test1", + "_cd": "524:1111111", + "_indextime": "1715859867", + "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 1.1.1.1,Computer name: Test1", + "_serial": "0", + "_si": ["ip-1-1-1-1", "main"], + "_sourcetype": "test1", + "_time": "2024-05-16T11:26:32.000+00:00", + "category": "Other", + "dest": "Test_dest1", + "signature": "test_signature1", + }, + { + "_bkt": "main~Test2", + "_cd": "524:2222222", + "_indextime": "1715859867", + "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 2.2.2.2,Computer name: Test2", + "_serial": "0", + "_si": ["ip-2-2-2-2", "main"], + "_sourcetype": "test2", + "_time": "2024-05-16T11:26:32.000+00:00", + "category": "Other", + "dest": "Test_dest2", + "signature": "test_signature2", + }, + ], + }, + { + "query_name": "query_name2", + "query_search": "query_search2", + "enrichment_status": "Enrichment successfully handled", + "query_results": [ + { + "_bkt": "main~Test3", + "_cd": "524:1111111", + "_indextime": "1715859867", + "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 1.1.1.1,Computer name: Test1", + "_serial": "0", + "_si": ["ip-1-1-1-1", "main"], + "_sourcetype": "test1", + "_time": "2024-05-16T11:26:32.000+00:00", + "category": "Other", + "dest": "Test_dest1", + "signature": "test_signature3", + }, + { + "_bkt": "main~Test4", + "_cd": "524:2222222", + "_indextime": "1715859867", + "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 2.2.2.2,Computer name: Test2", + "_serial": "0", + "_si": ["ip-2-2-2-2", "main"], + "_sourcetype": "test2", + "_time": "2024-05-16T11:26:32.000+00:00", + "category": "Other", + "dest": "Test_dest2", + "signature": "test_signature4", + }, + ], + }, ] str_drilldown = json.dumps(drilldown) incident = { - "labels": [ - {"type": "successful_drilldown_enrichment", "value": "true"}, - {"type": "Drilldown", - "value": str_drilldown - } - ] + "labels": [{"type": "successful_drilldown_enrichment", "value": "true"}, {"type": "Drilldown", "value": str_drilldown}] } mocker.patch("demistomock.incident", return_value=incident) res = SplunkShowDrilldown.main() @@ -224,53 +221,49 @@ def test_incident_multiple_drilldown_search_no_results(mocker): Verifies that the output returned is correct """ drilldown = [ - {"query_name": "query_name1", - "query_search": "query_search1", - "enrichment_status": "Enrichment successfully handled", - "query_results": [ - { - "_bkt": "main~Test1", - "_cd": "524:1111111", - "_indextime": "1715859867", - "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 
1.1.1.1,Computer name: Test1", - "_serial": "0", - "_si": ["ip-1-1-1-1", "main"], - "_sourcetype": "test1", - "_time": "2024-05-16T11:26:32.000+00:00", - "category": "Other", - "dest": "Test_dest1", - "signature": "test_signature1", - }, - { - "_bkt": "main~Test2", - "_cd": "524:2222222", - "_indextime": "1715859867", - "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 2.2.2.2,Computer name: Test2", - "_serial": "0", - "_si": ["ip-2-2-2-2", "main"], - "_sourcetype": "test2", - "_time": "2024-05-16T11:26:32.000+00:00", - "category": "Other", - "dest": "Test_dest2", - "signature": "test_signature2", - }, - ], - - }, - {"query_name": "query_name2", - "query_search": "query_search2", - "enrichment_status": "Enrichment successfully handled", - "query_results": [], - } + { + "query_name": "query_name1", + "query_search": "query_search1", + "enrichment_status": "Enrichment successfully handled", + "query_results": [ + { + "_bkt": "main~Test1", + "_cd": "524:1111111", + "_indextime": "1715859867", + "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 1.1.1.1,Computer name: Test1", + "_serial": "0", + "_si": ["ip-1-1-1-1", "main"], + "_sourcetype": "test1", + "_time": "2024-05-16T11:26:32.000+00:00", + "category": "Other", + "dest": "Test_dest1", + "signature": "test_signature1", + }, + { + "_bkt": "main~Test2", + "_cd": "524:2222222", + "_indextime": "1715859867", + "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 2.2.2.2,Computer name: Test2", + "_serial": "0", + "_si": ["ip-2-2-2-2", "main"], + "_sourcetype": "test2", + "_time": "2024-05-16T11:26:32.000+00:00", + "category": "Other", + "dest": "Test_dest2", + "signature": "test_signature2", + }, + ], + }, + { + "query_name": "query_name2", + "query_search": "query_search2", + "enrichment_status": "Enrichment successfully handled", + "query_results": [], + }, ] str_drilldown = json.dumps(drilldown) incident = { - "labels": [ - {"type": "successful_drilldown_enrichment", "value": "true"}, - {"type": "Drilldown", - "value": str_drilldown - } - ] + "labels": [{"type": "successful_drilldown_enrichment", "value": "true"}, {"type": "Drilldown", "value": str_drilldown}] } mocker.patch("demistomock.incident", return_value=incident) res = SplunkShowDrilldown.main() @@ -292,53 +285,49 @@ def test_incident_multiple_drilldown_search_enrichment_failed(mocker): Verifies that the output returned is correct """ drilldown = [ - {"query_name": "query_name1", - "query_search": "query_search1", - "enrichment_status": "Enrichment successfully handled", - "query_results": [ - { - "_bkt": "main~Test1", - "_cd": "524:1111111", - "_indextime": "1715859867", - "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 1.1.1.1,Computer name: Test1", - "_serial": "0", - "_si": ["ip-1-1-1-1", "main"], - "_sourcetype": "test1", - "_time": "2024-05-16T11:26:32.000+00:00", - "category": "Other", - "dest": "Test_dest1", - "signature": "test_signature1", - }, - { - "_bkt": "main~Test2", - "_cd": "524:2222222", - "_indextime": "1715859867", - "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 2.2.2.2,Computer name: Test2", - "_serial": "0", - "_si": ["ip-2-2-2-2", "main"], - "_sourcetype": "test2", - "_time": "2024-05-16T11:26:32.000+00:00", - "category": "Other", - "dest": "Test_dest2", - "signature": "test_signature2", - }, - ], - - }, - {"query_name": "query_name2", - "query_search": "query_search2", - "enrichment_status": "Enrichment failed", - "query_results": [], - } + { + "query_name": "query_name1", + "query_search": "query_search1", + "enrichment_status": "Enrichment 
successfully handled", + "query_results": [ + { + "_bkt": "main~Test1", + "_cd": "524:1111111", + "_indextime": "1715859867", + "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 1.1.1.1,Computer name: Test1", + "_serial": "0", + "_si": ["ip-1-1-1-1", "main"], + "_sourcetype": "test1", + "_time": "2024-05-16T11:26:32.000+00:00", + "category": "Other", + "dest": "Test_dest1", + "signature": "test_signature1", + }, + { + "_bkt": "main~Test2", + "_cd": "524:2222222", + "_indextime": "1715859867", + "_raw": "2024-05-16 11:26:32,Virus found,IP Address: 2.2.2.2,Computer name: Test2", + "_serial": "0", + "_si": ["ip-2-2-2-2", "main"], + "_sourcetype": "test2", + "_time": "2024-05-16T11:26:32.000+00:00", + "category": "Other", + "dest": "Test_dest2", + "signature": "test_signature2", + }, + ], + }, + { + "query_name": "query_name2", + "query_search": "query_search2", + "enrichment_status": "Enrichment failed", + "query_results": [], + }, ] str_drilldown = json.dumps(drilldown) incident = { - "labels": [ - {"type": "successful_drilldown_enrichment", "value": "true"}, - {"type": "Drilldown", - "value": str_drilldown - } - ] + "labels": [{"type": "successful_drilldown_enrichment", "value": "true"}, {"type": "Drilldown", "value": str_drilldown}] } mocker.patch("demistomock.incident", return_value=incident) res = SplunkShowDrilldown.main() diff --git a/Packs/SplunkPy/Scripts/SplunkShowIdentity/SplunkShowIdentity.py b/Packs/SplunkPy/Scripts/SplunkShowIdentity/SplunkShowIdentity.py index 171b4a414818..c4f66ec7541a 100644 --- a/Packs/SplunkPy/Scripts/SplunkShowIdentity/SplunkShowIdentity.py +++ b/Packs/SplunkPy/Scripts/SplunkShowIdentity/SplunkShowIdentity.py @@ -1,34 +1,33 @@ +import json + import demistomock as demisto # noqa: F401 from CommonServerPython import * # noqa: F401 -import json - def main(): identity_results = [] incident = demisto.incident() if not incident: - raise ValueError("Error - demisto.incident() expected to return current incident " - "from context but returned None") - labels = incident.get('labels', []) + raise ValueError("Error - demisto.incident() expected to return current incident " "from context but returned None") + labels = incident.get("labels", []) for label in labels: - if label.get('type') == 'successful_identity_enrichment': - is_successful = label.get('value') - if is_successful == 'false': - return CommandResults(readable_output='Identity enrichment failed.') - if label.get('type') == 'Identity': - identity_results = json.loads(label.get('value', [])) + if label.get("type") == "successful_identity_enrichment": + is_successful = label.get("value") + if is_successful == "false": + return CommandResults(readable_output="Identity enrichment failed.") + if label.get("type") == "Identity": + identity_results = json.loads(label.get("value", [])) if not identity_results: - return CommandResults(readable_output='No identities were found in the notable') + return CommandResults(readable_output="No identities were found in the notable") markdown = tableToMarkdown("", identity_results, headers=identity_results[0].keys()) - return {'ContentsFormat': formats['markdown'], 'Type': entryTypes['note'], 'Contents': markdown} + return {"ContentsFormat": formats["markdown"], "Type": entryTypes["note"], "Contents": markdown} -if __name__ in ('__main__', '__builtin__', 'builtins'): +if __name__ in ("__main__", "__builtin__", "builtins"): try: return_results(main()) except Exception as e: - return_error(f'Got an error while parsing Splunk events: {e}', error=e) + return_error(f"Got an 
error while parsing Splunk events: {e}", error=e) diff --git a/Packs/SplunkPy/pack_metadata.json b/Packs/SplunkPy/pack_metadata.json index 0f3ce72e3bae..2f0eee736749 100644 --- a/Packs/SplunkPy/pack_metadata.json +++ b/Packs/SplunkPy/pack_metadata.json @@ -2,7 +2,7 @@ "name": "Splunk", "description": "Run queries on Splunk servers.", "support": "xsoar", - "currentVersion": "3.2.5", + "currentVersion": "3.2.6", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", From 7b05ba7c22aa35219a20db87e67b74f4a410665b Mon Sep 17 00:00:00 2001 From: Content Bot Date: Sun, 23 Mar 2025 12:52:49 +0000 Subject: [PATCH 06/18] HPEArubaClearPass: Apply ruff Format --- .../HPEArubaClearPass/HPEArubaClearPass.py | 282 +++++++++--------- .../HPEArubaClearPass_test.py | 150 +++++----- .../HPEArubaClearPass/ReleaseNotes/1_0_30.md | 6 + Packs/HPEArubaClearPass/pack_metadata.json | 2 +- 4 files changed, 227 insertions(+), 213 deletions(-) create mode 100644 Packs/HPEArubaClearPass/ReleaseNotes/1_0_30.md diff --git a/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass.py b/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass.py index 393c38eb84d6..2a39c351e9a8 100644 --- a/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass.py +++ b/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass.py @@ -1,14 +1,14 @@ import json from datetime import datetime, timedelta -from CommonServerPython import * +from typing import Any import urllib3 -from typing import Any +from CommonServerPython import * # Disable insecure warnings urllib3.disable_warnings() # pylint: disable=no-member -DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR" +DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ" # ISO8601 format with UTC, default in XSOAR" class Client(BaseClient): @@ -21,7 +21,7 @@ class Client(BaseClient): base_url (str) : Base URL for the service. client_id (str): HPE Aruba ClearPass client identifier. client_secret (str): HPE Aruba ClearPass client secret. - """ + """ def __init__(self, proxy: bool, verify: bool, base_url: str, client_id: str, client_secret: str): super().__init__(proxy=proxy, verify=verify, base_url=base_url) @@ -36,22 +36,14 @@ def generate_new_access_token(self): Makes an HTTP request in order to get back a new access token. """ demisto.debug("access token does not exist, trying to generate a new one") - body = { - "grant_type": "client_credentials", - "client_id": self.client_id, - "client_secret": self.client_secret - } - return self._http_request( - method='POST', - url_suffix="oauth", - json_data=body - ) + body = {"grant_type": "client_credentials", "client_id": self.client_id, "client_secret": self.client_secret} + return self._http_request(method="POST", url_suffix="oauth", json_data=body) def login(self): """ Checks if a valid access token is set to integration context. Otherwise, generates one and save to it to integration context. 
- """ + """ integration_context = get_integration_context() if integration_context and self.is_access_token_valid(): self.set_valid_access_token() @@ -74,8 +66,10 @@ def set_request_headers(self): def get_expiration_in_seconds(self, auth_response: dict[str, str]): access_token_expiration_in_seconds = auth_response.get("expires_in") is_expiration_valid = access_token_expiration_in_seconds and isinstance(auth_response.get("expires_in"), int) - error_msg = f"HPEArubaClearPass error: Got an invalid access token expiration time from the API: " \ - f"{access_token_expiration_in_seconds} from type: {type(access_token_expiration_in_seconds)}" + error_msg = ( + f"HPEArubaClearPass error: Got an invalid access token expiration time from the API: " + f"{access_token_expiration_in_seconds} from type: {type(access_token_expiration_in_seconds)}" + ) return access_token_expiration_in_seconds if is_expiration_valid else return_error(error_msg) def save_access_token_to_context(self, auth_response: dict): @@ -86,49 +80,45 @@ def save_access_token_to_context(self, auth_response: dict): context = {"access_token": self.access_token, "expires_in": expiration_timestamp.strftime(DATE_FORMAT)} set_integration_context(context) self.set_request_headers() - demisto.debug(f"New access token that expires in : {expiration_timestamp.strftime(DATE_FORMAT)}" - f" was set to integration_context.") + demisto.debug( + f"New access token that expires in : {expiration_timestamp.strftime(DATE_FORMAT)}" f" was set to integration_context." + ) def is_access_token_valid(self): integration_context = get_integration_context() - access_token_expiration = integration_context.get('expires_in') - access_token = integration_context.get('access_token') + access_token_expiration = integration_context.get("expires_in") + access_token = integration_context.get("access_token") context_has_access_token_and_expiration = access_token and access_token_expiration access_token_expiration_datetime = datetime.strptime(access_token_expiration, DATE_FORMAT) return context_has_access_token_and_expiration and access_token_expiration_datetime > datetime.now() def set_valid_access_token(self): integration_context = get_integration_context() - self.access_token = integration_context.get('access_token') + self.access_token = integration_context.get("access_token") self.set_request_headers() demisto.debug("access token is valid") - def prepare_request(self, method: str, params: dict, url_suffix: str, body: dict = {}, resp_type: str = 'json'): + def prepare_request(self, method: str, params: dict, url_suffix: str, body: dict = {}, resp_type: str = "json"): return self._http_request( - method=method, - params=params, - url_suffix=url_suffix, - headers=self.headers, - json_data=body, - resp_type=resp_type + method=method, params=params, url_suffix=url_suffix, headers=self.headers, json_data=body, resp_type=resp_type ) def command_test_module(client: Client): try: params = {"filter": {}, "offset": 0, "limit": 25} - client.prepare_request(method='GET', params=params, url_suffix='endpoint') - message = 'ok' + client.prepare_request(method="GET", params=params, url_suffix="endpoint") + message = "ok" except DemistoException as e: - if 'Forbidden' in str(e) or 'Authorization' in str(e): - message = 'Authorization Error: make sure API Key is correctly set' + if "Forbidden" in str(e) or "Authorization" in str(e): + message = "Authorization Error: make sure API Key is correctly set" else: raise e return message def parse_items_response(response: dict, 
active_sessions_parsing=None): # type:ignore - items_list = response.get('_embedded', {}).get('items') + items_list = response.get("_embedded", {}).get("items") human_readable = [] if items_list: for item in items_list: @@ -144,21 +134,21 @@ def get_endpoints_list_command(client: Client, args: dict[str, Any]) -> CommandR Gets a list of endpoints. If mac_address was not given, all the endpoints will be returned. Note: In Aruba ClearPass all endpoints appear in Configuration > Identity > Endpoints. """ - mac_address = args.get('mac_address') - status = args.get('status') - offset = args.get('offset', 0) - limit = args.get('limit', 25) + mac_address = args.get("mac_address") + status = args.get("status") + offset = args.get("offset", 0) + limit = args.get("limit", 25) endpoints_filter = endpoints_filter_to_json_object(status, mac_address) params = {"filter": endpoints_filter, "limit": limit, "offset": offset} - res = client.prepare_request(method='GET', params=params, url_suffix='endpoint') + res = client.prepare_request(method="GET", params=params, url_suffix="endpoint") readable_output, outputs = parse_items_response(res) - human_readable = tableToMarkdown('HPE Aruba ClearPass endpoints', readable_output, removeNull=True) + human_readable = tableToMarkdown("HPE Aruba ClearPass endpoints", readable_output, removeNull=True) return CommandResults( readable_output=human_readable, - outputs_prefix='HPEArubaClearPass.Endpoints', - outputs_key_field='id', + outputs_prefix="HPEArubaClearPass.Endpoints", + outputs_key_field="id", outputs=outputs, ) @@ -174,26 +164,25 @@ def update_endpoint_command(client: Client, args: dict[str, Any]) -> CommandResu """ Updates an endpoint by its endpoint_id. Only endpoint_id is a mandatory field. """ - endpoint_id = args['endpoint_id'] - mac_address = args.get('mac_address') - status = args.get('status') - description = args.get('description') - device_insight_tags = argToList(args.get('device_insight_tags')) - attributes = argToList(args.get('attributes')) + endpoint_id = args["endpoint_id"] + mac_address = args.get("mac_address") + status = args.get("status") + description = args.get("description") + device_insight_tags = argToList(args.get("device_insight_tags")) + attributes = argToList(args.get("attributes")) attributes_values = {} for attribute in attributes: # converting the given list of attributes pairs to a a dict attributes_values.update(attribute) - request_body = get_endpoint_request_body(status, mac_address, description, device_insight_tags, attributes_values, - attributes) - outputs = client.prepare_request(method='PATCH', params={}, url_suffix=f'endpoint/{endpoint_id}', body=request_body) + request_body = get_endpoint_request_body(status, mac_address, description, device_insight_tags, attributes_values, attributes) + outputs = client.prepare_request(method="PATCH", params={}, url_suffix=f"endpoint/{endpoint_id}", body=request_body) delete_redundant_data(outputs) - human_readable = tableToMarkdown('HPE Aruba ClearPass endpoints', outputs, removeNull=True) + human_readable = tableToMarkdown("HPE Aruba ClearPass endpoints", outputs, removeNull=True) return CommandResults( readable_output=human_readable, - outputs_prefix='HPEArubaClearPass.Endpoints', - outputs_key_field='id', + outputs_prefix="HPEArubaClearPass.Endpoints", + outputs_key_field="id", outputs=outputs, ) @@ -203,8 +192,8 @@ def delete_redundant_data(res: dict[str, Any]): Removes the '_links' entity from the response. 
This entity includes a url to Aruba server for the given object which equals the requested url suffix. """ - if res and '_links' in res: - del res['_links'] + if res and "_links" in res: + del res["_links"] def get_endpoint_request_body(status, mac_address, description, device_insight_tags, attributes_values, attributes): @@ -222,28 +211,28 @@ def get_attributes_list_command(client: Client, args: dict[str, Any]) -> Command Gets a list of attributes. If attribute_id was not given, all the attributes will be returned. Note: In Aruba ClearPass all attributes appear in Administration > Dictionaries > Dictionary Attributes. """ - attribute_id = args.get('attribute_id') + attribute_id = args.get("attribute_id") try: if attribute_id: attribute_id = int(attribute_id) except ValueError: return_error("Please note that attribute_id should be a valid id number (integer).") - name = args.get('name') - entity_name = args.get('entity_name') - offset = args.get('offset', 0) - limit = args.get('limit', 25) + name = args.get("name") + entity_name = args.get("entity_name") + offset = args.get("offset", 0) + limit = args.get("limit", 25) attribute_filter = attributes_filter_to_json_object(attribute_id, name, entity_name) params = {"filter": attribute_filter, "offset": offset, "limit": limit} - res = client.prepare_request(method='GET', params=params, url_suffix='attribute') + res = client.prepare_request(method="GET", params=params, url_suffix="attribute") readable_output, outputs = parse_items_response(res) - human_readable = tableToMarkdown('HPE Aruba ClearPass attributes', readable_output, removeNull=True) + human_readable = tableToMarkdown("HPE Aruba ClearPass attributes", readable_output, removeNull=True) return CommandResults( readable_output=human_readable, - outputs_prefix='HPEArubaClearPass.Attributes', - outputs_key_field='id', + outputs_prefix="HPEArubaClearPass.Attributes", + outputs_key_field="id", outputs=outputs, ) @@ -257,18 +246,18 @@ def attributes_filter_to_json_object(attribute_id, name, entity_name): def check_api_limitation_on_specific_data_types(args: dict[str, Any]): - """ Checks if the attribute data_type match the api limitations like: + """Checks if the attribute data_type match the api limitations like: 1. allow_multiple is available only when data_type is String. 2. allowed_value is available only when data_type is List. 
API docs are here: {Aruba_url_server}/api-docs/Dictionaries-v1#!/Attribute """ - data_type = args.get('data_type') - allow_multiple_data_type_string = argToBoolean(args.get('allow_multiple', False)) - allowed_list_data_types_value = args.get('allowed_value', "") + data_type = args.get("data_type") + allow_multiple_data_type_string = argToBoolean(args.get("allow_multiple", False)) + allowed_list_data_types_value = args.get("allowed_value", "") if allow_multiple_data_type_string and data_type != "String": return_error(f"Note: allow_multiple argument should be true only for data type String and not for {data_type}.") - if allowed_list_data_types_value and data_type != 'List': + if allowed_list_data_types_value and data_type != "List": return_error(f"Note: allowed_value argument should be set only for data type List and not for {data_type}.") @@ -278,14 +267,14 @@ def create_attribute_command(client: Client, args: dict[str, Any]) -> CommandRes """ check_api_limitation_on_specific_data_types(args) new_attribute_body = create_new_attribute_body(args) - outputs = client.prepare_request(method='POST', params={}, url_suffix='attribute', body=new_attribute_body) + outputs = client.prepare_request(method="POST", params={}, url_suffix="attribute", body=new_attribute_body) delete_redundant_data(outputs) - human_readable = tableToMarkdown('HPE Aruba ClearPass new attribute', outputs, removeNull=True) + human_readable = tableToMarkdown("HPE Aruba ClearPass new attribute", outputs, removeNull=True) return CommandResults( readable_output=human_readable, - outputs_prefix='HPEArubaClearPass.Attributes', - outputs_key_field='id', + outputs_prefix="HPEArubaClearPass.Attributes", + outputs_key_field="id", outputs=outputs, ) @@ -294,30 +283,42 @@ def create_new_attribute_body(args: dict[str, Any]): """ Creates a new attribute body for creating and updating an attribute. 
""" - name = args.get('name') - entity_name = args.get('entity_name') - data_type = args.get('data_type') - mandatory = argToBoolean(args.get('mandatory', False)) - attribute_default_value = args.get('default_value', "") - allow_multiple_data_type_string = argToBoolean(args.get('allow_multiple', False)) - allowed_list_data_types_value = args.get('allowed_value', "") - - return get_attribute_request_body(name, entity_name, data_type, mandatory, attribute_default_value, - allow_multiple_data_type_string, allowed_list_data_types_value) + name = args.get("name") + entity_name = args.get("entity_name") + data_type = args.get("data_type") + mandatory = argToBoolean(args.get("mandatory", False)) + attribute_default_value = args.get("default_value", "") + allow_multiple_data_type_string = argToBoolean(args.get("allow_multiple", False)) + allowed_list_data_types_value = args.get("allowed_value", "") + + return get_attribute_request_body( + name, + entity_name, + data_type, + mandatory, + attribute_default_value, + allow_multiple_data_type_string, + allowed_list_data_types_value, + ) -def get_attribute_request_body(name, entity_name, data_type, mandatory, attribute_default_value, - allow_multiple_data_type_string, allowed_list_data_types_value): +def get_attribute_request_body( + name, + entity_name, + data_type, + mandatory, + attribute_default_value, + allow_multiple_data_type_string, + allowed_list_data_types_value, +): new_attribute_body = {} new_attribute_body.update({"name": name}) new_attribute_body.update({"entity_name": entity_name}) new_attribute_body.update({"data_type": data_type}) new_attribute_body.update({"mandatory": mandatory}) if mandatory else None new_attribute_body.update({"default_value": attribute_default_value}) if attribute_default_value else None - new_attribute_body.update( - {"allow_multiple": allow_multiple_data_type_string}) if allow_multiple_data_type_string else None - new_attribute_body.update( - {"allowed_value": allowed_list_data_types_value}) if allowed_list_data_types_value else None + new_attribute_body.update({"allow_multiple": allow_multiple_data_type_string}) if allow_multiple_data_type_string else None + new_attribute_body.update({"allowed_value": allowed_list_data_types_value}) if allowed_list_data_types_value else None return new_attribute_body @@ -325,17 +326,16 @@ def update_attribute_command(client: Client, args: dict[str, Any]) -> CommandRes """ Updates an attribute fields by the attribute_id which is a mandatory field. """ - attribute_id = args['attribute_id'] + attribute_id = args["attribute_id"] new_attribute_body = create_new_attribute_body(args) - outputs = client.prepare_request(method='PATCH', params={}, url_suffix=f'attribute/{attribute_id}', - body=new_attribute_body) + outputs = client.prepare_request(method="PATCH", params={}, url_suffix=f"attribute/{attribute_id}", body=new_attribute_body) delete_redundant_data(outputs) - human_readable = tableToMarkdown('HPE Aruba ClearPass update attribute', outputs, removeNull=True) + human_readable = tableToMarkdown("HPE Aruba ClearPass update attribute", outputs, removeNull=True) return CommandResults( readable_output=human_readable, - outputs_prefix='HPEArubaClearPass.Attributes', - outputs_key_field='id', + outputs_prefix="HPEArubaClearPass.Attributes", + outputs_key_field="id", outputs=outputs, ) @@ -344,8 +344,8 @@ def delete_attribute_command(client: Client, args: dict[str, Any]) -> CommandRes """ Deletes an attribute by the attribute_id which is a mandatory field. 
""" - attribute_id = args['attribute_id'] - client.prepare_request(method='DELETE', params={}, url_suffix=f'attribute/{attribute_id}', resp_type='content') + attribute_id = args["attribute_id"] + client.prepare_request(method="DELETE", params={}, url_suffix=f"attribute/{attribute_id}", resp_type="content") human_readable = f"HPE Aruba ClearPass attribute with ID: {attribute_id} deleted successfully." return CommandResults(readable_output=human_readable) @@ -356,24 +356,24 @@ def get_active_sessions_list_command(client: Client, args: dict[str, Any]) -> Co Gets a list of active sessions. If session_id was not given, all the active sessions will be returned. Note: In Aruba ClearPass all active sessions appear in: Home > Guest > Active Sessions. """ - session_id = args.get('session_id') - device_ip = args.get('device_ip') - device_mac_address = args.get('device_mac_address') - visitor_phone = args.get('visitor_phone') - limit = args.get('limit', 25) + session_id = args.get("session_id") + device_ip = args.get("device_ip") + device_mac_address = args.get("device_mac_address") + visitor_phone = args.get("visitor_phone") + limit = args.get("limit", 25) session_filter = sessions_filter_to_json_object(session_id, device_ip, device_mac_address, visitor_phone) params = {"filter": session_filter, "limit": limit} - res = client.prepare_request(method='GET', params=params, url_suffix='session') + res = client.prepare_request(method="GET", params=params, url_suffix="session") readable_output, all_active_sessions_list = parse_items_response(res, parse_active_sessions_response) outputs = [parse_active_sessions_response(item) for item in all_active_sessions_list] - human_readable = tableToMarkdown('HPE Aruba ClearPass Active Sessions', readable_output, removeNull=True) + human_readable = tableToMarkdown("HPE Aruba ClearPass Active Sessions", readable_output, removeNull=True) return CommandResults( readable_output=human_readable, - outputs_prefix='HPEArubaClearPass.Sessions', - outputs_key_field='id', + outputs_prefix="HPEArubaClearPass.Sessions", + outputs_key_field="id", outputs=outputs, ) @@ -390,11 +390,11 @@ def sessions_filter_to_json_object(session_id, device_ip, device_mac_address, vi def parse_active_sessions_response(response: dict) -> dict: return { - 'ID': response.get('id'), - 'Device_IP': response.get('framedipaddress'), - 'Device_mac_address': response.get('mac_address'), - 'State': response.get('state'), - 'Visitor_phone': response.get('visitor_phone', False) + "ID": response.get("id"), + "Device_IP": response.get("framedipaddress"), + "Device_mac_address": response.get("mac_address"), + "State": response.get("state"), + "Visitor_phone": response.get("visitor_phone", False), } @@ -402,72 +402,68 @@ def disconnect_active_session_command(client: Client, args: dict[str, Any]) -> C """ Disconnects an active session by the session_id which is a mandatory field. 
""" - session_id = args['session_id'] - encoded_session_id = urllib.parse.quote(session_id, safe='') + session_id = args["session_id"] + encoded_session_id = urllib.parse.quote(session_id, safe="") url_suffix = f"/session/{encoded_session_id}/disconnect" body = {"id": session_id, "confirm_disconnect": True} - res = client.prepare_request(method='POST', params={}, url_suffix=url_suffix, body=body) - outputs = {"Error_code": res.get('error'), "Response_message": res.get('message')} - human_readable = tableToMarkdown('HPE Aruba ClearPass Disconnect active session', outputs, removeNull=True) + res = client.prepare_request(method="POST", params={}, url_suffix=url_suffix, body=body) + outputs = {"Error_code": res.get("error"), "Response_message": res.get("message")} + human_readable = tableToMarkdown("HPE Aruba ClearPass Disconnect active session", outputs, removeNull=True) return CommandResults( readable_output=human_readable, - outputs_prefix='HPEArubaClearPass.Sessions', - outputs_key_field='id', + outputs_prefix="HPEArubaClearPass.Sessions", + outputs_key_field="id", outputs=outputs, ) def main() -> None: params = demisto.params() - base_url = urljoin(params.get('url'), '/api') - client_id = params.get('client_id_creds', {}).get('identifier') or params.get('client_id') - client_secret = params.get('client_id_creds', {}).get('password') or params.get('client_secret') - verify_certificate = not params.get('insecure', False) - proxy = params.get('proxy', False) - - client = Client(proxy=proxy, - verify=verify_certificate, - base_url=base_url, - client_id=client_id, - client_secret=client_secret) - demisto.debug(f'Command being called is {demisto.command()}') + base_url = urljoin(params.get("url"), "/api") + client_id = params.get("client_id_creds", {}).get("identifier") or params.get("client_id") + client_secret = params.get("client_id_creds", {}).get("password") or params.get("client_secret") + verify_certificate = not params.get("insecure", False) + proxy = params.get("proxy", False) + + client = Client(proxy=proxy, verify=verify_certificate, base_url=base_url, client_id=client_id, client_secret=client_secret) + demisto.debug(f"Command being called is {demisto.command()}") try: args = demisto.args() - if demisto.command() == 'test-module': + if demisto.command() == "test-module": return_results(command_test_module(client)) - elif demisto.command() == 'aruba-clearpass-endpoints-list': + elif demisto.command() == "aruba-clearpass-endpoints-list": return_results(get_endpoints_list_command(client, args)) - elif demisto.command() == 'aruba-clearpass-endpoint-update': + elif demisto.command() == "aruba-clearpass-endpoint-update": return_results(update_endpoint_command(client, args)) - elif demisto.command() == 'aruba-clearpass-attributes-list': + elif demisto.command() == "aruba-clearpass-attributes-list": return_results(get_attributes_list_command(client, args)) - elif demisto.command() == 'aruba-clearpass-attribute-create': + elif demisto.command() == "aruba-clearpass-attribute-create": return_results(create_attribute_command(client, args)) - elif demisto.command() == 'aruba-clearpass-attribute-update': + elif demisto.command() == "aruba-clearpass-attribute-update": return_results(update_attribute_command(client, args)) - elif demisto.command() == 'aruba-clearpass-attribute-delete': + elif demisto.command() == "aruba-clearpass-attribute-delete": return_results(delete_attribute_command(client, args)) - elif demisto.command() == 'aruba-clearpass-active-sessions-list': + elif demisto.command() == 
"aruba-clearpass-active-sessions-list": return_results(get_active_sessions_list_command(client, args)) - elif demisto.command() == 'aruba-clearpass-active-session-disconnect': + elif demisto.command() == "aruba-clearpass-active-session-disconnect": return_results(disconnect_active_session_command(client, args)) else: - raise NotImplementedError(f'{demisto.command()} is not an existing HPE Aruba ClearPass command') + raise NotImplementedError(f"{demisto.command()} is not an existing HPE Aruba ClearPass command") except Exception as e: - return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}') + return_error(f"Failed to execute {demisto.command()} command.\nError:\n{e!s}") -if __name__ in ('__main__', '__builtin__', 'builtins'): +if __name__ in ("__main__", "__builtin__", "builtins"): main() diff --git a/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass_test.py b/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass_test.py index e3a90199351a..45b8e9fef3ff 100644 --- a/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass_test.py +++ b/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass_test.py @@ -1,44 +1,49 @@ import json + import HPEArubaClearPass -from HPEArubaClearPass import * -from freezegun import freeze_time import pytest +from freezegun import freeze_time +from HPEArubaClearPass import * from pytest import raises CLIENT_ID = "id123" CLIENT_SECRET = "secret123" -CLIENT_AUTH = \ - { - "access_token": "auth123", - "expires_in": 28800, - "token_type": "Bearer", - "scope": None - } +CLIENT_AUTH = {"access_token": "auth123", "expires_in": 28800, "token_type": "Bearer", "scope": None} NEW_ACCESS_TOKEN = "new123" -TEST_LOGIN_LIST = \ - [ - ({}, "auth123"), # no integration context, should generate new access token - ({"access_token": "old123", "expires_in": "2021-05-03T12:00:00Z"}, # access token valid - "old123"), - ({"access_token": "old123", "expires_in": "2021-05-03T10:00:00Z"}, # access token expired - "auth123"), - ] +TEST_LOGIN_LIST = [ + ({}, "auth123"), # no integration context, should generate new access token + ( + {"access_token": "old123", "expires_in": "2021-05-03T12:00:00Z"}, # access token valid + "old123", + ), + ( + {"access_token": "old123", "expires_in": "2021-05-03T10:00:00Z"}, # access token expired + "auth123", + ), +] def util_load_json(path): - with open(path, encoding='utf-8') as f: + with open(path, encoding="utf-8") as f: return json.loads(f.read()) -def create_client(mocker, proxy: bool = False, verify: bool = False, base_url: str = "https://example.com/api/", - client_id: str = CLIENT_ID, client_secret: str = CLIENT_SECRET): - mocker.patch.object(HPEArubaClearPass.Client, 'login') - return HPEArubaClearPass.Client(proxy=proxy, verify=verify, base_url=base_url, client_id=client_id, - client_secret=client_secret) +def create_client( + mocker, + proxy: bool = False, + verify: bool = False, + base_url: str = "https://example.com/api/", + client_id: str = CLIENT_ID, + client_secret: str = CLIENT_SECRET, +): + mocker.patch.object(HPEArubaClearPass.Client, "login") + return HPEArubaClearPass.Client( + proxy=proxy, verify=verify, base_url=base_url, client_id=client_id, client_secret=client_secret + ) -@pytest.mark.parametrize('integration_context, expected_token', TEST_LOGIN_LIST) +@pytest.mark.parametrize("integration_context, expected_token", TEST_LOGIN_LIST) @freeze_time("2021-05-03T11:00:00Z") def test_login(mocker, integration_context, expected_token): """ 
@@ -54,8 +59,9 @@ def test_login(mocker, integration_context, expected_token): """ mocker.patch.object(HPEArubaClearPass, "get_integration_context", return_value=integration_context) mocker.patch.object(HPEArubaClearPass.Client, "generate_new_access_token", return_value=CLIENT_AUTH) - client = HPEArubaClearPass.Client(proxy=False, verify=False, base_url="https://example.com/api/", - client_id=CLIENT_ID, client_secret=CLIENT_SECRET) + client = HPEArubaClearPass.Client( + proxy=False, verify=False, base_url="https://example.com/api/", client_id=CLIENT_ID, client_secret=CLIENT_SECRET + ) assert client.access_token == expected_token @@ -76,10 +82,10 @@ def test_get_endpoints_list_command(mocker): results = get_endpoints_list_command(client, {}) assert results.outputs_prefix == "HPEArubaClearPass.Endpoints" assert results.outputs_key_field == "id" - assert results.outputs[0]['id'] == 1 - assert results.outputs[1]['id'] == 2 - assert results.outputs[0]['mac_address'] == '001234567891' - assert results.outputs[1]['mac_address'] == '001234567892' + assert results.outputs[0]["id"] == 1 + assert results.outputs[1]["id"] == 2 + assert results.outputs[0]["mac_address"] == "001234567891" + assert results.outputs[1]["mac_address"] == "001234567892" def test_update_endpoint_command(mocker): @@ -96,14 +102,14 @@ def test_update_endpoint_command(mocker): client = create_client(mocker) mock_endpoint_response = util_load_json("test_data/update_endpoint_response.json") mocker.patch.object(client, "prepare_request", return_value=mock_endpoint_response) - args = {"endpoint_id": '1', "mac_address": "123456789", "description": "test1", "status": "Unknown"} + args = {"endpoint_id": "1", "mac_address": "123456789", "description": "test1", "status": "Unknown"} results = update_endpoint_command(client, args) assert results.outputs_prefix == "HPEArubaClearPass.Endpoints" assert results.outputs_key_field == "id" - assert results.outputs['id'] == 1 - assert results.outputs['mac_address'] == '123456789' - assert results.outputs['description'] == 'test1' - assert results.outputs['status'] == 'Unknown' + assert results.outputs["id"] == 1 + assert results.outputs["mac_address"] == "123456789" + assert results.outputs["description"] == "test1" + assert results.outputs["status"] == "Unknown" def test_get_attributes_list_command(mocker): @@ -123,12 +129,12 @@ def test_get_attributes_list_command(mocker): results = get_attributes_list_command(client, {}) assert results.outputs_prefix == "HPEArubaClearPass.Attributes" assert results.outputs_key_field == "id" - assert results.outputs[0]['id'] == 1 - assert results.outputs[0]['name'] == 'Controller Id' - assert results.outputs[0]['entity_name'] == 'Device' - assert results.outputs[0]['data_type'] == 'String' - assert results.outputs[0]['mandatory'] is False - assert results.outputs[0]['allow_multiple'] is True + assert results.outputs[0]["id"] == 1 + assert results.outputs[0]["name"] == "Controller Id" + assert results.outputs[0]["entity_name"] == "Device" + assert results.outputs[0]["data_type"] == "String" + assert results.outputs[0]["mandatory"] is False + assert results.outputs[0]["allow_multiple"] is True def test_create_attribute_command(mocker): @@ -149,12 +155,12 @@ def test_create_attribute_command(mocker): results = create_attribute_command(client, args) assert results.outputs_prefix == "HPEArubaClearPass.Attributes" assert results.outputs_key_field == "id" - assert results.outputs['id'] == 1 - assert results.outputs['name'] == args.get('name') - assert 
results.outputs['entity_name'] == args.get('entity_name') - assert results.outputs['data_type'] == args.get('data_type') - assert results.outputs['mandatory'] is False - assert results.outputs['allow_multiple'] is False + assert results.outputs["id"] == 1 + assert results.outputs["name"] == args.get("name") + assert results.outputs["entity_name"] == args.get("entity_name") + assert results.outputs["data_type"] == args.get("data_type") + assert results.outputs["mandatory"] is False + assert results.outputs["allow_multiple"] is False def test_update_attribute_command(mocker): @@ -175,12 +181,12 @@ def test_update_attribute_command(mocker): results = update_attribute_command(client, args) assert results.outputs_prefix == "HPEArubaClearPass.Attributes" assert results.outputs_key_field == "id" - assert results.outputs['id'] == 1 - assert results.outputs['name'] == args.get('name') - assert results.outputs['entity_name'] == args.get('entity_name') - assert results.outputs['data_type'] == args.get('data_type') - assert results.outputs['mandatory'] is False - assert results.outputs['allow_multiple'] is False + assert results.outputs["id"] == 1 + assert results.outputs["name"] == args.get("name") + assert results.outputs["entity_name"] == args.get("entity_name") + assert results.outputs["data_type"] == args.get("data_type") + assert results.outputs["mandatory"] is False + assert results.outputs["allow_multiple"] is False def test_delete_attribute_command(mocker): @@ -219,11 +225,11 @@ def test_get_active_sessions_list_command(mocker): results = get_active_sessions_list_command(client, {}) assert results.outputs_prefix == "HPEArubaClearPass.Sessions" assert results.outputs_key_field == "id" - assert results.outputs[0]['ID'] == 1 - assert results.outputs[0]['Device_IP'] == "1.2.3.4" - assert results.outputs[0]['Device_mac_address'] == "001234567891" - assert results.outputs[0]['State'] == "active" - assert results.outputs[0]['Visitor_phone'] == "+972512345678" + assert results.outputs[0]["ID"] == 1 + assert results.outputs[0]["Device_IP"] == "1.2.3.4" + assert results.outputs[0]["Device_mac_address"] == "001234567891" + assert results.outputs[0]["State"] == "active" + assert results.outputs[0]["Visitor_phone"] == "+972512345678" def test_disconnect_active_session_command(mocker): @@ -240,16 +246,21 @@ def test_disconnect_active_session_command(mocker): client = create_client(mocker) mock_sessions_response = util_load_json("test_data/disconnect_active_session_response.json") mocker.patch.object(client, "prepare_request", return_value=mock_sessions_response) - results = disconnect_active_session_command(client, {'session_id': "1234"}) + results = disconnect_active_session_command(client, {"session_id": "1234"}) assert results.outputs_prefix == "HPEArubaClearPass.Sessions" assert results.outputs_key_field == "id" - assert results.outputs['Error_code'] == 0 - assert results.outputs['Response_message'] == "Success" + assert results.outputs["Error_code"] == 0 + assert results.outputs["Response_message"] == "Success" -@pytest.mark.parametrize('args', [{"data_type": None, "allow_multiple": True}, - {"data_type": "Boolean", "allow_multiple": True}, - {"data_type": "Boolean", "allowed_value": True}]) +@pytest.mark.parametrize( + "args", + [ + {"data_type": None, "allow_multiple": True}, + {"data_type": "Boolean", "allow_multiple": True}, + {"data_type": "Boolean", "allowed_value": True}, + ], +) def test_check_api_limitation_on_specific_data_types(args): """ Given: @@ -283,7 +294,7 @@ def 
test_check_api_limitation_on_specific_data_types(args): "Space in session ID", "Plus sign in session ID", "Special characters in session ID", - ] + ], ) def test_disconnect_active_session_command_encoding(mocker, session_id, expected_encoded_id): """ @@ -295,11 +306,12 @@ def test_disconnect_active_session_command_encoding(mocker, session_id, expected - Ensure the session ID and buddy.get("id") are properly URL-encoded in the request. """ from HPEArubaClearPass import disconnect_active_session_command + # Mock client client = create_client(mocker) mock_response = {"error": None, "message": "Session disconnected successfully"} - mocker.patch.object(client, 'prepare_request', return_value=mock_response) + mocker.patch.object(client, "prepare_request", return_value=mock_response) args = {"session_id": session_id} result = disconnect_active_session_command(client, args) @@ -309,13 +321,13 @@ def test_disconnect_active_session_command_encoding(mocker, session_id, expected body = kwargs["body"] client.prepare_request.assert_called_once_with( - method='POST', + method="POST", params={}, url_suffix=f"/session/{expected_encoded_id}/disconnect", - body={"id": session_id, "confirm_disconnect": True} + body={"id": session_id, "confirm_disconnect": True}, ) assert body["id"] == session_id - assert urllib.parse.quote(body["id"], safe='') == expected_encoded_id + assert urllib.parse.quote(body["id"], safe="") == expected_encoded_id assert result.outputs == {"Error_code": None, "Response_message": "Session disconnected successfully"} diff --git a/Packs/HPEArubaClearPass/ReleaseNotes/1_0_30.md b/Packs/HPEArubaClearPass/ReleaseNotes/1_0_30.md new file mode 100644 index 000000000000..22942240eac1 --- /dev/null +++ b/Packs/HPEArubaClearPass/ReleaseNotes/1_0_30.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### HPE Aruba ClearPass + +- Metadata and documentation improvements. 
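The reformatted Client above centers on one pattern worth spelling out: OAuth client-credentials tokens are cached in the integration context together with their expiry timestamp, and a new token is requested only once the stored one has lapsed. A minimal, self-contained sketch of that flow — the in-memory `_context` dict and the `fetch_token` callable are stand-ins for XSOAR's `get_integration_context`/`set_integration_context` and the real POST to `/oauth`:

    from datetime import datetime, timedelta

    DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
    _context: dict = {}  # stand-in for the persistent integration context


    def _token_valid(ctx: dict) -> bool:
        # A cached token counts only if both fields exist and the expiry is still ahead.
        expires = ctx.get("expires_in")
        return bool(
            ctx.get("access_token") and expires and datetime.strptime(expires, DATE_FORMAT) > datetime.now()
        )


    def get_access_token(fetch_token) -> str:
        # Reuse the cached token while it is valid; otherwise fetch and cache a new one.
        if _token_valid(_context):
            return _context["access_token"]
        auth = fetch_token()  # e.g. {"access_token": "auth123", "expires_in": 28800}
        expiry = datetime.now() + timedelta(seconds=auth["expires_in"])
        _context["access_token"] = auth["access_token"]
        _context["expires_in"] = expiry.strftime(DATE_FORMAT)
        return _context["access_token"]

The frozen-time login tests above exercise exactly these three branches: an empty context generates a new token, a valid stored token is reused, and an expired stored token is replaced.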
diff --git a/Packs/HPEArubaClearPass/pack_metadata.json b/Packs/HPEArubaClearPass/pack_metadata.json index 525b71cf3ac0..85b09f9f7eed 100644 --- a/Packs/HPEArubaClearPass/pack_metadata.json +++ b/Packs/HPEArubaClearPass/pack_metadata.json @@ -2,7 +2,7 @@ "name": "HPE Aruba Clearpass", "description": "Aruba ClearPass Policy Manager provides role and device-based network access control for employees, contractors, and guests across any multivendor wired, wireless and VPN infrastructure.", "support": "xsoar", - "currentVersion": "1.0.29", + "currentVersion": "1.0.30", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", From 9766cc4c1c4cb7416495346d9f1264a5a0d7ec4c Mon Sep 17 00:00:00 2001 From: Content Bot Date: Sun, 23 Mar 2025 12:52:52 +0000 Subject: [PATCH 07/18] Ipstack: Apply ruff Format --- Packs/Ipstack/Integrations/Ipstack/Ipstack.py | 120 +++++++++--------- .../Integrations/Ipstack/Ipstack_test.py | 78 ++++++------ Packs/Ipstack/ReleaseNotes/1_0_17.md | 6 + Packs/Ipstack/pack_metadata.json | 2 +- 4 files changed, 103 insertions(+), 103 deletions(-) create mode 100644 Packs/Ipstack/ReleaseNotes/1_0_17.md diff --git a/Packs/Ipstack/Integrations/Ipstack/Ipstack.py b/Packs/Ipstack/Integrations/Ipstack/Ipstack.py index e9ab58b1f534..8ed1b2d2343b 100644 --- a/Packs/Ipstack/Integrations/Ipstack/Ipstack.py +++ b/Packs/Ipstack/Integrations/Ipstack/Ipstack.py @@ -1,21 +1,23 @@ +import os + import demistomock as demisto +import requests from CommonServerPython import * + from CommonServerUserPython import * -import os -import requests -BASE_URL = 'http://api.ipstack.com' -API_KEY = demisto.params().get('credentials', {}).get('password') or demisto.params().get('apikey') -RELIABILITY = demisto.params().get('integrationReliability', 'C - Fairly reliable') +BASE_URL = "http://api.ipstack.com" +API_KEY = demisto.params().get("credentials", {}).get("password") or demisto.params().get("apikey") +RELIABILITY = demisto.params().get("integrationReliability", "C - Fairly reliable") BRAND_NAME = "Ipstack" -if not demisto.params()['proxy']: - del os.environ['HTTP_PROXY'] - del os.environ['HTTPS_PROXY'] - del os.environ['http_proxy'] - del os.environ['https_proxy'] +if not demisto.params()["proxy"]: + del os.environ["HTTP_PROXY"] + del os.environ["HTTPS_PROXY"] + del os.environ["http_proxy"] + del os.environ["https_proxy"] -''' HELPER FUNCTIONS ''' +""" HELPER FUNCTIONS """ # #returns a result of a api call @@ -26,20 +28,17 @@ def http_request(method, path): HTTP request helper function """ url = BASE_URL + path - res = requests.request( - method=method, - url=url - ) + res = requests.request(method=method, url=url) if not res.ok: - txt = 'error in URL {} status code: {} reason: {}'.format(url, res.status_code, res.text) + txt = f"error in URL {url} status code: {res.status_code} reason: {res.text}" demisto.error(txt) raise Exception(txt) try: res_json = res.json() - if res_json.get('code'): - txt = 'error in URL {} status code: {} reason: {}'.format(url, res.status_code, res.text) + if res_json.get("code"): + txt = f"error in URL {url} status code: {res.status_code} reason: {res.text}" demisto.error(txt) raise Exception(txt) else: @@ -50,16 +49,16 @@ def http_request(method, path): demisto.results({"Type": entryTypes["error"], "ContentsFormat": formats["text"], "Contents": res.text}) -''' Commands ''' +""" Commands """ def do_ip(ip): - path = "/{}?access_key={}".format(ip, API_KEY) - return http_request('GET', path) + path = f"/{ip}?access_key={API_KEY}" + return 
http_request("GET", path) def do_ip_command(): - ips = demisto.args().get('ip') + ips = demisto.args().get("ip") list_ips = argToList(ips) ips_results = [] @@ -67,10 +66,10 @@ def do_ip_command(): for ip in list_ips: raw_response = do_ip(ip) human_readable_data = { - "Address": raw_response.get('ip'), - "Country": raw_response.get('country_name'), - "Latitude": raw_response.get('latitude'), - "Longitude": raw_response.get('longitude') + "Address": raw_response.get("ip"), + "Country": raw_response.get("country_name"), + "Latitude": raw_response.get("latitude"), + "Longitude": raw_response.get("longitude"), } if DBotScoreReliability.is_valid_type(RELIABILITY): @@ -78,64 +77,63 @@ def do_ip_command(): else: raise Exception("Please provide a valid value for the Source Reliability parameter.") - dbot_score = Common.DBotScore(indicator=ip, - indicator_type=DBotScoreType.IP, - integration_name=BRAND_NAME, - reliability=dbot_reliability, - score=Common.DBotScore.NONE) + dbot_score = Common.DBotScore( + indicator=ip, + indicator_type=DBotScoreType.IP, + integration_name=BRAND_NAME, + reliability=dbot_reliability, + score=Common.DBotScore.NONE, + ) outputs = { - 'IP(val.Address == obj.Address)': { - 'Address': raw_response.get('ip'), - 'Geo': { - 'Location': "{}:{}".format(raw_response.get('latitude'), raw_response.get('longitude')), - 'Country': raw_response.get('country_name') - } + "IP(val.Address == obj.Address)": { + "Address": raw_response.get("ip"), + "Geo": { + "Location": "{}:{}".format(raw_response.get("latitude"), raw_response.get("longitude")), + "Country": raw_response.get("country_name"), + }, + }, + "Ipstack.ip(val.ID==obj.ID)": { + "address": raw_response.get("ip"), + "type": raw_response.get("type"), + "continent_name": raw_response.get("continent_name"), + "latitude": raw_response.get("latitude"), + "longitude": raw_response.get("longitude"), }, - 'Ipstack.ip(val.ID==obj.ID)': { - 'address': raw_response.get('ip'), - 'type': raw_response.get('type'), - 'continent_name': raw_response.get('continent_name'), - 'latitude': raw_response.get('latitude'), - 'longitude': raw_response.get('longitude'), - } } outputs.update(dbot_score.to_context()) - headers = ['Address', 'Country', 'Latitude', 'Longitude'] - human_readable = tableToMarkdown('Ipstack info on {}'.format(raw_response.get('ip')), human_readable_data, - headers=headers) - - result = CommandResults( - readable_output=human_readable, - outputs=outputs, - raw_response=raw_response + headers = ["Address", "Country", "Latitude", "Longitude"] + human_readable = tableToMarkdown( + "Ipstack info on {}".format(raw_response.get("ip")), human_readable_data, headers=headers ) + result = CommandResults(readable_output=human_readable, outputs=outputs, raw_response=raw_response) + ips_results.append(result) return_results(ips_results) def test_module(): - path = "/1.2.3.4?access_key={}".format(API_KEY) - res = requests.request('GET', BASE_URL + path) - if res.json().get('ip') == '1.2.3.4': - demisto.results('ok') + path = f"/1.2.3.4?access_key={API_KEY}" + res = requests.request("GET", BASE_URL + path) + if res.json().get("ip") == "1.2.3.4": + demisto.results("ok") else: - demisto.results('an error occurred. reason: {}'.format(res.text)) + demisto.results(f"an error occurred. 
reason: {res.text}") def main(): # pragma: no cover try: - if demisto.command() == 'test-module': + if demisto.command() == "test-module": test_module() - elif demisto.command() == 'ip': + elif demisto.command() == "ip": do_ip_command() except Exception as e: - return_error('Unable to perform command : {}, Reason: {}'.format(demisto.command, e)) + return_error(f"Unable to perform command : {demisto.command}, Reason: {e}") -if __name__ in ('__main__', '__builtin__', 'builtins'): +if __name__ in ("__main__", "__builtin__", "builtins"): main() diff --git a/Packs/Ipstack/Integrations/Ipstack/Ipstack_test.py b/Packs/Ipstack/Integrations/Ipstack/Ipstack_test.py index ee765a84a2ac..e2b500edf548 100644 --- a/Packs/Ipstack/Integrations/Ipstack/Ipstack_test.py +++ b/Packs/Ipstack/Integrations/Ipstack/Ipstack_test.py @@ -1,24 +1,23 @@ import demistomock as demisto RAW_RESPONSE_MOCK_1 = { - 'ip': '1.1.1.1', - 'country_name': 'country_name', - 'latitude': '1234', - 'longitude': '5678', - 'continent_name': 'continent_name', - 'type': 'type' + "ip": "1.1.1.1", + "country_name": "country_name", + "latitude": "1234", + "longitude": "5678", + "continent_name": "continent_name", + "type": "type", } RAW_RESPONSE_MOCK_2 = { - 'ip': '8.8.8.8', - 'country_name': 'country_name', - 'latitude': '8888', - 'longitude': '9999', - 'continent_name': 'continent_name', - 'type': 'type' + "ip": "8.8.8.8", + "country_name": "country_name", + "latitude": "8888", + "longitude": "9999", + "continent_name": "continent_name", + "type": "type", } -CONTEXT_PATH = 'DBotScore(val.Indicator && val.Indicator == obj.Indicator && val.Vendor == obj.Vendor ' \ - '&& val.Type == obj.Type)' -CONTEXT_PATH_PRIOR_V5_5 = 'DBotScore' +CONTEXT_PATH = "DBotScore(val.Indicator && val.Indicator == obj.Indicator && val.Vendor == obj.Vendor " "&& val.Type == obj.Type)" +CONTEXT_PATH_PRIOR_V5_5 = "DBotScore" def test_right_location_format(mocker, requests_mock): @@ -30,31 +29,28 @@ def test_right_location_format(mocker, requests_mock): - Case 1: The response should contain Location key with a value in the format of lon:lat and contain DBotScore calculations. 
""" - mocker.patch.object(demisto, 'params', return_value={'proxy': 'proxy', - 'credentials': {'password': 'password'}, - 'integrationReliability': 'C - Fairly reliable'}) - mocker.patch.object(demisto, 'args', return_value={'ip': '1.2.3.4,8.8.8.8'}) - mocker_results = mocker.patch.object(demisto, 'results') - requests_mock.get( - 'http://api.ipstack.com/1.2.3.4?access_key=password', - json=RAW_RESPONSE_MOCK_1 - ) - requests_mock.get( - 'http://api.ipstack.com/8.8.8.8?access_key=password', - json=RAW_RESPONSE_MOCK_2 + mocker.patch.object( + demisto, + "params", + return_value={"proxy": "proxy", "credentials": {"password": "password"}, "integrationReliability": "C - Fairly reliable"}, ) + mocker.patch.object(demisto, "args", return_value={"ip": "1.2.3.4,8.8.8.8"}) + mocker_results = mocker.patch.object(demisto, "results") + requests_mock.get("http://api.ipstack.com/1.2.3.4?access_key=password", json=RAW_RESPONSE_MOCK_1) + requests_mock.get("http://api.ipstack.com/8.8.8.8?access_key=password", json=RAW_RESPONSE_MOCK_2) from Ipstack import do_ip_command + do_ip_command() results_1234 = mocker_results.call_args_list[0][0][0] - output = results_1234.get('EntryContext').get('IP(val.Address == obj.Address)') - assert output.get('Geo').get('Location') == '1234:5678' - assert CONTEXT_PATH in results_1234.get('EntryContext') or CONTEXT_PATH_PRIOR_V5_5 in results_1234.get('EntryContext') + output = results_1234.get("EntryContext").get("IP(val.Address == obj.Address)") + assert output.get("Geo").get("Location") == "1234:5678" + assert CONTEXT_PATH in results_1234.get("EntryContext") or CONTEXT_PATH_PRIOR_V5_5 in results_1234.get("EntryContext") results_8888 = mocker_results.call_args_list[1][0][0] - output = results_8888.get('EntryContext').get('IP(val.Address == obj.Address)') - assert output.get('Geo').get('Location') == '8888:9999' - assert CONTEXT_PATH in results_8888.get('EntryContext') or CONTEXT_PATH_PRIOR_V5_5 in results_8888.get('EntryContext') + output = results_8888.get("EntryContext").get("IP(val.Address == obj.Address)") + assert output.get("Geo").get("Location") == "8888:9999" + assert CONTEXT_PATH in results_8888.get("EntryContext") or CONTEXT_PATH_PRIOR_V5_5 in results_8888.get("EntryContext") def test_test_module(mocker, requests_mock): @@ -64,14 +60,14 @@ def test_test_module(mocker, requests_mock): Then: - No errors occurred """ - mocker.patch.object(demisto, 'params', return_value={'proxy': 'proxy', - 'credentials': {'password': 'password'}, - 'integrationReliability': 'C - Fairly reliable'}) - results_mock = mocker.patch.object(demisto, 'results') - requests_mock.get( - 'http://api.ipstack.com/1.2.3.4?access_key=password', - json={'ip': '1.2.3.4'} + mocker.patch.object( + demisto, + "params", + return_value={"proxy": "proxy", "credentials": {"password": "password"}, "integrationReliability": "C - Fairly reliable"}, ) + results_mock = mocker.patch.object(demisto, "results") + requests_mock.get("http://api.ipstack.com/1.2.3.4?access_key=password", json={"ip": "1.2.3.4"}) from Ipstack import test_module + test_module() - assert 'ok' in results_mock.call_args[0][0] + assert "ok" in results_mock.call_args[0][0] diff --git a/Packs/Ipstack/ReleaseNotes/1_0_17.md b/Packs/Ipstack/ReleaseNotes/1_0_17.md new file mode 100644 index 000000000000..4c4142c87ea6 --- /dev/null +++ b/Packs/Ipstack/ReleaseNotes/1_0_17.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### ipstack + +- Metadata and documentation improvements. 
diff --git a/Packs/Ipstack/pack_metadata.json b/Packs/Ipstack/pack_metadata.json index 7cec81feace9..ae5564023a3a 100644 --- a/Packs/Ipstack/pack_metadata.json +++ b/Packs/Ipstack/pack_metadata.json @@ -2,7 +2,7 @@ "name": "Ipstack", "description": "One of the leading IP to geolocation APIs and global IP database services.", "support": "xsoar", - "currentVersion": "1.0.16", + "currentVersion": "1.0.17", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", From 416e3d645056a83241d1b6dad587d710d9c8381a Mon Sep 17 00:00:00 2001 From: Content Bot Date: Sun, 23 Mar 2025 12:52:55 +0000 Subject: [PATCH 08/18] CaseManagement-Generic: Apply ruff Format --- .../ReleaseNotes/1_4_11.md | 27 +++++++++++++++++++ .../CaseMgmtAnalystTools.py | 3 +-- .../CaseMgmtDisplayLabels.py | 6 ++--- .../CaseMgmtIncidentTypesByRole.py | 6 ++--- .../CaseMgmtIncidentTypesDisplay.py | 6 ++--- .../CaseMgmtResponseProcess.py | 4 +-- .../CloseLinkedIncidentsPostProcessing.py | 20 ++++++++------ .../CompleteTaskOnTimerBreach.py | 2 +- .../TimersOnOwnerChange.py | 6 ++--- .../CaseManagement-Generic/pack_metadata.json | 2 +- 10 files changed, 54 insertions(+), 28 deletions(-) create mode 100644 Packs/CaseManagement-Generic/ReleaseNotes/1_4_11.md diff --git a/Packs/CaseManagement-Generic/ReleaseNotes/1_4_11.md b/Packs/CaseManagement-Generic/ReleaseNotes/1_4_11.md new file mode 100644 index 000000000000..49254377ecd6 --- /dev/null +++ b/Packs/CaseManagement-Generic/ReleaseNotes/1_4_11.md @@ -0,0 +1,27 @@ + +#### Scripts + +##### CaseMgmtResponseProcess + +- Metadata and documentation improvements. +##### CaseMgmtAnalystTools + +- Metadata and documentation improvements. +##### TimersOnOwnerChange + +- Metadata and documentation improvements. +##### CompleteTaskOnTimerBreach + +- Metadata and documentation improvements. +##### CaseMgmtDisplayLabels + +- Metadata and documentation improvements. +##### CaseMgmtIncidentTypesByRole + +- Metadata and documentation improvements. +##### CloseLinkedIncidentsPostProcessing + +- Metadata and documentation improvements. +##### CaseMgmtIncidentTypesDisplay + +- Metadata and documentation improvements. diff --git a/Packs/CaseManagement-Generic/Scripts/CaseMgmtAnalystTools/CaseMgmtAnalystTools.py b/Packs/CaseManagement-Generic/Scripts/CaseMgmtAnalystTools/CaseMgmtAnalystTools.py index 04b2f4622eab..aaf79dd50652 100644 --- a/Packs/CaseManagement-Generic/Scripts/CaseMgmtAnalystTools/CaseMgmtAnalystTools.py +++ b/Packs/CaseManagement-Generic/Scripts/CaseMgmtAnalystTools/CaseMgmtAnalystTools.py @@ -1,11 +1,10 @@ import demistomock as demisto # noqa: F401 from CommonServerPython import * # noqa: F401 - # requires an XSOAR list that contains a markdown table with links to important Analyst Tools (wikis, google, etc) # get the Case Management Analyst Tools list -tools = demisto.executeCommand("getList", {"listName": "Case Management Analyst Tools"})[0]['Contents'] +tools = demisto.executeCommand("getList", {"listName": "Case Management Analyst Tools"})[0]["Contents"] # default tools, if the above list does not exist. 
default_tools = """ diff --git a/Packs/CaseManagement-Generic/Scripts/CaseMgmtDisplayLabels/CaseMgmtDisplayLabels.py b/Packs/CaseManagement-Generic/Scripts/CaseMgmtDisplayLabels/CaseMgmtDisplayLabels.py index aa2a2a476a55..480c3d5e2716 100644 --- a/Packs/CaseManagement-Generic/Scripts/CaseMgmtDisplayLabels/CaseMgmtDisplayLabels.py +++ b/Packs/CaseManagement-Generic/Scripts/CaseMgmtDisplayLabels/CaseMgmtDisplayLabels.py @@ -4,7 +4,7 @@ def main(): try: - labels = demisto.incident().get('labels', []) + labels = demisto.incident().get("labels", []) if labels: readable = tableToMarkdown("Alert Information", labels) else: @@ -12,8 +12,8 @@ def main(): return_results(CommandResults(readable_output=readable, ignore_auto_extract=True)) except Exception as ex: - return_results(f'Failed to execute BaseScript. Error: {str(ex)}') + return_results(f"Failed to execute BaseScript. Error: {ex!s}") -if __name__ in ('__main__', '__builtin__', 'builtins'): +if __name__ in ("__main__", "__builtin__", "builtins"): main() diff --git a/Packs/CaseManagement-Generic/Scripts/CaseMgmtIncidentTypesByRole/CaseMgmtIncidentTypesByRole.py b/Packs/CaseManagement-Generic/Scripts/CaseMgmtIncidentTypesByRole/CaseMgmtIncidentTypesByRole.py index 7fcde77bf89f..912a465a571d 100644 --- a/Packs/CaseManagement-Generic/Scripts/CaseMgmtIncidentTypesByRole/CaseMgmtIncidentTypesByRole.py +++ b/Packs/CaseManagement-Generic/Scripts/CaseMgmtIncidentTypesByRole/CaseMgmtIncidentTypesByRole.py @@ -1,13 +1,11 @@ import demistomock as demisto # noqa: F401 from CommonServerPython import * # noqa: F401 - # check if this is a new Incident or not incident = demisto.incident().get("id") # if new Incident, the ID will be empty: if not incident: - # get the XSOAR IncidentTypesRBAC XSOAR List types_list = demisto.executeCommand("getList", {"listName": "IncidentTypesRBAC"})[0]["Contents"] @@ -34,7 +32,7 @@ # remove duplicates allowedTypes = list(set(allowedTypes)) - demisto.results({'hidden': False, 'options': allowedTypes}) + demisto.results({"hidden": False, "options": allowedTypes}) except ValueError: pass except Exception: @@ -43,4 +41,4 @@ # if it's an existing Incident, prevent changing the type from the UI. # get the current Incident Type, and only return that type. 
incident_type = demisto.incident().get("type") - return_results({'hidden': False, 'options': [incident_type]}) + return_results({"hidden": False, "options": [incident_type]}) diff --git a/Packs/CaseManagement-Generic/Scripts/CaseMgmtIncidentTypesDisplay/CaseMgmtIncidentTypesDisplay.py b/Packs/CaseManagement-Generic/Scripts/CaseMgmtIncidentTypesDisplay/CaseMgmtIncidentTypesDisplay.py index b7bb9407d5fd..2cbcb66eb898 100644 --- a/Packs/CaseManagement-Generic/Scripts/CaseMgmtIncidentTypesDisplay/CaseMgmtIncidentTypesDisplay.py +++ b/Packs/CaseManagement-Generic/Scripts/CaseMgmtIncidentTypesDisplay/CaseMgmtIncidentTypesDisplay.py @@ -1,13 +1,11 @@ import demistomock as demisto # noqa: F401 from CommonServerPython import * # noqa: F401 - # check if this is a new Incident or not incident = demisto.incident().get("id") # if new Incident, the ID will be empty: if not incident: - # get the XSOAR IncidentTypesFromList XSOAR List, and split on the comma types_list = demisto.executeCommand("getList", {"listName": "IncidentTypesFromList"})[0]["Contents"] @@ -23,10 +21,10 @@ types_list = [x.strip() for x in types_list] # return the options to display to the user - return_results({'hidden': False, 'options': types_list}) + return_results({"hidden": False, "options": types_list}) # if it's an existing Incident, prevent changing the type from the UI. else: # get the current Incident Type, and only return that type. incident_type = demisto.incident().get("type") - return_results({'hidden': False, 'options': [incident_type]}) + return_results({"hidden": False, "options": [incident_type]}) diff --git a/Packs/CaseManagement-Generic/Scripts/CaseMgmtResponseProcess/CaseMgmtResponseProcess.py b/Packs/CaseManagement-Generic/Scripts/CaseMgmtResponseProcess/CaseMgmtResponseProcess.py index 1a39924ce35f..567708b16987 100644 --- a/Packs/CaseManagement-Generic/Scripts/CaseMgmtResponseProcess/CaseMgmtResponseProcess.py +++ b/Packs/CaseManagement-Generic/Scripts/CaseMgmtResponseProcess/CaseMgmtResponseProcess.py @@ -16,11 +16,11 @@ def main(): # get the list for the IncidentType # list name must be formatted as follows: IncidentType Response Process - response_process = demisto.executeCommand("getList", {"listName": f"{incident_type}ResponseProcess"})[0]['Contents'] + response_process = demisto.executeCommand("getList", {"listName": f"{incident_type}ResponseProcess"})[0]["Contents"] # check if the list exists and return it's contents, if not get or create the Default list and return it's contents. 
if "Item not found" in response_process: - response_process = demisto.executeCommand("getList", {"listName": default_response_process_list})[0]['Contents'] + response_process = demisto.executeCommand("getList", {"listName": default_response_process_list})[0]["Contents"] if "Item not found" in response_process: result = CommandResults(readable_output=default_contents, ignore_auto_extract=True) else: diff --git a/Packs/CaseManagement-Generic/Scripts/CloseLinkedIncidentsPostProcessing/CloseLinkedIncidentsPostProcessing.py b/Packs/CaseManagement-Generic/Scripts/CloseLinkedIncidentsPostProcessing/CloseLinkedIncidentsPostProcessing.py index 6e69716f8805..a85942af86b7 100644 --- a/Packs/CaseManagement-Generic/Scripts/CloseLinkedIncidentsPostProcessing/CloseLinkedIncidentsPostProcessing.py +++ b/Packs/CaseManagement-Generic/Scripts/CloseLinkedIncidentsPostProcessing/CloseLinkedIncidentsPostProcessing.py @@ -10,15 +10,19 @@ def main(): linked_incidents = incident.get("linkedIncidents") if linked_incidents and not str(close_notes).startswith("Closed from parent Incident"): - demisto.executeCommand("executeCommandAt", - {"command": "closeInvestigation", - "arguments": { - "closeReason": close_reason, - "closeNotes": f"Closed from parent Incident {incident_id}\n" - f"\nClose Notes:\n{close_notes}"}, - "incidents": ",".join(linked_incidents)}) + demisto.executeCommand( + "executeCommandAt", + { + "command": "closeInvestigation", + "arguments": { + "closeReason": close_reason, + "closeNotes": f"Closed from parent Incident {incident_id}\n" f"\nClose Notes:\n{close_notes}", + }, + "incidents": ",".join(linked_incidents), + }, + ) demisto.results(f"Closing linked Incidents {','.join(linked_incidents)}") -if __name__ in ('__main__', '__builtin__', 'builtins'): +if __name__ in ("__main__", "__builtin__", "builtins"): main() diff --git a/Packs/CaseManagement-Generic/Scripts/CompleteTaskOnTimerBreach/CompleteTaskOnTimerBreach.py b/Packs/CaseManagement-Generic/Scripts/CompleteTaskOnTimerBreach/CompleteTaskOnTimerBreach.py index e4beb8e58444..8a7da46995e7 100644 --- a/Packs/CaseManagement-Generic/Scripts/CompleteTaskOnTimerBreach/CompleteTaskOnTimerBreach.py +++ b/Packs/CaseManagement-Generic/Scripts/CompleteTaskOnTimerBreach/CompleteTaskOnTimerBreach.py @@ -2,7 +2,7 @@ from CommonServerPython import * # noqa: F401 # get the incident id. -inc = demisto.incident().get('id') +inc = demisto.incident().get("id") # execute the taskComplete command on all tasks tagged with timerbreach. demisto.executeCommand("taskComplete", {"id": "timerbreach", "incidentId": inc}) diff --git a/Packs/CaseManagement-Generic/Scripts/TimersOnOwnerChange/TimersOnOwnerChange.py b/Packs/CaseManagement-Generic/Scripts/TimersOnOwnerChange/TimersOnOwnerChange.py index fc9df08f50e9..dd19b1f7d491 100644 --- a/Packs/CaseManagement-Generic/Scripts/TimersOnOwnerChange/TimersOnOwnerChange.py +++ b/Packs/CaseManagement-Generic/Scripts/TimersOnOwnerChange/TimersOnOwnerChange.py @@ -1,13 +1,13 @@ import demistomock as demisto # noqa: F401 from CommonServerPython import * # noqa: F401 - # This script stops the Time to Assignment timer when an Owner is assigned to an Incident, and starts the Remediation # SLA Timer. 
-if not demisto.args().get('old') and demisto.args().get('new'): # If owner was no-one and is now someone: +if not demisto.args().get("old") and demisto.args().get("new"): # If owner was no-one and is now someone: demisto.executeCommand("stopTimer", {"timerField": "timetoassignment"}) demisto.executeCommand("startTimer", {"timerField": "remediationsla"}) demisto.results( "Assignment of the incident was successful, Time to Assignment has been stopped, and the Remediation timer has" - " been started!") + " been started!" + ) diff --git a/Packs/CaseManagement-Generic/pack_metadata.json b/Packs/CaseManagement-Generic/pack_metadata.json index c1c92186ac4b..402e973a88fd 100644 --- a/Packs/CaseManagement-Generic/pack_metadata.json +++ b/Packs/CaseManagement-Generic/pack_metadata.json @@ -2,7 +2,7 @@ "name": "CaseManagement-Generic", "description": "Case Management - Generic\n\nBuilt by the Cortex Customer Success Team to provide quick deployment of Case Management with XSOAR", "support": "community", - "currentVersion": "1.4.10", + "currentVersion": "1.4.11", "author": "Cortex XSOAR Customer Success", "url": "", "email": "", From f48b46d4de13fb32c9bdd14fa65c01fd8be26fb6 Mon Sep 17 00:00:00 2001 From: Content Bot Date: Sun, 23 Mar 2025 12:52:58 +0000 Subject: [PATCH 09/18] OpenLDAP: Apply ruff Format --- .../Integrations/OpenLDAP/OpenLDAP.py | 819 ++++++++++-------- .../Integrations/OpenLDAP/OpenLDAP_test.py | 599 +++++++------ Packs/OpenLDAP/ReleaseNotes/2_0_19.md | 6 + Packs/OpenLDAP/pack_metadata.json | 2 +- 4 files changed, 791 insertions(+), 635 deletions(-) create mode 100644 Packs/OpenLDAP/ReleaseNotes/2_0_19.md diff --git a/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP.py b/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP.py index ea4fc46288bd..108b0550c913 100644 --- a/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP.py +++ b/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP.py @@ -1,20 +1,37 @@ import demistomock as demisto from CommonServerPython import * + from CommonServerUserPython import * -''' IMPORTS ''' +""" IMPORTS """ import ssl -from ldap3 import Server, Connection, Tls, BASE, AUTO_BIND_TLS_BEFORE_BIND, AUTO_BIND_NO_TLS, ALL_ATTRIBUTES, SUBTREE, \ - ALL_OPERATIONAL_ATTRIBUTES + +from ldap3 import ( + ALL_ATTRIBUTES, + ALL_OPERATIONAL_ATTRIBUTES, + AUTO_BIND_NO_TLS, + AUTO_BIND_TLS_BEFORE_BIND, + BASE, + SUBTREE, + Connection, + Server, + Tls, +) +from ldap3.core.exceptions import ( + LDAPBindError, + LDAPInvalidDnError, + LDAPInvalidPortError, + LDAPSocketOpenError, + LDAPSocketReceiveError, + LDAPStartTLSError, +) from ldap3.utils.dn import parse_dn -from ldap3.core.exceptions import LDAPBindError, LDAPInvalidDnError, LDAPSocketOpenError, LDAPInvalidPortError, \ - LDAPSocketReceiveError, LDAPStartTLSError -'''CONSTANTS''' +"""CONSTANTS""" MAX_PAGE_SIZE = 2000 -''' LDAP Authentication CLIENT ''' +""" LDAP Authentication CLIENT """ def listArgToLdapFilterSyntax(arg: str, prefix: str) -> str: @@ -27,10 +44,10 @@ def listArgToLdapFilterSyntax(arg: str, prefix: str) -> str: str: The LDAP filter syntax. """ arg_list = argToList(arg) - joined_list = ''.join([f'({prefix}={item})' for item in arg_list]) + joined_list = "".join([f"({prefix}={item})" for item in arg_list]) if len(arg_list) > 1: - return f'(&{joined_list})' - return joined_list if arg_list else '' + return f"(&{joined_list})" + return joined_list if arg_list else "" def create_entries_search_filter(args: dict) -> str: @@ -41,14 +58,14 @@ def create_entries_search_filter(args: dict) -> str: Returns: str: The search filter. 
""" - cn_filter = listArgToLdapFilterSyntax(args.get('cn', ''), 'cn') - description_filter = listArgToLdapFilterSyntax(args.get('description', ''), 'description') - object_class = listArgToLdapFilterSyntax(args.get('object_class', ''), 'objectClass') - uid = listArgToLdapFilterSyntax(args.get('uid', ''), 'uid') - search_filter = args.get('search_filter', '') + cn_filter = listArgToLdapFilterSyntax(args.get("cn", ""), "cn") + description_filter = listArgToLdapFilterSyntax(args.get("description", ""), "description") + object_class = listArgToLdapFilterSyntax(args.get("object_class", ""), "objectClass") + uid = listArgToLdapFilterSyntax(args.get("uid", ""), "uid") + search_filter = args.get("search_filter", "") if not any([cn_filter, description_filter, object_class, uid, search_filter]): - return '(objectClass=*)' - return f'(|{cn_filter}{description_filter}{object_class}{uid}{search_filter})' + return "(objectClass=*)" + return f"(|{cn_filter}{description_filter}{object_class}{uid}{search_filter})" def get_search_attributes(attributes: str) -> Optional[list[str] | str]: @@ -59,14 +76,11 @@ def get_search_attributes(attributes: str) -> Optional[list[str] | str]: Returns: list[str] | str: The search attributes. """ - if attributes == 'all': + if attributes == "all": return [ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES] - return { - 'none': None, - 'all_user_attributes': ALL_ATTRIBUTES, - 'all_operational_attributes': ALL_OPERATIONAL_ATTRIBUTES - - }.get(attributes, argToList(attributes)) + return {"none": None, "all_user_attributes": ALL_ATTRIBUTES, "all_operational_attributes": ALL_OPERATIONAL_ATTRIBUTES}.get( + attributes, argToList(attributes) + ) def entries_paged_search(connection: Connection, search_params: dict, page: int, page_size: int) -> list[dict]: @@ -87,61 +101,63 @@ def entries_paged_search(connection: Connection, search_params: dict, page: int, connection.search(**search_params, paged_size=results_to_skip) # After the first search you must send back the cookie you get with each response in each subsequent search. # https://ldap3.readthedocs.io/en/latest/searches.html#simple-paged-search - cookie = connection.result['controls']['1.2.840.113556.1.4.319']['value']['cookie'] + cookie = connection.result["controls"]["1.2.840.113556.1.4.319"]["value"]["cookie"] return connection.search(**search_params, paged_size=page_size, paged_cookie=cookie) class LdapClient: """ - Base client for Ldap authentication. + Base client for Ldap authentication. 
- :type kwargs: ``dict`` - :param kwargs: Initialize params for ldap client + :type kwargs: ``dict`` + :param kwargs: Initialize params for ldap client """ - OPENLDAP = 'OpenLDAP' - ACTIVE_DIRECTORY = 'Active Directory' - AUTO = 'Auto' - GROUPS_TOKEN = 'primaryGroupToken' - GROUPS_MEMBER = 'memberOf' - GROUPS_PRIMARY_ID = 'primaryGroupID' + OPENLDAP = "OpenLDAP" + ACTIVE_DIRECTORY = "Active Directory" + AUTO = "Auto" + GROUPS_TOKEN = "primaryGroupToken" + GROUPS_MEMBER = "memberOf" + GROUPS_PRIMARY_ID = "primaryGroupID" TIMEOUT = 120 # timeout for ssl/tls socket - DEV_BUILD_NUMBER = 'REPLACE_THIS_WITH_CI_BUILD_NUM' # is used only in dev mode + DEV_BUILD_NUMBER = "REPLACE_THIS_WITH_CI_BUILD_NUM" # is used only in dev mode SUPPORTED_BUILD_NUMBER = 57352 # required server build number - CIPHERS_STRING = '@SECLEVEL=1:ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20:ECDH+AESGCM:DH+AESGCM:' \ - 'ECDH+AES:DH+AES:RSA+ANESGCM:RSA+AES:!aNULL:!eNULL:!MD5:!DSS' # Allowed ciphers for SSL/TLS + CIPHERS_STRING = ( + "@SECLEVEL=1:ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20:ECDH+AESGCM:DH+AESGCM:" + "ECDH+AES:DH+AES:RSA+ANESGCM:RSA+AES:!aNULL:!eNULL:!MD5:!DSS" + ) # Allowed ciphers for SSL/TLS SSL_VERSIONS = { - 'None': None, - 'TLS': ssl.PROTOCOL_TLS, - 'TLSv1': ssl.PROTOCOL_TLSv1, # guardrails-disable-line - 'TLSv1_1': ssl.PROTOCOL_TLSv1_1, # guardrails-disable-line - 'TLSv1_2': ssl.PROTOCOL_TLSv1_2, - 'TLS_CLIENT': ssl.PROTOCOL_TLS_CLIENT + "None": None, + "TLS": ssl.PROTOCOL_TLS, + "TLSv1": ssl.PROTOCOL_TLSv1, # guardrails-disable-line + "TLSv1_1": ssl.PROTOCOL_TLSv1_1, # guardrails-disable-line + "TLSv1_2": ssl.PROTOCOL_TLSv1_2, + "TLS_CLIENT": ssl.PROTOCOL_TLS_CLIENT, } def __init__(self, kwargs): - self._host = kwargs.get('host') - self._port = int(kwargs.get('port')) if kwargs.get('port') else None - self._username = kwargs.get('credentials', {}).get('identifier', '') - self._password = kwargs.get('credentials', {}).get('password', '') - self._base_dn = kwargs.get('base_dn', '').strip() - self._connection_type = kwargs.get('connection_type', 'none').lower() - self._ssl_version = kwargs.get('ssl_version', 'None') - self._fetch_groups = kwargs.get('fetch_groups', True) - self._verify = not kwargs.get('insecure', False) + self._host = kwargs.get("host") + self._port = int(kwargs.get("port")) if kwargs.get("port") else None + self._username = kwargs.get("credentials", {}).get("identifier", "") + self._password = kwargs.get("credentials", {}).get("password", "") + self._base_dn = kwargs.get("base_dn", "").strip() + self._connection_type = kwargs.get("connection_type", "none").lower() + self._ssl_version = kwargs.get("ssl_version", "None") + self._fetch_groups = kwargs.get("fetch_groups", True) + self._verify = not kwargs.get("insecure", False) self._ldap_server = self._initialize_ldap_server() - self._ldap_server_vendor = kwargs.get('ldap_server_vendor', self.AUTO) # OpenLDAP or Active Directory + self._ldap_server_vendor = kwargs.get("ldap_server_vendor", self.AUTO) # OpenLDAP or Active Directory if self._ldap_server_vendor == self.AUTO: self._determine_ldap_vendor_automatically() - self._page_size = int(kwargs.get('page_size', 500)) + self._page_size = int(kwargs.get("page_size", 500)) # OpenLDAP only fields: - self._groups_filter_class = kwargs.get('group_filter_class', 'posixGroup').strip() - self._group_identifier_attribute = kwargs.get('group_identifier_attribute', 'gidNumber').strip() - self._member_identifier_attribute = kwargs.get('member_identifier_attribute', 'memberUid').strip() - 
self._user_filter_class = kwargs.get('user_filter_class', 'posixAccount')
-        self._user_identifier_attribute = kwargs.get('user_identifier_attribute', 'uid')
-        self._custom_attributes = kwargs.get('custom_attributes', '')
+        self._groups_filter_class = kwargs.get("group_filter_class", "posixGroup").strip()
+        self._group_identifier_attribute = kwargs.get("group_identifier_attribute", "gidNumber").strip()
+        self._member_identifier_attribute = kwargs.get("member_identifier_attribute", "memberUid").strip()
+        self._user_filter_class = kwargs.get("user_filter_class", "posixAccount")
+        self._user_identifier_attribute = kwargs.get("user_identifier_attribute", "uid")
+        self._custom_attributes = kwargs.get("custom_attributes", "")

     @property
     def GROUPS_OBJECT_CLASS(self):
@@ -193,7 +209,7 @@ def CUSTOM_ATTRIBUTE(self):

     def _get_ssl_version(self):
         """
-        Returns the ssl version object according to the user's selection.
+        Returns the ssl version object according to the user's selection.
         """
         version = self.SSL_VERSIONS.get(self._ssl_version)
         if version:
@@ -205,19 +221,17 @@ def _get_ssl_version(self):

     def _get_tls_object(self):
         """
-        Returns a TLS object according to the user's selection of the 'Trust any certificate' checkbox.
+        Returns a TLS object according to the user's selection of the 'Trust any certificate' checkbox.
         """
         if self._verify:  # Trust any certificate is unchecked
             # Trust any certificate = False means that the LDAP server's certificate must be valid -
             # i.e. if the server's certificate is not valid the connection will fail.
-            tls = Tls(validate=ssl.CERT_REQUIRED, ca_certs_file=os.environ.get('SSL_CERT_FILE'),
-                      version=self._get_ssl_version())
+            tls = Tls(validate=ssl.CERT_REQUIRED, ca_certs_file=os.environ.get("SSL_CERT_FILE"), version=self._get_ssl_version())
         else:  # Trust any certificate is checked
             # Trust any certificate = True means that we do not require validation of the LDAP server's certificate,
             # and allow the use of all possible ciphers.
-            tls = Tls(validate=ssl.CERT_NONE, ca_certs_file=None, version=self._get_ssl_version(),
-                      ciphers=self.CIPHERS_STRING)
+            tls = Tls(validate=ssl.CERT_NONE, ca_certs_file=None, version=self._get_ssl_version(), ciphers=self.CIPHERS_STRING)

         return tls

@@ -228,15 +242,18 @@ def _initialize_ldap_server(self):
         :rtype: ldap3.Server
         :return: Initialized ldap server object.
         """
-        if self._connection_type == 'ssl':  # Secure connection (SSL\TLS)
-            demisto.info(f"Initializing LDAP server with SSL/TLS (unsecure: {not self._verify})."
-                         f" port: {self._port or 'default(636)'}")
+        if self._connection_type == "ssl":  # Secure connection (SSL\TLS)
+            demisto.info(
+                f"Initializing LDAP server with SSL/TLS (unsecure: {not self._verify})." f" port: {self._port or 'default(636)'}"
+            )
             tls = self._get_tls_object()
             server = Server(host=self._host, port=self._port, use_ssl=True, tls=tls, connect_timeout=LdapClient.TIMEOUT)

-        elif self._connection_type == 'start tls':  # Secure connection (STARTTLS)
-            demisto.info(f"Initializing LDAP server without a secure connection - Start TLS operation will be executed"
-                         f" during bind. (unsecure: {not self._verify}). port: {self._port or 'default(389)'}")
+        elif self._connection_type == "start tls":  # Secure connection (STARTTLS)
+            demisto.info(
+                f"Initializing LDAP server without a secure connection - Start TLS operation will be executed"
+                f" during bind. (unsecure: {not self._verify}). 
port: {self._port or 'default(389)'}" + ) tls = self._get_tls_object() server = Server(host=self._host, port=self._port, use_ssl=False, tls=tls, connect_timeout=LdapClient.TIMEOUT) @@ -248,85 +265,97 @@ def _initialize_ldap_server(self): def _determine_ldap_vendor_automatically(self): """ - Determines the LDAP vendor automatically + Determines the LDAP vendor automatically """ try: with Connection(self._ldap_server) as conn: - conn.search(search_base='', - search_filter='(objectClass=*)', - search_scope=BASE, - attributes=[ALL_ATTRIBUTES]) + conn.search(search_base="", search_filter="(objectClass=*)", search_scope=BASE, attributes=[ALL_ATTRIBUTES]) entry = conn.entries[0] - if 'objectClass' in entry and 'OpenLDAProotDSE' in entry['objectClass'].value: + if "objectClass" in entry and "OpenLDAProotDSE" in entry["objectClass"].value: self._ldap_server_vendor = self.OPENLDAP else: # There is no way to determine the AD vendor. As we support only 2 vendors, # we can assume the AD vendor by saying that it is not OpenLDAP self._ldap_server_vendor = self.ACTIVE_DIRECTORY - demisto.info(f'Determining LDAP vendor is {self._ldap_server_vendor}') + demisto.info(f"Determining LDAP vendor is {self._ldap_server_vendor}") except Exception as e: - raise DemistoException(f'Could not parse LDAP vendor automatically. Try to choose the vendor manually. ' - f'Error: str({e})') + raise DemistoException( + f"Could not parse LDAP vendor automatically. Try to choose the vendor manually. " f"Error: str({e})" + ) @staticmethod def _parse_ldap_group_entries(ldap_group_entries: list[dict], groups_identifier_attribute: str) -> list[dict]: """ - Returns parsed ldap groups entries. - """ - return [{'DN': ldap_group.get('dn'), 'Attributes': [{'Name': LdapClient.GROUPS_TOKEN, - 'Values': [str(ldap_group.get('attributes', {}).get( - groups_identifier_attribute))]}]} - for ldap_group in ldap_group_entries] + Returns parsed ldap groups entries. + """ + return [ + { + "DN": ldap_group.get("dn"), + "Attributes": [ + { + "Name": LdapClient.GROUPS_TOKEN, + "Values": [str(ldap_group.get("attributes", {}).get(groups_identifier_attribute))], + } + ], + } + for ldap_group in ldap_group_entries + ] @staticmethod def _parse_ldap_group_entries_and_referrals(ldap_group_entries: list[dict]) -> tuple[list[str], list[dict]]: """ - Returns parsed ldap groups entries and referrals. + Returns parsed ldap groups entries and referrals. 
""" referrals: list[str] = [] entries: list[dict] = [] for ldap_group in ldap_group_entries: - if ldap_group_type := ldap_group.get('type'): - if ldap_group_type == 'searchResRef': # a referral - referrals.extend(ldap_group.get('uri') or []) + if ldap_group_type := ldap_group.get("type"): + if ldap_group_type == "searchResRef": # a referral + referrals.extend(ldap_group.get("uri") or []) - elif ldap_group_type == 'searchResEntry': # an entry + elif ldap_group_type == "searchResEntry": # an entry entries.append( - {'DN': ldap_group.get('dn'), - 'Attributes': [{'Name': LdapClient.GROUPS_TOKEN, - 'Values': [str(ldap_group.get('attributes', {}).get(LdapClient.GROUPS_TOKEN))]} - ] - }) + { + "DN": ldap_group.get("dn"), + "Attributes": [ + { + "Name": LdapClient.GROUPS_TOKEN, + "Values": [str(ldap_group.get("attributes", {}).get(LdapClient.GROUPS_TOKEN))], + } + ], + } + ) return referrals, entries - def _parse_and_authenticate_ldap_group_entries_and_referrals(self, ldap_group_entries: list[dict], - password: str) -> tuple[list[str], list[dict]]: + def _parse_and_authenticate_ldap_group_entries_and_referrals( + self, ldap_group_entries: list[dict], password: str + ) -> tuple[list[str], list[dict]]: """ - Returns parsed ldap groups entries and referrals. - Authenticate - performs simple bind operation on the ldap server with the given user and password. + Returns parsed ldap groups entries and referrals. + Authenticate - performs simple bind operation on the ldap server with the given user and password. """ referrals: list[str] = [] entries: list[dict] = [] for entry in ldap_group_entries: - if entry_type := entry.get('type'): - if entry_type == 'searchResRef': # a referral - referrals.extend(entry.get('uri') or []) + if entry_type := entry.get("type"): + if entry_type == "searchResRef": # a referral + referrals.extend(entry.get("uri") or []) - elif entry_type == 'searchResEntry': # an entry + elif entry_type == "searchResEntry": # an entry # (should be only one searchResEntry to authenticate) - entry_dn = entry.get('dn', '') - entry_attributes = entry.get('attributes', {}) + entry_dn = entry.get("dn", "") + entry_attributes = entry.get("attributes", {}) relevant_entry_attributes = [] for attr in entry_attributes: if attr_value := entry_attributes.get(attr, []): if not isinstance(attr_value, list): attr_value = [str(attr_value)] # handle numerical values - relevant_entry_attributes.append({'Name': attr, 'Values': attr_value}) + relevant_entry_attributes.append({"Name": attr, "Values": attr_value}) - entries.append({'DN': entry_dn, 'Attributes': relevant_entry_attributes}) + entries.append({"DN": entry_dn, "Attributes": relevant_entry_attributes}) self.authenticate_ldap_user(entry_dn, password) return referrals, entries @@ -334,37 +363,35 @@ def _parse_and_authenticate_ldap_group_entries_and_referrals(self, ldap_group_en @staticmethod def _parse_ldap_users_groups_entries(ldap_group_entries: list[dict]) -> list[Optional[Any]]: """ - Returns parsed user's group entries. + Returns parsed user's group entries. """ - return [ldap_group.get('dn') for ldap_group in ldap_group_entries] + return [ldap_group.get("dn") for ldap_group in ldap_group_entries] @staticmethod - def _build_entry_for_user(user_groups: str, user_data: dict, - mail_attribute: str, name_attribute: str, phone_attribute: str) -> dict: + def _build_entry_for_user( + user_groups: str, user_data: dict, mail_attribute: str, name_attribute: str, phone_attribute: str + ) -> dict: """ - Returns entry for specific ldap user. 
+ Returns entry for specific ldap user. """ - parsed_ldap_groups = {'Name': LdapClient.GROUPS_MEMBER, 'Values': user_groups} - parsed_group_id = {'Name': LdapClient.GROUPS_PRIMARY_ID, 'Values': user_data['gid_number']} + parsed_ldap_groups = {"Name": LdapClient.GROUPS_MEMBER, "Values": user_groups} + parsed_group_id = {"Name": LdapClient.GROUPS_PRIMARY_ID, "Values": user_data["gid_number"]} attributes = [parsed_ldap_groups, parsed_group_id] - if 'name' in user_data: - attributes.append({'Name': name_attribute, 'Values': [user_data['name']]}) - if 'email' in user_data: - attributes.append({'Name': mail_attribute, 'Values': [user_data['email']]}) - if 'mobile' in user_data: - attributes.append({'Name': phone_attribute, 'Values': [user_data['mobile']]}) + if "name" in user_data: + attributes.append({"Name": name_attribute, "Values": [user_data["name"]]}) + if "email" in user_data: + attributes.append({"Name": mail_attribute, "Values": [user_data["email"]]}) + if "mobile" in user_data: + attributes.append({"Name": phone_attribute, "Values": [user_data["mobile"]]}) - return { - 'DN': user_data['dn'], - 'Attributes': attributes - } + return {"DN": user_data["dn"], "Attributes": attributes} @staticmethod def _is_valid_dn(dn: str, user_identifier_attribute: str) -> tuple[bool, str]: """ - Validates whether given input is valid ldap DN. Returns flag indicator and user's identifier value from DN - (if exists). + Validates whether given input is valid ldap DN. Returns flag indicator and user's identifier value from DN + (if exists). """ try: parsed_dn = parse_dn(dn, strip=False) @@ -372,46 +399,43 @@ def _is_valid_dn(dn: str, user_identifier_attribute: str) -> tuple[bool, str]: if attribute_and_value[0].lower() == user_identifier_attribute.lower(): return True, attribute_and_value[1] - raise Exception(f'OpenLDAP {user_identifier_attribute} attribute was not found in user DN : {dn}') + raise Exception(f"OpenLDAP {user_identifier_attribute} attribute was not found in user DN : {dn}") except LDAPInvalidDnError as e: - demisto.debug(f'OpenLDAP failed parsing DN with error: {str(e)}. Fallback for unique id activated') + demisto.debug(f"OpenLDAP failed parsing DN with error: {e!s}. Fallback for unique id activated") return False, dn except Exception: raise def _fetch_all_groups(self): """ - Fetches all ldap groups under given base DN. + Fetches all ldap groups under given base DN. 
""" auto_bind = self._get_auto_bind_value() with Connection(self._ldap_server, self._username, self._password, auto_bind=auto_bind) as ldap_conn: - demisto.info(f'LDAP Connection Details: {ldap_conn}') + demisto.info(f"LDAP Connection Details: {ldap_conn}") if self._ldap_server_vendor == self.ACTIVE_DIRECTORY: - search_filter = '(&(objectClass=group)(objectCategory=group))' + search_filter = "(&(objectClass=group)(objectCategory=group))" - referrals, entries = self._get_ldap_groups_entries_and_referrals_ad(ldap_conn=ldap_conn, - search_filter=search_filter) + referrals, entries = self._get_ldap_groups_entries_and_referrals_ad( + ldap_conn=ldap_conn, search_filter=search_filter + ) - return { - 'Controls': None, - 'Referrals': referrals, - 'Entries': entries - } + return {"Controls": None, "Referrals": referrals, "Entries": entries} else: # ldap server is OpenLDAP - search_filter = f'(objectClass={self.GROUPS_OBJECT_CLASS})' - ldap_group_entries = ldap_conn.extend.standard.paged_search(search_base=self._base_dn, - search_filter=search_filter, - attributes=[ - self.GROUPS_IDENTIFIER_ATTRIBUTE], - paged_size=self._page_size) + search_filter = f"(objectClass={self.GROUPS_OBJECT_CLASS})" + ldap_group_entries = ldap_conn.extend.standard.paged_search( + search_base=self._base_dn, + search_filter=search_filter, + attributes=[self.GROUPS_IDENTIFIER_ATTRIBUTE], + paged_size=self._page_size, + ) return { - 'Controls': None, - 'Referrals': ldap_conn.result.get('referrals'), - 'Entries': LdapClient._parse_ldap_group_entries(ldap_group_entries, - self.GROUPS_IDENTIFIER_ATTRIBUTE) + "Controls": None, + "Referrals": ldap_conn.result.get("referrals"), + "Entries": LdapClient._parse_ldap_group_entries(ldap_group_entries, self.GROUPS_IDENTIFIER_ATTRIBUTE), } def _get_formatted_custom_attributes(self) -> str: @@ -419,26 +443,31 @@ def _get_formatted_custom_attributes(self) -> str: :return: custom attributes parsed to the form (att_name1=value1)(attname2=value2) """ if not self.CUSTOM_ATTRIBUTE: - return '' - formatted_attributes = '' - for att in self.CUSTOM_ATTRIBUTE.split(','): - if len(att.split('=')) != 2: - raise Exception(f'User defined attributes must be of the form' - f' \"attrA=valA,attrB=valB,...\", but got: {self.CUSTOM_ATTRIBUTE}') - formatted_attributes = formatted_attributes + f'({att})' + return "" + formatted_attributes = "" + for att in self.CUSTOM_ATTRIBUTE.split(","): + if len(att.split("=")) != 2: + raise Exception( + f"User defined attributes must be of the form" + f' "attrA=valA,attrB=valB,...", but got: {self.CUSTOM_ATTRIBUTE}' + ) + formatted_attributes = formatted_attributes + f"({att})" return formatted_attributes - def _get_ldap_groups_entries_and_referrals_ad(self, ldap_conn: Connection, - search_filter: str) -> tuple[list[str], list[dict]]: + def _get_ldap_groups_entries_and_referrals_ad( + self, ldap_conn: Connection, search_filter: str + ) -> tuple[list[str], list[dict]]: """ - Returns parsed ldap groups entries and referrals. + Returns parsed ldap groups entries and referrals. 
""" - ldap_group_entries = ldap_conn.extend.standard.paged_search(search_base=self._base_dn, - search_filter=search_filter, - attributes=[LdapClient.GROUPS_TOKEN], - paged_size=self._page_size, - generator=False) + ldap_group_entries = ldap_conn.extend.standard.paged_search( + search_base=self._base_dn, + search_filter=search_filter, + attributes=[LdapClient.GROUPS_TOKEN], + paged_size=self._page_size, + generator=False, + ) referrals, entries = LdapClient._parse_ldap_group_entries_and_referrals(ldap_group_entries) @@ -449,84 +478,81 @@ def _create_search_filter(self, filter_prefix: str) -> str: def _fetch_specific_groups(self, specific_groups: str) -> dict: """ - Fetches specific ldap groups under given base DN. + Fetches specific ldap groups under given base DN. """ auto_bind = self._get_auto_bind_value() dn_list = [group.strip() for group in argToList(specific_groups, separator="#")] with Connection(self._ldap_server, self._username, self._password, auto_bind=auto_bind) as ldap_conn: - demisto.info(f'LDAP Connection Details: {ldap_conn}') + demisto.info(f"LDAP Connection Details: {ldap_conn}") if self._ldap_server_vendor == self.ACTIVE_DIRECTORY: - dns_filter = '' + dns_filter = "" for dn in dn_list: - dns_filter += f'(distinguishedName={dn})' - search_filter = f'(&(objectClass=group)(objectCategory=group)(|{dns_filter}))' + dns_filter += f"(distinguishedName={dn})" + search_filter = f"(&(objectClass=group)(objectCategory=group)(|{dns_filter}))" - referrals, entries = self._get_ldap_groups_entries_and_referrals_ad(ldap_conn=ldap_conn, - search_filter=search_filter) + referrals, entries = self._get_ldap_groups_entries_and_referrals_ad( + ldap_conn=ldap_conn, search_filter=search_filter + ) - return { - 'Controls': None, - 'Referrals': referrals, - 'Entries': entries - } + return {"Controls": None, "Referrals": referrals, "Entries": entries} else: # ldap server is OpenLDAP parsed_ldap_entries = [] for dn in dn_list: - search_filter = f'(objectClass={self.GROUPS_OBJECT_CLASS})' - ldap_group_entries = ldap_conn.extend.standard.paged_search(search_base=dn, - search_filter=search_filter, - attributes=[ - self.GROUPS_IDENTIFIER_ATTRIBUTE], - paged_size=self._page_size, - search_scope=BASE) + search_filter = f"(objectClass={self.GROUPS_OBJECT_CLASS})" + ldap_group_entries = ldap_conn.extend.standard.paged_search( + search_base=dn, + search_filter=search_filter, + attributes=[self.GROUPS_IDENTIFIER_ATTRIBUTE], + paged_size=self._page_size, + search_scope=BASE, + ) parsed_ldap_entries.append( - self._parse_ldap_group_entries(ldap_group_entries, self.GROUPS_IDENTIFIER_ATTRIBUTE)) + self._parse_ldap_group_entries(ldap_group_entries, self.GROUPS_IDENTIFIER_ATTRIBUTE) + ) - return { - 'Controls': None, - 'Referrals': ldap_conn.result.get('referrals'), - 'Entries': parsed_ldap_entries - } + return {"Controls": None, "Referrals": ldap_conn.result.get("referrals"), "Entries": parsed_ldap_entries} @staticmethod def _get_ad_username(logon_name: str) -> str: """ - Gets a User logon name (the username that is used for log in to XSOAR) - and returns the Active Directory username. + Gets a User logon name (the username that is used for log in to XSOAR) + and returns the Active Directory username. 
""" ad_username = logon_name - if '\\' in logon_name: - ad_username = logon_name.split('\\')[1] - elif '@' in logon_name: - ad_username = logon_name.split('@')[0] + if "\\" in logon_name: + ad_username = logon_name.split("\\")[1] + elif "@" in logon_name: + ad_username = logon_name.split("@")[0] return ad_username @staticmethod def _has_wildcards_in_user_logon(logon_name: str): """ - Gets a User logon name (the username that is used for log in to XSOAR) and checks if it includes wildcards. - Raises exception if wildcards are found in the logon name. + Gets a User logon name (the username that is used for log in to XSOAR) and checks if it includes wildcards. + Raises exception if wildcards are found in the logon name. - Background: - LDAP servers support the use of wildcards primarily through the '*' and the '?' symbols for searching entries in - the directory. - Since the authentication process relies on exact username for login, the login attempt will be denied if the - user logon name includes wildcards. + Background: + LDAP servers support the use of wildcards primarily through the '*' and the '?' symbols for searching entries in + the directory. + Since the authentication process relies on exact username for login, the login attempt will be denied if the + user logon name includes wildcards. - Note: - Wildcards are illegal characters for active directory logon names, however they are valid characters - for OpenLdap usernames. - Since we believe usernames include '*' or '?' symbols are rare, we fail the authentication when a user logon name - includes one of them. + Note: + Wildcards are illegal characters for active directory logon names, however they are valid characters + for OpenLdap usernames. + Since we believe usernames include '*' or '?' symbols are rare, we fail the authentication when a user logon name + includes one of them. """ - err_msg = f"Wildcards were detected in the user logon name - Input Username: '{logon_name}'."\ + err_msg = ( + f"Wildcards were detected in the user logon name - Input Username: '{logon_name}'." f" Wildcards are not permitted for user authentication." + ) - wildcards = ['*', '?'] + wildcards = ["*", "?"] for wildcard in wildcards: if wildcard in logon_name: demisto.debug(f"LDAP Authentication - User login attempt failed - {err_msg}") @@ -534,93 +560,101 @@ def _has_wildcards_in_user_logon(logon_name: str): def _get_auto_bind_value(self) -> str: """ - Returns the proper auto bind value according to the desirable connection type. - The 'TLS' in the auto_bind parameter refers to the STARTTLS LDAP operation, that can be performed only on a - cleartext connection (unsecure connection - port 389). + Returns the proper auto bind value according to the desirable connection type. + The 'TLS' in the auto_bind parameter refers to the STARTTLS LDAP operation, that can be performed only on a + cleartext connection (unsecure connection - port 389). - If the Client's connection type is Start TLS - the secure level will be upgraded to TLS during the - connection bind itself and thus we use the AUTO_BIND_TLS_BEFORE_BIND constant. + If the Client's connection type is Start TLS - the secure level will be upgraded to TLS during the + connection bind itself and thus we use the AUTO_BIND_TLS_BEFORE_BIND constant. - If the Client's connection type is SSL - the connection is already secured (server was initialized with - use_ssl=True and port 636) and therefore we use the AUTO_BIND_NO_TLS constant. 
+ If the Client's connection type is SSL - the connection is already secured (server was initialized with + use_ssl=True and port 636) and therefore we use the AUTO_BIND_NO_TLS constant. - Otherwise, the Client's connection type is None - the connection is unsecured and should stay unsecured, - thus we use the AUTO_BIND_NO_TLS constant here as well. + Otherwise, the Client's connection type is None - the connection is unsecured and should stay unsecured, + thus we use the AUTO_BIND_NO_TLS constant here as well. """ - if self._connection_type == 'start tls': + if self._connection_type == "start tls": auto_bind = AUTO_BIND_TLS_BEFORE_BIND else: auto_bind = AUTO_BIND_NO_TLS return auto_bind - def get_ldap_groups(self, specific_group: str = '') -> dict: + def get_ldap_groups(self, specific_group: str = "") -> dict: """ - Implements ldap groups command. + Implements ldap groups command. """ instance_name = demisto.integrationInstance() if not self._fetch_groups and not specific_group: - demisto.info(f'Instance [{instance_name}] configured not to fetch groups') + demisto.info(f"Instance [{instance_name}] configured not to fetch groups") sys.exit() - searched_results = self._fetch_specific_groups( - specific_group) if not self._fetch_groups else self._fetch_all_groups() + searched_results = self._fetch_specific_groups(specific_group) if not self._fetch_groups else self._fetch_all_groups() demisto.info(f'Retrieved {len(searched_results["Entries"])} groups from LDAP Authentication {instance_name}') return searched_results def authenticate_ldap_user(self, username: str, password: str) -> str: """ - Performs simple bind operation on ldap server. + Performs simple bind operation on ldap server. """ auto_bind = self._get_auto_bind_value() ldap_conn = Connection(server=self._ldap_server, user=username, password=password, auto_bind=auto_bind) - demisto.info(f'LDAP Connection Details: {ldap_conn}') + demisto.info(f"LDAP Connection Details: {ldap_conn}") if ldap_conn.bound: ldap_conn.unbind() return "Done" else: - raise Exception(f"LDAP Authentication - authentication connection failed," - f" server type is: {self._ldap_server_vendor}") + raise Exception( + f"LDAP Authentication - authentication connection failed," f" server type is: {self._ldap_server_vendor}" + ) def search_user_data(self, username: str, attributes: list, search_user_by_dn: bool = False) -> tuple: """ - Returns data for given ldap user. - Raises error if the user is not found in the ldap server. + Returns data for given ldap user. + Raises error if the user is not found in the ldap server. 
""" auto_bind = self._get_auto_bind_value() with Connection(self._ldap_server, self._username, self._password, auto_bind=auto_bind) as ldap_conn: - demisto.info(f'LDAP Connection Details: {ldap_conn}') + demisto.info(f"LDAP Connection Details: {ldap_conn}") if search_user_by_dn: - search_filter = f'(&(objectClass={self.USER_OBJECT_CLASS})' + \ - self._get_formatted_custom_attributes() + ')' - ldap_conn.search(search_base=username, search_filter=search_filter, size_limit=1, - attributes=attributes, search_scope=BASE) + search_filter = f"(&(objectClass={self.USER_OBJECT_CLASS})" + self._get_formatted_custom_attributes() + ")" + ldap_conn.search( + search_base=username, search_filter=search_filter, size_limit=1, attributes=attributes, search_scope=BASE + ) else: custom_attributes = self._get_formatted_custom_attributes() - search_filter = (f'(&(objectClass={self.USER_OBJECT_CLASS})' - f'({self.USER_IDENTIFIER_ATTRIBUTE}={username}){custom_attributes})') - ldap_conn.search(search_base=self._base_dn, search_filter=search_filter, size_limit=1, - attributes=attributes) + search_filter = ( + f"(&(objectClass={self.USER_OBJECT_CLASS})" + f"({self.USER_IDENTIFIER_ATTRIBUTE}={username}){custom_attributes})" + ) + ldap_conn.search(search_base=self._base_dn, search_filter=search_filter, size_limit=1, attributes=attributes) if not ldap_conn.entries: raise Exception("LDAP Authentication - LDAP user not found") entry = ldap_conn.entries[0] - referrals = ldap_conn.result.get('referrals') + referrals = ldap_conn.result.get("referrals") - if self.GROUPS_IDENTIFIER_ATTRIBUTE not in entry \ - or not entry[self.GROUPS_IDENTIFIER_ATTRIBUTE].value: + if self.GROUPS_IDENTIFIER_ATTRIBUTE not in entry or not entry[self.GROUPS_IDENTIFIER_ATTRIBUTE].value: raise Exception(f"LDAP Authentication - OpenLDAP user's {self.GROUPS_IDENTIFIER_ATTRIBUTE} not found") return entry, referrals - def get_user_data(self, username: str, pull_name: bool, pull_mail: bool, pull_phone: bool, - name_attribute: str, mail_attribute: str, phone_attribute: str, - search_user_by_dn: bool = False) -> dict: + def get_user_data( + self, + username: str, + pull_name: bool, + pull_mail: bool, + pull_phone: bool, + name_attribute: str, + mail_attribute: str, + phone_attribute: str, + search_user_by_dn: bool = False, + ) -> dict: """ - Returns data for given ldap user. + Returns data for given ldap user. 
""" attributes = [self.GROUPS_IDENTIFIER_ATTRIBUTE] @@ -634,70 +668,103 @@ def get_user_data(self, username: str, pull_name: bool, pull_mail: bool, pull_ph user_data_entry, referrals = self.search_user_data(username, attributes, search_user_by_dn) - user_data = {'dn': user_data_entry.entry_dn, - 'gid_number': [str(user_data_entry[self.GROUPS_IDENTIFIER_ATTRIBUTE].value)], - 'referrals': referrals} + user_data = { + "dn": user_data_entry.entry_dn, + "gid_number": [str(user_data_entry[self.GROUPS_IDENTIFIER_ATTRIBUTE].value)], + "referrals": referrals, + } if name_attribute in user_data_entry and user_data_entry[name_attribute].value: - user_data['name'] = user_data_entry[name_attribute].value + user_data["name"] = user_data_entry[name_attribute].value if mail_attribute in user_data_entry and user_data_entry[mail_attribute].value: - user_data['email'] = user_data_entry[mail_attribute].value + user_data["email"] = user_data_entry[mail_attribute].value if phone_attribute in user_data_entry and user_data_entry[phone_attribute].value: - user_data['mobile'] = user_data_entry[phone_attribute].value + user_data["mobile"] = user_data_entry[phone_attribute].value return user_data def get_user_groups(self, user_identifier: str): """ - Returns user's group. + Returns user's group. """ auto_bind = self._get_auto_bind_value() with Connection(self._ldap_server, self._username, self._password, auto_bind=auto_bind) as ldap_conn: - demisto.info(f'LDAP Connection Details: {ldap_conn}') - - search_filter = (f'(&(objectClass={self.GROUPS_OBJECT_CLASS})' - f'({self.GROUPS_MEMBERSHIP_IDENTIFIER_ATTRIBUTE}={user_identifier}))') - ldap_group_entries = ldap_conn.extend.standard.paged_search(search_base=self._base_dn, - search_filter=search_filter, - attributes=[ - self.GROUPS_IDENTIFIER_ATTRIBUTE], - paged_size=self._page_size) + demisto.info(f"LDAP Connection Details: {ldap_conn}") + + search_filter = ( + f"(&(objectClass={self.GROUPS_OBJECT_CLASS})" + f"({self.GROUPS_MEMBERSHIP_IDENTIFIER_ATTRIBUTE}={user_identifier}))" + ) + ldap_group_entries = ldap_conn.extend.standard.paged_search( + search_base=self._base_dn, + search_filter=search_filter, + attributes=[self.GROUPS_IDENTIFIER_ATTRIBUTE], + paged_size=self._page_size, + ) return LdapClient._parse_ldap_users_groups_entries(ldap_group_entries) - def authenticate_and_roles_openldap(self, username: str, password: str, pull_name: bool = True, - pull_mail: bool = True, pull_phone: bool = False, mail_attribute: str = 'mail', - name_attribute: str = 'name', phone_attribute: str = 'mobile') -> dict: + def authenticate_and_roles_openldap( + self, + username: str, + password: str, + pull_name: bool = True, + pull_mail: bool = True, + pull_phone: bool = False, + mail_attribute: str = "mail", + name_attribute: str = "name", + phone_attribute: str = "mobile", + ) -> dict: """ - Implements authenticate and roles command for OpenLDAP. + Implements authenticate and roles command for OpenLDAP. 
""" search_user_by_dn, user_identifier = LdapClient._is_valid_dn(username, self.USER_IDENTIFIER_ATTRIBUTE) - user_data = self.get_user_data(username=username, search_user_by_dn=search_user_by_dn, pull_name=pull_name, - pull_mail=pull_mail, pull_phone=pull_phone, mail_attribute=mail_attribute, - name_attribute=name_attribute, phone_attribute=phone_attribute) + user_data = self.get_user_data( + username=username, + search_user_by_dn=search_user_by_dn, + pull_name=pull_name, + pull_mail=pull_mail, + pull_phone=pull_phone, + mail_attribute=mail_attribute, + name_attribute=name_attribute, + phone_attribute=phone_attribute, + ) - self.authenticate_ldap_user(user_data['dn'], password) + self.authenticate_ldap_user(user_data["dn"], password) user_groups = self.get_user_groups(user_identifier) return { - 'Controls': None, - 'Referrals': user_data['referrals'], - 'Entries': [LdapClient._build_entry_for_user(user_groups=user_groups, user_data=user_data, - mail_attribute=mail_attribute, name_attribute=name_attribute, - phone_attribute=phone_attribute)] + "Controls": None, + "Referrals": user_data["referrals"], + "Entries": [ + LdapClient._build_entry_for_user( + user_groups=user_groups, + user_data=user_data, + mail_attribute=mail_attribute, + name_attribute=name_attribute, + phone_attribute=phone_attribute, + ) + ], } - def authenticate_and_roles_active_directory(self, username: str, password: str, pull_name: bool = True, - pull_mail: bool = True, pull_phone: bool = False, - mail_attribute: str = 'mail', name_attribute: str = 'name', - phone_attribute: str = 'mobile') -> dict: + def authenticate_and_roles_active_directory( + self, + username: str, + password: str, + pull_name: bool = True, + pull_mail: bool = True, + pull_phone: bool = False, + mail_attribute: str = "mail", + name_attribute: str = "name", + phone_attribute: str = "mobile", + ) -> dict: """ - Implements authenticate and roles command for Active Directory. + Implements authenticate and roles command for Active Directory. 
""" ad_username = self._get_ad_username(username) auto_bind = self._get_auto_bind_value() with Connection(self._ldap_server, self._username, self._password, auto_bind=auto_bind) as ldap_conn: - demisto.info(f'LDAP Connection Details: {ldap_conn}') + demisto.info(f"LDAP Connection Details: {ldap_conn}") attributes = [self.GROUPS_MEMBER, self.GROUPS_PRIMARY_ID] if pull_name: @@ -707,88 +774,103 @@ def authenticate_and_roles_active_directory(self, username: str, password: str, if pull_phone: attributes.append(phone_attribute) - search_filter = f'(|(sAMAccountName={ad_username})(userPrincipalName={username}))' - ldap_conn_entries = ldap_conn.extend.standard.paged_search(search_base=self._base_dn, - search_filter=search_filter, - attributes=attributes, - paged_size=self._page_size, - generator=False) + search_filter = f"(|(sAMAccountName={ad_username})(userPrincipalName={username}))" + ldap_conn_entries = ldap_conn.extend.standard.paged_search( + search_base=self._base_dn, + search_filter=search_filter, + attributes=attributes, + paged_size=self._page_size, + generator=False, + ) - referrals, entries = \ - self._parse_and_authenticate_ldap_group_entries_and_referrals(ldap_group_entries=ldap_conn_entries, - password=password) + referrals, entries = self._parse_and_authenticate_ldap_group_entries_and_referrals( + ldap_group_entries=ldap_conn_entries, password=password + ) if not entries: # if the user not exist in AD the query returns no entries raise Exception("LDAP Authentication - LDAP user not found") - return { - 'Controls': [], - 'Referrals': referrals, - 'Entries': entries - } + return {"Controls": [], "Referrals": referrals, "Entries": entries} - def authenticate_and_roles(self, username: str, password: str, pull_name: bool = True, pull_mail: bool = True, - pull_phone: bool = False, mail_attribute: str = 'mail', name_attribute: str = 'name', - phone_attribute: str = 'mobile') -> dict: + def authenticate_and_roles( + self, + username: str, + password: str, + pull_name: bool = True, + pull_mail: bool = True, + pull_phone: bool = False, + mail_attribute: str = "mail", + name_attribute: str = "name", + phone_attribute: str = "mobile", + ) -> dict: """ - Implements authenticate and roles command. + Implements authenticate and roles command. 
""" self._has_wildcards_in_user_logon(username) # fail login attempt when wildcards are used if self._ldap_server_vendor == self.ACTIVE_DIRECTORY: - return self.authenticate_and_roles_active_directory(username=username, password=password, - pull_name=pull_name, pull_mail=pull_mail, - pull_phone=pull_phone, mail_attribute=mail_attribute, - name_attribute=name_attribute, - phone_attribute=phone_attribute) + return self.authenticate_and_roles_active_directory( + username=username, + password=password, + pull_name=pull_name, + pull_mail=pull_mail, + pull_phone=pull_phone, + mail_attribute=mail_attribute, + name_attribute=name_attribute, + phone_attribute=phone_attribute, + ) else: # ldap server is OpenLDAP - return self.authenticate_and_roles_openldap(username=username, password=password, - pull_name=pull_name, pull_mail=pull_mail, pull_phone=pull_phone, - mail_attribute=mail_attribute, name_attribute=name_attribute, - phone_attribute=phone_attribute) + return self.authenticate_and_roles_openldap( + username=username, + password=password, + pull_name=pull_name, + pull_mail=pull_mail, + pull_phone=pull_phone, + mail_attribute=mail_attribute, + name_attribute=name_attribute, + phone_attribute=phone_attribute, + ) def entries_search_command(self, args: dict[str, Any]) -> CommandResults: """ Implements entries search command. """ - search_params = {'search_base': args.get('search_base', self._base_dn), - 'search_scope': args.get('search_scope', SUBTREE), - 'search_filter': create_entries_search_filter(args), - 'attributes': get_search_attributes(args.get('attributes', 'all')), - } + search_params = { + "search_base": args.get("search_base", self._base_dn), + "search_scope": args.get("search_scope", SUBTREE), + "search_filter": create_entries_search_filter(args), + "attributes": get_search_attributes(args.get("attributes", "all")), + } auto_bind = self._get_auto_bind_value() with Connection(self._ldap_server, self._username, self._password, auto_bind=auto_bind) as ldap_conn: - if page := arg_to_number(args.get('page')): - page_size = int(args.get('page_size', 50)) + if page := arg_to_number(args.get("page")): + page_size = int(args.get("page_size", 50)) if page_size > MAX_PAGE_SIZE: - raise Exception('The page size must be less than or equal to 2000') + raise Exception("The page size must be less than or equal to 2000") else: page = 1 - page_size = int(args.get('limit', 50)) - entries_paged_search(connection=ldap_conn, - search_params=search_params, - page=page, - page_size=page_size) + page_size = int(args.get("limit", 50)) + entries_paged_search(connection=ldap_conn, search_params=search_params, page=page, page_size=page_size) outputs = [ - {**json.loads(entry.entry_to_json()).get('attributes', {}), 'dn': json.loads(entry.entry_to_json()).get('dn')} + {**json.loads(entry.entry_to_json()).get("attributes", {}), "dn": json.loads(entry.entry_to_json()).get("dn")} for entry in ldap_conn.entries ] total = len(outputs) return CommandResults( - outputs_prefix='LDAP.Search', + outputs_prefix="LDAP.Search", outputs=outputs, raw_response=outputs, - readable_output=tableToMarkdown(f'LDAP Entries Search Results - {total} Found', outputs), + readable_output=tableToMarkdown(f"LDAP Entries Search Results - {total} Found", outputs), ) def ad_authenticate(self, username: str, password: str) -> str: """ - Search for the user in the ldap server. - Performs simple bind operation on ldap server. + Search for the user in the ldap server. + Performs simple bind operation on ldap server. 
""" self._has_wildcards_in_user_logon(username) # fail authentication when wildcards are used @@ -802,15 +884,15 @@ def ad_authenticate(self, username: str, password: str) -> str: def test_module(self): """ - Basic test connection and validation of the Ldap integration. + Basic test connection and validation of the Ldap integration. """ - build_number = get_demisto_version().get('buildNumber', LdapClient.DEV_BUILD_NUMBER) + build_number = get_demisto_version().get("buildNumber", LdapClient.DEV_BUILD_NUMBER) self._get_formatted_custom_attributes() - if build_number != LdapClient.DEV_BUILD_NUMBER \ - and int(build_number) < LdapClient.SUPPORTED_BUILD_NUMBER: - raise Exception(f'LDAP Authentication integration is supported from build number:' - f' {LdapClient.SUPPORTED_BUILD_NUMBER}') + if build_number != LdapClient.DEV_BUILD_NUMBER and int(build_number) < LdapClient.SUPPORTED_BUILD_NUMBER: + raise Exception( + f"LDAP Authentication integration is supported from build number:" f" {LdapClient.SUPPORTED_BUILD_NUMBER}" + ) if self._ldap_server_vendor == self.OPENLDAP: try: @@ -818,69 +900,74 @@ def test_module(self): except LDAPInvalidDnError: raise Exception("Invalid credentials input. User DN must be a full DN.") self.authenticate_ldap_user(username=self._username, password=self._password) - return 'ok' + return "ok" def main(): # pragma: no coverage - """ COMMANDS MANAGER / SWITCH PANEL """ + """COMMANDS MANAGER / SWITCH PANEL""" params = demisto.params() command = demisto.command() args = demisto.args() - demisto.info(f'Command being called is {command}') + demisto.info(f"Command being called is {command}") try: # initialized LDAP Authentication client client = LdapClient(params) - if command == 'test-module': + if command == "test-module": test_result = client.test_module() return_results(test_result) - elif command == 'ad-authenticate': - username = args.get('username') - password = args.get('password') + elif command == "ad-authenticate": + username = args.get("username") + password = args.get("password") authentication_result = client.ad_authenticate(username, password) - demisto.info(f'ad-authenticate command - authentication result: {authentication_result}') + demisto.info(f"ad-authenticate command - authentication result: {authentication_result}") return_results(authentication_result) - elif command == 'ad-groups': - specific_group = args.get('specific-groups') + elif command == "ad-groups": + specific_group = args.get("specific-groups") searched_results = client.get_ldap_groups(specific_group) - demisto.info(f'ad-groups command - searched results: {searched_results}') + demisto.info(f"ad-groups command - searched results: {searched_results}") return_results(searched_results) - elif command == 'ad-authenticate-and-roles': - username = args.get('username') - password = args.get('password') - mail_attribute = args.get('attribute-mail', 'mail') - name_attribute = args.get('attribute-name', 'name') - phone_attribute = args.get('attribute-phone', 'mobile') - pull_name = argToBoolean(args.get('attribute-name-pull', True)) - pull_mail = argToBoolean(args.get('attribute-mail-pull', True)) - pull_phone = argToBoolean(args.get('attribute-phone-pull', False)) - entry_result = client.authenticate_and_roles(username=username, password=password, pull_name=pull_name, - pull_mail=pull_mail, pull_phone=pull_phone, - mail_attribute=mail_attribute, name_attribute=name_attribute, - phone_attribute=phone_attribute) - demisto.info(f'ad-authenticate-and-roles command - entry results: {entry_result}') + elif 
command == "ad-authenticate-and-roles": + username = args.get("username") + password = args.get("password") + mail_attribute = args.get("attribute-mail", "mail") + name_attribute = args.get("attribute-name", "name") + phone_attribute = args.get("attribute-phone", "mobile") + pull_name = argToBoolean(args.get("attribute-name-pull", True)) + pull_mail = argToBoolean(args.get("attribute-mail-pull", True)) + pull_phone = argToBoolean(args.get("attribute-phone-pull", False)) + entry_result = client.authenticate_and_roles( + username=username, + password=password, + pull_name=pull_name, + pull_mail=pull_mail, + pull_phone=pull_phone, + mail_attribute=mail_attribute, + name_attribute=name_attribute, + phone_attribute=phone_attribute, + ) + demisto.info(f"ad-authenticate-and-roles command - entry results: {entry_result}") return_results(entry_result) - elif command == 'ad-entries-search': + elif command == "ad-entries-search": return_results(client.entries_search_command(args)) else: - raise NotImplementedError(f'Command {command} is not implemented') + raise NotImplementedError(f"Command {command} is not implemented") # Log exceptions except Exception as e: msg = str(e) if isinstance(e, LDAPBindError): - msg = f'LDAP Authentication - authentication connection failed. Additional details: {msg}' + msg = f"LDAP Authentication - authentication connection failed. Additional details: {msg}" elif isinstance(e, LDAPSocketOpenError | LDAPSocketReceiveError | LDAPStartTLSError): - msg = f'LDAP Authentication - Failed to connect to LDAP server. Additional details: {msg}' - if not params.get('insecure', False): + msg = f"LDAP Authentication - Failed to connect to LDAP server. Additional details: {msg}" + if not params.get("insecure", False): msg += ' Try using: "Trust any certificate" option.\n' elif isinstance(e, LDAPInvalidPortError): - msg = 'LDAP Authentication - Not valid ldap server input.' \ - ' Check that server input is of form: ip or ldap://ip' + msg = "LDAP Authentication - Not valid ldap server input." 
" Check that server input is of form: ip or ldap://ip" return_error(str(msg)) -if __name__ in ['__main__', '__builtin__', 'builtins']: +if __name__ in ["__main__", "__builtin__", "builtins"]: main() diff --git a/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP_test.py b/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP_test.py index a8abc5548726..9e36ad33adea 100644 --- a/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP_test.py +++ b/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP_test.py @@ -1,9 +1,10 @@ """ Tests module for the LDAP Authentication integration """ -from unittest.mock import MagicMock, patch -import unittest + import json +import unittest +from unittest.mock import MagicMock, patch import pytest from OpenLDAP import LdapClient, entries_paged_search @@ -11,14 +12,13 @@ class Entry: def __init__(self): - self.value = 'OpenLDAProotDSE' + self.value = "OpenLDAProotDSE" class Connection: - def __init__(self, vendor): if vendor == OPENLDAP: - self.entries = [{'objectClass': Entry()}] + self.entries = [{"objectClass": Entry()}] else: self.entries = [{}] @@ -33,51 +33,60 @@ def search(search_base, search_filter, search_scope, attributes): return None -OPENLDAP = 'OpenLDAP' -ACTIVE_DIRECTORY = 'Active Directory' +OPENLDAP = "OpenLDAP" +ACTIVE_DIRECTORY = "Active Directory" class TestLDAPClient: - @pytest.mark.parametrize('vendor', [OPENLDAP, ACTIVE_DIRECTORY]) + @pytest.mark.parametrize("vendor", [OPENLDAP, ACTIVE_DIRECTORY]) def test_ldap_vendor(self, mocker, vendor): - mocker.patch('OpenLDAP.Connection', return_value=Connection(vendor)) - client = LdapClient({'ldap_server_vendor': 'Auto', 'host': 'server_ip'}) + mocker.patch("OpenLDAP.Connection", return_value=Connection(vendor)) + client = LdapClient({"ldap_server_vendor": "Auto", "host": "server_ip"}) assert client._ldap_server_vendor == vendor class TestsActiveDirectory: """ - Contains unit tests for functions that deal with Active directory server only. + Contains unit tests for functions that deal with Active directory server only. """ def test_parse_ldap_group_entries_and_referrals(self): """ - Given: - - A raw response of a groups paged search in an Active directory server - (received during the execution of ad-groups command). - When: - - Running the 'parse_ldap_group_entries_and_referrals()' function. - Then: - - Verify that the raw response parsed correctly and that the referrals and entries lists - returned as expected. + Given: + - A raw response of a groups paged search in an Active directory server + (received during the execution of ad-groups command). + When: + - Running the 'parse_ldap_group_entries_and_referrals()' function. + Then: + - Verify that the raw response parsed correctly and that the referrals and entries lists + returned as expected. 
""" - client = LdapClient({'ldap_server_vendor': 'Active Directory', 'host': 'server_ip'}) + client = LdapClient({"ldap_server_vendor": "Active Directory", "host": "server_ip"}) ldap_group_entries = [ - {'uri': ['ldap://domain1/CN=test,DC=demisto,DC=test'], 'type': 'searchResRef'}, - {'uri': ['ldap://domain2/DC=test,DC=demisto,DC=test'], 'type': 'searchResRef'}, - {'raw_dn': b'CN=test,CN=Users,DC=demisto,DC=test', 'dn': 'CN=test,CN=Users,DC=demisto,DC=test', - 'raw_attributes': {'primaryGroupToken': [b'11111']}, 'attributes': {'primaryGroupToken': 11111}, - 'type': 'searchResEntry'}, - {'raw_dn': b'CN=DisabledUsers,DC=demisto,DC=test', 'dn': 'CN=DisabledUsers,DC=demisto,DC=test', - 'raw_attributes': {'primaryGroupToken': [b'22222']}, 'attributes': {'primaryGroupToken': 22222}, - 'type': 'searchResEntry'}] - - expected_referrals = ['ldap://domain1/CN=test,DC=demisto,DC=test', 'ldap://domain2/DC=test,DC=demisto,DC=test'] - expected_entries = [{'DN': 'CN=test,CN=Users,DC=demisto,DC=test', - 'Attributes': [{'Name': 'primaryGroupToken', 'Values': ['11111']}]}, - {'DN': 'CN=DisabledUsers,DC=demisto,DC=test', - 'Attributes': [{'Name': 'primaryGroupToken', 'Values': ['22222']}]}] + {"uri": ["ldap://domain1/CN=test,DC=demisto,DC=test"], "type": "searchResRef"}, + {"uri": ["ldap://domain2/DC=test,DC=demisto,DC=test"], "type": "searchResRef"}, + { + "raw_dn": b"CN=test,CN=Users,DC=demisto,DC=test", + "dn": "CN=test,CN=Users,DC=demisto,DC=test", + "raw_attributes": {"primaryGroupToken": [b"11111"]}, + "attributes": {"primaryGroupToken": 11111}, + "type": "searchResEntry", + }, + { + "raw_dn": b"CN=DisabledUsers,DC=demisto,DC=test", + "dn": "CN=DisabledUsers,DC=demisto,DC=test", + "raw_attributes": {"primaryGroupToken": [b"22222"]}, + "attributes": {"primaryGroupToken": 22222}, + "type": "searchResEntry", + }, + ] + + expected_referrals = ["ldap://domain1/CN=test,DC=demisto,DC=test", "ldap://domain2/DC=test,DC=demisto,DC=test"] + expected_entries = [ + {"DN": "CN=test,CN=Users,DC=demisto,DC=test", "Attributes": [{"Name": "primaryGroupToken", "Values": ["11111"]}]}, + {"DN": "CN=DisabledUsers,DC=demisto,DC=test", "Attributes": [{"Name": "primaryGroupToken", "Values": ["22222"]}]}, + ] referrals, entries = client._parse_ldap_group_entries_and_referrals(ldap_group_entries) @@ -86,90 +95,105 @@ def test_parse_ldap_group_entries_and_referrals(self): def test_parse_and_authenticate_ldap_group_entries_and_referrals(self, mocker): """ - Given: - - A raw response of a user paged search in an Active directory server. - (received during the execution of ad-authenticate-and-roles command). - When: - - Running the 'parse_and_authenticate_ldap_group_entries_and_referrals()' function. - Then: - - Verify that the raw response parsed correctly and that the referrals and entries lists - returned as expected. + Given: + - A raw response of a user paged search in an Active directory server. + (received during the execution of ad-authenticate-and-roles command). + When: + - Running the 'parse_and_authenticate_ldap_group_entries_and_referrals()' function. + Then: + - Verify that the raw response parsed correctly and that the referrals and entries lists + returned as expected. 
""" - client = LdapClient({'ldap_server_vendor': 'Active Directory', 'host': 'server_ip'}) + client = LdapClient({"ldap_server_vendor": "Active Directory", "host": "server_ip"}) - password = '123456' + password = "123456" ldap_group_entries = [ - {'uri': ['ldap://domain1/CN=test,DC=demisto,DC=test'], 'type': 'searchResRef'}, - {'uri': ['ldap://domain2/DC=test,DC=demisto,DC=test'], 'type': 'searchResRef'}, - {'raw_dn': b'CN=username,CN=Users,DC=demisto,DC=test', 'dn': 'CN=username,CN=Users,DC=demisto,DC=test', - 'raw_attributes': {'memberOf': [b'CN=test-group,CN=Users,DC=demisto,DC=test'], - 'name': [b'username'], 'primaryGroupID': [b'111'], 'mail': [b'user@mail.com'], - 'mobile': [b'050-1111111']}, - 'attributes': {'memberOf': ['CN=test-group,CN=Users,DC=demisto,DC=test'], 'name': 'username', - 'primaryGroupID': 111, 'mail': 'user@mail.com', 'mobile': '050-1111111'}, - 'type': 'searchResEntry'}] - - expected_referrals = ['ldap://domain1/CN=test,DC=demisto,DC=test', 'ldap://domain2/DC=test,DC=demisto,DC=test'] + {"uri": ["ldap://domain1/CN=test,DC=demisto,DC=test"], "type": "searchResRef"}, + {"uri": ["ldap://domain2/DC=test,DC=demisto,DC=test"], "type": "searchResRef"}, + { + "raw_dn": b"CN=username,CN=Users,DC=demisto,DC=test", + "dn": "CN=username,CN=Users,DC=demisto,DC=test", + "raw_attributes": { + "memberOf": [b"CN=test-group,CN=Users,DC=demisto,DC=test"], + "name": [b"username"], + "primaryGroupID": [b"111"], + "mail": [b"user@mail.com"], + "mobile": [b"050-1111111"], + }, + "attributes": { + "memberOf": ["CN=test-group,CN=Users,DC=demisto,DC=test"], + "name": "username", + "primaryGroupID": 111, + "mail": "user@mail.com", + "mobile": "050-1111111", + }, + "type": "searchResEntry", + }, + ] + + expected_referrals = ["ldap://domain1/CN=test,DC=demisto,DC=test", "ldap://domain2/DC=test,DC=demisto,DC=test"] expected_entries = [ - {'DN': 'CN=username,CN=Users,DC=demisto,DC=test', - 'Attributes': [ - {'Name': 'memberOf', 'Values': ['CN=test-group,CN=Users,DC=demisto,DC=test']}, - {'Name': 'name', 'Values': ['username']}, - {'Name': 'primaryGroupID', 'Values': ['111']}, - {'Name': 'mail', 'Values': ['user@mail.com']}, - {'Name': 'mobile', 'Values': ['050-1111111']} - ]}] - - mocker.patch('OpenLDAP.LdapClient.authenticate_ldap_user', return_value='Done') - referrals, entries = client._parse_and_authenticate_ldap_group_entries_and_referrals(ldap_group_entries, - password) + { + "DN": "CN=username,CN=Users,DC=demisto,DC=test", + "Attributes": [ + {"Name": "memberOf", "Values": ["CN=test-group,CN=Users,DC=demisto,DC=test"]}, + {"Name": "name", "Values": ["username"]}, + {"Name": "primaryGroupID", "Values": ["111"]}, + {"Name": "mail", "Values": ["user@mail.com"]}, + {"Name": "mobile", "Values": ["050-1111111"]}, + ], + } + ] + + mocker.patch("OpenLDAP.LdapClient.authenticate_ldap_user", return_value="Done") + referrals, entries = client._parse_and_authenticate_ldap_group_entries_and_referrals(ldap_group_entries, password) assert referrals == expected_referrals assert entries == expected_entries - @pytest.mark.parametrize('user_logon_name, expected_ad_username', [ - ('DEMISTO\\test1user', 'test1user'), - ('test2user@demisto.ad', 'test2user'), - ]) + @pytest.mark.parametrize( + "user_logon_name, expected_ad_username", + [ + ("DEMISTO\\test1user", "test1user"), + ("test2user@demisto.ad", "test2user"), + ], + ) def test_get_ad_username(self, user_logon_name, expected_ad_username): """ - Given: - - A user logon name (a username to login to XSOAR with). 
- When: - - Running the 'get_ad_username()' function. - Then: - - Verify that the returned Active Directory username is as expected. + Given: + - A user logon name (a username to login to XSOAR with). + When: + - Running the 'get_ad_username()' function. + Then: + - Verify that the returned Active Directory username is as expected. """ - client = LdapClient({'ldap_server_vendor': 'Active Directory', 'host': 'server_ip'}) + client = LdapClient({"ldap_server_vendor": "Active Directory", "host": "server_ip"}) ad_username = client._get_ad_username(user_logon_name) assert ad_username == expected_ad_username - @pytest.mark.parametrize('connection_type, expected_auto_bind_value', [ - ('Start TLS', 'TLS_BEFORE_BIND'), - ('SSL', 'NO_TLS'), - ('None', 'NO_TLS') - ]) + @pytest.mark.parametrize( + "connection_type, expected_auto_bind_value", [("Start TLS", "TLS_BEFORE_BIND"), ("SSL", "NO_TLS"), ("None", "NO_TLS")] + ) def test_get_auto_bind_value(self, connection_type, expected_auto_bind_value): """ - Given: - - A connection type: - 1. Start TLS - 2. SSL - 3. None - When: - - Running the '_get_auto_bind_value()' function. - Then: - - Verify that the returned auto_bind value is as expected: - 1. 'TLS_BEFORE_BIND' - which means that connection should upgrade it's secure level to TLS before - the bind itself (STARTTLS command is executed). - 2. 'NO_TLS' - The connection is secured from the beginning, - thus STARTTLS command shouldn't be executed. - 3. 'NO_TLS' - Connection is insecure (cleartext) and shouldn't be upgraded to TLS. - """ - client = LdapClient({'ldap_server_vendor': 'Active Directory', 'host': 'server_ip', - 'connection_type': connection_type}) + Given: + - A connection type: + 1. Start TLS + 2. SSL + 3. None + When: + - Running the '_get_auto_bind_value()' function. + Then: + - Verify that the returned auto_bind value is as expected: + 1. 'TLS_BEFORE_BIND' - which means that connection should upgrade it's secure level to TLS before + the bind itself (STARTTLS command is executed). + 2. 'NO_TLS' - The connection is secured from the beginning, + thus STARTTLS command shouldn't be executed. + 3. 'NO_TLS' - Connection is insecure (cleartext) and shouldn't be upgraded to TLS. + """ + client = LdapClient({"ldap_server_vendor": "Active Directory", "host": "server_ip", "connection_type": connection_type}) auto_bind_value = client._get_auto_bind_value() assert auto_bind_value == expected_auto_bind_value @@ -177,158 +201,193 @@ def test_get_auto_bind_value(self, connection_type, expected_auto_bind_value): class TestsOpenLDAP: """ - Contains unit tests for functions that deal with OpenLDAP server only. + Contains unit tests for functions that deal with OpenLDAP server only. """ - @pytest.mark.parametrize('dn, user_identifier_attribute, expected_result', [ - ('uid=user_test,cn=users_test,dc=openldap_test,dc=test,dc=int', 'uid', (True, 'user_test')), - ('not_a_valid_dn_test', 'uid', (False, 'not_a_valid_dn_test')) - ]) + @pytest.mark.parametrize( + "dn, user_identifier_attribute, expected_result", + [ + ("uid=user_test,cn=users_test,dc=openldap_test,dc=test,dc=int", "uid", (True, "user_test")), + ("not_a_valid_dn_test", "uid", (False, "not_a_valid_dn_test")), + ], + ) def test_is_valid_dn(self, dn, user_identifier_attribute, expected_result): """ - Given: - - A DN and a user identifier attribute: - 1. A valid DN. - 2. Invalid DN. - When: - - Running the '_is_valid_dn()' function. - Then: - - Verify that the DN is parsed correctly and that the user returned as expected. 
- """ - client = LdapClient({'ldap_server_vendor': 'OpenLDAP', 'host': 'server_ip', - 'connection_type': 'SSL', 'user_identifier_attribute': user_identifier_attribute}) + Given: + - A DN and a user identifier attribute: + 1. A valid DN. + 2. Invalid DN. + When: + - Running the '_is_valid_dn()' function. + Then: + - Verify that the DN is parsed correctly and that the user returned as expected. + """ + client = LdapClient( + { + "ldap_server_vendor": "OpenLDAP", + "host": "server_ip", + "connection_type": "SSL", + "user_identifier_attribute": user_identifier_attribute, + } + ) actual_result, dn = client._is_valid_dn(dn, client.USER_IDENTIFIER_ATTRIBUTE) assert (actual_result, dn) == expected_result - @pytest.mark.parametrize('dn, user_identifier_attribute', [ - ('cn=users_test,dc=openldap_test,dc=test,dc=int', 'uid'), - ('uid=user_test,cn=users_test,dc=openldap_test,dc=test,dc=int', 'new_uid') - ]) + @pytest.mark.parametrize( + "dn, user_identifier_attribute", + [ + ("cn=users_test,dc=openldap_test,dc=test,dc=int", "uid"), + ("uid=user_test,cn=users_test,dc=openldap_test,dc=test,dc=int", "new_uid"), + ], + ) def test_is_valid_dn_user_id_not_in_dn(self, dn, user_identifier_attribute): """ - Given: - 1. A DN without a user identifier attribute. - 2. A DN with a wrong user identifier attribute. - When: - - Running the '_is_valid_dn()' function. - Then: - - Verify that the expected err message is raised. - """ - client = LdapClient({'ldap_server_vendor': 'OpenLDAP', 'host': 'server_ip', - 'connection_type': 'SSL', 'user_identifier_attribute': user_identifier_attribute}) + Given: + 1. A DN without a user identifier attribute. + 2. A DN with a wrong user identifier attribute. + When: + - Running the '_is_valid_dn()' function. + Then: + - Verify that the expected err message is raised. + """ + client = LdapClient( + { + "ldap_server_vendor": "OpenLDAP", + "host": "server_ip", + "connection_type": "SSL", + "user_identifier_attribute": user_identifier_attribute, + } + ) with pytest.raises(Exception) as e: client._is_valid_dn(dn, client.USER_IDENTIFIER_ATTRIBUTE) - assert e.value.args[0] == f'OpenLDAP {user_identifier_attribute} attribute was not found in user DN : {dn}' + assert e.value.args[0] == f"OpenLDAP {user_identifier_attribute} attribute was not found in user DN : {dn}" class TestLDAPAuthentication: """ - Contains unit tests for general functions that deal with both OpenLDAP and Active Directory servers. + Contains unit tests for general functions that deal with both OpenLDAP and Active Directory servers. """ - @pytest.mark.parametrize('ssl_version, expected_ssl_version', [ - ('TLS', 2), ('TLSv1', 3), ('TLSv1_1', 4), ('TLSv1_2', 5), ('TLS_CLIENT', 16), (None, None), ('None', None) - ]) + @pytest.mark.parametrize( + "ssl_version, expected_ssl_version", + [("TLS", 2), ("TLSv1", 3), ("TLSv1_1", 4), ("TLSv1_2", 5), ("TLS_CLIENT", 16), (None, None), ("None", None)], + ) def test_get_ssl_version(self, ssl_version, expected_ssl_version): """ - Given: - - An ssl protocol version: - 1. TLS - 2. TLSv1 - 3. TLSv1_1 - 4. TLSv1_2 - 5. TLS_CLIENT - 6. None - 7. 'None' - When: - - Running the '_get_ssl_version()' function. - Then: - - Verify that the returned ssl version value is as expected: - 1. TLS - 2 - 2. TLSv1 - 3 - 3. TLSv1_1 - 4 - 4. TLSv1_2 - 5 - 5. TLS_CLIENT - 16 - 6. None - None - 7. 'None' - None - """ - client = LdapClient({'ldap_server_vendor': 'OpenLDAP', 'host': 'server_ip', - 'connection_type': 'SSL', 'ssl_version': ssl_version}) + Given: + - An ssl protocol version: + 1. TLS + 2. 
TLSv1 + 3. TLSv1_1 + 4. TLSv1_2 + 5. TLS_CLIENT + 6. None + 7. 'None' + When: + - Running the '_get_ssl_version()' function. + Then: + - Verify that the returned ssl version value is as expected: + 1. TLS - 2 + 2. TLSv1 - 3 + 3. TLSv1_1 - 4 + 4. TLSv1_2 - 5 + 5. TLS_CLIENT - 16 + 6. None - None + 7. 'None' - None + """ + client = LdapClient( + {"ldap_server_vendor": "OpenLDAP", "host": "server_ip", "connection_type": "SSL", "ssl_version": ssl_version} + ) ssl_version_value = client._get_ssl_version() assert ssl_version_value == expected_ssl_version - @pytest.mark.parametrize('custom_attributes, expected_formatted_attributes', [ - ('attr1=val1,attr2=val2,attr3=val3', '(attr1=val1)(attr2=val2)(attr3=val3)'), - ('', '') - ]) + @pytest.mark.parametrize( + "custom_attributes, expected_formatted_attributes", + [("attr1=val1,attr2=val2,attr3=val3", "(attr1=val1)(attr2=val2)(attr3=val3)"), ("", "")], + ) def test_get_formatted_custom_attributes(self, custom_attributes, expected_formatted_attributes): """ - Given: - - Custom attributes: - 1. A valid comma separated list of attributes. - 2. An empty string of attributes. - When: - - Running the '_get_formatted_custom_attributes()' function. - Then: - - Verify that the attributed parsed correctly. - """ - client = LdapClient({'ldap_server_vendor': 'OpenLDAP', 'host': 'server_ip', - 'connection_type': 'SSL', 'custom_attributes': custom_attributes}) + Given: + - Custom attributes: + 1. A valid comma separated list of attributes. + 2. An empty string of attributes. + When: + - Running the '_get_formatted_custom_attributes()' function. + Then: + - Verify that the attributed parsed correctly. + """ + client = LdapClient( + { + "ldap_server_vendor": "OpenLDAP", + "host": "server_ip", + "connection_type": "SSL", + "custom_attributes": custom_attributes, + } + ) formatted_attributes = client._get_formatted_custom_attributes() assert formatted_attributes == expected_formatted_attributes def test_get_formatted_custom_attributes_invalid_attributes_input(self): """ - Given: - - Invalid Custom attributes. - When: - - Running the '_get_formatted_custom_attributes()' function. - Then: - - Verify that the expected error message is raised. - """ - client = LdapClient({'ldap_server_vendor': 'OpenLDAP', 'host': 'server_ip', - 'connection_type': 'SSL', 'custom_attributes': 'attr1val1,attr2=val2,attr3=val3'}) + Given: + - Invalid Custom attributes. + When: + - Running the '_get_formatted_custom_attributes()' function. + Then: + - Verify that the expected error message is raised. + """ + client = LdapClient( + { + "ldap_server_vendor": "OpenLDAP", + "host": "server_ip", + "connection_type": "SSL", + "custom_attributes": "attr1val1,attr2=val2,attr3=val3", + } + ) with pytest.raises(Exception) as e: client._get_formatted_custom_attributes() - assert e.value.args[0] == (f'User defined attributes must be of the form "attrA=valA,attrB=valB,...", but got: ' - f'{client.CUSTOM_ATTRIBUTE}') - - @pytest.mark.parametrize('user_logon_name', [ - ('test*'), - ('test?test'), - ]) + assert e.value.args[0] == ( + f'User defined attributes must be of the form "attrA=valA,attrB=valB,...", but got: ' f"{client.CUSTOM_ATTRIBUTE}" + ) + + @pytest.mark.parametrize( + "user_logon_name", + [ + ("test*"), + ("test?test"), + ], + ) def test_has_wildcards_in_user_logon(self, user_logon_name): """ - Given: - 1. A user logon name contains the "*" symbol. - 2. A user logon name contains the "?" symbol. - When: - - Running the 'has_wildcards_in_user_logon()' function. 
- Then: - - Verify that an exception is raised due to the use of wildcards in the logon name. + Given: + 1. A user logon name contains the "*" symbol. + 2. A user logon name contains the "?" symbol. + When: + - Running the 'has_wildcards_in_user_logon()' function. + Then: + - Verify that an exception is raised due to the use of wildcards in the logon name. """ - client = LdapClient({'ldap_server_vendor': 'Active Directory', 'host': 'server_ip'}) + client = LdapClient({"ldap_server_vendor": "Active Directory", "host": "server_ip"}) with pytest.raises(Exception) as e: client._has_wildcards_in_user_logon(user_logon_name) - assert 'Wildcards were detected in the user logon name' in e.value.args[0] + assert "Wildcards were detected in the user logon name" in e.value.args[0] assert user_logon_name in e.value.args[0] class TestEntriesPagedSearch(unittest.TestCase): - def setUp(self): """ Set up the test by creating a mock connection and search parameters. """ self.connection = MagicMock() - self.search_params = {'search_base': 'dc=example,dc=com', 'search_filter': '(objectClass=person)'} + self.search_params = {"search_base": "dc=example,dc=com", "search_filter": "(objectClass=person)"} self.page_size = 10 def test_first_page(self): @@ -336,12 +395,12 @@ def test_first_page(self): when running the entries_paged_search function with a page number of 1 then the search method should be called with the correct parameters and the results should be returned. """ - self.connection.search.return_value = [{'name': 'John Doe'}] + self.connection.search.return_value = [{"name": "John Doe"}] results = entries_paged_search(self.connection, self.search_params, page=1, page_size=self.page_size) self.connection.search.assert_called_once_with(**self.search_params, paged_size=self.page_size) - assert results == [{'name': 'John Doe'}] + assert results == [{"name": "John Doe"}] def test_subsequent_page(self): """ @@ -352,26 +411,18 @@ def test_subsequent_page(self): # Mock the connection's search method for the first search (to skip results) self.connection.search.side_effect = [ None, # First call returns None to simulate the skip search - [{'name': 'Jane Doe'}] # Second call returns the actual results + [{"name": "Jane Doe"}], # Second call returns the actual results ] # Mock the result of the first search to include the cookie - self.connection.result = { - 'controls': { - '1.2.840.113556.1.4.319': { - 'value': { - 'cookie': b'cookie_value' - } - } - } - } + self.connection.result = {"controls": {"1.2.840.113556.1.4.319": {"value": {"cookie": b"cookie_value"}}}} results = entries_paged_search(self.connection, self.search_params, page=2, page_size=self.page_size) # Assert the search method was called twice with the correct parameters assert self.connection.search.call_count == 2 self.connection.search.assert_any_call(**self.search_params, paged_size=self.page_size * 1) - self.connection.search.assert_any_call(**self.search_params, paged_size=self.page_size, paged_cookie=b'cookie_value') - assert results == [{'name': 'Jane Doe'}] + self.connection.search.assert_any_call(**self.search_params, paged_size=self.page_size, paged_cookie=b"cookie_value") + assert results == [{"name": "Jane Doe"}] class TestEntriesSearchCommand(unittest.TestCase): @@ -383,61 +434,69 @@ def setUp(self): """ Set up the test by creating an instance of the LdapClient class and mocking the _get_auto_bind_value method. 
""" - self.instance = LdapClient({ - 'host': 'server_ip', - 'port': '636', - 'credentials': {'identifier': 'username', 'password': 'password'}, - 'base_dn': 'dc=example,dc=com', - 'connection_type': 'SSL', - 'ssl_version': 'TLSv1_2', - 'fetch_groups': True, - 'insecure': False, - 'ldap_server_vendor': 'OpenLDAP', - }) + self.instance = LdapClient( + { + "host": "server_ip", + "port": "636", + "credentials": {"identifier": "username", "password": "password"}, + "base_dn": "dc=example,dc=com", + "connection_type": "SSL", + "ssl_version": "TLSv1_2", + "fetch_groups": True, + "insecure": False, + "ldap_server_vendor": "OpenLDAP", + } + ) self.instance._get_auto_bind_value = MagicMock(return_value=True) - @patch('OpenLDAP.Connection') - @patch('OpenLDAP.create_entries_search_filter', return_value='(objectClass=*)') - @patch('OpenLDAP.get_search_attributes', return_value=['cn', 'mail']) - def test_entries_search_command_first_page(self, mock_get_search_attributes, mock_create_entries_search_filter, - mock_connection): + @patch("OpenLDAP.Connection") + @patch("OpenLDAP.create_entries_search_filter", return_value="(objectClass=*)") + @patch("OpenLDAP.get_search_attributes", return_value=["cn", "mail"]) + def test_entries_search_command_first_page( + self, mock_get_search_attributes, mock_create_entries_search_filter, mock_connection + ): """ when running the entries_search_command function with a page number of 1 then the search method should be called with the correct parameters and the results should be returned. """ # Mock the LDAP connection mock_conn_instance = mock_connection.return_value.__enter__.return_value - mock_conn_instance.entries = [MagicMock(entry_to_json=MagicMock(return_value=json.dumps({ - 'attributes': {'cn': ['John Doe'], 'mail': ['john.doe@example.com']}, - 'dn': 'cn=John Doe,dc=example,dc=com' - })))] - - args = { - 'search_base': 'dc=example,dc=com', - 'search_scope': 'SUBTREE', - 'page': '1', - 'page_size': '50' - } + mock_conn_instance.entries = [ + MagicMock( + entry_to_json=MagicMock( + return_value=json.dumps( + { + "attributes": {"cn": ["John Doe"], "mail": ["john.doe@example.com"]}, + "dn": "cn=John Doe,dc=example,dc=com", + } + ) + ) + ) + ] + + args = {"search_base": "dc=example,dc=com", "search_scope": "SUBTREE", "page": "1", "page_size": "50"} result = self.instance.entries_search_command(args) self.instance._get_auto_bind_value.assert_called_once() mock_create_entries_search_filter.assert_called_once_with(args) - mock_get_search_attributes.assert_called_once_with('all') + mock_get_search_attributes.assert_called_once_with("all") mock_connection.assert_called_once_with( - self.instance._ldap_server, self.instance._username, self.instance._password, auto_bind=True) + self.instance._ldap_server, self.instance._username, self.instance._password, auto_bind=True + ) assert len(result.outputs) == 1 - assert result.outputs[0]['cn'] == ['John Doe'] - assert result.outputs[0]['mail'] == ['john.doe@example.com'] - assert result.outputs[0]['dn'] == 'cn=John Doe,dc=example,dc=com' - - @patch('OpenLDAP.Connection') - @patch('OpenLDAP.create_entries_search_filter', return_value='(objectClass=*)') - @patch('OpenLDAP.get_search_attributes', return_value=['cn', 'mail']) - def test_entries_search_command_subsequent_page(self, mock_get_search_attributes, mock_create_entries_search_filter, - mock_connection): + assert result.outputs[0]["cn"] == ["John Doe"] + assert result.outputs[0]["mail"] == ["john.doe@example.com"] + assert result.outputs[0]["dn"] == "cn=John 
Doe,dc=example,dc=com" + + @patch("OpenLDAP.Connection") + @patch("OpenLDAP.create_entries_search_filter", return_value="(objectClass=*)") + @patch("OpenLDAP.get_search_attributes", return_value=["cn", "mail"]) + def test_entries_search_command_subsequent_page( + self, mock_get_search_attributes, mock_create_entries_search_filter, mock_connection + ): """ when running the entries_search_command function with a page number greater than 1 then the search method should be called with the correct parameters and the results should be returned. The search method should be called twice, once @@ -445,27 +504,31 @@ def test_entries_search_command_subsequent_page(self, mock_get_search_attributes """ # Mock the LDAP connection mock_conn_instance = mock_connection.return_value.__enter__.return_value - mock_conn_instance.entries = [MagicMock(entry_to_json=MagicMock(return_value=json.dumps({ - 'attributes': {'cn': ['Jane Doe'], 'mail': ['jane.doe@example.com']}, - 'dn': 'cn=Jane Doe,dc=example,dc=com' - })))] - - args = { - 'search_base': 'dc=example,dc=com', - 'search_scope': 'SUBTREE', - 'page': '2', - 'page_size': '50' - } + mock_conn_instance.entries = [ + MagicMock( + entry_to_json=MagicMock( + return_value=json.dumps( + { + "attributes": {"cn": ["Jane Doe"], "mail": ["jane.doe@example.com"]}, + "dn": "cn=Jane Doe,dc=example,dc=com", + } + ) + ) + ) + ] + + args = {"search_base": "dc=example,dc=com", "search_scope": "SUBTREE", "page": "2", "page_size": "50"} result = self.instance.entries_search_command(args) self.instance._get_auto_bind_value.assert_called_once() mock_create_entries_search_filter.assert_called_once_with(args) - mock_get_search_attributes.assert_called_once_with('all') + mock_get_search_attributes.assert_called_once_with("all") mock_connection.assert_called_once_with( - self.instance._ldap_server, self.instance._username, self.instance._password, auto_bind=True) + self.instance._ldap_server, self.instance._username, self.instance._password, auto_bind=True + ) assert len(result.outputs) == 1 - assert result.outputs[0]['cn'] == ['Jane Doe'] - assert result.outputs[0]['mail'] == ['jane.doe@example.com'] - assert result.outputs[0]['dn'] == 'cn=Jane Doe,dc=example,dc=com' + assert result.outputs[0]["cn"] == ["Jane Doe"] + assert result.outputs[0]["mail"] == ["jane.doe@example.com"] + assert result.outputs[0]["dn"] == "cn=Jane Doe,dc=example,dc=com" diff --git a/Packs/OpenLDAP/ReleaseNotes/2_0_19.md b/Packs/OpenLDAP/ReleaseNotes/2_0_19.md new file mode 100644 index 000000000000..4fabf8c0d56f --- /dev/null +++ b/Packs/OpenLDAP/ReleaseNotes/2_0_19.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### LDAP Authentication + +- Metadata and documentation improvements. 
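For context on the paged-search tests above: they mock ldap3's Simple Paged Results control (OID 1.2.840.113556.1.4.319), where the cookie the server echoes back in connection.result is what lets the next search resume where the previous page ended. A minimal sketch of that loop against a live server, assuming an already-bound ldap3 Connection; the function name and attribute list here are illustrative, not part of this patch:

from ldap3 import SUBTREE

PAGED_RESULTS_OID = "1.2.840.113556.1.4.319"  # Simple Paged Results control

def iter_pages(conn, search_base, search_filter, page_size=50):
    """Yield one list of entries per page until the server stops sending a cookie."""
    cookie = None
    while True:
        conn.search(search_base, search_filter, search_scope=SUBTREE,
                    attributes=["cn"], paged_size=page_size, paged_cookie=cookie)
        yield conn.entries
        # The server returns a non-empty cookie while more pages remain.
        cookie = conn.result["controls"][PAGED_RESULTS_OID]["value"]["cookie"]
        if not cookie:  # empty cookie means the final page was delivered
            break

This is the same cookie structure the unit test stubs into connection.result, which is why the second page in the test requires two search calls: one to skip past page one, and one with the captured cookie.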
diff --git a/Packs/OpenLDAP/pack_metadata.json b/Packs/OpenLDAP/pack_metadata.json index ef3437f3019d..c4902362f9c6 100644 --- a/Packs/OpenLDAP/pack_metadata.json +++ b/Packs/OpenLDAP/pack_metadata.json @@ -2,7 +2,7 @@ "name": "LDAP Authentication", "description": "Authenticate using Open LDAP or Active Directory", "support": "xsoar", - "currentVersion": "2.0.18", + "currentVersion": "2.0.19", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", From 2a92772a92152044dc26c88ba8b920e7b1bea64f Mon Sep 17 00:00:00 2001 From: Content Bot Date: Sun, 23 Mar 2025 12:53:00 +0000 Subject: [PATCH 10/18] CiscoFirepower: Apply ruff Format --- .../CiscoFirepower/CiscoFirepower.py | 2921 ++++++++--------- .../CiscoFirepower/CiscoFirepower_test.py | 993 +++--- Packs/CiscoFirepower/ReleaseNotes/1_2_7.md | 6 + Packs/CiscoFirepower/pack_metadata.json | 2 +- 4 files changed, 1798 insertions(+), 2124 deletions(-) create mode 100644 Packs/CiscoFirepower/ReleaseNotes/1_2_7.md diff --git a/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower.py b/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower.py index bf17b7b46c41..de69e638ef38 100644 --- a/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower.py +++ b/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower.py @@ -1,77 +1,75 @@ import demistomock as demisto # noqa: F401 from CommonServerPython import * # noqa: F401 + """ Cisco Firepower Management Center API Integration for Cortex XSOAR (aka Demisto). """ import copy +from collections.abc import Callable, MutableMapping, MutableSequence from http import HTTPStatus -from typing import Callable, Dict, List, MutableMapping, MutableSequence, Tuple, Union from CommonServerUserPython import * # pylint: disable=wildcard-import - -''' GLOBAL/PARAMS ''' # pylint: disable=pointless-string-statement +""" GLOBAL/PARAMS """ # pylint: disable=pointless-string-statement -INTEGRATION_NAME = 'Cisco Firepower' -INTEGRATION_CONTEXT_NAME = 'CiscoFP' -INTRUSION_POLICY_CONTEXT = 'IntrusionPolicy' -INTRUSION_RULE_CONTEXT = 'IntrusionRule' -INTRUSION_RULE_UPLOAD_CONTEXT = 'IntrusionRuleUpload' -INTRUSION_RULE_GROUP_CONTEXT = 'IntrusionRuleGroup' -NETWORK_ANALYSIS_POLICY_CONTEXT = 'NetworkAnalysisPolicy' -OUTPUT_KEYS_DICTIONARY = { - 'id': 'ID' -} +INTEGRATION_NAME = "Cisco Firepower" +INTEGRATION_CONTEXT_NAME = "CiscoFP" +INTRUSION_POLICY_CONTEXT = "IntrusionPolicy" +INTRUSION_RULE_CONTEXT = "IntrusionRule" +INTRUSION_RULE_UPLOAD_CONTEXT = "IntrusionRuleUpload" +INTRUSION_RULE_GROUP_CONTEXT = "IntrusionRuleGroup" +NETWORK_ANALYSIS_POLICY_CONTEXT = "NetworkAnalysisPolicy" +OUTPUT_KEYS_DICTIONARY = {"id": "ID"} API_LIMIT = 1000 EXECUTION_TIMEOUT = 600 -INTRUSION_POLICY_TITLE = 'Intrusion Policy Information' +INTRUSION_POLICY_TITLE = "Intrusion Policy Information" INTRUSION_POLICY_HEADERS_BY_KEYS = { - 'ID': ['id'], - 'Name': ['name'], - 'Description': ['description'], - 'Detection': ['detection'], - 'Inspection Mode': ['inspectionMode'], - 'Base Policy ID': ['basePolicy', 'id'], + "ID": ["id"], + "Name": ["name"], + "Description": ["description"], + "Detection": ["detection"], + "Inspection Mode": ["inspectionMode"], + "Base Policy ID": ["basePolicy", "id"], } -INTRUSION_RULE_TITLE = 'Intrusion Rule Information' +INTRUSION_RULE_TITLE = "Intrusion Rule Information" INTRUSION_RULE_HEADERS_BY_KEYS = { - 'ID': ['id'], - 'Name': ['name'], - 'Snort ID': ['sid'], - 'Revision': ['revision'], - 'Rule Data': ['ruleData'], - 'Rule Group': ['ruleGroups'], + "ID": 
["id"], + "Name": ["name"], + "Snort ID": ["sid"], + "Revision": ["revision"], + "Rule Data": ["ruleData"], + "Rule Group": ["ruleGroups"], } -INTRUSION_RULE_UPLOAD_TITLE = 'Intrusion Rule Upload Information' +INTRUSION_RULE_UPLOAD_TITLE = "Intrusion Rule Upload Information" INTRUSION_RULE_UPLOAD_HEADERS_BY_KEYS = { - 'Added Count': ['summary', 'added', 'count'], - 'Added Rules': ['summary', 'added', 'rules'], - 'Updated Count': ['summary', 'updated', 'count'], - 'Updated Rules': ['summary', 'updated', 'rules'], - 'Deleted Count': ['summary', 'deleted', 'count'], - 'Deleted Rules': ['summary', 'deleted', 'rules'], - 'Skipped Count': ['summary', 'skipped', 'count'], - 'Skipped Rules': ['summary', 'skipped', 'rules'], - 'Unassociated Count': ['summary', 'unassociated', 'count'], - 'Unassociated Rules': ['summary', 'unassociated', 'rules'], + "Added Count": ["summary", "added", "count"], + "Added Rules": ["summary", "added", "rules"], + "Updated Count": ["summary", "updated", "count"], + "Updated Rules": ["summary", "updated", "rules"], + "Deleted Count": ["summary", "deleted", "count"], + "Deleted Rules": ["summary", "deleted", "rules"], + "Skipped Count": ["summary", "skipped", "count"], + "Skipped Rules": ["summary", "skipped", "rules"], + "Unassociated Count": ["summary", "unassociated", "count"], + "Unassociated Rules": ["summary", "unassociated", "rules"], } -INTRUSION_RULE_GROUP_TITLE = 'Intrusion Rule Group Information' +INTRUSION_RULE_GROUP_TITLE = "Intrusion Rule Group Information" INTRUSION_RULE_GROUP_HEADERS_BY_KEYS = { - 'ID': ['id'], - 'Name': ['name'], - 'Description': ['description'], + "ID": ["id"], + "Name": ["name"], + "Description": ["description"], } -NETWORK_ANALYSIS_POLICY_TITLE = 'Network Analysis Policy Information' +NETWORK_ANALYSIS_POLICY_TITLE = "Network Analysis Policy Information" NETWORK_ANALYSIS_POLICY_HEADERS_BY_KEYS = { - 'ID': ['id'], - 'Name': ['name'], - 'Description': ['description'], - 'Inspection Mode': ['inspectionMode'], - 'Base Policy ID': ['basePolicy', 'id'], - 'Base Policy Name': ['basePolicy', 'name'], + "ID": ["id"], + "Name": ["name"], + "Description": ["description"], + "Inspection Mode": ["inspectionMode"], + "Base Policy ID": ["basePolicy", "id"], + "Base Policy Name": ["basePolicy", "name"], } @@ -100,6 +98,7 @@ def pagination( Returns: Callable: Pagination decorator. """ + def dec(func: Callable) -> Callable: """ Pagination decorator holding the callable function. @@ -112,13 +111,8 @@ def dec(func: Callable) -> Callable: """ def inner( - self, - page: Optional[int], - page_size: Optional[int], - limit: Optional[int], - *args, - **kwarg - ) -> tuple[Union[list, dict], Union[list, dict]]: + self, page: Optional[int], page_size: Optional[int], limit: Optional[int], *args, **kwarg + ) -> tuple[list | dict, list | dict]: """ Handle pagination arguments to return multiple response from an API. 
@@ -138,7 +132,7 @@ def inner( is_manual = bool((page is not None and page > 0) or (page_size is not None and page_size > 0)) if all((is_manual, is_automatic)): - raise ValueError('page or page_size can not be entered with limit.') + raise ValueError("page or page_size can not be entered with limit.") remaining_items: int @@ -165,19 +159,10 @@ def inner( if has_limit: limit = (offset or 0) + remaining_items - raw_response = func( - self, - limit=min(limit, api_limit), - *args, - **kwarg - ) + raw_response = func(self, limit=min(limit, api_limit), *args, **kwarg) else: - raw_response = func( - self, - *args, - **kwarg - ) + raw_response = func(self, *args, **kwarg) items = raw_response @@ -198,13 +183,7 @@ def inner( # Keep calling the API until the required amount of items have been met. while remaining_items > 0: - raw_response = func( - self, - limit=min(remaining_items, api_limit), - offset=offset, - *args, - **kwarg - ) + raw_response = func(self, limit=min(remaining_items, api_limit), offset=offset, *args, **kwarg) raw_item = raw_response @@ -223,19 +202,14 @@ def inner( offset = (offset or 0) + received_items return raw_items, raw_responses + return inner + return dec class Client(BaseClient): - def __init__( - self, - base_url: str, - username: str, - password: str, - verify: bool = False, - proxy: bool = False - ): + def __init__(self, base_url: str, username: str, password: str, verify: bool = False, proxy: bool = False): """ Initialize the client by generating a token. Add the token to the headers and add the Domain UUID to the base URL. @@ -249,28 +223,23 @@ def __init__( proxy (bool, optional): System proxy is handled by BaseClient. Defaults to False. """ - super().__init__( - base_url=base_url, - verify=verify, - proxy=proxy, - auth=(username, password) - ) + super().__init__(base_url=base_url, verify=verify, proxy=proxy, auth=(username, password)) header = self._http_request( - method='POST', - url_suffix='api/fmc_platform/v1/auth/generatetoken', - resp_type='response', + method="POST", + url_suffix="api/fmc_platform/v1/auth/generatetoken", + resp_type="response", ).headers - auth_token: str = header['X-auth-access-token'] - domain_uuid: str = header['DOMAIN_UUID'] + auth_token: str = header["X-auth-access-token"] + domain_uuid: str = header["DOMAIN_UUID"] - self._base_url = urljoin(self._base_url, f'api/fmc_config/v1/domain/{domain_uuid}') + self._base_url = urljoin(self._base_url, f"api/fmc_config/v1/domain/{domain_uuid}") self._headers = { - 'X-auth-access-token': auth_token, + "X-auth-access-token": auth_token, } - def get_list(self, limit: int, offset: int, object_path: str) -> Dict: + def get_list(self, limit: int, offset: int, object_path: str) -> dict: """ Bridge command to list requests. @@ -282,11 +251,11 @@ def get_list(self, limit: int, offset: int, object_path: str) -> Dict: Returns: Dict: API response with the requested items. """ - params = {'expanded': 'true', 'limit': limit, 'offset': offset} - suffix = f'object/{object_path}' - return self._http_request('GET', suffix, params=params) + params = {"expanded": "true", "limit": limit, "offset": offset} + suffix = f"object/{object_path}" + return self._http_request("GET", suffix, params=params) - def get_policy_assignments(self, policy_assignment_id: str) -> Dict[str, Any]: + def get_policy_assignments(self, policy_assignment_id: str) -> dict[str, Any]: """ Retrieves the policy assignment associated with the specified ID. 
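The Client.__init__ hunks above are formatting-only and leave the auth handshake unchanged: FMC returns the session token and the domain UUID as response headers on the generatetoken call, and the client then roots every config request under that domain. A rough equivalent in plain requests, with FMC_URL, USER, and PASSWORD as placeholder values:

import requests

FMC_URL = "https://fmc.example.com"  # placeholder
USER, PASSWORD = "apiuser", "secret"  # placeholders

resp = requests.post(
    f"{FMC_URL}/api/fmc_platform/v1/auth/generatetoken",
    auth=(USER, PASSWORD),
    verify=False,  # mirrors the integration's default verify=False option
)
token = resp.headers["X-auth-access-token"]
domain_uuid = resp.headers["DOMAIN_UUID"]

# Every config call then carries the token header under the domain-scoped base URL,
# with the same expanded/limit/offset query pattern used by get_list() below.
hosts = requests.get(
    f"{FMC_URL}/api/fmc_config/v1/domain/{domain_uuid}/object/hosts",
    headers={"X-auth-access-token": token},
    params={"expanded": "true", "limit": 1000, "offset": 0},
    verify=False,
)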
@@ -296,12 +265,9 @@ def get_policy_assignments(self, policy_assignment_id: str) -> Dict[str, Any]: Returns: Dict[str, Any]: Information about the policy assignment. """ - return self._http_request( - method='GET', - url_suffix=f'assignment/policyassignments/{policy_assignment_id}' - ) + return self._http_request(method="GET", url_suffix=f"assignment/policyassignments/{policy_assignment_id}") - def list_policy_assignments(self, limit: int, offset: int) -> Dict: + def list_policy_assignments(self, limit: int, offset: int) -> dict: """ Retrieves a list of all policy assignments to target devices. @@ -312,11 +278,11 @@ def list_policy_assignments(self, limit: int, offset: int) -> Dict: Returns: Dict: Information about policy assignments. """ - params = {'expanded': 'true', 'limit': limit, 'offset': offset} - suffix = 'assignment/policyassignments' - return self._http_request('GET', suffix, params=params) + params = {"expanded": "true", "limit": limit, "offset": offset} + suffix = "assignment/policyassignments" + return self._http_request("GET", suffix, params=params) - def get_deployable_devices(self, limit: int, offset: int, container_uuid: str) -> Dict: + def get_deployable_devices(self, limit: int, offset: int, container_uuid: str) -> dict: """ Retrieves a list of all devices with configuration changes that are ready to deploy. @@ -328,12 +294,12 @@ def get_deployable_devices(self, limit: int, offset: int, container_uuid: str) - Returns: Dict: Information about deployable devices. """ - params = {'expanded': 'true', 'limit': limit, 'offset': offset} - end_suffix = '/' + container_uuid + '/deployments' if container_uuid else '' - suffix = f'deployment/deployabledevices{end_suffix}' - return self._http_request('GET', suffix, params=params) + params = {"expanded": "true", "limit": limit, "offset": offset} + end_suffix = "/" + container_uuid + "/deployments" if container_uuid else "" + suffix = f"deployment/deployabledevices{end_suffix}" + return self._http_request("GET", suffix, params=params) - def get_device_records(self, limit: int, offset: int) -> Dict: + def get_device_records(self, limit: int, offset: int) -> dict: """ Retrieves a list of all device records. @@ -344,11 +310,11 @@ def get_device_records(self, limit: int, offset: int) -> Dict: Returns: Dict: Information about device records. """ - params = {'expanded': 'true', 'limit': limit, 'offset': offset} - suffix = 'devices/devicerecords' - return self._http_request('GET', suffix, params=params) + params = {"expanded": "true", "limit": limit, "offset": offset} + suffix = "devices/devicerecords" + return self._http_request("GET", suffix, params=params) - def get_network_objects(self, limit: int, offset: int, object_id: str) -> Dict: + def get_network_objects(self, limit: int, offset: int, object_id: str) -> dict: """ Retrieves the network objects associated with the specified ID. If not supplied, retrieves a list of all network objects. @@ -361,11 +327,11 @@ def get_network_objects(self, limit: int, offset: int, object_id: str) -> Dict: Returns: Dict: Information about network objects. 
""" - end_suffix = f'/{object_id}' if object_id else f'?expanded=true&limit={limit}&offset={offset}' - suffix = f'object/networks{end_suffix}' - return self._http_request('GET', suffix) + end_suffix = f"/{object_id}" if object_id else f"?expanded=true&limit={limit}&offset={offset}" + suffix = f"object/networks{end_suffix}" + return self._http_request("GET", suffix) - def get_hosts_objects(self, limit: int, offset: int, object_id: str) -> Dict: + def get_hosts_objects(self, limit: int, offset: int, object_id: str) -> dict: """ Retrieves the groups of host objects associated with the specified ID. If no ID is passed, the input ID retrieves a list of all network objects. @@ -378,11 +344,11 @@ def get_hosts_objects(self, limit: int, offset: int, object_id: str) -> Dict: Returns: Dict: Information about host objects. """ - end_suffix = f'/{object_id}' if object_id else f'?expanded=true&limit={limit}&offset={offset}' - suffix = f'object/hosts{end_suffix}' - return self._http_request('GET', suffix) + end_suffix = f"/{object_id}" if object_id else f"?expanded=true&limit={limit}&offset={offset}" + suffix = f"object/hosts{end_suffix}" + return self._http_request("GET", suffix) - def create_network_objects(self, name: str, value: str, description: str, overridable: bool) -> Dict: + def create_network_objects(self, name: str, value: str, description: str, overridable: bool) -> dict: """ Create a network object. @@ -395,11 +361,11 @@ def create_network_objects(self, name: str, value: str, description: str, overri Returns: Dict: Information about the created network """ - data = {'name': name, 'value': value, 'description': description, 'overridable': overridable} - suffix = 'object/networks' - return self._http_request('POST', suffix, json_data=data) + data = {"name": name, "value": value, "description": description, "overridable": overridable} + suffix = "object/networks" + return self._http_request("POST", suffix, json_data=data) - def create_host_objects(self, name: str, value: str, description: str, overridable: bool) -> Dict: + def create_host_objects(self, name: str, value: str, description: str, overridable: bool) -> dict: """ Create a host object. @@ -412,12 +378,11 @@ def create_host_objects(self, name: str, value: str, description: str, overridab Returns: Dict: Information about the created host. """ - data = {'name': name, 'value': value, 'description': description, 'overridable': overridable} - suffix = 'object/hosts' - return self._http_request('POST', suffix, json_data=data) + data = {"name": name, "value": value, "description": description, "overridable": overridable} + suffix = "object/hosts" + return self._http_request("POST", suffix, json_data=data) - def update_network_objects( - self, name: str, value: str, description: str, overridable: bool, object_id: str) -> Dict: + def update_network_objects(self, name: str, value: str, description: str, overridable: bool, object_id: str) -> dict: """ Update the specified network object. @@ -432,10 +397,10 @@ def update_network_objects( Dict: Information about the updated network. 
""" data = assign_params(id=object_id, name=name, value=value, description=description, overridable=overridable) - suffix = f'object/networks/{object_id}' - return self._http_request('PUT', suffix, json_data=data) + suffix = f"object/networks/{object_id}" + return self._http_request("PUT", suffix, json_data=data) - def update_host_objects(self, name: str, value: str, description: str, overridable: bool, object_id: str) -> Dict: + def update_host_objects(self, name: str, value: str, description: str, overridable: bool, object_id: str) -> dict: """ Update the specified host object. @@ -450,10 +415,10 @@ def update_host_objects(self, name: str, value: str, description: str, overridab Dict: Information about the updated host. """ data = assign_params(id=object_id, name=name, value=value, description=description, overridable=overridable) - suffix = f'object/hosts/{object_id}' - return self._http_request('PUT', suffix, json_data=data) + suffix = f"object/hosts/{object_id}" + return self._http_request("PUT", suffix, json_data=data) - def delete_network_objects(self, object_id: str) -> Dict: + def delete_network_objects(self, object_id: str) -> dict: """ Delete the specified network object. @@ -463,10 +428,10 @@ def delete_network_objects(self, object_id: str) -> Dict: Returns: Dict: Information about the deleted object. """ - suffix = f'object/networks/{object_id}' - return self._http_request('DELETE', suffix) + suffix = f"object/networks/{object_id}" + return self._http_request("DELETE", suffix) - def delete_host_objects(self, object_id: str) -> Dict: + def delete_host_objects(self, object_id: str) -> dict: """ Delete the specified host object. @@ -476,10 +441,10 @@ def delete_host_objects(self, object_id: str) -> Dict: Returns: Dict: Information about the deleted host. """ - suffix = f'object/hosts/{object_id}' - return self._http_request('DELETE', suffix) + suffix = f"object/hosts/{object_id}" + return self._http_request("DELETE", suffix) - def get_network_groups_objects(self, limit: int, offset: int, object_id: str) -> Dict: + def get_network_groups_objects(self, limit: int, offset: int, object_id: str) -> dict: """ Retrieves the groups of network objects and addresses associated with the specified ID. If not supplied, retrieves a list of all network objects. @@ -492,11 +457,11 @@ def get_network_groups_objects(self, limit: int, offset: int, object_id: str) -> Returns: Dict: Information about network groups. """ - end_suffix = f'/{object_id}' if object_id else f'?expanded=true&limit={limit}&offset={offset}' - suffix = f'object/networkgroups{end_suffix}' - return self._http_request('GET', suffix) + end_suffix = f"/{object_id}" if object_id else f"?expanded=true&limit={limit}&offset={offset}" + suffix = f"object/networkgroups{end_suffix}" + return self._http_request("GET", suffix) - def get_url_groups_objects(self, limit: int, offset: int, object_id: str) -> Dict: + def get_url_groups_objects(self, limit: int, offset: int, object_id: str) -> dict: """ Retrieves the groups of url objects and addresses associated with the specified ID. If not supplied, retrieves a list of all url objects. @@ -509,12 +474,11 @@ def get_url_groups_objects(self, limit: int, offset: int, object_id: str) -> Dic Returns: Dict: Information about url groups. 
""" - end_suffix = f'/{object_id}' if object_id else f'?expanded=true&limit={limit}&offset={offset}' - suffix = f'object/urlgroups{end_suffix}' - return self._http_request('GET', suffix) + end_suffix = f"/{object_id}" if object_id else f"?expanded=true&limit={limit}&offset={offset}" + suffix = f"object/urlgroups{end_suffix}" + return self._http_request("GET", suffix) - def create_network_groups_objects( - self, name: str, ids: str, values: str, description: str, overridable: bool) -> Dict: + def create_network_groups_objects(self, name: str, ids: str, values: str, description: str, overridable: bool) -> dict: """ Creates a group of network objects. @@ -528,15 +492,15 @@ def create_network_groups_objects( Returns: Dict: Information about the created network group. """ - objects = [{'id': curr_id} for curr_id in argToList(ids)] - values = [{'value': curr_value} for curr_value in argToList(values)] - data = assign_params( - name=name, objects=objects, literals=values, description=description, overridable=overridable) - suffix = 'object/networkgroups' - return self._http_request('POST', suffix, json_data=data) + objects = [{"id": curr_id} for curr_id in argToList(ids)] + values = [{"value": curr_value} for curr_value in argToList(values)] + data = assign_params(name=name, objects=objects, literals=values, description=description, overridable=overridable) + suffix = "object/networkgroups" + return self._http_request("POST", suffix, json_data=data) def update_network_groups_objects( - self, name: str, ids: str, values: str, group_id: str, description: str, overridable: bool) -> Dict: + self, name: str, ids: str, values: str, group_id: str, description: str, overridable: bool + ) -> dict: """ Updates a group of network objects. @@ -551,15 +515,17 @@ def update_network_groups_objects( Returns: Dict: Information about the updated group. """ - objects = [{'id': curr_id} for curr_id in argToList(ids)] - values = [{'value': curr_value} for curr_value in argToList(values)] - data = assign_params(name=name, id=group_id, objects=objects, literals=values, - description=description, overridable=overridable) - suffix = f'object/networkgroups/{group_id}' - return self._http_request('PUT', suffix, json_data=data) + objects = [{"id": curr_id} for curr_id in argToList(ids)] + values = [{"value": curr_value} for curr_value in argToList(values)] + data = assign_params( + name=name, id=group_id, objects=objects, literals=values, description=description, overridable=overridable + ) + suffix = f"object/networkgroups/{group_id}" + return self._http_request("PUT", suffix, json_data=data) def update_url_groups_objects( - self, name: str, ids: str, values: str, group_id: str, description: str, overridable: bool) -> Dict: + self, name: str, ids: str, values: str, group_id: str, description: str, overridable: bool + ) -> dict: """ Update the ID of a group of url objects. @@ -574,14 +540,15 @@ def update_url_groups_objects( Returns: Dict: Information about the updated url group. 
""" - objects = [{'id': curr_id} for curr_id in argToList(ids)] - values = [{'url': curr_value} for curr_value in argToList(values)] - data = assign_params(name=name, id=group_id, objects=objects, literals=values, - description=description, overridable=overridable) - suffix = f'object/urlgroups/{group_id}' - return self._http_request('PUT', suffix, json_data=data) - - def delete_network_groups_objects(self, object_id: str) -> Dict: + objects = [{"id": curr_id} for curr_id in argToList(ids)] + values = [{"url": curr_value} for curr_value in argToList(values)] + data = assign_params( + name=name, id=group_id, objects=objects, literals=values, description=description, overridable=overridable + ) + suffix = f"object/urlgroups/{group_id}" + return self._http_request("PUT", suffix, json_data=data) + + def delete_network_groups_objects(self, object_id: str) -> dict: """ Deletes a group of network objects. @@ -591,10 +558,10 @@ def delete_network_groups_objects(self, object_id: str) -> Dict: Returns: Dict: Information about the deleted network group. """ - suffix = f'object/networkgroups/{object_id}' - return self._http_request('DELETE', suffix) + suffix = f"object/networkgroups/{object_id}" + return self._http_request("DELETE", suffix) - def get_access_policy(self, limit: int, offset: int, policy_id: str) -> Dict: + def get_access_policy(self, limit: int, offset: int, policy_id: str) -> dict: """ Retrieves the access control policy associated with the specified ID. If no access policy ID is passed, all access control policies are returned. @@ -607,11 +574,11 @@ def get_access_policy(self, limit: int, offset: int, policy_id: str) -> Dict: Returns: Dict: Information about access policies. """ - end_suffix = f'/{policy_id}' if policy_id else f'?expanded=true&limit={limit}&offset={offset}' - suffix = f'policy/accesspolicies{end_suffix}' - return self._http_request('GET', suffix) + end_suffix = f"/{policy_id}" if policy_id else f"?expanded=true&limit={limit}&offset={offset}" + suffix = f"policy/accesspolicies{end_suffix}" + return self._http_request("GET", suffix) - def create_access_policy(self, name: str, action: str) -> Dict: + def create_access_policy(self, name: str, action: str) -> dict: """ Create an access control policy. @@ -622,11 +589,11 @@ def create_access_policy(self, name: str, action: str) -> Dict: Returns: Dict: Information about the created access policy. """ - data = {'name': name, 'defaultAction': {'action': action}} - suffix = 'policy/accesspolicies' - return self._http_request('POST', suffix, json_data=data) + data = {"name": name, "defaultAction": {"action": action}} + suffix = "policy/accesspolicies" + return self._http_request("POST", suffix, json_data=data) - def update_access_policy(self, name: str, policy_id: str, action: str, action_id: str) -> Dict: + def update_access_policy(self, name: str, policy_id: str, action: str, action_id: str) -> dict: """ Update the specified access control policy. @@ -639,17 +606,11 @@ def update_access_policy(self, name: str, policy_id: str, action: str, action_id Returns: Dict: Information about the updated access policy. 
""" - data = { - 'name': name, - 'id': policy_id, - 'defaultAction': { - 'action': action, - 'id': action_id - }} - suffix = f'policy/accesspolicies/{policy_id}' - return self._http_request('PUT', suffix, json_data=data) - - def delete_access_policy(self, policy_id: str) -> Dict: + data = {"name": name, "id": policy_id, "defaultAction": {"action": action, "id": action_id}} + suffix = f"policy/accesspolicies/{policy_id}" + return self._http_request("PUT", suffix, json_data=data) + + def delete_access_policy(self, policy_id: str) -> dict: """ Deletes the specified access control policy. @@ -659,10 +620,10 @@ def delete_access_policy(self, policy_id: str) -> Dict: Returns: Dict: Information about the deleted access policy. """ - suffix = f'policy/accesspolicies/{policy_id}' - return self._http_request('DELETE', suffix) + suffix = f"policy/accesspolicies/{policy_id}" + return self._http_request("DELETE", suffix) - def get_task_status(self, task_id: str) -> Dict: + def get_task_status(self, task_id: str) -> dict: """ The ID of the task for which to check the status. @@ -673,10 +634,10 @@ def get_task_status(self, task_id: str) -> Dict: Returns: Dict: Information about the task status. """ - suffix = f'job/taskstatuses/{task_id}' - return self._http_request('GET', suffix) + suffix = f"job/taskstatuses/{task_id}" + return self._http_request("GET", suffix) - def create_policy_assignments(self, policy_id: str, device_ids: str, device_group_ids: str) -> Dict: + def create_policy_assignments(self, policy_id: str, device_ids: str, device_group_ids: str) -> dict: """ Creates policy assignments to target devices. @@ -688,13 +649,13 @@ def create_policy_assignments(self, policy_id: str, device_ids: str, device_grou Returns: Dict: Information about the created policy assignment. """ - targets = [{'id': curr_id, 'type': 'Device'} for curr_id in argToList(device_ids)] - targets.extend([{'id': curr_id, 'type': 'DeviceGroup'} for curr_id in argToList(device_group_ids)]) - data_to_post = assign_params(policy={'id': policy_id}, type='PolicyAssignment', targets=targets) - suffix = 'assignment/policyassignments' - return self._http_request('POST', suffix, json_data=data_to_post) + targets = [{"id": curr_id, "type": "Device"} for curr_id in argToList(device_ids)] + targets.extend([{"id": curr_id, "type": "DeviceGroup"} for curr_id in argToList(device_group_ids)]) + data_to_post = assign_params(policy={"id": policy_id}, type="PolicyAssignment", targets=targets) + suffix = "assignment/policyassignments" + return self._http_request("POST", suffix, json_data=data_to_post) - def update_policy_assignments(self, policy_id: str, device_ids: str, device_group_ids: str) -> Dict: + def update_policy_assignments(self, policy_id: str, device_ids: str, device_group_ids: str) -> dict: """ Update the specified policy assignments to target devices. @@ -706,13 +667,13 @@ def update_policy_assignments(self, policy_id: str, device_ids: str, device_grou Returns: Dict: Information about the updated policy assignment. 
""" - targets = [{'id': curr_id, 'type': 'Device'} for curr_id in argToList(device_ids)] - targets.extend([{'id': curr_id, 'type': 'DeviceGroup'} for curr_id in argToList(device_group_ids)]) - data_to_post = assign_params(policy={'id': policy_id}, type='PolicyAssignment', targets=targets) - suffix = f'assignment/policyassignments/{policy_id}' - return self._http_request('PUT', suffix, json_data=data_to_post) + targets = [{"id": curr_id, "type": "Device"} for curr_id in argToList(device_ids)] + targets.extend([{"id": curr_id, "type": "DeviceGroup"} for curr_id in argToList(device_group_ids)]) + data_to_post = assign_params(policy={"id": policy_id}, type="PolicyAssignment", targets=targets) + suffix = f"assignment/policyassignments/{policy_id}" + return self._http_request("PUT", suffix, json_data=data_to_post) - def get_access_rules(self, limit: int, offset: int, policy_id: str, rule_id: str) -> Dict: + def get_access_rules(self, limit: int, offset: int, policy_id: str, rule_id: str) -> dict: """ Retrieves the access control rule associated with the specified policy ID and rule ID. If no rule ID is specified, retrieves a list of all access rules associated with the specified policy ID. @@ -726,30 +687,30 @@ def get_access_rules(self, limit: int, offset: int, policy_id: str, rule_id: str Returns: Dict: Information about access rules. """ - end_suffix = f'?expanded=true&limit={limit}&offset={offset}' if rule_id == '' else '/' + rule_id - suffix = f'policy/accesspolicies/{policy_id}/accessrules{end_suffix}' - return self._http_request('GET', suffix) + end_suffix = f"?expanded=true&limit={limit}&offset={offset}" if rule_id == "" else "/" + rule_id + suffix = f"policy/accesspolicies/{policy_id}/accessrules{end_suffix}" + return self._http_request("GET", suffix) def create_access_rules( - self, - source_zone_object_ids: str, - destination_zone_object_ids: str, - vlan_tag_object_ids: str, - source_network_object_ids: str, - source_network_addresses: str, - destination_network_object_ids: str, - destination_network_addresses: str, - source_port_object_ids: str, - destination_port_object_ids: str, - source_security_group_tag_object_ids: str, - application_object_ids: str, - url_object_ids: str, - url_addresses: str, - enabled: bool, - name: str, - policy_id: str, - action: str - ) -> Dict: + self, + source_zone_object_ids: str, + destination_zone_object_ids: str, + vlan_tag_object_ids: str, + source_network_object_ids: str, + source_network_addresses: str, + destination_network_object_ids: str, + destination_network_addresses: str, + source_port_object_ids: str, + destination_port_object_ids: str, + source_security_group_tag_object_ids: str, + application_object_ids: str, + url_object_ids: str, + url_addresses: str, + enabled: bool, + name: str, + policy_id: str, + action: str, + ) -> dict: """ Creates an access control rule. @@ -777,58 +738,75 @@ def create_access_rules( Returns: Dict: Information about the created access rule. 
""" - sourceZones = {'objects': [{'id': curr_id, 'type': 'SecurityZone' - } for curr_id in argToList(source_zone_object_ids)]} - destinationZones = {'objects': [{'id': curr_id, 'type': 'SecurityZone' - } for curr_id in argToList(destination_zone_object_ids)]} - vlanTags = {'objects': [{'id': curr_id, 'type': 'vlanTags'} for curr_id in argToList(vlan_tag_object_ids)]} + sourceZones = {"objects": [{"id": curr_id, "type": "SecurityZone"} for curr_id in argToList(source_zone_object_ids)]} + destinationZones = { + "objects": [{"id": curr_id, "type": "SecurityZone"} for curr_id in argToList(destination_zone_object_ids)] + } + vlanTags = {"objects": [{"id": curr_id, "type": "vlanTags"} for curr_id in argToList(vlan_tag_object_ids)]} sourceNetworks = assign_params( - objects=[{'id': curr_id, 'type': 'NetworkGroup'} for curr_id in argToList(source_network_object_ids)], - literals=[{'value': curr_id, 'type': 'Host'} for curr_id in argToList(source_network_addresses)]) + objects=[{"id": curr_id, "type": "NetworkGroup"} for curr_id in argToList(source_network_object_ids)], + literals=[{"value": curr_id, "type": "Host"} for curr_id in argToList(source_network_addresses)], + ) destinationNetworks = assign_params( - objects=[{'id': curr_id, 'type': 'NetworkGroup'} for curr_id in argToList(destination_network_object_ids)], - literals=[{'value': curr_id, 'type': 'Host'} for curr_id in argToList(destination_network_addresses)]) - sourcePorts = {'objects': [{'id': curr_id, 'type': 'ProtocolPortObject' - } for curr_id in argToList(source_port_object_ids)]} - destinationPorts = {'objects': [{'id': curr_id, 'type': 'ProtocolPortObject' - } for curr_id in argToList(destination_port_object_ids)]} - sourceSecurityGroupTags = {'objects': [{'id': curr_id, 'type': 'SecurityGroupTag' - } for curr_id in argToList(source_security_group_tag_object_ids)]} - applications = {'applications': [{'id': curr_id, 'type': 'Application' - } for curr_id in argToList(application_object_ids)]} + objects=[{"id": curr_id, "type": "NetworkGroup"} for curr_id in argToList(destination_network_object_ids)], + literals=[{"value": curr_id, "type": "Host"} for curr_id in argToList(destination_network_addresses)], + ) + sourcePorts = { + "objects": [{"id": curr_id, "type": "ProtocolPortObject"} for curr_id in argToList(source_port_object_ids)] + } + destinationPorts = { + "objects": [{"id": curr_id, "type": "ProtocolPortObject"} for curr_id in argToList(destination_port_object_ids)] + } + sourceSecurityGroupTags = { + "objects": [ + {"id": curr_id, "type": "SecurityGroupTag"} for curr_id in argToList(source_security_group_tag_object_ids) + ] + } + applications = {"applications": [{"id": curr_id, "type": "Application"} for curr_id in argToList(application_object_ids)]} urls = assign_params( - objects=[{'id': curr_id, 'type': 'Url'} for curr_id in argToList(url_object_ids)], - literals=[{'url': curr_id, 'type': 'Url'} for curr_id in argToList(url_addresses)]) - data = assign_params(name=name, action=action, enabled=enabled, sourceZones=sourceZones, - destinationZones=destinationZones, vlanTags=vlanTags, sourceNetworks=sourceNetworks, - destinationNetworks=destinationNetworks, sourcePorts=sourcePorts, - destinationPorts=destinationPorts, sourceSecurityGroupTags=sourceSecurityGroupTags, - applications=applications, urls=urls) - suffix = f'policy/accesspolicies/{policy_id}/accessrules' - return self._http_request('POST', suffix, json_data=data) + objects=[{"id": curr_id, "type": "Url"} for curr_id in argToList(url_object_ids)], + literals=[{"url": 
curr_id, "type": "Url"} for curr_id in argToList(url_addresses)], + ) + data = assign_params( + name=name, + action=action, + enabled=enabled, + sourceZones=sourceZones, + destinationZones=destinationZones, + vlanTags=vlanTags, + sourceNetworks=sourceNetworks, + destinationNetworks=destinationNetworks, + sourcePorts=sourcePorts, + destinationPorts=destinationPorts, + sourceSecurityGroupTags=sourceSecurityGroupTags, + applications=applications, + urls=urls, + ) + suffix = f"policy/accesspolicies/{policy_id}/accessrules" + return self._http_request("POST", suffix, json_data=data) def update_access_rules( - self, - update_strategy: str, - source_zone_object_ids: str, - destination_zone_object_ids: str, - vlan_tag_object_ids: str, - source_network_object_ids: str, - source_network_addresses: str, - destination_network_object_ids: str, - destination_network_addresses: str, - source_port_object_ids: str, - destination_port_object_ids: str, - source_security_group_tag_object_ids: str, - application_object_ids: str, - url_object_ids: str, - url_addresses: str, - enabled: bool, - name: str, - policy_id: str, - action: str, - rule_id: str - ) -> Dict: + self, + update_strategy: str, + source_zone_object_ids: str, + destination_zone_object_ids: str, + vlan_tag_object_ids: str, + source_network_object_ids: str, + source_network_addresses: str, + destination_network_object_ids: str, + destination_network_addresses: str, + source_port_object_ids: str, + destination_port_object_ids: str, + source_security_group_tag_object_ids: str, + application_object_ids: str, + url_object_ids: str, + url_addresses: str, + enabled: bool, + name: str, + policy_id: str, + action: str, + rule_id: str, + ) -> dict: """ Update the specified access control rule. @@ -860,45 +838,63 @@ def update_access_rules( Returns: Dict: Information about the updated access rule. 
""" - suffix = f'policy/accesspolicies/{policy_id}/accessrules/{rule_id}' + suffix = f"policy/accesspolicies/{policy_id}/accessrules/{rule_id}" sourceZones = assign_params( - objects=[{'id': curr_id, 'type': 'SecurityZone'} for curr_id in argToList(source_zone_object_ids)]) + objects=[{"id": curr_id, "type": "SecurityZone"} for curr_id in argToList(source_zone_object_ids)] + ) destinationZones = assign_params( - objects=[{'id': curr_id, 'type': 'SecurityZone'} for curr_id in argToList(destination_zone_object_ids)]) - vlanTags = assign_params( - objects=[{'id': curr_id, 'type': 'vlanTags'} for curr_id in argToList(vlan_tag_object_ids)]) + objects=[{"id": curr_id, "type": "SecurityZone"} for curr_id in argToList(destination_zone_object_ids)] + ) + vlanTags = assign_params(objects=[{"id": curr_id, "type": "vlanTags"} for curr_id in argToList(vlan_tag_object_ids)]) sourceNetworks = assign_params( - objects=[{'id': curr_id, 'type': 'NetworkGroup'} for curr_id in argToList(source_network_object_ids)], - literals=[{'value': curr_id, 'type': 'Host'} for curr_id in argToList(source_network_addresses)]) + objects=[{"id": curr_id, "type": "NetworkGroup"} for curr_id in argToList(source_network_object_ids)], + literals=[{"value": curr_id, "type": "Host"} for curr_id in argToList(source_network_addresses)], + ) destinationNetworks = assign_params( - objects=[{'id': curr_id, 'type': 'NetworkGroup'} for curr_id in argToList(destination_network_object_ids)], - literals=[{'value': curr_id, 'type': 'Host'} for curr_id in argToList(destination_network_addresses)]) + objects=[{"id": curr_id, "type": "NetworkGroup"} for curr_id in argToList(destination_network_object_ids)], + literals=[{"value": curr_id, "type": "Host"} for curr_id in argToList(destination_network_addresses)], + ) sourcePorts = assign_params( - objects=[{'id': curr_id, 'type': 'ProtocolPortObject'} for curr_id in argToList(source_port_object_ids)]) + objects=[{"id": curr_id, "type": "ProtocolPortObject"} for curr_id in argToList(source_port_object_ids)] + ) destinationPorts = assign_params( - objects=[{'id': curr_id, 'type': 'ProtocolPortObject'} for curr_id in - argToList(destination_port_object_ids)]) - sourceSecurityGroupTags = assign_params(objects=[{'id': curr_id, 'type': 'SecurityGroupTag'} for curr_id in - argToList(source_security_group_tag_object_ids)]) + objects=[{"id": curr_id, "type": "ProtocolPortObject"} for curr_id in argToList(destination_port_object_ids)] + ) + sourceSecurityGroupTags = assign_params( + objects=[{"id": curr_id, "type": "SecurityGroupTag"} for curr_id in argToList(source_security_group_tag_object_ids)] + ) applications = assign_params( - applications=[{'id': curr_id, 'type': 'Application'} for curr_id in argToList(application_object_ids)]) + applications=[{"id": curr_id, "type": "Application"} for curr_id in argToList(application_object_ids)] + ) urls = assign_params( - objects=[{'id': curr_id, 'type': 'Url'} for curr_id in argToList(url_object_ids)], - literals=[{'url': curr_id, 'type': 'Url'} for curr_id in argToList(url_addresses)]) - data = assign_params(name=name, action=action, id=rule_id, enabled=enabled, sourceZones=sourceZones, - destinationZones=destinationZones, vlanTags=vlanTags, sourceNetworks=sourceNetworks, - destinationNetworks=destinationNetworks, sourcePorts=sourcePorts, - destinationPorts=destinationPorts, sourceSecurityGroupTags=sourceSecurityGroupTags, - applications=applications, urls=urls) + objects=[{"id": curr_id, "type": "Url"} for curr_id in argToList(url_object_ids)], + 
literals=[{"url": curr_id, "type": "Url"} for curr_id in argToList(url_addresses)], + ) + data = assign_params( + name=name, + action=action, + id=rule_id, + enabled=enabled, + sourceZones=sourceZones, + destinationZones=destinationZones, + vlanTags=vlanTags, + sourceNetworks=sourceNetworks, + destinationNetworks=destinationNetworks, + sourcePorts=sourcePorts, + destinationPorts=destinationPorts, + sourceSecurityGroupTags=sourceSecurityGroupTags, + applications=applications, + urls=urls, + ) data_from_get = self.get_access_rules(0, 0, rule_id=rule_id, policy_id=policy_id) - if update_strategy == 'override': - if 'name' not in data: - data['name'] = data_from_get.get('name') - if 'action' not in data: - data['action'] = data_from_get.get('action') - return self._http_request('PUT', suffix, json_data=data) + if update_strategy == "override": + if "name" not in data: + data["name"] = data_from_get.get("name") + if "action" not in data: + data["action"] = data_from_get.get("action") + return self._http_request("PUT", suffix, json_data=data) else: for key, value in data.items(): if type(value) is dict: @@ -909,27 +905,28 @@ def update_access_rules( data_from_get[key][in_key] = value[in_key] else: data_from_get[key] = value - del data_from_get['metadata'] - del data_from_get['links'] - return self._http_request('PUT', suffix, json_data=data_from_get) - - def delete_access_rules(self, policy_id, rule_id) -> Dict: - suffix = f'policy/accesspolicies/{policy_id}/accessrules/{rule_id}' - return self._http_request('DELETE', suffix) - - def deploy_to_devices(self, force_deploy, ignore_warning, version, device_ids) -> Dict: - data_to_post = assign_params(forceDeploy=force_deploy, ignoreWarning=ignore_warning, version=version, - deviceList=argToList(device_ids), type="DeploymentRequest") - suffix = 'deployment/deploymentrequests' - return self._http_request('POST', suffix, json_data=data_to_post) + del data_from_get["metadata"] + del data_from_get["links"] + return self._http_request("PUT", suffix, json_data=data_from_get) + + def delete_access_rules(self, policy_id, rule_id) -> dict: + suffix = f"policy/accesspolicies/{policy_id}/accessrules/{rule_id}" + return self._http_request("DELETE", suffix) + + def deploy_to_devices(self, force_deploy, ignore_warning, version, device_ids) -> dict: + data_to_post = assign_params( + forceDeploy=force_deploy, + ignoreWarning=ignore_warning, + version=version, + deviceList=argToList(device_ids), + type="DeploymentRequest", + ) + suffix = "deployment/deploymentrequests" + return self._http_request("POST", suffix, json_data=data_to_post) def create_intrusion_policy( - self, - name: str, - basepolicy_id: str, - description: str = None, - inspection_mode: str = None - ) -> Dict[str, Any]: + self, name: str, basepolicy_id: str, description: str = None, inspection_mode: str = None + ) -> dict[str, Any]: """ Creates an Intrusion Policy with the specified parameters. @@ -944,23 +941,18 @@ def create_intrusion_policy( Returns: Dict[str, Any]: New Intrusion Policy's information. 
""" - body: Dict[str, Any] = remove_empty_elements({ - 'name': name, - 'description': description, - 'inspection_mode': inspection_mode, - 'basePolicy': { - 'id': basepolicy_id - } - }) + body: dict[str, Any] = remove_empty_elements( + {"name": name, "description": description, "inspection_mode": inspection_mode, "basePolicy": {"id": basepolicy_id}} + ) return self._http_request( - method='POST', - url_suffix='policy/intrusionpolicies', + method="POST", + url_suffix="policy/intrusionpolicies", json_data=body, timeout=EXECUTION_TIMEOUT, ) - def get_intrusion_policy(self, intrusion_policy_id: str, include_count: bool = None) -> Dict[str, Any]: + def get_intrusion_policy(self, intrusion_policy_id: str, include_count: bool = None) -> dict[str, Any]: """ Retrieves the intrusion policy associated with the specified ID. @@ -972,23 +964,16 @@ def get_intrusion_policy(self, intrusion_policy_id: str, include_count: bool = N Returns: Dict[str, Any]: Information about the specific intrusion policy """ - params = assign_params( - includeCount=include_count - ) + params = assign_params(includeCount=include_count) return self._http_request( - method='GET', - url_suffix=f'policy/intrusionpolicies/{intrusion_policy_id}', + method="GET", + url_suffix=f"policy/intrusionpolicies/{intrusion_policy_id}", params=params, ) - @pagination(api_limit=API_LIMIT, items_key_path=['items']) - def list_intrusion_policy( - self, - limit: int = None, - offset: int = None, - expanded_response: bool = None - ) -> Dict[str, Any]: + @pagination(api_limit=API_LIMIT, items_key_path=["items"]) + def list_intrusion_policy(self, limit: int = None, offset: int = None, expanded_response: bool = None) -> dict[str, Any]: """ Retrieves a list of intrusion policies. @@ -1011,8 +996,8 @@ def list_intrusion_policy( ) return self._http_request( - method='GET', - url_suffix='policy/intrusionpolicies', + method="GET", + url_suffix="policy/intrusionpolicies", params=params, ) @@ -1024,7 +1009,7 @@ def update_intrusion_policy( description: str = None, inspection_mode: str = None, replicate_inspection_mode: bool = None, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """ Modifies the Intrusion Policy associated with the specified ID. @@ -1044,31 +1029,26 @@ def update_intrusion_policy( Returns: Dict[str, Any]: Updated Intrusion Policy information. """ - params = assign_params( - replicateInspectionMode=replicate_inspection_mode - ) - body: Dict[str, Any] = remove_empty_elements({ - 'id': intrusion_policy_id, - 'name': name, - 'description': description, - 'inspection_mode': inspection_mode, - 'basePolicy': { - 'id': basepolicy_id + params = assign_params(replicateInspectionMode=replicate_inspection_mode) + body: dict[str, Any] = remove_empty_elements( + { + "id": intrusion_policy_id, + "name": name, + "description": description, + "inspection_mode": inspection_mode, + "basePolicy": {"id": basepolicy_id}, } - }) + ) return self._http_request( - method='PUT', - url_suffix=f'policy/intrusionpolicies/{intrusion_policy_id}', + method="PUT", + url_suffix=f"policy/intrusionpolicies/{intrusion_policy_id}", params=params, json_data=body, timeout=EXECUTION_TIMEOUT, ) - def delete_intrusion_policy( - self, - intrusion_policy_id: str - ) -> Dict[str, Any]: + def delete_intrusion_policy(self, intrusion_policy_id: str) -> dict[str, Any]: """ Deletes the Intrusion Policy associated with the specified ID. @@ -1079,15 +1059,11 @@ def delete_intrusion_policy( Dict[str, Any]: Information about the deleted Intrusion Policy. 
""" return self._http_request( - method='DELETE', - url_suffix=f'policy/intrusionpolicies/{intrusion_policy_id}', + method="DELETE", + url_suffix=f"policy/intrusionpolicies/{intrusion_policy_id}", ) - def create_intrusion_rule( - self, - rule_data: str, - rule_group_ids: List[str] - ) -> Dict[str, Any]: + def create_intrusion_rule(self, rule_data: str, rule_group_ids: list[str]) -> dict[str, Any]: """ Creates or overrides the Snort3 Intrusion rule group with the specified parameters. @@ -1098,27 +1074,15 @@ def create_intrusion_rule( Returns: Dict[str, Any]: New Intrusion Rule's information. """ - body: Dict[str, Any] = { - 'ruleData': rule_data, - 'ruleGroups': [ - { - 'id': rule_group_id - } for rule_group_id in rule_group_ids - ] - } + body: dict[str, Any] = {"ruleData": rule_data, "ruleGroups": [{"id": rule_group_id} for rule_group_id in rule_group_ids]} return self._http_request( - method='POST', - url_suffix='object/intrusionrules', + method="POST", + url_suffix="object/intrusionrules", json_data=body, ) - def update_intrusion_rule( - self, - intrusion_rule_id: str, - rule_data: str, - rule_group_ids: List[str] - ) -> Dict[str, Any]: + def update_intrusion_rule(self, intrusion_rule_id: str, rule_data: str, rule_group_ids: list[str]) -> dict[str, Any]: """ Modifies the Snort3 Intrusion rule group with the specified ID. @@ -1130,26 +1094,22 @@ def update_intrusion_rule( Returns: Dict[str, Any]: Modified Intrusion Rule's information. """ - body: Dict[str, Any] = { - 'id': intrusion_rule_id, - 'ruleData': rule_data, - 'ruleGroups': [ - { - 'id': rule_group_id - } for rule_group_id in rule_group_ids - ] + body: dict[str, Any] = { + "id": intrusion_rule_id, + "ruleData": rule_data, + "ruleGroups": [{"id": rule_group_id} for rule_group_id in rule_group_ids], } return self._http_request( - method='PUT', - url_suffix=f'object/intrusionrules/{intrusion_rule_id}', + method="PUT", + url_suffix=f"object/intrusionrules/{intrusion_rule_id}", json_data=body, ) def delete_intrusion_rule( - self, - intrusion_rule_id: str, - ) -> Dict[str, Any]: + self, + intrusion_rule_id: str, + ) -> dict[str, Any]: """ Deletes the specified Snort3 rule. @@ -1160,11 +1120,11 @@ def delete_intrusion_rule( Dict[str, Any]: Deleted Intrusion Rule's information. """ return self._http_request( - method='DELETE', - url_suffix=f'object/intrusionrules/{intrusion_rule_id}', + method="DELETE", + url_suffix=f"object/intrusionrules/{intrusion_rule_id}", ) - def get_intrusion_rule(self, intrusion_rule_id: str) -> Dict[str, Any]: + def get_intrusion_rule(self, intrusion_rule_id: str) -> dict[str, Any]: """ Retrieves the Snort3 Intrusion rule group. @@ -1175,19 +1135,19 @@ def get_intrusion_rule(self, intrusion_rule_id: str) -> Dict[str, Any]: Dict[str, Any]: Information about the specific intrusion rule. """ return self._http_request( - method='GET', - url_suffix=f'object/intrusionrules/{intrusion_rule_id}', + method="GET", + url_suffix=f"object/intrusionrules/{intrusion_rule_id}", ) - @pagination(api_limit=API_LIMIT, items_key_path=['items'], start_count_from_zero=False) + @pagination(api_limit=API_LIMIT, items_key_path=["items"], start_count_from_zero=False) def list_intrusion_rule( self, limit: int = None, offset: int = None, - sort: List[str] = None, + sort: list[str] = None, filter_string: str = None, expanded_response: bool = None, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """ Retrieves a list of intrusion policies. 
@@ -1210,14 +1170,14 @@ def list_intrusion_rule( params = assign_params( limit=limit, offset=offset, - sort=','.join(sort) if sort else None, + sort=",".join(sort) if sort else None, filter=filter_string, expanded=expanded_response, ) return self._http_request( - method='GET', - url_suffix='object/intrusionrules', + method="GET", + url_suffix="object/intrusionrules", params=params, ) @@ -1227,8 +1187,8 @@ def upload_intrusion_rule_file( payload_file: str, validate_only: bool, rule_import_mode: str = None, - rule_group_ids: List[str] = None, - ) -> Dict[str, Any]: + rule_group_ids: list[str] = None, + ) -> dict[str, Any]: """ Imports or validate custom Snort 3 intrusion rules within a file. @@ -1246,12 +1206,14 @@ def upload_intrusion_rule_file( Returns: Dict[str, Any]: Information about the intrusion rules format or about the merged/replaced intrusion rules. """ - form_data = remove_empty_elements({ - 'payloadFile': (filename, payload_file), - 'ruleImportMode': rule_import_mode, - 'ruleGroups': ','.join(rule_group_ids) if rule_group_ids else None, - 'validateOnly': validate_only, - }) + form_data = remove_empty_elements( + { + "payloadFile": (filename, payload_file), + "ruleImportMode": rule_import_mode, + "ruleGroups": ",".join(rule_group_ids) if rule_group_ids else None, + "validateOnly": validate_only, + } + ) ok_codes = ( HTTPStatus.OK, @@ -1260,8 +1222,8 @@ def upload_intrusion_rule_file( ) return self._http_request( - method='POST', - url_suffix='object/intrusionrulesupload', + method="POST", + url_suffix="object/intrusionrulesupload", files=form_data, ok_codes=ok_codes, ) @@ -1270,7 +1232,7 @@ def create_intrusion_rule_group( self, name: str, description: str = None, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """ Creates or overrides the Snort3 Intrusion rule group with the specified parameters. @@ -1282,18 +1244,20 @@ def create_intrusion_rule_group( Returns: Dict[str, Any]: New Intrusion Rule Group's information. """ - body: Dict[str, Any] = remove_empty_elements({ - 'name': name, - 'description': description, - }) + body: dict[str, Any] = remove_empty_elements( + { + "name": name, + "description": description, + } + ) return self._http_request( - method='POST', - url_suffix='object/intrusionrulegroups', + method="POST", + url_suffix="object/intrusionrulegroups", json_data=body, ) - def get_intrusion_rule_group(self, rule_group_id: str) -> Dict[str, Any]: + def get_intrusion_rule_group(self, rule_group_id: str) -> dict[str, Any]: """ Retrieves the Snort3 Intrusion rule group. @@ -1304,18 +1268,18 @@ def get_intrusion_rule_group(self, rule_group_id: str) -> Dict[str, Any]: Dict[str, Any]: Information about the specific intrusion rule group. """ return self._http_request( - method='GET', - url_suffix=f'object/intrusionrulegroups/{rule_group_id}', + method="GET", + url_suffix=f"object/intrusionrulegroups/{rule_group_id}", ) - @pagination(api_limit=API_LIMIT, items_key_path=['items'], start_count_from_zero=False) + @pagination(api_limit=API_LIMIT, items_key_path=["items"], start_count_from_zero=False) def list_intrusion_rule_group( self, limit: int = None, offset: int = None, filter_string: str = None, expanded_response: bool = None, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """ Retrieves a list of all Snort3 Intrusion rule groups. 
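A hedged usage sketch for the multipart upload above (the file name, group
UUID, and "MERGE" import mode are illustrative assumptions, not values taken
from this changeset):

    # Import a custom Snort 3 rules file into one rule group.
    with open("custom_snort3.rules", "rb") as rules_file:
        result = client.upload_intrusion_rule_file(
            filename="custom_snort3.rules",
            payload_file=rules_file.read(),
            validate_only=False,  # True validates the file without importing it
            rule_import_mode="MERGE",
            rule_group_ids=["<rule-group-uuid>"],
        )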
@@ -1341,8 +1305,8 @@ def list_intrusion_rule_group( ) return self._http_request( - method='GET', - url_suffix='object/intrusionrulegroups', + method="GET", + url_suffix="object/intrusionrulegroups", params=params, ) @@ -1351,7 +1315,7 @@ def update_intrusion_rule_group( rule_group_id: str, name: str, description: str = None, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """ Modifies the Snort3 Intrusion rule group with the specified ID. @@ -1364,15 +1328,17 @@ def update_intrusion_rule_group( Returns: Dict[str, Any]: Modified Intrusion Rule Group's information. """ - body: Dict[str, Any] = remove_empty_elements({ - 'id': rule_group_id, - 'name': name, - 'description': description, - }) + body: dict[str, Any] = remove_empty_elements( + { + "id": rule_group_id, + "name": name, + "description": description, + } + ) return self._http_request( - method='PUT', - url_suffix=f'object/intrusionrulegroups/{rule_group_id}', + method="PUT", + url_suffix=f"object/intrusionrulegroups/{rule_group_id}", json_data=body, ) @@ -1380,7 +1346,7 @@ def delete_intrusion_rule_group( self, rule_group_id: str, delete_related_rules: bool = None, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """ Deletes the specified Snort3 intrusion rule group. @@ -1394,13 +1360,11 @@ def delete_intrusion_rule_group( Returns: Dict[str, Any]: Deleted Intrusion Rule Group's information. """ - params = assign_params( - cascadeDeleteOrphanedRules=delete_related_rules - ) + params = assign_params(cascadeDeleteOrphanedRules=delete_related_rules) return self._http_request( - method='DELETE', - url_suffix=f'object/intrusionrulegroups/{rule_group_id}', + method="DELETE", + url_suffix=f"object/intrusionrulegroups/{rule_group_id}", params=params, ) @@ -1410,7 +1374,7 @@ def create_network_analysis_policy( basepolicy_id: str, description: str = None, inspection_mode: str = None, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """ Creates a network analysis policy. @@ -1426,23 +1390,23 @@ def create_network_analysis_policy( Returns: Dict[str, Any]: New network analysis policy's information. """ - body: Dict[str, Any] = remove_empty_elements({ - 'name': name, - 'description': description, - 'inspectionMode': inspection_mode, - 'basePolicy': { - 'id': basepolicy_id - }, - }) + body: dict[str, Any] = remove_empty_elements( + { + "name": name, + "description": description, + "inspectionMode": inspection_mode, + "basePolicy": {"id": basepolicy_id}, + } + ) return self._http_request( - method='POST', - url_suffix='policy/networkanalysispolicies', + method="POST", + url_suffix="policy/networkanalysispolicies", json_data=body, timeout=EXECUTION_TIMEOUT, ) - def get_network_analysis_policy(self, network_analysis_policy_id: str) -> Dict[str, Any]: + def get_network_analysis_policy(self, network_analysis_policy_id: str) -> dict[str, Any]: """ Retrieves the network analysis policy with the specified ID @@ -1453,17 +1417,17 @@ def get_network_analysis_policy(self, network_analysis_policy_id: str) -> Dict[s Dict[str, Any]: Information about the specific network analysis policy. 
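        Example:
            client.get_network_analysis_policy("<network-analysis-policy-uuid>")  # placeholder ID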
""" return self._http_request( - method='GET', - url_suffix=f'policy/networkanalysispolicies/{network_analysis_policy_id}', + method="GET", + url_suffix=f"policy/networkanalysispolicies/{network_analysis_policy_id}", ) - @pagination(api_limit=API_LIMIT, items_key_path=['items']) + @pagination(api_limit=API_LIMIT, items_key_path=["items"]) def list_network_analysis_policy( self, limit: int = None, offset: int = None, expanded_response: bool = None, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """ Retrieves list of all network analysis policies. @@ -1486,8 +1450,8 @@ def list_network_analysis_policy( ) return self._http_request( - method='GET', - url_suffix='policy/networkanalysispolicies', + method="GET", + url_suffix="policy/networkanalysispolicies", params=params, ) @@ -1499,7 +1463,7 @@ def update_network_analysis_policy( description: str = None, inspection_mode: str = None, replicate_inspection_mode: bool = None, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """ Modifies the network analysis policy associated with the specified ID. @@ -1522,19 +1486,21 @@ def update_network_analysis_policy( params = assign_params( replicateInspectionMode=replicate_inspection_mode, ) - body: Dict[str, Any] = remove_empty_elements({ - 'id': network_analysis_policy_id, - 'name': name, - 'description': description, - 'inspectionMode': inspection_mode, - 'basePolicy': { - 'id': basepolicy_id, - }, - }) + body: dict[str, Any] = remove_empty_elements( + { + "id": network_analysis_policy_id, + "name": name, + "description": description, + "inspectionMode": inspection_mode, + "basePolicy": { + "id": basepolicy_id, + }, + } + ) return self._http_request( - method='PUT', - url_suffix=f'policy/networkanalysispolicies/{network_analysis_policy_id}', + method="PUT", + url_suffix=f"policy/networkanalysispolicies/{network_analysis_policy_id}", params=params, json_data=body, timeout=EXECUTION_TIMEOUT, @@ -1543,7 +1509,7 @@ def update_network_analysis_policy( def delete_network_analysis_policy( self, network_analysis_policy_id: str, - ) -> Dict[str, Any]: + ) -> dict[str, Any]: """ Deletes the network analysis policy associated with the specified ID. @@ -1554,15 +1520,15 @@ def delete_network_analysis_policy( Dict[str, Any]: Deleted network analysis policy's information. 
""" return self._http_request( - method='DELETE', - url_suffix=f'policy/networkanalysispolicies/{network_analysis_policy_id}', + method="DELETE", + url_suffix=f"policy/networkanalysispolicies/{network_analysis_policy_id}", ) -''' HELPER FUNCTIONS ''' # pylint: disable=pointless-string-statement +""" HELPER FUNCTIONS """ # pylint: disable=pointless-string-statement -def switch_list_to_list_counter(data: Union[Dict, List]) -> Union[Dict, List]: +def switch_list_to_list_counter(data: dict | list) -> dict | list: """Receives a list of dictionaries or a dictionary, and if one of the keys contains a list or dictionary with lists, returns the size of the lists @@ -1601,7 +1567,7 @@ def switch_list_to_list_counter(data: Union[Dict, List]) -> Union[Dict, List]: return new_data -def raw_response_to_context_list(list_key: List, items: Union[Dict, List]) -> Union[Dict, List]: +def raw_response_to_context_list(list_key: list, items: dict | list) -> dict | list: """Receives a dictionary or list of dictionaries and returns only the keys that exist in the list_key and changes the keys by Context Standards @@ -1614,11 +1580,11 @@ def raw_response_to_context_list(list_key: List, items: Union[Dict, List]) -> Un if isinstance(items, list): return [raw_response_to_context_list(list_key, item) for item in items] - list_to_output = {OUTPUT_KEYS_DICTIONARY.get(key, key.capitalize()): items.get(key, '') for key in list_key} + list_to_output = {OUTPUT_KEYS_DICTIONARY.get(key, key.capitalize()): items.get(key, "") for key in list_key} return list_to_output -def raw_response_to_context_network_groups(items: Union[Dict, List]) -> Union[Dict, List]: +def raw_response_to_context_network_groups(items: dict | list) -> dict | list: """Receives raw response and returns Context entry to network groups command :type items: ``list`` or ``dict`` @@ -1630,27 +1596,16 @@ def raw_response_to_context_network_groups(items: Union[Dict, List]) -> Union[Di if isinstance(items, list): return [raw_response_to_context_network_groups(item) for item in items] return { - 'Name': items.get('name'), - 'ID': items.get('id'), - 'Overridable': items.get('overridable'), - 'Description': items.get('description'), - 'Objects': [ - { - 'Name': obj.get('name'), - 'ID': obj.get('id'), - 'Type': obj.get('type') - } for obj in items.get('objects', []) - ], - 'Addresses': [ - { - 'Value': obj.get('value'), - 'Type': obj.get('type') - } for obj in items.get('literals', []) - ] + "Name": items.get("name"), + "ID": items.get("id"), + "Overridable": items.get("overridable"), + "Description": items.get("description"), + "Objects": [{"Name": obj.get("name"), "ID": obj.get("id"), "Type": obj.get("type")} for obj in items.get("objects", [])], + "Addresses": [{"Value": obj.get("value"), "Type": obj.get("type")} for obj in items.get("literals", [])], } -def raw_response_to_context_url_groups(items: Union[Dict, List]) -> Union[Dict, List]: +def raw_response_to_context_url_groups(items: dict | list) -> dict | list: """Receives raw response and returns Context entry to url groups command :type items: ``list`` or ``dict`` :param items: list of dict or dict of data from http request @@ -1660,27 +1615,16 @@ def raw_response_to_context_url_groups(items: Union[Dict, List]) -> Union[Dict, if isinstance(items, list): return [raw_response_to_context_url_groups(item) for item in items] return { - 'Name': items.get('name'), - 'ID': items.get('id'), - 'Overridable': items.get('overridable'), - 'Description': items.get('description'), - 'Objects': [ - { - 'Name': obj.get('name'), 
- 'ID': obj.get('id'), - 'Type': obj.get('type') - } for obj in items.get('objects', []) - ], - 'Addresses': [ - { - 'Url': obj.get('url'), - 'Type': obj.get('type') - } for obj in items.get('literals', []) - ] + "Name": items.get("name"), + "ID": items.get("id"), + "Overridable": items.get("overridable"), + "Description": items.get("description"), + "Objects": [{"Name": obj.get("name"), "ID": obj.get("id"), "Type": obj.get("type")} for obj in items.get("objects", [])], + "Addresses": [{"Url": obj.get("url"), "Type": obj.get("type")} for obj in items.get("literals", [])], } -def raw_response_to_context_policy_assignment(items: Union[Dict, List]) -> Union[Dict, List]: +def raw_response_to_context_policy_assignment(items: dict | list) -> dict | list: """Receives raw response and returns Context entry to policy assignment command :type items: ``list`` or ``dict`` @@ -1692,22 +1636,16 @@ def raw_response_to_context_policy_assignment(items: Union[Dict, List]) -> Union if isinstance(items, list): return [raw_response_to_context_policy_assignment(item) for item in items] return { - 'Name': items.get('name'), - 'ID': items.get('id'), - 'PolicyName': items.get('policy', {}).get('name', ''), - 'PolicyID': items.get('policy', {}).get('id', ''), - 'PolicyDescription': items.get('policy', {}).get('description', ''), - 'Targets': [ - { - 'Name': obj.get('name'), - 'ID': obj.get('id'), - 'Type': obj.get('type') - } for obj in items.get('targets', []) - ] + "Name": items.get("name"), + "ID": items.get("id"), + "PolicyName": items.get("policy", {}).get("name", ""), + "PolicyID": items.get("policy", {}).get("id", ""), + "PolicyDescription": items.get("policy", {}).get("description", ""), + "Targets": [{"Name": obj.get("name"), "ID": obj.get("id"), "Type": obj.get("type")} for obj in items.get("targets", [])], } -def raw_response_to_context_access_policy(items: Union[Dict, List]) -> Union[Dict, List]: +def raw_response_to_context_access_policy(items: dict | list) -> dict | list: """Receives raw response and returns Context entry to access policy command :type items: ``list`` or ``dict`` @@ -1718,14 +1656,10 @@ def raw_response_to_context_access_policy(items: Union[Dict, List]) -> Union[Dic """ if isinstance(items, list): return [raw_response_to_context_access_policy(item) for item in items] - return { - 'Name': items.get('name'), - 'ID': items.get('id'), - 'DefaultActionID': items.get('defaultAction', {}).get('id', '') - } + return {"Name": items.get("name"), "ID": items.get("id"), "DefaultActionID": items.get("defaultAction", {}).get("id", "")} -def raw_response_to_context_rules(items: Union[Dict, List]) -> Union[Dict, List]: +def raw_response_to_context_rules(items: dict | list) -> dict | list: """Receives raw response and returns Context entry to rules command :type items: ``list`` or ``dict`` @@ -1737,129 +1671,110 @@ def raw_response_to_context_rules(items: Union[Dict, List]) -> Union[Dict, List] if isinstance(items, list): return [raw_response_to_context_rules(item) for item in items] return { - 'ID': items.get('id'), - 'Name': items.get('name'), - 'Action': items.get('action'), - 'Enabled': items.get('enabled'), - 'SendEventsToFMC': items.get('sendEventsToFMC'), - 'RuleIndex': items.get('metadata', {}).get('ruleIndex', ''), - 'Section': items.get('metadata', {}).get('section', ''), - 'Category': items.get('metadata', {}).get('category', ''), - 'Urls': { - 'Addresses': [{ - 'URL': obj.get('url', '') - } for obj in items.get('urls', {}).get('literals', []) + "ID": items.get("id"), + "Name": 
items.get("name"), + "Action": items.get("action"), + "Enabled": items.get("enabled"), + "SendEventsToFMC": items.get("sendEventsToFMC"), + "RuleIndex": items.get("metadata", {}).get("ruleIndex", ""), + "Section": items.get("metadata", {}).get("section", ""), + "Category": items.get("metadata", {}).get("category", ""), + "Urls": { + "Addresses": [{"URL": obj.get("url", "")} for obj in items.get("urls", {}).get("literals", [])], + "Objects": [ + {"Name": obj.get("name", ""), "ID": obj.get("id", "")} for obj in items.get("urls", {}).get("objects", []) ], - 'Objects': [{ - 'Name': obj.get('name', ''), - 'ID': obj.get('id', '') - } for obj in items.get('urls', {}).get('objects', []) - ] }, - 'VlanTags': { - 'Numbers': [{ - 'EndTag': obj.get('endTag', ''), - 'StartTag': obj.get('startTag', '') - } for obj in items.get('vlanTags', {}).get('literals', []) + "VlanTags": { + "Numbers": [ + {"EndTag": obj.get("endTag", ""), "StartTag": obj.get("startTag", "")} + for obj in items.get("vlanTags", {}).get("literals", []) + ], + "Objects": [ + {"Name": obj.get("name", ""), "ID": obj.get("id", ""), "Type": obj.get("type", "")} + for obj in items.get("vlanTags", {}).get("objects", []) ], - 'Objects': [{ - 'Name': obj.get('name', ''), - 'ID': obj.get('id', ''), - 'Type': obj.get('type', '') - } for obj in items.get('vlanTags', {}).get('objects', []) - ] }, - 'SourceZones': { - 'Objects': [{ - 'Name': obj.get('name', ''), - 'ID': obj.get('id', ''), - 'Type': obj.get('type', '') - } for obj in items.get('sourceZones', {}).get('objects', []) + "SourceZones": { + "Objects": [ + {"Name": obj.get("name", ""), "ID": obj.get("id", ""), "Type": obj.get("type", "")} + for obj in items.get("sourceZones", {}).get("objects", []) ] }, - 'Applications': [{ - 'Name': obj.get('name', ''), - 'ID': obj.get('id', '') - } for obj in items.get('applications', {}).get('applications', []) + "Applications": [ + {"Name": obj.get("name", ""), "ID": obj.get("id", "")} + for obj in items.get("applications", {}).get("applications", []) ], - 'DestinationZones': { - 'Objects': [{ - 'Name': obj.get('name', ''), - 'ID': obj.get('id', ''), - 'Type': obj.get('type', '') - } for obj in items.get('destinationZones', {}).get('objects', []) + "DestinationZones": { + "Objects": [ + {"Name": obj.get("name", ""), "ID": obj.get("id", ""), "Type": obj.get("type", "")} + for obj in items.get("destinationZones", {}).get("objects", []) ] }, - 'SourceNetworks': { - 'Addresses': [{ - 'Type': obj.get('type', ''), - 'Value': obj.get('value', '') - } for obj in items.get('sourceNetworks', {}).get('literals', []) + "SourceNetworks": { + "Addresses": [ + {"Type": obj.get("type", ""), "Value": obj.get("value", "")} + for obj in items.get("sourceNetworks", {}).get("literals", []) + ], + "Objects": [ + {"Name": obj.get("name", ""), "ID": obj.get("id", ""), "Type": obj.get("type", "")} + for obj in items.get("sourceNetworks", {}).get("objects", []) ], - 'Objects': [{ - 'Name': obj.get('name', ''), - 'ID': obj.get('id', ''), - 'Type': obj.get('type', '') - } for obj in items.get('sourceNetworks', {}).get('objects', []) - ] }, - 'DestinationNetworks': { - 'Addresses': [{ - 'Type': obj.get('type', ''), - 'Value': obj.get('value', '') - } for obj in items.get('destinationNetworks', {}).get('literals', []) + "DestinationNetworks": { + "Addresses": [ + {"Type": obj.get("type", ""), "Value": obj.get("value", "")} + for obj in items.get("destinationNetworks", {}).get("literals", []) + ], + "Objects": [ + {"Name": obj.get("name", ""), "ID": obj.get("id", ""), "Type": 
obj.get("type", "")} + for obj in items.get("destinationNetworks", {}).get("objects", []) ], - 'Objects': [{ - 'Name': obj.get('name', ''), - 'ID': obj.get('id', ''), - 'Type': obj.get('type', '') - } for obj in items.get('destinationNetworks', {}).get('objects', []) - ] }, - 'SourcePorts': { - 'Addresses': [{ - 'Port': obj.get('port', ''), - 'Protocol': obj.get('protocol', '') - } for obj in items.get('sourcePorts', {}).get('literals', []) + "SourcePorts": { + "Addresses": [ + {"Port": obj.get("port", ""), "Protocol": obj.get("protocol", "")} + for obj in items.get("sourcePorts", {}).get("literals", []) + ], + "Objects": [ + { + "Name": obj.get("name", ""), + "ID": obj.get("id", ""), + "Type": obj.get("type", ""), + "Protocol": obj.get("protocol", ""), + } + for obj in items.get("sourcePorts", {}).get("objects", []) ], - 'Objects': [{ - 'Name': obj.get('name', ''), - 'ID': obj.get('id', ''), - 'Type': obj.get('type', ''), - 'Protocol': obj.get('protocol', '') - } for obj in items.get('sourcePorts', {}).get('objects', []) - ] }, - 'DestinationPorts': { - 'Addresses': [{ - 'Port': obj.get('port', ''), - 'Protocol': obj.get('protocol', '') - } for obj in items.get('destinationPorts', {}).get('literals', []) + "DestinationPorts": { + "Addresses": [ + {"Port": obj.get("port", ""), "Protocol": obj.get("protocol", "")} + for obj in items.get("destinationPorts", {}).get("literals", []) + ], + "Objects": [ + { + "Name": obj.get("name", ""), + "ID": obj.get("id", ""), + "Type": obj.get("type", ""), + "Protocol": obj.get("protocol", ""), + } + for obj in items.get("destinationPorts", {}).get("objects", []) ], - 'Objects': [{ - 'Name': obj.get('name', ''), - 'ID': obj.get('id', ''), - 'Type': obj.get('type', ''), - 'Protocol': obj.get('protocol', '') - } for obj in items.get('destinationPorts', {}).get('objects', []) - ] }, - 'SourceSecurityGroupTags': { - 'Objects': [{ - 'Name': obj.get('name', ''), - 'ID': obj.get('id', ''), - 'Type': obj.get('type', '') - } for obj in items.get('sourceSecurityGroupTags', {}).get('objects', []) + "SourceSecurityGroupTags": { + "Objects": [ + {"Name": obj.get("name", ""), "ID": obj.get("id", ""), "Type": obj.get("type", "")} + for obj in items.get("sourceSecurityGroupTags", {}).get("objects", []) ] - } + }, } def get_readable_output( - response: Dict[str, Any], - header_by_keys: Dict[str, List[str]], - keys_to_items: List[str] = None, - title: str = '', + response: dict[str, Any], + header_by_keys: dict[str, list[str]], + keys_to_items: list[str] = None, + title: str = "", ) -> str: """ Get a response's readable output by formatting it through its headers. 
@@ -1876,16 +1791,13 @@ def get_readable_output( items = dict_safe_get(response, keys_to_items) if keys_to_items else response headers = list(header_by_keys.keys()) - item_readable_arguments: List[Dict[str, Any]] = [] + item_readable_arguments: list[dict[str, Any]] = [] - if isinstance(items, Dict): + if isinstance(items, dict): items = [items] for item in items: - dictionary = { - key: dict_safe_get(item, value) - for key, value in header_by_keys.items() - } + dictionary = {key: dict_safe_get(item, value) for key, value in header_by_keys.items()} item_readable_arguments.append(dictionary) @@ -1899,10 +1811,7 @@ def get_readable_output( return readable_output -def delete_keys_from_dict( - dictionary: MutableMapping, - keys_to_delete: Union[List[str], Set[str]] -) -> Dict[str, Any]: +def delete_keys_from_dict(dictionary: MutableMapping, keys_to_delete: list[str] | Set[str]) -> dict[str, Any]: """ Get a modified dictionary without the requested keys Args: @@ -1912,16 +1821,14 @@ def delete_keys_from_dict( Dict[str, Any]: Modified dictionary without requested keys. """ keys_set = set(keys_to_delete) - modified_dict: Dict[str, Any] = {} + modified_dict: dict[str, Any] = {} for key, value in dictionary.items(): if key not in keys_set: if isinstance(value, MutableMapping): modified_dict[key] = delete_keys_from_dict(value, keys_set) - elif isinstance(value, MutableSequence) \ - and len(value) > 0 \ - and isinstance(value[0], MutableMapping): + elif isinstance(value, MutableSequence) and len(value) > 0 and isinstance(value[0], MutableMapping): modified_dict[key] = [] for val in value: @@ -1934,11 +1841,11 @@ def delete_keys_from_dict( def get_context_output( - response: Dict[str, Any], - contexts_to_delete: List[str], - item_to_add: Tuple[str, Any] = None, - keys_to_items: List[str] = None, -) -> List[Dict[str, Any]]: + response: dict[str, Any], + contexts_to_delete: list[str], + item_to_add: tuple[str, Any] = None, + keys_to_items: list[str] = None, +) -> list[dict[str, Any]]: """ Get context output from the response. Loop through each value and create a modified response without the contexts_to_delete. @@ -1955,22 +1862,19 @@ def get_context_output( """ items = dict_safe_get(response, keys_to_items) if keys_to_items else response - if isinstance(items, Dict): + if isinstance(items, dict): items = [items] - context_outputs: List[Dict[str, Any]] = [] + context_outputs: list[dict[str, Any]] = [] for item in items: - context_output: Dict[str, Any] = {} + context_output: dict[str, Any] = {} if contexts_to_delete: context_output = delete_keys_from_dict(item, contexts_to_delete) if item_to_add: - context_output = { - item_to_add[0]: item_to_add[1], - **context_output - } + context_output = {item_to_add[0]: item_to_add[1], **context_output} context_outputs.append(context_output or item) @@ -1978,11 +1882,11 @@ def get_context_output( def parse_results( - raw_response: Dict[str, Any], - command_headers_by_keys: Dict[str, Any], + raw_response: dict[str, Any], + command_headers_by_keys: dict[str, Any], command_title: str, command_context: str, - raw_responses: Union[List, Dict] = None + raw_responses: list | dict = None, ) -> CommandResults: """ Create a CommandResults from a given response. @@ -1999,10 +1903,7 @@ def parse_results( Returns: CommandResults: Created CommandResults from the API response. 
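    Example:
        An illustrative call (the header mapping and context suffix are
        placeholders):

            parse_results(
                raw_response=response,
                command_headers_by_keys={"ID": ["id"], "Name": ["name"]},
                command_title="Intrusion Policy",
                command_context="IntrusionPolicy",
            )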
""" - context_output = get_context_output( - response=raw_response, - contexts_to_delete=['links'] - ) + context_output = get_context_output(response=raw_response, contexts_to_delete=["links"]) readable_output = get_readable_output( response=raw_response, header_by_keys=command_headers_by_keys, @@ -2010,8 +1911,8 @@ def parse_results( ) command_results = CommandResults( - outputs_prefix='.'.join((INTEGRATION_CONTEXT_NAME, command_context)), - outputs_key_field='id', + outputs_prefix=".".join((INTEGRATION_CONTEXT_NAME, command_context)), + outputs_key_field="id", outputs=context_output, readable_output=readable_output, raw_response=raw_response, @@ -2023,7 +1924,7 @@ def parse_results( return command_results -def append_items_to_value(raw_response: Dict[str, Any], value: str, items_key: str, inner_key: str) -> str: +def append_items_to_value(raw_response: dict[str, Any], value: str, items_key: str, inner_key: str) -> str: """ Appends items within the raw_response to the current value. @@ -2037,11 +1938,11 @@ def append_items_to_value(raw_response: Dict[str, Any], value: str, items_key: s str: Items from raw_response or value + items from raw_response. """ if not (items := raw_response.get(items_key)): - return '' + return "" - prev_value = ','.join(item[inner_key] for item in items) + prev_value = ",".join(item[inner_key] for item in items) - return prev_value if not value else prev_value + f',{value}' + return prev_value if not value else prev_value + f",{value}" def check_is_get_request(get_args: list, list_args: list) -> bool: @@ -2062,7 +1963,7 @@ def check_is_get_request(get_args: list, list_args: list) -> bool: is_list_request = any(list_args) if is_get_request and is_list_request: - raise ValueError('GET and LIST arguments can not be supported simultaneously.') + raise ValueError("GET and LIST arguments can not be supported simultaneously.") return is_get_request @@ -2081,10 +1982,10 @@ def arg_to_optional_bool(arg: Optional[Any]) -> Optional[bool]: return argToBoolean(arg) if arg else None -''' COMMANDS ''' # pylint: disable=pointless-string-statement +""" COMMANDS """ # pylint: disable=pointless-string-statement -def list_zones_command(client: Client, args: Dict) -> CommandResults: +def list_zones_command(client: Client, args: dict) -> CommandResults: """ Retrieves a list of all security zone objects. @@ -2095,28 +1996,24 @@ def list_zones_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about security zones. 
""" - limit = args.get('limit', 50) - offset = args.get('offset', 0) - raw_response = client.get_list(limit, offset, 'securityzones') - items = raw_response.get('items') + limit = args.get("limit", 50) + offset = args.get("offset", 0) + raw_response = client.get_list(limit, offset, "securityzones") + items = raw_response.get("items") if items: - title = f'{INTEGRATION_NAME} - List zones:' - context_entry = [{ - 'ID': item.get('id', ''), - 'Name': item.get('name', ''), - 'InterfaceMode': item.get('interfaceMode', ''), - 'Interfaces': [{ - 'Name': obj.get('name', ''), - 'ID': obj.get('id' '') - } for obj in item.get('interfaces', {}) - ] - } for item in items + title = f"{INTEGRATION_NAME} - List zones:" + context_entry = [ + { + "ID": item.get("id", ""), + "Name": item.get("name", ""), + "InterfaceMode": item.get("interfaceMode", ""), + "Interfaces": [{"Name": obj.get("name", ""), "ID": obj.get("id" "")} for obj in item.get("interfaces", {})], + } + for item in items ] - context = { - f'{INTEGRATION_CONTEXT_NAME}.Zone(val.ID && val.ID === obj.ID)': context_entry - } + context = {f"{INTEGRATION_CONTEXT_NAME}.Zone(val.ID && val.ID === obj.ID)": context_entry} entry_white_list_count = switch_list_to_list_counter(context_entry) - presented_output = ['ID', 'Name', 'InterfaceMode', 'Interfaces'] + presented_output = ["ID", "Name", "InterfaceMode", "Interfaces"] human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) return CommandResults( @@ -2126,12 +2023,10 @@ def list_zones_command(client: Client, args: Dict) -> CommandResults: ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any zone.' - ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any zone.") -def list_ports_command(client: Client, args: Dict) -> CommandResults: +def list_ports_command(client: Client, args: dict) -> CommandResults: """ Retrieves list of all port objects. @@ -2142,18 +2037,16 @@ def list_ports_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about ports. """ - limit = args.get('limit', 50) - offset = args.get('offset', 0) - raw_response = client.get_list(limit, offset, 'ports') - items = raw_response.get('items') + limit = args.get("limit", 50) + offset = args.get("offset", 0) + raw_response = client.get_list(limit, offset, "ports") + items = raw_response.get("items") if items: - title = f'{INTEGRATION_NAME} - List ports:' - list_to_output = ['id', 'name', 'protocol', 'port'] + title = f"{INTEGRATION_NAME} - List ports:" + list_to_output = ["id", "name", "protocol", "port"] context_entry = raw_response_to_context_list(list_to_output, items) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Port(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Protocol', 'Port'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Port(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Protocol", "Port"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2163,12 +2056,10 @@ def list_ports_command(client: Client, args: Dict) -> CommandResults: ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any port.' 
- ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any port.") -def list_url_categories_command(client: Client, args: Dict) -> CommandResults: +def list_url_categories_command(client: Client, args: dict) -> CommandResults: """ Retrieves a list of all URL category objects. @@ -2179,18 +2070,16 @@ def list_url_categories_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about URL category. """ - limit = args.get('limit', 50) - offset = args.get('offset', 0) - raw_response = client.get_list(limit, offset, 'urlcategories') - items = raw_response.get('items') + limit = args.get("limit", 50) + offset = args.get("offset", 0) + raw_response = client.get_list(limit, offset, "urlcategories") + items = raw_response.get("items") if items: - title = f'{INTEGRATION_NAME} - List url categories:' - list_to_output = ['id', 'name'] + title = f"{INTEGRATION_NAME} - List url categories:" + list_to_output = ["id", "name"] context_entry = raw_response_to_context_list(list_to_output, items) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Category(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Category(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2200,12 +2089,10 @@ def list_url_categories_command(client: Client, args: Dict) -> CommandResults: ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any category.' - ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any category.") -def get_network_objects_command(client: Client, args: Dict) -> CommandResults: +def get_network_objects_command(client: Client, args: dict) -> CommandResults: """ Retrieves the network objects associated with the specified ID. If not supplied, retrieves a list of all network objects. @@ -2217,23 +2104,21 @@ def get_network_objects_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about network objects. 
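    Example:
        Passing object_id fetches a single object; omitting it lists all
        network objects (the UUID is a placeholder):

            get_network_objects_command(client, {"object_id": "<network-object-uuid>"})
            get_network_objects_command(client, {"limit": "50", "offset": "0"})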
""" - limit = args.get('limit', '50') - offset = args.get('offset', '0') - object_id = args.get('object_id', '') + limit = args.get("limit", "50") + offset = args.get("offset", "0") + object_id = args.get("object_id", "") raw_response = client.get_network_objects(limit, offset, object_id) - items: Union[List, Dict] = raw_response.get('items') # type:ignore - if items or 'id' in raw_response: - title = f'{INTEGRATION_NAME} - List network objects:' - if 'id' in raw_response: - title = f'{INTEGRATION_NAME} - get network object {object_id}' + items: list | dict = raw_response.get("items") # type:ignore + if items or "id" in raw_response: + title = f"{INTEGRATION_NAME} - List network objects:" + if "id" in raw_response: + title = f"{INTEGRATION_NAME} - get network object {object_id}" items = raw_response - list_to_output = ['id', 'name', 'value', 'overridable', 'description'] + list_to_output = ["id", "name", "value", "overridable", "description"] context_entry = raw_response_to_context_list(list_to_output, items) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Network(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Value', 'Overridable', 'Description'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Network(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Value", "Overridable", "Description"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2243,12 +2128,10 @@ def get_network_objects_command(client: Client, args: Dict) -> CommandResults: ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any network object.' - ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any network object.") -def get_host_objects_command(client: Client, args: Dict) -> CommandResults: +def get_host_objects_command(client: Client, args: dict) -> CommandResults: """ Retrieves the groups of host objects associated with the specified ID. If no ID is passed, the input ID retrieves a list of all network objects. @@ -2260,23 +2143,21 @@ def get_host_objects_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about network objects. 
""" - limit = args.get('limit', '50') - offset = args.get('offset', '0') - object_id = args.get('object_id', '') + limit = args.get("limit", "50") + offset = args.get("offset", "0") + object_id = args.get("object_id", "") raw_response = client.get_hosts_objects(limit, offset, object_id) - items: Union[List, Dict] = raw_response.get('items') # type:ignore - if items or 'id' in raw_response: - title = f'{INTEGRATION_NAME} - List host objects:' - if 'id' in raw_response: - title = f'{INTEGRATION_NAME} - get host object {object_id}' + items: list | dict = raw_response.get("items") # type:ignore + if items or "id" in raw_response: + title = f"{INTEGRATION_NAME} - List host objects:" + if "id" in raw_response: + title = f"{INTEGRATION_NAME} - get host object {object_id}" items = raw_response - list_to_output = ['id', 'name', 'value', 'overridable', 'description'] + list_to_output = ["id", "name", "value", "overridable", "description"] context_entry = raw_response_to_context_list(list_to_output, items) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Host(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Value', 'Overridable', 'Description'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Host(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Value", "Overridable", "Description"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2286,12 +2167,10 @@ def get_host_objects_command(client: Client, args: Dict) -> CommandResults: ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any host object.' - ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any host object.") -def create_network_objects_command(client: Client, args: Dict) -> CommandResults: +def create_network_objects_command(client: Client, args: dict) -> CommandResults: """ Creates a network object. @@ -2302,18 +2181,16 @@ def create_network_objects_command(client: Client, args: Dict) -> CommandResults Returns: CommandResults: Information about the created network object. """ - name: str = args.get('name') # type:ignore - value: str = args.get('value') # type:ignore - description: str = args.get('description', '') # type:ignore - overridable = args.get('overridable', '') + name: str = args.get("name") # type:ignore + value: str = args.get("value") # type:ignore + description: str = args.get("description", "") # type:ignore + overridable = args.get("overridable", "") raw_response = client.create_network_objects(name, value, description, overridable) - title = f'{INTEGRATION_NAME} - network object has been created.' - list_to_output = ['id', 'name', 'value', 'overridable', 'description'] + title = f"{INTEGRATION_NAME} - network object has been created." 
+ list_to_output = ["id", "name", "value", "overridable", "description"] context_entry = raw_response_to_context_list(list_to_output, raw_response) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Network(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Value', 'Overridable', 'Description'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Network(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Value", "Overridable", "Description"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2323,7 +2200,7 @@ def create_network_objects_command(client: Client, args: Dict) -> CommandResults ) -def create_host_objects_command(client: Client, args: Dict) -> CommandResults: +def create_host_objects_command(client: Client, args: dict) -> CommandResults: """ Creates a host object. @@ -2334,18 +2211,16 @@ def create_host_objects_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about the created host object. """ - name: str = args.get('name') # type:ignore - value: str = args.get('value') # type:ignore - description: str = args.get('description', '') # type:ignore - overridable = args.get('overridable', '') + name: str = args.get("name") # type:ignore + value: str = args.get("value") # type:ignore + description: str = args.get("description", "") # type:ignore + overridable = args.get("overridable", "") raw_response = client.create_host_objects(name, value, description, overridable) - title = f'{INTEGRATION_NAME} - host object has been created.' - list_to_output = ['id', 'name', 'value', 'overridable', 'description'] + title = f"{INTEGRATION_NAME} - host object has been created." + list_to_output = ["id", "name", "value", "overridable", "description"] context_entry = raw_response_to_context_list(list_to_output, raw_response) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Host(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Value', 'Overridable', 'Description'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Host(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Value", "Overridable", "Description"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2355,7 +2230,7 @@ def create_host_objects_command(client: Client, args: Dict) -> CommandResults: ) -def update_network_objects_command(client: Client, args: Dict) -> CommandResults: +def update_network_objects_command(client: Client, args: dict) -> CommandResults: """ Updates the specified network object. @@ -2366,20 +2241,18 @@ def update_network_objects_command(client: Client, args: Dict) -> CommandResults Returns: CommandResults: Information about the updated network object. """ - object_id: str = args.get('id') # type:ignore - name: str = args.get('name') # type:ignore - value: str = args.get('value') # type:ignore - description: str = args.get('description', '') # type:ignore - overridable = args.get('overridable', '') + object_id: str = args.get("id") # type:ignore + name: str = args.get("name") # type:ignore + value: str = args.get("value") # type:ignore + description: str = args.get("description", "") # type:ignore + overridable = args.get("overridable", "") raw_response = client.update_network_objects(name, value, description, overridable, object_id) - title = f'{INTEGRATION_NAME} - network object has been updated.' 
- list_to_output = ['id', 'name', 'value', 'overridable', 'description'] + title = f"{INTEGRATION_NAME} - network object has been updated." + list_to_output = ["id", "name", "value", "overridable", "description"] context_entry = raw_response_to_context_list(list_to_output, raw_response) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Network(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Value', 'Overridable', 'Description'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Network(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Value", "Overridable", "Description"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2389,7 +2262,7 @@ def update_network_objects_command(client: Client, args: Dict) -> CommandResults ) -def update_host_objects_command(client: Client, args: Dict) -> CommandResults: +def update_host_objects_command(client: Client, args: dict) -> CommandResults: """ Updates the specified host object. @@ -2400,20 +2273,18 @@ def update_host_objects_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about the updated host object. """ - object_id: str = args.get('id') # type:ignore - name: str = args.get('name') # type:ignore - value: str = args.get('value') # type:ignore - description: str = args.get('description', '') # type:ignore - overridable = args.get('overridable', '') + object_id: str = args.get("id") # type:ignore + name: str = args.get("name") # type:ignore + value: str = args.get("value") # type:ignore + description: str = args.get("description", "") # type:ignore + overridable = args.get("overridable", "") raw_response = client.update_host_objects(name, value, description, overridable, object_id) - title = f'{INTEGRATION_NAME} - host object has been updated.' - list_to_output = ['id', 'name', 'value', 'overridable', 'description'] + title = f"{INTEGRATION_NAME} - host object has been updated." + list_to_output = ["id", "name", "value", "overridable", "description"] context_entry = raw_response_to_context_list(list_to_output, raw_response) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Host(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Value', 'Overridable', 'Description'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Host(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Value", "Overridable", "Description"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2423,7 +2294,7 @@ def update_host_objects_command(client: Client, args: Dict) -> CommandResults: ) -def delete_network_objects_command(client: Client, args: Dict) -> CommandResults: +def delete_network_objects_command(client: Client, args: dict) -> CommandResults: """ Deletes the specified network object. @@ -2434,15 +2305,13 @@ def delete_network_objects_command(client: Client, args: Dict) -> CommandResults Returns: CommandResults: Information about the deleted network object. """ - object_id: str = args.get('id') # type:ignore + object_id: str = args.get("id") # type:ignore raw_response = client.delete_network_objects(object_id) - title = f'{INTEGRATION_NAME} - network object has been deleted.' - list_to_output = ['id', 'name', 'value', 'overridable', 'description'] + title = f"{INTEGRATION_NAME} - network object has been deleted." 
+ list_to_output = ["id", "name", "value", "overridable", "description"] context_entry = raw_response_to_context_list(list_to_output, raw_response) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Network(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Value', 'Overridable', 'Description'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Network(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Value", "Overridable", "Description"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2452,7 +2321,7 @@ def delete_network_objects_command(client: Client, args: Dict) -> CommandResults ) -def delete_host_objects_command(client: Client, args: Dict) -> CommandResults: +def delete_host_objects_command(client: Client, args: dict) -> CommandResults: """ Deletes the specified host object. @@ -2463,15 +2332,13 @@ def delete_host_objects_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about the deleted host object. """ - object_id: str = args.get('id') # type:ignore + object_id: str = args.get("id") # type:ignore raw_response = client.delete_host_objects(object_id) - title = f'{INTEGRATION_NAME} - host object has been deleted.' - list_to_output = ['id', 'name', 'value', 'overridable', 'description'] + title = f"{INTEGRATION_NAME} - host object has been deleted." + list_to_output = ["id", "name", "value", "overridable", "description"] context_entry = raw_response_to_context_list(list_to_output, raw_response) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Host(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Value', 'Overridable', 'Description'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Host(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Value", "Overridable", "Description"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2481,7 +2348,7 @@ def delete_host_objects_command(client: Client, args: Dict) -> CommandResults: ) -def get_network_groups_objects_command(client: Client, args: Dict) -> CommandResults: +def get_network_groups_objects_command(client: Client, args: dict) -> CommandResults: """ Retrieves the groups of network objects and addresses associated with the specified ID. If not supplied, retrieves a list of all network objects. @@ -2493,21 +2360,19 @@ def get_network_groups_objects_command(client: Client, args: Dict) -> CommandRes Returns: CommandResults: Information about network groups. 
""" - object_id = args.get('id', '') - limit = args.get('limit', '50') - offset = args.get('offset', '0') + object_id = args.get("id", "") + limit = args.get("limit", "50") + offset = args.get("offset", "0") raw_response = client.get_network_groups_objects(limit, offset, object_id) - items: Union[List, Dict] = raw_response.get('items') # type:ignore - if items or 'id' in raw_response: - title = f'{INTEGRATION_NAME} - List of network groups object:' - if 'id' in raw_response: - title = f'{INTEGRATION_NAME} - network group object:' + items: list | dict = raw_response.get("items") # type:ignore + if items or "id" in raw_response: + title = f"{INTEGRATION_NAME} - List of network groups object:" + if "id" in raw_response: + title = f"{INTEGRATION_NAME} - network group object:" items = raw_response context_entry = raw_response_to_context_network_groups(items) - context = { - f'{INTEGRATION_CONTEXT_NAME}.NetworkGroups(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Overridable', 'Description', 'Addresses', 'Objects'] + context = {f"{INTEGRATION_CONTEXT_NAME}.NetworkGroups(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Overridable", "Description", "Addresses", "Objects"] entry_white_list_count = switch_list_to_list_counter(context_entry) human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) @@ -2518,10 +2383,10 @@ def get_network_groups_objects_command(client: Client, args: Dict) -> CommandRes ) else: - raise DemistoException(f'{INTEGRATION_NAME} - Could not get the network groups.') + raise DemistoException(f"{INTEGRATION_NAME} - Could not get the network groups.") -def get_url_groups_objects_command(client: Client, args: Dict) -> CommandResults: +def get_url_groups_objects_command(client: Client, args: dict) -> CommandResults: """ Retrieves the groups of url objects and addresses associated with the specified ID. If not supplied, retrieves a list of all url objects. @@ -2533,21 +2398,19 @@ def get_url_groups_objects_command(client: Client, args: Dict) -> CommandResults Returns: CommandResults: Information about url groups. 
""" - object_id = args.get('id', '') - limit = args.get('limit', '50') - offset = args.get('offset', '0') + object_id = args.get("id", "") + limit = args.get("limit", "50") + offset = args.get("offset", "0") raw_response = client.get_url_groups_objects(limit, offset, object_id) - items: Union[List, Dict] = raw_response.get('items') # type:ignore - if items or 'id' in raw_response: - title = f'{INTEGRATION_NAME} - List of url groups object:' - if 'id' in raw_response: - title = f'{INTEGRATION_NAME} - url group object:' + items: list | dict = raw_response.get("items") # type:ignore + if items or "id" in raw_response: + title = f"{INTEGRATION_NAME} - List of url groups object:" + if "id" in raw_response: + title = f"{INTEGRATION_NAME} - url group object:" items = raw_response context_entry = raw_response_to_context_url_groups(items) - context = { - f'{INTEGRATION_CONTEXT_NAME}.URLGroups(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Overridable', 'Description', 'Addresses', 'Objects'] + context = {f"{INTEGRATION_CONTEXT_NAME}.URLGroups(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Overridable", "Description", "Addresses", "Objects"] entry_white_list_count = switch_list_to_list_counter(context_entry) human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) @@ -2558,10 +2421,10 @@ def get_url_groups_objects_command(client: Client, args: Dict) -> CommandResults ) else: - raise DemistoException(f'{INTEGRATION_NAME} - Could not get the URL groups.') + raise DemistoException(f"{INTEGRATION_NAME} - Could not get the URL groups.") -def create_network_groups_objects_command(client: Client, args: Dict) -> CommandResults: +def create_network_groups_objects_command(client: Client, args: dict) -> CommandResults: """ Creates a group of network objects. @@ -2572,20 +2435,18 @@ def create_network_groups_objects_command(client: Client, args: Dict) -> Command Returns: CommandResults: Information about the created network group. """ - name: str = args.get('name') # type:ignore - ids = args.get('network_objects_id_list', '') - values = args.get('network_address_list', '') - description = args.get('description', '') - overridable = args.get('overridable', '') + name: str = args.get("name") # type:ignore + ids = args.get("network_objects_id_list", "") + values = args.get("network_address_list", "") + description = args.get("description", "") + overridable = args.get("overridable", "") if ids or values: raw_response = client.create_network_groups_objects(name, ids, values, description, overridable) - title = f'{INTEGRATION_NAME} - network group has been created.' + title = f"{INTEGRATION_NAME} - network group has been created." 
context_entry = raw_response_to_context_network_groups(raw_response) - context = { - f'{INTEGRATION_CONTEXT_NAME}.NetworkGroups(val.ID && val.ID === obj.ID)': context_entry - } + context = {f"{INTEGRATION_CONTEXT_NAME}.NetworkGroups(val.ID && val.ID === obj.ID)": context_entry} - presented_output = ['ID', 'Name', 'Overridable', 'Description', 'Addresses', 'Objects'] + presented_output = ["ID", "Name", "Overridable", "Description", "Addresses", "Objects"] entry_white_list_count = switch_list_to_list_counter(context_entry) human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) @@ -2596,10 +2457,10 @@ def create_network_groups_objects_command(client: Client, args: Dict) -> Command ) else: - raise DemistoException(f'{INTEGRATION_NAME} - Could not create new group, Missing value or ID.') + raise DemistoException(f"{INTEGRATION_NAME} - Could not create new group, Missing value or ID.") -def update_network_groups_objects_command(client: Client, args: Dict) -> CommandResults: +def update_network_groups_objects_command(client: Client, args: dict) -> CommandResults: """ Updates a group of network objects. @@ -2610,48 +2471,42 @@ def update_network_groups_objects_command(client: Client, args: Dict) -> Command Returns: CommandResults: Information about the updated network group. """ - group_id: str = args.get('id') # type:ignore - name: str = args.get('name') # type:ignore - ids = args.get('network_objects_id_list', '') - values = args.get('network_address_list', '') - description = args.get('description', '') - overridable = args.get('overridable', '') - update_strategy = args.get('update_strategy', 'OVERRIDE') + group_id: str = args.get("id") # type:ignore + name: str = args.get("name") # type:ignore + ids = args.get("network_objects_id_list", "") + values = args.get("network_address_list", "") + description = args.get("description", "") + overridable = args.get("overridable", "") + update_strategy = args.get("update_strategy", "OVERRIDE") - is_merge = update_strategy == 'MERGE' + is_merge = update_strategy == "MERGE" if ids or values: if is_merge or not name: - raw_response = client.get_network_groups_objects( - limit=0, - offset=0, - object_id=group_id - ) + raw_response = client.get_network_groups_objects(limit=0, offset=0, object_id=group_id) - name = name or raw_response['name'] + name = name or raw_response["name"] if is_merge: ids = append_items_to_value( raw_response=raw_response, value=ids, - items_key='objects', - inner_key='id', + items_key="objects", + inner_key="id", ) values = append_items_to_value( raw_response=raw_response, value=values, - items_key='literals', - inner_key='value', + items_key="literals", + inner_key="value", ) raw_response = client.update_network_groups_objects(name, ids, values, group_id, description, overridable) - title = f'{INTEGRATION_NAME} - network group has been updated.' + title = f"{INTEGRATION_NAME} - network group has been updated." 
context_entry = raw_response_to_context_network_groups(raw_response) - context = { - f'{INTEGRATION_CONTEXT_NAME}.NetworkGroups(val.ID && val.ID === obj.ID)': context_entry - } + context = {f"{INTEGRATION_CONTEXT_NAME}.NetworkGroups(val.ID && val.ID === obj.ID)": context_entry} - presented_output = ['ID', 'Name', 'Overridable', 'Description', 'Addresses', 'Objects'] + presented_output = ["ID", "Name", "Overridable", "Description", "Addresses", "Objects"] entry_white_list_count = switch_list_to_list_counter(context_entry) human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) @@ -2662,10 +2517,10 @@ def update_network_groups_objects_command(client: Client, args: Dict) -> Command ) else: - raise DemistoException(f'{INTEGRATION_NAME} - Could not update the group, Missing value or ID.') + raise DemistoException(f"{INTEGRATION_NAME} - Could not update the group, Missing value or ID.") -def update_url_groups_objects_command(client: Client, args: Dict) -> CommandResults: +def update_url_groups_objects_command(client: Client, args: dict) -> CommandResults: """ Updates the ID of a group of url objects. @@ -2676,48 +2531,42 @@ def update_url_groups_objects_command(client: Client, args: Dict) -> CommandResu Returns: CommandResults: Information about the updated url group. """ - group_id: str = args.get('id') # type:ignore - name: str = args.get('name') # type:ignore - ids = args.get('url_objects_id_list', '') - values = args.get('url_list', '') - description = args.get('description', '') - overridable = args.get('overridable', '') - update_strategy = args.get('update_strategy', 'OVERRIDE') + group_id: str = args.get("id") # type:ignore + name: str = args.get("name") # type:ignore + ids = args.get("url_objects_id_list", "") + values = args.get("url_list", "") + description = args.get("description", "") + overridable = args.get("overridable", "") + update_strategy = args.get("update_strategy", "OVERRIDE") - is_merge = update_strategy == 'MERGE' + is_merge = update_strategy == "MERGE" if ids or values: if is_merge or not name: - raw_response = client.get_url_groups_objects( - limit=0, - offset=0, - object_id=group_id - ) + raw_response = client.get_url_groups_objects(limit=0, offset=0, object_id=group_id) - name = name or raw_response['name'] + name = name or raw_response["name"] if is_merge: ids = append_items_to_value( raw_response=raw_response, value=ids, - items_key='objects', - inner_key='id', + items_key="objects", + inner_key="id", ) values = append_items_to_value( raw_response=raw_response, value=values, - items_key='literals', - inner_key='url', + items_key="literals", + inner_key="url", ) raw_response = client.update_url_groups_objects(name, ids, values, group_id, description, overridable) - title = f'{INTEGRATION_NAME} - url group has been updated.' + title = f"{INTEGRATION_NAME} - url group has been updated." 
        context_entry = raw_response_to_context_url_groups(raw_response)
-        context = {
-            f'{INTEGRATION_CONTEXT_NAME}.UrlGroups(val.ID && val.ID === obj.ID)': context_entry
-        }
+        context = {f"{INTEGRATION_CONTEXT_NAME}.UrlGroups(val.ID && val.ID === obj.ID)": context_entry}

-        presented_output = ['ID', 'Name', 'Overridable', 'Description', 'Addresses', 'Objects']
+        presented_output = ["ID", "Name", "Overridable", "Description", "Addresses", "Objects"]
         entry_white_list_count = switch_list_to_list_counter(context_entry)
         human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output)

@@ -2728,10 +2577,10 @@ def update_url_groups_objects_command(client: Client, args: Dict) -> CommandResu
     )

     else:
-        raise DemistoException(f'{INTEGRATION_NAME} - Could not update the group, Missing value or ID.')
+        raise DemistoException(f"{INTEGRATION_NAME} - Could not update the group, Missing value or ID.")


-def delete_network_groups_objects_command(client: Client, args: Dict) -> CommandResults:
+def delete_network_groups_objects_command(client: Client, args: dict) -> CommandResults:
     """
     Deletes a group of network objects.

@@ -2742,14 +2591,12 @@ def delete_network_groups_objects_command(client: Client, args: Dict) -> Command
     Returns:
         CommandResults: Information about the deleted network group.
     """
-    object_id = args['id']
+    object_id = args["id"]
     raw_response = client.delete_network_groups_objects(object_id)
-    title = f'{INTEGRATION_NAME} - network group - {object_id} - has been delete.'
+    title = f"{INTEGRATION_NAME} - network group - {object_id} - has been deleted."
     context_entry = raw_response_to_context_network_groups(raw_response)
-    context = {
-        f'{INTEGRATION_CONTEXT_NAME}.NetworkGroups(val.ID && val.ID === obj.ID)': context_entry
-    }
-    presented_output = ['ID', 'Name', 'Overridable', 'Description', 'Addresses', 'Objects']
+    context = {f"{INTEGRATION_CONTEXT_NAME}.NetworkGroups(val.ID && val.ID === obj.ID)": context_entry}
+    presented_output = ["ID", "Name", "Overridable", "Description", "Addresses", "Objects"]
     entry_white_list_count = switch_list_to_list_counter(context_entry)
     human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output)

@@ -2760,7 +2607,7 @@ def delete_network_groups_objects_command(client: Client, args: Dict) -> Command
     )


-def get_access_policy_command(client: Client, args: Dict) -> CommandResults:
+def get_access_policy_command(client: Client, args: dict) -> CommandResults:
     """
     Retrieves the access control policy associated with the specified ID.
     If no access policy ID is passed, all access control policies are returned.

@@ -2772,21 +2619,19 @@ def get_access_policy_command(client: Client, args: Dict) -> CommandResults:
     Returns:
         CommandResults: Information about access policies.
""" - policy_id = args.get('id', '') - limit = args.get('limit', '50') - offset = args.get('offset', '0') + policy_id = args.get("id", "") + limit = args.get("limit", "50") + offset = args.get("offset", "0") raw_response = client.get_access_policy(limit, offset, policy_id) - items: Union[List, Dict] = raw_response.get('items') # type:ignore - if items or 'id' in raw_response: - title = f'{INTEGRATION_NAME} - List access policy:' - if 'id' in raw_response: - title = f'{INTEGRATION_NAME} - get access policy' + items: list | dict = raw_response.get("items") # type:ignore + if items or "id" in raw_response: + title = f"{INTEGRATION_NAME} - List access policy:" + if "id" in raw_response: + title = f"{INTEGRATION_NAME} - get access policy" items = raw_response context_entry = raw_response_to_context_access_policy(items) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Policy(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'DefaultActionID'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Policy(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "DefaultActionID"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2796,12 +2641,10 @@ def get_access_policy_command(client: Client, args: Dict) -> CommandResults: ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any access policy.' - ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any access policy.") -def create_access_policy_command(client: Client, args: Dict) -> CommandResults: +def create_access_policy_command(client: Client, args: dict) -> CommandResults: """ Creates an access control policy. @@ -2812,15 +2655,13 @@ def create_access_policy_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about the created access policy. """ - name: str = args.get('name') # type:ignore - action: str = args.get('action') # type:ignore + name: str = args.get("name") # type:ignore + action: str = args.get("action") # type:ignore raw_response = client.create_access_policy(name, action) - title = f'{INTEGRATION_NAME} - access policy has been created.' + title = f"{INTEGRATION_NAME} - access policy has been created." context_entry = raw_response_to_context_access_policy(raw_response) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Policy(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'DefaultActionID'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Policy(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "DefaultActionID"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2830,7 +2671,7 @@ def create_access_policy_command(client: Client, args: Dict) -> CommandResults: ) -def update_access_policy_command(client: Client, args: Dict) -> CommandResults: +def update_access_policy_command(client: Client, args: dict) -> CommandResults: """ Updates the specified access control policy. @@ -2841,18 +2682,16 @@ def update_access_policy_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about the updated access policy. 
""" - name: str = args.get('name') # type:ignore - policy_id: str = args.get('id') # type:ignore - action: str = args.get('action') # type:ignore - action_id: str = args.get('default_action_id') # type:ignore + name: str = args.get("name") # type:ignore + policy_id: str = args.get("id") # type:ignore + action: str = args.get("action") # type:ignore + action_id: str = args.get("default_action_id") # type:ignore raw_response = client.update_access_policy(name, policy_id, action, action_id) - title = f'{INTEGRATION_NAME} - access policy has been updated.' + title = f"{INTEGRATION_NAME} - access policy has been updated." context_entry = raw_response_to_context_access_policy(raw_response) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Policy(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'DefaultActionID'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Policy(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "DefaultActionID"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2862,7 +2701,7 @@ def update_access_policy_command(client: Client, args: Dict) -> CommandResults: ) -def delete_access_policy_command(client: Client, args: Dict) -> CommandResults: +def delete_access_policy_command(client: Client, args: dict) -> CommandResults: """ Deletes the specified access control policy. @@ -2873,14 +2712,12 @@ def delete_access_policy_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about the deleted access policy. """ - policy_id: str = args.get('id') # type:ignore + policy_id: str = args.get("id") # type:ignore raw_response = client.delete_access_policy(policy_id) - title = f'{INTEGRATION_NAME} - access policy deleted.' + title = f"{INTEGRATION_NAME} - access policy deleted." context_entry = raw_response_to_context_access_policy(raw_response) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Policy(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'DefaultActionID'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Policy(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "DefaultActionID"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2890,7 +2727,7 @@ def delete_access_policy_command(client: Client, args: Dict) -> CommandResults: ) -def list_security_group_tags_command(client: Client, args: Dict) -> CommandResults: +def list_security_group_tags_command(client: Client, args: dict) -> CommandResults: """ Retrieves a list of all custom security group tag objects. @@ -2901,18 +2738,16 @@ def list_security_group_tags_command(client: Client, args: Dict) -> CommandResul Returns: CommandResults: Information about security tags. 
""" - limit = args.get('limit', 50) - offset = args.get('offset', 0) - raw_response = client.get_list(limit, offset, 'securitygrouptags') - items = raw_response.get('items') + limit = args.get("limit", 50) + offset = args.get("offset", 0) + raw_response = client.get_list(limit, offset, "securitygrouptags") + items = raw_response.get("items") if items: - title = f'{INTEGRATION_NAME} - List security group tags:' - list_to_output = ['id', 'name', 'tag'] + title = f"{INTEGRATION_NAME} - List security group tags:" + list_to_output = ["id", "name", "tag"] context_entry = raw_response_to_context_list(list_to_output, items) - context = { - f'{INTEGRATION_CONTEXT_NAME}.SecurityGroupTags(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Tag'] + context = {f"{INTEGRATION_CONTEXT_NAME}.SecurityGroupTags(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Tag"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2922,12 +2757,10 @@ def list_security_group_tags_command(client: Client, args: Dict) -> CommandResul ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any security group tags.' - ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any security group tags.") -def list_ise_security_group_tags_command(client: Client, args: Dict) -> CommandResults: +def list_ise_security_group_tags_command(client: Client, args: dict) -> CommandResults: """ Retrieves a list of all ISE security group tag objects. @@ -2938,18 +2771,16 @@ def list_ise_security_group_tags_command(client: Client, args: Dict) -> CommandR Returns: CommandResults: Information about security tags. """ - limit = args.get('limit', 50) - offset = args.get('offset', 0) - raw_response = client.get_list(limit, offset, 'isesecuritygrouptags') - items = raw_response.get('items') + limit = args.get("limit", 50) + offset = args.get("offset", 0) + raw_response = client.get_list(limit, offset, "isesecuritygrouptags") + items = raw_response.get("items") if items: - title = f'{INTEGRATION_NAME} - List ise security group tags:' - list_to_output = ['id', 'name', 'tag'] + title = f"{INTEGRATION_NAME} - List ise security group tags:" + list_to_output = ["id", "name", "tag"] context_entry = raw_response_to_context_list(list_to_output, items) - context = { - f'{INTEGRATION_CONTEXT_NAME}.IseSecurityGroupTags(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Tag'] + context = {f"{INTEGRATION_CONTEXT_NAME}.IseSecurityGroupTags(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Tag"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -2959,12 +2790,10 @@ def list_ise_security_group_tags_command(client: Client, args: Dict) -> CommandR ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any ise security group tags.' - ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any ise security group tags.") -def list_vlan_tags_command(client: Client, args: Dict) -> CommandResults: +def list_vlan_tags_command(client: Client, args: dict) -> CommandResults: """ Retrieves a list of all vlan tag objects. @@ -2975,26 +2804,25 @@ def list_vlan_tags_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about vlan tags. 
""" - limit = args.get('limit', 50) - offset = args.get('offset', 0) - raw_response = client.get_list(limit, offset, 'vlantags') - items = raw_response.get('items') + limit = args.get("limit", 50) + offset = args.get("offset", 0) + raw_response = client.get_list(limit, offset, "vlantags") + items = raw_response.get("items") if items: - title = f'{INTEGRATION_NAME} - List vlan tags:' + title = f"{INTEGRATION_NAME} - List vlan tags:" context_entry = [ { - 'Name': item.get('name'), - 'ID': item.get('id'), - 'Overridable': item.get('overridable'), - 'Description': item.get('description'), - 'StartTag': item.get('data', {}).get('startTag'), - 'EndTag': item.get('data', {}).get('endTag') - } for item in items + "Name": item.get("name"), + "ID": item.get("id"), + "Overridable": item.get("overridable"), + "Description": item.get("description"), + "StartTag": item.get("data", {}).get("startTag"), + "EndTag": item.get("data", {}).get("endTag"), + } + for item in items ] - context = { - f'{INTEGRATION_CONTEXT_NAME}.VlanTags(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Overridable', 'Description', 'StartTag', 'EndTag'] + context = {f"{INTEGRATION_CONTEXT_NAME}.VlanTags(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["ID", "Name", "Overridable", "Description", "StartTag", "EndTag"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -3004,12 +2832,10 @@ def list_vlan_tags_command(client: Client, args: Dict) -> CommandResults: ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any vlan tags.' - ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any vlan tags.") -def list_vlan_tags_group_command(client: Client, args: Dict) -> CommandResults: +def list_vlan_tags_group_command(client: Client, args: dict) -> CommandResults: """ Retrieves a list of all vlan group tag objects. @@ -3020,35 +2846,35 @@ def list_vlan_tags_group_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about vlan tag groups. 
""" - limit = args.get('limit', 50) - offset = args.get('offset', 0) - raw_response = client.get_list(limit, offset, 'vlangrouptags') - items = raw_response.get('items') + limit = args.get("limit", 50) + offset = args.get("offset", 0) + raw_response = client.get_list(limit, offset, "vlangrouptags") + items = raw_response.get("items") if items: - title = f'{INTEGRATION_NAME} - List of vlan tags groups objects:' + title = f"{INTEGRATION_NAME} - List of vlan tags groups objects:" context_entry = [ { - 'Name': item.get('name'), - 'ID': item.get('id'), - 'Overridable': item.get('overridable'), - 'Description': item.get('description'), - 'Objects': [ + "Name": item.get("name"), + "ID": item.get("id"), + "Overridable": item.get("overridable"), + "Description": item.get("description"), + "Objects": [ { - 'Name': obj.get('name'), - 'ID': obj.get('id'), - 'Overridable': obj.get('overridable'), - 'Description': obj.get('description'), - 'StartTag': obj.get('data', {}).get('startTag'), - 'EndTag': obj.get('data', {}).get('endTag') - } for obj in item.get('object', []) - ] - } for item in items + "Name": obj.get("name"), + "ID": obj.get("id"), + "Overridable": obj.get("overridable"), + "Description": obj.get("description"), + "StartTag": obj.get("data", {}).get("startTag"), + "EndTag": obj.get("data", {}).get("endTag"), + } + for obj in item.get("object", []) + ], + } + for item in items ] - context = { - f'{INTEGRATION_CONTEXT_NAME}.VlanTagsGroup(val.ID && val.ID === obj.ID)': context_entry - } + context = {f"{INTEGRATION_CONTEXT_NAME}.VlanTagsGroup(val.ID && val.ID === obj.ID)": context_entry} entry_white_list_count = switch_list_to_list_counter(context_entry) - presented_output = ['ID', 'Name', 'Overridable', 'Description', 'Objects'] + presented_output = ["ID", "Name", "Overridable", "Description", "Objects"] human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) return CommandResults( @@ -3058,12 +2884,10 @@ def list_vlan_tags_group_command(client: Client, args: Dict) -> CommandResults: ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any vlan tags group.' - ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any vlan tags group.") -def list_applications_command(client: Client, args: Dict) -> CommandResults: +def list_applications_command(client: Client, args: dict) -> CommandResults: """ Retrieves a list of all application objects. @@ -3074,37 +2898,29 @@ def list_applications_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about applications. 
""" - limit = args.get('limit', 50) - offset = args.get('offset', 0) - raw_response = client.get_list(limit, offset, 'applications') - items = raw_response.get('items') + limit = args.get("limit", 50) + offset = args.get("offset", 0) + raw_response = client.get_list(limit, offset, "applications") + items = raw_response.get("items") if items: context_entry = [ { - 'Name': item.get('name'), - 'ID': item.get('id'), - 'Risk': item.get('risk', {}).get('name', ''), - 'AppProductivity': item.get('appProductivity', {}).get('name', ''), - 'ApplicationTypes': [ - { - 'Name': obj.get('name') - } for obj in item.get('applicationTypes', []) + "Name": item.get("name"), + "ID": item.get("id"), + "Risk": item.get("risk", {}).get("name", ""), + "AppProductivity": item.get("appProductivity", {}).get("name", ""), + "ApplicationTypes": [{"Name": obj.get("name")} for obj in item.get("applicationTypes", [])], + "AppCategories": [ + {"Name": obj.get("name"), "ID": obj.get("id"), "Count": obj.get("metadata", {}).get("count", "")} + for obj in item.get("appCategories", []) ], - 'AppCategories': [ - { - 'Name': obj.get('name'), - 'ID': obj.get('id'), - 'Count': obj.get('metadata', {}).get('count', '') - } for obj in item.get('appCategories', []) - ] - } for item in items + } + for item in items ] - title = f'{INTEGRATION_NAME} - List of applications objects:' - context = { - f'{INTEGRATION_CONTEXT_NAME}.Applications(val.ID && val.ID === obj.ID)': context_entry - } + title = f"{INTEGRATION_NAME} - List of applications objects:" + context = {f"{INTEGRATION_CONTEXT_NAME}.Applications(val.ID && val.ID === obj.ID)": context_entry} entry_white_list_count = switch_list_to_list_counter(context_entry) - presented_output = ['ID', 'Name', 'Risk', 'AppProductivity', 'ApplicationTypes', 'AppCategories'] + presented_output = ["ID", "Name", "Risk", "AppProductivity", "ApplicationTypes", "AppCategories"] human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) return CommandResults( @@ -3114,12 +2930,10 @@ def list_applications_command(client: Client, args: Dict) -> CommandResults: ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any applications.' - ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any applications.") -def get_access_rules_command(client: Client, args: Dict) -> CommandResults: +def get_access_rules_command(client: Client, args: dict) -> CommandResults: """ Retrieves the access control rule associated with the specified policy ID and rule ID. If no rule ID is specified, retrieves a list of all access rules associated with the specified policy ID. @@ -3131,30 +2945,43 @@ def get_access_rules_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about access rules. 
""" - limit = args.get('limit', 50) - offset = args.get('offset', 0) - policy_id: str = args.get('policy_id') # type:ignore - rule_id = args.get('rule_id', '') + limit = args.get("limit", 50) + offset = args.get("offset", 0) + policy_id: str = args.get("policy_id") # type:ignore + rule_id = args.get("rule_id", "") raw_response = client.get_access_rules(limit, offset, policy_id, rule_id) - items = raw_response.get('items') + items = raw_response.get("items") if items: - title = f'{INTEGRATION_NAME} - List of access rules:' - elif 'id' in raw_response: - title = f'{INTEGRATION_NAME} - access rule:' + title = f"{INTEGRATION_NAME} - List of access rules:" + elif "id" in raw_response: + title = f"{INTEGRATION_NAME} - access rule:" items = raw_response else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any access rule.' - ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any access rule.") context_entry = raw_response_to_context_rules(items) entry_white_list_count = switch_list_to_list_counter(context_entry) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Rule(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Action', 'Enabled', 'SendEventsToFMC', 'RuleIndex', 'Section', 'Category', - 'Urls', 'VlanTags', 'SourceZones', 'Applications', 'DestinationZones', 'SourceNetworks', - 'DestinationNetworks', 'SourcePorts', 'DestinationPorts', 'SourceSecurityGroupTags'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Rule(val.ID && val.ID === obj.ID)": context_entry} + presented_output = [ + "ID", + "Name", + "Action", + "Enabled", + "SendEventsToFMC", + "RuleIndex", + "Section", + "Category", + "Urls", + "VlanTags", + "SourceZones", + "Applications", + "DestinationZones", + "SourceNetworks", + "DestinationNetworks", + "SourcePorts", + "DestinationPorts", + "SourceSecurityGroupTags", + ] human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) return CommandResults( @@ -3164,7 +2991,7 @@ def get_access_rules_command(client: Client, args: Dict) -> CommandResults: ) -def create_access_rules_command(client: Client, args: Dict) -> CommandResults: +def create_access_rules_command(client: Client, args: dict) -> CommandResults: """ Creates an access control rule. @@ -3175,50 +3002,67 @@ def create_access_rules_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about the created access rules. 
""" - source_zone_object_ids = args.get('source_zone_object_ids', '') - destination_zone_object_ids = args.get('destination_zone_object_ids', '') - vlan_tag_object_ids = args.get('vlan_tag_object_ids', '') - source_network_object_ids = args.get('source_network_object_ids', '') - source_network_addresses = args.get('source_network_addresses', '') - destination_network_object_ids = args.get('destination_network_object_ids', '') - destination_network_addresses = args.get('destination_network_addresses', '') - source_port_object_ids = args.get('source_port_object_ids', '') - destination_port_object_ids = args.get('destination_port_object_ids', '') - source_security_group_tag_object_ids = args.get('source_security_group_tag_object_ids', '') - application_object_ids = args.get('application_object_ids', '') - url_object_ids = args.get('url_object_ids', '') - url_addresses = args.get('url_addresses', '') - enabled = args.get('enabled', '') - name = args.get('rule_name', '') - policy_id = args.get('policy_id', '') - action = args.get('action', '') - - raw_response = client.create_access_rules(source_zone_object_ids, - destination_zone_object_ids, - vlan_tag_object_ids, - source_network_object_ids, - source_network_addresses, - destination_network_object_ids, - destination_network_addresses, - source_port_object_ids, - destination_port_object_ids, - source_security_group_tag_object_ids, - application_object_ids, - url_object_ids, - url_addresses, - enabled, - name, - policy_id, - action) - title = f'{INTEGRATION_NAME} - the new access rule:' + source_zone_object_ids = args.get("source_zone_object_ids", "") + destination_zone_object_ids = args.get("destination_zone_object_ids", "") + vlan_tag_object_ids = args.get("vlan_tag_object_ids", "") + source_network_object_ids = args.get("source_network_object_ids", "") + source_network_addresses = args.get("source_network_addresses", "") + destination_network_object_ids = args.get("destination_network_object_ids", "") + destination_network_addresses = args.get("destination_network_addresses", "") + source_port_object_ids = args.get("source_port_object_ids", "") + destination_port_object_ids = args.get("destination_port_object_ids", "") + source_security_group_tag_object_ids = args.get("source_security_group_tag_object_ids", "") + application_object_ids = args.get("application_object_ids", "") + url_object_ids = args.get("url_object_ids", "") + url_addresses = args.get("url_addresses", "") + enabled = args.get("enabled", "") + name = args.get("rule_name", "") + policy_id = args.get("policy_id", "") + action = args.get("action", "") + + raw_response = client.create_access_rules( + source_zone_object_ids, + destination_zone_object_ids, + vlan_tag_object_ids, + source_network_object_ids, + source_network_addresses, + destination_network_object_ids, + destination_network_addresses, + source_port_object_ids, + destination_port_object_ids, + source_security_group_tag_object_ids, + application_object_ids, + url_object_ids, + url_addresses, + enabled, + name, + policy_id, + action, + ) + title = f"{INTEGRATION_NAME} - the new access rule:" context_entry = raw_response_to_context_rules(raw_response) entry_white_list_count = switch_list_to_list_counter(context_entry) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Rule(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Action', 'Enabled', 'SendEventsToFMC', 'RuleIndex', 'Section', 'Category', - 'Urls', 'VlanTags', 'SourceZones', 'Applications', 'DestinationZones', 'SourceNetworks', - 
'DestinationNetworks', 'SourcePorts', 'DestinationPorts', 'SourceSecurityGroupTags'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Rule(val.ID && val.ID === obj.ID)": context_entry} + presented_output = [ + "ID", + "Name", + "Action", + "Enabled", + "SendEventsToFMC", + "RuleIndex", + "Section", + "Category", + "Urls", + "VlanTags", + "SourceZones", + "Applications", + "DestinationZones", + "SourceNetworks", + "DestinationNetworks", + "SourcePorts", + "DestinationPorts", + "SourceSecurityGroupTags", + ] human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) return CommandResults( @@ -3228,7 +3072,7 @@ def create_access_rules_command(client: Client, args: Dict) -> CommandResults: ) -def update_access_rules_command(client: Client, args: Dict) -> CommandResults: +def update_access_rules_command(client: Client, args: dict) -> CommandResults: """ Updates the specified access control rule. @@ -3239,54 +3083,71 @@ def update_access_rules_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about the updated access rules. """ - update_strategy: str = args.get('update_strategy') # type:ignore - source_zone_object_ids = args.get('source_zone_object_ids', '') - destination_zone_object_ids = args.get('destination_zone_object_ids', '') - vlan_tag_object_ids = args.get('vlan_tag_object_ids', '') - source_network_object_ids = args.get('source_network_object_ids', '') - source_network_addresses = args.get('source_network_addresses', '') - destination_network_object_ids = args.get('destination_network_object_ids', '') - destination_network_addresses = args.get('destination_network_addresses', '') - source_port_object_ids = args.get('source_port_object_ids', '') - destination_port_object_ids = args.get('destination_port_object_ids', '') - source_security_group_tag_object_ids = args.get('source_security_group_tag_object_ids', '') - application_object_ids = args.get('application_object_ids', '') - url_object_ids = args.get('url_object_ids', '') - url_addresses = args.get('url_addresses', '') - enabled = args.get('enabled', '') - name = args.get('rule_name', '') - policy_id = args.get('policy_id', '') - action = args.get('action', '') - rule_id: str = args.get('rule_id') # type:ignore - - raw_response = client.update_access_rules(update_strategy, - source_zone_object_ids, - destination_zone_object_ids, - vlan_tag_object_ids, - source_network_object_ids, - source_network_addresses, - destination_network_object_ids, - destination_network_addresses, - source_port_object_ids, - destination_port_object_ids, - source_security_group_tag_object_ids, - application_object_ids, - url_object_ids, - url_addresses, - enabled, - name, - policy_id, - action, - rule_id) - title = f'{INTEGRATION_NAME} - access rule:' + update_strategy: str = args.get("update_strategy") # type:ignore + source_zone_object_ids = args.get("source_zone_object_ids", "") + destination_zone_object_ids = args.get("destination_zone_object_ids", "") + vlan_tag_object_ids = args.get("vlan_tag_object_ids", "") + source_network_object_ids = args.get("source_network_object_ids", "") + source_network_addresses = args.get("source_network_addresses", "") + destination_network_object_ids = args.get("destination_network_object_ids", "") + destination_network_addresses = args.get("destination_network_addresses", "") + source_port_object_ids = args.get("source_port_object_ids", "") + destination_port_object_ids = args.get("destination_port_object_ids", "") + source_security_group_tag_object_ids = 
args.get("source_security_group_tag_object_ids", "") + application_object_ids = args.get("application_object_ids", "") + url_object_ids = args.get("url_object_ids", "") + url_addresses = args.get("url_addresses", "") + enabled = args.get("enabled", "") + name = args.get("rule_name", "") + policy_id = args.get("policy_id", "") + action = args.get("action", "") + rule_id: str = args.get("rule_id") # type:ignore + + raw_response = client.update_access_rules( + update_strategy, + source_zone_object_ids, + destination_zone_object_ids, + vlan_tag_object_ids, + source_network_object_ids, + source_network_addresses, + destination_network_object_ids, + destination_network_addresses, + source_port_object_ids, + destination_port_object_ids, + source_security_group_tag_object_ids, + application_object_ids, + url_object_ids, + url_addresses, + enabled, + name, + policy_id, + action, + rule_id, + ) + title = f"{INTEGRATION_NAME} - access rule:" context_entry = raw_response_to_context_rules(raw_response) entry_white_list_count = switch_list_to_list_counter(context_entry) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Rule(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Action', 'Enabled', 'SendEventsToFMC', 'RuleIndex', 'Section', 'Category', - 'Urls', 'VlanTags', 'SourceZones', 'Applications', 'DestinationZones', 'SourceNetworks', - 'DestinationNetworks', 'SourcePorts', 'DestinationPorts', 'SourceSecurityGroupTags'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Rule(val.ID && val.ID === obj.ID)": context_entry} + presented_output = [ + "ID", + "Name", + "Action", + "Enabled", + "SendEventsToFMC", + "RuleIndex", + "Section", + "Category", + "Urls", + "VlanTags", + "SourceZones", + "Applications", + "DestinationZones", + "SourceNetworks", + "DestinationNetworks", + "SourcePorts", + "DestinationPorts", + "SourceSecurityGroupTags", + ] human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) return CommandResults( @@ -3296,7 +3157,7 @@ def update_access_rules_command(client: Client, args: Dict) -> CommandResults: ) -def delete_access_rules_command(client: Client, args: Dict) -> CommandResults: +def delete_access_rules_command(client: Client, args: dict) -> CommandResults: """ Deletes the specified access control rule. @@ -3307,18 +3168,33 @@ def delete_access_rules_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about the deleted access rules. 
""" - policy_id = args.get('policy_id') - rule_id = args.get('rule_id') + policy_id = args.get("policy_id") + rule_id = args.get("rule_id") raw_response = client.delete_access_rules(policy_id, rule_id) - title = f'{INTEGRATION_NAME} - deleted access rule:' + title = f"{INTEGRATION_NAME} - deleted access rule:" context_entry = raw_response_to_context_rules(raw_response) entry_white_list_count = switch_list_to_list_counter(context_entry) - context = { - f'{INTEGRATION_CONTEXT_NAME}.Rule(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['ID', 'Name', 'Action', 'Enabled', 'SendEventsToFMC', 'RuleIndex', 'Section', 'Category', - 'Urls', 'VlanTags', 'SourceZones', 'Applications', 'DestinationZones', 'SourceNetworks', - 'DestinationNetworks', 'SourcePorts', 'DestinationPorts', 'SourceSecurityGroupTags'] + context = {f"{INTEGRATION_CONTEXT_NAME}.Rule(val.ID && val.ID === obj.ID)": context_entry} + presented_output = [ + "ID", + "Name", + "Action", + "Enabled", + "SendEventsToFMC", + "RuleIndex", + "Section", + "Category", + "Urls", + "VlanTags", + "SourceZones", + "Applications", + "DestinationZones", + "SourceNetworks", + "DestinationNetworks", + "SourcePorts", + "DestinationPorts", + "SourceSecurityGroupTags", + ] human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) return CommandResults( @@ -3328,7 +3204,7 @@ def delete_access_rules_command(client: Client, args: Dict) -> CommandResults: ) -def list_policy_assignments_command(client: Client, args: Dict) -> CommandResults: +def list_policy_assignments_command(client: Client, args: dict) -> CommandResults: """ Retrieves the policy assignment associated with the specified ID. If no ID is specified, retrieves a list of all policy assignments to target devices. @@ -3340,18 +3216,16 @@ def list_policy_assignments_command(client: Client, args: Dict) -> CommandResult Returns: CommandResults: Information about policy assignments. """ - limit = args.get('limit', 50) - offset = args.get('offset', 0) + limit = args.get("limit", 50) + offset = args.get("offset", 0) raw_response = client.list_policy_assignments(limit, offset) - items = raw_response.get('items') + items = raw_response.get("items") if items: - title = f'{INTEGRATION_NAME} - List of policy assignments:' + title = f"{INTEGRATION_NAME} - List of policy assignments:" context_entry = raw_response_to_context_policy_assignment(items) - context = { - f'{INTEGRATION_CONTEXT_NAME}.PolicyAssignments(val.ID && val.ID === obj.ID)': context_entry - } + context = {f"{INTEGRATION_CONTEXT_NAME}.PolicyAssignments(val.ID && val.ID === obj.ID)": context_entry} entry_white_list_count = switch_list_to_list_counter(context_entry) - presented_output = ['ID', 'Name', 'PolicyName', 'PolicyID', 'PolicyDescription', 'Targets'] + presented_output = ["ID", "Name", "PolicyName", "PolicyID", "PolicyDescription", "Targets"] human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) return CommandResults( @@ -3361,12 +3235,10 @@ def list_policy_assignments_command(client: Client, args: Dict) -> CommandResult ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any policy assignments.' 
- ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any policy assignments.") -def create_policy_assignments_command(client: Client, args: Dict) -> CommandResults: +def create_policy_assignments_command(client: Client, args: dict) -> CommandResults: """ Creates policy assignments to target devices. @@ -3377,17 +3249,15 @@ def create_policy_assignments_command(client: Client, args: Dict) -> CommandResu Returns: CommandResults: Information about the created policy assignments. """ - device_ids: str = args.get('device_ids') # type:ignore - device_group_ids: str = args.get('device_group_ids') # type:ignore - policy_id: str = args.get('policy_id') # type:ignore + device_ids: str = args.get("device_ids") # type:ignore + device_group_ids: str = args.get("device_group_ids") # type:ignore + policy_id: str = args.get("policy_id") # type:ignore raw_response = client.create_policy_assignments(policy_id, device_ids, device_group_ids) - title = f'{INTEGRATION_NAME} - Policy assignments has been done.' + title = f"{INTEGRATION_NAME} - Policy assignments has been done." context_entry = raw_response_to_context_policy_assignment(raw_response) - context = { - f'{INTEGRATION_CONTEXT_NAME}.PolicyAssignments(val.ID && val.ID === obj.ID)': context_entry - } + context = {f"{INTEGRATION_CONTEXT_NAME}.PolicyAssignments(val.ID && val.ID === obj.ID)": context_entry} entry_white_list_count = switch_list_to_list_counter(context_entry) - presented_output = ['ID', 'Name', 'PolicyName', 'PolicyID', 'PolicyDescription', 'Targets'] + presented_output = ["ID", "Name", "PolicyName", "PolicyID", "PolicyDescription", "Targets"] human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) return CommandResults( @@ -3397,7 +3267,7 @@ def create_policy_assignments_command(client: Client, args: Dict) -> CommandResu ) -def update_policy_assignments_command(client: Client, args: Dict) -> CommandResults: +def update_policy_assignments_command(client: Client, args: dict) -> CommandResults: """ Updates the specified policy assignments to target devices. @@ -3408,32 +3278,27 @@ def update_policy_assignments_command(client: Client, args: Dict) -> CommandResu Returns: CommandResults: Information about the updated policy assignments. 
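
    Example (illustrative sketch; placeholder IDs). Passing update_strategy="MERGE"
    appends the new devices to the existing targets instead of overriding them:
        !ciscofp-update-policy-assignments policy_id="<policy-id>" device_ids="<device-id>" update_strategy="MERGE"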
""" - device_ids: str = args.get('device_ids') # type:ignore - device_group_ids: str = args.get('device_group_ids') # type:ignore - policy_id: str = args.get('policy_id') # type:ignore - update_strategy = args.get('update_strategy', 'OVERRIDE') + device_ids: str = args.get("device_ids") # type:ignore + device_group_ids: str = args.get("device_group_ids") # type:ignore + policy_id: str = args.get("policy_id") # type:ignore + update_strategy = args.get("update_strategy", "OVERRIDE") - if update_strategy == 'MERGE': - raw_response = client.get_policy_assignments( - policy_assignment_id=policy_id - ) + if update_strategy == "MERGE": + raw_response = client.get_policy_assignments(policy_assignment_id=policy_id) - targets = raw_response['targets'] - prev_device_ids = ','.join(target['id'] for target in targets if target['type'] == 'Device') - device_ids = prev_device_ids if not device_ids else prev_device_ids + f',{device_ids}' + targets = raw_response["targets"] + prev_device_ids = ",".join(target["id"] for target in targets if target["type"] == "Device") + device_ids = prev_device_ids if not device_ids else prev_device_ids + f",{device_ids}" - prev_device_group_ids = ','.join(target['id'] for target in targets if target['type'] == 'DeviceGroup') - device_group_ids = prev_device_group_ids if not device_group_ids else \ - prev_device_group_ids + f',{device_group_ids}' + prev_device_group_ids = ",".join(target["id"] for target in targets if target["type"] == "DeviceGroup") + device_group_ids = prev_device_group_ids if not device_group_ids else prev_device_group_ids + f",{device_group_ids}" raw_response = client.update_policy_assignments(policy_id, device_ids, device_group_ids) - title = f'{INTEGRATION_NAME} - policy update has been done.' + title = f"{INTEGRATION_NAME} - policy update has been done." context_entry = raw_response_to_context_policy_assignment(raw_response) - context = { - f'{INTEGRATION_CONTEXT_NAME}.PolicyAssignments(val.ID && val.ID === obj.ID)': context_entry - } + context = {f"{INTEGRATION_CONTEXT_NAME}.PolicyAssignments(val.ID && val.ID === obj.ID)": context_entry} entry_white_list_count = switch_list_to_list_counter(context_entry) - presented_output = ['ID', 'Name', 'PolicyName', 'PolicyID', 'PolicyDescription', 'Targets'] + presented_output = ["ID", "Name", "PolicyName", "PolicyID", "PolicyDescription", "Targets"] human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) return CommandResults( @@ -3443,7 +3308,7 @@ def update_policy_assignments_command(client: Client, args: Dict) -> CommandResu ) -def get_deployable_devices_command(client: Client, args: Dict) -> CommandResults: +def get_deployable_devices_command(client: Client, args: dict) -> CommandResults: """ Retrieves a list of all devices with configuration changes that are ready to deploy. @@ -3454,30 +3319,30 @@ def get_deployable_devices_command(client: Client, args: Dict) -> CommandResults Returns: CommandResults: Information about deployable devices. 
""" - limit = args.get('limit', 50) - offset = args.get('offset', 0) - container_uuid = args.get('container_uuid', '') + limit = args.get("limit", 50) + offset = args.get("offset", 0) + container_uuid = args.get("container_uuid", "") raw_response = client.get_deployable_devices(limit, offset, container_uuid) - items = raw_response.get('items') + items = raw_response.get("items") if container_uuid: if items: - context_entry = [{ - 'EndTime': item.get('endTime', ''), - 'ID': item.get('id', ''), - 'Name': item.get('name', ''), - 'StartTime': item.get('startTime', ''), - 'Status': item.get('status', ''), - 'Type': item.get('type', '') - } for item in items + context_entry = [ + { + "EndTime": item.get("endTime", ""), + "ID": item.get("id", ""), + "Name": item.get("name", ""), + "StartTime": item.get("startTime", ""), + "Status": item.get("status", ""), + "Type": item.get("type", ""), + } + for item in items ] else: context_entry = [] demisto.debug(f"no {items=}") - title = f'{INTEGRATION_NAME} - List of devices status pending deployment:' - context = { - f'{INTEGRATION_CONTEXT_NAME}.PendingDeployment(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['EndTime', 'ID', 'Name', 'StartTime', 'Status', 'Type'] + title = f"{INTEGRATION_NAME} - List of devices status pending deployment:" + context = {f"{INTEGRATION_CONTEXT_NAME}.PendingDeployment(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["EndTime", "ID", "Name", "StartTime", "Status", "Type"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -3487,20 +3352,20 @@ def get_deployable_devices_command(client: Client, args: Dict) -> CommandResults ) if items: - context_entry = [{ - 'CanBeDeployed': item.get('canBeDeployed', ''), - 'UpToDate': item.get('upToDate', ''), - 'DeviceID': item.get('device', {}).get('id', ''), - 'DeviceName': item.get('device', {}).get('name', ''), - 'DeviceType': item.get('device', {}).get('type', ''), - 'Version': item.get('version', '') - } for item in items + context_entry = [ + { + "CanBeDeployed": item.get("canBeDeployed", ""), + "UpToDate": item.get("upToDate", ""), + "DeviceID": item.get("device", {}).get("id", ""), + "DeviceName": item.get("device", {}).get("name", ""), + "DeviceType": item.get("device", {}).get("type", ""), + "Version": item.get("version", ""), + } + for item in items ] - title = f'{INTEGRATION_NAME} - List of deployable devices:' - context = { - f'{INTEGRATION_CONTEXT_NAME}.DeployableDevices(val.ID && val.ID === obj.ID)': context_entry - } - presented_output = ['CanBeDeployed', 'UpToDate', 'DeviceID', 'DeviceName', 'DeviceType', 'Version'] + title = f"{INTEGRATION_NAME} - List of deployable devices:" + context = {f"{INTEGRATION_CONTEXT_NAME}.DeployableDevices(val.ID && val.ID === obj.ID)": context_entry} + presented_output = ["CanBeDeployed", "UpToDate", "DeviceID", "DeviceName", "DeviceType", "Version"] human_readable = tableToMarkdown(title, context_entry, headers=presented_output) return CommandResults( @@ -3510,12 +3375,10 @@ def get_deployable_devices_command(client: Client, args: Dict) -> CommandResults ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any deployable devices.' 
-        )
+        return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any deployable devices.")


-def get_device_records_command(client: Client, args: Dict) -> CommandResults:
+def get_device_records_command(client: Client, args: dict) -> CommandResults:
     """
     Retrieves a list of all device records.

@@ -3526,24 +3389,24 @@ def get_device_records_command(client: Client, args: Dict) -> CommandResults:
     Returns:
         CommandResults: Information about device records.
     """
-    limit = args.get('limit', 50)
-    offset = args.get('offset', 0)
+    limit = args.get("limit", 50)
+    offset = args.get("offset", 0)
     raw_response = client.get_device_records(limit, offset)
-    items = raw_response.get('items')
+    items = raw_response.get("items")
     if items:
-        context_entry = [{
-            'ID': item.get('id', ''),
-            'Name': item.get('name', ''),
-            'HostName': item.get('hostName', ''),
-            'Type': item.get('type', ''),
-            'DeviceGroupID': item.get('deviceGroup', {}).get('id', '')
-        } for item in items
+        context_entry = [
+            {
+                "ID": item.get("id", ""),
+                "Name": item.get("name", ""),
+                "HostName": item.get("hostName", ""),
+                "Type": item.get("type", ""),
+                "DeviceGroupID": item.get("deviceGroup", {}).get("id", ""),
+            }
+            for item in items
         ]
-        title = f'{INTEGRATION_NAME} - List of device records:'
-        context = {
-            f'{INTEGRATION_CONTEXT_NAME}.DeviceRecords(val.ID && val.ID === obj.ID)': context_entry
-        }
-        presented_output = ['ID', 'Name', 'HostName', 'Type', 'DeviceGroupID']
+        title = f"{INTEGRATION_NAME} - List of device records:"
+        context = {f"{INTEGRATION_CONTEXT_NAME}.DeviceRecords(val.ID && val.ID === obj.ID)": context_entry}
+        presented_output = ["ID", "Name", "HostName", "Type", "DeviceGroupID"]
         human_readable = tableToMarkdown(title, context_entry, headers=presented_output)

         return CommandResults(
@@ -3553,12 +3416,10 @@ def get_device_records_command(client: Client, args: Dict) -> CommandResults:
         )

     else:
-        return CommandResults(
-            readable_output=f'{INTEGRATION_NAME} - Could not find any device records.'
-        )
+        return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any device records.")


-def deploy_to_devices_command(client: Client, args: Dict) -> CommandResults:
+def deploy_to_devices_command(client: Client, args: dict) -> CommandResults:
     """
     Creates a request for deploying configuration changes to devices.

@@ -3569,25 +3430,23 @@ def deploy_to_devices_command(client: Client, args: Dict) -> CommandResults:
     Returns:
         CommandResults: Information about the deployed devices.
     """
-    force_deploy = args.get('force_deploy', '')
-    ignore_warning = args.get('ignore_warning', '')
-    version = args.get('version', '')
-    device_list = args.get('device_ids', '')
+    force_deploy = args.get("force_deploy", "")
+    ignore_warning = args.get("ignore_warning", "")
+    version = args.get("version", "")
+    device_list = args.get("device_ids", "")
     raw_response = client.deploy_to_devices(force_deploy, ignore_warning, version, device_list)
-    title = f'{INTEGRATION_NAME} - devices requests to deploy.'
+    title = f"{INTEGRATION_NAME} - devices requests to deploy."
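    # Editorial note: the raw response carries a task ID (metadata.task.id, surfaced
    # as TaskID below) that can be polled afterwards with ciscofp-get-task-status.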
context_entry = { - 'TaskID': raw_response.get('metadata', {}).get('task', {}).get('id', ''), - 'ForceDeploy': raw_response.get('forceDeploy'), - 'IgnoreWarning': raw_response.get('ignoreWarning'), - 'Version': raw_response.get('version'), - 'DeviceList': raw_response.get('deviceList') - } - context = { - f'{INTEGRATION_CONTEXT_NAME}.Deploy(val.ID && val.ID === obj.ID)': context_entry + "TaskID": raw_response.get("metadata", {}).get("task", {}).get("id", ""), + "ForceDeploy": raw_response.get("forceDeploy"), + "IgnoreWarning": raw_response.get("ignoreWarning"), + "Version": raw_response.get("version"), + "DeviceList": raw_response.get("deviceList"), } + context = {f"{INTEGRATION_CONTEXT_NAME}.Deploy(val.ID && val.ID === obj.ID)": context_entry} entry_white_list_count = switch_list_to_list_counter(context_entry) - presented_output = ['TaskID', 'ForceDeploy', 'IgnoreWarning', 'Version', 'DeviceList'] + presented_output = ["TaskID", "ForceDeploy", "IgnoreWarning", "Version", "DeviceList"] human_readable = tableToMarkdown(title, entry_white_list_count, headers=presented_output) return CommandResults( @@ -3597,7 +3456,7 @@ def deploy_to_devices_command(client: Client, args: Dict) -> CommandResults: ) -def get_task_status_command(client: Client, args: Dict) -> CommandResults: +def get_task_status_command(client: Client, args: dict) -> CommandResults: """ Retrieves information about a previously submitted pending job or task with the specified ID. Used for deploying. @@ -3609,17 +3468,13 @@ def get_task_status_command(client: Client, args: Dict) -> CommandResults: Returns: CommandResults: Information about task status. """ - task_id: str = args.get('task_id') # type:ignore + task_id: str = args.get("task_id") # type:ignore raw_response = client.get_task_status(task_id) - if 'status' in raw_response: - context_entry = { - 'Status': raw_response.get('status') - } - title = f'{INTEGRATION_NAME} - {task_id} status:' - context = { - f'{INTEGRATION_CONTEXT_NAME}.TaskStatus(val.ID && val.ID === obj.ID)': context_entry - } - human_readable = tableToMarkdown(title, context_entry, headers=['Status']) + if "status" in raw_response: + context_entry = {"Status": raw_response.get("status")} + title = f"{INTEGRATION_NAME} - {task_id} status:" + context = {f"{INTEGRATION_CONTEXT_NAME}.TaskStatus(val.ID && val.ID === obj.ID)": context_entry} + human_readable = tableToMarkdown(title, context_entry, headers=["Status"]) return CommandResults( readable_output=human_readable, @@ -3628,12 +3483,10 @@ def get_task_status_command(client: Client, args: Dict) -> CommandResults: ) else: - return CommandResults( - readable_output=f'{INTEGRATION_NAME} - Could not find any status.' - ) + return CommandResults(readable_output=f"{INTEGRATION_NAME} - Could not find any status.") -def create_intrusion_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults: +def create_intrusion_policy_command(client: Client, args: dict[str, Any]) -> CommandResults: """ Creates an Intrusion Policy with the specified parameters. 
@@ -3644,10 +3497,10 @@ def create_intrusion_policy_command(client: Client, args: Dict[str, Any]) -> Com Returns: CommandResults: Information about the created Intrusion Policy """ - name = args['name'] - basepolicy_id = args['basepolicy_id'] - description = args.get('description') - inspection_mode = args.get('inspection_mode') + name = args["name"] + basepolicy_id = args["basepolicy_id"] + description = args.get("description") + inspection_mode = args.get("inspection_mode") raw_response = client.create_intrusion_policy( name=name, @@ -3659,12 +3512,12 @@ def create_intrusion_policy_command(client: Client, args: Dict[str, Any]) -> Com return parse_results( raw_response=raw_response, command_headers_by_keys=INTRUSION_POLICY_HEADERS_BY_KEYS, - command_title=f'Created {INTRUSION_POLICY_TITLE}', + command_title=f"Created {INTRUSION_POLICY_TITLE}", command_context=INTRUSION_POLICY_CONTEXT, ) -def list_intrusion_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults: +def list_intrusion_policy_command(client: Client, args: dict[str, Any]) -> CommandResults: """ Retrieves the intrusion policy associated with the specified ID. If no ID is specified, retrieves list of intrusion policies. @@ -3679,20 +3532,17 @@ def list_intrusion_policy_command(client: Client, args: Dict[str, Any]) -> Comma CommandResults: Information about Intrusion Policies. """ # GET arguments - intrusion_policy_id = args.get('intrusion_policy_id', '') - include_count = argToBoolean(args.get('include_count', 'False')) + intrusion_policy_id = args.get("intrusion_policy_id", "") + include_count = argToBoolean(args.get("include_count", "False")) # LIST arguments - limit = arg_to_number(args.get('limit', 0)) - page = arg_to_number(args.get('page', 0)) - page_size = arg_to_number(args.get('page_size', 0)) - expanded_response = argToBoolean(args.get('expanded_response', 'False')) + limit = arg_to_number(args.get("limit", 0)) + page = arg_to_number(args.get("page", 0)) + page_size = arg_to_number(args.get("page_size", 0)) + expanded_response = argToBoolean(args.get("expanded_response", "False")) raw_responses = None - if check_is_get_request( - get_args=[intrusion_policy_id, include_count], - list_args=[limit, page, page_size, expanded_response] - ): + if check_is_get_request(get_args=[intrusion_policy_id, include_count], list_args=[limit, page, page_size, expanded_response]): raw_response = client.get_intrusion_policy( intrusion_policy_id=intrusion_policy_id, include_count=include_count, @@ -3709,13 +3559,13 @@ def list_intrusion_policy_command(client: Client, args: Dict[str, Any]) -> Comma return parse_results( raw_response=raw_response, command_headers_by_keys=INTRUSION_POLICY_HEADERS_BY_KEYS, - command_title=f'Fetched {INTRUSION_POLICY_TITLE}', + command_title=f"Fetched {INTRUSION_POLICY_TITLE}", command_context=INTRUSION_POLICY_CONTEXT, raw_responses=raw_responses, ) -def update_intrusion_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults: +def update_intrusion_policy_command(client: Client, args: dict[str, Any]) -> CommandResults: """ Modifies the Intrusion Policy associated with the specified ID. 
@@ -3729,27 +3579,27 @@ def update_intrusion_policy_command(client: Client, args: Dict[str, Any]) -> Com Returns: CommandResults: Information about the created Intrusion Policy """ - intrusion_policy_id = args['intrusion_policy_id'] - name = args.get('name', '') - basepolicy_id = args.get('basepolicy_id', '') - description = args.get('description') - inspection_mode = args.get('inspection_mode') - replicate_inspection_mode = arg_to_optional_bool(args.get('replicate_inspection_mode')) + intrusion_policy_id = args["intrusion_policy_id"] + name = args.get("name", "") + basepolicy_id = args.get("basepolicy_id", "") + description = args.get("description") + inspection_mode = args.get("inspection_mode") + replicate_inspection_mode = arg_to_optional_bool(args.get("replicate_inspection_mode")) update_arguments = (name, basepolicy_id, description, inspection_mode) if not any(update_arguments): - raise ValueError('Please enter one of the update arguments: name, basepolicy_id, description, inspection_mode') + raise ValueError("Please enter one of the update arguments: name, basepolicy_id, description, inspection_mode") if not all(update_arguments): previous_data = client.get_intrusion_policy( intrusion_policy_id=intrusion_policy_id, ) - name = name or previous_data['name'] - basepolicy_id = basepolicy_id or previous_data['basePolicy']['id'] - description = description or previous_data.get('description') - inspection_mode = inspection_mode or previous_data.get('inspectionMode') + name = name or previous_data["name"] + basepolicy_id = basepolicy_id or previous_data["basePolicy"]["id"] + description = description or previous_data.get("description") + inspection_mode = inspection_mode or previous_data.get("inspectionMode") raw_response = client.update_intrusion_policy( intrusion_policy_id=intrusion_policy_id, @@ -3763,12 +3613,12 @@ def update_intrusion_policy_command(client: Client, args: Dict[str, Any]) -> Com return parse_results( raw_response=raw_response, command_headers_by_keys=INTRUSION_POLICY_HEADERS_BY_KEYS, - command_title=f'Updated {INTRUSION_POLICY_TITLE}', + command_title=f"Updated {INTRUSION_POLICY_TITLE}", command_context=INTRUSION_POLICY_CONTEXT, ) -def delete_intrusion_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults: +def delete_intrusion_policy_command(client: Client, args: dict[str, Any]) -> CommandResults: """ Deletes the Intrusion Policy associated with the specified ID. @@ -3779,7 +3629,7 @@ def delete_intrusion_policy_command(client: Client, args: Dict[str, Any]) -> Com Returns: CommandResults: Information about the deleted Intrusion Policy """ - intrusion_policy_id = args['intrusion_policy_id'] + intrusion_policy_id = args["intrusion_policy_id"] try: raw_response = client.delete_intrusion_policy( @@ -3787,17 +3637,15 @@ def delete_intrusion_policy_command(client: Client, args: Dict[str, Any]) -> Com ) except DemistoException as exc: - if 'UUID cannot be null' in str(exc): - return CommandResults( - readable_output=f'The Intrusion Policy ID: "{intrusion_policy_id}" does not exist.' 
-            )
+        if "UUID cannot be null" in str(exc):
+            return CommandResults(readable_output=f'The Intrusion Policy ID: "{intrusion_policy_id}" does not exist.')

         raise

     readable_output = get_readable_output(
         response=raw_response,
         header_by_keys=INTRUSION_POLICY_HEADERS_BY_KEYS,
-        title=f'Deleted {INTRUSION_POLICY_TITLE}',
+        title=f"Deleted {INTRUSION_POLICY_TITLE}",
     )

     return CommandResults(
@@ -3806,7 +3654,7 @@ def delete_intrusion_policy_command(client: Client, args: Dict[str, Any]) -> Com
     )


-def create_intrusion_rule_command(client: Client, args: Dict[str, Any]) -> CommandResults:
+def create_intrusion_rule_command(client: Client, args: dict[str, Any]) -> CommandResults:
     """
     Creates or overrides a Snort3 intrusion rule with the specified parameters.

@@ -3817,8 +3665,8 @@ def create_intrusion_rule_command(client: Client, args: Dict[str, Any]) -> Comma
     Returns:
         CommandResults: Information about the created Intrusion Rule.
     """
-    rule_data = args['rule_data']
-    rule_group_ids = argToList(args['rule_group_ids'])
+    rule_data = args["rule_data"]
+    rule_group_ids = argToList(args["rule_group_ids"])

     raw_response = client.create_intrusion_rule(
         rule_data=rule_data,
@@ -3828,12 +3676,12 @@ def create_intrusion_rule_command(client: Client, args: Dict[str, Any]) -> Comma
     return parse_results(
         raw_response=raw_response,
         command_headers_by_keys=INTRUSION_RULE_HEADERS_BY_KEYS,
-        command_title=f'Created {INTRUSION_RULE_TITLE}',
+        command_title=f"Created {INTRUSION_RULE_TITLE}",
         command_context=INTRUSION_RULE_CONTEXT,
     )


-def list_intrusion_rule_command(client: Client, args: Dict[str, Any]) -> CommandResults:
+def list_intrusion_rule_command(client: Client, args: dict[str, Any]) -> CommandResults:
     """
     Retrieves the Snort3 intrusion rule with the specified ID.
     If no ID is specified, retrieves a list of all Snort3 intrusion rules.

@@ -3851,20 +3699,19 @@ def list_intrusion_rule_command(client: Client, args: Dict[str, Any]) -> Command
         CommandResults: Information about Intrusion Rules.
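
    Example (illustrative sketch; placeholder values):
        !ciscofp-list-intrusion-rule limit=50 expanded_response=false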
""" # GET arguments - intrusion_rule_id = args.get('intrusion_rule_id', '') + intrusion_rule_id = args.get("intrusion_rule_id", "") # LIST arguments - limit = arg_to_number(args.get('limit', 0)) - page = arg_to_number(args.get('page', 0)) - page_size = arg_to_number(args.get('page_size', 0)) - sort = argToList(args.get('sort')) - filter_string = args.get('filter') - expanded_response = argToBoolean(args.get('expanded_response', 'False')) + limit = arg_to_number(args.get("limit", 0)) + page = arg_to_number(args.get("page", 0)) + page_size = arg_to_number(args.get("page_size", 0)) + sort = argToList(args.get("sort")) + filter_string = args.get("filter") + expanded_response = argToBoolean(args.get("expanded_response", "False")) raw_responses = None if check_is_get_request( - get_args=[intrusion_rule_id], - list_args=[sort, filter_string, expanded_response, limit, page, page_size] + get_args=[intrusion_rule_id], list_args=[sort, filter_string, expanded_response, limit, page, page_size] ): raw_response = client.get_intrusion_rule( intrusion_rule_id=intrusion_rule_id, @@ -3883,13 +3730,13 @@ def list_intrusion_rule_command(client: Client, args: Dict[str, Any]) -> Command return parse_results( raw_response=raw_response, command_headers_by_keys=INTRUSION_RULE_HEADERS_BY_KEYS, - command_title=f'Fetched {INTRUSION_RULE_TITLE}', + command_title=f"Fetched {INTRUSION_RULE_TITLE}", command_context=INTRUSION_RULE_CONTEXT, raw_responses=raw_responses, ) -def update_intrusion_rule_command(client: Client, args: Dict[str, Any]) -> CommandResults: +def update_intrusion_rule_command(client: Client, args: dict[str, Any]) -> CommandResults: """ Modifies the Snort3 Intrusion rule group with the specified ID. Must enter at least one of the following if not both: rule_data or rule_group_ids. @@ -3907,31 +3754,29 @@ def update_intrusion_rule_command(client: Client, args: Dict[str, Any]) -> Comma Returns: CommandResults: Information about the updated Intrusion Rule. """ - intrusion_rule_id = args['intrusion_rule_id'] - rule_data = args.get('rule_data', '') - rule_group_ids = argToList(args.get('rule_group_ids')) - update_strategy = args.get('update_strategy', 'OVERRIDE') + intrusion_rule_id = args["intrusion_rule_id"] + rule_data = args.get("rule_data", "") + rule_group_ids = argToList(args.get("rule_group_ids")) + update_strategy = args.get("update_strategy", "OVERRIDE") - is_merge = update_strategy == 'MERGE' + is_merge = update_strategy == "MERGE" if not any((rule_data, rule_group_ids)): - raise ValueError('rule_data, rule_group_ids or both must be populated.') + raise ValueError("rule_data, rule_group_ids or both must be populated.") # Rule groups must be entered when merging. if is_merge and not rule_group_ids: - raise ValueError('rule_group_ids must be populated when merging.') + raise ValueError("rule_group_ids must be populated when merging.") # If on of the main arguments are missing, fill there data through a GET request. 
     if bool(rule_data) != bool(rule_group_ids):
-        raw_response = client.get_intrusion_rule(
-            intrusion_rule_id=intrusion_rule_id
-        )
+        raw_response = client.get_intrusion_rule(intrusion_rule_id=intrusion_rule_id)

-        rule_data = rule_data or raw_response['ruleData']
-        rule_group_ids = rule_group_ids or [rule_group['id'] for rule_group in raw_response['ruleGroups']]
+        rule_data = rule_data or raw_response["ruleData"]
+        rule_group_ids = rule_group_ids or [rule_group["id"] for rule_group in raw_response["ruleGroups"]]

     if is_merge:
-        rule_group_ids += [rule_group['id'] for rule_group in raw_response['ruleGroups']]
+        rule_group_ids += [rule_group["id"] for rule_group in raw_response["ruleGroups"]]

     raw_response = client.update_intrusion_rule(
         intrusion_rule_id=intrusion_rule_id,
@@ -3942,12 +3787,12 @@ def update_intrusion_rule_command(client: Client, args: Dict[str, Any]) -> Comma
     return parse_results(
         raw_response=raw_response,
         command_headers_by_keys=INTRUSION_RULE_HEADERS_BY_KEYS,
-        command_title=f'Updated {INTRUSION_RULE_TITLE}',
+        command_title=f"Updated {INTRUSION_RULE_TITLE}",
         command_context=INTRUSION_RULE_CONTEXT,
     )


-def delete_intrusion_rule_command(client: Client, args: Dict[str, Any]) -> CommandResults:
+def delete_intrusion_rule_command(client: Client, args: dict[str, Any]) -> CommandResults:
     """
     Deletes the specified Snort3 rule.

@@ -3958,16 +3803,14 @@ def delete_intrusion_rule_command(client: Client, args: Dict[str, Any]) -> Comma
     Returns:
         CommandResults: Information about the deleted Intrusion Rule.
     """
-    intrusion_rule_id = args['intrusion_rule_id']
+    intrusion_rule_id = args["intrusion_rule_id"]

-    raw_response = client.delete_intrusion_rule(
-        intrusion_rule_id=intrusion_rule_id
-    )
+    raw_response = client.delete_intrusion_rule(intrusion_rule_id=intrusion_rule_id)

     readable_output = get_readable_output(
         response=raw_response,
         header_by_keys=INTRUSION_RULE_HEADERS_BY_KEYS,
-        title=f'Deleted {INTRUSION_RULE_TITLE}',
+        title=f"Deleted {INTRUSION_RULE_TITLE}",
     )

     return CommandResults(
@@ -3976,7 +3819,7 @@ def delete_intrusion_rule_command(client: Client, args: Dict[str, Any]) -> Comma
     )


-def upload_intrusion_rule_file_command(client: Client, args: Dict[str, Any]) -> CommandResults:
+def upload_intrusion_rule_file_command(client: Client, args: dict[str, Any]) -> CommandResults:
     """
     Imports or validates custom Snort 3 intrusion rules within a file.

@@ -3991,25 +3834,25 @@ def upload_intrusion_rule_file_command(client: Client, args: Dict[str, Any]) ->
     Returns:
         CommandResults: Information about the intrusion rules format or about the merged/replaced intrusion rules.
     """
-    entry_id = args['entry_id']
+    entry_id = args["entry_id"]

     # Import Arguments
-    rule_import_mode = args.get('rule_import_mode')
-    rule_group_ids = argToList(args.get('rule_group_ids'))
+    rule_import_mode = args.get("rule_import_mode")
+    rule_group_ids = argToList(args.get("rule_group_ids"))

     # Validation Argument
-    validate_only = argToBoolean(args.get('validate_only', 'True'))
+    validate_only = argToBoolean(args.get("validate_only", "True"))

     # Import arguments must be provided when validate_only is false.
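    # Editorial sketch: a validation-only run needs neither import argument, e.g.
    # args = {"entry_id": "<entry-id>", "validate_only": "True"}; the accepted
    # rule_import_mode values are presumably merge/replace variants (an assumption
    # based on the "merged/replaced" wording in the docstring above).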
if not validate_only and not all((rule_import_mode, rule_group_ids)): raise ValueError('rule_import_mode and rule_group_ids must be inserted when validate_only is "False".') file_entry = demisto.getFilePath(entry_id) - filename = file_entry['name'] + filename = file_entry["name"] file_type = os.path.splitext(filename)[1] - if file_type not in ('.txt', '.rules'): + if file_type not in (".txt", ".rules"): raise ValueError(f'Supported file formats are ".txt" and ".rules", got {file_type}') - with open(file_entry['path'], 'r') as file_handler: + with open(file_entry["path"]) as file_handler: raw_response = client.upload_intrusion_rule_file( filename=filename, payload_file=file_handler.read(), @@ -4018,12 +3861,12 @@ def upload_intrusion_rule_file_command(client: Client, args: Dict[str, Any]) -> validate_only=validate_only, ) - category = dict_safe_get(raw_response, ['error', 'category']) + category = dict_safe_get(raw_response, ["error", "category"]) - if validate_only and category == 'VALIDATION': + if validate_only and category == "VALIDATION": readable_output = tableToMarkdown( f'Validation for Intrusion Rules within: "{filename}"', - dict_safe_get(raw_response, ['error', 'messages']), + dict_safe_get(raw_response, ["error", "messages"]), headerTransform=pascalToSpace, removeNull=True, ) @@ -4041,7 +3884,7 @@ def upload_intrusion_rule_file_command(client: Client, args: Dict[str, Any]) -> ) -def create_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> CommandResults: +def create_intrusion_rule_group_command(client: Client, args: dict[str, Any]) -> CommandResults: """ Creates an Intrusion Rule Group with the specified parameters. @@ -4052,8 +3895,8 @@ def create_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> Returns: CommandResults: Information about the created Intrusion Rule Group. """ - name = args['name'] - description = args.get('description') + name = args["name"] + description = args.get("description") raw_response = client.create_intrusion_rule_group( name=name, @@ -4063,12 +3906,12 @@ def create_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> return parse_results( raw_response=raw_response, command_headers_by_keys=INTRUSION_RULE_GROUP_HEADERS_BY_KEYS, - command_title=f'Created {INTRUSION_RULE_GROUP_TITLE}', + command_title=f"Created {INTRUSION_RULE_GROUP_TITLE}", command_context=INTRUSION_RULE_GROUP_CONTEXT, ) -def list_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> CommandResults: +def list_intrusion_rule_group_command(client: Client, args: dict[str, Any]) -> CommandResults: """ Retrieves the Snort3 Intrusion rule group. If no ID is specified, retrieves a list of all Snort3 Intrusion rule groups. @@ -4083,20 +3926,17 @@ def list_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> C CommandResults: Information about Intrusion Rule Groups. 
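
    Example (illustrative sketch; placeholder values):
        !ciscofp-list-intrusion-rule-group limit=50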
""" # GET arguments - rule_group_id = args.get('rule_group_id', '') + rule_group_id = args.get("rule_group_id", "") # LIST arguments - limit = arg_to_number(args.get('limit', 0)) - page = arg_to_number(args.get('page', 0)) - page_size = arg_to_number(args.get('page_size', 0)) - filter_string = args.get('filter') - expanded_response = argToBoolean(args.get('expanded_response', 'False')) + limit = arg_to_number(args.get("limit", 0)) + page = arg_to_number(args.get("page", 0)) + page_size = arg_to_number(args.get("page_size", 0)) + filter_string = args.get("filter") + expanded_response = argToBoolean(args.get("expanded_response", "False")) raw_responses = None - if check_is_get_request( - get_args=[rule_group_id], - list_args=[filter_string, expanded_response, limit, page, page_size] - ): + if check_is_get_request(get_args=[rule_group_id], list_args=[filter_string, expanded_response, limit, page, page_size]): raw_response = client.get_intrusion_rule_group( rule_group_id=rule_group_id, ) @@ -4113,13 +3953,13 @@ def list_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> C return parse_results( raw_response=raw_response, command_headers_by_keys=INTRUSION_RULE_GROUP_HEADERS_BY_KEYS, - command_title=f'Fetched {INTRUSION_RULE_GROUP_TITLE}', + command_title=f"Fetched {INTRUSION_RULE_GROUP_TITLE}", command_context=INTRUSION_RULE_GROUP_CONTEXT, raw_responses=raw_responses, ) -def update_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> CommandResults: +def update_intrusion_rule_group_command(client: Client, args: dict[str, Any]) -> CommandResults: """ Updates an Intrusion Rule Group with the specified parameters. @@ -4130,14 +3970,14 @@ def update_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> Returns: CommandResults: Information about the modified Intrusion Rule Group. """ - rule_group_id = args['rule_group_id'] - name = args['name'] - description = args.get('description') + rule_group_id = args["rule_group_id"] + name = args["name"] + description = args.get("description") if not description: description = client.get_intrusion_rule_group( rule_group_id=rule_group_id, - ).get('description') + ).get("description") raw_response = client.update_intrusion_rule_group( rule_group_id=rule_group_id, @@ -4148,12 +3988,12 @@ def update_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> return parse_results( raw_response=raw_response, command_headers_by_keys=INTRUSION_RULE_GROUP_HEADERS_BY_KEYS, - command_title=f'Updated {INTRUSION_RULE_GROUP_TITLE}', + command_title=f"Updated {INTRUSION_RULE_GROUP_TITLE}", command_context=INTRUSION_RULE_GROUP_CONTEXT, ) -def delete_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> CommandResults: +def delete_intrusion_rule_group_command(client: Client, args: dict[str, Any]) -> CommandResults: """ Deletes an Intrusion Rule Group with the specified parameters. @@ -4164,8 +4004,8 @@ def delete_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> Returns: CommandResults: Information about the deleted Intrusion Rule Group. 
""" - rule_group_id = args['rule_group_id'] - delete_related_rules = arg_to_optional_bool(args.get('delete_related_rules')) + rule_group_id = args["rule_group_id"] + delete_related_rules = arg_to_optional_bool(args.get("delete_related_rules")) raw_response = client.delete_intrusion_rule_group( rule_group_id=rule_group_id, @@ -4175,7 +4015,7 @@ def delete_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> readable_output = get_readable_output( response=raw_response, header_by_keys=INTRUSION_RULE_GROUP_HEADERS_BY_KEYS, - title=f'Deleted {INTRUSION_RULE_GROUP_TITLE}', + title=f"Deleted {INTRUSION_RULE_GROUP_TITLE}", ) return CommandResults( @@ -4184,7 +4024,7 @@ def delete_intrusion_rule_group_command(client: Client, args: Dict[str, Any]) -> ) -def create_network_analysis_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults: +def create_network_analysis_policy_command(client: Client, args: dict[str, Any]) -> CommandResults: """ Creates a network analysis policy. @@ -4195,10 +4035,10 @@ def create_network_analysis_policy_command(client: Client, args: Dict[str, Any]) Returns: CommandResults: Information about the created network analysis policy. """ - name = args['name'] - basepolicy_id = args['basepolicy_id'] - description = args.get('description') - inspection_mode = args.get('inspection_mode') + name = args["name"] + basepolicy_id = args["basepolicy_id"] + description = args.get("description") + inspection_mode = args.get("inspection_mode") raw_response = client.create_network_analysis_policy( name=name, @@ -4210,12 +4050,12 @@ def create_network_analysis_policy_command(client: Client, args: Dict[str, Any]) return parse_results( raw_response=raw_response, command_headers_by_keys=NETWORK_ANALYSIS_POLICY_HEADERS_BY_KEYS, - command_title=f'Created {NETWORK_ANALYSIS_POLICY_TITLE}', + command_title=f"Created {NETWORK_ANALYSIS_POLICY_TITLE}", command_context=NETWORK_ANALYSIS_POLICY_CONTEXT, ) -def list_network_analysis_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults: +def list_network_analysis_policy_command(client: Client, args: dict[str, Any]) -> CommandResults: """ Retrieves the network analysis policy with the specified ID. If no ID is specified, retrieves list of all network analysis policies. @@ -4230,19 +4070,16 @@ def list_network_analysis_policy_command(client: Client, args: Dict[str, Any]) - CommandResults: Information about network analysis policies. 
""" # GET arguments - network_analysis_policy_id = args.get('network_analysis_policy_id', '') + network_analysis_policy_id = args.get("network_analysis_policy_id", "") # LIST arguments - limit = arg_to_number(args.get('limit', 0)) - page = arg_to_number(args.get('page', 0)) - page_size = arg_to_number(args.get('page_size', 0)) - expanded_response = argToBoolean(args.get('expanded_response', 'False')) + limit = arg_to_number(args.get("limit", 0)) + page = arg_to_number(args.get("page", 0)) + page_size = arg_to_number(args.get("page_size", 0)) + expanded_response = argToBoolean(args.get("expanded_response", "False")) raw_responses = None - if check_is_get_request( - get_args=[network_analysis_policy_id], - list_args=[expanded_response, limit, page, page_size] - ): + if check_is_get_request(get_args=[network_analysis_policy_id], list_args=[expanded_response, limit, page, page_size]): raw_response = client.get_network_analysis_policy( network_analysis_policy_id=network_analysis_policy_id, ) @@ -4258,13 +4095,13 @@ def list_network_analysis_policy_command(client: Client, args: Dict[str, Any]) - return parse_results( raw_response=raw_response, command_headers_by_keys=NETWORK_ANALYSIS_POLICY_HEADERS_BY_KEYS, - command_title=f'Fetched {NETWORK_ANALYSIS_POLICY_TITLE}', + command_title=f"Fetched {NETWORK_ANALYSIS_POLICY_TITLE}", command_context=NETWORK_ANALYSIS_POLICY_CONTEXT, raw_responses=raw_responses, ) -def update_network_analysis_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults: +def update_network_analysis_policy_command(client: Client, args: dict[str, Any]) -> CommandResults: """ Modifies the network analysis policy associated with the specified ID. @@ -4278,27 +4115,27 @@ def update_network_analysis_policy_command(client: Client, args: Dict[str, Any]) Returns: CommandResults: Information about the modified network analysis policy. 
""" - network_analysis_policy_id = args['network_analysis_policy_id'] - basepolicy_id = args.get('basepolicy_id', '') - name = args.get('name', '') - description = args.get('description') - inspection_mode = args.get('inspection_mode') - replicate_inspection_mode = arg_to_optional_bool(args.get('replicate_inspection_mode')) + network_analysis_policy_id = args["network_analysis_policy_id"] + basepolicy_id = args.get("basepolicy_id", "") + name = args.get("name", "") + description = args.get("description") + inspection_mode = args.get("inspection_mode") + replicate_inspection_mode = arg_to_optional_bool(args.get("replicate_inspection_mode")) update_arguments = (name, basepolicy_id, description, inspection_mode) if not any(update_arguments): - raise ValueError('Please enter one of the update arguments: name, basepolicy_id, description, inspection_mode') + raise ValueError("Please enter one of the update arguments: name, basepolicy_id, description, inspection_mode") if not all(update_arguments): previous_data = client.get_network_analysis_policy( network_analysis_policy_id=network_analysis_policy_id, ) - name = name or previous_data['name'] - basepolicy_id = basepolicy_id or previous_data['basePolicy']['id'] - description = description or previous_data.get('description') - inspection_mode = inspection_mode or previous_data.get('inspectionMode') + name = name or previous_data["name"] + basepolicy_id = basepolicy_id or previous_data["basePolicy"]["id"] + description = description or previous_data.get("description") + inspection_mode = inspection_mode or previous_data.get("inspectionMode") raw_response = client.update_network_analysis_policy( network_analysis_policy_id=network_analysis_policy_id, @@ -4312,12 +4149,12 @@ def update_network_analysis_policy_command(client: Client, args: Dict[str, Any]) return parse_results( raw_response=raw_response, command_headers_by_keys=NETWORK_ANALYSIS_POLICY_HEADERS_BY_KEYS, - command_title=f'Updated {NETWORK_ANALYSIS_POLICY_TITLE}', + command_title=f"Updated {NETWORK_ANALYSIS_POLICY_TITLE}", command_context=NETWORK_ANALYSIS_POLICY_CONTEXT, ) -def delete_network_analysis_policy_command(client: Client, args: Dict[str, Any]) -> CommandResults: +def delete_network_analysis_policy_command(client: Client, args: dict[str, Any]) -> CommandResults: """ Deletes the network analysis policy associated with the specified ID. @@ -4328,7 +4165,7 @@ def delete_network_analysis_policy_command(client: Client, args: Dict[str, Any]) Returns: CommandResults: Information about the deleted network analysis policy. 
""" - network_analysis_policy_id = args['network_analysis_policy_id'] + network_analysis_policy_id = args["network_analysis_policy_id"] raw_response = client.delete_network_analysis_policy( network_analysis_policy_id=network_analysis_policy_id, @@ -4337,7 +4174,7 @@ def delete_network_analysis_policy_command(client: Client, args: Dict[str, Any]) readable_output = get_readable_output( response=raw_response, header_by_keys=NETWORK_ANALYSIS_POLICY_HEADERS_BY_KEYS, - title=f'Deleted {NETWORK_ANALYSIS_POLICY_TITLE}', + title=f"Deleted {NETWORK_ANALYSIS_POLICY_TITLE}", ) return CommandResults( @@ -4346,78 +4183,78 @@ def delete_network_analysis_policy_command(client: Client, args: Dict[str, Any]) ) -''' COMMANDS MANAGER / SWITCH PANEL ''' # pylint: disable=pointless-string-statement +""" COMMANDS MANAGER / SWITCH PANEL """ # pylint: disable=pointless-string-statement def main(): # pragma: no cover - params: Dict[str, Any] = demisto.params() - args: Dict[str, Any] = demisto.args() + params: dict[str, Any] = demisto.params() + args: dict[str, Any] = demisto.args() command: str = demisto.command() - base_url = params['url'] - username = params['credentials']['identifier'] - password = params['credentials']['password'] - verify_ssl = not params.get('insecure', False) - proxy = params.get('proxy', False) - - commands: Dict[str, Callable] = { - 'ciscofp-list-zones': list_zones_command, - 'ciscofp-list-ports': list_ports_command, - 'ciscofp-list-url-categories': list_url_categories_command, - 'ciscofp-get-network-object': get_network_objects_command, - 'ciscofp-create-network-object': create_network_objects_command, - 'ciscofp-update-network-object': update_network_objects_command, - 'ciscofp-delete-network-object': delete_network_objects_command, - 'ciscofp-get-host-object': get_host_objects_command, - 'ciscofp-create-host-object': create_host_objects_command, - 'ciscofp-update-host-object': update_host_objects_command, - 'ciscofp-delete-host-object': delete_host_objects_command, - 'ciscofp-get-network-groups-object': get_network_groups_objects_command, - 'ciscofp-create-network-groups-objects': create_network_groups_objects_command, - 'ciscofp-update-network-groups-objects': update_network_groups_objects_command, - 'ciscofp-delete-network-groups-objects': delete_network_groups_objects_command, - 'ciscofp-get-url-groups-object': get_url_groups_objects_command, - 'ciscofp-update-url-groups-objects': update_url_groups_objects_command, - 'ciscofp-get-access-policy': get_access_policy_command, - 'ciscofp-create-access-policy': create_access_policy_command, - 'ciscofp-update-access-policy': update_access_policy_command, - 'ciscofp-delete-access-policy': delete_access_policy_command, - 'ciscofp-list-security-group-tags': list_security_group_tags_command, - 'ciscofp-list-ise-security-group-tag': list_ise_security_group_tags_command, - 'ciscofp-list-vlan-tags': list_vlan_tags_command, - 'ciscofp-list-vlan-tags-group': list_vlan_tags_group_command, - 'ciscofp-list-applications': list_applications_command, - 'ciscofp-get-access-rules': get_access_rules_command, - 'ciscofp-create-access-rules': create_access_rules_command, - 'ciscofp-update-access-rules': update_access_rules_command, - 'ciscofp-delete-access-rules': delete_access_rules_command, - 'ciscofp-list-policy-assignments': list_policy_assignments_command, - 'ciscofp-create-policy-assignments': create_policy_assignments_command, - 'ciscofp-update-policy-assignments': update_policy_assignments_command, - 'ciscofp-get-deployable-devices': 
get_deployable_devices_command, - 'ciscofp-get-device-records': get_device_records_command, - 'ciscofp-deploy-to-devices': deploy_to_devices_command, - 'ciscofp-get-task-status': get_task_status_command, - 'ciscofp-create-intrusion-policy': create_intrusion_policy_command, - 'ciscofp-list-intrusion-policy': list_intrusion_policy_command, - 'ciscofp-update-intrusion-policy': update_intrusion_policy_command, - 'ciscofp-delete-intrusion-policy': delete_intrusion_policy_command, - 'ciscofp-create-intrusion-rule': create_intrusion_rule_command, - 'ciscofp-list-intrusion-rule': list_intrusion_rule_command, - 'ciscofp-update-intrusion-rule': update_intrusion_rule_command, - 'ciscofp-delete-intrusion-rule': delete_intrusion_rule_command, - 'ciscofp-upload-intrusion-rule-file': upload_intrusion_rule_file_command, - 'ciscofp-create-intrusion-rule-group': create_intrusion_rule_group_command, - 'ciscofp-list-intrusion-rule-group': list_intrusion_rule_group_command, - 'ciscofp-update-intrusion-rule-group': update_intrusion_rule_group_command, - 'ciscofp-delete-intrusion-rule-group': delete_intrusion_rule_group_command, - 'ciscofp-create-network-analysis-policy': create_network_analysis_policy_command, - 'ciscofp-list-network-analysis-policy': list_network_analysis_policy_command, - 'ciscofp-update-network-analysis-policy': update_network_analysis_policy_command, - 'ciscofp-delete-network-analysis-policy': delete_network_analysis_policy_command, + base_url = params["url"] + username = params["credentials"]["identifier"] + password = params["credentials"]["password"] + verify_ssl = not params.get("insecure", False) + proxy = params.get("proxy", False) + + commands: dict[str, Callable] = { + "ciscofp-list-zones": list_zones_command, + "ciscofp-list-ports": list_ports_command, + "ciscofp-list-url-categories": list_url_categories_command, + "ciscofp-get-network-object": get_network_objects_command, + "ciscofp-create-network-object": create_network_objects_command, + "ciscofp-update-network-object": update_network_objects_command, + "ciscofp-delete-network-object": delete_network_objects_command, + "ciscofp-get-host-object": get_host_objects_command, + "ciscofp-create-host-object": create_host_objects_command, + "ciscofp-update-host-object": update_host_objects_command, + "ciscofp-delete-host-object": delete_host_objects_command, + "ciscofp-get-network-groups-object": get_network_groups_objects_command, + "ciscofp-create-network-groups-objects": create_network_groups_objects_command, + "ciscofp-update-network-groups-objects": update_network_groups_objects_command, + "ciscofp-delete-network-groups-objects": delete_network_groups_objects_command, + "ciscofp-get-url-groups-object": get_url_groups_objects_command, + "ciscofp-update-url-groups-objects": update_url_groups_objects_command, + "ciscofp-get-access-policy": get_access_policy_command, + "ciscofp-create-access-policy": create_access_policy_command, + "ciscofp-update-access-policy": update_access_policy_command, + "ciscofp-delete-access-policy": delete_access_policy_command, + "ciscofp-list-security-group-tags": list_security_group_tags_command, + "ciscofp-list-ise-security-group-tag": list_ise_security_group_tags_command, + "ciscofp-list-vlan-tags": list_vlan_tags_command, + "ciscofp-list-vlan-tags-group": list_vlan_tags_group_command, + "ciscofp-list-applications": list_applications_command, + "ciscofp-get-access-rules": get_access_rules_command, + "ciscofp-create-access-rules": create_access_rules_command, + "ciscofp-update-access-rules": 
update_access_rules_command, + "ciscofp-delete-access-rules": delete_access_rules_command, + "ciscofp-list-policy-assignments": list_policy_assignments_command, + "ciscofp-create-policy-assignments": create_policy_assignments_command, + "ciscofp-update-policy-assignments": update_policy_assignments_command, + "ciscofp-get-deployable-devices": get_deployable_devices_command, + "ciscofp-get-device-records": get_device_records_command, + "ciscofp-deploy-to-devices": deploy_to_devices_command, + "ciscofp-get-task-status": get_task_status_command, + "ciscofp-create-intrusion-policy": create_intrusion_policy_command, + "ciscofp-list-intrusion-policy": list_intrusion_policy_command, + "ciscofp-update-intrusion-policy": update_intrusion_policy_command, + "ciscofp-delete-intrusion-policy": delete_intrusion_policy_command, + "ciscofp-create-intrusion-rule": create_intrusion_rule_command, + "ciscofp-list-intrusion-rule": list_intrusion_rule_command, + "ciscofp-update-intrusion-rule": update_intrusion_rule_command, + "ciscofp-delete-intrusion-rule": delete_intrusion_rule_command, + "ciscofp-upload-intrusion-rule-file": upload_intrusion_rule_file_command, + "ciscofp-create-intrusion-rule-group": create_intrusion_rule_group_command, + "ciscofp-list-intrusion-rule-group": list_intrusion_rule_group_command, + "ciscofp-update-intrusion-rule-group": update_intrusion_rule_group_command, + "ciscofp-delete-intrusion-rule-group": delete_intrusion_rule_group_command, + "ciscofp-create-network-analysis-policy": create_network_analysis_policy_command, + "ciscofp-list-network-analysis-policy": list_network_analysis_policy_command, + "ciscofp-update-network-analysis-policy": update_network_analysis_policy_command, + "ciscofp-delete-network-analysis-policy": delete_network_analysis_policy_command, } - demisto.debug(f'Command being called is {command}') + demisto.debug(f"Command being called is {command}") try: client = Client( @@ -4428,21 +4265,21 @@ def main(): # pragma: no cover proxy=proxy, ) - if command == 'test-module': + if command == "test-module": # In the Client __init__ there is a already a request made to receive a Bearer token. # If the token has been received successfully, then that means that the test connections has passed. - return_results('ok') + return_results("ok") elif command in commands: return_results(commands[command](client, args)) else: - raise NotImplementedError(f'Command doesn\'t exist - {command}') + raise NotImplementedError(f"Command doesn't exist - {command}") except Exception as exc: # pylint: disable=broad-except demisto.error(traceback.format_exc()) - return_error(f'Failed to execute {command} command.\nError:\n{str(exc)}') + return_error(f"Failed to execute {command} command.\nError:\n{exc!s}") -if __name__ in ('__main__', '__builtin__', 'builtins'): +if __name__ in ("__main__", "__builtin__", "builtins"): main() diff --git a/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower_test.py b/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower_test.py index dba420df4ce8..c3f12520b5ba 100644 --- a/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower_test.py +++ b/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower_test.py @@ -1,75 +1,68 @@ """ Unit testing for Cisco Firepower Management Center. 
""" -import json + import io +import json import os -from typing import Any, Union from http import HTTPStatus +from typing import Any from unittest import mock + import pytest +from CiscoFirepower import ( + INTEGRATION_CONTEXT_NAME, + INTRUSION_POLICY_CONTEXT, + INTRUSION_RULE_CONTEXT, + INTRUSION_RULE_GROUP_CONTEXT, + INTRUSION_RULE_UPLOAD_CONTEXT, + INTRUSION_RULE_UPLOAD_TITLE, + NETWORK_ANALYSIS_POLICY_CONTEXT, + Client, + raw_response_to_context_access_policy, + raw_response_to_context_list, + raw_response_to_context_network_groups, + raw_response_to_context_policy_assignment, + raw_response_to_context_rules, + switch_list_to_list_counter, +) from CommonServerPython import CommandResults -from CiscoFirepower import Client, switch_list_to_list_counter, raw_response_to_context_list, \ - raw_response_to_context_rules, raw_response_to_context_network_groups, raw_response_to_context_policy_assignment, \ - raw_response_to_context_access_policy, INTEGRATION_CONTEXT_NAME, INTRUSION_POLICY_CONTEXT, INTRUSION_RULE_CONTEXT, \ - INTRUSION_RULE_GROUP_CONTEXT, NETWORK_ANALYSIS_POLICY_CONTEXT, INTRUSION_RULE_UPLOAD_CONTEXT, \ - INTRUSION_RULE_UPLOAD_TITLE - - -USERNAME = 'USERNAME' -PASSWORD = 'PASSWORD' -BASE_URL = 'https://firepower' -SUFFIX = 'api/fmc_config/v1/domain/DOMAIN_UUID' -FILE_ENTRY = { - 'name': 'intrusion_rule_upload.txt', - 'path': 'test_data/intrusion_rule_upload.txt' -} -FILE_ENTRY_ERROR = { - 'name': 'intrusion_rule_upload.json', - 'path': 'test_data/intrusion_rule_upload.json' -} + +USERNAME = "USERNAME" +PASSWORD = "PASSWORD" +BASE_URL = "https://firepower" +SUFFIX = "api/fmc_config/v1/domain/DOMAIN_UUID" +FILE_ENTRY = {"name": "intrusion_rule_upload.txt", "path": "test_data/intrusion_rule_upload.txt"} +FILE_ENTRY_ERROR = {"name": "intrusion_rule_upload.json", "path": "test_data/intrusion_rule_upload.json"} INPUT_TEST_SWITCH_LIST_TO_LIST_COUNTER = [ - ({'name': 'n', 'type': 't', 'devices': [1, 2, 3]}, {'name': 'n', 'type': 't', 'devices': 3}), - ({'name': 'n', 'type': 't', 'devices': {'new': [1, 2], 'old': [1, 2]}}, {'name': 'n', 'type': 't', 'devices': 4}), - ({'name': 'n', 'type': 't', 'devices': {'new': 1, 'old': [1, 2]}}, {'name': 'n', 'type': 't', 'devices': 3}), - ({'name': 'n', 'type': 't', 'devices': {'new': 'my new'}}, {'name': 'n', 'type': 't', 'devices': 1}) + ({"name": "n", "type": "t", "devices": [1, 2, 3]}, {"name": "n", "type": "t", "devices": 3}), + ({"name": "n", "type": "t", "devices": {"new": [1, 2], "old": [1, 2]}}, {"name": "n", "type": "t", "devices": 4}), + ({"name": "n", "type": "t", "devices": {"new": 1, "old": [1, 2]}}, {"name": "n", "type": "t", "devices": 3}), + ({"name": "n", "type": "t", "devices": {"new": "my new"}}, {"name": "n", "type": "t", "devices": 1}), ] INPUT_TEST_RAW_RESPONSE_TO_CONTEXT_LIST = [ ( {"id": "123", "metadata": {"domain": {"id": "456"}}, "name": "home", "type": "URLCategory"}, - ['id', 'name'], + ["id", "name"], {"ID": "123", "Name": "home"}, ), ( { "id": "121212", - "links": { - "self": "link" - }, + "links": {"self": "link"}, "metadata": { - "domain": { - "id": "123456", - "name": "Global", - "type": "Domain" - }, - "lastUser": { - "id": "141414", - "name": "admin", - "type": "user" - }, - "readOnly": { - "state": 'false' - }, - "timestamp": '1575996253' + "domain": {"id": "123456", "name": "Global", "type": "Domain"}, + "lastUser": {"id": "141414", "name": "admin", "type": "user"}, + "readOnly": {"state": "false"}, + "timestamp": "1575996253", }, "name": "Child Abuse Content", - "type": "URLCategory" + "type": 
"URLCategory", }, - ['id', 'name'], - {"ID": "121212", - "Name": "Child Abuse Content"}, + ["id", "name"], + {"ID": "121212", "Name": "Child Abuse Content"}, ), ] @@ -81,41 +74,23 @@ "links": {"self": "link"}, "literals": [{"type": "Network", "value": "ip"}, {"type": "Host", "value": "::/0"}], "metadata": { - "domain": { - "id": "123456", - "name": "Global", - "type": "Domain" - }, - "lastUser": { - "name": "admin" - }, - "readOnly": { - "reason": "SYSTEM", - "state": 'true' - }, - "timestamp": '1521658703283' + "domain": {"id": "123456", "name": "Global", "type": "Domain"}, + "lastUser": {"name": "admin"}, + "readOnly": {"reason": "SYSTEM", "state": "true"}, + "timestamp": "1521658703283", }, "name": "any", - "overridable": 'false', - "type": "NetworkGroup" + "overridable": "false", + "type": "NetworkGroup", }, { "Name": "any", "ID": "131313", - "Overridable": 'false', + "Overridable": "false", "Description": " ", "Objects": [], - "Addresses": [ - { - "Value": "ip", - "Type": "Network" - }, - { - "Value": "::/0", - "Type": "Host" - } - ] - } + "Addresses": [{"Value": "ip", "Type": "Network"}, {"Value": "::/0", "Type": "Host"}], + }, ) ] @@ -125,20 +100,9 @@ "id": "151515", "links": {"self": "link"}, "name": "BPS-Testing", - "policy": { - "id": "151515", - "name": "BPS-Testing", - "type": "AccessPolicy" - }, - "targets": [ - { - "id": "161616", - "keepLocalEvents": 'false', - "name": "FTD_10.8.49.209", - "type": "Device" - } - ], - "type": "PolicyAssignment" + "policy": {"id": "151515", "name": "BPS-Testing", "type": "AccessPolicy"}, + "targets": [{"id": "161616", "keepLocalEvents": "false", "name": "FTD_10.8.49.209", "type": "Device"}], + "type": "PolicyAssignment", }, { "ID": "151515", @@ -146,15 +110,8 @@ "PolicyID": "151515", "PolicyName": "BPS-Testing", "PolicyDescription": "", - "Targets": [ - { - "ID": "161616", - "Name": "FTD_10.8.49.209", - "Type": "Device" - } - ] + "Targets": [{"ID": "161616", "Name": "FTD_10.8.49.209", "Type": "Device"}], }, - ) ] @@ -164,43 +121,20 @@ "defaultAction": { "action": "BLOCK", "id": "171717", - "logBegin": 'false', - "logEnd": 'false', - "sendEventsToFMC": 'false', - "type": "AccessPolicyDefaultAction" + "logBegin": "false", + "logEnd": "false", + "sendEventsToFMC": "false", + "type": "AccessPolicyDefaultAction", }, "id": "151515", - "links": { - "self": "linkn/123456/policy" - }, - "metadata": { - "domain": { - "id": "123456", - "name": "Global", - "type": "Domain" - }, - "inherit": 'false' - }, + "links": {"self": "linkn/123456/policy"}, + "metadata": {"domain": {"id": "123456", "name": "Global", "type": "Domain"}, "inherit": "false"}, "name": "BPS-Testing", - "prefilterPolicySetting": { - "id": "181818", - "name": "Default Prefilter Policy", - "type": "PrefilterPolicy" - }, - "rules": { - "links": { - "self": "linkn/123456/policy" - }, - "refType": "list", - "type": "AccessRule" - }, - "type": "AccessPolicy" + "prefilterPolicySetting": {"id": "181818", "name": "Default Prefilter Policy", "type": "PrefilterPolicy"}, + "rules": {"links": {"self": "linkn/123456/policy"}, "refType": "list", "type": "AccessRule"}, + "type": "AccessPolicy", }, - { - "DefaultActionID": "171717", - "ID": "151515", - "Name": "BPS-Testing" - } + {"DefaultActionID": "171717", "ID": "151515", "Name": "BPS-Testing"}, ) ] @@ -208,100 +142,53 @@ ( { "action": "BLOCK", - "destinationNetworks": { - "literals": [ - { - "type": "Host", - "value": "ip" - }, - { - "type": "Host", - "value": "ip" - } - ] - }, - "enableSyslog": 'false', - "enabled": 'false', + "destinationNetworks": 
{"literals": [{"type": "Host", "value": "ip"}, {"type": "Host", "value": "ip"}]}, + "enableSyslog": "false", + "enabled": "false", "id": "202020", - "links": { - "self": "linkn/123456/policy" - }, - "logBegin": 'false', - "logEnd": 'false', - "logFiles": 'false', + "links": {"self": "linkn/123456/policy"}, + "logBegin": "false", + "logEnd": "false", + "logFiles": "false", "metadata": { - "accessPolicy": { - "id": "212121", - "name": "Performance Test Policy without AMP", - "type": "AccessPolicy" - }, + "accessPolicy": {"id": "212121", "name": "Performance Test Policy without AMP", "type": "AccessPolicy"}, "category": "--Undefined--", - "domain": { - "id": "123456", - "name": "Global", - "type": "Domain" - }, - "ruleIndex": '5', + "domain": {"id": "123456", "name": "Global", "type": "Domain"}, + "ruleIndex": "5", "section": "Default", - "timestamp": '1582462113800' + "timestamp": "1582462113800", }, "name": "newUpdateTest", - "sendEventsToFMC": 'false', - "sourceNetworks": { - "literals": [ - { - "type": "Host", - "value": "ip1" - }, - { - "type": "Host", - "value": "ip" - } - ] - }, + "sendEventsToFMC": "false", + "sourceNetworks": {"literals": [{"type": "Host", "value": "ip1"}, {"type": "Host", "value": "ip"}]}, "type": "AccessRule", - "urls": { - "literals": [ - { - "type": "Url", - "url": "url" - }, - { - "type": "Url", - "url": "url" - } - ] - }, - "variableSet": { - "id": "101010", - "name": "Default-Set", - "type": "VariableSet" - }, - "vlanTags": {} + "urls": {"literals": [{"type": "Url", "url": "url"}, {"type": "Url", "url": "url"}]}, + "variableSet": {"id": "101010", "name": "Default-Set", "type": "VariableSet"}, + "vlanTags": {}, }, { - 'Action': 'BLOCK', - 'Applications': [], - 'Category': '--Undefined--', - 'DestinationNetworks': {'Addresses': [{'Type': 'Host', 'Value': 'ip'}, - {'Type': 'Host', 'Value': 'ip'}], 'Objects': []}, - 'DestinationPorts': {'Addresses': [], 'Objects': []}, - 'DestinationZones': {'Objects': []}, - 'Enabled': 'false', - 'ID': '202020', - 'Name': 'newUpdateTest', - 'RuleIndex': '5', - 'Section': 'Default', - 'SendEventsToFMC': 'false', - 'SourceNetworks': { - 'Addresses': [{'Type': 'Host', 'Value': 'ip1'}, {'Type': 'Host', 'Value': 'ip'}], - 'Objects': []}, - 'SourcePorts': {'Addresses': [], 'Objects': []}, - 'SourceSecurityGroupTags': {'Objects': []}, - 'SourceZones': {'Objects': []}, - 'Urls': {'Addresses': [{'URL': 'url'}, {'URL': 'url'}], 'Objects': []}, - 'VlanTags': {'Numbers': [], 'Objects': []} - } + "Action": "BLOCK", + "Applications": [], + "Category": "--Undefined--", + "DestinationNetworks": { + "Addresses": [{"Type": "Host", "Value": "ip"}, {"Type": "Host", "Value": "ip"}], + "Objects": [], + }, + "DestinationPorts": {"Addresses": [], "Objects": []}, + "DestinationZones": {"Objects": []}, + "Enabled": "false", + "ID": "202020", + "Name": "newUpdateTest", + "RuleIndex": "5", + "Section": "Default", + "SendEventsToFMC": "false", + "SourceNetworks": {"Addresses": [{"Type": "Host", "Value": "ip1"}, {"Type": "Host", "Value": "ip"}], "Objects": []}, + "SourcePorts": {"Addresses": [], "Objects": []}, + "SourceSecurityGroupTags": {"Objects": []}, + "SourceZones": {"Objects": []}, + "Urls": {"Addresses": [{"URL": "url"}, {"URL": "url"}], "Objects": []}, + "VlanTags": {"Numbers": [], "Objects": []}, + }, ) ] @@ -309,46 +196,46 @@ """ TESTS FUNCTION """ -@pytest.mark.parametrize('list_input, list_output', INPUT_TEST_SWITCH_LIST_TO_LIST_COUNTER) +@pytest.mark.parametrize("list_input, list_output", INPUT_TEST_SWITCH_LIST_TO_LIST_COUNTER) def 
test_switch_list_to_list_counter(list_input, list_output): result = switch_list_to_list_counter(list_input) assert list_output == result -@pytest.mark.parametrize('list_input, list_to_output, list_output', INPUT_TEST_RAW_RESPONSE_TO_CONTEXT_LIST) +@pytest.mark.parametrize("list_input, list_to_output, list_output", INPUT_TEST_RAW_RESPONSE_TO_CONTEXT_LIST) def test_raw_response_to_context_list(list_to_output, list_input, list_output): result = raw_response_to_context_list(list_to_output, list_input) assert list_output == result -@pytest.mark.parametrize('list_input, list_output', INPUT_TEST_RAW_RESPONSE_TO_CONTEXT_NETWORK_GROUPS) +@pytest.mark.parametrize("list_input, list_output", INPUT_TEST_RAW_RESPONSE_TO_CONTEXT_NETWORK_GROUPS) def test_raw_response_to_context_network_groups(list_input, list_output): result = raw_response_to_context_network_groups(list_input) assert list_output == result -@pytest.mark.parametrize('list_input, list_output', INPUT_TEST_RAW_RESPONSE_TO_CONTEXT_POLICY_ASSIGNMENT) +@pytest.mark.parametrize("list_input, list_output", INPUT_TEST_RAW_RESPONSE_TO_CONTEXT_POLICY_ASSIGNMENT) def test_raw_response_to_context_policy_assignment(list_input, list_output): result = raw_response_to_context_policy_assignment(list_input) assert list_output == result -@pytest.mark.parametrize('list_input, list_output', INPUT_TEST_RAW_RESPONSE_TO_CONTEXT_ACCESS_POLICY) +@pytest.mark.parametrize("list_input, list_output", INPUT_TEST_RAW_RESPONSE_TO_CONTEXT_ACCESS_POLICY) def test_raw_response_to_context_access_policy(list_input, list_output): result = raw_response_to_context_access_policy(list_input) assert list_output == result -@pytest.mark.parametrize('list_input, list_output', INPUT_TEST_RAW_RESPONSE_TO_CONTEXT_RULS) +@pytest.mark.parametrize("list_input, list_output", INPUT_TEST_RAW_RESPONSE_TO_CONTEXT_RULS) def test_raw_response_to_context_ruls(list_input, list_output): result = raw_response_to_context_rules(list_input) assert list_output == result -''' Helper Functions ''' # pylint: disable=pointless-string-statement +""" Helper Functions """ # pylint: disable=pointless-string-statement -def assert_output_has_no_links(outputs: Union[list[dict[str, Any]], dict[str, Any]]): +def assert_output_has_no_links(outputs: list[dict[str, Any]] | dict[str, Any]): """ Check that there are no 'links' keys in the outputs. Args: @@ -358,10 +245,10 @@ def assert_output_has_no_links(outputs: Union[list[dict[str, Any]], dict[str, An outputs = [outputs] for output in outputs: - assert 'links' not in output + assert "links" not in output -def assert_command_results(command_results: CommandResults, method: str, expected_output_prefix: str = ''): +def assert_command_results(command_results: CommandResults, method: str, expected_output_prefix: str = ""): """ Test that the command results outputs has no links in it, if it exists. Test the output prefix. @@ -374,17 +261,17 @@ def assert_command_results(command_results: CommandResults, method: str, expecte Defaults to ''. 
""" message_by_method = { - 'POST': 'Created', - 'GET': 'Fetched', - 'PUT': 'Updated', - 'DELETE': 'Deleted', + "POST": "Created", + "GET": "Fetched", + "PUT": "Updated", + "DELETE": "Deleted", } assert command_results.readable_output assert message_by_method[method] in command_results.readable_output - if method != 'DELETE': - context_prefix = '.'.join((INTEGRATION_CONTEXT_NAME, expected_output_prefix)) + if method != "DELETE": + context_prefix = ".".join((INTEGRATION_CONTEXT_NAME, expected_output_prefix)) assert command_results.outputs_prefix == context_prefix assert_output_has_no_links(command_results.outputs) # type: ignore[arg-type] # outputs is Optional[object] @@ -398,10 +285,10 @@ def load_mock_response(file_name: str) -> str | io.TextIOWrapper: Returns: str: Mock file content. """ - path = os.path.join('test_data', file_name) + path = os.path.join("test_data", file_name) - with io.open(path, mode='r', encoding='utf-8') as mock_file: - if os.path.splitext(file_name)[1] == '.json': + with open(path, encoding="utf-8") as mock_file: + if os.path.splitext(file_name)[1] == ".json": return json.loads(mock_file.read()) return mock_file @@ -416,11 +303,8 @@ def mock_client(requests_mock) -> Client: Client: Connection to client. """ requests_mock.post( - f'{BASE_URL}/api/fmc_platform/v1/auth/generatetoken', - headers={ - 'X-auth-access-token': 'X-auth-access-token', - 'DOMAIN_UUID': 'DOMAIN_UUID' - } + f"{BASE_URL}/api/fmc_platform/v1/auth/generatetoken", + headers={"X-auth-access-token": "X-auth-access-token", "DOMAIN_UUID": "DOMAIN_UUID"}, ) return Client( @@ -443,32 +327,28 @@ def test_generate_token_error(): """ mock_response = mock.Mock() mock_response.headers = { - 'X-auth-access-token': '123456', - 'DOMAIN_UUID': 'abcdef', + "X-auth-access-token": "123456", + "DOMAIN_UUID": "abcdef", } - mock_response.raise_for_status.side_effect = Exception('HTTP request failed') + mock_response.raise_for_status.side_effect = Exception("HTTP request failed") - @mock.patch.object(Client, '_http_request', return_value=mock_response) + @mock.patch.object(Client, "_http_request", return_value=mock_response) def test(mock_request): client = Client( base_url=BASE_URL, - username='test', - password='test', + username="test", + password="test", ) try: - client._http_request( - method='POST', - url_suffix='api/test', - resp_type='response' - ) + client._http_request(method="POST", url_suffix="api/test", resp_type="response") except Exception as e: - assert str(e) == 'HTTP request failed' + assert str(e) == "HTTP request failed" test() -''' Intrusion Policy CRUD ''' # pylint: disable=pointless-string-statement +""" Intrusion Policy CRUD """ # pylint: disable=pointless-string-statement def test_create_intrusion_policy_command(requests_mock, mock_client): @@ -485,43 +365,40 @@ def test_create_intrusion_policy_command(requests_mock, mock_client): - Ensure the outputs has no links. 
""" args = { - 'name': 'name', - 'basepolicy_id': 'basepolicy_id', - 'description': 'description', + "name": "name", + "basepolicy_id": "basepolicy_id", + "description": "description", } - method = 'POST' - mock_response = load_mock_response('intrusion_policy_response.json') + method = "POST" + mock_response = load_mock_response("intrusion_policy_response.json") requests_mock.request( method, - f'{BASE_URL}/{SUFFIX}/policy/intrusionpolicies', + f"{BASE_URL}/{SUFFIX}/policy/intrusionpolicies", json=mock_response, ) from CiscoFirepower import create_intrusion_policy_command + command_results = create_intrusion_policy_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=INTRUSION_POLICY_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=INTRUSION_POLICY_CONTEXT) - assert command_results.outputs[0]['name'] == mock_response['name'] - assert command_results.outputs[0]['id'] == mock_response['id'] - assert command_results.outputs[0]['description'] == mock_response['description'] - assert command_results.outputs[0]['basePolicy'] == mock_response['basePolicy'] - assert command_results.outputs[0]['metadata'] == mock_response['metadata'] + assert command_results.outputs[0]["name"] == mock_response["name"] + assert command_results.outputs[0]["id"] == mock_response["id"] + assert command_results.outputs[0]["description"] == mock_response["description"] + assert command_results.outputs[0]["basePolicy"] == mock_response["basePolicy"] + assert command_results.outputs[0]["metadata"] == mock_response["metadata"] @pytest.mark.parametrize( - 'args', + "args", ( ({}), - ({'limit': '6'}), - ({'page_size': '3'}), - ) + ({"limit": "6"}), + ({"page_size": "3"}), + ), ) def test_list_intrusion_policy_command(requests_mock, mock_client, args): """ @@ -538,29 +415,26 @@ def test_list_intrusion_policy_command(requests_mock, mock_client, args): - Ensure the outputs_prefix is correct. - Ensure the outputs has no links. """ - method = 'GET' - mock_response = load_mock_response('intrusion_policy_list_response.json') + method = "GET" + mock_response = load_mock_response("intrusion_policy_list_response.json") requests_mock.request( method, - f'{BASE_URL}/{SUFFIX}/policy/intrusionpolicies', + f"{BASE_URL}/{SUFFIX}/policy/intrusionpolicies", json=mock_response, ) from CiscoFirepower import list_intrusion_policy_command + command_results = list_intrusion_policy_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=INTRUSION_POLICY_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=INTRUSION_POLICY_CONTEXT) - for output, mock_output in zip(command_results.outputs, mock_response['items']): - assert output['name'] == mock_output['name'] - assert output['id'] == mock_output['id'] - assert output['description'] == mock_output['description'] - assert output['metadata'] == mock_output['metadata'] + for output, mock_output in zip(command_results.outputs, mock_response["items"]): + assert output["name"] == mock_output["name"] + assert output["id"] == mock_output["id"] + assert output["description"] == mock_output["description"] + assert output["metadata"] == mock_output["metadata"] def test_get_intrusion_policy_command(requests_mock, mock_client): @@ -576,10 +450,10 @@ def test_get_intrusion_policy_command(requests_mock, mock_client): - Ensure the outputs_prefix is correct. 
- Ensure the outputs has no links. """ - args = {'intrusion_policy_id': 'intrusion_policy_id'} + args = {"intrusion_policy_id": "intrusion_policy_id"} - method = 'GET' - mock_response = load_mock_response('intrusion_policy_response.json') + method = "GET" + mock_response = load_mock_response("intrusion_policy_response.json") requests_mock.request( method, @@ -588,18 +462,15 @@ def test_get_intrusion_policy_command(requests_mock, mock_client): ) from CiscoFirepower import list_intrusion_policy_command + command_results = list_intrusion_policy_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=INTRUSION_POLICY_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=INTRUSION_POLICY_CONTEXT) - assert command_results.outputs[0]['name'] == mock_response['name'] - assert command_results.outputs[0]['id'] == mock_response['id'] - assert command_results.outputs[0]['description'] == mock_response['description'] - assert command_results.outputs[0]['metadata'] == mock_response['metadata'] + assert command_results.outputs[0]["name"] == mock_response["name"] + assert command_results.outputs[0]["id"] == mock_response["id"] + assert command_results.outputs[0]["description"] == mock_response["description"] + assert command_results.outputs[0]["metadata"] == mock_response["metadata"] def test_error_get_intrusion_policy_command(mock_client): @@ -614,15 +485,16 @@ def test_error_get_intrusion_policy_command(mock_client): - Ensure an exception has been raised and it is correct. """ args = { - 'limit': '5', - 'intrusion_policy_id': 'intrusion_policy_id', + "limit": "5", + "intrusion_policy_id": "intrusion_policy_id", } with pytest.raises(ValueError) as ve: from CiscoFirepower import list_intrusion_policy_command + list_intrusion_policy_command(mock_client, args) - assert str(ve) == 'GET and LIST arguments can not be supported simutanlesy.' + assert str(ve) == "GET and LIST arguments can not be supported simutanlesy." def test_update_intrusion_policy_command(requests_mock, mock_client): @@ -639,15 +511,15 @@ def test_update_intrusion_policy_command(requests_mock, mock_client): - Ensure the outputs has no links. 
""" args = { - 'intrusion_policy_id': 'intrusion_policy_id', - 'name': 'name', - 'basepolicy_id': 'basepolicy_id', - 'description': 'description', - 'inspection_mode': 'PREVENTION' + "intrusion_policy_id": "intrusion_policy_id", + "name": "name", + "basepolicy_id": "basepolicy_id", + "description": "description", + "inspection_mode": "PREVENTION", } - method = 'PUT' - mock_response = load_mock_response('intrusion_policy_response.json') + method = "PUT" + mock_response = load_mock_response("intrusion_policy_response.json") requests_mock.request( method, @@ -656,19 +528,16 @@ def test_update_intrusion_policy_command(requests_mock, mock_client): ) from CiscoFirepower import update_intrusion_policy_command + command_results = update_intrusion_policy_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=INTRUSION_POLICY_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=INTRUSION_POLICY_CONTEXT) - assert command_results.outputs[0]['name'] == mock_response['name'] - assert command_results.outputs[0]['id'] == mock_response['id'] - assert command_results.outputs[0]['description'] == mock_response['description'] - assert command_results.outputs[0]['basePolicy'] == mock_response['basePolicy'] - assert command_results.outputs[0]['metadata'] == mock_response['metadata'] + assert command_results.outputs[0]["name"] == mock_response["name"] + assert command_results.outputs[0]["id"] == mock_response["id"] + assert command_results.outputs[0]["description"] == mock_response["description"] + assert command_results.outputs[0]["basePolicy"] == mock_response["basePolicy"] + assert command_results.outputs[0]["metadata"] == mock_response["metadata"] def test_delete_intrusion_policy_command(requests_mock, mock_client): @@ -682,12 +551,10 @@ def test_delete_intrusion_policy_command(requests_mock, mock_client): Then: - Ensure the readable_output is correct. """ - args = { - 'intrusion_policy_id': 'intrusion_policy_id' - } + args = {"intrusion_policy_id": "intrusion_policy_id"} - method = 'DELETE' - mock_response = load_mock_response('intrusion_policy_response.json') + method = "DELETE" + mock_response = load_mock_response("intrusion_policy_response.json") requests_mock.request( method, @@ -696,12 +563,10 @@ def test_delete_intrusion_policy_command(requests_mock, mock_client): ) from CiscoFirepower import delete_intrusion_policy_command + command_results = delete_intrusion_policy_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method - ) + assert_command_results(command_results=command_results, method=method) def test_delete_intrusion_policy_error_command(requests_mock, mock_client): @@ -715,12 +580,10 @@ def test_delete_intrusion_policy_error_command(requests_mock, mock_client): Then: - Ensure the readable_output is correct. 
""" - args = { - 'intrusion_policy_id': 'intrusion_policy_id' - } + args = {"intrusion_policy_id": "intrusion_policy_id"} - method = 'DELETE' - mock_response = load_mock_response('intrusion_policy_delete_fail.json') + method = "DELETE" + mock_response = load_mock_response("intrusion_policy_delete_fail.json") requests_mock.request( method, @@ -730,13 +593,13 @@ def test_delete_intrusion_policy_error_command(requests_mock, mock_client): ) from CiscoFirepower import delete_intrusion_policy_command + command_results = delete_intrusion_policy_command(mock_client, args) - assert command_results.readable_output == \ - f'The Intrusion Policy ID: "{args["intrusion_policy_id"]}" does not exist.' + assert command_results.readable_output == f'The Intrusion Policy ID: "{args["intrusion_policy_id"]}" does not exist.' -''' Intrusion Rule CRUD ''' # pylint: disable=pointless-string-statement +""" Intrusion Rule CRUD """ # pylint: disable=pointless-string-statement def test_create_intrusion_rule_command(requests_mock, mock_client): @@ -753,41 +616,38 @@ def test_create_intrusion_rule_command(requests_mock, mock_client): - Ensure the outputs has no links. """ args = { - 'rule_data': 'rule_data', - 'rule_group_ids': 'rule_group_id1,rule_group_id2', + "rule_data": "rule_data", + "rule_group_ids": "rule_group_id1,rule_group_id2", } - method = 'POST' - mock_response = load_mock_response('intrusion_rule_response.json') + method = "POST" + mock_response = load_mock_response("intrusion_rule_response.json") requests_mock.request( method, - f'{BASE_URL}/{SUFFIX}/object/intrusionrules', + f"{BASE_URL}/{SUFFIX}/object/intrusionrules", json=mock_response, ) from CiscoFirepower import create_intrusion_rule_command + command_results = create_intrusion_rule_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=INTRUSION_RULE_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=INTRUSION_RULE_CONTEXT) - assert command_results.outputs[0]['name'] == mock_response['name'] - assert command_results.outputs[0]['id'] == mock_response['id'] - assert command_results.outputs[0]['ruleData'] == mock_response['ruleData'] - assert command_results.outputs[0]['ruleGroups'] == mock_response['ruleGroups'] + assert command_results.outputs[0]["name"] == mock_response["name"] + assert command_results.outputs[0]["id"] == mock_response["id"] + assert command_results.outputs[0]["ruleData"] == mock_response["ruleData"] + assert command_results.outputs[0]["ruleGroups"] == mock_response["ruleGroups"] @pytest.mark.parametrize( - 'args', + "args", ( - ({'expanded_response': 'True'}), - ({'limit': '6', 'expanded_response': 'False'}), - ({'page_size': '3'}), - ) + ({"expanded_response": "True"}), + ({"limit": "6", "expanded_response": "False"}), + ({"page_size": "3"}), + ), ) def test_list_intrusion_rule_command(requests_mock, mock_client, args): """ @@ -804,29 +664,26 @@ def test_list_intrusion_rule_command(requests_mock, mock_client, args): - Ensure the outputs_prefix is correct. - Ensure the outputs has no links. 
""" - method = 'GET' - mock_response = load_mock_response('intrusion_rule_list_response.json') + method = "GET" + mock_response = load_mock_response("intrusion_rule_list_response.json") requests_mock.request( method, - f'{BASE_URL}/{SUFFIX}/object/intrusionrules', + f"{BASE_URL}/{SUFFIX}/object/intrusionrules", json=mock_response, ) from CiscoFirepower import list_intrusion_rule_command + command_results = list_intrusion_rule_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=INTRUSION_RULE_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=INTRUSION_RULE_CONTEXT) - for output, mock_output in zip(command_results.outputs, mock_response['items']): - assert output['name'] == mock_output['name'] - assert output['id'] == mock_output['id'] - assert output['msg'] == mock_output['msg'] - assert output['ruleAction'] == mock_output['ruleAction'] + for output, mock_output in zip(command_results.outputs, mock_response["items"]): + assert output["name"] == mock_output["name"] + assert output["id"] == mock_output["id"] + assert output["msg"] == mock_output["msg"] + assert output["ruleAction"] == mock_output["ruleAction"] def test_get_intrusion_rule_command(requests_mock, mock_client): @@ -842,9 +699,9 @@ def test_get_intrusion_rule_command(requests_mock, mock_client): - Ensure the outputs_prefix is correct. - Ensure the outputs has no links. """ - args = {'intrusion_rule_id': 'intrusion_rule_id'} - method = 'GET' - mock_response = load_mock_response('intrusion_rule_response.json') + args = {"intrusion_rule_id": "intrusion_rule_id"} + method = "GET" + mock_response = load_mock_response("intrusion_rule_response.json") requests_mock.request( method, @@ -853,18 +710,15 @@ def test_get_intrusion_rule_command(requests_mock, mock_client): ) from CiscoFirepower import list_intrusion_rule_command + command_results = list_intrusion_rule_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=INTRUSION_RULE_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=INTRUSION_RULE_CONTEXT) - assert command_results.outputs[0]['name'] == mock_response['name'] - assert command_results.outputs[0]['id'] == mock_response['id'] - assert command_results.outputs[0]['ruleData'] == mock_response['ruleData'] - assert command_results.outputs[0]['ruleGroups'] == mock_response['ruleGroups'] + assert command_results.outputs[0]["name"] == mock_response["name"] + assert command_results.outputs[0]["id"] == mock_response["id"] + assert command_results.outputs[0]["ruleData"] == mock_response["ruleData"] + assert command_results.outputs[0]["ruleGroups"] == mock_response["ruleGroups"] def test_error_get_intrusion_rule_command(mock_client): @@ -879,34 +733,35 @@ def test_error_get_intrusion_rule_command(mock_client): - Ensure an exception has been raised and it is correct. """ args = { - 'limit': '5', - 'intrusion_rule_id': 'intrusion_rule_id', + "limit": "5", + "intrusion_rule_id": "intrusion_rule_id", } with pytest.raises(ValueError) as ve: from CiscoFirepower import list_intrusion_rule_command + list_intrusion_rule_command(mock_client, args) - assert str(ve) == 'GET and LIST arguments can not be supported simutanlesy.' + assert str(ve) == "GET and LIST arguments can not be supported simutanlesy." 
@pytest.mark.parametrize( - 'args', + "args", ( { - 'intrusion_rule_id': 'intrusion_rule_id', - 'rule_data': 'rule_data', - 'rule_group_ids': 'rule_group_id1,rule_group_id2', + "intrusion_rule_id": "intrusion_rule_id", + "rule_data": "rule_data", + "rule_group_ids": "rule_group_id1,rule_group_id2", }, { - 'intrusion_rule_id': 'intrusion_rule_id', - 'rule_group_ids': 'rule_group_id1,rule_group_id2', + "intrusion_rule_id": "intrusion_rule_id", + "rule_group_ids": "rule_group_id1,rule_group_id2", }, { - 'intrusion_rule_id': 'intrusion_rule_id', - 'rule_data': 'rule_data', + "intrusion_rule_id": "intrusion_rule_id", + "rule_data": "rule_data", }, - ) + ), ) def test_update_intrusion_rule_command(requests_mock, mock_client, args): """ @@ -923,13 +778,13 @@ def test_update_intrusion_rule_command(requests_mock, mock_client, args): - Ensure the outputs_prefix is correct. - Ensure the outputs has no links. """ - method = 'PUT' + method = "PUT" url = f'{BASE_URL}/{SUFFIX}/object/intrusionrules/{args["intrusion_rule_id"]}' - mock_response = load_mock_response('intrusion_rule_response.json') + mock_response = load_mock_response("intrusion_rule_response.json") - if 'rule_data' not in args or 'rule_group_ids' not in args: + if "rule_data" not in args or "rule_group_ids" not in args: requests_mock.request( - 'GET', + "GET", url, json=mock_response, ) @@ -941,32 +796,29 @@ def test_update_intrusion_rule_command(requests_mock, mock_client, args): ) from CiscoFirepower import update_intrusion_rule_command + command_results = update_intrusion_rule_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=INTRUSION_RULE_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=INTRUSION_RULE_CONTEXT) - assert command_results.outputs[0]['name'] == mock_response['name'] - assert command_results.outputs[0]['id'] == mock_response['id'] - assert command_results.outputs[0]['ruleData'] == mock_response['ruleData'] - assert command_results.outputs[0]['ruleGroups'] == mock_response['ruleGroups'] + assert command_results.outputs[0]["name"] == mock_response["name"] + assert command_results.outputs[0]["id"] == mock_response["id"] + assert command_results.outputs[0]["ruleData"] == mock_response["ruleData"] + assert command_results.outputs[0]["ruleGroups"] == mock_response["ruleGroups"] @pytest.mark.parametrize( - 'args, expected_output', + "args, expected_output", ( ( - {'intrusion_rule_id': 'intrusion_rule_id'}, - 'At least rule_data or rule_group_ids must be entered, if not both of them.' + {"intrusion_rule_id": "intrusion_rule_id"}, + "At least rule_data or rule_group_ids must be entered, if not both of them.", ), ( - {'intrusion_rule_id': 'intrusion_rule_id', 'update_strategy': 'MERGE', 'rule_data': 'rule_data'}, - 'rule_group_ids must be entered when merging.' 
+ {"intrusion_rule_id": "intrusion_rule_id", "update_strategy": "MERGE", "rule_data": "rule_data"}, + "rule_group_ids must be entered when merging.", ), - ) + ), ) def test_error_update_intrusion_rule_command(mock_client, args, expected_output): """ @@ -982,6 +834,7 @@ def test_error_update_intrusion_rule_command(mock_client, args, expected_output) """ with pytest.raises(ValueError) as ve: from CiscoFirepower import update_intrusion_rule_command + update_intrusion_rule_command(mock_client, args) assert str(ve) == expected_output @@ -999,11 +852,11 @@ def test_delete_intrusion_rule_command(requests_mock, mock_client): - Ensure the readable_output is correct. """ args = { - 'intrusion_rule_id': 'intrusion_rule_id', + "intrusion_rule_id": "intrusion_rule_id", } - method = 'DELETE' - mock_response = load_mock_response('intrusion_rule_response.json') + method = "DELETE" + mock_response = load_mock_response("intrusion_rule_response.json") requests_mock.request( method, @@ -1012,6 +865,7 @@ def test_delete_intrusion_rule_command(requests_mock, mock_client): ) from CiscoFirepower import delete_intrusion_rule_command + command_results = delete_intrusion_rule_command(mock_client, args) assert_command_results( @@ -1020,7 +874,7 @@ def test_delete_intrusion_rule_command(requests_mock, mock_client): ) -@mock.patch('CiscoFirepower.demisto.getFilePath', lambda x: FILE_ENTRY) +@mock.patch("CiscoFirepower.demisto.getFilePath", lambda x: FILE_ENTRY) def test_upload_intrusion_file_validation_command(requests_mock, mock_client): """ Scenario: @@ -1035,24 +889,23 @@ def test_upload_intrusion_file_validation_command(requests_mock, mock_client): - Ensure the outputs_prefix is correct. - Ensure the readable_output is correct. """ - args = { - 'entry_id': 'entry_id' - } + args = {"entry_id": "entry_id"} - mock_response = load_mock_response('intrusion_rule_upload_validation_response.json') + mock_response = load_mock_response("intrusion_rule_upload_validation_response.json") requests_mock.post( - f'{BASE_URL}/{SUFFIX}/object/intrusionrulesupload', + f"{BASE_URL}/{SUFFIX}/object/intrusionrulesupload", json=mock_response, ) from CiscoFirepower import upload_intrusion_rule_file_command + command_results = upload_intrusion_rule_file_command(mock_client, args) assert f'Validation for Intrusion Rules within: "{FILE_ENTRY["name"]}"' in command_results.readable_output - assert mock_response['error']['messages'][0]['description'] in command_results.readable_output + assert mock_response["error"]["messages"][0]["description"] in command_results.readable_output -@mock.patch('CiscoFirepower.demisto.getFilePath', lambda x: FILE_ENTRY) +@mock.patch("CiscoFirepower.demisto.getFilePath", lambda x: FILE_ENTRY) def test_upload_intrusion_file_import_command(requests_mock, mock_client): """ Scenario: @@ -1066,57 +919,58 @@ def test_upload_intrusion_file_import_command(requests_mock, mock_client): - Ensure the readable_output is correct. 
""" args = { - 'entry_id': 'entry_id', - 'rule_import_mode': 'MERGE', - 'rule_group_ids': 'rule_group_id1,rule_group_id2', + "entry_id": "entry_id", + "rule_import_mode": "MERGE", + "rule_group_ids": "rule_group_id1,rule_group_id2", } - mock_response = load_mock_response('intrusion_rule_upload_import_response.json') + mock_response = load_mock_response("intrusion_rule_upload_import_response.json") requests_mock.post( - f'{BASE_URL}/{SUFFIX}/object/intrusionrulesupload', + f"{BASE_URL}/{SUFFIX}/object/intrusionrulesupload", json=mock_response, ) from CiscoFirepower import upload_intrusion_rule_file_command + command_results = upload_intrusion_rule_file_command(mock_client, args) - assert command_results.outputs_prefix == '.'.join((INTEGRATION_CONTEXT_NAME, INTRUSION_RULE_UPLOAD_CONTEXT)) + assert command_results.outputs_prefix == ".".join((INTEGRATION_CONTEXT_NAME, INTRUSION_RULE_UPLOAD_CONTEXT)) assert INTRUSION_RULE_UPLOAD_TITLE in command_results.readable_output - assert command_results.outputs[0]['summary'] == mock_response['summary'] - assert command_results.outputs[0]['ruleGroups'] == mock_response['ruleGroups'] - assert command_results.outputs[0]['files'] == mock_response['files'] + assert command_results.outputs[0]["summary"] == mock_response["summary"] + assert command_results.outputs[0]["ruleGroups"] == mock_response["ruleGroups"] + assert command_results.outputs[0]["files"] == mock_response["files"] @pytest.mark.parametrize( - 'args, expected_output', + "args, expected_output", ( ( { - 'entry_id': 'entry_id', - 'validate_only': 'False', - 'rule_import_mode': 'MERGE', + "entry_id": "entry_id", + "validate_only": "False", + "rule_import_mode": "MERGE", }, - 'rule_import_mode and rule_group_ids must be inserted when validate_only is "False".' + 'rule_import_mode and rule_group_ids must be inserted when validate_only is "False".', ), ( { - 'entry_id': 'entry_id', - 'validate_only': 'False', - 'rule_group_ids': 'rule_group_ids1,rule_group_ids2', + "entry_id": "entry_id", + "validate_only": "False", + "rule_group_ids": "rule_group_ids1,rule_group_ids2", }, - 'rule_import_mode and rule_group_ids must be inserted when validate_only is "False".' + 'rule_import_mode and rule_group_ids must be inserted when validate_only is "False".', ), ( { - 'entry_id': 'entry_id', - 'rule_import_mode': 'REPLACE', - 'rule_group_ids': 'rule_group_ids1,rule_group_ids2', + "entry_id": "entry_id", + "rule_import_mode": "REPLACE", + "rule_group_ids": "rule_group_ids1,rule_group_ids2", }, - 'Supported file formats are ".txt" and ".rules".' + 'Supported file formats are ".txt" and ".rules".', ), - ) + ), ) -@mock.patch('CiscoFirepower.demisto.getFilePath', lambda x: FILE_ENTRY_ERROR) +@mock.patch("CiscoFirepower.demisto.getFilePath", lambda x: FILE_ENTRY_ERROR) def test_error_upload_intrusion_file_command(mock_client, args, expected_output): """ Scenario: @@ -1132,12 +986,13 @@ def test_error_upload_intrusion_file_command(mock_client, args, expected_output) """ with pytest.raises(ValueError) as ve: from CiscoFirepower import upload_intrusion_rule_file_command + upload_intrusion_rule_file_command(mock_client, args) assert str(ve) == expected_output -''' Intrusion Rule Group CRUD ''' # pylint: disable=pointless-string-statement +""" Intrusion Rule Group CRUD """ # pylint: disable=pointless-string-statement def test_create_intrusion_rule_group_command(requests_mock, mock_client): @@ -1154,40 +1009,37 @@ def test_create_intrusion_rule_group_command(requests_mock, mock_client): - Ensure the outputs has no links. 
""" args = { - 'name': 'name', - 'description': 'description', + "name": "name", + "description": "description", } - method = 'POST' - mock_response = load_mock_response('intrusion_rule_group_response.json') + method = "POST" + mock_response = load_mock_response("intrusion_rule_group_response.json") requests_mock.request( method, - f'{BASE_URL}/{SUFFIX}/object/intrusionrulegroups', + f"{BASE_URL}/{SUFFIX}/object/intrusionrulegroups", json=mock_response, ) from CiscoFirepower import create_intrusion_rule_group_command + command_results = create_intrusion_rule_group_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=INTRUSION_RULE_GROUP_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=INTRUSION_RULE_GROUP_CONTEXT) - assert command_results.outputs[0]['name'] == mock_response['name'] - assert command_results.outputs[0]['id'] == mock_response['id'] - assert command_results.outputs[0]['description'] == mock_response['description'] + assert command_results.outputs[0]["name"] == mock_response["name"] + assert command_results.outputs[0]["id"] == mock_response["id"] + assert command_results.outputs[0]["description"] == mock_response["description"] @pytest.mark.parametrize( - 'args', + "args", ( - ({'expanded_response': 'True'}), - ({'limit': '6', 'expanded_response': 'False'}), - ({'page_size': '3'}), - ) + ({"expanded_response": "True"}), + ({"limit": "6", "expanded_response": "False"}), + ({"page_size": "3"}), + ), ) def test_list_intrusion_rule_group_command(requests_mock, mock_client, args): """ @@ -1205,29 +1057,26 @@ def test_list_intrusion_rule_group_command(requests_mock, mock_client, args): - Ensure the outputs has no links. """ - method = 'GET' - mock_response = load_mock_response('intrusion_rule_group_list_response.json') + method = "GET" + mock_response = load_mock_response("intrusion_rule_group_list_response.json") requests_mock.request( method, - f'{BASE_URL}/{SUFFIX}/object/intrusionrulegroups', + f"{BASE_URL}/{SUFFIX}/object/intrusionrulegroups", json=mock_response, ) from CiscoFirepower import list_intrusion_rule_group_command + command_results = list_intrusion_rule_group_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=INTRUSION_RULE_GROUP_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=INTRUSION_RULE_GROUP_CONTEXT) - for output, mock_output in zip(command_results.outputs, mock_response['items']): - assert output['name'] == mock_output['name'] - assert output['id'] == mock_output['id'] - assert output['description'] == mock_output['description'] - assert output['childGroups'] == mock_output['childGroups'] + for output, mock_output in zip(command_results.outputs, mock_response["items"]): + assert output["name"] == mock_output["name"] + assert output["id"] == mock_output["id"] + assert output["description"] == mock_output["description"] + assert output["childGroups"] == mock_output["childGroups"] def test_get_intrusion_rule_group_command(requests_mock, mock_client): @@ -1243,10 +1092,10 @@ def test_get_intrusion_rule_group_command(requests_mock, mock_client): - Ensure the outputs_prefix is correct. - Ensure the outputs has no links. 
""" - args = {'rule_group_id': 'rule_group_id'} + args = {"rule_group_id": "rule_group_id"} - method = 'GET' - mock_response = load_mock_response('intrusion_rule_group_response.json') + method = "GET" + mock_response = load_mock_response("intrusion_rule_group_response.json") requests_mock.request( method, @@ -1255,17 +1104,14 @@ def test_get_intrusion_rule_group_command(requests_mock, mock_client): ) from CiscoFirepower import list_intrusion_rule_group_command + command_results = list_intrusion_rule_group_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=INTRUSION_RULE_GROUP_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=INTRUSION_RULE_GROUP_CONTEXT) - assert command_results.outputs[0]['name'] == mock_response['name'] - assert command_results.outputs[0]['id'] == mock_response['id'] - assert command_results.outputs[0]['description'] == mock_response['description'] + assert command_results.outputs[0]["name"] == mock_response["name"] + assert command_results.outputs[0]["id"] == mock_response["id"] + assert command_results.outputs[0]["description"] == mock_response["description"] def test_error_get_intrusion_rule_group_command(mock_client): @@ -1280,15 +1126,16 @@ def test_error_get_intrusion_rule_group_command(mock_client): - Ensure an exception has been raised and it is correct. """ args = { - 'limit': '5', - 'rule_group_id': 'rule_group_id', + "limit": "5", + "rule_group_id": "rule_group_id", } with pytest.raises(ValueError) as ve: from CiscoFirepower import list_intrusion_rule_group_command + list_intrusion_rule_group_command(mock_client, args) - assert str(ve) == 'GET and LIST arguments can not be supported simutanlesy.' + assert str(ve) == "GET and LIST arguments can not be supported simutanlesy." def test_update_intrusion_rule_group_command(requests_mock, mock_client): @@ -1305,13 +1152,13 @@ def test_update_intrusion_rule_group_command(requests_mock, mock_client): - Ensure the outputs has no links. 
""" args = { - 'rule_group_id': 'rule_group_id', - 'name': 'name', - 'description': 'description', + "rule_group_id": "rule_group_id", + "name": "name", + "description": "description", } - method = 'PUT' - mock_response = load_mock_response('intrusion_rule_group_response.json') + method = "PUT" + mock_response = load_mock_response("intrusion_rule_group_response.json") requests_mock.request( method, @@ -1320,17 +1167,14 @@ def test_update_intrusion_rule_group_command(requests_mock, mock_client): ) from CiscoFirepower import update_intrusion_rule_group_command + command_results = update_intrusion_rule_group_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=INTRUSION_RULE_GROUP_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=INTRUSION_RULE_GROUP_CONTEXT) - assert command_results.outputs[0]['name'] == mock_response['name'] - assert command_results.outputs[0]['id'] == mock_response['id'] - assert command_results.outputs[0]['description'] == mock_response['description'] + assert command_results.outputs[0]["name"] == mock_response["name"] + assert command_results.outputs[0]["id"] == mock_response["id"] + assert command_results.outputs[0]["description"] == mock_response["description"] def test_delete_intrusion_rule_group_command(requests_mock, mock_client): @@ -1344,12 +1188,10 @@ def test_delete_intrusion_rule_group_command(requests_mock, mock_client): Then: - Ensure the readable_output is correct. """ - args = { - 'rule_group_id': 'rule_group_id' - } + args = {"rule_group_id": "rule_group_id"} - method = 'DELETE' - mock_response = load_mock_response('intrusion_rule_group_response.json') + method = "DELETE" + mock_response = load_mock_response("intrusion_rule_group_response.json") requests_mock.request( method, @@ -1358,6 +1200,7 @@ def test_delete_intrusion_rule_group_command(requests_mock, mock_client): ) from CiscoFirepower import delete_intrusion_rule_group_command + command_results = delete_intrusion_rule_group_command(mock_client, args) assert_command_results( @@ -1366,7 +1209,7 @@ def test_delete_intrusion_rule_group_command(requests_mock, mock_client): ) -''' Network Analysis Policy CRUD ''' # pylint: disable=pointless-string-statement +""" Network Analysis Policy CRUD """ # pylint: disable=pointless-string-statement def test_create_network_analysis_policy_command(requests_mock, mock_client): @@ -1383,43 +1226,40 @@ def test_create_network_analysis_policy_command(requests_mock, mock_client): - Ensure the outputs has no links. 
""" args = { - 'name': 'name', - 'basepolicy_id': 'basepolicy_id', - 'description': 'description', + "name": "name", + "basepolicy_id": "basepolicy_id", + "description": "description", } - method = 'POST' - mock_response = load_mock_response('network_analysis_response.json') + method = "POST" + mock_response = load_mock_response("network_analysis_response.json") requests_mock.request( method, - f'{BASE_URL}/{SUFFIX}/policy/networkanalysispolicies', + f"{BASE_URL}/{SUFFIX}/policy/networkanalysispolicies", json=mock_response, ) from CiscoFirepower import create_network_analysis_policy_command + command_results = create_network_analysis_policy_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=NETWORK_ANALYSIS_POLICY_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=NETWORK_ANALYSIS_POLICY_CONTEXT) - assert command_results.outputs[0]['name'] == mock_response['name'] - assert command_results.outputs[0]['id'] == mock_response['id'] - assert command_results.outputs[0]['description'] == mock_response['description'] - assert command_results.outputs[0]['basePolicy'] == mock_response['basePolicy'] - assert command_results.outputs[0]['metadata'] == mock_response['metadata'] + assert command_results.outputs[0]["name"] == mock_response["name"] + assert command_results.outputs[0]["id"] == mock_response["id"] + assert command_results.outputs[0]["description"] == mock_response["description"] + assert command_results.outputs[0]["basePolicy"] == mock_response["basePolicy"] + assert command_results.outputs[0]["metadata"] == mock_response["metadata"] @pytest.mark.parametrize( - 'args', + "args", ( - ({'expanded_response': 'True'}), - ({'limit': '6', 'expanded_response': 'False'}), - ({'page_size': '3'}), - ) + ({"expanded_response": "True"}), + ({"limit": "6", "expanded_response": "False"}), + ({"page_size": "3"}), + ), ) def test_list_network_analysis_policy_command(requests_mock, mock_client, args): """ @@ -1438,29 +1278,26 @@ def test_list_network_analysis_policy_command(requests_mock, mock_client, args): - Ensure the outputs_prefix is correct. - Ensure the outputs has no links. 
""" - method = 'GET' - mock_response = load_mock_response('network_analysis_list_response.json') + method = "GET" + mock_response = load_mock_response("network_analysis_list_response.json") requests_mock.request( method, - f'{BASE_URL}/{SUFFIX}/policy/networkanalysispolicies', + f"{BASE_URL}/{SUFFIX}/policy/networkanalysispolicies", json=mock_response, ) from CiscoFirepower import list_network_analysis_policy_command + command_results = list_network_analysis_policy_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=NETWORK_ANALYSIS_POLICY_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=NETWORK_ANALYSIS_POLICY_CONTEXT) - for output, mock_output in zip(command_results.outputs, mock_response['items']): - assert output['name'] == mock_output['name'] - assert output['id'] == mock_output['id'] - assert output['description'] == mock_output['description'] - assert output['metadata'] == mock_output['metadata'] + for output, mock_output in zip(command_results.outputs, mock_response["items"]): + assert output["name"] == mock_output["name"] + assert output["id"] == mock_output["id"] + assert output["description"] == mock_output["description"] + assert output["metadata"] == mock_output["metadata"] def test_get_network_analysis_policy_command(requests_mock, mock_client): @@ -1476,9 +1313,9 @@ def test_get_network_analysis_policy_command(requests_mock, mock_client): - Ensure the outputs_prefix is correct. - Ensure the outputs has no links. """ - args = {'network_analysis_policy_id': 'network_analysis_policy_id'} - method = 'GET' - mock_response = load_mock_response('network_analysis_response.json') + args = {"network_analysis_policy_id": "network_analysis_policy_id"} + method = "GET" + mock_response = load_mock_response("network_analysis_response.json") requests_mock.request( method, @@ -1487,19 +1324,16 @@ def test_get_network_analysis_policy_command(requests_mock, mock_client): ) from CiscoFirepower import list_network_analysis_policy_command + command_results = list_network_analysis_policy_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=NETWORK_ANALYSIS_POLICY_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=NETWORK_ANALYSIS_POLICY_CONTEXT) - assert command_results.outputs[0]['name'] == mock_response['name'] - assert command_results.outputs[0]['id'] == mock_response['id'] - assert command_results.outputs[0]['description'] == mock_response['description'] - assert command_results.outputs[0]['basePolicy'] == mock_response['basePolicy'] - assert command_results.outputs[0]['metadata'] == mock_response['metadata'] + assert command_results.outputs[0]["name"] == mock_response["name"] + assert command_results.outputs[0]["id"] == mock_response["id"] + assert command_results.outputs[0]["description"] == mock_response["description"] + assert command_results.outputs[0]["basePolicy"] == mock_response["basePolicy"] + assert command_results.outputs[0]["metadata"] == mock_response["metadata"] def test_error_get_network_analysis_policy_command(mock_client): @@ -1514,15 +1348,16 @@ def test_error_get_network_analysis_policy_command(mock_client): - Ensure an exception has been raised and it is correct. 
""" args = { - 'limit': '5', - 'network_analysis_policy_id': 'network_analysis_policy_id', + "limit": "5", + "network_analysis_policy_id": "network_analysis_policy_id", } with pytest.raises(ValueError) as ve: from CiscoFirepower import list_network_analysis_policy_command + list_network_analysis_policy_command(mock_client, args) - assert str(ve) == 'GET and LIST arguments can not be supported simutanlesy.' + assert str(ve) == "GET and LIST arguments can not be supported simutanlesy." def test_update_network_analysis_policy_command(requests_mock, mock_client): @@ -1539,16 +1374,16 @@ def test_update_network_analysis_policy_command(requests_mock, mock_client): - Ensure the outputs has no links. """ args = { - 'network_analysis_policy_id': 'network_analysis_policy_id', - 'name': 'name', - 'basepolicy_id': 'basepolicy_id', - 'description': 'description', - 'inspection_mode': 'PREVENTION', - 'replicate_inspection_mode': 'True' + "network_analysis_policy_id": "network_analysis_policy_id", + "name": "name", + "basepolicy_id": "basepolicy_id", + "description": "description", + "inspection_mode": "PREVENTION", + "replicate_inspection_mode": "True", } - method = 'PUT' - mock_response = load_mock_response('network_analysis_response.json') + method = "PUT" + mock_response = load_mock_response("network_analysis_response.json") requests_mock.request( method, @@ -1557,19 +1392,16 @@ def test_update_network_analysis_policy_command(requests_mock, mock_client): ) from CiscoFirepower import update_network_analysis_policy_command + command_results = update_network_analysis_policy_command(mock_client, args) - assert_command_results( - command_results=command_results, - method=method, - expected_output_prefix=NETWORK_ANALYSIS_POLICY_CONTEXT - ) + assert_command_results(command_results=command_results, method=method, expected_output_prefix=NETWORK_ANALYSIS_POLICY_CONTEXT) - assert command_results.outputs[0]['name'] == mock_response['name'] - assert command_results.outputs[0]['id'] == mock_response['id'] - assert command_results.outputs[0]['description'] == mock_response['description'] - assert command_results.outputs[0]['basePolicy'] == mock_response['basePolicy'] - assert command_results.outputs[0]['metadata'] == mock_response['metadata'] + assert command_results.outputs[0]["name"] == mock_response["name"] + assert command_results.outputs[0]["id"] == mock_response["id"] + assert command_results.outputs[0]["description"] == mock_response["description"] + assert command_results.outputs[0]["basePolicy"] == mock_response["basePolicy"] + assert command_results.outputs[0]["metadata"] == mock_response["metadata"] def test_delete_network_analysis_policy_command(requests_mock, mock_client): @@ -1583,12 +1415,10 @@ def test_delete_network_analysis_policy_command(requests_mock, mock_client): Then: - Ensure the readable_output is correct. 
""" - args = { - 'network_analysis_policy_id': 'network_analysis_policy_id' - } + args = {"network_analysis_policy_id": "network_analysis_policy_id"} - method = 'DELETE' - mock_response = load_mock_response('network_analysis_response.json') + method = "DELETE" + mock_response = load_mock_response("network_analysis_response.json") requests_mock.request( method, @@ -1597,6 +1427,7 @@ def test_delete_network_analysis_policy_command(requests_mock, mock_client): ) from CiscoFirepower import delete_network_analysis_policy_command + command_results = delete_network_analysis_policy_command(mock_client, args) assert_command_results( diff --git a/Packs/CiscoFirepower/ReleaseNotes/1_2_7.md b/Packs/CiscoFirepower/ReleaseNotes/1_2_7.md new file mode 100644 index 000000000000..605fc9d95fb8 --- /dev/null +++ b/Packs/CiscoFirepower/ReleaseNotes/1_2_7.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### Cisco Firepower + +- Metadata and documentation improvements. diff --git a/Packs/CiscoFirepower/pack_metadata.json b/Packs/CiscoFirepower/pack_metadata.json index f2d02bf43b63..b80e42830c58 100644 --- a/Packs/CiscoFirepower/pack_metadata.json +++ b/Packs/CiscoFirepower/pack_metadata.json @@ -2,7 +2,7 @@ "name": "Cisco Firepower", "description": "Use the CiscoFirepower integration for unified management of firewalls, application control", "support": "xsoar", - "currentVersion": "1.2.6", + "currentVersion": "1.2.7", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", From 7ec3d7ccb26ae89832285afbaad7c0e14ef2db48 Mon Sep 17 00:00:00 2001 From: Content Bot Date: Sun, 23 Mar 2025 12:53:03 +0000 Subject: [PATCH 11/18] CheckPointHarmonyEndpoint: Apply ruff Format --- .../CheckPointHarmonyEndpoint.py | 174 +++++------------- .../CheckPointHarmonyEndpoint_test.py | 126 ++++--------- .../ReleaseNotes/1_0_3.md | 6 + .../pack_metadata.json | 2 +- 4 files changed, 86 insertions(+), 222 deletions(-) create mode 100644 Packs/CheckPointHarmonyEndpoint/ReleaseNotes/1_0_3.md diff --git a/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint.py b/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint.py index 5fd248ff79d9..64335403c3f7 100644 --- a/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint.py +++ b/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint.py @@ -1,6 +1,5 @@ import dataclasses import http -from typing import Tuple import demistomock as demisto # noqa: F401 from CommonServerPython import * # noqa: F401 @@ -262,9 +261,7 @@ def format_message(self, id): message="Rule {id} modification:", headers=["id", "name", "family", "connectionState", "lastModifiedBy", "job_id"], ), - "harmony-ep-policy-rule-metadata-list": ScheduleCommandMetadata( - outputs_prefix="Rule", message="Rule metadata list:" - ), + "harmony-ep-policy-rule-metadata-list": ScheduleCommandMetadata(outputs_prefix="Rule", message="Rule metadata list:"), "harmony-ep-push-operation-status-list": ScheduleCommandMetadata( outputs_prefix="PushOperation", message="Push operations status list:", @@ -449,13 +446,8 @@ def login(self): self._headers["x-mgmt-api-token"] = response["apiToken"] except DemistoException as exc: - if ( - exc.res is not None - and exc.res.status_code == http.HTTPStatus.BAD_REQUEST - ): - raise DemistoException( - f"Authentication failed: cookie not found. 
{exc}" - ) + if exc.res is not None and exc.res.status_code == http.HTTPStatus.BAD_REQUEST: + raise DemistoException(f"Authentication failed: cookie not found. {exc}") def job_status_get(self, job_id: str) -> dict[str, Any]: """Get job status and data by ID. @@ -525,9 +517,7 @@ def ioc_update( return self._http_request( method="PUT", url_suffix="/ioc/edit", - json_data=[ - {"comment": comment, "id": ioc_id, "type": ioc_type, "value": value} - ], + json_data=[{"comment": comment, "id": ioc_id, "type": ioc_type, "value": value}], ) def ioc_create( @@ -595,9 +585,7 @@ def rule_assignments_get( f"/policy/{rule_id}/assignments", ) - def rule_assignments_add( - self, rule_id: str, entities_ids: list[str] - ) -> dict[str, Any]: + def rule_assignments_add(self, rule_id: str, entities_ids: list[str]) -> dict[str, Any]: """Assigns the specified entities to the given rule. Specified IDs that are already assigned to the rule are ignored. @@ -617,9 +605,7 @@ def rule_assignments_add( json_data=entities_ids, ) - def rule_assignments_remove( - self, rule_id: str, entities_ids: list[str] - ) -> dict[str, Any]: + def rule_assignments_remove(self, rule_id: str, entities_ids: list[str]) -> dict[str, Any]: """Removes the specified entities from the given rule's assignments. Specified IDs that are not assigned to the rule are ignored. @@ -703,9 +689,7 @@ def rule_metadata_list( params=params, ) - def push_operation_status_list( - self, remediation_operation_id: str | None - ) -> dict[str, Any]: + def push_operation_status_list(self, remediation_operation_id: str | None) -> dict[str, Any]: """Gets the current statuses of all remediation operations or if a specific ID is specified, retrieve the current status of the given remediation operation. @@ -719,11 +703,7 @@ def push_operation_status_list( return self._http_request( "GET", - ( - f"/remediation/{remediation_operation_id}/status" - if remediation_operation_id - else "/remediation/status" - ), + (f"/remediation/{remediation_operation_id}/status" if remediation_operation_id else "/remediation/status"), ) def push_operation_get( @@ -885,9 +865,7 @@ def file_restore( json_data=request_body, ) - def remediation_computer_isolate( - self, request_body: dict[str, Any] - ) -> dict[str, Any]: + def remediation_computer_isolate(self, request_body: dict[str, Any]) -> dict[str, Any]: """Isolates the computers matching the given query. Isolation is the act of denying all network access from a given computer. @@ -905,9 +883,7 @@ def remediation_computer_isolate( json_data=request_body, ) - def remediation_computer_deisolate( - self, request_body: dict[str, Any] - ) -> dict[str, Any]: + def remediation_computer_deisolate(self, request_body: dict[str, Any]) -> dict[str, Any]: """De-Isolates the computers matching the given query. De-isolating a computer restores its access to network resources. @@ -1307,17 +1283,11 @@ def ioc_delete_command(args: dict[str, Any], client: Client) -> CommandResults: client.ioc_delete(ioc_ids=ioc_ids, delete_all=delete_all) return CommandResults( - readable_output=( - "All IOCs were deleted successfully." - if delete_all - else f"IOCs {ioc_ids} was deleted successfully." - ) + readable_output=("All IOCs were deleted successfully." if delete_all else f"IOCs {ioc_ids} was deleted successfully.") ) -def rule_assignments_get_command( - args: dict[str, Any], client: Client -) -> CommandResults: +def rule_assignments_get_command(args: dict[str, Any], client: Client) -> CommandResults: """Gets all entities directly assigned to the given rule. 
Args: @@ -1349,9 +1319,7 @@ def rule_assignments_get_command( ) -def rule_assignments_add_command( - args: dict[str, Any], client: Client -) -> CommandResults: +def rule_assignments_add_command(args: dict[str, Any], client: Client) -> CommandResults: """Assigns the specified entities to the given rule. Specified IDs that are already assigned to the rule are ignored. Args: @@ -1365,14 +1333,10 @@ def rule_assignments_add_command( entities_ids = argToList(args.get("entities_ids")) client.rule_assignments_add(rule_id=rule_id, entities_ids=entities_ids) - return CommandResults( - readable_output=f"Entities {entities_ids} were assigned to rule {rule_id} successfully." - ) + return CommandResults(readable_output=f"Entities {entities_ids} were assigned to rule {rule_id} successfully.") -def rule_assignments_remove_command( - args: dict[str, Any], client: Client -) -> CommandResults: +def rule_assignments_remove_command(args: dict[str, Any], client: Client) -> CommandResults: """Removes the specified entities from the given rule's assignments. Args: @@ -1386,9 +1350,7 @@ def rule_assignments_remove_command( entities_ids = argToList(args.get("entities_ids")) client.rule_assignments_remove(rule_id=rule_id, entities_ids=entities_ids) - return CommandResults( - readable_output=f"Entities {entities_ids} were removed from rule {rule_id} successfully." - ) + return CommandResults(readable_output=f"Entities {entities_ids} were removed from rule {rule_id} successfully.") @polling_function( @@ -1434,9 +1396,7 @@ def rule_modifications_get_command(args: dict[str, Any], client: Client) -> Poll PollResult: outputs, readable outputs and raw response for XSOAR. """ rule_id = args.get("rule_id", "") - SCHEDULED_COMMANDS_MAPPER[ - "harmony-ep-policy-rule-modifications-get" - ].format_message(rule_id) + SCHEDULED_COMMANDS_MAPPER["harmony-ep-policy-rule-modifications-get"].format_message(rule_id) if not args.get("job_id"): response = client.rule_modifications_get(rule_id=rule_id) @@ -1499,9 +1459,7 @@ def rule_metadata_list_command(args: dict[str, Any], client: Client) -> CommandR poll_message="Fetch remediation status list request is executing", requires_polling_arg=False, ) -def push_operation_status_list_command( - args: dict[str, Any], client: Client -) -> PollResult: +def push_operation_status_list_command(args: dict[str, Any], client: Client) -> PollResult: """Gets the current statuses of all remediation operations or if a specific ID is specified, retrieve the current status of the given remediation operation. @@ -1515,9 +1473,7 @@ def push_operation_status_list_command( if not args.get("job_id"): remediation_operation_id = args.get("remediation_operation_id") - response = client.push_operation_status_list( - remediation_operation_id=remediation_operation_id - ) + response = client.push_operation_status_list(remediation_operation_id=remediation_operation_id) args["job_id"] = response.get("jobId") return schedule_command(args, client, "harmony-ep-push-operation-status-list") @@ -1578,13 +1534,11 @@ def push_operation_abort_command(args: dict[str, Any], client: Client) -> PollRe """ if not args.get("job_id"): remediation_operation_id = args.get("remediation_operation_id", "") - SCHEDULED_COMMANDS_MAPPER["harmony-ep-push-operation-abort"].message = ( - f"Remediation operation {remediation_operation_id} was aborted successfully." - ) + SCHEDULED_COMMANDS_MAPPER[ + "harmony-ep-push-operation-abort" + ].message = f"Remediation operation {remediation_operation_id} was aborted successfully." 
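# [Editorial sketch, not part of the patch] The polling commands above all share one
# contract, assuming CommonServerPython's polling_function / PollResult machinery:
# the first invocation carries no "job_id", so the command starts the server-side job
# and records the returned jobId in args; every later poll re-enters with that job_id
# and defers to schedule_command, which keeps polling until the job reports DONE. The
# function name below is hypothetical; client.push_operation_abort and
# schedule_command are the integration's own names as they appear in this diff.

def example_polling_command(args: dict, client) -> PollResult:
    if not args.get("job_id"):  # first run: kick off the remediation job
        response = client.push_operation_abort(
            remediation_operation_id=args.get("remediation_operation_id", "")
        )
        args["job_id"] = response.get("jobId")  # carried into every subsequent poll
    # schedule_command re-checks the job status and sets continue_to_poll accordingly
    return schedule_command(args, client, "harmony-ep-push-operation-abort")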
- response = client.push_operation_abort( - remediation_operation_id=remediation_operation_id - ) + response = client.push_operation_abort(remediation_operation_id=remediation_operation_id) args["job_id"] = response.get("jobId") return schedule_command(args, client, "harmony-ep-push-operation-abort") @@ -1779,9 +1733,7 @@ def file_restore_command(args: dict[str, Any], client: Client) -> PollResult: poll_message="Computer isolate request is executing", requires_polling_arg=False, ) -def remediation_computer_isolate_command( - args: dict[str, Any], client: Client -) -> PollResult: +def remediation_computer_isolate_command(args: dict[str, Any], client: Client) -> PollResult: """Isolates the computers matching the given query. Isolation is the act of denying all network access from a given computer. Args: @@ -1806,9 +1758,7 @@ def remediation_computer_isolate_command( poll_message="Computer de-isolate request is executing", requires_polling_arg=False, ) -def remediation_computer_deisolate_command( - args: dict[str, Any], client: Client -) -> PollResult: +def remediation_computer_deisolate_command(args: dict[str, Any], client: Client) -> PollResult: """De-Isolates the computers matching the given query. De-isolating a computer restores its access to network resources. Args: @@ -2036,9 +1986,7 @@ def agent_registry_key_add_command(args: dict[str, Any], client: Client) -> Poll poll_message="Registry key remove request is executing", requires_polling_arg=False, ) -def agent_registry_key_delete_command( - args: dict[str, Any], client: Client -) -> PollResult: +def agent_registry_key_delete_command(args: dict[str, Any], client: Client) -> PollResult: """Removes the given registry key or value to the registry of computers matching the given query. Args: @@ -2226,8 +2174,7 @@ def test_module(client: Client) -> str: client.ioc_list(0, 1) except DemistoException as exc: if exc.res is not None and ( - exc.res.status_code == http.HTTPStatus.UNAUTHORIZED - or exc.res.status_code == http.HTTPStatus.FORBIDDEN + exc.res.status_code == http.HTTPStatus.UNAUTHORIZED or exc.res.status_code == http.HTTPStatus.FORBIDDEN ): return "Authorization Error: Invalid URL or credentials." raise exc @@ -2238,9 +2185,7 @@ def test_module(client: Client) -> str: # Helper Commands # -def schedule_command( - args: dict[str, Any], client: Client, command_name: str -) -> PollResult: +def schedule_command(args: dict[str, Any], client: Client, command_name: str) -> PollResult: """Build scheduled command in case: - Job state is not 'DONE' - Job state is 'DONE' but the API response data is a remediation operation ID. 
@@ -2268,22 +2213,17 @@ def schedule_command( # Save new schedule arguments in integration context # (cause for the second run there are new arguments or/and values) - set_integration_context( - {"job_id": response["jobId"], "remediation_operation_id": command_data} - ) + set_integration_context({"job_id": response["jobId"], "remediation_operation_id": command_data}) return PollResult( response=command_results, continue_to_poll=True, args_for_next_run=args, ) else: - - updated_command_readable_output, updated_command_response = ( - prepare_command_output_and_readable_output( - command_data=command_data, - command_name=command_name, - job_id=args["job_id"], - ) + updated_command_readable_output, updated_command_response = prepare_command_output_and_readable_output( + command_data=command_data, + command_name=command_name, + job_id=args["job_id"], ) command_results.readable_output = get_readable_output( @@ -2306,9 +2246,7 @@ def schedule_command( if sample_state == "FAILED": clear_integration_context() # In case the job not succeeded raise the error - raise DemistoException( - f"Executing {args['job_id']} for Harmony Endpoint failed. Error: {command_results.raw_response}" - ) + raise DemistoException(f"Executing {args['job_id']} for Harmony Endpoint failed. Error: {command_results.raw_response}") return PollResult( response=command_results, @@ -2335,9 +2273,7 @@ def update_command_results( command_results.raw_response = updated_command_response command_results.outputs = updated_command_response command_results.outputs_key_field = "job_id" - command_results.outputs_prefix = ( - f"HarmonyEP.{SCHEDULED_COMMANDS_MAPPER[command_name].outputs_prefix}" - ) + command_results.outputs_prefix = f"HarmonyEP.{SCHEDULED_COMMANDS_MAPPER[command_name].outputs_prefix}" return command_results @@ -2456,7 +2392,7 @@ def prepare_computer_list_output_and_readable_output( def prepare_push_operation_output_and_readable_output( command_data: list[dict[str, Any]], job_id: str, -) -> Tuple[list | dict[str, Any], list | dict[str, Any]]: +) -> tuple[list | dict[str, Any], list | dict[str, Any]]: """Update the API response data for the readable output in case the API response is push operation data. Args: @@ -2475,17 +2411,11 @@ def prepare_push_operation_output_and_readable_output( "machine_id": dict_safe_get(data, ["machine", "id"]), "machine_name": dict_safe_get(data, ["machine", "name"]), "operation_status": dict_safe_get(data, ["operation", "status"]), - "operation_response_status": dict_safe_get( - data, ["operation", "response", "status"] - ), - "operation_response_output": dict_safe_get( - data, ["operation", "response", "output"] - ), + "operation_response_status": dict_safe_get(data, ["operation", "response", "status"]), + "operation_response_output": dict_safe_get(data, ["operation", "response", "output"]), } ) - data["operation"] |= { - "id": dict_safe_get(get_integration_context(), ["remediation_operation_id"]) - } + data["operation"] |= {"id": dict_safe_get(get_integration_context(), ["remediation_operation_id"])} data["job_id"] = job_id return updated_command_readable_output, command_data @@ -2507,9 +2437,7 @@ def validate_pagination_arguments( ValueError: Appropriate error message. """ if page_size and (page_size < MIN_PAGE_SIZE or page_size > MAX_PAGE_SIZE): - raise ValueError( - f"page_size argument must be greater than {MIN_PAGE_SIZE} and smaller than {MAX_PAGE_SIZE}." 
- ) + raise ValueError(f"page_size argument must be greater than {MIN_PAGE_SIZE} and smaller than {MAX_PAGE_SIZE}.") if page and page < MIN_PAGE_NUM: raise ValueError(f"page argument must be greater than {MIN_PAGE_NUM - 1}.") if limit and limit <= MIN_LIMIT: @@ -2539,9 +2467,7 @@ def get_pagination_args(args: dict[str, Any]) -> tuple: new_page_size = page_size new_page = page - 1 - pagination_message = ( - f"Showing page {new_page+1}.\nCurrent page size: {new_page_size}." - ) + pagination_message = f"Showing page {new_page+1}.\nCurrent page size: {new_page_size}." return new_page, new_page_size, pagination_message @@ -2557,14 +2483,10 @@ def validate_filter_arguments(column_name: str | None = None, filter_type: str = ValueError: Raise error in case column_name or filter_type values are not allowed. """ if column_name and column_name not in COLUMN_NAMES: - raise ValueError( - f"'column_name' must be one of the followings: {COLUMN_NAMES}." - ) + raise ValueError(f"'column_name' must be one of the followings: {COLUMN_NAMES}.") if filter_type and filter_type not in FILTER_TYPES: - raise ValueError( - f"'filter_type' must be one of the followings: {FILTER_TYPES}." - ) + raise ValueError(f"'filter_type' must be one of the followings: {FILTER_TYPES}.") def extract_query_filter(args: dict[str, Any]) -> list[dict[str, Any]]: @@ -2585,9 +2507,7 @@ def extract_query_filter(args: dict[str, Any]) -> list[dict[str, Any]]: query_parts = query.split(" ") if len(query_parts) != 3: - raise ValueError( - "'filter' must be in the following format: 'column_name filter_type filter_value'." - ) + raise ValueError("'filter' must be in the following format: 'column_name filter_type filter_value'.") column_name = query_parts[0] filter_type = query_parts[1] @@ -2616,9 +2536,7 @@ def extract_query_filter(args: dict[str, Any]) -> list[dict[str, Any]]: computer_last_connection_times = argToList(computer_last_connection) if len(computer_last_connection_times) != 2: - raise ValueError( - "'computer_last_connection' must be in the following format: 'YYYY-MM-DD HH:MM, YYYY-MM-DD HH:MM'." 
- ) + raise ValueError("'computer_last_connection' must be in the following format: 'YYYY-MM-DD HH:MM, YYYY-MM-DD HH:MM'.") query_filter += [ { @@ -2717,7 +2635,6 @@ def convert_unix_to_date_string(unix_timestamp: int) -> str: def main() -> None: - params: dict[str, Any] = demisto.params() args: dict[str, Any] = demisto.args() base_url = params.get("base_url", "") @@ -2731,7 +2648,6 @@ def main() -> None: demisto.debug(f"Command being called is {command}") try: - client: Client = Client( base_url=base_url, client_id=client_id, diff --git a/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint_test.py b/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint_test.py index cdf613d07e78..3943164c1321 100644 --- a/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint_test.py +++ b/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint_test.py @@ -1,18 +1,16 @@ import json import os import unittest.mock -from typing import Any, Callable +from collections.abc import Callable +from typing import Any +import CheckPointHarmonyEndpoint import CommonServerPython import pytest -import CheckPointHarmonyEndpoint - TEST_DATA = "test_data" BASE_URL = "https://www.example.com/" -API_URL = CommonServerPython.urljoin( - BASE_URL, "app/endpoint-web-mgmt/harmony/endpoint/api/v1" -) +API_URL = CommonServerPython.urljoin(BASE_URL, "app/endpoint-web-mgmt/harmony/endpoint/api/v1") def load_mock_response(file_name: str) -> dict[str, Any] | list[dict[str, Any]]: @@ -25,7 +23,7 @@ def load_mock_response(file_name: str) -> dict[str, Any] | list[dict[str, Any]]: """ file_path = os.path.join(TEST_DATA, file_name) - with open(file_path, mode="r", encoding="utf-8") as mock_file: + with open(file_path, encoding="utf-8") as mock_file: return json.loads(mock_file.read()) @@ -85,9 +83,7 @@ def test_job_status_get_command( json=mock_response, ) - command_results = CheckPointHarmonyEndpoint.job_status_get_command( - command_args, mock_client - ) + command_results = CheckPointHarmonyEndpoint.job_status_get_command(command_args, mock_client) assert command_results.raw_response == mock_response assert command_results.outputs == mock_response @@ -138,13 +134,9 @@ def test_ioc_list_command( json=mock_response, ) - command_results = CheckPointHarmonyEndpoint.ioc_list_command( - command_args, mock_client - ) - mock_response["content"][0]["modifiedOn"] = ( - CheckPointHarmonyEndpoint.convert_unix_to_date_string( - mock_response["content"][0]["modifiedOn"] - ) + command_results = CheckPointHarmonyEndpoint.ioc_list_command(command_args, mock_client) + mock_response["content"][0]["modifiedOn"] = CheckPointHarmonyEndpoint.convert_unix_to_date_string( + mock_response["content"][0]["modifiedOn"] ) assert command_results.raw_response == mock_response @@ -195,9 +187,7 @@ def test_ioc_update_command( json=mock_response, ) - command_results = CheckPointHarmonyEndpoint.ioc_update_command( - command_args, mock_client - ) + command_results = CheckPointHarmonyEndpoint.ioc_update_command(command_args, mock_client) assert command_results.raw_response == mock_response assert command_results.outputs == mock_response @@ -279,9 +269,7 @@ def test_ioc_delete_command( json=mock_response, ) - command_results = CheckPointHarmonyEndpoint.ioc_delete_command( - command_args, mock_client - ) + command_results = CheckPointHarmonyEndpoint.ioc_delete_command(command_args, mock_client) assert 
command_results.readable_output == readable_output @@ -309,9 +297,7 @@ def test_rule_assignments_get_command( json=mock_response, ) output = {"id": 1, "assignments": mock_response} - command_results = CheckPointHarmonyEndpoint.rule_assignments_get_command( - {"rule_id": 1}, mock_client - ) + command_results = CheckPointHarmonyEndpoint.rule_assignments_get_command({"rule_id": 1}, mock_client) assert command_results.outputs_prefix == "HarmonyEP.Rule" assert command_results.outputs_key_field == "id" @@ -346,10 +332,7 @@ def test_rule_assignments_add_command( {"rule_id": 1, "entities_ids": ["3", "4"]}, mock_client ) - assert ( - command_results.readable_output - == "Entities ['3', '4'] were assigned to rule 1 successfully." - ) + assert command_results.readable_output == "Entities ['3', '4'] were assigned to rule 1 successfully." def test_rule_assignments_remove_command( @@ -379,10 +362,7 @@ def test_rule_assignments_remove_command( {"rule_id": 1, "entities_ids": ["3", "4"]}, mock_client ) - assert ( - command_results.readable_output - == "Entities ['3', '4'] were removed from rule 1 successfully." - ) + assert command_results.readable_output == "Entities ['3', '4'] were removed from rule 1 successfully." @pytest.mark.parametrize( @@ -421,21 +401,13 @@ def test_rule_metadata_list_command( - Ensure that the CommandResults are as expected. """ - mock_response: dict[str, Any] | list[dict[str, Any]] = load_mock_response( - response_file - ) + mock_response: dict[str, Any] | list[dict[str, Any]] = load_mock_response(response_file) requests_mock.get( url=f"{API_URL}/{endpoint}", json=mock_response, ) - command_results = CheckPointHarmonyEndpoint.rule_metadata_list_command( - command_args, mock_client - ) - mock_response = ( - mock_response[: command_args["limit"]] - if "limit" in command_args - else mock_response - ) + command_results = CheckPointHarmonyEndpoint.rule_metadata_list_command(command_args, mock_client) + mock_response = mock_response[: command_args["limit"]] if "limit" in command_args else mock_response assert command_results.raw_response == mock_response assert command_results.outputs == mock_response @@ -473,14 +445,10 @@ def test_rule_metadata_list_command( None, CommonServerPython.PollResult( response=CommonServerPython.CommandResults( - outputs=load_mock_response( - "push_operation_status_in_progress.json" - ), + outputs=load_mock_response("push_operation_status_in_progress.json"), outputs_prefix="HarmonyEP.Job", outputs_key_field="id", - raw_response=load_mock_response( - "push_operation_status_in_progress.json" - ), + raw_response=load_mock_response("push_operation_status_in_progress.json"), ), continue_to_poll=True, args_for_next_run={"job_id": "3"}, @@ -516,9 +484,7 @@ def test_rule_metadata_list_command( outputs=load_mock_response("push_operation_remediation_data.json"), outputs_prefix="HarmonyEP.Job", outputs_key_field="id", - raw_response=load_mock_response( - "push_operation_remediation_data.json" - ), + raw_response=load_mock_response("push_operation_remediation_data.json"), ), continue_to_poll=True, args_for_next_run={"job_id": "3"}, @@ -581,33 +547,21 @@ def test_schedule_command( "CheckPointHarmonyEndpoint.get_integration_context", return_value=integration_context, ), - unittest.mock.patch( - "CheckPointHarmonyEndpoint.set_integration_context" - ) as mock_set_integration_context, + unittest.mock.patch("CheckPointHarmonyEndpoint.set_integration_context") as mock_set_integration_context, ): - poll_result: CommonServerPython.PollResult = ( - 
CheckPointHarmonyEndpoint.schedule_command( - client=mock_client, - args=args, - command_name=command_name, - ) + poll_result: CommonServerPython.PollResult = CheckPointHarmonyEndpoint.schedule_command( + client=mock_client, + args=args, + command_name=command_name, ) if expected_integration_context: - mock_set_integration_context.assert_called_once_with( - expected_integration_context - ) + mock_set_integration_context.assert_called_once_with(expected_integration_context) assert poll_result.continue_to_poll == expected_poll_result.continue_to_poll assert poll_result.args_for_next_run == expected_poll_result.args_for_next_run - assert ( - poll_result.response.outputs_prefix - == expected_poll_result.response.outputs_prefix - ) - assert ( - poll_result.response.outputs_key_field - == expected_poll_result.response.outputs_key_field - ) + assert poll_result.response.outputs_prefix == expected_poll_result.response.outputs_prefix + assert poll_result.response.outputs_key_field == expected_poll_result.response.outputs_key_field @pytest.mark.parametrize( @@ -982,23 +936,17 @@ def test_all_schedule_commands( ) with ( - unittest.mock.patch( - "CheckPointHarmonyEndpoint.schedule_command" - ) as mock_schedule_command, + unittest.mock.patch("CheckPointHarmonyEndpoint.schedule_command") as mock_schedule_command, unittest.mock.patch("demistomock.command", return_value=command_name), ): request_function(command_args, mock_client) - mock_schedule_command.assert_called_once_with( - command_args, mock_client, command_name - ) + mock_schedule_command.assert_called_once_with(command_args, mock_client, command_name) # test helper commands -@pytest.mark.parametrize( - "page_size, page, limit", [(-1, 0, 10), (5, -1, 5), (5, 5, -1)] -) +@pytest.mark.parametrize("page_size, page, limit", [(-1, 0, 10), (5, -1, 5), (5, 5, -1)]) def test_validate_pagination_arguments(page_size, page, limit): """ Given: @@ -1012,9 +960,7 @@ def test_validate_pagination_arguments(page_size, page, limit): """ with pytest.raises(ValueError): - CheckPointHarmonyEndpoint.validate_pagination_arguments( - page=page, page_size=page_size, limit=limit - ) + CheckPointHarmonyEndpoint.validate_pagination_arguments(page=page, page_size=page_size, limit=limit) @pytest.mark.parametrize( @@ -1042,9 +988,7 @@ def test_get_pagination_args(args: dict[str, str], expected): "CommonServerPython.arg_to_number", side_effect=lambda x: int(x) if x is not None else None, ): - with unittest.mock.patch( - "CheckPointHarmonyEndpoint.validate_pagination_arguments" - ) as mock_validate: + with unittest.mock.patch("CheckPointHarmonyEndpoint.validate_pagination_arguments") as mock_validate: assert CheckPointHarmonyEndpoint.get_pagination_args(args) == expected mock_validate.assert_called() @@ -1052,7 +996,5 @@ def test_get_pagination_args(args: dict[str, str], expected): def test_validate_filter_arguments(): """Test validate_filter_arguments function and ensure that ValueError is raised.""" with pytest.raises(ValueError) as exc_info: - CheckPointHarmonyEndpoint.validate_filter_arguments( - column_name="invalid_name", filter_type="equals" - ) + CheckPointHarmonyEndpoint.validate_filter_arguments(column_name="invalid_name", filter_type="equals") assert "'column_name' must be one of the followings" in str(exc_info.value) diff --git a/Packs/CheckPointHarmonyEndpoint/ReleaseNotes/1_0_3.md b/Packs/CheckPointHarmonyEndpoint/ReleaseNotes/1_0_3.md new file mode 100644 index 000000000000..faab8644d1cc --- /dev/null +++ b/Packs/CheckPointHarmonyEndpoint/ReleaseNotes/1_0_3.md @@ 
-0,0 +1,6 @@ + +#### Integrations + +##### Check Point Harmony Endpoint + +- Metadata and documentation improvements. diff --git a/Packs/CheckPointHarmonyEndpoint/pack_metadata.json b/Packs/CheckPointHarmonyEndpoint/pack_metadata.json index 290cdec9731f..4cf427d617f5 100644 --- a/Packs/CheckPointHarmonyEndpoint/pack_metadata.json +++ b/Packs/CheckPointHarmonyEndpoint/pack_metadata.json @@ -2,7 +2,7 @@ "name": "Check Point Harmony Endpoint", "description": "Check Point Harmony Endpoint provides a complete endpoint security solution built to protect organizations and the remote workforce from today's complex threat landscape.", "support": "xsoar", - "currentVersion": "1.0.2", + "currentVersion": "1.0.3", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", From 09538fa220e970eed81328852cc53f239550b614 Mon Sep 17 00:00:00 2001 From: Content Bot Date: Sun, 23 Mar 2025 12:53:06 +0000 Subject: [PATCH 12/18] ThreatQ: Apply ruff Format --- .../Integrations/ThreatQ_v2/ThreatQ_v2.py | 1223 ++++++++--------- .../ThreatQ_v2/ThreatQ_v2_test.py | 419 +++--- Packs/ThreatQ/ReleaseNotes/1_0_29.md | 6 + Packs/ThreatQ/pack_metadata.json | 2 +- 4 files changed, 795 insertions(+), 855 deletions(-) create mode 100644 Packs/ThreatQ/ReleaseNotes/1_0_29.md diff --git a/Packs/ThreatQ/Integrations/ThreatQ_v2/ThreatQ_v2.py b/Packs/ThreatQ/Integrations/ThreatQ_v2/ThreatQ_v2.py index 388701708d3a..0adbc622d25d 100644 --- a/Packs/ThreatQ/Integrations/ThreatQ_v2/ThreatQ_v2.py +++ b/Packs/ThreatQ/Integrations/ThreatQ_v2/ThreatQ_v2.py @@ -1,156 +1,155 @@ import demistomock as demisto # noqa: F401 from CommonServerPython import * # noqa: F401 -''' IMPORTS ''' +""" IMPORTS """ import json import shutil -import urllib3 import requests +import urllib3 # disable insecure warnings urllib3.disable_warnings() -''' GLOBAL VARIABLES ''' -SERVER_URL = demisto.params()['serverUrl'].rstrip('/') -API_URL = SERVER_URL + '/api' -CLIENT_ID = demisto.params()['client_id'] -EMAIL = demisto.getParam('credentials').get('identifier') -PASSWORD = demisto.getParam('credentials').get('password') -USE_SSL = not demisto.params().get('insecure', False) -THRESHOLD = int(demisto.params().get('threshold', '0')) +""" GLOBAL VARIABLES """ +SERVER_URL = demisto.params()["serverUrl"].rstrip("/") +API_URL = SERVER_URL + "/api" +CLIENT_ID = demisto.params()["client_id"] +EMAIL = demisto.getParam("credentials").get("identifier") +PASSWORD = demisto.getParam("credentials").get("password") +USE_SSL = not demisto.params().get("insecure", False) +THRESHOLD = int(demisto.params().get("threshold", "0")) if THRESHOLD: THRESHOLD = int(THRESHOLD) -url_regex = r'(?:(?:https?|ftp|hxxps?):\/\/|www\[?\.\]?|ftp\[?\.\]?)?(?:[-\w\d]+\[?\.\]?)+[-\w\d]+(?::\d+)?(?:(?:\/|\?)'\ - r'[-\w\d+&@#\/%=~_$?!\-:,.\(\);]*[\w\d+&@#\/%=~_$\(\);])?' +url_regex = ( + r"(?:(?:https?|ftp|hxxps?):\/\/|www\[?\.\]?|ftp\[?\.\]?)?(?:[-\w\d]+\[?\.\]?)+[-\w\d]+(?::\d+)?(?:(?:\/|\?)" + r"[-\w\d+&@#\/%=~_$?!\-:,.\(\);]*[\w\d+&@#\/%=~_$\(\);])?" 
+) REGEX_MAP = { - 'email': re.compile(emailRegex, regexFlags), - 'url': re.compile(url_regex, regexFlags), - 'md5': re.compile(r'\b[0-9a-fA-F]{32}\b', regexFlags), - 'sha1': re.compile(r'\b[0-9a-fA-F]{40}\b', regexFlags), - 'sha256': re.compile(r'\b[0-9a-fA-F]{64}\b', regexFlags), + "email": re.compile(emailRegex, regexFlags), + "url": re.compile(url_regex, regexFlags), + "md5": re.compile(r"\b[0-9a-fA-F]{32}\b", regexFlags), + "sha1": re.compile(r"\b[0-9a-fA-F]{40}\b", regexFlags), + "sha256": re.compile(r"\b[0-9a-fA-F]{64}\b", regexFlags), } TQ_TO_DEMISTO_INDICATOR_TYPES = { - 'IP Address': 'ip', - 'IPv6 Address': 'ip', - 'Email Address': 'email', - 'URL': 'url', - 'MD5': 'file', - 'SHA-1': 'file', - 'SHA-256': 'file', - 'FQDN': 'domain' + "IP Address": "ip", + "IPv6 Address": "ip", + "Email Address": "email", + "URL": "url", + "MD5": "file", + "SHA-1": "file", + "SHA-256": "file", + "FQDN": "domain", } INDICATOR_TYPES = { - 'File Path': 'file', - 'File': 'file', - 'MD5': 'file', - 'SHA-1': 'file', - 'SHA-256': 'file', - 'SHA-384': 'file', - 'SHA-512': 'file', - 'IP Address': 'ip', - 'IPv6 Address': 'ip', - 'URL': 'url', - 'URL Path': 'url', - 'FQDN': 'domain', - 'Email Address': 'email', + "File Path": "file", + "File": "file", + "MD5": "file", + "SHA-1": "file", + "SHA-256": "file", + "SHA-384": "file", + "SHA-512": "file", + "IP Address": "ip", + "IPv6 Address": "ip", + "URL": "url", + "URL Path": "url", + "FQDN": "domain", + "Email Address": "email", } TABLE_HEADERS = { - 'indicator': ['ID', 'Type', 'Value', 'Description', 'Status', - 'TQScore', 'CreatedAt', 'UpdatedAt', 'URL'], - 'adversary': ['ID', 'Name', 'CreatedAt', 'UpdatedAt', 'URL'], - 'event': ['ID', 'Type', 'Title', 'Description', 'Occurred', 'CreatedAt', 'UpdatedAt', 'URL'], - 'attachment': ['ID', 'Name', 'Title', 'Type', 'Size', 'Description', 'MD5', 'CreatedAt', 'UpdatedAt', - 'MalwareLocked', 'ContentType', 'URL'], - 'attributes': ['ID', 'Name', 'Value'], - 'sources': ['ID', 'Name', 'TLP'] + "indicator": ["ID", "Type", "Value", "Description", "Status", "TQScore", "CreatedAt", "UpdatedAt", "URL"], + "adversary": ["ID", "Name", "CreatedAt", "UpdatedAt", "URL"], + "event": ["ID", "Type", "Title", "Description", "Occurred", "CreatedAt", "UpdatedAt", "URL"], + "attachment": [ + "ID", + "Name", + "Title", + "Type", + "Size", + "Description", + "MD5", + "CreatedAt", + "UpdatedAt", + "MalwareLocked", + "ContentType", + "URL", + ], + "attributes": ["ID", "Name", "Value"], + "sources": ["ID", "Name", "TLP"], } -OBJ_DIRECTORY = { - 'indicator': 'indicators', - 'adversary': 'adversaries', - 'event': 'events', - 'attachment': 'attachments' -} +OBJ_DIRECTORY = {"indicator": "indicators", "adversary": "adversaries", "event": "events", "attachment": "attachments"} -RELATED_KEY = { - 'indicator': 'RelatedIndicator', - 'adversary': 'RelatedAdversary', - 'event': 'RelatedEvent' -} +RELATED_KEY = {"indicator": "RelatedIndicator", "adversary": "RelatedAdversary", "event": "RelatedEvent"} CONTEXT_PATH = { - 'indicator': 'ThreatQ.Indicator((val.ID && val.ID === obj.ID) || (val.Value && val.Value === obj.Value))', - 'adversary': 'ThreatQ.Adversary(val.ID === obj.ID)', - 'event': 'ThreatQ.Event(val.ID === obj.ID)', - 'attachment': 'ThreatQ.File(val.ID === obj.ID)' + "indicator": "ThreatQ.Indicator((val.ID && val.ID === obj.ID) || (val.Value && val.Value === obj.Value))", + "adversary": "ThreatQ.Adversary(val.ID === obj.ID)", + "event": "ThreatQ.Event(val.ID === obj.ID)", + "attachment": "ThreatQ.File(val.ID === obj.ID)", } -TABLE_TLP = { - 4: 
"WHITE", - 3: "GREEN", - 2: "AMBER", - 1: "RED" -} +TABLE_TLP = {4: "WHITE", 3: "GREEN", 2: "AMBER", 1: "RED"} -''' HELPER FUNCTIONS ''' +""" HELPER FUNCTIONS """ def status_id_to_status(status_id): - res = tq_request('GET', f'/indicator/statuses/{status_id}') - return res.get('data').get('name') + res = tq_request("GET", f"/indicator/statuses/{status_id}") + return res.get("data").get("name") def type_id_to_indicator_type(type_id): - res = tq_request('GET', f'/indicator/types/{type_id}') - return res.get('data').get('name') + res = tq_request("GET", f"/indicator/types/{type_id}") + return res.get("data").get("name") def type_id_to_event_type(type_id): - res = tq_request('GET', f'/event/types/{type_id}') - return res.get('data').get('name') + res = tq_request("GET", f"/event/types/{type_id}") + return res.get("data").get("name") def type_id_to_file_type(type_id): - res = tq_request('GET', f'/attachments/types/{type_id}') - return res.get('data').get('name') + res = tq_request("GET", f"/attachments/types/{type_id}") + return res.get("data").get("name") def get_errors_string_from_bad_request(bad_request_results, status_code): if status_code == 404: - return 'Object does not exist.\n' + return "Object does not exist.\n" # Errors could be retrieved in two forms: # 1. A dictionary of fields and errors list related to the fields, all under 'data' key in the response json object # 2. A list, directly within the response object - errors_string = 'Errors from service:\n\n' + errors_string = "Errors from service:\n\n" # First form - errors_dict = bad_request_results.json().get('data', {}).get('errors', {}) + errors_dict = bad_request_results.json().get("data", {}).get("errors", {}) if errors_dict: for error_num, (key, lst) in enumerate(errors_dict.items(), 1): - curr_error_string = '\n'.join(lst) + '\n\n' - errors_string += f'Error #{error_num}. In \'{key}\':\n{curr_error_string}' + curr_error_string = "\n".join(lst) + "\n\n" + errors_string += f"Error #{error_num}. In '{key}':\n{curr_error_string}" return errors_string # Second form - errors_list = bad_request_results.json().get('errors', []) + errors_list = bad_request_results.json().get("errors", []) if errors_list: for error_num, error in enumerate(errors_list, 1): if isinstance(error, str): - errors_string += f'Error #{error_num}: {error}\n' + errors_string += f"Error #{error_num}: {error}\n" else: # error is a list for i in range(len(error)): - errors_string += f'Error #{error_num}.{i}: {error[i]}\n' + errors_string += f"Error #{error_num}.{i}: {error[i]}\n" return errors_string - return '' # Service did not provide any errors. + return "" # Service did not provide any errors. 
def get_tlp_from_indicator(sources): @@ -159,37 +158,36 @@ def get_tlp_from_indicator(sources): tlp = 0 for source in sources: try: - tlp = int(source.get('TLP')) if int(source.get('TLP')) > tlp else tlp + tlp = max(tlp, int(source.get("TLP"))) except Exception as e: - demisto.debug(f"Failed getting TLP from {source.get('Name')} source:\n{str(e)}") + demisto.debug(f"Failed getting TLP from {source.get('Name')} source:\n{e!s}") continue return TABLE_TLP.get(tlp) def get_generic_context(indicator, generic_context=None): - - tlp = get_tlp_from_indicator(indicator.get('Source')) + tlp = get_tlp_from_indicator(indicator.get("Source")) if tlp: if generic_context: - generic_context['TrafficLightProtocol'] = tlp + generic_context["TrafficLightProtocol"] = tlp else: - generic_context = {'Data': indicator.get('Value'), 'TrafficLightProtocol': tlp} + generic_context = {"Data": indicator.get("Value"), "TrafficLightProtocol": tlp} else: - generic_context = generic_context or {'Data': indicator.get('Value')} + generic_context = generic_context or {"Data": indicator.get("Value")} return generic_context def tq_request(method, url_suffix, params=None, files=None, retrieve_entire_response=False, allow_redirects=True): api_call_headers = None - if url_suffix != '/token': + if url_suffix != "/token": access_token = get_access_token() - api_call_headers = {'Authorization': 'Bearer ' + access_token} + api_call_headers = {"Authorization": "Bearer " + access_token} if not files: params = json.dumps(params) - api_call_headers.update({'Content-Type': 'application/json'}) + api_call_headers.update({"Content-Type": "application/json"}) demisto.debug(f"[TEST] - Sending request with url endpoint: {url_suffix}") response = requests.request( @@ -199,45 +197,45 @@ def tq_request(method, url_suffix, params=None, files=None, retrieve_entire_resp headers=api_call_headers, verify=USE_SSL, files=files, - allow_redirects=allow_redirects + allow_redirects=allow_redirects, ) demisto.debug(f"Response status code: {response.status_code}") if response.status_code >= 400: errors_string = get_errors_string_from_bad_request(response, response.status_code) - error_message = f'Received an error - status code [{response.status_code}].\n{errors_string}' + error_message = f"Received an error - status code [{response.status_code}].\n{errors_string}" return_error(error_message) if retrieve_entire_response: return response - elif method != 'DELETE': # the DELETE request returns nothing in response + elif method != "DELETE": # the DELETE request returns nothing in response return response.json() return None def request_new_access_token(): - params = {'grant_type': 'password', 'email': EMAIL, 'password': PASSWORD, 'client_id': CLIENT_ID} - access_token_response = tq_request('POST', '/token', params, allow_redirects=False) + params = {"grant_type": "password", "email": EMAIL, "password": PASSWORD, "client_id": CLIENT_ID} + access_token_response = tq_request("POST", "/token", params, allow_redirects=False) updated_integration_context = { - 'access_token': access_token_response['access_token'], - 'access_token_creation_time': int(time.time()) - 1, # decrementing one second to be on the safe side - 'access_token_expires_in': access_token_response['expires_in'] + "access_token": access_token_response["access_token"], + "access_token_creation_time": int(time.time()) - 1, # decrementing one second to be on the safe side + "access_token_expires_in": access_token_response["expires_in"], } demisto.setIntegrationContext(updated_integration_context) - 
threatq_access_token = access_token_response['access_token'] + threatq_access_token = access_token_response["access_token"] return threatq_access_token def access_token_not_expired(): epoch_time_now = time.time() - epoch_time_when_token_granted = demisto.getIntegrationContext().get('access_token_creation_time') - token_time_until_expiration = demisto.getIntegrationContext().get('access_token_expires_in') + epoch_time_when_token_granted = demisto.getIntegrationContext().get("access_token_creation_time") + token_time_until_expiration = demisto.getIntegrationContext().get("access_token_expires_in") return int(epoch_time_now) - int(epoch_time_when_token_granted) < int(token_time_until_expiration) def get_access_token(): - existing_access_token = demisto.getIntegrationContext().get('access_token') + existing_access_token = demisto.getIntegrationContext().get("access_token") if existing_access_token and access_token_not_expired(): return existing_access_token else: @@ -246,17 +244,17 @@ def get_access_token(): def make_create_object_request(obj_type, params): - url_suffix = f'/{OBJ_DIRECTORY[obj_type]}' - res = tq_request('POST', url_suffix, params) + url_suffix = f"/{OBJ_DIRECTORY[obj_type]}" + res = tq_request("POST", url_suffix, params) # For some reason, only while creating an indicator, the response data is a list of dicts with size 1. # Creating other objects simply returns one dict, as expected. - data = res['data'][0] if obj_type == 'indicator' else res['data'] + data = res["data"][0] if obj_type == "indicator" else res["data"] data = data_to_demisto_format(data, obj_type) entry_context = {CONTEXT_PATH[obj_type]: createContext(data, removeNull=True)} - readable_title = f'{obj_type.title()} was successfully created.' + readable_title = f"{obj_type.title()} was successfully created." readable = build_readable(readable_title, obj_type, data) return_outputs(readable, entry_context, res) @@ -266,16 +264,16 @@ def make_edit_request_for_an_object(obj_id, obj_type, params): # Remove items with empty values. 
params = {k: v for k, v in params.items() if v is not None} - url_suffix = f'/{OBJ_DIRECTORY[obj_type]}/{obj_id}?with=attributes,sources' - if obj_type == 'indicator': - url_suffix += ',score' + url_suffix = f"/{OBJ_DIRECTORY[obj_type]}/{obj_id}?with=attributes,sources" + if obj_type == "indicator": + url_suffix += ",score" - res = tq_request('PUT', url_suffix, params) + res = tq_request("PUT", url_suffix, params) - data = data_to_demisto_format(res['data'], obj_type) + data = data_to_demisto_format(res["data"], obj_type) entry_context = {CONTEXT_PATH[obj_type]: createContext(data, removeNull=True)} - readable_title = f'Successfully edited {obj_type} with ID {obj_id}' + readable_title = f"Successfully edited {obj_type} with ID {obj_id}" readable = build_readable(readable_title, obj_type, data) return_outputs(readable, entry_context, res) @@ -284,81 +282,74 @@ def make_edit_request_for_an_object(obj_id, obj_type, params): def make_indicator_reputation_request(indicator_type, value, generic_context): # Search for the indicator ID by keyword: body = {} - if indicator_type == 'ip': + if indicator_type == "ip": tq_type = "IP Address" - elif indicator_type == 'url': + elif indicator_type == "url": tq_type = "URL" is_httpx = False - if value.startswith('http://'): - value_without_proto = value.replace('http://', '') + if value.startswith("http://"): + value_without_proto = value.replace("http://", "") is_httpx = True - elif value.startswith('https://'): - value_without_proto = value.replace('https://', '') + elif value.startswith("https://"): + value_without_proto = value.replace("https://", "") is_httpx = True else: value_without_proto = value demisto.debug("value doesn't start with either prefixes. Initializing value_without_proto to value.") if is_httpx: - body = {"criteria": {"+or": [{"value": value}, {"value": value_without_proto}]}, - "filters": {"type_name": tq_type}} + body = {"criteria": {"+or": [{"value": value}, {"value": value_without_proto}]}, "filters": {"type_name": tq_type}} else: - body = { - "criteria": {"value": value}, - "filters": {"type_name": tq_type} - } + body = {"criteria": {"value": value}, "filters": {"type_name": tq_type}} - elif indicator_type == 'domain': + elif indicator_type == "domain": tq_type = "FQDN" - elif indicator_type == 'email': + elif indicator_type == "email": tq_type = "Email Address" - if indicator_type == 'file': - body = {"criteria": {"value": value}, - "filters": { - "+or": [{"type_name": "MD5"}, {"type_name": "SHA-1"}, {"type_name": "SHA-256"}, {"type_name": "SHA-384"}, - {"type_name": "SHA-512"}]}} - elif tq_type != "URL": + if indicator_type == "file": body = { "criteria": {"value": value}, - "filters": {"type_name": tq_type} + "filters": { + "+or": [ + {"type_name": "MD5"}, + {"type_name": "SHA-1"}, + {"type_name": "SHA-256"}, + {"type_name": "SHA-384"}, + {"type_name": "SHA-512"}, + ] + }, } + elif tq_type != "URL": + body = {"criteria": {"value": value}, "filters": {"type_name": tq_type}} - url_suffix = '/indicators/query?limit=500&offset=0&sort=id' + url_suffix = "/indicators/query?limit=500&offset=0&sort=id" - res = tq_request( - method="POST", - url_suffix=url_suffix, - params=body - ) + res = tq_request(method="POST", url_suffix=url_suffix, params=body) indicators: list[dict] = [] - for obj in res.get('data', []): - if 'id' in obj: + for obj in res.get("data", []): + if "id" in obj: # Search for detailed information about the indicator url_suffix = f'/indicators/{obj.get("id")}?with=attributes,sources,score,type' - res = tq_request('GET', 
url_suffix) - indicators.append(indicator_data_to_demisto_format(res['data'])) - indicators = indicators or [{'Value': value, 'TQScore': -1}] + res = tq_request("GET", url_suffix) + indicators.append(indicator_data_to_demisto_format(res["data"])) + indicators = indicators or [{"Value": value, "TQScore": -1}] entry_context = aggregate_search_results( - indicators=indicators, - default_indicator_type=indicator_type, - generic_context=generic_context + indicators=indicators, default_indicator_type=indicator_type, generic_context=generic_context ) readable = build_readable( - readable_title=f'Search results for {indicator_type} {value}', - obj_type='indicator', - data=indicators + readable_title=f"Search results for {indicator_type} {value}", obj_type="indicator", data=indicators ) return_outputs(readable, entry_context, res) def create_dbot_context(indicator, ind_type, ind_score): - """ This function converts a TQ scoring value of an indicator into a DBot score. + """This function converts a TQ scoring value of an indicator into a DBot score. The default score mapping function is: -1 -> 0, [0,3] -> 1, [4,7] -> 2, [8,10] -> 3. If threshold parameter is set manually, it overrides the default function definition for a @@ -373,32 +364,19 @@ def create_dbot_context(indicator, ind_type, ind_score): (dict). The indicator's DBotScore. """ - dbot_score_map = { - -1: 0, - 0: 1, - 1: 1, - 2: 1, - 3: 1, - 4: 2, - 5: 2, - 6: 2, - 7: 2, - 8: 2, - 9: 2, - 10: 2 - } + dbot_score_map = {-1: 0, 0: 1, 1: 1, 2: 1, 3: 1, 4: 2, 5: 2, 6: 2, 7: 2, 8: 2, 9: 2, 10: 2} ret = { - 'Vendor': 'ThreatQ v2', - 'Indicator': indicator, - 'Type': ind_type, - 'Reliability': demisto.params().get('integrationReliability') + "Vendor": "ThreatQ v2", + "Indicator": indicator, + "Type": ind_type, + "Reliability": demisto.params().get("integrationReliability"), } if ind_score >= THRESHOLD: - ret['Score'] = 3 + ret["Score"] = 3 else: - ret['Score'] = dbot_score_map[ind_score] + ret["Score"] = dbot_score_map[ind_score] return ret @@ -408,8 +386,8 @@ def get_tq_score_from_response(score_data): return None if isinstance(score_data, dict): # score will be max(gen_score, manual_score) - gen_score = str(score_data.get('generated_score')) - manual_score = score_data.get('manual_score', 0.0) + gen_score = str(score_data.get("generated_score")) + manual_score = score_data.get("manual_score", 0.0) if manual_score is None: manual_score = -1 return max(float(gen_score), float(manual_score)) @@ -419,7 +397,7 @@ def get_tq_score_from_response(score_data): def clean_html_from_string(raw_html): - """ This function receives an HTML string of a text, and retrieves a clean string of its content. + """This function receives an HTML string of a text, and retrieves a clean string of its content. 
Args: raw_html: An HTML format text @@ -429,8 +407,8 @@ def clean_html_from_string(raw_html): """ if not raw_html: return None - clean_r = re.compile('<.*?>') - clean_text = re.sub(clean_r, '', raw_html) + clean_r = re.compile("<.*?>") + clean_text = re.sub(clean_r, "", raw_html) return clean_text @@ -438,144 +416,143 @@ def sources_to_request_format(sources): if not sources: return [] if isinstance(sources, str): - sources = sources.split(',') - return [{'name': source} for source in sources] + sources = sources.split(",") + return [{"name": source} for source in sources] def sources_to_demisto_format(lst): if lst is None: return None - return [{ - 'Name': elem.get('name'), - 'ID': elem.get('pivot', {}).get('id'), - 'TLP': elem.get('tlp_id'), - } for elem in lst] + return [ + { + "Name": elem.get("name"), + "ID": elem.get("pivot", {}).get("id"), + "TLP": elem.get("tlp_id"), + } + for elem in lst + ] def attributes_to_request_format(attributes_names, attributes_values): if not attributes_names and not attributes_values: return [] if isinstance(attributes_names, str): - attributes_names = attributes_names.split(',') + attributes_names = attributes_names.split(",") if isinstance(attributes_values, str): - attributes_values = attributes_values.split(',') + attributes_values = attributes_values.split(",") if not attributes_names or not attributes_values or len(attributes_names) != len(attributes_values): - return_error('Attributes_names and attributes_values arguments must have the same length.') + return_error("Attributes_names and attributes_values arguments must have the same length.") - return [{'name': name, 'value': val} for name, val in zip(attributes_names, attributes_values)] + return [{"name": name, "value": val} for name, val in zip(attributes_names, attributes_values)] def attributes_to_demisto_format(lst): if lst is None: return None - return [{ - 'Name': elem.get('name'), - 'Value': elem.get('value'), - 'ID': elem.get('id') - } for elem in lst] + return [{"Name": elem.get("name"), "Value": elem.get("value"), "ID": elem.get("id")} for elem in lst] def content_type_to_demisto_format(c_type_id): # content_type is a file object property - return 'text/plain' if c_type_id == 1 else 'text/rtf' + return "text/plain" if c_type_id == 1 else "text/rtf" def malware_locked_to_request_format(state): # malware_locked is a file object property if not state: return None - return 1 if state == 'on' else 0 + return 1 if state == "on" else 0 def malware_locked_to_demisto_format(state): - return 'on' if state == 1 else 'off' + return "on" if state == 1 else "off" def parse_date(text): - valid_formats = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d'] + valid_formats = ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d"] for fmt in valid_formats: try: return str(datetime.strptime(text, fmt)) except ValueError: pass - return_error(f'Time data \'{text}\' does not match any valid format.') + return_error(f"Time data '{text}' does not match any valid format.") return None def data_to_demisto_format(data, obj_type): - if obj_type == 'indicator': + if obj_type == "indicator": return indicator_data_to_demisto_format(data) - elif obj_type == 'event': + elif obj_type == "event": return event_data_to_demisto_format(data) - elif obj_type == 'adversary': + elif obj_type == "adversary": return adversary_data_to_demisto_format(data) - elif obj_type == 'attachment': + elif obj_type == "attachment": return file_data_to_demisto_format(data) return None def indicator_data_to_demisto_format(data): ret = { - 'ID': data.get('id'), - 'UpdatedAt': 
data.get('updated_at'), - 'CreatedAt': data.get('created_at'), - 'Value': data.get('value'), - 'Status': status_id_to_status(data.get('status_id')), - 'Type': type_id_to_indicator_type(data.get('type_id')), - 'URL': '{}/indicators/{}/details'.format(SERVER_URL, data.get('id')), - 'TQScore': get_tq_score_from_response(data.get('score')), - 'Description': clean_html_from_string(data.get('description')), - 'Source': sources_to_demisto_format(data.get('sources')), - 'Attribute': attributes_to_demisto_format(data.get('attributes')) + "ID": data.get("id"), + "UpdatedAt": data.get("updated_at"), + "CreatedAt": data.get("created_at"), + "Value": data.get("value"), + "Status": status_id_to_status(data.get("status_id")), + "Type": type_id_to_indicator_type(data.get("type_id")), + "URL": "{}/indicators/{}/details".format(SERVER_URL, data.get("id")), + "TQScore": get_tq_score_from_response(data.get("score")), + "Description": clean_html_from_string(data.get("description")), + "Source": sources_to_demisto_format(data.get("sources")), + "Attribute": attributes_to_demisto_format(data.get("attributes")), } return ret def adversary_data_to_demisto_format(data): ret = { - 'ID': data.get('id'), - 'UpdatedAt': data.get('updated_at'), - 'CreatedAt': data.get('created_at'), - 'Name': data.get('name'), - 'URL': '{}/adversaries/{}/details'.format(SERVER_URL, data.get('id')), - 'Source': sources_to_demisto_format(data.get('sources')), - 'Attribute': attributes_to_demisto_format(data.get('attributes')) + "ID": data.get("id"), + "UpdatedAt": data.get("updated_at"), + "CreatedAt": data.get("created_at"), + "Name": data.get("name"), + "URL": "{}/adversaries/{}/details".format(SERVER_URL, data.get("id")), + "Source": sources_to_demisto_format(data.get("sources")), + "Attribute": attributes_to_demisto_format(data.get("attributes")), } return ret def event_data_to_demisto_format(data): ret = { - 'ID': data.get('id'), - 'UpdatedAt': data.get('updated_at'), - 'CreatedAt': data.get('created_at'), - 'Title': data.get('title'), - 'Occurred': data.get('happened_at'), - 'Type': type_id_to_event_type(data.get('type_id')), - 'URL': '{}/events/{}/details'.format(SERVER_URL, data.get('id')), - 'Description': clean_html_from_string(data.get('description')), - 'Source': sources_to_demisto_format(data.get('sources')), - 'Attribute': attributes_to_demisto_format(data.get('attributes')) + "ID": data.get("id"), + "UpdatedAt": data.get("updated_at"), + "CreatedAt": data.get("created_at"), + "Title": data.get("title"), + "Occurred": data.get("happened_at"), + "Type": type_id_to_event_type(data.get("type_id")), + "URL": "{}/events/{}/details".format(SERVER_URL, data.get("id")), + "Description": clean_html_from_string(data.get("description")), + "Source": sources_to_demisto_format(data.get("sources")), + "Attribute": attributes_to_demisto_format(data.get("attributes")), } return ret def file_data_to_demisto_format(data): raw = { - 'ID': data.get('id'), - 'CreatedAt': data.get('created_at'), - 'UpdatedAt': data.get('updated_at'), - 'Size': data.get('file_size'), - 'MD5': data.get('hash'), - 'Type': type_id_to_file_type(data.get('type_id')), - 'URL': '{}/files/{}/details'.format(SERVER_URL, data.get('id')), - 'Name': data.get('name'), - 'Title': data.get('title'), - 'Description': data.get('description'), - 'ContentType': content_type_to_demisto_format(data.get('content_type_id')), - 'MalwareLocked': malware_locked_to_demisto_format(data.get('malware_locked')), - 'Source': sources_to_demisto_format(data.get('sources')), - 'Attribute': 
attributes_to_demisto_format(data.get('attributes')) + "ID": data.get("id"), + "CreatedAt": data.get("created_at"), + "UpdatedAt": data.get("updated_at"), + "Size": data.get("file_size"), + "MD5": data.get("hash"), + "Type": type_id_to_file_type(data.get("type_id")), + "URL": "{}/files/{}/details".format(SERVER_URL, data.get("id")), + "Name": data.get("name"), + "Title": data.get("title"), + "Description": data.get("description"), + "ContentType": content_type_to_demisto_format(data.get("content_type_id")), + "MalwareLocked": malware_locked_to_demisto_format(data.get("malware_locked")), + "Source": sources_to_demisto_format(data.get("sources")), + "Attribute": attributes_to_demisto_format(data.get("attributes")), } return raw @@ -584,38 +561,33 @@ def file_data_to_demisto_format(data): def get_pivot_id(obj1_type, obj1_id, obj2_type, obj2_id): # A pivot id represents a connection between two objects. - url_suffix = f'/{OBJ_DIRECTORY[obj1_type]}/{obj1_id}/{OBJ_DIRECTORY[obj2_type]}' - res = tq_request('GET', url_suffix) + url_suffix = f"/{OBJ_DIRECTORY[obj1_type]}/{obj1_id}/{OBJ_DIRECTORY[obj2_type]}" + res = tq_request("GET", url_suffix) - for related_object in res['data']: # res['data'] contains all the related objects of obj_id1 - if int(related_object.get('id')) == int(obj2_id): - return int(related_object['pivot']['id']) - return_error('Command failed - objects are not related.') + for related_object in res["data"]: # res['data'] contains all the related objects of obj_id1 + if int(related_object.get("id")) == int(obj2_id): + return int(related_object["pivot"]["id"]) + return_error("Command failed - objects are not related.") return None def get_malicious_data(tq_score): - malicious_data = { - 'Malicious': { - 'Vendor': 'ThreatQ v2', - 'Description': f'Score from ThreatQ is {tq_score}' - } - } + malicious_data = {"Malicious": {"Vendor": "ThreatQ v2", "Description": f"Score from ThreatQ is {tq_score}"}} return malicious_data def set_indicator_entry_context(indicator_type, indicator, generic_context): - dbot_context = create_dbot_context(indicator.get('Value'), indicator_type, indicator.get('TQScore', -1)) + dbot_context = create_dbot_context(indicator.get("Value"), indicator_type, indicator.get("TQScore", -1)) indicator_type = INDICATOR_TYPES.get(indicator_type) or indicator_type - generic_context_path = outputPaths.get(indicator_type, 'Indicator(val.ID && val.ID == obj.ID)') - integration_context_path = CONTEXT_PATH['indicator'] + generic_context_path = outputPaths.get(indicator_type, "Indicator(val.ID && val.ID == obj.ID)") + integration_context_path = CONTEXT_PATH["indicator"] - if dbot_context.get('Score') == 3: - malicious_data = get_malicious_data(indicator.get('TQScore', -1)) + if dbot_context.get("Score") == 3: + malicious_data = get_malicious_data(indicator.get("TQScore", -1)) generic_context.update(malicious_data) - ec = {generic_context_path: generic_context, 'DBotScore': dbot_context} + ec = {generic_context_path: generic_context, "DBotScore": dbot_context} if indicator: ec[integration_context_path] = indicator @@ -624,17 +596,17 @@ def set_indicator_entry_context(indicator_type, indicator, generic_context): def build_readable_for_search_by_name(indicator_context, event_context, adversary_context, file_context): if not (indicator_context or event_context or adversary_context or file_context): - return 'No results.' + return "No results." 
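# [Editorial sketch, not part of the patch] How the scoring helpers above combine for
# a high-scoring indicator. The values are assumed for illustration: with THRESHOLD = 8
# and an indicator whose TQScore is 9, create_dbot_context returns Score 3, so
# set_indicator_entry_context merges the block from get_malicious_data into the
# generic context before assembling the entry context.

assumed_indicator = {"Value": "198.51.100.7", "TQScore": 9}
# DBotScore side: {"Vendor": "ThreatQ v2", "Indicator": "198.51.100.7", "Type": "ip",
#                  "Score": 3, "Reliability": ...}
# Generic context gains:
#     {"Malicious": {"Vendor": "ThreatQ v2", "Description": "Score from ThreatQ is 9"}}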
- human_readable = '' + human_readable = "" if indicator_context: - human_readable += tableToMarkdown('Search Results - Indicators', indicator_context) + human_readable += tableToMarkdown("Search Results - Indicators", indicator_context) if event_context: - human_readable += tableToMarkdown('Search Results - Events', event_context) + human_readable += tableToMarkdown("Search Results - Events", event_context) if adversary_context: - human_readable += tableToMarkdown('Search Results - Adversaries', adversary_context) + human_readable += tableToMarkdown("Search Results - Adversaries", adversary_context) if file_context: - human_readable += tableToMarkdown('Search Results - Files', file_context) + human_readable += tableToMarkdown("Search Results - Files", file_context) return human_readable @@ -647,29 +619,29 @@ def build_readable(readable_title, obj_type, data, metadata=None): headers=TABLE_HEADERS[obj_type], headerTransform=pascalToSpace, removeNull=True, - metadata=metadata + metadata=metadata, ) - if 'Attribute' in data: + if "Attribute" in data: readable += tableToMarkdown( - name='Attributes', - t=data['Attribute'], - headers=TABLE_HEADERS['attributes'], + name="Attributes", + t=data["Attribute"], + headers=TABLE_HEADERS["attributes"], removeNull=True, headerTransform=pascalToSpace, - metadata=metadata + metadata=metadata, ) - if 'Source' in data: + if "Source" in data: readable += tableToMarkdown( - name='Sources', - t=data['Source'], - headers=TABLE_HEADERS['sources'], + name="Sources", + t=data["Source"], + headers=TABLE_HEADERS["sources"], removeNull=True, headerTransform=pascalToSpace, - metadata=metadata + metadata=metadata, ) - if 'URL' in data: - url_in_markdown_format = '[{}]({})'.format(data['URL'], data['URL']) - readable = readable.replace(data['URL'], url_in_markdown_format) + if "URL" in data: + url_in_markdown_format = "[{}]({})".format(data["URL"], data["URL"]) + readable = readable.replace(data["URL"], url_in_markdown_format) else: # 'data' is a list of objects if len(data) == 1: @@ -680,54 +652,52 @@ def build_readable(readable_title, obj_type, data, metadata=None): headers=TABLE_HEADERS[obj_type], headerTransform=pascalToSpace, removeNull=True, - metadata=metadata + metadata=metadata, ) for elem in data: - url_in_markdown_format = '[{}]({})'.format(elem['URL'], elem['URL']) - readable = readable.replace(elem['URL'], url_in_markdown_format) + url_in_markdown_format = "[{}]({})".format(elem["URL"], elem["URL"]) + readable = readable.replace(elem["URL"], url_in_markdown_format) return readable -''' COMMANDS ''' +""" COMMANDS """ def test_module(): token = request_new_access_token() - threshold = demisto.params().get('threshold') + threshold = demisto.params().get("threshold") threshold_is_integer = isinstance(threshold, int) or (isinstance(threshold, str) and threshold.isdigit()) if token and threshold_is_integer and 0 <= int(threshold) <= 10: - demisto.results('ok') + demisto.results("ok") def get_indicator_type_id(indicator_name: str) -> str: - indicator_types_res = tq_request( - method='GET', - url_suffix='/indicator/types', - retrieve_entire_response=True - ) + indicator_types_res = tq_request(method="GET", url_suffix="/indicator/types", retrieve_entire_response=True) try: - indicator_types = indicator_types_res.json().get('data') + indicator_types = indicator_types_res.json().get("data") except ValueError: - raise ValueError(f'Could not parse data from ThreatQ [Status code: {indicator_types_res.status_code}]' - f'\n[Error Message: {indicator_types_res.text}]') + 
raise ValueError( + f"Could not parse data from ThreatQ [Status code: {indicator_types_res.status_code}]" + f"\n[Error Message: {indicator_types_res.text}]" + ) for indicator in indicator_types: - if indicator.get('name', '').lower() == indicator_name.lower(): - return indicator.get('id') + if indicator.get("name", "").lower() == indicator_name.lower(): + return indicator.get("id") - raise ValueError('Could not find indicator') + raise ValueError("Could not find indicator") def aggregate_search_results(indicators, default_indicator_type, generic_context=None): entry_context = [] for i in indicators: generic_context = get_generic_context(i, generic_context) - entry_context.append(set_indicator_entry_context( - indicator_type=i.get('Type') or default_indicator_type, - indicator=i, - generic_context=generic_context - )) + entry_context.append( + set_indicator_entry_context( + indicator_type=i.get("Type") or default_indicator_type, indicator=i, generic_context=generic_context + ) + ) aggregated: dict = {} for entry in entry_context: @@ -745,15 +715,11 @@ def get_search_body(query, indicator_type): "indicators": [ [ { - 'field': 'indicator_type', - 'operator': 'is', - 'value': indicator_type if indicator_type.isdigit() else get_indicator_type_id(indicator_type) + "field": "indicator_type", + "operator": "is", + "value": indicator_type if indicator_type.isdigit() else get_indicator_type_id(indicator_type), }, - { - 'field': 'indicator_value', - 'operator': 'like', - 'value': str(query) - } + {"field": "indicator_value", "operator": "like", "value": str(query)}, ] ] } @@ -762,25 +728,21 @@ def get_search_body(query, indicator_type): def advance_search_command(): args = demisto.args() - limit = args.get('limit', 10) - query = args.get('query') - indicator_type = args.get('indicator_type') + limit = args.get("limit", 10) + query = args.get("query") + indicator_type = args.get("indicator_type") search_body = get_search_body(query, indicator_type) if limit and isinstance(limit, str) and not limit.isdigit(): - return_error('limit argument must be an integer.') + return_error("limit argument must be an integer.") res = tq_request( - method='POST', - url_suffix=f'/search/advanced?limit={limit}', - params=search_body, - retrieve_entire_response=True + method="POST", url_suffix=f"/search/advanced?limit={limit}", params=search_body, retrieve_entire_response=True ) try: - search_results = res.json().get('data') + search_results = res.json().get("data") except ValueError: - raise ValueError(f'Could not parse data from ThreatQ [Status code: {res.status_code}]' - f'\n[Error Message: {res.text}]') + raise ValueError(f"Could not parse data from ThreatQ [Status code: {res.status_code}]" f"\n[Error Message: {res.text}]") if not isinstance(search_results, list): search_results = [search_results] @@ -789,43 +751,39 @@ def advance_search_command(): for obj in search_results: # Search for detailed information about the indicator url_suffix = f"/indicators/{obj.get('id')}?with=attributes,sources,score,type" - search_results = res = tq_request('GET', url_suffix) - indicators.append(indicator_data_to_demisto_format(res.get('data'))) + search_results = res = tq_request("GET", url_suffix) + indicators.append(indicator_data_to_demisto_format(res.get("data"))) - indicators = indicators or [{'Value': query, 'TQScore': -1}] + indicators = indicators or [{"Value": query, "TQScore": -1}] entry_context = aggregate_search_results(indicators=indicators, default_indicator_type=indicator_type) - readable = build_readable( - 
readable_title=f'Search results for "{query}":', - obj_type='indicator', - data=indicators - ) + readable = build_readable(readable_title=f'Search results for "{query}":', obj_type="indicator", data=indicators) return_outputs(readable, entry_context, search_results) def search_by_name_command(): args = demisto.args() - name = args.get('name') - limit = args.get('limit', '10') + name = args.get("name") + limit = args.get("limit", "10") if limit and isinstance(limit, str) and not limit.isdigit(): - return_error('limit argument must be an integer.') + return_error("limit argument must be an integer.") - url_suffix = f'/search?query={name}&limit={limit}' - res = tq_request('GET', url_suffix) + url_suffix = f"/search?query={name}&limit={limit}" + res = tq_request("GET", url_suffix) - indicator_context = [{'ID': e['id'], 'Value': e['value']} for e in res['data'] if e['object'] == 'indicator'] - event_context = [{'ID': e['id'], 'Title': e['value']} for e in res['data'] if e['object'] == 'event'] - adversary_context = [{'ID': e['id'], 'Name': e['value']} for e in res['data'] if e['object'] == 'adversary'] - file_context = [{'ID': e['id'], 'Name': e['value'].split()[1]} for e in res['data'] if e['object'] == 'attachment'] + indicator_context = [{"ID": e["id"], "Value": e["value"]} for e in res["data"] if e["object"] == "indicator"] + event_context = [{"ID": e["id"], "Title": e["value"]} for e in res["data"] if e["object"] == "event"] + adversary_context = [{"ID": e["id"], "Name": e["value"]} for e in res["data"] if e["object"] == "adversary"] + file_context = [{"ID": e["id"], "Name": e["value"].split()[1]} for e in res["data"] if e["object"] == "attachment"] # file value in response is returned in the form ["title" name], thus we use the split method above entry_context = { - CONTEXT_PATH['indicator']: indicator_context, - CONTEXT_PATH['event']: event_context, - CONTEXT_PATH['adversary']: adversary_context, - CONTEXT_PATH['attachment']: file_context + CONTEXT_PATH["indicator"]: indicator_context, + CONTEXT_PATH["event"]: event_context, + CONTEXT_PATH["adversary"]: adversary_context, + CONTEXT_PATH["attachment"]: file_context, } # Remove items with empty values: @@ -838,27 +796,27 @@ def search_by_name_command(): def search_by_id_command(): args = demisto.args() - obj_type = args.get('obj_type') - obj_id = args.get('obj_id') + obj_type = args.get("obj_type") + obj_id = args.get("obj_id") if isinstance(obj_id, str) and not obj_id.isdigit(): - return_error('obj_id argument must be an integer.') + return_error("obj_id argument must be an integer.") - url_suffix = f'/{OBJ_DIRECTORY[obj_type]}/{obj_id}?with=attributes,sources' - if obj_type == 'indicator': - url_suffix += ',score,type' + url_suffix = f"/{OBJ_DIRECTORY[obj_type]}/{obj_id}?with=attributes,sources" + if obj_type == "indicator": + url_suffix += ",score,type" - res = tq_request('GET', url_suffix) - data = data_to_demisto_format(res['data'], obj_type) + res = tq_request("GET", url_suffix) + data = data_to_demisto_format(res["data"], obj_type) ec = {CONTEXT_PATH[obj_type]: createContext(data, removeNull=True)} - if obj_type == 'indicator': - indicator_type = TQ_TO_DEMISTO_INDICATOR_TYPES.get(data['Type']) + if obj_type == "indicator": + indicator_type = TQ_TO_DEMISTO_INDICATOR_TYPES.get(data["Type"]) if indicator_type is not None: - ec['DBotScore'] = create_dbot_context(data['Value'], indicator_type, data.get('TQScore', -1)) + ec["DBotScore"] = create_dbot_context(data["Value"], indicator_type, data.get("TQScore", -1)) - readable_title = 
f'Search results for {obj_type} with ID {obj_id}' + readable_title = f"Search results for {obj_type} with ID {obj_id}" readable = build_readable(readable_title, obj_type, data) return_outputs(readable, ec, res) @@ -866,150 +824,136 @@ def search_by_id_command(): def create_indicator_command(): args = demisto.args() - indicator_type = args.get('type') - status = args.get('status') - value = args.get('value') - sources = args.get('sources') - attributes_names = args.get('attributes_names') - attributes_values = args.get('attributes_values') + indicator_type = args.get("type") + status = args.get("status") + value = args.get("value") + sources = args.get("sources") + attributes_names = args.get("attributes_names") + attributes_values = args.get("attributes_values") params = { - 'type': indicator_type, - 'status': status, - 'value': value, - 'sources': sources_to_request_format(sources), - 'attributes': attributes_to_request_format(attributes_names, attributes_values) + "type": indicator_type, + "status": status, + "value": value, + "sources": sources_to_request_format(sources), + "attributes": attributes_to_request_format(attributes_names, attributes_values), } - make_create_object_request('indicator', params) + make_create_object_request("indicator", params) def create_adversary_command(): args = demisto.args() - name = args.get('name') - sources = args.get('sources') - attributes_names = args.get('attributes_names') - attributes_values = args.get('attributes_values') + name = args.get("name") + sources = args.get("sources") + attributes_names = args.get("attributes_names") + attributes_values = args.get("attributes_values") params = { - 'name': name, - 'sources': sources_to_request_format(sources), - 'attributes': attributes_to_request_format(attributes_names, attributes_values) + "name": name, + "sources": sources_to_request_format(sources), + "attributes": attributes_to_request_format(attributes_names, attributes_values), } - make_create_object_request('adversary', params) + make_create_object_request("adversary", params) def create_event_command(): args = demisto.args() - event_type = args.get('type') - title = args.get('title') - date = args.get('date') - sources = args.get('sources') - attributes_names = args.get('attributes_names') - attributes_values = args.get('attributes_values') + event_type = args.get("type") + title = args.get("title") + date = args.get("date") + sources = args.get("sources") + attributes_names = args.get("attributes_names") + attributes_values = args.get("attributes_values") params = { - 'title': title, - 'type': event_type, - 'happened_at': parse_date(date), - 'sources': sources_to_request_format(sources), - 'attributes': attributes_to_request_format(attributes_names, attributes_values) + "title": title, + "type": event_type, + "happened_at": parse_date(date), + "sources": sources_to_request_format(sources), + "attributes": attributes_to_request_format(attributes_names, attributes_values), } - make_create_object_request('event', params) + make_create_object_request("event", params) def edit_indicator_command(): args = demisto.args() - indicator_id = args.get('id') - value = args.get('value') - indicator_type = args.get('type') - description = args.get('description') + indicator_id = args.get("id") + value = args.get("value") + indicator_type = args.get("type") + description = args.get("description") if isinstance(indicator_id, str) and not indicator_id.isdigit(): - return_error('id argument must be an integer.') + return_error("id argument must be an 
integer.") - params = { - 'value': value, - 'type': indicator_type, - 'description': description - } + params = {"value": value, "type": indicator_type, "description": description} - make_edit_request_for_an_object(indicator_id, 'indicator', params) + make_edit_request_for_an_object(indicator_id, "indicator", params) def edit_adversary_command(): args = demisto.args() - adversary_id = args.get('id') - name = args.get('name') + adversary_id = args.get("id") + name = args.get("name") if isinstance(adversary_id, str) and not adversary_id.isdigit(): - return_error('id argument must be an integer.') + return_error("id argument must be an integer.") - params = { - 'name': name - } + params = {"name": name} - make_edit_request_for_an_object(adversary_id, 'adversary', params) + make_edit_request_for_an_object(adversary_id, "adversary", params) def edit_event_command(): args = demisto.args() - event_id = args.get('id') - event_type = args.get('type') - title = args.get('title') - date = args.get('date') - description = args.get('description') + event_id = args.get("id") + event_type = args.get("type") + title = args.get("title") + date = args.get("date") + description = args.get("description") if isinstance(event_id, str) and not event_id.isdigit(): - return_error('id argument must be an integer.') + return_error("id argument must be an integer.") - params = { - 'title': title, - 'happened_at': parse_date(date) if date else None, - 'type': event_type, - 'description': description - } + params = {"title": title, "happened_at": parse_date(date) if date else None, "type": event_type, "description": description} - make_edit_request_for_an_object(event_id, 'event', params) + make_edit_request_for_an_object(event_id, "event", params) def delete_object_command(): args = demisto.args() - obj_type = args.get('obj_type') - obj_id = args.get('obj_id') + obj_type = args.get("obj_type") + obj_id = args.get("obj_id") if isinstance(obj_id, str) and not obj_id.isdigit(): - return_error('obj_id argument must be an integer.') + return_error("obj_id argument must be an integer.") - url_suffix = f'/{OBJ_DIRECTORY[obj_type]}/{obj_id}' - tq_request('DELETE', url_suffix) - demisto.results(f'Successfully deleted {obj_type} with ID {obj_id}.') + url_suffix = f"/{OBJ_DIRECTORY[obj_type]}/{obj_id}" + tq_request("DELETE", url_suffix) + demisto.results(f"Successfully deleted {obj_type} with ID {obj_id}.") def get_related_objs_command(related_type): args = demisto.args() - obj_type = args.get('obj_type') - obj_id = args.get('obj_id') + obj_type = args.get("obj_type") + obj_id = args.get("obj_id") if isinstance(obj_id, str) and not obj_id.isdigit(): - return_error('obj_id argument must be an integer.') + return_error("obj_id argument must be an integer.") - url_suffix = f'/{OBJ_DIRECTORY[obj_type]}/{obj_id}/{OBJ_DIRECTORY[related_type]}?with=sources' - if related_type == 'indicator': - url_suffix += ',score' - res = tq_request('GET', url_suffix) + url_suffix = f"/{OBJ_DIRECTORY[obj_type]}/{obj_id}/{OBJ_DIRECTORY[related_type]}?with=sources" + if related_type == "indicator": + url_suffix += ",score" + res = tq_request("GET", url_suffix) - info = [data_to_demisto_format(obj, related_type) for obj in res['data']] + info = [data_to_demisto_format(obj, related_type) for obj in res["data"]] info = createContext(info, removeNull=True) - data = { - RELATED_KEY[related_type]: createContext(info, removeNull=True), - 'ID': int(obj_id) - } + data = {RELATED_KEY[related_type]: createContext(info, removeNull=True), "ID": int(obj_id)} ec = 
{CONTEXT_PATH[obj_type]: data} if info else {} - readable_title = f'Related {related_type} type objects of {obj_type} with ID {obj_id}' + readable_title = f"Related {related_type} type objects of {obj_type} with ID {obj_id}" readable = build_readable(readable_title, related_type, data[RELATED_KEY[related_type]]) return_outputs(readable, ec, res) @@ -1017,252 +961,242 @@ def get_related_objs_command(related_type): def link_objects_command(): args = demisto.args() - obj1_type = args.get('obj1_type') - obj1_id = args.get('obj1_id') - obj2_type = args.get('obj2_type') - obj2_id = args.get('obj2_id') + obj1_type = args.get("obj1_type") + obj1_id = args.get("obj1_id") + obj2_type = args.get("obj2_type") + obj2_id = args.get("obj2_id") - if isinstance(obj1_id, str) and not obj1_id.isdigit() or isinstance(obj2_id, str) and not obj2_id.isdigit(): - return_error('obj1_id, obj2_id arguments must be integers.') + if (isinstance(obj1_id, str) and not obj1_id.isdigit()) or (isinstance(obj2_id, str) and not obj2_id.isdigit()): + return_error("obj1_id, obj2_id arguments must be integers.") if obj1_type == obj2_type and obj1_id == obj2_id: - return_error('Cannot link an object to itself.') + return_error("Cannot link an object to itself.") - url_suffix = f'/{OBJ_DIRECTORY[obj1_type]}/{obj1_id}/{OBJ_DIRECTORY[obj2_type]}' - params = { - 'id': obj2_id - } - tq_request('POST', url_suffix, params) - demisto.results( - f'Successfully linked {obj1_type} with ID {obj1_id} and {obj2_type} with ID {obj2_id}.') + url_suffix = f"/{OBJ_DIRECTORY[obj1_type]}/{obj1_id}/{OBJ_DIRECTORY[obj2_type]}" + params = {"id": obj2_id} + tq_request("POST", url_suffix, params) + demisto.results(f"Successfully linked {obj1_type} with ID {obj1_id} and {obj2_type} with ID {obj2_id}.") def unlink_objects_command(): args = demisto.args() - obj1_type = args.get('obj1_type') - obj1_id = args.get('obj1_id') - obj2_type = args.get('obj2_type') - obj2_id = args.get('obj2_id') + obj1_type = args.get("obj1_type") + obj1_id = args.get("obj1_id") + obj2_type = args.get("obj2_type") + obj2_id = args.get("obj2_id") - if isinstance(obj1_id, str) and not obj1_id.isdigit() or isinstance(obj2_id, str) and not obj2_id.isdigit(): - return_error('obj1_id, obj2_id arguments must be integers.') + if (isinstance(obj1_id, str) and not obj1_id.isdigit()) or (isinstance(obj2_id, str) and not obj2_id.isdigit()): + return_error("obj1_id, obj2_id arguments must be integers.") if obj1_type == obj2_type and obj1_id == obj2_id: - return_error('An object cannot be linked to itself.') + return_error("An object cannot be linked to itself.") p_id = get_pivot_id(obj1_type, obj1_id, obj2_type, obj2_id) - url_suffix = f'/{OBJ_DIRECTORY[obj1_type]}/{obj1_id}/{OBJ_DIRECTORY[obj2_type]}' - tq_request('DELETE', url_suffix, params=[p_id]) - demisto.results( - f'Successfully unlinked {obj1_type} with ID {obj1_id} and {obj2_type} with ID {obj2_id}.') + url_suffix = f"/{OBJ_DIRECTORY[obj1_type]}/{obj1_id}/{OBJ_DIRECTORY[obj2_type]}" + tq_request("DELETE", url_suffix, params=[p_id]) + demisto.results(f"Successfully unlinked {obj1_type} with ID {obj1_id} and {obj2_type} with ID {obj2_id}.") def update_score_command(): # Note: We can't update DBot Score because API doesn't retrieve the indicator value. 
args = demisto.args()
-    indicator_id = args.get('id')
-    score = args.get('score')
+    indicator_id = args.get("id")
+    score = args.get("score")
 
     if isinstance(indicator_id, str) and not indicator_id.isdigit():
-        return_error('id argument must be an integer.')
+        return_error("id argument must be an integer.")
 
     if isinstance(score, str) and not score.isdigit():  # User chose 'Generated Score' option
         manual_score = None
     else:
        manual_score = int(score)
 
-    url_suffix = f'/indicator/{indicator_id}/scores'
-    params = {'manual_score': manual_score}
+    url_suffix = f"/indicator/{indicator_id}/scores"
+    params = {"manual_score": manual_score}
 
-    res = tq_request('PUT', url_suffix, params)
+    res = tq_request("PUT", url_suffix, params)
 
-    data = {
-        'ID': int(indicator_id),
-        'TQScore': get_tq_score_from_response(res['data'])
-    }
+    data = {"ID": int(indicator_id), "TQScore": get_tq_score_from_response(res["data"])}
 
-    ec = {CONTEXT_PATH['indicator']: data}
+    ec = {CONTEXT_PATH["indicator"]: data}
 
-    readable = 'Successfully updated score of indicator with ID {} to {}. ' \
-               'Notice that final score is the maximum between ' \
-               'manual and generated scores.'.format(indicator_id, int(data['TQScore']))
+    readable = (
+        "Successfully updated score of indicator with ID {} to {}. "
+        "Notice that the final score is the maximum of the "
+        "manual and generated scores.".format(indicator_id, int(data["TQScore"]))
+    )
 
     return_outputs(readable, ec, res)
 
 
 def add_source_command():
     args = demisto.args()
-    source = args.get('source')
-    obj_id = args.get('obj_id')
-    obj_type = args.get('obj_type')
+    source = args.get("source")
+    obj_id = args.get("obj_id")
+    obj_type = args.get("obj_type")
 
     if isinstance(obj_id, str) and not obj_id.isdigit():
-        return_error('obj_id argument must be an integer.')
+        return_error("obj_id argument must be an integer.")
 
-    url_suffix = f'/{OBJ_DIRECTORY[obj_type]}/{obj_id}/sources'
-    params = {
-        'name': source
-    }
+    url_suffix = f"/{OBJ_DIRECTORY[obj_type]}/{obj_id}/sources"
+    params = {"name": source}
 
-    tq_request('POST', url_suffix, params)
-    demisto.results(f'Successfully added source {source} to {obj_type} with ID {obj_id}.')
+    tq_request("POST", url_suffix, params)
+    demisto.results(f"Successfully added source {source} to {obj_type} with ID {obj_id}.")
 
 
 def delete_source_command():
     args = demisto.args()
-    source_id = args.get('source_id')
-    obj_id = args.get('obj_id')
-    obj_type = args.get('obj_type')
+    source_id = args.get("source_id")
+    obj_id = args.get("obj_id")
+    obj_type = args.get("obj_type")
 
     if isinstance(obj_id, str) and not obj_id.isdigit():
-        return_error('obj_id argument must be an integer.')
+        return_error("obj_id argument must be an integer.")
 
     if isinstance(source_id, str) and not source_id.isdigit():
-        return_error('source_id argument must be an integer.')
+        return_error("source_id argument must be an integer.")
 
-    url_suffix = f'/{OBJ_DIRECTORY[obj_type]}/{obj_id}/sources/{source_id}'
+    url_suffix = f"/{OBJ_DIRECTORY[obj_type]}/{obj_id}/sources/{source_id}"
 
-    tq_request('DELETE', url_suffix)
-    demisto.results(f'Successfully deleted source #{source_id} from {obj_type} with ID {obj_id}.')
+    tq_request("DELETE", url_suffix)
+    demisto.results(f"Successfully deleted source #{source_id} from {obj_type} with ID {obj_id}.")
 
 
 def add_attribute_command():
     args = demisto.args()
-    attribute_name = args.get('name')
-    attribute_value = args.get('value')
-    obj_type = args.get('obj_type')
-    obj_id = args.get('obj_id')
+    attribute_name = args.get("name")
+    attribute_value = args.get("value")
+    obj_type 
= args.get("obj_type") + obj_id = args.get("obj_id") if isinstance(obj_id, str) and not obj_id.isdigit(): - return_error('obj_id argument must be an integer.') + return_error("obj_id argument must be an integer.") - url_suffix = f'/{OBJ_DIRECTORY[obj_type]}/{obj_id}/attributes' - params = { - 'name': attribute_name, - 'value': attribute_value - } + url_suffix = f"/{OBJ_DIRECTORY[obj_type]}/{obj_id}/attributes" + params = {"name": attribute_name, "value": attribute_value} - tq_request('POST', url_suffix, params) - demisto.results(f'Successfully added attribute to {obj_type} with ID {obj_id}.') + tq_request("POST", url_suffix, params) + demisto.results(f"Successfully added attribute to {obj_type} with ID {obj_id}.") def modify_attribute_command(): args = demisto.args() - attribute_id = args.get('attribute_id') - attribute_value = args.get('attribute_value') - obj_type = args.get('obj_type') - obj_id = args.get('obj_id') + attribute_id = args.get("attribute_id") + attribute_value = args.get("attribute_value") + obj_type = args.get("obj_type") + obj_id = args.get("obj_id") if isinstance(obj_id, str) and not obj_id.isdigit(): - return_error('obj_id argument must be an integer.') + return_error("obj_id argument must be an integer.") if isinstance(attribute_id, str) and not attribute_id.isdigit(): - return_error('attribute_id argument must be an integer.') + return_error("attribute_id argument must be an integer.") - url_suffix = f'/{OBJ_DIRECTORY[obj_type]}/{obj_id}/attributes/{attribute_id}' - params = {'value': attribute_value} + url_suffix = f"/{OBJ_DIRECTORY[obj_type]}/{obj_id}/attributes/{attribute_id}" + params = {"value": attribute_value} - tq_request('PUT', url_suffix, params) + tq_request("PUT", url_suffix, params) - demisto.results(f'Successfully modified attribute #{attribute_id} of {obj_type} with ID {obj_id}.') + demisto.results(f"Successfully modified attribute #{attribute_id} of {obj_type} with ID {obj_id}.") def delete_attribute_command(): args = demisto.args() - attribute_id = args.get('attribute_id') - obj_type = args.get('obj_type') - obj_id = args.get('obj_id') + attribute_id = args.get("attribute_id") + obj_type = args.get("obj_type") + obj_id = args.get("obj_id") if isinstance(obj_id, str) and not obj_id.isdigit(): - return_error('obj_id argument must be an integer.') + return_error("obj_id argument must be an integer.") if isinstance(attribute_id, str) and not attribute_id.isdigit(): - return_error('attribute_id argument must be an integer.') + return_error("attribute_id argument must be an integer.") - url_suffix = f'/{OBJ_DIRECTORY[obj_type]}/{obj_id}/attributes/{attribute_id}' + url_suffix = f"/{OBJ_DIRECTORY[obj_type]}/{obj_id}/attributes/{attribute_id}" - tq_request('DELETE', url_suffix) - demisto.results(f'Successfully deleted attribute #{attribute_id} from {obj_type} with ID {obj_id}.') + tq_request("DELETE", url_suffix) + demisto.results(f"Successfully deleted attribute #{attribute_id} from {obj_type} with ID {obj_id}.") def update_status_command(): args = demisto.args() - indicator_id = args.get('id') - status = args.get('status') + indicator_id = args.get("id") + status = args.get("status") if isinstance(indicator_id, str) and not indicator_id.isdigit(): - return_error('id argument must be an integer.') + return_error("id argument must be an integer.") - url_suffix = f'/indicators/{indicator_id}' - params = {'status': status} + url_suffix = f"/indicators/{indicator_id}" + params = {"status": status} - res = tq_request('PUT', url_suffix, params) + res = 
tq_request("PUT", url_suffix, params) data = { - 'ID': int(indicator_id), - 'Status': status_id_to_status(res['data'].get('status_id')), + "ID": int(indicator_id), + "Status": status_id_to_status(res["data"].get("status_id")), } - ec = {CONTEXT_PATH['indicator']: data} + ec = {CONTEXT_PATH["indicator"]: data} - readable = f'Successfully updated status of indicator with ID {indicator_id} to {status}.' + readable = f"Successfully updated status of indicator with ID {indicator_id} to {status}." return_outputs(readable, ec, res) def upload_file_command(): args = demisto.args() - entry_id = args.get('entry_id') - title = args.get('title') - malware_safety_lock = args.get('malware_safety_lock', 'off') - file_category = args.get('file_category') + entry_id = args.get("entry_id") + title = args.get("title") + malware_safety_lock = args.get("malware_safety_lock", "off") + file_category = args.get("file_category") file_info = demisto.getFilePath(entry_id) if not title: - title = file_info['name'] + title = file_info["name"] params = { - 'name': file_info['name'], - 'title': title, - 'type': file_category, - 'malware_locked': malware_locked_to_request_format(malware_safety_lock) + "name": file_info["name"], + "title": title, + "type": file_category, + "malware_locked": malware_locked_to_request_format(malware_safety_lock), } try: - shutil.copy(file_info['path'], file_info['name']) + shutil.copy(file_info["path"], file_info["name"]) except Exception as e: - return_error(f'Failed to prepare file for upload. Error message: {str(e)}') + return_error(f"Failed to prepare file for upload. Error message: {e!s}") try: - with open(file_info['name'], 'rb') as f: - files = {'file': f} - url_suffix = '/attachments' - res = tq_request('POST', url_suffix, params, files=files) + with open(file_info["name"], "rb") as f: + files = {"file": f} + url_suffix = "/attachments" + res = tq_request("POST", url_suffix, params, files=files) finally: - shutil.rmtree(file_info['name'], ignore_errors=True) + shutil.rmtree(file_info["name"], ignore_errors=True) - data = file_data_to_demisto_format(res['data']) + data = file_data_to_demisto_format(res["data"]) - ec = {CONTEXT_PATH['attachment']: data} + ec = {CONTEXT_PATH["attachment"]: data} - readable_title = 'Successfully uploaded file {}.'.format(file_info['name']) - readable = build_readable(readable_title, 'attachment', data) + readable_title = "Successfully uploaded file {}.".format(file_info["name"]) + readable = build_readable(readable_title, "attachment", data) return_outputs(readable, ec, res) def download_file_command(): args = demisto.args() - file_id = args.get('id') + file_id = args.get("id") if isinstance(file_id, str) and not file_id.isdigit(): - return_error('id argument must be an integer.') + return_error("id argument must be an integer.") - url_suffix = f'/attachments/{file_id}/download' + url_suffix = f"/attachments/{file_id}/download" - res = tq_request('GET', url_suffix, retrieve_entire_response=True) + res = tq_request("GET", url_suffix, retrieve_entire_response=True) # 'Content-Disposition' value is of the form: attachment; filename="filename.txt" # Since we don't have the file name anywhere else in the response object, we parse it from this entry. 
- filename = res.headers.get('Content-Disposition', '').split('\"')[1] + filename = res.headers.get("Content-Disposition", "").split('"')[1] content = res.content demisto.results(fileResult(filename, content)) @@ -1270,24 +1204,24 @@ def download_file_command(): def get_all_objs_command(obj_type): args = demisto.args() - page = int(args.get('page', 0)) - limit = int(args.get('limit', 50)) + page = int(args.get("page", 0)) + limit = int(args.get("limit", 50)) if limit > 200: limit = 200 - url_suffix = f'/{OBJ_DIRECTORY[obj_type]}?with=attributes,sources' - if obj_type == 'indicator': - url_suffix += ',score' - res = tq_request('GET', url_suffix) + url_suffix = f"/{OBJ_DIRECTORY[obj_type]}?with=attributes,sources" + if obj_type == "indicator": + url_suffix += ",score" + res = tq_request("GET", url_suffix) - from_index = min(page, len(res['data'])) - to_index = min(from_index + limit, len(res['data'])) + from_index = min(page, len(res["data"])) + to_index = min(from_index + limit, len(res["data"])) - data = [data_to_demisto_format(obj, obj_type) for obj in res['data'][from_index:to_index]] + data = [data_to_demisto_format(obj, obj_type) for obj in res["data"][from_index:to_index]] ec = {CONTEXT_PATH[obj_type]: createContext(data, removeNull=True)} if data else {} - readable_title = f'List of all objects of type {obj_type} - {from_index}-{to_index - 1}' - metadata = 'Total number of objects is {}'.format(len(res['data'])) + readable_title = f"List of all objects of type {obj_type} - {from_index}-{to_index - 1}" + metadata = "Total number of objects is {}".format(len(res["data"])) readable = build_readable(readable_title, obj_type, data, metadata=metadata) return_outputs(readable, ec, res) @@ -1295,142 +1229,145 @@ def get_all_objs_command(obj_type): def get_ip_reputation(): args = demisto.args() - ips = argToList(args.get('ip')) + ips = argToList(args.get("ip")) for ip in ips: if not is_ip_valid(ip, accept_v6_ips=True): - return_error(f'{ip} is not a valid IP address.') + return_error(f"{ip} is not a valid IP address.") - generic_context = {'Address': ip} + generic_context = {"Address": ip} - make_indicator_reputation_request(indicator_type='ip', value=ip, generic_context=generic_context) + make_indicator_reputation_request(indicator_type="ip", value=ip, generic_context=generic_context) def get_url_reputation(): args = demisto.args() - urls = argToList(args.get('url')) + urls = argToList(args.get("url")) for url in urls: - if not REGEX_MAP['url'].match(url): - return_error(f'{url} is not a valid URL.') + if not REGEX_MAP["url"].match(url): + return_error(f"{url} is not a valid URL.") - generic_context = {'Data': url} + generic_context = {"Data": url} - make_indicator_reputation_request(indicator_type='url', value=url, generic_context=generic_context) + make_indicator_reputation_request(indicator_type="url", value=url, generic_context=generic_context) def get_email_reputation(): args = demisto.args() - emails = argToList(args.get('email')) + emails = argToList(args.get("email")) for email in emails: - if not REGEX_MAP['email'].match(email): - return_error(f'{email} is not a valid email address.') + if not REGEX_MAP["email"].match(email): + return_error(f"{email} is not a valid email address.") - generic_context = {'Address': email} + generic_context = {"Address": email} - make_indicator_reputation_request(indicator_type='email', value=email, generic_context=generic_context) + make_indicator_reputation_request(indicator_type="email", value=email, generic_context=generic_context) def 
get_domain_reputation(): args = demisto.args() - domains = argToList(args.get('domain')) + domains = argToList(args.get("domain")) for domain in domains: - generic_context = {'Name': domain} - make_indicator_reputation_request(indicator_type='domain', value=domain, generic_context=generic_context) + generic_context = {"Name": domain} + make_indicator_reputation_request(indicator_type="domain", value=domain, generic_context=generic_context) def get_file_reputation(): args = demisto.args() - files = argToList(args.get('file')) + files = argToList(args.get("file")) for file in files: - for fmt in ['md5', 'sha1', 'sha256']: + for fmt in ["md5", "sha1", "sha256"]: if REGEX_MAP[fmt].match(file): break else: - return_error(f'{file} is not a valid file format.') - - generic_context = createContext({ - 'MD5': file if fmt == 'md5' else None, - 'SHA1': file if fmt == 'sha1' else None, - 'SHA256': file if fmt == 'sha256' else None - }, removeNull=True) + return_error(f"{file} is not a valid file format.") + + generic_context = createContext( + { + "MD5": file if fmt == "md5" else None, + "SHA1": file if fmt == "sha1" else None, + "SHA256": file if fmt == "sha256" else None, + }, + removeNull=True, + ) - make_indicator_reputation_request(indicator_type='file', value=file, generic_context=generic_context) + make_indicator_reputation_request(indicator_type="file", value=file, generic_context=generic_context) -''' EXECUTION CODE ''' +""" EXECUTION CODE """ command = demisto.command() -LOG(f'command is {demisto.command()}') +LOG(f"command is {demisto.command()}") try: handle_proxy() - if command == 'test-module': + if command == "test-module": test_module() - elif command == 'threatq-advanced-search': + elif command == "threatq-advanced-search": advance_search_command() - elif command == 'threatq-search-by-name': + elif command == "threatq-search-by-name": search_by_name_command() - elif command == 'threatq-search-by-id': + elif command == "threatq-search-by-id": search_by_id_command() - elif command == 'threatq-create-indicator': + elif command == "threatq-create-indicator": create_indicator_command() - elif command == 'threatq-create-event': + elif command == "threatq-create-event": create_event_command() - elif command == 'threatq-create-adversary': + elif command == "threatq-create-adversary": create_adversary_command() - elif command == 'threatq-edit-indicator': + elif command == "threatq-edit-indicator": edit_indicator_command() - elif command == 'threatq-edit-event': + elif command == "threatq-edit-event": edit_event_command() - elif command == 'threatq-edit-adversary': + elif command == "threatq-edit-adversary": edit_adversary_command() - elif command == 'threatq-delete-object': + elif command == "threatq-delete-object": delete_object_command() - elif command == 'threatq-get-related-indicators': - get_related_objs_command('indicator') - elif command == 'threatq-get-related-events': - get_related_objs_command('event') - elif command == 'threatq-get-related-adversaries': - get_related_objs_command('adversary') - elif command == 'threatq-link-objects': + elif command == "threatq-get-related-indicators": + get_related_objs_command("indicator") + elif command == "threatq-get-related-events": + get_related_objs_command("event") + elif command == "threatq-get-related-adversaries": + get_related_objs_command("adversary") + elif command == "threatq-link-objects": link_objects_command() - elif command == 'threatq-unlink-objects': + elif command == "threatq-unlink-objects": unlink_objects_command() - elif 
command == 'threatq-update-score': + elif command == "threatq-update-score": update_score_command() - elif command == 'threatq-add-source': + elif command == "threatq-add-source": add_source_command() - elif command == 'threatq-delete-source': + elif command == "threatq-delete-source": delete_source_command() - elif command == 'threatq-add-attribute': + elif command == "threatq-add-attribute": add_attribute_command() - elif command == 'threatq-modify-attribute': + elif command == "threatq-modify-attribute": modify_attribute_command() - elif command == 'threatq-delete-attribute': + elif command == "threatq-delete-attribute": delete_attribute_command() - elif command == 'threatq-update-status': + elif command == "threatq-update-status": update_status_command() - elif command == 'threatq-upload-file': + elif command == "threatq-upload-file": upload_file_command() - elif command == 'threatq-download-file': + elif command == "threatq-download-file": download_file_command() - elif command == 'threatq-get-all-indicators': - get_all_objs_command('indicator') - elif command == 'threatq-get-all-events': - get_all_objs_command('event') - elif command == 'threatq-get-all-adversaries': - get_all_objs_command('adversary') - elif command == 'ip': + elif command == "threatq-get-all-indicators": + get_all_objs_command("indicator") + elif command == "threatq-get-all-events": + get_all_objs_command("event") + elif command == "threatq-get-all-adversaries": + get_all_objs_command("adversary") + elif command == "ip": get_ip_reputation() - elif command == 'domain': + elif command == "domain": get_domain_reputation() - elif command == 'email': + elif command == "email": get_email_reputation() - elif command == 'url': + elif command == "url": get_url_reputation() - elif command == 'file': + elif command == "file": get_file_reputation() except Exception as ex: diff --git a/Packs/ThreatQ/Integrations/ThreatQ_v2/ThreatQ_v2_test.py b/Packs/ThreatQ/Integrations/ThreatQ_v2/ThreatQ_v2_test.py index 450adca17c72..3d039d2d44b5 100644 --- a/Packs/ThreatQ/Integrations/ThreatQ_v2/ThreatQ_v2_test.py +++ b/Packs/ThreatQ/Integrations/ThreatQ_v2/ThreatQ_v2_test.py @@ -1,158 +1,147 @@ from unittest.mock import Mock -import demistomock as demisto +import demistomock as demisto MOCK_URL = "http://123-fake-api.com" MOCK_API_URL = MOCK_URL + "/api" MOCK_PARAMS = { - "credentials": { - "identifier": "mock_email", - "password": "mock_pass" - }, + "credentials": {"identifier": "mock_email", "password": "mock_pass"}, "insecure": True, "proxy": False, "serverUrl": MOCK_URL, "client_id": "mock_cliend_id", - "threshold": 6 + "threshold": 6, } -MOCK_ACCESS_TOKEN = { - 'expires_in': 3600, - 'access_token': '3220879210' -} +MOCK_ACCESS_TOKEN = {"expires_in": 3600, "access_token": "3220879210"} -MOCK_GET_ALL_OBJS_ARGUMENTS = {'limit': '2', 'page': '0'} +MOCK_GET_ALL_OBJS_ARGUMENTS = {"limit": "2", "page": "0"} -MOCK_EMAIL_REPUTATION_ARGUMENTS = {'email': 'foo@demisto.com'} +MOCK_EMAIL_REPUTATION_ARGUMENTS = {"email": "foo@demisto.com"} -MOCK_SEARCH_BY_ID_ARGUMENTS = {'obj_id': 2019, 'obj_type': 'event'} +MOCK_SEARCH_BY_ID_ARGUMENTS = {"obj_id": 2019, "obj_type": "event"} -MOCK_RELATED_OBJS_ARGUMENTS = {'obj_type': 'adversary', 'obj_id': '1'} +MOCK_RELATED_OBJS_ARGUMENTS = {"obj_type": "adversary", "obj_id": "1"} -MOCK_SEARCH_BY_NAME_ARGUMENTS = {'name': 'foo@demisto', 'limit': '10'} +MOCK_SEARCH_BY_NAME_ARGUMENTS = {"name": "foo@demisto", "limit": "10"} -MOCK_FILE_INFO = {'name': 'TestTitle', 'path': 'test_data/testfile.txt'} +MOCK_FILE_INFO = 
{"name": "TestTitle", "path": "test_data/testfile.txt"} -MOCK_EDIT_EVENT_ARGUMENTS = {'id': 2019, 'date': '2019-03-01', 'description': 'test', 'type': 'Spearphish'} +MOCK_EDIT_EVENT_ARGUMENTS = {"id": 2019, "date": "2019-03-01", "description": "test", "type": "Spearphish"} -MOCK_UPLOAD_FILE_ARGUMENTS = { - 'entry_id': 'mock', - 'title': 'TestTitle', - 'malware_safety_lock': 'off', - 'file_category': 'Cuckoo' -} +MOCK_UPLOAD_FILE_ARGUMENTS = {"entry_id": "mock", "title": "TestTitle", "malware_safety_lock": "off", "file_category": "Cuckoo"} MOCK_CREATE_INDICATOR_ARGUMENTS = { - 'type': 'Email Address', - 'status': 'Active', - 'value': 'foo@demisto.com', - 'sources': 'test_source1,test_source2', - 'attributes_names': 'test_attribute1,test_attribute2', - 'attributes_values': 'test_value1,test_value2' + "type": "Email Address", + "status": "Active", + "value": "foo@demisto.com", + "sources": "test_source1,test_source2", + "attributes_names": "test_attribute1,test_attribute2", + "attributes_values": "test_value1,test_value2", } MOCK_FILE_UPLOAD_RESPONSE = { - 'data': { - 'type_id': 1, - 'name': 'testfile.txt', - 'title': 'TestTitle', - 'malware_locked': 0, - 'content_type_id': 1 - } + "data": {"type_id": 1, "name": "testfile.txt", "title": "TestTitle", "malware_locked": 0, "content_type_id": 1} } MOCK_INDICATOR_CREATION_RESPONSE = { - 'data': [{ - 'id': 2019, - 'type_id': 4, # 'Email Address' - 'value': 'foo@demisto.com', - 'status_id': 1, # 'Active' - 'sources': [ - {'name': 'test_source1', 'pivot': {'id': 2017}}, - {'name': 'test_source2', 'pivot': {'id': 2018}} - ], - 'attributes': [ - {'name': 'test_attribute1', 'value': 'test_value1', 'id': 2019}, - {'name': 'test_attribute2', 'value': 'test_value2', 'id': 2020} - ], - 'score': 6 - }] + "data": [ + { + "id": 2019, + "type_id": 4, # 'Email Address' + "value": "foo@demisto.com", + "status_id": 1, # 'Active' + "sources": [{"name": "test_source1", "pivot": {"id": 2017}}, {"name": "test_source2", "pivot": {"id": 2018}}], + "attributes": [ + {"name": "test_attribute1", "value": "test_value1", "id": 2019}, + {"name": "test_attribute2", "value": "test_value2", "id": 2020}, + ], + "score": 6, + } + ] } MOCK_GET_INDICATOR_RESPONSE = { - 'data': { - 'id': 2019, - 'type_id': 4, # 'Email Address' - 'value': 'foo@demisto.com', - 'score': 6, - 'status_id': 1 # 'Active' + "data": { + "id": 2019, + "type_id": 4, # 'Email Address' + "value": "foo@demisto.com", + "score": 6, + "status_id": 1, # 'Active' } } MOCK_SEARCH_BY_NAME_RESPONSE = { - 'data': [ - {'id': 2017, 'value': 'foo@demisto.com', 'object': 'event'}, - {'id': 2018, 'value': 'foo@demisto.com', 'object': 'adversary'}, - {'id': 2019, 'value': 'foo@demisto.com', 'object': 'indicator'} + "data": [ + {"id": 2017, "value": "foo@demisto.com", "object": "event"}, + {"id": 2018, "value": "foo@demisto.com", "object": "adversary"}, + {"id": 2019, "value": "foo@demisto.com", "object": "indicator"}, ] } MOCK_SEARCH_BY_EMAIL_RESPONSE = { - 'total': 1, - 'data': [ - {'class': 'network', 'score': 0, 'value': 'foo@demisto.com', 'touched_at': '2019-11-20 08:23:21', 'id': 2019, - 'updated_at': '2019-11-20 08:22:51', 'published_at': '2019-11-20 08:22:51', 'created_at': '2019-11-20 08:22:51', - 'status_id': 5, 'type_id': 4, 'adversaries': [], 'type': {'name': 'Email Address', 'id': 4, 'class': 'network'}, - 'status': {'name': 'Whitelisted', 'id': 5, 'description': 'Poses NO risk and should never be deployed.'}, - 'sources': [{'indicator_id': 20, 'indicator_status_id': 5, 'published_at': '2019-11-20 08:22:51', 
'source_id': 8, - 'id': 22, 'created_at': '2019-11-20 08:22:51', 'source_type': 'users', 'creator_source_id': 8, - 'indicator_type_id': 4, 'reference_id': 1, 'updated_at': '2019-11-20 08:22:51', - 'name': 'foo@demisto.com'}]}], 'limit': 500, 'offset': 0} - -MOCK_GET_EVENT_RESPONSE = { - 'data': { - 'id': 2019, - 'happened_at': '2019-03-01 00:00:00', - 'description': 'test', - 'type_id': 1 - } + "total": 1, + "data": [ + { + "class": "network", + "score": 0, + "value": "foo@demisto.com", + "touched_at": "2019-11-20 08:23:21", + "id": 2019, + "updated_at": "2019-11-20 08:22:51", + "published_at": "2019-11-20 08:22:51", + "created_at": "2019-11-20 08:22:51", + "status_id": 5, + "type_id": 4, + "adversaries": [], + "type": {"name": "Email Address", "id": 4, "class": "network"}, + "status": {"name": "Whitelisted", "id": 5, "description": "Poses NO risk and should never be deployed."}, + "sources": [ + { + "indicator_id": 20, + "indicator_status_id": 5, + "published_at": "2019-11-20 08:22:51", + "source_id": 8, + "id": 22, + "created_at": "2019-11-20 08:22:51", + "source_type": "users", + "creator_source_id": 8, + "indicator_type_id": 4, + "reference_id": 1, + "updated_at": "2019-11-20 08:22:51", + "name": "foo@demisto.com", + } + ], + } + ], + "limit": 500, + "offset": 0, } +MOCK_GET_EVENT_RESPONSE = {"data": {"id": 2019, "happened_at": "2019-03-01 00:00:00", "description": "test", "type_id": 1}} + MOCK_INDICATOR_LIST_RESPONSE = { - 'data': [ - {'id': 10, 'value': 'foo@demisto.com', 'type_id': 4, 'status_id': 2}, - {'id': 11, 'value': '8.8.8.8', 'type_id': 14, 'status_id': 1}, - {'id': 12, 'value': '1.2.3.4', 'type_id': 14, 'status_id': 2} + "data": [ + {"id": 10, "value": "foo@demisto.com", "type_id": 4, "status_id": 2}, + {"id": 11, "value": "8.8.8.8", "type_id": 14, "status_id": 1}, + {"id": 12, "value": "1.2.3.4", "type_id": 14, "status_id": 2}, ] } MOCK_ERROR_RESPONSES = [ - { - "data": { - "errors": { - "name": ["The name has already been taken."], - "test": ["test_error1", "test_error2"] - } - } - }, - { - "errors": [ - 'First Error', - ['Second error - part 1', 'Second error - part 2'] - ] - } + {"data": {"errors": {"name": ["The name has already been taken."], "test": ["test_error1", "test_error2"]}}}, + {"errors": ["First Error", ["Second error - part 1", "Second error - part 2"]]}, ] EXPECTED_ERROR_STRINGS = [ "Errors from service:\n\n" "Error #1. In 'name':\nThe name has already been taken.\n\n" "Error #2. 
In 'test':\ntest_error1\ntest_error2\n\n", - "Errors from service:\n\n" "Error #1: First Error\n" "Error #2.0: Second error - part 1\n" - "Error #2.1: Second error - part 2\n" + "Error #2.1: Second error - part 2\n", ] MOCK_GET_INDICATOR_STATUS_RESPONSE_1 = { @@ -165,7 +154,7 @@ "include_in_export": "Y", "protected": "Y", "created_at": "2017-04-17 04:35:21", - "updated_at": "2017-04-17 04:35:21" + "updated_at": "2017-04-17 04:35:21", } } @@ -179,7 +168,7 @@ "include_in_export": "Y", "protected": "Y", "created_at": "2017-04-17 04:35:21", - "updated_at": "2017-04-17 04:35:21" + "updated_at": "2017-04-17 04:35:21", } } @@ -191,7 +180,7 @@ "score": None, "wildcard_matching": "Y", "created_at": "2017-04-17 04:34:56", - "updated_at": "2017-04-17 04:34:56" + "updated_at": "2017-04-17 04:34:56", } } @@ -203,7 +192,7 @@ "score": None, "wildcard_matching": "Y", "created_at": "2017-04-17 04:34:56", - "updated_at": "2017-04-17 04:34:56" + "updated_at": "2017-04-17 04:34:56", } } @@ -213,7 +202,7 @@ "name": "Spearphish", "user_editable": "N", "created_at": "2017-03-20 13:28:23", - "updated_at": "2017-03-20 13:28:23" + "updated_at": "2017-03-20 13:28:23", } } @@ -224,187 +213,198 @@ "is_parsable": "Y", "parser_class": "Cuckoo", "created_at": "2017-03-16 13:03:46", - "updated_at": "2017-03-16 13:03:46" + "updated_at": "2017-03-16 13:03:46", } } def mock_demisto(mocker, mock_args): - mocker.patch.object(demisto, 'params', return_value=MOCK_PARAMS) - mocker.patch.object(demisto, 'args', return_value=mock_args) - mocker.patch.object(demisto, 'results') + mocker.patch.object(demisto, "params", return_value=MOCK_PARAMS) + mocker.patch.object(demisto, "args", return_value=mock_args) + mocker.patch.object(demisto, "results") def test_create_indicator_command(mocker, requests_mock): mock_demisto(mocker, MOCK_CREATE_INDICATOR_ARGUMENTS) - requests_mock.post(MOCK_API_URL + '/indicators', json=MOCK_INDICATOR_CREATION_RESPONSE) - requests_mock.post(MOCK_API_URL + '/token', json=MOCK_ACCESS_TOKEN) - requests_mock.get(MOCK_API_URL + '/indicator/statuses/1', json=MOCK_GET_INDICATOR_STATUS_RESPONSE_1) - requests_mock.get(MOCK_API_URL + '/indicator/types/4', json=MOCK_GET_INDICATOR_TYPE_RESPONSE_1) + requests_mock.post(MOCK_API_URL + "/indicators", json=MOCK_INDICATOR_CREATION_RESPONSE) + requests_mock.post(MOCK_API_URL + "/token", json=MOCK_ACCESS_TOKEN) + requests_mock.get(MOCK_API_URL + "/indicator/statuses/1", json=MOCK_GET_INDICATOR_STATUS_RESPONSE_1) + requests_mock.get(MOCK_API_URL + "/indicator/types/4", json=MOCK_GET_INDICATOR_TYPE_RESPONSE_1) from ThreatQ_v2 import create_indicator_command + create_indicator_command() results = demisto.results.call_args[0] - entry_context = results[0]['EntryContext'][ - 'ThreatQ.Indicator((val.ID && val.ID === obj.ID) || (val.Value && val.Value === obj.Value))'] + entry_context = results[0]["EntryContext"][ + "ThreatQ.Indicator((val.ID && val.ID === obj.ID) || (val.Value && val.Value === obj.Value))" + ] - assert 'Indicator was successfully created.' in results[0]['HumanReadable'] - assert entry_context['Value'] == 'foo@demisto.com' - assert entry_context['Type'] == 'Email Address' - assert entry_context['Status'] == 'Active' - assert entry_context['Source'][0]['ID'] == 2017 - assert entry_context['Source'][1]['Name'] == 'test_source2' - assert entry_context['Attribute'][0]['Name'] == 'test_attribute1' - assert entry_context['Attribute'][1]['Value'] == 'test_value2' + assert "Indicator was successfully created." 
in results[0]["HumanReadable"] + assert entry_context["Value"] == "foo@demisto.com" + assert entry_context["Type"] == "Email Address" + assert entry_context["Status"] == "Active" + assert entry_context["Source"][0]["ID"] == 2017 + assert entry_context["Source"][1]["Name"] == "test_source2" + assert entry_context["Attribute"][0]["Name"] == "test_attribute1" + assert entry_context["Attribute"][1]["Value"] == "test_value2" def test_edit_event_command(mocker, requests_mock): mock_demisto(mocker, MOCK_EDIT_EVENT_ARGUMENTS) - requests_mock.put(MOCK_API_URL + '/events/2019', json=MOCK_GET_EVENT_RESPONSE) - requests_mock.post(MOCK_API_URL + '/token', json=MOCK_ACCESS_TOKEN) - requests_mock.get(MOCK_API_URL + '/event/types/1', json=MOCK_GET_EVENT_TYPE_RESPONSE) + requests_mock.put(MOCK_API_URL + "/events/2019", json=MOCK_GET_EVENT_RESPONSE) + requests_mock.post(MOCK_API_URL + "/token", json=MOCK_ACCESS_TOKEN) + requests_mock.get(MOCK_API_URL + "/event/types/1", json=MOCK_GET_EVENT_TYPE_RESPONSE) from ThreatQ_v2 import edit_event_command + edit_event_command() results = demisto.results.call_args[0] - entry_context = results[0]['EntryContext']['ThreatQ.Event(val.ID === obj.ID)'] + entry_context = results[0]["EntryContext"]["ThreatQ.Event(val.ID === obj.ID)"] - assert 'Successfully edited event with ID 2019' in results[0]['HumanReadable'] - assert entry_context['Occurred'] == '2019-03-01 00:00:00' # date format should be changed - assert entry_context['Description'] == 'test' # html markups should be cleaned - assert entry_context['Type'] == 'Spearphish' + assert "Successfully edited event with ID 2019" in results[0]["HumanReadable"] + assert entry_context["Occurred"] == "2019-03-01 00:00:00" # date format should be changed + assert entry_context["Description"] == "test" # html markups should be cleaned + assert entry_context["Type"] == "Spearphish" def test_upload_file_command(mocker, requests_mock): mock_demisto(mocker, MOCK_UPLOAD_FILE_ARGUMENTS) - mocker.patch.object(demisto, 'getFilePath', return_value=MOCK_FILE_INFO) - requests_mock.post(MOCK_API_URL + '/token', json=MOCK_ACCESS_TOKEN) - requests_mock.post(MOCK_API_URL + '/attachments', json=MOCK_FILE_UPLOAD_RESPONSE) - requests_mock.get(MOCK_API_URL + '/attachments/types/1', json=MOCK_GET_FILE_TYPE_RESPONSE) + mocker.patch.object(demisto, "getFilePath", return_value=MOCK_FILE_INFO) + requests_mock.post(MOCK_API_URL + "/token", json=MOCK_ACCESS_TOKEN) + requests_mock.post(MOCK_API_URL + "/attachments", json=MOCK_FILE_UPLOAD_RESPONSE) + requests_mock.get(MOCK_API_URL + "/attachments/types/1", json=MOCK_GET_FILE_TYPE_RESPONSE) from ThreatQ_v2 import upload_file_command + upload_file_command() results = demisto.results.call_args[0] - entry_context = results[0]['EntryContext']['ThreatQ.File(val.ID === obj.ID)'] + entry_context = results[0]["EntryContext"]["ThreatQ.File(val.ID === obj.ID)"] - assert 'Successfully uploaded file TestTitle.' in results[0]['HumanReadable'] - assert entry_context['MalwareLocked'] == 'off' - assert entry_context['Type'] == 'Cuckoo' - assert entry_context['ContentType'] == 'text/plain' - assert entry_context['Title'] == 'TestTitle' - assert entry_context['Name'] == 'testfile.txt' + assert "Successfully uploaded file TestTitle." 
in results[0]["HumanReadable"] + assert entry_context["MalwareLocked"] == "off" + assert entry_context["Type"] == "Cuckoo" + assert entry_context["ContentType"] == "text/plain" + assert entry_context["Title"] == "TestTitle" + assert entry_context["Name"] == "testfile.txt" def test_get_email_reputation(mocker, requests_mock): mock_demisto(mocker, MOCK_EMAIL_REPUTATION_ARGUMENTS) - requests_mock.post(MOCK_API_URL + '/token', json=MOCK_ACCESS_TOKEN) - requests_mock.post(MOCK_API_URL + '/indicators/query?limit=500&offset=0&sort=id', json=MOCK_SEARCH_BY_EMAIL_RESPONSE) - requests_mock.get(MOCK_API_URL + '/indicators/2019?with=attributes,sources,score,type', - json=MOCK_GET_INDICATOR_RESPONSE) - requests_mock.get(MOCK_API_URL + '/indicator/statuses/1', json=MOCK_GET_INDICATOR_STATUS_RESPONSE_1) - requests_mock.get(MOCK_API_URL + '/indicator/types/4', json=MOCK_GET_INDICATOR_TYPE_RESPONSE_1) + requests_mock.post(MOCK_API_URL + "/token", json=MOCK_ACCESS_TOKEN) + requests_mock.post(MOCK_API_URL + "/indicators/query?limit=500&offset=0&sort=id", json=MOCK_SEARCH_BY_EMAIL_RESPONSE) + requests_mock.get(MOCK_API_URL + "/indicators/2019?with=attributes,sources,score,type", json=MOCK_GET_INDICATOR_RESPONSE) + requests_mock.get(MOCK_API_URL + "/indicator/statuses/1", json=MOCK_GET_INDICATOR_STATUS_RESPONSE_1) + requests_mock.get(MOCK_API_URL + "/indicator/types/4", json=MOCK_GET_INDICATOR_TYPE_RESPONSE_1) from ThreatQ_v2 import get_email_reputation + get_email_reputation() results = demisto.results.call_args[0] - entry_context = results[0]['EntryContext'][ - 'ThreatQ.Indicator((val.ID && val.ID === obj.ID) || (val.Value && val.Value === obj.Value))'] - generic_context = results[0]['EntryContext']['Account.Email(val.Address && val.Address == obj.Address)'] + entry_context = results[0]["EntryContext"][ + "ThreatQ.Indicator((val.ID && val.ID === obj.ID) || (val.Value && val.Value === obj.Value))" + ] + generic_context = results[0]["EntryContext"]["Account.Email(val.Address && val.Address == obj.Address)"] - assert 'Search results for email foo@demisto.com' in results[0]['HumanReadable'] - assert entry_context[0]['Value'] == 'foo@demisto.com' - assert generic_context[0]['Address'] == 'foo@demisto.com' - assert generic_context[0]['Malicious']['Vendor'] == 'ThreatQ v2' # indicator should be marked a malicious - assert results[0]['EntryContext']['DBotScore'][0]['Score'] == 3 + assert "Search results for email foo@demisto.com" in results[0]["HumanReadable"] + assert entry_context[0]["Value"] == "foo@demisto.com" + assert generic_context[0]["Address"] == "foo@demisto.com" + assert generic_context[0]["Malicious"]["Vendor"] == "ThreatQ v2" # indicator should be marked a malicious + assert results[0]["EntryContext"]["DBotScore"][0]["Score"] == 3 def test_get_related_objs_command(mocker, requests_mock): mock_demisto(mocker, MOCK_RELATED_OBJS_ARGUMENTS) - requests_mock.post(MOCK_API_URL + '/token', json=MOCK_ACCESS_TOKEN) - requests_mock.get(MOCK_API_URL + '/adversaries/1/indicators?with=sources,score', json=MOCK_INDICATOR_LIST_RESPONSE) - requests_mock.get(MOCK_API_URL + '/indicator/statuses/2', json=MOCK_GET_INDICATOR_STATUS_RESPONSE_2) - requests_mock.get(MOCK_API_URL + '/indicator/statuses/1', json=MOCK_GET_INDICATOR_STATUS_RESPONSE_1) - requests_mock.get(MOCK_API_URL + '/indicator/types/4', json=MOCK_GET_INDICATOR_TYPE_RESPONSE_1) - requests_mock.get(MOCK_API_URL + '/indicator/types/14', json=MOCK_GET_INDICATOR_TYPE_RESPONSE_2) + requests_mock.post(MOCK_API_URL + "/token", json=MOCK_ACCESS_TOKEN) + 
requests_mock.get(MOCK_API_URL + "/adversaries/1/indicators?with=sources,score", json=MOCK_INDICATOR_LIST_RESPONSE) + requests_mock.get(MOCK_API_URL + "/indicator/statuses/2", json=MOCK_GET_INDICATOR_STATUS_RESPONSE_2) + requests_mock.get(MOCK_API_URL + "/indicator/statuses/1", json=MOCK_GET_INDICATOR_STATUS_RESPONSE_1) + requests_mock.get(MOCK_API_URL + "/indicator/types/4", json=MOCK_GET_INDICATOR_TYPE_RESPONSE_1) + requests_mock.get(MOCK_API_URL + "/indicator/types/14", json=MOCK_GET_INDICATOR_TYPE_RESPONSE_2) from ThreatQ_v2 import get_related_objs_command - get_related_objs_command('indicator') + + get_related_objs_command("indicator") results = demisto.results.call_args[0] - entry_context = results[0]['EntryContext']['ThreatQ.Adversary(val.ID === obj.ID)'] + entry_context = results[0]["EntryContext"]["ThreatQ.Adversary(val.ID === obj.ID)"] - assert 'Related indicator type objects of adversary with ID 1' in results[0]['HumanReadable'] + assert "Related indicator type objects of adversary with ID 1" in results[0]["HumanReadable"] - assert len(entry_context['RelatedIndicator']) == 3 - assert entry_context['RelatedIndicator'][0]['Type'] == 'Email Address' - assert entry_context['RelatedIndicator'][1]['Type'] == 'IP Address' - assert entry_context['RelatedIndicator'][2]['Status'] == 'Expired' + assert len(entry_context["RelatedIndicator"]) == 3 + assert entry_context["RelatedIndicator"][0]["Type"] == "Email Address" + assert entry_context["RelatedIndicator"][1]["Type"] == "IP Address" + assert entry_context["RelatedIndicator"][2]["Status"] == "Expired" def test_get_all_objs_command(mocker, requests_mock): mock_demisto(mocker, MOCK_GET_ALL_OBJS_ARGUMENTS) - requests_mock.post(MOCK_API_URL + '/token', json=MOCK_ACCESS_TOKEN) - requests_mock.get(MOCK_API_URL + '/indicators?with=attributes,sources,score', json=MOCK_INDICATOR_LIST_RESPONSE) - requests_mock.get(MOCK_API_URL + '/indicator/statuses/2', json=MOCK_GET_INDICATOR_STATUS_RESPONSE_2) - requests_mock.get(MOCK_API_URL + '/indicator/statuses/1', json=MOCK_GET_INDICATOR_STATUS_RESPONSE_1) - requests_mock.get(MOCK_API_URL + '/indicator/types/4', json=MOCK_GET_INDICATOR_TYPE_RESPONSE_1) - requests_mock.get(MOCK_API_URL + '/indicator/types/14', json=MOCK_GET_INDICATOR_TYPE_RESPONSE_2) + requests_mock.post(MOCK_API_URL + "/token", json=MOCK_ACCESS_TOKEN) + requests_mock.get(MOCK_API_URL + "/indicators?with=attributes,sources,score", json=MOCK_INDICATOR_LIST_RESPONSE) + requests_mock.get(MOCK_API_URL + "/indicator/statuses/2", json=MOCK_GET_INDICATOR_STATUS_RESPONSE_2) + requests_mock.get(MOCK_API_URL + "/indicator/statuses/1", json=MOCK_GET_INDICATOR_STATUS_RESPONSE_1) + requests_mock.get(MOCK_API_URL + "/indicator/types/4", json=MOCK_GET_INDICATOR_TYPE_RESPONSE_1) + requests_mock.get(MOCK_API_URL + "/indicator/types/14", json=MOCK_GET_INDICATOR_TYPE_RESPONSE_2) from ThreatQ_v2 import get_all_objs_command - get_all_objs_command('indicator') + + get_all_objs_command("indicator") results = demisto.results.call_args[0] - entry_context = results[0]['EntryContext'][ - 'ThreatQ.Indicator((val.ID && val.ID === obj.ID) || (val.Value && val.Value === obj.Value))'] - assert 'List of all objects of type indicator - 0-1' in results[0]['HumanReadable'] + entry_context = results[0]["EntryContext"][ + "ThreatQ.Indicator((val.ID && val.ID === obj.ID) || (val.Value && val.Value === obj.Value))" + ] + assert "List of all objects of type indicator - 0-1" in results[0]["HumanReadable"] assert len(entry_context) == 2 - assert entry_context[0]['Type'] == 'Email 
Address' - assert entry_context[0]['Status'] == 'Expired' - assert entry_context[1]['Type'] == 'IP Address' - assert entry_context[1]['Status'] == 'Active' + assert entry_context[0]["Type"] == "Email Address" + assert entry_context[0]["Status"] == "Expired" + assert entry_context[1]["Type"] == "IP Address" + assert entry_context[1]["Status"] == "Active" def test_search_by_name_command(mocker, requests_mock): mock_demisto(mocker, MOCK_SEARCH_BY_NAME_ARGUMENTS) - requests_mock.post(MOCK_API_URL + '/token', json=MOCK_ACCESS_TOKEN) - requests_mock.get(MOCK_API_URL + '/search?query=foo@demisto&limit=10', json=MOCK_SEARCH_BY_NAME_RESPONSE) + requests_mock.post(MOCK_API_URL + "/token", json=MOCK_ACCESS_TOKEN) + requests_mock.get(MOCK_API_URL + "/search?query=foo@demisto&limit=10", json=MOCK_SEARCH_BY_NAME_RESPONSE) from ThreatQ_v2 import search_by_name_command + search_by_name_command() results = demisto.results.call_args[0] - assert 'Search Results - Indicators' in results[0]['HumanReadable'] - assert 'Search Results - Adversaries' in results[0]['HumanReadable'] - assert 'Search Results - Events' in results[0]['HumanReadable'] - assert 'Search Results - Files' not in results[0]['HumanReadable'] - assert len(results[0]['EntryContext']) == 3 + assert "Search Results - Indicators" in results[0]["HumanReadable"] + assert "Search Results - Adversaries" in results[0]["HumanReadable"] + assert "Search Results - Events" in results[0]["HumanReadable"] + assert "Search Results - Files" not in results[0]["HumanReadable"] + assert len(results[0]["EntryContext"]) == 3 def test_search_by_id_command(mocker, requests_mock): mock_demisto(mocker, MOCK_SEARCH_BY_ID_ARGUMENTS) - requests_mock.post(MOCK_API_URL + '/token', json=MOCK_ACCESS_TOKEN) - requests_mock.get(MOCK_API_URL + '/events/2019?with=attributes,sources', json=MOCK_GET_EVENT_RESPONSE) - requests_mock.get(MOCK_API_URL + '/event/types/1', json=MOCK_GET_EVENT_TYPE_RESPONSE) + requests_mock.post(MOCK_API_URL + "/token", json=MOCK_ACCESS_TOKEN) + requests_mock.get(MOCK_API_URL + "/events/2019?with=attributes,sources", json=MOCK_GET_EVENT_RESPONSE) + requests_mock.get(MOCK_API_URL + "/event/types/1", json=MOCK_GET_EVENT_TYPE_RESPONSE) from ThreatQ_v2 import search_by_id_command + search_by_id_command() results = demisto.results.call_args[0] - entry_context = results[0]['EntryContext']['ThreatQ.Event(val.ID === obj.ID)'] + entry_context = results[0]["EntryContext"]["ThreatQ.Event(val.ID === obj.ID)"] - assert 'Search results for event with ID 2019' in results[0]['HumanReadable'] - assert entry_context['Description'] == 'test' - assert entry_context['Occurred'] == '2019-03-01 00:00:00' - assert entry_context['Type'] == 'Spearphish' + assert "Search results for event with ID 2019" in results[0]["HumanReadable"] + assert entry_context["Description"] == "test" + assert entry_context["Occurred"] == "2019-03-01 00:00:00" + assert entry_context["Type"] == "Spearphish" def test_get_errors_string_from_bad_request(): - from ThreatQ_v2 import get_errors_string_from_bad_request from requests.models import Response + from ThreatQ_v2 import get_errors_string_from_bad_request + res = Mock(spec=Response) for error_response, expected_result in zip(MOCK_ERROR_RESPONSES, EXPECTED_ERROR_STRINGS): @@ -415,12 +415,12 @@ def test_get_errors_string_from_bad_request(): def test_second_attempt_for_reputation_requests(mocker): """ - Given: - - An old format ThreatQ request body. 
- When: - - run tq_request to send a request - Then: - - Verify that a request with the new format body was sent and returned a response as expected. + Given: + - An old format ThreatQ request body. + When: + - run tq_request to send a request + Then: + - Verify that a request with the new format body was sent and returned a response as expected. """ mock_demisto(mocker, MOCK_EMAIL_REPUTATION_ARGUMENTS) @@ -435,16 +435,13 @@ def __init__(self, status_code, data={}) -> None: def json(self): return self.data - def get_response( - method, url, data=None, headers=None, verify=False, files=None, allow_redirects=True - ): - if url.endswith('/token'): + def get_response(method, url, data=None, headers=None, verify=False, files=None, allow_redirects=True): + if url.endswith("/token"): return MockResponse(status_code=200, data=MOCK_ACCESS_TOKEN) return MockResponse(status_code=200, data=MOCK_SEARCH_BY_EMAIL_RESPONSE) mocker.patch.object(requests, "request", side_effect=get_response) - results = tq_request('post', '', params={"criteria": {"value": "foo@demisto.com"}}, - retrieve_entire_response=True) + results = tq_request("post", "", params={"criteria": {"value": "foo@demisto.com"}}, retrieve_entire_response=True) assert results.status_code == 200 - assert results.json()['data'][0]['value'] == 'foo@demisto.com' + assert results.json()["data"][0]["value"] == "foo@demisto.com" diff --git a/Packs/ThreatQ/ReleaseNotes/1_0_29.md b/Packs/ThreatQ/ReleaseNotes/1_0_29.md new file mode 100644 index 000000000000..ff3ea16f3e36 --- /dev/null +++ b/Packs/ThreatQ/ReleaseNotes/1_0_29.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### ThreatQ v2 + +- Metadata and documentation improvements. diff --git a/Packs/ThreatQ/pack_metadata.json b/Packs/ThreatQ/pack_metadata.json index 842289eda29e..87b33609e44d 100644 --- a/Packs/ThreatQ/pack_metadata.json +++ b/Packs/ThreatQ/pack_metadata.json @@ -2,7 +2,7 @@ "name": "ThreatQ", "description": "Platform for collecting and interpreting intelligence data from open sources and managing indicator scores, types, and attributes.", "support": "xsoar", - "currentVersion": "1.0.28", + "currentVersion": "1.0.29", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", From 98612835124e7d004c4431d87291528f217da8f9 Mon Sep 17 00:00:00 2001 From: Content Bot Date: Sun, 23 Mar 2025 12:53:09 +0000 Subject: [PATCH 13/18] FeedAWS: Apply ruff Format --- Packs/FeedAWS/Integrations/FeedAWS/FeedAWS.py | 94 +++++++++---------- Packs/FeedAWS/ReleaseNotes/1_1_57.md | 6 ++ Packs/FeedAWS/pack_metadata.json | 2 +- 3 files changed, 51 insertions(+), 51 deletions(-) create mode 100644 Packs/FeedAWS/ReleaseNotes/1_1_57.md diff --git a/Packs/FeedAWS/Integrations/FeedAWS/FeedAWS.py b/Packs/FeedAWS/Integrations/FeedAWS/FeedAWS.py index dd281105e84c..0ee36d95d7b9 100644 --- a/Packs/FeedAWS/Integrations/FeedAWS/FeedAWS.py +++ b/Packs/FeedAWS/Integrations/FeedAWS/FeedAWS.py @@ -1,29 +1,30 @@ import demistomock as demisto from CommonServerPython import * -AVAILABLE_FEEDS = ['AMAZON', - 'EC2', - 'ROUTE53', - 'ROUTE53_HEALTHCHECKS', - 'CLOUDFRONT', - 'S3', - 'AMAZON_APPFLOW', - 'AMAZON_CONNECT', - 'API_GATEWAY', - 'CHIME_MEETINGS', - 'CHIME_VOICECONNECTOR', - 'CLOUD9', - 'CLOUDFRONT_ORIGIN_FACING', - 'CODEBUILD', - 'DYNAMODB', - 'EBS', - 'EC2_INSTANCE_CONNECT', - 'GLOBALACCELERATOR', - 'KINESIS_VIDEO_STREAMS', - 'ROUTE53_HEALTHCHECKS_PUBLISHING', - 'ROUTE53_RESOLVER', - 'WORKSPACES_GATEWAYS', - ] +AVAILABLE_FEEDS = [ + "AMAZON", + "EC2", + "ROUTE53", + "ROUTE53_HEALTHCHECKS", + 
"CLOUDFRONT", + "S3", + "AMAZON_APPFLOW", + "AMAZON_CONNECT", + "API_GATEWAY", + "CHIME_MEETINGS", + "CHIME_VOICECONNECTOR", + "CLOUD9", + "CLOUDFRONT_ORIGIN_FACING", + "CODEBUILD", + "DYNAMODB", + "EBS", + "EC2_INSTANCE_CONNECT", + "GLOBALACCELERATOR", + "KINESIS_VIDEO_STREAMS", + "ROUTE53_HEALTHCHECKS_PUBLISHING", + "ROUTE53_RESOLVER", + "WORKSPACES_GATEWAYS", +] def get_feed_config(services: list, regions: list): @@ -37,38 +38,32 @@ def get_feed_config(services: list, regions: list): The feed configuration. """ - region_path = '' - if regions and 'All' not in regions: + region_path = "" + if regions and "All" not in regions: region_path = f" && contains({regions}, region)" - if 'All' in services or not services: + if "All" in services or not services: services = AVAILABLE_FEEDS feed_name_to_config = {} for feed in services: - feed_name_to_config[f'{feed}$$CIDR'] = { - 'url': 'https://ip-ranges.amazonaws.com/ip-ranges.json', - 'extractor': f"prefixes[?service=='{feed}'{region_path}]", - 'indicator': 'ip_prefix', - 'indicator_type': FeedIndicatorType.CIDR, - 'fields': ['region', 'service'], - 'mapping': { - 'region': 'region', - 'service': 'service' - } + feed_name_to_config[f"{feed}$$CIDR"] = { + "url": "https://ip-ranges.amazonaws.com/ip-ranges.json", + "extractor": f"prefixes[?service=='{feed}'{region_path}]", + "indicator": "ip_prefix", + "indicator_type": FeedIndicatorType.CIDR, + "fields": ["region", "service"], + "mapping": {"region": "region", "service": "service"}, } - feed_name_to_config[f'{feed}$$IPv6'] = { - 'url': 'https://ip-ranges.amazonaws.com/ip-ranges.json', - 'extractor': f"ipv6_prefixes[?service=='{feed}'{region_path}]", - 'indicator': 'ipv6_prefix', - 'indicator_type': FeedIndicatorType.IPv6, - 'fields': ['region', 'service'], - 'mapping': { - 'region': 'region', - 'service': 'service' - } + feed_name_to_config[f"{feed}$$IPv6"] = { + "url": "https://ip-ranges.amazonaws.com/ip-ranges.json", + "extractor": f"ipv6_prefixes[?service=='{feed}'{region_path}]", + "indicator": "ipv6_prefix", + "indicator_type": FeedIndicatorType.IPv6, + "fields": ["region", "service"], + "mapping": {"region": "region", "service": "service"}, } return feed_name_to_config @@ -79,10 +74,9 @@ def get_feed_config(services: list, regions: list): def main(): params = {k: v for k, v in demisto.params().items() if v is not None} - params['feed_name_to_config'] = get_feed_config(params.get('services', ['All']), - argToList(params.get('regions', ['All']))) - feed_main(params, 'AWS Feed', 'aws') + params["feed_name_to_config"] = get_feed_config(params.get("services", ["All"]), argToList(params.get("regions", ["All"]))) + feed_main(params, "AWS Feed", "aws") -if __name__ in ('__main__', '__builtin__', 'builtins'): +if __name__ in ("__main__", "__builtin__", "builtins"): main() diff --git a/Packs/FeedAWS/ReleaseNotes/1_1_57.md b/Packs/FeedAWS/ReleaseNotes/1_1_57.md new file mode 100644 index 000000000000..153f13401cf6 --- /dev/null +++ b/Packs/FeedAWS/ReleaseNotes/1_1_57.md @@ -0,0 +1,6 @@ + +#### Integrations + +##### AWS Feed + +- Metadata and documentation improvements. 
diff --git a/Packs/FeedAWS/pack_metadata.json b/Packs/FeedAWS/pack_metadata.json index 55be27f19a9a..1d57673a67bd 100644 --- a/Packs/FeedAWS/pack_metadata.json +++ b/Packs/FeedAWS/pack_metadata.json @@ -2,7 +2,7 @@ "name": "AWS Feed", "description": "Indicators feed from AWS", "support": "xsoar", - "currentVersion": "1.1.56", + "currentVersion": "1.1.57", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", From 1771c07fbacec1900087b637a294c46d1716c869 Mon Sep 17 00:00:00 2001 From: merit-maita Date: Mon, 24 Mar 2025 01:42:29 +0200 Subject: [PATCH 14/18] pre-commit --- .pre-commit-config_template.yaml | 2 ++ .../CloseLinkedIncidentsPostProcessing.py | 2 +- .../CheckPointHarmonyEndpoint_test.py | 7 +++---- .../CiscoFirepower/CiscoFirepower.py | 12 ++++++------ .../FireEye-Detection-on-Demand.py | 2 +- .../HPEArubaClearPass/HPEArubaClearPass.py | 2 +- .../HPEArubaClearPass_test.py | 2 +- .../Integrations/Ipstack/Ipstack_test.py | 2 +- .../Integrations/OpenLDAP/OpenLDAP.py | 10 +++++----- .../Integrations/OpenLDAP/OpenLDAP_test.py | 2 +- .../Integrations/OpenPhish_v2/OpenPhish_v2.py | 7 ++----- .../OpenPhish_v2/OpenPhish_v2_test.py | 2 +- .../ProofpointThreatResponse.py | 16 +++++++--------- .../ProofpointThreatResponseEventCollector.py | 2 +- .../ServiceNow_CMDB/ServiceNow_CMDB.py | 7 +++++-- .../ServiceNowv2/ServiceNowv2_test.py | 4 ++-- .../ServiceNowCreateIncident.py | 4 ++-- .../Integrations/SplunkPy/SplunkPy.py | 18 +++++++++--------- .../Integrations/SplunkPy/SplunkPy_test.py | 19 ++++++++++--------- .../SplunkConvertCommentsToTable.py | 2 +- .../SplunkConvertCommentsToTable_test.py | 2 +- .../SplunkShowAsset/SplunkShowAsset.py | 2 +- .../SplunkShowDrilldown.py | 2 +- .../SplunkShowDrilldown_test.py | 2 +- .../SplunkShowIdentity/SplunkShowIdentity.py | 2 +- .../Integrations/ThreatQ_v2/ThreatQ_v2.py | 2 +- 26 files changed, 68 insertions(+), 68 deletions(-) diff --git a/.pre-commit-config_template.yaml b/.pre-commit-config_template.yaml index 0284ad396d21..20dfb3eba3ba 100644 --- a/.pre-commit-config_template.yaml +++ b/.pre-commit-config_template.yaml @@ -47,6 +47,8 @@ repos: - --config=nightly_ruff.toml args:docker_autoupdate: - --config=nightly_ruff.toml + - id: ruff-format + min_py_version: '3.7' - repo: https://github.com/hhatto/autopep8 rev: v2.3.1 hooks: diff --git a/Packs/CaseManagement-Generic/Scripts/CloseLinkedIncidentsPostProcessing/CloseLinkedIncidentsPostProcessing.py b/Packs/CaseManagement-Generic/Scripts/CloseLinkedIncidentsPostProcessing/CloseLinkedIncidentsPostProcessing.py index a85942af86b7..8e48987b6fcc 100644 --- a/Packs/CaseManagement-Generic/Scripts/CloseLinkedIncidentsPostProcessing/CloseLinkedIncidentsPostProcessing.py +++ b/Packs/CaseManagement-Generic/Scripts/CloseLinkedIncidentsPostProcessing/CloseLinkedIncidentsPostProcessing.py @@ -16,7 +16,7 @@ def main(): "command": "closeInvestigation", "arguments": { "closeReason": close_reason, - "closeNotes": f"Closed from parent Incident {incident_id}\n" f"\nClose Notes:\n{close_notes}", + "closeNotes": f"Closed from parent Incident {incident_id}\n\nClose Notes:\n{close_notes}", }, "incidents": ",".join(linked_incidents), }, diff --git a/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint_test.py b/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint_test.py index 3943164c1321..a5ee23170625 100644 --- 
a/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint_test.py +++ b/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint_test.py @@ -987,10 +987,9 @@ def test_get_pagination_args(args: dict[str, str], expected): with unittest.mock.patch( "CommonServerPython.arg_to_number", side_effect=lambda x: int(x) if x is not None else None, - ): - with unittest.mock.patch("CheckPointHarmonyEndpoint.validate_pagination_arguments") as mock_validate: - assert CheckPointHarmonyEndpoint.get_pagination_args(args) == expected - mock_validate.assert_called() + ), unittest.mock.patch("CheckPointHarmonyEndpoint.validate_pagination_arguments") as mock_validate: + assert CheckPointHarmonyEndpoint.get_pagination_args(args) == expected + mock_validate.assert_called() def test_validate_filter_arguments(): diff --git a/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower.py b/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower.py index de69e638ef38..cf857f2b4acf 100644 --- a/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower.py +++ b/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower.py @@ -1886,7 +1886,7 @@ def parse_results( command_headers_by_keys: dict[str, Any], command_title: str, command_context: str, - raw_responses: list | dict = None, + raw_responses: list | dict = None, # type: ignore[assignment] ) -> CommandResults: """ Create a CommandResults from a given response. @@ -2007,7 +2007,7 @@ def list_zones_command(client: Client, args: dict) -> CommandResults: "ID": item.get("id", ""), "Name": item.get("name", ""), "InterfaceMode": item.get("interfaceMode", ""), - "Interfaces": [{"Name": obj.get("name", ""), "ID": obj.get("id" "")} for obj in item.get("interfaces", {})], + "Interfaces": [{"Name": obj.get("name", ""), "ID": obj.get("id")} for obj in item.get("interfaces", {})], } for item in items ] @@ -3561,7 +3561,7 @@ def list_intrusion_policy_command(client: Client, args: dict[str, Any]) -> Comma command_headers_by_keys=INTRUSION_POLICY_HEADERS_BY_KEYS, command_title=f"Fetched {INTRUSION_POLICY_TITLE}", command_context=INTRUSION_POLICY_CONTEXT, - raw_responses=raw_responses, + raw_responses=raw_responses, # type: ignore[arg-type] ) @@ -3732,7 +3732,7 @@ def list_intrusion_rule_command(client: Client, args: dict[str, Any]) -> Command command_headers_by_keys=INTRUSION_RULE_HEADERS_BY_KEYS, command_title=f"Fetched {INTRUSION_RULE_TITLE}", command_context=INTRUSION_RULE_CONTEXT, - raw_responses=raw_responses, + raw_responses=raw_responses, # type: ignore[arg-type] ) @@ -3955,7 +3955,7 @@ def list_intrusion_rule_group_command(client: Client, args: dict[str, Any]) -> C command_headers_by_keys=INTRUSION_RULE_GROUP_HEADERS_BY_KEYS, command_title=f"Fetched {INTRUSION_RULE_GROUP_TITLE}", command_context=INTRUSION_RULE_GROUP_CONTEXT, - raw_responses=raw_responses, + raw_responses=raw_responses, # type: ignore[arg-type] ) @@ -4097,7 +4097,7 @@ def list_network_analysis_policy_command(client: Client, args: dict[str, Any]) - command_headers_by_keys=NETWORK_ANALYSIS_POLICY_HEADERS_BY_KEYS, command_title=f"Fetched {NETWORK_ANALYSIS_POLICY_TITLE}", command_context=NETWORK_ANALYSIS_POLICY_CONTEXT, - raw_responses=raw_responses, + raw_responses=raw_responses, # type: ignore[arg-type] ) diff --git a/Packs/FireEye-Detection-on-Demand/Integrations/FireEye-Detection-on-Demand/FireEye-Detection-on-Demand.py 
b/Packs/FireEye-Detection-on-Demand/Integrations/FireEye-Detection-on-Demand/FireEye-Detection-on-Demand.py
index 12ae9d25996e..9f2252d8e0f1 100644
--- a/Packs/FireEye-Detection-on-Demand/Integrations/FireEye-Detection-on-Demand/FireEye-Detection-on-Demand.py
+++ b/Packs/FireEye-Detection-on-Demand/Integrations/FireEye-Detection-on-Demand/FireEye-Detection-on-Demand.py
@@ -270,7 +270,7 @@ def submit_urls_command(client: Client, args: dict[str, Any]) -> tuple[str, dict
     report_id = scan.get("report_id")
 
     readable_output = (
-        f"Started analysis of {urls} with FireEye Detection on Demand." f"Results will be published to report id: {report_id}"
+        f"Started analysis of {urls} with FireEye Detection on Demand. Results will be published to report id: {report_id}"
     )
     outputs = {"FireEyeDoD.Scan(val.report_id == obj.report_id)": scan}
     return (readable_output, outputs, scan)
diff --git a/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass.py b/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass.py
index 2a39c351e9a8..c208ece48ddb 100644
--- a/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass.py
+++ b/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass.py
@@ -81,7 +81,7 @@ def save_access_token_to_context(self, auth_response: dict):
         set_integration_context(context)
         self.set_request_headers()
         demisto.debug(
-            f"New access token that expires in : {expiration_timestamp.strftime(DATE_FORMAT)}" f" was set to integration_context."
+            f"New access token that expires in : {expiration_timestamp.strftime(DATE_FORMAT)} was set to integration_context."
         )
 
     def is_access_token_valid(self):
diff --git a/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass_test.py b/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass_test.py
index 45b8e9fef3ff..98d85899ab16 100644
--- a/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass_test.py
+++ b/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass_test.py
@@ -4,7 +4,7 @@
 import pytest
 from freezegun import freeze_time
 from HPEArubaClearPass import *
-from pytest import raises
+from pytest import raises # noqa: PT013
 
 CLIENT_ID = "id123"
 CLIENT_SECRET = "secret123"
diff --git a/Packs/Ipstack/Integrations/Ipstack/Ipstack_test.py b/Packs/Ipstack/Integrations/Ipstack/Ipstack_test.py
index e2b500edf548..b8f1d7c3538a 100644
--- a/Packs/Ipstack/Integrations/Ipstack/Ipstack_test.py
+++ b/Packs/Ipstack/Integrations/Ipstack/Ipstack_test.py
@@ -16,7 +16,7 @@
     "continent_name": "continent_name",
     "type": "type",
 }
-CONTEXT_PATH = "DBotScore(val.Indicator && val.Indicator == obj.Indicator && val.Vendor == obj.Vendor " "&& val.Type == obj.Type)"
+CONTEXT_PATH = "DBotScore(val.Indicator && val.Indicator == obj.Indicator && val.Vendor == obj.Vendor && val.Type == obj.Type)"
 
 CONTEXT_PATH_PRIOR_V5_5 = "DBotScore"
 
diff --git a/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP.py b/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP.py
index 108b0550c913..52018d8e1399 100644
--- a/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP.py
+++ b/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP.py
@@ -244,7 +244,7 @@ def _initialize_ldap_server(self):
         """
         if self._connection_type == "ssl":  # Secure connection (SSL\TLS)
             demisto.info(
-                f"Initializing LDAP sever with SSL/TLS (unsecure: {not self._verify})." f" port: {self._port or 'default(636)'}"
+                f"Initializing LDAP server with SSL/TLS (unsecure: {not self._verify}). 
port: {self._port or 'default(636)'}" ) tls = self._get_tls_object() server = Server(host=self._host, port=self._port, use_ssl=True, tls=tls, connect_timeout=LdapClient.TIMEOUT) @@ -280,7 +280,7 @@ def _determine_ldap_vendor_automatically(self): demisto.info(f"Determining LDAP vendor is {self._ldap_server_vendor}") except Exception as e: raise DemistoException( - f"Could not parse LDAP vendor automatically. Try to choose the vendor manually. " f"Error: str({e})" + f"Could not parse LDAP vendor automatically. Try to choose the vendor manually. Error: str({e})" ) @staticmethod @@ -607,7 +607,7 @@ def authenticate_ldap_user(self, username: str, password: str) -> str: return "Done" else: raise Exception( - f"LDAP Authentication - authentication connection failed," f" server type is: {self._ldap_server_vendor}" + f"LDAP Authentication - authentication connection failed, server type is: {self._ldap_server_vendor}" ) def search_user_data(self, username: str, attributes: list, search_user_by_dn: bool = False) -> tuple: @@ -891,7 +891,7 @@ def test_module(self): if build_number != LdapClient.DEV_BUILD_NUMBER and int(build_number) < LdapClient.SUPPORTED_BUILD_NUMBER: raise Exception( - f"LDAP Authentication integration is supported from build number:" f" {LdapClient.SUPPORTED_BUILD_NUMBER}" + f"LDAP Authentication integration is supported from build number: {LdapClient.SUPPORTED_BUILD_NUMBER}" ) if self._ldap_server_vendor == self.OPENLDAP: @@ -965,7 +965,7 @@ def main(): # pragma: no coverage if not params.get("insecure", False): msg += ' Try using: "Trust any certificate" option.\n' elif isinstance(e, LDAPInvalidPortError): - msg = "LDAP Authentication - Not valid ldap server input." " Check that server input is of form: ip or ldap://ip" + msg = "LDAP Authentication - Not valid ldap server input. Check that server input is of form: ip or ldap://ip" return_error(str(msg)) diff --git a/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP_test.py b/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP_test.py index 9e36ad33adea..8d5b666f3290 100644 --- a/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP_test.py +++ b/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP_test.py @@ -353,7 +353,7 @@ def test_get_formatted_custom_attributes_invalid_attributes_input(self): with pytest.raises(Exception) as e: client._get_formatted_custom_attributes() assert e.value.args[0] == ( - f'User defined attributes must be of the form "attrA=valA,attrB=valB,...", but got: ' f"{client.CUSTOM_ATTRIBUTE}" + f'User defined attributes must be of the form "attrA=valA,attrB=valB,...", but got: {client.CUSTOM_ATTRIBUTE}' ) @pytest.mark.parametrize( diff --git a/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2.py b/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2.py index a1e107b77c80..77453f587949 100644 --- a/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2.py +++ b/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2.py @@ -81,10 +81,7 @@ def _is_reload_needed(client: Client, data: dict) -> bool: return True now = datetime.now() - if data.get("timestamp") <= date_to_timestamp(now - timedelta(hours=client.fetch_interval_hours)): - return True - - return False + return data.get("timestamp") <= date_to_timestamp(now - timedelta(hours=client.fetch_interval_hours)) def test_module(client: Client) -> str: @@ -215,7 +212,7 @@ def main(): except ValueError: return_error("Invalid parameter was given as database refresh interval.") except Exception as e: - return_error(f"Failed to execute {demisto.command()} command. 
Error: {e!s} \n " f"tracback: {traceback.format_exc()}") + return_error(f"Failed to execute {demisto.command()} command. Error: {e!s} \n tracback: {traceback.format_exc()}") if __name__ in ("__main__", "__builtin__", "builtins"): diff --git a/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py b/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py index 41f5adcec723..900b6dc46d70 100644 --- a/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py +++ b/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py @@ -141,7 +141,7 @@ def test_reload_command(mocker): STANDARD_NOT_LOADED_MSG = "OpenPhish Database Status\nDatabase not loaded.\n" STANDARD_4_LOADED_MSG = ( - "OpenPhish Database Status\n" "Total **4** URLs loaded.\n" "Last load time **Thu Oct 01 2020 06:00:00 (UTC)**\n" + "OpenPhish Database Status\nTotal **4** URLs loaded.\nLast load time **Thu Oct 01 2020 06:00:00 (UTC)**\n" ) CONTEXT_MOCK_WITH_STATUS = [ ({}, STANDARD_NOT_LOADED_MSG), # case no data in memory diff --git a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py index 59c3ff115423..7581efd004bc 100644 --- a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py +++ b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py @@ -589,9 +589,7 @@ def fetch_incidents_command(): last_fetched_id[state] = id demisto.debug( - "PTR: End of current fetch function with last_fetch {} and last_fetched_id {}".format( - str(last_fetch), str(last_fetched_id) - ) + f"PTR: End of current fetch function with last_fetch {str(last_fetch)} and last_fetched_id {str(last_fetched_id)}" ) demisto.setLastRun({"last_fetch": last_fetch}) @@ -648,7 +646,7 @@ def add_comment_to_incident_command(): ) if incident_data.status_code < 200 or incident_data.status_code >= 300: - return_error(f"Add comment to incident command failed. URL: {fullurl}, " f"StatusCode: {incident_data.status_code}") + return_error(f"Add comment to incident command failed. URL: {fullurl}, StatusCode: {incident_data.status_code}") incident_data = incident_data.json() human_readable = create_add_comment_human_readable(incident_data) @@ -677,7 +675,7 @@ def add_user_to_incident_command(): ) if incident_data.status_code < 200 or incident_data.status_code >= 300: - return_error(f"Add comment to incident command failed. URL: {fullurl}, " f"StatusCode: {incident_data.status_code}") + return_error(f"Add comment to incident command failed. URL: {fullurl}, StatusCode: {incident_data.status_code}") return_outputs(f"The user was added successfully to incident {incident_id}", {}, {}) @@ -716,7 +714,7 @@ def ingest_alert_command(): if not json_source_id: return_error( - "To ingest alert into TRAP, you mast specify a post_url_id," "either as an argument or as an integration parameter." + "To ingest alert into TRAP, you mast specify a post_url_id,either as an argument or as an integration parameter." ) request_body = prepare_ingest_alert_request_body(assign_params(**args)) @@ -726,7 +724,7 @@ def ingest_alert_command(): ) if alert_data.status_code < 200 or alert_data.status_code >= 300: - return_error(f"Failed to ingest the alert into TRAP. URL: {fullurl}, " f"StatusCode: {alert_data.status_code}") + return_error(f"Failed to ingest the alert into TRAP. 
URL: {fullurl}, StatusCode: {alert_data.status_code}") return_outputs("The alert was successfully ingested to TRAP", {}, {}) @@ -747,7 +745,7 @@ def close_incident_command(): ) if incident_data.status_code < 200 or incident_data.status_code >= 300: - return_error(f"Incident closure failed. URL: {fullurl}, " f"StatusCode: {incident_data.status_code}") + return_error(f"Incident closure failed. URL: {fullurl}, StatusCode: {incident_data.status_code}") return_outputs(f"The incident {incident_id} was successfully closed", {}, {}) @@ -796,7 +794,7 @@ def search_quarantine(): message_delivery_time = int(message_delivery_time.timestamp() * 1000) else: demisto.info( - f'PTR: Could not parse time of incident {incident.get("id")}, got ' f'{message_delivery_time=}' + f'PTR: Could not parse time of incident {incident.get("id")}, got {message_delivery_time=}' ) continue diff --git a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponseEventCollector/ProofpointThreatResponseEventCollector.py b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponseEventCollector/ProofpointThreatResponseEventCollector.py index fed5b516f9b7..233c8e3dca29 100644 --- a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponseEventCollector/ProofpointThreatResponseEventCollector.py +++ b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponseEventCollector/ProofpointThreatResponseEventCollector.py @@ -313,7 +313,7 @@ def fetch_events_command(client, first_fetch, last_run, fetch_limit, fetch_delta ] + "Z" last_fetched_id[state] = id - demisto.debug(f"End of current fetch function with last_fetch {last_fetch!s} and last_fetched_id" f" {last_fetched_id!s}") + demisto.debug(f"End of current fetch function with last_fetch {last_fetch!s} and last_fetched_id {last_fetched_id!s}") last_run = {"last_fetch": last_fetch, "last_fetched_incident_id": last_fetched_id} diff --git a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py index d36887946a1a..c0abc69fb8b8 100644 --- a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py +++ b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py @@ -191,10 +191,13 @@ def create_human_readable(title: str, result: dict, fields: str) -> str: relation_output = { "SysID": list(map(itemgetter("sys_id"), relations)), "Target Display Value": list( - map(itemgetter("display_value"), list(map(itemgetter("target"), result.get(relation_type)))) + map(itemgetter("display_value"), list(map(itemgetter("target"), + result.get(relation_type)))) # type: ignore[arg-type] ), # type: ignore "Type Display Value": list( - map(itemgetter("display_value"), list(map(itemgetter("type"), result.get(relation_type)))) + map(itemgetter("display_value"), list(map(itemgetter("type"), + result.get(relation_type))) # type: ignore[arg-type] + ) ), # type: ignore } md += f" {tableToMarkdown(FIELD_TO_OUTPUT.get(relation_type), t=relation_output)}" diff --git a/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2_test.py b/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2_test.py index a454464b7d9c..bb76741f30a6 100644 --- a/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2_test.py +++ b/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2_test.py @@ -2491,7 +2491,7 @@ def test_multiple_query_params(requests_mock, command, args): ) requests_mock.request( "GET", - f"{url}table/sc_task?sysparm_limit=50&sysparm_offset=0&" 
"sysparm_query=assigned_to%3D123^active%3Dtrue", + f"{url}table/sc_task?sysparm_limit=50&sysparm_offset=0&sysparm_query=assigned_to%3D123^active%3Dtrue", json=RESPONSE_TICKET_ASSIGNED, ) human_readable, entry_context, result, bol = command(client, args) @@ -2823,7 +2823,7 @@ def test_get_ticket_attachment_entries_with_oauth_token(mocker): ), "When An OAuth 2.0 client is configured the 'auth' argument shouldn't be passed to 'requests.get' function" assert ( requests_get_mocker.call_args.kwargs.get("headers").get("Authorization") == f"Bearer {mock_res_for_get_access_token}" - ), "When An OAuth 2.0 client is configured the 'Authorization'" " Header argument should be passed to 'requests.get' function" + ), "When An OAuth 2.0 client is configured the 'Authorization' Header argument should be passed to 'requests.get' function" @pytest.mark.parametrize( diff --git a/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident.py b/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident.py index 2c38e9cca618..1f174e95b7f7 100644 --- a/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident.py +++ b/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident.py @@ -163,9 +163,9 @@ def main(): # Get the actual record record = record_data["result"] # Map fields according to fields_to_map that were defined earlier - mapped_record = dict( + mapped_record = dict( # noqa: C402 (fields_to_map[key], value) - for (key, value) in list([k_v for k_v in list(record.items()) if k_v[0] in list(fields_to_map.keys())]) + for (key, value) in [k_v for k_v in list(record.items()) if k_v[0] in list(fields_to_map.keys())] ) display_headers = ["ID", "Number"] diff --git a/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy.py b/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy.py index 15384c731593..2633fbccd8e7 100644 --- a/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy.py +++ b/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy.py @@ -326,7 +326,7 @@ def remove_irrelevant_incident_ids( if incident_window_end_datetime >= window_start_datetime: # We keep the incident, since it is still in the fetch window extensive_log( - f"[SplunkPy] Keeping {incident_id} as part of the last fetched IDs." f" {incident_window_end_datetime=}" + f"[SplunkPy] Keeping {incident_id} as part of the last fetched IDs. 
{incident_window_end_datetime=}" ) new_last_run_fetched_ids[incident_id] = incident_occurred_time else: @@ -336,7 +336,7 @@ def remove_irrelevant_incident_ids( # Last fetched IDs held the epoch time of their appearance, they will now hold the # new format, with an occurred time equal to the end of the window extensive_log( - f"[SplunkPy] {incident_id} was saved using old implementation," f" with value {incident_occurred_time}, keeping" + f"[SplunkPy] {incident_id} was saved using old implementation, with value {incident_occurred_time}, keeping" ) new_last_run_fetched_ids[incident_id] = {"occurred_time": window_end_time} return new_last_run_fetched_ids @@ -494,7 +494,7 @@ def fetch_notables( # New way to remove IDs last_run_fetched_ids = remove_irrelevant_incident_ids(last_run_fetched_ids, occured_start_time, latest_time) - extensive_log("[SplunkPy] Size of last_run_fetched_ids after " f"removing old IDs: {len(last_run_fetched_ids)}") + extensive_log(f"[SplunkPy] Size of last_run_fetched_ids after removing old IDs: {len(last_run_fetched_ids)}") extensive_log(f"[SplunkPy] SplunkPy - incidents fetched on last run = {last_run_fetched_ids}") demisto.debug(f"SplunkPy - total number of new incidents found is: {len(incidents)}") @@ -1786,7 +1786,7 @@ def handle_closed_notable(notable, notable_id, close_extra_labels, close_end_sta else: demisto.debug( - '"status_label" key could not be found on the returned data, ' f"skipping closure mirror for notable {notable_id}." + f'"status_label" key could not be found on the returned data, skipping closure mirror for notable {notable_id}.' ) @@ -2570,7 +2570,7 @@ def get_current_splunk_time(splunk_service: client.Service): "output_mode": OUTPUT_MODE_JSON, } searchquery_oneshot = ( - '| gentimes start=-1 | eval clock = strftime(time(), "%Y-%m-%dT%H:%M:%S")' " | sort 1 -_time | table clock" + '| gentimes start=-1 | eval clock = strftime(time(), "%Y-%m-%dT%H:%M:%S") | sort 1 -_time | table clock' ) oneshotsearch_results = splunk_service.jobs.oneshot(searchquery_oneshot, **kwargs_oneshot) @@ -3124,7 +3124,7 @@ def parse_time_to_minutes(): number_of_times = int(number_of_times) else: return_error( - "Error: Invalid fetch time, need to be a positive integer with the time unit afterwards" " e.g '2 months, 4 days'." + "Error: Invalid fetch time, need to be a positive integer with the time unit afterwards e.g '2 months, 4 days'." ) # If the user input contains a plural of a time unit, for example 'hours', we remove the 's' as it doesn't # impact the minutes in that time unit @@ -3312,7 +3312,7 @@ def splunk_submit_event_hec_command(params: dict, service, args: dict): if not event and not batch_event_data and not entry_id: raise DemistoException( - "Invalid input: Please specify one of the following arguments: `event`, " "`batch_event_data`, or `entry_id`." + "Invalid input: Please specify one of the following arguments: `event`, `batch_event_data`, or `entry_id`." ) response_info = splunk_submit_event_hec( @@ -3519,7 +3519,7 @@ def batch_kv_upload(kv_data_service_client: client.KVStoreCollectionData, json_d return kv_data_service_client.insert(json_data.encode("utf-8")) else: raise DemistoException( - "kv_store_data argument should be in json format. " '(e.g. {"key": "value"} or [{"key": "value"}, {"key": "value"}]' + 'kv_store_data argument should be in json format. (e.g. 
{"key": "value"} or [{"key": "value"}, {"key": "value"}]' ) @@ -3750,7 +3750,7 @@ def main(): # pragma: no cover comment_tag_to_splunk = params.get("comment_tag_to_splunk", "FROM XSOAR") comment_tag_from_splunk = params.get("comment_tag_from_splunk", "FROM SPLUNK") if comment_tag_to_splunk == comment_tag_from_splunk: - raise DemistoException("Comment Tag to Splunk and Comment Tag " "from Splunk cannot have the same value.") + raise DemistoException("Comment Tag to Splunk and Comment Tag from Splunk cannot have the same value.") connection_args["handler"] = requests_handler diff --git a/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py b/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py index 4e6953324098..a7fd113b5b28 100644 --- a/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py +++ b/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py @@ -352,7 +352,7 @@ def test_raw_to_dict(): assert response == EXPECTED assert response_with_message == EXPECTED_WITH_MESSAGE_ID assert list_response == {} - assert raw_message.get("SCOPE[29]") == "autopay\/events\/payroll\/v1\/earning-configuration.configuration-tags" ".modify" + assert raw_message.get("SCOPE[29]") == "autopay\/events\/payroll\/v1\/earning-configuration.configuration-tags.modify" assert isinstance(raw_message, dict) assert empty == {} assert url_test == URL_TESTING_OUT @@ -362,7 +362,7 @@ def test_raw_to_dict(): assert splunk.rawToDict('drilldown_search="key IN ("test1","test2")') == {"drilldown_search": "key IN (test1,test2)"} assert splunk.rawToDict( - '123456, sample_account="sample1", ' 'sample_account="sample2", sample_account="sample3",' ' distinct_count_ac="5"' + '123456, sample_account="sample1", sample_account="sample2", sample_account="sample3", distinct_count_ac="5"' ) == {"sample_account": "sample1, sample2, sample3", "distinct_count_ac": "5"} @@ -923,7 +923,7 @@ def test_fetch_incidents(mocker): assert demisto.incidents.call_count == 1 assert len(incidents) == 2 assert ( - incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - " "Recurring Malware Infection - Rule" + incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - Recurring Malware Infection - Rule" ) assert not incidents[0].get("owner") @@ -996,7 +996,7 @@ def test_fetch_notables(mocker): incidents = demisto.incidents.call_args[0][0] assert len(incidents) == 2 assert ( - incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - " "Recurring Malware Infection - Rule" + incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - Recurring Malware Infection - Rule" ) assert not incidents[0].get("owner") @@ -1838,8 +1838,8 @@ def test_drilldown_enrichment_main_condition(mocker, notable_data, expected_call '{"name":"View related \'$signature$\' events for $dest$","search":"| from datamodel:\\"Malware\\".\\"Malwa' 're_Attacks\\" | search dest=$dest|s$ signature=$signature|s$","earliest":1714563300,"latest":1715168700}', '{"name":"View related \'$category$\' events for $signature$","search":"| from datamodel:\\"Malware\\".\\"M' - 'alware_Attacks\\" \\n| fields category, dest, signature | search dest=$dest|s$ signature=$signature|s$","ear' - 'liest":1714563300,"latest":1715168700}', + 'alware_Attacks\\" \\n| fields category, dest, signature | search dest=$dest|s$ signature=$signature|s$",' + '"earliest":1714563300,"latest":1715168700}', ], }, 0, @@ -3076,7 +3076,7 @@ def test_splunk_search_command(mocker, polling, status): assert 
search_result.scheduled_command._args["sid"] == "123456" else: assert search_result.outputs["Splunk.Result"] == [] - assert search_result.readable_output == "### Splunk Search results for query:\n" "sid: 123456\n**No entries.**\n" + assert search_result.readable_output == "### Splunk Search results for query:\nsid: 123456\n**No entries.**\n" @pytest.mark.parametrize( @@ -3366,7 +3366,8 @@ def mocked_get_record(col, value_to_search): ( "unassigned", "", - "UserMapping: Could not find xsoar user matching splunk's unassigned. Consider adding it to the splunk_xsoar_users lookup.", + "UserMapping: Could not find xsoar user matching splunk's unassigned. Consider adding it to the" + " splunk_xsoar_users lookup.", ), ( "not_in_table", @@ -3794,7 +3795,7 @@ def test_splunk_submit_event_hec_command_no_required_arguments(): with pytest.raises( DemistoException, - match=r"Invalid input: Please specify one of the following arguments: `event`, " r"`batch_event_data`, or `entry_id`.", + match=r"Invalid input: Please specify one of the following arguments: `event`, `batch_event_data`, or `entry_id`.", ): splunk_submit_event_hec_command({"hec_url": "hec_url"}, None, {}) diff --git a/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable.py b/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable.py index 6070bbe585e8..9c864765477c 100644 --- a/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable.py +++ b/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable.py @@ -6,7 +6,7 @@ def main(): incident = demisto.incident() splunkComments = [] if not incident: - raise ValueError("Error - demisto.incident() expected to return current incident " "from context but returned None") + raise ValueError("Error - demisto.incident() expected to return current incident from context but returned None") fields = incident.get("CustomFields", []) if fields: splunkComments_str = fields.get("splunkcomments", []) diff --git a/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable_test.py b/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable_test.py index eb80895fd047..08d11f77408c 100644 --- a/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable_test.py +++ b/Packs/SplunkPy/Scripts/SplunkConvertCommentsToTable/SplunkConvertCommentsToTable_test.py @@ -1,6 +1,6 @@ import SplunkConvertCommentsToTable -EXPECTED_TABLE = "|Comment|\n" "|---|\n" "| new comment |\n" +EXPECTED_TABLE = "|Comment|\n|---|\n| new comment |\n" def test_convert_to_table(mocker): diff --git a/Packs/SplunkPy/Scripts/SplunkShowAsset/SplunkShowAsset.py b/Packs/SplunkPy/Scripts/SplunkShowAsset/SplunkShowAsset.py index 3810bc99b023..ff9deab0548e 100644 --- a/Packs/SplunkPy/Scripts/SplunkShowAsset/SplunkShowAsset.py +++ b/Packs/SplunkPy/Scripts/SplunkShowAsset/SplunkShowAsset.py @@ -8,7 +8,7 @@ def main(): asset_results = [] incident = demisto.incident() if not incident: - raise ValueError("Error - demisto.incident() expected to return current incident " "from context but returned None") + raise ValueError("Error - demisto.incident() expected to return current incident from context but returned None") labels = incident.get("labels", []) for label in labels: diff --git a/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown.py b/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown.py index e4e542d56880..633d6006af58 100644 --- 
a/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown.py +++ b/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown.py @@ -6,7 +6,7 @@ def main(): drilldown_results = [] incident = demisto.incident() if not incident: - raise ValueError("Error - demisto.incident() expected to return current incident " "from context but returned None") + raise ValueError("Error - demisto.incident() expected to return current incident from context but returned None") labels = incident.get("labels", []) for label in labels: diff --git a/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown_test.py b/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown_test.py index 77a705d1077f..3c4f78c1c657 100644 --- a/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown_test.py +++ b/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown_test.py @@ -1,7 +1,7 @@ import json import SplunkShowDrilldown -from pytest import raises +from pytest import raises # noqa: PT013 def test_incident_with_empty_custom_fields(mocker): diff --git a/Packs/SplunkPy/Scripts/SplunkShowIdentity/SplunkShowIdentity.py b/Packs/SplunkPy/Scripts/SplunkShowIdentity/SplunkShowIdentity.py index c4f66ec7541a..fb201f1b827b 100644 --- a/Packs/SplunkPy/Scripts/SplunkShowIdentity/SplunkShowIdentity.py +++ b/Packs/SplunkPy/Scripts/SplunkShowIdentity/SplunkShowIdentity.py @@ -8,7 +8,7 @@ def main(): identity_results = [] incident = demisto.incident() if not incident: - raise ValueError("Error - demisto.incident() expected to return current incident " "from context but returned None") + raise ValueError("Error - demisto.incident() expected to return current incident from context but returned None") labels = incident.get("labels", []) for label in labels: diff --git a/Packs/ThreatQ/Integrations/ThreatQ_v2/ThreatQ_v2.py b/Packs/ThreatQ/Integrations/ThreatQ_v2/ThreatQ_v2.py index 0adbc622d25d..9c4e9320f616 100644 --- a/Packs/ThreatQ/Integrations/ThreatQ_v2/ThreatQ_v2.py +++ b/Packs/ThreatQ/Integrations/ThreatQ_v2/ThreatQ_v2.py @@ -742,7 +742,7 @@ def advance_search_command(): try: search_results = res.json().get("data") except ValueError: - raise ValueError(f"Could not parse data from ThreatQ [Status code: {res.status_code}]" f"\n[Error Message: {res.text}]") + raise ValueError(f"Could not parse data from ThreatQ [Status code: {res.status_code}]\n[Error Message: {res.text}]") if not isinstance(search_results, list): search_results = [search_results] From a57b5bb6b214375f36c376c38877f2560fb1c50a Mon Sep 17 00:00:00 2001 From: merit-maita Date: Mon, 24 Mar 2025 01:58:15 +0200 Subject: [PATCH 15/18] edits --- .../CheckPointHarmonyEndpoint_test.py | 11 +++++++---- .../Integrations/CiscoFirepower/CiscoFirepower.py | 8 ++++---- .../HPEArubaClearPass/HPEArubaClearPass_test.py | 2 +- Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP.py | 4 +--- .../Integrations/OpenPhish_v2/OpenPhish_v2_test.py | 4 +--- .../ProofpointThreatResponse.py | 4 +--- .../Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py | 10 +++++----- .../ServiceNowCreateIncident.py | 2 +- Packs/SplunkPy/Integrations/SplunkPy/SplunkPy.py | 4 +--- Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py | 8 ++------ .../SplunkShowDrilldown/SplunkShowDrilldown_test.py | 2 +- 11 files changed, 25 insertions(+), 34 deletions(-) diff --git a/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint_test.py b/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint_test.py index 
a5ee23170625..6d476ae0ab02 100644 --- a/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint_test.py +++ b/Packs/CheckPointHarmonyEndpoint/Integrations/CheckPointHarmonyEndpoint/CheckPointHarmonyEndpoint_test.py @@ -984,10 +984,13 @@ def test_get_pagination_args(args: dict[str, str], expected): args (dict[str, str]): Pagination arguments. expected (tuple): Updated pagination arguments and pagination message. """ - with unittest.mock.patch( - "CommonServerPython.arg_to_number", - side_effect=lambda x: int(x) if x is not None else None, - ), unittest.mock.patch("CheckPointHarmonyEndpoint.validate_pagination_arguments") as mock_validate: + with ( + unittest.mock.patch( + "CommonServerPython.arg_to_number", + side_effect=lambda x: int(x) if x is not None else None, + ), + unittest.mock.patch("CheckPointHarmonyEndpoint.validate_pagination_arguments") as mock_validate, + ): assert CheckPointHarmonyEndpoint.get_pagination_args(args) == expected mock_validate.assert_called() diff --git a/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower.py b/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower.py index cf857f2b4acf..5658035088a1 100644 --- a/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower.py +++ b/Packs/CiscoFirepower/Integrations/CiscoFirepower/CiscoFirepower.py @@ -3561,7 +3561,7 @@ def list_intrusion_policy_command(client: Client, args: dict[str, Any]) -> Comma command_headers_by_keys=INTRUSION_POLICY_HEADERS_BY_KEYS, command_title=f"Fetched {INTRUSION_POLICY_TITLE}", command_context=INTRUSION_POLICY_CONTEXT, - raw_responses=raw_responses, # type: ignore[arg-type] + raw_responses=raw_responses, # type: ignore[arg-type] ) @@ -3732,7 +3732,7 @@ def list_intrusion_rule_command(client: Client, args: dict[str, Any]) -> Command command_headers_by_keys=INTRUSION_RULE_HEADERS_BY_KEYS, command_title=f"Fetched {INTRUSION_RULE_TITLE}", command_context=INTRUSION_RULE_CONTEXT, - raw_responses=raw_responses, # type: ignore[arg-type] + raw_responses=raw_responses, # type: ignore[arg-type] ) @@ -3955,7 +3955,7 @@ def list_intrusion_rule_group_command(client: Client, args: dict[str, Any]) -> C command_headers_by_keys=INTRUSION_RULE_GROUP_HEADERS_BY_KEYS, command_title=f"Fetched {INTRUSION_RULE_GROUP_TITLE}", command_context=INTRUSION_RULE_GROUP_CONTEXT, - raw_responses=raw_responses, # type: ignore[arg-type] + raw_responses=raw_responses, # type: ignore[arg-type] ) @@ -4097,7 +4097,7 @@ def list_network_analysis_policy_command(client: Client, args: dict[str, Any]) - command_headers_by_keys=NETWORK_ANALYSIS_POLICY_HEADERS_BY_KEYS, command_title=f"Fetched {NETWORK_ANALYSIS_POLICY_TITLE}", command_context=NETWORK_ANALYSIS_POLICY_CONTEXT, - raw_responses=raw_responses, # type: ignore[arg-type] + raw_responses=raw_responses, # type: ignore[arg-type] ) diff --git a/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass_test.py b/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass_test.py index 98d85899ab16..932cc635e888 100644 --- a/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass_test.py +++ b/Packs/HPEArubaClearPass/Integrations/HPEArubaClearPass/HPEArubaClearPass_test.py @@ -4,7 +4,7 @@ import pytest from freezegun import freeze_time from HPEArubaClearPass import * -from pytest import raises # noqa: PT013 +from pytest import raises # noqa: PT013 CLIENT_ID = "id123" CLIENT_SECRET = "secret123" diff --git a/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP.py 
b/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP.py index 52018d8e1399..d66fc6d7890a 100644 --- a/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP.py +++ b/Packs/OpenLDAP/Integrations/OpenLDAP/OpenLDAP.py @@ -606,9 +606,7 @@ def authenticate_ldap_user(self, username: str, password: str) -> str: ldap_conn.unbind() return "Done" else: - raise Exception( - f"LDAP Authentication - authentication connection failed, server type is: {self._ldap_server_vendor}" - ) + raise Exception(f"LDAP Authentication - authentication connection failed, server type is: {self._ldap_server_vendor}") def search_user_data(self, username: str, attributes: list, search_user_by_dn: bool = False) -> tuple: """ diff --git a/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py b/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py index 900b6dc46d70..656d529db232 100644 --- a/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py +++ b/Packs/OpenPhish/Integrations/OpenPhish_v2/OpenPhish_v2_test.py @@ -140,9 +140,7 @@ def test_reload_command(mocker): STANDARD_NOT_LOADED_MSG = "OpenPhish Database Status\nDatabase not loaded.\n" -STANDARD_4_LOADED_MSG = ( - "OpenPhish Database Status\nTotal **4** URLs loaded.\nLast load time **Thu Oct 01 2020 06:00:00 (UTC)**\n" -) +STANDARD_4_LOADED_MSG = "OpenPhish Database Status\nTotal **4** URLs loaded.\nLast load time **Thu Oct 01 2020 06:00:00 (UTC)**\n" CONTEXT_MOCK_WITH_STATUS = [ ({}, STANDARD_NOT_LOADED_MSG), # case no data in memory ( diff --git a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py index 7581efd004bc..00ed673ea294 100644 --- a/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py +++ b/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py @@ -793,9 +793,7 @@ def search_quarantine(): if message_delivery_time: message_delivery_time = int(message_delivery_time.timestamp() * 1000) else: - demisto.info( - f'PTR: Could not parse time of incident {incident.get("id")}, got {message_delivery_time=}' - ) + demisto.info(f'PTR: Could not parse time of incident {incident.get("id")}, got {message_delivery_time=}') continue if email.get("messageId") == mid and email.get("recipient").get("email") == recipient and message_delivery_time: diff --git a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py index c0abc69fb8b8..bf8fa3ff17f8 100644 --- a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py +++ b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py @@ -191,13 +191,13 @@ def create_human_readable(title: str, result: dict, fields: str) -> str: relation_output = { "SysID": list(map(itemgetter("sys_id"), relations)), "Target Display Value": list( - map(itemgetter("display_value"), list(map(itemgetter("target"), - result.get(relation_type)))) # type: ignore[arg-type] + map(itemgetter("display_value"), list(map(itemgetter("target"), result.get(relation_type)))) # type: ignore[arg-type] ), # type: ignore "Type Display Value": list( - map(itemgetter("display_value"), list(map(itemgetter("type"), - result.get(relation_type))) # type: ignore[arg-type] - ) + map( + itemgetter("display_value"), + list(map(itemgetter("type"), result.get(relation_type))), # type: ignore[arg-type] + ) ), # type: ignore } md += f" 
{tableToMarkdown(FIELD_TO_OUTPUT.get(relation_type), t=relation_output)}" diff --git a/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident.py b/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident.py index 1f174e95b7f7..d22073499d9d 100644 --- a/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident.py +++ b/Packs/ServiceNow/Scripts/ServiceNowCreateIncident/ServiceNowCreateIncident.py @@ -163,7 +163,7 @@ def main(): # Get the actual record record = record_data["result"] # Map fields according to fields_to_map that were defined earlier - mapped_record = dict( # noqa: C402 + mapped_record = dict( # noqa: C402 (fields_to_map[key], value) for (key, value) in [k_v for k_v in list(record.items()) if k_v[0] in list(fields_to_map.keys())] ) diff --git a/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy.py b/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy.py index 2633fbccd8e7..d4870c90328e 100644 --- a/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy.py +++ b/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy.py @@ -2569,9 +2569,7 @@ def get_current_splunk_time(splunk_service: client.Service): "earliest_time": time, "output_mode": OUTPUT_MODE_JSON, } - searchquery_oneshot = ( - '| gentimes start=-1 | eval clock = strftime(time(), "%Y-%m-%dT%H:%M:%S") | sort 1 -_time | table clock' - ) + searchquery_oneshot = '| gentimes start=-1 | eval clock = strftime(time(), "%Y-%m-%dT%H:%M:%S") | sort 1 -_time | table clock' oneshotsearch_results = splunk_service.jobs.oneshot(searchquery_oneshot, **kwargs_oneshot) diff --git a/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py b/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py index a7fd113b5b28..f7ea30744ef3 100644 --- a/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py +++ b/Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py @@ -922,9 +922,7 @@ def test_fetch_incidents(mocker): incidents = demisto.incidents.call_args[0][0] assert demisto.incidents.call_count == 1 assert len(incidents) == 2 - assert ( - incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - Recurring Malware Infection - Rule" - ) + assert incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - Recurring Malware Infection - Rule" assert not incidents[0].get("owner") @@ -995,9 +993,7 @@ def test_fetch_notables(mocker): ) incidents = demisto.incidents.call_args[0][0] assert len(incidents) == 2 - assert ( - incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - Recurring Malware Infection - Rule" - ) + assert incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - Recurring Malware Infection - Rule" assert not incidents[0].get("owner") diff --git a/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown_test.py b/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown_test.py index 3c4f78c1c657..5209f1f2b6aa 100644 --- a/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown_test.py +++ b/Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown_test.py @@ -1,7 +1,7 @@ import json import SplunkShowDrilldown -from pytest import raises # noqa: PT013 +from pytest import raises # noqa: PT013 def test_incident_with_empty_custom_fields(mocker): From ff630c29841082782a23550682727e5afa985c4d Mon Sep 17 00:00:00 2001 From: merit-maita Date: Mon, 24 Mar 2025 02:08:04 +0200 Subject: [PATCH 16/18] edits --- .pre-commit-config_template.yaml | 2 -- .../Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py | 5 
++++-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/.pre-commit-config_template.yaml b/.pre-commit-config_template.yaml
index 20dfb3eba3ba..0284ad396d21 100644
--- a/.pre-commit-config_template.yaml
+++ b/.pre-commit-config_template.yaml
@@ -47,8 +47,6 @@ repos:
     - --config=nightly_ruff.toml
     args:docker_autoupdate:
     - --config=nightly_ruff.toml
-  - id: ruff-format
-    min_py_version: '3.7'
 - repo: https://github.com/hhatto/autopep8
   rev: v2.3.1
   hooks:
diff --git a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py
index bf8fa3ff17f8..eee79266afbf 100644
--- a/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py
+++ b/Packs/ServiceNow/Integrations/ServiceNow_CMDB/ServiceNow_CMDB.py
@@ -191,7 +191,10 @@ def create_human_readable(title: str, result: dict, fields: str) -> str:
         relation_output = {
             "SysID": list(map(itemgetter("sys_id"), relations)),
             "Target Display Value": list(
-                map(itemgetter("display_value"), list(map(itemgetter("target"), result.get(relation_type))))  # type: ignore[arg-type]
+                map(
+                    itemgetter("display_value"),
+                    list(map(itemgetter("target"), result.get(relation_type))),  # type: ignore[arg-type]
+                )
             ),  # type: ignore
             "Type Display Value": list(
                 map(

From 0b5c053740d61ed0df54d23efe880e1505e369a2 Mon Sep 17 00:00:00 2001
From: Content Bot
Date: Tue, 25 Mar 2025 09:31:39 +0000
Subject: [PATCH 17/18] Bump pack from version SplunkPy to 3.2.7.

---
 Packs/SplunkPy/ReleaseNotes/3_2_7.md | 24 ++++++++++++++++++++++++
 Packs/SplunkPy/pack_metadata.json    |  2 +-
 2 files changed, 25 insertions(+), 1 deletion(-)
 create mode 100644 Packs/SplunkPy/ReleaseNotes/3_2_7.md

diff --git a/Packs/SplunkPy/ReleaseNotes/3_2_7.md b/Packs/SplunkPy/ReleaseNotes/3_2_7.md
new file mode 100644
index 000000000000..f3e2615b5a7f
--- /dev/null
+++ b/Packs/SplunkPy/ReleaseNotes/3_2_7.md
@@ -0,0 +1,24 @@
+
+#### Integrations
+
+##### SplunkPy
+
+- Metadata and documentation improvements.
+
+#### Scripts
+
+##### SplunkShowDrilldown
+
+- Metadata and documentation improvements.
+##### SplunkShowAsset
+
+- Metadata and documentation improvements.
+##### SplunkConvertCommentsToTable
+
+- Metadata and documentation improvements.
+##### SplunkAddComment
+
+- Metadata and documentation improvements.
+##### SplunkShowIdentity
+
+- Metadata and documentation improvements.
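
[Illustrative sketch, not part of any hunk in this series.] For context on the template change in PATCH 16 above (whose counterpart, per PATCH 18's subject, drops autopep8 in favor of ruff formatting): a minimal pre-commit hook entry for ruff lint plus format might look as follows. It assumes the upstream astral-sh/ruff-pre-commit repository; the rev pin is illustrative and not taken from the patch, and min_py_version / args:docker_autoupdate: are this template's own custom keys (presumably consumed by the repo's pre-commit tooling rather than by pre-commit itself), not standard pre-commit fields.

- repo: https://github.com/astral-sh/ruff-pre-commit
  rev: v0.9.0  # illustrative pin, not taken from the patch
  hooks:
  - id: ruff  # linter; --fix applies autofixes
    args:
    - --fix
    args:docker_autoupdate:
    - --config=nightly_ruff.toml  # nightly config override, mirroring the hunk above
  - id: ruff-format  # the formatter hook this series removes and re-adds
    min_py_version: '3.7'  # template-specific key, as seen in the hunk above
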
diff --git a/Packs/SplunkPy/pack_metadata.json b/Packs/SplunkPy/pack_metadata.json index 2f0eee736749..2f3def199933 100644 --- a/Packs/SplunkPy/pack_metadata.json +++ b/Packs/SplunkPy/pack_metadata.json @@ -2,7 +2,7 @@ "name": "Splunk", "description": "Run queries on Splunk servers.", "support": "xsoar", - "currentVersion": "3.2.6", + "currentVersion": "3.2.7", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "", From a7f11e720c7eee1e2e55f520da0c41739cafdebb Mon Sep 17 00:00:00 2001 From: merit-maita Date: Mon, 31 Mar 2025 03:07:53 +0300 Subject: [PATCH 18/18] removed autopep and added ruff_format edits --- .../Integrations/ServiceNowv2/ServiceNowv2.py | 185 +++++++++++------- .../ServiceNowv2/ServiceNowv2_test.py | 168 +++++++++------- Packs/ServiceNow/ReleaseNotes/2_7_11.md | 36 ++++ Packs/ServiceNow/pack_metadata.json | 2 +- 4 files changed, 248 insertions(+), 143 deletions(-) create mode 100644 Packs/ServiceNow/ReleaseNotes/2_7_11.md diff --git a/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2.py b/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2.py index ca7a262dbc09..1fa39f452e04 100644 --- a/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2.py +++ b/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2.py @@ -719,8 +719,8 @@ def _construct_url(self, custom_api: str, sc_api: bool, cr_api: bool, path: str, if custom_api: if not custom_api.startswith("/"): return_error("Argument custom_api must start with a leading forward slash '/'") - server_url = demisto.params()['url'] - url = f'{get_server_url(server_url)}{custom_api}{path}' + server_url = demisto.params()["url"] + url = f"{get_server_url(server_url)}{custom_api}{path}" elif sc_api: url = f"{self._sc_server_url}{path}" elif cr_api: @@ -736,38 +736,80 @@ def _construct_url(self, custom_api: str, sc_api: bool, cr_api: bool, path: str, def _send_file_request(self, url: str, method: str, headers: dict, body: dict, params: dict, file: dict) -> requests.Response: # Not supported in v2 - url = url.replace('/v2', '/v1') + url = url.replace("/v2", "/v1") try: - file_entry = file['id'] - file_name = file['name'] - file_path = demisto.getFilePath(file_entry)['path'] - with open(file_path, 'rb') as f: + file_entry = file["id"] + file_name = file["name"] + file_path = demisto.getFilePath(file_entry)["path"] + with open(file_path, "rb") as f: file_info = (file_name, f, self.get_content_type(file_name)) if self.use_oauth: access_token = self.snow_client.get_access_token() - headers.update({'Authorization': f'Bearer {access_token}'}) - return requests.request(method, url, headers=headers, data=body, params=params, - files={'file': file_info}, verify=self._verify, proxies=self._proxies) + headers.update({"Authorization": f"Bearer {access_token}"}) + return requests.request( + method, + url, + headers=headers, + data=body, + params=params, + files={"file": file_info}, + verify=self._verify, + proxies=self._proxies, + ) else: - return requests.request(method, url, headers=headers, data=body, params=params, - files={'file': file_info}, auth=self._auth, - verify=self._verify, proxies=self._proxies) + return requests.request( + method, + url, + headers=headers, + data=body, + params=params, + files={"file": file_info}, + auth=self._auth, + verify=self._verify, + proxies=self._proxies, + ) except Exception as err: - raise Exception(f'Failed to upload file - {str(err)}') + raise Exception(f"Failed to upload file - {str(err)}") def _send_regular_request(self, url: str, method: str, headers: dict, 
body: dict, params: dict) -> requests.Response: if self.use_oauth: access_token = self.snow_client.get_access_token() - headers.update({'Authorization': f'Bearer {access_token}'}) - return requests.request(method, url, headers=headers, data=json.dumps(body) if body else {}, - params=params, verify=self._verify, proxies=self._proxies) + headers.update({"Authorization": f"Bearer {access_token}"}) + return requests.request( + method, + url, + headers=headers, + data=json.dumps(body) if body else {}, + params=params, + verify=self._verify, + proxies=self._proxies, + ) else: - return requests.request(method, url, headers=headers, data=json.dumps(body) if body else {}, - params=params, auth=self._auth, verify=self._verify, proxies=self._proxies) + return requests.request( + method, + url, + headers=headers, + data=json.dumps(body) if body else {}, + params=params, + auth=self._auth, + verify=self._verify, + proxies=self._proxies, + ) - def send_request(self, path: str, method: str = 'GET', body: dict | None = None, params: dict | None = None, - headers: dict | None = None, file=None, sc_api: bool = False, cr_api: bool = False, - get_attachments: bool = False, no_record_found_res: dict = {'result': []}, custom_api: str = ''): + def send_request( + self, + path: str, + method: str = "GET", + body: dict | None = None, + params: dict | None = None, + headers: dict | None = None, + file=None, + sc_api: bool = False, + cr_api: bool = False, + get_attachments: bool = False, + no_record_found_res: dict = {"result": []}, + custom_api: str = "", + ): """Generic request to ServiceNow. This method handles both regular requests and file uploads Args: @@ -792,15 +834,8 @@ def send_request(self, path: str, method: str = 'GET', body: dict | None = None, body = body or {} params = params or {} url = self._construct_url(custom_api, sc_api, cr_api, path, get_attachments) - headers = headers or { - 'Accept': 'application/json', - 'Content-Type': 'application/json' - } - demisto.debug( - f"Constructed URL: {url}\n" - f"Request headers: {headers}\n" - f"Request params: {params}" - ) + headers = headers or {"Accept": "application/json", "Content-Type": "application/json"} + demisto.debug(f"Constructed URL: {url}\nRequest headers: {headers}\nRequest params: {params}") for attempt in range(1, MAX_RETRY + 1): # retry mechanism for 401 Unauthorized errors @@ -828,22 +863,22 @@ def send_request(self, path: str, method: str = 'GET', body: dict | None = None, return "" raise Exception(f"Error parsing reply - {res.content!s} - {err!s}") - if error := json_res.get('error', {}): + if error := json_res.get("error", {}): if res.status_code == 401 and attempt < MAX_RETRY: demisto.debug(f"Got status code 401. Retrying... 
(Attempt {attempt} of {MAX_RETRY})") continue else: if isinstance(error, dict): - message = error.get('message') - details = error.get('detail') - if message == 'No Record found': + message = error.get("message") + details = error.get("detail") + if message == "No Record found": demisto.debug("No record found, returning empty result") return no_record_found_res else: raise Exception( - f'ServiceNow Error: {message}, details: {details}' - f' Got status code {res.status_code} with url {url} with body {str(res.content)}' - f' with response headers {str(res.headers)}' + f"ServiceNow Error: {message}, details: {details}" + f" Got status code {res.status_code} with url {url} with body {str(res.content)}" + f" with response headers {str(res.headers)}" ) else: raise Exception(f"ServiceNow Error: {error}") @@ -852,8 +887,8 @@ def send_request(self, path: str, method: str = 'GET', body: dict | None = None, return json_res else: raise Exception( - f'Got status code {res.status_code} with url {url} with body {str(res.content)}' - f' with response headers {str(res.headers)}' + f"Got status code {res.status_code} with url {url} with body {str(res.content)}" + f" with response headers {str(res.headers)}" ) return json_res @@ -3441,7 +3476,7 @@ def main(): PARSE AND VALIDATE INTEGRATION PARAMS """ command = demisto.command() - demisto.debug(f'Executing command {command}') + demisto.debug(f"Executing command {command}") params = demisto.params() args = demisto.args() @@ -3552,33 +3587,33 @@ def main(): display_date_format=display_date_format, ) commands: dict[str, Callable[[Client, dict[str, str]], tuple[str, dict[Any, Any], dict[Any, Any], bool]]] = { - 'test-module': test_module, - 'servicenow-oauth-test': oauth_test_module, - 'servicenow-oauth-login': login_command, - 'servicenow-update-ticket': update_ticket_command, - 'servicenow-create-ticket': create_ticket_command, - 'servicenow-create-ticket-quick-action': create_ticket_command, - 'servicenow-delete-ticket': delete_ticket_command, - 'servicenow-query-tickets': query_tickets_command, - 'servicenow-add-link': add_link_command, - 'servicenow-add-comment': add_comment_command, - 'servicenow-upload-file': upload_file_command, - 'servicenow-add-tag': add_tag_command, - 'servicenow-get-record': get_record_command, - 'servicenow-update-record': update_record_command, - 'servicenow-create-record': create_record_command, - 'servicenow-delete-record': delete_record_command, - 'servicenow-query-table': query_table_command, - 'servicenow-list-table-fields': list_table_fields_command, - 'servicenow-query-computers': query_computers_command, - 'servicenow-query-groups': query_groups_command, - 'servicenow-query-users': query_users_command, - 'servicenow-get-table-name': get_table_name_command, - 'servicenow-query-items': query_items_command, - 'servicenow-get-item-details': get_item_details_command, - 'servicenow-create-item-order': create_order_item_command, - 'servicenow-document-route-to-queue': document_route_to_table, - 'servicenow-delete-file': delete_attachment_command, + "test-module": test_module, + "servicenow-oauth-test": oauth_test_module, + "servicenow-oauth-login": login_command, + "servicenow-update-ticket": update_ticket_command, + "servicenow-create-ticket": create_ticket_command, + "servicenow-create-ticket-quick-action": create_ticket_command, + "servicenow-delete-ticket": delete_ticket_command, + "servicenow-query-tickets": query_tickets_command, + "servicenow-add-link": add_link_command, + "servicenow-add-comment": add_comment_command, + 
"servicenow-upload-file": upload_file_command, + "servicenow-add-tag": add_tag_command, + "servicenow-get-record": get_record_command, + "servicenow-update-record": update_record_command, + "servicenow-create-record": create_record_command, + "servicenow-delete-record": delete_record_command, + "servicenow-query-table": query_table_command, + "servicenow-list-table-fields": list_table_fields_command, + "servicenow-query-computers": query_computers_command, + "servicenow-query-groups": query_groups_command, + "servicenow-query-users": query_users_command, + "servicenow-get-table-name": get_table_name_command, + "servicenow-query-items": query_items_command, + "servicenow-get-item-details": get_item_details_command, + "servicenow-create-item-order": create_order_item_command, + "servicenow-document-route-to-queue": document_route_to_table, + "servicenow-delete-file": delete_attachment_command, } if command == "fetch-incidents": raise_exception = True @@ -3592,24 +3627,24 @@ def main(): return_results(get_remote_data_command(client, demisto.args(), demisto.params())) elif command == "update-remote-system": return_results(update_remote_system_command(client, demisto.args(), demisto.params())) - elif command == 'get-mapping-fields': + elif command == "get-mapping-fields": return_results(get_mapping_fields_command(client)) - elif command == 'get-modified-remote-data': + elif command == "get-modified-remote-data": return_results(get_modified_remote_data_command(client, args, update_timestamp_field, mirror_limit)) - elif command == 'servicenow-create-co-from-template': + elif command == "servicenow-create-co-from-template": return_results(create_co_from_template_command(client, demisto.args())) - elif command == 'servicenow-get-tasks-for-co': + elif command == "servicenow-get-tasks-for-co": return_results(get_tasks_for_co_command(client, demisto.args())) - elif command == 'servicenow-get-ticket-notes': + elif command == "servicenow-get-ticket-notes": return_results(get_ticket_notes_command(client, args, params)) - elif command == 'servicenow-get-ticket-attachments': + elif command == "servicenow-get-ticket-attachments": return_results(get_attachment_command(client, args)) elif command in commands: md_, ec_, raw_response, ignore_auto_extract = commands[command](client, args) return_outputs(md_, ec_, raw_response, ignore_auto_extract=ignore_auto_extract) else: raise_exception = True - raise NotImplementedError(f'{COMMAND_NOT_IMPLEMENTED_MSG}: {command}') + raise NotImplementedError(f"{COMMAND_NOT_IMPLEMENTED_MSG}: {command}") except Exception as err: LOG(err) diff --git a/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2_test.py b/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2_test.py index c395d09433da..b65e94afe754 100644 --- a/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2_test.py +++ b/Packs/ServiceNow/Integrations/ServiceNowv2/ServiceNowv2_test.py @@ -1508,49 +1508,61 @@ def test_not_authenticated_retry_positive(requests_mock, mocker): - Verify debug messages - Ensure the send_request function runs successfully without exceptions """ - mocker.patch.object(demisto, 'debug') - client = Client('http://server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name') - requests_mock.get('http://server_url', [ - { - 'status_code': 401, - 'json': { - 'error': {'message': 'User Not Authenticated', 'detail': 'Required to provide Auth 
information'}, - 'status': 'failure' - } - }, - { - 'status_code': 401, - 'json': { - 'error': {'message': 'User Not Authenticated', 'detail': 'Required to provide Auth information'}, - 'status': 'failure' - } - }, - { - 'status_code': 200, - 'json': {} - } - ]) - assert client.send_request('') == {} + mocker.patch.object(demisto, "debug") + client = Client( + "http://server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + ) + requests_mock.get( + "http://server_url", + [ + { + "status_code": 401, + "json": { + "error": {"message": "User Not Authenticated", "detail": "Required to provide Auth information"}, + "status": "failure", + }, + }, + { + "status_code": 401, + "json": { + "error": {"message": "User Not Authenticated", "detail": "Required to provide Auth information"}, + "status": "failure", + }, + }, + {"status_code": 200, "json": {}}, + ], + ) + assert client.send_request("") == {} debug = demisto.debug.call_args_list - assert debug[0][0][0] == 'Sending request to ServiceNow. Method: GET, Path: ' + assert debug[0][0][0] == "Sending request to ServiceNow. Method: GET, Path: " assert debug[1][0][0] == ( "Constructed URL: http://server_url\nRequest headers: " "{'Accept': 'application/json', 'Content-Type': 'application/json'}\nRequest params: {}" ) - assert debug[2][0][0] == f'Request attempt 1 of {MAX_RETRY}' - assert debug[3][0][0] == 'Sending regular request' - assert debug[4][0][0] == 'Response status code: 401' - assert debug[5][0][0] == f'Got status code 401. Retrying... (Attempt 1 of {MAX_RETRY})' - assert debug[6][0][0] == f'Request attempt 2 of {MAX_RETRY}' - assert debug[7][0][0] == 'Sending regular request' - assert debug[8][0][0] == 'Response status code: 401' - assert debug[9][0][0] == f'Got status code 401. Retrying... (Attempt 2 of {MAX_RETRY})' - assert debug[10][0][0] == f'Request attempt 3 of {MAX_RETRY}' - assert debug[11][0][0] == 'Sending regular request' - assert debug[12][0][0] == 'Response status code: 200' + assert debug[2][0][0] == f"Request attempt 1 of {MAX_RETRY}" + assert debug[3][0][0] == "Sending regular request" + assert debug[4][0][0] == "Response status code: 401" + assert debug[5][0][0] == f"Got status code 401. Retrying... (Attempt 1 of {MAX_RETRY})" + assert debug[6][0][0] == f"Request attempt 2 of {MAX_RETRY}" + assert debug[7][0][0] == "Sending regular request" + assert debug[8][0][0] == "Response status code: 401" + assert debug[9][0][0] == f"Got status code 401. Retrying... 
(Attempt 2 of {MAX_RETRY})" + assert debug[10][0][0] == f"Request attempt 3 of {MAX_RETRY}" + assert debug[11][0][0] == "Sending regular request" + assert debug[12][0][0] == "Response status code: 200" def test_not_authenticated_retry_negative(requests_mock, mocker: MockerFixture): @@ -1565,37 +1577,55 @@ def test_not_authenticated_retry_negative(requests_mock, mocker: MockerFixture): - Verify debug messages - Ensure the send_request function fails and raises expected error message """ - mocker.patch.object(demisto, 'debug') - client = Client('http://server_url', 'sc_server_url', 'cr_server_url', 'username', 'password', - 'verify', 'fetch_time', 'sysparm_query', 'sysparm_limit', 'timestamp_field', - 'ticket_type', 'get_attachments', 'incident_name') - requests_mock.get('http://server_url', [ - { - 'status_code': 401, - 'json': { - 'error': {'message': 'User Not Authenticated', 'detail': 'Required to provide Auth information'}, - 'status': 'failure' - } - }, - ] * MAX_RETRY) + mocker.patch.object(demisto, "debug") + client = Client( + "http://server_url", + "sc_server_url", + "cr_server_url", + "username", + "password", + "verify", + "fetch_time", + "sysparm_query", + "sysparm_limit", + "timestamp_field", + "ticket_type", + "get_attachments", + "incident_name", + ) + requests_mock.get( + "http://server_url", + [ + { + "status_code": 401, + "json": { + "error": {"message": "User Not Authenticated", "detail": "Required to provide Auth information"}, + "status": "failure", + }, + }, + ] + * MAX_RETRY, + ) with pytest.raises(Exception) as ex: - client.send_request('') - assert str(ex.value) == "ServiceNow Error: User Not Authenticated, details: Required to provide Auth information " \ - "Got status code 401 with url http://server_url with body b'{\"error\": {\"message\": " \ - "\"User Not Authenticated\", \"detail\": \"Required to provide Auth information\"}, " \ - "\"status\": \"failure\"}' with response headers {}" + client.send_request("") + assert ( + str(ex.value) == "ServiceNow Error: User Not Authenticated, details: Required to provide Auth information " + 'Got status code 401 with url http://server_url with body b\'{"error": {"message": ' + '"User Not Authenticated", "detail": "Required to provide Auth information"}, ' + '"status": "failure"}\' with response headers {}' + ) debug = demisto.debug.call_args_list - assert debug[0][0][0] == 'Sending request to ServiceNow. Method: GET, Path: ' + assert debug[0][0][0] == "Sending request to ServiceNow. Method: GET, Path: " assert debug[1][0][0] == ( "Constructed URL: http://server_url\nRequest headers: " "{'Accept': 'application/json', 'Content-Type': 'application/json'}\nRequest params: {}" ) - assert debug[2][0][0] == f'Request attempt 1 of {MAX_RETRY}' - assert debug[3][0][0] == 'Sending regular request' - assert debug[4][0][0] == 'Response status code: 401' - assert debug[5][0][0] == f'Got status code 401. Retrying... (Attempt 1 of {MAX_RETRY})' + assert debug[2][0][0] == f"Request attempt 1 of {MAX_RETRY}" + assert debug[3][0][0] == "Sending regular request" + assert debug[4][0][0] == "Response status code: 401" + assert debug[5][0][0] == f"Got status code 401. Retrying... 
(Attempt 1 of {MAX_RETRY})" def test_oauth_authentication(mocker, requests_mock): @@ -3122,13 +3152,17 @@ def add_comment_mock(*args): assert res == "1234" -@pytest.mark.parametrize('mock_json, expected_results', - [ - ({'error': 'invalid client.'}, 'ServiceNow Error: invalid client.'), - ({'error': {'message': 'invalid client', 'detail': 'the client you have entered is invalid.'}}, - 'ServiceNow Error: invalid client, details: the client you have entered is invalid. ' - 'Got status code 400 with url server_urltable with body with response headers {}') - ]) +@pytest.mark.parametrize( + "mock_json, expected_results", + [ + ({"error": "invalid client."}, "ServiceNow Error: invalid client."), + ( + {"error": {"message": "invalid client", "detail": "the client you have entered is invalid."}}, + "ServiceNow Error: invalid client, details: the client you have entered is invalid. " + "Got status code 400 with url server_urltable with body with response headers {}", + ), + ], +) def test_send_request_with_str_error_response(mocker, mock_json, expected_results): """ Given: diff --git a/Packs/ServiceNow/ReleaseNotes/2_7_11.md b/Packs/ServiceNow/ReleaseNotes/2_7_11.md new file mode 100644 index 000000000000..4c3e3aa40035 --- /dev/null +++ b/Packs/ServiceNow/ReleaseNotes/2_7_11.md @@ -0,0 +1,36 @@ + +#### Integrations + +##### ServiceNow Event Collector + +- Documentation and metadata improvements. +##### ServiceNow IAM + +- Documentation and metadata improvements. +##### ServiceNow v2 + +- Documentation and metadata improvements. +##### ServiceNow CMDB + +- Documentation and metadata improvements. + +#### Scripts + +##### ServiceNowIncidentStatus + +- Documentation and metadata improvements. +##### ServiceNowTroubleshoot + +- Documentation and metadata improvements. +##### ServiceNowQueryIncident + +- Documentation and metadata improvements. +##### ServiceNowAddComment + +- Documentation and metadata improvements. +##### ServiceNowCreateIncident + +- Documentation and metadata improvements. +##### ServiceNowUpdateIncident + +- Documentation and metadata improvements. diff --git a/Packs/ServiceNow/pack_metadata.json b/Packs/ServiceNow/pack_metadata.json index e9bf8327039b..e3d6300b9c80 100644 --- a/Packs/ServiceNow/pack_metadata.json +++ b/Packs/ServiceNow/pack_metadata.json @@ -2,7 +2,7 @@ "name": "ServiceNow", "description": "Use The ServiceNow IT Service Management (ITSM) solution to modernize the way you manage and deliver services to your users.", "support": "xsoar", - "currentVersion": "2.7.10", + "currentVersion": "2.7.11", "author": "Cortex XSOAR", "url": "https://www.paloaltonetworks.com/cortex", "email": "",