diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 000000000..9ec1a8e27 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,5 @@ +# Migrate python code style to Black + +2d2de2d1e5167b54b253fa496d91e9a3b0b859c1 +e7d4c5be7400bbe6fa5f7b59d34d07a51d15bd31 +e03e94b5439bf30d0b03430980745d5adc9cc631 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4a93752f9..bda237015 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,12 +35,12 @@ jobs: uses: actions/upload-artifact@v3 with: name: build_api - path: services/coverage.xml + path: services/cov.xml - - name: Find coverage.xml + - name: Find cov.xml shell: bash run: | - find "$GITHUB_WORKSPACE/services" -name "coverage.xml" + find "$GITHUB_WORKSPACE/services" -name "cov.xml" webapp: runs-on: ubuntu-latest @@ -89,7 +89,7 @@ jobs: continue-on-error: true with: name: build_api - path: services/coverage.xml + path: services/cov.xml - name: Download Code Coverage Results uses: actions/download-artifact@v3 @@ -97,20 +97,23 @@ jobs: name: webapp path: webapp - - name: Find coverage.xml + - name: Find cov.xml shell: bash run: | - find "$GITHUB_WORKSPACE" -name "coverage.xml" + find "$GITHUB_WORKSPACE" -name "cov.xml" - name: Find lcov.info shell: bash run: | find "$GITHUB_WORKSPACE" -name "lcov.info" - - name: Setup SonarScanner - uses: warchant/setup-sonar-scanner@v4 + - uses: actions/setup-java@v3 with: - version: 4.8.0.2856 + distribution: 'temurin' + java-version: '17' + + - name: Setup SonarScanner + uses: warchant/setup-sonar-scanner@v7 - name: Generate Sonar Properties File run: | @@ -124,10 +127,10 @@ jobs: sonar.projectBaseDir=$GITHUB_WORKSPACE sonar.projectKey=usdot-jpo-ode_jpo-cvmanager sonar.projectName=jpo-cvmanager - sonar.python.coverage.reportPaths=$GITHUB_WORKSPACE/services/coverage.xml - sonar.python.version=3.9 + sonar.python.coverage.reportPaths=$GITHUB_WORKSPACE/services/cov.xml + sonar.python.version=3.12 api.sonar.projectBaseDir=$GITHUB_WORKSPACE/services - api.sonar.sources=addons/images/bsm_query,addons/images/count_metric,addons/images/iss_health_check,addons/images/rsu_ping,api/src,common/pgquery.py + api.sonar.sources=addons/images/bsm_query,addons/images/count_metric,addons/images/firmware_manager,addons/images/iss_health_check,addons/images/rsu_ping,api/src,common/pgquery.py api.sonar.tests=addons/tests,api/tests,common/tests webapp.sonar.projectBaseDir=$GITHUB_WORKSPACE/webapp webapp.sonar.sources=src diff --git a/.gitignore b/.gitignore index 89f7746f5..bc3ddab6b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ *.pyc *.pyc.* -.env +.env* .coverage cov.xml .venv diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 0f148de5e..aaedd29f3 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -1,5 +1,3 @@ { - "recommendations": [ - "esbenp.prettier-vscode" - ] -} \ No newline at end of file + "recommendations": ["esbenp.prettier-vscode", "ms-python.black-formatter"] +} diff --git a/.vscode/settings.json b/.vscode/settings.json index 6d2dfdbbe..7ba6ab22f 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -18,5 +18,9 @@ "python.envFile": "${workspaceFolder}/.env", "terminal.integrated.env.windows": { "PYTHONPATH": 
"${workspaceFolder}/services;${workspaceFolder}/services/addons/images/bsm_query;${workspaceFolder}/services/addons/images/count_metric;${workspaceFolder}/services/addons/images/firmware_manager;${workspaceFolder}/services/addons/images/iss_health_check;${workspaceFolder}/services/addons/images/rsu_ping_fetch;${workspaceFolder}/services/api/src;${workspaceFolder}/services/common" + }, + "[python]": { + "editor.defaultFormatter": "ms-python.black-formatter", + "editor.formatOnSave": true } } diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 203f42bc1..e34b83b7f 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -21,7 +21,7 @@ "up": { "detached": true, "build": true, - "services": ["cvmanager_keycloak", "cvmanager_postgres", "firmware_manager"] + "services": ["cvmanager_keycloak", "cvmanager_postgres"] }, "files": ["${workspaceFolder}/docker-compose.yml"], "envFile": "${workspaceFolder}/.env" diff --git a/README.md b/README.md index 9bdc6c067..ecf65f353 100644 --- a/README.md +++ b/README.md @@ -150,7 +150,7 @@ For the "Debug Solution" to run properly on Windows 10/11 using WSL, the followi - BSM_DB_NAME: The database name for BSM visualization data. - SSM_DB_NAME: The database name for SSM visualization data. - SRM_DB_NAME: The database name for SRM visualization data. -- RSU_REST_ENDPOINT: HTTPS endpoint of the deployed RSU REST API in GCP Kubernetes. +- FIRMWARE_MANAGER_ENDPOINT: Endpoint for the firmware manager deployment's API. - CSM_EMAIL_TO_SEND_FROM: Origin email address for the API. - CSM_EMAIL_APP_USERNAME: Username for the SMTP server. - CSM_EMAIL_APP_PASSWORD: Password for the SMTP server. diff --git a/docker-compose-addons.yml b/docker-compose-addons.yml index 80199bddd..ee3125431 100644 --- a/docker-compose-addons.yml +++ b/docker-compose-addons.yml @@ -1,143 +1,9 @@ version: '3' -services: - cvmanager_api: - build: - context: ./services - dockerfile: Dockerfile.api - image: jpo_cvmanager_api:latest - restart: always - networks: - internal: - extra_hosts: - ${WEBAPP_DOMAIN}: ${WEBAPP_HOST_IP} - ${KEYCLOAK_DOMAIN}: ${KC_HOST_IP} - ports: - - '8081:5000' - environment: - PG_DB_HOST: ${PG_DB_HOST} - PG_DB_USER: ${PG_DB_USER} - PG_DB_PASS: ${PG_DB_PASS} - PG_DB_NAME: postgres - INSTANCE_CONNECTION_NAME: ${INSTANCE_CONNECTION_NAME} - - MONGO_DB_URI: ${MONGO_DB_URI} - MONGO_DB_NAME: ${MONGO_DB_NAME} - - COUNTS_MSG_TYPES: ${COUNTS_MSG_TYPES} - COUNTS_DB_TYPE: ${COUNTS_DB_TYPE} - COUNTS_DB_NAME: ${COUNTS_DB_NAME} - GOOGLE_APPLICATION_CREDENTIALS: '/google/gcp_service_account.json' - - BSM_DB_NAME: ${BSM_DB_NAME} - SSM_DB_NAME: ${SSM_DB_NAME} - SRM_DB_NAME: ${SRM_DB_NAME} - - WZDX_API_KEY: ${WZDX_API_KEY} - WZDX_ENDPOINT: ${WZDX_ENDPOINT} - - CORS_DOMAIN: ${CORS_DOMAIN} - KEYCLOAK_ENDPOINT: http://${KEYCLOAK_DOMAIN}:8084/ - KEYCLOAK_REALM: ${KEYCLOAK_REALM} - KEYCLOAK_API_CLIENT_ID: ${KEYCLOAK_API_CLIENT_ID} - KEYCLOAK_API_CLIENT_SECRET_KEY: ${KEYCLOAK_API_CLIENT_SECRET_KEY} - - CSM_EMAIL_TO_SEND_FROM: ${CSM_EMAIL_TO_SEND_FROM} - CSM_EMAIL_APP_USERNAME: ${CSM_EMAIL_APP_USERNAME} - CSM_EMAIL_APP_PASSWORD: ${CSM_EMAIL_APP_PASSWORD} - CSM_EMAILS_TO_SEND_TO: ${CSM_EMAILS_TO_SEND_TO} - CSM_TARGET_SMTP_SERVER_ADDRESS: ${CSM_TARGET_SMTP_SERVER_ADDRESS} - CSM_TARGET_SMTP_SERVER_PORT: ${CSM_TARGET_SMTP_SERVER_PORT} - - TIMEZONE: ${TIMEZONE} - LOGGING_LEVEL: ${API_LOGGING_LEVEL} - volumes: - - ./resources/google:/google - logging: - options: - max-size: '10m' - max-file: '5' - - cvmanager_webapp: - build: - context: webapp - dockerfile: Dockerfile - args: - API_URI: 
http://${WEBAPP_DOMAIN}:8081 - MAPBOX_TOKEN: ${MAPBOX_TOKEN} - KEYCLOAK_HOST_URL: http://${KEYCLOAK_DOMAIN}:8084/ - COUNT_MESSAGE_TYPES: ${COUNTS_MSG_TYPES} - DOT_NAME: ${DOT_NAME} - MAPBOX_INIT_LATITUDE: ${MAPBOX_INIT_LATITUDE} - MAPBOX_INIT_LONGITUDE: ${MAPBOX_INIT_LONGITUDE} - MAPBOX_INIT_ZOOM: ${MAPBOX_INIT_ZOOM} - image: jpo_cvmanager_webapp:latest - restart: always - depends_on: - cvmanager_keycloak: - condition: service_healthy - networks: - internal: - extra_hosts: - ${WEBAPP_DOMAIN}: ${WEBAPP_HOST_IP} - ${KEYCLOAK_DOMAIN}: ${KC_HOST_IP} - ports: - - '80:80' - logging: - options: - max-size: '10m' - - cvmanager_postgres: - image: postgis/postgis:15-master - restart: always - ports: - - '5432:5432' - environment: - POSTGRES_USER: ${PG_DB_USER} - POSTGRES_PASSWORD: ${PG_DB_PASS} - volumes: - - pgdb:/var/lib/postgresql/data - - ./resources/sql_scripts:/docker-entrypoint-initdb.d - logging: - options: - max-size: '10m' - cvmanager_keycloak: - build: ./resources/keycloak - image: jpo_cvmanager_keycloak:21.1 - restart: always - depends_on: - - cvmanager_postgres - networks: - internal: - extra_hosts: - ${WEBAPP_DOMAIN}: ${WEBAPP_HOST_IP} - ${KEYCLOAK_DOMAIN}: ${KC_HOST_IP} - ports: - - '8084:8080' - environment: - KEYCLOAK_ADMIN: ${KEYCLOAK_ADMIN} - KEYCLOAK_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD} - WEBAPP_ORIGIN: http://${WEBAPP_DOMAIN} - KC_HEALTH_ENABLED: true - KC_DB: postgres - KC_DB_URL: jdbc:postgresql://${PG_DB_HOST}/postgres?currentSchema=keycloak - KC_DB_USERNAME: ${PG_DB_USER} - KC_DB_PASSWORD: ${PG_DB_PASS} - KC_HOSTNAME: ${KEYCLOAK_DOMAIN} - KEYCLOAK_API_CLIENT_SECRET_KEY: ${KEYCLOAK_API_CLIENT_SECRET_KEY} - GOOGLE_CLIENT_ID: ${GOOGLE_CLIENT_ID} - GOOGLE_CLIENT_SECRET: ${GOOGLE_CLIENT_SECRET} - command: - - start-dev - - --log-level=${KC_LOGGING_LEVEL} - - --import-realm - volumes: - - ./resources/keycloak/realm.json:/opt/keycloak/data/import/realm.json - - ./resources/keycloak/${KEYCLOAK_LOGIN_THEME_NAME}.jar:/opt/keycloak/providers/theme.jar - logging: - options: - max-size: '10m' +include: + - docker-compose.yml +services: # ADDONS: jpo_bsm_query: build: @@ -145,6 +11,7 @@ services: dockerfile: Dockerfile.bsm_query image: bsm_query:latest restart: always + env_file: - ./services/addons/images/bsm_query/.env logging: @@ -158,6 +25,7 @@ services: dockerfile: Dockerfile.count_metric image: count_metric:latest restart: always + env_file: - ./services/addons/images/count_metric/.env logging: @@ -171,10 +39,11 @@ services: dockerfile: Dockerfile.rsu_ping_fetch image: rsu_ping_fetch:latest restart: always + depends_on: - cvmanager_postgres env_file: - - ./services/addons/images/rsu_ping_fetch/.env + - ./services/addons/images/rsu_ping/.env logging: options: max-size: '10m' @@ -186,6 +55,7 @@ services: dockerfile: Dockerfile.iss_health_check image: iss_health_check:latest restart: always + depends_on: - cvmanager_postgres env_file: @@ -195,9 +65,31 @@ services: max-size: '10m' max-file: '5' -volumes: - pgdb: - driver: local + firmware_manager: + build: + context: services + dockerfile: Dockerfile.firmware_manager + image: jpo_firmware_manager:latest + restart: always + + ports: + - '8089:8080' + environment: + PG_DB_HOST: ${PG_DB_HOST} + PG_DB_NAME: postgres + PG_DB_USER: ${PG_DB_USER} + PG_DB_PASS: ${PG_DB_PASS} -networks: - internal: + BLOB_STORAGE_PROVIDER: ${BLOB_STORAGE_PROVIDER} + BLOB_STORAGE_BUCKET: ${BLOB_STORAGE_BUCKET} + + GCP_PROJECT: ${GCP_PROJECT} + GOOGLE_APPLICATION_CREDENTIALS: '/google/gcp_credentials.json' + + LOGGING_LEVEL: ${API_LOGGING_LEVEL} + volumes: + 
- ${GOOGLE_APPLICATION_CREDENTIALS}:/google/gcp_credentials.json + logging: + options: + max-size: '10m' + max-file: '5' diff --git a/docker-compose-webapp-deployment.yml b/docker-compose-webapp-deployment.yml new file mode 100644 index 000000000..c719ebe25 --- /dev/null +++ b/docker-compose-webapp-deployment.yml @@ -0,0 +1,23 @@ +# This file is used to build the webapp image for deployment. +# The COUNTS_MSG_TYPES and DOT_NAME variables must be set in .env before building to populate +# correctly in the deployed webapp as they are build-time variables. +version: '3' +services: + cvmanager_webapp: + build: + context: webapp + dockerfile: Dockerfile + args: + API_URI: ${WEBAPP_DOMAIN} # e.g. http://localhost + MAPBOX_TOKEN: ${MAPBOX_TOKEN} + KEYCLOAK_HOST_URL: ${KEYCLOAK_DOMAIN} # e.g. http://localhost + COUNT_MESSAGE_TYPES: ${COUNTS_MSG_TYPES} + DOT_NAME: ${DOT_NAME} + MAPBOX_INIT_LATITUDE: ${MAPBOX_INIT_LATITUDE} + MAPBOX_INIT_LONGITUDE: ${MAPBOX_INIT_LONGITUDE} + MAPBOX_INIT_ZOOM: ${MAPBOX_INIT_ZOOM} + image: jpo_cvmanager_webapp:latest + restart: always + logging: + options: + max-size: '10m' diff --git a/docker-compose.yml b/docker-compose.yml index 909f213b0..f28d210e5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -6,8 +6,6 @@ services: dockerfile: Dockerfile.api image: jpo_cvmanager_api:latest restart: always - networks: - internal: extra_hosts: ${WEBAPP_DOMAIN}: ${WEBAPP_HOST_IP} ${KEYCLOAK_DOMAIN}: ${KC_HOST_IP} @@ -77,8 +75,6 @@ services: depends_on: cvmanager_keycloak: condition: service_healthy - networks: - internal: extra_hosts: ${WEBAPP_DOMAIN}: ${WEBAPP_HOST_IP} ${KEYCLOAK_DOMAIN}: ${KC_HOST_IP} @@ -113,8 +109,6 @@ services: restart: always depends_on: - cvmanager_postgres - networks: - internal: extra_hosts: ${WEBAPP_DOMAIN}: ${WEBAPP_HOST_IP} ${KEYCLOAK_DOMAIN}: ${KC_HOST_IP} @@ -142,37 +136,6 @@ services: options: max-size: '10m' - firmware_manager: - build: - context: services - dockerfile: Dockerfile.firmware_manager - image: jpo_firmware_manager:latest - restart: always - ports: - - '8089:8080' - environment: - PG_DB_HOST: ${PG_DB_HOST} - PG_DB_NAME: postgres - PG_DB_USER: ${PG_DB_USER} - PG_DB_PASS: ${PG_DB_PASS} - - BLOB_STORAGE_PROVIDER: ${BLOB_STORAGE_PROVIDER} - BLOB_STORAGE_BUCKET: ${BLOB_STORAGE_BUCKET} - - GCP_PROJECT: ${GCP_PROJECT} - GOOGLE_APPLICATION_CREDENTIALS: '/google/gcp_credentials.json' - - LOGGING_LEVEL: ${API_LOGGING_LEVEL} - volumes: - - ${GOOGLE_APPLICATION_CREDENTIALS}:/google/gcp_credentials.json - logging: - options: - max-size: '10m' - max-file: '5' - volumes: pgdb: driver: local - -networks: - internal: diff --git a/resources/kubernetes/README.md b/resources/kubernetes/README.md index 5ebf9e42d..ba96f15d4 100644 --- a/resources/kubernetes/README.md +++ b/resources/kubernetes/README.md @@ -1,20 +1,20 @@ -# Kubernetes Deployment Scripts - -The CV Manager supports being hosted within a Kubernetes cluster which allows for better stability, smoother deployments and performance scaling. The YAML files within this directory provide a starting point to incorporate the CV Manager into your own Helm deployments in any local or cloud based Kubernetes environment. - -## Requirements - -The webapp and API both utilize a K8s Ingress to handle external access to the applications. These Ingress enforce HTTPS and host a ManagedCertificate that require a domain name and SSL policy that must be created and handled outside of the K8s templates provided here. 
These would be created by the cloud service being utilized or on your own if the CV Manager is being run in a local K8s solution. - -The YAML files use GCP specific specifications for various values such as "networking.gke.io/managed-certificates". These values will not work on AWS and Azure but there should be equivalent fields that these specifications can be updated to if needing to deploy in another cloud environment. - -The environment variables must be set according to the README documentation for each application. The iss-health-check application only supports GCP. - -## Useful Links - -- [Learn about and get started with Kubernetes](https://kubernetes.io/docs/tutorials/kubernetes-basics/) -- [Use Helm to help with Kubernetes deployments](https://helm.sh/) -- Cloud Kubernetes Solutions - - [GCP Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine) - - [AWS Elastic Kubernetes Service](https://aws.amazon.com/eks/) - - [Azure Kubernetes Service](https://azure.microsoft.com/en-us/products/kubernetes-service) +# Kubernetes Deployment Scripts + +The CV Manager supports being hosted within a Kubernetes cluster which allows for better stability, smoother deployments and performance scaling. The YAML files within this directory provide a starting point to incorporate the CV Manager into your own Helm deployments in any local or cloud based Kubernetes environment. + +## Requirements + +The webapp and API both utilize a K8s Ingress to handle external access to the applications. These Ingress enforce HTTPS and host a ManagedCertificate that require a domain name and SSL policy that must be created and handled outside of the K8s templates provided here. These would be created by the cloud service being utilized or on your own if the CV Manager is being run in a local K8s solution. + +The YAML files use GCP specific specifications for various values such as "networking.gke.io/managed-certificates". These values will not work on AWS and Azure but there should be equivalent fields that these specifications can be updated to if needing to deploy in another cloud environment. + +The environment variables must be set according to the README documentation for each application. The iss-health-check application only supports GCP. 
+ +## Useful Links + +- [Learn about and get started with Kubernetes](https://kubernetes.io/docs/tutorials/kubernetes-basics/) +- [Use Helm to help with Kubernetes deployments](https://helm.sh/) +- Cloud Kubernetes Solutions + - [GCP Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine) + - [AWS Elastic Kubernetes Service](https://aws.amazon.com/eks/) + - [Azure Kubernetes Service](https://azure.microsoft.com/en-us/products/kubernetes-service) diff --git a/resources/kubernetes/cv-manager-api.yaml b/resources/kubernetes/cv-manager-api.yaml index ea318df8a..922666dfb 100644 --- a/resources/kubernetes/cv-manager-api.yaml +++ b/resources/kubernetes/cv-manager-api.yaml @@ -1,181 +1,181 @@ -# K8s ManagedCertificate for using SSL/TLS with a domain -# Domain and DNS forwarding to the Ingress endpoint must be configured -apiVersion: networking.gke.io/v1 -kind: ManagedCertificate -metadata: - name: cv-manager-api-managed-cert -spec: - domains: - - your-api.domain.com ---- -# K8s FrontendConfig for applying SSL certificate to Ingress -# Requires 'cv-manager-api-ssl-policy' SSL policy to exist -apiVersion: networking.gke.io/v1beta1 -kind: FrontendConfig -metadata: - name: cv-manager-api-frontend - labels: - app: cv-manager-api -spec: - redirectToHttps: - enabled: true - sslPolicy: cv-manager-api-ssl-policy ---- -# NodePort to expose CV Manager web application -apiVersion: v1 -kind: Service -metadata: - labels: - app: cv-manager-api - name: cv-manager-api-service-internal -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 5000 - selector: - app: cv-manager-api - type: NodePort ---- -# External HTTP/HTTPS Ingress to internal NodePort -# Requires 'cv-manager-api-ip' as a global static external IP to be reserved -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: cv-manager-api-ingress - labels: - app: cv-manager-api - annotations: - kubernetes.io/ingress.regional-static-ip-name: "cv-manager-api-ip" - networking.gke.io/managed-certificates: "cv-manager-api-managed-cert" - networking.gke.io/v1beta1.FrontendConfig: "cv-manager-api-frontend" -spec: - defaultBackend: - service: - name: cv-manager-api-service-internal - port: - number: 80 ---- -# Limits the number of pods that are down simultaneously -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: cv-manager-api-pdb -spec: - selector: - matchLabels: - app: cv-manager-api - maxUnavailable: 1 ---- -apiVersion: 'apps/v1' -kind: 'Deployment' -metadata: - name: cv-manager-api -spec: - replicas: 1 - selector: - matchLabels: - app: cv-manager-api - template: - metadata: - labels: - app: cv-manager-api - spec: - containers: - - name: cv-manager-api - imagePullPolicy: Always - # Replace image with an actual built jpo-cvmanager API image from an image repository - image: jpoode-cvmanager-api:tag - resources: - requests: - memory: '4Gi' - cpu: '1' - ephemeral-storage: '4Gi' - limits: - memory: '4Gi' - cpu: '1' - ephemeral-storage: '4Gi' - tty: true - stdin: true - ports: - - containerPort: 5000 - env: - # Fill out the ENV vars with your own values - - name: CORS_DOMAIN - value: '' - - name: GOOGLE_APPLICATION_CREDENTIALS - value: '' - - name: GOOGLE_CLIENT_ID - value: "" - - name: KEYCLOAK_ENDPOINT - value: "" - - name: KEYCLOAK_REALM - value: "" - - name: KEYCLOAK_API_CLIENT_ID - value: "" - - name: KEYCLOAK_API_CLIENT_SECRET_KEY - valueFrom: - secretKeyRef: - name: some-keycloak-secret-name - key: some-keycloak-secret-key - - name: PG_DB_HOST - value: "" - - name: PG_DB_NAME - value: "" - - name: PG_DB_USER - 
valueFrom: - secretKeyRef: - name: some-postgres-secret-user - key: some-postgres-secret-key - - name: PG_DB_PASS - valueFrom: - secretKeyRef: - name: some-postgres-secret-password - key: some-postgres-secret-key - - name: COUNTS_DB_TYPE - value: "" - - name: COUNTS_MSG_TYPES - value: "" - - name: COUNTS_DB_NAME - value: "" - - name: BSM_DB_NAME - value: "" - - name: SSM_DB_NAME - value: "" - - name: SRM_DB_NAME - value: "" - - name: WZDX_ENDPOINT - value: "" - - name: WZDX_API_KEY - value: "" - - name: CSM_EMAIL_TO_SEND_FROM - value: "" - - name: CSM_EMAIL_APP_USERNAME - valueFrom: - secretKeyRef: - name: some_email_secret_name - key: some_email_secret_key - - name: CSM_EMAIL_APP_PASSWORD - valueFrom: - secretKeyRef: - name: some_email_secret_password - key: some_email_secret_key - - name: CSM_EMAILS_TO_SEND_TO - value: "" - - name: CSM_TARGET_SMTP_SERVER_ADDRESS - value: "" - - name: CSM_TARGET_SMTP_SERVER_PORT - value: "" - - name: TIMEZONE - value: "" - - name: LOGGING_LEVEL - value: "" - volumeMounts: - - name: cv-manager-service-key - mountPath: /home/secret - tty: true - stdin: true - volumes: - - name: some-service-key - secret: - secretName: some-secret-name +# K8s ManagedCertificate for using SSL/TLS with a domain +# Domain and DNS forwarding to the Ingress endpoint must be configured +apiVersion: networking.gke.io/v1 +kind: ManagedCertificate +metadata: + name: cv-manager-api-managed-cert +spec: + domains: + - your-api.domain.com +--- +# K8s FrontendConfig for applying SSL certificate to Ingress +# Requires 'cv-manager-api-ssl-policy' SSL policy to exist +apiVersion: networking.gke.io/v1beta1 +kind: FrontendConfig +metadata: + name: cv-manager-api-frontend + labels: + app: cv-manager-api +spec: + redirectToHttps: + enabled: true + sslPolicy: cv-manager-api-ssl-policy +--- +# NodePort to expose CV Manager web application +apiVersion: v1 +kind: Service +metadata: + labels: + app: cv-manager-api + name: cv-manager-api-service-internal +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 5000 + selector: + app: cv-manager-api + type: NodePort +--- +# External HTTP/HTTPS Ingress to internal NodePort +# Requires 'cv-manager-api-ip' as a global static external IP to be reserved +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cv-manager-api-ingress + labels: + app: cv-manager-api + annotations: + kubernetes.io/ingress.regional-static-ip-name: "cv-manager-api-ip" + networking.gke.io/managed-certificates: "cv-manager-api-managed-cert" + networking.gke.io/v1beta1.FrontendConfig: "cv-manager-api-frontend" +spec: + defaultBackend: + service: + name: cv-manager-api-service-internal + port: + number: 80 +--- +# Limits the number of pods that are down simultaneously +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: cv-manager-api-pdb +spec: + selector: + matchLabels: + app: cv-manager-api + maxUnavailable: 1 +--- +apiVersion: 'apps/v1' +kind: 'Deployment' +metadata: + name: cv-manager-api +spec: + replicas: 1 + selector: + matchLabels: + app: cv-manager-api + template: + metadata: + labels: + app: cv-manager-api + spec: + containers: + - name: cv-manager-api + imagePullPolicy: Always + # Replace image with an actual built jpo-cvmanager API image from an image repository + image: jpoode-cvmanager-api:tag + resources: + requests: + memory: '4Gi' + cpu: '1' + ephemeral-storage: '4Gi' + limits: + memory: '4Gi' + cpu: '1' + ephemeral-storage: '4Gi' + tty: true + stdin: true + ports: + - containerPort: 5000 + env: + # Fill out the ENV vars with your own 
values + - name: CORS_DOMAIN + value: '' + - name: GOOGLE_APPLICATION_CREDENTIALS + value: '' + - name: GOOGLE_CLIENT_ID + value: "" + - name: KEYCLOAK_ENDPOINT + value: "" + - name: KEYCLOAK_REALM + value: "" + - name: KEYCLOAK_API_CLIENT_ID + value: "" + - name: KEYCLOAK_API_CLIENT_SECRET_KEY + valueFrom: + secretKeyRef: + name: some-keycloak-secret-name + key: some-keycloak-secret-key + - name: PG_DB_HOST + value: "" + - name: PG_DB_NAME + value: "" + - name: PG_DB_USER + valueFrom: + secretKeyRef: + name: some-postgres-secret-user + key: some-postgres-secret-key + - name: PG_DB_PASS + valueFrom: + secretKeyRef: + name: some-postgres-secret-password + key: some-postgres-secret-key + - name: COUNTS_DB_TYPE + value: "" + - name: COUNTS_MSG_TYPES + value: "" + - name: COUNTS_DB_NAME + value: "" + - name: BSM_DB_NAME + value: "" + - name: SSM_DB_NAME + value: "" + - name: SRM_DB_NAME + value: "" + - name: WZDX_ENDPOINT + value: "" + - name: WZDX_API_KEY + value: "" + - name: CSM_EMAIL_TO_SEND_FROM + value: "" + - name: CSM_EMAIL_APP_USERNAME + valueFrom: + secretKeyRef: + name: some_email_secret_name + key: some_email_secret_key + - name: CSM_EMAIL_APP_PASSWORD + valueFrom: + secretKeyRef: + name: some_email_secret_password + key: some_email_secret_key + - name: CSM_EMAILS_TO_SEND_TO + value: "" + - name: CSM_TARGET_SMTP_SERVER_ADDRESS + value: "" + - name: CSM_TARGET_SMTP_SERVER_PORT + value: "" + - name: TIMEZONE + value: "" + - name: LOGGING_LEVEL + value: "" + volumeMounts: + - name: cv-manager-service-key + mountPath: /home/secret + tty: true + stdin: true + volumes: + - name: some-service-key + secret: + secretName: some-secret-name diff --git a/resources/kubernetes/cv-manager-postgres.yaml b/resources/kubernetes/cv-manager-postgres.yaml index 11cf2c877..255ce6352 100644 --- a/resources/kubernetes/cv-manager-postgres.yaml +++ b/resources/kubernetes/cv-manager-postgres.yaml @@ -74,6 +74,9 @@ spec: - name: cv-manager-postgres-volume persistentVolumeClaim: claimName: cv-manager-postgres-claim + - name: cv-manager-init-tables + configMap: + name: pg-init-tables containers: - name: 'cv-manager-postgis' imagePullPolicy: Always @@ -101,3 +104,360 @@ spec: volumeMounts: - name: cv-manager-postgres-volume mountPath: /var/lib/postgresql/data + - name: cv-manager-init-tables + mountPath: /docker-entrypoint-initdb.d +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: pg-init-tables +data: + create-tables.sql: |- + CREATE EXTENSION IF NOT EXISTS postgis; + + CREATE SEQUENCE public.manufacturers_manufacturer_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.manufacturers + ( + manufacturer_id integer NOT NULL DEFAULT nextval('manufacturers_manufacturer_id_seq'::regclass), + name character varying(128) COLLATE pg_catalog.default NOT NULL, + CONSTRAINT manufacturers_pkey PRIMARY KEY (manufacturer_id), + CONSTRAINT manufacturers_name UNIQUE (name) + ); + + CREATE SEQUENCE public.rsu_models_rsu_model_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.rsu_models + ( + rsu_model_id integer NOT NULL DEFAULT nextval('rsu_models_rsu_model_id_seq'::regclass), + name character varying(128) COLLATE pg_catalog.default NOT NULL, + supported_radio character varying(128) COLLATE pg_catalog.default NOT NULL, + manufacturer integer NOT NULL, + CONSTRAINT rsu_models_pkey PRIMARY KEY (rsu_model_id), + CONSTRAINT rsu_models_name UNIQUE (name), + CONSTRAINT fk_manufacturer FOREIGN KEY 
(manufacturer) + REFERENCES public.manufacturers (manufacturer_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION + ); + + CREATE SEQUENCE public.firmware_images_firmware_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.firmware_images + ( + firmware_id integer NOT NULL DEFAULT nextval('firmware_images_firmware_id_seq'::regclass), + name character varying(128) COLLATE pg_catalog.default NOT NULL, + model integer NOT NULL, + install_package character varying(128) COLLATE pg_catalog.default NOT NULL, + version character varying(128) COLLATE pg_catalog.default NOT NULL, + CONSTRAINT firmware_images_pkey PRIMARY KEY (firmware_id), + CONSTRAINT firmware_images_name UNIQUE (name), + CONSTRAINT firmware_images_install_package UNIQUE (install_package), + CONSTRAINT firmware_images_version UNIQUE (version), + CONSTRAINT fk_model FOREIGN KEY (model) + REFERENCES public.rsu_models (rsu_model_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION + ); + + CREATE SEQUENCE public.firmware_upgrade_rules_firmware_upgrade_rule_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.firmware_upgrade_rules + ( + firmware_upgrade_rule_id integer NOT NULL DEFAULT nextval('firmware_upgrade_rules_firmware_upgrade_rule_id_seq'::regclass), + from_id integer NOT NULL, + to_id integer NOT NULL, + CONSTRAINT firmware_upgrade_rules_pkey PRIMARY KEY (firmware_upgrade_rule_id), + CONSTRAINT fk_from_id FOREIGN KEY (from_id) + REFERENCES public.firmware_images (firmware_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION, + CONSTRAINT fk_to_id FOREIGN KEY (to_id) + REFERENCES public.firmware_images (firmware_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION + ); + + CREATE SEQUENCE public.rsu_credentials_credential_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.rsu_credentials + ( + credential_id integer NOT NULL DEFAULT nextval('rsu_credentials_credential_id_seq'::regclass), + username character varying(128) COLLATE pg_catalog.default NOT NULL, + password character varying(128) COLLATE pg_catalog.default NOT NULL, + nickname character varying(128) COLLATE pg_catalog.default NOT NULL, + CONSTRAINT rsu_credentials_pkey PRIMARY KEY (credential_id), + CONSTRAINT rsu_credentials_nickname UNIQUE (nickname) + ); + + CREATE SEQUENCE public.snmp_credentials_snmp_credential_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.snmp_credentials + ( + snmp_credential_id integer NOT NULL DEFAULT nextval('snmp_credentials_snmp_credential_id_seq'::regclass), + username character varying(128) COLLATE pg_catalog.default NOT NULL, + password character varying(128) COLLATE pg_catalog.default NOT NULL, + nickname character varying(128) COLLATE pg_catalog.default NOT NULL, + CONSTRAINT snmp_credentials_pkey PRIMARY KEY (snmp_credential_id), + CONSTRAINT snmp_credentials_nickname UNIQUE (nickname) + ); + + CREATE SEQUENCE public.snmp_versions_snmp_version_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.snmp_versions + ( + snmp_version_id integer NOT NULL DEFAULT nextval('snmp_versions_snmp_version_id_seq'::regclass), + version_code character varying(128) COLLATE pg_catalog.default NOT NULL, + nickname character varying(128) COLLATE pg_catalog.default NOT NULL, + CONSTRAINT snmp_versions_pkey 
PRIMARY KEY (snmp_version_id), + CONSTRAINT snmp_versions_nickname UNIQUE (nickname) + ); + + CREATE SEQUENCE public.rsus_rsu_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.rsus + ( + rsu_id integer NOT NULL DEFAULT nextval('rsus_rsu_id_seq'::regclass), + geography geography NOT NULL, + milepost double precision NOT NULL, + ipv4_address inet NOT NULL, + serial_number character varying(128) COLLATE pg_catalog.default NOT NULL, + iss_scms_id character varying(128) COLLATE pg_catalog.default NOT NULL, + primary_route character varying(128) COLLATE pg_catalog.default NOT NULL, + model integer NOT NULL, + credential_id integer NOT NULL, + snmp_credential_id integer NOT NULL, + snmp_version_id integer NOT NULL, + firmware_version integer, + target_firmware_version integer, + CONSTRAINT rsu_pkey PRIMARY KEY (rsu_id), + CONSTRAINT rsu_ipv4_address UNIQUE (ipv4_address), + CONSTRAINT rsu_milepost_primary_route UNIQUE (milepost, primary_route), + CONSTRAINT rsu_serial_number UNIQUE (serial_number), + CONSTRAINT rsu_iss_scms_id UNIQUE (iss_scms_id), + CONSTRAINT fk_model FOREIGN KEY (model) + REFERENCES public.rsu_models (rsu_model_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION, + CONSTRAINT fk_credential_id FOREIGN KEY (credential_id) + REFERENCES public.rsu_credentials (credential_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION, + CONSTRAINT fk_snmp_credential_id FOREIGN KEY (snmp_credential_id) + REFERENCES public.snmp_credentials (snmp_credential_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION, + CONSTRAINT fk_snmp_version_id FOREIGN KEY (snmp_version_id) + REFERENCES public.snmp_versions (snmp_version_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION, + CONSTRAINT fk_firmware_version FOREIGN KEY (firmware_version) + REFERENCES public.firmware_images (firmware_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION, + CONSTRAINT fk_target_firmware_version FOREIGN KEY (target_firmware_version) + REFERENCES public.firmware_images (firmware_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION + ); + + CREATE SEQUENCE public.ping_ping_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.ping + ( + ping_id integer NOT NULL DEFAULT nextval('ping_ping_id_seq'::regclass), + timestamp timestamp without time zone NOT NULL, + result bit(1) NOT NULL, + rsu_id integer NOT NULL, + CONSTRAINT ping_pkey PRIMARY KEY (ping_id), + CONSTRAINT fk_rsu_id FOREIGN KEY (rsu_id) + REFERENCES public.rsus (rsu_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION + ); + + CREATE SEQUENCE public.roles_role_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.roles + ( + role_id integer NOT NULL DEFAULT nextval('roles_role_id_seq'::regclass), + name character varying(128) COLLATE pg_catalog.default NOT NULL, + CONSTRAINT roles_pkey PRIMARY KEY (role_id), + CONSTRAINT roles_name UNIQUE (name) + ); + + CREATE SEQUENCE public.users_user_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.users + ( + user_id integer NOT NULL DEFAULT nextval('users_user_id_seq'::regclass), + email character varying(128) COLLATE pg_catalog.default NOT NULL, + first_name character varying(128) NOT NULL, + last_name character varying(128) NOT NULL, + super_user bit(1) NOT NULL, + receive_error_emails bit(1) NOT NULL, + CONSTRAINT 
users_pkey PRIMARY KEY (user_id), + CONSTRAINT users_email UNIQUE (email) + ); + + CREATE SEQUENCE public.organizations_organization_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.organizations + ( + organization_id integer NOT NULL DEFAULT nextval('organizations_organization_id_seq'::regclass), + name character varying(128) COLLATE pg_catalog.default NOT NULL, + CONSTRAINT organizations_pkey PRIMARY KEY (organization_id), + CONSTRAINT organizations_name UNIQUE (name) + ); + + CREATE SEQUENCE public.user_organization_user_organization_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.user_organization + ( + user_organization_id integer NOT NULL DEFAULT nextval('user_organization_user_organization_id_seq'::regclass), + user_id integer NOT NULL, + organization_id integer NOT NULL, + role_id integer NOT NULL, + CONSTRAINT user_organization_pkey PRIMARY KEY (user_organization_id), + CONSTRAINT fk_user_id FOREIGN KEY (user_id) + REFERENCES public.users (user_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION, + CONSTRAINT fk_organization_id FOREIGN KEY (organization_id) + REFERENCES public.organizations (organization_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION, + CONSTRAINT fk_role_id FOREIGN KEY (role_id) + REFERENCES public.roles (role_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION + ); + + CREATE SEQUENCE public.rsu_organization_rsu_organization_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.rsu_organization + ( + rsu_organization_id integer NOT NULL DEFAULT nextval('rsu_organization_rsu_organization_id_seq'::regclass), + rsu_id integer NOT NULL, + organization_id integer NOT NULL, + CONSTRAINT rsu_organization_pkey PRIMARY KEY (rsu_organization_id), + CONSTRAINT fk_rsu_id FOREIGN KEY (rsu_id) + REFERENCES public.rsus (rsu_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION, + CONSTRAINT fk_organization_id FOREIGN KEY (organization_id) + REFERENCES public.organizations (organization_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION + ); + + CREATE TABLE IF NOT EXISTS public.map_info + ( + ipv4_address inet NOT NULL, + geojson json NOT NULL, + date character varying(64) COLLATE pg_catalog.default, + CONSTRAINT map_info_pkey PRIMARY KEY (ipv4_address), + CONSTRAINT fk_ipv4_address FOREIGN KEY (ipv4_address) + REFERENCES public.rsus (ipv4_address) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION + ); + + CREATE VIEW public.rsu_organization_name AS + SELECT ro.rsu_id, org.name + FROM public.rsu_organization AS ro + JOIN public.organizations AS org ON ro.organization_id = org.organization_id; + + -- Create scms_health table + CREATE SEQUENCE public.scms_health_scms_health_id_seq + INCREMENT 1 + START 1 + MINVALUE 1 + MAXVALUE 2147483647 + CACHE 1; + + CREATE TABLE IF NOT EXISTS public.scms_health + ( + scms_health_id integer NOT NULL DEFAULT nextval('scms_health_scms_health_id_seq'::regclass), + timestamp timestamp without time zone NOT NULL, + health bit(1) NOT NULL, + expiration timestamp without time zone, + rsu_id integer NOT NULL, + CONSTRAINT scms_health_pkey PRIMARY KEY (scms_health_id), + CONSTRAINT fk_rsu_id FOREIGN KEY (rsu_id) + REFERENCES public.rsus (rsu_id) MATCH SIMPLE + ON UPDATE NO ACTION + ON DELETE NO ACTION + ); + + CREATE SCHEMA IF NOT EXISTS keycloak; diff --git a/resources/kubernetes/cv-manager-webapp.yaml 
b/resources/kubernetes/cv-manager-webapp.yaml index 4b15b0c87..51e6d4c6d 100644 --- a/resources/kubernetes/cv-manager-webapp.yaml +++ b/resources/kubernetes/cv-manager-webapp.yaml @@ -1,101 +1,101 @@ -# K8s ManagedCertificate for using SSL/TLS with a domain -# Domain and DNS forwarding to the Ingress endpoint must be configured -apiVersion: networking.gke.io/v1 -kind: ManagedCertificate -metadata: - name: cv-manager-webapp-managed-cert -spec: - domains: - - your.domain.com ---- -# K8s FrontendConfig for applying SSL certificate to Ingress -# Requires 'cv-manager-webapp-ssl-policy' SSL policy to exist -apiVersion: networking.gke.io/v1beta1 -kind: FrontendConfig -metadata: - name: cv-manager-webapp-frontend - labels: - app: cv-manager-webapp -spec: - redirectToHttps: - enabled: true - sslPolicy: cv-manager-webapp-ssl-policy ---- -# NodePort to expose CV Manager web application -apiVersion: v1 -kind: Service -metadata: - labels: - app: cv-manager-webapp - name: cv-manager-webapp-service-internal -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 80 - selector: - app: cv-manager-webapp - type: NodePort ---- -# External HTTP/HTTPS Ingress to internal NodePort -# Requires 'cv-manager-ip' as a global static external IP to be reserved -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: cv-manager-webapp-ingress - labels: - app: cv-manager-webapp - annotations: - kubernetes.io/ingress.regional-static-ip-name: 'cv-manager-ip' - networking.gke.io/managed-certificates: 'cv-manager-webapp-managed-cert' - networking.gke.io/v1beta1.FrontendConfig: 'cv-manager-webapp-frontend' -spec: - defaultBackend: - service: - name: cv-manager-webapp-service-internal - port: - number: 80 ---- -# Limits the number of pods that are down simultaneously -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: cv-manager-webapp-pdb -spec: - selector: - matchLabels: - app: cv-manager-webapp - maxUnavailable: 1 ---- -apiVersion: 'apps/v1' -kind: 'Deployment' -metadata: - name: cv-manager-webapp -spec: - replicas: 1 - selector: - matchLabels: - app: cv-manager-webapp - template: - metadata: - labels: - app: cv-manager-webapp - spec: - containers: - - name: cv-manager-webapp - imagePullPolicy: Always - # Replace image with an actual built jpo-cvmanager webapp image from an image repository - image: jpoode-cvmanager-webapp:tag - resources: - requests: - memory: '4Gi' - cpu: '2' - ephemeral-storage: '4Gi' - limits: - memory: '4Gi' - cpu: '2' - ephemeral-storage: '4Gi' - tty: true - stdin: true - ports: - - containerPort: 80 +# K8s ManagedCertificate for using SSL/TLS with a domain +# Domain and DNS forwarding to the Ingress endpoint must be configured +apiVersion: networking.gke.io/v1 +kind: ManagedCertificate +metadata: + name: cv-manager-webapp-managed-cert +spec: + domains: + - your.domain.com +--- +# K8s FrontendConfig for applying SSL certificate to Ingress +# Requires 'cv-manager-webapp-ssl-policy' SSL policy to exist +apiVersion: networking.gke.io/v1beta1 +kind: FrontendConfig +metadata: + name: cv-manager-webapp-frontend + labels: + app: cv-manager-webapp +spec: + redirectToHttps: + enabled: true + sslPolicy: cv-manager-webapp-ssl-policy +--- +# NodePort to expose CV Manager web application +apiVersion: v1 +kind: Service +metadata: + labels: + app: cv-manager-webapp + name: cv-manager-webapp-service-internal +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: cv-manager-webapp + type: NodePort +--- +# External HTTP/HTTPS Ingress to internal NodePort +# 
Requires 'cv-manager-ip' as a global static external IP to be reserved +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: cv-manager-webapp-ingress + labels: + app: cv-manager-webapp + annotations: + kubernetes.io/ingress.regional-static-ip-name: 'cv-manager-ip' + networking.gke.io/managed-certificates: 'cv-manager-webapp-managed-cert' + networking.gke.io/v1beta1.FrontendConfig: 'cv-manager-webapp-frontend' +spec: + defaultBackend: + service: + name: cv-manager-webapp-service-internal + port: + number: 80 +--- +# Limits the number of pods that are down simultaneously +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: cv-manager-webapp-pdb +spec: + selector: + matchLabels: + app: cv-manager-webapp + maxUnavailable: 1 +--- +apiVersion: 'apps/v1' +kind: 'Deployment' +metadata: + name: cv-manager-webapp +spec: + replicas: 1 + selector: + matchLabels: + app: cv-manager-webapp + template: + metadata: + labels: + app: cv-manager-webapp + spec: + containers: + - name: cv-manager-webapp + imagePullPolicy: Always + # Replace image with an actual built jpo-cvmanager webapp image from an image repository + image: jpoode-cvmanager-webapp:tag + resources: + requests: + memory: '4Gi' + cpu: '2' + ephemeral-storage: '4Gi' + limits: + memory: '4Gi' + cpu: '2' + ephemeral-storage: '4Gi' + tty: true + stdin: true + ports: + - containerPort: 80 diff --git a/resources/kubernetes/firmware-manager.yaml b/resources/kubernetes/firmware-manager.yaml new file mode 100644 index 000000000..9cad8682e --- /dev/null +++ b/resources/kubernetes/firmware-manager.yaml @@ -0,0 +1,93 @@ +apiVersion: v1 +kind: Service +metadata: + name: firmware-manager-svc + labels: + app: firmware-manager +spec: + type: LoadBalancer + ports: + - name: firmware-manager-api + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: firmware-manager +--- +# Limits the number of pods that are down simultaneously +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: firmware-manager-pdb +spec: + selector: + matchLabels: + app: firmware-manager + maxUnavailable: 0 +--- +apiVersion: 'apps/v1' +kind: 'Deployment' +metadata: + name: firmware-manager +spec: + replicas: 1 + selector: + matchLabels: + app: firmware-manager + template: + metadata: + labels: + app: firmware-manager + spec: + containers: + - name: firmware-manager + imagePullPolicy: Always + image: firmware-manager-image:tag + securityContext: + capabilities: + add: ["NET_RAW"] + resources: + requests: + memory: '4Gi' + cpu: '2' + limits: + memory: '4Gi' + cpu: '2' + tty: true + stdin: true + ports: + - containerPort: 8080 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/home/secret/cv_credentials.json" + - name: GCP_PROJECT + value: "" + - name: PG_DB_HOST + value: "" + - name: PG_DB_NAME + value: "" + - name: PG_DB_USER + valueFrom: + secretKeyRef: + name: some-postgres-secret-user + key: some-postgres-secret-key + - name: PG_DB_PASS + valueFrom: + secretKeyRef: + name: some-postgres-secret-password + key: some-postgres-secret-key + - name: BLOB_STORAGE_PROVIDER + value: "" + - name: BLOB_STORAGE_BUCKET + value: "" + - name: LOGGING_LEVEL + value: "INFO" + volumeMounts: + - name: cv-manager-service-key + mountPath: /home/secret + tty: true + stdin: true + volumes: + - name: cv-manager-service-key + secret: + secretName: cv-manager-service-key diff --git a/resources/kubernetes/iss-health-check.yaml b/resources/kubernetes/iss-health-check.yaml index 8d2f45197..24b842c99 100644 --- a/resources/kubernetes/iss-health-check.yaml 
+++ b/resources/kubernetes/iss-health-check.yaml @@ -1,62 +1,62 @@ -# This deployment is only usable in a GCP environment due to the GCP Secret Manager dependency -apiVersion: 'apps/v1' -kind: 'Deployment' -metadata: - name: 'iss-health-check' - labels: - app: 'iss-health-check' -spec: - replicas: 1 - selector: - matchLabels: - app: 'iss-health-check' - template: - metadata: - labels: - app: 'iss-health-check' - spec: - containers: - - name: 'iss-health-check' - imagePullPolicy: Always - # Replace image with an actual built iss-health-check image from an image repository - image: 'iss-health-check-image' - resources: - requests: - memory: '1Gi' - cpu: '0.5' - ports: - - containerPort: 8080 - env: - - name: GOOGLE_APPLICATION_CREDENTIALS - value: '/home/secret/cv_credentials.json' - - name: PROJECT_ID - value: '' - - name: ISS_API_KEY - value: '' - - name: ISS_API_KEY_NAME - value: '' - - name: ISS_PROJECT_ID - value: '' - - name: ISS_SCMS_TOKEN_REST_ENDPOINT - value: '' - - name: ISS_SCMS_VEHICLE_REST_ENDPOINT - value: '' - - name: DB_USER - value: '' - - name: DB_PASS - value: '' - - name: DB_NAME - value: '' - - name: DB_HOST - value: '' - - name: LOGGING_LEVEL - value: 'INFO' - volumeMounts: - - name: cv-manager-service-key - mountPath: /home/secret - tty: true - stdin: true - volumes: - - name: cv-manager-service-key - secret: - secretName: cv-manager-service-key +# This deployment is only usable in a GCP environment due to the GCP Secret Manager dependency +apiVersion: 'apps/v1' +kind: 'Deployment' +metadata: + name: 'iss-health-check' + labels: + app: 'iss-health-check' +spec: + replicas: 1 + selector: + matchLabels: + app: 'iss-health-check' + template: + metadata: + labels: + app: 'iss-health-check' + spec: + containers: + - name: 'iss-health-check' + imagePullPolicy: Always + # Replace image with an actual built iss-health-check image from an image repository + image: 'iss-health-check-image' + resources: + requests: + memory: '1Gi' + cpu: '0.5' + ports: + - containerPort: 8080 + env: + - name: GOOGLE_APPLICATION_CREDENTIALS + value: '/home/secret/cv_credentials.json' + - name: PROJECT_ID + value: '' + - name: ISS_API_KEY + value: '' + - name: ISS_API_KEY_NAME + value: '' + - name: ISS_PROJECT_ID + value: '' + - name: ISS_SCMS_TOKEN_REST_ENDPOINT + value: '' + - name: ISS_SCMS_VEHICLE_REST_ENDPOINT + value: '' + - name: DB_USER + value: '' + - name: DB_PASS + value: '' + - name: DB_NAME + value: '' + - name: DB_HOST + value: '' + - name: LOGGING_LEVEL + value: 'INFO' + volumeMounts: + - name: cv-manager-service-key + mountPath: /home/secret + tty: true + stdin: true + volumes: + - name: cv-manager-service-key + secret: + secretName: cv-manager-service-key diff --git a/resources/kubernetes/rsu-ping-fetch.yaml b/resources/kubernetes/rsu-ping-fetch.yaml index a78600331..86d989ff9 100644 --- a/resources/kubernetes/rsu-ping-fetch.yaml +++ b/resources/kubernetes/rsu-ping-fetch.yaml @@ -1,47 +1,47 @@ -apiVersion: 'apps/v1' -kind: 'Deployment' -metadata: - name: 'rsu-ping-fetch' - labels: - app: 'rsu-ping-fetch' -spec: - replicas: 1 - selector: - matchLabels: - app: 'rsu-ping-fetch' - template: - metadata: - labels: - app: 'rsu-ping-fetch' - spec: - containers: - - name: 'rsu-ping-fetch' - imagePullPolicy: Always - image: 'rsu-ping-fetch-image' - resources: - requests: - memory: '1Gi' - cpu: '0.5' - ports: - - containerPort: 8080 - env: - - name: ZABBIX_ENDPOINT - value: '' - - name: ZABBIX_USER - value: '' - - name: ZABBIX_PASSWORD - value: '' - - name: DB_USER - value: '' - - name: 
DB_PASS - value: '' - - name: DB_NAME - value: '' - - name: DB_HOST - value: '' - - name: STALE_PERIOD - value: '24' - - name: LOGGING_LEVEL - value: 'INFO' - tty: true - stdin: true +apiVersion: 'apps/v1' +kind: 'Deployment' +metadata: + name: 'rsu-ping-fetch' + labels: + app: 'rsu-ping-fetch' +spec: + replicas: 1 + selector: + matchLabels: + app: 'rsu-ping-fetch' + template: + metadata: + labels: + app: 'rsu-ping-fetch' + spec: + containers: + - name: 'rsu-ping-fetch' + imagePullPolicy: Always + image: 'rsu-ping-fetch-image' + resources: + requests: + memory: '1Gi' + cpu: '0.5' + ports: + - containerPort: 8080 + env: + - name: ZABBIX_ENDPOINT + value: '' + - name: ZABBIX_USER + value: '' + - name: ZABBIX_PASSWORD + value: '' + - name: DB_USER + value: '' + - name: DB_PASS + value: '' + - name: DB_NAME + value: '' + - name: DB_HOST + value: '' + - name: STALE_PERIOD + value: '24' + - name: LOGGING_LEVEL + value: 'INFO' + tty: true + stdin: true diff --git a/resources/sql_scripts/CVManager_CreateTables.sql b/resources/sql_scripts/CVManager_CreateTables.sql index 59f84ddb5..a432333fa 100644 --- a/resources/sql_scripts/CVManager_CreateTables.sql +++ b/resources/sql_scripts/CVManager_CreateTables.sql @@ -236,6 +236,7 @@ CREATE TABLE IF NOT EXISTS public.users first_name character varying(128) NOT NULL, last_name character varying(128) NOT NULL, super_user bit(1) NOT NULL, + receive_error_emails bit(1) NOT NULL, CONSTRAINT users_pkey PRIMARY KEY (user_id), CONSTRAINT users_email UNIQUE (email) ); @@ -345,4 +346,4 @@ CREATE TABLE IF NOT EXISTS public.scms_health ON DELETE NO ACTION ); -CREATE SCHEMA IF NOT EXISTS keycloak \ No newline at end of file +CREATE SCHEMA IF NOT EXISTS keycloak; \ No newline at end of file diff --git a/resources/sql_scripts/CVManager_SampleData.sql b/resources/sql_scripts/CVManager_SampleData.sql index 369c2a85c..f28700c93 100644 --- a/resources/sql_scripts/CVManager_SampleData.sql +++ b/resources/sql_scripts/CVManager_SampleData.sql @@ -25,10 +25,10 @@ INSERT INTO public.snmp_credentials( INSERT INTO public.snmp_versions( version_code, nickname) - VALUES ('4.1', '4.1'); + VALUES ('41', 'RSU 4.1'); INSERT INTO public.snmp_versions( version_code, nickname) - VALUES ('12.18', '12.18'); + VALUES ('1218', 'NTCIP 1218'); INSERT INTO public.rsus( geography, milepost, ipv4_address, serial_number, iss_scms_id, primary_route, model, credential_id, snmp_credential_id, snmp_version_id, firmware_version, target_firmware_version) @@ -49,8 +49,8 @@ INSERT INTO public.rsu_organization( -- Replace user with a real gmail to test GCP OAuth2.0 support INSERT INTO public.users( - email, first_name, last_name, super_user) - VALUES ('test@gmail.com', 'Test', 'User', '1'); + email, first_name, last_name, super_user, receive_error_emails) + VALUES ('test@gmail.com', 'Test', 'User', '1', '1'); INSERT INTO public.user_organization( user_id, organization_id, role_id) diff --git a/resources/sql_scripts/README.md b/resources/sql_scripts/README.md index 92df77f61..233287ead 100644 --- a/resources/sql_scripts/README.md +++ b/resources/sql_scripts/README.md @@ -4,7 +4,7 @@ The CV Manager expects most of the data it utilizes to be stored in a PostgreSQL ## CVManager_CreateTables.sql -This is the SQL script to create all of the tables required by the CV Manager. +This is the SQL script to create all of the tables required by the CV Manager. 
This script is referenced in the [Postgres K8s Deployment](/resources/kubernetes/cv-manager-postgres.yaml) as a ConfigMap and must be updated if the CVManager_CreateTables.sql script is altered. ### manufacturers diff --git a/resources/sql_scripts/update_scripts/receive_error_emails_update.sql b/resources/sql_scripts/update_scripts/receive_error_emails_update.sql new file mode 100644 index 000000000..8a5ea5c50 --- /dev/null +++ b/resources/sql_scripts/update_scripts/receive_error_emails_update.sql @@ -0,0 +1,3 @@ +ALTER TABLE public.users + ADD receive_error_emails bit(1) NOT NULL + DEFAULT B'0'; \ No newline at end of file diff --git a/resources/sql_scripts/update_scripts/snmp_version_update.sql b/resources/sql_scripts/update_scripts/snmp_version_update.sql index cb1d71657..aa7b11614 100644 --- a/resources/sql_scripts/update_scripts/snmp_version_update.sql +++ b/resources/sql_scripts/update_scripts/snmp_version_update.sql @@ -1,3 +1,7 @@ +-- Run this SQL update script if you already have a deployed CV Manager PostgreSQL database prior to the SNMP version addition +-- This file will create the 'snmp_versions' table and add NTCIP 1218 and RSU 4.1 as SNMP versions +-- All RSUs are given the default of RSU 4.1 as their SNMP version using this script + CREATE SEQUENCE public.snmp_versions_snmp_version_id_seq INCREMENT 1 START 1 @@ -16,10 +20,10 @@ CREATE TABLE IF NOT EXISTS public.snmp_versions INSERT INTO public.snmp_versions( version_code, nickname) - VALUES ('4.1', '4.1'); + VALUES ('41', 'RSU 4.1'); INSERT INTO public.snmp_versions( version_code, nickname) - VALUES ('12.18', '12.18'); + VALUES ('1218', 'NTCIP 1218'); ALTER TABLE public.rsus ADD snmp_version_id integer NOT NULL diff --git a/sample.env index 1a8bc8eca..436a3c24a 100644 --- a/sample.env +++ b/sample.env @@ -2,6 +2,7 @@ DOCKER_HOST_IP= # Note if debugging, this webapp host IP should be set to the IP of the host machine running the webapp (localhost) WEBAPP_HOST_IP=${DOCKER_HOST_IP} +# Note if using WEBAPP_DOMAIN for the docker-compose-webapp-deployment.yml file, you will need to include http:// or https:// WEBAPP_DOMAIN=cvmanager.local.com KC_HOST_IP=${DOCKER_HOST_IP} @@ -35,6 +36,7 @@ KEYCLOAK_REALM=cvmanager KEYCLOAK_API_CLIENT_ID=cvmanager-api KEYCLOAK_API_CLIENT_SECRET_KEY= KEYCLOAK_LOGIN_THEME_NAME=sample_theme +# Note if using KEYCLOAK_DOMAIN for the docker-compose-webapp-deployment.yml file, you will need to include http:// or https:// KEYCLOAK_DOMAIN=cvmanager.auth.com # GCP OAuth2.0 client ID for SSO authentication in keycloak - if not specified the google SSO will not be functional @@ -53,8 +55,9 @@ MONGO_DB_NAME="ODE" # Set these variables if using either "MONGODB" or "BIGQUERY" # COUNTS_DB_NAME: Used for V2X message counts -# COUNTS_MSG_TYPES: Comma seperated list of message types +# COUNTS_MSG_TYPES: Comma separated list of message types.
COUNTS_DB_NAME= +# COUNTS_MSG_TYPES must be set for the counts menu to correctly populate when building an image for deployment COUNTS_MSG_TYPES='BSM,SSM,SPAT,SRM,MAP' BSM_DB_NAME= SSM_DB_NAME= @@ -77,6 +80,7 @@ TIMEZONE="US/Mountain" # Mapbox token for map rendering in the webapp MAPBOX_TOKEN= +# DOT_NAME must be set for the DOT name to correctly populate when building an image for deployment DOT_NAME="CDOT" MAPBOX_INIT_LATITUDE="39.7392" MAPBOX_INIT_LONGITUDE="-104.9903" diff --git a/services/Dockerfile.api b/services/Dockerfile.api index e1c86524b..47d1be610 100644 --- a/services/Dockerfile.api +++ b/services/Dockerfile.api @@ -14,6 +14,7 @@ ENV APP_HOME /app WORKDIR $APP_HOME ADD api/requirements.txt . ADD api/src/* . +ADD api/src/error_email/* ./error_email/ ADD common/*.py ./common/ # Install production dependencies. diff --git a/services/Dockerfile.bsm_query b/services/Dockerfile.bsm_query index a62d19779..ec3203163 100644 --- a/services/Dockerfile.bsm_query +++ b/services/Dockerfile.bsm_query @@ -1,11 +1,11 @@ -FROM python:3.12.0-alpine3.18 - -WORKDIR /home - -ADD addons/images/bsm_query/requirements.txt . -ADD addons/images/bsm_query/*.py . - -RUN pip3 install -r requirements.txt - -CMD ["/home/bsm_query.py"] +FROM python:3.12.0-alpine3.18 + +WORKDIR /home + +ADD addons/images/bsm_query/requirements.txt . +ADD addons/images/bsm_query/*.py . + +RUN pip3 install -r requirements.txt + +CMD ["/home/bsm_query.py"] ENTRYPOINT ["python3"] \ No newline at end of file diff --git a/services/Dockerfile.count_metric b/services/Dockerfile.count_metric index b68bb30db..1e09bad1e 100644 --- a/services/Dockerfile.count_metric +++ b/services/Dockerfile.count_metric @@ -1,12 +1,12 @@ -FROM python:3.12-slim - -WORKDIR /home - -ADD addons/images/count_metric/requirements.txt . -ADD addons/images/count_metric/*.py . -ADD common/*.py ./common/ - -RUN pip3 install -r requirements.txt - -CMD ["/home/driver.py"] +FROM python:3.12-slim + +WORKDIR /home + +ADD addons/images/count_metric/requirements.txt . +ADD addons/images/count_metric/*.py . +ADD common/*.py ./common/ + +RUN pip3 install -r requirements.txt + +CMD ["/home/driver.py"] ENTRYPOINT ["python3"] \ No newline at end of file diff --git a/services/Dockerfile.firmware_manager b/services/Dockerfile.firmware_manager index fa78e10d2..558642e19 100644 --- a/services/Dockerfile.firmware_manager +++ b/services/Dockerfile.firmware_manager @@ -3,10 +3,14 @@ FROM python:3.12-slim WORKDIR /home ADD addons/images/firmware_manager/requirements.txt . +ADD addons/images/firmware_manager/resources/xfer_yunex.jar ./tools/ ADD addons/images/firmware_manager/*.py . ADD common/*.py ./common/ RUN pip3 install -r requirements.txt +RUN apt-get update +RUN apt-get install -y default-jdk +RUN apt-get install -y iputils-ping CMD ["/home/firmware_manager.py"] ENTRYPOINT ["python3"] \ No newline at end of file diff --git a/services/Dockerfile.iss_health_check b/services/Dockerfile.iss_health_check index f4fc820ec..97854c1f0 100644 --- a/services/Dockerfile.iss_health_check +++ b/services/Dockerfile.iss_health_check @@ -1,16 +1,16 @@ -FROM python:3.12.0-alpine3.18 - -WORKDIR /home - -ADD addons/images/iss_health_check/crontab . -ADD addons/images/iss_health_check/requirements.txt . -ADD addons/images/iss_health_check/*.py . 
-ADD common/*.py ./common/ - -RUN pip3 install -r /home/requirements.txt -# fix the line endings from windows -RUN dos2unix /home/crontab - -RUN crontab /home/crontab - +FROM python:3.12.0-alpine3.18 + +WORKDIR /home + +ADD addons/images/iss_health_check/crontab . +ADD addons/images/iss_health_check/requirements.txt . +ADD addons/images/iss_health_check/*.py . +ADD common/*.py ./common/ + +RUN pip3 install -r /home/requirements.txt +# fix the line endings from windows +RUN dos2unix /home/crontab + +RUN crontab /home/crontab + CMD ["crond", "-f"] \ No newline at end of file diff --git a/services/Dockerfile.rsu_ping_fetch b/services/Dockerfile.rsu_ping_fetch index 34a2dab4a..bdf569f43 100644 --- a/services/Dockerfile.rsu_ping_fetch +++ b/services/Dockerfile.rsu_ping_fetch @@ -1,17 +1,17 @@ -FROM python:3.12.0-alpine3.18 - -WORKDIR /home - -ADD addons/images/rsu_ping/crontab.rsu_ping_fetch ./crontab -ADD addons/images/rsu_ping/requirements.txt . -ADD addons/images/rsu_ping/rsu_ping_fetch.py . -ADD addons/images/rsu_ping/purger.py . -ADD common/*.py ./common/ - -RUN pip3 install -r requirements.txt -# fix the line endings from windows -RUN dos2unix /home/crontab - -RUN crontab /home/crontab - +FROM python:3.12.0-alpine3.18 + +WORKDIR /home + +ADD addons/images/rsu_ping/crontab.rsu_ping_fetch ./crontab +ADD addons/images/rsu_ping/requirements.txt . +ADD addons/images/rsu_ping/rsu_ping_fetch.py . +ADD addons/images/rsu_ping/purger.py . +ADD common/*.py ./common/ + +RUN pip3 install -r requirements.txt +# fix the line endings from windows +RUN dos2unix /home/crontab + +RUN crontab /home/crontab + CMD ["crond", "-f"] \ No newline at end of file diff --git a/services/README.md b/services/README.md index d79562a12..cea07ae2d 100644 --- a/services/README.md +++ b/services/README.md @@ -1,45 +1,51 @@ -# CV Manager Services - -The CV Manager has multiple backend services that are required to allow the CV Manager to operate at full capacity. - -## CV Manager API - -The CV Manager API is the backend service for the CV Manager webapp. This API is required to be run in an accessible location for the web application to function. The API is a Python Flask REST service. - -To learn more of what the CV Manager API offers, refer to its [README](api/README.md). - -## CV Manager Add-Ons - -The CV Manager add-ons are services that are very useful in allowing a user to collect and create all of the required data to be inserted into the CV Manager PostgreSQL database to allow the CV Manager to function. None of these services are required to be run. Alternative data sources for the following services can be used. However, all of these services are Kubernetes ready and are easy to integrate. - -### bsm_query - -The bsm_query service allows for BSM data to be geospatially queryable in a MongoDB collection. - -Read more about the deployment process in the [bsm_query directory](addons/images/bsm_query/README.md). - -### count_metric - -The count_metric service allows for the creation of count metrics for received V2X data, counted by data type for each RSU in the CV Manager PostgreSQL database. The counter relies on Kafka and an existing deployment of the [jpo-ode](https://github.com/usdot-jpo-ode/jpo-ode/tree/master). The message counts will then be displayable on the CV Manager. - -Read more about the deployment process in the [count_metric directory](addons/images/count_metric/README.md). 
- -### iss_health_check - -The iss_health_check service allows for RSU ISS SCMS certificate status information to be displayed on the CV Manager. This service has a dependency on the GCP Secret Manager but can be reworked to work with any secret manager. This service requires a service agreement with Greenhills ISS so an API key can be obtained to access a user's RSU profile. - -Read more about the deployment process in the [iss_health_check directory](addons/images/iss_health_check/README.md). - -### rsu_ping_fetch - -The rsu_ping_fetch service allows for RSU online status information to be displayed on the CV Manager. This service requires a Zabbix API endpoint to function. The Zabbix server must be configured to monitor all of the RSUs displayed on the CV Manager to successfully receive online status information for each device. - -Read more about the deployment process in the [rsu_ping_fetch directory](addons/images/rsu_ping_fetch/README.md). - -## Testing - -The API and Add-Ons both have unit tests that must be run from the services directory. The VSCode tasks can alo be used to simply run all of the unit tests. Before running either method, make sure to first install all of the dependencies from the [requirements.txt](requirements.txt). - -1. Ensure working directory is the `services` directory. -2. Install requirements.txt `pip3 install -r requirements.txt`. -3. Run `python3 -m pytest` to run all of the Python unit tests for all services. +# CV Manager Services + +The CV Manager has multiple backend services that are required to allow the CV Manager to operate at full capacity. + +## CV Manager API + +The CV Manager API is the backend service for the CV Manager webapp. This API is required to be run in an accessible location for the web application to function. The API is a Python Flask REST service. + +To learn more about what the CV Manager API offers, refer to its [README](api/README.md). + +## CV Manager Add-Ons + +The CV Manager add-ons are services that are very useful in allowing a user to collect and create all of the required data to be inserted into the CV Manager PostgreSQL database to allow the CV Manager to function. None of these services are required to be run. Alternative data sources for the following services can be used. However, all of these services are Kubernetes ready and are easy to integrate. + +### bsm_query + +The bsm_query service allows for BSM data to be geospatially queryable in a MongoDB collection. + +Read more about the deployment process in the [bsm_query directory](addons/images/bsm_query/README.md). + +### count_metric + +The count_metric service allows for the creation of count metrics for received V2X data, counted by data type for each RSU in the CV Manager PostgreSQL database. The counter relies on Kafka and an existing deployment of the [jpo-ode](https://github.com/usdot-jpo-ode/jpo-ode/tree/master). The message counts will then be displayable on the CV Manager. + +Read more about the deployment process in the [count_metric directory](addons/images/count_metric/README.md). + +### firmware_manager + +The firmware_manager service monitors the CV Manager PostgreSQL database for RSUs whose firmware_version differs from their target_firmware_version in the 'rsus' table and performs firmware upgrades accordingly. These checks occur on an hourly basis for all RSUs but can also be executed immediately for an individual RSU utilizing the hosted API endpoints. 
This feature is intended to be used by the CV Manager API but can also be done manually for test purposes. + +Read more about the deployment process in the [firmware_manager directory](addons/images/firmware_manager/README.md). + +### iss_health_check + +The iss_health_check service allows for RSU ISS SCMS certificate status information to be displayed on the CV Manager. This service has a dependency on the GCP Secret Manager but can be reworked to work with any secret manager. This service requires a service agreement with Greenhills ISS so an API key can be obtained to access a user's RSU profile. + +Read more about the deployment process in the [iss_health_check directory](addons/images/iss_health_check/README.md). + +### rsu_ping + +The rsu_ping directory can be built as the rsu_ping_fetch or rsu_pinger service. Both versions allow RSU online status information to be displayed on the CV Manager. The rsu_ping_fetch service requires a Zabbix API endpoint to function. The Zabbix server must be configured to monitor all of the RSUs displayed on the CV Manager to successfully receive online status information for each device. The rsu_pinger allows for obtaining RSU online status information without the need for a Zabbix server. The rsu_pinger is a streamlined option with no purpose other than gathering online status information. For a more robust collector of RSU data, a Zabbix server is recommended. + +Read more about the deployment process in the [rsu_ping directory](addons/images/rsu_ping/README.md). + +## Testing + +The API and Add-Ons both have unit tests that must be run from the services directory. The VSCode tasks can also be used to run all of the unit tests. Before running either method, make sure to first install all of the dependencies from the [requirements.txt](requirements.txt). + +1. Ensure the working directory is the `services` directory. +2. Install the requirements: `pip3 install -r requirements.txt`. +3. Run `python3 -m pytest` to run all of the Python unit tests for all services. 
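As a sketch of what one of these unit tests can look like, the following hypothetical pytest case exercises `create_message` from the bsm_query service (the function appears later in this diff). The sample BSM payload values are invented, and the bare `import bsm_query` assumes the addon directory is on `PYTHONPATH` as configured in `.vscode/settings.json`.

```python
# Hypothetical pytest sketch; the sample BSM payload below is invented.
from datetime import datetime

import bsm_query  # assumes services/addons/images/bsm_query is on PYTHONPATH


def test_create_message_builds_geojson_point():
    original = {
        "payload": {
            "data": {
                "coreData": {"position": {"longitude": -104.9903, "latitude": 39.7392}}
            }
        },
        "metadata": {
            "originIp": "10.0.0.1",
            "odeReceivedAt": "2023-12-01T12:00:00.000Z",
        },
    }

    message = bsm_query.create_message(original)

    # The converted document is a GeoJSON Feature keyed by the RSU's IP
    assert message["type"] == "Feature"
    assert message["geometry"]["coordinates"] == [-104.9903, 39.7392]
    assert message["properties"]["id"] == "10.0.0.1"
    assert message["properties"]["timestamp"] == datetime(2023, 12, 1, 12, 0)
```

Run it with `python3 -m pytest` from the `services` directory as described above.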
diff --git a/services/addons/.gitignore b/services/addons/.gitignore index 730f10a4d..d927c321c 100644 --- a/services/addons/.gitignore +++ b/services/addons/.gitignore @@ -1,3 +1,3 @@ -.pytest_cache/ -env/ +.pytest_cache/ +env/ .coverage \ No newline at end of file diff --git a/services/addons/images/bsm_query/bsm_query.py b/services/addons/images/bsm_query/bsm_query.py index c73e1d1af..c4143e3cb 100644 --- a/services/addons/images/bsm_query/bsm_query.py +++ b/services/addons/images/bsm_query/bsm_query.py @@ -1,79 +1,79 @@ -import os -from concurrent.futures import ThreadPoolExecutor -import logging -from pymongo import MongoClient -from datetime import datetime - - -def set_mongo_client(MONGO_DB_URI, MONGO_DB, MONGO_BSM_INPUT_COLLECTION): - client = MongoClient(MONGO_DB_URI) - db = client[MONGO_DB] - collection = db[MONGO_BSM_INPUT_COLLECTION] - return db, collection - - -def create_message(original_message): - new_message = { - "type": "Feature", - "geometry": { - "type": "Point", - "coordinates": [ - original_message["payload"]["data"]["coreData"]["position"][ - "longitude" - ], - original_message["payload"]["data"]["coreData"]["position"]["latitude"], - ], - }, - "properties": { - "id": original_message["metadata"]["originIp"], - "timestamp": datetime.strptime( - original_message["metadata"]["odeReceivedAt"], "%Y-%m-%dT%H:%M:%S.%fZ" - ), - }, - } - return new_message - - -def process_message(message, db, collection): - new_message = create_message(message) - db[collection].insert_one(new_message) - - -def run(): - MONGO_DB_URI = os.getenv("MONGO_DB_URI") - MONGO_DB = os.getenv("MONGO_DB_NAME") - MONGO_BSM_INPUT_COLLECTION = os.getenv("MONGO_BSM_INPUT_COLLECTION") - MONGO_GEO_OUTPUT_COLLECTION = os.getenv("MONGO_GEO_OUTPUT_COLLECTION") - - if ( - MONGO_DB_URI is None - or MONGO_BSM_INPUT_COLLECTION is None - or MONGO_DB is None - or MONGO_GEO_OUTPUT_COLLECTION is None - ): - logging.error("Environment variables are not set! Exiting.") - exit("Environment variables are not set! 
Exiting.") - - log_level = ( - "INFO" if "LOGGING_LEVEL" not in os.environ else os.environ["LOGGING_LEVEL"] - ) - logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level) - - executor = ThreadPoolExecutor(max_workers=5) - - db, collection = set_mongo_client( - MONGO_DB_URI, MONGO_DB, MONGO_BSM_INPUT_COLLECTION - ) - - count = 0 - with collection.watch() as stream: - for change in stream: - count += 1 - executor.submit( - process_message, change["fullDocument"], db, MONGO_GEO_OUTPUT_COLLECTION - ) - logging.info(count) - - -if __name__ == "__main__": - run() +import os +from concurrent.futures import ThreadPoolExecutor +import logging +from pymongo import MongoClient +from datetime import datetime + + +def set_mongo_client(MONGO_DB_URI, MONGO_DB, MONGO_BSM_INPUT_COLLECTION): + client = MongoClient(MONGO_DB_URI) + db = client[MONGO_DB] + collection = db[MONGO_BSM_INPUT_COLLECTION] + return db, collection + + +def create_message(original_message): + new_message = { + "type": "Feature", + "geometry": { + "type": "Point", + "coordinates": [ + original_message["payload"]["data"]["coreData"]["position"][ + "longitude" + ], + original_message["payload"]["data"]["coreData"]["position"]["latitude"], + ], + }, + "properties": { + "id": original_message["metadata"]["originIp"], + "timestamp": datetime.strptime( + original_message["metadata"]["odeReceivedAt"], "%Y-%m-%dT%H:%M:%S.%fZ" + ), + }, + } + return new_message + + +def process_message(message, db, collection): + new_message = create_message(message) + db[collection].insert_one(new_message) + + +def run(): + MONGO_DB_URI = os.getenv("MONGO_DB_URI") + MONGO_DB = os.getenv("MONGO_DB_NAME") + MONGO_BSM_INPUT_COLLECTION = os.getenv("MONGO_BSM_INPUT_COLLECTION") + MONGO_GEO_OUTPUT_COLLECTION = os.getenv("MONGO_GEO_OUTPUT_COLLECTION") + + if ( + MONGO_DB_URI is None + or MONGO_BSM_INPUT_COLLECTION is None + or MONGO_DB is None + or MONGO_GEO_OUTPUT_COLLECTION is None + ): + logging.error("Environment variables are not set! Exiting.") + exit("Environment variables are not set! Exiting.") + + log_level = ( + "INFO" if "LOGGING_LEVEL" not in os.environ else os.environ["LOGGING_LEVEL"] + ) + logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level) + + executor = ThreadPoolExecutor(max_workers=5) + + db, collection = set_mongo_client( + MONGO_DB_URI, MONGO_DB, MONGO_BSM_INPUT_COLLECTION + ) + + count = 0 + with collection.watch() as stream: + for change in stream: + count += 1 + executor.submit( + process_message, change["fullDocument"], db, MONGO_GEO_OUTPUT_COLLECTION + ) + logging.info(count) + + +if __name__ == "__main__": + run() diff --git a/services/addons/images/bsm_query/docker-compose.yml b/services/addons/images/bsm_query/docker-compose.yml index c4b04f5ba..ebd464dde 100644 --- a/services/addons/images/bsm_query/docker-compose.yml +++ b/services/addons/images/bsm_query/docker-compose.yml @@ -1,12 +1,12 @@ -version: '3' -services: - jpo_kafka_counter: - build: . - image: jpo_bsm_query:latest - restart: always - env_file: - - .env - logging: - options: - max-size: '10m' - max-file: '5' +version: '3' +services: + jpo_kafka_counter: + build: . 
+ image: jpo_bsm_query:latest + restart: always + env_file: + - .env + logging: + options: + max-size: '10m' + max-file: '5' diff --git a/services/addons/images/bsm_query/requirements.txt b/services/addons/images/bsm_query/requirements.txt index f8eb14a03..76b1873ec 100644 --- a/services/addons/images/bsm_query/requirements.txt +++ b/services/addons/images/bsm_query/requirements.txt @@ -1,3 +1,3 @@ -DateTime==5.2 -pymongo==4.5.0 -python-dateutil==2.8.2 +DateTime==5.2 +pymongo==4.5.0 +python-dateutil==2.8.2 diff --git a/services/addons/images/bsm_query/sample.env b/services/addons/images/bsm_query/sample.env index 4d0b2da05..ae432b243 100644 --- a/services/addons/images/bsm_query/sample.env +++ b/services/addons/images/bsm_query/sample.env @@ -1,9 +1,9 @@ -# Mongo connection variables -MONGO_DB_URI = 'mongodb://:27017/' -MONGO_DB_NAME = '' - -# Name of the BSM Database for incoming BSM messages -MONGO_BSM_INPUT_COLLECTION = '' -# Name of output geospatial MongoDB collection -MONGO_GEO_OUTPUT_COLLECTION = '' +# Mongo connection variables +MONGO_DB_URI = 'mongodb://:27017/' +MONGO_DB_NAME = '' + +# Name of the MongoDB collection for incoming BSM messages +MONGO_BSM_INPUT_COLLECTION = '' +# Name of output geospatial MongoDB collection +MONGO_GEO_OUTPUT_COLLECTION = '' LOGGING_LEVEL = "INFO" \ No newline at end of file diff --git a/services/addons/images/count_metric/docker-compose.yml b/services/addons/images/count_metric/docker-compose.yml index 705c13fc7..85109fe4d 100644 --- a/services/addons/images/count_metric/docker-compose.yml +++ b/services/addons/images/count_metric/docker-compose.yml @@ -1,12 +1,12 @@ -version: '3' -services: - jpo_kafka_counter: - build: . 
+ image: jpo_kafka_counter:latest + restart: always + env_file: + - .env + logging: + options: + max-size: '10m' + max-file: '5' diff --git a/services/addons/images/count_metric/driver.py b/services/addons/images/count_metric/driver.py index 430652fc4..56974b0c0 100644 --- a/services/addons/images/count_metric/driver.py +++ b/services/addons/images/count_metric/driver.py @@ -1,100 +1,102 @@ -import os -import copy -import threading -import logging -import common.pgquery as pgquery - -from kafka_counter import KafkaMessageCounter - -# Set based on project and subscription, set these outside of the script if deployed - -thread_pool = [] -rsu_location_dict = {} -rsu_count_dict = {} - -# Query for RSU data from CV Manager PostgreSQL database -def get_rsu_list(): - result = [] - - # Execute the query and fetch all results - query = "SELECT to_jsonb(row) FROM (SELECT ipv4_address, primary_route FROM public.rsus ORDER BY ipv4_address) as row" - data = pgquery.query_db(query) - - logging.debug("Parsing results...") - for row in data: - row = dict(row[0]) - result.append(row) - - return result - -# Create template dictionaries for RSU roads and counts using HTTP JSON data -def populateRsuDict(): - rsu_list = get_rsu_list() - for rsu in rsu_list: - rsu_ip = rsu['ipv4_address'] - p_route = rsu['primary_route'] - - rsu_location_dict[rsu_ip] = p_route - # Add IP to dict if the road exists in the dict already - if p_route in rsu_count_dict: - rsu_count_dict[p_route][rsu_ip] = 0 - else: - rsu_count_dict[p_route] = {rsu_ip: 0} - - rsu_count_dict["Unknown"] = {} - - -def run(): - # Pull list of message types to run counts for from environment variable - messageTypesString = os.getenv("MESSAGE_TYPES", "") - if messageTypesString == "": - logging.error("MESSAGE_TYPES environment variable not set! Exiting.") - exit("MESSAGE_TYPES environment variable not set! 
Exiting.") - message_types = [ - msgtype.strip().lower() for msgtype in messageTypesString.split(",") - ] - - # Configure logging based on ENV var or use default if not set - log_level = os.getenv("LOGGING_LEVEL", "INFO") - logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level) - - logging.debug("Creating RSU and count dictionaries...") - populateRsuDict() - - logging.info("Creating Data-In Kafka count threads...") - # Start the Kafka counters on their own threads - for message_type in message_types: - counter = KafkaMessageCounter( - f"KAFKA_IN_{message_type.upper()}", - message_type, - copy.deepcopy(rsu_location_dict), - copy.deepcopy(rsu_count_dict), - copy.deepcopy(rsu_count_dict), - 0, - ) - new_thread = threading.Thread(target=counter.start_counter) - new_thread.start() - thread_pool.append(new_thread) - - logging.info("Creating Data-Out Kafka count threads...") - # Start the Kafka counters on their own threads - for message_type in message_types: - counter = KafkaMessageCounter( - f"KAFKA_OUT_{message_type.upper()}", - message_type, - copy.deepcopy(rsu_location_dict), - copy.deepcopy(rsu_count_dict), - copy.deepcopy(rsu_count_dict), - 1, - ) - new_thread = threading.Thread(target=counter.start_counter) - new_thread.start() - thread_pool.append(new_thread) - - for thread in thread_pool: - thread.join() - logging.debug("Closed thread") - - -if __name__ == "__main__": - run() +import os +import copy +import threading +import logging +import common.pgquery as pgquery + +from kafka_counter import KafkaMessageCounter + +# Set based on project and subscription, set these outside of the script if deployed + +thread_pool = [] +rsu_location_dict = {} +rsu_count_dict = {} + + +# Query for RSU data from CV Manager PostgreSQL database +def get_rsu_list(): + result = [] + + # Execute the query and fetch all results + query = "SELECT to_jsonb(row) FROM (SELECT ipv4_address, primary_route FROM public.rsus ORDER BY ipv4_address) as row" + data = pgquery.query_db(query) + + logging.debug("Parsing results...") + for row in data: + row = dict(row[0]) + result.append(row) + + return result + + +# Create template dictionaries for RSU roads and counts using HTTP JSON data +def populateRsuDict(): + rsu_list = get_rsu_list() + for rsu in rsu_list: + rsu_ip = rsu["ipv4_address"] + p_route = rsu["primary_route"] + + rsu_location_dict[rsu_ip] = p_route + # Add IP to dict if the road exists in the dict already + if p_route in rsu_count_dict: + rsu_count_dict[p_route][rsu_ip] = 0 + else: + rsu_count_dict[p_route] = {rsu_ip: 0} + + rsu_count_dict["Unknown"] = {} + + +def run(): + # Pull list of message types to run counts for from environment variable + messageTypesString = os.getenv("MESSAGE_TYPES", "") + if messageTypesString == "": + logging.error("MESSAGE_TYPES environment variable not set! Exiting.") + exit("MESSAGE_TYPES environment variable not set! 
Exiting.") + message_types = [ + msgtype.strip().lower() for msgtype in messageTypesString.split(",") + ] + + # Configure logging based on ENV var or use default if not set + log_level = os.getenv("LOGGING_LEVEL", "INFO") + logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level) + + logging.debug("Creating RSU and count dictionaries...") + populateRsuDict() + + logging.info("Creating Data-In Kafka count threads...") + # Start the Kafka counters on their own threads + for message_type in message_types: + counter = KafkaMessageCounter( + f"KAFKA_IN_{message_type.upper()}", + message_type, + copy.deepcopy(rsu_location_dict), + copy.deepcopy(rsu_count_dict), + copy.deepcopy(rsu_count_dict), + 0, + ) + new_thread = threading.Thread(target=counter.start_counter) + new_thread.start() + thread_pool.append(new_thread) + + logging.info("Creating Data-Out Kafka count threads...") + # Start the Kafka counters on their own threads + for message_type in message_types: + counter = KafkaMessageCounter( + f"KAFKA_OUT_{message_type.upper()}", + message_type, + copy.deepcopy(rsu_location_dict), + copy.deepcopy(rsu_count_dict), + copy.deepcopy(rsu_count_dict), + 1, + ) + new_thread = threading.Thread(target=counter.start_counter) + new_thread.start() + thread_pool.append(new_thread) + + for thread in thread_pool: + thread.join() + logging.debug("Closed thread") + + +if __name__ == "__main__": + run() diff --git a/services/addons/images/count_metric/kafka_counter.py b/services/addons/images/count_metric/kafka_counter.py index 183c8ecee..eb3c639f9 100644 --- a/services/addons/images/count_metric/kafka_counter.py +++ b/services/addons/images/count_metric/kafka_counter.py @@ -52,8 +52,10 @@ def write_bigquery(self, query_values): else: tablename = os.getenv("PUBSUB_BIGQUERY_TABLENAME") - query = f"INSERT INTO `{tablename}`(RSU, Road, Date, Type, Count) " \ - f"VALUES {query_values}" + query = ( + f"INSERT INTO `{tablename}`(RSU, Road, Date, Type, Count) " + f"VALUES {query_values}" + ) query_job = self.bq_client.query(query) # .result() ensures the Python script waits for this request to finish before moving on @@ -95,10 +97,14 @@ def push_metrics(self): try: if len(query_values) > 0: self.write_bigquery(query_values[:-2]) - else: - logging.warning(f'{self.thread_id}: No values found to push for Kafka {self.message_type}') + else: + logging.warning( + f"{self.thread_id}: No values found to push for Kafka {self.message_type}" + ) except Exception as e: - logging.error(f'{self.thread_id}: The metric publish to BigQuery failed for {self.message_type.upper()}: {e}') + logging.error( + f"{self.thread_id}: The metric publish to BigQuery failed for {self.message_type.upper()}: {e}" + ) return elif os.getenv("DESTINATION_DB") == "MONGODB": time = parser.parse(period) @@ -180,25 +186,27 @@ def should_run(self): return True def listen_for_message_and_process(self, topic, bootstrap_server): - logging.debug(f'{self.thread_id}: Listening for messages on Kafka topic {topic}...') + logging.debug( + f"{self.thread_id}: Listening for messages on Kafka topic {topic}..." 
+ ) - if os.getenv('KAFKA_TYPE', '') == 'CONFLUENT': - username = os.getenv('CONFLUENT_KEY') - password = os.getenv('CONFLUENT_SECRET') + if os.getenv("KAFKA_TYPE", "") == "CONFLUENT": + username = os.getenv("CONFLUENT_KEY") + password = os.getenv("CONFLUENT_SECRET") conf = { - 'bootstrap.servers': bootstrap_server, - 'security.protocol': 'SASL_SSL', - 'sasl.mechanism': 'PLAIN', - 'sasl.username': username, - 'sasl.password': password, - 'group.id': f'{self.thread_id}-counter', - 'auto.offset.reset': 'latest' - } + "bootstrap.servers": bootstrap_server, + "security.protocol": "SASL_SSL", + "sasl.mechanism": "PLAIN", + "sasl.username": username, + "sasl.password": password, + "group.id": f"{self.thread_id}-counter", + "auto.offset.reset": "latest", + } else: conf = { - 'bootstrap.servers': bootstrap_server, - 'group.id': f'{self.thread_id}-counter', - 'auto.offset.reset': 'latest' + "bootstrap.servers": bootstrap_server, + "group.id": f"{self.thread_id}-counter", + "auto.offset.reset": "latest", } consumer = Consumer(conf) @@ -207,12 +215,16 @@ def listen_for_message_and_process(self, topic, bootstrap_server): while self.should_run(): msg = consumer.poll(timeout=1.0) - if msg is None: continue + if msg is None: + continue if msg.error(): if msg.error().code() == KafkaError._PARTITION_EOF: # End of partition event - logging.warning('Topic %s [%d] reached end at offset %d\n' % (msg.topic(), msg.partition(), msg.offset())) + logging.warning( + "Topic %s [%d] reached end at offset %d\n" + % (msg.topic(), msg.partition(), msg.offset()) + ) elif msg.error(): raise KafkaException(msg.error()) else: @@ -220,7 +232,9 @@ def listen_for_message_and_process(self, topic, bootstrap_server): finally: # Close down consumer to commit final offsets. consumer.close() - logging.warning(f'{self.thread_id}: Disconnected from Kafka topic, reconnecting...') + logging.warning( + f"{self.thread_id}: Disconnected from Kafka topic, reconnecting..." + ) def get_topic_from_type(self): # 0 - in metric @@ -240,9 +254,11 @@ def read_topic(self): def start_counter(self): # Setup scheduler for async metric uploads - scheduler = BackgroundScheduler({'apscheduler.timezone': 'UTC'}) - scheduler.add_job(self.push_metrics, 'cron', minute="0") + scheduler = BackgroundScheduler({"apscheduler.timezone": "UTC"}) + scheduler.add_job(self.push_metrics, "cron", minute="0") scheduler.start() - logging.info(f'{self.thread_id}: Starting up {self.message_type.upper()} Kafka Metric thread...') + logging.info( + f"{self.thread_id}: Starting up {self.message_type.upper()} Kafka Metric thread..." 
+ ) self.read_topic() diff --git a/services/addons/images/count_metric/requirements.txt b/services/addons/images/count_metric/requirements.txt index fd2b5ae06..5d6324f06 100644 --- a/services/addons/images/count_metric/requirements.txt +++ b/services/addons/images/count_metric/requirements.txt @@ -1,9 +1,9 @@ -confluent-kafka==2.3.0 -google-cloud-bigquery==3.14.1 -APScheduler==3.10.4 -DateTime==5.2 -requests==2.31.0 -pymongo==4.5.0 -sqlalchemy==2.0.21 -pg8000==1.30.2 -python-dateutil==2.8.2 +confluent-kafka==2.3.0 +google-cloud-bigquery==3.14.1 +APScheduler==3.10.4 +DateTime==5.2 +requests==2.31.0 +pymongo==4.5.0 +sqlalchemy==2.0.21 +pg8000==1.30.2 +python-dateutil==2.8.2 diff --git a/services/addons/images/count_metric/sample.env b/services/addons/images/count_metric/sample.env index c551c9147..b7d691cd5 100644 --- a/services/addons/images/count_metric/sample.env +++ b/services/addons/images/count_metric/sample.env @@ -1,23 +1,23 @@ -LOGGING_LEVEL = "INFO" - -MESSAGE_TYPES = 'bsm' -PROJECT_ID = '' -ODE_KAFKA_BROKERS = ':9092' - -# POSTGRES DATABASE VARIABLES -PG_DB_HOST = ':5432' -PG_DB_USER = -PG_DB_PASS = '' -PG_DB_NAME = - -# EITHER "MONGODB" or "BIGQUERY" -DESTINATION_DB = 'MONGODB' - -# MONGODB REQUIRED VARIABLES -MONGO_DB_URI = 'mongodb://:27017/' -MONGO_DB_NAME = 'ODE' -INPUT_COUNTS_MONGO_COLLECTION_NAME = '' -OUTPUT_COUNTS_MONGO_COLLECTION_NAME = '' - -# BIGQUERY REQUIRED VARIABLES +LOGGING_LEVEL = "INFO" + +MESSAGE_TYPES = 'bsm' +PROJECT_ID = '' +ODE_KAFKA_BROKERS = ':9092' + +# POSTGRES DATABASE VARIABLES +PG_DB_HOST = ':5432' +PG_DB_USER = +PG_DB_PASS = '' +PG_DB_NAME = + +# EITHER "MONGODB" or "BIGQUERY" +DESTINATION_DB = 'MONGODB' + +# MONGODB REQUIRED VARIABLES +MONGO_DB_URI = 'mongodb://:27017/' +MONGO_DB_NAME = 'ODE' +INPUT_COUNTS_MONGO_COLLECTION_NAME = '' +OUTPUT_COUNTS_MONGO_COLLECTION_NAME = '' + +# BIGQUERY REQUIRED VARIABLES KAFKA_BIGQUERY_TABLENAME = '' \ No newline at end of file diff --git a/services/addons/images/firmware_manager/README.md b/services/addons/images/firmware_manager/README.md index 6df8ba4a4..2d390ce29 100644 --- a/services/addons/images/firmware_manager/README.md +++ b/services/addons/images/firmware_manager/README.md @@ -6,6 +6,9 @@ - [Table of Contents](#table-of-contents) - [About ](#about-) - [Requirements ](#requirements-) + - [Vendor Specific Requirements](#vendor-specific-requirements) + - [Commsignia](#commsignia) + - [Yunex](#yunex) ## About @@ -20,6 +23,7 @@ Firmware upgrades have unique procedures based on RSU vendor/manufacturer. To av List of currently supported vendors: - Commsignia +- Yunex Available REST endpoints: @@ -55,3 +59,30 @@ GCP Required environment variables: - GCP_PROJECT - GCP project for the firmware cloud storage bucket - GOOGLE_APPLICATION_CREDENTIALS - Service account location. Recommended to attach as a volume. + +## Vendor Specific Requirements + +### Commsignia + +Each upgrade requires just one firmware file. Upload target firmware to a cloud storage bucket or alternative hosting service according to the `vendor/rsu-model/firmware-version/install_package` directory path format. 
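To make the `vendor/rsu-model/firmware-version/install_package` convention concrete, here is a minimal sketch of how the blob name is assembled from an RSU's database record, mirroring the `blob_name` construction in `upgrader.py` later in this diff; the vendor, model, version, and package values are hypothetical examples.

```python
# Hypothetical values illustrating the firmware blob path convention
upgrade_info = {
    "manufacturer": "Commsignia",
    "model": "ITS-RS4-M",
    "target_firmware_version": "y20.39.4",
    "install_package": "firmware_package.tar",
}

# Mirrors the blob_name construction in upgrader.py shown later in this diff
blob_name = (
    f"{upgrade_info['manufacturer']}/{upgrade_info['model']}/"
    f"{upgrade_info['target_firmware_version']}/{upgrade_info['install_package']}"
)
print(blob_name)  # Commsignia/ITS-RS4-M/y20.39.4/firmware_package.tar
```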
+ +### Yunex + +Each upgrade requires 4 total files tarred up into a single TAR file: + +- Core upgrade file - Provided by Yunex +- SDK upgrade file - Provided by Yunex +- Application provisioning file - Provided by Yunex +- upgrade_info.json - Custom JSON file defining the upgrade files' names + +The content of `upgrade_info.json` is created by the implementer in the following format: + +``` +{ + "core": "core-upgrade-file", + "sdk": "sdk-upgrade-file", + "provision": "provision-upgrade-file" +} +``` + +Upload target firmware TAR to a cloud storage bucket or alternative hosting service according to the `vendor/rsu-model/firmware-version/install_package` directory path format, where `install_package` is the TAR file. diff --git a/services/addons/images/firmware_manager/commsignia_upgrader.py b/services/addons/images/firmware_manager/commsignia_upgrader.py index 37a0d1c54..b5b12ffb8 100644 --- a/services/addons/images/firmware_manager/commsignia_upgrader.py +++ b/services/addons/images/firmware_manager/commsignia_upgrader.py @@ -6,50 +6,59 @@ import os import sys -class CommsigniaUpgrader( upgrader.UpgraderAbstractClass ): - def __init__(self, upgrade_info): - super().__init__(upgrade_info) - def upgrade(self): - try: - # Download firmware installation package - self.download_blob() +class CommsigniaUpgrader(upgrader.UpgraderAbstractClass): + def __init__(self, upgrade_info): + super().__init__(upgrade_info) - # Make connection with the target device - logging.info("Making SSH connection with the device...") - ssh = SSHClient() - ssh.set_missing_host_key_policy(WarningPolicy) - ssh.connect(self.rsu_ip, username=self.ssh_username, password=self.ssh_password, look_for_keys=False, allow_agent=False) + def upgrade(self): + try: + # Download firmware installation package + self.download_blob() - # Make SCP client to copy over the firmware installation package to the /tmp/ directory on the remote device - logging.info("Copying installation package to the device...") - scp = SCPClient(ssh.get_transport()) - scp.put(self.local_file_name, remote_path='/tmp/') - scp.close() + # Make connection with the target device + logging.info("Making SSH connection with the device...") + ssh = SSHClient() + ssh.set_missing_host_key_policy(WarningPolicy) + ssh.connect( + self.rsu_ip, + username=self.ssh_username, + password=self.ssh_password, + look_for_keys=False, + allow_agent=False, + ) - # Delete local installation package and its parent directory so it doesn't take up storage space - self.cleanup() + # Make SCP client to copy over the firmware installation package to the /tmp/ directory on the remote device + logging.info("Copying installation package to the device...") + scp = SCPClient(ssh.get_transport()) + scp.put(self.local_file_name, remote_path="/tmp/") + scp.close() - # Run firmware upgrade and reboot - logging.info("Running firmware upgrade...") - _stdin, _stdout,_stderr = ssh.exec_command(f"signedUpgrade.sh /tmp/{self.install_package}") - decoded_stdout = _stdout.read().decode() - logging.info(decoded_stdout) - if "ALL OK" not in decoded_stdout: - ssh.close() - # Notify Firmware Manager of failed firmware upgrade completion - self.notify_firmware_manager(success=False) - return - ssh.exec_command("reboot") - ssh.close() + # Delete local installation package and its parent directory so it doesn't take up storage space + self.cleanup() - # Notify Firmware Manager of successful firmware upgrade completion - self.notify_firmware_manager(success=True) - except Exception as err: - # If something goes 
wrong, cleanup anything left and report failure if possible - logging.error(f"Failed to perform firmware upgrade: {err}") - self.cleanup() - self.notify_firmware_manager(success=False) + # Run firmware upgrade and reboot + logging.info("Running firmware upgrade...") + _stdin, _stdout, _stderr = ssh.exec_command( + f"signedUpgrade.sh /tmp/{self.install_package}" + ) + decoded_stdout = _stdout.read().decode() + logging.info(decoded_stdout) + if "ALL OK" not in decoded_stdout: + ssh.close() + # Notify Firmware Manager of failed firmware upgrade completion + self.notify_firmware_manager(success=False) + return + ssh.exec_command("reboot") + ssh.close() + + # Notify Firmware Manager of successful firmware upgrade completion + self.notify_firmware_manager(success=True) + except Exception as err: + # If something goes wrong, cleanup anything left and report failure if possible + logging.error(f"Failed to perform firmware upgrade: {err}") + self.cleanup() + self.notify_firmware_manager(success=False) # sys.argv[1] - JSON string with the following key-values: @@ -62,9 +71,9 @@ def upgrade(self): # - target_firmware_version # - install_package if __name__ == "__main__": - log_level = os.environ.get("LOGGING_LEVEL", "INFO") - logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level) - # Trimming outer single quotes from the json.loads - upgrade_info = json.loads(sys.argv[1][1:-1]) - commsignia_upgrader = CommsigniaUpgrader(upgrade_info) - commsignia_upgrader.upgrade() + log_level = os.environ.get("LOGGING_LEVEL", "INFO") + logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level) + # Trimming outer single quotes from the json.loads + upgrade_info = json.loads(sys.argv[1][1:-1]) + commsignia_upgrader = CommsigniaUpgrader(upgrade_info) + commsignia_upgrader.upgrade() diff --git a/services/addons/images/firmware_manager/download_blob.py b/services/addons/images/firmware_manager/download_blob.py index 268a7c78e..487ffbcfa 100644 --- a/services/addons/images/firmware_manager/download_blob.py +++ b/services/addons/images/firmware_manager/download_blob.py @@ -2,12 +2,15 @@ import logging import os + # Only supports GCP Bucket Storage for downloading blobs def download_gcp_blob(blob_name, destination_file_name): - gcp_project = os.environ.get('GCP_PROJECT') - bucket_name = os.environ.get('BLOB_STORAGE_BUCKET') - storage_client = storage.Client(gcp_project) - bucket = storage_client.get_bucket(bucket_name) - blob = bucket.blob(blob_name) - blob.download_to_filename(destination_file_name) - logging.info(f"Downloaded storage object {blob_name} from bucket {bucket_name} to local file {destination_file_name}.") \ No newline at end of file + gcp_project = os.environ.get("GCP_PROJECT") + bucket_name = os.environ.get("BLOB_STORAGE_BUCKET") + storage_client = storage.Client(gcp_project) + bucket = storage_client.get_bucket(bucket_name) + blob = bucket.blob(blob_name) + blob.download_to_filename(destination_file_name) + logging.info( + f"Downloaded storage object {blob_name} from bucket {bucket_name} to local file {destination_file_name}." 
+ ) diff --git a/services/addons/images/firmware_manager/firmware_manager.py b/services/addons/images/firmware_manager/firmware_manager.py index 431675019..21de87e40 100644 --- a/services/addons/images/firmware_manager/firmware_manager.py +++ b/services/addons/images/firmware_manager/firmware_manager.py @@ -2,6 +2,7 @@ from common import pgquery from flask import Flask, jsonify, request from subprocess import Popen, DEVNULL +from threading import Lock from waitress import serve import json import logging @@ -14,7 +15,8 @@ manufacturer_upgrade_scripts = { - "Commsignia": "commsignia_upgrader.py" + "Commsignia": "commsignia_upgrader.py", + "Yunex": "yunex_upgrader.py", } @@ -30,34 +32,37 @@ # - target_firmware_version # - install_package active_upgrades = {} +active_upgrades_lock = Lock() -# Function to query the CV Manager PostgreSQL database for RSUs that have: +# Function to query the CV Manager PostgreSQL database for RSUs that have: # - A different target version than their current version # - A target firmware that complies with an existing upgrade rule relative to the RSU's current version # - An optional RSU IP can be specified for only returning results for a single RSU -def get_rsu_upgrade_data(rsu_ip = "all"): - query = "SELECT to_jsonb(row) " \ - "FROM (" \ - "SELECT ipv4_address, man.name AS manufacturer, rm.name AS model, rc.username AS ssh_username, rc.password AS ssh_password, " \ - "fi.firmware_id AS target_firmware_id, fi.version AS target_firmware_version, fi.install_package AS install_package " \ - "FROM public.rsus rd " \ - "JOIN public.rsu_models rm ON rm.rsu_model_id = rd.model " \ - "JOIN public.manufacturers man ON man.manufacturer_id = rm.manufacturer " \ - "JOIN public.rsu_credentials rc ON rc.credential_id = rd.credential_id " \ - "JOIN public.firmware_upgrade_rules fur ON fur.from_id = rd.firmware_version " \ - "JOIN public.firmware_images fi ON fi.firmware_id = rd.target_firmware_version " \ - "WHERE firmware_version != target_firmware_version AND target_firmware_version = fur.to_id" - if rsu_ip != "all": - query += f" AND ipv4_address = '{rsu_ip}'" - query += ") as row" - - data = pgquery.query_db(query) - - return_list = [] - for row in data: - return_list.append(dict(row[0])) - return return_list +def get_rsu_upgrade_data(rsu_ip="all"): + query = ( + "SELECT to_jsonb(row) " + "FROM (" + "SELECT ipv4_address, man.name AS manufacturer, rm.name AS model, rc.username AS ssh_username, rc.password AS ssh_password, " + "fi.firmware_id AS target_firmware_id, fi.version AS target_firmware_version, fi.install_package AS install_package " + "FROM public.rsus rd " + "JOIN public.rsu_models rm ON rm.rsu_model_id = rd.model " + "JOIN public.manufacturers man ON man.manufacturer_id = rm.manufacturer " + "JOIN public.rsu_credentials rc ON rc.credential_id = rd.credential_id " + "JOIN public.firmware_upgrade_rules fur ON fur.from_id = rd.firmware_version " + "JOIN public.firmware_images fi ON fi.firmware_id = rd.target_firmware_version " + "WHERE firmware_version != target_firmware_version AND target_firmware_version = fur.to_id" + ) + if rsu_ip != "all": + query += f" AND ipv4_address = '{rsu_ip}'" + query += ") as row" + + data = pgquery.query_db(query) + + return_list = [] + for row in data: + return_list.append(dict(row[0])) + return return_list # REST endpoint to manually start firmware upgrades for targeted RSUs. 
@@ -65,35 +70,75 @@ def get_rsu_upgrade_data(rsu_ip = "all"): # - rsu_ip: Target device IP @app.route("/init_firmware_upgrade", methods=["POST"]) def init_firmware_upgrade(): - request_args = request.get_json() - if "rsu_ip" not in request_args: - return jsonify({"error": "Missing 'rsu_ip' parameter"}), 400 - - # Check if an upgrade is already occurring for the device - logging.info(f"Checking if existing upgrade is running for '{request_args['rsu_ip']}'") - if request_args['rsu_ip'] in active_upgrades: - return jsonify({"error": f"Firmware upgrade failed to start for '{request_args['rsu_ip']}': an upgrade is already underway for the target device"}), 500 - - # Pull RSU data from the PostgreSQL database - logging.info(f"Querying RSU data for '{request_args['rsu_ip']}'") - rsu_to_upgrade = get_rsu_upgrade_data(request_args['rsu_ip']) - if len(rsu_to_upgrade) == 0: - return jsonify({"error": f"Firmware upgrade failed to start for '{request_args['rsu_ip']}': the target firmware is already installed or is an invalid upgrade from the current firmware"}), 500 - rsu_to_upgrade = rsu_to_upgrade[0] - - # Start upgrade process - logging.info(f"Initializing firmware upgrade for '{request_args['rsu_ip']}'") - try: - p = Popen(['python3', f'/home/{manufacturer_upgrade_scripts[rsu_to_upgrade["manufacturer"]]}', f"'{json.dumps(rsu_to_upgrade)}'"], stdout=DEVNULL) - rsu_to_upgrade['process'] = p - except Exception as err: - logging.error(f"Encountered error of type {type(err)} while starting automatic upgrade process for {request_args['rsu_ip']}: {err}") - return jsonify({"error": f"Firmware upgrade failed to start for '{request_args['rsu_ip']}': upgrade process failed to run"}), 500 - - # Remove redundant ipv4_address from rsu_to_upgrade since it is the key for active_upgrades - del rsu_to_upgrade['ipv4_address'] - active_upgrades[request_args['rsu_ip']] = rsu_to_upgrade - return jsonify({"message": f"Firmware upgrade started successfully for '{request_args['rsu_ip']}'"}), 201 + request_args = request.get_json() + if "rsu_ip" not in request_args: + return jsonify({"error": "Missing 'rsu_ip' parameter"}), 400 + + # Acquire lock and check if an upgrade is already occurring for the device + logging.info( + f"Checking if existing upgrade is running for '{request_args['rsu_ip']}'" + ) + with active_upgrades_lock: + if request_args["rsu_ip"] in active_upgrades: + return ( + jsonify( + { + "error": f"Firmware upgrade failed to start for '{request_args['rsu_ip']}': an upgrade is already underway for the target device" + } + ), + 500, + ) + + # Pull RSU data from the PostgreSQL database + logging.info(f"Querying RSU data for '{request_args['rsu_ip']}'") + rsu_to_upgrade = get_rsu_upgrade_data(request_args["rsu_ip"]) + if len(rsu_to_upgrade) == 0: + return ( + jsonify( + { + "error": f"Firmware upgrade failed to start for '{request_args['rsu_ip']}': the target firmware is already installed or is an invalid upgrade from the current firmware" + } + ), + 500, + ) + rsu_to_upgrade = rsu_to_upgrade[0] + + # Start upgrade process + logging.info(f"Initializing firmware upgrade for '{request_args['rsu_ip']}'") + try: + p = Popen( + [ + "python3", + f'/home/{manufacturer_upgrade_scripts[rsu_to_upgrade["manufacturer"]]}', + f"'{json.dumps(rsu_to_upgrade)}'", + ], + stdout=DEVNULL, + ) + rsu_to_upgrade["process"] = p + except Exception as err: + logging.error( + f"Encountered error of type {type(err)} while starting automatic upgrade process for {request_args['rsu_ip']}: {err}" + ) + return ( + jsonify( + { + "error": 
f"Firmware upgrade failed to start for '{request_args['rsu_ip']}': upgrade process failed to run" + } + ), + 500, + ) + + # Remove redundant ipv4_address from rsu_to_upgrade since it is the key for active_upgrades + del rsu_to_upgrade["ipv4_address"] + active_upgrades[request_args["rsu_ip"]] = rsu_to_upgrade + return ( + jsonify( + { + "message": f"Firmware upgrade started successfully for '{request_args['rsu_ip']}'" + } + ), + 201, + ) # REST endpoint to mark a firmware upgrade as complete and remove it from the active upgrades. @@ -103,92 +148,131 @@ def init_firmware_upgrade(): # - status: "success" or "fail" depending upon result of the firmware upgrade @app.route("/firmware_upgrade_completed", methods=["POST"]) def firmware_upgrade_completed(): - request_args = request.get_json() - if "rsu_ip" not in request_args: - return jsonify({"error": "Missing 'rsu_ip' parameter"}), 400 - elif request_args['rsu_ip'] not in active_upgrades: - return jsonify({"error": "Specified device is not actively being upgraded or was already completed"}), 400 - - if "status" not in request_args: - return jsonify({"error": "Missing 'status' parameter"}), 400 - elif request_args['status'] != "success" and request_args['status'] != "fail": - return jsonify({"error": "Wrong value for 'status' parameter - must be either 'success' or 'fail'"}), 400 - - # Update RSU firmware_version in PostgreSQL if the upgrade was successful - if request_args['status'] == "success": - try: - upgrade_info = active_upgrades[request_args['rsu_ip']] - query = f"UPDATE public.rsus SET firmware_version={upgrade_info['target_firmware_id']} WHERE ipv4_address='{request_args['rsu_ip']}'" - pgquery.write_db(query) - except Exception as err: - logging.error(f"Encountered error of type {type(err)} while querying the PostgreSQL database: {err}") - return jsonify({"error": "Unexpected error occurred while querying the PostgreSQL database - firmware upgrade not marked as complete"}), 500 - - # Remove firmware upgrade from active upgrades - logging.info(f"Marking firmware upgrade as complete for '{request_args['rsu_ip']}'") - del active_upgrades[request_args['rsu_ip']] - - return jsonify({"message": "Firmware upgrade successfully marked as complete"}), 204 + request_args = request.get_json() + with active_upgrades_lock: + if "rsu_ip" not in request_args: + return jsonify({"error": "Missing 'rsu_ip' parameter"}), 400 + elif request_args["rsu_ip"] not in active_upgrades: + return ( + jsonify( + { + "error": "Specified device is not actively being upgraded or was already completed" + } + ), + 400, + ) + + if "status" not in request_args: + return jsonify({"error": "Missing 'status' parameter"}), 400 + elif request_args["status"] != "success" and request_args["status"] != "fail": + return ( + jsonify( + { + "error": "Wrong value for 'status' parameter - must be either 'success' or 'fail'" + } + ), + 400, + ) + + # Update RSU firmware_version in PostgreSQL if the upgrade was successful + if request_args["status"] == "success": + try: + upgrade_info = active_upgrades[request_args["rsu_ip"]] + query = f"UPDATE public.rsus SET firmware_version={upgrade_info['target_firmware_id']} WHERE ipv4_address='{request_args['rsu_ip']}'" + pgquery.write_db(query) + except Exception as err: + logging.error( + f"Encountered error of type {type(err)} while querying the PostgreSQL database: {err}" + ) + return ( + jsonify( + { + "error": "Unexpected error occurred while querying the PostgreSQL database - firmware upgrade not marked as complete" + } + ), + 500, + ) + + # 
Remove firmware upgrade from active upgrades + logging.info( + f"Marking firmware upgrade as complete for '{request_args['rsu_ip']}'" + ) + del active_upgrades[request_args["rsu_ip"]] + + return jsonify({"message": "Firmware upgrade successfully marked as complete"}), 204 # REST endpoint to retrieve a list of all active firmware upgrades. @app.route("/list_active_upgrades", methods=["GET"]) def list_active_upgrades(): - # Remove all sensitive data from the response - sanitized_active_upgrades = {} - for key, value in active_upgrades.items(): - sanitized_active_upgrades[key] = { - "manufacturer": value['manufacturer'], - "model": value['model'], - "target_firmware_id": value['target_firmware_id'], - "target_firmware_version": value['target_firmware_version'], - "install_package": value['install_package'] - } - return jsonify({"active_upgrades": sanitized_active_upgrades}), 200 + # Remove all sensitive data from the response + sanitized_active_upgrades = {} + with active_upgrades_lock: + for key, value in active_upgrades.items(): + sanitized_active_upgrades[key] = { + "manufacturer": value["manufacturer"], + "model": value["model"], + "target_firmware_id": value["target_firmware_id"], + "target_firmware_version": value["target_firmware_version"], + "install_package": value["install_package"], + } + return jsonify({"active_upgrades": sanitized_active_upgrades}), 200 # Scheduled firmware upgrade checker def check_for_upgrades(): - logging.info("Checking PostgreSQL DB for RSUs with new target firmware") - # Get all RSUs that need to be upgraded from the PostgreSQL database - rsus_to_upgrade = get_rsu_upgrade_data() - - # Start upgrade scripts for any results - for rsu in rsus_to_upgrade: - # Check if an upgrade is already occurring for the device - if rsu['ipv4_address'] in active_upgrades: - continue - - # Start upgrade script - logging.info(f"Running automated firmware upgrade for '{rsu['ipv4_address']}'") - try: - p = Popen(['python3', f'/home/{manufacturer_upgrade_scripts[rsu["manufacturer"]]}', f"'{json.dumps(rsu)}'"], stdout=DEVNULL) - rsu['process'] = p - except Exception as err: - logging.error(f"Encountered error of type {type(err)} while starting automatic upgrade process for {rsu['ipv4_address']}: {err}") - continue - - # Remove redundant ipv4_address from rsu since it is the key for active_upgrades - rsu_ip = rsu['ipv4_address'] - del rsu['ipv4_address'] - active_upgrades[rsu_ip] = rsu - logging.info(f"Firmware upgrade successfully started for '{rsu_ip}'") + logging.info("Checking PostgreSQL DB for RSUs with new target firmware") + # Get all RSUs that need to be upgraded from the PostgreSQL database + rsus_to_upgrade = get_rsu_upgrade_data() + + # Start upgrade scripts for any results + for rsu in rsus_to_upgrade: + # Check if an upgrade is already occurring for the device + with active_upgrades_lock: + if rsu["ipv4_address"] in active_upgrades: + continue + + # Start upgrade script + logging.info( + f"Running automated firmware upgrade for '{rsu['ipv4_address']}'" + ) + try: + p = Popen( + [ + "python3", + f'/home/{manufacturer_upgrade_scripts[rsu["manufacturer"]]}', + f"'{json.dumps(rsu)}'", + ], + stdout=DEVNULL, + ) + rsu["process"] = p + except Exception as err: + logging.error( + f"Encountered error of type {type(err)} while starting automatic upgrade process for {rsu['ipv4_address']}: {err}" + ) + continue + + # Remove redundant ipv4_address from rsu since it is the key for active_upgrades + rsu_ip = rsu["ipv4_address"] + del rsu["ipv4_address"] + active_upgrades[rsu_ip] = 
rsu + logging.info(f"Firmware upgrade successfully started for '{rsu_ip}'") def serve_rest_api(): - # Run Flask app for manually initiated firmware upgrades - logging.info("Initiating Firmware Manager REST API...") - serve(app, host="0.0.0.0", port=8080) + # Run Flask app for manually initiated firmware upgrades + logging.info("Initiating Firmware Manager REST API...") + serve(app, host="0.0.0.0", port=8080) def init_background_task(): - logging.info("Initiating Firmware Manager background checker...") - # Run scheduler for async RSU firmware upgrade checks - scheduler = BackgroundScheduler({"apscheduler.timezone": "UTC"}) - scheduler.add_job(check_for_upgrades, "cron", minute="0") - scheduler.start() + logging.info("Initiating Firmware Manager background checker...") + # Run scheduler for async RSU firmware upgrade checks + scheduler = BackgroundScheduler({"apscheduler.timezone": "UTC"}) + scheduler.add_job(check_for_upgrades, "cron", minute="0") + scheduler.start() if __name__ == "__main__": - init_background_task() - serve_rest_api() \ No newline at end of file + init_background_task() + serve_rest_api() diff --git a/services/addons/images/firmware_manager/resources/xfer_yunex.jar b/services/addons/images/firmware_manager/resources/xfer_yunex.jar new file mode 100644 index 000000000..a8ccceb6a Binary files /dev/null and b/services/addons/images/firmware_manager/resources/xfer_yunex.jar differ diff --git a/services/addons/images/firmware_manager/upgrader.py b/services/addons/images/firmware_manager/upgrader.py index bab68d250..fd76cc362 100644 --- a/services/addons/images/firmware_manager/upgrader.py +++ b/services/addons/images/firmware_manager/upgrader.py @@ -6,49 +6,55 @@ import requests import shutil -class UpgraderAbstractClass( abc.ABC ): - def __init__(self, upgrade_info): - self.install_package = upgrade_info['install_package'] - self.blob_name = f"{upgrade_info['manufacturer']}/{upgrade_info['model']}/{upgrade_info['target_firmware_version']}/{upgrade_info['install_package']}" - self.local_file_name = f"/home/{upgrade_info['ipv4_address']}/{upgrade_info['install_package']}" - self.rsu_ip = upgrade_info['ipv4_address'] - self.ssh_username = upgrade_info['ssh_username'] - self.ssh_password = upgrade_info['ssh_password'] - - # Deletes the parent directory along with the firmware file - def cleanup(self): - if self.local_file_name is not None: - path = Path(self.local_file_name[:self.local_file_name.rfind("/")]) - if path.exists() and path.is_dir(): - shutil.rmtree(path) - - # Downloads firmware install package blob to /home/rsu_ip/ - def download_blob(self): - # Create parent rsu_ip directory - path = self.local_file_name[:self.local_file_name.rfind("/")] - Path(path).mkdir(exist_ok=True) - - # Download blob, defaults to GCP blob storage - bsp = os.environ.get("BLOB_STORAGE_PROVIDER", "GCP") - if bsp == "GCP": - download_blob.download_gcp_blob(self.blob_name, self.local_file_name) - else: - logging.error("Unsupported blob storage provider") - - # Notifies the firmware manager of the completion status for the upgrade - # success is a boolean - def notify_firmware_manager(self, success): - status = "success" if success else "fail" - logging.info(f"Firmware upgrade script completed with status: {status}") - - url = 'http://127.0.0.1:8080/firmware_upgrade_completed' - body = {"rsu_ip": self.rsu_ip, "status": status} - try: - requests.post(url, json=body) - except Exception as err: - logging.error(f"Failed to connect to the Firmware Manager API for '{self.rsu_ip}': {err}") - - # This 
needs to be defined for each implementation - @abc.abstractclassmethod - def upgrade(self): - pass \ No newline at end of file + +class UpgraderAbstractClass(abc.ABC): + def __init__(self, upgrade_info): + self.install_package = upgrade_info["install_package"] + self.root_path = f"/home/{upgrade_info['ipv4_address']}" + self.blob_name = f"{upgrade_info['manufacturer']}/{upgrade_info['model']}/{upgrade_info['target_firmware_version']}/{upgrade_info['install_package']}" + self.local_file_name = ( + f"/home/{upgrade_info['ipv4_address']}/{upgrade_info['install_package']}" + ) + self.rsu_ip = upgrade_info["ipv4_address"] + self.ssh_username = upgrade_info["ssh_username"] + self.ssh_password = upgrade_info["ssh_password"] + + # Deletes the parent directory along with the firmware file + def cleanup(self): + if self.local_file_name is not None: + path = Path(self.root_path) + if path.exists() and path.is_dir(): + shutil.rmtree(path) + + # Downloads firmware install package blob to /home/rsu_ip/ + def download_blob(self): + # Create parent rsu_ip directory + path = self.local_file_name[: self.local_file_name.rfind("/")] + Path(path).mkdir(exist_ok=True) + + # Download blob, defaults to GCP blob storage + bsp = os.environ.get("BLOB_STORAGE_PROVIDER", "GCP") + if bsp == "GCP": + download_blob.download_gcp_blob(self.blob_name, self.local_file_name) + else: + logging.error("Unsupported blob storage provider") + + # Notifies the firmware manager of the completion status for the upgrade + # success is a boolean + def notify_firmware_manager(self, success): + status = "success" if success else "fail" + logging.info(f"Firmware upgrade script completed with status: {status}") + + url = "http://127.0.0.1:8080/firmware_upgrade_completed" + body = {"rsu_ip": self.rsu_ip, "status": status} + try: + requests.post(url, json=body) + except Exception as err: + logging.error( + f"Failed to connect to the Firmware Manager API for '{self.rsu_ip}': {err}" + ) + + # This needs to be defined for each implementation + @abc.abstractclassmethod + def upgrade(self): + pass diff --git a/services/addons/images/firmware_manager/yunex_upgrader.py b/services/addons/images/firmware_manager/yunex_upgrader.py new file mode 100644 index 000000000..f5987808f --- /dev/null +++ b/services/addons/images/firmware_manager/yunex_upgrader.py @@ -0,0 +1,132 @@ +import upgrader +import json +import logging +import os +import subprocess +import sys +import tarfile +import time + + +class YunexUpgrader(upgrader.UpgraderAbstractClass): + def __init__(self, upgrade_info): + super().__init__(upgrade_info) + + def run_xfer_upgrade(self, file_name): + xfer_command = [ + "java", + "-jar", + f"/home/tools/xfer_yunex.jar", + "-upload", + file_name, + f"{self.rsu_ip}:3600", + ] + proc = subprocess.run(xfer_command, capture_output=True) + code, stdout, stderr = proc.returncode, proc.stdout, proc.stderr + + # If the command ends with a non-successful status code, return -1 + if code != 0: + logging.error("Firmware not successful: " + stderr.decode("utf-8")) + return -1 + + output_lines = stdout.decode("utf-8").split("\n")[:-1] + # If the command ends with a successful status code but the logs don't contain the expected line, return -1 + if ( + 'TEXT: {"success":{"upload":"Processing OK. 
Rebooting now ..."}}'
+            not in output_lines
+        ):
+            logging.error("Firmware not successful: " + stderr.decode("utf-8"))
+            return -1
+
+        # If everything goes as expected, the XFER upgrade was complete
+        return 0
+
+    def wait_until_online(self):
+        attempts = 0
+        # Ping once every second for 3 minutes until online
+        while attempts < 180:
+            time.sleep(1)
+            code = subprocess.run(
+                ["ping", "-n", "-c1", self.rsu_ip], capture_output=True
+            ).returncode
+            if code == 0:
+                return 0
+            attempts += 1
+        # 3 minutes passed with no response
+        return -1
+
+    def upgrade(self):
+        try:
+            # Download firmware installation package TAR file
+            self.download_blob()
+
+            # Unpack TAR file which must contain the following:
+            # - Core upgrade file
+            # - SDK upgrade file
+            # - Application provision file
+            # - upgrade_info.json which defines the files as a single JSON object
+            logging.info("Unpacking TAR file...")
+            with tarfile.open(self.local_file_name, "r") as tar:
+                tar.extractall(self.root_path)
+
+            # Obtain upgrade info in the following format:
+            # { "core": "core-file-name", "sdk": "sdk-file-name", "provision": "provision-file-name"}
+            with open(f"{self.root_path}/upgrade_info.json") as json_file:
+                upgrade_info = json.load(json_file)
+
+            # Run Core upgrade
+            logging.info("Running Core firmware upgrade...")
+            code = self.run_xfer_upgrade(f"{self.root_path}/{upgrade_info['core']}")
+            if code == -1:
+                raise Exception("Yunex RSU Core upgrade failed")
+            if self.wait_until_online() == -1:
+                raise Exception("RSU offline for too long after Core upgrade")
+            # Wait an additional 60 seconds after the Yunex RSU is online - it needs time to initialize
+            time.sleep(60)
+
+            # Run SDK upgrade
+            logging.info("Running SDK firmware upgrade...")
+            code = self.run_xfer_upgrade(f"{self.root_path}/{upgrade_info['sdk']}")
+            if code == -1:
+                raise Exception("Yunex RSU SDK upgrade failed")
+            if self.wait_until_online() == -1:
+                raise Exception("RSU offline for too long after SDK upgrade")
+            # Wait an additional 60 seconds after the Yunex RSU is online - it needs time to initialize
+            time.sleep(60)
+
+            # Run the application provisioning image
+            logging.info("Running application provisioning...")
+            code = self.run_xfer_upgrade(
+                f"{self.root_path}/{upgrade_info['provision']}"
+            )
+            if code == -1:
+                raise Exception("Yunex RSU application provisioning upgrade failed")
+
+            # Notify Firmware Manager of successful firmware upgrade completion
+            self.cleanup()
+            self.notify_firmware_manager(success=True)
+        except Exception as err:
+            # If something goes wrong, clean up anything left over and report failure if possible.
+            # Yunex RSUs can handle having the same firmware upgraded over again.
+            # There is no issue with starting from the beginning even with a partially complete upgrade.
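
An aside on the polling logic above: `wait_until_online` and `run_xfer_upgrade` both report status through plain `0`/`-1` return codes, which makes this class straightforward to unit test in the same mock-based style used by the test files later in this changeset. A hypothetical test sketch, not part of the diff itself; the fixture values below are placeholders:

```python
# Hypothetical unit test for YunexUpgrader.wait_until_online, written in the
# mock/patch style of the tests elsewhere in this changeset. The upgrade_info
# values are placeholder assumptions, not taken from the diff.
from unittest.mock import MagicMock, patch

import yunex_upgrader

TEST_UPGRADE_INFO = {
    "ipv4_address": "192.168.0.20",
    "manufacturer": "Yunex",
    "model": "RSU2X",
    "ssh_username": "user",
    "ssh_password": "pass",
    "target_firmware_id": 1,
    "target_firmware_version": "1.0.0",
    "install_package": "upgrade.tar",
}


@patch("yunex_upgrader.time.sleep")  # skip the real 1-second waits
@patch("yunex_upgrader.subprocess.run")
def test_wait_until_online_success(mock_run, mock_sleep):
    # Simulate the ping subprocess succeeding on the first attempt
    mock_run.return_value = MagicMock(returncode=0)
    upgrader_obj = yunex_upgrader.YunexUpgrader(TEST_UPGRADE_INFO)
    assert upgrader_obj.wait_until_online() == 0
```

Patching `time.sleep` through the module under test avoids any real waiting, so even the 180-iteration timeout path can be exercised quickly.
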
+ logging.error(f"Failed to perform firmware upgrade: {err}") + self.cleanup() + self.notify_firmware_manager(success=False) + + +# sys.argv[1] - JSON string with the following key-values: +# - ipv4_address +# - manufacturer +# - model +# - ssh_username +# - ssh_password +# - target_firmware_id +# - target_firmware_version +# - install_package +if __name__ == "__main__": + log_level = os.environ.get("LOGGING_LEVEL", "INFO") + logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level) + # Trimming outer single quotes from the json.loads + upgrade_info = json.loads(sys.argv[1][1:-1]) + yunex_upgrader = YunexUpgrader(upgrade_info) + yunex_upgrader.upgrade() diff --git a/services/addons/images/iss_health_check/README.md b/services/addons/images/iss_health_check/README.md index aa7f24ef4..7a8f762b2 100644 --- a/services/addons/images/iss_health_check/README.md +++ b/services/addons/images/iss_health_check/README.md @@ -1,41 +1,41 @@ -# Integrity Security Services (ISS) Security Credential Management System (SCMS) Health Checker - -## Table of Contents - -- [Integrity Security Services (ISS) Security Credential Management System (SCMS) Health Checker](#integrity-security-services-iss-security-credential-management-system-scms-health-checker) - - [Table of Contents](#table-of-contents) - - [About ](#about-) - - [Requirements ](#requirements-) - -## About - -This directory contains a microservice that runs within the CV Manager GKE Cluster. The iss_health_checker application populates the CV Manager PostGreSQL database's 'scms_health' table with the current ISS SCMS statuses of all RSUs recorded in the 'rsus' table. These statuses are queried by this application from a provided ISS Green Hills SCMS API endpoint. - -The application schedules the iss_health_checker script to run every 6 hours. A new SCMS API access key is generated every run of the script to ensure the access never expires. This is due to a limitation of the SCMS API not allowing permanent access keys. Access keys are stored in GCP Secret Manager to allow for versioning and encrypted storage. The application removes the previous access key from the SCMS API after runtime to reduce clutter of access keys on the API service account. - -Currently only GCP is supported to run this application due to a reliance on the GCP Secret Manager. Storing the access keys on a local volume is not recommended due to security vulnerabilities. Feel free to contribute to this application for secret manager equivalent support for other cloud environments. - -## Requirements - -To properly run the iss_health_checker microservice the following services are also required: - -- GCP project and service account with GCP Secret Manager access -- CV Manager PostgreSQL database with at least one RSU inserted into the 'rsus' table -- Service agreement with ISS Green Hills to have access to the SCMS API REST service endpoint -- iss_health_checker must be deployed in the same environment or K8s cluster as the PostgreSQL database -- iss_health_checker deployment must have access to the internet or at least the SCMS API endpoint - -The iss_health_checker microservice expects the following environment variables to be set: - -- GOOGLE_APPLICATION_CREDENTIALS - file location for GCP JSON service account key. -- PROJECT_ID - GCP project ID. -- ISS_API_KEY - Initial ISS SCMS API access key to perform the first run of the script. This access key must not expire before the first runtime. 
-- ISS_API_KEY_NAME - Human readable reference for the access key within ISS SCMS API. Generated access keys will utilize this same name.
-- ISS_PROJECT_ID - Project ID the RSUs are under that the SCMS API will be queried for.
-- ISS_SCMS_TOKEN_REST_ENDPOINT - Token generation HTTPS endpoint for the ISS Green Hills SCMS API. (https://scms-api-domain/api/v3/token)
-- ISS_SCMS_VEHICLE_REST_ENDPOINT - Vehicle/RSU HTTPS endpoint for the ISS Green Hills SCMS API. (https://scms-api-domain/api/v3/devices)
-- PG_DB_USER - PostgreSQL access username.
-- PG_DB_PASS - PostgreSQL access password.
-- PG_DB_NAME - PostgreSQL database name.
-- PG_DB_HOST - PostgreSQL hostname, make sure to include port number.
-- LOGGING_LEVEL (optional, defaults to 'info')
+# Integrity Security Services (ISS) Security Credential Management System (SCMS) Health Checker
+
+## Table of Contents
+
+- [Integrity Security Services (ISS) Security Credential Management System (SCMS) Health Checker](#integrity-security-services-iss-security-credential-management-system-scms-health-checker)
+  - [Table of Contents](#table-of-contents)
+  - [About ](#about-)
+  - [Requirements ](#requirements-)
+
+## About
+
+This directory contains a microservice that runs within the CV Manager GKE Cluster. The iss_health_checker application populates the CV Manager PostgreSQL database's 'scms_health' table with the current ISS SCMS statuses of all RSUs recorded in the 'rsus' table. These statuses are queried by this application from a provided ISS Green Hills SCMS API endpoint.
+
+The application schedules the iss_health_checker script to run every 6 hours. A new SCMS API access key is generated on every run of the script so that access never expires; this works around a limitation of the SCMS API, which does not allow permanent access keys. Access keys are stored in GCP Secret Manager to allow for versioning and encrypted storage. The application removes the previous access key from the SCMS API after runtime to reduce the clutter of access keys on the API service account.
+
+Currently only GCP is supported for running this application due to its reliance on GCP Secret Manager. Storing the access keys on a local volume is not recommended due to security vulnerabilities. Feel free to contribute support for equivalent secret managers in other cloud environments.
+
+## Requirements
+
+To properly run the iss_health_checker microservice, the following services are also required:
+
+- GCP project and service account with GCP Secret Manager access
+- CV Manager PostgreSQL database with at least one RSU inserted into the 'rsus' table
+- Service agreement with ISS Green Hills to have access to the SCMS API REST service endpoint
+- iss_health_checker must be deployed in the same environment or K8s cluster as the PostgreSQL database
+- iss_health_checker deployment must have access to the internet or at least the SCMS API endpoint
+
+The iss_health_checker microservice expects the following environment variables to be set:
+
+- GOOGLE_APPLICATION_CREDENTIALS - file location for GCP JSON service account key.
+- PROJECT_ID - GCP project ID.
+- ISS_API_KEY - Initial ISS SCMS API access key to perform the first run of the script. This access key must not expire before the first runtime.
+- ISS_API_KEY_NAME - Human-readable reference for the access key within the ISS SCMS API. Generated access keys will utilize this same name.
+- ISS_PROJECT_ID - Project ID the RSUs are under that the SCMS API will be queried for.
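
An aside on the environment contract in this list (which continues just below): since every variable except LOGGING_LEVEL is required, the contract lends itself to a fail-fast check at startup. A hypothetical sketch follows; the variable names come from this README, but the `validate_env` helper itself is an illustration and not part of this changeset:

```python
# Hypothetical fail-fast validation of the iss_health_checker environment.
# Variable names are taken from the README's list; the helper is an
# illustration only, not code from this diff.
import os

REQUIRED_VARS = [
    "GOOGLE_APPLICATION_CREDENTIALS",
    "PROJECT_ID",
    "ISS_API_KEY",
    "ISS_API_KEY_NAME",
    "ISS_PROJECT_ID",
    "ISS_SCMS_TOKEN_REST_ENDPOINT",
    "ISS_SCMS_VEHICLE_REST_ENDPOINT",
    "PG_DB_USER",
    "PG_DB_PASS",
    "PG_DB_NAME",
    "PG_DB_HOST",
]


def validate_env() -> None:
    """Raise at startup if any required variable is unset or empty."""
    missing = [name for name in REQUIRED_VARS if not os.environ.get(name)]
    if missing:
        raise EnvironmentError("Missing environment variables: " + ", ".join(missing))
```
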
+- ISS_SCMS_TOKEN_REST_ENDPOINT - Token generation HTTPS endpoint for the ISS Green Hills SCMS API. (https://scms-api-domain/api/v3/token) +- ISS_SCMS_VEHICLE_REST_ENDPOINT - Vehicle/RSU HTTPS endpoint for the ISS Green Hills SCMS API. (https://scms-api-domain/api/v3/devices) +- PG_DB_USER - PostgreSQL access username. +- PG_DB_PASS - PostgreSQL access password. +- PG_DB_NAME - PostgreSQL database name. +- PG_DB_HOST - PostgreSQL hostname, make sure to include port number. +- LOGGING_LEVEL (optional, defaults to 'info') diff --git a/services/addons/images/iss_health_check/crontab b/services/addons/images/iss_health_check/crontab index 27b79e811..9eb53f367 100644 --- a/services/addons/images/iss_health_check/crontab +++ b/services/addons/images/iss_health_check/crontab @@ -1,2 +1,2 @@ -PYTHONPATH=/home -0 */6 * * * /usr/local/bin/python3 /home/iss_health_checker.py +PYTHONPATH=/home +0 */6 * * * /usr/local/bin/python3 /home/iss_health_checker.py diff --git a/services/addons/images/iss_health_check/docker-compose.yml b/services/addons/images/iss_health_check/docker-compose.yml index 3912d2276..ded49900a 100644 --- a/services/addons/images/iss_health_check/docker-compose.yml +++ b/services/addons/images/iss_health_check/docker-compose.yml @@ -1,11 +1,11 @@ -version: '3' -services: - jpo_iss_health_checker: - build: . - image: jpo_iss_health_checker:latest - env_file: - - .env - logging: - options: - max-size: '10m' - max-file: '5' +version: '3' +services: + jpo_iss_health_checker: + build: . + image: jpo_iss_health_checker:latest + env_file: + - .env + logging: + options: + max-size: '10m' + max-file: '5' diff --git a/services/addons/images/iss_health_check/iss_health_checker.py b/services/addons/images/iss_health_check/iss_health_checker.py index 334e43661..32a69b120 100644 --- a/services/addons/images/iss_health_check/iss_health_checker.py +++ b/services/addons/images/iss_health_check/iss_health_checker.py @@ -1,93 +1,118 @@ -from datetime import datetime -import requests -import logging -import os -import iss_token -import common.pgquery as pgquery - -def get_rsu_data(): - result = {} - query = "SELECT jsonb_build_object('rsu_id', rsu_id, 'iss_scms_id', iss_scms_id) " \ - "FROM public.rsus " \ - "WHERE iss_scms_id IS NOT NULL " \ - "ORDER BY rsu_id" - data = pgquery.query_db(query) - - logging.debug('Parsing results...') - for point in data: - point_dict = dict(point[0]) - result[point_dict['iss_scms_id']] = { - 'rsu_id': point_dict['rsu_id'] - } - - return result - -def get_scms_status_data(): - rsu_data = get_rsu_data() - - # Create GET request headers - iss_headers = {} - iss_headers["x-api-key"] = iss_token.get_token() - - # Create the GET request string - iss_base = os.environ["ISS_SCMS_VEHICLE_REST_ENDPOINT"] - project_id = os.environ["ISS_PROJECT_ID"] - page_size = 200 - page = 0 - messages_processed = 0 - - # Loop through all pages of enrolled devices - while True: - iss_request = iss_base + "?pageSize={}&page={}&project_id={}".format(page_size, page, project_id) - logging.debug("GET: " + iss_request) - response = requests.get(iss_request, headers=iss_headers) - enrollment_list = response.json()["data"] - - if len(enrollment_list) == 0: - break - - # Loop through each device on current page - for enrollment_status in enrollment_list: - if enrollment_status["_id"] in rsu_data: - rsu_data[enrollment_status["_id"]]['provisionerCompany'] = enrollment_status["provisionerCompany_id"] - rsu_data[enrollment_status["_id"]]['entityType'] = enrollment_status["entityType"] - 
rsu_data[enrollment_status["_id"]]['project_id'] = enrollment_status["project_id"] - rsu_data[enrollment_status["_id"]]['deviceHealth'] = enrollment_status["deviceHealth"] - - # If the device has yet to download its first set of certs, set the expiration time to when it was enrolled - if "authorizationCertInfo" in enrollment_status["enrollments"][0]: - rsu_data[enrollment_status["_id"]]['expiration'] = enrollment_status["enrollments"][0]["authorizationCertInfo"]["expireTimeOfLatestDownloadedCert"] - else: - rsu_data[enrollment_status["_id"]]['expiration'] = None - - messages_processed = messages_processed + 1 - - page = page + 1 - - logging.info("Processed {} messages".format(messages_processed)) - return rsu_data - -def insert_scms_data(data): - logging.info('Inserting SCMS data into PostgreSQL...') - now_ts = datetime.strftime(datetime.now(), '%Y-%m-%dT%H:%M:%S.000Z') - - query = "INSERT INTO public.scms_health(\"timestamp\", health, expiration, rsu_id) VALUES" - for value in data.values(): - health = '1' if value['deviceHealth'] == 'Healthy' else '0' - if value['expiration']: - query = query + \ - f" ('{now_ts}', '{health}', '{value['expiration']}', {value['rsu_id']})," - else: - query = query + \ - f" ('{now_ts}', '{health}', NULL, {value['rsu_id']})," - - pgquery.write_db(query[:-1]) - logging.info('SCMS data inserted {} messages into PostgreSQL...'.format(len(data.values()))) - -if __name__ == "__main__": - # Configure logging based on ENV var or use default if not set - log_level = 'INFO' if "LOGGING_LEVEL" not in os.environ else os.environ['LOGGING_LEVEL'] - logging.basicConfig(format='%(levelname)s:%(message)s', level=log_level) - - scms_statuses = get_scms_status_data() - insert_scms_data(scms_statuses) \ No newline at end of file +from datetime import datetime +import requests +import logging +import os +import iss_token +import common.pgquery as pgquery + + +def get_rsu_data(): + result = {} + query = ( + "SELECT jsonb_build_object('rsu_id', rsu_id, 'iss_scms_id', iss_scms_id) " + "FROM public.rsus " + "WHERE iss_scms_id IS NOT NULL " + "ORDER BY rsu_id" + ) + data = pgquery.query_db(query) + + logging.debug("Parsing results...") + for point in data: + point_dict = dict(point[0]) + result[point_dict["iss_scms_id"]] = {"rsu_id": point_dict["rsu_id"]} + + return result + + +def get_scms_status_data(): + rsu_data = get_rsu_data() + + # Create GET request headers + iss_headers = {} + iss_headers["x-api-key"] = iss_token.get_token() + + # Create the GET request string + iss_base = os.environ["ISS_SCMS_VEHICLE_REST_ENDPOINT"] + project_id = os.environ["ISS_PROJECT_ID"] + page_size = 200 + page = 0 + messages_processed = 0 + + # Loop through all pages of enrolled devices + while True: + iss_request = iss_base + "?pageSize={}&page={}&project_id={}".format( + page_size, page, project_id + ) + logging.debug("GET: " + iss_request) + response = requests.get(iss_request, headers=iss_headers) + enrollment_list = response.json()["data"] + + if len(enrollment_list) == 0: + break + + # Loop through each device on current page + for enrollment_status in enrollment_list: + if enrollment_status["_id"] in rsu_data: + rsu_data[enrollment_status["_id"]][ + "provisionerCompany" + ] = enrollment_status["provisionerCompany_id"] + rsu_data[enrollment_status["_id"]]["entityType"] = enrollment_status[ + "entityType" + ] + rsu_data[enrollment_status["_id"]]["project_id"] = enrollment_status[ + "project_id" + ] + rsu_data[enrollment_status["_id"]]["deviceHealth"] = enrollment_status[ + "deviceHealth" + ] + + 
# If the device has yet to download its first set of certs, set the expiration time to when it was enrolled + if "authorizationCertInfo" in enrollment_status["enrollments"][0]: + rsu_data[enrollment_status["_id"]][ + "expiration" + ] = enrollment_status["enrollments"][0]["authorizationCertInfo"][ + "expireTimeOfLatestDownloadedCert" + ] + else: + rsu_data[enrollment_status["_id"]]["expiration"] = None + + messages_processed = messages_processed + 1 + + page = page + 1 + + logging.info("Processed {} messages".format(messages_processed)) + return rsu_data + + +def insert_scms_data(data): + logging.info("Inserting SCMS data into PostgreSQL...") + now_ts = datetime.strftime(datetime.now(), "%Y-%m-%dT%H:%M:%S.000Z") + + query = ( + 'INSERT INTO public.scms_health("timestamp", health, expiration, rsu_id) VALUES' + ) + for value in data.values(): + health = "1" if value["deviceHealth"] == "Healthy" else "0" + if value["expiration"]: + query = ( + query + + f" ('{now_ts}', '{health}', '{value['expiration']}', {value['rsu_id']})," + ) + else: + query = query + f" ('{now_ts}', '{health}', NULL, {value['rsu_id']})," + + pgquery.write_db(query[:-1]) + logging.info( + "SCMS data inserted {} messages into PostgreSQL...".format(len(data.values())) + ) + + +if __name__ == "__main__": + # Configure logging based on ENV var or use default if not set + log_level = ( + "INFO" if "LOGGING_LEVEL" not in os.environ else os.environ["LOGGING_LEVEL"] + ) + logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level) + + scms_statuses = get_scms_status_data() + insert_scms_data(scms_statuses) diff --git a/services/addons/images/iss_health_check/iss_token.py b/services/addons/images/iss_health_check/iss_token.py index c83c430e1..084b6067f 100644 --- a/services/addons/images/iss_health_check/iss_token.py +++ b/services/addons/images/iss_health_check/iss_token.py @@ -1,114 +1,116 @@ -from google.cloud import secretmanager -import requests -import os -import json -import uuid -import logging - -def create_secret(client, secret_id, parent): - """Create a new GCP secret in GCP Secret Manager - client: GCP Security Manager client - secret_id: ID of the secret being created - parent: GCP secret manager parent ID for the GCP project - """ - client.create_secret( - request={ - "parent": parent, - "secret_id": secret_id, - "secret": {"replication": {"automatic": {}}}, - } - ) - logging.debug("New secret created") - -def check_if_secret_exists(client, secret_id, parent): - """Check if a secret exists in GCP Secret Manager - client: GCP Security Manager client - secret_id: ID of the secret being checked - parent: GCP secret manager parent ID for the GCP project - """ - for secret in client.list_secrets(request=secretmanager.ListSecretsRequest(parent=parent)): - # secret names are in the form of "projects/project_id/secrets/secret_id" - if secret.name.split('/')[-1] == secret_id: - logging.debug(f"Secret {secret_id} exists") - return True - return False - -def get_latest_secret_version(client, secret_id, parent): - """Get latest value of a secret from GCP Secret Manager - client: GCP Security Manager client - secret_id: ID for the secret being retrieved - parent: GCP secret manager parent ID for the GCP project - """ - response = client.access_secret_version(request={"name": f"{parent}/secrets/{secret_id}/versions/latest"}) - return json.loads(response.payload.data.decode("UTF-8")) - -def add_secret_version(client, secret_id, parent, data): - """Add a new version to an existing secret - client: GCP Security Manager 
client - secret_id: ID for the secret - parent: GCP secret manager parent ID for the GCP project - data: String value for the new version of the secret - """ - client.add_secret_version( - request={"parent": f"{parent}/secrets/{secret_id}", "payload": {"data": str.encode(json.dumps(data))}} - ) - logging.debug("New version added") - -def get_token(): - client = secretmanager.SecretManagerServiceClient() - secret_id = "iss-token-secret" - parent = f"projects/{os.environ['PROJECT_ID']}" - - # Check to see if the GCP secret exists - secret_exists = check_if_secret_exists(client, secret_id, parent) - - if secret_exists: - # Grab the latest token data - value = get_latest_secret_version(client, secret_id, parent) - friendly_name = value["name"] - token = value["token"] - logging.debug(f"Received token: {friendly_name}") - else: - # If there is no available ISS token secret, create secret - logging.debug("Secret does not exist, creating secret") - create_secret(client, secret_id, parent) - # Use environment variable for first run with new secret - token = os.environ["ISS_API_KEY"] - - # Pull new ISS SCMS API token - iss_base = os.environ["ISS_SCMS_TOKEN_REST_ENDPOINT"] - - # Create HTTP request headers - iss_headers = { - "x-api-key": token - } - - # Create the POST body - new_friendly_name = f"{os.environ['ISS_API_KEY_NAME']}_{str(uuid.uuid4())}" - iss_post_body = { - 'friendlyName': new_friendly_name, - 'expireDays': 1 - } - - # Create new ISS SCMS API Token to ensure its freshness - logging.debug("POST: " + iss_base) - response = requests.post(iss_base, json=iss_post_body, headers=iss_headers) - new_token = response.json()["Item"] - logging.debug(f"Received new token: {new_friendly_name}") - - if secret_exists: - # If exists, delete previous API key to prevent key clutter - iss_delete_body = { - 'friendlyName': friendly_name - } - requests.delete(iss_base, json=iss_delete_body, headers=iss_headers) - logging.debug(f"Old token has been deleted from ISS SCMS: {friendly_name}") - - version_data = { - "name": new_friendly_name, - "token": new_token - } - - add_secret_version(client, secret_id, parent, version_data) - - return new_token +from google.cloud import secretmanager +import requests +import os +import json +import uuid +import logging + + +def create_secret(client, secret_id, parent): + """Create a new GCP secret in GCP Secret Manager + client: GCP Security Manager client + secret_id: ID of the secret being created + parent: GCP secret manager parent ID for the GCP project + """ + client.create_secret( + request={ + "parent": parent, + "secret_id": secret_id, + "secret": {"replication": {"automatic": {}}}, + } + ) + logging.debug("New secret created") + + +def check_if_secret_exists(client, secret_id, parent): + """Check if a secret exists in GCP Secret Manager + client: GCP Security Manager client + secret_id: ID of the secret being checked + parent: GCP secret manager parent ID for the GCP project + """ + for secret in client.list_secrets( + request=secretmanager.ListSecretsRequest(parent=parent) + ): + # secret names are in the form of "projects/project_id/secrets/secret_id" + if secret.name.split("/")[-1] == secret_id: + logging.debug(f"Secret {secret_id} exists") + return True + return False + + +def get_latest_secret_version(client, secret_id, parent): + """Get latest value of a secret from GCP Secret Manager + client: GCP Security Manager client + secret_id: ID for the secret being retrieved + parent: GCP secret manager parent ID for the GCP project + """ + response = 
client.access_secret_version( + request={"name": f"{parent}/secrets/{secret_id}/versions/latest"} + ) + return json.loads(response.payload.data.decode("UTF-8")) + + +def add_secret_version(client, secret_id, parent, data): + """Add a new version to an existing secret + client: GCP Security Manager client + secret_id: ID for the secret + parent: GCP secret manager parent ID for the GCP project + data: String value for the new version of the secret + """ + client.add_secret_version( + request={ + "parent": f"{parent}/secrets/{secret_id}", + "payload": {"data": str.encode(json.dumps(data))}, + } + ) + logging.debug("New version added") + + +def get_token(): + client = secretmanager.SecretManagerServiceClient() + secret_id = "iss-token-secret" + parent = f"projects/{os.environ['PROJECT_ID']}" + + # Check to see if the GCP secret exists + secret_exists = check_if_secret_exists(client, secret_id, parent) + + if secret_exists: + # Grab the latest token data + value = get_latest_secret_version(client, secret_id, parent) + friendly_name = value["name"] + token = value["token"] + logging.debug(f"Received token: {friendly_name}") + else: + # If there is no available ISS token secret, create secret + logging.debug("Secret does not exist, creating secret") + create_secret(client, secret_id, parent) + # Use environment variable for first run with new secret + token = os.environ["ISS_API_KEY"] + + # Pull new ISS SCMS API token + iss_base = os.environ["ISS_SCMS_TOKEN_REST_ENDPOINT"] + + # Create HTTP request headers + iss_headers = {"x-api-key": token} + + # Create the POST body + new_friendly_name = f"{os.environ['ISS_API_KEY_NAME']}_{str(uuid.uuid4())}" + iss_post_body = {"friendlyName": new_friendly_name, "expireDays": 1} + + # Create new ISS SCMS API Token to ensure its freshness + logging.debug("POST: " + iss_base) + response = requests.post(iss_base, json=iss_post_body, headers=iss_headers) + new_token = response.json()["Item"] + logging.debug(f"Received new token: {new_friendly_name}") + + if secret_exists: + # If exists, delete previous API key to prevent key clutter + iss_delete_body = {"friendlyName": friendly_name} + requests.delete(iss_base, json=iss_delete_body, headers=iss_headers) + logging.debug(f"Old token has been deleted from ISS SCMS: {friendly_name}") + + version_data = {"name": new_friendly_name, "token": new_token} + + add_secret_version(client, secret_id, parent, version_data) + + return new_token diff --git a/services/addons/images/iss_health_check/requirements.txt b/services/addons/images/iss_health_check/requirements.txt index f5e868491..6ed6d6503 100644 --- a/services/addons/images/iss_health_check/requirements.txt +++ b/services/addons/images/iss_health_check/requirements.txt @@ -1,5 +1,5 @@ -requests==2.31.0 -sqlalchemy==2.0.21 -pg8000==1.30.2 -uuid==1.30 -google-cloud-secret-manager==2.17.0 +requests==2.31.0 +sqlalchemy==2.0.21 +pg8000==1.30.2 +uuid==1.30 +google-cloud-secret-manager==2.17.0 diff --git a/services/addons/images/iss_health_check/sample.env b/services/addons/images/iss_health_check/sample.env index 8e711c2ea..6fc35f8cd 100644 --- a/services/addons/images/iss_health_check/sample.env +++ b/services/addons/images/iss_health_check/sample.env @@ -1,21 +1,21 @@ -# ISS Account Authentication -ISS_API_KEY= -ISS_API_KEY_NAME= -ISS_PROJECT_ID= -ISS_SCMS_TOKEN_REST_ENDPOINT= -ISS_SCMS_VEHICLE_REST_ENDPOINT= - -# PostgreSQL connection information -# Host port must be specified -PG_DB_HOST=:5432 -PG_DB_NAME= -PG_DB_USER= -PG_DB_PASS= - -# GCP Project ID and service account 
JSON key file location (mount as volume or secret) -PROJECT_ID= -GOOGLE_APPLICATION_CREDENTIALS= - -# Customize the logging level, defaults to INFO -# Options: DEBUG, INFO, WARN, ERROR (case sensitive) +# ISS Account Authentication +ISS_API_KEY= +ISS_API_KEY_NAME= +ISS_PROJECT_ID= +ISS_SCMS_TOKEN_REST_ENDPOINT= +ISS_SCMS_VEHICLE_REST_ENDPOINT= + +# PostgreSQL connection information +# Host port must be specified +PG_DB_HOST=:5432 +PG_DB_NAME= +PG_DB_USER= +PG_DB_PASS= + +# GCP Project ID and service account JSON key file location (mount as volume or secret) +PROJECT_ID= +GOOGLE_APPLICATION_CREDENTIALS= + +# Customize the logging level, defaults to INFO +# Options: DEBUG, INFO, WARN, ERROR (case sensitive) LOGGING_LEVEL= \ No newline at end of file diff --git a/services/addons/images/rsu_ping/README.md b/services/addons/images/rsu_ping/README.md index e093e8be0..66fb4098f 100644 --- a/services/addons/images/rsu_ping/README.md +++ b/services/addons/images/rsu_ping/README.md @@ -1,8 +1,8 @@ -# RSU Ping Fetch +# RSU Ping Services ## Table of Contents -- [RSU Ping Fetch](#rsu-ping-fetch) +- [RSU Ping Services](#rsu-ping-services) - [Table of Contents](#table-of-contents) - [About ](#about-) - [Requirements ](#requirements-) diff --git a/services/addons/images/rsu_ping/crontab.rsu_ping_fetch b/services/addons/images/rsu_ping/crontab.rsu_ping_fetch index 768a6b195..2056248f3 100644 --- a/services/addons/images/rsu_ping/crontab.rsu_ping_fetch +++ b/services/addons/images/rsu_ping/crontab.rsu_ping_fetch @@ -1,3 +1,3 @@ -PYTHONPATH=/home -*/1 * * * * /usr/local/bin/python3 /home/rsu_ping_fetch.py -0 0 * * * /usr/local/bin/python3 /home/purger.py +PYTHONPATH=/home +*/1 * * * * /usr/local/bin/python3 /home/rsu_ping_fetch.py +0 0 * * * /usr/local/bin/python3 /home/purger.py diff --git a/services/addons/images/rsu_ping/purger.py b/services/addons/images/rsu_ping/purger.py index 6e3b70793..36d833663 100644 --- a/services/addons/images/rsu_ping/purger.py +++ b/services/addons/images/rsu_ping/purger.py @@ -1,53 +1,66 @@ -from datetime import datetime, timedelta -import os -import logging -import common.pgquery as pgquery - -def get_last_online_rsu_records(): - result = [] - - query = "SELECT a.ping_id, a.rsu_id, a.timestamp " \ - "FROM (" \ - "SELECT pd.ping_id, pd.rsu_id, pd.timestamp, ROW_NUMBER() OVER (PARTITION BY pd.rsu_id order by pd.timestamp DESC) AS row_id " \ - "FROM public.ping AS pd " \ - "WHERE pd.result = '1'" \ - ") AS a " \ - "WHERE a.row_id <= 1 ORDER BY rsu_id" - data = pgquery.query_db(query) - - # Create list of RSU last online ping records - # Tuple in the format of (ping_id, rsu_id, timestamp (UTC)) - result = [value for value in data] - - return result - -def purge_ping_data(stale_period): - last_online_list = get_last_online_rsu_records() - - stale_point = datetime.utcnow() - timedelta(hours=stale_period) - stale_point_str = stale_point.strftime("%Y/%m/%dT%H:%M:%S") - - for record in last_online_list: - logging.debug(f"Cleaning up rsu_id: {str(record[1])}") - # Check if the RSU has been offline longer than the stale period - if record[2] < stale_point: - logging.debug(f"Latest record of rsu_id {str(record[1])} is a stale RSU ping record (ping_id: {str(record[0])})") - # Create query to delete all records of the stale ping data besides the latest record - purge_query = "DELETE FROM public.ping " \ - f"WHERE rsu_id = {str(record[1])} AND ping_id != {str(record[0])}" - else: - # Create query to delete all records before the stale_point - purge_query = "DELETE FROM public.ping " \ - 
f"WHERE rsu_id = {str(record[1])} AND timestamp < '{stale_point_str}'::timestamp" - - pgquery.write_db(purge_query) - - logging.info("Ping data purging successfully completed") - -if __name__ == "__main__": - # Configure logging based on ENV var or use default if not set - log_level = 'INFO' if "LOGGING_LEVEL" not in os.environ else os.environ['LOGGING_LEVEL'] - logging.basicConfig(format='%(levelname)s:%(message)s', level=log_level) - - stale_period = int(os.environ["STALE_PERIOD"]) - purge_ping_data(stale_period) \ No newline at end of file +from datetime import datetime, timedelta +import os +import logging +import common.pgquery as pgquery + + +def get_last_online_rsu_records(): + result = [] + + query = ( + "SELECT a.ping_id, a.rsu_id, a.timestamp " + "FROM (" + "SELECT pd.ping_id, pd.rsu_id, pd.timestamp, ROW_NUMBER() OVER (PARTITION BY pd.rsu_id order by pd.timestamp DESC) AS row_id " + "FROM public.ping AS pd " + "WHERE pd.result = '1'" + ") AS a " + "WHERE a.row_id <= 1 ORDER BY rsu_id" + ) + data = pgquery.query_db(query) + + # Create list of RSU last online ping records + # Tuple in the format of (ping_id, rsu_id, timestamp (UTC)) + result = [value for value in data] + + return result + + +def purge_ping_data(stale_period): + last_online_list = get_last_online_rsu_records() + + stale_point = datetime.utcnow() - timedelta(hours=stale_period) + stale_point_str = stale_point.strftime("%Y/%m/%dT%H:%M:%S") + + for record in last_online_list: + logging.debug(f"Cleaning up rsu_id: {str(record[1])}") + # Check if the RSU has been offline longer than the stale period + if record[2] < stale_point: + logging.debug( + f"Latest record of rsu_id {str(record[1])} is a stale RSU ping record (ping_id: {str(record[0])})" + ) + # Create query to delete all records of the stale ping data besides the latest record + purge_query = ( + "DELETE FROM public.ping " + f"WHERE rsu_id = {str(record[1])} AND ping_id != {str(record[0])}" + ) + else: + # Create query to delete all records before the stale_point + purge_query = ( + "DELETE FROM public.ping " + f"WHERE rsu_id = {str(record[1])} AND timestamp < '{stale_point_str}'::timestamp" + ) + + pgquery.write_db(purge_query) + + logging.info("Ping data purging successfully completed") + + +if __name__ == "__main__": + # Configure logging based on ENV var or use default if not set + log_level = ( + "INFO" if "LOGGING_LEVEL" not in os.environ else os.environ["LOGGING_LEVEL"] + ) + logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level) + + stale_period = int(os.environ["STALE_PERIOD"]) + purge_ping_data(stale_period) diff --git a/services/addons/images/rsu_ping/requirements.txt b/services/addons/images/rsu_ping/requirements.txt index 9ca8ddc5e..96dc349e1 100644 --- a/services/addons/images/rsu_ping/requirements.txt +++ b/services/addons/images/rsu_ping/requirements.txt @@ -1,5 +1,5 @@ -requests==2.31.0 -sqlalchemy==2.0.21 -pg8000==1.30.2 -freezegun==1.2.2 -DateTime==5.2 \ No newline at end of file +requests==2.31.0 +sqlalchemy==2.0.21 +pg8000==1.30.2 +freezegun==1.2.2 +DateTime==5.2 diff --git a/services/addons/images/rsu_ping/rsu_ping_fetch.py b/services/addons/images/rsu_ping/rsu_ping_fetch.py index caa1afb5c..f9c4da778 100644 --- a/services/addons/images/rsu_ping/rsu_ping_fetch.py +++ b/services/addons/images/rsu_ping/rsu_ping_fetch.py @@ -1,155 +1,154 @@ -import requests -import os -import logging -import common.pgquery as pgquery - -def get_rsu_data(): - result = [] - - # Execute the query and fetch all results - query = "SELECT rsu_id, 
ipv4_address FROM public.rsus ORDER BY rsu_id" - data = pgquery.query_db(query) - - logging.debug('Parsing results...') - for point in data: - rsu = { - 'rsu_id': point[0], - 'rsu_ip': str(point[1]) - } - result.append(rsu) - - return result - -def insert_rsu_ping(request_json): - rsu_id = request_json["rsu_id"] - histories = request_json["histories"] - - logging.debug(f'Inserting {len(histories)} new Ping records for RsuData {rsu_id}') - for history in histories: - try: - query = f'INSERT INTO public.ping (timestamp, result, rsu_id) VALUES (to_timestamp({history["clock"]}), B\'{history["value"]}\', {rsu_id})' - pgquery.write_db(query) - except Exception as e: - logging.exception(f"Error inserting Ping record: {e}") - return False - - return True - -class RsuStatusFetch: - def __init__(self): - self.ZABBIX_ENDPOINT = os.environ['ZABBIX_ENDPOINT'] - self.ZABBIX_AUTH = '' - - def setZabbixAuth(self): - logging.info(f'Fetching Zabbix auth token from {self.ZABBIX_ENDPOINT}') - zabbixAuthPayload = { - "jsonrpc": "2.0", - "method": "user.login", - "id": 1, - "params": { - "username": os.environ['ZABBIX_USER'], - "password": os.environ['ZABBIX_PASSWORD'] - } - } - - zabbixAuthResponse = requests.post(self.ZABBIX_ENDPOINT, json=zabbixAuthPayload) - self.ZABBIX_AUTH = zabbixAuthResponse.json()['result'] - - def getHostInfo(self, rsu_ip): - hostPayload = { - "jsonrpc": "2.0", - "method": "host.get", - "id": 1, - "auth": self.ZABBIX_AUTH, - "params": { - "output": [ - "hostid", - "host" - ], - "selectInterfaces": [ - "interfaceid", - "ip" - ], - "filter": { - "ip": rsu_ip - } - } - } - hostInfoResponse = requests.post(self.ZABBIX_ENDPOINT, json=hostPayload) - return hostInfoResponse.json() - - def getItem(self, hostInfo): - itemPayload = { - "jsonrpc": "2.0", - "method": "item.get", - "id": 1, - "auth": self.ZABBIX_AUTH, - "params": { - "hostids": [hostInfo['result'][0]['hostid']], - "filter": {"key_": "icmpping"} - } - } - getItemResponse = requests.post(self.ZABBIX_ENDPOINT, json=itemPayload) - return getItemResponse.json() - - def getHistory(self, zabbix_item): - historyPayload = { - "jsonrpc": "2.0", - "method": "history.get", - "id": 1, - "auth": self.ZABBIX_AUTH, - "params": { - "itemids": [zabbix_item['result'][0]['itemid']], - "output": "extend", - "sortfield": "clock", - "sortorder": "DESC", - "limit": 5 - } - } - getHistoryResponse = requests.post(self.ZABBIX_ENDPOINT, json=historyPayload) - return getHistoryResponse.json() - - def insertHistoryItem(self, zabbix_history, rsu_item): - historyItemPayload = { - "histories": zabbix_history['result'], - "rsu_id": rsu_item['rsu_id'] - } - logging.info(f'Inserting {len(zabbix_history["result"])} history items for RSU {rsu_item["rsu_ip"]}') - return insert_rsu_ping(historyItemPayload) - - def printConfigInfo(self): - configObject = { - 'ZABBIX_ENDPOINT' : self.ZABBIX_ENDPOINT, - 'ZABBIX_AUTH' : self.ZABBIX_AUTH - } - logging.info(f'Configuration: {configObject}') - - def run(self): - self.setZabbixAuth() - self.printConfigInfo() - rsu_items = get_rsu_data() - logging.info(f'Found {len(rsu_items)} RSUs to fetch status for') - - # loop over rsuInfo, get host info - for rsu_item in rsu_items: - try: - hostInfo = self.getHostInfo(rsu_item["rsu_ip"]) - # with host info, get items - zabbix_item = self.getItem(hostInfo) - # with item get history - zabbix_history = self.getHistory(zabbix_item) - # with history, insert history item - insertSuccess = self.insertHistoryItem(zabbix_history, rsu_item) - if not insertSuccess: - logging.warning(f'Failed to insert 
history item for {rsu_item["rsu_ip"]}') - except Exception as e: - logging.error(f'Failed to fetch Zabbix data RSU {rsu_item["rsu_ip"]}') - return - -if __name__ == "__main__": - # Configure logging based on ENV var or use default if not set - log_level = 'INFO' if "LOGGING_LEVEL" not in os.environ else os.environ['LOGGING_LEVEL'] - logging.basicConfig(format='%(levelname)s:%(message)s', level=log_level) - - rsf = RsuStatusFetch() - rsf.run() +import requests +import os +import logging +import common.pgquery as pgquery + + +def get_rsu_data(): + result = [] + + # Execute the query and fetch all results + query = "SELECT rsu_id, ipv4_address FROM public.rsus ORDER BY rsu_id" + data = pgquery.query_db(query) + + logging.debug("Parsing results...") + for point in data: + rsu = {"rsu_id": point[0], "rsu_ip": str(point[1])} + result.append(rsu) + + return result + + +def insert_rsu_ping(request_json): + rsu_id = request_json["rsu_id"] + histories = request_json["histories"] + + logging.debug(f"Inserting {len(histories)} new Ping records for RsuData {rsu_id}") + for history in histories: + try: + query = f'INSERT INTO public.ping (timestamp, result, rsu_id) VALUES (to_timestamp({history["clock"]}), B\'{history["value"]}\', {rsu_id})' + pgquery.write_db(query) + except Exception as e: + logging.exception(f"Error inserting Ping record: {e}") + return False + + return True + + +class RsuStatusFetch: + def __init__(self): + self.ZABBIX_ENDPOINT = os.environ["ZABBIX_ENDPOINT"] + self.ZABBIX_AUTH = "" + + def setZabbixAuth(self): + logging.info(f"Fetching Zabbix auth token from {self.ZABBIX_ENDPOINT}") + zabbixAuthPayload = { + "jsonrpc": "2.0", + "method": "user.login", + "id": 1, + "params": { + "username": os.environ["ZABBIX_USER"], + "password": os.environ["ZABBIX_PASSWORD"], + }, + } + + zabbixAuthResponse = requests.post(self.ZABBIX_ENDPOINT, json=zabbixAuthPayload) + self.ZABBIX_AUTH = zabbixAuthResponse.json()["result"] + + def getHostInfo(self, rsu_ip): + hostPayload = { + "jsonrpc": "2.0", + "method": "host.get", + "id": 1, + "auth": self.ZABBIX_AUTH, + "params": { + "output": ["hostid", "host"], + "selectInterfaces": ["interfaceid", "ip"], + "filter": {"ip": rsu_ip}, + }, + } + hostInfoResponse = requests.post(self.ZABBIX_ENDPOINT, json=hostPayload) + return hostInfoResponse.json() + + def getItem(self, hostInfo): + itemPayload = { + "jsonrpc": "2.0", + "method": "item.get", + "id": 1, + "auth": self.ZABBIX_AUTH, + "params": { + "hostids": [hostInfo["result"][0]["hostid"]], + "filter": {"key_": "icmpping"}, + }, + } + getItemResponse = requests.post(self.ZABBIX_ENDPOINT, json=itemPayload) + return getItemResponse.json() + + def getHistory(self, zabbix_item): + historyPayload = { + "jsonrpc": "2.0", + "method": "history.get", + "id": 1, + "auth": self.ZABBIX_AUTH, + "params": { + "itemids": [zabbix_item["result"][0]["itemid"]], + "output": "extend", + "sortfield": "clock", + "sortorder": "DESC", + "limit": 5, + }, + } + getHistoryResponse = requests.post(self.ZABBIX_ENDPOINT, json=historyPayload) + return getHistoryResponse.json() + + def insertHistoryItem(self, zabbix_history, rsu_item): + historyItemPayload = { + "histories": zabbix_history["result"], + "rsu_id": rsu_item["rsu_id"], + } + logging.info( + f'Inserting {len(zabbix_history["result"])} history items for RSU {rsu_item["rsu_ip"]}' + ) + return insert_rsu_ping(historyItemPayload) + + def printConfigInfo(self): + configObject = { + "ZABBIX_ENDPOINT": self.ZABBIX_ENDPOINT, + "ZABBIX_AUTH": self.ZABBIX_AUTH, + } + 
logging.info(f"Configuration: {configObject}") + + def run(self): + self.setZabbixAuth() + self.printConfigInfo() + rsu_items = get_rsu_data() + logging.info(f"Found {len(rsu_items)} RSUs to fetch status for") + + # loop over rsuInfo, get host info + for rsu_item in rsu_items: + try: + hostInfo = self.getHostInfo(rsu_item["rsu_ip"]) + # with host info, get items + zabbix_item = self.getItem(hostInfo) + # with item get history + zabbix_history = self.getHistory(zabbix_item) + # with history, insert history item + insertSuccess = self.insertHistoryItem(zabbix_history, rsu_item) + if not insertSuccess: + logging.warning( + f'Failed to insert history item for {rsu_item["rsu_ip"]}' + ) + except Exception as e: + logging.error(f'Failed to fetch Zabbix data RSU {rsu_item["rsu_ip"]}') + return + + +if __name__ == "__main__": + # Configure logging based on ENV var or use default if not set + log_level = ( + "INFO" if "LOGGING_LEVEL" not in os.environ else os.environ["LOGGING_LEVEL"] + ) + logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level) + + rsf = RsuStatusFetch() + rsf.run() diff --git a/services/addons/images/rsu_ping/rsu_pinger.py b/services/addons/images/rsu_ping/rsu_pinger.py index ed8a34da7..966216769 100644 --- a/services/addons/images/rsu_ping/rsu_pinger.py +++ b/services/addons/images/rsu_ping/rsu_pinger.py @@ -5,79 +5,86 @@ from datetime import datetime from subprocess import Popen, DEVNULL + def insert_ping_data(ping_data, ping_time): - # Build the insert query with the RSU ping data - query = "INSERT INTO public.ping (timestamp, result, rsu_id) VALUES" - for rsu_id, online_status in ping_data.items(): - query += f" (TO_TIMESTAMP(\'{ping_time}\', 'YYYY-MM-DD HH24:MI:SS'), B\'{online_status}\', {rsu_id})," - query = query[:-1] + # Build the insert query with the RSU ping data + query = "INSERT INTO public.ping (timestamp, result, rsu_id) VALUES" + for rsu_id, online_status in ping_data.items(): + query += f" (TO_TIMESTAMP('{ping_time}', 'YYYY-MM-DD HH24:MI:SS'), B'{online_status}', {rsu_id})," + query = query[:-1] + + # Run query + pgquery.write_db(query) - # Run query - pgquery.write_db(query) def ping_rsu_ips(rsu_list): - p = {} - # Start ping processes - for rsu in rsu_list: - # id: rsu_id - # key: process pinging the RSU's ipv4_address - p[rsu[0]] = Popen(['ping', '-n', '-w5', '-c3', rsu[1]], stdout=DEVNULL) - - ping_data = {} - while p: - for rsu_id, proc in p.items(): - # Check if process has ended - if proc.poll() is not None: - del p[rsu_id] - - if proc.returncode == 0: - # Active - logging.debug('%s active' % rsu_id) - ping_data[rsu_id] = '1' - else: - # Offline/Unresponsive - logging.debug('%s no response' % rsu_id) - ping_data[rsu_id] = '0' - break - - return ping_data + p = {} + # Start ping processes + for rsu in rsu_list: + # id: rsu_id + # key: process pinging the RSU's ipv4_address + p[rsu[0]] = Popen(["ping", "-n", "-w5", "-c3", rsu[1]], stdout=DEVNULL) + + ping_data = {} + while p: + for rsu_id, proc in p.items(): + # Check if process has ended + if proc.poll() is not None: + del p[rsu_id] + + if proc.returncode == 0: + # Active + logging.debug("%s active" % rsu_id) + ping_data[rsu_id] = "1" + else: + # Offline/Unresponsive + logging.debug("%s no response" % rsu_id) + ping_data[rsu_id] = "0" + break + + return ping_data + def get_rsu_ips(): - rsu_list = [] - query = "SELECT to_jsonb(row) " \ - "FROM (" \ - "SELECT rsu_id, ipv4_address FROM public.rsus" \ - ") as row" + rsu_list = [] + query = ( + "SELECT to_jsonb(row) " + "FROM (" + "SELECT rsu_id, 
ipv4_address FROM public.rsus" + ") as row" + ) - # Query PostgreSQL for the list of RSU IPs - data = pgquery.query_db(query) + # Query PostgreSQL for the list of RSU IPs + data = pgquery.query_db(query) - for row in data: - row = dict(row[0]) - rsu_list.append((row['rsu_id'], row['ipv4_address'])) + for row in data: + row = dict(row[0]) + rsu_list.append((row["rsu_id"], row["ipv4_address"])) + + return rsu_list - return rsu_list def run_rsu_pinger(): - rsu_list = get_rsu_ips() + rsu_list = get_rsu_ips() + + # Ping RSU IPs and collect start/end times + dt_string = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + st = time.time() + ping_data = ping_rsu_ips(rsu_list) + et = time.time() - # Ping RSU IPs and collect start/end times - dt_string = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - st = time.time() - ping_data = ping_rsu_ips(rsu_list) - et = time.time() + elapsed_time = et - st + logging.info(f"Ping execution time: {elapsed_time} seconds") - elapsed_time = et - st - logging.info(f'Ping execution time: {elapsed_time} seconds') + if len(ping_data) > 0: + insert_ping_data(ping_data, dt_string) + else: + logging.error("Ping results are empty, something went wrong during RSU pings") - if len(ping_data) > 0: - insert_ping_data(ping_data, dt_string) - else: - logging.error("Ping results are empty, something went wrong during RSU pings") if __name__ == "__main__": - # Configure logging based on ENV var or use default if not set - log_level = os.environ.get('LOGGING_LEVEL', 'INFO') - logging.basicConfig(format='%(levelname)s:%(message)s', level=log_level) + # Configure logging based on ENV var or use default if not set + log_level = os.environ.get("LOGGING_LEVEL", "INFO") + logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level) - run_rsu_pinger() \ No newline at end of file + run_rsu_pinger() diff --git a/services/addons/images/rsu_ping/sample.env b/services/addons/images/rsu_ping/sample.env index 74273d0c9..d9c1b9e0b 100644 --- a/services/addons/images/rsu_ping/sample.env +++ b/services/addons/images/rsu_ping/sample.env @@ -1,20 +1,20 @@ -# Zabbix endpoint and API authentication -# Only used for rsu_ping_fetch -ZABBIX_ENDPOINT= -ZABBIX_USER= -ZABBIX_PASSWORD= - -# PostgreSQL connection information -# Host port must be specified -PG_DB_HOST=:5432 -PG_DB_NAME= -PG_DB_USER= -PG_DB_PASS= - -# Customize the period at which the purger will determine a ping log is too old and will be deleted -# Number of hours -STALE_PERIOD=24 - -# Customize the logging level, defaults to INFO -# Options: DEBUG, INFO, WARN, ERROR (case sensitive) +# Zabbix endpoint and API authentication +# Only used for rsu_ping_fetch +ZABBIX_ENDPOINT= +ZABBIX_USER= +ZABBIX_PASSWORD= + +# PostgreSQL connection information +# Host port must be specified +PG_DB_HOST=:5432 +PG_DB_NAME= +PG_DB_USER= +PG_DB_PASS= + +# Customize the period at which the purger will determine a ping log is too old and will be deleted +# Number of hours +STALE_PERIOD=24 + +# Customize the logging level, defaults to INFO +# Options: DEBUG, INFO, WARN, ERROR (case sensitive) LOGGING_LEVEL= \ No newline at end of file diff --git a/services/addons/tests/bsm_query/test_bsm_query.py b/services/addons/tests/bsm_query/test_bsm_query.py index d6b357ebc..53feca934 100644 --- a/services/addons/tests/bsm_query/test_bsm_query.py +++ b/services/addons/tests/bsm_query/test_bsm_query.py @@ -1,103 +1,103 @@ -import os -from pymongo import MongoClient -from datetime import datetime -from unittest.mock import MagicMock, patch - -import pytest -from 
addons.images.bsm_query import bsm_query - -from addons.images.bsm_query.bsm_query import create_message, process_message, run - - -@pytest.fixture -def mock_mongo_client(): - mock_client = MagicMock(spec=MongoClient) - mock_db = MagicMock() - mock_collection = MagicMock() - mock_client.__getitem__.return_value = mock_db - mock_db.__getitem__.return_value = mock_collection - return mock_client - - -def test_create_message(): - original_message = { - "payload": { - "data": {"coreData": {"position": {"longitude": 123.45, "latitude": 67.89}}} - }, - "metadata": { - "originIp": "127.0.0.1", - "odeReceivedAt": "2022-01-01T12:00:00.000Z", - }, - } - - expected_result = { - "type": "Feature", - "geometry": {"type": "Point", "coordinates": [123.45, 67.89]}, - "properties": {"id": "127.0.0.1", "timestamp": datetime(2022, 1, 1, 12, 0, 0)}, - } - - assert create_message(original_message) == expected_result - - -def test_process_message(mock_mongo_client): - message = { - "payload": { - "data": {"coreData": {"position": {"longitude": 123.45, "latitude": 67.89}}} - }, - "metadata": { - "originIp": "127.0.0.1", - "odeReceivedAt": "2022-01-01T12:00:00.000Z", - }, - } - collection_name = "test_collection" - - process_message(message, mock_mongo_client, collection_name) - - mock_collection = mock_mongo_client.__getitem__.return_value - mock_collection.insert_one.assert_called_once_with( - { - "type": "Feature", - "geometry": {"type": "Point", "coordinates": [123.45, 67.89]}, - "properties": { - "id": "127.0.0.1", - "timestamp": datetime(2022, 1, 1, 12, 0, 0), - }, - } - ) - - -@patch.dict( - os.environ, - { - "MONGO_DB_URI": "mongodb://localhost:27017", - "MONGO_DB_NAME": "test_db", - "MONGO_BSM_INPUT_COLLECTION": "bsm_input", - "MONGO_GEO_OUTPUT_COLLECTION": "geo_output", - }, -) -@patch("addons.images.bsm_query.bsm_query.ThreadPoolExecutor") -def test_run(mock_thread_pool_executor, mock_mongo_client): - mock_collection = mock_mongo_client.__getitem__.return_value - bsm_query.set_mongo_client = MagicMock( - return_value=[mock_mongo_client, mock_collection] - ) - - mock_stream = MagicMock() - - mock_stream.return_value = "hi" - - mock_stream.__iter__.return_value = [ - {"fullDocument": "document1"}, - {"fullDocument": "document2"}, - {"fullDocument": "document3"}, - ] - - mock_collection.watch.return_value.__enter__.return_value = mock_stream - - bsm_query.run() - - mock_thread_pool_executor.assert_called_once_with(max_workers=5) - - -if __name__ == "__main__": - pytest.main() +import os +from pymongo import MongoClient +from datetime import datetime +from unittest.mock import MagicMock, patch + +import pytest +from addons.images.bsm_query import bsm_query + +from addons.images.bsm_query.bsm_query import create_message, process_message, run + + +@pytest.fixture +def mock_mongo_client(): + mock_client = MagicMock(spec=MongoClient) + mock_db = MagicMock() + mock_collection = MagicMock() + mock_client.__getitem__.return_value = mock_db + mock_db.__getitem__.return_value = mock_collection + return mock_client + + +def test_create_message(): + original_message = { + "payload": { + "data": {"coreData": {"position": {"longitude": 123.45, "latitude": 67.89}}} + }, + "metadata": { + "originIp": "127.0.0.1", + "odeReceivedAt": "2022-01-01T12:00:00.000Z", + }, + } + + expected_result = { + "type": "Feature", + "geometry": {"type": "Point", "coordinates": [123.45, 67.89]}, + "properties": {"id": "127.0.0.1", "timestamp": datetime(2022, 1, 1, 12, 0, 0)}, + } + + assert create_message(original_message) == 
expected_result + + +def test_process_message(mock_mongo_client): + message = { + "payload": { + "data": {"coreData": {"position": {"longitude": 123.45, "latitude": 67.89}}} + }, + "metadata": { + "originIp": "127.0.0.1", + "odeReceivedAt": "2022-01-01T12:00:00.000Z", + }, + } + collection_name = "test_collection" + + process_message(message, mock_mongo_client, collection_name) + + mock_collection = mock_mongo_client.__getitem__.return_value + mock_collection.insert_one.assert_called_once_with( + { + "type": "Feature", + "geometry": {"type": "Point", "coordinates": [123.45, 67.89]}, + "properties": { + "id": "127.0.0.1", + "timestamp": datetime(2022, 1, 1, 12, 0, 0), + }, + } + ) + + +@patch.dict( + os.environ, + { + "MONGO_DB_URI": "mongodb://localhost:27017", + "MONGO_DB_NAME": "test_db", + "MONGO_BSM_INPUT_COLLECTION": "bsm_input", + "MONGO_GEO_OUTPUT_COLLECTION": "geo_output", + }, +) +@patch("addons.images.bsm_query.bsm_query.ThreadPoolExecutor") +def test_run(mock_thread_pool_executor, mock_mongo_client): + mock_collection = mock_mongo_client.__getitem__.return_value + bsm_query.set_mongo_client = MagicMock( + return_value=[mock_mongo_client, mock_collection] + ) + + mock_stream = MagicMock() + + mock_stream.return_value = "hi" + + mock_stream.__iter__.return_value = [ + {"fullDocument": "document1"}, + {"fullDocument": "document2"}, + {"fullDocument": "document3"}, + ] + + mock_collection.watch.return_value.__enter__.return_value = mock_stream + + bsm_query.run() + + mock_thread_pool_executor.assert_called_once_with(max_workers=5) + + +if __name__ == "__main__": + pytest.main() diff --git a/services/addons/tests/count_metric/test_count_metric_driver.py b/services/addons/tests/count_metric/test_count_metric_driver.py index eed84cc3d..af0923a2d 100644 --- a/services/addons/tests/count_metric/test_count_metric_driver.py +++ b/services/addons/tests/count_metric/test_count_metric_driver.py @@ -1,97 +1,102 @@ -from os import environ -from addons.images.count_metric import driver -from mock import MagicMock -from unittest.mock import patch - -@patch("addons.images.count_metric.driver.pgquery.query_db") -def test_get_rsu_list(mock_query_db): - # mock - mock_query_db.return_value = [ - ( - { - "ipv4_address": "192.168.0.10", - "primary_route": "I-80", - }, - ), - ] - - # run - result = driver.get_rsu_list() - - expected_result = [{"ipv4_address": "192.168.0.10", "primary_route": "I-80"}] - mock_query_db.assert_called_once() - assert result == expected_result - - -@patch("addons.images.count_metric.driver.get_rsu_list") -def test_populateRsuDict_success(mock_get_rsu_list): - # prepare - mock_get_rsu_list.return_value = [{"ipv4_address": "192.168.0.10", "primary_route": "I-80"}] - - # call - driver.populateRsuDict() - - # check that rsu_location_dict is correct - rsu_location_dict = driver.rsu_location_dict - expected_rsu_location_dict = {"192.168.0.10": "I-80"} - assert rsu_location_dict == expected_rsu_location_dict - - # check that rsu_count_dict is correct - rsu_count_dict = driver.rsu_count_dict - expected_rsu_count_dict = {"I-80": {"192.168.0.10": 0}, "Unknown": {}} - assert rsu_count_dict == expected_rsu_count_dict - -@patch("addons.images.count_metric.driver.get_rsu_list") -def test_populateRsuDict_empty_object(mock_get_rsu_list): - # prepare - mock_get_rsu_list.return_value = [] - - driver.rsu_location_dict = {} - driver.rsu_count_dict = {} - - driver.populateRsuDict() - - assert driver.rsu_location_dict == {} - assert driver.rsu_count_dict == {"Unknown": {}} - 
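
The two `populateRsuDict` tests above pin down the exact dictionary shapes the count-metric driver maintains. A minimal sketch of that mapping, reconstructed from the tests' asserted expected values rather than copied from `driver.py`:

```python
# Dictionary shapes asserted by test_populateRsuDict_success and
# test_populateRsuDict_empty_object; reconstructed from the tests' expected
# values, not from the driver's actual implementation.
def build_rsu_dicts(rsu_list):
    # ip -> route, e.g. {"192.168.0.10": "I-80"}
    rsu_location_dict = {rsu["ipv4_address"]: rsu["primary_route"] for rsu in rsu_list}

    # route -> {ip: count}, always seeded with an "Unknown" bucket
    rsu_count_dict = {"Unknown": {}}
    for rsu in rsu_list:
        rsu_count_dict.setdefault(rsu["primary_route"], {})[rsu["ipv4_address"]] = 0

    return rsu_location_dict, rsu_count_dict


assert build_rsu_dicts([{"ipv4_address": "192.168.0.10", "primary_route": "I-80"}]) == (
    {"192.168.0.10": "I-80"},
    {"I-80": {"192.168.0.10": 0}, "Unknown": {}},
)
assert build_rsu_dicts([]) == ({}, {"Unknown": {}})
```
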
-@patch("addons.images.count_metric.driver.rsu_location_dict", {}) -@patch("addons.images.count_metric.driver.rsu_count_dict", {}) -@patch("addons.images.count_metric.driver.populateRsuDict", MagicMock()) -@patch("addons.images.count_metric.driver.KafkaMessageCounter") -def test_run_success(mock_KafkaMessageCounter): - # prepare - mock_KafkaMessageCounter.return_value = MagicMock() - mock_KafkaMessageCounter.return_value.run = MagicMock() - environ["MESSAGE_TYPES"] = "bsm" - - # call - driver.run() - - # check - driver.populateRsuDict.assert_called_once() - driver.KafkaMessageCounter.assert_called() - - -def test_run_message_types_not_set(): - # prepare - environ["MESSAGE_TYPES"] = "" - driver.rsu_location_dict = {} - driver.rsu_count_dict = {} - driver.logging = MagicMock() - driver.logging.error = MagicMock() - driver.exit = MagicMock() - driver.exit.side_effect = SystemExit - - # call - try: - driver.run() - except SystemExit: - pass - - # check - driver.logging.error.assert_called_once_with( - "MESSAGE_TYPES environment variable not set! Exiting." - ) - driver.exit.assert_called_once_with( - "MESSAGE_TYPES environment variable not set! Exiting." - ) +from os import environ +from addons.images.count_metric import driver +from mock import MagicMock +from unittest.mock import patch + + +@patch("addons.images.count_metric.driver.pgquery.query_db") +def test_get_rsu_list(mock_query_db): + # mock + mock_query_db.return_value = [ + ( + { + "ipv4_address": "192.168.0.10", + "primary_route": "I-80", + }, + ), + ] + + # run + result = driver.get_rsu_list() + + expected_result = [{"ipv4_address": "192.168.0.10", "primary_route": "I-80"}] + mock_query_db.assert_called_once() + assert result == expected_result + + +@patch("addons.images.count_metric.driver.get_rsu_list") +def test_populateRsuDict_success(mock_get_rsu_list): + # prepare + mock_get_rsu_list.return_value = [ + {"ipv4_address": "192.168.0.10", "primary_route": "I-80"} + ] + + # call + driver.populateRsuDict() + + # check that rsu_location_dict is correct + rsu_location_dict = driver.rsu_location_dict + expected_rsu_location_dict = {"192.168.0.10": "I-80"} + assert rsu_location_dict == expected_rsu_location_dict + + # check that rsu_count_dict is correct + rsu_count_dict = driver.rsu_count_dict + expected_rsu_count_dict = {"I-80": {"192.168.0.10": 0}, "Unknown": {}} + assert rsu_count_dict == expected_rsu_count_dict + + +@patch("addons.images.count_metric.driver.get_rsu_list") +def test_populateRsuDict_empty_object(mock_get_rsu_list): + # prepare + mock_get_rsu_list.return_value = [] + + driver.rsu_location_dict = {} + driver.rsu_count_dict = {} + + driver.populateRsuDict() + + assert driver.rsu_location_dict == {} + assert driver.rsu_count_dict == {"Unknown": {}} + + +@patch("addons.images.count_metric.driver.rsu_location_dict", {}) +@patch("addons.images.count_metric.driver.rsu_count_dict", {}) +@patch("addons.images.count_metric.driver.populateRsuDict", MagicMock()) +@patch("addons.images.count_metric.driver.KafkaMessageCounter") +def test_run_success(mock_KafkaMessageCounter): + # prepare + mock_KafkaMessageCounter.return_value = MagicMock() + mock_KafkaMessageCounter.return_value.run = MagicMock() + environ["MESSAGE_TYPES"] = "bsm" + + # call + driver.run() + + # check + driver.populateRsuDict.assert_called_once() + driver.KafkaMessageCounter.assert_called() + + +def test_run_message_types_not_set(): + # prepare + environ["MESSAGE_TYPES"] = "" + driver.rsu_location_dict = {} + driver.rsu_count_dict = {} + driver.logging = 
MagicMock() + driver.logging.error = MagicMock() + driver.exit = MagicMock() + driver.exit.side_effect = SystemExit + + # call + try: + driver.run() + except SystemExit: + pass + + # check + driver.logging.error.assert_called_once_with( + "MESSAGE_TYPES environment variable not set! Exiting." + ) + driver.exit.assert_called_once_with( + "MESSAGE_TYPES environment variable not set! Exiting." + ) diff --git a/services/addons/tests/count_metric/test_kafka_counter.py b/services/addons/tests/count_metric/test_kafka_counter.py index 6dbcc36ba..8db99b79e 100644 --- a/services/addons/tests/count_metric/test_kafka_counter.py +++ b/services/addons/tests/count_metric/test_kafka_counter.py @@ -1,457 +1,464 @@ -import os -import pytest -from mock import call, MagicMock, patch -from confluent_kafka import KafkaError, KafkaException -from addons.images.count_metric import kafka_counter - - -def createKafkaMessageCounter(type: int): - kafka_counter.bigquery.Client = MagicMock() - kafka_counter.bigquery.Client.return_value = MagicMock() - kafka_counter.bigquery.Client.return_value.query = MagicMock() - kafka_counter.bigquery.Client.return_value.query.return_value.result = MagicMock() - kafka_counter.bigquery.Client.return_value.query.return_value.result.return_value.total_rows = ( - 1 - ) - kafka_counter.pymongo.MongoClient = MagicMock() - kafka_counter.pymongo.MongoClient.return_value = MagicMock() - kafka_counter.bigquery.Client.__getitem__.return_value.__getitem__.return_value = ( - MagicMock() - ) - kafka_counter.bigquery.Client.__getitem__.return_value.__getitem__.return_value.insert_many.return_value = ( - MagicMock() - ) - thread_id = 0 - message_type = "bsm" - rsu_location_dict = {"noIP": "Unknown"} - rsu_count_dict = {"Unknown": {"noIP": 1}} - rsu_count_dict_zero = {"Unknown": {"noIP": 0}} - newKafkaMessageCounter = kafka_counter.KafkaMessageCounter( - thread_id, - message_type, - rsu_location_dict, - rsu_count_dict, - rsu_count_dict_zero, - type, - ) - - return newKafkaMessageCounter - - -def test_write_bq_with_type0_kmc_success(): - # prepare - os.environ["DESTINATION_DB"] = "BIGQUERY" - os.environ["KAFKA_BIGQUERY_TABLENAME"] = "test" - kafkaMessageCounterType0 = createKafkaMessageCounter(0) - kafka_counter.logging = MagicMock() - kafka_counter.logging.info = MagicMock() - - # call - query_values = "test" - kafkaMessageCounterType0.write_bigquery(query_values) - - # check - targetTable = os.getenv("KAFKA_BIGQUERY_TABLENAME") - expectedArgument = f"INSERT INTO `{targetTable}`(RSU, Road, Date, Type, Count) VALUES {query_values}" - kafkaMessageCounterType0.bq_client.query.assert_called_once_with(expectedArgument) - kafkaMessageCounterType0.bq_client.query.return_value.result.assert_called_once() - kafka_counter.logging.info.assert_called_once() - - -def test_write_bq_with_type1_kmc_success(): - # prepare - os.environ["DESTINATION_DB"] = "BIGQUERY" - os.environ["PUBSUB_BIGQUERY_TABLENAME"] = "test" - kafkaMessageCounterType1 = createKafkaMessageCounter(1) - kafka_counter.logging = MagicMock() - kafka_counter.logging.info = MagicMock() - - # call - query_values = "test" - kafkaMessageCounterType1.write_bigquery(query_values) - - # check - targetTable = os.getenv("PUBSUB_BIGQUERY_TABLENAME") - expectedArgument = f"INSERT INTO `{targetTable}`(RSU, Road, Date, Type, Count) VALUES {query_values}" - kafkaMessageCounterType1.bq_client.query.assert_called_once_with(expectedArgument) - kafkaMessageCounterType1.bq_client.query.return_value.result.assert_called_once() - 
kafka_counter.logging.info.assert_called_once() - - -def test_push_metrics_bq_success(): - os.environ["DESTINATION_DB"] = "BIGQUERY" - os.environ["PUBSUB_BIGQUERY_TABLENAME"] = "test" - # prepare - kafkaMessageCounter = createKafkaMessageCounter(0) - kafkaMessageCounter.write_bigquery = MagicMock() - - # call - kafkaMessageCounter.push_metrics() - - # check - kafkaMessageCounter.write_bigquery.assert_called_once() - - -def test_push_metrics_bq_exception(): - os.environ["DESTINATION_DB"] = "BIGQUERY" - os.environ["PUBSUB_BIGQUERY_TABLENAME"] = "test" - # prepare - kafkaMessageCounter = createKafkaMessageCounter(0) - kafkaMessageCounter.write_bigquery = MagicMock() - kafkaMessageCounter.write_bigquery.side_effect = Exception("test") - kafka_counter.logging = MagicMock() - kafka_counter.logging.error = MagicMock() - - # call - kafkaMessageCounter.push_metrics() - - # check - kafkaMessageCounter.write_bigquery.assert_called_once() - kafka_counter.logging.error.assert_called_once() - - -def test_write_mongo_with_type0_kmc_success(): - # prepare - os.environ["DESTINATION_DB"] = "MONGODB" - os.environ["MONGO_DB_URI"] = "URI" - os.environ["INPUT_COUNTS_MONGO_COLLECTION_NAME"] = "test_input" - kafkaMessageCounterType0 = createKafkaMessageCounter(0) - kafka_counter.logging = MagicMock() - kafka_counter.logging.info = MagicMock() - - # call - test_doc = {"test": "doc"} - kafkaMessageCounterType0.write_mongo(test_doc) - - # check - kafkaMessageCounterType0.mongo_client[os.getenv("MONGO_DB_NAME")][ - os.getenv("INPUT_COUNTS_MONGO_COLLECTION_NAME") - ].insert_many.assert_called_once_with(test_doc) - kafka_counter.logging.info.assert_called_once() - - -def test_write_mongo_with_type1_kmc_success(): - # prepare - os.environ["DESTINATION_DB"] = "MONGODB" - os.environ["MONGO_DB_URI"] = "URI" - os.environ["OUTPUT_COUNTS_MONGO_COLLECTION_NAME"] = "test_output" - kafkaMessageCounterType1 = createKafkaMessageCounter(1) - kafka_counter.logging = MagicMock() - kafka_counter.logging.info = MagicMock() - - # call - test_doc = {"test": "doc"} - kafkaMessageCounterType1.write_mongo(test_doc) - - # check - kafkaMessageCounterType1.mongo_client[os.getenv("MONGO_DB_NAME")][ - os.getenv("OUTPUT_COUNTS_MONGO_COLLECTION_NAME") - ].insert_many.assert_called_once_with(test_doc) - kafka_counter.logging.info.assert_called_once() - - -def test_push_metrics_mongo_success(): - os.environ["DESTINATION_DB"] = "MONGODB" - os.environ["MONGO_DB_URI"] = "URI" - os.environ["INPUT_COUNTS_MONGO_COLLECTION_NAME"] = "test_input" - # prepare - kafkaMessageCounter = createKafkaMessageCounter(0) - kafkaMessageCounter.write_mongo = MagicMock() - - # call - kafkaMessageCounter.push_metrics() - - # check - kafkaMessageCounter.write_mongo.assert_called_once() - - -def test_push_metrics_mongo_exception(): - os.environ["DESTINATION_DB"] = "MONGODB" - os.environ["MONGO_DB_URI"] = "URI" - os.environ["INPUT_COUNTS_MONGO_COLLECTION_NAME"] = "test_input" - # prepare - kafkaMessageCounter = createKafkaMessageCounter(0) - kafkaMessageCounter.write_mongo = MagicMock() - kafkaMessageCounter.write_mongo.side_effect = Exception("test") - kafka_counter.logging = MagicMock() - kafka_counter.logging.error = MagicMock() - - # call - kafkaMessageCounter.push_metrics() - - # check - kafkaMessageCounter.write_mongo.assert_called_once() - kafka_counter.logging.error.assert_called_once() - - -@patch('addons.images.count_metric.kafka_counter.logging') -@patch('addons.images.count_metric.kafka_counter.json') -def 
test_process_message_with_type0_kmc_origin_ip_present_success(mock_json, mock_logging): - kafkaMessageCounter = createKafkaMessageCounter(0) - originIp = "192.168.0.5" - mock_json.loads.return_value = { - "BsmMessageContent": [ - { - "metadata": { - "utctimestamp": "2020-10-01T00:00:00.000Z", - "originRsu": originIp, - }, - "payload": "00131A604A380583702005837800080008100000040583705043002580", - } - ] - } - value_return = MagicMock() - value_return.decode.return_value = "test" - msg = MagicMock() - msg.value.return_value = value_return - - # call - kafkaMessageCounter.process_message(msg) - - # check - assert kafkaMessageCounter.rsu_count_dict["Unknown"][originIp] == 1 - mock_logging.warning.assert_not_called() - mock_logging.error.assert_not_called() - mock_json.loads.assert_called_once_with("test") - - -@patch('addons.images.count_metric.kafka_counter.logging') -@patch('addons.images.count_metric.kafka_counter.json') -def test_process_message_with_type0_kmc_malformed_message(mock_json, mock_logging): - # prepare - kafkaMessageCounter = createKafkaMessageCounter(0) - kafka_counter.json.loads.return_value = { - "BsmMessageContent": [ - { - "metadata": {"utctimestamp": "2020-10-01T00:00:00.000Z"}, - "payload": "00131A604A380583702005837800080008100000040583705043002580", - } - ] - } - value_return = MagicMock() - value_return.decode.return_value = "test" - msg = MagicMock() - msg.value.return_value = value_return - - # call - kafkaMessageCounter.process_message(msg) - - # check - assert kafkaMessageCounter.rsu_count_dict["Unknown"]["noIP"] == 2 - mock_logging.warning.assert_called_once() - mock_logging.error.assert_not_called() - mock_json.loads.assert_called_once_with("test") - - -@patch('addons.images.count_metric.kafka_counter.logging') -@patch('addons.images.count_metric.kafka_counter.json') -def test_process_message_with_type1_kmc_origin_ip_present_success(mock_json, mock_logging): - # prepare - kafkaMessageCounter = createKafkaMessageCounter(1) - originIp = "192.168.0.5" - kafka_counter.json.loads.return_value = { - "metadata": {"utctimestamp": "2020-10-01T00:00:00.000Z", "originIp": originIp}, - "payload": "00131A604A380583702005837800080008100000040583705043002580", - } - value_return = MagicMock() - value_return.decode.return_value = "test" - msg = MagicMock() - msg.value.return_value = value_return - - # call - kafkaMessageCounter.process_message(msg) - - # check - assert kafkaMessageCounter.rsu_count_dict["Unknown"][originIp] == 1 - mock_logging.warning.assert_not_called() - mock_logging.error.assert_not_called() - mock_json.loads.assert_called_once_with("test") - - -@patch('addons.images.count_metric.kafka_counter.logging') -@patch('addons.images.count_metric.kafka_counter.json') -def test_process_message_with_type1_kmc_malformed_message(mock_json, mock_logging): - # prepare - kafkaMessageCounter = createKafkaMessageCounter(1) - kafka_counter.json.loads.return_value = { - "metadata": {"utctimestamp": "2020-10-01T00:00:00.000Z"}, - "payload": "00131A604A380583702005837800080008100000040583705043002580", - } - value_return = MagicMock() - value_return.decode.return_value = "test" - msg = MagicMock() - msg.value.return_value = value_return - - # call - kafkaMessageCounter.process_message(msg) - - # check - assert kafkaMessageCounter.rsu_count_dict["Unknown"]["noIP"] == 2 - mock_logging.warning.assert_called_once() - mock_logging.error.assert_not_called() - mock_json.loads.assert_called_once_with("test") - - -@patch('addons.images.count_metric.kafka_counter.logging') -def 
test_process_message_exception(mock_logging): - # prepare - kafkaMessageCounter = createKafkaMessageCounter(0) - - # call - message = "" - kafkaMessageCounter.process_message(message) - - # check - assert kafkaMessageCounter.rsu_count_dict["Unknown"]["noIP"] == 1 - mock_logging.warning.assert_not_called() - mock_logging.error.assert_called_once() - - -@patch('addons.images.count_metric.kafka_counter.logging') -@patch('addons.images.count_metric.kafka_counter.Consumer') -def test_listen_for_message_and_process_success(mock_Consumer, mock_logging): - # prepare - kafkaMessageCounter = createKafkaMessageCounter(0) - proc_msg = MagicMock() - proc_msg.side_effect = [True, False] - kafkaMessageCounter.should_run = proc_msg - kafkaMessageCounter.process_message = MagicMock() - - kafkaConsumer = MagicMock() - mock_Consumer.return_value = kafkaConsumer - msg = MagicMock() - kafkaConsumer.poll.return_value = msg - msg.error.return_value = None - - # call - topic = "test" - bootstrap_servers = "test" - kafkaMessageCounter.listen_for_message_and_process(topic, bootstrap_servers) - - # check - kafkaMessageCounter.process_message.assert_called_once() - mock_logging.warning.assert_called_once() - kafkaConsumer.poll.assert_called_once() - kafkaConsumer.close.assert_called_once() - - -@patch('addons.images.count_metric.kafka_counter.logging') -@patch('addons.images.count_metric.kafka_counter.Consumer') -def test_listen_for_message_and_process_eof(mock_Consumer, mock_logging): - # prepare - kafkaMessageCounter = createKafkaMessageCounter(0) - proc_msg = MagicMock() - proc_msg.side_effect = [True, False] - kafkaMessageCounter.should_run = proc_msg - kafkaMessageCounter.process_message = MagicMock() - - kafkaConsumer = MagicMock() - mock_Consumer.return_value = kafkaConsumer - msg = MagicMock() - kafkaConsumer.poll.return_value = msg - msg_code = MagicMock() - msg_code.code.return_value = KafkaError._PARTITION_EOF - msg.error.return_value = msg_code - msg.topic.return_value = 'test' - - # call - topic = "test" - bootstrap_servers = "test" - kafkaMessageCounter.listen_for_message_and_process(topic, bootstrap_servers) - - # check - expected_calls = [ - call('Topic test [1] reached end at offset 1\n'), - call('0: Disconnected from Kafka topic, reconnecting...') - ] - kafkaMessageCounter.process_message.assert_not_called() - mock_logging.warning.assert_has_calls(expected_calls) - kafkaConsumer.close.assert_called_once() - -@patch('addons.images.count_metric.kafka_counter.logging') -@patch('addons.images.count_metric.kafka_counter.Consumer') -def test_listen_for_message_and_process_error(mock_Consumer, mock_logging): - # prepare - kafkaMessageCounter = createKafkaMessageCounter(0) - proc_msg = MagicMock() - proc_msg.side_effect = [True, False] - kafkaMessageCounter.should_run = proc_msg - kafkaMessageCounter.process_message = MagicMock() - - kafkaConsumer = MagicMock() - mock_Consumer.return_value = kafkaConsumer - msg = MagicMock() - kafkaConsumer.poll.return_value = msg - msg_code = MagicMock() - msg_code.code.return_value = None - msg.error.return_value = msg_code - - # call and verify it raises the exception - with pytest.raises(KafkaException): - topic = "test" - bootstrap_servers = "test" - kafkaMessageCounter.listen_for_message_and_process(topic, bootstrap_servers) - - kafkaMessageCounter.process_message.assert_not_called() - mock_logging.warning.assert_called_with('0: Disconnected from Kafka topic, reconnecting...') - kafkaConsumer.close.assert_called_once() - - -def test_get_topic_from_type_success(): - # 
prepare - kafkaMessageCounterType0 = createKafkaMessageCounter(0) - kafkaMessageCounterType1 = createKafkaMessageCounter(1) - - # call - topicType0 = kafkaMessageCounterType0.get_topic_from_type() - topicType1 = kafkaMessageCounterType1.get_topic_from_type() - - # check - messageType = "bsm" - expectedTopicType0 = f"topic.OdeRawEncoded{messageType.upper()}Json" - expectedTopicType1 = f"topic.Ode{messageType.capitalize()}Json" - assert topicType0 == expectedTopicType0 - assert topicType1 == expectedTopicType1 - - -# # PROBLEM TEST -def test_read_topic_success(): - # prepare - kafkaMessageCounter = createKafkaMessageCounter(0) - kafkaMessageCounter.get_topic_from_type = MagicMock() - kafkaMessageCounter.get_topic_from_type.return_value = "test" - os.environ["ODE_KAFKA_BROKERS"] = "test" - kafkaMessageCounter.listen_for_message_and_process = MagicMock() - kafkaMessageCounter.should_run = MagicMock() - kafkaMessageCounter.should_run.side_effect = [True, False] - - # call - kafkaMessageCounter.read_topic() - - # check - kafkaMessageCounter.get_topic_from_type.assert_called_once() - kafkaMessageCounter.listen_for_message_and_process.assert_called_once_with( - "test", "test" - ) - - -def test_start_counter_success(): - # prepare - kafkaMessageCounter = createKafkaMessageCounter(0) - kafka_counter.logging = MagicMock() - kafka_counter.logging.info = MagicMock() - kafka_counter.BackgroundScheduler = MagicMock() - kafka_counter.BackgroundScheduler.return_value = MagicMock() - kafka_counter.BackgroundScheduler.return_value.add_job = MagicMock() - kafka_counter.BackgroundScheduler.return_value.start = MagicMock() - kafkaMessageCounter.read_topic = MagicMock() - - # call - kafkaMessageCounter.start_counter() - - # check - kafka_counter.BackgroundScheduler.assert_called_once() - kafka_counter.BackgroundScheduler.return_value.add_job.assert_called_once() - kafka_counter.BackgroundScheduler.return_value.start.assert_called_once() - kafka_counter.logging.info.assert_called_once() - kafkaMessageCounter.read_topic.assert_called_once() +import os +import pytest +from mock import call, MagicMock, patch +from confluent_kafka import KafkaError, KafkaException +from addons.images.count_metric import kafka_counter + + +def createKafkaMessageCounter(type: int): + kafka_counter.bigquery.Client = MagicMock() + kafka_counter.bigquery.Client.return_value = MagicMock() + kafka_counter.bigquery.Client.return_value.query = MagicMock() + kafka_counter.bigquery.Client.return_value.query.return_value.result = MagicMock() + kafka_counter.bigquery.Client.return_value.query.return_value.result.return_value.total_rows = ( + 1 + ) + kafka_counter.pymongo.MongoClient = MagicMock() + kafka_counter.pymongo.MongoClient.return_value = MagicMock() + kafka_counter.bigquery.Client.__getitem__.return_value.__getitem__.return_value = ( + MagicMock() + ) + kafka_counter.bigquery.Client.__getitem__.return_value.__getitem__.return_value.insert_many.return_value = ( + MagicMock() + ) + thread_id = 0 + message_type = "bsm" + rsu_location_dict = {"noIP": "Unknown"} + rsu_count_dict = {"Unknown": {"noIP": 1}} + rsu_count_dict_zero = {"Unknown": {"noIP": 0}} + newKafkaMessageCounter = kafka_counter.KafkaMessageCounter( + thread_id, + message_type, + rsu_location_dict, + rsu_count_dict, + rsu_count_dict_zero, + type, + ) + + return newKafkaMessageCounter + + +def test_write_bq_with_type0_kmc_success(): + # prepare + os.environ["DESTINATION_DB"] = "BIGQUERY" + os.environ["KAFKA_BIGQUERY_TABLENAME"] = "test" + kafkaMessageCounterType0 = 
createKafkaMessageCounter(0) + kafka_counter.logging = MagicMock() + kafka_counter.logging.info = MagicMock() + + # call + query_values = "test" + kafkaMessageCounterType0.write_bigquery(query_values) + + # check + targetTable = os.getenv("KAFKA_BIGQUERY_TABLENAME") + expectedArgument = f"INSERT INTO `{targetTable}`(RSU, Road, Date, Type, Count) VALUES {query_values}" + kafkaMessageCounterType0.bq_client.query.assert_called_once_with(expectedArgument) + kafkaMessageCounterType0.bq_client.query.return_value.result.assert_called_once() + kafka_counter.logging.info.assert_called_once() + + +def test_write_bq_with_type1_kmc_success(): + # prepare + os.environ["DESTINATION_DB"] = "BIGQUERY" + os.environ["PUBSUB_BIGQUERY_TABLENAME"] = "test" + kafkaMessageCounterType1 = createKafkaMessageCounter(1) + kafka_counter.logging = MagicMock() + kafka_counter.logging.info = MagicMock() + + # call + query_values = "test" + kafkaMessageCounterType1.write_bigquery(query_values) + + # check + targetTable = os.getenv("PUBSUB_BIGQUERY_TABLENAME") + expectedArgument = f"INSERT INTO `{targetTable}`(RSU, Road, Date, Type, Count) VALUES {query_values}" + kafkaMessageCounterType1.bq_client.query.assert_called_once_with(expectedArgument) + kafkaMessageCounterType1.bq_client.query.return_value.result.assert_called_once() + kafka_counter.logging.info.assert_called_once() + + +def test_push_metrics_bq_success(): + os.environ["DESTINATION_DB"] = "BIGQUERY" + os.environ["PUBSUB_BIGQUERY_TABLENAME"] = "test" + # prepare + kafkaMessageCounter = createKafkaMessageCounter(0) + kafkaMessageCounter.write_bigquery = MagicMock() + + # call + kafkaMessageCounter.push_metrics() + + # check + kafkaMessageCounter.write_bigquery.assert_called_once() + + +def test_push_metrics_bq_exception(): + os.environ["DESTINATION_DB"] = "BIGQUERY" + os.environ["PUBSUB_BIGQUERY_TABLENAME"] = "test" + # prepare + kafkaMessageCounter = createKafkaMessageCounter(0) + kafkaMessageCounter.write_bigquery = MagicMock() + kafkaMessageCounter.write_bigquery.side_effect = Exception("test") + kafka_counter.logging = MagicMock() + kafka_counter.logging.error = MagicMock() + + # call + kafkaMessageCounter.push_metrics() + + # check + kafkaMessageCounter.write_bigquery.assert_called_once() + kafka_counter.logging.error.assert_called_once() + + +def test_write_mongo_with_type0_kmc_success(): + # prepare + os.environ["DESTINATION_DB"] = "MONGODB" + os.environ["MONGO_DB_URI"] = "URI" + os.environ["INPUT_COUNTS_MONGO_COLLECTION_NAME"] = "test_input" + kafkaMessageCounterType0 = createKafkaMessageCounter(0) + kafka_counter.logging = MagicMock() + kafka_counter.logging.info = MagicMock() + + # call + test_doc = {"test": "doc"} + kafkaMessageCounterType0.write_mongo(test_doc) + + # check + kafkaMessageCounterType0.mongo_client[os.getenv("MONGO_DB_NAME")][ + os.getenv("INPUT_COUNTS_MONGO_COLLECTION_NAME") + ].insert_many.assert_called_once_with(test_doc) + kafka_counter.logging.info.assert_called_once() + + +def test_write_mongo_with_type1_kmc_success(): + # prepare + os.environ["DESTINATION_DB"] = "MONGODB" + os.environ["MONGO_DB_URI"] = "URI" + os.environ["OUTPUT_COUNTS_MONGO_COLLECTION_NAME"] = "test_output" + kafkaMessageCounterType1 = createKafkaMessageCounter(1) + kafka_counter.logging = MagicMock() + kafka_counter.logging.info = MagicMock() + + # call + test_doc = {"test": "doc"} + kafkaMessageCounterType1.write_mongo(test_doc) + + # check + kafkaMessageCounterType1.mongo_client[os.getenv("MONGO_DB_NAME")][ + os.getenv("OUTPUT_COUNTS_MONGO_COLLECTION_NAME") + 
].insert_many.assert_called_once_with(test_doc) + kafka_counter.logging.info.assert_called_once() + + +def test_push_metrics_mongo_success(): + os.environ["DESTINATION_DB"] = "MONGODB" + os.environ["MONGO_DB_URI"] = "URI" + os.environ["INPUT_COUNTS_MONGO_COLLECTION_NAME"] = "test_input" + # prepare + kafkaMessageCounter = createKafkaMessageCounter(0) + kafkaMessageCounter.write_mongo = MagicMock() + + # call + kafkaMessageCounter.push_metrics() + + # check + kafkaMessageCounter.write_mongo.assert_called_once() + + +def test_push_metrics_mongo_exception(): + os.environ["DESTINATION_DB"] = "MONGODB" + os.environ["MONGO_DB_URI"] = "URI" + os.environ["INPUT_COUNTS_MONGO_COLLECTION_NAME"] = "test_input" + # prepare + kafkaMessageCounter = createKafkaMessageCounter(0) + kafkaMessageCounter.write_mongo = MagicMock() + kafkaMessageCounter.write_mongo.side_effect = Exception("test") + kafka_counter.logging = MagicMock() + kafka_counter.logging.error = MagicMock() + + # call + kafkaMessageCounter.push_metrics() + + # check + kafkaMessageCounter.write_mongo.assert_called_once() + kafka_counter.logging.error.assert_called_once() + + +@patch("addons.images.count_metric.kafka_counter.logging") +@patch("addons.images.count_metric.kafka_counter.json") +def test_process_message_with_type0_kmc_origin_ip_present_success( + mock_json, mock_logging +): + kafkaMessageCounter = createKafkaMessageCounter(0) + originIp = "192.168.0.5" + mock_json.loads.return_value = { + "BsmMessageContent": [ + { + "metadata": { + "utctimestamp": "2020-10-01T00:00:00.000Z", + "originRsu": originIp, + }, + "payload": "00131A604A380583702005837800080008100000040583705043002580", + } + ] + } + value_return = MagicMock() + value_return.decode.return_value = "test" + msg = MagicMock() + msg.value.return_value = value_return + + # call + kafkaMessageCounter.process_message(msg) + + # check + assert kafkaMessageCounter.rsu_count_dict["Unknown"][originIp] == 1 + mock_logging.warning.assert_not_called() + mock_logging.error.assert_not_called() + mock_json.loads.assert_called_once_with("test") + + +@patch("addons.images.count_metric.kafka_counter.logging") +@patch("addons.images.count_metric.kafka_counter.json") +def test_process_message_with_type0_kmc_malformed_message(mock_json, mock_logging): + # prepare + kafkaMessageCounter = createKafkaMessageCounter(0) + kafka_counter.json.loads.return_value = { + "BsmMessageContent": [ + { + "metadata": {"utctimestamp": "2020-10-01T00:00:00.000Z"}, + "payload": "00131A604A380583702005837800080008100000040583705043002580", + } + ] + } + value_return = MagicMock() + value_return.decode.return_value = "test" + msg = MagicMock() + msg.value.return_value = value_return + + # call + kafkaMessageCounter.process_message(msg) + + # check + assert kafkaMessageCounter.rsu_count_dict["Unknown"]["noIP"] == 2 + mock_logging.warning.assert_called_once() + mock_logging.error.assert_not_called() + mock_json.loads.assert_called_once_with("test") + + +@patch("addons.images.count_metric.kafka_counter.logging") +@patch("addons.images.count_metric.kafka_counter.json") +def test_process_message_with_type1_kmc_origin_ip_present_success( + mock_json, mock_logging +): + # prepare + kafkaMessageCounter = createKafkaMessageCounter(1) + originIp = "192.168.0.5" + kafka_counter.json.loads.return_value = { + "metadata": {"utctimestamp": "2020-10-01T00:00:00.000Z", "originIp": originIp}, + "payload": "00131A604A380583702005837800080008100000040583705043002580", + } + value_return = MagicMock() + value_return.decode.return_value = 
"test" + msg = MagicMock() + msg.value.return_value = value_return + + # call + kafkaMessageCounter.process_message(msg) + + # check + assert kafkaMessageCounter.rsu_count_dict["Unknown"][originIp] == 1 + mock_logging.warning.assert_not_called() + mock_logging.error.assert_not_called() + mock_json.loads.assert_called_once_with("test") + + +@patch("addons.images.count_metric.kafka_counter.logging") +@patch("addons.images.count_metric.kafka_counter.json") +def test_process_message_with_type1_kmc_malformed_message(mock_json, mock_logging): + # prepare + kafkaMessageCounter = createKafkaMessageCounter(1) + kafka_counter.json.loads.return_value = { + "metadata": {"utctimestamp": "2020-10-01T00:00:00.000Z"}, + "payload": "00131A604A380583702005837800080008100000040583705043002580", + } + value_return = MagicMock() + value_return.decode.return_value = "test" + msg = MagicMock() + msg.value.return_value = value_return + + # call + kafkaMessageCounter.process_message(msg) + + # check + assert kafkaMessageCounter.rsu_count_dict["Unknown"]["noIP"] == 2 + mock_logging.warning.assert_called_once() + mock_logging.error.assert_not_called() + mock_json.loads.assert_called_once_with("test") + + +@patch("addons.images.count_metric.kafka_counter.logging") +def test_process_message_exception(mock_logging): + # prepare + kafkaMessageCounter = createKafkaMessageCounter(0) + + # call + message = "" + kafkaMessageCounter.process_message(message) + + # check + assert kafkaMessageCounter.rsu_count_dict["Unknown"]["noIP"] == 1 + mock_logging.warning.assert_not_called() + mock_logging.error.assert_called_once() + + +@patch("addons.images.count_metric.kafka_counter.logging") +@patch("addons.images.count_metric.kafka_counter.Consumer") +def test_listen_for_message_and_process_success(mock_Consumer, mock_logging): + # prepare + kafkaMessageCounter = createKafkaMessageCounter(0) + proc_msg = MagicMock() + proc_msg.side_effect = [True, False] + kafkaMessageCounter.should_run = proc_msg + kafkaMessageCounter.process_message = MagicMock() + + kafkaConsumer = MagicMock() + mock_Consumer.return_value = kafkaConsumer + msg = MagicMock() + kafkaConsumer.poll.return_value = msg + msg.error.return_value = None + + # call + topic = "test" + bootstrap_servers = "test" + kafkaMessageCounter.listen_for_message_and_process(topic, bootstrap_servers) + + # check + kafkaMessageCounter.process_message.assert_called_once() + mock_logging.warning.assert_called_once() + kafkaConsumer.poll.assert_called_once() + kafkaConsumer.close.assert_called_once() + + +@patch("addons.images.count_metric.kafka_counter.logging") +@patch("addons.images.count_metric.kafka_counter.Consumer") +def test_listen_for_message_and_process_eof(mock_Consumer, mock_logging): + # prepare + kafkaMessageCounter = createKafkaMessageCounter(0) + proc_msg = MagicMock() + proc_msg.side_effect = [True, False] + kafkaMessageCounter.should_run = proc_msg + kafkaMessageCounter.process_message = MagicMock() + + kafkaConsumer = MagicMock() + mock_Consumer.return_value = kafkaConsumer + msg = MagicMock() + kafkaConsumer.poll.return_value = msg + msg_code = MagicMock() + msg_code.code.return_value = KafkaError._PARTITION_EOF + msg.error.return_value = msg_code + msg.topic.return_value = "test" + + # call + topic = "test" + bootstrap_servers = "test" + kafkaMessageCounter.listen_for_message_and_process(topic, bootstrap_servers) + + # check + expected_calls = [ + call("Topic test [1] reached end at offset 1\n"), + call("0: Disconnected from Kafka topic, reconnecting..."), + ] + 
kafkaMessageCounter.process_message.assert_not_called() + mock_logging.warning.assert_has_calls(expected_calls) + kafkaConsumer.close.assert_called_once() + + +@patch("addons.images.count_metric.kafka_counter.logging") +@patch("addons.images.count_metric.kafka_counter.Consumer") +def test_listen_for_message_and_process_error(mock_Consumer, mock_logging): + # prepare + kafkaMessageCounter = createKafkaMessageCounter(0) + proc_msg = MagicMock() + proc_msg.side_effect = [True, False] + kafkaMessageCounter.should_run = proc_msg + kafkaMessageCounter.process_message = MagicMock() + + kafkaConsumer = MagicMock() + mock_Consumer.return_value = kafkaConsumer + msg = MagicMock() + kafkaConsumer.poll.return_value = msg + msg_code = MagicMock() + msg_code.code.return_value = None + msg.error.return_value = msg_code + + # call and verify it raises the exception + with pytest.raises(KafkaException): + topic = "test" + bootstrap_servers = "test" + kafkaMessageCounter.listen_for_message_and_process(topic, bootstrap_servers) + + kafkaMessageCounter.process_message.assert_not_called() + mock_logging.warning.assert_called_with( + "0: Disconnected from Kafka topic, reconnecting..." + ) + kafkaConsumer.close.assert_called_once() + + +def test_get_topic_from_type_success(): + # prepare + kafkaMessageCounterType0 = createKafkaMessageCounter(0) + kafkaMessageCounterType1 = createKafkaMessageCounter(1) + + # call + topicType0 = kafkaMessageCounterType0.get_topic_from_type() + topicType1 = kafkaMessageCounterType1.get_topic_from_type() + + # check + messageType = "bsm" + expectedTopicType0 = f"topic.OdeRawEncoded{messageType.upper()}Json" + expectedTopicType1 = f"topic.Ode{messageType.capitalize()}Json" + assert topicType0 == expectedTopicType0 + assert topicType1 == expectedTopicType1 + + +# # PROBLEM TEST +def test_read_topic_success(): + # prepare + kafkaMessageCounter = createKafkaMessageCounter(0) + kafkaMessageCounter.get_topic_from_type = MagicMock() + kafkaMessageCounter.get_topic_from_type.return_value = "test" + os.environ["ODE_KAFKA_BROKERS"] = "test" + kafkaMessageCounter.listen_for_message_and_process = MagicMock() + kafkaMessageCounter.should_run = MagicMock() + kafkaMessageCounter.should_run.side_effect = [True, False] + + # call + kafkaMessageCounter.read_topic() + + # check + kafkaMessageCounter.get_topic_from_type.assert_called_once() + kafkaMessageCounter.listen_for_message_and_process.assert_called_once_with( + "test", "test" + ) + + +def test_start_counter_success(): + # prepare + kafkaMessageCounter = createKafkaMessageCounter(0) + kafka_counter.logging = MagicMock() + kafka_counter.logging.info = MagicMock() + kafka_counter.BackgroundScheduler = MagicMock() + kafka_counter.BackgroundScheduler.return_value = MagicMock() + kafka_counter.BackgroundScheduler.return_value.add_job = MagicMock() + kafka_counter.BackgroundScheduler.return_value.start = MagicMock() + kafkaMessageCounter.read_topic = MagicMock() + + # call + kafkaMessageCounter.start_counter() + + # check + kafka_counter.BackgroundScheduler.assert_called_once() + kafka_counter.BackgroundScheduler.return_value.add_job.assert_called_once() + kafka_counter.BackgroundScheduler.return_value.start.assert_called_once() + kafka_counter.logging.info.assert_called_once() + kafkaMessageCounter.read_topic.assert_called_once() diff --git a/services/addons/tests/firmware_manager/test_commsignia_upgrader.py b/services/addons/tests/firmware_manager/test_commsignia_upgrader.py index afe42a68e..c3810b52d 100644 --- 
a/services/addons/tests/firmware_manager/test_commsignia_upgrader.py +++ b/services/addons/tests/firmware_manager/test_commsignia_upgrader.py @@ -4,158 +4,165 @@ from addons.images.firmware_manager.commsignia_upgrader import CommsigniaUpgrader test_upgrade_info = { - "ipv4_address": "8.8.8.8", - "manufacturer": "test-manufacturer", - "model": "test-model", - "ssh_username": "test-user", - "ssh_password": "test-psw", - "target_firmware_id": 4, - "target_firmware_version": "1.0.0", - "install_package": "firmware_package.tar" + "ipv4_address": "8.8.8.8", + "manufacturer": "test-manufacturer", + "model": "test-model", + "ssh_username": "test-user", + "ssh_password": "test-psw", + "target_firmware_id": 4, + "target_firmware_version": "1.0.0", + "install_package": "firmware_package.tar", } + def test_commsignia_upgrader_init(): - test_commsignia_upgrader = CommsigniaUpgrader(test_upgrade_info) - assert test_commsignia_upgrader.install_package == "firmware_package.tar" - assert test_commsignia_upgrader.blob_name == "test-manufacturer/test-model/1.0.0/firmware_package.tar" - assert test_commsignia_upgrader.local_file_name == "/home/8.8.8.8/firmware_package.tar" - assert test_commsignia_upgrader.rsu_ip == "8.8.8.8" - assert test_commsignia_upgrader.ssh_username == "test-user" - assert test_commsignia_upgrader.ssh_password == "test-psw" - -@patch('addons.images.firmware_manager.commsignia_upgrader.SCPClient') -@patch('addons.images.firmware_manager.commsignia_upgrader.SSHClient') + test_commsignia_upgrader = CommsigniaUpgrader(test_upgrade_info) + assert test_commsignia_upgrader.install_package == "firmware_package.tar" + assert test_commsignia_upgrader.root_path == "/home/8.8.8.8" + assert ( + test_commsignia_upgrader.blob_name + == "test-manufacturer/test-model/1.0.0/firmware_package.tar" + ) + assert ( + test_commsignia_upgrader.local_file_name == "/home/8.8.8.8/firmware_package.tar" + ) + assert test_commsignia_upgrader.rsu_ip == "8.8.8.8" + assert test_commsignia_upgrader.ssh_username == "test-user" + assert test_commsignia_upgrader.ssh_password == "test-psw" + + +@patch("addons.images.firmware_manager.commsignia_upgrader.SCPClient") +@patch("addons.images.firmware_manager.commsignia_upgrader.SSHClient") def test_commsignia_upgrader_upgrade_success(mock_sshclient, mock_scpclient): - # Mock SSH Client and successful firmware upgrade return value - sshclient_obj = mock_sshclient.return_value - _stdout = MagicMock() - sshclient_obj.exec_command.return_value = MagicMock(), _stdout, MagicMock() - _stdout.read.return_value.decode.return_value = "ALL OK" - - # Mock SCP Client - scpclient_obj = mock_scpclient.return_value - - test_commsignia_upgrader = CommsigniaUpgrader(test_upgrade_info) - test_commsignia_upgrader.download_blob = MagicMock() - test_commsignia_upgrader.cleanup = MagicMock() - notify = MagicMock() - test_commsignia_upgrader.notify_firmware_manager = notify - - test_commsignia_upgrader.upgrade() - - # Assert initial SSH connection - sshclient_obj.set_missing_host_key_policy.assert_called_with(WarningPolicy) - sshclient_obj.connect.assert_called_with( - "8.8.8.8", - username="test-user", - password="test-psw", - look_for_keys=False, - allow_agent=False + # Mock SSH Client and successful firmware upgrade return value + sshclient_obj = mock_sshclient.return_value + _stdout = MagicMock() + sshclient_obj.exec_command.return_value = MagicMock(), _stdout, MagicMock() + _stdout.read.return_value.decode.return_value = "ALL OK" + + # Mock SCP Client + scpclient_obj = mock_scpclient.return_value + 
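+    # The blob download, cleanup, and firmware manager notification hooks are
+    # stubbed out below so that only the SSH/SCP upgrade flow is exercised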
+ test_commsignia_upgrader = CommsigniaUpgrader(test_upgrade_info) + test_commsignia_upgrader.download_blob = MagicMock() + test_commsignia_upgrader.cleanup = MagicMock() + notify = MagicMock() + test_commsignia_upgrader.notify_firmware_manager = notify + + test_commsignia_upgrader.upgrade() + + # Assert initial SSH connection + sshclient_obj.set_missing_host_key_policy.assert_called_with(WarningPolicy) + sshclient_obj.connect.assert_called_with( + "8.8.8.8", + username="test-user", + password="test-psw", + look_for_keys=False, + allow_agent=False, + ) + + # Assert SCP file transfer + mock_scpclient.assert_called_with(sshclient_obj.get_transport()) + scpclient_obj.put.assert_called_with( + "/home/8.8.8.8/firmware_package.tar", remote_path="/tmp/" ) + scpclient_obj.close.assert_called_with() - # Assert SCP file transfer - mock_scpclient.assert_called_with(sshclient_obj.get_transport()) - scpclient_obj.put.assert_called_with( - "/home/8.8.8.8/firmware_package.tar", - remote_path="/tmp/" - ) - scpclient_obj.close.assert_called_with() - - # Assert SSH firmware upgrade run - sshclient_obj.exec_command.assert_has_calls( - [ - call("signedUpgrade.sh /tmp/firmware_package.tar"), - call("reboot") - ] - ) - sshclient_obj.close.assert_called_with() - - # Assert notified success value - notify.assert_called_with(success=True) - -@patch('addons.images.firmware_manager.commsignia_upgrader.SCPClient') -@patch('addons.images.firmware_manager.commsignia_upgrader.SSHClient') + # Assert SSH firmware upgrade run + sshclient_obj.exec_command.assert_has_calls( + [call("signedUpgrade.sh /tmp/firmware_package.tar"), call("reboot")] + ) + sshclient_obj.close.assert_called_with() + + # Assert notified success value + notify.assert_called_with(success=True) + + +@patch("addons.images.firmware_manager.commsignia_upgrader.SCPClient") +@patch("addons.images.firmware_manager.commsignia_upgrader.SSHClient") def test_commsignia_upgrader_upgrade_fail(mock_sshclient, mock_scpclient): - # Mock SSH Client and failed firmware upgrade return value - sshclient_obj = mock_sshclient.return_value - _stdout = MagicMock() - sshclient_obj.exec_command.return_value = MagicMock(), _stdout, MagicMock() - _stdout.read.return_value.decode.return_value = "NOT OK TEST" - - # Mock SCP Client - scpclient_obj = mock_scpclient.return_value - - test_commsignia_upgrader = CommsigniaUpgrader(test_upgrade_info) - test_commsignia_upgrader.download_blob = MagicMock() - test_commsignia_upgrader.cleanup = MagicMock() - notify = MagicMock() - test_commsignia_upgrader.notify_firmware_manager = notify - - test_commsignia_upgrader.upgrade() - - # Assert initial SSH connection - sshclient_obj.set_missing_host_key_policy.assert_called_with(WarningPolicy) - sshclient_obj.connect.assert_called_with( - "8.8.8.8", - username="test-user", - password="test-psw", - look_for_keys=False, - allow_agent=False + # Mock SSH Client and failed firmware upgrade return value + sshclient_obj = mock_sshclient.return_value + _stdout = MagicMock() + sshclient_obj.exec_command.return_value = MagicMock(), _stdout, MagicMock() + _stdout.read.return_value.decode.return_value = "NOT OK TEST" + + # Mock SCP Client + scpclient_obj = mock_scpclient.return_value + + test_commsignia_upgrader = CommsigniaUpgrader(test_upgrade_info) + test_commsignia_upgrader.download_blob = MagicMock() + test_commsignia_upgrader.cleanup = MagicMock() + notify = MagicMock() + test_commsignia_upgrader.notify_firmware_manager = notify + + test_commsignia_upgrader.upgrade() + + # Assert initial SSH connection + 
sshclient_obj.set_missing_host_key_policy.assert_called_with(WarningPolicy) + sshclient_obj.connect.assert_called_with( + "8.8.8.8", + username="test-user", + password="test-psw", + look_for_keys=False, + allow_agent=False, ) - # Assert SCP file transfer - mock_scpclient.assert_called_with(sshclient_obj.get_transport()) - scpclient_obj.put.assert_called_with( - "/home/8.8.8.8/firmware_package.tar", - remote_path="/tmp/" - ) - scpclient_obj.close.assert_called_with() - - # Assert SSH firmware upgrade run - sshclient_obj.exec_command.assert_has_calls( - [ - call("signedUpgrade.sh /tmp/firmware_package.tar") - ] - ) - sshclient_obj.close.assert_called_with() - - # Assert notified success value - notify.assert_called_with(success=False) - -@patch('addons.images.firmware_manager.commsignia_upgrader.logging') -@patch('addons.images.firmware_manager.commsignia_upgrader.SCPClient') -@patch('addons.images.firmware_manager.commsignia_upgrader.SSHClient') -def test_commsignia_upgrader_upgrade_exception(mock_sshclient, mock_scpclient, mock_logging): - # Mock SSH Client and failed firmware upgrade return value - sshclient_obj = mock_sshclient.return_value - sshclient_obj.connect.side_effect = Exception("Exception test successfully passed") - - test_commsignia_upgrader = CommsigniaUpgrader(test_upgrade_info) - test_commsignia_upgrader.download_blob = MagicMock() - cleanup = MagicMock() - notify = MagicMock() - test_commsignia_upgrader.cleanup = cleanup - test_commsignia_upgrader.notify_firmware_manager = notify - - test_commsignia_upgrader.upgrade() - - # Assert initial SSH connection - sshclient_obj.set_missing_host_key_policy.assert_called_with(WarningPolicy) - sshclient_obj.connect.assert_called_with( - "8.8.8.8", - username="test-user", - password="test-psw", - look_for_keys=False, - allow_agent=False + # Assert SCP file transfer + mock_scpclient.assert_called_with(sshclient_obj.get_transport()) + scpclient_obj.put.assert_called_with( + "/home/8.8.8.8/firmware_package.tar", remote_path="/tmp/" ) + scpclient_obj.close.assert_called_with() - # Assert SCP file transfer doesn't occur - mock_scpclient.assert_not_called() + # Assert SSH firmware upgrade run + sshclient_obj.exec_command.assert_has_calls( + [call("signedUpgrade.sh /tmp/firmware_package.tar")] + ) + sshclient_obj.close.assert_called_with() + + # Assert notified success value + notify.assert_called_with(success=False) + + +@patch("addons.images.firmware_manager.commsignia_upgrader.logging") +@patch("addons.images.firmware_manager.commsignia_upgrader.SCPClient") +@patch("addons.images.firmware_manager.commsignia_upgrader.SSHClient") +def test_commsignia_upgrader_upgrade_exception( + mock_sshclient, mock_scpclient, mock_logging +): + # Mock SSH Client and failed firmware upgrade return value + sshclient_obj = mock_sshclient.return_value + sshclient_obj.connect.side_effect = Exception("Exception occurred during upgrade") + + test_commsignia_upgrader = CommsigniaUpgrader(test_upgrade_info) + test_commsignia_upgrader.download_blob = MagicMock() + cleanup = MagicMock() + notify = MagicMock() + test_commsignia_upgrader.cleanup = cleanup + test_commsignia_upgrader.notify_firmware_manager = notify + + test_commsignia_upgrader.upgrade() + + # Assert initial SSH connection + sshclient_obj.set_missing_host_key_policy.assert_called_with(WarningPolicy) + sshclient_obj.connect.assert_called_with( + "8.8.8.8", + username="test-user", + password="test-psw", + look_for_keys=False, + allow_agent=False, + ) + + # Assert SCP file transfer doesn't occur + 
mock_scpclient.assert_not_called() - # Assert SSH firmware upgrade run doesn't occur - sshclient_obj.exec_command.assert_not_called() + # Assert SSH firmware upgrade run doesn't occur + sshclient_obj.exec_command.assert_not_called() - # Assert exception was cleaned up and firmware manager was notified of upgrade failure - mock_logging.error.assert_called_with("Failed to perform firmware upgrade: Exception test successfully passed") - cleanup.assert_called_with() - notify.assert_called_with(success=False) \ No newline at end of file + # Assert exception was cleaned up and firmware manager was notified of upgrade failure + mock_logging.error.assert_called_with( + "Failed to perform firmware upgrade: Exception occurred during upgrade" + ) + cleanup.assert_called_with() + notify.assert_called_with(success=False) diff --git a/services/addons/tests/firmware_manager/test_download_blob.py b/services/addons/tests/firmware_manager/test_download_blob.py index a02888963..cc44ced9d 100644 --- a/services/addons/tests/firmware_manager/test_download_blob.py +++ b/services/addons/tests/firmware_manager/test_download_blob.py @@ -3,24 +3,28 @@ from addons.images.firmware_manager import download_blob -@patch.dict(os.environ, { - "GCP_PROJECT": "test-project", - "BLOB_STORAGE_BUCKET": "test-bucket" -}) -@patch('addons.images.firmware_manager.download_blob.logging') -@patch('addons.images.firmware_manager.download_blob.storage.Client') + +@patch.dict( + os.environ, {"GCP_PROJECT": "test-project", "BLOB_STORAGE_BUCKET": "test-bucket"} +) +@patch("addons.images.firmware_manager.download_blob.logging") +@patch("addons.images.firmware_manager.download_blob.storage.Client") def test_download_gcp_blob(mock_storage_client, mock_logging): - # mock - mock_client = mock_storage_client.return_value - mock_bucket = mock_client.get_bucket.return_value - mock_blob = mock_bucket.blob.return_value + # mock + mock_client = mock_storage_client.return_value + mock_bucket = mock_client.get_bucket.return_value + mock_blob = mock_bucket.blob.return_value - # run - download_blob.download_gcp_blob(blob_name="test.blob", destination_file_name="/home/test/") + # run + download_blob.download_gcp_blob( + blob_name="test.blob", destination_file_name="/home/test/" + ) - # validate - mock_storage_client.assert_called_with("test-project") - mock_client.get_bucket.assert_called_with("test-bucket") - mock_bucket.blob.assert_called_with("test.blob") - mock_blob.download_to_filename.assert_called_with("/home/test/") - mock_logging.info.assert_called_with("Downloaded storage object test.blob from bucket test-bucket to local file /home/test/.") + # validate + mock_storage_client.assert_called_with("test-project") + mock_client.get_bucket.assert_called_with("test-bucket") + mock_bucket.blob.assert_called_with("test.blob") + mock_blob.download_to_filename.assert_called_with("/home/test/") + mock_logging.info.assert_called_with( + "Downloaded storage object test.blob from bucket test-bucket to local file /home/test/." 
+ ) diff --git a/services/addons/tests/firmware_manager/test_firmware_manager.py b/services/addons/tests/firmware_manager/test_firmware_manager.py index 4e4ec3a90..3b50e7449 100644 --- a/services/addons/tests/firmware_manager/test_firmware_manager.py +++ b/services/addons/tests/firmware_manager/test_firmware_manager.py @@ -4,283 +4,504 @@ from addons.images.firmware_manager import firmware_manager + @patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {}) -@patch('addons.images.firmware_manager.firmware_manager.pgquery.query_db') +@patch("addons.images.firmware_manager.firmware_manager.pgquery.query_db") def test_get_rsu_upgrade_data_all(mock_querydb): - mock_querydb.return_value = [ - ({"ipv4_address":"8.8.8.8"},""), - ({"ipv4_address":"9.9.9.9"},"") - ] + mock_querydb.return_value = [ + ({"ipv4_address": "8.8.8.8"}, ""), + ({"ipv4_address": "9.9.9.9"}, ""), + ] + + result = firmware_manager.get_rsu_upgrade_data() - result = firmware_manager.get_rsu_upgrade_data() + mock_querydb.assert_called_with(fmv.all_rsus_query) + assert result == [{"ipv4_address": "8.8.8.8"}, {"ipv4_address": "9.9.9.9"}] - mock_querydb.assert_called_with(fmv.all_rsus_query) - assert result == [{"ipv4_address":"8.8.8.8"}, {"ipv4_address":"9.9.9.9"}] @patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {}) -@patch('addons.images.firmware_manager.firmware_manager.pgquery.query_db') +@patch("addons.images.firmware_manager.firmware_manager.pgquery.query_db") def test_get_rsu_upgrade_data_one(mock_querydb): - mock_querydb.return_value = [(fmv.rsu_info, "")] + mock_querydb.return_value = [(fmv.rsu_info, "")] - result = firmware_manager.get_rsu_upgrade_data(rsu_ip="8.8.8.8") + result = firmware_manager.get_rsu_upgrade_data(rsu_ip="8.8.8.8") + + expected_result = [fmv.rsu_info] + mock_querydb.assert_called_with(fmv.one_rsu_query) + assert result == expected_result - expected_result = [fmv.rsu_info] - mock_querydb.assert_called_with(fmv.one_rsu_query) - assert result == expected_result # init_firmware_upgrade tests + @patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {}) def test_init_firmware_upgrade_missing_rsu_ip(): - mock_flask_request = MagicMock() - mock_flask_request.get_json.return_value = {} - mock_flask_jsonify = MagicMock() - with patch("addons.images.firmware_manager.firmware_manager.request", mock_flask_request): - with patch("addons.images.firmware_manager.firmware_manager.jsonify", mock_flask_jsonify): - message, code = firmware_manager.init_firmware_upgrade() - - mock_flask_jsonify.assert_called_with({"error": "Missing 'rsu_ip' parameter"}) - assert code == 400 - -@patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {"8.8.8.8":{}}) + mock_flask_request = MagicMock() + mock_flask_request.get_json.return_value = {} + mock_flask_jsonify = MagicMock() + with patch( + "addons.images.firmware_manager.firmware_manager.request", mock_flask_request + ): + with patch( + "addons.images.firmware_manager.firmware_manager.jsonify", + mock_flask_jsonify, + ): + message, code = firmware_manager.init_firmware_upgrade() + + mock_flask_jsonify.assert_called_with( + {"error": "Missing 'rsu_ip' parameter"} + ) + assert code == 400 + + +@patch( + "addons.images.firmware_manager.firmware_manager.active_upgrades", {"8.8.8.8": {}} +) def test_init_firmware_upgrade_already_running(): - mock_flask_request = MagicMock() - mock_flask_request.get_json.return_value = {"rsu_ip":"8.8.8.8"} - mock_flask_jsonify = MagicMock() - with 
patch("addons.images.firmware_manager.firmware_manager.request", mock_flask_request): - with patch("addons.images.firmware_manager.firmware_manager.jsonify", mock_flask_jsonify): - message, code = firmware_manager.init_firmware_upgrade() + mock_flask_request = MagicMock() + mock_flask_request.get_json.return_value = {"rsu_ip": "8.8.8.8"} + mock_flask_jsonify = MagicMock() + with patch( + "addons.images.firmware_manager.firmware_manager.request", mock_flask_request + ): + with patch( + "addons.images.firmware_manager.firmware_manager.jsonify", + mock_flask_jsonify, + ): + message, code = firmware_manager.init_firmware_upgrade() + + mock_flask_jsonify.assert_called_with( + { + "error": f"Firmware upgrade failed to start for '8.8.8.8': an upgrade is already underway for the target device" + } + ) + assert code == 500 - mock_flask_jsonify.assert_called_with({"error": f"Firmware upgrade failed to start for '8.8.8.8': an upgrade is already underway for the target device"}) - assert code == 500 @patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {}) -@patch("addons.images.firmware_manager.firmware_manager.get_rsu_upgrade_data", MagicMock(return_value=[])) +@patch( + "addons.images.firmware_manager.firmware_manager.get_rsu_upgrade_data", + MagicMock(return_value=[]), +) def test_init_firmware_upgrade_no_eligible_upgrade(): - mock_flask_request = MagicMock() - mock_flask_request.get_json.return_value = {"rsu_ip":"8.8.8.8"} - mock_flask_jsonify = MagicMock() - with patch("addons.images.firmware_manager.firmware_manager.request", mock_flask_request): - with patch("addons.images.firmware_manager.firmware_manager.jsonify", mock_flask_jsonify): - message, code = firmware_manager.init_firmware_upgrade() + mock_flask_request = MagicMock() + mock_flask_request.get_json.return_value = {"rsu_ip": "8.8.8.8"} + mock_flask_jsonify = MagicMock() + with patch( + "addons.images.firmware_manager.firmware_manager.request", mock_flask_request + ): + with patch( + "addons.images.firmware_manager.firmware_manager.jsonify", + mock_flask_jsonify, + ): + message, code = firmware_manager.init_firmware_upgrade() + + mock_flask_jsonify.assert_called_with( + { + "error": f"Firmware upgrade failed to start for '8.8.8.8': the target firmware is already installed or is an invalid upgrade from the current firmware" + } + ) + assert code == 500 - mock_flask_jsonify.assert_called_with({"error": f"Firmware upgrade failed to start for '8.8.8.8': the target firmware is already installed or is an invalid upgrade from the current firmware"}) - assert code == 500 @patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {}) -@patch("addons.images.firmware_manager.firmware_manager.get_rsu_upgrade_data", MagicMock(return_value=[fmv.rsu_info])) -@patch('addons.images.firmware_manager.firmware_manager.logging') -@patch("addons.images.firmware_manager.firmware_manager.Popen", side_effect=Exception("Process failed to start")) +@patch( + "addons.images.firmware_manager.firmware_manager.get_rsu_upgrade_data", + MagicMock(return_value=[fmv.rsu_info]), +) +@patch("addons.images.firmware_manager.firmware_manager.logging") +@patch( + "addons.images.firmware_manager.firmware_manager.Popen", + side_effect=Exception("Process failed to start"), +) def test_init_firmware_upgrade_popen_fail(mock_popen, mock_logging): - mock_flask_request = MagicMock() - mock_flask_request.get_json.return_value = {"rsu_ip":"8.8.8.8"} - mock_flask_jsonify = MagicMock() - with 
patch("addons.images.firmware_manager.firmware_manager.request", mock_flask_request): - with patch("addons.images.firmware_manager.firmware_manager.jsonify", mock_flask_jsonify): - message, code = firmware_manager.init_firmware_upgrade() - - # Assert firmware upgrade process was started with expected arguments - expected_json_str = '\'{"ipv4_address": "8.8.8.8", "manufacturer": "Commsignia", "model": "ITS-RS4-M", ' \ - '"ssh_username": "user", "ssh_password": "psw", "target_firmware_id": 2, "target_firmware_version": "y20.39.0", ' \ - '"install_package": "install_package.tar"}\'' - mock_popen.assert_called_with(['python3', f'/home/commsignia_upgrader.py', expected_json_str], stdout=DEVNULL) - mock_logging.error.assert_called_with(f"Encountered error of type {Exception} while starting automatic upgrade process for 8.8.8.8: Process failed to start") + mock_flask_request = MagicMock() + mock_flask_request.get_json.return_value = {"rsu_ip": "8.8.8.8"} + mock_flask_jsonify = MagicMock() + with patch( + "addons.images.firmware_manager.firmware_manager.request", mock_flask_request + ): + with patch( + "addons.images.firmware_manager.firmware_manager.jsonify", + mock_flask_jsonify, + ): + message, code = firmware_manager.init_firmware_upgrade() + + # Assert firmware upgrade process was started with expected arguments + expected_json_str = ( + '\'{"ipv4_address": "8.8.8.8", "manufacturer": "Commsignia", "model": "ITS-RS4-M", ' + '"ssh_username": "user", "ssh_password": "psw", "target_firmware_id": 2, "target_firmware_version": "y20.39.0", ' + '"install_package": "install_package.tar"}\'' + ) + mock_popen.assert_called_with( + ["python3", f"/home/commsignia_upgrader.py", expected_json_str], + stdout=DEVNULL, + ) + mock_logging.error.assert_called_with( + f"Encountered error of type {Exception} while starting automatic upgrade process for 8.8.8.8: Process failed to start" + ) + + # Assert REST response is as expected from a successful run + mock_flask_jsonify.assert_called_with( + { + "error": f"Firmware upgrade failed to start for '8.8.8.8': upgrade process failed to run" + } + ) + assert code == 500 - # Assert REST response is as expected from a successful run - mock_flask_jsonify.assert_called_with({"error": f"Firmware upgrade failed to start for '8.8.8.8': upgrade process failed to run"}) - assert code == 500 @patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {}) -@patch("addons.images.firmware_manager.firmware_manager.get_rsu_upgrade_data", MagicMock(return_value=[fmv.rsu_info])) +@patch( + "addons.images.firmware_manager.firmware_manager.get_rsu_upgrade_data", + MagicMock(return_value=[fmv.rsu_info]), +) @patch("addons.images.firmware_manager.firmware_manager.Popen") def test_init_firmware_upgrade_popen_success(mock_popen): - mock_popen_obj = mock_popen.return_value - mock_flask_request = MagicMock() - mock_flask_request.get_json.return_value = {"rsu_ip":"8.8.8.8"} - mock_flask_jsonify = MagicMock() - with patch("addons.images.firmware_manager.firmware_manager.request", mock_flask_request): - with patch("addons.images.firmware_manager.firmware_manager.jsonify", mock_flask_jsonify): - message, code = firmware_manager.init_firmware_upgrade() - - # Assert firmware upgrade process was started with expected arguments - expected_json_str = '\'{"ipv4_address": "8.8.8.8", "manufacturer": "Commsignia", "model": "ITS-RS4-M", ' \ - '"ssh_username": "user", "ssh_password": "psw", "target_firmware_id": 2, "target_firmware_version": "y20.39.0", ' \ - '"install_package": 
"install_package.tar"}\'' - mock_popen.assert_called_with(['python3', f'/home/commsignia_upgrader.py', expected_json_str], stdout=DEVNULL) - - # Assert the process reference is successfully tracked in the active_upgrades dictionary - assert firmware_manager.active_upgrades['8.8.8.8']['process'] == mock_popen_obj + mock_popen_obj = mock_popen.return_value + mock_flask_request = MagicMock() + mock_flask_request.get_json.return_value = {"rsu_ip": "8.8.8.8"} + mock_flask_jsonify = MagicMock() + with patch( + "addons.images.firmware_manager.firmware_manager.request", mock_flask_request + ): + with patch( + "addons.images.firmware_manager.firmware_manager.jsonify", + mock_flask_jsonify, + ): + message, code = firmware_manager.init_firmware_upgrade() + + # Assert firmware upgrade process was started with expected arguments + expected_json_str = ( + '\'{"ipv4_address": "8.8.8.8", "manufacturer": "Commsignia", "model": "ITS-RS4-M", ' + '"ssh_username": "user", "ssh_password": "psw", "target_firmware_id": 2, "target_firmware_version": "y20.39.0", ' + '"install_package": "install_package.tar"}\'' + ) + mock_popen.assert_called_with( + ["python3", f"/home/commsignia_upgrader.py", expected_json_str], + stdout=DEVNULL, + ) + + # Assert the process reference is successfully tracked in the active_upgrades dictionary + assert ( + firmware_manager.active_upgrades["8.8.8.8"]["process"] == mock_popen_obj + ) + + # Assert REST response is as expected from a successful run + mock_flask_jsonify.assert_called_with( + {"message": f"Firmware upgrade started successfully for '8.8.8.8'"} + ) + assert code == 201 - # Assert REST response is as expected from a successful run - mock_flask_jsonify.assert_called_with({"message": f"Firmware upgrade started successfully for '8.8.8.8'"}) - assert code == 201 # firmware_upgrade_completed tests + @patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {}) def test_firmware_upgrade_completed_missing_rsu_ip(): - mock_flask_request = MagicMock() - mock_flask_request.get_json.return_value = {} - mock_flask_jsonify = MagicMock() - with patch("addons.images.firmware_manager.firmware_manager.request", mock_flask_request): - with patch("addons.images.firmware_manager.firmware_manager.jsonify", mock_flask_jsonify): - message, code = firmware_manager.firmware_upgrade_completed() + mock_flask_request = MagicMock() + mock_flask_request.get_json.return_value = {} + mock_flask_jsonify = MagicMock() + with patch( + "addons.images.firmware_manager.firmware_manager.request", mock_flask_request + ): + with patch( + "addons.images.firmware_manager.firmware_manager.jsonify", + mock_flask_jsonify, + ): + message, code = firmware_manager.firmware_upgrade_completed() + + mock_flask_jsonify.assert_called_with( + {"error": "Missing 'rsu_ip' parameter"} + ) + assert code == 400 - mock_flask_jsonify.assert_called_with({"error": "Missing 'rsu_ip' parameter"}) - assert code == 400 @patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {}) def test_firmware_upgrade_completed_unknown_process(): - mock_flask_request = MagicMock() - mock_flask_request.get_json.return_value = {"rsu_ip":"8.8.8.8","status":"success"} - mock_flask_jsonify = MagicMock() - with patch("addons.images.firmware_manager.firmware_manager.request", mock_flask_request): - with patch("addons.images.firmware_manager.firmware_manager.jsonify", mock_flask_jsonify): - message, code = firmware_manager.firmware_upgrade_completed() - - mock_flask_jsonify.assert_called_with({"error": "Specified device is not 
actively being upgraded or was already completed"}) - assert code == 400 - -@patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {"8.8.8.8":fmv.upgrade_info}) + mock_flask_request = MagicMock() + mock_flask_request.get_json.return_value = { + "rsu_ip": "8.8.8.8", + "status": "success", + } + mock_flask_jsonify = MagicMock() + with patch( + "addons.images.firmware_manager.firmware_manager.request", mock_flask_request + ): + with patch( + "addons.images.firmware_manager.firmware_manager.jsonify", + mock_flask_jsonify, + ): + message, code = firmware_manager.firmware_upgrade_completed() + + mock_flask_jsonify.assert_called_with( + { + "error": "Specified device is not actively being upgraded or was already completed" + } + ) + assert code == 400 + + +@patch( + "addons.images.firmware_manager.firmware_manager.active_upgrades", + {"8.8.8.8": fmv.upgrade_info}, +) def test_firmware_upgrade_completed_missing_status(): - mock_flask_request = MagicMock() - mock_flask_request.get_json.return_value = {"rsu_ip":"8.8.8.8"} - mock_flask_jsonify = MagicMock() - with patch("addons.images.firmware_manager.firmware_manager.request", mock_flask_request): - with patch("addons.images.firmware_manager.firmware_manager.jsonify", mock_flask_jsonify): - message, code = firmware_manager.firmware_upgrade_completed() - - mock_flask_jsonify.assert_called_with({"error": "Missing 'status' parameter"}) - assert code == 400 - -@patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {"8.8.8.8":fmv.upgrade_info}) + mock_flask_request = MagicMock() + mock_flask_request.get_json.return_value = {"rsu_ip": "8.8.8.8"} + mock_flask_jsonify = MagicMock() + with patch( + "addons.images.firmware_manager.firmware_manager.request", mock_flask_request + ): + with patch( + "addons.images.firmware_manager.firmware_manager.jsonify", + mock_flask_jsonify, + ): + message, code = firmware_manager.firmware_upgrade_completed() + + mock_flask_jsonify.assert_called_with( + {"error": "Missing 'status' parameter"} + ) + assert code == 400 + + +@patch( + "addons.images.firmware_manager.firmware_manager.active_upgrades", + {"8.8.8.8": fmv.upgrade_info}, +) def test_firmware_upgrade_completed_illegal_status(): - mock_flask_request = MagicMock() - mock_flask_request.get_json.return_value = {"rsu_ip":"8.8.8.8","status":"frog"} - mock_flask_jsonify = MagicMock() - with patch("addons.images.firmware_manager.firmware_manager.request", mock_flask_request): - with patch("addons.images.firmware_manager.firmware_manager.jsonify", mock_flask_jsonify): - message, code = firmware_manager.firmware_upgrade_completed() - - mock_flask_jsonify.assert_called_with({"error": "Wrong value for 'status' parameter - must be either 'success' or 'fail'"}) - assert code == 400 - -@patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {"8.8.8.8":fmv.upgrade_info}) + mock_flask_request = MagicMock() + mock_flask_request.get_json.return_value = {"rsu_ip": "8.8.8.8", "status": "frog"} + mock_flask_jsonify = MagicMock() + with patch( + "addons.images.firmware_manager.firmware_manager.request", mock_flask_request + ): + with patch( + "addons.images.firmware_manager.firmware_manager.jsonify", + mock_flask_jsonify, + ): + message, code = firmware_manager.firmware_upgrade_completed() + + mock_flask_jsonify.assert_called_with( + { + "error": "Wrong value for 'status' parameter - must be either 'success' or 'fail'" + } + ) + assert code == 400 + + +@patch( + "addons.images.firmware_manager.firmware_manager.active_upgrades", + 
{"8.8.8.8": fmv.upgrade_info}, +) def test_firmware_upgrade_completed_fail_status(): - mock_flask_request = MagicMock() - mock_flask_request.get_json.return_value = {"rsu_ip":"8.8.8.8","status":"fail"} - mock_flask_jsonify = MagicMock() - with patch("addons.images.firmware_manager.firmware_manager.request", mock_flask_request): - with patch("addons.images.firmware_manager.firmware_manager.jsonify", mock_flask_jsonify): - message, code = firmware_manager.firmware_upgrade_completed() - - assert "8.8.8.8" not in firmware_manager.active_upgrades - mock_flask_jsonify.assert_called_with({"message": "Firmware upgrade successfully marked as complete"}) - assert code == 204 - -@patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {"8.8.8.8":fmv.upgrade_info}) -@patch('addons.images.firmware_manager.firmware_manager.pgquery.write_db') + mock_flask_request = MagicMock() + mock_flask_request.get_json.return_value = {"rsu_ip": "8.8.8.8", "status": "fail"} + mock_flask_jsonify = MagicMock() + with patch( + "addons.images.firmware_manager.firmware_manager.request", mock_flask_request + ): + with patch( + "addons.images.firmware_manager.firmware_manager.jsonify", + mock_flask_jsonify, + ): + message, code = firmware_manager.firmware_upgrade_completed() + + assert "8.8.8.8" not in firmware_manager.active_upgrades + mock_flask_jsonify.assert_called_with( + {"message": "Firmware upgrade successfully marked as complete"} + ) + assert code == 204 + + +@patch( + "addons.images.firmware_manager.firmware_manager.active_upgrades", + {"8.8.8.8": fmv.upgrade_info}, +) +@patch("addons.images.firmware_manager.firmware_manager.pgquery.write_db") def test_firmware_upgrade_completed_success_status(mock_writedb): - mock_flask_request = MagicMock() - mock_flask_request.get_json.return_value = {"rsu_ip":"8.8.8.8","status":"success"} - mock_flask_jsonify = MagicMock() - with patch("addons.images.firmware_manager.firmware_manager.request", mock_flask_request): - with patch("addons.images.firmware_manager.firmware_manager.jsonify", mock_flask_jsonify): - message, code = firmware_manager.firmware_upgrade_completed() - - mock_writedb.assert_called_with("UPDATE public.rsus SET firmware_version=2 WHERE ipv4_address='8.8.8.8'") - assert "8.8.8.8" not in firmware_manager.active_upgrades - mock_flask_jsonify.assert_called_with({"message": "Firmware upgrade successfully marked as complete"}) - assert code == 204 - -@patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {"8.8.8.8":fmv.upgrade_info}) -@patch('addons.images.firmware_manager.firmware_manager.pgquery.write_db', side_effect=Exception("Failure to query PostgreSQL")) + mock_flask_request = MagicMock() + mock_flask_request.get_json.return_value = { + "rsu_ip": "8.8.8.8", + "status": "success", + } + mock_flask_jsonify = MagicMock() + with patch( + "addons.images.firmware_manager.firmware_manager.request", mock_flask_request + ): + with patch( + "addons.images.firmware_manager.firmware_manager.jsonify", + mock_flask_jsonify, + ): + message, code = firmware_manager.firmware_upgrade_completed() + + mock_writedb.assert_called_with( + "UPDATE public.rsus SET firmware_version=2 WHERE ipv4_address='8.8.8.8'" + ) + assert "8.8.8.8" not in firmware_manager.active_upgrades + mock_flask_jsonify.assert_called_with( + {"message": "Firmware upgrade successfully marked as complete"} + ) + assert code == 204 + + +@patch( + "addons.images.firmware_manager.firmware_manager.active_upgrades", + {"8.8.8.8": fmv.upgrade_info}, +) +@patch( + 
"addons.images.firmware_manager.firmware_manager.pgquery.write_db", + side_effect=Exception("Failure to query PostgreSQL"), +) def test_firmware_upgrade_completed_success_status_exception(mock_writedb): - mock_flask_request = MagicMock() - mock_flask_request.get_json.return_value = {"rsu_ip":"8.8.8.8","status":"success"} - mock_flask_jsonify = MagicMock() - with patch("addons.images.firmware_manager.firmware_manager.request", mock_flask_request): - with patch("addons.images.firmware_manager.firmware_manager.jsonify", mock_flask_jsonify): - message, code = firmware_manager.firmware_upgrade_completed() + mock_flask_request = MagicMock() + mock_flask_request.get_json.return_value = { + "rsu_ip": "8.8.8.8", + "status": "success", + } + mock_flask_jsonify = MagicMock() + with patch( + "addons.images.firmware_manager.firmware_manager.request", mock_flask_request + ): + with patch( + "addons.images.firmware_manager.firmware_manager.jsonify", + mock_flask_jsonify, + ): + message, code = firmware_manager.firmware_upgrade_completed() + + mock_writedb.assert_called_with( + "UPDATE public.rsus SET firmware_version=2 WHERE ipv4_address='8.8.8.8'" + ) + mock_flask_jsonify.assert_called_with( + { + "error": "Unexpected error occurred while querying the PostgreSQL database - firmware upgrade not marked as complete" + } + ) + assert code == 500 - mock_writedb.assert_called_with("UPDATE public.rsus SET firmware_version=2 WHERE ipv4_address='8.8.8.8'") - mock_flask_jsonify.assert_called_with({"error": "Unexpected error occurred while querying the PostgreSQL database - firmware upgrade not marked as complete"}) - assert code == 500 # list_active_upgrades tests -@patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {"8.8.8.8":fmv.upgrade_info}) + +@patch( + "addons.images.firmware_manager.firmware_manager.active_upgrades", + {"8.8.8.8": fmv.upgrade_info}, +) def test_list_active_upgrades(): - mock_flask_request = MagicMock() - mock_flask_request.get_json.return_value = {"rsu_ip":"8.8.8.8","status":"success"} - mock_flask_jsonify = MagicMock() - with patch("addons.images.firmware_manager.firmware_manager.request", mock_flask_request): - with patch("addons.images.firmware_manager.firmware_manager.jsonify", mock_flask_jsonify): - message, code = firmware_manager.list_active_upgrades() - - expected_active_upgrades = { - "8.8.8.8": { - "manufacturer": "Commsignia", - "model": "ITS-RS4-M", - "target_firmware_id": 2, - "target_firmware_version": "y20.39.0", - "install_package": "install_package.tar" - } - } - mock_flask_jsonify.assert_called_with({"active_upgrades": expected_active_upgrades}) - assert code == 200 + mock_flask_request = MagicMock() + mock_flask_request.get_json.return_value = { + "rsu_ip": "8.8.8.8", + "status": "success", + } + mock_flask_jsonify = MagicMock() + with patch( + "addons.images.firmware_manager.firmware_manager.request", mock_flask_request + ): + with patch( + "addons.images.firmware_manager.firmware_manager.jsonify", + mock_flask_jsonify, + ): + message, code = firmware_manager.list_active_upgrades() + + expected_active_upgrades = { + "8.8.8.8": { + "manufacturer": "Commsignia", + "model": "ITS-RS4-M", + "target_firmware_id": 2, + "target_firmware_version": "y20.39.0", + "install_package": "install_package.tar", + } + } + mock_flask_jsonify.assert_called_with( + {"active_upgrades": expected_active_upgrades} + ) + assert code == 200 -# check_for_upgrades tests -@patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {"8.8.8.8":fmv.upgrade_info}) 
-@patch("addons.images.firmware_manager.firmware_manager.get_rsu_upgrade_data", MagicMock(return_value=fmv.multi_rsu_info)) -@patch('addons.images.firmware_manager.firmware_manager.logging') -@patch("addons.images.firmware_manager.firmware_manager.Popen", side_effect=Exception("Process failed to start")) -def test_check_for_upgrades_exception(mock_popen, mock_logging): - firmware_manager.check_for_upgrades() +# check_for_upgrades tests - # Assert firmware upgrade process was started with expected arguments - expected_json_str = '\'{"ipv4_address": "9.9.9.9", "manufacturer": "Commsignia", "model": "ITS-RS4-M", ' \ - '"ssh_username": "user", "ssh_password": "psw", "target_firmware_id": 2, "target_firmware_version": "y20.39.0", ' \ - '"install_package": "install_package.tar"}\'' - mock_popen.assert_called_once_with(['python3', f'/home/commsignia_upgrader.py', expected_json_str], stdout=DEVNULL) - # Assert the process reference is successfully tracked in the active_upgrades dictionary - assert "9.9.9.9" not in firmware_manager.active_upgrades - mock_logging.error.assert_called_with(f"Encountered error of type {Exception} while starting automatic upgrade process for 9.9.9.9: Process failed to start") +@patch( + "addons.images.firmware_manager.firmware_manager.active_upgrades", + {"8.8.8.8": fmv.upgrade_info}, +) +@patch( + "addons.images.firmware_manager.firmware_manager.get_rsu_upgrade_data", + MagicMock(return_value=fmv.multi_rsu_info), +) +@patch("addons.images.firmware_manager.firmware_manager.logging") +@patch( + "addons.images.firmware_manager.firmware_manager.Popen", + side_effect=Exception("Process failed to start"), +) +def test_check_for_upgrades_exception(mock_popen, mock_logging): + firmware_manager.check_for_upgrades() -@patch("addons.images.firmware_manager.firmware_manager.active_upgrades", {"8.8.8.8":fmv.upgrade_info}) -@patch("addons.images.firmware_manager.firmware_manager.get_rsu_upgrade_data", MagicMock(return_value=fmv.multi_rsu_info)) -@patch('addons.images.firmware_manager.firmware_manager.logging') + # Assert firmware upgrade process was started with expected arguments + expected_json_str = ( + '\'{"ipv4_address": "9.9.9.9", "manufacturer": "Commsignia", "model": "ITS-RS4-M", ' + '"ssh_username": "user", "ssh_password": "psw", "target_firmware_id": 2, "target_firmware_version": "y20.39.0", ' + '"install_package": "install_package.tar"}\'' + ) + mock_popen.assert_called_once_with( + ["python3", f"/home/commsignia_upgrader.py", expected_json_str], stdout=DEVNULL + ) + + # Assert the process reference is successfully tracked in the active_upgrades dictionary + assert "9.9.9.9" not in firmware_manager.active_upgrades + mock_logging.error.assert_called_with( + f"Encountered error of type {Exception} while starting automatic upgrade process for 9.9.9.9: Process failed to start" + ) + + +@patch( + "addons.images.firmware_manager.firmware_manager.active_upgrades", + {"8.8.8.8": fmv.upgrade_info}, +) +@patch( + "addons.images.firmware_manager.firmware_manager.get_rsu_upgrade_data", + MagicMock(return_value=fmv.multi_rsu_info), +) +@patch("addons.images.firmware_manager.firmware_manager.logging") @patch("addons.images.firmware_manager.firmware_manager.Popen") def test_check_for_upgrades(mock_popen, mock_logging): - mock_popen_obj = mock_popen.return_value + mock_popen_obj = mock_popen.return_value + + firmware_manager.check_for_upgrades() - firmware_manager.check_for_upgrades() + # Assert firmware upgrade process was started with expected arguments + expected_json_str = ( + 
'\'{"ipv4_address": "9.9.9.9", "manufacturer": "Commsignia", "model": "ITS-RS4-M", ' + '"ssh_username": "user", "ssh_password": "psw", "target_firmware_id": 2, "target_firmware_version": "y20.39.0", ' + '"install_package": "install_package.tar"}\'' + ) + mock_popen.assert_called_once_with( + ["python3", f"/home/commsignia_upgrader.py", expected_json_str], stdout=DEVNULL + ) - # Assert firmware upgrade process was started with expected arguments - expected_json_str = '\'{"ipv4_address": "9.9.9.9", "manufacturer": "Commsignia", "model": "ITS-RS4-M", ' \ - '"ssh_username": "user", "ssh_password": "psw", "target_firmware_id": 2, "target_firmware_version": "y20.39.0", ' \ - '"install_package": "install_package.tar"}\'' - mock_popen.assert_called_once_with(['python3', f'/home/commsignia_upgrader.py', expected_json_str], stdout=DEVNULL) + # Assert the process reference is successfully tracked in the active_upgrades dictionary + assert firmware_manager.active_upgrades["9.9.9.9"]["process"] == mock_popen_obj + mock_logging.info.assert_called_with( + "Firmware upgrade successfully started for '9.9.9.9'" + ) - # Assert the process reference is successfully tracked in the active_upgrades dictionary - assert firmware_manager.active_upgrades['9.9.9.9']['process'] == mock_popen_obj - mock_logging.info.assert_called_with("Firmware upgrade successfully started for '9.9.9.9'") # Other tests + @patch("addons.images.firmware_manager.firmware_manager.serve") def test_serve_rest_api(mock_serve): - firmware_manager.serve_rest_api() - mock_serve.assert_called_with(firmware_manager.app, host="0.0.0.0", port=8080) + firmware_manager.serve_rest_api() + mock_serve.assert_called_with(firmware_manager.app, host="0.0.0.0", port=8080) + @patch("addons.images.firmware_manager.firmware_manager.BackgroundScheduler") def test_init_background_task(mock_bgscheduler): - mock_bgscheduler_obj = mock_bgscheduler.return_value + mock_bgscheduler_obj = mock_bgscheduler.return_value - firmware_manager.init_background_task() + firmware_manager.init_background_task() - mock_bgscheduler.assert_called_with({"apscheduler.timezone": "UTC"}) - mock_bgscheduler_obj.add_job.assert_called_with(firmware_manager.check_for_upgrades, "cron", minute="0") - mock_bgscheduler_obj.start.assert_called_with() \ No newline at end of file + mock_bgscheduler.assert_called_with({"apscheduler.timezone": "UTC"}) + mock_bgscheduler_obj.add_job.assert_called_with( + firmware_manager.check_for_upgrades, "cron", minute="0" + ) + mock_bgscheduler_obj.start.assert_called_with() diff --git a/services/addons/tests/firmware_manager/test_firmware_manager_values.py b/services/addons/tests/firmware_manager/test_firmware_manager_values.py index 8dfc20d86..f1f3870ca 100644 --- a/services/addons/tests/firmware_manager/test_firmware_manager_values.py +++ b/services/addons/tests/firmware_manager/test_firmware_manager_values.py @@ -1,45 +1,37 @@ from unittest.mock import MagicMock -all_rsus_query = "SELECT to_jsonb(row) " \ - "FROM (" \ - "SELECT ipv4_address, man.name AS manufacturer, rm.name AS model, rc.username AS ssh_username, rc.password AS ssh_password, " \ - "fi.firmware_id AS target_firmware_id, fi.version AS target_firmware_version, fi.install_package AS install_package " \ - "FROM public.rsus rd " \ - "JOIN public.rsu_models rm ON rm.rsu_model_id = rd.model " \ - "JOIN public.manufacturers man ON man.manufacturer_id = rm.manufacturer " \ - "JOIN public.rsu_credentials rc ON rc.credential_id = rd.credential_id " \ - "JOIN public.firmware_upgrade_rules fur ON 
fur.from_id = rd.firmware_version " \ - "JOIN public.firmware_images fi ON fi.firmware_id = rd.target_firmware_version " \ - "WHERE firmware_version != target_firmware_version AND target_firmware_version = fur.to_id" \ - ") as row" +all_rsus_query = ( + "SELECT to_jsonb(row) " + "FROM (" + "SELECT ipv4_address, man.name AS manufacturer, rm.name AS model, rc.username AS ssh_username, rc.password AS ssh_password, " + "fi.firmware_id AS target_firmware_id, fi.version AS target_firmware_version, fi.install_package AS install_package " + "FROM public.rsus rd " + "JOIN public.rsu_models rm ON rm.rsu_model_id = rd.model " + "JOIN public.manufacturers man ON man.manufacturer_id = rm.manufacturer " + "JOIN public.rsu_credentials rc ON rc.credential_id = rd.credential_id " + "JOIN public.firmware_upgrade_rules fur ON fur.from_id = rd.firmware_version " + "JOIN public.firmware_images fi ON fi.firmware_id = rd.target_firmware_version " + "WHERE firmware_version != target_firmware_version AND target_firmware_version = fur.to_id" + ") as row" +) -one_rsu_query = "SELECT to_jsonb(row) " \ - "FROM (" \ - "SELECT ipv4_address, man.name AS manufacturer, rm.name AS model, rc.username AS ssh_username, rc.password AS ssh_password, " \ - "fi.firmware_id AS target_firmware_id, fi.version AS target_firmware_version, fi.install_package AS install_package " \ - "FROM public.rsus rd " \ - "JOIN public.rsu_models rm ON rm.rsu_model_id = rd.model " \ - "JOIN public.manufacturers man ON man.manufacturer_id = rm.manufacturer " \ - "JOIN public.rsu_credentials rc ON rc.credential_id = rd.credential_id " \ - "JOIN public.firmware_upgrade_rules fur ON fur.from_id = rd.firmware_version " \ - "JOIN public.firmware_images fi ON fi.firmware_id = rd.target_firmware_version " \ - "WHERE firmware_version != target_firmware_version AND target_firmware_version = fur.to_id" \ - " AND ipv4_address = '8.8.8.8'" \ - ") as row" +one_rsu_query = ( + "SELECT to_jsonb(row) " + "FROM (" + "SELECT ipv4_address, man.name AS manufacturer, rm.name AS model, rc.username AS ssh_username, rc.password AS ssh_password, " + "fi.firmware_id AS target_firmware_id, fi.version AS target_firmware_version, fi.install_package AS install_package " + "FROM public.rsus rd " + "JOIN public.rsu_models rm ON rm.rsu_model_id = rd.model " + "JOIN public.manufacturers man ON man.manufacturer_id = rm.manufacturer " + "JOIN public.rsu_credentials rc ON rc.credential_id = rd.credential_id " + "JOIN public.firmware_upgrade_rules fur ON fur.from_id = rd.firmware_version " + "JOIN public.firmware_images fi ON fi.firmware_id = rd.target_firmware_version " + "WHERE firmware_version != target_firmware_version AND target_firmware_version = fur.to_id" + " AND ipv4_address = '8.8.8.8'" + ") as row" +) rsu_info = { - "ipv4_address": "8.8.8.8", - "manufacturer": "Commsignia", - "model": "ITS-RS4-M", - "ssh_username": "user", - "ssh_password": "psw", - "target_firmware_id": 2, - "target_firmware_version": "y20.39.0", - "install_package": "install_package.tar" -} - -multi_rsu_info = [ - { "ipv4_address": "8.8.8.8", "manufacturer": "Commsignia", "model": "ITS-RS4-M", @@ -47,27 +39,39 @@ "ssh_password": "psw", "target_firmware_id": 2, "target_firmware_version": "y20.39.0", - "install_package": "install_package.tar" - }, - { - "ipv4_address": "9.9.9.9", + "install_package": "install_package.tar", +} + +multi_rsu_info = [ + { + "ipv4_address": "8.8.8.8", + "manufacturer": "Commsignia", + "model": "ITS-RS4-M", + "ssh_username": "user", + "ssh_password": "psw", + "target_firmware_id": 2, 
+ "target_firmware_version": "y20.39.0", + "install_package": "install_package.tar", + }, + { + "ipv4_address": "9.9.9.9", + "manufacturer": "Commsignia", + "model": "ITS-RS4-M", + "ssh_username": "user", + "ssh_password": "psw", + "target_firmware_id": 2, + "target_firmware_version": "y20.39.0", + "install_package": "install_package.tar", + }, +] + +upgrade_info = { + "process": MagicMock(), "manufacturer": "Commsignia", "model": "ITS-RS4-M", "ssh_username": "user", "ssh_password": "psw", "target_firmware_id": 2, "target_firmware_version": "y20.39.0", - "install_package": "install_package.tar" - } -] - -upgrade_info = { - "process": MagicMock(), - "manufacturer": "Commsignia", - "model": "ITS-RS4-M", - "ssh_username": "user", - "ssh_password": "psw", - "target_firmware_id": 2, - "target_firmware_version": "y20.39.0", - "install_package": "install_package.tar" -} \ No newline at end of file + "install_package": "install_package.tar", +} diff --git a/services/addons/tests/firmware_manager/test_upgrader.py b/services/addons/tests/firmware_manager/test_upgrader.py index ff25bfeb0..640dcc72c 100644 --- a/services/addons/tests/firmware_manager/test_upgrader.py +++ b/services/addons/tests/firmware_manager/test_upgrader.py @@ -3,124 +3,142 @@ from addons.images.firmware_manager import upgrader + # Test class for testing the abstract class -class TestUpgrader( upgrader.UpgraderAbstractClass ): - # Prevent Pytest from trying to scan class since it begins with "Test" - __test__ = False - def __init__(self, upgrade_info): - super().__init__(upgrade_info) - def upgrade(self): - super().upgrade() +class TestUpgrader(upgrader.UpgraderAbstractClass): + # Prevent Pytest from trying to scan class since it begins with "Test" + __test__ = False + + def __init__(self, upgrade_info): + super().__init__(upgrade_info) + + def upgrade(self): + super().upgrade() + test_upgrade_info = { - "ipv4_address": "8.8.8.8", - "manufacturer": "test-manufacturer", - "model": "test-model", - "ssh_username": "test-user", - "ssh_password": "test-psw", - "target_firmware_id": 4, - "target_firmware_version": "1.0.0", - "install_package": "firmware_package.tar" + "ipv4_address": "8.8.8.8", + "manufacturer": "test-manufacturer", + "model": "test-model", + "ssh_username": "test-user", + "ssh_password": "test-psw", + "target_firmware_id": 4, + "target_firmware_version": "1.0.0", + "install_package": "firmware_package.tar", } + def test_upgrader_init(): - test_upgrader = TestUpgrader(test_upgrade_info) - assert test_upgrader.install_package == "firmware_package.tar" - assert test_upgrader.blob_name == "test-manufacturer/test-model/1.0.0/firmware_package.tar" - assert test_upgrader.local_file_name == "/home/8.8.8.8/firmware_package.tar" - assert test_upgrader.rsu_ip == "8.8.8.8" - assert test_upgrader.ssh_username == "test-user" - assert test_upgrader.ssh_password == "test-psw" - -@patch('addons.images.firmware_manager.upgrader.shutil') -@patch('addons.images.firmware_manager.upgrader.Path') + test_upgrader = TestUpgrader(test_upgrade_info) + assert test_upgrader.install_package == "firmware_package.tar" + assert test_upgrader.root_path == "/home/8.8.8.8" + assert ( + test_upgrader.blob_name + == "test-manufacturer/test-model/1.0.0/firmware_package.tar" + ) + assert test_upgrader.local_file_name == "/home/8.8.8.8/firmware_package.tar" + assert test_upgrader.rsu_ip == "8.8.8.8" + assert test_upgrader.ssh_username == "test-user" + assert test_upgrader.ssh_password == "test-psw" + + +@patch("addons.images.firmware_manager.upgrader.shutil") 
+@patch("addons.images.firmware_manager.upgrader.Path") def test_cleanup_exists(mock_Path, mock_shutil): - mock_path_obj = mock_Path.return_value - mock_path_obj.exists.return_value = True - mock_path_obj.is_dir.return_value = True - test_upgrader = TestUpgrader(test_upgrade_info) + mock_path_obj = mock_Path.return_value + mock_path_obj.exists.return_value = True + mock_path_obj.is_dir.return_value = True + test_upgrader = TestUpgrader(test_upgrade_info) + + test_upgrader.cleanup() - test_upgrader.cleanup() + mock_Path.assert_called_with("/home/8.8.8.8") + mock_shutil.rmtree.assert_called_with(mock_path_obj) - mock_Path.assert_called_with("/home/8.8.8.8") - mock_shutil.rmtree.assert_called_with(mock_path_obj) -@patch('addons.images.firmware_manager.upgrader.shutil') -@patch('addons.images.firmware_manager.upgrader.Path') +@patch("addons.images.firmware_manager.upgrader.shutil") +@patch("addons.images.firmware_manager.upgrader.Path") def test_cleanup_not_exist(mock_Path, mock_shutil): - mock_path_obj = mock_Path.return_value - mock_path_obj.exists.return_value = False - mock_path_obj.is_dir.return_value = False - test_upgrader = TestUpgrader(test_upgrade_info) + mock_path_obj = mock_Path.return_value + mock_path_obj.exists.return_value = False + mock_path_obj.is_dir.return_value = False + test_upgrader = TestUpgrader(test_upgrade_info) - test_upgrader.cleanup() + test_upgrader.cleanup() - mock_Path.assert_called_with("/home/8.8.8.8") - mock_shutil.rmtree.assert_not_called() + mock_Path.assert_called_with("/home/8.8.8.8") + mock_shutil.rmtree.assert_not_called() -@patch.dict(os.environ, { - "BLOB_STORAGE_PROVIDER": "GCP" -}) -@patch('addons.images.firmware_manager.upgrader.download_blob.download_gcp_blob') -@patch('addons.images.firmware_manager.upgrader.Path') + +@patch.dict(os.environ, {"BLOB_STORAGE_PROVIDER": "GCP"}) +@patch("addons.images.firmware_manager.upgrader.download_blob.download_gcp_blob") +@patch("addons.images.firmware_manager.upgrader.Path") def test_download_blob_gcp(mock_Path, mock_download_gcp_blob): - mock_path_obj = mock_Path.return_value - test_upgrader = TestUpgrader(test_upgrade_info) + mock_path_obj = mock_Path.return_value + test_upgrader = TestUpgrader(test_upgrade_info) - test_upgrader.download_blob() + test_upgrader.download_blob() - mock_path_obj.mkdir.assert_called_with(exist_ok=True) - mock_download_gcp_blob.assert_called_with( - "test-manufacturer/test-model/1.0.0/firmware_package.tar", - "/home/8.8.8.8/firmware_package.tar" + mock_path_obj.mkdir.assert_called_with(exist_ok=True) + mock_download_gcp_blob.assert_called_with( + "test-manufacturer/test-model/1.0.0/firmware_package.tar", + "/home/8.8.8.8/firmware_package.tar", ) -@patch.dict(os.environ, { - "BLOB_STORAGE_PROVIDER": "Test" -}) -@patch('addons.images.firmware_manager.upgrader.logging') -@patch('addons.images.firmware_manager.upgrader.download_blob.download_gcp_blob') -@patch('addons.images.firmware_manager.upgrader.Path') + +@patch.dict(os.environ, {"BLOB_STORAGE_PROVIDER": "Test"}) +@patch("addons.images.firmware_manager.upgrader.logging") +@patch("addons.images.firmware_manager.upgrader.download_blob.download_gcp_blob") +@patch("addons.images.firmware_manager.upgrader.Path") def test_download_blob_not_supported(mock_Path, mock_download_gcp_blob, mock_logging): - mock_path_obj = mock_Path.return_value - test_upgrader = TestUpgrader(test_upgrade_info) + mock_path_obj = mock_Path.return_value + test_upgrader = TestUpgrader(test_upgrade_info) - test_upgrader.download_blob() + 
test_upgrader.download_blob() - mock_path_obj.mkdir.assert_called_with(exist_ok=True) - mock_download_gcp_blob.assert_not_called() - mock_logging.error.assert_called_with("Unsupported blob storage provider") + mock_path_obj.mkdir.assert_called_with(exist_ok=True) + mock_download_gcp_blob.assert_not_called() + mock_logging.error.assert_called_with("Unsupported blob storage provider") -@patch('addons.images.firmware_manager.upgrader.logging') -@patch('addons.images.firmware_manager.upgrader.requests') + +@patch("addons.images.firmware_manager.upgrader.logging") +@patch("addons.images.firmware_manager.upgrader.requests") def test_notify_firmware_manager_success(mock_requests, mock_logging): - test_upgrader = TestUpgrader(test_upgrade_info) + test_upgrader = TestUpgrader(test_upgrade_info) + + test_upgrader.notify_firmware_manager(success=True) - test_upgrader.notify_firmware_manager(success=True) + expected_url = "http://127.0.0.1:8080/firmware_upgrade_completed" + expected_body = {"rsu_ip": "8.8.8.8", "status": "success"} + mock_logging.info.assert_called_with( + "Firmware upgrade script completed with status: success" + ) + mock_requests.post.assert_called_with(expected_url, json=expected_body) - expected_url = "http://127.0.0.1:8080/firmware_upgrade_completed" - expected_body = {"rsu_ip": "8.8.8.8", "status": "success"} - mock_logging.info.assert_called_with("Firmware upgrade script completed with status: success") - mock_requests.post.assert_called_with(expected_url, json=expected_body) -@patch('addons.images.firmware_manager.upgrader.logging') -@patch('addons.images.firmware_manager.upgrader.requests') +@patch("addons.images.firmware_manager.upgrader.logging") +@patch("addons.images.firmware_manager.upgrader.requests") def test_notify_firmware_manager_fail(mock_requests, mock_logging): - test_upgrader = TestUpgrader(test_upgrade_info) + test_upgrader = TestUpgrader(test_upgrade_info) + + test_upgrader.notify_firmware_manager(success=False) - test_upgrader.notify_firmware_manager(success=False) + expected_url = "http://127.0.0.1:8080/firmware_upgrade_completed" + expected_body = {"rsu_ip": "8.8.8.8", "status": "fail"} + mock_logging.info.assert_called_with( + "Firmware upgrade script completed with status: fail" + ) + mock_requests.post.assert_called_with(expected_url, json=expected_body) - expected_url = "http://127.0.0.1:8080/firmware_upgrade_completed" - expected_body = {"rsu_ip": "8.8.8.8", "status": "fail"} - mock_logging.info.assert_called_with("Firmware upgrade script completed with status: fail") - mock_requests.post.assert_called_with(expected_url, json=expected_body) -@patch('addons.images.firmware_manager.upgrader.logging') -@patch('addons.images.firmware_manager.upgrader.requests') +@patch("addons.images.firmware_manager.upgrader.logging") +@patch("addons.images.firmware_manager.upgrader.requests") def test_notify_firmware_manager_exception(mock_requests, mock_logging): - mock_requests.post.side_effect = Exception('Exception test successfully passed') - test_upgrader = TestUpgrader(test_upgrade_info) + mock_requests.post.side_effect = Exception("Exception occurred during upgrade") + test_upgrader = TestUpgrader(test_upgrade_info) - test_upgrader.notify_firmware_manager(success=True) + test_upgrader.notify_firmware_manager(success=True) - mock_logging.error.assert_called_with("Failed to connect to the Firmware Manager API for '8.8.8.8': Exception test successfully passed") \ No newline at end of file + mock_logging.error.assert_called_with( + "Failed to connect to the Firmware 
Manager API for '8.8.8.8': Exception occurred during upgrade" + ) diff --git a/services/addons/tests/firmware_manager/test_yunex_upgrader.py b/services/addons/tests/firmware_manager/test_yunex_upgrader.py new file mode 100644 index 000000000..1b1a7f503 --- /dev/null +++ b/services/addons/tests/firmware_manager/test_yunex_upgrader.py @@ -0,0 +1,356 @@ +from unittest.mock import call, patch, MagicMock, mock_open + +from addons.images.firmware_manager.yunex_upgrader import YunexUpgrader + +test_upgrade_info = { + "ipv4_address": "8.8.8.8", + "manufacturer": "test-manufacturer", + "model": "test-model", + "ssh_username": "test-user", + "ssh_password": "test-psw", + "target_firmware_id": 4, + "target_firmware_version": "1.0.0", + "install_package": "firmware_package.tar", +} + +test_upgrade_info_json = { + "core": "core-file-name", + "sdk": "sdk-file-name", + "provision": "provision-file-name", +} + + +def test_yunex_upgrader_init(): + test_yunex_upgrader = YunexUpgrader(test_upgrade_info) + assert test_yunex_upgrader.install_package == "firmware_package.tar" + assert test_yunex_upgrader.root_path == "/home/8.8.8.8" + assert ( + test_yunex_upgrader.blob_name + == "test-manufacturer/test-model/1.0.0/firmware_package.tar" + ) + assert test_yunex_upgrader.local_file_name == "/home/8.8.8.8/firmware_package.tar" + assert test_yunex_upgrader.rsu_ip == "8.8.8.8" + assert test_yunex_upgrader.ssh_username == "test-user" + assert test_yunex_upgrader.ssh_password == "test-psw" + + +@patch("addons.images.firmware_manager.yunex_upgrader.subprocess") +def test_yunex_upgrader_run_xfer_upgrade_success(mock_subprocess): + run_response_obj = MagicMock() + run_response_obj.returncode = 0 + stdout_obj = MagicMock() + stdout_obj.decode.return_value = '\ntest\ntest\nTEXT: {"success":{"upload":"Processing OK. 
Rebooting now ..."}}\ntest\n'
+    run_response_obj.stdout = stdout_obj
+    run_response_obj.stderr = MagicMock()
+    mock_subprocess.run.return_value = run_response_obj
+
+    test_yunex_upgrader = YunexUpgrader(test_upgrade_info)
+    code = test_yunex_upgrader.run_xfer_upgrade("core-file-name")
+
+    assert code == 0
+
+
+@patch("addons.images.firmware_manager.yunex_upgrader.subprocess")
+def test_yunex_upgrader_run_xfer_upgrade_fail_code(mock_subprocess):
+    run_response_obj = MagicMock()
+    run_response_obj.returncode = 2
+    run_response_obj.stdout = MagicMock()
+    run_response_obj.stderr = MagicMock()
+    mock_subprocess.run.return_value = run_response_obj
+
+    test_yunex_upgrader = YunexUpgrader(test_upgrade_info)
+    code = test_yunex_upgrader.run_xfer_upgrade("core-file-name")
+
+    assert code == -1
+
+
+@patch("addons.images.firmware_manager.yunex_upgrader.subprocess")
+def test_yunex_upgrader_run_xfer_upgrade_fail_output(mock_subprocess):
+    run_response_obj = MagicMock()
+    run_response_obj.returncode = 0
+    stdout_obj = MagicMock()
+    stdout_obj.decode.return_value = "\ntest\ntest\ntest\n"
+    run_response_obj.stdout = stdout_obj
+    run_response_obj.stderr = MagicMock()
+    mock_subprocess.run.return_value = run_response_obj
+
+    test_yunex_upgrader = YunexUpgrader(test_upgrade_info)
+    code = test_yunex_upgrader.run_xfer_upgrade("core-file-name")
+
+    assert code == -1
+
+
+@patch("addons.images.firmware_manager.yunex_upgrader.time")
+@patch("addons.images.firmware_manager.yunex_upgrader.subprocess")
+def test_yunex_upgrader_wait_until_online_success(mock_subprocess, mock_time):
+    run_response_obj = MagicMock()
+    run_response_obj.returncode = 0
+    mock_subprocess.run.return_value = run_response_obj
+
+    test_yunex_upgrader = YunexUpgrader(test_upgrade_info)
+    code = test_yunex_upgrader.wait_until_online()
+
+    assert code == 0
+    assert mock_time.sleep.call_count == 1
+
+
+@patch("addons.images.firmware_manager.yunex_upgrader.time")
+@patch("addons.images.firmware_manager.yunex_upgrader.subprocess")
+def test_yunex_upgrader_wait_until_online_timeout(mock_subprocess, mock_time):
+    run_response_obj = MagicMock()
+    run_response_obj.returncode = 1
+    mock_subprocess.run.return_value = run_response_obj
+
+    test_yunex_upgrader = YunexUpgrader(test_upgrade_info)
+    code = test_yunex_upgrader.wait_until_online()
+
+    assert code == -1
+    assert mock_time.sleep.call_count == 180
+
+
+@patch("addons.images.firmware_manager.yunex_upgrader.time")
+@patch("addons.images.firmware_manager.yunex_upgrader.json")
+@patch("builtins.open", new_callable=mock_open, read_data="data")
+@patch(
+    "addons.images.firmware_manager.yunex_upgrader.tarfile.open",
+    return_value=MagicMock(),
+)
+def test_yunex_upgrader_upgrade_success(
+    mock_tarfile_open, mock_open, mock_json, mock_time
+):
+    taropen_obj = mock_tarfile_open.return_value.__enter__.return_value
+    mock_json.load.return_value = test_upgrade_info_json
+
+    test_yunex_upgrader = YunexUpgrader(test_upgrade_info)
+    test_yunex_upgrader.download_blob = MagicMock()
+    test_yunex_upgrader.run_xfer_upgrade = MagicMock(return_value=0)
+    test_yunex_upgrader.wait_until_online = MagicMock(return_value=0)
+    test_yunex_upgrader.cleanup = MagicMock()
+    notify = MagicMock()
+    test_yunex_upgrader.notify_firmware_manager = notify
+
+    test_yunex_upgrader.upgrade()
+
+    # Assert the firmware package was extracted and the upgrade info was read
+    mock_tarfile_open.assert_called_with("/home/8.8.8.8/firmware_package.tar", "r")
+    taropen_obj.extractall.assert_called_with("/home/8.8.8.8")
+
+    mock_open.assert_called_with("/home/8.8.8.8/upgrade_info.json")
+    mock_json.load.assert_called_with(mock_open.return_value)
+
+    test_yunex_upgrader.run_xfer_upgrade.assert_has_calls(
+        [
+            call("/home/8.8.8.8/core-file-name"),
+            call("/home/8.8.8.8/sdk-file-name"),
+            call("/home/8.8.8.8/provision-file-name"),
+        ]
+    )
+    assert test_yunex_upgrader.wait_until_online.call_count == 2
+    assert mock_time.sleep.call_count == 2
+
+    # Assert notified success value
+    notify.assert_called_with(success=True)
+
+
+@patch("addons.images.firmware_manager.yunex_upgrader.time")
+@patch("addons.images.firmware_manager.yunex_upgrader.json")
+@patch("builtins.open", new_callable=mock_open, read_data="data")
+@patch(
+    "addons.images.firmware_manager.yunex_upgrader.tarfile.open",
+    return_value=MagicMock(),
+)
+def test_yunex_upgrader_core_upgrade_fail(
+    mock_tarfile_open, mock_open, mock_json, mock_time
+):
+    taropen_obj = mock_tarfile_open.return_value.__enter__.return_value
+    mock_json.load.return_value = test_upgrade_info_json
+
+    test_yunex_upgrader = YunexUpgrader(test_upgrade_info)
+    test_yunex_upgrader.download_blob = MagicMock()
+    test_yunex_upgrader.run_xfer_upgrade = MagicMock(return_value=-1)
+    test_yunex_upgrader.wait_until_online = MagicMock(return_value=0)
+    test_yunex_upgrader.cleanup = MagicMock()
+    notify = MagicMock()
+    test_yunex_upgrader.notify_firmware_manager = notify
+
+    test_yunex_upgrader.upgrade()
+
+    # Assert the firmware package was extracted and the upgrade info was read
+    mock_tarfile_open.assert_called_with("/home/8.8.8.8/firmware_package.tar", "r")
+    taropen_obj.extractall.assert_called_with("/home/8.8.8.8")
+
+    mock_open.assert_called_with("/home/8.8.8.8/upgrade_info.json")
+    mock_json.load.assert_called_with(mock_open.return_value)
+
+    test_yunex_upgrader.run_xfer_upgrade.assert_called_with(
+        "/home/8.8.8.8/core-file-name"
+    )
+    assert test_yunex_upgrader.wait_until_online.call_count == 0
+    assert mock_time.sleep.call_count == 0
+
+    # Assert notified success value
+    notify.assert_called_with(success=False)
+
+
+@patch("addons.images.firmware_manager.yunex_upgrader.time")
+@patch("addons.images.firmware_manager.yunex_upgrader.json")
+@patch("builtins.open", new_callable=mock_open, read_data="data")
+@patch(
+    "addons.images.firmware_manager.yunex_upgrader.tarfile.open",
+    return_value=MagicMock(),
+)
+def test_yunex_upgrader_core_ping_fail(
+    mock_tarfile_open, mock_open, mock_json, mock_time
+):
+    taropen_obj = mock_tarfile_open.return_value.__enter__.return_value
+    mock_json.load.return_value = test_upgrade_info_json
+
+    test_yunex_upgrader = YunexUpgrader(test_upgrade_info)
+    test_yunex_upgrader.download_blob = MagicMock()
+    test_yunex_upgrader.run_xfer_upgrade = MagicMock(return_value=0)
+    test_yunex_upgrader.wait_until_online = MagicMock(return_value=-1)
+    test_yunex_upgrader.cleanup = MagicMock()
+    notify = MagicMock()
+    test_yunex_upgrader.notify_firmware_manager = notify
+
+    test_yunex_upgrader.upgrade()
+
+    # Assert the firmware package was extracted and the upgrade info was read
+    mock_tarfile_open.assert_called_with("/home/8.8.8.8/firmware_package.tar", "r")
+    taropen_obj.extractall.assert_called_with("/home/8.8.8.8")
+
+    mock_open.assert_called_with("/home/8.8.8.8/upgrade_info.json")
+    mock_json.load.assert_called_with(mock_open.return_value)
+
+    test_yunex_upgrader.run_xfer_upgrade.assert_called_with(
+        "/home/8.8.8.8/core-file-name"
+    )
+    assert test_yunex_upgrader.wait_until_online.call_count == 1
+    assert mock_time.sleep.call_count == 0
+
+    # Assert notified success value
+    notify.assert_called_with(success=False)
+
+
+@patch("addons.images.firmware_manager.yunex_upgrader.time")
+@patch("addons.images.firmware_manager.yunex_upgrader.json")
+@patch("builtins.open", new_callable=mock_open, read_data="data")
+@patch(
+    "addons.images.firmware_manager.yunex_upgrader.tarfile.open",
+    return_value=MagicMock(),
+)
+def test_yunex_upgrader_sdk_upgrade_fail(
+    mock_tarfile_open, mock_open, mock_json, mock_time
+):
+    taropen_obj = mock_tarfile_open.return_value.__enter__.return_value
+    mock_json.load.return_value = test_upgrade_info_json
+
+    test_yunex_upgrader = YunexUpgrader(test_upgrade_info)
+    test_yunex_upgrader.download_blob = MagicMock()
+    test_yunex_upgrader.run_xfer_upgrade = MagicMock(side_effect=[0, -1])
+    test_yunex_upgrader.wait_until_online = MagicMock(return_value=0)
+    test_yunex_upgrader.cleanup = MagicMock()
+    notify = MagicMock()
+    test_yunex_upgrader.notify_firmware_manager = notify
+
+    test_yunex_upgrader.upgrade()
+
+    # Assert the firmware package was extracted and the upgrade info was read
+    mock_tarfile_open.assert_called_with("/home/8.8.8.8/firmware_package.tar", "r")
+    taropen_obj.extractall.assert_called_with("/home/8.8.8.8")
+
+    mock_open.assert_called_with("/home/8.8.8.8/upgrade_info.json")
+    mock_json.load.assert_called_with(mock_open.return_value)
+
+    test_yunex_upgrader.run_xfer_upgrade.assert_has_calls(
+        [call("/home/8.8.8.8/core-file-name"), call("/home/8.8.8.8/sdk-file-name")]
+    )
+    assert test_yunex_upgrader.wait_until_online.call_count == 1
+    assert mock_time.sleep.call_count == 1
+
+    # Assert notified success value
+    notify.assert_called_with(success=False)
+
+
+@patch("addons.images.firmware_manager.yunex_upgrader.time")
+@patch("addons.images.firmware_manager.yunex_upgrader.json")
+@patch("builtins.open", new_callable=mock_open, read_data="data")
+@patch(
+    "addons.images.firmware_manager.yunex_upgrader.tarfile.open",
+    return_value=MagicMock(),
+)
+def test_yunex_upgrader_sdk_ping_fail(
+    mock_tarfile_open, mock_open, mock_json, mock_time
+):
+    taropen_obj = mock_tarfile_open.return_value.__enter__.return_value
+    mock_json.load.return_value = test_upgrade_info_json
+
+    test_yunex_upgrader = YunexUpgrader(test_upgrade_info)
+    test_yunex_upgrader.download_blob = MagicMock()
+    test_yunex_upgrader.run_xfer_upgrade = MagicMock(return_value=0)
+    test_yunex_upgrader.wait_until_online = MagicMock(side_effect=[0, -1])
+    test_yunex_upgrader.cleanup = MagicMock()
+    notify = MagicMock()
+    test_yunex_upgrader.notify_firmware_manager = notify
+
+    test_yunex_upgrader.upgrade()
+
+    # Assert the firmware package was extracted and the upgrade info was read
+    mock_tarfile_open.assert_called_with("/home/8.8.8.8/firmware_package.tar", "r")
+    taropen_obj.extractall.assert_called_with("/home/8.8.8.8")
+
+    mock_open.assert_called_with("/home/8.8.8.8/upgrade_info.json")
+    mock_json.load.assert_called_with(mock_open.return_value)
+
+    test_yunex_upgrader.run_xfer_upgrade.assert_has_calls(
+        [call("/home/8.8.8.8/core-file-name"), call("/home/8.8.8.8/sdk-file-name")]
+    )
+    assert test_yunex_upgrader.wait_until_online.call_count == 2
+    assert mock_time.sleep.call_count == 1
+
+    # Assert notified success value
+    notify.assert_called_with(success=False)
+
+
+@patch("addons.images.firmware_manager.yunex_upgrader.time")
+@patch("addons.images.firmware_manager.yunex_upgrader.json")
+@patch("builtins.open", new_callable=mock_open, read_data="data")
+@patch(
+    "addons.images.firmware_manager.yunex_upgrader.tarfile.open",
+    return_value=MagicMock(),
+)
+def test_yunex_upgrader_provision_upgrade_fail(
+    mock_tarfile_open, mock_open, mock_json, mock_time
+):
+    taropen_obj = mock_tarfile_open.return_value.__enter__.return_value
+    mock_json.load.return_value = test_upgrade_info_json
+
+    test_yunex_upgrader = YunexUpgrader(test_upgrade_info)
+    test_yunex_upgrader.download_blob = MagicMock()
+    test_yunex_upgrader.run_xfer_upgrade = MagicMock(side_effect=[0, 0, -1])
+    test_yunex_upgrader.wait_until_online = MagicMock(return_value=0)
+    test_yunex_upgrader.cleanup = MagicMock()
+    notify = MagicMock()
+    test_yunex_upgrader.notify_firmware_manager = notify
+
+    test_yunex_upgrader.upgrade()
+
+    # Assert the firmware package was extracted and the upgrade info was read
+    mock_tarfile_open.assert_called_with("/home/8.8.8.8/firmware_package.tar", "r")
+    taropen_obj.extractall.assert_called_with("/home/8.8.8.8")
+
+    mock_open.assert_called_with("/home/8.8.8.8/upgrade_info.json")
+    mock_json.load.assert_called_with(mock_open.return_value)
+
+    test_yunex_upgrader.run_xfer_upgrade.assert_has_calls(
+        [
+            call("/home/8.8.8.8/core-file-name"),
+            call("/home/8.8.8.8/sdk-file-name"),
+            call("/home/8.8.8.8/provision-file-name"),
+        ]
+    )
+    assert test_yunex_upgrader.wait_until_online.call_count == 2
+    assert mock_time.sleep.call_count == 2
+
+    # Assert notified success value
+    notify.assert_called_with(success=False)
diff --git a/services/addons/tests/iss_health_check/test_iss_health_checker.py b/services/addons/tests/iss_health_check/test_iss_health_checker.py
index 0ca97ea07..ea4a4b389 100644
--- a/services/addons/tests/iss_health_check/test_iss_health_checker.py
+++ b/services/addons/tests/iss_health_check/test_iss_health_checker.py
@@ -1,142 +1,143 @@
-from unittest.mock import patch
-import os
-
-from addons.images.iss_health_check import iss_health_checker
-
-@patch('addons.images.iss_health_check.iss_health_checker.pgquery.query_db')
-def test_get_rsu_data_no_data(mock_query_db):
-    mock_query_db.return_value = []
-    result = iss_health_checker.get_rsu_data()
-
-    # check
-    assert result == {}
-    mock_query_db.assert_called_once()
-    mock_query_db.assert_called_with("SELECT jsonb_build_object('rsu_id', rsu_id, 'iss_scms_id', iss_scms_id) FROM public.rsus WHERE iss_scms_id IS NOT NULL ORDER BY rsu_id")
-
-@patch('addons.images.iss_health_check.iss_health_checker.pgquery.query_db')
-def test_get_rsu_data_with_data(mock_query_db):
-    # mock
-    mock_query_db.return_value = [
-        [{'rsu_id': 1, 'iss_scms_id': 'ABC'}],
-        [{'rsu_id': 2, 'iss_scms_id': 'DEF'}],
-        [{'rsu_id': 3, 'iss_scms_id': 'GHI'}]
-    ]
-    result = iss_health_checker.get_rsu_data()
-
-    expected_result = {
-        "ABC": {"rsu_id": 1},
-        "DEF": {"rsu_id": 2},
-        "GHI": {"rsu_id": 3}
-    }
-
-    # check
-    assert result == expected_result
-    mock_query_db.assert_called_once()
-    mock_query_db.assert_called_with("SELECT jsonb_build_object('rsu_id', rsu_id, 'iss_scms_id', iss_scms_id) FROM public.rsus WHERE iss_scms_id IS NOT NULL ORDER BY rsu_id")
-
-@patch.dict(os.environ, {
-    "ISS_API_KEY": "test",
-    "ISS_SCMS_VEHICLE_REST_ENDPOINT": "https://api.dm.iss-scms.com/api/test",
-    "ISS_PROJECT_ID": "test"
-})
-@patch('addons.images.iss_health_check.iss_health_checker.requests.Response')
-@patch('addons.images.iss_health_check.iss_health_checker.requests')
-@patch('addons.images.iss_health_check.iss_health_checker.iss_token')
-@patch('addons.images.iss_health_check.iss_health_checker.get_rsu_data')
-def test_get_scms_status_data(mock_get_rsu_data, mock_get_token, mock_requests, mock_response):
-    mock_get_rsu_data.return_value = {
-        "ABC": {"rsu_id": 1},
-        "DEF": {"rsu_id": 2}
-    }
-    mock_get_token.get_token.return_value = "test-token"
-    mock_requests.get.return_value = mock_response
-    mock_response.json.side_effect = [
-        {
-            "data": [
-                {
-                    
"_id": "ABC", - "provisionerCompany_id": "company", - "entityType": "rsu", - "project_id": "test", - "deviceHealth": "Healthy", - "enrollments": [ - { - "enrollmentStartTime": "2022-10-02T00:00:00.000Z", - "authorizationCertInfo": { - "expireTimeOfLatestDownloadedCert": "2022-11-02T00:00:00.000Z" - } - } - ] - }, - { - "_id": "DEF", - "provisionerCompany_id": "company", - "entityType": "rsu", - "project_id": "test", - "deviceHealth": "Unhealthy", - "enrollments": [ - { - "enrollmentStartTime": "2022-10-02T00:00:00.000Z" - } - ] - } - ], - "count": 2 - }, - { - "data": [], - "count": 2 - } - ] - - actual_result = iss_health_checker.get_scms_status_data() - - expected_result = { - "ABC": { - "rsu_id": 1, - "provisionerCompany": "company", - "entityType": "rsu", - "project_id": "test", - "deviceHealth": "Healthy", - "expiration": "2022-11-02T00:00:00.000Z" - }, - "DEF": { - "rsu_id": 2, - "provisionerCompany": "company", - "entityType": "rsu", - "project_id": "test", - "deviceHealth": "Unhealthy", - "expiration": None - } - } - - - assert actual_result == expected_result - mock_get_rsu_data.assert_called_with() - mock_get_token.get_token.assert_called_with() - # Assert what should be the last call on the endpoint - mock_requests.get.assert_called_with("https://api.dm.iss-scms.com/api/test?pageSize=200&page=1&project_id=test", headers={'x-api-key': 'test-token'}) - -@patch('addons.images.iss_health_check.iss_health_checker.datetime') -@patch('addons.images.iss_health_check.iss_health_checker.pgquery.write_db') -def test_insert_scms_data(mock_write_db, mock_datetime): - mock_datetime.strftime.return_value = "2022-11-03T00:00:00.000Z" - test_data = { - "ABC": { - "rsu_id": 1, - "deviceHealth": "Healthy", - "expiration": "2022-11-02T00:00:00.000Z" - }, - "DEF": { - "rsu_id": 2, - "deviceHealth": "Unhealthy", - "expiration": None - }, - } - # call - iss_health_checker.insert_scms_data(test_data) - - expectedQuery = "INSERT INTO public.scms_health(\"timestamp\", health, expiration, rsu_id) VALUES " \ - "('2022-11-03T00:00:00.000Z', '1', '2022-11-02T00:00:00.000Z', 1), " \ - "('2022-11-03T00:00:00.000Z', '0', NULL, 2)" - mock_write_db.assert_called_with(expectedQuery) +from unittest.mock import patch +import os + +from addons.images.iss_health_check import iss_health_checker + + +@patch("addons.images.iss_health_check.iss_health_checker.pgquery.query_db") +def test_get_rsu_data_no_data(mock_query_db): + mock_query_db.return_value = [] + result = iss_health_checker.get_rsu_data() + + # check + assert result == {} + mock_query_db.assert_called_once() + mock_query_db.assert_called_with( + "SELECT jsonb_build_object('rsu_id', rsu_id, 'iss_scms_id', iss_scms_id) FROM public.rsus WHERE iss_scms_id IS NOT NULL ORDER BY rsu_id" + ) + + +@patch("addons.images.iss_health_check.iss_health_checker.pgquery.query_db") +def test_get_rsu_data_with_data(mock_query_db): + # mock + mock_query_db.return_value = [ + [{"rsu_id": 1, "iss_scms_id": "ABC"}], + [{"rsu_id": 2, "iss_scms_id": "DEF"}], + [{"rsu_id": 3, "iss_scms_id": "GHI"}], + ] + result = iss_health_checker.get_rsu_data() + + expected_result = {"ABC": {"rsu_id": 1}, "DEF": {"rsu_id": 2}, "GHI": {"rsu_id": 3}} + + # check + assert result == expected_result + mock_query_db.assert_called_once() + mock_query_db.assert_called_with( + "SELECT jsonb_build_object('rsu_id', rsu_id, 'iss_scms_id', iss_scms_id) FROM public.rsus WHERE iss_scms_id IS NOT NULL ORDER BY rsu_id" + ) + + +@patch.dict( + os.environ, + { + "ISS_API_KEY": "test", + 
"ISS_SCMS_VEHICLE_REST_ENDPOINT": "https://api.dm.iss-scms.com/api/test", + "ISS_PROJECT_ID": "test", + }, +) +@patch("addons.images.iss_health_check.iss_health_checker.requests.Response") +@patch("addons.images.iss_health_check.iss_health_checker.requests") +@patch("addons.images.iss_health_check.iss_health_checker.iss_token") +@patch("addons.images.iss_health_check.iss_health_checker.get_rsu_data") +def test_get_scms_status_data( + mock_get_rsu_data, mock_get_token, mock_requests, mock_response +): + mock_get_rsu_data.return_value = {"ABC": {"rsu_id": 1}, "DEF": {"rsu_id": 2}} + mock_get_token.get_token.return_value = "test-token" + mock_requests.get.return_value = mock_response + mock_response.json.side_effect = [ + { + "data": [ + { + "_id": "ABC", + "provisionerCompany_id": "company", + "entityType": "rsu", + "project_id": "test", + "deviceHealth": "Healthy", + "enrollments": [ + { + "enrollmentStartTime": "2022-10-02T00:00:00.000Z", + "authorizationCertInfo": { + "expireTimeOfLatestDownloadedCert": "2022-11-02T00:00:00.000Z" + }, + } + ], + }, + { + "_id": "DEF", + "provisionerCompany_id": "company", + "entityType": "rsu", + "project_id": "test", + "deviceHealth": "Unhealthy", + "enrollments": [ + {"enrollmentStartTime": "2022-10-02T00:00:00.000Z"} + ], + }, + ], + "count": 2, + }, + {"data": [], "count": 2}, + ] + + actual_result = iss_health_checker.get_scms_status_data() + + expected_result = { + "ABC": { + "rsu_id": 1, + "provisionerCompany": "company", + "entityType": "rsu", + "project_id": "test", + "deviceHealth": "Healthy", + "expiration": "2022-11-02T00:00:00.000Z", + }, + "DEF": { + "rsu_id": 2, + "provisionerCompany": "company", + "entityType": "rsu", + "project_id": "test", + "deviceHealth": "Unhealthy", + "expiration": None, + }, + } + + assert actual_result == expected_result + mock_get_rsu_data.assert_called_with() + mock_get_token.get_token.assert_called_with() + # Assert what should be the last call on the endpoint + mock_requests.get.assert_called_with( + "https://api.dm.iss-scms.com/api/test?pageSize=200&page=1&project_id=test", + headers={"x-api-key": "test-token"}, + ) + + +@patch("addons.images.iss_health_check.iss_health_checker.datetime") +@patch("addons.images.iss_health_check.iss_health_checker.pgquery.write_db") +def test_insert_scms_data(mock_write_db, mock_datetime): + mock_datetime.strftime.return_value = "2022-11-03T00:00:00.000Z" + test_data = { + "ABC": { + "rsu_id": 1, + "deviceHealth": "Healthy", + "expiration": "2022-11-02T00:00:00.000Z", + }, + "DEF": {"rsu_id": 2, "deviceHealth": "Unhealthy", "expiration": None}, + } + # call + iss_health_checker.insert_scms_data(test_data) + + expectedQuery = ( + 'INSERT INTO public.scms_health("timestamp", health, expiration, rsu_id) VALUES ' + "('2022-11-03T00:00:00.000Z', '1', '2022-11-02T00:00:00.000Z', 1), " + "('2022-11-03T00:00:00.000Z', '0', NULL, 2)" + ) + mock_write_db.assert_called_with(expectedQuery) diff --git a/services/addons/tests/iss_health_check/test_iss_token.py b/services/addons/tests/iss_health_check/test_iss_token.py index 0848cad35..392b041ed 100644 --- a/services/addons/tests/iss_health_check/test_iss_token.py +++ b/services/addons/tests/iss_health_check/test_iss_token.py @@ -1,174 +1,230 @@ -from unittest.mock import patch, MagicMock -import os -import json - -from addons.images.iss_health_check import iss_token - -@patch('addons.images.iss_health_check.iss_token.secretmanager.SecretManagerServiceClient') -def test_create_secret(mock_sm_client): - iss_token.create_secret(mock_sm_client, 
"test-secret_id", "test-parent") - expected_request = { - "parent": "test-parent", - "secret_id": "test-secret_id", - "secret": {"replication": {"automatic": {}}}, - } - mock_sm_client.create_secret.assert_called_with(request=expected_request) - -@patch('addons.images.iss_health_check.iss_token.secretmanager.SecretManagerServiceClient') -@patch('addons.images.iss_health_check.iss_token.secretmanager') -def test_check_if_secret_exists_true(mock_secretmanager, mock_sm_client): - mock_secretmanager.ListSecretsRequest.return_value = "list-request" - - item_match = MagicMock() - item_match.name = "proj/test-proj/secret/test-secret_id" - mock_list_values = [item_match] - mock_sm_client.list_secrets.return_value = mock_list_values - - actual_value = iss_token.check_if_secret_exists(mock_sm_client, "test-secret_id", "test-parent") - mock_secretmanager.ListSecretsRequest.assert_called_with(parent="test-parent") - mock_sm_client.list_secrets.assert_called_with(request="list-request") - assert actual_value == True - -@patch('addons.images.iss_health_check.iss_token.secretmanager.SecretManagerServiceClient') -@patch('addons.images.iss_health_check.iss_token.secretmanager') -def test_check_if_secret_exists_false(mock_secretmanager, mock_sm_client): - mock_secretmanager.ListSecretsRequest.return_value = "list-request" - - item_not_match = MagicMock() - item_not_match.name = "proj/test-proj/secret/test-secret" - mock_list_values = [item_not_match] - mock_sm_client.list_secrets.return_value = mock_list_values - - actual_value = iss_token.check_if_secret_exists(mock_sm_client, "test-secret_id", "test-parent") - mock_secretmanager.ListSecretsRequest.assert_called_with(parent="test-parent") - mock_sm_client.list_secrets.assert_called_with(request="list-request") - assert actual_value == False - -@patch('addons.images.iss_health_check.iss_token.secretmanager.SecretManagerServiceClient') -def test_get_latest_secret_version(mock_sm_client): - mock_response = MagicMock() - mock_response.payload.data = str.encode('{"message": "Secret payload data"}') - mock_sm_client.access_secret_version.return_value = mock_response - - actual_value = iss_token.get_latest_secret_version(mock_sm_client, "test-secret_id", "test-parent") - mock_sm_client.access_secret_version.assert_called_with(request={"name": "test-parent/secrets/test-secret_id/versions/latest"}) - assert actual_value == {'message': 'Secret payload data'} - -@patch('addons.images.iss_health_check.iss_token.secretmanager.SecretManagerServiceClient') -def test_add_secret_version(mock_sm_client): - secret_id = "test-secret_id" - parent = "test-parent" - data = {'message': 'Secret payload data'} - iss_token.add_secret_version(mock_sm_client, secret_id, parent, data) - - expected_request={ - "parent": f"{parent}/secrets/{secret_id}", - "payload": {"data": str.encode(json.dumps(data))} - } - mock_sm_client.add_secret_version.assert_called_with(request=expected_request) - -@patch.dict(os.environ, { - "PROJECT_ID": "test-proj", - "ISS_API_KEY": "test-api-key", - "ISS_SCMS_TOKEN_REST_ENDPOINT": "https://api.dm.iss-scms.com/api/test-token", - "ISS_API_KEY_NAME": "test-api-key-name" -}) -@patch('addons.images.iss_health_check.iss_token.requests.Response') -@patch('addons.images.iss_health_check.iss_token.requests') -@patch('addons.images.iss_health_check.iss_token.uuid') -@patch('addons.images.iss_health_check.iss_token.add_secret_version') -@patch('addons.images.iss_health_check.iss_token.create_secret') 
-@patch('addons.images.iss_health_check.iss_token.check_if_secret_exists') -@patch('addons.images.iss_health_check.iss_token.secretmanager') -def test_get_token_create_secret(mock_secretmanager, mock_check_if_secret_exists, mock_create_secret, mock_add_secret_version, mock_uuid, mock_requests, mock_response): - # Mock every major dependency - mock_sm_client = MagicMock() - mock_secretmanager.SecretManagerServiceClient.return_value = mock_sm_client - mock_check_if_secret_exists.return_value = False - mock_uuid.uuid4.return_value = 12345 - mock_requests.post.return_value = mock_response - mock_response.json.return_value = { - "Item": "new-iss-token" - } - - # Call function - expected_value = "new-iss-token" - actual_value = iss_token.get_token() - - # Check if iss_token function calls were made correctly - mock_check_if_secret_exists.assert_called_with(mock_sm_client, "iss-token-secret", "projects/test-proj") - mock_create_secret.assert_called_with(mock_sm_client, "iss-token-secret", "projects/test-proj") - mock_add_secret_version.assert_called_with(mock_sm_client, "iss-token-secret", "projects/test-proj", - { - "name": "test-api-key-name_12345", - "token": expected_value - }) - - # Check if HTTP requests were made correctly - expected_headers = { - "x-api-key": "test-api-key" - } - expected_body = { - 'friendlyName': "test-api-key-name_12345", - 'expireDays': 1 - } - mock_requests.post.assert_called_with("https://api.dm.iss-scms.com/api/test-token", json=expected_body, headers=expected_headers) - - # Assert final value - assert actual_value == expected_value - -@patch.dict(os.environ, { - "PROJECT_ID": "test-proj", - "ISS_API_KEY": "test-api-key", - "ISS_SCMS_TOKEN_REST_ENDPOINT": "https://api.dm.iss-scms.com/api/test-token", - "ISS_API_KEY_NAME": "test-api-key-name" -}) -@patch('addons.images.iss_health_check.iss_token.requests.Response') -@patch('addons.images.iss_health_check.iss_token.requests') -@patch('addons.images.iss_health_check.iss_token.uuid') -@patch('addons.images.iss_health_check.iss_token.add_secret_version') -@patch('addons.images.iss_health_check.iss_token.get_latest_secret_version') -@patch('addons.images.iss_health_check.iss_token.check_if_secret_exists') -@patch('addons.images.iss_health_check.iss_token.secretmanager') -def test_get_token_secret_exists(mock_secretmanager, mock_check_if_secret_exists, mock_get_latest_secret_version, mock_add_secret_version, mock_uuid, mock_requests, mock_response): - # Mock every major dependency - mock_sm_client = MagicMock() - mock_secretmanager.SecretManagerServiceClient.return_value = mock_sm_client - mock_check_if_secret_exists.return_value = True - mock_get_latest_secret_version.return_value = {"name": "test-api-key-name_01234", "token": "old-token"} - mock_uuid.uuid4.return_value = 12345 - mock_requests.post.return_value = mock_response - mock_response.json.return_value = { - "Item": "new-iss-token" - } - - # Call function - expected_value = "new-iss-token" - actual_value = iss_token.get_token() - - # Check if iss_token function calls were made correctly - mock_check_if_secret_exists.assert_called_with(mock_sm_client, "iss-token-secret", "projects/test-proj") - mock_get_latest_secret_version.assert_called_with(mock_sm_client, "iss-token-secret", "projects/test-proj") - mock_add_secret_version.assert_called_with(mock_sm_client, "iss-token-secret", "projects/test-proj", - { - "name": "test-api-key-name_12345", - "token": expected_value - }) - - # Check if HTTP requests were made correctly - expected_headers = { - "x-api-key": 
"old-token" - } - expected_post_body = { - 'friendlyName': "test-api-key-name_12345", - 'expireDays': 1 - } - mock_requests.post.assert_called_with("https://api.dm.iss-scms.com/api/test-token", json=expected_post_body, headers=expected_headers) - - expected_delete_body = { - 'friendlyName': "test-api-key-name_01234" - } - mock_requests.delete.assert_called_with("https://api.dm.iss-scms.com/api/test-token", json=expected_delete_body, headers=expected_headers) - - # Assert final value - assert actual_value == expected_value +from unittest.mock import patch, MagicMock +import os +import json + +from addons.images.iss_health_check import iss_token + + +@patch( + "addons.images.iss_health_check.iss_token.secretmanager.SecretManagerServiceClient" +) +def test_create_secret(mock_sm_client): + iss_token.create_secret(mock_sm_client, "test-secret_id", "test-parent") + expected_request = { + "parent": "test-parent", + "secret_id": "test-secret_id", + "secret": {"replication": {"automatic": {}}}, + } + mock_sm_client.create_secret.assert_called_with(request=expected_request) + + +@patch( + "addons.images.iss_health_check.iss_token.secretmanager.SecretManagerServiceClient" +) +@patch("addons.images.iss_health_check.iss_token.secretmanager") +def test_check_if_secret_exists_true(mock_secretmanager, mock_sm_client): + mock_secretmanager.ListSecretsRequest.return_value = "list-request" + + item_match = MagicMock() + item_match.name = "proj/test-proj/secret/test-secret_id" + mock_list_values = [item_match] + mock_sm_client.list_secrets.return_value = mock_list_values + + actual_value = iss_token.check_if_secret_exists( + mock_sm_client, "test-secret_id", "test-parent" + ) + mock_secretmanager.ListSecretsRequest.assert_called_with(parent="test-parent") + mock_sm_client.list_secrets.assert_called_with(request="list-request") + assert actual_value == True + + +@patch( + "addons.images.iss_health_check.iss_token.secretmanager.SecretManagerServiceClient" +) +@patch("addons.images.iss_health_check.iss_token.secretmanager") +def test_check_if_secret_exists_false(mock_secretmanager, mock_sm_client): + mock_secretmanager.ListSecretsRequest.return_value = "list-request" + + item_not_match = MagicMock() + item_not_match.name = "proj/test-proj/secret/test-secret" + mock_list_values = [item_not_match] + mock_sm_client.list_secrets.return_value = mock_list_values + + actual_value = iss_token.check_if_secret_exists( + mock_sm_client, "test-secret_id", "test-parent" + ) + mock_secretmanager.ListSecretsRequest.assert_called_with(parent="test-parent") + mock_sm_client.list_secrets.assert_called_with(request="list-request") + assert actual_value == False + + +@patch( + "addons.images.iss_health_check.iss_token.secretmanager.SecretManagerServiceClient" +) +def test_get_latest_secret_version(mock_sm_client): + mock_response = MagicMock() + mock_response.payload.data = str.encode('{"message": "Secret payload data"}') + mock_sm_client.access_secret_version.return_value = mock_response + + actual_value = iss_token.get_latest_secret_version( + mock_sm_client, "test-secret_id", "test-parent" + ) + mock_sm_client.access_secret_version.assert_called_with( + request={"name": "test-parent/secrets/test-secret_id/versions/latest"} + ) + assert actual_value == {"message": "Secret payload data"} + + +@patch( + "addons.images.iss_health_check.iss_token.secretmanager.SecretManagerServiceClient" +) +def test_add_secret_version(mock_sm_client): + secret_id = "test-secret_id" + parent = "test-parent" + data = {"message": "Secret payload data"} 
+ iss_token.add_secret_version(mock_sm_client, secret_id, parent, data) + + expected_request = { + "parent": f"{parent}/secrets/{secret_id}", + "payload": {"data": str.encode(json.dumps(data))}, + } + mock_sm_client.add_secret_version.assert_called_with(request=expected_request) + + +@patch.dict( + os.environ, + { + "PROJECT_ID": "test-proj", + "ISS_API_KEY": "test-api-key", + "ISS_SCMS_TOKEN_REST_ENDPOINT": "https://api.dm.iss-scms.com/api/test-token", + "ISS_API_KEY_NAME": "test-api-key-name", + }, +) +@patch("addons.images.iss_health_check.iss_token.requests.Response") +@patch("addons.images.iss_health_check.iss_token.requests") +@patch("addons.images.iss_health_check.iss_token.uuid") +@patch("addons.images.iss_health_check.iss_token.add_secret_version") +@patch("addons.images.iss_health_check.iss_token.create_secret") +@patch("addons.images.iss_health_check.iss_token.check_if_secret_exists") +@patch("addons.images.iss_health_check.iss_token.secretmanager") +def test_get_token_create_secret( + mock_secretmanager, + mock_check_if_secret_exists, + mock_create_secret, + mock_add_secret_version, + mock_uuid, + mock_requests, + mock_response, +): + # Mock every major dependency + mock_sm_client = MagicMock() + mock_secretmanager.SecretManagerServiceClient.return_value = mock_sm_client + mock_check_if_secret_exists.return_value = False + mock_uuid.uuid4.return_value = 12345 + mock_requests.post.return_value = mock_response + mock_response.json.return_value = {"Item": "new-iss-token"} + + # Call function + expected_value = "new-iss-token" + actual_value = iss_token.get_token() + + # Check if iss_token function calls were made correctly + mock_check_if_secret_exists.assert_called_with( + mock_sm_client, "iss-token-secret", "projects/test-proj" + ) + mock_create_secret.assert_called_with( + mock_sm_client, "iss-token-secret", "projects/test-proj" + ) + mock_add_secret_version.assert_called_with( + mock_sm_client, + "iss-token-secret", + "projects/test-proj", + {"name": "test-api-key-name_12345", "token": expected_value}, + ) + + # Check if HTTP requests were made correctly + expected_headers = {"x-api-key": "test-api-key"} + expected_body = {"friendlyName": "test-api-key-name_12345", "expireDays": 1} + mock_requests.post.assert_called_with( + "https://api.dm.iss-scms.com/api/test-token", + json=expected_body, + headers=expected_headers, + ) + + # Assert final value + assert actual_value == expected_value + + +@patch.dict( + os.environ, + { + "PROJECT_ID": "test-proj", + "ISS_API_KEY": "test-api-key", + "ISS_SCMS_TOKEN_REST_ENDPOINT": "https://api.dm.iss-scms.com/api/test-token", + "ISS_API_KEY_NAME": "test-api-key-name", + }, +) +@patch("addons.images.iss_health_check.iss_token.requests.Response") +@patch("addons.images.iss_health_check.iss_token.requests") +@patch("addons.images.iss_health_check.iss_token.uuid") +@patch("addons.images.iss_health_check.iss_token.add_secret_version") +@patch("addons.images.iss_health_check.iss_token.get_latest_secret_version") +@patch("addons.images.iss_health_check.iss_token.check_if_secret_exists") +@patch("addons.images.iss_health_check.iss_token.secretmanager") +def test_get_token_secret_exists( + mock_secretmanager, + mock_check_if_secret_exists, + mock_get_latest_secret_version, + mock_add_secret_version, + mock_uuid, + mock_requests, + mock_response, +): + # Mock every major dependency + mock_sm_client = MagicMock() + mock_secretmanager.SecretManagerServiceClient.return_value = mock_sm_client + mock_check_if_secret_exists.return_value = True + 
mock_get_latest_secret_version.return_value = { + "name": "test-api-key-name_01234", + "token": "old-token", + } + mock_uuid.uuid4.return_value = 12345 + mock_requests.post.return_value = mock_response + mock_response.json.return_value = {"Item": "new-iss-token"} + + # Call function + expected_value = "new-iss-token" + actual_value = iss_token.get_token() + + # Check if iss_token function calls were made correctly + mock_check_if_secret_exists.assert_called_with( + mock_sm_client, "iss-token-secret", "projects/test-proj" + ) + mock_get_latest_secret_version.assert_called_with( + mock_sm_client, "iss-token-secret", "projects/test-proj" + ) + mock_add_secret_version.assert_called_with( + mock_sm_client, + "iss-token-secret", + "projects/test-proj", + {"name": "test-api-key-name_12345", "token": expected_value}, + ) + + # Check if HTTP requests were made correctly + expected_headers = {"x-api-key": "old-token"} + expected_post_body = {"friendlyName": "test-api-key-name_12345", "expireDays": 1} + mock_requests.post.assert_called_with( + "https://api.dm.iss-scms.com/api/test-token", + json=expected_post_body, + headers=expected_headers, + ) + + expected_delete_body = {"friendlyName": "test-api-key-name_01234"} + mock_requests.delete.assert_called_with( + "https://api.dm.iss-scms.com/api/test-token", + json=expected_delete_body, + headers=expected_headers, + ) + + # Assert final value + assert actual_value == expected_value diff --git a/services/addons/tests/rsu_ping/test_purger.py b/services/addons/tests/rsu_ping/test_purger.py index 52151d386..722bddc3a 100644 --- a/services/addons/tests/rsu_ping/test_purger.py +++ b/services/addons/tests/rsu_ping/test_purger.py @@ -1,57 +1,62 @@ -from mock import MagicMock, call, patch -from datetime import datetime, timedelta -from addons.images.rsu_ping import purger -from freezegun import freeze_time - -@freeze_time("2023-07-06") -@patch("addons.images.rsu_ping.purger.pgquery.query_db") -def test_get_last_online_rsu_records(mock_query_db): - # mock - mock_query_db.return_value = [ - (1, 1, datetime.now()) - ] - - # call - result = purger.get_last_online_rsu_records() - - # check - assert(len(result) == 1) - assert(result[0][0] == 1) - assert(result[0][1] == 1) - assert(result[0][2].strftime("%Y/%m/%d") == '2023/07/06') - -@freeze_time("2023-07-06") -@patch("addons.images.rsu_ping.purger.pgquery.write_db") -def test_purge_ping_data(mock_write_db): - now_dt = datetime.now() - purger.get_last_online_rsu_records = MagicMock(return_value= [ - [0, 0, now_dt - timedelta(hours = 10)], - [1, 1, now_dt - timedelta(days = 3)] - ]) - purger.logging.info = MagicMock() - purger.logging.debug = MagicMock() - - purger.purge_ping_data(24) - - purger.get_last_online_rsu_records.assert_called_once() - mock_write_db.assert_has_calls( - [ - call('DELETE FROM public.ping WHERE rsu_id = 0 AND timestamp < \'2023/07/05T00:00:00\'::timestamp'), - call('DELETE FROM public.ping WHERE rsu_id = 1 AND ping_id != 1') - ] - ) - purger.logging.info.assert_called_once() - -@freeze_time("2023-07-06") -@patch("addons.images.rsu_ping.purger.pgquery.write_db") -def test_purge_ping_data_none(mock_write_db): - now_dt = datetime.now() - purger.get_last_online_rsu_records = MagicMock(return_value= []) - purger.logging.info = MagicMock() - purger.logging.debug = MagicMock() - - purger.purge_ping_data(24) - - purger.get_last_online_rsu_records.assert_called_once() - mock_write_db.assert_not_called() - purger.logging.info.assert_called_once() \ No newline at end of file +from mock import MagicMock, 
call, patch +from datetime import datetime, timedelta +from addons.images.rsu_ping import purger +from freezegun import freeze_time + + +@freeze_time("2023-07-06") +@patch("addons.images.rsu_ping.purger.pgquery.query_db") +def test_get_last_online_rsu_records(mock_query_db): + # mock + mock_query_db.return_value = [(1, 1, datetime.now())] + + # call + result = purger.get_last_online_rsu_records() + + # check + assert len(result) == 1 + assert result[0][0] == 1 + assert result[0][1] == 1 + assert result[0][2].strftime("%Y/%m/%d") == "2023/07/06" + + +@freeze_time("2023-07-06") +@patch("addons.images.rsu_ping.purger.pgquery.write_db") +def test_purge_ping_data(mock_write_db): + now_dt = datetime.now() + purger.get_last_online_rsu_records = MagicMock( + return_value=[ + [0, 0, now_dt - timedelta(hours=10)], + [1, 1, now_dt - timedelta(days=3)], + ] + ) + purger.logging.info = MagicMock() + purger.logging.debug = MagicMock() + + purger.purge_ping_data(24) + + purger.get_last_online_rsu_records.assert_called_once() + mock_write_db.assert_has_calls( + [ + call( + "DELETE FROM public.ping WHERE rsu_id = 0 AND timestamp < '2023/07/05T00:00:00'::timestamp" + ), + call("DELETE FROM public.ping WHERE rsu_id = 1 AND ping_id != 1"), + ] + ) + purger.logging.info.assert_called_once() + + +@freeze_time("2023-07-06") +@patch("addons.images.rsu_ping.purger.pgquery.write_db") +def test_purge_ping_data_none(mock_write_db): + now_dt = datetime.now() + purger.get_last_online_rsu_records = MagicMock(return_value=[]) + purger.logging.info = MagicMock() + purger.logging.debug = MagicMock() + + purger.purge_ping_data(24) + + purger.get_last_online_rsu_records.assert_called_once() + mock_write_db.assert_not_called() + purger.logging.info.assert_called_once() diff --git a/services/addons/tests/rsu_ping/test_rsu_ping_fetch.py b/services/addons/tests/rsu_ping/test_rsu_ping_fetch.py index 021a2cf30..8d3b33176 100644 --- a/services/addons/tests/rsu_ping/test_rsu_ping_fetch.py +++ b/services/addons/tests/rsu_ping/test_rsu_ping_fetch.py @@ -1,335 +1,344 @@ -from mock import call, MagicMock, patch -from addons.images.rsu_ping import rsu_ping_fetch - - -@patch("addons.images.rsu_ping.purger.pgquery.query_db") -def test_get_rsu_data(mock_query_db): - # mock - mock_query_db.return_value = [(1, 'ipaddr')] - - # run - result = rsu_ping_fetch.get_rsu_data() - - expected_result = [{'rsu_id': 1, 'rsu_ip': 'ipaddr'}] - assert result == expected_result - mock_query_db.assert_called_once() - -@patch("addons.images.rsu_ping.purger.pgquery.write_db") -def test_insert_rsu_ping(mock_write_db): - # call - testJson = { - 'histories': [ - { - 'itemid': '487682', - 'clock': '1632350648', - 'value': '1', - 'ns': '447934900' - }, - { - 'itemid': '487682', - 'clock': '1632350348', - 'value': '1', - 'ns': '310686112' - }, - { - 'itemid': '487682', - 'clock': '1632350048', - 'value': '1', - 'ns': '537353876' - }, - { - 'itemid': '487682', - 'clock': '1632349748', - 'value': '1', - 'ns': '825216963' - }, - { - 'itemid': '487682', - 'clock': '1632349448', - 'value': '1', - 'ns': '555282271' - } - ], - 'rsu_id': 230, - 'rsu_ip': '172.16.28.51' - } - rsu_ping_fetch.insert_rsu_ping(testJson) - - # check - expected_calls = [ - call('INSERT INTO public.ping (timestamp, result, rsu_id) VALUES (to_timestamp(1632350648), B\'1\', 230)'), - call('INSERT INTO public.ping (timestamp, result, rsu_id) VALUES (to_timestamp(1632350348), B\'1\', 230)'), - call('INSERT INTO public.ping (timestamp, result, rsu_id) VALUES (to_timestamp(1632350048), B\'1\', 230)'), - 
call('INSERT INTO public.ping (timestamp, result, rsu_id) VALUES (to_timestamp(1632349748), B\'1\', 230)'), - call('INSERT INTO public.ping (timestamp, result, rsu_id) VALUES (to_timestamp(1632349448), B\'1\', 230)') - ] - mock_write_db.assert_has_calls(expected_calls) - - -def createRsuStatusFetchInstance(): - rsu_ping_fetch.os.environ['ZABBIX_ENDPOINT'] = 'endpoint' - rsu_ping_fetch.os.environ['ZABBIX_USER'] = 'user' - rsu_ping_fetch.os.environ['ZABBIX_PASSWORD'] = 'password' - return rsu_ping_fetch.RsuStatusFetch() - -def test_setZabbixAuth(): - # prepare - rsf = createRsuStatusFetchInstance() - rsu_ping_fetch.logging.info = MagicMock() - rsu_ping_fetch.requests.post = MagicMock() - rsu_ping_fetch.requests.post.return_value.json.return_value = {'result': 'auth'} - - # call - rsf.setZabbixAuth() - - # check - rsu_ping_fetch.logging.info.assert_called_once_with('Fetching Zabbix auth token from endpoint') - rsu_ping_fetch.requests.post.assert_called_once_with('endpoint', json={ - "jsonrpc": "2.0", - "method": "user.login", - "id": 1, - "params": { - "username": 'user', - "password": 'password' - } - }) - assert(rsf.ZABBIX_AUTH == 'auth') - -def test_getHostInfo(): - # prepare - rsf = createRsuStatusFetchInstance() - rsf.ZABBIX_AUTH = 'auth' - rsu_ping_fetch.requests.post = MagicMock() - rsu_ping_fetch.requests.post.return_value.json.return_value = {'result': 'result'} - - # call - rsu_ip = 'testaddress' - result = rsf.getHostInfo(rsu_ip) - - # check - rsu_ping_fetch.requests.post.assert_called_once_with(rsf.ZABBIX_ENDPOINT, json={ - "jsonrpc": "2.0", - "method": "host.get", - "id": 1, - "auth": 'auth', - "params": { - "output": [ - "hostid", - "host" - ], - "selectInterfaces": [ - "interfaceid", - "ip" - ], - "filter": { - "ip": "testaddress" - } - } - - }) - assert(result == {'result': 'result'}) - -def test_getItem(): - # prepare - rsf = createRsuStatusFetchInstance() - rsf.ZABBIX_AUTH = 'auth' - rsu_ping_fetch.requests.post = MagicMock() - rsu_ping_fetch.requests.post.return_value.json.return_value = {'result': 'result'} - - # call - hostInfo = { - "result": [ - { - "hostid": "hostid", - } - ] - } - result = rsf.getItem(hostInfo) - - # check - rsu_ping_fetch.requests.post.assert_called_once_with(rsf.ZABBIX_ENDPOINT, json={ - "jsonrpc": "2.0", - "method": "item.get", - "id": 1, - "auth": 'auth', - "params": { - "hostids": ["hostid"], - "filter": {"key_": "icmpping"} - } - }) - assert(result == {'result': 'result'}) - -def test_getHistory(): - # prepare - rsf = createRsuStatusFetchInstance() - rsf.ZABBIX_AUTH = 'auth' - rsu_ping_fetch.requests.post = MagicMock() - rsu_ping_fetch.requests.post.return_value.json.return_value = {'result': 'result'} - - # call - zabbix_item = { - "result": [ - { - "itemid": "itemid", - } - ] - } - result = rsf.getHistory(zabbix_item) - - # check - rsu_ping_fetch.requests.post.assert_called_once_with(rsf.ZABBIX_ENDPOINT, json={ - "jsonrpc": "2.0", - "method": "history.get", - "id": 1, - "auth": 'auth', - "params": { - "itemids": ["itemid"], - "output": "extend", - "sortfield": "clock", - "sortorder": "DESC", - "limit": 5 - } - }) - assert(result == {'result': 'result'}) - -def test_insertHistoryItem(): - # prepare - rsf = createRsuStatusFetchInstance() - rsu_ping_fetch.insert_rsu_ping = MagicMock(return_value=True) - rsu_ping_fetch.logging.info = MagicMock() - rsu_ping_fetch.requests.post = MagicMock() - rsu_ping_fetch.requests.post.return_value.status_code = 200 - - # call - zabbix_history = { - "result": { - "itemid": "itemid", - } - } - rsuInfo = { - 
"rsu_id": 1, - "rsu_ip": "testaddress" - } - result = rsf.insertHistoryItem(zabbix_history, rsuInfo) - - # check - expected_json = {'histories': {'itemid': 'itemid'}, 'rsuData': {'rsu_id': 1, 'rsu_ip': 'testaddress'}} - rsu_ping_fetch.insert_rsu_ping(expected_json) - rsu_ping_fetch.logging.info.assert_called_once_with('Inserting 1 history items for RSU testaddress') - assert(result == True) - -def test_printConfigInfo(): - # prepare - rsf = createRsuStatusFetchInstance() - rsu_ping_fetch.logging.info = MagicMock() - - # call - rsf.printConfigInfo() - - # check - expected_config_object = { - 'ZABBIX_ENDPOINT': 'endpoint', - 'ZABBIX_AUTH': '' - } - expected_message = 'Configuration: ' + str(expected_config_object) - rsu_ping_fetch.logging.info.assert_called_once_with(expected_message) - -def test_run(): - # prepare - rsf = createRsuStatusFetchInstance() - rsf.setZabbixAuth = MagicMock() - rsf.printConfigInfo = MagicMock() - rsu_ping_fetch.get_rsu_data = MagicMock() - rsu_ping_fetch.get_rsu_data.return_value = [ - { - "rsu_id": 1, - "rsu_ip": "testaddress" - } - ] - rsu_ping_fetch.logging.info = MagicMock() - rsf.getHostInfo = MagicMock() - rsf.getItem = MagicMock() - rsf.getHistory = MagicMock() - rsf.insertHistoryItem = MagicMock() - rsf.insertHistoryItem.return_value = True - rsu_ping_fetch.logging.warning = MagicMock() - rsu_ping_fetch.logging.error = MagicMock() - - # call - rsf.run() - - # check - rsf.setZabbixAuth.assert_called_once() - rsf.printConfigInfo.assert_called_once() - rsu_ping_fetch.get_rsu_data.assert_called_once() - rsf.getHostInfo.assert_called_once() - rsf.getItem.assert_called_once() - rsf.getHistory.assert_called_once() - rsf.insertHistoryItem.assert_called_once() - rsu_ping_fetch.logging.warning.assert_not_called() - rsu_ping_fetch.logging.error.assert_not_called() - -def test_run_insert_failure(): - # prepare - rsf = createRsuStatusFetchInstance() - rsf.setZabbixAuth = MagicMock() - rsf.printConfigInfo = MagicMock() - rsu_ping_fetch.get_rsu_data = MagicMock() - rsu_ping_fetch.get_rsu_data.return_value = [ - { - "rsu_id": 1, - "rsu_ip": "testaddress" - } - ] - rsu_ping_fetch.logging.info = MagicMock() - rsf.getHostInfo = MagicMock() - rsf.getItem = MagicMock() - rsf.getHistory = MagicMock() - rsf.insertHistoryItem = MagicMock() - rsf.insertHistoryItem.return_value = False - rsu_ping_fetch.logging.warning = MagicMock() - rsu_ping_fetch.logging.error = MagicMock() - - # call - rsf.run() - - # check - rsf.setZabbixAuth.assert_called_once() - rsf.printConfigInfo.assert_called_once() - rsu_ping_fetch.get_rsu_data.assert_called_once() - rsf.getHostInfo.assert_called_once() - rsf.getItem.assert_called_once() - rsf.getHistory.assert_called_once() - rsf.insertHistoryItem.assert_called_once() - rsu_ping_fetch.logging.warning.assert_called_once_with('Failed to insert history item for testaddress') - rsu_ping_fetch.logging.error.assert_not_called() - -def test_run_exception(): - # prepare - rsf = createRsuStatusFetchInstance() - rsf.setZabbixAuth = MagicMock() - rsf.printConfigInfo = MagicMock() - rsu_ping_fetch.get_rsu_data = MagicMock() - rsu_ping_fetch.get_rsu_data.return_value = [ - { - "rsu_id": 1, - "rsu_ip": "testaddress" - } - ] - rsu_ping_fetch.logging.info = MagicMock() - rsf.getHostInfo = MagicMock() - rsf.getHostInfo.side_effect = Exception('test exception') - rsu_ping_fetch.logging.warning = MagicMock() - rsu_ping_fetch.logging.error = MagicMock() - - # call - rsf.run() - - # check - rsf.setZabbixAuth.assert_called_once() - rsf.printConfigInfo.assert_called_once() - 
rsu_ping_fetch.get_rsu_data.assert_called_once() - rsu_ping_fetch.logging.info.assert_called_once_with('Found 1 RSUs to fetch status for') - rsf.getHostInfo.assert_called_once() - rsu_ping_fetch.logging.warning.assert_not_called() - rsu_ping_fetch.logging.error.assert_called_once_with('Failed to fetch Zabbix data RSU testaddress') +from mock import call, MagicMock, patch +from addons.images.rsu_ping import rsu_ping_fetch + + +@patch("addons.images.rsu_ping.purger.pgquery.query_db") +def test_get_rsu_data(mock_query_db): + # mock + mock_query_db.return_value = [(1, "ipaddr")] + + # run + result = rsu_ping_fetch.get_rsu_data() + + expected_result = [{"rsu_id": 1, "rsu_ip": "ipaddr"}] + assert result == expected_result + mock_query_db.assert_called_once() + + +@patch("addons.images.rsu_ping.purger.pgquery.write_db") +def test_insert_rsu_ping(mock_write_db): + # call + testJson = { + "histories": [ + { + "itemid": "487682", + "clock": "1632350648", + "value": "1", + "ns": "447934900", + }, + { + "itemid": "487682", + "clock": "1632350348", + "value": "1", + "ns": "310686112", + }, + { + "itemid": "487682", + "clock": "1632350048", + "value": "1", + "ns": "537353876", + }, + { + "itemid": "487682", + "clock": "1632349748", + "value": "1", + "ns": "825216963", + }, + { + "itemid": "487682", + "clock": "1632349448", + "value": "1", + "ns": "555282271", + }, + ], + "rsu_id": 230, + "rsu_ip": "172.16.28.51", + } + rsu_ping_fetch.insert_rsu_ping(testJson) + + # check + expected_calls = [ + call( + "INSERT INTO public.ping (timestamp, result, rsu_id) VALUES (to_timestamp(1632350648), B'1', 230)" + ), + call( + "INSERT INTO public.ping (timestamp, result, rsu_id) VALUES (to_timestamp(1632350348), B'1', 230)" + ), + call( + "INSERT INTO public.ping (timestamp, result, rsu_id) VALUES (to_timestamp(1632350048), B'1', 230)" + ), + call( + "INSERT INTO public.ping (timestamp, result, rsu_id) VALUES (to_timestamp(1632349748), B'1', 230)" + ), + call( + "INSERT INTO public.ping (timestamp, result, rsu_id) VALUES (to_timestamp(1632349448), B'1', 230)" + ), + ] + mock_write_db.assert_has_calls(expected_calls) + + +def createRsuStatusFetchInstance(): + rsu_ping_fetch.os.environ["ZABBIX_ENDPOINT"] = "endpoint" + rsu_ping_fetch.os.environ["ZABBIX_USER"] = "user" + rsu_ping_fetch.os.environ["ZABBIX_PASSWORD"] = "password" + return rsu_ping_fetch.RsuStatusFetch() + + +def test_setZabbixAuth(): + # prepare + rsf = createRsuStatusFetchInstance() + rsu_ping_fetch.logging.info = MagicMock() + rsu_ping_fetch.requests.post = MagicMock() + rsu_ping_fetch.requests.post.return_value.json.return_value = {"result": "auth"} + + # call + rsf.setZabbixAuth() + + # check + rsu_ping_fetch.logging.info.assert_called_once_with( + "Fetching Zabbix auth token from endpoint" + ) + rsu_ping_fetch.requests.post.assert_called_once_with( + "endpoint", + json={ + "jsonrpc": "2.0", + "method": "user.login", + "id": 1, + "params": {"username": "user", "password": "password"}, + }, + ) + assert rsf.ZABBIX_AUTH == "auth" + + +def test_getHostInfo(): + # prepare + rsf = createRsuStatusFetchInstance() + rsf.ZABBIX_AUTH = "auth" + rsu_ping_fetch.requests.post = MagicMock() + rsu_ping_fetch.requests.post.return_value.json.return_value = {"result": "result"} + + # call + rsu_ip = "testaddress" + result = rsf.getHostInfo(rsu_ip) + + # check + rsu_ping_fetch.requests.post.assert_called_once_with( + rsf.ZABBIX_ENDPOINT, + json={ + "jsonrpc": "2.0", + "method": "host.get", + "id": 1, + "auth": "auth", + "params": { + "output": ["hostid", "host"], + 
"selectInterfaces": ["interfaceid", "ip"], + "filter": {"ip": "testaddress"}, + }, + }, + ) + assert result == {"result": "result"} + + +def test_getItem(): + # prepare + rsf = createRsuStatusFetchInstance() + rsf.ZABBIX_AUTH = "auth" + rsu_ping_fetch.requests.post = MagicMock() + rsu_ping_fetch.requests.post.return_value.json.return_value = {"result": "result"} + + # call + hostInfo = { + "result": [ + { + "hostid": "hostid", + } + ] + } + result = rsf.getItem(hostInfo) + + # check + rsu_ping_fetch.requests.post.assert_called_once_with( + rsf.ZABBIX_ENDPOINT, + json={ + "jsonrpc": "2.0", + "method": "item.get", + "id": 1, + "auth": "auth", + "params": {"hostids": ["hostid"], "filter": {"key_": "icmpping"}}, + }, + ) + assert result == {"result": "result"} + + +def test_getHistory(): + # prepare + rsf = createRsuStatusFetchInstance() + rsf.ZABBIX_AUTH = "auth" + rsu_ping_fetch.requests.post = MagicMock() + rsu_ping_fetch.requests.post.return_value.json.return_value = {"result": "result"} + + # call + zabbix_item = { + "result": [ + { + "itemid": "itemid", + } + ] + } + result = rsf.getHistory(zabbix_item) + + # check + rsu_ping_fetch.requests.post.assert_called_once_with( + rsf.ZABBIX_ENDPOINT, + json={ + "jsonrpc": "2.0", + "method": "history.get", + "id": 1, + "auth": "auth", + "params": { + "itemids": ["itemid"], + "output": "extend", + "sortfield": "clock", + "sortorder": "DESC", + "limit": 5, + }, + }, + ) + assert result == {"result": "result"} + + +def test_insertHistoryItem(): + # prepare + rsf = createRsuStatusFetchInstance() + rsu_ping_fetch.insert_rsu_ping = MagicMock(return_value=True) + rsu_ping_fetch.logging.info = MagicMock() + rsu_ping_fetch.requests.post = MagicMock() + rsu_ping_fetch.requests.post.return_value.status_code = 200 + + # call + zabbix_history = { + "result": { + "itemid": "itemid", + } + } + rsuInfo = {"rsu_id": 1, "rsu_ip": "testaddress"} + result = rsf.insertHistoryItem(zabbix_history, rsuInfo) + + # check + expected_json = { + "histories": {"itemid": "itemid"}, + "rsuData": {"rsu_id": 1, "rsu_ip": "testaddress"}, + } + rsu_ping_fetch.insert_rsu_ping(expected_json) + rsu_ping_fetch.logging.info.assert_called_once_with( + "Inserting 1 history items for RSU testaddress" + ) + assert result == True + + +def test_printConfigInfo(): + # prepare + rsf = createRsuStatusFetchInstance() + rsu_ping_fetch.logging.info = MagicMock() + + # call + rsf.printConfigInfo() + + # check + expected_config_object = {"ZABBIX_ENDPOINT": "endpoint", "ZABBIX_AUTH": ""} + expected_message = "Configuration: " + str(expected_config_object) + rsu_ping_fetch.logging.info.assert_called_once_with(expected_message) + + +def test_run(): + # prepare + rsf = createRsuStatusFetchInstance() + rsf.setZabbixAuth = MagicMock() + rsf.printConfigInfo = MagicMock() + rsu_ping_fetch.get_rsu_data = MagicMock() + rsu_ping_fetch.get_rsu_data.return_value = [{"rsu_id": 1, "rsu_ip": "testaddress"}] + rsu_ping_fetch.logging.info = MagicMock() + rsf.getHostInfo = MagicMock() + rsf.getItem = MagicMock() + rsf.getHistory = MagicMock() + rsf.insertHistoryItem = MagicMock() + rsf.insertHistoryItem.return_value = True + rsu_ping_fetch.logging.warning = MagicMock() + rsu_ping_fetch.logging.error = MagicMock() + + # call + rsf.run() + + # check + rsf.setZabbixAuth.assert_called_once() + rsf.printConfigInfo.assert_called_once() + rsu_ping_fetch.get_rsu_data.assert_called_once() + rsf.getHostInfo.assert_called_once() + rsf.getItem.assert_called_once() + rsf.getHistory.assert_called_once() + 
rsf.insertHistoryItem.assert_called_once() + rsu_ping_fetch.logging.warning.assert_not_called() + rsu_ping_fetch.logging.error.assert_not_called() + + +def test_run_insert_failure(): + # prepare + rsf = createRsuStatusFetchInstance() + rsf.setZabbixAuth = MagicMock() + rsf.printConfigInfo = MagicMock() + rsu_ping_fetch.get_rsu_data = MagicMock() + rsu_ping_fetch.get_rsu_data.return_value = [{"rsu_id": 1, "rsu_ip": "testaddress"}] + rsu_ping_fetch.logging.info = MagicMock() + rsf.getHostInfo = MagicMock() + rsf.getItem = MagicMock() + rsf.getHistory = MagicMock() + rsf.insertHistoryItem = MagicMock() + rsf.insertHistoryItem.return_value = False + rsu_ping_fetch.logging.warning = MagicMock() + rsu_ping_fetch.logging.error = MagicMock() + + # call + rsf.run() + + # check + rsf.setZabbixAuth.assert_called_once() + rsf.printConfigInfo.assert_called_once() + rsu_ping_fetch.get_rsu_data.assert_called_once() + rsf.getHostInfo.assert_called_once() + rsf.getItem.assert_called_once() + rsf.getHistory.assert_called_once() + rsf.insertHistoryItem.assert_called_once() + rsu_ping_fetch.logging.warning.assert_called_once_with( + "Failed to insert history item for testaddress" + ) + rsu_ping_fetch.logging.error.assert_not_called() + + +def test_run_exception(): + # prepare + rsf = createRsuStatusFetchInstance() + rsf.setZabbixAuth = MagicMock() + rsf.printConfigInfo = MagicMock() + rsu_ping_fetch.get_rsu_data = MagicMock() + rsu_ping_fetch.get_rsu_data.return_value = [{"rsu_id": 1, "rsu_ip": "testaddress"}] + rsu_ping_fetch.logging.info = MagicMock() + rsf.getHostInfo = MagicMock() + rsf.getHostInfo.side_effect = Exception("test exception") + rsu_ping_fetch.logging.warning = MagicMock() + rsu_ping_fetch.logging.error = MagicMock() + + # call + rsf.run() + + # check + rsf.setZabbixAuth.assert_called_once() + rsf.printConfigInfo.assert_called_once() + rsu_ping_fetch.get_rsu_data.assert_called_once() + rsu_ping_fetch.logging.info.assert_called_once_with( + "Found 1 RSUs to fetch status for" + ) + rsf.getHostInfo.assert_called_once() + rsu_ping_fetch.logging.warning.assert_not_called() + rsu_ping_fetch.logging.error.assert_called_once_with( + "Failed to fetch Zabbix data RSU testaddress" + ) diff --git a/services/addons/tests/rsu_ping/test_rsu_pinger.py b/services/addons/tests/rsu_ping/test_rsu_pinger.py index 91777bcf0..fd79ff6c4 100644 --- a/services/addons/tests/rsu_ping/test_rsu_pinger.py +++ b/services/addons/tests/rsu_ping/test_rsu_pinger.py @@ -3,135 +3,111 @@ from subprocess import DEVNULL from addons.images.rsu_ping import rsu_pinger + @patch("addons.images.rsu_ping.rsu_pinger.pgquery.write_db") def test_insert_ping_data(mock_write_db): - ping_data = { - 1: '0', - 2: '1', - 3: '1' - } - time_str = '2023-11-01 00:00:00' - - # call - rsu_pinger.insert_ping_data(ping_data, time_str) - - # check - expected_query = "INSERT INTO public.ping (timestamp, result, rsu_id) VALUES " \ - "(TO_TIMESTAMP('2023-11-01 00:00:00', 'YYYY-MM-DD HH24:MI:SS'), B'0', 1), " \ - "(TO_TIMESTAMP('2023-11-01 00:00:00', 'YYYY-MM-DD HH24:MI:SS'), B'1', 2), " \ - "(TO_TIMESTAMP('2023-11-01 00:00:00', 'YYYY-MM-DD HH24:MI:SS'), B'1', 3)" - mock_write_db.assert_called_with(expected_query) + ping_data = {1: "0", 2: "1", 3: "1"} + time_str = "2023-11-01 00:00:00" + + # call + rsu_pinger.insert_ping_data(ping_data, time_str) + + # check + expected_query = ( + "INSERT INTO public.ping (timestamp, result, rsu_id) VALUES " + "(TO_TIMESTAMP('2023-11-01 00:00:00', 'YYYY-MM-DD HH24:MI:SS'), B'0', 1), " + "(TO_TIMESTAMP('2023-11-01 00:00:00', 
'YYYY-MM-DD HH24:MI:SS'), B'1', 2), " + "(TO_TIMESTAMP('2023-11-01 00:00:00', 'YYYY-MM-DD HH24:MI:SS'), B'1', 3)" + ) + mock_write_db.assert_called_with(expected_query) + @patch("addons.images.rsu_ping.rsu_pinger.Popen") def test_ping_rsu_ips_online(mock_Popen): - mock_p = MagicMock() - mock_p.poll.return_value = 1 - mock_p.returncode = 0 - mock_Popen.return_value = mock_p - rsu_list = [ - (1, '1.1.1.1'), - (2, '2.2.2.2') - ] - - # call - result = rsu_pinger.ping_rsu_ips(rsu_list) - - # check - expected_result = { - 1: '1', - 2: '1' - } - mock_Popen.assert_has_calls( - [ - call(['ping', '-n', '-w5', '-c3', '1.1.1.1'], stdout=DEVNULL), - call(['ping', '-n', '-w5', '-c3', '2.2.2.2'], stdout=DEVNULL) - ] - ) - assert mock_p.poll.call_count == len(rsu_list) # 2 - assert result == expected_result + mock_p = MagicMock() + mock_p.poll.return_value = 1 + mock_p.returncode = 0 + mock_Popen.return_value = mock_p + rsu_list = [(1, "1.1.1.1"), (2, "2.2.2.2")] + + # call + result = rsu_pinger.ping_rsu_ips(rsu_list) + + # check + expected_result = {1: "1", 2: "1"} + mock_Popen.assert_has_calls( + [ + call(["ping", "-n", "-w5", "-c3", "1.1.1.1"], stdout=DEVNULL), + call(["ping", "-n", "-w5", "-c3", "2.2.2.2"], stdout=DEVNULL), + ] + ) + assert mock_p.poll.call_count == len(rsu_list) # 2 + assert result == expected_result + @patch("addons.images.rsu_ping.rsu_pinger.Popen") def test_ping_rsu_ips_offline(mock_Popen): - mock_p = MagicMock() - mock_p.poll.return_value = 1 - mock_p.returncode = 1 - mock_Popen.return_value = mock_p - rsu_list = [ - (1, '1.1.1.1'), - (2, '2.2.2.2') - ] - - # call - result = rsu_pinger.ping_rsu_ips(rsu_list) - - # check - expected_result = { - 1: '0', - 2: '0' - } - mock_Popen.assert_has_calls( - [ - call(['ping', '-n', '-w5', '-c3', '1.1.1.1'], stdout=DEVNULL), - call(['ping', '-n', '-w5', '-c3', '2.2.2.2'], stdout=DEVNULL) - ] - ) - assert mock_p.poll.call_count == len(rsu_list) # 2 - assert result == expected_result + mock_p = MagicMock() + mock_p.poll.return_value = 1 + mock_p.returncode = 1 + mock_Popen.return_value = mock_p + rsu_list = [(1, "1.1.1.1"), (2, "2.2.2.2")] + + # call + result = rsu_pinger.ping_rsu_ips(rsu_list) + + # check + expected_result = {1: "0", 2: "0"} + mock_Popen.assert_has_calls( + [ + call(["ping", "-n", "-w5", "-c3", "1.1.1.1"], stdout=DEVNULL), + call(["ping", "-n", "-w5", "-c3", "2.2.2.2"], stdout=DEVNULL), + ] + ) + assert mock_p.poll.call_count == len(rsu_list) # 2 + assert result == expected_result + @patch("addons.images.rsu_ping.rsu_pinger.pgquery.query_db") def test_get_rsu_ips(mock_query_db): - mock_query_db.return_value = [ - ( - { - 'rsu_id': 1, - 'ipv4_address': '1.1.1.1' - }, - ), - ( - { - 'rsu_id': 2, - 'ipv4_address': '2.2.2.2' - }, - ), - ] - - # call - result = rsu_pinger.get_rsu_ips() - - # check - expected_result = [ - (1, '1.1.1.1'), (2, '2.2.2.2') - ] - assert result == expected_result + mock_query_db.return_value = [ + ({"rsu_id": 1, "ipv4_address": "1.1.1.1"},), + ({"rsu_id": 2, "ipv4_address": "2.2.2.2"},), + ] + + # call + result = rsu_pinger.get_rsu_ips() + + # check + expected_result = [(1, "1.1.1.1"), (2, "2.2.2.2")] + assert result == expected_result + @patch("addons.images.rsu_ping.rsu_pinger.get_rsu_ips") @patch("addons.images.rsu_ping.rsu_pinger.ping_rsu_ips") @patch("addons.images.rsu_ping.rsu_pinger.insert_ping_data") def test_run_rsu_pinger(mock_insert_ping_data, mock_ping_rsu_ips, mock_get_rsu_ips): - mock_ping_rsu_ips.return_value = { - 1: '1', - 2: '0', - 3: '1' - } + mock_ping_rsu_ips.return_value = {1: "1", 
2: "0", 3: "1"} + + # call + rsu_pinger.run_rsu_pinger() - # call - rsu_pinger.run_rsu_pinger() + # check + mock_get_rsu_ips.assert_called_once() + mock_ping_rsu_ips.assert_called_once() + mock_insert_ping_data.assert_called_once() - # check - mock_get_rsu_ips.assert_called_once() - mock_ping_rsu_ips.assert_called_once() - mock_insert_ping_data.assert_called_once() @patch("addons.images.rsu_ping.rsu_pinger.get_rsu_ips") @patch("addons.images.rsu_ping.rsu_pinger.ping_rsu_ips") @patch("addons.images.rsu_ping.rsu_pinger.insert_ping_data") def test_run_rsu_pinger_err(mock_insert_ping_data, mock_ping_rsu_ips, mock_get_rsu_ips): - mock_ping_rsu_ips.return_value = {} + mock_ping_rsu_ips.return_value = {} - # call - rsu_pinger.run_rsu_pinger() + # call + rsu_pinger.run_rsu_pinger() - # check - mock_get_rsu_ips.assert_called_once() - mock_ping_rsu_ips.assert_called_once() - assert mock_insert_ping_data.call_count == 0 \ No newline at end of file + # check + mock_get_rsu_ips.assert_called_once() + mock_ping_rsu_ips.assert_called_once() + assert mock_insert_ping_data.call_count == 0 diff --git a/services/api/.gitignore b/services/api/.gitignore index 910f9ca24..78b0ba4e7 100644 --- a/services/api/.gitignore +++ b/services/api/.gitignore @@ -1,2 +1,2 @@ -env +env .coverage \ No newline at end of file diff --git a/services/api/README.md b/services/api/README.md index ff7876384..7dc191c1f 100644 --- a/services/api/README.md +++ b/services/api/README.md @@ -1,330 +1,332 @@ -# CV Manager API - -The CV Manager API is a single application alternative to running all of the separate cloud functions as individual micro services. The Cloud Run function is triggered over HTTP just like a cloud function and performs all of the same features with optimized authentication and communication with the RSU REST API. - -### Benefits: - -- Reduces network hops when making calls to RSUs to 2 hops instead of 5 -- Authentication and role assignment is integrated into a middleware script in the REST API -- Less technical overhead (Less duplicated code across functions and less deployments) - -## Middleware - -Before the Cloud Run CV Manager will allow an endpoint to be hit, the middleware function will run to first authorize the user credentials. This makes it so that the user must always be authorized to be able to run the cloud run endpoints with no exceptions. (Besides HTTP OPTIONS methods for CORS support) - -The middleware makes the following assumptions: - -- Users are unique based on their email -- Users are only assigned a single role - -## Supported Endpoints - -Expected headers for all endpoints: - -- `"Content-Type": "application/json"` -- `"Authorization": "tokenId"` - -### /user-auth (GET) - -Returns authorized user information including full name, email, and role. - -Example return value: - -- {"name": "John Doe", "email": "jdoe@gmail.com", "role": "admin"} - -### /rsuinfo (GET) - -Returns all basic data for RSUs in the GCP Cloud SQL database. It performs a basic select all query from a table named "RsuData" that is located in a database specified by the environments variables. Returns single JSON object. - -### /rsu-online-status (GET) - -Returns the online status of every RSU and the last time each RSU has been documented to be online in a single JSON object. - -### /rsucounts (GET) - -Returns the message counts for a single, selected RSU from a BigQuery table. It performs a basic select query on a table specified by the environments variable. Returns single JSON object. 
- -### /rsu-command (GET, POST) - -### /rsu-map-info (GET) - -Returns the list of all ipv4 addresses with MAP message data in the PostgreSQL database when argument ip_list is true. Returns the MAP message geoJSON data for the RSU specified in the ip_address argument as a single JSON object when ip_list is false. - -### /rsu-bsm-data (POST) - -Returns geoJSON data for BSM messages from a BigQuery table given start time, end time, and geofence coordinates. It performs a select query on a table specified by the BSM_DB_NAME environment variable. Returns an array of JSON objects. - -1. Verifies the command and calls the corresponding function. -2. Provided RSU data is plugged into the appropriate data structure depending upon the RSU REST endpoint. - - HTTP GET URL arguments - - HTTP POST body data -3. Directly hit RSUs with SNMP commands or trigger the RSU REST endpoint for SSH commands. -4. Return response, varies depending upon request. - -## Admin Endpoints - -The CV Manager supports users who are application admins (super users) to add new RSUs, users and organizations to the CV Manager. This will then effect the database so it will be viewable to all users in the chosen organizations. - -## RSUs - -### /admin-new-rsu (GET) - -Returns the field options for specific RSU fields that do not take free-form responses. - -- primary_routes (will still allow new route names) -- rsu_models -- ssh_credential_groups -- snmp_credential_groups -- snmp_version_groups -- organizations - -### /admin-new-rsu (POST) - -Adds a new RSU to the CV Manager database and allows for it to be viewable and configurable via the CV Manager. Currently supports Commsignia, Kapsch and Yunex. Associates the RSU with every organization specified. - -body example: - -``` -{ - "ip": "10.0.0.1", - "geo_position": { - "latitude": 40.00, - "longitude": -100.00 - }, - "milepost": 56.8, - "primary_route": "I25", - "serial_number": "55EE002211", - "model": "Commsignia", - "scms_id": "", - "ssh_credential_group": "ssh profile", - "snmp_credential_group": "snmp profile", - "snmp_version_group": "snmp version", - "organizations": ["Organization 1"] -} -``` - -### /admin-rsu (GET) - -Depending upon the rsu_ip argument's value, this endpoint returns a list of all RSUs in the CV Manager's PostgreSQL DB or the details of a single RSU along with the options for specific RSU fields that do not take free-form responses. - -HTTP URL Arguments: - -- rsu_ip: - - Set to "all" if you want a list of all RSUs regardless of organization affiliation. Will not return the RSU field options. - - Set to a specific RSU IP such as "10.0.0.1" to return all of the RSU details of that single RSU along with the allowed RSU field options. - -### /admin-rsu (PATCH) - -Modifies an RSU within the CV Manager database, including RSUs that may not have been made through the /admin-new-rsu endpoint. Currently supports Commsignia, Kapsch and Yunex. - -body example: - -``` -{ - "ip": "10.0.0.1", - "geo_position": { - "latitude": 40.00, - "longitude": -100.00 - }, - "milepost": 56.8, - "primary_route": "I25", - "serial_number": "55EE002211", - "model": "Commsignia", - "scms_id": "", - "ssh_credential_group": "ssh profile", - "snmp_credential_group": "snmp profile", - "snmp_version_group": "snmp version", - "organizations_to_add": ["Organization 1"], - "organizations_to_remove": [] -} -``` - -### /admin-rsu (DELETE) - -Deletes the specified RSU from the CV Manager PostgreSQL database based off the IP specified in the rsu_ip argument. 
- -HTTP URL Arguments: - -- rsu_ip: Delete a specific RSU specified by its IP such as "10.0.0.1" from the CV Manager's PostgreSQL database. - -## Users - -### /admin-new-user (GET) - -Returns the field options for specific user fields that do not take free-form responses. - -- organizations -- roles - -### /admin-new-user (POST) - -Adds a new user to the CV Manager database. Associates the user with every organization specified. The specified user will be able to login to the CV Manager as soon as this is complete. The email associated with the user MUST be a Gmail account or an email address that is an alias of a Gmail. - -body example: - -``` -{ - "email": "jdoe@example.com", - "first_name": "John", - "last_name": "Doe", - "super_user": True, - "organizations": [ - {"name": "Test Org", "role": "operator"} - ] -} -``` - -### /admin-user (GET) - -Depending upon the user_email argument's value, this endpoint returns a list of all users in the CV Manager's PostgreSQL DB or the details of a single user along with the options for specific user fields that do not take free-form responses. - -HTTP URL Arguments: - -- user_email: - - Set to "all" if you want a list of all users regardless of organization affiliation. Will not return the user field options. - - Set to a specific user email such as "user@email.com" to return all of the user details of that single user along with the allowed user field options. - -### /admin-user (PATCH) - -Modifies a user within the CV Manager database, including users that may not have been made through the /admin-new-user endpoint. - -body example: - -``` -{ - "email": "jdoe@example.com", - "first_name": "John", - "last_name": "Doe", - "super_user": True, - "organizations_to_add": [ - {"name": "Test Org3", "role": "admin"} - ], - "organizations_to_modify": [ - {"name": "Test Org2", "role": "user"} - ], - "organizations_to_remove": [ - {"name": "Test Org", "role": "user"} - ] -} -``` - -### /admin-user (DELETE) - -Deletes the specified user from the CV Manager PostgreSQL database based off the user email specified in the user_email argument. - -HTTP URL Arguments: - -- user_email: Delete a specific user specified by its email such as "user@email.com" from the CV Manager's PostgreSQL database. - -## Organizations - -### /admin-new-org (POST) - -Adds a new organization to the CV Manager database. The new organization will be usable for new RSUs and users from that point onward. Adding existing RSUs and users to the new organization will require calls of the edit endpoints. - -body example: - -``` -{ - "name": "Test Org" -} -``` - -### /admin-org (GET) - -Depending upon the org_name argument's value, this endpoint returns a list of all organizations in the CV Manager's PostgreSQL DB or the details of a single organization. The list of all organizations will also include a count of the number of RSUs and users that are associated with each individual organization. Requesting a specific organization will include exactly which RSUs and users are a part of that organization. - -HTTP URL Arguments: - -- org_name: - - Set to "all" if you want a list of all organizations. Will also include counts of the number of RSUs and users for each organization. - - Set to a specific organization name such as "Org1" to return all of the organization information including affiliated RSUs and users. - -### /admin-org (PATCH) - -Modifies an organization within the CV Manager database, including organizations that may not have been made through the /admin-new-org endpoint. 
- -body example: - -``` -{ - "name": "Test Org", - "users_to_add": [ - {"email": "testing3@email.com", "role": "admin"} - ], - "users_to_modify": [ - {"email": "testing2@email.com", "role": "user"} - ], - "users_to_remove": [ - {"email": "testing1@email.com", "role": "user"} - ], - "rsus_to_add": ["10.0.0.2"], - "rsus_to_remove": ["10.0.0.1"] -} -``` - -### /admin-org (DELETE) - -Deletes the specified organization from the CV Manager PostgreSQL database based off the organization name specified in the org_name argument. - -HTTP URL Arguments: - -- org_name: Delete a specific organization specified by its name such as "Org1" from the CV Manager's PostgreSQL database. - -## Deploying CV Manager Cloud Run REST API - -1. Build docker image, tag it and push it to a GCP image repository (Container Registry) - - `cd ~/RSU_Management/GCP_cloud_run/rsu_manager` (~ represents wherever you cloned the repository within your local machine) - - `docker build .` - - `docker image tag :` - - `docker push :` -2. Go to GCP Cloud Run and click "Create Service" -3. Configure the Cloud Run deployment container settings - - Select container image (the one from step 1) - - Set container port to 8080 - - CPU allocation setting is up to the user - - 512MB is enough memory to run the application -4. Configure the Cloud Run deployment variables and secrets settings - - The following environment variables are required to be set by environment variable or secret: - -Environment Variables: - -- CORS_DOMAIN: The CV Manager webapp domain that CORS will allow API responses to. -- INSTANCE_CONNECTION_NAME: The connection name for the Cloud SQL instance. (project-id:region:name) -- PG_DB_HOST: The database IP. -- PG_DB_PORT: The database port. -- PG_PG_DB_USER: The database user that will be used to authenticate the cloud function when it queries the database. -- PG_PG_DB_PASS: The database user's password that will be used to authenticate the cloud function. -- COUNTS_DB_TYPE: Set to either "MongoDB" or "BigQuery" depending on where the message counts are stored. -- COUNTS_MSG_TYPES: Set to a list of message types to include in counts query. Sample format is described in the sample.env. -- COUNTS_DB_NAME: The BigQuery table or MongoDB collection name where the RSU message counts are located. -- BSM_DB_NAME: The database name for BSM visualization data. -- SSM_DB_NAME: The database name for SSM visualization data. -- SRM_DB_NAME: The database name for SRM visualization data. -- MONGO_DB_URI: URI for the MongoDB connection. -- MONGO_DB_NAME: Database name for RSU counts. -- KEYCLOAK_ENDPOINT: Keycloak base URL to send requests to. Reference the sample.env for the URL formatting. -- KEYCLOAK_REALM: Keycloak Realm name. -- KEYCLOAK_API_CLIENT_ID: Keycloak API client name. -- KEYCLOAK_API_CLIENT_SECRET_KEY: Keycloak API secret for the given client name. -- RSU_REST_ENDPOINT: HTTPS endpoint of the deployed RSU REST API in GCP Kubernetes. -- LOGGING_LEVEL: The level of which the application will log. (DEBUG, INFO, WARNING, ERROR) -- CSM_EMAIL_TO_SEND_FROM: Origin email address for the API. -- CSM_EMAIL_APP_USERNAME: Username for the SMTP server. -- CSM_EMAIL_APP_PASSWORD: Password for the SMTP server. -- CSM_EMAILS_TO_SEND_TO: Destination email list. -- CSM_TARGET_SMTP_SERVER_ADDRESS: Destination SMTP server address. -- CSM_TARGET_SMTP_SERVER_PORT: Destination SMTP server port. -- WZDX_ENDPOINT: WZDX datafeed enpoint. -- WZDX_API_KEY: API key for the WZDX datafeed. -- TIMEZONE: Timezone to be used for the API. - -1. 
+# CV Manager API
+
+The CV Manager API is a single-application alternative to running all of the separate cloud functions as individual microservices. The Cloud Run function is triggered over HTTP just like a cloud function and provides all of the same features with optimized authentication and communication with the RSU REST API.
+
+### Benefits:
+
+- Reduces network hops when making calls to RSUs to 2 hops instead of 5
+- Authentication and role assignment is integrated into a middleware script in the REST API
+- Less technical overhead (less duplicated code across functions and fewer deployments)
+
+## Middleware
+
+Before the Cloud Run CV Manager allows an endpoint to be hit, the middleware function runs first to authorize the user's credentials. This ensures that the user must always be authorized to run the Cloud Run endpoints, with no exceptions (besides HTTP OPTIONS methods for CORS support).
+
+The middleware makes the following assumptions:
+
+- Users are unique based on their email
+- Users are only assigned a single role
+
+## Supported Endpoints
+
+Expected headers for all endpoints:
+
+- `"Content-Type": "application/json"`
+- `"Authorization": "tokenId"`
+
+### /user-auth (GET)
+
+Returns authorized user information including full name, email, and role.
+
+Example return value:
+
+- {"name": "John Doe", "email": "jdoe@gmail.com", "role": "admin"}
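+A minimal sketch of calling this endpoint with the expected headers, using Python's `requests` library (the base URL and token below are placeholders, not real values):
+
+```
+import requests
+
+API_BASE = "https://cvmanager-api.example.com"  # placeholder deployment URL
+HEADERS = {
+    "Content-Type": "application/json",
+    "Authorization": "tokenId",  # a valid token for the logged-in user
+}
+
+# Fetch the authorized user's name, email, and role
+response = requests.get(f"{API_BASE}/user-auth", headers=HEADERS)
+print(response.json())  # e.g. {"name": "John Doe", "email": "jdoe@gmail.com", "role": "admin"}
+```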
+### /rsuinfo (GET)
+
+Returns all basic data for RSUs in the GCP Cloud SQL database. It performs a basic select-all query on a table named "RsuData" that is located in a database specified by the environment variables. Returns a single JSON object.
+
+### /rsu-online-status (GET)
+
+Returns the online status of every RSU and the last time each RSU has been documented to be online in a single JSON object.
+
+### /rsucounts (GET)
+
+Returns the message counts for a single, selected RSU from a BigQuery table. It performs a basic select query on a table specified by the environment variable. Returns a single JSON object.
+
+### /rsu-command (GET, POST)
+
+1. Verifies the command and calls the corresponding function.
+2. Provided RSU data is plugged into the appropriate data structure depending upon the RSU REST endpoint.
+   - HTTP GET URL arguments
+   - HTTP POST body data
+3. Directly hit RSUs with SNMP commands or trigger the RSU REST endpoint for SSH commands.
+4. Return response, which varies depending upon the request.
+
+### /rsu-map-info (GET)
+
+Returns the list of all ipv4 addresses with MAP message data in the PostgreSQL database when the argument ip_list is true. Returns the MAP message geoJSON data for the RSU specified in the ip_address argument as a single JSON object when ip_list is false.
+
+### /rsu-bsm-data (POST)
+
+Returns geoJSON data for BSM messages from a BigQuery table given a start time, end time, and geofence coordinates. It performs a select query on a table specified by the BSM_DB_NAME environment variable. Returns an array of JSON objects.
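+A hedged sketch of querying this endpoint; the exact request schema is not documented here, so the `start`, `end`, and `geometry` fields below are illustrative assumptions only:
+
+```
+import requests
+
+API_BASE = "https://cvmanager-api.example.com"  # placeholder deployment URL
+HEADERS = {"Content-Type": "application/json", "Authorization": "tokenId"}
+
+# NOTE: these field names are assumptions for illustration; consult the API
+# source for the authoritative request body.
+body = {
+    "start": "2023-11-01T00:00:00Z",
+    "end": "2023-11-02T00:00:00Z",
+    "geometry": [[-105.0, 39.7], [-104.9, 39.7], [-104.9, 39.8], [-105.0, 39.8]],
+}
+
+response = requests.post(f"{API_BASE}/rsu-bsm-data", json=body, headers=HEADERS)
+bsm_geojson = response.json()  # array of geoJSON objects
+```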
+### /rsu-map-info (GET)
+
+Returns the list of all IPv4 addresses with MAP message data in the PostgreSQL database when the ip_list argument is true. When ip_list is false, returns the MAP message GeoJSON data for the RSU specified in the ip_address argument as a single JSON object.
+
+### /rsu-bsm-data (POST)
+
+Returns GeoJSON data for BSM messages from a BigQuery table given a start time, an end time, and geofence coordinates. It performs a select query on a table specified by the BSM_DB_NAME environment variable. Returns an array of JSON objects.
+
+## Admin Endpoints
+
+The CV Manager allows users who are application admins (super users) to add new RSUs, users and organizations to the CV Manager. This will then affect the database so the changes will be viewable to all users in the chosen organizations.
+
+## RSUs
+
+### /admin-new-rsu (GET)
+
+Returns the field options for specific RSU fields that do not take free-form responses.
+
+- primary_routes (will still allow new route names)
+- rsu_models
+- ssh_credential_groups
+- snmp_credential_groups
+- snmp_version_groups
+- organizations
+
+### /admin-new-rsu (POST)
+
+Adds a new RSU to the CV Manager database and allows for it to be viewable and configurable via the CV Manager. Currently supports Commsignia, Kapsch and Yunex. Associates the RSU with every organization specified.
+
+body example:
+
+```
+{
+  "ip": "10.0.0.1",
+  "geo_position": {
+    "latitude": 40.00,
+    "longitude": -100.00
+  },
+  "milepost": 56.8,
+  "primary_route": "I25",
+  "serial_number": "55EE002211",
+  "model": "Commsignia",
+  "scms_id": "",
+  "ssh_credential_group": "ssh profile",
+  "snmp_credential_group": "snmp profile",
+  "snmp_version_group": "snmp version",
+  "organizations": ["Organization 1"]
+}
+```
+
+### /admin-rsu (GET)
+
+Depending upon the rsu_ip argument's value, this endpoint returns a list of all RSUs in the CV Manager's PostgreSQL DB or the details of a single RSU along with the options for specific RSU fields that do not take free-form responses.
+
+HTTP URL Arguments:
+
+- rsu_ip:
+  - Set to "all" if you want a list of all RSUs regardless of organization affiliation. Will not return the RSU field options.
+  - Set to a specific RSU IP such as "10.0.0.1" to return all of the RSU details of that single RSU along with the allowed RSU field options.
+
+### /admin-rsu (PATCH)
+
+Modifies an RSU within the CV Manager database, including RSUs that may not have been made through the /admin-new-rsu endpoint. Currently supports Commsignia, Kapsch and Yunex.
+
+body example:
+
+```
+{
+  "ip": "10.0.0.1",
+  "geo_position": {
+    "latitude": 40.00,
+    "longitude": -100.00
+  },
+  "milepost": 56.8,
+  "primary_route": "I25",
+  "serial_number": "55EE002211",
+  "model": "Commsignia",
+  "scms_id": "",
+  "ssh_credential_group": "ssh profile",
+  "snmp_credential_group": "snmp profile",
+  "snmp_version_group": "snmp version",
+  "organizations_to_add": ["Organization 1"],
+  "organizations_to_remove": []
+}
+```
+
+### /admin-rsu (DELETE)
+
+Deletes the specified RSU from the CV Manager PostgreSQL database based on the IP specified in the rsu_ip argument.
+
+HTTP URL Arguments:
+
+- rsu_ip: Delete a specific RSU specified by its IP such as "10.0.0.1" from the CV Manager's PostgreSQL database.
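+For example, the /admin-rsu endpoint's "all" versus single-IP behavior could be exercised like this (a sketch; `<api-endpoint>` is a placeholder for the deployed URL):
+
+```
+import requests
+
+headers = {"Content-Type": "application/json", "Authorization": "tokenId"}
+base = "https://<api-endpoint>"
+
+# List every RSU regardless of organization (no field options returned).
+all_rsus = requests.get(f"{base}/admin-rsu", params={"rsu_ip": "all"}, headers=headers).json()
+
+# Fetch a single RSU's details plus the allowed field options.
+one_rsu = requests.get(f"{base}/admin-rsu", params={"rsu_ip": "10.0.0.1"}, headers=headers).json()
+
+# Delete that RSU from the CV Manager database.
+requests.delete(f"{base}/admin-rsu", params={"rsu_ip": "10.0.0.1"}, headers=headers)
+```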
+## Users
+
+### /admin-new-user (GET)
+
+Returns the field options for specific user fields that do not take free-form responses.
+
+- organizations
+- roles
+
+### /admin-new-user (POST)
+
+Adds a new user to the CV Manager database. Associates the user with every organization specified. The specified user will be able to log in to the CV Manager as soon as this is complete. The email associated with the user MUST be a Gmail account or an email address that is an alias of a Gmail account.
+
+body example:
+
+```
+{
+  "email": "jdoe@example.com",
+  "first_name": "John",
+  "last_name": "Doe",
+  "super_user": true,
+  "receive_error_emails": true,
+  "organizations": [
+    {"name": "Test Org", "role": "operator"}
+  ]
+}
+```
+
+### /admin-user (GET)
+
+Depending upon the user_email argument's value, this endpoint returns a list of all users in the CV Manager's PostgreSQL DB or the details of a single user along with the options for specific user fields that do not take free-form responses.
+
+HTTP URL Arguments:
+
+- user_email:
+  - Set to "all" if you want a list of all users regardless of organization affiliation. Will not return the user field options.
+  - Set to a specific user email such as "user@email.com" to return all of the user details of that single user along with the allowed user field options.
+
+### /admin-user (PATCH)
+
+Modifies a user within the CV Manager database, including users that may not have been made through the /admin-new-user endpoint.
+
+body example:
+
+```
+{
+  "email": "jdoe@example.com",
+  "first_name": "John",
+  "last_name": "Doe",
+  "super_user": true,
+  "receive_error_emails": true,
+  "organizations_to_add": [
+    {"name": "Test Org3", "role": "admin"}
+  ],
+  "organizations_to_modify": [
+    {"name": "Test Org2", "role": "user"}
+  ],
+  "organizations_to_remove": [
+    {"name": "Test Org", "role": "user"}
+  ]
+}
+```
+
+### /admin-user (DELETE)
+
+Deletes the specified user from the CV Manager PostgreSQL database based on the user email specified in the user_email argument.
+
+HTTP URL Arguments:
+
+- user_email: Delete a specific user specified by their email such as "user@email.com" from the CV Manager's PostgreSQL database.
+
+## Organizations
+
+### /admin-new-org (POST)
+
+Adds a new organization to the CV Manager database. The new organization will be usable for new RSUs and users from that point onward. Adding existing RSUs and users to the new organization will require calls to the edit endpoints.
+
+body example:
+
+```
+{
+  "name": "Test Org"
+}
+```
+
+### /admin-org (GET)
+
+Depending upon the org_name argument's value, this endpoint returns a list of all organizations in the CV Manager's PostgreSQL DB or the details of a single organization. The list of all organizations will also include a count of the number of RSUs and users that are associated with each individual organization. Requesting a specific organization will include exactly which RSUs and users are a part of that organization.
+
+HTTP URL Arguments:
+
+- org_name:
+  - Set to "all" if you want a list of all organizations. Will also include counts of the number of RSUs and users for each organization.
+  - Set to a specific organization name such as "Org1" to return all of the organization information including affiliated RSUs and users.
+
+### /admin-org (PATCH)
+
+Modifies an organization within the CV Manager database, including organizations that may not have been made through the /admin-new-org endpoint.
+
+body example:
+
+```
+{
+  "name": "Test Org",
+  "users_to_add": [
+    {"email": "testing3@email.com", "role": "admin"}
+  ],
+  "users_to_modify": [
+    {"email": "testing2@email.com", "role": "user"}
+  ],
+  "users_to_remove": [
+    {"email": "testing1@email.com", "role": "user"}
+  ],
+  "rsus_to_add": ["10.0.0.2"],
+  "rsus_to_remove": ["10.0.0.1"]
+}
+```
+
+### /admin-org (DELETE)
+
+Deletes the specified organization from the CV Manager PostgreSQL database based on the organization name specified in the org_name argument.
+
+HTTP URL Arguments:
+
+- org_name: Delete a specific organization specified by its name such as "Org1" from the CV Manager's PostgreSQL database.
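+Putting the organization endpoints together, a hedged end-to-end sketch (this assumes the full body shape shown above is required; `<api-endpoint>` is a placeholder):
+
+```
+import requests
+
+headers = {"Content-Type": "application/json", "Authorization": "tokenId"}
+base = "https://<api-endpoint>"
+
+# Create the organization, then attach an existing user and RSU to it
+# via the edit endpoint, since /admin-new-org only accepts a name.
+requests.post(f"{base}/admin-new-org", headers=headers, json={"name": "Test Org"})
+requests.patch(
+    f"{base}/admin-org",
+    headers=headers,
+    json={
+        "name": "Test Org",
+        "users_to_add": [{"email": "jdoe@example.com", "role": "admin"}],
+        "users_to_modify": [],
+        "users_to_remove": [],
+        "rsus_to_add": ["10.0.0.1"],
+        "rsus_to_remove": [],
+    },
+)
+```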
+## Deploying CV Manager Cloud Run REST API
+
+1. Build the docker image, tag it and push it to a GCP image repository (Container Registry)
+   - `cd ~/RSU_Management/GCP_cloud_run/rsu_manager` (~ represents wherever you cloned the repository within your local machine)
+   - `docker build .`
+   - `docker image tag <image_id> <repository>:<tag>`
+   - `docker push <repository>:<tag>`
+2. Go to GCP Cloud Run and click "Create Service"
+3. Configure the Cloud Run deployment container settings
+   - Select container image (the one from step 1)
+   - Set container port to 8080
+   - CPU allocation setting is up to the user
+   - 512MB is enough memory to run the application
+4. Configure the Cloud Run deployment variables and secrets settings
+   - The following environment variables are required to be set by environment variable or secret:
+
+Environment Variables:
+
+- CORS_DOMAIN: The CV Manager webapp domain that CORS will allow API responses to.
+- INSTANCE_CONNECTION_NAME: The connection name for the Cloud SQL instance. (project-id:region:name)
+- PG_DB_HOST: The database IP.
+- PG_DB_PORT: The database port.
+- PG_DB_USER: The database user that will be used to authenticate the cloud function when it queries the database.
+- PG_DB_PASS: The database user's password that will be used to authenticate the cloud function.
+- COUNTS_DB_TYPE: Set to either "MongoDB" or "BigQuery" depending on where the message counts are stored.
+- COUNTS_MSG_TYPES: Set to a list of message types to include in counts query. Sample format is described in the sample.env.
+- COUNTS_DB_NAME: The BigQuery table or MongoDB collection name where the RSU message counts are located.
+- BSM_DB_NAME: The database name for BSM visualization data.
+- SSM_DB_NAME: The database name for SSM visualization data.
+- SRM_DB_NAME: The database name for SRM visualization data.
+- MONGO_DB_URI: URI for the MongoDB connection.
+- MONGO_DB_NAME: Database name for RSU counts.
+- KEYCLOAK_ENDPOINT: Keycloak base URL to send requests to. Reference the sample.env for the URL formatting.
+- KEYCLOAK_REALM: Keycloak Realm name.
+- KEYCLOAK_API_CLIENT_ID: Keycloak API client name.
+- KEYCLOAK_API_CLIENT_SECRET_KEY: Keycloak API secret for the given client name.
+- FIRMWARE_MANAGER_ENDPOINT: Endpoint for the firmware manager deployment's API.
+- LOGGING_LEVEL: The level at which the application will log. (DEBUG, INFO, WARNING, ERROR)
+- CSM_EMAIL_TO_SEND_FROM: Origin email address for the API.
+- CSM_EMAIL_APP_USERNAME: Username for the SMTP server.
+- CSM_EMAIL_APP_PASSWORD: Password for the SMTP server.
+- CSM_EMAILS_TO_SEND_TO: Destination email list.
+- CSM_TARGET_SMTP_SERVER_ADDRESS: Destination SMTP server address.
+- CSM_TARGET_SMTP_SERVER_PORT: Destination SMTP server port.
+- WZDX_ENDPOINT: WZDX datafeed endpoint.
+- WZDX_API_KEY: API key for the WZDX datafeed.
+- TIMEZONE: Timezone to be used for the API.
+
+5. Configure the Cloud Run deployment connections settings
+   - The application assumes there is a Cloud SQL DB, select the DB under "Cloud SQL connections". Ensure the environment variables match the selected DB.
+   - The application makes requests to the automated RSU REST API located in K8s. If this is in a VPC, configure the proper VPC connector. Route only requests to private IPs.
+6. Configure the Cloud Run deployment security settings
+   - Ensure a service account has been selected that has:
+     - Cloud SQL Client permissions
+     - VPC Connector permissions
+     - BigQuery access permissions
+7. Deploy and utilize the assigned endpoint in the CV Manager React application's environment variables
diff --git a/services/api/requirements.txt b/services/api/requirements.txt
index 6cb2ef5d3..703660cd5 100644
--- a/services/api/requirements.txt
+++ b/services/api/requirements.txt
@@ -1,16 +1,16 @@
-flask==3.0.0
-flask_restful==0.3.10
-marshmallow==3.20.1
-gunicorn==21.2.0
-google-auth==2.25.2
-requests==2.31.0
-sqlalchemy==2.0.21
-pg8000==1.30.2
-DateTime==5.2
-google-cloud-bigquery==3.14.1
-python-dateutil==2.8.2
-pytz==2023.3.post1
-Werkzeug==3.0.0
-python-keycloak==2.16.2
-pymongo==4.5.0
-fabric==3.2.2
+flask==3.0.0
+flask_restful==0.3.10
+marshmallow==3.20.1
+gunicorn==21.2.0
+google-auth==2.25.2
+requests==2.31.0
+sqlalchemy==2.0.21
+pg8000==1.30.2
+DateTime==5.2
+google-cloud-bigquery==3.14.1
+python-dateutil==2.8.2
+pytz==2023.3.post1
+Werkzeug==3.0.0
+python-keycloak==2.16.2
+pymongo==4.5.0
+fabric==3.2.2
diff --git a/services/api/resources/mibs/NTCIP1218-v01.txt b/services/api/resources/mibs/NTCIP1218-v01.txt
new file mode 100644
index 000000000..9021d8ebd
--- /dev/null
+++ b/services/api/resources/mibs/NTCIP1218-v01.txt
@@ -0,0 +1,3210 @@
+-- 5.0 MIB Comment Header
+-- The draft NTCIP 1218 v01.35 MIB is a Draft document, which is distributed
+-- for review and comment purposes only. You may reproduce and distribute
+-- this document within your organization, but only for the purposes of and
+-- only to the extent necessary to facilitate review and comment to the
+-- NTCIP Coordinator.
+--
+-- Please ensure that all copies include this notice. This document contains
+-- preliminary information that is subject to substantive change without
+-- further notice, prior to NTCIP 1218 v01 publication.
+-- © 2019 AASHTO / ITE / NEMA
+--
+-- If you wish to reprint or excerpt this MIB (or portions) outside of your
+-- organization (for example, as part works offered by other organizations or
+-- publishers, per the MIB Distribution Permission in draft NTCIP 1218 v0119
+-- Notices), request prior written permission from ntcip@nema.org.
+--
+--*********************************************************************
+-- Filename: 1218v0135.MIB
+-- Date: December 16, 2019
+-- Description: This MIB defines the Roadside Unit (RSU) Objects
+--*********************************************************************
+
+-- 5.1 MIB Header
+NTCIP1218-v01 DEFINITIONS ::= BEGIN
+
+-- the following OBJECT IDENTIFIERS are used in the RSU MIB:
+IMPORTS
+MODULE-IDENTITY, OBJECT-TYPE, Integer32, Counter32, NOTIFICATION-TYPE, enterprises
+    FROM SNMPv2-SMI
+TEXTUAL-CONVENTION, DateAndTime, RowStatus, DisplayString, MacAddress
+    FROM SNMPv2-TC
+    SyslogSeverity
+FROM SYSLOG-TC-MIB
+    Uri255, Uri1024
+FROM URI-TC-MIB;
+
+-- 1218v0129 MODULE-IDENTITY
+-- LAST-UPDATED "201912160000Z"
+-- ORGANIZATION "NTCIP - AASHTO/ITE/NEMA"
+-- CONTACT-INFO "postal: NTCIP Coordinator
+--                       National Electrical Manufacturers Association
+--                       1300 North 17th Street, Suite 900
+--                       Rosslyn, Virginia 22209-3801
+--               email:  ntcip@nema.org"
+-- REVISION "20191216000Z"
+-- DESCRIPTION "This MIB defines the Roadside Unit (RSU) Objects"
+-- rsu OBJECT IDENTIFIER ::= { devices 18 }
+
+-- A.2 STRUCTURE INFORMATION
+nema OBJECT IDENTIFIER ::= { enterprises 1206 }
+-- NEMA has received ID 1206 from IANA
+-- NEMA starts at { iso org dod internet private enterprises 1206 } in the
+-- global naming tree.
+
+transportation OBJECT IDENTIFIER ::= { nema 4 }
+-- The transportation subtree is used by the NTCIP to define
+-- standard objects specific for the transportation industry.
+
+devices OBJECT IDENTIFIER ::= { transportation 2 }
+
+rsu OBJECT IDENTIFIER ::= { devices 18 }
+
+RsuTableIndex ::= TEXTUAL-CONVENTION
+    DISPLAY-HINT "d"
+    STATUS current
+    DESCRIPTION "A valid range of values for use in table indices"
+    SYNTAX Integer32 (1..2147483647)
+
+RsuPsidTC ::= TEXTUAL-CONVENTION
+    DISPLAY-HINT "4x"
+    STATUS current
+    DESCRIPTION "PSID associated with a SAE J2735 message. The PSID is formatted per IEEE1609.12-2016 Table 2 as P-encoded hex values, e.g. BSM = 0x20, TIM = 0x8003, WSA = 0x8007, IProuting = 0xEFFFFFFE. For those PSIDs less than 4 octets in length, the RSU should only require the significant octets be provided. For example, if the desired PSID is 0x20, then the RSU should accept a supplied value of 0x20; the value should not need to be padded to a 4-octet length."
+    SYNTAX OCTET STRING (SIZE(1..4))
+
+-- For NTCIP 1218.
+-- DateAndTime ::= TEXTUAL-CONVENTION
+-- DISPLAY-HINT "2d-1d-1d,1d:1d:1d.1d"
+-- STATUS current
+-- DESCRIPTION "A date-time specification."
+-- SYNTAX OCTET STRING (SIZE(8))
+
+-- 5.2 RSU Radios
+rsuRadio OBJECT IDENTIFIER ::= { rsu 1 }
+
+-- 5.2.1 Maximum Radios
+maxRsuRadios OBJECT-TYPE
+    SYNTAX Integer32 (1..16)
+    UNITS "radio"
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " The maximum number of V2X radios this Roadside Unit supports. This object indicates the maximum rows which appears in the rsuRadioTable object.
+    1.3.6.1.4.1.1206.4.2.18.1.1"
+::= { rsuRadio 1 }
+
+-- 5.2.2 Radio Table
+rsuRadioTable OBJECT-TYPE
+    SYNTAX SEQUENCE OF RsuRadioEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " A table containing the V2X radio parameters. The number of rows in this table is equal to the maxRsuRadios object.
+    static
+    1.3.6.1.4.1.1206.4.2.18.1.2"
+::= { rsuRadio 2 }
+
+rsuRadioEntry OBJECT-TYPE
+    SYNTAX RsuRadioEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " Parameters for a specific roadside unit V2X radio.
+    1.3.6.1.4.1.1206.4.2.18.1.2.1"
+    INDEX { rsuRadioIndex }
+::= { rsuRadioTable 1 }
+
+RsuRadioEntry ::= SEQUENCE {
+    rsuRadioIndex Integer32,
+    rsuRadioDesc DisplayString,
+    rsuRadioEnable INTEGER,
+    rsuRadioType INTEGER,
+    rsuRadioMacAddress1 MacAddress,
+    rsuRadioMacAddress2 MacAddress,
+    rsuRadioChanMode INTEGER,
+    rsuRadioCh1 Integer32,
+    rsuRadioCh2 Integer32,
+    rsuRadioTxPower1 Integer32,
+    rsuRadioTxPower2 Integer32 }
+
+-- 5.2.2.1 RSU Radio Index
+rsuRadioIndex OBJECT-TYPE
+    SYNTAX Integer32 (1..16)
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " Roadside unit V2X radio index.
+    1.3.6.1.4.1.1206.4.2.18.1.2.1.1"
+::= { rsuRadioEntry 1 }
+
+-- 5.2.2.2 RSU Radio Description
+rsuRadioDesc OBJECT-TYPE
+    SYNTAX DisplayString (SIZE(0..144))
+    MAX-ACCESS read-write
+    STATUS current
+    DESCRIPTION " Name of the radio that the configuration relates to.
+    1.3.6.1.4.1.1206.4.2.18.1.2.1.2"
+::= { rsuRadioEntry 2 }
+
+-- 5.2.2.3 RSU Radio Enable
+rsuRadioEnable OBJECT-TYPE
+    SYNTAX INTEGER { off(0), on(1) }
+    MAX-ACCESS read-write
+    STATUS current
+    DESCRIPTION " Set this bit to 1 to enable the V2X radio for transmission and reception of messages, and to 0 to disable the V2X radio.
+    1.3.6.1.4.1.1206.4.2.18.1.2.1.3"
+::= { rsuRadioEntry 3 }
+
+-- 5.2.2.4 RSU Radio Type
+rsuRadioType OBJECT-TYPE
+    SYNTAX INTEGER { other(1),
+                     dsrc(2),
+                     pC5(3) }
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " Indicates the type of V2X Radio. pC5 is cellular V2X.
+    1.3.6.1.4.1.1206.4.2.18.1.2.1.4"
+::= { rsuRadioEntry 4 }
+
+-- 5.2.2.5 RSU Radio MAC Address 1
+rsuRadioMacAddress1 OBJECT-TYPE
+    SYNTAX MacAddress
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " Represents an 802 MAC address of the V2X Radio represented in the 'canonical' order defined by IEEE 802.1a, i.e., as if it were transmitted least significant bit first, even though 802.5 (in contrast to other 802.x protocols) requires MAC addresses to be transmitted most significant bit first. For a PC5 radio, use the least significant 24-bit for the MAC address (Layer 2 Source id). If the MacAddress is not available, set the MacAddress to all ones (1).
+    1.3.6.1.4.1.1206.4.2.18.1.2.1.5"
+::= { rsuRadioEntry 5 }
+
+-- 5.2.2.6 RSU Radio MAC Address 2
+rsuRadioMacAddress2 OBJECT-TYPE
+    SYNTAX MacAddress
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " Represents the second IEEE 802 MAC address of the V2X Radio operating on a channel switch. The MAC address is represented in the 'canonical' order defined by IEEE 802.1a, i.e., as if it were transmitted least significant bit first, even though 802.5 (in contrast to other 802.x protocols) requires MAC addresses to be transmitted most significant bit first. If the MacAddress is not available or if the radio doesn't have a second MAC, set the MacAddress to all ones (1).
+    1.3.6.1.4.1.1206.4.2.18.1.2.1.6"
+::= { rsuRadioEntry 6 }
+
+-- 5.2.2.7 RSU Radio Channel Mode
+rsuRadioChanMode OBJECT-TYPE
+    SYNTAX INTEGER { other (1),
+                     unknown (2),
+                     cont (3),
+                     alt (4),
+                     immediate (5) }
+    MAX-ACCESS read-write
+    STATUS current
+    DESCRIPTION " Represents the channel access mode for the radio. Supported values are continuous access (cont), alternating access (alt), and immediate access as defined in IEEE 1609.3-2016. A SET to a value of other (1) or unknown (2) shall return a badValue error.
+    1.3.6.1.4.1.1206.4.2.18.1.2.1.7"
+::= { rsuRadioEntry 7 }
+
+-- 5.2.2.8 RSU Radio Channel 1
+rsuRadioCh1 OBJECT-TYPE
+    SYNTAX Integer32 (0..255)
+    MAX-ACCESS read-write
+    STATUS current
+    DESCRIPTION " Represents the channel number for continuous mode as well as for time slot 0 when the radio is operating in alternating mode. For DSRC radios in the United States, the transmission channel is from 172 to 184, as defined by IEEE 802.11. For PC5 radios in the United States, the relationship between IEEE 802.11 and 3GPP channel numbers is found in Table 7 of NTCIP 1218 v01.
+    1.3.6.1.4.1.1206.4.2.18.1.2.1.8"
+::= { rsuRadioEntry 8 }
+
+-- 5.2.2.9 RSU Radio Channel 2
+rsuRadioCh2 OBJECT-TYPE
+    SYNTAX Integer32 (0..255)
+    MAX-ACCESS read-write
+    STATUS current
+    DESCRIPTION " Represents the channel number for channel slot 2. For DSRC radios in the United States, the transmission channel is from 172 to 184, as defined by IEEE 802.11. For PC5 radios in the United States, the relationship between IEEE 802.11 and 3GPP channel numbers is found in Table 7 of NTCIP 1218 v01. If the radio operates in continuous mode, this value is equal to rsuRadioCh1 and is ignored.
+    1.3.6.1.4.1.1206.4.2.18.1.2.1.9"
+::= { rsuRadioEntry 9 }
+
+-- 5.2.2.10 RSU Radio Channel 1 - Default Transmit Power
+rsuRadioTxPower1 OBJECT-TYPE
+    SYNTAX Integer32 (-128..127)
+    UNITS "dBm"
+    MAX-ACCESS read-write
+    STATUS current
+    DESCRIPTION " Sets the output power for channel slot 1, in dBm, at the V2X antenna port. This object is considered a default transmit power level to be used if a power level is not defined by an application. The power level defined by the application would take precedence.
+    1.3.6.1.4.1.1206.4.2.18.1.2.1.10"
+    DEFVAL { -128 }
+::= { rsuRadioEntry 10 }
+
+-- 5.2.2.11 RSU Radio Channel 2 - Default Transmit Power
+rsuRadioTxPower2 OBJECT-TYPE
+    SYNTAX Integer32 (-128..127)
+    UNITS "dBm"
+    MAX-ACCESS read-write
+    STATUS current
+    DESCRIPTION " Sets the output power for channel slot 2, in dBm, at the V2X antenna port. This object is considered a default transmit power level to be used if a power level is not defined by an application. The power level defined by the application would take precedence. If the radio operates in continuous mode, this value is equal to rsuRadioTxPower1 and is ignored.
+    1.3.6.1.4.1.1206.4.2.18.1.2.1.11"
+    DEFVAL { -128 }
+::= { rsuRadioEntry 11 }
+
+-- 5.3 RSU GNSS
+rsuGnss OBJECT IDENTIFIER ::= { rsu 2 }
+
+-- 5.3.1 GNSS Status
+rsuGnssStatus OBJECT-TYPE
+    SYNTAX Integer32 (0..128)
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " Provides the number of GNSS satellites the RSU's internal GNSS receiver is tracking and using.
+    1.3.6.1.4.1.1206.4.2.18.2.1"
+::= { rsuGnss 1 }
+
+-- 5.3.2 GNSS Augmentation
+rsuGnssAugmentation OBJECT-TYPE
+    SYNTAX INTEGER { other (1),
+                     none (2),
+                     waas (3) }
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " This object indicates if the position from a GNSS or similar geopositioning device is augmented, such as Wide Area Augmentation System (WAAS) corrections.
+    1.3.6.1.4.1.1206.4.2.18.2.2"
+    DEFVAL { none }
+::= { rsuGnss 2 }
+
+-- 5.4 Store and Repeat Messages
+rsuMsgRepeat OBJECT IDENTIFIER ::= { rsu 3 }
+
+-- 5.4.1 Maximum Number of Store and Repeat Messages
+maxRsuMsgRepeat OBJECT-TYPE
+    SYNTAX Integer32 (1..255)
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " The maximum number of Store and Repeat messages this Roadside Unit supports. This object indicates the maximum rows which appears in the rsuMsgRepeatStatusTable object.
+    1.3.6.1.4.1.1206.4.2.18.3.1"
+::= { rsuMsgRepeat 1 }
+
+-- 5.4.2 Store and Repeat Table
+rsuMsgRepeatStatusTable OBJECT-TYPE
+    SYNTAX SEQUENCE OF RsuMsgRepeatStatusEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " Provides configuration information for each Store and Repeat message to be sent by a Roadside Unit.
+    dynamic
+    1.3.6.1.4.1.1206.4.2.18.3.2"
+::= { rsuMsgRepeat 2 }
+
+rsuMsgRepeatStatusEntry OBJECT-TYPE
+    SYNTAX RsuMsgRepeatStatusEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " A row describing an RSU Store and Repeat Message.
+    1.3.6.1.4.1.1206.4.2.18.3.2.1"
+    INDEX { rsuMsgRepeatIndex }
+::= {rsuMsgRepeatStatusTable 1 }
+
+RsuMsgRepeatStatusEntry ::= SEQUENCE {
+    rsuMsgRepeatIndex RsuTableIndex,
+    rsuMsgRepeatPsid RsuPsidTC,
+    rsuMsgRepeatTxChannel Integer32,
+    rsuMsgRepeatTxInterval Integer32,
+    rsuMsgRepeatDeliveryStart DateAndTime,
+    rsuMsgRepeatDeliveryStop DateAndTime,
+    rsuMsgRepeatPayload OCTET STRING,
+    rsuMsgRepeatEnable INTEGER,
+    rsuMsgRepeatStatus RowStatus,
+    rsuMsgRepeatPriority Integer32,
+    rsuMsgRepeatOptions BITS }
+
+-- 5.4.2.1 Stored Message Index
+rsuMsgRepeatIndex OBJECT-TYPE
+    SYNTAX RsuTableIndex
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " Store and Repeat Message Index. This index shall not exceed maxRsuMsgRepeat.
+    1.3.6.1.4.1.1206.4.2.18.3.2.1.1"
+::= { rsuMsgRepeatStatusEntry 1 }
+
+-- 5.4.2.2 Stored Message PSID
+rsuMsgRepeatPsid OBJECT-TYPE
+    SYNTAX RsuPsidTC
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " The Provider Service Identifier (PSID) for the Store and Repeat Message. The current PSID assignments can be found at https://standards.ieee.org/products-services/regauth/psid/public.html.
+    1.3.6.1.4.1.1206.4.2.18.3.2.1.2"
+::= { rsuMsgRepeatStatusEntry 2 }
+
+-- 5.4.2.3 Stored Message Transmission Channel
+rsuMsgRepeatTxChannel OBJECT-TYPE
+    SYNTAX Integer32 (0..255)
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " The transmission channel the Store and Repeat Message is to be transmitted. For DSRC radios in the United States, the transmission channel is from 172 to 184, as defined by IEEE 802.11. For PC5 radios in the United States, the channel number is found in Table 7 of NTCIP 1218 v01.
+    1.3.6.1.4.1.1206.4.2.18.3.2.1.3"
+::= { rsuMsgRepeatStatusEntry 3 }
+
+-- 5.4.2.4 Stored Message Transmission Interval
+rsuMsgRepeatTxInterval OBJECT-TYPE
+    SYNTAX Integer32 (1..2147483647)
+    UNITS "millisecond"
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Time interval in milliseconds between the transmission of two successive Store and Repeat Messages.
+    1.3.6.1.4.1.1206.4.2.18.3.2.1.4"
+::= { rsuMsgRepeatStatusEntry 4 }
+
+-- 5.4.2.5 Stored Message Start Time
+rsuMsgRepeatDeliveryStart OBJECT-TYPE
+    SYNTAX DateAndTime
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Store and Repeat Message delivery start time in UTC. DateAndTime of length 8 octets.
+    1.3.6.1.4.1.1206.4.2.18.3.2.1.5"
+::= { rsuMsgRepeatStatusEntry 5 }
+
+-- 5.4.2.6 Stored Message Stop Time
+rsuMsgRepeatDeliveryStop OBJECT-TYPE
+    SYNTAX DateAndTime
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Store and Repeat Message delivery stop time in UTC. DateAndTime of length 8 octets.
+    1.3.6.1.4.1.1206.4.2.18.3.2.1.6"
+::= { rsuMsgRepeatStatusEntry 6 }
+
+-- 5.4.2.7 Stored Message Payload
+rsuMsgRepeatPayload OBJECT-TYPE
+    SYNTAX OCTET STRING (SIZE(0..2302))
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Payload of Store and Repeat message.
Length limit derived from IEEE1609dot3-MIB. For SAE J2735-201603 messages, this object is the UPER encoded MessageFrame.
+    1.3.6.1.4.1.1206.4.2.18.3.2.1.7"
+::= { rsuMsgRepeatStatusEntry 7 }
+
+-- 5.4.2.8 Stored Message Enabled Transmission
+rsuMsgRepeatEnable OBJECT-TYPE
+    SYNTAX INTEGER { off (0), on (1) }
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Set this bit to enable transmission of the message. 0=off/do not transmit, 1=on/transmit.
+    1.3.6.1.4.1.1206.4.2.18.3.2.1.8"
+::= { rsuMsgRepeatStatusEntry 8 }
+
+-- 5.4.2.9 Stored Message Status
+rsuMsgRepeatStatus OBJECT-TYPE
+    SYNTAX RowStatus
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Create (4) and destroy (6) row entry.
+    1.3.6.1.4.1.1206.4.2.18.3.2.1.9"
+::= { rsuMsgRepeatStatusEntry 9 }
+
+-- 5.4.2.10 Stored Message Priority
+rsuMsgRepeatPriority OBJECT-TYPE
+    SYNTAX Integer32 (0..63)
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Priority assigned to the Store and Repeat message. Priority values are defined by IEEE 1609.3-2016 for DSRC radios.
+    1.3.6.1.4.1.1206.4.2.18.3.2.1.10"
+::= { rsuMsgRepeatStatusEntry 10 }
+
+-- 5.4.2.11 Stored Message Options
+rsuMsgRepeatOptions OBJECT-TYPE
+    SYNTAX BITS { bypass(0),
+                  secure(1),
+                  shortTerm(2),
+                  longTerm(3) }
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " A bit-mapped value as defined below for configuring the message.
+    Bit 0 0=Bypass1609.2, 1=Process1609.2
+    Bit 1 0=Secure, 1=Unsecure
+    Bit 2 0=ContXmit, 1=NoXmitShortTermXceeded
+    Bit 3 0=ContXmit, 1=NoXmitLongTermXceeded
+
+    Bit 0 - Indicates if the RSU is to bypass 1609.2 processing of the message to the V2X interface. This allows the RSU to send the message that has been signed and/or encrypted by the TMC. Note the RSU would still wrap the message payload in a WSMP header.
+    Bit 1 - Indicates if the message should be secured (signed or encrypted) prior to transmission to the V2X Interface. How the message is to be secured is determined by its security profile. This bit is ignored if Bit 0=0 (bypass).
+    Bit 2 - Indicates if the message should continue to be transmitted if the short-term communications loss period (rsuShortCommLossTime) is exceeded. 0 indicates the message should continue to be transmitted. Default value is 0.
+    Bit 3 - Indicates if the message should continue to be transmitted if the long-term communications loss period (rsuLongCommLossTime) is exceeded. 0 indicates the message should continue to be transmitted. Default value is 0.
+    1.3.6.1.4.1.1206.4.2.18.3.2.1.11"
+::= { rsuMsgRepeatStatusEntry 11 }
+
+-- 5.4.3 Delete All Stored Messages
+rsuMsgRepeatDeleteAll OBJECT-TYPE
+    SYNTAX INTEGER (0..1)
+    MAX-ACCESS read-write
+    STATUS current
+    DESCRIPTION " This object when set to TRUE (one) shall cause the Roadside Unit to delete (destroy) all stored messages in the rsuMsgRepeatStatusTable. This object shall automatically return to FALSE (zero) after all rows in the rsuMsgRepeatStatusTable have been deleted.
+    1.3.6.1.4.1.1206.4.2.18.3.3"
+::= { rsuMsgRepeat 3 }
+
+-- 5.5 Immediate Forward Messages
+rsuIFM OBJECT IDENTIFIER ::= { rsu 4 }
+
+-- 5.5.1 Maximum Number of Immediate Forward Messages
+maxRsuIFMs OBJECT-TYPE
+    SYNTAX Integer32 (1..255)
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " The maximum number of Immediate Forward messages this Roadside Unit supports. This object indicates the maximum rows which appears in the rsuIFMStatusTable object.
+    1.3.6.1.4.1.1206.4.2.18.4.1"
+::= { rsuIFM 1 }
+
+-- 5.5.2 Immediate Forward Table
+rsuIFMStatusTable OBJECT-TYPE
+    SYNTAX SEQUENCE OF RsuIFMStatusEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " Provides configuration parameters for each Immediate Forward message sent by a Roadside Unit.
+    dynamic
+    1.3.6.1.4.1.1206.4.2.18.4.2"
+::= { rsuIFM 2 }
+
+rsuIFMStatusEntry OBJECT-TYPE
+    SYNTAX RsuIFMStatusEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " A row describing an RSU Immediate Forward Message Status.
+    1.3.6.1.4.1.1206.4.2.18.4.2.1"
+    INDEX { rsuIFMIndex }
+::= {rsuIFMStatusTable 1 }
+
+RsuIFMStatusEntry ::= SEQUENCE {
+    rsuIFMIndex RsuTableIndex,
+    rsuIFMPsid RsuPsidTC,
+    rsuIFMTxChannel Integer32,
+    rsuIFMEnable INTEGER,
+    rsuIFMStatus RowStatus,
+    rsuIFMPriority Integer32,
+    rsuIFMOptions BITS,
+    rsuIFMPayload OCTET STRING }
+
+-- 5.5.2.1 Forward Message Index
+rsuIFMIndex OBJECT-TYPE
+    SYNTAX RsuTableIndex
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " Immediate Forward Message Index. This index shall not exceed maxRsuIFMs.
+    1.3.6.1.4.1.1206.4.2.18.4.2.1.1"
+::= { rsuIFMStatusEntry 1 }
+
+-- 5.5.2.2 Forward Message PSID
+rsuIFMPsid OBJECT-TYPE
+    SYNTAX RsuPsidTC
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " The Provider Service Identifier (PSID) for the Immediate Forward Message. The current PSID assignments can be found at https://standards.ieee.org/products-services/regauth/psid/public.html.
+    1.3.6.1.4.1.1206.4.2.18.4.2.1.2"
+::= { rsuIFMStatusEntry 2}
+
+-- 5.5.2.3 Forward Message Transmission Channel
+rsuIFMTxChannel OBJECT-TYPE
+    SYNTAX Integer32 (0..255)
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " The transmission channel the Immediate Forward Message is to be transmitted. For DSRC radios in the United States, the transmission channel is from 172 to 184, as defined by IEEE 802.11. For PC5 radios in the United States, the channel number is found in Table 7 of NTCIP 1218 v01.
+    1.3.6.1.4.1.1206.4.2.18.4.2.1.3"
+::= { rsuIFMStatusEntry 3 }
+
+-- 5.5.2.4 Forward Message Enable
+rsuIFMEnable OBJECT-TYPE
+    SYNTAX INTEGER { off (0), on (1) }
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Set this bit to enable transmission of the message. 1 is to enable transmission.
+    1.3.6.1.4.1.1206.4.2.18.4.2.1.4"
+::= { rsuIFMStatusEntry 4 }
+
+-- 5.5.2.5 Forward Message Status
+rsuIFMStatus OBJECT-TYPE
+    SYNTAX RowStatus
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Create (4) and destroy (6) row entry.
+    1.3.6.1.4.1.1206.4.2.18.4.2.1.5"
+::= { rsuIFMStatusEntry 5 }
+
+-- 5.5.2.6 Forward Message Priority
+rsuIFMPriority OBJECT-TYPE
+    SYNTAX Integer32 (0..63)
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Priority assigned to the Immediate Forward message. Priority values defined by IEEE 1609.3-2016 for DSRC radios.
+    1.3.6.1.4.1.1206.4.2.18.4.2.1.6"
+::= { rsuIFMStatusEntry 6 }
+
+-- 5.5.2.7 Forward Message Options
+rsuIFMOptions OBJECT-TYPE
+    SYNTAX BITS { bypass(0),
+                  secure(1),
+                  shortTerm(2),
+                  longTerm(3) }
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " A bit-mapped value as defined below for configuring the message.
+    Bit 0 0=Bypass1609.2, 1=Process1609.2
+    Bit 1 0=Secure, 1=Unsecure
+    Bit 2 0=ContXmit, 1=NoXmitShortTermXceeded
+    Bit 3 0=ContXmit, 1=NoXmitLongTermXceeded
+
+    Bit 0 - Indicates if the RSU is to bypass 1609.2 processing of the message to the V2X interface. This allows the RSU to send the message that has been signed and/or encrypted by the TMC.
Note the RSU would still wrap the message payload in a WSMP header.
+    Bit 1 - Indicates if the message should be secured (signed or encrypted) prior to transmission to the V2X Interface. How the message is to be secured is determined by its security profile. This bit is ignored if Bit 0=0 (bypass).
+    Bit 2 - Indicates if the message should continue to be transmitted if the short-term communications loss period (rsuShortCommLossTime) is exceeded. 0 indicates the message should continue to be transmitted. Default value is 0.
+    Bit 3 - Indicates if the message should continue to be transmitted if the long-term communications loss period (rsuLongCommLossTime) is exceeded. 0 indicates the message should continue to be transmitted. Default value is 0.
+    1.3.6.1.4.1.1206.4.2.18.4.2.1.7"
+::= { rsuIFMStatusEntry 7 }
+
+-- 5.5.2.8 Forward Message Payload
+rsuIFMPayload OBJECT-TYPE
+    SYNTAX OCTET STRING (SIZE(0..2302))
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Payload of Immediate Forward message. Length limit derived from IEEE1609dot3-MIB. For SAE J2735-201603 messages, this object is the UPER encoded MessageFrame. This payload remains in this object until replaced by another payload.
+    1.3.6.1.4.1.1206.4.2.18.4.2.1.8"
+::= { rsuIFMStatusEntry 8 }
+
+-- 5.6 Received Messages
+rsuReceivedMsg OBJECT IDENTIFIER ::= { rsu 5 }
+
+-- 5.6.1 Maximum Number of Messages
+maxRsuReceivedMsgs OBJECT-TYPE
+    SYNTAX Integer32 (1..255)
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " The maximum number of received message types (from the V2X interface), times and destinations for forwarding this Roadside Unit supports. This object indicates the maximum rows which appears in the rsuReceivedMsgTable object.
+    1.3.6.1.4.1.1206.4.2.18.5.1"
+::= { rsuReceivedMsg 1 }
+
+-- 5.6.2 Received Messages Table
+rsuReceivedMsgTable OBJECT-TYPE
+    SYNTAX SEQUENCE OF RsuReceivedMsgEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " Contains the PSID being forwarded to a network host, the IP Address and port number of the destination host, as well as other configuration parameters as defined.
+    dynamic
+    1.3.6.1.4.1.1206.4.2.18.5.2"
+::= { rsuReceivedMsg 2 }
+
+rsuReceivedMsgEntry OBJECT-TYPE
+    SYNTAX RsuReceivedMsgEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " A row describing the RSU Message Forwarding types.
+    1.3.6.1.4.1.1206.4.2.18.5.2.1"
+    INDEX { rsuReceivedMsgIndex }
+::= { rsuReceivedMsgTable 1 }
+
+RsuReceivedMsgEntry ::= SEQUENCE {
+    rsuReceivedMsgIndex RsuTableIndex,
+    rsuReceivedMsgPsid RsuPsidTC,
+    rsuReceivedMsgDestIpAddr DisplayString,
+    rsuReceivedMsgDestPort Integer32,
+    rsuReceivedMsgProtocol INTEGER,
+    rsuReceivedMsgRssi Integer32,
+    rsuReceivedMsgInterval Integer32,
+    rsuReceivedMsgDeliveryStart DateAndTime,
+    rsuReceivedMsgDeliveryStop DateAndTime,
+    rsuReceivedMsgStatus RowStatus,
+    rsuReceivedMsgSecure INTEGER,
+    rsuReceivedMsgAuthMsgInterval Integer32 }
+
+-- 5.6.2.1 Received Message Index
+rsuReceivedMsgIndex OBJECT-TYPE
+    SYNTAX RsuTableIndex
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " Message Forward Message Table Index. This value shall not exceed maxRsuReceivedMsgs.
+    1.3.6.1.4.1.1206.4.2.18.5.2.1.1"
+::= { rsuReceivedMsgEntry 1 }
+
+-- 5.6.2.2 Received Message PSID
+rsuReceivedMsgPsid OBJECT-TYPE
+    SYNTAX RsuPsidTC
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " The Provider Service Identifier (PSID) of a Message Received by the RSU via the V2X Interface to be forwarded.
The current PSID assignments can be found at https://standards.ieee.org/products-services/regauth/psid/public.html.
+    1.3.6.1.4.1.1206.4.2.18.5.2.1.2"
+::= { rsuReceivedMsgEntry 2 }
+
+-- 5.6.2.3 Received Message Destination Address
+rsuReceivedMsgDestIpAddr OBJECT-TYPE
+    SYNTAX DisplayString (SIZE(0..64))
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Destination Server IP address to forward the message received by the RSU over the V2X Interface. For an IPv4 remote destination, this address can be represented as an IPv4 quad-dotted IP address, for example, 192.33.44.235. For IPv6 remote destination, this address can be represented as zero-compressed simplified IPv6 address, for example 2031:0:130F::9C0:876A:130B.
+    1.3.6.1.4.1.1206.4.2.18.5.2.1.3"
+::= { rsuReceivedMsgEntry 3 }
+
+-- 5.6.2.4 Received Message Destination Port
+rsuReceivedMsgDestPort OBJECT-TYPE
+    SYNTAX Integer32 (1024..65535)
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Destination Server Port Number to forward the message received by the RSU over the V2X Interface.
+    1.3.6.1.4.1.1206.4.2.18.5.2.1.4"
+::= { rsuReceivedMsgEntry 4 }
+
+-- 5.6.2.5 Received Message Destination Protocol
+rsuReceivedMsgProtocol OBJECT-TYPE
+    SYNTAX INTEGER { other (1), udp (2) }
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Transport Protocol between RSU and Server to forward the message received by the RSU over the V2X Interface. The entire WSMP payload, including certificates and signature unless indicated by the rsuReceivedMsgSecure object, are to be forwarded.
+
+    If udp is selected, the message contains the binary data (payload) defined by rsuReceivedMsgSecure.
+
+    A SET to a value of 'other' shall return a badValue error.
+
+    NOTE: If other is selected, this object allows for future extensions, possibly tcp.
+    1.3.6.1.4.1.1206.4.2.18.5.2.1.5"
+    DEFVAL { udp }
+::= { rsuReceivedMsgEntry 5 }
+
+-- 5.6.2.6 Received Message Minimum Signal Strength
+rsuReceivedMsgRssi OBJECT-TYPE
+    SYNTAX Integer32 (-100..-60)
+    UNITS "dBm"
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Minimum Received Signal Strength Level (in dBm units) of the SAE J2735 message received by the RSU over the V2X Interface before forwarding to the Server.
+    1.3.6.1.4.1.1206.4.2.18.5.2.1.6"
+::= { rsuReceivedMsgEntry 6 }
+
+-- 5.6.2.7 Received Message Forwarding Interval
+rsuReceivedMsgInterval OBJECT-TYPE
+    SYNTAX Integer32 (0..10)
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Interval with which RSU forwards messages received by the RSU over the V2X Interface to Server. For example, a value of 3 indicates every 3rd message received is forwarded (33% of the messages received). A value of 0 disables the message forward for this particular entry.
+    1.3.6.1.4.1.1206.4.2.18.5.2.1.7"
+::= { rsuReceivedMsgEntry 7 }
+
+-- 5.6.2.8 Received Message Forwarding Start Time
+rsuReceivedMsgDeliveryStart OBJECT-TYPE
+    SYNTAX DateAndTime
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Start time for RSU to start forwarding SAE J2735 Messages to Server in UTC. DateAndTime is of length 8 octets.
+    1.3.6.1.4.1.1206.4.2.18.5.2.1.8"
+::= { rsuReceivedMsgEntry 8 }
+
+-- 5.6.2.9 Received Message Forwarding Stop Time
+rsuReceivedMsgDeliveryStop OBJECT-TYPE
+    SYNTAX DateAndTime
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Stop time for RSU to stop forwarding SAE J2735 Messages to Server in UTC. DateAndTime is of length 8 octets.
+    1.3.6.1.4.1.1206.4.2.18.5.2.1.9"
+::= { rsuReceivedMsgEntry 9 }
+
+-- 5.6.2.10 Received Message Status
+rsuReceivedMsgStatus OBJECT-TYPE
+    SYNTAX RowStatus
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Create (4) and destroy (6) row entry.
+    1.3.6.1.4.1.1206.4.2.18.5.2.1.10"
+::= { rsuReceivedMsgEntry 10 }
+
+-- 5.6.2.11 Received Message Forward Secure Option
+rsuReceivedMsgSecure OBJECT-TYPE
+    SYNTAX INTEGER (0..1)
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " A value of 0 indicates the RSU is to forward only the WSM message payload without security headers. Specifically this means that either of the following is forwarded, depending on whether the message is signed (a) or unsigned (b): (a) Ieee1609Dot2Data.signedData.tbsData.payload.data.unsecuredData or (b) Ieee1609Dot2Data.unsecuredData.
+
+    A value of 1 indicates the RSU is to forward the entire WSM including the security headers. Specifically this means that the entire Ieee1609Dot2Data frame is forwarded in COER format.
+    1.3.6.1.4.1.1206.4.2.18.5.2.1.11"
+::= { rsuReceivedMsgEntry 11 }
+
+-- 5.6.2.12 Received Message Forward Secure Interval
+rsuReceivedMsgAuthMsgInterval OBJECT-TYPE
+    SYNTAX Integer32 (0..10)
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Interval with which the RSU authenticates messages received from a specific device over the V2X Interface and to be forwarded to the Server (as controlled by rsuDsrcFwdMsgInterval). If enabled, the RSU authenticates the first valid (e.g., as defined by rsuDsrcFwdRssi, rsuDsrcFwdDeliveryStart, rsuDsrcFwdDeliveryStop for this row) message received from a specific device. For a value of 4, the RSU then authenticates every 4th message (after the first message) for that specific device that is marked for forwarding (as determined by rsuDsrcFwdMsgInterval). A value of 0 disables authentication of message to be forward for this particular entry.
+
+    For example, an RSU receives 12 messages that satisfies the criteria for this row (rsuDsrcFwdPsid, rsuDsrcFwdRssi, rsuDsrcFwdDeliveryStart, rsuDsrcFwdDeliveryStop). Messages 1, 2, 5, 6, 7, 10 and 12 are from device A and messages 3, 4, 8, 9 and 11 are from device B. Assuming rsuDsrcFwdMsgInterval has a value of 2, only messages 1, 3, 5, 7, 9, and 11 are 'marked' for forwarding. Of these messages, only messages 1 (the first message from device A), 3 (the first message from device B), 7 (the 2nd message from device A after the first message), and 11 (the 2nd message from device B after the first message) are authenticated.
+    1.3.6.1.4.1.1206.4.2.18.5.2.1.12"
+::= { rsuReceivedMsgEntry 12 }
+
+-- 5.7 GNSS Output
+rsuGnssOutput OBJECT IDENTIFIER ::= { rsu 6 }
+
+-- 5.7.1 GNSS Output Port
+rsuGnssOutputPort OBJECT-TYPE
+    SYNTAX Integer32 (1024..65535)
+    MAX-ACCESS read-write
+    STATUS current
+    DESCRIPTION " GNSS Out External Server Port Number.
+    1.3.6.1.4.1.1206.4.2.18.6.1"
+    DEFVAL { 5115 }
+::= { rsuGnssOutput 1 }
+
+-- 5.7.2 GNSS Output Address
+rsuGnssOutputAddress OBJECT-TYPE
+    SYNTAX DisplayString (SIZE(0..64))
+    MAX-ACCESS read-write
+    STATUS current
+    DESCRIPTION " Remote host address to which to transmit the GNSS string. For an IPv4 remote destination, this address can be represented as an IPv4 quad-dotted IP address, for example, 192.33.44.235. For IPv6 remote destination, this address can be represented as zero-compressed simplified IPv6 address, for example 2031:0:130F::9C0:876A:130B.
+    1.3.6.1.4.1.1206.4.2.18.6.2"
+::= { rsuGnssOutput 2 }
+
+-- 5.7.3 GNSS Output Interface Description
+rsuGnssOutputInterface OBJECT-TYPE
+    SYNTAX DisplayString (SIZE(0..100))
+    MAX-ACCESS read-write
+    STATUS current
+    DESCRIPTION " Local interface on the RSU which to output the GNSS string. E.g., eth0, eth1.
+    1.3.6.1.4.1.1206.4.2.18.6.3"
+::= { rsuGnssOutput 3 }
+
+-- 5.7.4 GNSS Output Interval
+rsuGnssOutputInterval OBJECT-TYPE
+    SYNTAX Integer32 (0..18000)
+    UNITS "second"
+    MAX-ACCESS read-write
+    STATUS current
+    DESCRIPTION " Interval at which to transmit the rsuGNSSOutputString value to the remote host in seconds. A value of 0 indicates the transmission is disabled.
+    1.3.6.1.4.1.1206.4.2.18.6.4"
+    DEFVAL { 1 }
+::= { rsuGnssOutput 4 }
+
+-- 5.7.5 GNSS Data Output
+rsuGnssOutputString OBJECT-TYPE
+    SYNTAX DisplayString (SIZE(0..100))
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " Contains NMEA 0183 GPGGA or GNGGA output string including the $ starting character and the ending <CR><LF>.
+    1.3.6.1.4.1.1206.4.2.18.6.5"
+::= { rsuGnssOutput 5 }
+
+-- 5.7.6 GNSS Reported Latitude
+rsuGnssLat OBJECT-TYPE
+    SYNTAX Integer32 (-900000000..900000001)
+    UNITS "tenth of a microdegree"
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " Contains the actual GNSS latitude for validation of reference GNSS latitude (rsuLocationLat) in 10^-7 degrees. The value 900000001 represents unknown.
+    1.3.6.1.4.1.1206.4.2.18.6.6"
+    DEFVAL { 900000001 }
+::= { rsuGnssOutput 6 }
+
+-- 5.7.7 GNSS Reported Longitude
+rsuGnssLon OBJECT-TYPE
+    SYNTAX Integer32 (-1800000000..1800000001)
+    UNITS "tenth of a microdegree"
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " Contains the actual GNSS longitude for validation of reference GNSS longitude (rsuLocationLong) in 10^-7 degrees. The value 1800000001 represents unknown.
+    1.3.6.1.4.1.1206.4.2.18.6.7"
+    DEFVAL { 1800000001 }
+::= { rsuGnssOutput 7 }
+
+-- 5.7.8 GNSS Reported Elevation
+rsuGnssElv OBJECT-TYPE
+    SYNTAX Integer32 (-100000..1000001)
+    UNITS "centimeter"
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " Contains the actual GNSS elevation for validation of reference GNSS elevation (rsuLocationElv) in centimeters above the reference ellipsoid as defined by the WGS-84. The value of 1000001 represents unknown.
+    1.3.6.1.4.1.1206.4.2.18.6.8"
+    DEFVAL { 1000001 }
+::= { rsuGnssOutput 8 }
+
+-- 5.7.9 GNSS Allowable Location Deviation
+rsuGnssMaxDeviation OBJECT-TYPE
+    SYNTAX Integer32 (0..20000)
+    UNITS "meter"
+    MAX-ACCESS read-write
+    STATUS current
+    DESCRIPTION " Contains the maximum allowable deviation between the actual 2D GNSS coordinates of the RSU (rsuGnssLat, rsuGnssLon) and the reference GNSS coordinates (rsuLocationLat, rsuLocationLon). A value of 0 disables this feature.
+    1.3.6.1.4.1.1206.4.2.18.6.9"
+::= { rsuGnssOutput 9 }
+
+-- 5.7.10 GNSS Location Deviation
+rsuLocationDeviation OBJECT-TYPE
+    SYNTAX Integer32 (0..20001)
+    UNITS "meter"
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " The 2D deviation, in meters, between the reported GNSS coordinates (rsuGnssLat, rsuGnssLon) and the RSU's reference location (rsuLocationLat, rsuLocationLon).
+    Values of 0 to 19,999 provide a range from 0 to 19,999 meters. The value of 20,000 represents 20,000 meters or more. The value of 20,001 represents unknown.
+    1.3.6.1.4.1.1206.4.2.18.6.10"
+::= { rsuGnssOutput 10 }
+
+-- 5.7.11 RSU Position Error
+rsuGnssPositionError OBJECT-TYPE
+    SYNTAX Integer32 (0..200001)
+    UNITS "tenth of a meter"
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " Contains the estimated average position error in tenths of a meter, at 67% confidence (one standard deviation). Values of 0 to 200,000 provide a range from 0 meters to 20,000 meters. The value of 200,001 represents unknown.
+    1.3.6.1.4.1.1206.4.2.18.6.11"
+    DEFVAL { 200001 }
+::= { rsuGnssOutput 11 }
+
+-- 5.8 Interface Log
+rsuInterfaceLog OBJECT IDENTIFIER ::= { rsu 7 }
+
+-- 5.8.1 Maximum Number of Interface Logs
+maxRsuInterfaceLogs OBJECT-TYPE
+    SYNTAX Integer32 (1..255)
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION " The maximum number of active Interface Logs this Roadside Unit supports. This object indicates the maximum rows which appears in the rsuInterfaceLogTable object.
+    1.3.6.1.4.1.1206.4.2.18.7.1"
+::= { rsuInterfaceLog 1 }
+
+-- 5.8.2 Interface Log Table
+rsuInterfaceLogTable OBJECT-TYPE
+    SYNTAX SEQUENCE OF RsuInterfaceLogEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " Provides configuration information for capturing log files for a communication interface. The index represents the interface for which these configurations apply.
+    dynamic
+    1.3.6.1.4.1.1206.4.2.18.7.2"
+::= { rsuInterfaceLog 2 }
+
+rsuInterfaceLogEntry OBJECT-TYPE
+    SYNTAX RsuInterfaceLogEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " A row describing RSU Interface Log.
+    1.3.6.1.4.1.1206.4.2.18.7.2.1"
+    INDEX { rsuIfaceLogIndex }
+::= {rsuInterfaceLogTable 1 }
+
+RsuInterfaceLogEntry ::= SEQUENCE {
+    rsuIfaceLogIndex RsuTableIndex,
+    rsuIfaceGenerate INTEGER,
+    rsuIfaceMaxFileSize Integer32,
+    rsuIfaceMaxFileTime Integer32,
+    rsuIfaceLogByDir INTEGER,
+    rsuIfaceName DisplayString,
+    rsuIfaceStoragePath DisplayString,
+    rsuIfaceLogName DisplayString,
+    rsuIfaceLogStart DateAndTime,
+    rsuIfaceLogStop DateAndTime,
+    rsuIfaceLogOptions BITS,
+    rsuIfaceLogStatus RowStatus }
+
+-- 5.8.2.1 Interface Log Index
+rsuIfaceLogIndex OBJECT-TYPE
+    SYNTAX RsuTableIndex
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION " Interface Logging Index. This value shall not exceed maxRsuInterfaceLogs.
+    1.3.6.1.4.1.1206.4.2.18.7.2.1.1"
+::= { rsuInterfaceLogEntry 1 }
+
+-- 5.8.2.2 Interface Log Enable
+rsuIfaceGenerate OBJECT-TYPE
+    SYNTAX INTEGER { off (0),
+                     on (1) }
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Set this bit to Enable / Disable interface logging for this row. 1 is to enable interface logging.
+    1.3.6.1.4.1.1206.4.2.18.7.2.1.2"
+::= { rsuInterfaceLogEntry 2 }
+
+-- 5.8.2.3 Interface Log Maximum File Size
+rsuIfaceMaxFileSize OBJECT-TYPE
+    SYNTAX Integer32 (1..40)
+    UNITS "megabyte"
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Maximum Interface Log File Size in megabytes, default is 5.
+    1.3.6.1.4.1.1206.4.2.18.7.2.1.3"
+    DEFVAL { 5 }
+::= { rsuInterfaceLogEntry 3 }
+
+-- 5.8.2.4 Interface Log Maximum Collection Time
+rsuIfaceMaxFileTime OBJECT-TYPE
+    SYNTAX Integer32 (1..48)
+    UNITS "hour"
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Maximum Collection time for Interface Logging in hours, default is 24.
+    1.3.6.1.4.1.1206.4.2.18.7.2.1.4"
+::= { rsuInterfaceLogEntry 4 }
+
+-- 5.8.2.5 Interface Log Direction Separation
+rsuIfaceLogByDir OBJECT-TYPE
+    SYNTAX INTEGER { inboundOnly (1),
+                     outboundOnly (2),
+                     biSeparate (3),
+                     biCombined (4) }
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Sets which direction of communications traffic to capture in the interface data log. biSeparate (3) indicates both directions are to be captured but in separate interface data log files, while biCombined (4) indicates both directions are to be captured in the same interface data log file.
+    1.3.6.1.4.1.1206.4.2.18.7.2.1.5"
+::= { rsuInterfaceLogEntry 5 }
+
+-- 5.8.2.6 Interface Name
+rsuIfaceName OBJECT-TYPE
+    SYNTAX DisplayString (SIZE(0..127))
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Identifies the name of the interface for which the logs defined by this row are to be managed. E.g., wlan0, gnss, dsrc.
+    1.3.6.1.4.1.1206.4.2.18.7.2.1.6"
+::= { rsuInterfaceLogEntry 6 }
+
+-- 5.8.2.7 Interface Log Storage Path
+rsuIfaceStoragePath OBJECT-TYPE
+    SYNTAX DisplayString (SIZE(1..255))
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Indicates the storage path of the interface file logs. The path indicated here shall be relative to the base directory (see rsuSysDir). The base directory is specified by the string '/' (one forward slash). A subdirectory from the base may be specified by the string '/subdir'.
+    1.3.6.1.4.1.1206.4.2.18.7.2.1.7"
+::= { rsuInterfaceLogEntry 7 }
+
+-- 5.8.2.8 Interface Log Name
+rsuIfaceLogName OBJECT-TYPE
+    SYNTAX DisplayString (SIZE(12..172))
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION " Indicates the filename used when storing new interface data logs. This parameter shall be in the format ___