diff --git a/docker/kcidb/Dockerfile b/docker/kcidb/Dockerfile
index 8fa78da00..7cc8b9469 100644
--- a/docker/kcidb/Dockerfile
+++ b/docker/kcidb/Dockerfile
@@ -10,4 +10,4 @@ MAINTAINER "KernelCI TSC"
 
 RUN pip install git+https://github.com/kernelci/kcidb.git
 # Install logspec
-RUN pip install git+https://github.com/kernelci/logspec.git@fc48496f93602f6e0029e4c5100e8a2839bf0ea6
+RUN pip install git+https://github.com/kernelci/logspec.git@e9316545d73877c301336656d06c18dad36d1e17
diff --git a/src/kernelci_pipeline/logspec_api.py b/src/kernelci_pipeline/logspec_api.py
index 4e75e1548..bc065ce54 100644
--- a/src/kernelci_pipeline/logspec_api.py
+++ b/src/kernelci_pipeline/logspec_api.py
@@ -88,7 +88,7 @@ def get_logspec_errors(parsed_data, parser):
 
     of errors.
     """
-    infra_error_detected = False
+    new_status = None
     errors_list = []
     logspec_version = logspec.main.logspec_version()
     base_dict = {
@@ -114,14 +114,15 @@ def create_special_boot_error(summary):
 
 
     # Check for unclean boot state
     if parsed_data.get('linux.boot.prompt'):
-        error = create_special_boot_error('Unclean boot. Reached prompt but marked as failed.')
+        error = create_special_boot_error('WARNING: Unclean boot. Reached prompt but marked as failed.')
         errors_list.append(error)
+        new_status = 'PASS'
     # Check for incomplete boot process
     elif not parsed_data.get('bootloader.done') or not parsed_data.get('linux.boot.kernel_started'):
         error = create_special_boot_error('Bootloader did not finish or kernel did not start.')
         errors_list.append(error)
-        infra_error_detected = True
+        new_status = 'MISS'
 
     # ----------------------------------------------------------------------
     # Parse errors detected by logspec
@@ -139,7 +140,7 @@ def create_special_boot_error(summary):
                         for field in error._signature_fields}
         errors_list.append(logspec_dict)
 
-    return errors_list, infra_error_detected
+    return errors_list, new_status
 
 
 def new_issue(logspec_error, object_type):
@@ -223,7 +224,7 @@ def generate_issues_and_incidents(result_id, log_url, object_type, oo_client):
     """Generate issues and incidents"""
     start_state = logspec.main.load_parser(object_types[object_type]['parser'])
     parser = object_types[object_type]['parser']
-    error_list, infra_error_detected = process_log(log_url, parser, start_state)
+    error_list, new_status = process_log(log_url, parser, start_state)
     for error in error_list:
         if error and error['error'].get('signature'):
             issue = new_issue(error, object_type)
@@ -235,4 +236,4 @@ def generate_issues_and_incidents(result_id, log_url, object_type, oo_client):
     # Remove duplicate issues
     parsed_data['issue_node'] = list({issue["id"]: issue for issue in parsed_data['issue_node']}.values())
 
-    return parsed_data, infra_error_detected
+    return parsed_data, new_status
diff --git a/src/send_kcidb.py b/src/send_kcidb.py
index 5056dd8a6..5bc60ae9a 100755
--- a/src/send_kcidb.py
+++ b/src/send_kcidb.py
@@ -712,12 +712,12 @@ def _submit_to_kcidb(self, batch, context):
                 batch['issues'], batch['incidents'], context['client']
             )
             self._nodes_processed(batch['nodes'])
-            return True
         except Exception as exc:
             self.log.error(f"Failed to submit data to KCIDB: {str(exc)}")
             # Don't mark as processed since they were not sent to KCIDB
             batch['nodes'] = []
             return False
+        return True
 
     def _reset_batch_data(self):
         """Reset batch data structures"""
@@ -800,14 +800,14 @@ def _parse_fail_node(self, parsed_node, context, node_type):
             local_file = self._cached_fetch(parsed_node['log_url'])
             local_url = f"file://{local_file}"
 
-            parsed_fail, infra_error_detected = generate_issues_and_incidents(
+            parsed_fail, new_status = generate_issues_and_incidents(
                 parsed_node['id'], local_url, node_type, context['kcidb_oo_client'])
 
-            if infra_error_detected:
+            if new_status:
                 self.log.warning(
-                    f"Infrastructure error detected for {node_type} node "
-                    f"{parsed_node['id']}, changing status from {parsed_node['status']} to MISS")
-                parsed_node['status'] = 'MISS'
+                    f"Changing status from {parsed_node['status']} to {new_status} "
+                    f"for {node_type} node {parsed_node['id']}")
+                parsed_node['status'] = new_status
 
             if parsed_fail['issue_node'] or parsed_fail['incident_node']:
                 self.log.debug(f"Generated issues/incidents: {parsed_fail}")