diff --git a/common/interfaces.ts b/common/interfaces.ts
index f55ae405..81f2b2dd 100644
--- a/common/interfaces.ts
+++ b/common/interfaces.ts
@@ -281,8 +281,8 @@ export type SearchPipelineConfig = {
export type MLInferenceProcessor = IngestProcessor & {
ml_inference: {
model_id: string;
- input_map?: {};
- output_map?: {};
+ input_map?: {}[];
+ output_map?: {}[];
[key: string]: any;
};
};
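
For reference, a minimal sketch of a processor config using the new array-of-objects map shape; the model id and field names below are hypothetical, not taken from this PR:

```ts
// Hypothetical ml_inference processor config (names are illustrative only).
const processor = {
  ml_inference: {
    model_id: 'my-model-id',
    // each input_map entry pairs a model input field with a source field
    input_map: [{ context: 'review' }],
    // each output_map entry pairs a new field with a model output field
    output_map: [{ llm_response: 'response' }],
  },
};
```
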
diff --git a/public/general_components/results/ml_response.tsx b/public/general_components/results/ml_outputs.tsx
similarity index 63%
rename from public/general_components/results/ml_response.tsx
rename to public/general_components/results/ml_outputs.tsx
index 036e3834..c9a82200 100644
--- a/public/general_components/results/ml_response.tsx
+++ b/public/general_components/results/ml_outputs.tsx
@@ -18,34 +18,26 @@ import {
ML_RESPONSE_PROCESSOR_EXAMPLE_DOCS_LINK,
} from '../../../common';
-interface MLResponseProps {
- mlResponse: {};
+interface MLOutputsProps {
+ mlOutputs: {};
}
/**
- * Small component to render the ML response within a raw search response.
+ * Small component to render the ML outputs within a raw search response.
*/
-export function MLResponse(props: MLResponseProps) {
+export function MLOutputs(props: MLOutputsProps) {
return (
<>
-      <EuiText size="s">
-        Showing results stored in <EuiCode>ext.ml_inference</EuiCode> from the
-        search response.{' '}
-        <EuiLink href={ML_RESPONSE_PROCESSOR_EXAMPLE_DOCS_LINK} target="_blank">
-          See an example
-        </EuiLink>
-      </EuiText>
-      <EuiSpacer size="m" />
-      {isEmpty(props.mlResponse) ? (
-        <EuiEmptyPrompt title={<h2>No response found</h2>} titleSize="s" />
+      {isEmpty(props.mlOutputs) ? (
+        <EuiEmptyPrompt title={<h2>No outputs found</h2>} titleSize="s" />
) : (
)}
+      <EuiSpacer size="m" />
+      <EuiText size="s">
+        Showing ML outputs stored in <EuiCode>ext.ml_inference</EuiCode> from
+        the search response.{' '}
+        <EuiLink href={ML_RESPONSE_PROCESSOR_EXAMPLE_DOCS_LINK} target="_blank">
+          See an example
+        </EuiLink>
+      </EuiText>
    </>
);
}
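
Callers pass in whatever the search response stored under `ext.ml_inference`; a hedged usage sketch (the wrapper component and `response` shape are hypothetical):

```tsx
import React from 'react';
import { get } from 'lodash';
import { MLOutputs } from './ml_outputs';

// Hypothetical wrapper: extract ext.ml_inference (empty object if absent)
// and hand it to MLOutputs for rendering.
export function MLOutputsPanel(props: { response: any }) {
  return <MLOutputs mlOutputs={get(props.response, 'ext.ml_inference', {})} />;
}
```
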
diff --git a/public/general_components/results/results.tsx b/public/general_components/results/results.tsx
index 3eac32ba..b6e4b46d 100644
--- a/public/general_components/results/results.tsx
+++ b/public/general_components/results/results.tsx
@@ -3,8 +3,8 @@
* SPDX-License-Identifier: Apache-2.0
*/
-import React, { useState } from 'react';
-import { get } from 'lodash';
+import React, { useEffect, useState } from 'react';
+import { get, isEmpty } from 'lodash';
import {
EuiPanel,
EuiFlexGroup,
@@ -14,7 +14,7 @@ import {
import { SearchResponse } from '../../../common';
import { ResultsTable } from './results_table';
import { ResultsJSON } from './results_json';
-import { MLResponse } from './ml_response';
+import { MLOutputs } from './ml_outputs';
interface ResultsProps {
response: SearchResponse;
@@ -22,8 +22,8 @@ interface ResultsProps {
enum VIEW {
HITS_TABLE = 'hits_table',
+ ML_OUTPUTS = 'ml_outputs',
RAW_JSON = 'raw_json',
- ML_RESPONSE = 'ml_response',
}
/**
@@ -31,8 +31,14 @@ enum VIEW {
* or the raw JSON response.
*/
export function Results(props: ResultsProps) {
- // selected view state
+  // selected view state. auto-navigate to ML outputs if values are found
+  // under "ext.ml_inference" in the search response.
const [selectedView, setSelectedView] = useState(VIEW.HITS_TABLE);
+ useEffect(() => {
+ if (!isEmpty(get(props.response, 'ext.ml_inference', {}))) {
+ setSelectedView(VIEW.ML_OUTPUTS);
+ }
+ }, [props.response]);
return (
        )}
+        {selectedView === VIEW.ML_OUTPUTS && (
+          <MLOutputs mlOutputs={get(props.response, 'ext.ml_inference', {})} />
+        )}
        {selectedView === VIEW.RAW_JSON && (
          <ResultsJSON response={props.response} />
        )}
-        {selectedView === VIEW.ML_RESPONSE && (
-          <MLResponse mlResponse={get(props.response, 'ext.ml_inference', {})} />
-        )}
      </>
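
To illustrate the new auto-navigation: a hypothetical search response where `ext.ml_inference` is populated, which the `useEffect` above would detect and switch the selected view to `ML_OUTPUTS`:

```ts
import { get, isEmpty } from 'lodash';

// All field names and values here are illustrative only.
const response = {
  took: 5,
  hits: {
    total: { value: 1, relation: 'eq' },
    hits: [{ _id: '1', _source: { review: 'Great product!' } }],
  },
  ext: {
    ml_inference: {
      llm_response: 'A generated summary of the matching reviews.',
    },
  },
};

// Non-empty, so the effect calls setSelectedView(VIEW.ML_OUTPUTS).
console.log(!isEmpty(get(response, 'ext.ml_inference', {}))); // true
```
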
diff --git a/public/pages/workflows/new_workflow/quick_configure_modal.tsx b/public/pages/workflows/new_workflow/quick_configure_modal.tsx
index 854be4ac..6f307327 100644
--- a/public/pages/workflows/new_workflow/quick_configure_modal.tsx
+++ b/public/pages/workflows/new_workflow/quick_configure_modal.tsx
@@ -741,8 +741,7 @@ function updateRAGSearchResponseProcessors(
llmInterface: ModelInterface | undefined
): WorkflowConfig {
config.search.enrichResponse.processors.forEach((processor, idx) => {
- // prefill ML inference. By default, store the inference results
- // under the `ext.ml_inference` response body.
+ // prefill ML inference
if (processor.type === PROCESSOR_TYPE.ML) {
config.search.enrichResponse.processors[idx].fields.forEach((field) => {
if (field.id === 'model' && fields.llmId) {
@@ -785,7 +784,7 @@ function updateRAGSearchResponseProcessors(
...outputMap[0],
value: {
transformType: TRANSFORM_TYPE.FIELD,
- value: `ext.ml_inference.${fields.llmResponseField}`,
+ value: fields.llmResponseField,
},
};
} else {
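
The net effect here: the RAG output map entry now stores the bare LLM response field, and the `ext.ml_inference.` prefix is applied later during template conversion (see `config_to_template_utils.ts` below). A sketch of the resulting entry value, assuming a hypothetical `llmResponseField` of `llm_response`:

```ts
// Shape of the output map entry value after this change (illustrative).
const outputMapEntryValue = {
  transformType: TRANSFORM_TYPE.FIELD,
  value: 'llm_response', // was `ext.ml_inference.llm_response`
};
```
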
diff --git a/public/utils/config_to_template_utils.ts b/public/utils/config_to_template_utils.ts
index 2b0c6552..65c7b093 100644
--- a/public/utils/config_to_template_utils.ts
+++ b/public/utils/config_to_template_utils.ts
@@ -4,7 +4,7 @@
*/
import { FormikValues } from 'formik';
-import { isEmpty } from 'lodash';
+import { get, isEmpty } from 'lodash';
import {
TemplateFlows,
TemplateNode,
@@ -233,6 +233,31 @@ export function processorConfigsToTemplateProcessors(
);
}
+    // process where the returned values from the output map should be stored.
+    // by default, if many-to-one, prefix each key with "ext.ml_inference" so the
+    // outputs are stored in a standalone field in the search response instead of
+    // being appended to each document redundantly.
+ const oneToOne = formValues?.one_to_one as boolean | undefined;
+    if (
+      oneToOne === false &&
+      processor.ml_inference?.output_map !== undefined
+    ) {
+ const updatedOutputMap = processor.ml_inference.output_map?.map(
+ (mapEntry) => {
+ let updatedMapEntry = {};
+ Object.keys(mapEntry).forEach((key) => {
+ updatedMapEntry = {
+ ...updatedMapEntry,
+ [`ext.ml_inference.${key}`]: get(mapEntry, key),
+ };
+ });
+ return updatedMapEntry;
+ }
+ );
+ processor.ml_inference.output_map = updatedOutputMap;
+ }
+
// process optional fields
let additionalFormValues = {} as FormikValues;
Object.keys(formValues).forEach((formKey: string) => {
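
Concretely, for a many-to-one processor (`one_to_one` set to `false`), the new block rewrites every key in every output map entry; a standalone sketch of the same transformation with a hypothetical map:

```ts
// input:  [{ llm_response: 'response' }]
// output: [{ 'ext.ml_inference.llm_response': 'response' }]
const outputMap = [{ llm_response: 'response' }];
const updatedOutputMap = outputMap.map((mapEntry) =>
  Object.fromEntries(
    Object.entries(mapEntry).map(([key, value]) => [
      `ext.ml_inference.${key}`,
      value,
    ])
  )
);
console.log(updatedOutputMap);
```
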