#8: rename ailab-llama-search to ailab-llamaindex-search
k-allagbe committed Apr 2, 2024
1 parent a8e4983 · commit e114e93
Showing 10 changed files with 24 additions and 24 deletions.
4 changes: 2 additions & 2 deletions .env.template
@@ -43,8 +43,8 @@ DB_NAME=
# TABLE_NAME=
# COLUMN=

-# ailab-llama-search parameters. Adapt to your needs.
+# ailab-llamaindex-search parameters. Adapt to your needs.
# Ask your cloud admin for the secrets.
LLAMAINDEX_DB_EMBED_MODEL_PARAMS='{"model": "text-embedding-ada-002", "deployment_name": "ada", "api_key": "<azure_openai_api_key>", "api_version": "2023-07-01-preview", "azure_endpoint": "<azure_openai_endpoint>"}'
LLAMAINDEX_DB_VECTOR_STORE_PARAMS='{"database": "llamaindex_db_legacy", "host": "<postgres_host>", "password": "<postgres_password>", "port": "5432", "user": "<postgres_user>", "embed_dim": 1536}'
-LLAMAINDEX_DB_TRANS_PATHS={"id": "node/metadata/id", "chunk_id": "node/metadata/chunk_id", "url": "node/metadata/url", "title": "node/metadata/title", "subtitle": "node/metadata/subtitle", "tokens_count": "node/metadata/tokens_count", "last_updated": "node/metadata/last_updated", "score": "node/metadata/score", "llama_id": "node/id_", "llama_score": "score", "content": "node/text"}
+LLAMAINDEX_DB_TRANS_PATHS={"id": "node/metadata/id", "chunk_id": "node/metadata/chunk_id", "url": "node/metadata/url", "title": "node/metadata/title", "subtitle": "node/metadata/subtitle", "tokens_count": "node/metadata/tokens_count", "last_updated": "node/metadata/last_updated", "score": "node/metadata/score", "llamaindex_id": "node/id_", "llamaindex_score": "score", "content": "node/text"}
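The `LLAMAINDEX_DB_TRANS_PATHS` value maps each output field name to a slash-separated location inside a retrieved node. The package's `transform` helper is only partially visible in this diff, so the following is a minimal sketch, assuming the paths are resolved by walking nested dictionary keys:

```python
# Sketch only (assumed behaviour, not the package's verbatim implementation):
# resolve slash-separated paths such as "node/metadata/id" against a nested dict.
def resolve_path(data: dict, path: str):
    value = data
    for key in path.split("/"):
        value = value[key]
    return value


def transform(node_dict: dict, paths: dict) -> dict:
    return {field: resolve_path(node_dict, path) for field, path in paths.items()}


node = {"score": 0.87, "node": {"id_": "abc", "text": "…", "metadata": {"id": "doc-1"}}}
print(transform(node, {"id": "node/metadata/id", "llamaindex_score": "score", "content": "node/text"}))
# {'id': 'doc-1', 'llamaindex_score': 0.87, 'content': '…'}
```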
@@ -2,18 +2,18 @@

## Overview

-The `ailab-llama-search` package facilitates querying our custom index built using LlamaIndex and PostgreSQL.
+The `ailab-llamaindex-search` package facilitates querying our custom index built using LlamaIndex and PostgreSQL.

## Installation

```bash
-pip install git+https://github.com/ai-cfia/llamaindex-db.git@main#subdirectory=ailab-llama-search
+pip install git+https://github.com/ai-cfia/llamaindex-db.git@main#subdirectory=ailab-llamaindex-search
```

## Usage

```python
-from ailab_llama_search import create_index_object, search
+from ailab_llamaindex_search import create_index_object, search

# adapt these parameters to your needs
embed_model_params = {
@@ -40,8 +40,8 @@ trans_paths = {
"tokens_count": "node/metadata/tokens_count",
"last_updated": "node/metadata/last_updated",
"score": "node/metadata/score",
"llama_id": "node/id_",
"llama_score": "score",
"llamaindex_id": "node/id_",
"llamaindex_score": "score",
"content": "node/text",
}
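# Hedged sketch (assumed completion of this truncated usage example): the exact
# signatures of create_index_object and search are inferred from the fragments
# shown in this diff, and the vector-store values below are placeholders that
# mirror .env.template.
vector_store_params = {
    "database": "llamaindex_db_legacy",
    "host": "<postgres_host>",
    "password": "<postgres_password>",
    "port": "5432",
    "user": "<postgres_user>",
    "embed_dim": 1536,
}

index = create_index_object(embed_model_params, vector_store_params)
results = search("your question here", index)  # how trans_paths is supplied is not shown in this diff
for result in results:
    print(result["title"], result["score"])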

@@ -6,8 +6,8 @@
from llama_index.vector_stores.postgres import PGVectorStore


-class AilabLlamaSearchError(Exception):
-"""Generic Ailab llama search error."""
+class AilabLlamaIndexSearchError(Exception):
+"""Generic Ailab LlamaIndex search error."""


def transform(node_dict: dict, paths: dict):
@@ -25,7 +25,7 @@ def search(
):
if not query:
logging.error("Empty search query received")
-raise AilabLlamaSearchError("search query cannot be empty.")
+raise AilabLlamaIndexSearchError("search query cannot be empty.")

retriever = index.as_retriever(**search_params)
nodes = retriever.retrieve(query)
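Callers that imported the old `AilabLlamaSearchError` must switch to the renamed class along with the new module name. A minimal sketch of an updated call site, assuming the package is installed under its new name and that `index` was built with `create_index_object`:

```python
# Hedged illustration of a caller updated for the rename; not part of this commit.
import logging

from ailab_llamaindex_search import AilabLlamaIndexSearchError, search


def safe_search(query: str, index):
    try:
        return search(query, index)
    except AilabLlamaIndexSearchError:
        logging.exception("ailab-llamaindex-search query failed for %r", query)
        return []
```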
File renamed without changes.
@@ -13,7 +13,7 @@ def read_requirements():


setup(
name="ailab-llama-search",
name="ailab-llamaindex-search",
version="0.1.0",
packages=find_packages(),
install_requires=read_requirements(),
@@ -1,8 +1,8 @@
import unittest
from unittest.mock import MagicMock, patch

-from ailab_llama_search import (
-AilabLlamaSearchError,
+from ailab_llamaindex_search import (
+AilabLlamaIndexSearchError,
VectorStoreIndex,
create_index_object,
search,
@@ -49,20 +49,20 @@ def setUp(self):
self.mock_index.as_retriever.return_value = self.mock_retriever

def test_search_with_empty_query_error(self):
-with self.assertRaises(AilabLlamaSearchError):
+with self.assertRaises(AilabLlamaIndexSearchError):
search("", self.mock_index)

-@patch('ailab_llama_search.transform')
+@patch('ailab_llamaindex_search.transform')
def test_search_calls_transform_on_results(self, mock_transform):
mock_transform.return_value = {'id': 1, 'name': 'Transformed Node'}
results = search("test query", self.mock_index)
self.assertTrue(mock_transform.called)
self.assertEqual(len(results), 1)
self.assertEqual(results[0], {'id': 1, 'name': 'Transformed Node'})

-@patch('ailab_llama_search.AzureOpenAIEmbedding')
-@patch('ailab_llama_search.PGVectorStore.from_params')
-@patch('ailab_llama_search.VectorStoreIndex.from_vector_store')
+@patch('ailab_llamaindex_search.AzureOpenAIEmbedding')
+@patch('ailab_llamaindex_search.PGVectorStore.from_params')
+@patch('ailab_llamaindex_search.VectorStoreIndex.from_vector_store')
def test_create_index_object_initializes_correctly(self, mock_from_vector_store, mock_from_params, mock_azure_openai_embedding):
mock_embed_model = MagicMock()
mock_azure_openai_embedding.return_value = mock_embed_model
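Because `unittest.mock.patch` takes dotted import paths as strings, every patch target has to reference the new module name; stale `ailab_llama_search.*` strings would raise `ModuleNotFoundError` when the tests run. A small illustrative test, assuming the renamed package is installed:

```python
# Illustrative only: shows that patch targets resolve against the renamed module.
import unittest
from unittest.mock import patch

import ailab_llamaindex_search


class PatchTargetExample(unittest.TestCase):
    @patch("ailab_llamaindex_search.transform", return_value={"id": 1})
    def test_patch_uses_new_dotted_path(self, mock_transform):
        self.assertEqual(ailab_llamaindex_search.transform({}, {}), {"id": 1})


if __name__ == "__main__":
    unittest.main()
```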
@@ -2,7 +2,7 @@
import os
import unittest

-from ailab_llama_search import create_index_object, search
+from ailab_llamaindex_search import create_index_object, search
from dotenv import load_dotenv
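The integration test's body is collapsed in this diff; it presumably builds its inputs from the JSON-valued variables defined in `.env.template`. A hedged sketch of that wiring (variable names come from the template above; the real test may differ):

```python
# Assumed wiring only: load the JSON-valued settings from the environment and
# build the index object the integration test would query.
import json
import os

from ailab_llamaindex_search import create_index_object, search
from dotenv import load_dotenv

load_dotenv()

embed_model_params = json.loads(os.environ["LLAMAINDEX_DB_EMBED_MODEL_PARAMS"])
vector_store_params = json.loads(os.environ["LLAMAINDEX_DB_VECTOR_STORE_PARAMS"])
trans_paths = json.loads(os.environ["LLAMAINDEX_DB_TRANS_PATHS"])

index = create_index_object(embed_model_params, vector_store_params)
results = search("labelling requirements for honey", index)
print(results[:3])
```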


Binary file modified docs/img/components.png
8 changes: 4 additions & 4 deletions docs/puml/components.plantuml
@@ -13,16 +13,16 @@ title Component Diagram
interface REST
component "finesse-backend" as fb {
package app as "flask app"
-package ailab_llama_search
-note bottom of ailab_llama_search: pip install ailab-llama-search
+package ailab_llamaindex_search
+note bottom of ailab_llamaindex_search: pip install ailab-llamaindex-search
}
database llamaIndexdb {
package "vector store" as data_llamaindex
}

REST -- app
-app --> ailab_llama_search: uses
-ailab_llama_search --> data_llamaindex : reads
+app --> ailab_llamaindex_search: uses
+ailab_llamaindex_search --> data_llamaindex : reads

legend
<img:../img/logo.png{scale=0.2}>
2 changes: 1 addition & 1 deletion notebooks/design.ipynb
@@ -16,7 +16,7 @@
"source": [
"## LlamaIndex integration in finesse\n",
"\n",
"We expose the search functionality as a package (`ailab-llama-search`) for reusability. \n"
"We expose the search functionality as a package (`ailab-llamaindex-search`) for reusability. \n"
]
},
{
