feat(wrappers): add decoration wrapper #63

Merged
1 change: 0 additions & 1 deletion .gitignore
@@ -15,7 +15,6 @@ dist-ssr
*.local

.env
-examples

# Editor directories and files
.vscode/*
141 changes: 141 additions & 0 deletions examples/openai.ts
@@ -0,0 +1,141 @@
import 'dotenv/config';
import OpenAI from 'openai';

import { LiteralClient } from '../src';

const literalClient = new LiteralClient();

const openai = new OpenAI();

// Instrument the OpenAI client
literalClient.instrumentation.openai();

async function main() {
  // This will be automagically logged by Literal AI
  await openai.chat.completions.create({
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Say this is a test' }]
  });

  await openai.chat.completions.create({
    model: 'gpt-4',
    messages: [
      {
        role: 'user',
        content:
          'Write a README document in markdown explaining the basic usage of the `with-csv` npm library.'
      }
    ]
  });

  await literalClient
    .run({
      name: 'Test run',
      input: {
        content:
          '{"question": ["how to code chat with pdf and use pdfplumber and ollama local",[]],"images_content": {}}'
      },
      output: {
        content: `To create a chat application that interacts with PDF files using \`pdfplumber\` and the Ollama local model in Chainlit, you can follow the example below. This example demonstrates how to upload a PDF, extract text using \`pdfplumber\`, and allow users to ask questions about its content using the Ollama model.

### Step 1: Install Required Packages
Make sure you have the necessary packages installed:
\`\`\`bash
pip install chainlit pdfplumber langchain
\`\`\`

### Step 2: Create the Application Script
Create a Python file named \`app.py\` and add the following code:

\`\`\`python
import os
import pdfplumber
import chainlit as cl
from langchain.llms.ollama import Ollama
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain.schema.runnable import Runnable
from langchain.schema.runnable.config import RunnableConfig
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.schema import Document

# Initialize the Ollama model
model = Ollama(model="llama2")

@cl.on_chat_start
async def on_chat_start():
    files = await cl.AskFileMessage(
        content="Please upload a PDF file to start asking questions.",
        accept=["application/pdf"],
        max_size_mb=20,
        timeout=180,
    ).send()

    file = files[0]
    docs = process_pdf(file)

    message_history = ChatMessageHistory()
    memory = ConversationBufferMemory(memory_key="chat_history", output_key="answer", chat_memory=message_history, return_messages=True)

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You're a knowledgeable assistant who provides accurate answers based on the PDF content."),
            ("human", "{question}"),
        ]
    )

    runnable = prompt | model | StrOutputParser()
    cl.user_session.set("runnable", runnable)

    await cl.Message(content="You can now ask questions about the PDF!").send()

def process_pdf(file):
    text = ""
    with pdfplumber.open(file.path) as pdf:
        for page in pdf.pages:
            text += page.extract_text() + "\n"

    # Split the text into chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    docs = text_splitter.split_text(text)
    documents = [Document(page_content=chunk) for chunk in docs]
    return documents

@cl.on_message
async def on_message(message: cl.Message):
    runnable = cl.user_session.get("runnable")  # type: Runnable
    msg = cl.Message(content="")

    for chunk in await cl.make_async(runnable.stream)(
        {"question": message.content},
        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
    ):
        await msg.stream_token(chunk)

    await msg.send()
\`\`\`

### Step 3: Run the Application
To start the Chainlit application, run the following command in your terminal:
\`\`\`bash
chainlit run app.py
\`\`\`

### Step 4: Interact with the Application
Open your browser and navigate to \`http://localhost:8000\`. You can upload a PDF file and start asking questions about its content.

### Explanation
- **PDF Processing**: The \`process_pdf\` function uses \`pdfplumber\` to extract text from the uploaded PDF file.
- **Text Splitting**: The extracted text is split into manageable chunks using \`RecursiveCharacterTextSplitter\`.
- **Ollama Model**: The Ollama model is used to generate responses based on the extracted text.
- **Conversational Interface**: Users can ask questions, and the model will respond based on the content of the PDF.

This setup allows you to create a conversational interface that can answer questions based on the content of a PDF file using \`pdfplumber\` and the Ollama local model.`
      }
    })
    .send();
}

main();
13 changes: 13 additions & 0 deletions examples/prompt.ts
@@ -0,0 +1,13 @@
import 'dotenv/config';

import { LiteralClient } from '../src';

const literalClient = new LiteralClient();

async function main() {
  const prompt = await literalClient.api.getPrompt('');

  console.log(prompt);
}

main();
3 changes: 2 additions & 1 deletion jest.config.ts
@@ -4,6 +4,7 @@ const config: Config.InitialOptions = {
  verbose: true,
  transform: {
    '^.+\\.ts?$': 'ts-jest'
-  }
+  },
+  testTimeout: 30_000
};
export default config;
35 changes: 24 additions & 11 deletions package-lock.json

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions package.json
@@ -59,6 +59,7 @@
  },
  "peerDependencies": {
    "@ai-sdk/openai": "0.0.x",
+    "@langchain/openai": "^0.2.7",
    "ai": "3.x",
    "langchain": "0.1.x",
    "llamaindex": "0.3.x",
42 changes: 21 additions & 21 deletions src/api.ts
@@ -410,7 +410,6 @@ export class API {
        variables: variables
      }
    });
-
    if (response.data.errors) {
      throw new Error(JSON.stringify(response.data.errors));
    }
@@ -851,21 +850,22 @@ export class API {
   * @returns A Promise resolving to the newly created `Generation` object.
   */
  async createGeneration(generation: Generation) {
-    const mutation = `
-      mutation CreateGeneration($generation: GenerationPayloadInput!) {
-        createGeneration(generation: $generation) {
-          id,
-          type
-        }
-      }
-    `;
-
-    const variables = {
-      generation
-    };
+    const stepId = generation.id;
+    const stepMetadata = generation.metadata;
+    const stepTags = generation.tags;
+
+    delete generation.id;
+
+    const generationAsStep = this.client.step({
+      id: stepId,
+      metadata: stepMetadata,
+      tags: stepTags,
+      generation,
+      name: generation.type ?? '',
+      type: 'llm'
+    });

-    const response = await this.makeGqlCall(mutation, variables);
-    return response.data.createGeneration as PersistedGeneration;
+    return generationAsStep.send();
  }

/**
@@ -929,13 +929,13 @@ (whitespace-only change to the upsertThread mutation)
        $metadata: Json,
        $participantId: String,
        $tags: [String!],
      ) {
        upsertThread(
          id: $threadId
          name: $name
          metadata: $metadata
          participantId: $participantId
          tags: $tags
        ) {
          ${threadFields}
        }
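Note for context: with this change, `createGeneration` no longer issues the `CreateGeneration` GraphQL mutation. The generation is wrapped in an `'llm'` step (reusing the generation's id, metadata, and tags) and persisted through `step.send()`. A minimal call-site sketch; the payload below is illustrative, not a real generation:

```ts
import { LiteralClient } from '../src';

const client = new LiteralClient();

async function persistGeneration() {
  // Illustrative payload: id, metadata, tags and type are the fields
  // the new implementation reads before building the step.
  const generation: any = {
    id: 'generation-id',
    type: 'CHAT',
    metadata: { source: 'sketch' },
    tags: ['demo']
  };

  // Now equivalent to sending an 'llm' step named after generation.type.
  const step = await client.api.createGeneration(generation);
  console.log(step);
}

persistGeneration();
```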
5 changes: 4 additions & 1 deletion src/evaluation/experiment-item-run.ts
@@ -44,7 +44,10 @@ export class ExperimentItemRun extends Step {
            ? currentStore?.rootRun
            : this.type === 'run'
              ? this
-              : null
+              : null,
+        metadata: currentStore?.metadata ?? null,
+        tags: currentStore?.tags ?? null,
+        stepId: currentStore?.stepId ?? null
      },
      async () => {
        try {
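This copy matters because `AsyncLocalStorage` contexts do not merge: each `store.run` replaces the whole store, so any field not forwarded explicitly disappears for code inside the callback. A standalone sketch of the pattern, using a hypothetical two-field store:

```ts
import { AsyncLocalStorage } from 'node:async_hooks';

type Store = { metadata: Record<string, any> | null; tags: string[] | null };

const storage = new AsyncLocalStorage<Store>();

function enterChildContext<T>(cb: () => T): T {
  const parent = storage.getStore();
  // Any field not copied here reads as undefined inside cb; forwarding
  // metadata, tags and stepId is exactly what the change above adds.
  return storage.run(
    { metadata: parent?.metadata ?? null, tags: parent?.tags ?? null },
    cb
  );
}

storage.run({ metadata: { run: 'demo' }, tags: ['demo'] }, () => {
  enterChildContext(() => {
    console.log(storage.getStore()); // { metadata: { run: 'demo' }, tags: ['demo'] }
  });
});
```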
29 changes: 29 additions & 0 deletions src/index.ts
@@ -18,6 +18,9 @@ type StoredContext = {
  currentStep: Step | null;
  currentExperimentItemRunId?: string | null;
  rootRun: Step | null;
+  metadata: Record<string, any> | null;
+  tags: string[] | null;
+  stepId: string | null;
};

/**
@@ -217,4 +220,30 @@ export class LiteralClient {

    return store.rootRun;
  }
+
+  decorate(options: {
+    metadata?: Record<string, any>;
+    tags?: string[];
+    stepId?: string;
+  }) {
+    return {
+      wrap: async <T>(cb: () => T) => {
+        const currentStore = this.store.getStore();
+
+        return this.store.run(
+          {
+            currentThread: currentStore?.currentThread ?? null,
+            currentExperimentItemRunId:
+              currentStore?.currentExperimentItemRunId ?? null,
+            currentStep: currentStore?.currentStep ?? null,
+            rootRun: currentStore?.rootRun ?? null,
+            metadata: options?.metadata ?? null,
+            tags: options?.tags ?? null,
+            stepId: options?.stepId ?? null
+          },
+          () => cb()
+        );
+      }
+    };
+  }
}
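For reference, a usage sketch of the new wrapper: `decorate(...)` returns an object whose `wrap` method runs the callback inside a store carrying the supplied metadata, tags, and step id. The values below are illustrative; how downstream steps consume them depends on the rest of the SDK:

```ts
import { LiteralClient } from '../src';

const literalClient = new LiteralClient();

async function main() {
  // wrap() resolves to whatever the callback returns.
  const result = await literalClient
    .decorate({
      metadata: { environment: 'example' },
      tags: ['decorated'],
      stepId: 'my-step-id'
    })
    .wrap(async () => {
      // Code here runs inside the decorated store.
      return 42;
    });

  console.log(result); // 42
}

main();
```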