Commit aa93223

Merge branch 'main' into main
2 parents b9f653f + fbd03e1

File tree

8 files changed: +451 −15 lines

examples/README.md (+1)

@@ -40,6 +40,7 @@ Can be run on `OpenAI` or `Anthropic`
 - `langchain_simple_sequential.py` Simplest Langchain example with 2 chains in sequence
 - `langchain_sqlagent.py` Langchain's SQLAgent for NLP2SQL
 - `multiple_sessions.py` Examples illustrating session scoping for Langchain + Log10
+- `langchain_model_logger.py` Example of OpenAI (chat and text completions) and Anthropic calls with a log10 logger.
 
 ### Anthropic
 

examples/langchain_model_logger.py (+28, new file)

@@ -0,0 +1,28 @@
+from langchain import OpenAI
+from langchain.chat_models import ChatAnthropic
+from langchain.chat_models import ChatOpenAI
+from langchain.schema import HumanMessage, AIMessage, SystemMessage, BaseMessage
+
+from log10.langchain import Log10Callback
+from log10.llm import Log10Config
+
+
+log10_callback = Log10Callback(log10_config=Log10Config())
+
+
+messages = [
+    HumanMessage(content="You are a ping pong machine"),
+    HumanMessage(content="Ping?"),
+]
+
+llm = ChatOpenAI(model_name="gpt-3.5-turbo", callbacks=[log10_callback], temperature=0.5)
+completion = llm.predict_messages(messages)
+print(completion)
+
+llm = ChatAnthropic(model="claude-2", callbacks=[log10_callback], temperature=0.7)
+completion = llm.predict_messages(messages)
+print(completion)
+
+llm = OpenAI(model_name="text-davinci-003", callbacks=[log10_callback], temperature=0.5)
+completion = llm.predict("You are a ping pong machine.\nPing?\n")
+print(completion)
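
Before running the example, Log10Config needs credentials. A minimal sketch of how that setup might look, assuming Log10Config reads its settings from environment variables (the variable names are an assumption, not part of this diff):

import os

# Assumed environment variables consumed by Log10Config; the names are an
# assumption and are not confirmed anywhere in this diff.
os.environ["LOG10_URL"] = "https://log10.io"
os.environ["LOG10_TOKEN"] = "<your-log10-token>"
os.environ["LOG10_ORG_ID"] = "<your-org-id>"

from log10.llm import Log10Config
from log10.langchain import Log10Callback

# With credentials in place, the callback can be attached to any LangChain LLM.
log10_callback = Log10Callback(log10_config=Log10Config())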

log10/anthropic.py (+5, −1)

@@ -13,7 +13,11 @@
 
 
 class Anthropic(LLM):
-    def __init__(self, hparams: dict = None, skip_initialization: bool = False):
+    def __init__(
+        self, hparams: dict = None, skip_initialization: bool = False, log10_config=None
+    ):
+        super().__init__(hparams, log10_config)
+
         if not skip_initialization:
             self.client = anthropic.Anthropic()
         self.hparams = hparams
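
A minimal usage sketch for the updated constructor; the signature and import paths come from this diff, while the hparams key is an assumed Anthropic sampling parameter:

from log10.anthropic import Anthropic
from log10.llm import Log10Config

# Construct the wrapper with the new log10_config parameter so calls are
# logged; max_tokens_to_sample is an assumed Anthropic hyperparameter, not
# something this diff prescribes.
llm = Anthropic(hparams={"max_tokens_to_sample": 128}, log10_config=Log10Config())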

log10/langchain.py (+292, new file)

@@ -0,0 +1,292 @@
+import time
+import uuid
+
+from langchain.schema import HumanMessage, AIMessage, SystemMessage, BaseMessage
+from uuid import UUID
+
+"""Callback handler that logs LangChain LLM calls to Log10."""
+from typing import Any, Dict, List, Optional, Union
+
+from langchain.callbacks.base import BaseCallbackHandler
+from langchain.schema import AgentAction, AgentFinish, LLMResult
+from log10.llm import LLM, Kind, Log10Config, Message
+
+import logging
+
+def kwargs_to_hparams(kwargs: Dict[str, Any]) -> Dict[str, Any]:
+    """Convert LangChain kwargs to Log10 hparams."""
+    hparams = {}
+    if "temperature" in kwargs:
+        hparams["temperature"] = kwargs["temperature"]
+    if "top_p" in kwargs:
+        hparams["top_p"] = kwargs["top_p"]
+    if "top_k" in kwargs:
+        hparams["top_k"] = kwargs["top_k"]
+    if "max_tokens" in kwargs:
+        hparams["max_tokens"] = kwargs["max_tokens"]
+    if "max_tokens_to_sample" in kwargs:
+        hparams["max_tokens"] = kwargs["max_tokens_to_sample"]
+    if "frequency_penalty" in kwargs:
+        hparams["frequency_penalty"] = kwargs["frequency_penalty"]
+    if "presence_penalty" in kwargs:
+        hparams["presence_penalty"] = kwargs["presence_penalty"]
+    return hparams
+
+
+class Log10Callback(BaseCallbackHandler, LLM):
+    """Callback handler that logs LangChain chat and text completions to Log10."""
+
+    def __init__(self, log10_config: Optional[Log10Config] = None) -> None:
+        """Initialize callback handler."""
+        super().__init__(log10_config=log10_config, hparams=None)
+        self.runs = {}
+
+    def on_llm_start(
+        self,
+        serialized: Dict[str, Any],
+        prompts: List[str],
+        *,
+        run_id: UUID,
+        parent_run_id: Optional[UUID] = None,
+        tags: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Log the start of a text completion."""
+        logging.debug(
+            f"**\n**on_llm_start**\n**\n: serialized:\n {serialized} \n\n prompts:\n {prompts} \n\n rest: {kwargs}"
+        )
+
+        kwargs = serialized.get("kwargs", {})
+        hparams = kwargs_to_hparams(kwargs)
+
+        model = kwargs.get("model_name", None)
+        if model is None:
+            model = kwargs.get("model", None)
+        if model is None:
+            raise BaseException("No model found in serialized or kwargs")
+
+        if len(prompts) != 1:
+            raise BaseException("Only support one prompt at a time")
+
+        request = {"model": model, "prompt": prompts[0], **hparams}
+
+        logging.debug(f"request: {request}")
+
+        completion_id = self.log_start(request, Kind.text)
+
+        self.runs[run_id] = {
+            "kind": Kind.text,
+            "completion_id": completion_id,
+            "start_time": time.perf_counter(),
+            "model": model,
+        }
+
+    def on_chat_model_start(
+        self,
+        serialized: Dict[str, Any],
+        messages: List[List[BaseMessage]],
+        *,
+        run_id: UUID,
+        parent_run_id: Optional[UUID] = None,
+        tags: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> None:
+        logging.debug(
+            f"**\n**on_chat_model_start**\n**\n: run_id:{run_id}\nserialized:\n{serialized}\n\nmessages:\n{messages}\n\nkwargs: {kwargs}"
+        )
+
+        #
+        # Find model string
+        #
+        kwargs = serialized.get("kwargs", {})
+        model = kwargs.get("model_name", None)
+        if model is None:
+            model = kwargs.get("model", None)
+        if model is None:
+            raise BaseException("No model found in serialized or kwargs")
+
+        hparams = kwargs_to_hparams(kwargs)
+        hparams["model"] = model
+
+        logging.debug(f"hparams: {hparams}")
+
+        if len(messages) != 1:
+            raise BaseException("Only support one message list at a time")
+
+        # Convert messages to log10 format
+        log10_messages = []
+        for message in messages[0]:
+            logging.debug(f"message: {message}")
+            if isinstance(message, HumanMessage):
+                log10_messages.append(Message(role="user", content=message.content))
+            elif isinstance(message, AIMessage):
+                log10_messages.append(
+                    Message(role="assistant", content=message.content)
+                )
+            elif isinstance(message, SystemMessage):
+                log10_messages.append(Message(role="system", content=message.content))
+            else:
+                raise BaseException(f"Unknown message type {type(message)}")
+
+        request = {
+            "messages": [message.to_dict() for message in log10_messages],
+            **hparams,
+        }
+        logging.debug(f"request: {request}")
+
+        completion_id = self.log_start(
+            request,
+            Kind.chat,
+        )
+
+        self.runs[run_id] = {
+            "kind": Kind.chat,
+            "completion_id": completion_id,
+            "start_time": time.perf_counter(),
+            "model": model,
+        }
+
+        logging.debug(f"logged start with completion_id: {completion_id}")
+
+    def on_llm_end(
+        self,
+        response: LLMResult,
+        *,
+        run_id: UUID,
+        parent_run_id: Optional[UUID] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Log the completion response and duration."""
+        # Find run in runs.
+        run = self.runs.get(run_id, None)
+        if run is None:
+            raise BaseException("Could not find run in runs")
+
+        if run["kind"] != Kind.chat and run["kind"] != Kind.text:
+            raise BaseException("Only chat and text kinds are supported")
+
+        duration = time.perf_counter() - run["start_time"]
+
+        # Log end
+        if len(response.generations) != 1:
+            raise BaseException("Only support one generation at a time")
+        if len(response.generations[0]) != 1:
+            raise BaseException("Only support one generation at a time")
+
+        content = response.generations[0][0].text
+
+        log10response = {}
+        if run["kind"] == Kind.chat:
+            log10response = {
+                "id": str(uuid.uuid4()),
+                "object": "chat.completion",
+                "model": run["model"],
+                "choices": [
+                    {
+                        "index": 0,
+                        "message": {"role": "assistant", "content": content},
+                        "finish_reason": "stop",
+                    }
+                ],
+            }
+        elif run["kind"] == Kind.text:
+            log10response = {
+                "id": str(uuid.uuid4()),
+                "object": "text_completion",
+                "model": run["model"],
+                "choices": [
+                    {
+                        "index": 0,
+                        "text": content,
+                        "logprobs": None,
+                        "finish_reason": "stop",
+                    }
+                ],
+            }
+
+        # Determine if we can provide usage metrics (token count).
+        logging.debug(f"**** response: {response}")
+        if response.llm_output is not None:
+            token_usage = response.llm_output.get("token_usage")
+            if token_usage is not None:
+                log10response["usage"] = token_usage
+                logging.debug(f"usage: {log10response['usage']}")
+
+        logging.debug(
+            f"**\n**on_llm_end**\n**\n: response:\n {log10response} \n\n rest: {kwargs}"
+        )
+        self.log_end(run["completion_id"], log10response, duration)
+
+    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
+        """Log the token at debug level."""
+        logging.debug(f"token:\n {token} \n\n rest: {kwargs}")
+
+    def on_llm_error(
+        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
+    ) -> None:
+        """Log the error at debug level."""
+        logging.debug(f"error:\n {error} \n\n rest: {kwargs}")
+
+    def on_chain_start(
+        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
+    ) -> None:
+        """Do nothing."""
+        pass
+
+    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
+        """Do nothing."""
+        pass
+
+    def on_chain_error(
+        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
+    ) -> None:
+        """Do nothing."""
+        pass
+
+    def on_tool_start(
+        self,
+        serialized: Dict[str, Any],
+        input_str: str,
+        **kwargs: Any,
+    ) -> None:
+        """Do nothing."""
+        pass
+
+    def on_agent_action(
+        self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
+    ) -> Any:
+        """Do nothing."""
+        pass
+
+    def on_tool_end(
+        self,
+        output: str,
+        color: Optional[str] = None,
+        observation_prefix: Optional[str] = None,
+        llm_prefix: Optional[str] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Do nothing."""
+        pass
+
+    def on_tool_error(
+        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
+    ) -> None:
+        """Do nothing."""
+        pass
+
+    def on_text(
+        self,
+        text: str,
+        color: Optional[str] = None,
+        end: str = "",
+        **kwargs: Any,
+    ) -> None:
+        """Do nothing."""
+        pass
+
+    def on_agent_finish(
+        self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
+    ) -> None:
+        """Do nothing."""
+        pass
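
For reference, a quick sketch of the normalization kwargs_to_hparams performs; everything here comes from the function defined above except the literal example values:

from log10.langchain import kwargs_to_hparams

# OpenAI-style kwargs pass through unchanged.
print(kwargs_to_hparams({"temperature": 0.5, "max_tokens": 256}))
# {'temperature': 0.5, 'max_tokens': 256}

# Anthropic's max_tokens_to_sample is mapped onto the shared "max_tokens" key.
print(kwargs_to_hparams({"temperature": 0.7, "max_tokens_to_sample": 128}))
# {'temperature': 0.7, 'max_tokens': 128}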
