
Merge pull request #36 from kxtran/main
Add tokens usage for Anthropic
nqn authored Jul 18, 2023
2 parents fbd03e1 + aa93223 commit 5799861
Showing 3 changed files with 24 additions and 4 deletions.
log10/anthropic.py: 24 changes (22 additions & 2 deletions)
@@ -26,8 +26,9 @@ def __init__(
         self.hparams["max_tokens_to_sample"] = 1024
 
     def chat(self, messages: List[Message], hparams: dict = None) -> ChatCompletion:
+        chat_request = self.chat_request(messages, hparams)
         completion = self.client.completions.create(
-            **self.chat_request(messages, hparams)
+            **chat_request
         )
         content = completion.completion
 
@@ -37,6 +38,8 @@ def chat(self, messages: List[Message], hparams: dict = None) -> ChatCompletion:
         elif completion.stop_reason == "max_tokens":
             reason = "length"
 
+        tokens_usage = self.create_tokens_usage(chat_request["prompt"], content)
+
         # Imitate OpenAI response format.
         response = {
             "id": str(uuid.uuid4()),
@@ -49,6 +52,7 @@ def chat(self, messages: List[Message], hparams: dict = None) -> ChatCompletion:
                     "finish_reason": reason,
                 }
             ],
+            "usage": tokens_usage
         }
 
         return ChatCompletion(role="assistant", content=content, response=response)
@@ -65,8 +69,9 @@ def chat_request(self, messages: List[Message], hparams: dict = None) -> dict:
         return {"prompt": prompt, "stop_sequences": [HUMAN_PROMPT], **merged_hparams}
 
     def text(self, prompt: str, hparams: dict = None) -> TextCompletion:
+        text_request = self.text_request(prompt, hparams)
         completion = self.client.completions.create(
-            **self.text_request(prompt, hparams)
+            **text_request
         )
         text = completion.completion
 
@@ -77,6 +82,8 @@ def text(self, prompt: str, hparams: dict = None) -> TextCompletion:
         elif completion.stop_reason == "max_tokens":
             reason = "length"
 
+        tokens_usage = self.create_tokens_usage(text_request["prompt"], text)
+
         # Imitate OpenAI response format.
         response = {
             "id": str(uuid.uuid4()),
@@ -90,6 +97,7 @@ def text(self, prompt: str, hparams: dict = None) -> TextCompletion:
                     "finish_reason": reason,
                 }
             ],
+            "usage": tokens_usage
         }
         logging.info("Returning text completion")
         return TextCompletion(text=text, response=response)
@@ -119,3 +127,15 @@ def convert_history_to_claude(messages: List[Message]):
 
     def convert_claude_to_messages(prompt: str):
         pass
+
+    def create_tokens_usage(self, prompt: str, completion: str):
+        prompt_tokens = self.client.count_tokens(prompt)
+        completion_tokens = self.client.count_tokens(completion)
+        total_tokens = prompt_tokens + completion_tokens
+
+        # Imitate OpenAI usage format.
+        return {
+            "prompt_tokens": prompt_tokens,
+            "completion_tokens": completion_tokens,
+            "total_tokens": total_tokens
+        }
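
With this commit, both chat() and text() attach an OpenAI-style "usage" block to the response they return, computed by the new create_tokens_usage helper from the Anthropic client's count_tokens. A minimal sketch of reading it, assuming the wrapper class is exported as Anthropic, that Message lives in log10.llm, and that the constructor accepts hparams (assumptions; the diff shows the methods, not the class name or constructor):

# Sketch only, not part of the commit. Assumed: the class is exported as
# `Anthropic`, `Message` lives in log10.llm, and the constructor takes hparams.
from log10.anthropic import Anthropic
from log10.llm import Message

llm = Anthropic(hparams={"model": "claude-1"})
completion = llm.chat([Message(role="user", content="Hello, Claude!")])

# The response imitates OpenAI's format, so the usage block reads the same way.
usage = completion.response["usage"]
print(usage["prompt_tokens"], usage["completion_tokens"], usage["total_tokens"])

Note that prompt_tokens is counted over the fully formatted Claude prompt (the chat_request["prompt"] string, Human/Assistant markers included), not over the raw message text.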
pyproject.toml: 2 changes (1 addition & 1 deletion)
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "log10-io"
-version = "0.2.1"
+version = "0.2.2"
 authors = []
 license = "MIT"
 description = "Unified LLM data management"
setup.py: 2 changes (1 addition & 1 deletion)
@@ -5,7 +5,7 @@
 
 setup(
     name="Log10",
-    version="0.2.1",
+    version="0.2.2",
     description="Log10 LLM data management",
     author="Log10 team",
     author_email="team@log10.io",
