From 1760acfa2ee0a9c6491744b3bacaf9bbc4d37a8a Mon Sep 17 00:00:00 2001
From: Yun Kim <35776586+Yun-Kim@users.noreply.github.com>
Date: Fri, 24 Jan 2025 15:22:50 -0500
Subject: [PATCH 01/50] fix(openai): support Python 3.12, 3.13 (#12014)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Resolves #11994.
Previously, we were not testing our OpenAI integration with Python
versions >= 3.12 (at the time, OpenAI did not support those newer Python
versions). We recently saw that our bytecode wrapping for OpenAI methods
broke in Python 3.13, so this PR addresses that by:
- Switching our bytecode wrapping to `wrapt.wrap_function_wrapper()`
to make our patching compatible with newer Python versions.
- Adding Python 3.12 and 3.13 to the tested versions for the OpenAI
integration.
- Implementing unpatching and adding patching tests.
## Wrapping Changes
The OpenAI integration previously used direct bytecode wrapping, which
is not supported on Python 3.13. We have switched to
`wrapt.wrap_function_wrapper()` to wrap OpenAI methods, which required
several changes to the wrapping approach:
- pass function/attribute names (strings) rather than object references
to the wrapping functions (see the sketch after this list)
- replace nested functions with `@with_traced_module()` to pass the
traced OpenAI module reference between traced methods
- implement unpatching
- add patching tests (note: this is messy because we support both
OpenAI v0 and v1, which have different method names)
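For illustration, here is a minimal sketch of the string-based `wrapt`
pattern (the wrapper function and the dotted attribute path below are
examples chosen for clarity, not the integration's exact helpers):

```python
# Minimal sketch of string-based wrapping with wrapt (illustrative only).
import wrapt

import openai


def traced_create(wrapped, instance, args, kwargs):
    # `wrapped` is the original method and `instance` is the bound resource
    # object, so the hook no longer needs to dig the object out of args[0].
    # The real integration starts a span here and records the response.
    return wrapped(*args, **kwargs)


# The target is identified by module + dotted attribute name (a string),
# not by a direct function reference, which keeps patching compatible
# with Python 3.12/3.13.
wrapt.wrap_function_wrapper(
    openai, "resources.chat.completions.Completions.create", traced_create
)
```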
**Note**: the issue in #11994 was only reproducible via the
`AsyncOpenAI.Moderations.create()` and `AsyncOpenAI.Embeddings.create()`
endpoints (chat and completions were not affected). However, to avoid
any risk introduced by the now-unsupported bytecode wrapping, we are
changing the entire OpenAI integration to use
`wrapt.wrap_function_wrapper()` instead.
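Unpatching becomes straightforward with this approach, since a `wrapt`
wrapper keeps the original callable on `__wrapped__`. A rough sketch
(the `_unwrap` helper and the target path below are illustrative; the
integration itself uses `ddtrace.contrib.trace_utils.unwrap`):

```python
# Rough sketch of unpatching a wrapt-wrapped method (illustrative helper).
import openai


def _unwrap(obj, name):
    wrapper = getattr(obj, name)
    original = getattr(wrapper, "__wrapped__", None)
    if original is not None:
        # Restore the original callable that wrapt stored on the wrapper.
        setattr(obj, name, original)


_unwrap(openai.resources.chat.completions.Completions, "create")
```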
## Testing Changes
### Python 3.7 removal (Drop support for OpenAI v0)
Since ddtrace 3.0 will drop support for Python 3.7, we are dropping it
accordingly in this PR. This also coincides with removing support for
OpenAI v0: v0 was last released more than 16 months ago, imposes a
large maintenance burden, and pollutes the codebase. Note that this PR
only drops testing support; a future PR will remove tracing support for
v0 as part of a refactor/cleanup of the OpenAI integration.
### Azure OpenAI tests
Azure OpenAI requests have changed in the latest versions of OpenAI,
which causes our cassette tests (recorded against previous OpenAI
versions) to fail. However, manual testing shows that our OpenAI
integration's support is unchanged for the newer versions. Given the
time and effort required to produce new cassette files, we are skipping
cassette tests for Azure endpoints on the latest openai versions until
we overhaul our testing framework (and move off cassette files). Here
are the manual traces submitted with Azure OpenAI for reference:
## Checklist
- [x] PR author has checked that all the criteria below are met
- The PR description includes an overview of the change
- The PR description articulates the motivation for the change
- The change includes tests OR the PR description describes a testing
strategy
- The PR description notes risks associated with the change, if any
- Newly-added code is easy to change
- The change follows the [library release note
guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)
- The change includes or references documentation updates if necessary
- Backport labels are set (if
[applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))
## Reviewer Checklist
- [x] Reviewer has checked that all the criteria below are met
- Title is accurate
- All changes are related to the pull request's stated goal
- Avoids breaking
[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces)
changes
- Testing strategy adequately addresses listed risks
- Newly-added code is easy to change
- Release note makes sense to a user of the library
- If necessary, author has acknowledged and discussed the performance
implications of this PR as reported in the benchmarks PR comment
- Backport labels are set in a manner that is consistent with the
[release branch maintenance
policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)
---
.riot/requirements/107d8f2.txt | 54 ++++
.riot/requirements/130158f.txt | 48 ++++
.riot/requirements/13804af.txt | 57 ----
.riot/requirements/13fec34.txt | 49 ----
.riot/requirements/1825740.txt | 12 +-
.riot/requirements/18de44f.txt | 52 ++++
.riot/requirements/1ad89c5.txt | 50 ++++
.../requirements/{1db5311.txt => 1e6bd37.txt} | 47 +--
.riot/requirements/1ec15f5.txt | 57 ----
.riot/requirements/1ee49b9.txt | 53 ----
.riot/requirements/2634bf7.txt | 48 ++++
.riot/requirements/35ce786.txt | 55 ----
.riot/requirements/4a85f6d.txt | 50 ++++
.riot/requirements/4d27459.txt | 48 ++++
.riot/requirements/530c983.txt | 52 ++++
.riot/requirements/5da4fd8.txt | 49 ----
.riot/requirements/84ec59a.txt | 53 ----
.riot/requirements/87a1fff.txt | 53 ----
.riot/requirements/b5d5a35.txt | 52 ++++
.riot/requirements/c74f6e0.txt | 51 ----
.riot/requirements/cd2e4ea.txt | 53 ----
.../requirements/{181216c.txt => df60af6.txt} | 16 +-
.riot/requirements/f1c37b1.txt | 51 ----
.riot/requirements/f7c30a0.txt | 51 ----
.../internal/openai/_endpoint_hooks.py | 110 +++----
ddtrace/contrib/internal/openai/patch.py | 267 +++++++++--------
ddtrace/contrib/openai/__init__.py | 1 -
.../fix-openai-313-29ec43cbf2f35947.yaml | 7 +
riotfile.py | 16 +-
.../v1/completion_stream_wrong_api_key.yaml | 77 +++++
tests/contrib/openai/test_openai_llmobs.py | 16 ++
tests/contrib/openai/test_openai_patch.py | 272 +++++++++++++++++-
tests/contrib/openai/test_openai_v0.py | 38 ---
tests/contrib/openai/test_openai_v1.py | 91 ++----
34 files changed, 1092 insertions(+), 964 deletions(-)
create mode 100644 .riot/requirements/107d8f2.txt
create mode 100644 .riot/requirements/130158f.txt
delete mode 100644 .riot/requirements/13804af.txt
delete mode 100644 .riot/requirements/13fec34.txt
create mode 100644 .riot/requirements/18de44f.txt
create mode 100644 .riot/requirements/1ad89c5.txt
rename .riot/requirements/{1db5311.txt => 1e6bd37.txt} (55%)
delete mode 100644 .riot/requirements/1ec15f5.txt
delete mode 100644 .riot/requirements/1ee49b9.txt
create mode 100644 .riot/requirements/2634bf7.txt
delete mode 100644 .riot/requirements/35ce786.txt
create mode 100644 .riot/requirements/4a85f6d.txt
create mode 100644 .riot/requirements/4d27459.txt
create mode 100644 .riot/requirements/530c983.txt
delete mode 100644 .riot/requirements/5da4fd8.txt
delete mode 100644 .riot/requirements/84ec59a.txt
delete mode 100644 .riot/requirements/87a1fff.txt
create mode 100644 .riot/requirements/b5d5a35.txt
delete mode 100644 .riot/requirements/c74f6e0.txt
delete mode 100644 .riot/requirements/cd2e4ea.txt
rename .riot/requirements/{181216c.txt => df60af6.txt} (82%)
delete mode 100644 .riot/requirements/f1c37b1.txt
delete mode 100644 .riot/requirements/f7c30a0.txt
create mode 100644 releasenotes/notes/fix-openai-313-29ec43cbf2f35947.yaml
create mode 100644 tests/contrib/openai/cassettes/v1/completion_stream_wrong_api_key.yaml
diff --git a/.riot/requirements/107d8f2.txt b/.riot/requirements/107d8f2.txt
new file mode 100644
index 00000000000..7bed129ddaf
--- /dev/null
+++ b/.riot/requirements/107d8f2.txt
@@ -0,0 +1,54 @@
+#
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
+#
+# pip-compile --no-annotate --resolver=backtracking .riot/requirements/107d8f2.in
+#
+annotated-types==0.7.0
+anyio==4.8.0
+attrs==24.3.0
+certifi==2024.12.14
+coverage[toml]==7.6.10
+distro==1.9.0
+exceptiongroup==1.2.2
+h11==0.14.0
+httpcore==1.0.7
+httpx==0.27.2
+hypothesis==6.45.0
+idna==3.10
+importlib-metadata==8.6.1
+iniconfig==2.0.0
+mock==5.1.0
+multidict==6.1.0
+numpy==2.0.2
+openai[datalib,embeddings]==1.30.1
+opentracing==2.4.0
+packaging==24.2
+pandas==2.2.3
+pandas-stubs==2.2.2.240807
+pillow==9.5.0
+pluggy==1.5.0
+propcache==0.2.1
+pydantic==2.10.5
+pydantic-core==2.27.2
+pytest==8.3.4
+pytest-asyncio==0.21.1
+pytest-cov==6.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.16.0
+python-dateutil==2.9.0.post0
+pytz==2024.2
+pyyaml==6.0.2
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+tomli==2.2.1
+tqdm==4.67.1
+types-pytz==2024.2.0.20241221
+typing-extensions==4.12.2
+tzdata==2025.1
+urllib3==1.26.20
+vcrpy==4.2.1
+wrapt==1.17.2
+yarl==1.18.3
+zipp==3.21.0
diff --git a/.riot/requirements/130158f.txt b/.riot/requirements/130158f.txt
new file mode 100644
index 00000000000..037c7010f33
--- /dev/null
+++ b/.riot/requirements/130158f.txt
@@ -0,0 +1,48 @@
+#
+# This file is autogenerated by pip-compile with Python 3.12
+# by the following command:
+#
+# pip-compile --no-annotate .riot/requirements/130158f.in
+#
+annotated-types==0.7.0
+anyio==4.8.0
+attrs==24.3.0
+certifi==2024.12.14
+charset-normalizer==3.4.1
+coverage[toml]==7.6.10
+distro==1.9.0
+h11==0.14.0
+httpcore==1.0.7
+httpx==0.28.1
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.0.0
+jiter==0.8.2
+mock==5.1.0
+multidict==6.1.0
+openai==1.60.0
+opentracing==2.4.0
+packaging==24.2
+pillow==11.1.0
+pluggy==1.5.0
+propcache==0.2.1
+pydantic==2.10.5
+pydantic-core==2.27.2
+pytest==8.3.4
+pytest-asyncio==0.21.1
+pytest-cov==6.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.16.0
+pyyaml==6.0.2
+regex==2024.11.6
+requests==2.32.3
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+tiktoken==0.8.0
+tqdm==4.67.1
+typing-extensions==4.12.2
+urllib3==1.26.20
+vcrpy==4.2.1
+wrapt==1.17.2
+yarl==1.18.3
diff --git a/.riot/requirements/13804af.txt b/.riot/requirements/13804af.txt
deleted file mode 100644
index 7035a764386..00000000000
--- a/.riot/requirements/13804af.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.8
-# by the following command:
-#
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/13804af.in
-#
-annotated-types==0.7.0
-anyio==4.4.0
-attrs==24.2.0
-certifi==2024.7.4
-charset-normalizer==3.3.2
-coverage[toml]==7.6.1
-distro==1.9.0
-exceptiongroup==1.2.2
-h11==0.14.0
-httpcore==1.0.5
-httpx==0.27.0
-hypothesis==6.45.0
-idna==3.8
-importlib-metadata==8.4.0
-iniconfig==2.0.0
-mock==5.1.0
-multidict==6.0.5
-numpy==1.24.4
-openai[datalib]==1.30.1
-opentracing==2.4.0
-packaging==24.1
-pandas==2.0.3
-pandas-stubs==2.0.3.230814
-pillow==10.1.0
-pluggy==1.5.0
-pydantic==2.8.2
-pydantic-core==2.20.1
-pytest==8.3.2
-pytest-asyncio==0.21.1
-pytest-cov==5.0.0
-pytest-mock==3.14.0
-pytest-randomly==3.15.0
-python-dateutil==2.9.0.post0
-pytz==2024.1
-pyyaml==6.0.2
-regex==2024.7.24
-requests==2.32.3
-six==1.16.0
-sniffio==1.3.1
-sortedcontainers==2.4.0
-tiktoken==0.7.0
-tomli==2.0.1
-tqdm==4.66.5
-types-pytz==2024.1.0.20240417
-typing-extensions==4.12.2
-tzdata==2024.1
-urllib3==1.26.19
-vcrpy==4.2.1
-wrapt==1.16.0
-yarl==1.9.4
-zipp==3.20.1
diff --git a/.riot/requirements/13fec34.txt b/.riot/requirements/13fec34.txt
deleted file mode 100644
index 8858506f793..00000000000
--- a/.riot/requirements/13fec34.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.11
-# by the following command:
-#
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/13fec34.in
-#
-annotated-types==0.7.0
-anyio==3.7.1
-attrs==24.2.0
-certifi==2024.7.4
-coverage[toml]==7.6.1
-distro==1.9.0
-h11==0.14.0
-httpcore==1.0.5
-httpx==0.27.0
-hypothesis==6.45.0
-idna==3.8
-iniconfig==2.0.0
-mock==5.1.0
-multidict==6.0.5
-numpy==2.1.0
-openai[datalib,embeddings]==1.1.1
-opentracing==2.4.0
-packaging==24.1
-pandas==2.2.2
-pandas-stubs==2.2.2.240807
-pillow==9.5.0
-pluggy==1.5.0
-pydantic==2.8.2
-pydantic-core==2.20.1
-pytest==8.3.2
-pytest-asyncio==0.21.1
-pytest-cov==5.0.0
-pytest-mock==3.14.0
-pytest-randomly==3.15.0
-python-dateutil==2.9.0.post0
-pytz==2024.1
-pyyaml==6.0.2
-six==1.16.0
-sniffio==1.3.1
-sortedcontainers==2.4.0
-tqdm==4.66.5
-types-pytz==2024.1.0.20240417
-typing-extensions==4.12.2
-tzdata==2024.1
-urllib3==1.26.19
-vcrpy==4.2.1
-wrapt==1.16.0
-yarl==1.9.4
diff --git a/.riot/requirements/1825740.txt b/.riot/requirements/1825740.txt
index b4660fad985..d1ef7a92bc0 100644
--- a/.riot/requirements/1825740.txt
+++ b/.riot/requirements/1825740.txt
@@ -9,13 +9,13 @@ aiosignal==1.3.1
async-timeout==4.0.3
asynctest==0.13.0
attrs==24.2.0
-certifi==2024.7.4
-charset-normalizer==3.3.2
+certifi==2024.12.14
+charset-normalizer==3.4.1
coverage[toml]==7.2.7
exceptiongroup==1.2.2
frozenlist==1.3.3
hypothesis==6.45.0
-idna==3.8
+idna==3.10
importlib-metadata==6.7.0
iniconfig==2.0.0
joblib==1.3.2
@@ -36,13 +36,13 @@ pyyaml==6.0.1
requests==2.31.0
scikit-learn==1.0.2
scipy==1.7.3
-six==1.16.0
+six==1.17.0
sortedcontainers==2.4.0
threadpoolctl==3.1.0
tomli==2.0.1
-tqdm==4.66.5
+tqdm==4.67.1
typing-extensions==4.7.1
-urllib3==1.26.19
+urllib3==1.26.20
vcrpy==4.2.1
wrapt==1.16.0
yarl==1.9.4
diff --git a/.riot/requirements/18de44f.txt b/.riot/requirements/18de44f.txt
new file mode 100644
index 00000000000..702b980c641
--- /dev/null
+++ b/.riot/requirements/18de44f.txt
@@ -0,0 +1,52 @@
+#
+# This file is autogenerated by pip-compile with Python 3.8
+# by the following command:
+#
+# pip-compile --no-annotate --resolver=backtracking .riot/requirements/18de44f.in
+#
+annotated-types==0.7.0
+anyio==4.5.2
+attrs==24.3.0
+certifi==2024.12.14
+charset-normalizer==3.4.1
+coverage[toml]==7.6.1
+distro==1.9.0
+exceptiongroup==1.2.2
+h11==0.14.0
+httpcore==1.0.7
+httpx==0.28.1
+hypothesis==6.45.0
+idna==3.10
+importlib-metadata==8.5.0
+iniconfig==2.0.0
+jiter==0.8.2
+mock==5.1.0
+multidict==6.1.0
+openai==1.60.0
+opentracing==2.4.0
+packaging==24.2
+pillow==10.4.0
+pluggy==1.5.0
+propcache==0.2.0
+pydantic==2.10.5
+pydantic-core==2.27.2
+pytest==8.3.4
+pytest-asyncio==0.21.1
+pytest-cov==5.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.15.0
+pyyaml==6.0.2
+regex==2024.11.6
+requests==2.32.3
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+tiktoken==0.7.0
+tomli==2.2.1
+tqdm==4.67.1
+typing-extensions==4.12.2
+urllib3==1.26.20
+vcrpy==4.2.1
+wrapt==1.17.2
+yarl==1.15.2
+zipp==3.20.2
diff --git a/.riot/requirements/1ad89c5.txt b/.riot/requirements/1ad89c5.txt
new file mode 100644
index 00000000000..b10206e12d9
--- /dev/null
+++ b/.riot/requirements/1ad89c5.txt
@@ -0,0 +1,50 @@
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile --no-annotate .riot/requirements/1ad89c5.in
+#
+annotated-types==0.7.0
+anyio==4.8.0
+attrs==24.3.0
+certifi==2024.12.14
+charset-normalizer==3.4.1
+coverage[toml]==7.6.10
+distro==1.9.0
+exceptiongroup==1.2.2
+h11==0.14.0
+httpcore==1.0.7
+httpx==0.28.1
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.0.0
+jiter==0.8.2
+mock==5.1.0
+multidict==6.1.0
+openai==1.60.0
+opentracing==2.4.0
+packaging==24.2
+pillow==11.1.0
+pluggy==1.5.0
+propcache==0.2.1
+pydantic==2.10.5
+pydantic-core==2.27.2
+pytest==8.3.4
+pytest-asyncio==0.21.1
+pytest-cov==6.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.16.0
+pyyaml==6.0.2
+regex==2024.11.6
+requests==2.32.3
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+tiktoken==0.8.0
+tomli==2.2.1
+tqdm==4.67.1
+typing-extensions==4.12.2
+urllib3==1.26.20
+vcrpy==4.2.1
+wrapt==1.17.2
+yarl==1.18.3
diff --git a/.riot/requirements/1db5311.txt b/.riot/requirements/1e6bd37.txt
similarity index 55%
rename from .riot/requirements/1db5311.txt
rename to .riot/requirements/1e6bd37.txt
index c29bc9bdb8f..11bb5871c14 100644
--- a/.riot/requirements/1db5311.txt
+++ b/.riot/requirements/1e6bd37.txt
@@ -2,52 +2,53 @@
# This file is autogenerated by pip-compile with Python 3.8
# by the following command:
#
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/1db5311.in
+# pip-compile --no-annotate --resolver=backtracking .riot/requirements/1e6bd37.in
#
annotated-types==0.7.0
-anyio==4.4.0
-attrs==24.2.0
-certifi==2024.7.4
+anyio==4.5.2
+attrs==24.3.0
+certifi==2024.12.14
coverage[toml]==7.6.1
distro==1.9.0
exceptiongroup==1.2.2
h11==0.14.0
-httpcore==1.0.5
-httpx==0.27.0
+httpcore==1.0.7
+httpx==0.27.2
hypothesis==6.45.0
-idna==3.8
-importlib-metadata==8.4.0
+idna==3.10
+importlib-metadata==8.5.0
iniconfig==2.0.0
mock==5.1.0
-multidict==6.0.5
+multidict==6.1.0
numpy==1.24.4
openai[datalib,embeddings]==1.30.1
opentracing==2.4.0
-packaging==24.1
+packaging==24.2
pandas==2.0.3
pandas-stubs==2.0.3.230814
pillow==9.5.0
pluggy==1.5.0
-pydantic==2.8.2
-pydantic-core==2.20.1
-pytest==8.3.2
+propcache==0.2.0
+pydantic==2.10.5
+pydantic-core==2.27.2
+pytest==8.3.4
pytest-asyncio==0.21.1
pytest-cov==5.0.0
pytest-mock==3.14.0
pytest-randomly==3.15.0
python-dateutil==2.9.0.post0
-pytz==2024.1
+pytz==2024.2
pyyaml==6.0.2
-six==1.16.0
+six==1.17.0
sniffio==1.3.1
sortedcontainers==2.4.0
-tomli==2.0.1
-tqdm==4.66.5
-types-pytz==2024.1.0.20240417
+tomli==2.2.1
+tqdm==4.67.1
+types-pytz==2024.2.0.20241221
typing-extensions==4.12.2
-tzdata==2024.1
-urllib3==1.26.19
+tzdata==2025.1
+urllib3==1.26.20
vcrpy==4.2.1
-wrapt==1.16.0
-yarl==1.9.4
-zipp==3.20.1
+wrapt==1.17.2
+yarl==1.15.2
+zipp==3.20.2
diff --git a/.riot/requirements/1ec15f5.txt b/.riot/requirements/1ec15f5.txt
deleted file mode 100644
index b4479a2fb39..00000000000
--- a/.riot/requirements/1ec15f5.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.9
-# by the following command:
-#
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/1ec15f5.in
-#
-annotated-types==0.7.0
-anyio==4.4.0
-attrs==24.2.0
-certifi==2024.7.4
-charset-normalizer==3.3.2
-coverage[toml]==7.6.1
-distro==1.9.0
-exceptiongroup==1.2.2
-h11==0.14.0
-httpcore==1.0.5
-httpx==0.27.0
-hypothesis==6.45.0
-idna==3.8
-importlib-metadata==8.4.0
-iniconfig==2.0.0
-mock==5.1.0
-multidict==6.0.5
-numpy==2.0.1
-openai[datalib]==1.30.1
-opentracing==2.4.0
-packaging==24.1
-pandas==2.2.2
-pandas-stubs==2.2.2.240807
-pillow==10.1.0
-pluggy==1.5.0
-pydantic==2.8.2
-pydantic-core==2.20.1
-pytest==8.3.2
-pytest-asyncio==0.21.1
-pytest-cov==5.0.0
-pytest-mock==3.14.0
-pytest-randomly==3.15.0
-python-dateutil==2.9.0.post0
-pytz==2024.1
-pyyaml==6.0.2
-regex==2024.7.24
-requests==2.32.3
-six==1.16.0
-sniffio==1.3.1
-sortedcontainers==2.4.0
-tiktoken==0.7.0
-tomli==2.0.1
-tqdm==4.66.5
-types-pytz==2024.1.0.20240417
-typing-extensions==4.12.2
-tzdata==2024.1
-urllib3==1.26.19
-vcrpy==4.2.1
-wrapt==1.16.0
-yarl==1.9.4
-zipp==3.20.1
diff --git a/.riot/requirements/1ee49b9.txt b/.riot/requirements/1ee49b9.txt
deleted file mode 100644
index f170e2885c4..00000000000
--- a/.riot/requirements/1ee49b9.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.11
-# by the following command:
-#
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/1ee49b9.in
-#
-annotated-types==0.7.0
-anyio==4.4.0
-attrs==24.2.0
-certifi==2024.7.4
-charset-normalizer==3.3.2
-coverage[toml]==7.6.1
-distro==1.9.0
-h11==0.14.0
-httpcore==1.0.5
-httpx==0.27.0
-hypothesis==6.45.0
-idna==3.8
-iniconfig==2.0.0
-mock==5.1.0
-multidict==6.0.5
-numpy==2.1.0
-openai[datalib]==1.30.1
-opentracing==2.4.0
-packaging==24.1
-pandas==2.2.2
-pandas-stubs==2.2.2.240807
-pillow==10.1.0
-pluggy==1.5.0
-pydantic==2.8.2
-pydantic-core==2.20.1
-pytest==8.3.2
-pytest-asyncio==0.21.1
-pytest-cov==5.0.0
-pytest-mock==3.14.0
-pytest-randomly==3.15.0
-python-dateutil==2.9.0.post0
-pytz==2024.1
-pyyaml==6.0.2
-regex==2024.7.24
-requests==2.32.3
-six==1.16.0
-sniffio==1.3.1
-sortedcontainers==2.4.0
-tiktoken==0.7.0
-tqdm==4.66.5
-types-pytz==2024.1.0.20240417
-typing-extensions==4.12.2
-tzdata==2024.1
-urllib3==1.26.19
-vcrpy==4.2.1
-wrapt==1.16.0
-yarl==1.9.4
diff --git a/.riot/requirements/2634bf7.txt b/.riot/requirements/2634bf7.txt
new file mode 100644
index 00000000000..0000f6e28ff
--- /dev/null
+++ b/.riot/requirements/2634bf7.txt
@@ -0,0 +1,48 @@
+#
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
+#
+# pip-compile --no-annotate --resolver=backtracking .riot/requirements/2634bf7.in
+#
+annotated-types==0.7.0
+anyio==4.8.0
+attrs==24.3.0
+certifi==2024.12.14
+charset-normalizer==3.4.1
+coverage[toml]==7.6.10
+distro==1.9.0
+h11==0.14.0
+httpcore==1.0.7
+httpx==0.28.1
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.0.0
+jiter==0.8.2
+mock==5.1.0
+multidict==6.1.0
+openai==1.60.0
+opentracing==2.4.0
+packaging==24.2
+pillow==11.1.0
+pluggy==1.5.0
+propcache==0.2.1
+pydantic==2.10.5
+pydantic-core==2.27.2
+pytest==8.3.4
+pytest-asyncio==0.21.1
+pytest-cov==6.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.16.0
+pyyaml==6.0.2
+regex==2024.11.6
+requests==2.32.3
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+tiktoken==0.8.0
+tqdm==4.67.1
+typing-extensions==4.12.2
+urllib3==1.26.20
+vcrpy==4.2.1
+wrapt==1.17.2
+yarl==1.18.3
diff --git a/.riot/requirements/35ce786.txt b/.riot/requirements/35ce786.txt
deleted file mode 100644
index 3489155be91..00000000000
--- a/.riot/requirements/35ce786.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.10
-# by the following command:
-#
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/35ce786.in
-#
-annotated-types==0.7.0
-anyio==4.4.0
-attrs==24.2.0
-certifi==2024.7.4
-charset-normalizer==3.3.2
-coverage[toml]==7.6.1
-distro==1.9.0
-exceptiongroup==1.2.2
-h11==0.14.0
-httpcore==1.0.5
-httpx==0.27.0
-hypothesis==6.45.0
-idna==3.8
-iniconfig==2.0.0
-mock==5.1.0
-multidict==6.0.5
-numpy==2.1.0
-openai[datalib]==1.30.1
-opentracing==2.4.0
-packaging==24.1
-pandas==2.2.2
-pandas-stubs==2.2.2.240807
-pillow==10.1.0
-pluggy==1.5.0
-pydantic==2.8.2
-pydantic-core==2.20.1
-pytest==8.3.2
-pytest-asyncio==0.21.1
-pytest-cov==5.0.0
-pytest-mock==3.14.0
-pytest-randomly==3.15.0
-python-dateutil==2.9.0.post0
-pytz==2024.1
-pyyaml==6.0.2
-regex==2024.7.24
-requests==2.32.3
-six==1.16.0
-sniffio==1.3.1
-sortedcontainers==2.4.0
-tiktoken==0.7.0
-tomli==2.0.1
-tqdm==4.66.5
-types-pytz==2024.1.0.20240417
-typing-extensions==4.12.2
-tzdata==2024.1
-urllib3==1.26.19
-vcrpy==4.2.1
-wrapt==1.16.0
-yarl==1.9.4
diff --git a/.riot/requirements/4a85f6d.txt b/.riot/requirements/4a85f6d.txt
new file mode 100644
index 00000000000..41953c69178
--- /dev/null
+++ b/.riot/requirements/4a85f6d.txt
@@ -0,0 +1,50 @@
+#
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
+#
+# pip-compile --no-annotate --resolver=backtracking .riot/requirements/4a85f6d.in
+#
+annotated-types==0.7.0
+anyio==4.8.0
+attrs==24.3.0
+certifi==2024.12.14
+coverage[toml]==7.6.10
+distro==1.9.0
+h11==0.14.0
+httpcore==1.0.7
+httpx==0.27.2
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.0.0
+mock==5.1.0
+multidict==6.1.0
+numpy==2.2.2
+openai[datalib,embeddings]==1.30.1
+opentracing==2.4.0
+packaging==24.2
+pandas==2.2.3
+pandas-stubs==2.2.3.241126
+pillow==9.5.0
+pluggy==1.5.0
+propcache==0.2.1
+pydantic==2.10.5
+pydantic-core==2.27.2
+pytest==8.3.4
+pytest-asyncio==0.21.1
+pytest-cov==6.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.16.0
+python-dateutil==2.9.0.post0
+pytz==2024.2
+pyyaml==6.0.2
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+tqdm==4.67.1
+types-pytz==2024.2.0.20241221
+typing-extensions==4.12.2
+tzdata==2025.1
+urllib3==1.26.20
+vcrpy==4.2.1
+wrapt==1.17.2
+yarl==1.18.3
diff --git a/.riot/requirements/4d27459.txt b/.riot/requirements/4d27459.txt
new file mode 100644
index 00000000000..630c81558f3
--- /dev/null
+++ b/.riot/requirements/4d27459.txt
@@ -0,0 +1,48 @@
+#
+# This file is autogenerated by pip-compile with Python 3.13
+# by the following command:
+#
+# pip-compile --no-annotate .riot/requirements/4d27459.in
+#
+annotated-types==0.7.0
+anyio==4.8.0
+attrs==24.3.0
+certifi==2024.12.14
+charset-normalizer==3.4.1
+coverage[toml]==7.6.10
+distro==1.9.0
+h11==0.14.0
+httpcore==1.0.7
+httpx==0.28.1
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.0.0
+jiter==0.8.2
+mock==5.1.0
+multidict==6.1.0
+openai==1.60.0
+opentracing==2.4.0
+packaging==24.2
+pillow==11.1.0
+pluggy==1.5.0
+propcache==0.2.1
+pydantic==2.10.5
+pydantic-core==2.27.2
+pytest==8.3.4
+pytest-asyncio==0.21.1
+pytest-cov==6.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.16.0
+pyyaml==6.0.2
+regex==2024.11.6
+requests==2.32.3
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+tiktoken==0.8.0
+tqdm==4.67.1
+typing-extensions==4.12.2
+urllib3==1.26.20
+vcrpy==4.2.1
+wrapt==1.17.2
+yarl==1.18.3
diff --git a/.riot/requirements/530c983.txt b/.riot/requirements/530c983.txt
new file mode 100644
index 00000000000..c07f9a6b918
--- /dev/null
+++ b/.riot/requirements/530c983.txt
@@ -0,0 +1,52 @@
+#
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
+#
+# pip-compile --no-annotate --resolver=backtracking .riot/requirements/530c983.in
+#
+annotated-types==0.7.0
+anyio==4.8.0
+attrs==24.3.0
+certifi==2024.12.14
+charset-normalizer==3.4.1
+coverage[toml]==7.6.10
+distro==1.9.0
+exceptiongroup==1.2.2
+h11==0.14.0
+httpcore==1.0.7
+httpx==0.28.1
+hypothesis==6.45.0
+idna==3.10
+importlib-metadata==8.6.1
+iniconfig==2.0.0
+jiter==0.8.2
+mock==5.1.0
+multidict==6.1.0
+openai==1.60.0
+opentracing==2.4.0
+packaging==24.2
+pillow==11.1.0
+pluggy==1.5.0
+propcache==0.2.1
+pydantic==2.10.5
+pydantic-core==2.27.2
+pytest==8.3.4
+pytest-asyncio==0.21.1
+pytest-cov==6.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.16.0
+pyyaml==6.0.2
+regex==2024.11.6
+requests==2.32.3
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+tiktoken==0.8.0
+tomli==2.2.1
+tqdm==4.67.1
+typing-extensions==4.12.2
+urllib3==1.26.20
+vcrpy==4.2.1
+wrapt==1.17.2
+yarl==1.18.3
+zipp==3.21.0
diff --git a/.riot/requirements/5da4fd8.txt b/.riot/requirements/5da4fd8.txt
deleted file mode 100644
index a700b91bf81..00000000000
--- a/.riot/requirements/5da4fd8.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.11
-# by the following command:
-#
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/5da4fd8.in
-#
-annotated-types==0.7.0
-anyio==4.4.0
-attrs==24.2.0
-certifi==2024.7.4
-coverage[toml]==7.6.1
-distro==1.9.0
-h11==0.14.0
-httpcore==1.0.5
-httpx==0.27.0
-hypothesis==6.45.0
-idna==3.8
-iniconfig==2.0.0
-mock==5.1.0
-multidict==6.0.5
-numpy==2.1.0
-openai[datalib,embeddings]==1.30.1
-opentracing==2.4.0
-packaging==24.1
-pandas==2.2.2
-pandas-stubs==2.2.2.240807
-pillow==9.5.0
-pluggy==1.5.0
-pydantic==2.8.2
-pydantic-core==2.20.1
-pytest==8.3.2
-pytest-asyncio==0.21.1
-pytest-cov==5.0.0
-pytest-mock==3.14.0
-pytest-randomly==3.15.0
-python-dateutil==2.9.0.post0
-pytz==2024.1
-pyyaml==6.0.2
-six==1.16.0
-sniffio==1.3.1
-sortedcontainers==2.4.0
-tqdm==4.66.5
-types-pytz==2024.1.0.20240417
-typing-extensions==4.12.2
-tzdata==2024.1
-urllib3==1.26.19
-vcrpy==4.2.1
-wrapt==1.16.0
-yarl==1.9.4
diff --git a/.riot/requirements/84ec59a.txt b/.riot/requirements/84ec59a.txt
deleted file mode 100644
index 9b079cf3a38..00000000000
--- a/.riot/requirements/84ec59a.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.8
-# by the following command:
-#
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/84ec59a.in
-#
-annotated-types==0.7.0
-anyio==3.7.1
-attrs==24.2.0
-certifi==2024.7.4
-coverage[toml]==7.6.1
-distro==1.9.0
-exceptiongroup==1.2.2
-h11==0.14.0
-httpcore==1.0.5
-httpx==0.27.0
-hypothesis==6.45.0
-idna==3.8
-importlib-metadata==8.4.0
-iniconfig==2.0.0
-mock==5.1.0
-multidict==6.0.5
-numpy==1.24.4
-openai[datalib,embeddings]==1.1.1
-opentracing==2.4.0
-packaging==24.1
-pandas==2.0.3
-pandas-stubs==2.0.3.230814
-pillow==9.5.0
-pluggy==1.5.0
-pydantic==2.8.2
-pydantic-core==2.20.1
-pytest==8.3.2
-pytest-asyncio==0.21.1
-pytest-cov==5.0.0
-pytest-mock==3.14.0
-pytest-randomly==3.15.0
-python-dateutil==2.9.0.post0
-pytz==2024.1
-pyyaml==6.0.2
-six==1.16.0
-sniffio==1.3.1
-sortedcontainers==2.4.0
-tomli==2.0.1
-tqdm==4.66.5
-types-pytz==2024.1.0.20240417
-typing-extensions==4.12.2
-tzdata==2024.1
-urllib3==1.26.19
-vcrpy==4.2.1
-wrapt==1.16.0
-yarl==1.9.4
-zipp==3.20.1
diff --git a/.riot/requirements/87a1fff.txt b/.riot/requirements/87a1fff.txt
deleted file mode 100644
index b85e76cdd56..00000000000
--- a/.riot/requirements/87a1fff.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.9
-# by the following command:
-#
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/87a1fff.in
-#
-annotated-types==0.7.0
-anyio==4.4.0
-attrs==24.2.0
-certifi==2024.7.4
-coverage[toml]==7.6.1
-distro==1.9.0
-exceptiongroup==1.2.2
-h11==0.14.0
-httpcore==1.0.5
-httpx==0.27.0
-hypothesis==6.45.0
-idna==3.8
-importlib-metadata==8.4.0
-iniconfig==2.0.0
-mock==5.1.0
-multidict==6.0.5
-numpy==2.0.1
-openai[datalib,embeddings]==1.30.1
-opentracing==2.4.0
-packaging==24.1
-pandas==2.2.2
-pandas-stubs==2.2.2.240807
-pillow==9.5.0
-pluggy==1.5.0
-pydantic==2.8.2
-pydantic-core==2.20.1
-pytest==8.3.2
-pytest-asyncio==0.21.1
-pytest-cov==5.0.0
-pytest-mock==3.14.0
-pytest-randomly==3.15.0
-python-dateutil==2.9.0.post0
-pytz==2024.1
-pyyaml==6.0.2
-six==1.16.0
-sniffio==1.3.1
-sortedcontainers==2.4.0
-tomli==2.0.1
-tqdm==4.66.5
-types-pytz==2024.1.0.20240417
-typing-extensions==4.12.2
-tzdata==2024.1
-urllib3==1.26.19
-vcrpy==4.2.1
-wrapt==1.16.0
-yarl==1.9.4
-zipp==3.20.1
diff --git a/.riot/requirements/b5d5a35.txt b/.riot/requirements/b5d5a35.txt
new file mode 100644
index 00000000000..7838b7abd2c
--- /dev/null
+++ b/.riot/requirements/b5d5a35.txt
@@ -0,0 +1,52 @@
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+# pip-compile --no-annotate .riot/requirements/b5d5a35.in
+#
+annotated-types==0.7.0
+anyio==4.8.0
+attrs==24.3.0
+certifi==2024.12.14
+coverage[toml]==7.6.10
+distro==1.9.0
+exceptiongroup==1.2.2
+h11==0.14.0
+httpcore==1.0.7
+httpx==0.27.2
+hypothesis==6.45.0
+idna==3.10
+iniconfig==2.0.0
+mock==5.1.0
+multidict==6.1.0
+numpy==2.2.2
+openai[datalib,embeddings]==1.30.1
+opentracing==2.4.0
+packaging==24.2
+pandas==2.2.3
+pandas-stubs==2.2.3.241126
+pillow==9.5.0
+pluggy==1.5.0
+propcache==0.2.1
+pydantic==2.10.5
+pydantic-core==2.27.2
+pytest==8.3.4
+pytest-asyncio==0.21.1
+pytest-cov==6.0.0
+pytest-mock==3.14.0
+pytest-randomly==3.16.0
+python-dateutil==2.9.0.post0
+pytz==2024.2
+pyyaml==6.0.2
+six==1.17.0
+sniffio==1.3.1
+sortedcontainers==2.4.0
+tomli==2.2.1
+tqdm==4.67.1
+types-pytz==2024.2.0.20241221
+typing-extensions==4.12.2
+tzdata==2025.1
+urllib3==1.26.20
+vcrpy==4.2.1
+wrapt==1.17.2
+yarl==1.18.3
diff --git a/.riot/requirements/c74f6e0.txt b/.riot/requirements/c74f6e0.txt
deleted file mode 100644
index 63345853661..00000000000
--- a/.riot/requirements/c74f6e0.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.10
-# by the following command:
-#
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/c74f6e0.in
-#
-annotated-types==0.7.0
-anyio==4.4.0
-attrs==24.2.0
-certifi==2024.7.4
-coverage[toml]==7.6.1
-distro==1.9.0
-exceptiongroup==1.2.2
-h11==0.14.0
-httpcore==1.0.5
-httpx==0.27.0
-hypothesis==6.45.0
-idna==3.8
-iniconfig==2.0.0
-mock==5.1.0
-multidict==6.0.5
-numpy==2.1.0
-openai[datalib,embeddings]==1.30.1
-opentracing==2.4.0
-packaging==24.1
-pandas==2.2.2
-pandas-stubs==2.2.2.240807
-pillow==9.5.0
-pluggy==1.5.0
-pydantic==2.8.2
-pydantic-core==2.20.1
-pytest==8.3.2
-pytest-asyncio==0.21.1
-pytest-cov==5.0.0
-pytest-mock==3.14.0
-pytest-randomly==3.15.0
-python-dateutil==2.9.0.post0
-pytz==2024.1
-pyyaml==6.0.2
-six==1.16.0
-sniffio==1.3.1
-sortedcontainers==2.4.0
-tomli==2.0.1
-tqdm==4.66.5
-types-pytz==2024.1.0.20240417
-typing-extensions==4.12.2
-tzdata==2024.1
-urllib3==1.26.19
-vcrpy==4.2.1
-wrapt==1.16.0
-yarl==1.9.4
diff --git a/.riot/requirements/cd2e4ea.txt b/.riot/requirements/cd2e4ea.txt
deleted file mode 100644
index 24353dafa0c..00000000000
--- a/.riot/requirements/cd2e4ea.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.9
-# by the following command:
-#
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/cd2e4ea.in
-#
-annotated-types==0.7.0
-anyio==3.7.1
-attrs==24.2.0
-certifi==2024.7.4
-coverage[toml]==7.6.1
-distro==1.9.0
-exceptiongroup==1.2.2
-h11==0.14.0
-httpcore==1.0.5
-httpx==0.27.0
-hypothesis==6.45.0
-idna==3.8
-importlib-metadata==8.4.0
-iniconfig==2.0.0
-mock==5.1.0
-multidict==6.0.5
-numpy==2.0.1
-openai[datalib,embeddings]==1.1.1
-opentracing==2.4.0
-packaging==24.1
-pandas==2.2.2
-pandas-stubs==2.2.2.240807
-pillow==9.5.0
-pluggy==1.5.0
-pydantic==2.8.2
-pydantic-core==2.20.1
-pytest==8.3.2
-pytest-asyncio==0.21.1
-pytest-cov==5.0.0
-pytest-mock==3.14.0
-pytest-randomly==3.15.0
-python-dateutil==2.9.0.post0
-pytz==2024.1
-pyyaml==6.0.2
-six==1.16.0
-sniffio==1.3.1
-sortedcontainers==2.4.0
-tomli==2.0.1
-tqdm==4.66.5
-types-pytz==2024.1.0.20240417
-typing-extensions==4.12.2
-tzdata==2024.1
-urllib3==1.26.19
-vcrpy==4.2.1
-wrapt==1.16.0
-yarl==1.9.4
-zipp==3.20.1
diff --git a/.riot/requirements/181216c.txt b/.riot/requirements/df60af6.txt
similarity index 82%
rename from .riot/requirements/181216c.txt
rename to .riot/requirements/df60af6.txt
index ac739930363..5143f0e0a74 100644
--- a/.riot/requirements/181216c.txt
+++ b/.riot/requirements/df60af6.txt
@@ -2,13 +2,13 @@
# This file is autogenerated by pip-compile with Python 3.7
# by the following command:
#
-# pip-compile --allow-unsafe --config=pyproject.toml --no-annotate --resolver=backtracking .riot/requirements/181216c.in
+# pip-compile --allow-unsafe --config=pyproject.toml --no-annotate --resolver=backtracking .riot/requirements/df60af6.in
#
annotated-types==0.5.0
anyio==3.7.1
attrs==24.2.0
cached-property==1.5.2
-certifi==2024.7.4
+certifi==2024.12.14
coverage[toml]==7.2.7
distro==1.9.0
exceptiongroup==1.2.2
@@ -16,13 +16,13 @@ h11==0.14.0
httpcore==0.17.3
httpx==0.24.1
hypothesis==6.45.0
-idna==3.8
+idna==3.10
importlib-metadata==6.7.0
iniconfig==2.0.0
mock==5.1.0
multidict==6.0.5
numpy==1.21.6
-openai[datalib,embeddings]==1.30.1
+openai[datalib]==1.30.1
opentracing==2.4.0
packaging==24.0
pandas==1.3.5
@@ -37,15 +37,15 @@ pytest-cov==4.1.0
pytest-mock==3.11.1
pytest-randomly==3.12.0
python-dateutil==2.9.0.post0
-pytz==2024.1
+pytz==2024.2
pyyaml==6.0.1
-six==1.16.0
+six==1.17.0
sniffio==1.3.1
sortedcontainers==2.4.0
tomli==2.0.1
-tqdm==4.66.5
+tqdm==4.67.1
typing-extensions==4.7.1
-urllib3==1.26.19
+urllib3==1.26.20
vcrpy==4.2.1
wrapt==1.16.0
yarl==1.9.4
diff --git a/.riot/requirements/f1c37b1.txt b/.riot/requirements/f1c37b1.txt
deleted file mode 100644
index 4da5078a988..00000000000
--- a/.riot/requirements/f1c37b1.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.10
-# by the following command:
-#
-# pip-compile --allow-unsafe --no-annotate .riot/requirements/f1c37b1.in
-#
-annotated-types==0.7.0
-anyio==3.7.1
-attrs==24.2.0
-certifi==2024.7.4
-coverage[toml]==7.6.1
-distro==1.9.0
-exceptiongroup==1.2.2
-h11==0.14.0
-httpcore==1.0.5
-httpx==0.27.0
-hypothesis==6.45.0
-idna==3.8
-iniconfig==2.0.0
-mock==5.1.0
-multidict==6.0.5
-numpy==2.1.0
-openai[datalib,embeddings]==1.1.1
-opentracing==2.4.0
-packaging==24.1
-pandas==2.2.2
-pandas-stubs==2.2.2.240807
-pillow==9.5.0
-pluggy==1.5.0
-pydantic==2.8.2
-pydantic-core==2.20.1
-pytest==8.3.2
-pytest-asyncio==0.21.1
-pytest-cov==5.0.0
-pytest-mock==3.14.0
-pytest-randomly==3.15.0
-python-dateutil==2.9.0.post0
-pytz==2024.1
-pyyaml==6.0.2
-six==1.16.0
-sniffio==1.3.1
-sortedcontainers==2.4.0
-tomli==2.0.1
-tqdm==4.66.5
-types-pytz==2024.1.0.20240417
-typing-extensions==4.12.2
-tzdata==2024.1
-urllib3==1.26.19
-vcrpy==4.2.1
-wrapt==1.16.0
-yarl==1.9.4
diff --git a/.riot/requirements/f7c30a0.txt b/.riot/requirements/f7c30a0.txt
deleted file mode 100644
index 3e4716aede1..00000000000
--- a/.riot/requirements/f7c30a0.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.7
-# by the following command:
-#
-# pip-compile --allow-unsafe --config=pyproject.toml --no-annotate --resolver=backtracking .riot/requirements/f7c30a0.in
-#
-annotated-types==0.5.0
-anyio==3.7.1
-attrs==24.2.0
-certifi==2024.7.4
-coverage[toml]==7.2.7
-distro==1.9.0
-exceptiongroup==1.2.2
-h11==0.14.0
-httpcore==0.17.3
-httpx==0.24.1
-hypothesis==6.45.0
-idna==3.8
-importlib-metadata==6.7.0
-iniconfig==2.0.0
-mock==5.1.0
-multidict==6.0.5
-numpy==1.21.6
-openai[datalib,embeddings]==1.1.1
-opentracing==2.4.0
-packaging==24.0
-pandas==1.3.5
-pandas-stubs==1.2.0.62
-pillow==9.5.0
-pluggy==1.2.0
-pydantic==2.5.3
-pydantic-core==2.14.6
-pytest==7.4.4
-pytest-asyncio==0.21.1
-pytest-cov==4.1.0
-pytest-mock==3.11.1
-pytest-randomly==3.12.0
-python-dateutil==2.9.0.post0
-pytz==2024.1
-pyyaml==6.0.1
-six==1.16.0
-sniffio==1.3.1
-sortedcontainers==2.4.0
-tomli==2.0.1
-tqdm==4.66.5
-typing-extensions==4.7.1
-urllib3==1.26.19
-vcrpy==4.2.1
-wrapt==1.16.0
-yarl==1.9.4
-zipp==3.15.0
diff --git a/ddtrace/contrib/internal/openai/_endpoint_hooks.py b/ddtrace/contrib/internal/openai/_endpoint_hooks.py
index 979e1774a8a..00ee44aef4b 100644
--- a/ddtrace/contrib/internal/openai/_endpoint_hooks.py
+++ b/ddtrace/contrib/internal/openai/_endpoint_hooks.py
@@ -37,7 +37,7 @@ class _EndpointHook:
OPERATION_ID = "" # Each endpoint hook must provide an operationID as specified in the OpenAI API specs:
# https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml
- def _record_request(self, pin, integration, span, args, kwargs):
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
"""
Set base-level openai tags, as well as request params from args and kwargs.
All inherited EndpointHook classes should include a super call to this method before performing
@@ -45,12 +45,12 @@ def _record_request(self, pin, integration, span, args, kwargs):
"""
endpoint = self.ENDPOINT_NAME
if endpoint is None:
- endpoint = "%s" % args[0].OBJECT_NAME
+ endpoint = "%s" % getattr(instance, "OBJECT_NAME", "")
span.set_tag_str("openai.request.endpoint", "/%s/%s" % (API_VERSION, endpoint))
span.set_tag_str("openai.request.method", self.HTTP_METHOD_TYPE)
if self._request_arg_params and len(self._request_arg_params) > 1:
- for idx, arg in enumerate(self._request_arg_params, 1):
+ for idx, arg in enumerate(self._request_arg_params):
if idx >= len(args):
break
if arg is None or args[idx] is None:
@@ -74,8 +74,8 @@ def _record_request(self, pin, integration, span, args, kwargs):
else:
span.set_tag_str("openai.request.%s" % kw_attr, str(kwargs[kw_attr]))
- def handle_request(self, pin, integration, span, args, kwargs):
- self._record_request(pin, integration, span, args, kwargs)
+ def handle_request(self, pin, integration, instance, span, args, kwargs):
+ self._record_request(pin, integration, instance, span, args, kwargs)
resp, error = yield
if hasattr(resp, "parse"):
# Users can request the raw response, in which case we need to process on the parsed response
@@ -186,8 +186,8 @@ class _CompletionHook(_BaseCompletionHook):
HTTP_METHOD_TYPE = "POST"
OPERATION_ID = "createCompletion"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
+ super()._record_request(pin, integration, instance, span, args, kwargs)
if integration.is_pc_sampled_span(span):
prompt = kwargs.get("prompt", "")
if isinstance(prompt, str):
@@ -241,8 +241,8 @@ class _ChatCompletionHook(_BaseCompletionHook):
HTTP_METHOD_TYPE = "POST"
OPERATION_ID = "createChatCompletion"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
+ super()._record_request(pin, integration, instance, span, args, kwargs)
for idx, m in enumerate(kwargs.get("messages", [])):
role = getattr(m, "role", "")
name = getattr(m, "name", "")
@@ -305,12 +305,12 @@ class _EmbeddingHook(_EndpointHook):
HTTP_METHOD_TYPE = "POST"
OPERATION_ID = "createEmbedding"
- def _record_request(self, pin, integration, span, args, kwargs):
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
"""
Embedding endpoint allows multiple inputs, each of which we specify a request tag for, so have to
manually set them in _pre_response().
"""
- super()._record_request(pin, integration, span, args, kwargs)
+ super()._record_request(pin, integration, instance, span, args, kwargs)
embedding_input = kwargs.get("input", "")
if integration.is_pc_sampled_span(span):
if isinstance(embedding_input, str) or isinstance(embedding_input[0], int):
@@ -340,8 +340,8 @@ class _ListHook(_EndpointHook):
HTTP_METHOD_TYPE = "GET"
OPERATION_ID = "list"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
+ super()._record_request(pin, integration, instance, span, args, kwargs)
endpoint = span.get_tag("openai.request.endpoint")
if endpoint.endswith("/models"):
span.resource = "listModels"
@@ -399,15 +399,21 @@ class _RetrieveHook(_EndpointHook):
HTTP_METHOD_TYPE = "GET"
OPERATION_ID = "retrieve"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
+ super()._record_request(pin, integration, instance, span, args, kwargs)
endpoint = span.get_tag("openai.request.endpoint")
if endpoint.endswith("/models"):
span.resource = "retrieveModel"
- span.set_tag_str("openai.request.model", args[1] if len(args) >= 2 else kwargs.get("model", ""))
+ if len(args) >= 1:
+ span.set_tag_str("openai.request.model", args[0])
+ else:
+ span.set_tag_str("openai.request.model", kwargs.get("model", kwargs.get("id", "")))
elif endpoint.endswith("/files"):
span.resource = "retrieveFile"
- span.set_tag_str("openai.request.file_id", args[1] if len(args) >= 2 else kwargs.get("file_id", ""))
+ if len(args) >= 1:
+ span.set_tag_str("openai.request.file_id", args[0])
+ else:
+ span.set_tag_str("openai.request.file_id", kwargs.get("file_id", kwargs.get("id", "")))
span.set_tag_str("openai.request.endpoint", "%s/*" % endpoint)
def _record_response(self, pin, integration, span, args, kwargs, resp, error):
@@ -434,10 +440,6 @@ class _ModelRetrieveHook(_RetrieveHook):
ENDPOINT_NAME = "models"
OPERATION_ID = "retrieveModel"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
- span.set_tag_str("openai.request.model", args[1] if len(args) >= 2 else kwargs.get("model", ""))
-
class _FileRetrieveHook(_RetrieveHook):
"""
@@ -447,10 +449,6 @@ class _FileRetrieveHook(_RetrieveHook):
ENDPOINT_NAME = "files"
OPERATION_ID = "retrieveFile"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
- span.set_tag_str("openai.request.file_id", args[1] if len(args) >= 2 else kwargs.get("file_id", ""))
-
class _DeleteHook(_EndpointHook):
"""Hook for openai.DeletableAPIResource, which is used by File.delete, and Model.delete."""
@@ -461,15 +459,21 @@ class _DeleteHook(_EndpointHook):
HTTP_METHOD_TYPE = "DELETE"
OPERATION_ID = "delete"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
+ super()._record_request(pin, integration, instance, span, args, kwargs)
endpoint = span.get_tag("openai.request.endpoint")
if endpoint.endswith("/models"):
span.resource = "deleteModel"
- span.set_tag_str("openai.request.model", args[1] if len(args) >= 2 else kwargs.get("model", ""))
+ if len(args) >= 1:
+ span.set_tag_str("openai.request.model", args[0])
+ else:
+ span.set_tag_str("openai.request.model", kwargs.get("model", kwargs.get("sid", "")))
elif endpoint.endswith("/files"):
span.resource = "deleteFile"
- span.set_tag_str("openai.request.file_id", args[1] if len(args) >= 2 else kwargs.get("file_id", ""))
+ if len(args) >= 1:
+ span.set_tag_str("openai.request.file_id", args[0])
+ else:
+ span.set_tag_str("openai.request.file_id", kwargs.get("file_id", kwargs.get("sid", "")))
span.set_tag_str("openai.request.endpoint", "%s/*" % endpoint)
def _record_response(self, pin, integration, span, args, kwargs, resp, error):
@@ -508,8 +512,8 @@ class _ImageHook(_EndpointHook):
ENDPOINT_NAME = "images"
HTTP_METHOD_TYPE = "POST"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
+ super()._record_request(pin, integration, instance, span, args, kwargs)
span.set_tag_str("openai.request.model", "dall-e")
def _record_response(self, pin, integration, span, args, kwargs, resp, error):
@@ -526,10 +530,10 @@ def _record_response(self, pin, integration, span, args, kwargs, resp, error):
if "prompt" in self._request_kwarg_params:
attrs_dict.update({"prompt": kwargs.get("prompt", "")})
if "image" in self._request_kwarg_params:
- image = args[1] if len(args) >= 2 else kwargs.get("image", "")
+ image = args[0] if len(args) >= 1 else kwargs.get("image", "")
attrs_dict.update({"image": image.name.split("/")[-1]})
if "mask" in self._request_kwarg_params:
- mask = args[2] if len(args) >= 3 else kwargs.get("mask", "")
+ mask = args[1] if len(args) >= 2 else kwargs.get("mask", "")
attrs_dict.update({"mask": mask.name.split("/")[-1]})
integration.log(
span, "info" if error is None else "error", "sampled %s" % self.OPERATION_ID, attrs=attrs_dict
@@ -560,12 +564,12 @@ class _ImageEditHook(_ImageHook):
ENDPOINT_NAME = "images/edits"
OPERATION_ID = "createImageEdit"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
+ super()._record_request(pin, integration, instance, span, args, kwargs)
if not integration.is_pc_sampled_span:
return
- image = args[1] if len(args) >= 2 else kwargs.get("image", "")
- mask = args[2] if len(args) >= 3 else kwargs.get("mask", "")
+ image = args[0] if len(args) >= 1 else kwargs.get("image", "")
+ mask = args[1] if len(args) >= 2 else kwargs.get("mask", "")
if image:
if hasattr(image, "name"):
span.set_tag_str("openai.request.image", integration.trunc(image.name.split("/")[-1]))
@@ -584,11 +588,11 @@ class _ImageVariationHook(_ImageHook):
ENDPOINT_NAME = "images/variations"
OPERATION_ID = "createImageVariation"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
+ super()._record_request(pin, integration, instance, span, args, kwargs)
if not integration.is_pc_sampled_span:
return
- image = args[1] if len(args) >= 2 else kwargs.get("image", "")
+ image = args[0] if len(args) >= 1 else kwargs.get("image", "")
if image:
if hasattr(image, "name"):
span.set_tag_str("openai.request.image", integration.trunc(image.name.split("/")[-1]))
@@ -602,11 +606,11 @@ class _BaseAudioHook(_EndpointHook):
ENDPOINT_NAME = "audio"
HTTP_METHOD_TYPE = "POST"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
+ super()._record_request(pin, integration, instance, span, args, kwargs)
if not integration.is_pc_sampled_span:
return
- audio_file = args[2] if len(args) >= 3 else kwargs.get("file", "")
+ audio_file = args[1] if len(args) >= 2 else kwargs.get("file", "")
if audio_file and hasattr(audio_file, "name"):
span.set_tag_str("openai.request.filename", integration.trunc(audio_file.name.split("/")[-1]))
else:
@@ -626,7 +630,7 @@ def _record_response(self, pin, integration, span, args, kwargs, resp, error):
if integration.is_pc_sampled_span(span):
span.set_tag_str("openai.response.text", integration.trunc(text))
if integration.is_pc_sampled_log(span):
- file_input = args[2] if len(args) >= 3 else kwargs.get("file", "")
+ file_input = args[1] if len(args) >= 2 else kwargs.get("file", "")
integration.log(
span,
"info" if error is None else "error",
@@ -685,8 +689,8 @@ class _ModerationHook(_EndpointHook):
HTTP_METHOD_TYPE = "POST"
OPERATION_ID = "createModeration"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
+ super()._record_request(pin, integration, instance, span, args, kwargs)
def _record_response(self, pin, integration, span, args, kwargs, resp, error):
resp = super()._record_response(pin, integration, span, args, kwargs, resp, error)
@@ -718,14 +722,14 @@ class _FileCreateHook(_BaseFileHook):
"organization",
"user_provided_filename",
)
- _request_kwarg_params = ("purpose",)
+ _request_kwarg_params = ("purpose", "user_provided_filename")
_response_attrs = ("id", "bytes", "created_at", "filename", "purpose", "status", "status_details")
HTTP_METHOD_TYPE = "POST"
OPERATION_ID = "createFile"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
- fp = args[1] if len(args) >= 2 else kwargs.get("file", "")
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
+ super()._record_request(pin, integration, instance, span, args, kwargs)
+ fp = args[0] if len(args) >= 1 else kwargs.get("file", "")
if fp and hasattr(fp, "name"):
span.set_tag_str("openai.request.filename", fp.name.split("/")[-1])
else:
@@ -742,9 +746,9 @@ class _FileDownloadHook(_BaseFileHook):
OPERATION_ID = "downloadFile"
ENDPOINT_NAME = "files/*/content"
- def _record_request(self, pin, integration, span, args, kwargs):
- super()._record_request(pin, integration, span, args, kwargs)
- span.set_tag_str("openai.request.file_id", args[1] if len(args) >= 2 else kwargs.get("file_id", ""))
+ def _record_request(self, pin, integration, instance, span, args, kwargs):
+ super()._record_request(pin, integration, instance, span, args, kwargs)
+ span.set_tag_str("openai.request.file_id", args[0] if len(args) >= 1 else kwargs.get("file_id", ""))
def _record_response(self, pin, integration, span, args, kwargs, resp, error):
resp = super()._record_response(pin, integration, span, args, kwargs, resp, error)
diff --git a/ddtrace/contrib/internal/openai/patch.py b/ddtrace/contrib/internal/openai/patch.py
index d87b06b3aba..39f79d13795 100644
--- a/ddtrace/contrib/internal/openai/patch.py
+++ b/ddtrace/contrib/internal/openai/patch.py
@@ -6,12 +6,14 @@
from ddtrace import config
from ddtrace.contrib.internal.openai import _endpoint_hooks
from ddtrace.contrib.internal.openai.utils import _format_openai_api_key
+from ddtrace.contrib.trace_utils import unwrap
+from ddtrace.contrib.trace_utils import with_traced_module
+from ddtrace.contrib.trace_utils import wrap
from ddtrace.internal.logger import get_logger
from ddtrace.internal.schema import schematize_service_name
from ddtrace.internal.utils.formats import asbool
from ddtrace.internal.utils.formats import deep_getattr
from ddtrace.internal.utils.version import parse_version
-from ddtrace.internal.wrapping import wrap
from ddtrace.llmobs._integrations import OpenAIIntegration
from ddtrace.trace import Pin
@@ -80,8 +82,9 @@ def get_version():
else:
_RESOURCES = {
"model.Model": {
- "list": _endpoint_hooks._ListHook,
- "retrieve": _endpoint_hooks._RetrieveHook,
+ "list": _endpoint_hooks._ModelListHook,
+ "retrieve": _endpoint_hooks._ModelRetrieveHook,
+ "delete": _endpoint_hooks._ModelDeleteHook,
},
"completion.Completion": {
"create": _endpoint_hooks._CompletionHook,
@@ -105,19 +108,15 @@ def get_version():
"create": _endpoint_hooks._ModerationHook,
},
"file.File": {
- # File.list() and File.retrieve() share the same underlying method as Model.list() and Model.retrieve()
- # which means they are already wrapped
+ "list": _endpoint_hooks._FileListHook,
+ "retrieve": _endpoint_hooks._FileRetrieveHook,
"create": _endpoint_hooks._FileCreateHook,
- "delete": _endpoint_hooks._DeleteHook,
+ "delete": _endpoint_hooks._FileDeleteHook,
"download": _endpoint_hooks._FileDownloadHook,
},
}
-def _wrap_classmethod(obj, wrapper):
- wrap(obj.__func__, wrapper)
-
-
def patch():
# Avoid importing openai at the module level, eventually will be an import hook
import openai
@@ -127,72 +126,106 @@ def patch():
Pin().onto(openai)
integration = OpenAIIntegration(integration_config=config.openai, openai=openai)
+ openai._datadog_integration = integration
if OPENAI_VERSION >= (1, 0, 0):
if OPENAI_VERSION >= (1, 8, 0):
- wrap(openai._base_client.SyncAPIClient._process_response, _patched_convert(openai, integration))
- wrap(openai._base_client.AsyncAPIClient._process_response, _patched_convert(openai, integration))
+ wrap(openai, "_base_client.SyncAPIClient._process_response", patched_convert(openai))
+ wrap(openai, "_base_client.AsyncAPIClient._process_response", patched_convert(openai))
else:
- wrap(openai._base_client.BaseClient._process_response, _patched_convert(openai, integration))
- wrap(openai.OpenAI.__init__, _patched_client_init(openai, integration))
- wrap(openai.AsyncOpenAI.__init__, _patched_client_init(openai, integration))
- wrap(openai.AzureOpenAI.__init__, _patched_client_init(openai, integration))
- wrap(openai.AsyncAzureOpenAI.__init__, _patched_client_init(openai, integration))
+ wrap(openai, "_base_client.BaseClient._process_response", patched_convert(openai))
+ wrap(openai, "OpenAI.__init__", patched_client_init(openai))
+ wrap(openai, "AsyncOpenAI.__init__", patched_client_init(openai))
+ wrap(openai, "AzureOpenAI.__init__", patched_client_init(openai))
+ wrap(openai, "AsyncAzureOpenAI.__init__", patched_client_init(openai))
for resource, method_hook_dict in _RESOURCES.items():
if deep_getattr(openai.resources, resource) is None:
continue
for method_name, endpoint_hook in method_hook_dict.items():
- sync_method = deep_getattr(openai.resources, "%s.%s" % (resource, method_name))
- async_method = deep_getattr(
- openai.resources, "%s.%s" % (".Async".join(resource.split(".")), method_name)
- )
- wrap(sync_method, _patched_endpoint(openai, integration, endpoint_hook))
- wrap(async_method, _patched_endpoint_async(openai, integration, endpoint_hook))
+ sync_method = "resources.{}.{}".format(resource, method_name)
+ async_method = "resources.{}.{}".format(".Async".join(resource.split(".")), method_name)
+ wrap(openai, sync_method, _patched_endpoint(openai, endpoint_hook))
+ wrap(openai, async_method, _patched_endpoint_async(openai, endpoint_hook))
else:
import openai.api_requestor
- wrap(openai.api_requestor._make_session, _patched_make_session)
- wrap(openai.util.convert_to_openai_object, _patched_convert(openai, integration))
+ wrap(openai, "api_requestor._make_session", _patched_make_session)
+ wrap(openai, "util.convert_to_openai_object", patched_convert(openai))
for resource, method_hook_dict in _RESOURCES.items():
if deep_getattr(openai.api_resources, resource) is None:
continue
for method_name, endpoint_hook in method_hook_dict.items():
- sync_method = deep_getattr(openai.api_resources, "%s.%s" % (resource, method_name))
- async_method = deep_getattr(openai.api_resources, "%s.a%s" % (resource, method_name))
- _wrap_classmethod(sync_method, _patched_endpoint(openai, integration, endpoint_hook))
- _wrap_classmethod(async_method, _patched_endpoint_async(openai, integration, endpoint_hook))
+ sync_method = "api_resources.{}.{}".format(resource, method_name)
+ async_method = "api_resources.{}.a{}".format(resource, method_name)
+ wrap(openai, sync_method, _patched_endpoint(openai, endpoint_hook))
+ wrap(openai, async_method, _patched_endpoint_async(openai, endpoint_hook))
openai.__datadog_patch = True
def unpatch():
- # FIXME: add unpatching. The current wrapping.unwrap method requires
- # the wrapper function to be provided which we don't keep a reference to.
- pass
+ import openai
+ if not getattr(openai, "__datadog_patch", False):
+ return
-def _patched_client_init(openai, integration):
- """
- Patch for `openai.OpenAI/AsyncOpenAI` client init methods to add the client object to the OpenAIIntegration object.
- """
+ openai.__datadog_patch = False
- def patched_client_init(func, args, kwargs):
- func(*args, **kwargs)
- client = args[0]
- integration._client = client
- api_key = kwargs.get("api_key")
- if api_key is None:
- api_key = client.api_key
- if api_key is not None:
- integration.user_api_key = api_key
- return
+ if OPENAI_VERSION >= (1, 0, 0):
+ if OPENAI_VERSION >= (1, 8, 0):
+ unwrap(openai._base_client.SyncAPIClient, "_process_response")
+ unwrap(openai._base_client.AsyncAPIClient, "_process_response")
+ else:
+ unwrap(openai._base_client.BaseClient, "_process_response")
+ unwrap(openai.OpenAI, "__init__")
+ unwrap(openai.AsyncOpenAI, "__init__")
+ unwrap(openai.AzureOpenAI, "__init__")
+ unwrap(openai.AsyncAzureOpenAI, "__init__")
+
+ for resource, method_hook_dict in _RESOURCES.items():
+ if deep_getattr(openai.resources, resource) is None:
+ continue
+ for method_name, _ in method_hook_dict.items():
+ sync_resource = deep_getattr(openai.resources, resource)
+ async_resource = deep_getattr(openai.resources, ".Async".join(resource.split(".")))
+ unwrap(sync_resource, method_name)
+ unwrap(async_resource, method_name)
+ else:
+ import openai.api_requestor
+
+ unwrap(openai.api_requestor, "_make_session")
+ unwrap(openai.util, "convert_to_openai_object")
- return patched_client_init
+ for resource, method_hook_dict in _RESOURCES.items():
+ if deep_getattr(openai.api_resources, resource) is None:
+ continue
+ for method_name, _ in method_hook_dict.items():
+ resource_obj = deep_getattr(openai.api_resources, resource)
+ unwrap(resource_obj, method_name)
+ unwrap(resource_obj, "a{}".format(method_name))
+ delattr(openai, "_datadog_integration")
-def _patched_make_session(func, args, kwargs):
+
+@with_traced_module
+def patched_client_init(openai, pin, func, instance, args, kwargs):
+ """
+ Patch for `openai.OpenAI/AsyncOpenAI` client init methods to add the client object to the OpenAIIntegration object.
+ """
+ func(*args, **kwargs)
+ integration = openai._datadog_integration
+ integration._client = instance
+ api_key = kwargs.get("api_key")
+ if api_key is None:
+ api_key = instance.api_key
+ if api_key is not None:
+ integration.user_api_key = api_key
+ return
+
+
+def _patched_make_session(func, instance, args, kwargs):
"""Patch for `openai.api_requestor._make_session` which sets the service name on the
requests session so that spans from the requests integration will use the service name openai.
This is done so that the service break down will include OpenAI time spent querying the OpenAI backend.
@@ -205,7 +238,7 @@ def _patched_make_session(func, args, kwargs):
return session
-def _traced_endpoint(endpoint_hook, integration, pin, args, kwargs):
+def _traced_endpoint(endpoint_hook, integration, instance, pin, args, kwargs):
span = integration.trace(pin, endpoint_hook.OPERATION_ID)
openai_api_key = _format_openai_api_key(kwargs.get("api_key"))
err = None
@@ -214,7 +247,7 @@ def _traced_endpoint(endpoint_hook, integration, pin, args, kwargs):
span.set_tag_str("openai.user.api_key", openai_api_key)
try:
# Start the hook
- hook = endpoint_hook().handle_request(pin, integration, span, args, kwargs)
+ hook = endpoint_hook().handle_request(pin, integration, instance, span, args, kwargs)
hook.send(None)
resp, err = yield
@@ -238,19 +271,11 @@ def _traced_endpoint(endpoint_hook, integration, pin, args, kwargs):
integration.metric(span, "dist", "request.duration", span.duration_ns)
-def _patched_endpoint(openai, integration, patch_hook):
- def patched_endpoint(func, args, kwargs):
- # FIXME: this is a temporary workaround for the fact that our bytecode wrapping seems to modify
- # a function keyword argument into a cell when it shouldn't. This is only an issue on
- # Python 3.11+.
- if sys.version_info >= (3, 11) and kwargs.get("encoding_format", None):
- kwargs["encoding_format"] = kwargs["encoding_format"].cell_contents
-
- pin = Pin._find(openai, args[0])
- if not pin or not pin.enabled():
- return func(*args, **kwargs)
-
- g = _traced_endpoint(patch_hook, integration, pin, args, kwargs)
+def _patched_endpoint(openai, patch_hook):
+ @with_traced_module
+ def patched_endpoint(openai, pin, func, instance, args, kwargs):
+ integration = openai._datadog_integration
+ g = _traced_endpoint(patch_hook, integration, instance, pin, args, kwargs)
g.send(None)
resp, err = None, None
try:
@@ -267,22 +292,15 @@ def patched_endpoint(func, args, kwargs):
# This return takes priority over `return resp`
return e.value # noqa: B012
- return patched_endpoint
+ return patched_endpoint(openai)
-def _patched_endpoint_async(openai, integration, patch_hook):
+def _patched_endpoint_async(openai, patch_hook):
# Same as _patched_endpoint but async
- async def patched_endpoint(func, args, kwargs):
- # FIXME: this is a temporary workaround for the fact that our bytecode wrapping seems to modify
- # a function keyword argument into a cell when it shouldn't. This is only an issue on
- # Python 3.11+.
- if sys.version_info >= (3, 11) and kwargs.get("encoding_format", None):
- kwargs["encoding_format"] = kwargs["encoding_format"].cell_contents
-
- pin = Pin._find(openai, args[0])
- if not pin or not pin.enabled():
- return await func(*args, **kwargs)
- g = _traced_endpoint(patch_hook, integration, pin, args, kwargs)
+ @with_traced_module
+ async def patched_endpoint(openai, pin, func, instance, args, kwargs):
+ integration = openai._datadog_integration
+ g = _traced_endpoint(patch_hook, integration, instance, pin, args, kwargs)
g.send(None)
resp, err = None, None
try:
@@ -304,59 +322,54 @@ async def patched_endpoint(func, args, kwargs):
# This return takes priority over `return resp`
return e.value # noqa: B012
- return patched_endpoint
+ return patched_endpoint(openai)
-def _patched_convert(openai, integration):
- def patched_convert(func, args, kwargs):
- """Patch convert captures header information in the openai response"""
- pin = Pin.get_from(openai)
- if not pin or not pin.enabled():
- return func(*args, **kwargs)
-
- span = pin.tracer.current_span()
- if not span:
- return func(*args, **kwargs)
+@with_traced_module
+def patched_convert(openai, pin, func, instance, args, kwargs):
+ """Patch convert captures header information in the openai response"""
+ integration = openai._datadog_integration
+ span = pin.tracer.current_span()
+ if not span:
+ return func(*args, **kwargs)
- if OPENAI_VERSION < (1, 0, 0):
- resp = args[0]
- if not isinstance(resp, openai.openai_response.OpenAIResponse):
- return func(*args, **kwargs)
- headers = resp._headers
- else:
- resp = kwargs.get("response", {})
- headers = resp.headers
- # This function is called for each chunk in the stream.
- # To prevent needlessly setting the same tags for each chunk, short-circuit here.
- if span.get_tag("openai.organization.name") is not None:
+ if OPENAI_VERSION < (1, 0, 0):
+ resp = args[0]
+ if not isinstance(resp, openai.openai_response.OpenAIResponse):
return func(*args, **kwargs)
- if headers.get("openai-organization"):
- org_name = headers.get("openai-organization")
- span.set_tag_str("openai.organization.name", org_name)
-
- # Gauge total rate limit
- if headers.get("x-ratelimit-limit-requests"):
- v = headers.get("x-ratelimit-limit-requests")
- if v is not None:
- integration.metric(span, "gauge", "ratelimit.requests", int(v))
- span.set_metric("openai.organization.ratelimit.requests.limit", int(v))
- if headers.get("x-ratelimit-limit-tokens"):
- v = headers.get("x-ratelimit-limit-tokens")
- if v is not None:
- integration.metric(span, "gauge", "ratelimit.tokens", int(v))
- span.set_metric("openai.organization.ratelimit.tokens.limit", int(v))
- # Gauge and set span info for remaining requests and tokens
- if headers.get("x-ratelimit-remaining-requests"):
- v = headers.get("x-ratelimit-remaining-requests")
- if v is not None:
- integration.metric(span, "gauge", "ratelimit.remaining.requests", int(v))
- span.set_metric("openai.organization.ratelimit.requests.remaining", int(v))
- if headers.get("x-ratelimit-remaining-tokens"):
- v = headers.get("x-ratelimit-remaining-tokens")
- if v is not None:
- integration.metric(span, "gauge", "ratelimit.remaining.tokens", int(v))
- span.set_metric("openai.organization.ratelimit.tokens.remaining", int(v))
-
+ headers = resp._headers
+ else:
+ resp = kwargs.get("response", {})
+ headers = resp.headers
+ # This function is called for each chunk in the stream.
+ # To prevent needlessly setting the same tags for each chunk, short-circuit here.
+ if span.get_tag("openai.organization.name") is not None:
return func(*args, **kwargs)
-
- return patched_convert
+ if headers.get("openai-organization"):
+ org_name = headers.get("openai-organization")
+ span.set_tag_str("openai.organization.name", org_name)
+
+ # Gauge total rate limit
+ if headers.get("x-ratelimit-limit-requests"):
+ v = headers.get("x-ratelimit-limit-requests")
+ if v is not None:
+ integration.metric(span, "gauge", "ratelimit.requests", int(v))
+ span.set_metric("openai.organization.ratelimit.requests.limit", int(v))
+ if headers.get("x-ratelimit-limit-tokens"):
+ v = headers.get("x-ratelimit-limit-tokens")
+ if v is not None:
+ integration.metric(span, "gauge", "ratelimit.tokens", int(v))
+ span.set_metric("openai.organization.ratelimit.tokens.limit", int(v))
+ # Gauge and set span info for remaining requests and tokens
+ if headers.get("x-ratelimit-remaining-requests"):
+ v = headers.get("x-ratelimit-remaining-requests")
+ if v is not None:
+ integration.metric(span, "gauge", "ratelimit.remaining.requests", int(v))
+ span.set_metric("openai.organization.ratelimit.requests.remaining", int(v))
+ if headers.get("x-ratelimit-remaining-tokens"):
+ v = headers.get("x-ratelimit-remaining-tokens")
+ if v is not None:
+ integration.metric(span, "gauge", "ratelimit.remaining.tokens", int(v))
+ span.set_metric("openai.organization.ratelimit.tokens.remaining", int(v))
+
+ return func(*args, **kwargs)
diff --git a/ddtrace/contrib/openai/__init__.py b/ddtrace/contrib/openai/__init__.py
index 88090b5f85a..da94047c2e8 100644
--- a/ddtrace/contrib/openai/__init__.py
+++ b/ddtrace/contrib/openai/__init__.py
@@ -248,7 +248,6 @@
Pin.override(openai, service="my-openai-service")
""" # noqa: E501
-
# Required to allow users to import from `ddtrace.contrib.openai.patch` directly
import warnings as _w
diff --git a/releasenotes/notes/fix-openai-313-29ec43cbf2f35947.yaml b/releasenotes/notes/fix-openai-313-29ec43cbf2f35947.yaml
new file mode 100644
index 00000000000..59d818de9a9
--- /dev/null
+++ b/releasenotes/notes/fix-openai-313-29ec43cbf2f35947.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ openai: Introduces tracing support to the OpenAI integration for Python versions 3.12 and 3.13.
+fixes:
+ - |
+ openai: Fixes a patching issue where asynchronous moderation endpoint calls resulted in coroutine scheduling errors.
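For reference, a minimal sketch of the previously failing async pattern on Python 3.12/3.13 (the API key and input below are placeholders; a real request still needs valid credentials):

```python
import asyncio

import openai

from ddtrace import patch


patch(openai=True)  # wrapping now works on Python 3.12 and 3.13


async def main():
    client = openai.AsyncOpenAI(api_key="sk-placeholder")
    # Async moderation calls previously hit coroutine scheduling errors
    # under the old wrapping approach on newer Python versions.
    await client.moderations.create(input="hello world")


asyncio.run(main())
```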
diff --git a/riotfile.py b/riotfile.py
index aa51b9349a6..bf3e0eede96 100644
--- a/riotfile.py
+++ b/riotfile.py
@@ -2504,18 +2504,26 @@ def select_pys(min_version: str = MIN_PYTHON_VERSION, max_version: str = MAX_PYT
},
),
Venv(
- pys=select_pys(min_version="3.7", max_version="3.11"),
+ pys="3.7",
pkgs={
- "openai[embeddings,datalib]": ["==1.1.1", "==1.30.1"],
+ "openai[datalib]": "==1.30.1",
"pillow": "==9.5.0",
},
),
Venv(
pys=select_pys(min_version="3.8", max_version="3.11"),
pkgs={
- "openai[datalib]": ["==1.30.1"],
+ "openai[embeddings,datalib]": "==1.30.1",
+ "pillow": "==9.5.0",
+ "httpx": "==0.27.2",
+ },
+ ),
+ Venv(
+ pys=select_pys(min_version="3.8"),
+ pkgs={
+ "openai": latest,
"tiktoken": latest,
- "pillow": "==10.1.0",
+ "pillow": latest,
},
env={"TIKTOKEN_AVAILABLE": "True"},
),
diff --git a/tests/contrib/openai/cassettes/v1/completion_stream_wrong_api_key.yaml b/tests/contrib/openai/cassettes/v1/completion_stream_wrong_api_key.yaml
new file mode 100644
index 00000000000..512263ce56c
--- /dev/null
+++ b/tests/contrib/openai/cassettes/v1/completion_stream_wrong_api_key.yaml
@@ -0,0 +1,77 @@
+interactions:
+- request:
+ body: '{"model":"text-curie-001","prompt":"how does openai tokenize prompts?","max_tokens":150,"n":1,"stream":true,"temperature":0.8}'
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '126'
+ content-type:
+ - application/json
+ host:
+ - api.openai.com
+ user-agent:
+ - OpenAI/Python 1.59.7
+ x-stainless-arch:
+ - arm64
+ x-stainless-async:
+ - 'false'
+ x-stainless-lang:
+ - python
+ x-stainless-os:
+ - MacOS
+ x-stainless-package-version:
+ - 1.59.7
+ x-stainless-retry-count:
+ - '0'
+ x-stainless-runtime:
+ - CPython
+ x-stainless-runtime-version:
+ - 3.13.1
+ method: POST
+ uri: https://api.openai.com/v1/completions
+ response:
+ body:
+ string: "{\n \"error\": {\n \"message\": \"Incorrect API key provided:
+ sk-wrong****-key. You can find your API key at https://platform.openai.com/account/api-keys.\",\n
+ \ \"type\": \"invalid_request_error\",\n \"param\": null,\n \"code\":
+ \"invalid_api_key\"\n }\n}\n"
+ headers:
+ CF-Cache-Status:
+ - DYNAMIC
+ CF-RAY:
+ - 9058b3cc3bcdd63c-IAD
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '266'
+ Content-Type:
+ - application/json; charset=utf-8
+ Date:
+ - Tue, 21 Jan 2025 16:32:48 GMT
+ Server:
+ - cloudflare
+ Set-Cookie:
+ - __cf_bm=WUZdhCkUNTJUEkju8qgk4MKCHL7CFOaIUNvU0L9XmvA-1737477168-1.0.1.1-RJ7MOiDyJEfHrXSN0WQVgZFtkxlkwBL3p.5t3._uu77WPJSM8tYzI3wMHSu.yMwD9QkrbgR5yavkTN.RTWl_1A;
+ path=/; expires=Tue, 21-Jan-25 17:02:48 GMT; domain=.api.openai.com; HttpOnly;
+ Secure; SameSite=None
+ - _cfuvid=7KOfpy1ICNI532AjhDxBh2qtnyNpsjauHeWi6dEJgT4-1737477168271-0.0.1.1-604800000;
+ path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+ X-Content-Type-Options:
+ - nosniff
+ alt-svc:
+ - h3=":443"; ma=86400
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ vary:
+ - Origin
+ x-request-id:
+ - req_c45bfc7515dca54ef87c667f8210af23
+ status:
+ code: 401
+ message: Unauthorized
+version: 1
diff --git a/tests/contrib/openai/test_openai_llmobs.py b/tests/contrib/openai/test_openai_llmobs.py
index a145877c8c8..4c15b1ffad3 100644
--- a/tests/contrib/openai/test_openai_llmobs.py
+++ b/tests/contrib/openai/test_openai_llmobs.py
@@ -339,6 +339,10 @@ def test_completion(self, openai, ddtrace_global_config, mock_llmobs_writer, moc
)
)
+ @pytest.mark.skipif(
+ parse_version(openai_module.version.VERSION) >= (1, 60),
+ reason="latest openai versions use modified azure requests",
+ )
def test_completion_azure(
self, openai, azure_openai_config, ddtrace_global_config, mock_llmobs_writer, mock_tracer
):
@@ -369,6 +373,10 @@ def test_completion_azure(
)
)
+ @pytest.mark.skipif(
+ parse_version(openai_module.version.VERSION) >= (1, 60),
+ reason="latest openai versions use modified azure requests",
+ )
async def test_completion_azure_async(
self, openai, azure_openai_config, ddtrace_global_config, mock_llmobs_writer, mock_tracer
):
@@ -458,6 +466,10 @@ def test_chat_completion(self, openai, ddtrace_global_config, mock_llmobs_writer
)
)
+ @pytest.mark.skipif(
+ parse_version(openai_module.version.VERSION) >= (1, 60),
+ reason="latest openai versions use modified azure requests",
+ )
def test_chat_completion_azure(
self, openai, azure_openai_config, ddtrace_global_config, mock_llmobs_writer, mock_tracer
):
@@ -488,6 +500,10 @@ def test_chat_completion_azure(
)
)
+ @pytest.mark.skipif(
+ parse_version(openai_module.version.VERSION) >= (1, 60),
+ reason="latest openai versions use modified azure requests",
+ )
async def test_chat_completion_azure_async(
self, openai, azure_openai_config, ddtrace_global_config, mock_llmobs_writer, mock_tracer
):
diff --git a/tests/contrib/openai/test_openai_patch.py b/tests/contrib/openai/test_openai_patch.py
index caab79117cf..6a995213180 100644
--- a/tests/contrib/openai/test_openai_patch.py
+++ b/tests/contrib/openai/test_openai_patch.py
@@ -3,14 +3,10 @@
# removed the ``_generated`` suffix from the file name, to prevent the content
# from being overwritten by future re-generations.
+from ddtrace.contrib.internal.openai.patch import OPENAI_VERSION
from ddtrace.contrib.internal.openai.patch import get_version
from ddtrace.contrib.internal.openai.patch import patch
-
-
-try:
- from ddtrace.contrib.internal.openai.patch import unpatch
-except ImportError:
- unpatch = None
+from ddtrace.contrib.internal.openai.patch import unpatch
from tests.contrib.patch import PatchTestCase
@@ -22,10 +18,268 @@ class TestOpenaiPatch(PatchTestCase.Base):
__get_version__ = get_version
def assert_module_patched(self, openai):
- pass
+ if OPENAI_VERSION >= (1, 0, 0):
+ if OPENAI_VERSION >= (1, 8, 0):
+ self.assert_wrapped(openai._base_client.SyncAPIClient._process_response)
+ self.assert_wrapped(openai._base_client.AsyncAPIClient._process_response)
+ else:
+ self.assert_wrapped(openai._base_client.BaseClient._process_response)
+ self.assert_wrapped(openai.OpenAI.__init__)
+ self.assert_wrapped(openai.AsyncOpenAI.__init__)
+ self.assert_wrapped(openai.AzureOpenAI.__init__)
+ self.assert_wrapped(openai.AsyncAzureOpenAI.__init__)
+ self.assert_wrapped(openai.resources.models.Models.list)
+ self.assert_wrapped(openai.resources.models.Models.retrieve)
+ self.assert_wrapped(openai.resources.models.Models.delete)
+ self.assert_wrapped(openai.resources.models.AsyncModels.list)
+ self.assert_wrapped(openai.resources.models.AsyncModels.retrieve)
+ self.assert_wrapped(openai.resources.models.AsyncModels.delete)
+ self.assert_wrapped(openai.resources.completions.Completions.create)
+ self.assert_wrapped(openai.resources.chat.Completions.create)
+ self.assert_wrapped(openai.resources.completions.AsyncCompletions.create)
+ self.assert_wrapped(openai.resources.chat.AsyncCompletions.create)
+ self.assert_wrapped(openai.resources.images.Images.generate)
+ self.assert_wrapped(openai.resources.images.Images.edit)
+ self.assert_wrapped(openai.resources.images.Images.create_variation)
+ self.assert_wrapped(openai.resources.images.AsyncImages.generate)
+ self.assert_wrapped(openai.resources.images.AsyncImages.edit)
+ self.assert_wrapped(openai.resources.images.AsyncImages.create_variation)
+ self.assert_wrapped(openai.resources.audio.Transcriptions.create)
+ self.assert_wrapped(openai.resources.audio.Translations.create)
+ self.assert_wrapped(openai.resources.audio.AsyncTranscriptions.create)
+ self.assert_wrapped(openai.resources.audio.AsyncTranslations.create)
+ self.assert_wrapped(openai.resources.embeddings.Embeddings.create)
+ self.assert_wrapped(openai.resources.moderations.Moderations.create)
+ self.assert_wrapped(openai.resources.embeddings.AsyncEmbeddings.create)
+ self.assert_wrapped(openai.resources.moderations.AsyncModerations.create)
+ self.assert_wrapped(openai.resources.files.Files.create)
+ self.assert_wrapped(openai.resources.files.Files.retrieve)
+ self.assert_wrapped(openai.resources.files.Files.list)
+ self.assert_wrapped(openai.resources.files.Files.delete)
+ self.assert_wrapped(openai.resources.files.Files.retrieve_content)
+ self.assert_wrapped(openai.resources.files.AsyncFiles.create)
+ self.assert_wrapped(openai.resources.files.AsyncFiles.retrieve)
+ self.assert_wrapped(openai.resources.files.AsyncFiles.list)
+ self.assert_wrapped(openai.resources.files.AsyncFiles.delete)
+ self.assert_wrapped(openai.resources.files.AsyncFiles.retrieve_content)
+ else:
+ self.assert_wrapped(openai.api_resources.completion.Completion.create)
+ self.assert_wrapped(openai.api_resources.completion.Completion.acreate)
+ self.assert_wrapped(openai.api_requestor._make_session)
+ self.assert_wrapped(openai.util.convert_to_openai_object)
+ self.assert_wrapped(openai.api_resources.embedding.Embedding.create)
+ self.assert_wrapped(openai.api_resources.embedding.Embedding.acreate)
+ if hasattr(openai, "Model"):
+ self.assert_wrapped(openai.api_resources.model.Model.list)
+ self.assert_wrapped(openai.api_resources.model.Model.retrieve)
+ self.assert_wrapped(openai.api_resources.model.Model.delete)
+ self.assert_wrapped(openai.api_resources.model.Model.alist)
+ self.assert_wrapped(openai.api_resources.model.Model.aretrieve)
+ self.assert_wrapped(openai.api_resources.model.Model.adelete)
+ if hasattr(openai, "ChatCompletion"):
+ self.assert_wrapped(openai.api_resources.chat_completion.ChatCompletion.create)
+ self.assert_wrapped(openai.api_resources.chat_completion.ChatCompletion.acreate)
+ if hasattr(openai, "Image"):
+ self.assert_wrapped(openai.api_resources.image.Image.create)
+ self.assert_wrapped(openai.api_resources.image.Image.acreate)
+ self.assert_wrapped(openai.api_resources.image.Image.create_edit)
+ self.assert_wrapped(openai.api_resources.image.Image.acreate_edit)
+ self.assert_wrapped(openai.api_resources.image.Image.create_variation)
+ self.assert_wrapped(openai.api_resources.image.Image.acreate_variation)
+ if hasattr(openai, "Audio"):
+ self.assert_wrapped(openai.api_resources.audio.Audio.transcribe)
+ self.assert_wrapped(openai.api_resources.audio.Audio.atranscribe)
+ self.assert_wrapped(openai.api_resources.audio.Audio.translate)
+ self.assert_wrapped(openai.api_resources.audio.Audio.atranslate)
+ if hasattr(openai, "Moderation"):
+ self.assert_wrapped(openai.api_resources.moderation.Moderation.create)
+ self.assert_wrapped(openai.api_resources.moderation.Moderation.acreate)
+ if hasattr(openai, "File"):
+ self.assert_wrapped(openai.api_resources.file.File.create)
+ self.assert_wrapped(openai.api_resources.file.File.retrieve)
+ self.assert_wrapped(openai.api_resources.file.File.list)
+ self.assert_wrapped(openai.api_resources.file.File.delete)
+ self.assert_wrapped(openai.api_resources.file.File.download)
+ self.assert_wrapped(openai.api_resources.file.File.acreate)
+ self.assert_wrapped(openai.api_resources.file.File.aretrieve)
+ self.assert_wrapped(openai.api_resources.file.File.alist)
+ self.assert_wrapped(openai.api_resources.file.File.adelete)
+ self.assert_wrapped(openai.api_resources.file.File.adownload)
def assert_not_module_patched(self, openai):
- pass
+ if OPENAI_VERSION >= (1, 0, 0):
+ if OPENAI_VERSION >= (1, 8, 0):
+ self.assert_not_wrapped(openai._base_client.SyncAPIClient._process_response)
+ self.assert_not_wrapped(openai._base_client.AsyncAPIClient._process_response)
+ else:
+ self.assert_not_wrapped(openai._base_client.BaseClient._process_response)
+ self.assert_not_wrapped(openai.OpenAI.__init__)
+ self.assert_not_wrapped(openai.AsyncOpenAI.__init__)
+ self.assert_not_wrapped(openai.AzureOpenAI.__init__)
+ self.assert_not_wrapped(openai.AsyncAzureOpenAI.__init__)
+ self.assert_not_wrapped(openai.resources.models.Models.list)
+ self.assert_not_wrapped(openai.resources.models.Models.retrieve)
+ self.assert_not_wrapped(openai.resources.models.Models.delete)
+ self.assert_not_wrapped(openai.resources.models.AsyncModels.list)
+ self.assert_not_wrapped(openai.resources.models.AsyncModels.retrieve)
+ self.assert_not_wrapped(openai.resources.models.AsyncModels.delete)
+ self.assert_not_wrapped(openai.resources.completions.Completions.create)
+ self.assert_not_wrapped(openai.resources.chat.Completions.create)
+ self.assert_not_wrapped(openai.resources.completions.AsyncCompletions.create)
+ self.assert_not_wrapped(openai.resources.chat.AsyncCompletions.create)
+ self.assert_not_wrapped(openai.resources.images.Images.generate)
+ self.assert_not_wrapped(openai.resources.images.Images.edit)
+ self.assert_not_wrapped(openai.resources.images.Images.create_variation)
+ self.assert_not_wrapped(openai.resources.images.AsyncImages.generate)
+ self.assert_not_wrapped(openai.resources.images.AsyncImages.edit)
+ self.assert_not_wrapped(openai.resources.images.AsyncImages.create_variation)
+ self.assert_not_wrapped(openai.resources.audio.Transcriptions.create)
+ self.assert_not_wrapped(openai.resources.audio.Translations.create)
+ self.assert_not_wrapped(openai.resources.audio.AsyncTranscriptions.create)
+ self.assert_not_wrapped(openai.resources.audio.AsyncTranslations.create)
+ self.assert_not_wrapped(openai.resources.embeddings.Embeddings.create)
+ self.assert_not_wrapped(openai.resources.moderations.Moderations.create)
+ self.assert_not_wrapped(openai.resources.embeddings.AsyncEmbeddings.create)
+ self.assert_not_wrapped(openai.resources.moderations.AsyncModerations.create)
+ self.assert_not_wrapped(openai.resources.files.Files.create)
+ self.assert_not_wrapped(openai.resources.files.Files.retrieve)
+ self.assert_not_wrapped(openai.resources.files.Files.list)
+ self.assert_not_wrapped(openai.resources.files.Files.delete)
+            self.assert_not_wrapped(openai.resources.files.Files.retrieve_content)
+ self.assert_not_wrapped(openai.resources.files.AsyncFiles.create)
+ self.assert_not_wrapped(openai.resources.files.AsyncFiles.retrieve)
+ self.assert_not_wrapped(openai.resources.files.AsyncFiles.list)
+ self.assert_not_wrapped(openai.resources.files.AsyncFiles.delete)
+ self.assert_not_wrapped(openai.resources.files.AsyncFiles.retrieve_content)
+ else:
+ self.assert_not_wrapped(openai.api_resources.completion.Completion.create)
+ self.assert_not_wrapped(openai.api_resources.completion.Completion.acreate)
+ self.assert_not_wrapped(openai.api_requestor._make_session)
+ self.assert_not_wrapped(openai.util.convert_to_openai_object)
+ self.assert_not_wrapped(openai.api_resources.embedding.Embedding.create)
+ self.assert_not_wrapped(openai.api_resources.embedding.Embedding.acreate)
+ if hasattr(openai, "Model"):
+ self.assert_not_wrapped(openai.api_resources.model.Model.list)
+ self.assert_not_wrapped(openai.api_resources.model.Model.retrieve)
+ self.assert_not_wrapped(openai.api_resources.model.Model.delete)
+ self.assert_not_wrapped(openai.api_resources.model.Model.alist)
+ self.assert_not_wrapped(openai.api_resources.model.Model.aretrieve)
+ self.assert_not_wrapped(openai.api_resources.model.Model.adelete)
+ if hasattr(openai, "ChatCompletion"):
+ self.assert_not_wrapped(openai.api_resources.chat_completion.ChatCompletion.create)
+ self.assert_not_wrapped(openai.api_resources.chat_completion.ChatCompletion.acreate)
+ if hasattr(openai, "Image"):
+ self.assert_not_wrapped(openai.api_resources.image.Image.create)
+ self.assert_not_wrapped(openai.api_resources.image.Image.acreate)
+ self.assert_not_wrapped(openai.api_resources.image.Image.create_edit)
+ self.assert_not_wrapped(openai.api_resources.image.Image.acreate_edit)
+ self.assert_not_wrapped(openai.api_resources.image.Image.create_variation)
+ self.assert_not_wrapped(openai.api_resources.image.Image.acreate_variation)
+ if hasattr(openai, "Audio"):
+ self.assert_not_wrapped(openai.api_resources.audio.Audio.transcribe)
+ self.assert_not_wrapped(openai.api_resources.audio.Audio.atranscribe)
+ self.assert_not_wrapped(openai.api_resources.audio.Audio.translate)
+ self.assert_not_wrapped(openai.api_resources.audio.Audio.atranslate)
+ if hasattr(openai, "Moderation"):
+ self.assert_not_wrapped(openai.api_resources.moderation.Moderation.create)
+ self.assert_not_wrapped(openai.api_resources.moderation.Moderation.acreate)
+ if hasattr(openai, "File"):
+ self.assert_not_wrapped(openai.api_resources.file.File.create)
+ self.assert_not_wrapped(openai.api_resources.file.File.retrieve)
+ self.assert_not_wrapped(openai.api_resources.file.File.list)
+ self.assert_not_wrapped(openai.api_resources.file.File.delete)
+ self.assert_not_wrapped(openai.api_resources.file.File.download)
+ self.assert_not_wrapped(openai.api_resources.file.File.acreate)
+ self.assert_not_wrapped(openai.api_resources.file.File.aretrieve)
+ self.assert_not_wrapped(openai.api_resources.file.File.alist)
+ self.assert_not_wrapped(openai.api_resources.file.File.adelete)
+ self.assert_not_wrapped(openai.api_resources.file.File.adownload)
def assert_not_module_double_patched(self, openai):
- pass
+ if OPENAI_VERSION >= (1, 0, 0):
+ if OPENAI_VERSION >= (1, 8, 0):
+ self.assert_not_double_wrapped(openai._base_client.SyncAPIClient._process_response)
+ self.assert_not_double_wrapped(openai._base_client.AsyncAPIClient._process_response)
+ else:
+ self.assert_not_double_wrapped(openai._base_client.BaseClient._process_response)
+ self.assert_not_double_wrapped(openai.OpenAI.__init__)
+ self.assert_not_double_wrapped(openai.AsyncOpenAI.__init__)
+ self.assert_not_double_wrapped(openai.AzureOpenAI.__init__)
+ self.assert_not_double_wrapped(openai.AsyncAzureOpenAI.__init__)
+ self.assert_not_double_wrapped(openai.resources.models.Models.list)
+ self.assert_not_double_wrapped(openai.resources.models.Models.retrieve)
+ self.assert_not_double_wrapped(openai.resources.models.Models.delete)
+ self.assert_not_double_wrapped(openai.resources.models.AsyncModels.list)
+ self.assert_not_double_wrapped(openai.resources.models.AsyncModels.retrieve)
+ self.assert_not_double_wrapped(openai.resources.models.AsyncModels.delete)
+ self.assert_not_double_wrapped(openai.resources.completions.Completions.create)
+ self.assert_not_double_wrapped(openai.resources.chat.Completions.create)
+ self.assert_not_double_wrapped(openai.resources.completions.AsyncCompletions.create)
+ self.assert_not_double_wrapped(openai.resources.chat.AsyncCompletions.create)
+ self.assert_not_double_wrapped(openai.resources.images.Images.generate)
+ self.assert_not_double_wrapped(openai.resources.images.Images.edit)
+ self.assert_not_double_wrapped(openai.resources.images.Images.create_variation)
+ self.assert_not_double_wrapped(openai.resources.images.AsyncImages.generate)
+ self.assert_not_double_wrapped(openai.resources.images.AsyncImages.edit)
+ self.assert_not_double_wrapped(openai.resources.images.AsyncImages.create_variation)
+ self.assert_not_double_wrapped(openai.resources.audio.Transcriptions.create)
+ self.assert_not_double_wrapped(openai.resources.audio.Translations.create)
+ self.assert_not_double_wrapped(openai.resources.audio.AsyncTranscriptions.create)
+ self.assert_not_double_wrapped(openai.resources.audio.AsyncTranslations.create)
+ self.assert_not_double_wrapped(openai.resources.embeddings.Embeddings.create)
+ self.assert_not_double_wrapped(openai.resources.moderations.Moderations.create)
+ self.assert_not_double_wrapped(openai.resources.embeddings.AsyncEmbeddings.create)
+ self.assert_not_double_wrapped(openai.resources.moderations.AsyncModerations.create)
+ self.assert_not_double_wrapped(openai.resources.files.Files.create)
+ self.assert_not_double_wrapped(openai.resources.files.Files.retrieve)
+ self.assert_not_double_wrapped(openai.resources.files.Files.list)
+ self.assert_not_double_wrapped(openai.resources.files.Files.delete)
+ self.assert_not_double_wrapped(openai.resources.files.Files.retrieve_content)
+ self.assert_not_double_wrapped(openai.resources.files.AsyncFiles.create)
+ self.assert_not_double_wrapped(openai.resources.files.AsyncFiles.retrieve)
+ self.assert_not_double_wrapped(openai.resources.files.AsyncFiles.list)
+ self.assert_not_double_wrapped(openai.resources.files.AsyncFiles.delete)
+ self.assert_not_double_wrapped(openai.resources.files.AsyncFiles.retrieve_content)
+ else:
+ self.assert_not_double_wrapped(openai.api_resources.completion.Completion.create)
+ self.assert_not_double_wrapped(openai.api_resources.completion.Completion.acreate)
+ self.assert_not_double_wrapped(openai.api_requestor._make_session)
+ self.assert_not_double_wrapped(openai.util.convert_to_openai_object)
+ self.assert_not_double_wrapped(openai.api_resources.embedding.Embedding.create)
+ self.assert_not_double_wrapped(openai.api_resources.embedding.Embedding.acreate)
+ if hasattr(openai, "Model"):
+ self.assert_not_double_wrapped(openai.api_resources.model.Model.list)
+ self.assert_not_double_wrapped(openai.api_resources.model.Model.retrieve)
+ self.assert_not_double_wrapped(openai.api_resources.model.Model.delete)
+ self.assert_not_double_wrapped(openai.api_resources.model.Model.alist)
+ self.assert_not_double_wrapped(openai.api_resources.model.Model.aretrieve)
+ self.assert_not_double_wrapped(openai.api_resources.model.Model.adelete)
+ if hasattr(openai, "ChatCompletion"):
+ self.assert_not_double_wrapped(openai.api_resources.chat_completion.ChatCompletion.create)
+ self.assert_not_double_wrapped(openai.api_resources.chat_completion.ChatCompletion.acreate)
+ if hasattr(openai, "Image"):
+ self.assert_not_double_wrapped(openai.api_resources.image.Image.create)
+ self.assert_not_double_wrapped(openai.api_resources.image.Image.acreate)
+ self.assert_not_double_wrapped(openai.api_resources.image.Image.create_edit)
+ self.assert_not_double_wrapped(openai.api_resources.image.Image.acreate_edit)
+ self.assert_not_double_wrapped(openai.api_resources.image.Image.create_variation)
+ self.assert_not_double_wrapped(openai.api_resources.image.Image.acreate_variation)
+ if hasattr(openai, "Audio"):
+ self.assert_not_double_wrapped(openai.api_resources.audio.Audio.transcribe)
+ self.assert_not_double_wrapped(openai.api_resources.audio.Audio.atranscribe)
+ self.assert_not_double_wrapped(openai.api_resources.audio.Audio.translate)
+ self.assert_not_double_wrapped(openai.api_resources.audio.Audio.atranslate)
+ if hasattr(openai, "Moderation"):
+ self.assert_not_double_wrapped(openai.api_resources.moderation.Moderation.create)
+ self.assert_not_double_wrapped(openai.api_resources.moderation.Moderation.acreate)
+ if hasattr(openai, "File"):
+ self.assert_not_double_wrapped(openai.api_resources.file.File.create)
+ self.assert_not_double_wrapped(openai.api_resources.file.File.retrieve)
+ self.assert_not_double_wrapped(openai.api_resources.file.File.list)
+ self.assert_not_double_wrapped(openai.api_resources.file.File.delete)
+ self.assert_not_double_wrapped(openai.api_resources.file.File.download)
+ self.assert_not_double_wrapped(openai.api_resources.file.File.acreate)
+ self.assert_not_double_wrapped(openai.api_resources.file.File.aretrieve)
+ self.assert_not_double_wrapped(openai.api_resources.file.File.alist)
+ self.assert_not_double_wrapped(openai.api_resources.file.File.adelete)
+ self.assert_not_double_wrapped(openai.api_resources.file.File.adownload)
diff --git a/tests/contrib/openai/test_openai_v0.py b/tests/contrib/openai/test_openai_v0.py
index 0dbd537c3ff..0a618b4bffc 100644
--- a/tests/contrib/openai/test_openai_v0.py
+++ b/tests/contrib/openai/test_openai_v0.py
@@ -9,9 +9,7 @@
import pytest
import ddtrace
-from ddtrace import patch
from ddtrace.contrib.internal.openai.utils import _est_tokens
-from ddtrace.contrib.internal.trace_utils import iswrapped
from ddtrace.internal.utils.version import parse_version
from tests.contrib.openai.utils import chat_completion_custom_functions
from tests.contrib.openai.utils import chat_completion_input_description
@@ -41,42 +39,6 @@ def test_config(ddtrace_config_openai, mock_tracer, openai):
assert ddtrace.config.openai.metrics_enabled is ddtrace_config_openai["metrics_enabled"]
-def test_patching(openai):
- """Ensure that the correct objects are patched and not double patched."""
-
- # for some reason these can't be specified as the real python objects...
- # no clue why (eg. openai.Completion.create doesn't work)
- methods = [
- (openai.Completion, "create"),
- (openai.api_resources.completion.Completion, "create"),
- (openai.Completion, "acreate"),
- (openai.api_resources.completion.Completion, "acreate"),
- (openai.api_requestor, "_make_session"),
- (openai.util, "convert_to_openai_object"),
- (openai.Embedding, "create"),
- (openai.Embedding, "acreate"),
- ]
- if hasattr(openai, "ChatCompletion"):
- methods += [
- (openai.ChatCompletion, "create"),
- (openai.api_resources.chat_completion.ChatCompletion, "create"),
- (openai.ChatCompletion, "acreate"),
- (openai.api_resources.chat_completion.ChatCompletion, "acreate"),
- ]
-
- for m in methods:
- assert not iswrapped(getattr(m[0], m[1]))
-
- patch(openai=True)
- for m in methods:
- assert iswrapped(getattr(m[0], m[1]))
-
- # Ensure double patching does not occur
- patch(openai=True)
- for m in methods:
- assert not iswrapped(getattr(m[0], m[1]).__dd_wrapped__)
-
-
@pytest.mark.parametrize("api_key_in_env", [True, False])
def test_model_list(api_key_in_env, request_api_key, openai, openai_vcr, mock_metrics, snapshot_tracer):
with snapshot_context(
diff --git a/tests/contrib/openai/test_openai_v1.py b/tests/contrib/openai/test_openai_v1.py
index 47ed05ea1bd..438b980d5b5 100644
--- a/tests/contrib/openai/test_openai_v1.py
+++ b/tests/contrib/openai/test_openai_v1.py
@@ -5,9 +5,7 @@
import pytest
import ddtrace
-from ddtrace import patch
from ddtrace.contrib.internal.openai.utils import _est_tokens
-from ddtrace.contrib.internal.trace_utils import iswrapped
from ddtrace.internal.utils.version import parse_version
from tests.contrib.openai.utils import chat_completion_custom_functions
from tests.contrib.openai.utils import chat_completion_input_description
@@ -37,56 +35,6 @@ def test_config(ddtrace_config_openai, mock_tracer, openai):
assert ddtrace.config.openai.metrics_enabled is ddtrace_config_openai["metrics_enabled"]
-def test_patching(openai):
- """Ensure that the correct objects are patched and not double patched."""
- methods = [
- (openai.resources.completions.Completions, "create"),
- (openai.resources.completions.AsyncCompletions, "create"),
- (openai.resources.chat.Completions, "create"),
- (openai.resources.chat.AsyncCompletions, "create"),
- (openai.resources.embeddings.Embeddings, "create"),
- (openai.resources.embeddings.AsyncEmbeddings, "create"),
- (openai.resources.models.Models, "list"),
- (openai.resources.models.Models, "retrieve"),
- (openai.resources.models.AsyncModels, "list"),
- (openai.resources.models.AsyncModels, "retrieve"),
- (openai.resources.images.Images, "generate"),
- (openai.resources.images.Images, "edit"),
- (openai.resources.images.Images, "create_variation"),
- (openai.resources.images.AsyncImages, "generate"),
- (openai.resources.images.AsyncImages, "edit"),
- (openai.resources.images.AsyncImages, "create_variation"),
- (openai.resources.audio.Transcriptions, "create"),
- (openai.resources.audio.AsyncTranscriptions, "create"),
- (openai.resources.audio.Translations, "create"),
- (openai.resources.audio.AsyncTranslations, "create"),
- (openai.resources.moderations.Moderations, "create"),
- (openai.resources.moderations.AsyncModerations, "create"),
- (openai.resources.files.Files, "create"),
- (openai.resources.files.Files, "retrieve"),
- (openai.resources.files.Files, "list"),
- (openai.resources.files.Files, "delete"),
- (openai.resources.files.Files, "retrieve_content"),
- (openai.resources.files.AsyncFiles, "create"),
- (openai.resources.files.AsyncFiles, "retrieve"),
- (openai.resources.files.AsyncFiles, "list"),
- (openai.resources.files.AsyncFiles, "delete"),
- (openai.resources.files.AsyncFiles, "retrieve_content"),
- ]
-
- for m in methods:
- assert not iswrapped(getattr(m[0], m[1]))
-
- patch(openai=True)
- for m in methods:
- assert iswrapped(getattr(m[0], m[1]))
-
- # Ensure double patching does not occur
- patch(openai=True)
- for m in methods:
- assert not iswrapped(getattr(m[0], m[1]).__dd_wrapped__)
-
-
@pytest.mark.parametrize("api_key_in_env", [True, False])
def test_model_list(api_key_in_env, request_api_key, openai, openai_vcr, mock_metrics, snapshot_tracer):
with snapshot_context(
@@ -908,17 +856,16 @@ def test_misuse(openai, snapshot_tracer):
)
def test_span_finish_on_stream_error(openai, openai_vcr, snapshot_tracer):
with openai_vcr.use_cassette("completion_stream_wrong_api_key.yaml"):
- with pytest.raises(openai.APIConnectionError):
- with pytest.raises(openai.AuthenticationError):
- client = openai.OpenAI(api_key="sk-wrong-api-key")
- client.completions.create(
- model="text-curie-001",
- prompt="how does openai tokenize prompts?",
- temperature=0.8,
- n=1,
- max_tokens=150,
- stream=True,
- )
+ with pytest.raises((openai.APIConnectionError, openai.AuthenticationError)):
+ client = openai.OpenAI(api_key="sk-wrong-api-key")
+ client.completions.create(
+ model="text-curie-001",
+ prompt="how does openai tokenize prompts?",
+ temperature=0.8,
+ n=1,
+ max_tokens=150,
+ stream=True,
+ )
@pytest.mark.snapshot
@@ -1383,6 +1330,9 @@ def test_est_tokens():
) # oracle: 92
+@pytest.mark.skipif(
+ parse_version(openai_module.version.VERSION) >= (1, 60), reason="latest openai versions use modified azure requests"
+)
@pytest.mark.snapshot(
token="tests.contrib.openai.test_openai.test_azure_openai_completion",
ignores=["meta.http.useragent", "meta.openai.api_base", "meta.openai.api_type", "meta.openai.api_version"],
@@ -1405,6 +1355,9 @@ def test_azure_openai_completion(openai, azure_openai_config, openai_vcr, snapsh
)
+@pytest.mark.skipif(
+ parse_version(openai_module.version.VERSION) >= (1, 60), reason="latest openai versions use modified azure requests"
+)
@pytest.mark.snapshot(
token="tests.contrib.openai.test_openai.test_azure_openai_completion",
ignores=[
@@ -1434,6 +1387,9 @@ async def test_azure_openai_acompletion(openai, azure_openai_config, openai_vcr,
)
+@pytest.mark.skipif(
+ parse_version(openai_module.version.VERSION) >= (1, 60), reason="latest openai versions use modified azure requests"
+)
@pytest.mark.snapshot(
token="tests.contrib.openai.test_openai.test_azure_openai_chat_completion",
ignores=["meta.http.useragent", "meta.openai.api_base", "meta.openai.api_type", "meta.openai.api_version"],
@@ -1456,6 +1412,9 @@ def test_azure_openai_chat_completion(openai, azure_openai_config, openai_vcr, s
)
+@pytest.mark.skipif(
+ parse_version(openai_module.version.VERSION) >= (1, 60), reason="latest openai versions use modified azure requests"
+)
@pytest.mark.snapshot(
token="tests.contrib.openai.test_openai.test_azure_openai_chat_completion",
ignores=["meta.http.useragent", "meta.openai.api_base", "meta.openai.api_type", "meta.openai.api_version"],
@@ -1478,6 +1437,9 @@ async def test_azure_openai_chat_acompletion(openai, azure_openai_config, openai
)
+@pytest.mark.skipif(
+ parse_version(openai_module.version.VERSION) >= (1, 60), reason="latest openai versions use modified azure requests"
+)
@pytest.mark.snapshot(
token="tests.contrib.openai.test_openai.test_azure_openai_embedding",
ignores=["meta.http.useragent", "meta.openai.api_base", "meta.openai.api_type", "meta.openai.api_version"],
@@ -1497,6 +1459,9 @@ def test_azure_openai_embedding(openai, azure_openai_config, openai_vcr, snapsho
)
+@pytest.mark.skipif(
+ parse_version(openai_module.version.VERSION) >= (1, 60), reason="latest openai versions use modified azure requests"
+)
@pytest.mark.snapshot(
token="tests.contrib.openai.test_openai.test_azure_openai_embedding",
ignores=["meta.http.useragent", "meta.openai.api_base", "meta.openai.api_type", "meta.openai.api_version"],
From de71af1c9ac3f372ae30cb2d866c6886df2d1027 Mon Sep 17 00:00:00 2001
From: ncybul <124532568+ncybul@users.noreply.github.com>
Date: Fri, 24 Jan 2025 15:23:19 -0500
Subject: [PATCH 02/50] chore(llmobs): [MLOB-2064] add telemetry for custom vs.
autoinstrumented span creation (#11983)
Adds telemetry to track the number of LLMObs spans created, tagged by
whether each span was created via auto-instrumentation or manual instrumentation.
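As a rough sketch of the metric shape (it mirrors the calls added in this patch; the `"openai"` and `"workflow"` tag values are illustrative placeholders, not taken from a real run):

```python
# Minimal sketch of the span-start counter added in this change.
from ddtrace.internal.telemetry import telemetry_writer
from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE

# Auto-instrumented span (emitted from the integration base class):
telemetry_writer.add_count_metric(
    namespace=TELEMETRY_NAMESPACE.MLOBS,
    name="span.start",
    value=1,
    tags=(("integration", "openai"), ("autoinstrumented", "true")),
)

# Manually created span (emitted from the LLMObs SDK entry point):
telemetry_writer.add_count_metric(
    namespace=TELEMETRY_NAMESPACE.MLOBS,
    name="span.start",
    value=1,
    tags=(("autoinstrumented", "false"), ("kind", "workflow")),
)
```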
## Checklist
- [x] PR author has checked that all the criteria below are met
- The PR description includes an overview of the change
- The PR description articulates the motivation for the change
- The change includes tests OR the PR description describes a testing
strategy
- The PR description notes risks associated with the change, if any
- Newly-added code is easy to change
- The change follows the [library release note
guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)
- The change includes or references documentation updates if necessary
- Backport labels are set (if
[applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))
## Reviewer Checklist
- [x] Reviewer has checked that all the criteria below are met
- Title is accurate
- All changes are related to the pull request's stated goal
- Avoids breaking
[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces)
changes
- Testing strategy adequately addresses listed risks
- Newly-added code is easy to change
- Release note makes sense to a user of the library
- If necessary, author has acknowledged and discussed the performance
implications of this PR as reported in the benchmarks PR comment
- Backport labels are set in a manner that is consistent with the
[release branch maintenance
policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)
---
ddtrace/llmobs/_integrations/base.py | 11 +++++++++++
ddtrace/llmobs/_llmobs.py | 10 ++++++++++
2 files changed, 21 insertions(+)
diff --git a/ddtrace/llmobs/_integrations/base.py b/ddtrace/llmobs/_integrations/base.py
index 9b6b5eadcda..6b205202efd 100644
--- a/ddtrace/llmobs/_integrations/base.py
+++ b/ddtrace/llmobs/_integrations/base.py
@@ -16,6 +16,8 @@
from ddtrace.internal.dogstatsd import get_dogstatsd_client
from ddtrace.internal.hostname import get_hostname
from ddtrace.internal.logger import get_logger
+from ddtrace.internal.telemetry import telemetry_writer
+from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE
from ddtrace.internal.utils.formats import asbool
from ddtrace.llmobs._constants import PARENT_ID_KEY
from ddtrace.llmobs._constants import PROPAGATED_PARENT_ID_KEY
@@ -136,6 +138,15 @@ def trace(self, pin: Pin, operation_id: str, submit_to_llmobs: bool = False, **k
# in these cases to avoid conflicting with the later propagated tags.
parent_id = _get_llmobs_parent_id(span) or "undefined"
span._set_ctx_item(PARENT_ID_KEY, str(parent_id))
+ telemetry_writer.add_count_metric(
+ namespace=TELEMETRY_NAMESPACE.MLOBS,
+ name="span.start",
+ value=1,
+ tags=(
+ ("integration", self._integration_name),
+ ("autoinstrumented", "true"),
+ ),
+ )
return span
@classmethod
diff --git a/ddtrace/llmobs/_llmobs.py b/ddtrace/llmobs/_llmobs.py
index 809afcf9011..768f4bdb292 100644
--- a/ddtrace/llmobs/_llmobs.py
+++ b/ddtrace/llmobs/_llmobs.py
@@ -27,6 +27,7 @@
from ddtrace.internal.service import ServiceStatusError
from ddtrace.internal.telemetry import telemetry_writer
from ddtrace.internal.telemetry.constants import TELEMETRY_APM_PRODUCT
+from ddtrace.internal.telemetry.constants import TELEMETRY_NAMESPACE
from ddtrace.internal.utils.formats import asbool
from ddtrace.internal.utils.formats import parse_tags_str
from ddtrace.llmobs import _constants as constants
@@ -520,6 +521,15 @@ def _start_span(
model_provider: Optional[str] = None,
ml_app: Optional[str] = None,
) -> Span:
+ telemetry_writer.add_count_metric(
+ namespace=TELEMETRY_NAMESPACE.MLOBS,
+ name="span.start",
+ value=1,
+ tags=(
+ ("autoinstrumented", "false"),
+ ("kind", operation_kind),
+ ),
+ )
if name is None:
name = operation_kind
span = self.tracer.trace(name, resource=operation_kind, span_type=SpanTypes.LLM)
From 3167f0009d95f95554d02294adfd401f0a7f4c41 Mon Sep 17 00:00:00 2001
From: Brett Langdon
Date: Fri, 24 Jan 2025 15:28:52 -0500
Subject: [PATCH 03/50] ci: move pr/regression check to benchmarks-report stage
(#12080)
---
.gitlab-ci.yml | 1 +
.gitlab/benchmarks.yml | 6 ++----
2 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index b29f243c474..47d2baf065c 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -6,6 +6,7 @@ stages:
- shared-pipeline
- benchmarks
- macrobenchmarks
+ - benchmarks-report
- release
variables:
diff --git a/.gitlab/benchmarks.yml b/.gitlab/benchmarks.yml
index 6c7b48aea3a..e922d315444 100644
--- a/.gitlab/benchmarks.yml
+++ b/.gitlab/benchmarks.yml
@@ -59,8 +59,7 @@ microbenchmarks:
benchmarks-pr-comment:
image: $MICROBENCHMARKS_CI_IMAGE
tags: ["arch:amd64"]
- stage: benchmarks
- needs: [ "microbenchmarks" ]
+ stage: benchmarks-report
when: always
script:
- export REPORTS_DIR="$(pwd)/reports/" && (mkdir "${REPORTS_DIR}" || :)
@@ -78,8 +77,7 @@ benchmarks-pr-comment:
KUBERNETES_SERVICE_ACCOUNT_OVERWRITE: dd-trace-py
check-big-regressions:
- stage: benchmarks
- needs: [ microbenchmarks, benchmark-serverless ]
+ stage: benchmarks-report
when: always
tags: ["arch:amd64"]
image: $MICROBENCHMARKS_CI_IMAGE
From 48c6547b8eb0cd10be7229ca78cc52704f62e14d Mon Sep 17 00:00:00 2001
From: Munir Abdinur
Date: Fri, 24 Jan 2025 16:03:22 -0500
Subject: [PATCH 04/50] chore(configs): make rc and http configs internal [3.0]
(#12055)
Prefixes the following `ddtrace.config` attributes with an `_`. These
attributes are internal to the tracer and should not be referenced or
modified by users:
- http
- http_server
- trace_headers
- header_is_traced
- convert_rc_trace_sampling_rules
- enable_remote_configuration
- get_from
Users should instead enable remote configuration and set `http` configuration
options via environment variables.
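For illustration, a minimal sketch of what the rename means for integration-internal code (attribute names are taken from the diff below; the environment variable names in the trailing comment are assumptions, not verified here):

```python
# Sketch only: internal code reads the underscored config attributes after this change.
from ddtrace import config

# was: config.http_server.is_error_code(503)
is_server_error = config._http_server.is_error_code(503)

# was: config.http.trace_query_string
trace_query_string = config._http.trace_query_string

print(is_server_error, trace_query_string)

# End users configure the equivalent behavior through environment variables,
# e.g. (names assumed, check the ddtrace configuration docs):
#   DD_REMOTE_CONFIGURATION_ENABLED=true
#   DD_TRACE_HEADER_TAGS="User-Agent:http.user_agent"
```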
## Checklist
- [ ] PR author has checked that all the criteria below are met
- The PR description includes an overview of the change
- The PR description articulates the motivation for the change
- The change includes tests OR the PR description describes a testing
strategy
- The PR description notes risks associated with the change, if any
- Newly-added code is easy to change
- The change follows the [library release note
guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html)
- The change includes or references documentation updates if necessary
- Backport labels are set (if
[applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting))
## Reviewer Checklist
- [ ] Reviewer has checked that all the criteria below are met
- Title is accurate
- All changes are related to the pull request's stated goal
- Avoids breaking
[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces)
changes
- Testing strategy adequately addresses listed risks
- Newly-added code is easy to change
- Release note makes sense to a user of the library
- If necessary, author has acknowledged and discussed the performance
implications of this PR as reported in the benchmarks PR comment
- Backport labels are set in a manner that is consistent with the
[release branch maintenance
policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)
---
ddtrace/__init__.py | 2 +-
ddtrace/_monkey.py | 2 +-
ddtrace/_trace/sampler.py | 2 +-
ddtrace/contrib/flask/__init__.py | 3 -
ddtrace/contrib/httplib/__init__.py | 12 -
.../contrib/internal/aiohttp/middlewares.py | 2 +-
ddtrace/contrib/internal/botocore/patch.py | 2 +-
ddtrace/contrib/internal/dramatiq/patch.py | 2 +-
ddtrace/contrib/internal/httplib/patch.py | 2 +-
.../contrib/internal/requests/connection.py | 4 +-
ddtrace/contrib/internal/sanic/patch.py | 2 +-
ddtrace/contrib/internal/tornado/handlers.py | 2 +-
ddtrace/contrib/internal/trace_utils.py | 2 +-
ddtrace/contrib/jinja2/__init__.py | 4 +-
ddtrace/contrib/requests/__init__.py | 5 +-
ddtrace/internal/remoteconfig/worker.py | 2 +-
ddtrace/internal/sampling.py | 2 +-
ddtrace/internal/writer/writer.py | 2 +-
ddtrace/propagation/_database_monitoring.py | 2 +-
ddtrace/settings/__init__.py | 4 +-
ddtrace/settings/_config.py | 1025 +++++++++++++++++
ddtrace/settings/config.py | 1000 +---------------
ddtrace/settings/dynamic_instrumentation.py | 2 +-
ddtrace/settings/integration.py | 4 +-
docs/advanced_usage.rst | 11 +-
...cate-rc-http-configs-206f6c60ff383c05.yaml | 11 +
tests/ci_visibility/test_ci_visibility.py | 2 +-
.../httplib/test_httplib_distributed.py | 4 +-
tests/contrib/jinja2/test_jinja2.py | 2 +-
tests/contrib/requests/test_requests.py | 22 +-
.../requests/test_requests_distributed.py | 8 +-
tests/contrib/tornado/test_tornado_web.py | 6 +-
tests/contrib/vertica/test_vertica.py | 2 +-
.../remoteconfig/test_remoteconfig.py | 2 +-
tests/internal/test_settings.py | 9 +-
tests/telemetry/test_writer.py | 4 +-
tests/tracer/test_env_vars.py | 2 +-
tests/tracer/test_global_config.py | 2 +-
tests/tracer/test_instance_config.py | 30 +-
tests/tracer/test_settings.py | 10 +-
tests/tracer/test_trace_utils.py | 4 +-
41 files changed, 1124 insertions(+), 1098 deletions(-)
create mode 100644 ddtrace/settings/_config.py
create mode 100644 releasenotes/notes/munir-deprecate-rc-http-configs-206f6c60ff383c05.yaml
diff --git a/ddtrace/__init__.py b/ddtrace/__init__.py
index b555d1117ca..e480851926f 100644
--- a/ddtrace/__init__.py
+++ b/ddtrace/__init__.py
@@ -17,7 +17,7 @@
# configure ddtrace logger before other modules log
configure_ddtrace_logger() # noqa: E402
-from .settings import _config as config
+from .settings import _global_config as config
# Enable telemetry writer and excepthook as early as possible to ensure we capture any exceptions from initialization
diff --git a/ddtrace/_monkey.py b/ddtrace/_monkey.py
index 75c70114ef2..8ede9f49ca4 100644
--- a/ddtrace/_monkey.py
+++ b/ddtrace/_monkey.py
@@ -12,7 +12,7 @@
from .internal import telemetry
from .internal.logger import get_logger
from .internal.utils import formats
-from .settings import _config as config
+from .settings import _global_config as config
if TYPE_CHECKING: # pragma: no cover
diff --git a/ddtrace/_trace/sampler.py b/ddtrace/_trace/sampler.py
index 961526e3f26..96d61b9adcf 100644
--- a/ddtrace/_trace/sampler.py
+++ b/ddtrace/_trace/sampler.py
@@ -22,7 +22,7 @@
from ..internal.rate_limiter import RateLimiter
from ..internal.sampling import _get_highest_precedence_rule_matching
from ..internal.sampling import _set_sampling_tags
-from ..settings import _config as ddconfig
+from ..settings import _global_config as ddconfig
from .sampling_rule import SamplingRule
diff --git a/ddtrace/contrib/flask/__init__.py b/ddtrace/contrib/flask/__init__.py
index 0562240d090..4b3c1afbf16 100644
--- a/ddtrace/contrib/flask/__init__.py
+++ b/ddtrace/contrib/flask/__init__.py
@@ -87,9 +87,6 @@ def index():
# Override service name
config.flask['service_name'] = 'custom-service-name'
- # Report 401, and 403 responses as errors
- config.http_server.error_statuses = '401,403'
-
.. __: http://flask.pocoo.org/
:ref:`All HTTP tags ` are supported for this integration.
diff --git a/ddtrace/contrib/httplib/__init__.py b/ddtrace/contrib/httplib/__init__.py
index ae85051517e..7c5247422a1 100644
--- a/ddtrace/contrib/httplib/__init__.py
+++ b/ddtrace/contrib/httplib/__init__.py
@@ -42,20 +42,8 @@
# Disable distributed tracing globally.
config.httplib['distributed_tracing'] = False
-
- # Change the service distributed tracing option only for this HTTP
- # connection.
-
- # Python 2
- connection = urllib.HTTPConnection('www.datadog.com')
-
- # Python 3
connection = http.client.HTTPConnection('www.datadog.com')
- cfg = config.get_from(connection)
- cfg['distributed_tracing'] = True
-
-
:ref:`Headers tracing ` is supported for this integration.
"""
diff --git a/ddtrace/contrib/internal/aiohttp/middlewares.py b/ddtrace/contrib/internal/aiohttp/middlewares.py
index 4f7abe5a12f..ddb2d35fbc6 100644
--- a/ddtrace/contrib/internal/aiohttp/middlewares.py
+++ b/ddtrace/contrib/internal/aiohttp/middlewares.py
@@ -108,7 +108,7 @@ def finish_request_span(request, response):
# DEV: aiohttp is special case maintains separate configuration from config api
trace_query_string = request[REQUEST_CONFIG_KEY].get("trace_query_string")
if trace_query_string is None:
- trace_query_string = config.http.trace_query_string
+ trace_query_string = config._http.trace_query_string
if trace_query_string:
request_span.set_tag_str(http.QUERY_STRING, request.query_string)
diff --git a/ddtrace/contrib/internal/botocore/patch.py b/ddtrace/contrib/internal/botocore/patch.py
index 734c429d798..61353e7b34e 100644
--- a/ddtrace/contrib/internal/botocore/patch.py
+++ b/ddtrace/contrib/internal/botocore/patch.py
@@ -33,7 +33,7 @@
from ddtrace.internal.utils.formats import asbool
from ddtrace.internal.utils.formats import deep_getattr
from ddtrace.llmobs._integrations import BedrockIntegration
-from ddtrace.settings.config import Config
+from ddtrace.settings._config import Config
from ddtrace.trace import Pin
from .services.bedrock import patched_bedrock_api_call
diff --git a/ddtrace/contrib/internal/dramatiq/patch.py b/ddtrace/contrib/internal/dramatiq/patch.py
index 08daad9d93c..a6ecbbfd5d4 100644
--- a/ddtrace/contrib/internal/dramatiq/patch.py
+++ b/ddtrace/contrib/internal/dramatiq/patch.py
@@ -11,7 +11,7 @@
from ddtrace.contrib import trace_utils
from ddtrace.ext import SpanKind
from ddtrace.ext import SpanTypes
-from ddtrace.settings.config import Config
+from ddtrace.settings._config import Config
def get_version() -> str:
diff --git a/ddtrace/contrib/internal/httplib/patch.py b/ddtrace/contrib/internal/httplib/patch.py
index a1e367af3a1..79a8ea2816f 100644
--- a/ddtrace/contrib/internal/httplib/patch.py
+++ b/ddtrace/contrib/internal/httplib/patch.py
@@ -91,7 +91,7 @@ def _wrap_request(func, instance, args, kwargs):
if should_skip_request(pin, instance):
return func_to_call(*args, **kwargs)
- cfg = config.get_from(instance)
+ cfg = config._get_from(instance)
try:
# Create a new span and attach to this instance (so we can retrieve/update/close later on the response)
diff --git a/ddtrace/contrib/internal/requests/connection.py b/ddtrace/contrib/internal/requests/connection.py
index c6d7706ef54..0b58f8b6dc5 100644
--- a/ddtrace/contrib/internal/requests/connection.py
+++ b/ddtrace/contrib/internal/requests/connection.py
@@ -75,7 +75,7 @@ def _wrap_send(func, instance, args, kwargs):
hostname, path = _extract_hostname_and_path(url)
host_without_port = hostname.split(":")[0] if hostname is not None else None
- cfg = config.get_from(instance)
+ cfg = config._get_from(instance)
service = None
if cfg["split_by_domain"] and hostname:
service = hostname
@@ -97,7 +97,7 @@ def _wrap_send(func, instance, args, kwargs):
# Configure trace search sample rate
# DEV: analytics enabled on per-session basis
- cfg = config.get_from(instance)
+ cfg = config._get_from(instance)
analytics_enabled = cfg.get("analytics_enabled")
if analytics_enabled:
span.set_tag(_ANALYTICS_SAMPLE_RATE_KEY, cfg.get("analytics_sample_rate", True))
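
The `requests` hunks only swap the public `config.get_from` for the private `config._get_from`; the settings they read (split-by-domain service naming, the per-instance analytics flags) behave as before. A hedged sketch of the user-facing side of the `split_by_domain` branch above:

```python
# Sketch, assuming the requests integration is patched; config.requests is the
# public knob behind the cfg["split_by_domain"] lookup in _wrap_send.
import requests

from ddtrace import config, patch

patch(requests=True)
config.requests["split_by_domain"] = True

# With split_by_domain enabled, the span's service name becomes the target host
# without the port, matching the hostname handling shown in the hunk above.
requests.get("https://www.datadog.com")
```
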
diff --git a/ddtrace/contrib/internal/sanic/patch.py b/ddtrace/contrib/internal/sanic/patch.py
index 5d105cf2f32..8e53ed41dc8 100644
--- a/ddtrace/contrib/internal/sanic/patch.py
+++ b/ddtrace/contrib/internal/sanic/patch.py
@@ -273,7 +273,7 @@ async def sanic_http_lifecycle_exception(request, exception):
# Do not attach exception for exceptions not considered as errors
# ex: Http 400s
# DEV: We still need to set `__dd_span_call_finish` below
- if not hasattr(exception, "status_code") or config.http_server.is_error_code(exception.status_code):
+ if not hasattr(exception, "status_code") or config._http_server.is_error_code(exception.status_code):
ex_type = type(exception)
ex_tb = getattr(exception, "__traceback__", None)
span.set_exc_info(ex_type, exception, ex_tb)
diff --git a/ddtrace/contrib/internal/tornado/handlers.py b/ddtrace/contrib/internal/tornado/handlers.py
index 3c4a046bfb9..f858e33ee29 100644
--- a/ddtrace/contrib/internal/tornado/handlers.py
+++ b/ddtrace/contrib/internal/tornado/handlers.py
@@ -140,7 +140,7 @@ def log_exception(func, handler, args, kwargs):
# is not a 2xx. In this case we want to check the status code to be sure that
# only 5xx are traced as errors, while any other HTTPError exception is handled as
# usual.
- if config.http_server.is_error_code(value.status_code):
+ if config._http_server.is_error_code(value.status_code):
current_span.set_exc_info(*args)
else:
# any other uncaught exception should be reported as error
diff --git a/ddtrace/contrib/internal/trace_utils.py b/ddtrace/contrib/internal/trace_utils.py
index 56901934e83..5781c3f30df 100644
--- a/ddtrace/contrib/internal/trace_utils.py
+++ b/ddtrace/contrib/internal/trace_utils.py
@@ -485,7 +485,7 @@ def set_http_meta(
log.debug("failed to convert http status code %r to int", status_code)
else:
span.set_tag_str(http.STATUS_CODE, str(status_code))
- if config.http_server.is_error_code(int_status_code):
+ if config._http_server.is_error_code(int_status_code):
span.error = 1
if status_msg is not None:
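
The sanic, tornado, and `set_http_meta` call sites now read the private `config._http_server`, but the classification logic is untouched: a status code is an error if it falls inside the configured error ranges, and `set_http_meta` then flags `span.error = 1`. A rough illustration (attribute privacy aside, this is a sketch of the internal check, not a public-API recommendation; the `error_statuses` format follows the flask docstring lines removed earlier in this patch):

```python
from ddtrace import config

# Assumption for illustration: comma-separated codes and ranges, as in the
# removed flask example ('401,403'); 500-599 is the usual default error range.
config._http_server.error_statuses = "401,403,500-599"

for status in (200, 401, 404, 503):
    # is_error_code() is the same check the traced frameworks above rely on.
    print(status, config._http_server.is_error_code(status))
```
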
diff --git a/ddtrace/contrib/jinja2/__init__.py b/ddtrace/contrib/jinja2/__init__.py
index cc8c6786b02..8437a51d9de 100644
--- a/ddtrace/contrib/jinja2/__init__.py
+++ b/ddtrace/contrib/jinja2/__init__.py
@@ -16,13 +16,13 @@
The library can be configured globally and per instance, using the Configuration API::
from ddtrace import config
+ from ddtrace.trace import Pin
# Change service name globally
config.jinja2['service_name'] = 'jinja-templates'
# change the service name only for this environment
- cfg = config.get_from(env)
- cfg['service_name'] = 'jinja-templates'
+ Pin.override(env, service='jinja-templates')
By default, the service name is set to None, so it is inherited from the parent span.
If there is no parent span and the service name is not overridden the agent will drop the traces.
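
A slightly fuller, runnable version of the updated jinja2 example; the in-memory `DictLoader` template is an assumption purely for illustration:

```python
import jinja2

from ddtrace import config, patch
from ddtrace.trace import Pin

patch(jinja2=True)

# Global default for every traced template render.
config.jinja2["service_name"] = "jinja-templates"

# Per-environment override, replacing the removed config.get_from(env) pattern.
env = jinja2.Environment(loader=jinja2.DictLoader({"hello.html": "Hello {{ name }}!"}))
Pin.override(env, service="jinja-admin-templates")

print(env.get_template("hello.html").render(name="World"))
```
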
diff --git a/ddtrace/contrib/requests/__init__.py b/ddtrace/contrib/requests/__init__.py
index efcb20f1219..7d034ce56bf 100644
--- a/ddtrace/contrib/requests/__init__.py
+++ b/ddtrace/contrib/requests/__init__.py
@@ -65,12 +65,11 @@
use the config API::
from ddtrace import config
+ from ddtrace.trace import Pin
from requests import Session
session = Session()
- cfg = config.get_from(session)
- cfg['service_name'] = 'auth-api'
- cfg['distributed_tracing'] = False
+ Pin.override(session, service='auth-api')
"""
diff --git a/ddtrace/internal/remoteconfig/worker.py b/ddtrace/internal/remoteconfig/worker.py
index 7ad8c592d2e..5429e599e74 100644
--- a/ddtrace/internal/remoteconfig/worker.py
+++ b/ddtrace/internal/remoteconfig/worker.py
@@ -13,7 +13,7 @@
from ddtrace.internal.remoteconfig.utils import get_poll_interval_seconds
from ddtrace.internal.service import ServiceStatus
from ddtrace.internal.utils.time import StopWatch
-from ddtrace.settings import _config as ddconfig
+from ddtrace.settings import _global_config as ddconfig
log = get_logger(__name__)
diff --git a/ddtrace/internal/sampling.py b/ddtrace/internal/sampling.py
index b1d3f7957f4..e64c0e27bc5 100644
--- a/ddtrace/internal/sampling.py
+++ b/ddtrace/internal/sampling.py
@@ -27,7 +27,7 @@
from ddtrace.internal.constants import SAMPLING_DECISION_TRACE_TAG_KEY
from ddtrace.internal.glob_matching import GlobMatcher
from ddtrace.internal.logger import get_logger
-from ddtrace.settings import _config as config
+from ddtrace.settings import _global_config as config
from .rate_limiter import RateLimiter
diff --git a/ddtrace/internal/writer/writer.py b/ddtrace/internal/writer/writer.py
index c494aa206a2..357fcf3917f 100644
--- a/ddtrace/internal/writer/writer.py
+++ b/ddtrace/internal/writer/writer.py
@@ -14,7 +14,7 @@
import ddtrace
from ddtrace.internal.utils.retry import fibonacci_backoff_with_jitter
-from ddtrace.settings import _config as config
+from ddtrace.settings import _global_config as config
from ddtrace.settings.asm import config as asm_config
from ddtrace.vendor.dogstatsd import DogStatsd
diff --git a/ddtrace/propagation/_database_monitoring.py b/ddtrace/propagation/_database_monitoring.py
index 5b585b13210..817d23c4ebf 100644
--- a/ddtrace/propagation/_database_monitoring.py
+++ b/ddtrace/propagation/_database_monitoring.py
@@ -10,7 +10,7 @@
from ..internal import compat
from ..internal.utils import get_argument_value
from ..internal.utils import set_argument_value
-from ..settings import _config as dd_config
+from ..settings import _global_config as dd_config
from ..settings._database_monitoring import dbm_config
diff --git a/ddtrace/settings/__init__.py b/ddtrace/settings/__init__.py
index 2c3a0bf7807..ebbb0c31f7b 100644
--- a/ddtrace/settings/__init__.py
+++ b/ddtrace/settings/__init__.py
@@ -1,12 +1,12 @@
from .._hooks import Hooks
-from .config import Config
+from ._config import Config
from .exceptions import ConfigException
from .http import HttpConfig
from .integration import IntegrationConfig
# Default global config
-_config = Config()
+_global_config = Config()
__all__ = [
"Config",
diff --git a/ddtrace/settings/_config.py b/ddtrace/settings/_config.py
new file mode 100644
index 00000000000..df3fe4177d1
--- /dev/null
+++ b/ddtrace/settings/_config.py
@@ -0,0 +1,1025 @@
+from copy import deepcopy
+import json
+import os
+import re
+import sys
+from typing import Any # noqa:F401
+from typing import Callable # noqa:F401
+from typing import Dict # noqa:F401
+from typing import List # noqa:F401
+from typing import Optional # noqa:F401
+from typing import Tuple # noqa:F401
+from typing import Union # noqa:F401
+
+from ddtrace.internal._file_queue import File_Queue
+from ddtrace.internal.serverless import in_azure_function
+from ddtrace.internal.serverless import in_gcp_function
+from ddtrace.internal.telemetry import telemetry_writer
+from ddtrace.internal.utils.cache import cachedmethod
+from ddtrace.internal.utils.deprecations import DDTraceDeprecationWarning
+from ddtrace.vendor.debtcollector import deprecate
+
+from .._trace.pin import Pin
+from ..internal import gitmetadata
+from ..internal.constants import _PROPAGATION_BEHAVIOR_DEFAULT
+from ..internal.constants import _PROPAGATION_BEHAVIOR_IGNORE
+from ..internal.constants import _PROPAGATION_STYLE_DEFAULT
+from ..internal.constants import _PROPAGATION_STYLE_NONE
+from ..internal.constants import DEFAULT_BUFFER_SIZE
+from ..internal.constants import DEFAULT_MAX_PAYLOAD_SIZE
+from ..internal.constants import DEFAULT_PROCESSING_INTERVAL
+from ..internal.constants import DEFAULT_REUSE_CONNECTIONS
+from ..internal.constants import DEFAULT_SAMPLING_RATE_LIMIT
+from ..internal.constants import DEFAULT_TIMEOUT
+from ..internal.constants import PROPAGATION_STYLE_ALL
+from ..internal.constants import PROPAGATION_STYLE_B3_SINGLE
+from ..internal.logger import get_logger
+from ..internal.schema import DEFAULT_SPAN_SERVICE_NAME
+from ..internal.serverless import in_aws_lambda
+from ..internal.utils.formats import asbool
+from ..internal.utils.formats import parse_tags_str
+from ._core import get_config as _get_config
+from ._inferred_base_service import detect_service
+from ._otel_remapper import otel_remapping as _otel_remapping
+from .endpoint_config import fetch_config_from_endpoint
+from .http import HttpConfig
+from .integration import IntegrationConfig
+
+
+if sys.version_info >= (3, 8):
+ from typing import Literal # noqa:F401
+else:
+ from typing_extensions import Literal
+
+
+log = get_logger(__name__)
+
+ENDPOINT_FETCHED_CONFIG = fetch_config_from_endpoint()
+
+DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP_DEFAULT = (
+ r"(?ix)"
+ r"(?:" # JSON-ish leading quote
+ r'(?:"|%22)?'
+ r")"
+ r"(?:" # common keys"
+ r"(?:old[-_]?|new[-_]?)?p(?:ass)?w(?:or)?d(?:1|2)?" # pw, password variants
+ r"|pass(?:[-_]?phrase)?" # pass, passphrase variants
+ r"|secret"
+ r"|(?:" # key, key_id variants
+ r"api[-_]?"
+ r"|private[-_]?"
+ r"|public[-_]?"
+ r"|access[-_]?"
+ r"|secret[-_]?"
+ r"|app(?:lica"
+ r"tion)?[-_]?"
+ r")key(?:[-_]?id)?"
+ r"|token"
+ r"|consumer[-_]?(?:id|key|secret)"
+ r"|sign(?:ed|ature)?"
+ r"|auth(?:entication|orization)?"
+ r")"
+ r"(?:"
+ # '=' query string separator, plus value til next '&' separator
+ r"(?:\s|%20)*(?:=|%3D)[^&]+"
+ # JSON-ish '": "somevalue"', key being handled with case above, without the opening '"'
+ r'|(?:"|%22)' # closing '"' at end of key
+ r"(?:\s|%20)*(?::|%3A)(?:\s|%20)*" # ':' key-value separator, with surrounding spaces
+ r'(?:"|%22)' # opening '"' at start of value
+ r'(?:%2[^2]|%[^2]|[^"%])+' # value
+ r'(?:"|%22)' # closing '"' at end of value
+ r")"
+ r"|(?:" # other common secret values
+ r" bearer(?:\s|%20)+[a-z0-9._\-]+"
+ r"|token(?::|%3A)[a-z0-9]{13}"
+ r"|gh[opsu]_[0-9a-zA-Z]{36}"
+ r"|ey[I-L](?:[\w=-]|%3D)+\.ey[I-L](?:[\w=-]|%3D)+(?:\.(?:[\w.+/=-]|%3D|%2F|%2B)+)?"
+ r"|-{5}BEGIN(?:[a-z\s]|%20)+PRIVATE(?:\s|%20)KEY-{5}[^\-]+-{5}END"
+ r"(?:[a-z\s]|%20)+PRIVATE(?:\s|%20)KEY(?:-{5})?(?:\n|%0A)?"
+ r"|(?:ssh-(?:rsa|dss)|ecdsa-[a-z0-9]+-[a-z0-9]+)(?:\s|%20|%09)+(?:[a-z0-9/.+]"
+ r"|%2F|%5C|%2B){100,}(?:=|%3D)*(?:(?:\s|%20|%09)+[a-z0-9._-]+)?"
+ r")"
+)
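# NOTE (illustrative aside, not part of the patch): a quick sanity check of the
# default obfuscation regex above. The "<redacted>" replacement token is an
# assumption for illustration; the tracer's actual redaction call site is not
# shown in this hunk.
#
#     import re
#     pattern = re.compile(DD_TRACE_OBFUSCATION_QUERY_STRING_REGEXP_DEFAULT)
#     print(pattern.sub("<redacted>", "user=alice&password=hunter2&api_key=abc123"))
#     # -> user=alice&<redacted>&<redacted>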
+
+
+def _parse_propagation_styles(styles_str):
+ # type: (str) -> Optional[List[str]]
+ """Helper to parse http propagation extract/inject styles via env variables.
+
+ The expected format is::
+
+