Commit 82c6ed0
More fmt, remove last references to logs/metrics
Yun-Kim committed Feb 4, 2025
1 parent adb8766
Showing 12 changed files with 28 additions and 50 deletions.
ddtrace/contrib/internal/openai/patch.py (7 changes: 0 additions & 7 deletions)

@@ -179,7 +179,6 @@ def _traced_endpoint(endpoint_hook, integration, instance, pin, args, kwargs):
     # Record any error information
     if err is not None:
         span.set_exc_info(*sys.exc_info())
-        integration.metric(span, "incr", "request.error", 1)

     # Pass the response and the error to the hook
     try:
@@ -192,7 +191,6 @@ def _traced_endpoint(endpoint_hook, integration, instance, pin, args, kwargs):
     # Streamed responses with error will need to be finished manually as well.
     if not kwargs.get("stream") or err is not None:
         span.finish()
-        integration.metric(span, "dist", "request.duration", span.duration_ns)


 def _patched_endpoint(openai, patch_hook):
@@ -252,7 +250,6 @@ async def patched_endpoint(openai, pin, func, instance, args, kwargs):
 @with_traced_module
 def patched_convert(openai, pin, func, instance, args, kwargs):
     """Patch convert captures header information in the openai response"""
-    integration = openai._datadog_integration
     span = pin.tracer.current_span()
     if not span:
         return func(*args, **kwargs)
@@ -277,23 +274,19 @@ def patched_convert(openai, pin, func, instance, args, kwargs):
     if headers.get("x-ratelimit-limit-requests"):
         v = headers.get("x-ratelimit-limit-requests")
         if v is not None:
-            integration.metric(span, "gauge", "ratelimit.requests", int(v))
             span.set_metric("openai.organization.ratelimit.requests.limit", int(v))
     if headers.get("x-ratelimit-limit-tokens"):
         v = headers.get("x-ratelimit-limit-tokens")
         if v is not None:
-            integration.metric(span, "gauge", "ratelimit.tokens", int(v))
             span.set_metric("openai.organization.ratelimit.tokens.limit", int(v))
     # Gauge and set span info for remaining requests and tokens
     if headers.get("x-ratelimit-remaining-requests"):
         v = headers.get("x-ratelimit-remaining-requests")
         if v is not None:
-            integration.metric(span, "gauge", "ratelimit.remaining.requests", int(v))
             span.set_metric("openai.organization.ratelimit.requests.remaining", int(v))
     if headers.get("x-ratelimit-remaining-tokens"):
         v = headers.get("x-ratelimit-remaining-tokens")
         if v is not None:
-            integration.metric(span, "gauge", "ratelimit.remaining.tokens", int(v))
             span.set_metric("openai.organization.ratelimit.tokens.remaining", int(v))

     return func(*args, **kwargs)
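
The four surviving header checks in patched_convert repeat one pattern: read a rate-limit header and, if present, tag the span with the corresponding numeric metric. A minimal sketch of that mapping follows; the helper name and dict are hypothetical and not part of the patch, while the header and metric names are the ones used above.

# Sketch only: condenses the header-to-span-metric pattern this commit leaves in place.
_RATELIMIT_HEADER_TO_METRIC = {
    "x-ratelimit-limit-requests": "openai.organization.ratelimit.requests.limit",
    "x-ratelimit-limit-tokens": "openai.organization.ratelimit.tokens.limit",
    "x-ratelimit-remaining-requests": "openai.organization.ratelimit.requests.remaining",
    "x-ratelimit-remaining-tokens": "openai.organization.ratelimit.tokens.remaining",
}


def _tag_ratelimit_headers(span, headers):
    # Copy each rate-limit header onto the span as an integer metric.
    for header, metric in _RATELIMIT_HEADER_TO_METRIC.items():
        v = headers.get(header)
        if v is not None:
            span.set_metric(metric, int(v))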
ddtrace/llmobs/_integrations/openai.py (2 changes: 1 addition & 1 deletion)

@@ -88,7 +88,7 @@ def _is_azure_openai(span):
         return "azure" in base_url.lower()

     def record_usage(self, span: Span, usage: Dict[str, Any]) -> None:
-        if not usage or not self.metrics_enabled:
+        if not usage:
             return
         for token_type in ("prompt", "completion", "total"):
             num_tokens = getattr(usage, token_type + "_tokens", None)
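
With the metrics_enabled gate gone, record_usage tags token counts whenever usage data is present. A self-contained sketch of that flow, using stand-in span and usage objects: the openai.response.usage.* metric names match the additions in the changed files further down, and the loop body past the visible diff context is assumed.

# Stand-ins for ddtrace's Span and the OpenAI usage object; illustrative only.
class FakeSpan:
    def __init__(self):
        self.metrics = {}

    def set_metric(self, key, value):
        self.metrics[key] = value


class FakeUsage:
    prompt_tokens = 2
    completion_tokens = 12
    total_tokens = 14


def record_usage(span, usage):
    if not usage:
        return
    for token_type in ("prompt", "completion", "total"):
        num_tokens = getattr(usage, token_type + "_tokens", None)
        if num_tokens is None:
            continue
        # Metric names assumed from the snapshot additions below.
        span.set_metric("openai.response.usage.%s_tokens" % token_type, num_tokens)


span = FakeSpan()
record_usage(span, FakeUsage())
assert span.metrics["openai.response.usage.total_tokens"] == 14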
tests/contrib/openai/conftest.py (37 changes: 2 additions & 35 deletions)

@@ -92,34 +92,6 @@ def process_trace(self, trace):
         return trace


-@pytest.fixture(scope="session")
-def mock_metrics():
-    patcher = mock.patch("ddtrace.llmobs._integrations.base.get_dogstatsd_client")
-    try:
-        DogStatsdMock = patcher.start()
-        m = mock.MagicMock()
-        DogStatsdMock.return_value = m
-        yield m
-    finally:
-        patcher.stop()
-
-
-@pytest.fixture(scope="session")
-def mock_logs():
-    """
-    Note that this fixture must be ordered BEFORE mock_tracer as it needs to patch the log writer
-    before it is instantiated.
-    """
-    patcher = mock.patch("ddtrace.llmobs._integrations.base.V2LogWriter")
-    try:
-        V2LogWriterMock = patcher.start()
-        m = mock.MagicMock()
-        V2LogWriterMock.return_value = m
-        yield m
-    finally:
-        patcher.stop()
-
-
 @pytest.fixture()
 def mock_llmobs_writer():
     patcher = mock.patch("ddtrace.llmobs._llmobs.LLMObsSpanWriter")
@@ -163,18 +135,15 @@ def patch_openai(ddtrace_global_config, ddtrace_config_openai, openai_api_key, o


 @pytest.fixture
-def snapshot_tracer(openai, patch_openai, mock_logs, mock_metrics):
+def snapshot_tracer(openai, patch_openai):
     pin = Pin.get_from(openai)
     pin.tracer._configure(trace_processors=[FilterOrg()])

     yield pin.tracer

-    mock_logs.reset_mock()
-    mock_metrics.reset_mock()
-

 @pytest.fixture
-def mock_tracer(ddtrace_global_config, openai, patch_openai, mock_logs, mock_metrics):
+def mock_tracer(ddtrace_global_config, openai, patch_openai):
     pin = Pin.get_from(openai)
     mock_tracer = DummyTracer(writer=DummyWriter(trace_flush_enabled=False))
     pin.override(openai, tracer=mock_tracer)
@@ -187,6 +156,4 @@ def mock_tracer(ddtrace_global_config, openai, patch_openai, mock_logs, mock_met

     yield mock_tracer

-    mock_logs.reset_mock()
-    mock_metrics.reset_mock()
     LLMObs.disable()
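
After this cleanup a test pulls in mock_tracer directly, with no mock_logs or mock_metrics arguments to thread through or reset. A hypothetical test showing the shape; the request arguments and assertions are illustrative, not taken from this diff.

# Hypothetical usage of the slimmed-down fixtures; not a test from this commit.
def test_completion_creates_span(openai, mock_tracer):
    openai.completions.create(model="ada", prompt="hello")  # illustrative request
    traces = mock_tracer.pop_traces()  # DummyTracer API from ddtrace's test utils
    assert len(traces) == 1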
tests/contrib/openai/test_openai_llmobs.py (8 changes: 1 addition & 7 deletions)

@@ -602,9 +602,7 @@ def test_embedding_string_base64(self, openai, ddtrace_global_config, mock_llmob
     [dict(_llmobs_enabled=True, _llmobs_ml_app="<ml-app-name>", _llmobs_agentless_enabled=True)],
 )
 @pytest.mark.skipif(parse_version(openai_module.version.VERSION) < (1, 0), reason="These tests are for openai >= 1.0")
-def test_agentless_enabled_does_not_submit_metrics(
-    openai, ddtrace_global_config, mock_llmobs_writer, mock_tracer, mock_metrics
-):
+def test_agentless_enabled_does_not_submit_metrics(openai, ddtrace_global_config, mock_llmobs_writer, mock_tracer):
     """Ensure openai metrics are not emitted when agentless mode is enabled."""
     with get_openai_vcr(subdirectory_name="v1").use_cassette("completion.yaml"):
         model = "ada"
@@ -619,7 +617,3 @@ def test_agentless_enabled_does_not_submit_metrics(openai, ddtrace_global_config
         user="ddtrace-test",
     )
     assert mock_llmobs_writer.enqueue.call_count == 1
-    mock_metrics.assert_not_called()
-    assert mock_metrics.increment.call_count == 0
-    assert mock_metrics.distribution.call_count == 0
-    assert mock_metrics.gauge.call_count == 0
The remaining eight changed files (names not preserved above) each add the same three usage metrics:

@@ -44,6 +44,9 @@
     "openai.organization.ratelimit.requests.remaining": 2999,
     "openai.organization.ratelimit.tokens.limit": 250000,
     "openai.organization.ratelimit.tokens.remaining": 249979,
+    "openai.response.usage.completion_tokens": 12,
+    "openai.response.usage.prompt_tokens": 2,
+    "openai.response.usage.total_tokens": 14,
     "process_id": 24448
   },
   "duration": 17466000,

@@ -39,6 +39,9 @@
     "openai.organization.ratelimit.requests.remaining": 2999,
     "openai.organization.ratelimit.tokens.limit": 250000,
     "openai.organization.ratelimit.tokens.remaining": 249979,
+    "openai.response.usage.completion_tokens": 12,
+    "openai.response.usage.prompt_tokens": 2,
+    "openai.response.usage.total_tokens": 14,
     "process_id": 20806
   },
   "duration": 16421000,

@@ -39,6 +39,9 @@
     "openai.organization.ratelimit.requests.remaining": 2999,
     "openai.organization.ratelimit.tokens.limit": 250000,
     "openai.organization.ratelimit.tokens.remaining": 249979,
+    "openai.response.usage.completion_tokens": 12,
+    "openai.response.usage.prompt_tokens": 2,
+    "openai.response.usage.total_tokens": 14,
     "process_id": 20827
   },
   "duration": 17257000,

@@ -39,6 +39,9 @@
     "openai.organization.ratelimit.requests.remaining": 2999,
     "openai.organization.ratelimit.tokens.limit": 250000,
     "openai.organization.ratelimit.tokens.remaining": 249979,
+    "openai.response.usage.completion_tokens": 12,
+    "openai.response.usage.prompt_tokens": 2,
+    "openai.response.usage.total_tokens": 14,
     "process_id": 20839
   },
   "duration": 17259000,

@@ -39,6 +39,9 @@
     "openai.organization.ratelimit.requests.remaining": 2999,
     "openai.organization.ratelimit.tokens.limit": 250000,
     "openai.organization.ratelimit.tokens.remaining": 249979,
+    "openai.response.usage.completion_tokens": 12,
+    "openai.response.usage.prompt_tokens": 2,
+    "openai.response.usage.total_tokens": 14,
     "process_id": 20848
   },
   "duration": 17004000,

@@ -39,6 +39,9 @@
     "openai.organization.ratelimit.requests.remaining": 2999,
     "openai.organization.ratelimit.tokens.limit": 250000,
     "openai.organization.ratelimit.tokens.remaining": 249979,
+    "openai.response.usage.completion_tokens": 12,
+    "openai.response.usage.prompt_tokens": 2,
+    "openai.response.usage.total_tokens": 14,
     "process_id": 20864
   },
   "duration": 17872000,

@@ -39,6 +39,9 @@
     "openai.organization.ratelimit.requests.remaining": 2999,
     "openai.organization.ratelimit.tokens.limit": 250000,
     "openai.organization.ratelimit.tokens.remaining": 249979,
+    "openai.response.usage.completion_tokens": 12,
+    "openai.response.usage.prompt_tokens": 2,
+    "openai.response.usage.total_tokens": 14,
     "process_id": 20888
   },
   "duration": 16629000,

@@ -44,6 +44,9 @@
     "openai.organization.ratelimit.requests.remaining": 2999,
     "openai.organization.ratelimit.tokens.limit": 250000,
     "openai.organization.ratelimit.tokens.remaining": 249979,
+    "openai.response.usage.completion_tokens": 12,
+    "openai.response.usage.prompt_tokens": 2,
+    "openai.response.usage.total_tokens": 14,
     "process_id": 24448
   },
   "duration": 17466000,
