1 change: 1 addition & 0 deletions MIGRATION_GUIDE.md
@@ -137,6 +137,7 @@ Looking to upgrade from Sentry SDK 2.x to 3.x? Here's a comprehensive list of wh
- The `enable_tracing` `init` option has been removed. Configure `traces_sample_rate` directly.
- The `propagate_traces` `init` option has been removed. Use `trace_propagation_targets` instead.
- The `custom_sampling_context` parameter of `start_transaction` has been removed. Use `attributes` instead to set key-value pairs of data that should be accessible in the traces sampler. Note that span attributes need to conform to the [OpenTelemetry specification](https://opentelemetry.io/docs/concepts/signals/traces/#attributes), meaning only certain types can be set as values.
- `set_measurement` has been removed.
- The PyMongo integration no longer sets tags. The data is still accessible via span attributes.
- The PyMongo integration doesn't set `operation_ids` anymore. The individual IDs (`operation_id`, `request_id`, `session_id`) are now accessible as separate span attributes.
- `sentry_sdk.metrics` and associated metrics APIs have been removed as Sentry no longer accepts metrics data in this form. See https://sentry.zendesk.com/hc/en-us/articles/26369339769883-Upcoming-API-Changes-to-Metrics
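As a rough illustration of the tracing-related bullets above, here is a minimal migration sketch. Only `traces_sample_rate`, the `attributes` parameter of `start_transaction`, and the OpenTelemetry-compatible value types are taken from this guide; the DSN, the span op/name, and the attribute key are placeholders.

```python
import sentry_sdk

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    traces_sample_rate=1.0,  # replaces the removed enable_tracing option
)

# 2.x: start_transaction(..., custom_sampling_context={"queue_name": "default"})
# 3.x: pass the data as attributes; values must be OTel-compatible primitives
with sentry_sdk.start_transaction(
    op="queue.process",
    name="process-jobs",
    attributes={"queue_name": "default"},
):
    ...  # the traces sampler can read "queue_name" from its sampling context
```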
1 change: 0 additions & 1 deletion sentry_sdk/__init__.py
@@ -36,7 +36,6 @@
"set_context",
"set_extra",
"set_level",
"set_measurement",
"set_tag",
"set_tags",
"set_user",
41 changes: 0 additions & 41 deletions sentry_sdk/_types.py
@@ -107,7 +107,6 @@ def substituted_because_contains_sensitive_data(cls):
from typing import Callable
from typing import Dict
from typing import Mapping
from typing import NotRequired
from typing import Optional
from typing import Type
from typing_extensions import Literal, TypedDict
@@ -120,45 +119,6 @@ class SDKInfo(TypedDict):
# "critical" is an alias of "fatal" recognized by Relay
LogLevelStr = Literal["fatal", "critical", "error", "warning", "info", "debug"]

DurationUnit = Literal[
"nanosecond",
"microsecond",
"millisecond",
"second",
"minute",
"hour",
"day",
"week",
]

InformationUnit = Literal[
"bit",
"byte",
"kilobyte",
"kibibyte",
"megabyte",
"mebibyte",
"gigabyte",
"gibibyte",
"terabyte",
"tebibyte",
"petabyte",
"pebibyte",
"exabyte",
"exbibyte",
]

FractionUnit = Literal["ratio", "percent"]
MeasurementUnit = Union[DurationUnit, InformationUnit, FractionUnit, str]

MeasurementValue = TypedDict(
"MeasurementValue",
{
"value": float,
"unit": NotRequired[Optional[MeasurementUnit]],
},
)

Event = TypedDict(
"Event",
{
Expand All @@ -180,7 +140,6 @@ class SDKInfo(TypedDict):
"level": LogLevelStr,
"logentry": Mapping[str, object],
"logger": str,
"measurements": dict[str, MeasurementValue],
"message": str,
"modules": dict[str, str],
"monitor_config": Mapping[str, object],
6 changes: 3 additions & 3 deletions sentry_sdk/ai/monitoring.py
@@ -106,14 +106,14 @@ def record_token_usage(
if ai_pipeline_name:
span.set_attribute("ai.pipeline.name", ai_pipeline_name)
if prompt_tokens is not None:
span.set_measurement("ai_prompt_tokens_used", value=prompt_tokens)
span.set_attribute("ai.prompt_tokens.used", prompt_tokens)
if completion_tokens is not None:
span.set_measurement("ai_completion_tokens_used", value=completion_tokens)
span.set_attribute("ai.completion_tokens.used", completion_tokens)
if (
total_tokens is None
and prompt_tokens is not None
and completion_tokens is not None
):
total_tokens = prompt_tokens + completion_tokens
if total_tokens is not None:
span.set_measurement("ai_total_tokens_used", total_tokens)
span.set_attribute("ai.total_tokens.used", total_tokens)
8 changes: 0 additions & 8 deletions sentry_sdk/api.py
@@ -59,7 +59,6 @@
"set_context",
"set_extra",
"set_level",
"set_measurement",
"set_tag",
"set_tags",
"set_user",
@@ -287,13 +286,6 @@ def start_transaction(
)


def set_measurement(name, value, unit=""):
# type: (str, float, sentry_sdk._types.MeasurementUnit) -> None
transaction = get_current_scope().root_span
if transaction is not None:
transaction.set_measurement(name, value, unit)


def get_current_span(scope=None):
# type: (Optional[Scope]) -> Optional[sentry_sdk.tracing.Span]
"""
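A minimal sketch of replacing the removed top-level helper, mirroring the root-span lookup it performed. The attribute name and value are illustrative, and `Span.set_attribute` is assumed to be the intended substitute based on the other changes in this PR.

```python
import sentry_sdk

# 2.x: sentry_sdk.set_measurement("frames_dropped", 3)
# 3.x: the helper is gone; attach the value to the root span yourself.
root_span = sentry_sdk.get_current_scope().root_span  # same lookup the removed helper used
if root_span is not None:
    root_span.set_attribute("frames_dropped", 3)
```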
1 change: 0 additions & 1 deletion sentry_sdk/opentelemetry/consts.py
@@ -26,7 +26,6 @@ class SentrySpanAttribute:
DESCRIPTION = "sentry.description"
OP = "sentry.op"
ORIGIN = "sentry.origin"
MEASUREMENT = "sentry.measurement"
TAG = "sentry.tag"
NAME = "sentry.name"
SOURCE = "sentry.source"
4 changes: 0 additions & 4 deletions sentry_sdk/opentelemetry/span_processor.py
@@ -304,10 +304,6 @@ def _common_span_transaction_attributes_as_json(self, span):
"timestamp": convert_from_otel_timestamp(span.end_time),
} # type: Event

measurements = extract_span_attributes(span, SentrySpanAttribute.MEASUREMENT)
if measurements:
common_json["measurements"] = measurements

tags = extract_span_attributes(span, SentrySpanAttribute.TAG)
if tags:
common_json["tags"] = tags
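Illustrative only: the shape of the common span/transaction JSON after this change, assuming a span that carried a `sentry.tag.queue` attribute. Keys other than `tags` are abbreviated; the point is that no `measurements` key is emitted anymore.

```python
common_json = {
    # ...timestamps and other common fields as built above...
    "tags": {"queue": "default"},  # still extracted from sentry.tag.* attributes
    # no "measurements" key: the sentry.measurement.* namespace is gone
}
```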
10 changes: 1 addition & 9 deletions sentry_sdk/opentelemetry/utils.py
@@ -309,15 +309,7 @@ def extract_span_attributes(span, namespace):
for attr, value in (span.attributes or {}).items():
if attr.startswith(namespace):
key = attr[len(namespace) + 1 :]

if namespace == SentrySpanAttribute.MEASUREMENT:
value = cast("tuple[str, str]", value)
extracted_attrs[key] = {
"value": float(value[0]),
"unit": value[1],
}
else:
extracted_attrs[key] = value
extracted_attrs[key] = value

return extracted_attrs

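As a hedged illustration of the simplified behaviour, using a stand-in object in place of a real OTel span (only the `.attributes` mapping is consulted by the code above; the attribute keys and values are made up):

```python
from types import SimpleNamespace

from sentry_sdk.opentelemetry.consts import SentrySpanAttribute
from sentry_sdk.opentelemetry.utils import extract_span_attributes

fake_span = SimpleNamespace(
    attributes={"sentry.tag.queue": "default", "sentry.op": "queue.process"}
)

# Keys under the requested namespace are returned with the prefix stripped, and
# values now pass through untouched; the tuple-to-{"value", "unit"} conversion
# that existed only for the removed measurement namespace is gone.
assert extract_span_attributes(fake_span, SentrySpanAttribute.TAG) == {"queue": "default"}
```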
12 changes: 0 additions & 12 deletions sentry_sdk/tracing.py
@@ -65,7 +65,6 @@
R = TypeVar("R")

from sentry_sdk._types import (
MeasurementUnit,
SamplingContext,
)

@@ -150,10 +149,6 @@ def finish(
# type: (...) -> None
pass

def set_measurement(self, name, value, unit=""):
# type: (str, float, MeasurementUnit) -> None
pass

def set_context(self, key, value):
# type: (str, dict[str, Any]) -> None
pass
@@ -540,13 +535,6 @@ def set_status(self, status):
else:
self._otel_span.set_status(Status(otel_status, otel_description))

def set_measurement(self, name, value, unit=""):
# type: (str, float, MeasurementUnit) -> None
# Stringify value here since OTel expects all seq items to be of one type
self.set_attribute(
f"{SentrySpanAttribute.MEASUREMENT}.{name}", (str(value), unit)
)

def set_thread(self, thread_id, thread_name):
# type: (Optional[int], Optional[str]) -> None
if thread_id is not None:
47 changes: 21 additions & 26 deletions tests/integrations/anthropic/test_anthropic.py
@@ -127,9 +127,9 @@ def test_nonstreaming_create_message(
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
assert SPANDATA.AI_RESPONSES not in span["data"]

assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 20
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
assert span["data"]["ai.prompt_tokens_used"] == 10
assert span["data"]["ai.completion_tokens_used"] == 20
assert span["data"]["ai.total_tokens_used"] == 30
assert span["data"]["ai.streaming"] is False


@@ -197,9 +197,9 @@ async def test_nonstreaming_create_message_async(
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
assert SPANDATA.AI_RESPONSES not in span["data"]

assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 20
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
assert span["data"]["ai.prompt_tokens_used"] == 10
assert span["data"]["ai.completion_tokens_used"] == 20
assert span["data"]["ai.total_tokens_used"] == 30
assert span["data"]["ai.streaming"] is False


@@ -299,9 +299,9 @@ def test_streaming_create_message(
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
assert SPANDATA.AI_RESPONSES not in span["data"]

assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 30
assert span["measurements"]["ai_total_tokens_used"]["value"] == 40
assert span["data"]["ai.prompt_tokens_used"] == 10
assert span["data"]["ai.completion_tokens_used"] == 30
assert span["data"]["ai.total_tokens_used"] == 40
assert span["data"]["ai.streaming"] is True


@@ -404,9 +404,9 @@ async def test_streaming_create_message_async(
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
assert SPANDATA.AI_RESPONSES not in span["data"]

assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 30
assert span["measurements"]["ai_total_tokens_used"]["value"] == 40
assert span["data"]["ai.prompt_tokens_used"] == 10
assert span["data"]["ai.completion_tokens_used"] == 30
assert span["data"]["ai.total_tokens_used"] == 40
assert span["data"]["ai.streaming"] is True


@@ -536,9 +536,9 @@ def test_streaming_create_message_with_input_json_delta(
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
assert SPANDATA.AI_RESPONSES not in span["data"]

assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 366
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 51
assert span["measurements"]["ai_total_tokens_used"]["value"] == 417
assert span["data"]["ai.prompt_tokens_used"] == 366
assert span["data"]["ai.completion_tokens_used"] == 51
assert span["data"]["ai.total_tokens_used"] == 417
assert span["data"]["ai.streaming"] is True


@@ -675,9 +675,9 @@ async def test_streaming_create_message_with_input_json_delta_async(
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
assert SPANDATA.AI_RESPONSES not in span["data"]

assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 366
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 51
assert span["measurements"]["ai_total_tokens_used"]["value"] == 417
assert span["data"]["ai.prompt_tokens_used"] == 366
assert span["data"]["ai.completion_tokens_used"] == 51
assert span["data"]["ai.total_tokens_used"] == 417
assert span["data"]["ai.streaming"] is True


@@ -822,11 +822,6 @@ def test_add_ai_data_to_span_with_input_json_delta(sentry_init, capture_events):
content_blocks=["{'test': 'data',", "'more': 'json'}"],
)

# assert span._data.get("ai.streaming") is True
# assert span._measurements.get("ai_prompt_tokens_used")["value"] == 10
# assert span._measurements.get("ai_completion_tokens_used")["value"] == 20
# assert span._measurements.get("ai_total_tokens_used")["value"] == 30

(event,) = events

assert len(event["spans"]) == 1
@@ -836,6 +831,6 @@ def test_add_ai_data_to_span_with_input_json_delta(sentry_init, capture_events):
[{"type": "text", "text": "{'test': 'data','more': 'json'}"}]
)
assert span["data"]["ai.streaming"] is True
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 20
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
assert span["data"]["ai.prompt_tokens_used"] == 10
assert span["data"]["ai.completion_tokens_used"] == 20
assert span["data"]["ai.total_tokens_used"] == 30
16 changes: 8 additions & 8 deletions tests/integrations/cohere/test_cohere.py
@@ -64,9 +64,9 @@ def test_nonstreaming_chat(
assert "ai.input_messages" not in span["data"]
assert "ai.responses" not in span["data"]

assert span["measurements"]["ai_completion_tokens_used"]["value"] == 10
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
assert span["data"]["ai.completion_tokens_used"] == 10
assert span["data"]["ai.prompt_tokens_used"] == 20
assert span["data"]["ai.total_tokens_used"] == 30


# noinspection PyTypeChecker
@@ -136,9 +136,9 @@ def test_streaming_chat(sentry_init, capture_events, send_default_pii, include_p
assert "ai.input_messages" not in span["data"]
assert "ai.responses" not in span["data"]

assert span["measurements"]["ai_completion_tokens_used"]["value"] == 10
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
assert span["data"]["ai.completion_tokens_used"] == 10
assert span["data"]["ai.prompt_tokens_used"] == 20
assert span["data"]["ai.total_tokens_used"] == 30


def test_bad_chat(sentry_init, capture_events):
@@ -200,8 +200,8 @@ def test_embed(sentry_init, capture_events, send_default_pii, include_prompts):
else:
assert "ai.input_messages" not in span["data"]

assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
assert span["measurements"]["ai_total_tokens_used"]["value"] == 10
assert span["data"]["ai.prompt_tokens_used"] == 10
assert span["data"]["ai.total_tokens_used"] == 10


def test_span_origin_chat(sentry_init, capture_events):
4 changes: 2 additions & 2 deletions tests/integrations/huggingface_hub/test_huggingface_hub.py
@@ -74,7 +74,7 @@ def test_nonstreaming_chat_completion(
assert "ai.responses" not in span["data"]

if details_arg:
assert span["measurements"]["ai_total_tokens_used"]["value"] == 10
assert span["data"]["ai.total_tokens_used"] == 10


@pytest.mark.parametrize(
@@ -133,7 +133,7 @@ def test_streaming_chat_completion(
assert "ai.responses" not in span["data"]

if details_arg:
assert span["measurements"]["ai_total_tokens_used"]["value"] == 10
assert span["data"]["ai.total_tokens_used"] == 10


def test_bad_chat_completion(sentry_init, capture_events):
7 changes: 4 additions & 3 deletions tests/integrations/langchain/test_langchain.py
@@ -179,12 +179,13 @@ def test_langchain_agent(
assert len(list(x for x in tx["spans"] if x["op"] == "ai.run.langchain")) > 0

if use_unknown_llm_type:
assert "ai_prompt_tokens_used" in chat_spans[0]["measurements"]
assert "ai_total_tokens_used" in chat_spans[0]["measurements"]
assert "ai.prompt_tokens_used" in chat_spans[0]["data"]
assert "ai.total_tokens_used" in chat_spans[0]["data"]
else:
# important: to avoid double counting, we do *not* measure
# tokens used if we have an explicit integration (e.g. OpenAI)
assert "measurements" not in chat_spans[0]
assert "ai.prompt_tokens_used" not in chat_spans[0]["data"]
assert "ai.total_tokens_used" not in chat_spans[0]["data"]

if send_default_pii and include_prompts:
assert "You are very powerful" in chat_spans[0]["data"]["ai.input_messages"]