Skip to content
Draft
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
- Metric size limit per metric default changed to 1 MiB. ([#5779](https://github.com/getsentry/relay/pull/5779))
- Use `gen_ai.function_id` as a fallback for `gen_ai.agent.name`. ([#5776](https://github.com/getsentry/relay/pull/5776))
- Add `gen_ai.input.messages` and `gen_ai.output.messages` as distinct fields for SpanData. ([#5797](https://github.com/getsentry/relay/pull/5797))
- Update several `gen_ai` attributes to their latest representation. ([#5798](https://github.com/getsentry/relay/pull/5798))
- Extract `http.query` and `url.query` attributes from `query_string` in transactions' request context. ([#5784](https://github.com/getsentry/relay/pull/5784))

**Internal**:
Expand Down
2 changes: 1 addition & 1 deletion relay-conventions/src/consts.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ convention_attributes!(
GEN_AI_REQUEST_MODEL => "gen_ai.request.model",
GEN_AI_RESPONSE_MODEL => "gen_ai.response.model",
GEN_AI_RESPONSE_TPS => "gen_ai.response.tokens_per_second",
GEN_AI_SYSTEM => "gen_ai.system",
GEN_AI_PROVIDER_NAME => "gen_ai.provider.name",
GEN_AI_USAGE_INPUT_CACHED_TOKENS => "gen_ai.usage.input_tokens.cached",
GEN_AI_USAGE_INPUT_CACHE_WRITE_TOKENS => "gen_ai.usage.input_tokens.cache_write",
GEN_AI_USAGE_INPUT_TOKENS => "gen_ai.usage.input_tokens",
Expand Down
81 changes: 40 additions & 41 deletions relay-event-schema/src/protocol/span.rs
Original file line number Diff line number Diff line change
Expand Up @@ -553,51 +553,46 @@ pub struct SpanData {
#[metastructure(field = "gen_ai.cost.output_tokens", pii = "maybe")]
pub gen_ai_cost_output_tokens: Annotated<Value>,

/// Prompt passed to LLM (Vercel AI SDK)
#[metastructure(field = "gen_ai.prompt", pii = "maybe")]
pub gen_ai_prompt: Annotated<Value>,

/// Prompt passed to LLM
/// The input messages to the model call.
#[metastructure(
field = "gen_ai.request.messages",
field = "gen_ai.input.messages",
pii = "maybe",
legacy_alias = "gen_ai.prompt",
legacy_alias = "gen_ai.request.messages",
legacy_alias = "ai.prompt.messages"
)]
pub gen_ai_request_messages: Annotated<Value>,
pub gen_ai_input_messages: Annotated<Value>,

/// Tool call arguments
/// Tool call arguments.
#[metastructure(
field = "gen_ai.tool.input",
field = "gen_ai.tool.call.arguments",
pii = "maybe",
legacy_alias = "gen_ai.tool.input",
legacy_alias = "ai.toolCall.args"
)]
pub gen_ai_tool_input: Annotated<Value>,
pub gen_ai_tool_call_arguments: Annotated<Value>,

/// Tool call result
/// Tool call result.
#[metastructure(
field = "gen_ai.tool.output",
field = "gen_ai.tool.call.result",
pii = "maybe",
legacy_alias = "gen_ai.tool.output",
legacy_alias = "ai.toolCall.result"
)]
pub gen_ai_tool_output: Annotated<Value>,
pub gen_ai_tool_call_result: Annotated<Value>,

/// LLM decisions to use tools
/// The output messages from the model call.
#[metastructure(
field = "gen_ai.response.tool_calls",
field = "gen_ai.output.messages",
legacy_alias = "gen_ai.response.tool_calls",
legacy_alias = "ai.response.toolCalls",
legacy_alias = "ai.tool_calls",
pii = "maybe"
)]
pub gen_ai_response_tool_calls: Annotated<Value>,

/// LLM response text (Vercel AI, generateText)
#[metastructure(
field = "gen_ai.response.text",
legacy_alias = "gen_ai.response.text",
legacy_alias = "ai.response.text",
legacy_alias = "ai.responses",
pii = "maybe"
)]
pub gen_ai_response_text: Annotated<Value>,
pub gen_ai_output_messages: Annotated<Value>,

/// LLM response object (Vercel AI, generateObject)
#[metastructure(field = "gen_ai.response.object", pii = "maybe")]
Expand All @@ -615,13 +610,14 @@ pub struct SpanData {
#[metastructure(field = "gen_ai.response.time_to_first_token", pii = "maybe")]
pub gen_ai_response_time_to_first_token: Annotated<Value>,

/// The available tools for a request to an LLM
/// The tool definitions available for a request to an LLM.
#[metastructure(
field = "gen_ai.request.available_tools",
field = "gen_ai.tool.definitions",
legacy_alias = "gen_ai.request.available_tools",
legacy_alias = "ai.tools",
pii = "maybe"
)]
pub gen_ai_request_available_tools: Annotated<Value>,
pub gen_ai_tool_definitions: Annotated<Value>,

/// The frequency penalty for a request to an LLM
#[metastructure(
Expand Down Expand Up @@ -653,20 +649,25 @@ pub struct SpanData {
#[metastructure(field = "gen_ai.request.top_p", legacy_alias = "ai.top_p")]
pub gen_ai_request_top_p: Annotated<Value>,

/// The finish reason for a response from an LLM
/// The finish reasons for a response from an LLM.
#[metastructure(
field = "gen_ai.response.finish_reason",
field = "gen_ai.response.finish_reasons",
legacy_alias = "gen_ai.response.finish_reason",
legacy_alias = "ai.finish_reason"
)]
pub gen_ai_response_finish_reason: Annotated<Value>,
pub gen_ai_response_finish_reasons: Annotated<Value>,

/// The unique identifier for a response from an LLM
#[metastructure(field = "gen_ai.response.id", legacy_alias = "ai.generation_id")]
pub gen_ai_response_id: Annotated<Value>,

/// The GenAI system identifier
#[metastructure(field = "gen_ai.system", legacy_alias = "ai.model.provider")]
pub gen_ai_system: Annotated<Value>,
/// The GenAI provider name.
#[metastructure(
field = "gen_ai.provider.name",
legacy_alias = "gen_ai.system",
legacy_alias = "ai.model.provider"
)]
pub gen_ai_provider_name: Annotated<Value>,

/// The system instructions passed to the model.
#[metastructure(
Expand Down Expand Up @@ -1526,26 +1527,24 @@ mod tests {
gen_ai_cost_total_tokens: ~,
gen_ai_cost_input_tokens: ~,
gen_ai_cost_output_tokens: ~,
gen_ai_prompt: ~,
gen_ai_request_messages: ~,
gen_ai_tool_input: ~,
gen_ai_tool_output: ~,
gen_ai_response_tool_calls: ~,
gen_ai_response_text: ~,
gen_ai_input_messages: ~,
gen_ai_tool_call_arguments: ~,
gen_ai_tool_call_result: ~,
gen_ai_output_messages: ~,
gen_ai_response_object: ~,
gen_ai_response_streaming: ~,
gen_ai_response_tokens_per_second: ~,
gen_ai_response_time_to_first_token: ~,
gen_ai_request_available_tools: ~,
gen_ai_tool_definitions: ~,
gen_ai_request_frequency_penalty: ~,
gen_ai_request_presence_penalty: ~,
gen_ai_request_seed: ~,
gen_ai_request_temperature: ~,
gen_ai_request_top_k: ~,
gen_ai_request_top_p: ~,
gen_ai_response_finish_reason: ~,
gen_ai_response_finish_reasons: ~,
gen_ai_response_id: ~,
gen_ai_system: ~,
gen_ai_provider_name: ~,
gen_ai_system_instructions: ~,
gen_ai_tool_name: ~,
gen_ai_operation_name: ~,
Expand Down
16 changes: 7 additions & 9 deletions relay-event-schema/src/protocol/span/convert.rs
Original file line number Diff line number Diff line change
Expand Up @@ -183,26 +183,24 @@ mod tests {
gen_ai_cost_total_tokens: ~,
gen_ai_cost_input_tokens: ~,
gen_ai_cost_output_tokens: ~,
gen_ai_prompt: ~,
gen_ai_request_messages: ~,
gen_ai_tool_input: ~,
gen_ai_tool_output: ~,
gen_ai_response_tool_calls: ~,
gen_ai_response_text: ~,
gen_ai_input_messages: ~,
gen_ai_tool_call_arguments: ~,
gen_ai_tool_call_result: ~,
gen_ai_output_messages: ~,
gen_ai_response_object: ~,
gen_ai_response_streaming: ~,
gen_ai_response_tokens_per_second: ~,
gen_ai_response_time_to_first_token: ~,
gen_ai_request_available_tools: ~,
gen_ai_tool_definitions: ~,
gen_ai_request_frequency_penalty: ~,
gen_ai_request_presence_penalty: ~,
gen_ai_request_seed: ~,
gen_ai_request_temperature: ~,
gen_ai_request_top_k: ~,
gen_ai_request_top_p: ~,
gen_ai_response_finish_reason: ~,
gen_ai_response_finish_reasons: ~,
gen_ai_response_id: ~,
gen_ai_system: ~,
gen_ai_provider_name: ~,
gen_ai_system_instructions: ~,
gen_ai_tool_name: ~,
gen_ai_operation_name: ~,
Expand Down
Loading
Loading