diff --git a/CHANGELOG.md b/CHANGELOG.md index f4ed36f9bd5..6832fe1660b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ - Remove continuous-profiling-beta feature flags. ([#5762](https://github.com/getsentry/relay/pull/5762)) - Playstation: Do not upload attachments if quota is 0. ([#5770](https://github.com/getsentry/relay/pull/5770)) - Add payload byte size to trace metrics. ([#5764](https://github.com/getsentry/relay/pull/5764)) +- Remove transaction metrics extraction. ([#5792](https://github.com/getsentry/relay/pull/5792)) - Mix kafka partition key with org id. ([#5772](https://github.com/getsentry/relay/pull/5772)) ## 26.3.1 diff --git a/relay-base-schema/src/metrics/mri.rs b/relay-base-schema/src/metrics/mri.rs index c8f67d6f54e..ae08462d85a 100644 --- a/relay-base-schema/src/metrics/mri.rs +++ b/relay-base-schema/src/metrics/mri.rs @@ -108,8 +108,6 @@ impl Error for ParseMetricError {} pub enum MetricNamespace { /// Metrics extracted from sessions. Sessions, - /// Metrics extracted from transaction events. - Transactions, /// Metrics extracted from spans. Spans, /// User-defined metrics directly sent by SDKs and applications. @@ -128,21 +126,14 @@ pub enum MetricNamespace { impl MetricNamespace { /// Returns all namespaces/variants of this enum. - pub fn all() -> [Self; 5] { - [ - Self::Sessions, - Self::Transactions, - Self::Spans, - Self::Custom, - Self::Unsupported, - ] + pub fn all() -> [Self; 4] { + [Self::Sessions, Self::Spans, Self::Custom, Self::Unsupported] } /// Returns the string representation for this metric type. 
pub fn as_str(&self) -> &'static str { match self { Self::Sessions => "sessions", - Self::Transactions => "transactions", Self::Spans => "spans", Self::Custom => "custom", Self::Unsupported => "unsupported", @@ -156,7 +147,6 @@ impl std::str::FromStr for MetricNamespace { fn from_str(ns: &str) -> Result { match ns { "sessions" => Ok(Self::Sessions), - "transactions" => Ok(Self::Transactions), "spans" => Ok(Self::Spans), "custom" => Ok(Self::Custom), _ => Ok(Self::Unsupported), diff --git a/relay-cardinality/src/limiter.rs b/relay-cardinality/src/limiter.rs index fa517cd2c06..da728ad4e47 100644 --- a/relay-cardinality/src/limiter.rs +++ b/relay-cardinality/src/limiter.rs @@ -506,8 +506,8 @@ mod tests { let limiter = CardinalityLimiter::new(RejectAllLimiter); let items = vec![ - Item::new(0, MetricNamespace::Transactions), - Item::new(1, MetricNamespace::Transactions), + Item::new(0, MetricNamespace::Sessions), + Item::new(1, MetricNamespace::Sessions), ]; let limits = build_limits(); let result = limiter @@ -550,7 +550,7 @@ mod tests { let limiter = CardinalityLimiter::new(AcceptAllLimiter); let items = vec![ - Item::new(0, MetricNamespace::Transactions), + Item::new(0, MetricNamespace::Sessions), Item::new(1, MetricNamespace::Spans), ]; let limits = build_limits(); @@ -598,11 +598,11 @@ mod tests { let items = vec![ Item::new(0, MetricNamespace::Sessions), - Item::new(1, MetricNamespace::Transactions), + Item::new(1, MetricNamespace::Unsupported), Item::new(2, MetricNamespace::Spans), Item::new(3, MetricNamespace::Custom), Item::new(4, MetricNamespace::Custom), - Item::new(5, MetricNamespace::Transactions), + Item::new(5, MetricNamespace::Unsupported), Item::new(6, MetricNamespace::Spans), ]; let limits = build_limits(); @@ -624,9 +624,9 @@ mod tests { assert_eq!( split.accepted, vec![ - Item::new(1, MetricNamespace::Transactions), + Item::new(1, MetricNamespace::Unsupported), Item::new(3, MetricNamespace::Custom), - Item::new(5, MetricNamespace::Transactions), + 
Item::new(5, MetricNamespace::Unsupported), ] ); } diff --git a/relay-cardinality/src/redis/limiter.rs b/relay-cardinality/src/redis/limiter.rs index 4ac3e774b8a..2c3060b574b 100644 --- a/relay-cardinality/src/redis/limiter.rs +++ b/relay-cardinality/src/redis/limiter.rs @@ -844,7 +844,7 @@ mod tests { let entries1 = [Entry::new(EntryId(0), Custom, &m0, 0)]; let entries2 = [Entry::new(EntryId(0), Spans, &m1, 1)]; - let entries3 = [Entry::new(EntryId(0), Transactions, &m2, 2)]; + let entries3 = [Entry::new(EntryId(0), Sessions, &m2, 2)]; let rejected = limiter.test_limits(scoping, limits, entries1).await; assert_eq!(rejected.len(), 0); @@ -908,7 +908,7 @@ mod tests { }, limit: 1, scope: CardinalityScope::Unknown, - namespace: Some(Transactions), + namespace: Some(Sessions), }, ]; @@ -924,8 +924,8 @@ mod tests { Entry::new(EntryId(1), Custom, &m1, 1), Entry::new(EntryId(2), Spans, &m2, 2), Entry::new(EntryId(3), Spans, &m3, 3), - Entry::new(EntryId(4), Transactions, &m4, 4), - Entry::new(EntryId(5), Transactions, &m5, 5), + Entry::new(EntryId(4), Sessions, &m4, 4), + Entry::new(EntryId(5), Sessions, &m5, 5), ]; // Run multiple times to make sure caching does not interfere. diff --git a/relay-cogs/src/lib.rs b/relay-cogs/src/lib.rs index ad1e6b913d4..dbd0b85640e 100644 --- a/relay-cogs/src/lib.rs +++ b/relay-cogs/src/lib.rs @@ -174,8 +174,6 @@ pub enum AppFeature { /// User Reports UserReports, - /// Metrics in the transactions namespace. - MetricsTransactions, /// Metrics in the spans namespace. MetricsSpans, /// Metrics in the sessions namespace. 
@@ -208,7 +206,6 @@ impl AppFeature { Self::CheckIns => "check_ins", Self::Replays => "replays", Self::UserReports => "user_reports", - Self::MetricsTransactions => "metrics_transactions", Self::MetricsSpans => "metrics_spans", Self::MetricsSessions => "metrics_sessions", Self::MetricsCustom => "metrics_custom", diff --git a/relay-config/src/aggregator.rs b/relay-config/src/aggregator.rs index dd52c7fae39..1e39bed7a19 100644 --- a/relay-config/src/aggregator.rs +++ b/relay-config/src/aggregator.rs @@ -165,6 +165,6 @@ mod tests { let condition = serde_json::from_value::(json).unwrap(); assert!(condition.matches(Some(MetricNamespace::Spans))); assert!(condition.matches(Some(MetricNamespace::Custom))); - assert!(!condition.matches(Some(MetricNamespace::Transactions))); + assert!(!condition.matches(Some(MetricNamespace::Sessions))); } } diff --git a/relay-dynamic-config/src/global.rs b/relay-dynamic-config/src/global.rs index 983d74b3fa8..e1c967b95ee 100644 --- a/relay-dynamic-config/src/global.rs +++ b/relay-dynamic-config/src/global.rs @@ -206,7 +206,6 @@ pub enum CardinalityLimiterMode { #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq)] #[serde(default)] pub struct BucketEncodings { - transactions: BucketEncoding, spans: BucketEncoding, profiles: BucketEncoding, custom: BucketEncoding, @@ -216,7 +215,6 @@ impl BucketEncodings { /// Returns the configured encoding for a specific namespace. 
pub fn for_namespace(&self, namespace: MetricNamespace) -> BucketEncoding { match namespace { - MetricNamespace::Transactions => self.transactions, MetricNamespace::Spans => self.spans, MetricNamespace::Custom => self.custom, // Always force the legacy encoding for sessions, @@ -250,7 +248,6 @@ where { let encoding = BucketEncoding::deserialize(de::value::StrDeserializer::new(v))?; Ok(BucketEncodings { - transactions: encoding, spans: encoding, profiles: encoding, custom: encoding, @@ -453,7 +450,6 @@ mod tests { assert_eq!( o.metric_bucket_set_encodings, BucketEncodings { - transactions: BucketEncoding::Legacy, spans: BucketEncoding::Legacy, profiles: BucketEncoding::Legacy, custom: BucketEncoding::Legacy, @@ -462,7 +458,6 @@ mod tests { assert_eq!( o.metric_bucket_dist_encodings, BucketEncodings { - transactions: BucketEncoding::Zstd, spans: BucketEncoding::Zstd, profiles: BucketEncoding::Zstd, custom: BucketEncoding::Zstd, @@ -473,7 +468,6 @@ mod tests { #[test] fn test_metric_bucket_encodings_de_from_obj() { let original = BucketEncodings { - transactions: BucketEncoding::Base64, spans: BucketEncoding::Zstd, profiles: BucketEncoding::Base64, custom: BucketEncoding::Zstd, diff --git a/relay-dynamic-config/src/metrics.rs b/relay-dynamic-config/src/metrics.rs index 20a2a40f698..9ba725b4501 100644 --- a/relay-dynamic-config/src/metrics.rs +++ b/relay-dynamic-config/src/metrics.rs @@ -102,68 +102,6 @@ pub struct CustomMeasurementConfig { limit: usize, } -/// Maximum supported version of metrics extraction from transactions. -/// -/// The version is an integer scalar, incremented by one on each new version: -/// - 1: Initial version. -/// - 2: Moves `acceptTransactionNames` to global config. -/// - 3: -/// - Emit a `usage` metric and use it for rate limiting. -/// - Delay metrics extraction for indexed transactions. -/// - 4: Adds support for `RuleConfigs` with string comparisons. 
-/// - 5: No change, bumped together with [`MetricExtractionConfig::MAX_SUPPORTED_VERSION`]. -/// - 6: Bugfix to make transaction metrics extraction apply globally defined tag mappings. -const TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION: u16 = 6; - -/// Minimum supported version of metrics extraction from transaction. -const TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION: u16 = 3; - -/// Deprecated. Defines whether URL transactions should be considered low cardinality. -#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "camelCase")] -#[derive(Default)] -pub enum AcceptTransactionNames { - /// Only accept transaction names with a low-cardinality source. - Strict, - - /// For some SDKs, accept all transaction names, while for others, apply strict rules. - #[serde(other)] - #[default] - ClientBased, -} - -/// Configuration for extracting metrics from transaction payloads. -#[derive(Default, Debug, Clone, Serialize, Deserialize)] -#[serde(default, rename_all = "camelCase")] -pub struct TransactionMetricsConfig { - /// The required version to extract transaction metrics. - pub version: u16, - /// Custom event tags that are transferred from the transaction to metrics. - pub extract_custom_tags: BTreeSet, - /// Deprecated in favor of top-level config field. Still here to be forwarded to external relays. - pub custom_measurements: CustomMeasurementConfig, - /// Deprecated. Defines whether URL transactions should be considered low cardinality. - /// Keep this around for external Relays. - #[serde(rename = "acceptTransactionNames")] - pub deprecated1: AcceptTransactionNames, -} - -impl TransactionMetricsConfig { - /// Creates an enabled configuration with empty defaults. - pub fn new() -> Self { - Self { - version: TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION, - ..Self::default() - } - } - - /// Returns `true` if metrics extraction is enabled and compatible with this Relay. 
- pub fn is_enabled(&self) -> bool { - self.version >= TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - && self.version <= TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION - } -} - /// Combined view of global and project-specific metrics extraction configs. #[derive(Debug, Clone, Copy)] pub struct CombinedMetricExtractionConfig<'a> { diff --git a/relay-dynamic-config/src/project.rs b/relay-dynamic-config/src/project.rs index c358d9fdf4c..e1c3322d0d0 100644 --- a/relay-dynamic-config/src/project.rs +++ b/relay-dynamic-config/src/project.rs @@ -12,10 +12,7 @@ use serde_json::Value; use crate::error_boundary::ErrorBoundary; use crate::feature::FeatureSet; -use crate::metrics::{ - self, MetricExtractionConfig, Metrics, SessionMetricsConfig, TaggingRule, - TransactionMetricsConfig, -}; +use crate::metrics::{self, MetricExtractionConfig, Metrics, SessionMetricsConfig, TaggingRule}; use crate::trusted_relay::TrustedRelayConfig; use crate::{GRADUATED_FEATURE_FLAGS, defaults}; @@ -72,9 +69,6 @@ pub struct ProjectConfig { /// Configuration for extracting metrics from sessions. #[serde(skip_serializing_if = "SessionMetricsConfig::is_disabled")] pub session_metrics: SessionMetricsConfig, - /// Configuration for extracting metrics from transaction events. - #[serde(skip_serializing_if = "Option::is_none")] - pub transaction_metrics: Option>, /// Configuration for generic metrics extraction from all data categories. 
#[serde(default, skip_serializing_if = "skip_metrics_extraction")] pub metric_extraction: ErrorBoundary, @@ -169,7 +163,6 @@ impl Default for ProjectConfig { breakdowns_v2: None, performance_score: Default::default(), session_metrics: SessionMetricsConfig::default(), - transaction_metrics: None, metric_extraction: Default::default(), metric_conditional_tagging: Vec::new(), features: Default::default(), @@ -215,8 +208,6 @@ pub struct LimitedProjectConfig { pub sampling: Option>, #[serde(skip_serializing_if = "SessionMetricsConfig::is_disabled")] pub session_metrics: SessionMetricsConfig, - #[serde(skip_serializing_if = "Option::is_none")] - pub transaction_metrics: Option>, #[serde(default, skip_serializing_if = "skip_metrics_extraction")] pub metric_extraction: ErrorBoundary, #[serde(skip_serializing_if = "Vec::is_empty")] diff --git a/relay-metrics/src/bucket.rs b/relay-metrics/src/bucket.rs index 27653724d47..b04d87b4a61 100644 --- a/relay-metrics/src/bucket.rs +++ b/relay-metrics/src/bucket.rs @@ -927,15 +927,15 @@ mod tests { #[test] fn test_parse_counter() { - let s = "transactions/foo:42|c"; + let s = "spans/foo:42|c"; let timestamp = UnixTimestamp::from_secs(4711); let metric = Bucket::parse(s.as_bytes(), timestamp).unwrap(); - insta::assert_debug_snapshot!(metric, @r###" + insta::assert_debug_snapshot!(metric, @r#" Bucket { timestamp: UnixTimestamp(4711), width: 0, name: MetricName( - "c:transactions/foo@none", + "c:spans/foo@none", ), value: Counter( 42.0, @@ -947,12 +947,12 @@ mod tests { extracted_from_indexed: false, }, } - "###); + "#); } #[test] fn test_parse_counter_packed() { - let s = "transactions/foo:42:17:21|c"; + let s = "spans/foo:42:17:21|c"; let timestamp = UnixTimestamp::from_secs(4711); let metric = Bucket::parse(s.as_bytes(), timestamp).unwrap(); assert_eq!(metric.value, BucketValue::Counter(80.into())); @@ -960,15 +960,15 @@ mod tests { #[test] fn test_parse_distribution() { - let s = "transactions/foo:17.5|d"; + let s = 
"spans/foo:17.5|d"; let timestamp = UnixTimestamp::from_secs(4711); let metric = Bucket::parse(s.as_bytes(), timestamp).unwrap(); - insta::assert_debug_snapshot!(metric, @r###" + insta::assert_debug_snapshot!(metric, @r#" Bucket { timestamp: UnixTimestamp(4711), width: 0, name: MetricName( - "d:transactions/foo@none", + "d:spans/foo@none", ), value: Distribution( [ @@ -982,7 +982,7 @@ mod tests { extracted_from_indexed: false, }, } - "###); + "#); } #[test] @@ -1013,15 +1013,15 @@ mod tests { #[test] fn test_parse_set() { - let s = "transactions/foo:4267882815|s"; + let s = "spans/foo:4267882815|s"; let timestamp = UnixTimestamp::from_secs(4711); let metric = Bucket::parse(s.as_bytes(), timestamp).unwrap(); - insta::assert_debug_snapshot!(metric, @r###" + insta::assert_debug_snapshot!(metric, @r#" Bucket { timestamp: UnixTimestamp(4711), width: 0, name: MetricName( - "s:transactions/foo@none", + "s:spans/foo@none", ), value: Set( { @@ -1035,7 +1035,7 @@ mod tests { extracted_from_indexed: false, }, } - "###); + "#); } #[test] @@ -1070,15 +1070,15 @@ mod tests { #[test] fn test_parse_gauge() { - let s = "transactions/foo:42|g"; + let s = "spans/foo:42|g"; let timestamp = UnixTimestamp::from_secs(4711); let metric = Bucket::parse(s.as_bytes(), timestamp).unwrap(); - insta::assert_debug_snapshot!(metric, @r###" + insta::assert_debug_snapshot!(metric, @r#" Bucket { timestamp: UnixTimestamp(4711), width: 0, name: MetricName( - "g:transactions/foo@none", + "g:spans/foo@none", ), value: Gauge( GaugeValue { @@ -1096,20 +1096,20 @@ mod tests { extracted_from_indexed: false, }, } - "###); + "#); } #[test] fn test_parse_gauge_packed() { - let s = "transactions/foo:25:17:42:220:85|g"; + let s = "spans/foo:25:17:42:220:85|g"; let timestamp = UnixTimestamp::from_secs(4711); let metric = Bucket::parse(s.as_bytes(), timestamp).unwrap(); - insta::assert_debug_snapshot!(metric, @r###" + insta::assert_debug_snapshot!(metric, @r#" Bucket { timestamp: UnixTimestamp(4711), width: 0, 
name: MetricName( - "g:transactions/foo@none", + "g:spans/foo@none", ), value: Gauge( GaugeValue { @@ -1127,7 +1127,7 @@ mod tests { extracted_from_indexed: false, }, } - "###); + "#); } #[test] diff --git a/relay-metrics/src/cogs.rs b/relay-metrics/src/cogs.rs index be5db5940fc..c024c51bb1c 100644 --- a/relay-metrics/src/cogs.rs +++ b/relay-metrics/src/cogs.rs @@ -38,7 +38,6 @@ where fn to_app_feature(ns: MetricNamespace) -> AppFeature { match ns { MetricNamespace::Sessions => AppFeature::MetricsSessions, - MetricNamespace::Transactions => AppFeature::MetricsTransactions, MetricNamespace::Spans => AppFeature::MetricsSpans, MetricNamespace::Custom => AppFeature::MetricsCustom, MetricNamespace::Unsupported => AppFeature::MetricsUnsupported, diff --git a/relay-metrics/src/protocol.rs b/relay-metrics/src/protocol.rs index cd92219a62c..f1f48349a01 100644 --- a/relay-metrics/src/protocol.rs +++ b/relay-metrics/src/protocol.rs @@ -287,7 +287,7 @@ mod tests { let mut bucket = Bucket { timestamp: UnixTimestamp::from_secs(5000), width: 0, - name: "c:transactions/hergus.bergus".into(), + name: "c:spans/hergus.bergus".into(), value: BucketValue::Counter(0.into()), tags: { let mut tags = MetricTags::new(); @@ -319,7 +319,7 @@ mod tests { { "timestamp": 5000, "width": 0, - "name": "c:transactions/hergus.bergus@none", + "name": "c:spans/hergus.bergus@none", "type": "c", "value": 0.0, "tags": { diff --git a/relay-metrics/src/utils.rs b/relay-metrics/src/utils.rs index 1f6d61031aa..37a9b3faff4 100644 --- a/relay-metrics/src/utils.rs +++ b/relay-metrics/src/utils.rs @@ -17,8 +17,6 @@ pub fn tags_cost(tags: &BTreeMap) -> usize { pub struct ByNamespace { /// Value for the [`MetricNamespace::Sessions`] namespace. pub sessions: T, - /// Value for the [`MetricNamespace::Transactions`] namespace. - pub transactions: T, /// Value for the [`MetricNamespace::Spans`] namespace. pub spans: T, /// Value for the [`MetricNamespace::Custom`] namespace. 
@@ -32,7 +30,6 @@ impl ByNamespace { pub fn get(&self, namespace: MetricNamespace) -> &T { match namespace { MetricNamespace::Sessions => &self.sessions, - MetricNamespace::Transactions => &self.transactions, MetricNamespace::Spans => &self.spans, MetricNamespace::Custom => &self.custom, MetricNamespace::Unsupported => &self.unsupported, @@ -43,7 +40,6 @@ impl ByNamespace { pub fn get_mut(&mut self, namespace: MetricNamespace) -> &mut T { match namespace { MetricNamespace::Sessions => &mut self.sessions, - MetricNamespace::Transactions => &mut self.transactions, MetricNamespace::Spans => &mut self.spans, MetricNamespace::Custom => &mut self.custom, MetricNamespace::Unsupported => &mut self.unsupported, @@ -53,12 +49,11 @@ impl ByNamespace { impl IntoIterator for ByNamespace { type Item = (MetricNamespace, T); - type IntoIter = std::array::IntoIter<(MetricNamespace, T), 5>; + type IntoIter = std::array::IntoIter<(MetricNamespace, T), 4>; fn into_iter(self) -> Self::IntoIter { let Self { sessions, - transactions, spans, custom, unsupported, @@ -66,7 +61,6 @@ impl IntoIterator for ByNamespace { [ (MetricNamespace::Sessions, sessions), - (MetricNamespace::Transactions, transactions), (MetricNamespace::Spans, spans), (MetricNamespace::Custom, custom), (MetricNamespace::Unsupported, unsupported), @@ -109,14 +103,12 @@ macro_rules! 
impl_op { fn $opfn(&mut self, rhs: Self) { let Self { sessions, - transactions, spans, custom, unsupported, } = self; $op::$opfn(sessions, rhs.sessions); - $op::$opfn(transactions, rhs.transactions); $op::$opfn(spans, rhs.spans); $op::$opfn(custom, rhs.custom); $op::$opfn(unsupported, rhs.unsupported); diff --git a/relay-quotas/src/redis.rs b/relay-quotas/src/redis.rs index 9651ec8284b..a127544dd69 100644 --- a/relay-quotas/src/redis.rs +++ b/relay-quotas/src/redis.rs @@ -586,7 +586,7 @@ mod tests { }; let quotas = &[get_quota(None)]; - let quota_with_namespace = &[get_quota(Some(MetricNamespace::Transactions))]; + let quota_with_namespace = &[get_quota(Some(MetricNamespace::Sessions))]; let scoping = ItemScoping { category: DataCategory::Error, @@ -596,7 +596,7 @@ mod tests { project_key: ProjectKey::parse("a94ae32be2584e0bbd7a4cbb95971fee").unwrap(), key_id: Some(44), }, - namespace: MetricNamespaceScoping::Some(MetricNamespace::Transactions), + namespace: MetricNamespaceScoping::Some(MetricNamespace::Sessions), }; let rate_limiter = build_rate_limiter(); @@ -634,7 +634,7 @@ mod tests { } else { assert_eq!( rate_limits[0].reason_code, - Some(ReasonCode::new("ns: Some(Transactions)")) + Some(ReasonCode::new("ns: Some(Sessions)")) ); } } diff --git a/relay-server/src/managed/counted.rs b/relay-server/src/managed/counted.rs index 2b09e8786ce..66f078f6bff 100644 --- a/relay-server/src/managed/counted.rs +++ b/relay-server/src/managed/counted.rs @@ -9,7 +9,7 @@ use relay_quotas::DataCategory; use smallvec::SmallVec; use crate::envelope::{Item, SourceQuantities, WithHeader}; -use crate::metrics_extraction::transactions::ExtractedMetrics; +use crate::metrics_extraction::ExtractedMetrics; use crate::utils::EnvelopeSummary; use crate::{Envelope, metrics, processing}; diff --git a/relay-server/src/metrics/minimal.rs b/relay-server/src/metrics/minimal.rs index 0f03e3d42ee..5eca76ed8e5 100644 --- a/relay-server/src/metrics/minimal.rs +++ b/relay-server/src/metrics/minimal.rs 
@@ -1,3 +1,5 @@ +use std::fmt; + use relay_metrics::{CounterType, MetricName, MetricNamespace, MetricResourceIdentifier}; use serde::Deserialize; use serde::de::IgnoredAny; @@ -11,10 +13,44 @@ use crate::metrics::{BucketSummary, TrackableBucket}; #[derive(Deserialize)] pub struct MinimalTrackableBucket { name: MetricName, + #[serde(default)] + tags: Tags, #[serde(flatten)] value: MinimalValue, } +#[derive(Clone, Copy, Debug, Default, Deserialize)] +struct Tags { + #[serde(default, deserialize_with = "bool_de")] + is_segment: bool, + #[serde(default, deserialize_with = "bool_de")] + was_transaction: bool, +} + +fn bool_de<'de, D: serde::Deserializer<'de>>(deserializer: D) -> Result { + struct Visitor; + + impl<'de> serde::de::Visitor<'de> for Visitor { + type Value = bool; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a boolean or a string representing a boolean") + } + + fn visit_bool(self, v: bool) -> Result { + Ok(v) + } + + fn visit_str(self, v: &str) -> Result { + // Every non true value is considered false. + Ok(v == "true") + } + } + + // Never fail, default to false. 
+ Ok(deserializer.deserialize_any(Visitor).unwrap_or(false)) +} + impl TrackableBucket for MinimalTrackableBucket { fn name(&self) -> &relay_metrics::MetricName { &self.name @@ -27,17 +63,18 @@ impl TrackableBucket for MinimalTrackableBucket { }; match mri.namespace { - MetricNamespace::Transactions => { - let count = match self.value { - MinimalValue::Counter(c) if mri.name == "usage" => c.to_f64() as usize, - _ => 0, - }; - BucketSummary::Transactions(count) - } - MetricNamespace::Spans => BucketSummary::Spans(match self.value { - MinimalValue::Counter(c) if mri.name == "usage" => c.to_f64() as usize, - _ => 0, - }), + MetricNamespace::Spans => match self.value { + MinimalValue::Counter(c) if mri.name == "usage" => BucketSummary::Spans { + count: c.to_f64() as usize, + is_segment: self.tags.is_segment, + was_transaction: self.tags.was_transaction, + }, + _ => BucketSummary::Spans { + count: 0, + is_segment: false, + was_transaction: false, + }, + }, _ => BucketSummary::default(), } } @@ -69,27 +106,19 @@ mod tests { { "timestamp": 1615889440, "width": 10, - "name": "d:transactions/duration@none", - "type": "d", - "value": [ - 36.0, - 49.0, - 57.0, - 68.0 - ], + "name": "c:spans/usage@none", + "type": "c", + "value": 2.0, "tags": { - "has_profile": "true" + "is_segment": "true" } }, { "timestamp": 1615889440, "width": 10, - "name": "c:transactions/usage@none", + "name": "c:spans/unrelated@none", "type": "c", - "value": 3.0, - "tags": { - "route": "user_index" - } + "value": 2.0 }, { "timestamp": 1615889440, @@ -136,20 +165,26 @@ mod tests { } let summary = min_buckets.iter().map(|b| b.summary()).collect::>(); - assert_debug_snapshot!(summary, @r###" + assert_debug_snapshot!(summary, @r" [ - Transactions( - 0, - ), - Transactions( - 3, - ), - Spans( - 3, - ), + Spans { + count: 2, + is_segment: true, + was_transaction: false, + }, + Spans { + count: 0, + is_segment: false, + was_transaction: false, + }, + Spans { + count: 3, + is_segment: false, + was_transaction: 
false, + }, None, None, ] - "###); + "); } } diff --git a/relay-server/src/metrics/outcomes.rs b/relay-server/src/metrics/outcomes.rs index 0174b827e96..e3f94987035 100644 --- a/relay-server/src/metrics/outcomes.rs +++ b/relay-server/src/metrics/outcomes.rs @@ -77,8 +77,11 @@ impl MetricOutcomes { /// Contains the count of total transactions or spans that went into this bucket. #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] pub enum BucketSummary { - Transactions(usize), - Spans(usize), + Spans { + count: usize, + is_segment: bool, + was_transaction: bool, + }, #[default] None, } @@ -127,21 +130,22 @@ impl TrackableBucket for BucketView<'_> { }; match mri.namespace { - MetricNamespace::Transactions => { - let count = match self.value() { - BucketViewValue::Counter(c) if mri.name == "usage" => c.to_f64() as usize, - _ => 0, - }; - BucketSummary::Transactions(count) - } - MetricNamespace::Spans => BucketSummary::Spans(match self.value() { - BucketViewValue::Counter(c) if mri.name == "usage" => c.to_f64() as usize, - _ => 0, - }), - _ => { - // Nothing to count - BucketSummary::default() - } + MetricNamespace::Spans => match self.value() { + BucketViewValue::Counter(c) if mri.name == "usage" => BucketSummary::Spans { + count: c.to_f64() as usize, + is_segment: self.tags().get("is_segment").is_some_and(|s| s == "true"), + was_transaction: self + .tags() + .get("was_transaction") + .is_some_and(|s| s == "true"), + }, + _ => BucketSummary::Spans { + count: 0, + is_segment: false, + was_transaction: false, + }, + }, + _ => BucketSummary::default(), } } } @@ -160,10 +164,16 @@ where // Only count metrics for outcomes, where the indexed payload no longer exists. 
let summary = bucket.summary(); match summary { - BucketSummary::Transactions(count) => { - quantities.transactions += count; + BucketSummary::Spans { + count, + is_segment, + was_transaction, + } => { + quantities.spans += count; + if is_segment && was_transaction { + quantities.transactions += count; + } } - BucketSummary::Spans(count) => quantities.spans += count, BucketSummary::None => continue, }; } diff --git a/relay-server/src/metrics/rate_limits.rs b/relay-server/src/metrics/rate_limits.rs index 07e9ad94e67..842f4f01c49 100644 --- a/relay-server/src/metrics/rate_limits.rs +++ b/relay-server/src/metrics/rate_limits.rs @@ -20,21 +20,14 @@ pub struct MetricsLimiter> = Vec> { /// Project information. scoping: Scoping, - /// The number of performance items (transactions, spans, profiles) contributing to these metrics. + /// The number of performance items (spans) contributing to these metrics. #[cfg(feature = "processing")] counts: EntityCounts, } fn to_counts(summary: &BucketSummary) -> EntityCounts { match *summary { - BucketSummary::Transactions(count) => EntityCounts { - transactions: Some(count), - spans: None, - }, - BucketSummary::Spans(count) => EntityCounts { - transactions: None, - spans: Some(count), - }, + BucketSummary::Spans { count, .. } => EntityCounts { spans: Some(count) }, BucketSummary::None => EntityCounts::default(), } } @@ -48,19 +41,10 @@ struct SummarizedBucket { /// Contains the total counts of limitable entities represented by a batch of metrics. #[derive(Debug, Default, Clone)] struct EntityCounts { - /// The number of transactions represented in the current batch. - /// - /// - `None` if the batch does not contain any transaction metrics. - /// - `Some(0)` if the batch contains transaction metrics, but no `usage` count. - /// - `Some(n > 0)` if the batch contains a `usage` count for transactions. - /// - /// The distinction between `None` and `Some(0)` is needed to decide whether or not a rate limit - /// must be checked. 
- transactions: Option, /// The number of spans represented in the current batch. /// - /// - `None` if the batch does not contain any transaction metrics. - /// - `Some(0)` if the batch contains transaction metrics, but no `usage` count. + /// - `None` if the batch does not contain any span metrics. + /// - `Some(0)` if the batch contains span metrics, but no `usage` count. /// - `Some(n > 0)` if the batch contains a `usage` count for spans. /// /// The distinction between `None` and `Some(0)` is needed to decide whether or not a rate limit @@ -73,7 +57,6 @@ impl std::ops::Add for EntityCounts { fn add(self, rhs: EntityCounts) -> Self::Output { Self { - transactions: add_some(self.transactions, rhs.transactions), spans: add_some(self.spans, rhs.spans), } } @@ -141,7 +124,7 @@ impl>> MetricsLimiter { self.quotas.as_ref() } - /// Counts the number of transactions/spans represented in this batch. + /// Counts the number of spans represented in this batch. /// /// Returns /// - `None` if the batch does not contain metrics related to the data category. @@ -151,12 +134,8 @@ impl>> MetricsLimiter { /// The distinction between `None` and `Some(0)` is needed to decide whether or not a rate limit /// must be checked. #[cfg(feature = "processing")] - pub fn count(&self, category: DataCategory) -> Option { - match category { - DataCategory::Transaction => self.counts.transactions, - DataCategory::Span => self.counts.spans, - _ => None, - } + pub fn count(&self) -> (DataCategory, Option) { + (DataCategory::Span, self.counts.spans) } fn drop_with_outcome( @@ -167,10 +146,9 @@ impl>> MetricsLimiter { ) { let buckets = std::mem::take(&mut self.buckets); let (buckets, dropped) = utils::split_off_map(buckets, |b| match b.summary { - BucketSummary::Transactions { .. } if category == DataCategory::Transaction => { + BucketSummary::Spans { .. 
} if category == DataCategory::Span => { Either::Right(b.bucket) } - BucketSummary::Spans(_) if category == DataCategory::Span => Either::Right(b.bucket), _ => Either::Left(b), }); self.buckets = buckets; @@ -178,7 +156,7 @@ impl>> MetricsLimiter { metric_outcomes.track(self.scoping, &dropped, outcome); } - // Drop transaction-related metrics and create outcomes for any active rate limits. + // Drop span-related metrics and create outcomes for any active rate limits. // // Returns true if any metrics were dropped. pub fn enforce_limits( @@ -186,20 +164,18 @@ impl>> MetricsLimiter { rate_limits: &RateLimits, metric_outcomes: &MetricOutcomes, ) -> bool { - for category in [DataCategory::Transaction, DataCategory::Span] { - let active_rate_limits = - rate_limits.check_with_quotas(self.quotas.as_ref(), self.scoping.item(category)); - - // If a rate limit is active, discard relevant buckets. - if let Some(limit) = active_rate_limits.longest() { - self.drop_with_outcome( - category, - Outcome::RateLimited(limit.reason_code.clone()), - metric_outcomes, - ); - - return true; - } + let active_rate_limits = rate_limits + .check_with_quotas(self.quotas.as_ref(), self.scoping.item(DataCategory::Span)); + + // If a rate limit is active, discard relevant buckets. 
+ if let Some(limit) = active_rate_limits.longest() { + self.drop_with_outcome( + DataCategory::Span, + Outcome::RateLimited(limit.reason_code.clone()), + metric_outcomes, + ); + + return true; } false @@ -273,14 +249,6 @@ mod tests { /// A few different bucket types fn mixed_bag() -> Vec { vec![ - Bucket { - timestamp: UnixTimestamp::now(), - width: 0, - name: "c:transactions/usage@none".into(), - tags: Default::default(), - value: BucketValue::counter(12.into()), - metadata: BucketMetadata::default(), - }, Bucket { timestamp: UnixTimestamp::now(), width: 0, @@ -320,29 +288,12 @@ mod tests { fn mixed_with_span_quota() { let (metrics, outcomes) = run_limiter(mixed_bag(), deny(DataCategory::Span)); - assert_eq!(metrics.len(), 2); - assert_eq!(&*metrics[0].name, "c:transactions/usage@none"); - assert_eq!(&*metrics[1].name, "d:custom/something@millisecond"); + assert_eq!(metrics.len(), 1); + assert_eq!(&*metrics[0].name, "d:custom/something@millisecond"); assert_eq!( outcomes, vec![(Outcome::RateLimited(None), DataCategory::Span, 90)] ); } - - #[test] - fn mixed_with_transaction_quota() { - let (metrics, outcomes) = run_limiter(mixed_bag(), deny(DataCategory::Transaction)); - - assert_eq!(metrics.len(), 4); - assert_eq!(&*metrics[0].name, "c:spans/usage@none"); - assert_eq!(&*metrics[1].name, "c:spans/usage@none"); - assert_eq!(&*metrics[2].name, "d:spans/exclusive_time@millisecond"); - assert_eq!(&*metrics[3].name, "d:custom/something@millisecond"); - - assert_eq!( - outcomes, - vec![(Outcome::RateLimited(None), DataCategory::Transaction, 12)] - ); - } } diff --git a/relay-server/src/metrics_extraction/event.rs b/relay-server/src/metrics_extraction/event.rs index 4ba75b90f40..278c9f38942 100644 --- a/relay-server/src/metrics_extraction/event.rs +++ b/relay-server/src/metrics_extraction/event.rs @@ -8,8 +8,8 @@ use relay_metrics::{Bucket, BucketMetadata, BucketValue}; use relay_quotas::DataCategory; use relay_sampling::evaluation::SamplingDecision; +use 
crate::metrics_extraction::ExtractedMetrics; use crate::metrics_extraction::generic::{self, Extractable}; -use crate::metrics_extraction::transactions::ExtractedMetrics; use crate::processing::transactions::extraction::extract_segment_span; use crate::statsd::RelayTimers; diff --git a/relay-server/src/metrics_extraction/generic.rs b/relay-server/src/metrics_extraction/generic.rs index 81c5a34146b..b97360494bf 100644 --- a/relay-server/src/metrics_extraction/generic.rs +++ b/relay-server/src/metrics_extraction/generic.rs @@ -196,7 +196,7 @@ mod tests { "metrics": [ { "category": "transaction", - "mri": "c:transactions/counter@none", + "mri": "c:spans/counter@none", } ] }); @@ -212,7 +212,7 @@ mod tests { timestamp: UnixTimestamp(1597976302), width: 0, name: MetricName( - "c:transactions/counter@none", + "c:spans/counter@none", ), value: Counter( 1.0, @@ -244,7 +244,7 @@ mod tests { "metrics": [ { "category": "transaction", - "mri": "d:transactions/duration@none", + "mri": "d:spans/duration@none", "field": "event.duration", } ] @@ -261,7 +261,7 @@ mod tests { timestamp: UnixTimestamp(1597976302), width: 0, name: MetricName( - "d:transactions/duration@none", + "d:spans/duration@none", ), value: Distribution( [ @@ -297,7 +297,7 @@ mod tests { "metrics": [ { "category": "transaction", - "mri": "s:transactions/users@none", + "mri": "s:spans/users@none", "field": "event.user.id", } ] @@ -314,7 +314,7 @@ mod tests { timestamp: UnixTimestamp(1597976302), width: 0, name: MetricName( - "s:transactions/users@none", + "s:spans/users@none", ), value: Set( { @@ -350,7 +350,7 @@ mod tests { "metrics": [ { "category": "transaction", - "mri": "s:transactions/users@none", + "mri": "s:spans/users@none", "field": "event.user.id", } ] @@ -367,7 +367,7 @@ mod tests { timestamp: UnixTimestamp(1597976302), width: 0, name: MetricName( - "s:transactions/users@none", + "s:spans/users@none", ), value: Set( { @@ -402,7 +402,7 @@ mod tests { "metrics": [ { "category": "transaction", - "mri": 
"c:transactions/counter@none", + "mri": "c:spans/counter@none", "tags": [ {"key": "id", "value": "4711"}, {"key": "release", "field": "event.release"}, @@ -432,7 +432,7 @@ mod tests { timestamp: UnixTimestamp(1597976302), width: 0, name: MetricName( - "c:transactions/counter@none", + "c:spans/counter@none", ), value: Counter( 1.0, @@ -471,7 +471,7 @@ mod tests { "metrics": [ { "category": "transaction", - "mri": "c:transactions/counter@none", + "mri": "c:spans/counter@none", "tags": [ { "key": "fast", @@ -498,7 +498,7 @@ mod tests { timestamp: UnixTimestamp(1597976302), width: 0, name: MetricName( - "c:transactions/counter@none", + "c:spans/counter@none", ), value: Counter( 1.0, @@ -534,11 +534,11 @@ mod tests { "version": 1, "metrics": [{ "category": "transaction", - "mri": "c:transactions/counter@none", + "mri": "c:spans/counter@none", }], "tags": [ { - "metrics": ["c:transactions/counter@none"], + "metrics": ["c:spans/counter@none"], "tags": [{ "key": "fast", "value": "yes", @@ -546,7 +546,7 @@ mod tests { }], }, { - "metrics": ["c:transactions/counter@none"], + "metrics": ["c:spans/counter@none"], "tags": [{ "key": "fast", "value": "no", @@ -566,7 +566,7 @@ mod tests { timestamp: UnixTimestamp(1597976302), width: 0, name: MetricName( - "c:transactions/counter@none", + "c:spans/counter@none", ), value: Counter( 1.0, @@ -603,7 +603,7 @@ mod tests { "metrics": [ { "category": "transaction", - "mri": "c:transactions/counter@none", + "mri": "c:spans/counter@none", "tags": [ {"key": "flag", "field": "event.extra.flag"}, ] @@ -622,7 +622,7 @@ mod tests { timestamp: UnixTimestamp(1597976302), width: 0, name: MetricName( - "c:transactions/counter@none", + "c:spans/counter@none", ), value: Counter( 1.0, diff --git a/relay-server/src/metrics_extraction/mod.rs b/relay-server/src/metrics_extraction/mod.rs index 4980fe01071..26cf3894697 100644 --- a/relay-server/src/metrics_extraction/mod.rs +++ b/relay-server/src/metrics_extraction/mod.rs @@ -4,8 +4,21 @@ use 
relay_metrics::Bucket; pub mod event; pub mod generic; pub mod sessions; -pub mod transactions; pub trait IntoMetric { fn into_metric(self, timestamp: UnixTimestamp) -> Bucket; } + +/// Metrics extracted from an envelope. +/// +/// Metric extraction derives pre-computed metrics (time series data) from payload items in +/// envelopes. Depending on their semantics, these metrics can be ingested into the same project as +/// the envelope or a different project. +#[derive(Debug, Default)] +pub struct ExtractedMetrics { + /// Metrics associated with the project of the envelope. + pub project_metrics: Vec, + + /// Metrics associated with the project of the trace parent. + pub sampling_metrics: Vec, +} diff --git a/relay-server/src/metrics_extraction/transactions/mod.rs b/relay-server/src/metrics_extraction/transactions/mod.rs deleted file mode 100644 index 14abe7b00f7..00000000000 --- a/relay-server/src/metrics_extraction/transactions/mod.rs +++ /dev/null @@ -1,755 +0,0 @@ -use std::collections::BTreeMap; - -use relay_base_schema::events::EventType; -use relay_base_schema::project::ProjectId; -use relay_common::time::UnixTimestamp; -use relay_dynamic_config::CombinedMetricExtractionConfig; -use relay_event_schema::protocol::Event; -use relay_metrics::Bucket; -use relay_sampling::evaluation::SamplingDecision; - -use crate::metrics_extraction::IntoMetric; -use crate::metrics_extraction::generic; -use crate::metrics_extraction::transactions::types::{ - CommonTag, CommonTags, ExtractMetricsError, TransactionCPRTags, TransactionMetric, -}; - -pub mod types; - -/// Metrics extracted from an envelope. -/// -/// Metric extraction derives pre-computed metrics (time series data) from payload items in -/// envelopes. Depending on their semantics, these metrics can be ingested into the same project as -/// the envelope or a different project. -#[derive(Debug, Default)] -pub struct ExtractedMetrics { - /// Metrics associated with the project of the envelope. 
- pub project_metrics: Vec, - - /// Metrics associated with the project of the trace parent. - pub sampling_metrics: Vec, -} - -/// A utility that extracts metrics from transactions. -pub struct TransactionExtractor<'a> { - pub generic_config: Option>, - pub transaction_from_dsc: Option<&'a str>, - pub sampling_decision: SamplingDecision, - pub target_project_id: ProjectId, -} - -impl TransactionExtractor<'_> { - pub fn extract(&self, event: &Event) -> Result { - let mut metrics = ExtractedMetrics::default(); - - if event.ty.value() != Some(&EventType::Transaction) { - return Ok(metrics); - } - - let Some(&end) = event.timestamp.value() else { - relay_log::debug!("failed to extract the end timestamp from the event"); - return Err(ExtractMetricsError::MissingTimestamp); - }; - - let Some(timestamp) = UnixTimestamp::from_datetime(end.into_inner()) else { - relay_log::debug!("event timestamp is not a valid unix timestamp"); - return Err(ExtractMetricsError::InvalidTimestamp); - }; - - // Internal usage counter - metrics - .project_metrics - .push(TransactionMetric::Usage.into_metric(timestamp)); - - let root_counter_tags = { - let mut universal_tags = CommonTags(BTreeMap::default()); - if let Some(transaction_from_dsc) = self.transaction_from_dsc { - universal_tags - .0 - .insert(CommonTag::Transaction, transaction_from_dsc.to_owned()); - } - - TransactionCPRTags { - decision: self.sampling_decision.to_string(), - target_project_id: self.target_project_id, - universal_tags, - } - }; - // Count the transaction towards the root - metrics.sampling_metrics.push( - TransactionMetric::CountPerRootProject { - tags: root_counter_tags, - } - .into_metric(timestamp), - ); - - // Apply shared tags from generic metric extraction. Transaction metrics will adopt generic - // metric extraction, after which this is done automatically. 
- if let Some(generic_config) = self.generic_config { - generic::tmp_apply_tags(&mut metrics.project_metrics, event, generic_config.tags()); - generic::tmp_apply_tags(&mut metrics.sampling_metrics, event, generic_config.tags()); - } - - Ok(metrics) - } -} - -#[cfg(test)] -mod tests { - use relay_dynamic_config::{ - CombinedMetricExtractionConfig, MetricExtractionConfig, TagMapping, - TransactionMetricsConfig, - }; - use relay_event_normalization::{ - CombinedMeasurementsConfig, MeasurementsConfig, NormalizationConfig, - PerformanceScoreConfig, PerformanceScoreProfile, PerformanceScoreWeightedComponent, - normalize_event, - }; - use relay_protocol::{Annotated, RuleCondition}; - - use super::*; - - #[test] - fn test_extract_transaction_metrics() { - let json = r#" - { - "type": "transaction", - "platform": "javascript", - "start_timestamp": "2021-04-26T07:59:01+0100", - "timestamp": "2021-04-26T08:00:00+0100", - "release": "1.2.3", - "dist": "foo ", - "environment": "fake_environment", - "transaction": "gEt /api/:version/users/", - "transaction_info": {"source": "custom"}, - "user": { - "id": "user123", - "geo": { - "country_code": "US" - } - }, - "tags": { - "fOO": "bar", - "bogus": "absolutely", - "device.class": "1" - }, - "measurements": { - "foo": {"value": 420.69}, - "lcp": {"value": 3000.0, "unit": "millisecond"} - }, - "contexts": { - "trace": { - "trace_id": "ff62a8b040f340bda5d830223def1d81", - "span_id": "bd429c44b67a3eb4", - "op": "mYOp", - "status": "ok" - }, - "browser": { - "name": "Chrome" - }, - "os": { - "name": "Windows" - } - }, - "request": { - "method": "post" - }, - "spans": [ - { - "description": "", - "op": "react.mount", - "parent_span_id": "bd429c44b67a3eb4", - "span_id": "8f5a2b8768cafb4e", - "start_timestamp": 1597976300.0000000, - "timestamp": 1597976302.0000000, - "trace_id": "ff62a8b040f340bda5d830223def1d81" - } - ] - } - "#; - - let mut event = Annotated::from_json(json).unwrap(); - - let breakdowns_config: 
relay_event_normalization::BreakdownsConfig = serde_json::from_str( - r#"{ - "span_ops": { - "type": "spanOperations", - "matches": ["react.mount"] - } - }"#, - ) - .unwrap(); - - relay_event_normalization::validate_event( - &mut event, - &relay_event_normalization::EventValidationConfig::default(), - ) - .unwrap(); - - normalize_event( - &mut event, - &NormalizationConfig { - breakdowns_config: Some(&breakdowns_config), - enrich_spans: false, - performance_score: Some(&PerformanceScoreConfig { - profiles: vec![PerformanceScoreProfile { - name: Some("".into()), - score_components: vec![PerformanceScoreWeightedComponent { - measurement: "lcp".into(), - weight: 0.5.try_into().unwrap(), - p10: 2.0.try_into().unwrap(), - p50: 3.0.try_into().unwrap(), - optional: false, - }], - condition: Some(RuleCondition::all()), - version: Some("alpha".into()), - }], - }), - ..Default::default() - }, - ); - - let extractor = TransactionExtractor { - generic_config: None, - transaction_from_dsc: Some("test_transaction"), - sampling_decision: SamplingDecision::Keep, - target_project_id: ProjectId::new(4711), - }; - - let extracted = extractor.extract(event.value().unwrap()).unwrap(); - insta::assert_debug_snapshot!(event.value().unwrap().spans, @r###" - [ - Span { - timestamp: Timestamp( - 2020-08-21T02:18:22Z, - ), - start_timestamp: Timestamp( - 2020-08-21T02:18:20Z, - ), - exclusive_time: 2000.0, - op: "react.mount", - span_id: SpanId("8f5a2b8768cafb4e"), - parent_span_id: SpanId("bd429c44b67a3eb4"), - trace_id: TraceId("ff62a8b040f340bda5d830223def1d81"), - segment_id: ~, - is_segment: ~, - is_remote: ~, - status: ~, - description: "", - tags: ~, - origin: ~, - profile_id: ~, - data: ~, - links: ~, - sentry_tags: ~, - received: ~, - measurements: ~, - platform: ~, - was_transaction: ~, - kind: ~, - performance_issues_spans: ~, - other: {}, - }, - ] - "###); - - insta::assert_debug_snapshot!(extracted.project_metrics, @r#" - [ - Bucket { - timestamp: UnixTimestamp(1619420400), - 
width: 0, - name: MetricName( - "c:transactions/usage@none", - ), - value: Counter( - 1.0, - ), - tags: {}, - metadata: BucketMetadata { - merges: 1, - received_at: Some( - UnixTimestamp(0), - ), - extracted_from_indexed: false, - }, - }, - ] - "#); - } - - #[test] - fn test_metric_measurement_units() { - let json = r#" - { - "type": "transaction", - "timestamp": "2021-04-26T08:00:00+0100", - "start_timestamp": "2021-04-26T07:59:01+0100", - "measurements": { - "fcp": {"value": 1.1}, - "stall_count": {"value": 3.3}, - "foo": {"value": 8.8} - }, - "contexts": { - "trace": { - "trace_id": "4c79f60c11214eb38604f4ae0781bfb2", - "span_id": "fa90fdead5f74053" - } - } - } - "#; - - // Normalize first, to make sure the units are correct: - let mut event = Annotated::from_json(json).unwrap(); - normalize_event(&mut event, &NormalizationConfig::default()); - - let extractor = TransactionExtractor { - generic_config: None, - transaction_from_dsc: Some("test_transaction"), - sampling_decision: SamplingDecision::Keep, - target_project_id: ProjectId::new(4711), - }; - - let extracted = extractor.extract(event.value().unwrap()).unwrap(); - insta::assert_debug_snapshot!(extracted.project_metrics, @r#" - [ - Bucket { - timestamp: UnixTimestamp(1619420400), - width: 0, - name: MetricName( - "c:transactions/usage@none", - ), - value: Counter( - 1.0, - ), - tags: {}, - metadata: BucketMetadata { - merges: 1, - received_at: Some( - UnixTimestamp(0), - ), - extracted_from_indexed: false, - }, - }, - ] - "#); - } - - #[test] - fn test_metric_measurement_unit_overrides() { - let json = r#"{ - "type": "transaction", - "timestamp": "2021-04-26T08:00:00+0100", - "start_timestamp": "2021-04-26T07:59:01+0100", - "measurements": { - "fcp": {"value": 1.1, "unit": "second"}, - "lcp": {"value": 2.2, "unit": "none"} - }, - "contexts": { - "trace": { - "trace_id": "4c79f60c11214eb38604f4ae0781bfb2", - "span_id": "fa90fdead5f74053" - } - } - }"#; - - // Normalize first, to make sure the units are 
correct: - let mut event = Annotated::from_json(json).unwrap(); - normalize_event(&mut event, &NormalizationConfig::default()); - - let extractor = TransactionExtractor { - generic_config: None, - transaction_from_dsc: Some("test_transaction"), - sampling_decision: SamplingDecision::Keep, - target_project_id: ProjectId::new(4711), - }; - - let extracted = extractor.extract(event.value().unwrap()).unwrap(); - insta::assert_debug_snapshot!(extracted.project_metrics, @r#" - [ - Bucket { - timestamp: UnixTimestamp(1619420400), - width: 0, - name: MetricName( - "c:transactions/usage@none", - ), - value: Counter( - 1.0, - ), - tags: {}, - metadata: BucketMetadata { - merges: 1, - received_at: Some( - UnixTimestamp(0), - ), - extracted_from_indexed: false, - }, - }, - ] - "#); - } - - #[test] - fn test_custom_measurements() { - let json = r#" - { - "type": "transaction", - "transaction": "foo", - "start_timestamp": "2021-04-26T08:00:00+0100", - "timestamp": "2021-04-26T08:00:02+0100", - "measurements": { - "a_custom1": {"value": 41}, - "fcp": {"value": 0.123, "unit": "millisecond"}, - "g_custom2": {"value": 42, "unit": "second"}, - "h_custom3": {"value": 43} - }, - "contexts": { - "trace": { - "trace_id": "4c79f60c11214eb38604f4ae0781bfb2", - "span_id": "fa90fdead5f74053" - }} - } - "#; - - let mut event = Annotated::from_json(json).unwrap(); - - // Normalize first, to make sure the units are correct: - let measurements_config: MeasurementsConfig = serde_json::from_value(serde_json::json!( - { - "builtinMeasurements": [{"name": "fcp", "unit": "millisecond"}], - "maxCustomMeasurements": 2 - } - )) - .unwrap(); - - let config = CombinedMeasurementsConfig::new(Some(&measurements_config), None); - - normalize_event( - &mut event, - &NormalizationConfig { - measurements: Some(config), - ..Default::default() - }, - ); - - let extractor = TransactionExtractor { - generic_config: None, - transaction_from_dsc: Some("test_transaction"), - sampling_decision: SamplingDecision::Keep, 
- target_project_id: ProjectId::new(4711), - }; - - let extracted = extractor.extract(event.value().unwrap()).unwrap(); - insta::assert_debug_snapshot!(extracted.project_metrics, @r#" - [ - Bucket { - timestamp: UnixTimestamp(1619420402), - width: 0, - name: MetricName( - "c:transactions/usage@none", - ), - value: Counter( - 1.0, - ), - tags: {}, - metadata: BucketMetadata { - merges: 1, - received_at: Some( - UnixTimestamp(0), - ), - extracted_from_indexed: false, - }, - }, - ] - "#); - } - - #[test] - fn test_span_tags() { - // Status is normalized upstream in the normalization step. - let json = r#" - { - "type": "transaction", - "timestamp": "2021-04-26T08:00:00+0100", - "start_timestamp": "2021-04-26T07:59:01+0100", - "contexts": { - "trace": { - "status": "ok" - } - }, - "spans": [ - { - "description": "", - "op": "react.mount", - "parent_span_id": "8f5a2b8768cafb4e", - "span_id": "bd429c44b67a3eb4", - "start_timestamp": 1597976300.0000000, - "timestamp": 1597976302.0000000, - "trace_id": "ff62a8b040f340bda5d830223def1d81" - }, - { - "description": "POST http://sth.subdomain.domain.tld:targetport/api/hi", - "op": "http.client", - "parent_span_id": "8f5a2b8768cafb4e", - "span_id": "bd2eb23da2beb459", - "start_timestamp": 1597976300.0000000, - "timestamp": 1597976302.0000000, - "trace_id": "ff62a8b040f340bda5d830223def1d81", - "status": "ok", - "data": { - "http.method": "POST", - "http.response.status_code": "200" - } - } - ] - } - "#; - - let event = Annotated::from_json(json).unwrap(); - - let extractor = TransactionExtractor { - generic_config: None, - transaction_from_dsc: Some("test_transaction"), - sampling_decision: SamplingDecision::Keep, - target_project_id: ProjectId::new(4711), - }; - - let extracted = extractor.extract(event.value().unwrap()).unwrap(); - insta::assert_debug_snapshot!(extracted.project_metrics, @r#" - [ - Bucket { - timestamp: UnixTimestamp(1619420400), - width: 0, - name: MetricName( - "c:transactions/usage@none", - ), - value: 
Counter( - 1.0, - ), - tags: {}, - metadata: BucketMetadata { - merges: 1, - received_at: Some( - UnixTimestamp(0), - ), - extracted_from_indexed: false, - }, - }, - ] - "#); - } - - #[test] - fn test_root_counter_keep() { - let json = r#" - { - "type": "transaction", - "timestamp": "2021-04-26T08:00:00+0100", - "start_timestamp": "2021-04-26T07:59:01+0100", - "transaction": "ignored", - "contexts": { - "trace": { - "status": "ok" - } - } - } - "#; - - let event = Annotated::from_json(json).unwrap(); - - let extractor = TransactionExtractor { - generic_config: None, - transaction_from_dsc: Some("root_transaction"), - sampling_decision: SamplingDecision::Keep, - target_project_id: ProjectId::new(4711), - }; - - let extracted = extractor.extract(event.value().unwrap()).unwrap(); - insta::assert_debug_snapshot!(extracted.sampling_metrics, @r###" - [ - Bucket { - timestamp: UnixTimestamp(1619420400), - width: 0, - name: MetricName( - "c:transactions/count_per_root_project@none", - ), - value: Counter( - 1.0, - ), - tags: { - "decision": "keep", - "target_project_id": "4711", - "transaction": "root_transaction", - }, - metadata: BucketMetadata { - merges: 1, - received_at: Some( - UnixTimestamp(0), - ), - extracted_from_indexed: false, - }, - }, - ] - "###); - } - - #[test] - fn test_parse_transaction_name_strategy() { - use relay_dynamic_config::AcceptTransactionNames; - - for (config_str, expected_strategy) in [ - (r#"{}"#, AcceptTransactionNames::ClientBased), - ( - r#"{"acceptTransactionNames": "unknown-strategy"}"#, - AcceptTransactionNames::ClientBased, - ), - ( - r#"{"acceptTransactionNames": "strict"}"#, - AcceptTransactionNames::Strict, - ), - ( - r#"{"acceptTransactionNames": "clientBased"}"#, - AcceptTransactionNames::ClientBased, - ), - ] { - let config: TransactionMetricsConfig = serde_json::from_str(config_str).unwrap(); - assert_eq!(config.deprecated1, expected_strategy, "{config_str}"); - } - } - - #[test] - fn test_computed_metrics() { - let json = r#"{ 
- "type": "transaction", - "timestamp": 1619420520, - "start_timestamp": 1619420400, - "contexts": { - "trace": { - "trace_id": "4c79f60c11214eb38604f4ae0781bfb2", - "span_id": "fa90fdead5f74053" - } - }, - "measurements": { - "frames_frozen": { - "value": 2 - }, - "frames_slow": { - "value": 1 - }, - "frames_total": { - "value": 4 - }, - "stall_total_time": { - "value": 4, - "unit": "millisecond" - } - } - }"#; - - let mut event = Annotated::from_json(json).unwrap(); - // Normalize first, to make sure that the metrics were computed: - normalize_event(&mut event, &NormalizationConfig::default()); - - let extractor = TransactionExtractor { - generic_config: None, - transaction_from_dsc: Some("test_transaction"), - sampling_decision: SamplingDecision::Keep, - target_project_id: ProjectId::new(4711), - }; - - let extracted = extractor.extract(event.value().unwrap()).unwrap(); - - let metrics_names: Vec<_> = extracted - .project_metrics - .into_iter() - .map(|m| m.name) - .collect(); - - insta::assert_debug_snapshot!(metrics_names, @r#" - [ - MetricName( - "c:transactions/usage@none", - ), - ] - "#); - } - - #[test] - fn test_conditional_tagging() { - let event = Annotated::from_json( - r#"{ - "type": "transaction", - "platform": "javascript", - "transaction": "foo", - "start_timestamp": "2021-04-26T08:00:00+0100", - "timestamp": "2021-04-26T08:00:02+0100", - "measurements": { - "lcp": {"value": 41, "unit": "millisecond"} - } - }"#, - ) - .unwrap(); - - let generic_tags: Vec = serde_json::from_str( - r#"[ - { - "metrics": ["d:transactions/duration@millisecond"], - "tags": [ - { - "condition": {"op": "gte", "name": "event.duration", "value": 9001}, - "key": "satisfaction", - "value": "frustrated" - }, - { - "condition": {"op": "gte", "name": "event.duration", "value": 666}, - "key": "satisfaction", - "value": "tolerated" - }, - { - "condition": {"op": "and", "inner": []}, - "key": "satisfaction", - "value": "satisfied" - } - ] - } - ]"#, - ) - .unwrap(); - let 
generic_config = MetricExtractionConfig { - version: 1, - tags: generic_tags, - ..Default::default() - }; - let combined_config = CombinedMetricExtractionConfig::from(&generic_config); - - let extractor = TransactionExtractor { - generic_config: Some(combined_config), - transaction_from_dsc: Some("test_transaction"), - sampling_decision: SamplingDecision::Keep, - target_project_id: ProjectId::new(4711), - }; - - let extracted = extractor.extract(event.value().unwrap()).unwrap(); - insta::assert_debug_snapshot!(extracted.project_metrics, @r#" - [ - Bucket { - timestamp: UnixTimestamp(1619420402), - width: 0, - name: MetricName( - "c:transactions/usage@none", - ), - value: Counter( - 1.0, - ), - tags: {}, - metadata: BucketMetadata { - merges: 1, - received_at: Some( - UnixTimestamp(0), - ), - extracted_from_indexed: false, - }, - }, - ] - "#); - } -} diff --git a/relay-server/src/metrics_extraction/transactions/types.rs b/relay-server/src/metrics_extraction/transactions/types.rs deleted file mode 100644 index a9ce5f06c0a..00000000000 --- a/relay-server/src/metrics_extraction/transactions/types.rs +++ /dev/null @@ -1,160 +0,0 @@ -use std::borrow::Cow; -use std::collections::BTreeMap; -use std::fmt::Display; - -use relay_base_schema::project::ProjectId; -use relay_common::time::UnixTimestamp; -use relay_metrics::{ - Bucket, BucketMetadata, BucketValue, MetricNamespace, MetricResourceIdentifier, MetricUnit, -}; - -use crate::metrics_extraction::IntoMetric; - -/// Enumerates the metrics extracted from transaction payloads. -#[derive(Clone, Debug, PartialEq)] -pub enum TransactionMetric { - /// An internal counter metric that tracks transaction usage. - /// - /// This metric does not have any of the common tags for the performance product, but instead - /// carries internal information for accounting purposes. - Usage, - /// An internal counter metric used to compute dynamic sampling biases. - /// - /// See ''. 
- CountPerRootProject { tags: TransactionCPRTags }, -} - -impl IntoMetric for TransactionMetric { - fn into_metric(self, timestamp: UnixTimestamp) -> Bucket { - let namespace = MetricNamespace::Transactions; - - let (name, value, unit, tags) = match self { - Self::Usage => ( - Cow::Borrowed("usage"), - BucketValue::counter(1.into()), - MetricUnit::None, - Default::default(), - ), - Self::CountPerRootProject { tags } => ( - Cow::Borrowed("count_per_root_project"), - BucketValue::counter(1.into()), - MetricUnit::None, - tags.into(), - ), - }; - - let mri = MetricResourceIdentifier { - ty: value.ty(), - namespace, - name, - unit, - }; - - // For extracted metrics we assume the `received_at` timestamp is equivalent to the time - // in which the metric is extracted. - let received_at = if cfg!(not(test)) { - UnixTimestamp::now() - } else { - UnixTimestamp::from_secs(0) - }; - - Bucket { - timestamp, - width: 0, - name: mri.to_string().into(), - value, - tags, - metadata: BucketMetadata::new(received_at), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct TransactionCPRTags { - pub decision: String, - pub target_project_id: ProjectId, - pub universal_tags: CommonTags, -} - -impl From for BTreeMap { - fn from(value: TransactionCPRTags) -> Self { - let mut map: BTreeMap = value.universal_tags.into(); - map.insert("decision".to_owned(), value.decision); - map.insert( - "target_project_id".to_owned(), - value.target_project_id.to_string(), - ); - map - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd)] -pub struct CommonTags(pub BTreeMap); - -impl From for BTreeMap { - fn from(value: CommonTags) -> Self { - value - .0 - .into_iter() - .map(|(k, v)| (k.to_string(), v)) - .collect() - } -} - -/// The most common tags for transaction metrics. 
-#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum CommonTag { - Release, - Dist, - Environment, - Transaction, - Platform, - TransactionStatus, - TransactionOp, - HttpMethod, - HttpStatusCode, - BrowserName, - OsName, - GeoCountryCode, - UserSubregion, - DeviceClass, - Custom(String), -} - -impl Display for CommonTag { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let name = match self { - CommonTag::Release => "release", - CommonTag::Dist => "dist", - CommonTag::Environment => "environment", - CommonTag::Transaction => "transaction", - CommonTag::Platform => "platform", - CommonTag::TransactionStatus => "transaction.status", - CommonTag::TransactionOp => "transaction.op", - CommonTag::HttpMethod => "http.method", - CommonTag::HttpStatusCode => "http.status_code", - CommonTag::BrowserName => "browser.name", - CommonTag::OsName => "os.name", - CommonTag::GeoCountryCode => "geo.country_code", - CommonTag::UserSubregion => "user.geo.subregion", - CommonTag::DeviceClass => "device.class", - CommonTag::Custom(s) => s, - }; - write!(f, "{name}") - } -} - -/// Error returned from transaction metrics extraction. -#[derive(Clone, Copy, Debug, Eq, PartialEq, thiserror::Error)] -pub enum ExtractMetricsError { - /// The start or end timestamps are missing from the event payload. - #[error("no valid timestamp could be found in the event")] - MissingTimestamp, - /// The event timestamp is outside the supported range. - /// - /// The supported range is derived from the - /// [`max_secs_in_past`](relay_metrics::aggregator::AggregatorConfig::max_secs_in_past) and - /// [`max_secs_in_future`](relay_metrics::aggregator::AggregatorConfig::max_secs_in_future) configuration options. 
- #[error("timestamp too old or too far in the future")] - InvalidTimestamp, -} diff --git a/relay-server/src/processing/mod.rs b/relay-server/src/processing/mod.rs index 21d835fa7e2..e67a41fdabd 100644 --- a/relay-server/src/processing/mod.rs +++ b/relay-server/src/processing/mod.rs @@ -12,7 +12,7 @@ use relay_quotas::RateLimits; use relay_sampling::evaluation::ReservoirCounters; use crate::managed::{Counted, Managed, ManagedEnvelope, Rejected}; -use crate::metrics_extraction::transactions::ExtractedMetrics; +use crate::metrics_extraction::ExtractedMetrics; use crate::services::projects::project::ProjectInfo; mod common; diff --git a/relay-server/src/processing/sessions/process.rs b/relay-server/src/processing/sessions/process.rs index 3015c7fa020..40436e82605 100644 --- a/relay-server/src/processing/sessions/process.rs +++ b/relay-server/src/processing/sessions/process.rs @@ -8,7 +8,7 @@ use relay_quotas::DataCategory; use crate::envelope::Item; use crate::managed::{Managed, RecordKeeper}; use crate::metrics_extraction; -use crate::metrics_extraction::transactions::ExtractedMetrics; +use crate::metrics_extraction::ExtractedMetrics; use crate::processing::Context; use crate::processing::sessions::{Error, ExpandedSessions, Result, SerializedSessions}; use crate::services::processor::MINIMUM_CLOCK_DRIFT; diff --git a/relay-server/src/processing/spans/dynamic_sampling.rs b/relay-server/src/processing/spans/dynamic_sampling.rs index 908c555d6c7..e7083848a44 100644 --- a/relay-server/src/processing/spans/dynamic_sampling.rs +++ b/relay-server/src/processing/spans/dynamic_sampling.rs @@ -13,7 +13,7 @@ use relay_sampling::{DynamicSamplingContext, SamplingConfig}; use crate::envelope::ClientName; use crate::managed::Managed; -use crate::metrics_extraction::transactions::ExtractedMetrics; +use crate::metrics_extraction::ExtractedMetrics; use crate::processing::Context; use crate::processing::spans::{ Error, ExpandedSpan, ExpandedSpans, Indexed, Result, SerializedSpans, 
diff --git a/relay-server/src/processing/spans/mod.rs b/relay-server/src/processing/spans/mod.rs index fd10145d819..25cfb829cc7 100644 --- a/relay-server/src/processing/spans/mod.rs +++ b/relay-server/src/processing/spans/mod.rs @@ -14,7 +14,7 @@ use crate::integrations::Integration; use crate::managed::{ Counted, Managed, ManagedEnvelope, ManagedResult, OutcomeError, Quantities, Rejected, }; -use crate::metrics_extraction::transactions::ExtractedMetrics; +use crate::metrics_extraction::ExtractedMetrics; use crate::processing::trace_attachments::forward::attachment_to_item; use crate::processing::trace_attachments::process::ScrubAttachmentError; use crate::processing::trace_attachments::types::ExpandedAttachment; diff --git a/relay-server/src/processing/transactions/extraction.rs b/relay-server/src/processing/transactions/extraction.rs index d9cbacde816..51cde420d0c 100644 --- a/relay-server/src/processing/transactions/extraction.rs +++ b/relay-server/src/processing/transactions/extraction.rs @@ -1,9 +1,5 @@ -use std::sync::Once; - use relay_base_schema::project::ProjectId; -use relay_dynamic_config::{ - CombinedMetricExtractionConfig, ErrorBoundary, Feature, MetricExtractionGroups, -}; +use relay_dynamic_config::{CombinedMetricExtractionConfig, ErrorBoundary, MetricExtractionGroups}; use relay_event_normalization::span::tag_extraction; use relay_event_schema::protocol::{Event, Span}; use relay_metrics::MetricNamespace; @@ -11,7 +7,6 @@ use relay_protocol::Annotated; use relay_sampling::DynamicSamplingContext; use relay_sampling::evaluation::SamplingDecision; -use crate::metrics_extraction::transactions::TransactionExtractor; use crate::processing::Context; use crate::processing::utils::event::EventMetricsExtracted; use crate::services::processor::{ProcessingError, ProcessingExtractedMetrics}; @@ -95,33 +90,6 @@ pub fn extract_metrics( CombinedMetricExtractionConfig::new(global_config, config) }; - // Require a valid transaction metrics config. 
- let tx_config = match &ctx.project_info.config.transaction_metrics { - Some(ErrorBoundary::Ok(tx_config)) => tx_config, - Some(ErrorBoundary::Err(e)) => { - relay_log::debug!("Failed to parse legacy transaction metrics config: {e}"); - return Ok(EventMetricsExtracted(false)); - } - None => { - relay_log::debug!("Legacy transaction metrics config is missing"); - return Ok(EventMetricsExtracted(false)); - } - }; - - if !tx_config.is_enabled() { - static TX_CONFIG_ERROR: Once = Once::new(); - TX_CONFIG_ERROR.call_once(|| { - if ctx.config.processing_enabled() { - relay_log::error!( - "Processing Relay outdated, received tx config in version {}, which is not supported", - tx_config.version - ); - } - }); - - return Ok(EventMetricsExtracted(false)); - } - let metrics = crate::metrics_extraction::event::extract_metrics( event, crate::metrics_extraction::event::ExtractMetricsConfig { @@ -136,21 +104,7 @@ pub fn extract_metrics( transaction_from_dsc: dsc.and_then(|dsc| dsc.transaction.as_deref()), }, ); - extracted_metrics.extend(metrics, Some(sampling_decision)); - if !ctx.project_info.has_feature(Feature::DiscardTransaction) { - let transaction_from_dsc = dsc.and_then(|dsc| dsc.transaction.as_deref()); - - let extractor = TransactionExtractor { - generic_config: Some(combined_config), - transaction_from_dsc, - sampling_decision, - target_project_id: project_id, - }; - - extracted_metrics.extend(extractor.extract(event)?, Some(sampling_decision)); - } - Ok(EventMetricsExtracted(true)) } diff --git a/relay-server/src/processing/transactions/process.rs b/relay-server/src/processing/transactions/process.rs index ad1e5de5500..5525e425c8a 100644 --- a/relay-server/src/processing/transactions/process.rs +++ b/relay-server/src/processing/transactions/process.rs @@ -1,7 +1,6 @@ use std::sync::Arc; use relay_base_schema::events::EventType; -use relay_dynamic_config::ErrorBoundary; use relay_event_normalization::GeoIpLookup; use relay_event_schema::protocol::{Event, Metrics}; use 
relay_profiling::{ProfileError, ProfileType}; @@ -14,7 +13,7 @@ use smallvec::smallvec; use crate::envelope::Items; use crate::managed::{Counted, Managed, ManagedResult, Quantities, RecordKeeper, Rejected}; -use crate::metrics_extraction::transactions::ExtractedMetrics; +use crate::metrics_extraction::ExtractedMetrics; use crate::processing::spans::{Indexed, TotalAndIndexed}; use crate::processing::transactions::extraction::{self, ExtractMetricsContext}; use crate::processing::transactions::spans; @@ -300,9 +299,7 @@ async fn do_make_dynamic_sampling_decision( // but delay decision until inbound filters have been fully processed. // Also, we require transaction metrics to be enabled before sampling. let should_run = matches!(filters_status, FiltersStatus::Ok) || ctx.config.processing_enabled(); - - let can_extract_metrics = matches!(&ctx.project_info.config.transaction_metrics, Some(ErrorBoundary::Ok(c)) if c.is_enabled()); - if !(should_run && can_extract_metrics) { + if !should_run { return SamplingResult::Pending; } @@ -363,6 +360,20 @@ pub fn split_indexed_and_total( r.lenient(DataCategory::Transaction); r.lenient(DataCategory::Span); } + if work.flags.spans_rate_limited { + // This really is a bug; we ignore it here. + // + // Transactions are counted using a span metric, as transaction payloads should + // eventually be fully transformed into spans. But this is for now only the case for + // metrics, the payloads are lagging behind. + // + // If spans are rate limited, there is no metric to attach the transaction to -> + // we're losing a transaction here. + // + // If and once we apply span rate limits to transactions this case will also be fixed, + // as it can no longer happen.
+ r.lenient(DataCategory::Transaction); + } (Box::new(work.into_indexed()), metrics.into_inner()) })) diff --git a/relay-server/src/processing/transactions/types/expanded.rs b/relay-server/src/processing/transactions/types/expanded.rs index a7c5ed27223..7ae220f0db3 100644 --- a/relay-server/src/processing/transactions/types/expanded.rs +++ b/relay-server/src/processing/transactions/types/expanded.rs @@ -10,7 +10,7 @@ use smallvec::smallvec; use crate::Envelope; use crate::envelope::{ContentType, EnvelopeHeaders, Item, ItemType, Items}; use crate::managed::{Counted, Managed, Quantities, Rejected}; -use crate::metrics_extraction::transactions::ExtractedMetrics; +use crate::metrics_extraction::ExtractedMetrics; use crate::processing::spans::{Indexed, TotalAndIndexed}; use crate::processing::transactions::Error; use crate::processing::transactions::process::split_indexed_and_total; diff --git a/relay-server/src/processing/utils/event.rs b/relay-server/src/processing/utils/event.rs index eaa9764dd34..0f732ad120a 100644 --- a/relay-server/src/processing/utils/event.rs +++ b/relay-server/src/processing/utils/event.rs @@ -218,9 +218,8 @@ pub fn normalize( let request_meta = headers.meta(); let client_ipaddr = request_meta.client_addr().map(IpAddr::from); - let transaction_aggregator_config = ctx - .config - .aggregator_config_for(MetricNamespace::Transactions); + // Inherit from spans, as transactions no longer produce metrics. 
+ let transaction_aggregator_config = ctx.config.aggregator_config_for(MetricNamespace::Spans); let ai_model_costs = ctx.global_config.ai_model_costs.as_ref().ok(); let http_span_allowed_hosts = ctx.global_config.options.http_span_allowed_hosts.as_slice(); diff --git a/relay-server/src/services/processor.rs b/relay-server/src/services/processor.rs index 4fb7b1e17b4..c78a4a6a710 100644 --- a/relay-server/src/services/processor.rs +++ b/relay-server/src/services/processor.rs @@ -44,8 +44,7 @@ use crate::extractors::{PartialDsn, RequestMeta, RequestTrust}; use crate::integrations::Integration; use crate::managed::{InvalidProcessingGroupType, ManagedEnvelope, TypedEnvelope}; use crate::metrics::{MetricOutcomes, MetricsLimiter, MinimalTrackableBucket}; -use crate::metrics_extraction::transactions::ExtractedMetrics; -use crate::metrics_extraction::transactions::types::ExtractMetricsError; +use crate::metrics_extraction::ExtractedMetrics; use crate::processing::attachments::AttachmentProcessor; use crate::processing::check_ins::CheckInsProcessor; use crate::processing::client_reports::ClientReportsProcessor; @@ -597,9 +596,6 @@ pub enum ProcessingError { #[error("event filtered with reason: {0:?}")] EventFiltered(FilterStatKey), - #[error("missing or invalid required event timestamp")] - InvalidTimestamp, - #[error("could not serialize event payload")] SerializeFailed(#[source] serde_json::Error), @@ -643,7 +639,6 @@ impl ProcessingError { Self::UnsupportedSecurityType => Some(Outcome::Filtered(FilterStatKey::InvalidCsp)), Self::InvalidNelReport(_) => Some(Outcome::Invalid(DiscardReason::InvalidJson)), Self::InvalidTransaction => Some(Outcome::Invalid(DiscardReason::InvalidTransaction)), - Self::InvalidTimestamp => Some(Outcome::Invalid(DiscardReason::Timestamp)), Self::DuplicateItem(_) => Some(Outcome::Invalid(DiscardReason::DuplicateItem)), Self::NoEventPayload => Some(Outcome::Invalid(DiscardReason::NoEventPayload)), Self::InvalidNintendoDyingMessage(_) => 
Some(Outcome::Invalid(DiscardReason::Payload)), @@ -690,16 +685,6 @@ impl From for ProcessingError { } } -impl From for ProcessingError { - fn from(error: ExtractMetricsError) -> Self { - match error { - ExtractMetricsError::MissingTimestamp | ExtractMetricsError::InvalidTimestamp => { - Self::InvalidTimestamp - } - } - } -} - impl From for ProcessingError { fn from(value: InvalidProcessingGroupType) -> Self { Self::InvalidProcessingGroup(Box::new(value)) @@ -784,18 +769,11 @@ impl ProcessingExtractedMetrics { // flag reset to `false`. let mut reset_extracted_from_indexed: SmallVec<[_; 2]> = smallvec![]; - for (namespace, limit, indexed) in [ - ( - MetricNamespace::Transactions, - &enforcement.event, - &enforcement.event_indexed, - ), - ( - MetricNamespace::Spans, - &enforcement.spans, - &enforcement.spans_indexed, - ), - ] { + for (namespace, limit, indexed) in [( + MetricNamespace::Spans, + &enforcement.spans, + &enforcement.spans_indexed, + )] { if limit.is_active() { drop_namespaces.push(namespace); } else if indexed.is_active() && !enforced_consistently { @@ -2334,45 +2312,43 @@ impl EnvelopeProcessorService { let over_accept_once = true; let mut rate_limits = RateLimits::new(); - for category in [DataCategory::Transaction, DataCategory::Span] { - let count = bucket_limiter.count(category); + let (category, count) = bucket_limiter.count(); - let timer = Instant::now(); - let mut is_limited = false; + let timer = Instant::now(); + let mut is_limited = false; - if let Some(count) = count { - match rate_limiter - .is_rate_limited(quotas, scoping.item(category), count, over_accept_once) - .await - { - Ok(limits) => { - is_limited = limits.is_limited(); - rate_limits.merge(limits) - } - Err(e) => { - relay_log::error!(error = &e as &dyn Error, "rate limiting error") - } + if let Some(count) = count { + match rate_limiter + .is_rate_limited(quotas, scoping.item(category), count, over_accept_once) + .await + { + Ok(limits) => { + is_limited = limits.is_limited(); + 
rate_limits.merge(limits) + } + Err(e) => { + relay_log::error!(error = &e as &dyn Error, "rate limiting error") } } - - relay_statsd::metric!( - timer(RelayTimers::RateLimitBucketsDuration) = timer.elapsed(), - category = category.name(), - limited = if is_limited { "true" } else { "false" }, - count = match count { - None => "none", - Some(0) => "0", - Some(1) => "1", - Some(1..=10) => "10", - Some(1..=25) => "25", - Some(1..=50) => "50", - Some(51..=100) => "100", - Some(101..=500) => "500", - _ => "> 500", - }, - ); } + relay_statsd::metric!( + timer(RelayTimers::RateLimitBucketsDuration) = timer.elapsed(), + category = category.name(), + limited = if is_limited { "true" } else { "false" }, + count = match count { + None => "none", + Some(0) => "0", + Some(1) => "1", + Some(1..=10) => "10", + Some(1..=25) => "25", + Some(1..=50) => "50", + Some(51..=100) => "100", + Some(101..=500) => "500", + _ => "> 500", + }, + ); + if rate_limits.is_limited() { let was_enforced = bucket_limiter.enforce_limits(&rate_limits, &self.inner.metric_outcomes); @@ -3318,7 +3294,7 @@ mod tests { let project_metrics = |scoping| ProjectBuckets { buckets: vec![Bucket { - name: "d:transactions/bar".into(), + name: "d:spans/bar".into(), value: BucketValue::Counter(FiniteF64::new(1.0).unwrap()), timestamp: UnixTimestamp::now(), tags: Default::default(), @@ -3687,10 +3663,7 @@ mod tests { .await; let mut item = Item::new(ItemType::Statsd); - item.set_payload( - ContentType::Text, - "transactions/foo:3182887624:4267882815|s", - ); + item.set_payload(ContentType::Text, "spans/foo:3182887624:4267882815|s"); for (source, expected_received_at) in [ ( BucketSource::External, diff --git a/relay-server/src/services/processor/metrics.rs b/relay-server/src/services/processor/metrics.rs index d91d9c20636..1fc03eb08c3 100644 --- a/relay-server/src/services/processor/metrics.rs +++ b/relay-server/src/services/processor/metrics.rs @@ -14,7 +14,6 @@ use crate::services::projects::project::ProjectInfo; pub 
fn is_valid_namespace(bucket: &Bucket) -> bool { match bucket.name.namespace() { MetricNamespace::Sessions => true, - MetricNamespace::Transactions => true, MetricNamespace::Spans => true, MetricNamespace::Custom => true, MetricNamespace::Unsupported => false, @@ -56,7 +55,6 @@ pub fn apply_project_info( fn is_metric_namespace_valid(state: &ProjectInfo, namespace: MetricNamespace) -> bool { match namespace { MetricNamespace::Sessions => true, - MetricNamespace::Transactions => true, MetricNamespace::Spans => true, MetricNamespace::Custom => state.has_feature(Feature::CustomMetrics), MetricNamespace::Unsupported => false, diff --git a/relay-server/src/services/processor/profile.rs b/relay-server/src/services/processor/profile.rs index af385e92d27..1484165fa20 100644 --- a/relay-server/src/services/processor/profile.rs +++ b/relay-server/src/services/processor/profile.rs @@ -59,7 +59,7 @@ mod tests { use crate::services::projects::project::ProjectInfo; use crate::testutils::create_test_processor; use insta::assert_debug_snapshot; - use relay_dynamic_config::{ErrorBoundary, Feature, GlobalConfig, TransactionMetricsConfig}; + use relay_dynamic_config::{Feature, GlobalConfig}; use relay_event_schema::protocol::{Event, EventId, ProfileContext}; use relay_protocol::Annotated; use relay_system::Addr; @@ -82,8 +82,6 @@ mod tests { let envelope = ManagedEnvelope::new(envelope, Addr::dummy()); let mut project_info = ProjectInfo::default().sanitized(false); - project_info.config.transaction_metrics = - Some(ErrorBoundary::Ok(TransactionMetricsConfig::new())); project_info.config.features.0.insert(Feature::Profiling); let global_config = GlobalConfig::default(); diff --git a/relay-server/src/services/processor/span.rs b/relay-server/src/services/processor/span.rs index 395c8bd74f3..dcfcafae61e 100644 --- a/relay-server/src/services/processor/span.rs +++ b/relay-server/src/services/processor/span.rs @@ -107,8 +107,7 @@ pub async fn process( relay_log::debug!("failed to normalize 
span: {}", e); return ItemAction::Drop(Outcome::Invalid(match e { ProcessingError::ProcessingFailed(ProcessingAction::InvalidTransaction(_)) - | ProcessingError::InvalidTransaction - | ProcessingError::InvalidTimestamp => DiscardReason::InvalidSpan, + | ProcessingError::InvalidTransaction => DiscardReason::InvalidSpan, _ => DiscardReason::Internal, })); }; diff --git a/relay-server/src/services/store.rs b/relay-server/src/services/store.rs index 606f2695cbf..dc19e08a96e 100644 --- a/relay-server/src/services/store.rs +++ b/relay-server/src/services/store.rs @@ -1737,7 +1737,6 @@ impl Message for KafkaMessage<'_> { KafkaMessage::UserReport(_) => "user_report", KafkaMessage::Metric { message, .. } => match message.name.namespace() { MetricNamespace::Sessions => "metric_sessions", - MetricNamespace::Transactions => "metric_transactions", MetricNamespace::Spans => "metric_spans", MetricNamespace::Custom => "metric_custom", MetricNamespace::Unsupported => "metric_unsupported", diff --git a/tests/integration/consts.py b/tests/integration/consts.py index d69a0f9a4c0..ce8d4cdea67 100644 --- a/tests/integration/consts.py +++ b/tests/integration/consts.py @@ -1,6 +1,2 @@ -# Minimum supported version for metric transaction by Relay. -TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION = 3 # Minimum supported version for generic metrics extraction by Relay. METRICS_EXTRACTION_MIN_SUPPORTED_VERSION = 4 -# Maximum supported version for metric transaction by Relay. 
-TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION = 6 diff --git a/tests/integration/fixtures/mini_sentry.py b/tests/integration/fixtures/mini_sentry.py index 9f3ced5f37e..75f31d27bfd 100644 --- a/tests/integration/fixtures/mini_sentry.py +++ b/tests/integration/fixtures/mini_sentry.py @@ -146,7 +146,6 @@ def basic_project_config( "$object": ["@password"], }, }, - "transactionMetrics": {"version": 3}, }, } diff --git a/tests/integration/test_cardinality_limiter.py b/tests/integration/test_cardinality_limiter.py index 13e85f8e996..1da70ac5c11 100644 --- a/tests/integration/test_cardinality_limiter.py +++ b/tests/integration/test_cardinality_limiter.py @@ -37,11 +37,11 @@ def test_cardinality_limits(mini_sentry, relay_with_processing, metrics_consumer project_id = 10000 cardinality_limits = [ { - "id": "transactions", + "id": "spans", "window": {"windowSeconds": 3600, "granularitySeconds": 600}, "limit": 1, "scope": "project", - "namespace": "transactions", + "namespace": "spans", }, { "id": "custom", @@ -56,8 +56,8 @@ def test_cardinality_limits(mini_sentry, relay_with_processing, metrics_consumer metrics_payload = "\n".join( [ - "transactions/foo@second:12|c", - "transactions/bar@second:23|c", + "spans/foo@second:12|c", + "spans/bar@second:23|c", "sessions/foo@second:12|c", "foo@second:12|c", "bar@second:23|c", @@ -69,7 +69,7 @@ def test_cardinality_limits(mini_sentry, relay_with_processing, metrics_consumer metrics = metrics_by_namespace(metrics_consumer, 4) assert len(metrics["custom"]) == 2 assert len(metrics["sessions"]) == 1 - assert len(metrics["transactions"]) == 1 + assert len(metrics["spans"]) == 1 @pytest.mark.parametrize("mode", [None, "enabled", "disabled", "passive"]) @@ -85,25 +85,25 @@ def test_cardinality_limits_global_config_mode( project_id = 10001 cardinality_limits = [ { - "id": "transactions", + "id": "spans", "window": {"windowSeconds": 3600, "granularitySeconds": 600}, "limit": 1, "scope": "project", - "namespace": "transactions", + "namespace": 
"spans", }, ] add_project_config(mini_sentry, project_id, cardinality_limits) - metrics_payload = "transactions/foo@second:12|c\ntransactions/bar@second:23|c" + metrics_payload = "spans/foo@second:12|c\nspans/bar@second:23|c" relay.send_metrics(project_id, metrics_payload) if mode in [None, "enabled"]: metrics = metrics_by_namespace(metrics_consumer, 1) - assert len(metrics["transactions"]) == 1 + assert len(metrics["spans"]) == 1 else: metrics = metrics_by_namespace(metrics_consumer, 2) - assert len(metrics["transactions"]) == 2 + assert len(metrics["spans"]) == 2 def test_cardinality_limits_passive_limit( @@ -115,19 +115,19 @@ def test_cardinality_limits_passive_limit( project_id = 10002 cardinality_limits = [ { - "id": "transactions_passive", + "id": "spans_passive", "passive": True, "window": {"windowSeconds": 3600, "granularitySeconds": 600}, "limit": 1, "scope": "project", - "namespace": "transactions", + "namespace": "spans", }, { - "id": "transactions_enforced", + "id": "spans_enforced", "window": {"windowSeconds": 3600, "granularitySeconds": 600}, "limit": 3, "scope": "project", - "namespace": "transactions", + "namespace": "spans", }, { "id": "custom", @@ -142,10 +142,10 @@ def test_cardinality_limits_passive_limit( metrics_payload = "\n".join( [ - "transactions/foo@second:11|c", - "transactions/bar@second:22|c", - "transactions/baz@second:33|c", - "transactions/lol@second:55|c", + "spans/foo@second:11|c", + "spans/bar@second:22|c", + "spans/baz@second:33|c", + "spans/lol@second:55|c", "sessions/foo@second:12|c", "foo@second:12|c", "bar@second:23|c", @@ -158,4 +158,4 @@ def test_cardinality_limits_passive_limit( assert len(metrics["custom"]) == 2 assert len(metrics["sessions"]) == 1 # The passive limit should be ignored, the non-passive limit still needs to be enforced. 
- assert len(metrics["transactions"]) == 3 + assert len(metrics["spans"]) == 3 diff --git a/tests/integration/test_dynamic_sampling.py b/tests/integration/test_dynamic_sampling.py index 0067110a04e..67d45abacad 100644 --- a/tests/integration/test_dynamic_sampling.py +++ b/tests/integration/test_dynamic_sampling.py @@ -3,10 +3,6 @@ import json from .asserts import only_items -from .consts import ( - TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION, -) import pytest from sentry_sdk.envelope import Envelope, Item, PayloadRef @@ -241,9 +237,6 @@ def test_it_removes_events(mini_sentry, relay): # create a basic project config config = mini_sentry.add_basic_project_config(project_id) - config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } public_key = config["publicKeys"][0]["publicKey"] @@ -351,10 +344,7 @@ def test_sample_on_parametrized_root_transaction(mini_sentry, relay): # What the transaction is transformed into, which the dynamic sampling rules should respect. 
parametrized_transaction = "/auth/login/*/" - config = mini_sentry.add_basic_project_config(project_id) - config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION - } + mini_sentry.add_basic_project_config(project_id) sampling_config = mini_sentry.add_basic_project_config(43) sampling_public_key = sampling_config["publicKeys"][0]["publicKey"] @@ -464,18 +454,12 @@ def test_uses_trace_public_key(mini_sentry, relay): # create basic project configs project_id1 = 42 config1 = mini_sentry.add_basic_project_config(project_id1) - config1["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } public_key1 = config1["publicKeys"][0]["publicKey"] add_sampling_config(config1, sample_rate=0, rule_type="trace") project_id2 = 43 config2 = mini_sentry.add_basic_project_config(project_id2) - config2["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } public_key2 = config2["publicKeys"][0]["publicKey"] add_sampling_config(config2, sample_rate=1, rule_type="trace") @@ -540,9 +524,6 @@ def test_multi_item_envelope(mini_sentry, relay, rule_type, event_factory): # create a basic project config config = mini_sentry.add_basic_project_config(project_id) - config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } # add a sampling rule to project config that removes all transactions (sample_rate=0) public_key = config["publicKeys"][0]["publicKey"] # add a sampling rule to project config that drops all events (sample_rate=0), it should be ignored @@ -612,9 +593,6 @@ def collect_events_batch(expected_count, timeout_per_event=0.1): project_id = 42 relay = relay(mini_sentry) config = mini_sentry.add_basic_project_config(project_id) - config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } public_key = config["publicKeys"][0]["publicKey"] # the closer to 0, the less flaky the test is @@ 
-727,9 +705,6 @@ def make_envelope(public_key): else: relay = relay_with_processing() config = mini_sentry.add_basic_project_config(project_id) - config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } config["config"]["features"] = [ "organizations:profiling", ] @@ -874,9 +849,6 @@ def test_invalid_global_generic_filters_skip_dynamic_sampling(mini_sentry, relay project_id = 42 config = mini_sentry.add_basic_project_config(project_id) - config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } public_key = config["publicKeys"][0]["publicKey"] # Reject all transactions with dynamic sampling diff --git a/tests/integration/test_healthchecks.py b/tests/integration/test_healthchecks.py index 4e8bcc3ab4b..97e5e032969 100644 --- a/tests/integration/test_healthchecks.py +++ b/tests/integration/test_healthchecks.py @@ -153,7 +153,7 @@ def test_readiness_depends_on_aggregator_being_full_after_metrics(mini_sentry, r {"aggregator": {"max_total_bucket_bytes": 1, "initial_delay": 30}}, ) - metrics_payload = "transactions/foo:42|c\ntransactions/bar:17|c" + metrics_payload = "spans/foo:42|c\nspans/bar:17|c" relay.send_metrics(42, metrics_payload) for _ in range(100): diff --git a/tests/integration/test_metrics.py b/tests/integration/test_metrics.py index 3d33189aaa6..af9b43e97d6 100644 --- a/tests/integration/test_metrics.py +++ b/tests/integration/test_metrics.py @@ -6,10 +6,6 @@ import signal import time import queue -from .consts import ( - TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION, -) import pytest import requests @@ -123,7 +119,7 @@ def test_metrics_proxy_mode_buckets(mini_sentry, relay): ) project_id = 42 - bucket_name = "d:transactions/measurements.lcp@millisecond" + bucket_name = "d:spans/measurements.lcp@millisecond" bucket = { "org_id": 1, @@ -157,7 +153,7 @@ def test_metrics_proxy_mode_statsd(mini_sentry, relay): project_id = 42 now = 
int(datetime.now(tz=timezone.utc).timestamp()) - metrics_payload = f"transactions/foo:42|c\ntransactions/bar:17|c|T{now}" + metrics_payload = f"spans/foo:42|c\nspans/bar:17|c|T{now}" relay.send_metrics(project_id, metrics_payload) envelope = mini_sentry.get_captured_envelope() assert len(envelope.items) == 1 @@ -173,9 +169,7 @@ def test_metrics(mini_sentry, relay): mini_sentry.add_basic_project_config(project_id) timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - metrics_payload = ( - f"transactions/foo:42|c|T{timestamp}\ntransactions/bar:17|c|T{timestamp}" - ) + metrics_payload = f"spans/foo:42|c|T{timestamp}\nspans/bar:17|c|T{timestamp}" relay.send_metrics(project_id, metrics_payload) envelope = mini_sentry.get_captured_envelope() @@ -191,14 +185,14 @@ def test_metrics(mini_sentry, relay): { "timestamp": time_after(timestamp), "width": 1, - "name": "c:transactions/bar@none", + "name": "c:spans/bar@none", "value": 17.0, "type": "c", }, { "timestamp": time_after(timestamp), "width": 1, - "name": "c:transactions/foo@none", + "name": "c:spans/foo@none", "value": 42.0, "type": "c", }, @@ -212,7 +206,7 @@ def test_metrics_backdated(mini_sentry, relay): mini_sentry.add_basic_project_config(project_id) timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - 24 * 60 * 60 - metrics_payload = f"transactions/foo:42|c|T{timestamp}" + metrics_payload = f"spans/foo:42|c|T{timestamp}" relay.send_metrics(project_id, metrics_payload) envelope = mini_sentry.get_captured_envelope() @@ -228,7 +222,7 @@ def test_metrics_backdated(mini_sentry, relay): { "timestamp": time_after(timestamp), "width": 1, - "name": "c:transactions/foo@none", + "name": "c:spans/foo@none", "value": 42.0, "type": "c", }, @@ -239,13 +233,13 @@ def test_metrics_backdated(mini_sentry, relay): "metrics_partitions,expected_header", [ # With no partitions defined, partition count is auto assigned. 
- (None, "4"), + (None, "26"), # With zero partitions defined, all the buckets will be forwarded to a single partition. (0, "0"), # With zero partitions defined, all the buckets will be forwarded to a single partition. (1, "0"), # With more than zero partitions defined, the buckets will be forwarded to one of the partitions. - (128, "4"), + (128, "90"), ], ) def test_metrics_partition_key(mini_sentry, relay, metrics_partitions, expected_header): @@ -275,7 +269,7 @@ def test_metrics_partition_key(mini_sentry, relay, metrics_partitions, expected_ }, ) - metrics_payload = "transactions/foo:42|c|T999994711" + metrics_payload = "spans/foo:42|c|T999994711" relay.send_metrics(project_id, metrics_payload) mini_sentry.get_captured_envelope() @@ -288,7 +282,7 @@ def test_metrics_partition_key(mini_sentry, relay, metrics_partitions, expected_ @pytest.mark.parametrize( - "max_batch_size,expected_events", [(1000, 1), (200, 2), (130, 3), (100, 6), (50, 0)] + "max_batch_size,expected_events", [(1000, 1), (200, 2), (130, 3), (100, 5), (50, 0)] ) def test_metrics_max_batch_size(mini_sentry, relay, max_batch_size, expected_events): forever = 100 * 365 * 24 * 60 * 60 # *almost forever @@ -309,9 +303,7 @@ def test_metrics_max_batch_size(mini_sentry, relay, max_batch_size, expected_eve project_id = 42 mini_sentry.add_basic_project_config(project_id) - metrics_payload = ( - "transactions/foo:1:2:3:4:5:6:7:8:9:10:11:12:13:14:15:16:17|d|T999994711" - ) + metrics_payload = "spans/foo:1:2:3:4:5:6:7:8:9:10:11:12:13:14:15:16:17|d|T999994711" relay.send_metrics(project_id, metrics_payload) for _ in range(expected_events): @@ -320,7 +312,7 @@ def test_metrics_max_batch_size(mini_sentry, relay, max_batch_size, expected_eve assert mini_sentry.captured_envelopes.empty() -@pytest.mark.parametrize("ns", [None, "custom", "transactions"]) +@pytest.mark.parametrize("ns", [None, "custom", "spans"]) def test_metrics_rate_limits_namespace(mini_sentry, relay, ns): relay = relay(mini_sentry, 
options=TEST_CONFIG) @@ -336,9 +328,7 @@ def test_metrics_rate_limits_namespace(mini_sentry, relay, ns): ] timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - metrics_payload = ( - f"transactions/foo:42|c|T{timestamp}\ntransactions/bar:17|c|T{timestamp}" - ) + metrics_payload = f"spans/foo:42|c|T{timestamp}\nspans/bar:17|c|T{timestamp}" # Send and ignore first request to populate caches relay.send_metrics(project_id, metrics_payload) @@ -367,7 +357,7 @@ def test_global_metrics(mini_sentry, relay): public_key = config["publicKeys"][0]["publicKey"] timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - metrics_payload = f"transactions/foo:42|c\ntransactions/bar:17|c|T{timestamp}" + metrics_payload = f"spans/foo:42|c\nspans/bar:17|c|T{timestamp}" relay.send_metrics(project_id, metrics_payload) metrics_batch = mini_sentry.captured_metrics.get(timeout=5) @@ -378,14 +368,14 @@ def test_global_metrics(mini_sentry, relay): { "timestamp": time_after(timestamp), "width": 1, - "name": "c:transactions/bar@none", + "name": "c:spans/bar@none", "value": 17.0, "type": "c", }, { "timestamp": time_after(timestamp), "width": 1, - "name": "c:transactions/foo@none", + "name": "c:spans/foo@none", "value": 42.0, "type": "c", }, @@ -404,7 +394,7 @@ def test_global_metrics_no_config(mini_sentry, relay): { "timestamp": timestamp, "width": 1, - "name": "c:transactions/foo@none", + "name": "c:spans/foo@none", "value": 17.0, "type": "c", } @@ -445,7 +435,7 @@ def test_global_metrics_batching(mini_sentry, relay): timestamp = int(datetime.now(tz=timezone.utc).timestamp()) metrics_payload = ( - f"transactions/foo:1:2:3:4:5:6:7:8:9:10:11:12:13:14:15:16:17|d|T{timestamp}" + f"spans/foo:1:2:3:4:5:6:7:8:9:10:11:12:13:14:15:16:17|d|T{timestamp}" ) relay.send_metrics(project_id, metrics_payload) @@ -458,8 +448,8 @@ def test_global_metrics_batching(mini_sentry, relay): { "timestamp": time_after(timestamp), "width": 1, - "name": "d:transactions/foo@none", - "value": [float(i) for i in 
range(1, 16)], + "name": "d:spans/foo@none", + "value": [float(i) for i in range(1, 17)], "type": "d", } ] @@ -468,8 +458,8 @@ def test_global_metrics_batching(mini_sentry, relay): { "timestamp": time_after(timestamp), "width": 1, - "name": "d:transactions/foo@none", - "value": [16.0, 17.0], + "name": "d:spans/foo@none", + "value": [17.0], "type": "d", } ] @@ -484,19 +474,17 @@ def test_metrics_with_processing(mini_sentry, relay_with_processing, metrics_con project_config["config"]["features"] = ["organizations:custom-metrics"] timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - metrics_payload = f"transactions/foo:42|c\nbar@second:17|c|T{timestamp}" + metrics_payload = f"spans/foo:42|c\nbar@second:17|c|T{timestamp}" relay.send_metrics(project_id, metrics_payload) metrics = metrics_by_name(metrics_consumer, 2) - assert metrics["headers"]["c:transactions/foo@none"] == [ - ("namespace", b"transactions") - ] - assert metrics["c:transactions/foo@none"] == { + assert metrics["headers"]["c:spans/foo@none"] == [("namespace", b"spans")] + assert metrics["c:spans/foo@none"] == { "org_id": 1, "project_id": project_id, "retention_days": 90, - "name": "c:transactions/foo@none", + "name": "c:spans/foo@none", "tags": {}, "value": 42.0, "type": "c", @@ -535,21 +523,17 @@ def test_global_metrics_with_processing( project_config["config"]["features"] = ["organizations:custom-metrics"] timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - metrics_payload = ( - f"transactions/foo:42|c|T{timestamp}\nbar@second:17|c|T{timestamp}" - ) + metrics_payload = f"spans/foo:42|c|T{timestamp}\nbar@second:17|c|T{timestamp}" relay.send_metrics(project_id, metrics_payload) metrics = metrics_by_name(metrics_consumer, 2) - assert metrics["headers"]["c:transactions/foo@none"] == [ - ("namespace", b"transactions") - ] - assert metrics["c:transactions/foo@none"] == { + assert metrics["headers"]["c:spans/foo@none"] == [("namespace", b"spans")] + assert metrics["c:spans/foo@none"] == { 
"org_id": 1, "project_id": project_id, "retention_days": 90, - "name": "c:transactions/foo@none", + "name": "c:spans/foo@none", "tags": {}, "value": 42.0, "type": "c", @@ -591,17 +575,17 @@ def test_metrics_full(mini_sentry, relay, relay_with_processing, metrics_consume # Send two events to downstream and one to upstream timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - downstream.send_metrics(project_id, f"transactions/foo:7|c|T{timestamp}") - downstream.send_metrics(project_id, f"transactions/foo:5|c|T{timestamp}") + downstream.send_metrics(project_id, f"spans/foo:7|c|T{timestamp}") + downstream.send_metrics(project_id, f"spans/foo:5|c|T{timestamp}") - upstream.send_metrics(project_id, f"transactions/foo:3|c|T{timestamp}") + upstream.send_metrics(project_id, f"spans/foo:3|c|T{timestamp}") metric, _ = metrics_consumer.get_metric(timeout=6) assert metric == { "org_id": 1, "project_id": project_id, "retention_days": 90, - "name": "c:transactions/foo@none", + "name": "c:spans/foo@none", "tags": {}, "value": 15.0, "type": "c", @@ -714,285 +698,6 @@ def test_session_metrics_processing( } -@pytest.mark.parametrize( - "extract_metrics,discard_data,with_external_relay", - [ - (True, "transaction", True), - (True, "transaction", False), - (True, "trace", False), - (True, False, False), - (False, "transaction", False), - (False, False, False), - (False, False, True), - ("corrupted", "transaction", False), - ], - ids=[ - "extract from transaction-sampled, external relay", - "extract from transaction-sampled", - "extract from trace-sampled", - "extract from unsampled", - "don't extract from transaction-sampled", - "don't extract from unsampled", - "don't extract from unsampled, external relay", - "corrupted config", - ], -) -def test_transaction_metrics( - mini_sentry, - relay, - relay_with_processing, - metrics_consumer, - extract_metrics, - discard_data, - transactions_consumer, - with_external_relay, -): - metrics_consumer = metrics_consumer() - 
transactions_consumer = transactions_consumer() - - if with_external_relay: - relay = relay(relay_with_processing(options=TEST_CONFIG), options=TEST_CONFIG) - else: - relay = relay_with_processing(options=TEST_CONFIG) - - project_id = 42 - mini_sentry.add_full_project_config(project_id) - config = mini_sentry.project_configs[project_id]["config"] - - timestamp = datetime.now(tz=timezone.utc) - - if extract_metrics: - config["sessionMetrics"] = {"version": 1} - config["breakdownsV2"] = { - "span_ops": {"type": "spanOperations", "matches": ["react.mount"]} - } - - if discard_data: - # Make sure Relay drops the transaction - ds = config.setdefault("sampling", {}) - ds.setdefault("version", 2) - ds.setdefault("rules", []).append( - { - "samplingValue": {"type": "sampleRate", "value": 0.0}, - "type": discard_data, - "condition": {"op": "and", "inner": []}, - "id": 1, - } - ) - - if extract_metrics == "corrupted": - config["transactionMetrics"] = TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION + 1 - elif extract_metrics: - config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } - else: - config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - 1, - } - - transaction = generate_transaction_item(timestamp.timestamp()) - transaction["measurements"] = { - "foo": {"value": 1.2}, - "bar": {"value": 1.3}, - } - - trace_info = { - "trace_id": transaction["contexts"]["trace"]["trace_id"], - "public_key": mini_sentry.get_dsn_public_key(project_id), - "transaction": "transaction_which_starts_trace", - } - relay.send_transaction(42, transaction, trace_info=trace_info) - - # Send another transaction: - transaction["measurements"] = { - "foo": {"value": 2.2}, - } - relay.send_transaction(42, transaction, trace_info=trace_info) - - def assert_transaction(): - event, _ = transactions_consumer.get_event() - - assert event["breakdowns"] == { - "span_ops": { - "ops.react.mount": {"value": 9.91, "unit": "millisecond"}, - 
"total.time": {"value": 9.91, "unit": "millisecond"}, - } - } - - if not extract_metrics: - assert ( - str(mini_sentry.test_failures.get(timeout=5)[1]) - == "Relay sent us event: Processing Relay outdated, received tx config in version 2, which is not supported" - ) - - if not extract_metrics or extract_metrics == "corrupted": - message = metrics_consumer.poll(timeout=None) - assert message is None, message.value() - - assert_transaction() - assert_transaction() - - return - - if discard_data: - transactions_consumer.assert_empty() - else: - assert_transaction() - assert_transaction() - - common = { - "timestamp": time_after(timestamp, precision="s"), - "org_id": 1, - "project_id": 42, - "retention_days": 90, - "received_at": time_after(timestamp, precision="s"), - } - decision = "drop" if discard_data else "keep" - - assert metrics_consumer.get_metrics(n=6, with_headers=False) == [ - { - **common, - "name": "c:spans/count_per_root_project@none", - "tags": { - "decision": decision, - "is_segment": "false", - "target_project_id": "42", - "transaction": "transaction_which_starts_trace", - }, - "type": "c", - "value": 2.0, - }, - { - **common, - "name": "c:spans/count_per_root_project@none", - "tags": { - "decision": decision, - "is_segment": "true", - "target_project_id": "42", - "transaction": "transaction_which_starts_trace", - }, - "type": "c", - "value": 2.0, - }, - { - **common, - "name": "c:spans/usage@none", - "tags": { - "is_segment": "false", - }, - "type": "c", - "value": 2.0, - }, - { - **common, - "name": "c:spans/usage@none", - "tags": { - "was_transaction": "true", - "is_segment": "true", - }, - "type": "c", - "value": 2.0, - }, - { - **common, - "name": "c:transactions/count_per_root_project@none", - "tags": { - "decision": decision, - "target_project_id": "42", - "transaction": "transaction_which_starts_trace", - }, - "type": "c", - "value": 2.0, - }, - { - **common, - "name": "c:transactions/usage@none", - "tags": {}, - "type": "c", - "value": 2.0, - 
}, - ] - - -def test_transaction_metrics_count_per_root_project( - mini_sentry, - relay, - relay_with_processing, - metrics_consumer, - transactions_consumer, -): - metrics_consumer = metrics_consumer() - transactions_consumer = transactions_consumer() - - relay = relay_with_processing(options=TEST_CONFIG) - - project_id = 42 - mini_sentry.add_full_project_config(41) - mini_sentry.add_full_project_config(project_id) - timestamp = datetime.now(tz=timezone.utc) - - for project_id in (41, 42): - config = mini_sentry.project_configs[project_id]["config"] - config["sessionMetrics"] = {"version": 1} - config["breakdownsV2"] = { - "span_ops": {"type": "spanOperations", "matches": ["react.mount"]} - } - config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } - - transaction = generate_transaction_item(timestamp.timestamp()) - transaction["measurements"] = { - "foo": {"value": 1.2}, - "bar": {"value": 1.3}, - } - - trace_info = { - "trace_id": transaction["contexts"]["trace"]["trace_id"], - "public_key": mini_sentry.get_dsn_public_key(41), - "transaction": "test", - } - relay.send_transaction(42, transaction, trace_info=trace_info) - - transaction = generate_transaction_item(timestamp.timestamp()) - transaction["measurements"] = { - "test": {"value": 1.2}, - } - relay.send_transaction(42, transaction) - relay.send_transaction(42, transaction) - - _ = transactions_consumer.get_event() - _ = transactions_consumer.get_event() - _ = transactions_consumer.get_event() - - metrics_by_project = metrics_by_name_group_by_project(metrics_consumer, timeout=4) - - timestamp = int(timestamp.timestamp()) - assert metrics_by_project[41]["c:transactions/count_per_root_project@none"] == { - "timestamp": time_after(timestamp), - "org_id": 1, - "project_id": 41, - "retention_days": 90, - "tags": {"decision": "keep", "target_project_id": "42", "transaction": "test"}, - "name": "c:transactions/count_per_root_project@none", - "type": "c", - "value": 1.0, - 
"received_at": time_after(timestamp), - } - assert metrics_by_project[42]["c:transactions/count_per_root_project@none"] == { - "timestamp": time_after(timestamp), - "org_id": 1, - "project_id": 42, - "retention_days": 90, - "tags": {"decision": "keep", "target_project_id": "42"}, - "name": "c:transactions/count_per_root_project@none", - "type": "c", - "value": 2.0, - "received_at": time_after(timestamp), - } - - @pytest.mark.parametrize("send_extracted_header", [False, True]) def test_transaction_metrics_extraction_external_relays( mini_sentry, relay, send_extracted_header @@ -1005,9 +710,6 @@ def test_transaction_metrics_extraction_external_relays( project_id = 42 mini_sentry.add_full_project_config(project_id) config = mini_sentry.project_configs[project_id]["config"] - config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } config["sampling"] = { "version": 2, "rules": [ @@ -1049,14 +751,13 @@ def test_transaction_metrics_extraction_external_relays( assert len(metrics_envelope.items) == 1 payload = json.loads(metrics_envelope.items[0].get_bytes().decode()) - assert len(payload) == 6 + assert len(payload) == 4 by_name = {m["name"]: m for m in payload} - count_metric = by_name["c:transactions/count_per_root_project@none"] + count_metric = by_name["c:spans/count_per_root_project@none"] assert count_metric["tags"]["transaction"] == "root_transaction" assert count_metric["value"] == 1.0 - usage_metric = by_name["c:transactions/usage@none"] - assert not usage_metric.get("tags") # empty or missing + usage_metric = by_name["c:spans/usage@none"] assert usage_metric["value"] == 1.0 @@ -1080,10 +781,7 @@ def test_transaction_metrics_extraction_processing_relays( project_id = 42 mini_sentry.add_full_project_config(project_id) - config = mini_sentry.project_configs[project_id]["config"] - config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } + mini_sentry.project_configs[project_id]["config"] 
timestamp = datetime.now(tz=timezone.utc) tx = generate_transaction_item(timestamp.timestamp()) @@ -1098,11 +796,10 @@ def test_transaction_metrics_extraction_processing_relays( tx_consumer.assert_empty() if expect_metrics_extraction: - metrics = metrics_by_name(metrics_consumer, 6) - metric_usage = metrics["c:transactions/usage@none"] - assert metric_usage["tags"] == {} + metrics = metrics_by_name(metrics_consumer, 4) + metric_usage = metrics["c:spans/usage@none"] assert metric_usage["value"] == 1.0 - metric_count_per_project = metrics["c:transactions/count_per_root_project@none"] + metric_count_per_project = metrics["c:spans/count_per_root_project@none"] assert metric_count_per_project["value"] == 1.0 else: assert ( @@ -1113,52 +810,10 @@ def test_transaction_metrics_extraction_processing_relays( metrics_consumer.assert_empty() -@pytest.mark.parametrize( - "unsupported_version", - [0, 1234567890], - ids=["version is too small", "version is too big"], -) -def test_transaction_metrics_not_extracted_on_unsupported_version( - metrics_consumer, - transactions_consumer, - mini_sentry, - relay_with_processing, - unsupported_version, -): - project_id = 42 - mini_sentry.add_full_project_config(project_id) - config = mini_sentry.project_configs[project_id]["config"] - config["transactionMetrics"] = { - "version": unsupported_version, - } - - timestamp = datetime.now(tz=timezone.utc) - tx = generate_transaction_item(timestamp.timestamp()) - - metrics_consumer = metrics_consumer() - tx_consumer = transactions_consumer() - - relay = relay_with_processing(options=TEST_CONFIG) - relay.send_transaction(project_id, tx) - - tx, _ = tx_consumer.get_event() - assert tx["transaction"] == "/organizations/:orgId/performance/:eventSlug/" - tx_consumer.assert_empty() - - if unsupported_version < TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION: - error = str(mini_sentry.test_failures.get_nowait()) - assert "Processing Relay outdated" in error - - metrics_consumer.assert_empty() - - def 
test_no_transaction_metrics_when_filtered(mini_sentry, relay): project_id = 42 mini_sentry.add_full_project_config(project_id) config = mini_sentry.project_configs[project_id]["config"] - config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } config["filterSettings"]["releases"] = {"releases": ["foo@1.2.4"]} timestamp = datetime.now(tz=timezone.utc) @@ -1194,10 +849,7 @@ def test_transaction_name_too_long( """When a transaction name is truncated, the transaction metric should get the truncated value""" project_id = 42 mini_sentry.add_full_project_config(project_id) - config = mini_sentry.project_configs[project_id]["config"] - config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } + mini_sentry.project_configs[project_id]["config"] transaction = { "event_id": "d2132d31b39445f1938d7e21b6bf0ec4", @@ -1249,18 +901,18 @@ def test_graceful_shutdown(mini_sentry, relay): timestamp = int(datetime.now(tz=timezone.utc).timestamp()) past_timestamp = timestamp - 1000 + 30 - metrics_payload = f"transactions/past:42|c|T{past_timestamp}" + metrics_payload = f"spans/past:42|c|T{past_timestamp}" relay.send_metrics(project_id, metrics_payload) future_timestamp = timestamp + 30 - metrics_payload = f"transactions/future:17|c|T{future_timestamp}" + metrics_payload = f"spans/future:17|c|T{future_timestamp}" relay.send_metrics(project_id, metrics_payload) relay.process.send_signal(signal.SIGTERM) time.sleep(0.1) # Try to send another metric (will be rejected) - metrics_payload = f"transactions/now:666|c|T{timestamp}" + metrics_payload = f"spans/now:666|c|T{timestamp}" with pytest.raises(requests.ConnectionError): relay.send_metrics(project_id, metrics_payload) @@ -1285,14 +937,14 @@ def test_graceful_shutdown(mini_sentry, relay): { "timestamp": time_within_delta(past_timestamp, timedelta(seconds=1)), "width": 1, - "name": "c:transactions/past@none", + "name": "c:spans/past@none", "value": 42.0, "type": "c", }, { 
"timestamp": time_within_delta(future_timestamp, timedelta(seconds=1)), "width": 1, - "name": "c:transactions/future@none", + "name": "c:spans/future@none", "value": 17.0, "type": "c", }, @@ -1317,9 +969,6 @@ def test_limit_custom_measurements( "builtinMeasurements": [{"name": "foo", "unit": "none"}], "maxCustomMeasurements": 1, } - config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } transaction = generate_transaction_item(timestamp.timestamp()) transaction["measurements"] = { @@ -1336,13 +985,11 @@ def test_limit_custom_measurements( assert len(event["measurements"]) == 2 expected_metrics = { - "c:transactions/usage@none", - "c:transactions/count_per_root_project@none", "c:spans/usage@none", "c:spans/count_per_root_project@none", } - metrics = metrics_by_name(metrics_consumer, 6) + metrics = metrics_by_name(metrics_consumer, 4) metrics.pop("headers") assert metrics.keys() == expected_metrics @@ -1358,15 +1005,12 @@ def test_generic_metric_extraction(mini_sentry, relay): "metrics": [ { "category": "transaction", - "mri": "c:transactions/on_demand@none", + "mri": "c:spans/on_demand@none", "condition": {"op": "gte", "name": "event.duration", "value": 0.3}, "tags": [{"key": "query_hash", "value": "c91c2e4d"}], } ], } - config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } config["sampling"] = { "version": 2, "rules": [ @@ -1405,7 +1049,7 @@ def test_generic_metric_extraction(mini_sentry, relay): assert { "timestamp": time_after(int(timestamp.timestamp())), "width": 1, - "name": "c:transactions/on_demand@none", + "name": "c:spans/on_demand@none", "type": "c", "value": 1.0, "tags": {"query_hash": "c91c2e4d"}, @@ -1421,12 +1065,12 @@ def test_custom_metrics_disabled(mini_sentry, relay_with_processing, metrics_con # NOTE: "organizations:custom-metrics" missing from features timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - metrics_payload = 
f"transactions/foo:42|c\nbar@second:17|c|T{timestamp}" + metrics_payload = f"spans/foo:42|c\nbar@second:17|c|T{timestamp}" relay.send_metrics(project_id, metrics_payload) metrics = metrics_by_name(metrics_consumer, 1) - assert "c:transactions/foo@none" in metrics + assert "c:spans/foo@none" in metrics assert "c:custom/bar@second" not in metrics @@ -1462,10 +1106,7 @@ def test_relay_forwards_events_without_extracting_metrics_on_broken_global_filte project_id = 42 mini_sentry.add_full_project_config(project_id) - config = mini_sentry.project_configs[project_id]["config"] - config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } + mini_sentry.project_configs[project_id]["config"] if is_processing_relay: relay = relay_with_processing( @@ -1523,9 +1164,6 @@ def test_relay_forwards_events_without_extracting_metrics_on_unsupported_project "filters": [], } } - config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } if is_processing_relay: relay = relay_with_processing( @@ -1575,10 +1213,7 @@ def test_missing_global_filters_enables_metric_extraction( project_id = 42 mini_sentry.add_full_project_config(project_id) - config = mini_sentry.project_configs[project_id]["config"] - config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } + mini_sentry.project_configs[project_id]["config"] relay = relay_with_processing( options={ @@ -1647,9 +1282,6 @@ def test_histogram_outliers(mini_sentry, relay): with open(Path(__file__).parent / "fixtures/histogram-outliers.yml") as f: mini_sentry.global_config["metricExtraction"] = yaml.full_load(f) project_config = mini_sentry.add_full_project_config(project_id=42)["config"] - project_config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } project_config["metricExtraction"] = { "version": 3, "globalGroups": {"histogram_outliers": {"isEnabled": True}}, @@ -1724,7 +1356,7 @@ def 
test_metrics_extraction_with_computed_context_filters( "metrics": [ { "category": "transaction", - "mri": "c:transactions/on_demand_os@none", + "mri": "c:spans/on_demand_os@none", "condition": { "op": "eq", "name": "event.contexts.os", @@ -1733,7 +1365,7 @@ def test_metrics_extraction_with_computed_context_filters( }, { "category": "transaction", - "mri": "c:transactions/on_demand_runtime@none", + "mri": "c:spans/on_demand_runtime@none", "condition": { "op": "eq", "name": "event.contexts.runtime", @@ -1742,7 +1374,7 @@ def test_metrics_extraction_with_computed_context_filters( }, { "category": "transaction", - "mri": "c:transactions/on_demand_browser@none", + "mri": "c:spans/on_demand_browser@none", "condition": { "op": "eq", "name": "event.contexts.browser", @@ -1751,9 +1383,6 @@ def test_metrics_extraction_with_computed_context_filters( }, ], } - project_config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } # Create a transaction with matching contexts transaction = generate_transaction_item(datetime.now(tz=timezone.utc).timestamp()) @@ -1789,13 +1418,13 @@ def test_metrics_extraction_with_computed_context_filters( # Define list of extracted metrics to check metric_names = [ - "c:transactions/on_demand_os@none", - "c:transactions/on_demand_runtime@none", - "c:transactions/on_demand_browser@none", + "c:spans/on_demand_os@none", + "c:spans/on_demand_runtime@none", + "c:spans/on_demand_browser@none", ] # Verify that all three metrics were extracted - metrics = metrics_by_name(metrics_consumer, 9) + metrics = metrics_by_name(metrics_consumer, 7) # Check each extracted metric for metric_name in metric_names: diff --git a/tests/integration/test_metrics_encoding.py b/tests/integration/test_metrics_encoding.py index e59fea3be62..e2a5d012cd4 100644 --- a/tests/integration/test_metrics_encoding.py +++ b/tests/integration/test_metrics_encoding.py @@ -62,14 +62,14 @@ def test_metric_bucket_encoding_legacy( project_id = 42 
mini_sentry.add_basic_project_config(project_id) - relay.send_metrics(project_id, "transactions/foo:1337|d\ntransactions/bar:42|s") + relay.send_metrics(project_id, "spans/foo:1337|d\nspans/bar:42|s") metrics = metrics_by_name(metrics_consumer, 2) - assert metrics["d:transactions/foo@none"]["value"] == [1337.0] - assert metrics["s:transactions/bar@none"]["value"] == [42.0] + assert metrics["d:spans/foo@none"]["value"] == [1337.0] + assert metrics["s:spans/bar@none"]["value"] == [42.0] -@pytest.mark.parametrize("namespace", [None, "transactions", "custom"]) +@pytest.mark.parametrize("namespace", [None, "spans", "custom"]) @pytest.mark.parametrize("ty", ["set", "distribution"]) def test_metric_bucket_encoding_dynamic_global_config_option( mini_sentry, relay_with_processing, metrics_consumer, namespace, ty @@ -127,14 +127,14 @@ def test_metric_bucket_encoding_base64( project_id = 42 mini_sentry.add_basic_project_config(project_id) - relay.send_metrics(project_id, "transactions/foo:3:1:2|d\ntransactions/bar:7:1|s") + relay.send_metrics(project_id, "spans/foo:3:1:2|d\nspans/bar:7:1|s") metrics = metrics_by_name(metrics_consumer, 2) - assert metrics["d:transactions/foo@none"]["value"] == { + assert metrics["d:spans/foo@none"]["value"] == { "format": "base64", "data": b64_dist(3, 1, 2), # values are in order } - assert metrics["s:transactions/bar@none"]["value"] == { + assert metrics["s:spans/bar@none"]["value"] == { "format": "base64", "data": b64_set(1, 7), } @@ -156,10 +156,8 @@ def test_metric_bucket_encoding_zstd( project_id = 42 mini_sentry.add_basic_project_config(project_id) - relay.send_metrics(project_id, "transactions/foo:3:1:2|d\ntransactions/bar:7:1|s") + relay.send_metrics(project_id, "spans/foo:3:1:2|d\nspans/bar:7:1|s") metrics = metrics_by_name(metrics_consumer, 2) - assert_zstd_dist( - metrics["d:transactions/foo@none"]["value"], 1, 2, 3 - ) # values are sorted - assert_zstd_set(metrics["s:transactions/bar@none"]["value"], 1, 7) + 
assert_zstd_dist(metrics["d:spans/foo@none"]["value"], 1, 2, 3) # values are sorted + assert_zstd_set(metrics["s:spans/bar@none"]["value"], 1, 7) diff --git a/tests/integration/test_outcome.py b/tests/integration/test_outcome.py index 8369212c7da..a57b389f969 100644 --- a/tests/integration/test_outcome.py +++ b/tests/integration/test_outcome.py @@ -8,10 +8,6 @@ from queue import Empty from pytest_localserver.http import itertools -from .consts import ( - TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION, -) import pytest import requests @@ -19,8 +15,6 @@ from sentry_relay.consts import DataCategory from .asserts import time_within_delta -from .test_metrics import metrics_by_name - RELAY_ROOT = Path(__file__).parent.parent.parent HOUR_MILLISEC = 1000 * 3600 @@ -818,9 +812,6 @@ def test_outcome_to_client_report(relay, mini_sentry): # Create project config project_id = 42 project_config = mini_sentry.add_full_project_config(project_id) - project_config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } project_config["config"]["sampling"] = { "version": 2, "rules": [ @@ -1010,10 +1001,6 @@ def test_outcomes_aggregate_dynamic_sampling(relay, mini_sentry): ], } - project_config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } - upstream = relay( mini_sentry, { @@ -1116,9 +1103,6 @@ def test_graceful_shutdown(relay, mini_sentry): # Create project config project_id = 42 project_config = mini_sentry.add_full_project_config(project_id) - project_config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } project_config["config"]["sampling"] = { "version": 2, "rules": [ @@ -1218,9 +1202,6 @@ def test_profile_outcomes( project_config = mini_sentry.add_full_project_config(project_id)["config"] project_config.setdefault("features", []).append("organizations:profiling") - project_config["transactionMetrics"] = { 
- "version": TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION, - } project_config["sampling"] = { "version": 2, "rules": [ @@ -1362,7 +1343,7 @@ def make_envelope(transaction_name): metrics = [ m for m, _ in metrics_consumer.get_metrics() - if m["name"] == "c:transactions/usage@none" + if m["name"] == "c:spans/usage@none" and m["tags"].get("is_segment") == "true" ] assert sum(metric["value"] for metric in metrics) == 2 @@ -1388,23 +1369,17 @@ def test_profile_outcomes_invalid( mini_sentry, relay_with_processing, outcomes_consumer, - metrics_consumer, profile_payload, expected_outcome, ): """ Tests that Relay reports correct outcomes for invalid profiles as `Profile`. """ - outcomes_consumer = outcomes_consumer(timeout=2) - metrics_consumer = metrics_consumer() + outcomes_consumer = outcomes_consumer() project_id = 42 project_config = mini_sentry.add_full_project_config(project_id)["config"] - project_config.setdefault("features", []).append("organizations:profiling") - project_config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } config = { "outcomes": { @@ -1474,10 +1449,6 @@ def make_envelope(): }, ] - # Make sure the profile will not be counted as accepted: - metrics = metrics_by_name(metrics_consumer, 6) - assert "has_profile" not in metrics["c:transactions/usage@none"]["tags"] - def test_profile_outcomes_too_many( mini_sentry, @@ -1495,9 +1466,6 @@ def test_profile_outcomes_too_many( project_config = mini_sentry.add_full_project_config(project_id)["config"] project_config.setdefault("features", []).append("organizations:profiling") - project_config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION, - } config = { "outcomes": { @@ -1781,9 +1749,6 @@ def test_span_outcomes( project_id = 42 project_config = mini_sentry.add_full_project_config(project_id)["config"] - project_config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } project_config["sampling"] = { 
"version": 2, "rules": [ @@ -1907,10 +1872,7 @@ def test_span_outcomes_invalid( outcomes_consumer = outcomes_consumer(timeout=2) project_id = 42 - project_config = mini_sentry.add_full_project_config(project_id)["config"] - project_config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } + mini_sentry.add_full_project_config(project_id) config = { "outcomes": { diff --git a/tests/integration/test_spans.py b/tests/integration/test_spans.py index 4425e81239d..9048e5c0906 100644 --- a/tests/integration/test_spans.py +++ b/tests/integration/test_spans.py @@ -11,9 +11,6 @@ from sentry_sdk.envelope import Envelope, Item, PayloadRef from .asserts import time_within_delta -from .consts import ( - TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, -) from .test_store import make_transaction TEST_CONFIG = { @@ -58,9 +55,6 @@ def test_span_extraction( relay = relay_with_processing(options=TEST_CONFIG) project_id = 42 project_config = mini_sentry.add_full_project_config(project_id) - project_config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } project_config["config"].setdefault("features", []) if performance_issues_spans: @@ -127,7 +121,6 @@ def test_span_extraction( ) assert {headers[0] for _, headers in metrics_consumer.get_metrics()} == { ("namespace", b"spans"), - ("namespace", b"transactions"), } child_span = spans_consumer.get_span() @@ -707,11 +700,6 @@ def test_rate_limit_consistent_extracted( relay = relay_with_processing(options=TEST_CONFIG) project_id = 42 project_config = mini_sentry.add_full_project_config(project_id) - # Span metrics won't be extracted without a supported transactionMetrics config. - # Without extraction, the span is treated as `Span`, not `SpanIndexed`. 
- project_config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } project_config["config"]["quotas"] = [ { "categories": ["span"], @@ -758,7 +746,7 @@ def summarize_outcomes(): assert len(spans) == 2 assert summarize_outcomes() == {(16, 0): 2} # SpanIndexed, Accepted # A limit only for span_indexed does not affect extracted metrics - metrics = metrics_consumer.get_metrics(n=6) + metrics = metrics_consumer.get_metrics(n=4) span_count = sum( [m[0]["value"] for m in metrics if m[0]["name"] == "c:spans/usage@none"] ) @@ -774,10 +762,7 @@ def summarize_outcomes(): } assert outcomes == expected_outcomes - metrics = metrics_consumer.get_metrics(timeout=1) - assert len(metrics) == 2 - assert all(m[0]["name"][2:14] == "transactions" for m in metrics), metrics - + metrics_consumer.assert_empty() outcomes_consumer.assert_empty() @@ -852,26 +837,18 @@ def test_rate_limit_is_consistent_between_transaction_and_spans( "reasonCode": "exceeded", }, ] - project_config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - } transactions_consumer = transactions_consumer() spans_consumer = spans_consumer() outcomes_consumer = outcomes_consumer() metrics_consumer = metrics_consumer() - def usage_metrics(): + def span_usage_metric(): metrics = metrics_consumer.get_metrics() - transaction_count = sum( - m[0]["value"] - for m in metrics - if m[0]["name"] == "c:transactions/usage@none" - ) span_count = sum( m[0]["value"] for m in metrics if m[0]["name"] == "c:spans/usage@none" ) - return (transaction_count, span_count) + return span_count def summarize_outcomes(): counter = Counter() @@ -895,7 +872,7 @@ def summarize_outcomes(): spans = spans_consumer.get_spans(n=2, timeout=10) assert len(spans) == 2 assert summarize_outcomes() == {(16, 0): 2} # SpanIndexed, Accepted - assert usage_metrics() == (1, 2) + assert span_usage_metric() == 2 # Second batch nothing passes relay.send_envelope(project_id, envelope) 
@@ -909,13 +886,13 @@ def summarize_outcomes(): (12, 2): 2, # Span, Rate Limited (16, 2): 2, # SpanIndexed, Rate Limited } - assert usage_metrics() == (0, 0) + assert span_usage_metric() == 0 elif category == "transaction_indexed": assert summarize_outcomes() == { (9, 2): 1, # TransactionIndexed, Rate Limited (16, 2): 2, # SpanIndexed, Rate Limited } - assert usage_metrics() == (1, 2) + assert span_usage_metric() == 2 # Third batch might raise 429 since it hits the fast path maybe_raises = ( @@ -936,7 +913,7 @@ def summarize_outcomes(): (12, 2): expected_span_count, # Span, Rate Limited (16, 2): expected_span_count, # SpanIndexed, Rate Limited } - assert usage_metrics() == (0, 0) + assert span_usage_metric() == 0 elif category == "transaction_indexed": # We do not check indexed limits on the fast path, # so we count the correct number of spans (ignoring the span_count header): @@ -945,7 +922,7 @@ def summarize_outcomes(): (16, 2): 2, # SpanIndexed, Rate Limited } # Metrics are always correct: - assert usage_metrics() == (1, 2) + assert span_usage_metric() == 2 def test_span_filtering_with_generic_inbound_filter( @@ -1023,10 +1000,7 @@ def test_dynamic_sampling( outcomes_consumer = outcomes_consumer() project_id = 42 - project_config = mini_sentry.add_basic_project_config(project_id) - project_config["config"]["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION - } + mini_sentry.add_basic_project_config(project_id) sampling_config = mini_sentry.add_basic_project_config(43) sampling_public_key = sampling_config["publicKeys"][0]["publicKey"] diff --git a/tests/integration/test_store.py b/tests/integration/test_store.py index c982d7fe2e0..4d6cc5006ee 100644 --- a/tests/integration/test_store.py +++ b/tests/integration/test_store.py @@ -9,12 +9,6 @@ from sentry_relay.consts import DataCategory -from .asserts import time_within_delta -from .consts import ( - TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - 
TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION, -) - import pytest from flask import Response, abort from requests.exceptions import HTTPError @@ -464,7 +458,7 @@ def transform(e): assert event["logentry"]["formatted"] == f"otherkey{i}" -@pytest.mark.parametrize("namespace", ["transactions", "custom"]) +@pytest.mark.parametrize("namespace", ["spans", "custom"]) def test_sends_metric_bucket_outcome( mini_sentry, relay_with_processing, outcomes_consumer, namespace ): @@ -497,7 +491,7 @@ def test_sends_metric_bucket_outcome( ] timestamp = int(datetime.now(tz=timezone.utc).timestamp()) - metrics_payload = f"transactions/foo:42|c\nbar@second:17|c|T{timestamp}" + metrics_payload = f"spans/foo:42|c\nbar@second:17|c|T{timestamp}" relay.send_metrics(project_id, metrics_payload) outcome = outcomes_consumer.get_outcome(timeout=3) @@ -555,7 +549,7 @@ def make_bucket(name, type_, values): } buckets = [ - make_bucket(f"d:transactions/measurements.lcp{i}@millisecond", "d", [1.0]) + make_bucket(f"d:spans/measurements.lcp{i}@millisecond", "d", [1.0]) for i in range(metric_bucket_limit) ] @@ -568,258 +562,12 @@ def make_bucket(name, type_, values): metrics_consumer.assert_empty() -@pytest.mark.parametrize("violating_bucket", [2.0, 3.0]) -def test_enforce_transaction_rate_limit_on_metric_buckets( - mini_sentry, - relay_with_processing, - metrics_consumer, - outcomes_consumer, - violating_bucket, -): - """ - param violating_bucket is parametrized so we cover both cases: - 1. the quota is matched exactly - 2. 
quota is exceeded by one - """ - bucket_interval = 1 # second - relay = relay_with_processing( - { - "processing": {"max_rate_limit": 2 * 86400}, - "aggregator": { - "bucket_interval": bucket_interval, - "initial_delay": 0, - }, - } - ) - metrics_consumer = metrics_consumer() - outcomes_consumer = outcomes_consumer() - - project_id = 42 - projectconfig = mini_sentry.add_full_project_config(project_id) - # add another dsn key (we want 2 keys so we can set limits per key) - mini_sentry.add_dsn_key_to_project(project_id) - - public_keys = mini_sentry.get_dsn_public_key_configs(project_id) - key_id = public_keys[0]["numericId"] - - reason_code = uuid.uuid4().hex - - projectconfig["config"]["quotas"] = [ - { - "id": f"test_rate_limiting_{uuid.uuid4().hex}", - "scope": "key", - "scopeId": str(key_id), - "categories": ["transaction"], - "limit": 5, - "window": 86400, - "reasonCode": reason_code, - } - ] - - now = datetime.now(tz=timezone.utc).timestamp() - - def generate_ticks(): - # Generate a new timestamp for every bucket, so they do not get merged by the aggregator - tick = int(now // bucket_interval * bucket_interval) - while True: - yield tick - tick += bucket_interval - - tick = generate_ticks() - - def make_bucket(name, type_, values): - return { - "org_id": 1, - "project_id": project_id, - "timestamp": next(tick), - "name": name, - "type": type_, - "value": values, - "width": bucket_interval, - } - - def assert_metrics(expected_metrics): - produced_metrics = [ - m for m, _ in metrics_consumer.get_metrics(n=len(expected_metrics)) - ] - produced_metrics.sort(key=lambda b: (b["name"], b["value"])) - assert produced_metrics == expected_metrics - - # NOTE: Sending these buckets in multiple envelopes because the order of flushing - # and also the order of rate limiting is not deterministic. 
- relay.send_metrics_buckets( - project_id, - [ - # Send a few non-usage buckets, they will not deplete the quota - make_bucket("d:transactions/measurements.lcp@millisecond", "d", 10 * [1.0]), - # Session metrics are accepted - make_bucket("d:sessions/session@none", "c", 1), - make_bucket("d:sessions/duration@second", "d", 9 * [1]), - ], - ) - assert_metrics( - [ - { - "name": "d:sessions/duration@second", - "org_id": 1, - "project_id": 42, - "retention_days": 90, - "tags": {}, - "type": "d", - "value": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], - "timestamp": time_within_delta(now), - "received_at": time_within_delta(now), - }, - { - "name": "d:sessions/session@none", - "org_id": 1, - "retention_days": 90, - "project_id": 42, - "tags": {}, - "type": "c", - "value": 1.0, - "timestamp": time_within_delta(now), - "received_at": time_within_delta(now), - }, - { - "name": "d:transactions/measurements.lcp@millisecond", - "org_id": 1, - "retention_days": 90, - "project_id": 42, - "tags": {}, - "type": "d", - "value": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], - "timestamp": time_within_delta(now), - "received_at": time_within_delta(now), - }, - ] - ) - - relay.send_metrics_buckets( - project_id, - [ - # Usage metric, subtract 3 from quota - make_bucket("c:transactions/usage@none", "c", 3), - ], - ) - assert_metrics( - [ - { - "name": "c:transactions/usage@none", - "org_id": 1, - "retention_days": 90, - "project_id": 42, - "tags": {}, - "type": "c", - "value": 3.0, - "timestamp": time_within_delta(now), - "received_at": time_within_delta(now), - }, - ] - ) - - relay.send_metrics_buckets( - project_id, - [ - # Can still send unlimited non-usage metrics - make_bucket("d:transactions/measurements.lcp@millisecond", "d", 10 * [2.0]), - ], - ) - assert_metrics( - [ - { - "name": "d:transactions/measurements.lcp@millisecond", - "org_id": 1, - "retention_days": 90, - "project_id": 42, - "tags": {}, - "type": "d", - "value": [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 
2.0, 2.0], - "timestamp": time_within_delta(now), - "received_at": time_within_delta(now), - } - ] - ) - - relay.send_metrics_buckets( - project_id, - [ - # Usage metric, subtract from quota. This bucket is still accepted (see over_accept_once), - # but the rest will be rejected. - make_bucket("c:transactions/usage@none", "c", violating_bucket), - ], - ) - assert_metrics( - [ - { - "name": "c:transactions/usage@none", - "org_id": 1, - "retention_days": 90, - "project_id": 42, - "tags": {}, - "type": "c", - "value": violating_bucket, - "timestamp": time_within_delta(now), - "received_at": time_within_delta(now), - }, - ] - ) - - relay.send_metrics_buckets( - project_id, - [ - # FCP buckets won't make it into Kafka, since usage was now counted in Redis and it's >= 5. - make_bucket("d:transactions/measurements.fcp@millisecond", "d", 10 * [7.0]), - ], - ) - - relay.send_metrics_buckets( - project_id, - [ - # Another three for usage, won't make it into kafka because quota is zero. - make_bucket("c:transactions/usage@none", "c", 3), - # Session metrics are still accepted. 
- make_bucket("d:sessions/session@user", "s", [1254]), - ], - ) - assert_metrics( - [ - { - "name": "d:sessions/session@user", - "org_id": 1, - "retention_days": 90, - "project_id": 42, - "tags": {}, - "type": "s", - "value": [1254], - "timestamp": time_within_delta(now), - "received_at": time_within_delta(now), - } - ] - ) - - outcomes_consumer.assert_rate_limited( - reason_code, - key_id=key_id, - categories=["transaction", "metric_bucket"], - quantity=5, - ) - - -@pytest.mark.parametrize( - "extraction_version", - [ - TRANSACTION_EXTRACT_MIN_SUPPORTED_VERSION, - TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION, - ], -) def test_processing_quota_transaction_indexing( mini_sentry, relay_with_processing, metrics_consumer, transactions_consumer, outcomes_consumer, - extraction_version, ): relay = relay_with_processing( { @@ -860,9 +608,6 @@ def test_processing_quota_transaction_indexing( "reasonCode": "get_lost", }, ] - projectconfig["config"]["transactionMetrics"] = { - "version": extraction_version, - } relay.send_event(project_id, make_transaction({"message": "1st tx"})) event, _ = tx_consumer.get_event() diff --git a/tests/integration/test_unreal.py b/tests/integration/test_unreal.py index 214f097f1c7..ee3c101f5c0 100644 --- a/tests/integration/test_unreal.py +++ b/tests/integration/test_unreal.py @@ -1,7 +1,6 @@ import os import pytest import json -from .consts import TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION def load_dump_file(base_file_name: str): @@ -16,19 +15,13 @@ def load_dump_file(base_file_name: str): @pytest.mark.parametrize("dump_file_name", ["unreal_crash", "unreal_crash_apple"]) -@pytest.mark.parametrize("extract_metrics", [True, False]) -def test_unreal_crash(mini_sentry, relay, dump_file_name, extract_metrics): +def test_unreal_crash(mini_sentry, relay, dump_file_name): """ Asserts that non-processing Relays do not extract and forward the Unreal report. 
""" project_id = 42 relay = relay(mini_sentry) - config = mini_sentry.add_full_project_config(project_id)["config"] - if extract_metrics: - # regression: we dropped unreal events in customer relays while metrics extraction was on - config["transactionMetrics"] = { - "version": TRANSACTION_EXTRACT_MAX_SUPPORTED_VERSION, - } + mini_sentry.add_full_project_config(project_id) unreal_content = load_dump_file(dump_file_name)