// cellstate_core/scoring.rs

1//! Deterministic scoring formula with transparency
2//!
3//! The scoring formula computes a weighted sum of N orthogonal factors,
4//! each normalized to [0, 1]:
5//!
6//! ```text
7//! score = Σ wᵢ · Sᵢ   where Σ wᵢ = 1 and 0 ≤ Sᵢ ≤ 1
8//! ```
9//!
10//! This guarantees the final score is also in [0, 1] (convex combination).
11//!
12//! Re-export path: cellstate_core::scoring::*
13
14use crate::Timestamp;
15use serde::{Deserialize, Serialize};
16use std::collections::HashMap;
17
18/// Weights for deterministic scoring formula.
19///
20/// Seven orthogonal factors, each weight ∈ [0, 1], summing to 1.0 (±ε).
21///
22/// ```text
23/// score = w_v·S_vector + w_w·S_warmth + w_r·S_recency
24///       + w_a·S_abstraction + w_g·S_graph + w_k·S_keyword
25///       + w_c·S_causal
26/// ```
27///
28/// The `causal` factor scores entities by their proximity to recent events
29/// in the Event DAG. When no Event DAG data is available, adaptive weight
30/// renormalization zeros `causal` and redistributes its budget to the
31/// remaining factors.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))]
pub struct ScoringWeights {
    /// Weight for cosine similarity between query embedding and entity embedding
    pub vector: f32,
    /// Weight for access frequency with temporal decay
    pub warmth: f32,
    /// Weight for creation time decay (newer = higher)
    pub recency: f32,
    /// Weight for abstraction level (Principle > Summary > Raw)
    pub abstraction: f32,
    /// Weight for graph proximity via edge traversal (degrees of separation)
    pub graph: f32,
    /// Weight for keyword/substring match score
    pub keyword: f32,
    /// Weight for Event DAG causal proximity (how recently events reference this entity)
    pub causal: f32,
}
50
impl Default for ScoringWeights {
    fn default() -> Self {
        // 7-factor defaults. The values below sum to exactly 1.00, so
        // `validate()` passes without renormalization; `causal` takes the
        // largest share after `vector`.
        Self {
            vector: 0.30,
            warmth: 0.12,
            recency: 0.13,
            abstraction: 0.08,
            graph: 0.12,
            keyword: 0.08,
            causal: 0.17,
        }
    }
}
64
65impl ScoringWeights {
66    /// Validate that all weights sum to 1.0 within epsilon tolerance.
67    ///
68    /// The tolerance matches the PostgreSQL `weights_sum_to_one` CHECK
69    /// constraint: `ABS(sum - 1.0) < 0.001`.
70    pub fn validate(&self) -> bool {
71        const EPSILON: f32 = 0.001;
72        let sum = self.sum();
73        (sum - 1.0).abs() <= EPSILON
74    }
75
76    /// Normalize weights so they sum to exactly 1.0, preserving ratios.
77    ///
78    /// # Mathematical Properties
79    ///
80    /// - **Idempotent**: `normalize(normalize(w)) = normalize(w)`
81    /// - **Ratio-preserving**: `wᵢ/wⱼ` is invariant under normalization
82    /// - **No-op on zero**: if all weights are 0, remains all zeros
83    pub fn normalize(&mut self) {
84        let sum = self.sum();
85        if sum > 0.0 {
86            self.vector /= sum;
87            self.warmth /= sum;
88            self.recency /= sum;
89            self.abstraction /= sum;
90            self.graph /= sum;
91            self.keyword /= sum;
92            self.causal /= sum;
93        }
94    }
95
96    /// Legacy 6-factor weights for backward compatibility.
97    ///
98    /// Returns weights with `causal = 0.0` and the original 6-factor
99    /// defaults. Useful when Event DAG is not available.
100    pub fn legacy_six_factor() -> Self {
101        Self {
102            vector: 0.35,
103            warmth: 0.15,
104            recency: 0.15,
105            abstraction: 0.10,
106            graph: 0.15,
107            keyword: 0.10,
108            causal: 0.0,
109        }
110    }
111
112    /// Sum of all weights.
113    #[inline]
114    fn sum(&self) -> f32 {
115        self.vector
116            + self.warmth
117            + self.recency
118            + self.abstraction
119            + self.graph
120            + self.keyword
121            + self.causal
122    }
123
124    /// Compute the weighted score given per-factor raw scores.
125    ///
126    /// Each raw score should be in [0, 1]. The result is in [0, 1] when
127    /// weights are valid (sum to 1.0).
128    ///
129    /// Non-finite factor values (NaN, ±Inf) are clamped to [0, 1] to prevent
130    /// silent propagation through downstream scoring.
131    pub fn score(&self, factors: &ScoringFactors) -> f32 {
132        let result = self.vector * factors.vector.clamp(0.0, 1.0)
133            + self.warmth * factors.warmth.clamp(0.0, 1.0)
134            + self.recency * factors.recency.clamp(0.0, 1.0)
135            + self.abstraction * factors.abstraction.clamp(0.0, 1.0)
136            + self.graph * factors.graph.clamp(0.0, 1.0)
137            + self.keyword * factors.keyword.clamp(0.0, 1.0)
138            + self.causal * factors.causal.clamp(0.0, 1.0);
139        // Guard against NaN from 0.0 * clamped-NaN (NaN.clamp returns NaN on some platforms)
140        if result.is_finite() {
141            result
142        } else {
143            0.0
144        }
145    }
146}
147
/// Raw per-factor scores, each in [0, 1].
///
/// Pair with `ScoringWeights::score` to produce the weighted total.
#[derive(Debug, Clone, Default)]
pub struct ScoringFactors {
    /// Cosine similarity of query embedding vs entity embedding.
    pub vector: f32,
    /// Access-frequency warmth with temporal decay.
    pub warmth: f32,
    /// Creation-time recency decay (newer = higher).
    pub recency: f32,
    /// Abstraction-level score (Principle > Summary > Raw).
    pub abstraction: f32,
    /// Graph-proximity score via edge traversal.
    pub graph: f32,
    /// Keyword/substring match score.
    pub keyword: f32,
    /// Event DAG causal-proximity score.
    pub causal: f32,
}
159
/// Debug output for scoring transparency.
///
/// Captures the final score together with a per-factor breakdown so API
/// consumers can see how a score was produced.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))]
pub struct ScoringDebug {
    /// Identifier of the scored item.
    pub item_id: String,
    /// The weighted total score.
    pub final_score: f32,
    /// Per-factor breakdown — presumably keyed by factor name; confirm against producer.
    pub components: HashMap<String, ScoringComponent>,
    /// Free-form description of which weight configuration was applied.
    pub weights_used: String,
}
169
/// One factor's contribution within a scoring breakdown.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))]
pub struct ScoringComponent {
    /// Raw factor value before weighting (expected in [0, 1]).
    pub raw: f32,
    /// Contribution after multiplying by the factor's weight.
    pub weighted: f32,
}
176
/// Source quality tracking (P2)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SourceQuality {
    /// Identifier of the tracked source.
    pub source_id: String,
    /// Reliability score — NOTE(review): range not established here, presumably [0, 1]; confirm.
    pub reliability_score: f32,
    /// Freshness timestamp — presumably when the source data was last updated; verify against writer.
    pub freshness: Timestamp,
    /// Number of citations referencing this source.
    pub citation_count: u32,
}
185
186// ── Belief-to-Scoring Bridge ─────────────────────────────────────────
187
/// Map a belief confidence to a score multiplier.
///
/// When an agent holds beliefs about scored entities, the belief confidence
/// acts as a multiplier:
///
/// ```text
/// adjusted_score = base_score * belief_modifier(confidence)
/// ```
///
/// Piecewise linear mapping:
/// - `confidence >= 0.8`       → 1.0 (strong beliefs leave the score intact)
/// - `0.3 <= confidence < 0.8` → `0.5 + (confidence - 0.3)`, rising linearly
///   from 0.5 to 1.0
/// - `confidence < 0.3`        → `0.5 * (confidence / 0.3)`, rising linearly
///   from 0.0 to 0.5 (penalises low-confidence beliefs)
///
/// Inputs outside [0, 1] are clamped first, so the modifier is always in
/// [0, 1] and the adjusted score stays within [0, base_score].
#[inline]
pub fn belief_confidence_modifier(confidence: f32) -> f32 {
    let c = confidence.clamp(0.0, 1.0);
    match c {
        c if c >= 0.8 => 1.0,
        c if c >= 0.3 => 0.5 + (c - 0.3),
        c => 0.5 * (c / 0.3),
    }
}
219
220/// Aggregate belief confidence across multiple beliefs about the same entity.
221///
222/// Uses max-confidence (optimistic) policy: the strongest supporting belief
223/// determines the modifier. Returns 1.0 (no modifier) if no beliefs apply.
224#[inline]
225pub fn aggregate_belief_modifier(confidences: &[f32]) -> f32 {
226    if confidences.is_empty() {
227        return 1.0;
228    }
229    let max_confidence = confidences.iter().copied().fold(0.0f32, f32::max);
230    belief_confidence_modifier(max_confidence)
231}
232
233#[cfg(test)]
234mod tests {
235    use super::*;
236
237    // ── Default weights ──────────────────────────────────────────────
238
    // These three rely on sum()/validate() agreeing on the 0.001 epsilon.
    #[test]
    fn test_default_weights_sum_to_one() {
        let weights = ScoringWeights::default();
        let sum = weights.sum();
        assert!(
            (sum - 1.0).abs() < 0.001,
            "Default weights should sum to 1.0, got {}",
            sum
        );
    }

    #[test]
    fn test_default_weights_validate() {
        let weights = ScoringWeights::default();
        assert!(weights.validate(), "Default weights should validate");
    }

    #[test]
    fn test_legacy_six_factor_validates_after_normalize() {
        let mut weights = ScoringWeights::legacy_six_factor();
        // Legacy has causal=0.0, so sum=1.0 already
        assert!(
            weights.validate(),
            "Legacy 6-factor should already be valid"
        );
        // normalize() on already-valid weights must be a no-op (idempotence).
        weights.normalize();
        assert!(
            weights.validate(),
            "Legacy should still validate after normalize"
        );
    }
270
271    // ── Validation ───────────────────────────────────────────────────
272
    #[test]
    fn test_validate_returns_true_for_valid_weights() {
        // Fixture sums to exactly 1.00.
        let weights = ScoringWeights {
            vector: 0.3,
            warmth: 0.15,
            recency: 0.15,
            abstraction: 0.1,
            graph: 0.1,
            keyword: 0.05,
            causal: 0.15,
        };
        assert!(weights.validate(), "Valid weights should pass validation");
    }

    #[test]
    fn test_validate_returns_false_when_sum_too_high() {
        // Fixture sums to 1.9 — far outside the 0.001 tolerance.
        let weights = ScoringWeights {
            vector: 0.5,
            warmth: 0.3,
            recency: 0.3,
            abstraction: 0.2,
            graph: 0.2,
            keyword: 0.2,
            causal: 0.2,
        };
        assert!(
            !weights.validate(),
            "Weights summing to > 1.0 should fail validation"
        );
    }

    #[test]
    fn test_validate_returns_false_when_sum_too_low() {
        // Fixture sums to 0.5.
        let weights = ScoringWeights {
            vector: 0.1,
            warmth: 0.1,
            recency: 0.1,
            abstraction: 0.05,
            graph: 0.05,
            keyword: 0.05,
            causal: 0.05,
        };
        assert!(
            !weights.validate(),
            "Weights summing to < 1.0 should fail validation"
        );
    }

    #[test]
    fn test_validate_accepts_values_within_epsilon() {
        // Decimal sum is exactly 1.0000; f32 rounding keeps |sum - 1| well
        // inside the 0.001 epsilon.
        let weights = ScoringWeights {
            vector: 0.3005,
            warmth: 0.12,
            recency: 0.13,
            abstraction: 0.08,
            graph: 0.12,
            keyword: 0.08,
            causal: 0.1695,
        };
        assert!(weights.validate(), "Weights within epsilon should validate");
    }
334
335    // ── Normalization ────────────────────────────────────────────────
336
    #[test]
    fn test_normalize_fixes_invalid_weights() {
        // Fixture sums to 2.3 before normalization.
        let mut weights = ScoringWeights {
            vector: 0.7,
            warmth: 0.3,
            recency: 0.3,
            abstraction: 0.2,
            graph: 0.3,
            keyword: 0.2,
            causal: 0.3,
        };

        assert!(
            !weights.validate(),
            "Weights should be invalid before normalization"
        );
        weights.normalize();
        assert!(
            weights.validate(),
            "Weights should be valid after normalization"
        );
    }

    #[test]
    fn test_normalize_preserves_ratios() {
        // Each weight should end up divided by the pre-normalization sum.
        let mut weights = ScoringWeights {
            vector: 0.4,
            warmth: 0.2,
            recency: 0.2,
            abstraction: 0.1,
            graph: 0.2,
            keyword: 0.1,
            causal: 0.2,
        };

        let sum = weights.sum(); // 1.4
        weights.normalize();

        assert!((weights.vector - 0.4 / sum).abs() < 0.0001);
        assert!((weights.warmth - 0.2 / sum).abs() < 0.0001);
        assert!((weights.recency - 0.2 / sum).abs() < 0.0001);
        assert!((weights.abstraction - 0.1 / sum).abs() < 0.0001);
        assert!((weights.graph - 0.2 / sum).abs() < 0.0001);
        assert!((weights.keyword - 0.1 / sum).abs() < 0.0001);
        assert!((weights.causal - 0.2 / sum).abs() < 0.0001);
    }

    #[test]
    fn test_normalize_handles_small_values() {
        // Sum is 0.07; normalization must scale up, not just down.
        let mut weights = ScoringWeights {
            vector: 0.01,
            warmth: 0.01,
            recency: 0.01,
            abstraction: 0.01,
            graph: 0.01,
            keyword: 0.01,
            causal: 0.01,
        };

        weights.normalize();
        assert!(
            weights.validate(),
            "Normalized small weights should validate"
        );
    }

    #[test]
    fn test_normalize_handles_large_values() {
        // Sum is 350.0; checks scaling down by a large factor.
        let mut weights = ScoringWeights {
            vector: 100.0,
            warmth: 50.0,
            recency: 50.0,
            abstraction: 25.0,
            graph: 50.0,
            keyword: 25.0,
            causal: 50.0,
        };

        weights.normalize();
        assert!(
            weights.validate(),
            "Normalized large weights should validate"
        );
    }

    #[test]
    fn test_normalize_handles_zero_weights() {
        // Zero-valued factors must stay zero; the single non-zero factor
        // absorbs the whole budget.
        let mut weights = ScoringWeights {
            vector: 1.0,
            warmth: 0.0,
            recency: 0.0,
            abstraction: 0.0,
            graph: 0.0,
            keyword: 0.0,
            causal: 0.0,
        };

        weights.normalize();
        assert!(
            weights.validate(),
            "Normalized weights with zeros should validate"
        );
        assert!(
            (weights.vector - 1.0).abs() < 0.0001,
            "Single non-zero weight should become 1.0"
        );
    }

    #[test]
    fn test_normalize_handles_all_zeros() {
        // Documented no-op case: sum == 0 must not divide by zero.
        let mut weights = ScoringWeights {
            vector: 0.0,
            warmth: 0.0,
            recency: 0.0,
            abstraction: 0.0,
            graph: 0.0,
            keyword: 0.0,
            causal: 0.0,
        };

        weights.normalize();
        assert_eq!(weights.vector, 0.0);
        assert_eq!(weights.warmth, 0.0);
        assert_eq!(weights.causal, 0.0);
    }

    #[test]
    fn test_normalize_idempotent() {
        let mut weights = ScoringWeights {
            vector: 0.7,
            warmth: 0.3,
            recency: 0.3,
            abstraction: 0.2,
            graph: 0.3,
            keyword: 0.2,
            causal: 0.3,
        };

        weights.normalize();
        let first_vector = weights.vector;
        let first_warmth = weights.warmth;
        let first_causal = weights.causal;

        // A second pass divides by a sum of ~1.0 and must not move values.
        weights.normalize();
        assert!(
            (weights.vector - first_vector).abs() < 0.00001,
            "normalize() should be idempotent"
        );
        assert!(
            (weights.warmth - first_warmth).abs() < 0.00001,
            "normalize() should be idempotent"
        );
        assert!(
            (weights.causal - first_causal).abs() < 0.00001,
            "normalize() should be idempotent for causal"
        );
    }

    #[test]
    fn test_normalize_maintains_relative_weights() {
        let mut weights = ScoringWeights {
            vector: 0.4,
            warmth: 0.2,
            recency: 0.2,
            abstraction: 0.1,
            graph: 0.2,
            keyword: 0.1,
            causal: 0.2,
        };

        weights.normalize();

        // Vector should be 2x warmth (0.4 / 0.2 = 2)
        assert!((weights.vector / weights.warmth - 2.0).abs() < 0.0001);
        // Warmth should equal recency
        assert!((weights.warmth - weights.recency).abs() < 0.0001);
        // Warmth should equal causal (both started at 0.2)
        assert!((weights.warmth - weights.causal).abs() < 0.0001);
    }
516
517    // ── Score computation ────────────────────────────────────────────
518
    #[test]
    fn test_score_all_ones_equals_one() {
        // Convex combination: all factors at 1.0 must score the weight sum (~1.0).
        let weights = ScoringWeights::default();
        let factors = ScoringFactors {
            vector: 1.0,
            warmth: 1.0,
            recency: 1.0,
            abstraction: 1.0,
            graph: 1.0,
            keyword: 1.0,
            causal: 1.0,
        };
        let score = weights.score(&factors);
        assert!(
            (score - 1.0).abs() < 0.01,
            "All-ones factors with valid weights should score ~1.0, got {}",
            score
        );
    }

    #[test]
    fn test_score_all_zeros_equals_zero() {
        // ScoringFactors::default() is all zeros.
        let weights = ScoringWeights::default();
        let factors = ScoringFactors::default();
        let score = weights.score(&factors);
        assert_eq!(score, 0.0);
    }

    #[test]
    fn test_score_single_factor() {
        // With only vector=1.0 contributing, the score is the vector weight.
        let weights = ScoringWeights::default();
        let factors = ScoringFactors {
            vector: 1.0,
            ..Default::default()
        };
        let score = weights.score(&factors);
        assert!(
            (score - weights.vector).abs() < 0.0001,
            "Single vector=1.0 should give score = vector weight"
        );
    }
560
561    // ── NaN/Inf guard ──────────────────────────────────────────────
562
    #[test]
    fn test_score_nan_factor_returns_zero() {
        // NaN survives clamp, so score() must catch it via its finite guard.
        let weights = ScoringWeights::default();
        let factors = ScoringFactors {
            vector: f32::NAN,
            warmth: 1.0,
            recency: 1.0,
            abstraction: 1.0,
            graph: 1.0,
            keyword: 1.0,
            causal: 1.0,
        };
        let score = weights.score(&factors);
        assert!(score.is_finite(), "NaN factor should not produce NaN score");
    }

    #[test]
    fn test_score_inf_factor_clamped() {
        // +Inf clamps to 1.0, so only the vector weight contributes.
        let weights = ScoringWeights::default();
        let factors = ScoringFactors {
            vector: f32::INFINITY,
            warmth: 0.0,
            recency: 0.0,
            abstraction: 0.0,
            graph: 0.0,
            keyword: 0.0,
            causal: 0.0,
        };
        let score = weights.score(&factors);
        assert!(score.is_finite(), "Inf factor should be clamped");
        assert!(
            (score - weights.vector).abs() < 0.001,
            "Inf clamped to 1.0 should produce weight value"
        );
    }

    #[test]
    fn test_score_neg_inf_factor_clamped() {
        // -Inf clamps to 0.0, contributing nothing.
        let weights = ScoringWeights::default();
        let factors = ScoringFactors {
            vector: f32::NEG_INFINITY,
            ..Default::default()
        };
        let score = weights.score(&factors);
        assert!(score.is_finite(), "Neg Inf factor should be clamped to 0.0");
        assert_eq!(score, 0.0);
    }
610
611    // ── Belief-to-Scoring bridge ──────────────────────────────────────
612
    #[test]
    fn test_belief_modifier_high_confidence() {
        // Plateau: everything at or above 0.8 maps to exactly 1.0.
        assert_eq!(belief_confidence_modifier(1.0), 1.0);
        assert_eq!(belief_confidence_modifier(0.9), 1.0);
        assert_eq!(belief_confidence_modifier(0.8), 1.0);
    }

    #[test]
    fn test_belief_modifier_medium_confidence() {
        // Middle segment: 0.5 + (0.55 - 0.3) = 0.75.
        let m = belief_confidence_modifier(0.55);
        assert!(
            (m - 0.75).abs() < 0.01,
            "0.55 confidence should give ~0.75 modifier, got {m}"
        );
    }

    #[test]
    fn test_belief_modifier_low_confidence() {
        // Low segment: 0.5 * (0.15 / 0.3) = 0.25; zero maps to zero.
        let m = belief_confidence_modifier(0.15);
        assert!(
            (m - 0.25).abs() < 0.01,
            "0.15 confidence should give ~0.25 modifier, got {m}"
        );
        assert_eq!(belief_confidence_modifier(0.0), 0.0);
    }

    #[test]
    fn test_belief_modifier_boundary_0_3() {
        // Segment boundary: both pieces agree on 0.5 at confidence 0.3.
        let m = belief_confidence_modifier(0.3);
        assert!(
            (m - 0.5).abs() < 0.01,
            "0.3 confidence should give ~0.5 modifier, got {m}"
        );
    }

    #[test]
    fn test_belief_modifier_clamped() {
        // Out-of-range inputs clamp to the [0, 1] endpoints first.
        assert_eq!(belief_confidence_modifier(-0.5), 0.0);
        assert_eq!(belief_confidence_modifier(2.0), 1.0);
    }

    #[test]
    fn test_aggregate_belief_modifier_empty() {
        // No beliefs → no modification of the score.
        assert_eq!(aggregate_belief_modifier(&[]), 1.0);
    }

    #[test]
    fn test_aggregate_belief_modifier_uses_max() {
        // Two beliefs: 0.3 and 0.9 — should use 0.9 (high confidence = 1.0 modifier)
        let m = aggregate_belief_modifier(&[0.3, 0.9]);
        assert_eq!(m, 1.0);
    }

    #[test]
    fn test_aggregate_belief_modifier_all_low() {
        let m = aggregate_belief_modifier(&[0.1, 0.2]);
        // max is 0.2 → modifier = 0.5 * (0.2/0.3) ≈ 0.333
        assert!((m - 0.333).abs() < 0.01, "got {m}");
    }
672
673    // ── Property tests ───────────────────────────────────────────────
674
    mod prop {
        use super::*;
        use proptest::prelude::*;

        // Randomized checks of the normalize()/score() invariants documented
        // on ScoringWeights; 1000 cases each.
        proptest! {
            #![proptest_config(ProptestConfig::with_cases(1000))]

            /// Normalization always produces valid weights (when sum > 0)
            #[test]
            fn prop_normalize_produces_valid(
                v in 0.0f32..10.0, w in 0.0f32..10.0, r in 0.0f32..10.0,
                a in 0.0f32..10.0, g in 0.0f32..10.0, k in 0.0f32..10.0,
                c in 0.0f32..10.0,
            ) {
                // Skip the degenerate all-zero case (normalize is a no-op there).
                prop_assume!(v + w + r + a + g + k + c > 0.0);
                let mut weights = ScoringWeights {
                    vector: v, warmth: w, recency: r,
                    abstraction: a, graph: g, keyword: k, causal: c,
                };
                weights.normalize();
                prop_assert!(weights.validate(),
                    "Normalized weights should validate, sum={}",
                    weights.sum());
            }

            /// Normalization is idempotent
            #[test]
            fn prop_normalize_idempotent(
                v in 0.01f32..10.0, w in 0.01f32..10.0, r in 0.01f32..10.0,
                a in 0.01f32..10.0, g in 0.01f32..10.0, k in 0.01f32..10.0,
                c in 0.01f32..10.0,
            ) {
                let mut w1 = ScoringWeights {
                    vector: v, warmth: w, recency: r,
                    abstraction: a, graph: g, keyword: k, causal: c,
                };
                w1.normalize();
                let snapshot = w1.clone();
                w1.normalize();
                prop_assert!((w1.vector - snapshot.vector).abs() < 1e-5);
                prop_assert!((w1.causal - snapshot.causal).abs() < 1e-5);
            }

            /// Normalization preserves pairwise ratios
            #[test]
            fn prop_normalize_preserves_ratios(
                v in 0.01f32..10.0, w in 0.01f32..10.0, r in 0.01f32..10.0,
                a in 0.01f32..10.0, g in 0.01f32..10.0, k in 0.01f32..10.0,
                c in 0.01f32..10.0,
            ) {
                let ratio_before = v / w;
                let mut weights = ScoringWeights {
                    vector: v, warmth: w, recency: r,
                    abstraction: a, graph: g, keyword: k, causal: c,
                };
                weights.normalize();
                let ratio_after = weights.vector / weights.warmth;
                prop_assert!(
                    (ratio_before - ratio_after).abs() < 0.01,
                    "Ratio v/w changed: {} → {}",
                    ratio_before, ratio_after
                );
            }

            /// Score is bounded in [0, 1] when weights are valid and factors in [0, 1]
            #[test]
            fn prop_score_bounded(
                sv in 0.0f32..1.0, sw in 0.0f32..1.0, sr in 0.0f32..1.0,
                sa in 0.0f32..1.0, sg in 0.0f32..1.0, sk in 0.0f32..1.0,
                sc in 0.0f32..1.0,
            ) {
                let weights = ScoringWeights::default();
                let factors = ScoringFactors {
                    vector: sv, warmth: sw, recency: sr,
                    abstraction: sa, graph: sg, keyword: sk, causal: sc,
                };
                let score = weights.score(&factors);
                // Upper bound is 1.01 to allow f32 accumulation round-off.
                prop_assert!(
                    (0.0..=1.01).contains(&score),
                    "Score {} not in [0, 1] with factors {:?}",
                    score, factors
                );
            }

            /// Score is monotone in each factor (increasing one factor ⟹ score increases)
            #[test]
            fn prop_score_monotone_per_vector(
                base in 0.0f32..0.5,
                delta in 0.01f32..0.5,
            ) {
                let weights = ScoringWeights::default();
                let f1 = ScoringFactors { vector: base, ..Default::default() };
                let f2 = ScoringFactors { vector: base + delta, ..Default::default() };
                prop_assert!(weights.score(&f2) >= weights.score(&f1));
            }
        }
    }
772
773    // ── Moved from integration tests (scoring_property_tests.rs) ─────
774
    #[test]
    fn test_scoring_weights_default_has_causal() {
        // The 7th (causal) factor must be active in the default weight set.
        let w = ScoringWeights::default();
        assert!(w.causal > 0.0, "default causal weight should be positive");
        assert!(
            (w.causal - 0.17).abs() < 0.01,
            "default causal should be ~0.17"
        );
    }

    #[test]
    fn test_legacy_weights_still_valid() {
        // Mirrors legacy_six_factor(), then renormalizes by hand to mimic
        // the adaptive-renormalization path.
        let mut w = ScoringWeights {
            vector: 0.35,
            warmth: 0.15,
            recency: 0.15,
            abstraction: 0.10,
            graph: 0.15,
            keyword: 0.10,
            causal: 0.0,
        };
        let sum = w.vector + w.warmth + w.recency + w.abstraction + w.graph + w.keyword;
        assert!(
            (sum - 1.0).abs() < 0.001,
            "legacy 6-factor sum should be ~1.0"
        );

        // causal is 0.0, so total == sum and the division is a near no-op.
        let total = sum + w.causal;
        w.vector /= total;
        w.warmth /= total;
        w.recency /= total;
        w.abstraction /= total;
        w.graph /= total;
        w.keyword /= total;
        w.causal /= total;

        let new_sum =
            w.vector + w.warmth + w.recency + w.abstraction + w.graph + w.keyword + w.causal;
        assert!(
            (new_sum - 1.0).abs() < 0.001,
            "renormalized sum should be ~1.0"
        );
    }
818
    mod prop_integration {
        use super::super::*;
        use proptest::prelude::*;

        // Strategy: 7 raw weights in (0.01, 1.0), pre-normalized so they sum to 1.
        fn arb_scoring_weights() -> impl Strategy<Value = ScoringWeights> {
            proptest::array::uniform7(0.01_f32..1.0).prop_map(|raw: [f32; 7]| {
                let sum: f32 = raw.iter().sum();
                ScoringWeights {
                    vector: raw[0] / sum,
                    warmth: raw[1] / sum,
                    recency: raw[2] / sum,
                    abstraction: raw[3] / sum,
                    graph: raw[4] / sum,
                    keyword: raw[5] / sum,
                    causal: raw[6] / sum,
                }
            })
        }

        // Strategy: legacy-shaped weights — 6 normalized factors, causal pinned to 0.0.
        fn arb_legacy_weights() -> impl Strategy<Value = ScoringWeights> {
            proptest::array::uniform6(0.01_f32..1.0).prop_map(|raw: [f32; 6]| {
                let sum: f32 = raw.iter().sum();
                ScoringWeights {
                    vector: raw[0] / sum,
                    warmth: raw[1] / sum,
                    recency: raw[2] / sum,
                    abstraction: raw[3] / sum,
                    graph: raw[4] / sum,
                    keyword: raw[5] / sum,
                    causal: 0.0,
                }
            })
        }

        proptest! {
            #![proptest_config(ProptestConfig::with_cases(500))]

            // Zeroing any subset of {vector, graph, causal} and renormalizing
            // by hand must restore a ~1.0 sum (the adaptive-renorm contract).
            #[test]
            fn prop_adaptive_renorm_preserves_sum(
                weights in arb_scoring_weights(),
                zero_vector in proptest::bool::ANY,
                zero_graph in proptest::bool::ANY,
                zero_causal in proptest::bool::ANY,
            ) {
                let mut w = weights;
                if zero_vector { w.vector = 0.0; }
                if zero_graph { w.graph = 0.0; }
                if zero_causal { w.causal = 0.0; }

                let sum = w.vector + w.warmth + w.recency
                    + w.abstraction + w.graph + w.keyword + w.causal;
                if sum > 1e-6 {
                    w.vector /= sum;
                    w.warmth /= sum;
                    w.recency /= sum;
                    w.abstraction /= sum;
                    w.graph /= sum;
                    w.keyword /= sum;
                    w.causal /= sum;

                    let new_sum = w.vector + w.warmth + w.recency
                        + w.abstraction + w.graph + w.keyword + w.causal;
                    prop_assert!((new_sum - 1.0).abs() < 0.01,
                        "renormalized sum should be ~1.0, got {}", new_sum);
                }
            }

            // Legacy path: the causal term contributes exactly 0.
            #[test]
            fn prop_legacy_compat_score_valid(
                weights in arb_legacy_weights(),
                s_vector in 0.0_f32..=1.0,
                s_warmth in 0.0_f32..=1.0,
                s_recency in 0.0_f32..=1.0,
                s_abstraction in 0.0_f32..=1.0,
                s_graph in 0.0_f32..=1.0,
                s_keyword in 0.0_f32..=1.0,
            ) {
                let score = weights.vector * s_vector
                    + weights.warmth * s_warmth
                    + weights.recency * s_recency
                    + weights.abstraction * s_abstraction
                    + weights.graph * s_graph
                    + weights.keyword * s_keyword
                    + weights.causal * 0.0;

                prop_assert!((-1e-6..=1.0 + 1e-6).contains(&score),
                    "legacy 6-factor score {} should be in [0,1]", score);
            }

            // Full 7-factor weighted sum stays in [0, 1] (± f32 round-off).
            #[test]
            fn prop_7factor_score_bounded(
                weights in arb_scoring_weights(),
                s_vector in 0.0_f32..=1.0,
                s_warmth in 0.0_f32..=1.0,
                s_recency in 0.0_f32..=1.0,
                s_abstraction in 0.0_f32..=1.0,
                s_graph in 0.0_f32..=1.0,
                s_keyword in 0.0_f32..=1.0,
                s_causal in 0.0_f32..=1.0,
            ) {
                let score = weights.vector * s_vector
                    + weights.warmth * s_warmth
                    + weights.recency * s_recency
                    + weights.abstraction * s_abstraction
                    + weights.graph * s_graph
                    + weights.keyword * s_keyword
                    + weights.causal * s_causal;

                prop_assert!((-1e-6..=1.0 + 1e-6).contains(&score),
                    "7-factor score {} should be in [0,1]", score);
            }

            // Dummy input: proptest requires at least one parameter.
            #[test]
            fn prop_default_weights_sum_one(_dummy in 0..1u8) {
                let w = ScoringWeights::default();
                let sum = w.vector + w.warmth + w.recency
                    + w.abstraction + w.graph + w.keyword + w.causal;
                prop_assert!((sum - 1.0).abs() < 0.001,
                    "default weights sum should be ~1.0, got {}", sum);
            }
        }
    }
941}