From fb6180ce5624996209191098db386afeba7869d4 Mon Sep 17 00:00:00 2001
From: Vinay Kumar P <49900911+vinay-500@users.noreply.github.com>
Date: Thu, 18 Dec 2025 21:48:02 -0600
Subject: [PATCH] Change assertions to assertAlmostEqual for perplexity

Replace integer truncation in GPTQ perplexity tests with tolerance-based
assertions

Avoid masking small regressions caused by int() truncation
---
 tests/gptq/test_quantization.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/gptq/test_quantization.py b/tests/gptq/test_quantization.py
index 20e2e3083c..dfba6e3465 100644
--- a/tests/gptq/test_quantization.py
+++ b/tests/gptq/test_quantization.py
@@ -113,8 +113,8 @@ def test_perplexity(self):
         the perplexity of the converted models
         """
 
-        self.assertEqual(int(self.fp16_ppl), self.expected_fp16_perplexity)
-        self.assertEqual(int(self.quantized_ppl), self.expected_quantized_perplexity)
+        self.assertAlmostEqual(self.fp16_ppl, self.expected_fp16_perplexity, delta=1.0)
+        self.assertAlmostEqual(self.quantized_ppl, self.expected_quantized_perplexity, delta=1.0)
 
     def test_quantized_layers_class(self):
         """