From 77fad827147ad9899e7c36d8bd74f0bc25214b7e Mon Sep 17 00:00:00 2001 From: Jochen Hoenle <173445474+hoe-jo@users.noreply.github.com> Date: Thu, 16 Apr 2026 10:28:45 +0200 Subject: [PATCH] add trlc ai validator --- .bazelrc | 3 + .bazelrc.ai_checker | 25 + MODULE.bazel | 23 + defs.bzl | 9 + validation/ai_checker/BUILD | 78 ++ validation/ai_checker/README.md | 366 +++++++++ .../ai_checker/_assets/class_diagram.puml | 140 ++++ .../ai_checker/_assets/class_diagram.svg | 1 + .../_assets/deployment_diagram.puml | 87 +++ .../ai_checker/_assets/deployment_diagram.svg | 1 + validation/ai_checker/ai_checker.bzl | 270 +++++++ validation/ai_checker/guidelines/general.md | 37 + .../guidelines/requirements_guidelines.md | 56 ++ validation/ai_checker/requirements.txt | 661 ++++++++++++++++ validation/ai_checker/requirements.txt.in | 9 + .../ai_checker/src/ai_checker/__init__.py | 0 .../src/ai_checker/ai_checker_core.py | 392 ++++++++++ .../src/ai_checker/analysis_cache.py | 106 +++ .../src/ai_checker/analysis_models.py | 43 ++ .../src/ai_checker/artefact_extractor.py | 48 ++ .../ai_checker/src/ai_checker/constants.py | 20 + .../src/ai_checker/guidelines_reader.py | 100 +++ .../ai_checker/src/ai_checker/orchestrator.py | 380 +++++++++ .../src/ai_checker/requirement_extractor.py | 265 +++++++ .../src/ai_checker/result_formatter.py | 607 +++++++++++++++ .../src/copilot_adapter/__init__.py | 0 .../src/copilot_adapter/copilot_langchain.py | 726 ++++++++++++++++++ 27 files changed, 4453 insertions(+) create mode 100644 .bazelrc.ai_checker create mode 100644 validation/ai_checker/BUILD create mode 100644 validation/ai_checker/README.md create mode 100644 validation/ai_checker/_assets/class_diagram.puml create mode 100644 validation/ai_checker/_assets/class_diagram.svg create mode 100644 validation/ai_checker/_assets/deployment_diagram.puml create mode 100644 validation/ai_checker/_assets/deployment_diagram.svg create mode 100644 validation/ai_checker/ai_checker.bzl create mode 100644 validation/ai_checker/guidelines/general.md create mode 100644 validation/ai_checker/guidelines/requirements_guidelines.md create mode 100644 validation/ai_checker/requirements.txt create mode 100644 validation/ai_checker/requirements.txt.in create mode 100644 validation/ai_checker/src/ai_checker/__init__.py create mode 100644 validation/ai_checker/src/ai_checker/ai_checker_core.py create mode 100644 validation/ai_checker/src/ai_checker/analysis_cache.py create mode 100644 validation/ai_checker/src/ai_checker/analysis_models.py create mode 100644 validation/ai_checker/src/ai_checker/artefact_extractor.py create mode 100644 validation/ai_checker/src/ai_checker/constants.py create mode 100644 validation/ai_checker/src/ai_checker/guidelines_reader.py create mode 100644 validation/ai_checker/src/ai_checker/orchestrator.py create mode 100644 validation/ai_checker/src/ai_checker/requirement_extractor.py create mode 100644 validation/ai_checker/src/ai_checker/result_formatter.py create mode 100644 validation/ai_checker/src/copilot_adapter/__init__.py create mode 100644 validation/ai_checker/src/copilot_adapter/copilot_langchain.py diff --git a/.bazelrc b/.bazelrc index e923028c..11c551c5 100644 --- a/.bazelrc +++ b/.bazelrc @@ -11,3 +11,6 @@ build --java_language_version=17 build --tool_java_language_version=17 build --java_runtime_version=remotejdk_17 build --tool_java_runtime_version=remotejdk_17 + +# Import AI checker custom configuration +try-import %workspace%/.bazelrc.ai_checker diff --git a/.bazelrc.ai_checker 
b/.bazelrc.ai_checker new file mode 100644 index 00000000..9fe05fd6 --- /dev/null +++ b/.bazelrc.ai_checker @@ -0,0 +1,25 @@
+###############################################################################
+## GitHub Copilot SDK - Environment (config:copilot)
+###############################################################################
+# The Copilot CLI needs HOME (for stored OAuth credentials) and proxy vars
+# (to reach api.github.com behind a corporate proxy).
+#
+# These are scoped to --config=copilot so they don't affect other builds.
+# Pass --config=copilot on the command line when running AI checker
+# targets (see validation/ai_checker/README.md for details).
+#
+# Auth docs: https://github.com/github/copilot-sdk/blob/main/docs/auth/index.md
+
+# Auth
+build:copilot --action_env=HOME
+build:copilot --action_env=COPILOT_GITHUB_TOKEN
+build:copilot --action_env=GH_TOKEN
+build:copilot --action_env=GITHUB_TOKEN
+
+# Proxy (Node.js checks both upper and lowercase)
+build:copilot --action_env=HTTP_PROXY
+build:copilot --action_env=HTTPS_PROXY
+build:copilot --action_env=NO_PROXY
+build:copilot --action_env=http_proxy
+build:copilot --action_env=https_proxy
+build:copilot --action_env=no_proxy diff --git a/MODULE.bazel b/MODULE.bazel index cd160a28..9a15f3a0 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -229,3 +229,26 @@ git_override( commit = "56881461f9d3fde2918d1731aa5937aaf64cd67c", remote = "https://github.com/bmw-software-engineering/lobster.git", ) +
+###############################################################################
+# Dependencies for AI Checker
+###############################################################################
+
+# Make the copilot CLI binary executable (rules_python strips +x from wheels)
+pip.whl_mods( + copy_executables = {"site-packages/copilot/bin/copilot": "copilot_cli"}, + hub_name = "ai_checker_whl_mods", + whl_name = "github_copilot_sdk", +) +use_repo(pip, "ai_checker_whl_mods") +
+# Core + LangChain + GitHub Copilot SDK dependencies
+pip.parse( + hub_name = "pip_ai_checker", + python_version = PYTHON_VERSION, + requirements_lock = "//validation/ai_checker:requirements.txt", + whl_modifications = { + "@ai_checker_whl_mods//:github_copilot_sdk.json": "github-copilot-sdk", + }, +) +use_repo(pip, "pip_ai_checker") diff --git a/defs.bzl b/defs.bzl index ac832c5d..116b2872 100644 --- a/defs.bzl +++ b/defs.bzl @@ -38,6 +38,13 @@ load( # --- starpls --- load("//starpls:starpls.bzl", _setup_starpls = "setup_starpls")
+# --- ai_checker ---
+load( + "//validation/ai_checker:ai_checker.bzl", + _architecture_ai_test = "architecture_ai_test", + _trlc_requirements_ai_test = "trlc_requirements_ai_test", +) +
score_virtualenv = _score_virtualenv score_py_pytest = _score_py_pytest dash_license_checker = _dash_license_checker +cli_helper = _cli_helper use_format_targets = _use_format_targets setup_starpls = _setup_starpls rust_coverage_report = _rust_coverage_report
+trlc_requirements_ai_test = _trlc_requirements_ai_test
+architecture_ai_test = _architecture_ai_test diff --git a/validation/ai_checker/BUILD b/validation/ai_checker/BUILD new file mode 100644 index 00000000..991940b5 --- /dev/null +++ b/validation/ai_checker/BUILD @@ -0,0 +1,78 @@
+# *******************************************************************************
+# Copyright (c) 2026 Contributors to the Eclipse Foundation
+#
+# See the NOTICE file(s) distributed with this work for additional
+# information regarding copyright ownership.
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +load("@pip_ai_checker//:requirements.bzl", "requirement") +load("@rules_python//python:pip.bzl", "compile_pip_requirements") + +exports_files( + ["src/ai_checker/orchestrator.py"], + visibility = ["//visibility:public"], +) + +# Default requirements engineering guidelines +filegroup( + name = "default_guidelines", + srcs = glob(["guidelines/*.md"]), + visibility = ["//visibility:public"], +) + +# Core AI checker library (analysis framework) +py_library( + name = "ai_checker_core", + srcs = glob(["src/ai_checker/*.py"]), + imports = ["src"], + visibility = ["//visibility:public"], + deps = [ + "@trlc//trlc", + requirement("bigtree"), + requirement("pydantic"), + requirement("pydot"), + requirement("pyyaml"), + ], +) + +# LangChain adapter for GitHub Copilot SDK +py_library( + name = "copilot_langchain", + srcs = [ + "src/copilot_adapter/__init__.py", + "src/copilot_adapter/copilot_langchain.py", + ], + imports = ["src"], + visibility = ["//visibility:public"], + deps = [ + requirement("langchain-core"), + requirement("github-copilot-sdk"), + requirement("pydantic"), + ], +) + +# Default orchestrator (uses GitHub Copilot SDK as default AI backend) +py_binary( + name = "orchestrator", + srcs = ["src/ai_checker/orchestrator.py"], + imports = ["src"], + main = "src/ai_checker/orchestrator.py", + visibility = ["//visibility:public"], + deps = [ + ":ai_checker_core", + ":copilot_langchain", + requirement("langchain-core"), + ], +) + +# Run: bazel run //validation/ai_checker:requirements.update +compile_pip_requirements( + name = "requirements", + src = "requirements.txt.in", + requirements_txt = "requirements.txt", +) diff --git a/validation/ai_checker/README.md b/validation/ai_checker/README.md new file mode 100644 index 00000000..f500bebb --- /dev/null +++ b/validation/ai_checker/README.md @@ -0,0 +1,366 @@ +# AI Checker + +AI-powered analysis tool for engineering artefacts against guidelines. + +--- + +## User Guide + +### What It Does + +The AI Checker analyzes TRLC requirements and architectural design artefacts +against engineering guidelines using an AI model. For each artefact it +produces: + +- a list of **findings** (categorized as *Major* or *Minor*) +- a list of **suggestions** for improvement +- a numerical **quality score** from 0 to 10 + +Results are written as a JSON file and, optionally, an HTML report. + +### Prerequisites + +- A GitHub Copilot license (default backend) **or** a custom AI model + (see [Custom AI Model](#custom-ai-model)) +- Bazel + +### Running a Check + +Add a rule to your `BUILD` file and run it with `--config=copilot`: + +```starlark +load("@score_tooling//validation/ai_checker:ai_checker.bzl", "trlc_requirements_ai_test") + +trlc_requirements_ai_test( + name = "requirements_ai_check", + reqs = [":my_requirements"], + score_threshold = "6.0", + tags = ["manual"], +) +``` + +```bash +bazel test //path/to:requirements_ai_check --config=copilot +``` + +The `tags = ["manual"]` attribute is recommended to prevent the rule from +running during routine `bazel test //...` sweeps. + +### Rule Reference + +#### `trlc_requirements_ai_test` + +Analyzes TRLC requirements against the built-in requirements engineering +guidelines. 
+ +```starlark +trlc_requirements_ai_test( + name = "requirements_ai_check", + reqs = [":my_requirements"], # required: targets providing TrlcProviderInfo + model = "anthropic/claude-sonnet-4-5", # optional: AI model to use + score_threshold = "6.0", # optional: minimum average score to pass (0–10) + guidelines = "//my/org:guidelines", # optional: override default guideline filegroup + tags = ["manual"], +) +``` +
+| Attribute | Description | Required | Default |
+|-----------|-------------|----------|---------|
+| `reqs` | Label list of targets providing `TrlcProviderInfo` | Yes | — |
+| `model` | AI model identifier | No | `"anthropic/claude-sonnet-4-5"` |
+| `score_threshold` | Minimum average score (0–10) to pass the test | No | `"0.0"` |
+| `guidelines` | Filegroup of guideline markdown files | No | `default_guidelines` |
+
+#### `architecture_ai_test`
+
+Analyzes architectural design artefacts against the built-in architecture +guidelines.
+
+```starlark +architecture_ai_test( + name = "architecture_ai_check", + designs = [":my_architectural_design"], # required: targets providing ArchitecturalDesignInfo + model = "anthropic/claude-sonnet-4-5", + score_threshold = "6.0", + tags = ["manual"], +) +``` +
+| Attribute | Description | Required | Default |
+|-----------|-------------|----------|---------|
+| `designs` | Label list of targets providing `ArchitecturalDesignInfo` | Yes | — |
+| `model` | AI model identifier | No | `"anthropic/claude-sonnet-4-5"` |
+| `score_threshold` | Minimum average score (0–10) to pass the test | No | `"0.0"` |
+| `guidelines` | Filegroup of guideline markdown files | No | `default_architecture_guidelines` |
+
+### Output
+
+Each test rule produces two output files:
+
+| File | Content |
+|------|---------|
+| `_analysis.json` | Machine-readable results (scores, findings, suggestions) |
+| `_analysis.html` | Interactive HTML report |
+
+The HTML report shows a color-coded score card per artefact, linked guideline +reference pages, and summary statistics. Both files land in `bazel-bin/`.
+
+### Debug Output
+
+To inspect the raw prompt sent to the AI model:
+
+```bash +bazel test //path/to:requirements_ai_check --config=copilot --output_groups=debug +cat bazel-bin/path/to/requirements_ai_check_debug.log +``` +
+### Custom AI Model
+
+To use a provider other than GitHub Copilot, point `_custom_ai_model` at a +target providing a single Python file that exposes a `create_chat_model()` function:
+
+```starlark +trlc_requirements_ai_test( + name = "requirements_ai_check", + reqs = [":my_requirements"], + _custom_ai_model = "//my/org:ai_model_py", +) +``` +
+See the [Integration Guide](#integration-guide) for full details on +implementing a custom model.
+
+---
+
+## Integration Guide
+
+This section describes how to use the AI Checker from another Bazel repository +(e.g., a consumer workspace that references this repo via a Bazel registry or +`git_repository`).
+
+### Step 1 — Import the Bazel Config
+
+Add this line to your root `.bazelrc` to pull in the Copilot environment +configuration:
+
+```text +try-import %workspace%/.bazelrc.ai_checker +``` +
+Copy `.bazelrc.ai_checker` from this repository into your workspace root.
+It forwards the authentication and proxy variables the Copilot CLI needs +into Bazel's sandbox: + +```text +build:copilot --action_env=HOME +build:copilot --action_env=COPILOT_GITHUB_TOKEN +build:copilot --action_env=GH_TOKEN +build:copilot --action_env=GITHUB_TOKEN +build:copilot --action_env=HTTP_PROXY +build:copilot --action_env=HTTPS_PROXY +build:copilot --action_env=NO_PROXY +build:copilot --action_env=http_proxy +build:copilot --action_env=https_proxy +build:copilot --action_env=no_proxy +``` + +**Why `--config=copilot`?** +Bazel sandboxes strip the host environment by default. The Copilot SDK's +Node.js CLI needs `HOME` (for stored OAuth tokens) and proxy variables (to +reach `api.github.com`) to be explicitly forwarded. These are scoped to +`config:copilot` so they do not affect other build actions. + +**Authentication** — at least one of the following must be available inside +the sandbox: + +| Variable | Purpose | +|----------|---------| +| `COPILOT_GITHUB_TOKEN` | Explicit token — recommended for CI | +| `GH_TOKEN` | GitHub CLI compatible | +| `GITHUB_TOKEN` | GitHub Actions compatible | +| `HOME` | Lets the CLI find stored OAuth credentials in `~/.copilot/` | + +### Step 2 — Declare Bazel Targets + +```starlark +load("@score_tooling//validation/ai_checker:ai_checker.bzl", + "trlc_requirements_ai_test", + "architecture_ai_test") + +# Analyze TRLC requirements +trlc_requirements_ai_test( + name = "requirements_ai_check", + reqs = [":my_requirements"], # target providing TrlcProviderInfo + model = "anthropic/claude-sonnet-4-5", + score_threshold = "6.0", # fail if average score < 6.0 + tags = ["manual"], # recommended: exclude from //... +) + +# Analyze architectural designs +architecture_ai_test( + name = "architecture_ai_check", + designs = [":my_architectural_design"], # target providing ArchitecturalDesignInfo + model = "anthropic/claude-sonnet-4-5", + score_threshold = "6.0", + tags = ["manual"], +) +``` + +**Manual tag recommendation:** Adding `tags = ["manual"]` prevents accidental +AI analysis runs during routine `bazel test //...` sweeps. Run AI tests +by targeting them explicitly: + +```bash +bazel test //path/to:requirements_ai_check --config=copilot +``` + +| Attribute | Description | Required | Default | +|-----------|-------------|----------|---------| +| `reqs` / `designs` | Targets providing `TrlcProviderInfo` or `ArchitecturalDesignInfo` | Yes | — | +| `model` | AI model identifier | No | `"anthropic/claude-sonnet-4-5"` | +| `score_threshold` | Minimum average score (0–10) to pass | No | `"0.0"` | +| `guidelines` | Custom guideline filegroup | No | `default_guidelines` / `default_architecture_guidelines` | + +### Overriding Guidelines + +Each rule uses a default `guidelines` filegroup. Override per target to +supply organisation-specific rules: + +```starlark +trlc_requirements_ai_test( + name = "my_ai_check", + reqs = [":my_requirements"], + guidelines = "//my/org:custom_guidelines", +) +``` + +### Custom AI Model (Bazel) + +To substitute a different AI backend at the Bazel level, provide a +`_custom_ai_model` attribute pointing to your `ai_model.py` file: + +```starlark +trlc_requirements_ai_test( + name = "requirements_ai_check", + reqs = [":my_requirements"], + _custom_ai_model = "//my/org:ai_model_py", +) +``` + +The file must expose `create_chat_model(model_name, max_completion_tokens)`. 
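A minimal sketch of such a file, assuming the consumer repository also depends on `langchain-openai` (the provider class here is illustrative; any LangChain `BaseChatModel` implementation can be returned):

```python
# ai_model.py -- hypothetical custom backend (a sketch, not shipped with the checker).
# Assumes the consumer repo provides langchain-openai; substitute whatever
# LangChain BaseChatModel implementation your organisation uses.
from langchain_core.language_models import BaseChatModel
from langchain_openai import ChatOpenAI


def create_chat_model(model_name: str, max_completion_tokens: int) -> BaseChatModel:
    # The orchestrator calls this factory instead of constructing ChatCopilot,
    # then uses the returned model for its structured-output requests.
    return ChatOpenAI(model=model_name, max_tokens=max_completion_tokens)
```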
+ +### Debug Output + +To inspect the raw input sent to the AI model and response timing: + +```bash +bazel build //path/to:requirements_ai_check --config=copilot --output_groups=debug +cat bazel-bin/path/to/requirements_ai_check_debug.log +``` + +The debug log contains: +- Python version, model name, and guidelines path +- Batch processing information +- Complete system message (guidelines) and human message (artefacts) +- Response timing and token cost statistics + +--- + +## Developer Guide + +### Architecture + +The AI Checker is organized into two source layers and one extension point: + +| Directory | Purpose | +|-----------|---------| +| `src/ai_checker/` | Core analysis framework (extraction, scoring, caching, reporting). Depends on `langchain-core` for the `BaseChatModel` interface. | +| `src/copilot_adapter/` | `ChatCopilot` — LangChain `BaseChatModel` wrapper for the GitHub Copilot SDK. | + +### Diagrams + +**Deployment overview:** + +![Deployment Diagram](_assets/deployment_diagram.svg) + +**Class relationships:** + +![Class Diagram](_assets/class_diagram.svg) + +### Key Components + +#### `AIChecker` (`src/ai_checker/ai_checker_core.py`) + +Performs the async AI analysis. Responsibilities: + +- Splits artefacts into batches (by count via `--batch-size` and by total + character length via `--max-batch-chars`) +- Processes batches concurrently, rate-limited by an `asyncio.Semaphore` +- Calls `BaseChatModel.with_structured_output(AnalysisResults).ainvoke()` +- Manages the optional result cache (`AnalysisCache`) + +#### `ChatCopilot` (`src/copilot_adapter/copilot_langchain.py`) + +A full `BaseChatModel` implementation backed by the GitHub Copilot SDK CLI +(a Node.js binary). Provides: + +- Standard LangChain message types (system, human, AI, tool) +- Tool calling via `bind_tools()` +- Structured output via `with_structured_output()` +- Native async generation (`_agenerate`) and a sync bridge (`_generate`) +- Pre-flight checks: CLI binary presence, executable bit, `HOME`, proxy vars +- Post-start authentication verification via `get_auth_status()` + +**Why a separate adapter package?** +The `rules_python` wheel packaging strips the executable bit from the +Copilot CLI binary. `ChatCopilot` locates the executable copy created by +the `pip.whl_mods / copy_executables` mechanism and provides clear +diagnostic messages when the environment is misconfigured. The package is +named `copilot_adapter` (not `langchain`) to avoid shadowing the real +`langchain` PyPI package when `imports = ["src"]` is active in Bazel. + +#### `RequirementExtractor` (`src/ai_checker/requirement_extractor.py`) + +Parses TRLC files using the TRLC Python API and returns artefacts as +`dict[str, dict[str, Any]]`. Only objects whose source file resides under +the `--input` directory are analyzed; objects from `--deps` directories are +loaded solely for link resolution. + +#### `AnalysisOrchestrator` (`src/ai_checker/orchestrator.py`) + +Top-level coordinator. Instantiates the extractor, guidelines reader, AI +checker, and result formatter; wires them together; and exposes the CLI +entry point (`main()`). + +#### `GuidelinesReader` (`src/ai_checker/guidelines_reader.py`) + +Reads all `*.md` files from a flat guidelines directory and concatenates +them into the system-message string sent to the AI model. + +#### `ResultFormatter` (`src/ai_checker/result_formatter.py`) + +Formats `AnalysisResults` as JSON or HTML. The HTML report generates +per-guideline markdown subpages linked from the main report. 
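For downstream tooling, the JSON report can also be consumed directly. A short sketch, assuming the serialized keys match the `RequirementAnalysis` model above (the generated test script already reads the `analyses` and `score` keys; the report path is illustrative):

```python
# Hypothetical consumer of a *_analysis.json report.
import json
import pathlib

report = pathlib.Path("bazel-bin/path/to/requirements_ai_check_analysis.json")
data = json.loads(report.read_text())

for analysis in data["analyses"]:
    # Fields per the RequirementAnalysis model: requirement_id, description,
    # findings, suggestions, score.
    print(f"{analysis['requirement_id']}: score {analysis['score']}")
    for finding in analysis["findings"]:
        print(f"  finding: {finding}")
```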
+ +### Caching Design +
+`AnalysisCache` keys results by `SHA-256(artefacts_json + guidelines + model_name)`. +It is **only** usable via the CLI `--cache` flag. The Bazel rule deliberately +omits `--cache` because Bazel's action cache provides equivalent re-use without +breaking hermeticity.
+
+### Adding a New Artefact Type
+
+1. Subclass `ArtefactExtractor` (`src/ai_checker/artefact_extractor.py`) and + implement `extract() -> dict[str, dict[str, Any]]`.
+2. Instantiate your extractor in `AnalysisOrchestrator.analyze_directory()` + based on the input file types detected.
+3. Add a corresponding Bazel rule in `ai_checker.bzl` following the pattern of + `_trlc_requirements_ai_test_impl`.
+
+### Updating Python Dependencies
+
+```bash +# Core + Copilot SDK dependencies +bazel run //validation/ai_checker:requirements.update +``` diff --git a/validation/ai_checker/_assets/class_diagram.puml b/validation/ai_checker/_assets/class_diagram.puml new file mode 100644 index 00000000..c080daa0 --- /dev/null +++ b/validation/ai_checker/_assets/class_diagram.puml @@ -0,0 +1,140 @@ +@startuml +
+class RequirementAnalysis <<BaseModel>> { + +requirement_id: str + +description: str + +findings: List[str] + +suggestions: List[str] + +score: float +} +
+class AnalysisResults <<BaseModel>> { + +analyses: List[RequirementAnalysis] +} +
+class AnalysisCache { + -_cache_dir: Path + +__init__(cache_dir) + +get(cache_hash): Optional[AnalysisResults] + +set(cache_hash, results): None + +is_enabled(): bool +} +
+class AIChecker { + -_model_name: str + -_cache: AnalysisCache + -_batch_size: int + -_max_concurrent_requests: int + -_max_batch_chars: int + -_semaphore: asyncio.Semaphore + +__init__(model_name, cache_dir, debug_log, batch_size, max_concurrent_requests, max_batch_chars) + +<<async>> analyze(artefacts, guidelines_content, chat_model): AnalysisResults + -_generate_cache_key(artefacts, guidelines_content): str + -_format_artefacts_for_analysis(artefacts): str + -_create_batches(artefacts): List[Dict] + -<<async>> _analyze_batch_async(batch_number, artefacts, guidelines_content, chat_model): List[RequirementAnalysis] +} +
+class ChatCopilot <<BaseChatModel>> { + +model: str + +timeout: float + -_bound_tools: list + -_tool_choice: Optional[str] + +_generate(messages, stop, run_manager, **kwargs): ChatResult + +_agenerate(messages, stop, run_manager, **kwargs): ChatResult + +bind_tools(tools, **kwargs): Runnable + +with_structured_output(schema, **kwargs): Runnable +} +
+interface ArtefactExtractor <<interface>> { + {abstract} +extract(): Dict[str, Dict[str, Any]] +} +
+class RequirementExtractor { + -input_directory: str + -dependency_directories: Optional[List[str]] + -symbols: Optional[Symbol_Table] + +__init__(input_directory, dependency_directories) + +extract(): Dict[str, Dict[str, Any]] + +parse_trlc_files(): Symbol_Table + +extract_requirements_data(): List[Dict[str, Any]] + +extract_field_value(obj, field_name): Optional[Any] +} +
+class AnalysisOrchestrator { + -ai_checker: AIChecker + -artefact_extractor: ArtefactExtractor + -guidelines_reader: GuidelinesReader + -_chat_model: BaseChatModel + -model_name: str + -guidelines_path: str + -guidelines_content: str + -_custom_ai_model: Optional[str] + +__init__(model_name, guidelines_path, cache_dir, debug_log, batch_size, custom_ai_model) + +analyze_directory(input_dir, dependency_dirs): AnalysisResults + +format_and_output(analysis_results, output_file, html_file, guidelines_output_dir): None +} +
+class GuidelinesReader { + -guidelines_dir: str + -guidelines: Dict[str, str] + +__init__(guidelines_dir) +
+get_guideline(name): str + +get_all_guidelines(): Dict[str, str] + -_load_all_guidelines(): None + -_read_file(file_path): str +} + +class ResultFormatter { + -results: AnalysisResults + -model_name: str + -guidelines_reader: Optional[GuidelinesReader] + -guidelines_output_dir: Optional[str] + -original_requirements: Optional[Dict] + -git_hash: str + -timestamp: str + +__init__(analysis_results, model_name, guidelines_reader, guidelines_output_dir, original_requirements) + +output(file_path): None + -_print_to_stdout(): None + -_write_json(path): None + -_write_html(path): None + -_generate_html_report(): str + -_generate_guideline_pages(main_report_path): None + {static} -_get_git_hash(): str + {static} -_get_timestamp(): str +} + +AnalysisResults o-- RequirementAnalysis +AnalysisCache ..> AnalysisResults +AIChecker o-- AnalysisCache +AIChecker ..> AnalysisResults +ArtefactExtractor <|.. RequirementExtractor +AnalysisOrchestrator o-- AIChecker +AnalysisOrchestrator o-- GuidelinesReader +AnalysisOrchestrator o-- ArtefactExtractor +AnalysisOrchestrator --> ChatCopilot: creates (default) +AnalysisOrchestrator ..> AnalysisResults +ResultFormatter ..> AnalysisResults +ResultFormatter o-- "optional" GuidelinesReader + +note right of AnalysisOrchestrator + Default: creates ChatCopilot. + Custom: loads ai_model.py + at runtime for alternative + AI model implementations. +end note + +note right of ChatCopilot + LangChain BaseChatModel wrapper + for the GitHub Copilot SDK. + Uses Copilot CLI (Node.js binary) + for API communication. +end note + +note right of ArtefactExtractor + Interface allows plugging + different extractors + (TRLC, code, etc.) +end note + +@enduml diff --git a/validation/ai_checker/_assets/class_diagram.svg b/validation/ai_checker/_assets/class_diagram.svg new file mode 100644 index 00000000..e773e88c --- /dev/null +++ b/validation/ai_checker/_assets/class_diagram.svg @@ -0,0 +1 @@ +«BaseModel»RequirementAnalysisrequirement_id: strdescription: strfindings: List[str]suggestions: List[str]score: float«BaseModel»AnalysisResultsanalyses: List[RequirementAnalysis]AnalysisCache_cache_dir: Path__init__(cache_dir)get(cache_hash): Optional[AnalysisResults]set(cache_hash, results): Noneis_enabled(): boolAIChecker_model_name: str_cache: AnalysisCache_batch_size: int_max_concurrent_requests: int_max_batch_chars: int_semaphore: asyncio.Semaphore__init__(model_name, cache_dir, debug_log, batch_size, max_concurrent_requests, max_batch_chars)«async» analyze(artefacts, guidelines_content, chat_model): AnalysisResults_generate_cache_key(artefacts, guidelines_content): str_format_artefacts_for_analysis(artefacts): str_create_batches(artefacts): List[Dict]«async» _analyze_batch_async(batch_number, artefacts, guidelines_content, chat_model): List[RequirementAnalysis]«BaseChatModel»ChatCopilotmodel: strtimeout: float_bound_tools: list_tool_choice: Optional[str]_generate(messages, stop, run_manager, **kwargs): ChatResult_agenerate(messages, stop, run_manager, **kwargs): ChatResultbind_tools(tools, **kwargs): Runnablewith_structured_output(schema, **kwargs): Runnable«interface»ArtefactExtractorextract(): Dict[str, Dict[str, Any]]RequirementExtractorinput_directory: strdependency_directories: Optional[List[str]]symbols: Optional[Symbol_Table]__init__(input_directory, dependency_directories)extract(): Dict[str, Dict[str, Any]]parse_trlc_files(): Symbol_Tableextract_requirements_data(): List[Dict[str, Any]]extract_field_value(obj, field_name): Optional[Any]AnalysisOrchestratorai_checker: 
AICheckerartefact_extractor: ArtefactExtractorguidelines_reader: GuidelinesReader_chat_model: BaseChatModelmodel_name: strguidelines_path: strguidelines_content: str_custom_ai_model: Optional[str]__init__(model_name, guidelines_path, cache_dir, debug_log, batch_size, custom_ai_model)analyze_directory(input_dir, dependency_dirs): AnalysisResultsformat_and_output(analysis_results, output_file, html_file, guidelines_output_dir): NoneGuidelinesReaderguidelines_dir: strguidelines: Dict[str, str]__init__(guidelines_dir)get_guideline(name): strget_all_guidelines(): Dict[str, str]_load_all_guidelines(): None_read_file(file_path): strResultFormatterresults: AnalysisResultsmodel_name: strguidelines_reader: Optional[GuidelinesReader]guidelines_output_dir: Optional[str]original_requirements: Optional[Dict]git_hash: strtimestamp: str__init__(analysis_results, model_name, guidelines_reader, guidelines_output_dir, original_requirements)output(file_path): None_print_to_stdout(): None_write_json(path): None_write_html(path): None_generate_html_report(): str_generate_guideline_pages(main_report_path): None_get_git_hash(): str_get_timestamp(): strDefault: creates ChatCopilot.Custom: loads ai_model.pyat runtime for alternativeAI model implementations.LangChain BaseChatModel wrapperfor the GitHub Copilot SDK.Uses Copilot CLI (Node.js binary)for API communication.Interface allows pluggingdifferent extractors(TRLC, code, etc.)creates (default)optional \ No newline at end of file diff --git a/validation/ai_checker/_assets/deployment_diagram.puml b/validation/ai_checker/_assets/deployment_diagram.puml new file mode 100644 index 00000000..2a0424e8 --- /dev/null +++ b/validation/ai_checker/_assets/deployment_diagram.puml @@ -0,0 +1,87 @@ +@startuml + +skinparam packageStyle rectangle + +interface "TRLC Python API" as TRLCAPI +interface "LangChain\nBaseChatModel" as LangChain + +package "Input Artifacts" { + artifact "TRLC\nRequirements" as ReqFile + artifact "Guidelines\n(Markdown)" as GuideFile +} + +package "src/ai_checker/ (Core Framework)" as CorePkg { + component "Orchestrator\n(py_binary)" as Orchestrator + component "AIChecker\n(Async)" as AIChecker + component "Requirement\nExtractor" as Extractor + component "Result\nFormatter" as Formatter + component "Guidelines\nReader" as GReader + component "Analysis\nCache" as Cache +} + +package "src/copilot_adapter/ (LangChain Integration)" as LangChainPkg { + component "ChatCopilot\n(BaseChatModel)" as ChatCopilot +} + + + +package "Output Artifacts" { + artifact "HTML\nReport" as HtmlReport + artifact "JSON\nResults" as JsonResults +} + +package "AI Services" { + cloud "GitHub Copilot API\n(default)" as CopilotAPI + cloud "Custom AI Service\n(via ai_model.py)" as CustomCloud +} + +package "Bazel Configuration" { + component "--config=copilot\n(.bazelrc.ai_checker)" as CopilotConfig +} + +ReqFile --> TRLCAPI: parsed via +TRLCAPI --> Extractor: symbol table +Extractor --> Orchestrator: artefacts\n(Dict format) +GuideFile --> GReader: loads +GReader --> Orchestrator: guidelines content + +Orchestrator --> AIChecker: async analyze\nwith batches +AIChecker --> Cache: optional\n(CLI only) + +AIChecker --> LangChain: async ainvoke\n(concurrent) +ChatCopilot ..|> LangChain: implements +ChatCopilot --> CopilotAPI: Copilot SDK\n(Node.js CLI) + +LangChain --> CustomCloud: custom provider + +LangChain ..> AIChecker: structured output\n(AnalysisResults) +AIChecker ..> Orchestrator: AnalysisResults + +Orchestrator --> Formatter: pass results +Formatter --> HtmlReport: generates 
+Formatter --> JsonResults: generates + +CopilotConfig ..> ChatCopilot: env vars\n(auth + proxy) + +note right of ChatCopilot + LangChain BaseChatModel wrapper for + the GitHub Copilot SDK. + Supports: messages, tool calling, + structured output, async/sync. +end note + +note as N1 + **AIChecker - Async Features:** + - Concurrent batch processing + - Rate limiting (Semaphore) + - Smart batching (count + size) +end note + +note as N2 + **Cache:** + Intentionally disabled for Bazel builds + (Bazel has its own action cache). + Only available via CLI: --cache +end note + +@enduml diff --git a/validation/ai_checker/_assets/deployment_diagram.svg b/validation/ai_checker/_assets/deployment_diagram.svg new file mode 100644 index 00000000..5cf6d8e9 --- /dev/null +++ b/validation/ai_checker/_assets/deployment_diagram.svg @@ -0,0 +1 @@ +Input Artifactssrc/ai_checker/ (Core Framework)src/copilot_adapter/ (LangChain Integration)Output ArtifactsAI ServicesBazel ConfigurationTRLCRequirementsGuidelines(Markdown)Orchestrator(py_binary)AIChecker(Async)RequirementExtractorResultFormatterGuidelinesReaderAnalysisCacheChatCopilot(BaseChatModel)HTMLReportJSONResultsGitHub Copilot API(default)Custom AI Service(via ai_model.py)--config=copilot(.bazelrc.ai_checker)TRLC Python APILangChainBaseChatModelLangChain BaseChatModel wrapper forthe GitHub Copilot SDK.Supports: messages, tool calling,structured output, async/sync.AIChecker - Async Features:- Concurrent batch processing- Rate limiting (Semaphore)- Smart batching (count + size)Cache:Intentionally disabled for Bazel builds(Bazel has its own action cache).Only available via CLI: --cache <dir>parsed viasymbol tableartefacts(Dict format)loadsguidelines contentasync analyzewith batchesAnalysisResultsoptional(CLI only)async ainvoke(concurrent)structured output(AnalysisResults)implementsCopilot SDK(Node.js CLI)custom providerpass resultsgeneratesgeneratesenv vars(auth + proxy) \ No newline at end of file diff --git a/validation/ai_checker/ai_checker.bzl b/validation/ai_checker/ai_checker.bzl new file mode 100644 index 00000000..42631f9f --- /dev/null +++ b/validation/ai_checker/ai_checker.bzl @@ -0,0 +1,270 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +"""Bazel rules for AI-powered artefact testing. + +Provides rules for analyzing TRLC requirements and architectural designs +against engineering guidelines using an AI checker. +""" + +load("@trlc//:trlc.bzl", "TrlcProviderInfo") +load("//bazel/rules/rules_score:providers.bzl", "ArchitecturalDesignInfo") + +# ============================================================================ +# Shared implementation +# ============================================================================ + +def _run_ai_analysis(ctx, analysis_files, all_input_files, input_dirs, dep_dirs): + """Common implementation for all AI artefact analysis test rules. + + Args: + ctx: Rule context. + analysis_files: Files to analyze (direct inputs). + all_input_files: All files needed as action inputs (incl. 
deps for resolution). + input_dirs: Dict of directories containing analysis files. + dep_dirs: Dict of dependency directories (for link resolution). + + Returns: + List of providers (DefaultInfo). + """ + if not analysis_files: + fail("No artefact files found for analysis") + + # Declare outputs + html_report = ctx.actions.declare_file("{}_analysis.html".format(ctx.attr.name)) + json_report = ctx.actions.declare_file("{}_analysis.json".format(ctx.attr.name)) + guidelines_output_dir = ctx.actions.declare_directory("guidelines") + debug_log = ctx.actions.declare_file("{}_debug.log".format(ctx.attr.name)) + + # Collect guideline files from the filegroup + guideline_files = ctx.files.guidelines + + # Determine input and guidelines directories + input_dir = analysis_files[0].dirname + guidelines_dir = guideline_files[0].dirname if guideline_files else None + + # Build arguments for the orchestrator + args = ctx.actions.args() + args.add("--input", input_dir) + + for dep_dir in dep_dirs.keys(): + args.add("--deps", dep_dir) + + for extra_dir in input_dirs.keys(): + if extra_dir != input_dir: + args.add("--deps", extra_dir) + + args.add("--output", json_report.path) + args.add("--html", html_report.path) + args.add("--guidelines-output", guidelines_output_dir.path) + + if guidelines_dir: + args.add("--guidelines", guidelines_dir) + + if ctx.attr.model: + args.add("--model", ctx.attr.model) + + if ctx.attr._batch_size > 0: + args.add("--batch-size", str(ctx.attr._batch_size)) + + # NOTE: --cache is intentionally NOT passed. Bazel actions are + # already cached by Bazel's action cache; an additional Python-level + # cache would break hermeticity. The --cache flag is only available + # for direct CLI invocations (python orchestrator.py --cache ). + + # Prepare action inputs (include custom ai_model if provided) + action_inputs = all_input_files + guideline_files + if ctx.attr._custom_ai_model: + custom_ai_model_file = ctx.attr._custom_ai_model[DefaultInfo].files.to_list() + if custom_ai_model_file: + action_inputs.extend(custom_ai_model_file) + args.add("--custom-ai-model", custom_ai_model_file[0].path) + + # Add debug log output for Bazel output_groups + args.add("--debug-log", debug_log.path) + args.add("--verbose") + + ctx.actions.run( + executable = ctx.executable._orchestrator, + inputs = depset(direct = action_inputs), + outputs = [json_report, html_report, guidelines_output_dir, debug_log], + arguments = [args], + progress_message = "Analyzing artefacts with AI for {}".format(ctx.attr.name), + # NOTE: no-sandbox is required because the GitHub Copilot CLI needs + # outbound network access (api.github.com) and the user's $HOME + # directory (for stored OAuth credentials), both of which Bazel's + # sandbox blocks. This is an inherent trade-off of using an external + # AI service from a Bazel action. Hermeticity is partially preserved + # by Bazel's own action-cache keying on declared inputs. + execution_requirements = {"no-sandbox": "1"}, + use_default_shell_env = True, + ) + + # Test executable — validates the JSON report score against the threshold + test_executable = ctx.actions.declare_file("{}_test_executable".format(ctx.attr.name)) + + command = """#!/bin/bash +set -e +set -o pipefail + +json_path="{json}" + +if [ ! -f "$json_path" ] || [ ! 
-s "$json_path" ]; then + echo "ERROR: JSON report was not generated or is empty" + exit 1 +fi + +average=$(python3 -c " +import json, pathlib, sys +data = json.loads(pathlib.Path(sys.argv[1]).read_text()) +scores = [a['score'] for a in data.get('analyses', [])] +print(f'{{sum(scores)/len(scores):.2f}}' if scores else '0') +" "$json_path") + +threshold="{threshold}" + +if (( $(echo "$average >= $threshold" | bc -l) )); then + echo "AI analysis complete. Average score: $average (threshold: $threshold)" + exit 0 +else + echo "ERROR: Average score $average is below threshold $threshold" + exit 1 +fi +""".format(json = json_report.short_path, threshold = ctx.attr.score_threshold) + + ctx.actions.write( + output = test_executable, + content = command, + is_executable = True, + ) + + return [ + DefaultInfo( + runfiles = ctx.runfiles( + files = [json_report, html_report, guidelines_output_dir], + ), + files = depset([json_report, html_report, guidelines_output_dir]), + executable = test_executable, + ), + OutputGroupInfo( + debug = depset([debug_log]), + ), + ] + +# Attributes shared by all AI test rules +_COMMON_AI_TEST_ATTRS = { + "model": attr.string( + doc = "AI model name to use for analysis.", + default = "anthropic/claude-sonnet-4-5", + ), + "score_threshold": attr.string( + doc = "Minimum average score required to pass the test (0-10).", + default = "0.0", + ), + "_batch_size": attr.int( + doc = "Number of artefacts to process per batch (0 = all at once).", + default = 0, + ), + "_custom_ai_model": attr.label( + doc = "Custom ai_model.py file (optional, provided by consumer repo).", + default = None, + allow_single_file = [".py"], + ), + "_orchestrator": attr.label( + doc = "Orchestrator binary.", + default = "//validation/ai_checker:orchestrator", + executable = True, + cfg = "exec", + ), +} + +# ============================================================================ +# TRLC Requirements AI Test +# ============================================================================ + +def _trlc_requirements_ai_test_impl(ctx): + """Extract TRLC artefacts from providers and delegate to shared analysis.""" + analysis_files = [] + all_files = [] + input_dirs = {} + dep_dirs = {} + + for req in ctx.attr.reqs: + trlc_provider = req[TrlcProviderInfo] + + direct_reqs = trlc_provider.reqs.to_list() + analysis_files.extend(direct_reqs) + for f in direct_reqs: + input_dirs[f.dirname] = True + + dep_reqs = trlc_provider.deps.to_list() + spec_files = trlc_provider.spec.to_list() + all_files.extend(direct_reqs + dep_reqs + spec_files) + for f in dep_reqs + spec_files: + dep_dirs[f.dirname] = True + + return _run_ai_analysis(ctx, analysis_files, all_files, input_dirs, dep_dirs) + +trlc_requirements_ai_test = rule( + implementation = _trlc_requirements_ai_test_impl, + attrs = dict(_COMMON_AI_TEST_ATTRS, **{ + "reqs": attr.label_list( + doc = "Targets providing TrlcProviderInfo.", + providers = [TrlcProviderInfo], + mandatory = True, + ), + "guidelines": attr.label( + doc = "Filegroup containing guideline markdown files.", + default = "//validation/ai_checker:default_guidelines", + allow_files = True, + ), + }), + test = True, + toolchains = [], + fragments = ["platform"], +) + +# ============================================================================ +# Architecture AI Test +# ============================================================================ + +def _architecture_ai_test_impl(ctx): + """Extract architecture artefacts from providers and delegate to shared analysis.""" + analysis_files = [] + 
input_dirs = {} + + for design in ctx.attr.designs: + design_info = design[ArchitecturalDesignInfo] + for f in design_info.static.to_list() + design_info.dynamic.to_list(): + analysis_files.append(f) + input_dirs[f.dirname] = True + + return _run_ai_analysis(ctx, analysis_files, analysis_files, input_dirs, {}) +
+architecture_ai_test = rule( + implementation = _architecture_ai_test_impl, + attrs = dict(_COMMON_AI_TEST_ATTRS, **{ + "designs": attr.label_list( + doc = "Targets providing ArchitecturalDesignInfo.", + providers = [ArchitecturalDesignInfo], + mandatory = True, + ), + "guidelines": attr.label( + doc = "Filegroup containing architecture guideline markdown files.", + default = "//validation/ai_checker:default_architecture_guidelines", + allow_files = True, + ), + }), + test = True, + toolchains = [], + fragments = ["platform"], +) diff --git a/validation/ai_checker/guidelines/general.md b/validation/ai_checker/guidelines/general.md new file mode 100644 index 00000000..e2c44c47 --- /dev/null +++ b/validation/ai_checker/guidelines/general.md @@ -0,0 +1,37 @@ +# General +
+You are a requirements quality analyst specialized in evaluating software module, tool, and process requirements within the context of a safety-related (ISO 26262) POSIX software platform. Bazel, Markdown, and RST are used on our platform.
+
+Accept the following points as fully defined and don't list them in the findings or suggestions:
+- General:
+ - "S-Core" is the name of the platform project
+- Bazel commands
+ - Link: "@//:target"
+ - public visible
+ - expose target
+- Markdown Expressions (Link: "[label](http://example.com)")
+- RST Expressions (Link: ".. _a link: https://domain.invalid/" or `a link`_ )
+- Expressions which are provided as a comment or via ``.
+
+Do a requirements review of each individual requirement:
+- Review the requirements according to the guidelines; for the sentence template, also take the optional and mandatory parts of a sentence into consideration.
+- Include the requirement type in the analysis.
+- Accept expressions as defined if in doubt
+
+Do not:
+- analyze the hierarchy of the requirements or relations between them (e.g. stated via the parent attribute)
+- mention any findings of fully defined items
+- apply the sentence template too strictly
+
+Score the requirement from 0-10 where:
+- 0-3: Critical issues, requirement needs major rework
+- 4-6: Moderate issues, improvement needed
+- 7-8: Good quality with minor improvements possible
+- 9-10: Excellent quality, meets professional standards
+
+Analysis Result:
+- Provide specific, actionable findings and relate each to a specific keyword from the document (e.g. *Vague*: Requirement contains ....)
+- Only point out findings; don't mention if something is particularly well defined
+- Categorize findings as major or minor
+- Provide an overall scoring for the requirement
+- If required, use HTML formatting diff --git a/validation/ai_checker/guidelines/requirements_guidelines.md b/validation/ai_checker/guidelines/requirements_guidelines.md new file mode 100644 index 00000000..50d56d4e --- /dev/null +++ b/validation/ai_checker/guidelines/requirements_guidelines.md @@ -0,0 +1,56 @@ +# Requirements Writing Guidelines +
+Guidelines for creating and formulating requirements, derived from the SCORE Requirements Engineering Process.
+ +## Requirement Levels +
+| Level | Scope | Derived From |
+|---|---|---|
+| **Stakeholder Requirement** | Platform-level functionality and safety mechanisms | Standards, customer needs |
+| **Feature Requirement** | Integration-level behaviour, independent of component decomposition | Stakeholder Requirements |
+| **Component Requirement** | Component-specific implementation details | Feature Requirements |
+| **Assumption of Use (AoU)** | Boundary conditions for using a software element (any level) | Safety analyses, architecture |
+
+## Sentence Template
+
+Every requirement **shall** follow this structure:
+
+> **\<subject\>** shall **\<verb\>** **\<object\>** **\<parameter\>** **\<condition\>**
+
+Of the last three parts (object, parameter, condition), at least one is mandatory — the others are optional.
+
+### Examples
+
+| Subject | shall | Verb | Object | Parameter | Condition |
+|---|---|---|---|---|---|
+| The component | shall | detect | if a key-value pair got corrupted | and set its status to INVALID | during every restart of the SW platform. |
+| The software platform | shall | enable | users | to ensure the compatibility of application software | across vehicle variants and releases. |
+| The linter-tool | shall | check | correctness of .rst files format | | upon each commit. |
+
+## Quality Criteria
+
+A well-written requirement is:
+
+- **Unambiguous** — only one possible interpretation
+- **Verifiable** — can be tested or reviewed
+- **Atomic** — expresses a single need (one "shall" per requirement)
+- **Consistent** — no contradictions with other requirements
+- **Complete** — contains subject, verb, and at least one of: object, parameter, or condition
+- **Necessary** — traceable to a parent requirement or rationale
+
+### Avoid
+
+- Vague terms: *approximately*, *as appropriate*, *user-friendly*, *fast*, *efficient*
+- Unbounded lists: *etc.*, *and so on*, *such as* (without closing the list)
+- Compound requirements: multiple "shall" statements in one requirement
+- Implementation details in stakeholder/feature requirements
+- Missing conditions or parameters that leave behaviour undefined
+
+## Requirement Types Explained
+
+| Type | Meaning | Verification |
+|---|---|---|
+| **Functional** | Behaviour that can be observed | Unit/integration test |
+| **Interface** | API or protocol specification | Test or inspection |
+| **Non-Functional** | Quality attribute (performance, reliability) | Review/analysis |
+| **Process** | Process-related constraint | Process review | diff --git a/validation/ai_checker/requirements.txt b/validation/ai_checker/requirements.txt new file mode 100644 index 00000000..4018feaa --- /dev/null +++ b/validation/ai_checker/requirements.txt @@ -0,0 +1,661 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# bazel run //validation/ai_checker:requirements.update +#
+annotated-types==0.7.0 \ + --hash=sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 \ + --hash=sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89 + # via pydantic
+anyio==4.12.1 \ + --hash=sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703 \ + --hash=sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c + # via httpx
+bigtree==1.1.0 \ + --hash=sha256:3f1ff63d2d66d31bf19855ddda8884637edf8dd1fc1aa118cf3a750580ece48b \ + --hash=sha256:f54f99d842732c91cce39c596a3755a2e8325b1cab5bc6876f5b15bd3942081c + # via -r validation/ai_checker/requirements.txt.in
+certifi==2026.1.4 \ + --hash=sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c \ + --hash=sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120 + # via + # httpcore + # httpx + # requests
+charset-normalizer==3.4.4 \ + --hash=sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad \ + --hash=sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93 \ + --hash=sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394 \ + --hash=sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89 \ + --hash=sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc \ +
--hash=sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86 \ + --hash=sha256:194f08cbb32dc406d6e1aea671a68be0823673db2832b38405deba2fb0d88f63 \ + --hash=sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d \ + --hash=sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f \ + --hash=sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8 \ + --hash=sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0 \ + --hash=sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505 \ + --hash=sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161 \ + --hash=sha256:2aaba3b0819274cc41757a1da876f810a3e4d7b6eb25699253a4effef9e8e4af \ + --hash=sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152 \ + --hash=sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318 \ + --hash=sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72 \ + --hash=sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4 \ + --hash=sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e \ + --hash=sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3 \ + --hash=sha256:44c2a8734b333e0578090c4cd6b16f275e07aa6614ca8715e6c038e865e70576 \ + --hash=sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c \ + --hash=sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1 \ + --hash=sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8 \ + --hash=sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1 \ + --hash=sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2 \ + --hash=sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44 \ + --hash=sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26 \ + --hash=sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88 \ + --hash=sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016 \ + --hash=sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede \ + --hash=sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf \ + --hash=sha256:5cb4d72eea50c8868f5288b7f7f33ed276118325c1dfd3957089f6b519e1382a \ + --hash=sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc \ + --hash=sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0 \ + --hash=sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84 \ + --hash=sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db \ + --hash=sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1 \ + --hash=sha256:6aee717dcfead04c6eb1ce3bd29ac1e22663cdea57f943c87d1eab9a025438d7 \ + --hash=sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed \ + --hash=sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8 \ + --hash=sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133 \ + --hash=sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e \ + --hash=sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef \ + --hash=sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14 \ + --hash=sha256:778d2e08eda00f4256d7f672ca9fef386071c9202f5e4607920b86d7803387f2 \ + --hash=sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0 \ + 
--hash=sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d \ + --hash=sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828 \ + --hash=sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f \ + --hash=sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf \ + --hash=sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6 \ + --hash=sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328 \ + --hash=sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090 \ + --hash=sha256:837c2ce8c5a65a2035be9b3569c684358dfbf109fd3b6969630a87535495ceaa \ + --hash=sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381 \ + --hash=sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c \ + --hash=sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb \ + --hash=sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc \ + --hash=sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a \ + --hash=sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec \ + --hash=sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc \ + --hash=sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac \ + --hash=sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e \ + --hash=sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313 \ + --hash=sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569 \ + --hash=sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3 \ + --hash=sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d \ + --hash=sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525 \ + --hash=sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894 \ + --hash=sha256:a8bf8d0f749c5757af2142fe7903a9df1d2e8aa3841559b2bad34b08d0e2bcf3 \ + --hash=sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9 \ + --hash=sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a \ + --hash=sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9 \ + --hash=sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14 \ + --hash=sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25 \ + --hash=sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50 \ + --hash=sha256:b7cf1017d601aa35e6bb650b6ad28652c9cd78ee6caff19f3c28d03e1c80acbf \ + --hash=sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1 \ + --hash=sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3 \ + --hash=sha256:c4ef880e27901b6cc782f1b95f82da9313c0eb95c3af699103088fa0ac3ce9ac \ + --hash=sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e \ + --hash=sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815 \ + --hash=sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c \ + --hash=sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6 \ + --hash=sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6 \ + --hash=sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e \ + --hash=sha256:cd4b7ca9984e5e7985c12bc60a6f173f3c958eae74f3ef6624bb6b26e2abbae4 \ + --hash=sha256:ce8a0633f41a967713a59c4139d29110c07e826d131a316b50ce11b1d79b4f84 \ + 
--hash=sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69 \ + --hash=sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15 \ + --hash=sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191 \ + --hash=sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0 \ + --hash=sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897 \ + --hash=sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd \ + --hash=sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2 \ + --hash=sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794 \ + --hash=sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d \ + --hash=sha256:e912091979546adf63357d7e2ccff9b44f026c075aeaf25a52d0e95ad2281074 \ + --hash=sha256:eaabd426fe94daf8fd157c32e571c85cb12e66692f15516a83a03264b08d06c3 \ + --hash=sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224 \ + --hash=sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838 \ + --hash=sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a \ + --hash=sha256:f155a433c2ec037d4e8df17d18922c3a0d9b3232a396690f17175d2946f0218d \ + --hash=sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d \ + --hash=sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f \ + --hash=sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8 \ + --hash=sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490 \ + --hash=sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966 \ + --hash=sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9 \ + --hash=sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3 \ + --hash=sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e \ + --hash=sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608 + # via requests +github-copilot-sdk==0.1.25 \ + --hash=sha256:13ef99fa8c709c5f80d820672bf36ee9176bc33f0efce6a2b5cbf6d1bb2369e8 \ + --hash=sha256:1a90ee583309ff308fea42f9edec61203645a33ca1d3dc42953628fb8c3eda07 \ + --hash=sha256:5249a63d1ac1e4d325c70c9902e81327b0baca53afa46010f52ac3fd3b5a111b \ + --hash=sha256:7af33d3afbe09a78dfc9d65a843526e47aba15631e90926c42a21a200fab12da \ + --hash=sha256:bc74a3d08ee45313ac02a3f7159c583ec41fc16090ec5f27f88c4b737f03139e \ + --hash=sha256:d32c3fc2c393f70923a645a133607da2e562d078b87437f499100d5bb8c1902f + # via -r validation/ai_checker/requirements.txt.in +h11==0.16.0 \ + --hash=sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1 \ + --hash=sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86 + # via httpcore +httpcore==1.0.9 \ + --hash=sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55 \ + --hash=sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8 + # via httpx +httpx==0.28.1 \ + --hash=sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc \ + --hash=sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad + # via langsmith +idna==3.11 \ + --hash=sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea \ + --hash=sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902 + # via + # anyio + # httpx + # requests +jsonpatch==1.33 \ + --hash=sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade \ + 
--hash=sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c + # via langchain-core +jsonpointer==3.0.0 \ + --hash=sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942 \ + --hash=sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef + # via jsonpatch +langchain-core==1.2.13 \ + --hash=sha256:b31823e28d3eff1e237096d0bd3bf80c6f9624eb471a9496dbfbd427779f8d82 \ + --hash=sha256:d2773d0d0130a356378db9a858cfeef64c3d64bc03722f1d4d6c40eb46fdf01b + # via -r validation/ai_checker/requirements.txt.in +langsmith==0.6.3 \ + --hash=sha256:33246769c0bb24e2c17e0c34bb21931084437613cd37faf83bd0978a297b826f \ + --hash=sha256:44fdf8084165513e6bede9dda715e7b460b1b3f57ac69f2ca3f03afa911233ec + # via langchain-core +orjson==3.11.5 \ + --hash=sha256:0522003e9f7fba91982e83a97fec0708f5a714c96c4209db7104e6b9d132f111 \ + --hash=sha256:073aab025294c2f6fc0807201c76fdaed86f8fc4be52c440fb78fbb759a1ac09 \ + --hash=sha256:09b94b947ac08586af635ef922d69dc9bc63321527a3a04647f4986a73f4bd30 \ + --hash=sha256:1b280e2d2d284a6713b0cfec7b08918ebe57df23e3f76b27586197afca3cb1e9 \ + --hash=sha256:1b6bd351202b2cd987f35a13b5e16471cf4d952b42a73c391cc537974c43ef6d \ + --hash=sha256:1cbf2735722623fcdee8e712cbaaab9e372bbcb0c7924ad711b261c2eccf4a5c \ + --hash=sha256:1db2088b490761976c1b2e956d5d4e6409f3732e9d79cfa69f876c5248d1baf9 \ + --hash=sha256:23d04c4543e78f724c4dfe656b3791b5f98e4c9253e13b2636f1af5d90e4a880 \ + --hash=sha256:298d2451f375e5f17b897794bcc3e7b821c0f32b4788b9bcae47ada24d7f3cf7 \ + --hash=sha256:2b91126e7b470ff2e75746f6f6ee32b9ab67b7a93c8ba1d15d3a0caaf16ec875 \ + --hash=sha256:2cc79aaad1dfabe1bd2d50ee09814a1253164b3da4c00a78c458d82d04b3bdef \ + --hash=sha256:334e5b4bff9ad101237c2d799d9fd45737752929753bf4faf4b207335a416b7d \ + --hash=sha256:38b22f476c351f9a1c43e5b07d8b5a02eb24a6ab8e75f700f7d479d4568346a5 \ + --hash=sha256:3b01799262081a4c47c035dd77c1301d40f568f77cc7ec1bb7db5d63b0a01629 \ + --hash=sha256:3c8d8a112b274fae8c5f0f01954cb0480137072c271f3f4958127b010dfefaec \ + --hash=sha256:3fd15f9fc8c203aeceff4fda211157fad114dde66e92e24097b3647a08f4ee9e \ + --hash=sha256:42e8961196af655bb5e63ce6c60d25e8798cd4dfbc04f4203457fa3869322c2e \ + --hash=sha256:4bdd8d164a871c4ec773f9de0f6fe8769c2d6727879c37a9666ba4183b7f8228 \ + --hash=sha256:4dad582bc93cef8f26513e12771e76385a7e6187fd713157e971c784112aad56 \ + --hash=sha256:53deb5addae9c22bbe3739298f5f2196afa881ea75944e7720681c7080909a81 \ + --hash=sha256:54aae9b654554c3b4edd61896b978568c6daa16af96fa4681c9b5babd469f863 \ + --hash=sha256:59ac72ea775c88b163ba8d21b0177628bd015c5dd060647bbab6e22da3aad287 \ + --hash=sha256:5f0a2ae6f09ac7bd47d2d5a5305c1d9ed08ac057cda55bb0a49fa506f0d2da00 \ + --hash=sha256:5f691263425d3177977c8d1dd896cde7b98d93cbf390b2544a090675e83a6a0a \ + --hash=sha256:61026196a1c4b968e1b1e540563e277843082e9e97d78afa03eb89315af531f1 \ + --hash=sha256:61de247948108484779f57a9f406e4c84d636fa5a59e411e6352484985e8a7c3 \ + --hash=sha256:667c132f1f3651c14522a119e4dd631fad98761fa960c55e8e7430bb2a1ba4ac \ + --hash=sha256:67394d3becd50b954c4ecd24ac90b5051ee7c903d167459f93e77fc6f5b4c968 \ + --hash=sha256:69a0f6ac618c98c74b7fbc8c0172ba86f9e01dbf9f62aa0b1776c2231a7bffe5 \ + --hash=sha256:6af8680328c69e15324b5af3ae38abbfcf9cbec37b5346ebfd52339c3d7e8a18 \ + --hash=sha256:7339f41c244d0eea251637727f016b3d20050636695bc78345cce9029b189401 \ + --hash=sha256:7403851e430a478440ecc1258bcbacbfbd8175f9ac1e39031a7121dd0de05ff8 \ + --hash=sha256:75412ca06e20904c19170f8a24486c4e6c7887dea591ba18a1ab572f1300ee9f \ + 
--hash=sha256:75bc2e59e6a2ac1dd28901d07115abdebc4563b5b07dd612bf64260a201b1c7f \ + --hash=sha256:7bb2ce0b82bc9fd1168a513ddae7a857994b780b2945a8c51db4ab1c4b751ebc \ + --hash=sha256:7cce16ae2f5fb2c53c3eafdd1706cb7b6530a67cc1c17abe8ec747f5cd7c0c51 \ + --hash=sha256:801a821e8e6099b8c459ac7540b3c32dba6013437c57fdcaec205b169754f38c \ + --hash=sha256:82393ab47b4fe44ffd0a7659fa9cfaacc717eb617c93cde83795f14af5c2e9d5 \ + --hash=sha256:82cd00d49d6063d2b8791da5d4f9d20539c5951f965e45ccf4e96d33505ce68f \ + --hash=sha256:835f26fa24ba0bb8c53ae2a9328d1706135b74ec653ed933869b74b6909e63fd \ + --hash=sha256:86cfc555bfd5794d24c6a1903e558b50644e5e68e6471d66502ce5cb5fdef3f9 \ + --hash=sha256:894aea2e63d4f24a7f04a1908307c738d0dce992e9249e744b8f4e8dd9197f39 \ + --hash=sha256:8be318da8413cdbbce77b8c5fac8d13f6eb0f0db41b30bb598631412619572e8 \ + --hash=sha256:8d5f16195bb671a5dd3d1dbea758918bada8f6cc27de72bd64adfbd748770814 \ + --hash=sha256:9172578c4eb09dbfcf1657d43198de59b6cef4054de385365060ed50c458ac98 \ + --hash=sha256:92a8d676748fca47ade5bc3da7430ed7767afe51b2f8100e3cd65e151c0eaceb \ + --hash=sha256:9645ef655735a74da4990c24ffbd6894828fbfa117bc97c1edd98c282ecb52e1 \ + --hash=sha256:9c8494625ad60a923af6b2b0bd74107146efe9b55099e20d7740d995f338fcd8 \ + --hash=sha256:9cc1e55c884921434a84a0c3dd2699eb9f92e7b441d7f53f3941079ec6ce7499 \ + --hash=sha256:9df95000fbe6777bf9820ae82ab7578e8662051bb5f83d71a28992f539d2cda7 \ + --hash=sha256:a230065027bc2a025e944f9d4714976a81e7ecfa940923283bca7bbc1f10f626 \ + --hash=sha256:a261fef929bcf98a60713bf5e95ad067cea16ae345d9a35034e73c3990e927d2 \ + --hash=sha256:a4f3cb2d874e03bc7767c8f88adaa1a9a05cecea3712649c3b58589ec7317310 \ + --hash=sha256:a66d7769e98a08a12a139049aac2f0ca3adae989817f8c43337455fbc7669b85 \ + --hash=sha256:a86fe4ff4ea523eac8f4b57fdac319faf037d3c1be12405e6a7e86b3fbc4756a \ + --hash=sha256:aa0f513be38b40234c77975e68805506cad5d57b3dfd8fe3baa7f4f4051e15b4 \ + --hash=sha256:aa5e4244063db8e1d87e0f54c3f7522f14b2dc937e65d5241ef0076a096409fd \ + --hash=sha256:acbc5fac7e06777555b0722b8ad5f574739e99ffe99467ed63da98f97f9ca0fe \ + --hash=sha256:b29d36b60e606df01959c4b982729c8845c69d1963f88686608be9ced96dbfaa \ + --hash=sha256:b42ffbed9128e547a1647a3e50bc88ab28ae9daa61713962e0d3dd35e820c125 \ + --hash=sha256:b923c1c13fa02084eb38c9c065afd860a5cff58026813319a06949c3af5732ac \ + --hash=sha256:b9f86d69ae822cabc2a0f6c099b43e8733dda788405cba2665595b7e8dd8d167 \ + --hash=sha256:bb150d529637d541e6af06bbe3d02f5498d628b7f98267ff87647584293ab439 \ + --hash=sha256:c028a394c766693c5c9909dec76b24f37e6a1b91999e8d0c0d5feecbe93c3e05 \ + --hash=sha256:c0d87bd1896faac0d10b4f849016db81a63e4ec5df38757ffae84d45ab38aa71 \ + --hash=sha256:c0e5d9f7a0227df2927d343a6e3859bebf9208b427c79bd31949abcc2fa32fa5 \ + --hash=sha256:c2021afda46c1ed64d74b555065dbd4c2558d510d8cec5ea6a53001b3e5e82a9 \ + --hash=sha256:c2ed66358f32c24e10ceea518e16eb3549e34f33a9d51f99ce23b0251776a1ef \ + --hash=sha256:c404603df4865f8e0afe981aa3c4b62b406e6d06049564d58934860b62b7f91d \ + --hash=sha256:c74099c6b230d4261fdc3169d50efc09abf38ace1a42ea2f9994b1d79153d477 \ + --hash=sha256:ccc70da619744467d8f1f49a8cadae5ec7bbe054e5232d95f92ed8737f8c5870 \ + --hash=sha256:d4be86b58e9ea262617b8ca6251a2f0d63cc132a6da4b5fcc8e0a4128782c829 \ + --hash=sha256:d7345c759276b798ccd6d77a87136029e71e66a8bbf2d2755cbdde1d82e78706 \ + --hash=sha256:ddbfdb5099b3e6ba6d6ea818f61997bb66de14b411357d24c4612cf1ebad08ca \ + --hash=sha256:ddc21521598dbe369d83d4d40338e23d4101dad21dae0e79fa20465dbace019f \ + 
--hash=sha256:df9eadb2a6386d5ea2bfd81309c505e125cfc9ba2b1b99a97e60985b0b3665d1 \ + --hash=sha256:e08ca8a6c851e95aaecc32bc44a5aa75d0ad26af8cdac7c77e4ed93acf3d5b69 \ + --hash=sha256:e446a8ea0a4c366ceafc7d97067bfd55292969143b57e3c846d87fc701e797a0 \ + --hash=sha256:e46c762d9f0e1cfb4ccc8515de7f349abbc95b59cb5a2bd68df5973fdef913f8 \ + --hash=sha256:e607b49b1a106ee2086633167033afbd63f76f2999e9236f638b06b112b24ea7 \ + --hash=sha256:e697d06ad57dd0c7a737771d470eedc18e68dfdefcdd3b7de7f33dfda5b6212e \ + --hash=sha256:e8b5f96c05fce7d0218df3fdfeb962d6b8cfff7e3e20264306b46dd8b217c0f3 \ + --hash=sha256:ed24250e55efbcb0b35bed7caaec8cedf858ab2f9f2201f17b8938c618c8ca6f \ + --hash=sha256:fa1863e75b92891f553b7922ce4ee10ed06db061e104f2b7815de80cdcb135ad \ + --hash=sha256:fea7339bdd22e6f1060c55ac31b6a755d86a5b2ad3657f2669ec243f8e3b2bdb \ + --hash=sha256:ff770589960a86eae279f5d8aa536196ebda8273a2a07db2a54e82b93bc86626 \ + --hash=sha256:ff7877d376add4e16b274e35a3f58b7f37b362abf4aa31863dadacdd20e3a583 + # via langsmith +packaging==25.0 \ + --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \ + --hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f + # via + # langchain-core + # langsmith +pydantic==2.12.5 \ + --hash=sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49 \ + --hash=sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d + # via + # -r validation/ai_checker/requirements.txt.in + # github-copilot-sdk + # langchain-core + # langsmith +pydantic-core==2.41.5 \ + --hash=sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90 \ + --hash=sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740 \ + --hash=sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504 \ + --hash=sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84 \ + --hash=sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33 \ + --hash=sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c \ + --hash=sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0 \ + --hash=sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e \ + --hash=sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0 \ + --hash=sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a \ + --hash=sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34 \ + --hash=sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2 \ + --hash=sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3 \ + --hash=sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815 \ + --hash=sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14 \ + --hash=sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba \ + --hash=sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375 \ + --hash=sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf \ + --hash=sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963 \ + --hash=sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1 \ + --hash=sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808 \ + --hash=sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553 \ + --hash=sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1 \ + --hash=sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2 \ + 
--hash=sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5 \ + --hash=sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470 \ + --hash=sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2 \ + --hash=sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b \ + --hash=sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660 \ + --hash=sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c \ + --hash=sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093 \ + --hash=sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5 \ + --hash=sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594 \ + --hash=sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008 \ + --hash=sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a \ + --hash=sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a \ + --hash=sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd \ + --hash=sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284 \ + --hash=sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586 \ + --hash=sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869 \ + --hash=sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294 \ + --hash=sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f \ + --hash=sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66 \ + --hash=sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51 \ + --hash=sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc \ + --hash=sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97 \ + --hash=sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a \ + --hash=sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d \ + --hash=sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9 \ + --hash=sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c \ + --hash=sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07 \ + --hash=sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36 \ + --hash=sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e \ + --hash=sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05 \ + --hash=sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e \ + --hash=sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941 \ + --hash=sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3 \ + --hash=sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612 \ + --hash=sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3 \ + --hash=sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b \ + --hash=sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe \ + --hash=sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146 \ + --hash=sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11 \ + --hash=sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60 \ + --hash=sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd \ + --hash=sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b \ + 
--hash=sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c \ + --hash=sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a \ + --hash=sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460 \ + --hash=sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1 \ + --hash=sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf \ + --hash=sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf \ + --hash=sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858 \ + --hash=sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2 \ + --hash=sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9 \ + --hash=sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2 \ + --hash=sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3 \ + --hash=sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6 \ + --hash=sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770 \ + --hash=sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d \ + --hash=sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc \ + --hash=sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23 \ + --hash=sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26 \ + --hash=sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa \ + --hash=sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8 \ + --hash=sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d \ + --hash=sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3 \ + --hash=sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d \ + --hash=sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034 \ + --hash=sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9 \ + --hash=sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1 \ + --hash=sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56 \ + --hash=sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b \ + --hash=sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c \ + --hash=sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a \ + --hash=sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e \ + --hash=sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9 \ + --hash=sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5 \ + --hash=sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a \ + --hash=sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556 \ + --hash=sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e \ + --hash=sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49 \ + --hash=sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2 \ + --hash=sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9 \ + --hash=sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b \ + --hash=sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc \ + --hash=sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb \ + --hash=sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0 \ + 
--hash=sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8 \ + --hash=sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82 \ + --hash=sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69 \ + --hash=sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b \ + --hash=sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c \ + --hash=sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75 \ + --hash=sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5 \ + --hash=sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f \ + --hash=sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad \ + --hash=sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b \ + --hash=sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7 \ + --hash=sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425 \ + --hash=sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52 + # via pydantic +pydot==4.0.1 \ + --hash=sha256:869c0efadd2708c0be1f916eb669f3d664ca684bc57ffb7ecc08e70d5e93fee6 \ + --hash=sha256:c2148f681c4a33e08bf0e26a9e5f8e4099a82e0e2a068098f32ce86577364ad5 + # via -r validation/ai_checker/requirements.txt.in +pyparsing==3.3.1 \ + --hash=sha256:023b5e7e5520ad96642e2c6db4cb683d3970bd640cdf7115049a6e9c3682df82 \ + --hash=sha256:47fad0f17ac1e2cad3de3b458570fbc9b03560aa029ed5e16ee5554da9a2251c + # via pydot +python-dateutil==2.9.0.post0 \ + --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ + --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 + # via github-copilot-sdk +pyyaml==6.0.3 \ + --hash=sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c \ + --hash=sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a \ + --hash=sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3 \ + --hash=sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956 \ + --hash=sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6 \ + --hash=sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c \ + --hash=sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65 \ + --hash=sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a \ + --hash=sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0 \ + --hash=sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b \ + --hash=sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1 \ + --hash=sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6 \ + --hash=sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7 \ + --hash=sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e \ + --hash=sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007 \ + --hash=sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310 \ + --hash=sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4 \ + --hash=sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9 \ + --hash=sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295 \ + --hash=sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea \ + --hash=sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0 \ + 
--hash=sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e \ + --hash=sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac \ + --hash=sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9 \ + --hash=sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7 \ + --hash=sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35 \ + --hash=sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb \ + --hash=sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b \ + --hash=sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69 \ + --hash=sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5 \ + --hash=sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b \ + --hash=sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c \ + --hash=sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369 \ + --hash=sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd \ + --hash=sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824 \ + --hash=sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198 \ + --hash=sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065 \ + --hash=sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c \ + --hash=sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c \ + --hash=sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764 \ + --hash=sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196 \ + --hash=sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b \ + --hash=sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00 \ + --hash=sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac \ + --hash=sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8 \ + --hash=sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e \ + --hash=sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28 \ + --hash=sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3 \ + --hash=sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5 \ + --hash=sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4 \ + --hash=sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b \ + --hash=sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf \ + --hash=sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5 \ + --hash=sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702 \ + --hash=sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8 \ + --hash=sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788 \ + --hash=sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da \ + --hash=sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d \ + --hash=sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc \ + --hash=sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c \ + --hash=sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba \ + --hash=sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f \ + --hash=sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917 \ + 
--hash=sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5 \ + --hash=sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26 \ + --hash=sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f \ + --hash=sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b \ + --hash=sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be \ + --hash=sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c \ + --hash=sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3 \ + --hash=sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6 \ + --hash=sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926 \ + --hash=sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0 + # via + # -r validation/ai_checker/requirements.txt.in + # langchain-core +requests==2.32.5 \ + --hash=sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6 \ + --hash=sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf + # via + # langsmith + # requests-toolbelt +requests-toolbelt==1.0.0 \ + --hash=sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6 \ + --hash=sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06 + # via langsmith +six==1.17.0 \ + --hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \ + --hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81 + # via python-dateutil +tenacity==9.1.2 \ + --hash=sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb \ + --hash=sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138 + # via langchain-core +typing-extensions==4.15.0 \ + --hash=sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466 \ + --hash=sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548 + # via + # anyio + # github-copilot-sdk + # langchain-core + # pydantic + # pydantic-core + # typing-inspection +typing-inspection==0.4.2 \ + --hash=sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7 \ + --hash=sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464 + # via pydantic +urllib3==2.6.3 \ + --hash=sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed \ + --hash=sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4 + # via requests +uuid-utils==0.13.0 \ + --hash=sha256:046cb2756e1597b3de22d24851b769913e192135830486a0a70bf41327f0360c \ + --hash=sha256:3e4f2cc54e6a99c0551158100ead528479ad2596847478cbad624977064ffce3 \ + --hash=sha256:4c17df6427a9e23a4cd7fb9ee1efb53b8abb078660b9bdb2524ca8595022dfe1 \ + --hash=sha256:516adf07f5b2cdb88d50f489c702b5f1a75ae8b2639bfd254f4192d5f7ee261f \ + --hash=sha256:5447a680df6ef8a5a353976aaf4c97cc3a3a22b1ee13671c44227b921e3ae2a9 \ + --hash=sha256:5a88e23e0b2f4203fefe2ccbca5736ee06fcad10e61b5e7e39c8d7904bc13300 \ + --hash=sha256:5dc4c9f749bd2511b8dcbf0891e658d7d86880022963db050722ad7b502b5e22 \ + --hash=sha256:6be6c4d11275f5cc402a4fdba6c2b1ce45fd3d99bb78716cd1cc2cbf6802b2ce \ + --hash=sha256:775347c6110fb71360df17aac74132d8d47c1dbe71233ac98197fc872a791fd2 \ + --hash=sha256:77621cf6ceca7f42173a642a01c01c216f9eaec3b7b65d093d2d6a433ca0a83d \ + --hash=sha256:83628283e977fb212e756bc055df8fdd2f9f589a2e539ba1abe755b8ce8df7a4 \ + --hash=sha256:97985256c2e59b7caa51f5c8515f64d777328562a9c900ec65e9d627baf72737 \ + 
--hash=sha256:9a5a9eb06c2bb86dd876cd7b2fe927fc8543d14c90d971581db6ffda4a02526f \ + --hash=sha256:aeee3bd89e8de6184a3ab778ce19f5ce9ad32849d1be549516e0ddb257562d8d \ + --hash=sha256:b276b538c57733ed406948584912da422a604313c71479654848b84b9e19c9b0 \ + --hash=sha256:b7ccaa20e24c5f60f41a69ef571ed820737f9b0ade4cbeef56aaa8f80f5aa475 \ + --hash=sha256:bdaf2b77e34b199cf04cde28399495fd1ed951de214a4ece1f3919b2f945bb06 \ + --hash=sha256:c47638ed6334ab19d80f73664f153b04bbb04ab8ce4298d10da6a292d4d21c47 \ + --hash=sha256:cf95f6370ad1a0910ee7b5ad5228fd19c4ae32fe3627389006adaf519408c41e \ + --hash=sha256:e3909a8a1fbd79d7c8bdc874eeb83e23ccb7a7cb0aa821a49596cc96c0cce84b \ + --hash=sha256:e5182e2d95f38e65f2e5bce90648ef56987443da13e145afcd747e584f9bc69c \ + --hash=sha256:eb2f0baf81e82f9769a7684022dca8f3bf801ca1574a3e94df1876e9d6f9271e + # via + # langchain-core + # langsmith +zstandard==0.25.0 \ + --hash=sha256:011d388c76b11a0c165374ce660ce2c8efa8e5d87f34996aa80f9c0816698b64 \ + --hash=sha256:01582723b3ccd6939ab7b3a78622c573799d5d8737b534b86d0e06ac18dbde4a \ + --hash=sha256:05353cef599a7b0b98baca9b068dd36810c3ef0f42bf282583f438caf6ddcee3 \ + --hash=sha256:05df5136bc5a011f33cd25bc9f506e7426c0c9b3f9954f056831ce68f3b6689f \ + --hash=sha256:06acb75eebeedb77b69048031282737717a63e71e4ae3f77cc0c3b9508320df6 \ + --hash=sha256:07b527a69c1e1c8b5ab1ab14e2afe0675614a09182213f21a0717b62027b5936 \ + --hash=sha256:0bbc9a0c65ce0eea3c34a691e3c4b6889f5f3909ba4822ab385fab9057099431 \ + --hash=sha256:0be7622c37c183406f3dbf0cba104118eb16a4ea7359eeb5752f0794882fc250 \ + --hash=sha256:106281ae350e494f4ac8a80470e66d1fe27e497052c8d9c3b95dc4cf1ade81aa \ + --hash=sha256:10ef2a79ab8e2974e2075fb984e5b9806c64134810fac21576f0668e7ea19f8f \ + --hash=sha256:1673b7199bbe763365b81a4f3252b8e80f44c9e323fc42940dc8843bfeaf9851 \ + --hash=sha256:172de1f06947577d3a3005416977cce6168f2261284c02080e7ad0185faeced3 \ + --hash=sha256:181eb40e0b6a29b3cd2849f825e0fa34397f649170673d385f3598ae17cca2e9 \ + --hash=sha256:1869da9571d5e94a85a5e8d57e4e8807b175c9e4a6294e3b66fa4efb074d90f6 \ + --hash=sha256:19796b39075201d51d5f5f790bf849221e58b48a39a5fc74837675d8bafc7362 \ + --hash=sha256:1cd5da4d8e8ee0e88be976c294db744773459d51bb32f707a0f166e5ad5c8649 \ + --hash=sha256:1f3689581a72eaba9131b1d9bdbfe520ccd169999219b41000ede2fca5c1bfdb \ + --hash=sha256:1f830a0dac88719af0ae43b8b2d6aef487d437036468ef3c2ea59c51f9d55fd5 \ + --hash=sha256:223415140608d0f0da010499eaa8ccdb9af210a543fac54bce15babbcfc78439 \ + --hash=sha256:22a06c5df3751bb7dc67406f5374734ccee8ed37fc5981bf1ad7041831fa1137 \ + --hash=sha256:22a086cff1b6ceca18a8dd6096ec631e430e93a8e70a9ca5efa7561a00f826fa \ + --hash=sha256:23ebc8f17a03133b4426bcc04aabd68f8236eb78c3760f12783385171b0fd8bd \ + --hash=sha256:25f8f3cd45087d089aef5ba3848cd9efe3ad41163d3400862fb42f81a3a46701 \ + --hash=sha256:2b6bd67528ee8b5c5f10255735abc21aa106931f0dbaf297c7be0c886353c3d0 \ + --hash=sha256:2e54296a283f3ab5a26fc9b8b5d4978ea0532f37b231644f367aa588930aa043 \ + --hash=sha256:3756b3e9da9b83da1796f8809dd57cb024f838b9eeafde28f3cb472012797ac1 \ + --hash=sha256:37daddd452c0ffb65da00620afb8e17abd4adaae6ce6310702841760c2c26860 \ + --hash=sha256:3a39c94ad7866160a4a46d772e43311a743c316942037671beb264e395bdd611 \ + --hash=sha256:3b870ce5a02d4b22286cf4944c628e0f0881b11b3f14667c1d62185a99e04f53 \ + --hash=sha256:3c83b0188c852a47cd13ef3bf9209fb0a77fa5374958b8c53aaa699398c6bd7b \ + --hash=sha256:4203ce3b31aec23012d3a4cf4a2ed64d12fea5269c49aed5e4c3611b938e4088 \ + --hash=sha256:457ed498fc58cdc12fc48f7950e02740d4f7ae9493dd4ab2168a47c93c31298e \ + 
--hash=sha256:474d2596a2dbc241a556e965fb76002c1ce655445e4e3bf38e5477d413165ffa \ + --hash=sha256:4b14abacf83dfb5c25eb4e4a79520de9e7e205f72c9ee7702f91233ae57d33a2 \ + --hash=sha256:4b6d83057e713ff235a12e73916b6d356e3084fd3d14ced499d84240f3eecee0 \ + --hash=sha256:4d441506e9b372386a5271c64125f72d5df6d2a8e8a2a45a0ae09b03cb781ef7 \ + --hash=sha256:4f187a0bb61b35119d1926aee039524d1f93aaf38a9916b8c4b78ac8514a0aaf \ + --hash=sha256:51526324f1b23229001eb3735bc8c94f9c578b1bd9e867a0a646a3b17109f388 \ + --hash=sha256:53e08b2445a6bc241261fea89d065536f00a581f02535f8122eba42db9375530 \ + --hash=sha256:53f94448fe5b10ee75d246497168e5825135d54325458c4bfffbaafabcc0a577 \ + --hash=sha256:5a56ba0db2d244117ed744dfa8f6f5b366e14148e00de44723413b2f3938a902 \ + --hash=sha256:5f1ad7bf88535edcf30038f6919abe087f606f62c00a87d7e33e7fc57cb69fcc \ + --hash=sha256:5f5e4c2a23ca271c218ac025bd7d635597048b366d6f31f420aaeb715239fc98 \ + --hash=sha256:6a573a35693e03cf1d67799fd01b50ff578515a8aeadd4595d2a7fa9f3ec002a \ + --hash=sha256:6c0e5a65158a7946e7a7affa6418878ef97ab66636f13353b8502d7ea03c8097 \ + --hash=sha256:6dffecc361d079bb48d7caef5d673c88c8988d3d33fb74ab95b7ee6da42652ea \ + --hash=sha256:7030defa83eef3e51ff26f0b7bfb229f0204b66fe18e04359ce3474ac33cbc09 \ + --hash=sha256:7149623bba7fdf7e7f24312953bcf73cae103db8cae49f8154dd1eadc8a29ecb \ + --hash=sha256:72d35d7aa0bba323965da807a462b0966c91608ef3a48ba761678cb20ce5d8b7 \ + --hash=sha256:75ffc32a569fb049499e63ce68c743155477610532da1eb38e7f24bf7cd29e74 \ + --hash=sha256:7713e1179d162cf5c7906da876ec2ccb9c3a9dcbdffef0cc7f70c3667a205f0b \ + --hash=sha256:78228d8a6a1c177a96b94f7e2e8d012c55f9c760761980da16ae7546a15a8e9b \ + --hash=sha256:7b3c3a3ab9daa3eed242d6ecceead93aebbb8f5f84318d82cee643e019c4b73b \ + --hash=sha256:809c5bcb2c67cd0ed81e9229d227d4ca28f82d0f778fc5fea624a9def3963f91 \ + --hash=sha256:81dad8d145d8fd981b2962b686b2241d3a1ea07733e76a2f15435dfb7fb60150 \ + --hash=sha256:85304a43f4d513f5464ceb938aa02c1e78c2943b29f44a750b48b25ac999a049 \ + --hash=sha256:89c4b48479a43f820b749df49cd7ba2dbc2b1b78560ecb5ab52985574fd40b27 \ + --hash=sha256:8e735494da3db08694d26480f1493ad2cf86e99bdd53e8e9771b2752a5c0246a \ + --hash=sha256:913cbd31a400febff93b564a23e17c3ed2d56c064006f54efec210d586171c00 \ + --hash=sha256:9174f4ed06f790a6869b41cba05b43eeb9a35f8993c4422ab853b705e8112bbd \ + --hash=sha256:9300d02ea7c6506f00e627e287e0492a5eb0371ec1670ae852fefffa6164b072 \ + --hash=sha256:933b65d7680ea337180733cf9e87293cc5500cc0eb3fc8769f4d3c88d724ec5c \ + --hash=sha256:9654dbc012d8b06fc3d19cc825af3f7bf8ae242226df5f83936cb39f5fdc846c \ + --hash=sha256:98750a309eb2f020da61e727de7d7ba3c57c97cf6213f6f6277bb7fb42a8e065 \ + --hash=sha256:99c0c846e6e61718715a3c9437ccc625de26593fea60189567f0118dc9db7512 \ + --hash=sha256:a1a4ae2dec3993a32247995bdfe367fc3266da832d82f8438c8570f989753de1 \ + --hash=sha256:a3f79487c687b1fc69f19e487cd949bf3aae653d181dfb5fde3bf6d18894706f \ + --hash=sha256:a4089a10e598eae6393756b036e0f419e8c1d60f44a831520f9af41c14216cf2 \ + --hash=sha256:a51ff14f8017338e2f2e5dab738ce1ec3b5a851f23b18c1ae1359b1eecbee6df \ + --hash=sha256:a5a419712cf88862a45a23def0ae063686db3d324cec7edbe40509d1a79a0aab \ + --hash=sha256:a9ec8c642d1ec73287ae3e726792dd86c96f5681eb8df274a757bf62b750eae7 \ + --hash=sha256:aaf21ba8fb76d102b696781bddaa0954b782536446083ae3fdaa6f16b25a1c4b \ + --hash=sha256:ab85470ab54c2cb96e176f40342d9ed41e58ca5733be6a893b730e7af9c40550 \ + --hash=sha256:b9af1fe743828123e12b41dd8091eca1074d0c1569cc42e6e1eee98027f2bbd0 \ + 
--hash=sha256:bfc4e20784722098822e3eee42b8e576b379ed72cca4a7cb856ae733e62192ea \ + --hash=sha256:bfd06b1c5584b657a2892a6014c2f4c20e0db0208c159148fa78c65f7e0b0277 \ + --hash=sha256:c19bcdd826e95671065f8692b5a4aa95c52dc7a02a4c5a0cac46deb879a017a2 \ + --hash=sha256:c2ba942c94e0691467ab901fc51b6f2085ff48f2eea77b1a48240f011e8247c7 \ + --hash=sha256:c8e167d5adf59476fa3e37bee730890e389410c354771a62e3c076c86f9f7778 \ + --hash=sha256:ca54090275939dc8ec5dea2d2afb400e0f83444b2fc24e07df7fdef677110859 \ + --hash=sha256:d7541afd73985c630bafcd6338d2518ae96060075f9463d7dc14cfb33514383d \ + --hash=sha256:d8c56bb4e6c795fc77d74d8e8b80846e1fb8292fc0b5060cd8131d522974b751 \ + --hash=sha256:da469dc041701583e34de852d8634703550348d5822e66a0c827d39b05365b12 \ + --hash=sha256:daab68faadb847063d0c56f361a289c4f268706b598afbf9ad113cbe5c38b6b2 \ + --hash=sha256:e05ab82ea7753354bb054b92e2f288afb750e6b439ff6ca78af52939ebbc476d \ + --hash=sha256:e09bb6252b6476d8d56100e8147b803befa9a12cea144bbe629dd508800d1ad0 \ + --hash=sha256:e29f0cf06974c899b2c188ef7f783607dbef36da4c242eb6c82dcd8b512855e3 \ + --hash=sha256:e59fdc271772f6686e01e1b3b74537259800f57e24280be3f29c8a0deb1904dd \ + --hash=sha256:e7360eae90809efd19b886e59a09dad07da4ca9ba096752e61a2e03c8aca188e \ + --hash=sha256:e96594a5537722fdfb79951672a2a63aec5ebfb823e7560586f7484819f2a08f \ + --hash=sha256:ea9d54cc3d8064260114a0bbf3479fc4a98b21dffc89b3459edd506b69262f6e \ + --hash=sha256:ec996f12524f88e151c339688c3897194821d7f03081ab35d31d1e12ec975e94 \ + --hash=sha256:f27662e4f7dbf9f9c12391cb37b4c4c3cb90ffbd3b1fb9284dadbbb8935fa708 \ + --hash=sha256:f373da2c1757bb7f1acaf09369cdc1d51d84131e50d5fa9863982fd626466313 \ + --hash=sha256:f5aeea11ded7320a84dcdd62a3d95b5186834224a9e55b92ccae35d21a8b63d4 \ + --hash=sha256:f604efd28f239cc21b3adb53eb061e2a205dc164be408e553b41ba2ffe0ca15c \ + --hash=sha256:f67e8f1a324a900e75b5e28ffb152bcac9fbed1cc7b43f99cd90f395c4375344 \ + --hash=sha256:fd7a5004eb1980d3cefe26b2685bcb0b17989901a70a1040d1ac86f1d898c551 \ + --hash=sha256:ffef5a74088f1e09947aecf91011136665152e0b4b359c42be3373897fb39b01 + # via langsmith diff --git a/validation/ai_checker/requirements.txt.in b/validation/ai_checker/requirements.txt.in new file mode 100644 index 00000000..4c9f008a --- /dev/null +++ b/validation/ai_checker/requirements.txt.in @@ -0,0 +1,9 @@ +# Core dependencies for AI checker framework +bigtree +pydot +pydantic +pyyaml + +# LangChain + GitHub Copilot SDK +github-copilot-sdk>=0.1.23 +langchain-core>=1.2.9 diff --git a/validation/ai_checker/src/ai_checker/__init__.py b/validation/ai_checker/src/ai_checker/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/validation/ai_checker/src/ai_checker/ai_checker_core.py b/validation/ai_checker/src/ai_checker/ai_checker_core.py new file mode 100644 index 00000000..d231b62e --- /dev/null +++ b/validation/ai_checker/src/ai_checker/ai_checker_core.py @@ -0,0 +1,392 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +""" +AI Checker for analyzing requirements against guidelines. 
+ +This module provides the core AI analysis functionality for requirements. +""" + +import asyncio +import hashlib +import json +import logging +import sys +import time +from typing import Any + +from langchain_core.language_models.chat_models import BaseChatModel +from langchain_core.messages import HumanMessage, SystemMessage +from pydantic import ValidationError + +from ai_checker.analysis_cache import AnalysisCache +from ai_checker.analysis_models import AnalysisResults +from ai_checker.constants import DEFAULT_MODEL + + +class AIChecker: + """ + Analyzes requirements against engineering guidelines + using an AI model with structured output. + """ + + def __init__( + self, + model_name: str = DEFAULT_MODEL, + cache_dir: str | None = None, + debug_log: str | None = None, + batch_size: int | None = None, + max_concurrent_requests: int = 5, + max_batch_chars: int = 50000, + ): + """ + Initialize the AI Checker with model configuration. + + Args: + model_name: Name of the AI model to use (for cache key + generation) + cache_dir: Optional directory path for caching results. If + None, caching is disabled. + debug_log: Optional file path for detailed debug logging. + If provided, verbose debug output is written to this file. + batch_size: Optional number of requirements to process per + batch. If None, process all at once. + max_concurrent_requests: Maximum number of concurrent API requests + max_batch_chars: Maximum total characters per batch + """ + self._model_name = model_name + self._cache = AnalysisCache(cache_dir) + self._batch_size = batch_size + self._max_concurrent_requests = max_concurrent_requests + self._max_batch_chars = max_batch_chars + self._semaphore = asyncio.Semaphore(max_concurrent_requests) + + # Set up logger (use fixed name to prevent handler leaks across instances) + self._logger = logging.getLogger(f"{__name__}.AIChecker") + self._logger.setLevel(logging.DEBUG) + self._logger.propagate = False + self._logger.handlers.clear() + + # Always add stderr handler for INFO and above (progress messages) + stderr_handler = logging.StreamHandler(sys.stderr) + stderr_handler.setLevel(logging.INFO) + stderr_formatter = logging.Formatter("%(message)s") + stderr_handler.setFormatter(stderr_formatter) + self._logger.addHandler(stderr_handler) + + # Add file handler for DEBUG level if debug log requested + if debug_log: + file_handler = logging.FileHandler(debug_log, mode="w") + file_handler.setLevel(logging.DEBUG) + file_formatter = logging.Formatter("%(message)s") + file_handler.setFormatter(file_formatter) + self._logger.addHandler(file_handler) + + def _generate_cache_key( + self, artefacts: dict[str, dict[str, Any]], guidelines_content: str + ) -> str: + """ + Generate a unique cache key for the given artefacts. + + Args: + artefacts: Dictionary mapping artefact IDs to their metadata + guidelines_content: Combined content of all guidelines + + Returns: + SHA256 hash of the artefacts, guidelines content, and model name + """ + # Create a deterministic string representation of artefacts + artefact_data = json.dumps(artefacts, sort_keys=True) + combined = f"{artefact_data}:{guidelines_content}:{self._model_name}" + return hashlib.sha256(combined.encode("utf-8")).hexdigest() + + def _format_artefacts_for_analysis( + self, artefacts: dict[str, dict[str, Any]] + ) -> str: + """ + Format extracted artefacts for AI analysis. 
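+
+        Example of the produced layout (illustrative; the ``title`` and
+        ``text`` field names are assumptions and depend entirely on what
+        the extractor put into the metadata dict):
+
+            Requirements to analyze:
+
+            ID: req_001
+            Title: Sample requirement title
+            Text: Sample requirement text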
+ + Args: + artefacts: Dictionary mapping artefact IDs to their metadata + + Returns: + Formatted string representation of artefacts + """ + formatted = "Requirements to analyze:\n\n" + for artefact_id, metadata in artefacts.items(): + formatted += f"ID: {artefact_id}\n" + + # Format all metadata fields + for key, value in metadata.items(): + if value: + formatted += f"{key.capitalize()}: {value}\n" + + formatted += "\n" + + return formatted + + def _create_batches( + self, artefacts: dict[str, dict[str, Any]] + ) -> list[dict[str, dict[str, Any]]]: + """ + Create batches based on both count and total character size. + + Args: + artefacts: Dictionary mapping artefact IDs to their metadata + + Returns: + List of batches, where each batch is a dict of artefacts + """ + batches = [] + current_batch = {} + current_char_count = 0 + + for artefact_id, metadata in artefacts.items(): + # Calculate character count for this artefact + artefact_text = self._format_artefacts_for_analysis({artefact_id: metadata}) + char_count = len(artefact_text) + + # Check if adding this artefact would exceed limits + would_exceed_count = ( + self._batch_size and len(current_batch) >= self._batch_size + ) + would_exceed_chars = current_char_count + char_count > self._max_batch_chars + + # Start new batch if necessary + if current_batch and (would_exceed_count or would_exceed_chars): + batches.append(current_batch) + current_batch = {} + current_char_count = 0 + + # Add artefact to current batch + current_batch[artefact_id] = metadata + current_char_count += char_count + + # Add final batch if not empty + if current_batch: + batches.append(current_batch) + + return batches + + async def analyze( + self, + artefacts: dict[str, dict[str, Any]], + guidelines_content: str, + chat_model: BaseChatModel, + ) -> AnalysisResults: + """ + Analyze artefacts using the chat model with structured output. + Uses async processing with rate limiting for concurrent requests. + Uses caching if enabled to avoid redundant API calls. 
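+
+        Example call (illustrative; ``checker``, ``artefacts``,
+        ``guidelines`` and ``chat`` are assumed to be prepared by the
+        caller):
+
+            results = asyncio.run(
+                checker.analyze(artefacts, guidelines, chat)
+            )
+            scores = [a.score for a in results.analyses]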
+ + Args: + artefacts: Dictionary mapping artefact IDs to their metadata + guidelines_content: Combined content of all guidelines + chat_model: BaseChatModel instance for AI analysis + + Returns: + AnalysisResults containing structured analyses for each artefact + """ + # Log number of artefacts to be analyzed + num_artefacts = len(artefacts) + self._logger.info(f"--> Analyzing {num_artefacts} requirement(s)...") + + # Create batches based on count and character size + batches = self._create_batches(artefacts) + num_batches = len(batches) + + if num_batches > 1: + self._logger.info( + f"--> Created {num_batches} batches based on size and count limits" + ) + self._logger.info( + f"--> Processing {num_batches} batches concurrently " + f"(max {self._max_concurrent_requests} at a time)" + ) + + total_start_time = time.time() + + # Create tasks for all batches to process concurrently + batch_tasks = [ + self._analyze_batch_async(i + 1, batch, guidelines_content, chat_model) + for i, batch in enumerate(batches) + ] + + # Execute all batch tasks concurrently with rate limiting + # Use return_exceptions=True to continue even if some batches fail + all_batch_results = await asyncio.gather(*batch_tasks, return_exceptions=True) + + # Flatten results from all batches, handling exceptions + all_analyses = [] + failed_batches = 0 + for i, batch_results in enumerate(all_batch_results): + if isinstance(batch_results, Exception): + failed_batches += 1 + self._logger.warning( + f"--> WARNING: Batch {i + 1} failed with error: " + f"{type(batch_results).__name__}: {str(batch_results)}" + ) + else: + all_analyses.extend(batch_results) + + if failed_batches > 0: + self._logger.warning( + f"--> WARNING: {failed_batches} out of {num_batches} batches failed. " + f"Successfully analyzed {len(all_analyses)} requirement(s)." + ) + + # Calculate final statistics + current_total_cost = 0.0 + if chat_model and hasattr(chat_model, "total_costs"): + current_total_cost = getattr(chat_model, "total_costs", 0.0) + + # Log final statistics + total_elapsed = time.time() - total_start_time + all_scores = [a.score for a in all_analyses] + average_score = sum(all_scores) / len(all_scores) if all_scores else 0.0 + + self._logger.info(f"--> Execution time: {total_elapsed:.2f}s") + self._logger.info(f"--> Total costs: ${current_total_cost:.4f} USD") + self._logger.info(f"--> Overall average score: {average_score:.2f}") + + return AnalysisResults(analyses=all_analyses) + + async def _analyze_batch_async( + self, + batch_number: int, + artefacts: dict[str, dict[str, Any]], + guidelines_content: str, + chat_model: BaseChatModel, + ) -> list[Any]: + """ + Analyze a batch of artefacts using async with rate limiting. + + Args: + batch_number: The batch number (1-indexed) for logging + artefacts: Dictionary mapping artefact IDs to their metadata + guidelines_content: Combined content of all guidelines + chat_model: BaseChatModel instance for AI analysis + + Returns: + List of analysis results for all artefacts in the batch + """ + # Log batch start + batch_size = len(artefacts) + self._logger.info( + f"--> Batch {batch_number}: Processing {batch_size} requirement(s)..." 
+ ) + + self._logger.debug( + f"Batch {batch_number} contains artefact IDs: {', '.join(artefacts.keys())}" + ) + self._logger.debug( + f"Guidelines content length: {len(guidelines_content)} characters" + ) + + # Check cache first + cache_hash = self._generate_cache_key(artefacts, guidelines_content) + cached_result = self._cache.get(cache_hash) + if cached_result is not None: + self._logger.info(f"--> Batch {batch_number}: Completed (from cache)") + return cached_result.analyses + + # Use semaphore for rate limiting + async with self._semaphore: + try: + self._logger.debug( + f"Batch {batch_number}: Creating structured chat model..." + ) + + # Create structured chat model + structured_chat = chat_model.with_structured_output(AnalysisResults) + + # Prepare system message with guidelines + system_message = SystemMessage(content=guidelines_content) + + # Format requirements for analysis + formatted_artefacts = self._format_artefacts_for_analysis(artefacts) + + self._logger.debug( + f"Batch {batch_number}: Formatted artefacts length: " + f"{len(formatted_artefacts)} characters" + ) + self._logger.debug( + f"Batch {batch_number}: ===== RAW AI MODEL INPUT =====" + ) + self._logger.debug( + f"Batch {batch_number}: System Message (Guidelines):" + ) + self._logger.debug(guidelines_content) + self._logger.debug(f"Batch {batch_number}: ---") + self._logger.debug( + f"Batch {batch_number}: Human Message (Requirements):" + ) + self._logger.debug(formatted_artefacts) + self._logger.debug( + f"Batch {batch_number}: ===== END RAW AI MODEL INPUT =====" + ) + self._logger.debug( + f"Batch {batch_number}: Sending request to AI model " + f"({self._model_name})..." + ) + + analysis_prompt = HumanMessage(content=formatted_artefacts) + + # Call async invoke + start_time = time.time() + response = await structured_chat.ainvoke( + [system_message, analysis_prompt] + ) + elapsed = time.time() - start_time + + self._logger.debug( + f"Batch {batch_number}: Received response in {elapsed:.2f}s" + ) + + # Validate that we got a proper response + if not hasattr(response, "analyses") or not response.analyses: + raise ValueError( + f"AI model returned empty or invalid response. 
" + f"Expected 'analyses' field with {len(artefacts)} " + f"items, got: {response}" + ) + + # Cache the result + self._cache.set(cache_hash, response) + + # Log batch completion + self._logger.info(f"--> Batch {batch_number}: Completed successfully") + + return response.analyses + + except ValidationError as e: + self._logger.error( + f"\n\n--> Batch {batch_number}: AI Model Error " + f"(returned invalid response):" + ) + self._logger.error("--> Validation errors:") + for error in e.errors(): + field = error.get("loc", ["unknown"])[0] + msg = error.get("msg", "Unknown error") + input_val = error.get("input", "N/A") + self._logger.error(f" - Field '{field}': {msg}") + if input_val != "N/A": + self._logger.error( + f" Received: {json.dumps(input_val, indent=6)}" + ) + raise + except Exception as e: + self._logger.error( + f"--> Batch {batch_number}: AI Model Error: " + f"{type(e).__name__}: {str(e)}" + ) + raise diff --git a/validation/ai_checker/src/ai_checker/analysis_cache.py b/validation/ai_checker/src/ai_checker/analysis_cache.py new file mode 100644 index 00000000..ef7761b1 --- /dev/null +++ b/validation/ai_checker/src/ai_checker/analysis_cache.py @@ -0,0 +1,106 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +""" +Cache management for AI analysis results. + +This module provides caching functionality for storing and retrieving +analysis results based on hash keys. +""" + +import json +import logging +import os +from typing import Optional + +from ai_checker.analysis_models import AnalysisResults + +logger = logging.getLogger(__name__) + + +class AnalysisCache: + """ + Manages caching of analysis results with hash-based key interface. + """ + + def __init__(self, cache_dir: Optional[str] = None): + """ + Initialize the cache. + + Args: + cache_dir: Optional directory path for caching results. + If None, caching is disabled. + """ + self._cache_dir = cache_dir + if self._cache_dir: + os.makedirs(self._cache_dir, exist_ok=True) + + def get(self, cache_hash: str) -> Optional[AnalysisResults]: + """ + Load cached analysis results. + + Args: + cache_hash: SHA256 hash key for the analysis + + Returns: + Cached AnalysisResults or None if not found + """ + if not self._cache_dir: + return None + + cache_file = os.path.join(self._cache_dir, f"{cache_hash}.json") + if os.path.exists(cache_file): + try: + with open(cache_file, "r", encoding="utf-8") as f: + data = json.load(f) + return AnalysisResults.model_validate(data) + except Exception as exc: + logger.warning( + "Failed to read cache file %s: %s: %s", + cache_file, + type(exc).__name__, + exc, + ) + return None + return None + + def set(self, cache_hash: str, results: AnalysisResults) -> None: + """ + Save analysis results to cache. 
+ + Args: + cache_hash: SHA256 hash key for the analysis + results: AnalysisResults to cache + """ + if not self._cache_dir: + return + + cache_file = os.path.join(self._cache_dir, f"{cache_hash}.json") + try: + with open(cache_file, "w", encoding="utf-8") as f: + f.write(results.model_dump_json(indent=2)) + except Exception as exc: + logger.warning( + "Failed to write cache file %s: %s: %s", + cache_file, + type(exc).__name__, + exc, + ) + + def is_enabled(self) -> bool: + """ + Check if caching is enabled. + + Returns: + True if cache directory is configured, False otherwise + """ + return self._cache_dir is not None diff --git a/validation/ai_checker/src/ai_checker/analysis_models.py b/validation/ai_checker/src/ai_checker/analysis_models.py new file mode 100644 index 00000000..486c3c10 --- /dev/null +++ b/validation/ai_checker/src/ai_checker/analysis_models.py @@ -0,0 +1,43 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +""" +Data models for requirement analysis results. + +This module provides Pydantic models for structured analysis outputs. +""" + +from pydantic import BaseModel, Field + + +class RequirementAnalysis(BaseModel): + """Structured output for individual requirement analysis.""" + + requirement_id: str = Field(description="Unique identifier for the requirement") + description: str = Field( + description=("Brief description of the requirement (first line is sufficient)") + ) + findings: list[str] = Field(description="List of findings from the analysis") + suggestions: list[str] = Field(description="List of suggestions from the analysis") + score: float = Field( + description="Numerical score from 0 to 10 representing analysis quality", + ge=0.0, + le=10, + ) + + +class AnalysisResults(BaseModel): + """Structured output for multiple requirement analyses.""" + + analyses: list[RequirementAnalysis] = Field( + description="List of requirement analyses" + ) diff --git a/validation/ai_checker/src/ai_checker/artefact_extractor.py b/validation/ai_checker/src/ai_checker/artefact_extractor.py new file mode 100644 index 00000000..0194c64e --- /dev/null +++ b/validation/ai_checker/src/ai_checker/artefact_extractor.py @@ -0,0 +1,48 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +""" +Abstract base class for artefact extractors. + +This module defines the interface for extracting artefacts from various sources +for AI analysis. 
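+
+A minimal implementing sketch (``StaticExtractor`` and its hard-coded
+return value are illustrative, not part of this module):
+
+    class StaticExtractor(ArtefactExtractor):
+        def extract(self) -> Dict[str, Dict[str, Any]]:
+            return {"req_001": {"title": "Sample", "text": "..."}}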
+""" + +from abc import ABC, abstractmethod +from typing import Dict, Any + + +class ArtefactExtractor(ABC): + """ + Abstract base class for extracting artefacts for AI analysis. + + Implementations should extract artefacts from their respective sources + (e.g., TRLC requirements, code documentation, test cases) and return + them in a standardized dictionary format. + """ + + @abstractmethod + def extract(self) -> Dict[str, Dict[str, Any]]: + """ + Extract artefacts and return them in a standardized format. + + Returns: + Dictionary mapping artefact IDs to their metadata: + { + "artefact_id": { + "field1": "value1", + "field2": "value2", + ... + } + } + """ + pass diff --git a/validation/ai_checker/src/ai_checker/constants.py b/validation/ai_checker/src/ai_checker/constants.py new file mode 100644 index 00000000..bbc8cb20 --- /dev/null +++ b/validation/ai_checker/src/ai_checker/constants.py @@ -0,0 +1,20 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +""" +Configuration constants for AI Checker. + +This module provides shared constants used across AI checker components. +""" + +# Default AI model to use for all analysis operations +DEFAULT_MODEL = "anthropic/claude-sonnet-4-5" diff --git a/validation/ai_checker/src/ai_checker/guidelines_reader.py b/validation/ai_checker/src/ai_checker/guidelines_reader.py new file mode 100644 index 00000000..affb4ae8 --- /dev/null +++ b/validation/ai_checker/src/ai_checker/guidelines_reader.py @@ -0,0 +1,100 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +""" +Reader for guidelines markdown files. + +This module provides functionality to read and manage guidelines +from a directory of markdown files. +""" + +import logging +import os + +logger = logging.getLogger(__name__) + + +class GuidelinesReader: + """Reader for guidelines markdown files.""" + + def __init__(self, guidelines_dir: str): + """ + Initialize the GuidelinesReader and load all guidelines. + + Args: + guidelines_dir: Path to guidelines directory containing + markdown files. 
+ """ + self.guidelines_dir = guidelines_dir + + # Dictionary to store all guideline contents keyed by filename + # (without extension) + self.guidelines: dict[str, str] = {} + + # Load all markdown files from the directory + self._load_all_guidelines() + + def _load_all_guidelines(self): + """Load all markdown files from the guidelines directory.""" + if not os.path.isdir(self.guidelines_dir): + logger.warning(f"Guidelines directory not found: {self.guidelines_dir}") + return + + for filename in sorted(os.listdir(self.guidelines_dir)): + if filename.endswith(".md"): + file_path = os.path.join(self.guidelines_dir, filename) + # Use filename without extension as key + key = os.path.splitext(filename)[0] + content = self._read_file(file_path) + if content: + self.guidelines[key] = content + + def _read_file(self, file_path: str) -> str: + """Read a file and return its content as a string. + + Args: + file_path: Path to the file to read + + Returns: + File content as string, or empty string if file not found + """ + try: + with open(file_path, encoding="utf-8") as f: + return f.read() + except FileNotFoundError: + logger.warning(f"File not found: {file_path}") + return "" + except OSError as e: + logger.warning(f"Error reading file {file_path}: {e}") + return "" + except UnicodeDecodeError as e: + logger.warning(f"Unicode decode error reading file {file_path}: {e}") + return "" + + def get_guideline(self, name: str) -> str: + """Get a specific guideline by name. + + Args: + name: Name of the guideline file (without .md extension) + + Returns: + Guideline content as string, or empty string if not found + """ + return self.guidelines.get(name, "") + + def get_all_guidelines(self) -> dict[str, str]: + """Get all guidelines as a dictionary. + + Returns: + Dictionary mapping guideline names to their content + """ + return self.guidelines.copy() diff --git a/validation/ai_checker/src/ai_checker/orchestrator.py b/validation/ai_checker/src/ai_checker/orchestrator.py new file mode 100644 index 00000000..0239f4ba --- /dev/null +++ b/validation/ai_checker/src/ai_checker/orchestrator.py @@ -0,0 +1,380 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +""" +Orchestrator for artefact analysis workflow. + +This module provides the main orchestration logic for extracting and analyzing +artefacts against engineering guidelines using pluggable extractors. 
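+
+Typical CLI invocation (flags are defined by ``argument_parser`` below;
+paths are illustrative):
+
+    python -m ai_checker.orchestrator -i requirements/ -o results.json --html report.html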
+""" + +import argparse +import asyncio +import concurrent.futures +import logging +import os +import sys +from typing import Optional + +from langchain_core.language_models.chat_models import BaseChatModel + +from ai_checker.ai_checker_core import AIChecker +from ai_checker.analysis_models import AnalysisResults +from ai_checker.requirement_extractor import RequirementExtractor +from ai_checker.result_formatter import ResultFormatter +from ai_checker.guidelines_reader import GuidelinesReader +from ai_checker.constants import DEFAULT_MODEL + + +def _create_default_chat_model( + model_name: str = DEFAULT_MODEL, + max_completion_tokens: int = 8192, +) -> BaseChatModel: + """ + Create the default chat model using the GitHub Copilot SDK adapter. + + Uses the ChatCopilot LangChain wrapper as the default AI backend. + + Args: + model_name: Model identifier (e.g. 'gpt-4.1', 'claude-sonnet-4') + max_completion_tokens: Maximum tokens for completion + + Returns: + Configured BaseChatModel instance (ChatCopilot) + """ + from copilot_adapter.copilot_langchain import ChatCopilot + + return ChatCopilot( + model=model_name, + timeout=max(120.0, max_completion_tokens / 50.0), + ) + + +def _load_custom_ai_model_module(custom_path: str): + """ + Load a custom ai_model module from a file path. + + The custom module must provide a `create_chat_model` function with + the signature: `create_chat_model(model_name: str, max_completion_tokens: int) -> BaseChatModel` + + WARNING: This executes arbitrary Python code from the given path. Only + pass paths to files that you own and trust. Never set --custom-ai-model + to a path derived from untrusted external input. + + Args: + custom_path: Path to custom ai_model.py file (must be a trusted file) + + Returns: + The loaded custom ai_model module + """ + logger = logging.getLogger(__name__) + logger.info(f"--> Using custom ai_model from: {custom_path}") + import importlib.util + + spec = importlib.util.spec_from_file_location("custom_ai_model", custom_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +class AnalysisOrchestrator: + """ + Main orchestrator class responsible for coordinating artefact + extraction and analysis. + """ + + def __init__( + self, + model_name: str = DEFAULT_MODEL, + guidelines_path: str = "guidelines", + cache_dir: str | None = None, + debug_log: str | None = None, + batch_size: int | None = None, + custom_ai_model: str | None = None, + max_concurrent_requests: int = 5, + max_batch_chars: int = 50000, + ): + """ + Initialize the orchestrator with AI checker. 
+
+        Args:
+            model_name: Name of the AI model to use
+            guidelines_path: Relative path to guidelines directory
+            cache_dir: Optional directory path for caching results
+            debug_log: Optional file path for detailed debug logging
+            batch_size: Optional number of requirements to process per batch
+            custom_ai_model: Optional path to custom ai_model.py file
+            max_concurrent_requests: Maximum number of concurrent API requests
+            max_batch_chars: Maximum total characters per batch
+        """
+        self.model_name = model_name
+        self.guidelines_path = guidelines_path
+        self._custom_ai_model = custom_ai_model
+
+        # Initialize requirement extractor (no input directory yet)
+        self.requirement_extractor = None
+
+        # Load guidelines using GuidelinesReader
+        self.guidelines_reader = GuidelinesReader(guidelines_path)
+        all_guidelines = self.guidelines_reader.get_all_guidelines()
+        self.guidelines_content = "\n\n".join(all_guidelines.values())
+
+        # Create AI model (private member)
+        self._chat_model: Optional[BaseChatModel] = None
+        if custom_ai_model and os.path.exists(custom_ai_model):
+            # Use custom ai_model.py provided by the user
+            ai_model_module = _load_custom_ai_model_module(custom_ai_model)
+            self._chat_model = ai_model_module.create_chat_model(
+                model_name=model_name,
+                max_completion_tokens=8192,
+            )
+        else:
+            # Default: use GitHub Copilot SDK via ChatCopilot adapter
+            logger = logging.getLogger(__name__)
+            logger.info("--> Using default ChatCopilot model adapter")
+            self._chat_model = _create_default_chat_model(
+                model_name=model_name,
+                max_completion_tokens=8192,
+            )
+
+        # Initialize AI checker
+        self.ai_checker = AIChecker(
+            model_name=model_name,
+            cache_dir=cache_dir,
+            debug_log=debug_log,
+            batch_size=batch_size,
+            max_concurrent_requests=max_concurrent_requests,
+            max_batch_chars=max_batch_chars,
+        )
+
+        # Initialize result formatter (will be configured with results later)
+        self.result_formatter = None
+
+        # Extractor instance (will be set when analyzing)
+        self.artefact_extractor = None
+
+        # Stored artefacts from extraction (reused for formatting)
+        self._extracted_artefacts = None
+
+    def analyze_directory(
+        self, input_dir: str, dependency_dirs: list[str] | None = None
+    ) -> AnalysisResults:
+        """
+        Extract and analyze artefacts from a directory using the TRLC
+        extractor.
+
+        Args:
+            input_dir: Path to directory containing files to analyze
+            dependency_dirs: Optional list of directories containing
+                dependencies for link resolution
+
+        Returns:
+            AnalysisResults containing structured analyses for each artefact
+        """
+        # Initialize TRLC requirement extractor
+        self.artefact_extractor = RequirementExtractor(input_dir, dependency_dirs)
+
+        # Extract artefacts
+        artefacts = self.artefact_extractor.extract()
+        self._extracted_artefacts = artefacts
+
+        if not artefacts:
+            print(
+                f"WARNING: No artefacts found in '{input_dir}'. "
+                "Architecture analysis is not yet implemented.",
+                file=sys.stderr,
+            )
+            return AnalysisResults(analyses=[])
+
+        # Analyze artefacts using AI checker with guidelines and chat model.
+        # asyncio.run() will raise RuntimeError if there is already a running
+        # event loop (e.g. inside pytest-asyncio or Jupyter). In that case,
+        # delegate to a fresh thread that owns its own event loop.
+        coro = self.ai_checker.analyze(
+            artefacts, self.guidelines_content, self._chat_model
+        )
+        try:
+            asyncio.get_running_loop()
+            # We're inside a running loop — run the coroutine in a new thread.
+            with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
+                analysis_results = pool.submit(asyncio.run, coro).result()
+        except RuntimeError:
+            # No running loop — safe to call asyncio.run() directly.
+            analysis_results = asyncio.run(coro)
+
+        return analysis_results
+
+    def format_and_output(
+        self,
+        analysis_results: AnalysisResults,
+        output_file: str | None = None,
+        html_file: str | None = None,
+        guidelines_output_dir: str | None = None,
+    ) -> None:
+        """Format and output analysis results.
+
+        Args:
+            analysis_results: AnalysisResults to format and output
+            output_file: Output file for JSON results (None for stdout)
+            html_file: Output file for HTML report (optional)
+            guidelines_output_dir: Output directory for guideline pages (optional)
+        """
+        # Use previously extracted artefacts (avoids re-parsing)
+        original_artefacts = self._extracted_artefacts
+
+        # Initialize result formatter with analysis results
+        self.result_formatter = ResultFormatter(
+            analysis_results,
+            model_name=self.model_name,
+            guidelines_reader=self.guidelines_reader,
+            guidelines_output_dir=guidelines_output_dir,
+            original_requirements=original_artefacts,
+        )
+
+        # Output JSON results (primary output)
+        if output_file:
+            self.result_formatter.output(output_file)
+        else:
+            self.result_formatter.output(None)  # Print to stdout
+
+        # Output HTML report if requested
+        if html_file:
+            self.result_formatter.output(html_file)
+
+
+def argument_parser() -> argparse.ArgumentParser:
+    """Create argument parser for CLI."""
+    parser = argparse.ArgumentParser(
+        description="Analyze TRLC requirements against engineering guidelines"
+    )
+    parser.add_argument(
+        "-i",
+        "--input",
+        required=True,
+        help="Path to directory containing TRLC files to analyze",
+    )
+    parser.add_argument(
+        "--deps",
+        action="append",
+        default=[],
+        help=(
+            "Additional directories for dependency resolution "
+            "(can be specified multiple times)"
+        ),
+    )
+    parser.add_argument(
+        "-o",
+        "--output",
+        default=None,
+        help="Output file for JSON analysis results (required for Bazel rules)",
+    )
+    parser.add_argument(
+        "--html",
+        default=None,
+        help="Output file for HTML report (optional)",
+    )
+    parser.add_argument(
+        "-g",
+        "--guidelines",
+        default="guidelines",
+        help="Relative path to guidelines directory (default: guidelines)",
+    )
+    parser.add_argument(
+        "-m",
+        "--model",
+        default=DEFAULT_MODEL,
+        help=f"AI model to use (default: {DEFAULT_MODEL})",
+    )
+    parser.add_argument(
+        "-c",
+        "--cache",
+        default=None,
+        help="Directory path for caching analysis results (optional)",
+    )
+    parser.add_argument(
+        "--guidelines-output",
+        default=None,
+        help="Output directory for guideline HTML pages (optional)",
+    )
+    parser.add_argument(
+        "-b",
+        "--batch-size",
+        type=int,
+        default=None,
+        help=(
+            "Number of requirements to process per batch (optional, "
+            "default: process all at once)"
+        ),
+    )
+    parser.add_argument(
+        "--max-concurrent-requests",
+        type=int,
+        default=5,
+        help="Maximum number of concurrent API requests (default: 5)",
+    )
+    parser.add_argument(
+        "--max-batch-chars",
+        type=int,
+        default=50000,
+        help="Maximum total characters per batch (default: 50000)",
+    )
+    parser.add_argument(
+        "--custom-ai-model",
+        default=None,
+        help="Path to custom ai_model.py file (optional)",
+    )
+    parser.add_argument(
+        "--debug-log",
+        default=None,
+        help="Path to write debug output (stderr) for Bazel output_groups (optional)",
+    )
+    parser.add_argument(
+        "--verbose",
+        action="store_true",
+        help="Enable verbose debug logging to debug log file (requires --debug-log)",
+    )
+    return parser
+
+
+def main() -> None:
+    """Main entry point for the orchestrator CLI."""
+    parser = argument_parser()
+    args = parser.parse_args()
+
+    # Initialize orchestrator and analyze; any exception propagates with its
+    # full traceback so CI logs show the root cause.
+    orchestrator = AnalysisOrchestrator(
+        model_name=args.model,
+        guidelines_path=args.guidelines,
+        cache_dir=args.cache,
+        debug_log=args.debug_log if args.verbose else None,
+        batch_size=args.batch_size,
+        custom_ai_model=args.custom_ai_model,
+        max_concurrent_requests=args.max_concurrent_requests,
+        max_batch_chars=args.max_batch_chars,
+    )
+    analysis_results = orchestrator.analyze_directory(args.input, args.deps)
+
+    # Format and output results
+    orchestrator.format_and_output(
+        analysis_results,
+        output_file=args.output,
+        html_file=args.html,
+        guidelines_output_dir=args.guidelines_output,
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/validation/ai_checker/src/ai_checker/requirement_extractor.py b/validation/ai_checker/src/ai_checker/requirement_extractor.py
new file mode 100644
index 00000000..f86998fb
--- /dev/null
+++ b/validation/ai_checker/src/ai_checker/requirement_extractor.py
@@ -0,0 +1,265 @@
+# *******************************************************************************
+# Copyright (c) 2026 Contributors to the Eclipse Foundation
+#
+# See the NOTICE file(s) distributed with this work for additional
+# information regarding copyright ownership.
+#
+# This program and the accompanying materials are made available under the
+# terms of the Apache License Version 2.0 which is available at
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# SPDX-License-Identifier: Apache-2.0
+# *******************************************************************************
+"""
+Extracts structured requirement data from TRLC files.
+
+This module provides functionality to parse TRLC requirement files and extract
+requirement metadata into a structured format suitable for AI analysis.
+"""
+
+import argparse
+import os
+from typing import Any
+
+import trlc.ast
+from trlc.errors import Message_Handler
+from trlc.trlc import Source_Manager
+
+from ai_checker.artefact_extractor import ArtefactExtractor
+
+
+class RequirementExtractor(ArtefactExtractor):
+    """Extracts structured requirement data from TRLC files."""
+
+    def __init__(
+        self, input_directory: str, dependency_directories: list[str] | None = None
+    ):
+        """
+        Initialize the RequirementExtractor with directory paths.
+
+        Args:
+            input_directory: Path to directory containing TRLC files to
+                analyze
+            dependency_directories: Optional list of additional
+                directories for link resolution
+        """
+        self.input_directory = os.path.abspath(input_directory)
+        self.dependency_directories = [
+            os.path.abspath(d) for d in (dependency_directories or [])
+        ]
+        self.symbols: trlc.ast.Symbol_Table | None = None
+
+    def parse_trlc_files(self) -> trlc.ast.Symbol_Table:
+        """
+        Parse TRLC files in the specified directories.
+
+        Registers all directories (input + dependencies) with TRLC for link resolution.
+ + Returns: + Symbol table containing parsed TRLC objects + + Raises: + ValueError: If parsing fails + """ + message_handler = Message_Handler() + source_manager = Source_Manager(message_handler) + + # Collect all directories and filter out overlapping ones + all_dirs = [self.input_directory] + self.dependency_directories + + # Remove duplicates and filter out directories that are + # subdirectories of others + unique_dirs = [] + for dir_path in sorted(set(all_dirs)): + # Check if this directory is a subdirectory of any already + # registered directory + is_subdir = False + for existing_dir in unique_dirs: + if dir_path.startswith(existing_dir + os.sep): + is_subdir = True + break + + # Also check if any existing directory is a subdirectory of this one + # In that case, remove the existing one and add this one + dirs_to_remove = [] + for i, existing_dir in enumerate(unique_dirs): + if existing_dir.startswith(dir_path + os.sep): + dirs_to_remove.append(i) + + for i in reversed(dirs_to_remove): + unique_dirs.pop(i) + + if not is_subdir: + unique_dirs.append(dir_path) + + # Register all unique, non-overlapping directories + for dir_path in unique_dirs: + source_manager.register_directory(dir_path) + + symbols = source_manager.process() + if symbols is None: + raise ValueError("Failed to parse TRLC Files") + + self.symbols = symbols + return symbols + + def extract_field_value( + self, obj: trlc.ast.Record_Object, field_name: str + ) -> Any | None: + """ + Extract a field value from a TRLC Record_Object. + + This function handles multiple field types: + - Implicit_Null: Returns None for null/empty fields + - Record_Reference: Resolves reference objects to their target's + fully qualified name by accessing the 'target' attribute and + calling fully_qualified_name() on it + - String values: Returns the value from the 'value' attribute + - Other types: Returns the field object as-is + + Args: + obj: The TRLC Record_Object to extract from + field_name: Name of the field to extract + + Returns: + The extracted field value (string, FQN for references, None + for null fields), or None if the field does not exist + """ + try: + # Try to get field from the record object's members + if hasattr(obj, "field") and field_name in obj.field: + field = obj.field[field_name] + + # Handle Implicit_Null objects (null values) + if isinstance(field, trlc.ast.Implicit_Null): + return None + + # Handle Record_Reference objects (for parent requirements) + if isinstance(field, trlc.ast.Record_Reference): + if hasattr(field, "target") and field.target is not None: + return field.target.fully_qualified_name() + return None + + # Handle field with value attribute (strings, etc.) + if hasattr(field, "value"): + return field.value + + return field + return None + except (AttributeError, KeyError): + return None + + def extract_requirements_data(self) -> list[dict[str, Any]]: + """ + Extract structured requirement data from TRLC symbol table. + + Only extracts requirements from the input_directory, not from + dependency directories. + + Returns: + List of dictionaries, each containing: + - unique_id: Fully qualified requirement name + - description: Requirement description text + - parent_requirement: Parent requirement ID if present + - requirement_type: Type of the requirement + """ + if self.symbols is None: + self.parse_trlc_files() + + requirements = [] + + for obj in self.symbols.iter_record_objects(): + # Only extract requirements from the input directory (not dependencies). 
+ # Use `+ os.sep` to avoid false-positive prefix matches + # (e.g. /foo/bar matching /foo/barbaz). + obj_file_path = os.path.abspath(obj.location.file_name) + if not obj_file_path.startswith(self.input_directory + os.sep): + continue + + unique_id = obj.fully_qualified_name() + + # Extract description field + description = self.extract_field_value(obj, "description") + if description is None: + description = "" + + # Extract parent requirement field + parent_requirement = self.extract_field_value(obj, "parent") + + # Get requirement type + requirement_type = ( + obj.n_typ.name + if hasattr(obj, "n_typ") and hasattr(obj.n_typ, "name") + else "Unknown" + ) + + requirements.append( + { + "unique_id": unique_id, + "description": str(description), + "parent_requirement": str(parent_requirement) + if parent_requirement + else None, + "requirement_type": requirement_type, + } + ) + + return requirements + + def extract(self) -> dict[str, dict[str, Any]]: + """ + Parse TRLC files and extract structured requirement data. + + Returns: + Dictionary mapping requirement IDs to their metadata: + { + "requirement_id": { + "description": "...", + "parent": "...", + "type": "..." + } + } + """ + self.parse_trlc_files() + requirements_list = self.extract_requirements_data() + + # Convert list format to dictionary format for the interface + artefacts = {} + for req in requirements_list: + req_id = req["unique_id"] + + # Guard against any object that wasn't fully resolved to a string + # (extract_field_value returns None or str for Record_Reference, + # but be defensive against future TRLC API changes). + parent = req["parent_requirement"] + if parent is not None and not isinstance(parent, str): + parent = "[not resolved]" + + artefacts[req_id] = { + "description": req["description"], + "parent": parent, + "type": req["requirement_type"], + } + + return artefacts + + +# CLI interface - for direct command-line usage only +def argument_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser() + parser.add_argument("-i", "--input", required=True) + + return parser + + +def main() -> None: + parser = argument_parser() + args = parser.parse_args() + + extractor = RequirementExtractor(args.input) + testfiles = extractor.extract() + print(testfiles) + + +if __name__ == "__main__": + main() diff --git a/validation/ai_checker/src/ai_checker/result_formatter.py b/validation/ai_checker/src/ai_checker/result_formatter.py new file mode 100644 index 00000000..afc41a85 --- /dev/null +++ b/validation/ai_checker/src/ai_checker/result_formatter.py @@ -0,0 +1,607 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +""" +Result formatter for TRLC AI Checker analysis results. + +This module provides formatting and output functionality for analysis results +in various formats (stdout, JSON, HTML). 
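+
+Typical usage (a sketch; in practice the orchestrator drives this class)::
+
+    formatter = ResultFormatter(results, model_name="gpt-4.1")
+    formatter.output("report.html")  # the file extension selects the format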
+""" + +import html +import os +import re +import subprocess +from datetime import datetime +from typing import Any, Dict, Optional + +from ai_checker.analysis_models import AnalysisResults +from ai_checker.guidelines_reader import GuidelinesReader + + +class ResultFormatter: + """ + Handles formatting and output of analysis results in multiple formats. + """ + + @staticmethod + def _get_git_hash() -> str: + """Get the current git commit hash. + + First checks the BUILD_EMBED_LABEL / STABLE_GIT_COMMIT stamp variables + injected by Bazel --workspace_status_command. Falls back to running + ``git rev-parse HEAD`` in the source tree, and finally returns + 'Unknown' if neither is available (e.g. inside a fully hermetised + Bazel action without network/git access). + + Returns: + Git commit hash (8 chars) or 'Unknown' + """ + # Prefer Bazel workspace-status stamp variables (set by --workspace_status_command) + for env_var in ("STABLE_GIT_COMMIT", "BUILD_EMBED_LABEL", "GIT_COMMIT"): + value = os.environ.get(env_var, "").strip() + if value: + return value[:8] + + try: + # Fall back to running git directly (works for local CLI invocations) + result = subprocess.run( + ["git", "rev-parse", "HEAD"], + capture_output=True, + text=True, + timeout=5, + cwd=os.path.dirname(os.path.abspath(__file__)), + ) + if result.returncode == 0: + return result.stdout.strip()[:8] + except (FileNotFoundError, subprocess.TimeoutExpired, OSError): + pass + return "Unknown" + + @staticmethod + def _get_timestamp() -> str: + """Get the current timestamp. + + Returns: + ISO format timestamp string + """ + return datetime.now().isoformat(timespec="seconds") + + @staticmethod + def _normalize_filename(text: str) -> str: + """Convert text to filesystem-safe filename slug. + + Args: + text: Text to normalize + + Returns: + Normalized string (lowercase, spaces to hyphens, unsafe chars removed) + """ + # Convert to lowercase + slug = text.lower() + # Replace spaces and underscores with hyphens + slug = re.sub(r"[\s_]+", "-", slug) + # Remove unsafe characters (keep alphanumeric and hyphens) + slug = re.sub(r"[^a-z0-9-]", "", slug) + # Remove leading/trailing hyphens + slug = slug.strip("-") + # Collapse multiple hyphens + slug = re.sub(r"-+", "-", slug) + return slug + + @staticmethod + def _extract_severity(finding: str) -> str: + """Extract severity level from finding text. + + Args: + finding: Finding text that may start with Major:, Minor:, **Major**:, Major:, or Major: + + Returns: + CSS class name: 'major', 'minor', or empty string if no severity found + """ + # Check for plain text format: Major: or Minor: + match = re.match(r"^(Major|Minor):\s", finding, re.IGNORECASE) + if match: + return match.group(1).lower() + # Check for markdown format: **Major**: or **Minor**: + match = re.match(r"^\*\*(Major|Minor)\*\*:", finding, re.IGNORECASE) + if match: + return match.group(1).lower() + # Check for HTML format: Major: or Minor: or Major: + match = re.match( + r"^<(?:b|strong)>(Major|Minor):", finding, re.IGNORECASE + ) + if match: + return match.group(1).lower() + # Check for escaped HTML: <b>Major:</b> or <strong>Minor:</strong> + match = re.match( + r"^<(?:b|strong)>(Major|Minor):</(?:b|strong)>", + finding, + re.IGNORECASE, + ) + if match: + return match.group(1).lower() + return "" + + @staticmethod + def _markdown_to_html(text: str) -> str: + """Convert markdown formatting to HTML while preserving existing HTML tags. 
+ + Args: + text: Text with markdown and/or HTML formatting + + Returns: + HTML-formatted string with markdown converted and HTML preserved + """ + # First escape HTML to protect it, but mark HTML tags specially + # Replace HTML tags with placeholders + html_tags = [] + + def save_html_tag(match): + html_tags.append(match.group(0)) + return f"__HTML_TAG_{len(html_tags) - 1}__" + + # Save HTML tags + text = re.sub(r"<[^>]+>", save_html_tag, text) + + # Now escape any remaining HTML special characters + text = html.escape(text) + + # Convert markdown formatting + # Bold: **text** to text + text = re.sub(r"\*\*([^*]+)\*\*", r"\1", text) + # Italic: *text* to text + text = re.sub(r"\*([^*]+)\*", r"\1", text) + + # Restore HTML tags + for i, tag in enumerate(html_tags): + text = text.replace(f"__HTML_TAG_{i}__", tag) + + # Convert line breaks to
tags + text = text.replace("\n", "
\n") + + return text + + @staticmethod + def _text_to_html(text: str) -> str: + """Convert plain text to HTML with proper line breaks and escaping. + + Args: + text: Plain text string + + Returns: + HTML-formatted string with line breaks converted to
tags + """ + # Escape HTML special characters + escaped = html.escape(text) + # Convert line breaks to
tags + return escaped.replace("\n", "
\n") + + def __init__( + self, + analysis_results: AnalysisResults, + model_name: Optional[str] = None, + guidelines_reader: Optional[GuidelinesReader] = None, + guidelines_output_dir: Optional[str] = None, + original_requirements: Optional[Dict[str, Dict[str, Any]]] = None, + ): + """ + Initialize the formatter with analysis results. + + Args: + analysis_results: AnalysisResults object containing analyses + model_name: Name of the AI model used for analysis + guidelines_reader: GuidelinesReader object containing all guidelines + guidelines_output_dir: Optional directory path for writing guideline files + original_requirements: Original requirement data as dict {id: {metadata}} + """ + self.results = analysis_results + self.model_name = model_name or "Unknown" + self.guidelines_reader = guidelines_reader + self.guidelines_output_dir = guidelines_output_dir + self.git_hash = self._get_git_hash() + self.timestamp = self._get_timestamp() + + # Create lookup map for original requirement descriptions + self.original_descriptions = {} + if original_requirements: + for req_id, req_data in original_requirements.items(): + self.original_descriptions[req_id] = req_data.get("description", "") + + def output(self, file_path: Optional[str] = None) -> None: + """ + Output results based on file path extension or to stdout. + + Args: + file_path: Optional path to output file. If None, prints JSON to stdout. + Extension determines format: .html for HTML, otherwise JSON. + """ + if file_path is None: + self._print_to_stdout() + else: + # Create parent directories if they don't exist + parent = os.path.dirname(file_path) + if parent: + os.makedirs(parent, exist_ok=True) + + extension = os.path.splitext(file_path)[1].lower() + + if extension == ".html": + self._write_html(file_path) + else: + self._write_json(file_path) + + print(f"Analysis results written to {file_path}") + + def _print_to_stdout(self) -> None: + """Print results as JSON to stdout.""" + output = self.results.model_dump_json(indent=2) + print(output) + + def _write_json(self, path: str) -> None: + """ + Write results as JSON file. + + Args: + path: File path for output JSON file + """ + with open(path, "w", encoding="utf-8") as f: + f.write(self.results.model_dump_json(indent=2)) + + def _write_html(self, path: str) -> None: + """ + Write results as HTML report with guideline subpages. + + Args: + path: File path for output HTML file + """ + + # Generate main report + html_content = self._generate_html_report(path) + with open(path, "w", encoding="utf-8") as f: + f.write(html_content) + + # Generate guideline subpages + if self.guidelines_reader: + self._generate_guideline_pages(path) + + def _generate_html_report(self, main_report_path: Optional[str] = None) -> str: + """ + Generate HTML report from analysis results. + + Args: + main_report_path: Optional path to main report file for computing relative links + + Returns: + HTML string containing formatted report + """ + # Calculate summary statistics + total_requirements = len(self.results.analyses) + avg_score = ( + sum(a.score for a in self.results.analyses) / total_requirements + if total_requirements > 0 + else 0 + ) + + # Escape all untrusted values before interpolating into HTML + safe_git_hash = html.escape(self.git_hash) + safe_timestamp = html.escape(self.timestamp) + safe_model_name = html.escape(self.model_name) + + doc = f""" + + + + + Requirements Analysis Report + + + +
+

Requirements Analysis Report

+

Comprehensive analysis of requirements against engineering guidelines

+
+

Hash: {safe_git_hash}

+

Timestamp: {safe_timestamp}

+
+
+ +
+
+

Total Requirements

+
{total_requirements}
+
+
+

Average Score

+
{avg_score:.1f}/10
+
+
+

AI Model Used

+
{safe_model_name}
+
+
+ +
+
+

Guidelines Used

+
+
    +{self._generate_guidelines_links(main_report_path)} +
+
+
+
+ +
+""" + + # Add individual requirement sections + for analysis in self.results.analyses: + score_class = ( + "high" + if analysis.score >= 8 + else "medium" + if analysis.score >= 5 + else "low" + ) + + # Use original full description if available, otherwise use + # AI's description + description = self.original_descriptions.get( + analysis.requirement_id, analysis.description + ) + # Escape requirement_id: it comes from user-supplied TRLC files + safe_req_id = html.escape(analysis.requirement_id) + + doc += f""" +
+
+
{safe_req_id}
+
{analysis.score:.1f}/10
+
+ +
+ Description: {self._text_to_html(description)} +
+""" + + if analysis.findings: + doc += """ +
+

Findings

+
    +""" + for finding in analysis.findings: + severity_class = self._extract_severity(finding) + formatted_finding = self._markdown_to_html(finding) + doc += f'
  • {formatted_finding}
  • \n' + doc += """
+
+""" + + if analysis.suggestions: + doc += """ +
+

Suggestions

+
    +""" + for suggestion in analysis.suggestions: + doc += f"
  • {self._markdown_to_html(suggestion)}
  • \n" + doc += """
+
+""" + + doc += """
+""" + + doc += """
+ + +""" + return doc + + def _generate_guidelines_links(self, main_report_path: Optional[str] = None) -> str: + """Generate HTML for guidelines list with links to subpages. + + Args: + main_report_path: Path to main report (used to compute relative paths) + + Returns: + HTML string with list items for guidelines + """ + if not self.guidelines_reader or not self.guidelines_reader.guidelines: + return ( + '
  • No guidelines specified
  • ' + ) + + # Compute output directory for guidelines (same logic as _generate_guideline_pages) + if main_report_path: + report_dir = os.path.dirname(main_report_path) + if self.guidelines_output_dir: + output_dir = self.guidelines_output_dir + else: + output_dir = os.path.join(report_dir, "guidelines") + + # Compute relative path from report directory to guidelines directory + try: + relative_base_str = os.path.relpath(output_dir, report_dir) + except ValueError: + # If not relative, use absolute path + relative_base_str = output_dir + else: + # Fallback to hardcoded path if no main_report_path provided + relative_base_str = "guidelines" + + links = [] + for guideline_name in sorted(self.guidelines_reader.guidelines.keys()): + # Normalize guideline name for filename + slug = self._normalize_filename(guideline_name) + links.append( + f'
  • ' + f'' + f"📋 {guideline_name}
  • " + ) + return "\n".join(links) + + def _generate_guideline_pages(self, main_report_path: str) -> None: + """Generate markdown files for each guideline. + + Args: + main_report_path: Path to main report (used to determine output directory) + """ + if not self.guidelines_reader: + return + + # Use guidelines subdirectory in the same parent directory as main report + if self.guidelines_output_dir: + output_dir = self.guidelines_output_dir + else: + output_dir = os.path.join(os.path.dirname(main_report_path), "guidelines") + + os.makedirs(output_dir, exist_ok=True) + + for ( + guideline_name, + guideline_content, + ) in self.guidelines_reader.guidelines.items(): + # Normalize guideline name for filename + slug = self._normalize_filename(guideline_name) + page_path = os.path.join(output_dir, f"guideline_{slug}.md") + with open(page_path, "w", encoding="utf-8") as f: + f.write(f"# {guideline_name}\n\n") + f.write(guideline_content) diff --git a/validation/ai_checker/src/copilot_adapter/__init__.py b/validation/ai_checker/src/copilot_adapter/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/validation/ai_checker/src/copilot_adapter/copilot_langchain.py b/validation/ai_checker/src/copilot_adapter/copilot_langchain.py new file mode 100644 index 00000000..541ecab8 --- /dev/null +++ b/validation/ai_checker/src/copilot_adapter/copilot_langchain.py @@ -0,0 +1,726 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +""" +LangChain BaseChatModel wrapper for the GitHub Copilot SDK. + +Provides a fully LangChain-compatible chat model that supports: +- Standard message types (system, human, AI, tool) +- Tool calling via bind_tools() +- Structured output via with_structured_output() +- Async generation (native) +- Sync generation (via asyncio bridge) +""" + +from __future__ import annotations + +import asyncio +import json +import logging +import os +import stat +import uuid +from collections.abc import Sequence +from pathlib import Path +from typing import Any, Callable, Optional + +from copilot import CopilotClient +from copilot.generated.session_events import SessionEvent, SessionEventType +from copilot.types import SessionConfig, Tool as CopilotTool, ToolInvocation, ToolResult + +logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Auth-related environment variables checked by the Copilot CLI (priority order) +# --------------------------------------------------------------------------- +_AUTH_ENV_VARS = [ + "COPILOT_GITHUB_TOKEN", # Recommended for explicit Copilot usage + "GH_TOKEN", # GitHub CLI compatible + "GITHUB_TOKEN", # GitHub Actions compatible +] + + +class CopilotSetupError(RuntimeError): + """Raised when the Copilot SDK environment is not correctly configured.""" + + +def _resolve_copilot_cli_path() -> Optional[str]: + """Find the executable copy of the copilot CLI created by copy_executables. + + rules_python strips the executable bit from binaries inside wheels. 
+ The pip.whl_mods / copy_executables mechanism creates an executable + copy called ``copilot_cli`` next to the package. We walk up from + ``copilot.__file__`` until we find it. + + IMPORTANT: we must NOT resolve symlinks (Path.resolve()) because in + the Bazel runfiles tree the symlinks point back to the source repo + where the genrule output does not exist. The raw __file__ path + stays inside the execution root where the copy IS present. + """ + import copilot as _copilot_pkg + + pkg_file = Path(_copilot_pkg.__file__) # .../site-packages/copilot/__init__.py + # Walk up: copilot/ -> site-packages/ -> lib/ -> ... -> repo root + current = pkg_file.parent + for _ in range(10): + candidate = current / "copilot_cli" + if candidate.exists(): + return str(candidate) + current = current.parent + return None + + +def _check_cli_binary(cli_path: str) -> list[str]: + """Validate that the CLI binary exists and is executable. + + Returns a list of problem descriptions (empty = all good). + """ + problems: list[str] = [] + p = Path(cli_path) + if not p.exists(): + problems.append(f"Copilot CLI binary not found at: {cli_path}") + return problems + if not p.is_file(): + problems.append(f"Copilot CLI path is not a file: {cli_path}") + return problems + mode = p.stat().st_mode + if not (mode & stat.S_IXUSR): + problems.append( + f"Copilot CLI binary is NOT executable (mode {oct(mode)}): {cli_path}\n" + " Hint: rules_python strips +x from wheel binaries. Make sure\n" + " pip.whl_mods / copy_executables is configured in MODULE.bazel." + ) + return problems + + +def _check_environment() -> list[str]: + """Check that the runtime environment has what the Copilot CLI needs. + + Returns a list of problem descriptions (empty = all good). + """ + problems: list[str] = [] + + if not os.environ.get("HOME"): + problems.append( + "HOME environment variable is not set.\n" + " The Copilot CLI needs HOME to locate stored OAuth credentials.\n" + " Ensure .bazelrc.ai_checker contains: build --action_env=HOME" + ) + + # The Copilot CLI binary (Node.js) uses fetch() to reach api.github.com. + # Behind a corporate proxy it needs HTTPS_PROXY. + if not os.environ.get("HTTPS_PROXY") and not os.environ.get("https_proxy"): + problems.append( + "HTTPS_PROXY / https_proxy environment variable is not set.\n" + " If you are behind a corporate proxy the Copilot CLI cannot\n" + " reach api.github.com and will fail with 'TypeError: fetch failed'.\n" + " Ensure .bazelrc.ai_checker contains: build --action_env=HTTPS_PROXY" + ) + + return problems + + +def _describe_auth_sources() -> str: + """Return a human-readable summary of available auth sources.""" + lines = ["Authentication sources detected:"] + found_any = False + + for var in _AUTH_ENV_VARS: + val = os.environ.get(var) + if val: + # Mask the token for security + masked = val[:4] + "..." + val[-4:] if len(val) > 10 else "****" + lines.append(f" [OK] ${var} = {masked}") + found_any = True + else: + lines.append(f" [ ] ${var} — not set") + + home = os.environ.get("HOME", "") + if home: + lines.append(f" [OK] $HOME = {home} (CLI can search system keychain)") + else: + lines.append( + " [ ] $HOME — not set (CLI cannot find stored OAuth credentials)" + ) + + if not found_any and not home: + lines.append("") + lines.append(" ** No authentication source available! **") + lines.append( + " Fix: set COPILOT_GITHUB_TOKEN, or ensure HOME is passed to the action." 
+ ) + lines.append( + " See: https://github.com/github/copilot-sdk/blob/main/docs/auth/index.md" + ) + + # Network / proxy info + lines.append("") + proxy = os.environ.get("HTTPS_PROXY") or os.environ.get("https_proxy") + if proxy: + lines.append(f" [OK] HTTPS_PROXY = {proxy}") + else: + lines.append( + " [ ] HTTPS_PROXY — not set (may cause 'fetch failed' behind a proxy)" + ) + + return "\n".join(lines) + + +from langchain_core.callbacks import ( + CallbackManagerForLLMRun, + AsyncCallbackManagerForLLMRun, +) +from langchain_core.language_models.chat_models import BaseChatModel +from langchain_core.messages import ( + AIMessage, + BaseMessage, + HumanMessage, + SystemMessage, + ToolMessage, +) +from langchain_core.outputs import ChatGeneration, ChatResult +from langchain_core.tools import BaseTool +from langchain_core.utils.function_calling import convert_to_openai_tool +from pydantic import BaseModel, Field, PrivateAttr + + +def _convert_tools_to_openai_format( + tools: Sequence[dict[str, Any] | type | Callable | BaseTool], +) -> list[dict[str, Any]]: + """Convert LangChain tool specs to OpenAI-format tool definitions.""" + result = [] + for tool in tools: + if isinstance(tool, dict): + # Already a dict — assume it's in OpenAI format or close enough + result.append(tool) + else: + result.append(convert_to_openai_tool(tool)) + return result + + +def _build_copilot_tools( + openai_tools: list[dict[str, Any]], +) -> list[CopilotTool]: + """Convert OpenAI-format tool dicts into Copilot SDK Tool objects. + + The handler is a no-op because we never let the Copilot agent + autonomously execute tools — we only need the definitions so the + model can emit tool_calls in its response. + """ + copilot_tools = [] + for t in openai_tools: + fn = t.get("function", t) + name = fn["name"] + description = fn.get("description", "") + parameters = fn.get("parameters") + + # Capture loop variables explicitly to avoid the closure-over-loop-variable + # pitfall. Although _noop_handler is never actually invoked (tool + # execution is intercepted at the LangChain level), the correct capture + # pattern is important for correctness and future maintainability. + def _make_noop_handler(tool_name: str): + async def _noop_handler(invocation: ToolInvocation) -> ToolResult: + # This handler should never actually be invoked because we + # intercept tool requests at the LangChain level. + return ToolResult( + textResultForLlm="Tool execution is managed by LangChain.", + resultType="success", + ) + + return _noop_handler + + copilot_tools.append( + CopilotTool( + name=name, + description=description, + handler=_make_noop_handler(name), + parameters=parameters, + ) + ) + return copilot_tools + + +def _deep_decode_json_strings(obj: Any) -> Any: + """Recursively decode values that are JSON-encoded strings. + + Some LLMs (e.g. Claude via the Copilot SDK) double-encode nested + lists or objects as JSON strings inside the outer tool-call arguments + dict. This function walks the structure and replaces any string value + that successfully parses as a JSON array or object with the decoded + Python value, leaving plain strings untouched. 
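+
+    Example:
+        >>> _deep_decode_json_strings({"ids": "[1, 2]", "note": "plain"})
+        {'ids': [1, 2], 'note': 'plain'}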
+ """ + if isinstance(obj, dict): + return {k: _deep_decode_json_strings(v) for k, v in obj.items()} + if isinstance(obj, list): + return [_deep_decode_json_strings(v) for v in obj] + if isinstance(obj, str): + stripped = obj.strip() + if stripped and stripped[0] in ("{", "["): + try: + decoded = json.loads(stripped) + # Only substitute if the result is a richer structure + if isinstance(decoded, (dict, list)): + return _deep_decode_json_strings(decoded) + except (json.JSONDecodeError, ValueError): + pass + return obj + + +def _messages_to_prompt(messages: list[BaseMessage]) -> str: + """Convert a list of LangChain messages into a single prompt string. + + The Copilot SDK accepts a plain text prompt rather than a structured + message array. We serialise the conversation into a tagged format so + the model can distinguish roles. + """ + parts: list[str] = [] + for msg in messages: + content = ( + msg.content if isinstance(msg.content, str) else json.dumps(msg.content) + ) + + if isinstance(msg, SystemMessage): + parts.append(f"[system]\n{content}") + elif isinstance(msg, HumanMessage): + parts.append(f"[user]\n{content}") + elif isinstance(msg, AIMessage): + text_parts = [f"[assistant]\n{content}"] if content else ["[assistant]"] + # Include any tool calls the AI made previously + if msg.tool_calls: + for tc in msg.tool_calls: + text_parts.append( + f"[tool_call id={tc['id']} name={tc['name']}]\n" + f"{json.dumps(tc['args'])}" + ) + parts.append("\n".join(text_parts)) + elif isinstance(msg, ToolMessage): + parts.append(f"[tool_result id={msg.tool_call_id}]\n{content}") + else: + parts.append(f"[{msg.type}]\n{content}") + + return "\n\n".join(parts) + + +def _extract_system_message(messages: list[BaseMessage]) -> Optional[str]: + """Extract the system message content if the first message is a SystemMessage.""" + if messages and isinstance(messages[0], SystemMessage): + content = messages[0].content + return content if isinstance(content, str) else json.dumps(content) + return None + + +class ChatCopilot(BaseChatModel): + """LangChain chat model backed by the GitHub Copilot SDK. + + Example: + >>> from copilot_langchain import ChatCopilot + >>> + >>> llm = ChatCopilot(model="gpt-4.1") + >>> response = await llm.ainvoke("Hello, how are you?") + >>> print(response.content) + + With tools: + >>> from langchain_core.tools import tool + >>> + >>> @tool + >>> def add(a: int, b: int) -> int: + ... \"\"\"Add two numbers.\"\"\" + ... return a + b + >>> + >>> llm_with_tools = ChatCopilot(model="gpt-4.1").bind_tools([add]) + >>> response = await llm_with_tools.ainvoke("What is 2 + 3?") + + With structured output: + >>> from pydantic import BaseModel + >>> + >>> class Answer(BaseModel): + ... value: int + ... explanation: str + >>> + >>> chain = ChatCopilot(model="gpt-4.1").with_structured_output(Answer) + >>> result = await chain.ainvoke("What is 2 + 2?") + >>> print(result.value) + """ + + model: str = "gpt-4.1" + """Model identifier to use (e.g. 
'gpt-4.1', 'claude-sonnet-4').""" + + timeout: float = 120.0 + """Timeout in seconds for waiting on a response.""" + + copilot_client_options: dict[str, Any] = Field(default_factory=dict) + """Options passed to CopilotClient() constructor.""" + + # Private attributes (not serialised by Pydantic) + _client: Optional[CopilotClient] = PrivateAttr(default=None) + _client_started: bool = PrivateAttr(default=False) + _bound_tools: list[dict[str, Any]] = PrivateAttr(default_factory=list) + _tool_choice: Optional[str] = PrivateAttr(default=None) + _ls_structured_output_format: Optional[dict[str, Any]] = PrivateAttr(default=None) + + # ------------------------------------------------------------------ # + # LangChain required properties + # ------------------------------------------------------------------ # + + @property + def _llm_type(self) -> str: + return "copilot-sdk" + + @property + def _identifying_params(self) -> dict[str, Any]: + return {"model": self.model} + + # ------------------------------------------------------------------ # + # Client lifecycle + # ------------------------------------------------------------------ # + + async def _ensure_client(self) -> CopilotClient: + """Lazily create, start, and verify the CopilotClient. + + Performs pre-flight checks before starting the CLI: + 1. Resolves the CLI binary path (copy_executables workaround) + 2. Validates the binary exists and is executable + 3. Checks required environment variables (HOME, token vars) + 4. Starts the CLI server + 5. Verifies authentication via ``get_auth_status()`` + + Raises: + CopilotSetupError: If any pre-flight check fails with a + detailed, actionable error message. + """ + if self._client is None: + opts = dict(self.copilot_client_options or {}) + + # --- Resolve CLI binary path -------------------------------- + if "cli_path" not in opts and "cli_url" not in opts: + resolved = _resolve_copilot_cli_path() + if resolved: + opts["cli_path"] = resolved + logger.info("Resolved Copilot CLI path: %s", resolved) + else: + logger.warning( + "Could not find copilot_cli (copy_executables target). " + "Falling back to bundled binary — this may fail with " + "PermissionError if the executable bit was stripped." + ) + + # --- Pre-flight: check binary ------------------------------- + cli_path = opts.get("cli_path") + if cli_path: + problems = _check_cli_binary(cli_path) + if problems: + raise CopilotSetupError( + "Copilot CLI binary check failed:\n" + + "\n".join(f" - {p}" for p in problems) + ) + + # --- Pre-flight: check environment -------------------------- + env_problems = _check_environment() + if env_problems: + logger.warning( + "Environment issues detected:\n%s\n%s", + "\n".join(f" - {p}" for p in env_problems), + _describe_auth_sources(), + ) + # Don't hard-fail here — the user may have a token env var. + # We'll verify auth after starting the client. + + logger.info("Starting CopilotClient...\n%s", _describe_auth_sources()) + self._client = CopilotClient(opts or None) + + if not self._client_started: + try: + await self._client.start() + except PermissionError as exc: + raise CopilotSetupError( + f"PermissionError starting Copilot CLI: {exc}\n" + " The CLI binary is not executable. Make sure\n" + " pip.whl_mods / copy_executables is configured in MODULE.bazel\n" + " to create an executable copy of copilot/bin/copilot." 
+ ) from exc + except RuntimeError as exc: + if "timeout" in str(exc).lower() or "Timeout" in str(exc): + raise CopilotSetupError( + f"Timeout starting Copilot CLI server: {exc}\n" + " The CLI started but did not become ready in time.\n" + " This usually means the CLI cannot authenticate.\n\n" + + _describe_auth_sources() + + "\n\n" + " Possible fixes:\n" + " 1. Run 'copilot' in a terminal and sign in interactively.\n" + " 2. Set COPILOT_GITHUB_TOKEN (or GH_TOKEN / GITHUB_TOKEN)\n" + " and pass it via --action_env=COPILOT_GITHUB_TOKEN.\n" + " 3. Ensure HOME is available in the action environment\n" + " (use_default_shell_env = True in the Bazel rule).\n" + " See: https://github.com/github/copilot-sdk/blob/main/docs/auth/index.md" + ) from exc + raise + except Exception as exc: + raise CopilotSetupError( + f"Failed to start CopilotClient: {type(exc).__name__}: {exc}\n\n" + + _describe_auth_sources() + ) from exc + + self._client_started = True + + # --- Post-start: verify authentication ---------------------- + try: + auth_status = await self._client.get_auth_status() + if ( + hasattr(auth_status, "isAuthenticated") + and auth_status.isAuthenticated + ): + user = getattr(auth_status, "login", "unknown") + logger.info("Copilot authenticated as: %s", user) + elif ( + hasattr(auth_status, "is_authenticated") + and auth_status.is_authenticated + ): + user = getattr(auth_status, "login", "unknown") + logger.info("Copilot authenticated as: %s", user) + else: + raise CopilotSetupError( + "Copilot CLI started but is NOT authenticated.\n" + f" Auth status: {auth_status}\n\n" + + _describe_auth_sources() + + "\n\n" + " Possible fixes:\n" + " 1. Run 'copilot' in a terminal and sign in interactively.\n" + " 2. Set COPILOT_GITHUB_TOKEN (or GH_TOKEN / GITHUB_TOKEN).\n" + " See: https://github.com/github/copilot-sdk/blob/main/docs/auth/index.md" + ) + except CopilotSetupError: + raise + except Exception as exc: + # get_auth_status itself failed — log but don't block. + # The actual LLM call will fail with a clearer error if auth + # is truly broken. + logger.warning( + "Could not verify auth status (non-fatal): %s: %s", + type(exc).__name__, + exc, + ) + + return self._client + + async def aclose(self) -> None: + """Shut down the underlying Copilot CLI process.""" + if self._client and self._client_started: + await self._client.stop() + self._client_started = False + + # ------------------------------------------------------------------ # + # Tool binding + # ------------------------------------------------------------------ # + + def bind_tools( + self, + tools: Sequence[dict[str, Any] | type | Callable | BaseTool], + *, + tool_choice: str | None = None, + **kwargs: Any, + ) -> ChatCopilot: + """Return a new ChatCopilot with tools bound. + + Args: + tools: Tools to make available to the model. + tool_choice: When set to "any", forces the model to use a tool. + Used internally by with_structured_output(). + + Returns: + A new ChatCopilot instance with the tools bound. 
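+
+        Example (sketch; ``my_tool`` stands for any LangChain tool):
+            >>> llm = ChatCopilot(model="gpt-4.1")
+            >>> llm_structured = llm.bind_tools([my_tool], tool_choice="any")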
+ """ + openai_tools = _convert_tools_to_openai_format(tools) + # Create a shallow copy with the tools attached + new = self.model_copy() + new._bound_tools = openai_tools + new._tool_choice = tool_choice + new._ls_structured_output_format = kwargs.get("ls_structured_output_format") + new._client = self._client + new._client_started = self._client_started + return new + + # ------------------------------------------------------------------ # + # Core generation (async — the native path) + # ------------------------------------------------------------------ # + + async def _agenerate( + self, + messages: list[BaseMessage], + stop: list[str] | None = None, + run_manager: AsyncCallbackManagerForLLMRun | None = None, + **kwargs: Any, + ) -> ChatResult: + try: + client = await self._ensure_client() + except CopilotSetupError: + raise # Already has a clear message + except Exception as exc: + raise CopilotSetupError( + f"Unexpected error initialising Copilot SDK: {type(exc).__name__}: {exc}\n\n" + + _describe_auth_sources() + ) from exc + + # Build session config + session_config: SessionConfig = { + "model": kwargs.get("model", self.model), + } + + # Disable all built-in tools so only our bound tools are available + session_config["available_tools"] = [] + + # Merge any extra tools from kwargs with bound tools + extra_tools = kwargs.get("tools", []) + all_openai_tools = self._bound_tools + ( + _convert_tools_to_openai_format(extra_tools) if extra_tools else [] + ) + + if all_openai_tools: + session_config["tools"] = _build_copilot_tools(all_openai_tools) + + # Use system message from the conversation if present + system_content = _extract_system_message(messages) + if system_content: + base_system = system_content + # Remove system message from prompt construction + prompt_messages = [m for m in messages if not isinstance(m, SystemMessage)] + else: + base_system = "You are a helpful assistant." + prompt_messages = messages + + # When tool_choice="any" (structured output), force the model to + # respond exclusively via tool calls. + if self._tool_choice == "any" and all_openai_tools: + tool_names = [t.get("function", t)["name"] for t in all_openai_tools] + base_system += ( + "\n\nIMPORTANT: You MUST respond by calling one of the following " + f"tools: {', '.join(tool_names)}. " + "Do NOT respond with plain text. You MUST use a tool call. " + "Pass your entire answer as arguments to the tool." 
+ ) + + session_config["system_message"] = { + "mode": "replace", + "content": base_system, + } + + # Disable infinite sessions for simple request/response + session_config["infinite_sessions"] = {"enabled": False} + + # Create session + session = await client.create_session(session_config) + + try: + # Build the prompt from messages + prompt = _messages_to_prompt(prompt_messages) + + # Collect streaming events for tool calls + tool_requests: list[Any] = [] + + def _event_handler(event: Any) -> None: + if event.type == SessionEventType.ASSISTANT_MESSAGE: + if event.data.tool_requests: + tool_requests.extend(event.data.tool_requests) + + unsubscribe = session.on(_event_handler) + + try: + response = await session.send_and_wait( + {"prompt": prompt}, + timeout=self.timeout, + ) + finally: + unsubscribe() + + # Extract content + content = "" + if response and response.data and response.data.content: + content = response.data.content + + # Check for tool requests on the response itself + if response and response.data and response.data.tool_requests: + for tr in response.data.tool_requests: + if tr not in tool_requests: + tool_requests.append(tr) + + # Build tool_calls for the AIMessage + tool_calls = [] + for tr in tool_requests: + args = tr.arguments + if isinstance(args, str): + try: + args = json.loads(args) + except (json.JSONDecodeError, TypeError): + args = {"raw": args} + elif args is None: + args = {} + + # Deep-decode: some models (e.g. Claude via Copilot SDK) return + # nested lists/objects as JSON-encoded strings inside the outer + # tool-call arguments dict. Un-double-encode them so LangChain's + # structured-output parser receives proper Python objects. + if isinstance(args, dict): + args = _deep_decode_json_strings(args) + + tool_calls.append( + { + "name": tr.name, + "args": args if isinstance(args, dict) else {"raw": args}, + "id": tr.tool_call_id, + } + ) + + # Build the AIMessage + ai_message = AIMessage( + content=content, + tool_calls=tool_calls if tool_calls else [], + response_metadata={ + "model": self.model, + }, + ) + + return ChatResult( + generations=[ChatGeneration(message=ai_message)], + ) + finally: + await session.destroy() + + # ------------------------------------------------------------------ # + # Sync generation (bridges to async) + # ------------------------------------------------------------------ # + + def _generate( + self, + messages: list[BaseMessage], + stop: list[str] | None = None, + run_manager: CallbackManagerForLLMRun | None = None, + **kwargs: Any, + ) -> ChatResult: + """Synchronous generation — delegates to the async implementation.""" + try: + loop = asyncio.get_running_loop() + except RuntimeError: + loop = None + + if loop and loop.is_running(): + # We're already in an async context — use a helper to run in + # a new thread to avoid blocking the event loop. + import concurrent.futures + + with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool: + future = pool.submit( + asyncio.run, + self._agenerate(messages, stop, None, **kwargs), + ) + return future.result() + else: + return asyncio.run(self._agenerate(messages, stop, None, **kwargs))