Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,7 @@

from pydantic import Field, model_validator

from semantic_kernel.connectors.ai.function_choice_type import FunctionChoiceType
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
from semantic_kernel.exceptions import ServiceInvalidExecutionSettingsError

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -46,10 +44,5 @@ class AnthropicChatPromptExecutionSettings(AnthropicPromptExecutionSettings):

@model_validator(mode="after")
def validate_tool_choice(self) -> "AnthropicChatPromptExecutionSettings":
    """Validate the tool choice payload after model construction.

    No restriction is enforced anymore: a ``{"type": "none"}`` tool choice is
    now accepted and passed through unchanged (see the connector's reset logic,
    which deliberately sets tool_choice to 'none'). The validator is kept as an
    extension point for future payload checks.
    """
    return self
Original file line number Diff line number Diff line change
Expand Up @@ -144,9 +144,7 @@ def _update_function_choice_settings_callback(
@override
def _reset_function_choice_settings(self, settings: "PromptExecutionSettings") -> None:
    """Reset the function-choice settings once auto-invocation is exhausted.

    Instead of clearing ``tools`` and ``tool_choice`` entirely, the tools list
    is preserved and ``tool_choice`` is switched to 'none'. This keeps the
    request payload valid when the chat history still references earlier
    tool-use blocks, while preventing the model from invoking further tools.
    """
    if hasattr(settings, "tool_choice"):
        settings.tool_choice = {"type": FunctionChoiceType.NONE.value}

@override
@trace_chat_completion(MODEL_PROVIDER_NAME)
Expand Down Expand Up @@ -386,7 +384,7 @@ def _get_tool_calls_from_message(self, message: Message) -> list[FunctionCallCon
id=content_block.id,
index=idx,
name=content_block.name,
arguments=getattr(content_block, "input", None),
arguments=json.dumps(content_block.input) if content_block.input else None,
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Empty dict {} is falsy in Python. When a tool has zero arguments, content_block.input is {}, so this expression produces None instead of '{}'. Use an explicit is not None check to correctly serialize zero-argument tool calls.

Suggested change
arguments=json.dumps(content_block.input) if content_block.input else None,
arguments=json.dumps(content_block.input) if content_block.input is not None else None,

)
)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

import pytest
from anthropic import AsyncAnthropic
from anthropic.types import Message
from anthropic.types import Message, TextBlock, ToolUseBlock, Usage

from semantic_kernel.connectors.ai.anthropic.prompt_execution_settings.anthropic_prompt_execution_settings import (
AnthropicChatPromptExecutionSettings,
Expand Down Expand Up @@ -527,6 +527,73 @@ async def test_send_chat_stream_request_tool_calls(
assert message is not None


async def test_get_chat_message_contents_preserves_tools_when_auto_invoke_exhausted(
    kernel: Kernel,
    mock_tool_calls_message: ChatMessageContent,
    mock_tool_call_result_message: ChatMessageContent,
):
    """After auto-invoke attempts run out, the final request must still carry
    the tools list while tool_choice is switched to 'none'."""
    first_turn = Message(
        id="tool_call_message_id",
        content=[
            ToolUseBlock(
                id="test_tool_use_block_id",
                input={"key": "test"},
                name="test-test",
                type="tool_use",
            )
        ],
        model="claude-3-opus-20240229",
        role="assistant",
        stop_reason="tool_use",
        stop_sequence=None,
        type="message",
        usage=Usage(input_tokens=10, output_tokens=10),
    )
    second_turn = Message(
        id="final_message_id",
        content=[TextBlock(text="Final answer", type="text")],
        model="claude-3-opus-20240229",
        role="assistant",
        stop_reason="end_turn",
        stop_sequence=None,
        type="message",
        usage=Usage(input_tokens=10, output_tokens=10),
    )

    # The mocked client replies with a tool call first, then a final answer.
    mock_client = MagicMock(spec=AsyncAnthropic)
    mock_messages = MagicMock()
    mock_messages.create = AsyncMock(side_effect=[first_turn, second_turn])
    mock_client.messages = mock_messages

    history = ChatHistory()
    history.add_user_message("What is 3+3?")
    history.add_message(mock_tool_calls_message)
    history.add_message(mock_tool_call_result_message)

    kernel.add_function("test", kernel_function(lambda key: "test", name="test"))

    settings = AnthropicChatPromptExecutionSettings(function_choice_behavior=FunctionChoiceBehavior.Auto())
    settings.function_choice_behavior.maximum_auto_invoke_attempts = 1
    service = AnthropicChatCompletion(
        ai_model_id="test_model_id",
        service_id="test",
        api_key="",
        async_client=mock_client,
    )

    await service.get_chat_message_contents(
        chat_history=history,
        settings=settings,
        kernel=kernel,
        arguments=KernelArguments(),
    )

    # Inspect the last outgoing request: tools survive, tool_choice is 'none'.
    final_kwargs = mock_messages.create.call_args_list[-1].kwargs
    assert "tools" in final_kwargs
    assert final_kwargs["tools"]
    assert final_kwargs["tool_choice"] == {"type": "none"}


def test_client_base_url(mock_anthropic_client_completion: MagicMock):
chat_completion_base = AnthropicChatCompletion(
ai_model_id="test_model_id", service_id="test", api_key="", async_client=mock_anthropic_client_completion
Expand All @@ -545,5 +612,36 @@ def test_chat_completion_reset_settings(
settings = AnthropicChatPromptExecutionSettings(tools=[{"name": "test"}], tool_choice={"type": "any"})
chat_completion._reset_function_choice_settings(settings)

assert settings.tools is None
assert settings.tool_choice is None
assert settings.tools == [{"name": "test"}]
assert settings.tool_choice == {"type": "none"}


def test_get_tool_calls_from_message_serializes_arguments(
    mock_anthropic_client_completion: MagicMock,
):
    """A tool-use block's input dict is serialized to a JSON string in the
    resulting function call content's arguments."""
    service = AnthropicChatCompletion(
        ai_model_id="test_model_id", service_id="test", api_key="", async_client=mock_anthropic_client_completion
    )
    response = Message(
        id="test_message_id",
        content=[
            # Leading text block must be ignored by tool-call extraction.
            TextBlock(text="<thinking></thinking>", type="text"),
            ToolUseBlock(
                id="test_tool_use_block_id",
                input={"input": 3, "amount": 3},
                name="math-Add",
                type="tool_use",
            ),
        ],
        model="claude-3-opus-20240229",
        role="assistant",
        stop_reason="tool_use",
        stop_sequence=None,
        type="message",
        usage=Usage(input_tokens=100, output_tokens=100),
    )

    extracted = service._get_tool_calls_from_message(response)

    assert len(extracted) == 1
    assert extracted[0].arguments == '{"input": 3, "amount": 3}'
Original file line number Diff line number Diff line change
@@ -1,13 +1,10 @@
# Copyright (c) Microsoft. All rights reserved.

import pytest

from semantic_kernel.connectors.ai.anthropic.prompt_execution_settings.anthropic_prompt_execution_settings import (
AnthropicChatPromptExecutionSettings,
)
from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
from semantic_kernel.exceptions import ServiceInvalidExecutionSettingsError


def test_default_anthropic_chat_prompt_execution_settings():
Expand Down Expand Up @@ -115,15 +112,15 @@ def test_create_options():


def test_tool_choice_none():
    """A 'none' tool choice is accepted (no longer raises) and preserved on the settings."""
    settings = AnthropicChatPromptExecutionSettings(
        service_id="test_service",
        extension_data={
            "temperature": 0.5,
            "top_p": 0.5,
            "max_tokens": 128,
            "tool_choice": {"type": "none"},
            "messages": [{"role": "system", "content": "Hello"}],
        },
        function_choice_behavior=FunctionChoiceBehavior.NoneInvoke(),
    )
    assert settings.tool_choice == {"type": "none"}
Loading