-
Notifications
You must be signed in to change notification settings - Fork 4.6k
Expand file tree
/
Copy pathazure_chat_gpt_api_handlebars.py
More file actions
99 lines (80 loc) · 2.83 KB
/
azure_chat_gpt_api_handlebars.py
File metadata and controls
99 lines (80 loc) · 2.83 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
# Copyright (c) Microsoft. All rights reserved.
import asyncio
import logging
from azure.identity import AzureCliCredential
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
from semantic_kernel.contents import ChatHistory
from semantic_kernel.functions import KernelArguments
from semantic_kernel.prompt_template import PromptTemplateConfig
# Show only warnings and above; suppresses the SDK's chattier info logs.
logging.basicConfig(level=logging.WARNING)

# Persona prompt; rendered into the handlebars template as {{system_message}}.
system_message = """
You are a chat bot. Your name is Mosscap and
you have one goal: figure out what people need.
Your full name, should you need to know it, is
Splendid Speckled Mosscap. You communicate
effectively, but you tend to answer with long
flowery prose.
"""

# Build the kernel and register an Azure OpenAI chat completion service
# authenticated via the Azure CLI credential (requires a prior `az login`).
kernel = Kernel()
service_id = "chat-gpt"
chat_service = AzureChatCompletion(service_id=service_id, credential=AzureCliCredential())
kernel.add_service(chat_service)

# Fetch the service's default execution settings and tune generation knobs.
req_settings = kernel.get_prompt_execution_settings_from_service_id(service_id=service_id)
req_settings.max_tokens = 2000
req_settings.temperature = 0.7
req_settings.top_p = 0.8
# Let the model automatically invoke registered kernel functions (tool calls).
req_settings.function_choice_behavior = FunctionChoiceBehavior.Auto()

# Register the chat prompt as a kernel function. The handlebars template
# renders the system message followed by every message in the chat history.
# NOTE: the template string's second line stays at column 0 so the rendered
# prompt text is unchanged (indentation inside a triple-quoted string is
# part of the string).
chat_function = kernel.add_function(
    prompt_template_config=PromptTemplateConfig(
        template="""{{system_message}}{{#each chat_history}}
{{message_to_prompt}} {{/each}}""",
        template_format="handlebars",
        # Required so raw message content may flow into the template.
        allow_dangerously_set_content=True,
    ),
    function_name="chat",
    plugin_name="chat",
    prompt_execution_settings=req_settings,
)

# Seed the conversation with an initial exchange.
chat_history = ChatHistory()
chat_history.add_user_message("Hi there, who are you?")
chat_history.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need.")
async def chat() -> bool:
    """Run one round of the interactive chat loop.

    Prompts the user on stdin, invokes the registered chat function
    (streaming by default), prints the assistant reply, and records both
    turns in the module-level ``chat_history``.

    Returns:
        bool: True to continue the conversation loop, False to exit
        (on "exit", Ctrl-C, or EOF).
    """
    try:
        user_input = input("User:> ")
    except KeyboardInterrupt:
        print("\n\nExiting chat...")
        return False
    except EOFError:
        print("\n\nExiting chat...")
        return False

    if user_input == "exit":
        print("\n\nExiting chat...")
        return False

    chat_history.add_user_message(user_input)
    arguments = KernelArguments(system_message=system_message, chat_history=chat_history)

    stream = True
    if stream:
        answer = kernel.invoke_stream(
            chat_function,
            arguments=arguments,
        )
        print("Mosscap:> ", end="")
        # BUG FIX: the original streaming branch never recorded the
        # assistant reply, so with stream=True the model lost its own
        # prior turns on every subsequent round. Accumulate the streamed
        # chunks and append the full reply to the history, mirroring the
        # non-streaming branch below.
        chunks: list[str] = []
        async for message in answer:
            piece = str(message[0])
            chunks.append(piece)
            print(piece, end="")
        print("\n")
        chat_history.add_assistant_message("".join(chunks))
        return True

    answer = await kernel.invoke(
        chat_function,
        arguments=arguments,
    )
    print(f"Mosscap:> {answer}")
    chat_history.add_assistant_message(str(answer))
    return True
async def main() -> None:
    """Drive the interactive loop, running rounds until chat() signals exit."""
    while await chat():
        pass
# Script entry point: start the asyncio event loop for the chat demo.
if __name__ == "__main__":
    asyncio.run(main())