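"""LLM plumbing for menderbot: consent-gated OpenAI client setup via
llama-index, plus a chat-completion helper with retry."""
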
import os
from typing import Optional

from llama_index.core import Settings
from llama_index.core.chat_engine import SimpleChatEngine
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.llms.openai import OpenAI  # type: ignore[import-untyped]
from tenacity import retry, stop_after_attempt, wait_random_exponential

from menderbot.config import has_llm_consent, load_config

INSTRUCTIONS = (
    """You are a helpful electronic assistant with knowledge of Software Engineering."""
)

MODEL = "gpt-4-1106-preview"
TEMPERATURE = 0.5
MAX_TOKENS = 1000
FREQUENCY_PENALTY = 0
PRESENCE_PENALTY = 0.6
# Limits how many previous question/answer pairs we include in the prompt.
MAX_CONTEXT_QUESTIONS = 10


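# Module-private state, populated by init_openai() once the user has consented.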
__openai_client: Optional[OpenAI] = None
__key_env_var = "OPENAI_API_KEY"


def key_env_var() -> str:
    return __key_env_var


def init_openai() -> None:
    # pylint: disable-next=[global-statement]
    global __openai_client
    global __key_env_var
    if has_llm_consent():
        config = load_config()
        openai_config = config.get("apis", {}).get("openai", {})
        __key_env_var = openai_config.get("api_key_env_var", "OPENAI_API_KEY")
        organization_env_var = openai_config.get(
            "organization_env_var", "OPENAI_ORGANIZATION"
        )
        organization = os.getenv(organization_env_var)
        __openai_client = OpenAI(
            model=MODEL,
            api_key=os.getenv(__key_env_var),
            api_base=openai_config.get("api_base", "https://api.openai.com/v1"),
            temperature=TEMPERATURE,
            max_tokens=MAX_TOKENS,
            # The llama-index OpenAI wrapper takes api_base rather than base_url
            # and has no organization argument, so the org id is sent as the
            # standard OpenAI-Organization header; extra sampling parameters
            # are forwarded through additional_kwargs.
            default_headers=(
                {"OpenAI-Organization": organization} if organization else None
            ),
            additional_kwargs={
                "top_p": 1,
                "frequency_penalty": FREQUENCY_PENALTY,
                "presence_penalty": PRESENCE_PENALTY,
            },
        )


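# Runs at import time; a no-op unless the user has granted LLM consent.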
init_openai()


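# A sentinel key that routes responses to the canned test override below.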
def is_test_override() -> bool:
    return (
        os.getenv(key_env_var())
        == "sk-TEST00000000000000000000000000000000000000000000"
    )


def has_key() -> bool:
    return os.getenv(key_env_var(), "") != ""


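# Canned response returned when the test-override key is active.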
def override_response_for_test(messages) -> str:
    del messages
    return "<LLM Output>"


def is_debug() -> bool:
    return os.getenv("DEBUG_LLM", "0") == "1"


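# Retry transient API failures up to 3 attempts with randomized exponential backoff.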
@retry(wait=wait_random_exponential(min=3, max=90), stop=stop_after_attempt(3))
def get_response(
    instructions: str, previous_questions_and_answers: list, new_question: str
) -> str:
    """Get a response from the LLM chat engine.

    Args:
        instructions: The system instructions for the chat bot; these determine how it behaves
        previous_questions_and_answers: Chat history as (question, answer) pairs
        new_question: The new question to ask the bot

    Returns:
        The response text
    """
    # Build the message history, starting with the system instructions.
    history = [
        ChatMessage(role=MessageRole.SYSTEM, content=instructions),
    ]
    # Add the most recent previous questions and answers.
    for question, answer in previous_questions_and_answers[-MAX_CONTEXT_QUESTIONS:]:
        history.append(ChatMessage(role=MessageRole.USER, content=question))
        history.append(ChatMessage(role=MessageRole.ASSISTANT, content=answer))

    if is_debug():
        print("=== sending to LLM ===")
        for message in history:
            print(message.role, message.content)
        print("===")
    if is_test_override():
        return override_response_for_test(history)
    if __openai_client is None:
        raise ValueError("OpenAI client is not initialized, check consent?")
    Settings.llm = __openai_client
    chat_engine = SimpleChatEngine.from_defaults(
        chat_history=history,
    )
    # SimpleChatEngine.chat() takes the new message as a plain string and
    # returns an AgentChatResponse whose .response field holds the text.
    return chat_engine.chat(new_question).response


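# Example use of get_response (hypothetical question and history):
#
#     history: list = []
#     answer = get_response(INSTRUCTIONS, history, "What does this function do?")
#     history.append(("What does this function do?", answer))

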
def unwrap_codeblock(text: str) -> str:
    # Strips a surrounding ``` fence; note any language tag on the opening
    # fence line (e.g. "python") is left in place.
    return text.strip().removeprefix("```").removesuffix("```")