Replies: 3 comments 2 replies
-
To handle multi-user scenarios more elegantly without recreating an agent on every request, keep a pool of agents keyed by user ID and reuse them, with each user's chat history stored in a shared chat store.
Here's a modified version of your code implementing these ideas:

import json
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from pydantic import Field
from llama_index.llms.openai.utils import ALL_AVAILABLE_MODELS, CHAT_MODELS
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
from llama_index.core.tools import QueryEngineTool, FunctionTool
from llama_index.core.storage.chat_store import SimpleChatStore
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.agent.openai import OpenAIAgent
# Initialize global settings
# Register the custom endpoint model id with llama-index's OpenAI model
# tables so it is accepted as a chat model with a known token limit,
# then install it as the global default LLM.
MODEL = 'ep-20241223171230-8tv46'
Settings.context_window = 4096
ALL_AVAILABLE_MODELS[MODEL] = 4000  # context size registered for this model id
CHAT_MODELS[MODEL] = 4000  # mark the id as a chat-capable model
# NOTE(review): api_base/api_key are placeholders — fill in real values.
Settings.llm = OpenAI(temperature=0.1, model=MODEL, api_base='xxxx', api_key='xxxx', max_tokens=512)
# Initialize chat store and agent pool
chat_store = SimpleChatStore()  # shared backing store for every user's history
agent_pool = {}  # user_id -> OpenAIAgent; built once per user, then reused
def get_or_create_chat_memory(user_id):
    """Return a memory buffer for *user_id*, backed by the shared chat store.

    Using ``chat_store_key=user_id`` keeps each user's history isolated
    inside the single module-level ``chat_store``.
    """
    return ChatMemoryBuffer.from_defaults(
        token_limit=3000,
        chat_store=chat_store,
        chat_store_key=user_id,
    )
def get_or_create_agent(user_id, user_info):
    """Return the cached agent for *user_id*, building it on first use.

    The agent is created with a per-user memory buffer and a system prompt
    seeded with the user's profile, then stored in ``agent_pool`` so later
    requests reuse the same instance.
    """
    cached = agent_pool.get(user_id)
    if cached is not None:
        return cached
    profile_json = json.dumps(user_info, ensure_ascii=False, indent=2)
    agent = OpenAIAgent.from_tools(
        query_engine_tools,
        verbose=True,
        system_prompt=SYSTEM_PROMPT.format(user_info=profile_json),
        memory=get_or_create_chat_memory(user_id),
    )
    agent_pool[user_id] = agent
    return agent
def update_user_info(property_name: str = Field(description="property name of user information, like name or age."),
                     property_value: str = Field(description="extracted value from user query.")) -> dict:
    """Tool: record a piece of user information extracted from the query.

    Mutates the module-level ``user_info`` dict in place: string-valued
    properties are overwritten, list-valued properties are appended to.
    Returns a small status dict for the agent to read back.
    """
    # Check membership first; the original fetched the value with .get()
    # before this test, doing a redundant lookup.
    if property_name not in user_info:
        return {'response': 'no such property in user-information'}
    current = user_info[property_name]
    if isinstance(current, str):
        user_info[property_name] = property_value
    elif isinstance(current, list):
        current.append(property_value)
    else:
        # Property exists but is neither str nor list — cannot update it.
        return {'response': 'illegal property_value'}
    return {'response': 'success.'}
query_engine_tools = [FunctionTool.from_defaults(update_user_info)]
# Demo user profiles. Note `hobby` is a list, so update_user_info appends
# to it instead of overwriting, unlike the scalar string fields.
USER_INFO_MAP = {
"user1": {
"name": 'Arvin',
"age": '18',
"height": '1.75m',
"weight": '63kg',
"hobby": ['basketball', 'listening music'],
},
"user2": {
"name": 'Jack',
"age": '36',
"height": '1.78m',
"weight": '75kg',
"hobby": ['watching movie'],
}
}
# Prompt template; {user_info} is filled with the user's profile JSON when
# the agent is built, so a cached agent keeps the profile it was created with.
SYSTEM_PROMPT = (
"You are a helpful assistant. Your task is to answer user questions carefully!\n"
"# User Information\n"
"1. User's personal information can help you to have a better responses.\n"
"2. You should detect the following user information from the user's query, and once detected, you should first call the tool to update it before answering the user's query.\n"
"3. NOTICE:DO NOT TELL USER YOU HAVE UPDATE THIS.\n"
"Below is user information:\n"
"```\n"
"{user_info}\n"
"```\n"
)
# Simple REPL for a single hard-coded user: each query is routed through
# that user's pooled agent, so history persists across turns.
user_id = 'user1'
while True:
    query = input('user:')
    print('\n')
    user_info = USER_INFO_MAP.get(user_id, {})
    agent = get_or_create_agent(user_id, user_info)
    response = agent.chat(message=query)
    print(f'bot:{response}')

This approach reduces the overhead of recreating agents and allows for more efficient handling of multiple users. To continue talking to Dosu, mention @dosu. Help Dosu learn! Give it feedback: Great Response | Irrelevant Answer | Incorrect Sources | Too Verbose | Hallucination | Other |
Beta Was this translation helpful? Give feedback.
-
This solution is not feasible as user_info is also dynamically updated. With your method, the user_info in the system prompt is fixed. @dosu |
Beta Was this translation helpful? Give feedback.
-
The agent doesn't have an update_system_prompt method! @dosu |
Beta Was this translation helpful? Give feedback.
-
Here are my code examples for multi-user and dynamically updating system prompts. Every time a user requests, I need to recreate an OpenAIAgent instance. I think this is not elegant. Is there a more elegant and convenient implementation way?
Beta Was this translation helpful? Give feedback.
All reactions