Reputation: 1
def claim_agent(user_question):
    """Run an interactive insurance-claim intake conversation.

    Starting from ``user_question``, repeatedly sends the user's input to the
    LLM chain and prints the agent's reply, until the reply contains a ``{``
    (taken as the start of the final JSON summary the prompt asks for).

    Parameters
    ----------
    user_question : str
        The customer's opening message.

    Returns
    -------
    str
        The buffered conversation history flattened into a single
        space-joined string (each piece prefixed with a space, matching the
        original concatenation format).

    NOTE(review): relies on module-level names ``ConversationBufferWindowMemory``,
    ``PromptTemplate``, ``LLMChain`` and ``Llama3_8b`` being defined/imported
    elsewhere in the file — confirm they are in scope.
    """
    # Keep the last 20 exchanges; the AI side is labelled "Insurance Agent".
    memory = ConversationBufferWindowMemory(ai_prefix="Insurance Agent", k=20)
    # Template text is preserved byte-for-byte: it is runtime behavior.
    prompt_template = PromptTemplate(
        input_variables=['history', 'input'],
        template="""
You are a Insurance agent bot, you have to talk with our customers and collect all the required details to fill the claim:
"Policy_number":
"Cause_of_accident":
"Contacted_Police/Fire_department":
"Report_Number":
"Street_number":
"Street_name":
"City":
"State_Province":
"Zip_Code":
"Country":
"Loss_date":
"time":
Make sure to ask only one at a time and also make your responses align with the customer sentiment. Make sure collect all the above data,
After collecting all the data provide the output in a json format which has all the above mentioned values and make sure to provide the time in 24hr format and also date in American format.
and say thank you for providing all the details our we will assign an adjuster to process the claim!.
conversation history:
{history}
human:{input}
AI:
""",
    )
    conversation_chain = LLMChain(
        llm=Llama3_8b,
        prompt=prompt_template,
        memory=memory,
    )

    query = user_question
    while True:
        response = conversation_chain.invoke(query)
        print("Agent: ", response['text'])
        # A '{' in the reply signals the final JSON summary — stop looping.
        if '{' in response['text']:
            break
        query = input("You: ")

    # Flatten the stored history into " key:line1 line2 ..." form.
    # (Original only read the first memory key via conversation[0][0];
    # iterating all keys generalizes that — the window memory normally
    # exposes a single 'history' key, so output is unchanged.)
    data = memory.load_memory_variables({})
    pieces = []
    for key, value in data.items():
        pieces.extend((key + ':' + value).split('\n'))
    # join instead of quadratic `+=` concatenation; leading space kept
    # to match the original output format exactly.
    return ''.join(' ' + piece for piece in pieces)
if __name__ == "__main__":
    # Guard the interactive session so importing this module has no side effects.
    user_question = input("Please enter your query: ")
    claim_agent(user_question)
For this code im getting error like this: Please enter your query: Hello Traceback (most recent call last): File "D:\Project_tests\AI Chat Agent\test.py", line 65, in claim_agent(User_question) File "D:\Project_tests\AI Chat Agent\test.py", line 50, in claim_agent response = conversation_chain.invoke(query) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "D:\Project_tests.venv\Lib\site-packages\langchain\chains\base.py", line 163, in invoke raise e File "D:\Project_tests.venv\Lib\site-packages\langchain\chains\base.py", line 153, in invoke self._call(inputs, run_manager=run_manager) File "D:\Project_tests.venv\Lib\site-packages\langchain\chains\llm.py", line 103, in _call response = self.generate([inputs], run_manager=run_manager) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "D:\Project_tests.venv\Lib\site-packages\langchain\chains\llm.py", line 115, in generate return self.llm.generate_prompt( ^^^^^^^^^^^^^^^^^^^^^^^^^ File "D:\Project_tests.venv\Lib\site-packages\langchain_core\language_models\chat_models.py", line 560, in generate_prompt return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "D:\Project_tests.venv\Lib\site-packages\langchain_core\language_models\chat_models.py", line 421, in generate raise e File "D:\Project_tests.venv\Lib\site-packages\langchain_core\language_models\chat_models.py", line 411, in generate self._generate_with_cache( File "D:\Project_tests.venv\Lib\site-packages\langchain_core\language_models\chat_models.py", line 632, in _generate_with_cache result = self._generate( ^^^^^^^^^^^^^^^ File "D:\Project_tests.venv\Lib\site-packages\langchain_groq\chat_models.py", line 242, in _generate response = self.client.create(messages=message_dicts, **params) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "D:\Project_tests.venv\Lib\site-packages\groq\resources\chat\completions.py", line 178, in create return self._post( ^^^^^^^^^^^ 
File "D:\Project_tests.venv\Lib\site-packages\groq_base_client.py", line 1194, in post return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "D:\Project_tests.venv\Lib\site-packages\groq_base_client.py", line 896, in request return self._request( ^^^^^^^^^^^^^^ File "D:\Project_tests.venv\Lib\site-packages\groq_base_client.py", line 972, in _request return self._retry_request( ^^^^^^^^^^^^^^^^^^^^ File "D:\Project_tests.venv\Lib\site-packages\groq_base_client.py", line 1020, in _retry_request return self._request( ^^^^^^^^^^^^^^ File "D:\Project_tests.venv\Lib\site-packages\groq_base_client.py", line 972, in _request return self._retry_request( ^^^^^^^^^^^^^^^^^^^^ File "D:\Project_tests.venv\Lib\site-packages\groq_base_client.py", line 1020, in _retry_request return self._request( ^^^^^^^^^^^^^^ File "D:\Project_tests.venv\Lib\site-packages\groq_base_client.py", line 987, in _request raise self._make_status_error_from_response(err.response) from None groq.InternalServerError: Error code: 503 - {'error': {'message': 'Service Unavailable', 'type': 'internal_server_error'}}
I'm trying to pass an input query to Groq, but it is raising this error. It worked well until about an hour ago, then it suddenly started producing these errors. I even tried changing the API key, but the issue still persists. If I use the LangChain `invoke` method directly and pass a simple prompt within it, it generates an output — so I don't think the issue is a rate limit or anything similar.
Upvotes: 0
Views: 2498
Reputation: 1
Hey, I am getting the same error. A 503 error indicates that the server is down for maintenance or is overloaded — source: https://console.groq.com/docs/errors
A few of their API servers might be down for maintenance/ upgrade/ servicing , therefore it might be better if you wait for sometime ( probably a day ~ 24 hrs. ) till they fix it up
I don't think anything is wrong with your code. Another alternative is to replace Groq with a different API provider, such as Replicate, Together, or OpenRouter (I think OpenRouter has a free tier for a few models).
Upvotes: 0