I'm just getting this error back after trying to prompt Han-Tyumi:
openai.error.RateLimitError: This app has encountered an error. The original error message is redacted to prevent data leaks. Full error details have been recorded in the logs (if you're on Streamlit Cloud, click on "Manage app" in the lower right of your app).
Traceback:
File "/home/adminuser/venv/lib/python3.9/site-packages/streamlit/runtime/scriptrunner/script_runner.py", line 534, in _run_script
exec(code, module.__dict__)
File "/mount/src/han-tyumi/han_tyumi.py", line 217, in <module>
st.session_state.last_response = run_query(query)
File "/mount/src/han-tyumi/han_tyumi.py", line 208, in run_query
response = full_chain.invoke({"question": query})
File "/home/adminuser/venv/lib/python3.9/site-packages/langchain_core/runnables/base.py", line 1514, in invoke
input = step.invoke(
File "/home/adminuser/venv/lib/python3.9/site-packages/langchain_core/runnables/base.py", line 2040, in invoke
output = {key: future.result() for key, future in zip(steps, futures)}
File "/home/adminuser/venv/lib/python3.9/site-packages/langchain_core/runnables/base.py", line 2040, in <dictcomp>
output = {key: future.result() for key, future in zip(steps, futures)}
File "/usr/local/lib/python3.9/concurrent/futures/_base.py", line 446, in result
return self.__get_result()
File "/usr/local/lib/python3.9/concurrent/futures/_base.py", line 391, in __get_result
raise self._exception
File "/usr/local/lib/python3.9/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/adminuser/venv/lib/python3.9/site-packages/langchain_core/runnables/base.py", line 1514, in invoke
input = step.invoke(
File "/home/adminuser/venv/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py", line 160, in invoke
self.generate_prompt(
File "/home/adminuser/venv/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py", line 491, in generate_prompt
return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
File "/home/adminuser/venv/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py", line 378, in generate
raise e
File "/home/adminuser/venv/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py", line 368, in generate
self._generate_with_cache(
File "/home/adminuser/venv/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py", line 524, in _generate_with_cache
return self._generate(
File "/home/adminuser/venv/lib/python3.9/site-packages/langchain_community/chat_models/openai.py", line 435, in _generate
response = self.completion_with_retry(
File "/home/adminuser/venv/lib/python3.9/site-packages/langchain_community/chat_models/openai.py", line 360, in completion_with_retry
return _completion_with_retry(**kwargs)
File "/home/adminuser/venv/lib/python3.9/site-packages/tenacity/__init__.py", line 289, in wrapped_f
return self(f, *args, **kw)
File "/home/adminuser/venv/lib/python3.9/site-packages/tenacity/__init__.py", line 379, in __call__
do = self.iter(retry_state=retry_state)
File "/home/adminuser/venv/lib/python3.9/site-packages/tenacity/__init__.py", line 325, in iter
raise retry_exc.reraise()
File "/home/adminuser/venv/lib/python3.9/site-packages/tenacity/__init__.py", line 158, in reraise
raise self.last_attempt.result()
File "/usr/local/lib/python3.9/concurrent/futures/_base.py", line 439, in result
return self.__get_result()
File "/usr/local/lib/python3.9/concurrent/futures/_base.py", line 391, in __get_result
raise self._exception
File "/home/adminuser/venv/lib/python3.9/site-packages/tenacity/__init__.py", line 382, in __call__
result = fn(*args, **kwargs)
File "/home/adminuser/venv/lib/python3.9/site-packages/langchain_community/chat_models/openai.py", line 358, in _completion_with_retry
return self.client.create(**kwargs)
File "/home/adminuser/venv/lib/python3.9/site-packages/openai/api_resources/chat_completion.py", line 25, in create
return super().create(*args, **kwargs)
File "/home/adminuser/venv/lib/python3.9/site-packages/openai/api_resources/abstract/engine_api_resource.py", line 155, in create
response, _, api_key = requestor.request(
File "/home/adminuser/venv/lib/python3.9/site-packages/openai/api_requestor.py", line 299, in request
resp, got_stream = self._interpret_response(result, stream)
File "/home/adminuser/venv/lib/python3.9/site-packages/openai/api_requestor.py", line 710, in _interpret_response
self._interpret_response_line(
File "/home/adminuser/venv/lib/python3.9/site-packages/openai/api_requestor.py", line 775, in _interpret_response_line
raise self.handle_error_response(