I am getting the following error while trying to run a LangChain script:
ValueError: `run` not supported when there is not exactly one input key, got ['question', 'documents'].
Traceback:
File "c:\users\aviparna.biswas\appdata\local\programs\python\python37\lib\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 565, in _run_script
exec(code, module.__dict__)
File "D:\Python Projects\POC\Radium\Ana\app.py", line 49, in <module>
answer = question_chain.run(formatted_prompt)
File "c:\users\aviparna.biswas\appdata\local\programs\python\python37\lib\site-packages\langchain\chains\base.py", line 106, in run
f"`run` not supported when there is not exactly one input key, got ['question', 'documents']."
My code is as follows.
import os
from apikey import apikey
import streamlit as st
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SequentialChain
#from langchain.memory import ConversationBufferMemory
from docx import Document
os.environ['OPENAI_API_KEY'] = apikey
# App framework
st.title('🦜🔗 Colab Ana Answering Bot..')
prompt = st.text_input('Plug in your question here')
# Upload multiple documents
uploaded_files = st.file_uploader("Choose your documents (docx files)", accept_multiple_files=True, type=['docx'])
document_text = ""
# Read and combine Word documents
def read_docx(file):
    doc = Document(file)
    full_text = []
    for paragraph in doc.paragraphs:
        full_text.append(paragraph.text)
    return '\n'.join(full_text)

for file in uploaded_files:
    document_text += read_docx(file) + "\n\n"
with st.expander('Contextual Prompt'):
    st.write(document_text)
# Prompt template
question_template = PromptTemplate(
    input_variables=['question', 'documents'],
    template='Given the following documents: {documents}. Answer the question: {question}'
)
# LLM
llm = OpenAI(temperature=0.9)
question_chain = LLMChain(llm=llm, prompt=question_template, verbose=True, output_key='answer')
# Show answer if there's a prompt and documents are uploaded
if prompt and document_text:
    formatted_prompt = question_template.format(question=prompt, documents=document_text)
    answer = question_chain.run(formatted_prompt)
    st.write(answer['answer'])
I have gone through the documentation and I am still getting the same error. I have already seen demos where LangChain chains take multiple inputs.
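For reference, this is the call shape I think the error message is pointing at, based on my reading of the docs. It is only a rough sketch of my understanding, and I have not confirmed that my installed version actually returns the 'answer' key this way:

# My understanding (unconfirmed): run() with one positional string only works when the
# chain has exactly one input key, so a chain declared with ['question', 'documents']
# would be called with a dict of inputs instead of one pre-formatted string:
result = question_chain({'question': prompt, 'documents': document_text})
st.write(result['answer'])  # assuming the output lands under the 'answer' output_key

Is that the right way to pass both inputs, or am I misreading how run() is meant to be used here?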