I am trying to combine multiple memory types with a LangChain conversational chain.
import os
from typing import Any, Dict, List

from langchain.chains import ConversationChain
from langchain.llms import OpenAI
from langchain.memory import (
    CombinedMemory,
    ConversationBufferMemory,
    ConversationEntityMemory,
    ConversationSummaryMemory,
)
from langchain.memory.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE
from langchain.prompts import PromptTemplate
from pydantic import BaseModel
# One shared LLM instance, reused by every memory and by the chain itself.
llm = OpenAI(openai_api_key=os.getenv('OPENAI_API_KEY'))

# FIX: every memory placed inside CombinedMemory must declare the same
# input_key ("input") so the chain can tell the real user input apart from
# the variables the memories inject into the prompt. Leaving input_key
# unset on the entity memory is what raised:
#   ValueError: One input key expected got ['input', 'chat_history_lines']
entity_memory = ConversationEntityMemory(llm=llm, input_key="input")

# Raw turn-by-turn transcript, exposed to the prompt as {chat_history_lines}.
conv_memory = ConversationBufferMemory(
    memory_key="chat_history_lines",
    input_key="input",
)

# Rolling summary. NOTE(review): its default memory_key is "history", which
# collides with ConversationEntityMemory's chat-history variable — give it a
# distinct key (and add a matching {summary} slot to the template) before
# enabling it in CombinedMemory below.
summary_memory = ConversationSummaryMemory(
    llm=llm,
    memory_key="summary",
    input_key="input",
)

# Combined memory: each sub-memory must contribute non-overlapping prompt
# variables.
memory = CombinedMemory(memories=[
    conv_memory,
    # summary_memory,  # enable once {summary} is added to the template
    entity_memory,
])

# The template must contain a slot for every variable the memories provide
# ({entities} and {history} from the entity memory, {chat_history_lines}
# from the buffer) plus the single chain input {input}.
_DEFAULT_TEMPLATE = """
You are a helpful assistant.
Known entities:
{entities}
History:
{history}
Current conversation:
{chat_history_lines}
User: {input}
You:"""
PROMPT = PromptTemplate(
    input_variables=['entities', 'chat_history_lines', 'history', 'input'],
    template=_DEFAULT_TEMPLATE,
)

conversation = ConversationChain(
    llm=llm,
    verbose=True,
    prompt=PROMPT,
    memory=memory,
)
conversation.predict(input="Hi!")
Calling `predict` fails with: `ValueError: One input key expected got ['input', 'chat_history_lines']`
What do I have to change in order to use multiple memories in a ConversationChain? Or do I have to use something other than ConversationChain?