Use LlamaIndex with a different embedding model

OpenAI's embedding models are used across all of the LlamaIndex examples, even though they seem to be the most expensive and worst-performing embedding models compared to T5 and sentence-transformers models (see the comparison below).

How do I use all-roberta-large-v1 as the embedding model, in combination with OpenAI's GPT-3 as the "response builder"? I'm not even sure if I can use one model for creating/retrieving embeddings and another model to generate the response based on the retrieved embeddings.

Example

Following is an example of what I'm looking for:

documents = SimpleDirectoryReader('data').load_data()

# Use Roberta or any other open-source model to generate embeddings
index = ???????.from_documents(documents)

# Use GPT3 here
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")

print(response)

Model Comparison

Embedding Models

[Embedding model comparison chart not included in this copy — see the linked Source]

Leavings answered 31/5, 2023 at 9:53 Comment(2)
Thanks for asking this question! About this part: "I'm not even sure if I can use one model for creating/retrieving embeddings and another model to generate the response based on the retrieved embeddings." Did you figure out whether it is possible, and whether the quality of the responses is still good? – Feoffee
Did you find out? – Triplane

You can set it up in a service_context, using either a local model or one from Hugging Face:

from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding, ServiceContext

embed_model = LangchainEmbedding(
  HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
)
service_context = ServiceContext.from_defaults(embed_model=embed_model)

You can then either pass this service_context directly (sketched below), or set it globally:

from llama_index import set_global_service_context

set_global_service_context(service_context)
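
To tie it back to the question, here is a minimal sketch (assuming the same legacy ServiceContext API as above) of the first option: indexing with the Hugging Face embeddings while the default OpenAI LLM still generates the answer.

# Sketch: pass the service_context so indexing and retrieval use the HuggingFace
# embeddings; response synthesis still goes through the default OpenAI LLM.
from llama_index import SimpleDirectoryReader, VectorStoreIndex

documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents, service_context=service_context)

query_engine = index.as_query_engine()
print(query_engine.query("What did the author do growing up?"))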
Multipara answered 18/7, 2023 at 9:50 Comment(1)
This doesn't answer the question. Can you expand? – Triplane

Here's how to do it with open-source models:

# Imports assumed for the llama_index 0.9.x-era API used here; load_llm(),
# save_or_load_index() and DATA_DIR come from the full write-up linked below.
from llama_index import ServiceContext, set_global_tokenizer
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index.llms import ChatMessage, ChatResponse
from llama_index.response.schema import Response
from transformers import AutoTokenizer


# Load the open-source embedding model
def load_embedding_model() -> HuggingFaceEmbedding:
    return HuggingFaceEmbedding(model_name="WhereIsAI/UAE-Large-V1")

def run_inference(
    use_rag: bool, messages: list[ChatMessage]
) -> ChatResponse | Response:
    llm = load_llm()
    embedding_model = load_embedding_model()
    if not use_rag:
        return llm.chat(messages=messages)

    set_global_tokenizer(
        AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2").encode
    )
    service_context = ServiceContext.from_defaults(
        llm=llm,
        embed_model=embedding_model,
        system_prompt="You are a bot that answers questions about podcast transcripts",
    )
    index_dir = DATA_DIR / "indices"

    index = save_or_load_index(index_dir=index_dir, service_context=service_context)
    query_engine = index.as_query_engine()
    return query_engine.query(messages[1].content)
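
A hedged usage sketch, assuming the helpers above are in scope; note that run_inference() reads messages[1] as the question, so the list is system message first, user message second:

from llama_index.llms import MessageRole

# Hypothetical call; the role layout matches what run_inference() expects.
messages = [
    ChatMessage(role=MessageRole.SYSTEM, content="You are a bot that answers questions about podcast transcripts"),
    ChatMessage(role=MessageRole.USER, content="What topics were covered in the latest episode?"),
]
print(run_inference(use_rag=True, messages=messages))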

Full write-up

Mama answered 1/2 at 15:56 Comment(0)

See this code, which creates two different LLM configurations: one for the embedding step and one for the "context + query" evaluation:

import os
import sys

import llama_index.indices.vector_store.retrievers
import llama_index.query_engine.retriever_query_engine
import llama_index.response_synthesizers
import llama_index.retrievers

if 'OPENAI_API_KEY' not in os.environ:
  sys.stderr.write("""
  You haven't set up your API key yet.
  
  If you don't have an API key yet, visit:
  
  https://platform.openai.com/signup

  1. Make an account or sign in
  2. Click "View API Keys" from the top right menu.
  3. Click "Create new secret key"

  Then, open the Secrets Tool and add OPENAI_API_KEY as a secret.
  """)
  exit(1)

import streamlit as st
from llama_index import (
    ServiceContext,
    SimpleDirectoryReader,
    VectorStoreIndex,
)
from llama_index.llms import OpenAI

st.set_page_config(page_title="LlamaIndex Q&A with Lyft Financials",
                   page_icon="🦙",
                   layout="centered",
                   initial_sidebar_state="auto",
                   menu_items=None)


st.title("LlamaIndex 🦙 Q&A with Lyft Financials")


@st.cache_resource(show_spinner=False)
def load_data():
  """
    Loads and indexes the Lyft 2021 financials using the VectorStoreIndex.
    
    Returns:
    - VectorStoreIndex: Indexed representation of the Lyft 10-K.
  """
  with st.spinner(
      text="Loading and indexing the Lyft 10-K. This may take a while..."):
    reader = SimpleDirectoryReader(input_dir="./data", recursive=True)
    docs = reader.load_data()
    service_context__embedding = ServiceContext.from_defaults(
        llm=OpenAI(
            model="text-ada-001",
            temperature=0.0,
        ),
        system_prompt=
        "You are an AI assistant creating text embedding for financial reports."
    )
    index = VectorStoreIndex.from_documents(
        docs, service_context=service_context__embedding)
    return index


# Create Index
index = load_data()

retriever = llama_index.indices.vector_store.retrievers.VectorIndexRetriever(
    index=index,
    similarity_top_k=3,
)

llm_context_query__service_context = ServiceContext.from_defaults(
    llm=OpenAI(
        model="gpt-3.5-turbo",
        temperature=0.1,
    ),
    system_prompt=
    "You are an AI assistant answering questions related to financial report fragments."
)

# configure response synthesizer
# text_qa_template=text_qa_template,
# refine_template=refine_template,

response_synthesizer = llama_index.response_synthesizers.get_response_synthesizer(
    response_mode="refine",
    service_context=llm_context_query__service_context,
    use_async=False,
    streaming=False,
)

query_engine = (
    llama_index.query_engine.retriever_query_engine.RetrieverQueryEngine(
        retriever=retriever,
        response_synthesizer=response_synthesizer,
    ))

# Take input from the user
user_input = st.text_input("Enter Your Query", "")

# Display the input
if st.button("Submit"):
  st.write(f"Your Query: {user_input}")

  with st.spinner("Thinking..."):
    # Query the index
    result = query_engine.query(user_input)
    print(result.source_nodes)
    # Display the results
    st.write(f"Answer: {str(result)}")
Triplane answered 25/9, 2023 at 14:12 Comment(1)
Good example, tested working. – Cannonade

Here is an example that uses:

  • Chroma for vector storage
  • sentence-transformers/all-MiniLM-L6-v2 for embeddings
  • GPT-3.5 for the final LLM query

It does this by using one ServiceContext for indexing and another ServiceContext for querying the LLM.

import os
import chromadb
import llama_index
from llama_index.vector_stores import ChromaVectorStore
from llama_index.storage.storage_context import StorageContext
from llama_index import ServiceContext, VectorStoreIndex, SimpleDirectoryReader, LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.llms import OpenAI
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
from llama_index.indices.vector_store.retrievers import VectorIndexRetriever

docs_to_load = "/path/to/ingester/data-for-ingestion/small-batch"
user_input = "What are these documents about?"

def setup_environment():
    os.environ['OPENAI_API_KEY'] = 'sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
    os.environ['LLAMA_INDEX_CACHE_DIR'] = 'cache'

def load_documents(path):
    return SimpleDirectoryReader(
        path,
        recursive=True,
        required_exts=[".pdf"],
        filename_as_id=True,
    ).load_data()

def setup_index(documents):
    db = chromadb.PersistentClient(path="./chroma_db")
    chroma_collection = db.get_or_create_collection("quickstart")
    vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2'))
    service_context_embedding = ServiceContext.from_defaults(embed_model=embed_model)
    return VectorStoreIndex.from_documents(documents, storage_context=storage_context, service_context=service_context_embedding)

def setup_query_engine(index):
    service_context_llm = ServiceContext.from_defaults(
        llm=OpenAI(
            model="gpt-3.5-turbo",
            temperature=0.1,
        ),
        system_prompt="You are an AI assistant answering questions related to PDF documents."
    )
    retriever = VectorIndexRetriever(
        index=index,
        similarity_top_k=10,
    )
    response_synthesizer = llama_index.response_synthesizers.get_response_synthesizer(
        response_mode="compact",
        service_context=service_context_llm,
        use_async=False,
        streaming=False,
    )
    return RetrieverQueryEngine(
        retriever=retriever,
        response_synthesizer=response_synthesizer,
    )

def main():
    setup_environment()
    documents = load_documents(docs_to_load)
    index = setup_index(documents)
    query_engine = setup_query_engine(index)
    result = query_engine.query(user_input)
    print(f"Answer: {str(result)}")

if __name__ == "__main__":
    main()
Bellona answered 16/10, 2023 at 19:31 Comment(0)
B
1

This is another block that uses open-source embeddings and OpenAI for the LLM query.

import os
import logging
import sys
import llama_index
from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext, StorageContext
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.llms import OpenAI
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
from llama_index.indices.vector_store.retrievers import VectorIndexRetriever
from llama_index import load_index_from_storage
from icecream import ic

# Setup environment and logging
os.environ['OPENAI_API_KEY'] = 'your-openai-api-key'
os.environ['LLAMA_INDEX_CACHE_DIR'] = 'cache'
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

# User-defined variables
docs_to_load = "/path/to/your/documents"
persist_dir = "/path/to/your/persistent/directory"
user_input = "Your query here"
gpt_model = "gpt-4"

# Initialize embedding model (alternatively, embed_model = "local" would tell
# LlamaIndex to fall back to its default local embedding model)
embed_model = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')
service_context_embeddings = ServiceContext.from_defaults(embed_model=embed_model)

# Try to load the existing index
try:
    logging.info("Trying to load existing directory.")
    storage_context_persisted = StorageContext.from_defaults(persist_dir=persist_dir)
    index = load_index_from_storage(storage_context=storage_context_persisted, service_context=service_context_embeddings)
    logging.info("Loaded existing directory.")
except FileNotFoundError:
    logging.error("Failed to load existing directory, creating a new one.")
    documents = SimpleDirectoryReader(docs_to_load, recursive=True, required_exts=[".pdf"], filename_as_id=True).load_data()
    storage_context_empty = StorageContext.from_defaults()
    index = VectorStoreIndex.from_documents(documents=documents, storage_context=storage_context_empty, service_context=service_context_embeddings)
    index.storage_context.persist(persist_dir=persist_dir)

# Initialize retriever
retriever = VectorIndexRetriever(index=index, similarity_top_k=10)
nodes = retriever.retrieve(user_input)
ic(nodes)

# Initialize LLM service context and response synthesizer
service_context_llm = ServiceContext.from_defaults(llm=OpenAI(model=gpt_model))
response_synthesizer = llama_index.response_synthesizers.get_response_synthesizer(
    response_mode="compact",
    service_context=service_context_llm,
    use_async=False,
    streaming=False,
)

# Initialize and run query engine
query_engine = RetrieverQueryEngine(
    retriever=retriever,
    response_synthesizer=response_synthesizer,
)
response = query_engine.query(user_input)
logging.info(response)
Bellona answered 18/10, 2023 at 0:17 Comment(0)
U
0

First, define your own embedding model by subclassing BaseEmbedding:

# Imports assumed for the newer llama_index.core API; my_embedding() is a
# placeholder for whatever function produces your embedding vectors.
from typing import Any, List

from llama_index.core import Settings
from llama_index.core.embeddings import BaseEmbedding


class MyEmbedding(BaseEmbedding):
    def __init__(self, **kwargs: Any) -> None:
        """Init params."""
        super().__init__(**kwargs)

    @classmethod
    def class_name(cls) -> str:
        return "MyEmbedding"

    async def _aget_query_embedding(self, query: str) -> List[float]:
        return self._get_query_embedding(query)

    def _get_query_embedding(self, query: str) -> List[float]:
        """Return your own embedding array."""
        return my_embedding(query)

    def _get_text_embedding(self, text: str) -> List[float]:
        """Return your own embedding array."""
        return my_embedding(text)

Then set it on the LlamaIndex Settings object (Settings is the global configuration):

Settings.embed_model = MyEmbedding()
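
For completeness, a minimal end-to-end sketch (assuming the newer llama_index.core / Settings API and the bundled OpenAI LLM integration) that mirrors the flow from the question: the custom class handles embedding and retrieval, and OpenAI writes the final answer.

# Sketch: MyEmbedding embeds the documents and queries, GPT-3.5 synthesizes the response.
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.llms.openai import OpenAI

Settings.llm = OpenAI(model="gpt-3.5-turbo")

documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
print(index.as_query_engine().query("What did the author do growing up?"))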
Unspoken answered 4/7 at 8:26 Comment(0)