diff --git a/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb b/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb index 4353f47364d6d..b3f6f4e838d81 100644 --- a/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb +++ b/docs/docs/integrations/retrievers/pinecone_hybrid_search.ipynb @@ -1,351 +1,415 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "ab66dd43", - "metadata": {}, - "source": [ - "# Pinecone Hybrid Search\n", - "\n", - ">[Pinecone](https://docs.pinecone.io/docs/overview) is a vector database with broad functionality.\n", - "\n", - "This notebook goes over how to use a retriever that under the hood uses Pinecone and Hybrid Search.\n", - "\n", - "The logic of this retriever is taken from [this documentation](https://docs.pinecone.io/docs/hybrid-search)\n", - "\n", - "To use Pinecone, you must have an API key and an Environment. \n", - "Here are the [installation instructions](https://docs.pinecone.io/docs/quickstart)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9ab4ab62-9bb2-4ecf-9fbf-1af7f0be558b", - "metadata": {}, - "outputs": [], - "source": [ - "#!pip install pinecone-client pinecone-text" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bf0cf405-451d-4f87-94b1-2b7d65f1e1be", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import getpass\n", - "\n", - "os.environ[\"PINECONE_API_KEY\"] = getpass.getpass(\"Pinecone API Key:\")" - ] - }, - { - "cell_type": "code", - "execution_count": 75, - "id": "393ac030", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.retrievers import PineconeHybridSearchRetriever" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4577fea1-05e7-47a0-8173-56b0ddaa22bf", - "metadata": {}, - "outputs": [], - "source": [ - "os.environ[\"PINECONE_ENVIRONMENT\"] = getpass.getpass(\"Pinecone Environment:\")" - ] - }, - { - "cell_type": "markdown", - "id": "80e2e8e3-0fb5-4bd9-9196-9eada3439a61", - "metadata": {}, - "source": [ - "We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "314a7ee5-f498-45f6-8fdb-81428730083e", - "metadata": {}, - "outputs": [], - "source": [ - "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" - ] - }, - { - "cell_type": "markdown", - "id": "aaf80e7f", - "metadata": {}, - "source": [ - "## Setup Pinecone" - ] - }, - { - "cell_type": "markdown", - "id": "95d5d7f9", - "metadata": {}, - "source": [ - "You should only have to do this part once.\n", - "\n", - "Note: it's important to make sure that the \"context\" field that holds the document text in the metadata is not indexed. Currently you need to specify explicitly the fields you do want to index. For more information checkout Pinecone's [docs](https://docs.pinecone.io/docs/manage-indexes#selective-metadata-indexing)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 76, - "id": "3b8f7697", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "WhoAmIResponse(username='load', user_label='label', projectname='load-test')" - ] - }, - "execution_count": 76, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import os\n", - "import pinecone\n", - "\n", - "api_key = os.getenv(\"PINECONE_API_KEY\") or \"PINECONE_API_KEY\"\n", - "# find environment next to your API key in the Pinecone console\n", - "env = os.getenv(\"PINECONE_ENVIRONMENT\") or \"PINECONE_ENVIRONMENT\"\n", - "\n", - "index_name = \"langchain-pinecone-hybrid-search\"\n", - "\n", - "pinecone.init(api_key=api_key, environment=env)\n", - "pinecone.whoami()" - ] - }, - { - "cell_type": "code", - "execution_count": 77, - "id": "cfa3a8d8", - "metadata": {}, - "outputs": [], - "source": [ - "# create the index\n", - "pinecone.create_index(\n", - " name=index_name,\n", - " dimension=1536, # dimensionality of dense model\n", - " metric=\"dotproduct\", # sparse values supported only for dotproduct\n", - " pod_type=\"s1\",\n", - " metadata_config={\"indexed\": []}, # see explanation above\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e01549af", - "metadata": {}, - "source": [ - "Now that its created, we can use it" - ] - }, - { - "cell_type": "code", - "execution_count": 78, - "id": "bcb3c8c2", - "metadata": {}, - "outputs": [], - "source": [ - "index = pinecone.Index(index_name)" - ] - }, - { - "cell_type": "markdown", - "id": "dbc025d6", - "metadata": {}, - "source": [ - "## Get embeddings and sparse encoders\n", - "\n", - "Embeddings are used for the dense vectors, tokenizer is used for the sparse vector" - ] - }, - { - "cell_type": "code", - "execution_count": 79, - "id": "2f63c911", - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", - "\n", - "embeddings = OpenAIEmbeddings()" - ] - }, - { - "cell_type": "markdown", - "id": "96bf8879", - "metadata": {}, - "source": [ - "To encode the text to sparse values you can either choose SPLADE or BM25. For out of domain tasks we recommend using BM25.\n", - "\n", - "For more information about the sparse encoders you can checkout pinecone-text library [docs](https://pinecone-io.github.io/pinecone-text/pinecone_text.html)." - ] - }, - { - "cell_type": "code", - "execution_count": 80, - "id": "c3f030e5", - "metadata": {}, - "outputs": [], - "source": [ - "from pinecone_text.sparse import BM25Encoder\n", - "\n", - "# or from pinecone_text.sparse import SpladeEncoder if you wish to work with SPLADE\n", - "\n", - "# use default tf-idf values\n", - "bm25_encoder = BM25Encoder().default()" - ] - }, - { - "cell_type": "markdown", - "id": "23601ddb", - "metadata": {}, - "source": [ - "The above code is using default tfids values. It's highly recommended to fit the tf-idf values to your own corpus. You can do it as follow:\n", - "\n", - "```python\n", - "corpus = [\"foo\", \"bar\", \"world\", \"hello\"]\n", - "\n", - "# fit tf-idf values on your corpus\n", - "bm25_encoder.fit(corpus)\n", - "\n", - "# store the values to a json file\n", - "bm25_encoder.dump(\"bm25_values.json\")\n", - "\n", - "# load to your BM25Encoder object\n", - "bm25_encoder = BM25Encoder().load(\"bm25_values.json\")\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "5462801e", - "metadata": {}, - "source": [ - "## Load Retriever\n", - "\n", - "We can now construct the retriever!" 
- ] - }, - { - "cell_type": "code", - "execution_count": 81, - "id": "ac77d835", - "metadata": {}, - "outputs": [], - "source": [ - "retriever = PineconeHybridSearchRetriever(\n", - " embeddings=embeddings, sparse_encoder=bm25_encoder, index=index\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "1c518c42", - "metadata": {}, - "source": [ - "## Add texts (if necessary)\n", - "\n", - "We can optionally add texts to the retriever (if they aren't already in there)" - ] - }, - { - "cell_type": "code", - "execution_count": 82, - "id": "98b1c017", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████| 1/1 [00:02<00:00, 2.27s/it]\n" - ] - } - ], - "source": [ - "retriever.add_texts([\"foo\", \"bar\", \"world\", \"hello\"])" - ] - }, - { - "cell_type": "markdown", - "id": "08437fa2", - "metadata": {}, - "source": [ - "## Use Retriever\n", - "\n", - "We can now use the retriever!" - ] - }, - { - "cell_type": "code", - "execution_count": 83, - "id": "c0455218", - "metadata": {}, - "outputs": [], - "source": [ - "result = retriever.get_relevant_documents(\"foo\")" - ] - }, - { - "cell_type": "code", - "execution_count": 84, - "id": "7dfa5c29", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Document(page_content='foo', metadata={})" - ] - }, - "execution_count": 84, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "result[0]" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.6" - }, - "vscode": { - "interpreter": { - "hash": "7ec0d8babd8cabf695a1d94b1e586d626e046c9df609f6bad065d15d49f67f54" - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 + "cells": [ + { + "cell_type": "markdown", + "id": "ab66dd43", + "metadata": {}, + "source": [ + "# Pinecone Hybrid Search\n", + "\n", + ">[Pinecone](https://docs.pinecone.io/docs/overview) is a vector database with broad functionality.\n", + "\n", + "This notebook goes over how to use a retriever that under the hood uses Pinecone and Hybrid Search.\n", + "\n", + "The logic of this retriever is taken from [this documentation](https://docs.pinecone.io/docs/hybrid-search)\n", + "\n", + "To use Pinecone, you must have an API key and an Environment. \n", + "Here are the [installation instructions](https://docs.pinecone.io/docs/quickstart)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ab4ab62-9bb2-4ecf-9fbf-1af7f0be558b", + "metadata": {}, + "outputs": [], + "source": [ + "#!pip install pinecone-client pinecone-text\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "bf0cf405-451d-4f87-94b1-2b7d65f1e1be", + "metadata": {}, + "outputs": [ + { + "name": "stdin", + "output_type": "stream", + "text": [ + "Pinecone API Key: ········\n" + ] + } + ], + "source": [ + "import os\n", + "import getpass\n", + "\n", + "os.environ[\"PINECONE_API_KEY\"] = getpass.getpass(\"Pinecone API Key:\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "393ac030", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.retrievers import PineconeHybridSearchRetriever\n", + "from langchain.retrievers.pinecone_hybrid_search import PineconeIndexUpsert\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "4577fea1-05e7-47a0-8173-56b0ddaa22bf", + "metadata": {}, + "outputs": [ + { + "name": "stdin", + "output_type": "stream", + "text": [ + "Pinecone Environment: ········\n" + ] + } + ], + "source": [ + "os.environ[\"PINECONE_ENVIRONMENT\"] = getpass.getpass(\"Pinecone Environment:\")\n" + ] + }, + { + "cell_type": "markdown", + "id": "80e2e8e3-0fb5-4bd9-9196-9eada3439a61", + "metadata": {}, + "source": [ + "We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "314a7ee5-f498-45f6-8fdb-81428730083e", + "metadata": {}, + "outputs": [ + { + "name": "stdin", + "output_type": "stream", + "text": [ + "OpenAI API Key: ········\n" + ] + } + ], + "source": [ + "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n" + ] + }, + { + "cell_type": "markdown", + "id": "aaf80e7f", + "metadata": {}, + "source": [ + "## Setup Pinecone" + ] + }, + { + "cell_type": "markdown", + "id": "95d5d7f9", + "metadata": {}, + "source": [ + "You should only have to do this part once.\n", + "\n", + "Note: it's important to make sure that the \"context\" field that holds the document text in the metadata is not indexed. Currently you need to specify explicitly the fields you do want to index. For more information checkout Pinecone's [docs](https://docs.pinecone.io/docs/manage-indexes#selective-metadata-indexing)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "3b8f7697", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "WhoAmIResponse(username='c78f2bd', user_label='default', projectname='5c4ca0b')" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import os\n", + "import pinecone\n", + "\n", + "api_key = os.getenv(\"PINECONE_API_KEY\") or \"PINECONE_API_KEY\"\n", + "# find environment next to your API key in the Pinecone console\n", + "env = os.getenv(\"PINECONE_ENVIRONMENT\") or \"PINECONE_ENVIRONMENT\"\n", + "\n", + "index_name = \"langchain-pinecone-hybrid-search\"\n", + "\n", + "pinecone.init(api_key=api_key, environment=env)\n", + "pinecone.whoami()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 77, + "id": "cfa3a8d8", + "metadata": {}, + "outputs": [], + "source": [ + "# create the index\n", + "pinecone.create_index(\n", + " name=index_name,\n", + " dimension=1536, # dimensionality of dense model\n", + " metric=\"dotproduct\", # sparse values supported only for dotproduct\n", + " pod_type=\"s1\",\n", + " metadata_config={\"indexed\": []}, # see explanation above\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e01549af", + "metadata": {}, + "source": [ + "Now that it's created, we can use it.\n", + "We get a handle to the index through our upsert wrapper, which lets us choose between a single-threaded and a multithreaded upsert implementation.\n", + "Pass `pool_threads=4` if you want to use the multithreaded implementation of the upsert." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "bcb3c8c2", + "metadata": {}, + "outputs": [], + "source": [ + "index = PineconeIndexUpsert.get_index_upsert(index_name, pool_threads=1)\n" + ] + }, + { + "cell_type": "markdown", + "id": "dbc025d6", + "metadata": {}, + "source": [ + "## Get embeddings and sparse encoders\n", + "\n", + "Embeddings are used for the dense vectors; the tokenizer is used for the sparse vector." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "2f63c911", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.embeddings import OpenAIEmbeddings\n", + "\n", + "embeddings = OpenAIEmbeddings()\n" + ] + }, + { + "cell_type": "markdown", + "id": "96bf8879", + "metadata": {}, + "source": [ + "To encode the text to sparse values you can either choose SPLADE or BM25. For out-of-domain tasks we recommend using BM25.\n", + "\n", + "For more information about the sparse encoders you can check out the pinecone-text library [docs](https://pinecone-io.github.io/pinecone-text/pinecone_text.html)." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "c3f030e5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "100% [........................................................................] 65406227 / 65406227" + ] + } + ], + "source": [ + "from pinecone_text.sparse import BM25Encoder\n", + "\n", + "# or from pinecone_text.sparse import SpladeEncoder if you wish to work with SPLADE\n", + "\n", + "# use default tf-idf values\n", + "bm25_encoder = BM25Encoder().default()\n" + ] + }, + { + "cell_type": "markdown", + "id": "23601ddb", + "metadata": {}, + "source": [ + "The above code uses default tf-idf values. It's highly recommended to fit the tf-idf values to your own corpus. 
You can do it as follow:\n", + "\n", + "```python\n", + "corpus = [\"foo\", \"bar\", \"world\", \"hello\"]\n", + "\n", + "# fit tf-idf values on your corpus\n", + "bm25_encoder.fit(corpus)\n", + "\n", + "# store the values to a json file\n", + "bm25_encoder.dump(\"bm25_values.json\")\n", + "\n", + "# load to your BM25Encoder object\n", + "bm25_encoder = BM25Encoder().load(\"bm25_values.json\")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "5462801e", + "metadata": {}, + "source": [ + "## Load Retriever\n", + "\n", + "We can now construct the retriever!" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "ac77d835", + "metadata": {}, + "outputs": [], + "source": [ + "retriever = PineconeHybridSearchRetriever(\n", + " embeddings=embeddings, sparse_encoder=bm25_encoder, index_upsert=index\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "id": "1c518c42", + "metadata": {}, + "source": [ + "## Add texts (if necessary)\n", + "\n", + "We can optionally add texts to the retriever (if they aren't already in there)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "98b1c017", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "55dc444cf9b24f49913335730bf2949f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/1 [00:00 None: + """Upsert vectors into a Pinecone index. + + Args: + vectors: List of vectors to upsert. + namespace: Namespace of the index. + batch_size: Batch size for upserting. + + Returns: + None + """ + ... + + def __call__( + self, + vectors: List[Dict[str, Any]], + namespace: Optional[str] = None, + batch_size: int = 64, + *args: Any, + **kwargs: Any, + ) -> None: + return self.upsert(vectors=vectors, namespace=namespace, batch_size=batch_size) + + @classmethod + def get_index_upsert( + cls, index_name: str, pool_threads: int = 1 + ) -> PineconeIndexUpsert: + """Get an instance of PineconeIndexUpsert. + + It is a wrapper around pinecone.Index that provides an + interface for upserting vectors either synchronously or using threads. + """ + import pinecone + + ret: Optional[PineconeIndexUpsert] = None + if pool_threads > 1: + index = pinecone.Index(index_name, pool_threads=pool_threads) + ret = ThreadedIndexUpsert(index) + else: + index = pinecone.Index(index_name) + ret = SyncIndexUpsert(index) + return ret + + +class ThreadedIndexUpsert(PineconeIndexUpsert): + """Upsert vectors into a Pinecone index using threads.""" + + def __init__(self, index: Index) -> None: + super().__init__(index) + + def upsert( + self, + vectors: List[Dict[str, Any]], + namespace: Optional[str] = None, + batch_size: int = 64, + *args: Any, + **kwargs: Any, + ) -> None: + # A threaded parallel implementation of upserting vectors into a Pinecone index. + # It works only for REST API, not gRPC. 
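+ # With async_req=True each batch upsert is handed to the client's thread pool and returns a result handle; the .get() calls below block until every batch has been written.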
+ async_res = [ + self.index.upsert( + batch, namespace=namespace, async_req=True, *args, **kwargs + ) + for batch in batch_iterate(batch_size, vectors) + ] + [res.get() for res in async_res] + + +class SyncIndexUpsert(PineconeIndexUpsert): + """Upsert vectors into a Pinecone index synchronously.""" + + def __init__(self, index: Index) -> None: + super().__init__(index) + + def upsert( + self, + vectors: List[Dict[str, Any]], + namespace: Optional[str] = None, + batch_size: int = 64, + *args: Any, + **kwargs: Any, + ) -> None: + self.index.upsert( + vectors, namespace=namespace, batch_size=batch_size, *args, **kwargs + ) def hash_text(text: str) -> str: @@ -23,12 +134,14 @@ def hash_text(text: str) -> str: def create_index( contexts: List[str], - index: Any, + index_upsert: PineconeIndexUpsert, embeddings: Embeddings, sparse_encoder: Any, ids: Optional[List[str]] = None, metadatas: Optional[List[dict]] = None, namespace: Optional[str] = None, + batch_size: int = 32, + chunk_size: int = 1000, ) -> None: """Create an index from a list of contexts. @@ -36,14 +149,17 @@ def create_index( Args: contexts: List of contexts to embed. - index: Index to use. + index_upsert: PineconeIndexUpsert instance to use for upserting. embeddings: Embeddings model to use. sparse_encoder: Sparse encoder to use. ids: List of ids to use for the documents. metadatas: List of metadata to use for the documents. + namespace: Namespace to use for the documents. + batch_size: Batch size to use for the index upsert. + chunk_size: Chunk size to use for the calculating embeddings. """ - batch_size = 32 - _iterator = range(0, len(contexts), batch_size) + # get index upsert threaded or not + _iterator = range(0, len(contexts), chunk_size) try: from tqdm.auto import tqdm @@ -55,44 +171,35 @@ def create_index( # create unique ids using hash of the text ids = [hash_text(context) for context in contexts] + metadatas = metadatas or [{} for _ in contexts] + for metadata, context in zip(metadatas, contexts): + metadata["context"] = context + for i in _iterator: - # find end of batch - i_end = min(i + batch_size, len(contexts)) # extract batch - context_batch = contexts[i:i_end] - batch_ids = ids[i:i_end] - metadata_batch = ( - metadatas[i:i_end] if metadatas else [{} for _ in context_batch] - ) - # add context passages as metadata - meta = [ - {"context": context, **metadata} - for context, metadata in zip(context_batch, metadata_batch) - ] + chunk_batch = contexts[i : i + chunk_size] + chunk_ids = ids[i : i + chunk_size] + chunk_metadata = metadatas[i : i + chunk_size] # create dense vectors - dense_embeds = embeddings.embed_documents(context_batch) + dense_embeds = embeddings.embed_documents(chunk_batch) # create sparse vectors - sparse_embeds = sparse_encoder.encode_documents(context_batch) + sparse_embeds = sparse_encoder.encode_documents(chunk_batch) for s in sparse_embeds: s["values"] = [float(s1) for s1 in s["values"]] - vectors = [] - # loop through the data and create dictionaries for upserts - for doc_id, sparse, dense, metadata in zip( - batch_ids, sparse_embeds, dense_embeds, meta - ): - vectors.append( - { - "id": doc_id, - "sparse_values": sparse, - "values": dense, - "metadata": metadata, - } + vectors = [ + { + "id": doc_id, + "sparse_values": sparse, + "values": dense, + "metadata": metadata, + } + for doc_id, sparse, dense, metadata in zip( + chunk_ids, sparse_embeds, dense_embeds, chunk_metadata ) - - # upload the documents to the new hybrid index - index.upsert(vectors, namespace=namespace) + ] + 
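+ # upsert this chunk of hybrid (dense + sparse) vectors through the configured upsert strategy (threaded or synchronous)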
index_upsert.upsert(vectors, namespace=namespace, batch_size=batch_size) class PineconeHybridSearchRetriever(BaseRetriever): @@ -103,7 +210,7 @@ class PineconeHybridSearchRetriever(BaseRetriever): """description""" sparse_encoder: Any """Sparse encoder to use.""" - index: Any + index_upsert: PineconeIndexUpsert """Pinecone index to use.""" top_k: int = 4 """Number of documents to return.""" @@ -124,15 +231,19 @@ def add_texts( ids: Optional[List[str]] = None, metadatas: Optional[List[dict]] = None, namespace: Optional[str] = None, + batch_size: int = 32, + chunk_size: int = 1000, ) -> None: create_index( texts, - self.index, + self.index_upsert, self.embeddings, self.sparse_encoder, ids=ids, metadatas=metadatas, namespace=namespace, + batch_size=batch_size, + chunk_size=chunk_size, ) @root_validator() @@ -162,7 +273,7 @@ def _get_relevant_documents( dense_vec, sparse_vec = hybrid_convex_scale(dense_vec, sparse_vec, self.alpha) sparse_vec["values"] = [float(s1) for s1 in sparse_vec["values"]] # query pinecone with the query parameters - result = self.index.query( + result = self.index_upsert.index.query( vector=dense_vec, sparse_vector=sparse_vec, top_k=self.top_k, diff --git a/libs/langchain/tests/integration_tests/retrievers/test_pinecone_hybrid_search.py b/libs/langchain/tests/integration_tests/retrievers/test_pinecone_hybrid_search.py new file mode 100644 index 0000000000000..4ea47887e5857 --- /dev/null +++ b/libs/langchain/tests/integration_tests/retrievers/test_pinecone_hybrid_search.py @@ -0,0 +1,186 @@ +from __future__ import annotations + +import importlib +import os +import uuid +from typing import TYPE_CHECKING, Generator, List + +import pytest + +from langchain.docstore.document import Document +from langchain.document_loaders import TextLoader +from langchain.embeddings import OpenAIEmbeddings +from langchain.retrievers.pinecone_hybrid_search import ( + PineconeHybridSearchRetriever, + PineconeIndexUpsert, +) + +if TYPE_CHECKING: + from pinecone_text.sparse.bm25_encoder import BM25Encoder + +index_name = "langchain-pinecone-hybrid-search" # name of the index +dimension = 1536 # dimension of the embeddings + + +def reset_pinecone() -> None: + assert os.environ.get("PINECONE_API_KEY") is not None + assert os.environ.get("PINECONE_ENVIRONMENT") is not None + + import pinecone + + importlib.reload(pinecone) + + pinecone.init( + api_key=os.environ.get("PINECONE_API_KEY"), + environment=os.environ.get("PINECONE_ENVIRONMENT"), + ) + + +@pytest.fixture(scope="function") +def texts() -> Generator[List[str], None, None]: + # Load the documents from a file located in the fixtures directory + documents = TextLoader( + os.path.join( + os.path.dirname(__file__), "../vectorstores/fixtures", "sharks.txt" + ) + ).load() + + yield [doc.page_content for doc in documents] + + +@pytest.fixture(scope="module") +def embedding_openai() -> OpenAIEmbeddings: + return OpenAIEmbeddings() + + +@pytest.fixture(scope="function") +def bm25_encoder() -> BM25Encoder: + from pinecone_text.sparse.bm25_encoder import BM25Encoder + + return BM25Encoder().default() + + +class TestPinecone: + @classmethod + def setup_class(cls) -> None: + import pinecone + + if index_name in pinecone.list_indexes(): + pinecone.delete_index(index_name) + pinecone.create_index( + name=index_name, + dimension=dimension, + metric="dotproduct", + pod_type="s1", + metadata_config={"indexed": []}, + ) + + @classmethod + def teardown_class(cls) -> None: + reset_pinecone() + + @pytest.fixture(autouse=True) + def setup(self) -> None: + 
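+ # re-initialize the pinecone client before every test so each case starts from a clean session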
reset_pinecone() + + @pytest.mark.vcr() + def test_add_text_threaded( + self, + texts: List[str], + embedding_openai: OpenAIEmbeddings, + bm25_encoder: BM25Encoder, + ) -> None: + """Test end to end construction and search.""" + index = PineconeIndexUpsert.get_index_upsert(index_name, pool_threads=2) + needs = "foobuu booo" + texts.insert(0, needs) + retrieval = PineconeHybridSearchRetriever( + index_upsert=index, + top_k=1, + embeddings=embedding_openai, + sparse_encoder=bm25_encoder, + ) + retrieval.add_texts(texts, batch_size=32, chunk_size=1000) + res = retrieval.get_relevant_documents(needs) + assert res == [Document(page_content=needs)] + + @pytest.mark.vcr() + def test_add_text_synced( + self, + texts: List[str], + embedding_openai: OpenAIEmbeddings, + bm25_encoder: BM25Encoder, + ) -> None: + """Test end to end construction and search.""" + index = PineconeIndexUpsert.get_index_upsert(index_name) + needs = "foobuu booo" + texts.insert(0, needs) + retrieval = PineconeHybridSearchRetriever( + index_upsert=index, + top_k=1, + embeddings=embedding_openai, + sparse_encoder=bm25_encoder, + ) + retrieval.add_texts(texts) + res = retrieval.get_relevant_documents(needs) + assert res == [Document(page_content=needs)] + + @pytest.mark.skipif(reason="slow to run for benchmark") + @pytest.mark.parametrize( + "pool_threads,batch_size,embeddings_chunk_size,data_multiplier", + [ + ( + 1, + 32, + 32, + 1000, + ), # simulate single threaded with embeddings_chunk_size = batch_size = 32 + ( + 1, + 32, + 1000, + 1000, + ), # simulate single threaded with embeddings_chunk_size = 1000 + ( + 4, + 32, + 1000, + 1000, + ), # simulate 4 threaded with embeddings_chunk_size = 1000 + (20, 64, 5000, 1000), + ], # simulate 20 threaded with embeddings_chunk_size = 5000 + ) + def test_from_texts_with_metadatas_benchmark( + self, + pool_threads: int, + batch_size: int, + embeddings_chunk_size: int, + data_multiplier: int, + texts: List[str], + embedding_openai: OpenAIEmbeddings, + bm25_encoder: BM25Encoder, + ) -> None: + """Test end to end construction and search.""" + + index = PineconeIndexUpsert.get_index_upsert( + index_name, pool_threads=pool_threads + ) + + texts *= data_multiplier + uuids = [uuid.uuid4().hex for _ in range(len(texts))] + metadatas = [{"page": i} for i in range(len(texts))] + + retrieval = PineconeHybridSearchRetriever( + index_upsert=index, + top_k=1, + embeddings=embedding_openai, + sparse_encoder=bm25_encoder, + ) + + retrieval.add_texts( + texts, + uuids, + metadatas, + batch_size=batch_size, + chunk_size=embeddings_chunk_size, + )
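
For reference, the retriever and the upsert wrapper introduced above compose roughly as in the following sketch. This is a minimal example rather than a definitive recipe: it assumes the `langchain-pinecone-hybrid-search` index already exists with `metric="dotproduct"` (as created in the notebook), that `PINECONE_API_KEY`, `PINECONE_ENVIRONMENT`, and `OPENAI_API_KEY` are set in the environment, and that `pool_threads=4` is only an illustrative value for opting into the threaded upsert path.

```python
import os

import pinecone
from pinecone_text.sparse import BM25Encoder

from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import PineconeHybridSearchRetriever
from langchain.retrievers.pinecone_hybrid_search import PineconeIndexUpsert

# initialize the client; the hybrid-search index is assumed to already exist
pinecone.init(
    api_key=os.environ["PINECONE_API_KEY"],
    environment=os.environ["PINECONE_ENVIRONMENT"],
)

# pool_threads > 1 selects the threaded upsert wrapper; pool_threads=1 falls back to the synchronous one
index_upsert = PineconeIndexUpsert.get_index_upsert(
    "langchain-pinecone-hybrid-search", pool_threads=4
)

retriever = PineconeHybridSearchRetriever(
    embeddings=OpenAIEmbeddings(),  # dense vectors
    sparse_encoder=BM25Encoder().default(),  # sparse vectors (default tf-idf values)
    index_upsert=index_upsert,
)

# chunk_size = how many texts are embedded per call to the embedding model;
# batch_size = how many vectors go into each Pinecone upsert request
retriever.add_texts(["foo", "bar", "world", "hello"], batch_size=32, chunk_size=1000)

docs = retriever.get_relevant_documents("foo")
print(docs[0].page_content)
```

With the synchronous wrapper, batching is delegated to `pinecone.Index.upsert`; the threaded wrapper fans the batches out with `async_req=True` and waits on the results, which is what the benchmark parameterization in the new test exercises.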