diff --git a/config/sidebar-guides.json b/config/sidebar-guides.json
index b9458d33f..176f65e32 100644
--- a/config/sidebar-guides.json
+++ b/config/sidebar-guides.json
@@ -55,15 +55,20 @@
       "title": "Artificial Intelligence",
       "slug": "ai",
       "routes": [
+        {
+          "source": "guides/embedders/openai.mdx",
+          "label": "Semantic search with OpenAI embeddings",
+          "slug": "openai"
+        },
         {
           "source": "guides/langchain.mdx",
           "label": "Implementing semantic search with LangChain",
           "slug": "langchain"
         },
         {
-          "source": "guides/computing_hugging_face_embeddings_gpu.mdx",
-          "label": "Computing Hugging Face embeddings with the GPU",
-          "slug": "computing_hugging_face_embeddings_gpu"
+          "source": "guides/embedders/huggingface.mdx",
+          "label": "Implementing semantic search with Hugging Face Inference Endpoints",
+          "slug": "huggingface"
         },
         {
           "source": "guides/embedders/cloudflare.mdx",
@@ -80,15 +85,15 @@
           "label": "Semantic search with Mistral embeddings",
           "slug": "mistral"
         },
-        {
-          "source": "guides/embedders/openai.mdx",
-          "label": "Semantic search with OpenAI embeddings",
-          "slug": "openai"
-        },
         {
           "source": "guides/embedders/voyage.mdx",
           "label": "Semantic search with Voyage embeddings",
           "slug": "voyage"
+        },
+        {
+          "source": "guides/computing_hugging_face_embeddings_gpu.mdx",
+          "label": "Computing Hugging Face embeddings with the GPU",
+          "slug": "computing_hugging_face_embeddings_gpu"
         }
       ]
     },
diff --git a/guides/embedders/cloudflare.mdx b/guides/embedders/cloudflare.mdx
index 27c882e59..02eb91ab7 100644
--- a/guides/embedders/cloudflare.mdx
+++ b/guides/embedders/cloudflare.mdx
@@ -88,7 +88,7 @@ In this request:
 - `q`: Represents the user's search query.
 - `hybrid`: Specifies the configuration for the hybrid search.
   - `semanticRatio`: Allows you to control the balance between semantic search and traditional search. A value of 1 indicates pure semantic search, while a value of 0 represents full-text search. You can adjust this parameter to achieve a hybrid search experience.
-  - `embedder`: The name of the embedder used for generating embeddings. Make sure to use the same name as specified in the embedder configuration, which in this case is "cf-bge-small-en-v1.5".
+  - `embedder`: The name of the embedder used for generating embeddings. Make sure to use the same name as specified in the embedder configuration, which in this case is "cloudflare".
 
 You can use the Meilisearch API or client libraries to perform searches and retrieve the relevant documents based on semantic similarity.
 
diff --git a/guides/embedders/huggingface.mdx b/guides/embedders/huggingface.mdx
new file mode 100644
index 000000000..1bd2f6f31
--- /dev/null
+++ b/guides/embedders/huggingface.mdx
@@ -0,0 +1,86 @@
+---
+title: Semantic Search with Hugging Face Inference Endpoints - Meilisearch documentation
+description: This guide will walk you through the process of setting up Meilisearch with Hugging Face Inference Endpoints.
+---
+
+# Semantic search with Hugging Face Inference Endpoints
+
+## Introduction
+
+This guide will walk you through the process of setting up a Meilisearch REST embedder with [Hugging Face Inference Endpoints](https://ui.endpoints.huggingface.co/) to enable semantic search capabilities.
+
+
+You can use Hugging Face and Meilisearch in two ways: running the model locally by setting the embedder source to `huggingFace`, or remotely on Hugging Face's servers by setting the embedder source to `rest`.
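+
+This guide covers the remote approach. For comparison, a local configuration might look like the sketch below; the embedder name `hf-local` is an illustrative placeholder, and you should check the [embedder settings documentation](/reference/api/settings#embedders-experimental) for the exact fields the `huggingFace` source accepts.
+
+```json
+{
+  "hf-local": {
+    "source": "huggingFace",
+    "model": "BAAI/bge-small-en-v1.5",
+    "documentTemplate": "CUSTOM_LIQUID_TEMPLATE"
+  }
+}
+```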
+
+
+## Requirements
+
+To follow this guide, you'll need:
+
+- A [Meilisearch Cloud](https://www.meilisearch.com/cloud) project running version 1.10 or above with the Vector store activated
+- A [Hugging Face account](https://huggingface.co/) with a deployed inference endpoint
+- The endpoint URL and API key of the deployed model on your Hugging Face account
+
+## Configure the embedder
+
+Set up an embedder using the update settings endpoint:
+
+```json
+{
+  "hf-inference": {
+    "source": "rest",
+    "url": "ENDPOINT_URL",
+    "apiKey": "API_KEY",
+    "dimensions": 384,
+    "documentTemplate": "CUSTOM_LIQUID_TEMPLATE",
+    "request": {
+      "inputs": ["{{text}}", "{{..}}"],
+      "model": "baai/bge-small-en-v1.5"
+    },
+    "response": ["{{embedding}}", "{{..}}"]
+  }
+}
+```
+
+In this configuration:
+
+- `source`: declares Meilisearch should connect to this embedder via its REST API
+- `url`: replace `ENDPOINT_URL` with the address of your Hugging Face model endpoint
+- `apiKey`: replace `API_KEY` with your Hugging Face API key
+- `dimensions`: specifies the dimensions of the embeddings, which are 384 for `baai/bge-small-en-v1.5`
+- `documentTemplate`: an optional but recommended [template](/learn/ai_powered_search/getting_started_with_ai_search) for the data you will send to the embedder
+- `request`: defines the structure and parameters of the request Meilisearch will send to the embedder
+- `response`: defines the structure of the embedder's response
+
+Once you've configured the embedder, Meilisearch will automatically generate embeddings for your documents. Monitor the task using the Cloud UI or the [get task endpoint](/reference/api/tasks).
+
+
+This example uses [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) as its model, but Hugging Face offers [other options that may fit your dataset better](https://ui.endpoints.huggingface.co/catalog?task=sentence-embeddings).
+
+
+## Perform a semantic search
+
+With the embedder set up, you can now perform semantic searches. Make a search request with the `hybrid` search parameter, setting `semanticRatio` to `1`:
+
+```json
+{
+  "q": "QUERY_TERMS",
+  "hybrid": {
+    "semanticRatio": 1,
+    "embedder": "hf-inference"
+  }
+}
+```
+
+In this request:
+
+- `q`: the search query
+- `hybrid`: enables AI-powered search functionality
+  - `semanticRatio`: controls the balance between semantic search and full-text search. Setting it to `1` means you will only receive semantic search results
+  - `embedder`: the name of the embedder used for generating embeddings
+
+## Conclusion
+
+You have set up an embedder using Hugging Face Inference Endpoints. This allows you to use pure semantic search capabilities in your application.
+
+Consult the [embedder settings documentation](/reference/api/settings#embedders-experimental) for more information on other embedder configuration options.
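+
+If you would rather blend full-text and semantic results than rely on pure semantic search, you can lower `semanticRatio` in the same search request. The ratio below is only an illustration; any value between `0` (full-text only) and `1` (semantic only) is valid:
+
+```json
+{
+  "q": "QUERY_TERMS",
+  "hybrid": {
+    "semanticRatio": 0.5,
+    "embedder": "hf-inference"
+  }
+}
+```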