# AzureAISearchVectorStore

Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:284](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L284)

Azure AI Search vector store.

## Example
```ts
import {
  DefaultAzureCredential,
  getBearerTokenProvider,
} from "@azure/identity";
import {
  KnownAnalyzerNames,
  KnownVectorSearchAlgorithmKind,
  KnownVectorSearchCompressionKind,
} from "@azure/search-documents";
// NOTE: the import paths below assume the scoped provider packages and may
// need to be adjusted for your LlamaIndex version.
import { OpenAI, OpenAIEmbedding } from "@llamaindex/openai";
import {
  AzureAISearchVectorStore,
  IndexManagement,
  MetadataIndexFieldType,
  type FilterableMetadataFieldKeysType,
} from "@llamaindex/azure";
import { SimpleDirectoryReader } from "@llamaindex/readers/directory";
import {
  DocStoreStrategy,
  Settings,
  VectorStoreIndex,
  storageContextFromDefaults,
} from "llamaindex";

// 1- Setup Azure OpenAI
const azureADTokenProvider = getBearerTokenProvider(
  new DefaultAzureCredential(),
  "https://cognitiveservices.azure.com/.default",
);

// IMPORTANT: You need to deploy your own embedding model as well as your own chat completion model
// NOTE: You can use whatever embedding model and language model that is supported in LlamaIndex
const azure = {
  azureADTokenProvider,
  deployment: process.env.AZURE_DEPLOYMENT_NAME,
};
Settings.llm = new OpenAI({ azure });
Settings.embedModel = new OpenAIEmbedding({
  model: process.env.EMBEDDING_MODEL,
  azure: {
    ...azure,
    deployment: process.env.EMBEDDING_MODEL,
  },
});

// ---------------------------------------------------------
// 2- Setup Azure AI Search
// Define env variables in .env file
// AZURE_AI_SEARCH_ENDPOINT=
// AZURE_AI_SEARCH_KEY=
// AZURE_OPENAI_ENDPOINT=
// EMBEDDING_MODEL=text-embedding-ada-002
// AZURE_DEPLOYMENT_NAME=gpt-4
// AZURE_API_VERSION=2024-09-01-preview

// Define index name
const indexName = "llamaindex-vector-store";

// ---------------------------------------------------------
// 3a- Create Index (if it does not exist)
// id: Edm.String
// chunk: Edm.String
// embedding: Collection(Edm.Single)
// metadata: Edm.String
// doc_id: Edm.String
// author: Edm.String
// theme: Edm.String
// director: Edm.String

// Define metadata fields with their respective configurations
const metadataFields = {
  author: "author",
  theme: ["theme", MetadataIndexFieldType.STRING],
  director: "director",
};

// Define index parameters and vector store configuration
// Index validation:
// - IndexManagement.VALIDATE_INDEX: will validate before creating embedding index and will throw a runtime error if the index does not exist
// - IndexManagement.NO_VALIDATION: will try to access the index and will throw a runtime error if the index does not exist
// - IndexManagement.CREATE_IF_NOT_EXISTS: will create the index if it does not exist
const vectorStore = new AzureAISearchVectorStore({
  filterableMetadataFieldKeys:
    metadataFields as unknown as FilterableMetadataFieldKeysType,
  indexName,
  indexManagement: IndexManagement.CREATE_IF_NOT_EXISTS,
  idFieldKey: "id",
  chunkFieldKey: "chunk",
  embeddingFieldKey: "embedding",
  metadataStringFieldKey: "metadata",
  docIdFieldKey: "doc_id",
  embeddingDimensionality: 1536,
  hiddenFieldKeys: ["embedding"],
  languageAnalyzer: KnownAnalyzerNames.EnLucene,
  // store vectors on disk
  vectorAlgorithmType: KnownVectorSearchAlgorithmKind.ExhaustiveKnn,
  // Optional: Set to "scalar" or "binary" if using HNSW
  compressionType: KnownVectorSearchCompressionKind.BinaryQuantization,
});

// ---------------------------------------------------------
// 3b- Loading documents
// Load the documents stored in data/paul_graham/ using the SimpleDirectoryReader
// NOTE: You can use whatever reader that is supported in LlamaIndex

// Load documents using a directory reader
const documents = await new SimpleDirectoryReader().loadData(
  "data/paul_graham/",
);
const storageContext = await storageContextFromDefaults({ vectorStore });

// Create index from documents with the specified storage context
const index = await VectorStoreIndex.fromDocuments(documents, {
  storageContext,
  docStoreStrategy: DocStoreStrategy.UPSERTS,
});

const queryEngine = index.asQueryEngine();
const response = await queryEngine.query({
  query: "What did the author do growing up?",
  similarityTopK: 3,
} as any);
console.log({ response });
```
## Extends
- `BaseVectorStore`
## Type Parameters
• **T** *extends* [`R`](/docs/api/type-aliases/R)
## Constructors
### new AzureAISearchVectorStore()
> **new AzureAISearchVectorStore**\<`T`\>(`options`): [`AzureAISearchVectorStore`](/docs/api/classes/AzureAISearchVectorStore)\<`T`\>
Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:311](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L311)
#### Parameters
##### options
[`AzureAISearchOptions`](/docs/api/interfaces/AzureAISearchOptions)\<`T`\> & `VectorStoreBaseParams`
#### Returns
[`AzureAISearchVectorStore`](/docs/api/classes/AzureAISearchVectorStore)\<`T`\>
#### Overrides
`BaseVectorStore.constructor`
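For orientation, here is a minimal construction sketch. It assumes the `@llamaindex/azure` package name, that `AZURE_AI_SEARCH_ENDPOINT` and `AZURE_AI_SEARCH_KEY` are supplied via the environment as in the example above, and that the field-key options omitted here fall back to the defaults shown there:

```ts
// Assumed package name; adjust to your LlamaIndex version.
import { AzureAISearchVectorStore, IndexManagement } from "@llamaindex/azure";

const vectorStore = new AzureAISearchVectorStore({
  indexName: "llamaindex-vector-store",
  indexManagement: IndexManagement.CREATE_IF_NOT_EXISTS,
  // Must match the embedding model (1536 for text-embedding-ada-002).
  embeddingDimensionality: 1536,
});
```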
## Properties
### storesText
> **storesText**: `boolean` = `true`
Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:285](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L285)
#### Overrides
`BaseVectorStore.storesText`
***
### \_searchClient
> **\_searchClient**: `undefined` \| `SearchClient`\<`T`\>
Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:286](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L286)
***
### \_indexClient
> **\_indexClient**: `undefined` \| `SearchIndexClient`
Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:292](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L292)
***
### flatMetadata
> **flatMetadata**: `boolean` = `true`
Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:298](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L298)
## Methods
### createSearchIndexClient()
> **createSearchIndexClient**(`options`): `void`
Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:860](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L860)
#### Parameters
##### options
[`AzureAISearchOptions`](/docs/api/interfaces/AzureAISearchOptions)\<`T`\>
#### Returns
`void`
***
### createSearchClient()
> **createSearchClient**(`options`): `void`
Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:877](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L877)
#### Parameters
##### options
[`AzureAISearchOptions`](/docs/api/interfaces/AzureAISearchOptions)\<`T`\>
#### Returns
`void`
***
### client()
> **client**(): `undefined` \| `SearchClient`\<`T`\>
Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:1061](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L1061)
Get search client
#### Returns
`undefined` \| `SearchClient`\<`T`\>
Azure AI Search client. See SearchClient
#### Overrides
`BaseVectorStore.client`
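As a hedged usage sketch (assuming the `vectorStore` instance from the example above), the returned client is a regular `SearchClient` from `@azure/search-documents`, so its standard methods such as `getDocumentsCount()` can be called directly:

```ts
// client() returns undefined if no search client has been created yet.
const searchClient = vectorStore.client();
if (searchClient) {
  const count = await searchClient.getDocumentsCount();
  console.log(`Documents currently in the index: ${count}`);
}
```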
***
### indexClient()
> **indexClient**(): `undefined` \| `SearchIndexClient`
Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:1069](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L1069)
Get index client
#### Returns
`undefined` \| `SearchIndexClient`
Azure AI Search index client. See SearchIndexClient
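Similarly, a hedged sketch using the returned `SearchIndexClient` (a standard `@azure/search-documents` client) to inspect the index definition backing the store:

```ts
const indexClient = vectorStore.indexClient();
if (indexClient) {
  // Look up the index definition and print its field names.
  const index = await indexClient.getIndex("llamaindex-vector-store");
  console.log(index.fields.map((field) => field.name));
}
```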
***
### add()
> **add**(`nodes`): `Promise`\<`string`[]\>
Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:1078](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L1078)
Add nodes to the index associated with the configured search client.
#### Parameters
##### nodes
`BaseNode`\<`Metadata`\>[]
List of nodes with embeddings to add to the index
#### Returns
`Promise`\<`string`[]\>
List of node IDs that were added to the index
#### Overrides
`BaseVectorStore.add`
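A hedged sketch of adding nodes directly, assuming `Settings.embedModel` is configured as in the example above and that `TextNode` is imported from `llamaindex`; `add()` expects the nodes to already carry embeddings:

```ts
import { Settings, TextNode } from "llamaindex";

const text = "The Godfather is a 1972 film directed by Francis Ford Coppola.";
const node = new TextNode({
  text,
  // Embed the chunk up front; add() expects nodes that already have embeddings.
  embedding: await Settings.embedModel.getTextEmbedding(text),
  metadata: { author: "test author", theme: "Mafia", director: "Francis Ford Coppola" },
});

const insertedIds = await vectorStore.add([node]);
console.log(insertedIds); // IDs of the documents written to the index
```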
***
### delete()
> **delete**(`refDocId`): `Promise`\<`void`\>
Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:1143](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L1143)
Delete documents from the Azure AI Search index whose docIdFieldKey (doc_id) field equals the given refDocId.
#### Parameters
##### refDocId
`string`
The reference document ID to delete from the index
#### Returns
`Promise`\<`void`\>
#### Overrides
`BaseVectorStore.delete`
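A hedged sketch (assuming the `vectorStore` instance from the example above); every chunk whose `doc_id` field matches the given reference document ID is removed:

```ts
// The ID below is hypothetical; use the ref doc ID of a previously ingested document.
await vectorStore.delete("paul-graham-essay");
```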
***
### getNodes()
> **getNodes**(`nodeIds`?, `filters`?, `limit`?): `Promise`\<`BaseNode`\<`Metadata`\>[]\>
Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:1190](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L1190)
Get nodes asynchronously from the Azure AI Search index.
#### Parameters
##### nodeIds?
`string`[]
List of node IDs to retrieve from the index
##### filters?
`MetadataFilters`
Metadata filters to apply to the search
##### limit?
`number`
Maximum number of nodes to retrieve
#### Returns
`Promise`\<`BaseNode`\<`Metadata`\>[]\>
List of nodes retrieved from the index
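A hedged sketch of fetching nodes by metadata filter rather than by ID, assuming the `vectorStore` instance from the example above and the standard LlamaIndex `MetadataFilters` shape:

```ts
// Fetch up to 10 nodes whose "theme" metadata equals "Mafia".
const nodes = await vectorStore.getNodes(
  undefined, // no explicit node IDs; select by filter instead
  {
    filters: [{ key: "theme", value: "Mafia", operator: "==" }],
  },
  10,
);
console.log(nodes.map((node) => node.id_));
```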
***
### query()
> **query**(`query`): `Promise`\<`VectorStoreQueryResult`\>
Defined in: [providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts:1243](https://github.com/run-llama/LlamaIndexTS/blob/c14a21bc0b5d1df0dd801918fdc09a27f1ce4b31/packages/providers/storage/azure/src/vectorStore/AzureAISearchVectorStore.ts#L1243)
#### Parameters
##### query
`VectorStoreQuery` & `object`
#### Returns
`Promise`\<`VectorStoreQueryResult`\>
#### Overrides
`BaseVectorStore.query`
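A hedged sketch of querying the store directly (bypassing a query engine), assuming `Settings.embedModel` is configured as in the example above and that `VectorStoreQueryMode` is imported from `llamaindex`:

```ts
import { Settings, VectorStoreQueryMode } from "llamaindex";

const queryEmbedding = await Settings.embedModel.getQueryEmbedding(
  "What did the author do growing up?",
);

const result = await vectorStore.query({
  queryEmbedding: queryEmbedding!,
  similarityTopK: 3,
  mode: VectorStoreQueryMode.DEFAULT,
});

// Matching node IDs and their similarity scores.
console.log(result.ids, result.similarities);
```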