diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 18fbaaa167..cb27f5dfcd 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -41,7 +41,7 @@ jobs: runs-on: ubuntu-22.04 timeout-minutes: 10 env: - CACHE_VERSION: 1.02 # bump this to run all clients on the CI. + CACHE_VERSION: 1.10 # bump this to run all clients on the CI. steps: - name: debugging - dump GitHub context env: diff --git a/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs b/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs index ea0ebd2e67..d621aa0972 100644 --- a/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs +++ b/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs @@ -166,23 +166,25 @@ public partial interface ISearchClient /// /// The index in which to perform the request. /// The list of `objects` to store in the given Algolia `indexName`. + /// Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable.. /// Add extra http header or query parameters to Algolia. /// Cancellation Token to cancel the request. /// - Task> SaveObjectsAsync(string indexName, IEnumerable objects, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; + Task> SaveObjectsAsync(string indexName, IEnumerable objects, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; /// - List SaveObjects(string indexName, IEnumerable objects, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; + List SaveObjects(string indexName, IEnumerable objects, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; /// /// Helper: Deletes every records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objectIDs in it. /// /// The index in which to perform the request. /// The list of `objectIDs` to remove from the given Algolia `indexName`. + /// Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable.. /// Add extra http header or query parameters to Algolia. /// Cancellation Token to cancel the request. - Task> DeleteObjectsAsync(string indexName, IEnumerable objectIDs, RequestOptions options = null, CancellationToken cancellationToken = default); + Task> DeleteObjectsAsync(string indexName, IEnumerable objectIDs, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default); /// - List DeleteObjects(string indexName, IEnumerable objectIDs, RequestOptions options = null, CancellationToken cancellationToken = default); + List DeleteObjects(string indexName, IEnumerable objectIDs, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default); /// /// Helper: Replaces object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it. @@ -190,11 +192,12 @@ public partial interface ISearchClient /// The index in which to perform the request. 
/// The list of `objects` to update in the given Algolia `indexName`. /// To be provided if non-existing objects are passed, otherwise, the call will fail. + /// Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable.. /// Add extra http header or query parameters to Algolia. /// Cancellation Token to cancel the request. - Task> PartialUpdateObjectsAsync(string indexName, IEnumerable objects, bool createIfNotExists, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; + Task> PartialUpdateObjectsAsync(string indexName, IEnumerable objects, bool createIfNotExists, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; /// - List PartialUpdateObjects(string indexName, IEnumerable objects, bool createIfNotExists, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; + List PartialUpdateObjects(string indexName, IEnumerable objects, bool createIfNotExists, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class; /// /// Helper: Check if an index exists. @@ -564,42 +567,44 @@ public List ChunkedBatch(string indexName, IEnumerable obje /// public async Task> SaveObjectsAsync(string indexName, IEnumerable objects, + bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class { - return await ChunkedBatchAsync(indexName, objects, Action.AddObject, false, 1000, options, cancellationToken).ConfigureAwait(false); + return await ChunkedBatchAsync(indexName, objects, Action.AddObject, waitForTasks, 1000, options, cancellationToken).ConfigureAwait(false); } /// - public List SaveObjects(string indexName, IEnumerable objects, RequestOptions options = null, + public List SaveObjects(string indexName, IEnumerable objects, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class => - AsyncHelper.RunSync(() => SaveObjectsAsync(indexName, objects, options, cancellationToken)); + AsyncHelper.RunSync(() => SaveObjectsAsync(indexName, objects, waitForTasks, options, cancellationToken)); /// public async Task> DeleteObjectsAsync(string indexName, IEnumerable objectIDs, + bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) { - return await ChunkedBatchAsync(indexName, objectIDs.Select(id => new { objectID = id }), Action.DeleteObject, false, 1000, options, cancellationToken).ConfigureAwait(false); + return await ChunkedBatchAsync(indexName, objectIDs.Select(id => new { objectID = id }), Action.DeleteObject, waitForTasks, 1000, options, cancellationToken).ConfigureAwait(false); } /// - public List DeleteObjects(string indexName, IEnumerable objectIDs, RequestOptions options = null, + public List DeleteObjects(string indexName, IEnumerable objectIDs, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) => - AsyncHelper.RunSync(() => DeleteObjectsAsync(indexName, objectIDs, options, cancellationToken)); + AsyncHelper.RunSync(() => DeleteObjectsAsync(indexName, objectIDs, waitForTasks, options, cancellationToken)); /// - public async Task> PartialUpdateObjectsAsync(string indexName, IEnumerable objects, bool createIfNotExists, + public async Task> 
PartialUpdateObjectsAsync(string indexName, IEnumerable objects, bool createIfNotExists, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class { - return await ChunkedBatchAsync(indexName, objects, createIfNotExists ? Action.PartialUpdateObject : Action.PartialUpdateObjectNoCreate, false, 1000, options, cancellationToken).ConfigureAwait(false); + return await ChunkedBatchAsync(indexName, objects, createIfNotExists ? Action.PartialUpdateObject : Action.PartialUpdateObjectNoCreate, waitForTasks, 1000, options, cancellationToken).ConfigureAwait(false); } /// - public List PartialUpdateObjects(string indexName, IEnumerable objects, bool createIfNotExists, + public List PartialUpdateObjects(string indexName, IEnumerable objects, bool createIfNotExists, bool waitForTasks = false, RequestOptions options = null, CancellationToken cancellationToken = default) where T : class => - AsyncHelper.RunSync(() => PartialUpdateObjectsAsync(indexName, objects, createIfNotExists, options, cancellationToken)); + AsyncHelper.RunSync(() => PartialUpdateObjectsAsync(indexName, objects, createIfNotExists, waitForTasks, options, cancellationToken)); private static async Task> CreateIterable(Func> executeQuery, Func stopCondition) diff --git a/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt b/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt index 740808d492..da143eb1dd 100644 --- a/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt +++ b/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt @@ -369,6 +369,7 @@ public suspend fun SearchClient.chunkedBatch( * * @param indexName The index in which to perform the request. * @param objects The list of objects to index. + * @param waitForTask If true, wait for the task to complete. * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. * @return The list of responses from the batch requests. * @@ -376,13 +377,14 @@ public suspend fun SearchClient.chunkedBatch( public suspend fun SearchClient.saveObjects( indexName: String, objects: List, + waitForTask: Boolean = false, requestOptions: RequestOptions? = null, ): List { return this.chunkedBatch( indexName = indexName, objects = objects, action = Action.AddObject, - waitForTask = false, + waitForTask = waitForTask, batchSize = 1000, requestOptions = requestOptions, ) @@ -393,6 +395,7 @@ public suspend fun SearchClient.saveObjects( * * @param indexName The index in which to perform the request. * @param objectIDs The list of objectIDs to delete from the index. + * @param waitForTask If true, wait for the task to complete. * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. * @return The list of responses from the batch requests. * @@ -400,13 +403,14 @@ public suspend fun SearchClient.saveObjects( public suspend fun SearchClient.deleteObjects( indexName: String, objectIDs: List, + waitForTask: Boolean = false, requestOptions: RequestOptions? 
= null, ): List { return this.chunkedBatch( indexName = indexName, objects = objectIDs.map { id -> JsonObject(mapOf("objectID" to Json.encodeToJsonElement(id))) }, action = Action.DeleteObject, - waitForTask = false, + waitForTask = waitForTask, batchSize = 1000, requestOptions = requestOptions, ) @@ -418,6 +422,7 @@ public suspend fun SearchClient.deleteObjects( * @param indexName The index in which to perform the request. * @param objects The list of objects to update in the index. * @param createIfNotExists To be provided if non-existing objects are passed, otherwise, the call will fail.. + * @param waitForTask If true, wait for the task to complete. * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. * @return The list of responses from the batch requests. * @@ -426,13 +431,14 @@ public suspend fun SearchClient.partialUpdateObjects( indexName: String, objects: List, createIfNotExists: Boolean, + waitForTask: Boolean = false, requestOptions: RequestOptions? = null, ): List { return this.chunkedBatch( indexName = indexName, objects = objects, action = if (createIfNotExists) Action.PartialUpdateObject else Action.PartialUpdateObjectNoCreate, - waitForTask = false, + waitForTask = waitForTask, batchSize = 1000, requestOptions = requestOptions, ) diff --git a/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala b/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala index 80781732ee..b916606295 100644 --- a/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala +++ b/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala @@ -251,6 +251,8 @@ package object extension { * The index in which to perform the request. * @param objects * The list of objects to save. + * @param waitForTasks + * Whether to wait for the tasks to complete. * @param requestOptions * Additional request configuration. * @return @@ -259,9 +261,10 @@ package object extension { def saveObjects( indexName: String, objects: Seq[Any], + waitForTasks: Boolean = false, requestOptions: Option[RequestOptions] = None )(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = { - chunkedBatch(indexName, objects, Action.AddObject, false, 1000, requestOptions) + chunkedBatch(indexName, objects, Action.AddObject, waitForTasks, 1000, requestOptions) } /** Helper: Deletes every objects for the given objectIDs. The `chunkedBatch` helper is used under the hood, which @@ -271,6 +274,8 @@ package object extension { * The index in which to perform the request. * @param objectIDs * The list of objectIDs to delete. + * @param waitForTasks + * Whether to wait for the tasks to complete. * @param requestOptions * Additional request configuration. * @return @@ -279,13 +284,14 @@ package object extension { def deleteObjects( indexName: String, objectIDs: Seq[String], + waitForTasks: Boolean = false, requestOptions: Option[RequestOptions] = None )(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = { chunkedBatch( indexName, objectIDs.map(id => new { val objectID: String = id }), Action.DeleteObject, - false, + waitForTasks, 1000, requestOptions ) @@ -300,6 +306,8 @@ package object extension { * The list of objects to save. * @param createIfNotExists * To be provided if non-existing objects are passed, otherwise, the call will fail. + * @param waitForTasks + * Whether to wait for the tasks to complete. 
* @param requestOptions * Additional request configuration. * @return @@ -309,13 +317,14 @@ package object extension { indexName: String, objects: Seq[Any], createIfNotExists: Boolean = false, + waitForTasks: Boolean = false, requestOptions: Option[RequestOptions] = None )(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = { chunkedBatch( indexName, objects, if (createIfNotExists) Action.PartialUpdateObject else Action.PartialUpdateObjectNoCreate, - false, + waitForTasks, 1000, requestOptions ) diff --git a/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift b/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift index e52ad5bd32..fefa966a70 100644 --- a/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift +++ b/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift @@ -463,18 +463,20 @@ public extension SearchClient { /// which creates a `batch` requests with at most 1000 objects in it. /// - parameter indexName: The name of the index where to save the objects /// - parameter objects: The new objects + /// - parameter waitForTasks: If we should wait for the batch task to be finished before processing the next one /// - parameter requestOptions: The request options /// - returns: [BatchResponse] func saveObjects( indexName: String, objects: [some Encodable], + waitForTasks: Bool = false, requestOptions: RequestOptions? = nil ) async throws -> [BatchResponse] { try await self.chunkedBatch( indexName: indexName, objects: objects, action: .addObject, - waitForTasks: false, + waitForTasks: waitForTasks, batchSize: 1000, requestOptions: requestOptions ) @@ -484,18 +486,20 @@ public extension SearchClient { /// creates a `batch` requests with at most 1000 objectIDs in it. /// - parameter indexName: The name of the index to delete objectIDs from /// - parameter objectIDs: The objectIDs to delete + /// - parameter waitForTasks: If we should wait for the batch task to be finished before processing the next one /// - parameter requestOptions: The request options /// - returns: [BatchResponse] func deleteObjects( indexName: String, objectIDs: [String], + waitForTasks: Bool = false, requestOptions: RequestOptions? = nil ) async throws -> [BatchResponse] { try await self.chunkedBatch( indexName: indexName, objects: objectIDs.map { AnyCodable(["objectID": $0]) }, action: .deleteObject, - waitForTasks: false, + waitForTasks: waitForTasks, batchSize: 1000, requestOptions: requestOptions ) @@ -507,19 +511,21 @@ public extension SearchClient { /// - parameter objects: The objects to update /// - parameter createIfNotExists: To be provided if non-existing objects are passed, otherwise, the call will /// fail.. + /// - parameter waitForTasks: If we should wait for the batch task to be finished before processing the next one /// - parameter requestOptions: The request options /// - returns: [BatchResponse] func partialUpdateObjects( indexName: String, objects: [some Encodable], createIfNotExists: Bool = false, + waitForTasks: Bool = false, requestOptions: RequestOptions? = nil ) async throws -> [BatchResponse] { try await self.chunkedBatch( indexName: indexName, objects: objects, action: createIfNotExists ? 
.partialUpdateObject : .partialUpdateObjectNoCreate, - waitForTasks: false, + waitForTasks: waitForTasks, batchSize: 1000, requestOptions: requestOptions ) diff --git a/playground/python/app/search.py b/playground/python/app/search.py index 5226381951..99a5188ef6 100644 --- a/playground/python/app/search.py +++ b/playground/python/app/search.py @@ -1,30 +1,30 @@ -from asyncio import run from os import environ -from algoliasearch.search.client import SearchClient +from algoliasearch.search.client import SearchClientSync from algoliasearch.search import __version__ from dotenv import load_dotenv load_dotenv("../.env") -async def main(): +def main(): print("SearchClient version", __version__) - client = SearchClient( + client = SearchClientSync( environ.get("ALGOLIA_APPLICATION_ID"), environ.get("ALGOLIA_ADMIN_KEY") ) print("client initialized", client) try: - resp = await client.search(search_method_params={ - "requests": [{"indexName": "api-clients-automation"}] - }) - print(resp.to_dict()) + resp = client.save_objects("foo", [{"foo": "bar"}]) + print(resp) + + for r in resp: + client.wait_for_task(index_name="foo", task_id=r.task_id) finally: - await client.close() + client.close() print("client closed") -run(main()) +main() diff --git a/playground/python/poetry.lock b/playground/python/poetry.lock index f24cc7267b..eb8224dda7 100644 --- a/playground/python/poetry.lock +++ b/playground/python/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -139,7 +139,7 @@ frozenlist = ">=1.1.0" [[package]] name = "algoliasearch" -version = "4.4.2" +version = "4.6.5" description = "A fully-featured and blazing-fast Python API client to interact with Algolia." 
optional = false python-versions = ">= 3.8.1" @@ -516,6 +516,17 @@ files = [ [package.dependencies] typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} +[[package]] +name = "nodeenv" +version = "1.9.1" +description = "Node.js virtual environment builder" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + [[package]] name = "pydantic" version = "2.9.1" @@ -640,6 +651,26 @@ files = [ [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" +[[package]] +name = "pyright" +version = "1.1.383" +description = "Command line wrapper for pyright" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyright-1.1.383-py3-none-any.whl", hash = "sha256:d864d1182a313f45aaf99e9bfc7d2668eeabc99b29a556b5344894fd73cb1959"}, + {file = "pyright-1.1.383.tar.gz", hash = "sha256:1df7f12407f3710c9c6df938d98ec53f70053e6c6bbf71ce7bcb038d42f10070"}, +] + +[package.dependencies] +nodeenv = ">=1.6.0" +typing-extensions = ">=4.1" + +[package.extras] +all = ["nodejs-wheel-binaries", "twine (>=3.4.1)"] +dev = ["twine (>=3.4.1)"] +nodejs = ["nodejs-wheel-binaries"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -691,29 +722,29 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "ruff" -version = "0.6.4" +version = "0.6.8" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.4-py3-none-linux_armv6l.whl", hash = "sha256:c4b153fc152af51855458e79e835fb6b933032921756cec9af7d0ba2aa01a258"}, - {file = "ruff-0.6.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:bedff9e4f004dad5f7f76a9d39c4ca98af526c9b1695068198b3bda8c085ef60"}, - {file = "ruff-0.6.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d02a4127a86de23002e694d7ff19f905c51e338c72d8e09b56bfb60e1681724f"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7862f42fc1a4aca1ea3ffe8a11f67819d183a5693b228f0bb3a531f5e40336fc"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eebe4ff1967c838a1a9618a5a59a3b0a00406f8d7eefee97c70411fefc353617"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:932063a03bac394866683e15710c25b8690ccdca1cf192b9a98260332ca93408"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:50e30b437cebef547bd5c3edf9ce81343e5dd7c737cb36ccb4fe83573f3d392e"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c44536df7b93a587de690e124b89bd47306fddd59398a0fb12afd6133c7b3818"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ea086601b22dc5e7693a78f3fcfc460cceabfdf3bdc36dc898792aba48fbad6"}, - {file = "ruff-0.6.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b52387d3289ccd227b62102c24714ed75fbba0b16ecc69a923a37e3b5e0aaaa"}, - {file = "ruff-0.6.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0308610470fcc82969082fc83c76c0d362f562e2f0cdab0586516f03a4e06ec6"}, - {file = "ruff-0.6.4-py3-none-musllinux_1_2_armv7l.whl", hash = 
"sha256:803b96dea21795a6c9d5bfa9e96127cc9c31a1987802ca68f35e5c95aed3fc0d"}, - {file = "ruff-0.6.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:66dbfea86b663baab8fcae56c59f190caba9398df1488164e2df53e216248baa"}, - {file = "ruff-0.6.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:34d5efad480193c046c86608dbba2bccdc1c5fd11950fb271f8086e0c763a5d1"}, - {file = "ruff-0.6.4-py3-none-win32.whl", hash = "sha256:f0f8968feea5ce3777c0d8365653d5e91c40c31a81d95824ba61d871a11b8523"}, - {file = "ruff-0.6.4-py3-none-win_amd64.whl", hash = "sha256:549daccee5227282289390b0222d0fbee0275d1db6d514550d65420053021a58"}, - {file = "ruff-0.6.4-py3-none-win_arm64.whl", hash = "sha256:ac4b75e898ed189b3708c9ab3fc70b79a433219e1e87193b4f2b77251d058d14"}, - {file = "ruff-0.6.4.tar.gz", hash = "sha256:ac3b5bfbee99973f80aa1b7cbd1c9cbce200883bdd067300c22a6cc1c7fba212"}, + {file = "ruff-0.6.8-py3-none-linux_armv6l.whl", hash = "sha256:77944bca110ff0a43b768f05a529fecd0706aac7bcce36d7f1eeb4cbfca5f0f2"}, + {file = "ruff-0.6.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27b87e1801e786cd6ede4ada3faa5e254ce774de835e6723fd94551464c56b8c"}, + {file = "ruff-0.6.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cd48f945da2a6334f1793d7f701725a76ba93bf3d73c36f6b21fb04d5338dcf5"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:677e03c00f37c66cea033274295a983c7c546edea5043d0c798833adf4cf4c6f"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9f1476236b3eacfacfc0f66aa9e6cd39f2a624cb73ea99189556015f27c0bdeb"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f5a2f17c7d32991169195d52a04c95b256378bbf0de8cb98478351eb70d526f"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5fd0d4b7b1457c49e435ee1e437900ced9b35cb8dc5178921dfb7d98d65a08d0"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8034b19b993e9601f2ddf2c517451e17a6ab5cdb1c13fdff50c1442a7171d87"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cfb227b932ba8ef6e56c9f875d987973cd5e35bc5d05f5abf045af78ad8e098"}, + {file = "ruff-0.6.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef0411eccfc3909269fed47c61ffebdcb84a04504bafa6b6df9b85c27e813b0"}, + {file = "ruff-0.6.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:007dee844738c3d2e6c24ab5bc7d43c99ba3e1943bd2d95d598582e9c1b27750"}, + {file = "ruff-0.6.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ce60058d3cdd8490e5e5471ef086b3f1e90ab872b548814e35930e21d848c9ce"}, + {file = "ruff-0.6.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:1085c455d1b3fdb8021ad534379c60353b81ba079712bce7a900e834859182fa"}, + {file = "ruff-0.6.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:70edf6a93b19481affd287d696d9e311388d808671bc209fb8907b46a8c3af44"}, + {file = "ruff-0.6.8-py3-none-win32.whl", hash = "sha256:792213f7be25316f9b46b854df80a77e0da87ec66691e8f012f887b4a671ab5a"}, + {file = "ruff-0.6.8-py3-none-win_amd64.whl", hash = "sha256:ec0517dc0f37cad14a5319ba7bba6e7e339d03fbf967a6d69b0907d61be7a263"}, + {file = "ruff-0.6.8-py3-none-win_arm64.whl", hash = "sha256:8d3bb2e3fbb9875172119021a13eed38849e762499e3cfde9588e4b4d70968dc"}, + {file = "ruff-0.6.8.tar.gz", hash = "sha256:a5bf44b1aa0adaf6d9d20f86162b34f7c593bfedabc51239953e446aefc8ce18"}, ] [[package]] @@ -863,4 +894,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" 
python-versions = ">= 3.8.1" -content-hash = "dfa3bff1d09cecebda86aed5791fbf6b0e889a7aa7cdc9f13761d54c17971ef4" +content-hash = "ba50344b0f59ea0535e3de55495754546a0fa4db45108cde00c34844d2e07b9d" diff --git a/scripts/playground.ts b/scripts/playground.ts index fde790ea1b..a2551b5233 100644 --- a/scripts/playground.ts +++ b/scripts/playground.ts @@ -38,7 +38,7 @@ export async function playground({ language, client }: { language: AllLanguage; await run(`php ${client}.php`, { cwd: 'playground/php/src', language }); break; case 'python': - await run(`poetry install --sync && poetry run ${client}`, { + await run(`poetry lock --no-update && poetry install --sync && poetry run ${client}`, { cwd: 'playground/python', language, }); diff --git a/specs/search/helpers/deleteObjects.yml b/specs/search/helpers/deleteObjects.yml index 6a8264bd4b..2d1a2d032d 100644 --- a/specs/search/helpers/deleteObjects.yml +++ b/specs/search/helpers/deleteObjects.yml @@ -22,6 +22,12 @@ method: type: array items: type: string + - in: query + name: waitForTasks + description: Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. + required: false + schema: + type: boolean - in: query name: requestOptions description: The request options to pass to the `batch` method. diff --git a/specs/search/helpers/partialUpdateObjects.yml b/specs/search/helpers/partialUpdateObjects.yml index 74a6d128dd..2dcae02b8f 100644 --- a/specs/search/helpers/partialUpdateObjects.yml +++ b/specs/search/helpers/partialUpdateObjects.yml @@ -28,6 +28,12 @@ method: required: false schema: type: boolean + - in: query + name: waitForTasks + description: Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. + required: false + schema: + type: boolean - in: query name: requestOptions description: The request options to pass to the `batch` method. diff --git a/specs/search/helpers/saveObjects.yml b/specs/search/helpers/saveObjects.yml index 31c9335eb6..bf7cee3fe8 100644 --- a/specs/search/helpers/saveObjects.yml +++ b/specs/search/helpers/saveObjects.yml @@ -22,6 +22,12 @@ method: type: array items: type: object + - in: query + name: waitForTasks + description: Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. + required: false + schema: + type: boolean - in: query name: requestOptions description: The request options to pass to the `batch` method. diff --git a/templates/java/api_helpers.mustache b/templates/java/api_helpers.mustache index ea8117e8e5..80213a82fb 100644 --- a/templates/java/api_helpers.mustache +++ b/templates/java/api_helpers.mustache @@ -1,116 +1,129 @@ {{#isSearchClient}} /** - * Helper: Wait for a task to complete with `indexName` and `taskID`. - * - * @param indexName The `indexName` where the operation was performed. - * @param taskID The `taskID` returned in the method response. - * @param maxRetries The maximum number of retry. 50 by default. (optional) - * @param timeout The function to decide how long to wait between retries. min(retries * 200, 5000) by default. (optional) - * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. 
(optional) - */ -public GetTaskResponse waitForTask(String indexName, Long taskID, int maxRetries, IntUnaryOperator timeout, RequestOptions requestOptions) { + * Helper: Wait for a task to complete with `indexName` and `taskID`. + * + * @param indexName The `indexName` where the operation was performed. + * @param taskID The `taskID` returned in the method response. + * @param maxRetries The maximum number of retry. 50 by default. (optional) + * @param timeout The function to decide how long to wait between retries. min(retries * 200, + * 5000) by default. (optional) + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ +public GetTaskResponse waitForTask( + String indexName, + Long taskID, + int maxRetries, + IntUnaryOperator timeout, + RequestOptions requestOptions +) { return TaskUtils.retryUntil( () -> this.getTask(indexName, taskID, requestOptions), (GetTaskResponse task) -> task.getStatus() == TaskStatus.PUBLISHED, - maxRetries, + maxRetries, timeout ); } /** - * Helper: Wait for a task to complete with `indexName` and `taskID`. - * - * @param indexName The `indexName` where the operation was performed. - * @param taskID The `taskID` returned in the method response. - * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional) - */ + * Helper: Wait for a task to complete with `indexName` and `taskID`. + * + * @param indexName The `indexName` where the operation was performed. + * @param taskID The `taskID` returned in the method response. + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ public GetTaskResponse waitForTask(String indexName, Long taskID, RequestOptions requestOptions) { return this.waitForTask(indexName, taskID, TaskUtils.DEFAULT_MAX_RETRIES, TaskUtils.DEFAULT_TIMEOUT, requestOptions); } /** - * Helper: Wait for a task to complete with `indexName` and `taskID`. - * - * @param indexName The `indexName` where the operation was performed. - * @param taskID The `taskID` returned in the method response. - * @param maxRetries The maximum number of retry. 50 by default. (optional) - * @param timeout The function to decide how long to wait between retries. min(retries * 200, 5000) by default. (optional) - */ + * Helper: Wait for a task to complete with `indexName` and `taskID`. + * + * @param indexName The `indexName` where the operation was performed. + * @param taskID The `taskID` returned in the method response. + * @param maxRetries The maximum number of retry. 50 by default. (optional) + * @param timeout The function to decide how long to wait between retries. min(retries * 200, + * 5000) by default. (optional) + */ public GetTaskResponse waitForTask(String indexName, Long taskID, int maxRetries, IntUnaryOperator timeout) { return this.waitForTask(indexName, taskID, maxRetries, timeout, null); } /** - * Helper: Wait for a task to complete with `indexName` and `taskID`. - * - * @param indexName The `indexName` where the operation was performed. - * @param taskID The `taskID` returned in the method response. - */ + * Helper: Wait for a task to complete with `indexName` and `taskID`. + * + * @param indexName The `indexName` where the operation was performed. + * @param taskID The `taskID` returned in the method response. 
+ */ public GetTaskResponse waitForTask(String indexName, Long taskID) { return this.waitForTask(indexName, taskID, TaskUtils.DEFAULT_MAX_RETRIES, TaskUtils.DEFAULT_TIMEOUT, null); } /** -* Helper: Wait for a application-level task to complete with `taskID`. -* -* @param taskID The `taskID` returned in the method response. -* @param maxRetries The maximum number of retry. 50 by default. (optional) -* @param timeout The function to decide how long to wait between retries. min(retries * 200, -* 5000) by default. (optional) -* @param requestOptions The requestOptions to send along with the query, they will be merged with -* the transporter requestOptions. (optional) -*/ + * Helper: Wait for a application-level task to complete with `taskID`. + * + * @param taskID The `taskID` returned in the method response. + * @param maxRetries The maximum number of retry. 50 by default. (optional) + * @param timeout The function to decide how long to wait between retries. min(retries * 200, + * 5000) by default. (optional) + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ public GetTaskResponse waitForAppTask(Long taskID, int maxRetries, IntUnaryOperator timeout, RequestOptions requestOptions) { -return TaskUtils.retryUntil( - () -> this.getAppTask(taskID, requestOptions), - (GetTaskResponse task) -> task.getStatus() == TaskStatus.PUBLISHED, - maxRetries, - timeout -); + return TaskUtils.retryUntil( + () -> this.getAppTask(taskID, requestOptions), + (GetTaskResponse task) -> task.getStatus() == TaskStatus.PUBLISHED, + maxRetries, + timeout + ); } /** -* Helper: Wait for an application-level task to complete with `taskID`. -* -* @param taskID The `taskID` returned in the method response. -* @param requestOptions The requestOptions to send along with the query, they will be merged with -* the transporter requestOptions. (optional) -*/ + * Helper: Wait for an application-level task to complete with `taskID`. + * + * @param taskID The `taskID` returned in the method response. + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ public GetTaskResponse waitForAppTask(Long taskID, RequestOptions requestOptions) { return this.waitForAppTask(taskID, TaskUtils.DEFAULT_MAX_RETRIES, TaskUtils.DEFAULT_TIMEOUT, requestOptions); } /** -* Helper: Wait for an application-level task to complete with `taskID`. -* -* @param taskID The `taskID` returned in the method response. -* @param maxRetries The maximum number of retry. 50 by default. (optional) -* @param timeout The function to decide how long to wait between retries. min(retries * 200, -* 5000) by default. (optional) -*/ + * Helper: Wait for an application-level task to complete with `taskID`. + * + * @param taskID The `taskID` returned in the method response. + * @param maxRetries The maximum number of retry. 50 by default. (optional) + * @param timeout The function to decide how long to wait between retries. min(retries * 200, + * 5000) by default. (optional) + */ public GetTaskResponse waitForAppTask(Long taskID, int maxRetries, IntUnaryOperator timeout) { return this.waitForAppTask(taskID, maxRetries, timeout, null); } /** -* Helper: Wait for an application-level task to complete with `taskID`. -* -* @param taskID The `taskID` returned in the method response. -*/ + * Helper: Wait for an application-level task to complete with `taskID`. 
+ * + * @param taskID The `taskID` returned in the method response. + */ public GetTaskResponse waitForAppTask(Long taskID) { return this.waitForAppTask(taskID, TaskUtils.DEFAULT_MAX_RETRIES, TaskUtils.DEFAULT_TIMEOUT, null); } /** - * Helper: Wait for an API key to be added, updated or deleted based on a given `operation`. - * - * @param operation The `operation` that was done on a `key`. - * @param key The `key` that has been added, deleted or updated. - * @param apiKey Necessary to know if an `update` operation has been processed, compare fields of the response with it. - * @param maxRetries The maximum number of retry. 50 by default. (optional) - * @param timeout The function to decide how long to wait between retries. min(retries * 200, 5000) by default. (optional) - * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional) - */ + * Helper: Wait for an API key to be added, updated or deleted based on a given `operation`. + * + * @param operation The `operation` that was done on a `key`. + * @param key The `key` that has been added, deleted or updated. + * @param apiKey Necessary to know if an `update` operation has been processed, compare fields of + * the response with it. + * @param maxRetries The maximum number of retry. 50 by default. (optional) + * @param timeout The function to decide how long to wait between retries. min(retries * 200, + * 5000) by default. (optional) + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ public GetApiKeyResponse waitForApiKey( String key, ApiKeyOperation operation, @@ -177,89 +190,111 @@ public GetApiKeyResponse waitForApiKey( } /** - * Helper: Wait for an API key to be added or deleted based on a given `operation`. - * - * @param key The `key` that has been added or deleted. - * @param operation The `operation` that was done on a `key`. (ADD or DELETE only) - * @param maxRetries The maximum number of retry. 50 by default. (optional) - * @param timeout The function to decide how long to wait between retries. min(retries * 200, 5000) by default. (optional) - * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional) - */ -public GetApiKeyResponse waitForApiKey(String key, ApiKeyOperation operation, int maxRetries, IntUnaryOperator timeout, RequestOptions requestOptions) { + * Helper: Wait for an API key to be added or deleted based on a given `operation`. + * + * @param key The `key` that has been added or deleted. + * @param operation The `operation` that was done on a `key`. (ADD or DELETE only) + * @param maxRetries The maximum number of retry. 50 by default. (optional) + * @param timeout The function to decide how long to wait between retries. min(retries * 200, + * 5000) by default. (optional) + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ +public GetApiKeyResponse waitForApiKey( + String key, + ApiKeyOperation operation, + int maxRetries, + IntUnaryOperator timeout, + RequestOptions requestOptions +) { return this.waitForApiKey(key, operation, null, maxRetries, timeout, requestOptions); } + /** - * Helper: Wait for an API key to be added, updated or deleted based on a given `operation`. - * - * @param key The `key` that has been added, deleted or updated. 
- * @param operation The `operation` that was done on a `key`. - * @param apiKey Necessary to know if an `update` operation has been processed, compare fields of the response with it. - * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional) - */ + * Helper: Wait for an API key to be added, updated or deleted based on a given `operation`. + * + * @param key The `key` that has been added, deleted or updated. + * @param operation The `operation` that was done on a `key`. + * @param apiKey Necessary to know if an `update` operation has been processed, compare fields of + * the response with it. + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ public GetApiKeyResponse waitForApiKey(String key, ApiKeyOperation operation, ApiKey apiKey, RequestOptions requestOptions) { return this.waitForApiKey(key, operation, apiKey, TaskUtils.DEFAULT_MAX_RETRIES, TaskUtils.DEFAULT_TIMEOUT, requestOptions); } + /** - * Helper: Wait for an API key to be added or deleted based on a given `operation`. - * - * @param key The `key` that has been added or deleted. - * @param operation The `operation` that was done on a `key`. (ADD or DELETE only) - * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional) - */ + * Helper: Wait for an API key to be added or deleted based on a given `operation`. + * + * @param key The `key` that has been added or deleted. + * @param operation The `operation` that was done on a `key`. (ADD or DELETE only) + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ public GetApiKeyResponse waitForApiKey(String key, ApiKeyOperation operation, RequestOptions requestOptions) { return this.waitForApiKey(key, operation, null, TaskUtils.DEFAULT_MAX_RETRIES, TaskUtils.DEFAULT_TIMEOUT, requestOptions); } + /** - * Helper: Wait for an API key to be added, updated or deleted based on a given `operation`. - * - * @param key The `key` that has been added, deleted or updated. - * @param operation The `operation` that was done on a `key`. - * @param apiKey Necessary to know if an `update` operation has been processed, compare fields of the response with it. - * @param maxRetries The maximum number of retry. 50 by default. (optional) - * @param timeout The function to decide how long to wait between retries. min(retries * 200, 5000) by default. (optional) - */ + * Helper: Wait for an API key to be added, updated or deleted based on a given `operation`. + * + * @param key The `key` that has been added, deleted or updated. + * @param operation The `operation` that was done on a `key`. + * @param apiKey Necessary to know if an `update` operation has been processed, compare fields of + * the response with it. + * @param maxRetries The maximum number of retry. 50 by default. (optional) + * @param timeout The function to decide how long to wait between retries. min(retries * 200, + * 5000) by default. (optional) + */ public GetApiKeyResponse waitForApiKey(String key, ApiKeyOperation operation, ApiKey apiKey, int maxRetries, IntUnaryOperator timeout) { return this.waitForApiKey(key, operation, apiKey, maxRetries, timeout, null); } + /** - * Helper: Wait for an API key to be added or deleted based on a given `operation`. 
- * - * @param key The `key` that has been added or deleted. - * @param operation The `operation` that was done on a `key`. (ADD or DELETE only) - * @param maxRetries The maximum number of retry. 50 by default. (optional) - * @param timeout The function to decide how long to wait between retries. min(retries * 200, 5000) by default. (optional) - */ + * Helper: Wait for an API key to be added or deleted based on a given `operation`. + * + * @param key The `key` that has been added or deleted. + * @param operation The `operation` that was done on a `key`. (ADD or DELETE only) + * @param maxRetries The maximum number of retry. 50 by default. (optional) + * @param timeout The function to decide how long to wait between retries. min(retries * 200, + * 5000) by default. (optional) + */ public GetApiKeyResponse waitForApiKey(String key, ApiKeyOperation operation, int maxRetries, IntUnaryOperator timeout) { return this.waitForApiKey(key, operation, null, maxRetries, timeout, null); } + /** - * Helper: Wait for an API key to be added, updated or deleted based on a given `operation`. - * - * @param key The `key` that has been added, deleted or updated. - * @param operation The `operation` that was done on a `key`. - * @param apiKey Necessary to know if an `update` operation has been processed, compare fields of the response with it. - */ + * Helper: Wait for an API key to be added, updated or deleted based on a given `operation`. + * + * @param key The `key` that has been added, deleted or updated. + * @param operation The `operation` that was done on a `key`. + * @param apiKey Necessary to know if an `update` operation has been processed, compare fields of + * the response with it. + */ public GetApiKeyResponse waitForApiKey(String key, ApiKeyOperation operation, ApiKey apiKey) { return this.waitForApiKey(key, operation, apiKey, TaskUtils.DEFAULT_MAX_RETRIES, TaskUtils.DEFAULT_TIMEOUT, null); } + /** - * Helper: Wait for an API key to be added or deleted based on a given `operation`. - * - * @param key The `key` that has been added or deleted. - * @param operation The `operation` that was done on a `key`. (ADD or DELETE only) - */ + * Helper: Wait for an API key to be added or deleted based on a given `operation`. + * + * @param key The `key` that has been added or deleted. + * @param operation The `operation` that was done on a `key`. (ADD or DELETE only) + */ public GetApiKeyResponse waitForApiKey(String key, ApiKeyOperation operation) { return this.waitForApiKey(key, operation, null, TaskUtils.DEFAULT_MAX_RETRIES, TaskUtils.DEFAULT_TIMEOUT, null); } /** - * Helper: Returns an iterator on top of the `browse` method. - * - * @param indexName The index in which to perform the request. - * @param params The `browse` parameters. - * @param innerType The class held by the index, could be your custom class or {@link Object}. - * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional) - */ + * Helper: Returns an iterator on top of the `browse` method. + * + * @param indexName The index in which to perform the request. + * @param params The `browse` parameters. + * @param innerType The class held by the index, could be your custom class or {@link Object}. + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. 
(optional) + */ public Iterable browseObjects(String indexName, BrowseParamsObject params, Class innerType, RequestOptions requestOptions) { final Holder currentCursor = new Holder<>(); @@ -275,33 +310,34 @@ public Iterable browseObjects(String indexName, BrowseParamsObject params } /** - * Helper: Returns an iterator on top of the `browse` method. - * - * @param indexName The index in which to perform the request. - * @param params The `browse` parameters. - * @param innerType The class held by the index, could be your custom class or {@link Object}. - */ + * Helper: Returns an iterator on top of the `browse` method. + * + * @param indexName The index in which to perform the request. + * @param params The `browse` parameters. + * @param innerType The class held by the index, could be your custom class or {@link Object}. + */ public Iterable browseObjects(String indexName, BrowseParamsObject params, Class innerType) { return browseObjects(indexName, params, innerType, null); } /** - * Helper: Returns an iterator on top of the `browse` method. - * - * @param indexName The index in which to perform the request. - * @param innerType The class held by the index, could be your custom class or {@link Object}. - */ + * Helper: Returns an iterator on top of the `browse` method. + * + * @param indexName The index in which to perform the request. + * @param innerType The class held by the index, could be your custom class or {@link Object}. + */ public Iterable browseObjects(String indexName, Class innerType) { return browseObjects(indexName, new BrowseParamsObject(), innerType, null); } /** - * Helper: Returns an iterator on top of the `searchSynonyms` method. - * - * @param indexName The index in which to perform the request. - * @param params The `searchSynonyms` parameters. (optional) - * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional) - */ + * Helper: Returns an iterator on top of the `searchSynonyms` method. + * + * @param indexName The index in which to perform the request. + * @param params The `searchSynonyms` parameters. (optional) + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ public Iterable browseSynonyms(String indexName, SearchSynonymsParams params, RequestOptions requestOptions) { final Holder currentPage = new Holder<>(0); @@ -319,32 +355,32 @@ public Iterable browseSynonyms(String indexName, SearchSynonymsParam } /** - * Helper: Returns an iterator on top of the `searchSynonyms` method. - * - * @param indexName The index in which to perform the request. - * @param params The `searchSynonyms` parameters .(optional) - */ + * Helper: Returns an iterator on top of the `searchSynonyms` method. + * + * @param indexName The index in which to perform the request. + * @param params The `searchSynonyms` parameters .(optional) + */ public Iterable browseSynonyms(String indexName, SearchSynonymsParams params) { return browseSynonyms(indexName, params, null); } - /** - * Helper: Returns an iterator on top of the `searchSynonyms` method. - * - * @param indexName The index in which to perform the request. - */ + * Helper: Returns an iterator on top of the `searchSynonyms` method. + * + * @param indexName The index in which to perform the request. 
+ */ public Iterable browseSynonyms(String indexName) { return browseSynonyms(indexName, null, null); } /** - * Helper: Returns an iterator on top of the `searchRules` method. - * - * @param indexName The index in which to perform the request. - * @param params The `searchRules` parameters. (optional) - * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional) - */ + * Helper: Returns an iterator on top of the `searchRules` method. + * + * @param indexName The index in which to perform the request. + * @param params The `searchRules` parameters. (optional) + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ public Iterable browseRules(String indexName, SearchRulesParams params, RequestOptions requestOptions) { final Holder currentPage = new Holder<>(0); final int hitsPerPage = 1000; @@ -361,225 +397,238 @@ public Iterable browseRules(String indexName, SearchRulesParams params, Re } /** - * Helper: Returns an iterator on top of the `searchRules` method. - * - * @param indexName The index in which to perform the request. - * @param params The `searchRules` parameters. (optional) - */ + * Helper: Returns an iterator on top of the `searchRules` method. + * + * @param indexName The index in which to perform the request. + * @param params The `searchRules` parameters. (optional) + */ public Iterable browseRules(String indexName, SearchRulesParams params) { return browseRules(indexName, params, null); } /** - * Helper: Returns an iterator on top of the `searchRules` method. - * - * @param indexName The index in which to perform the request. - */ + * Helper: Returns an iterator on top of the `searchRules` method. + * + * @param indexName The index in which to perform the request. + */ public Iterable browseRules(String indexName) { return browseRules(indexName, new SearchRulesParams(), null); } /** - * Executes a synchronous search for the provided search requests, with certainty that we will only request - * Algolia records (hits). Results will be received in the same order as the queries. - * - * @param requests A list of search requests to be executed. - */ + * Executes a synchronous search for the provided search requests, with certainty that we will + * only request Algolia records (hits). Results will be received in the same order as the queries. + * + * @param requests A list of search requests to be executed. + */ public List> searchForHits(@Nonnull List requests, Class innerType) { - return LaunderThrowable.await(searchForHitsAsync(requests, null, innerType, null)); + return LaunderThrowable.await(searchForHitsAsync(requests, null, innerType, null)); } /** - * Executes a synchronous search for the provided search requests, with certainty that we will only request - * Algolia records (hits). Results will be received in the same order as the queries. - * - * @param requests A list of search requests to be executed. - * @param strategy The search strategy to be employed during the search. - */ + * Executes a synchronous search for the provided search requests, with certainty that we will + * only request Algolia records (hits). Results will be received in the same order as the queries. + * + * @param requests A list of search requests to be executed. + * @param strategy The search strategy to be employed during the search. 
+ */ public List> searchForHits(@Nonnull List requests, SearchStrategy strategy, Class innerType) { - return LaunderThrowable.await(searchForHitsAsync(requests, strategy, innerType, null)); + return LaunderThrowable.await(searchForHitsAsync(requests, strategy, innerType, null)); } /** - * Executes a synchronous search for the provided search requests, with certainty that we will only request - * Algolia records (hits). Results will be received in the same order as the queries. - * - * @param requests A list of search requests to be executed. - * @param strategy The search strategy to be employed during the search. - * @param requestOptions Additional options for the search request. - */ -public List> searchForHits(@Nonnull List requests, SearchStrategy strategy, Class innerType, RequestOptions requestOptions) { - return LaunderThrowable.await(searchForHitsAsync(requests, strategy, innerType, requestOptions)); + * Executes a synchronous search for the provided search requests, with certainty that we will + * only request Algolia records (hits). Results will be received in the same order as the queries. + * + * @param requests A list of search requests to be executed. + * @param strategy The search strategy to be employed during the search. + * @param requestOptions Additional options for the search request. + */ +public List> searchForHits( + @Nonnull List requests, + SearchStrategy strategy, + Class innerType, + RequestOptions requestOptions +) { + return LaunderThrowable.await(searchForHitsAsync(requests, strategy, innerType, requestOptions)); } /** - * Executes an asynchronous search for the provided search requests, with certainty that we will only request - * Algolia records (hits). Results will be received in the same order as the queries. - * - * @param requests A list of search requests to be executed. - */ + * Executes an asynchronous search for the provided search requests, with certainty that we will + * only request Algolia records (hits). Results will be received in the same order as the queries. + * + * @param requests A list of search requests to be executed. + */ public CompletableFuture>> searchForHitsAsync(@Nonnull List requests, Class innerType) { - return searchForHitsAsync(requests, null, innerType, null); -} - -/** - * Executes an asynchronous search for the provided search requests, with certainty that we will only request - * Algolia records (hits). Results will be received in the same order as the queries. - * - * @param requests A list of search requests to be executed. - * @param strategy The search strategy to be employed during the search. - */ -public CompletableFuture>> searchForHitsAsync(@Nonnull List requests, SearchStrategy strategy, Class innerType) { - return searchForHitsAsync(requests, strategy, innerType, null); -} - -/** - * Executes an asynchronous search for the provided search requests, with certainty that we will only request - * Algolia records (hits). Results will be received in the same order as the queries. - * - * @param requests A list of search requests to be executed. - * @param innerType The class held by the index, could be your custom class or {@link Object}. - * @param strategy The search strategy to be employed during the search. - * @param requestOptions Additional options for the search request. 
- */ -public CompletableFuture>> searchForHitsAsync(@Nonnull List requests, SearchStrategy strategy, Class innerType, RequestOptions requestOptions) { - final List searchQueries = new ArrayList<>(requests); // Upcast the list - final SearchMethodParams params = new SearchMethodParams() - .setRequests(searchQueries) - .setStrategy(strategy); - return searchAsync(params, innerType) - .thenApply(searchResponses -> searchResponses - .getResults() - .stream() - .map(res -> (SearchResponse) res) - .collect(Collectors.toList()) - ); -} - -/** - * Executes a synchronous search for the provided search requests, with certainty that we will only request - * Algolia facets. Results will be received in the same order as the queries. - * - * @param requests A list of search requests to be executed. - */ + return searchForHitsAsync(requests, null, innerType, null); +} + +/** + * Executes an asynchronous search for the provided search requests, with certainty that we will + * only request Algolia records (hits). Results will be received in the same order as the queries. + * + * @param requests A list of search requests to be executed. + * @param strategy The search strategy to be employed during the search. + */ +public CompletableFuture>> searchForHitsAsync( + @Nonnull List requests, + SearchStrategy strategy, + Class innerType +) { + return searchForHitsAsync(requests, strategy, innerType, null); +} + +/** + * Executes an asynchronous search for the provided search requests, with certainty that we will + * only request Algolia records (hits). Results will be received in the same order as the queries. + * + * @param requests A list of search requests to be executed. + * @param innerType The class held by the index, could be your custom class or {@link Object}. + * @param strategy The search strategy to be employed during the search. + * @param requestOptions Additional options for the search request. + */ +public CompletableFuture>> searchForHitsAsync( + @Nonnull List requests, + SearchStrategy strategy, + Class innerType, + RequestOptions requestOptions +) { + final List searchQueries = new ArrayList<>(requests); // Upcast the list + final SearchMethodParams params = new SearchMethodParams().setRequests(searchQueries).setStrategy(strategy); + return searchAsync(params, innerType).thenApply(searchResponses -> + searchResponses.getResults().stream().map(res -> (SearchResponse) res).collect(Collectors.toList()) + ); +} + +/** + * Executes a synchronous search for the provided search requests, with certainty that we will + * only request Algolia facets. Results will be received in the same order as the queries. + * + * @param requests A list of search requests to be executed. + */ public List searchForFacets(@Nonnull List requests) { - return LaunderThrowable.await(searchForFacetsAsync(requests, null, null)); + return LaunderThrowable.await(searchForFacetsAsync(requests, null, null)); } /** - * Executes a synchronous search for the provided search requests, with certainty that we will only request - * Algolia facets. Results will be received in the same order as the queries. - * - * @param requests A list of search requests to be executed. - * @param strategy The search strategy to be employed during the search. - */ + * Executes a synchronous search for the provided search requests, with certainty that we will + * only request Algolia facets. Results will be received in the same order as the queries. + * + * @param requests A list of search requests to be executed. 
+ * @param strategy The search strategy to be employed during the search. + */ public List searchForFacets(@Nonnull List requests, SearchStrategy strategy) { - return LaunderThrowable.await(searchForFacetsAsync(requests, strategy, null)); + return LaunderThrowable.await(searchForFacetsAsync(requests, strategy, null)); } /** - * Executes a synchronous search for the provided search requests, with certainty that we will only request - * Algolia facets. Results will be received in the same order as the queries. - * - * @param requests A list of search requests to be executed. - * @param strategy The search strategy to be employed during the search. - * @param requestOptions Additional options for the search request. - */ -public List searchForFacets(@Nonnull List requests, SearchStrategy strategy, RequestOptions requestOptions) { - return LaunderThrowable.await(searchForFacetsAsync(requests, strategy, requestOptions)); + * Executes a synchronous search for the provided search requests, with certainty that we will + * only request Algolia facets. Results will be received in the same order as the queries. + * + * @param requests A list of search requests to be executed. + * @param strategy The search strategy to be employed during the search. + * @param requestOptions Additional options for the search request. + */ +public List searchForFacets( + @Nonnull List requests, + SearchStrategy strategy, + RequestOptions requestOptions +) { + return LaunderThrowable.await(searchForFacetsAsync(requests, strategy, requestOptions)); } /** - * Executes an asynchronous search for the provided search requests, with certainty that we will only request - * Algolia facets. Results will be received in the same order as the queries. - * - * @param requests A list of search requests to be executed. - */ + * Executes an asynchronous search for the provided search requests, with certainty that we will + * only request Algolia facets. Results will be received in the same order as the queries. + * + * @param requests A list of search requests to be executed. + */ public CompletableFuture> searchForFacetsAsync(@Nonnull List requests) { - return searchForFacetsAsync(requests, null, null); -} - -/** - * Executes an asynchronous search for the provided search requests, with certainty that we will only request - * Algolia facets. Results will be received in the same order as the queries. - * - * @param requests A list of search requests to be executed. - * @param strategy The search strategy to be employed during the search. - */ -public CompletableFuture> searchForFacetsAsync(@Nonnull List requests, SearchStrategy strategy) { - return searchForFacetsAsync(requests, strategy, null); -} - -/** - * Executes an asynchronous search for the provided search requests, with certainty that we will only request - * Algolia facets. Results will be received in the same order as the queries. - * - * @param requests A list of search requests to be executed. - * @param strategy The search strategy to be employed during the search. - * @param requestOptions Additional options for the search request. 
- */ -public CompletableFuture> searchForFacetsAsync(@Nonnull List requests, SearchStrategy strategy, RequestOptions requestOptions) { - final List searchQueries = new ArrayList<>(requests); // Upcast the list - final SearchMethodParams params = new SearchMethodParams() - .setRequests(searchQueries) - .setStrategy(strategy); - return searchAsync(params, Hit.class) - .thenApply(searchResponses -> searchResponses - .getResults() - .stream() - .map(res -> (SearchForFacetValuesResponse) res) - .collect(Collectors.toList()) - ); -} - -/** -* Helper: Chunks the given `objects` list in subset of 1000 elements max in order to make it fit -* in `batch` requests. -* -* @summary Helper: Chunks the given `objects` list in subset of 1000 elements max in order to -* make it fit in `batch` requests. -* @param indexName - The `indexName` to replace `objects` in. -* @param objects - The array of `objects` to store in the given Algolia `indexName`. -* @param action - The `batch` `action` to perform on the given array of `objects`. -* @param waitForTasks - Whether or not we should wait until every `batch` tasks has been -* processed, this operation may slow the total execution time of this method but is more -* reliable. -* @param batchSize - The size of the chunk of `objects`. The number of `batch` calls will be -* equal to `length(objects) / batchSize`. Defaults to 1000. -* @param requestOptions - The requestOptions to send along with the query, they will be forwarded -* to the `getTask` method and merged with the transporter requestOptions. -*/ + return searchForFacetsAsync(requests, null, null); +} + +/** + * Executes an asynchronous search for the provided search requests, with certainty that we will + * only request Algolia facets. Results will be received in the same order as the queries. + * + * @param requests A list of search requests to be executed. + * @param strategy The search strategy to be employed during the search. + */ +public CompletableFuture> searchForFacetsAsync( + @Nonnull List requests, + SearchStrategy strategy +) { + return searchForFacetsAsync(requests, strategy, null); +} + +/** + * Executes an asynchronous search for the provided search requests, with certainty that we will + * only request Algolia facets. Results will be received in the same order as the queries. + * + * @param requests A list of search requests to be executed. + * @param strategy The search strategy to be employed during the search. + * @param requestOptions Additional options for the search request. + */ +public CompletableFuture> searchForFacetsAsync( + @Nonnull List requests, + SearchStrategy strategy, + RequestOptions requestOptions +) { + final List searchQueries = new ArrayList<>(requests); // Upcast the list + final SearchMethodParams params = new SearchMethodParams().setRequests(searchQueries).setStrategy(strategy); + return searchAsync(params, Hit.class).thenApply(searchResponses -> + searchResponses.getResults().stream().map(res -> (SearchForFacetValuesResponse) res).collect(Collectors.toList()) + ); +} + +/** + * Helper: Chunks the given `objects` list in subset of 1000 elements max in order to make it fit + * in `batch` requests. + * + * @summary Helper: Chunks the given `objects` list in subset of 1000 elements max in order to + * make it fit in `batch` requests. + * @param indexName - The `indexName` to replace `objects` in. + * @param objects - The array of `objects` to store in the given Algolia `indexName`. + * @param action - The `batch` `action` to perform on the given array of `objects`. 
+ * @param waitForTasks - Whether or not we should wait until every `batch` tasks has been + * processed, this operation may slow the total execution time of this method but is more + * reliable. + * @param batchSize - The size of the chunk of `objects`. The number of `batch` calls will be + * equal to `length(objects) / batchSize`. Defaults to 1000. + * @param requestOptions - The requestOptions to send along with the query, they will be forwarded + * to the `getTask` method and merged with the transporter requestOptions. + */ public List chunkedBatch( -String indexName, -Iterable objects, -Action action, -boolean waitForTasks, -int batchSize, -RequestOptions requestOptions + String indexName, + Iterable objects, + Action action, + boolean waitForTasks, + int batchSize, + RequestOptions requestOptions ) { -List responses = new ArrayList<>(); -List requests = new ArrayList<>(); + List responses = new ArrayList<>(); + List requests = new ArrayList<>(); + + for (T item : objects) { + if (requests.size() == batchSize) { + BatchResponse batch = batch(indexName, new BatchWriteParams().setRequests(requests), requestOptions); + responses.add(batch); + requests.clear(); + } -for (T item : objects) { - if (requests.size() == batchSize) { + requests.add(new BatchRequest().setAction(action).setBody(item)); + } + + if (requests.size() > 0) { BatchResponse batch = batch(indexName, new BatchWriteParams().setRequests(requests), requestOptions); responses.add(batch); - requests.clear(); } - requests.add(new BatchRequest().setAction(action).setBody(item)); -} - -if (requests.size() > 0) { - BatchResponse batch = batch(indexName, new BatchWriteParams().setRequests(requests), requestOptions); - responses.add(batch); -} - -if (waitForTasks) { - responses.forEach(response -> waitForTask(indexName, response.getTaskID(), requestOptions)); -} + if (waitForTasks) { + responses.forEach(response -> waitForTask(indexName, response.getTaskID(), requestOptions)); + } -return responses; + return responses; } public List chunkedBatch(String indexName, Iterable objects, Action action, boolean waitForTasks) { @@ -590,277 +639,340 @@ public List chunkedBatch(String indexName, Iterable object return chunkedBatch(indexName, objects, action, waitForTasks, batchSize, null); } -public List chunkedBatch(String indexName, Iterable objects, Action action, boolean waitForTasks, RequestOptions requestOptions) { +public List chunkedBatch( + String indexName, + Iterable objects, + Action action, + boolean waitForTasks, + RequestOptions requestOptions +) { return chunkedBatch(indexName, objects, action, waitForTasks, 1000, requestOptions); } /** -* Push a new set of objects and remove all previous ones. Settings, synonyms and query rules are -* untouched. Replace all records in an index without any downtime. See -* https://api-clients-automation.netlify.app/docs/add-new-api-client#5-helpers for -* implementation details. -* -* @param indexName The `indexName` to replace `objects` in. -* @param objects The array of `objects` to store in the given Algolia `indexName`. -* @param batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal -* to `length(objects) / batchSize`. 
-* @throws AlgoliaRetryException When the retry has failed on all hosts -* @throws AlgoliaApiException When the API sends an http error code -* @throws AlgoliaRuntimeException When an error occurred during the serialization -*/ -public ReplaceAllObjectsResponse replaceAllObjects( -String indexName, -Iterable objects, -int batchSize -) { -return replaceAllObjects(indexName, objects, batchSize, null); -} + * Push a new set of objects and remove all previous ones. Settings, synonyms and query rules are + * untouched. Replace all records in an index without any downtime. See + * https://api-clients-automation.netlify.app/docs/add-new-api-client#5-helpers for implementation + * details. + * + * @param indexName The `indexName` to replace `objects` in. + * @param objects The array of `objects` to store in the given Algolia `indexName`. + * @param batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal + * to `length(objects) / batchSize`. + * @throws AlgoliaRetryException When the retry has failed on all hosts + * @throws AlgoliaApiException When the API sends an http error code + * @throws AlgoliaRuntimeException When an error occurred during the serialization + */ +public ReplaceAllObjectsResponse replaceAllObjects(String indexName, Iterable objects, int batchSize) { + return replaceAllObjects(indexName, objects, batchSize, null); +} + +/** + * Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used + * under the hood, which creates a `batch` requests with at most 1000 objects in it. + * + * @param indexName The `indexName` to replace `objects` in. + * @param objects The array of `objects` to store in the given Algolia `indexName`. + */ +public List saveObjects(String indexName, Iterable objects) { + return saveObjects(indexName, objects, null); +} + +/** + * Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used + * under the hood, which creates a `batch` requests with at most 1000 objects in it. + * + * @param indexName The `indexName` to replace `objects` in. + * @param objects The array of `objects` to store in the given Algolia `indexName`. + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ +public List saveObjects(String indexName, Iterable objects, RequestOptions requestOptions) { + return saveObjects(indexName, objects, false, requestOptions); +} + +/** + * Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used + * under the hood, which creates a `batch` requests with at most 1000 objects in it. + * + * @param indexName The `indexName` to replace `objects` in. + * @param objects The array of `objects` to store in the given Algolia `indexName`. + * @param waitForTasks - Whether or not we should wait until every `batch` tasks has been + * processed, this operation may slow the total execution time of this method but is more + * reliable. + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ +public List saveObjects(String indexName, Iterable objects, boolean waitForTasks, RequestOptions requestOptions) { + return chunkedBatch(indexName, objects, Action.ADD_OBJECT, waitForTasks, 1000, requestOptions); +} + +/** + * Helper: Deletes every records for the given objectIDs. 
 The `chunkedBatch` helper is used under + * the hood, which creates a `batch` requests with at most 1000 objectIDs in it. + * + * @param indexName The `indexName` to delete `objectIDs` from. + * @param objectIDs The array of `objectIDs` to delete from the `indexName`. + */ +public List deleteObjects(String indexName, List objectIDs) { + return deleteObjects(indexName, objectIDs, false, null); +} + +/** + * Helper: Deletes every records for the given objectIDs. The `chunkedBatch` helper is used under + * the hood, which creates a `batch` requests with at most 1000 objectIDs in it. + * + * @param indexName The `indexName` to delete `objectIDs` from. + * @param objectIDs The array of `objectIDs` to delete from the `indexName`. + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ +public List deleteObjects(String indexName, List objectIDs, RequestOptions requestOptions) { + return deleteObjects(indexName, objectIDs, false, requestOptions); +} + +/** + * Helper: Deletes every records for the given objectIDs. The `chunkedBatch` helper is used under + * the hood, which creates a `batch` requests with at most 1000 objectIDs in it. + * + * @param indexName The `indexName` to delete `objectIDs` from. + * @param objectIDs The array of `objectIDs` to delete from the `indexName`. + * @param waitForTasks - Whether or not we should wait until every `batch` tasks has been + * processed, this operation may slow the total execution time of this method but is more + * reliable. + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ +public List deleteObjects(String indexName, List objectIDs, boolean waitForTasks, RequestOptions requestOptions) { + List> objects = new ArrayList<>(); + + for (String id : objectIDs) { + Map obj = new HashMap<>(); + obj.put("objectID", id); + objects.add(obj); + } -/** -* Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it. -* -* @param indexName The `indexName` to replace `objects` in. -* @param objects The array of `objects` to store in the given Algolia `indexName`. -*/ -public List saveObjects( -String indexName, -Iterable objects -) { -return saveObjects(indexName, objects, null); + return chunkedBatch(indexName, objects, Action.DELETE_OBJECT, waitForTasks, 1000, requestOptions); } /** -* Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it. -* -* @param indexName The `indexName` to replace `objects` in. -* @param objects The array of `objects` to store in the given Algolia `indexName`. -* @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional) -*/ -public List saveObjects( -String indexName, -Iterable objects, -RequestOptions requestOptions -) { -return chunkedBatch(indexName, objects, Action.ADD_OBJECT, false, 1000, requestOptions); + * Helper: Replaces object content of all the given objects according to their respective + * `objectID` field. The `chunkedBatch` helper is used under the hood, which creates a `batch` + * requests with at most 1000 objects in it. + * + * @param indexName The `indexName` to update `objects` in. 
+ * @param objects The array of `objects` to update in the given Algolia `indexName`. + * @param createIfNotExists To be provided if non-existing objects are passed, otherwise, the call + * will fail. + */ +public List partialUpdateObjects(String indexName, Iterable objects, boolean createIfNotExists) { + return partialUpdateObjects(indexName, objects, createIfNotExists, false, null); } /** -* Helper: Deletes every records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objectIDs in it. -* -* @param indexName The `indexName` to delete `objectIDs` from. -* @param objectIDs The array of `objectIDs` to delete from the `indexName`. -*/ -public List deleteObjects( -String indexName, -List objectIDs + * Helper: Replaces object content of all the given objects according to their respective + * `objectID` field. The `chunkedBatch` helper is used under the hood, which creates a `batch` + * requests with at most 1000 objects in it. + * + * @param indexName The `indexName` to update `objects` in. + * @param objects The array of `objects` to update in the given Algolia `indexName`. + * @param createIfNotExists To be provided if non-existing objects are passed, otherwise, the call + * will fail. + * @param waitForTasks - Whether or not we should wait until every `batch` tasks has been + * processed, this operation may slow the total execution time of this method but is more + * reliable. + */ +public List partialUpdateObjects( + String indexName, + Iterable objects, + boolean createIfNotExists, + boolean waitForTasks +) { + return partialUpdateObjects(indexName, objects, createIfNotExists, waitForTasks, null); +} + +/** + * Helper: Replaces object content of all the given objects according to their respective + * `objectID` field. The `chunkedBatch` helper is used under the hood, which creates a `batch` + * requests with at most 1000 objects in it. + * + * @param indexName The `indexName` to update `objects` in. + * @param objects The array of `objects` to update in the given Algolia `indexName`. + * @param createIfNotExists To be provided if non-existing objects are passed, otherwise, the call + * will fail. + * @param waitForTasks - Whether or not we should wait until every `batch` tasks has been + * processed, this operation may slow the total execution time of this method but is more + * reliable. + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + */ +public List partialUpdateObjects( + String indexName, + Iterable objects, + boolean createIfNotExists, + boolean waitForTasks, + RequestOptions requestOptions ) { -return deleteObjects(indexName, objectIDs, null); + return chunkedBatch( + indexName, + objects, + createIfNotExists ? Action.PARTIAL_UPDATE_OBJECT : Action.PARTIAL_UPDATE_OBJECT_NO_CREATE, + waitForTasks, + 1000, + requestOptions + ); } /** -* Helper: Deletes every records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objectIDs in it. -* -* @param indexName The `indexName` to delete `objectIDs` from. -* @param objectIDs The array of `objectIDs` to delete from the `indexName`. -* @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. 
(optional) -*/ -public List deleteObjects( -String indexName, -List objectIDs, -RequestOptions requestOptions + * Push a new set of objects and remove all previous ones. Settings, synonyms and query rules are + * untouched. Replace all records in an index without any downtime. See + * https://api-clients-automation.netlify.app/docs/add-new-api-client#5-helpers for implementation + * details. + * + * @param indexName The `indexName` to replace `objects` in. + * @param objects The array of `objects` to store in the given Algolia `indexName`. + * @param batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal + * to `length(objects) / batchSize`. + * @param requestOptions The requestOptions to send along with the query, they will be merged with + * the transporter requestOptions. (optional) + * @throws AlgoliaRetryException When the retry has failed on all hosts + * @throws AlgoliaApiException When the API sends an http error code + * @throws AlgoliaRuntimeException When an error occurred during the serialization + */ +public ReplaceAllObjectsResponse replaceAllObjects( + String indexName, + Iterable objects, + int batchSize, + RequestOptions requestOptions ) { -List> objects = new ArrayList<>(); + Random rnd = new Random(); + String tmpIndexName = indexName + "_tmp_" + rnd.nextInt(100); + + // Copy settings, synonyms and rules + UpdatedAtResponse copyOperationResponse = operationIndex( + indexName, + new OperationIndexParams() + .setOperation(OperationType.COPY) + .setDestination(tmpIndexName) + .addScope(ScopeType.SETTINGS) + .addScope(ScopeType.RULES) + .addScope(ScopeType.SYNONYMS), + requestOptions + ); -for (String id : objectIDs) { - Map obj = new HashMap<>(); - obj.put("objectID", id); - objects.add(obj); -} + // Save new objects + List batchResponses = chunkedBatch(tmpIndexName, objects, Action.ADD_OBJECT, true, batchSize, requestOptions); -return chunkedBatch(indexName, objects, Action.DELETE_OBJECT, false, 1000, requestOptions); -} + waitForTask(tmpIndexName, copyOperationResponse.getTaskID(), requestOptions); -/** -* Helper: Replaces object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it. -* -* @param indexName The `indexName` to update `objects` in. -* @param objects The array of `objects` to update in the given Algolia `indexName`. -* @param createIfNotExists To be provided if non-existing objects are passed, otherwise, the call will fail. 
-*/ -public List partialUpdateObjects( -String indexName, -Iterable objects, -boolean createIfNotExists -) { -return partialUpdateObjects(indexName, objects, createIfNotExists, null); + copyOperationResponse = operationIndex( + indexName, + new OperationIndexParams() + .setOperation(OperationType.COPY) + .setDestination(tmpIndexName) + .addScope(ScopeType.SETTINGS) + .addScope(ScopeType.RULES) + .addScope(ScopeType.SYNONYMS), + requestOptions + ); + waitForTask(tmpIndexName, copyOperationResponse.getTaskID(), requestOptions); + + // Move temporary index to source index + UpdatedAtResponse moveOperationResponse = operationIndex( + tmpIndexName, + new OperationIndexParams().setOperation(OperationType.MOVE).setDestination(indexName), + requestOptions + ); + waitForTask(tmpIndexName, moveOperationResponse.getTaskID(), requestOptions); + + return new ReplaceAllObjectsResponse() + .setCopyOperationResponse(copyOperationResponse) + .setBatchResponses(batchResponses) + .setMoveOperationResponse(moveOperationResponse); } /** -* Helper: Replaces object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it. -* -* @param indexName The `indexName` to update `objects` in. -* @param objects The array of `objects` to update in the given Algolia `indexName`. -* @param createIfNotExists To be provided if non-existing objects are passed, otherwise, the call will fail. -* @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional) -*/ -public List partialUpdateObjects( -String indexName, -Iterable objects, -boolean createIfNotExists, -RequestOptions requestOptions -) { -return chunkedBatch(indexName, objects, createIfNotExists ? Action.PARTIAL_UPDATE_OBJECT : Action.PARTIAL_UPDATE_OBJECT_NO_CREATE, false, 1000, requestOptions); -} - -/** -* Push a new set of objects and remove all previous ones. Settings, synonyms and query rules are -* untouched. Replace all records in an index without any downtime. -* See https://api-clients-automation.netlify.app/docs/add-new-api-client#5-helpers for implementation details. -* -* @param indexName The `indexName` to replace `objects` in. -* @param objects The array of `objects` to store in the given Algolia `indexName`. -* @param batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal -* to `length(objects) / batchSize`. -* @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. 
(optional) -* @throws AlgoliaRetryException When the retry has failed on all hosts -* @throws AlgoliaApiException When the API sends an http error code -* @throws AlgoliaRuntimeException When an error occurred during the serialization -*/ -public ReplaceAllObjectsResponse replaceAllObjects( -String indexName, -Iterable objects, -int batchSize, -RequestOptions requestOptions -) { -Random rnd = new Random(); -String tmpIndexName = indexName + "_tmp_" + rnd.nextInt(100); - -// Copy settings, synonyms and rules -UpdatedAtResponse copyOperationResponse = operationIndex( - indexName, - new OperationIndexParams() - .setOperation(OperationType.COPY) - .setDestination(tmpIndexName) - .addScope(ScopeType.SETTINGS) - .addScope(ScopeType.RULES) - .addScope(ScopeType.SYNONYMS), - requestOptions -); - -// Save new objects -List batchResponses = chunkedBatch(tmpIndexName, objects, Action.ADD_OBJECT, true, batchSize, requestOptions); - -waitForTask(tmpIndexName, copyOperationResponse.getTaskID(), requestOptions); - -copyOperationResponse = operationIndex( - indexName, - new OperationIndexParams() - .setOperation(OperationType.COPY) - .setDestination(tmpIndexName) - .addScope(ScopeType.SETTINGS) - .addScope(ScopeType.RULES) - .addScope(ScopeType.SYNONYMS), - requestOptions -); -waitForTask(tmpIndexName, copyOperationResponse.getTaskID(), requestOptions); - -// Move temporary index to source index -UpdatedAtResponse moveOperationResponse = operationIndex( - tmpIndexName, - new OperationIndexParams().setOperation(OperationType.MOVE).setDestination(indexName), - requestOptions -); -waitForTask(tmpIndexName, moveOperationResponse.getTaskID(), requestOptions); - -return new ReplaceAllObjectsResponse() - .setCopyOperationResponse(copyOperationResponse) - .setBatchResponses(batchResponses) - .setMoveOperationResponse(moveOperationResponse); -} - -/** -* Helper: Generates a secured API key based on the given `parent_api_key` and given -* `restrictions`. -* -* @param parentApiKey API key to generate from. -* @param restrictions Restrictions to add the key -* @throws Exception if an error occurs during the encoding -* @throws AlgoliaRetryException When the retry has failed on all hosts -* @throws AlgoliaApiException When the API sends an http error code -* @throws AlgoliaRuntimeException When an error occurred during the serialization -*/ + * Helper: Generates a secured API key based on the given `parent_api_key` and given + * `restrictions`. + * + * @param parentApiKey API key to generate from. 
+ * @param restrictions Restrictions to add the key + * @throws Exception if an error occurs during the encoding + * @throws AlgoliaRetryException When the retry has failed on all hosts + * @throws AlgoliaApiException When the API sends an http error code + * @throws AlgoliaRuntimeException When an error occurred during the serialization + */ public String generateSecuredApiKey(@Nonnull String parentApiKey, @Nonnull SecuredApiKeyRestrictions restrictions) throws Exception { -Map restrictionsMap = new HashMap<>(); -if (restrictions.getFilters() != null) restrictionsMap.put("filters", StringUtils.paramToString(restrictions.getFilters())); -if (restrictions.getValidUntil() != 0) restrictionsMap.put("validUntil", StringUtils.paramToString(restrictions.getValidUntil())); -if (restrictions.getRestrictIndices() != null) restrictionsMap.put( - "restrictIndices", - StringUtils.paramToString(restrictions.getRestrictIndices()) -); -if (restrictions.getRestrictSources() != null) restrictionsMap.put( - "restrictSources", - StringUtils.paramToString(restrictions.getRestrictSources()) -); -if (restrictions.getUserToken() != null) restrictionsMap.put("userToken", StringUtils.paramToString(restrictions.getUserToken())); - -if (restrictions.getSearchParams() != null) { - Map searchParamsMap = JsonSerializer - .getObjectMapper() - .convertValue(restrictions.getSearchParams(), new TypeReference>() {}); - searchParamsMap.forEach((key, value) -> restrictionsMap.put(key, StringUtils.paramToString(value))); -} - -String queryStr = restrictionsMap - .entrySet() - .stream() - .sorted(Map.Entry.comparingByKey()) - .map(entry -> String.format("%s=%s", entry.getKey(), entry.getValue())) - .collect(Collectors.joining("&")); - -String key = hmac(parentApiKey, queryStr); - -return new String(Base64.getEncoder().encode(String.format("%s%s", key, queryStr).getBytes(Charset.forName("UTF8")))); + Map restrictionsMap = new HashMap<>(); + if (restrictions.getFilters() != null) restrictionsMap.put("filters", StringUtils.paramToString(restrictions.getFilters())); + if (restrictions.getValidUntil() != 0) restrictionsMap.put("validUntil", StringUtils.paramToString(restrictions.getValidUntil())); + if (restrictions.getRestrictIndices() != null) restrictionsMap.put( + "restrictIndices", + StringUtils.paramToString(restrictions.getRestrictIndices()) + ); + if (restrictions.getRestrictSources() != null) restrictionsMap.put( + "restrictSources", + StringUtils.paramToString(restrictions.getRestrictSources()) + ); + if (restrictions.getUserToken() != null) restrictionsMap.put("userToken", StringUtils.paramToString(restrictions.getUserToken())); + + if (restrictions.getSearchParams() != null) { + Map searchParamsMap = JsonSerializer.getObjectMapper() + .convertValue(restrictions.getSearchParams(), new TypeReference>() {}); + searchParamsMap.forEach((key, value) -> restrictionsMap.put(key, StringUtils.paramToString(value))); + } + + String queryStr = restrictionsMap + .entrySet() + .stream() + .sorted(Map.Entry.comparingByKey()) + .map(entry -> String.format("%s=%s", entry.getKey(), entry.getValue())) + .collect(Collectors.joining("&")); + + String key = hmac(parentApiKey, queryStr); + + return new String(Base64.getEncoder().encode(String.format("%s%s", key, queryStr).getBytes(Charset.forName("UTF8")))); } private String hmac(String key, String msg) throws NoSuchAlgorithmException, InvalidKeyException { -Mac hmac = Mac.getInstance("HmacSHA256"); -hmac.init(new SecretKeySpec(key.getBytes(), "HmacSHA256")); -byte[] rawHmac = 
hmac.doFinal(msg.getBytes()); -StringBuilder sb = new StringBuilder(rawHmac.length * 2); -for (byte b : rawHmac) { - sb.append(String.format("%02x", b & 0xff)); -} -return sb.toString(); + Mac hmac = Mac.getInstance("HmacSHA256"); + hmac.init(new SecretKeySpec(key.getBytes(), "HmacSHA256")); + byte[] rawHmac = hmac.doFinal(msg.getBytes()); + StringBuilder sb = new StringBuilder(rawHmac.length * 2); + for (byte b : rawHmac) { + sb.append(String.format("%02x", b & 0xff)); + } + return sb.toString(); } /** -* Helper: Retrieves the remaining validity of the previous generated `secured_api_key`, the -* `validUntil` parameter must have been provided. -* -* @param securedApiKey The secured API Key to check -* @throws AlgoliaRuntimeException if securedApiKey is null, empty or whitespaces. -* @throws AlgoliaRuntimeException if securedApiKey doesn't have a validUntil -* parameter. -*/ + * Helper: Retrieves the remaining validity of the previous generated `secured_api_key`, the + * `validUntil` parameter must have been provided. + * + * @param securedApiKey The secured API Key to check + * @throws AlgoliaRuntimeException if securedApiKey is null, empty or whitespaces. + * @throws AlgoliaRuntimeException if securedApiKey doesn't have a validUntil + * parameter. + */ public Duration getSecuredApiKeyRemainingValidity(@Nonnull String securedApiKey) { -if (securedApiKey == null || securedApiKey.trim().isEmpty()) { - throw new AlgoliaRuntimeException("securedAPIKey must not be empty, null or whitespaces"); -} + if (securedApiKey == null || securedApiKey.trim().isEmpty()) { + throw new AlgoliaRuntimeException("securedAPIKey must not be empty, null or whitespaces"); + } -byte[] decodedBytes = Base64.getDecoder().decode(securedApiKey); -String decodedString = new String(decodedBytes); + byte[] decodedBytes = Base64.getDecoder().decode(securedApiKey); + String decodedString = new String(decodedBytes); -Pattern pattern = Pattern.compile("validUntil=\\d+"); -Matcher matcher = pattern.matcher(decodedString); + Pattern pattern = Pattern.compile("validUntil=\\d+"); + Matcher matcher = pattern.matcher(decodedString); -if (!matcher.find()) { - throw new AlgoliaRuntimeException("The Secured API Key doesn't have a validUntil parameter."); -} + if (!matcher.find()) { + throw new AlgoliaRuntimeException("The Secured API Key doesn't have a validUntil parameter."); + } -String validUntilMatch = matcher.group(0); -long timeStamp = Long.parseLong(validUntilMatch.replace("validUntil=", "")); + String validUntilMatch = matcher.group(0); + long timeStamp = Long.parseLong(validUntilMatch.replace("validUntil=", "")); -return Duration.ofSeconds(timeStamp - Instant.now().getEpochSecond()); + return Duration.ofSeconds(timeStamp - Instant.now().getEpochSecond()); } public boolean indexExists(String indexName) { diff --git a/templates/javascript/clients/client/api/helpers.mustache b/templates/javascript/clients/client/api/helpers.mustache index a509b9c3f6..7610e01fef 100644 --- a/templates/javascript/clients/client/api/helpers.mustache +++ b/templates/javascript/clients/client/api/helpers.mustache @@ -317,14 +317,15 @@ async chunkedBatch({ indexName, objects, action = 'addObject', waitForTasks, bat * @param saveObjects - The `saveObjects` object. * @param saveObjects.indexName - The `indexName` to save `objects` in. * @param saveObjects.objects - The array of `objects` to store in the given Algolia `indexName`. 
+ * @param saveObjects.waitForTasks - Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. * @param requestOptions - The requestOptions to send along with the query, they will be forwarded to the `batch` method and merged with the transporter requestOptions. */ async saveObjects( - { indexName, objects }: SaveObjectsOptions, + { indexName, objects, waitForTasks }: SaveObjectsOptions, requestOptions?: RequestOptions ): Promise { return await this.chunkedBatch( - { indexName, objects, action: 'addObject' }, + { indexName, objects, action: 'addObject', waitForTasks }, requestOptions ); }, @@ -336,10 +337,11 @@ async saveObjects( * @param deleteObjects - The `deleteObjects` object. * @param deleteObjects.indexName - The `indexName` to delete `objectIDs` from. * @param deleteObjects.objectIDs - The objectIDs to delete. + * @param deleteObjects.waitForTasks - Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. * @param requestOptions - The requestOptions to send along with the query, they will be forwarded to the `batch` method and merged with the transporter requestOptions. */ async deleteObjects( - { indexName, objectIDs }: DeleteObjectsOptions, + { indexName, objectIDs, waitForTasks }: DeleteObjectsOptions, requestOptions?: RequestOptions ): Promise { return await this.chunkedBatch( @@ -347,6 +349,7 @@ async deleteObjects( indexName, objects: objectIDs.map((objectID) => ({ objectID })), action: 'deleteObject', + waitForTasks, }, requestOptions ); @@ -360,10 +363,11 @@ async deleteObjects( * @param partialUpdateObjects.indexName - The `indexName` to update `objects` in. * @param partialUpdateObjects.objects - The array of `objects` to update in the given Algolia `indexName`. * @param partialUpdateObjects.createIfNotExists - To be provided if non-existing objects are passed, otherwise, the call will fail.. + * @param partialUpdateObjects.waitForTasks - Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. * @param requestOptions - The requestOptions to send along with the query, they will be forwarded to the `getTask` method and merged with the transporter requestOptions. */ async partialUpdateObjects( - { indexName, objects, createIfNotExists }: PartialUpdateObjectsOptions, + { indexName, objects, createIfNotExists, waitForTasks }: PartialUpdateObjectsOptions, requestOptions?: RequestOptions ): Promise { return await this.chunkedBatch( @@ -373,6 +377,7 @@ async partialUpdateObjects( action: createIfNotExists ? 'partialUpdateObject' : 'partialUpdateObjectNoCreate', + waitForTasks }, requestOptions ); diff --git a/templates/javascript/clients/client/model/clientMethodProps.mustache b/templates/javascript/clients/client/model/clientMethodProps.mustache index 4bd1efde65..3e454a837e 100644 --- a/templates/javascript/clients/client/model/clientMethodProps.mustache +++ b/templates/javascript/clients/client/model/clientMethodProps.mustache @@ -132,7 +132,7 @@ export type SearchClientNodeHelpers = { } {{/isSearchClient}} -export type DeleteObjectsOptions = Pick & { +export type DeleteObjectsOptions = Pick & { /** * The objectIDs to delete. 
*/ @@ -141,7 +141,7 @@ export type DeleteObjectsOptions = Pick & { export type PartialUpdateObjectsOptions = Pick< ChunkedBatchOptions, - 'indexName' | 'objects' + 'indexName' | 'objects' | 'waitForTasks' > & { /** *To be provided if non-existing objects are passed, otherwise, the call will fail. @@ -151,7 +151,7 @@ export type PartialUpdateObjectsOptions = Pick< export type SaveObjectsOptions = Pick< ChunkedBatchOptions, - 'indexName' | 'objects' + 'indexName' | 'objects' | 'waitForTasks' >; export type ChunkedBatchOptions = ReplaceAllObjectsOptions & { diff --git a/templates/php/api.mustache b/templates/php/api.mustache index 8f38c2fba0..12274200d4 100644 --- a/templates/php/api.mustache +++ b/templates/php/api.mustache @@ -659,4 +659,4 @@ use Algolia\AlgoliaSearch\Exceptions\NotFoundException; ); } } -{{/operations}} +{{/operations}} \ No newline at end of file diff --git a/templates/python/search_helpers.mustache b/templates/python/search_helpers.mustache index eb082abc8a..80a7343761 100644 --- a/templates/python/search_helpers.mustache +++ b/templates/python/search_helpers.mustache @@ -256,35 +256,38 @@ self, index_name: str, objects: List[Dict[str, Any]], + wait_for_tasks: bool = False, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[BatchResponse]: """ Helper: Saves the given array of objects in the given index. The `chunked_batch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it. """ - return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_batch(index_name=index_name, objects=objects, action=Action.ADDOBJECT, request_options=request_options) + return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_batch(index_name=index_name, objects=objects, action=Action.ADDOBJECT, wait_for_tasks=wait_for_tasks, request_options=request_options) {{^isSyncClient}}async {{/isSyncClient}}def delete_objects( self, index_name: str, object_ids: List[str], + wait_for_tasks: bool = False, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[BatchResponse]: """ Helper: Deletes every records for the given objectIDs. The `chunked_batch` helper is used under the hood, which creates a `batch` requests with at most 1000 objectIDs in it. """ - return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_batch(index_name=index_name, objects=[{"objectID": id} for id in object_ids], action=Action.DELETEOBJECT, request_options=request_options) + return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_batch(index_name=index_name, objects=[{"objectID": id} for id in object_ids], action=Action.DELETEOBJECT, wait_for_tasks=wait_for_tasks, request_options=request_options) {{^isSyncClient}}async {{/isSyncClient}}def partial_update_objects( self, index_name: str, objects: List[Dict[str, Any]], create_if_not_exists: bool = False, + wait_for_tasks: bool = False, request_options: Optional[Union[dict, RequestOptions]] = None, ) -> List[BatchResponse]: """ Helper: Replaces object content of all the given objects according to their respective `objectID` field. The `chunked_batch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it. 
""" - return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_batch(index_name=index_name, objects=objects, action=Action.PARTIALUPDATEOBJECT if create_if_not_exists else Action.PARTIALUPDATEOBJECTNOCREATE, request_options=request_options) + return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_batch(index_name=index_name, objects=objects, action=Action.PARTIALUPDATEOBJECT if create_if_not_exists else Action.PARTIALUPDATEOBJECTNOCREATE, wait_for_tasks=wait_for_tasks, request_options=request_options) {{^isSyncClient}}async {{/isSyncClient}}def chunked_batch( self, diff --git a/templates/ruby/search_helpers.mustache b/templates/ruby/search_helpers.mustache index d333beb79f..2c54d095a3 100644 --- a/templates/ruby/search_helpers.mustache +++ b/templates/ruby/search_helpers.mustache @@ -209,16 +209,17 @@ end # # @param index_name [String]: The `index_name` to save `objects` in. # @param objects [Array]: The array of `objects` to store in the given Algolia `indexName`. +# @param wait_for_tasks [Boolean]: Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. # @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional) # # @return [BatchResponse] # -def save_objects(index_name, objects, request_options = {}) +def save_objects(index_name, objects, wait_for_tasks = false, request_options = {}) chunked_batch( index_name, objects, Search::Action::ADD_OBJECT, - false, + wait_for_tasks, 1000, request_options ) @@ -228,16 +229,17 @@ end # # @param index_name [String]: The `index_name` to delete `object_ids` from. # @param object_ids [Array]: The object_ids to delete. +# @param wait_for_tasks [Boolean]: Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. # @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional) # # @return [BatchResponse] # -def delete_objects(index_name, object_ids, request_options = {}) +def delete_objects(index_name, object_ids, wait_for_tasks = false, request_options = {}) chunked_batch( index_name, object_ids.map { |id| { "objectID" => id } }, Search::Action::DELETE_OBJECT, - false, + wait_for_tasks, 1000, request_options ) @@ -248,16 +250,17 @@ end # @param index_name [String]: The `index_name` to delete `object_ids` from. # @param objects [Array]: The objects to partially update. # @param create_if_not_exists [Boolean]: To be provided if non-existing objects are passed, otherwise, the call will fail. +# @param wait_for_tasks [Boolean] Whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable. # @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional) # # @return [BatchResponse] # -def partial_update_objects(index_name, objects, create_if_not_exists, request_options = {}) +def partial_update_objects(index_name, objects, create_if_not_exists, wait_for_tasks = false, request_options = {}) chunked_batch( index_name, objects, create_if_not_exists ? 
Search::Action::PARTIAL_UPDATE_OBJECT : Search::Action::PARTIAL_UPDATE_OBJECT_NO_CREATE, - false, + wait_for_tasks, 1000, request_options )
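
Taken together, the Java, JavaScript, Python and Ruby hunks above expose the same new `waitForTasks` / `wait_for_tasks` flag on the indexing helpers. As a rough illustration only, using the Java overloads shown earlier (the `client` instance and the `records` iterable are assumed here and are not part of this diff):

// Sketch, not part of this diff: `client` is an initialized SearchClient, `records` an Iterable of POJOs.
List<BatchResponse> saved = client.saveObjects("products", records, true, null);
// waitForTasks = true blocks until every underlying `batch` task is processed (slower, more reliable).

List<BatchResponse> deleted = client.deleteObjects("products", Arrays.asList("id1", "id2"), false, null);
// waitForTasks = false keeps the previous behaviour of not waiting on the batch tasks.

List<BatchResponse> updated = client.partialUpdateObjects("products", records, true, true, null);
// createIfNotExists = true, waitForTasks = true; the last argument is the optional RequestOptions.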