Example use of org.opensearch.action.bulk.BulkRequest in the OpenSearch project (opensearch-project): class IngestServiceTests, method testExecuteIndexPipelineExistsButFailedParsing.
/**
 * Verifies that when a registered pipeline's processor throws during execution,
 * the per-item failure handler is invoked for the request that targeted the
 * pipeline (slot 1) with the cause preserved, while the request that bypassed
 * ingest is unaffected and the completion handler still fires exactly once.
 */
public void testExecuteIndexPipelineExistsButFailedParsing() {
    // Processor factory producing a processor that always fails at execute time.
    final IngestService ingestService = createWithProcessors(
        Collections.singletonMap("mock", (factories, tag, description, config) -> new AbstractProcessor("mock", "description") {
            @Override
            public IngestDocument execute(IngestDocument ingestDocument) {
                throw new IllegalStateException("error");
            }

            @Override
            public String getType() {
                return null;
            }
        })
    );

    // Register the pipeline starting from an empty cluster state.
    final String pipelineId = "_id";
    final ClusterState emptyState = ClusterState.builder(new ClusterName("_name")).build();
    final PutPipelineRequest putRequest =
        new PutPipelineRequest(pipelineId, new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON);
    final ClusterState updatedState = IngestService.innerPut(putRequest, emptyState);
    ingestService.applyClusterState(new ClusterChangedEvent("", updatedState, emptyState));

    // First request bypasses ingest entirely; second one targets the failing pipeline.
    final BulkRequest bulkRequest = new BulkRequest();
    bulkRequest.add(new IndexRequest("_index").id("_id1").source(emptyMap()).setPipeline("_none").setFinalPipeline("_none"));
    bulkRequest.add(new IndexRequest("_index").id("_id2").source(emptyMap()).setPipeline(pipelineId).setFinalPipeline("_none"));

    final SetOnce<Boolean> failure = new SetOnce<>();
    final BiConsumer<Integer, Exception> failureHandler = (slot, e) -> {
        // Only the second request (slot 1) went through the pipeline.
        assertThat(slot, equalTo(1));
        assertThat(e.getCause(), instanceOf(IllegalStateException.class));
        assertThat(e.getCause().getMessage(), equalTo("error"));
        failure.set(true);
    };
    @SuppressWarnings("unchecked")
    final BiConsumer<Thread, Exception> completionHandler = mock(BiConsumer.class);

    ingestService.executeBulkRequest(bulkRequest.numberOfActions(), bulkRequest.requests(), failureHandler, completionHandler, indexReq -> {
    }, Names.WRITE);

    assertTrue(failure.get());
    // Completion must be signalled exactly once even though one item failed.
    verify(completionHandler, times(1)).accept(Thread.currentThread(), null);
}
Example use of org.opensearch.action.bulk.BulkRequest in the OpenSearch project (opensearch-project): class IngestServiceTests, method testBulkRequestExecutionWithFailures.
/**
 * Verifies that when the pipeline processor reports a failure for every document,
 * the item-level error handler is invoked once per index request (delete/update
 * requests bypass ingest) with the original error as the cause, and the
 * completion handler is still invoked exactly once.
 */
public void testBulkRequestExecutionWithFailures() throws Exception {
    // Build a bulk request containing a random mix of index requests (which go
    // through the pipeline) and delete/update requests (which do not).
    final String pipelineId = "_id";
    final BulkRequest bulkRequest = new BulkRequest();
    final int totalRequests = scaledRandomIntBetween(8, 64);
    int indexRequestCount = 0;
    for (int i = 0; i < totalRequests; i++) {
        final DocWriteRequest request;
        if (randomBoolean()) {
            request = randomBoolean() ? new DeleteRequest("_index", "_id") : new UpdateRequest("_index", "_id");
        } else {
            final IndexRequest indexRequest = new IndexRequest("_index").id("_id").setPipeline(pipelineId).setFinalPipeline("_none");
            indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1");
            request = indexRequest;
            indexRequestCount++;
        }
        bulkRequest.add(request);
    }

    // Mock processor whose async execute always reports the same failure.
    final Exception error = new RuntimeException();
    final CompoundProcessor processor = mock(CompoundProcessor.class);
    when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class)));
    doAnswer(args -> {
        @SuppressWarnings("unchecked")
        final BiConsumer<IngestDocument, Exception> handler = (BiConsumer) args.getArguments()[1];
        handler.accept(null, error);
        return null;
    }).when(processor).execute(any(), any());

    // Register the pipeline starting from an empty cluster state.
    final IngestService ingestService = createWithProcessors(Collections.singletonMap("mock", (factories, tag, description, config) -> processor));
    final PutPipelineRequest putRequest = new PutPipelineRequest("_id", new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON);
    final ClusterState previousClusterState = ClusterState.builder(new ClusterName("_name")).build();
    final ClusterState clusterState = IngestService.innerPut(putRequest, previousClusterState);
    ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState));

    @SuppressWarnings("unchecked")
    final BiConsumer<Integer, Exception> requestItemErrorHandler = mock(BiConsumer.class);
    @SuppressWarnings("unchecked")
    final BiConsumer<Thread, Exception> completionHandler = mock(BiConsumer.class);
    ingestService.executeBulkRequest(totalRequests, bulkRequest.requests(), requestItemErrorHandler, completionHandler, indexReq -> {
    }, Names.WRITE);

    // Only the index requests went through the failing pipeline.
    verify(requestItemErrorHandler, times(indexRequestCount)).accept(anyInt(), argThat(o -> o.getCause().equals(error)));
    verify(completionHandler, times(1)).accept(Thread.currentThread(), null);
}
Example use of org.opensearch.action.bulk.BulkRequest in the OpenSearch project (opensearch-project): class SnapshotResiliencyTests, method testSuccessfulSnapshotAndRestore.
/**
 * End-to-end happy path on the deterministic test cluster: create a repository
 * and index, optionally bulk-index random documents, take a snapshot, delete
 * the index, restore it from the snapshot, and verify both the restored
 * document count (via a track-total-hits search) and the final snapshot
 * metadata in the repository.
 *
 * The flow is driven by chained {@code StepListener}s; {@code runUntil} pumps
 * the simulated cluster until the final search-based verification completes.
 */
public void testSuccessfulSnapshotAndRestore() {
    setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10));
    String repoName = "repo";
    String snapshotName = "snapshot";
    final String index = "test";
    final int shards = randomIntBetween(1, 10);
    final int documents = randomIntBetween(0, 100);
    final TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state());
    final StepListener<CreateSnapshotResponse> createSnapshotResponseListener = new StepListener<>();
    continueOrDie(createRepoAndIndex(repoName, index, shards), createIndexResponse -> {
        // Snapshot creation is deferred until indexing (if any) has finished.
        final Runnable afterIndexing = () -> client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).execute(createSnapshotResponseListener);
        if (documents == 0) {
            afterIndexing.run();
        } else {
            // IMMEDIATE refresh so the later search sees all documents.
            final BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
            for (int i = 0; i < documents; ++i) {
                bulkRequest.add(new IndexRequest(index).source(Collections.singletonMap("foo", "bar" + i)));
            }
            final StepListener<BulkResponse> bulkResponseStepListener = new StepListener<>();
            client().bulk(bulkRequest, bulkResponseStepListener);
            continueOrDie(bulkResponseStepListener, bulkResponse -> {
                assertFalse("Failures in bulk response: " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures());
                assertEquals(documents, bulkResponse.getItems().length);
                afterIndexing.run();
            });
        }
    });
    // Snapshot done -> delete the index so the restore has something to do.
    final StepListener<AcknowledgedResponse> deleteIndexListener = new StepListener<>();
    continueOrDie(createSnapshotResponseListener, createSnapshotResponse -> client().admin().indices().delete(new DeleteIndexRequest(index), deleteIndexListener));
    // Index deleted -> restore it from the snapshot.
    final StepListener<RestoreSnapshotResponse> restoreSnapshotResponseListener = new StepListener<>();
    continueOrDie(deleteIndexListener, ignored -> client().admin().cluster().restoreSnapshot(new RestoreSnapshotRequest(repoName, snapshotName).waitForCompletion(true), restoreSnapshotResponseListener));
    // Restore done -> count documents (size(0) + trackTotalHits for an exact total).
    final StepListener<SearchResponse> searchResponseListener = new StepListener<>();
    continueOrDie(restoreSnapshotResponseListener, restoreSnapshotResponse -> {
        assertEquals(shards, restoreSnapshotResponse.getRestoreInfo().totalShards());
        client().search(new SearchRequest(index).source(new SearchSourceBuilder().size(0).trackTotalHits(true)), searchResponseListener);
    });
    final AtomicBoolean documentCountVerified = new AtomicBoolean();
    continueOrDie(searchResponseListener, r -> {
        assertEquals(documents, Objects.requireNonNull(r.getHits().getTotalHits()).value);
        documentCountVerified.set(true);
    });
    // Drive the simulated cluster until the whole chain has completed.
    runUntil(documentCountVerified::get, TimeUnit.MINUTES.toMillis(5L));
    assertNotNull(createSnapshotResponseListener.result());
    assertNotNull(restoreSnapshotResponseListener.result());
    assertTrue(documentCountVerified.get());
    // No snapshot should be left in an incomplete state.
    SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
    assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
    // Repository metadata: exactly one successful snapshot covering all shards.
    final Repository repository = masterNode.repositoriesService.repository(repoName);
    Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
    assertThat(snapshotIds, hasSize(1));
    final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next());
    assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
    assertThat(snapshotInfo.indices(), containsInAnyOrder(index));
    assertEquals(shards, snapshotInfo.successfulShards());
    assertEquals(0, snapshotInfo.failedShards());
}
Example use of org.opensearch.action.bulk.BulkRequest in the bw-calendar-engine project (Bedework): class BwIndexEsImpl, method makeEventInstances.
/**
 * Expands a recurring event into its instances — bounded by the configured
 * maximum years/instances — updating the supplied date limits with the
 * earliest start and latest end seen, and (unless {@code noIndex} is set)
 * queueing each override and each non-overridden instance into a bulk
 * indexing request which is flushed at the end.
 *
 * @param ei      master event info, including any overrides
 * @param dl      accumulator for the min start / max end across all instances
 * @param noIndex when true only the date limits are computed; nothing is indexed
 * @return false if the recurrence expands to no instances, true otherwise
 * @throws CalFacadeException on calendar-system failures
 */
private boolean makeEventInstances(final EventInfo ei, final DateLimits dl, final boolean noIndex) throws CalFacadeException {
    final BwEvent ev = ei.getEvent();
    /* Create a list of all instance date/times before overrides. */
    // Expansion limits differ for public vs. authenticated events.
    final int maxYears;
    final int maxInstances;
    if (ev.getPublick()) {
        maxYears = unauthpars.getMaxYears();
        maxInstances = unauthpars.getMaxInstances();
    } else {
        maxYears = authpars.getMaxYears();
        maxInstances = authpars.getMaxInstances();
    }
    final RecurPeriods rp = RecurUtil.getPeriods(ev, maxYears, maxInstances);
    if (rp.instances.isEmpty()) {
        // No instances for an alleged recurring event.
        return false;
        // throw new CalFacadeException(CalFacadeException.noRecurrenceInstances);
    }
    // Countdown budget shared between overrides and regular instances.
    int instanceCt = maxInstances;
    final String stzid = ev.getDtstart().getTzid();
    final boolean dateOnly = ev.getDtstart().getDateType();
    /* First build a table of overrides so we can skip these later
     */
    final Map<String, String> overrides = new HashMap<>();
    // bulkReq stays null when noIndex is set — the null check below doubles
    // as the "are we indexing?" flag.
    BulkRequest bulkReq = null;
    if (!noIndex) {
        bulkReq = new BulkRequest();
    }
    if (debug()) {
        debug("Start makeInstances");
    }
    /*
    if (!Util.isEmpty(ei.getOverrideProxies())) {
        for (BwEvent ov: ei.getOverrideProxies()) {
            overrides.put(ov.getRecurrenceId(), ov.getRecurrenceId());
        }
    }
    */
    // Index each override and record its recurrence id so the matching
    // regular instance can be skipped in the expansion loop below.
    if (!Util.isEmpty(ei.getOverrides())) {
        for (final EventInfo oei : ei.getOverrides()) {
            final BwEvent ov = oei.getEvent();
            overrides.put(ov.getRecurrenceId(), ov.getRecurrenceId());
            final String start;
            if (ov.getDtstart().getDateType()) {
                // Date-only override: recurrence id is truncated to YYYYMMDD.
                start = ov.getRecurrenceId().substring(0, 8);
            } else {
                start = ov.getRecurrenceId();
            }
            final BwDateTime rstart = BwDateTime.makeBwDateTime(ov.getDtstart().getDateType(), start, stzid);
            final BwDateTime rend = rstart.addDuration(BwDuration.makeDuration(ov.getDuration()));
            dl.checkMin(rstart);
            dl.checkMax(rend);
            if (bulkReq != null) {
                /*iresp = */
                bulkReq = addToBulk(bulkReq, oei, ItemKind.override, rstart, rend, ov.getRecurrenceId());
            }
            instanceCt--;
        }
    }
    // Expand the regular instances, skipping any that were overridden.
    for (final Period p : rp.instances) {
        String dtval = p.getStart().toString();
        if (dateOnly) {
            dtval = dtval.substring(0, 8);
        }
        final BwDateTime rstart = BwDateTime.makeBwDateTime(dateOnly, dtval, stzid);
        if (overrides.get(rstart.getDate()) != null) {
            // Overrides indexed separately - skip this instance.
            continue;
        }
        final String recurrenceId = rstart.getDate();
        dtval = p.getEnd().toString();
        if (dateOnly) {
            dtval = dtval.substring(0, 8);
        }
        final BwDateTime rend = BwDateTime.makeBwDateTime(dateOnly, dtval, stzid);
        dl.checkMin(rstart);
        dl.checkMax(rend);
        if (bulkReq != null) {
            /*iresp = */
            // NOTE(review): "entity" here is presumably a statically imported
            // ItemKind.entity (cf. ItemKind.override above) — confirm.
            bulkReq = addToBulk(bulkReq, ei, entity, rstart, rend, recurrenceId);
        }
        instanceCt--;
        if (instanceCt == 0) {
            // That's all you're getting from me
            break;
        }
    }
    // Flush whatever was queued (skipped entirely when noIndex or nothing added).
    if ((bulkReq != null) && (bulkReq.estimatedSizeInBytes() > 0)) {
        flushBulkReq(bulkReq);
    }
    return true;
}
Example use of org.opensearch.action.bulk.BulkRequest in the fess-crawler project (CodeLibs): class FesenClient, method deleteByQuery.
/**
 * Deletes every document matching the query from the given index by scrolling
 * through the search results page by page and issuing a bulk delete for each
 * page. Scroll contexts are released as soon as they are superseded, and the
 * final one is always released in the finally block.
 *
 * @param index        index to search and delete from
 * @param type         unused; kept for interface compatibility
 * @param queryBuilder query selecting the documents to delete
 * @return number of documents deleted
 */
public int deleteByQuery(final String index, final String type, final QueryBuilder queryBuilder) {
    int deleted = 0;
    SearchResponse searchResponse = get(c -> c.prepareSearch(index).setScroll(scrollForDelete).setSize(sizeForDelete).setQuery(queryBuilder).execute());
    String currentScrollId = searchResponse.getScrollId();
    try {
        while (currentScrollId != null) {
            final SearchHit[] hits = searchResponse.getHits().getHits();
            if (hits.length == 0) {
                // Exhausted the scroll — nothing left to delete.
                break;
            }
            deleted += hits.length;
            // One bulk request deletes the whole current page of hits.
            final BulkResponse bulkResponse = get(c -> {
                final BulkRequestBuilder bulkRequest = client.prepareBulk();
                for (final SearchHit hit : hits) {
                    bulkRequest.add(client.prepareDelete().setIndex(hit.getIndex()).setId(hit.getId()));
                }
                return bulkRequest.execute();
            });
            if (bulkResponse.hasFailures()) {
                throw new EsAccessException(bulkResponse.buildFailureMessage());
            }
            // Fetch the next page; the server may rotate the scroll id, in
            // which case the superseded one is released immediately.
            final String previousScrollId = currentScrollId;
            searchResponse = get(c -> c.prepareSearchScroll(previousScrollId).setScroll(scrollForDelete).execute());
            currentScrollId = searchResponse.getScrollId();
            if (!previousScrollId.equals(currentScrollId)) {
                clearScroll(previousScrollId);
            }
        }
    } finally {
        clearScroll(currentScrollId);
    }
    return deleted;
}
Aggregations