Example usage of org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse in the elastic/elasticsearch project:
class TransportRethrottleActionTests, method testRethrottleWithAllSucceeded.
/**
 * Rethrottling after every slice has finished should immediately report the
 * successful statuses that were assembled from the per-slice responses.
 */
public void testRethrottleWithAllSucceeded() {
    List<BulkByScrollTask.StatusOrException> sliceStatuses = new ArrayList<>(slices);
    for (int slice = 0; slice < slices; slice++) {
        boolean lastSlice = slice == slices - 1;
        // Only the final slice's listener is expected to be invoked; all
        // earlier listeners must never fire.
        @SuppressWarnings("unchecked")
        ActionListener<BulkByScrollResponse> sliceListener = lastSlice ? mock(ActionListener.class) : neverCalled();
        BulkByScrollTask.Status sliceStatus = believeableCompletedStatus(slice);
        task.onSliceResponse(sliceListener, slice,
                new BulkByScrollResponse(timeValueMillis(10), sliceStatus, emptyList(), emptyList(), false));
        if (lastSlice) {
            // The whole thing succeeded so we should have got the success
            captureResponse(BulkByScrollResponse.class, sliceListener).getStatus();
        }
        sliceStatuses.add(new BulkByScrollTask.StatusOrException(sliceStatus));
    }
    // There are no async tasks to simulate because the listener is called for us.
    rethrottleTestCase(0, listener -> {
    }, expectSuccessfulRethrottleWithStatuses(sliceStatuses));
}
Example usage of org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse in the elastic/elasticsearch project:
class DeleteByQueryConcurrentTests, method testConcurrentDeleteByQueriesOnSameDocs.
/**
 * Several delete-by-query requests racing over the same documents must,
 * between them, delete every document exactly once.
 */
public void testConcurrentDeleteByQueriesOnSameDocs() throws Throwable {
    final long docCount = randomIntBetween(50, 100);
    List<IndexRequestBuilder> indexRequests = new ArrayList<>();
    for (int doc = 0; doc < docCount; doc++) {
        indexRequests.add(client().prepareIndex("test", "doc", String.valueOf(doc)).setSource("foo", "bar"));
    }
    indexRandom(true, true, true, indexRequests);

    final CountDownLatch start = new CountDownLatch(1);
    final MatchQueryBuilder query = matchQuery("foo", "bar");
    final AtomicLong deleted = new AtomicLong(0);
    final Thread[] threads = new Thread[scaledRandomIntBetween(2, 9)];
    for (int t = 0; t < threads.length; t++) {
        threads[t] = new Thread(() -> {
            try {
                // All workers fire at once for maximum contention.
                start.await();
                BulkByScrollResponse response = deleteByQuery().source("test").filter(query).refresh(true).get();
                // Some deletions might fail due to version conflict, but
                // what matters here is the total of successful deletions.
                deleted.addAndGet(response.getDeleted());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        threads[t].start();
    }
    start.countDown();
    for (Thread thread : threads) {
        thread.join();
    }
    assertHitCount(client().prepareSearch("test").setSize(0).get(), 0L);
    assertThat(deleted.get(), equalTo(docCount));
}
Example usage of org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse in the elastic/elasticsearch project:
class ReindexBasicTests, method testCopyManyWithSlices.
/**
 * A sliced reindex copies all of the documents, and a size-capped sliced
 * reindex copies at most the requested number.
 */
public void testCopyManyWithSlices() throws Exception {
    int sliceCount = between(2, 10);
    int docCount = between(150, 500);
    List<IndexRequestBuilder> sourceDocs = new ArrayList<>();
    for (int i = 0; i < docCount; i++) {
        sourceDocs.add(client().prepareIndex("source", "test", Integer.toString(i)).setSource("foo", "a"));
    }
    indexRandom(true, sourceDocs);
    assertHitCount(client().prepareSearch("source").setSize(0).get(), docCount);

    // Copy every doc, with a tiny batch size so more than one batch is needed.
    ReindexRequestBuilder copyAll = reindex().source("source").destination("dest", "all").refresh(true).setSlices(sliceCount);
    copyAll.source().setSize(5);
    assertThat(copyAll.get(), matcher().created(docCount).batches(greaterThanOrEqualTo(docCount / 5)).slices(hasSize(sliceCount)));
    assertHitCount(client().prepareSearch("dest").setTypes("all").setSize(0).get(), docCount);

    // Copy only some of the docs by capping the request's real "size".
    int half = docCount / 2;
    ReindexRequestBuilder copyHalf = reindex().source("source").destination("dest", "half").refresh(true).setSlices(sliceCount);
    copyHalf.source().setSize(5); // small batch size again, forcing multiple batches
    copyHalf.size(half);          // the real "size" of the request
    BulkByScrollResponse halfResponse = copyHalf.get();
    assertThat(halfResponse, matcher().created(lessThanOrEqualTo((long) half)).slices(hasSize(sliceCount)));
    assertHitCount(client().prepareSearch("dest").setTypes("half").setSize(0).get(), halfResponse.getCreated());
}
Example usage of org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse in the elastic/elasticsearch project:
class ReindexFailureTests, method testFailuresCauseAbortDefault.
/**
 * By default a bulk failure aborts the reindex request on the first batch
 * that encounters it.
 */
public void testFailuresCauseAbortDefault() throws Exception {
    /*
     * Create the destination index such that the copy will cause a mapping
     * conflict on every request.
     */
    indexRandom(true, client().prepareIndex("dest", "test", "test").setSource("test", 10));
    indexDocs(100);

    ReindexRequestBuilder reindexRequest = reindex().source("source").destination("dest");
    /*
     * Set the search size to something very small to cause there to be
     * multiple batches for this request so we can assert that we abort on
     * the first batch.
     */
    reindexRequest.source().setSize(1);

    BulkByScrollResponse result = reindexRequest.get();
    assertThat(result, matcher().batches(1).failures(both(greaterThan(0)).and(lessThanOrEqualTo(maximumNumberOfShards()))));
    for (Failure failure : result.getBulkFailures()) {
        assertThat(failure.getMessage(), containsString("NumberFormatException[For input string: \"words words\"]"));
    }
}
Example usage of org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse in the elastic/elasticsearch project:
class ReindexFailureTests, method testAbortOnVersionConflict.
/**
 * With abortOnVersionConflict set, a single version conflict stops the
 * reindex after the first batch while the other documents still get created.
 */
public void testAbortOnVersionConflict() throws Exception {
    // Just put something in the way of the copy.
    indexRandom(true, client().prepareIndex("dest", "test", "1").setSource("test", "test"));
    indexDocs(100);

    ReindexRequestBuilder reindexRequest = reindex().source("source").destination("dest").abortOnVersionConflict(true);
    // CREATE will cause the conflict to prevent the write.
    reindexRequest.destination().setOpType(CREATE);

    BulkByScrollResponse result = reindexRequest.get();
    assertThat(result, matcher().batches(1).versionConflicts(1).failures(1).created(99));
    for (Failure failure : result.getBulkFailures()) {
        assertThat(failure.getMessage(), containsString("VersionConflictEngineException[[test]["));
    }
}
Aggregations