Use of org.elasticsearch.action.ShardOperationFailedException in project elasticsearch by elastic.
From the class IndexStatsIT, method testConcurrentIndexingAndStatsRequests.
/**
 * Test that we can safely concurrently index and get stats. This test was inspired by a serialization issue that arose due to a race
 * getting doc stats during heavy indexing. The race could lead to deleted docs being negative which would then be serialized as a
 * variable-length long. Since serialization of negative longs using a variable-length format was unsupported
 * ({@link org.elasticsearch.common.io.stream.StreamOutput#writeVLong(long)}), the stream would become corrupted. Here, we want to test
 * that we can continue to get stats while indexing.
 */
public void testConcurrentIndexingAndStatsRequests() throws BrokenBarrierException, InterruptedException, ExecutionException {
    final AtomicInteger idGenerator = new AtomicInteger();
    final int numberOfIndexingThreads = Runtime.getRuntime().availableProcessors();
    final int numberOfStatsThreads = 4 * numberOfIndexingThreads;
    final CyclicBarrier barrier = new CyclicBarrier(1 + numberOfIndexingThreads + numberOfStatsThreads);
    final AtomicBoolean stop = new AtomicBoolean();
    final List<Thread> threads = new ArrayList<>(numberOfIndexingThreads + numberOfStatsThreads);
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicBoolean failed = new AtomicBoolean();
    final AtomicReference<List<ShardOperationFailedException>> shardFailures = new AtomicReference<>(new CopyOnWriteArrayList<>());
    final AtomicReference<List<Exception>> executionFailures = new AtomicReference<>(new CopyOnWriteArrayList<>());
    // increasing the number of shards increases the number of chances any one stats request will hit a race
    final CreateIndexRequest createIndexRequest =
        new CreateIndexRequest("test", Settings.builder().put("index.number_of_shards", 10).build());
    client().admin().indices().create(createIndexRequest).get();
    // start threads that will index concurrently with stats requests
    for (int i = 0; i < numberOfIndexingThreads; i++) {
        final Thread thread = new Thread(() -> {
            try {
                barrier.await();
            } catch (final BrokenBarrierException | InterruptedException e) {
                failed.set(true);
                executionFailures.get().add(e);
                latch.countDown();
            }
            while (!stop.get()) {
                final String id = Integer.toString(idGenerator.incrementAndGet());
                final IndexResponse response = client().prepareIndex("test", "type", id).setSource("{}", XContentType.JSON).get();
                assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED));
            }
        });
        thread.setName("indexing-" + i);
        threads.add(thread);
        thread.start();
    }
    // start threads that will get stats concurrently with indexing
    for (int i = 0; i < numberOfStatsThreads; i++) {
        final Thread thread = new Thread(() -> {
            try {
                barrier.await();
            } catch (final BrokenBarrierException | InterruptedException e) {
                failed.set(true);
                executionFailures.get().add(e);
                latch.countDown();
            }
            // request all stats types; an empty indices array targets every index
            final IndicesStatsRequest request = new IndicesStatsRequest();
            request.all();
            request.indices(new String[0]);
            while (!stop.get()) {
                try {
                    final IndicesStatsResponse response = client().admin().indices().stats(request).get();
                    if (response.getFailedShards() > 0) {
                        failed.set(true);
                        shardFailures.get().addAll(Arrays.asList(response.getShardFailures()));
                        latch.countDown();
                    }
                } catch (final ExecutionException | InterruptedException e) {
                    failed.set(true);
                    executionFailures.get().add(e);
                    latch.countDown();
                }
            }
        });
        thread.setName("stats-" + i);
        threads.add(thread);
        thread.start();
    }
    // release the hounds
    barrier.await();
    // wait for a failure, or for fifteen seconds to elapse
    latch.await(15, TimeUnit.SECONDS);
    // stop all threads and wait for them to complete
    stop.set(true);
    for (final Thread thread : threads) {
        thread.join();
    }
    assertThat(shardFailures.get(), emptyCollectionOf(ShardOperationFailedException.class));
    assertThat(executionFailures.get(), emptyCollectionOf(Exception.class));
}
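The Javadoc above describes the failure mode this test guards against: a racy doc-stats computation could produce a negative deleted-docs count, which was then written with a variable-length long encoding that only supports non-negative values. Below is a minimal, self-contained sketch of why that corrupts a stream. It uses a generic 7-bit varint encoder in the spirit of StreamOutput#writeVLong; the class VLongSketch and its method are illustrative stand-ins, not the Elasticsearch implementation.

import java.io.ByteArrayOutputStream;

public final class VLongSketch {

    // Simplified 7-bit variable-length encoder: each byte carries 7 payload bits
    // plus a continuation flag in the high bit.
    static byte[] writeVLong(long i) {
        final ByteArrayOutputStream out = new ByteArrayOutputStream();
        while ((i & ~0x7FL) != 0) {
            out.write((byte) ((i & 0x7F) | 0x80)); // low 7 bits, continuation bit set
            i >>>= 7;                              // unsigned shift; a negative input behaves like a huge unsigned value
        }
        out.write((byte) i);
        return out.toByteArray();
    }

    public static void main(String[] args) {
        // A non-negative long needs at most nine bytes in this encoding (ceil(63 / 7) = 9).
        System.out.println("42 encodes to " + writeVLong(42L).length + " byte(s)");
        // A negative value (such as a deleted-docs counter that raced below zero) needs ten bytes;
        // a reader built around the nine-byte bound leaves stray bytes in the stream, and everything
        // deserialized after them is corrupted.
        System.out.println("-1 encodes to " + writeVLong(-1L).length + " byte(s)");
    }
}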
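Separately, the thread-coordination machinery in the test, a CyclicBarrier sized for every worker plus the coordinating thread, an AtomicBoolean stop flag, and a CountDownLatch that any worker can trip on failure, reduces to the small pattern sketched below. The Elasticsearch indexing and stats calls are elided; ConcurrentRunSketch, workers, and failure are illustrative names, and the 15-second budget simply mirrors the test above.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public final class ConcurrentRunSketch {

    public static void main(String[] args) throws Exception {
        final int workers = 4;
        final CyclicBarrier barrier = new CyclicBarrier(1 + workers); // all workers plus the coordinating thread
        final AtomicBoolean stop = new AtomicBoolean();
        final CountDownLatch failure = new CountDownLatch(1);
        final List<Thread> threads = new ArrayList<>(workers);

        for (int i = 0; i < workers; i++) {
            final Thread thread = new Thread(() -> {
                try {
                    barrier.await(); // start all workers at the same instant
                    while (!stop.get()) {
                        // stand-in for real work (indexing or a stats request); on failure,
                        // a worker would record the error and call failure.countDown()
                        Thread.onSpinWait();
                    }
                } catch (final BrokenBarrierException | InterruptedException e) {
                    failure.countDown();
                }
            });
            thread.setName("worker-" + i);
            threads.add(thread);
            thread.start();
        }

        barrier.await();                     // release the workers
        failure.await(15, TimeUnit.SECONDS); // wait for a failure or for the time budget to elapse
        stop.set(true);                      // then stop all workers and wait for them to finish
        for (final Thread thread : threads) {
            thread.join();
        }
    }
}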