use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkItemResponse in project elasticsearch by elastic.
The class TransportNoopBulkAction, method doExecute.
@Override
protected void doExecute(BulkRequest request, ActionListener<BulkResponse> listener) {
    final int itemCount = request.requests().size();
    // simulate at least a realistic amount of data that gets serialized
    BulkItemResponse[] bulkItemResponses = new BulkItemResponse[itemCount];
    for (int idx = 0; idx < itemCount; idx++) {
        bulkItemResponses[idx] = ITEM_RESPONSE;
    }
    listener.onResponse(new BulkResponse(bulkItemResponses, 0));
}
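For illustration only, here is a minimal sketch of a listener that could consume the response built above. The class name CountingListener is an assumption, not part of the noop plugin; the sketch relies only on ActionListener and the BulkResponse.getItems() accessor that appear on this page.

// Hypothetical consumer of the noop response (CountingListener is an assumed name).
class CountingListener implements ActionListener<BulkResponse> {

    @Override
    public void onResponse(BulkResponse response) {
        // Every slot of the array was filled with the shared ITEM_RESPONSE constant above.
        System.out.println("noop bulk returned " + response.getItems().length + " item responses");
    }

    @Override
    public void onFailure(Exception e) {
        // The noop action above never calls this, but ActionListener requires an implementation.
        e.printStackTrace();
    }
}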
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkItemResponse in project elasticsearch by elastic.
The class AbstractAsyncBulkByScrollAction, method onBulkResponse.
/**
* Processes bulk responses, accounting for failures.
*/
void onBulkResponse(TimeValue thisBatchStartTime, BulkResponse response) {
    try {
        List<Failure> failures = new ArrayList<Failure>();
        Set<String> destinationIndicesThisBatch = new HashSet<>();
        for (BulkItemResponse item : response) {
            if (item.isFailed()) {
                recordFailure(item.getFailure(), failures);
                continue;
            }
            switch (item.getOpType()) {
                case CREATE:
                case INDEX:
                    if (item.getResponse().getResult() == DocWriteResponse.Result.CREATED) {
                        task.countCreated();
                    } else {
                        task.countUpdated();
                    }
                    break;
                case UPDATE:
                    task.countUpdated();
                    break;
                case DELETE:
                    task.countDeleted();
                    break;
            }
            // Track the indexes we've seen so we can refresh them if requested
            destinationIndicesThisBatch.add(item.getIndex());
        }
        if (task.isCancelled()) {
            finishHim(null);
            return;
        }
        addDestinationIndices(destinationIndicesThisBatch);
        if (false == failures.isEmpty()) {
            refreshAndFinish(unmodifiableList(failures), emptyList(), false);
            return;
        }
        if (mainRequest.getSize() != SIZE_ALL_MATCHES && task.getSuccessfullyProcessed() >= mainRequest.getSize()) {
            // We've processed all the requested docs.
            refreshAndFinish(emptyList(), emptyList(), false);
            return;
        }
        startNextScroll(thisBatchStartTime, response.getItems().length);
    } catch (Exception t) {
        finishHim(t);
    }
}
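The accounting switch above can be read in isolation. The helper below is an illustrative distillation, not part of AbstractAsyncBulkByScrollAction; classify is an assumed name, and it reuses only the BulkItemResponse and DocWriteResponse accessors already shown in the method.

// Illustrative distillation of the per-item accounting above (classify is an assumed helper name).
static String classify(BulkItemResponse item) {
    if (item.isFailed()) {
        return "failed";
    }
    switch (item.getOpType()) {
        case CREATE:
        case INDEX:
            // An index/create op only counts as "created" when the engine reports CREATED.
            return item.getResponse().getResult() == DocWriteResponse.Result.CREATED ? "created" : "updated";
        case UPDATE:
            return "updated";
        case DELETE:
            return "deleted";
        default:
            return "unknown";
    }
}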
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkItemResponse in project elasticsearch by elastic.
The class AsyncBulkByScrollActionTests, method testBulkResponseSetsLotsOfStatus.
public void testBulkResponseSetsLotsOfStatus() {
    testRequest.setAbortOnVersionConflict(false);
    int maxBatches = randomIntBetween(0, 100);
    long versionConflicts = 0;
    long created = 0;
    long updated = 0;
    long deleted = 0;
    for (int batches = 0; batches < maxBatches; batches++) {
        BulkItemResponse[] responses = new BulkItemResponse[randomIntBetween(0, 100)];
        for (int i = 0; i < responses.length; i++) {
            ShardId shardId = new ShardId(new Index("name", "uid"), 0);
            if (rarely()) {
                versionConflicts++;
                responses[i] = new BulkItemResponse(i, randomFrom(DocWriteRequest.OpType.values()),
                    new Failure(shardId.getIndexName(), "type", "id" + i,
                        new VersionConflictEngineException(shardId, "type", "id", "test")));
                continue;
            }
            boolean createdResponse;
            DocWriteRequest.OpType opType;
            switch (randomIntBetween(0, 2)) {
                case 0:
                    createdResponse = true;
                    opType = DocWriteRequest.OpType.CREATE;
                    created++;
                    break;
                case 1:
                    createdResponse = false;
                    opType = randomFrom(DocWriteRequest.OpType.INDEX, DocWriteRequest.OpType.UPDATE);
                    updated++;
                    break;
                case 2:
                    createdResponse = false;
                    opType = DocWriteRequest.OpType.DELETE;
                    deleted++;
                    break;
                default:
                    throw new RuntimeException("Bad scenario");
            }
            responses[i] = new BulkItemResponse(i, opType,
                new IndexResponse(shardId, "type", "id" + i, randomInt(20), randomInt(), createdResponse));
        }
        new DummyAsyncBulkByScrollAction().onBulkResponse(timeValueNanos(System.nanoTime()), new BulkResponse(responses, 0));
        assertEquals(versionConflicts, testTask.getStatus().getVersionConflicts());
        assertEquals(updated, testTask.getStatus().getUpdated());
        assertEquals(created, testTask.getStatus().getCreated());
        assertEquals(deleted, testTask.getStatus().getDeleted());
        assertEquals(versionConflicts, testTask.getStatus().getVersionConflicts());
    }
}
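The same construction pattern, stripped of the randomized loop, looks like the sketch below. It reuses only the constructors already visible in the test above; constructor arities differ across Elasticsearch versions, and twoItemResponse is an assumed helper name, so treat this as a sketch rather than canonical API usage.

// Sketch reusing the constructors from the test above: one conflicted item, one successful index item.
static BulkResponse twoItemResponse() {
    ShardId shardId = new ShardId(new Index("name", "uid"), 0);
    BulkItemResponse conflicted = new BulkItemResponse(0, DocWriteRequest.OpType.INDEX,
        new Failure(shardId.getIndexName(), "type", "id0",
            new VersionConflictEngineException(shardId, "type", "id0", "test")));
    BulkItemResponse indexed = new BulkItemResponse(1, DocWriteRequest.OpType.INDEX,
        new IndexResponse(shardId, "type", "id1", 0, 1, true));
    // The second BulkResponse argument is the took time in milliseconds, as in the test above.
    return new BulkResponse(new BulkItemResponse[] { conflicted, indexed }, 0);
}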
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkItemResponse in project elasticsearch by elastic.
The class AsyncBulkByScrollActionTests, method testBulkFailuresAbortRequest.
/**
* Mimics bulk indexing failures.
*/
public void testBulkFailuresAbortRequest() throws Exception {
    Failure failure = new Failure("index", "type", "id", new RuntimeException("test"));
    DummyAsyncBulkByScrollAction action = new DummyAsyncBulkByScrollAction();
    BulkResponse bulkResponse = new BulkResponse(
        new BulkItemResponse[] { new BulkItemResponse(0, DocWriteRequest.OpType.CREATE, failure) }, randomLong());
    action.onBulkResponse(timeValueNanos(System.nanoTime()), bulkResponse);
    BulkByScrollResponse response = listener.get();
    assertThat(response.getBulkFailures(), contains(failure));
    assertThat(response.getSearchFailures(), empty());
    assertNull(response.getReasonCancelled());
}
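If a caller wanted to inspect such a response rather than assert on it, the failure details can be pulled out with the same getters used elsewhere on this page. A minimal sketch, where failureMessages is an assumed helper name:

// Assumed helper: collects the failure message of every failed item in a BulkResponse.
static List<String> failureMessages(BulkResponse bulkResponse) {
    List<String> messages = new ArrayList<>();
    for (BulkItemResponse item : bulkResponse.getItems()) {
        if (item.isFailed()) {
            messages.add(item.getFailureMessage());
        }
    }
    return messages;
}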
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkItemResponse in project elasticsearch by elastic.
The class ExceptionRetryIT, method testRetryDueToExceptionOnNetworkLayer.
/**
* Tests the retry mechanism when indexing. If an exception occurs while indexing, the indexing request is tried again before
* finally failing. If auto-generated ids are used, this must not lead to duplicate ids.
* See https://github.com/elastic/elasticsearch/issues/8788
*/
public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, InterruptedException, IOException {
    final AtomicBoolean exceptionThrown = new AtomicBoolean(false);
    int numDocs = scaledRandomIntBetween(100, 1000);
    Client client = internalCluster().coordOnlyNodeClient();
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    NodeStats unluckyNode = randomFrom(nodeStats.getNodes().stream().filter((s) -> s.getNode().isDataNode()).collect(Collectors.toList()));
    assertAcked(client().admin().indices().prepareCreate("index")
        .setSettings(Settings.builder().put("index.number_of_replicas", 1).put("index.number_of_shards", 5)));
    ensureGreen("index");
    logger.info("unlucky node: {}", unluckyNode.getNode());
    // create a transport service that throws a ConnectTransportException for one bulk request and therefore triggers a retry.
    for (NodeStats dataNode : nodeStats.getNodes()) {
        MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
        mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
            new MockTransportService.DelegateTransport(mockTransportService.original()) {

                @Override
                protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request,
                        TransportRequestOptions options) throws IOException {
                    super.sendRequest(connection, requestId, action, request, options);
                    if (action.equals(TransportShardBulkAction.ACTION_NAME) && exceptionThrown.compareAndSet(false, true)) {
                        logger.debug("Throw ConnectTransportException");
                        throw new ConnectTransportException(connection.getNode(), action);
                    }
                }
            });
    }
    BulkRequestBuilder bulkBuilder = client.prepareBulk();
    for (int i = 0; i < numDocs; i++) {
        XContentBuilder doc = jsonBuilder().startObject().field("foo", "bar").endObject();
        bulkBuilder.add(client.prepareIndex("index", "type").setSource(doc));
    }
    BulkResponse response = bulkBuilder.get();
    if (response.hasFailures()) {
        for (BulkItemResponse singleIndexResponse : response.getItems()) {
            if (singleIndexResponse.isFailed()) {
                fail("None of the bulk items should fail but got " + singleIndexResponse.getFailureMessage());
            }
        }
    }
    refresh();
    SearchResponse searchResponse = client().prepareSearch("index").setSize(numDocs * 2).addStoredField("_id").get();
    Set<String> uniqueIds = new HashSet<>();
    long dupCounter = 0;
    boolean foundDuplicateAlready = false;
    for (int i = 0; i < searchResponse.getHits().getHits().length; i++) {
        if (!uniqueIds.add(searchResponse.getHits().getHits()[i].getId())) {
            if (!foundDuplicateAlready) {
                SearchResponse dupIdResponse = client().prepareSearch("index")
                    .setQuery(termQuery("_id", searchResponse.getHits().getHits()[i].getId())).setExplain(true).get();
                assertThat(dupIdResponse.getHits().getTotalHits(), greaterThan(1L));
                logger.info("found a duplicate id:");
                for (SearchHit hit : dupIdResponse.getHits()) {
                    logger.info("Doc {} was found on shard {}", hit.getId(), hit.getShard().getShardId());
                }
                logger.info("will not print anymore in case more duplicates are found.");
                foundDuplicateAlready = true;
            }
            dupCounter++;
        }
    }
    assertSearchResponse(searchResponse);
    assertThat(dupCounter, equalTo(0L));
    assertHitCount(searchResponse, numDocs);
    IndicesStatsResponse index = client().admin().indices().prepareStats("index").clear().setSegments(true).get();
    IndexStats indexStats = index.getIndex("index");
    long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE;
    for (IndexShardStats indexShardStats : indexStats) {
        for (ShardStats shardStats : indexShardStats) {
            SegmentsStats segments = shardStats.getStats().getSegments();
            maxUnsafeAutoIdTimestamp = Math.max(maxUnsafeAutoIdTimestamp, segments.getMaxUnsafeAutoIdTimestamp());
        }
    }
    assertTrue("exception must have been thrown otherwise setup is broken", exceptionThrown.get());
    assertTrue("maxUnsafeAutoIdTimestamp must be > than 0 we have at least one retry", maxUnsafeAutoIdTimestamp > -1);
}
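The duplicate-id bookkeeping in the middle of the test stands on its own: an id that fails to be added to the set has been seen before. A stand-alone sketch of that check, where countDuplicates is an assumed helper name:

// Assumed helper mirroring the duplicate-id check above: counts ids seen more than once.
static long countDuplicates(Iterable<String> ids) {
    Set<String> seen = new HashSet<>();
    long duplicates = 0;
    for (String id : ids) {
        if (!seen.add(id)) {
            // add() returns false when the id was already present, i.e. a duplicate.
            duplicates++;
        }
    }
    return duplicates;
}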