Use of org.elasticsearch.action.bulk.BulkItemResponse in project rssriver by dadoonet.
In the class RssRiver, method start():
@Override
public void start() {
    if (logger.isInfoEnabled())
        logger.info("Starting rss stream");
    try {
        client.admin().indices().prepareCreate(indexName).execute().actionGet();
    } catch (Exception e) {
        if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
            // that's fine
        } else if (ExceptionsHelper.unwrapCause(e) instanceof ClusterBlockException) {
            // ok, not recovered yet..., let's start indexing and hope we
            // recover by the first bulk
            // TODO: a smarter logic can be to register a cluster event
            // listener here, and only start sampling when the block is
            // removed...
        } else {
            logger.warn("failed to create index [{}], disabling river...", e, indexName);
            return;
        }
    }
    try {
        pushMapping(indexName, typeName, RssToJson.buildRssMapping(typeName, raw));
    } catch (Exception e) {
        logger.warn("failed to create mapping for [{}/{}], disabling river...", e, indexName, typeName);
        return;
    }
    // Creating bulk processor
    this.bulkProcessor = BulkProcessor.builder(client, new BulkProcessor.Listener() {

        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            logger.debug("Going to execute new bulk composed of {} actions", request.numberOfActions());
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            logger.debug("Executed bulk composed of {} actions", request.numberOfActions());
            if (response.hasFailures()) {
                logger.warn("There were failures while executing bulk: {}", response.buildFailureMessage());
                if (logger.isDebugEnabled()) {
                    for (BulkItemResponse item : response.getItems()) {
                        if (item.isFailed()) {
                            logger.debug("Error for {}/{}/{} for {} operation: {}", item.getIndex(), item.getType(), item.getId(), item.getOpType(), item.getFailureMessage());
                        }
                    }
                }
            }
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            logger.warn("Error executing bulk", failure);
        }
    }).setBulkActions(bulkSize).setConcurrentRequests(maxConcurrentBulk).setFlushInterval(bulkFlushInterval).build();

    // We create as many threads as there are feeds
    threads = new ArrayList<Thread>(feedsDefinition.size());
    int threadNumber = 0;
    for (RssRiverFeedDefinition feedDefinition : feedsDefinition) {
        Thread thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "rss_slurper_" + threadNumber).newThread(new RSSParser(feedDefinition));
        thread.start();
        threads.add(thread);
        threadNumber++;
    }
}
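The start() method above only builds the BulkProcessor; documents reach the listener callbacks once they are queued through add(). A minimal sketch of that feeding step follows (hedged: the field name, document id, and the close() call on river shutdown are illustrative assumptions, not taken from the river code; it assumes the same indexName/typeName fields and imports as the method above):

// Queue a document; the processor flushes according to bulkSize / bulkFlushInterval.
Map<String, Object> entry = new HashMap<>();
entry.put("title", "Sample RSS entry");
bulkProcessor.add(new IndexRequest(indexName, typeName, "hypothetical-id").source(entry));
// On river shutdown, flush any pending actions and release resources.
bulkProcessor.close();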
Use of org.elasticsearch.action.bulk.BulkItemResponse in project elasticsearch by elastic.
In the class SimpleRoutingIT, method testRequiredRoutingBulk():
public void testRequiredRoutingBulk() throws Exception {
    client().admin().indices().prepareCreate("test").addAlias(new Alias("alias")).addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true).endObject().endObject().endObject()).execute().actionGet();
    ensureGreen();
    {
        BulkResponse bulkResponse = client().prepareBulk().add(Requests.indexRequest(indexOrAlias()).type("type1").id("1").source(Requests.INDEX_CONTENT_TYPE, "field", "value")).execute().actionGet();
        assertThat(bulkResponse.getItems().length, equalTo(1));
        assertThat(bulkResponse.hasFailures(), equalTo(true));
        for (BulkItemResponse bulkItemResponse : bulkResponse) {
            assertThat(bulkItemResponse.isFailed(), equalTo(true));
            assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.INDEX));
            assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST));
            assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class));
            assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]"));
        }
    }
    {
        BulkResponse bulkResponse = client().prepareBulk().add(Requests.indexRequest(indexOrAlias()).type("type1").id("1").routing("0").source(Requests.INDEX_CONTENT_TYPE, "field", "value")).execute().actionGet();
        assertThat(bulkResponse.hasFailures(), equalTo(false));
    }
    {
        BulkResponse bulkResponse = client().prepareBulk().add(new UpdateRequest(indexOrAlias(), "type1", "1").doc(Requests.INDEX_CONTENT_TYPE, "field", "value2")).execute().actionGet();
        assertThat(bulkResponse.getItems().length, equalTo(1));
        assertThat(bulkResponse.hasFailures(), equalTo(true));
        for (BulkItemResponse bulkItemResponse : bulkResponse) {
            assertThat(bulkItemResponse.isFailed(), equalTo(true));
            assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.UPDATE));
            assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST));
            assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class));
            assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]"));
        }
    }
    {
        BulkResponse bulkResponse = client().prepareBulk().add(new UpdateRequest(indexOrAlias(), "type1", "1").doc(Requests.INDEX_CONTENT_TYPE, "field", "value2").routing("0")).execute().actionGet();
        assertThat(bulkResponse.hasFailures(), equalTo(false));
    }
    {
        BulkResponse bulkResponse = client().prepareBulk().add(Requests.deleteRequest(indexOrAlias()).type("type1").id("1")).execute().actionGet();
        assertThat(bulkResponse.getItems().length, equalTo(1));
        assertThat(bulkResponse.hasFailures(), equalTo(true));
        for (BulkItemResponse bulkItemResponse : bulkResponse) {
            assertThat(bulkItemResponse.isFailed(), equalTo(true));
            assertThat(bulkItemResponse.getOpType(), equalTo(DocWriteRequest.OpType.DELETE));
            assertThat(bulkItemResponse.getFailure().getStatus(), equalTo(RestStatus.BAD_REQUEST));
            assertThat(bulkItemResponse.getFailure().getCause(), instanceOf(RoutingMissingException.class));
            assertThat(bulkItemResponse.getFailureMessage(), containsString("routing is required for [test]/[type1]/[1]"));
        }
    }
    {
        BulkResponse bulkResponse = client().prepareBulk().add(Requests.deleteRequest(indexOrAlias()).type("type1").id("1").routing("0")).execute().actionGet();
        assertThat(bulkResponse.getItems().length, equalTo(1));
        assertThat(bulkResponse.hasFailures(), equalTo(false));
    }
}
Use of org.elasticsearch.action.bulk.BulkItemResponse in project elasticsearch by elastic.
In the class ExceptionRetryIT, method testRetryDueToExceptionOnNetworkLayer():
/**
 * Tests the retry mechanism when indexing. If an exception occurs during indexing, the request is retried before finally
 * failing. If auto-generated ids are used, this must not lead to duplicate ids.
 * See https://github.com/elastic/elasticsearch/issues/8788
 */
public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, InterruptedException, IOException {
    final AtomicBoolean exceptionThrown = new AtomicBoolean(false);
    int numDocs = scaledRandomIntBetween(100, 1000);
    Client client = internalCluster().coordOnlyNodeClient();
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    NodeStats unluckyNode = randomFrom(nodeStats.getNodes().stream().filter((s) -> s.getNode().isDataNode()).collect(Collectors.toList()));
    assertAcked(client().admin().indices().prepareCreate("index").setSettings(Settings.builder().put("index.number_of_replicas", 1).put("index.number_of_shards", 5)));
    ensureGreen("index");
    logger.info("unlucky node: {}", unluckyNode.getNode());
    // create a transport service that throws a ConnectTransportException for one bulk request and therefore triggers a retry.
    for (NodeStats dataNode : nodeStats.getNodes()) {
        MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
        mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), new MockTransportService.DelegateTransport(mockTransportService.original()) {

            @Override
            protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException {
                super.sendRequest(connection, requestId, action, request, options);
                if (action.equals(TransportShardBulkAction.ACTION_NAME) && exceptionThrown.compareAndSet(false, true)) {
                    logger.debug("Throw ConnectTransportException");
                    throw new ConnectTransportException(connection.getNode(), action);
                }
            }
        });
    }
    BulkRequestBuilder bulkBuilder = client.prepareBulk();
    for (int i = 0; i < numDocs; i++) {
        XContentBuilder doc = jsonBuilder().startObject().field("foo", "bar").endObject();
        bulkBuilder.add(client.prepareIndex("index", "type").setSource(doc));
    }
    BulkResponse response = bulkBuilder.get();
    if (response.hasFailures()) {
        for (BulkItemResponse singleIndexResponse : response.getItems()) {
            if (singleIndexResponse.isFailed()) {
                fail("None of the bulk items should fail but got " + singleIndexResponse.getFailureMessage());
            }
        }
    }
    refresh();
    SearchResponse searchResponse = client().prepareSearch("index").setSize(numDocs * 2).addStoredField("_id").get();
    Set<String> uniqueIds = new HashSet<>();
    long dupCounter = 0;
    boolean foundDuplicateAlready = false;
    for (int i = 0; i < searchResponse.getHits().getHits().length; i++) {
        if (!uniqueIds.add(searchResponse.getHits().getHits()[i].getId())) {
            if (!foundDuplicateAlready) {
                SearchResponse dupIdResponse = client().prepareSearch("index").setQuery(termQuery("_id", searchResponse.getHits().getHits()[i].getId())).setExplain(true).get();
                assertThat(dupIdResponse.getHits().getTotalHits(), greaterThan(1L));
                logger.info("found a duplicate id:");
                for (SearchHit hit : dupIdResponse.getHits()) {
                    logger.info("Doc {} was found on shard {}", hit.getId(), hit.getShard().getShardId());
                }
                logger.info("will not print anymore in case more duplicates are found.");
                foundDuplicateAlready = true;
            }
            dupCounter++;
        }
    }
    assertSearchResponse(searchResponse);
    assertThat(dupCounter, equalTo(0L));
    assertHitCount(searchResponse, numDocs);
    IndicesStatsResponse index = client().admin().indices().prepareStats("index").clear().setSegments(true).get();
    IndexStats indexStats = index.getIndex("index");
    long maxUnsafeAutoIdTimestamp = Long.MIN_VALUE;
    for (IndexShardStats indexShardStats : indexStats) {
        for (ShardStats shardStats : indexShardStats) {
            SegmentsStats segments = shardStats.getStats().getSegments();
            maxUnsafeAutoIdTimestamp = Math.max(maxUnsafeAutoIdTimestamp, segments.getMaxUnsafeAutoIdTimestamp());
        }
    }
    assertTrue("exception must have been thrown otherwise setup is broken", exceptionThrown.get());
    assertTrue("maxUnsafeAutoIdTimestamp must be > -1 since we had at least one retry", maxUnsafeAutoIdTimestamp > -1);
}
Use of org.elasticsearch.action.bulk.BulkItemResponse in project elasticsearch by elastic.
In the class IngestClientIT, method testBulkWithIngestFailures():
public void testBulkWithIngestFailures() throws Exception {
    createIndex("index");
    BytesReference source = jsonBuilder().startObject().field("description", "my_pipeline").startArray("processors").startObject().startObject("test").endObject().endObject().endArray().endObject().bytes();
    PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON);
    client().admin().cluster().putPipeline(putPipelineRequest).get();
    int numRequests = scaledRandomIntBetween(32, 128);
    BulkRequest bulkRequest = new BulkRequest();
    for (int i = 0; i < numRequests; i++) {
        IndexRequest indexRequest = new IndexRequest("index", "type", Integer.toString(i)).setPipeline("_id");
        indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", i % 2 == 0);
        bulkRequest.add(indexRequest);
    }
    BulkResponse response = client().bulk(bulkRequest).actionGet();
    assertThat(response.getItems().length, equalTo(bulkRequest.requests().size()));
    for (int i = 0; i < bulkRequest.requests().size(); i++) {
        BulkItemResponse itemResponse = response.getItems()[i];
        if (i % 2 == 0) {
            BulkItemResponse.Failure failure = itemResponse.getFailure();
            ElasticsearchException compoundProcessorException = (ElasticsearchException) failure.getCause();
            assertThat(compoundProcessorException.getRootCause().getMessage(), equalTo("test processor failed"));
        } else {
            IndexResponse indexResponse = itemResponse.getResponse();
            assertThat("Expected a successful response but found failure [" + itemResponse.getFailure() + "].", itemResponse.isFailed(), is(false));
            assertThat(indexResponse, notNullValue());
            assertThat(indexResponse.getId(), equalTo(Integer.toString(i)));
            assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
        }
    }
}
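When a pipeline processor rejects a document, only that item is marked failed while the rest of the bulk succeeds. A minimal sketch of how a caller might surface those per-item ingest failures (hedged: the logger and the message format are illustrative; only standard BulkItemResponse.Failure accessors are used):

for (BulkItemResponse item : response) {
    if (item.isFailed()) {
        BulkItemResponse.Failure failure = item.getFailure();
        // Failure carries the target coordinates plus the cause thrown by the pipeline.
        logger.warn("pipeline rejected {}/{}/{}: {}", failure.getIndex(), failure.getType(), failure.getId(), failure.getMessage());
    }
}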
Use of org.elasticsearch.action.bulk.BulkItemResponse in project elasticsearch by elastic.
In the class CrudIT, method testBulk():
public void testBulk() throws IOException {
    int nbItems = randomIntBetween(10, 100);
    boolean[] errors = new boolean[nbItems];
    XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
    BulkRequest bulkRequest = new BulkRequest();
    for (int i = 0; i < nbItems; i++) {
        String id = String.valueOf(i);
        boolean erroneous = randomBoolean();
        errors[i] = erroneous;
        DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values());
        if (opType == DocWriteRequest.OpType.DELETE) {
            if (erroneous == false) {
                assertEquals(RestStatus.CREATED, highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status());
            }
            DeleteRequest deleteRequest = new DeleteRequest("index", "test", id);
            bulkRequest.add(deleteRequest);
        } else {
            BytesReference source = XContentBuilder.builder(xContentType.xContent()).startObject().field("id", i).endObject().bytes();
            if (opType == DocWriteRequest.OpType.INDEX) {
                IndexRequest indexRequest = new IndexRequest("index", "test", id).source(source, xContentType);
                if (erroneous) {
                    indexRequest.version(12L);
                }
                bulkRequest.add(indexRequest);
            } else if (opType == DocWriteRequest.OpType.CREATE) {
                IndexRequest createRequest = new IndexRequest("index", "test", id).source(source, xContentType).create(true);
                if (erroneous) {
                    assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest).status());
                }
                bulkRequest.add(createRequest);
            } else if (opType == DocWriteRequest.OpType.UPDATE) {
                UpdateRequest updateRequest = new UpdateRequest("index", "test", id).doc(new IndexRequest().source(source, xContentType));
                if (erroneous == false) {
                    assertEquals(RestStatus.CREATED, highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status());
                }
                bulkRequest.add(updateRequest);
            }
        }
    }
    BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync);
    assertEquals(RestStatus.OK, bulkResponse.status());
    assertTrue(bulkResponse.getTookInMillis() > 0);
    assertEquals(nbItems, bulkResponse.getItems().length);
    for (int i = 0; i < nbItems; i++) {
        BulkItemResponse bulkItemResponse = bulkResponse.getItems()[i];
        assertEquals(i, bulkItemResponse.getItemId());
        assertEquals("index", bulkItemResponse.getIndex());
        assertEquals("test", bulkItemResponse.getType());
        assertEquals(String.valueOf(i), bulkItemResponse.getId());
        DocWriteRequest.OpType requestOpType = bulkRequest.requests().get(i).opType();
        if (requestOpType == DocWriteRequest.OpType.INDEX || requestOpType == DocWriteRequest.OpType.CREATE) {
            assertEquals(errors[i], bulkItemResponse.isFailed());
            assertEquals(errors[i] ? RestStatus.CONFLICT : RestStatus.CREATED, bulkItemResponse.status());
        } else if (requestOpType == DocWriteRequest.OpType.UPDATE) {
            assertEquals(errors[i], bulkItemResponse.isFailed());
            assertEquals(errors[i] ? RestStatus.NOT_FOUND : RestStatus.OK, bulkItemResponse.status());
        } else if (requestOpType == DocWriteRequest.OpType.DELETE) {
            assertFalse(bulkItemResponse.isFailed());
            assertEquals(errors[i] ? RestStatus.NOT_FOUND : RestStatus.OK, bulkItemResponse.status());
        }
    }
}
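Because getItemId(), asserted above, matches each BulkItemResponse to the request at the same position in the originating BulkRequest, partial failures can be collected for a targeted retry. A minimal sketch of that pattern (hedged: retrying only on TOO_MANY_REQUESTS and the backoff note are assumptions about a typical client, not part of the test):

List<DocWriteRequest> toRetry = new ArrayList<>();
for (BulkItemResponse item : bulkResponse) {
    if (item.isFailed() && item.getFailure().getStatus() == RestStatus.TOO_MANY_REQUESTS) {
        // Pick the original request back out of the bulk by item position.
        toRetry.add(bulkRequest.requests().get(item.getItemId()));
    }
}
// The collected requests can then be added to a fresh BulkRequest and re-sent after a backoff.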