use of org.elasticsearch.action.bulk.BulkRequest in project camel by apache.
the class BulkRequestAggregationStrategy method aggregate.
@Override
public Exchange aggregate(Exchange oldExchange, Exchange newExchange) {
    // Don't use getBody(Class<T>) here as we don't want to coerce the body type using a type converter.
    Object objBody = newExchange.getIn().getBody();
    if (!(objBody instanceof ActionRequest)) {
        throw new InvalidPayloadRuntimeException(newExchange, ActionRequest.class);
    }
    ActionRequest newBody = (ActionRequest) objBody;
    BulkRequest request;
    if (oldExchange == null) {
        request = new BulkRequest();
        request.add(newBody);
        newExchange.getIn().setBody(request);
        return newExchange;
    } else {
        request = oldExchange.getIn().getBody(BulkRequest.class);
        request.add(newBody);
        return oldExchange;
    }
}
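For context, this strategy is meant to be plugged into a Camel aggregator so that many incoming ActionRequest bodies collapse into one BulkRequest. Below is a minimal sketch of such a route, assuming the camel-elasticsearch component is on the classpath; the endpoint URI, correlation expression, and completion settings are illustrative, not taken from the project.

import org.apache.camel.builder.RouteBuilder;

public class BulkIndexRoute extends RouteBuilder {
    @Override
    public void configure() {
        from("direct:index")
            // Every incoming body must be an ActionRequest, or aggregate() above throws.
            .aggregate(constant(true), new BulkRequestAggregationStrategy())
                .completionSize(100)       // illustrative batch size
                .completionTimeout(1000)   // flush after 1s of inactivity
            .to("elasticsearch://local?operation=BULK&indexName=twitter");
    }
}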
use of org.elasticsearch.action.bulk.BulkRequest in project camel by apache.
the class ElasticsearchBulkTest method bulkRequestBody.
@Test
public void bulkRequestBody() throws Exception {
    String prefix = createPrefix();
    // given
    BulkRequest request = new BulkRequest();
    request.add(new IndexRequest(prefix + "foo", prefix + "bar", prefix + "baz")
            .source("{\"" + prefix + "content\": \"" + prefix + "hello\"}"));
    // when
    BulkResponse response = template.requestBody("direct:bulk", request, BulkResponse.class);
    // then
    assertThat(response, notNullValue());
    assertEquals(prefix + "baz", response.getItems()[0].getId());
}
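A note on consuming the BulkResponse: getItems()[0] above is safe because the test adds exactly one action. A bulk request can partially fail, so production code should inspect items individually; a small sketch using only standard BulkResponse accessors, applied to the same response variable:

if (response.hasFailures()) {
    for (BulkItemResponse item : response.getItems()) {
        if (item.isFailed()) {
            // Each failed item carries its own error; the batch is not all-or-nothing.
            System.err.println("item " + item.getId() + " failed: " + item.getFailureMessage());
        }
    }
}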
use of org.elasticsearch.action.bulk.BulkRequest in project rssriver by dadoonet.
the class RssRiver method start.
@Override
public void start() {
    if (logger.isInfoEnabled()) {
        logger.info("Starting rss stream");
    }
    try {
        client.admin().indices().prepareCreate(indexName).execute().actionGet();
    } catch (Exception e) {
        if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
            // that's fine
        } else if (ExceptionsHelper.unwrapCause(e) instanceof ClusterBlockException) {
            // ok, not recovered yet... let's start indexing and hope we
            // recover by the first bulk.
            // TODO: smarter logic could register a cluster event listener
            // here and only start sampling once the block is removed.
        } else {
            logger.warn("failed to create index [{}], disabling river...", e, indexName);
            return;
        }
    }
    try {
        pushMapping(indexName, typeName, RssToJson.buildRssMapping(typeName, raw));
    } catch (Exception e) {
        logger.warn("failed to create mapping for [{}/{}], disabling river...", e, indexName, typeName);
        return;
    }
    // Creating the bulk processor
    this.bulkProcessor = BulkProcessor.builder(client, new BulkProcessor.Listener() {
        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            logger.debug("Going to execute new bulk composed of {} actions", request.numberOfActions());
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            logger.debug("Executed bulk composed of {} actions", request.numberOfActions());
            if (response.hasFailures()) {
                logger.warn("There were failures while executing bulk: {}", response.buildFailureMessage());
                if (logger.isDebugEnabled()) {
                    for (BulkItemResponse item : response.getItems()) {
                        if (item.isFailed()) {
                            logger.debug("Error for {}/{}/{} for {} operation: {}",
                                    item.getIndex(), item.getType(), item.getId(), item.getOpType(), item.getFailureMessage());
                        }
                    }
                }
            }
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            logger.warn("Error executing bulk", failure);
        }
    }).setBulkActions(bulkSize).setConcurrentRequests(maxConcurrentBulk).setFlushInterval(bulkFlushInterval).build();
    // We create as many threads as there are feeds
    threads = new ArrayList<Thread>(feedsDefinition.size());
    int threadNumber = 0;
    for (RssRiverFeedDefinition feedDefinition : feedsDefinition) {
        Thread thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "rss_slurper_" + threadNumber)
                .newThread(new RSSParser(feedDefinition));
        thread.start();
        threads.add(thread);
        threadNumber++;
    }
}
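The RSSParser threads started above are what feed the shared BulkProcessor. A hedged sketch of the kind of call a parser thread would make for each feed entry; the source JSON is a placeholder, and source(String) assumes the pre-5.x client this river is built against:

// Batching, flushing, and concurrency are handled by the BulkProcessor
// according to the bulkSize / maxConcurrentBulk / bulkFlushInterval settings above.
bulkProcessor.add(new IndexRequest(indexName, typeName)
        .source("{\"title\": \"a feed entry\"}"));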
use of org.elasticsearch.action.bulk.BulkRequest in project elasticsearch by elastic.
the class IngestClientIT method testBulkWithIngestFailures.
public void testBulkWithIngestFailures() throws Exception {
    createIndex("index");
    BytesReference source = jsonBuilder().startObject()
            .field("description", "my_pipeline")
            .startArray("processors")
                .startObject().startObject("test").endObject().endObject()
            .endArray()
            .endObject().bytes();
    PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON);
    client().admin().cluster().putPipeline(putPipelineRequest).get();
    int numRequests = scaledRandomIntBetween(32, 128);
    BulkRequest bulkRequest = new BulkRequest();
    for (int i = 0; i < numRequests; i++) {
        IndexRequest indexRequest = new IndexRequest("index", "type", Integer.toString(i)).setPipeline("_id");
        indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", i % 2 == 0);
        bulkRequest.add(indexRequest);
    }
    BulkResponse response = client().bulk(bulkRequest).actionGet();
    assertThat(response.getItems().length, equalTo(bulkRequest.requests().size()));
    for (int i = 0; i < bulkRequest.requests().size(); i++) {
        BulkItemResponse itemResponse = response.getItems()[i];
        if (i % 2 == 0) {
            BulkItemResponse.Failure failure = itemResponse.getFailure();
            ElasticsearchException compoundProcessorException = (ElasticsearchException) failure.getCause();
            assertThat(compoundProcessorException.getRootCause().getMessage(), equalTo("test processor failed"));
        } else {
            IndexResponse indexResponse = itemResponse.getResponse();
            assertThat("Expected a successful response but found failure [" + itemResponse.getFailure() + "].",
                    itemResponse.isFailed(), is(false));
            assertThat(indexResponse, notNullValue());
            assertThat(indexResponse.getId(), equalTo(Integer.toString(i)));
            assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
        }
    }
}
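For readability, the jsonBuilder() chain above is equivalent to this pipeline definition; "test" is a test-only processor that, in this suite, fails with "test processor failed" when the document's "fail" field is true, which is why the even-numbered items above are expected to fail:

{
  "description": "my_pipeline",
  "processors": [
    { "test": {} }
  ]
}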
use of org.elasticsearch.action.bulk.BulkRequest in project elasticsearch by elastic.
the class PipelineExecutionServiceTests method testBulkRequestExecution.
public void testBulkRequestExecution() throws Exception {
    BulkRequest bulkRequest = new BulkRequest();
    String pipelineId = "_id";
    int numRequest = scaledRandomIntBetween(8, 64);
    for (int i = 0; i < numRequest; i++) {
        IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline(pipelineId);
        indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field1", "value1");
        bulkRequest.add(indexRequest);
    }
    when(store.get(pipelineId)).thenReturn(new Pipeline(pipelineId, null, version, new CompoundProcessor()));
    @SuppressWarnings("unchecked")
    BiConsumer<IndexRequest, Exception> requestItemErrorHandler = mock(BiConsumer.class);
    @SuppressWarnings("unchecked")
    Consumer<Exception> completionHandler = mock(Consumer.class);
    executionService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler);
    verify(requestItemErrorHandler, never()).accept(any(), any());
    verify(completionHandler, times(1)).accept(null);
}
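The two mocked handlers mirror the contract the test verifies: the per-item handler fires once per failed IndexRequest, and the completion handler receives null on success. A sketch of equivalent hand-written handlers, reusing the same executionService and bulkRequest as above; the lambda bodies are illustrative:

BiConsumer<IndexRequest, Exception> onItemError =
        (indexRequest, e) -> System.err.println("pipeline failed for " + indexRequest.id() + ": " + e);
Consumer<Exception> onCompletion =
        e -> { if (e != null) System.err.println("bulk pre-processing failed: " + e); };
executionService.executeBulkRequest(bulkRequest.requests(), onItemError, onCompletion);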