Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkResponse in project graylog2-server by Graylog2.
In the class MessagesAdapterES7, the method bulkIndexChunked:
private List<Messages.IndexingError> bulkIndexChunked(ChunkedBulkIndexer.Chunk command) throws ChunkedBulkIndexer.EntityTooLargeException {
    final List<IndexingRequest> messageList = command.requests;
    final int offset = command.offset;
    final int chunkSize = command.size;
    if (messageList.isEmpty()) {
        return Collections.emptyList();
    }
    final Iterable<List<IndexingRequest>> chunks = Iterables.partition(messageList.subList(offset, messageList.size()), chunkSize);
    int chunkCount = 1;
    int indexedSuccessfully = 0;
    final List<Messages.IndexingError> indexFailures = new ArrayList<>();
    for (List<IndexingRequest> chunk : chunks) {
        final BulkResponse result = runBulkRequest(indexedSuccessfully, chunk);
        indexedSuccessfully += chunk.size();
        final List<BulkItemResponse> failures = extractFailures(result);
        indexFailures.addAll(indexingErrorsFrom(failures, messageList));
        logDebugInfo(messageList, offset, chunkSize, chunkCount, result, failures);
        logFailures(result, failures.size());
        chunkCount++;
    }
    return indexFailures;
}
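For context, the method partitions the not-yet-indexed tail of the request list into fixed-size chunks with Guava's Iterables.partition and accumulates per-item indexing errors across chunks. Below is a minimal, self-contained sketch of that partition-and-accumulate pattern; the Failure record and ChunkSender interface are illustrative placeholders, not Graylog types.

import com.google.common.collect.Iterables;

import java.util.ArrayList;
import java.util.List;

class ChunkedSendSketch {
    // Placeholder for a per-item failure (illustrative only).
    record Failure(String id, String reason) {}

    // Placeholder for "send one chunk, report its failures"
    // (stands in for runBulkRequest + extractFailures above).
    interface ChunkSender {
        List<Failure> send(List<String> chunk);
    }

    static List<Failure> sendChunked(List<String> requests, int offset, int chunkSize, ChunkSender sender) {
        final List<Failure> failures = new ArrayList<>();
        if (requests.isEmpty()) {
            return failures;
        }
        // Skip the prefix that was already indexed, then send fixed-size chunks and collect failures.
        for (List<String> chunk : Iterables.partition(requests.subList(offset, requests.size()), chunkSize)) {
            failures.addAll(sender.send(chunk));
        }
        return failures;
    }
}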
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkResponse in project graylog2-server by Graylog2.
In the class MessagesAdapterES7, the method runBulkRequest:
private BulkResponse runBulkRequest(int indexedSuccessfully, List<IndexingRequest> chunk) throws ChunkedBulkIndexer.EntityTooLargeException {
    final BulkRequest bulkRequest = createBulkRequest(chunk);
    final BulkResponse result;
    try {
        result = this.client.execute((c, requestOptions) -> c.bulk(bulkRequest, requestOptions));
    } catch (ElasticsearchException e) {
        for (ElasticsearchException cause : e.guessRootCauses()) {
            if (cause.status().equals(RestStatus.REQUEST_ENTITY_TOO_LARGE)) {
                throw new ChunkedBulkIndexer.EntityTooLargeException(indexedSuccessfully, indexingErrorsFrom(chunk));
            }
        }
        throw new org.graylog2.indexer.ElasticsearchException(e);
    }
    return result;
}
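The interesting part here is the 413 handling: a REQUEST_ENTITY_TOO_LARGE root cause is turned into EntityTooLargeException so the chunked indexer can retry with smaller chunks. A minimal sketch of the same status check against a plain 7.x RestHighLevelClient follows; the exception thrown on 413 is a stand-in, not the Graylog class.

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

class EntityTooLargeSketch {
    static BulkResponse bulkOrSignalTooLarge(RestHighLevelClient client, BulkRequest request) throws IOException {
        try {
            return client.bulk(request, RequestOptions.DEFAULT);
        } catch (ElasticsearchException e) {
            // Walk the guessed root causes; a 413 means the bulk payload exceeded
            // the server's http.max_content_length and should be retried in smaller chunks.
            for (ElasticsearchException cause : e.guessRootCauses()) {
                if (cause.status() == RestStatus.REQUEST_ENTITY_TOO_LARGE) {
                    // Stand-in for ChunkedBulkIndexer.EntityTooLargeException.
                    throw new IllegalStateException("bulk payload too large, retry with smaller chunks", e);
                }
            }
            throw e;
        }
    }
}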
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkResponse in project rssriver by dadoonet.
In the class RssRiver, the method start:
@Override
public void start() {
    if (logger.isInfoEnabled())
        logger.info("Starting rss stream");
    try {
        client.admin().indices().prepareCreate(indexName).execute().actionGet();
    } catch (Exception e) {
        if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
            // that's fine
        } else if (ExceptionsHelper.unwrapCause(e) instanceof ClusterBlockException) {
            // ok, not recovered yet..., let's start indexing and hope we recover by the first bulk
            // TODO: smarter logic could register a cluster event listener here and only start
            // sampling when the block is removed...
        } else {
            logger.warn("failed to create index [{}], disabling river...", e, indexName);
            return;
        }
    }
    try {
        pushMapping(indexName, typeName, RssToJson.buildRssMapping(typeName, raw));
    } catch (Exception e) {
        logger.warn("failed to create mapping for [{}/{}], disabling river...", e, indexName, typeName);
        return;
    }
    // Creating bulk processor
    this.bulkProcessor = BulkProcessor.builder(client, new BulkProcessor.Listener() {

        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            logger.debug("Going to execute new bulk composed of {} actions", request.numberOfActions());
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            logger.debug("Executed bulk composed of {} actions", request.numberOfActions());
            if (response.hasFailures()) {
                logger.warn("There were failures while executing bulk: {}", response.buildFailureMessage());
                if (logger.isDebugEnabled()) {
                    for (BulkItemResponse item : response.getItems()) {
                        if (item.isFailed()) {
                            logger.debug("Error for {}/{}/{} for {} operation: {}", item.getIndex(), item.getType(), item.getId(), item.getOpType(), item.getFailureMessage());
                        }
                    }
                }
            }
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            logger.warn("Error executing bulk", failure);
        }
    }).setBulkActions(bulkSize).setConcurrentRequests(maxConcurrentBulk).setFlushInterval(bulkFlushInterval).build();

    // We create as many threads as there are feeds
    threads = new ArrayList<Thread>(feedsDefinition.size());
    int threadNumber = 0;
    for (RssRiverFeedDefinition feedDefinition : feedsDefinition) {
        Thread thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "rss_slurper_" + threadNumber).newThread(new RSSParser(feedDefinition));
        thread.start();
        threads.add(thread);
        threadNumber++;
    }
}
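Once the processor is built, the feed threads only need to hand it index requests; flushing happens automatically when bulkSize actions accumulate, the flush interval elapses, or the processor is closed. A minimal sketch of that hand-off, assuming a client version where IndexRequest still takes an index and a type; the helper and parameter names are placeholders.

import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.xcontent.XContentType;

class FeedIndexerSketch {
    // Hypothetical helper a feed parser thread could call for every new entry.
    static void indexEntry(BulkProcessor bulkProcessor, String indexName, String typeName, String json) {
        // add() only enqueues the request; the processor decides when to flush
        // based on the configured action count, flush interval and concurrency.
        bulkProcessor.add(new IndexRequest(indexName, typeName).source(json, XContentType.JSON));
    }
}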
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkResponse in project MSEC by Tencent.
In the class ESClientThread, the method run:
@Override
public void run() {
    try {
        while (true) {
            ESClientThread.ESThreadRequest request = queue.take();
            BulkRequestBuilder bulkRequest = client.prepareBulk();
            for (int i = 0; i < request.sourceList.size(); i++) {
                // System.out.println("take from queue: " + source);
                bulkRequest.add(client.prepareIndex(request.indexNameList.get(i), request.indexTypeList.get(i)).setSource(request.sourceList.get(i)));
                LOG.info("taken source: " + request.sourceList.get(i));
            }
            BulkResponse bulkResponse = bulkRequest.execute().actionGet();
            if (bulkResponse.hasFailures()) {
                System.out.println("bulk response errors! " + bulkResponse.buildFailureMessage());
                // Retry the whole bulk once; note this also resends items that already succeeded.
                bulkResponse = bulkRequest.execute().actionGet();
            }
            // Thread.sleep(10);
        }
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}
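One caveat with the snippet above: re-executing the entire bulk on hasFailures() resends documents that were already indexed successfully. A minimal sketch of inspecting the per-item responses instead, so only the failed positions would be re-queued; the class and method names are illustrative.

import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkResponse;

import java.util.ArrayList;
import java.util.List;

class FailedItemsSketch {
    // Returns the positions of the failed items so only those can be retried.
    static List<Integer> failedPositions(BulkResponse response) {
        final List<Integer> failed = new ArrayList<>();
        if (!response.hasFailures()) {
            return failed;
        }
        for (BulkItemResponse item : response.getItems()) {
            if (item.isFailed()) {
                failed.add(item.getItemId());
            }
        }
        return failed;
    }
}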
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkResponse in project metacat by Netflix.
In the class ElasticSearchUtilImpl, the method updateDocs:
private void updateDocs(final String type, final List<String> ids, final ObjectNode node) {
    try {
        RETRY_ES_PUBLISH.call(() -> {
            final BulkRequestBuilder bulkRequest = client.prepareBulk();
            ids.forEach(id -> {
                bulkRequest.add(client.prepareUpdate(esIndex, type, id).setRetryOnConflict(NO_OF_CONFLICT_RETRIES).setDoc(metacatJson.toJsonAsBytes(node), XContentType.JSON));
            });
            final BulkResponse bulkResponse = bulkRequest.execute().actionGet(esBulkCallTimeout);
            if (bulkResponse.hasFailures()) {
                for (BulkItemResponse item : bulkResponse.getItems()) {
                    if (item.isFailed()) {
                        handleException("ElasticSearchUtil.updateDocs.item", type, item.getId(), item.getFailure().getCause(), Metrics.CounterElasticSearchUpdate.getMetricName());
                    }
                }
            }
            return null;
        });
    } catch (Exception e) {
        handleException("ElasticSearchUtil.updateDocs", type, ids, e, Metrics.CounterElasticSearchBulkUpdate.getMetricName());
    }
}
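Stripped of the metacat-specific retry wrapper and metrics, the same bulk-update construction looks roughly like the sketch below; the index, type, retry count, timeout and JSON payload are placeholders, and setDoc with an explicit XContentType matches the call used above.

import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.xcontent.XContentType;

import java.util.List;

class BulkUpdateSketch {
    static BulkResponse updateAll(Client client, String index, String type, List<String> ids, byte[] docJson) {
        final BulkRequestBuilder bulk = client.prepareBulk();
        for (String id : ids) {
            // Partial update of each document; version conflicts are retried a few
            // times by Elasticsearch before the item is reported as failed.
            bulk.add(client.prepareUpdate(index, type, id)
                    .setRetryOnConflict(3)
                    .setDoc(docJson, XContentType.JSON));
        }
        // actionGet(timeoutMillis) would bound the wait, as in the snippet above.
        return bulk.execute().actionGet();
    }
}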