use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkResponse in project metron by apache.
the class ElasticsearchBulkDocumentWriterTest method setupElasticsearchToSucceed.
private void setupElasticsearchToSucceed() throws IOException {
    final String documentId = UUID.randomUUID().toString();
    final boolean isFailed = false;
    final int itemID = 0;
    // the write response will contain what is used as the document ID
    DocWriteResponse writeResponse = mock(DocWriteResponse.class);
    when(writeResponse.getId()).thenReturn(documentId);
    // define the item-level response
    BulkItemResponse itemResponse = mock(BulkItemResponse.class);
    when(itemResponse.isFailed()).thenReturn(isFailed);
    when(itemResponse.getItemId()).thenReturn(itemID);
    when(itemResponse.getResponse()).thenReturn(writeResponse);
    List<BulkItemResponse> itemsResponses = Collections.singletonList(itemResponse);
    // define the bulk response to indicate success
    BulkResponse response = mock(BulkResponse.class);
    when(response.iterator()).thenReturn(itemsResponses.iterator());
    when(response.hasFailures()).thenReturn(isFailed);
    // have the client return the mock response
    when(highLevelClient.bulk(any(BulkRequest.class))).thenReturn(response);
}
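For context, a test built on this setup could look like the following sketch. The writer and document fields, the index name, and the assertions are assumptions for illustration, not code taken from Metron.

// Illustrative sketch only: exercises the mocked client configured by
// setupElasticsearchToSucceed(); the writer/document fields are assumed.
@Test
public void shouldWriteDocumentWhenElasticsearchSucceeds() throws IOException {
    setupElasticsearchToSucceed();
    writer.addDocument(document, "test_index");
    BulkDocumentWriterResults<Document> results = writer.write();
    assertEquals(1, results.getSuccesses().size());
    assertEquals(0, results.getFailures().size());
}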
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkResponse in project metron by apache.
the class ElasticsearchBulkDocumentWriter method write.
@Override
public BulkDocumentWriterResults<D> write() {
    BulkDocumentWriterResults<D> results = new BulkDocumentWriterResults<>();
    try {
        // create an index request for each document
        BulkRequest bulkRequest = new BulkRequest();
        bulkRequest.setRefreshPolicy(refreshPolicy);
        for (Indexable doc : documents) {
            DocWriteRequest request = createRequest(doc.document, doc.index);
            bulkRequest.add(request);
        }
        // submit the request and handle the response
        BulkResponse bulkResponse = client.getHighLevelClient().bulk(bulkRequest);
        handleBulkResponse(bulkResponse, documents, results);
        if (LOG.isDebugEnabled()) {
            String shards = Arrays.stream(bulkResponse.getItems())
                    .map(bulkItemResponse -> bulkItemResponse.getResponse().getShardId().toString())
                    .collect(Collectors.joining(","));
            LOG.debug("{} results written to shards {} in {} ms; batchSize={}, success={}, failed={}",
                    bulkResponse.getItems().length, shards, bulkResponse.getTookInMillis(),
                    documents.size(), results.getSuccesses().size(), results.getFailures().size());
        }
    } catch (IOException e) {
        // assume all documents have failed
        for (Indexable indexable : documents) {
            D failed = indexable.document;
            results.addFailure(failed, e, ExceptionUtils.getRootCauseMessage(e));
        }
        LOG.error("Failed to submit bulk request; all documents failed", e);
    } finally {
        // flush all documents, no matter which ones succeeded or failed
        documents.clear();
    }
    return results;
}
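handleBulkResponse is not shown in this snippet. A plausible shape, inferred from the mock setup in the test above (where getItemId() indexes back into the submitted batch) rather than copied from Metron, would be:

// Sketch of what handleBulkResponse could look like; the real Metron
// implementation may differ. It pairs each item response with the
// submitted document via BulkItemResponse.getItemId(), and the
// addSuccess/addFailure calls mirror the usage seen in write().
private void handleBulkResponse(BulkResponse bulkResponse, List<Indexable> documents,
                                BulkDocumentWriterResults<D> results) {
    for (BulkItemResponse itemResponse : bulkResponse) {
        D document = documents.get(itemResponse.getItemId()).document;
        if (itemResponse.isFailed()) {
            results.addFailure(document, itemResponse.getFailure().getCause(),
                    itemResponse.getFailureMessage());
        } else {
            results.addSuccess(document);
        }
    }
}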
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkResponse in project snow-owl by b2ihealthcare.
the class EsDocumentWriter method commit.
@Override
public void commit() throws IOException {
    if (isEmpty()) {
        return;
    }
    final Set<DocumentMapping> mappingsToRefresh = Collections.synchronizedSet(newHashSet());
    final EsClient client = admin.client();
    // apply bulk updates first
    final ListeningExecutorService executor;
    if (bulkUpdateOperations.size() > 1 || bulkDeleteOperations.size() > 1) {
        final int threads = Math.min(4, Math.max(bulkUpdateOperations.size(), bulkDeleteOperations.size()));
        executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(threads));
    } else {
        executor = MoreExecutors.newDirectExecutorService();
    }
    final List<ListenableFuture<?>> updateFutures = newArrayList();
    for (BulkUpdate<?> update : bulkUpdateOperations) {
        updateFutures.add(executor.submit(() -> {
            if (admin.bulkUpdate(update)) {
                mappingsToRefresh.add(admin.mappings().getMapping(update.getType()));
            }
        }));
    }
    for (BulkDelete<?> delete : bulkDeleteOperations) {
        updateFutures.add(executor.submit(() -> {
            if (admin.bulkDelete(delete)) {
                mappingsToRefresh.add(admin.mappings().getMapping(delete.getType()));
            }
        }));
    }
    try {
        executor.shutdown();
        Futures.allAsList(updateFutures).get();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException | ExecutionException e) {
        admin.log().error("Couldn't execute bulk updates", e);
        throw new IndexException("Couldn't execute bulk updates", e);
    }
    // then bulk indexes/deletes
    if (!indexOperations.isEmpty() || !deleteOperations.isEmpty()) {
        final BulkProcessor processor = client.bulk(new BulkProcessor.Listener() {
                    @Override
                    public void beforeBulk(long executionId, BulkRequest request) {
                        admin.log().debug("Sending bulk request {}", request.numberOfActions());
                    }

                    @Override
                    public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                        admin.log().error("Failed bulk request", failure);
                    }

                    @Override
                    public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
                        admin.log().debug("Successfully processed bulk request ({}) in {}.",
                                request.numberOfActions(), response.getTook());
                        if (response.hasFailures()) {
                            for (BulkItemResponse itemResponse : response.getItems()) {
                                checkState(!itemResponse.isFailed(),
                                        "Failed to commit bulk request in index '%s', %s",
                                        admin.name(), itemResponse.getFailureMessage());
                            }
                        }
                    }
                })
                .setConcurrentRequests(getConcurrencyLevel())
                .setBulkActions((int) admin.settings().get(IndexClientFactory.BULK_ACTIONS_SIZE))
                .setBulkSize(new ByteSizeValue((int) admin.settings().get(IndexClientFactory.BULK_ACTIONS_SIZE_IN_MB), ByteSizeUnit.MB))
                .build();
        for (Class<?> type : ImmutableSet.copyOf(indexOperations.rowKeySet())) {
            final Map<String, Object> indexOperationsForType = indexOperations.row(type);
            final DocumentMapping mapping = admin.mappings().getMapping(type);
            final String typeIndex = admin.getTypeIndex(mapping);
            mappingsToRefresh.add(mapping);
            for (Entry<String, Object> entry : Iterables.consumingIterable(indexOperationsForType.entrySet())) {
                final String id = entry.getKey();
                if (!deleteOperations.containsValue(id)) {
                    final Object obj = entry.getValue();
                    final byte[] _source = mapper.writeValueAsBytes(obj);
                    IndexRequest indexRequest = new IndexRequest()
                            .index(typeIndex)
                            .opType(OpType.INDEX)
                            .source(_source, XContentType.JSON);
                    // XXX revisions have their own special local ID, but that is not needed when
                    // sending them to ES; ES will auto-generate a non-conflicting ID for them
                    if (!mapping.isAutoGeneratedId()) {
                        indexRequest.id(id);
                    }
                    processor.add(indexRequest);
                }
            }
            for (String id : deleteOperations.removeAll(type)) {
                processor.add(new DeleteRequest(typeIndex, id));
            }
            // flush the processor between index boundaries
            processor.flush();
        }
        // remaining delete operations can be executed on their own
        for (Class<?> type : ImmutableSet.copyOf(deleteOperations.keySet())) {
            final DocumentMapping mapping = admin.mappings().getMapping(type);
            final String typeIndex = admin.getTypeIndex(mapping);
            mappingsToRefresh.add(mapping);
            for (String id : deleteOperations.removeAll(type)) {
                processor.add(new DeleteRequest(typeIndex, id));
            }
            // flush the processor between index boundaries
            processor.flush();
        }
        try {
            processor.awaitClose(5, TimeUnit.MINUTES);
        } catch (InterruptedException e) {
            throw new IndexException("Interrupted bulk processing part of the commit", e);
        }
    }
    // refresh the index if there were only updates
    admin.refresh(mappingsToRefresh);
}
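The client.bulk(listener) call above is snow-owl's own wrapper around the Elasticsearch BulkProcessor builder. Against the plain Elasticsearch 7 high-level REST client, the equivalent wiring would look roughly like this; the client variable, the listener reference, and the batch limits are assumptions for illustration.

// Minimal sketch of BulkProcessor wiring with the ES 7 high-level REST
// client; snow-owl's EsClient.bulk() wraps something equivalent.
BulkProcessor processor = BulkProcessor.builder(
        (request, bulkListener) -> client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener),
        listener)  // a BulkProcessor.Listener like the one shown above
        .setConcurrentRequests(1)
        .setBulkActions(10_000)
        .setBulkSize(new ByteSizeValue(10, ByteSizeUnit.MB))
        .build();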
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkResponse in project ranger by apache.
the class ElasticSearchAuditDestination method log.
@Override
public boolean log(Collection<AuditEventBase> events) {
    boolean ret = false;
    try {
        logStatusIfRequired();
        addTotalCount(events.size());
        RestHighLevelClient client = getClient();
        if (null == client) {
            // Elasticsearch is not yet initialized, so report these events as deferred
            addDeferredCount(events.size());
            return ret;
        }
        ArrayList<AuditEventBase> eventList = new ArrayList<>(events);
        BulkRequest bulkRequest = new BulkRequest();
        try {
            for (AuditEventBase event : eventList) {
                AuthzAuditEvent authzEvent = (AuthzAuditEvent) event;
                String id = authzEvent.getEventId();
                Map<String, Object> doc = toDoc(authzEvent);
                bulkRequest.add(new IndexRequest(index).id(id).source(doc));
            }
        } catch (Exception ex) {
            addFailedCount(eventList.size());
            logFailedEvent(eventList, ex);
        }
        BulkResponse response = client.bulk(bulkRequest, RequestOptions.DEFAULT);
        if (response.status().getStatus() >= 400) {
            addFailedCount(eventList.size());
            logFailedEvent(eventList, "HTTP " + response.status().getStatus());
        } else {
            BulkItemResponse[] items = response.getItems();
            for (int i = 0; i < items.length; i++) {
                AuditEventBase itemRequest = eventList.get(i);
                BulkItemResponse itemResponse = items[i];
                if (itemResponse.isFailed()) {
                    addFailedCount(1);
                    logFailedEvent(Arrays.asList(itemRequest), itemResponse.getFailureMessage());
                } else {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug(String.format("Indexed %s", itemRequest.getEventKey()));
                    }
                    addSuccessCount(1);
                    ret = true;
                }
            }
        }
    } catch (Throwable t) {
        addDeferredCount(events.size());
        logError("Error sending message to ElasticSearch", t);
    }
    return ret;
}
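toDoc(authzEvent) flattens the audit event into the field map that gets indexed. A reduced, hypothetical version might look like the sketch below; the real Ranger mapping carries many more fields, and the getter names here are assumptions.

// Hypothetical, reduced sketch of toDoc(); not the full Ranger field mapping.
private Map<String, Object> toDoc(AuthzAuditEvent auditEvent) {
    Map<String, Object> doc = new HashMap<>();
    doc.put("id", auditEvent.getEventId());
    doc.put("access", auditEvent.getAccessType());
    doc.put("repo", auditEvent.getRepositoryName());
    doc.put("reqUser", auditEvent.getUser());
    doc.put("evtTime", auditEvent.getEventTime());
    doc.put("result", auditEvent.getAccessResult());
    return doc;
}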
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkResponse in project pancm_project by xuwujing.
the class IpHandler method saveBulk.
/**
 * @return boolean
 * @Author pancm
 * @Description Bulk insert/update documents
 * @Date 2019/3/21
 * @Param [mapList: documents to store, index: index name, type: index type, key: map key holding the document ID; null means the Elasticsearch-generated ID is used]
 */
public static boolean saveBulk(List<Map<String, Object>> mapList, String index, String type, String key) throws IOException {
    if (mapList == null || mapList.size() == 0) {
        return true;
    }
    if (index == null || index.trim().length() == 0 || type == null || type.trim().length() == 0) {
        return false;
    }
    try {
        BulkRequest request = new BulkRequest();
        mapList.forEach(map -> {
            if (key != null) {
                // fall back to an auto-generated ID when the map has no usable value for the key
                Object idValue = map.get(key);
                if (idValue == null || idValue.toString().trim().length() == 0) {
                    request.add(new IndexRequest(index, type).source(map, XContentType.JSON));
                } else {
                    request.add(new IndexRequest(index, type, idValue.toString()).source(map, XContentType.JSON));
                }
            } else {
                request.add(new IndexRequest(index, type).source(map, XContentType.JSON));
            }
        });
        BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT);
        // at least one item failed, so return false immediately
        if (bulkResponse.hasFailures()) {
            return false;
        }
        return true;
    } finally {
        if (isAutoClose) {
            close();
        }
    }
}
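A call site for saveBulk might look like the following; the index and type names are made up for illustration.

// Illustrative usage: bulk-index two documents, using each map's "id"
// entry as the Elasticsearch document ID.
List<Map<String, Object>> rows = new ArrayList<>();
Map<String, Object> row1 = new HashMap<>();
row1.put("id", "1");
row1.put("ip", "192.168.0.1");
rows.add(row1);
Map<String, Object> row2 = new HashMap<>();
row2.put("id", "2");
row2.put("ip", "192.168.0.2");
rows.add(row2);
boolean ok = IpHandler.saveBulk(rows, "ip_index", "ipmsg", "id");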