Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkRequest in project graylog2-server by Graylog2.
The class FixtureImporterES7, method importNode:
private void importNode(JsonNode root) throws IOException {
    /* This supports the nosqlunit DataSet structure:
     *
     * {
     *   "documents": [
     *     {
     *       "document": [
     *         {
     *           "index": {
     *             "indexName": "graylog_0",
     *             "indexId": "0"
     *           }
     *         },
     *         {
     *           "data": {
     *             "source": "example.org",
     *             "message": "Hi",
     *             "timestamp": "2015-01-01 01:00:00.000"
     *           }
     *         }
     *       ]
     *     }
     *   ]
     * }
     */
    final BulkRequest bulkRequest = new BulkRequest();
    final Set<String> targetIndices = new HashSet<>();
    for (final JsonNode document : root.path("documents")) {
        final List<JsonNode> indexes = new ArrayList<>();
        Map<String, Object> data = new HashMap<>();
        for (JsonNode entry : document.path("document")) {
            if (entry.hasNonNull("index")) {
                indexes.add(entry.path("index"));
            } else if (entry.hasNonNull("data")) {
                data = OBJECT_MAPPER.convertValue(entry.path("data"), TypeReferences.MAP_STRING_OBJECT);
            }
        }
        for (final JsonNode index : indexes) {
            final IndexRequest indexRequest = new IndexRequest().source(data);
            final String indexName = index.path("indexName").asText(null);
            if (indexName == null) {
                throw new IllegalArgumentException("Missing indexName in " + index);
            }
            targetIndices.add(indexName);
            indexRequest.index(indexName);
            if (index.hasNonNull("indexId")) {
                indexRequest.id(index.path("indexId").asText());
            }
            bulkRequest.add(indexRequest);
        }
    }
    for (String indexName : targetIndices) {
        if (!indexExists(indexName)) {
            createIndex(indexName);
        }
    }
    final BulkResponse result = client.execute((c, requestOptions) -> c.bulk(bulkRequest, requestOptions), "Unable to import fixtures.");
    if (result.hasFailures()) {
        throw new IllegalStateException("Error while bulk indexing documents: " + result.buildFailureMessage());
    }
}
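The same bulk pattern can be exercised standalone against the unshaded Elasticsearch 7 high-level REST client. A minimal sketch, assuming a local cluster; the host, index name, and document below are illustrative, not part of the Graylog test code:

import java.io.IOException;
import java.util.Map;

import org.apache.http.HttpHost;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class FixtureImportSketch {
    public static void main(String[] args) throws IOException {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // Build one bulk request carrying the fixture document from the comment above.
            BulkRequest bulk = new BulkRequest();
            bulk.add(new IndexRequest("graylog_0").id("0")
                    .source(Map.of(
                            "source", "example.org",
                            "message", "Hi",
                            "timestamp", "2015-01-01 01:00:00.000")));
            BulkResponse response = client.bulk(bulk, RequestOptions.DEFAULT);
            // Mirror the fail-fast check from importNode.
            if (response.hasFailures()) {
                throw new IllegalStateException(response.buildFailureMessage());
            }
        }
    }
}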
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkRequest in project graylog2-server by Graylog2.
The class MessagesAdapterES7, method runBulkRequest:
private BulkResponse runBulkRequest(int indexedSuccessfully, List<IndexingRequest> chunk) throws ChunkedBulkIndexer.EntityTooLargeException {
    final BulkRequest bulkRequest = createBulkRequest(chunk);
    final BulkResponse result;
    try {
        result = this.client.execute((c, requestOptions) -> c.bulk(bulkRequest, requestOptions));
    } catch (ElasticsearchException e) {
        for (ElasticsearchException cause : e.guessRootCauses()) {
            if (cause.status().equals(RestStatus.REQUEST_ENTITY_TOO_LARGE)) {
                throw new ChunkedBulkIndexer.EntityTooLargeException(indexedSuccessfully, indexingErrorsFrom(chunk));
            }
        }
        throw new org.graylog2.indexer.ElasticsearchException(e);
    }
    return result;
}
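The EntityTooLargeException gives the caller a signal to shrink its chunk and retry. A sketch of the loop such a runner typically sits inside, assuming a full list of IndexingRequest objects named requests; this illustrates the implied contract, not Graylog's actual ChunkedBulkIndexer:

int chunkSize = requests.size();
int indexedSuccessfully = 0;
while (indexedSuccessfully < requests.size()) {
    final List<IndexingRequest> chunk = requests.subList(
            indexedSuccessfully,
            Math.min(indexedSuccessfully + chunkSize, requests.size()));
    try {
        runBulkRequest(indexedSuccessfully, chunk);
        indexedSuccessfully += chunk.size(); // this chunk made it through
    } catch (ChunkedBulkIndexer.EntityTooLargeException e) {
        chunkSize = Math.max(1, chunkSize / 2); // payload too large: halve and retry
    }
}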
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkRequest in project rssriver by dadoonet.
The class RssRiver, method start:
@Override
public void start() {
    if (logger.isInfoEnabled())
        logger.info("Starting rss stream");
    try {
        client.admin().indices().prepareCreate(indexName).execute().actionGet();
    } catch (Exception e) {
        if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
            // that's fine
        } else if (ExceptionsHelper.unwrapCause(e) instanceof ClusterBlockException) {
            // ok, not recovered yet..., let's start indexing and hope we
            // recover by the first bulk.
            // TODO: a smarter approach would be to register a cluster event
            // listener here and only start sampling once the block is removed.
        } else {
            logger.warn("failed to create index [{}], disabling river...", e, indexName);
            return;
        }
    }
    try {
        pushMapping(indexName, typeName, RssToJson.buildRssMapping(typeName, raw));
    } catch (Exception e) {
        logger.warn("failed to create mapping for [{}/{}], disabling river...", e, indexName, typeName);
        return;
    }
    // Creating the bulk processor
    this.bulkProcessor = BulkProcessor.builder(client, new BulkProcessor.Listener() {
        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            logger.debug("Going to execute new bulk composed of {} actions", request.numberOfActions());
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            logger.debug("Executed bulk composed of {} actions", request.numberOfActions());
            if (response.hasFailures()) {
                logger.warn("There were failures while executing bulk", response.buildFailureMessage());
                if (logger.isDebugEnabled()) {
                    for (BulkItemResponse item : response.getItems()) {
                        if (item.isFailed()) {
                            logger.debug("Error for {}/{}/{} for {} operation: {}", item.getIndex(), item.getType(), item.getId(), item.getOpType(), item.getFailureMessage());
                        }
                    }
                }
            }
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            logger.warn("Error executing bulk", failure);
        }
    }).setBulkActions(bulkSize).setConcurrentRequests(maxConcurrentBulk).setFlushInterval(bulkFlushInterval).build();
    // We create as many threads as there are feeds
    threads = new ArrayList<Thread>(feedsDefinition.size());
    int threadNumber = 0;
    for (RssRiverFeedDefinition feedDefinition : feedsDefinition) {
        Thread thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "rss_slurper_" + threadNumber).newThread(new RSSParser(feedDefinition));
        thread.start();
        threads.add(thread);
        threadNumber++;
    }
}
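With the processor in place, each RSSParser thread only has to hand individual index requests to it; batching by action count, concurrency, and flush interval is handled by the BulkProcessor. A hedged sketch of what one parser iteration might do per feed entry; the field names and source-building call are assumptions, not rssriver's exact code:

// assumes: import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
bulkProcessor.add(new IndexRequest(indexName, typeName)
        .source(jsonBuilder()
                .startObject()
                .field("title", entryTitle) // hypothetical values pulled
                .field("link", entryLink)   // from the parsed feed entry
                .endObject()));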
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkRequest in project sonarqube by SonarSource.
The class EsTester, method putDocuments:
public void putDocuments(IndexType indexType, BaseDoc... docs) {
    try {
        BulkRequest bulk = new BulkRequest().setRefreshPolicy(REFRESH_IMMEDIATE);
        for (BaseDoc doc : docs) {
            bulk.add(doc.toIndexRequest());
        }
        BulkResponse bulkResponse = ES_REST_CLIENT.bulk(bulk);
        if (bulkResponse.hasFailures()) {
            throw new IllegalStateException(bulkResponse.buildFailureMessage());
        }
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
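Because the bulk is sent with REFRESH_IMMEDIATE, the documents are searchable as soon as the call returns, which is what makes this helper usable from tests. A hedged usage sketch; the index type constant, doc factory, and countDocuments helper are illustrative, not SonarQube's exact fixtures:

es.putDocuments(TYPE_ISSUE, newIssueDoc("ISSUE-1"), newIssueDoc("ISSUE-2"));
assertThat(es.countDocuments(TYPE_ISSUE)).isEqualTo(2); // visible immediately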
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.bulk.BulkRequest in project elasticsearch by elastic.
The class AsyncBulkByScrollActionTests, method bulkRetryTestCase:
/**
 * Execute a bulk retry test case. The total number of failures is random; the number of retries allowed is set via
 * testRequest.setMaxRetries() and controlled by the failWithRejection parameter.
 */
private void bulkRetryTestCase(boolean failWithRejection) throws Exception {
    int totalFailures = randomIntBetween(1, testRequest.getMaxRetries());
    int size = randomIntBetween(1, 100);
    testRequest.setMaxRetries(totalFailures - (failWithRejection ? 1 : 0));
    client.bulksToReject = client.bulksAttempts.get() + totalFailures;
    /*
     * When we get a successful bulk response we usually start the next scroll request, but let's just intercept that so we don't
     * have to deal with it. We just wait for it to happen.
     */
    CountDownLatch successLatch = new CountDownLatch(1);
    DummyAsyncBulkByScrollAction action = new DummyActionWithoutBackoff() {
        @Override
        void startNextScroll(TimeValue lastBatchStartTime, int lastBatchSize) {
            successLatch.countDown();
        }
    };
    BulkRequest request = new BulkRequest();
    for (int i = 0; i < size + 1; i++) {
        request.add(new IndexRequest("index", "type", "id" + i));
    }
    action.sendBulkRequest(timeValueNanos(System.nanoTime()), request);
    if (failWithRejection) {
        BulkByScrollResponse response = listener.get();
        assertThat(response.getBulkFailures(), hasSize(1));
        assertEquals(response.getBulkFailures().get(0).getStatus(), RestStatus.TOO_MANY_REQUESTS);
        assertThat(response.getSearchFailures(), empty());
        assertNull(response.getReasonCancelled());
    } else {
        assertTrue(successLatch.await(10, TimeUnit.SECONDS));
    }
}
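The two branches correspond to the two ways such a helper is typically invoked. A hedged sketch of the calling tests; the method names are illustrative, not necessarily the suite's actual test names:

public void testBulkRejectionsRetryWithEnoughRetries() throws Exception {
    bulkRetryTestCase(false); // enough retries left: the bulk eventually succeeds
}

public void testBulkRejectionsRetryAndFailAnyway() throws Exception {
    bulkRetryTestCase(true); // one retry short: a TOO_MANY_REQUESTS failure surfaces
}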