use of org.elasticsearch.action.bulk.BulkResponse in project yacy_grid_mcp by yacy.
the class ElasticsearchClient method writeMapBulk.
/**
* bulk message write
* @param indexName
* the name of the index
* @param jsonMapList
* a list of json documents to be indexed; each BulkEntry carries its own id and type
* @return a BulkWriteResult holding the ids of created documents and a map of error messages,
* where the key is the id of the failed document and the value is an error string.
* The method was only fully successful if the error map is empty.
*/
public BulkWriteResult writeMapBulk(final String indexName, final List<BulkEntry> jsonMapList) {
long start = System.currentTimeMillis();
BulkRequestBuilder bulkRequest = elasticsearchClient.prepareBulk();
for (BulkEntry be : jsonMapList) {
if (be.id == null)
continue;
bulkRequest.add(elasticsearchClient.prepareIndex(indexName, be.type, be.id).setSource(be.jsonMap).setVersion(1).setCreate(false).setVersionType(VersionType.EXTERNAL_GTE)); // setCreate(false) enforces OpType.INDEX
}
BulkResponse bulkResponse = bulkRequest.get();
BulkWriteResult result = new BulkWriteResult();
for (BulkItemResponse r : bulkResponse.getItems()) {
String id = r.getId();
DocWriteResponse response = r.getResponse();
// getResponse() is null for failed items, so guard before reading the result
if (response != null && response.getResult() == DocWriteResponse.Result.CREATED)
result.created.add(id);
String err = r.getFailureMessage();
if (err != null) {
result.errors.put(id, err);
}
}
long duration = Math.max(1, System.currentTimeMillis() - start);
long regulator = 0;
int created = result.created.size();
long ops = created * 1000 / duration;
if (duration > throttling_time_threshold && ops < throttling_ops_threshold) {
regulator = (long) (throttling_factor * duration);
try {
Thread.sleep(regulator);
} catch (InterruptedException e) {
// throttling sleep interrupted; proceed without further delay
}
}
Data.logger.info("elastic write bulk to index " + indexName + ": " + jsonMapList.size() + " entries, " + result.created.size() + " created, " + result.errors.size() + " errors, " + duration + " ms" + (regulator == 0 ? "" : ", throttled with " + regulator + " ms") + ", " + ops + " objects/second");
return result;
}
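As a usage illustration (not part of the original source), a caller might drive writeMapBulk roughly as follows; the index name, document values and the BulkEntry constructor are assumptions, since the snippet above only shows that BulkEntry exposes id, type and jsonMap:
// hypothetical caller; assumes a BulkEntry(id, type, jsonMap) constructor and an ElasticsearchClient instance named client
List<BulkEntry> entries = new ArrayList<>();
Map<String, Object> doc = new HashMap<>();
doc.put("url", "http://example.com");
doc.put("title", "example page");
entries.add(new BulkEntry("doc-1", "web", doc));
BulkWriteResult result = client.writeMapBulk("crawler", entries);
// errors maps the id of each failed document to its error string
result.errors.forEach((id, err) -> System.err.println("failed " + id + ": " + err));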
use of org.elasticsearch.action.bulk.BulkResponse in project vertigo by KleeGroup.
the class ESStatement method remove.
/**
* Deletes documents.
* @param query Filter query selecting the documents to delete
*/
void remove(final ListFilter query) {
Assertion.checkNotNull(query);
// -----
final QueryBuilder queryBuilder = ESSearchRequestBuilder.translateToQueryBuilder(query);
final SearchRequestBuilder searchRequestBuilder = esClient.prepareSearch().setIndices(indexName).setTypes(typeName).setSearchType(SearchType.QUERY_THEN_FETCH).setNoFields().setSize(DEFAULT_SCROLL_SIZE).setScroll(DEFAULT_SCROLL_KEEP_ALIVE).addSort("_id", SortOrder.ASC).setQuery(queryBuilder);
try {
// run the initial search to get the first page of hits and the scroll id
SearchResponse queryResponse = searchRequestBuilder.execute().actionGet();
// the scrolling id
final String scrollid = queryResponse.getScrollId();
SearchHits searchHits = queryResponse.getHits();
while (searchHits.getHits().length > 0) {
// collect the id for this scroll
final BulkRequestBuilder bulkRequest = esClient.prepareBulk().setRefresh(BULK_REFRESH);
for (final SearchHit searchHit : searchHits) {
bulkRequest.add(esClient.prepareDelete(indexName, typeName, searchHit.getId()));
}
// bulk delete all ids
final BulkResponse bulkResponse = bulkRequest.execute().actionGet();
if (bulkResponse.hasFailures()) {
throw new VSystemException("Can't removeBQuery {0} into {1} index.\nCause by {3}", typeName, indexName, bulkResponse.buildFailureMessage());
}
LOGGER.info("Deleted " + searchHits.getHits().length + " elements from index " + indexName);
// new scrolling
queryResponse = esClient.prepareSearchScroll(scrollid).setScroll(DEFAULT_SCROLL_KEEP_ALIVE).execute().actionGet();
searchHits = queryResponse.getHits();
}
} catch (final SearchPhaseExecutionException e) {
final VUserException vue = new VUserException(SearchResource.DYNAMO_SEARCH_QUERY_SYNTAX_ERROR);
vue.initCause(e);
throw vue;
}
}
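The method above is a scroll-then-bulk-delete loop: page through the matching ids with a scroll, delete each page in one bulk request, then fetch the next page until it comes back empty. Stripped of the vertigo wrappers, the same pattern can be sketched against a bare client; the index, type, query and client variable below are placeholders, not taken from the project:
// sketch of the scroll + bulk delete pattern with plain transport-client calls
SearchResponse page = client.prepareSearch("my_index")
    .setQuery(QueryBuilders.termQuery("status", "obsolete"))
    .setSize(500)
    .setScroll(TimeValue.timeValueMinutes(1))
    .get();
while (page.getHits().getHits().length > 0) {
    BulkRequestBuilder bulk = client.prepareBulk();
    for (SearchHit hit : page.getHits()) {
        bulk.add(client.prepareDelete("my_index", "my_type", hit.getId()));
    }
    bulk.get();
    // fetch the next scroll page; the loop ends when a page has no hits
    page = client.prepareSearchScroll(page.getScrollId()).setScroll(TimeValue.timeValueMinutes(1)).get();
}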
use of org.elasticsearch.action.bulk.BulkResponse in project vertigo by KleeGroup.
the class ESStatement method putAll.
/**
* @param indexCollection Collection of indexes to insert
*/
void putAll(final Collection<SearchIndex<K, I>> indexCollection) {
// Insertion specific to the indexing engine.
try {
final BulkRequestBuilder bulkRequest = esClient.prepareBulk().setRefresh(BULK_REFRESH);
for (final SearchIndex<K, I> index : indexCollection) {
try (final XContentBuilder xContentBuilder = esDocumentCodec.index2XContentBuilder(index)) {
bulkRequest.add(esClient.prepareIndex().setIndex(indexName).setType(typeName).setId(index.getURI().urn()).setSource(xContentBuilder));
}
}
final BulkResponse bulkResponse = bulkRequest.execute().actionGet();
if (bulkResponse.hasFailures()) {
throw new VSystemException("Can't putAll {0} into {1} index.\nCause by {2}", typeName, indexName, bulkResponse.buildFailureMessage());
}
} catch (final IOException e) {
handleIOException(e);
}
}
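Note that hasFailures() only signals that at least one bulk item failed; when per-document diagnostics are needed, the individual items of the BulkResponse could be inspected instead. A minimal sketch (not code from the project):
// sketch: per-item failure inspection as an alternative to buildFailureMessage()
for (final BulkItemResponse item : bulkResponse.getItems()) {
    if (item.isFailed()) {
        LOGGER.warn("index failure for id " + item.getId() + ": " + item.getFailureMessage());
    }
}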
use of org.elasticsearch.action.bulk.BulkResponse in project core-ng-project by neowu.
the class ElasticSearchTypeImpl method bulkDelete.
@Override
public void bulkDelete(BulkDeleteRequest request) {
if (request.ids == null || request.ids.isEmpty())
throw Exceptions.error("request.ids must not be empty");
StopWatch watch = new StopWatch();
String index = request.index == null ? this.index : request.index;
BulkRequestBuilder builder = client().prepareBulk();
for (String id : request.ids) {
builder.add(client().prepareDelete(index, type, id));
}
long esTookTime = 0;
try {
BulkResponse response = builder.get();
esTookTime = response.getTook().nanos();
if (response.hasFailures())
throw new SearchException(response.buildFailureMessage());
} catch (ElasticsearchException e) {
// because Elasticsearch runs the request on an async executor, wrap the exception to retain the call site that caused it
throw new SearchException(e);
} finally {
long elapsedTime = watch.elapsedTime();
ActionLogContext.track("elasticsearch", elapsedTime, 0, request.ids.size());
logger.debug("bulkDelete, index={}, type={}, size={}, esTookTime={}, elapsedTime={}", index, type, request.ids.size(), esTookTime, elapsedTime);
checkSlowOperation(elapsedTime);
}
}
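A hypothetical caller of bulkDelete, assuming BulkDeleteRequest exposes the public index and ids fields the method reads and has a no-argument constructor; the index, ids and orderType variable are placeholders:
BulkDeleteRequest request = new BulkDeleteRequest();
request.index = "orders"; // optional; when null the type's default index is used
request.ids = Arrays.asList("order-1", "order-2");
orderType.bulkDelete(request); // orderType stands for an ElasticSearchType instance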
use of org.elasticsearch.action.bulk.BulkResponse in project core-ng-project by neowu.
the class ElasticSearchTypeImpl method bulkIndex.
@Override
public void bulkIndex(BulkIndexRequest<T> request) {
if (request.sources == null || request.sources.isEmpty())
throw Exceptions.error("request.sources must not be empty");
StopWatch watch = new StopWatch();
String index = request.index == null ? this.index : request.index;
BulkRequestBuilder builder = client().prepareBulk();
for (Map.Entry<String, T> entry : request.sources.entrySet()) {
String id = entry.getKey();
T source = entry.getValue();
validator.validate(source);
byte[] document = writer.toJSON(source);
builder.add(client().prepareIndex(index, type, id).setSource(document, XContentType.JSON));
}
long esTookTime = 0;
try {
BulkResponse response = builder.get();
esTookTime = response.getTook().nanos();
if (response.hasFailures())
throw new SearchException(response.buildFailureMessage());
} catch (ElasticsearchException e) {
// because Elasticsearch runs the request on an async executor, wrap the exception to retain the call site that caused it
throw new SearchException(e);
} finally {
long elapsedTime = watch.elapsedTime();
ActionLogContext.track("elasticsearch", elapsedTime, 0, request.sources.size());
logger.debug("bulkIndex, index={}, type={}, size={}, esTookTime={}, elapsedTime={}", index, type, request.sources.size(), esTookTime, elapsedTime);
checkSlowOperation(elapsedTime);
}
}
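bulkIndex can be exercised the same way; the sketch below assumes BulkIndexRequest<T> exposes the public index and sources fields read above, and ProductDocument stands in for the mapped document class:
BulkIndexRequest<ProductDocument> request = new BulkIndexRequest<>();
request.sources = new HashMap<>();
request.sources.put("product-1", new ProductDocument("product-1", "red bike"));
request.sources.put("product-2", new ProductDocument("product-2", "blue bike"));
productType.bulkIndex(request); // each source is validated, serialized to JSON and sent in a single bulk request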