Use of org.elasticsearch.action.admin.indices.refresh.RefreshRequest in project elasticsearch by elastic.
The class RestRefreshAction, method prepareRequest:

@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
    // Parse the comma-separated index list from the REST path and apply any indices options passed on the request.
    RefreshRequest refreshRequest = new RefreshRequest(Strings.splitStringByCommaToArray(request.param("index")));
    refreshRequest.indicesOptions(IndicesOptions.fromRequest(request, refreshRequest.indicesOptions()));
    return channel -> client.admin().indices().refresh(refreshRequest, new RestBuilderListener<RefreshResponse>(channel) {
        @Override
        public RestResponse buildResponse(RefreshResponse response, XContentBuilder builder) throws Exception {
            builder.startObject();
            buildBroadcastShardsHeader(builder, request, response);
            builder.endObject();
            return new BytesRestResponse(response.getStatus(), builder);
        }
    });
}
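For comparison, a minimal sketch of issuing the same refresh directly through the indices admin client, bypassing the REST layer. The index names, the Client handle, and the choice of IndicesOptions are illustrative assumptions, not taken from the handler above:

// Minimal sketch, not from the Elasticsearch sources: refresh explicit indices via the admin client.
// The index names and the `client` handle are placeholder assumptions.
void refreshLogsIndices(Client client) {
    RefreshRequest refreshRequest = new RefreshRequest("logs-2017-01", "logs-2017-02");
    refreshRequest.indicesOptions(IndicesOptions.lenientExpandOpen());
    RefreshResponse refreshResponse = client.admin().indices().refresh(refreshRequest).actionGet();
    // RefreshResponse is a broadcast response: it reports total, successful and failed shard counts.
    assert refreshResponse.getFailedShards() == 0;
}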
Use of org.elasticsearch.action.admin.indices.refresh.RefreshRequest in project elasticsearch by elastic.
The class AbstractAsyncBulkByScrollAction, method refreshAndFinish:

/**
 * Start terminating a request that finished non-catastrophically by refreshing the modified indices and then proceeding to
 * {@link #finishHim(Exception, List, List, boolean)}.
 */
void refreshAndFinish(List<Failure> indexingFailures, List<SearchFailure> searchFailures, boolean timedOut) {
    // Skip the refresh if the task was cancelled, the request did not ask for one, or nothing was written.
    if (task.isCancelled() || false == mainRequest.isRefresh() || destinationIndices.isEmpty()) {
        finishHim(null, indexingFailures, searchFailures, timedOut);
        return;
    }
    RefreshRequest refresh = new RefreshRequest();
    refresh.indices(destinationIndices.toArray(new String[destinationIndices.size()]));
    client.admin().indices().refresh(refresh, new ActionListener<RefreshResponse>() {
        @Override
        public void onResponse(RefreshResponse response) {
            finishHim(null, indexingFailures, searchFailures, timedOut);
        }

        @Override
        public void onFailure(Exception e) {
            finishHim(e);
        }
    });
}
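Where no listener-based continuation is needed, the same refresh can simply be awaited. A minimal sketch of the blocking counterpart, with assumed placeholder index names:

// Minimal sketch: the synchronous variant of the refresh above.
// actionGet() blocks until every targeted shard has been refreshed (or the request fails).
RefreshResponse refreshResponse = client.admin().indices()
        .refresh(new RefreshRequest("dest-index-1", "dest-index-2"))
        .actionGet();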
Use of org.elasticsearch.action.admin.indices.refresh.RefreshRequest in project elasticsearch by elastic.
The class BulkProcessorRetryIT, method executeBulkRejectionLoad:

private void executeBulkRejectionLoad(BackoffPolicy backoffPolicy, boolean rejectedExecutionExpected) throws Throwable {
    final CorrelatingBackoffPolicy internalPolicy = new CorrelatingBackoffPolicy(backoffPolicy);
    int numberOfAsyncOps = randomIntBetween(600, 700);
    final CountDownLatch latch = new CountDownLatch(numberOfAsyncOps);
    final Set<Object> responses = Collections.newSetFromMap(new ConcurrentHashMap<>());

    assertAcked(prepareCreate(INDEX_NAME));
    ensureGreen();

    BulkProcessor bulkProcessor = BulkProcessor.builder(client(), new BulkProcessor.Listener() {
        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            // no op
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            internalPolicy.logResponse(response);
            responses.add(response);
            latch.countDown();
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            responses.add(failure);
            latch.countDown();
        }
    }).setBulkActions(1).setConcurrentRequests(randomIntBetween(0, 100)).setBackoffPolicy(internalPolicy).build();

    indexDocs(bulkProcessor, numberOfAsyncOps);
    latch.await(10, TimeUnit.SECONDS);
    bulkProcessor.close();

    assertThat(responses.size(), equalTo(numberOfAsyncOps));

    // validate all responses
    for (Object response : responses) {
        if (response instanceof BulkResponse) {
            BulkResponse bulkResponse = (BulkResponse) response;
            for (BulkItemResponse bulkItemResponse : bulkResponse.getItems()) {
                if (bulkItemResponse.isFailed()) {
                    BulkItemResponse.Failure failure = bulkItemResponse.getFailure();
                    Throwable rootCause = ExceptionsHelper.unwrapCause(failure.getCause());
                    if (rootCause instanceof EsRejectedExecutionException) {
                        if (rejectedExecutionExpected == false) {
                            Iterator<TimeValue> backoffState = internalPolicy.backoffStateFor(bulkResponse);
                            assertNotNull("backoffState is null (indicates a bulk request got rejected without retry)", backoffState);
                            if (backoffState.hasNext()) {
                                // we're not expecting that we overwhelmed it even once when we maxed out the number of retries
                                throw new AssertionError("Got rejected although backoff policy would allow more retries", rootCause);
                            } else {
                                logger.debug("We maxed out the number of bulk retries and got rejected (this is ok).");
                            }
                        }
                    } else {
                        throw new AssertionError("Unexpected failure", rootCause);
                    }
                }
            }
        } else {
            Throwable t = (Throwable) response;
            // we're not expecting any other errors
            throw new AssertionError("Unexpected failure", t);
        }
    }

    client().admin().indices().refresh(new RefreshRequest()).get();

    // validate we did not create any duplicates due to retries
    Matcher<Long> searchResultCount;
    // it is ok if we lost some index operations to rejected executions (which is possible even when backing off, although less likely)
    searchResultCount = lessThanOrEqualTo((long) numberOfAsyncOps);

    SearchResponse results = client().prepareSearch(INDEX_NAME)
            .setTypes(TYPE_NAME)
            .setQuery(QueryBuilders.matchAllQuery())
            .setSize(0)
            .get();
    assertThat(results.getHits().getTotalHits(), searchResultCount);
}
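Outside of this test, a BulkProcessor would normally be configured with one of the stock backoff policies rather than the test-only CorrelatingBackoffPolicy. A minimal sketch, where the empty listener bodies, bulk size, concurrency, and backoff parameters are illustrative assumptions:

// Minimal sketch, not from the test above: a BulkProcessor using the stock exponential backoff policy.
// The bulk size, concurrency and backoff parameters below are illustrative assumptions.
BulkProcessor processor = BulkProcessor.builder(client(), new BulkProcessor.Listener() {
    @Override
    public void beforeBulk(long executionId, BulkRequest request) {}

    @Override
    public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {}

    @Override
    public void afterBulk(long executionId, BulkRequest request, Throwable failure) {}
}).setBulkActions(500)
  .setConcurrentRequests(1)
  .setBackoffPolicy(BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 3))
  .build();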
Use of org.elasticsearch.action.admin.indices.refresh.RefreshRequest in project elasticsearch by elastic.
The class IndicesRequestIT, method testRefresh:

public void testRefresh() {
    String[] indexShardActions = new String[] {
            TransportShardRefreshAction.NAME,
            TransportShardRefreshAction.NAME + "[r]",
            TransportShardRefreshAction.NAME + "[p]" };
    interceptTransportActions(indexShardActions);

    RefreshRequest refreshRequest = new RefreshRequest(randomIndicesOrAliases());
    internalCluster().coordOnlyNodeClient().admin().indices().refresh(refreshRequest).actionGet();

    clearInterceptedActions();
    String[] indices = new IndexNameExpressionResolver(Settings.EMPTY)
            .concreteIndexNames(client().admin().cluster().prepareState().get().getState(), refreshRequest);
    assertIndicesSubset(Arrays.asList(indices), indexShardActions);
}