use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.
the class TransportRolloverAction method masterOperation.
@Override
protected void masterOperation(final RolloverRequest rolloverRequest, final ClusterState state,
                               final ActionListener<RolloverResponse> listener) {
    final MetaData metaData = state.metaData();
    validate(metaData, rolloverRequest);
    final AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(rolloverRequest.getAlias());
    final IndexMetaData indexMetaData = aliasOrIndex.getIndices().get(0);
    final String sourceProvidedName = indexMetaData.getSettings().get(IndexMetaData.SETTING_INDEX_PROVIDED_NAME,
        indexMetaData.getIndex().getName());
    final String sourceIndexName = indexMetaData.getIndex().getName();
    final String unresolvedName = (rolloverRequest.getNewIndexName() != null)
        ? rolloverRequest.getNewIndexName()
        : generateRolloverIndexName(sourceProvidedName, indexNameExpressionResolver);
    final String rolloverIndexName = indexNameExpressionResolver.resolveDateMathExpression(unresolvedName);
    // will fail if the index already exists
    MetaDataCreateIndexService.validateIndexName(rolloverIndexName, state);
    client.admin().indices().prepareStats(sourceIndexName).clear().setDocs(true).execute(
        new ActionListener<IndicesStatsResponse>() {
            @Override
            public void onResponse(IndicesStatsResponse statsResponse) {
                final Set<Condition.Result> conditionResults = evaluateConditions(rolloverRequest.getConditions(),
                    statsResponse.getTotal().getDocs(), metaData.index(sourceIndexName));
                if (rolloverRequest.isDryRun()) {
                    listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults,
                        true, false, false, false));
                    return;
                }
                if (conditionResults.size() == 0 || conditionResults.stream().anyMatch(result -> result.matched)) {
                    CreateIndexClusterStateUpdateRequest updateRequest =
                        prepareCreateIndexRequest(unresolvedName, rolloverIndexName, rolloverRequest);
                    createIndexService.createIndex(updateRequest, ActionListener.wrap(createIndexClusterStateUpdateResponse -> {
                        indexAliasesService.indicesAliases(
                            prepareRolloverAliasesUpdateRequest(sourceIndexName, rolloverIndexName, rolloverRequest),
                            ActionListener.wrap(aliasClusterStateUpdateResponse -> {
                                if (aliasClusterStateUpdateResponse.isAcknowledged()) {
                                    activeShardsObserver.waitForActiveShards(rolloverIndexName,
                                        rolloverRequest.getCreateIndexRequest().waitForActiveShards(),
                                        rolloverRequest.masterNodeTimeout(),
                                        isShardsAcked -> listener.onResponse(new RolloverResponse(sourceIndexName,
                                            rolloverIndexName, conditionResults, false, true, true, isShardsAcked)),
                                        listener::onFailure);
                                } else {
                                    listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName,
                                        conditionResults, false, true, false, false));
                                }
                            }, listener::onFailure));
                    }, listener::onFailure));
                } else {
                    // conditions not met
                    listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults,
                        false, false, false, false));
                }
            }

            @Override
            public void onFailure(Exception e) {
                listener.onFailure(e);
            }
        });
}
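The interesting part of this usage is how two asynchronous cluster-state updates (create the new index, then swap the aliases onto it) are chained by nesting ActionListener.wrap(...) callbacks, with every failure path funneled into the caller's listener. Below is a minimal, self-contained sketch of that chaining pattern in plain Java; the Listener interface and the createIndex/updateAliases/rollover methods are hypothetical stand-ins, not Elasticsearch APIs.

// Hypothetical sketch, not Elasticsearch code: chaining two async steps through listeners.
import java.util.function.Consumer;

public class ListenerChainSketch {

    // Minimal stand-in for org.elasticsearch.action.ActionListener<T>.
    interface Listener<T> {
        void onResponse(T response);
        void onFailure(Exception e);

        // Rough equivalent of ActionListener.wrap(onResponse, onFailure).
        static <T> Listener<T> wrap(Consumer<T> onOk, Consumer<Exception> onErr) {
            return new Listener<T>() {
                @Override public void onResponse(T response) { onOk.accept(response); }
                @Override public void onFailure(Exception e) { onErr.accept(e); }
            };
        }
    }

    // Step 1: pretend to create the rollover target index.
    static void createIndex(String source, Listener<String> listener) {
        listener.onResponse(source + "-000002");
    }

    // Step 2: pretend to move the alias onto the new index.
    static void updateAliases(String newIndex, Listener<Boolean> listener) {
        listener.onResponse(true);
    }

    // Chain the steps; any failure short-circuits straight to the caller's listener,
    // just as listener::onFailure is handed to every wrap(...) call in the snippet above.
    static void rollover(String source, Listener<Boolean> caller) {
        createIndex(source, Listener.wrap(
            newIndex -> updateAliases(newIndex, Listener.wrap(caller::onResponse, caller::onFailure)),
            caller::onFailure));
    }

    public static void main(String[] args) {
        rollover("logs", Listener.wrap(
            acked -> System.out.println("rolled over, acknowledged=" + acked),
            e -> System.err.println("rollover failed: " + e)));
    }
}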
use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.
the class InternalClusterInfoService method refresh.
/**
 * Refreshes the ClusterInfo in a blocking fashion
 */
public final ClusterInfo refresh() {
    if (logger.isTraceEnabled()) {
        logger.trace("Performing ClusterInfoUpdateJob");
    }
    final CountDownLatch nodeLatch = updateNodeStats(new ActionListener<NodesStatsResponse>() {
        @Override
        public void onResponse(NodesStatsResponse nodeStatses) {
            ImmutableOpenMap.Builder<String, DiskUsage> newLeastAvaiableUsages = ImmutableOpenMap.builder();
            ImmutableOpenMap.Builder<String, DiskUsage> newMostAvaiableUsages = ImmutableOpenMap.builder();
            fillDiskUsagePerNode(logger, nodeStatses.getNodes(), newLeastAvaiableUsages, newMostAvaiableUsages);
            leastAvailableSpaceUsages = newLeastAvaiableUsages.build();
            mostAvailableSpaceUsages = newMostAvaiableUsages.build();
        }

        @Override
        public void onFailure(Exception e) {
            if (e instanceof ReceiveTimeoutTransportException) {
                logger.error("NodeStatsAction timed out for ClusterInfoUpdateJob", e);
            } else {
                if (e instanceof ClusterBlockException) {
                    if (logger.isTraceEnabled()) {
                        logger.trace("Failed to execute NodeStatsAction for ClusterInfoUpdateJob", e);
                    }
                } else {
                    logger.warn("Failed to execute NodeStatsAction for ClusterInfoUpdateJob", e);
                }
                // we empty the usages list, to be safe - we don't know what's going on.
                leastAvailableSpaceUsages = ImmutableOpenMap.of();
                mostAvailableSpaceUsages = ImmutableOpenMap.of();
            }
        }
    });
    final CountDownLatch indicesLatch = updateIndicesStats(new ActionListener<IndicesStatsResponse>() {
        @Override
        public void onResponse(IndicesStatsResponse indicesStatsResponse) {
            ShardStats[] stats = indicesStatsResponse.getShards();
            ImmutableOpenMap.Builder<String, Long> newShardSizes = ImmutableOpenMap.builder();
            ImmutableOpenMap.Builder<ShardRouting, String> newShardRoutingToDataPath = ImmutableOpenMap.builder();
            buildShardLevelInfo(logger, stats, newShardSizes, newShardRoutingToDataPath, clusterService.state());
            shardSizes = newShardSizes.build();
            shardRoutingToDataPath = newShardRoutingToDataPath.build();
        }

        @Override
        public void onFailure(Exception e) {
            if (e instanceof ReceiveTimeoutTransportException) {
                logger.error("IndicesStatsAction timed out for ClusterInfoUpdateJob", e);
            } else {
                if (e instanceof ClusterBlockException) {
                    if (logger.isTraceEnabled()) {
                        logger.trace("Failed to execute IndicesStatsAction for ClusterInfoUpdateJob", e);
                    }
                } else {
                    logger.warn("Failed to execute IndicesStatsAction for ClusterInfoUpdateJob", e);
                }
                // we empty the usages list, to be safe - we don't know what's going on.
                shardSizes = ImmutableOpenMap.of();
                shardRoutingToDataPath = ImmutableOpenMap.of();
            }
        }
    });
    try {
        nodeLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // restore interrupt status
        Thread.currentThread().interrupt();
        logger.warn("Failed to update node information for ClusterInfoUpdateJob within {} timeout", fetchTimeout);
    }
    try {
        indicesLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // restore interrupt status
        Thread.currentThread().interrupt();
        logger.warn("Failed to update shard information for ClusterInfoUpdateJob within {} timeout", fetchTimeout);
    }
    ClusterInfo clusterInfo = getClusterInfo();
    for (Listener l : listeners) {
        try {
            l.onNewInfo(clusterInfo);
        } catch (Exception e) {
            logger.info("Failed executing ClusterInfoService listener", e);
        }
    }
    return clusterInfo;
}
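refresh() turns two asynchronous stats calls into a blocking operation: each listener stores its result and counts down a latch, and the calling thread awaits both latches with a bounded timeout, restoring the interrupt flag if the wait is interrupted. A minimal sketch of that latch pattern, using plain Java and placeholder names rather than the actual updateNodeStats/updateIndicesStats plumbing:

// Hypothetical sketch: block a caller on an async callback via CountDownLatch.
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class BlockingRefreshSketch {
    public static void main(String[] args) {
        final CountDownLatch latch = new CountDownLatch(1);

        // Stand-in for updateNodeStats(listener): the response arrives on another thread,
        // and besides storing the result the listener's only job is to release the latch.
        new Thread(() -> {
            // ... copy the stats into shared fields here, as onResponse does above ...
            latch.countDown();
        }).start();

        try {
            // Bounded wait, mirroring fetchTimeout in the original.
            if (!latch.await(2, TimeUnit.SECONDS)) {
                System.err.println("stats update did not complete within the timeout");
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore interrupt status, as refresh() does
            System.err.println("interrupted while waiting for the stats update");
        }
    }
}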
use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.
the class SyncedFlushService method innerAttemptSyncedFlush.
private void innerAttemptSyncedFlush(final ShardId shardId, final ClusterState state,
                                     final ActionListener<ShardsSyncedFlushResult> actionListener) {
    try {
        final IndexShardRoutingTable shardRoutingTable = getShardRoutingTable(shardId, state);
        final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
        final int totalShards = shardRoutingTable.getSize();
        if (activeShards.size() == 0) {
            actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "no active shards"));
            return;
        }
        final ActionListener<Map<String, Engine.CommitId>> commitIdsListener =
            new ActionListener<Map<String, Engine.CommitId>>() {
                @Override
                public void onResponse(final Map<String, Engine.CommitId> commitIds) {
                    if (commitIds.isEmpty()) {
                        actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards,
                            "all shards failed to commit on pre-sync"));
                        return;
                    }
                    final ActionListener<InFlightOpsResponse> inflightOpsListener =
                        new ActionListener<InFlightOpsResponse>() {
                            @Override
                            public void onResponse(InFlightOpsResponse response) {
                                final int inflight = response.opCount();
                                assert inflight >= 0;
                                if (inflight != 0) {
                                    actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards,
                                        "[" + inflight + "] ongoing operations on primary"));
                                } else {
                                    // 3. now send the sync request to all the shards
                                    String syncId = UUIDs.base64UUID();
                                    sendSyncRequests(syncId, activeShards, state, commitIds, shardId, totalShards, actionListener);
                                }
                            }

                            @Override
                            public void onFailure(Exception e) {
                                actionListener.onFailure(e);
                            }
                        };
                    // 2. fetch in flight operations
                    getInflightOpsCount(shardId, state, shardRoutingTable, inflightOpsListener);
                }

                @Override
                public void onFailure(Exception e) {
                    actionListener.onFailure(e);
                }
            };
        // 1. send pre-sync flushes to all replicas
        sendPreSyncRequests(activeShards, state, shardId, commitIdsListener);
    } catch (Exception e) {
        actionListener.onFailure(e);
    }
}
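Two things stand out in this usage: the three steps (pre-sync flush, in-flight operation count, sync request) are sequenced by nesting anonymous ActionListener instances, and expected conditions such as "no active shards" or pending operations are reported through onResponse with an explanatory result, while only genuine errors go to onFailure. A compact sketch of that split, with a hypothetical Listener interface and step methods standing in for the service calls:

// Hypothetical sketch: sequence steps with nested listeners and report "expected"
// outcomes through onResponse rather than onFailure.
public class SyncedFlushSketch {

    // Stand-in for ActionListener<T>; not the Elasticsearch interface itself.
    interface Listener<T> {
        void onResponse(T result);
        void onFailure(Exception e);
    }

    // Step 2 stand-in: report how many operations are still in flight.
    static void countInFlightOps(Listener<Integer> listener) {
        listener.onResponse(0);
    }

    // Step 1 stand-in: kick off the check, then decide what to do with the count.
    static void attemptSync(Listener<String> caller) {
        countInFlightOps(new Listener<Integer>() {
            @Override
            public void onResponse(Integer inflight) {
                if (inflight != 0) {
                    // An expected condition: answer the caller with a descriptive result.
                    caller.onResponse("[" + inflight + "] ongoing operations on primary");
                } else {
                    // Only now carry on to the final step.
                    caller.onResponse("synced with id " + java.util.UUID.randomUUID());
                }
            }

            @Override
            public void onFailure(Exception e) {
                // Unexpected errors are forwarded unchanged.
                caller.onFailure(e);
            }
        });
    }

    public static void main(String[] args) {
        attemptSync(new Listener<String>() {
            @Override public void onResponse(String result) { System.out.println(result); }
            @Override public void onFailure(Exception e) { e.printStackTrace(); }
        });
    }
}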
use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.
the class TransportClientNodesServiceTests method testListenerFailures.
public void testListenerFailures() throws InterruptedException {
    int iters = iterations(10, 100);
    for (int i = 0; i < iters; i++) {
        try (TestIteration iteration = new TestIteration()) {
            // stop transport from responding early
            iteration.transport.endConnectMode();
            final CountDownLatch latch = new CountDownLatch(1);
            final AtomicInteger finalFailures = new AtomicInteger();
            final AtomicReference<Throwable> finalFailure = new AtomicReference<>();
            final AtomicReference<TestResponse> response = new AtomicReference<>();
            ActionListener<TestResponse> actionListener = new ActionListener<TestResponse>() {
                @Override
                public void onResponse(TestResponse testResponse) {
                    response.set(testResponse);
                    latch.countDown();
                }

                @Override
                public void onFailure(Exception e) {
                    finalFailures.incrementAndGet();
                    finalFailure.set(e);
                    latch.countDown();
                }
            };
            final AtomicInteger preSendFailures = new AtomicInteger();
            iteration.transportClientNodesService.execute((node, retryListener) -> {
                if (rarely()) {
                    preSendFailures.incrementAndGet();
                    // throw whatever exception that is not a subclass of ConnectTransportException
                    throw new IllegalArgumentException();
                }
                iteration.transportService.sendRequest(node, "action", new TestRequest(), TransportRequestOptions.EMPTY,
                    new TransportResponseHandler<TestResponse>() {
                        @Override
                        public TestResponse newInstance() {
                            return new TestResponse();
                        }

                        @Override
                        public void handleResponse(TestResponse response1) {
                            retryListener.onResponse(response1);
                        }

                        @Override
                        public void handleException(TransportException exp) {
                            retryListener.onFailure(exp);
                        }

                        @Override
                        public String executor() {
                            return randomBoolean() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC;
                        }
                    });
            }, actionListener);
            assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
            // there can be only either one failure that causes the request to fail straightaway or success
            assertThat(preSendFailures.get() + iteration.transport.failures() + iteration.transport.successes(),
                lessThanOrEqualTo(1));
            if (iteration.transport.successes() == 1) {
                assertThat(finalFailures.get(), equalTo(0));
                assertThat(finalFailure.get(), nullValue());
                assertThat(response.get(), notNullValue());
            } else {
                assertThat(finalFailures.get(), equalTo(1));
                assertThat(finalFailure.get(), notNullValue());
                assertThat(response.get(), nullValue());
                if (preSendFailures.get() == 0 && iteration.transport.failures() == 0) {
                    assertThat(finalFailure.get(), instanceOf(NoNodeAvailableException.class));
                }
            }
            assertThat(iteration.transport.triedNodes().size(), lessThanOrEqualTo(iteration.listNodesCount));
            assertThat(iteration.transport.triedNodes().size(),
                equalTo(iteration.transport.connectTransportExceptions() + iteration.transport.failures()
                    + iteration.transport.successes()));
        }
    }
}
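The test drives an asynchronous call from a synchronous test method by giving it an ActionListener that only records its outcome: the response and failure land in AtomicReference/AtomicInteger holders, and both paths count down a latch so the test can await completion before asserting. A stripped-down sketch of that capture-and-await pattern with a hypothetical Listener interface and asyncCall method:

// Hypothetical sketch: capture an async callback's outcome so a synchronous caller can assert on it.
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class CapturingListenerSketch {

    interface Listener<T> {
        void onResponse(T response);
        void onFailure(Exception e);
    }

    // Stand-in for the code under test: completes on another thread.
    static void asyncCall(Listener<String> listener) {
        new Thread(() -> listener.onResponse("ok")).start();
    }

    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicReference<String> response = new AtomicReference<>();
        final AtomicReference<Exception> failure = new AtomicReference<>();

        asyncCall(new Listener<String>() {
            @Override
            public void onResponse(String r) {
                response.set(r);
                latch.countDown(); // release the waiting caller on success
            }

            @Override
            public void onFailure(Exception e) {
                failure.set(e);
                latch.countDown(); // and on failure, so the wait always ends
            }
        });

        // A real test would assert on these; here we just print them.
        if (latch.await(1, TimeUnit.SECONDS)) {
            System.out.println("response=" + response.get() + ", failure=" + failure.get());
        } else {
            System.err.println("async call did not complete in time");
        }
    }
}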
use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.
the class ParentTaskAssigningClientTests method testSetsParentId.
public void testSetsParentId() {
    TaskId[] parentTaskId = new TaskId[] { new TaskId(randomAsciiOfLength(3), randomLong()) };
    // This mock will do nothing but verify that parentTaskId is set on all requests sent to it.
    NoOpClient mock = new NoOpClient(getTestName()) {
        @Override
        protected <Request extends ActionRequest, Response extends ActionResponse,
                RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
                    Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
            assertEquals(parentTaskId[0], request.getParentTask());
            super.doExecute(action, request, listener);
        }
    };
    try (ParentTaskAssigningClient client = new ParentTaskAssigningClient(mock, parentTaskId[0])) {
        // All of these should have the parentTaskId set
        client.bulk(new BulkRequest());
        client.search(new SearchRequest());
        client.clearScroll(new ClearScrollRequest());
        // Now lets verify that unwrapped calls don't have the parentTaskId set
        parentTaskId[0] = TaskId.EMPTY_TASK_ID;
        client.unwrap().bulk(new BulkRequest());
        client.unwrap().search(new SearchRequest());
        client.unwrap().clearScroll(new ClearScrollRequest());
    }
}
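ParentTaskAssigningClient is exercised here as a decorator: it wraps another client, stamps the parent task id on every request it forwards, and unwrap() exposes the undecorated client for requests that must not be tagged. A small sketch of that wrap/stamp/unwrap idea with a hypothetical Client interface and Request type, not the Elasticsearch classes themselves:

// Hypothetical sketch: a delegating client that tags every request before forwarding it.
public class TaggingClientSketch {

    static class Request {
        String parentTask = "none";
    }

    interface Client {
        void execute(Request request);
    }

    // Decorator that stamps a parent task id on every request, then delegates.
    static class TaggingClient implements Client {
        private final Client delegate;
        private final String parentTask;

        TaggingClient(Client delegate, String parentTask) {
            this.delegate = delegate;
            this.parentTask = parentTask;
        }

        @Override
        public void execute(Request request) {
            request.parentTask = parentTask; // the only job of the wrapper
            delegate.execute(request);
        }

        // Like ParentTaskAssigningClient#unwrap: hand back the untagged client.
        Client unwrap() {
            return delegate;
        }
    }

    public static void main(String[] args) {
        Client inner = request -> System.out.println("executed with parentTask=" + request.parentTask);
        TaggingClient client = new TaggingClient(inner, "node-1:42");

        client.execute(new Request());          // prints parentTask=node-1:42
        client.unwrap().execute(new Request()); // prints parentTask=none
    }
}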