Use of org.elasticsearch.index.IndexNotFoundException in project elasticsearch by elastic.
Class SyncedFlushService, method getShardRoutingTable:
final IndexShardRoutingTable getShardRoutingTable(ShardId shardId, ClusterState state) {
    final IndexRoutingTable indexRoutingTable = state.routingTable().index(shardId.getIndexName());
    if (indexRoutingTable == null) {
        IndexMetaData index = state.getMetaData().index(shardId.getIndex());
        if (index != null && index.getState() == IndexMetaData.State.CLOSE) {
            throw new IndexClosedException(shardId.getIndex());
        }
        throw new IndexNotFoundException(shardId.getIndexName());
    }
    final IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId.id());
    if (shardRoutingTable == null) {
        throw new ShardNotFoundException(shardId);
    }
    return shardRoutingTable;
}
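For context, a hedged sketch of how a caller inside the same class might react to the three failure modes above; the surrounding code and variable names are hypothetical, only the exception types and their packages come from Elasticsearch:

import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.indices.IndexClosedException;

// Hypothetical caller: distinguish "index closed" from "index/shard missing".
IndexShardRoutingTable table;
try {
    table = getShardRoutingTable(shardId, clusterState); // the method shown above
} catch (IndexClosedException e) {
    // the index exists but is closed: not retryable, surface it to the caller
    throw e;
} catch (IndexNotFoundException | ShardNotFoundException e) {
    // the index or shard is gone (e.g. deleted concurrently): callers may skip or retry
    table = null;
}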
Use of org.elasticsearch.index.IndexNotFoundException in project elasticsearch by elastic.
Class PeerRecoveryTargetService, method doRecovery:
private void doRecovery(final long recoveryId) {
    final StartRecoveryRequest request;
    final CancellableThreads cancellableThreads;
    final RecoveryState.Timer timer;
    try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) {
        if (recoveryRef == null) {
            logger.trace("not running recovery with id [{}] - can not find it (probably finished)", recoveryId);
            return;
        }
        final RecoveryTarget recoveryTarget = recoveryRef.target();
        cancellableThreads = recoveryTarget.cancellableThreads();
        timer = recoveryTarget.state().getTimer();
        try {
            assert recoveryTarget.sourceNode() != null : "can not do a recovery without a source node";
            request = getStartRecoveryRequest(recoveryTarget);
            logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId());
            recoveryTarget.indexShard().prepareForIndexRecovery();
        } catch (final Exception e) {
            // this will be logged as warning later on...
            logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e);
            onGoingRecoveries.failRecovery(recoveryId,
                new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), true);
            return;
        }
    }
    try {
        logger.trace("{} starting recovery from {}", request.shardId(), request.sourceNode());
        final AtomicReference<RecoveryResponse> responseHolder = new AtomicReference<>();
        cancellableThreads.execute(() -> responseHolder.set(
            transportService.submitRequest(request.sourceNode(), PeerRecoverySourceService.Actions.START_RECOVERY, request,
                new FutureTransportResponseHandler<RecoveryResponse>() {

                    @Override
                    public RecoveryResponse newInstance() {
                        return new RecoveryResponse();
                    }
                }).txGet()));
        final RecoveryResponse recoveryResponse = responseHolder.get();
        final TimeValue recoveryTime = new TimeValue(timer.time());
        // do this through ongoing recoveries to remove it from the collection
        onGoingRecoveries.markRecoveryAsDone(recoveryId);
        if (logger.isTraceEnabled()) {
            StringBuilder sb = new StringBuilder();
            sb.append('[').append(request.shardId().getIndex().getName()).append(']')
                .append('[').append(request.shardId().id()).append("] ");
            sb.append("recovery completed from ").append(request.sourceNode()).append(", took[").append(recoveryTime).append("]\n");
            sb.append(" phase1: recovered_files [").append(recoveryResponse.phase1FileNames.size()).append("]")
                .append(" with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1TotalSize)).append("]")
                .append(", took [").append(timeValueMillis(recoveryResponse.phase1Time)).append("], throttling_wait [")
                .append(timeValueMillis(recoveryResponse.phase1ThrottlingWaitTime)).append(']').append("\n");
            sb.append(" : reusing_files [").append(recoveryResponse.phase1ExistingFileNames.size())
                .append("] with total_size of [").append(new ByteSizeValue(recoveryResponse.phase1ExistingTotalSize)).append("]\n");
            sb.append(" phase2: start took [").append(timeValueMillis(recoveryResponse.startTime)).append("]\n");
            sb.append(" : recovered [").append(recoveryResponse.phase2Operations).append("]")
                .append(" transaction log operations").append(", took [").append(timeValueMillis(recoveryResponse.phase2Time)).append("]")
                .append("\n");
            logger.trace("{}", sb);
        } else {
            logger.debug("{} recovery done from [{}], took [{}]", request.shardId(), request.sourceNode(), recoveryTime);
        }
    } catch (CancellableThreads.ExecutionCancelledException e) {
        logger.trace("recovery cancelled", e);
    } catch (Exception e) {
        if (logger.isTraceEnabled()) {
            logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] Got exception on recovery",
                request.shardId().getIndex().getName(), request.shardId().id()), e);
        }
        Throwable cause = ExceptionsHelper.unwrapCause(e);
        if (cause instanceof CancellableThreads.ExecutionCancelledException) {
            // this can also come from the source wrapped in a RemoteTransportException
            onGoingRecoveries.failRecovery(recoveryId,
                new RecoveryFailedException(request, "source has canceled the recovery", cause), false);
            return;
        }
        if (cause instanceof RecoveryEngineException) {
            // unwrap an exception that was thrown as part of the recovery
            cause = cause.getCause();
        }
        // do it twice, in case we have double transport exception
        cause = ExceptionsHelper.unwrapCause(cause);
        if (cause instanceof RecoveryEngineException) {
            // unwrap an exception that was thrown as part of the recovery
            cause = cause.getCause();
        }
        if (cause instanceof IllegalIndexShardStateException || cause instanceof IndexNotFoundException
                || cause instanceof ShardNotFoundException) {
            // if the target is not ready yet, retry
            retryRecovery(recoveryId, "remote shard not ready", recoverySettings.retryDelayStateSync(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof DelayRecoveryException) {
            retryRecovery(recoveryId, cause, recoverySettings.retryDelayStateSync(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof ConnectTransportException) {
            logger.debug("delaying recovery of {} for [{}] due to networking error [{}]", request.shardId(),
                recoverySettings.retryDelayNetwork(), cause.getMessage());
            retryRecovery(recoveryId, cause.getMessage(), recoverySettings.retryDelayNetwork(), recoverySettings.activityTimeout());
            return;
        }
        if (cause instanceof AlreadyClosedException) {
            onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, "source shard is closed", cause), false);
            return;
        }
        onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, e), true);
    }
}
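The double unwrap in the catch block is easy to miss, so here is a hedged, stand-alone sketch of the same cause-classification step; the helper name unwrapRecoveryCause is hypothetical, while ExceptionsHelper and RecoveryEngineException are real Elasticsearch classes:

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.index.engine.RecoveryEngineException;

// Hypothetical helper mirroring doRecovery: peel a possible transport wrapper, then a
// RecoveryEngineException, then a second wrapper and a second RecoveryEngineException,
// so the instanceof checks that follow see the root cause.
static Throwable unwrapRecoveryCause(Exception e) {
    Throwable cause = ExceptionsHelper.unwrapCause(e);
    if (cause instanceof RecoveryEngineException) {
        cause = cause.getCause();
    }
    cause = ExceptionsHelper.unwrapCause(cause);
    if (cause instanceof RecoveryEngineException) {
        cause = cause.getCause();
    }
    return cause;
}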
Use of org.elasticsearch.index.IndexNotFoundException in project elasticsearch by elastic.
Class CreateIndexIT, method testCreateAndDeleteIndexConcurrently:
public void testCreateAndDeleteIndexConcurrently() throws InterruptedException {
    createIndex("test");
    final AtomicInteger indexVersion = new AtomicInteger(0);
    final Object indexVersionLock = new Object();
    final CountDownLatch latch = new CountDownLatch(1);
    int numDocs = randomIntBetween(1, 10);
    for (int i = 0; i < numDocs; i++) {
        client().prepareIndex("test", "test").setSource("index_version", indexVersion.get()).get();
    }
    synchronized (indexVersionLock) {
        // not necessarily needed here but for completeness we lock here too
        indexVersion.incrementAndGet();
    }
    client().admin().indices().prepareDelete("test").execute(new ActionListener<DeleteIndexResponse>() { // this happens async!!!

        @Override
        public void onResponse(DeleteIndexResponse deleteIndexResponse) {
            Thread thread = new Thread() {

                @Override
                public void run() {
                    try {
                        // recreate that index
                        client().prepareIndex("test", "test").setSource("index_version", indexVersion.get()).get();
                        synchronized (indexVersionLock) {
                            // we sync here since we have to ensure that all indexing operations below for a given ID are done before
                            // we increment the index version otherwise a doc that is in-flight could make it into an index that it
                            // was supposed to be deleted for and our assertion fail...
                            indexVersion.incrementAndGet();
                        }
                        // from here on all docs with index_version == 0|1 must be gone!!!! only 2 are ok;
                        assertAcked(client().admin().indices().prepareDelete("test").get());
                    } finally {
                        latch.countDown();
                    }
                }
            };
            thread.start();
        }

        @Override
        public void onFailure(Exception e) {
            throw new RuntimeException(e);
        }
    });
    numDocs = randomIntBetween(100, 200);
    for (int i = 0; i < numDocs; i++) {
        try {
            synchronized (indexVersionLock) {
                client().prepareIndex("test", "test").setSource("index_version", indexVersion.get())
                    .setTimeout(TimeValue.timeValueSeconds(10)).get();
            }
        } catch (IndexNotFoundException inf) {
            // fine
        } catch (UnavailableShardsException ex) {
            assertEquals(ex.getCause().getClass(), IndexNotFoundException.class);
            // fine we run into a delete index while retrying
        }
    }
    latch.await();
    refresh();
    // we only really assert that we never reuse segments of old indices or anything like this here and that nothing fails with
    // crazy exceptions
    SearchResponse expected = client().prepareSearch("test").setIndicesOptions(IndicesOptions.lenientExpandOpen())
        .setQuery(new RangeQueryBuilder("index_version").from(indexVersion.get(), true)).get();
    SearchResponse all = client().prepareSearch("test").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get();
    assertEquals(expected + " vs. " + all, expected.getHits().getTotalHits(), all.getHits().getTotalHits());
    logger.info("total: {}", expected.getHits().getTotalHits());
}
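A hedged, minimal version of the pattern the indexing loop relies on; the client variable, index name, and field value below are illustrative placeholders, not from the test:

// Hypothetical indexing call: when the target index may be deleted concurrently, treat
// IndexNotFoundException (directly, or as the cause of UnavailableShardsException) as benign.
try {
    client.prepareIndex("my-index", "my-type").setSource("field", value).get();
} catch (IndexNotFoundException e) {
    // index was deleted while we were indexing; acceptable in this scenario
} catch (UnavailableShardsException e) {
    if ((e.getCause() instanceof IndexNotFoundException) == false) {
        throw e; // any other shard unavailability is still an error
    }
}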
Use of org.elasticsearch.index.IndexNotFoundException in project elasticsearch by elastic.
Class AllocationCommandsTests, method testAllocateCommand:
public void testAllocateCommand() {
    AllocationService allocation = createAllocationService(Settings.builder()
        .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")
        .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")
        .build());
    final String index = "test";
    logger.info("--> building initial routing table");
    MetaData metaData = MetaData.builder()
        .put(IndexMetaData.builder(index).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)
            .putInSyncAllocationIds(0, Collections.singleton("asdf")).putInSyncAllocationIds(1, Collections.singleton("qwertz")))
        .build();
    // shard routing is added as "from recovery" instead of "new index creation" so that we can test below that allocating an empty
    // primary with accept_data_loss flag set to false fails
    RoutingTable routingTable = RoutingTable.builder().addAsRecovery(metaData.index(index)).build();
    ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metaData(metaData).routingTable(routingTable).build();
    final ShardId shardId = new ShardId(metaData.index(index).getIndex(), 0);
    logger.info("--> adding 3 nodes on same rack and do rerouting");
    clusterState = ClusterState.builder(clusterState)
        .nodes(DiscoveryNodes.builder()
            .add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))
            .add(newNode("node4", singleton(DiscoveryNode.Role.MASTER))))
        .build();
    clusterState = allocation.reroute(clusterState, "reroute");
    assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
    logger.info("--> allocating to non-existent node, should fail");
    try {
        allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand(index, shardId.id(), "node42")), false, false);
        fail("expected IllegalArgumentException when allocating to non-existing node");
    } catch (IllegalArgumentException e) {
        assertThat(e.getMessage(), containsString("failed to resolve [node42], no matching nodes"));
    }
    logger.info("--> allocating to non-data node, should fail");
    try {
        allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand(index, shardId.id(), "node4")), false, false);
        fail("expected IllegalArgumentException when allocating to non-data node");
    } catch (IllegalArgumentException e) {
        assertThat(e.getMessage(), containsString("allocation can only be done on data nodes"));
    }
    logger.info("--> allocating non-existing shard, should fail");
    try {
        allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand("test", 1, "node2")), false, false);
        fail("expected ShardNotFoundException when allocating non-existing shard");
    } catch (ShardNotFoundException e) {
        assertThat(e.getMessage(), containsString("no such shard"));
    }
    logger.info("--> allocating non-existing index, should fail");
    try {
        allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand("test2", 0, "node2")), false, false);
        fail("expected ShardNotFoundException when allocating non-existing index");
    } catch (IndexNotFoundException e) {
        assertThat(e.getMessage(), containsString("no such index"));
    }
    logger.info("--> allocating empty primary with acceptDataLoss flag set to false");
    try {
        allocation.reroute(clusterState, new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", false)), false, false);
        fail("expected IllegalArgumentException when allocating empty primary with acceptDataLoss flag set to false");
    } catch (IllegalArgumentException e) {
        assertThat(e.getMessage(), containsString("allocating an empty primary for " + shardId +
            " can result in data loss. Please confirm by setting the accept_data_loss parameter to true"));
    }
    logger.info("--> allocating stale primary with acceptDataLoss flag set to false");
    try {
        allocation.reroute(clusterState, new AllocationCommands(new AllocateStalePrimaryAllocationCommand(index, shardId.id(), "node1", false)), false, false);
        fail("expected IllegalArgumentException when allocating stale primary with acceptDataLoss flag set to false");
    } catch (IllegalArgumentException e) {
        assertThat(e.getMessage(), containsString("allocating an empty primary for " + shardId +
            " can result in data loss. Please confirm by setting the accept_data_loss parameter to true"));
    }
    logger.info("--> allocating empty primary with acceptDataLoss flag set to true");
    ClusterState newState = allocation.reroute(clusterState,
        new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand("test", 0, "node1", true)), false, false).getClusterState();
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
    logger.info("--> start the primary shard");
    clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
    logger.info("--> allocate the replica shard on the primary shard node, should fail");
    try {
        allocation.reroute(clusterState, new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node1")), false, false);
        fail("expected IllegalArgumentException when allocating replica shard on the primary shard node");
    } catch (IllegalArgumentException e) {
    }
logger.info("--> allocate the replica shard on on the second node");
    newState = allocation.reroute(clusterState,
        new AllocationCommands(new AllocateReplicaAllocationCommand("test", 0, "node2")), false, false).getClusterState();
    assertThat(newState, not(equalTo(clusterState)));
    clusterState = newState;
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
    logger.info("--> start the replica shard");
    clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
    assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
    assertThat(clusterState.getRoutingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
    logger.info("--> verify that we fail when there are no unassigned shards");
    try {
        allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand("test", 0, "node3")), false, false);
        fail("expected IllegalArgumentException when allocating shard while no unassigned shard available");
    } catch (IllegalArgumentException e) {
    }
}
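As a side note, the try/fail/catch blocks in this test can also be written with expectThrows from the Lucene/Elasticsearch test framework (inherited via ESTestCase); a hedged rewrite of the "non-existing index" case:

// Equivalent assertion using expectThrows instead of try/fail/catch:
IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () ->
    allocation.reroute(clusterState, new AllocationCommands(randomAllocateCommand("test2", 0, "node2")), false, false));
assertThat(e.getMessage(), containsString("no such index"));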
Use of org.elasticsearch.index.IndexNotFoundException in project elasticsearch by elastic.
Class TypesExistsIT, method testSimple:
public void testSimple() throws Exception {
    Client client = client();
    CreateIndexResponse response1 = client.admin().indices().prepareCreate("test1")
        .addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject())
        .addMapping("type2", jsonBuilder().startObject().startObject("type2").endObject().endObject())
        .execute().actionGet();
    CreateIndexResponse response2 = client.admin().indices().prepareCreate("test2")
        .addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject())
        .execute().actionGet();
    client.admin().indices().prepareAliases().addAlias("test1", "alias1").execute().actionGet();
    assertAcked(response1);
    assertAcked(response2);
    TypesExistsResponse response = client.admin().indices().prepareTypesExists("test1").setTypes("type1").execute().actionGet();
    assertThat(response.isExists(), equalTo(true));
    response = client.admin().indices().prepareTypesExists("test1").setTypes("type2").execute().actionGet();
    assertThat(response.isExists(), equalTo(true));
    response = client.admin().indices().prepareTypesExists("test1").setTypes("type3").execute().actionGet();
    assertThat(response.isExists(), equalTo(false));
    try {
        client.admin().indices().prepareTypesExists("notExist").setTypes("type1").execute().actionGet();
        fail("Exception should have been thrown");
    } catch (IndexNotFoundException e) {
    }
    try {
        client.admin().indices().prepareTypesExists("notExist").setTypes("type0").execute().actionGet();
        fail("Exception should have been thrown");
    } catch (IndexNotFoundException e) {
    }
    response = client.admin().indices().prepareTypesExists("alias1").setTypes("type1").execute().actionGet();
    assertThat(response.isExists(), equalTo(true));
    response = client.admin().indices().prepareTypesExists("*").setTypes("type1").execute().actionGet();
    assertThat(response.isExists(), equalTo(true));
    response = client.admin().indices().prepareTypesExists("test1", "test2").setTypes("type1").execute().actionGet();
    assertThat(response.isExists(), equalTo(true));
    response = client.admin().indices().prepareTypesExists("test1", "test2").setTypes("type2").execute().actionGet();
    assertThat(response.isExists(), equalTo(false));
}
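A hedged aside: code that does not want to rely on catching IndexNotFoundException, as the two catch blocks above do, can check for the index first; the index name notExist is reused from the test, the rest is illustrative:

import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;

// Check index existence before asking about types, instead of catching IndexNotFoundException.
IndicesExistsResponse existsResponse = client.admin().indices().prepareExists("notExist").execute().actionGet();
if (existsResponse.isExists()) {
    TypesExistsResponse typesResponse = client.admin().indices().prepareTypesExists("notExist").setTypes("type1").execute().actionGet();
    // ... act on typesResponse.isExists() ...
}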