Use of org.elasticsearch.index.IndexNotFoundException in project elasticsearch by elastic.
The class AllocateStalePrimaryAllocationCommand, method execute.
@Override
public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
    final DiscoveryNode discoNode;
    try {
        discoNode = allocation.nodes().resolveNode(node);
    } catch (IllegalArgumentException e) {
        return explainOrThrowRejectedCommand(explain, allocation, e);
    }
    final RoutingNodes routingNodes = allocation.routingNodes();
    RoutingNode routingNode = routingNodes.node(discoNode.getId());
    if (routingNode == null) {
        return explainOrThrowMissingRoutingNode(allocation, explain, discoNode);
    }
    final ShardRouting shardRouting;
    try {
        shardRouting = allocation.routingTable().shardRoutingTable(index, shardId).primaryShard();
    } catch (IndexNotFoundException | ShardNotFoundException e) {
        return explainOrThrowRejectedCommand(explain, allocation, e);
    }
    if (shardRouting.unassigned() == false) {
        return explainOrThrowRejectedCommand(explain, allocation,
            "primary [" + index + "][" + shardId + "] is already assigned");
    }
    if (acceptDataLoss == false) {
        return explainOrThrowRejectedCommand(explain, allocation,
            "allocating an empty primary for [" + index + "][" + shardId
                + "] can result in data loss. Please confirm by setting the accept_data_loss parameter to true");
    }
    if (shardRouting.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE) {
        return explainOrThrowRejectedCommand(explain, allocation,
            "trying to allocate an existing primary shard [" + index + "][" + shardId
                + "], while no such shard has ever been active");
    }
    initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting);
    return new RerouteExplanation(this, allocation.decision(Decision.YES, name() + " (allocation command)", "ignore deciders"));
}
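This command is typically triggered through the cluster reroute API. For context, a minimal sketch of an invocation, assuming a connected 5.x client named client (index, shard, and node names are illustrative):

    // Force-allocate a stale copy of shard 0 of "my-index" on "node-1".
    // The trailing true is acceptDataLoss, the flag checked in execute() above.
    ClusterRerouteResponse response = client.admin().cluster().prepareReroute()
        .add(new AllocateStalePrimaryAllocationCommand("my-index", 0, "node-1", true))
        .get();

Because the command deliberately bypasses the allocation deciders (the Decision.YES with "ignore deciders" above), it is a last resort for when the shard's last known good copy is gone.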
Use of org.elasticsearch.index.IndexNotFoundException in project elasticsearch by elastic.
The class TruncateTranslogCommand, method execute.
@Override
protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
    boolean batch = options.has(batchMode);
    Path translogPath = getTranslogPath(options);
    Path idxLocation = translogPath.getParent().resolve("index");
    if (Files.exists(translogPath) == false || Files.isDirectory(translogPath) == false) {
        throw new ElasticsearchException("translog directory [" + translogPath + "], must exist and be a directory");
    }
    if (Files.exists(idxLocation) == false || Files.isDirectory(idxLocation) == false) {
        throw new ElasticsearchException("unable to find a shard at [" + idxLocation + "], which must exist and be a directory");
    }
    // Hold the lock open for the duration of the tool running
    try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE);
         Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        Set<Path> translogFiles;
        try {
            terminal.println("Checking existing translog files");
            translogFiles = filesInDirectory(translogPath);
        } catch (IOException e) {
            terminal.println("encountered IOException while listing directory, aborting...");
            throw new ElasticsearchException("failed to find existing translog files", e);
        }
        // Warn about ES being stopped and files being deleted
        warnAboutDeletingFiles(terminal, translogFiles, batch);
        List<IndexCommit> commits;
        try {
            terminal.println("Reading translog UUID information from Lucene commit from shard at [" + idxLocation + "]");
            commits = DirectoryReader.listCommits(dir);
        } catch (IndexNotFoundException infe) {
            throw new ElasticsearchException("unable to find a valid shard at [" + idxLocation + "]", infe);
        }
        // Retrieve the generation and UUID from the existing data
        Map<String, String> commitData = commits.get(commits.size() - 1).getUserData();
        String translogGeneration = commitData.get(Translog.TRANSLOG_GENERATION_KEY);
        String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY);
        if (translogGeneration == null || translogUUID == null) {
            throw new ElasticsearchException("shard must have a valid translog generation and UUID but got: [{}] and: [{}]",
                translogGeneration, translogUUID);
        }
        terminal.println("Translog Generation: " + translogGeneration);
        terminal.println("Translog UUID      : " + translogUUID);
        Path tempEmptyCheckpoint = translogPath.resolve("temp-" + Translog.CHECKPOINT_FILE_NAME);
        Path realEmptyCheckpoint = translogPath.resolve(Translog.CHECKPOINT_FILE_NAME);
        Path tempEmptyTranslog = translogPath.resolve("temp-" + Translog.TRANSLOG_FILE_PREFIX + translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);
        Path realEmptyTranslog = translogPath.resolve(Translog.TRANSLOG_FILE_PREFIX + translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);
        // Write empty checkpoint and translog to empty files
        long gen = Long.parseLong(translogGeneration);
        int translogLen = writeEmptyTranslog(tempEmptyTranslog, translogUUID);
        writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen);
        terminal.println("Removing existing translog files");
        IOUtils.rm(translogFiles.toArray(new Path[]{}));
        terminal.println("Creating new empty checkpoint at [" + realEmptyCheckpoint + "]");
        Files.move(tempEmptyCheckpoint, realEmptyCheckpoint, StandardCopyOption.ATOMIC_MOVE);
        terminal.println("Creating new empty translog at [" + realEmptyTranslog + "]");
        Files.move(tempEmptyTranslog, realEmptyTranslog, StandardCopyOption.ATOMIC_MOVE);
        // Fsync the translog directory after rename
        IOUtils.fsync(translogPath, true);
    } catch (LockObtainFailedException lofe) {
        throw new ElasticsearchException("Failed to lock shard's directory at [" + idxLocation + "], is Elasticsearch still running?");
    }
    terminal.println("Done.");
}
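Note the ordering in the happy path: the new checkpoint and translog are first written under temporary names, then atomically renamed into place, and only then is the directory fsynced so the renames survive a crash. A standalone sketch of that write-then-rename pattern, using Lucene's IOUtils (as the tool itself does) for the directory fsync; paths and contents are illustrative:

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import org.apache.lucene.util.IOUtils;

public class AtomicReplaceDemo {
    public static void main(String[] args) throws Exception {
        Path dir = Paths.get("/tmp/translog-demo");
        Files.createDirectories(dir);
        Path temp = dir.resolve("temp-checkpoint");
        Path real = dir.resolve("checkpoint");
        // 1. Write the full contents under a temporary name first.
        Files.write(temp, "checkpoint-bytes".getBytes(StandardCharsets.UTF_8));
        // 2. Atomically rename into place; readers never observe a partial file.
        Files.move(temp, real, StandardCopyOption.ATOMIC_MOVE);
        // 3. Fsync the parent directory so the rename itself is durable.
        IOUtils.fsync(dir, true);
    }
}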
Use of org.elasticsearch.index.IndexNotFoundException in project elasticsearch by elastic.
The class TransportMultiTermVectorsAction, method doExecute.
@Override
protected void doExecute(final MultiTermVectorsRequest request, final ActionListener<MultiTermVectorsResponse> listener) {
    ClusterState clusterState = clusterService.state();
    clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
    final AtomicArray<MultiTermVectorsItemResponse> responses = new AtomicArray<>(request.requests.size());
    Map<ShardId, MultiTermVectorsShardRequest> shardRequests = new HashMap<>();
    for (int i = 0; i < request.requests.size(); i++) {
        TermVectorsRequest termVectorsRequest = request.requests.get(i);
        termVectorsRequest.routing(clusterState.metaData().resolveIndexRouting(
            termVectorsRequest.parent(), termVectorsRequest.routing(), termVectorsRequest.index()));
        if (!clusterState.metaData().hasConcreteIndex(termVectorsRequest.index())) {
            responses.set(i, new MultiTermVectorsItemResponse(null,
                new MultiTermVectorsResponse.Failure(termVectorsRequest.index(), termVectorsRequest.type(),
                    termVectorsRequest.id(), new IndexNotFoundException(termVectorsRequest.index()))));
            continue;
        }
        String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, termVectorsRequest).getName();
        if (termVectorsRequest.routing() == null
            && clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorsRequest.type())) {
            responses.set(i, new MultiTermVectorsItemResponse(null,
                new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(),
                    new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/["
                        + termVectorsRequest.type() + "]/[" + termVectorsRequest.id() + "]"))));
            continue;
        }
        ShardId shardId = clusterService.operationRouting().shardId(
            clusterState, concreteSingleIndex, termVectorsRequest.id(), termVectorsRequest.routing());
        MultiTermVectorsShardRequest shardRequest = shardRequests.get(shardId);
        if (shardRequest == null) {
            shardRequest = new MultiTermVectorsShardRequest(shardId.getIndexName(), shardId.id());
            shardRequest.preference(request.preference);
            shardRequests.put(shardId, shardRequest);
        }
        shardRequest.add(i, termVectorsRequest);
    }
    if (shardRequests.size() == 0) {
        // only failures..
        listener.onResponse(new MultiTermVectorsResponse(responses.toArray(new MultiTermVectorsItemResponse[responses.length()])));
    }
    final AtomicInteger counter = new AtomicInteger(shardRequests.size());
    for (final MultiTermVectorsShardRequest shardRequest : shardRequests.values()) {
        shardAction.execute(shardRequest, new ActionListener<MultiTermVectorsShardResponse>() {
            @Override
            public void onResponse(MultiTermVectorsShardResponse response) {
                for (int i = 0; i < response.locations.size(); i++) {
                    responses.set(response.locations.get(i),
                        new MultiTermVectorsItemResponse(response.responses.get(i), response.failures.get(i)));
                }
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Exception e) {
                // create failures for all relevant requests
                for (int i = 0; i < shardRequest.locations.size(); i++) {
                    TermVectorsRequest termVectorsRequest = shardRequest.requests.get(i);
                    responses.set(shardRequest.locations.get(i), new MultiTermVectorsItemResponse(null,
                        new MultiTermVectorsResponse.Failure(shardRequest.index(), termVectorsRequest.type(),
                            termVectorsRequest.id(), e)));
                }
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            private void finishHim() {
                listener.onResponse(new MultiTermVectorsResponse(responses.toArray(new MultiTermVectorsItemResponse[responses.length()])));
            }
        });
    }
}
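Two details are easy to miss here. First, the early listener.onResponse for the all-failures case is not followed by a return; it stays harmless only because shardRequests is empty, so the loop below runs zero times. Second, fan-in is coordinated by an AtomicInteger initialized to the number of shard requests: whichever callback performs the final decrement assembles the combined response, whether it arrived via onResponse or onFailure. A minimal standalone sketch of that counter pattern (names are illustrative, not Elasticsearch API):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

public class ScatterGatherDemo {
    public static void main(String[] args) {
        List<String> shardRequests = Arrays.asList("shard-0", "shard-1", "shard-2");
        AtomicInteger counter = new AtomicInteger(shardRequests.size());
        ExecutorService pool = Executors.newFixedThreadPool(shardRequests.size());
        for (String request : shardRequests) {
            pool.execute(() -> {
                System.out.println("handled " + request);
                // Exactly one callback observes zero, so the merged response
                // is assembled once, no matter the order branches finish in.
                if (counter.decrementAndGet() == 0) {
                    System.out.println("all shard responses collected");
                }
            });
        }
        pool.shutdown();
    }
}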
Use of org.elasticsearch.index.IndexNotFoundException in project crate by crate.
The class DocTableInfo, method getRouting.
@Nullable
private Routing getRouting(ClusterState state, WhereClause whereClause, String preference, final List<ShardId> missingShards) {
    final Map<String, Map<String, List<Integer>>> locations = new TreeMap<>();
    GroupShardsIterator shardIterators;
    try {
        shardIterators = getShardIterators(whereClause, preference, state);
    } catch (IndexNotFoundException e) {
        return new Routing(locations);
    }
    fillLocationsFromShardIterators(locations, shardIterators, missingShards);
    if (missingShards.isEmpty()) {
        return new Routing(locations);
    } else {
        return null;
    }
}
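The method distinguishes two outcomes: an IndexNotFoundException yields an empty Routing (a missing index simply contributes no shards), while unassigned shards yield null so the caller can retry against a newer cluster state. A hypothetical caller illustrating that @Nullable contract (retryWithNewClusterState is a stand-in name, not Crate API):

    List<ShardId> missingShards = new ArrayList<>();
    Routing routing = getRouting(state, whereClause, preference, missingShards);
    if (routing == null) {
        // Some shards were not assigned yet: wait for a fresher ClusterState
        // and try again, rather than planning a query over partial data.
        retryWithNewClusterState(missingShards);
    }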
Use of org.elasticsearch.index.IndexNotFoundException in project crate by crate.
The class DocTableInfoBuilder, method docIndexMetaData.
private DocIndexMetaData docIndexMetaData() {
    DocIndexMetaData docIndexMetaData;
    String templateName = PartitionName.templateName(ident.schema(), ident.name());
    boolean createdFromTemplate = false;
    if (metaData.getTemplates().containsKey(templateName)) {
        docIndexMetaData = buildDocIndexMetaDataFromTemplate(ident.indexName(), templateName);
        createdFromTemplate = true;
        concreteIndices = indexNameExpressionResolver.concreteIndices(state, IndicesOptions.lenientExpandOpen(), ident.indexName());
    } else {
        try {
            concreteIndices = indexNameExpressionResolver.concreteIndices(state, IndicesOptions.strictExpandOpen(), ident.indexName());
            if (concreteIndices.length == 0) {
                // no matching index found
                throw new TableUnknownException(ident);
            }
            docIndexMetaData = buildDocIndexMetaData(concreteIndices[0]);
        } catch (IndexNotFoundException ex) {
            throw new TableUnknownException(ident.fqn(), ex);
        }
    }
    if ((!createdFromTemplate && concreteIndices.length == 1) || !checkAliasSchema) {
        return docIndexMetaData;
    }
    for (String concreteIndice : concreteIndices) {
        if (IndexMetaData.State.CLOSE.equals(metaData.indices().get(concreteIndice).getState())) {
            throw new UnhandledServerException(
                String.format(Locale.ENGLISH, "Unable to access the partition %s, it is closed", concreteIndice));
        }
        try {
            docIndexMetaData = docIndexMetaData.merge(
                buildDocIndexMetaData(concreteIndice), transportPutIndexTemplateAction, createdFromTemplate);
        } catch (IOException e) {
            throw new UnhandledServerException("Unable to merge/build new DocIndexMetaData", e);
        }
    }
    return docIndexMetaData;
}
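The resolution order matters: a matching index template marks the table as partitioned and supplies its schema, otherwise the concrete index does, with IndexNotFoundException translated into Crate's table-level TableUnknownException (the cause is kept so the failed index lookup stays visible). The merge loop then reconciles every partition's mapping into one table schema, refusing closed partitions along the way. A small sketch of the partitioned-table check, reusing only calls visible in the snippet (schema and table names are illustrative):

    // A Crate table is treated as partitioned exactly when an index template
    // with its canonical template name exists in the cluster metadata.
    String templateName = PartitionName.templateName("doc", "my_table");
    boolean partitioned = metaData.getTemplates().containsKey(templateName);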