Use of org.elasticsearch.index.shard.ShardId in project elasticsearch by elastic.
The class TransportNodesListGatewayStartedShards, method nodeOperation.
@Override
protected NodeGatewayStartedShards nodeOperation(NodeRequest request) {
    try {
        final ShardId shardId = request.getShardId();
        logger.trace("{} loading local shard state info", shardId);
        ShardStateMetaData shardStateMetaData = ShardStateMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY,
            nodeEnv.availableShardPaths(request.shardId));
        if (shardStateMetaData != null) {
            IndexMetaData metaData = clusterService.state().metaData().index(shardId.getIndex());
            if (metaData == null) {
                // we may send this request while processing the cluster state that recovered the index;
                // sometimes the request comes in before the local node has processed that cluster state.
                // In such cases we can load the metadata from disk.
                metaData = IndexMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY,
                    nodeEnv.indexPaths(shardId.getIndex()));
            }
            if (metaData == null) {
                ElasticsearchException e = new ElasticsearchException("failed to find local IndexMetaData");
                e.setShard(request.shardId);
                throw e;
            }
            if (indicesService.getShardOrNull(shardId) == null) {
                // we don't have an open shard on the store, so validate that the files on disk are openable
                ShardPath shardPath = null;
                try {
                    IndexSettings indexSettings = new IndexSettings(metaData, settings);
                    shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings);
                    if (shardPath == null) {
                        throw new IllegalStateException(shardId + " no shard path found");
                    }
                    Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger);
                } catch (Exception exception) {
                    final ShardPath finalShardPath = shardPath;
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage(
                        "{} can't open index for shard [{}] in path [{}]",
                        shardId, shardStateMetaData,
                        (finalShardPath != null) ? finalShardPath.resolveIndex() : ""), exception);
                    String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null;
                    return new NodeGatewayStartedShards(clusterService.localNode(), allocationId, shardStateMetaData.primary, exception);
                }
            }
            logger.debug("{} shard state info found: [{}]", shardId, shardStateMetaData);
            String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null;
            return new NodeGatewayStartedShards(clusterService.localNode(), allocationId, shardStateMetaData.primary);
        }
        logger.trace("{} no local shard info found", shardId);
        return new NodeGatewayStartedShards(clusterService.localNode(), null, false);
    } catch (Exception e) {
        throw new ElasticsearchException("failed to load started shards", e);
    }
}
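Everything above keys off the ShardId carried in the request. As a minimal sketch of how such an id is built and rendered (the index name and UUID below are hypothetical; the Elasticsearch 5.x core jar is assumed to be on the classpath):

import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;

public class ShardIdDemo {
    public static void main(String[] args) {
        // An Index is identified by its name plus the UUID assigned at creation time.
        Index index = new Index("my-index", "hypothetical-uuid");
        // Shard 0 of that index; toString() prints "[my-index][0]", the form
        // that appears in the "{} loading local shard state info" trace line above.
        ShardId shardId = new ShardId(index, 0);
        System.out.println(shardId + " -> index=" + shardId.getIndexName() + ", id=" + shardId.getId());
    }
}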
Use of org.elasticsearch.index.shard.ShardId in project elasticsearch by elastic.
The class NodeEnvironment, method findAllShardsForIndex.
private static Set<ShardId> findAllShardsForIndex(Path indexPath, Index index) throws IOException {
    assert indexPath.getFileName().toString().equals(index.getUUID());
    Set<ShardId> shardIds = new HashSet<>();
    if (Files.isDirectory(indexPath)) {
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
            for (Path shardPath : stream) {
                String fileName = shardPath.getFileName().toString();
                if (Files.isDirectory(shardPath) && fileName.chars().allMatch(Character::isDigit)) {
                    int shardId = Integer.parseInt(fileName);
                    ShardId id = new ShardId(index, shardId);
                    shardIds.add(id);
                }
            }
        }
    }
    return shardIds;
}
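The scan relies only on the on-disk layout: one all-digit subdirectory per shard under the index's UUID directory. The same pattern in a standalone sketch, using plain java.nio instead of the Elasticsearch types (the data path in main is hypothetical):

import java.io.IOException;
import java.nio.file.*;
import java.util.HashSet;
import java.util.Set;

public class ShardDirScan {
    // Collect the numeric shard directory names under an index path,
    // mirroring findAllShardsForIndex without the Elasticsearch types.
    static Set<Integer> findShardIds(Path indexPath) throws IOException {
        Set<Integer> ids = new HashSet<>();
        if (Files.isDirectory(indexPath)) {
            try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
                for (Path shardPath : stream) {
                    String name = shardPath.getFileName().toString();
                    // Only all-digit directory names are shard ids; entries like "_state" are skipped.
                    if (Files.isDirectory(shardPath) && !name.isEmpty() && name.chars().allMatch(Character::isDigit)) {
                        ids.add(Integer.parseInt(name));
                    }
                }
            }
        }
        return ids;
    }

    public static void main(String[] args) throws IOException {
        // Hypothetical data path: <data>/nodes/0/indices/<index-uuid>
        System.out.println(findShardIds(Paths.get("data/nodes/0/indices/some-uuid")));
    }
}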
Use of org.elasticsearch.index.shard.ShardId in project elasticsearch by elastic.
The class NodeEnvironment, method deleteShardDirectoryUnderLock.
/**
 * Deletes a shard data directory. Note: this method assumes that the shard
 * lock is acquired. It will also attempt to acquire the write locks for the
 * shard's paths before deleting the data, but this is best effort, as the
 * lock is released before the deletion happens in order to allow the folder
 * to be deleted.
 *
 * @param lock the shard's lock
 * @param indexSettings the settings of the index the shard belongs to
 * @throws IOException if an IOException occurs
 * @throws ElasticsearchException if the write.lock is not acquirable
 */
public void deleteShardDirectoryUnderLock(ShardLock lock, IndexSettings indexSettings) throws IOException {
    final ShardId shardId = lock.getShardId();
    assert isShardLocked(shardId) : "shard " + shardId + " is not locked";
    final Path[] paths = availableShardPaths(shardId);
    logger.trace("acquiring locks for {}, paths: [{}]", shardId, paths);
    acquireFSLockForPaths(indexSettings, paths);
    IOUtils.rm(paths);
    if (indexSettings.hasCustomDataPath()) {
        Path customLocation = resolveCustomLocation(indexSettings, shardId);
        logger.trace("acquiring lock for {}, custom path: [{}]", shardId, customLocation);
        acquireFSLockForPaths(indexSettings, customLocation);
        logger.trace("deleting custom shard {} directory [{}]", shardId, customLocation);
        IOUtils.rm(customLocation);
    }
    logger.trace("deleted shard {} directory, paths: [{}]", shardId, paths);
    assert FileSystemUtils.exists(paths) == false;
}
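The acquireFSLockForPaths step amounts to obtaining Lucene's write.lock in the shard's index directory before deletion, so that a directory still in use by another process is never removed. A hedged sketch of that check (the path in main is hypothetical), using the Lucene API that Elasticsearch 5.x bundles:

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;

public class WriteLockCheck {
    // Returns normally only if no other process holds the shard's write.lock,
    // i.e. it is safe to delete the directory out from under Lucene.
    static void ensureNotInUse(Path shardIndexPath) throws IOException {
        try (Directory dir = FSDirectory.open(shardIndexPath);
             Lock lock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
            // Lock acquired and released on close; the directory is not in use.
        } catch (LockObtainFailedException e) {
            throw new IllegalStateException(shardIndexPath + " is still in use", e);
        }
    }

    public static void main(String[] args) throws IOException {
        ensureNotInUse(Paths.get("data/nodes/0/indices/some-uuid/0/index")); // hypothetical path
    }
}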
Use of org.elasticsearch.index.shard.ShardId in project elasticsearch by elastic.
The class IndicesQueryCache, method getStats.
/** Get usage statistics for the given shard. */
public QueryCacheStats getStats(ShardId shard) {
    final Map<ShardId, QueryCacheStats> stats = new HashMap<>();
    for (Map.Entry<ShardId, Stats> entry : shardStats.entrySet()) {
        stats.put(entry.getKey(), entry.getValue().toQueryCacheStats());
    }
    QueryCacheStats shardStats = new QueryCacheStats();
    QueryCacheStats info = stats.get(shard);
    if (info == null) {
        info = new QueryCacheStats();
    }
    shardStats.add(info);
    // We also have some shared ram usage that we try to distribute
    // proportionally to the number of cache entries of each shard
    long totalSize = 0;
    for (QueryCacheStats s : stats.values()) {
        totalSize += s.getCacheSize();
    }
    final double weight = totalSize == 0
        ? 1d / stats.size()
        : ((double) shardStats.getCacheSize()) / totalSize; // cast to double to avoid integer division
    final long additionalRamBytesUsed = Math.round(weight * sharedRamBytesUsed);
    shardStats.add(new QueryCacheStats(additionalRamBytesUsed, 0, 0, 0, 0));
    return shardStats;
}
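The shared-RAM attribution is plain proportional weighting: a shard holding k of the N cached queries is charged k/N of sharedRamBytesUsed, falling back to an even split across shards when the cache is empty. A tiny self-contained check of that arithmetic (the method and values are illustrative, not Elasticsearch API):

public class SharedRamWeight {
    // weight = shardCacheSize / totalCacheSize, falling back to an even
    // split across shards when nothing is cached yet.
    static long additionalRamBytes(long shardCacheSize, long totalCacheSize,
                                   int numShards, long sharedRamBytesUsed) {
        double weight = totalCacheSize == 0
            ? 1d / numShards
            : (double) shardCacheSize / totalCacheSize; // cast avoids integer division
        return Math.round(weight * sharedRamBytesUsed);
    }

    public static void main(String[] args) {
        // A shard with 25 of 100 cached queries is charged a quarter of 1 MB.
        System.out.println(additionalRamBytes(25, 100, 4, 1_048_576)); // 262144
        // Empty cache: the 1 MB is split evenly across 2 shards.
        System.out.println(additionalRamBytes(0, 0, 2, 1_048_576));    // 524288
    }
}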
Use of org.elasticsearch.index.shard.ShardId in project elasticsearch by elastic.
The class IndicesService, method processPendingDeletes.
/**
 * Processes all pending deletes for the given index. This method acquires all locks for the index and
 * processes all pending deletes for it. Pending deletes can occur when the OS doesn't allow a file to be
 * deleted because it is still in use by another process, e.g. on Windows where files might still be open
 * by a virus scanner. On a shared filesystem, a replica might not have been closed yet when the primary is
 * deleted, causing problems on delete calls, so those deletes are scheduled for later.
 * @param index the index to process the pending deletes for
 * @param timeout the timeout used for processing pending deletes
 */
@Override
public void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeout)
        throws IOException, InterruptedException, ShardLockObtainFailedException {
    logger.debug("{} processing pending deletes", index);
    final long startTimeNS = System.nanoTime();
    final List<ShardLock> shardLocks = nodeEnv.lockAllForIndex(index, indexSettings, timeout.millis());
    int numRemoved = 0;
    try {
        Map<ShardId, ShardLock> locks = new HashMap<>();
        for (ShardLock lock : shardLocks) {
            locks.put(lock.getShardId(), lock);
        }
        final List<PendingDelete> remove;
        synchronized (pendingDeletes) {
            remove = pendingDeletes.remove(index);
        }
        if (remove != null && remove.isEmpty() == false) {
            numRemoved = remove.size();
            // make sure we delete indices first
            CollectionUtil.timSort(remove);
            // ensure we retry after 10 sec
            final long maxSleepTimeMs = 10 * 1000;
            long sleepTime = 10;
            do {
                if (remove.isEmpty()) {
                    break;
                }
                Iterator<PendingDelete> iterator = remove.iterator();
                while (iterator.hasNext()) {
                    PendingDelete delete = iterator.next();
                    if (delete.deleteIndex) {
                        assert delete.shardId == -1;
                        logger.debug("{} deleting index store reason [{}]", index, "pending delete");
                        try {
                            nodeEnv.deleteIndexDirectoryUnderLock(index, indexSettings);
                            iterator.remove();
                        } catch (IOException ex) {
                            logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} retry pending delete", index), ex);
                        }
                    } else {
                        assert delete.shardId != -1;
                        ShardLock shardLock = locks.get(new ShardId(delete.index, delete.shardId));
                        if (shardLock != null) {
                            try {
                                deleteShardStore("pending delete", shardLock, delete.settings);
                                iterator.remove();
                            } catch (IOException ex) {
                                logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} retry pending delete", shardLock.getShardId()), ex);
                            }
                        } else {
                            logger.warn("{} no shard lock for pending delete", delete.shardId);
                            iterator.remove();
                        }
                    }
                }
                if (remove.isEmpty() == false) {
                    logger.warn("{} still pending deletes present for shards {} - retrying", index, remove.toString());
                    Thread.sleep(sleepTime);
                    // increase the sleep time gradually
                    sleepTime = Math.min(maxSleepTimeMs, sleepTime * 2);
                    logger.debug("{} schedule pending delete retry after {} ms", index, sleepTime);
                }
            } while ((System.nanoTime() - startTimeNS) < timeout.nanos());
        }
    } finally {
        IOUtils.close(shardLocks);
        if (numRemoved > 0) {
            int remainingUncompletedDeletes = numUncompletedDeletes.addAndGet(-numRemoved);
            assert remainingUncompletedDeletes >= 0;
        }
    }
}
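The retry loop doubles its sleep from 10 ms up to a 10 s cap until either the pending list drains or the overall timeout elapses. The same backoff skeleton in isolation (the work supplier in main is a hypothetical stand-in for the delete attempts):

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public class BackoffRetry {
    // Retries work until it reports success or timeoutNanos elapses,
    // doubling the sleep between attempts, capped at maxSleepTimeMs.
    static boolean retryWithBackoff(BooleanSupplier work, long timeoutNanos) throws InterruptedException {
        final long maxSleepTimeMs = 10 * 1000;
        long sleepTime = 10;
        final long startTimeNS = System.nanoTime();
        do {
            if (work.getAsBoolean()) {
                return true;
            }
            Thread.sleep(sleepTime);
            sleepTime = Math.min(maxSleepTimeMs, sleepTime * 2);
        } while (System.nanoTime() - startTimeNS < timeoutNanos);
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(50);
        // Hypothetical work: succeeds once the deadline has passed.
        boolean ok = retryWithBackoff(() -> System.nanoTime() > deadline, TimeUnit.SECONDS.toNanos(1));
        System.out.println("succeeded: " + ok);
    }
}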