Use of org.elasticsearch.cluster.SnapshotsInProgress in project elasticsearch by elastic.
The class SnapshotShardsService, method processIndexShardSnapshots.
/**
 * Checks if any new shards should be snapshotted on this node
 *
 * @param event cluster state changed event
 */
private void processIndexShardSnapshots(ClusterChangedEvent event) {
    SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE);
    Map<Snapshot, SnapshotShards> survivors = new HashMap<>();
    // First, remove snapshots that are no longer there
    for (Map.Entry<Snapshot, SnapshotShards> entry : shardSnapshots.entrySet()) {
        final Snapshot snapshot = entry.getKey();
        if (snapshotsInProgress != null && snapshotsInProgress.snapshot(snapshot) != null) {
            survivors.put(entry.getKey(), entry.getValue());
        } else {
            // Abort any running shards for the removed snapshot; the snapshot may have been
            // removed from SnapshotsInProgress by the cluster state update, which is being processed here
            for (IndexShardSnapshotStatus snapshotStatus : entry.getValue().shards.values()) {
                if (snapshotStatus.stage() == Stage.INIT || snapshotStatus.stage() == Stage.STARTED) {
                    snapshotStatus.abort();
                }
            }
        }
    }
    // For now we will be mostly dealing with a single snapshot at a time but might have multiple simultaneously running
    // snapshots in the future
    Map<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> newSnapshots = new HashMap<>();
    // Now go through all snapshots and update existing or create missing
    final String localNodeId = event.state().nodes().getLocalNodeId();
    final DiscoveryNode masterNode = event.state().nodes().getMasterNode();
    final Map<Snapshot, Map<String, IndexId>> snapshotIndices = new HashMap<>();
    if (snapshotsInProgress != null) {
        for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) {
            snapshotIndices.put(entry.snapshot(),
                entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity())));
            if (entry.state() == State.STARTED) {
                Map<ShardId, IndexShardSnapshotStatus> startedShards = new HashMap<>();
                SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshot());
                for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shard : entry.shards()) {
                    // Add all new shards to start processing on
                    if (localNodeId.equals(shard.value.nodeId())) {
                        if (shard.value.state() == State.INIT
                                && (snapshotShards == null || !snapshotShards.shards.containsKey(shard.key))) {
                            logger.trace("[{}] - Adding shard to the queue", shard.key);
                            startedShards.put(shard.key, new IndexShardSnapshotStatus());
                        }
                    }
                }
                if (!startedShards.isEmpty()) {
                    newSnapshots.put(entry.snapshot(), startedShards);
                    if (snapshotShards != null) {
                        // We already saw this snapshot but we need to add more started shards
                        Map<ShardId, IndexShardSnapshotStatus> shards = new HashMap<>();
                        // Put all shards that were already running on this node
                        shards.putAll(snapshotShards.shards);
                        // Put all newly started shards
                        shards.putAll(startedShards);
                        survivors.put(entry.snapshot(), new SnapshotShards(unmodifiableMap(shards)));
                    } else {
                        // Brand new snapshot that we haven't seen before
                        survivors.put(entry.snapshot(), new SnapshotShards(unmodifiableMap(startedShards)));
                    }
                }
            } else if (entry.state() == State.ABORTED) {
                // Abort all running shards for this snapshot
                SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshot());
                if (snapshotShards != null) {
                    for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shard : entry.shards()) {
                        IndexShardSnapshotStatus snapshotStatus = snapshotShards.shards.get(shard.key);
                        if (snapshotStatus != null) {
                            switch (snapshotStatus.stage()) {
                                case INIT:
                                case STARTED:
                                    snapshotStatus.abort();
                                    break;
                                case FINALIZE:
                                    logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, letting it finish", entry.snapshot(), shard.key);
                                    break;
                                case DONE:
                                    logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, updating status on the master", entry.snapshot(), shard.key);
                                    updateIndexShardSnapshotStatus(entry.snapshot(), shard.key, new ShardSnapshotStatus(localNodeId, State.SUCCESS), masterNode);
                                    break;
                                case FAILURE:
                                    logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, updating status on the master", entry.snapshot(), shard.key);
                                    updateIndexShardSnapshotStatus(entry.snapshot(), shard.key, new ShardSnapshotStatus(localNodeId, State.FAILED, snapshotStatus.failure()), masterNode);
                                    break;
                                default:
                                    throw new IllegalStateException("Unknown snapshot shard stage " + snapshotStatus.stage());
                            }
                        }
                    }
                }
            }
        }
    }
    // Update the list of snapshots that we saw and tried to start
    // If startup of these shards fails later, we don't want to try starting these shards again
    shutdownLock.lock();
    try {
        shardSnapshots = unmodifiableMap(survivors);
        if (shardSnapshots.isEmpty()) {
            // Notify all waiting threads that there are no more snapshots in progress
            shutdownCondition.signalAll();
        }
    } finally {
        shutdownLock.unlock();
    }
    // We have new shards to start
    if (newSnapshots.isEmpty() == false) {
        Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
        for (final Map.Entry<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> entry : newSnapshots.entrySet()) {
            Map<String, IndexId> indicesMap = snapshotIndices.get(entry.getKey());
            assert indicesMap != null;
            for (final Map.Entry<ShardId, IndexShardSnapshotStatus> shardEntry : entry.getValue().entrySet()) {
                final ShardId shardId = shardEntry.getKey();
                try {
                    final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id());
                    final IndexId indexId = indicesMap.get(shardId.getIndexName());
                    assert indexId != null;
                    executor.execute(new AbstractRunnable() {

                        @Override
                        public void doRun() {
                            snapshot(indexShard, entry.getKey(), indexId, shardEntry.getValue());
                            updateIndexShardSnapshotStatus(entry.getKey(), shardId, new ShardSnapshotStatus(localNodeId, State.SUCCESS), masterNode);
                        }

                        @Override
                        public void onFailure(Exception e) {
                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to create snapshot", shardId, entry.getKey()), e);
                            updateIndexShardSnapshotStatus(entry.getKey(), shardId, new ShardSnapshotStatus(localNodeId, State.FAILED, ExceptionsHelper.detailedMessage(e)), masterNode);
                        }
                    });
                } catch (Exception e) {
                    updateIndexShardSnapshotStatus(entry.getKey(), shardId, new ShardSnapshotStatus(localNodeId, State.FAILED, ExceptionsHelper.detailedMessage(e)), masterNode);
                }
            }
        }
    }
}
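The bulk of the method above is driven by one pattern: read the SnapshotsInProgress custom out of the new cluster state, then pick out the shards that the master has assigned to the local node in INIT state. A minimal sketch of just that read path is shown below; the helper name shardsToSnapshotOnThisNode is hypothetical, and the sketch assumes the same imports and nested types (State, ShardSnapshotStatus, ObjectObjectCursor) as the method above.

// Sketch only, not part of the Elasticsearch source: collect the shards of STARTED snapshot
// entries that the cluster state assigns to this node in INIT state.
static Map<Snapshot, Set<ShardId>> shardsToSnapshotOnThisNode(ClusterChangedEvent event) {
    SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE);
    Map<Snapshot, Set<ShardId>> result = new HashMap<>();
    if (snapshotsInProgress == null) {
        return result;
    }
    String localNodeId = event.state().nodes().getLocalNodeId();
    for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) {
        if (entry.state() != State.STARTED) {
            continue;
        }
        for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shard : entry.shards()) {
            // Only shards assigned to this node and not yet started locally are of interest
            if (localNodeId.equals(shard.value.nodeId()) && shard.value.state() == State.INIT) {
                result.computeIfAbsent(entry.snapshot(), s -> new HashSet<>()).add(shard.key);
            }
        }
    }
    return result;
}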
Use of org.elasticsearch.cluster.SnapshotsInProgress in project elasticsearch by elastic.
The class SnapshotsService, method isRepositoryInUse.
/**
 * Checks if a repository is currently in use by one of the snapshots
 *
 * @param clusterState cluster state
 * @param repository   repository id
 * @return true if repository is currently in use by one of the running snapshots
 */
public static boolean isRepositoryInUse(ClusterState clusterState, String repository) {
    SnapshotsInProgress snapshots = clusterState.custom(SnapshotsInProgress.TYPE);
    if (snapshots != null) {
        for (SnapshotsInProgress.Entry snapshot : snapshots.entries()) {
            if (repository.equals(snapshot.snapshot().getRepository())) {
                return true;
            }
        }
    }
    SnapshotDeletionsInProgress deletionsInProgress = clusterState.custom(SnapshotDeletionsInProgress.TYPE);
    if (deletionsInProgress != null) {
        for (SnapshotDeletionsInProgress.Entry entry : deletionsInProgress.getEntries()) {
            if (entry.getSnapshot().getRepository().equals(repository)) {
                return true;
            }
        }
    }
    return false;
}
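Because isRepositoryInUse consults both SnapshotsInProgress and SnapshotDeletionsInProgress, a caller can use it to refuse repository changes while either kind of operation is still running. A hedged caller sketch follows; the helper name ensureRepositoryNotInUse and the exact exception are assumptions for illustration, not the verbatim Elasticsearch code.

// Illustrative guard before modifying or unregistering a repository.
static void ensureRepositoryNotInUse(ClusterState clusterState, String repositoryName) {
    if (SnapshotsService.isRepositoryInUse(clusterState, repositoryName)) {
        throw new IllegalStateException("trying to modify or unregister repository [" + repositoryName
            + "] that is currently used by a running snapshot or snapshot deletion");
    }
}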
Use of org.elasticsearch.cluster.SnapshotsInProgress in project elasticsearch by elastic.
The class SnapshotsService, method indicesToFailForCloseOrDeletion.
private static Set<Index> indicesToFailForCloseOrDeletion(ClusterState currentState, Set<IndexMetaData> indices) {
    SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
    Set<Index> indicesToFail = null;
    if (snapshots != null) {
        for (final SnapshotsInProgress.Entry entry : snapshots.entries()) {
            if (entry.partial() == false) {
                if (entry.state() == State.INIT) {
                    for (IndexId index : entry.indices()) {
                        IndexMetaData indexMetaData = currentState.metaData().index(index.getName());
                        if (indexMetaData != null && indices.contains(indexMetaData)) {
                            if (indicesToFail == null) {
                                indicesToFail = new HashSet<>();
                            }
                            indicesToFail.add(indexMetaData.getIndex());
                        }
                    }
                } else {
                    for (ObjectObjectCursor<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shard : entry.shards()) {
                        if (!shard.value.state().completed()) {
                            IndexMetaData indexMetaData = currentState.metaData().index(shard.key.getIndex());
                            if (indexMetaData != null && indices.contains(indexMetaData)) {
                                if (indicesToFail == null) {
                                    indicesToFail = new HashSet<>();
                                }
                                indicesToFail.add(shard.key.getIndex());
                            }
                        }
                    }
                }
            }
        }
    }
    return indicesToFail;
}
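The method returns null when no non-partial snapshot touches the given indices, and the set of conflicting indices otherwise; the delete path turns a non-empty result into the failure that testDeleteSnapshotting below expects. A rough, illustrative sketch of that step follows; the helper name failIfSnapshotting and the exact exception wiring are assumptions, not the verbatim Elasticsearch code.

// Illustrative: reject a close/delete request for indices that a running snapshot still needs.
static void failIfSnapshotting(ClusterState currentState, Set<IndexMetaData> indices) {
    Set<Index> snapshottingIndices = indicesToFailForCloseOrDeletion(currentState, indices);
    if (snapshottingIndices != null && snapshottingIndices.isEmpty() == false) {
        throw new IllegalArgumentException("Cannot delete indices that are being snapshotted: " + snapshottingIndices
            + ". Try again after snapshot finishes or cancel the currently running snapshot.");
    }
}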
Use of org.elasticsearch.cluster.SnapshotsInProgress in project elasticsearch by elastic.
The class SnapshotsService, method removedNodesCleanupNeeded.
private boolean removedNodesCleanupNeeded(ClusterChangedEvent event) {
    SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE);
    if (snapshotsInProgress == null) {
        return false;
    }
    // Check if we just became the master
    boolean newMaster = !event.previousState().nodes().isLocalNodeElectedMaster();
    for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) {
        if (newMaster && (snapshot.state() == State.SUCCESS || snapshot.state() == State.INIT)) {
            // We just replaced the old master, and snapshots in intermediate states need to be cleaned up
            return true;
        }
        for (DiscoveryNode node : event.nodesDelta().removedNodes()) {
            for (ObjectCursor<ShardSnapshotStatus> shardStatus : snapshot.shards().values()) {
                if (!shardStatus.value.state().completed() && node.getId().equals(shardStatus.value.nodeId())) {
                    // At least one shard was running on the removed node - we need to fail it
                    return true;
                }
            }
        }
    }
    return false;
}
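The check above walks the removed-node list once per snapshot entry. An equivalent sketch that collects the removed node ids into a set first is shown below; it is a restructuring for illustration only (the method name with the Alt suffix is hypothetical), not the actual source.

// Equivalent sketch: gather removed node ids once, then look for incomplete shard snapshots
// that were assigned to one of them.
private boolean removedNodesCleanupNeededAlt(ClusterChangedEvent event) {
    SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE);
    if (snapshotsInProgress == null) {
        return false;
    }
    boolean newMaster = !event.previousState().nodes().isLocalNodeElectedMaster();
    Set<String> removedNodeIds = new HashSet<>();
    for (DiscoveryNode node : event.nodesDelta().removedNodes()) {
        removedNodeIds.add(node.getId());
    }
    for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) {
        if (newMaster && (snapshot.state() == State.SUCCESS || snapshot.state() == State.INIT)) {
            return true;
        }
        for (ObjectCursor<ShardSnapshotStatus> shardStatus : snapshot.shards().values()) {
            if (!shardStatus.value.state().completed() && removedNodeIds.contains(shardStatus.value.nodeId())) {
                return true;
            }
        }
    }
    return false;
}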
Use of org.elasticsearch.cluster.SnapshotsInProgress in project elasticsearch by elastic.
The class MetaDataDeleteIndexServiceTests, method testDeleteSnapshotting.
public void testDeleteSnapshotting() {
    String index = randomAsciiOfLength(5);
    Snapshot snapshot = new Snapshot("doesn't matter", new SnapshotId("snapshot name", "snapshot uuid"));
    SnapshotsInProgress snaps = new SnapshotsInProgress(new SnapshotsInProgress.Entry(snapshot, true, false,
        SnapshotsInProgress.State.INIT, singletonList(new IndexId(index, "doesn't matter")),
        System.currentTimeMillis(), (long) randomIntBetween(0, 1000), ImmutableOpenMap.of()));
    ClusterState state = ClusterState.builder(clusterState(index)).putCustom(SnapshotsInProgress.TYPE, snaps).build();
    Exception e = expectThrows(IllegalArgumentException.class,
        () -> service.deleteIndices(state, singleton(state.metaData().getIndices().get(index).getIndex())));
    assertEquals("Cannot delete indices that are being snapshotted: [[" + index + "]]. Try again after snapshot finishes "
        + "or cancel the currently running snapshot.", e.getMessage());
}