Use of org.elasticsearch.repositories.IndexId in the project crate (by crate):
class SnapshotsService, method shards.
/**
 * Calculates the set of shards that should be included in the current snapshot,
 * together with their initial snapshot state (assigned node, WAITING/MISSING, ...).
 *
 * @param clusterState   current cluster state, used to resolve index metadata and routing
 * @param snapshot       the in-progress snapshot entry listing the indices to snapshot
 * @param repositoryData current repository data, used to look up existing shard generations
 * @return map of shard id to initial shard snapshot status for every shard in the snapshot
 */
private static ImmutableOpenMap<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shards(ClusterState clusterState, SnapshotsInProgress.Entry snapshot, RepositoryData repositoryData) {
    ImmutableOpenMap.Builder<ShardId, SnapshotsInProgress.ShardSnapshotStatus> builder = ImmutableOpenMap.builder();
    Metadata metadata = clusterState.metadata();
    final ShardGenerations shardGenerations = repositoryData.shardGenerations();
    for (IndexId index : snapshot.indices()) {
        final String indexName = index.getName();
        // An index is "new" (to this repository) when no previous snapshot contains it.
        final boolean isNewIndex = repositoryData.getIndices().containsKey(indexName) == false;
        IndexMetadata indexMetadata = metadata.index(indexName);
        if (indexMetadata == null) {
            // The index was deleted before we managed to start the snapshot - mark it as missing.
            builder.put(new ShardId(indexName, IndexMetadata.INDEX_UUID_NA_VALUE, 0),
                        new SnapshotsInProgress.ShardSnapshotStatus(null, ShardState.MISSING, "missing index", null));
        } else {
            IndexRoutingTable indexRoutingTable = clusterState.getRoutingTable().index(indexName);
            for (int i = 0; i < indexMetadata.getNumberOfShards(); i++) {
                ShardId shardId = new ShardId(indexMetadata.getIndex(), i);
                final String shardRepoGeneration;
                if (snapshot.useShardGenerations()) {
                    if (isNewIndex) {
                        // A new index cannot have a shard generation in the repository yet.
                        assert shardGenerations.getShardGen(index, shardId.getId()) == null
                            : "Found shard generation for new index [" + index + "]";
                        shardRepoGeneration = ShardGenerations.NEW_SHARD_GEN;
                    } else {
                        shardRepoGeneration = shardGenerations.getShardGen(index, shardId.getId());
                    }
                } else {
                    // Repository does not track shard generations (legacy repository format).
                    shardRepoGeneration = null;
                }
                if (indexRoutingTable != null) {
                    ShardRouting primary = indexRoutingTable.shard(i).primaryShard();
                    if (primary == null || !primary.assignedToNode()) {
                        builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, ShardState.MISSING, "primary shard is not allocated", shardRepoGeneration));
                    } else if (primary.relocating() || primary.initializing()) {
                        // Shard will be snapshotted once the primary settles; record it as WAITING.
                        builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), ShardState.WAITING, shardRepoGeneration));
                    } else if (!primary.started()) {
                        builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), ShardState.MISSING, "primary shard hasn't been started yet", shardRepoGeneration));
                    } else {
                        builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), shardRepoGeneration));
                    }
                } else {
                    // BUGFIX: previously this branch dropped the computed shardRepoGeneration
                    // (every other branch passes it), losing shard-generation tracking for
                    // repositories that use shard generations.
                    builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, ShardState.MISSING, "missing routing table", shardRepoGeneration));
                }
            }
        }
    }
    return builder.build();
}
Use of org.elasticsearch.repositories.IndexId in the project crate (by crate):
class SnapshotsService, method buildGenerations.
/**
 * Builds the {@link ShardGenerations} for the given in-progress snapshot from the
 * per-shard generations recorded in its shard statuses. Shards whose index has been
 * deleted from the cluster metadata are skipped (only legal for partial snapshots).
 */
private static ShardGenerations buildGenerations(SnapshotsInProgress.Entry snapshot, Metadata metadata) {
    // Map index name -> IndexId so shard entries can be resolved to repository index ids.
    final Map<String, IndexId> indicesByName = new HashMap<>();
    for (IndexId idx : snapshot.indices()) {
        indicesByName.put(idx.getName(), idx);
    }
    final ShardGenerations.Builder generations = ShardGenerations.builder();
    snapshot.shards().forEach(cursor -> {
        final ShardId shardId = cursor.key;
        if (metadata.index(shardId.getIndex()) == null) {
            assert snapshot.partial() : "Index [" + shardId.getIndex() + "] was deleted during a snapshot but snapshot was not partial.";
            return;
        }
        final IndexId resolved = indicesByName.get(shardId.getIndexName());
        if (resolved != null) {
            generations.put(resolved, shardId.id(), cursor.value.generation());
        }
    });
    return generations.build();
}
Use of org.elasticsearch.repositories.IndexId in the project crate (by crate):
class BlobStoreTestUtil, method assertSnapshotUUIDs.
/**
 * Asserts that for every snapshot listed in {@code repositoryData} the expected
 * snapshot- and metadata-blobs exist at the repository root, and that the per-index
 * and per-shard folders contain the corresponding metadata blobs.
 *
 * @param repository     the blob store repository under test
 * @param repositoryData the repository's current {@code RepositoryData}
 * @throws IOException if listing blobs fails
 */
private static void assertSnapshotUUIDs(BlobStoreRepository repository, RepositoryData repositoryData) throws IOException {
    final BlobContainer repoRoot = repository.blobContainer();
    final Collection<SnapshotId> snapshotIds = repositoryData.getSnapshotIds();
    final List<String> expectedSnapshotUUIDs = snapshotIds.stream().map(SnapshotId::getUUID).collect(Collectors.toList());
    // Both "snap-<uuid>.dat" and "meta-<uuid>.dat" must exist at the root for each snapshot.
    for (String prefix : new String[] { "snap-", "meta-" }) {
        final Collection<String> foundSnapshotUUIDs = repoRoot.listBlobs().keySet().stream().filter(p -> p.startsWith(prefix)).map(p -> p.replace(prefix, "").replace(".dat", "")).collect(Collectors.toSet());
        assertThat(foundSnapshotUUIDs, containsInAnyOrder(expectedSnapshotUUIDs.toArray(Strings.EMPTY_ARRAY)));
    }
    final BlobContainer indicesContainer = repository.blobContainer().children().get("indices");
    final Map<String, BlobContainer> indices;
    if (indicesContainer == null) {
        indices = Collections.emptyMap();
    } else {
        indices = indicesContainer.children();
    }
    final Map<IndexId, Integer> maxShardCountsExpected = new HashMap<>();
    final Map<IndexId, Integer> maxShardCountsSeen = new HashMap<>();
    // Assert that for each snapshot, the relevant metadata was written to index and shard folders
    for (SnapshotId snapshotId : snapshotIds) {
        repository.getSnapshotInfo(snapshotId, ActionListener.wrap(snapshotInfo -> {
            for (String index : snapshotInfo.indices()) {
                final IndexId indexId = repositoryData.resolveIndexId(index);
                assertThat(indices, hasKey(indexId.getId()));
                final BlobContainer indexContainer = indices.get(indexId.getId());
                assertThat(indexContainer.listBlobs(), hasKey(String.format(Locale.ROOT, BlobStoreRepository.METADATA_NAME_FORMAT, snapshotId.getUUID())));
                final IndexMetadata indexMetadata = PlainActionFuture.get(x -> repository.getSnapshotIndexMetadata(snapshotId, indexId, x));
                for (Map.Entry<String, BlobContainer> entry : indexContainer.children().entrySet()) {
                    // Skip Lucene MockFS extraN directory
                    if (entry.getKey().startsWith("extra")) {
                        continue;
                    }
                    final int shardId = Integer.parseInt(entry.getKey());
                    final int shardCount = indexMetadata.getNumberOfShards();
                    maxShardCountsExpected.compute(indexId, (i, existing) -> existing == null || existing < shardCount ? shardCount : existing);
                    final BlobContainer shardContainer = entry.getValue();
                    // NOTE(review): comment appears truncated upstream — a shard path may remain
                    // after its contents become unreferenced. Remove this conditional once fixed.
                    if (shardContainer.listBlobs().keySet().stream().anyMatch(blob -> blob.startsWith("extra") == false)) {
                        final int impliedCount = shardId - 1;
                        maxShardCountsSeen.compute(indexId, (i, existing) -> existing == null || existing < impliedCount ? impliedCount : existing);
                    }
                    if (shardId < shardCount && snapshotInfo.shardFailures().stream().noneMatch(shardFailure -> shardFailure.index().equals(index) && shardFailure.shardId() == shardId)) {
                        final Map<String, BlobMetadata> shardPathContents = shardContainer.listBlobs();
                        assertThat(shardPathContents, hasKey(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotId.getUUID())));
                        // At most two index-N generation files may coexist (old + new during update).
                        assertThat(shardPathContents.keySet().stream().filter(name -> name.startsWith(BlobStoreRepository.INDEX_FILE_PREFIX)).count(), lessThanOrEqualTo(2L));
                    }
                }
            }
        }, e -> {
            // BUGFIX: this handler used to be empty, silently swallowing both load failures
            // and AssertionErrors thrown by the checks above (ActionListener.wrap routes
            // exceptions from the response handler to the failure handler), letting a broken
            // repository pass this assertion. Rethrow so the test fails loudly.
            throw new AssertionError("Failed to verify snapshot info for [" + snapshotId + "]", e);
        }));
    }
    maxShardCountsSeen.forEach(((indexId, count) -> assertThat("Found unreferenced shard paths for index [" + indexId + "]", count, lessThanOrEqualTo(maxShardCountsExpected.get(indexId)))));
}
Aggregations