Use of org.elasticsearch.common.blobstore.BlobMetadata in project crate by crate.
The class FsBlobContainer, method listBlobsByPrefix.
@Override
public Map<String, BlobMetadata> listBlobsByPrefix(String blobNamePrefix) throws IOException {
    // If we get duplicate files we should just take the last entry
    Map<String, BlobMetadata> builder = new HashMap<>();
    blobNamePrefix = blobNamePrefix == null ? "" : blobNamePrefix;
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(path, blobNamePrefix + "*")) {
        for (Path file : stream) {
            final BasicFileAttributes attrs = Files.readAttributes(file, BasicFileAttributes.class);
            if (attrs.isRegularFile()) {
                builder.put(file.getFileName().toString(), new PlainBlobMetadata(file.getFileName().toString(), attrs.size()));
            }
        }
    }
    return unmodifiableMap(builder);
}
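For comparison, the same glob-based listing can be reproduced with the plain JDK, independent of the Elasticsearch blob store abstractions. The following is a minimal, self-contained sketch (the class name and directory argument are illustrative, not part of crate or Elasticsearch); it collects regular files by name prefix into a name-to-size map, just as listBlobsByPrefix does:

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.HashMap;
import java.util.Map;

public class PrefixListingSketch {

    // Lists regular files under 'dir' whose names start with 'prefix', mapping name -> size in bytes.
    static Map<String, Long> listByPrefix(Path dir, String prefix) throws IOException {
        Map<String, Long> result = new HashMap<>();
        // Files.newDirectoryStream takes a glob pattern; "<prefix>*" matches names starting with the prefix.
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir, prefix + "*")) {
            for (Path file : stream) {
                BasicFileAttributes attrs = Files.readAttributes(file, BasicFileAttributes.class);
                if (attrs.isRegularFile()) { // skip directories, as FsBlobContainer does
                    result.put(file.getFileName().toString(), attrs.size());
                }
            }
        }
        return result;
    }

    public static void main(String[] args) throws IOException {
        // Illustrative directory; point this at any existing path to try the sketch.
        Path dir = Paths.get(args.length > 0 ? args[0] : ".");
        listByPrefix(dir, "foo-").forEach((name, size) -> System.out.println(name + " -> " + size + " bytes"));
    }
}

Note that the second argument to Files.newDirectoryStream is interpreted as a glob, so a prefix containing characters such as '*' or '?' would need escaping; the FsBlobContainer code above shares that property.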
Use of org.elasticsearch.common.blobstore.BlobMetadata in project crate by crate.
The class BlobStoreRepository, method deleteSnapshot.
@Override
public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId, boolean writeShardGens, ActionListener<Void> listener) {
    if (isReadOnly()) {
        listener.onFailure(new RepositoryException(metadata.name(), "cannot delete snapshot from a readonly repository"));
    } else {
        final long latestKnownGen = latestKnownRepoGen.get();
        if (latestKnownGen > repositoryStateId) {
            listener.onFailure(new ConcurrentSnapshotExecutionException(new Snapshot(metadata.name(), snapshotId), "Another concurrent operation moved repo generation to [" + latestKnownGen + "] but this delete assumed generation [" + repositoryStateId + "]"));
            return;
        }
        try {
            final Map<String, BlobMetadata> rootBlobs = blobContainer().listBlobs();
            final RepositoryData repositoryData = safeRepositoryData(repositoryStateId, rootBlobs);
            // Cache the indices that were found before writing out the new index-N blob so that a stuck master will never
            // delete an index that was created by another master node after writing this index-N blob.
            final Map<String, BlobContainer> foundIndices = blobStore().blobContainer(indicesPath()).children();
            doDeleteShardSnapshots(snapshotId, repositoryStateId, foundIndices, rootBlobs, repositoryData, writeShardGens, listener);
        } catch (Exception ex) {
            listener.onFailure(new RepositoryException(metadata.name(), "failed to delete snapshot [" + snapshotId + "]", ex));
        }
    }
}
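The guard at the top of deleteSnapshot is an optimistic concurrency check: the delete carries the repository generation it was planned against (repositoryStateId) and aborts if the repository has already advanced past it. Below is a minimal standalone sketch of that pattern; all names are hypothetical and nothing here is the Elasticsearch API.

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;

public class GenerationGuardSketch {

    // Latest repository generation this node has observed (hypothetical stand-in for latestKnownRepoGen).
    private final AtomicLong latestKnownRepoGen = new AtomicLong(0);

    // Runs 'operation' only if the repository has not advanced past the generation the caller planned
    // against; otherwise reports a conflict, mirroring the ConcurrentSnapshotExecutionException branch.
    void runAtGeneration(long expectedGen, Runnable operation, Consumer<Exception> onFailure) {
        final long latest = latestKnownRepoGen.get();
        if (latest > expectedGen) {
            onFailure.accept(new IllegalStateException(
                "repo generation moved to [" + latest + "] but this operation assumed [" + expectedGen + "]"));
            return;
        }
        operation.run();
    }

    public static void main(String[] args) {
        GenerationGuardSketch repo = new GenerationGuardSketch();
        repo.latestKnownRepoGen.set(5);
        repo.runAtGeneration(4, () -> System.out.println("ran"), e -> System.out.println("conflict: " + e.getMessage()));
        repo.runAtGeneration(5, () -> System.out.println("ran"), e -> System.out.println("conflict: " + e.getMessage()));
    }
}

This covers only the fail-fast check at the start of the delete; the authoritative protection comes from how the new index-N generation is committed, which the excerpt above does not show.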
Use of org.elasticsearch.common.blobstore.BlobMetadata in project crate by crate.
The class BlobStoreRepository, method cleanupStaleBlobs.
/**
 * Cleans up stale blobs directly under the repository root as well as all indices paths that aren't referenced by any
 * existing snapshots. This method is only to be called directly after a new {@link RepositoryData} was written to the
 * repository, with {@code foundIndices} and {@code rootBlobs} collected before the new repository data was written.
 *
 * @param foundIndices all indices blob containers found in the repository before {@code newRepoData} was written
 * @param rootBlobs    all blobs found directly under the repository root
 * @param newRepoData  new repository data that was just written
 * @param listener     listener to invoke with the combined count of all blobs removed in this operation
 */
private void cleanupStaleBlobs(Map<String, BlobContainer> foundIndices, Map<String, BlobMetadata> rootBlobs, RepositoryData newRepoData, ActionListener<Long> listener) {
    final GroupedActionListener<Long> groupedListener = new GroupedActionListener<>(ActionListener.wrap(deleteResults -> {
        long deletes = 0;
        for (Long result : deleteResults) {
            deletes += result;
        }
        listener.onResponse(deletes);
    }, listener::onFailure), 2);
    final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
    executor.execute(ActionRunnable.supply(groupedListener, () -> {
        List<String> deletedBlobs = cleanupStaleRootFiles(staleRootBlobs(newRepoData, rootBlobs.keySet()));
        return (long) deletedBlobs.size();
    }));
    final Set<String> survivingIndexIds = newRepoData.getIndices().values().stream().map(IndexId::getId).collect(Collectors.toSet());
    executor.execute(ActionRunnable.supply(groupedListener, () -> cleanupStaleIndices(foundIndices, survivingIndexIds)));
}
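The GroupedActionListener here acts as a fan-in: two independent cleanup tasks run on the SNAPSHOT thread pool, and their per-task deletion counts are summed once both have completed. Below is a self-contained sketch of the same fan-in using plain CompletableFuture (hypothetical class name and made-up counts; this is not the Elasticsearch GroupedActionListener itself):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class FanInSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        // Two independent cleanup tasks, each reporting how many blobs it deleted.
        CompletableFuture<Long> staleRootFiles = CompletableFuture.supplyAsync(() -> 3L, executor);
        CompletableFuture<Long> staleIndices = CompletableFuture.supplyAsync(() -> 7L, executor);
        // Combine once both complete, mirroring a GroupedActionListener with a group size of 2.
        long totalDeleted = staleRootFiles.thenCombine(staleIndices, Long::sum).get();
        System.out.println("deleted " + totalDeleted + " blobs"); // prints: deleted 10 blobs
        executor.shutdown();
    }
}

The production code uses the listener style rather than futures so that no pool thread is blocked waiting on the combined result; the blocking get() in this sketch is only for demonstration.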
Use of org.elasticsearch.common.blobstore.BlobMetadata in project crate by crate.
The class ESBlobStoreContainerTestCase, method testList.
public void testList() throws IOException {
    try (BlobStore store = newBlobStore()) {
        final BlobContainer container = store.blobContainer(new BlobPath());
        assertThat(container.listBlobs().size(), equalTo(0));
        int numberOfFooBlobs = randomIntBetween(0, 10);
        int numberOfBarBlobs = randomIntBetween(3, 20);
        Map<String, Long> generatedBlobs = new HashMap<>();
        for (int i = 0; i < numberOfFooBlobs; i++) {
            int length = randomIntBetween(10, 100);
            String name = "foo-" + i + "-";
            generatedBlobs.put(name, (long) length);
            writeRandomBlob(container, name, length);
        }
        for (int i = 1; i < numberOfBarBlobs; i++) {
            int length = randomIntBetween(10, 100);
            String name = "bar-" + i + "-";
            generatedBlobs.put(name, (long) length);
            writeRandomBlob(container, name, length);
        }
        int length = randomIntBetween(10, 100);
        String name = "bar-0-";
        generatedBlobs.put(name, (long) length);
        byte[] data = writeRandomBlob(container, name, length);
        Map<String, BlobMetadata> blobs = container.listBlobs();
        assertThat(blobs.size(), equalTo(numberOfFooBlobs + numberOfBarBlobs));
        for (Map.Entry<String, Long> generated : generatedBlobs.entrySet()) {
            BlobMetadata blobMetadata = blobs.get(generated.getKey());
            assertThat(generated.getKey(), blobMetadata, notNullValue());
            assertThat(blobMetadata.name(), equalTo(generated.getKey()));
            assertThat(blobMetadata.length(), equalTo(generated.getValue()));
        }
        assertThat(container.listBlobsByPrefix("foo-").size(), equalTo(numberOfFooBlobs));
        assertThat(container.listBlobsByPrefix("bar-").size(), equalTo(numberOfBarBlobs));
        assertThat(container.listBlobsByPrefix("baz-").size(), equalTo(0));
    }
}
Use of org.elasticsearch.common.blobstore.BlobMetadata in project crate by crate.
The class BlobStoreTestUtil, method assertSnapshotUUIDs.
private static void assertSnapshotUUIDs(BlobStoreRepository repository, RepositoryData repositoryData) throws IOException {
    final BlobContainer repoRoot = repository.blobContainer();
    final Collection<SnapshotId> snapshotIds = repositoryData.getSnapshotIds();
    final List<String> expectedSnapshotUUIDs = snapshotIds.stream().map(SnapshotId::getUUID).collect(Collectors.toList());
    for (String prefix : new String[] { "snap-", "meta-" }) {
        final Collection<String> foundSnapshotUUIDs = repoRoot.listBlobs().keySet().stream().filter(p -> p.startsWith(prefix)).map(p -> p.replace(prefix, "").replace(".dat", "")).collect(Collectors.toSet());
        assertThat(foundSnapshotUUIDs, containsInAnyOrder(expectedSnapshotUUIDs.toArray(Strings.EMPTY_ARRAY)));
    }
    final BlobContainer indicesContainer = repository.blobContainer().children().get("indices");
    final Map<String, BlobContainer> indices;
    if (indicesContainer == null) {
        indices = Collections.emptyMap();
    } else {
        indices = indicesContainer.children();
    }
    final Map<IndexId, Integer> maxShardCountsExpected = new HashMap<>();
    final Map<IndexId, Integer> maxShardCountsSeen = new HashMap<>();
    // Assert that for each snapshot, the relevant metadata was written to index and shard folders
    for (SnapshotId snapshotId : snapshotIds) {
        repository.getSnapshotInfo(snapshotId, ActionListener.wrap(snapshotInfo -> {
            for (String index : snapshotInfo.indices()) {
                final IndexId indexId = repositoryData.resolveIndexId(index);
                assertThat(indices, hasKey(indexId.getId()));
                final BlobContainer indexContainer = indices.get(indexId.getId());
                assertThat(indexContainer.listBlobs(), hasKey(String.format(Locale.ROOT, BlobStoreRepository.METADATA_NAME_FORMAT, snapshotId.getUUID())));
                final IndexMetadata indexMetadata = PlainActionFuture.get(x -> repository.getSnapshotIndexMetadata(snapshotId, indexId, x));
                for (Map.Entry<String, BlobContainer> entry : indexContainer.children().entrySet()) {
                    // Skip Lucene MockFS extraN directory
                    if (entry.getKey().startsWith("extra")) {
                        continue;
                    }
                    final int shardId = Integer.parseInt(entry.getKey());
                    final int shardCount = indexMetadata.getNumberOfShards();
                    maxShardCountsExpected.compute(indexId, (i, existing) -> existing == null || existing < shardCount ? shardCount : existing);
                    final BlobContainer shardContainer = entry.getValue();
                    // TODO: we shouldn't be leaking empty shard directories when a shard (but not all of the index it belongs to)
                    // becomes unreferenced. We should fix that and remove this conditional once it's fixed.
                    if (shardContainer.listBlobs().keySet().stream().anyMatch(blob -> blob.startsWith("extra") == false)) {
                        final int impliedCount = shardId + 1;
                        maxShardCountsSeen.compute(indexId, (i, existing) -> existing == null || existing < impliedCount ? impliedCount : existing);
                    }
                    if (shardId < shardCount && snapshotInfo.shardFailures().stream().noneMatch(shardFailure -> shardFailure.index().equals(index) && shardFailure.shardId() == shardId)) {
                        final Map<String, BlobMetadata> shardPathContents = shardContainer.listBlobs();
                        assertThat(shardPathContents, hasKey(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotId.getUUID())));
                        assertThat(shardPathContents.keySet().stream().filter(name -> name.startsWith(BlobStoreRepository.INDEX_FILE_PREFIX)).count(), lessThanOrEqualTo(2L));
                    }
                }
            }
        }, (e) -> {
        }));
    }
    maxShardCountsSeen.forEach((indexId, count) -> assertThat("Found unreferenced shard paths for index [" + indexId + "]", count, lessThanOrEqualTo(maxShardCountsExpected.get(indexId))));
}
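One detail worth flagging in the UUID assertion near the top of this method: String.replace substitutes every occurrence of its target, so p.replace(prefix, "") would also rewrite a UUID that happened to contain the literal text "snap-" or "meta-". A stricter extraction trims only the leading prefix and trailing suffix; the helper below is a hypothetical sketch, not part of BlobStoreTestUtil.

import java.util.Optional;

public class BlobNameSketch {

    // Extracts the UUID from names like "snap-<uuid>.dat" by trimming only the
    // leading prefix and trailing suffix, rather than replacing all occurrences.
    static Optional<String> uuidFromBlobName(String blobName, String prefix, String suffix) {
        if (blobName.startsWith(prefix) && blobName.endsWith(suffix)) {
            return Optional.of(blobName.substring(prefix.length(), blobName.length() - suffix.length()));
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        System.out.println(uuidFromBlobName("snap-1a2b3c.dat", "snap-", ".dat")); // Optional[1a2b3c]
        System.out.println(uuidFromBlobName("index-42", "snap-", ".dat"));       // Optional.empty
    }
}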