Use of org.elasticsearch.common.blobstore.BlobMetadata in project elasticsearch by elastic.
The class AzureStorageServiceImpl, method listBlobsByPrefix.
@Override
public Map<String, BlobMetaData> listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix)
        throws URISyntaxException, StorageException {
    // NOTE: this should be here: if (prefix == null) prefix = "";
    // however, this is really inefficient since deleteBlobsByPrefix enumerates everything and
    // then does a prefix match on the result; it should just call listBlobsByPrefix with the prefix!
    logger.debug("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix);
    MapBuilder<String, BlobMetaData> blobsBuilder = MapBuilder.newMapBuilder();
    CloudBlobClient client = this.getSelectedClient(account, mode);
    CloudBlobContainer blobContainer = client.getContainerReference(container);
    SocketAccess.doPrivilegedVoidException(() -> {
        if (blobContainer.exists()) {
            for (ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? "" : prefix))) {
                URI uri = blobItem.getUri();
                logger.trace("blob url [{}]", uri);
                // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/
                // this requires 1 + container.length() + 1, with each 1 corresponding to one of the /
                String blobPath = uri.getPath().substring(1 + container.length() + 1);
                CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobPath);
                // fetch the blob attributes from Azure (getBlockBlobReference does not do this)
                // this is needed to retrieve the blob length (among other metadata) from Azure Storage
                blob.downloadAttributes();
                BlobProperties properties = blob.getProperties();
                String name = blobPath.substring(keyPath.length());
                logger.trace("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength());
                blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getLength()));
            }
        }
    });
    return blobsBuilder.immutableMap();
}
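For context, a minimal sketch of how a caller might consume this method. The account, container, and path values below are placeholders, and the service reference and logger are assumed to be in scope:

// Hypothetical caller: list blobs under "indices/" whose names start with "snap-".
// Account name, location mode, container, and paths are placeholder values.
void printSnapshotBlobs(AzureStorageService azureStorageService) throws URISyntaxException, StorageException {
    Map<String, BlobMetaData> blobs = azureStorageService.listBlobsByPrefix(
        "my_account", LocationMode.PRIMARY_ONLY, "my-container", "indices/", "snap-");
    for (Map.Entry<String, BlobMetaData> entry : blobs.entrySet()) {
        // BlobMetaData exposes name() and length(); the map key is the name relative to keyPath
        logger.info("blob [{}] has length [{}]", entry.getKey(), entry.getValue().length());
    }
}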
Use of org.elasticsearch.common.blobstore.BlobMetadata in project elasticsearch by elastic.
The class AzureStorageServiceMock, method listBlobsByPrefix.
@Override
public Map<String, BlobMetaData> listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) {
    MapBuilder<String, BlobMetaData> blobsBuilder = MapBuilder.newMapBuilder();
    blobs.forEach((String blobName, ByteArrayOutputStream bos) -> {
        final String checkBlob;
        if (keyPath != null && !keyPath.isEmpty()) {
            // strip off the key path from the beginning of the blob name (see note on replace() below)
            checkBlob = blobName.replace(keyPath, "");
        } else {
            checkBlob = blobName;
        }
        if (prefix == null || startsWithIgnoreCase(checkBlob, prefix)) {
            blobsBuilder.put(blobName, new PlainBlobMetaData(checkBlob, bos.size()));
        }
    });
    return blobsBuilder.immutableMap();
}
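One subtlety in the mock above: String.replace removes every occurrence of keyPath, not just a leading one, so the filtering relies on mock blob names containing the key path only at the start. A strictly prefix-only strip would look like this sketch (the helper name is hypothetical):

// Hypothetical helper: remove keyPath only when it is genuinely a prefix of blobName.
private static String stripKeyPath(String blobName, String keyPath) {
    if (keyPath != null && !keyPath.isEmpty() && blobName.startsWith(keyPath)) {
        return blobName.substring(keyPath.length());
    }
    return blobName;
}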
Use of org.elasticsearch.common.blobstore.BlobMetadata in project crate by crate.
The class BlobStoreFormatIT, method testCompressionIsApplied.
public void testCompressionIsApplied() throws IOException {
    BlobStore blobStore = createTestBlobStore();
    BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath());
    StringBuilder veryRedundantText = new StringBuilder();
    for (int i = 0; i < randomIntBetween(100, 300); i++) {
        veryRedundantText.append("Blah ");
    }
    ChecksumBlobStoreFormat<BlobObj> checksumFormat =
        new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, xContentRegistry(), false);
    ChecksumBlobStoreFormat<BlobObj> checksumFormatComp =
        new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, xContentRegistry(), true);
    BlobObj blobObj = new BlobObj(veryRedundantText.toString());
    checksumFormatComp.write(blobObj, blobContainer, "blob-comp", true);
    checksumFormat.write(blobObj, blobContainer, "blob-not-comp", true);
    Map<String, BlobMetadata> blobs = blobContainer.listBlobsByPrefix("blob-");
    assertEquals(2, blobs.size());
    assertThat(blobs.get("blob-not-comp").length(), greaterThan(blobs.get("blob-comp").length()));
}
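Compression is transparent on the read path: ChecksumBlobStoreFormat detects a compressed blob from its contents rather than from the format's own compress flag. A hedged round-trip check one might add to the same test, assuming the read(BlobContainer, String) accessor and BlobObj#getText() defined elsewhere in this test class:

// Either format instance should decode either blob back to the original text.
assertEquals(blobObj.getText(), checksumFormat.read(blobContainer, "blob-comp").getText());
assertEquals(blobObj.getText(), checksumFormatComp.read(blobContainer, "blob-not-comp").getText());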
Use of org.elasticsearch.common.blobstore.BlobMetadata in project crate by crate.
The class BlobStoreRepository, method doDeleteShardSnapshots.
/**
 * After updating the {@link RepositoryData}, each of the shard directories is individually first moved to the next shard generation
 * and then has all now-unreferenced blobs in it deleted.
 *
 * @param snapshotId        SnapshotId to delete
 * @param repositoryStateId Expected repository state id
 * @param foundIndices      All indices folders found in the repository before executing any writes to the repository during this
 *                          delete operation
 * @param rootBlobs         All blobs found at the root of the repository before executing any writes to the repository during this
 *                          delete operation
 * @param repositoryData    RepositoryData found in the repository before executing this delete
 * @param writeShardGens    Whether to write updated shard generations into the repository metadata
 * @param listener          Listener to invoke once finished
 */
private void doDeleteShardSnapshots(SnapshotId snapshotId, long repositoryStateId, Map<String, BlobContainer> foundIndices,
                                    Map<String, BlobMetadata> rootBlobs, RepositoryData repositoryData, boolean writeShardGens,
                                    ActionListener<Void> listener) {
    if (writeShardGens) {
        // First write the new shard state metadata (with the removed snapshot) and compute deletion targets
        final StepListener<Collection<ShardSnapshotMetaDeleteResult>> writeShardMetadataAndComputeDeletesStep = new StepListener<>();
        writeUpdatedShardMetadataAndComputeDeletes(snapshotId, repositoryData, true, writeShardMetadataAndComputeDeletesStep);
        // Once we have put the new shard-level metadata into place, we can update the repository metadata as follows:
        // 1. Remove the snapshot from the list of existing snapshots
        // 2. Update the index shard generations of all updated shard folders
        //
        // Note: If we fail updating any of the individual shard paths, none of them are changed since the newly created
        //       index-${gen_uuid} will not be referenced by the existing RepositoryData and new RepositoryData is only
        //       written if all shard paths have been successfully updated.
        final StepListener<RepositoryData> writeUpdatedRepoDataStep = new StepListener<>();
        writeShardMetadataAndComputeDeletesStep.whenComplete(deleteResults -> {
            final ShardGenerations.Builder builder = ShardGenerations.builder();
            for (ShardSnapshotMetaDeleteResult newGen : deleteResults) {
                builder.put(newGen.indexId, newGen.shardId, newGen.newGeneration);
            }
            final RepositoryData updatedRepoData = repositoryData.removeSnapshot(snapshotId, builder.build());
            writeIndexGen(updatedRepoData, repositoryStateId, true,
                ActionListener.wrap(v -> writeUpdatedRepoDataStep.onResponse(updatedRepoData), listener::onFailure));
        }, listener::onFailure);
        // Once we have updated the repository, run the clean-ups
        writeUpdatedRepoDataStep.whenComplete(updatedRepoData -> {
            // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion
            final ActionListener<Void> afterCleanupsListener =
                new GroupedActionListener<>(ActionListener.wrap(() -> listener.onResponse(null)), 2);
            asyncCleanupUnlinkedRootAndIndicesBlobs(foundIndices, rootBlobs, updatedRepoData, afterCleanupsListener);
            asyncCleanupUnlinkedShardLevelBlobs(snapshotId, writeShardMetadataAndComputeDeletesStep.result(), afterCleanupsListener);
        }, listener::onFailure);
    } else {
        // Write the new repository data first (with the removed snapshot), using no shard generations
        final RepositoryData updatedRepoData = repositoryData.removeSnapshot(snapshotId, ShardGenerations.EMPTY);
        writeIndexGen(updatedRepoData, repositoryStateId, false, ActionListener.wrap(v -> {
            // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion
            final ActionListener<Void> afterCleanupsListener =
                new GroupedActionListener<>(ActionListener.wrap(() -> listener.onResponse(null)), 2);
            asyncCleanupUnlinkedRootAndIndicesBlobs(foundIndices, rootBlobs, updatedRepoData, afterCleanupsListener);
            final StepListener<Collection<ShardSnapshotMetaDeleteResult>> writeMetaAndComputeDeletesStep = new StepListener<>();
            writeUpdatedShardMetadataAndComputeDeletes(snapshotId, repositoryData, false, writeMetaAndComputeDeletesStep);
            writeMetaAndComputeDeletesStep.whenComplete(
                deleteResults -> asyncCleanupUnlinkedShardLevelBlobs(snapshotId, deleteResults, afterCleanupsListener),
                afterCleanupsListener::onFailure);
        }, listener::onFailure));
    }
}
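The coordination primitive that lets the two cleanups run in parallel is GroupedActionListener: the wrapped listener fires once, only after groupSize onResponse calls. A minimal sketch of that pattern in isolation (the logger calls are placeholders):

// The delegate fires once both branches have completed; a failure in either
// branch makes the delegate fail instead of succeed.
ActionListener<Collection<Void>> delegate = ActionListener.wrap(
    results -> logger.info("both cleanups finished"),
    e -> logger.warn("cleanup failed", e));
GroupedActionListener<Void> afterCleanups = new GroupedActionListener<>(delegate, 2);
afterCleanups.onResponse(null); // e.g. root/indices blob cleanup done
afterCleanups.onResponse(null); // e.g. shard-level cleanup done; delegate fires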
Use of org.elasticsearch.common.blobstore.BlobMetadata in project elasticsearch by elastic.
The class FsBlobContainer, method listBlobsByPrefix.
@Override
public Map<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws IOException {
    // If we get duplicate files we should just take the last entry
    Map<String, BlobMetaData> builder = new HashMap<>();
    blobNamePrefix = blobNamePrefix == null ? "" : blobNamePrefix;
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(path, blobNamePrefix + "*")) {
        for (Path file : stream) {
            final BasicFileAttributes attrs = Files.readAttributes(file, BasicFileAttributes.class);
            if (attrs.isRegularFile()) {
                builder.put(file.getFileName().toString(), new PlainBlobMetaData(file.getFileName().toString(), attrs.size()));
            }
        }
    }
    return unmodifiableMap(builder);
}
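A minimal usage sketch, assuming the FsBlobStore(Settings, Path) constructor of this era of the codebase; the repository path is a placeholder:

// Hypothetical usage: list blobs in a filesystem-backed container whose names
// start with "index-" and print their sizes.
void listIndexBlobs() throws IOException {
    FsBlobStore blobStore = new FsBlobStore(Settings.EMPTY, PathUtils.get("/tmp/repo"));
    BlobContainer container = blobStore.blobContainer(BlobPath.cleanPath());
    Map<String, BlobMetaData> blobs = container.listBlobsByPrefix("index-");
    blobs.forEach((name, meta) -> System.out.println(name + ": " + meta.length() + " bytes"));
}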