Example usage of org.apache.hadoop.ozone.om.helpers.OmMultipartUpload in the Apache Ozone project:
the method listMultipartUploadsWithPrefix in class TestKeyManagerUnit.
/**
 * Verifies that listing multipart uploads with a key prefix returns only
 * the uploads under that prefix, in key order, and excludes uploads from
 * other buckets and from keys that merely sort near the prefix ("dip/").
 */
@Test
public void listMultipartUploadsWithPrefix() throws IOException {
  // GIVEN: two buckets, with uploads both inside and outside the "dir" prefix.
  createBucket(metadataManager, "vol1", "bucket1");
  createBucket(metadataManager, "vol1", "bucket2");
  // "dip/key1" sorts just before "dir/" and must NOT match the prefix.
  initMultipartUpload(writeClient, "vol1", "bucket1", "dip/key1");
  initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key1");
  initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key2");
  initMultipartUpload(writeClient, "vol1", "bucket1", "key3");
  // Same key prefix in a different bucket must not leak into the result.
  initMultipartUpload(writeClient, "vol1", "bucket2", "dir/key1");

  // WHEN: listing uploads of bucket1 restricted to the "dir" prefix.
  OmMultipartUploadList listing =
      keyManager.listMultipartUploads("vol1", "bucket1", "dir");

  // THEN: exactly the two "dir/" uploads of bucket1, sorted by key name.
  List<OmMultipartUpload> result = listing.getUploads();
  Assert.assertEquals(2, result.size());
  Assert.assertEquals("dir/key1", result.get(0).getKeyName());
  Assert.assertEquals("dir/key2", result.get(1).getKeyName());
}
Example usage of org.apache.hadoop.ozone.om.helpers.OmMultipartUpload in the Apache Ozone project:
the method listMultipartUploadsWithFewEntriesInCache in class TestKeyManagerUnit.
/**
 * Verifies that listMultipartUploads merges results correctly when some
 * multipart-upload records live only in the table cache and others are
 * persisted in the DB, including prefix filtering and the removal of
 * entries after the uploads are aborted (both the DB-backed and the
 * cache-backed variant).
 */
@Test
public void listMultipartUploadsWithFewEntriesInCache() throws IOException {
  String volume = UUID.randomUUID().toString();
  String bucket = UUID.randomUUID().toString();

  // GIVEN
  // Fix: the bucket was previously created twice with identical arguments;
  // the duplicate call was redundant (and would fail if bucket creation is
  // not idempotent), so it has been removed.
  createBucket(metadataManager, volume, bucket);

  // Add some upload records to the cache and some to the DB, so the
  // listing has to merge both sources.
  addinitMultipartUploadToCache(volume, bucket, "dir/key1");
  initMultipartUpload(writeClient, volume, bucket, "dir/key2");
  addinitMultipartUploadToCache(volume, bucket, "dir/key3");
  initMultipartUpload(writeClient, volume, bucket, "dir/key4");

  // WHEN: listing with no prefix returns the union of cache and DB.
  OmMultipartUploadList omMultipartUploadList =
      keyManager.listMultipartUploads(volume, bucket, "");

  // THEN: all four uploads, in key order, regardless of their source.
  List<OmMultipartUpload> uploads = omMultipartUploadList.getUploads();
  Assert.assertEquals(4, uploads.size());
  Assert.assertEquals("dir/key1", uploads.get(0).getKeyName());
  Assert.assertEquals("dir/key2", uploads.get(1).getKeyName());
  Assert.assertEquals("dir/key3", uploads.get(2).getKeyName());
  Assert.assertEquals("dir/key4", uploads.get(3).getKeyName());

  // Add a few more uploads to exercise prefix filtering, again split
  // between cache and DB. Keep the infos needed for the abort steps below.
  addinitMultipartUploadToCache(volume, bucket, "dir/ozonekey1");
  initMultipartUpload(writeClient, volume, bucket, "dir/ozonekey2");
  OmMultipartInfo omMultipartInfo3 =
      addinitMultipartUploadToCache(volume, bucket, "dir/ozonekey3");
  OmMultipartInfo omMultipartInfo4 =
      initMultipartUpload(writeClient, volume, bucket, "dir/ozonekey4");

  omMultipartUploadList =
      keyManager.listMultipartUploads(volume, bucket, "dir/ozone");

  // THEN: only the "dir/ozone"-prefixed uploads are returned.
  uploads = omMultipartUploadList.getUploads();
  Assert.assertEquals(4, uploads.size());
  Assert.assertEquals("dir/ozonekey1", uploads.get(0).getKeyName());
  Assert.assertEquals("dir/ozonekey2", uploads.get(1).getKeyName());
  Assert.assertEquals("dir/ozonekey3", uploads.get(2).getKeyName());
  Assert.assertEquals("dir/ozonekey4", uploads.get(3).getKeyName());

  // Abort the multipart upload whose record is in the DB.
  abortMultipart(volume, bucket, "dir/ozonekey4", omMultipartInfo4.getUploadID());

  // THEN: the aborted DB-backed upload no longer appears.
  omMultipartUploadList =
      keyManager.listMultipartUploads(volume, bucket, "dir/ozone");
  uploads = omMultipartUploadList.getUploads();
  Assert.assertEquals(3, uploads.size());
  Assert.assertEquals("dir/ozonekey1", uploads.get(0).getKeyName());
  Assert.assertEquals("dir/ozonekey2", uploads.get(1).getKeyName());
  Assert.assertEquals("dir/ozonekey3", uploads.get(2).getKeyName());

  // Abort the multipart upload whose record is in the cache.
  abortMultipart(volume, bucket, "dir/ozonekey3", omMultipartInfo3.getUploadID());

  // THEN: the aborted cache-backed upload no longer appears either.
  omMultipartUploadList =
      keyManager.listMultipartUploads(volume, bucket, "dir/ozone");
  uploads = omMultipartUploadList.getUploads();
  Assert.assertEquals(2, uploads.size());
  Assert.assertEquals("dir/ozonekey1", uploads.get(0).getKeyName());
  Assert.assertEquals("dir/ozonekey2", uploads.get(1).getKeyName());
}
Example usage of org.apache.hadoop.ozone.om.helpers.OmMultipartUpload in the Apache Ozone project:
the method listMultipartUploads in class OzoneManagerProtocolClientSideTranslatorPB.
/**
 * Lists in-flight multipart uploads of a bucket, optionally filtered by a
 * key prefix, by sending a ListMultipartUploads RPC to the Ozone Manager
 * and translating the protobuf response into OmMultipartUpload objects.
 *
 * @param volumeName volume containing the bucket
 * @param bucketName bucket whose uploads are listed
 * @param prefix     key prefix filter; null is treated as "no filter"
 * @return the uploads reported by the Ozone Manager
 * @throws IOException if the RPC fails or the OM reports an error
 */
@Override
public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws IOException {
  // Build the RPC payload; the wire format has no null, so an absent
  // prefix is sent as the empty string.
  ListMultipartUploadsRequest listRequest = ListMultipartUploadsRequest.newBuilder()
      .setVolume(volumeName)
      .setBucket(bucketName)
      .setPrefix(prefix == null ? "" : prefix)
      .build();
  OMRequest omRequest = createOMRequest(Type.ListMultipartUploads)
      .setListMultipartUploadsRequest(listRequest)
      .build();

  // Submit and surface any OM-side failure before reading the payload.
  ListMultipartUploadsResponse omResponse =
      handleError(submitRequest(omRequest)).getListMultipartUploadsResponse();

  // Translate each protobuf record into its helper-class equivalent.
  List<OmMultipartUpload> uploads = omResponse.getUploadsListList().stream()
      .map(proto -> new OmMultipartUpload(
          proto.getVolumeName(),
          proto.getBucketName(),
          proto.getKeyName(),
          proto.getUploadId(),
          Instant.ofEpochMilli(proto.getCreationTime()),
          ReplicationConfig.fromProtoTypeAndFactor(proto.getType(), proto.getFactor())))
      .collect(Collectors.toList());
  return new OmMultipartUploadList(uploads);
}
Example usage of org.apache.hadoop.ozone.om.helpers.OmMultipartUpload in the Apache Ozone project:
the method listMultipartUploads in class TestKeyManagerUnit.
/**
 * Verifies the basic listing of multipart uploads: only the requested
 * bucket's uploads are returned, in key order, and each upload carries a
 * plausible (non-null, not-before-test-start) creation time.
 */
@Test
public void listMultipartUploads() throws IOException {
  // GIVEN: two buckets, with one upload deliberately placed in bucket2
  // to prove that listing does not cross bucket boundaries.
  createBucket(metadataManager, "vol1", "bucket1");
  createBucket(metadataManager, "vol1", "bucket2");
  initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key1");
  initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key2");
  initMultipartUpload(writeClient, "vol1", "bucket2", "dir/key1");

  // WHEN: listing bucket1 with an empty prefix (i.e. everything).
  OmMultipartUploadList listing =
      keyManager.listMultipartUploads("vol1", "bucket1", "");

  // THEN: exactly bucket1's two uploads, sorted by key name.
  List<OmMultipartUpload> result = listing.getUploads();
  Assert.assertEquals(2, result.size());
  Assert.assertEquals("dir/key1", result.get(0).getKeyName());
  Assert.assertEquals("dir/key2", result.get(1).getKeyName());

  // AND: the creation timestamp is populated and not older than the
  // moment the test started.
  Assert.assertNotNull(result.get(1));
  Instant creationTime = result.get(1).getCreationTime();
  Assert.assertNotNull(creationTime);
  Assert.assertFalse(
      "Creation date is too old: " + creationTime + " < " + startDate,
      creationTime.isBefore(startDate));
}
Example usage of org.apache.hadoop.ozone.om.helpers.OmMultipartUpload in the Apache Ozone project:
the method listMultipartUploads in class KeyManagerImpl.
/**
 * Lists the in-flight multipart uploads of a bucket, optionally filtered
 * by a key prefix. Upload keys are read under the bucket read lock and
 * then enriched with creation time and replication config from the
 * multipart-info table.
 *
 * @param volumeName volume containing the bucket (non-null)
 * @param bucketName bucket whose uploads are listed (non-null)
 * @param prefix     key prefix filter; semantics delegated to
 *                   {@code metadataManager.getMultipartUploadKeys}
 * @return the matching uploads; entries whose info record cannot be read
 *         are still returned, but without creation time / replication data
 * @throws OMException with LIST_MULTIPART_UPLOAD_PARTS_FAILED if the key
 *                     listing itself fails
 */
@Override
public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws OMException {
  Preconditions.checkNotNull(volumeName);
  Preconditions.checkNotNull(bucketName);
  metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
  try {
    Set<String> multipartUploadKeys =
        metadataManager.getMultipartUploadKeys(volumeName, bucketName, prefix);
    List<OmMultipartUpload> collect = multipartUploadKeys.stream()
        .map(OmMultipartUpload::from)
        .peek(upload -> {
          try {
            Table<String, OmMultipartKeyInfo> keyInfoTable =
                metadataManager.getMultipartInfoTable();
            OmMultipartKeyInfo multipartKeyInfo = keyInfoTable.get(upload.getDbKey());
            // Fix: guard against a missing info record. Previously a null
            // here raised an NPE that escaped the IOException catch and
            // aborted the whole listing.
            if (multipartKeyInfo == null) {
              LOG.warn("Multipart info record not found for {}",
                  metadataManager.getOzoneKey(upload.getVolumeName(),
                      upload.getBucketName(), upload.getKeyName()));
              return;
            }
            upload.setCreationTime(Instant.ofEpochMilli(multipartKeyInfo.getCreationTime()));
            upload.setReplicationConfig(multipartKeyInfo.getReplicationConfig());
          } catch (IOException e) {
            // Fix: the message previously said "can be read", the opposite
            // of the failure being reported; the exception is now logged too.
            LOG.warn("Open key entry for multipart upload record cannot be read {}",
                metadataManager.getOzoneKey(upload.getVolumeName(),
                    upload.getBucketName(), upload.getKeyName()), e);
          }
        })
        .collect(Collectors.toList());
    return new OmMultipartUploadList(collect);
  } catch (IOException ex) {
    // Fix: parameterized logging with proper separators (the concatenated
    // message previously ran the values together: "volume: vbucket: b...").
    LOG.error("List Multipart Uploads Failed: volume: {}, bucket: {}, prefix: {}",
        volumeName, bucketName, prefix, ex);
    throw new OMException(ex.getMessage(), ResultCodes.LIST_MULTIPART_UPLOAD_PARTS_FAILED);
  } finally {
    metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
  }
}
Aggregations