Usage of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in the Apache Ozone project.
From class TestS3MultipartUploadAbortResponse, method testAddDBToBatchWithParts.
@Test
public void testAddDBToBatchWithParts() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String keyName = getKeyName();
  String multipartUploadID = UUID.randomUUID().toString();
  String multipartOpenKey = getMultipartOpenKey(volumeName, bucketName, keyName, multipartUploadID);
  String multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
  OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder().setVolumeName(volumeName).setBucketName(bucketName).setCreationTime(Time.now()).build();
  // Initiate the MPU so the open-key and multipart-info entries exist.
  S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse = createS3InitiateMPUResponse(volumeName, bucketName, keyName, multipartUploadID);
  s3InitiateMultipartUploadResponse.addToDBBatch(omMetadataManager, batchOperation);
  // Add some dummy parts for testing. No key locations are added because
  // this test only verifies that entries end up in the delete table.
  OmMultipartKeyInfo omMultipartKeyInfo = s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo();
  PartKeyInfo part1 = createPartKeyInfo(volumeName, bucketName, keyName, 1);
  PartKeyInfo part2 = createPartKeyInfo(volumeName, bucketName, keyName, 2);
  addPart(1, part1, omMultipartKeyInfo);
  addPart(2, part2, omMultipartKeyInfo);
  // Abort the upload and flush both responses in one batch.
  S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse = createS3AbortMPUResponse(multipartKey, multipartOpenKey, s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo(), omBucketInfo);
  s3MultipartUploadAbortResponse.addToDBBatch(omMetadataManager, batchOperation);
  omMetadataManager.getStore().commitBatchOperation(batchOperation);
  // The abort must remove the open-key and multipart-info entries.
  Assert.assertNull(omMetadataManager.getOpenKeyTable(getBucketLayout()).get(multipartKey));
  Assert.assertNull(omMetadataManager.getMultipartInfoTable().get(multipartKey));
  // As 2 parts were created, 2 entries should be present in the delete table.
  // assertEquals gives an expected-vs-actual message on failure, unlike
  // assertTrue on a boolean comparison.
  Assert.assertEquals(2, omMetadataManager.countRowsInTable(omMetadataManager.getDeletedTable()));
  String part1DeletedKeyName = omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
  String part2DeletedKeyName = omMultipartKeyInfo.getPartKeyInfo(2).getPartName();
  Assert.assertNotNull(omMetadataManager.getDeletedTable().get(part1DeletedKeyName));
  Assert.assertNotNull(omMetadataManager.getDeletedTable().get(part2DeletedKeyName));
  // Each deleted-table entry must hold the key info of its aborted part.
  RepeatedOmKeyInfo ro = omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
  Assert.assertEquals(OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo()), ro.getOmKeyInfoList().get(0));
  ro = omMetadataManager.getDeletedTable().get(part2DeletedKeyName);
  Assert.assertEquals(OmKeyInfo.getFromProtobuf(part2.getPartKeyInfo()), ro.getOmKeyInfoList().get(0));
}
Usage of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in the Apache Ozone project.
From class TestS3MultipartResponse, method createS3InitiateMPUResponse.
/**
 * Builds an S3 initiate-MPU response for tests: a multipart key info and an
 * open key info (RATIS/ONE replication, empty location list) wrapped in a
 * successful InitiateMultiPartUpload OMResponse.
 */
public S3InitiateMultipartUploadResponse createS3InitiateMPUResponse(String volumeName, String bucketName, String keyName, String multipartUploadID) {
  ReplicationConfig ratisOne = RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE);

  OmMultipartKeyInfo multipartKeyInfo = new OmMultipartKeyInfo.Builder()
      .setUploadID(multipartUploadID)
      .setCreationTime(Time.now())
      .setReplicationConfig(ratisOne)
      .build();

  // Open key with a single empty location group; no blocks are needed here.
  OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(keyName)
      .setCreationTime(Time.now())
      .setModificationTime(Time.now())
      .setReplicationConfig(ratisOne)
      .setOmKeyLocationInfos(Collections.singletonList(new OmKeyLocationInfoGroup(0, new ArrayList<>())))
      .build();

  OMResponse omResponse = OMResponse.newBuilder()
      .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload)
      .setStatus(OzoneManagerProtocolProtos.Status.OK)
      .setSuccess(true)
      .setInitiateMultiPartUploadResponse(
          OzoneManagerProtocolProtos.MultipartInfoInitiateResponse.newBuilder()
              .setVolumeName(volumeName)
              .setBucketName(bucketName)
              .setKeyName(keyName)
              .setMultipartUploadID(multipartUploadID))
      .build();

  return getS3InitiateMultipartUploadResp(multipartKeyInfo, omKeyInfo, omResponse);
}
Usage of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in the Apache Ozone project.
From class TestKeyManagerUnit, method addinitMultipartUploadToCache.
/**
 * Seeds the multipart-info table cache with a fresh upload (no parts yet,
 * RATIS/THREE replication) and returns the corresponding OmMultipartInfo.
 */
private OmMultipartInfo addinitMultipartUploadToCache(String volume, String bucket, String key) {
  String uploadID = UUID.randomUUID().toString();
  // Empty part map: the upload has been initiated but nothing committed.
  Map<Integer, OzoneManagerProtocolProtos.PartKeyInfo> emptyParts = new HashMap<>();

  OmMultipartKeyInfo multipartKeyInfo = new OmMultipartKeyInfo.Builder()
      .setUploadID(uploadID)
      .setCreationTime(Time.now())
      .setReplicationConfig(RatisReplicationConfig.getInstance(ReplicationFactor.THREE))
      .setPartKeyInfoList(emptyParts)
      .build();

  String dbMultipartKey = metadataManager.getMultipartKey(volume, bucket, key, uploadID);
  metadataManager.getMultipartInfoTable().addCacheEntry(
      new CacheKey<>(dbMultipartKey),
      new CacheValue<>(Optional.of(multipartKeyInfo), RandomUtils.nextInt()));

  return new OmMultipartInfo(volume, bucket, key, uploadID);
}
Usage of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in the Apache Ozone project.
From class KeyManagerImpl, method listMultipartUploads.
/**
 * Lists in-progress multipart uploads under the given volume/bucket whose key
 * names match the prefix. Creation time and replication config are filled in
 * from the multipart-info table for each upload; reads happen under the
 * bucket read lock.
 *
 * @throws OMException wrapping any underlying IOException.
 */
@Override
public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws OMException {
  Preconditions.checkNotNull(volumeName);
  Preconditions.checkNotNull(bucketName);
  metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
  try {
    Set<String> multipartUploadKeys = metadataManager.getMultipartUploadKeys(volumeName, bucketName, prefix);
    List<OmMultipartUpload> collect = multipartUploadKeys.stream()
        .map(OmMultipartUpload::from)
        .peek(upload -> {
          try {
            Table<String, OmMultipartKeyInfo> keyInfoTable = metadataManager.getMultipartInfoTable();
            OmMultipartKeyInfo multipartKeyInfo = keyInfoTable.get(upload.getDbKey());
            upload.setCreationTime(Instant.ofEpochMilli(multipartKeyInfo.getCreationTime()));
            upload.setReplicationConfig(multipartKeyInfo.getReplicationConfig());
          } catch (IOException e) {
            // Fixed message: the read FAILED here, the old text said "can be
            // read". Best-effort: the upload is still returned, just without
            // creation time / replication config.
            LOG.warn("Open key entry for multipart upload record cannot be read {}", metadataManager.getOzoneKey(upload.getVolumeName(), upload.getBucketName(), upload.getKeyName()));
          }
        })
        .collect(Collectors.toList());
    return new OmMultipartUploadList(collect);
  } catch (IOException ex) {
    // Parameterized logging; the old concatenation was missing separators
    // ("volume: Xbucket: Yprefix: Z").
    LOG.error("List Multipart Uploads Failed: volume: {} bucket: {} prefix: {}", volumeName, bucketName, prefix, ex);
    throw new OMException(ex.getMessage(), ResultCodes.LIST_MULTIPART_UPLOAD_PARTS_FAILED);
  } finally {
    metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
  }
}
Usage of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in the Apache Ozone project.
From class KeyManagerImpl, method listParts.
/**
 * Lists up to {@code maxParts} committed parts of a multipart upload, starting
 * strictly after {@code partNumberMarker}. The returned list carries the
 * replication config taken from one of the parts, or — when no part qualifies
 * — from the open key entry of the upload. Reads happen under the bucket read
 * lock.
 *
 * @throws OMException NO_SUCH_MULTIPART_UPLOAD_ERROR when the upload does not
 *         exist, or LIST_MULTIPART_UPLOAD_PARTS_FAILED on I/O errors.
 */
@Override
public OmMultipartUploadListParts listParts(String volumeName, String bucketName, String keyName, String uploadID, int partNumberMarker, int maxParts) throws IOException {
  Preconditions.checkNotNull(volumeName);
  Preconditions.checkNotNull(bucketName);
  Preconditions.checkNotNull(keyName);
  Preconditions.checkNotNull(uploadID);
  boolean isTruncated = false;
  int nextPartNumberMarker = 0;
  // Resolve the bucket layout so the correct open-key table is consulted.
  // NOTE(review): buckInfo is dereferenced without a null check — a missing
  // bucket would surface as NPE rather than a clean OMException; confirm
  // callers guarantee bucket existence.
  BucketLayout bucketLayout = BucketLayout.DEFAULT;
  if (ozoneManager != null) {
    String buckKey = ozoneManager.getMetadataManager().getBucketKey(volumeName, bucketName);
    OmBucketInfo buckInfo = ozoneManager.getMetadataManager().getBucketTable().get(buckKey);
    bucketLayout = buckInfo.getBucketLayout();
  }
  metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
  try {
    String multipartKey = metadataManager.getMultipartKey(volumeName, bucketName, keyName, uploadID);
    OmMultipartKeyInfo multipartKeyInfo = metadataManager.getMultipartInfoTable().get(multipartKey);
    if (multipartKeyInfo == null) {
      throw new OMException("No Such Multipart upload exists for this key.", ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
    } else {
      TreeMap<Integer, PartKeyInfo> partKeyInfoMap = multipartKeyInfo.getPartKeyInfoMap();
      Iterator<Map.Entry<Integer, PartKeyInfo>> partKeyInfoMapIterator = partKeyInfoMap.entrySet().iterator();
      ReplicationConfig replicationConfig = null;
      int count = 0;
      List<OmPartInfo> omPartInfoList = new ArrayList<>();
      while (count < maxParts && partKeyInfoMapIterator.hasNext()) {
        Map.Entry<Integer, PartKeyInfo> partKeyInfoEntry = partKeyInfoMapIterator.next();
        nextPartNumberMarker = partKeyInfoEntry.getKey();
        // Only include parts whose number is strictly greater than the marker.
        if (partKeyInfoEntry.getKey() > partNumberMarker) {
          PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
          String partName = getPartName(partKeyInfo, volumeName, bucketName, keyName);
          OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(), partName, partKeyInfo.getPartKeyInfo().getModificationTime(), partKeyInfo.getPartKeyInfo().getDataSize());
          omPartInfoList.add(omPartInfo);
          // If there are parts, use replication type from one of the parts.
          replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(partKeyInfo.getPartKeyInfo().getType(), partKeyInfo.getPartKeyInfo().getFactor());
          count++;
        }
      }
      if (replicationConfig == null) {
        // If there are no parts, use the replication config from the open key.
        if (isBucketFSOptimized(volumeName, bucketName)) {
          multipartKey = getMultipartOpenKeyFSO(volumeName, bucketName, keyName, uploadID);
        }
        OmKeyInfo omKeyInfo = metadataManager.getOpenKeyTable(bucketLayout).get(multipartKey);
        if (omKeyInfo == null) {
          throw new IllegalStateException("Open key is missing for multipart upload " + multipartKey);
        }
        replicationConfig = omKeyInfo.getReplicationConfig();
      }
      Preconditions.checkNotNull(replicationConfig, "ReplicationConfig can't be identified");
      // Remaining entries mean the listing is truncated; the marker already
      // points at the last returned part. (Removed the dead next() call that
      // only fetched and discarded an entry.)
      if (partKeyInfoMapIterator.hasNext()) {
        isTruncated = true;
      } else {
        isTruncated = false;
        nextPartNumberMarker = 0;
      }
      OmMultipartUploadListParts omMultipartUploadListParts = new OmMultipartUploadListParts(replicationConfig, nextPartNumberMarker, isTruncated);
      omMultipartUploadListParts.addPartList(omPartInfoList);
      return omMultipartUploadListParts;
    }
  } catch (OMException ex) {
    throw ex;
  } catch (IOException ex) {
    // Fixed the ", ,key:" typo in the original format string.
    LOG.error("List Multipart Upload Parts Failed: volume: {}, bucket: {}, key: {}", volumeName, bucketName, keyName, ex);
    throw new OMException(ex.getMessage(), ResultCodes.LIST_MULTIPART_UPLOAD_PARTS_FAILED);
  } finally {
    metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
  }
}
End of aggregated usage examples.