Search in sources :

Example 16 with OmMultipartKeyInfo

use of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in project ozone by apache.

From the class TestS3MultipartUploadAbortResponse, method testAddDBToBatchWithParts.

@Test
public void testAddDBToBatchWithParts() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String keyName = getKeyName();
    String multipartUploadID = UUID.randomUUID().toString();
    String multipartOpenKey = getMultipartOpenKey(volumeName, bucketName, keyName, multipartUploadID);
    String multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
    OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setCreationTime(Time.now())
        .build();
    S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse =
        createS3InitiateMPUResponse(volumeName, bucketName, keyName, multipartUploadID);
    s3InitiateMultipartUploadResponse.addToDBBatch(omMetadataManager, batchOperation);

    // Add some dummy parts for testing. No key locations are attached, because
    // this test only verifies that entries are added to the delete table.
    OmMultipartKeyInfo omMultipartKeyInfo = s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo();
    PartKeyInfo part1 = createPartKeyInfo(volumeName, bucketName, keyName, 1);
    PartKeyInfo part2 = createPartKeyInfo(volumeName, bucketName, keyName, 2);
    addPart(1, part1, omMultipartKeyInfo);
    addPart(2, part2, omMultipartKeyInfo);

    // Reuse the local omMultipartKeyInfo reference instead of re-fetching it
    // from the response a second time.
    S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse =
        createS3AbortMPUResponse(multipartKey, multipartOpenKey, omMultipartKeyInfo, omBucketInfo);
    s3MultipartUploadAbortResponse.addToDBBatch(omMetadataManager, batchOperation);
    omMetadataManager.getStore().commitBatchOperation(batchOperation);

    // Abort must remove both the open-key entry and the multipart-info entry.
    Assert.assertNull(omMetadataManager.getOpenKeyTable(getBucketLayout()).get(multipartKey));
    Assert.assertNull(omMetadataManager.getMultipartInfoTable().get(multipartKey));

    // As 2 parts were created, 2 entries should be present in the delete table.
    // assertEquals reports expected/actual on failure, unlike assertTrue(x == 2).
    Assert.assertEquals(2, omMetadataManager.countRowsInTable(omMetadataManager.getDeletedTable()));
    String part1DeletedKeyName = omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
    String part2DeletedKeyName = omMultipartKeyInfo.getPartKeyInfo(2).getPartName();
    Assert.assertNotNull(omMetadataManager.getDeletedTable().get(part1DeletedKeyName));
    Assert.assertNotNull(omMetadataManager.getDeletedTable().get(part2DeletedKeyName));

    // Each deleted-table entry must wrap exactly the part key info that was added.
    RepeatedOmKeyInfo ro = omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
    Assert.assertEquals(OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo()), ro.getOmKeyInfoList().get(0));
    ro = omMetadataManager.getDeletedTable().get(part2DeletedKeyName);
    Assert.assertEquals(OmKeyInfo.getFromProtobuf(part2.getPartKeyInfo()), ro.getOmKeyInfoList().get(0));
}
Also used : OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) OmMultipartKeyInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo) PartKeyInfo(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) Test(org.junit.Test)

Example 17 with OmMultipartKeyInfo

use of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in project ozone by apache.

From the class TestS3MultipartResponse, method createS3InitiateMPUResponse.

public S3InitiateMultipartUploadResponse createS3InitiateMPUResponse(String volumeName, String bucketName, String keyName, String multipartUploadID) {
    // Multipart bookkeeping record for the new upload (Ratis, replication factor ONE).
    OmMultipartKeyInfo multipartKeyInfo = new OmMultipartKeyInfo.Builder()
        .setUploadID(multipartUploadID)
        .setCreationTime(Time.now())
        .setReplicationConfig(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
        .build();

    // Open-key entry carrying a single empty location group at version 0.
    OmKeyLocationInfoGroup emptyLocationGroup = new OmKeyLocationInfoGroup(0, new ArrayList<>());
    OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setCreationTime(Time.now())
        .setModificationTime(Time.now())
        .setReplicationConfig(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
        .setOmKeyLocationInfos(Collections.singletonList(emptyLocationGroup))
        .build();

    // Successful InitiateMultiPartUpload protocol response echoing the key coordinates.
    OzoneManagerProtocolProtos.MultipartInfoInitiateResponse.Builder initiateResponse =
        OzoneManagerProtocolProtos.MultipartInfoInitiateResponse.newBuilder()
            .setVolumeName(volumeName)
            .setBucketName(bucketName)
            .setKeyName(keyName)
            .setMultipartUploadID(multipartUploadID);
    OMResponse omResponse = OMResponse.newBuilder()
        .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload)
        .setStatus(OzoneManagerProtocolProtos.Status.OK)
        .setSuccess(true)
        .setInitiateMultiPartUploadResponse(initiateResponse)
        .build();

    return getS3InitiateMultipartUploadResp(multipartKeyInfo, omKeyInfo, omResponse);
}
Also used : OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) OmMultipartKeyInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) ArrayList(java.util.ArrayList) OMResponse(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse)

Example 18 with OmMultipartKeyInfo

use of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in project ozone by apache.

From the class TestKeyManagerUnit, method addinitMultipartUploadToCache.

private OmMultipartInfo addinitMultipartUploadToCache(String volume, String bucket, String key) {
    // Fresh upload id; the part map starts out empty for a just-initiated upload.
    String uploadID = UUID.randomUUID().toString();
    Map<Integer, OzoneManagerProtocolProtos.PartKeyInfo> partKeyInfoMap = new HashMap<>();
    OmMultipartKeyInfo multipartKeyInfo = new OmMultipartKeyInfo.Builder()
        .setUploadID(uploadID)
        .setCreationTime(Time.now())
        .setReplicationConfig(RatisReplicationConfig.getInstance(ReplicationFactor.THREE))
        .setPartKeyInfoList(partKeyInfoMap)
        .build();
    // Seed the multipart-info table cache directly (no DB write) under a random epoch.
    String dbKey = metadataManager.getMultipartKey(volume, bucket, key, uploadID);
    metadataManager.getMultipartInfoTable().addCacheEntry(
        new CacheKey<>(dbKey),
        new CacheValue<>(Optional.of(multipartKeyInfo), RandomUtils.nextInt()));
    return new OmMultipartInfo(volume, bucket, key, uploadID);
}
Also used : HashMap(java.util.HashMap) OmMultipartKeyInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo) OmMultipartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartInfo)

Example 19 with OmMultipartKeyInfo

use of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in project ozone by apache.

From the class KeyManagerImpl, method listMultipartUploads.

/**
 * Lists in-progress multipart uploads under the given volume/bucket,
 * optionally filtered by key prefix, enriching each entry with its
 * creation time and replication config from the multipart-info table.
 */
@Override
public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws OMException {
    Preconditions.checkNotNull(volumeName);
    Preconditions.checkNotNull(bucketName);
    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
    try {
        Set<String> multipartUploadKeys = metadataManager.getMultipartUploadKeys(volumeName, bucketName, prefix);
        List<OmMultipartUpload> collect = multipartUploadKeys.stream()
            .map(OmMultipartUpload::from)
            .peek(upload -> {
                try {
                    Table<String, OmMultipartKeyInfo> keyInfoTable = metadataManager.getMultipartInfoTable();
                    OmMultipartKeyInfo multipartKeyInfo = keyInfoTable.get(upload.getDbKey());
                    // The info-table entry may have been removed concurrently
                    // (e.g. by an abort/complete); skip enrichment instead of
                    // throwing NullPointerException from the stream pipeline.
                    if (multipartKeyInfo != null) {
                        upload.setCreationTime(Instant.ofEpochMilli(multipartKeyInfo.getCreationTime()));
                        upload.setReplicationConfig(multipartKeyInfo.getReplicationConfig());
                    }
                } catch (IOException e) {
                    // Fixed message: this branch runs when the record could NOT
                    // be read; the original text said "can be read".
                    LOG.warn("Open key entry for multipart upload record cannot be read {}",
                        metadataManager.getOzoneKey(upload.getVolumeName(), upload.getBucketName(), upload.getKeyName()));
                }
            }).collect(Collectors.toList());
        return new OmMultipartUploadList(collect);
    } catch (IOException ex) {
        // Parameterized logging; the original concatenation was missing
        // separators and rendered as "volume: vbucket: bprefix: p".
        LOG.error("List Multipart Uploads Failed: volume: {}, bucket: {}, prefix: {}", volumeName, bucketName, prefix, ex);
        throw new OMException(ex.getMessage(), ResultCodes.LIST_MULTIPART_UPLOAD_PARTS_FAILED);
    } finally {
        metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
    }
}
Also used : Arrays(java.util.Arrays) INTERNAL_ERROR(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR) OzoneFSUtils(org.apache.hadoop.ozone.om.helpers.OzoneFSUtils) StringUtils(org.apache.commons.lang3.StringUtils) GeneralSecurityException(java.security.GeneralSecurityException) OM_KEY_PREFIX(org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX) HADOOP_SECURITY_KEY_PROVIDER_PATH(org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH) Map(java.util.Map) Path(java.nio.file.Path) EnumSet(java.util.EnumSet) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) DFS_CONTAINER_RATIS_ENABLED_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT) Set(java.util.Set) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) SecurityUtil(org.apache.hadoop.security.SecurityUtil) OzoneBlockTokenSecretManager(org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager) VOLUME_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND) CodecRegistry(org.apache.hadoop.hdds.utils.db.CodecRegistry) HDDS_BLOCK_TOKEN_ENABLED(org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) OZONE_URI_DELIMITER(org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER) OmMultipartUploadListParts(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts) OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT) INVALID_KMS_PROVIDER(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KMS_PROVIDER) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) Time.monotonicNow(org.apache.hadoop.util.Time.monotonicNow) EncryptedKeyVersion(org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion) Strings(com.google.common.base.Strings) 
OMFileRequest(org.apache.hadoop.ozone.om.request.file.OMFileRequest) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) KeyProviderCryptoExtension(org.apache.hadoop.crypto.key.KeyProviderCryptoExtension) OmDirectoryInfo(org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo) OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) BlockGroup(org.apache.hadoop.ozone.common.BlockGroup) ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) IOException(java.io.IOException) BucketEncryptionKeyInfo(org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo) OZONE_BLOCK_DELETING_SERVICE_INTERVAL(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) TreeMap(java.util.TreeMap) OZONE_SCM_BLOCK_SIZE_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT) DIRECTORY_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND) Paths(java.nio.file.Paths) Table(org.apache.hadoop.hdds.utils.db.Table) CacheKey(org.apache.hadoop.hdds.utils.db.cache.CacheKey) OmPartInfo(org.apache.hadoop.ozone.om.helpers.OmPartInfo) READ(org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ) Preconditions(com.google.common.base.Preconditions) TableIterator(org.apache.hadoop.hdds.utils.db.TableIterator) OmMultipartKeyInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo) RequestContext(org.apache.hadoop.ozone.security.acl.RequestContext) LoggerFactory(org.slf4j.LoggerFactory) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) ScmBlockLocationProtocol(org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol) 
OZONE_SCM_BLOCK_SIZE(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE) KEY_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND) OZONE_BLOCK_DELETING_SERVICE_TIMEOUT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) BucketLayout(org.apache.hadoop.ozone.om.helpers.BucketLayout) CacheValue(org.apache.hadoop.hdds.utils.db.cache.CacheValue) OzoneAcl(org.apache.hadoop.ozone.OzoneAcl) OmMultipartUpload(org.apache.hadoop.ozone.om.helpers.OmMultipartUpload) StorageUnit(org.apache.hadoop.conf.StorageUnit) RatisReplicationConfig(org.apache.hadoop.hdds.client.RatisReplicationConfig) Collection(java.util.Collection) ReplicationFactor(org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor) FILE_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) List(java.util.List) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) BUCKET_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND) OZONE_KEY_PREALLOCATION_BLOCKS_MAX(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX) OMClientRequest(org.apache.hadoop.ozone.om.request.OMClientRequest) OzoneObj(org.apache.hadoop.ozone.security.acl.OzoneObj) RDBStore(org.apache.hadoop.hdds.utils.db.RDBStore) OZONE_DIR_DELETING_SERVICE_INTERVAL(org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL) OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT(org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT) HashMap(java.util.HashMap) BackgroundService(org.apache.hadoop.hdds.utils.BackgroundService) OZONE_CLIENT_LIST_TRASH_KEYS_MAX(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_TRASH_KEYS_MAX) OmUtils(org.apache.hadoop.ozone.OmUtils) Stack(java.util.Stack) 
ResultCodes(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes) OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT) HashSet(java.util.HashSet) PartKeyInfo(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo) OmMultipartUploadList(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) OzoneAclUtil(org.apache.hadoop.ozone.om.helpers.OzoneAclUtil) Server(org.apache.hadoop.ipc.Server) HDDS_BLOCK_TOKEN_ENABLED_DEFAULT(org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT) DFS_CONTAINER_RATIS_ENABLED_KEY(org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY) BUCKET_LOCK(org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) SCM_GET_PIPELINE_EXCEPTION(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) TimeUnit(java.util.concurrent.TimeUnit) KEY(org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) IAccessAuthorizer(org.apache.hadoop.ozone.security.acl.IAccessAuthorizer) StorageContainerLocationProtocol(org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol) OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT) Time(org.apache.hadoop.util.Time) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Collections(java.util.Collections) Table(org.apache.hadoop.hdds.utils.db.Table) OmMultipartKeyInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo) 
OmMultipartUpload(org.apache.hadoop.ozone.om.helpers.OmMultipartUpload) OmMultipartUploadList(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList) IOException(java.io.IOException) OMException(org.apache.hadoop.ozone.om.exceptions.OMException)

Example 20 with OmMultipartKeyInfo

use of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in project ozone by apache.

From the class KeyManagerImpl, method listParts.

/**
 * Lists up to {@code maxParts} uploaded parts of the given multipart upload,
 * starting after {@code partNumberMarker}. The replication config is taken
 * from one of the parts, or from the open key when no parts exist yet.
 *
 * @throws OMException NO_SUCH_MULTIPART_UPLOAD_ERROR when the upload id is
 *         unknown; LIST_MULTIPART_UPLOAD_PARTS_FAILED on I/O failure.
 */
@Override
public OmMultipartUploadListParts listParts(String volumeName, String bucketName, String keyName, String uploadID, int partNumberMarker, int maxParts) throws IOException {
    Preconditions.checkNotNull(volumeName);
    Preconditions.checkNotNull(bucketName);
    Preconditions.checkNotNull(keyName);
    Preconditions.checkNotNull(uploadID);
    boolean isTruncated = false;
    int nextPartNumberMarker = 0;
    BucketLayout bucketLayout = BucketLayout.DEFAULT;
    if (ozoneManager != null) {
        String buckKey = ozoneManager.getMetadataManager().getBucketKey(volumeName, bucketName);
        OmBucketInfo buckInfo = ozoneManager.getMetadataManager().getBucketTable().get(buckKey);
        // NOTE(review): buckInfo is dereferenced without a null check; a missing
        // bucket surfaces here as NullPointerException — confirm whether a
        // BUCKET_NOT_FOUND OMException is the intended behavior instead.
        bucketLayout = buckInfo.getBucketLayout();
    }
    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
    try {
        String multipartKey = metadataManager.getMultipartKey(volumeName, bucketName, keyName, uploadID);
        OmMultipartKeyInfo multipartKeyInfo = metadataManager.getMultipartInfoTable().get(multipartKey);
        if (multipartKeyInfo == null) {
            throw new OMException("No Such Multipart upload exists for this key.", ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
        } else {
            TreeMap<Integer, PartKeyInfo> partKeyInfoMap = multipartKeyInfo.getPartKeyInfoMap();
            Iterator<Map.Entry<Integer, PartKeyInfo>> partKeyInfoMapIterator = partKeyInfoMap.entrySet().iterator();
            ReplicationConfig replicationConfig = null;
            int count = 0;
            List<OmPartInfo> omPartInfoList = new ArrayList<>();
            while (count < maxParts && partKeyInfoMapIterator.hasNext()) {
                Map.Entry<Integer, PartKeyInfo> partKeyInfoEntry = partKeyInfoMapIterator.next();
                nextPartNumberMarker = partKeyInfoEntry.getKey();
                // Only include parts whose part number is greater than the marker.
                if (partKeyInfoEntry.getKey() > partNumberMarker) {
                    PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
                    String partName = getPartName(partKeyInfo, volumeName, bucketName, keyName);
                    OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(), partName, partKeyInfo.getPartKeyInfo().getModificationTime(), partKeyInfo.getPartKeyInfo().getDataSize());
                    omPartInfoList.add(omPartInfo);
                    // if there are parts, use replication type from one of the parts
                    replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(partKeyInfo.getPartKeyInfo().getType(), partKeyInfo.getPartKeyInfo().getFactor());
                    count++;
                }
            }
            if (replicationConfig == null) {
                // if there are no parts, use the replicationType from the open key.
                if (isBucketFSOptimized(volumeName, bucketName)) {
                    multipartKey = getMultipartOpenKeyFSO(volumeName, bucketName, keyName, uploadID);
                }
                OmKeyInfo omKeyInfo = metadataManager.getOpenKeyTable(bucketLayout).get(multipartKey);
                if (omKeyInfo == null) {
                    throw new IllegalStateException("Open key is missing for multipart upload " + multipartKey);
                }
                replicationConfig = omKeyInfo.getReplicationConfig();
            }
            Preconditions.checkNotNull(replicationConfig, "ReplicationConfig can't be identified");
            // Peek ahead: if more parts remain past maxParts the listing is
            // truncated and nextPartNumberMarker points at the last part seen.
            // (The original consumed next() into an unused local; hasNext()
            // alone is sufficient since the iterator is discarded afterwards.)
            if (partKeyInfoMapIterator.hasNext()) {
                isTruncated = true;
            } else {
                isTruncated = false;
                nextPartNumberMarker = 0;
            }
            OmMultipartUploadListParts omMultipartUploadListParts = new OmMultipartUploadListParts(replicationConfig, nextPartNumberMarker, isTruncated);
            omMultipartUploadListParts.addPartList(omPartInfoList);
            return omMultipartUploadListParts;
        }
    } catch (OMException ex) {
        throw ex;
    } catch (IOException ex) {
        // Fixed garbled format string (was: "volume: {}, bucket: {}, ,key: " + "{} ").
        LOG.error("List Multipart Upload Parts Failed: volume: {}, bucket: {}, key: {}", volumeName, bucketName, keyName, ex);
        throw new OMException(ex.getMessage(), ResultCodes.LIST_MULTIPART_UPLOAD_PARTS_FAILED);
    } finally {
        metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
    }
}
Also used : OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) RatisReplicationConfig(org.apache.hadoop.hdds.client.RatisReplicationConfig) OmMultipartUploadListParts(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts) BucketLayout(org.apache.hadoop.ozone.om.helpers.BucketLayout) ArrayList(java.util.ArrayList) OmPartInfo(org.apache.hadoop.ozone.om.helpers.OmPartInfo) IOException(java.io.IOException) OmMultipartKeyInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) PartKeyInfo(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) Map(java.util.Map) TreeMap(java.util.TreeMap) HashMap(java.util.HashMap)

Aggregations

OmMultipartKeyInfo (org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo)20 OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo)14 OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo)11 ArrayList (java.util.ArrayList)9 RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo)8 IOException (java.io.IOException)7 PartKeyInfo (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo)7 Test (org.junit.Test)7 Map (java.util.Map)6 OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager)6 OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse)6 OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse)6 TreeMap (java.util.TreeMap)5 OMException (org.apache.hadoop.ozone.om.exceptions.OMException)5 OmKeyLocationInfoGroup (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup)5 KeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs)5 ReplicationConfig (org.apache.hadoop.hdds.client.ReplicationConfig)4 BucketLayout (org.apache.hadoop.ozone.om.helpers.BucketLayout)4 HashMap (java.util.HashMap)3 OmDirectoryInfo (org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo)3