Example 96 with OmKeyInfo

Use of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in project ozone by apache.

From class OMKeysDeleteRequest, method validateAndUpdateCache.

@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
    DeleteKeysRequest deleteKeyRequest = getOmRequest().getDeleteKeysRequest();
    OzoneManagerProtocolProtos.DeleteKeyArgs deleteKeyArgs = deleteKeyRequest.getDeleteKeys();
    List<String> deleteKeys = new ArrayList<>(deleteKeyArgs.getKeysList());
    IOException exception = null;
    OMClientResponse omClientResponse = null;
    Result result = null;
    OMMetrics omMetrics = ozoneManager.getMetrics();
    omMetrics.incNumKeyDeletes();
    String volumeName = deleteKeyArgs.getVolumeName();
    String bucketName = deleteKeyArgs.getBucketName();
    Map<String, String> auditMap = new LinkedHashMap<>();
    auditMap.put(VOLUME, volumeName);
    auditMap.put(BUCKET, bucketName);
    List<OmKeyInfo> omKeyInfoList = new ArrayList<>();
    AuditLogger auditLogger = ozoneManager.getAuditLogger();
    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    boolean acquiredLock = false;
    int indexFailed = 0;
    int length = deleteKeys.size();
    OzoneManagerProtocolProtos.DeleteKeyArgs.Builder unDeletedKeys = OzoneManagerProtocolProtos.DeleteKeyArgs.newBuilder().setVolumeName(volumeName).setBucketName(bucketName);
    boolean deleteStatus = true;
    try {
        ResolvedBucket bucket = ozoneManager.resolveBucketLink(Pair.of(volumeName, bucketName), this);
        bucket.audit(auditMap);
        volumeName = bucket.realVolume();
        bucketName = bucket.realBucket();
        acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
        // Validate that the bucket and volume exist.
        validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
        String volumeOwner = getVolumeOwner(omMetadataManager, volumeName);
        for (indexFailed = 0; indexFailed < length; indexFailed++) {
            String keyName = deleteKeyArgs.getKeys(indexFailed);
            String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
            OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(objectKey);
            if (omKeyInfo == null) {
                deleteStatus = false;
                LOG.error("Received a request to delete a Key does not exist {}", objectKey);
                deleteKeys.remove(keyName);
                unDeletedKeys.addKeys(keyName);
                continue;
            }
            try {
                // check Acl
                checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY, volumeOwner);
                omKeyInfoList.add(omKeyInfo);
            } catch (Exception ex) {
                deleteStatus = false;
                LOG.error("Acl check failed for Key: {}", objectKey, ex);
                deleteKeys.remove(keyName);
                unDeletedKeys.addKeys(keyName);
            }
        }
        long quotaReleased = 0;
        OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
        // Mark all keys that can be deleted as deleted in the cache.
        for (OmKeyInfo omKeyInfo : omKeyInfoList) {
            omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry(new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName, omKeyInfo.getKeyName())), new CacheValue<>(Optional.absent(), trxnLogIndex));
            omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
            quotaReleased += sumBlockLengths(omKeyInfo);
        }
        omBucketInfo.incrUsedBytes(-quotaReleased);
        omBucketInfo.incrUsedNamespace(-1L * omKeyInfoList.size());
        omClientResponse = new OMKeysDeleteResponse(omResponse.setDeleteKeysResponse(DeleteKeysResponse.newBuilder().setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys)).setStatus(deleteStatus ? OK : PARTIAL_DELETE).setSuccess(deleteStatus).build(), omKeyInfoList, ozoneManager.isRatisEnabled(), omBucketInfo.copyObject());
        result = Result.SUCCESS;
    } catch (IOException ex) {
        result = Result.FAILURE;
        exception = ex;
        createErrorOMResponse(omResponse, ex);
        // reset deleteKeys as request failed.
        deleteKeys = new ArrayList<>();
        // Add all remaining keys, which failed due to the exception.
        for (int i = indexFailed; i < length; i++) {
            unDeletedKeys.addKeys(deleteKeyArgs.getKeys(i));
        }
        omResponse.setDeleteKeysResponse(DeleteKeysResponse.newBuilder().setStatus(false).setUnDeletedKeys(unDeletedKeys).build()).build();
        omClientResponse = new OMKeysDeleteResponse(omResponse.build(), getBucketLayout());
    } finally {
        if (acquiredLock) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
        }
        addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper);
    }
    addDeletedKeys(auditMap, deleteKeys, unDeletedKeys.getKeysList());
    auditLog(auditLogger, buildAuditMessage(DELETE_KEYS, auditMap, exception, userInfo));
    switch(result) {
        case SUCCESS:
            omMetrics.decNumKeys(deleteKeys.size());
            if (LOG.isDebugEnabled()) {
                LOG.debug("Keys delete success. Volume:{}, Bucket:{}, Keys:{}", volumeName, bucketName, auditMap.get(DELETED_KEYS_LIST));
            }
            break;
        case FAILURE:
            omMetrics.incNumKeyDeleteFails();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Keys delete failed. Volume:{}, Bucket:{}, DeletedKeys:{}, " + "UnDeletedKeys:{}", volumeName, bucketName, auditMap.get(DELETED_KEYS_LIST), auditMap.get(UNDELETED_KEYS_LIST), exception);
            }
            break;
        default:
            LOG.error("Unrecognized Result for OMKeysDeleteRequest: {}", deleteKeyRequest);
    }
    return omClientResponse;
}
Also used: OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) DeleteKeysRequest(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest) ArrayList(java.util.ArrayList) OMKeysDeleteResponse(org.apache.hadoop.ozone.om.response.key.OMKeysDeleteResponse) OMMetrics(org.apache.hadoop.ozone.om.OMMetrics) LinkedHashMap(java.util.LinkedHashMap) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) ResolvedBucket(org.apache.hadoop.ozone.om.ResolvedBucket) AuditLogger(org.apache.hadoop.ozone.audit.AuditLogger) OMClientResponse(org.apache.hadoop.ozone.om.response.OMClientResponse) IOException(java.io.IOException) OMResponse(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse) OzoneManagerProtocolProtos(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager)
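
The bookkeeping worth noting above: the released quota is the sum of each deleted key's replicated block lengths (sumBlockLengths), and namespace usage drops by one per key. Below is a minimal standalone sketch of that arithmetic; KeyStub is a hypothetical stand-in for OmKeyInfo, and none of this is Ozone code.

import java.util.List;

public class DeleteQuotaSketch {

    // Hypothetical stand-in for OmKeyInfo: replicated size of a key in bytes.
    record KeyStub(String name, long replicatedBlockBytes) { }

    // Mirrors the quotaReleased accumulation in Example 96.
    static long quotaReleased(List<KeyStub> deletedKeys) {
        long released = 0;
        for (KeyStub key : deletedKeys) {
            released += key.replicatedBlockBytes; // sumBlockLengths(omKeyInfo)
        }
        return released;
    }

    public static void main(String[] args) {
        List<KeyStub> deleted = List.of(
            new KeyStub("key1", 256L << 20),   // 256 MB of replicated blocks
            new KeyStub("key2", 128L << 20));  // 128 MB
        System.out.println("usedBytes     -= " + quotaReleased(deleted));
        System.out.println("usedNamespace -= " + deleted.size());
    }
}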

Example 97 with OmKeyInfo

Use of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in project ozone by apache.

From class OMFileCreateRequestWithFSO, method validateAndUpdateCache.

@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
    CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest();
    KeyArgs keyArgs = createFileRequest.getKeyArgs();
    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
    String volumeName = keyArgs.getVolumeName();
    String bucketName = keyArgs.getBucketName();
    String keyName = keyArgs.getKeyName();
    // If isRecursive is true, the file is created even if its parent
    // directories do not exist.
    boolean isRecursive = createFileRequest.getIsRecursive();
    if (LOG.isDebugEnabled()) {
        LOG.debug("File create for : " + volumeName + "/" + bucketName + "/" + keyName + ":" + isRecursive);
    }
    // If isOverWrite is true, an existing file is overwritten.
    boolean isOverWrite = createFileRequest.getIsOverwrite();
    OMMetrics omMetrics = ozoneManager.getMetrics();
    omMetrics.incNumCreateFile();
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    boolean acquiredLock = false;
    OmBucketInfo omBucketInfo = null;
    final List<OmKeyLocationInfo> locations = new ArrayList<>();
    List<OmDirectoryInfo> missingParentInfos;
    int numKeysCreated = 0;
    OMClientResponse omClientResponse = null;
    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
    IOException exception = null;
    Result result = null;
    try {
        keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
        volumeName = keyArgs.getVolumeName();
        bucketName = keyArgs.getBucketName();
        if (keyName.length() == 0) {
            // Check if this is the root of the filesystem.
            throw new OMException("Can not write to directory: " + keyName, OMException.ResultCodes.NOT_A_FILE);
        }
        // check Acl
        checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
        // acquire lock
        acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
        validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
        OmKeyInfo dbFileInfo = null;
        OMFileRequest.OMPathInfoWithFSO pathInfoFSO = OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager, volumeName, bucketName, keyName, Paths.get(keyName));
        if (pathInfoFSO.getDirectoryResult() == OMFileRequest.OMDirectoryResult.FILE_EXISTS) {
            String dbFileKey = omMetadataManager.getOzonePathKey(pathInfoFSO.getLastKnownParentId(), pathInfoFSO.getLeafNodeName());
            dbFileInfo = OMFileRequest.getOmKeyInfoFromFileTable(false, omMetadataManager, dbFileKey, keyName);
        }
        // Check whether the file or directory already exists in OM.
        checkDirectoryResult(keyName, isOverWrite, pathInfoFSO.getDirectoryResult());
        if (!isRecursive) {
            checkAllParentsExist(keyArgs, pathInfoFSO);
        }
        // add all missing parents to dir table
        missingParentInfos = OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo(ozoneManager, keyArgs, pathInfoFSO, trxnLogIndex);
        // total number of keys created.
        numKeysCreated = missingParentInfos.size();
        // do open key
        OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(omMetadataManager.getBucketKey(volumeName, bucketName));
        OmKeyInfo omFileInfo = prepareFileInfo(omMetadataManager, keyArgs, dbFileInfo, keyArgs.getDataSize(), locations, getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(), bucketInfo, pathInfoFSO, trxnLogIndex, pathInfoFSO.getLeafNodeObjectId(), ozoneManager.isRatisEnabled());
        long openVersion = omFileInfo.getLatestVersionLocations().getVersion();
        long clientID = createFileRequest.getClientID();
        String dbOpenFileName = omMetadataManager.getOpenFileName(pathInfoFSO.getLastKnownParentId(), pathInfoFSO.getLeafNodeName(), clientID);
        // Append new blocks
        List<OmKeyLocationInfo> newLocationList = keyArgs.getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf).collect(Collectors.toList());
        omFileInfo.appendNewBlocks(newLocationList, false);
        omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
        // check bucket and volume quota
        long preAllocatedSpace = newLocationList.size() * ozoneManager.getScmBlockSize() * omFileInfo.getReplicationConfig().getRequiredNodes();
        checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace);
        checkBucketQuotaInNamespace(omBucketInfo, 1L);
        // Adding the cache entry for this open key can be done outside the
        // lock: even if the bucket gets deleted in the meantime, commitKey
        // will detect that the bucket no longer exists.
        OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, dbOpenFileName, omFileInfo, pathInfoFSO.getLeafNodeName(), trxnLogIndex);
        // Add cache entries for the prefix directories.
        // Skip adding for the file key itself, until Key Commit.
        OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager, Optional.absent(), Optional.of(missingParentInfos), trxnLogIndex);
        omBucketInfo.incrUsedBytes(preAllocatedSpace);
        // Update namespace quota
        omBucketInfo.incrUsedNamespace(1L);
        // Prepare the response. Set the user-given full key name in the
        // 'keyName' attribute of the response object.
        int clientVersion = getOmRequest().getVersion();
        omResponse.setCreateFileResponse(CreateFileResponse.newBuilder().setKeyInfo(omFileInfo.getNetworkProtobuf(keyName, clientVersion, keyArgs.getLatestVersionLocation())).setID(clientID).setOpenVersion(openVersion).build()).setCmdType(Type.CreateFile);
        omClientResponse = new OMFileCreateResponseWithFSO(omResponse.build(), omFileInfo, missingParentInfos, clientID, omBucketInfo.copyObject());
        result = Result.SUCCESS;
    } catch (IOException ex) {
        result = Result.FAILURE;
        exception = ex;
        omMetrics.incNumCreateFileFails();
        omResponse.setCmdType(Type.CreateFile);
        omClientResponse = new OMFileCreateResponseWithFSO(createErrorOMResponse(omResponse, exception), getBucketLayout());
    } finally {
        addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper);
        if (acquiredLock) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
        }
    }
    // Audit Log outside the lock
    auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(OMAction.CREATE_FILE, auditMap, exception, getOmRequest().getUserInfo()));
    switch(result) {
        case SUCCESS:
            omMetrics.incNumKeys(numKeysCreated);
            LOG.debug("File created. Volume:{}, Bucket:{}, Key:{}", volumeName, bucketName, keyName);
            break;
        case FAILURE:
            LOG.error("File create failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, bucketName, keyName, exception);
            break;
        default:
            LOG.error("Unrecognized Result for OMFileCreateRequest: {}", createFileRequest);
    }
    return omClientResponse;
}
Also used: OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) ArrayList(java.util.ArrayList) OMFileCreateResponseWithFSO(org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseWithFSO) OMMetrics(org.apache.hadoop.ozone.om.OMMetrics) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OmDirectoryInfo(org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo) OMClientResponse(org.apache.hadoop.ozone.om.response.OMClientResponse) CreateFileRequest(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest) KeyArgs(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs) IOException(java.io.IOException) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) OMResponse(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) OMException(org.apache.hadoop.ozone.om.exceptions.OMException)
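
A pattern to note in this example is the byte-quota pre-allocation: before any data is written, space is reserved for every new block at the full SCM block size times the replication factor, and the request fails if the bucket's remaining quota cannot cover it. A minimal sketch of that check follows; the names and the 256 MB block size are illustrative assumptions, not the Ozone API.

public class PreAllocQuotaSketch {

    // Fail if the reservation would push usage past the bucket's byte quota
    // (a quota below zero means "unlimited" in this sketch).
    static void checkBucketQuotaInBytes(long quotaInBytes, long usedBytes,
                                        long preAllocatedSpace) {
        if (quotaInBytes >= 0 && usedBytes + preAllocatedSpace > quotaInBytes) {
            throw new IllegalStateException("Bucket byte quota exceeded");
        }
    }

    public static void main(String[] args) {
        long scmBlockSize = 256L << 20; // assumed 256 MB SCM block size
        int requiredNodes = 3;          // e.g. a three-replica config
        int newBlocks = 2;
        long preAllocatedSpace =
            (long) newBlocks * scmBlockSize * requiredNodes;
        checkBucketQuotaInBytes(10L << 30, 1L << 30, preAllocatedSpace);
        System.out.println("reserved " + preAllocatedSpace + " bytes");
    }
}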

Example 98 with OmKeyInfo

Use of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in project ozone by apache.

From class OMKeyCommitRequestWithFSO, method validateAndUpdateCache.

@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
    CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest();
    KeyArgs commitKeyArgs = commitKeyRequest.getKeyArgs();
    String volumeName = commitKeyArgs.getVolumeName();
    String bucketName = commitKeyArgs.getBucketName();
    String keyName = commitKeyArgs.getKeyName();
    OMMetrics omMetrics = ozoneManager.getMetrics();
    omMetrics.incNumKeyCommits();
    AuditLogger auditLogger = ozoneManager.getAuditLogger();
    Map<String, String> auditMap = buildKeyArgsAuditMap(commitKeyArgs);
    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
    IOException exception = null;
    OmKeyInfo omKeyInfo = null;
    OmBucketInfo omBucketInfo = null;
    OMClientResponse omClientResponse = null;
    boolean bucketLockAcquired = false;
    Result result;
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    try {
        commitKeyArgs = resolveBucketLink(ozoneManager, commitKeyArgs, auditMap);
        volumeName = commitKeyArgs.getVolumeName();
        bucketName = commitKeyArgs.getBucketName();
        // check Acl
        checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.WRITE, commitKeyRequest.getClientID());
        String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
        Iterator<Path> pathComponents = Paths.get(keyName).iterator();
        String dbOpenFileKey = null;
        List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
        for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) {
            locationInfoList.add(OmKeyLocationInfo.getFromProtobuf(keyLocation));
        }
        bucketLockAcquired = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
        validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
        String fileName = OzoneFSUtils.getFileName(keyName);
        omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
        long bucketId = omBucketInfo.getObjectID();
        long parentID = OMFileRequest.getParentID(bucketId, pathComponents, keyName, omMetadataManager, "Cannot create file : " + keyName + " as parent directory doesn't exist");
        String dbFileKey = omMetadataManager.getOzonePathKey(parentID, fileName);
        dbOpenFileKey = omMetadataManager.getOpenFileName(parentID, fileName, commitKeyRequest.getClientID());
        omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(true, omMetadataManager, dbOpenFileKey, keyName);
        if (omKeyInfo == null) {
            throw new OMException("Failed to commit key, as " + dbOpenFileKey + "entry is not found in the OpenKey table", KEY_NOT_FOUND);
        }
        omKeyInfo.setDataSize(commitKeyArgs.getDataSize());
        omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime());
        // Update the block length for each block
        omKeyInfo.updateLocationInfoList(locationInfoList, false);
        // Set the UpdateID to current transactionLogIndex
        omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
        // If bucket versioning is turned on during the update, between key
        // creation and key commit, old versions are simply overwritten and
        // not kept. Bucket versioning takes effect from the first key
        // created after the knob is turned on.
        RepeatedOmKeyInfo oldKeyVersionsToDelete = null;
        OmKeyInfo keyToDelete = omMetadataManager.getKeyTable(getBucketLayout()).get(dbFileKey);
        if (keyToDelete != null && !omBucketInfo.getIsVersionEnabled()) {
            oldKeyVersionsToDelete = getOldVersionsToCleanUp(dbFileKey, keyToDelete, omMetadataManager, trxnLogIndex, ozoneManager.isRatisEnabled());
        }
        // Add to cache of open key table and key table.
        OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, dbFileKey, null, fileName, trxnLogIndex);
        OMFileRequest.addFileTableCacheEntry(omMetadataManager, dbFileKey, omKeyInfo, fileName, trxnLogIndex);
        if (oldKeyVersionsToDelete != null) {
            OMFileRequest.addDeletedTableCacheEntry(omMetadataManager, dbFileKey, oldKeyVersionsToDelete, trxnLogIndex);
        }
        long scmBlockSize = ozoneManager.getScmBlockSize();
        int factor = omKeyInfo.getReplicationConfig().getRequiredNodes();
        // Blocks were pre-requested and usedBytes was updated during
        // createKey and allocateBlock. The space occupied by the key must be
        // based on its actual size, so the total block size reserved earlier
        // is subtracted.
        long correctedSpace = omKeyInfo.getDataSize() * factor - locationInfoList.size() * scmBlockSize * factor;
        // Subtract the size of blocks to be overwritten.
        if (keyToDelete != null) {
            correctedSpace -= keyToDelete.getDataSize() * keyToDelete.getReplicationConfig().getRequiredNodes();
        }
        omBucketInfo.incrUsedBytes(correctedSpace);
        omClientResponse = new OMKeyCommitResponseWithFSO(omResponse.build(), omKeyInfo, dbFileKey, dbOpenFileKey, omBucketInfo.copyObject(), oldKeyVersionsToDelete);
        result = Result.SUCCESS;
    } catch (IOException ex) {
        result = Result.FAILURE;
        exception = ex;
        omClientResponse = new OMKeyCommitResponseWithFSO(createErrorOMResponse(omResponse, exception), getBucketLayout());
    } finally {
        addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper);
        if (bucketLockAcquired) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
        }
    }
    auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap, exception, getOmRequest().getUserInfo()));
    processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics, exception, omKeyInfo, result);
    return omClientResponse;
}
Also used: OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) ArrayList(java.util.ArrayList) OMMetrics(org.apache.hadoop.ozone.om.OMMetrics) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) OMKeyCommitResponseWithFSO(org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponseWithFSO) Path(java.nio.file.Path) AuditLogger(org.apache.hadoop.ozone.audit.AuditLogger) OMClientResponse(org.apache.hadoop.ozone.om.response.OMClientResponse) CommitKeyRequest(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest) KeyLocation(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation) KeyArgs(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs) IOException(java.io.IOException) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) OMResponse(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) OMException(org.apache.hadoop.ozone.om.exceptions.OMException)
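
The usedBytes correction above deserves a closer look: createKey and allocateBlock reserved whole blocks up front, so at commit the bucket usage is adjusted down to the key's actual replicated size, minus the replicated size of any key being silently overwritten. A minimal sketch of the arithmetic, with illustrative names rather than the Ozone API:

public class CommitQuotaSketch {

    // Delta to apply to the bucket's usedBytes at commit time.
    static long correctedSpace(long dataSize, int factor,
                               int numBlocks, long scmBlockSize,
                               long overwrittenSize, int overwrittenFactor) {
        long delta = dataSize * factor
            - (long) numBlocks * scmBlockSize * factor; // undo reservation
        delta -= overwrittenSize * overwrittenFactor;   // overwritten key
        return delta; // usually negative: reservation shrinks to actual size
    }

    public static void main(String[] args) {
        long scmBlockSize = 256L << 20; // assumed 256 MB SCM block size
        // A 100 MB key written into one reserved block, replication 3:
        long delta = correctedSpace(100L << 20, 3, 1, scmBlockSize, 0, 1);
        System.out.println("usedBytes += " + delta); // negative delta
    }
}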

Example 99 with OmKeyInfo

Use of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in project ozone by apache.

From class S3InitiateMultipartUploadRequestWithFSO, method validateAndUpdateCache.

@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long transactionLogIndex, OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
    MultipartInfoInitiateRequest multipartInfoInitiateRequest = getOmRequest().getInitiateMultiPartUploadRequest();
    KeyArgs keyArgs = multipartInfoInitiateRequest.getKeyArgs();
    Preconditions.checkNotNull(keyArgs.getMultipartUploadID());
    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
    String volumeName = keyArgs.getVolumeName();
    String bucketName = keyArgs.getBucketName();
    final String requestedVolume = volumeName;
    final String requestedBucket = bucketName;
    String keyName = keyArgs.getKeyName();
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    ozoneManager.getMetrics().incNumInitiateMultipartUploads();
    boolean acquiredBucketLock = false;
    IOException exception = null;
    OmMultipartKeyInfo multipartKeyInfo = null;
    OmKeyInfo omKeyInfo = null;
    List<OmDirectoryInfo> missingParentInfos;
    Result result = null;
    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
    OMClientResponse omClientResponse = null;
    try {
        keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
        volumeName = keyArgs.getVolumeName();
        bucketName = keyArgs.getBucketName();
        // TODO: support S3 ACLs later.
        acquiredBucketLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
        validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
        OMFileRequest.OMPathInfoWithFSO pathInfoFSO = OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager, volumeName, bucketName, keyName, Paths.get(keyName));
        // Check whether the directory already exists in OM.
        checkDirectoryResult(keyName, pathInfoFSO.getDirectoryResult());
        // add all missing parents to dir table
        missingParentInfos = OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo(ozoneManager, keyArgs, pathInfoFSO, transactionLogIndex);
        // We add the uploadId to the key because, when multiple users run a
        // multipart upload on the same key, each uploads independently and
        // whoever commits last is the version seen in Ozone. If we did not
        // add the id and used the bare key /volume/bucket/key, parts from
        // different users would be applied to the same key, and the final
        // key could be a mix of parts from multiple users.
        // So if multipart upload is initiated several times on the same key,
        // we store multiple entries in the openKey table.
        // AWS S3 behaves the same way: each multipart upload returns a new
        // uploadId, and an upload id is returned even if the key already
        // exists when the initiate request is received.
        String multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName, keyName, keyArgs.getMultipartUploadID());
        String multipartOpenKey = omMetadataManager.getMultipartKey(pathInfoFSO.getLastKnownParentId(), pathInfoFSO.getLeafNodeName(), keyArgs.getMultipartUploadID());
        // Even if this key already exists in the KeyTable, it is taken care
        // of in the final complete-multipart-upload step. AWS S3 behaves the
        // same way: even when a key exists in a bucket, a user can still
        // initiate an MPU.
        final ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(keyArgs.getType(), keyArgs.getFactor());
        multipartKeyInfo = new OmMultipartKeyInfo.Builder().setUploadID(keyArgs.getMultipartUploadID()).setCreationTime(keyArgs.getModificationTime()).setReplicationConfig(replicationConfig).setObjectID(pathInfoFSO.getLeafNodeObjectId()).setUpdateID(transactionLogIndex).setParentID(pathInfoFSO.getLastKnownParentId()).build();
        omKeyInfo = new OmKeyInfo.Builder().setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyArgs.getKeyName()).setCreationTime(keyArgs.getModificationTime()).setModificationTime(keyArgs.getModificationTime()).setReplicationConfig(replicationConfig).setOmKeyLocationInfos(Collections.singletonList(new OmKeyLocationInfoGroup(0, new ArrayList<>()))).setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())).setObjectID(pathInfoFSO.getLeafNodeObjectId()).setUpdateID(transactionLogIndex).setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null).setParentObjectID(pathInfoFSO.getLastKnownParentId()).build();
        // Add cache entries for the prefix directories.
        // Skip adding for the file key itself, until Key Commit.
        OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager, Optional.absent(), Optional.of(missingParentInfos), transactionLogIndex);
        OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(), transactionLogIndex);
        // Add to cache
        omMetadataManager.getMultipartInfoTable().addCacheEntry(new CacheKey<>(multipartKey), new CacheValue<>(Optional.of(multipartKeyInfo), transactionLogIndex));
        omClientResponse = new S3InitiateMultipartUploadResponseWithFSO(omResponse.setInitiateMultiPartUploadResponse(MultipartInfoInitiateResponse.newBuilder().setVolumeName(requestedVolume).setBucketName(requestedBucket).setKeyName(keyName).setMultipartUploadID(keyArgs.getMultipartUploadID())).build(), multipartKeyInfo, omKeyInfo, multipartKey, missingParentInfos, getBucketLayout());
        result = Result.SUCCESS;
    } catch (IOException ex) {
        result = Result.FAILURE;
        exception = ex;
        omClientResponse = new S3InitiateMultipartUploadResponseWithFSO(createErrorOMResponse(omResponse, exception), getBucketLayout());
    } finally {
        addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, ozoneManagerDoubleBufferHelper);
        if (acquiredBucketLock) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
        }
    }
    logResult(ozoneManager, multipartInfoInitiateRequest, auditMap, volumeName, bucketName, keyName, exception, result);
    return omClientResponse;
}
Also used: OMClientResponse(org.apache.hadoop.ozone.om.response.OMClientResponse) ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) ArrayList(java.util.ArrayList) KeyArgs(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs) IOException(java.io.IOException) OMResponse(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse) OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) OmMultipartKeyInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo) OMFileRequest(org.apache.hadoop.ozone.om.request.file.OMFileRequest) S3InitiateMultipartUploadResponseWithFSO(org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponseWithFSO) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OmDirectoryInfo(org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo) MultipartInfoInitiateRequest(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest)
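
The long comment in this example boils down to a naming decision: embedding the upload ID in the open-key name gives every initiation of the same key its own table entry, so concurrent uploads cannot interleave parts. A minimal sketch of that idea; the separator and layout are illustrative, not the exact Ozone DB key format:

public class MultipartKeySketch {

    // Hypothetical key builder: distinct uploads of the same key map to
    // distinct entries because the upload ID is part of the name.
    static String multipartKey(String volume, String bucket,
                               String key, String uploadId) {
        return "/" + volume + "/" + bucket + "/" + key + "/" + uploadId;
    }

    public static void main(String[] args) {
        String first  = multipartKey("vol1", "buck1", "key1", "upload-111");
        String second = multipartKey("vol1", "buck1", "key1", "upload-222");
        System.out.println(first.equals(second)); // false: no collision
    }
}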

Example 100 with OmKeyInfo

Use of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in project ozone by apache.

From class OMKeyAclRequest, method validateAndUpdateCache.

@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
    OmKeyInfo omKeyInfo = null;
    OMResponse.Builder omResponse = onInit();
    OMClientResponse omClientResponse = null;
    IOException exception = null;
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    boolean lockAcquired = false;
    String volume = null;
    String bucket = null;
    String key = null;
    boolean operationResult = false;
    Result result = null;
    try {
        ObjectParser objectParser = new ObjectParser(getPath(), ObjectType.KEY);
        volume = objectParser.getVolume();
        bucket = objectParser.getBucket();
        key = objectParser.getKey();
        // check Acl
        if (ozoneManager.getAclsEnabled()) {
            checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME, OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL, volume, bucket, key);
        }
        lockAcquired = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volume, bucket);
        String dbKey = omMetadataManager.getOzoneKey(volume, bucket, key);
        omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(dbKey);
        if (omKeyInfo == null) {
            throw new OMException(OMException.ResultCodes.KEY_NOT_FOUND);
        }
        operationResult = apply(omKeyInfo, trxnLogIndex);
        omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
        // Update the modification time when updating the key's ACLs.
        long modificationTime = omKeyInfo.getModificationTime();
        if (getOmRequest().getAddAclRequest().hasObj() && operationResult) {
            modificationTime = getOmRequest().getAddAclRequest().getModificationTime();
        } else if (getOmRequest().getSetAclRequest().hasObj() && operationResult) {
            modificationTime = getOmRequest().getSetAclRequest().getModificationTime();
        } else if (getOmRequest().getRemoveAclRequest().hasObj() && operationResult) {
            modificationTime = getOmRequest().getRemoveAclRequest().getModificationTime();
        }
        omKeyInfo.setModificationTime(modificationTime);
        // update cache.
        omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry(new CacheKey<>(dbKey), new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
        omClientResponse = onSuccess(omResponse, omKeyInfo, operationResult);
        result = Result.SUCCESS;
    } catch (IOException ex) {
        result = Result.FAILURE;
        exception = ex;
        omClientResponse = onFailure(omResponse, ex);
    } finally {
        addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper);
        if (lockAcquired) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volume, bucket);
        }
    }
    OzoneObj obj = getObject();
    Map<String, String> auditMap = obj.toAuditMap();
    onComplete(result, operationResult, exception, trxnLogIndex, ozoneManager.getAuditLogger(), auditMap);
    return omClientResponse;
}
Also used: ObjectParser(org.apache.hadoop.ozone.om.request.util.ObjectParser) OzoneObj(org.apache.hadoop.ozone.security.acl.OzoneObj) OMClientResponse(org.apache.hadoop.ozone.om.response.OMClientResponse) IOException(java.io.IOException) OMResponse(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) OMException(org.apache.hadoop.ozone.om.exceptions.OMException)
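
Unlike the earlier examples, OMKeyAclRequest is a template method: validateAndUpdateCache fixes the control flow while subclasses supply apply(), onSuccess() and onFailure() for add, set and remove ACL. A minimal sketch of that shape, with hypothetical names rather than the Ozone classes:

import java.util.ArrayList;
import java.util.List;

public class AclTemplateSketch {

    abstract static class AclRequest {
        // Subclasses implement the mutation; return true if anything changed.
        abstract boolean apply(List<String> acls);

        // Fixed skeleton: run the mutation, report the outcome.
        final String run(List<String> acls) {
            try {
                return apply(acls) ? "SUCCESS" : "NO_OP";
            } catch (RuntimeException e) {
                return "FAILURE: " + e.getMessage();
            }
        }
    }

    public static void main(String[] args) {
        AclRequest addAcl = new AclRequest() {
            @Override
            boolean apply(List<String> acls) {
                return acls.contains("user:alice:rw")
                    ? false : acls.add("user:alice:rw");
            }
        };
        List<String> acls = new ArrayList<>();
        System.out.println(addAcl.run(acls)); // SUCCESS
        System.out.println(addAcl.run(acls)); // NO_OP: already present
    }
}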

Aggregations

OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo) 258
Test (org.junit.Test) 102
RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) 79
ArrayList (java.util.ArrayList) 61
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) 57
IOException (java.io.IOException) 54
OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse) 47
OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs) 46
OMException (org.apache.hadoop.ozone.om.exceptions.OMException) 38
OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo) 37
OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager) 32
OmKeyLocationInfoGroup (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) 32
OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse) 30
OmDirectoryInfo (org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo) 28
OzoneManagerProtocolProtos (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos) 27
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline) 26
HashMap (java.util.HashMap) 25
OMRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest) 24
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream) 23
Map (java.util.Map) 21