
Example 6 with ResolvedBucket

use of org.apache.hadoop.ozone.om.ResolvedBucket in project ozone by apache.

In the class OMTrashRecoverRequest, the method validateAndUpdateCache:

@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long transactionLogIndex, OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
    RecoverTrashRequest recoverTrashRequest = getOmRequest().getRecoverTrashRequest();
    Preconditions.checkNotNull(recoverTrashRequest);
    String volumeName = recoverTrashRequest.getVolumeName();
    String bucketName = recoverTrashRequest.getBucketName();
    String keyName = recoverTrashRequest.getKeyName();
    String destinationBucket = recoverTrashRequest.getDestinationBucket();
    /**
     * TODO: HDDS-2818. New Metrics for Trash Key Recover and Fails.
     *  OMMetrics omMetrics = ozoneManager.getMetrics();
     */
    OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(Type.RecoverTrash).setStatus(Status.OK).setSuccess(true);
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    boolean acquireLock = false;
    OMClientResponse omClientResponse = null;
    try {
        ResolvedBucket bucket = ozoneManager.resolveBucketLink(Pair.of(volumeName, destinationBucket), this);
        volumeName = bucket.realVolume();
        destinationBucket = bucket.realBucket();
        // Check acl for the destination bucket.
        checkBucketAcls(ozoneManager, volumeName, destinationBucket, keyName, IAccessAuthorizer.ACLType.WRITE);
        acquireLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, destinationBucket);
        // Validate.
        validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
        validateBucketAndVolume(omMetadataManager, volumeName, destinationBucket);
        /**
         * TODO: HDDS-2425. HDDS-2426.
         *  Update cache.
         *    omMetadataManager.getKeyTable().addCacheEntry(
         *    new CacheKey<>(),
         *    new CacheValue<>()
         *    );
         *
         *  Execute recovering trash in non-existing bucket.
         *  Execute recovering trash in existing bucket.
         *    omClientResponse = new OMTrashRecoverResponse(omKeyInfo,
         *    omResponse.setRecoverTrashResponse(
         *    RecoverTrashResponse.newBuilder())
         *    .build());
         */
        omClientResponse = null;
    } catch (IOException ex) {
        LOG.error("Fail for recovering trash.", ex);
        omClientResponse = new OMTrashRecoverResponse(null, createErrorOMResponse(omResponse, ex), getBucketLayout());
    } finally {
        if (omClientResponse != null) {
            omClientResponse.setFlushFuture(ozoneManagerDoubleBufferHelper.add(omClientResponse, transactionLogIndex));
        }
        if (acquireLock) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, destinationBucket);
        }
    }
    return omClientResponse;
}
Also used: OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse), RecoverTrashRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverTrashRequest), OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager), OMTrashRecoverResponse (org.apache.hadoop.ozone.om.response.key.OMTrashRecoverResponse), IOException (java.io.IOException), ResolvedBucket (org.apache.hadoop.ozone.om.ResolvedBucket), OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse)
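
A minimal sketch of the pattern this request shares with the other write requests on this page: resolve a possible bucket link, switch to the real volume/bucket names, and guard the bucket write lock with a flag so it is released only if it was actually acquired. The helper name and structure are hypothetical, not part of Ozone; only the calls shown in the examples (resolveBucketLink, realVolume/realBucket, acquireWriteLock/releaseWriteLock on BUCKET_LOCK) are assumed.

// Hypothetical helper inside an OM write request class; IOException propagates
// to the caller, mirroring validateAndUpdateCache above.
private void updateUnderResolvedBucketLock(OzoneManager ozoneManager, String volumeName, String bucketName) throws IOException {
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    // Follow a possible bucket link to the real volume/bucket.
    ResolvedBucket bucket = ozoneManager.resolveBucketLink(Pair.of(volumeName, bucketName), this);
    String realVolume = bucket.realVolume();
    String realBucket = bucket.realBucket();
    boolean acquiredLock = false;
    try {
        acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, realVolume, realBucket);
        // ... update table caches and build the OMClientResponse here ...
    } finally {
        if (acquiredLock) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, realVolume, realBucket);
        }
    }
}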

Example 7 with ResolvedBucket

use of org.apache.hadoop.ozone.om.ResolvedBucket in project ozone by apache.

In the class OMKeysDeleteRequest, the method validateAndUpdateCache:

@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
    DeleteKeysRequest deleteKeyRequest = getOmRequest().getDeleteKeysRequest();
    OzoneManagerProtocolProtos.DeleteKeyArgs deleteKeyArgs = deleteKeyRequest.getDeleteKeys();
    List<String> deleteKeys = new ArrayList<>(deleteKeyArgs.getKeysList());
    IOException exception = null;
    OMClientResponse omClientResponse = null;
    Result result = null;
    OMMetrics omMetrics = ozoneManager.getMetrics();
    omMetrics.incNumKeyDeletes();
    String volumeName = deleteKeyArgs.getVolumeName();
    String bucketName = deleteKeyArgs.getBucketName();
    Map<String, String> auditMap = new LinkedHashMap<>();
    auditMap.put(VOLUME, volumeName);
    auditMap.put(BUCKET, bucketName);
    List<OmKeyInfo> omKeyInfoList = new ArrayList<>();
    AuditLogger auditLogger = ozoneManager.getAuditLogger();
    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    boolean acquiredLock = false;
    int indexFailed = 0;
    int length = deleteKeys.size();
    OzoneManagerProtocolProtos.DeleteKeyArgs.Builder unDeletedKeys =
        OzoneManagerProtocolProtos.DeleteKeyArgs.newBuilder()
            .setVolumeName(volumeName).setBucketName(bucketName);
    boolean deleteStatus = true;
    try {
        ResolvedBucket bucket = ozoneManager.resolveBucketLink(Pair.of(volumeName, bucketName), this);
        bucket.audit(auditMap);
        volumeName = bucket.realVolume();
        bucketName = bucket.realBucket();
        acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
        // Validate that the bucket and volume exist.
        validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
        String volumeOwner = getVolumeOwner(omMetadataManager, volumeName);
        for (indexFailed = 0; indexFailed < length; indexFailed++) {
            String keyName = deleteKeyArgs.getKeys(indexFailed);
            String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
            OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()).get(objectKey);
            if (omKeyInfo == null) {
                deleteStatus = false;
                LOG.error("Received a request to delete a Key does not exist {}", objectKey);
                deleteKeys.remove(keyName);
                unDeletedKeys.addKeys(keyName);
                continue;
            }
            try {
                // check Acl
                checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY, volumeOwner);
                omKeyInfoList.add(omKeyInfo);
            } catch (Exception ex) {
                deleteStatus = false;
                LOG.error("Acl check failed for Key: {}", objectKey, ex);
                deleteKeys.remove(keyName);
                unDeletedKeys.addKeys(keyName);
            }
        }
        long quotaReleased = 0;
        OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
        // Mark all keys that can be deleted as deleted in the cache.
        for (OmKeyInfo omKeyInfo : omKeyInfoList) {
            omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry(
                new CacheKey<>(omMetadataManager.getOzoneKey(
                    volumeName, bucketName, omKeyInfo.getKeyName())),
                new CacheValue<>(Optional.absent(), trxnLogIndex));
            omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
            quotaReleased += sumBlockLengths(omKeyInfo);
        }
        omBucketInfo.incrUsedBytes(-quotaReleased);
        omBucketInfo.incrUsedNamespace(-1L * omKeyInfoList.size());
        omClientResponse = new OMKeysDeleteResponse(
            omResponse.setDeleteKeysResponse(DeleteKeysResponse.newBuilder()
                    .setStatus(deleteStatus)
                    .setUnDeletedKeys(unDeletedKeys))
                .setStatus(deleteStatus ? OK : PARTIAL_DELETE)
                .setSuccess(deleteStatus)
                .build(),
            omKeyInfoList, ozoneManager.isRatisEnabled(), omBucketInfo.copyObject());
        result = Result.SUCCESS;
    } catch (IOException ex) {
        result = Result.FAILURE;
        exception = ex;
        createErrorOMResponse(omResponse, ex);
        // reset deleteKeys as request failed.
        deleteKeys = new ArrayList<>();
        // Mark all remaining keys as undeleted, since the request failed with an exception.
        for (int i = indexFailed; i < length; i++) {
            unDeletedKeys.addKeys(deleteKeyArgs.getKeys(i));
        }
        omResponse.setDeleteKeysResponse(DeleteKeysResponse.newBuilder()
            .setStatus(false)
            .setUnDeletedKeys(unDeletedKeys)
            .build()).build();
        omClientResponse = new OMKeysDeleteResponse(omResponse.build(), getBucketLayout());
    } finally {
        if (acquiredLock) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
        }
        addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper);
    }
    addDeletedKeys(auditMap, deleteKeys, unDeletedKeys.getKeysList());
    auditLog(auditLogger, buildAuditMessage(DELETE_KEYS, auditMap, exception, userInfo));
    switch(result) {
        case SUCCESS:
            omMetrics.decNumKeys(deleteKeys.size());
            if (LOG.isDebugEnabled()) {
                LOG.debug("Keys delete success. Volume:{}, Bucket:{}, Keys:{}", volumeName, bucketName, auditMap.get(DELETED_KEYS_LIST));
            }
            break;
        case FAILURE:
            omMetrics.incNumKeyDeleteFails();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Keys delete failed. Volume:{}, Bucket:{}, DeletedKeys:{}, " + "UnDeletedKeys:{}", volumeName, bucketName, auditMap.get(DELETED_KEYS_LIST), auditMap.get(UNDELETED_KEYS_LIST), exception);
            }
            break;
        default:
            LOG.error("Unrecognized Result for OMKeysDeleteRequest: {}", deleteKeyRequest);
    }
    return omClientResponse;
}
Also used: OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo), DeleteKeysRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest), ArrayList (java.util.ArrayList), OMKeysDeleteResponse (org.apache.hadoop.ozone.om.response.key.OMKeysDeleteResponse), OMMetrics (org.apache.hadoop.ozone.om.OMMetrics), LinkedHashMap (java.util.LinkedHashMap), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), ResolvedBucket (org.apache.hadoop.ozone.om.ResolvedBucket), AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger), OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse), IOException (java.io.IOException), OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse), OzoneManagerProtocolProtos (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos), OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager)
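
The request reports partial failures through the response rather than by throwing: status is true only when every key was deleted, and keys skipped because they were missing or failed the ACL check come back in unDeletedKeys. A hypothetical caller-side check is sketched below; it assumes only the standard protobuf getters generated for the fields set above (getDeleteKeysResponse, getStatus, getUnDeletedKeys, getKeysList).

// Hypothetical handling of a built OMResponse after a deleteKeys call;
// "omResponse" here is the final message, not the builder used above.
DeleteKeysResponse deleteResponse = omResponse.getDeleteKeysResponse();
if (!deleteResponse.getStatus()) {
    // Keys that were not deleted: missing keys and keys that failed ACL checks.
    for (String unDeletedKey : deleteResponse.getUnDeletedKeys().getKeysList()) {
        LOG.warn("Key not deleted: {}", unDeletedKey);
    }
}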

Example 8 with ResolvedBucket

use of org.apache.hadoop.ozone.om.ResolvedBucket in project ozone by apache.

In the class TestCleanupTableInfo, the method setupOzoneManagerMock:

/**
 * Creates a mock Ozone Manager object.
 * Defined behaviour in the mock:
 *  - returns the specified metrics instance
 *  - returns the specified metadataManager
 *  - resolves the bucket links to themselves (no symlinks)
 *  - disables ACLs
 *  - provides an audit logger
 *
 * @return the mocked Ozone Manager
 * @throws IOException should not happen, but is declared by the mocked methods
 */
@Before
public void setupOzoneManagerMock() throws IOException {
    om = mock(OzoneManager.class);
    OMMetadataManager metaMgr = createOMMetadataManagerSpy();
    when(om.getMetrics()).thenReturn(omMetrics);
    when(om.getMetadataManager()).thenReturn(metaMgr);
    when(om.resolveBucketLink(any(KeyArgs.class), any(OMClientRequest.class))).thenAnswer(invocationOnMock -> {
        Pair<String, String> pair = Pair.of(TEST_VOLUME_NAME, TEST_BUCKET_NAME);
        return new ResolvedBucket(pair, pair);
    });
    when(om.getAclsEnabled()).thenReturn(false);
    when(om.getAuditLogger()).thenReturn(mock(AuditLogger.class));
    addVolumeToMetaTable(aVolumeArgs());
    addBucketToMetaTable(aBucketInfo());
}
Also used: OMClientRequest (org.apache.hadoop.ozone.om.request.OMClientRequest), AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger), OzoneManager (org.apache.hadoop.ozone.om.OzoneManager), OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager), KeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs), ResolvedBucket (org.apache.hadoop.ozone.om.ResolvedBucket), Before (org.junit.Before)
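
Because the stubbed resolveBucketLink answers with new ResolvedBucket(pair, pair), every bucket resolves to itself. A test body could rely on that as in the hypothetical fragment below; only the overload stubbed above is used, and the KeyArgs value, the mocked OMClientRequest argument, and the JUnit assertEquals call are illustrative.

// Hypothetical check against the mock built in setupOzoneManagerMock():
// the "real" names equal the requested names because links resolve to themselves.
KeyArgs keyArgs = KeyArgs.newBuilder()
    .setVolumeName(TEST_VOLUME_NAME)
    .setBucketName(TEST_BUCKET_NAME)
    .setKeyName("any-key")
    .build();
ResolvedBucket resolved = om.resolveBucketLink(keyArgs, mock(OMClientRequest.class));
assertEquals(TEST_VOLUME_NAME, resolved.realVolume());
assertEquals(TEST_BUCKET_NAME, resolved.realBucket());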

Example 9 with ResolvedBucket

use of org.apache.hadoop.ozone.om.ResolvedBucket in project ozone by apache.

In the class OMKeyRequest, the method resolveBucketLink:

protected KeyArgs resolveBucketLink(OzoneManager ozoneManager, KeyArgs keyArgs, Map<String, String> auditMap) throws IOException {
    ResolvedBucket bucket = ozoneManager.resolveBucketLink(keyArgs, this);
    keyArgs = bucket.update(keyArgs);
    bucket.audit(auditMap);
    return keyArgs;
}
Also used: ResolvedBucket (org.apache.hadoop.ozone.om.ResolvedBucket)
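
A concrete key request would typically call this helper at the top of its validateAndUpdateCache, so that every later table lookup and lock operation uses the real volume and bucket. A minimal, hypothetical call site (keyArgs is assumed to come from the incoming request, as in the other examples on this page):

// Hypothetical call site inside a subclass of OMKeyRequest.
Map<String, String> auditMap = new LinkedHashMap<>();
keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
// After resolution these are the link target's names, not the requested ones.
String volumeName = keyArgs.getVolumeName();
String bucketName = keyArgs.getBucketName();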

Example 10 with ResolvedBucket

use of org.apache.hadoop.ozone.om.ResolvedBucket in project ozone by apache.

In the class OMKeyRequest, the method getFileEncryptionInfoForMpuKey:

protected void getFileEncryptionInfoForMpuKey(KeyArgs keyArgs, KeyArgs.Builder newKeyArgs, OzoneManager ozoneManager) throws IOException {
    String volumeName = keyArgs.getVolumeName();
    String bucketName = keyArgs.getBucketName();
    boolean acquireLock = false;
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    if (ozoneManager.getKmsProvider() != null) {
        acquireLock = omMetadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
        try {
            ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink(Pair.of(keyArgs.getVolumeName(), keyArgs.getBucketName()));
            // Get the DB key name for looking up keyInfo in OpenKeyTable with
            // resolved volume/bucket.
            String dbMultipartOpenKey = getDBMultipartOpenKey(
                resolvedBucket.realVolume(), resolvedBucket.realBucket(),
                keyArgs.getKeyName(), keyArgs.getMultipartUploadID(), omMetadataManager);
            OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable(getBucketLayout()).get(dbMultipartOpenKey);
            if (omKeyInfo != null && omKeyInfo.getFileEncryptionInfo() != null) {
                newKeyArgs.setFileEncryptionInfo(OMPBHelper.convert(omKeyInfo.getFileEncryptionInfo()));
            }
        } finally {
            if (acquireLock) {
                omMetadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
            }
        }
    }
}
Also used: OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo), ResolvedBucket (org.apache.hadoop.ozone.om.ResolvedBucket)
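
Worth noting in Examples 6 and 10: the boolean returned by the lock acquire call is remembered, and the release happens in a finally block, so the lock is released only if it was actually taken. Stripped of the OM specifics, the idiom reduces to the sketch below; the lock variable stands in for omMetadataManager.getLock() and the names are illustrative.

// Acquire-flag / finally-release idiom used around BUCKET_LOCK above.
boolean acquired = false;
try {
    acquired = lock.acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
    // ... read the open key table while holding the lock ...
} finally {
    if (acquired) {
        lock.releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
    }
}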

Aggregations

ResolvedBucket (org.apache.hadoop.ozone.om.ResolvedBucket): 11
AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger): 7
OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager): 6
OzoneManager (org.apache.hadoop.ozone.om.OzoneManager): 5
OMClientRequest (org.apache.hadoop.ozone.om.request.OMClientRequest): 5
KeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs): 5
Before (org.junit.Before): 5
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 4
AuditMessage (org.apache.hadoop.ozone.audit.AuditMessage): 4
OmMetadataManagerImpl (org.apache.hadoop.ozone.om.OmMetadataManagerImpl): 4
IOException (java.io.IOException): 3
ArrayList (java.util.ArrayList): 3
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 3
OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse): 3
OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse): 3
LinkedHashMap (java.util.LinkedHashMap): 2
OMMetrics (org.apache.hadoop.ozone.om.OMMetrics): 2
OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo): 2
HashMap (java.util.HashMap): 1
Random (java.util.Random): 1