
Example 16 with AuditLogger

Use of org.apache.hadoop.ozone.audit.AuditLogger in project ozone by apache.

From the class OMKeyDeleteRequestWithFSO, method validateAndUpdateCache:

@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
    DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest();
    OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs();
    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
    String volumeName = keyArgs.getVolumeName();
    String bucketName = keyArgs.getBucketName();
    String keyName = keyArgs.getKeyName();
    boolean recursive = keyArgs.getRecursive();
    OMMetrics omMetrics = ozoneManager.getMetrics();
    omMetrics.incNumKeyDeletes();
    AuditLogger auditLogger = ozoneManager.getAuditLogger();
    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    IOException exception = null;
    boolean acquiredLock = false;
    OMClientResponse omClientResponse = null;
    Result result = null;
    OmBucketInfo omBucketInfo = null;
    try {
        keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
        volumeName = keyArgs.getVolumeName();
        bucketName = keyArgs.getBucketName();
        checkACLsWithFSO(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.DELETE);
        acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
        // Validate bucket and volume exists or not.
        validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
        OzoneFileStatus keyStatus = OMFileRequest.getOMKeyInfoIfExists(omMetadataManager, volumeName, bucketName, keyName, 0);
        if (keyStatus == null) {
            throw new OMException("Key not found. Key:" + keyName, KEY_NOT_FOUND);
        }
        OmKeyInfo omKeyInfo = keyStatus.getKeyInfo();
        // New key format for the fileTable & dirTable.
        // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
        // keyName field stores only the leaf node name, which is 'file1'.
        String fileName = OzoneFSUtils.getFileName(keyName);
        omKeyInfo.setKeyName(fileName);
        // Set the UpdateID to current transactionLogIndex
        omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
        String ozonePathKey = omMetadataManager.getOzonePathKey(omKeyInfo.getParentObjectID(), omKeyInfo.getFileName());
        if (keyStatus.isDirectory()) {
            // Check if there are any sub path exists under the user requested path
            if (!recursive && OMFileRequest.hasChildren(omKeyInfo, omMetadataManager)) {
                throw new OMException("Directory is not empty. Key:" + keyName, DIRECTORY_NOT_EMPTY);
            }
            // Update dir cache.
            omMetadataManager.getDirectoryTable().addCacheEntry(new CacheKey<>(ozonePathKey), new CacheValue<>(Optional.absent(), trxnLogIndex));
        } else {
            // Update table cache.
            omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry(new CacheKey<>(ozonePathKey), new CacheValue<>(Optional.absent(), trxnLogIndex));
        }
        omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
        // TODO: HDDS-4565: consider all the sub-paths if the path is a dir.
        long quotaReleased = sumBlockLengths(omKeyInfo);
        omBucketInfo.incrUsedBytes(-quotaReleased);
        omBucketInfo.incrUsedNamespace(-1L);
        // No need to add cache entries to delete table. As delete table will
        // be used by DeleteKeyService only, not used for any client response
        // validation, so we don't need to add to cache.
        // TODO: Revisit if we need it later.
        omClientResponse = new OMKeyDeleteResponseWithFSO(omResponse.setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(), keyName, omKeyInfo, ozoneManager.isRatisEnabled(), omBucketInfo.copyObject(), keyStatus.isDirectory());
        result = Result.SUCCESS;
    } catch (IOException ex) {
        result = Result.FAILURE;
        exception = ex;
        omClientResponse = new OMKeyDeleteResponseWithFSO(createErrorOMResponse(omResponse, exception), getBucketLayout());
    } finally {
        addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper);
        if (acquiredLock) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
        }
    }
    // Performing audit logging outside of the lock.
    auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap, exception, userInfo));
    switch(result) {
        case SUCCESS:
            omMetrics.decNumKeys();
            LOG.debug("Key deleted. Volume:{}, Bucket:{}, Key:{}", volumeName, bucketName, keyName);
            break;
        case FAILURE:
            omMetrics.incNumKeyDeleteFails();
            LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key:{}.", volumeName, bucketName, keyName, exception);
            break;
        default:
            LOG.error("Unrecognized Result for OMKeyDeleteRequest: {}", deleteKeyRequest);
    }
    return omClientResponse;
}
Also used: OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo), AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger), OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse), OMKeyDeleteResponseWithFSO (org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponseWithFSO), IOException (java.io.IOException), OMMetrics (org.apache.hadoop.ozone.om.OMMetrics), OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse), OzoneManagerProtocolProtos (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos), OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), OzoneFileStatus (org.apache.hadoop.ozone.om.helpers.OzoneFileStatus), OMException (org.apache.hadoop.ozone.om.exceptions.OMException), DeleteKeyRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest)
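
The delete handler above follows the ordering that every example in this section repeats: take the bucket write lock, mutate the table cache, release the lock in the finally block, and only then emit the audit record. The sketch below is a minimal, self-contained illustration of that ordering using plain JDK types; the class, the Map-based cache, and the audit(...) helper are illustrative stand-ins, not part of the Ozone API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class DeleteWithAuditSketch {
    private final ReentrantReadWriteLock bucketLock = new ReentrantReadWriteLock();
    private final Map<String, String> keyTableCache = new ConcurrentHashMap<>();

    public boolean deleteKey(String ozonePathKey) {
        Exception failure = null;
        boolean deleted = false;
        bucketLock.writeLock().lock();
        try {
            // Mutate the cache only while holding the bucket write lock,
            // mirroring the addCacheEntry(...) calls in the example.
            deleted = keyTableCache.remove(ozonePathKey) != null;
            if (!deleted) {
                throw new IllegalStateException("Key not found: " + ozonePathKey);
            }
        } catch (Exception ex) {
            failure = ex;
        } finally {
            bucketLock.writeLock().unlock();
        }
        // The audit record is emitted after the lock is released, just as
        // auditLog(...) above runs after the finally block.
        audit("DELETE_KEY", ozonePathKey, failure);
        return deleted;
    }

    private void audit(String action, String resource, Exception failure) {
        System.out.printf("audit action=%s resource=%s result=%s%n", action, resource,
            failure == null ? "SUCCESS" : "FAILURE: " + failure.getMessage());
    }
}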

Example 17 with AuditLogger

Use of org.apache.hadoop.ozone.audit.AuditLogger in project ozone by apache.

From the class OMKeyRenameRequest, method validateAndUpdateCache:

@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
    RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest();
    OzoneManagerProtocolProtos.KeyArgs keyArgs = renameKeyRequest.getKeyArgs();
    Map<String, String> auditMap = buildAuditMap(keyArgs, renameKeyRequest);
    String volumeName = keyArgs.getVolumeName();
    String bucketName = keyArgs.getBucketName();
    String fromKeyName = keyArgs.getKeyName();
    String toKeyName = renameKeyRequest.getToKeyName();
    OMMetrics omMetrics = ozoneManager.getMetrics();
    omMetrics.incNumKeyRenames();
    AuditLogger auditLogger = ozoneManager.getAuditLogger();
    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    boolean acquiredLock = false;
    OMClientResponse omClientResponse = null;
    IOException exception = null;
    OmKeyInfo fromKeyValue = null;
    String toKey = null, fromKey = null;
    Result result = null;
    try {
        if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
            throw new OMException("Key name is empty", OMException.ResultCodes.INVALID_KEY_NAME);
        }
        keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
        volumeName = keyArgs.getVolumeName();
        bucketName = keyArgs.getBucketName();
        // check Acls to see if user has access to perform delete operation on
        // old key and create operation on new key
        checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName, IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY);
        checkKeyAcls(ozoneManager, volumeName, bucketName, toKeyName, IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
        acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
        // Validate bucket and volume exists or not.
        validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
        // Check if toKey exists
        fromKey = omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName);
        toKey = omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName);
        OmKeyInfo toKeyValue = omMetadataManager.getKeyTable(getBucketLayout()).get(toKey);
        if (toKeyValue != null) {
            throw new OMException("Key already exists " + toKeyName, OMException.ResultCodes.KEY_ALREADY_EXISTS);
        }
        // fromKeyName should exist
        fromKeyValue = omMetadataManager.getKeyTable(getBucketLayout()).get(fromKey);
        if (fromKeyValue == null) {
            // TODO: Add support for renaming open key
            throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND);
        }
        fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
        fromKeyValue.setKeyName(toKeyName);
        // Set modification time
        fromKeyValue.setModificationTime(keyArgs.getModificationTime());
        // Add to cache.
        // fromKey should be deleted, toKey should be added with newly updated
        // omKeyInfo.
        Table<String, OmKeyInfo> keyTable = omMetadataManager.getKeyTable(getBucketLayout());
        keyTable.addCacheEntry(new CacheKey<>(fromKey), new CacheValue<>(Optional.absent(), trxnLogIndex));
        keyTable.addCacheEntry(new CacheKey<>(toKey), new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex));
        omClientResponse = new OMKeyRenameResponse(omResponse.setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(), fromKeyName, toKeyName, fromKeyValue, getBucketLayout());
        result = Result.SUCCESS;
    } catch (IOException ex) {
        result = Result.FAILURE;
        exception = ex;
        omClientResponse = new OMKeyRenameResponse(createErrorOMResponse(omResponse, exception), getBucketLayout());
    } finally {
        addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper);
        if (acquiredLock) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
        }
    }
    auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap, exception, getOmRequest().getUserInfo()));
    switch(result) {
        case SUCCESS:
            LOG.debug("Rename Key is successfully completed for volume:{} bucket:{}" + " fromKey:{} toKey:{}. ", volumeName, bucketName, fromKeyName, toKeyName);
            break;
        case FAILURE:
            ozoneManager.getMetrics().incNumKeyRenameFails();
            LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} " + "toKey:{}. Key: {} not found.", volumeName, bucketName, fromKeyName, toKeyName, fromKeyName);
            break;
        default:
            LOG.error("Unrecognized Result for OMKeyRenameRequest: {}", renameKeyRequest);
    }
    return omClientResponse;
}
Also used: AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger), OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse), OMKeyRenameResponse (org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse), IOException (java.io.IOException), OMMetrics (org.apache.hadoop.ozone.om.OMMetrics), OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse), OzoneManagerProtocolProtos (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos), KeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs), OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), RenameKeyRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest), OMException (org.apache.hadoop.ozone.om.exceptions.OMException)
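
The rename above never rewrites a table row in place: it checks that toKey is free and fromKey exists, then marks the old name as absent in the cache and inserts the new name with the updated OmKeyInfo, both tagged with the transaction log index. Below is a small, self-contained sketch of that two-entry cache update, assuming a plain Map and java.util.Optional as stand-ins for the Ozone Table cache and the Guava Optional used by CacheValue; none of the names are Ozone APIs.

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

public class RenameCacheSketch {
    // Optional.empty() marks a deleted entry, a present value marks a live one.
    private final Map<String, Optional<String>> keyTableCache = new HashMap<>();

    public void rename(String fromKey, String toKey, String keyInfo) {
        Optional<String> existingTo = keyTableCache.get(toKey);
        if (existingTo != null && existingTo.isPresent()) {
            throw new IllegalStateException("Key already exists " + toKey);
        }
        Optional<String> existingFrom = keyTableCache.get(fromKey);
        if (existingFrom == null || !existingFrom.isPresent()) {
            throw new IllegalStateException("Key not found " + fromKey);
        }
        // Mirror the two addCacheEntry calls above: the old name gets an
        // "absent" tombstone, the new name gets the updated key info.
        keyTableCache.put(fromKey, Optional.empty());
        keyTableCache.put(toKey, Optional.of(keyInfo));
    }
}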

Example 18 with AuditLogger

Use of org.apache.hadoop.ozone.audit.AuditLogger in project ozone by apache.

From the class OMKeysRenameRequest, method validateAndUpdateCache:

@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
    RenameKeysRequest renameKeysRequest = getOmRequest().getRenameKeysRequest();
    RenameKeysArgs renameKeysArgs = renameKeysRequest.getRenameKeysArgs();
    String volumeName = renameKeysArgs.getVolumeName();
    String bucketName = renameKeysArgs.getBucketName();
    OMClientResponse omClientResponse = null;
    List<RenameKeysMap> unRenamedKeys = new ArrayList<>();
    // fromKeyName -> toKeyName
    Map<String, String> renamedKeys = new HashMap<>();
    Map<String, OmKeyInfo> fromKeyAndToKeyInfo = new HashMap<>();
    OMMetrics omMetrics = ozoneManager.getMetrics();
    omMetrics.incNumKeyRenames();
    AuditLogger auditLogger = ozoneManager.getAuditLogger();
    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    IOException exception = null;
    OmKeyInfo fromKeyValue = null;
    Result result = null;
    Map<String, String> auditMap = new LinkedHashMap<>();
    String fromKeyName = null;
    String toKeyName = null;
    boolean acquiredLock = false;
    boolean renameStatus = true;
    try {
        ResolvedBucket bucket = ozoneManager.resolveBucketLink(Pair.of(volumeName, bucketName), this);
        bucket.audit(auditMap);
        volumeName = bucket.realVolume();
        bucketName = bucket.realBucket();
        acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
        // Validate bucket and volume exists or not.
        validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
        String volumeOwner = getVolumeOwner(omMetadataManager, volumeName);
        for (RenameKeysMap renameKey : renameKeysArgs.getRenameKeysMapList()) {
            fromKeyName = renameKey.getFromKeyName();
            toKeyName = renameKey.getToKeyName();
            RenameKeysMap.Builder unRenameKey = RenameKeysMap.newBuilder();
            if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
                renameStatus = false;
                unRenamedKeys.add(unRenameKey.setFromKeyName(fromKeyName).setToKeyName(toKeyName).build());
                LOG.error("Key name is empty fromKeyName {} toKeyName {}", fromKeyName, toKeyName);
                continue;
            }
            try {
                // check Acls to see if user has access to perform delete operation
                // on old key and create operation on new key
                checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName, IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY, volumeOwner);
                checkKeyAcls(ozoneManager, volumeName, bucketName, toKeyName, IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY, volumeOwner);
            } catch (Exception ex) {
                renameStatus = false;
                unRenamedKeys.add(unRenameKey.setFromKeyName(fromKeyName).setToKeyName(toKeyName).build());
                LOG.error("Acl check failed for fromKeyName {} toKeyName {}", fromKeyName, toKeyName, ex);
                continue;
            }
            // Check if toKey exists
            String fromKey = omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName);
            String toKey = omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName);
            OmKeyInfo toKeyValue = omMetadataManager.getKeyTable(getBucketLayout()).get(toKey);
            if (toKeyValue != null) {
                renameStatus = false;
                unRenamedKeys.add(unRenameKey.setFromKeyName(fromKeyName).setToKeyName(toKeyName).build());
                LOG.error("Received a request name of new key {} already exists", toKeyName);
            }
            // fromKeyName should exist
            fromKeyValue = omMetadataManager.getKeyTable(getBucketLayout()).get(fromKey);
            if (fromKeyValue == null) {
                renameStatus = false;
                unRenamedKeys.add(unRenameKey.setFromKeyName(fromKeyName).setToKeyName(toKeyName).build());
                LOG.error("Received a request to rename a Key does not exist {}", fromKey);
                continue;
            }
            fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
            fromKeyValue.setKeyName(toKeyName);
            // Set modification time
            fromKeyValue.setModificationTime(Time.now());
            // Add to cache.
            // fromKey should be deleted, toKey should be added with newly updated
            // omKeyInfo.
            Table<String, OmKeyInfo> keyTable = omMetadataManager.getKeyTable(getBucketLayout());
            keyTable.addCacheEntry(new CacheKey<>(fromKey), new CacheValue<>(Optional.absent(), trxnLogIndex));
            keyTable.addCacheEntry(new CacheKey<>(toKey), new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex));
            renamedKeys.put(fromKeyName, toKeyName);
            fromKeyAndToKeyInfo.put(fromKeyName, fromKeyValue);
        }
        OmRenameKeys newOmRenameKeys = new OmRenameKeys(volumeName, bucketName, null, fromKeyAndToKeyInfo);
        omClientResponse = new OMKeysRenameResponse(omResponse.setRenameKeysResponse(RenameKeysResponse.newBuilder().setStatus(renameStatus).addAllUnRenamedKeys(unRenamedKeys)).setStatus(renameStatus ? OK : PARTIAL_RENAME).setSuccess(renameStatus).build(), newOmRenameKeys);
        result = Result.SUCCESS;
    } catch (IOException ex) {
        result = Result.FAILURE;
        exception = ex;
        createErrorOMResponse(omResponse, ex);
        omResponse.setRenameKeysResponse(RenameKeysResponse.newBuilder().setStatus(renameStatus).addAllUnRenamedKeys(unRenamedKeys).build());
        omClientResponse = new OMKeysRenameResponse(omResponse.build());
    } finally {
        addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper);
        if (acquiredLock) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
        }
    }
    auditMap = buildAuditMap(auditMap, renamedKeys, unRenamedKeys);
    auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEYS, auditMap, exception, getOmRequest().getUserInfo()));
    switch(result) {
        case SUCCESS:
            LOG.debug("Rename Keys is successfully completed for auditMap:{}.", auditMap);
            break;
        case FAILURE:
            ozoneManager.getMetrics().incNumKeyRenameFails();
            LOG.error("Rename keys failed for auditMap:{}.", auditMap);
            break;
        default:
            LOG.error("Unrecognized Result for OMKeysRenameRequest: {}", renameKeysRequest);
    }
    return omClientResponse;
}
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), RenameKeysMap (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysMap), ArrayList (java.util.ArrayList), OMMetrics (org.apache.hadoop.ozone.om.OMMetrics), RenameKeysArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysArgs), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), OmRenameKeys (org.apache.hadoop.ozone.om.helpers.OmRenameKeys), RenameKeysRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysRequest), ResolvedBucket (org.apache.hadoop.ozone.om.ResolvedBucket), AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger), OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse), IOException (java.io.IOException), OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse), OMKeysRenameResponse (org.apache.hadoop.ozone.om.response.key.OMKeysRenameResponse), OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager)
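
Unlike the single-key rename, the batch request above does not abort on the first bad entry: each failed pair is appended to unRenamedKeys and the loop continues, and the response status is OK only if every rename succeeded, otherwise PARTIAL_RENAME. The following sketch shows the same collect-failures-and-continue shape with JDK types only; the file names and the renameOne helper are made up for illustration.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class BatchRenameSketch {
    public static void main(String[] args) {
        Map<String, String> requested = new LinkedHashMap<>();
        requested.put("dir/a.txt", "dir/b.txt");
        requested.put("", "dir/c.txt");          // invalid: empty source name
        requested.put("missing.txt", "d.txt");   // will fail the existence check

        List<String> unRenamed = new ArrayList<>();
        boolean renameStatus = true;
        for (Map.Entry<String, String> e : requested.entrySet()) {
            try {
                renameOne(e.getKey(), e.getValue());
            } catch (Exception ex) {
                // Record the failed pair and keep going, as the loop above does
                // with unRenamedKeys, instead of aborting the whole batch.
                renameStatus = false;
                unRenamed.add(e.getKey() + " -> " + e.getValue());
            }
        }
        System.out.println((renameStatus ? "OK" : "PARTIAL_RENAME") + ", failed: " + unRenamed);
    }

    private static void renameOne(String from, String to) {
        if (from.isEmpty() || to.isEmpty()) {
            throw new IllegalArgumentException("Key name is empty");
        }
        if ("missing.txt".equals(from)) {
            throw new IllegalStateException("Key not found " + from);
        }
        // A real rename would swap the cache entries here, as in Example 17.
    }
}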

Example 19 with AuditLogger

Use of org.apache.hadoop.ozone.audit.AuditLogger in project ozone by apache.

From the class OMAllocateBlockRequest, method validateAndUpdateCache:

@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
    OzoneManagerProtocolProtos.AllocateBlockRequest allocateBlockRequest = getOmRequest().getAllocateBlockRequest();
    OzoneManagerProtocolProtos.KeyArgs keyArgs = allocateBlockRequest.getKeyArgs();
    OzoneManagerProtocolProtos.KeyLocation blockLocation = allocateBlockRequest.getKeyLocation();
    Preconditions.checkNotNull(blockLocation);
    String volumeName = keyArgs.getVolumeName();
    String bucketName = keyArgs.getBucketName();
    String keyName = keyArgs.getKeyName();
    long clientID = allocateBlockRequest.getClientID();
    OMMetrics omMetrics = ozoneManager.getMetrics();
    omMetrics.incNumBlockAllocateCalls();
    AuditLogger auditLogger = ozoneManager.getAuditLogger();
    Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
    auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID));
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    String openKeyName = null;
    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
    OMClientResponse omClientResponse = null;
    OmKeyInfo openKeyInfo = null;
    IOException exception = null;
    OmBucketInfo omBucketInfo = null;
    boolean acquiredLock = false;
    try {
        keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
        volumeName = keyArgs.getVolumeName();
        bucketName = keyArgs.getBucketName();
        // check Acl
        checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.WRITE, allocateBlockRequest.getClientID());
        validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
        // Here we don't acquire bucket/volume lock because for a single client
        // allocateBlock is called in serial fashion.
        openKeyName = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, clientID);
        openKeyInfo = omMetadataManager.getOpenKeyTable(getBucketLayout()).get(openKeyName);
        if (openKeyInfo == null) {
            throw new OMException("Open Key not found " + openKeyName, KEY_NOT_FOUND);
        }
        List<OmKeyLocationInfo> newLocationList = Collections.singletonList(OmKeyLocationInfo.getFromProtobuf(blockLocation));
        acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
        omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
        // check bucket and volume quota
        long preAllocatedSpace = newLocationList.size() * ozoneManager.getScmBlockSize() * openKeyInfo.getReplicationConfig().getRequiredNodes();
        checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace);
        // Append new block
        openKeyInfo.appendNewBlocks(newLocationList, false);
        // Set modification time.
        openKeyInfo.setModificationTime(keyArgs.getModificationTime());
        // Set the UpdateID to current transactionLogIndex
        openKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
        // Add to cache.
        omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry(new CacheKey<>(openKeyName), new CacheValue<>(Optional.of(openKeyInfo), trxnLogIndex));
        omBucketInfo.incrUsedBytes(preAllocatedSpace);
        omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder().setKeyLocation(blockLocation).build());
        omClientResponse = new OMAllocateBlockResponse(omResponse.build(), openKeyInfo, clientID, omBucketInfo.copyObject(), getBucketLayout());
        LOG.debug("Allocated block for Volume:{}, Bucket:{}, OpenKey:{}", volumeName, bucketName, openKeyName);
    } catch (IOException ex) {
        omMetrics.incNumBlockAllocateCallFails();
        exception = ex;
        omClientResponse = new OMAllocateBlockResponse(createErrorOMResponse(omResponse, exception), getBucketLayout());
        LOG.error("Allocate Block failed. Volume:{}, Bucket:{}, OpenKey:{}. " + "Exception:{}", volumeName, bucketName, openKeyName, exception);
    } finally {
        addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper);
        if (acquiredLock) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
        }
    }
    auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap, exception, getOmRequest().getUserInfo()));
    return omClientResponse;
}
Also used: OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo), AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger), OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse), IOException (java.io.IOException), OMAllocateBlockResponse (org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponse), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo), OMMetrics (org.apache.hadoop.ozone.om.OMMetrics), OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse), OzoneManagerProtocolProtos (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos), KeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs), OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), AllocateBlockRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest), OMException (org.apache.hadoop.ozone.om.exceptions.OMException)
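
The quota check above charges the bucket for the whole block up front: preAllocatedSpace is the number of new block locations times the SCM block size times the replication factor, and incrUsedBytes(preAllocatedSpace) is applied before any data is written. A worked example, assuming a 256 MB SCM block size and three-way replication (illustrative values, not read from the snippet):

public class PreAllocatedSpaceExample {
    public static void main(String[] args) {
        long scmBlockSize = 256L * 1024 * 1024; // assumed 256 MB block size
        int requiredNodes = 3;                  // e.g. three-way replication
        int newBlocks = 1;                      // one KeyLocation per AllocateBlock call

        // Mirrors: newLocationList.size() * scmBlockSize * requiredNodes
        long preAllocatedSpace = (long) newBlocks * scmBlockSize * requiredNodes;
        System.out.println("Charged against bucket quota: " + preAllocatedSpace + " bytes (768 MB)");
    }
}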

Example 20 with AuditLogger

Use of org.apache.hadoop.ozone.audit.AuditLogger in project ozone by apache.

From the class OMKeyCommitRequest, method validateAndUpdateCache:

@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
    CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest();
    KeyArgs commitKeyArgs = commitKeyRequest.getKeyArgs();
    String volumeName = commitKeyArgs.getVolumeName();
    String bucketName = commitKeyArgs.getBucketName();
    String keyName = commitKeyArgs.getKeyName();
    OMMetrics omMetrics = ozoneManager.getMetrics();
    omMetrics.incNumKeyCommits();
    AuditLogger auditLogger = ozoneManager.getAuditLogger();
    Map<String, String> auditMap = buildKeyArgsAuditMap(commitKeyArgs);
    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
    IOException exception = null;
    OmKeyInfo omKeyInfo = null;
    OmBucketInfo omBucketInfo = null;
    OMClientResponse omClientResponse = null;
    boolean bucketLockAcquired = false;
    Result result;
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    try {
        commitKeyArgs = resolveBucketLink(ozoneManager, commitKeyArgs, auditMap);
        volumeName = commitKeyArgs.getVolumeName();
        bucketName = commitKeyArgs.getBucketName();
        // check Acl
        checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.WRITE, commitKeyRequest.getClientID());
        String dbOzoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
        String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, commitKeyRequest.getClientID());
        List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
        for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) {
            OmKeyLocationInfo locationInfo = OmKeyLocationInfo.getFromProtobuf(keyLocation);
            // Strip the block token before adding the key to cache so that the
            // token does not pass on to the client when returning from cache.
            if (ozoneManager.isGrpcBlockTokenEnabled()) {
                locationInfo.setToken(null);
            }
            locationInfoList.add(locationInfo);
        }
        bucketLockAcquired = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
        validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
        omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
        // Check for directory exists with same name, if it exists throw error.
        if (ozoneManager.getEnableFileSystemPaths()) {
            if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName, omMetadataManager)) {
                throw new OMException("Can not create file: " + keyName + " as there is already directory in the given path", NOT_A_FILE);
            }
            // Ensure the parent exist.
            if (!"".equals(OzoneFSUtils.getParent(keyName)) && !checkDirectoryAlreadyExists(volumeName, bucketName, OzoneFSUtils.getParent(keyName), omMetadataManager)) {
                throw new OMException("Cannot create file : " + keyName + " as parent directory doesn't exist", OMException.ResultCodes.DIRECTORY_NOT_FOUND);
            }
        }
        omKeyInfo = omMetadataManager.getOpenKeyTable(getBucketLayout()).get(dbOpenKey);
        if (omKeyInfo == null) {
            throw new OMException("Failed to commit key, as " + dbOpenKey + "entry is not found in the OpenKey table", KEY_NOT_FOUND);
        }
        omKeyInfo.setDataSize(commitKeyArgs.getDataSize());
        omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime());
        // Update the block length for each block
        List<OmKeyLocationInfo> allocatedLocationInfoList = omKeyInfo.getLatestVersionLocations().getLocationList();
        omKeyInfo.updateLocationInfoList(locationInfoList, false);
        // Set the UpdateID to current transactionLogIndex
        omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
        // If bucket versioning is turned on during the update, between key
        // creation and key commit, old versions will be just overwritten and
        // not kept. Bucket versioning will be effective from the first key
        // creation after the knob turned on.
        RepeatedOmKeyInfo oldKeyVersionsToDelete = null;
        OmKeyInfo keyToDelete = omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey);
        if (keyToDelete != null && !omBucketInfo.getIsVersionEnabled()) {
            oldKeyVersionsToDelete = getOldVersionsToCleanUp(dbOzoneKey, keyToDelete, omMetadataManager, trxnLogIndex, ozoneManager.isRatisEnabled());
        }
        // Add to cache of open key table and key table.
        omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry(new CacheKey<>(dbOpenKey), new CacheValue<>(Optional.absent(), trxnLogIndex));
        omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry(new CacheKey<>(dbOzoneKey), new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
        if (oldKeyVersionsToDelete != null) {
            OMFileRequest.addDeletedTableCacheEntry(omMetadataManager, dbOzoneKey, oldKeyVersionsToDelete, trxnLogIndex);
        }
        long scmBlockSize = ozoneManager.getScmBlockSize();
        int factor = omKeyInfo.getReplicationConfig().getRequiredNodes();
        omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
        // Block was pre-requested and UsedBytes updated when createKey and
        // AllocatedBlock. The space occupied by the Key shall be based on
        // the actual Key size, and the total Block size applied before should
        // be subtracted.
        long correctedSpace = omKeyInfo.getDataSize() * factor - allocatedLocationInfoList.size() * scmBlockSize * factor;
        // Subtract the size of blocks to be overwritten.
        if (keyToDelete != null) {
            correctedSpace -= keyToDelete.getDataSize() * keyToDelete.getReplicationConfig().getRequiredNodes();
        }
        omBucketInfo.incrUsedBytes(correctedSpace);
        omClientResponse = new OMKeyCommitResponse(omResponse.build(), omKeyInfo, dbOzoneKey, dbOpenKey, omBucketInfo.copyObject(), oldKeyVersionsToDelete);
        result = Result.SUCCESS;
    } catch (IOException ex) {
        result = Result.FAILURE;
        exception = ex;
        omClientResponse = new OMKeyCommitResponse(createErrorOMResponse(omResponse, exception), getBucketLayout());
    } finally {
        addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper);
        if (bucketLockAcquired) {
            omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
        }
    }
    auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap, exception, getOmRequest().getUserInfo()));
    processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics, exception, omKeyInfo, result);
    return omClientResponse;
}
Also used: OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo), ArrayList (java.util.ArrayList), OMMetrics (org.apache.hadoop.ozone.om.OMMetrics), OMKeyCommitResponse (org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo), AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger), OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse), CommitKeyRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest), KeyLocation (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation), KeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs), IOException (java.io.IOException), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo), OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse), OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager), OMException (org.apache.hadoop.ozone.om.exceptions.OMException)
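
Commit is where the optimistic charge from createKey/allocateBlock is trued up: correctedSpace adds the replicated size of the data actually written, subtracts the block space that was pre-charged, and, when an old version is overwritten, also subtracts that key's replicated size. A worked example with illustrative numbers (256 MB block size, replication three, 100 MB written over an existing 40 MB key):

public class CorrectedSpaceExample {
    public static void main(String[] args) {
        long scmBlockSize = 256L * 1024 * 1024; // assumed 256 MB
        int factor = 3;                          // replication factor of the committed key
        long dataSize = 100L * 1024 * 1024;      // 100 MB actually written
        int allocatedBlocks = 1;                 // blocks charged during create/allocate

        // Mirrors: omKeyInfo.getDataSize() * factor
        //          - allocatedLocationInfoList.size() * scmBlockSize * factor
        long correctedSpace = dataSize * factor - (long) allocatedBlocks * scmBlockSize * factor;

        // If an old version is overwritten, its replicated size is released too.
        long overwrittenDataSize = 40L * 1024 * 1024; // 40 MB at replication 3 (illustrative)
        correctedSpace -= overwrittenDataSize * 3;

        // 300 MB - 768 MB - 120 MB = -588 MB: applying incrUsedBytes(correctedSpace)
        // shrinks the bucket's usedBytes by the over-charged and released space.
        System.out.println("correctedSpace = " + correctedSpace + " bytes");
    }
}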

Aggregations

AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger): 25
IOException (java.io.IOException): 18
OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager): 18
OMMetrics (org.apache.hadoop.ozone.om.OMMetrics): 18
OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse): 18
OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse): 18
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 14
OzoneManagerProtocolProtos (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos): 14
KeyArgs (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs): 12
OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo): 11
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 11
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 7
AuditMessage (org.apache.hadoop.ozone.audit.AuditMessage): 7
OmMetadataManagerImpl (org.apache.hadoop.ozone.om.OmMetadataManagerImpl): 7
OzoneManager (org.apache.hadoop.ozone.om.OzoneManager): 7
Before (org.junit.Before): 7
ArrayList (java.util.ArrayList): 6
ResolvedBucket (org.apache.hadoop.ozone.om.ResolvedBucket): 6
OmVolumeArgs (org.apache.hadoop.ozone.om.helpers.OmVolumeArgs): 5
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 4