Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation in the Apache Ozone project.
From the class OMKeyCommitRequestWithFSO, the method validateAndUpdateCache:
@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
    long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {

  CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest();
  KeyArgs commitKeyArgs = commitKeyRequest.getKeyArgs();
  String volumeName = commitKeyArgs.getVolumeName();
  String bucketName = commitKeyArgs.getBucketName();
  String keyName = commitKeyArgs.getKeyName();

  OMMetrics omMetrics = ozoneManager.getMetrics();
  omMetrics.incNumKeyCommits();

  AuditLogger auditLogger = ozoneManager.getAuditLogger();
  Map<String, String> auditMap = buildKeyArgsAuditMap(commitKeyArgs);

  OMResponse.Builder omResponse =
      OmResponseUtil.getOMResponseBuilder(getOmRequest());
  IOException exception = null;
  OmKeyInfo omKeyInfo = null;
  OmBucketInfo omBucketInfo = null;
  OMClientResponse omClientResponse = null;
  boolean bucketLockAcquired = false;
  Result result;

  OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
  try {
    commitKeyArgs = resolveBucketLink(ozoneManager, commitKeyArgs, auditMap);
    volumeName = commitKeyArgs.getVolumeName();
    bucketName = commitKeyArgs.getBucketName();

    // check Acl
    checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, keyName,
        IAccessAuthorizer.ACLType.WRITE, commitKeyRequest.getClientID());

    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
    Iterator<Path> pathComponents = Paths.get(keyName).iterator();
    String dbOpenFileKey = null;

    List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
    for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) {
      locationInfoList.add(OmKeyLocationInfo.getFromProtobuf(keyLocation));
    }

    bucketLockAcquired = omMetadataManager.getLock()
        .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);

    validateBucketAndVolume(omMetadataManager, volumeName, bucketName);

    String fileName = OzoneFSUtils.getFileName(keyName);
    omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
    long bucketId = omBucketInfo.getObjectID();
    long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
        keyName, omMetadataManager,
        "Cannot create file : " + keyName
            + " as parent directory doesn't exist");
    String dbFileKey = omMetadataManager.getOzonePathKey(parentID, fileName);
    dbOpenFileKey = omMetadataManager.getOpenFileName(parentID, fileName,
        commitKeyRequest.getClientID());

    omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(true,
        omMetadataManager, dbOpenFileKey, keyName);
    if (omKeyInfo == null) {
      throw new OMException("Failed to commit key, as " + dbOpenFileKey
          + " entry is not found in the OpenKey table", KEY_NOT_FOUND);
    }
    omKeyInfo.setDataSize(commitKeyArgs.getDataSize());
    omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime());

    // Update the block length for each block.
    omKeyInfo.updateLocationInfoList(locationInfoList, false);

    // Set the UpdateID to the current transactionLogIndex.
    omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());

    // If bucket versioning is turned on during the update, between key
    // creation and key commit, old versions are simply overwritten and
    // not kept. Bucket versioning becomes effective from the first key
    // creation after the knob is turned on.
    RepeatedOmKeyInfo oldKeyVersionsToDelete = null;
    OmKeyInfo keyToDelete =
        omMetadataManager.getKeyTable(getBucketLayout()).get(dbFileKey);
    if (keyToDelete != null && !omBucketInfo.getIsVersionEnabled()) {
      oldKeyVersionsToDelete = getOldVersionsToCleanUp(dbFileKey,
          keyToDelete, omMetadataManager, trxnLogIndex,
          ozoneManager.isRatisEnabled());
    }

    // Add to the caches of the open key table and the key table.
    OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, dbFileKey,
        null, fileName, trxnLogIndex);
    OMFileRequest.addFileTableCacheEntry(omMetadataManager, dbFileKey,
        omKeyInfo, fileName, trxnLogIndex);
    if (oldKeyVersionsToDelete != null) {
      OMFileRequest.addDeletedTableCacheEntry(omMetadataManager, dbFileKey,
          oldKeyVersionsToDelete, trxnLogIndex);
    }

    long scmBlockSize = ozoneManager.getScmBlockSize();
    int factor = omKeyInfo.getReplicationConfig().getRequiredNodes();
    // Blocks were pre-requested and UsedBytes was updated at createKey and
    // allocateBlock time. The space occupied by the key must be based on
    // its actual size, so the total block size reserved earlier is
    // subtracted.
    long correctedSpace = omKeyInfo.getDataSize() * factor
        - locationInfoList.size() * scmBlockSize * factor;
    // Subtract the size of blocks to be overwritten.
    if (keyToDelete != null) {
      correctedSpace -= keyToDelete.getDataSize()
          * keyToDelete.getReplicationConfig().getRequiredNodes();
    }
    omBucketInfo.incrUsedBytes(correctedSpace);

    omClientResponse = new OMKeyCommitResponseWithFSO(omResponse.build(),
        omKeyInfo, dbFileKey, dbOpenFileKey, omBucketInfo.copyObject(),
        oldKeyVersionsToDelete);

    result = Result.SUCCESS;
  } catch (IOException ex) {
    result = Result.FAILURE;
    exception = ex;
    omClientResponse = new OMKeyCommitResponseWithFSO(
        createErrorOMResponse(omResponse, exception), getBucketLayout());
  } finally {
    addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
        omDoubleBufferHelper);
    if (bucketLockAcquired) {
      omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
          bucketName);
    }
  }

  auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
      exception, getOmRequest().getUserInfo()));

  processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics,
      exception, omKeyInfo, result);

  return omClientResponse;
}
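The usedBytes correction near the end of the method is easiest to follow with concrete numbers. The standalone sketch below is purely hypothetical: the 256 MB SCM block size, the three-node replication factor, and the key sizes are assumptions for illustration, not values taken from the request above.

public final class CommitSpaceExample {
  public static void main(String[] args) {
    long scmBlockSize = 256L * 1024 * 1024;   // assumed 256 MB SCM block size
    int factor = 3;                           // assumed RATIS/THREE -> 3 required nodes
    long committedSize = 300L * 1024 * 1024;  // data actually written by the client
    int preAllocatedBlocks = 2;               // blocks reserved at createKey/allocateBlock

    // Space charged so far: 2 blocks * 256 MB * 3 replicas = 1536 MB.
    // Space actually used:  300 MB * 3 replicas            =  900 MB.
    long correctedSpace = committedSize * factor
        - (long) preAllocatedBlocks * scmBlockSize * factor;

    // The value is negative, so incrUsedBytes(correctedSpace) returns the
    // over-reserved 636 MB to the bucket's usedBytes accounting.
    System.out.println("correctedSpace (bytes) = " + correctedSpace);
  }
}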
Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation in the Apache Ozone project.
From the test class TestOMKeyCommitRequest, the helper method getKeyLocation:
/**
* Create KeyLocation list.
*/
private List<KeyLocation> getKeyLocation(int count) {
  List<KeyLocation> keyLocations = new ArrayList<>();
  for (int i = 0; i < count; i++) {
    KeyLocation keyLocation = KeyLocation.newBuilder()
        .setBlockID(HddsProtos.BlockID.newBuilder()
            .setContainerBlockID(HddsProtos.ContainerBlockID.newBuilder()
                .setContainerID(i + 1000).setLocalID(i + 100).build()))
        .setOffset(0).setLength(200).setCreateVersion(version)
        .build();
    keyLocations.add(keyLocation);
  }
  return keyLocations;
}
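As a usage sketch (not part of the quoted test class), a KeyLocation list built this way would typically be attached to a CommitKeyRequest and later converted back with OmKeyLocationInfo.getFromProtobuf, mirroring the loops in both validateAndUpdateCache implementations above. The test method name and the volumeName, bucketName, keyName and clientID identifiers are assumptions about the surrounding test fixture, and JUnit's Assert is assumed to be imported.

@Test
public void testKeyLocationRoundTrip() {
  // Build a CommitKeyRequest carrying the generated KeyLocation list.
  List<KeyLocation> keyLocations = getKeyLocation(3);

  KeyArgs keyArgs = KeyArgs.newBuilder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(keyName)
      .setDataSize(3 * 200L)                 // three blocks of length 200
      .addAllKeyLocations(keyLocations)      // repeated KeyLocation field
      .build();

  CommitKeyRequest commitKeyRequest = CommitKeyRequest.newBuilder()
      .setKeyArgs(keyArgs)
      .setClientID(clientID)
      .build();

  // Convert each proto KeyLocation back to OmKeyLocationInfo, as the
  // commit request handlers do before updating the key's block list.
  for (KeyLocation keyLocation :
      commitKeyRequest.getKeyArgs().getKeyLocationsList()) {
    OmKeyLocationInfo info = OmKeyLocationInfo.getFromProtobuf(keyLocation);
    Assert.assertEquals(keyLocation.getLength(), info.getLength());
  }
}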
Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation in the Apache Ozone project.
From the class OMKeyCommitRequest, the method validateAndUpdateCache:
@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
    long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {

  CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest();
  KeyArgs commitKeyArgs = commitKeyRequest.getKeyArgs();
  String volumeName = commitKeyArgs.getVolumeName();
  String bucketName = commitKeyArgs.getBucketName();
  String keyName = commitKeyArgs.getKeyName();

  OMMetrics omMetrics = ozoneManager.getMetrics();
  omMetrics.incNumKeyCommits();

  AuditLogger auditLogger = ozoneManager.getAuditLogger();
  Map<String, String> auditMap = buildKeyArgsAuditMap(commitKeyArgs);

  OMResponse.Builder omResponse =
      OmResponseUtil.getOMResponseBuilder(getOmRequest());
  IOException exception = null;
  OmKeyInfo omKeyInfo = null;
  OmBucketInfo omBucketInfo = null;
  OMClientResponse omClientResponse = null;
  boolean bucketLockAcquired = false;
  Result result;

  OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
  try {
    commitKeyArgs = resolveBucketLink(ozoneManager, commitKeyArgs, auditMap);
    volumeName = commitKeyArgs.getVolumeName();
    bucketName = commitKeyArgs.getBucketName();

    // check Acl
    checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, keyName,
        IAccessAuthorizer.ACLType.WRITE, commitKeyRequest.getClientID());

    String dbOzoneKey =
        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
    String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName,
        keyName, commitKeyRequest.getClientID());

    List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
    for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) {
      OmKeyLocationInfo locationInfo =
          OmKeyLocationInfo.getFromProtobuf(keyLocation);
      // Strip out the block token so that it is not cached and passed on to
      // the client when returning from cache.
      if (ozoneManager.isGrpcBlockTokenEnabled()) {
        locationInfo.setToken(null);
      }
      locationInfoList.add(locationInfo);
    }

    bucketLockAcquired = omMetadataManager.getLock()
        .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);

    validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
    omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);

    // Check whether a directory with the same name exists; if it does,
    // throw an error.
    if (ozoneManager.getEnableFileSystemPaths()) {
      if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName,
          omMetadataManager)) {
        throw new OMException("Can not create file: " + keyName
            + " as there is already directory in the given path", NOT_A_FILE);
      }
      // Ensure the parent exists.
      if (!"".equals(OzoneFSUtils.getParent(keyName))
          && !checkDirectoryAlreadyExists(volumeName, bucketName,
              OzoneFSUtils.getParent(keyName), omMetadataManager)) {
        throw new OMException("Cannot create file : " + keyName
            + " as parent directory doesn't exist",
            OMException.ResultCodes.DIRECTORY_NOT_FOUND);
      }
    }

    omKeyInfo =
        omMetadataManager.getOpenKeyTable(getBucketLayout()).get(dbOpenKey);
    if (omKeyInfo == null) {
      throw new OMException("Failed to commit key, as " + dbOpenKey
          + " entry is not found in the OpenKey table", KEY_NOT_FOUND);
    }
    omKeyInfo.setDataSize(commitKeyArgs.getDataSize());
    omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime());

    // Update the block length for each block.
    List<OmKeyLocationInfo> allocatedLocationInfoList =
        omKeyInfo.getLatestVersionLocations().getLocationList();
    omKeyInfo.updateLocationInfoList(locationInfoList, false);

    // Set the UpdateID to the current transactionLogIndex.
    omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());

    // If bucket versioning is turned on during the update, between key
    // creation and key commit, old versions are simply overwritten and
    // not kept. Bucket versioning becomes effective from the first key
    // creation after the knob is turned on.
    RepeatedOmKeyInfo oldKeyVersionsToDelete = null;
    OmKeyInfo keyToDelete =
        omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey);
    if (keyToDelete != null && !omBucketInfo.getIsVersionEnabled()) {
      oldKeyVersionsToDelete = getOldVersionsToCleanUp(dbOzoneKey,
          keyToDelete, omMetadataManager, trxnLogIndex,
          ozoneManager.isRatisEnabled());
    }

    // Add to the caches of the open key table and the key table.
    omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry(
        new CacheKey<>(dbOpenKey),
        new CacheValue<>(Optional.absent(), trxnLogIndex));
    omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry(
        new CacheKey<>(dbOzoneKey),
        new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
    if (oldKeyVersionsToDelete != null) {
      OMFileRequest.addDeletedTableCacheEntry(omMetadataManager, dbOzoneKey,
          oldKeyVersionsToDelete, trxnLogIndex);
    }

    long scmBlockSize = ozoneManager.getScmBlockSize();
    int factor = omKeyInfo.getReplicationConfig().getRequiredNodes();
    omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
    // Blocks were pre-requested and UsedBytes was updated at createKey and
    // allocateBlock time. The space occupied by the key must be based on
    // its actual size, so the total block size reserved earlier is
    // subtracted.
    long correctedSpace = omKeyInfo.getDataSize() * factor
        - allocatedLocationInfoList.size() * scmBlockSize * factor;
    // Subtract the size of blocks to be overwritten.
    if (keyToDelete != null) {
      correctedSpace -= keyToDelete.getDataSize()
          * keyToDelete.getReplicationConfig().getRequiredNodes();
    }
    omBucketInfo.incrUsedBytes(correctedSpace);

    omClientResponse = new OMKeyCommitResponse(omResponse.build(), omKeyInfo,
        dbOzoneKey, dbOpenKey, omBucketInfo.copyObject(),
        oldKeyVersionsToDelete);

    result = Result.SUCCESS;
  } catch (IOException ex) {
    result = Result.FAILURE;
    exception = ex;
    omClientResponse = new OMKeyCommitResponse(
        createErrorOMResponse(omResponse, exception), getBucketLayout());
  } finally {
    addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
        omDoubleBufferHelper);
    if (bucketLockAcquired) {
      omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
          bucketName);
    }
  }

  auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
      exception, getOmRequest().getUserInfo()));

  processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics,
      exception, omKeyInfo, result);

  return omClientResponse;
}
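This non-FSO path applies the same usedBytes correction, but it is driven by the previously allocated block list and, when versioning is disabled, it also releases the space of the key being overwritten. The standalone sketch below illustrates that overwrite case; all sizes and the replication factor are hypothetical assumptions, not values from the code above.

public final class OverwriteSpaceExample {
  public static void main(String[] args) {
    long scmBlockSize = 256L * 1024 * 1024;       // assumed SCM block size
    int factor = 3;                               // assumed replication factor

    long committedSize = 100L * 1024 * 1024;      // new key: 100 MB actually written
    int allocatedBlocks = 1;                      // one 256 MB block was reserved

    long oldKeySize = 400L * 1024 * 1024;         // existing key being overwritten
    int oldFactor = 3;                            // its replication factor

    long correctedSpace = committedSize * factor
        - (long) allocatedBlocks * scmBlockSize * factor;   // 300 MB - 768 MB = -468 MB
    correctedSpace -= oldKeySize * oldFactor;               // minus another 1200 MB

    // incrUsedBytes(correctedSpace) shrinks the bucket's usedBytes by the
    // over-reservation plus the replaced key's replicated size.
    System.out.println("correctedSpace (bytes) = " + correctedSpace);
  }
}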