use of org.apache.hadoop.ozone.om.helpers.OmVolumeArgs in project ozone by apache.
the class OMBucketSetPropertyRequest method validateAndUpdateCache.
@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
    long transactionLogIndex,
    OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
  SetBucketPropertyRequest setBucketPropertyRequest =
      getOmRequest().getSetBucketPropertyRequest();
  Preconditions.checkNotNull(setBucketPropertyRequest);
  OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
  OMMetrics omMetrics = ozoneManager.getMetrics();
  omMetrics.incNumBucketUpdates();
  BucketArgs bucketArgs = setBucketPropertyRequest.getBucketArgs();
  OmBucketArgs omBucketArgs = OmBucketArgs.getFromProtobuf(bucketArgs);
  String volumeName = bucketArgs.getVolumeName();
  String bucketName = bucketArgs.getBucketName();
  OMResponse.Builder omResponse =
      OmResponseUtil.getOMResponseBuilder(getOmRequest());
  OmBucketInfo omBucketInfo = null;
  AuditLogger auditLogger = ozoneManager.getAuditLogger();
  OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
  IOException exception = null;
  boolean acquiredBucketLock = false, success = true;
  OMClientResponse omClientResponse = null;
  try {
    // Check ACLs.
    if (ozoneManager.getAclsEnabled()) {
      checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET,
          OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE,
          volumeName, bucketName, null);
    }
    // Acquire the bucket lock.
    acquiredBucketLock = omMetadataManager.getLock()
        .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
    String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
    OmBucketInfo dbBucketInfo =
        omMetadataManager.getBucketTable().get(bucketKey);
    // Check if the bucket exists.
    if (dbBucketInfo == null) {
      LOG.debug("bucket: {} not found ", bucketName);
      throw new OMException("Bucket doesn't exist",
          OMException.ResultCodes.BUCKET_NOT_FOUND);
    }
    OmBucketInfo.Builder bucketInfoBuilder = OmBucketInfo.newBuilder();
    bucketInfoBuilder.setVolumeName(dbBucketInfo.getVolumeName())
        .setBucketName(dbBucketInfo.getBucketName())
        .setObjectID(dbBucketInfo.getObjectID())
        .setUpdateID(transactionLogIndex);
    bucketInfoBuilder.addAllMetadata(
        KeyValueUtil.getFromProtobuf(bucketArgs.getMetadataList()));
    // Check whether the StorageType needs to be updated.
    StorageType storageType = omBucketArgs.getStorageType();
    if (storageType != null) {
      bucketInfoBuilder.setStorageType(storageType);
      LOG.debug("Updating bucket storage type for bucket: {} in volume: {}",
          bucketName, volumeName);
    } else {
      bucketInfoBuilder.setStorageType(dbBucketInfo.getStorageType());
    }
    // Check whether versioning needs to be updated.
    Boolean versioning = omBucketArgs.getIsVersionEnabled();
    if (versioning != null) {
      bucketInfoBuilder.setIsVersionEnabled(versioning);
      LOG.debug("Updating bucket versioning for bucket: {} in volume: {}",
          bucketName, volumeName);
    } else {
      bucketInfoBuilder.setIsVersionEnabled(dbBucketInfo.getIsVersionEnabled());
    }
    // Check whether quotaInBytes and quotaInNamespace need to be updated.
    String volumeKey = omMetadataManager.getVolumeKey(volumeName);
    OmVolumeArgs omVolumeArgs =
        omMetadataManager.getVolumeTable().get(volumeKey);
    if (checkQuotaBytesValid(omMetadataManager, omVolumeArgs, omBucketArgs)) {
      bucketInfoBuilder.setQuotaInBytes(omBucketArgs.getQuotaInBytes());
    } else {
      bucketInfoBuilder.setQuotaInBytes(dbBucketInfo.getQuotaInBytes());
    }
    if (checkQuotaNamespaceValid(omVolumeArgs, omBucketArgs)) {
      bucketInfoBuilder.setQuotaInNamespace(omBucketArgs.getQuotaInNamespace());
    } else {
      bucketInfoBuilder.setQuotaInNamespace(dbBucketInfo.getQuotaInNamespace());
    }
    bucketInfoBuilder.setCreationTime(dbBucketInfo.getCreationTime());
    bucketInfoBuilder.setModificationTime(
        setBucketPropertyRequest.getModificationTime());
    // Carry over acls from dbBucketInfo, if it has any.
    if (dbBucketInfo.getAcls() != null) {
      bucketInfoBuilder.setAcls(dbBucketInfo.getAcls());
    }
    // Set the objectID to the dbBucketInfo objectID, if present.
    if (dbBucketInfo.getObjectID() != 0) {
      bucketInfoBuilder.setObjectID(dbBucketInfo.getObjectID());
    }
    // Set the updateID to the current transaction log index.
    bucketInfoBuilder.setUpdateID(transactionLogIndex);
    // Quota used remains unchanged.
    bucketInfoBuilder.setUsedBytes(dbBucketInfo.getUsedBytes());
    bucketInfoBuilder.setUsedNamespace(dbBucketInfo.getUsedNamespace());
    omBucketInfo = bucketInfoBuilder.build();
    // Update the table cache.
    omMetadataManager.getBucketTable().addCacheEntry(
        new CacheKey<>(bucketKey),
        new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
    omResponse.setSetBucketPropertyResponse(
        SetBucketPropertyResponse.newBuilder().build());
    omClientResponse =
        new OMBucketSetPropertyResponse(omResponse.build(), omBucketInfo);
  } catch (IOException ex) {
    success = false;
    exception = ex;
    omClientResponse = new OMBucketSetPropertyResponse(
        createErrorOMResponse(omResponse, exception));
  } finally {
    addResponseToDoubleBuffer(transactionLogIndex, omClientResponse,
        ozoneManagerDoubleBufferHelper);
    if (acquiredBucketLock) {
      omMetadataManager.getLock()
          .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
    }
  }
  // Perform audit logging outside of the lock.
  auditLog(auditLogger, buildAuditMessage(OMAction.UPDATE_BUCKET,
      omBucketArgs.toAuditMap(), exception, userInfo));
  // Return the response.
  if (success) {
    LOG.debug("Setting bucket property for bucket:{} in volume:{}",
        bucketName, volumeName);
    return omClientResponse;
  } else {
    LOG.error("Setting bucket property failed for bucket:{} in volume:{}",
        bucketName, volumeName, exception);
    omMetrics.incNumBucketUpdateFails();
    return omClientResponse;
  }
}
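For context, this handler is normally reached through the Ozone client API rather than by hand-built protobuf. Below is a minimal, hedged sketch of a client call that results in a SetBucketProperty request being processed by the method above; the volume and bucket names are hypothetical, and a reachable cluster configured via ozone-site.xml is assumed.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public final class SetBucketPropertyExample {
  public static void main(String[] args) throws Exception {
    try (OzoneClient client =
        OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
      // Hypothetical volume/bucket names.
      OzoneBucket bucket = client.getObjectStore()
          .getVolume("vol1").getBucket("bucket1");
      // Each setter issues a SetBucketProperty request, which lands in
      // OMBucketSetPropertyRequest#validateAndUpdateCache on the OM.
      bucket.setVersioning(true);
      bucket.setStorageType(StorageType.SSD);
    }
  }
}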
use of org.apache.hadoop.ozone.om.helpers.OmVolumeArgs in project ozone by apache.
the class VolumeManagerImpl method removeAcl.
/**
 * Remove acl for an Ozone object. Returns true if the acl is removed
 * successfully, else false.
 *
 * @param obj Ozone object.
 * @param acl Ozone acl to be removed.
 * @throws IOException if there is an error.
 */
@Override
public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
  Objects.requireNonNull(obj);
  Objects.requireNonNull(acl);
  if (!obj.getResourceType().equals(OzoneObj.ResourceType.VOLUME)) {
    throw new IllegalArgumentException("Unexpected argument passed to "
        + "VolumeManager. OzoneObj type:" + obj.getResourceType());
  }
  String volume = obj.getVolumeName();
  metadataManager.getLock().acquireWriteLock(VOLUME_LOCK, volume);
  try {
    String dbVolumeKey = metadataManager.getVolumeKey(volume);
    OmVolumeArgs volumeArgs =
        metadataManager.getVolumeTable().get(dbVolumeKey);
    if (volumeArgs == null) {
      LOG.debug("volume:{} does not exist", volume);
      throw new OMException("Volume " + volume + " is not found",
          ResultCodes.VOLUME_NOT_FOUND);
    }
    if (volumeArgs.removeAcl(acl)) {
      metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs);
      return true;
    }
    Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
  } catch (IOException ex) {
    if (!(ex instanceof OMException)) {
      LOG.error("Remove acl operation failed for volume:{} acl:{}",
          volume, acl, ex);
    }
    throw ex;
  } finally {
    metadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume);
  }
  return false;
}
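A short usage sketch, assuming an ObjectStore handle obtained from an OzoneClient as in the earlier example (client.getObjectStore()); the volume name and acl string are hypothetical, and OzoneAcl.parseAcl is assumed to accept the usual "type:name:rights" format.

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;

// Build the volume object and the acl to remove (hypothetical names).
OzoneObj volumeObj = OzoneObjInfo.Builder.newBuilder()
    .setResType(OzoneObj.ResourceType.VOLUME)
    .setStoreType(OzoneObj.StoreType.OZONE)
    .setVolumeName("vol1")
    .build();
OzoneAcl acl = OzoneAcl.parseAcl("user:testuser:rw");
// Returns true if the acl was present and removed, false otherwise.
boolean removed = objectStore.removeAcl(volumeObj, acl);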
use of org.apache.hadoop.ozone.om.helpers.OmVolumeArgs in project ozone by apache.
the class VolumeManagerImpl method checkAccess.
/**
 * Check access for the given Ozone object.
 *
 * @param ozObject object for which access needs to be checked.
 * @param context Context object encapsulating all user related information.
 * @return true if the user has access, else false.
 */
@Override
public boolean checkAccess(OzoneObj ozObject, RequestContext context)
    throws OMException {
  Objects.requireNonNull(ozObject);
  Objects.requireNonNull(context);
  String volume = ozObject.getVolumeName();
  metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volume);
  try {
    String dbVolumeKey = metadataManager.getVolumeKey(volume);
    OmVolumeArgs volumeArgs =
        metadataManager.getVolumeTable().get(dbVolumeKey);
    if (volumeArgs == null) {
      LOG.debug("volume:{} does not exist", volume);
      throw new OMException("Volume " + volume + " is not found",
          ResultCodes.VOLUME_NOT_FOUND);
    }
    Preconditions.checkState(volume.equals(volumeArgs.getVolume()));
    boolean hasAccess = OzoneAclUtil.checkAclRights(
        volumeArgs.getAcls(), context);
    if (LOG.isDebugEnabled()) {
      LOG.debug("user:{} has access rights for volume:{} :{} ",
          context.getClientUgi(), ozObject.getVolumeName(), hasAccess);
    }
    return hasAccess;
  } catch (IOException ex) {
    if (ex instanceof OMException) {
      throw (OMException) ex;
    }
    LOG.error("Check access operation failed for volume:{}", volume, ex);
    throw new OMException("Check access operation failed for "
        + "volume:" + volume, ex, ResultCodes.INTERNAL_ERROR);
  } finally {
    metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volume);
  }
}
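To show the caller's side, here is a hedged sketch of building the RequestContext this method consumes; it assumes RequestContext exposes the builder methods used elsewhere in OzoneManager's acl checks, and volumeManager is an already-initialized VolumeManagerImpl.

import java.net.InetAddress;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
import org.apache.hadoop.ozone.security.acl.RequestContext;
import org.apache.hadoop.security.UserGroupInformation;

OzoneObj volumeObj = OzoneObjInfo.Builder.newBuilder()
    .setResType(OzoneObj.ResourceType.VOLUME)
    .setStoreType(OzoneObj.StoreType.OZONE)
    .setVolumeName("vol1") // hypothetical volume name
    .build();
RequestContext context = RequestContext.newBuilder()
    .setClientUgi(UserGroupInformation.getCurrentUser())
    .setIp(InetAddress.getLocalHost())
    .setAclType(IAccessAuthorizer.ACLIdentityType.USER)
    .setAclRights(IAccessAuthorizer.ACLType.READ)
    .build();
// True only if the current user's acls on vol1 grant READ.
boolean allowed = volumeManager.checkAccess(volumeObj, context);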
use of org.apache.hadoop.ozone.om.helpers.OmVolumeArgs in project ozone by apache.
the class VolumeManagerImpl method getVolumeInfo.
/**
 * Gets the volume information.
 *
 * @param volume - Volume name.
 * @return VolumeArgs, or an exception is thrown.
 * @throws IOException
 */
@Override
public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
  Preconditions.checkNotNull(volume);
  metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volume);
  try {
    String dbVolumeKey = metadataManager.getVolumeKey(volume);
    OmVolumeArgs volumeArgs =
        metadataManager.getVolumeTable().get(dbVolumeKey);
    if (volumeArgs == null) {
      LOG.debug("volume:{} does not exist", volume);
      throw new OMException("Volume " + volume + " is not found",
          ResultCodes.VOLUME_NOT_FOUND);
    }
    return volumeArgs;
  } catch (IOException ex) {
    if (!(ex instanceof OMException)) {
      LOG.warn("Info volume failed for volume:{}", volume, ex);
    }
    throw ex;
  } finally {
    metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volume);
  }
}
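A minimal usage sketch; volumeManager is an already-initialized VolumeManagerImpl and the volume name is hypothetical. The getters used below are the same OmVolumeArgs accessors referenced in the snippets above.

// Look up the volume and print a few of its properties.
OmVolumeArgs volumeArgs = volumeManager.getVolumeInfo("vol1");
System.out.printf("volume=%s owner=%s quotaInBytes=%d%n",
    volumeArgs.getVolume(), volumeArgs.getOwnerName(),
    volumeArgs.getQuotaInBytes());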
use of org.apache.hadoop.ozone.om.helpers.OmVolumeArgs in project ozone by apache.
the class NSSummaryEndpoint method getDiskUsage.
/**
 * DU endpoint to return the data size for a subpath
 * (e.g. the buckets under a volume).
 *
 * @param path request path
 * @param listFile show subpath/disk usage for each key
 * @param withReplica count actual DU with replication
 * @return DU response
 * @throws IOException
 */
@GET
@Path("/du")
@SuppressWarnings("methodlength")
public Response getDiskUsage(@QueryParam("path") String path,
    @DefaultValue("false") @QueryParam("files") boolean listFile,
    @DefaultValue("false") @QueryParam("replica") boolean withReplica)
    throws IOException {
  if (path == null || path.length() == 0) {
    return Response.status(Response.Status.BAD_REQUEST).build();
  }
  DUResponse duResponse = new DUResponse();
  if (!isInitializationComplete()) {
    duResponse.setStatus(ResponseStatus.INITIALIZING);
    return Response.ok(duResponse).build();
  }
  String normalizedPath = normalizePath(path);
  String[] names = parseRequestPath(normalizedPath);
  EntityType type = getEntityType(normalizedPath, names);
  duResponse.setPath(normalizedPath);
  switch (type) {
  case ROOT:
    List<OmVolumeArgs> volumes = listVolumes();
    duResponse.setCount(volumes.size());
    List<DUResponse.DiskUsage> volumeDuData = new ArrayList<>();
    long totalDataSize = 0L;
    long totalDataSizeWithReplica = 0L;
    for (OmVolumeArgs volume : volumes) {
      String volumeName = volume.getVolume();
      String subpath = omMetadataManager.getVolumeKey(volumeName);
      DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
      long dataSize = 0;
      diskUsage.setSubpath(subpath);
      // Iterate all buckets per volume to get the total data size.
      for (OmBucketInfo bucket : listBucketsUnderVolume(volumeName)) {
        long bucketObjectID = bucket.getObjectID();
        dataSize += getTotalSize(bucketObjectID);
      }
      totalDataSize += dataSize;
      // TODO: to be dropped or optimized in the future
      if (withReplica) {
        long volumeDU = calculateDUForVolume(volumeName);
        totalDataSizeWithReplica += volumeDU;
        diskUsage.setSizeWithReplica(volumeDU);
      }
      diskUsage.setSize(dataSize);
      volumeDuData.add(diskUsage);
    }
    if (withReplica) {
      duResponse.setSizeWithReplica(totalDataSizeWithReplica);
    }
    duResponse.setSize(totalDataSize);
    duResponse.setDuData(volumeDuData);
    break;
  case VOLUME:
    String volName = names[0];
    List<OmBucketInfo> buckets = listBucketsUnderVolume(volName);
    duResponse.setCount(buckets.size());
    // List of DiskUsage data for all buckets.
    List<DUResponse.DiskUsage> bucketDuData = new ArrayList<>();
    long volDataSize = 0L;
    long volDataSizeWithReplica = 0L;
    for (OmBucketInfo bucket : buckets) {
      String bucketName = bucket.getBucketName();
      long bucketObjectID = bucket.getObjectID();
      String subpath = omMetadataManager.getBucketKey(volName, bucketName);
      DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
      diskUsage.setSubpath(subpath);
      long dataSize = getTotalSize(bucketObjectID);
      volDataSize += dataSize;
      if (withReplica) {
        long bucketDU = calculateDUUnderObject(bucketObjectID);
        diskUsage.setSizeWithReplica(bucketDU);
        volDataSizeWithReplica += bucketDU;
      }
      diskUsage.setSize(dataSize);
      bucketDuData.add(diskUsage);
    }
    if (withReplica) {
      duResponse.setSizeWithReplica(volDataSizeWithReplica);
    }
    duResponse.setSize(volDataSize);
    duResponse.setDuData(bucketDuData);
    break;
  case BUCKET:
    long bucketObjectId = getBucketObjectId(names);
    NSSummary bucketNSSummary =
        reconNamespaceSummaryManager.getNSSummary(bucketObjectId);
    // Empty bucket: it is not a parent of any directory or key.
    if (bucketNSSummary == null) {
      if (withReplica) {
        duResponse.setSizeWithReplica(0L);
      }
      break;
    }
    // Get object IDs for all of the bucket's subdirectories.
    Set<Long> bucketSubdirs = bucketNSSummary.getChildDir();
    duResponse.setKeySize(bucketNSSummary.getSizeOfFiles());
    List<DUResponse.DiskUsage> dirDUData = new ArrayList<>();
    long bucketDataSize = duResponse.getKeySize();
    long bucketDataSizeWithReplica = 0L;
    for (long subdirObjectId : bucketSubdirs) {
      NSSummary subdirNSSummary =
          reconNamespaceSummaryManager.getNSSummary(subdirObjectId);
      // Get the directory's name and generate the next-level subpath.
      String dirName = subdirNSSummary.getDirName();
      String subpath = buildSubpath(normalizedPath, dirName);
      // The subpath in the response is reformatted with a leading
      // slash and without a trailing slash.
      DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
      diskUsage.setSubpath(subpath);
      long dataSize = getTotalSize(subdirObjectId);
      bucketDataSize += dataSize;
      if (withReplica) {
        long dirDU = calculateDUUnderObject(subdirObjectId);
        diskUsage.setSizeWithReplica(dirDU);
        bucketDataSizeWithReplica += dirDU;
      }
      diskUsage.setSize(dataSize);
      dirDUData.add(diskUsage);
    }
    // If either listFile or withReplica is enabled, we need the
    // direct-keys info.
    if (listFile || withReplica) {
      bucketDataSizeWithReplica += handleDirectKeys(bucketObjectId,
          withReplica, listFile, dirDUData, normalizedPath);
    }
    if (withReplica) {
      duResponse.setSizeWithReplica(bucketDataSizeWithReplica);
    }
    duResponse.setCount(dirDUData.size());
    duResponse.setSize(bucketDataSize);
    duResponse.setDuData(dirDUData);
    break;
  case DIRECTORY:
    long dirObjectId = getDirObjectId(names);
    NSSummary dirNSSummary =
        reconNamespaceSummaryManager.getNSSummary(dirObjectId);
    // Empty directory.
    if (dirNSSummary == null) {
      if (withReplica) {
        duResponse.setSizeWithReplica(0L);
      }
      break;
    }
    Set<Long> subdirs = dirNSSummary.getChildDir();
    duResponse.setKeySize(dirNSSummary.getSizeOfFiles());
    long dirDataSize = duResponse.getKeySize();
    long dirDataSizeWithReplica = 0L;
    List<DUResponse.DiskUsage> subdirDUData = new ArrayList<>();
    // Iterate all subdirectories to get disk usage data.
    for (long subdirObjectId : subdirs) {
      NSSummary subdirNSSummary =
          reconNamespaceSummaryManager.getNSSummary(subdirObjectId);
      String subdirName = subdirNSSummary.getDirName();
      // Build the path for the subdirectory.
      String subpath = buildSubpath(normalizedPath, subdirName);
      DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage();
      // Reformat the response.
      diskUsage.setSubpath(subpath);
      long dataSize = getTotalSize(subdirObjectId);
      dirDataSize += dataSize;
      if (withReplica) {
        long subdirDU = calculateDUUnderObject(subdirObjectId);
        diskUsage.setSizeWithReplica(subdirDU);
        dirDataSizeWithReplica += subdirDU;
      }
      diskUsage.setSize(dataSize);
      subdirDUData.add(diskUsage);
    }
    // Handle direct keys under the directory.
    if (listFile || withReplica) {
      dirDataSizeWithReplica += handleDirectKeys(dirObjectId, withReplica,
          listFile, subdirDUData, normalizedPath);
    }
    if (withReplica) {
      duResponse.setSizeWithReplica(dirDataSizeWithReplica);
    }
    duResponse.setCount(subdirDUData.size());
    duResponse.setSize(dirDataSize);
    duResponse.setDuData(subdirDUData);
    break;
  case KEY:
    // DU for a key doesn't have subpaths.
    duResponse.setCount(0);
    // The object ID for the directory that the key is directly in.
    long parentObjectId = getDirObjectId(names, names.length - 1);
    String fileName = names[names.length - 1];
    String ozoneKey =
        omMetadataManager.getOzonePathKey(parentObjectId, fileName);
    OmKeyInfo keyInfo =
        omMetadataManager.getFileTable().getSkipCache(ozoneKey);
    duResponse.setSize(keyInfo.getDataSize());
    if (withReplica) {
      long keySizeWithReplica = getKeySizeWithReplication(keyInfo);
      duResponse.setSizeWithReplica(keySizeWithReplica);
    }
    break;
  case UNKNOWN:
    duResponse.setStatus(ResponseStatus.PATH_NOT_FOUND);
    break;
  default:
    break;
  }
  return Response.ok(duResponse).build();
}
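Since this is a REST endpoint, it can be exercised over plain HTTP. Below is a hedged sketch using the JDK 11 HttpClient; the Recon host, the default port 9888, and the /api/v1/namespace prefix are assumptions based on a default Recon deployment, and the path query value is hypothetical.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Query Recon's namespace-summary DU endpoint for a bucket path.
HttpClient client = HttpClient.newHttpClient();
HttpRequest request = HttpRequest.newBuilder()
    .uri(URI.create("http://recon-host:9888/api/v1/namespace/du"
        + "?path=/vol1/bucket1&files=true&replica=true"))
    .GET()
    .build();
HttpResponse<String> response =
    client.send(request, HttpResponse.BodyHandlers.ofString());
// The body is a JSON-serialized DUResponse: size, sizeWithReplica,
// and per-subpath duData entries.
System.out.println(response.body());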