Use of org.apache.hadoop.ozone.om.helpers.OmVolumeArgs in the Apache Ozone project.
From the class GeneratorOm, method writeOmBucketVolume.
/**
 * Seeds the OM RocksDB with a single volume and bucket owned by the current
 * user, so generated keys have a valid namespace to live in.
 *
 * Writes three tables: the volume table (keyed "/volume"), the user table
 * (user -> list of owned volume names) and the bucket table
 * (keyed "/volume/bucket").
 *
 * @throws IOException on any failure writing to the underlying DB.
 */
private void writeOmBucketVolume() throws IOException {
  Table<String, OmVolumeArgs> volTable =
      omDb.getTable(OmMetadataManagerImpl.VOLUME_TABLE, String.class,
          OmVolumeArgs.class);
  String admin = getUserId();
  String owner = getUserId();
  // Fresh volume entry: objectID/updateID start at 1 for a generated DB;
  // WORLD gets ALL access plus an explicit ACL for the generating user.
  OmVolumeArgs omVolumeArgs = new OmVolumeArgs.Builder()
      .setVolume(volumeName)
      .setAdminName(admin)
      .setCreationTime(Time.now())
      .setOwnerName(owner)
      .setObjectID(1L)
      .setUpdateID(1L)
      .setQuotaInBytes(100L)
      .addOzoneAcls(new OzoneAcl(IAccessAuthorizer.ACLIdentityType.WORLD, "",
          IAccessAuthorizer.ACLType.ALL, ACCESS))
      .addOzoneAcls(new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER,
          getUserId(), IAccessAuthorizer.ACLType.ALL, ACCESS))
      .build();
  volTable.put("/" + volumeName, omVolumeArgs);

  final Table<String, PersistedUserVolumeInfo> userTable =
      omDb.getTable(OmMetadataManagerImpl.USER_TABLE, String.class,
          PersistedUserVolumeInfo.class);
  PersistedUserVolumeInfo currentUserVolumeInfo = userTable.get(getUserId());
  if (currentUserVolumeInfo == null) {
    // First volume registered for this user.
    currentUserVolumeInfo = PersistedUserVolumeInfo.newBuilder()
        .addVolumeNames(volumeName)
        .build();
    userTable.put(getUserId(), currentUserVolumeInfo);
  } else if (!currentUserVolumeInfo.getVolumeNamesList()
      .contains(volumeName)) {
    // Append the new volume to the user's existing volume list.
    currentUserVolumeInfo = PersistedUserVolumeInfo.newBuilder()
        .addAllVolumeNames(currentUserVolumeInfo.getVolumeNamesList())
        .addVolumeNames(volumeName)
        .build();
    userTable.put(getUserId(), currentUserVolumeInfo);
  }
  // else: the volume is already in the user's list — skip the redundant
  // rewrite of an identical value (the original unconditionally re-put it).

  Table<String, OmBucketInfo> bucketTable =
      omDb.getTable(OmMetadataManagerImpl.BUCKET_TABLE, String.class,
          OmBucketInfo.class);
  OmBucketInfo omBucketInfo = new OmBucketInfo.Builder()
      .setBucketName(bucketName)
      .setVolumeName(volumeName)
      .build();
  bucketTable.put("/" + volumeName + "/" + bucketName, omBucketInfo);
}
Use of org.apache.hadoop.ozone.om.helpers.OmVolumeArgs in the Apache Ozone project.
From the class OzoneManager, method listVolumeByUser.
/**
* Lists volumes accessible by a specific user.
*
* @param userName - user name
* @param prefix - Filter prefix -- Return only entries that match this.
* @param prevKey - Previous key -- List starts from the next from the
* prevkey
* @param maxKeys - Max number of keys to return.
* @return List of Volumes.
* @throws IOException
*/
@Override
public List<OmVolumeArgs> listVolumeByUser(String userName, String prefix, String prevKey, int maxKeys) throws IOException {
  // With ACLs enabled, an authenticated RPC user is mandatory — reject the
  // request outright if the UGI could not be resolved.
  final UserGroupInformation rpcUser = ProtobufRpcEngine.Server.getRemoteUser();
  if (isAclEnabled && rpcUser == null) {
    LOG.error("Rpc user UGI is null. Authorization failed.");
    throw new OMException("Rpc user UGI is null. Authorization failed.", ResultCodes.PERMISSION_DENIED);
  }
  // Audit context for both the success and failure paths.
  Map<String, String> auditMap = new LinkedHashMap<>();
  auditMap.put(OzoneConsts.PREV_KEY, prevKey);
  auditMap.put(OzoneConsts.PREFIX, prefix);
  auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys));
  auditMap.put(OzoneConsts.USERNAME, userName);
  boolean auditSuccess = true;
  try {
    metrics.incNumVolumeLists();
    if (!isAclEnabled) {
      // Without ACLs, fall back to owner-based filtering in the manager.
      return volumeManager.listVolumes(userName, prefix, prevKey, maxKeys);
    }
    // ACL path: list every volume, then keep only those the user may LIST.
    List<OmVolumeArgs> accessible = new ArrayList<>();
    for (OmVolumeArgs candidate : volumeManager.listVolumes(null, prefix, prevKey, maxKeys)) {
      if (hasAcls(userName, ResourceType.VOLUME, StoreType.OZONE, ACLType.LIST, candidate.getVolume(), null, null)) {
        accessible.add(candidate);
      }
    }
    return accessible;
  } catch (Exception ex) {
    metrics.incNumVolumeListFails();
    auditSuccess = false;
    AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_VOLUMES, auditMap, ex));
    throw ex;
  } finally {
    // Success is audited here so a single log line covers both branches.
    if (auditSuccess) {
      AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_VOLUMES, auditMap));
    }
  }
}
Use of org.apache.hadoop.ozone.om.helpers.OmVolumeArgs in the Apache Ozone project.
From the class OzoneManager, method createS3VolumeInfo.
/**
 * Builds the OmVolumeArgs for the well-known S3 volume, owned and
 * administered by the current process user, with ALL-access ACLs for that
 * user and each of the user's groups.
 *
 * @param s3Volume name of the S3 volume to create.
 * @param objectID object ID to assign to the volume.
 * @return the fully built volume arguments.
 * @throws IOException if the current user cannot be resolved.
 */
private OmVolumeArgs createS3VolumeInfo(String s3Volume, long objectID) throws IOException {
  String userName = UserGroupInformation.getCurrentUser().getShortUserName();
  long time = Time.now();
  // updateID must stay at DEFAULT_OM_UPDATE_ID: later ACL operations on the
  // S3v volume verify that the new updateID is greater than the stored one,
  // which a maximum transaction ID would make impossible.
  OmVolumeArgs.Builder omVolumeArgs = new OmVolumeArgs.Builder()
      .setVolume(s3Volume)
      .setUpdateID(DEFAULT_OM_UPDATE_ID)
      .setObjectID(objectID)
      .setCreationTime(time)
      .setModificationTime(time)
      .setOwnerName(userName)
      .setAdminName(userName)
      .setQuotaInBytes(OzoneConsts.QUOTA_RESET);
  // Default ACL rights: the owning user gets ALL...
  omVolumeArgs.addOzoneAcls(new OzoneAcl(ACLIdentityType.USER, userName, ACLType.ALL, ACCESS));
  // ...and so does each group the user belongs to. A plain for-each over the
  // group array avoids the intermediate list + stream().forEach of the
  // original.
  for (String group : UserGroupInformation.createRemoteUser(userName).getGroupNames()) {
    omVolumeArgs.addOzoneAcls(new OzoneAcl(ACLIdentityType.GROUP, group, ACLType.ALL, ACCESS));
  }
  return omVolumeArgs.build();
}
Use of org.apache.hadoop.ozone.om.helpers.OmVolumeArgs in the Apache Ozone project.
From the class BucketManagerImpl, method createBucket.
/**
* MetadataDB is maintained in MetadataManager and shared between
* BucketManager and VolumeManager. (and also by BlockManager)
*
* BucketManager uses MetadataDB to store bucket level information.
*
* Keys used in BucketManager for storing data into MetadataDB
* for BucketInfo:
* {volume/bucket} -> bucketInfo
*
* Work flow of create bucket:
*
* -> Check if the Volume exists in metadataDB, if not throw
* VolumeNotFoundException.
* -> Else check if the Bucket exists in metadataDB, if so throw
* BucketExistException
* -> Else update MetadataDB with VolumeInfo.
*/
/**
* Creates a bucket.
*
* @param bucketInfo - OmBucketInfo.
*/
@Override
public void createBucket(OmBucketInfo bucketInfo) throws IOException {
  Preconditions.checkNotNull(bucketInfo);
  String volumeName = bucketInfo.getVolumeName();
  String bucketName = bucketInfo.getBucketName();
  boolean acquiredBucketLock = false;
  // Lock ordering is volume-then-bucket; the bucket lock is tracked with a
  // flag so the finally block only releases what was actually acquired.
  metadataManager.getLock().acquireWriteLock(VOLUME_LOCK, volumeName);
  try {
    acquiredBucketLock = metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
    String volumeKey = metadataManager.getVolumeKey(volumeName);
    String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
    OmVolumeArgs volumeArgs = metadataManager.getVolumeTable().get(volumeKey);
    // Check if the volume exists
    if (volumeArgs == null) {
      LOG.debug("volume: {} not found ", volumeName);
      throw new OMException("Volume doesn't exist", VOLUME_NOT_FOUND);
    }
    // Check if bucket already exists
    if (metadataManager.getBucketTable().get(bucketKey) != null) {
      LOG.debug("bucket: {} already exists ", bucketName);
      throw new OMException("Bucket already exist", OMException.ResultCodes.BUCKET_ALREADY_EXISTS);
    }
    BucketEncryptionKeyInfo bek = bucketInfo.getEncryptionKeyInfo();
    // A bucket link requires BOTH source volume and source bucket; having
    // exactly one of the two is an invalid request.
    boolean hasSourceVolume = bucketInfo.getSourceVolume() != null;
    boolean hasSourceBucket = bucketInfo.getSourceBucket() != null;
    if (hasSourceBucket != hasSourceVolume) {
      throw new OMException("Both source volume and source bucket are " + "required for bucket links", OMException.ResultCodes.INVALID_REQUEST);
    }
    // Links delegate to the source bucket, so encryption on the link itself
    // is rejected.
    if (bek != null && hasSourceBucket) {
      throw new OMException("Encryption cannot be set for bucket links", OMException.ResultCodes.INVALID_REQUEST);
    }
    BucketEncryptionKeyInfo.Builder bekb = createBucketEncryptionKeyInfoBuilder(bek);
    OmBucketInfo.Builder omBucketInfoBuilder = bucketInfo.toBuilder().setCreationTime(Time.now());
    // The new bucket inherits the volume's default ACLs in addition to any
    // ACLs supplied on the request.
    OzoneAclUtil.inheritDefaultAcls(omBucketInfoBuilder.getAcls(), volumeArgs.getDefaultAcls());
    if (bekb != null) {
      omBucketInfoBuilder.setBucketEncryptionKey(bekb.build());
    }
    OmBucketInfo omBucketInfo = omBucketInfoBuilder.build();
    commitBucketInfoToDB(omBucketInfo);
    if (hasSourceBucket) {
      LOG.debug("created link {}/{} to bucket: {}/{}", volumeName, bucketName, omBucketInfo.getSourceVolume(), omBucketInfo.getSourceBucket());
    } else {
      LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
    }
  } catch (IOException ex) {
    // OMExceptions are expected control flow (volume missing, duplicate
    // bucket, invalid request); only log unexpected IOExceptions as errors.
    if (!(ex instanceof OMException)) {
      LOG.error("Bucket creation failed for bucket:{} in volume:{}", bucketName, volumeName, ex);
    }
    throw ex;
  } finally {
    // Release in reverse acquisition order; the bucket lock may never have
    // been taken if its acquire threw.
    if (acquiredBucketLock) {
      metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
    }
    metadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volumeName);
  }
}
Use of org.apache.hadoop.ozone.om.helpers.OmVolumeArgs in the Apache Ozone project.
From the class OmMetadataManagerImpl, method listAllVolumes.
/**
* @return list of all volumes.
*/
/**
 * Lists all volumes from the (full) volume table cache, filtered by prefix
 * and paginated with an exclusive startKey.
 *
 * @param prefix only volume names starting with this prefix are returned
 *     (empty/null disables the filter).
 * @param startKey exclusive resume point: entries up to and including this
 *     volume name are skipped (empty/null starts from the beginning).
 * @param maxKeys maximum number of volumes to return.
 * @return list of matching volumes, at most maxKeys long.
 */
private List<OmVolumeArgs> listAllVolumes(String prefix, String startKey, int maxKeys) {
  List<OmVolumeArgs> result = Lists.newArrayList();
  /* volumeTable is full-cache, so we use cacheIterator. */
  Iterator<Map.Entry<CacheKey<String>, CacheValue<OmVolumeArgs>>> cacheIterator = getVolumeTable().cacheIterator();
  boolean prefixIsEmpty = Strings.isNullOrEmpty(prefix);
  boolean startKeyIsEmpty = Strings.isNullOrEmpty(startKey);
  while (cacheIterator.hasNext() && result.size() < maxKeys) {
    Map.Entry<CacheKey<String>, CacheValue<OmVolumeArgs>> entry = cacheIterator.next();
    OmVolumeArgs omVolumeArgs = entry.getValue().getCacheValue();
    // A null cache value marks a volume deleted in the cache but not yet
    // flushed from the table; skip it instead of NPE-ing on getVolume().
    // NOTE(review): assumes CacheValue can hold null for deletions — confirm
    // against TableCache semantics in this version.
    if (omVolumeArgs == null) {
      continue;
    }
    String volumeName = omVolumeArgs.getVolume();
    if (!prefixIsEmpty && !volumeName.startsWith(prefix)) {
      continue;
    }
    if (!startKeyIsEmpty) {
      // Exclusive pagination: once the startKey volume is seen, collection
      // begins from the NEXT entry; the startKey entry itself is skipped.
      if (volumeName.equals(startKey)) {
        startKeyIsEmpty = true;
      }
      continue;
    }
    result.add(omVolumeArgs);
  }
  return result;
}
Aggregations