use of org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse in project ozone by apache.
the class TestOzoneManagerDoubleBufferWithOMResponse method doMixTransactions.
/**
 * Adds a mix of createBucket/deleteBucket responses to the double buffer.
 * Exactly {@code bucketCount} create-bucket transactions are issued; every
 * second one is immediately followed by a delete of that same bucket, so a
 * run of N creates also produces N/2 deletes.
 *
 * <p>Create responses that are NOT subsequently deleted go to
 * {@code bucketQueue}; delete responses go to {@code deleteBucketQueue},
 * for later verification against the OM DB.
 */
private void doMixTransactions(String volumeName, int bucketCount,
    Queue<OMBucketDeleteResponse> deleteBucketQueue,
    Queue<OMBucketCreateResponse> bucketQueue) {
  for (int count = 0; count < bucketCount; count++) {
    final String bucket = UUID.randomUUID().toString();
    final OMBucketCreateResponse createResponse =
        createBucket(volumeName, bucket, trxId.incrementAndGet());
    final boolean deleteThisBucket = count % 2 == 0;
    if (deleteThisBucket) {
      // Delete the bucket we just created; only the delete response is
      // queued for verification (the create is superseded by the delete).
      deleteBucketQueue.add((OMBucketDeleteResponse) deleteBucket(
          volumeName, bucket, trxId.incrementAndGet()));
    } else {
      bucketQueue.add(createResponse);
    }
  }
}
use of org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse in project ozone by apache.
the class TestOzoneManagerDoubleBufferWithOMResponse method testDoubleBufferWithMixOfTransactions.
/**
 * Creates a volume, then performs a mix of create/delete bucket
 * transactions through the double buffer, and finally verifies that the
 * OM DB row counts and contents match the responses that were added.
 */
@Test
public void testDoubleBufferWithMixOfTransactions() throws Exception {
  // Queues collecting the responses we expect to find (or not find) in
  // the OM DB once the double buffer has flushed everything.
  final Queue<OMBucketCreateResponse> createdBuckets = new ConcurrentLinkedQueue<>();
  final Queue<OMBucketDeleteResponse> deletedBuckets = new ConcurrentLinkedQueue<>();

  final String volumeName = UUID.randomUUID().toString();
  final OMVolumeCreateResponse volumeResponse =
      (OMVolumeCreateResponse) createVolume(volumeName, trxId.incrementAndGet());

  final int bucketCount = 10;
  doMixTransactions(volumeName, bucketCount, deletedBuckets, createdBuckets);

  // Every second bucket create is followed by a delete of that bucket.
  final int deleteCount = 5;
  // The extra +1 is the volume-create transaction.
  final long expectedTransactions = bucketCount + deleteCount + 1;
  GenericTestUtils.waitFor(
      () -> doubleBuffer.getFlushedTransactionCount() == expectedTransactions,
      100, 120000);

  // 1 volume row; 10 creates minus 5 deletes leaves 5 bucket rows.
  Assert.assertEquals(1,
      omMetadataManager.countRowsInTable(omMetadataManager.getVolumeTable()));
  Assert.assertEquals(5,
      omMetadataManager.countRowsInTable(omMetadataManager.getBucketTable()));

  checkVolume(volumeName, volumeResponse);
  checkCreateBuckets(createdBuckets);
  checkDeletedBuckets(deletedBuckets);

  // lastAppliedIndex must eventually equal the full transaction count.
  GenericTestUtils.waitFor(
      () -> expectedTransactions == lastAppliedIndex, 100, 30000);

  TransactionInfo transactionInfo =
      omMetadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY);
  assertNotNull(transactionInfo);
  Assert.assertEquals(lastAppliedIndex, transactionInfo.getTransactionIndex());
  Assert.assertEquals(term, transactionInfo.getTerm());
}
use of org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse in project ozone by apache.
the class OMBucketCreateRequest method validateAndUpdateCache.
/**
 * Validates the CreateBucket request and applies it to the OM metadata
 * cache (volume and bucket tables), returning the response that will be
 * flushed to the DB by the double buffer.
 *
 * Lock ordering: volume READ lock first, then bucket WRITE lock; released
 * in reverse order in the finally block. The response (success or error)
 * is always handed to the double buffer before the locks are released.
 */
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
    long transactionLogIndex,
    OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
  OMMetrics omMetrics = ozoneManager.getMetrics();
  omMetrics.incNumBucketCreates();
  OMMetadataManager metadataManager = ozoneManager.getMetadataManager();
  CreateBucketRequest createBucketRequest = getOmRequest().getCreateBucketRequest();
  BucketInfo bucketInfo = createBucketRequest.getBucketInfo();
  String volumeName = bucketInfo.getVolumeName();
  String bucketName = bucketInfo.getBucketName();
  OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
  OmBucketInfo omBucketInfo = null;
  // Resolve the bucket layout: a missing/LEGACY layout in the request means
  // the client did not choose one, so fall back to the OM-configured default.
  // NOTE(review): the null check suggests a proto2-style getter; verify it
  // can actually return null rather than a default instance.
  if (bucketInfo.getBucketLayout() == null || bucketInfo.getBucketLayout().equals(BucketLayoutProto.LEGACY)) {
    // Bucket Layout argument was not passed during bucket creation.
    String omDefaultBucketLayout = ozoneManager.getOMDefaultBucketLayout();
    BucketLayout defaultType = BucketLayout.fromString(omDefaultBucketLayout);
    omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo, defaultType);
  } else {
    omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
  }
  if (omBucketInfo.getBucketLayout().isFileSystemOptimized()) {
    omMetrics.incNumFSOBucketCreates();
  }
  AuditLogger auditLogger = ozoneManager.getAuditLogger();
  OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
  String volumeKey = metadataManager.getVolumeKey(volumeName);
  String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
  IOException exception = null;
  boolean acquiredBucketLock = false;
  boolean acquiredVolumeLock = false;
  OMClientResponse omClientResponse = null;
  try {
    // check Acl
    if (ozoneManager.getAclsEnabled()) {
      checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET, OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.CREATE, volumeName, bucketName, null);
    }
    // Volume read lock before bucket write lock; flags track what was
    // actually acquired so the finally block releases only those.
    acquiredVolumeLock = metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volumeName);
    acquiredBucketLock = metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
    OmVolumeArgs omVolumeArgs = metadataManager.getVolumeTable().getReadCopy(volumeKey);
    // Check if the volume exists
    if (omVolumeArgs == null) {
      LOG.debug("volume: {} not found ", volumeName);
      throw new OMException("Volume doesn't exist", VOLUME_NOT_FOUND);
    }
    // Check if bucket already exists
    if (metadataManager.getBucketTable().isExist(bucketKey)) {
      LOG.debug("bucket: {} already exists ", bucketName);
      throw new OMException("Bucket already exist", BUCKET_ALREADY_EXISTS);
    }
    // Check quotaInBytes to update
    checkQuotaBytesValid(metadataManager, omVolumeArgs, omBucketInfo, volumeKey);
    // Add objectID and updateID derived from the Ratis transaction index,
    // so replays with the same index produce identical state.
    omBucketInfo.setObjectID(ozoneManager.getObjectIdFromTxId(transactionLogIndex));
    omBucketInfo.setUpdateID(transactionLogIndex, ozoneManager.isRatisEnabled());
    // Add default acls from volume.
    addDefaultAcls(omBucketInfo, omVolumeArgs);
    // check namespace quota
    checkQuotaInNamespace(omVolumeArgs, 1L);
    // update used namespace for volume
    omVolumeArgs.incrUsedNamespace(1L);
    // Update table cache: volume (for the namespace counter) and the new
    // bucket, both versioned with the transaction index.
    metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex));
    metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
    omResponse.setCreateBucketResponse(CreateBucketResponse.newBuilder().build());
    omClientResponse = new OMBucketCreateResponse(omResponse.build(), omBucketInfo, omVolumeArgs.copyObject());
  } catch (IOException ex) {
    exception = ex;
    omClientResponse = new OMBucketCreateResponse(createErrorOMResponse(omResponse, exception));
  } finally {
    // Enqueue the response (success OR error) for flushing while the
    // locks are still held, then release locks in reverse order.
    addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, ozoneManagerDoubleBufferHelper);
    if (acquiredBucketLock) {
      metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
    }
    if (acquiredVolumeLock) {
      metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volumeName);
    }
  }
  // Performing audit logging outside of the lock.
  auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_BUCKET, omBucketInfo.toAuditMap(), exception, userInfo));
  // return response.
  if (exception == null) {
    LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
    omMetrics.incNumBuckets();
    return omClientResponse;
  } else {
    omMetrics.incNumBucketCreateFails();
    LOG.error("Bucket creation failed for bucket:{} in volume:{}", bucketName, volumeName, exception);
    return omClientResponse;
  }
}
use of org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse in project ozone by apache.
the class TestOzoneManagerDoubleBufferWithOMResponse method testDoubleBufferWithMixOfTransactionsParallel.
/**
 * Creates two volumes, then runs create/delete bucket transactions against
 * them from two parallel daemon threads feeding the same double buffer,
 * and verifies the OM DB row counts and contents match the collected
 * responses.
 */
@Test
public void testDoubleBufferWithMixOfTransactionsParallel() throws Exception {
  // Shared, thread-safe queues: both daemons append to the same queues.
  final Queue<OMBucketCreateResponse> createdBuckets = new ConcurrentLinkedQueue<>();
  final Queue<OMBucketDeleteResponse> deletedBuckets = new ConcurrentLinkedQueue<>();

  final String volumeName1 = UUID.randomUUID().toString();
  final OMVolumeCreateResponse volumeResponse1 =
      (OMVolumeCreateResponse) createVolume(volumeName1, trxId.incrementAndGet());
  final String volumeName2 = UUID.randomUUID().toString();
  final OMVolumeCreateResponse volumeResponse2 =
      (OMVolumeCreateResponse) createVolume(volumeName2, trxId.incrementAndGet());

  final int bucketsPerVolume = 10;
  Daemon daemon1 = new Daemon(() ->
      doMixTransactions(volumeName1, bucketsPerVolume, deletedBuckets, createdBuckets));
  Daemon daemon2 = new Daemon(() ->
      doMixTransactions(volumeName2, bucketsPerVolume, deletedBuckets, createdBuckets));
  daemon1.start();
  daemon2.start();

  final int bucketCount = 2 * bucketsPerVolume;
  // Every second bucket create per volume is followed by a delete.
  final int deleteCount = 10;
  // The extra +2 covers the two volume-create transactions.
  final long expectedTransactions = bucketCount + deleteCount + 2;
  GenericTestUtils.waitFor(
      () -> doubleBuffer.getFlushedTransactionCount() == expectedTransactions,
      100, 120000);

  // 2 volume rows; 20 creates minus 10 deletes leaves 10 bucket rows.
  Assert.assertEquals(2,
      omMetadataManager.countRowsInTable(omMetadataManager.getVolumeTable()));
  Assert.assertEquals(10,
      omMetadataManager.countRowsInTable(omMetadataManager.getBucketTable()));

  checkVolume(volumeName1, volumeResponse1);
  checkVolume(volumeName2, volumeResponse2);
  checkCreateBuckets(createdBuckets);
  checkDeletedBuckets(deletedBuckets);

  // With two threads applying transactions concurrently, lastAppliedIndex
  // need not equal the total transaction count at this point; it can only
  // be bounded above by it.
  Assert.assertTrue(lastAppliedIndex <= expectedTransactions);
}
use of org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse in project ozone by apache.
the class TestOzoneManagerDoubleBufferWithOMResponse method createBucket.
/**
 * Builds a CreateBucket request for the given volume/bucket and applies it
 * through {@link OMBucketCreateRequest#validateAndUpdateCache}.
 *
 * @return the OMBucketCreateResponse produced for the transaction
 */
private OMBucketCreateResponse createBucket(String volumeName,
    String bucketName, long transactionID) {
  final OzoneManagerProtocolProtos.OMRequest request =
      OMRequestTestUtils.createBucketRequest(bucketName, volumeName, false,
          OzoneManagerProtocolProtos.StorageTypeProto.DISK);
  return (OMBucketCreateResponse) new OMBucketCreateRequest(request)
      .validateAndUpdateCache(ozoneManager, transactionID,
          ozoneManagerDoubleBufferHelper);
}
Aggregations