Example 1 with OMBucketCreateResponse

use of org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse in project ozone by apache.

the class TestOzoneManagerDoubleBufferWithOMResponse method doMixTransactions.

/**
 * This method adds a mix of createBucket/deleteBucket responses to the
 * double buffer. The total number of responses queued is specified by
 * bucketCount; every second create is followed by a delete transaction.
 */
private void doMixTransactions(String volumeName, int bucketCount, Queue<OMBucketDeleteResponse> deleteBucketQueue, Queue<OMBucketCreateResponse> bucketQueue) {
    for (int i = 0; i < bucketCount; i++) {
        String bucketName = UUID.randomUUID().toString();
        long transactionID = trxId.incrementAndGet();
        OMBucketCreateResponse omBucketCreateResponse = createBucket(volumeName, bucketName, transactionID);
        // Delete the bucket for every second create transaction.
        if (i % 2 == 0) {
            OMBucketDeleteResponse omBucketDeleteResponse = (OMBucketDeleteResponse) deleteBucket(volumeName, bucketName, trxId.incrementAndGet());
            deleteBucketQueue.add(omBucketDeleteResponse);
        } else {
            bucketQueue.add(omBucketCreateResponse);
        }
    }
}
Also used : OMBucketDeleteResponse(org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse) OMBucketCreateResponse(org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse)

Example 2 with OMBucketCreateResponse

use of org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse in project ozone by apache.

the class TestOzoneManagerDoubleBufferWithOMResponse method testDoubleBufferWithMixOfTransactions.

/**
 * This test first creates a volume, then performs a mix of create/delete
 * bucket transactions and adds them to the double buffer. It then verifies
 * that the OM DB entries match the responses that were added to the
 * double buffer.
 */
@Test
public void testDoubleBufferWithMixOfTransactions() throws Exception {
    // Verify that the row counts and contents of the OM tables are correct.
    Queue<OMBucketCreateResponse> bucketQueue = new ConcurrentLinkedQueue<>();
    Queue<OMBucketDeleteResponse> deleteBucketQueue = new ConcurrentLinkedQueue<>();
    String volumeName = UUID.randomUUID().toString();
    OMVolumeCreateResponse omVolumeCreateResponse = (OMVolumeCreateResponse) createVolume(volumeName, trxId.incrementAndGet());
    int bucketCount = 10;
    doMixTransactions(volumeName, bucketCount, deleteBucketQueue, bucketQueue);
    // For every two create-bucket transactions, one bucket is deleted.
    final int deleteCount = 5;
    // The +1 accounts for the volume creation transaction.
    GenericTestUtils.waitFor(() -> doubleBuffer.getFlushedTransactionCount() == (bucketCount + deleteCount + 1), 100, 120000);
    Assert.assertEquals(1, omMetadataManager.countRowsInTable(omMetadataManager.getVolumeTable()));
    Assert.assertEquals(5, omMetadataManager.countRowsInTable(omMetadataManager.getBucketTable()));
    // After this, the DB should contain 5 buckets and 1 volume.
    checkVolume(volumeName, omVolumeCreateResponse);
    checkCreateBuckets(bucketQueue);
    checkDeletedBuckets(deleteBucketQueue);
    // Check that lastAppliedIndex has been updated correctly.
    GenericTestUtils.waitFor(() -> bucketCount + deleteCount + 1 == lastAppliedIndex, 100, 30000);
    TransactionInfo transactionInfo = omMetadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY);
    assertNotNull(transactionInfo);
    Assert.assertEquals(lastAppliedIndex, transactionInfo.getTransactionIndex());
    Assert.assertEquals(term, transactionInfo.getTerm());
}
Also used : OMBucketDeleteResponse(org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse) OMBucketCreateResponse(org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse) TransactionInfo(org.apache.hadoop.hdds.utils.TransactionInfo) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) OMVolumeCreateResponse(org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse) Test(org.junit.Test)
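
Both waitFor calls above poll a condition every 100 ms until it holds or the given timeout elapses, and fail the test otherwise. As a rough, self-contained sketch of that polling pattern (an illustration only, not Hadoop's GenericTestUtils implementation; the class and method names below are made up, and the 100 ms / 120 s values simply mirror the arguments used above):

import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

public final class PollUntil {

    /**
     * Polls check every checkEveryMillis until it returns true, or throws
     * TimeoutException once waitForMillis has elapsed. Illustrative only.
     */
    public static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
            int waitForMillis) throws TimeoutException, InterruptedException {
        long deadline = System.currentTimeMillis() + waitForMillis;
        while (!check.get()) {
            if (System.currentTimeMillis() > deadline) {
                throw new TimeoutException(
                    "Condition not met within " + waitForMillis + " ms");
            }
            Thread.sleep(checkEveryMillis);
        }
    }

    public static void main(String[] args) throws Exception {
        long start = System.currentTimeMillis();
        // Example condition: becomes true roughly half a second after start.
        waitFor(() -> System.currentTimeMillis() - start > 500, 100, 120000);
        System.out.println("condition satisfied");
    }
}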

Example 3 with OMBucketCreateResponse

use of org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse in project ozone by apache.

the class OMBucketCreateRequest method validateAndUpdateCache.

@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long transactionLogIndex, OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
    OMMetrics omMetrics = ozoneManager.getMetrics();
    omMetrics.incNumBucketCreates();
    OMMetadataManager metadataManager = ozoneManager.getMetadataManager();
    CreateBucketRequest createBucketRequest = getOmRequest().getCreateBucketRequest();
    BucketInfo bucketInfo = createBucketRequest.getBucketInfo();
    String volumeName = bucketInfo.getVolumeName();
    String bucketName = bucketInfo.getBucketName();
    OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
    OmBucketInfo omBucketInfo = null;
    if (bucketInfo.getBucketLayout() == null || bucketInfo.getBucketLayout().equals(BucketLayoutProto.LEGACY)) {
        // Bucket Layout argument was not passed during bucket creation.
        String omDefaultBucketLayout = ozoneManager.getOMDefaultBucketLayout();
        BucketLayout defaultType = BucketLayout.fromString(omDefaultBucketLayout);
        omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo, defaultType);
    } else {
        omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
    }
    if (omBucketInfo.getBucketLayout().isFileSystemOptimized()) {
        omMetrics.incNumFSOBucketCreates();
    }
    AuditLogger auditLogger = ozoneManager.getAuditLogger();
    OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
    String volumeKey = metadataManager.getVolumeKey(volumeName);
    String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
    IOException exception = null;
    boolean acquiredBucketLock = false;
    boolean acquiredVolumeLock = false;
    OMClientResponse omClientResponse = null;
    try {
        // check Acl
        if (ozoneManager.getAclsEnabled()) {
            checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET, OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.CREATE, volumeName, bucketName, null);
        }
        acquiredVolumeLock = metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volumeName);
        acquiredBucketLock = metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
        OmVolumeArgs omVolumeArgs = metadataManager.getVolumeTable().getReadCopy(volumeKey);
        // Check if the volume exists
        if (omVolumeArgs == null) {
            LOG.debug("volume: {} not found ", volumeName);
            throw new OMException("Volume doesn't exist", VOLUME_NOT_FOUND);
        }
        // Check if bucket already exists
        if (metadataManager.getBucketTable().isExist(bucketKey)) {
            LOG.debug("bucket: {} already exists ", bucketName);
            throw new OMException("Bucket already exist", BUCKET_ALREADY_EXISTS);
        }
        // Check quotaInBytes to update
        checkQuotaBytesValid(metadataManager, omVolumeArgs, omBucketInfo, volumeKey);
        // Add objectID and updateID
        omBucketInfo.setObjectID(ozoneManager.getObjectIdFromTxId(transactionLogIndex));
        omBucketInfo.setUpdateID(transactionLogIndex, ozoneManager.isRatisEnabled());
        // Add default acls from volume.
        addDefaultAcls(omBucketInfo, omVolumeArgs);
        // check namespace quota
        checkQuotaInNamespace(omVolumeArgs, 1L);
        // update used namespace for volume
        omVolumeArgs.incrUsedNamespace(1L);
        // Update table cache.
        metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex));
        metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
        omResponse.setCreateBucketResponse(CreateBucketResponse.newBuilder().build());
        omClientResponse = new OMBucketCreateResponse(omResponse.build(), omBucketInfo, omVolumeArgs.copyObject());
    } catch (IOException ex) {
        exception = ex;
        omClientResponse = new OMBucketCreateResponse(createErrorOMResponse(omResponse, exception));
    } finally {
        addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, ozoneManagerDoubleBufferHelper);
        if (acquiredBucketLock) {
            metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
        }
        if (acquiredVolumeLock) {
            metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volumeName);
        }
    }
    // Performing audit logging outside of the lock.
    auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_BUCKET, omBucketInfo.toAuditMap(), exception, userInfo));
    // return response.
    if (exception == null) {
        LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
        omMetrics.incNumBuckets();
        return omClientResponse;
    } else {
        omMetrics.incNumBucketCreateFails();
        LOG.error("Bucket creation failed for bucket:{} in volume:{}", bucketName, volumeName, exception);
        return omClientResponse;
    }
}
Also used : OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) AuditLogger(org.apache.hadoop.ozone.audit.AuditLogger) OMClientResponse(org.apache.hadoop.ozone.om.response.OMClientResponse) OmVolumeArgs(org.apache.hadoop.ozone.om.helpers.OmVolumeArgs) CreateBucketRequest(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketRequest) BucketLayout(org.apache.hadoop.ozone.om.helpers.BucketLayout) IOException(java.io.IOException) OMMetrics(org.apache.hadoop.ozone.om.OMMetrics) OMResponse(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse) OzoneManagerProtocolProtos(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) OMBucketCreateResponse(org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) BucketInfo(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo) OMException(org.apache.hadoop.ozone.om.exceptions.OMException)
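
Stripped of the Ozone-specific types, validateAndUpdateCache above follows a lock / validate / stage-in-cache / queue-response pattern: take the volume read lock and bucket write lock, validate the request against current state, stage the new bucket in the table cache tagged with the transaction index, and queue the response (success or error) to the double buffer from the finally block before releasing the locks. The sketch below restates that shape with plain JDK types standing in for the OM lock manager, table cache, and double buffer; every name in it is illustrative, none of it is Ozone API:

import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CreateBucketSketch {

    // Stand-ins for the OM volume table, bucket table cache, and double buffer.
    private final Map<String, String> volumeTable = new ConcurrentHashMap<>();
    private final Map<String, Long> bucketCache = new ConcurrentHashMap<>();
    private final Queue<String> doubleBuffer = new ConcurrentLinkedQueue<>();
    private final ReentrantReadWriteLock volumeLock = new ReentrantReadWriteLock();
    private final ReentrantReadWriteLock bucketLock = new ReentrantReadWriteLock();

    public String createBucket(String volume, String bucket, long txIndex) {
        String response = null;
        boolean gotVolumeLock = false;
        boolean gotBucketLock = false;
        try {
            volumeLock.readLock().lock();
            gotVolumeLock = true;
            bucketLock.writeLock().lock();
            gotBucketLock = true;
            // Validate against current state, mirroring the checks above.
            if (!volumeTable.containsKey(volume)) {
                throw new IllegalStateException("Volume doesn't exist");
            }
            String bucketKey = volume + "/" + bucket;
            if (bucketCache.containsKey(bucketKey)) {
                throw new IllegalStateException("Bucket already exists");
            }
            // Stage the change in the cache, tagged with the transaction index.
            bucketCache.put(bucketKey, txIndex);
            response = "OK:" + bucketKey;
        } catch (IllegalStateException e) {
            response = "ERROR:" + e.getMessage();
        } finally {
            // Queue the response (success or error) for the flush thread,
            // then release the locks in reverse acquisition order.
            doubleBuffer.add(response == null ? "ERROR:unknown" : response);
            if (gotBucketLock) {
                bucketLock.writeLock().unlock();
            }
            if (gotVolumeLock) {
                volumeLock.readLock().unlock();
            }
        }
        return response;
    }

    public static void main(String[] args) {
        CreateBucketSketch sketch = new CreateBucketSketch();
        sketch.volumeTable.put("vol1", "exists");
        System.out.println(sketch.createBucket("vol1", "bucket1", 1L)); // OK
        System.out.println(sketch.createBucket("vol1", "bucket1", 2L)); // ERROR
    }
}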

Example 4 with OMBucketCreateResponse

use of org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse in project ozone by apache.

the class TestOzoneManagerDoubleBufferWithOMResponse method testDoubleBufferWithMixOfTransactionsParallel.

/**
 * This test first creates two volumes, then performs a mix of create/delete
 * bucket transactions in parallel and adds them to the double buffer. It
 * then verifies that the OM DB entries match the responses that were added
 * to the double buffer.
 */
@Test
public void testDoubleBufferWithMixOfTransactionsParallel() throws Exception {
    // Verify that the row counts and contents of the OM tables are correct.
    Queue<OMBucketCreateResponse> bucketQueue = new ConcurrentLinkedQueue<>();
    Queue<OMBucketDeleteResponse> deleteBucketQueue = new ConcurrentLinkedQueue<>();
    String volumeName1 = UUID.randomUUID().toString();
    OMVolumeCreateResponse omVolumeCreateResponse1 = (OMVolumeCreateResponse) createVolume(volumeName1, trxId.incrementAndGet());
    String volumeName2 = UUID.randomUUID().toString();
    OMVolumeCreateResponse omVolumeCreateResponse2 = (OMVolumeCreateResponse) createVolume(volumeName2, trxId.incrementAndGet());
    int bucketsPerVolume = 10;
    Daemon daemon1 = new Daemon(() -> doMixTransactions(volumeName1, bucketsPerVolume, deleteBucketQueue, bucketQueue));
    Daemon daemon2 = new Daemon(() -> doMixTransactions(volumeName2, bucketsPerVolume, deleteBucketQueue, bucketQueue));
    daemon1.start();
    daemon2.start();
    int bucketCount = 2 * bucketsPerVolume;
    // For every two create-bucket transactions, one bucket is deleted.
    final int deleteCount = 10;
    // The +2 accounts for the two volume creation transactions.
    GenericTestUtils.waitFor(() -> doubleBuffer.getFlushedTransactionCount() == (bucketCount + deleteCount + 2), 100, 120000);
    Assert.assertEquals(2, omMetadataManager.countRowsInTable(omMetadataManager.getVolumeTable()));
    Assert.assertEquals(10, omMetadataManager.countRowsInTable(omMetadataManager.getBucketTable()));
    // After this, the DB should contain 10 buckets and 2 volumes.
    checkVolume(volumeName1, omVolumeCreateResponse1);
    checkVolume(volumeName2, omVolumeCreateResponse2);
    checkCreateBuckets(bucketQueue);
    checkDeletedBuckets(deleteBucketQueue);
    // lastAppliedIndex is not checked for an exact value here, because the
    // two daemon threads run in parallel, so it may not yet equal the total
    // transaction count. We only check that it does not exceed the total.
    Assert.assertTrue(lastAppliedIndex <= bucketCount + deleteCount + 2);
}
Also used : Daemon(org.apache.hadoop.util.Daemon) OMBucketDeleteResponse(org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse) OMBucketCreateResponse(org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) OMVolumeCreateResponse(org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse) Test(org.junit.Test)
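
The bound asserted at the end of the parallel test reflects that the two daemons advance the shared transaction counter concurrently, so at any observation point the applied index can only be bounded by the total, not pinned to it. A tiny stand-alone illustration of the same idea with plain threads and an AtomicLong (the numbers and names are illustrative only):

import java.util.concurrent.atomic.AtomicLong;

public class ParallelIndexSketch {

    public static void main(String[] args) throws InterruptedException {
        AtomicLong lastApplied = new AtomicLong();
        // Each workload stands in for one volume's 10 creates + 5 deletes.
        Runnable workload = () -> {
            for (int i = 0; i < 15; i++) {
                lastApplied.incrementAndGet();
            }
        };
        Thread t1 = new Thread(workload);
        Thread t2 = new Thread(workload);
        t1.start();
        t2.start();
        // Observed before the threads are joined, the index may lag the
        // total of 30, so only an upper bound can be asserted. This mirrors
        // the assertTrue(lastAppliedIndex <= total) check in the test above.
        long observed = lastApplied.get();
        if (observed > 30) {
            throw new AssertionError("index exceeded total transaction count");
        }
        System.out.println("observed " + observed + " of 30 transactions");
        t1.join();
        t2.join();
    }
}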

Example 5 with OMBucketCreateResponse

use of org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse in project ozone by apache.

the class TestOzoneManagerDoubleBufferWithOMResponse method createBucket.

/**
 * Creates an OMBucketCreateResponse for the specified volume and bucket.
 * @return OMBucketCreateResponse
 */
private OMBucketCreateResponse createBucket(String volumeName, String bucketName, long transactionID) {
    OzoneManagerProtocolProtos.OMRequest omRequest = OMRequestTestUtils.createBucketRequest(bucketName, volumeName, false, OzoneManagerProtocolProtos.StorageTypeProto.DISK);
    OMBucketCreateRequest omBucketCreateRequest = new OMBucketCreateRequest(omRequest);
    return (OMBucketCreateResponse) omBucketCreateRequest.validateAndUpdateCache(ozoneManager, transactionID, ozoneManagerDoubleBufferHelper);
}
Also used : OMBucketCreateRequest(org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest) OzoneManagerProtocolProtos(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos) OMBucketCreateResponse(org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse)

Aggregations

OMBucketCreateResponse (org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse): 5
OMBucketDeleteResponse (org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse): 3
ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue): 2
OMVolumeCreateResponse (org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse): 2
OzoneManagerProtocolProtos (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos): 2
Test (org.junit.Test): 2
IOException (java.io.IOException): 1
TransactionInfo (org.apache.hadoop.hdds.utils.TransactionInfo): 1
AuditLogger (org.apache.hadoop.ozone.audit.AuditLogger): 1
OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager): 1
OMMetrics (org.apache.hadoop.ozone.om.OMMetrics): 1
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 1
BucketLayout (org.apache.hadoop.ozone.om.helpers.BucketLayout): 1
OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo): 1
OmVolumeArgs (org.apache.hadoop.ozone.om.helpers.OmVolumeArgs): 1
OMBucketCreateRequest (org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest): 1
OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse): 1
BucketInfo (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo): 1
CreateBucketRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketRequest): 1
OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse): 1