
Example 6 with DatanodeStore

Use of org.apache.hadoop.ozone.container.metadata.DatanodeStore in project ozone by apache, from the class TestKeyValueContainer, method testDBProfileAffectsDBOptions.

@Test
public void testDBProfileAffectsDBOptions() throws Exception {
    // Create Container 1
    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
    DatanodeDBProfile outProfile1;
    try (ReferenceCountedDB db1 = BlockUtils.getDB(keyValueContainer.getContainerData(), CONF)) {
        DatanodeStore store1 = db1.getStore();
        Assert.assertTrue(store1 instanceof AbstractDatanodeStore);
        outProfile1 = ((AbstractDatanodeStore) store1).getDbProfile();
    }
    // Create Container 2 with different DBProfile in otherConf
    OzoneConfiguration otherConf = new OzoneConfiguration();
    // Use a dedicated profile for test
    otherConf.setEnum(HDDS_DB_PROFILE, DBProfile.SSD);
    keyValueContainerData = new KeyValueContainerData(2L, layout, (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString());
    keyValueContainer = new KeyValueContainer(keyValueContainerData, otherConf);
    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
    DatanodeDBProfile outProfile2;
    try (ReferenceCountedDB db2 = BlockUtils.getDB(keyValueContainer.getContainerData(), otherConf)) {
        DatanodeStore store2 = db2.getStore();
        Assert.assertTrue(store2 instanceof AbstractDatanodeStore);
        outProfile2 = ((AbstractDatanodeStore) store2).getDbProfile();
    }
    // DBOptions should be different
    Assert.assertNotEquals(outProfile1.getDBOptions().compactionReadaheadSize(), outProfile2.getDBOptions().compactionReadaheadSize());
}
Also used : DatanodeDBProfile(org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile) DatanodeStore(org.apache.hadoop.ozone.container.metadata.DatanodeStore) AbstractDatanodeStore(org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) Test(org.junit.Test)
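
As a complementary sketch (not part of the test itself), the same try-with-resources pattern can be used wherever a container's effective RocksDB options need to be inspected; keyValueContainer and CONF are assumed to be the fixture objects from the example above:

// Sketch: inspect the effective DB profile of the container created above.
try (ReferenceCountedDB db = BlockUtils.getDB(keyValueContainer.getContainerData(), CONF)) {
    DatanodeStore store = db.getStore();
    if (store instanceof AbstractDatanodeStore) {
        DatanodeDBProfile profile = ((AbstractDatanodeStore) store).getDbProfile();
        // The test's assertion relies on compactionReadaheadSize differing
        // between the two profiles in use.
        System.out.println(profile.getDBOptions().compactionReadaheadSize());
    }
}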

Example 7 with DatanodeStore

Use of org.apache.hadoop.ozone.container.metadata.DatanodeStore in project ozone by apache, from the class DeleteBlocksCommandHandler, method markBlocksForDeletionSchemaV2.

/**
 * Move a bunch of blocks from a container to the deleting state. This is a
 * metadata update; the actual deletes happen asynchronously.
 *
 * @param containerData - KeyValueContainerData of the container.
 * @param delTX - a block deletion transaction.
 * @param newDeletionBlocks - count of blocks already marked for deletion, to
 *                            which this transaction's blocks are added.
 * @param txnID - ID of the deletion transaction.
 * @throws IOException if an I/O error occurs.
 */
private void markBlocksForDeletionSchemaV2(KeyValueContainerData containerData, DeletedBlocksTransaction delTX, int newDeletionBlocks, long txnID) throws IOException {
    long containerId = delTX.getContainerID();
    if (!isTxnIdValid(containerId, containerData, delTX)) {
        return;
    }
    try (ReferenceCountedDB containerDB = BlockUtils.getDB(containerData, conf)) {
        DatanodeStore ds = containerDB.getStore();
        DatanodeStoreSchemaTwoImpl dnStoreTwoImpl = (DatanodeStoreSchemaTwoImpl) ds;
        Table<Long, DeletedBlocksTransaction> delTxTable = dnStoreTwoImpl.getDeleteTransactionTable();
        try (BatchOperation batch = containerDB.getStore().getBatchHandler().initBatchOperation()) {
            delTxTable.putWithBatch(batch, txnID, delTX);
            newDeletionBlocks += delTX.getLocalIDList().size();
            updateMetaData(containerData, delTX, newDeletionBlocks, containerDB, batch);
            containerDB.getStore().getBatchHandler().commitBatchOperation(batch);
        }
    }
}
Also used : DatanodeStore(org.apache.hadoop.ozone.container.metadata.DatanodeStore) BatchOperation(org.apache.hadoop.hdds.utils.db.BatchOperation) DatanodeStoreSchemaTwoImpl(org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) DeletedBlocksTransaction(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction)
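
For orientation, the transaction stored above can later be read back from the same delete-transaction table. A rough sketch with an illustrative helper name (not a method of DeleteBlocksCommandHandler), assuming the same conf field is in scope:

// Illustrative read-back of a schema V2 delete transaction by its ID.
private DeletedBlocksTransaction getTxnSchemaV2(KeyValueContainerData containerData,
    long txnID) throws IOException {
    try (ReferenceCountedDB containerDB = BlockUtils.getDB(containerData, conf)) {
        DatanodeStoreSchemaTwoImpl store =
            (DatanodeStoreSchemaTwoImpl) containerDB.getStore();
        // Returns null if the transaction has already been processed and removed.
        return store.getDeleteTransactionTable().get(txnID);
    }
}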

Example 8 with DatanodeStore

Use of org.apache.hadoop.ozone.container.metadata.DatanodeStore in project ozone by apache, from the class ContainerCache, method getDB.

/**
 * Returns a DB handle from the cache if one is available, and creates the handle otherwise.
 *
 * @param containerID - ID of the container.
 * @param containerDBType - DB type of the container.
 * @param containerDBPath - DB path of the container.
 * @param schemaVersion - Schema version of the container.
 * @param conf - Hadoop Configuration.
 * @return ReferenceCountedDB - a reference-counted handle to the container DB.
 * @throws IOException if the DB cannot be opened.
 */
public ReferenceCountedDB getDB(long containerID, String containerDBType, String containerDBPath, String schemaVersion, ConfigurationSource conf) throws IOException {
    Preconditions.checkState(containerID >= 0, "Container ID cannot be negative.");
    ReferenceCountedDB db;
    Lock containerLock = rocksDBLock.get(containerDBPath);
    containerLock.lock();
    metrics.incNumDbGetOps();
    try {
        lock.lock();
        try {
            db = (ReferenceCountedDB) this.get(containerDBPath);
            if (db != null) {
                metrics.incNumCacheHits();
                db.incrementReference();
                return db;
            } else {
                metrics.incNumCacheMisses();
            }
        } finally {
            lock.unlock();
        }
        try {
            long start = Time.monotonicNow();
            DatanodeStore store = BlockUtils.getUncachedDatanodeStore(containerID, containerDBPath, schemaVersion, conf, false);
            db = new ReferenceCountedDB(store, containerDBPath);
            metrics.incDbOpenLatency(Time.monotonicNow() - start);
        } catch (Exception e) {
            LOG.error("Error opening DB. Container:{} ContainerPath:{}", containerID, containerDBPath, e);
            throw e;
        }
        lock.lock();
        try {
            ReferenceCountedDB currentDB = (ReferenceCountedDB) this.get(containerDBPath);
            if (currentDB != null) {
                // increment the reference before returning the object
                currentDB.incrementReference();
                // clean the db created in previous step
                cleanupDb(db);
                return currentDB;
            } else {
                this.put(containerDBPath, db);
                // increment the reference before returning the object
                db.incrementReference();
                return db;
            }
        } finally {
            lock.unlock();
        }
    } finally {
        containerLock.unlock();
    }
}
Also used : DatanodeStore(org.apache.hadoop.ozone.container.metadata.DatanodeStore) IOException(java.io.IOException) ReentrantLock(java.util.concurrent.locks.ReentrantLock) Lock(java.util.concurrent.locks.Lock)
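
The second lookup after opening the store is a double-checked pattern: another thread may have opened and cached the same path while lock was released for the slow open, in which case the freshly opened store is discarded and the cached one is returned. A minimal self-contained sketch of that idea with hypothetical names (not Ozone API):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/** Hypothetical check / open / re-check cache, mirroring the pattern above. */
class HandleCache {
    /** Minimal stand-in for a closable DB handle. */
    static class Handle implements AutoCloseable {
        static Handle open(String path) { return new Handle(); }
        @Override public void close() { }
    }

    private final Map<String, Handle> cache = new ConcurrentHashMap<>();

    Handle get(String path) {
        Handle cached = cache.get(path);                 // first check: cheap, common case
        if (cached != null) {
            return cached;
        }
        Handle opened = Handle.open(path);               // slow open, outside any map lock
        Handle winner = cache.putIfAbsent(path, opened); // second check: did someone beat us?
        if (winner != null) {
            opened.close();                              // lost the race: discard our handle
            return winner;
        }
        return opened;
    }
}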

Example 9 with DatanodeStore

Use of org.apache.hadoop.ozone.container.metadata.DatanodeStore in project ozone by apache, from the class TestBlockDeletingService, method createTxn.

private void createTxn(KeyValueContainerData data, List<Long> containerBlocks, int txnID, long containerID) {
    try (ReferenceCountedDB metadata = BlockUtils.getDB(data, conf)) {
        StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction dtx =
            StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction.newBuilder()
                .setTxID(txnID)
                .setContainerID(containerID)
                .addAllLocalID(containerBlocks)
                .setCount(0)
                .build();
        try (BatchOperation batch = metadata.getStore().getBatchHandler().initBatchOperation()) {
            DatanodeStore ds = metadata.getStore();
            DatanodeStoreSchemaTwoImpl dnStoreTwoImpl = (DatanodeStoreSchemaTwoImpl) ds;
            dnStoreTwoImpl.getDeleteTransactionTable().putWithBatch(batch, (long) txnID, dtx);
            metadata.getStore().getBatchHandler().commitBatchOperation(batch);
        }
    } catch (IOException exception) {
        LOG.warn("Transaction creation was not successful for txnID: {} consisting of {} blocks.", txnID, containerBlocks.size(), exception);
    }
}
Also used : DatanodeStore(org.apache.hadoop.ozone.container.metadata.DatanodeStore) BatchOperation(org.apache.hadoop.hdds.utils.db.BatchOperation) IOException(java.io.IOException) StorageContainerDatanodeProtocolProtos(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos) DatanodeStoreSchemaTwoImpl(org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB)
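
A hypothetical call site for the helper above; the block IDs and transaction ID are placeholders, and containerData is assumed to come from the test's container setup:

// Mark three placeholder blocks of the container as deleted in one transaction.
// Requires java.util.Arrays and java.util.List.
List<Long> blocksToDelete = Arrays.asList(1L, 2L, 3L);
createTxn(containerData, blocksToDelete, 1, containerData.getContainerID());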

Aggregations

DatanodeStore (org.apache.hadoop.ozone.container.metadata.DatanodeStore): 9
ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB): 7
DatanodeStoreSchemaTwoImpl (org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl): 5
IOException (java.io.IOException): 4
DeletedBlocksTransaction (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction): 2
BatchOperation (org.apache.hadoop.hdds.utils.db.BatchOperation): 2
Test (org.junit.Test): 2
File (java.io.File): 1
ArrayList (java.util.ArrayList): 1
List (java.util.List): 1
Map (java.util.Map): 1
Lock (java.util.concurrent.locks.Lock): 1
ReentrantLock (java.util.concurrent.locks.ReentrantLock): 1
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 1
StorageContainerDatanodeProtocolProtos (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos): 1
MetadataKeyFilters (org.apache.hadoop.hdds.utils.MetadataKeyFilters): 1
Table (org.apache.hadoop.hdds.utils.db.Table): 1
DatanodeDBProfile (org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile): 1
AbstractDatanodeStore (org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore): 1
DatanodeStoreSchemaOneImpl (org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaOneImpl): 1