Use of org.apache.hadoop.ozone.container.metadata.DatanodeStore in project ozone by apache.
Class TestKeyValueContainer, method testDBProfileAffectsDBOptions.
@Test
public void testDBProfileAffectsDBOptions() throws Exception {
  // Create Container 1
  keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
  DatanodeDBProfile outProfile1;
  try (ReferenceCountedDB db1 =
      BlockUtils.getDB(keyValueContainer.getContainerData(), CONF)) {
    DatanodeStore store1 = db1.getStore();
    Assert.assertTrue(store1 instanceof AbstractDatanodeStore);
    outProfile1 = ((AbstractDatanodeStore) store1).getDbProfile();
  }

  // Create Container 2 with a different DBProfile in otherConf
  OzoneConfiguration otherConf = new OzoneConfiguration();
  // Use a dedicated profile for this test
  otherConf.setEnum(HDDS_DB_PROFILE, DBProfile.SSD);

  keyValueContainerData = new KeyValueContainerData(2L, layout,
      (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
      datanodeId.toString());
  keyValueContainer = new KeyValueContainer(keyValueContainerData, otherConf);
  keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);

  DatanodeDBProfile outProfile2;
  try (ReferenceCountedDB db2 =
      BlockUtils.getDB(keyValueContainer.getContainerData(), otherConf)) {
    DatanodeStore store2 = db2.getStore();
    Assert.assertTrue(store2 instanceof AbstractDatanodeStore);
    outProfile2 = ((AbstractDatanodeStore) store2).getDbProfile();
  }

  // The DBOptions should differ between the two profiles
  Assert.assertNotEquals(outProfile1.getDBOptions().compactionReadaheadSize(),
      outProfile2.getDBOptions().compactionReadaheadSize());
}
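For reference, the profile this test exercises is driven purely by configuration. A minimal sketch of that lookup, assuming DBProfile.DISK as the fallback default (the HDDS_DB_PROFILE key and DBProfile.SSD value come from the test above):

// Sketch: resolving the active DB profile from configuration.
// Assumption: DBProfile.DISK is the default when no profile is set.
OzoneConfiguration conf = new OzoneConfiguration();
conf.setEnum(HDDS_DB_PROFILE, DBProfile.SSD);
DBProfile profile = conf.getEnum(HDDS_DB_PROFILE, DBProfile.DISK);
// The resolved profile then supplies the RocksDB tuning the test compares,
// e.g. profile.getDBOptions().compactionReadaheadSize().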
Use of org.apache.hadoop.ozone.container.metadata.DatanodeStore in project ozone by apache.
Class DeleteBlocksCommandHandler, method markBlocksForDeletionSchemaV2.
/**
 * Moves a batch of blocks in a container to the deleting state. This is a
 * metadata-only update; the actual deletes happen asynchronously.
 *
 * @param containerData - KeyValueContainerData of the target container.
 * @param delTX a block deletion transaction.
 * @param newDeletionBlocks running count of blocks newly marked for deletion.
 * @param txnID transaction ID used as the key in the delete transaction table.
 * @throws IOException if an I/O error occurs.
 */
private void markBlocksForDeletionSchemaV2(
    KeyValueContainerData containerData, DeletedBlocksTransaction delTX,
    int newDeletionBlocks, long txnID) throws IOException {
  long containerId = delTX.getContainerID();
  if (!isTxnIdValid(containerId, containerData, delTX)) {
    return;
  }
  try (ReferenceCountedDB containerDB = BlockUtils.getDB(containerData, conf)) {
    DatanodeStore ds = containerDB.getStore();
    DatanodeStoreSchemaTwoImpl dnStoreTwoImpl = (DatanodeStoreSchemaTwoImpl) ds;
    Table<Long, DeletedBlocksTransaction> delTxTable =
        dnStoreTwoImpl.getDeleteTransactionTable();
    try (BatchOperation batch =
        containerDB.getStore().getBatchHandler().initBatchOperation()) {
      delTxTable.putWithBatch(batch, txnID, delTX);
      newDeletionBlocks += delTX.getLocalIDList().size();
      updateMetaData(containerData, delTX, newDeletionBlocks, containerDB, batch);
      containerDB.getStore().getBatchHandler().commitBatchOperation(batch);
    }
  }
}
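For context, this V2 method sits behind a schema-version check in the handler. A hedged sketch of that dispatch; the V1 counterpart's name and parameter list are assumptions modeled on the V2 naming:

// Sketch: dispatch on the container's schema version inside the handler.
// markBlocksForDeletionSchemaV1 and its signature are assumed here.
if (containerData.getSchemaVersion().equals(OzoneConsts.SCHEMA_V1)) {
  markBlocksForDeletionSchemaV1(containerData, delTX);
} else if (containerData.getSchemaVersion().equals(OzoneConsts.SCHEMA_V2)) {
  markBlocksForDeletionSchemaV2(containerData, delTX, newDeletionBlocks,
      delTX.getTxID());
}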
Use of org.apache.hadoop.ozone.container.metadata.DatanodeStore in project ozone by apache.
Class ContainerCache, method getDB.
/**
 * Returns a DB handle if one is cached; otherwise opens and caches a new one.
 *
 * @param containerID - ID of the container.
 * @param containerDBType - DB type of the container.
 * @param containerDBPath - DB path of the container.
 * @param schemaVersion - Schema version of the container.
 * @param conf - Hadoop Configuration.
 * @return ReferenceCountedDB handle with its reference count incremented.
 * @throws IOException if the DB cannot be opened.
 */
public ReferenceCountedDB getDB(long containerID, String containerDBType,
    String containerDBPath, String schemaVersion, ConfigurationSource conf)
    throws IOException {
  Preconditions.checkState(containerID >= 0,
      "Container ID cannot be negative.");
  ReferenceCountedDB db;
  Lock containerLock = rocksDBLock.get(containerDBPath);
  containerLock.lock();
  metrics.incNumDbGetOps();
  try {
    lock.lock();
    try {
      db = (ReferenceCountedDB) this.get(containerDBPath);
      if (db != null) {
        metrics.incNumCacheHits();
        db.incrementReference();
        return db;
      } else {
        metrics.incNumCacheMisses();
      }
    } finally {
      lock.unlock();
    }
    try {
      long start = Time.monotonicNow();
      DatanodeStore store = BlockUtils.getUncachedDatanodeStore(
          containerID, containerDBPath, schemaVersion, conf, false);
      db = new ReferenceCountedDB(store, containerDBPath);
      metrics.incDbOpenLatency(Time.monotonicNow() - start);
    } catch (Exception e) {
      LOG.error("Error opening DB. Container:{} ContainerPath:{}",
          containerID, containerDBPath, e);
      throw e;
    }
    lock.lock();
    try {
      ReferenceCountedDB currentDB =
          (ReferenceCountedDB) this.get(containerDBPath);
      if (currentDB != null) {
        // increment the reference before returning the object
        currentDB.incrementReference();
        // clean up the DB handle created in the previous step
        cleanupDb(db);
        return currentDB;
      } else {
        this.put(containerDBPath, db);
        // increment the reference before returning the object
        db.incrementReference();
        return db;
      }
    } finally {
      lock.unlock();
    }
  } finally {
    containerLock.unlock();
  }
}
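A usage sketch of the handle lifecycle, assuming a ContainerCache instance named cache; the container ID, DB type, and path below are placeholders, and try-with-resources releases the reference taken by getDB, matching how the other snippets here use ReferenceCountedDB:

// Illustrative only: container ID, DB type, and path are placeholders.
try (ReferenceCountedDB db = cache.getDB(7L, "RocksDB",
    "/data/hdds/container7/metadata/container.db", OzoneConsts.SCHEMA_V2, conf)) {
  DatanodeStore store = db.getStore();
  // Read or write through the store's tables here; closing the handle
  // decrements the reference count that getDB incremented.
}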
Use of org.apache.hadoop.ozone.container.metadata.DatanodeStore in project ozone by apache.
Class TestBlockDeletingService, method createTxn.
private void createTxn(KeyValueContainerData data, List<Long> containerBlocks,
    int txnID, long containerID) {
  try (ReferenceCountedDB metadata = BlockUtils.getDB(data, conf)) {
    StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction dtx =
        StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction
            .newBuilder()
            .setTxID(txnID)
            .setContainerID(containerID)
            .addAllLocalID(containerBlocks)
            .setCount(0)
            .build();
    try (BatchOperation batch =
        metadata.getStore().getBatchHandler().initBatchOperation()) {
      DatanodeStore ds = metadata.getStore();
      DatanodeStoreSchemaTwoImpl dnStoreTwoImpl = (DatanodeStoreSchemaTwoImpl) ds;
      dnStoreTwoImpl.getDeleteTransactionTable()
          .putWithBatch(batch, (long) txnID, dtx);
      metadata.getStore().getBatchHandler().commitBatchOperation(batch);
    }
  } catch (IOException exception) {
    // Include the exception so the stack trace is not lost.
    LOG.warn("Transaction creation was not successful for txnID: " + txnID
        + " consisting of " + containerBlocks.size() + " blocks.", exception);
  }
}
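An illustrative invocation of the helper above; the block IDs and transaction ID are placeholder test values:

// Placeholders: queue three local block IDs for deletion in transaction 1.
List<Long> blocks = Arrays.asList(101L, 102L, 103L);
createTxn(data, blocks, 1, data.getContainerID());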