Search in sources:

Example 1 with DatanodeStore

use of org.apache.hadoop.ozone.container.metadata.DatanodeStore in project ozone by apache.

the class KeyValueContainerUtil method createContainerMetaData.

/**
 * Creates the metadata path, chunks path, and metadata DB for the specified
 * container.
 *
 * @param containerID ID of the container.
 * @param containerMetaDataPath Path to the container's metadata directory.
 * @param chunksPath Path where chunks for this container should be stored.
 * @param dbFile Path to the container's .db file.
 * @param schemaVersion The schema version of the container. If this method
 *                      has not been updated after a schema version addition
 *                      and does not recognize the latest SchemaVersion, an
 *                      {@link IllegalArgumentException} is thrown.
 * @param conf The configuration to use for this container.
 * @throws IOException if the directories or the metadata DB cannot be created.
 */
public static void createContainerMetaData(long containerID, File containerMetaDataPath, File chunksPath, File dbFile, String schemaVersion, ConfigurationSource conf) throws IOException {
    Preconditions.checkNotNull(containerMetaDataPath);
    Preconditions.checkNotNull(conf);
    if (!containerMetaDataPath.mkdirs()) {
        LOG.error("Unable to create directory for metadata storage. Path: {}", containerMetaDataPath);
        throw new IOException("Unable to create directory for metadata storage." + " Path: " + containerMetaDataPath);
    }
    if (!chunksPath.mkdirs()) {
        LOG.error("Unable to create chunks directory Container {}", chunksPath);
        // clean up container metadata path and metadata db
        FileUtils.deleteDirectory(containerMetaDataPath);
        FileUtils.deleteDirectory(containerMetaDataPath.getParentFile());
        throw new IOException("Unable to create directory for data storage." + " Path: " + chunksPath);
    }
    DatanodeStore store;
    if (schemaVersion.equals(OzoneConsts.SCHEMA_V1)) {
        store = new DatanodeStoreSchemaOneImpl(conf, containerID, dbFile.getAbsolutePath(), false);
    } else if (schemaVersion.equals(OzoneConsts.SCHEMA_V2)) {
        store = new DatanodeStoreSchemaTwoImpl(conf, containerID, dbFile.getAbsolutePath(), false);
    } else {
        throw new IllegalArgumentException("Unrecognized schema version for container: " + schemaVersion);
    }
    ReferenceCountedDB db = new ReferenceCountedDB(store, dbFile.getAbsolutePath());
    // add db handler into cache
    BlockUtils.addDB(db, dbFile.getAbsolutePath(), conf);
}
Also used : DatanodeStoreSchemaOneImpl(org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaOneImpl) DatanodeStore(org.apache.hadoop.ozone.container.metadata.DatanodeStore) IOException(java.io.IOException) DatanodeStoreSchemaTwoImpl(org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB)
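Below is a minimal, hypothetical sketch of how this helper might be invoked when a new container is laid out on a datanode volume. The container ID, directory layout, and .db file name are made up for illustration; the method signature and the SCHEMA_V2 constant come from the snippet above.

import java.io.File;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;

public class CreateContainerExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical on-disk layout for container 17 under one volume.
        long containerID = 17L;
        File containerDir = new File("/data/hdds/volume1/containers/17");
        File metadataPath = new File(containerDir, "metadata");
        File chunksPath = new File(containerDir, "chunks");
        File dbFile = new File(metadataPath, containerID + "-dn-container.db");
        OzoneConfiguration conf = new OzoneConfiguration();
        // Creates both directories, builds a schema V2 RocksDB instance,
        // and registers the handle in the datanode's container DB cache.
        KeyValueContainerUtil.createContainerMetaData(containerID,
            metadataPath, chunksPath, dbFile, OzoneConsts.SCHEMA_V2, conf);
    }
}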

Example 2 with DatanodeStore

use of org.apache.hadoop.ozone.container.metadata.DatanodeStore in project ozone by apache.

the class KeyValueContainerUtil method parseKVContainerData.

/**
 * Parses KeyValueContainerData and verifies its checksum. Sets block-related
 * metadata: block commit sequence id, block count, bytes used, pending
 * delete block count, and delete transaction id.
 *
 * @param kvContainerData The container data to parse and populate.
 * @param config The configuration to use.
 * @throws IOException if the checksum cannot be verified or the DB cannot
 *                     be read.
 */
public static void parseKVContainerData(KeyValueContainerData kvContainerData, ConfigurationSource config) throws IOException {
    long containerID = kvContainerData.getContainerID();
    File metadataPath = new File(kvContainerData.getMetadataPath());
    // Verify Checksum
    ContainerUtils.verifyChecksum(kvContainerData, config);
    File dbFile = KeyValueContainerLocationUtil.getContainerDBFile(metadataPath, containerID);
    if (!dbFile.exists()) {
        LOG.error("Container DB file is missing for ContainerID {}. " + "Skipping loading of this container.", containerID);
        // Don't further process this container, as it is missing db file.
        return;
    }
    kvContainerData.setDbFile(dbFile);
    if (kvContainerData.getSchemaVersion() == null) {
        // If this container has not specified a schema version, it is in the old
        // format with one default column family.
        kvContainerData.setSchemaVersion(OzoneConsts.SCHEMA_V1);
    }
    boolean isBlockMetadataSet = false;
    ReferenceCountedDB cachedDB = null;
    DatanodeStore store = null;
    try {
        try {
            boolean readOnly = ContainerInspectorUtil.isReadOnly(ContainerProtos.ContainerType.KeyValueContainer);
            store = BlockUtils.getUncachedDatanodeStore(kvContainerData, config, readOnly);
        } catch (IOException e) {
            // If an exception is thrown, then it may indicate the RocksDB is
            // already open in the container cache. As this code is only executed at
            // DN startup, this should only happen in the tests.
            cachedDB = BlockUtils.getDB(kvContainerData, config);
            store = cachedDB.getStore();
            LOG.warn("Attempt to get an uncached RocksDB handle failed and an " + "instance was retrieved from the cache. This should only happen " + "in tests");
        }
        Table<String, Long> metadataTable = store.getMetadataTable();
        // Set pending deleted block count.
        Long pendingDeleteBlockCount = metadataTable.get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT);
        if (pendingDeleteBlockCount != null) {
            kvContainerData.incrPendingDeletionBlocks(pendingDeleteBlockCount);
    } else {
        // Fall back to counting keys with the deleting prefix in the
        // block data table.
        MetadataKeyFilters.KeyPrefixFilter filter = MetadataKeyFilters.getDeletingKeyFilter();
            int numPendingDeletionBlocks = store.getBlockDataTable().getSequentialRangeKVs(null, Integer.MAX_VALUE, filter).size();
            kvContainerData.incrPendingDeletionBlocks(numPendingDeletionBlocks);
        }
        // Set delete transaction id.
        Long delTxnId = metadataTable.get(OzoneConsts.DELETE_TRANSACTION_KEY);
        if (delTxnId != null) {
            kvContainerData.updateDeleteTransactionId(delTxnId);
        }
        // Set BlockCommitSequenceId.
        Long bcsId = metadataTable.get(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID);
        if (bcsId != null) {
            kvContainerData.updateBlockCommitSequenceId(bcsId);
        }
        // Set bytes used.
        // commitSpace for Open Containers relies on usedBytes
        Long bytesUsed = metadataTable.get(OzoneConsts.CONTAINER_BYTES_USED);
        if (bytesUsed != null) {
            isBlockMetadataSet = true;
            kvContainerData.setBytesUsed(bytesUsed);
        }
        // Set block count.
        Long blockCount = metadataTable.get(OzoneConsts.BLOCK_COUNT);
        if (blockCount != null) {
            isBlockMetadataSet = true;
            kvContainerData.setBlockCount(blockCount);
        }
        if (!isBlockMetadataSet) {
            initializeUsedBytesAndBlockCount(store, kvContainerData);
        }
        // If the container is missing a chunks directory, possibly due to the
        // bug fixed by HDDS-6235, create it here.
        File chunksDir = new File(kvContainerData.getChunksPath());
        if (!chunksDir.exists()) {
            Files.createDirectories(chunksDir.toPath());
        }
        // Run advanced container inspection/repair operations if specified on
        // startup. If this method is called but not as a part of startup,
        // The inspectors will be unloaded and this will be a no-op.
        ContainerInspectorUtil.process(kvContainerData, store);
    } finally {
        if (cachedDB != null) {
            // If we get a cached instance, calling close simply decrements the
            // reference count.
            cachedDB.close();
    } else if (store != null) {
        // We only stop the store when cachedDB is null; otherwise stopping it
        // would close the RocksDB handle held by the cache and the next
        // reader would fail.
            try {
                store.stop();
            } catch (IOException e) {
                // Rethrow IOException unchanged; only unexpected exception
                // types are wrapped below.
                throw e;
            } catch (Exception e) {
                throw new RuntimeException("Unexpected exception closing the " + "RocksDB when loading containers", e);
            }
        }
    }
}
Also used : MetadataKeyFilters(org.apache.hadoop.hdds.utils.MetadataKeyFilters) IOException(java.io.IOException) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) DatanodeStore(org.apache.hadoop.ozone.container.metadata.DatanodeStore) File(java.io.File)
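For orientation, here is a condensed, hypothetical sketch of the happy-path metadata lookups this method performs, assuming a populated KeyValueContainerData and configuration; the uncached-store fallback, checksum verification, and repair logic are omitted. The table keys are the OzoneConsts constants used in the snippet above.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;

static void readContainerStats(KeyValueContainerData kvContainerData,
        OzoneConfiguration config) throws Exception {
    // try-with-resources decrements the cache's reference count on exit.
    try (ReferenceCountedDB db = BlockUtils.getDB(kvContainerData, config)) {
        Table<String, Long> metadataTable = db.getStore().getMetadataTable();
        Long bytesUsed = metadataTable.get(OzoneConsts.CONTAINER_BYTES_USED);
        Long blockCount = metadataTable.get(OzoneConsts.BLOCK_COUNT);
        if (bytesUsed != null) {
            kvContainerData.setBytesUsed(bytesUsed);
        }
        if (blockCount != null) {
            kvContainerData.setBlockCount(blockCount);
        }
    }
}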

Example 3 with DatanodeStore

use of org.apache.hadoop.ozone.container.metadata.DatanodeStore in project ozone by apache.

the class TestStorageContainerManagerHelper method verifyBlocksWithTxnTable.

public boolean verifyBlocksWithTxnTable(Map<Long, List<Long>> containerBlocks) throws IOException {
    for (Map.Entry<Long, List<Long>> entry : containerBlocks.entrySet()) {
        ReferenceCountedDB meta = getContainerMetadata(entry.getKey());
        DatanodeStore ds = meta.getStore();
        DatanodeStoreSchemaTwoImpl dnStoreTwoImpl = (DatanodeStoreSchemaTwoImpl) ds;
        List<? extends Table.KeyValue<Long, DeletedBlocksTransaction>> txnsInTxnTable = dnStoreTwoImpl.getDeleteTransactionTable().getRangeKVs(null, Integer.MAX_VALUE, null);
        // Collect the local block IDs recorded across all delete transactions.
        List<Long> conID = new ArrayList<>();
        for (Table.KeyValue<Long, DeletedBlocksTransaction> txn : txnsInTxnTable) {
            conID.addAll(txn.getValue().getLocalIDList());
        }
        if (!conID.equals(containerBlocks.get(entry.getKey()))) {
            return false;
        }
        meta.close();
    }
    return true;
}
Also used : Table(org.apache.hadoop.hdds.utils.db.Table) ArrayList(java.util.ArrayList) DatanodeStoreSchemaTwoImpl(org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) DatanodeStore(org.apache.hadoop.ozone.container.metadata.DatanodeStore) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map) DeletedBlocksTransaction(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction)
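A hypothetical usage sketch for this verifier: build the expected container-to-blocks map and compare it against the delete transaction tables on disk. The helper's construction is elided, and the container and block IDs are illustrative only.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// `helper` is an already-constructed TestStorageContainerManagerHelper.
Map<Long, List<Long>> containerBlocks = new HashMap<>();
// Expect container 1 to hold pending delete transactions for blocks 100-102.
containerBlocks.put(1L, Arrays.asList(100L, 101L, 102L));
boolean matches = helper.verifyBlocksWithTxnTable(containerBlocks);
// Returns false on the first container whose transaction table does not
// list exactly these local IDs, in order.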

Example 4 with DatanodeStore

use of org.apache.hadoop.ozone.container.metadata.DatanodeStore in project ozone by apache.

the class TestSchemaOneBackwardsCompatibility method testDirectTableIterationDisabled.

/**
 * Because all tables in schema version one map back to the default table,
 * directly iterating any of the table instances should be forbidden.
 * Otherwise, the iterators for each table would read the entire default
 * table, return all database contents, and yield unexpected results.
 * @throws Exception
 */
@Test
public void testDirectTableIterationDisabled() throws Exception {
    try (ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
        DatanodeStore store = refCountedDB.getStore();
        assertTableIteratorUnsupported(store.getMetadataTable());
        assertTableIteratorUnsupported(store.getBlockDataTable());
        assertTableIteratorUnsupported(store.getDeletedBlocksTable());
    }
}
Also used : DatanodeStore(org.apache.hadoop.ozone.container.metadata.DatanodeStore) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) Test(org.junit.Test)
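The assertTableIteratorUnsupported helper is not shown in this snippet; a plausible reconstruction, assuming Table exposes an iterator() method as in other HDDS table usages, would look like this:

import java.io.IOException;
import org.apache.hadoop.hdds.utils.db.Table;
import org.junit.Assert;

// Hypothetical reconstruction: iteration must fail fast for schema V1
// tables, since they all alias the single default column family.
private void assertTableIteratorUnsupported(Table<?, ?> table)
        throws IOException {
    try {
        table.iterator();
        Assert.fail("Iterating a schema V1 table should be unsupported.");
    } catch (UnsupportedOperationException ex) {
        // Expected.
    }
}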

Example 5 with DatanodeStore

use of org.apache.hadoop.ozone.container.metadata.DatanodeStore in project ozone by apache.

the class TestContainerCache method createContainerDB.

private void createContainerDB(OzoneConfiguration conf, File dbFile) throws Exception {
    DatanodeStore store = new DatanodeStoreSchemaTwoImpl(conf, 1, dbFile.getAbsolutePath(), false);
    // We close here since SCM pre-creates containers; the DB handle will be
    // reopened and placed in the cache when keys are created in the
    // container.
    store.stop();
}
Also used : DatanodeStore(org.apache.hadoop.ozone.container.metadata.DatanodeStore) DatanodeStoreSchemaTwoImpl(org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl)
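A brief, hypothetical test usage, assuming testDir is a temporary directory provided by the test fixture: the DB is pre-created and immediately stopped, mirroring SCM's pre-creation, and only reopened later through the cache.

import java.io.File;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

// testDir is assumed to come from a TemporaryFolder rule or similar.
File dbFile = new File(testDir, "container1.db");
createContainerDB(new OzoneConfiguration(), dbFile);
// The RocksDB directory now exists on disk but no handle is held open;
// the first write path will reopen it and place the handle in the cache.
assert dbFile.exists();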

Aggregations

DatanodeStore (org.apache.hadoop.ozone.container.metadata.DatanodeStore) 9
ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) 7
DatanodeStoreSchemaTwoImpl (org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl) 5
IOException (java.io.IOException) 4
DeletedBlocksTransaction (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction) 2
BatchOperation (org.apache.hadoop.hdds.utils.db.BatchOperation) 2
Test (org.junit.Test) 2
File (java.io.File) 1
ArrayList (java.util.ArrayList) 1
List (java.util.List) 1
Map (java.util.Map) 1
Lock (java.util.concurrent.locks.Lock) 1
ReentrantLock (java.util.concurrent.locks.ReentrantLock) 1
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration) 1
StorageContainerDatanodeProtocolProtos (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos) 1
MetadataKeyFilters (org.apache.hadoop.hdds.utils.MetadataKeyFilters) 1
Table (org.apache.hadoop.hdds.utils.db.Table) 1
DatanodeDBProfile (org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile) 1
AbstractDatanodeStore (org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore) 1
DatanodeStoreSchemaOneImpl (org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaOneImpl) 1