
Example 1 with DBStore

use of org.apache.hadoop.hdds.utils.db.DBStore in project ozone by apache.

In class TestSCMInstallSnapshot, method testInstallCheckPoint.

@Test
public void testInstallCheckPoint() throws Exception {
    DBCheckpoint checkpoint = downloadSnapshot();
    StorageContainerManager scm = cluster.getStorageContainerManager();
    DBStore db = HAUtils.loadDB(conf,
        checkpoint.getCheckpointLocation().getParent().toFile(),
        checkpoint.getCheckpointLocation().getFileName().toString(),
        new SCMDBDefinition());
    Assert.assertNotNull(db);
    // Hack the transaction index in the checkpoint to ensure the
    // checkpointed transaction index is higher than the index the checkpoint
    // was downloaded with.
    HAUtils.getTransactionInfoTable(db, new SCMDBDefinition())
        .put(OzoneConsts.TRANSACTION_INFO_KEY, TransactionInfo.builder()
            .setCurrentTerm(10).setTransactionIndex(100).build());
    db.close();
    ContainerID cid = scm.getContainerManager().getContainers().get(0).containerID();
    PipelineID pipelineID = scm.getPipelineManager().getPipelines().get(0).getId();
    scm.getScmMetadataStore().getPipelineTable().delete(pipelineID);
    scm.getContainerManager().deleteContainer(cid);
    Assert.assertNull(scm.getScmMetadataStore().getPipelineTable().get(pipelineID));
    Assert.assertFalse(scm.getContainerManager().containerExist(cid));
    SCMStateMachine sm = scm.getScmHAManager().getRatisServer().getSCMStateMachine();
    sm.pause();
    sm.setInstallingDBCheckpoint(checkpoint);
    sm.reinitialize();
    Assert.assertNotNull(scm.getScmMetadataStore().getPipelineTable().get(pipelineID));
    Assert.assertNotNull(scm.getScmMetadataStore().getContainerTable().get(cid));
    Assert.assertTrue(scm.getPipelineManager().containsPipeline(pipelineID));
    Assert.assertTrue(scm.getContainerManager().containerExist(cid));
    Assert.assertEquals(100, scm.getScmMetadataStore().getTransactionInfoTable()
        .get(OzoneConsts.TRANSACTION_INFO_KEY).getTransactionIndex());
    Assert.assertEquals(100, scm.getScmHAManager().asSCMHADBTransactionBuffer()
        .getLatestTrxInfo().getTermIndex().getIndex());
}
Also used : StorageContainerManager(org.apache.hadoop.hdds.scm.server.StorageContainerManager) ContainerID(org.apache.hadoop.hdds.scm.container.ContainerID) DBCheckpoint(org.apache.hadoop.hdds.utils.db.DBCheckpoint) SCMDBDefinition(org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition) PipelineID(org.apache.hadoop.hdds.scm.pipeline.PipelineID) SCMStateMachine(org.apache.hadoop.hdds.scm.ha.SCMStateMachine) DBStore(org.apache.hadoop.hdds.utils.db.DBStore) Test(org.junit.Test)
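
The write into the checkpoint can also be sanity-checked before the install. Below is a minimal sketch of an extra check that could go right after db.close() in the test above; it re-opens the checkpoint with the same HAUtils.loadDB and getTransactionInfoTable calls already used in the test and reads the hacked index back. Nothing here is from the Ozone source beyond those calls; the check itself is an assumption about what one might add.

// Re-open the checkpoint DB and verify the TransactionInfo written above.
DBStore verifyDb = HAUtils.loadDB(conf,
    checkpoint.getCheckpointLocation().getParent().toFile(),
    checkpoint.getCheckpointLocation().getFileName().toString(),
    new SCMDBDefinition());
try {
    TransactionInfo info = HAUtils
        .getTransactionInfoTable(verifyDb, new SCMDBDefinition())
        .get(OzoneConsts.TRANSACTION_INFO_KEY);
    Assert.assertEquals(100, info.getTransactionIndex());
} finally {
    // Close the handle so the checkpoint directory is not held open.
    verifyDb.close();
}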

Example 2 with DBStore

use of org.apache.hadoop.hdds.utils.db.DBStore in project ozone by apache.

In class HAUtils, method getTransactionInfoFromDB.

/**
 * Obtain TransactionInfo from the given DB.
 * @param tempConfig configuration used to open the DB
 * @param dbDir path to the DB directory
 * @param dbName name of the DB
 * @param definition DB definition describing the tables
 * @return TransactionInfo
 * @throws Exception
 */
private static TransactionInfo getTransactionInfoFromDB(
        OzoneConfiguration tempConfig, Path dbDir, String dbName,
        DBDefinition definition) throws Exception {
    DBStore dbStore = loadDB(tempConfig, dbDir.toFile(), dbName, definition);
    // Get the table whose value type is TransactionInfo. The transaction
    // info table names differ between SCM and OM.
    // If a new table is ever added whose value type is also TransactionInfo,
    // this logic may not work.
    Table<String, TransactionInfo> transactionInfoTable = getTransactionInfoTable(dbStore, definition);
    TransactionInfo transactionInfo = transactionInfoTable.get(TRANSACTION_INFO_KEY);
    dbStore.close();
    if (transactionInfo == null) {
        throw new IOException("Failed to read TransactionInfo from DB " + definition.getName() + " at " + dbDir);
    }
    return transactionInfo;
}
Also used : IOException(java.io.IOException) SupplierWithIOException(org.apache.hadoop.hdds.function.SupplierWithIOException) DBStore(org.apache.hadoop.hdds.utils.db.DBStore)
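
One subtlety in the method above: dbStore.close() runs only on the success path, so the handle stays open if the table read throws. Below is a hedged variant (readTransactionInfo is a hypothetical name, not an Ozone API) that defers the close to a finally block, using only the calls already shown in this listing.

private static TransactionInfo readTransactionInfo(
        OzoneConfiguration tempConfig, Path dbDir, String dbName,
        DBDefinition definition) throws Exception {
    DBStore dbStore = loadDB(tempConfig, dbDir.toFile(), dbName, definition);
    try {
        // Same lookup as above; the store is closed whether or not it succeeds.
        TransactionInfo transactionInfo =
            getTransactionInfoTable(dbStore, definition).get(TRANSACTION_INFO_KEY);
        if (transactionInfo == null) {
            throw new IOException("Failed to read TransactionInfo from DB "
                + definition.getName() + " at " + dbDir);
        }
        return transactionInfo;
    } finally {
        dbStore.close();
    }
}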

Example 3 with DBStore

use of org.apache.hadoop.hdds.utils.db.DBStore in project ozone by apache.

In class TestReplicationManager, method setup.

@Before
public void setup() throws IOException, InterruptedException, NodeNotFoundException, InvalidStateTransitionException {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setTimeDuration(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, 0, TimeUnit.SECONDS);
    scmLogs = GenericTestUtils.LogCapturer.captureLogs(ReplicationManager.LOG);
    containerManager = Mockito.mock(ContainerManager.class);
    nodeManager = new SimpleMockNodeManager();
    eventQueue = new EventQueue();
    scmhaManager = MockSCMHAManager.getInstance(true);
    testDir = GenericTestUtils.getTestDir(TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID());
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
    dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition());
    pipelineManager = Mockito.mock(PipelineManager.class);
    when(pipelineManager.containsPipeline(Mockito.any(PipelineID.class))).thenReturn(true);
    containerStateManager = ContainerStateManagerImpl.newBuilder()
        .setConfiguration(conf)
        .setPipelineManager(pipelineManager)
        .setRatisServer(scmhaManager.getRatisServer())
        .setContainerStore(SCMDBDefinition.CONTAINERS.getTable(dbStore))
        .setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
        .build();
    serviceManager = new SCMServiceManager();
    datanodeCommandHandler = new DatanodeCommandHandler();
    eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, datanodeCommandHandler);
    Mockito.when(containerManager.getContainers()).thenAnswer(invocation -> {
        Set<ContainerID> ids = containerStateManager.getContainerIDs();
        List<ContainerInfo> containers = new ArrayList<>();
        for (ContainerID id : ids) {
            containers.add(containerStateManager.getContainer(id));
        }
        return containers;
    });
    Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class)))
        .thenAnswer(invocation -> containerStateManager
            .getContainer((ContainerID) invocation.getArguments()[0]));
    Mockito.when(containerManager
        .getContainerReplicas(Mockito.any(ContainerID.class)))
        .thenAnswer(invocation -> containerStateManager
            .getContainerReplicas((ContainerID) invocation.getArguments()[0]));
    containerPlacementPolicy = Mockito.mock(PlacementPolicy.class);
    Mockito.when(containerPlacementPolicy.chooseDatanodes(Mockito.any(),
        Mockito.any(), Mockito.anyInt(), Mockito.anyLong(), Mockito.anyLong()))
        .thenAnswer(invocation -> {
            int count = (int) invocation.getArguments()[2];
            return IntStream.range(0, count)
                .mapToObj(i -> randomDatanodeDetails())
                .collect(Collectors.toList());
        });
    Mockito.when(containerPlacementPolicy.validateContainerPlacement(
        Mockito.any(), Mockito.anyInt()))
        .thenAnswer(invocation -> new ContainerPlacementStatusDefault(2, 2, 3));
    clock = new TestClock(Instant.now(), ZoneId.of("UTC"));
    createReplicationManager(new ReplicationManagerConfiguration());
}
Also used : HddsProtos(org.apache.hadoop.hdds.protocol.proto.HddsProtos) NodeStatus(org.apache.hadoop.hdds.scm.node.NodeStatus) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) EventQueue(org.apache.hadoop.hdds.server.events.EventQueue) TimeoutException(java.util.concurrent.TimeoutException) STALE(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE) DBStore(org.apache.hadoop.hdds.utils.db.DBStore) SCMCommandProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto) HddsTestUtils.getContainer(org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) EventPublisher(org.apache.hadoop.hdds.server.events.EventPublisher) After(org.junit.After) Map(java.util.Map) SCMHAManager(org.apache.hadoop.hdds.scm.ha.SCMHAManager) HddsConfigKeys(org.apache.hadoop.hdds.HddsConfigKeys) ContainerPlacementStatusDefault(org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault) FileUtil(org.apache.hadoop.fs.FileUtil) DBStoreBuilder(org.apache.hadoop.hdds.utils.db.DBStoreBuilder) Longs(com.google.common.primitives.Longs) Set(java.util.Set) UUID(java.util.UUID) IN_SERVICE(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE) Instant(java.time.Instant) HddsTestUtils.getReplicas(org.apache.hadoop.hdds.scm.HddsTestUtils.getReplicas) Collectors(java.util.stream.Collectors) CommandForDatanode(org.apache.hadoop.ozone.protocol.commands.CommandForDatanode) ZoneId(java.time.ZoneId) PlacementPolicy(org.apache.hadoop.hdds.scm.PlacementPolicy) List(java.util.List) MoveDataNodePair(org.apache.hadoop.hdds.scm.container.common.helpers.MoveDataNodePair) ReplicationManagerConfiguration(org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration) SCMServiceManager(org.apache.hadoop.hdds.scm.ha.SCMServiceManager) Optional(java.util.Optional) DECOMMISSIONED(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED) GenericTestUtils(org.apache.ozone.test.GenericTestUtils) IntStream(java.util.stream.IntStream) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) InvalidStateTransitionException(org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException) MoveResult(org.apache.hadoop.hdds.scm.container.ReplicationManager.MoveResult) SCMContext(org.apache.hadoop.hdds.scm.ha.SCMContext) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) ArrayList(java.util.ArrayList) LifeCycleState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState) MockDatanodeDetails.createDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails) MockSCMHAManager(org.apache.hadoop.hdds.scm.ha.MockSCMHAManager) NodeNotFoundException(org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException) DECOMMISSIONING(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING) Before(org.junit.Before) LifeCycleEvent(org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent) HEALTHY(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY) State(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) FileUtils(org.apache.commons.io.FileUtils) EventHandler(org.apache.hadoop.hdds.server.events.EventHandler) Test(org.junit.Test) 
IOException(java.io.IOException) Mockito.when(org.mockito.Mockito.when) SCMEvents(org.apache.hadoop.hdds.scm.events.SCMEvents) PipelineManager(org.apache.hadoop.hdds.scm.pipeline.PipelineManager) File(java.io.File) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) IN_MAINTENANCE(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE) Mockito(org.mockito.Mockito) SCMDBDefinition(org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition) CLOSED(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED) SCMDBTransactionBufferImpl(org.apache.hadoop.hdds.scm.metadata.SCMDBTransactionBufferImpl) PipelineID(org.apache.hadoop.hdds.scm.pipeline.PipelineID) Assert(org.junit.Assert) TestClock(org.apache.ozone.test.TestClock)
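
Most of this setup is Mockito wiring; the DBStore lifecycle itself is small. The sketch below isolates just that part, under the assumption that teardown mirrors the setup (the listing does not show the @After method); openStore and closeStore are illustrative names, and only calls visible in the setup and its imports are used.

private File testDir;
private DBStore dbStore;

@Before
public void openStore() throws IOException {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Point the SCM metadata directory at a throwaway test location.
    testDir = GenericTestUtils.getTestDir(
        TestReplicationManager.class.getSimpleName() + UUID.randomUUID());
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
    dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition());
}

@After
public void closeStore() throws IOException {
    if (dbStore != null) {
        dbStore.close();
    }
    FileUtils.deleteDirectory(testDir);
}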

Example 4 with DBStore

use of org.apache.hadoop.hdds.utils.db.DBStore in project ozone by apache.

In class OmMetadataManagerImpl, method loadDB.

public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
        String dbName) throws IOException {
    RocksDBConfiguration rocksDBConfiguration =
        configuration.getObject(RocksDBConfiguration.class);
    DBStoreBuilder dbStoreBuilder =
        DBStoreBuilder.newBuilder(configuration, rocksDBConfiguration)
            .setName(dbName)
            .setPath(Paths.get(metaDir.getPath()));
    DBStore dbStore = addOMTablesAndCodecs(dbStoreBuilder).build();
    return dbStore;
}
Also used : DBStoreBuilder(org.apache.hadoop.hdds.utils.db.DBStoreBuilder) RocksDBConfiguration(org.apache.hadoop.hdds.utils.db.RocksDBConfiguration) DBStore(org.apache.hadoop.hdds.utils.db.DBStore)
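
A hedged caller-side sketch for this helper: open an OM DB directory with loadDB, then release the handle when done. The metaDir path and the "om.db" name are hypothetical example values; only loadDB and DBStore#close from this listing are used.

OzoneConfiguration conf = new OzoneConfiguration();
// Hypothetical location of an OM metadata directory containing om.db.
File metaDir = new File("/tmp/om-snapshot");
DBStore store = OmMetadataManagerImpl.loadDB(conf, metaDir, "om.db");
try {
    // Inspect or copy table contents here before releasing the handle.
} finally {
    store.close();
}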

Example 5 with DBStore

use of org.apache.hadoop.hdds.utils.db.DBStore in project ozone by apache.

In class ReconOmMetadataManagerImpl, method initializeNewRdbStore.

/**
 * Replace existing DB instance with new one.
 *
 * @param dbFile new DB file location.
 */
private void initializeNewRdbStore(File dbFile) throws IOException {
    try {
        DBStoreBuilder dbStoreBuilder = DBStoreBuilder
            .newBuilder(ozoneConfiguration)
            .setName(dbFile.getName())
            .setPath(dbFile.toPath().getParent());
        addOMTablesAndCodecs(dbStoreBuilder);
        DBStore newStore = dbStoreBuilder.build();
        setStore(newStore);
        LOG.info("Created OM DB handle from snapshot at {}.", dbFile.getAbsolutePath());
    } catch (IOException ioEx) {
        LOG.error("Unable to initialize Recon OM DB snapshot store.", ioEx);
    }
    if (getStore() != null) {
        initializeOmTables();
        omTablesInitialized = true;
    }
}
Also used : DBStoreBuilder(org.apache.hadoop.hdds.utils.db.DBStoreBuilder) IOException(java.io.IOException) DBStore(org.apache.hadoop.hdds.utils.db.DBStore) RDBStore(org.apache.hadoop.hdds.utils.db.RDBStore)
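
The listing does not show how the previous Recon store is released once setStore swaps in the new one. As a generic, hypothetical sketch (swapStore and currentStore are illustrative names, not Recon API), here is one way to order a swap so that a failed build leaves the current store untouched; it uses only DBStoreBuilder.newBuilder/setName/setPath/build and DBStore#close from this listing.

private DBStore swapStore(DBStore currentStore, OzoneConfiguration conf,
        File dbFile) throws IOException {
    // Build the replacement first; if this throws, the current store stays usable.
    DBStore newStore = DBStoreBuilder.newBuilder(conf)
        .setName(dbFile.getName())
        .setPath(dbFile.toPath().getParent())
        .build();
    if (currentStore != null) {
        currentStore.close();
    }
    return newStore;
}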

Aggregations

DBStore (org.apache.hadoop.hdds.utils.db.DBStore)8 File (java.io.File)4 IOException (java.io.IOException)4 Test (org.junit.Test)3 ArrayList (java.util.ArrayList)2 UUID (java.util.UUID)2 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)2 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)2 SCMDBDefinition (org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition)2 PipelineID (org.apache.hadoop.hdds.scm.pipeline.PipelineID)2 DBStoreBuilder (org.apache.hadoop.hdds.utils.db.DBStoreBuilder)2 Longs (com.google.common.primitives.Longs)1 ProvisionException (com.google.inject.ProvisionException)1 Instant (java.time.Instant)1 ZoneId (java.time.ZoneId)1 HashMap (java.util.HashMap)1 List (java.util.List)1 Map (java.util.Map)1 Optional (java.util.Optional)1 Set (java.util.Set)1