Example use of org.apache.hadoop.hdds.utils.db.DBStore in the Apache Ozone project.
From the class TestSCMInstallSnapshot, method testInstallCheckPoint.
/**
 * Verifies that installing a downloaded DB checkpoint through the SCM
 * state machine restores deleted pipeline/container state and advances
 * the transaction index to the value stored in the checkpoint.
 */
@Test
public void testInstallCheckPoint() throws Exception {
  DBCheckpoint checkpoint = downloadSnapshot();
  StorageContainerManager scm = cluster.getStorageContainerManager();
  DBStore db = HAUtils.loadDB(conf,
      checkpoint.getCheckpointLocation().getParent().toFile(),
      checkpoint.getCheckpointLocation().getFileName().toString(),
      new SCMDBDefinition());
  Assert.assertNotNull(db);
  // Hack the transaction index in the checkpoint so as to ensure the
  // checkpointed transaction index is higher than when it was downloaded
  // from.
  try {
    HAUtils.getTransactionInfoTable(db, new SCMDBDefinition())
        .put(OzoneConsts.TRANSACTION_INFO_KEY,
            TransactionInfo.builder()
                .setCurrentTerm(10)
                .setTransactionIndex(100)
                .build());
  } finally {
    // Close in a finally block so the RocksDB handle is not leaked if
    // the put above throws.
    db.close();
  }
  ContainerID cid =
      scm.getContainerManager().getContainers().get(0).containerID();
  PipelineID pipelineID =
      scm.getPipelineManager().getPipelines().get(0).getId();
  // Remove a pipeline and a container so the install has state to restore.
  scm.getScmMetadataStore().getPipelineTable().delete(pipelineID);
  scm.getContainerManager().deleteContainer(cid);
  Assert.assertNull(
      scm.getScmMetadataStore().getPipelineTable().get(pipelineID));
  Assert.assertFalse(scm.getContainerManager().containerExist(cid));
  SCMStateMachine sm =
      scm.getScmHAManager().getRatisServer().getSCMStateMachine();
  // Pause + reinitialize is the install path: the state machine swaps in
  // the checkpointed DB while paused.
  sm.pause();
  sm.setInstallingDBCheckpoint(checkpoint);
  sm.reinitialize();
  // Deleted state must be back after the checkpoint is installed.
  Assert.assertNotNull(
      scm.getScmMetadataStore().getPipelineTable().get(pipelineID));
  Assert.assertNotNull(
      scm.getScmMetadataStore().getContainerTable().get(cid));
  Assert.assertTrue(scm.getPipelineManager().containsPipeline(pipelineID));
  Assert.assertTrue(scm.getContainerManager().containerExist(cid));
  // The hacked index (100) must now be the visible transaction index.
  Assert.assertEquals(100,
      scm.getScmMetadataStore().getTransactionInfoTable()
          .get(OzoneConsts.TRANSACTION_INFO_KEY).getTransactionIndex());
  Assert.assertEquals(100,
      scm.getScmHAManager().asSCMHADBTransactionBuffer()
          .getLatestTrxInfo().getTermIndex().getIndex());
}
Example use of org.apache.hadoop.hdds.utils.db.DBStore in the Apache Ozone project.
From the class HAUtils, method getTransactionInfoFromDB.
/**
 * Obtain the persisted {@link TransactionInfo} from the DB at the given
 * location.
 *
 * @param tempConfig configuration used to open the DB
 * @param dbDir path to the directory containing the DB
 * @param dbName name of the DB under {@code dbDir}
 * @param definition table definitions describing the DB being opened
 * @return the TransactionInfo read from the DB (never null)
 * @throws Exception if the DB cannot be opened, read, or contains no
 *         TransactionInfo entry
 */
private static TransactionInfo getTransactionInfoFromDB(
    OzoneConfiguration tempConfig, Path dbDir, String dbName,
    DBDefinition definition) throws Exception {
  DBStore dbStore = loadDB(tempConfig, dbDir.toFile(), dbName, definition);
  TransactionInfo transactionInfo;
  try {
    // Get the table whose value type is TransactionInfo. The transaction
    // info table names differ between SCM and OM.
    // In case a new table gets added where the value is TransactionInfo,
    // this logic may not work.
    Table<String, TransactionInfo> transactionInfoTable =
        getTransactionInfoTable(dbStore, definition);
    transactionInfo = transactionInfoTable.get(TRANSACTION_INFO_KEY);
  } finally {
    // Always close, even when the read throws, so the RocksDB handle is
    // not leaked.
    dbStore.close();
  }
  if (transactionInfo == null) {
    throw new IOException("Failed to read TransactionInfo from DB "
        + definition.getName() + " at " + dbDir);
  }
  return transactionInfo;
}
Example use of org.apache.hadoop.hdds.utils.db.DBStore in the Apache Ozone project.
From the class TestReplicationManager, method setup.
// Builds the full ReplicationManager test fixture: a real SCMDBDefinition-backed
// DBStore and ContainerStateManager, surrounded by mocks for the container,
// pipeline, and placement collaborators.
@Before
public void setup() throws IOException, InterruptedException, NodeNotFoundException, InvalidStateTransitionException {
OzoneConfiguration conf = new OzoneConfiguration();
// No safe-mode delay so the manager starts acting immediately in tests.
conf.setTimeDuration(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, 0, TimeUnit.SECONDS);
// Capture ReplicationManager log output so tests can assert on messages.
scmLogs = GenericTestUtils.LogCapturer.captureLogs(ReplicationManager.LOG);
containerManager = Mockito.mock(ContainerManager.class);
nodeManager = new SimpleMockNodeManager();
eventQueue = new EventQueue();
scmhaManager = MockSCMHAManager.getInstance(true);
// Unique metadata dir per run to avoid cross-test RocksDB collisions.
testDir = GenericTestUtils.getTestDir(TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID());
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition());
pipelineManager = Mockito.mock(PipelineManager.class);
// Every pipeline lookup succeeds; pipeline health is not under test here.
when(pipelineManager.containsPipeline(Mockito.any(PipelineID.class))).thenReturn(true);
// Real ContainerStateManager on top of the real DB store; only its
// collaborators are mocked.
containerStateManager = ContainerStateManagerImpl.newBuilder().setConfiguration(conf).setPipelineManager(pipelineManager).setRatisServer(scmhaManager.getRatisServer()).setContainerStore(SCMDBDefinition.CONTAINERS.getTable(dbStore)).setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer()).build();
serviceManager = new SCMServiceManager();
datanodeCommandHandler = new DatanodeCommandHandler();
eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, datanodeCommandHandler);
// Delegate the mocked ContainerManager's reads to the real state manager
// so tests that add containers see them through both paths.
Mockito.when(containerManager.getContainers()).thenAnswer(invocation -> {
Set<ContainerID> ids = containerStateManager.getContainerIDs();
List<ContainerInfo> containers = new ArrayList<>();
for (ContainerID id : ids) {
containers.add(containerStateManager.getContainer(id));
}
return containers;
});
Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class))).thenAnswer(invocation -> containerStateManager.getContainer(((ContainerID) invocation.getArguments()[0])));
Mockito.when(containerManager.getContainerReplicas(Mockito.any(ContainerID.class))).thenAnswer(invocation -> containerStateManager.getContainerReplicas(((ContainerID) invocation.getArguments()[0])));
containerPlacementPolicy = Mockito.mock(PlacementPolicy.class);
// Placement policy hands back `count` random datanodes — no rack logic.
Mockito.when(containerPlacementPolicy.chooseDatanodes(Mockito.any(), Mockito.any(), Mockito.anyInt(), Mockito.anyLong(), Mockito.anyLong())).thenAnswer(invocation -> {
int count = (int) invocation.getArguments()[2];
return IntStream.range(0, count).mapToObj(i -> randomDatanodeDetails()).collect(Collectors.toList());
});
// Placement always reports satisfied (2 of 2 racks, 3 required replicas).
Mockito.when(containerPlacementPolicy.validateContainerPlacement(Mockito.any(), Mockito.anyInt())).thenAnswer(invocation -> new ContainerPlacementStatusDefault(2, 2, 3));
// Controllable clock so tests can advance time deterministically.
clock = new TestClock(Instant.now(), ZoneId.of("UTC"));
createReplicationManager(new ReplicationManagerConfiguration());
}
Example use of org.apache.hadoop.hdds.utils.db.DBStore in the Apache Ozone project.
From the class OmMetadataManagerImpl, method loadDB.
/**
 * Opens (or creates) the OM RocksDB store named {@code dbName} under
 * {@code metaDir}, with all OM tables and codecs registered.
 *
 * @param configuration source of the {@link RocksDBConfiguration} object
 * @param metaDir directory in which the DB lives
 * @param dbName name of the DB
 * @return the opened DBStore
 * @throws IOException if the store cannot be built
 */
public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
    String dbName) throws IOException {
  RocksDBConfiguration rocksDBConf =
      configuration.getObject(RocksDBConfiguration.class);
  DBStoreBuilder builder =
      DBStoreBuilder.newBuilder(configuration, rocksDBConf)
          .setName(dbName)
          .setPath(Paths.get(metaDir.getPath()));
  return addOMTablesAndCodecs(builder).build();
}
Example use of org.apache.hadoop.hdds.utils.db.DBStore in the Apache Ozone project.
From the class ReconOmMetadataManagerImpl, method initializeNewRdbStore.
/**
 * Replace the existing DB instance with a new one built from the given
 * snapshot file. Failure to open the new store is logged (best effort);
 * tables are (re)initialized only when a store is actually available.
 *
 * @param dbFile new DB file location.
 */
private void initializeNewRdbStore(File dbFile) throws IOException {
  try {
    DBStoreBuilder builder =
        DBStoreBuilder.newBuilder(ozoneConfiguration)
            .setName(dbFile.getName())
            .setPath(dbFile.toPath().getParent());
    addOMTablesAndCodecs(builder);
    setStore(builder.build());
    LOG.info("Created OM DB handle from snapshot at {}.",
        dbFile.getAbsolutePath());
  } catch (IOException ioEx) {
    // Deliberately swallowed: Recon keeps running on the old store (if any).
    LOG.error("Unable to initialize Recon OM DB snapshot store.", ioEx);
  }
  if (getStore() != null) {
    initializeOmTables();
    omTablesInitialized = true;
  }
}
Aggregations