use of org.apache.hadoop.hdds.utils.db.RocksDBConfiguration in project ozone by apache.
the class TestReconWithOzoneManagerHA method setup.
@Before
public void setup() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, Boolean.TRUE.toString());
  conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false);
  // Sync to disk enabled
  RocksDBConfiguration dbConf = conf.getObject(RocksDBConfiguration.class);
  dbConf.setSyncOption(true);
  conf.setFromObject(dbConf);
  cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf)
      .setClusterId(UUID.randomUUID().toString())
      .setScmId(UUID.randomUUID().toString())
      .setOMServiceId(OM_SERVICE_ID)
      .setNumDatanodes(1)
      .setNumOfOzoneManagers(3)
      .includeRecon(true)
      .build();
  cluster.waitForClusterToBeReady();
  objectStore = OzoneClientFactory.getRpcClient(OM_SERVICE_ID, conf).getObjectStore();
  objectStore.createVolume(VOL_NAME);
  objectStore.getVolume(VOL_NAME).createBucket(VOL_NAME);
}
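The setup above relies on the typed configuration round-trip used throughout this page: read a RocksDBConfiguration view out of the OzoneConfiguration, flip the sync option, and write it back before the cluster is built. Below is a minimal sketch of just that round-trip, using only the getObject, setSyncOption and setFromObject calls already shown here; the RocksDbSyncExample class and enableSync method are illustrative names, not part of Ozone.
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration;

public final class RocksDbSyncExample {
  private RocksDbSyncExample() { }

  /** Return the same configuration with RocksDB sync-on-write enabled. */
  public static OzoneConfiguration enableSync(OzoneConfiguration conf) {
    // Read the typed RocksDB view of the configuration.
    RocksDBConfiguration dbConf = conf.getObject(RocksDBConfiguration.class);
    // Force syncing writes to disk so they survive a process crash.
    dbConf.setSyncOption(true);
    // Write the modified view back into the underlying configuration.
    conf.setFromObject(dbConf);
    return conf;
  }
}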
use of org.apache.hadoop.hdds.utils.db.RocksDBConfiguration in project ozone by apache.
the class GeneratorOm method call.
@Override
public Void call() throws Exception {
  init();
  setThreadNo(1);
  config = createOzoneConfiguration();
  File metaDir = OMStorage.getOmDbDir(config);
  RocksDBConfiguration rocksDBConfiguration =
      config.getObject(RocksDBConfiguration.class);
  DBStoreBuilder dbStoreBuilder =
      DBStoreBuilder.newBuilder(config, rocksDBConfiguration)
          .setName(OM_DB_NAME)
          .setPath(metaDir.toPath());
  OmMetadataManagerImpl.addOMTablesAndCodecs(dbStoreBuilder);
  omDb = dbStoreBuilder.build();
  // Initialization: create one bucket and volume in OM.
  writeOmBucketVolume();
  omKeyTable = omDb.getTable(OmMetadataManagerImpl.KEY_TABLE,
      String.class, OmKeyInfo.class);
  timer = getMetrics().timer("om-generator");
  runTests(this::writeOmKeys);
  omDb.close();
  return null;
}
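The same DBStoreBuilder chain works without the OM-specific addOMTablesAndCodecs step; column families can be registered by hand. Below is a sketch under that assumption, composed only of builder calls visible on this page (newBuilder, setName, setPath, addTable, build); the BuilderExample class, the "sample.db" name and the "keys" table are placeholders.
import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration;

public final class BuilderExample {
  private BuilderExample() { }

  /** Build a small RocksDB store with one hand-registered table. */
  public static DBStore openSampleStore(OzoneConfiguration conf, File dir)
      throws IOException {
    RocksDBConfiguration rocksDBConfiguration =
        conf.getObject(RocksDBConfiguration.class);
    DBStoreBuilder builder =
        DBStoreBuilder.newBuilder(conf, rocksDBConfiguration)
            .setName("sample.db")   // placeholder DB name
            .setPath(dir.toPath());
    builder.addTable("keys");       // placeholder column family
    return builder.build();
  }
}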
use of org.apache.hadoop.hdds.utils.db.RocksDBConfiguration in project ozone by apache.
the class OmMetadataManagerImpl method loadDB.
public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
    String dbName) throws IOException {
  RocksDBConfiguration rocksDBConfiguration =
      configuration.getObject(RocksDBConfiguration.class);
  DBStoreBuilder dbStoreBuilder =
      DBStoreBuilder.newBuilder(configuration, rocksDBConfiguration)
          .setName(dbName)
          .setPath(Paths.get(metaDir.getPath()));
  return addOMTablesAndCodecs(dbStoreBuilder).build();
}
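A hedged usage sketch of loadDB above: resolve the OM metadata directory the same way start() does below and let loadDB register the standard OM tables and codecs. The LoadOmDbExample wrapper is illustrative, and a real caller would pass the OM_DB_NAME constant seen in the GeneratorOm example as the dbName argument.
import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.ozone.om.OMStorage;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;

public final class LoadOmDbExample {
  private LoadOmDbExample() { }

  /** Open an OM-layout RocksDB store under the OM metadata directory. */
  public static DBStore openOmDb(OzoneConfiguration conf, String dbName)
      throws IOException {
    // Resolve the OM metadata directory, as start() does.
    File metaDir = OMStorage.getOmDbDir(conf);
    // loadDB registers the OM column families via addOMTablesAndCodecs.
    return OmMetadataManagerImpl.loadDB(conf, metaDir, dbName);
  }
}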
use of org.apache.hadoop.hdds.utils.db.RocksDBConfiguration in project ozone by apache.
the class OmMetadataManagerImpl method start.
/**
 * Start metadata manager.
 */
@Override
public void start(OzoneConfiguration configuration) throws IOException {
  // During a restart, stop() closes the db, so we need to create the store
  // object and initialize the DB again.
  if (store == null) {
    File metaDir = OMStorage.getOmDbDir(configuration);
    // Check if there is a DB Inconsistent Marker in the metaDir. This
    // marker indicates that the DB is in an inconsistent state and hence
    // the OM process should be terminated.
    File markerFile = new File(metaDir, DB_TRANSIENT_MARKER);
    if (markerFile.exists()) {
      LOG.error("File {} marks that OM DB is in an inconsistent state.",
          markerFile);
      // Note - The marker file should be deleted only after fixing the DB.
      // In an HA setup, this can be done by replacing this DB with a
      // checkpoint from another OM.
      String errorMsg = "Cannot load OM DB as it is in an inconsistent state.";
      ExitUtils.terminate(1, errorMsg, LOG);
    }
    RocksDBConfiguration rocksDBConfiguration =
        configuration.getObject(RocksDBConfiguration.class);
    // When Ratis is not enabled, override the config and force RocksDB to
    // sync writes to disk.
    if (!isRatisEnabled) {
      rocksDBConfiguration.setSyncOption(true);
    }
    this.store = loadDB(configuration, metaDir);
    initializeOmTables();
  }
}
use of org.apache.hadoop.hdds.utils.db.RocksDBConfiguration in project ozone by apache.
the class HAUtils method loadDB.
public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
    String dbName, DBDefinition definition) throws IOException {
  RocksDBConfiguration rocksDBConfiguration =
      configuration.getObject(RocksDBConfiguration.class);
  DBStoreBuilder dbStoreBuilder =
      DBStoreBuilder.newBuilder(configuration, rocksDBConfiguration)
          .setName(dbName)
          .setPath(Paths.get(metaDir.getPath()));
  // Add column family names and codecs from the definition.
  for (DBColumnFamilyDefinition columnFamily : definition.getColumnFamilies()) {
    dbStoreBuilder.addTable(columnFamily.getName());
    dbStoreBuilder.addCodec(columnFamily.getKeyType(),
        columnFamily.getKeyCodec());
    dbStoreBuilder.addCodec(columnFamily.getValueType(),
        columnFamily.getValueCodec());
  }
  return dbStoreBuilder.build();
}
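HAUtils.loadDB generalizes the OM-specific loadDB above: the DBDefinition supplies the column families and codecs, so any Ozone DB described this way can be opened the same way. Below is a hedged usage sketch; the HAUtils package in the import, the DefinitionLoadExample wrapper and the "sample.db" name are assumptions or placeholders, not taken from the source on this page.
import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
// The package of HAUtils is assumed here; adjust to where the method above lives.
import org.apache.hadoop.hdds.utils.HAUtils;
import org.apache.hadoop.hdds.utils.db.DBDefinition;
import org.apache.hadoop.hdds.utils.db.DBStore;

public final class DefinitionLoadExample {
  private DefinitionLoadExample() { }

  /** Open a store whose tables and codecs come entirely from the definition. */
  public static DBStore open(OzoneConfiguration conf, File metaDir,
      DBDefinition definition) throws IOException {
    // "sample.db" is a placeholder name; HAUtils.loadDB registers every
    // column family and codec declared by the definition before building.
    return HAUtils.loadDB(conf, metaDir, "sample.db", definition);
  }
}
A caller can then fetch typed tables from the returned DBStore with getTable(name, keyType, valueType), as the GeneratorOm example does for the OM key table, and should close the store when finished.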