Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
The class Replication, method initialize.
@Override
public void initialize(Server server, FileSystem fs, Path logDir, Path oldLogDir,
    WALFactory walFactory) throws IOException {
  this.server = server;
  this.conf = this.server.getConfiguration();
  this.isReplicationForBulkLoadDataEnabled =
      ReplicationUtils.isReplicationForBulkLoadDataEnabled(this.conf);
  if (this.isReplicationForBulkLoadDataEnabled) {
    if (conf.get(HConstants.REPLICATION_CLUSTER_ID) == null
        || conf.get(HConstants.REPLICATION_CLUSTER_ID).isEmpty()) {
      throw new IllegalArgumentException(HConstants.REPLICATION_CLUSTER_ID
          + " cannot be null/empty when " + HConstants.REPLICATION_BULKLOAD_ENABLE_KEY
          + " is set to true.");
    }
  }
  try {
    this.queueStorage =
        ReplicationStorageFactory.getReplicationQueueStorage(server.getZooKeeper(), conf);
    this.replicationPeers =
        ReplicationFactory.getReplicationPeers(server.getZooKeeper(), this.conf);
    this.replicationPeers.init();
  } catch (Exception e) {
    throw new IOException("Failed replication handler create", e);
  }
  UUID clusterId = null;
  try {
    clusterId = ZKClusterId.getUUIDForCluster(this.server.getZooKeeper());
  } catch (KeeperException ke) {
    throw new IOException("Could not read cluster id", ke);
  }
  SyncReplicationPeerMappingManager mapping = new SyncReplicationPeerMappingManager();
  this.globalMetricsSource = CompatibilitySingletonFactory
      .getInstance(MetricsReplicationSourceFactory.class).getGlobalSource();
  this.replicationManager = new ReplicationSourceManager(queueStorage, replicationPeers, conf,
      this.server, fs, logDir, oldLogDir, clusterId, walFactory, mapping, globalMetricsSource);
  this.syncReplicationPeerInfoProvider =
      new SyncReplicationPeerInfoProviderImpl(replicationPeers, mapping);
  PeerActionListener peerActionListener = PeerActionListener.DUMMY;
  // Get the user-space WAL provider
  WALProvider walProvider = walFactory != null ? walFactory.getWALProvider() : null;
  if (walProvider != null) {
    walProvider
        .addWALActionsListener(new ReplicationSourceWALActionListener(conf, replicationManager));
    if (walProvider instanceof SyncReplicationWALProvider) {
      SyncReplicationWALProvider syncWALProvider = (SyncReplicationWALProvider) walProvider;
      peerActionListener = syncWALProvider;
      syncWALProvider.setPeerInfoProvider(syncReplicationPeerInfoProvider);
      // A sync replication state change reloads the state twice (see the code in
      // PeerProcedureHandlerImpl), so go over the sync replication peers to see if any of
      // them are in the middle of the two refreshes; if so, manually repeat the action done
      // in the first refresh, otherwise the second refresh will run into trouble, such as an
      // NPE.
      replicationPeers.getAllPeerIds().stream().map(replicationPeers::getPeer)
          .filter(p -> p.getPeerConfig().isSyncReplication())
          .filter(p -> p.getNewSyncReplicationState() != SyncReplicationState.NONE)
          .forEach(p -> syncWALProvider.peerSyncReplicationStateChange(p.getId(),
              p.getSyncReplicationState(), p.getNewSyncReplicationState(), 0));
    }
  }
  this.statsPeriodInSecond = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60);
  this.replicationLoad = new ReplicationLoad();
  this.peerProcedureHandler = new PeerProcedureHandlerImpl(replicationManager, peerActionListener);
}
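Worth noting above: initialize() walks null-safely from the WALFactory to its WALProvider before registering any listener, since the factory may legitimately be absent in some setups. Below is a minimal standalone sketch of the same factory-to-provider lookup, assuming a default local configuration is enough to construct the factory; the class name, the factory id "sketch-provider-lookup" and the println are placeholders for illustration, not part of the HBase code above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALProvider;

public class WalProviderLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder factory id; in HBase itself this is usually the server name.
    WALFactory walFactory = new WALFactory(conf, "sketch-provider-lookup");
    try {
      // Same null-safe walk as Replication.initialize(): factory -> provider,
      // guarding both steps before anything is registered against the provider.
      WALProvider provider = walFactory != null ? walFactory.getWALProvider() : null;
      if (provider != null) {
        System.out.println("WAL provider: " + provider.getClass().getName());
      }
    } finally {
      walFactory.close();
    }
  }
}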
Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
The class TestWALObserver, method testWALCoprocessorReplay.
/**
 * Test WAL replay behavior with WALObserver.
 */
@Test
public void testWALCoprocessorReplay() throws Exception {
  // WAL replay is handled at HRegion::replayRecoveredEdits(), which is
  // ultimately called by HRegion::initialize().
  TableName tableName = TableName.valueOf(currentTest.getMethodName());
  TableDescriptor htd = getBasic3FamilyHTableDescriptor(tableName);
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build();
  final Path basedir = CommonFSUtils.getTableDir(this.hbaseRootDir, tableName);
  deleteDir(basedir);
  fs.mkdirs(new Path(basedir, hri.getEncodedName()));
  final Configuration newConf = HBaseConfiguration.create(this.conf);
  WAL wal = wals.getWAL(null);
  WALEdit edit = new WALEdit();
  long now = EnvironmentEdgeManager.currentTime();
  final int countPerFamily = 1000;
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htd.getColumnFamilyNames()) {
    scopes.put(fam, 0);
  }
  for (byte[] fam : htd.getColumnFamilyNames()) {
    addWALEdits(tableName, hri, TEST_ROW, fam, countPerFamily,
        EnvironmentEdgeManager.getDelegate(), wal, scopes, mvcc);
  }
  wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes),
      edit);
  // sync to fs.
  wal.sync();
  User user = HBaseTestingUtil.getDifferentUser(newConf, ".replay.wal.secondtime");
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      Path p = runWALSplit(newConf);
      LOG.info("WALSplit path == " + p);
      // Make a new wal for new region open.
      final WALFactory wals2 = new WALFactory(conf,
          ServerName.valueOf(currentTest.getMethodName() + "2", 16010,
              EnvironmentEdgeManager.currentTime()).toString());
      WAL wal2 = wals2.getWAL(null);
      HRegion region = HRegion.openHRegion(newConf, FileSystem.get(newConf), hbaseRootDir, hri,
          htd, wal2, TEST_UTIL.getHBaseCluster().getRegionServer(0), null);
      SampleRegionWALCoprocessor cp2 =
          region.getCoprocessorHost().findCoprocessor(SampleRegionWALCoprocessor.class);
      // TODO: asserting here is problematic.
      assertNotNull(cp2);
      assertTrue(cp2.isPreWALRestoreCalled());
      assertTrue(cp2.isPostWALRestoreCalled());
      region.close();
      wals2.close();
      return null;
    }
  });
}
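One WALFactory detail in this test: a second, independently named factory (wals2) is created just to open the region again after log splitting, and it is closed once the region is done with it. A minimal standalone sketch of that naming-and-lifecycle pattern, assuming a default local configuration; the class name and the host, port and start code passed to ServerName.valueOf are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class SecondWalFactorySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Derive a unique factory id from a ServerName, as the test does; the host,
    // port and start code here are placeholder values for this sketch.
    ServerName sn =
        ServerName.valueOf("example-host", 16010, EnvironmentEdgeManager.currentTime());
    WALFactory wals2 = new WALFactory(conf, sn.toString());
    try {
      // Passing null asks the factory for its default, non-region-specific WAL.
      WAL wal2 = wals2.getWAL(null);
      System.out.println("Opened WAL: " + wal2);
    } finally {
      // Closing the factory also shuts down the WALs it handed out.
      wals2.close();
    }
  }
}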
Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
The class MasterRegion, method create.
public static MasterRegion create(MasterRegionParams params) throws IOException {
  TableDescriptor td = params.tableDescriptor();
  LOG.info("Create or load local region for table " + td);
  Server server = params.server();
  Configuration baseConf = server.getConfiguration();
  FileSystem fs = CommonFSUtils.getRootDirFileSystem(baseConf);
  FileSystem walFs = CommonFSUtils.getWALFileSystem(baseConf);
  Path globalRootDir = CommonFSUtils.getRootDir(baseConf);
  Path globalWALRootDir = CommonFSUtils.getWALRootDir(baseConf);
  Path rootDir = new Path(globalRootDir, params.regionDirName());
  Path walRootDir = new Path(globalWALRootDir, params.regionDirName());
  // we will override some configurations so create a new one.
  Configuration conf = new Configuration(baseConf);
  CommonFSUtils.setRootDir(conf, rootDir);
  CommonFSUtils.setWALRootDir(conf, walRootDir);
  MasterRegionFlusherAndCompactor.setupConf(conf, params.flushSize(), params.flushPerChanges(),
      params.flushIntervalMs());
  conf.setInt(AbstractFSWAL.MAX_LOGS, params.maxWals());
  if (params.useHsync() != null) {
    conf.setBoolean(HRegion.WAL_HSYNC_CONF_KEY, params.useHsync());
  }
  if (params.useMetaCellComparator() != null) {
    conf.setBoolean(HRegion.USE_META_CELL_COMPARATOR, params.useMetaCellComparator());
  }
  conf.setInt(AbstractFSWAL.RING_BUFFER_SLOT_COUNT,
      IntMath.ceilingPowerOfTwo(params.ringBufferSlotCount()));
  MasterRegionWALRoller walRoller = MasterRegionWALRoller.create(
      td.getTableName() + "-WAL-Roller", conf, server, walFs, walRootDir, globalWALRootDir,
      params.archivedWalSuffix(), params.rollPeriodMs(), params.flushSize());
  walRoller.start();
  WALFactory walFactory = new WALFactory(conf, server.getServerName().toString(), server, false);
  Path tableDir = CommonFSUtils.getTableDir(rootDir, td.getTableName());
  Path initializingFlag = new Path(tableDir, INITIALIZING_FLAG);
  Path initializedFlag = new Path(tableDir, INITIALIZED_FLAG);
  HRegion region;
  if (!fs.exists(tableDir)) {
    // bootstrap, no doubt
    if (!fs.mkdirs(initializedFlag)) {
      throw new IOException("Can not touch initialized flag");
    }
    region = bootstrap(conf, td, fs, rootDir, walFs, walRootDir, walFactory, walRoller,
        server.getServerName().toString(), true);
  } else {
    if (!fs.exists(initializedFlag)) {
      if (!fs.exists(initializingFlag)) {
        // Old style, where we have neither the initializing nor the initialized file: persist
        // the table descriptor, touch the initialized flag and then open the region.
        // The store file tracker must be DEFAULT.
        LOG.info("No {} or {} file, try upgrading", INITIALIZING_FLAG, INITIALIZED_FLAG);
        TableDescriptor oldTd =
            TableDescriptorBuilder.newBuilder(td).setValue(StoreFileTrackerFactory.TRACKER_IMPL,
                StoreFileTrackerFactory.Trackers.DEFAULT.name()).build();
        FSTableDescriptors.createTableDescriptorForTableDirectory(fs, tableDir, oldTd, true);
        if (!fs.mkdirs(initializedFlag)) {
          throw new IOException("Can not touch initialized flag: " + initializedFlag);
        }
        RegionInfo regionInfo = loadRegionInfo(fs, tableDir);
        tryMigrate(conf, fs, tableDir, regionInfo, oldTd, td);
        region = open(conf, td, regionInfo, fs, rootDir, walFs, walRootDir, walFactory, walRoller,
            server.getServerName().toString());
      } else {
        // The table directory exists (unless someone deleted it manually...), so we do not do a
        // null check here.
        for (FileStatus status : fs.listStatus(tableDir)) {
          if (!status.getPath().getName().equals(INITIALIZING_FLAG)) {
            fs.delete(status.getPath(), true);
          }
        }
        region = bootstrap(conf, td, fs, rootDir, walFs, walRootDir, walFactory, walRoller,
            server.getServerName().toString(), false);
      }
    } else {
      if (fs.exists(initializingFlag) && !fs.delete(initializingFlag, true)) {
        LOG.warn("failed to clean up initializing flag: " + initializingFlag);
      }
      // open it, make sure to load the table descriptor from fs
      TableDescriptor oldTd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
      RegionInfo regionInfo = loadRegionInfo(fs, tableDir);
      tryMigrate(conf, fs, tableDir, regionInfo, oldTd, td);
      region = open(conf, td, regionInfo, fs, rootDir, walFs, walRootDir, walFactory, walRoller,
          server.getServerName().toString());
    }
  }
  Path globalArchiveDir = HFileArchiveUtil.getArchivePath(baseConf);
  MasterRegionFlusherAndCompactor flusherAndCompactor = new MasterRegionFlusherAndCompactor(conf,
      server, region, params.flushSize(), params.flushPerChanges(), params.flushIntervalMs(),
      params.compactMin(), globalArchiveDir, params.archivedHFileSuffix());
  walRoller.setFlusherAndCompactor(flusherAndCompactor);
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  if (!fs.mkdirs(archiveDir)) {
    LOG.warn("Failed to create archive directory {}. Usually this should not happen but it will"
        + " be created again when we actually archive the hfiles later, so continue", archiveDir);
  }
  return new MasterRegion(region, walFactory, flusherAndCompactor, walRoller);
}
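The WALFactory here is deliberately built from a derived Configuration whose root dir and WAL root dir were redirected into a region-private subdirectory, so the master region's WALs stay separate from the cluster's. A minimal sketch of just that redirection, assuming a default local configuration; the class name, the directory name "local-region" and the factory id are placeholders, and the two-argument constructor stands in for the four-argument form above, which additionally wires in the server:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.wal.WALFactory;

public class LocalRegionWalConfSketch {
  public static void main(String[] args) throws Exception {
    Configuration baseConf = HBaseConfiguration.create();
    // Copy the base configuration and point both the root dir and the WAL root
    // dir at a private subdirectory, mirroring MasterRegion.create() above.
    Configuration conf = new Configuration(baseConf);
    Path rootDir = new Path(CommonFSUtils.getRootDir(baseConf), "local-region");
    Path walRootDir = new Path(CommonFSUtils.getWALRootDir(baseConf), "local-region");
    CommonFSUtils.setRootDir(conf, rootDir);
    CommonFSUtils.setWALRootDir(conf, walRootDir);
    // A factory built from the derived conf writes its WALs under walRootDir only.
    WALFactory walFactory = new WALFactory(conf, "local-region-sketch");
    try {
      System.out.println("WALs go under: " + CommonFSUtils.getWALRootDir(conf));
    } finally {
      walFactory.close();
    }
  }
}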
Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
The class TestWALRecordReader, method testWALRecordReaderActiveArchiveTolerance.
/**
 * Test WALRecordReader tolerance to moving a WAL from the active directory
 * to the archive directory.
 * @throws Exception exception
 */
@Test
public void testWALRecordReaderActiveArchiveTolerance() throws Exception {
  final WALFactory walfactory = new WALFactory(conf, getName());
  WAL log = walfactory.getWAL(info);
  byte[] value = Bytes.toBytes("value");
  WALEdit edit = new WALEdit();
  edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), EnvironmentEdgeManager.currentTime(),
      value));
  long txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes),
      edit);
  log.sync(txid);
  // make sure the 2nd edit gets a later timestamp
  Thread.sleep(10);
  edit = new WALEdit();
  edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), EnvironmentEdgeManager.currentTime(),
      value));
  txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit);
  log.sync(txid);
  log.shutdown();
  // should have 2 log entries now
  WALInputFormat input = new WALInputFormat();
  Configuration jobConf = new Configuration(conf);
  jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString());
  // make sure the log is found
  List<InputSplit> splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));
  assertEquals(1, splits.size());
  WALInputFormat.WALSplit split = (WALInputFormat.WALSplit) splits.get(0);
  LOG.debug("log=" + logDir + " file=" + split.getLogFileName());
  testSplitWithMovingWAL(splits.get(0), Bytes.toBytes("1"), Bytes.toBytes("2"));
}
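The write path this test depends on is appendData followed by sync(txid): appendData returns a transaction id, and syncing on that id blocks until exactly that edit is durable, which is what makes it visible to a reader such as WALInputFormat. A minimal standalone sketch of the append-and-sync sequence, assuming a default local configuration; the class name, table name, family, row, qualifier and factory id are placeholders:

import java.util.NavigableMap;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALKeyImpl;

public class WalAppendSyncSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("sketch_table");
    RegionInfo info = RegionInfoBuilder.newBuilder(tn).build();
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    byte[] family = Bytes.toBytes("f");
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    scopes.put(family, 0);
    WALFactory walFactory = new WALFactory(conf, "wal-append-sketch");
    try {
      WAL log = walFactory.getWAL(info);
      long now = EnvironmentEdgeManager.currentTime();
      WALEdit edit = new WALEdit();
      edit.add(new KeyValue(Bytes.toBytes("row"), family, Bytes.toBytes("q"), now,
          Bytes.toBytes("value")));
      // appendData hands back a txid; sync(txid) blocks until that edit is durable.
      long txid = log.appendData(info,
          new WALKeyImpl(info.getEncodedNameAsBytes(), tn, now, mvcc, scopes), edit);
      log.sync(txid);
    } finally {
      walFactory.close();
    }
  }
}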
Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
The class TestCacheOnWriteInSchema, method setUp.
@Before
public void setUp() throws IOException {
  // parameterized tests add a [#] suffix; get rid of [ and ].
  table = Bytes.toBytes(name.getMethodName().replaceAll("[\\[\\]]", "_"));
  conf = TEST_UTIL.getConfiguration();
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false);
  fs = HFileSystem.get(conf);
  // Create the schema
  ColumnFamilyDescriptor hcd = cowType
      .modifyFamilySchema(
          ColumnFamilyDescriptorBuilder.newBuilder(family).setBloomFilterType(BloomType.ROWCOL))
      .build();
  TableDescriptor htd =
      TableDescriptorBuilder.newBuilder(TableName.valueOf(table)).setColumnFamily(hcd).build();
  // Create a store based on the schema
  String id = TestCacheOnWriteInSchema.class.getName();
  Path logdir =
      new Path(CommonFSUtils.getRootDir(conf), AbstractFSWALProvider.getWALDirectoryName(id));
  fs.delete(logdir, true);
  RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
  walFactory = new WALFactory(conf, id);
  region = TEST_UTIL.createLocalHRegion(info, conf, htd, walFactory.getWAL(info));
  region.setBlockCache(BlockCacheFactory.createBlockCache(conf));
  store = new HStore(region, hcd, conf, false);
}
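Note that the WAL handed to createLocalHRegion comes from walFactory.getWAL(info), so the lookup is region-scoped: a region-grouping provider can hand back different WALs per region, while a single-WAL provider returns the same instance for every region. A minimal sketch of that lookup in isolation, assuming a default local configuration; the class name, table name and factory id are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class RegionScopedWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder factory id; in the test above it doubles as the WAL dir name.
    WALFactory walFactory = new WALFactory(conf, "region-scoped-wal-sketch");
    try {
      RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf("t")).build();
      // The RegionInfo argument lets a region-grouping provider pick the right
      // WAL; a single-WAL provider returns the same instance for every region.
      WAL wal = walFactory.getWAL(info);
      System.out.println("WAL for region " + info.getEncodedName() + ": " + wal);
    } finally {
      walFactory.close();
    }
  }
}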