Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
From the class TestWALObserver, method setUp:
@Before
public void setUp() throws Exception {
  this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  // this.cluster = TEST_UTIL.getDFSCluster();
  this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
  this.hbaseRootDir = FSUtils.getRootDir(conf);
  this.hbaseWALRootDir = FSUtils.getWALRootDir(conf);
  this.oldLogDir = new Path(this.hbaseWALRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  String serverName =
      ServerName.valueOf(currentTest.getMethodName(), 16010, System.currentTimeMillis()).toString();
  this.logDir = new Path(this.hbaseWALRootDir, AbstractFSWALProvider.getWALDirectoryName(serverName));
  if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
  }
  if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseWALRootDir)) {
    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseWALRootDir, true);
  }
  this.wals = new WALFactory(conf, null, serverName);
}
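The tearDown that pairs with this setUp is not part of the excerpt; a minimal sketch of the companion cleanup, assuming the test only needs to release the factory it created above:

@After
public void tearDown() throws Exception {
  // Close the factory so any WALs it handed out are shut down and their files released.
  if (this.wals != null) {
    this.wals.close();
  }
}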
Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
From the class TestCompactionArchiveConcurrentClose, method initHRegion:
private Region initHRegion(HTableDescriptor htd, HRegionInfo info) throws IOException {
  Configuration conf = testUtil.getConfiguration();
  Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());
  HRegionFileSystem fs =
      new WaitingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info);
  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, tableDir);
  final WALFactory wals = new WALFactory(walConf, null, "log_" + info.getEncodedName());
  HRegion region = new HRegion(fs, wals.getWAL(info.getEncodedNameAsBytes(),
      info.getTable().getNamespace()), conf, htd, null);
  region.initialize();
  return region;
}
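The helper hands back a live region wired to a WAL from its own WALFactory. A hypothetical caller (table name, family, and keys are placeholders, not the actual test body) would release the region together with that WAL via HBaseTestingUtility.closeRegionAndWAL, as the durabilityTest example further down also does:

HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testtable"));
htd.addFamily(new HColumnDescriptor(Bytes.toBytes("fam")));
HRegionInfo info =
    new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
Region region = initHRegion(htd, info);
try {
  // exercise the region here, e.g. put data, flush, and compact
} finally {
  // closes the region and the WAL it was constructed with
  HBaseTestingUtility.closeRegionAndWAL((HRegion) region);
}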
Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
From the class TestCompactionArchiveIOException, method initHRegion:
private HRegion initHRegion(HTableDescriptor htd, HRegionInfo info) throws IOException {
  Configuration conf = testUtil.getConfiguration();
  Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());
  Path regionDir = new Path(tableDir, info.getEncodedName());
  Path storeDir = new Path(regionDir, htd.getColumnFamilies()[0].getNameAsString());
  FileSystem errFS = spy(testUtil.getTestFileSystem());
  // Prior to HBASE-16964, when an exception is thrown archiving any compacted file,
  // none of the other files are cleared from the compactedfiles list.
  // Simulate this condition with a dummy file.
  doThrow(new IOException("Error for test"))
      .when(errFS).rename(eq(new Path(storeDir, ERROR_FILE)), any(Path.class));
  HRegionFileSystem fs = new HRegionFileSystem(conf, errFS, tableDir, info);
  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, tableDir);
  final WALFactory wals = new WALFactory(walConf, null, "log_" + info.getEncodedName());
  HRegion region = new HRegion(fs, wals.getWAL(info.getEncodedNameAsBytes(),
      info.getTable().getNamespace()), conf, htd, null);
  region.initialize();
  return region;
}
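One detail of the stub above worth calling out: because the first rename argument uses the eq(...) matcher, the second must also be a matcher (any(Path.class)); Mockito does not allow mixing a raw value with a matcher in the same stubbed call. The same pattern in isolation, with a placeholder path and filesystem (not part of the original test):

FileSystem spyFs = spy(FileSystem.get(conf));
Path poison = new Path("/table/region/family/bad-file");
// Throws only when the poisoned source path is renamed; all other renames
// fall through to the real FileSystem because spyFs is a spy, not a mock.
doThrow(new IOException("simulated archive failure"))
    .when(spyFs).rename(eq(poison), any(Path.class));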
Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
From the class TestDefaultMemStore, method testShouldFlushMeta:
@Test
public void testShouldFlushMeta() throws Exception {
  // write an edit in the META and ensure the shouldFlush (that the periodic memstore
  // flusher invokes) returns true after SYSTEM_CACHE_FLUSH_INTERVAL (even though
  // the MEMSTORE_PERIODIC_FLUSH_INTERVAL is set to a higher value)
  Configuration conf = new Configuration();
  conf.setInt(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, HRegion.SYSTEM_CACHE_FLUSH_INTERVAL * 10);
  HBaseTestingUtility hbaseUtility = HBaseTestingUtility.createLocalHTU(conf);
  Path testDir = hbaseUtility.getDataTestDir();
  EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
  EnvironmentEdgeManager.injectEdge(edge);
  edge.setCurrentTimeMillis(1234);
  WALFactory wFactory = new WALFactory(conf, null, "1234");
  HRegion meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir, conf,
      FSTableDescriptors.createMetaTableDescriptor(conf),
      wFactory.getMetaWAL(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes()));
  HRegionInfo hri = new HRegionInfo(TableName.valueOf(name.getMethodName()),
      Bytes.toBytes("row_0200"), Bytes.toBytes("row_0300"));
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  desc.addFamily(new HColumnDescriptor("foo".getBytes()));
  HRegion r = HRegion.createHRegion(hri, testDir, conf, desc,
      wFactory.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace()));
  HRegion.addRegionToMETA(meta, r);
  edge.setCurrentTimeMillis(1234 + 100);
  StringBuffer sb = new StringBuffer();
  assertTrue(meta.shouldFlush(sb) == false);
  edge.setCurrentTimeMillis(edge.currentTime() + HRegion.SYSTEM_CACHE_FLUSH_INTERVAL + 1);
  assertTrue(meta.shouldFlush(sb) == true);
}
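The excerpt stops before any cleanup; a plausible teardown for the regions and the factory created above (hedged, since the rest of the original test body is not shown here) would be:

HBaseTestingUtility.closeRegionAndWAL(r);
HBaseTestingUtility.closeRegionAndWAL(meta);
wFactory.close();
// restore the real clock after the injected EnvironmentEdgeForMemstoreTest
EnvironmentEdgeManager.reset();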
Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
From the class TestHRegion, method durabilityTest:
private void durabilityTest(String method, Durability tableDurability,
    Durability mutationDurability, long timeout, boolean expectAppend, final boolean expectSync,
    final boolean expectSyncFromLogSyncer) throws Exception {
  Configuration conf = HBaseConfiguration.create(CONF);
  method = method + "_" + tableDurability.name() + "_" + mutationDurability.name();
  byte[] family = Bytes.toBytes("family");
  Path logDir = new Path(new Path(dir + method), "log");
  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, logDir);
  // XXX: The spied AsyncFSWAL can not work properly because of a Mockito defect that can not
  // deal with classes which have a field of an inner class. See discussions in HBASE-15536.
  walConf.set(WALFactory.WAL_PROVIDER, "filesystem");
  final WALFactory wals = new WALFactory(walConf, null, UUID.randomUUID().toString());
  final WAL wal = spy(wals.getWAL(tableName.getName(), tableName.getNamespace()));
  this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
      false, tableDurability, wal, new byte[][] { family });
  Put put = new Put(Bytes.toBytes("r1"));
  put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("v1"));
  put.setDurability(mutationDurability);
  region.put(put);
  // verify append called or not
  verify(wal, expectAppend ? times(1) : never()).append((HRegionInfo) any(), (WALKey) any(),
      (WALEdit) any(), Mockito.anyBoolean());
  // verify sync called or not
  if (expectSync || expectSyncFromLogSyncer) {
    TEST_UTIL.waitFor(timeout, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        try {
          if (expectSync) {
            // HRegion calls this one
            verify(wal, times(1)).sync(anyLong());
          } else if (expectSyncFromLogSyncer) {
            // the WAL syncer calls this one
            verify(wal, times(1)).sync();
          }
        } catch (Throwable ignore) {
        }
        return true;
      }
    });
  } else {
    // verify(wal, never()).sync(anyLong());
    verify(wal, never()).sync();
  }
  HBaseTestingUtility.closeRegionAndWAL(this.region);
  wals.close();
  this.region = null;
}
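A caller exercises this helper once per table/mutation durability combination. An illustrative pair of invocations (assuming the class's TestName rule, as in the TestDefaultMemStore example above; the timeout and expected flags are chosen for the example, not copied from the real testDurability):

String method = name.getMethodName();
// SYNC_WAL on both sides: expect one append and an explicit sync(txid) from HRegion.
durabilityTest(method, Durability.SYNC_WAL, Durability.SYNC_WAL, 10000, true, true, false);
// SKIP_WAL on the mutation: nothing should be appended, so no sync either.
durabilityTest(method, Durability.USE_DEFAULT, Durability.SKIP_WAL, 10000, false, false, false);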