Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
From the class TestDurability, method testIncrement:
@Test
public void testIncrement() throws Exception {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] col1 = Bytes.toBytes("col1");
  byte[] col2 = Bytes.toBytes("col2");
  byte[] col3 = Bytes.toBytes("col3");
  // Setting up region
  final WALFactory wals = new WALFactory(CONF, null,
      ServerName.valueOf("TestIncrement", 16010, System.currentTimeMillis()).toString());
  byte[] tableName = Bytes.toBytes("TestIncrement");
  final WAL wal = wals.getWAL(tableName, null);
  HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);
  // col1: amount = 0, 1 write back to WAL
  Increment inc1 = new Increment(row1);
  inc1.addColumn(FAMILY, col1, 0);
  Result res = region.increment(inc1);
  assertEquals(1, res.size());
  assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col1)));
  verifyWALCount(wals, wal, 1);
  // col1: amount = 1, 1 write back to WAL
  inc1 = new Increment(row1);
  inc1.addColumn(FAMILY, col1, 1);
  res = region.increment(inc1);
  assertEquals(1, res.size());
  assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
  verifyWALCount(wals, wal, 2);
  // col1: amount = 0, 0 write back to WAL
  inc1 = new Increment(row1);
  inc1.addColumn(FAMILY, col1, 0);
  res = region.increment(inc1);
  assertEquals(1, res.size());
  assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
  verifyWALCount(wals, wal, 2);
  // col1: amount = 0, col2: amount = 0, col3: amount = 0
  // 1 write back to WAL
  inc1 = new Increment(row1);
  inc1.addColumn(FAMILY, col1, 0);
  inc1.addColumn(FAMILY, col2, 0);
  inc1.addColumn(FAMILY, col3, 0);
  res = region.increment(inc1);
  assertEquals(3, res.size());
  assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
  assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col2)));
  assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col3)));
  verifyWALCount(wals, wal, 3);
  // col1: amount = 5, col2: amount = 4, col3: amount = 3
  // 1 write back to WAL
  inc1 = new Increment(row1);
  inc1.addColumn(FAMILY, col1, 5);
  inc1.addColumn(FAMILY, col2, 4);
  inc1.addColumn(FAMILY, col3, 3);
  res = region.increment(inc1);
  assertEquals(3, res.size());
  assertEquals(6, Bytes.toLong(res.getValue(FAMILY, col1)));
  assertEquals(4, Bytes.toLong(res.getValue(FAMILY, col2)));
  assertEquals(3, Bytes.toLong(res.getValue(FAMILY, col3)));
  verifyWALCount(wals, wal, 4);
}
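The verifyWALCount helper used throughout these tests is not part of the snippet. A plausible sketch, assuming the test class keeps a shared FileSystem in a FS field and runs against HBase 1.x, where DefaultWALProvider exposes the current WAL file name:

private void verifyWALCount(WALFactory wals, WAL log, int expected) throws Exception {
  // Re-open the WAL file currently being written and count its entries.
  Path walPath = DefaultWALProvider.getCurrentFileName(log);
  WAL.Reader reader = wals.createReader(FS, walPath); // FS: assumed shared test FileSystem
  int count = 0;
  WAL.Entry entry = new WAL.Entry();
  while (reader.next(entry) != null) {
    count++;
  }
  reader.close();
  assertEquals(expected, count);
}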
Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
From the class TestDurability, method testDurability:
@Test
public void testDurability() throws Exception {
  final WALFactory wals = new WALFactory(CONF, null,
      ServerName.valueOf("TestDurability", 16010, System.currentTimeMillis()).toString());
  byte[] tableName = Bytes.toBytes("TestDurability");
  final WAL wal = wals.getWAL(tableName, null);
  HRegion region = createHRegion(tableName, "region", wal, Durability.USE_DEFAULT);
  HRegion deferredRegion = createHRegion(tableName, "deferredRegion", wal, Durability.ASYNC_WAL);
  region.put(newPut(null));
  verifyWALCount(wals, wal, 1);
  // A put through the deferred region is not synced to the WAL immediately,
  // though it may already have been sync'ed by the underlying AsyncWriter
  // and AsyncFlusher threads.
  deferredRegion.put(newPut(null));
  // It is guaranteed to be durable once we sync the WAL explicitly.
  wal.sync();
  verifyWALCount(wals, wal, 2);
  // Another deferred put becomes durable with the explicit sync.
  deferredRegion.put(newPut(null));
  wal.sync();
  verifyWALCount(wals, wal, 3);
  region.put(newPut(null));
  verifyWALCount(wals, wal, 4);
  // USE_DEFAULT falls back to the deferred region's table default (ASYNC_WAL),
  // so this put also needs the explicit sync.
  deferredRegion.put(newPut(Durability.USE_DEFAULT));
  wal.sync();
  verifyWALCount(wals, wal, 5);
  region.put(newPut(Durability.USE_DEFAULT));
  verifyWALCount(wals, wal, 6);
  // SKIP_WAL never writes to the WAL.
  region.put(newPut(Durability.SKIP_WAL));
  deferredRegion.put(newPut(Durability.SKIP_WAL));
  verifyWALCount(wals, wal, 6);
  wal.sync();
  verifyWALCount(wals, wal, 6);
  // ASYNC_WAL overrides the region's SYNC table default.
  region.put(newPut(Durability.ASYNC_WAL));
  deferredRegion.put(newPut(Durability.ASYNC_WAL));
  wal.sync();
  verifyWALCount(wals, wal, 8);
  // SYNC_WAL overrides the deferred region's ASYNC table default.
  region.put(newPut(Durability.SYNC_WAL));
  deferredRegion.put(newPut(Durability.SYNC_WAL));
  verifyWALCount(wals, wal, 10);
  // FSYNC_WAL behaves like SYNC_WAL.
  region.put(newPut(Durability.FSYNC_WAL));
  deferredRegion.put(newPut(Durability.FSYNC_WAL));
  verifyWALCount(wals, wal, 12);
}
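The newPut(Durability) helper is likewise not shown. A minimal sketch consistent with the calls above, assuming the test defines ROW, FAMILY, and COL byte-array constants:

private Put newPut(Durability durability) {
  // ROW, FAMILY, COL: assumed test constants, not shown in the snippet.
  Put p = new Put(ROW);
  p.addColumn(FAMILY, COL, COL);
  if (durability != null) {
    // null means "use the region's table default" in the calls above.
    p.setDurability(durability);
  }
  return p;
}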
Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
From the class TestLogRollingNoCluster, method testContendedLogRolling:
/**
 * Spin up a bunch of threads and have them all append to a WAL. Roll the
 * WAL frequently to try and trigger an NPE.
 */
@Test
public void testContendedLogRolling() throws Exception {
  TEST_UTIL.startMiniDFSCluster(3);
  Path dir = TEST_UTIL.getDataTestDirOnTestFS();
  // The implementation needs to know the 'handler' count.
  TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, NUM_THREADS);
  final Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  conf.set(WALFactory.WAL_PROVIDER, "filesystem");
  FSUtils.setRootDir(conf, dir);
  conf.set("hbase.regionserver.hlog.writer.impl", HighLatencySyncWriter.class.getName());
  final WALFactory wals = new WALFactory(conf, null, TestLogRollingNoCluster.class.getName());
  final WAL wal = wals.getWAL(new byte[] {}, null);
  final int numThreads = NUM_THREADS;
  Appender[] appenders = new Appender[numThreads];
  try {
    for (int i = 0; i < numThreads; i++) {
      // Have each appending thread write NUM_ENTRIES entries
      appenders[i] = new Appender(wal, i, NUM_ENTRIES);
    }
    for (int i = 0; i < numThreads; i++) {
      appenders[i].start();
    }
    for (int i = 0; i < numThreads; i++) {
      // Ensure that all threads are joined before closing the WAL
      appenders[i].join();
    }
  } finally {
    wals.close();
  }
  for (int i = 0; i < numThreads; i++) {
    assertFalse(appenders[i].isException());
  }
  TEST_UTIL.shutdownMiniDFSCluster();
}
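The Appender class referenced here lives in TestLogRollingNoCluster and is not included in the snippet. A simplified sketch of such an appending thread, reusing the append/sync/rollWriter calls seen elsewhere on this page; the roll interval, edit contents, and error-tracking details are assumptions:

static class Appender extends Thread {
  private final WAL wal;
  private final int count;
  private volatile Exception e = null;

  Appender(WAL wal, int index, int count) {
    super("Appender#" + index);
    this.wal = wal;
    this.count = count;
  }

  boolean isException() {
    return this.e != null;
  }

  @Override
  public void run() {
    byte[] b = Bytes.toBytes(getName());
    HRegionInfo hri = new HRegionInfo(TableName.valueOf(b));
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    try {
      for (int i = 0; i < count; i++) {
        // Append a small edit and sync it; roll the writer every so often
        // (interval assumed) to provoke contention between appends and rolls.
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(b, b, b));
        long txid = wal.append(hri,
            new WALKey(hri.getEncodedNameAsBytes(), hri.getTable(),
                System.currentTimeMillis(), mvcc, scopes),
            edit, true);
        wal.sync(txid);
        if (i % 100 == 0) {
          wal.rollWriter();
        }
      }
    } catch (Exception ex) {
      this.e = ex;
    }
  }
}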
Use of org.apache.hadoop.hbase.wal.WALFactory in project hbase by apache.
From the class TestWALActionsListener, method testActionListener:
/**
 * Add a bunch of dummy data and roll the logs every two inserts. We
 * should end up with 10 rolled files (plus the roll called in
 * the constructor). Also test adding a listener while it's running.
 */
@Test
public void testActionListener() throws Exception {
  DummyWALActionsListener observer = new DummyWALActionsListener();
  List<WALActionsListener> list = new ArrayList<>(1);
  list.add(observer);
  final WALFactory wals = new WALFactory(conf, list, "testActionListener");
  DummyWALActionsListener laterobserver = new DummyWALActionsListener();
  HRegionInfo hri = new HRegionInfo(TableName.valueOf(SOME_BYTES), SOME_BYTES, SOME_BYTES, false);
  final WAL wal = wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
  MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  for (int i = 0; i < 20; i++) {
    byte[] b = Bytes.toBytes(i + "");
    KeyValue kv = new KeyValue(b, b, b);
    WALEdit edit = new WALEdit();
    edit.add(kv);
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(SOME_BYTES));
    htd.addFamily(new HColumnDescriptor(b));
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
      scopes.put(fam, 0);
    }
    final long txid = wal.append(hri,
        new WALKey(hri.getEncodedNameAsBytes(), TableName.valueOf(b), 0, mvcc, scopes),
        edit, true);
    wal.sync(txid);
    if (i == 10) {
      wal.registerWALActionsListener(laterobserver);
    }
    if (i % 2 == 0) {
      wal.rollWriter();
    }
  }
  wal.close();
  assertEquals(11, observer.preLogRollCounter);
  assertEquals(11, observer.postLogRollCounter);
  assertEquals(5, laterobserver.preLogRollCounter);
  assertEquals(5, laterobserver.postLogRollCounter);
  assertEquals(1, observer.closedCount);
}
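DummyWALActionsListener is also defined in the HBase test sources rather than in this snippet. A plausible sketch consistent with the counters asserted above, assuming the 1.x WALActionsListener.Base adapter class:

static class DummyWALActionsListener extends WALActionsListener.Base {
  // Public counters so the test can assert on roll and close activity.
  public int preLogRollCounter = 0;
  public int postLogRollCounter = 0;
  public int closedCount = 0;

  @Override
  public void preLogRoll(Path oldFile, Path newFile) {
    preLogRollCounter++;
  }

  @Override
  public void postLogRoll(Path oldFile, Path newFile) {
    postLogRollCounter++;
  }

  @Override
  public void logCloseRequested() {
    closedCount++;
  }
}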
Use of org.apache.hadoop.hbase.wal.WALFactory in project cdap by caskdata.
From the class IncrementSummingScannerTest, method createRegion:
static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId,
    HColumnDescriptor cfd) throws Exception {
  HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
  HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
  cfd.setMaxVersions(Integer.MAX_VALUE);
  cfd.setKeepDeletedCells(true);
  htd.addFamily(cfd);
  htd.addCoprocessor(IncrementHandler.class.getName());
  HTableDescriptor desc = htd.build();
  String tableName = desc.getNameAsString();
  Path tablePath = new Path("/tmp/" + tableName);
  Path hlogPath = new Path("/tmp/hlog-" + tableName);
  FileSystem fs = FileSystem.get(hConf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(hConf, null, hlogPath.toString());
  WAL hLog = walFactory.getWAL(new byte[] { 1 });
  HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, hConf, desc,
      new LocalRegionServerServices(hConf, ServerName.valueOf(
          InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
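A hypothetical caller for this helper might look like the following; the TableId.from(...) arguments, the column family name "i", and the initialize-before-use step are illustrative assumptions, not values taken from the test:

Configuration hConf = HBaseConfiguration.create();
CConfiguration cConf = CConfiguration.create();
// Build a standalone region backed by a local WAL, then write through it.
HRegion region = createRegion(hConf, cConf,
    TableId.from("default", "counters"), new HColumnDescriptor("i"));
region.initialize();
try {
  Put p = new Put(Bytes.toBytes("row1"));
  p.addColumn(Bytes.toBytes("i"), Bytes.toBytes("c1"), Bytes.toBytes(1L));
  region.put(p);
} finally {
  region.close();
}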