Use of org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure in project hbase by apache.
From class TestWALProcedureStore, method testWalReplayOrder_ABC_BAD.
@Test(timeout = 60000)
public void testWalReplayOrder_ABC_BAD() throws Exception {
/*
* | A B C | -> | B A D |
*/
// Scenario: log 1 contains inserts/updates for A, B, C; after the roll,
// log 2 contains further updates for B and A plus the insert of D.
// The loader must replay both logs and surface every live procedure.
TestProcedure a = new TestProcedure(1, 0);
TestProcedure b = new TestProcedure(2, 1);
TestProcedure c = new TestProcedure(3, 2);
TestProcedure d = new TestProcedure(4, 0);
// --- writes that land in the first WAL file ---
procStore.insert(a, null);
a.addStackId(0);
procStore.update(a);
// insert B as a child of A (second arg is the sub-procedure array)
procStore.insert(a, new Procedure[] { b });
b.addStackId(1);
procStore.update(b);
// insert C as a child of B
procStore.insert(b, new Procedure[] { c });
b.addStackId(2);
procStore.update(b);
// force a roll so the remaining writes go to a second WAL file
procStore.rollWriterForTesting();
// --- writes that land in the second WAL file ---
b.addStackId(3);
procStore.update(b);
a.addStackId(4);
procStore.update(a);
procStore.insert(d, null);
d.addStackId(0);
procStore.update(d);
// Restart the store and verify the replayed procedures and their order.
storeRestart(new ProcedureStore.ProcedureLoader() {
@Override
public void setMaxProcId(long maxProcId) {
// highest proc id ever inserted is D's (4)
assertEquals(4, maxProcId);
}
@Override
public void load(ProcedureIterator procIter) throws IOException {
// D (proc 4) comes first — presumably because it only exists in the
// newest log; the older procs A, B, C follow in id order. TODO confirm
// the exact ordering contract against the WAL replay implementation.
assertTrue(procIter.hasNext());
assertEquals(4, procIter.nextAsProcedureInfo().getProcId());
// TODO: This will be multiple call once we do fast-start
//assertFalse(procIter.hasNext());
assertTrue(procIter.hasNext());
assertEquals(1, procIter.nextAsProcedureInfo().getProcId());
assertTrue(procIter.hasNext());
assertEquals(2, procIter.nextAsProcedureInfo().getProcId());
assertTrue(procIter.hasNext());
assertEquals(3, procIter.nextAsProcedureInfo().getProcId());
assertFalse(procIter.hasNext());
}
@Override
public void handleCorrupted(ProcedureIterator procIter) throws IOException {
// nothing should be reported as corrupted in this scenario
assertFalse(procIter.hasNext());
}
});
}
Use of org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure in project hbase by apache.
From class TestWALProcedureStoreOnHDFS, method testWalAbortOnLowReplicationWithQueuedWriters.
@Test
public void testWalAbortOnLowReplicationWithQueuedWriters() throws Exception {
  setupDFS();
  assertEquals(3, UTIL.getDFSCluster().getDataNodes().size());

  // Slow every sync down so insert() calls queue up behind the syncer,
  // guaranteeing there are writers waiting when replication drops.
  store.registerListener(new ProcedureStore.ProcedureStoreListener() {
    @Override
    public void postSync() {
      Threads.sleepWithoutInterrupt(2000);
    }

    @Override
    public void abortProcess() {
    }
  });

  // Counts writer threads that observed a RuntimeException from insert().
  final AtomicInteger failures = new AtomicInteger(0);
  final int writerCount = store.getNumThreads() * 2 + 1;
  final Thread[] writers = new Thread[writerCount];
  for (int i = 0; i < writerCount; ++i) {
    final long procId = i + 1L;
    writers[i] = new Thread(() -> {
      try {
        LOG.debug("[S] INSERT " + procId);
        store.insert(new TestProcedure(procId, -1), null);
        LOG.debug("[E] INSERT " + procId);
      } catch (RuntimeException e) {
        failures.incrementAndGet();
        LOG.debug("[F] INSERT " + procId + ": " + e.getMessage());
      }
    });
    writers[i].start();
  }

  // Let the writers start queueing, then kill a DataNode to drop replication.
  Thread.sleep(1000);
  LOG.info("Stop DataNode");
  UTIL.getDFSCluster().stopDataNode(0);
  assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());

  for (Thread writer : writers) {
    writer.join();
  }

  // The store must have aborted, and at least a full batch of queued
  // writers (but not all of them) must have seen the failure.
  assertFalse(store.isRunning());
  assertTrue(failures.toString(),
    failures.get() >= store.getNumThreads() && failures.get() < writers.length);
}
Use of org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure in project hbase by apache.
From class TestWALProcedureStoreOnHDFS, method testWalAbortOnLowReplication.
@Test(expected = RuntimeException.class)
public void testWalAbortOnLowReplication() throws Exception {
  setupDFS();
  assertEquals(3, UTIL.getDFSCluster().getDataNodes().size());

  // Drop replication below the configured minimum before writing.
  LOG.info("Stop DataNode");
  UTIL.getDFSCluster().stopDataNode(0);
  assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());

  // Keep inserting until the store notices the low replication and
  // shuts itself down; one of these inserts is expected to throw the
  // RuntimeException this test is annotated to expect.
  store.insert(new TestProcedure(1, -1), null);
  long procId = 2;
  while (store.isRunning()) {
    assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());
    store.insert(new TestProcedure(procId, -1), null);
    ++procId;
    Thread.sleep(100);
  }
  assertFalse(store.isRunning());
}
Use of org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure in project hbase by apache.
From class TestWALProcedureStoreOnHDFS, method testWalRollOnLowReplication.
@Test
public void testWalRollOnLowReplication() throws Exception {
  // Allow the pipeline to survive on a single replica so the store
  // rolls the WAL instead of aborting when replication drops.
  UTIL.getConfiguration().setInt("dfs.namenode.replication.min", 1);
  setupDFS();

  int dnCount = 0;
  store.insert(new TestProcedure(1, -1), null);
  UTIL.getDFSCluster().restartDataNode(dnCount);

  // Keep writing while periodically bouncing DataNodes; the store should
  // ride over each restart by rolling the writer rather than shutting down.
  for (long procId = 2; procId < 100; ++procId) {
    store.insert(new TestProcedure(procId, -1), null);
    waitForNumReplicas(3);
    Thread.sleep(100);
    if (procId % 30 == 0) {
      LOG.info("Restart Data Node");
      UTIL.getDFSCluster().restartDataNode(++dnCount % 3);
    }
  }

  assertTrue(store.isRunning());
}
Use of org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure in project hbase by apache.
From class TestStressWALProcedureStore, method testEntrySizeLimit.
// REENABLE after merge of
// https://github.com/google/protobuf/issues/2228#issuecomment-252058282
// (the duplicated comment and the URL comment previously split the
// method signature between `public` and `void`; cleaned up here.)
@Ignore
@Test
public void testEntrySizeLimit() throws Exception {
  // Write procedures whose payload doubles each iteration
  // (512 B up to 256 MB at i == 20) to stress large WAL entries.
  final int NITEMS = 20;
  for (int i = 1; i <= NITEMS; ++i) {
    final byte[] data = new byte[256 << i];
    LOG.info(String.format("Writing %s", StringUtils.humanSize(data.length)));
    TestProcedure proc = new TestProcedure(i, 0, data);
    procStore.insert(proc, null);
  }
  // check that we are able to read the big proc-blobs
  ProcedureTestingUtility.storeRestartAndAssert(procStore, NITEMS, NITEMS, 0, 0);
}
Aggregations