Example 1 with RepeatingTestThread

Use of org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread in project hbase by apache.

From the class TestHRegion, method testWritesWhileGetting:

/**
 * Writes very wide records and gets the latest row every time. Flushes and
 * compacts the region aggressively to catch issues.
 *
 * @throws IOException
 *           by flush / scan / compaction
 * @throws InterruptedException
 *           when joining threads
 */
@Test
public void testWritesWhileGetting() throws Exception {
    int testCount = 50;
    int numRows = 1;
    int numFamilies = 10;
    int numQualifiers = 100;
    int compactInterval = 100;
    byte[][] families = new byte[numFamilies][];
    for (int i = 0; i < numFamilies; i++) {
        families[i] = Bytes.toBytes("family" + i);
    }
    byte[][] qualifiers = new byte[numQualifiers][];
    for (int i = 0; i < numQualifiers; i++) {
        qualifiers[i] = Bytes.toBytes("qual" + i);
    }
    // This test flushes constantly and can cause many files to be created,
    // possibly extending over the ulimit. Make sure compactions are aggressive
    // in reducing the number of HFiles created.
    Configuration conf = HBaseConfiguration.create(CONF);
    conf.setInt("hbase.hstore.compaction.min", 1);
    conf.setInt("hbase.hstore.compaction.max", 1000);
    this.region = initHRegion(tableName, method, conf, families);
    PutThread putThread = null;
    MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
    try {
        putThread = new PutThread(numRows, families, qualifiers);
        putThread.start();
        putThread.waitForFirstPut();
        // Add a thread that flushes as fast as possible
        ctx.addThread(new RepeatingTestThread(ctx) {

            @Override
            public void doAnAction() throws Exception {
                region.flush(true);
                // Compact regularly to avoid creating too many files and exceeding
                // the ulimit.
                region.compact(false);
                for (HStore store : region.getStores()) {
                    store.closeAndArchiveCompactedFiles();
                }
            }
        });
        ctx.startThreads();
        Get get = new Get(Bytes.toBytes("row0"));
        Result result = null;
        int expectedCount = numFamilies * numQualifiers;
        long prevTimestamp = 0L;
        for (int i = 0; i < testCount; i++) {
            LOG.info("testWritesWhileGetting verify turn " + i);
            boolean previousEmpty = result == null || result.isEmpty();
            result = region.get(get);
            if (!result.isEmpty() || !previousEmpty || i > compactInterval) {
                assertEquals("i=" + i, expectedCount, result.size());
                // TODO this was removed, now what dangit?!
                // search looking for the qualifier in question?
                long timestamp = 0;
                for (Cell kv : result.rawCells()) {
                    if (CellUtil.matchingFamily(kv, families[0]) && CellUtil.matchingQualifier(kv, qualifiers[0])) {
                        timestamp = kv.getTimestamp();
                    }
                }
                assertTrue(timestamp >= prevTimestamp);
                prevTimestamp = timestamp;
                Cell previousKV = null;
                for (Cell kv : result.rawCells()) {
                    byte[] thisValue = CellUtil.cloneValue(kv);
                    if (previousKV != null) {
                        if (Bytes.compareTo(CellUtil.cloneValue(previousKV), thisValue) != 0) {
                            LOG.warn("These two KV should have the same value." + " Previous KV:" + previousKV + "(memStoreTS:" + previousKV.getSequenceId() + ")" + ", New KV: " + kv + "(memStoreTS:" + kv.getSequenceId() + ")");
                            assertEquals(0, Bytes.compareTo(CellUtil.cloneValue(previousKV), thisValue));
                        }
                    }
                    previousKV = kv;
                }
            }
        }
    } finally {
        if (putThread != null) {
            putThread.done();
        }
        region.flush(true);
        if (putThread != null) {
            putThread.join();
            putThread.checkNoError();
        }
        ctx.stop();
        HBaseTestingUtil.closeRegionAndWAL(this.region);
        this.region = null;
    }
}
Also used:
Configuration (org.apache.hadoop.conf.Configuration)
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)
FailedSanityCheckException (org.apache.hadoop.hbase.exceptions.FailedSanityCheckException)
RegionTooBusyException (org.apache.hadoop.hbase.RegionTooBusyException)
InterruptedIOException (java.io.InterruptedIOException)
IOException (java.io.IOException)
NotServingRegionException (org.apache.hadoop.hbase.NotServingRegionException)
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)
DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException)
ExpectedException (org.junit.rules.ExpectedException)
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult)
Result (org.apache.hadoop.hbase.client.Result)
RepeatingTestThread (org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread)
Get (org.apache.hadoop.hbase.client.Get)
MultithreadedTestUtil (org.apache.hadoop.hbase.MultithreadedTestUtil)
Cell (org.apache.hadoop.hbase.Cell)
Test (org.junit.Test)
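Stripped of the test-specific verification, Example 1 reduces to a small, reusable skeleton. The sketch below is distilled from it and assumes only the MultithreadedTestUtil calls shown above (TestContext, addThread, startThreads, stop); the class name, the action body, and the one-second run time are illustrative.

// Minimal RepeatingTestThread skeleton (illustrative; names and timings are
// assumptions, the API calls are the ones used in Example 1).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MultithreadedTestUtil;
import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;

public class RepeatingTestThreadSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
        ctx.addThread(new RepeatingTestThread(ctx) {

            @Override
            public void doAnAction() throws Exception {
                // The background work goes here; Example 1 flushes and
                // compacts the region on every iteration.
                Thread.sleep(10);
            }
        });
        // doAnAction() is invoked in a loop on the added thread until the
        // context is stopped.
        ctx.startThreads();
        Thread.sleep(1000);
        // stop() waits for the threads and surfaces any exception a
        // doAnAction() call may have thrown.
        ctx.stop();
    }
}

Example 1 layers its Get-based verification loop on top of exactly this lifecycle, with the flush/compact work inside doAnAction().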

Example 2 with RepeatingTestThread

Use of org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread in project hbase by apache.

From the class AcidGuaranteesTestTool, method runTestAtomicity:

private void runTestAtomicity(Admin admin) throws Exception {
    createTableIfMissing(admin, useMob);
    TestContext ctx = new TestContext(conf);
    byte[][] rows = new byte[numUniqueRows][];
    for (int i = 0; i < numUniqueRows; i++) {
        rows[i] = Bytes.toBytes("test_row_" + i);
    }
    List<AtomicityWriter> writers = Lists.newArrayList();
    for (int i = 0; i < numWriters; i++) {
        AtomicityWriter writer = new AtomicityWriter(ctx, rows, FAMILIES, sharedPool);
        writers.add(writer);
        ctx.addThread(writer);
    }
    // Add a flusher
    ctx.addThread(new RepeatingTestThread(ctx) {

        @Override
        public void doAnAction() throws Exception {
            try {
                admin.flush(TABLE_NAME);
            } catch (IOException ioe) {
                LOG.warn("Ignoring exception while flushing: " + StringUtils.stringifyException(ioe));
            }
            // Unless crazyFlush is set, space the flushes a minute apart so
            // this thread does not overwhelm the running cluster.
            if (!crazyFlush) {
                Thread.sleep(60000);
            }
        }
    });
    List<AtomicGetReader> getters = Lists.newArrayList();
    for (int i = 0; i < numGetters; i++) {
        AtomicGetReader getter = new AtomicGetReader(ctx, rows[i % numUniqueRows], FAMILIES, sharedPool);
        getters.add(getter);
        ctx.addThread(getter);
    }
    List<AtomicScanReader> scanners = Lists.newArrayList();
    for (int i = 0; i < numScanners; i++) {
        AtomicScanReader scanner = new AtomicScanReader(ctx, FAMILIES, sharedPool);
        scanners.add(scanner);
        ctx.addThread(scanner);
    }
    ctx.startThreads();
    ctx.waitFor(millisToRun);
    ctx.stop();
    LOG.info("Finished test. Writers:");
    for (AtomicityWriter writer : writers) {
        LOG.info("  wrote " + writer.numWritten.get());
    }
    LOG.info("Readers:");
    for (AtomicGetReader reader : getters) {
        LOG.info("  read " + reader.numRead.get());
    }
    LOG.info("Scanners:");
    for (AtomicScanReader scanner : scanners) {
        LOG.info("  scanned " + scanner.numScans.get());
        LOG.info("  verified " + scanner.numRowsScanned.get() + " rows");
    }
}
Also used:
TestContext (org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext)
IOException (java.io.IOException)
RepeatingTestThread (org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread)
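The AtomicityWriter, AtomicGetReader, and AtomicScanReader classes are not reproduced on this page. Judging from the counters the summary loop reads (numWritten, numRead, numScans), each worker plausibly follows the same RepeatingTestThread pattern as the flusher; the sketch below shows that shape. The class name, the counter field, and doOneStep() are hypothetical illustrations, not HBase API.

// Hypothetical worker shape; only RepeatingTestThread and TestContext are
// taken from the example, everything else is an illustrative assumption.
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;

abstract class CountingWorker extends RepeatingTestThread {

    // Mirrors counters like writer.numWritten and reader.numRead that the
    // summary loop prints once the test finishes.
    final AtomicLong numActions = new AtomicLong();

    CountingWorker(TestContext ctx) {
        super(ctx);
    }

    @Override
    public void doAnAction() throws Exception {
        // One write, get, or scan against the test table.
        doOneStep();
        numActions.incrementAndGet();
    }

    abstract void doOneStep() throws Exception;
}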

Example 3 with RepeatingTestThread

Use of org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread in project hbase by apache.

From the class TestAcidGuarantees, method runTestAtomicity:

public void runTestAtomicity(long millisToRun, int numWriters, int numGetters, int numScanners, int numUniqueRows, final boolean systemTest, final boolean useMob) throws Exception {
    createTableIfMissing(useMob);
    TestContext ctx = new TestContext(util.getConfiguration());
    byte[][] rows = new byte[numUniqueRows][];
    for (int i = 0; i < numUniqueRows; i++) {
        rows[i] = Bytes.toBytes("test_row_" + i);
    }
    List<AtomicityWriter> writers = Lists.newArrayList();
    for (int i = 0; i < numWriters; i++) {
        AtomicityWriter writer = new AtomicityWriter(ctx, rows, FAMILIES, getSharedThreadPool());
        writers.add(writer);
        ctx.addThread(writer);
    }
    // Add a flusher
    ctx.addThread(new RepeatingTestThread(ctx) {

        Admin admin = util.getAdmin();

        @Override
        public void doAnAction() throws Exception {
            try {
                admin.flush(TABLE_NAME);
            } catch (IOException ioe) {
                LOG.warn("Ignoring exception while flushing: " + StringUtils.stringifyException(ioe));
            }
            // In a system test we run against a real cluster, so space the
            // flushes a minute apart rather than overwhelm the running cluster.
            if (systemTest) {
                Thread.sleep(60000);
            }
        }
    });
    List<AtomicGetReader> getters = Lists.newArrayList();
    for (int i = 0; i < numGetters; i++) {
        AtomicGetReader getter = new AtomicGetReader(ctx, rows[i % numUniqueRows], FAMILIES, getSharedThreadPool());
        getters.add(getter);
        ctx.addThread(getter);
    }
    List<AtomicScanReader> scanners = Lists.newArrayList();
    for (int i = 0; i < numScanners; i++) {
        AtomicScanReader scanner = new AtomicScanReader(ctx, FAMILIES, getSharedThreadPool());
        scanners.add(scanner);
        ctx.addThread(scanner);
    }
    ctx.startThreads();
    ctx.waitFor(millisToRun);
    ctx.stop();
    LOG.info("Finished test. Writers:");
    for (AtomicityWriter writer : writers) {
        LOG.info("  wrote " + writer.numWritten.get());
    }
    LOG.info("Readers:");
    for (AtomicGetReader reader : getters) {
        LOG.info("  read " + reader.numRead.get());
    }
    LOG.info("Scanners:");
    for (AtomicScanReader scanner : scanners) {
        LOG.info("  scanned " + scanner.numScans.get());
        LOG.info("  verified " + scanner.numRowsScanned.get() + " rows");
    }
}
Also used:
TestContext (org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext)
IOException (java.io.IOException)
Admin (org.apache.hadoop.hbase.client.Admin)
RepeatingTestThread (org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread)
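A caller drives runTestAtomicity with a wall-clock budget and thread counts matching the signature shown above. The sketch below is a plausible invocation from inside TestAcidGuarantees; the method name and every argument value are illustrative, not taken from the HBase test suite.

// Hypothetical test method; argument values are made up for illustration.
@Test
public void testGetAtomicity() throws Exception {
    // 20 s run, 5 writers, 5 getters, 0 scanners, 3 unique rows,
    // not a system test, MOB disabled.
    runTestAtomicity(20000, 5, 5, 0, 3, false, false);
}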

Aggregations

IOException (java.io.IOException): 3
RepeatingTestThread (org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread): 3
TestContext (org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext): 2
InterruptedIOException (java.io.InterruptedIOException): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
Cell (org.apache.hadoop.hbase.Cell): 1
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 1
DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException): 1
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 1
MultithreadedTestUtil (org.apache.hadoop.hbase.MultithreadedTestUtil): 1
NotServingRegionException (org.apache.hadoop.hbase.NotServingRegionException): 1
RegionTooBusyException (org.apache.hadoop.hbase.RegionTooBusyException): 1
Admin (org.apache.hadoop.hbase.client.Admin): 1
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult): 1
Get (org.apache.hadoop.hbase.client.Get): 1
Result (org.apache.hadoop.hbase.client.Result): 1
FailedSanityCheckException (org.apache.hadoop.hbase.exceptions.FailedSanityCheckException): 1
Test (org.junit.Test): 1
ExpectedException (org.junit.rules.ExpectedException): 1