Search in sources :

Example 26 with ByteBufferKeyValue

Use of org.apache.hadoop.hbase.ByteBufferKeyValue in project hbase by apache.

From the class TestMemStoreLAB, the method testLABThreading:

/**
 * Test allocation from lots of threads, making sure the results don't
 * overlap in any way: every allocation handed out by the MSLAB must map to a
 * distinct, contiguous, non-overlapping region of its backing chunk buffer.
 *
 * @throws Exception if the multithreaded test harness fails
 */
@Test
public void testLABThreading() throws Exception {
    Configuration conf = new Configuration();
    MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf);
    final AtomicInteger totalAllocated = new AtomicInteger();
    final MemStoreLAB mslab = new MemStoreLABImpl();
    // One allocation log per worker thread; merged for verification after the run.
    List<List<AllocRecord>> allocations = Lists.newArrayList();
    for (int i = 0; i < 10; i++) {
        final List<AllocRecord> allocsByThisThread = Lists.newLinkedList();
        allocations.add(allocsByThisThread);
        TestThread t = new MultithreadedTestUtil.RepeatingTestThread(ctx) {

            // Per-thread RNG; never shared, so no synchronization is needed.
            private final Random r = new Random();

            @Override
            public void doAnAction() throws Exception {
                // Value sizes 0-2 exercise both zero-length and tiny allocations.
                int valSize = r.nextInt(3);
                KeyValue kv = new KeyValue(rk, cf, q, new byte[valSize]);
                int size = kv.getSerializedSize();
                ByteBufferKeyValue newCell = (ByteBufferKeyValue) mslab.copyCellInto(kv);
                totalAllocated.addAndGet(size);
                allocsByThisThread.add(new AllocRecord(newCell.getBuffer(), newCell.getOffset(), size));
            }
        };
        ctx.addThread(t);
    }
    ctx.startThreads();
    // Let the workers churn until roughly 50MB has been allocated (or the
    // context aborts early due to a thread failure).
    while (totalAllocated.get() < 50 * 1024 * 1000 && ctx.shouldRun()) {
        Thread.sleep(10);
    }
    ctx.stop();
    // Partition the allocations by the actual ByteBuffer they point into,
    // make sure offsets are unique for each chunk
    Map<ByteBuffer, Map<Integer, AllocRecord>> mapsByChunk = Maps.newHashMap();
    int sizeCounted = 0;
    for (AllocRecord rec : Iterables.concat(allocations)) {
        sizeCounted += rec.size;
        // Zero-sized allocations can legitimately share an offset; skip them.
        if (rec.size == 0) {
            continue;
        }
        // computeIfAbsent replaces the manual get / null-check / put dance.
        // A TreeMap keeps records sorted by offset for the contiguity check below.
        Map<Integer, AllocRecord> mapForThisByteArray =
                mapsByChunk.computeIfAbsent(rec.alloc, chunk -> Maps.newTreeMap());
        AllocRecord oldVal = mapForThisByteArray.put(rec.offset, rec);
        assertNull("Already had an entry " + oldVal + " for allocation " + rec, oldVal);
    }
    assertEquals("Sanity check test", sizeCounted, totalAllocated.get());
    // Now check each byte array to make sure allocations don't overlap
    for (Map<Integer, AllocRecord> allocsInChunk : mapsByChunk.values()) {
        // since we add the chunkID at the 0th offset of the chunk and the
        // chunkid is an int we need to account for those 4 bytes
        int expectedOff = Bytes.SIZEOF_INT;
        // Records iterate in ascending offset order (TreeMap), so each one
        // must start exactly where the previous one ended.
        for (AllocRecord alloc : allocsInChunk.values()) {
            assertEquals(expectedOff, alloc.offset);
            assertTrue("Allocation overruns buffer", alloc.offset + alloc.size <= alloc.alloc.capacity());
            expectedOff += alloc.size;
        }
    }
}
Also used : ByteBufferKeyValue(org.apache.hadoop.hbase.ByteBufferKeyValue) KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) TestThread(org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread) ByteBuffer(java.nio.ByteBuffer) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ByteBufferKeyValue(org.apache.hadoop.hbase.ByteBufferKeyValue) Random(java.util.Random) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MultithreadedTestUtil(org.apache.hadoop.hbase.MultithreadedTestUtil) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map) Test(org.junit.Test)

Aggregations

ByteBufferKeyValue (org.apache.hadoop.hbase.ByteBufferKeyValue)26 KeyValue (org.apache.hadoop.hbase.KeyValue)22 ByteBuffer (java.nio.ByteBuffer)16 Cell (org.apache.hadoop.hbase.Cell)14 Test (org.junit.Test)11 ArrayList (java.util.ArrayList)6 ArrayBackedTag (org.apache.hadoop.hbase.ArrayBackedTag)6 Random (java.util.Random)4 Tag (org.apache.hadoop.hbase.Tag)4 List (java.util.List)3 Configuration (org.apache.hadoop.conf.Configuration)3 Path (org.apache.hadoop.fs.Path)3 HashMap (java.util.HashMap)2 ExtendedCell (org.apache.hadoop.hbase.ExtendedCell)2 CellProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.CellProtos)2 IOException (java.io.IOException)1 Map (java.util.Map)1 TreeMap (java.util.TreeMap)1 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)1 FileSystem (org.apache.hadoop.fs.FileSystem)1