Example 16 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.

From class TestReversibleScanners, method testReversibleStoreScanner.

@Test
public void testReversibleStoreScanner() throws IOException {
    // write data to one memstore and two store files
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    Path hfilePath = new Path(new Path(TEST_UTIL.getDataTestDir("testReversibleStoreScanner"), "regionname"), "familyname");
    CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
    HFileContextBuilder hcBuilder = new HFileContextBuilder();
    hcBuilder.withBlockSize(2 * 1024);
    HFileContext hFileContext = hcBuilder.build();
    StoreFileWriter writer1 = new StoreFileWriter.Builder(TEST_UTIL.getConfiguration(), cacheConf, fs)
        .withOutputDir(hfilePath)
        .withFileContext(hFileContext)
        .build();
    StoreFileWriter writer2 = new StoreFileWriter.Builder(TEST_UTIL.getConfiguration(), cacheConf, fs)
        .withOutputDir(hfilePath)
        .withFileContext(hFileContext)
        .build();
    MemStore memstore = new DefaultMemStore();
    writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1, writer2 });
    HStoreFile sf1 = new HStoreFile(fs, writer1.getPath(), TEST_UTIL.getConfiguration(), cacheConf, BloomType.NONE, true);
    HStoreFile sf2 = new HStoreFile(fs, writer2.getPath(), TEST_UTIL.getConfiguration(), cacheConf, BloomType.NONE, true);
    ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(), FAMILYNAME, 0, Integer.MAX_VALUE, Long.MAX_VALUE, KeepDeletedCells.FALSE, HConstants.DEFAULT_BLOCKSIZE, 0, CellComparatorImpl.COMPARATOR, false);
    // Case 1. Test a full reversed scan
    Scan scan = new Scan();
    scan.setReversed(true);
    StoreScanner storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC);
    verifyCountAndOrder(storeScanner, QUALSIZE * ROWSIZE, ROWSIZE, false);
    // Case 2. Test reversed scan with a specified start row
    int startRowNum = ROWSIZE / 2;
    byte[] startRow = ROWS[startRowNum];
    scan.withStartRow(startRow);
    storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC);
    verifyCountAndOrder(storeScanner, QUALSIZE * (startRowNum + 1), startRowNum + 1, false);
    // Case 3. Test reversed scan with a specified start row and specified qualifiers
    assertTrue(QUALSIZE > 2);
    scan.addColumn(FAMILYNAME, QUALS[0]);
    scan.addColumn(FAMILYNAME, QUALS[2]);
    storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, MAXMVCC);
    verifyCountAndOrder(storeScanner, 2 * (startRowNum + 1), startRowNum + 1, false);
    // Case 4. Test reversed scan with MVCC based on case 3
    for (int readPoint = 0; readPoint < MAXMVCC; readPoint++) {
        LOG.info("Setting read point to " + readPoint);
        storeScanner = getReversibleStoreScanner(memstore, sf1, sf2, scan, scanInfo, readPoint);
        int expectedRowCount = 0;
        int expectedKVCount = 0;
        for (int i = startRowNum; i >= 0; i--) {
            int kvCount = 0;
            if (makeMVCC(i, 0) <= readPoint) {
                kvCount++;
            }
            if (makeMVCC(i, 2) <= readPoint) {
                kvCount++;
            }
            if (kvCount > 0) {
                expectedRowCount++;
                expectedKVCount += kvCount;
            }
        }
        verifyCountAndOrder(storeScanner, expectedKVCount, expectedRowCount, false);
    }
}
Also used: Path(org.apache.hadoop.fs.Path) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) FileSystem(org.apache.hadoop.fs.FileSystem) Scan(org.apache.hadoop.hbase.client.Scan) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
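
The builder call is easy to miss amid the test scaffolding above, so here is a minimal sketch distilled from it: one HFileContext is built up front and shared by every StoreFileWriter that should use the same block settings. The method name and the conf/fs/outputDir parameters are illustrative, not part of the original test.

// Minimal sketch distilled from Example 16; names and parameters are illustrative.
private StoreFileWriter newWriterWithSmallBlocks(Configuration conf, FileSystem fs, Path outputDir) throws IOException {
    // Build the immutable HFileContext once; it can back any number of writers.
    HFileContext context = new HFileContextBuilder()
        .withBlockSize(2 * 1024)   // 2 KB blocks, matching the test above
        .build();
    return new StoreFileWriter.Builder(conf, new CacheConfig(conf), fs)
        .withOutputDir(outputDir)
        .withFileContext(context)
        .build();
}

Sharing one context, as writer1 and writer2 do in the test, guarantees both store files carry identical block-level settings.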

Example 17 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.

From class TestRowPrefixBloomFilter, method writeStoreFile.

private void writeStoreFile(final Path f, BloomType bt, int expKeys) throws IOException {
    HFileContext meta = new HFileContextBuilder()
        .withBlockSize(BLOCKSIZE_SMALL)
        .withChecksumType(CKTYPE)
        .withBytesPerCheckSum(CKBYTES)
        .build();
    // Make a store file and write data to it.
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
        .withFilePath(f)
        .withBloomType(bt)
        .withMaxKeyCount(expKeys)
        .withFileContext(meta)
        .build();
    long now = EnvironmentEdgeManager.currentTime();
    try {
        // Put with valid row style
        for (int i = 0; i < prefixRowCount; i += 2) {
            // prefix rows
            String prefixRow = String.format(prefixFormatter, i);
            for (int j = 0; j < suffixRowCount; j++) {
                // suffix rows
                String row = generateRowWithSuffix(prefixRow, j);
                KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("col"), now, Bytes.toBytes("value"));
                writer.append(kv);
            }
        }
        // Put with invalid row style
        for (int i = prefixRowCount; i < prefixRowCount * 2; i += 2) {
            // prefix rows
            String row = String.format(invalidFormatter, i);
            KeyValue kv = new KeyValue(Bytes.toBytes(row), Bytes.toBytes("family"), Bytes.toBytes("col"), now, Bytes.toBytes("value"));
            writer.append(kv);
        }
    } finally {
        writer.close();
    }
}
Also used: KeyValue(org.apache.hadoop.hbase.KeyValue) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext)

Example 18 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.

From class TestSecureBulkLoadManager, method prepareHFile.

private void prepareHFile(Path dir, byte[] key, byte[] value) throws Exception {
    TableDescriptor desc = testUtil.getAdmin().getDescriptor(TABLE);
    ColumnFamilyDescriptor family = desc.getColumnFamily(FAMILY);
    Compression.Algorithm compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
    CacheConfig writerCacheConf = new CacheConfig(conf, family, null, ByteBuffAllocator.HEAP);
    writerCacheConf.setCacheDataOnWrite(false);
    HFileContext hFileContext = new HFileContextBuilder()
        .withIncludesMvcc(false)
        .withIncludesTags(true)
        .withCompression(compression)
        .withCompressTags(family.isCompressTags())
        .withChecksumType(StoreUtils.getChecksumType(conf))
        .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf))
        .withBlockSize(family.getBlocksize())
        .withHBaseCheckSum(true)
        .withDataBlockEncoding(family.getDataBlockEncoding())
        .withEncryptionContext(Encryption.Context.NONE)
        .withCreateTime(EnvironmentEdgeManager.currentTime())
        .build();
    StoreFileWriter.Builder builder = new StoreFileWriter.Builder(conf, writerCacheConf, dir.getFileSystem(conf))
        .withOutputDir(new Path(dir, family.getNameAsString()))
        .withBloomType(family.getBloomFilterType())
        .withMaxKeyCount(Integer.MAX_VALUE)
        .withFileContext(hFileContext);
    StoreFileWriter writer = builder.build();
    Put put = new Put(key);
    put.addColumn(FAMILY, COLUMN, value);
    for (Cell c : put.get(FAMILY, COLUMN)) {
        writer.append(c);
    }
    writer.close();
}
Also used: Path(org.apache.hadoop.fs.Path) Compression(org.apache.hadoop.hbase.io.compress.Compression) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Cell(org.apache.hadoop.hbase.Cell)

Example 19 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.

From class HFileTestUtil, method createHFile.

/**
 * Create an HFile with the given number of rows between a given
 * start key and end key @ family:qualifier.
 * If withTag is true, we add the rowKey as the tag value for
 * tag type MOB_TABLE_NAME_TAG_TYPE.
 */
public static void createHFile(Configuration configuration, FileSystem fs, Path path, DataBlockEncoding encoding, byte[] family, byte[] qualifier, byte[] startKey, byte[] endKey, int numRows, boolean withTag) throws IOException {
    HFileContext meta = new HFileContextBuilder()
        .withIncludesTags(withTag)
        .withDataBlockEncoding(encoding)
        .withColumnFamily(family)
        .build();
    HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
        .withPath(fs, path)
        .withFileContext(meta)
        .create();
    long now = EnvironmentEdgeManager.currentTime();
    try {
        // subtract 2 since iterateOnSplits doesn't include boundary keys
        for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows - 2)) {
            Cell kv = new KeyValue(key, family, qualifier, now, key);
            if (withTag) {
                // add a tag.  Arbitrarily chose mob tag since we have a helper already.
                Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE, key);
                kv = MobUtils.createMobRefCell(kv, key, tableNameTag);
                // verify that the kv has the tag.
                Optional<Tag> tag = PrivateCellUtil.getTag(kv, TagType.MOB_TABLE_NAME_TAG_TYPE);
                if (!tag.isPresent()) {
                    throw new IllegalStateException("Tag didn't stick to KV " + kv.toString());
                }
            }
            writer.append(kv);
        }
    } finally {
        writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTime()));
        writer.close();
    }
}
Also used: KeyValue(org.apache.hadoop.hbase.KeyValue) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) ArrayBackedTag(org.apache.hadoop.hbase.ArrayBackedTag) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) Tag(org.apache.hadoop.hbase.Tag) HFile(org.apache.hadoop.hbase.io.hfile.HFile) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Cell(org.apache.hadoop.hbase.Cell)
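
A hedged usage sketch of the helper above: the table directory, family, qualifier, key range, and row count are placeholder values, and TEST_UTIL stands in for whatever HBaseTestingUtility instance the calling test already owns.

// Illustrative call of HFileTestUtil.createHFile; all literal values are placeholders.
Configuration conf = TEST_UTIL.getConfiguration();
FileSystem fs = TEST_UTIL.getTestFileSystem();
Path hfile = new Path(TEST_UTIL.getDataTestDir("bulkload"), "family/hfile_0");
HFileTestUtil.createHFile(conf, fs, hfile,
    DataBlockEncoding.NONE,                      // no data block encoding
    Bytes.toBytes("family"), Bytes.toBytes("qualifier"),
    Bytes.toBytes("aaa"), Bytes.toBytes("zzz"),  // start and end keys
    1000,                                        // number of rows to generate
    false);                                      // do not attach the MOB tag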

Example 20 with HFileContextBuilder

Use of org.apache.hadoop.hbase.io.hfile.HFileContextBuilder in project hbase by apache.

From class TestBucketCache, method testCacheBlockNextBlockMetadataMissing.

@Test
public void testCacheBlockNextBlockMetadataMissing() throws Exception {
    int size = 100;
    int length = HConstants.HFILEBLOCK_HEADER_SIZE + size;
    ByteBuffer buf1 = ByteBuffer.allocate(size), buf2 = ByteBuffer.allocate(size);
    HFileContext meta = new HFileContextBuilder().build();
    ByteBuffAllocator allocator = ByteBuffAllocator.HEAP;
    HFileBlock blockWithNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf1), HFileBlock.FILL_HEADER, -1, 52, -1, meta, allocator);
    HFileBlock blockWithoutNextBlockMetadata = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf2), HFileBlock.FILL_HEADER, -1, -1, -1, meta, allocator);
    BlockCacheKey key = new BlockCacheKey("testCacheBlockNextBlockMetadataMissing", 0);
    ByteBuffer actualBuffer = ByteBuffer.allocate(length);
    ByteBuffer block1Buffer = ByteBuffer.allocate(length);
    ByteBuffer block2Buffer = ByteBuffer.allocate(length);
    blockWithNextBlockMetadata.serialize(block1Buffer, true);
    blockWithoutNextBlockMetadata.serialize(block2Buffer, true);
    // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata back.
    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer, block1Buffer);
    waitUntilFlushedToBucket(cache, key);
    assertNotNull(cache.backingMap.get(key));
    assertEquals(1, cache.backingMap.get(key).refCnt());
    assertEquals(1, blockWithNextBlockMetadata.getBufferReadOnly().refCnt());
    assertEquals(1, blockWithoutNextBlockMetadata.getBufferReadOnly().refCnt());
    // Add blockWithoutNextBlockMetadata, expect blockWithNextBlockMetadata back.
    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer, block1Buffer);
    assertEquals(1, blockWithNextBlockMetadata.getBufferReadOnly().refCnt());
    assertEquals(1, blockWithoutNextBlockMetadata.getBufferReadOnly().refCnt());
    assertEquals(1, cache.backingMap.get(key).refCnt());
    // Clear and add blockWithoutNextBlockMetadata
    assertTrue(cache.evictBlock(key));
    assertEquals(1, blockWithNextBlockMetadata.getBufferReadOnly().refCnt());
    assertEquals(1, blockWithoutNextBlockMetadata.getBufferReadOnly().refCnt());
    assertNull(cache.getBlock(key, false, false, false));
    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithoutNextBlockMetadata, actualBuffer, block2Buffer);
    waitUntilFlushedToBucket(cache, key);
    assertEquals(1, blockWithNextBlockMetadata.getBufferReadOnly().refCnt());
    assertEquals(1, blockWithoutNextBlockMetadata.getBufferReadOnly().refCnt());
    // Add blockWithNextBlockMetadata, expect blockWithNextBlockMetadata to replace.
    CacheTestUtils.getBlockAndAssertEquals(cache, key, blockWithNextBlockMetadata, actualBuffer, block1Buffer);
    waitUntilFlushedToBucket(cache, key);
    assertEquals(1, blockWithNextBlockMetadata.getBufferReadOnly().refCnt());
    assertEquals(1, blockWithoutNextBlockMetadata.getBufferReadOnly().refCnt());
}
Also used: HFileBlock(org.apache.hadoop.hbase.io.hfile.HFileBlock) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) ByteBuffer(java.nio.ByteBuffer) BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) ByteBuffAllocator(org.apache.hadoop.hbase.io.ByteBuffAllocator) Test(org.junit.Test)

Aggregations

HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder): 89 uses
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 82 uses
Path (org.apache.hadoop.fs.Path): 52 uses
Test (org.junit.Test): 48 uses
KeyValue (org.apache.hadoop.hbase.KeyValue): 39 uses
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 27 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 26 uses
Cell (org.apache.hadoop.hbase.Cell): 17 uses
HFile (org.apache.hadoop.hbase.io.hfile.HFile): 16 uses
ByteBuffer (java.nio.ByteBuffer): 15 uses
Configuration (org.apache.hadoop.conf.Configuration): 14 uses
HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner): 12 uses
StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter): 12 uses
DataOutputStream (java.io.DataOutputStream): 6 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6 uses
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 6 uses
DataBlockEncoding (org.apache.hadoop.hbase.io.encoding.DataBlockEncoding): 6 uses
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 5 uses
IOException (java.io.IOException): 5 uses
ArrayList (java.util.ArrayList): 5 uses
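
The co-occurrence counts above trace the canonical round trip: build an HFileContext, write cells through HFile.getWriterFactory, and read them back with an HFileScanner. Below is a minimal sketch of that loop; the path and cell contents are placeholders, and the reader side assumes the HBase 2.x API (HFile.createReader(fs, path, cacheConf, primaryReplica, conf) plus HFileScanner.seekTo()/next()/getCell()).

// Write-then-read round trip; path and cell contents are illustrative only.
Configuration conf = HBaseConfiguration.create();
FileSystem fs = FileSystem.get(conf);
Path path = new Path("/tmp/demo.hfile");   // hypothetical location
CacheConfig cacheConf = new CacheConfig(conf);

HFileContext context = new HFileContextBuilder().withBlockSize(64 * 1024).build();
HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
    .withPath(fs, path)
    .withFileContext(context)
    .create();
try {
    writer.append(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("value1")));
} finally {
    writer.close();
}

// Reading back; signatures assumed from the HBase 2.x reader API.
HFile.Reader reader = HFile.createReader(fs, path, cacheConf, true, conf);
try {
    HFileScanner scanner = reader.getScanner(conf, false, false);
    if (scanner.seekTo()) {        // false only if the file holds no cells
        do {
            Cell cell = scanner.getCell();
            System.out.println(cell);
        } while (scanner.next());
    }
} finally {
    reader.close();
}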