Example 21 with StoreFileWriter

Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

From the class BoundedRecoveredHFilesOutputSink, the append method:

@Override
public void append(RegionEntryBuffer buffer) throws IOException {
    Map<String, CellSet> familyCells = new HashMap<>();
    Map<String, Long> familySeqIds = new HashMap<>();
    boolean isMetaTable = buffer.tableName.equals(META_TABLE_NAME);
    // First iterate all Cells to find which column families are present and to stamp each Cell
    // with its sequence id.
    for (WAL.Entry entry : buffer.entryBuffer) {
        long seqId = entry.getKey().getSequenceId();
        List<Cell> cells = entry.getEdit().getCells();
        for (Cell cell : cells) {
            if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
                continue;
            }
            PrivateCellUtil.setSequenceId(cell, seqId);
            String familyName = Bytes.toString(CellUtil.cloneFamily(cell));
            // A comparator needs to be specified explicitly for the meta table
            familyCells.computeIfAbsent(familyName, key -> new CellSet(isMetaTable ? MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR)).add(cell);
            familySeqIds.compute(familyName, (k, v) -> v == null ? seqId : Math.max(v, seqId));
        }
    }
    // Create a new hfile writer for each column family, write edits then close writer.
    String regionName = Bytes.toString(buffer.encodedRegionName);
    for (Map.Entry<String, CellSet> cellsEntry : familyCells.entrySet()) {
        String familyName = cellsEntry.getKey();
        StoreFileWriter writer = createRecoveredHFileWriter(buffer.tableName, regionName, familySeqIds.get(familyName), familyName, isMetaTable);
        LOG.trace("Created {}", writer.getPath());
        openingWritersNum.incrementAndGet();
        try {
            for (Cell cell : cellsEntry.getValue()) {
                writer.append(cell);
            }
            // Append the max seqid to the hfile; it is used during recovery.
            writer.appendMetadata(familySeqIds.get(familyName), false);
            regionEditsWrittenMap.compute(Bytes.toString(buffer.encodedRegionName), (k, v) -> v == null ? buffer.entryBuffer.size() : v + buffer.entryBuffer.size());
            splits.add(writer.getPath());
            openingWritersNum.decrementAndGet();
        } finally {
            writer.close();
            LOG.trace("Closed {}, edits={}", writer.getPath(), familyCells.size());
        }
    }
}
Also used : Entry(org.apache.hadoop.hbase.wal.WAL.Entry) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) InterruptedIOException(java.io.InterruptedIOException) CellSet(org.apache.hadoop.hbase.regionserver.CellSet) ConcurrentMap(java.util.concurrent.ConcurrentMap) Future(java.util.concurrent.Future) RegionEntryBuffer(org.apache.hadoop.hbase.wal.EntryBuffers.RegionEntryBuffer) CellComparatorImpl(org.apache.hadoop.hbase.CellComparatorImpl) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StoreUtils(org.apache.hadoop.hbase.regionserver.StoreUtils) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) MetaCellComparator(org.apache.hadoop.hbase.MetaCellComparator) Cell(org.apache.hadoop.hbase.Cell) Bytes(org.apache.hadoop.hbase.util.Bytes) TableName(org.apache.hadoop.hbase.TableName) Logger(org.slf4j.Logger) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) CellUtil(org.apache.hadoop.hbase.CellUtil) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) List(java.util.List) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) PrivateCellUtil(org.apache.hadoop.hbase.PrivateCellUtil) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) META_TABLE_NAME(org.apache.hadoop.hbase.TableName.META_TABLE_NAME)
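
The createRecoveredHFileWriter call above is a private helper of BoundedRecoveredHFilesOutputSink that this page does not show. Below is a minimal sketch of what such a helper might look like; the simplified signature, the CacheConfig.DISABLED choice, the withCellComparator call, and the output-directory handling are illustrative assumptions, not the actual HBase implementation.

    // A hedged sketch, not the real helper: builds a StoreFileWriter for one
    // column family's recovered edits.
    private static StoreFileWriter createRecoveredHFileWriter(Configuration conf, FileSystem fs,
            Path familyDir, boolean isMetaTable) throws IOException {
        HFileContext context = new HFileContextBuilder()
            // Meta regions sort cells differently, so pick the matching comparator.
            .withCellComparator(isMetaTable ? MetaCellComparator.META_COMPARATOR
                : CellComparatorImpl.COMPARATOR)
            .build();
        return new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs)
            // A unique file name is generated under this directory.
            .withOutputDir(familyDir)
            .withFileContext(context)
            .build();
    }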

Example 22 with StoreFileWriter

Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

From the class TestSeekBeforeWithInlineBlocks, the testMultiIndexLevelRandomHFileWithBlooms method:

/**
 * Scanner.seekBefore() could fail because, when seeking to a previous HFile data block, it needs
 * to know the size of that data block, which it calculates using current data block offset and
 * the previous data block offset.  This fails to work when there are leaf-level index blocks in
 * the scannable section of the HFile, i.e. starting in HFileV2.  This test will try seekBefore()
 * on flat (single-level) and multi-level (2- and 3-level) HFiles and confirm this bug is now fixed.  This
 * bug also happens for inline Bloom blocks for the same reasons.
 */
@Test
public void testMultiIndexLevelRandomHFileWithBlooms() throws IOException {
    conf = TEST_UTIL.getConfiguration();
    TEST_UTIL.getConfiguration().setInt(BloomFilterUtil.PREFIX_LENGTH_KEY, 10);
    // Try out different HFile versions to ensure reverse scan works on each version
    for (int hfileVersion = HFile.MIN_FORMAT_VERSION_WITH_TAGS; hfileVersion <= HFile.MAX_FORMAT_VERSION; hfileVersion++) {
        conf.setInt(HFile.FORMAT_VERSION_KEY, hfileVersion);
        fs = HFileSystem.get(conf);
        // Try out different bloom types because inline Bloom blocks break seekBefore()
        for (BloomType bloomType : BloomType.values()) {
            // Test out HFile block indices of various sizes/levels
            for (int testI = 0; testI < INDEX_CHUNK_SIZES.length; testI++) {
                int indexBlockSize = INDEX_CHUNK_SIZES[testI];
                int expectedNumLevels = EXPECTED_NUM_LEVELS[testI];
                LOG.info(String.format("Testing HFileVersion: %s, BloomType: %s, Index Levels: %s", hfileVersion, bloomType, expectedNumLevels));
                conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, indexBlockSize);
                conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE);
                conf.setInt(BloomFilterUtil.PREFIX_LENGTH_KEY, 10);
                Cell[] cells = new Cell[NUM_KV];
                Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), String.format("testMultiIndexLevelRandomHFileWithBlooms-%s-%s-%s", hfileVersion, bloomType, testI));
                // Disable caching to prevent it from hiding any bugs in block seeks/reads
                conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
                CacheConfig cacheConf = new CacheConfig(conf);
                // Write the HFile
                {
                    HFileContext meta = new HFileContextBuilder().withBlockSize(DATA_BLOCK_SIZE).build();
                    StoreFileWriter storeFileWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(hfilePath).withFileContext(meta).withBloomType(bloomType).build();
                    for (int i = 0; i < NUM_KV; i++) {
                        byte[] row = RandomKeyValueUtil.randomOrderedKey(RAND, i);
                        byte[] qual = RandomKeyValueUtil.randomRowOrQualifier(RAND);
                        byte[] value = RandomKeyValueUtil.randomValue(RAND);
                        KeyValue kv = new KeyValue(row, FAM, qual, value);
                        storeFileWriter.append(kv);
                        cells[i] = kv;
                    }
                    storeFileWriter.close();
                }
                // Read the HFile
                HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConf, true, conf);
                // Sanity check the HFile index level
                assertEquals(expectedNumLevels, reader.getTrailer().getNumDataIndexLevels());
                // Check that we can seekBefore in either direction and with both pread
                // enabled and disabled
                for (boolean pread : new boolean[] { false, true }) {
                    HFileScanner scanner = reader.getScanner(conf, true, pread);
                    checkNoSeekBefore(cells, scanner, 0);
                    for (int i = 1; i < NUM_KV; i++) {
                        checkSeekBefore(cells, scanner, i);
                        checkCell(cells[i - 1], scanner.getCell());
                    }
                    assertTrue(scanner.seekTo());
                    for (int i = NUM_KV - 1; i >= 1; i--) {
                        checkSeekBefore(cells, scanner, i);
                        checkCell(cells[i - 1], scanner.getCell());
                    }
                    checkNoSeekBefore(cells, scanner, 0);
                    scanner.close();
                }
                reader.close();
            }
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) KeyValue(org.apache.hadoop.hbase.KeyValue) BloomType(org.apache.hadoop.hbase.regionserver.BloomType) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
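
The helpers checkSeekBefore, checkNoSeekBefore, and checkCell are defined elsewhere in the test class and are not shown on this page. A plausible sketch of them, assuming standard JUnit asserts and the HFileScanner.seekBefore(Cell) API used above (the real bodies may differ):

    // Hypothetical reconstructions of the helpers referenced by the test above.
    private void checkSeekBefore(Cell[] cells, HFileScanner scanner, int i) throws IOException {
        // seekBefore(cells[i]) should position the scanner on a cell strictly before cells[i].
        assertTrue("Failed to seek before key #" + i, scanner.seekBefore(cells[i]));
    }

    private void checkNoSeekBefore(Cell[] cells, HFileScanner scanner, int i) throws IOException {
        // There is nothing before the first key, so seekBefore must return false.
        assertFalse("seekBefore unexpectedly succeeded for key #" + i, scanner.seekBefore(cells[i]));
    }

    private void checkCell(Cell expected, Cell actual) {
        // Compare full cells, not just row keys.
        assertEquals("Scanner is positioned on the wrong cell", 0,
            CellComparatorImpl.COMPARATOR.compare(expected, actual));
    }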

Example 23 with StoreFileWriter

Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

From the class FaultyMobStoreCompactor, the performCompaction method:

@Override
protected boolean performCompaction(FileDetails fd, InternalScanner scanner, long smallestReadPoint, boolean cleanSeqId, ThroughputController throughputController, boolean major, int numofFilesToCompact) throws IOException {
    totalCompactions.incrementAndGet();
    if (major) {
        totalMajorCompactions.incrementAndGet();
    }
    long bytesWrittenProgressForLog = 0;
    long bytesWrittenProgressForShippedCall = 0;
    // Clear old mob references
    mobRefSet.get().clear();
    boolean isUserRequest = userRequest.get();
    boolean compactMOBs = major && isUserRequest;
    boolean discardMobMiss = conf.getBoolean(MobConstants.MOB_UNSAFE_DISCARD_MISS_KEY, MobConstants.DEFAULT_MOB_DISCARD_MISS);
    boolean mustFail = false;
    if (compactMOBs) {
        mobCounter.incrementAndGet();
        double dv = rnd.nextDouble();
        if (dv < failureProb) {
            mustFail = true;
            totalFailures.incrementAndGet();
        }
    }
    FileSystem fs = store.getFileSystem();
    // Since scanner.next() can return 'false' but still be delivering data,
    // we have to use a do/while loop.
    List<Cell> cells = new ArrayList<>();
    // Limit to "hbase.hstore.compaction.kv.max" (default 10) to avoid OOME
    long currentTime = EnvironmentEdgeManager.currentTime();
    long lastMillis = 0;
    if (LOG.isDebugEnabled()) {
        lastMillis = currentTime;
    }
    CloseChecker closeChecker = new CloseChecker(conf, currentTime);
    String compactionName = ThroughputControlUtil.getNameForThrottling(store, "compaction");
    long now = 0;
    boolean hasMore;
    Path path = MobUtils.getMobFamilyPath(conf, store.getTableName(), store.getColumnFamilyName());
    byte[] fileName = null;
    StoreFileWriter mobFileWriter = null;
    long mobCells = 0;
    long cellsCountCompactedToMob = 0, cellsCountCompactedFromMob = 0;
    long cellsSizeCompactedToMob = 0, cellsSizeCompactedFromMob = 0;
    boolean finished = false;
    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
    throughputController.start(compactionName);
    KeyValueScanner kvs = (scanner instanceof KeyValueScanner) ? (KeyValueScanner) scanner : null;
    long shippedCallSizeLimit = (long) numofFilesToCompact * this.store.getColumnFamilyDescriptor().getBlocksize();
    Cell mobCell = null;
    long counter = 0;
    long countFailAt = -1;
    if (mustFail) {
        // randomly fail fast
        countFailAt = rnd.nextInt(100);
    }
    try {
        try {
            mobFileWriter = mobStore.createWriterInTmp(new Date(fd.latestPutTs), fd.maxKeyCount, major ? majorCompactionCompression : minorCompactionCompression, store.getRegionInfo().getStartKey(), true);
            fileName = Bytes.toBytes(mobFileWriter.getPath().getName());
        } catch (IOException e) {
            // Bailing out
            LOG.error("Failed to create mob writer, ", e);
            throw e;
        }
        if (compactMOBs) {
            // Add the only MOB reference we get in the compact-MOB case, because the
            // new store file will contain a single reference: the newly compacted MOB file.
            mobRefSet.get().put(store.getTableName(), mobFileWriter.getPath().getName());
        }
        do {
            hasMore = scanner.next(cells, scannerContext);
            currentTime = EnvironmentEdgeManager.currentTime();
            if (LOG.isDebugEnabled()) {
                now = currentTime;
            }
            if (closeChecker.isTimeLimit(store, currentTime)) {
                progress.cancel();
                return false;
            }
            for (Cell c : cells) {
                counter++;
                if (compactMOBs) {
                    if (MobUtils.isMobReferenceCell(c)) {
                        if (counter == countFailAt) {
                            LOG.warn("INJECTED FAULT mobCounter={}", mobCounter.get());
                            throw new CorruptHFileException("injected fault");
                        }
                        String fName = MobUtils.getMobFileName(c);
                        // Added to support migration
                        try {
                            mobCell = mobStore.resolve(c, true, false).getCell();
                        } catch (DoNotRetryIOException e) {
                            if (discardMobMiss && e.getCause() != null && e.getCause() instanceof FileNotFoundException) {
                                LOG.error("Missing MOB cell: file={} not found cell={}", fName, c);
                                continue;
                            } else {
                                throw e;
                            }
                        }
                        if (discardMobMiss && mobCell.getValueLength() == 0) {
                            LOG.error("Missing MOB cell value: file={} cell={}", fName, mobCell);
                            continue;
                        }
                        if (mobCell.getValueLength() > mobSizeThreshold) {
                            // put the mob data back to the store file
                            PrivateCellUtil.setSequenceId(mobCell, c.getSequenceId());
                            mobFileWriter.append(mobCell);
                            writer.append(MobUtils.createMobRefCell(mobCell, fileName, this.mobStore.getRefCellTags()));
                            mobCells++;
                        } else {
                            // If MOB value is less than threshold, append it directly to a store file
                            PrivateCellUtil.setSequenceId(mobCell, c.getSequenceId());
                            writer.append(mobCell);
                            cellsCountCompactedFromMob++;
                            cellsSizeCompactedFromMob += mobCell.getValueLength();
                        }
                    } else {
                        // Not a MOB reference cell
                        int size = c.getValueLength();
                        if (size > mobSizeThreshold) {
                            mobFileWriter.append(c);
                            writer.append(MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags()));
                            mobCells++;
                            cellsCountCompactedToMob++;
                            cellsSizeCompactedToMob += c.getValueLength();
                        } else {
                            writer.append(c);
                        }
                    }
                } else if (c.getTypeByte() != KeyValue.Type.Put.getCode()) {
                    // Not a major compaction or major with MOB disabled
                    // If the kv type is not put, directly write the cell
                    // to the store file.
                    writer.append(c);
                } else if (MobUtils.isMobReferenceCell(c)) {
                    // Not a major MOB compaction; this is a Put carrying a MOB reference
                    if (MobUtils.hasValidMobRefCellValue(c)) {
                        int size = MobUtils.getMobValueLength(c);
                        if (size > mobSizeThreshold) {
                            // If the value size is larger than the threshold, it's regarded as a mob. Since
                            // its value is already in the mob file, directly write this cell to the store file
                            Optional<TableName> refTable = MobUtils.getTableName(c);
                            if (refTable.isPresent()) {
                                mobRefSet.get().put(refTable.get(), MobUtils.getMobFileName(c));
                                writer.append(c);
                            } else {
                                throw new IOException(String.format("MOB cell did not contain a tablename " + "tag. should not be possible. see ref guide on mob troubleshooting. " + "store=%s cell=%s", getStoreInfo(), c));
                            }
                        } else {
                            // If the value is not larger than the threshold, it's not regarded as a mob.
                            // Retrieve the mob cell from the mob file, and write it back to the store file.
                            mobCell = mobStore.resolve(c, true, false).getCell();
                            if (mobCell.getValueLength() != 0) {
                                // put the mob data back to the store file
                                PrivateCellUtil.setSequenceId(mobCell, c.getSequenceId());
                                writer.append(mobCell);
                                cellsCountCompactedFromMob++;
                                cellsSizeCompactedFromMob += mobCell.getValueLength();
                            } else {
                                // If the mob cell's value is empty, there might have been an issue when
                                // retrieving it; directly write the cell to the store file and leave it
                                // to be handled by the next compaction.
                                LOG.error("Empty value for: " + c);
                                Optional<TableName> refTable = MobUtils.getTableName(c);
                                if (refTable.isPresent()) {
                                    mobRefSet.get().put(refTable.get(), MobUtils.getMobFileName(c));
                                    writer.append(c);
                                } else {
                                    throw new IOException(String.format("MOB cell did not contain a tablename " + "tag. should not be possible. see ref guide on mob troubleshooting. " + "store=%s cell=%s", getStoreInfo(), c));
                                }
                            }
                        }
                    } else {
                        LOG.error("Corrupted MOB reference: {}", c);
                        writer.append(c);
                    }
                } else if (c.getValueLength() <= mobSizeThreshold) {
                    // If the value size of a cell is not larger than the threshold, directly write it to
                    // the store file.
                    writer.append(c);
                } else {
                    // If the value size of a cell is larger than the threshold, it's regarded as a mob,
                    // write this cell to a mob file, and write the path to the store file.
                    mobCells++;
                    // Append the original KeyValue to the mob file.
                    mobFileWriter.append(c);
                    Cell reference = MobUtils.createMobRefCell(c, fileName, this.mobStore.getRefCellTags());
                    // write the cell whose value is the path of a mob file to the store file.
                    writer.append(reference);
                    cellsCountCompactedToMob++;
                    cellsSizeCompactedToMob += c.getValueLength();
                    // Add ref we get for compact MOB case
                    mobRefSet.get().put(store.getTableName(), mobFileWriter.getPath().getName());
                }
                int len = c.getSerializedSize();
                ++progress.currentCompactedKVs;
                progress.totalCompactedSize += len;
                bytesWrittenProgressForShippedCall += len;
                if (LOG.isDebugEnabled()) {
                    bytesWrittenProgressForLog += len;
                }
                throughputController.control(compactionName, len);
                if (closeChecker.isSizeLimit(store, len)) {
                    progress.cancel();
                    return false;
                }
                if (kvs != null && bytesWrittenProgressForShippedCall > shippedCallSizeLimit) {
                    ((ShipperListener) writer).beforeShipped();
                    kvs.shipped();
                    bytesWrittenProgressForShippedCall = 0;
                }
            }
            // Log the progress of long-running compactions every minute if
            // logging at DEBUG level
            if (LOG.isDebugEnabled()) {
                if ((now - lastMillis) >= COMPACTION_PROGRESS_LOG_INTERVAL) {
                    String rate = String.format("%.2f", (bytesWrittenProgressForLog / 1024.0) / ((now - lastMillis) / 1000.0));
                    LOG.debug("Compaction progress: {} {}, rate={} KB/sec, throughputController is {}", compactionName, progress, rate, throughputController);
                    lastMillis = now;
                    bytesWrittenProgressForLog = 0;
                }
            }
            cells.clear();
        } while (hasMore);
        finished = true;
    } catch (InterruptedException e) {
        progress.cancel();
        throw new InterruptedIOException("Interrupted while controlling throughput of compacting " + compactionName);
    } catch (FileNotFoundException e) {
        LOG.error("MOB Stress Test FAILED, region: " + store.getRegionInfo().getEncodedName(), e);
        System.exit(-1);
    } catch (IOException t) {
        LOG.error("Mob compaction failed for region: " + store.getRegionInfo().getEncodedName());
        throw t;
    } finally {
        // Clone the last cell in the finally block because the writer will append the last
        // cell when committing. If we don't clone it here, the memory of the last cell will
        // be released once the scanner is closed. (HBASE-22582)
        ((ShipperListener) writer).beforeShipped();
        throughputController.finish(compactionName);
        if (!finished && mobFileWriter != null) {
            // Remove all MOB references because compaction failed
            mobRefSet.get().clear();
            // Abort writer
            abortWriter(mobFileWriter);
        }
    }
    if (mobFileWriter != null) {
        if (mobCells > 0) {
            // If the mob file is not empty, commit it.
            mobFileWriter.appendMetadata(fd.maxSeqId, major, mobCells);
            mobFileWriter.close();
            mobStore.commitFile(mobFileWriter.getPath(), path);
        } else {
            // If the mob file is empty, delete it instead of committing.
            abortWriter(mobFileWriter);
        }
    }
    mobStore.updateCellsCountCompactedFromMob(cellsCountCompactedFromMob);
    mobStore.updateCellsCountCompactedToMob(cellsCountCompactedToMob);
    mobStore.updateCellsSizeCompactedFromMob(cellsSizeCompactedFromMob);
    mobStore.updateCellsSizeCompactedToMob(cellsSizeCompactedToMob);
    progress.complete();
    return true;
}
Also used : StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) InterruptedIOException(java.io.InterruptedIOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ArrayList(java.util.ArrayList) FileNotFoundException(java.io.FileNotFoundException) CorruptHFileException(org.apache.hadoop.hbase.io.hfile.CorruptHFileException) FileSystem(org.apache.hadoop.fs.FileSystem) Cell(org.apache.hadoop.hbase.Cell) ScannerContext(org.apache.hadoop.hbase.regionserver.ScannerContext) ShipperListener(org.apache.hadoop.hbase.regionserver.ShipperListener) CloseChecker(org.apache.hadoop.hbase.regionserver.compactions.CloseChecker) Path(org.apache.hadoop.fs.Path) KeyValueScanner(org.apache.hadoop.hbase.regionserver.KeyValueScanner) IOException(java.io.IOException) Date(java.util.Date) TableName(org.apache.hadoop.hbase.TableName)
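
The failure injection above is driven by the failureProb field and the userRequest flag, which are configured outside this method. A minimal sketch of how a stress test might wire this compactor in; both property names below are assumptions to verify against your HBase version rather than documented keys:

    // A hedged sketch: configuring the faulty compactor for a MOB stress test.
    Configuration conf = HBaseConfiguration.create();
    // Assumed compactor-override key; check MobStoreEngine in your HBase version.
    conf.set("hbase.hstore.mobengine.compactor.class",
        FaultyMobStoreCompactor.class.getName());
    // Hypothetical property: probability that a major MOB compaction throws an
    // injected CorruptHFileException (see countFailAt above).
    conf.setDouble("hbase.mob.compaction.fault.probability", 0.1);
    // Fail hard on missing MOB cells instead of silently discarding them.
    conf.setBoolean(MobConstants.MOB_UNSAFE_DISCARD_MISS_KEY, false);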

Example 24 with StoreFileWriter

Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

From the class TestCachedMobFile, the testReadKeyValue method:

@Test
public void testReadKeyValue() throws Exception {
    Path testDir = TEST_UTIL.getDataTestDir();
    FileSystem fs = testDir.getFileSystem(conf);
    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs).withOutputDir(testDir).withFileContext(meta).build();
    String caseName = testName.getMethodName();
    MobTestUtil.writeStoreFile(writer, caseName);
    CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf);
    byte[] family = Bytes.toBytes(caseName);
    byte[] qualify = Bytes.toBytes(caseName);
    // Test the start key
    // The start key bytes
    byte[] startKey = Bytes.toBytes("aa");
    KeyValue expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
    KeyValue seekKey = expectedKey.createKeyOnly(false);
    Cell cell = cachedMobFile.readCell(seekKey, false).getCell();
    MobTestUtil.assertCellEquals(expectedKey, cell);
    // Test the end key
    // The end key bytes
    byte[] endKey = Bytes.toBytes("zz");
    expectedKey = new KeyValue(endKey, family, qualify, Long.MAX_VALUE, Type.Put, endKey);
    seekKey = expectedKey.createKeyOnly(false);
    cell = cachedMobFile.readCell(seekKey, false).getCell();
    MobTestUtil.assertCellEquals(expectedKey, cell);
    // Test the random key
    byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
    expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, Type.Put, randomKey);
    seekKey = expectedKey.createKeyOnly(false);
    cell = cachedMobFile.readCell(seekKey, false).getCell();
    MobTestUtil.assertCellEquals(expectedKey, cell);
    // Test the key which is less than the start key
    // Smaller than "aa"
    byte[] lowerKey = Bytes.toBytes("a1");
    expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
    seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, Type.Put, lowerKey);
    cell = cachedMobFile.readCell(seekKey, false).getCell();
    MobTestUtil.assertCellEquals(expectedKey, cell);
    // Test the key which is more than the end key
    // Bigger than "zz"
    byte[] upperKey = Bytes.toBytes("z{");
    seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey);
    Assert.assertNull(cachedMobFile.readCell(seekKey, false));
}
Also used : Path(org.apache.hadoop.fs.Path) StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) KeyValue(org.apache.hadoop.hbase.KeyValue) FileSystem(org.apache.hadoop.fs.FileSystem) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) Cell(org.apache.hadoop.hbase.Cell) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) Test(org.junit.Test)
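
MobTestUtil.writeStoreFile is used here and in Example 25 but is not shown on this page. Judging from the assertions above (a start key of "aa", an end key of "zz", and two-character random keys in between), a plausible sketch writes every two-letter row from "aa" through "zz" with the row bytes as the value, then closes the writer; the real utility may differ in details:

    // Hypothetical sketch of MobTestUtil.writeStoreFile, inferred from the keys
    // the tests expect; not the actual implementation.
    public static void writeStoreFile(StoreFileWriter writer, String caseName) throws IOException {
        byte[] family = Bytes.toBytes(caseName);
        byte[] qualifier = Bytes.toBytes(caseName);
        long now = System.currentTimeMillis();
        try {
            for (char first = 'a'; first <= 'z'; first++) {
                for (char second = 'a'; second <= 'z'; second++) {
                    byte[] row = Bytes.toBytes(String.valueOf(first) + second);
                    writer.append(new KeyValue(row, family, qualifier, now, row));
                }
            }
        } finally {
            writer.close();
        }
    }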

Example 25 with StoreFileWriter

Use of org.apache.hadoop.hbase.regionserver.StoreFileWriter in project hbase by apache.

From the class TestMobFile, the testReadKeyValue method (the same scenario as Example 24, but reading through a MobFile backed by an HStoreFile rather than a CachedMobFile):

@Test
public void testReadKeyValue() throws Exception {
    Path testDir = TEST_UTIL.getDataTestDir();
    FileSystem fs = testDir.getFileSystem(conf);
    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs).withOutputDir(testDir).withFileContext(meta).build();
    String caseName = testName.getMethodName();
    MobTestUtil.writeStoreFile(writer, caseName);
    MobFile mobFile = new MobFile(new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true));
    byte[] family = Bytes.toBytes(caseName);
    byte[] qualify = Bytes.toBytes(caseName);
    // Test the start key
    // The start key bytes
    byte[] startKey = Bytes.toBytes("aa");
    KeyValue expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
    KeyValue seekKey = expectedKey.createKeyOnly(false);
    Cell cell = mobFile.readCell(seekKey, false).getCell();
    MobTestUtil.assertCellEquals(expectedKey, cell);
    // Test the end key
    // The end key bytes
    byte[] endKey = Bytes.toBytes("zz");
    expectedKey = new KeyValue(endKey, family, qualify, Long.MAX_VALUE, Type.Put, endKey);
    seekKey = expectedKey.createKeyOnly(false);
    cell = mobFile.readCell(seekKey, false).getCell();
    MobTestUtil.assertCellEquals(expectedKey, cell);
    // Test the random key
    byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2));
    expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, Type.Put, randomKey);
    seekKey = expectedKey.createKeyOnly(false);
    cell = mobFile.readCell(seekKey, false).getCell();
    MobTestUtil.assertCellEquals(expectedKey, cell);
    // Test the key which is less than the start key
    // Smaller than "aa"
    byte[] lowerKey = Bytes.toBytes("a1");
    expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey);
    seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, Type.Put, lowerKey);
    cell = mobFile.readCell(seekKey, false).getCell();
    MobTestUtil.assertCellEquals(expectedKey, cell);
    // Test the key which is more than the end key
    // Bigger than "zz"
    byte[] upperKey = Bytes.toBytes("z{");
    seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey);
    assertNull(mobFile.readCell(seekKey, false));
}
Also used : Path(org.apache.hadoop.fs.Path) StoreFileWriter(org.apache.hadoop.hbase.regionserver.StoreFileWriter) KeyValue(org.apache.hadoop.hbase.KeyValue) FileSystem(org.apache.hadoop.fs.FileSystem) HStoreFile(org.apache.hadoop.hbase.regionserver.HStoreFile) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) Cell(org.apache.hadoop.hbase.Cell) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) Test(org.junit.Test)

Aggregations

StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter): 30 uses
Path (org.apache.hadoop.fs.Path): 23 uses
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder): 14 uses
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 13 uses
IOException (java.io.IOException): 11 uses
Cell (org.apache.hadoop.hbase.Cell): 11 uses
InterruptedIOException (java.io.InterruptedIOException): 9 uses
ArrayList (java.util.ArrayList): 9 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 9 uses
KeyValue (org.apache.hadoop.hbase.KeyValue): 9 uses
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 6 uses
Test (org.junit.Test): 6 uses
Date (java.util.Date): 5 uses
TableName (org.apache.hadoop.hbase.TableName): 5 uses
ScannerContext (org.apache.hadoop.hbase.regionserver.ScannerContext): 5 uses
Map (java.util.Map): 4 uses
HashMap (java.util.HashMap): 3 uses
TreeMap (java.util.TreeMap): 3 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 3 uses
Algorithm (org.apache.hadoop.hbase.io.compress.Compression.Algorithm): 3 uses