
Example 1 with StoreFileScanner

Use of org.apache.hadoop.hbase.regionserver.StoreFileScanner in the Apache HBase project.

From the class TestStripeCompactor, method createCompactor.

private StripeCompactor createCompactor(StoreFileWritersCapture writers, KeyValue[] input) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
    final Scanner scanner = new Scanner(input);
    // Create a store mock that satisfies the compactor.
    HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
    ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparator.COMPARATOR);
    Store store = mock(Store.class);
    when(store.getFamily()).thenReturn(col);
    when(store.getScanInfo()).thenReturn(si);
    when(store.areWritesEnabled()).thenReturn(true);
    when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
    when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(), anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
    when(store.getComparator()).thenReturn(CellComparator.COMPARATOR);
    return new StripeCompactor(conf, store) {

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
            return scanner;
        }

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException {
            return scanner;
        }
    };
}
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Scanner (org.apache.hadoop.hbase.regionserver.compactions.TestCompactor.Scanner), StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), ScanType (org.apache.hadoop.hbase.regionserver.ScanType), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), FileSystem (org.apache.hadoop.fs.FileSystem), Store (org.apache.hadoop.hbase.regionserver.Store), ScanInfo (org.apache.hadoop.hbase.regionserver.ScanInfo), ArrayList (java.util.ArrayList), List (java.util.List)
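
For context, a test would typically drive the compactor built above roughly as sketched below. This is a minimal sketch, not the verbatim test: createDummyRequest() and the KEY_* boundary constants are hypothetical fixtures, and the boundary-based compact(...) overload shown (with NoLimitThroughputController from org.apache.hadoop.hbase.regionserver.throttle) may differ across HBase versions.

// Hypothetical driver for the compactor returned by createCompactor above.
StoreFileWritersCapture writers = new StoreFileWritersCapture();
StripeCompactor sc = createCompactor(writers, input);
List<Path> paths = sc.compact(createDummyRequest(),   // hypothetical request fixture
    Arrays.asList(KEY_B, KEY_D),                      // target stripe boundaries (placeholders)
    null, null,                                       // no major-range drop-deletes window
    NoLimitThroughputController.INSTANCE, null);      // no throttling, no user context
// paths now names the newly written store files, one per non-empty stripe.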

Example 2 with StoreFileScanner

Use of org.apache.hadoop.hbase.regionserver.StoreFileScanner in the Apache HBase project.

From the class MobFile, method readCell.

/**
   * Reads a cell from the mob file.
   * @param search The cell to search for in the mob file.
   * @param cacheMobBlocks Whether the scanner should cache blocks.
   * @param readPt The read point.
   * @return The cell at or after the search key, or null if none is found.
   * @throws IOException if the store file scanners cannot be created or the seek fails.
   */
public Cell readCell(Cell search, boolean cacheMobBlocks, long readPt) throws IOException {
    Cell result = null;
    StoreFileScanner scanner = null;
    List<StoreFile> sfs = new ArrayList<>();
    sfs.add(sf);
    try {
        List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles(sfs, cacheMobBlocks, true, false, false, readPt);
        if (!sfScanners.isEmpty()) {
            scanner = sfScanners.get(0);
            if (scanner.seek(search)) {
                result = scanner.peek();
            }
        }
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
    return result;
}
Also used: ArrayList (java.util.ArrayList), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), Cell (org.apache.hadoop.hbase.Cell), StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner)
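
A minimal caller sketch for readCell, assuming a MobFile constructed as in the tests below; row, family, and qualifier are hypothetical byte[] fixtures, and CellUtil.createCell is this era's helper for building a search key.

// Build a search key (row, family, qualifier are placeholder byte[] values).
Cell search = CellUtil.createCell(row, family, qualifier);
// Long.MAX_VALUE as the read point makes every committed cell visible.
Cell found = mobFile.readCell(search, /* cacheMobBlocks */ false, Long.MAX_VALUE);
if (found == null) {
    // No cell at or after the search key exists in this mob file.
}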

Example 3 with StoreFileScanner

Use of org.apache.hadoop.hbase.regionserver.StoreFileScanner in the Apache HBase project.

From the class TestMobFile, method testGetScanner.

@Test
public void testGetScanner() throws Exception {
    Path testDir = TEST_UTIL.getDataTestDir();
    FileSystem fs = testDir.getFileSystem(conf);
    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs).withOutputDir(testDir).withFileContext(meta).build();
    MobTestUtil.writeStoreFile(writer, getName());
    MobFile mobFile = new MobFile(new StoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE));
    assertNotNull(mobFile.getScanner());
    assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
}
Also used: Path (org.apache.hadoop.fs.Path), StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter), FileSystem (org.apache.hadoop.fs.FileSystem), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile), HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder), HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext), StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner), Test (org.junit.Test)
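
As a follow-on sketch, the scanner obtained above can be positioned and drained like any other StoreFileScanner. KeyValue.LOWESTKEY is the conventional seek target for the start of a file; the loop body here is only a placeholder.

StoreFileScanner scanner = (StoreFileScanner) mobFile.getScanner();
// Position at the first cell in the file.
scanner.seek(KeyValue.LOWESTKEY);
Cell cell;
while ((cell = scanner.next()) != null) {
    // Cells written by MobTestUtil.writeStoreFile come back in sorted order.
}
scanner.close();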

Example 4 with StoreFileScanner

Use of org.apache.hadoop.hbase.regionserver.StoreFileScanner in the Apache HBase project.

From the class Compactor, method compact.

protected List<Path> compact(final CompactionRequest request, InternalScannerFactory scannerFactory, CellSinkFactory<T> sinkFactory, ThroughputController throughputController, User user) throws IOException {
    FileDetails fd = getFileDetails(request.getFiles(), request.isAllFiles());
    this.progress = new CompactionProgress(fd.maxKeyCount);
    // Find the smallest read point across all the Scanners.
    long smallestReadPoint = getSmallestReadPoint();
    List<StoreFileScanner> scanners;
    Collection<StoreFile> readersToClose;
    T writer = null;
    boolean dropCache;
    if (request.isMajor() || request.isAllFiles()) {
        dropCache = this.dropCacheMajor;
    } else {
        dropCache = this.dropCacheMinor;
    }
    if (this.conf.getBoolean("hbase.regionserver.compaction.private.readers", true)) {
        // Clone all StoreFiles so the compaction runs on an independent copy of the
        // store files, HFiles, and their readers.
        readersToClose = new ArrayList<>(request.getFiles().size());
        for (StoreFile f : request.getFiles()) {
            StoreFile clonedStoreFile = f.cloneForReader();
            // Create the reader after the store file is cloned, in case the
            // sequence id is used for sorting in scanners.
            clonedStoreFile.createReader();
            readersToClose.add(clonedStoreFile);
        }
        scanners = createFileScanners(readersToClose, smallestReadPoint, dropCache);
    } else {
        readersToClose = Collections.emptyList();
        scanners = createFileScanners(request.getFiles(), smallestReadPoint, dropCache);
    }
    InternalScanner scanner = null;
    boolean finished = false;
    try {
        /* Include deletes, unless we are doing a major compaction */
        ScanType scanType = scannerFactory.getScanType(request);
        scanner = preCreateCoprocScanner(request, scanType, fd.earliestPutTs, scanners, user, smallestReadPoint);
        if (scanner == null) {
            scanner = scannerFactory.createScanner(scanners, scanType, fd, smallestReadPoint);
        }
        scanner = postCreateCoprocScanner(request, scanType, scanner, user);
        if (scanner == null) {
            // A null scanner returned from the coprocessor hooks means skip normal processing.
            return new ArrayList<>();
        }
        boolean cleanSeqId = false;
        if (fd.minSeqIdToKeep > 0) {
            smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint);
            cleanSeqId = true;
        }
        writer = sinkFactory.createWriter(scanner, fd, dropCache);
        finished = performCompaction(fd, scanner, writer, smallestReadPoint, cleanSeqId, throughputController, request.isAllFiles(), request.getFiles().size());
        if (!finished) {
            throw new InterruptedIOException("Aborting compaction of store " + store + " in region " + store.getRegionInfo().getRegionNameAsString() + " because it was interrupted.");
        }
    } finally {
        Closeables.close(scanner, true);
        for (StoreFile f : readersToClose) {
            try {
                f.closeReader(true);
            } catch (IOException e) {
                LOG.warn("Exception closing " + f, e);
            }
        }
        if (!finished && writer != null) {
            abortWriter(writer);
        }
    }
    assert finished : "We should have exited the method on all error paths";
    assert writer != null : "Writer should be non-null if no error";
    return commitWriter(writer, fd, request);
}
Also used: InterruptedIOException (java.io.InterruptedIOException), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), ArrayList (java.util.ArrayList), IOException (java.io.IOException), StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner), ScanType (org.apache.hadoop.hbase.regionserver.ScanType), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile)
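
The scannerFactory argument is the main extension point of compact(...). Below is a minimal sketch of such a factory, inferred from the two calls in the method above (getScanType(request) and createScanner(scanners, scanType, fd, smallestReadPoint)); treat the interface shape and the ScanType choice as assumptions rather than a verbatim copy of the source.

// Sketch of a minimal InternalScannerFactory for the enclosing Compactor.
private final InternalScannerFactory defaultScannerFactory = new InternalScannerFactory() {

    @Override
    public ScanType getScanType(CompactionRequest request) {
        // Simplification: all-files (major) compactions can drop deletes,
        // minor compactions must retain them.
        return request.isAllFiles() ? ScanType.COMPACT_DROP_DELETES
            : ScanType.COMPACT_RETAIN_DELETES;
    }

    @Override
    public InternalScanner createScanner(List<StoreFileScanner> scanners, ScanType scanType,
            FileDetails fd, long smallestReadPoint) throws IOException {
        // Delegate to the Store-aware createScanner on the enclosing Compactor,
        // the same method the test compactors in Examples 1 and 5 override.
        return Compactor.this.createScanner(store, scanners, scanType, smallestReadPoint,
            fd.earliestPutTs);
    }
};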

Example 5 with StoreFileScanner

Use of org.apache.hadoop.hbase.regionserver.StoreFileScanner in the Apache HBase project.

From the class TestDateTieredCompactor, method createCompactor.

private DateTieredCompactor createCompactor(StoreFileWritersCapture writers, final KeyValue[] input, List<StoreFile> storefiles) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
    final Scanner scanner = new Scanner(input);
    // Create a store mock that satisfies the compactor.
    HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
    ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparator.COMPARATOR);
    final Store store = mock(Store.class);
    when(store.getStorefiles()).thenReturn(storefiles);
    when(store.getFamily()).thenReturn(col);
    when(store.getScanInfo()).thenReturn(si);
    when(store.areWritesEnabled()).thenReturn(true);
    when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
    when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(), anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
    when(store.getComparator()).thenReturn(CellComparator.COMPARATOR);
    long maxSequenceId = StoreFile.getMaxSequenceIdInList(storefiles);
    when(store.getMaxSequenceId()).thenReturn(maxSequenceId);
    return new DateTieredCompactor(conf, store) {

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
            return scanner;
        }

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException {
            return scanner;
        }
    };
}
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Scanner (org.apache.hadoop.hbase.regionserver.compactions.TestCompactor.Scanner), StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), ScanType (org.apache.hadoop.hbase.regionserver.ScanType), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), FileSystem (org.apache.hadoop.fs.FileSystem), Store (org.apache.hadoop.hbase.regionserver.Store), ScanInfo (org.apache.hadoop.hbase.regionserver.ScanInfo), ArrayList (java.util.ArrayList), List (java.util.List)
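
And a matching driver sketch for the date-tiered case: DateTieredCompactor exposes a compact(...) overload keyed on the lower boundaries of its time windows rather than on stripe rows. The request fixture and boundary values are hypothetical, and the signature reflects this era of HBase.

// Hypothetical driver for the compactor returned by createCompactor above.
DateTieredCompactor dtc = createCompactor(writers, input, storefiles);
// Lower boundaries (timestamps) of the tier windows; Long.MIN_VALUE covers
// everything older than the earliest configured window.
List<Long> lowerBoundaries = Arrays.asList(Long.MIN_VALUE, 100L, 200L);
List<Path> paths = dtc.compact(createDummyRequest(),   // hypothetical request fixture
    lowerBoundaries, NoLimitThroughputController.INSTANCE, null);
// paths names one output file per window that received cells.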

Aggregations

StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner): 6 uses
ArrayList (java.util.ArrayList): 5 uses
InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner): 4 uses
ScanType (org.apache.hadoop.hbase.regionserver.ScanType): 4 uses
List (java.util.List): 3 uses
Configuration (org.apache.hadoop.conf.Configuration): 3 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 3 uses
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 3 uses
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 3 uses
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 3 uses
Store (org.apache.hadoop.hbase.regionserver.Store): 3 uses
StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile): 3 uses
ScanInfo (org.apache.hadoop.hbase.regionserver.ScanInfo): 2 uses
Scanner (org.apache.hadoop.hbase.regionserver.compactions.TestCompactor.Scanner): 2 uses
ImmutableList (com.google.common.collect.ImmutableList): 1 use
IOException (java.io.IOException): 1 use
InterruptedIOException (java.io.InterruptedIOException): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
Cell (org.apache.hadoop.hbase.Cell): 1 use
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 1 use