Example 1 with StoreScanner

use of org.apache.hadoop.hbase.regionserver.StoreScanner in project hbase by apache.

In the class ZooKeeperScanPolicyObserver, the method preFlushScannerOpen:

@Override
public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
    ScanInfo scanInfo = getScanInfo(store, c.getEnvironment());
    if (scanInfo == null) {
        // take default action
        return null;
    }
    Scan scan = new Scan();
    // Scan as many versions as the custom policy allows.
    scan.setMaxVersions(scanInfo.getMaxVersions());
    // Flush scans retain deletes; only the memstore scanner is wrapped here.
    return new StoreScanner(store, scanInfo, scan, Collections.singletonList(memstoreScanner), ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
}
Also used : ScanInfo(org.apache.hadoop.hbase.regionserver.ScanInfo) Scan(org.apache.hadoop.hbase.client.Scan) StoreScanner(org.apache.hadoop.hbase.regionserver.StoreScanner)
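
An observer like this only takes effect once it is registered on a table. A minimal sketch of attaching it through the HBase 1.x admin API; the table name "demo" and family "f" are assumptions for illustration, and the observer still needs its own ZooKeeper configuration to resolve a scan policy:

// Hypothetical setup (not part of the example): attach the observer to a new
// table so preFlushScannerOpen runs on every memstore flush (HBase 1.x API).
Configuration conf = HBaseConfiguration.create();
try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
    desc.addFamily(new HColumnDescriptor("f"));
    // Register the coprocessor by fully qualified class name.
    desc.addCoprocessor(ZooKeeperScanPolicyObserver.class.getName());
    admin.createTable(desc);
}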

Example 2 with StoreScanner

use of org.apache.hadoop.hbase.regionserver.StoreScanner in project hbase by apache.

In the class PartitionedMobCompactor, the method createScanner:

/**
   * Creates a store scanner.
   * @param filesToCompact The files to be compacted.
   * @param scanType The scan type.
   * @return The store scanner.
   * @throws IOException if IO failure is encountered
   */
private StoreScanner createScanner(List<StoreFile> filesToCompact, ScanType scanType) throws IOException {
    // Open scanners over the store files to be compacted (no block caching,
    // positional reads, not flagged as a compaction read).
    List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(filesToCompact, false, true, false, false, HConstants.LATEST_TIMESTAMP);
    Scan scan = new Scan();
    scan.setMaxVersions(column.getMaxVersions());
    long ttl = HStore.determineTTLFromFamily(column);
    ScanInfo scanInfo = new ScanInfo(conf, column, ttl, 0, CellComparator.COMPARATOR);
    return new StoreScanner(scan, scanInfo, scanType, null, scanners, 0L, HConstants.LATEST_TIMESTAMP);
}
Also used : List(java.util.List) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) ScanInfo(org.apache.hadoop.hbase.regionserver.ScanInfo) StoreScanner(org.apache.hadoop.hbase.regionserver.StoreScanner)
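
The returned StoreScanner is then consumed like any InternalScanner: next(...) is called until it reports no more rows, and the scanner is closed afterwards. A minimal sketch of such a consumer; the helper name drainScanner and the per-cell processing are assumptions:

// Hypothetical consumer: drain a StoreScanner row by row.
private void drainScanner(StoreScanner scanner) throws IOException {
    List<Cell> cells = new ArrayList<>();
    boolean hasMore;
    try {
        do {
            // next() fills 'cells' with the next row's cells and
            // returns true while more rows remain.
            hasMore = scanner.next(cells);
            for (Cell cell : cells) {
                // process each cell, e.g. write it to a new store file
            }
            cells.clear();
        } while (hasMore);
    } finally {
        scanner.close();
    }
}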

Example 3 with StoreScanner

use of org.apache.hadoop.hbase.regionserver.StoreScanner in project hbase by apache.

In the class AbstractMultiOutputCompactor, the method initMultiWriter:

protected void initMultiWriter(AbstractMultiFileWriter writer, InternalScanner scanner, final FileDetails fd, final boolean shouldDropBehind) {
    WriterFactory writerFactory = new WriterFactory() {

        @Override
        public StoreFileWriter createWriter() throws IOException {
            return createTmpWriter(fd, shouldDropBehind);
        }
    };
    // Prepare multi-writer, and perform the compaction using scanner and writer.
    // It is ok here if storeScanner is null.
    StoreScanner storeScanner = (scanner instanceof StoreScanner) ? (StoreScanner) scanner : null;
    writer.init(storeScanner, writerFactory);
}
Also used : WriterFactory(org.apache.hadoop.hbase.regionserver.AbstractMultiFileWriter.WriterFactory) StoreScanner(org.apache.hadoop.hbase.regionserver.StoreScanner)
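
Since WriterFactory exposes a single createWriter() method, the anonymous class above can be collapsed to a lambda on Java 8, assuming the interface declares no other abstract methods:

// Java 8 equivalent of the anonymous WriterFactory above.
WriterFactory writerFactory = () -> createTmpWriter(fd, shouldDropBehind);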

Example 4 with StoreScanner

use of org.apache.hadoop.hbase.regionserver.StoreScanner in project hbase by apache.

In the class Compactor, the method createScanner:

/**
   * @param store The store.
   * @param scanners Store file scanners.
   * @param smallestReadPoint Smallest MVCC read point.
   * @param earliestPutTs Earliest put across all files.
   * @param dropDeletesFromRow Drop deletes starting with this row, inclusive. Can be null.
   * @param dropDeletesToRow Drop deletes ending with this row, exclusive. Can be null.
   * @return A compaction scanner.
   */
protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
    Scan scan = new Scan();
    scan.setMaxVersions(store.getFamily().getMaxVersions());
    return new StoreScanner(store, store.getScanInfo(), scan, scanners, smallestReadPoint, earliestPutTs, dropDeletesFromRow, dropDeletesToRow);
}
Also used : Scan(org.apache.hadoop.hbase.client.Scan) StoreScanner(org.apache.hadoop.hbase.regionserver.StoreScanner)
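
This overload is the range-restricted drop-deletes path: per the javadoc above, deletes are dropped only between the two row bounds, either of which can be null. A hypothetical call site, with all variable names assumed for illustration:

// Hypothetical call: drop deletes only inside one slice of the store's key range.
InternalScanner scanner = createScanner(store, storeFileScanners,
    smallestReadPoint, earliestPutTs,
    sliceStartRow,  // dropDeletesFromRow, inclusive (may be null per the javadoc)
    sliceEndRow);   // dropDeletesToRow, exclusive (may be null per the javadoc)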

Example 5 with StoreScanner

use of org.apache.hadoop.hbase.regionserver.StoreScanner in project cdap by caskdata.

In the class MessageTableRegionObserver, the method preFlushScannerOpen:

@Override
public InternalScanner preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
    LOG.info("preFlush, filter using MessageDataFilter");
    TransactionVisibilityState txVisibilityState = txStateCache.getLatestState();
    Scan scan = new Scan();
    // Route flushed cells through MessageDataFilter, seeded with the latest
    // transaction visibility snapshot.
    scan.setFilter(new MessageDataFilter(c.getEnvironment(), System.currentTimeMillis(), prefixLength, topicMetadataCache, txVisibilityState));
    return new LoggingInternalScanner("MessageDataFilter", "preFlush", new StoreScanner(store, store.getScanInfo(), scan, Collections.singletonList(memstoreScanner), ScanType.COMPACT_DROP_DELETES, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP), txVisibilityState);
}
Also used : TransactionVisibilityState(org.apache.tephra.persist.TransactionVisibilityState) Scan(org.apache.hadoop.hbase.client.Scan) StoreScanner(org.apache.hadoop.hbase.regionserver.StoreScanner)
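
LoggingInternalScanner is CDAP's own helper and its body is not shown above. A minimal sketch of what such a delegating wrapper could look like, assuming it only logs around the delegate; the SLF4J logger and all behavior here are assumptions, not CDAP's actual code:

// Sketch: an InternalScanner wrapper that delegates and logs its lifecycle.
private static final class LoggingInternalScanner implements InternalScanner {
    private static final Logger LOG = LoggerFactory.getLogger(LoggingInternalScanner.class);

    private final String filterName;
    private final String operation;
    private final InternalScanner delegate;
    // Retained so the real helper can include snapshot details in its logs (assumed).
    private final TransactionVisibilityState txVisibilityState;

    LoggingInternalScanner(String filterName, String operation, InternalScanner delegate,
                           TransactionVisibilityState txVisibilityState) {
        this.filterName = filterName;
        this.operation = operation;
        this.delegate = delegate;
        this.txVisibilityState = txVisibilityState;
    }

    @Override
    public boolean next(List<Cell> results) throws IOException {
        return delegate.next(results);
    }

    @Override
    public boolean next(List<Cell> results, ScannerContext scannerContext) throws IOException {
        return delegate.next(results, scannerContext);
    }

    @Override
    public void close() throws IOException {
        LOG.info("Closing {} scanner for filter {}", operation, filterName);
        delegate.close();
    }
}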

Aggregations

StoreScanner (org.apache.hadoop.hbase.regionserver.StoreScanner): 36
Scan (org.apache.hadoop.hbase.client.Scan): 33
TransactionVisibilityState (org.apache.tephra.persist.TransactionVisibilityState): 14
ArrayList (java.util.ArrayList): 3
ScanInfo (org.apache.hadoop.hbase.regionserver.ScanInfo): 3
IOException (java.io.IOException): 2
Path (org.apache.hadoop.fs.Path): 2
Cell (org.apache.hadoop.hbase.Cell): 2
ScannerContext (org.apache.hadoop.hbase.regionserver.ScannerContext): 2
StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter): 2
Date (java.util.Date): 1
List (java.util.List): 1
WriterFactory (org.apache.hadoop.hbase.regionserver.AbstractMultiFileWriter.WriterFactory): 1
StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile): 1