
Example 1 with ScanType

Usage of org.apache.hadoop.hbase.regionserver.ScanType in project hbase by Apache, from class TestStripeCompactor, method createCompactor:

private StripeCompactor createCompactor(StoreFileWritersCapture writers, KeyValue[] input) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
    final Scanner scanner = new Scanner(input);
    // Create a store mock that is sufficient for the compactor.
    HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
    ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparator.COMPARATOR);
    Store store = mock(Store.class);
    when(store.getFamily()).thenReturn(col);
    when(store.getScanInfo()).thenReturn(si);
    when(store.areWritesEnabled()).thenReturn(true);
    when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
    when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(), anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
    when(store.getComparator()).thenReturn(CellComparator.COMPARATOR);
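    // Subclass the compactor so both createScanner overloads return the fixed
    // test scanner, decoupling the test from real store file scanners.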
    return new StripeCompactor(conf, store) {

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
            return scanner;
        }

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException {
            return scanner;
        }
    };
}
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Scanner (org.apache.hadoop.hbase.regionserver.compactions.TestCompactor.Scanner), StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), ScanType (org.apache.hadoop.hbase.regionserver.ScanType), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), FileSystem (org.apache.hadoop.fs.FileSystem), Store (org.apache.hadoop.hbase.regionserver.Store), ScanInfo (org.apache.hadoop.hbase.regionserver.ScanInfo), ArrayList (java.util.ArrayList), List (java.util.List)
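The key trick in this helper is the thenAnswer(writers) stubbing: the object passed to thenAnswer must implement Mockito's Answer, so StoreFileWritersCapture can both supply the writer returned by createWriterInTmp(...) and record it for later verification. Below is a minimal standalone sketch of that capture-via-Answer pattern; the Sink interface and CapturingAnswer class are hypothetical illustrations, not HBase API.

import java.util.ArrayList;
import java.util.List;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import static org.mockito.Mockito.*;

public class CaptureExample {

    // Hypothetical stand-in for Store#createWriterInTmp.
    interface Sink {
        StringBuilder createWriter(long maxKeys);
    }

    // An Answer that both creates the returned value and records it, mirroring
    // what StoreFileWritersCapture does for store file writers.
    static class CapturingAnswer implements Answer<StringBuilder> {
        final List<StringBuilder> captured = new ArrayList<>();

        @Override
        public StringBuilder answer(InvocationOnMock invocation) {
            StringBuilder writer = new StringBuilder();
            captured.add(writer);
            return writer;
        }
    }

    public static void main(String[] args) {
        CapturingAnswer writers = new CapturingAnswer();
        Sink sink = mock(Sink.class);
        when(sink.createWriter(anyLong())).thenAnswer(writers);

        sink.createWriter(10).append("row-1"); // the code under test writes here
        System.out.println(writers.captured);  // [row-1]: everything that was written
    }
}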

Example 2 with ScanType

Usage of org.apache.hadoop.hbase.regionserver.ScanType in project hbase by Apache, from class Compactor, method compact:

protected List<Path> compact(final CompactionRequest request, InternalScannerFactory scannerFactory, CellSinkFactory<T> sinkFactory, ThroughputController throughputController, User user) throws IOException {
    FileDetails fd = getFileDetails(request.getFiles(), request.isAllFiles());
    this.progress = new CompactionProgress(fd.maxKeyCount);
    // Find the smallest read point across all the Scanners.
    long smallestReadPoint = getSmallestReadPoint();
    List<StoreFileScanner> scanners;
    Collection<StoreFile> readersToClose;
    T writer = null;
    boolean dropCache;
    if (request.isMajor() || request.isAllFiles()) {
        dropCache = this.dropCacheMajor;
    } else {
        dropCache = this.dropCacheMinor;
    }
    if (this.conf.getBoolean("hbase.regionserver.compaction.private.readers", true)) {
        // clone all StoreFiles, so we'll do the compaction on an independent copy of the
        // StoreFiles, HFiles, and their readers
        readersToClose = new ArrayList<>(request.getFiles().size());
        for (StoreFile f : request.getFiles()) {
            StoreFile clonedStoreFile = f.cloneForReader();
            // create the reader after the store file is cloned in case
            // the sequence id is used for sorting in scanners
            clonedStoreFile.createReader();
            readersToClose.add(clonedStoreFile);
        }
        scanners = createFileScanners(readersToClose, smallestReadPoint, dropCache);
    } else {
        readersToClose = Collections.emptyList();
        scanners = createFileScanners(request.getFiles(), smallestReadPoint, dropCache);
    }
    InternalScanner scanner = null;
    boolean finished = false;
    try {
        /* Include deletes, unless we are doing a major compaction */
        ScanType scanType = scannerFactory.getScanType(request);
        scanner = preCreateCoprocScanner(request, scanType, fd.earliestPutTs, scanners, user, smallestReadPoint);
        if (scanner == null) {
            scanner = scannerFactory.createScanner(scanners, scanType, fd, smallestReadPoint);
        }
        scanner = postCreateCoprocScanner(request, scanType, scanner, user);
        if (scanner == null) {
            // NULL scanner returned from coprocessor hooks means skip normal processing.
            return new ArrayList<>();
        }
        boolean cleanSeqId = false;
        if (fd.minSeqIdToKeep > 0) {
            smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint);
            cleanSeqId = true;
        }
        writer = sinkFactory.createWriter(scanner, fd, dropCache);
        finished = performCompaction(fd, scanner, writer, smallestReadPoint, cleanSeqId, throughputController, request.isAllFiles(), request.getFiles().size());
        if (!finished) {
            throw new InterruptedIOException("Aborting compaction of store " + store + " in region " + store.getRegionInfo().getRegionNameAsString() + " because it was interrupted.");
        }
    } finally {
        Closeables.close(scanner, true);
        for (StoreFile f : readersToClose) {
            try {
                f.closeReader(true);
            } catch (IOException e) {
                LOG.warn("Exception closing " + f, e);
            }
        }
        if (!finished && writer != null) {
            abortWriter(writer);
        }
    }
    assert finished : "We should have exited the method on all error paths";
    assert writer != null : "Writer should be non-null if no error";
    return commitWriter(writer, fd, request);
}
Also used: InterruptedIOException (java.io.InterruptedIOException), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), ArrayList (java.util.ArrayList), IOException (java.io.IOException), StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner), ScanType (org.apache.hadoop.hbase.regionserver.ScanType), StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile)
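The ScanType returned by scannerFactory.getScanType(request) is what realizes the "include deletes, unless we are doing a major compaction" comment above. A minimal sketch of a typical InternalScannerFactory.getScanType implementation follows; it mirrors the default factory's behavior using the stock ScanType constants, but is reconstructed from memory rather than copied from the HBase source.

    @Override
    public ScanType getScanType(CompactionRequest request) {
        // An all-files (major) compaction can safely drop delete markers:
        // no older file can still hold cells the markers would have masked.
        return request.isAllFiles() ? ScanType.COMPACT_DROP_DELETES
                                    : ScanType.COMPACT_RETAIN_DELETES;
    }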

Example 3 with ScanType

Usage of org.apache.hadoop.hbase.regionserver.ScanType in project hbase by Apache, from class TestDateTieredCompactor, method createCompactor:

private DateTieredCompactor createCompactor(StoreFileWritersCapture writers, final KeyValue[] input, List<StoreFile> storefiles) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
    final Scanner scanner = new Scanner(input);
    // Create a store mock that is sufficient for the compactor.
    HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
    ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparator.COMPARATOR);
    final Store store = mock(Store.class);
    when(store.getStorefiles()).thenReturn(storefiles);
    when(store.getFamily()).thenReturn(col);
    when(store.getScanInfo()).thenReturn(si);
    when(store.areWritesEnabled()).thenReturn(true);
    when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
    when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(), anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
    when(store.getComparator()).thenReturn(CellComparator.COMPARATOR);
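    // DateTieredCompactor consults the store's maximum sequence id (e.g. when
    // deciding whether an empty marker file is needed), so stub it to match
    // the supplied store files.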
    long maxSequenceId = StoreFile.getMaxSequenceIdInList(storefiles);
    when(store.getMaxSequenceId()).thenReturn(maxSequenceId);
    return new DateTieredCompactor(conf, store) {

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
            return scanner;
        }

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException {
            return scanner;
        }
    };
}
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Scanner (org.apache.hadoop.hbase.regionserver.compactions.TestCompactor.Scanner), StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), ScanType (org.apache.hadoop.hbase.regionserver.ScanType), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), FileSystem (org.apache.hadoop.fs.FileSystem), Store (org.apache.hadoop.hbase.regionserver.Store), ScanInfo (org.apache.hadoop.hbase.regionserver.ScanInfo), ArrayList (java.util.ArrayList), List (java.util.List)

Example 4 with ScanType

Usage of org.apache.hadoop.hbase.regionserver.ScanType in project hbase by Apache, from class TestStripeCompactionPolicy, method createCompactor:

private StripeCompactor createCompactor() throws Exception {
    HColumnDescriptor col = new HColumnDescriptor(Bytes.toBytes("foo"));
    StoreFileWritersCapture writers = new StoreFileWritersCapture();
    Store store = mock(Store.class);
    HRegionInfo info = mock(HRegionInfo.class);
    when(info.getRegionNameAsString()).thenReturn("testRegion");
    when(store.getFamily()).thenReturn(col);
    when(store.getRegionInfo()).thenReturn(info);
    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(), anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
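    // An empty Scanner suffices here: the policy tests exercise stripe
    // selection and request wiring, not actual cell output.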
    final Scanner scanner = new Scanner();
    return new StripeCompactor(conf, store) {

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
            return scanner;
        }

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException {
            return scanner;
        }
    };
}
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), ScanType (org.apache.hadoop.hbase.regionserver.ScanType), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), Store (org.apache.hadoop.hbase.regionserver.Store), List (java.util.List), ArrayList (java.util.ArrayList), ImmutableList (com.google.common.collect.ImmutableList), StoreFileWritersCapture (org.apache.hadoop.hbase.regionserver.compactions.TestCompactor.StoreFileWritersCapture)

Aggregations

ArrayList (java.util.ArrayList): 4
InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner): 4
ScanType (org.apache.hadoop.hbase.regionserver.ScanType): 4
StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner): 4
List (java.util.List): 3
Configuration (org.apache.hadoop.conf.Configuration): 3
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 3
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 3
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 3
Store (org.apache.hadoop.hbase.regionserver.Store): 3
FileSystem (org.apache.hadoop.fs.FileSystem): 2
ScanInfo (org.apache.hadoop.hbase.regionserver.ScanInfo): 2
Scanner (org.apache.hadoop.hbase.regionserver.compactions.TestCompactor.Scanner): 2
ImmutableList (com.google.common.collect.ImmutableList): 1
IOException (java.io.IOException): 1
InterruptedIOException (java.io.InterruptedIOException): 1
StoreFile (org.apache.hadoop.hbase.regionserver.StoreFile): 1
StoreFileWritersCapture (org.apache.hadoop.hbase.regionserver.compactions.TestCompactor.StoreFileWritersCapture): 1