
Example 1 with InternalScanner

Use of org.apache.hadoop.hbase.regionserver.InternalScanner in the Apache HBase project.

From the class ColumnAggregationEndpointWithErrors, the method sum:

@Override
public void sum(RpcController controller, ColumnAggregationWithErrorsSumRequest request, RpcCallback<ColumnAggregationWithErrorsSumResponse> done) {
    // aggregate at each region
    Scan scan = new Scan();
    // Family is required in pb. Qualifier is not.
    byte[] family = request.getFamily().toByteArray();
    byte[] qualifier = request.hasQualifier() ? request.getQualifier().toByteArray() : null;
    if (request.hasQualifier()) {
        scan.addColumn(family, qualifier);
    } else {
        scan.addFamily(family);
    }
    int sumResult = 0;
    InternalScanner scanner = null;
    try {
        Region region = this.env.getRegion();
        // throw an exception for requests to the last region in the table, to test error handling
        if (Bytes.equals(region.getRegionInfo().getEndKey(), HConstants.EMPTY_END_ROW)) {
            throw new DoNotRetryIOException("An expected exception");
        }
        scanner = region.getScanner(scan);
        List<Cell> curVals = new ArrayList<>();
        boolean hasMore = false;
        do {
            curVals.clear();
            hasMore = scanner.next(curVals);
            for (Cell kv : curVals) {
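                // Note: with a null qualifier, matchingQualifier only matches cells
                // whose qualifier is empty, so a family-only request sums only
                // cells written without a qualifier.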
                if (CellUtil.matchingQualifier(kv, qualifier)) {
                    sumResult += Bytes.toInt(kv.getValueArray(), kv.getValueOffset());
                }
            }
        } while (hasMore);
    } catch (IOException e) {
        CoprocessorRpcUtils.setControllerException(controller, e);
        // Set result to -1 to indicate error.
        sumResult = -1;
        LOG.info("Setting sum result to -1 to indicate error", e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException e) {
                CoprocessorRpcUtils.setControllerException(controller, e);
                sumResult = -1;
                LOG.info("Setting sum result to -1 to indicate error", e);
            }
        }
    }
    done.run(ColumnAggregationWithErrorsSumResponse.newBuilder().setSum(sumResult).build());
}
Also used: InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), ArrayList (java.util.ArrayList), Region (org.apache.hadoop.hbase.regionserver.Region), Scan (org.apache.hadoop.hbase.client.Scan), IOException (java.io.IOException), Cell (org.apache.hadoop.hbase.Cell)
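
For context, a client drives an endpoint like this through Table.coprocessorService. The sketch below shows the usual calling pattern; the generated stub name ColumnAggregationServiceWithErrors, the table handle, and the family bytes are assumptions based on HBase's protobuf codegen conventions rather than anything shown in this listing, and note that Table.coprocessorService is declared to throw Throwable.

// A minimal client-side sketch. The stub name ColumnAggregationServiceWithErrors
// is an assumption; check the generated protos for the actual class.
final ColumnAggregationWithErrorsSumRequest request =
    ColumnAggregationWithErrorsSumRequest.newBuilder()
        .setFamily(ByteString.copyFrom(Bytes.toBytes("f")))
        .build();
Map<byte[], Integer> sums = table.coprocessorService(
    ColumnAggregationServiceWithErrors.class,
    // null start/end keys select every region of the table
    null, null,
    new Batch.Call<ColumnAggregationServiceWithErrors, Integer>() {
        @Override
        public Integer call(ColumnAggregationServiceWithErrors stub) throws IOException {
            CoprocessorRpcUtils.BlockingRpcCallback<ColumnAggregationWithErrorsSumResponse> callback =
                new CoprocessorRpcUtils.BlockingRpcCallback<>();
            stub.sum(null, request, callback);
            return callback.get().getSum();
        }
    });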

Example 2 with InternalScanner

Use of org.apache.hadoop.hbase.regionserver.InternalScanner in the Apache HBase project.

From the class RowCountEndpoint, the method getRowCount:

/**
   * Returns a count of the rows in the region where this coprocessor is loaded.
   */
@Override
public void getRowCount(RpcController controller, ExampleProtos.CountRequest request, RpcCallback<ExampleProtos.CountResponse> done) {
    Scan scan = new Scan();
    scan.setFilter(new FirstKeyOnlyFilter());
    ExampleProtos.CountResponse response = null;
    InternalScanner scanner = null;
    try {
        scanner = env.getRegion().getScanner(scan);
        List<Cell> results = new ArrayList<>();
        boolean hasMore = false;
        byte[] lastRow = null;
        long count = 0;
        do {
            hasMore = scanner.next(results);
            for (Cell kv : results) {
                byte[] currentRow = CellUtil.cloneRow(kv);
                if (lastRow == null || !Bytes.equals(lastRow, currentRow)) {
                    lastRow = currentRow;
                    count++;
                }
            }
            results.clear();
        } while (hasMore);
        response = ExampleProtos.CountResponse.newBuilder().setCount(count).build();
    } catch (IOException ioe) {
        CoprocessorRpcUtils.setControllerException(controller, ioe);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}
Also used: ExampleProtos (org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter), ArrayList (java.util.ArrayList), Scan (org.apache.hadoop.hbase.client.Scan), IOException (java.io.IOException), Cell (org.apache.hadoop.hbase.Cell)
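
Before the endpoint can be invoked, it has to be deployed on the table. A minimal sketch using the classic descriptor API follows; the table name, family name, and the Admin handle are placeholders, not part of the example above.

// Load RowCountEndpoint on a new table through its descriptor.
// Table "counted", family "f", and the Admin handle are placeholders.
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("counted"));
desc.addFamily(new HColumnDescriptor("f"));
desc.addCoprocessor(RowCountEndpoint.class.getName());
admin.createTable(desc);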

Example 3 with InternalScanner

Use of org.apache.hadoop.hbase.regionserver.InternalScanner in the Apache HBase project.

From the class AccessControlLists, the method loadAll:

/**
   * Loads all of the permission grants stored in a region of the {@code _acl_}
   * table.
   *
   * @param aclRegion the {@code _acl_} region to scan
   * @return a map of the permissions, keyed by the raw row key of each ACL entry
   * @throws IOException if the given region is not an ACL region or the scan fails
   */
static Map<byte[], ListMultimap<String, TablePermission>> loadAll(Region aclRegion) throws IOException {
    if (!isAclRegion(aclRegion)) {
        throw new IOException("Can only load permissions from " + ACL_TABLE_NAME);
    }
    Map<byte[], ListMultimap<String, TablePermission>> allPerms = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    // do a full scan of _acl_ table
    Scan scan = new Scan();
    scan.addFamily(ACL_LIST_FAMILY);
    InternalScanner iScanner = null;
    try {
        iScanner = aclRegion.getScanner(scan);
        while (true) {
            List<Cell> row = new ArrayList<>();
            boolean hasNext = iScanner.next(row);
            ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
            byte[] entry = null;
            for (Cell kv : row) {
                if (entry == null) {
                    entry = CellUtil.cloneRow(kv);
                }
                Pair<String, TablePermission> permissionsOfUserOnTable = parsePermissionRecord(entry, kv);
                if (permissionsOfUserOnTable != null) {
                    String username = permissionsOfUserOnTable.getFirst();
                    TablePermission permissions = permissionsOfUserOnTable.getSecond();
                    perms.put(username, permissions);
                }
            }
            if (entry != null) {
                allPerms.put(entry, perms);
            }
            if (!hasNext) {
                break;
            }
        }
    } finally {
        if (iScanner != null) {
            iScanner.close();
        }
    }
    return allPerms;
}
Also used: InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), ArrayList (java.util.ArrayList), IOException (java.io.IOException), TreeMap (java.util.TreeMap), Scan (org.apache.hadoop.hbase.client.Scan), ArrayListMultimap (com.google.common.collect.ArrayListMultimap), ListMultimap (com.google.common.collect.ListMultimap), Cell (org.apache.hadoop.hbase.Cell)
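
A usage sketch for the returned map follows. Since loadAll is package-private, this assumes a caller in the same package; the printing is purely illustrative.

// Walk the permissions returned by loadAll: keys are the raw ACL row keys
// (table or namespace entries), values are the per-user grant lists.
Map<byte[], ListMultimap<String, TablePermission>> allPerms = AccessControlLists.loadAll(aclRegion);
for (Map.Entry<byte[], ListMultimap<String, TablePermission>> entry : allPerms.entrySet()) {
    String aclEntry = Bytes.toString(entry.getKey());
    for (Map.Entry<String, TablePermission> grant : entry.getValue().entries()) {
        System.out.println(aclEntry + ": user=" + grant.getKey() + ", permission=" + grant.getValue());
    }
}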

Example 4 with InternalScanner

Use of org.apache.hadoop.hbase.regionserver.InternalScanner in the Apache HBase project.

From the class DefaultMobStoreFlusher, the method flushSnapshot:

/**
   * Flushes the snapshot of the MemStore.
   * If this store is not a mob store, flushes the cells in the snapshot to the
   * store files of HBase. If the store is a mob store, the flusher flushes the
   * MemStore into two places: the store files of HBase and the mob files.
   * <ol>
   * <li>Cells that are not of PUT type or that carry a delete marker are flushed
   * directly to HBase.</li>
   * <li>If the size of a cell value is larger than the threshold, the value is
   * flushed to a mob file, and a reference cell holding the path of that file is
   * flushed to HBase.</li>
   * <li>If the size of a cell value is smaller than or equal to the threshold,
   * the cell is flushed to HBase directly.</li>
   * </ol>
   */
@Override
public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId, MonitoredTask status, ThroughputController throughputController) throws IOException {
    ArrayList<Path> result = new ArrayList<>();
    long cellsCount = snapshot.getCellsCount();
    // don't flush if there are no entries
    if (cellsCount == 0)
        return result;
    // Use a store scanner to find which rows to flush.
    long smallestReadPoint = store.getSmallestReadPoint();
    InternalScanner scanner = createScanner(snapshot.getScanner(), smallestReadPoint);
    if (scanner == null) {
        // NULL scanner returned from coprocessor hooks means skip normal processing
        return result;
    }
    StoreFileWriter writer;
    try {
        // list of store files. Add cleanup of anything put on filesystem if we fail.
        synchronized (flushLock) {
            status.setStatus("Flushing " + store + ": creating writer");
            // Write the map out to the disk
            writer = store.createWriterInTmp(cellsCount, store.getFamily().getCompressionType(), false, true, true, false, snapshot.getTimeRangeTracker());
            IOException e = null;
            try {
                // It's a mob store, flush the cells in a mob way. This is the difference of flushing
                // between a normal and a mob store.
                performMobFlush(snapshot, cacheFlushId, scanner, writer, status, throughputController);
            } catch (IOException ioe) {
                e = ioe;
                // throw the exception out
                throw ioe;
            } finally {
                if (e != null) {
                    writer.close();
                } else {
                    finalizeWriter(writer, cacheFlushId, status);
                }
            }
        }
    } finally {
        scanner.close();
    }
    LOG.info("Mob store is flushed, sequenceid=" + cacheFlushId + ", memsize=" + StringUtils.TraditionalBinaryPrefix.long2String(snapshot.getDataSize(), "", 1) + ", hasBloomFilter=" + writer.hasGeneralBloom() + ", into tmp file " + writer.getPath());
    result.add(writer.getPath());
    return result;
}
Also used: Path (org.apache.hadoop.fs.Path), StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), ArrayList (java.util.ArrayList), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException)
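
The per-cell routing rule described in the javadoc above boils down to a few lines. This is an illustrative sketch, not the actual performMobFlush code; the threshold is assumed to come from the column family's mob configuration.

// Illustrative only: the routing decision the javadoc describes.
// mobThreshold is assumed to come from the column family's mob settings.
static boolean belongsInMobFile(Cell cell, long mobThreshold) {
    // Non-PUT cells (e.g. delete markers) go straight to the normal store files.
    if (cell.getTypeByte() != KeyValue.Type.Put.getCode()) {
        return false;
    }
    // Values above the threshold go to a mob file; a reference cell pointing
    // at that file is written to the normal store file instead.
    return cell.getValueLength() > mobThreshold;
}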

Example 5 with InternalScanner

Use of org.apache.hadoop.hbase.regionserver.InternalScanner in the Apache HBase project.

From the class AggregateImplementation, the method getRowNum:

/**
   * Returns the row count for the given column family and column qualifier,
   * over the row range defined in the Scan object.
   */
@Override
public void getRowNum(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    AggregateResponse response = null;
    long counter = 0L;
    List<Cell> results = new ArrayList<>();
    InternalScanner scanner = null;
    try {
        Scan scan = ProtobufUtil.toScan(request.getScan());
        byte[][] colFamilies = scan.getFamilies();
        byte[] colFamily = colFamilies != null ? colFamilies[0] : null;
        NavigableSet<byte[]> qualifiers = colFamilies != null ? scan.getFamilyMap().get(colFamily) : null;
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        if (scan.getFilter() == null && qualifier == null)
            scan.setFilter(new FirstKeyOnlyFilter());
        scanner = env.getRegion().getScanner(scan);
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            if (results.size() > 0) {
                counter++;
            }
            results.clear();
        } while (hasMoreRows);
        ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter);
        bb.rewind();
        response = AggregateResponse.newBuilder().addFirstPart(ByteString.copyFrom(bb)).build();
    } catch (IOException e) {
        CoprocessorRpcUtils.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    log.info("Row counter from this region is " + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + counter);
    done.run(response);
}
Also used: InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter), AggregateResponse (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse), ArrayList (java.util.ArrayList), IOException (java.io.IOException), ByteBuffer (java.nio.ByteBuffer), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell)
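
On the client side, this endpoint is normally driven through AggregationClient rather than a hand-rolled coprocessorService call. A minimal sketch follows, assuming AggregateImplementation is loaded on the table; the table, family, and qualifier names are placeholders.

// Client-side row count through AggregationClient. rowCount is declared to
// throw Throwable, so the call is wrapped. Names "t", "f", "q" are placeholders.
AggregationClient aggregationClient = new AggregationClient(conf);
try {
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"));
    long rowCount = aggregationClient.rowCount(TableName.valueOf("t"), new LongColumnInterpreter(), scan);
    System.out.println("row count: " + rowCount);
} catch (Throwable t) {
    throw new IOException(t);
} finally {
    aggregationClient.close();
}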

Aggregations

InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner): 44 usages
ArrayList (java.util.ArrayList): 41 usages
Cell (org.apache.hadoop.hbase.Cell): 36 usages
Scan (org.apache.hadoop.hbase.client.Scan): 34 usages
Test (org.junit.Test): 17 usages
IOException (java.io.IOException): 15 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 12 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 12 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 9 usages
Put (org.apache.hadoop.hbase.client.Put): 9 usages
List (java.util.List): 7 usages
AggregateResponse (org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse): 7 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 7 usages
Region (org.apache.hadoop.hbase.regionserver.Region): 6 usages
Configuration (org.apache.hadoop.conf.Configuration): 5 usages
KeyValue (org.apache.hadoop.hbase.KeyValue): 5 usages
HashMap (java.util.HashMap): 4 usages
ScanType (org.apache.hadoop.hbase.regionserver.ScanType): 4 usages
StoreFileScanner (org.apache.hadoop.hbase.regionserver.StoreFileScanner): 4 usages
ByteString (com.google.protobuf.ByteString): 3 usages