Example 11 with FirstKeyOnlyFilter

Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project hbase by apache.

From the class TestPartialResultsFromClientSide, method testMayHaveMoreCellsInRowReturnsTrueAndSetBatch.

@Test
public void testMayHaveMoreCellsInRowReturnsTrueAndSetBatch() throws IOException {
    Table table = createTestTable(TableName.valueOf(name.getMethodName()), ROWS, FAMILIES, QUALIFIERS, VALUE);
    Scan scan = new Scan();
    scan.setBatch(1);
    scan.setFilter(new FirstKeyOnlyFilter());
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result result;
        while ((result = scanner.next()) != null) {
            // setBatch(1) plus FirstKeyOnlyFilter leaves exactly one cell
            // in each Result, even when the underlying row has more cells.
            assertTrue(result.rawCells() != null);
            assertEquals(1, result.rawCells().length);
        }
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) Scan(org.apache.hadoop.hbase.client.Scan) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
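
The test above is about batching semantics; the more common reason to reach for FirstKeyOnlyFilter is a cheap row count, since only the first cell of each row ever crosses the wire. A minimal sketch (not from the example above), assuming an already-open Table named table and a hypothetical helper name countRows:

private long countRows(Table table) throws IOException {
    Scan scan = new Scan();
    // Return only the first cell of each row; the remaining cells and
    // their values never leave the server.
    scan.setFilter(new FirstKeyOnlyFilter());
    long rows = 0;
    try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result ignored : scanner) {
            rows++;
        }
    }
    return rows;
}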

Example 12 with FirstKeyOnlyFilter

Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project hbase by apache.

From the class RegionMover, method isSuccessfulScan.

/**
   * Tries to scan a single row from the passed region to verify that it is serving reads.
   * @param admin Admin whose connection is used to reach the region's table
   * @param region region to probe with a single-row scan
   * @throws IOException if the probe scan fails
   */
private void isSuccessfulScan(Admin admin, HRegionInfo region) throws IOException {
    Scan scan = new Scan(region.getStartKey());
    scan.setBatch(1);
    scan.setCaching(1);
    scan.setFilter(new FirstKeyOnlyFilter());
    try {
        Table table = admin.getConnection().getTable(region.getTable());
        try {
            ResultScanner scanner = table.getScanner(scan);
            try {
                scanner.next();
            } finally {
                scanner.close();
            }
        } finally {
            table.close();
        }
    } catch (IOException e) {
        LOG.error("Could not scan region:" + region.getEncodedName(), e);
        throw e;
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) Scan(org.apache.hadoop.hbase.client.Scan) IOException(java.io.IOException)
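
On Java 7+ the nested try/finally ladder above collapses into try-with-resources, since both Table and ResultScanner are Closeable. A sketch of the equivalent probe, assuming the same fields (LOG) and imports are in scope:

private void isSuccessfulScan(Admin admin, HRegionInfo region) throws IOException {
    Scan scan = new Scan(region.getStartKey());
    scan.setBatch(1);
    scan.setCaching(1);
    scan.setFilter(new FirstKeyOnlyFilter());
    try (Table table = admin.getConnection().getTable(region.getTable());
         ResultScanner scanner = table.getScanner(scan)) {
        // A single next() is enough: we only care that the region answers.
        scanner.next();
    } catch (IOException e) {
        LOG.error("Could not scan region:" + region.getEncodedName(), e);
        throw e;
    }
}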

Example 13 with FirstKeyOnlyFilter

Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project hbase by apache.

From the class TestSeekBeforeWithReverseScan, method testReverseScanWithPadding.

@Test
public void testReverseScanWithPadding() throws Exception {
    byte[] terminator = new byte[] { -1 };
    byte[] row1 = Bytes.add(invert(Bytes.toBytes("a")), terminator);
    byte[] row2 = Bytes.add(invert(Bytes.toBytes("ab")), terminator);
    byte[] row3 = Bytes.add(invert(Bytes.toBytes("b")), terminator);
    Put put1 = new Put(row1);
    put1.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
    Put put2 = new Put(row2);
    put2.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
    Put put3 = new Put(row3);
    put3.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
    region.put(put1);
    region.put(put2);
    region.put(put3);
    region.flush(true);
    Scan scan = new Scan();
    scan.setCacheBlocks(false);
    scan.setReversed(true);
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.addFamily(cfName);
    RegionScanner scanner = region.getScanner(scan);
    List<Cell> res = new ArrayList<>();
    // The final call to next(res) returns false while still delivering the
    // last row, so the counter starts at 1 to account for it.
    int count = 1;
    while (scanner.next(res)) {
        count++;
    }
    assertEquals(3, count);
}
Also used : RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)
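
The invert() helper is not part of the snippet. A hypothetical reconstruction, assuming it simply complements every byte so that the generated row keys sort in reverse lexicographic order (which is what makes the terminator byte and the reverse scan interesting here):

// Hypothetical helper, not shown in the source above: flipping every bit
// reverses the lexicographic ordering of the resulting row keys.
private byte[] invert(byte[] bytes) {
    byte[] inverted = new byte[bytes.length];
    for (int i = 0; i < bytes.length; i++) {
        inverted[i] = (byte) (bytes[i] ^ 0xFF);
    }
    return inverted;
}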

Example 14 with FirstKeyOnlyFilter

Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project hbase by apache.

From the class BulkDeleteEndpoint, method delete.

@Override
public void delete(RpcController controller, BulkDeleteRequest request, RpcCallback<BulkDeleteResponse> done) {
    long totalRowsDeleted = 0L;
    long totalVersionsDeleted = 0L;
    Region region = env.getRegion();
    int rowBatchSize = request.getRowBatchSize();
    Long timestamp = null;
    if (request.hasTimestamp()) {
        timestamp = request.getTimestamp();
    }
    DeleteType deleteType = request.getDeleteType();
    boolean hasMore = true;
    RegionScanner scanner = null;
    try {
        Scan scan = ProtobufUtil.toScan(request.getScan());
        if (scan.getFilter() == null && deleteType == DeleteType.ROW) {
            // We only need the row keys, so the first KV of each row is enough.
            // This filter can be applied only for row deletes; for the other
            // delete types the scan itself must specify the columns to delete.
            scan.setFilter(new FirstKeyOnlyFilter());
        }
        // From here on we assume the scan is set up correctly, with the
        // appropriate filter and the necessary column(s).
        scanner = region.getScanner(scan);
        while (hasMore) {
            List<List<Cell>> deleteRows = new ArrayList<>(rowBatchSize);
            for (int i = 0; i < rowBatchSize; i++) {
                List<Cell> results = new ArrayList<>();
                hasMore = scanner.next(results);
                if (results.size() > 0) {
                    deleteRows.add(results);
                }
                if (!hasMore) {
                    // There are no more rows.
                    break;
                }
            }
            if (deleteRows.size() > 0) {
                Mutation[] deleteArr = new Mutation[deleteRows.size()];
                int i = 0;
                for (List<Cell> deleteRow : deleteRows) {
                    deleteArr[i++] = createDeleteMutation(deleteRow, deleteType, timestamp);
                }
                OperationStatus[] opStatus = region.batchMutate(deleteArr, HConstants.NO_NONCE, HConstants.NO_NONCE);
                for (i = 0; i < opStatus.length; i++) {
                    if (opStatus[i].getOperationStatusCode() != OperationStatusCode.SUCCESS) {
                        break;
                    }
                    totalRowsDeleted++;
                    if (deleteType == DeleteType.VERSION) {
                        byte[] versionsDeleted = deleteArr[i].getAttribute(NO_OF_VERSIONS_TO_DELETE);
                        if (versionsDeleted != null) {
                            totalVersionsDeleted += Bytes.toInt(versionsDeleted);
                        }
                    }
                }
            }
        }
    } catch (IOException ioe) {
        LOG.error(ioe);
        // Call ServerRpcController#getFailedOn() to retrieve this IOException at client side.
        CoprocessorRpcUtils.setControllerException(controller, ioe);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ioe) {
                LOG.error(ioe);
            }
        }
    }
    Builder responseBuilder = BulkDeleteResponse.newBuilder();
    responseBuilder.setRowsDeleted(totalRowsDeleted);
    if (deleteType == DeleteType.VERSION) {
        responseBuilder.setVersionsDeleted(totalVersionsDeleted);
    }
    BulkDeleteResponse result = responseBuilder.build();
    done.run(result);
}
Also used : BulkDeleteResponse(org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) Builder(org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.Builder) ArrayList(java.util.ArrayList) IOException(java.io.IOException) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) OperationStatus(org.apache.hadoop.hbase.regionserver.OperationStatus) Region(org.apache.hadoop.hbase.regionserver.Region) DeleteType(org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType) Scan(org.apache.hadoop.hbase.client.Scan) ArrayList(java.util.ArrayList) List(java.util.List) Mutation(org.apache.hadoop.hbase.client.Mutation) Cell(org.apache.hadoop.hbase.Cell)
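
For context, the request this endpoint consumes can be assembled from the generated protobuf builder. A sketch of the client-side setup, assuming the same generated classes; the setters mirror the getters read in delete() above, the batch size of 500 is an arbitrary example value, and the coprocessor invocation itself (via Table.coprocessorService) is omitted:

// Builds a ROW-type bulk delete request: delete whole rows matched by the
// scan, fetching at most 500 rows per server-side batch.
Scan scan = new Scan();
scan.setFilter(new FirstKeyOnlyFilter());
BulkDeleteRequest request = BulkDeleteRequest.newBuilder()
    .setScan(ProtobufUtil.toScan(scan))
    .setDeleteType(DeleteType.ROW)
    .setRowBatchSize(500)
    .build();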

Example 15 with FirstKeyOnlyFilter

Use of org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter in project storm by apache.

From the class HBaseWindowsStore, method getAllKeys.

@Override
public Iterable<String> getAllKeys() {
    Scan scan = new Scan();
    // This filter ensures that only the row key of each row is returned,
    // not the values stored in it.
    scan.setFilter(new FirstKeyOnlyFilter());
    //scan.setCaching(1000);
    final Iterator<Result> resultIterator;
    try {
        resultIterator = htable().getScanner(scan).iterator();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    final Iterator<String> iterator = new Iterator<String>() {

        @Override
        public boolean hasNext() {
            return resultIterator.hasNext();
        }

        @Override
        public String next() {
            Result result = resultIterator.next();
            String key = null;
            try {
                key = new String(result.getRow(), UTF_8);
            } catch (UnsupportedEncodingException e) {
                throw new RuntimeException(e);
            }
            return key;
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException("remove operation is not supported");
        }
    };
    return new Iterable<String>() {

        @Override
        public Iterator<String> iterator() {
            return iterator;
        }
    };
}
Also used : FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) Iterator(java.util.Iterator) UnsupportedEncodingException(java.io.UnsupportedEncodingException) Scan(org.apache.hadoop.hbase.client.Scan) IOException(java.io.IOException) Result(org.apache.hadoop.hbase.client.Result)
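
The try/catch around the String constructor exists only because UTF_8 here is evidently a charset name held as a String; passing a java.nio.charset.Charset constant removes the checked exception. A sketch of the simplified next(), assuming nothing else in the class changes:

@Override
public String next() {
    Result result = resultIterator.next();
    // java.nio.charset.StandardCharsets.UTF_8 is a Charset, so this
    // constructor declares no UnsupportedEncodingException and the
    // try/catch disappears.
    return new String(result.getRow(), java.nio.charset.StandardCharsets.UTF_8);
}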

Aggregations

FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter): 28
Scan (org.apache.hadoop.hbase.client.Scan): 17
Cell (org.apache.hadoop.hbase.Cell): 9
ArrayList (java.util.ArrayList): 8
Test (org.junit.Test): 8
IOException (java.io.IOException): 7
Result (org.apache.hadoop.hbase.client.Result): 7
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 6
Filter (org.apache.hadoop.hbase.filter.Filter): 5
RowFilter (org.apache.hadoop.hbase.filter.RowFilter): 4
Connection (java.sql.Connection): 3
Put (org.apache.hadoop.hbase.client.Put): 3
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 3
CompareFilter (org.apache.hadoop.hbase.filter.CompareFilter): 3
FilterList (org.apache.hadoop.hbase.filter.FilterList): 3
BloomFilter (org.apache.hive.common.util.BloomFilter): 3
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 3
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 3
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 3
List (java.util.List): 2