Example 56 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.

the class MetaDataEndpointImpl method doDropFunction.

private MetaDataMutationResult doDropFunction(long clientTimeStamp, List<byte[]> keys, List<Mutation> functionMetaData, List<ImmutableBytesPtr> invalidateList) throws IOException, SQLException {
    List<byte[]> keysClone = new ArrayList<byte[]>(keys);
    List<PFunction> functions = doGetFunctions(keysClone, clientTimeStamp);
    // We didn't find a function, or there was one but it's been deleted.
    // In either case we want to return.
    if (functions == null || functions.isEmpty()) {
        if (buildDeletedFunction(keys.get(0), new FunctionBytesPtr(keys.get(0)), env.getRegion(), clientTimeStamp) != null) {
            return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, EnvironmentEdgeManager.currentTimeMillis(), null);
        }
        return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
    }
    if (functions != null && !functions.isEmpty()) {
        if (functions.get(0).getTimeStamp() < clientTimeStamp) {
            // the function is visible to the client, so proceed with the drop
            if (isFunctionDeleted(functions.get(0))) {
                return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
            }
            invalidateList.add(new FunctionBytesPtr(keys.get(0)));
            Region region = env.getRegion();
            Scan scan = MetaDataUtil.newTableRowsScan(keys.get(0), MIN_TABLE_TIMESTAMP, clientTimeStamp);
            List<Cell> results = Lists.newArrayList();
            try (RegionScanner scanner = region.getScanner(scan)) {
                scanner.next(results);
                if (results.isEmpty()) {
                    // Should not be possible
                    return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
                }
                do {
                    Cell kv = results.get(0);
                    Delete delete = new Delete(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), clientTimeStamp);
                    functionMetaData.add(delete);
                    results.clear();
                    scanner.next(results);
                } while (!results.isEmpty());
            }
            return new MetaDataMutationResult(MutationCode.FUNCTION_ALREADY_EXISTS, EnvironmentEdgeManager.currentTimeMillis(), functions, true);
        }
    }
    return new MetaDataMutationResult(MutationCode.FUNCTION_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) PFunction(org.apache.phoenix.parse.PFunction) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) ArrayList(java.util.ArrayList) Region(org.apache.hadoop.hbase.regionserver.Region) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) FunctionBytesPtr(org.apache.phoenix.cache.GlobalCache.FunctionBytesPtr)
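
The drop loop above leans on the core RegionScanner contract: next(List<Cell>) appends the next row's cells to the supplied list and returns true while more rows remain, and the scanner must be closed when done. A minimal sketch of that drain pattern, assuming a caller-supplied Region and Scan; ScannerDrainSketch, drain, RowHandler, and onRow are hypothetical names, not Phoenix API:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;

public final class ScannerDrainSketch {

    // drains every row matched by the scan, handing each row's cells to the handler
    static void drain(Region region, Scan scan, RowHandler handler) throws IOException {
        List<Cell> row = new ArrayList<Cell>();
        try (RegionScanner scanner = region.getScanner(scan)) {
            boolean hasMore;
            do {
                row.clear();
                // next() appends the next row's cells and reports whether rows remain
                hasMore = scanner.next(row);
                if (!row.isEmpty()) {
                    handler.onRow(row);
                }
            } while (hasMore);
        }
    }

    interface RowHandler {
        void onRow(List<Cell> cells) throws IOException;
    }
}

doDropFunction is this same shape: its handler body turns each row's first cell into a Delete at the client timestamp.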

Example 57 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project cdap by caskdata.

the class IncrementHandler method preGetOp.

@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx, Get get, List<Cell> results) throws IOException {
    Scan scan = new Scan(get);
    scan.setMaxVersions();
    scan.setFilter(Filters.combine(new IncrementFilter(), scan.getFilter()));
    RegionScanner scanner = null;
    try {
        scanner = new IncrementSummingScanner(region, scan.getBatch(), region.getScanner(scan), ScanType.USER_SCAN);
        scanner.next(results);
        ctx.bypass();
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
}
Also used : RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Scan(org.apache.hadoop.hbase.client.Scan)
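
preGetOp rewrites the Get as a Scan so the IncrementFilter and IncrementSummingScanner can see and sum the raw delta cells, then calls ctx.bypass() so the region server skips its default get processing and returns the summed cells directly. For context, a minimal sketch of how a RegionObserver like this could be attached to a table with the HTableDescriptor API of the same HBase era; the table name, family, and coprocessor class name are placeholders, not CDAP's:

import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public final class AttachObserverSketch {

    // builds a descriptor for a one-family table with the observer attached
    static HTableDescriptor withObserver() throws IOException {
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example"));
        desc.addFamily(new HColumnDescriptor("f"));
        // the class is loaded from the region server classpath; its preGetOp hook
        // then runs before every client Get against this table
        desc.addCoprocessor("com.example.IncrementHandler");
        return desc;
    }
}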

Example 59 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project cdap by caskdata.

the class IncrementSummingScannerTest method testIncrementScanning.

@Test
public void testIncrementScanning() throws Exception {
    TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "TestIncrementSummingScanner");
    byte[] familyBytes = Bytes.toBytes("f");
    byte[] columnBytes = Bytes.toBytes("c");
    HRegion region = createRegion(tableId, familyBytes);
    try {
        region.initialize();
        // test handling of a single increment value alone
        Put p = new Put(Bytes.toBytes("r1"));
        p.add(familyBytes, columnBytes, Bytes.toBytes(3L));
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        Scan scan = new Scan();
        RegionScanner scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        List<Cell> results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(1, results.size());
        Cell cell = results.get(0);
        assertNotNull(cell);
        assertEquals(3L, Bytes.toLong(cell.getValue()));
        // test handling of a single total sum
        p = new Put(Bytes.toBytes("r2"));
        p.add(familyBytes, columnBytes, Bytes.toBytes(5L));
        region.put(p);
        scan = new Scan(Bytes.toBytes("r2"));
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(1, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(5L, Bytes.toLong(cell.getValue()));
        // test handling of multiple increment values
        long now = System.currentTimeMillis();
        p = new Put(Bytes.toBytes("r3"));
        for (int i = 0; i < 5; i++) {
            p.add(familyBytes, columnBytes, now - i, Bytes.toBytes((long) (i + 1)));
        }
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        scan = new Scan(Bytes.toBytes("r3"));
        scan.setMaxVersions();
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(1, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(15L, Bytes.toLong(cell.getValue()));
        // test handling of multiple increment values followed by a total sum, then other increments
        now = System.currentTimeMillis();
        p = new Put(Bytes.toBytes("r4"));
        for (int i = 0; i < 3; i++) {
            p.add(familyBytes, columnBytes, now - i, Bytes.toBytes(1L));
        }
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        // this put will appear as a "total" sum prior to all the delta puts
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, columnBytes, now - 5, Bytes.toBytes(5L));
        region.put(p);
        scan = new Scan(Bytes.toBytes("r4"));
        scan.setMaxVersions();
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(1, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(8L, Bytes.toLong(cell.getValue()));
        // test handling of an increment column followed by a non-increment column
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, Bytes.toBytes("c2"), Bytes.toBytes("value"));
        region.put(p);
        scan = new Scan(Bytes.toBytes("r4"));
        scan.setMaxVersions();
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(2, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(8L, Bytes.toLong(cell.getValue()));
        cell = results.get(1);
        assertNotNull(cell);
        assertEquals("value", Bytes.toString(cell.getValue()));
        // test handling of an increment column followed by a delete
        now = System.currentTimeMillis();
        Delete d = new Delete(Bytes.toBytes("r5"));
        d.deleteColumn(familyBytes, columnBytes, now - 3);
        region.delete(d);
        p = new Put(Bytes.toBytes("r5"));
        for (int i = 2; i >= 0; i--) {
            p.add(familyBytes, columnBytes, now - i, Bytes.toBytes(1L));
        }
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        scan = new Scan(Bytes.toBytes("r5"));
        scan.setMaxVersions();
        scan.setRaw(true);
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.COMPACT_RETAIN_DELETES);
        results = Lists.newArrayList();
        scanner.next(results);
        // the raw scan with COMPACT_RETAIN_DELETES keeps the delete marker in the results
        assertEquals(2, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(3L, Bytes.toLong(cell.getValue(), IncrementHandlerState.DELTA_MAGIC_PREFIX.length, 8));
        // next cell should be the delete
        cell = results.get(1);
        assertTrue(CellUtil.isDelete(cell));
    } finally {
        region.close();
    }
}
Also used : TableId(co.cask.cdap.data2.util.TableId) Delete(org.apache.hadoop.hbase.client.Delete) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put) HBase10CDHTest(co.cask.cdap.data.hbase.HBase10CDHTest) Test(org.junit.Test)
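
The final assertions only hold because of the raw scan: setRaw(true) makes delete markers visible to the scanner, and with ScanType.COMPACT_RETAIN_DELETES the summing scanner passes the tombstone through (the summed delta also keeps its magic prefix, which is why the value is read at an offset). A standalone sketch of the raw-scan idiom, assuming an HRegion opened elsewhere; RawScanSketch and countTombstones are hypothetical names:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionScanner;

public final class RawScanSketch {

    // counts the delete markers in the first row at or after startRow;
    // a normal user scan would filter these tombstones out
    static int countTombstones(HRegion region, byte[] startRow) throws IOException {
        Scan scan = new Scan(startRow);
        scan.setMaxVersions();
        scan.setRaw(true);
        int tombstones = 0;
        try (RegionScanner scanner = region.getScanner(scan)) {
            List<Cell> cells = new ArrayList<Cell>();
            scanner.next(cells);
            for (Cell cell : cells) {
                if (CellUtil.isDelete(cell)) {
                    tombstones++;
                }
            }
        }
        return tombstones;
    }
}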

Example 60 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project cdap by caskdata.

the class IncrementSummingScannerTest method verifyCounts.

private void verifyCounts(HRegion region, Scan scan, long[] counts, int batch) throws Exception {
    RegionScanner scanner = new IncrementSummingScanner(region, batch, region.getScanner(scan), ScanType.USER_SCAN);
    // when counts is empty the loop never runs, so hasMore must start false for the final assert
    boolean hasMore = counts.length > 0;
    for (long count : counts) {
        List<Cell> results = Lists.newArrayList();
        hasMore = scanner.next(results);
        assertEquals(1, results.size());
        Cell cell = results.get(0);
        assertNotNull(cell);
        assertEquals(count, Bytes.toLong(cell.getValue()));
    }
    assertFalse(hasMore);
}
Also used : RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Cell(org.apache.hadoop.hbase.Cell)
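
A hypothetical call site for the helper, assuming two rows whose deltas sum to 6 and 10 and a single counter column per row (so each next() yields exactly one summed cell); the totals and batch sizes are illustrative only:

Scan scan = new Scan();
scan.setMaxVersions();
// scan once with no batch limit, then again with a small batch
// to exercise the scanner's batched summing path
verifyCounts(region, scan, new long[] { 6L, 10L }, -1);
verifyCounts(region, scan, new long[] { 6L, 10L }, 2);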

Aggregations

RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner) 67
Scan (org.apache.hadoop.hbase.client.Scan) 52
Cell (org.apache.hadoop.hbase.Cell) 45
Test (org.junit.Test) 28
Put (org.apache.hadoop.hbase.client.Put) 27
ArrayList (java.util.ArrayList) 20
HRegion (org.apache.hadoop.hbase.regionserver.HRegion) 20
TableId (co.cask.cdap.data2.util.TableId) 17
Region (org.apache.hadoop.hbase.regionserver.Region) 14
List (java.util.List) 11
Delete (org.apache.hadoop.hbase.client.Delete) 11
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) 10
KeyValue (org.apache.hadoop.hbase.KeyValue) 8
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity) 7
Configuration (org.apache.hadoop.conf.Configuration) 6
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) 6
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) 6
ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference) 6
IOException (java.io.IOException) 5
PTable (org.apache.phoenix.schema.PTable) 4