Example 71 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project cdap by cdapio.

the class IncrementSummingScannerTest method testWithBatchLimit.

@Test
public void testWithBatchLimit() throws Exception {
    TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "testWithBatchLimit");
    byte[] familyBytes = Bytes.toBytes("f");
    byte[] columnBytes = Bytes.toBytes("c2");
    HRegion region = createRegion(tableId, familyBytes);
    try {
        region.initialize();
        long now = System.currentTimeMillis();
        // put a non-increment column
        Put p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, Bytes.toBytes("c1"), Bytes.toBytes("value1"));
        region.put(p);
        // now put some increment deltas in a column
        p = new Put(Bytes.toBytes("r4"));
        for (int i = 0; i < 3; i++) {
            p.add(familyBytes, columnBytes, now - i, Bytes.toBytes(1L));
        }
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        // put some non-increment columns
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, Bytes.toBytes("c3"), Bytes.toBytes("value3"));
        region.put(p);
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, Bytes.toBytes("c4"), Bytes.toBytes("value4"));
        region.put(p);
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, Bytes.toBytes("c5"), Bytes.toBytes("value5"));
        region.put(p);
        // this put will appear as a "total" sum prior to all the delta puts
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, columnBytes, now - 5, Bytes.toBytes(5L));
        region.put(p);
        Scan scan = new Scan(Bytes.toBytes("r4"));
        scan.setMaxVersions();
        RegionScanner scanner = new IncrementSummingScanner(region, 3, region.getScanner(scan), ScanType.USER_SCAN);
        List<Cell> results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(3, results.size());
        Cell cell = results.get(0);
        assertNotNull(cell);
        assertEquals("value1", Bytes.toString(cell.getValue()));
        cell = results.get(1);
        assertNotNull(cell);
        assertEquals(8L, Bytes.toLong(cell.getValue()));
        cell = results.get(2);
        assertNotNull(cell);
        assertEquals("value3", Bytes.toString(cell.getValue()));
    } finally {
        region.close();
    }
}
Also used : TableId(io.cdap.cdap.data2.util.TableId) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put) HBase12CDH570Test(io.cdap.cdap.data.hbase.HBase12CDH570Test) Test(org.junit.Test)
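
In the test above, the three delta puts and the earlier "total" put for column c2 coalesce into a single cell with value 1 + 1 + 1 + 5 = 8, while the non-increment columns pass through unchanged. As a rough sketch of the coalescing idea only (CDAP's real IncrementSummingScanner also honors the batch limit and preserves cell ordering), a hypothetical helper that drains a raw RegionScanner and sums the long-encoded versions of one column could look like this:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper, not part of CDAP: sums every version of one column
// returned by a raw scanner, the way delta writes are coalesced into a total.
public final class DeltaSummer {
    private DeltaSummer() { }

    public static long sumColumn(RegionScanner scanner, byte[] family, byte[] qualifier)
            throws IOException {
        long sum = 0;
        List<Cell> cells = new ArrayList<>();
        boolean hasMore;
        do {
            cells.clear();
            hasMore = scanner.next(cells);
            for (Cell cell : cells) {
                // only long-encoded values of the requested column contribute
                if (CellUtil.matchingColumn(cell, family, qualifier)) {
                    sum += Bytes.toLong(CellUtil.cloneValue(cell));
                }
            }
        } while (hasMore);
        return sum;
    }
}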

Example 72 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project cdap by cdapio.

the class HBaseTableTest method testEnableIncrements.

@Test
public void testEnableIncrements() throws Exception {
    // set up one table with readless increments disabled and one with them enabled
    String disableTableName = "incr-disable";
    String enabledTableName = "incr-enable";
    TableId disabledTableId = hBaseTableUtil.createHTableId(NAMESPACE1, disableTableName);
    TableId enabledTableId = hBaseTableUtil.createHTableId(NAMESPACE1, enabledTableName);
    DatasetProperties propsDisabled = TableProperties.builder().setReadlessIncrementSupport(false).setConflictDetection(ConflictDetection.COLUMN).build();
    HBaseTableAdmin disabledAdmin = getTableAdmin(CONTEXT1, disableTableName, propsDisabled);
    disabledAdmin.create();
    HBaseAdmin admin = TEST_HBASE.getHBaseAdmin();
    DatasetProperties propsEnabled = TableProperties.builder().setReadlessIncrementSupport(true).setConflictDetection(ConflictDetection.COLUMN).build();
    HBaseTableAdmin enabledAdmin = getTableAdmin(CONTEXT1, enabledTableName, propsEnabled);
    enabledAdmin.create();
    try {
        try {
            HTableDescriptor htd = hBaseTableUtil.getHTableDescriptor(admin, disabledTableId);
            List<String> cps = htd.getCoprocessors();
            assertFalse(cps.contains(IncrementHandler.class.getName()));
            htd = hBaseTableUtil.getHTableDescriptor(admin, enabledTableId);
            cps = htd.getCoprocessors();
            assertTrue(cps.contains(IncrementHandler.class.getName()));
        } finally {
            admin.close();
        }
        try (BufferingTable table = getTable(CONTEXT1, enabledTableName, propsEnabled)) {
            byte[] row = Bytes.toBytes("row1");
            byte[] col = Bytes.toBytes("col1");
            DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
            Transaction tx = txSystemClient.startShort();
            table.startTx(tx);
            table.increment(row, col, 10);
            table.commitTx();
            // verify that the value was written as a delta value
            final byte[] expectedValue = Bytes.add(IncrementHandlerState.DELTA_MAGIC_PREFIX, Bytes.toBytes(10L));
            final AtomicBoolean foundValue = new AtomicBoolean();
            byte[] enabledTableNameBytes = hBaseTableUtil.getHTableDescriptor(admin, enabledTableId).getName();
            TEST_HBASE.forEachRegion(enabledTableNameBytes, new Function<HRegion, Object>() {

                @Override
                public Object apply(HRegion hRegion) {
                    org.apache.hadoop.hbase.client.Scan scan = hBaseTableUtil.buildScan().build();
                    try {
                        RegionScanner scanner = hRegion.getScanner(scan);
                        List<Cell> results = Lists.newArrayList();
                        boolean hasMore;
                        do {
                            hasMore = scanner.next(results);
                            for (Cell cell : results) {
                                if (CellUtil.matchingValue(cell, expectedValue)) {
                                    foundValue.set(true);
                                }
                            }
                        } while (hasMore);
                    } catch (IOException ioe) {
                        fail("IOException scanning region: " + ioe.getMessage());
                    }
                    return null;
                }
            });
            assertTrue("Should have seen the expected encoded delta value in the " + enabledTableName + " table region", foundValue.get());
        }
    } finally {
        disabledAdmin.drop();
        enabledAdmin.drop();
    }
}
Also used : TableId(io.cdap.cdap.data2.util.TableId) DatasetProperties(io.cdap.cdap.api.dataset.DatasetProperties) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) BufferingTable(io.cdap.cdap.data2.dataset2.lib.table.BufferingTable) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Transaction(org.apache.tephra.Transaction) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) DetachedTxSystemClient(org.apache.tephra.inmemory.DetachedTxSystemClient) Scan(io.cdap.cdap.api.dataset.table.Scan) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) Cell(org.apache.hadoop.hbase.Cell) BufferingTableTest(io.cdap.cdap.data2.dataset2.lib.table.BufferingTableTest) Test(org.junit.Test)
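
The forEachRegion callback above looks for the raw delta encoding: a readless increment is persisted as IncrementHandlerState.DELTA_MAGIC_PREFIX followed by the 8-byte long delta, which is exactly what Bytes.add(DELTA_MAGIC_PREFIX, Bytes.toBytes(10L)) constructs. A minimal sketch of that membership check, assuming only the prefix-plus-long layout shown in the test (the isDeltaEncoded helper is hypothetical):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical predicate: true if the cell value has the delta-write layout
// used in the test, i.e. the magic prefix followed by an 8-byte long delta.
static boolean isDeltaEncoded(Cell cell, byte[] deltaMagicPrefix) {
    byte[] value = CellUtil.cloneValue(cell);
    return value.length == deltaMagicPrefix.length + Bytes.SIZEOF_LONG
            && Bytes.startsWith(value, deltaMagicPrefix);
}

A cell matching this predicate would then decode its delta with Bytes.toLong(value, deltaMagicPrefix.length).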

Example 73 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.

the class TestLocalTableState method testOnlyLoadsRequestedColumns.

@SuppressWarnings("unchecked")
@Test
public void testOnlyLoadsRequestedColumns() throws Exception {
    // setup mocks
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Region region = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(region);
    RegionScanner scanner = Mockito.mock(RegionScanner.class);
    Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
    final KeyValue storedKv = new KeyValue(row, fam, qual, ts, Type.Put, Bytes.toBytes("stored-value"));
    storedKv.setSequenceId(2);
    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
            list.add(storedKv);
            return false;
        }
    });
    LocalHBaseState state = new LocalTable(env);
    Put pendingUpdate = new Put(row);
    pendingUpdate.add(fam, qual, ts, val);
    LocalTableState table = new LocalTableState(env, state, pendingUpdate);
    // do the lookup for the given column
    ColumnReference col = new ColumnReference(fam, qual);
    table.setCurrentTimestamp(ts);
    // check that the value is there
    Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData);
    Scanner s = p.getFirst();
    // make sure it read the table exactly once
    assertEquals("Didn't get the stored keyvalue!", storedKv, s.next());
    // on the second lookup it shouldn't access the underlying table again - the cached columns
    // should know they are done
    p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData);
    s = p.getFirst();
    assertEquals("Lost already loaded update!", storedKv, s.next());
    Mockito.verify(env, Mockito.times(1)).getRegion();
    Mockito.verify(region, Mockito.times(1)).getScanner(Mockito.any(Scan.class));
}
Also used : LocalTable(org.apache.phoenix.hbase.index.covered.data.LocalTable) Scanner(org.apache.phoenix.hbase.index.scanner.Scanner) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) KeyValue(org.apache.hadoop.hbase.KeyValue) Put(org.apache.hadoop.hbase.client.Put) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) LocalHBaseState(org.apache.phoenix.hbase.index.covered.data.LocalHBaseState) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Region(org.apache.hadoop.hbase.regionserver.Region) Scan(org.apache.hadoop.hbase.client.Scan) List(java.util.List) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) Test(org.junit.Test)

Example 74 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.

the class TestLocalTableState method testCorrectOrderingWithLazyLoadingColumns.

@SuppressWarnings("unchecked")
@Test
public void testCorrectOrderingWithLazyLoadingColumns() throws Exception {
    Put m = new Put(row);
    m.add(fam, qual, ts, val);
    // setup mocks
    Configuration conf = new Configuration(false);
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Mockito.when(env.getConfiguration()).thenReturn(conf);
    Region region = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(region);
    RegionScanner scanner = Mockito.mock(RegionScanner.class);
    Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
    final byte[] stored = Bytes.toBytes("stored-value");
    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
            KeyValue kv = new KeyValue(row, fam, qual, ts, Type.Put, stored);
            kv.setSequenceId(0);
            list.add(kv);
            return false;
        }
    });
    LocalHBaseState state = new LocalTable(env);
    LocalTableState table = new LocalTableState(env, state, m);
    // add the KeyValues from the mutation
    table.addPendingUpdates(KeyValueUtil.ensureKeyValues(m.get(fam, qual)));
    // setup the lookup
    ColumnReference col = new ColumnReference(fam, qual);
    table.setCurrentTimestamp(ts);
    // check that our value still shows up first in the scan, even though this is a lazy load
    Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData);
    Scanner s = p.getFirst();
    assertEquals("Didn't get the pending mutation's value first", m.get(fam, qual).get(0), s.next());
}
Also used : LocalTable(org.apache.phoenix.hbase.index.covered.data.LocalTable) Scanner(org.apache.phoenix.hbase.index.scanner.Scanner) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) Put(org.apache.hadoop.hbase.client.Put) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) LocalHBaseState(org.apache.phoenix.hbase.index.covered.data.LocalHBaseState) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Region(org.apache.hadoop.hbase.regionserver.Region) Scan(org.apache.hadoop.hbase.client.Scan) List(java.util.List) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) Test(org.junit.Test)

Example 75 with RegionScanner

use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.

the class LocalTable method getCurrentRowState.

@Override
public Result getCurrentRowState(Mutation m, Collection<? extends ColumnReference> columns, boolean ignoreNewerMutations) throws IOException {
    byte[] row = m.getRow();
    // need to use a scan here so we can get raw state, which Get doesn't provide.
    Scan s = IndexManagementUtil.newLocalStateScan(Collections.singletonList(columns));
    s.setStartRow(row);
    s.setStopRow(row);
    if (ignoreNewerMutations) {
        // Provides a means for the client to indicate that newer cells should not be considered,
        // enabling mutations to be replayed to partially rebuild the index when a write fails.
        // When replaying mutations we want the oldest timestamp (as anything newer will be replayed)
        long ts = getOldestTimestamp(m.getFamilyCellMap().values());
        s.setTimeRange(0, ts);
    }
    Region region = this.env.getRegion();
    RegionScanner scanner = region.getScanner(s);
    List<Cell> kvs = new ArrayList<Cell>(1);
    boolean more = scanner.next(kvs);
    assert !more : "Got more than one result when scanning" + " a single row in the primary table!";
    Result r = Result.create(kvs);
    scanner.close();
    return r;
}
Also used : RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) ArrayList(java.util.ArrayList) Region(org.apache.hadoop.hbase.regionserver.Region) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Result(org.apache.hadoop.hbase.client.Result)
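
getCurrentRowState uses a scan rather than a Get because a raw scan can surface delete markers and other raw state. A minimal sketch of the same single-row pattern against the plain HBase 1.x API, assuming a raw, all-versions scan similar to what IndexManagementUtil.newLocalStateScan builds (the readRawRow helper is hypothetical):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionScanner;

// Hypothetical helper: reads the current raw state of a single row from a
// region, including delete markers, which a Get cannot surface.
static Result readRawRow(Region region, byte[] row) throws IOException {
    Scan scan = new Scan();
    scan.setStartRow(row);
    scan.setStopRow(row);   // start == stop limits the scan to exactly this row
    scan.setRaw(true);      // include delete markers and all raw cells
    scan.setMaxVersions();  // keep every version, not just the latest
    RegionScanner scanner = region.getScanner(scan);
    try {
        List<Cell> cells = new ArrayList<>();
        boolean more = scanner.next(cells);
        assert !more : "a single-row scan should finish in one batch";
        return Result.create(cells);
    } finally {
        scanner.close();
    }
}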

Aggregations

RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 162
Scan (org.apache.hadoop.hbase.client.Scan): 126
Cell (org.apache.hadoop.hbase.Cell): 116
Put (org.apache.hadoop.hbase.client.Put): 60
Test (org.junit.Test): 60
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 55
ArrayList (java.util.ArrayList): 52
Delete (org.apache.hadoop.hbase.client.Delete): 27
TableId (io.cdap.cdap.data2.util.TableId): 26
List (java.util.List): 25
Region (org.apache.hadoop.hbase.regionserver.Region): 22
IOException (java.io.IOException): 18
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 18
TableId (co.cask.cdap.data2.util.TableId): 17
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 11
PTable (org.apache.phoenix.schema.PTable): 10
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 9
KeyValue (org.apache.hadoop.hbase.KeyValue): 8
Mutation (org.apache.hadoop.hbase.client.Mutation): 8
Result (org.apache.hadoop.hbase.client.Result): 8