
Example 21 with RegionScanner

Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.

From the class LocalTableStateTest, method testCorrectRollback.

/**
 * Test that we correctly roll back the state of a KeyValue.
 * @throws Exception
 */
@Test
@SuppressWarnings("unchecked")
public void testCorrectRollback() throws Exception {
    Put m = new Put(row);
    m.add(fam, qual, ts, val);
    // setup mocks
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Region region = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(region);
    RegionScanner scanner = Mockito.mock(RegionScanner.class);
    Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
    final byte[] stored = Bytes.toBytes("stored-value");
    final KeyValue storedKv = new KeyValue(row, fam, qual, ts, Type.Put, stored);
    storedKv.setSequenceId(2);
    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
            list.add(storedKv);
            return false;
        }
    });
    LocalHBaseState state = new LocalTable(env);
    LocalTableState table = new LocalTableState(state, m);
    // add the kvs from the mutation
    KeyValue kv = KeyValueUtil.ensureKeyValue(m.get(fam, qual).get(0));
    kv.setSequenceId(0);
    table.addPendingUpdates(kv);
    // setup the lookup
    ColumnReference col = new ColumnReference(fam, qual);
    table.setCurrentTimestamp(ts);
    // check that the value is there
    Pair<CoveredDeleteScanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData);
    Scanner s = p.getFirst();
    assertEquals("Didn't get the pending mutation's value first", kv, s.next());
    // rollback that value
    table.rollback(Arrays.asList(kv));
    p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData);
    s = p.getFirst();
    assertEquals("Didn't correctly rollback the row - still found it!", null, s.next());
    Mockito.verify(env, Mockito.times(1)).getRegion();
    Mockito.verify(region, Mockito.times(1)).getScanner(Mockito.any(Scan.class));
}
Also used : LocalTable(org.apache.phoenix.hbase.index.covered.data.LocalTable) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) CoveredDeleteScanner(org.apache.phoenix.hbase.index.scanner.ScannerBuilder.CoveredDeleteScanner) Scanner(org.apache.phoenix.hbase.index.scanner.Scanner) KeyValue(org.apache.hadoop.hbase.KeyValue) CoveredDeleteScanner(org.apache.phoenix.hbase.index.scanner.ScannerBuilder.CoveredDeleteScanner) Put(org.apache.hadoop.hbase.client.Put) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) LocalHBaseState(org.apache.phoenix.hbase.index.covered.data.LocalHBaseState) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Region(org.apache.hadoop.hbase.regionserver.Region) Scan(org.apache.hadoop.hbase.client.Scan) List(java.util.List) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) Test(org.junit.Test)
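
Example 21 (and Example 22 below) stub RegionScanner.next(List) with the same inline Mockito Answer. As a hedged sketch, that wiring could be factored into a small helper; the helper itself is not part of the Phoenix test class, its name is illustrative, and it assumes the same imports listed above plus java.io.IOException and org.mockito.stubbing.Answer.

// A minimal sketch (not in the Phoenix code): the RegionScanner stubbing
// repeated inline in Examples 21 and 22, pulled into a reusable helper.
@SuppressWarnings("unchecked")
private static RegionScanner mockScannerReturning(final KeyValue storedKv) throws IOException {
    RegionScanner scanner = Mockito.mock(RegionScanner.class);
    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
            list.add(storedKv);
            // false signals that the scanner has no further rows
            return false;
        }
    });
    return scanner;
}

With such a helper, the mock setup in Example 21 would reduce to RegionScanner scanner = mockScannerReturning(storedKv); followed by the existing region.getScanner(...) stubbing.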

Example 22 with RegionScanner

Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.

From the class LocalTableStateTest, method testCorrectOrderingWithLazyLoadingColumns.

@SuppressWarnings("unchecked")
@Test
public void testCorrectOrderingWithLazyLoadingColumns() throws Exception {
    Put m = new Put(row);
    m.add(fam, qual, ts, val);
    // setup mocks
    Configuration conf = new Configuration(false);
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Mockito.when(env.getConfiguration()).thenReturn(conf);
    Region region = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(region);
    RegionScanner scanner = Mockito.mock(RegionScanner.class);
    Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
    final byte[] stored = Bytes.toBytes("stored-value");
    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
            KeyValue kv = new KeyValue(row, fam, qual, ts, Type.Put, stored);
            kv.setSequenceId(0);
            list.add(kv);
            return false;
        }
    });
    LocalHBaseState state = new LocalTable(env);
    LocalTableState table = new LocalTableState(state, m);
    // add the kvs from the mutation
    table.addPendingUpdates(KeyValueUtil.ensureKeyValues(m.get(fam, qual)));
    // setup the lookup
    ColumnReference col = new ColumnReference(fam, qual);
    table.setCurrentTimestamp(ts);
    // check that our value still shows up first on scan, even though this is a lazy load
    Pair<CoveredDeleteScanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData);
    Scanner s = p.getFirst();
    assertEquals("Didn't get the pending mutation's value first", m.get(fam, qual).get(0), s.next());
}
Also used : LocalTable(org.apache.phoenix.hbase.index.covered.data.LocalTable) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) CoveredDeleteScanner(org.apache.phoenix.hbase.index.scanner.ScannerBuilder.CoveredDeleteScanner) Scanner(org.apache.phoenix.hbase.index.scanner.Scanner) KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) CoveredDeleteScanner(org.apache.phoenix.hbase.index.scanner.ScannerBuilder.CoveredDeleteScanner) Put(org.apache.hadoop.hbase.client.Put) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) LocalHBaseState(org.apache.phoenix.hbase.index.covered.data.LocalHBaseState) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Region(org.apache.hadoop.hbase.regionserver.Region) Scan(org.apache.hadoop.hbase.client.Scan) List(java.util.List) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) Test(org.junit.Test)

Example 23 with RegionScanner

Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.

From the class NonTxIndexBuilderTest, method setup.

/**
 * Test setup so that {@link NonTxIndexBuilder#getIndexUpdate(Mutation, IndexMetaData)} can be
 * called, where any read requests to
 * {@link LocalTable#getCurrentRowState(Mutation, Collection, boolean)} are served from our test
 * field 'currentRowCells'.
 */
@Before
public void setup() throws Exception {
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration(false);
    conf.set(NonTxIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName());
    Mockito.when(env.getConfiguration()).thenReturn(conf);
    // the following is used by LocalTable#getCurrentRowState()
    Region mockRegion = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(mockRegion);
    Mockito.when(mockRegion.getScanner(Mockito.any(Scan.class))).thenAnswer(new Answer<RegionScanner>() {

        @Override
        public RegionScanner answer(InvocationOnMock invocation) throws Throwable {
            Scan sArg = (Scan) invocation.getArguments()[0];
            TimeRange timeRange = sArg.getTimeRange();
            return getMockTimeRangeRegionScanner(timeRange);
        }
    });
    // the following is called by PhoenixIndexCodec#getIndexUpserts() and getIndexDeletes()
    HRegionInfo mockRegionInfo = Mockito.mock(HRegionInfo.class);
    Mockito.when(env.getRegionInfo()).thenReturn(mockRegionInfo);
    Mockito.when(mockRegion.getRegionInfo()).thenReturn(mockRegionInfo);
    Mockito.when(mockRegionInfo.getStartKey()).thenReturn(Bytes.toBytes("a"));
    Mockito.when(mockRegionInfo.getEndKey()).thenReturn(Bytes.toBytes("z"));
    Mockito.when(mockRegionInfo.getTable()).thenReturn(TableName.valueOf(TEST_TABLE_STRING));
    mockIndexMetaData = Mockito.mock(PhoenixIndexMetaData.class);
    Mockito.when(mockIndexMetaData.requiresPriorRowState((Mutation) Mockito.any())).thenReturn(true);
    Mockito.when(mockIndexMetaData.getIndexMaintainers()).thenReturn(Collections.singletonList(getTestIndexMaintainer()));
    indexBuilder = new NonTxIndexBuilder();
    indexBuilder.setup(env);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TimeRange(org.apache.hadoop.hbase.io.TimeRange) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) PhoenixIndexMetaData(org.apache.phoenix.index.PhoenixIndexMetaData) BaseRegionScanner(org.apache.phoenix.coprocessor.BaseRegionScanner) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) PhoenixIndexCodec(org.apache.phoenix.index.PhoenixIndexCodec) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Region(org.apache.hadoop.hbase.regionserver.Region) Scan(org.apache.hadoop.hbase.client.Scan) Before(org.junit.Before)
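
The helper getMockTimeRangeRegionScanner(TimeRange) referenced in the Answer above is not shown in this example. Below is a hedged sketch of what it could look like, assuming the row's cells live in the test field 'currentRowCells' mentioned in the Javadoc (here treated as an Iterable of Cell); the filtering logic is an assumption for illustration, not the project's actual implementation.

// Hypothetical reconstruction: a mock RegionScanner whose next(List) returns
// only those cells from 'currentRowCells' that fall inside the requested
// TimeRange, then reports that the scan is exhausted.
@SuppressWarnings("unchecked")
private RegionScanner getMockTimeRangeRegionScanner(final TimeRange timeRange) throws IOException {
    RegionScanner mockScanner = Mockito.mock(RegionScanner.class);
    Mockito.when(mockScanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            List<Cell> results = (List<Cell>) invocation.getArguments()[0];
            for (Cell cell : currentRowCells) {
                if (cell.getTimestamp() >= timeRange.getMin()
                        && cell.getTimestamp() < timeRange.getMax()) {
                    results.add(cell);
                }
            }
            // false signals that there are no further rows
            return false;
        }
    });
    return mockScanner;
}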

Example 24 with RegionScanner

Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.

From the class UngroupedAggregateRegionObserver, method rebuildIndices.

private RegionScanner rebuildIndices(final RegionScanner innerScanner, final Region region, final Scan scan, Configuration config) throws IOException {
    byte[] indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
    boolean useProto = true;
    // for backward compatibility fall back to look up by the old attribute
    if (indexMetaData == null) {
        useProto = false;
        indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
    }
    byte[] clientVersionBytes = scan.getAttribute(PhoenixIndexCodec.CLIENT_VERSION);
    boolean hasMore;
    int rowCount = 0;
    try {
        int maxBatchSize = config.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
        long maxBatchSizeBytes = config.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
        MutationList mutations = new MutationList(maxBatchSize);
        region.startRegionOperation();
        byte[] uuidValue = ServerCacheClient.generateId();
        synchronized (innerScanner) {
            do {
                List<Cell> results = new ArrayList<Cell>();
                hasMore = innerScanner.nextRaw(results);
                if (!results.isEmpty()) {
                    Put put = null;
                    Delete del = null;
                    for (Cell cell : results) {
                        if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
                            if (put == null) {
                                put = new Put(CellUtil.cloneRow(cell));
                                put.setAttribute(useProto ? PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, indexMetaData);
                                put.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                put.setAttribute(REPLAY_WRITES, REPLAY_ONLY_INDEX_WRITES);
                                put.setAttribute(PhoenixIndexCodec.CLIENT_VERSION, clientVersionBytes);
                                mutations.add(put);
                                // Since we're replaying existing mutations, it makes no sense to write them to the wal
                                put.setDurability(Durability.SKIP_WAL);
                            }
                            put.add(cell);
                        } else {
                            if (del == null) {
                                del = new Delete(CellUtil.cloneRow(cell));
                                del.setAttribute(useProto ? PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, indexMetaData);
                                del.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                del.setAttribute(REPLAY_WRITES, REPLAY_ONLY_INDEX_WRITES);
                                del.setAttribute(PhoenixIndexCodec.CLIENT_VERSION, clientVersionBytes);
                                mutations.add(del);
                                // Since we're replaying existing mutations, it makes no sense to write them to the wal
                                del.setDurability(Durability.SKIP_WAL);
                            }
                            del.addDeleteMarker(cell);
                        }
                    }
                    if (ServerUtil.readyToCommit(mutations.size(), mutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
                        region.batchMutate(mutations.toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
                        uuidValue = ServerCacheClient.generateId();
                        mutations.clear();
                    }
                    rowCount++;
                }
            } while (hasMore);
            if (!mutations.isEmpty()) {
                region.batchMutate(mutations.toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
            }
        }
    } catch (IOException e) {
        logger.error("IOException during rebuilding: " + Throwables.getStackTraceAsString(e));
        throw e;
    } finally {
        region.closeRegionOperation();
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final KeyValue aggKeyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    RegionScanner scanner = new BaseRegionScanner(innerScanner) {

        @Override
        public HRegionInfo getRegionInfo() {
            return region.getRegionInfo();
        }

        @Override
        public boolean isFilterDone() {
            return true;
        }

        @Override
        public void close() throws IOException {
            innerScanner.close();
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) KeyValue(org.apache.hadoop.hbase.KeyValue) ArrayList(java.util.ArrayList) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Put(org.apache.hadoop.hbase.client.Put) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) ArrayList(java.util.ArrayList) List(java.util.List) Mutation(org.apache.hadoop.hbase.client.Mutation) Cell(org.apache.hadoop.hbase.Cell)
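
The anonymous BaseRegionScanner returned at the end produces exactly one cell whose value is the PLong-encoded count of rows that were replayed. A sketch of how a caller could drain that scanner and decode the count follows; variable names are illustrative, the call site is simplified, and exception handling is omitted.

// Illustrative only: read the single aggregate cell produced by the wrapper
// scanner and decode the rebuilt-row count from its value.
List<Cell> results = new ArrayList<Cell>();
RegionScanner aggScanner = rebuildIndices(innerScanner, region, scan, config);
aggScanner.next(results);   // adds the one aggregate KeyValue and returns false
aggScanner.close();
Cell aggCell = results.get(0);
long rebuiltRows = (Long) PLong.INSTANCE.toObject(
        aggCell.getValueArray(), aggCell.getValueOffset(), aggCell.getValueLength());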

Example 25 with RegionScanner

Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project phoenix by apache.

From the class UngroupedAggregateRegionObserver, method collectStats.

private RegionScanner collectStats(final RegionScanner innerScanner, StatisticsCollector stats, final Region region, final Scan scan, Configuration config) throws IOException {
    StatsCollectionCallable callable = new StatsCollectionCallable(stats, region, innerScanner, config, scan);
    byte[] asyncBytes = scan.getAttribute(BaseScannerRegionObserver.RUN_UPDATE_STATS_ASYNC_ATTRIB);
    boolean async = false;
    if (asyncBytes != null) {
        async = Bytes.toBoolean(asyncBytes);
    }
    // in case of async, we report 0 as number of rows updated
    long rowCount = 0;
    StatisticsCollectionRunTracker statsRunTracker = StatisticsCollectionRunTracker.getInstance(config);
    boolean runUpdateStats = statsRunTracker.addUpdateStatsCommandRegion(region.getRegionInfo());
    if (runUpdateStats) {
        if (!async) {
            rowCount = callable.call();
        } else {
            statsRunTracker.runTask(callable);
        }
    } else {
        rowCount = CONCURRENT_UPDATE_STATS_ROW_COUNT;
        logger.info("UPDATE STATISTICS didn't run because another UPDATE STATISTICS command was already running on the region " + region.getRegionInfo().getRegionNameAsString());
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final KeyValue aggKeyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    RegionScanner scanner = new BaseRegionScanner(innerScanner) {

        @Override
        public HRegionInfo getRegionInfo() {
            return region.getRegionInfo();
        }

        @Override
        public boolean isFilterDone() {
            return true;
        }

        @Override
        public void close() throws IOException {
            // No-op because we want to manage closing of the inner scanner ourselves.
            // This happens inside StatsCollectionCallable.
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) ArrayList(java.util.ArrayList) List(java.util.List) StatisticsCollectionRunTracker(org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker)
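
Whether the stats collection runs synchronously or in the background is controlled entirely by the RUN_UPDATE_STATS_ASYNC_ATTRIB scan attribute read at the top of the method. A sketch of how the issuing side could request asynchronous collection, assuming only the attribute name and the Bytes encoding already used above:

// Ask collectStats() to run the StatsCollectionCallable in the background;
// the scan then reports 0 as the number of rows updated.
Scan scan = new Scan();
scan.setAttribute(BaseScannerRegionObserver.RUN_UPDATE_STATS_ASYNC_ATTRIB, Bytes.toBytes(true));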

Aggregations

RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 97
Scan (org.apache.hadoop.hbase.client.Scan): 75
Cell (org.apache.hadoop.hbase.Cell): 59
ArrayList (java.util.ArrayList): 35
Test (org.junit.Test): 35
Put (org.apache.hadoop.hbase.client.Put): 33
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 25
Region (org.apache.hadoop.hbase.regionserver.Region): 20
List (java.util.List): 18
TableId (co.cask.cdap.data2.util.TableId): 17
IOException (java.io.IOException): 14
Delete (org.apache.hadoop.hbase.client.Delete): 14
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 12
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 12
KeyValue (org.apache.hadoop.hbase.KeyValue): 11
Configuration (org.apache.hadoop.conf.Configuration): 9
ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference): 9
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 9
InvocationOnMock (org.mockito.invocation.InvocationOnMock): 8
Result (org.apache.hadoop.hbase.client.Result): 6