
Example 56 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.

From the class TestLocalTableState, the method testOnlyLoadsRequestedColumns.

@SuppressWarnings("unchecked")
@Test
public void testOnlyLoadsRequestedColumns() throws Exception {
    // setup mocks
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Region region = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(region);
    RegionScanner scanner = Mockito.mock(RegionScanner.class);
    Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
    final KeyValue storedKv = new KeyValue(row, fam, qual, ts, Type.Put, Bytes.toBytes("stored-value"));
    storedKv.setSequenceId(2);
    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
            list.add(storedKv);
            return false;
        }
    });
    LocalHBaseState state = new LocalTable(env);
    Put pendingUpdate = new Put(row);
    pendingUpdate.add(fam, qual, ts, val);
    LocalTableState table = new LocalTableState(env, state, pendingUpdate);
    // do the lookup for the given column
    ColumnReference col = new ColumnReference(fam, qual);
    table.setCurrentTimestamp(ts);
    // check that the value is there
    Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData);
    Scanner s = p.getFirst();
    // make sure it only read the table once
    assertEquals("Didn't get the stored keyvalue!", storedKv, s.next());
    // on the second lookup it shouldn't access the underlying table again - the cached columns
    // should know they are done
    p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData);
    s = p.getFirst();
    assertEquals("Lost already loaded update!", storedKv, s.next());
    Mockito.verify(env, Mockito.times(1)).getRegion();
    Mockito.verify(region, Mockito.times(1)).getScanner(Mockito.any(Scan.class));
}
Also used : LocalTable(org.apache.phoenix.hbase.index.covered.data.LocalTable) Scanner(org.apache.phoenix.hbase.index.scanner.Scanner) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) KeyValue(org.apache.hadoop.hbase.KeyValue) Put(org.apache.hadoop.hbase.client.Put) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) LocalHBaseState(org.apache.phoenix.hbase.index.covered.data.LocalHBaseState) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Region(org.apache.hadoop.hbase.regionserver.Region) Scan(org.apache.hadoop.hbase.client.Scan) List(java.util.List) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) Test(org.junit.Test)
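
The env -> Region -> RegionScanner mock chain above is repeated almost verbatim in the next example. As a sketch only (mockEnvReturning is not part of the Phoenix test code; it assumes the imports listed above plus java.io.IOException), the pattern could be factored into a small helper:

@SuppressWarnings("unchecked")
static RegionCoprocessorEnvironment mockEnvReturning(final KeyValue... storedKvs) throws IOException {
    // hypothetical helper: a RegionCoprocessorEnvironment whose Region hands out a scanner
    // that yields the given KeyValues once and then reports that no further rows exist
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Region region = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(region);
    RegionScanner scanner = Mockito.mock(RegionScanner.class);
    Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
            list.addAll(Arrays.asList(storedKvs));
            // returning false signals that no more rows follow this batch
            return false;
        }
    });
    return env;
}

With such a helper, the setup in this test would reduce to RegionCoprocessorEnvironment env = mockEnvReturning(storedKv); before constructing the LocalTable.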

Example 57 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.

From the class TestLocalTableState, the method testCorrectOrderingWithLazyLoadingColumns.

@SuppressWarnings("unchecked")
@Test
public void testCorrectOrderingWithLazyLoadingColumns() throws Exception {
    Put m = new Put(row);
    m.add(fam, qual, ts, val);
    // setup mocks
    Configuration conf = new Configuration(false);
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Mockito.when(env.getConfiguration()).thenReturn(conf);
    Region region = Mockito.mock(Region.class);
    Mockito.when(env.getRegion()).thenReturn(region);
    RegionScanner scanner = Mockito.mock(RegionScanner.class);
    Mockito.when(region.getScanner(Mockito.any(Scan.class))).thenReturn(scanner);
    final byte[] stored = Bytes.toBytes("stored-value");
    Mockito.when(scanner.next(Mockito.any(List.class))).thenAnswer(new Answer<Boolean>() {

        @Override
        public Boolean answer(InvocationOnMock invocation) throws Throwable {
            List<KeyValue> list = (List<KeyValue>) invocation.getArguments()[0];
            KeyValue kv = new KeyValue(row, fam, qual, ts, Type.Put, stored);
            kv.setSequenceId(0);
            list.add(kv);
            return false;
        }
    });
    LocalHBaseState state = new LocalTable(env);
    LocalTableState table = new LocalTableState(env, state, m);
    //add the kvs from the mutation
    table.addPendingUpdates(KeyValueUtil.ensureKeyValues(m.get(fam, qual)));
    // setup the lookup
    ColumnReference col = new ColumnReference(fam, qual);
    table.setCurrentTimestamp(ts);
    //check that our value still shows up first on scan, even though this is a lazy load
    Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData);
    Scanner s = p.getFirst();
    assertEquals("Didn't get the pending mutation's value first", m.get(fam, qual).get(0), s.next());
}
Also used : LocalTable(org.apache.phoenix.hbase.index.covered.data.LocalTable) Scanner(org.apache.phoenix.hbase.index.scanner.Scanner) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) Put(org.apache.hadoop.hbase.client.Put) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) LocalHBaseState(org.apache.phoenix.hbase.index.covered.data.LocalHBaseState) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Region(org.apache.hadoop.hbase.regionserver.Region) Scan(org.apache.hadoop.hbase.client.Scan) List(java.util.List) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) Test(org.junit.Test)

Example 58 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.

From the class TestCoveredColumnIndexCodec, the method testGeneratedIndexUpdates.

/**
   * Test that we get back the correct index updates for a given column group
   * @throws Exception on failure
   */
@Test
public void testGeneratedIndexUpdates() throws Exception {
    ColumnGroup group = new ColumnGroup("test-column-group");
    group.add(COLUMN_REF);
    final Result emptyState = Result.create(Collections.<Cell>emptyList());
    // setup the state we expect for the codec
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration(false);
    Mockito.when(env.getConfiguration()).thenReturn(conf);
    LocalHBaseState table = new SimpleTableState(emptyState);
    // make a new codec on those kvs
    CoveredColumnIndexCodec codec = CoveredColumnIndexCodec.getCodecForTesting(Arrays.asList(group));
    // start with a basic put that has some keyvalues
    Put p = new Put(PK);
    // setup the kvs to add
    List<KeyValue> kvs = new ArrayList<KeyValue>();
    byte[] v1 = Bytes.toBytes("v1");
    KeyValue kv = new KeyValue(PK, FAMILY, QUAL, 1, v1);
    kvs.add(kv);
    p.add(kv);
    byte[] v2 = Bytes.toBytes("v2");
    kv = new KeyValue(PK, Bytes.toBytes("family2"), QUAL, 1, v2);
    kvs.add(kv);
    p.add(kv);
    // check the codec for deletes it should send
    LocalTableState state = new LocalTableState(env, table, p);
    Iterable<IndexUpdate> updates = codec.getIndexDeletes(state, IndexMetaData.NULL_INDEX_META_DATA);
    assertFalse("Found index updates without any existing kvs in table!", updates.iterator().next().isValid());
    // get the updates with the pending update
    state.setCurrentTimestamp(1);
    state.addPendingUpdates(kvs);
    updates = codec.getIndexUpserts(state, IndexMetaData.NULL_INDEX_META_DATA);
    assertTrue("Didn't find index updates for pending primary table update!", updates.iterator().hasNext());
    for (IndexUpdate update : updates) {
        assertTrue("Update marked as invalid, but should be a pending index write!", update.isValid());
        Put m = (Put) update.getUpdate();
        // should just be the single update for the column reference
        byte[] expected = CoveredColumnIndexCodec.composeRowKey(PK, v1.length, Arrays.asList(toColumnEntry(v1)));
        assertArrayEquals("Didn't get expected index value", expected, m.getRow());
    }
    // then apply a delete
    Delete d = new Delete(PK, 2);
    // need to set the timestamp here, as would actually happen on the server, unlike what happens
    // with puts, where they get the constructor-specified timestamp for unspecified methods.
    d.deleteFamily(FAMILY, 2);
    // setup the next batch of 'current state', basically just ripping out the current state from
    // the last round
    table = new SimpleTableState(new Result(kvs));
    state = new LocalTableState(env, table, d);
    state.setCurrentTimestamp(2);
    // check the cleanup of the current table, after the puts (mocking a 'next' update)
    updates = codec.getIndexDeletes(state, IndexMetaData.NULL_INDEX_META_DATA);
    for (IndexUpdate update : updates) {
        assertTrue("Didn't have any index cleanup, even though there is current state", update.isValid());
        Delete m = (Delete) update.getUpdate();
        // should just be the single update for the column reference
        byte[] expected = CoveredColumnIndexCodec.composeRowKey(PK, v1.length, Arrays.asList(toColumnEntry(v1)));
        assertArrayEquals("Didn't get expected index value", expected, m.getRow());
    }
    ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d);
    // now with the delete of the columns
    d = new Delete(PK, 2);
    d.deleteColumns(FAMILY, QUAL, 2);
    ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d);
    // this delete needs to match timestamps exactly, by contract, to have any effect
    d = new Delete(PK, 1);
    d.deleteColumn(FAMILY, QUAL, 1);
    ensureNoUpdatesWhenCoveredByDelete(env, codec, kvs, d);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) KeyValue(org.apache.hadoop.hbase.KeyValue) Configuration(org.apache.hadoop.conf.Configuration) ArrayList(java.util.ArrayList) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) LocalTableState(org.apache.phoenix.hbase.index.covered.LocalTableState) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) LocalHBaseState(org.apache.phoenix.hbase.index.covered.data.LocalHBaseState) IndexUpdate(org.apache.phoenix.hbase.index.covered.IndexUpdate) Test(org.junit.Test)

Example 59 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.

From the class UngroupedAggregateRegionObserver, the method doPostScannerOpen.

@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException, SQLException {
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Region region = env.getRegion();
    long ts = scan.getTimeRange().getMax();
    boolean localIndexScan = ScanUtil.isLocalIndex(scan);
    if (ScanUtil.isAnalyzeTable(scan)) {
        byte[] gp_width_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
        byte[] gp_per_region_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
        // Let this throw, as this scan is being done for the sole purpose of collecting stats
        StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(env, region.getRegionInfo().getTable().getNameAsString(), ts, gp_width_bytes, gp_per_region_bytes);
        return collectStats(s, statsCollector, region, scan, env.getConfiguration());
    } else if (ScanUtil.isIndexRebuild(scan)) {
        return rebuildIndices(s, region, scan, env.getConfiguration());
    }
    int offsetToBe = 0;
    if (localIndexScan) {
        /*
             * For local indexes, we need to set an offset on row key expressions to skip
             * the region start key.
             */
        offsetToBe = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offsetToBe);
    }
    final int offset = offsetToBe;
    PTable projectedTable = null;
    PTable writeToTable = null;
    byte[][] values = null;
    byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
    boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
    if (isDescRowKeyOrderUpgrade) {
        logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
        projectedTable = deserializeTable(descRowKeyTableBytes);
        try {
            writeToTable = PTableImpl.makePTable(projectedTable, true);
        } catch (SQLException e) {
            // Impossible
            ServerUtil.throwIOException("Upgrade failed", e);
        }
        values = new byte[projectedTable.getPKColumns().size()][];
    }
    boolean useProto = false;
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);
    useProto = localIndexBytes != null;
    if (localIndexBytes == null) {
        localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
    }
    List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes, useProto);
    MutationList indexMutations = localIndexBytes == null ? new MutationList() : new MutationList(1024);
    RegionScanner theScanner = s;
    boolean replayMutations = scan.getAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS) != null;
    byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
    byte[] txState = scan.getAttribute(BaseScannerRegionObserver.TX_STATE);
    List<Expression> selectExpressions = null;
    byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
    boolean isUpsert = false;
    boolean isDelete = false;
    byte[] deleteCQ = null;
    byte[] deleteCF = null;
    byte[] emptyCF = null;
    HTable targetHTable = null;
    boolean areMutationInSameRegion = true;
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    if (upsertSelectTable != null) {
        isUpsert = true;
        projectedTable = deserializeTable(upsertSelectTable);
        targetHTable = new HTable(env.getConfiguration(), projectedTable.getPhysicalName().getBytes());
        selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
        values = new byte[projectedTable.getPKColumns().size()][];
        areMutationInSameRegion = Bytes.compareTo(targetHTable.getTableName(), region.getTableDesc().getTableName().getName()) == 0 && !ExpressionUtil.isPkPositionChanging(new TableRef(projectedTable), selectExpressions);
    } else {
        byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
        isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
        if (!isDelete) {
            deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
            deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
        }
        emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
    }
    TupleProjector tupleProjector = null;
    byte[][] viewConstants = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
    if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
        if (dataColumns != null) {
            tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        }
        ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
        theScanner = getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector, region, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr, useQualifierAsIndex);
    }
    if (j != null) {
        theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), env, useQualifierAsIndex, useNewValueColumnQualifier);
    }
    int maxBatchSize = 0;
    long maxBatchSizeBytes = 0L;
    MutationList mutations = new MutationList();
    boolean needToWrite = false;
    Configuration conf = c.getEnvironment().getConfiguration();
    long flushSize = region.getTableDesc().getMemStoreFlushSize();
    if (flushSize <= 0) {
        flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
    }
    /*
     * Slow down the writes if the memstore size is more than
     * (hbase.hregion.memstore.block.multiplier - 1) times hbase.hregion.memstore.flush.size
     * bytes. This avoids a flush storm to HDFS for cases like index building, where reads and
     * writes happen to all the table regions in the server.
     */
    final long blockingMemStoreSize = flushSize * (conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER) - 1);
    boolean buildLocalIndex = indexMaintainers != null && dataColumns == null && !localIndexScan;
    if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
        needToWrite = true;
        maxBatchSize = env.getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
        mutations = new MutationList(Ints.saturatedCast(maxBatchSize + maxBatchSize / 10));
        maxBatchSizeBytes = env.getConfiguration().getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
    }
    Aggregators aggregators = ServerAggregators.deserialize(scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), env.getConfiguration());
    Aggregator[] rowAggregators = aggregators.getAggregators();
    boolean hasMore;
    boolean hasAny = false;
    Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
    Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " " + region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
    }
    int rowCount = 0;
    final RegionScanner innerScanner = theScanner;
    boolean useIndexProto = true;
    byte[] indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
    // for backward compatibility, fall back to looking it up by the old attribute
    if (indexMaintainersPtr == null) {
        indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
        useIndexProto = false;
    }
    boolean acquiredLock = false;
    try {
        if (needToWrite) {
            synchronized (lock) {
                scansReferenceCount++;
            }
        }
        region.startRegionOperation();
        acquiredLock = true;
        synchronized (innerScanner) {
            do {
                List<Cell> results = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList<Cell>();
                // Results are potentially returned even when the return value of nextRaw is false,
                // since that value only indicates whether or not there are more values after the
                // ones returned
                hasMore = innerScanner.nextRaw(results);
                if (!results.isEmpty()) {
                    rowCount++;
                    result.setKeyValues(results);
                    if (isDescRowKeyOrderUpgrade) {
                        Arrays.fill(values, null);
                        Cell firstKV = results.get(0);
                        RowKeySchema schema = projectedTable.getRowKeySchema();
                        int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
                        for (int i = 0; i < schema.getFieldCount(); i++) {
                            Boolean hasValue = schema.next(ptr, i, maxOffset);
                            if (hasValue == null) {
                                break;
                            }
                            Field field = schema.getField(i);
                            if (field.getSortOrder() == SortOrder.DESC) {
                                // Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
                                if (field.getDataType().isArrayType()) {
                                    field.getDataType().coerceBytes(ptr, null, field.getDataType(), field.getMaxLength(), field.getScale(), field.getSortOrder(), field.getMaxLength(), field.getScale(), field.getSortOrder(), true); // force to use correct separator byte
                                } else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
                                    // Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
                                    int len = ptr.getLength();
                                    while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                        len--;
                                    }
                                    ptr.set(ptr.get(), ptr.getOffset(), len);
                                // Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
                                } else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
                                    byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
                                    ptr.set(invertedBytes);
                                }
                            } else if (field.getDataType() == PBinary.INSTANCE) {
                                // Remove trailing space characters so that the setValues call below will replace them
                                // with the correct zero byte character. Note this is somewhat dangerous as these
                                // could be legit, but I don't know what the alternative is.
                                int len = ptr.getLength();
                                while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                    len--;
                                }
                                ptr.set(ptr.get(), ptr.getOffset(), len);
                            }
                            values[i] = ptr.copyBytes();
                        }
                        writeToTable.newKey(ptr, values);
                        if (Bytes.compareTo(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr.get(), ptr.getOffset() + offset, ptr.getLength()) == 0) {
                            continue;
                        }
                        byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
                        if (offset > 0) {
                            // for local indexes (prepend region start key)
                            byte[] newRowWithOffset = new byte[offset + newRow.length];
                            System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);
                            System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
                            newRow = newRowWithOffset;
                        }
                        byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
                        for (Cell cell : results) {
                            // Copy existing cell but with new row key
                            Cell newCell = new KeyValue(newRow, 0, newRow.length, cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()), cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                            switch(KeyValue.Type.codeToType(cell.getTypeByte())) {
                                case Put:
                                    // For a Put, point-delete the old cell and re-put it under the new row key
                                    Delete del = new Delete(oldRow);
                                    del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.Delete, ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
                                    mutations.add(del);
                                    Put put = new Put(newRow);
                                    put.add(newCell);
                                    mutations.add(put);
                                    break;
                                case Delete:
                                case DeleteColumn:
                                case DeleteFamily:
                                case DeleteFamilyVersion:
                                    Delete delete = new Delete(newRow);
                                    delete.addDeleteMarker(newCell);
                                    mutations.add(delete);
                                    break;
                            }
                        }
                    } else if (buildLocalIndex) {
                        for (IndexMaintainer maintainer : indexMaintainers) {
                            if (!results.isEmpty()) {
                                result.getKey(ptr);
                                ValueGetter valueGetter = maintainer.createGetterFromKeyValues(ImmutableBytesPtr.copyBytesIfNecessary(ptr), results);
                                Put put = maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, results.get(0).getTimestamp(), env.getRegion().getRegionInfo().getStartKey(), env.getRegion().getRegionInfo().getEndKey());
                                indexMutations.add(put);
                            }
                        }
                        result.setKeyValues(results);
                    } else if (isDelete) {
                        // FIXME: the version of the Delete constructor without the lock
                        // args was introduced in 0.94.4, thus if we try to use it here
                        // we can no longer use the 0.94.2 version of the client.
                        Cell firstKV = results.get(0);
                        Delete delete = new Delete(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength(), ts);
                        if (replayMutations) {
                            delete.setAttribute(IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
                        }
                        mutations.add(delete);
                        // force Tephra to ignore this delete
                        delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                    } else if (isUpsert) {
                        Arrays.fill(values, null);
                        int bucketNumOffset = 0;
                        if (projectedTable.getBucketNum() != null) {
                            values[0] = new byte[] { 0 };
                            bucketNumOffset = 1;
                        }
                        int i = bucketNumOffset;
                        List<PColumn> projectedColumns = projectedTable.getColumns();
                        for (; i < projectedTable.getPKColumns().size(); i++) {
                            Expression expression = selectExpressions.get(i - bucketNumOffset);
                            if (expression.evaluate(result, ptr)) {
                                values[i] = ptr.copyBytes();
                                // if the expression's sort order doesn't match that of the column being projected into, then invert the bits
                                if (expression.getSortOrder() != projectedColumns.get(i).getSortOrder()) {
                                    SortOrder.invert(values[i], 0, values[i], 0, values[i].length);
                                }
                            } else {
                                values[i] = ByteUtil.EMPTY_BYTE_ARRAY;
                            }
                        }
                        projectedTable.newKey(ptr, values);
                        PRow row = projectedTable.newRow(kvBuilder, ts, ptr, false);
                        for (; i < projectedColumns.size(); i++) {
                            Expression expression = selectExpressions.get(i - bucketNumOffset);
                            if (expression.evaluate(result, ptr)) {
                                PColumn column = projectedColumns.get(i);
                                if (!column.getDataType().isSizeCompatible(ptr, null, expression.getDataType(), expression.getSortOrder(), expression.getMaxLength(), expression.getScale(), column.getMaxLength(), column.getScale())) {
                                    throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), column.getScale(), column.getName().getString(), ptr);
                                }
                                column.getDataType().coerceBytes(ptr, null, expression.getDataType(), expression.getMaxLength(), expression.getScale(), expression.getSortOrder(), column.getMaxLength(), column.getScale(), column.getSortOrder(), projectedTable.rowKeyOrderOptimizable());
                                byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                                row.setValue(column, bytes);
                            }
                        }
                        for (Mutation mutation : row.toRowMutations()) {
                            if (replayMutations) {
                                mutation.setAttribute(IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
                            }
                            mutations.add(mutation);
                        }
                        for (i = 0; i < selectExpressions.size(); i++) {
                            selectExpressions.get(i).reset();
                        }
                    } else if (deleteCF != null && deleteCQ != null) {
                        // if no empty key value is being set
                        if (emptyCF == null || result.getValue(deleteCF, deleteCQ) != null) {
                            Delete delete = new Delete(results.get(0).getRowArray(), results.get(0).getRowOffset(), results.get(0).getRowLength());
                            delete.deleteColumns(deleteCF, deleteCQ, ts);
                            // force Tephra to ignore this delete
                            delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                            mutations.add(delete);
                        }
                    }
                    if (emptyCF != null) {
                        /*
                             * If we've specified an emptyCF, then we need to insert an empty
                             * key value "retroactively" for any key value that is visible at
                             * the timestamp that the DDL was issued. Key values that are not
                             * visible at this timestamp will not ever be projected up to
                             * scans past this timestamp, so don't need to be considered.
                             * We insert one empty key value per row per timestamp.
                             */
                        Set<Long> timeStamps = Sets.newHashSetWithExpectedSize(results.size());
                        for (Cell kv : results) {
                            long kvts = kv.getTimestamp();
                            if (!timeStamps.contains(kvts)) {
                                Put put = new Put(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
                                put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts, ByteUtil.EMPTY_BYTE_ARRAY);
                                mutations.add(put);
                            }
                        }
                    }
                    if (readyToCommit(rowCount, mutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
                        commit(region, mutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr, txState, areMutationInSameRegion, targetHTable, useIndexProto);
                        mutations.clear();
                    }
                    if (readyToCommit(rowCount, indexMutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
                        commitBatch(region, indexMutations, null, blockingMemStoreSize, null, txState, useIndexProto);
                        indexMutations.clear();
                    }
                    aggregators.aggregate(rowAggregators, result);
                    hasAny = true;
                }
            } while (hasMore);
            if (!mutations.isEmpty()) {
                commit(region, mutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr, txState, areMutationInSameRegion, targetHTable, useIndexProto);
                mutations.clear();
            }
            if (!indexMutations.isEmpty()) {
                commitBatch(region, indexMutations, null, blockingMemStoreSize, indexMaintainersPtr, txState, useIndexProto);
                indexMutations.clear();
            }
        }
    } finally {
        if (needToWrite) {
            synchronized (lock) {
                scansReferenceCount--;
            }
        }
        if (targetHTable != null) {
            targetHTable.close();
        }
        try {
            innerScanner.close();
        } finally {
            if (acquiredLock)
                region.closeRegionOperation();
        }
    }
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
    }
    final boolean hadAny = hasAny;
    KeyValue keyValue = null;
    if (hadAny) {
        byte[] value = aggregators.toBytes(rowAggregators);
        keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
    }
    final KeyValue aggKeyValue = keyValue;
    RegionScanner scanner = new BaseRegionScanner(innerScanner) {

        private boolean done = !hadAny;

        @Override
        public boolean isFilterDone() {
            return done;
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            if (done)
                return false;
            done = true;
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Configuration(org.apache.hadoop.conf.Configuration) TupleProjector(org.apache.phoenix.execute.TupleProjector) PTable(org.apache.phoenix.schema.PTable) ValueGetter(org.apache.phoenix.hbase.index.ValueGetter) EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) ArrayList(java.util.ArrayList) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) DataExceedsCapacityException(org.apache.phoenix.exception.DataExceedsCapacityException) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple) Aggregators(org.apache.phoenix.expression.aggregator.Aggregators) ServerAggregators(org.apache.phoenix.expression.aggregator.ServerAggregators) PLong(org.apache.phoenix.schema.types.PLong) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) KeyValue(org.apache.hadoop.hbase.KeyValue) StatisticsCollector(org.apache.phoenix.schema.stats.StatisticsCollector) SQLException(java.sql.SQLException) HTable(org.apache.hadoop.hbase.client.HTable) PRow(org.apache.phoenix.schema.PRow) PColumn(org.apache.phoenix.schema.PColumn) Field(org.apache.phoenix.schema.ValueSchema.Field) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) Aggregator(org.apache.phoenix.expression.aggregator.Aggregator) RowKeySchema(org.apache.phoenix.schema.RowKeySchema) Put(org.apache.hadoop.hbase.client.Put) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Expression(org.apache.phoenix.expression.Expression) HashJoinInfo(org.apache.phoenix.join.HashJoinInfo) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple) TableRef(org.apache.phoenix.schema.TableRef) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) Tuple(org.apache.phoenix.schema.tuple.Tuple) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple)
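
Note that doPostScannerOpen is driven entirely by attributes serialized onto the client-side Scan (UPSERT_SELECT_TABLE, DELETE_AGG, LOCAL_INDEX_BUILD_PROTO, and so on). As an illustration only (not Phoenix's actual client code path; the method name and timestamp parameter are hypothetical), a caller would steer the scan into the server-side delete branch roughly like this, using the same constants the server reads back:

private static Scan newServerSideDeleteScan(long maxTimestamp) throws IOException {
    Scan scan = new Scan();
    // the server compares this attribute against PDataType.TRUE_BYTES to enable the delete branch
    scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, PDataType.TRUE_BYTES);
    // the server derives its delete timestamp from scan.getTimeRange().getMax()
    scan.setTimeRange(0, maxTimestamp);
    return scan;
}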

Example 60 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.

From the class PhoenixTransactionalIndexer, the method start.

@Override
public void start(CoprocessorEnvironment e) throws IOException {
    final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
    String serverName = env.getRegionServerServices().getServerName().getServerName();
    codec = new PhoenixIndexCodec();
    codec.initialize(env);
    // setup the actual index writer
    // For transactional tables, we keep the index active upon a write failure
    // since we have all-or-none behavior for transactions.
    this.writer = new IndexWriter(new LeaveIndexActiveFailurePolicy(), env, serverName + "-tx-index-writer");
}
Also used : RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) IndexWriter(org.apache.phoenix.hbase.index.write.IndexWriter) LeaveIndexActiveFailurePolicy(org.apache.phoenix.hbase.index.write.LeaveIndexActiveFailurePolicy)
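
For context, an observer like this is attached to a table through its descriptor, after which start(CoprocessorEnvironment) runs whenever a region of that table opens. A minimal deployment sketch (table name, column family, and the Admin handle are illustrative; Phoenix itself registers its coprocessors automatically during table creation rather than by hand like this):

HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("MY_TX_TABLE"));
desc.addFamily(new HColumnDescriptor("0"));
desc.addCoprocessor(PhoenixTransactionalIndexer.class.getName());
// 'admin' is an org.apache.hadoop.hbase.client.Admin obtained from an open Connection
admin.createTable(desc);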
