
Example 6 with DataExceedsCapacityException

Use of org.apache.phoenix.exception.DataExceedsCapacityException in project phoenix by apache.

From the class PTableImpl, method newKey.

@Override
public int newKey(ImmutableBytesWritable key, byte[][] values) {
    List<PColumn> columns = getPKColumns();
    int nValues = values.length;
    while (nValues > 0 && (values[nValues - 1] == null || values[nValues - 1].length == 0)) {
        nValues--;
    }
    for (PColumn column : columns) {
        if (column.getExpressionStr() != null) {
            nValues++;
        }
    }
    int i = 0;
    TrustedByteArrayOutputStream os = new TrustedByteArrayOutputStream(SchemaUtil.estimateKeyLength(this));
    try {
        Integer bucketNum = this.getBucketNum();
        if (bucketNum != null) {
            // Write placeholder for the salt byte
            i++;
            os.write(QueryConstants.SEPARATOR_BYTE_ARRAY);
        }
        int nColumns = columns.size();
        PDataType type = null;
        SortOrder sortOrder = null;
        boolean wasNull = false;
        while (i < nValues && i < nColumns) {
            // Separate variable length column values in key with zero byte
            if (type != null && !type.isFixedWidth()) {
                os.write(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable(), wasNull, sortOrder));
            }
            PColumn column = columns.get(i);
            sortOrder = column.getSortOrder();
            type = column.getDataType();
            // This will throw if the value is null and the type doesn't allow null
            byte[] byteValue = values[i++];
            if (byteValue == null) {
                if (column.getExpressionStr() != null) {
                    try {
                        String url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + PhoenixRuntime.CONNECTIONLESS;
                        PhoenixConnection conn = DriverManager.getConnection(url).unwrap(PhoenixConnection.class);
                        StatementContext context = new StatementContext(new PhoenixStatement(conn));
                        ExpressionCompiler compiler = new ExpressionCompiler(context);
                        ParseNode defaultParseNode = new SQLParser(column.getExpressionStr()).parseExpression();
                        Expression defaultExpression = defaultParseNode.accept(compiler);
                        defaultExpression.evaluate(null, key);
                        column.getDataType().coerceBytes(key, null, defaultExpression.getDataType(), defaultExpression.getMaxLength(), defaultExpression.getScale(), defaultExpression.getSortOrder(), column.getMaxLength(), column.getScale(), column.getSortOrder());
                        byteValue = ByteUtil.copyKeyBytesIfNecessary(key);
                    } catch (SQLException e) {
                        // should not be possible
                        throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " failed to compile default value expression of " + column.getExpressionStr());
                    }
                } else {
                    byteValue = ByteUtil.EMPTY_BYTE_ARRAY;
                }
            }
            wasNull = byteValue.length == 0;
            // A zero-length value means null; reject it here for non-nullable columns.
            if (byteValue.length == 0 && !column.isNullable()) {
                throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null");
            }
            Integer maxLength = column.getMaxLength();
            Integer scale = column.getScale();
            key.set(byteValue);
            if (!type.isSizeCompatible(key, null, type, sortOrder, null, null, maxLength, scale)) {
                throw new DataExceedsCapacityException(name.getString() + "." + column.getName().getString() + " may not exceed " + maxLength + " (" + SchemaUtil.toString(type, byteValue) + ")");
            }
            key.set(byteValue);
            type.pad(key, maxLength, sortOrder);
            byteValue = ByteUtil.copyKeyBytesIfNecessary(key);
            os.write(byteValue, 0, byteValue.length);
        }
        // Need trailing byte for DESC columns
        if (type != null && !type.isFixedWidth() && SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable(), wasNull, sortOrder) == QueryConstants.DESC_SEPARATOR_BYTE) {
            os.write(QueryConstants.DESC_SEPARATOR_BYTE);
        }
        // If any remaining required (fixed-width or non-nullable) PK values aren't set, then throw
        if (i < nColumns) {
            PColumn column = columns.get(i);
            if (column.getDataType().isFixedWidth() || !column.isNullable()) {
                throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null");
            }
        }
        if (nValues == 0) {
            throw new ConstraintViolationException("Primary key may not be null (" + name.getString() + ")");
        }
        byte[] buf = os.getBuffer();
        int size = os.size();
        if (bucketNum != null) {
            buf[0] = SaltingUtil.getSaltingByte(buf, 1, size - 1, bucketNum);
        }
        key.set(buf, 0, size);
        return i;
    } finally {
        try {
            os.close();
        } catch (IOException e) {
            // Impossible
            throw new RuntimeException(e);
        }
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) SQLException(java.sql.SQLException) TrustedByteArrayOutputStream(org.apache.phoenix.util.TrustedByteArrayOutputStream) IOException(java.io.IOException) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) StatementContext(org.apache.phoenix.compile.StatementContext) DataExceedsCapacityException(org.apache.phoenix.exception.DataExceedsCapacityException) PDataType(org.apache.phoenix.schema.types.PDataType) SQLParser(org.apache.phoenix.parse.SQLParser) SingleCellConstructorExpression(org.apache.phoenix.expression.SingleCellConstructorExpression) Expression(org.apache.phoenix.expression.Expression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ParseNode(org.apache.phoenix.parse.ParseNode) ExpressionCompiler(org.apache.phoenix.compile.ExpressionCompiler)
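
The size check at the heart of newKey (type.isSizeCompatible followed by DataExceedsCapacityException) is what rejects an oversized primary-key value. Below is a minimal, hedged sketch of how that rejection typically surfaces through the Phoenix JDBC driver when upserting into a CHAR(2) primary-key column. The connection URL, table, and column names are illustrative assumptions, and depending on the code path the error may arrive as a DataExceedsCapacityException directly (it is unchecked) or wrapped in a SQLException.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

public class OversizedPkExample {
    public static void main(String[] args) throws SQLException {
        // Hypothetical connection string and table; adjust to your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
            try (Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE IF NOT EXISTS T_CAPACITY (PK CHAR(2) NOT NULL PRIMARY KEY, V VARCHAR)");
            }
            try (PreparedStatement ps = conn.prepareStatement("UPSERT INTO T_CAPACITY (PK, V) VALUES (?, ?)")) {
                ps.setString(1, "ABC"); // three characters into CHAR(2): exceeds the declared max length
                ps.setString(2, "value");
                ps.executeUpdate();
                conn.commit();
            } catch (Exception e) {
                // Expected to fail with DataExceedsCapacityException (possibly wrapped in a
                // SQLException), raised by the size-compatibility check shown in newKey above.
                System.out.println("Rejected as expected: " + e);
            }
        }
    }
}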

Example 7 with DataExceedsCapacityException

Use of org.apache.phoenix.exception.DataExceedsCapacityException in project phoenix by apache.

From the class ArithmeticOperationTest, method testDecimalSubtraction.

// Subtraction
// result scale should be: max(ls, rs)
// result precision should be: max(lp - ls, rp - rs) + 1 + max(ls, rs)
@Test
public void testDecimalSubtraction() throws Exception {
    LiteralExpression op1, op2, op3;
    List<Expression> children;
    DecimalSubtractExpression e;
    op1 = LiteralExpression.newConstant(new BigDecimal("1234567890123456789012345678901"), PDecimal.INSTANCE, 31, 0);
    op2 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0);
    children = Arrays.<Expression>asList(op1, op2);
    e = new DecimalSubtractExpression(children);
    assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1234567890123456789012345666556"));
    op1 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0);
    op2 = LiteralExpression.newConstant(new BigDecimal("123.45"), PDecimal.INSTANCE, 5, 2);
    children = Arrays.<Expression>asList(op1, op2);
    e = new DecimalSubtractExpression(children);
    assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("12221.55"));
    // Exceeds precision.
    op1 = LiteralExpression.newConstant(new BigDecimal("99999999999999999999999999999999999999"), PDecimal.INSTANCE, 38, 0);
    op2 = LiteralExpression.newConstant(new BigDecimal("-123"), PDecimal.INSTANCE, 3, 0);
    children = Arrays.<Expression>asList(op1, op2);
    e = new DecimalSubtractExpression(children);
    try {
        e.evaluate(null, new ImmutableBytesWritable());
        fail("Evaluation should have failed");
    } catch (DataExceedsCapacityException ex) {
    }
    // Passes, since we roll up precision and scale before imposing them.
    op1 = LiteralExpression.newConstant(new BigDecimal("99999999999999999999999999999999999999"), PDecimal.INSTANCE, 38, 0);
    op2 = LiteralExpression.newConstant(new BigDecimal("-123"), PDecimal.INSTANCE, 3, 0);
    op3 = LiteralExpression.newConstant(new BigDecimal("123"), PDecimal.INSTANCE, 3, 0);
    children = Arrays.<Expression>asList(op1, op2, op3);
    e = new DecimalSubtractExpression(children);
    assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("99999999999999999999999999999999999999"));
    // Exceeds scale.
    op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), PDecimal.INSTANCE, 38, 0);
    op2 = LiteralExpression.newConstant(new BigDecimal("123.45"), PDecimal.INSTANCE, 5, 2);
    children = Arrays.<Expression>asList(op1, op2);
    e = new DecimalSubtractExpression(children);
    try {
        e.evaluate(null, new ImmutableBytesWritable());
        fail("Evaluation should have failed");
    } catch (DataExceedsCapacityException ex) {
    }
    // Decimal with no precision and scale.
    op1 = LiteralExpression.newConstant(new BigDecimal("1111.1"), PDecimal.INSTANCE);
    op2 = LiteralExpression.newConstant(new BigDecimal("1.1111"), PDecimal.INSTANCE, 5, 4);
    children = Arrays.<Expression>asList(op1, op2);
    e = new DecimalSubtractExpression(children);
    assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1109.9889"));
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) DataExceedsCapacityException(org.apache.phoenix.exception.DataExceedsCapacityException) BigDecimal(java.math.BigDecimal) Test(org.junit.Test)
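
The comment at the top of this test gives the subtraction rules for the derived result type. As a quick cross-check, here is a small, self-contained sketch (plain Java, no Phoenix types, and assuming the 38-digit maximum DECIMAL precision the literals above are declared with) showing why the two failing cases exceed the limit while the first case fits:

public class SubtractionPrecisionSketch {
    // Subtraction rules from the test comment:
    //   result scale     = max(ls, rs)
    //   result precision = max(lp - ls, rp - rs) + 1 + max(ls, rs)
    static void check(int lp, int ls, int rp, int rs) {
        int scale = Math.max(ls, rs);
        int precision = Math.max(lp - ls, rp - rs) + 1 + scale;
        System.out.println("DECIMAL(" + lp + "," + ls + ") - DECIMAL(" + rp + "," + rs + ")"
                + " -> precision " + precision + ", scale " + scale
                + (precision > 38 ? " (exceeds 38, DataExceedsCapacityException expected)" : ""));
    }

    public static void main(String[] args) {
        check(31, 0, 5, 0);  // first passing case: precision 32, scale 0 -> fits
        check(38, 0, 3, 0);  // "Exceeds precision" case: precision 39 -> exceeds 38
        check(38, 0, 5, 2);  // "Exceeds scale" case: precision 41, scale 2 -> also exceeds 38
    }
}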

Example 8 with DataExceedsCapacityException

Use of org.apache.phoenix.exception.DataExceedsCapacityException in project phoenix by apache.

From the class ArithmeticOperationTest, method testDecimalMultiplication.

// Multiplication
// result scale should be: ls + rs
// result precision should be: lp + rp
@Test
public void testDecimalMultiplication() throws Exception {
    LiteralExpression op1, op2;
    List<Expression> children;
    DecimalMultiplyExpression e;
    op1 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0);
    op2 = LiteralExpression.newConstant(new BigDecimal("123.45"), PDecimal.INSTANCE, 5, 2);
    children = Arrays.<Expression>asList(op1, op2);
    e = new DecimalMultiplyExpression(children);
    assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1523990.25"));
    // Value too big, exceeds precision.
    op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), PDecimal.INSTANCE, 38, 0);
    op2 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0);
    children = Arrays.<Expression>asList(op1, op2);
    e = new DecimalMultiplyExpression(children);
    try {
        e.evaluate(null, new ImmutableBytesWritable());
        fail("Evaluation should have failed");
    } catch (DataExceedsCapacityException ex) {
    }
    // Value exceeds scale.
    op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), PDecimal.INSTANCE, 38, 0);
    op2 = LiteralExpression.newConstant(new BigDecimal("1.45"), PDecimal.INSTANCE, 3, 2);
    children = Arrays.<Expression>asList(op1, op2);
    e = new DecimalMultiplyExpression(children);
    try {
        e.evaluate(null, new ImmutableBytesWritable());
        fail("Evaluation should have failed");
    } catch (DataExceedsCapacityException ex) {
    }
    // Decimal with no precision and scale.
    op1 = LiteralExpression.newConstant(new BigDecimal("1111.1"), PDecimal.INSTANCE);
    assertNull(op1.getScale());
    op2 = LiteralExpression.newConstant(new BigDecimal("1.1111"), PDecimal.INSTANCE, 5, 4);
    children = Arrays.<Expression>asList(op1, op2);
    e = new DecimalMultiplyExpression(children);
    assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1234.54321"));
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) DataExceedsCapacityException(org.apache.phoenix.exception.DataExceedsCapacityException) BigDecimal(java.math.BigDecimal) Test(org.junit.Test)
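
Same cross-check for multiplication, where the comment above states result scale = ls + rs and result precision = lp + rp; a hedged sketch (plain Java, assuming the 38-digit cap used throughout these tests) of why both failing cases blow past the limit:

public class MultiplicationPrecisionSketch {
    // Multiplication rules from the test comment:
    //   result scale     = ls + rs
    //   result precision = lp + rp
    static void check(int lp, int ls, int rp, int rs) {
        int scale = ls + rs;
        int precision = lp + rp;
        System.out.println("DECIMAL(" + lp + "," + ls + ") * DECIMAL(" + rp + "," + rs + ")"
                + " -> precision " + precision + ", scale " + scale
                + (precision > 38 ? " (exceeds 38, DataExceedsCapacityException expected)" : ""));
    }

    public static void main(String[] args) {
        check(5, 0, 5, 2);   // 12345 * 123.45: precision 10, scale 2 -> fits
        check(38, 0, 5, 0);  // "Value too big" case: precision 43 -> exceeds 38
        check(38, 0, 3, 2);  // "Value exceeds scale" case: precision 41, scale 2 -> exceeds 38
    }
}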

Example 9 with DataExceedsCapacityException

Use of org.apache.phoenix.exception.DataExceedsCapacityException in project phoenix by apache.

From the class ArithmeticOperationTest, method testDecimalDivision.

// Division
// result scale should be: 31 - lp + ls - rs
// result precision should be: lp - ls + rp + scale
@Test
public void testDecimalDivision() throws Exception {
    LiteralExpression op1, op2;
    List<Expression> children;
    DecimalDivideExpression e;
    // The value should be 1234500.0000...00 because we set the scale to 24. However, in
    // PhoenixResultSet.getBigDecimal, the cast to (BigDecimal) actually causes the scale to be
    // eradicated. As a result, the resulting value does not have the right form.
    op1 = LiteralExpression.newConstant(new BigDecimal("12345"), PDecimal.INSTANCE, 5, 0);
    op2 = LiteralExpression.newConstant(new BigDecimal("0.01"), PDecimal.INSTANCE, 2, 2);
    children = Arrays.<Expression>asList(op1, op2);
    e = new DecimalDivideExpression(children);
    assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("1.2345E+6"));
    // Exceeds precision.
    op1 = LiteralExpression.newConstant(new BigDecimal("12345678901234567890123456789012345678"), PDecimal.INSTANCE, 38, 0);
    op2 = LiteralExpression.newConstant(new BigDecimal("0.01"), PDecimal.INSTANCE, 2, 2);
    children = Arrays.<Expression>asList(op1, op2);
    e = new DecimalDivideExpression(children);
    try {
        e.evaluate(null, new ImmutableBytesWritable());
        fail("Evaluation should have failed");
    } catch (DataExceedsCapacityException ex) {
    }
    // Decimal with no precision and scale.
    op1 = LiteralExpression.newConstant(new BigDecimal("10"), PDecimal.INSTANCE);
    op2 = LiteralExpression.newConstant(new BigDecimal("3"), PDecimal.INSTANCE, 5, 4);
    assertEquals(Integer.valueOf(4), op2.getScale());
    children = Arrays.<Expression>asList(op1, op2);
    e = new DecimalDivideExpression(children);
    assertEqualValue(e, PDecimal.INSTANCE, new BigDecimal("3.3333333333333333333333333333333333333"));
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) DataExceedsCapacityException(org.apache.phoenix.exception.DataExceedsCapacityException) BigDecimal(java.math.BigDecimal) Test(org.junit.Test)
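
Applying the division rules quoted above to the first case (DECIMAL(5,0) / DECIMAL(2,2)) reproduces the scale of 24 that the in-test comment mentions. A small sketch of that arithmetic (plain Java; the 38-digit cap from the other tests is assumed but not reached here):

public class DivisionScaleSketch {
    public static void main(String[] args) {
        // Division rules from the test comment:
        //   result scale     = 31 - lp + ls - rs
        //   result precision = lp - ls + rp + scale
        // First case above: DECIMAL(5,0) / DECIMAL(2,2), i.e. 12345 / 0.01
        int lp = 5, ls = 0, rp = 2, rs = 2;
        int scale = 31 - lp + ls - rs;          // 24, the scale the in-test comment refers to
        int precision = lp - ls + rp + scale;   // 31, within the 38-digit limit
        System.out.println("scale=" + scale + ", precision=" + precision);
        // The numeric value itself is simply 12345 / 0.01 = 1234500, i.e. 1.2345E+6 as asserted
        // above; the derived scale of 24 is what PhoenixResultSet's cast to BigDecimal drops.
    }
}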

Example 10 with DataExceedsCapacityException

Use of org.apache.phoenix.exception.DataExceedsCapacityException in project phoenix by apache.

From the class UngroupedAggregateRegionObserver, method doPostScannerOpen.

@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException, SQLException {
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Region region = env.getRegion();
    long ts = scan.getTimeRange().getMax();
    boolean localIndexScan = ScanUtil.isLocalIndex(scan);
    if (ScanUtil.isAnalyzeTable(scan)) {
        byte[] gp_width_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
        byte[] gp_per_region_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
        // Let this throw, as this scan is being done for the sole purpose of collecting stats
        StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(env, region.getRegionInfo().getTable().getNameAsString(), ts, gp_width_bytes, gp_per_region_bytes);
        return collectStats(s, statsCollector, region, scan, env.getConfiguration());
    } else if (ScanUtil.isIndexRebuild(scan)) {
        return rebuildIndices(s, region, scan, env.getConfiguration());
    }
    int offsetToBe = 0;
    if (localIndexScan) {
        /*
         * For local indexes, we need to set an offset on row key expressions to skip
         * the region start key.
         */
        offsetToBe = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offsetToBe);
    }
    final int offset = offsetToBe;
    PTable projectedTable = null;
    PTable writeToTable = null;
    byte[][] values = null;
    byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
    boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
    if (isDescRowKeyOrderUpgrade) {
        logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
        projectedTable = deserializeTable(descRowKeyTableBytes);
        try {
            writeToTable = PTableImpl.makePTable(projectedTable, true);
        } catch (SQLException e) {
            // Impossible
            ServerUtil.throwIOException("Upgrade failed", e);
        }
        values = new byte[projectedTable.getPKColumns().size()][];
    }
    boolean useProto = false;
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);
    useProto = localIndexBytes != null;
    if (localIndexBytes == null) {
        localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
    }
    List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes, useProto);
    MutationList indexMutations = localIndexBytes == null ? new MutationList() : new MutationList(1024);
    RegionScanner theScanner = s;
    boolean replayMutations = scan.getAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS) != null;
    byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
    byte[] txState = scan.getAttribute(BaseScannerRegionObserver.TX_STATE);
    List<Expression> selectExpressions = null;
    byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
    boolean isUpsert = false;
    boolean isDelete = false;
    byte[] deleteCQ = null;
    byte[] deleteCF = null;
    byte[] emptyCF = null;
    HTable targetHTable = null;
    boolean areMutationInSameRegion = true;
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    if (upsertSelectTable != null) {
        isUpsert = true;
        projectedTable = deserializeTable(upsertSelectTable);
        targetHTable = new HTable(env.getConfiguration(), projectedTable.getPhysicalName().getBytes());
        selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
        values = new byte[projectedTable.getPKColumns().size()][];
        areMutationInSameRegion = Bytes.compareTo(targetHTable.getTableName(), region.getTableDesc().getTableName().getName()) == 0 && !ExpressionUtil.isPkPositionChanging(new TableRef(projectedTable), selectExpressions);
    } else {
        byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
        isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
        if (!isDelete) {
            deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
            deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
        }
        emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
    }
    TupleProjector tupleProjector = null;
    byte[][] viewConstants = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
    if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
        if (dataColumns != null) {
            tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        }
        ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
        theScanner = getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector, region, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr, useQualifierAsIndex);
    }
    if (j != null) {
        theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), env, useQualifierAsIndex, useNewValueColumnQualifier);
    }
    int maxBatchSize = 0;
    long maxBatchSizeBytes = 0L;
    MutationList mutations = new MutationList();
    boolean needToWrite = false;
    Configuration conf = c.getEnvironment().getConfiguration();
    long flushSize = region.getTableDesc().getMemStoreFlushSize();
    if (flushSize <= 0) {
        flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
    }
    /**
     * Slow down the writes if the memstore size is more than
     * (hbase.hregion.memstore.block.multiplier - 1) times hbase.hregion.memstore.flush.size
     * bytes. This avoids a flush storm to HDFS for cases like index building, where reads and
     * writes happen to all the table regions on the server.
     */
    final long blockingMemStoreSize = flushSize * (conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER) - 1);
    boolean buildLocalIndex = indexMaintainers != null && dataColumns == null && !localIndexScan;
    if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
        needToWrite = true;
        maxBatchSize = env.getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
        mutations = new MutationList(Ints.saturatedCast(maxBatchSize + maxBatchSize / 10));
        maxBatchSizeBytes = env.getConfiguration().getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
    }
    Aggregators aggregators = ServerAggregators.deserialize(scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), env.getConfiguration());
    Aggregator[] rowAggregators = aggregators.getAggregators();
    boolean hasMore;
    boolean hasAny = false;
    Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
    Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " " + region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
    }
    int rowCount = 0;
    final RegionScanner innerScanner = theScanner;
    boolean useIndexProto = true;
    byte[] indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
    // For backward compatibility, fall back to looking up the old attribute
    if (indexMaintainersPtr == null) {
        indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
        useIndexProto = false;
    }
    boolean acquiredLock = false;
    try {
        if (needToWrite) {
            synchronized (lock) {
                scansReferenceCount++;
            }
        }
        region.startRegionOperation();
        acquiredLock = true;
        synchronized (innerScanner) {
            do {
                List<Cell> results = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList<Cell>();
                // Results are potentially returned even when the return value of nextRaw() is
                // false, since that value only indicates whether there are more values after
                // the ones returned
                hasMore = innerScanner.nextRaw(results);
                if (!results.isEmpty()) {
                    rowCount++;
                    result.setKeyValues(results);
                    if (isDescRowKeyOrderUpgrade) {
                        Arrays.fill(values, null);
                        Cell firstKV = results.get(0);
                        RowKeySchema schema = projectedTable.getRowKeySchema();
                        int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
                        for (int i = 0; i < schema.getFieldCount(); i++) {
                            Boolean hasValue = schema.next(ptr, i, maxOffset);
                            if (hasValue == null) {
                                break;
                            }
                            Field field = schema.getField(i);
                            if (field.getSortOrder() == SortOrder.DESC) {
                                // Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
                                if (field.getDataType().isArrayType()) {
                                    field.getDataType().coerceBytes(ptr, null, field.getDataType(), field.getMaxLength(), field.getScale(), field.getSortOrder(), field.getMaxLength(), field.getScale(), field.getSortOrder(), // force to use correct separator byte
                                    true);
                                } else // Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
                                if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
                                    int len = ptr.getLength();
                                    while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                        len--;
                                    }
                                    ptr.set(ptr.get(), ptr.getOffset(), len);
                                // Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
                                } else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
                                    byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
                                    ptr.set(invertedBytes);
                                }
                            } else if (field.getDataType() == PBinary.INSTANCE) {
                                // Remove trailing space characters so that the setValues call below will replace them
                                // with the correct zero byte character. Note this is somewhat dangerous as these
                                // could be legit, but I don't know what the alternative is.
                                int len = ptr.getLength();
                                while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                    len--;
                                }
                                ptr.set(ptr.get(), ptr.getOffset(), len);
                            }
                            values[i] = ptr.copyBytes();
                        }
                        writeToTable.newKey(ptr, values);
                        if (Bytes.compareTo(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr.get(), ptr.getOffset() + offset, ptr.getLength()) == 0) {
                            continue;
                        }
                        byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
                        if (offset > 0) {
                            // for local indexes (prepend region start key)
                            byte[] newRowWithOffset = new byte[offset + newRow.length];
                            System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);
                            System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
                            newRow = newRowWithOffset;
                        }
                        byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
                        for (Cell cell : results) {
                            // Copy existing cell but with new row key
                            Cell newCell = new KeyValue(newRow, 0, newRow.length, cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()), cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                            switch(KeyValue.Type.codeToType(cell.getTypeByte())) {
                                case Put:
                                    // If Put, issue a point delete for the old Put
                                    Delete del = new Delete(oldRow);
                                    del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.Delete, ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
                                    mutations.add(del);
                                    Put put = new Put(newRow);
                                    put.add(newCell);
                                    mutations.add(put);
                                    break;
                                case Delete:
                                case DeleteColumn:
                                case DeleteFamily:
                                case DeleteFamilyVersion:
                                    Delete delete = new Delete(newRow);
                                    delete.addDeleteMarker(newCell);
                                    mutations.add(delete);
                                    break;
                            }
                        }
                    } else if (buildLocalIndex) {
                        for (IndexMaintainer maintainer : indexMaintainers) {
                            if (!results.isEmpty()) {
                                result.getKey(ptr);
                                ValueGetter valueGetter = maintainer.createGetterFromKeyValues(ImmutableBytesPtr.copyBytesIfNecessary(ptr), results);
                                Put put = maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, results.get(0).getTimestamp(), env.getRegion().getRegionInfo().getStartKey(), env.getRegion().getRegionInfo().getEndKey());
                                indexMutations.add(put);
                            }
                        }
                        result.setKeyValues(results);
                    } else if (isDelete) {
                        // FIXME: the version of the Delete constructor without the lock
                        // args was introduced in 0.94.4, thus if we try to use it here
                        // we can no longer use the 0.94.2 version of the client.
                        Cell firstKV = results.get(0);
                        Delete delete = new Delete(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength(), ts);
                        if (replayMutations) {
                            delete.setAttribute(IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
                        }
                        mutations.add(delete);
                        // force Tephra to ignore this delete
                        delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                    } else if (isUpsert) {
                        Arrays.fill(values, null);
                        int bucketNumOffset = 0;
                        if (projectedTable.getBucketNum() != null) {
                            values[0] = new byte[] { 0 };
                            bucketNumOffset = 1;
                        }
                        int i = bucketNumOffset;
                        List<PColumn> projectedColumns = projectedTable.getColumns();
                        for (; i < projectedTable.getPKColumns().size(); i++) {
                            Expression expression = selectExpressions.get(i - bucketNumOffset);
                            if (expression.evaluate(result, ptr)) {
                                values[i] = ptr.copyBytes();
                                // If the expression's sort order doesn't match that of the column being projected into, then invert the bits.
                                if (expression.getSortOrder() != projectedColumns.get(i).getSortOrder()) {
                                    SortOrder.invert(values[i], 0, values[i], 0, values[i].length);
                                }
                            } else {
                                values[i] = ByteUtil.EMPTY_BYTE_ARRAY;
                            }
                        }
                        projectedTable.newKey(ptr, values);
                        PRow row = projectedTable.newRow(kvBuilder, ts, ptr, false);
                        for (; i < projectedColumns.size(); i++) {
                            Expression expression = selectExpressions.get(i - bucketNumOffset);
                            if (expression.evaluate(result, ptr)) {
                                PColumn column = projectedColumns.get(i);
                                if (!column.getDataType().isSizeCompatible(ptr, null, expression.getDataType(), expression.getSortOrder(), expression.getMaxLength(), expression.getScale(), column.getMaxLength(), column.getScale())) {
                                    throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), column.getScale(), column.getName().getString(), ptr);
                                }
                                column.getDataType().coerceBytes(ptr, null, expression.getDataType(), expression.getMaxLength(), expression.getScale(), expression.getSortOrder(), column.getMaxLength(), column.getScale(), column.getSortOrder(), projectedTable.rowKeyOrderOptimizable());
                                byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                                row.setValue(column, bytes);
                            }
                        }
                        for (Mutation mutation : row.toRowMutations()) {
                            if (replayMutations) {
                                mutation.setAttribute(IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
                            }
                            mutations.add(mutation);
                        }
                        for (i = 0; i < selectExpressions.size(); i++) {
                            selectExpressions.get(i).reset();
                        }
                    } else if (deleteCF != null && deleteCQ != null) {
                        // if no empty key value is being set
                        if (emptyCF == null || result.getValue(deleteCF, deleteCQ) != null) {
                            Delete delete = new Delete(results.get(0).getRowArray(), results.get(0).getRowOffset(), results.get(0).getRowLength());
                            delete.deleteColumns(deleteCF, deleteCQ, ts);
                            // force Tephra to ignore this delete
                            delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                            mutations.add(delete);
                        }
                    }
                    if (emptyCF != null) {
                        /*
                         * If we've specified an emptyCF, then we need to insert an empty
                         * key value "retroactively" for any key value that is visible at
                         * the timestamp that the DDL was issued. Key values that are not
                         * visible at this timestamp will never be projected up to scans
                         * past this timestamp, so they don't need to be considered. We
                         * insert one empty key value per row per timestamp.
                         */
                        Set<Long> timeStamps = Sets.newHashSetWithExpectedSize(results.size());
                        for (Cell kv : results) {
                            long kvts = kv.getTimestamp();
                            if (!timeStamps.contains(kvts)) {
                                Put put = new Put(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
                                put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts, ByteUtil.EMPTY_BYTE_ARRAY);
                                mutations.add(put);
                            }
                        }
                    }
                    if (readyToCommit(rowCount, mutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
                        commit(region, mutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr, txState, areMutationInSameRegion, targetHTable, useIndexProto);
                        mutations.clear();
                    }
                    if (readyToCommit(rowCount, indexMutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
                        commitBatch(region, indexMutations, null, blockingMemStoreSize, null, txState, useIndexProto);
                        indexMutations.clear();
                    }
                    aggregators.aggregate(rowAggregators, result);
                    hasAny = true;
                }
            } while (hasMore);
            if (!mutations.isEmpty()) {
                commit(region, mutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr, txState, areMutationInSameRegion, targetHTable, useIndexProto);
                mutations.clear();
            }
            if (!indexMutations.isEmpty()) {
                commitBatch(region, indexMutations, null, blockingMemStoreSize, indexMaintainersPtr, txState, useIndexProto);
                indexMutations.clear();
            }
        }
    } finally {
        if (needToWrite) {
            synchronized (lock) {
                scansReferenceCount--;
            }
        }
        if (targetHTable != null) {
            targetHTable.close();
        }
        try {
            innerScanner.close();
        } finally {
            if (acquiredLock)
                region.closeRegionOperation();
        }
    }
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
    }
    final boolean hadAny = hasAny;
    KeyValue keyValue = null;
    if (hadAny) {
        byte[] value = aggregators.toBytes(rowAggregators);
        keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
    }
    final KeyValue aggKeyValue = keyValue;
    RegionScanner scanner = new BaseRegionScanner(innerScanner) {

        private boolean done = !hadAny;

        @Override
        public boolean isFilterDone() {
            return done;
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            if (done)
                return false;
            done = true;
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Configuration(org.apache.hadoop.conf.Configuration) TupleProjector(org.apache.phoenix.execute.TupleProjector) PTable(org.apache.phoenix.schema.PTable) ValueGetter(org.apache.phoenix.hbase.index.ValueGetter) EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) ArrayList(java.util.ArrayList) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) DataExceedsCapacityException(org.apache.phoenix.exception.DataExceedsCapacityException) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple) Aggregators(org.apache.phoenix.expression.aggregator.Aggregators) ServerAggregators(org.apache.phoenix.expression.aggregator.ServerAggregators) PLong(org.apache.phoenix.schema.types.PLong) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) KeyValue(org.apache.hadoop.hbase.KeyValue) StatisticsCollector(org.apache.phoenix.schema.stats.StatisticsCollector) SQLException(java.sql.SQLException) HTable(org.apache.hadoop.hbase.client.HTable) PRow(org.apache.phoenix.schema.PRow) PColumn(org.apache.phoenix.schema.PColumn) Field(org.apache.phoenix.schema.ValueSchema.Field) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) Aggregator(org.apache.phoenix.expression.aggregator.Aggregator) RowKeySchema(org.apache.phoenix.schema.RowKeySchema) Put(org.apache.hadoop.hbase.client.Put) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Expression(org.apache.phoenix.expression.Expression) HashJoinInfo(org.apache.phoenix.join.HashJoinInfo) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple) TableRef(org.apache.phoenix.schema.TableRef) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) Tuple(org.apache.phoenix.schema.tuple.Tuple) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple)
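
In the isUpsert branch above, every projected value is run through isSizeCompatible before being coerced into the target column, and an oversized value is rejected with DataExceedsCapacityException. Below is a hedged sketch of the kind of UPSERT SELECT that exercises this check; the connection URL, tables, and columns are illustrative assumptions, and whether the check runs server-side in this coprocessor or client-side in the upsert compiler depends on how Phoenix plans the statement, but the oversized DECIMAL is rejected either way.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class UpsertSelectCapacityExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection string and schema; adjust to your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS SRC (PK INTEGER NOT NULL PRIMARY KEY, WIDE DECIMAL(10,2))");
            stmt.execute("CREATE TABLE IF NOT EXISTS DST (PK INTEGER NOT NULL PRIMARY KEY, NARROW DECIMAL(4,2))");
            stmt.execute("UPSERT INTO SRC VALUES (1, 12345678.90)"); // fits DECIMAL(10,2), not DECIMAL(4,2)
            conn.commit();
            try {
                // Copying the wide value into the narrower column should trip the
                // isSizeCompatible check and raise DataExceedsCapacityException.
                stmt.execute("UPSERT INTO DST (PK, NARROW) SELECT PK, WIDE FROM SRC");
                conn.commit();
            } catch (Exception e) {
                System.out.println("Rejected as expected: " + e);
            }
        }
    }
}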

Aggregations

DataExceedsCapacityException (org.apache.phoenix.exception.DataExceedsCapacityException) 12
BigDecimal (java.math.BigDecimal) 8
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable) 6
PDataType (org.apache.phoenix.schema.types.PDataType) 6
Test (org.junit.Test) 4
Expression (org.apache.phoenix.expression.Expression) 3
SortOrder (org.apache.phoenix.schema.SortOrder) 3
SQLException (java.sql.SQLException) 2
List (java.util.List) 2
Cell (org.apache.hadoop.hbase.Cell) 2
Delete (org.apache.hadoop.hbase.client.Delete) 2
Mutation (org.apache.hadoop.hbase.client.Mutation) 2
Put (org.apache.hadoop.hbase.client.Put) 2
PColumn (org.apache.phoenix.schema.PColumn) 2
PRow (org.apache.phoenix.schema.PRow) 2
PTable (org.apache.phoenix.schema.PTable) 2
ByteArrayInputStream (java.io.ByteArrayInputStream) 1
DataInputStream (java.io.DataInputStream) 1
EOFException (java.io.EOFException) 1
IOException (java.io.IOException) 1