
Example 66 with PDataType

use of org.apache.phoenix.schema.types.PDataType in project phoenix by apache.

The class IndexMaintainer, method buildRowKey.

public byte[] buildRowKey(ValueGetter valueGetter, ImmutableBytesWritable rowKeyPtr, byte[] regionStartKey, byte[] regionEndKey) {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    boolean prependRegionStartKey = isLocalIndex && regionStartKey != null;
    boolean isIndexSalted = !isLocalIndex && nIndexSaltBuckets > 0;
    int prefixKeyLength = prependRegionStartKey ? (regionStartKey.length != 0 ? regionStartKey.length : regionEndKey.length) : 0;
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes + (prependRegionStartKey ? prefixKeyLength : 0));
    DataOutput output = new DataOutputStream(stream);
    try {
        // For local indexes, we must prepend the row key with the start region key
        if (prependRegionStartKey) {
            if (regionStartKey.length == 0) {
                output.write(new byte[prefixKeyLength]);
            } else {
                output.write(regionStartKey);
            }
        }
        if (isIndexSalted) {
            // will be set at end to index salt byte
            output.write(0);
        }
        // The dataRowKeySchema includes the salt byte field,
        // so we must adjust for that here.
        int dataPosOffset = isDataTableSalted ? 1 : 0;
        BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
        int nIndexedColumns = getIndexPkColumnCount() - getNumViewConstants();
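        // dataRowKeyLocator[0][pos] and dataRowKeyLocator[1][pos] record the offset and length of
        // each data PK value within the data row key, indexed by that column's position in the index PK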
        int[][] dataRowKeyLocator = new int[2][nIndexedColumns];
        // Skip data table salt byte
        int maxRowKeyOffset = rowKeyPtr.getOffset() + rowKeyPtr.getLength();
        dataRowKeySchema.iterator(rowKeyPtr, ptr, dataPosOffset);
        if (viewIndexId != null) {
            output.write(viewIndexId);
        }
        if (isMultiTenant) {
            dataRowKeySchema.next(ptr, dataPosOffset, maxRowKeyOffset);
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset)));
            }
            dataPosOffset++;
        }
        // Write index row key
        for (int i = dataPosOffset; i < dataRowKeySchema.getFieldCount(); i++) {
            Boolean hasValue = dataRowKeySchema.next(ptr, i, maxRowKeyOffset);
            // Skip view constants from the data table, as they don't need to be in the index
            // (they're the same for all rows in this index)
            if (!viewConstantColumnBitSet.get(i)) {
                int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset);
                if (Boolean.TRUE.equals(hasValue)) {
                    dataRowKeyLocator[0][pos] = ptr.getOffset();
                    dataRowKeyLocator[1][pos] = ptr.getLength();
                } else {
                    dataRowKeyLocator[0][pos] = 0;
                    dataRowKeyLocator[1][pos] = 0;
                }
            }
        }
        BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
        Iterator<Expression> expressionIterator = indexedExpressions.iterator();
        for (int i = 0; i < nIndexedColumns; i++) {
            PDataType dataColumnType;
            boolean isNullable;
            SortOrder dataSortOrder;
            if (dataPkPosition[i] == EXPRESSION_NOT_PRESENT) {
                Expression expression = expressionIterator.next();
                dataColumnType = expression.getDataType();
                dataSortOrder = expression.getSortOrder();
                isNullable = expression.isNullable();
                expression.evaluate(new ValueGetterTuple(valueGetter), ptr);
            } else {
                Field field = dataRowKeySchema.getField(dataPkPosition[i]);
                dataColumnType = field.getDataType();
                ptr.set(rowKeyPtr.get(), dataRowKeyLocator[0][i], dataRowKeyLocator[1][i]);
                dataSortOrder = field.getSortOrder();
                isNullable = field.isNullable();
            }
            boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
            PDataType indexColumnType = IndexUtil.getIndexColumnDataType(isNullable, dataColumnType);
            boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
            boolean isIndexColumnDesc = descIndexColumnBitSet.get(i);
            if (isBytesComparable && isDataColumnInverted == isIndexColumnDesc) {
                output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            } else {
                if (!isBytesComparable) {
                    indexColumnType.coerceBytes(ptr, dataColumnType, dataSortOrder, SortOrder.getDefault());
                }
                if (isDataColumnInverted != isIndexColumnDesc) {
                    writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
                } else {
                    output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                }
            }
            if (!indexColumnType.isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, isIndexColumnDesc ? SortOrder.DESC : SortOrder.ASC));
            }
        }
        int length = stream.size();
        int minLength = length - maxTrailingNulls;
        byte[] indexRowKey = stream.getBuffer();
        // Remove trailing nulls
        while (length > minLength && indexRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) {
            length--;
        }
        if (isIndexSalted) {
            // Set salt byte
            byte saltByte = SaltingUtil.getSaltingByte(indexRowKey, SaltingUtil.NUM_SALTING_BYTES, length - SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
            indexRowKey[0] = saltByte;
        }
        return indexRowKey.length == length ? indexRowKey : Arrays.copyOf(indexRowKey, length);
    } catch (IOException e) {
        // Impossible
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            // Impossible
            throw new RuntimeException(e);
        }
    }
}
Also used : DataOutput(java.io.DataOutput) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) DataOutputStream(java.io.DataOutputStream) BitSet(org.apache.phoenix.util.BitSet) SortOrder(org.apache.phoenix.schema.SortOrder) TrustedByteArrayOutputStream(org.apache.phoenix.util.TrustedByteArrayOutputStream) IOException(java.io.IOException) Field(org.apache.phoenix.schema.ValueSchema.Field) PDataType(org.apache.phoenix.schema.types.PDataType) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) SingleCellConstructorExpression(org.apache.phoenix.expression.SingleCellConstructorExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ValueGetterTuple(org.apache.phoenix.schema.tuple.ValueGetterTuple)
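
buildRowKey reserves the index salt byte up front (writing a placeholder 0) and backfills it once the rest of the key has been assembled, since the salt is a function of the key bytes that follow it. Below is a minimal standalone sketch of that pattern; the hash used here is purely illustrative and is not what SaltingUtil.getSaltingByte actually does.

import java.io.ByteArrayOutputStream;

public class SaltedKeySketch {
    // Illustrative hash-to-bucket function (a stand-in for SaltingUtil.getSaltingByte)
    static byte saltingByte(byte[] key, int offset, int length, int nBuckets) {
        int h = 0;
        for (int i = offset; i < offset + length; i++) {
            h = 31 * h + key[i];
        }
        return (byte) Math.abs(h % nBuckets);
    }

    public static void main(String[] args) {
        int nBuckets = 8;
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        out.write(0);                                   // placeholder salt byte, set at the end
        byte[] payload = "tenant1\u0000row42".getBytes();
        out.write(payload, 0, payload.length);
        byte[] rowKey = out.toByteArray();
        // The salt is computed over everything after the salt byte itself
        rowKey[0] = saltingByte(rowKey, 1, rowKey.length - 1, nBuckets);
        System.out.println("salt bucket = " + rowKey[0]);
    }
}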

Example 67 with PDataType

use of org.apache.phoenix.schema.types.PDataType in project phoenix by apache.

The class IndexMaintainer, method write.

// Only called by code older than our 4.10 release
@Deprecated
@Override
public void write(DataOutput output) throws IOException {
    // Encode nIndexSaltBuckets and isMultiTenant together
    WritableUtils.writeVInt(output, (nIndexSaltBuckets + 1) * (isMultiTenant ? -1 : 1));
    // Encode indexedColumns.size() and whether or not there's a viewIndexId
    WritableUtils.writeVInt(output, (indexedColumns.size() + 1) * (viewIndexId != null ? -1 : 1));
    if (viewIndexId != null) {
        output.write(viewIndexId);
    }
    for (ColumnReference ref : indexedColumns) {
        Bytes.writeByteArray(output, ref.getFamily());
        Bytes.writeByteArray(output, ref.getQualifier());
    }
    // TODO: remove indexedColumnTypes in the next major release
    for (int i = 0; i < indexedColumnTypes.size(); i++) {
        PDataType type = indexedColumnTypes.get(i);
        WritableUtils.writeVInt(output, type.ordinal());
    }
    // Encode coveredColumns.size() and whether or not this is a local index
    WritableUtils.writeVInt(output, (coveredColumnsMap.size() + 1) * (isLocalIndex ? -1 : 1));
    for (ColumnReference ref : coveredColumnsMap.keySet()) {
        Bytes.writeByteArray(output, ref.getFamily());
        Bytes.writeByteArray(output, ref.getQualifier());
    }
    // TODO: remove when rowKeyOrderOptimizable hack no longer needed
    WritableUtils.writeVInt(output, indexTableName.length * (rowKeyOrderOptimizable ? 1 : -1));
    output.write(indexTableName, 0, indexTableName.length);
    Bytes.writeByteArray(output, dataEmptyKeyValueCF);
    // TODO in order to maintain b/w compatibility encode emptyKeyValueCFPtr.getLength() as a negative value (so we can distinguish between new and old clients)
    // when indexedColumnTypes is removed, remove this 
    WritableUtils.writeVInt(output, -emptyKeyValueCFPtr.getLength());
    output.write(emptyKeyValueCFPtr.get(), emptyKeyValueCFPtr.getOffset(), emptyKeyValueCFPtr.getLength());
    WritableUtils.writeVInt(output, indexedExpressions.size());
    for (Expression expression : indexedExpressions) {
        WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal());
        expression.write(output);
    }
    rowKeyMetaData.write(output);
    // Encode indexWALDisabled in nDataCFs
    WritableUtils.writeVInt(output, (nDataCFs + 1) * (indexWALDisabled ? -1 : 1));
    // Encode estimatedIndexRowKeyBytes and immutableRows together.
    WritableUtils.writeVInt(output, estimatedIndexRowKeyBytes * (immutableRows ? -1 : 1));
}
Also used : PDataType(org.apache.phoenix.schema.types.PDataType) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) SingleCellConstructorExpression(org.apache.phoenix.expression.SingleCellConstructorExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
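
write packs two values into each vint: the magnitude carries a count (offset by one so zero is representable) and the sign carries a boolean flag. The matching readFields is not part of this excerpt, but a decode that mirrors the encoding above would look roughly like this sketch.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;

public class SignEncodedVIntSketch {
    public static void main(String[] args) throws IOException {
        int nIndexSaltBuckets = 4;
        boolean isMultiTenant = true;

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        // Same trick as above: magnitude carries the count (+1 so zero survives),
        // sign carries the boolean
        WritableUtils.writeVInt(out, (nIndexSaltBuckets + 1) * (isMultiTenant ? -1 : 1));

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        int encoded = WritableUtils.readVInt(in);
        boolean decodedMultiTenant = encoded < 0;
        int decodedSaltBuckets = Math.abs(encoded) - 1;
        System.out.println(decodedSaltBuckets + " " + decodedMultiTenant);  // 4 true
    }
}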

Example 68 with PDataType

use of org.apache.phoenix.schema.types.PDataType in project phoenix by apache.

The class IndexMaintainer, method buildDataRowKey.

/*
 * Build the data row key from the index row key
 */
public byte[] buildDataRowKey(ImmutableBytesWritable indexRowKeyPtr, byte[][] viewConstants) {
    RowKeySchema indexRowKeySchema = getIndexRowKeySchema();
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedIndexRowKeyBytes);
    DataOutput output = new DataOutputStream(stream);
    // Increment dataPosOffset until all have been written
    int dataPosOffset = 0;
    int viewConstantsIndex = 0;
    try {
        int indexPosOffset = !isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0;
        int maxRowKeyOffset = indexRowKeyPtr.getOffset() + indexRowKeyPtr.getLength();
        indexRowKeySchema.iterator(indexRowKeyPtr, ptr, indexPosOffset);
        if (isDataTableSalted) {
            dataPosOffset++;
            // will be set at end to salt byte
            output.write(0);
        }
        if (viewIndexId != null) {
            indexRowKeySchema.next(ptr, indexPosOffset++, maxRowKeyOffset);
        }
        if (isMultiTenant) {
            indexRowKeySchema.next(ptr, indexPosOffset, maxRowKeyOffset);
            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
            if (!dataRowKeySchema.getField(dataPosOffset).getDataType().isFixedWidth()) {
                output.writeByte(SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, dataRowKeySchema.getField(dataPosOffset)));
            }
            indexPosOffset++;
            dataPosOffset++;
        }
        indexPosOffset = (!isLocalIndex && nIndexSaltBuckets > 0 ? 1 : 0) + (isMultiTenant ? 1 : 0) + (viewIndexId == null ? 0 : 1);
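        // Recompute indexPosOffset from the start of the index row key: the indexed columns are
        // preceded by the index salt byte (if the index is salted), the tenant ID (if multi-tenant),
        // and the view index ID (if present)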
        BitSet viewConstantColumnBitSet = this.rowKeyMetaData.getViewConstantColumnBitSet();
        BitSet descIndexColumnBitSet = rowKeyMetaData.getDescIndexColumnBitSet();
        for (int i = dataPosOffset; i < dataRowKeySchema.getFieldCount(); i++) {
            // Write the view constant value directly, since it isn't stored in the index row key
            // (it's the same for all rows in this index)
            if (viewConstantColumnBitSet.get(i)) {
                output.write(viewConstants[viewConstantsIndex++]);
            } else {
                int pos = rowKeyMetaData.getIndexPkPosition(i - dataPosOffset);
                Boolean hasValue = indexRowKeySchema.iterator(indexRowKeyPtr, ptr, pos + indexPosOffset + 1);
                if (Boolean.TRUE.equals(hasValue)) {
                    // Write data row key value taking into account coercion and inversion
                    // if necessary
                    Field dataField = dataRowKeySchema.getField(i);
                    Field indexField = indexRowKeySchema.getField(pos + indexPosOffset);
                    PDataType indexColumnType = indexField.getDataType();
                    PDataType dataColumnType = dataField.getDataType();
                    SortOrder dataSortOrder = dataField.getSortOrder();
                    SortOrder indexSortOrder = indexField.getSortOrder();
                    boolean isDataColumnInverted = dataSortOrder != SortOrder.ASC;
                    boolean isBytesComparable = dataColumnType.isBytesComparableWith(indexColumnType);
                    if (isBytesComparable && isDataColumnInverted == descIndexColumnBitSet.get(pos)) {
                        output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                    } else {
                        if (!isBytesComparable) {
                            dataColumnType.coerceBytes(ptr, indexColumnType, indexSortOrder, SortOrder.getDefault());
                        }
                        if (descIndexColumnBitSet.get(pos) != isDataColumnInverted) {
                            writeInverted(ptr.get(), ptr.getOffset(), ptr.getLength(), output);
                        } else {
                            output.write(ptr.get(), ptr.getOffset(), ptr.getLength());
                        }
                    }
                }
            }
            // Write separator byte if variable length unless it's the last field in the schema
            // (but we still need to write it if it's DESC to ensure sort order is correct).
            byte sepByte = SchemaUtil.getSeparatorByte(rowKeyOrderOptimizable, ptr.getLength() == 0, dataRowKeySchema.getField(i));
            if (!dataRowKeySchema.getField(i).getDataType().isFixedWidth() && (((i + 1) != dataRowKeySchema.getFieldCount()) || sepByte == QueryConstants.DESC_SEPARATOR_BYTE)) {
                output.writeByte(sepByte);
            }
        }
        int length = stream.size();
        int minLength = length - maxTrailingNulls;
        byte[] dataRowKey = stream.getBuffer();
        // Remove trailing nulls
        while (length > minLength && dataRowKey[length - 1] == QueryConstants.SEPARATOR_BYTE) {
            length--;
        }
        // TODO: capture nDataSaltBuckets here instead of reusing nIndexSaltBuckets below; for now
        // this path is only exercised for local indexes, where nIndexSaltBuckets is otherwise unused.
        // When that field is added to the serialized form, it must be appended at the end and allowed
        // to be missing, to maintain compatibility between an old client and a new server.
        if (isDataTableSalted) {
            // Set salt byte
            byte saltByte = SaltingUtil.getSaltingByte(dataRowKey, SaltingUtil.NUM_SALTING_BYTES, length - SaltingUtil.NUM_SALTING_BYTES, nIndexSaltBuckets);
            dataRowKey[0] = saltByte;
        }
        return dataRowKey.length == length ? dataRowKey : Arrays.copyOf(dataRowKey, length);
    } catch (IOException e) {
        // Impossible
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            // Impossible
            throw new RuntimeException(e);
        }
    }
}
Also used : DataOutput(java.io.DataOutput) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) DataOutputStream(java.io.DataOutputStream) BitSet(org.apache.phoenix.util.BitSet) RowKeySchema(org.apache.phoenix.schema.RowKeySchema) SortOrder(org.apache.phoenix.schema.SortOrder) TrustedByteArrayOutputStream(org.apache.phoenix.util.TrustedByteArrayOutputStream) IOException(java.io.IOException) Field(org.apache.phoenix.schema.ValueSchema.Field) PDataType(org.apache.phoenix.schema.types.PDataType)
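
writeInverted itself is not shown in this excerpt. Assuming the usual scheme for DESC row key columns, each value is stored with every byte bit-flipped so that ascending byte-order comparison of the stored form yields descending order of the original values. A small sketch of that idea:

import java.io.ByteArrayOutputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.util.Bytes;

public class InvertedWriteSketch {
    // Assumed "flip every byte" implementation; not necessarily identical to Phoenix's writeInverted
    static void writeInverted(byte[] buf, int offset, int length, DataOutput output) throws IOException {
        for (int i = offset; i < offset + length; i++) {
            output.writeByte(buf[i] ^ 0xFF);
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] a = "apple".getBytes();
        byte[] b = "banana".getBytes();
        ByteArrayOutputStream sa = new ByteArrayOutputStream();
        ByteArrayOutputStream sb = new ByteArrayOutputStream();
        writeInverted(a, 0, a.length, new DataOutputStream(sa));
        writeInverted(b, 0, b.length, new DataOutputStream(sb));
        // Inverted "banana" now compares lower than inverted "apple": DESC order via ASC byte compare
        System.out.println(Bytes.compareTo(sb.toByteArray(), sa.toByteArray()) < 0);  // true
    }
}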

Example 69 with PDataType

use of org.apache.phoenix.schema.types.PDataType in project phoenix by apache.

The class ScanUtil, method getTenantIdBytes.

public static byte[] getTenantIdBytes(RowKeySchema schema, boolean isSalted, PName tenantId, boolean isSharedIndex) throws SQLException {
    int pkPos = (isSalted ? 1 : 0) + (isSharedIndex ? 1 : 0);
    Field field = schema.getField(pkPos);
    PDataType dataType = field.getDataType();
    byte[] convertedValue;
    try {
        Object value = dataType.toObject(tenantId.getString());
        convertedValue = dataType.toBytes(value);
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(convertedValue);
        dataType.pad(ptr, field.getMaxLength(), field.getSortOrder());
        convertedValue = ByteUtil.copyKeyBytesIfNecessary(ptr);
    } catch (IllegalDataException ex) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.TENANTID_IS_OF_WRONG_TYPE).build().buildException();
    }
    return convertedValue;
}
Also used : Field(org.apache.phoenix.schema.ValueSchema.Field) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) PDataType(org.apache.phoenix.schema.types.PDataType) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) IllegalDataException(org.apache.phoenix.schema.IllegalDataException)
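
A hypothetical standalone use of the same convert-pad-copy pattern, assuming a tenant PK column declared as CHAR(15); the type, length, and value here are illustrative and do not come from the example above.

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PChar;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.util.ByteUtil;

public class TenantIdPadSketch {
    public static void main(String[] args) {
        PDataType type = PChar.INSTANCE;                      // assume the tenant PK column is CHAR(15)
        Integer maxLength = 15;
        byte[] value = type.toBytes(type.toObject("acme"));   // "acme" -> CHAR bytes
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(value);
        type.pad(ptr, maxLength, SortOrder.getDefault());     // pad to the column's declared width
        byte[] key = ByteUtil.copyKeyBytesIfNecessary(ptr);   // copy only if padding produced a new buffer
        System.out.println(key.length);                       // expected to equal maxLength (15)
    }
}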

Example 70 with PDataType

use of org.apache.phoenix.schema.types.PDataType in project phoenix by apache.

The class SchemaUtil, method estimateKeyLength.

/**
 * Estimate the max key length in bytes of the PK for a given table
 * @param table the table
 * @return the max PK length
 */
public static int estimateKeyLength(PTable table) {
    int maxKeyLength = 0;
    // Calculate the max length of a row key: fixed-width parts contribute their byte size
    // (or declared max length), variable-width parts contribute VAR_LENGTH_ESTIMATE
    int i = 0;
    List<PColumn> columns = table.getPKColumns();
    while (i < columns.size()) {
        PColumn keyColumn = columns.get(i++);
        PDataType type = keyColumn.getDataType();
        Integer maxLength = keyColumn.getMaxLength();
        maxKeyLength += !type.isFixedWidth() ? VAR_LENGTH_ESTIMATE : maxLength == null ? type.getByteSize() : maxLength;
    }
    return maxKeyLength;
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) PDataType(org.apache.phoenix.schema.types.PDataType)
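
The same accumulation rule, applied standalone to a hypothetical PK of (host VARCHAR, created_date DATE, port INTEGER); the VAR_LENGTH_ESTIMATE value below is an assumption for illustration, not necessarily SchemaUtil's actual constant.

import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PDate;
import org.apache.phoenix.schema.types.PInteger;
import org.apache.phoenix.schema.types.PVarchar;

public class KeyLengthEstimateSketch {
    private static final int VAR_LENGTH_ESTIMATE = 10;    // assumed value, for illustration only

    public static void main(String[] args) {
        PDataType[] pkTypes = { PVarchar.INSTANCE, PDate.INSTANCE, PInteger.INSTANCE };
        Integer[] maxLengths = { null, null, null };       // no MAX_LENGTH declared on any column
        int maxKeyLength = 0;
        for (int i = 0; i < pkTypes.length; i++) {
            PDataType type = pkTypes[i];
            Integer maxLength = maxLengths[i];
            maxKeyLength += !type.isFixedWidth() ? VAR_LENGTH_ESTIMATE
                    : maxLength == null ? type.getByteSize() : maxLength;
        }
        // VARCHAR contributes the estimate, DATE 8 bytes, INTEGER 4 bytes
        System.out.println(maxKeyLength);                  // 10 + 8 + 4 = 22
    }
}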

Aggregations

PDataType (org.apache.phoenix.schema.types.PDataType): 152
Expression (org.apache.phoenix.expression.Expression): 54
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 31
SortOrder (org.apache.phoenix.schema.SortOrder): 29
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 21
CoerceExpression (org.apache.phoenix.expression.CoerceExpression): 18
Test (org.junit.Test): 15
PDatum (org.apache.phoenix.schema.PDatum): 12
BigDecimal (java.math.BigDecimal): 11
SQLException (java.sql.SQLException): 11
ArrayList (java.util.ArrayList): 11
List (java.util.List): 10
KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression): 10
RowKeyColumnExpression (org.apache.phoenix.expression.RowKeyColumnExpression): 10
SingleCellConstructorExpression (org.apache.phoenix.expression.SingleCellConstructorExpression): 10
IOException (java.io.IOException): 9
PreparedStatement (java.sql.PreparedStatement): 7
Pair (org.apache.hadoop.hbase.util.Pair): 7
Date (java.sql.Date): 6
AndExpression (org.apache.phoenix.expression.AndExpression): 6