Example 31 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

the class PrefixFunction method newKeyPart.

@Override
public KeyPart newKeyPart(final KeyPart childPart) {
    return new KeyPart() {

        private final List<Expression> extractNodes = extractNode() ? Collections.<Expression>singletonList(PrefixFunction.this) : Collections.<Expression>emptyList();

        @Override
        public PColumn getColumn() {
            return childPart.getColumn();
        }

        @Override
        public List<Expression> getExtractNodes() {
            return extractNodes;
        }

        @Override
        public KeyRange getKeyRange(CompareOp op, Expression rhs) {
            byte[] lowerRange = KeyRange.UNBOUND;
            byte[] upperRange = KeyRange.UNBOUND;
            boolean lowerInclusive = true;
            PDataType type = getColumn().getDataType();
            switch(op) {
                case EQUAL:
                    lowerRange = evaluateExpression(rhs);
                    upperRange = ByteUtil.nextKey(lowerRange);
                    break;
                case GREATER:
                    lowerRange = ByteUtil.nextKey(evaluateExpression(rhs));
                    break;
                case LESS_OR_EQUAL:
                    upperRange = ByteUtil.nextKey(evaluateExpression(rhs));
                    lowerInclusive = false;
                    break;
                default:
                    return childPart.getKeyRange(op, rhs);
            }
            PColumn column = getColumn();
            Integer length = column.getMaxLength();
            if (type.isFixedWidth()) {
                if (length != null) {
                    // Don't pad based on the current sort order, but instead use our
                    // minimum byte, as otherwise we'd end up skipping rows in the
                    // descending case, since rows with more padding appear
                    // *after* rows with no padding.
                    if (lowerRange != KeyRange.UNBOUND) {
                        lowerRange = type.pad(lowerRange, length, SortOrder.ASC);
                    }
                    if (upperRange != KeyRange.UNBOUND) {
                        upperRange = type.pad(upperRange, length, SortOrder.ASC);
                    }
                }
            } else if (column.getSortOrder() == SortOrder.DESC && getTable().rowKeyOrderOptimizable()) {
                // Append a zero byte if descending, since a \xFF byte will be appended
                // to the lowerRange, causing rows to be skipped that should be included.
                // For example, with rows 'ab', 'a',
                // a lowerRange of 'a\xFF' would skip 'ab', while 'a\x00\xFF' would not.
                if (lowerRange != KeyRange.UNBOUND) {
                    lowerRange = Arrays.copyOf(lowerRange, lowerRange.length + 1);
                    lowerRange[lowerRange.length - 1] = QueryConstants.SEPARATOR_BYTE;
                }
            }
            return KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, false);
        }

        @Override
        public PTable getTable() {
            return childPart.getTable();
        }
    };
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) PDataType(org.apache.phoenix.schema.types.PDataType) Expression(org.apache.phoenix.expression.Expression) KeyPart(org.apache.phoenix.compile.KeyPart) List(java.util.List) CompareOp(org.apache.hadoop.hbase.filter.CompareFilter.CompareOp)
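
To make the range arithmetic above concrete, here is a minimal, self-contained sketch (the prefix value "ab" is hypothetical) of the EQUAL branch: ByteUtil.nextKey turns a prefix match into the half-open scan range [ab, ac), since "ac" is the smallest key sorting after every key prefixed with "ab".

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.util.ByteUtil;

public class PrefixRangeSketch {
    public static void main(String[] args) {
        byte[] lower = Bytes.toBytes("ab");
        // nextKey increments the last byte: "ab" -> "ac"
        byte[] upper = ByteUtil.nextKey(lower);
        // Inclusive lower, exclusive upper: exactly the EQUAL case above
        KeyRange range = KeyRange.getKeyRange(lower, true, upper, false);
        System.out.println(range); // prints the range, e.g. [ab - ac)
    }
}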

Example 32 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

the class PhoenixIndexBuilder method executeAtomicOp.

@Override
public List<Mutation> executeAtomicOp(Increment inc) throws IOException {
    byte[] opBytes = inc.getAttribute(ATOMIC_OP_ATTRIB);
    if (opBytes == null) {
        // Unexpected
        return null;
    }
    inc.setAttribute(ATOMIC_OP_ATTRIB, null);
    Put put = null;
    Delete delete = null;
    // We can neither use the time stamp in the Increment to set the Get time range
    // nor set the Put/Delete time stamp and have this be atomic, as HBase does not
    // handle that. Though we disallow using the ON DUPLICATE KEY clause when the
    // CURRENT_SCN is set, we still may have a time stamp set as of when the table
    // was resolved on the client side. We need to ignore this as well due to limitations
    // in HBase, but this isn't too bad as the time will be very close to the current
    // time anyway.
    long ts = HConstants.LATEST_TIMESTAMP;
    byte[] rowKey = inc.getRow();
    final Get get = new Get(rowKey);
    if (isDupKeyIgnore(opBytes)) {
        get.setFilter(new FirstKeyOnlyFilter());
        Result result = this.env.getRegion().get(get);
        return result.isEmpty() ? convertIncrementToPutInSingletonList(inc) : Collections.<Mutation>emptyList();
    }
    ByteArrayInputStream stream = new ByteArrayInputStream(opBytes);
    DataInputStream input = new DataInputStream(stream);
    boolean skipFirstOp = input.readBoolean();
    short repeat = input.readShort();
    final int[] estimatedSizeHolder = { 0 };
    List<Pair<PTable, List<Expression>>> operations = Lists.newArrayListWithExpectedSize(3);
    while (true) {
        ExpressionVisitor<Void> visitor = new StatelessTraverseAllExpressionVisitor<Void>() {

            @Override
            public Void visit(KeyValueColumnExpression expression) {
                get.addColumn(expression.getColumnFamily(), expression.getColumnQualifier());
                estimatedSizeHolder[0]++;
                return null;
            }
        };
        try {
            int nExpressions = WritableUtils.readVInt(input);
            List<Expression> expressions = Lists.newArrayListWithExpectedSize(nExpressions);
            for (int i = 0; i < nExpressions; i++) {
                Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance();
                expression.readFields(input);
                expressions.add(expression);
                expression.accept(visitor);
            }
            PTableProtos.PTable tableProto = PTableProtos.PTable.parseDelimitedFrom(input);
            PTable table = PTableImpl.createFromProto(tableProto);
            operations.add(new Pair<>(table, expressions));
        } catch (EOFException e) {
            break;
        }
    }
    int estimatedSize = estimatedSizeHolder[0];
    if (get.getFamilyMap().isEmpty()) {
        get.setFilter(new FirstKeyOnlyFilter());
    }
    MultiKeyValueTuple tuple;
    List<Cell> flattenedCells = null;
    List<Cell> cells = ((HRegion) this.env.getRegion()).get(get, false);
    if (cells.isEmpty()) {
        if (skipFirstOp) {
            if (operations.size() <= 1 && repeat <= 1) {
                return convertIncrementToPutInSingletonList(inc);
            }
            // Skip first operation (if first wasn't ON DUPLICATE KEY IGNORE)
            repeat--;
        }
        // Base current state off of new row
        flattenedCells = flattenCells(inc, estimatedSize);
        tuple = new MultiKeyValueTuple(flattenedCells);
    } else {
        // Base current state off of existing row
        tuple = new MultiKeyValueTuple(cells);
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    for (int opIndex = 0; opIndex < operations.size(); opIndex++) {
        Pair<PTable, List<Expression>> operation = operations.get(opIndex);
        PTable table = operation.getFirst();
        List<Expression> expressions = operation.getSecond();
        for (int j = 0; j < repeat; j++) {
            // repeater loop
            ptr.set(rowKey);
            // The flattened cells must be re-sorted each time the expressions are
            // executed, not when the outer loop is exited. Hence we do it here, at the top of the loop.
            if (flattenedCells != null) {
                Collections.sort(flattenedCells, KeyValue.COMPARATOR);
            }
            PRow row = table.newRow(GenericKeyValueBuilder.INSTANCE, ts, ptr, false);
            for (int i = 0; i < expressions.size(); i++) {
                Expression expression = expressions.get(i);
                ptr.set(ByteUtil.EMPTY_BYTE_ARRAY);
                expression.evaluate(tuple, ptr);
                PColumn column = table.getColumns().get(i + 1);
                Object value = expression.getDataType().toObject(ptr, column.getSortOrder());
                // The expression and the column are guaranteed to have the
                // same type; only size compatibility still needs checking.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), expression.getSortOrder(), expression.getMaxLength(), expression.getScale(), column.getMaxLength(), column.getScale())) {
                    throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), column.getScale());
                }
                column.getDataType().coerceBytes(ptr, value, expression.getDataType(), expression.getMaxLength(), expression.getScale(), expression.getSortOrder(), column.getMaxLength(), column.getScale(), column.getSortOrder(), table.rowKeyOrderOptimizable());
                byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                row.setValue(column, bytes);
            }
            flattenedCells = Lists.newArrayListWithExpectedSize(estimatedSize);
            List<Mutation> mutations = row.toRowMutations();
            for (Mutation source : mutations) {
                flattenCells(source, flattenedCells);
            }
            tuple.setKeyValues(flattenedCells);
        }
        // Repeat only applies to first statement
        repeat = 1;
    }
    List<Mutation> mutations = Lists.newArrayListWithExpectedSize(2);
    for (int i = 0; i < tuple.size(); i++) {
        Cell cell = tuple.getValue(i);
        if (Type.codeToType(cell.getTypeByte()) == Type.Put) {
            if (put == null) {
                put = new Put(rowKey);
                transferAttributes(inc, put);
                mutations.add(put);
            }
            put.add(cell);
        } else {
            if (delete == null) {
                delete = new Delete(rowKey);
                transferAttributes(inc, delete);
                mutations.add(delete);
            }
            delete.addDeleteMarker(cell);
        }
    }
    return mutations;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) PTable(org.apache.phoenix.schema.PTable) Result(org.apache.hadoop.hbase.client.Result) PRow(org.apache.phoenix.schema.PRow) PColumn(org.apache.phoenix.schema.PColumn) StatelessTraverseAllExpressionVisitor(org.apache.phoenix.expression.visitor.StatelessTraverseAllExpressionVisitor) EOFException(java.io.EOFException) List(java.util.List) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) Cell(org.apache.hadoop.hbase.Cell) Pair(org.apache.hadoop.hbase.util.Pair) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) DataInputStream(java.io.DataInputStream) Put(org.apache.hadoop.hbase.client.Put) PTableProtos(org.apache.phoenix.coprocessor.generated.PTableProtos) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) DataExceedsCapacityException(org.apache.phoenix.exception.DataExceedsCapacityException) ByteArrayInputStream(java.io.ByteArrayInputStream) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) Expression(org.apache.phoenix.expression.Expression) Get(org.apache.hadoop.hbase.client.Get) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) Mutation(org.apache.hadoop.hbase.client.Mutation)
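
For context, a hedged client-side sketch (JDBC URL and table name are hypothetical) of what triggers this code path: Phoenix serializes the ON DUPLICATE KEY clause into the ATOMIC_OP_ATTRIB attribute of an Increment, which the server then replays through executeAtomicOp above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class OnDuplicateKeySketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Atomic read-modify-write: if the row exists, increment CNT;
            // otherwise insert it with CNT = 1.
            stmt.execute("UPSERT INTO COUNTERS (ID, CNT) VALUES ('a', 1) "
                    + "ON DUPLICATE KEY UPDATE CNT = CNT + 1");
            conn.commit();
        }
    }
}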

Example 33 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

the class SchemaUtil method estimateKeyLength.

/**
     * Estimate the max key length in bytes of the PK for a given table.
     * @param table the table
     * @return the estimated max PK length in bytes
     */
public static int estimateKeyLength(PTable table) {
    int maxKeyLength = 0;
    // Calculate the max length of a key; variable-width parts contribute a fixed estimate
    int i = 0;
    List<PColumn> columns = table.getPKColumns();
    while (i < columns.size()) {
        PColumn keyColumn = columns.get(i++);
        PDataType type = keyColumn.getDataType();
        Integer maxLength = keyColumn.getMaxLength();
        maxKeyLength += !type.isFixedWidth()
                ? VAR_LENGTH_ESTIMATE
                : (maxLength == null ? type.getByteSize() : maxLength);
    }
    return maxKeyLength;
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) PDataType(org.apache.phoenix.schema.types.PDataType)
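
A worked example of the estimate for a hypothetical PK of (BIGINT, CHAR(10), VARCHAR). This is a standalone mirror of the per-column logic above, using a plain value class instead of PColumn; the VAR_LENGTH_ESTIMATE value here is an assumed stand-in, as the real constant lives in SchemaUtil.

import java.util.List;

public class KeyLengthSketch {
    static final int VAR_LENGTH_ESTIMATE = 10; // assumed placeholder value

    static class PkCol {
        final boolean fixedWidth;
        final Integer maxLength;
        final int byteSize;
        PkCol(boolean fixedWidth, Integer maxLength, int byteSize) {
            this.fixedWidth = fixedWidth;
            this.maxLength = maxLength;
            this.byteSize = byteSize;
        }
    }

    static int estimateKeyLength(List<PkCol> pk) {
        int max = 0;
        for (PkCol c : pk) {
            max += !c.fixedWidth ? VAR_LENGTH_ESTIMATE
                    : (c.maxLength == null ? c.byteSize : c.maxLength);
        }
        return max;
    }

    public static void main(String[] args) {
        // BIGINT (8 bytes) + CHAR(10) (10 bytes) + VARCHAR (estimate) = 28
        System.out.println(estimateKeyLength(List.of(
                new PkCol(true, null, 8),     // BIGINT: fixed width, no max length
                new PkCol(true, 10, 0),       // CHAR(10): fixed width with max length
                new PkCol(false, null, 0)))); // VARCHAR: variable width
    }
}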

Example 34 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

the class KeyValueUtil method getEstimatedRowSize.

/**
     * Estimates the total storage size of the rows in a mutation map.
     * @param mutations map from table to row to RowMutationState
     * @return estimated total size of the rows, in bytes
     */
public static long getEstimatedRowSize(Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> mutations) {
    long size = 0;
    // iterate over tables
    for (Entry<TableRef, Map<ImmutableBytesPtr, RowMutationState>> tableEntry : mutations.entrySet()) {
        PTable table = tableEntry.getKey().getTable();
        // iterate over rows
        for (Entry<ImmutableBytesPtr, RowMutationState> rowEntry : tableEntry.getValue().entrySet()) {
            int rowLength = rowEntry.getKey().getLength();
            Map<PColumn, byte[]> colValueMap = rowEntry.getValue().getColumnValues();
            switch(table.getImmutableStorageScheme()) {
                case ONE_CELL_PER_COLUMN:
                    // iterate over columns
                    for (Entry<PColumn, byte[]> colValueEntry : colValueMap.entrySet()) {
                        PColumn pColumn = colValueEntry.getKey();
                        size += KeyValue.getKeyValueDataStructureSize(rowLength, pColumn.getFamilyName().getBytes().length, pColumn.getColumnQualifierBytes().length, colValueEntry.getValue().length);
                    }
                    break;
                case SINGLE_CELL_ARRAY_WITH_OFFSETS:
                    // we store all the column values in a single key value that contains all the
                    // column values followed by an offset array
                    size += PArrayDataTypeEncoder.getEstimatedByteSize(table, rowLength, colValueMap);
                    break;
            }
            // count the empty key value
            Pair<byte[], byte[]> emptyKeyValueInfo = EncodedColumnsUtil.getEmptyKeyValueInfo(table);
            size += KeyValue.getKeyValueDataStructureSize(rowLength, SchemaUtil.getEmptyColumnFamilyPtr(table).getLength(), emptyKeyValueInfo.getFirst().length, emptyKeyValueInfo.getSecond().length);
        }
    }
    return size;
}
Also used : ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) PTable(org.apache.phoenix.schema.PTable) PColumn(org.apache.phoenix.schema.PColumn) Map(java.util.Map) TableRef(org.apache.phoenix.schema.TableRef) RowMutationState(org.apache.phoenix.execute.MutationState.RowMutationState)
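
The per-cell arithmetic in the ONE_CELL_PER_COLUMN branch boils down to HBase's KeyValue size helper. A minimal sketch with made-up lengths (12-byte row key, 1-byte family, 2-byte qualifier, 4-byte value):

import org.apache.hadoop.hbase.KeyValue;

public class CellSizeSketch {
    public static void main(String[] args) {
        // Row key + family + qualifier + value lengths, plus the fixed
        // KeyValue overhead, summed per column in the estimate above.
        long perCell = KeyValue.getKeyValueDataStructureSize(12, 1, 2, 4);
        System.out.println(perCell);
    }
}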

Example 35 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

the class PhoenixRuntime method getPkColsForSql.

/**
     *
     * @param columns - initialized empty list to be filled with pairs of column family name and column name for the columns
     * used as the row key of the query plan. Column family names are optional, so the first part of each pair is nullable.
     * Column and family names are enclosed in double quotes to allow for case sensitivity and for the presence of
     * special characters. The salting column and the view index id column are not included. If the connection is tenant-specific
     * and the table used by the query plan is multi-tenant, the tenant id column is not included either.
     * @param plan - query plan to get info for.
     * @param conn - connection used to generate the query plan. The caller is responsible for closing the connection appropriately.
     * @param forDataTable - if true, family names and column names correspond to the data table even if the query plan uses
     * the secondary index table. If false, and the query plan uses the secondary index table, the family names and column
     * names correspond to the index table.
     * @throws SQLException
     */
@Deprecated
public static void getPkColsForSql(List<Pair<String, String>> columns, QueryPlan plan, Connection conn, boolean forDataTable) throws SQLException {
    checkNotNull(columns);
    checkNotNull(plan);
    checkNotNull(conn);
    List<PColumn> pkColumns = getPkColumns(plan.getTableRef().getTable(), conn, forDataTable);
    String columnName;
    String familyName;
    for (PColumn pCol : pkColumns) {
        columnName = addQuotes(pCol.getName().getString());
        familyName = pCol.getFamilyName() != null ? addQuotes(pCol.getFamilyName().getString()) : null;
        columns.add(new Pair<String, String>(familyName, columnName));
    }
}
Also used : PColumn(org.apache.phoenix.schema.PColumn)
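
A hedged usage sketch (JDBC URL and query are hypothetical); the method is @Deprecated, so this only illustrates the call shape, with PhoenixRuntime.getOptimizedQueryPlan supplying the QueryPlan.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.util.PhoenixRuntime;

public class PkColsSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            PreparedStatement stmt = conn.prepareStatement("SELECT * FROM MY_TABLE");
            QueryPlan plan = PhoenixRuntime.getOptimizedQueryPlan(stmt);
            List<Pair<String, String>> pkCols = new ArrayList<>();
            PhoenixRuntime.getPkColsForSql(pkCols, plan, conn, true);
            for (Pair<String, String> col : pkCols) {
                // The first part (family) may be null, as documented above.
                System.out.println(col.getFirst() + " / " + col.getSecond());
            }
        }
    }
}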

Aggregations

PColumn (org.apache.phoenix.schema.PColumn): 87 uses
PTable (org.apache.phoenix.schema.PTable): 47 uses
Expression (org.apache.phoenix.expression.Expression): 20 uses
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 20 uses
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 17 uses
ArrayList (java.util.ArrayList): 16 uses
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 16 uses
ColumnRef (org.apache.phoenix.schema.ColumnRef): 16 uses
PName (org.apache.phoenix.schema.PName): 16 uses
TableRef (org.apache.phoenix.schema.TableRef): 16 uses
PTableKey (org.apache.phoenix.schema.PTableKey): 15 uses
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 14 uses
SQLException (java.sql.SQLException): 13 uses
PColumnFamily (org.apache.phoenix.schema.PColumnFamily): 13 uses
PColumnImpl (org.apache.phoenix.schema.PColumnImpl): 12 uses
ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression): 11 uses
Test (org.junit.Test): 11 uses
KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression): 10 uses
Hint (org.apache.phoenix.parse.HintNode.Hint): 10 uses
List (java.util.List): 9 uses