Example 56 with PTable

Use of org.apache.phoenix.schema.PTable in the Apache Phoenix project.

In the class TestUtil, the method getGuidePostsList:

public static Collection<GuidePostsInfo> getGuidePostsList(Connection conn, String tableName, String pkCol, byte[] lowerRange, byte[] upperRange, String whereClauseSuffix) throws SQLException {
    String whereClauseStart = (lowerRange == null && upperRange == null ? "" : " WHERE " + ((lowerRange != null ? (pkCol + " >= ? " + (upperRange != null ? " AND " : "")) : "") + (upperRange != null ? (pkCol + " < ?") : "")));
    // Append the optional suffix onto whatever range predicate was built above.
    String whereClause = whereClauseSuffix == null ? whereClauseStart : whereClauseStart.length() == 0 ? (" WHERE " + whereClauseSuffix) : (whereClauseStart + " AND " + whereClauseSuffix);
    String query = "SELECT /*+ NO_INDEX */ COUNT(*) FROM " + tableName + whereClause;
    PhoenixPreparedStatement pstmt = conn.prepareStatement(query).unwrap(PhoenixPreparedStatement.class);
    if (lowerRange != null) {
        pstmt.setBytes(1, lowerRange);
    }
    if (upperRange != null) {
        pstmt.setBytes(lowerRange != null ? 2 : 1, upperRange);
    }
    pstmt.execute();
    TableRef tableRef = pstmt.getQueryPlan().getTableRef();
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = tableRef.getTable();
    GuidePostsInfo info = pconn.getQueryServices().getTableStats(new GuidePostsKey(table.getName().getBytes(), SchemaUtil.getEmptyColumnFamily(table)));
    return Collections.singletonList(info);
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) GuidePostsKey(org.apache.phoenix.schema.stats.GuidePostsKey) GuidePostsInfo(org.apache.phoenix.schema.stats.GuidePostsInfo) PhoenixPreparedStatement(org.apache.phoenix.jdbc.PhoenixPreparedStatement) TableRef(org.apache.phoenix.schema.TableRef) PTable(org.apache.phoenix.schema.PTable)
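
The helper can be exercised end to end once statistics exist for the table. Below is a minimal usage sketch; the JDBC URL, table name, and primary key column are hypothetical, and the UPDATE STATISTICS step assumes guide posts have not yet been collected.

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Collection;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.schema.stats.GuidePostsInfo;
import org.apache.phoenix.util.TestUtil;

public class GuidePostsListSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical Phoenix JDBC URL, table, and primary key column.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // Collect statistics so that guide posts exist for the table.
            conn.createStatement().execute("UPDATE STATISTICS MY_TABLE");
            Collection<GuidePostsInfo> guidePosts = TestUtil.getGuidePostsList(
                    conn, "MY_TABLE", "PK_COL", Bytes.toBytes("a"), Bytes.toBytes("z"), null);
            System.out.println("Guide post entries returned: " + guidePosts.size());
        }
    }
}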

Example 57 with PTable

Use of org.apache.phoenix.schema.PTable in the Apache Phoenix project.

In the class IndexMaintainer, the method serializeAdditional:

/**
 * For the client side to append the serialized IndexMaintainers of keyValueIndexes.
 * @param table the data table
 * @param indexMetaDataPtr bytes pointer that holds the returned serialized value
 * @param keyValueIndexes the indexes to serialize
 * @param connection the Phoenix connection used to obtain each index's IndexMaintainer
 */
public static void serializeAdditional(PTable table, ImmutableBytesWritable indexMetaDataPtr, List<PTable> keyValueIndexes, PhoenixConnection connection) {
    int nMutableIndexes = indexMetaDataPtr.getLength() == 0 ? 0 : ByteUtil.vintFromBytes(indexMetaDataPtr);
    int nIndexes = nMutableIndexes + keyValueIndexes.size();
    // Just in case new size increases buffer
    int estimatedSize = indexMetaDataPtr.getLength() + 1;
    if (indexMetaDataPtr.getLength() == 0) {
        estimatedSize += table.getRowKeySchema().getEstimatedByteSize();
    }
    for (PTable index : keyValueIndexes) {
        estimatedSize += index.getIndexMaintainer(table, connection).getEstimatedByteSize();
    }
    TrustedByteArrayOutputStream stream = new TrustedByteArrayOutputStream(estimatedSize + 1);
    DataOutput output = new DataOutputStream(stream);
    try {
        // Encode data table salting in sign of number of indexes
        WritableUtils.writeVInt(output, nIndexes * (table.getBucketNum() == null ? 1 : -1));
        // Serialize the current mutable indexes, subtracting the vint size from the length
        // as it's still included
        if (indexMetaDataPtr.getLength() > 0) {
            output.write(indexMetaDataPtr.get(), indexMetaDataPtr.getOffset(), indexMetaDataPtr.getLength() - WritableUtils.getVIntSize(nMutableIndexes));
        } else {
            table.getRowKeySchema().write(output);
        }
        // Serialize mutable indexes afterwards
        for (PTable index : keyValueIndexes) {
            IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
            byte[] protoBytes = IndexMaintainer.toProto(maintainer).toByteArray();
            WritableUtils.writeVInt(output, protoBytes.length);
            output.write(protoBytes);
        }
    } catch (IOException e) {
        // Impossible
        throw new RuntimeException(e);
    }
    indexMetaDataPtr.set(stream.getBuffer(), 0, stream.size());
}
Also used : DataOutput(java.io.DataOutput) DataOutputStream(java.io.DataOutputStream) TrustedByteArrayOutputStream(org.apache.phoenix.util.TrustedByteArrayOutputStream) IOException(java.io.IOException) PTable(org.apache.phoenix.schema.PTable)
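
A sketch of how the method above might be invoked on the client side; dataTable, indexA, indexB, and the open PhoenixConnection are placeholders for handles resolved elsewhere. This illustrates the calling convention only and is not code from the Phoenix project.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.index.IndexMaintainer;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.util.ByteUtil;

class SerializeAdditionalSketch {
    // dataTable, indexA, indexB, and connection are assumed to be resolved by the caller.
    static byte[] appendIndexMaintainers(PTable dataTable, PTable indexA, PTable indexB,
            PhoenixConnection connection) {
        // Start from an empty pointer; serializeAdditional writes the row key schema
        // followed by a length-prefixed protobuf for each index maintainer.
        ImmutableBytesWritable indexMetaDataPtr = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY);
        List<PTable> extraIndexes = Arrays.asList(indexA, indexB);
        IndexMaintainer.serializeAdditional(dataTable, indexMetaDataPtr, extraIndexes, connection);
        // Copy the bytes out of the pointer, e.g. to attach them to a mutation attribute.
        return ByteUtil.copyKeyBytesIfNecessary(indexMetaDataPtr);
    }
}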

Example 58 with PTable

Use of org.apache.phoenix.schema.PTable in the Apache Phoenix project.

In the class PhoenixIndexBuilder, the method executeAtomicOp:

@Override
public List<Mutation> executeAtomicOp(Increment inc) throws IOException {
    byte[] opBytes = inc.getAttribute(ATOMIC_OP_ATTRIB);
    if (opBytes == null) {
        // Unexpected
        return null;
    }
    inc.setAttribute(ATOMIC_OP_ATTRIB, null);
    Put put = null;
    Delete delete = null;
    // We can neither use the time stamp in the Increment to set the Get time range
    // nor set the Put/Delete time stamp and have this be atomic, as HBase does not
    // handle that. Though we disallow using the ON DUPLICATE KEY clause when
    // CURRENT_SCN is set, we still may have a time stamp set as of when the table
    // was resolved on the client side. We need to ignore this as well due to limitations
    // in HBase, but this isn't too bad as the time will be very close to the current
    // time anyway.
    long ts = HConstants.LATEST_TIMESTAMP;
    byte[] rowKey = inc.getRow();
    final Get get = new Get(rowKey);
    if (isDupKeyIgnore(opBytes)) {
        get.setFilter(new FirstKeyOnlyFilter());
        Result result = this.env.getRegion().get(get);
        return result.isEmpty() ? convertIncrementToPutInSingletonList(inc) : Collections.<Mutation>emptyList();
    }
    ByteArrayInputStream stream = new ByteArrayInputStream(opBytes);
    DataInputStream input = new DataInputStream(stream);
    boolean skipFirstOp = input.readBoolean();
    short repeat = input.readShort();
    final int[] estimatedSizeHolder = { 0 };
    List<Pair<PTable, List<Expression>>> operations = Lists.newArrayListWithExpectedSize(3);
    while (true) {
        ExpressionVisitor<Void> visitor = new StatelessTraverseAllExpressionVisitor<Void>() {

            @Override
            public Void visit(KeyValueColumnExpression expression) {
                get.addColumn(expression.getColumnFamily(), expression.getColumnQualifier());
                estimatedSizeHolder[0]++;
                return null;
            }
        };
        try {
            int nExpressions = WritableUtils.readVInt(input);
            List<Expression> expressions = Lists.newArrayListWithExpectedSize(nExpressions);
            for (int i = 0; i < nExpressions; i++) {
                Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance();
                expression.readFields(input);
                expressions.add(expression);
                expression.accept(visitor);
            }
            PTableProtos.PTable tableProto = PTableProtos.PTable.parseDelimitedFrom(input);
            PTable table = PTableImpl.createFromProto(tableProto);
            operations.add(new Pair<>(table, expressions));
        } catch (EOFException e) {
            break;
        }
    }
    int estimatedSize = estimatedSizeHolder[0];
    if (get.getFamilyMap().isEmpty()) {
        get.setFilter(new FirstKeyOnlyFilter());
    }
    MultiKeyValueTuple tuple;
    List<Cell> flattenedCells = null;
    List<Cell> cells = ((HRegion) this.env.getRegion()).get(get, false);
    if (cells.isEmpty()) {
        if (skipFirstOp) {
            if (operations.size() <= 1 && repeat <= 1) {
                return convertIncrementToPutInSingletonList(inc);
            }
            // Skip first operation (if first wasn't ON DUPLICATE KEY IGNORE)
            repeat--;
        }
        // Base current state off of new row
        flattenedCells = flattenCells(inc, estimatedSize);
        tuple = new MultiKeyValueTuple(flattenedCells);
    } else {
        // Base current state off of existing row
        tuple = new MultiKeyValueTuple(cells);
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    for (int opIndex = 0; opIndex < operations.size(); opIndex++) {
        Pair<PTable, List<Expression>> operation = operations.get(opIndex);
        PTable table = operation.getFirst();
        List<Expression> expressions = operation.getSecond();
        for (int j = 0; j < repeat; j++) {
            // repeater loop
            ptr.set(rowKey);
            // The flattened cells only need to be re-sorted when another expression is about to be
            // executed, not when the outer loop is exited. Hence we do it here, at the top of the loop.
            if (flattenedCells != null) {
                Collections.sort(flattenedCells, KeyValue.COMPARATOR);
            }
            PRow row = table.newRow(GenericKeyValueBuilder.INSTANCE, ts, ptr, false);
            for (int i = 0; i < expressions.size(); i++) {
                Expression expression = expressions.get(i);
                ptr.set(ByteUtil.EMPTY_BYTE_ARRAY);
                expression.evaluate(tuple, ptr);
                PColumn column = table.getColumns().get(i + 1);
                Object value = expression.getDataType().toObject(ptr, column.getSortOrder());
                // Verify the evaluated value fits within the column's max length and scale
                // before coercing it to the column's type.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), expression.getSortOrder(), expression.getMaxLength(), expression.getScale(), column.getMaxLength(), column.getScale())) {
                    throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), column.getScale());
                }
                column.getDataType().coerceBytes(ptr, value, expression.getDataType(), expression.getMaxLength(), expression.getScale(), expression.getSortOrder(), column.getMaxLength(), column.getScale(), column.getSortOrder(), table.rowKeyOrderOptimizable());
                byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                row.setValue(column, bytes);
            }
            flattenedCells = Lists.newArrayListWithExpectedSize(estimatedSize);
            List<Mutation> mutations = row.toRowMutations();
            for (Mutation source : mutations) {
                flattenCells(source, flattenedCells);
            }
            tuple.setKeyValues(flattenedCells);
        }
        // Repeat only applies to first statement
        repeat = 1;
    }
    List<Mutation> mutations = Lists.newArrayListWithExpectedSize(2);
    for (int i = 0; i < tuple.size(); i++) {
        Cell cell = tuple.getValue(i);
        if (Type.codeToType(cell.getTypeByte()) == Type.Put) {
            if (put == null) {
                put = new Put(rowKey);
                transferAttributes(inc, put);
                mutations.add(put);
            }
            put.add(cell);
        } else {
            if (delete == null) {
                delete = new Delete(rowKey);
                transferAttributes(inc, delete);
                mutations.add(delete);
            }
            delete.addDeleteMarker(cell);
        }
    }
    return mutations;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) PTable(org.apache.phoenix.schema.PTable) Result(org.apache.hadoop.hbase.client.Result) PRow(org.apache.phoenix.schema.PRow) PColumn(org.apache.phoenix.schema.PColumn) StatelessTraverseAllExpressionVisitor(org.apache.phoenix.expression.visitor.StatelessTraverseAllExpressionVisitor) EOFException(java.io.EOFException) List(java.util.List) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) Cell(org.apache.hadoop.hbase.Cell) Pair(org.apache.hadoop.hbase.util.Pair) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) DataInputStream(java.io.DataInputStream) Put(org.apache.hadoop.hbase.client.Put) PTableProtos(org.apache.phoenix.coprocessor.generated.PTableProtos) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) DataExceedsCapacityException(org.apache.phoenix.exception.DataExceedsCapacityException) ByteArrayInputStream(java.io.ByteArrayInputStream) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) Expression(org.apache.phoenix.expression.Expression) Get(org.apache.hadoop.hbase.client.Get) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) Mutation(org.apache.hadoop.hbase.client.Mutation)
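
For context, the serialized operations decoded above originate from an UPSERT with an ON DUPLICATE KEY clause issued on the client. A minimal sketch follows; the JDBC URL, table, and column names are hypothetical.

import java.sql.Connection;
import java.sql.DriverManager;

public class OnDuplicateKeySketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical Phoenix JDBC URL and table; COUNTER is assumed to be a BIGINT column.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // The ON DUPLICATE KEY UPDATE expressions are serialized into the Increment
            // attribute that executeAtomicOp() reads on the server side.
            conn.createStatement().execute(
                    "UPSERT INTO MY_TABLE (PK, COUNTER) VALUES ('row1', 0) " +
                    "ON DUPLICATE KEY UPDATE COUNTER = COUNTER + 1");
            conn.commit();
        }
    }
}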

Example 59 with PTable

Use of org.apache.phoenix.schema.PTable in the Apache Phoenix project.

In the class BaseResultIterators, the method initializeScan:

private static void initializeScan(QueryPlan plan, Integer perScanLimit, Integer offset, Scan scan) throws SQLException {
    StatementContext context = plan.getContext();
    TableRef tableRef = plan.getTableRef();
    PTable table = tableRef.getTable();
    Map<byte[], NavigableSet<byte[]>> familyMap = scan.getFamilyMap();
    // Hack for PHOENIX-2067 to force raw scan over all KeyValues to fix their row keys
    if (context.getConnection().isDescVarLengthRowKeyUpgrade()) {
        // We project *all* KeyValues across all column families as we make a pass over
        // a physical table and we want to make sure we catch all KeyValues that may be
        // dynamic or part of an updatable view.
        familyMap.clear();
        scan.setMaxVersions();
        // Remove any filter
        scan.setFilter(null);
        // Traverse (and subsequently clone) all KeyValues
        scan.setRaw(true);
        // Pass over PTable so we can re-write rows according to the row key schema
        scan.setAttribute(BaseScannerRegionObserver.UPGRADE_DESC_ROW_KEY, UngroupedAggregateRegionObserver.serialize(table));
    } else {
        FilterableStatement statement = plan.getStatement();
        RowProjector projector = plan.getProjector();
        boolean optimizeProjection = false;
        boolean keyOnlyFilter = familyMap.isEmpty() && context.getWhereConditionColumns().isEmpty();
        if (!projector.projectEverything()) {
            // Only take the single-column-family shortcut below when it is safe to do so;
            // otherwise we could project a column family that does not match the actual
            // column families of the table (which is bad).
            if (keyOnlyFilter && table.getColumnFamilies().size() == 1) {
                // Project the one column family. We must project a column family since it's possible
                // that there are other non declared column families that we need to ignore.
                scan.addFamily(table.getColumnFamilies().get(0).getName().getBytes());
            } else {
                optimizeProjection = true;
                if (projector.projectEveryRow()) {
                    if (table.getViewType() == ViewType.MAPPED) {
                        // Since we don't have the empty key value in MAPPED tables, 
                        // we must project all CFs in HRS. However, only the
                        // selected column values are returned back to client.
                        context.getWhereConditionColumns().clear();
                        for (PColumnFamily family : table.getColumnFamilies()) {
                            context.addWhereConditionColumn(family.getName().getBytes(), null);
                        }
                    } else {
                        byte[] ecf = SchemaUtil.getEmptyColumnFamily(table);
                        // Project the empty key value unless its column family has already
                        // been projected in its entirety.
                        if (!familyMap.containsKey(ecf) || familyMap.get(ecf) != null) {
                            scan.addColumn(ecf, EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst());
                        }
                    }
                }
            }
        }
        // Add FirstKeyOnlyFilter if there are no references to key value columns
        if (keyOnlyFilter) {
            ScanUtil.andFilterAtBeginning(scan, new FirstKeyOnlyFilter());
        }
        if (perScanLimit != null) {
            ScanUtil.andFilterAtEnd(scan, new PageFilter(perScanLimit));
        }
        if (offset != null) {
            ScanUtil.addOffsetAttribute(scan, offset);
        }
        int cols = plan.getGroupBy().getOrderPreservingColumnCount();
        if (cols > 0 && keyOnlyFilter && !plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN) && cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount() && plan.getGroupBy().isOrderPreserving() && (context.getAggregationManager().isEmpty() || plan.getGroupBy().isUngroupedAggregate())) {
            ScanUtil.andFilterAtEnd(scan, new DistinctPrefixFilter(plan.getTableRef().getTable().getRowKeySchema(), cols));
            if (plan.getLimit() != null) {
                // We can push the limit to the server
                ScanUtil.andFilterAtEnd(scan, new PageFilter(plan.getLimit()));
            }
        }
        scan.setAttribute(BaseScannerRegionObserver.QUALIFIER_ENCODING_SCHEME, new byte[] { table.getEncodingScheme().getSerializedMetadataValue() });
        scan.setAttribute(BaseScannerRegionObserver.IMMUTABLE_STORAGE_ENCODING_SCHEME, new byte[] { table.getImmutableStorageScheme().getSerializedMetadataValue() });
        // we use this flag on the server side to determine which value column qualifier to use in the key value we return from server.
        scan.setAttribute(BaseScannerRegionObserver.USE_NEW_VALUE_COLUMN_QUALIFIER, Bytes.toBytes(true));
        // When the table is being analyzed, no key values are looked up,
        // so there is no point setting the qualifier range.
        if (!ScanUtil.isAnalyzeTable(scan)) {
            setQualifierRanges(keyOnlyFilter, table, scan, context);
        }
        if (optimizeProjection) {
            optimizeProjection(context, scan, table, statement);
        }
    }
}
Also used : NavigableSet(java.util.NavigableSet) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) DistinctPrefixFilter(org.apache.phoenix.filter.DistinctPrefixFilter) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) PTable(org.apache.phoenix.schema.PTable) Hint(org.apache.phoenix.parse.HintNode.Hint) StatementContext(org.apache.phoenix.compile.StatementContext) RowProjector(org.apache.phoenix.compile.RowProjector) FilterableStatement(org.apache.phoenix.parse.FilterableStatement) PageFilter(org.apache.hadoop.hbase.filter.PageFilter) TableRef(org.apache.phoenix.schema.TableRef)
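
As a rough illustration of the non-upgrade path above, a key-only query with a pushed-down limit ends up with a scan similar to the plain HBase sketch below. The column family name is a placeholder and the exact filter composition is handled by ScanUtil in Phoenix; this does not reproduce the scan attributes the method sets.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyOnlyScanSketch {
    public static Scan buildScan() {
        Scan scan = new Scan();
        // Project the single declared column family, mirroring the key-only branch above.
        scan.addFamily(Bytes.toBytes("0"));
        // Skip to the first key of each row and cap the number of rows returned per scan.
        scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL,
                new FirstKeyOnlyFilter(), new PageFilter(100)));
        return scan;
    }
}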

Example 60 with PTable

Use of org.apache.phoenix.schema.PTable in the Apache Phoenix project.

In the class PhoenixIndexFailurePolicy, the method getLocalIndexNames:

private Collection<? extends String> getLocalIndexNames(HTableInterfaceReference ref, Collection<Mutation> mutations) throws IOException {
    Set<String> indexTableNames = new HashSet<String>(1);
    PhoenixConnection conn = null;
    try {
        conn = QueryUtil.getConnectionOnServer(this.env.getConfiguration()).unwrap(PhoenixConnection.class);
        PTable dataTable = PhoenixRuntime.getTableNoCache(conn, ref.getTableName());
        List<PTable> indexes = dataTable.getIndexes();
        // Keep one local index around; its IndexMaintainer is used to extract the view id
        // from each index mutation's row key.
        PTable localIndex = null;
        Map<ImmutableBytesWritable, String> localIndexNames = new HashMap<ImmutableBytesWritable, String>();
        for (PTable index : indexes) {
            if (index.getIndexType() == IndexType.LOCAL && index.getIndexState() == PIndexState.ACTIVE) {
                if (localIndex == null)
                    localIndex = index;
                localIndexNames.put(new ImmutableBytesWritable(MetaDataUtil.getViewIndexIdDataType().toBytes(index.getViewIndexId())), index.getName().getString());
            }
        }
        if (localIndex == null) {
            return Collections.emptySet();
        }
        IndexMaintainer indexMaintainer = localIndex.getIndexMaintainer(dataTable, conn);
        HRegionInfo regionInfo = this.env.getRegion().getRegionInfo();
        int offset = regionInfo.getStartKey().length == 0 ? regionInfo.getEndKey().length : regionInfo.getStartKey().length;
        byte[] viewId = null;
        for (Mutation mutation : mutations) {
            viewId = indexMaintainer.getViewIndexIdFromIndexRowKey(new ImmutableBytesWritable(mutation.getRow(), offset, mutation.getRow().length - offset));
            String indexTableName = localIndexNames.get(new ImmutableBytesWritable(viewId));
            indexTableNames.add(indexTableName);
        }
    } catch (ClassNotFoundException e) {
        throw new IOException(e);
    } catch (SQLException e) {
        throw new IOException(e);
    } finally {
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException e) {
                throw new IOException(e);
            }
        }
    }
    return indexTableNames;
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) HashMap(java.util.HashMap) SQLException(java.sql.SQLException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) PTable(org.apache.phoenix.schema.PTable) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Mutation(org.apache.hadoop.hbase.client.Mutation) HashSet(java.util.HashSet)

Aggregations

PTable (org.apache.phoenix.schema.PTable) 153
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection) 63
PTableKey (org.apache.phoenix.schema.PTableKey) 48
PColumn (org.apache.phoenix.schema.PColumn) 47
Connection (java.sql.Connection) 35
TableRef (org.apache.phoenix.schema.TableRef) 29
SQLException (java.sql.SQLException) 28
ArrayList (java.util.ArrayList) 28
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) 28
Test (org.junit.Test) 27
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable) 24
Expression (org.apache.phoenix.expression.Expression) 24
Scan (org.apache.hadoop.hbase.client.Scan) 21
LiteralExpression (org.apache.phoenix.expression.LiteralExpression) 21
Properties (java.util.Properties) 20
Mutation (org.apache.hadoop.hbase.client.Mutation) 17
ColumnRef (org.apache.phoenix.schema.ColumnRef) 16
IOException (java.io.IOException) 15
Hint (org.apache.phoenix.parse.HintNode.Hint) 14
PName (org.apache.phoenix.schema.PName) 14