
Example 31 with TableRef

use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

the class CreateTableCompiler method compile.

public MutationPlan compile(CreateTableStatement create) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ColumnResolver resolver = FromCompiler.getResolverForCreation(create, connection);
    PTableType type = create.getTableType();
    PhoenixConnection connectionToBe = connection;
    PTable parentToBe = null;
    ViewType viewTypeToBe = null;
    Scan scan = new Scan();
    final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
    // TODO: support any statement for a VIEW instead of just a WHERE clause
    ParseNode whereNode = create.getWhereClause();
    String viewStatementToBe = null;
    byte[][] viewColumnConstantsToBe = null;
    BitSet isViewColumnReferencedToBe = null;
    // Don't allow creating the table if any column family name uses the local index column family prefix.
    // Also validate the default value expressions.
    List<ColumnDef> columnDefs = create.getColumnDefs();
    List<ColumnDef> overideColumnDefs = null;
    PrimaryKeyConstraint pkConstraint = create.getPrimaryKeyConstraint();
    for (int i = 0; i < columnDefs.size(); i++) {
        ColumnDef columnDef = columnDefs.get(i);
        if (columnDef.getColumnDefName().getFamilyName() != null && columnDef.getColumnDefName().getFamilyName().contains(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_COLUMN_FAMILY).build().buildException();
        }
        // False means we do not need the default (because it evaluated to null)
        if (!columnDef.validateDefault(context, pkConstraint)) {
            if (overideColumnDefs == null) {
                overideColumnDefs = new ArrayList<>(columnDefs);
            }
            overideColumnDefs.set(i, new ColumnDef(columnDef, null));
        }
    }
    if (overideColumnDefs != null) {
        create = new CreateTableStatement(create, overideColumnDefs);
    }
    final CreateTableStatement finalCreate = create;
    if (type == PTableType.VIEW) {
        TableRef tableRef = resolver.getTables().get(0);
        int nColumns = tableRef.getTable().getColumns().size();
        isViewColumnReferencedToBe = new BitSet(nColumns);
        // Used to track column references in a view
        ExpressionCompiler expressionCompiler = new ColumnTrackingExpressionCompiler(context, isViewColumnReferencedToBe);
        parentToBe = tableRef.getTable();
        viewTypeToBe = parentToBe.getViewType() == ViewType.MAPPED ? ViewType.MAPPED : ViewType.UPDATABLE;
        if (whereNode == null) {
            viewStatementToBe = parentToBe.getViewStatement();
        } else {
            whereNode = StatementNormalizer.normalize(whereNode, resolver);
            if (whereNode.isStateless()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WHERE_IS_CONSTANT).build().buildException();
            }
            // If our parent has a VIEW statement, combine it with this one
            if (parentToBe.getViewStatement() != null) {
                SelectStatement select = new SQLParser(parentToBe.getViewStatement()).parseQuery().combine(whereNode);
                whereNode = select.getWhere();
            }
            Expression where = whereNode.accept(expressionCompiler);
            if (where != null && !LiteralExpression.isTrue(where)) {
                TableName baseTableName = create.getBaseTableName();
                StringBuilder buf = new StringBuilder();
                whereNode.toSQL(resolver, buf);
                viewStatementToBe = QueryUtil.getViewStatement(baseTableName.getSchemaName(), baseTableName.getTableName(), buf.toString());
            }
            if (viewTypeToBe != ViewType.MAPPED) {
                Long scn = connection.getSCN();
                // If we have an SCN or the parent table is transactional, reuse the current
                // connection. Otherwise, wrap the connection so that the parent table resolved
                // for this view gets registered on our connection, and resolve it as of the
                // parent's timestamp + 1.
                connectionToBe = (scn != null || tableRef.getTable().isTransactional()) ? connection
                    : new PhoenixConnection(new DelegateConnectionQueryServices(connection.getQueryServices()) {

                    @Override
                    public void addTable(PTable table, long resolvedTime) throws SQLException {
                        connection.addTable(table, resolvedTime);
                    }
                }, connection, tableRef.getTimeStamp() + 1);
                viewColumnConstantsToBe = new byte[nColumns][];
                ViewWhereExpressionVisitor visitor = new ViewWhereExpressionVisitor(parentToBe, viewColumnConstantsToBe);
                where.accept(visitor);
                // If view is not updatable, viewColumnConstants should be empty. We will still
                // inherit our parent viewConstants, but we have no additional ones.
                viewTypeToBe = visitor.isUpdatable() ? ViewType.UPDATABLE : ViewType.READ_ONLY;
                if (viewTypeToBe != ViewType.UPDATABLE) {
                    viewColumnConstantsToBe = null;
                }
            }
        }
    }
    final ViewType viewType = viewTypeToBe;
    final String viewStatement = viewStatementToBe;
    final byte[][] viewColumnConstants = viewColumnConstantsToBe;
    final BitSet isViewColumnReferenced = isViewColumnReferencedToBe;
    List<ParseNode> splitNodes = create.getSplitNodes();
    final byte[][] splits = new byte[splitNodes.size()][];
    ImmutableBytesWritable ptr = context.getTempPtr();
    ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
    for (int i = 0; i < splits.length; i++) {
        ParseNode node = splitNodes.get(i);
        if (node instanceof BindParseNode) {
            context.getBindManager().addParamMetaData((BindParseNode) node, VARBINARY_DATUM);
        }
        if (node.isStateless()) {
            Expression expression = node.accept(expressionCompiler);
            if (expression.evaluate(null, ptr)) {
                splits[i] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                continue;
            }
        }
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.SPLIT_POINT_NOT_CONSTANT).setMessage("Node: " + node).build().buildException();
    }
    final MetaDataClient client = new MetaDataClient(connectionToBe);
    final PTable parent = parentToBe;
    return new BaseMutationPlan(context, operation) {

        @Override
        public MutationState execute() throws SQLException {
            try {
                return client.createTable(finalCreate, splits, parent, viewStatement, viewType, viewColumnConstants, isViewColumnReferenced);
            } finally {
                if (client.getConnection() != connection) {
                    client.getConnection().close();
                }
            }
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return new ExplainPlan(Collections.singletonList("CREATE TABLE"));
        }
    };
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) SQLException(java.sql.SQLException) DelegateConnectionQueryServices(org.apache.phoenix.query.DelegateConnectionQueryServices) PTable(org.apache.phoenix.schema.PTable) SelectStatement(org.apache.phoenix.parse.SelectStatement) BindParseNode(org.apache.phoenix.parse.BindParseNode) ColumnParseNode(org.apache.phoenix.parse.ColumnParseNode) ParseNode(org.apache.phoenix.parse.ParseNode) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) MetaDataClient(org.apache.phoenix.schema.MetaDataClient) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) PTableType(org.apache.phoenix.schema.PTableType) CreateTableStatement(org.apache.phoenix.parse.CreateTableStatement) BitSet(java.util.BitSet) ColumnDef(org.apache.phoenix.parse.ColumnDef) PrimaryKeyConstraint(org.apache.phoenix.parse.PrimaryKeyConstraint) PrimaryKeyConstraint(org.apache.phoenix.parse.PrimaryKeyConstraint) TableName(org.apache.phoenix.parse.TableName) SQLParser(org.apache.phoenix.parse.SQLParser) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) RowKeyColumnExpression(org.apache.phoenix.expression.RowKeyColumnExpression) IsNullExpression(org.apache.phoenix.expression.IsNullExpression) AndExpression(org.apache.phoenix.expression.AndExpression) ComparisonExpression(org.apache.phoenix.expression.ComparisonExpression) Scan(org.apache.hadoop.hbase.client.Scan) ViewType(org.apache.phoenix.schema.PTable.ViewType) TableRef(org.apache.phoenix.schema.TableRef) BindParseNode(org.apache.phoenix.parse.BindParseNode)
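
For context, the TableRef-specific part of the compile method above can be reduced to a small sketch: resolve the CREATE statement, take the first TableRef from the resolver, and read the parent PTable from it. This is a minimal illustration rather than Phoenix source; the class name ViewParentInspector and the logging are hypothetical, and it assumes you already hold an open PhoenixConnection and a parsed CreateTableStatement.

import java.sql.SQLException;

import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.FromCompiler;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.parse.CreateTableStatement;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.TableRef;

// Hypothetical helper, not part of Phoenix: shows how the compiler above obtains the
// parent TableRef when the statement creates a VIEW.
public class ViewParentInspector {

    public static TableRef resolveViewParent(CreateTableStatement create, PhoenixConnection conn)
            throws SQLException {
        ColumnResolver resolver = FromCompiler.getResolverForCreation(create, conn);
        if (create.getTableType() != PTableType.VIEW) {
            // Only views have a parent table to resolve here.
            return null;
        }
        // For a view, the first resolved TableRef is the parent table or view.
        TableRef parentRef = resolver.getTables().get(0);
        PTable parent = parentRef.getTable();
        System.out.println("Parent " + parent.getName().getString() + " has "
                + parent.getColumns().size() + " columns, view type " + parent.getViewType());
        return parentRef;
    }
}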

Example 32 with TableRef

use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

the class PostLocalIndexDDLCompiler method compile.

public MutationPlan compile(PTable index) throws SQLException {
    try (final PhoenixStatement statement = new PhoenixStatement(connection)) {
        String query = "SELECT count(*) FROM " + tableName;
        final QueryPlan plan = statement.compileQuery(query);
        TableRef tableRef = plan.getTableRef();
        Scan scan = plan.getContext().getScan();
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        final PTable dataTable = tableRef.getTable();
        List<PTable> indexes = Lists.newArrayListWithExpectedSize(1);
        for (PTable indexTable : dataTable.getIndexes()) {
            if (indexTable.getKey().equals(index.getKey())) {
                index = indexTable;
                break;
            }
        }
        // Only build the newly created index.
        indexes.add(index);
        IndexMaintainer.serialize(dataTable, ptr, indexes, plan.getContext().getConnection());
        // Set attribute on scan that UngroupedAggregateRegionObserver will switch on.
        // We'll detect that this attribute was set the server-side and write the index
        // rows per region as a result. The value of the attribute will be our persisted
        // index maintainers.
        // Define the LOCAL_INDEX_BUILD as a new static in BaseScannerRegionObserver
        scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO, ByteUtil.copyKeyBytesIfNecessary(ptr));
        // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected for count(*).
        // However, in this case, we need to project all of the data columns that contribute to the index.
        IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, connection);
        for (ColumnReference columnRef : indexMaintainer.getAllColumns()) {
            if (index.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) {
                scan.addFamily(columnRef.getFamily());
            } else {
                scan.addColumn(columnRef.getFamily(), columnRef.getQualifier());
            }
        }
        // Return the plan so the caller can execute it, even with a connectionless connection (which makes testing easier).
        return new BaseMutationPlan(plan.getContext(), Operation.UPSERT) {

            @Override
            public MutationState execute() throws SQLException {
                connection.getMutationState().commitDDLFence(dataTable);
                Tuple tuple = plan.iterator().next();
                long rowCount = 0;
                if (tuple != null) {
                    Cell kv = tuple.getValue(0);
                    ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
                    // A single Cell will be returned with the count(*) - we decode that here
                    rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault());
                }
                // Return a MutationState whose row count reflects the number of index rows that were added.
                return new MutationState(0, 0, connection, rowCount);
            }
        };
    }
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PTable(org.apache.phoenix.schema.PTable) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) MutationState(org.apache.phoenix.execute.MutationState) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) TableRef(org.apache.phoenix.schema.TableRef) Tuple(org.apache.phoenix.schema.tuple.Tuple) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
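
The pattern above, compiling a query just to get at the plan's TableRef, is useful on its own. Below is a minimal sketch under the same assumptions (an open PhoenixConnection and an already-validated table name); the helper class name is hypothetical.

import java.sql.SQLException;

import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.TableRef;

// Hypothetical helper: compile a COUNT(*) query and return the data table the plan will scan.
public class PlanTableRefExample {

    public static PTable resolveDataTable(PhoenixConnection connection, String tableName)
            throws SQLException {
        try (PhoenixStatement statement = new PhoenixStatement(connection)) {
            QueryPlan plan = statement.compileQuery("SELECT count(*) FROM " + tableName);
            // The TableRef pairs the resolved PTable with the timestamp it was resolved at.
            TableRef tableRef = plan.getTableRef();
            return tableRef.getTable();
        }
    }
}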

Example 33 with TableRef

use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

the class CorrelatePlanTest method createProjectedTableFromLiterals.

private TableRef createProjectedTableFromLiterals(Object[] row) {
    List<PColumn> columns = Lists.<PColumn>newArrayList();
    for (int i = 0; i < row.length; i++) {
        String name = ParseNodeFactory.createTempAlias();
        Expression expr = LiteralExpression.newConstant(row[i]);
        PName colName = PNameFactory.newName(name);
        columns.add(new PColumnImpl(PNameFactory.newName(name), PNameFactory.newName(VALUE_COLUMN_FAMILY), expr.getDataType(), expr.getMaxLength(), expr.getScale(), expr.isNullable(), i, expr.getSortOrder(), null, null, false, name, false, false, colName.getBytes()));
    }
    try {
        PTable pTable = PTableImpl.makePTable(null, PName.EMPTY_NAME, PName.EMPTY_NAME, PTableType.SUBQUERY, null, MetaDataProtocol.MIN_TABLE_TIMESTAMP, PTable.INITIAL_SEQ_NUM, null, null, columns, null, null, Collections.<PTable>emptyList(), false, Collections.<PName>emptyList(), null, null, false, false, false, null, null, null, true, false, 0, 0L, Boolean.FALSE, null, false, ImmutableStorageScheme.ONE_CELL_PER_COLUMN, QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, EncodedCQCounter.NULL_COUNTER, true);
        TableRef sourceTable = new TableRef(pTable);
        List<ColumnRef> sourceColumnRefs = Lists.<ColumnRef>newArrayList();
        for (PColumn column : sourceTable.getTable().getColumns()) {
            sourceColumnRefs.add(new ColumnRef(sourceTable, column.getPosition()));
        }
        return new TableRef(TupleProjectionCompiler.createProjectedTable(sourceTable, sourceColumnRefs, false));
    } catch (SQLException e) {
        throw new RuntimeException(e);
    }
}
Also used : PColumnImpl(org.apache.phoenix.schema.PColumnImpl) SQLException(java.sql.SQLException) PTable(org.apache.phoenix.schema.PTable) PColumn(org.apache.phoenix.schema.PColumn) Expression(org.apache.phoenix.expression.Expression) ProjectedColumnExpression(org.apache.phoenix.expression.ProjectedColumnExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ComparisonExpression(org.apache.phoenix.expression.ComparisonExpression) CorrelateVariableFieldAccessExpression(org.apache.phoenix.expression.CorrelateVariableFieldAccessExpression) PName(org.apache.phoenix.schema.PName) ColumnRef(org.apache.phoenix.schema.ColumnRef) TableRef(org.apache.phoenix.schema.TableRef)
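
A TableRef produced this way behaves like any other: the projected PTable and its columns are reachable through it. The following is a small hypothetical helper (not from the Phoenix tests) that dumps what the fixture above builds.

import java.util.List;

import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.TableRef;

// Hypothetical helper: print the columns of the projected table wrapped by a TableRef,
// e.g. the one returned by createProjectedTableFromLiterals above.
public class ProjectedTableDump {

    public static void dump(TableRef projectedRef) {
        PTable table = projectedRef.getTable();
        List<PColumn> columns = table.getColumns();
        for (PColumn column : columns) {
            System.out.println(column.getPosition() + ": " + column.getName().getString()
                    + " " + column.getDataType());
        }
    }
}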

Example 34 with TableRef

use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

the class UngroupedAggregateRegionObserver method doPostScannerOpen.

@Override
protected RegionScanner doPostScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s) throws IOException, SQLException {
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Region region = env.getRegion();
    long ts = scan.getTimeRange().getMax();
    boolean localIndexScan = ScanUtil.isLocalIndex(scan);
    if (ScanUtil.isAnalyzeTable(scan)) {
        byte[] gp_width_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_WIDTH_BYTES);
        byte[] gp_per_region_bytes = scan.getAttribute(BaseScannerRegionObserver.GUIDEPOST_PER_REGION);
        // Let this throw, as this scan is being done for the sole purpose of collecting stats
        StatisticsCollector statsCollector = StatisticsCollectorFactory.createStatisticsCollector(env, region.getRegionInfo().getTable().getNameAsString(), ts, gp_width_bytes, gp_per_region_bytes);
        return collectStats(s, statsCollector, region, scan, env.getConfiguration());
    } else if (ScanUtil.isIndexRebuild(scan)) {
        return rebuildIndices(s, region, scan, env.getConfiguration());
    }
    int offsetToBe = 0;
    if (localIndexScan) {
        /*
             * For local indexes, we need to set an offset on row key expressions to skip
             * the region start key.
             */
        offsetToBe = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length : region.getRegionInfo().getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offsetToBe);
    }
    final int offset = offsetToBe;
    PTable projectedTable = null;
    PTable writeToTable = null;
    byte[][] values = null;
    byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
    boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
    if (isDescRowKeyOrderUpgrade) {
        logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
        projectedTable = deserializeTable(descRowKeyTableBytes);
        try {
            writeToTable = PTableImpl.makePTable(projectedTable, true);
        } catch (SQLException e) {
            // Impossible
            ServerUtil.throwIOException("Upgrade failed", e);
        }
        values = new byte[projectedTable.getPKColumns().size()][];
    }
    boolean useProto = false;
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);
    useProto = localIndexBytes != null;
    if (localIndexBytes == null) {
        localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
    }
    List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes, useProto);
    MutationList indexMutations = localIndexBytes == null ? new MutationList() : new MutationList(1024);
    RegionScanner theScanner = s;
    boolean replayMutations = scan.getAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS) != null;
    byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
    byte[] txState = scan.getAttribute(BaseScannerRegionObserver.TX_STATE);
    List<Expression> selectExpressions = null;
    byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
    boolean isUpsert = false;
    boolean isDelete = false;
    byte[] deleteCQ = null;
    byte[] deleteCF = null;
    byte[] emptyCF = null;
    HTable targetHTable = null;
    boolean areMutationInSameRegion = true;
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    if (upsertSelectTable != null) {
        isUpsert = true;
        projectedTable = deserializeTable(upsertSelectTable);
        targetHTable = new HTable(env.getConfiguration(), projectedTable.getPhysicalName().getBytes());
        selectExpressions = deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
        values = new byte[projectedTable.getPKColumns().size()][];
        areMutationInSameRegion = Bytes.compareTo(targetHTable.getTableName(), region.getTableDesc().getTableName().getName()) == 0 && !ExpressionUtil.isPkPositionChanging(new TableRef(projectedTable), selectExpressions);
    } else {
        byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
        isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
        if (!isDelete) {
            deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
            deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
        }
        emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
    }
    TupleProjector tupleProjector = null;
    byte[][] viewConstants = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
    if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
        if (dataColumns != null) {
            tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        }
        ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
        theScanner = getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector, region, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr, useQualifierAsIndex);
    }
    if (j != null) {
        theScanner = new HashJoinRegionScanner(theScanner, p, j, ScanUtil.getTenantId(scan), env, useQualifierAsIndex, useNewValueColumnQualifier);
    }
    int maxBatchSize = 0;
    long maxBatchSizeBytes = 0L;
    MutationList mutations = new MutationList();
    boolean needToWrite = false;
    Configuration conf = c.getEnvironment().getConfiguration();
    long flushSize = region.getTableDesc().getMemStoreFlushSize();
    if (flushSize <= 0) {
        flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
    }
    /*
     * Slow down the writes if the memstore size is more than
     * (hbase.hregion.memstore.block.multiplier - 1) times hbase.hregion.memstore.flush.size
     * bytes. This avoids a flush storm to HDFS for cases like index building, where reads and
     * writes happen across all the table regions on the server.
     */
    final long blockingMemStoreSize = flushSize * (conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER) - 1);
    boolean buildLocalIndex = indexMaintainers != null && dataColumns == null && !localIndexScan;
    if (isDescRowKeyOrderUpgrade || isDelete || isUpsert || (deleteCQ != null && deleteCF != null) || emptyCF != null || buildLocalIndex) {
        needToWrite = true;
        maxBatchSize = env.getConfiguration().getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
        mutations = new MutationList(Ints.saturatedCast(maxBatchSize + maxBatchSize / 10));
        maxBatchSizeBytes = env.getConfiguration().getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
    }
    Aggregators aggregators = ServerAggregators.deserialize(scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), env.getConfiguration());
    Aggregator[] rowAggregators = aggregators.getAggregators();
    boolean hasMore;
    boolean hasAny = false;
    Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
    Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " " + region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
    }
    int rowCount = 0;
    final RegionScanner innerScanner = theScanner;
    boolean useIndexProto = true;
    byte[] indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
    // For backward compatibility, fall back to looking up the old attribute
    if (indexMaintainersPtr == null) {
        indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
        useIndexProto = false;
    }
    boolean acquiredLock = false;
    try {
        if (needToWrite) {
            synchronized (lock) {
                scansReferenceCount++;
            }
        }
        region.startRegionOperation();
        acquiredLock = true;
        synchronized (innerScanner) {
            do {
                List<Cell> results = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList<Cell>();
                // Results are potentially returned even when the return value of s.next is false
                // since this is an indication of whether or not there are more values after the
                // ones returned
                hasMore = innerScanner.nextRaw(results);
                if (!results.isEmpty()) {
                    rowCount++;
                    result.setKeyValues(results);
                    if (isDescRowKeyOrderUpgrade) {
                        Arrays.fill(values, null);
                        Cell firstKV = results.get(0);
                        RowKeySchema schema = projectedTable.getRowKeySchema();
                        int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
                        for (int i = 0; i < schema.getFieldCount(); i++) {
                            Boolean hasValue = schema.next(ptr, i, maxOffset);
                            if (hasValue == null) {
                                break;
                            }
                            Field field = schema.getField(i);
                            if (field.getSortOrder() == SortOrder.DESC) {
                                // Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
                                if (field.getDataType().isArrayType()) {
                                    // The final "true" forces use of the correct separator byte
                                    field.getDataType().coerceBytes(ptr, null, field.getDataType(), field.getMaxLength(), field.getScale(), field.getSortOrder(), field.getMaxLength(), field.getScale(), field.getSortOrder(), true);
                                } else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
                                    // Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
                                    int len = ptr.getLength();
                                    while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                        len--;
                                    }
                                    ptr.set(ptr.get(), ptr.getOffset(), len);
                                // Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
                                } else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
                                    byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
                                    ptr.set(invertedBytes);
                                }
                            } else if (field.getDataType() == PBinary.INSTANCE) {
                                // Remove trailing space characters so that the setValues call below will replace them
                                // with the correct zero byte character. Note this is somewhat dangerous as these
                                // could be legit, but I don't know what the alternative is.
                                int len = ptr.getLength();
                                while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                    len--;
                                }
                                ptr.set(ptr.get(), ptr.getOffset(), len);
                            }
                            values[i] = ptr.copyBytes();
                        }
                        writeToTable.newKey(ptr, values);
                        if (Bytes.compareTo(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr.get(), ptr.getOffset() + offset, ptr.getLength()) == 0) {
                            continue;
                        }
                        byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
                        if (offset > 0) {
                            // for local indexes (prepend region start key)
                            byte[] newRowWithOffset = new byte[offset + newRow.length];
                            System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);
                            System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
                            newRow = newRowWithOffset;
                        }
                        byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
                        for (Cell cell : results) {
                            // Copy existing cell but with new row key
                            Cell newCell = new KeyValue(newRow, 0, newRow.length, cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()), cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                            switch(KeyValue.Type.codeToType(cell.getTypeByte())) {
                                case Put:
                                    // If Put, point delete old Put
                                    Delete del = new Delete(oldRow);
                                    del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.Delete, ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
                                    mutations.add(del);
                                    Put put = new Put(newRow);
                                    put.add(newCell);
                                    mutations.add(put);
                                    break;
                                case Delete:
                                case DeleteColumn:
                                case DeleteFamily:
                                case DeleteFamilyVersion:
                                    Delete delete = new Delete(newRow);
                                    delete.addDeleteMarker(newCell);
                                    mutations.add(delete);
                                    break;
                            }
                        }
                    } else if (buildLocalIndex) {
                        for (IndexMaintainer maintainer : indexMaintainers) {
                            if (!results.isEmpty()) {
                                result.getKey(ptr);
                                ValueGetter valueGetter = maintainer.createGetterFromKeyValues(ImmutableBytesPtr.copyBytesIfNecessary(ptr), results);
                                Put put = maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, results.get(0).getTimestamp(), env.getRegion().getRegionInfo().getStartKey(), env.getRegion().getRegionInfo().getEndKey());
                                indexMutations.add(put);
                            }
                        }
                        result.setKeyValues(results);
                    } else if (isDelete) {
                        // FIXME: the version of the Delete constructor without the lock
                        // args was introduced in 0.94.4, thus if we try to use it here
                        // we can no longer use the 0.94.2 version of the client.
                        Cell firstKV = results.get(0);
                        Delete delete = new Delete(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength(), ts);
                        if (replayMutations) {
                            delete.setAttribute(IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
                        }
                        mutations.add(delete);
                        // force Tephra to ignore this delete
                        delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                    } else if (isUpsert) {
                        Arrays.fill(values, null);
                        int bucketNumOffset = 0;
                        if (projectedTable.getBucketNum() != null) {
                            values[0] = new byte[] { 0 };
                            bucketNumOffset = 1;
                        }
                        int i = bucketNumOffset;
                        List<PColumn> projectedColumns = projectedTable.getColumns();
                        for (; i < projectedTable.getPKColumns().size(); i++) {
                            Expression expression = selectExpressions.get(i - bucketNumOffset);
                            if (expression.evaluate(result, ptr)) {
                                values[i] = ptr.copyBytes();
                                // If the expression's sort order differs from that of the column being projected into, then invert the bits.
                                if (expression.getSortOrder() != projectedColumns.get(i).getSortOrder()) {
                                    SortOrder.invert(values[i], 0, values[i], 0, values[i].length);
                                }
                            } else {
                                values[i] = ByteUtil.EMPTY_BYTE_ARRAY;
                            }
                        }
                        projectedTable.newKey(ptr, values);
                        PRow row = projectedTable.newRow(kvBuilder, ts, ptr, false);
                        for (; i < projectedColumns.size(); i++) {
                            Expression expression = selectExpressions.get(i - bucketNumOffset);
                            if (expression.evaluate(result, ptr)) {
                                PColumn column = projectedColumns.get(i);
                                if (!column.getDataType().isSizeCompatible(ptr, null, expression.getDataType(), expression.getSortOrder(), expression.getMaxLength(), expression.getScale(), column.getMaxLength(), column.getScale())) {
                                    throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), column.getScale(), column.getName().getString(), ptr);
                                }
                                column.getDataType().coerceBytes(ptr, null, expression.getDataType(), expression.getMaxLength(), expression.getScale(), expression.getSortOrder(), column.getMaxLength(), column.getScale(), column.getSortOrder(), projectedTable.rowKeyOrderOptimizable());
                                byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                                row.setValue(column, bytes);
                            }
                        }
                        for (Mutation mutation : row.toRowMutations()) {
                            if (replayMutations) {
                                mutation.setAttribute(IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
                            }
                            mutations.add(mutation);
                        }
                        for (i = 0; i < selectExpressions.size(); i++) {
                            selectExpressions.get(i).reset();
                        }
                    } else if (deleteCF != null && deleteCQ != null) {
                        // if no empty key value is being set
                        if (emptyCF == null || result.getValue(deleteCF, deleteCQ) != null) {
                            Delete delete = new Delete(results.get(0).getRowArray(), results.get(0).getRowOffset(), results.get(0).getRowLength());
                            delete.deleteColumns(deleteCF, deleteCQ, ts);
                            // force Tephra to ignore this delete
                            delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
                            mutations.add(delete);
                        }
                    }
                    if (emptyCF != null) {
                        /*
                             * If we've specified an emptyCF, then we need to insert an empty
                             * key value "retroactively" for any key value that is visible at
                             * the timestamp that the DDL was issued. Key values that are not
                             * visible at this timestamp will not ever be projected up to
                             * scans past this timestamp, so don't need to be considered.
                             * We insert one empty key value per row per timestamp.
                             */
                        Set<Long> timeStamps = Sets.newHashSetWithExpectedSize(results.size());
                        for (Cell kv : results) {
                            long kvts = kv.getTimestamp();
                            if (!timeStamps.contains(kvts)) {
                                Put put = new Put(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
                                put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts, ByteUtil.EMPTY_BYTE_ARRAY);
                                mutations.add(put);
                            }
                        }
                    }
                    if (readyToCommit(rowCount, mutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
                        commit(region, mutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr, txState, areMutationInSameRegion, targetHTable, useIndexProto);
                        mutations.clear();
                    }
                    if (readyToCommit(rowCount, indexMutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
                        commitBatch(region, indexMutations, null, blockingMemStoreSize, null, txState, useIndexProto);
                        indexMutations.clear();
                    }
                    aggregators.aggregate(rowAggregators, result);
                    hasAny = true;
                }
            } while (hasMore);
            if (!mutations.isEmpty()) {
                commit(region, mutations, indexUUID, blockingMemStoreSize, indexMaintainersPtr, txState, areMutationInSameRegion, targetHTable, useIndexProto);
                mutations.clear();
            }
            if (!indexMutations.isEmpty()) {
                commitBatch(region, indexMutations, null, blockingMemStoreSize, indexMaintainersPtr, txState, useIndexProto);
                indexMutations.clear();
            }
        }
    } finally {
        if (needToWrite) {
            synchronized (lock) {
                scansReferenceCount--;
            }
        }
        if (targetHTable != null) {
            targetHTable.close();
        }
        try {
            innerScanner.close();
        } finally {
            if (acquiredLock)
                region.closeRegionOperation();
        }
    }
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
    }
    final boolean hadAny = hasAny;
    KeyValue keyValue = null;
    if (hadAny) {
        byte[] value = aggregators.toBytes(rowAggregators);
        keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
    }
    final KeyValue aggKeyValue = keyValue;
    RegionScanner scanner = new BaseRegionScanner(innerScanner) {

        private boolean done = !hadAny;

        @Override
        public boolean isFilterDone() {
            return done;
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            if (done)
                return false;
            done = true;
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Configuration(org.apache.hadoop.conf.Configuration) TupleProjector(org.apache.phoenix.execute.TupleProjector) PTable(org.apache.phoenix.schema.PTable) ValueGetter(org.apache.phoenix.hbase.index.ValueGetter) EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) ArrayList(java.util.ArrayList) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) DataExceedsCapacityException(org.apache.phoenix.exception.DataExceedsCapacityException) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple) Aggregators(org.apache.phoenix.expression.aggregator.Aggregators) ServerAggregators(org.apache.phoenix.expression.aggregator.ServerAggregators) PLong(org.apache.phoenix.schema.types.PLong) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) KeyValue(org.apache.hadoop.hbase.KeyValue) StatisticsCollector(org.apache.phoenix.schema.stats.StatisticsCollector) SQLException(java.sql.SQLException) HTable(org.apache.hadoop.hbase.client.HTable) PRow(org.apache.phoenix.schema.PRow) PColumn(org.apache.phoenix.schema.PColumn) Field(org.apache.phoenix.schema.ValueSchema.Field) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) Aggregator(org.apache.phoenix.expression.aggregator.Aggregator) RowKeySchema(org.apache.phoenix.schema.RowKeySchema) Put(org.apache.hadoop.hbase.client.Put) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Expression(org.apache.phoenix.expression.Expression) HashJoinInfo(org.apache.phoenix.join.HashJoinInfo) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple) TableRef(org.apache.phoenix.schema.TableRef) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) Tuple(org.apache.phoenix.schema.tuple.Tuple) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple)
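
Among the many attributes handled above, the single TableRef use is easy to miss: for an UPSERT SELECT the observer wraps the deserialized projected PTable in a new TableRef so it can ask ExpressionUtil whether primary-key positions change. Below is a reduced sketch of just that check; the class name is hypothetical and the org.apache.phoenix.util package for ExpressionUtil is assumed.

import java.util.List;

import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.util.ExpressionUtil; // package assumed

// Hypothetical helper mirroring the check above: mutations can stay in the same region only
// when the target table is the scanned table and the projected expressions keep PK positions.
public class SameRegionCheck {

    public static boolean areMutationsInSameRegion(boolean targetIsScannedTable,
            PTable projectedTable, List<Expression> selectExpressions) {
        return targetIsScannedTable
                && !ExpressionUtil.isPkPositionChanging(new TableRef(projectedTable), selectExpressions);
    }
}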

Example 35 with TableRef

use of org.apache.phoenix.schema.TableRef in project phoenix by apache.

the class MutationState method shouldResubmitTransaction.

/**
     * Determines whether indexes were added to mutated tables while the transaction was in progress.
     * @return true if indexes were added and false otherwise.
     * @throws SQLException 
     */
private boolean shouldResubmitTransaction(Set<TableRef> txTableRefs) throws SQLException {
    if (logger.isInfoEnabled())
        logger.info("Checking for index updates as of " + getInitialWritePointer());
    MetaDataClient client = new MetaDataClient(connection);
    PMetaData cache = connection.getMetaDataCache();
    boolean addedAnyIndexes = false;
    boolean allImmutableTables = !txTableRefs.isEmpty();
    for (TableRef tableRef : txTableRefs) {
        PTable dataTable = tableRef.getTable();
        List<PTable> oldIndexes;
        PTableRef ptableRef = cache.getTableRef(dataTable.getKey());
        oldIndexes = ptableRef.getTable().getIndexes();
        // Always check at server for metadata change, as it's possible that the table is configured to not check for metadata changes
        // but in this case, the tx manager is telling us it's likely that there has been a change.
        MetaDataMutationResult result = client.updateCache(dataTable.getTenantId(), dataTable.getSchemaName().getString(), dataTable.getTableName().getString(), true);
        long timestamp = TransactionUtil.getResolvedTime(connection, result);
        tableRef.setTimeStamp(timestamp);
        PTable updatedDataTable = result.getTable();
        if (updatedDataTable == null) {
            throw new TableNotFoundException(dataTable.getSchemaName().getString(), dataTable.getTableName().getString());
        }
        allImmutableTables &= updatedDataTable.isImmutableRows();
        tableRef.setTable(updatedDataTable);
        if (!addedAnyIndexes) {
            // TODO: in theory we should do a deep equals check here, as it's possible
            // that an index was dropped and recreated with the same name but different
            // indexed/covered columns.
            addedAnyIndexes = (!oldIndexes.equals(updatedDataTable.getIndexes()));
            if (logger.isInfoEnabled())
                logger.info((addedAnyIndexes ? "Updates " : "No updates ") + "as of " + timestamp + " to " + updatedDataTable.getName().getString() + " with indexes " + updatedDataTable.getIndexes());
        }
    }
    if (logger.isInfoEnabled())
        logger.info((addedAnyIndexes ? "Updates " : "No updates ") + "to indexes as of " + getInitialWritePointer() + " over " + (allImmutableTables ? " all immutable tables" : " some mutable tables"));
    // If any indexes were added, then the conflict might be due to DDL/DML fence.
    return allImmutableTables || addedAnyIndexes;
}
Also used : MetaDataClient(org.apache.phoenix.schema.MetaDataClient) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) PMetaData(org.apache.phoenix.schema.PMetaData) PTableRef(org.apache.phoenix.schema.PTableRef) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) PTableRef(org.apache.phoenix.schema.PTableRef) TableRef(org.apache.phoenix.schema.TableRef) PTable(org.apache.phoenix.schema.PTable)
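
The TableRef here is mutable on purpose: after the server-side metadata check, the method writes the refreshed PTable and its resolution time back into the same reference. A condensed sketch of just that refresh step follows; the helper name is hypothetical and the org.apache.phoenix.util package for TransactionUtil is assumed.

import java.sql.SQLException;

import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.MetaDataClient;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.TableNotFoundException;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.util.TransactionUtil; // package assumed

// Hypothetical helper: re-resolve the table behind a TableRef at the server and update
// the reference in place, as shouldResubmitTransaction does for each mutated table.
public class TableRefRefresher {

    public static void refresh(PhoenixConnection connection, TableRef tableRef) throws SQLException {
        PTable dataTable = tableRef.getTable();
        MetaDataClient client = new MetaDataClient(connection);
        // Force a server-side metadata check even if the table normally skips it.
        MetaDataMutationResult result = client.updateCache(dataTable.getTenantId(),
                dataTable.getSchemaName().getString(), dataTable.getTableName().getString(), true);
        PTable updated = result.getTable();
        if (updated == null) {
            throw new TableNotFoundException(dataTable.getSchemaName().getString(),
                    dataTable.getTableName().getString());
        }
        tableRef.setTimeStamp(TransactionUtil.getResolvedTime(connection, result));
        tableRef.setTable(updated);
    }
}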

Aggregations

TableRef (org.apache.phoenix.schema.TableRef)43 PTable (org.apache.phoenix.schema.PTable)30 PColumn (org.apache.phoenix.schema.PColumn)16 Expression (org.apache.phoenix.expression.Expression)14 SQLException (java.sql.SQLException)13 ColumnRef (org.apache.phoenix.schema.ColumnRef)13 LiteralExpression (org.apache.phoenix.expression.LiteralExpression)12 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)12 Scan (org.apache.hadoop.hbase.client.Scan)11 ParseNode (org.apache.phoenix.parse.ParseNode)11 SelectStatement (org.apache.phoenix.parse.SelectStatement)10 ArrayList (java.util.ArrayList)9 ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable)9 PTableRef (org.apache.phoenix.schema.PTableRef)8 List (java.util.List)7 Map (java.util.Map)7 ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)7 Hint (org.apache.phoenix.parse.HintNode.Hint)7 Tuple (org.apache.phoenix.schema.tuple.Tuple)6 ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression)5