Example 1 with ColumnName

Use of org.apache.phoenix.parse.ColumnName in project phoenix by apache.

From the class MetaDataClient, method checkAndValidateRowTimestampCol:

private static boolean checkAndValidateRowTimestampCol(ColumnDef colDef, PrimaryKeyConstraint pkConstraint, boolean rowTimeStampColAlreadyFound, PTableType tableType) throws SQLException {
    ColumnName columnDefName = colDef.getColumnDefName();
    if (tableType == VIEW && (pkConstraint.getNumColumnsWithRowTimestamp() > 0 || colDef.isRowTimestamp())) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_NOT_ALLOWED_ON_VIEW).setColumnName(columnDefName.getColumnName()).build().buildException();
    }
    /*
     * For indexes we have already validated that the data table has the right kind and number of row_timestamp
     * columns. So we don't need to perform any extra validations for them.
     */
    if (tableType == TABLE) {
        boolean isColumnDeclaredRowTimestamp = colDef.isRowTimestamp() || pkConstraint.isColumnRowTimestamp(columnDefName);
        if (isColumnDeclaredRowTimestamp) {
            boolean isColumnPartOfPk = colDef.isPK() || pkConstraint.contains(columnDefName);
            // A column can be declared as ROW_TIMESTAMP only if it is part of the primary key
            if (isColumnDeclaredRowTimestamp && !isColumnPartOfPk) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_PK_COL_ONLY).setColumnName(columnDefName.getColumnName()).build().buildException();
            }
            // A column can be declared as ROW_TIMESTAMP only if it can be represented as a long
            PDataType dataType = colDef.getDataType();
            if (isColumnDeclaredRowTimestamp && (dataType != PLong.INSTANCE && dataType != PUnsignedLong.INSTANCE && !dataType.isCoercibleTo(PTimestamp.INSTANCE))) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_COL_INVALID_TYPE).setColumnName(columnDefName.getColumnName()).build().buildException();
            }
            // Only one column can be declared as a ROW_TIMESTAMP column
            if (rowTimeStampColAlreadyFound && isColumnDeclaredRowTimestamp) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_ONE_PK_COL_ONLY).setColumnName(columnDefName.getColumnName()).build().buildException();
            }
            return true;
        }
    }
    return false;
}
Also used : ColumnName(org.apache.phoenix.parse.ColumnName) PDataType(org.apache.phoenix.schema.types.PDataType) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo)
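
For context, here is a minimal sketch of DDL that exercises these checks through the Phoenix JDBC driver. The EVENTS table, its columns, and the jdbc:phoenix:localhost URL are illustrative assumptions, not taken from the example above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class RowTimestampExample {

    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Valid: EVENT_TIME is part of the primary key and is a DATE, which is
            // coercible to PTimestamp, so checkAndValidateRowTimestampCol accepts it.
            stmt.execute("CREATE TABLE EVENTS (" +
                    " HOST VARCHAR NOT NULL," +
                    " EVENT_TIME DATE NOT NULL," +
                    " PAYLOAD VARCHAR," +
                    " CONSTRAINT PK PRIMARY KEY (HOST, EVENT_TIME ROW_TIMESTAMP))");
            // Declaring the non-PK column PAYLOAD as ROW_TIMESTAMP would instead fail
            // with ROWTIMESTAMP_PK_COL_ONLY, and a second ROW_TIMESTAMP PK column
            // with ROWTIMESTAMP_ONE_PK_COL_ONLY.
        }
    }
}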

Example 2 with ColumnName

Use of org.apache.phoenix.parse.ColumnName in project phoenix by apache.

From the class UpsertCompiler, method compile:

public MutationPlan compile(UpsertStatement upsert) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    List<ColumnName> columnNodes = upsert.getColumns();
    TableRef tableRefToBe = null;
    PTable table = null;
    Set<PColumn> addViewColumnsToBe = Collections.emptySet();
    Set<PColumn> overlapViewColumnsToBe = Collections.emptySet();
    List<PColumn> allColumnsToBe = Collections.emptyList();
    boolean isTenantSpecific = false;
    boolean isSharedViewIndex = false;
    String tenantIdStr = null;
    ColumnResolver resolver = null;
    int[] columnIndexesToBe;
    int nColumnsToSet = 0;
    int[] pkSlotIndexesToBe;
    List<ParseNode> valueNodes = upsert.getValues();
    List<PColumn> targetColumns;
    NamedTableNode tableNode = upsert.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    QueryPlan queryPlanToBe = null;
    int nValuesToSet;
    boolean sameTable = false;
    boolean runOnServer = false;
    boolean serverUpsertSelectEnabled = services.getProps().getBoolean(QueryServices.ENABLE_SERVER_UPSERT_SELECT, QueryServicesOptions.DEFAULT_ENABLE_SERVER_UPSERT_SELECT);
    UpsertingParallelIteratorFactory parallelIteratorFactoryToBe = null;
    boolean useServerTimestampToBe = false;
    resolver = FromCompiler.getResolverForMutation(upsert, connection);
    tableRefToBe = resolver.getTables().get(0);
    table = tableRefToBe.getTable();
    // Cannot upsert into:
    // - a read-only view
    // - a non-index table while the connection is building an index
    // - a transactional table with a connection having an SCN
    if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
        throw new ReadOnlyTableException(schemaName, tableName);
    } else if (connection.isBuildingIndex() && table.getType() != PTableType.INDEX) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.ONLY_INDEX_UPDATABLE_AT_SCN).setSchemaName(schemaName).setTableName(tableName).build().buildException();
    } else if (table.isTransactional() && connection.getSCN() != null) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
    }
    boolean isSalted = table.getBucketNum() != null;
    isTenantSpecific = table.isMultiTenant() && connection.getTenantId() != null;
    isSharedViewIndex = table.getViewIndexId() != null;
    tenantIdStr = isTenantSpecific ? connection.getTenantId().getString() : null;
    int posOffset = isSalted ? 1 : 0;
    // Setup array of column indexes parallel to values that are going to be set
    allColumnsToBe = table.getColumns();
    nColumnsToSet = 0;
    if (table.getViewType() == ViewType.UPDATABLE) {
        addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumnsToBe.size());
        for (PColumn column : allColumnsToBe) {
            if (column.getViewConstant() != null) {
                addViewColumnsToBe.add(column);
            }
        }
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    // Allow full row upsert if no columns or only dynamic ones are specified and values count match
    if (columnNodes.isEmpty() || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) {
        nColumnsToSet = allColumnsToBe.size() - posOffset;
        columnIndexesToBe = new int[nColumnsToSet];
        pkSlotIndexesToBe = new int[columnIndexesToBe.length];
        targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
        targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
        int minPKPos = 0;
        if (isSharedViewIndex) {
            PColumn indexIdColumn = table.getPKColumns().get(minPKPos);
            columnIndexesToBe[minPKPos] = indexIdColumn.getPosition();
            targetColumns.set(minPKPos, indexIdColumn);
            minPKPos++;
        }
        if (isTenantSpecific) {
            PColumn tenantColumn = table.getPKColumns().get(minPKPos);
            columnIndexesToBe[minPKPos] = tenantColumn.getPosition();
            targetColumns.set(minPKPos, tenantColumn);
            minPKPos++;
        }
        for (int i = posOffset, j = 0; i < allColumnsToBe.size(); i++) {
            PColumn column = allColumnsToBe.get(i);
            if (SchemaUtil.isPKColumn(column)) {
                pkSlotIndexesToBe[i - posOffset] = j + posOffset;
                if (j++ < minPKPos) {
                    // Skip, as it's already been set above
                    continue;
                }
                minPKPos = 0;
            }
            columnIndexesToBe[i - posOffset + minPKPos] = i;
            targetColumns.set(i - posOffset + minPKPos, column);
        }
        if (!addViewColumnsToBe.isEmpty()) {
            // All view columns overlap in this case
            overlapViewColumnsToBe = addViewColumnsToBe;
            addViewColumnsToBe = Collections.emptySet();
        }
    } else {
        // Size for worst case
        int numColsInUpsert = columnNodes.size();
        nColumnsToSet = numColsInUpsert + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
        columnIndexesToBe = new int[nColumnsToSet];
        pkSlotIndexesToBe = new int[columnIndexesToBe.length];
        targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
        targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
        // TODO: necessary? So we'll get an AIOB exception if it's not replaced
        Arrays.fill(columnIndexesToBe, -1);
        // TODO: necessary? So we'll get an AIOB exception if it's not replaced
        Arrays.fill(pkSlotIndexesToBe, -1);
        BitSet columnsBeingSet = new BitSet(table.getColumns().size());
        int i = 0;
        if (isSharedViewIndex) {
            PColumn indexIdColumn = table.getPKColumns().get(i + posOffset);
            columnsBeingSet.set(columnIndexesToBe[i] = indexIdColumn.getPosition());
            pkSlotIndexesToBe[i] = i + posOffset;
            targetColumns.set(i, indexIdColumn);
            i++;
        }
        // Add tenant column directly, as we don't want to resolve it as this will fail
        if (isTenantSpecific) {
            PColumn tenantColumn = table.getPKColumns().get(i + posOffset);
            columnsBeingSet.set(columnIndexesToBe[i] = tenantColumn.getPosition());
            pkSlotIndexesToBe[i] = i + posOffset;
            targetColumns.set(i, tenantColumn);
            i++;
        }
        for (ColumnName colName : columnNodes) {
            ColumnRef ref = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName());
            PColumn column = ref.getColumn();
            if (IndexUtil.getViewConstantValue(column, ptr)) {
                if (overlapViewColumnsToBe.isEmpty()) {
                    overlapViewColumnsToBe = Sets.newHashSetWithExpectedSize(addViewColumnsToBe.size());
                }
                nColumnsToSet--;
                overlapViewColumnsToBe.add(column);
                addViewColumnsToBe.remove(column);
            }
            columnsBeingSet.set(columnIndexesToBe[i] = ref.getColumnPosition());
            targetColumns.set(i, column);
            if (SchemaUtil.isPKColumn(column)) {
                pkSlotIndexesToBe[i] = ref.getPKSlotPosition();
            }
            i++;
        }
        for (PColumn column : addViewColumnsToBe) {
            columnsBeingSet.set(columnIndexesToBe[i] = column.getPosition());
            targetColumns.set(i, column);
            if (SchemaUtil.isPKColumn(column)) {
                pkSlotIndexesToBe[i] = SchemaUtil.getPKPosition(table, column);
            }
            i++;
        }
        // If a table has rowtimestamp col, then we always set it.
        useServerTimestampToBe = table.getRowTimestampColPos() != -1 && !isRowTimestampSet(pkSlotIndexesToBe, table);
        if (useServerTimestampToBe) {
            PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos());
            // Need to resize columnIndexesToBe and pkSlotIndexesToBe to include this extra column.
            columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, columnIndexesToBe.length + 1);
            pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, pkSlotIndexesToBe.length + 1);
            columnsBeingSet.set(columnIndexesToBe[i] = rowTimestampCol.getPosition());
            pkSlotIndexesToBe[i] = table.getRowTimestampColPos();
            targetColumns.add(rowTimestampCol);
            if (valueNodes != null && !valueNodes.isEmpty()) {
                valueNodes.add(getNodeForRowTimestampColumn(rowTimestampCol));
            }
            nColumnsToSet++;
        }
        for (i = posOffset; i < table.getColumns().size(); i++) {
            PColumn column = table.getColumns().get(i);
            if (!columnsBeingSet.get(i) && !column.isNullable() && column.getExpressionStr() == null) {
                throw new ConstraintViolationException(SchemaUtil.getColumnDisplayName(column) + " may not be null");
            }
        }
    }
    boolean isAutoCommit = connection.getAutoCommit();
    if (valueNodes == null) {
        SelectStatement select = upsert.getSelect();
        assert (select != null);
        select = SubselectRewriter.flatten(select, connection);
        ColumnResolver selectResolver = FromCompiler.getResolverForQuery(select, connection, false, upsert.getTable().getName());
        select = StatementNormalizer.normalize(select, selectResolver);
        select = prependTenantAndViewConstants(table, select, tenantIdStr, addViewColumnsToBe, useServerTimestampToBe);
        SelectStatement transformedSelect = SubqueryRewriter.transform(select, selectResolver, connection);
        if (transformedSelect != select) {
            selectResolver = FromCompiler.getResolverForQuery(transformedSelect, connection, false, upsert.getTable().getName());
            select = StatementNormalizer.normalize(transformedSelect, selectResolver);
        }
        sameTable = !select.isJoin() && tableRefToBe.equals(selectResolver.getTables().get(0));
        /*
         * We can run the upsert in a coprocessor if:
         * 1) the FROM clause has only 1 table, or server UPSERT SELECT is enabled
         * 2) the select query isn't doing aggregation (which requires a client-side final merge)
         * 3) autoCommit is on
         * 4) the table is not immutable with indexes, as the client is the one that figures out the
         *    additional puts for index tables
         * 5) there is no limit clause, as the limit clause requires client-side post processing
         * 6) there are no sequences, as sequences imply that the order of upsert must match the order of
         *    selection. TODO: change this and only force client side if there's an ORDER BY on the sequence value
         * Otherwise, run the query to pull the data from the server
         * and populate the MutationState (up to a limit).
         */
        if (!(select.isAggregate() || select.isDistinct() || select.getLimit() != null || select.hasSequence())) {
            // We can pipeline the upsert select instead of spooling everything to disk first,
            // if we don't have any post processing that's required.
            parallelIteratorFactoryToBe = new UpsertingParallelIteratorFactory(connection, tableRefToBe, useServerTimestampToBe);
            // If we're in the else, then it's not an aggregate, distinct, limited, or sequence using query,
            // so we might be able to run it entirely on the server side.
            // For a table with a row timestamp column, we can't guarantee that the row key will reside in the
            // region space managed by region servers. So we bail out on executing on server side.
            // Disable running upsert select on server side if a table has global mutable secondary indexes on it
            boolean hasGlobalMutableIndexes = SchemaUtil.hasGlobalIndex(table) && !table.isImmutableRows();
            boolean hasWhereSubquery = select.getWhere() != null && select.getWhere().hasSubquery();
            runOnServer = (sameTable || (serverUpsertSelectEnabled && !hasGlobalMutableIndexes)) && isAutoCommit && !table.isTransactional() && !(table.isImmutableRows() && !table.getIndexes().isEmpty()) && !select.isJoin() && !hasWhereSubquery && table.getRowTimestampColPos() == -1;
        }
        // If we may be able to run on the server, add a hint that favors using the data table
        // if all else is equal.
        // TODO: it'd be nice if we could figure out in advance if the PK is potentially changing,
        // as this would disallow running on the server. We currently use the row projector we
        // get back to figure this out.
        HintNode hint = upsert.getHint();
        if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
            hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
        }
        select = SelectStatement.create(select, hint);
        // Pass scan through if same table in upsert and select so that projection is computed correctly
        // Use optimizer to choose the best plan
        QueryCompiler compiler = new QueryCompiler(statement, select, selectResolver, targetColumns, parallelIteratorFactoryToBe, new SequenceManager(statement), false, false, null);
        queryPlanToBe = compiler.compile();
        // If the compiled plan runs against a projected or subquery table, there are post-processing
        // steps and parallelIteratorFactory did not take effect.
        if (queryPlanToBe.getTableRef().getTable().getType() == PTableType.PROJECTED || queryPlanToBe.getTableRef().getTable().getType() == PTableType.SUBQUERY) {
            parallelIteratorFactoryToBe = null;
        }
        nValuesToSet = queryPlanToBe.getProjector().getColumnCount();
    // Cannot auto commit if doing aggregation or topN or salted
    // Salted causes problems because the row may end up living on a different region
    } else {
        nValuesToSet = valueNodes.size() + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
    }
    // Resize down to allow a subset of columns to be specifiable
    if (columnNodes.isEmpty() && columnIndexesToBe.length >= nValuesToSet) {
        nColumnsToSet = nValuesToSet;
        columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet);
        pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet);
        for (int i = posOffset + nValuesToSet; i < table.getColumns().size(); i++) {
            PColumn column = table.getColumns().get(i);
            if (!column.isNullable() && column.getExpressionStr() == null) {
                throw new ConstraintViolationException(SchemaUtil.getColumnDisplayName(column) + " may not be null");
            }
        }
    }
    if (nValuesToSet != nColumnsToSet) {
        // Note that this check is not entirely sufficient, as a column could have
        // been removed and then added back and we wouldn't detect that here.
        throw new UpsertColumnsValuesMismatchException(schemaName, tableName, "Numbers of columns: " + nColumnsToSet + ". Number of values: " + nValuesToSet);
    }
    final QueryPlan originalQueryPlan = queryPlanToBe;
    RowProjector projectorToBe = null;
    // Optimize only after all checks have been performed
    if (valueNodes == null) {
        queryPlanToBe = new QueryOptimizer(services).optimize(queryPlanToBe, statement, targetColumns, parallelIteratorFactoryToBe);
        projectorToBe = queryPlanToBe.getProjector();
    }
    final List<PColumn> allColumns = allColumnsToBe;
    final RowProjector projector = projectorToBe;
    final QueryPlan queryPlan = queryPlanToBe;
    final TableRef tableRef = tableRefToBe;
    final Set<PColumn> addViewColumns = addViewColumnsToBe;
    final Set<PColumn> overlapViewColumns = overlapViewColumnsToBe;
    final UpsertingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe;
    final int[] columnIndexes = columnIndexesToBe;
    final int[] pkSlotIndexes = pkSlotIndexesToBe;
    final boolean useServerTimestamp = useServerTimestampToBe;
    if (table.getRowTimestampColPos() == -1 && useServerTimestamp) {
        throw new IllegalStateException("For a table without row timestamp column, useServerTimestamp cannot be true");
    }
    // ///////////////////////////////////////////////////////////////////
    if (valueNodes == null) {
        // Before we re-order, check that for updatable view columns
        // the projected expression either matches the column name or
        // is a constant with the same required value.
        throwIfNotUpdatable(tableRef, overlapViewColumnsToBe, targetColumns, projector, sameTable);
        // ///////////////////////////////////////////////////////////////////
        if (runOnServer) {
            // At most this array will grow bigger by the number of PK columns
            int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet);
            int[] reverseColumnIndexes = new int[table.getColumns().size()];
            List<Expression> projectedExpressions = Lists.newArrayListWithExpectedSize(reverseColumnIndexes.length);
            Arrays.fill(reverseColumnIndexes, -1);
            for (int i = 0; i < nValuesToSet; i++) {
                projectedExpressions.add(projector.getColumnProjector(i).getExpression());
                reverseColumnIndexes[columnIndexes[i]] = i;
            }
            /*
                 * Order projected columns and projected expressions with PK columns
                 * leading order by slot position
                 */
            int offset = table.getBucketNum() == null ? 0 : 1;
            for (int i = 0; i < table.getPKColumns().size() - offset; i++) {
                PColumn column = table.getPKColumns().get(i + offset);
                int pos = reverseColumnIndexes[column.getPosition()];
                if (pos == -1) {
                    // Don't add a literal null for a missing fixed width column, as
                    // it's not valid to set a fixed width type to null.
                    if (column.getDataType().isFixedWidth()) {
                        continue;
                    }
                    // Add literal null for missing PK columns
                    pos = projectedExpressions.size();
                    Expression literalNull = LiteralExpression.newConstant(null, column.getDataType(), Determinism.ALWAYS);
                    projectedExpressions.add(literalNull);
                    allColumnsIndexes[pos] = column.getPosition();
                }
                // Swap select expression at pos with i
                Collections.swap(projectedExpressions, i, pos);
                // Swap column indexes and reverse column indexes too
                int tempPos = allColumnsIndexes[i];
                allColumnsIndexes[i] = allColumnsIndexes[pos];
                allColumnsIndexes[pos] = tempPos;
                reverseColumnIndexes[tempPos] = pos;
                reverseColumnIndexes[i] = i;
            }
            // If any pk slots are changing and server side UPSERT SELECT is disabled, do not run on server
            if (!serverUpsertSelectEnabled && ExpressionUtil.isPkPositionChanging(new TableRef(table), projectedExpressions)) {
                runOnServer = false;
            }
            // ///////////////////////////////////////////////////////////////////
            if (runOnServer) {
                // Iterate through columns being projected
                List<PColumn> projectedColumns = Lists.newArrayListWithExpectedSize(projectedExpressions.size());
                int posOff = table.getBucketNum() != null ? 1 : 0;
                for (int i = 0; i < projectedExpressions.size(); i++) {
                    // Must make new column if position has changed
                    PColumn column = allColumns.get(allColumnsIndexes[i]);
                    projectedColumns.add(column.getPosition() == i + posOff ? column : new PColumnImpl(column, i + posOff));
                }
                // Build table from projectedColumns
                // Hack to add default column family to be used on server in case no value column is projected.
                PTable projectedTable = PTableImpl.makePTable(table, projectedColumns, PNameFactory.newName(SchemaUtil.getEmptyColumnFamily(table)));
                SelectStatement select = SelectStatement.create(SelectStatement.COUNT_ONE, upsert.getHint());
                StatementContext statementContext = queryPlan.getContext();
                RowProjector aggProjectorToBe = ProjectionCompiler.compile(statementContext, select, GroupBy.EMPTY_GROUP_BY);
                statementContext.getAggregationManager().compile(queryPlan.getContext(), GroupBy.EMPTY_GROUP_BY);
                if (queryPlan.getProjector().projectEveryRow()) {
                    aggProjectorToBe = new RowProjector(aggProjectorToBe, true);
                }
                final RowProjector aggProjector = aggProjectorToBe;
                /*
                 * Transfer over the PTable representing the subset of columns selected, but all PK columns.
                 * Move columns setting the PK first in pkSlot order, adding a LiteralExpression of null for any missing ones.
                 * Transfer over the List<Expression> for projection.
                 * In the region scan, evaluate expressions in order, collecting the first n columns for the PK and
                 * collecting the non-PK columns in the mutation Map.
                 * Create the PRow and get the mutations, adding them to the batch.
                 */
                final StatementContext context = queryPlan.getContext();
                final Scan scan = context.getScan();
                scan.setAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE, UngroupedAggregateRegionObserver.serialize(projectedTable));
                scan.setAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS, UngroupedAggregateRegionObserver.serialize(projectedExpressions));
                // Ignore order by - it has no impact
                final QueryPlan aggPlan = new AggregatePlan(context, select, statementContext.getCurrentTable(), aggProjector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, originalQueryPlan);
                return new ServerUpsertSelectMutationPlan(queryPlan, tableRef, originalQueryPlan, context, connection, scan, aggPlan, aggProjector, maxSize, maxSizeBytes);
            }
        }
        // ///////////////////////////////////////////////////////////////////
        return new ClientUpsertSelectMutationPlan(queryPlan, tableRef, originalQueryPlan, parallelIteratorFactory, projector, columnIndexes, pkSlotIndexes, useServerTimestamp, maxSize, maxSizeBytes);
    }
    // //////////////////////////////////////////////////////////////////
    // UPSERT VALUES
    // ///////////////////////////////////////////////////////////////////
    final byte[][] values = new byte[nValuesToSet][];
    int nodeIndex = 0;
    if (isSharedViewIndex) {
        values[nodeIndex++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    if (isTenantSpecific) {
        PName tenantId = connection.getTenantId();
        values[nodeIndex++] = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, isSharedViewIndex);
    }
    final int nodeIndexOffset = nodeIndex;
    // Allocate array based on size of all columns in table,
    // since some values may not be set (if they're nullable).
    final StatementContext context = new StatementContext(statement, resolver, new Scan(), new SequenceManager(statement));
    UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
    final List<Expression> constantExpressions = Lists.newArrayListWithExpectedSize(valueNodes.size());
    // Build all the constant expressions first, as with sequences we want to collect them all
    // and initialize them in one batch.
    for (ParseNode valueNode : valueNodes) {
        if (!valueNode.isStateless()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT).build().buildException();
        }
        PColumn column = allColumns.get(columnIndexes[nodeIndex]);
        expressionBuilder.setColumn(column);
        Expression expression = valueNode.accept(expressionBuilder);
        if (expression.getDataType() != null && !expression.getDataType().isCastableTo(column.getDataType())) {
            throw TypeMismatchException.newException(expression.getDataType(), column.getDataType(), "expression: " + expression.toString() + " in column " + column);
        }
        constantExpressions.add(expression);
        nodeIndex++;
    }
    byte[] onDupKeyBytesToBe = null;
    List<Pair<ColumnName, ParseNode>> onDupKeyPairs = upsert.getOnDupKeyPairs();
    if (onDupKeyPairs != null) {
        if (table.isImmutableRows()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_IMMUTABLE).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (table.isTransactional()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_TRANSACTIONAL).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (connection.getSCN() != null) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_SCN_IN_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (SchemaUtil.hasGlobalIndex(table)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_WITH_GLOBAL_IDX).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (onDupKeyPairs.isEmpty()) {
            // ON DUPLICATE KEY IGNORE
            onDupKeyBytesToBe = PhoenixIndexBuilder.serializeOnDupKeyIgnore();
        } else {
            // ON DUPLICATE KEY UPDATE
            int position = table.getBucketNum() == null ? 0 : 1;
            UpdateColumnCompiler compiler = new UpdateColumnCompiler(context);
            int nColumns = onDupKeyPairs.size();
            List<Expression> updateExpressions = Lists.newArrayListWithExpectedSize(nColumns);
            LinkedHashSet<PColumn> updateColumns = Sets.newLinkedHashSetWithExpectedSize(nColumns + 1);
            // Use the first PK column name, as we know it won't conflict with others.
            updateColumns.add(new PColumnImpl(table.getPKColumns().get(position).getName(), null, PVarbinary.INSTANCE, null, null, false, position, SortOrder.getDefault(), 0, null, false, null, false, false, null));
            position++;
            for (Pair<ColumnName, ParseNode> columnPair : onDupKeyPairs) {
                ColumnName colName = columnPair.getFirst();
                PColumn updateColumn = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn();
                if (SchemaUtil.isPKColumn(updateColumn)) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_PK_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).setColumnName(updateColumn.getName().getString()).build().buildException();
                }
                final int columnPosition = position++;
                if (!updateColumns.add(new DelegateColumn(updateColumn) {

                    @Override
                    public int getPosition() {
                        return columnPosition;
                    }
                })) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.DUPLICATE_COLUMN_IN_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).setColumnName(updateColumn.getName().getString()).build().buildException();
                }
                ParseNode updateNode = columnPair.getSecond();
                compiler.setColumn(updateColumn);
                Expression updateExpression = updateNode.accept(compiler);
                // Check that updateExpression is coercible to updateColumn
                if (updateExpression.getDataType() != null && !updateExpression.getDataType().isCastableTo(updateColumn.getDataType())) {
                    throw TypeMismatchException.newException(updateExpression.getDataType(), updateColumn.getDataType(), "expression: " + updateExpression.toString() + " for column " + updateColumn);
                }
                if (compiler.isAggregate()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATION_NOT_ALLOWED_IN_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).setColumnName(updateColumn.getName().getString()).build().buildException();
                }
                updateExpressions.add(updateExpression);
            }
            PTable onDupKeyTable = PTableImpl.makePTable(table, updateColumns);
            onDupKeyBytesToBe = PhoenixIndexBuilder.serializeOnDupKeyUpdate(onDupKeyTable, updateExpressions);
        }
    }
    final byte[] onDupKeyBytes = onDupKeyBytesToBe;
    return new UpsertValuesMutationPlan(context, tableRef, nodeIndexOffset, constantExpressions, allColumns, columnIndexes, overlapViewColumns, values, addViewColumns, connection, pkSlotIndexes, useServerTimestamp, onDupKeyBytes, maxSize, maxSizeBytes);
}
Also used : PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) PTable(org.apache.phoenix.schema.PTable) DelegateColumn(org.apache.phoenix.schema.DelegateColumn) LiteralParseNode(org.apache.phoenix.parse.LiteralParseNode) BindParseNode(org.apache.phoenix.parse.BindParseNode) SequenceValueParseNode(org.apache.phoenix.parse.SequenceValueParseNode) ParseNode(org.apache.phoenix.parse.ParseNode) ConstraintViolationException(org.apache.phoenix.schema.ConstraintViolationException) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) BitSet(java.util.BitSet) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HintNode(org.apache.phoenix.parse.HintNode) Scan(org.apache.hadoop.hbase.client.Scan) ColumnRef(org.apache.phoenix.schema.ColumnRef) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PColumnImpl(org.apache.phoenix.schema.PColumnImpl) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) UpsertColumnsValuesMismatchException(org.apache.phoenix.schema.UpsertColumnsValuesMismatchException) Pair(org.apache.hadoop.hbase.util.Pair) QueryOptimizer(org.apache.phoenix.optimize.QueryOptimizer) Hint(org.apache.phoenix.parse.HintNode.Hint) PSmallint(org.apache.phoenix.schema.types.PSmallint) ColumnName(org.apache.phoenix.parse.ColumnName) Expression(org.apache.phoenix.expression.Expression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) PName(org.apache.phoenix.schema.PName) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) TableRef(org.apache.phoenix.schema.TableRef)
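
As a hedged sketch of the two upsert shapes this method compiles, the JDBC snippet below assumes the same illustrative connection URL and hypothetical COUNTERS / COUNTERS_ARCHIVE tables.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class UpsertExample {

    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Make the explicit commit below valid regardless of driver defaults.
            conn.setAutoCommit(false);
            // UPSERT VALUES takes the valueNodes branch: every value must be a
            // stateless (constant) expression, and the ON DUPLICATE KEY clause is
            // compiled into the serialized onDupKeyBytes built at the end of compile().
            stmt.execute("UPSERT INTO COUNTERS (ID, HITS) VALUES ('home', 1)" +
                    " ON DUPLICATE KEY UPDATE HITS = HITS + 1");
            conn.commit();
            // UPSERT SELECT takes the valueNodes == null branch; with autocommit on
            // and no aggregation, limit, or sequences, it may run entirely on the
            // server side per the conditions listed in the comment above.
            conn.setAutoCommit(true);
            stmt.execute("UPSERT INTO COUNTERS_ARCHIVE SELECT * FROM COUNTERS");
        }
    }
}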

Example 3 with ColumnName

Use of org.apache.phoenix.parse.ColumnName in project phoenix by apache.

From the class MetaDataClient, method dropColumn:

public MutationState dropColumn(DropColumnStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        connection.setAutoCommit(false);
        PName tenantId = connection.getTenantId();
        TableName tableNameNode = statement.getTable().getName();
        String schemaName = tableNameNode.getSchemaName();
        String tableName = tableNameNode.getTableName();
        String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
        boolean retried = false;
        while (true) {
            final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
            TableRef tableRef = resolver.getTables().get(0);
            PTable table = tableRef.getTable();
            List<ColumnName> columnRefs = statement.getColumnRefs();
            if (columnRefs == null) {
                columnRefs = Lists.newArrayListWithCapacity(0);
            }
            List<ColumnRef> columnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size() + table.getIndexes().size());
            List<TableRef> indexesToDrop = Lists.newArrayListWithExpectedSize(table.getIndexes().size());
            List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((table.getIndexes().size() + 1) * (1 + table.getColumns().size() - columnRefs.size()));
            List<PColumn> tableColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
            for (ColumnName column : columnRefs) {
                ColumnRef columnRef = null;
                try {
                    columnRef = resolver.resolveColumn(null, column.getFamilyName(), column.getColumnName());
                } catch (ColumnNotFoundException e) {
                    if (statement.ifExists()) {
                        return new MutationState(0, 0, connection);
                    }
                    throw e;
                }
                PColumn columnToDrop = columnRef.getColumn();
                tableColumnsToDrop.add(columnToDrop);
                if (SchemaUtil.isPKColumn(columnToDrop)) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK).setColumnName(columnToDrop.getName().getString()).build().buildException();
                } else if (table.isAppendOnlySchema()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_COL_APPEND_ONLY_SCHEMA).setColumnName(columnToDrop.getName().getString()).build().buildException();
                }
                columnsToDrop.add(new ColumnRef(columnRef.getTableRef(), columnToDrop.getPosition()));
            }
            dropColumnMutations(table, tableColumnsToDrop);
            boolean removedIndexTableOrColumn = false;
            Long timeStamp = table.isTransactional() ? tableRef.getTimeStamp() : null;
            for (PTable index : table.getIndexes()) {
                IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
                // get the covered columns
                List<PColumn> indexColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
                Set<Pair<String, String>> indexedColsInfo = indexMaintainer.getIndexedColumnInfo();
                Set<ColumnReference> coveredCols = indexMaintainer.getCoveredColumns();
                for (PColumn columnToDrop : tableColumnsToDrop) {
                    Pair<String, String> columnToDropInfo = new Pair<>(columnToDrop.getFamilyName().getString(), columnToDrop.getName().getString());
                    ColumnReference colDropRef = new ColumnReference(columnToDrop.getFamilyName() == null ? null : columnToDrop.getFamilyName().getBytes(), columnToDrop.getColumnQualifierBytes());
                    boolean isColumnIndexed = indexedColsInfo.contains(columnToDropInfo);
                    if (isColumnIndexed) {
                        if (index.getViewIndexId() == null) {
                            indexesToDrop.add(new TableRef(index));
                        }
                        connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, index.getName().getString()), index.getParentName() == null ? null : index.getParentName().getString(), index.getTimeStamp());
                        removedIndexTableOrColumn = true;
                    } else if (coveredCols.contains(colDropRef)) {
                        String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop);
                        PColumn indexColumn = index.getColumnForColumnName(indexColumnName);
                        indexColumnsToDrop.add(indexColumn);
                        // add the index column to be dropped so that we actually delete the column values
                        columnsToDrop.add(new ColumnRef(new TableRef(index), indexColumn.getPosition()));
                        removedIndexTableOrColumn = true;
                    }
                }
                if (!indexColumnsToDrop.isEmpty()) {
                    long indexTableSeqNum = incrementTableSeqNum(index, index.getType(), -indexColumnsToDrop.size(), null, null);
                    dropColumnMutations(index, indexColumnsToDrop);
                    long clientTimestamp = MutationState.getTableTimestamp(timeStamp, connection.getSCN());
                    connection.removeColumn(tenantId, index.getName().getString(), indexColumnsToDrop, clientTimestamp, indexTableSeqNum, TransactionUtil.getResolvedTimestamp(connection, index.isTransactional(), clientTimestamp));
                }
            }
            tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
            connection.rollback();
            long seqNum = incrementTableSeqNum(table, statement.getTableType(), -tableColumnsToDrop.size(), null, null);
            tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
            connection.rollback();
            // Force table header to be first in list
            Collections.reverse(tableMetaData);
            /*
             * Ensure our "empty column family to be" exists. Somewhat of an edge case, but can occur if we drop the last column
             * in a column family that was the empty column family. In that case, we have to pick another one. If there are no
             * other ones, then we need to create our default empty column family. Note that this may no longer be necessary once
             * we support declaring what the empty column family is on a table, as:
             * - If you declare it, we'd just ensure it's created at DDL time and never switch what it is unless you change it
             * - If you don't declare it, we can just continue to use the old empty column family in this case, dynamically
             *   updating the empty column family name on the PTable.
             */
            for (ColumnRef columnRefToDrop : columnsToDrop) {
                PTable tableContainingColumnToDrop = columnRefToDrop.getTable();
                byte[] emptyCF = getNewEmptyColumnFamilyOrNull(tableContainingColumnToDrop, columnRefToDrop.getColumn());
                if (emptyCF != null) {
                    try {
                        tableContainingColumnToDrop.getColumnFamily(emptyCF);
                    } catch (ColumnFamilyNotFoundException e) {
                        // Only if it's not already a column family do we need to ensure it's created
                        Map<String, List<Pair<String, Object>>> family = new HashMap<>(1);
                        family.put(Bytes.toString(emptyCF), Collections.<Pair<String, Object>>emptyList());
                        // Just use a Put without any key values as the Mutation, as addColumn will treat this specially
                        // TODO: pass through schema name and table name instead to these methods as it's cleaner
                        byte[] tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
                        if (tenantIdBytes == null)
                            tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY;
                        connection.getQueryServices().addColumn(Collections.<Mutation>singletonList(new Put(SchemaUtil.getTableKey(tenantIdBytes, tableContainingColumnToDrop.getSchemaName().getBytes(), tableContainingColumnToDrop.getTableName().getBytes()))), tableContainingColumnToDrop, family, Sets.newHashSet(Bytes.toString(emptyCF)), Collections.<PColumn>emptyList());
                    }
                }
            }
            MetaDataMutationResult result = connection.getQueryServices().dropColumn(tableMetaData, statement.getTableType());
            try {
                MutationCode code = processMutationResult(schemaName, tableName, result);
                if (code == MutationCode.COLUMN_NOT_FOUND) {
                    addTableToCache(result);
                    if (!statement.ifExists()) {
                        throw new ColumnNotFoundException(schemaName, tableName, Bytes.toString(result.getFamilyName()), Bytes.toString(result.getColumnName()));
                    }
                    return new MutationState(0, 0, connection);
                }
                // Update the client-side metadata cache: if an index table or index column was
                // removed, evict the whole table so that fresh metadata is pulled over from
                // the server when needed.
                if (tableColumnsToDrop.size() > 0) {
                    if (removedIndexTableOrColumn)
                        connection.removeTable(tenantId, tableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
                    else
                        connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName), tableColumnsToDrop, result.getMutationTime(), seqNum, TransactionUtil.getResolvedTime(connection, result));
                }
                // If we have a VIEW, then only delete the metadata, and leave the table data alone
                if (table.getType() != PTableType.VIEW) {
                    MutationState state = null;
                    connection.setAutoCommit(true);
                    Long scn = connection.getSCN();
                    // Delete everything in the column. You'll still be able to do queries at earlier timestamps
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    PostDDLCompiler compiler = new PostDDLCompiler(connection);
                    boolean dropMetaData = connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
                    // if the index is a local index or view index it uses a shared physical table
                    // so we need to issue delete markers for all the rows of the index
                    final List<TableRef> tableRefsToDrop = Lists.newArrayList();
                    Map<String, List<TableRef>> tenantIdTableRefMap = Maps.newHashMap();
                    if (result.getSharedTablesToDelete() != null) {
                        for (SharedTableState sharedTableState : result.getSharedTablesToDelete()) {
                            PTableImpl viewIndexTable = new PTableImpl(sharedTableState.getTenantId(), sharedTableState.getSchemaName(), sharedTableState.getTableName(), ts, table.getColumnFamilies(), sharedTableState.getColumns(), sharedTableState.getPhysicalNames(), sharedTableState.getViewIndexId(), table.isMultiTenant(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter(), table.useStatsForParallelization());
                            TableRef indexTableRef = new TableRef(viewIndexTable);
                            PName indexTableTenantId = sharedTableState.getTenantId();
                            if (indexTableTenantId == null) {
                                tableRefsToDrop.add(indexTableRef);
                            } else {
                                if (!tenantIdTableRefMap.containsKey(indexTableTenantId.getString())) {
                                    tenantIdTableRefMap.put(indexTableTenantId.getString(), Lists.<TableRef>newArrayList());
                                }
                                tenantIdTableRefMap.get(indexTableTenantId.getString()).add(indexTableRef);
                            }
                        }
                    }
                    // If dropMetaData is false, issue deletes for the index rows here (if it were true,
                    // they would have been dropped in ConnectionQueryServices.dropColumn)
                    if (!dropMetaData) {
                        tableRefsToDrop.addAll(indexesToDrop);
                    }
                    // Drop any index tables that had the dropped column in the PK
                    state = connection.getQueryServices().updateData(compiler.compile(tableRefsToDrop, null, null, Collections.<PColumn>emptyList(), ts));
                    // Drop any tenant-specific indexes
                    if (!tenantIdTableRefMap.isEmpty()) {
                        for (Entry<String, List<TableRef>> entry : tenantIdTableRefMap.entrySet()) {
                            String indexTenantId = entry.getKey();
                            Properties props = new Properties(connection.getClientInfo());
                            props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, indexTenantId);
                            try (PhoenixConnection tenantConn = new PhoenixConnection(connection, connection.getQueryServices(), props)) {
                                PostDDLCompiler dropCompiler = new PostDDLCompiler(tenantConn);
                                state = tenantConn.getQueryServices().updateData(dropCompiler.compile(entry.getValue(), null, null, Collections.<PColumn>emptyList(), ts));
                            }
                        }
                    }
                    // See https://issues.apache.org/jira/browse/PHOENIX-3605
                    if (!table.isImmutableRows() || table.getImmutableStorageScheme() == ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
                        // Update empty key value column if necessary
                        for (ColumnRef droppedColumnRef : columnsToDrop) {
                            // Painful, but we need a TableRef with a pre-set timestamp to prevent attempts
                            // to get any updates from the region server.
                            // TODO: move this into PostDDLCompiler
                            // TODO: consider filtering mutable indexes here, but then the issue is that
                            // we'd need to force an update of the data row empty key value if a mutable
                            // secondary index is changing its empty key value family.
                            droppedColumnRef = droppedColumnRef.cloneAtTimestamp(ts);
                            TableRef droppedColumnTableRef = droppedColumnRef.getTableRef();
                            PColumn droppedColumn = droppedColumnRef.getColumn();
                            MutationPlan plan = compiler.compile(Collections.singletonList(droppedColumnTableRef), getNewEmptyColumnFamilyOrNull(droppedColumnTableRef.getTable(), droppedColumn), null, Collections.singletonList(droppedColumn), ts);
                            state = connection.getQueryServices().updateData(plan);
                        }
                    }
                    // Return the last MutationState
                    return state;
                }
                return new MutationState(0, 0, connection);
            } catch (ConcurrentTableMutationException e) {
                if (retried) {
                    throw e;
                }
                table = connection.getTable(new PTableKey(tenantId, fullTableName));
                retried = true;
            }
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PostDDLCompiler(org.apache.phoenix.compile.PostDDLCompiler) Properties(java.util.Properties) IndexMaintainer(org.apache.phoenix.index.IndexMaintainer) ArrayList(java.util.ArrayList) List(java.util.List) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) Pair(org.apache.hadoop.hbase.util.Pair) MutationPlan(org.apache.phoenix.compile.MutationPlan) Put(org.apache.hadoop.hbase.client.Put) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) TableName(org.apache.phoenix.parse.TableName) ColumnName(org.apache.phoenix.parse.ColumnName) SharedTableState(org.apache.phoenix.coprocessor.MetaDataProtocol.SharedTableState) MutationState(org.apache.phoenix.execute.MutationState) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) Mutation(org.apache.hadoop.hbase.client.Mutation) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
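
A short sketch of the DDL path into dropColumn, using the same illustrative connection URL and the hypothetical EVENTS table from the first example.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class DropColumnExample {

    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Each ColumnName in the statement is resolved against the table; with
            // IF EXISTS, a missing column yields an empty MutationState instead of
            // a ColumnNotFoundException.
            stmt.execute("ALTER TABLE EVENTS DROP COLUMN IF EXISTS PAYLOAD");
            // Dropping a primary key column is always rejected with CANNOT_DROP_PK.
        }
    }
}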

Example 4 with ColumnName

Use of org.apache.phoenix.parse.ColumnName in project phoenix by apache.

From the class MetaDataClient, method createTableInternal:

private PTable createTableInternal(CreateTableStatement statement, byte[][] splits, final PTable parent, String viewStatement, ViewType viewType, final byte[][] viewColumnConstants, final BitSet isViewColumnReferenced, boolean allocateIndexId, IndexType indexType, Date asyncCreatedDate, Map<String, Object> tableProps, Map<String, Object> commonFamilyProps) throws SQLException {
    final PTableType tableType = statement.getTableType();
    boolean wasAutoCommit = connection.getAutoCommit();
    connection.rollback();
    try {
        connection.setAutoCommit(false);
        List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(statement.getColumnDefs().size() + 3);
        TableName tableNameNode = statement.getTableName();
        final String schemaName = connection.getSchema() != null && tableNameNode.getSchemaName() == null ? connection.getSchema() : tableNameNode.getSchemaName();
        final String tableName = tableNameNode.getTableName();
        String parentTableName = null;
        PName tenantId = connection.getTenantId();
        String tenantIdStr = tenantId == null ? null : tenantId.getString();
        Long scn = connection.getSCN();
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        boolean multiTenant = false;
        boolean storeNulls = false;
        boolean transactional = (parent != null) ? parent.isTransactional() : false;
        Integer saltBucketNum = null;
        String defaultFamilyName = null;
        boolean isImmutableRows = false;
        boolean isAppendOnlySchema = false;
        List<PName> physicalNames = Collections.emptyList();
        boolean addSaltColumn = false;
        boolean rowKeyOrderOptimizable = true;
        Long timestamp = null;
        boolean isNamespaceMapped = parent == null ? SchemaUtil.isNamespaceMappingEnabled(tableType, connection.getQueryServices().getProps()) : parent.isNamespaceMapped();
        boolean isLocalIndex = indexType == IndexType.LOCAL;
        QualifierEncodingScheme encodingScheme = NON_ENCODED_QUALIFIERS;
        ImmutableStorageScheme immutableStorageScheme = ONE_CELL_PER_COLUMN;
        if (parent != null && tableType == PTableType.INDEX) {
            timestamp = TransactionUtil.getTableTimestamp(connection, transactional);
            storeNulls = parent.getStoreNulls();
            isImmutableRows = parent.isImmutableRows();
            isAppendOnlySchema = parent.isAppendOnlySchema();
            // TODO: Can we support a multi-tenant index directly on a multi-tenant table
            // instead of only on a view? We don't have anywhere to put the link
            // from the table to the index, though.
            if (isLocalIndex || (parent.getType() == PTableType.VIEW && parent.getViewType() != ViewType.MAPPED)) {
                PName physicalName = parent.getPhysicalName();
                saltBucketNum = parent.getBucketNum();
                addSaltColumn = (saltBucketNum != null && !isLocalIndex);
                defaultFamilyName = parent.getDefaultFamilyName() == null ? null : parent.getDefaultFamilyName().getString();
                if (isLocalIndex) {
                    defaultFamilyName = parent.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY : IndexUtil.getLocalIndexColumnFamily(parent.getDefaultFamilyName().getString());
                    saltBucketNum = null;
                    // Set physical name of local index table
                    physicalNames = Collections.singletonList(PNameFactory.newName(physicalName.getBytes()));
                } else {
                    defaultFamilyName = parent.getDefaultFamilyName() == null ? QueryConstants.DEFAULT_COLUMN_FAMILY : parent.getDefaultFamilyName().getString();
                    // Set physical name of view index table
                    physicalNames = Collections.singletonList(PNameFactory.newName(MetaDataUtil.getViewIndexPhysicalName(physicalName.getBytes())));
                }
            }
            multiTenant = parent.isMultiTenant();
            storeNulls = parent.getStoreNulls();
            parentTableName = parent.getTableName().getString();
            // Pass through data table sequence number so we can check it hasn't changed
            PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM);
            incrementStatement.setString(1, tenantIdStr);
            incrementStatement.setString(2, schemaName);
            incrementStatement.setString(3, parentTableName);
            incrementStatement.setLong(4, parent.getSequenceNumber());
            incrementStatement.execute();
            // Get list of mutations and add to table meta data that will be passed to server
            // to guarantee order. This row will always end up last
            tableMetaData.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond());
            connection.rollback();
            // Add row linking from data table row to index table row
            PreparedStatement linkStatement = connection.prepareStatement(CREATE_LINK);
            linkStatement.setString(1, tenantIdStr);
            linkStatement.setString(2, schemaName);
            linkStatement.setString(3, parentTableName);
            linkStatement.setString(4, tableName);
            linkStatement.setByte(5, LinkType.INDEX_TABLE.getSerializedValue());
            linkStatement.setLong(6, parent.getSequenceNumber());
            linkStatement.setString(7, PTableType.INDEX.getSerializedValue());
            linkStatement.execute();
        }
        PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint();
        String pkName = null;
        List<Pair<ColumnName, SortOrder>> pkColumnsNames = Collections.<Pair<ColumnName, SortOrder>>emptyList();
        Iterator<Pair<ColumnName, SortOrder>> pkColumnsIterator = Collections.emptyIterator();
        if (pkConstraint != null) {
            pkColumnsNames = pkConstraint.getColumnNames();
            pkColumnsIterator = pkColumnsNames.iterator();
            pkName = pkConstraint.getName();
        }
        // Although unusual, it's possible to set a mapped VIEW as having immutable rows.
        // This tells Phoenix that you're managing the index maintenance yourself.
        if (tableType != PTableType.INDEX && (tableType != PTableType.VIEW || viewType == ViewType.MAPPED)) {
            // TODO remove TableProperty.IMMUTABLE_ROWS at the next major release
            Boolean isImmutableRowsProp = statement.immutableRows() != null ? statement.immutableRows() : (Boolean) TableProperty.IMMUTABLE_ROWS.getValue(tableProps);
            if (isImmutableRowsProp == null) {
                isImmutableRows = connection.getQueryServices().getProps().getBoolean(QueryServices.IMMUTABLE_ROWS_ATTRIB, QueryServicesOptions.DEFAULT_IMMUTABLE_ROWS);
            } else {
                isImmutableRows = isImmutableRowsProp;
            }
        }
        if (tableType == PTableType.TABLE) {
            Boolean isAppendOnlySchemaProp = (Boolean) TableProperty.APPEND_ONLY_SCHEMA.getValue(tableProps);
            isAppendOnlySchema = isAppendOnlySchemaProp != null ? isAppendOnlySchemaProp : false;
        }
        // Can't set any of these on views or shared indexes on views
        if (tableType != PTableType.VIEW && !allocateIndexId) {
            saltBucketNum = (Integer) TableProperty.SALT_BUCKETS.getValue(tableProps);
            if (saltBucketNum != null) {
                if (saltBucketNum < 0 || saltBucketNum > SaltingUtil.MAX_BUCKET_NUM) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_BUCKET_NUM).build().buildException();
                }
            }
            // Salt the index table if the data table is salted
            if (saltBucketNum == null) {
                if (parent != null) {
                    saltBucketNum = parent.getBucketNum();
                }
            } else if (saltBucketNum.intValue() == 0) {
                // Provides a way for an index to not be salted if its data table is salted
                saltBucketNum = null;
            }
            addSaltColumn = (saltBucketNum != null);
        }
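        // For illustration (hypothetical DDL): CREATE TABLE T (...) SALT_BUCKETS=8 creates a
        // salted table, while CREATE INDEX I ON T (...) SALT_BUCKETS=0 lets a global index on a
        // salted table opt out of inheriting its data table's salting.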
        // Can't set MULTI_TENANT or DEFAULT_COLUMN_FAMILY_NAME on an INDEX or a non mapped VIEW
        if (tableType != PTableType.INDEX && (tableType != PTableType.VIEW || viewType == ViewType.MAPPED)) {
            Boolean multiTenantProp = (Boolean) tableProps.get(PhoenixDatabaseMetaData.MULTI_TENANT);
            multiTenant = Boolean.TRUE.equals(multiTenantProp);
            defaultFamilyName = (String) TableProperty.DEFAULT_COLUMN_FAMILY.getValue(tableProps);
        }
        boolean disableWAL = false;
        Boolean disableWALProp = (Boolean) TableProperty.DISABLE_WAL.getValue(tableProps);
        if (disableWALProp != null) {
            disableWAL = disableWALProp;
        }
        long updateCacheFrequency = connection.getQueryServices().getProps().getLong(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB, QueryServicesOptions.DEFAULT_UPDATE_CACHE_FREQUENCY);
        Long updateCacheFrequencyProp = (Long) TableProperty.UPDATE_CACHE_FREQUENCY.getValue(tableProps);
        if (updateCacheFrequencyProp != null) {
            updateCacheFrequency = updateCacheFrequencyProp;
        }
        String autoPartitionSeq = (String) TableProperty.AUTO_PARTITION_SEQ.getValue(tableProps);
        Long guidePostsWidth = (Long) TableProperty.GUIDE_POSTS_WIDTH.getValue(tableProps);
        // We only allow setting guide post width for a base table
        if (guidePostsWidth != null && tableType != PTableType.TABLE) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_GUIDE_POST_WIDTH).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        }
        Boolean storeNullsProp = (Boolean) TableProperty.STORE_NULLS.getValue(tableProps);
        if (storeNullsProp == null) {
            if (parent == null) {
                storeNulls = connection.getQueryServices().getProps().getBoolean(QueryServices.DEFAULT_STORE_NULLS_ATTRIB, QueryServicesOptions.DEFAULT_STORE_NULLS);
                tableProps.put(PhoenixDatabaseMetaData.STORE_NULLS, Boolean.valueOf(storeNulls));
            }
        } else {
            storeNulls = storeNullsProp;
        }
        Boolean transactionalProp = (Boolean) TableProperty.TRANSACTIONAL.getValue(tableProps);
        if (transactionalProp != null && parent != null) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.ONLY_TABLE_MAY_BE_DECLARED_TRANSACTIONAL).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        }
        if (parent == null) {
            if (transactionalProp == null) {
                transactional = connection.getQueryServices().getProps().getBoolean(QueryServices.DEFAULT_TABLE_ISTRANSACTIONAL_ATTRIB, QueryServicesOptions.DEFAULT_TABLE_ISTRANSACTIONAL);
            } else {
                transactional = transactionalProp;
            }
        }
        boolean transactionsEnabled = connection.getQueryServices().getProps().getBoolean(QueryServices.TRANSACTIONS_ENABLED, QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
        // can't create a transactional table if transactions are not enabled
        if (!transactionsEnabled && transactional) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TXN_TABLE_IF_TXNS_DISABLED).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        }
        // can't create a transactional table if it has a row timestamp column
        if (pkConstraint.getNumColumnsWithRowTimestamp() > 0 && transactional) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TXN_TABLE_WITH_ROW_TIMESTAMP).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        }
        // Put potentially inferred value into tableProps as it's used by the createTable call below
        // to determine which coprocessors to install on the new table.
        tableProps.put(PhoenixDatabaseMetaData.TRANSACTIONAL, transactional);
        if (transactional) {
            // If TTL set, use Tephra TTL property name instead
            Object ttl = commonFamilyProps.remove(HColumnDescriptor.TTL);
            if (ttl != null) {
                commonFamilyProps.put(PhoenixTransactionContext.PROPERTY_TTL, ttl);
            }
        }
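        // e.g. with TRANSACTIONAL=true, a TTL=86400 given in the DDL is carried as the Tephra
        // TTL property instead of the HColumnDescriptor TTL, per the remapping above.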
        Boolean useStatsForParallelizationProp = (Boolean) TableProperty.USE_STATS_FOR_PARALLELIZATION.getValue(tableProps);
        boolean sharedTable = statement.getTableType() == PTableType.VIEW || allocateIndexId;
        if (transactional) {
            // If we use regular column delete markers (which is what non-transactional
            // tables use), they get converted on the server, but this can mess up our
            // secondary index code as the changes get committed prior to the index
            // maintenance code being able to see the prior state to update the rows correctly.
            if (Boolean.FALSE.equals(storeNullsProp)) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.STORE_NULLS_MUST_BE_TRUE_FOR_TRANSACTIONAL).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            }
            // Force STORE_NULLS to true when transactional as Tephra cannot deal with column deletes
            storeNulls = true;
            tableProps.put(PhoenixDatabaseMetaData.STORE_NULLS, Boolean.TRUE);
            if (!sharedTable) {
                Integer maxVersionsProp = (Integer) commonFamilyProps.get(HConstants.VERSIONS);
                if (maxVersionsProp == null) {
                    if (parent != null) {
                        HTableDescriptor desc = connection.getQueryServices().getTableDescriptor(parent.getPhysicalName().getBytes());
                        if (desc != null) {
                            maxVersionsProp = desc.getFamily(SchemaUtil.getEmptyColumnFamily(parent)).getMaxVersions();
                        }
                    }
                    if (maxVersionsProp == null) {
                        maxVersionsProp = connection.getQueryServices().getProps().getInt(QueryServices.MAX_VERSIONS_TRANSACTIONAL_ATTRIB, QueryServicesOptions.DEFAULT_MAX_VERSIONS_TRANSACTIONAL);
                    }
                    commonFamilyProps.put(HConstants.VERSIONS, maxVersionsProp);
                }
            }
        }
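        // For non-shared transactional tables, HBase VERSIONS thus defaults to the parent
        // table's setting or the max-versions-transactional config when not set in the DDL.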
        timestamp = timestamp == null ? TransactionUtil.getTableTimestamp(connection, transactional) : timestamp;
        // Delay this check as it is supported to have IMMUTABLE_ROWS and SALT_BUCKETS defined on views
        if (sharedTable) {
            if (tableProps.get(PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME) != null) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.DEFAULT_COLUMN_FAMILY_ON_SHARED_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            }
            if (SchemaUtil.hasHTableDescriptorProps(tableProps)) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES).build().buildException();
            }
        }
        List<ColumnDef> colDefs = statement.getColumnDefs();
        LinkedHashMap<PColumn, PColumn> columns;
        LinkedHashSet<PColumn> pkColumns;
        if (tenantId != null && !sharedTable) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_TENANT_SPECIFIC_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        }
        if (autoPartitionSeq != null) {
            int autoPartitionColIndex = multiTenant ? 1 : 0;
            PDataType dataType = colDefs.get(autoPartitionColIndex).getDataType();
            if (!PLong.INSTANCE.isCastableTo(dataType)) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.SEQUENCE_NOT_CASTABLE_TO_AUTO_PARTITION_ID_COLUMN).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            }
        }
        if (tableType == PTableType.VIEW) {
            physicalNames = Collections.singletonList(PNameFactory.newName(parent.getPhysicalName().getString()));
            if (viewType == ViewType.MAPPED) {
                columns = Maps.newLinkedHashMap();
                pkColumns = newLinkedHashSetWithExpectedSize(colDefs.size());
            } else {
                // Propagate property values to VIEW.
                // TODO: formalize the known set of these properties
                // Manually transfer the ROW_KEY_ORDER_OPTIMIZABLE_BYTES from parent as we don't
                // want to add this hacky flag to the schema (see PHOENIX-2067).
                rowKeyOrderOptimizable = parent.rowKeyOrderOptimizable();
                if (rowKeyOrderOptimizable) {
                    UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetaData, SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName), clientTimeStamp);
                }
                multiTenant = parent.isMultiTenant();
                saltBucketNum = parent.getBucketNum();
                isAppendOnlySchema = parent.isAppendOnlySchema();
                isImmutableRows = parent.isImmutableRows();
                if (updateCacheFrequencyProp == null) {
                    // set to the parent value if the property is not set on the view
                    updateCacheFrequency = parent.getUpdateCacheFrequency();
                }
                disableWAL = (disableWALProp == null ? parent.isWALDisabled() : disableWALProp);
                defaultFamilyName = parent.getDefaultFamilyName() == null ? null : parent.getDefaultFamilyName().getString();
                List<PColumn> allColumns = parent.getColumns();
                if (saltBucketNum != null) {
                    // Don't include salt column in columns, as it should not have it when created
                    allColumns = allColumns.subList(1, allColumns.size());
                }
                columns = new LinkedHashMap<PColumn, PColumn>(allColumns.size() + colDefs.size());
                for (PColumn column : allColumns) {
                    columns.put(column, column);
                }
                pkColumns = newLinkedHashSet(parent.getPKColumns());
                // Add row linking view to its parent
                PreparedStatement linkStatement = connection.prepareStatement(CREATE_VIEW_LINK);
                linkStatement.setString(1, tenantIdStr);
                linkStatement.setString(2, schemaName);
                linkStatement.setString(3, tableName);
                linkStatement.setString(4, parent.getName().getString());
                linkStatement.setByte(5, LinkType.PARENT_TABLE.getSerializedValue());
                linkStatement.setString(6, parent.getTenantId() == null ? null : parent.getTenantId().getString());
                linkStatement.execute();
                // Add row linking parent to view
                linkStatement = connection.prepareStatement(CREATE_CHILD_LINK);
                linkStatement.setString(1, parent.getTenantId() == null ? null : parent.getTenantId().getString());
                linkStatement.setString(2, parent.getSchemaName() == null ? null : parent.getSchemaName().getString());
                linkStatement.setString(3, parent.getTableName().getString());
                linkStatement.setString(4, tenantIdStr);
                linkStatement.setString(5, SchemaUtil.getTableName(schemaName, tableName));
                linkStatement.setByte(6, LinkType.CHILD_TABLE.getSerializedValue());
                linkStatement.execute();
            }
        } else {
            columns = new LinkedHashMap<PColumn, PColumn>(colDefs.size());
            // +1 in case the table is salted
            pkColumns = newLinkedHashSetWithExpectedSize(colDefs.size() + 1);
        }
        // Don't add a self-referencing link row for a mapped VIEW, as dropping it would then
        // fail because it looks like there's always a view associated with it.
        if (!physicalNames.isEmpty()) {
            // Otherwise, we end up with a self-referencing link and then cannot ever drop the view.
            if (viewType != ViewType.MAPPED || (!physicalNames.get(0).getString().equals(SchemaUtil.getTableName(schemaName, tableName)) && !physicalNames.get(0).getString().equals(SchemaUtil.getPhysicalHBaseTableName(schemaName, tableName, isNamespaceMapped).getString()))) {
                // Add row linking from data table row to physical table row
                PreparedStatement linkStatement = connection.prepareStatement(CREATE_LINK);
                for (PName physicalName : physicalNames) {
                    linkStatement.setString(1, tenantIdStr);
                    linkStatement.setString(2, schemaName);
                    linkStatement.setString(3, tableName);
                    linkStatement.setString(4, physicalName.getString());
                    linkStatement.setByte(5, LinkType.PHYSICAL_TABLE.getSerializedValue());
                    if (tableType == PTableType.VIEW) {
                        PTable physicalTable = connection.getTable(new PTableKey(null, physicalName.getString().replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR)));
                        linkStatement.setLong(6, physicalTable.getSequenceNumber());
                        linkStatement.setString(7, null);
                    } else {
                        linkStatement.setLong(6, parent.getSequenceNumber());
                        linkStatement.setString(7, PTableType.INDEX.getSerializedValue());
                    }
                    linkStatement.execute();
                }
                tableMetaData.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond());
                connection.rollback();
            }
        }
        Map<String, PName> familyNames = Maps.newLinkedHashMap();
        boolean rowTimeStampColumnAlreadyFound = false;
        int positionOffset = columns.size();
        if (saltBucketNum != null) {
            positionOffset++;
            if (addSaltColumn) {
                pkColumns.add(SaltingUtil.SALTING_COLUMN);
            }
        }
        int pkPositionOffset = pkColumns.size();
        int position = positionOffset;
        EncodedCQCounter cqCounter = NULL_COUNTER;
        PTable viewPhysicalTable = null;
        if (tableType == PTableType.VIEW) {
            /*
                 * We can't control what column qualifiers are used in an HTable mapped to a Phoenix view,
                 * so we are not able to encode column names.
                 */
            if (viewType != MAPPED) {
                /*
                     * For regular phoenix views, use the storage scheme of the physical table since they all share
                     * the same HTable. Views always use the base table's column qualifier counter for doling out
                     * encoded column qualifiers.
                     */
                viewPhysicalTable = PhoenixRuntime.getTable(connection, physicalNames.get(0).getString());
                immutableStorageScheme = viewPhysicalTable.getImmutableStorageScheme();
                encodingScheme = viewPhysicalTable.getEncodingScheme();
                if (EncodedColumnsUtil.usesEncodedColumnNames(viewPhysicalTable)) {
                    cqCounter = viewPhysicalTable.getEncodedCQCounter();
                }
            }
        } else // System tables have hard-coded column qualifiers. So we can't use column encoding for them.
        if (!SchemaUtil.isSystemTable(Bytes.toBytes(SchemaUtil.getTableName(schemaName, tableName)))) {
            /*
                 * Indexes inherit the storage scheme of their parent data table. Otherwise, we always attempt to
                 * create tables with encoded column names.
                 *
                 * Also of note is the case of shared indexes, i.e. local indexes and view indexes. In these cases,
                 * column qualifiers for covered columns don't have to be unique because rows of the logical indexes
                 * are partitioned by virtue of the indexId present in the row key. As such, different shared indexes
                 * can use potentially overlapping column qualifiers.
                 */
            if (parent != null) {
                encodingScheme = parent.getEncodingScheme();
                immutableStorageScheme = parent.getImmutableStorageScheme();
            } else {
                Byte encodingSchemeSerializedByte = (Byte) TableProperty.COLUMN_ENCODED_BYTES.getValue(tableProps);
                if (encodingSchemeSerializedByte == null) {
                    encodingSchemeSerializedByte = (byte) connection.getQueryServices().getProps().getInt(QueryServices.DEFAULT_COLUMN_ENCODED_BYTES_ATRRIB, QueryServicesOptions.DEFAULT_COLUMN_ENCODED_BYTES);
                }
                encodingScheme = QualifierEncodingScheme.fromSerializedValue(encodingSchemeSerializedByte);
                if (isImmutableRows) {
                    immutableStorageScheme = (ImmutableStorageScheme) TableProperty.IMMUTABLE_STORAGE_SCHEME.getValue(tableProps);
                    if (immutableStorageScheme == null) {
                        if (multiTenant) {
                            immutableStorageScheme = ImmutableStorageScheme.valueOf(connection.getQueryServices().getProps().get(QueryServices.DEFAULT_MULTITENANT_IMMUTABLE_STORAGE_SCHEME_ATTRIB, QueryServicesOptions.DEFAULT_MULTITENANT_IMMUTABLE_STORAGE_SCHEME));
                        } else {
                            immutableStorageScheme = ImmutableStorageScheme.valueOf(connection.getQueryServices().getProps().get(QueryServices.DEFAULT_IMMUTABLE_STORAGE_SCHEME_ATTRIB, QueryServicesOptions.DEFAULT_IMMUTABLE_STORAGE_SCHEME));
                        }
                    }
                    if (immutableStorageScheme != ONE_CELL_PER_COLUMN && encodingScheme == NON_ENCODED_QUALIFIERS) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES).setSchemaName(schemaName).setTableName(tableName).build().buildException();
                    }
                }
            }
            cqCounter = encodingScheme != NON_ENCODED_QUALIFIERS ? new EncodedCQCounter() : NULL_COUNTER;
        }
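        // For illustration (hypothetical DDL): CREATE TABLE T (...) IMMUTABLE_ROWS=true,
        // COLUMN_ENCODED_BYTES=2, IMMUTABLE_STORAGE_SCHEME=SINGLE_CELL_ARRAY_WITH_OFFSETS
        // selects a two-byte qualifier encoding; pairing SINGLE_CELL_ARRAY_WITH_OFFSETS with
        // NON_ENCODED_QUALIFIERS is rejected above.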
        Map<String, Integer> changedCqCounters = new HashMap<>(colDefs.size());
        boolean wasPKDefined = false;
        for (ColumnDef colDef : colDefs) {
            rowTimeStampColumnAlreadyFound = checkAndValidateRowTimestampCol(colDef, pkConstraint, rowTimeStampColumnAlreadyFound, tableType);
            if (colDef.isPK()) {
                // i.e. the column is declared as CREATE TABLE COLNAME DATATYPE PRIMARY KEY...
                if (wasPKDefined) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS).setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
                }
                wasPKDefined = true;
            } else {
                // do not allow setting NOT-NULL constraint on non-primary columns.
                if (!colDef.isNull() && !isImmutableRows && (wasPKDefined || !isPkColumn(pkConstraint, colDef))) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.KEY_VALUE_NOT_NULL).setSchemaName(schemaName).setTableName(tableName).setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
                }
            }
            ColumnName columnDefName = colDef.getColumnDefName();
            String colDefFamily = columnDefName.getFamilyName();
            boolean isPkColumn = isPkColumn(pkConstraint, colDef);
            String cqCounterFamily = null;
            if (!isPkColumn) {
                if (immutableStorageScheme == SINGLE_CELL_ARRAY_WITH_OFFSETS && encodingScheme != NON_ENCODED_QUALIFIERS) {
                    // For this scheme we track column qualifier counters at the column family level.
                    cqCounterFamily = colDefFamily != null ? colDefFamily : (defaultFamilyName != null ? defaultFamilyName : DEFAULT_COLUMN_FAMILY);
                } else {
                    // For other schemes, column qualifier counters are tracked using the default column family.
                    cqCounterFamily = defaultFamilyName != null ? defaultFamilyName : DEFAULT_COLUMN_FAMILY;
                }
            }
            Integer encodedCQ = isPkColumn ? null : cqCounter.getNextQualifier(cqCounterFamily);
            byte[] columnQualifierBytes = null;
            try {
                columnQualifierBytes = EncodedColumnsUtil.getColumnQualifierBytes(columnDefName.getColumnName(), encodedCQ, encodingScheme, isPkColumn);
            } catch (QualifierOutOfRangeException e) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.MAX_COLUMNS_EXCEEDED).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            }
            PColumn column = newColumn(position++, colDef, pkConstraint, defaultFamilyName, false, columnQualifierBytes, isImmutableRows);
            if (cqCounter.increment(cqCounterFamily)) {
                changedCqCounters.put(cqCounterFamily, cqCounter.getNextQualifier(cqCounterFamily));
            }
            if (SchemaUtil.isPKColumn(column)) {
                // TODO: remove this constraint?
                if (pkColumnsIterator.hasNext() && !column.getName().getString().equals(pkColumnsIterator.next().getFirst().getColumnName())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_OUT_OF_ORDER).setSchemaName(schemaName).setTableName(tableName).setColumnName(column.getName().getString()).build().buildException();
                }
                if (tableType == PTableType.VIEW && viewType != ViewType.MAPPED) {
                    throwIfLastPKOfParentIsFixedLength(parent, schemaName, tableName, colDef);
                }
                if (!pkColumns.add(column)) {
                    throw new ColumnAlreadyExistsException(schemaName, tableName, column.getName().getString());
                }
            }
            if (columns.put(column, column) != null) {
                throw new ColumnAlreadyExistsException(schemaName, tableName, column.getName().getString());
            }
            if ((colDef.getDataType() == PVarbinary.INSTANCE || colDef.getDataType().isArrayType()) && SchemaUtil.isPKColumn(column) && pkColumnsIterator.hasNext()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.VARBINARY_IN_ROW_KEY).setSchemaName(schemaName).setTableName(tableName).setColumnName(column.getName().getString()).build().buildException();
            }
            if (column.getFamilyName() != null) {
                familyNames.put(IndexUtil.getActualColumnFamilyName(column.getFamilyName().getString()), column.getFamilyName());
            }
        }
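        // The loop above also enforces: at most one inline PRIMARY KEY column, no NOT NULL
        // constraint on non-PK columns of mutable tables, PK column order matching the
        // constraint declaration, and VARBINARY/array types only as the last PK column.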
        // We need a PK definition for a TABLE or mapped VIEW
        if (!wasPKDefined && pkColumnsNames.isEmpty() && tableType != PTableType.VIEW && viewType != ViewType.MAPPED) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        }
        if (!pkColumnsNames.isEmpty() && pkColumnsNames.size() != pkColumns.size() - pkPositionOffset) {
            // Then a column name in the primary key constraint wasn't resolved
            Iterator<Pair<ColumnName, SortOrder>> pkColumnNamesIterator = pkColumnsNames.iterator();
            while (pkColumnNamesIterator.hasNext()) {
                ColumnName colName = pkColumnNamesIterator.next().getFirst();
                ColumnDef colDef = findColumnDefOrNull(colDefs, colName);
                if (colDef == null) {
                    throw new ColumnNotFoundException(schemaName, tableName, null, colName.getColumnName());
                }
                if (colDef.getColumnDefName().getFamilyName() != null) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME).setSchemaName(schemaName).setTableName(tableName).setColumnName(colDef.getColumnDefName().getColumnName()).setFamilyName(colDef.getColumnDefName().getFamilyName()).build().buildException();
                }
            }
            // The above should actually find the specific one, but just in case...
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_PRIMARY_KEY_CONSTRAINT).setSchemaName(schemaName).setTableName(tableName).build().buildException();
        }
        List<Pair<byte[], Map<String, Object>>> familyPropList = Lists.newArrayListWithExpectedSize(familyNames.size());
        if (!statement.getProps().isEmpty()) {
            for (String familyName : statement.getProps().keySet()) {
                if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) {
                    if (familyNames.get(familyName) == null) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.PROPERTIES_FOR_FAMILY).setFamilyName(familyName).build().buildException();
                    } else if (statement.getTableType() == PTableType.VIEW) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES).build().buildException();
                    }
                }
            }
        }
        throwIfInsufficientColumns(schemaName, tableName, pkColumns, saltBucketNum != null, multiTenant);
        for (PName familyName : familyNames.values()) {
            String fam = familyName.getString();
            Collection<Pair<String, Object>> props = statement.getProps().get(IndexUtil.getActualColumnFamilyName(fam));
            if (props.isEmpty()) {
                familyPropList.add(new Pair<byte[], Map<String, Object>>(familyName.getBytes(), commonFamilyProps));
            } else {
                Map<String, Object> combinedFamilyProps = Maps.newHashMapWithExpectedSize(props.size() + commonFamilyProps.size());
                combinedFamilyProps.putAll(commonFamilyProps);
                for (Pair<String, Object> prop : props) {
                    // TTL cannot be specified per column family; it applies to all column families of the table,
                    // i.e. it can't be column family specific.
                    if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY) && prop.getFirst().equals(TTL)) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_FAMILY_NOT_ALLOWED_FOR_TTL).build().buildException();
                    }
                    combinedFamilyProps.put(prop.getFirst(), prop.getSecond());
                }
                familyPropList.add(new Pair<byte[], Map<String, Object>>(familyName.getBytes(), combinedFamilyProps));
            }
        }
        if (familyNames.isEmpty()) {
            // if there are no family names, use the default column family name. This also takes care of the case when
            // the table ddl has only PK cols present (which means familyNames is empty).
            byte[] cf = defaultFamilyName == null ? (!isLocalIndex ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES : QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES) : Bytes.toBytes(defaultFamilyName);
            familyPropList.add(new Pair<byte[], Map<String, Object>>(cf, commonFamilyProps));
        }
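        // For illustration (hypothetical DDL): CREATE TABLE T (K VARCHAR PRIMARY KEY,
        // CF1.A VARCHAR, CF2.B VARCHAR) CF1.VERSIONS=3, TTL=86400 applies VERSIONS to CF1 only,
        // while TTL, rejected above when qualified by a family, applies to the whole table.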
        // Bootstrapping for our SYSTEM.TABLE that creates itself before it exists
        if (SchemaUtil.isMetaTable(schemaName, tableName)) {
            // TODO: what about stats for system catalog?
            PName newSchemaName = PNameFactory.newName(schemaName);
            // Column names and qualifiers are hardcoded for system tables.
            PTable table = PTableImpl.makePTable(tenantId, newSchemaName, PNameFactory.newName(tableName), tableType, null, MetaDataProtocol.MIN_TABLE_TIMESTAMP, PTable.INITIAL_SEQ_NUM, PNameFactory.newName(QueryConstants.SYSTEM_TABLE_PK_NAME), null, columns.values(), null, null, Collections.<PTable>emptyList(), isImmutableRows, Collections.<PName>emptyList(), defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName), null, Boolean.TRUE.equals(disableWAL), false, false, null, null, indexType, true, false, 0, 0L, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, ONE_CELL_PER_COLUMN, NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER, true);
            connection.addTable(table, MetaDataProtocol.MIN_TABLE_TIMESTAMP);
        }
        // Update column qualifier counters
        if (EncodedColumnsUtil.usesEncodedColumnNames(encodingScheme)) {
            // Store the encoded column counter for phoenix entities that have their own hbase
            // tables i.e. base tables and indexes.
            String schemaNameToUse = tableType == VIEW ? viewPhysicalTable.getSchemaName().getString() : schemaName;
            String tableNameToUse = tableType == VIEW ? viewPhysicalTable.getTableName().getString() : tableName;
            boolean sharedIndex = tableType == PTableType.INDEX && (indexType == IndexType.LOCAL || parent.getType() == PTableType.VIEW);
            // For local indexes and indexes on views, pass on the tenant id since all of their
            // metadata rows have tenant ids in them.
            String tenantIdToUse = connection.getTenantId() != null && sharedIndex ? connection.getTenantId().getString() : null;
            // When a view adds its own columns, we need to increase the sequence number of the base table
            // too since we want clients to get the latest PTable of the base table.
            for (Entry<String, Integer> entry : changedCqCounters.entrySet()) {
                try (PreparedStatement linkStatement = connection.prepareStatement(UPDATE_ENCODED_COLUMN_COUNTER)) {
                    linkStatement.setString(1, tenantIdToUse);
                    linkStatement.setString(2, schemaNameToUse);
                    linkStatement.setString(3, tableNameToUse);
                    linkStatement.setString(4, entry.getKey());
                    linkStatement.setInt(5, entry.getValue());
                    linkStatement.execute();
                }
            }
            if (tableType == VIEW && !changedCqCounters.isEmpty()) {
                PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM);
                incrementStatement.setString(1, null);
                incrementStatement.setString(2, viewPhysicalTable.getSchemaName().getString());
                incrementStatement.setString(3, viewPhysicalTable.getTableName().getString());
                incrementStatement.setLong(4, viewPhysicalTable.getSequenceNumber() + 1);
                incrementStatement.execute();
            }
            if (connection.getMutationState().toMutations(timestamp).hasNext()) {
                tableMetaData.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond());
                connection.rollback();
            }
        }
        short nextKeySeq = 0;
        List<Mutation> columnMetadata = Lists.newArrayListWithExpectedSize(columns.size());
        try (PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN_CREATE_TABLE)) {
            for (Map.Entry<PColumn, PColumn> entry : columns.entrySet()) {
                PColumn column = entry.getValue();
                final int columnPosition = column.getPosition();
                // set the autoPartition column attributes
                if (parent != null && parent.getAutoPartitionSeqName() != null && parent.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parent)).equals(column)) {
                    entry.setValue(column = new DelegateColumn(column) {

                        @Override
                        public byte[] getViewConstant() {
                            // set to non-null value so that we will generate a Put that
                            // will be set correctly on the server
                            return QueryConstants.EMPTY_COLUMN_VALUE_BYTES;
                        }

                        @Override
                        public boolean isViewReferenced() {
                            return true;
                        }
                    });
                } else if (isViewColumnReferenced != null) {
                    if (viewColumnConstants != null && columnPosition < viewColumnConstants.length) {
                        entry.setValue(column = new DelegateColumn(column) {

                            @Override
                            public byte[] getViewConstant() {
                                return viewColumnConstants[columnPosition];
                            }

                            @Override
                            public boolean isViewReferenced() {
                                return isViewColumnReferenced.get(columnPosition);
                            }
                        });
                    } else {
                        entry.setValue(column = new DelegateColumn(column) {

                            @Override
                            public boolean isViewReferenced() {
                                return isViewColumnReferenced.get(columnPosition);
                            }
                        });
                    }
                }
                Short keySeq = SchemaUtil.isPKColumn(column) ? ++nextKeySeq : null;
                addColumnMutation(schemaName, tableName, column, colUpsert, parentTableName, pkName, keySeq, saltBucketNum != null);
                columnMetadata.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond());
                connection.rollback();
            }
        }
        // add the columns in reverse order since we reverse the list later
        Collections.reverse(columnMetadata);
        tableMetaData.addAll(columnMetadata);
        String dataTableName = parent == null || tableType == PTableType.VIEW ? null : parent.getTableName().getString();
        PIndexState indexState = parent == null || tableType == PTableType.VIEW ? null : PIndexState.BUILDING;
        PreparedStatement tableUpsert = connection.prepareStatement(CREATE_TABLE);
        tableUpsert.setString(1, tenantIdStr);
        tableUpsert.setString(2, schemaName);
        tableUpsert.setString(3, tableName);
        tableUpsert.setString(4, tableType.getSerializedValue());
        tableUpsert.setLong(5, PTable.INITIAL_SEQ_NUM);
        tableUpsert.setInt(6, position);
        if (saltBucketNum != null) {
            tableUpsert.setInt(7, saltBucketNum);
        } else {
            tableUpsert.setNull(7, Types.INTEGER);
        }
        tableUpsert.setString(8, pkName);
        tableUpsert.setString(9, dataTableName);
        tableUpsert.setString(10, indexState == null ? null : indexState.getSerializedValue());
        tableUpsert.setBoolean(11, isImmutableRows);
        tableUpsert.setString(12, defaultFamilyName);
        if (parent != null && parent.getAutoPartitionSeqName() != null && viewStatement == null) {
            // set to non-null value so that we will generate a Put that
            // will be set correctly on the server
            tableUpsert.setString(13, QueryConstants.EMPTY_COLUMN_VALUE);
        } else {
            tableUpsert.setString(13, viewStatement);
        }
        tableUpsert.setBoolean(14, disableWAL);
        tableUpsert.setBoolean(15, multiTenant);
        if (viewType == null) {
            tableUpsert.setNull(16, Types.TINYINT);
        } else {
            tableUpsert.setByte(16, viewType.getSerializedValue());
        }
        if (indexType == null) {
            tableUpsert.setNull(17, Types.TINYINT);
        } else {
            tableUpsert.setByte(17, indexType.getSerializedValue());
        }
        tableUpsert.setBoolean(18, storeNulls);
        if (parent != null && tableType == PTableType.VIEW) {
            tableUpsert.setInt(19, parent.getColumns().size());
        } else {
            tableUpsert.setInt(19, BASE_TABLE_BASE_COLUMN_COUNT);
        }
        tableUpsert.setBoolean(20, transactional);
        tableUpsert.setLong(21, updateCacheFrequency);
        tableUpsert.setBoolean(22, isNamespaceMapped);
        if (autoPartitionSeq == null) {
            tableUpsert.setNull(23, Types.VARCHAR);
        } else {
            tableUpsert.setString(23, autoPartitionSeq);
        }
        tableUpsert.setBoolean(24, isAppendOnlySchema);
        if (guidePostsWidth == null) {
            tableUpsert.setNull(25, Types.BIGINT);
        } else {
            tableUpsert.setLong(25, guidePostsWidth);
        }
        tableUpsert.setByte(26, immutableStorageScheme.getSerializedMetadataValue());
        tableUpsert.setByte(27, encodingScheme.getSerializedMetadataValue());
        if (useStatsForParallelizationProp == null) {
            tableUpsert.setNull(28, Types.BOOLEAN);
        } else {
            tableUpsert.setBoolean(28, useStatsForParallelizationProp);
        }
        tableUpsert.execute();
        if (asyncCreatedDate != null) {
            PreparedStatement setAsync = connection.prepareStatement(SET_ASYNC_CREATED_DATE);
            setAsync.setString(1, tenantIdStr);
            setAsync.setString(2, schemaName);
            setAsync.setString(3, tableName);
            setAsync.setDate(4, asyncCreatedDate);
            setAsync.execute();
        }
        tableMetaData.addAll(connection.getMutationState().toMutations(timestamp).next().getSecond());
        connection.rollback();
        /*
             * The table metadata must be in the following order:
             * 1) table header row
             * 2) ordered column rows
             * 3) parent table header row
             */
        Collections.reverse(tableMetaData);
        if (indexType != IndexType.LOCAL) {
            splits = SchemaUtil.processSplits(splits, pkColumns, saltBucketNum, connection.getQueryServices().getProps().getBoolean(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, QueryServicesOptions.DEFAULT_FORCE_ROW_KEY_ORDER));
        }
        MetaDataMutationResult result = connection.getQueryServices().createTable(tableMetaData, viewType == ViewType.MAPPED || allocateIndexId ? physicalNames.get(0).getBytes() : null, tableType, tableProps, familyPropList, splits, isNamespaceMapped, allocateIndexId);
        MutationCode code = result.getMutationCode();
        switch(code) {
            case TABLE_ALREADY_EXISTS:
                if (result.getTable() != null) {
                    // Can happen for transactional table that already exists as HBase table
                    addTableToCache(result);
                }
                if (!statement.ifNotExists()) {
                    throw new TableAlreadyExistsException(schemaName, tableName, result.getTable());
                }
                return null;
            case PARENT_TABLE_NOT_FOUND:
                throw new TableNotFoundException(schemaName, parent.getName().getString());
            case NEWER_TABLE_FOUND:
                // Add the table to ConnectionQueryServices so it's cached, but don't add
                // it to this connection as we can't see it.
                if (!statement.ifNotExists()) {
                    throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable());
                }
            case UNALLOWED_TABLE_MUTATION:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            case CONCURRENT_TABLE_MUTATION:
                addTableToCache(result);
                throw new ConcurrentTableMutationException(schemaName, tableName);
            case AUTO_PARTITION_SEQUENCE_NOT_FOUND:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.AUTO_PARTITION_SEQUENCE_UNDEFINED).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            case CANNOT_COERCE_AUTO_PARTITION_ID:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_COERCE_AUTO_PARTITION_ID).setSchemaName(schemaName).setTableName(tableName).build().buildException();
            case TOO_MANY_INDEXES:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.TOO_MANY_INDEXES).setSchemaName(SchemaUtil.getSchemaNameFromFullName(parent.getPhysicalName().getString())).setTableName(SchemaUtil.getTableNameFromFullName(parent.getPhysicalName().getString())).build().buildException();
            default:
                // If the parent table of the view has the auto partition sequence name attribute,
                // set the view statement and relevant partition column attributes correctly
                if (parent != null && parent.getAutoPartitionSeqName() != null) {
                    final PColumn autoPartitionCol = parent.getPKColumns().get(MetaDataUtil.getAutoPartitionColIndex(parent));
                    final Long autoPartitionNum = Long.valueOf(result.getAutoPartitionNum());
                    columns.put(autoPartitionCol, new DelegateColumn(autoPartitionCol) {

                        @Override
                        public byte[] getViewConstant() {
                            PDataType dataType = autoPartitionCol.getDataType();
                            Object val = dataType.toObject(autoPartitionNum, PLong.INSTANCE);
                            byte[] bytes = new byte[dataType.getByteSize() + 1];
                            dataType.toBytes(val, bytes, 0);
                            return bytes;
                        }

                        @Override
                        public boolean isViewReferenced() {
                            return true;
                        }
                    });
                    String viewPartitionClause = QueryUtil.getViewPartitionClause(MetaDataUtil.getAutoPartitionColumnName(parent), autoPartitionNum);
                    if (viewStatement != null) {
                        viewStatement = viewStatement + " AND " + viewPartitionClause;
                    } else {
                        viewStatement = QueryUtil.getViewStatement(parent.getSchemaName().getString(), parent.getTableName().getString(), viewPartitionClause);
                    }
                }
                PName newSchemaName = PNameFactory.newName(schemaName);
                /*
                 * It doesn't hurt for the PTable of views to have the cqCounter. However, views always rely on the
                 * parent table's counter to dole out encoded column qualifiers. So setting the counter as NULL_COUNTER
                 * for extra safety.
                 */
                EncodedCQCounter cqCounterToBe = tableType == PTableType.VIEW ? NULL_COUNTER : cqCounter;
                PTable table = PTableImpl.makePTable(tenantId, newSchemaName, PNameFactory.newName(tableName), tableType, indexState, timestamp != null ? timestamp : result.getMutationTime(), PTable.INITIAL_SEQ_NUM, pkName == null ? null : PNameFactory.newName(pkName), saltBucketNum, columns.values(), parent == null ? null : parent.getSchemaName(), parent == null ? null : parent.getTableName(), Collections.<PTable>emptyList(), isImmutableRows, physicalNames, defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName), viewStatement, Boolean.TRUE.equals(disableWAL), multiTenant, storeNulls, viewType, result.getViewIndexId(), indexType, rowKeyOrderOptimizable, transactional, updateCacheFrequency, 0L, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, immutableStorageScheme, encodingScheme, cqCounterToBe, useStatsForParallelizationProp);
                result = new MetaDataMutationResult(code, result.getMutationTime(), table, true);
                addTableToCache(result);
                return table;
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
Also used : LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) PDataType(org.apache.phoenix.schema.types.PDataType) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) ColumnDef(org.apache.phoenix.parse.ColumnDef) PrimaryKeyConstraint(org.apache.phoenix.parse.PrimaryKeyConstraint) TableName(org.apache.phoenix.parse.TableName) PUnsignedLong(org.apache.phoenix.schema.types.PUnsignedLong) PLong(org.apache.phoenix.schema.types.PLong) Mutation(org.apache.hadoop.hbase.client.Mutation) Map(java.util.Map) QualifierOutOfRangeException(org.apache.phoenix.schema.PTable.QualifierEncodingScheme.QualifierOutOfRangeException) QualifierEncodingScheme(org.apache.phoenix.schema.PTable.QualifierEncodingScheme) ImmutableStorageScheme(org.apache.phoenix.schema.PTable.ImmutableStorageScheme) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Pair(org.apache.hadoop.hbase.util.Pair) PreparedStatement(java.sql.PreparedStatement) IndexKeyConstraint(org.apache.phoenix.parse.IndexKeyConstraint) ColumnDefInPkConstraint(org.apache.phoenix.parse.ColumnDefInPkConstraint) MutationCode(org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) PInteger(org.apache.phoenix.schema.types.PInteger) ColumnName(org.apache.phoenix.parse.ColumnName) EncodedCQCounter(org.apache.phoenix.schema.PTable.EncodedCQCounter)
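The path above is driven entirely by DDL. The following is a minimal client-side sketch, assuming a Phoenix JDBC URL of jdbc:phoenix:localhost and a hypothetical EVENTS table, that exercises the ROW_TIMESTAMP, SALT_BUCKETS and STORE_NULLS handling shown in this example:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class CreateTableSketch {

    public static void main(String[] args) throws SQLException {
        // Hypothetical connection URL; adjust for your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                Statement stmt = conn.createStatement()) {
            // CREATED_DATE is declared ROW_TIMESTAMP, so it must be a PK column of a
            // long/time-compatible type and the only such column (see Example 1).
            // SALT_BUCKETS is validated against SaltingUtil.MAX_BUCKET_NUM above.
            stmt.execute("CREATE TABLE IF NOT EXISTS EVENTS ("
                    + " HOST VARCHAR NOT NULL,"
                    + " CREATED_DATE DATE NOT NULL,"
                    + " PAYLOAD VARCHAR,"
                    + " CONSTRAINT PK PRIMARY KEY (HOST, CREATED_DATE ROW_TIMESTAMP))"
                    + " SALT_BUCKETS=8, STORE_NULLS=true");
        }
    }
}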

Example 5 with ColumnName

use of org.apache.phoenix.parse.ColumnName in project phoenix by apache.

the class MetaDataClient method createIndex.

/**
 * Create an index table by morphing the CreateIndexStatement into a CreateTableStatement and calling
 * MetaDataClient.createTable. In doing so, we perform the following translations:
 * 1) Change the type of any columns being indexed to types that support null if the column is nullable.
 *    For example, a BIGINT type would be coerced to a DECIMAL type, since a DECIMAL type supports null
 *    when it's in the row key while a BIGINT does not.
 * 2) Append any row key column from the data table that is not in the indexed column list. Our indexes
 *    rely on having a 1:1 correspondence between the index and data rows.
 * 3) Change the name of the columns to include the column family. For example, if you have a column
 *    named "B" in a column family named "A", the indexed column name will be "A:B". This makes it easy
 *    to translate the column references in a query to the correct column references in an index table
 *    regardless of whether the column reference is prefixed with the column family name or not. It also
 *    has the side benefit of allowing the same named column in different column families to both be
 *    listed as an index column.
 * @param statement
 * @param splits
 * @return MutationState from population of index table from data table
 * @throws SQLException
 */
public MutationState createIndex(CreateIndexStatement statement, byte[][] splits) throws SQLException {
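    // Worked example of the translations described above (hypothetical schema): given a data
    // table T (K1 BIGINT NOT NULL, K2 DATE NOT NULL, A.B BIGINT, CONSTRAINT PK PRIMARY KEY
    // (K1, K2)), CREATE INDEX IDX ON T (A.B) produces index PK columns "A:B" (coerced from
    // BIGINT to DECIMAL since B is nullable), then ":K1" and ":K2" appended from the data row key.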
    IndexKeyConstraint ik = statement.getIndexConstraint();
    TableName indexTableName = statement.getIndexTableName();
    Map<String, Object> tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
    Map<String, Object> commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size() + 1);
    populatePropertyMaps(statement.getProps(), tableProps, commonFamilyProps);
    List<Pair<ParseNode, SortOrder>> indexParseNodeAndSortOrderList = ik.getParseNodeAndSortOrderList();
    List<ColumnName> includedColumns = statement.getIncludeColumns();
    TableRef tableRef = null;
    PTable table = null;
    int numRetries = 0;
    boolean allocateIndexId = false;
    boolean isLocalIndex = statement.getIndexType() == IndexType.LOCAL;
    int hbaseVersion = connection.getQueryServices().getLowestClusterHBaseVersion();
    if (isLocalIndex) {
        if (!connection.getQueryServices().getProps().getBoolean(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, QueryServicesOptions.DEFAULT_ALLOW_LOCAL_INDEX)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
        }
        if (!connection.getQueryServices().supportsFeature(Feature.LOCAL_INDEX)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_LOCAL_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
        }
    }
    while (true) {
        try {
            ColumnResolver resolver = FromCompiler.getResolver(statement, connection, statement.getUdfParseNodes());
            tableRef = resolver.getTables().get(0);
            Date asyncCreatedDate = null;
            if (statement.isAsync()) {
                asyncCreatedDate = new Date(tableRef.getTimeStamp());
            }
            PTable dataTable = tableRef.getTable();
            boolean isTenantConnection = connection.getTenantId() != null;
            if (isTenantConnection) {
                if (dataTable.getType() != PTableType.VIEW) {
                    throw new SQLFeatureNotSupportedException("An index may only be created for a VIEW through a tenant-specific connection");
                }
            }
            if (!dataTable.isImmutableRows()) {
                if (hbaseVersion < PhoenixDatabaseMetaData.MUTABLE_SI_VERSION_THRESHOLD) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.NO_MUTABLE_INDEXES).setTableName(indexTableName.getTableName()).build().buildException();
                }
                if (!connection.getQueryServices().hasIndexWALCodec() && !dataTable.isTransactional()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_MUTABLE_INDEX_CONFIG).setTableName(indexTableName.getTableName()).build().buildException();
                }
                boolean tableWithRowTimestampCol = dataTable.getRowTimestampColPos() != -1;
                if (tableWithRowTimestampCol) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_INDEX_ON_MUTABLE_TABLE_WITH_ROWTIMESTAMP).setTableName(indexTableName.getTableName()).build().buildException();
                }
            }
            int posOffset = 0;
            List<PColumn> pkColumns = dataTable.getPKColumns();
            Set<RowKeyColumnExpression> unusedPkColumns;
            if (dataTable.getBucketNum() != null) {
                // Ignore SALT column
                unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size() - 1);
                posOffset++;
            } else {
                unusedPkColumns = Sets.newLinkedHashSetWithExpectedSize(pkColumns.size());
            }
            for (int i = posOffset; i < pkColumns.size(); i++) {
                PColumn column = pkColumns.get(i);
                unusedPkColumns.add(new RowKeyColumnExpression(column, new RowKeyValueAccessor(pkColumns, i), "\"" + column.getName().getString() + "\""));
            }
            List<ColumnDefInPkConstraint> allPkColumns = Lists.newArrayListWithExpectedSize(unusedPkColumns.size());
            List<ColumnDef> columnDefs = Lists.newArrayListWithExpectedSize(includedColumns.size() + indexParseNodeAndSortOrderList.size());
            /*
                 * Allocate an index ID in two circumstances:
                 * 1) for a local index, as all local indexes will reside in the same HBase table
                 * 2) for a view on an index.
                 */
            if (isLocalIndex || (dataTable.getType() == PTableType.VIEW && dataTable.getViewType() != ViewType.MAPPED)) {
                allocateIndexId = true;
                PDataType dataType = MetaDataUtil.getViewIndexIdDataType();
                ColumnName colName = ColumnName.caseSensitiveColumnName(MetaDataUtil.getViewIndexIdColumnName());
                allPkColumns.add(new ColumnDefInPkConstraint(colName, SortOrder.getDefault(), false));
                columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), false, null, null, false, SortOrder.getDefault(), null, false));
            }
            if (dataTable.isMultiTenant()) {
                PColumn col = dataTable.getPKColumns().get(posOffset);
                RowKeyColumnExpression columnExpression = new RowKeyColumnExpression(col, new RowKeyValueAccessor(pkColumns, posOffset), col.getName().getString());
                unusedPkColumns.remove(columnExpression);
                PDataType dataType = IndexUtil.getIndexColumnDataType(col);
                ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
                allPkColumns.add(new ColumnDefInPkConstraint(colName, col.getSortOrder(), false));
                columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, SortOrder.getDefault(), col.getName().getString(), col.isRowTimestamp()));
            }
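            // For a multi-tenant data table, the leading tenant id PK column is carried into
            // the index row key ahead of the indexed expressions.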
            PhoenixStatement phoenixStatement = new PhoenixStatement(connection);
            StatementContext context = new StatementContext(phoenixStatement, resolver);
            IndexExpressionCompiler expressionIndexCompiler = new IndexExpressionCompiler(context);
            Set<ColumnName> indexedColumnNames = Sets.newHashSetWithExpectedSize(indexParseNodeAndSortOrderList.size());
            for (Pair<ParseNode, SortOrder> pair : indexParseNodeAndSortOrderList) {
                ParseNode parseNode = pair.getFirst();
                // normalize the parse node
                parseNode = StatementNormalizer.normalize(parseNode, resolver);
                // compile the parseNode to get an expression
                expressionIndexCompiler.reset();
                Expression expression = parseNode.accept(expressionIndexCompiler);
                if (expressionIndexCompiler.isAggregate()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATE_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
                }
                if (expression.getDeterminism() != Determinism.ALWAYS) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.NON_DETERMINISTIC_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
                }
                if (expression.isStateless()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.STATELESS_EXPRESSION_NOT_ALLOWED_IN_INDEX).build().buildException();
                }
                unusedPkColumns.remove(expression);
                // Go through parse node to get string as otherwise we
                // can lose information during compilation
                StringBuilder buf = new StringBuilder();
                parseNode.toSQL(resolver, buf);
                // need to escape backslash as this expression will be re-parsed later
                String expressionStr = StringUtil.escapeBackslash(buf.toString());
                ColumnName colName = null;
                ColumnRef colRef = expressionIndexCompiler.getColumnRef();
                boolean isRowTimestamp = false;
                if (colRef != null) {
                    // if this is a regular column
                    PColumn column = colRef.getColumn();
                    String columnFamilyName = column.getFamilyName() != null ? column.getFamilyName().getString() : null;
                    colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(columnFamilyName, column.getName().getString()));
                    isRowTimestamp = column.isRowTimestamp();
                    if (colRef.getColumn().getExpressionStr() != null) {
                        expressionStr = colRef.getColumn().getExpressionStr();
                    }
                } else {
                    // if this is an expression
                    // TODO: column names cannot have double quotes; remove this once PHOENIX-1621 is fixed
                    String name = expressionStr.replaceAll("\"", "'");
                    colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, name));
                }
                indexedColumnNames.add(colName);
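                // The indexed value is stored in the index row key, so the type may be
                // widened to one that can also represent null (see IndexUtil.getIndexColumnDataType).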
                PDataType dataType = IndexUtil.getIndexColumnDataType(expression.isNullable(), expression.getDataType());
                allPkColumns.add(new ColumnDefInPkConstraint(colName, pair.getSecond(), isRowTimestamp));
                columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), expression.isNullable(), expression.getMaxLength(), expression.getScale(), false, pair.getSecond(), expressionStr, isRowTimestamp));
            }
            // Next, add all the PK columns from the data table that aren't already indexed
            if (!unusedPkColumns.isEmpty()) {
                for (RowKeyColumnExpression colExpression : unusedPkColumns) {
                    PColumn col = dataTable.getPKColumns().get(colExpression.getPosition());
                    // Columns with a view constant hold a fixed value, so we don't need these in the index
                    if (col.getViewConstant() == null) {
                        ColumnName colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
                        allPkColumns.add(new ColumnDefInPkConstraint(colName, colExpression.getSortOrder(), col.isRowTimestamp()));
                        PDataType dataType = IndexUtil.getIndexColumnDataType(colExpression.isNullable(), colExpression.getDataType());
                        columnDefs.add(FACTORY.columnDef(colName, dataType.getSqlTypeName(), colExpression.isNullable(), colExpression.getMaxLength(), colExpression.getScale(), false, colExpression.getSortOrder(), colExpression.toString(), col.isRowTimestamp()));
                    }
                }
            }
            // Last, add all the included columns (minus any PK columns)
            for (ColumnName colName : includedColumns) {
                PColumn col = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn();
                colName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(col));
                // Check for duplicates between indexed and included columns
                if (indexedColumnNames.contains(colName)) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.COLUMN_EXIST_IN_DEF).build().buildException();
                }
                if (!SchemaUtil.isPKColumn(col) && col.getViewConstant() == null) {
                    // Need to re-create ColumnName, since the above one won't have the column family name
                    colName = ColumnName.caseSensitiveColumnName(isLocalIndex ? IndexUtil.getLocalIndexColumnFamily(col.getFamilyName().getString()) : col.getFamilyName().getString(), IndexUtil.getIndexColumnName(col));
                    columnDefs.add(FACTORY.columnDef(colName, col.getDataType().getSqlTypeName(), col.isNullable(), col.getMaxLength(), col.getScale(), false, col.getSortOrder(), col.getExpressionStr(), col.isRowTimestamp()));
                }
            }
            // We need this in the props so that the correct column family is created
            if (dataTable.getDefaultFamilyName() != null && dataTable.getType() != PTableType.VIEW && !allocateIndexId) {
                statement.getProps().put("", new Pair<String, Object>(DEFAULT_COLUMN_FAMILY_NAME, dataTable.getDefaultFamilyName().getString()));
            }
            PrimaryKeyConstraint pk = FACTORY.primaryKey(null, allPkColumns);
            tableProps.put(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME, dataTable.getName().getString());
            CreateTableStatement tableStatement = FACTORY.createTable(indexTableName, statement.getProps(), columnDefs, pk, statement.getSplitNodes(), PTableType.INDEX, statement.ifNotExists(), null, null, statement.getBindCount(), null);
            table = createTableInternal(tableStatement, splits, dataTable, null, null, null, null, allocateIndexId, statement.getIndexType(), asyncCreatedDate, tableProps, commonFamilyProps);
            break;
        } catch (ConcurrentTableMutationException e) {
            // Can happen if the parent data table changes while the above is in progress
            if (numRetries < 5) {
                numRetries++;
                continue;
            }
            throw e;
        }
    }
    if (table == null) {
        return new MutationState(0, 0, connection);
    }
    if (logger.isInfoEnabled())
        logger.info("Created index " + table.getName().getString() + " at " + table.getTimeStamp());
    boolean asyncIndexBuildEnabled = connection.getQueryServices().getProps().getBoolean(QueryServices.INDEX_ASYNC_BUILD_ENABLED, QueryServicesOptions.DEFAULT_INDEX_ASYNC_BUILD_ENABLED);
    // In the async case, we return immediately, as the MR job needs to be triggered separately.
    if (statement.isAsync() && asyncIndexBuildEnabled) {
        return new MutationState(0, 0, connection);
    }
    // If the connection is at a fixed point-in-time (SCN is set), build the index at that
    // timestamp so that our new index table is visible.
    if (connection.getSCN() != null) {
        return buildIndexAtTimeStamp(table, statement.getTable());
    }
    return buildIndex(table, tableRef);
}
Also used : SQLFeatureNotSupportedException(java.sql.SQLFeatureNotSupportedException) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) StatementContext(org.apache.phoenix.compile.StatementContext) PDataType(org.apache.phoenix.schema.types.PDataType) ParseNode(org.apache.phoenix.parse.ParseNode) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) ColumnResolver(org.apache.phoenix.compile.ColumnResolver) Pair(org.apache.hadoop.hbase.util.Pair) CreateTableStatement(org.apache.phoenix.parse.CreateTableStatement) IndexKeyConstraint(org.apache.phoenix.parse.IndexKeyConstraint) ColumnDef(org.apache.phoenix.parse.ColumnDef) ColumnDefInPkConstraint(org.apache.phoenix.parse.ColumnDefInPkConstraint) RowKeyColumnExpression(org.apache.phoenix.expression.RowKeyColumnExpression) PrimaryKeyConstraint(org.apache.phoenix.parse.PrimaryKeyConstraint) PDate(org.apache.phoenix.schema.types.PDate) Date(java.sql.Date) TableName(org.apache.phoenix.parse.TableName) ColumnName(org.apache.phoenix.parse.ColumnName) MutationState(org.apache.phoenix.execute.MutationState) Expression(org.apache.phoenix.expression.Expression) IndexExpressionCompiler(org.apache.phoenix.compile.IndexExpressionCompiler)
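For reference, the kind of SQL that drives this createIndex path looks like the following — a minimal, hypothetical sketch (the JDBC URL, table, and column names are placeholders, not taken from the example above):

import java.sql.Connection;
import java.sql.DriverManager;

public class CreateIndexExample {
    public static void main(String[] args) throws Exception {
        // Placeholder URL; point this at your own ZooKeeper quorum
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            // A functional index over an expression plus an INCLUDE column:
            // UPPER(NAME) is the kind of ParseNode that IndexExpressionCompiler
            // compiles above, and ASYNC takes the early-return path when
            // QueryServices.INDEX_ASYNC_BUILD_ENABLED is true.
            conn.createStatement().execute(
                "CREATE INDEX IF NOT EXISTS MY_IDX ON MY_TABLE (UPPER(NAME)) INCLUDE (CITY) ASYNC");
        }
    }
}

With ASYNC, the statement returns immediately and the index rows are populated later by a separately triggered MR job, matching the asyncIndexBuildEnabled early return in the example.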

Aggregations

SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo) 7
ColumnName (org.apache.phoenix.parse.ColumnName) 7
Pair (org.apache.hadoop.hbase.util.Pair) 4
PDataType (org.apache.phoenix.schema.types.PDataType) 4
HashMap (java.util.HashMap) 3
LinkedHashMap (java.util.LinkedHashMap) 3
Mutation (org.apache.hadoop.hbase.client.Mutation) 3
ColumnResolver (org.apache.phoenix.compile.ColumnResolver) 3
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) 3
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode) 3
MutationState (org.apache.phoenix.execute.MutationState) 3
Expression (org.apache.phoenix.expression.Expression) 3
ColumnDef (org.apache.phoenix.parse.ColumnDef) 3
ColumnDefInPkConstraint (org.apache.phoenix.parse.ColumnDefInPkConstraint) 3
TableName (org.apache.phoenix.parse.TableName) 3
PLong (org.apache.phoenix.schema.types.PLong) 3
PUnsignedLong (org.apache.phoenix.schema.types.PUnsignedLong) 3
PreparedStatement (java.sql.PreparedStatement) 2
ArrayList (java.util.ArrayList) 2
List (java.util.List) 2