Example 1 with ConstraintViolationException

Use of org.apache.phoenix.schema.ConstraintViolationException in project phoenix by Apache.

From the class VariableLengthPKIT, the method testMissingKVColumn:

// Broken, since we don't know if insert vs update.
@Test
public void testMissingKVColumn() throws Exception {
    long ts = nextTimestamp();
    ensureTableCreated(getUrl(), BTABLE_NAME, BTABLE_NAME, null, ts - 2, null);
    // Connect with CURRENT_SCN = ts so the upsert executes at that timestamp
    String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + ts;
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(url, props);
    conn.setAutoCommit(true);
    PreparedStatement stmt = conn.prepareStatement("upsert into BTABLE VALUES (?, ?, ?, ?, ?, ?)");
    stmt.setString(1, "abc");
    stmt.setString(2, "123");
    stmt.setString(3, "x");
    stmt.setInt(4, 1);
    stmt.setString(5, "ab");
    stmt.setInt(6, 1);
    try {
        stmt.execute();
        fail();
    } catch (ConstraintViolationException e) {
        // Non nullable key value E_STRING has no value
        assertTrue(e.getMessage().contains("may not be null"));
    } finally {
        conn.close();
    }
}
Also used : Connection(java.sql.Connection) ConstraintViolationException(org.apache.phoenix.schema.ConstraintViolationException) PreparedStatement(java.sql.PreparedStatement) Properties(java.util.Properties)
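
As a standalone illustration, here is a minimal sketch (not from the Phoenix sources) of triggering the same "may not be null" check from plain JDBC. It assumes a local connection URL and a hypothetical table NULL_TEST; ConstraintViolationException is unchecked, which is why it can be caught separately from SQLException.

// Hypothetical DDL assumed by this sketch:
//   CREATE TABLE NULL_TEST (K1 VARCHAR NOT NULL, K2 VARCHAR NOT NULL, V INTEGER
//       CONSTRAINT PK PRIMARY KEY (K1, K2))
public void upsertNullIntoNonNullablePKColumn() throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
        conn.setAutoCommit(true);
        try (PreparedStatement stmt = conn.prepareStatement("UPSERT INTO NULL_TEST VALUES (?, ?, ?)")) {
            stmt.setString(1, "a");
            // Binding NULL for a non-nullable PK column violates the constraint
            stmt.setString(2, null);
            stmt.setInt(3, 1);
            stmt.execute();
        } catch (ConstraintViolationException e) {
            // Expected; the message should name the column, e.g. "K2 may not be null"
            System.out.println(e.getMessage());
        }
    }
}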

Example 2 with ConstraintViolationException

Use of org.apache.phoenix.schema.ConstraintViolationException in project phoenix by Apache.

From the class ColumnDef, the method validateDefault:

public boolean validateDefault(StatementContext context, PrimaryKeyConstraint pkConstraint) throws SQLException {
    String defaultStr = this.getExpression();
    if (defaultStr == null) {
        return true;
    }
    ExpressionCompiler compiler = new ExpressionCompiler(context);
    ParseNode defaultParseNode = new SQLParser(this.getExpression()).parseExpression();
    Expression defaultExpression = defaultParseNode.accept(compiler);
    if (!defaultParseNode.isStateless() || defaultExpression.getDeterminism() != Determinism.ALWAYS) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_DEFAULT).setColumnName(this.getColumnDefName().getColumnName()).build().buildException();
    }
    if (this.isRowTimestamp() || (pkConstraint != null && pkConstraint.isColumnRowTimestamp(this.getColumnDefName()))) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CREATE_DEFAULT_ROWTIMESTAMP).setColumnName(this.getColumnDefName().getColumnName()).build().buildException();
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    // Evaluate the expression to confirm its validity
    LiteralExpression defaultValue = ExpressionUtil.getConstantExpression(defaultExpression, ptr);
    // A DEFAULT that evaluates to null should be ignored as it adds nothing
    if (defaultValue.getValue() == null) {
        return false;
    }
    PDataType sourceType = defaultExpression.getDataType();
    PDataType targetType = this.getDataType();
    // Ensure that coercion works (will throw if not)
    context.getTempPtr().set(ptr.get(), ptr.getOffset(), ptr.getLength());
    try {
        targetType.coerceBytes(context.getTempPtr(), defaultValue.getValue(), sourceType, defaultValue.getMaxLength(), defaultValue.getScale(), defaultValue.getSortOrder(), this.getMaxLength(), this.getScale(), this.getSortOrder());
    } catch (ConstraintViolationException e) {
        if (e.getCause() instanceof SQLException) {
            SQLException sqlE = (SQLException) e.getCause();
            throw new DelegateSQLException(sqlE, ". DEFAULT " + SQLExceptionInfo.COLUMN_NAME + "=" + this.getColumnDefName().getColumnName());
        }
        throw e;
    }
    if (!targetType.isSizeCompatible(ptr, defaultValue.getValue(), sourceType, this.getSortOrder(), defaultValue.getMaxLength(), defaultValue.getScale(), this.getMaxLength(), this.getScale())) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setColumnName(this.getColumnDefName().getColumnName()).setMessage("DEFAULT " + this.getExpression()).build().buildException();
    }
    return true;
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) SQLException(java.sql.SQLException) DelegateSQLException(org.apache.phoenix.schema.DelegateSQLException) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) DelegateSQLException(org.apache.phoenix.schema.DelegateSQLException) PDataType(org.apache.phoenix.schema.types.PDataType) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) Expression(org.apache.phoenix.expression.Expression) ConstraintViolationException(org.apache.phoenix.schema.ConstraintViolationException) ExpressionCompiler(org.apache.phoenix.compile.ExpressionCompiler) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo)
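
Since validateDefault runs while the CREATE TABLE statement is compiled, both failure modes it guards against can be provoked from DDL alone. A minimal sketch (not from the Phoenix sources), assuming a local connection and a Phoenix version with DEFAULT support (4.9+); the table names are hypothetical:

public void invalidDefaults() throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
         Statement stmt = conn.createStatement()) {
        String[] badDdls = {
            // RAND() is stateless but not Determinism.ALWAYS, so this should
            // fail with SQLExceptionCode.CANNOT_CREATE_DEFAULT
            "CREATE TABLE T1 (K BIGINT PRIMARY KEY, V DOUBLE DEFAULT RAND())",
            // 'abcdef' cannot fit in CHAR(2), so the isSizeCompatible check
            // should fail with SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY
            "CREATE TABLE T2 (K BIGINT PRIMARY KEY, V CHAR(2) DEFAULT 'abcdef')"
        };
        for (String ddl : badDdls) {
            try {
                stmt.execute(ddl);
            } catch (SQLException e) {
                System.out.println(e.getErrorCode() + ": " + e.getMessage());
            }
        }
    }
}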

Example 3 with ConstraintViolationException

Use of org.apache.phoenix.schema.ConstraintViolationException in project phoenix by Apache.

From the class PArrayDataType, the method toBytes:

public byte[] toBytes(Object object, PDataType baseType, SortOrder sortOrder, boolean rowKeyOrderOptimizable) {
    if (object == null) {
        throw new ConstraintViolationException(this + " may not be null");
    }
    PhoenixArray arr = toPhoenixArray(object, baseType);
    int noOfElements = arr.numElements;
    if (noOfElements == 0) {
        return ByteUtil.EMPTY_BYTE_ARRAY;
    }
    TrustedByteArrayOutputStream byteStream = null;
    if (!baseType.isFixedWidth()) {
        Pair<Integer, Integer> nullsVsNullRepetitionCounter = new Pair<>();
        int size = estimateByteSize(object, nullsVsNullRepetitionCounter, PDataType.fromTypeId(baseType.getSqlType() + PDataType.ARRAY_TYPE_BASE));
        size += ((2 * Bytes.SIZEOF_BYTE) + (noOfElements - nullsVsNullRepetitionCounter.getFirst()) * Bytes.SIZEOF_BYTE) + (nullsVsNullRepetitionCounter.getSecond() * 2 * Bytes.SIZEOF_BYTE);
        // Assume an offset array that fits into Short.MAX_VALUE. Also not considering nulls that could be > 255.
        // In both of these cases an array copy would happen at the end.
        int capacity = noOfElements * Bytes.SIZEOF_SHORT;
        // Add an int for noOfElements, a byte for the version, an int for the offset-array position and 2 bytes for the
        // end separator
        byteStream = new TrustedByteArrayOutputStream(size + capacity + Bytes.SIZEOF_INT + Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT);
    } else {
        int elemLength = (arr.getMaxLength() == null ? baseType.getByteSize() : arr.getMaxLength());
        int size = elemLength * noOfElements;
        // Here the int for noOfElements, byte for the version
        byteStream = new TrustedByteArrayOutputStream(size);
    }
    DataOutputStream oStream = new DataOutputStream(byteStream);
    // Handles bit inversion also
    return createArrayBytes(byteStream, oStream, arr, noOfElements, baseType, sortOrder, rowKeyOrderOptimizable);
}
Also used : DataOutputStream(java.io.DataOutputStream) ConstraintViolationException(org.apache.phoenix.schema.ConstraintViolationException) TrustedByteArrayOutputStream(org.apache.phoenix.util.TrustedByteArrayOutputStream) Pair(org.apache.hadoop.hbase.util.Pair)
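
A minimal sketch (not from the Phoenix sources) of invoking the serializer directly through the four-argument overload shown above. It assumes org.apache.phoenix.schema.types.PhoenixArray, PVarchar, and PVarcharArray; in normal use the driver makes this call when an ARRAY value is bound or upserted.

public byte[] serializeVarcharArray() {
    // VARCHAR is variable-width, so the offset-array branch above is taken
    PhoenixArray arr = new PhoenixArray(PVarchar.INSTANCE, new Object[] { "a", null, "bc" });
    byte[] bytes = PVarcharArray.INSTANCE.toBytes(arr, PVarchar.INSTANCE, SortOrder.ASC, true);
    // Passing null instead of arr would throw ConstraintViolationException
    // with a message like "VARCHAR ARRAY may not be null"
    return bytes;
}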

Example 4 with ConstraintViolationException

Use of org.apache.phoenix.schema.ConstraintViolationException in project phoenix by Apache.

From the class VariableLengthPKIT, the method testTooShortPKColumn:

@Test
public void testTooShortPKColumn() throws Exception {
    String pTSDBTableName = generateUniqueName();
    ensureTableCreated(getUrl(), pTSDBTableName, BTABLE_NAME, null, null, null);
    // No SCN on the connection, so the upsert runs at the current time
    String url = getUrl();
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(url, props);
    conn.setAutoCommit(true);
    // Upsert a single row; fixed-width PK values shorter than their declared width should be padded, not rejected
    PreparedStatement stmt = conn.prepareStatement("upsert into " + pTSDBTableName + " (" + "    A_STRING, " + "    A_ID," + "    B_STRING," + "    A_INTEGER," + "    C_STRING," + "    E_STRING)" + "VALUES (?, ?, ?, ?, ?, ?)");
    stmt.setString(1, "abc");
    stmt.setString(2, "12");
    stmt.setString(3, "x");
    stmt.setInt(4, 1);
    stmt.setString(5, "ab");
    stmt.setString(6, "0123456789");
    try {
        stmt.execute();
    } catch (ConstraintViolationException e) {
        fail("Constraint voilation Exception should not be thrown, the characters have to be padded");
    } finally {
        conn.close();
    }
}
Also used : Connection(java.sql.Connection) ConstraintViolationException(org.apache.phoenix.schema.ConstraintViolationException) PreparedStatement(java.sql.PreparedStatement) Properties(java.util.Properties) Test(org.junit.Test)
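
The padding behavior the test relies on is visible from plain JDBC as well. A minimal sketch (not from the Phoenix sources), assuming a local connection and a hypothetical table with a fixed-width CHAR(10) primary key:

public void padShortCharPKValue() throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
         Statement stmt = conn.createStatement()) {
        stmt.execute("CREATE TABLE IF NOT EXISTS PAD_TEST (K CHAR(10) PRIMARY KEY, V INTEGER)");
        // 'abc' is shorter than CHAR(10): Phoenix space-pads the value
        // instead of throwing ConstraintViolationException
        stmt.execute("UPSERT INTO PAD_TEST VALUES ('abc', 1)");
        conn.commit();
    }
}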

Example 5 with ConstraintViolationException

Use of org.apache.phoenix.schema.ConstraintViolationException in project phoenix by Apache.

From the class UpsertCompiler, the method compile:

public MutationPlan compile(UpsertStatement upsert) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    List<ColumnName> columnNodes = upsert.getColumns();
    TableRef tableRefToBe = null;
    PTable table = null;
    Set<PColumn> addViewColumnsToBe = Collections.emptySet();
    Set<PColumn> overlapViewColumnsToBe = Collections.emptySet();
    List<PColumn> allColumnsToBe = Collections.emptyList();
    boolean isTenantSpecific = false;
    boolean isSharedViewIndex = false;
    String tenantIdStr = null;
    ColumnResolver resolver = null;
    int[] columnIndexesToBe;
    int nColumnsToSet = 0;
    int[] pkSlotIndexesToBe;
    List<ParseNode> valueNodes = upsert.getValues();
    List<PColumn> targetColumns;
    NamedTableNode tableNode = upsert.getTable();
    String tableName = tableNode.getName().getTableName();
    String schemaName = tableNode.getName().getSchemaName();
    QueryPlan queryPlanToBe = null;
    int nValuesToSet;
    boolean sameTable = false;
    boolean runOnServer = false;
    boolean serverUpsertSelectEnabled = services.getProps().getBoolean(QueryServices.ENABLE_SERVER_UPSERT_SELECT, QueryServicesOptions.DEFAULT_ENABLE_SERVER_UPSERT_SELECT);
    UpsertingParallelIteratorFactory parallelIteratorFactoryToBe = null;
    boolean useServerTimestampToBe = false;
    resolver = FromCompiler.getResolverForMutation(upsert, connection);
    tableRefToBe = resolver.getTables().get(0);
    table = tableRefToBe.getTable();
    // Cannot upsert into:
    // - a read-only view
    // - a non-index table while the connection is building an index
    // - a transactional table when the connection has an SCN set
    if (table.getType() == PTableType.VIEW && table.getViewType().isReadOnly()) {
        throw new ReadOnlyTableException(schemaName, tableName);
    } else if (connection.isBuildingIndex() && table.getType() != PTableType.INDEX) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.ONLY_INDEX_UPDATABLE_AT_SCN).setSchemaName(schemaName).setTableName(tableName).build().buildException();
    } else if (table.isTransactional() && connection.getSCN() != null) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SPECIFY_SCN_FOR_TXN_TABLE).setSchemaName(schemaName).setTableName(tableName).build().buildException();
    }
    boolean isSalted = table.getBucketNum() != null;
    isTenantSpecific = table.isMultiTenant() && connection.getTenantId() != null;
    isSharedViewIndex = table.getViewIndexId() != null;
    tenantIdStr = isTenantSpecific ? connection.getTenantId().getString() : null;
    int posOffset = isSalted ? 1 : 0;
    // Setup array of column indexes parallel to values that are going to be set
    allColumnsToBe = table.getColumns();
    nColumnsToSet = 0;
    if (table.getViewType() == ViewType.UPDATABLE) {
        addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumnsToBe.size());
        for (PColumn column : allColumnsToBe) {
            if (column.getViewConstant() != null) {
                addViewColumnsToBe.add(column);
            }
        }
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    // Allow full row upsert if no columns or only dynamic ones are specified and values count match
    if (columnNodes.isEmpty() || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) {
        nColumnsToSet = allColumnsToBe.size() - posOffset;
        columnIndexesToBe = new int[nColumnsToSet];
        pkSlotIndexesToBe = new int[columnIndexesToBe.length];
        targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
        targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
        int minPKPos = 0;
        if (isSharedViewIndex) {
            PColumn indexIdColumn = table.getPKColumns().get(minPKPos);
            columnIndexesToBe[minPKPos] = indexIdColumn.getPosition();
            targetColumns.set(minPKPos, indexIdColumn);
            minPKPos++;
        }
        if (isTenantSpecific) {
            PColumn tenantColumn = table.getPKColumns().get(minPKPos);
            columnIndexesToBe[minPKPos] = tenantColumn.getPosition();
            targetColumns.set(minPKPos, tenantColumn);
            minPKPos++;
        }
        for (int i = posOffset, j = 0; i < allColumnsToBe.size(); i++) {
            PColumn column = allColumnsToBe.get(i);
            if (SchemaUtil.isPKColumn(column)) {
                pkSlotIndexesToBe[i - posOffset] = j + posOffset;
                if (j++ < minPKPos) {
                    // Skip, as it's already been set above
                    continue;
                }
                minPKPos = 0;
            }
            columnIndexesToBe[i - posOffset + minPKPos] = i;
            targetColumns.set(i - posOffset + minPKPos, column);
        }
        if (!addViewColumnsToBe.isEmpty()) {
            // All view columns overlap in this case
            overlapViewColumnsToBe = addViewColumnsToBe;
            addViewColumnsToBe = Collections.emptySet();
        }
    } else {
        // Size for worst case
        int numColsInUpsert = columnNodes.size();
        nColumnsToSet = numColsInUpsert + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
        columnIndexesToBe = new int[nColumnsToSet];
        pkSlotIndexesToBe = new int[columnIndexesToBe.length];
        targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
        targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
        // TODO: necessary? So we'll get an AIOB exception if it's not replaced
        Arrays.fill(columnIndexesToBe, -1);
        // TODO: necessary? So we'll get an AIOB exception if it's not replaced
        Arrays.fill(pkSlotIndexesToBe, -1);
        BitSet columnsBeingSet = new BitSet(table.getColumns().size());
        int i = 0;
        if (isSharedViewIndex) {
            PColumn indexIdColumn = table.getPKColumns().get(i + posOffset);
            columnsBeingSet.set(columnIndexesToBe[i] = indexIdColumn.getPosition());
            pkSlotIndexesToBe[i] = i + posOffset;
            targetColumns.set(i, indexIdColumn);
            i++;
        }
        // Add tenant column directly, as we don't want to resolve it as this will fail
        if (isTenantSpecific) {
            PColumn tenantColumn = table.getPKColumns().get(i + posOffset);
            columnsBeingSet.set(columnIndexesToBe[i] = tenantColumn.getPosition());
            pkSlotIndexesToBe[i] = i + posOffset;
            targetColumns.set(i, tenantColumn);
            i++;
        }
        for (ColumnName colName : columnNodes) {
            ColumnRef ref = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName());
            PColumn column = ref.getColumn();
            if (IndexUtil.getViewConstantValue(column, ptr)) {
                if (overlapViewColumnsToBe.isEmpty()) {
                    overlapViewColumnsToBe = Sets.newHashSetWithExpectedSize(addViewColumnsToBe.size());
                }
                nColumnsToSet--;
                overlapViewColumnsToBe.add(column);
                addViewColumnsToBe.remove(column);
            }
            columnsBeingSet.set(columnIndexesToBe[i] = ref.getColumnPosition());
            targetColumns.set(i, column);
            if (SchemaUtil.isPKColumn(column)) {
                pkSlotIndexesToBe[i] = ref.getPKSlotPosition();
            }
            i++;
        }
        for (PColumn column : addViewColumnsToBe) {
            columnsBeingSet.set(columnIndexesToBe[i] = column.getPosition());
            targetColumns.set(i, column);
            if (SchemaUtil.isPKColumn(column)) {
                pkSlotIndexesToBe[i] = SchemaUtil.getPKPosition(table, column);
            }
            i++;
        }
        // If the table has a ROW_TIMESTAMP column, then we always set it.
        useServerTimestampToBe = table.getRowTimestampColPos() != -1 && !isRowTimestampSet(pkSlotIndexesToBe, table);
        if (useServerTimestampToBe) {
            PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos());
            // Need to resize columnIndexesToBe and pkSlotIndexesToBe to include this extra column.
            columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, columnIndexesToBe.length + 1);
            pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, pkSlotIndexesToBe.length + 1);
            columnsBeingSet.set(columnIndexesToBe[i] = rowTimestampCol.getPosition());
            pkSlotIndexesToBe[i] = table.getRowTimestampColPos();
            targetColumns.add(rowTimestampCol);
            if (valueNodes != null && !valueNodes.isEmpty()) {
                valueNodes.add(getNodeForRowTimestampColumn(rowTimestampCol));
            }
            nColumnsToSet++;
        }
        for (i = posOffset; i < table.getColumns().size(); i++) {
            PColumn column = table.getColumns().get(i);
            if (!columnsBeingSet.get(i) && !column.isNullable() && column.getExpressionStr() == null) {
                throw new ConstraintViolationException(SchemaUtil.getColumnDisplayName(column) + " may not be null");
            }
        }
    }
    boolean isAutoCommit = connection.getAutoCommit();
    if (valueNodes == null) {
        SelectStatement select = upsert.getSelect();
        assert (select != null);
        select = SubselectRewriter.flatten(select, connection);
        ColumnResolver selectResolver = FromCompiler.getResolverForQuery(select, connection, false, upsert.getTable().getName());
        select = StatementNormalizer.normalize(select, selectResolver);
        select = prependTenantAndViewConstants(table, select, tenantIdStr, addViewColumnsToBe, useServerTimestampToBe);
        SelectStatement transformedSelect = SubqueryRewriter.transform(select, selectResolver, connection);
        if (transformedSelect != select) {
            selectResolver = FromCompiler.getResolverForQuery(transformedSelect, connection, false, upsert.getTable().getName());
            select = StatementNormalizer.normalize(transformedSelect, selectResolver);
        }
        sameTable = !select.isJoin() && tableRefToBe.equals(selectResolver.getTables().get(0));
        /* We can run the upsert in a coprocessor if:
             * 1) the FROM clause has only 1 table, or server-side UPSERT SELECT is enabled
             * 2) the select query isn't doing aggregation (which requires a client-side final merge)
             * 3) autoCommit is on
             * 4) the table is not immutable with indexes, as the client is the one that figures out the additional
             *    puts for index tables.
             * 5) no LIMIT clause, as the limit clause requires client-side post processing
             * 6) no sequences, as sequences imply that the order of upsert must match the order of
             *    selection. TODO: change this and only force client side if there's an ORDER BY on the sequence value
             * Otherwise, run the query to pull the data from the server
             * and populate the MutationState (up to a limit).
            */
        if (!(select.isAggregate() || select.isDistinct() || select.getLimit() != null || select.hasSequence())) {
            // We can pipeline the upsert select instead of spooling everything to disk first,
            // if we don't have any post processing that's required.
            parallelIteratorFactoryToBe = new UpsertingParallelIteratorFactory(connection, tableRefToBe, useServerTimestampToBe);
            // If we're in the else, then it's not an aggregate, distinct, limited, or sequence-using query,
            // so we might be able to run it entirely on the server side.
            // For a table with a ROW_TIMESTAMP column we can't guarantee that rows will land in the
            // region space managed by region servers, so we bail out on executing on the server side.
            // Disable running upsert select on server side if a table has global mutable secondary indexes on it
            boolean hasGlobalMutableIndexes = SchemaUtil.hasGlobalIndex(table) && !table.isImmutableRows();
            boolean hasWhereSubquery = select.getWhere() != null && select.getWhere().hasSubquery();
            runOnServer = (sameTable || (serverUpsertSelectEnabled && !hasGlobalMutableIndexes)) && isAutoCommit && !table.isTransactional() && !(table.isImmutableRows() && !table.getIndexes().isEmpty()) && !select.isJoin() && !hasWhereSubquery && table.getRowTimestampColPos() == -1;
        }
        // If we may be able to run on the server, add a hint that favors using the data table
        // if all else is equal.
        // TODO: it'd be nice if we could figure out in advance if the PK is potentially changing,
        // as this would disallow running on the server. We currently use the row projector we
        // get back to figure this out.
        HintNode hint = upsert.getHint();
        if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
            hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
        }
        select = SelectStatement.create(select, hint);
        // Pass scan through if same table in upsert and select so that projection is computed correctly
        // Use optimizer to choose the best plan
        QueryCompiler compiler = new QueryCompiler(statement, select, selectResolver, targetColumns, parallelIteratorFactoryToBe, new SequenceManager(statement), false, false, null);
        queryPlanToBe = compiler.compile();
        // If the plan's table is a projected or subquery table, then there are post-processing
        // steps and parallelIteratorFactory did not take effect.
        if (queryPlanToBe.getTableRef().getTable().getType() == PTableType.PROJECTED || queryPlanToBe.getTableRef().getTable().getType() == PTableType.SUBQUERY) {
            parallelIteratorFactoryToBe = null;
        }
        nValuesToSet = queryPlanToBe.getProjector().getColumnCount();
    // Cannot auto commit if doing aggregation or topN or salted
    // Salted causes problems because the row may end up living on a different region
    } else {
        nValuesToSet = valueNodes.size() + addViewColumnsToBe.size() + (isTenantSpecific ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
    }
    // Resize down to allow a subset of columns to be specifiable
    if (columnNodes.isEmpty() && columnIndexesToBe.length >= nValuesToSet) {
        nColumnsToSet = nValuesToSet;
        columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet);
        pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet);
        for (int i = posOffset + nValuesToSet; i < table.getColumns().size(); i++) {
            PColumn column = table.getColumns().get(i);
            if (!column.isNullable() && column.getExpressionStr() == null) {
                throw new ConstraintViolationException(SchemaUtil.getColumnDisplayName(column) + " may not be null");
            }
        }
    }
    if (nValuesToSet != nColumnsToSet) {
        // Note: this check isn't airtight, since a column could have
        // been removed and then added back and we wouldn't detect that here.
        throw new UpsertColumnsValuesMismatchException(schemaName, tableName, "Numbers of columns: " + nColumnsToSet + ". Number of values: " + nValuesToSet);
    }
    final QueryPlan originalQueryPlan = queryPlanToBe;
    RowProjector projectorToBe = null;
    // Optimize only after all checks have been performed
    if (valueNodes == null) {
        queryPlanToBe = new QueryOptimizer(services).optimize(queryPlanToBe, statement, targetColumns, parallelIteratorFactoryToBe);
        projectorToBe = queryPlanToBe.getProjector();
    }
    final List<PColumn> allColumns = allColumnsToBe;
    final RowProjector projector = projectorToBe;
    final QueryPlan queryPlan = queryPlanToBe;
    final TableRef tableRef = tableRefToBe;
    final Set<PColumn> addViewColumns = addViewColumnsToBe;
    final Set<PColumn> overlapViewColumns = overlapViewColumnsToBe;
    final UpsertingParallelIteratorFactory parallelIteratorFactory = parallelIteratorFactoryToBe;
    final int[] columnIndexes = columnIndexesToBe;
    final int[] pkSlotIndexes = pkSlotIndexesToBe;
    final boolean useServerTimestamp = useServerTimestampToBe;
    if (table.getRowTimestampColPos() == -1 && useServerTimestamp) {
        throw new IllegalStateException("For a table without row timestamp column, useServerTimestamp cannot be true");
    }
    // ///////////////////////////////////////////////////////////////////
    if (valueNodes == null) {
        // Before we re-order, check that for updatable view columns
        // the projected expression either matches the column name or
        // is a constant with the same required value.
        throwIfNotUpdatable(tableRef, overlapViewColumnsToBe, targetColumns, projector, sameTable);
        // ///////////////////////////////////////////////////////////////////
        if (runOnServer) {
            // At most this array will grow bigger by the number of PK columns
            int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet);
            int[] reverseColumnIndexes = new int[table.getColumns().size()];
            List<Expression> projectedExpressions = Lists.newArrayListWithExpectedSize(reverseColumnIndexes.length);
            Arrays.fill(reverseColumnIndexes, -1);
            for (int i = 0; i < nValuesToSet; i++) {
                projectedExpressions.add(projector.getColumnProjector(i).getExpression());
                reverseColumnIndexes[columnIndexes[i]] = i;
            }
            /*
                 * Order projected columns and projected expressions with PK columns
                 * leading order by slot position
                 */
            int offset = table.getBucketNum() == null ? 0 : 1;
            for (int i = 0; i < table.getPKColumns().size() - offset; i++) {
                PColumn column = table.getPKColumns().get(i + offset);
                int pos = reverseColumnIndexes[column.getPosition()];
                if (pos == -1) {
                    // Skip fixed-width PK columns that have no projected value, since
                    // it's not valid to set a fixed width type to null.
                    if (column.getDataType().isFixedWidth()) {
                        continue;
                    }
                    // Add literal null for missing PK columns
                    pos = projectedExpressions.size();
                    Expression literalNull = LiteralExpression.newConstant(null, column.getDataType(), Determinism.ALWAYS);
                    projectedExpressions.add(literalNull);
                    allColumnsIndexes[pos] = column.getPosition();
                }
                // Swap select expression at pos with i
                Collections.swap(projectedExpressions, i, pos);
                // Swap column indexes and reverse column indexes too
                int tempPos = allColumnsIndexes[i];
                allColumnsIndexes[i] = allColumnsIndexes[pos];
                allColumnsIndexes[pos] = tempPos;
                reverseColumnIndexes[tempPos] = pos;
                reverseColumnIndexes[i] = i;
            }
            // If any pk slots are changing and server side UPSERT SELECT is disabled, do not run on server
            if (!serverUpsertSelectEnabled && ExpressionUtil.isPkPositionChanging(new TableRef(table), projectedExpressions)) {
                runOnServer = false;
            }
            // ///////////////////////////////////////////////////////////////////
            if (runOnServer) {
                // Iterate through columns being projected
                List<PColumn> projectedColumns = Lists.newArrayListWithExpectedSize(projectedExpressions.size());
                int posOff = table.getBucketNum() != null ? 1 : 0;
                for (int i = 0; i < projectedExpressions.size(); i++) {
                    // Must make new column if position has changed
                    PColumn column = allColumns.get(allColumnsIndexes[i]);
                    projectedColumns.add(column.getPosition() == i + posOff ? column : new PColumnImpl(column, i + posOff));
                }
                // Build table from projectedColumns
                // Hack to add default column family to be used on server in case no value column is projected.
                PTable projectedTable = PTableImpl.makePTable(table, projectedColumns, PNameFactory.newName(SchemaUtil.getEmptyColumnFamily(table)));
                SelectStatement select = SelectStatement.create(SelectStatement.COUNT_ONE, upsert.getHint());
                StatementContext statementContext = queryPlan.getContext();
                RowProjector aggProjectorToBe = ProjectionCompiler.compile(statementContext, select, GroupBy.EMPTY_GROUP_BY);
                statementContext.getAggregationManager().compile(queryPlan.getContext(), GroupBy.EMPTY_GROUP_BY);
                if (queryPlan.getProjector().projectEveryRow()) {
                    aggProjectorToBe = new RowProjector(aggProjectorToBe, true);
                }
                final RowProjector aggProjector = aggProjectorToBe;
                /*
                     * Transfer over PTable representing subset of columns selected, but all PK columns.
                     * Move columns setting PK first in pkSlot order, adding LiteralExpression of null for any missing ones.
                     * Transfer over List<Expression> for projection.
                     * In region scan, evaluate expressions in order, collecting the first n columns for the PK and collecting non-PK columns in the mutation Map.
                     * Create the PRow and get the mutations, adding them to the batch
                     */
                final StatementContext context = queryPlan.getContext();
                final Scan scan = context.getScan();
                scan.setAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE, UngroupedAggregateRegionObserver.serialize(projectedTable));
                scan.setAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS, UngroupedAggregateRegionObserver.serialize(projectedExpressions));
                // Ignore order by - it has no impact
                final QueryPlan aggPlan = new AggregatePlan(context, select, statementContext.getCurrentTable(), aggProjector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, originalQueryPlan);
                return new ServerUpsertSelectMutationPlan(queryPlan, tableRef, originalQueryPlan, context, connection, scan, aggPlan, aggProjector, maxSize, maxSizeBytes);
            }
        }
        // ///////////////////////////////////////////////////////////////////
        return new ClientUpsertSelectMutationPlan(queryPlan, tableRef, originalQueryPlan, parallelIteratorFactory, projector, columnIndexes, pkSlotIndexes, useServerTimestamp, maxSize, maxSizeBytes);
    }
    // //////////////////////////////////////////////////////////////////
    // UPSERT VALUES
    // ///////////////////////////////////////////////////////////////////
    final byte[][] values = new byte[nValuesToSet][];
    int nodeIndex = 0;
    if (isSharedViewIndex) {
        values[nodeIndex++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    if (isTenantSpecific) {
        PName tenantId = connection.getTenantId();
        values[nodeIndex++] = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, isSharedViewIndex);
    }
    final int nodeIndexOffset = nodeIndex;
    // Allocate array based on size of all columns in table,
    // since some values may not be set (if they're nullable).
    final StatementContext context = new StatementContext(statement, resolver, new Scan(), new SequenceManager(statement));
    UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
    final List<Expression> constantExpressions = Lists.newArrayListWithExpectedSize(valueNodes.size());
    // Build all the value expressions first so that any sequences can be collected
    // and initialized in one batch
    for (ParseNode valueNode : valueNodes) {
        if (!valueNode.isStateless()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT).build().buildException();
        }
        PColumn column = allColumns.get(columnIndexes[nodeIndex]);
        expressionBuilder.setColumn(column);
        Expression expression = valueNode.accept(expressionBuilder);
        if (expression.getDataType() != null && !expression.getDataType().isCastableTo(column.getDataType())) {
            throw TypeMismatchException.newException(expression.getDataType(), column.getDataType(), "expression: " + expression.toString() + " in column " + column);
        }
        constantExpressions.add(expression);
        nodeIndex++;
    }
    byte[] onDupKeyBytesToBe = null;
    List<Pair<ColumnName, ParseNode>> onDupKeyPairs = upsert.getOnDupKeyPairs();
    if (onDupKeyPairs != null) {
        if (table.isImmutableRows()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_IMMUTABLE).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (table.isTransactional()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_FOR_TRANSACTIONAL).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (connection.getSCN() != null) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_SET_SCN_IN_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (SchemaUtil.hasGlobalIndex(table)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_USE_ON_DUP_KEY_WITH_GLOBAL_IDX).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).build().buildException();
        }
        if (onDupKeyPairs.isEmpty()) {
            // ON DUPLICATE KEY IGNORE
            onDupKeyBytesToBe = PhoenixIndexBuilder.serializeOnDupKeyIgnore();
        } else {
            // ON DUPLICATE KEY UPDATE
            int position = table.getBucketNum() == null ? 0 : 1;
            UpdateColumnCompiler compiler = new UpdateColumnCompiler(context);
            int nColumns = onDupKeyPairs.size();
            List<Expression> updateExpressions = Lists.newArrayListWithExpectedSize(nColumns);
            LinkedHashSet<PColumn> updateColumns = Sets.newLinkedHashSetWithExpectedSize(nColumns + 1);
            // Use the first PK column name, as we know it won't conflict with other column names
            updateColumns.add(new PColumnImpl(table.getPKColumns().get(position).getName(), null, PVarbinary.INSTANCE, null, null, false, position, SortOrder.getDefault(), 0, null, false, null, false, false, null));
            position++;
            for (Pair<ColumnName, ParseNode> columnPair : onDupKeyPairs) {
                ColumnName colName = columnPair.getFirst();
                PColumn updateColumn = resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName()).getColumn();
                if (SchemaUtil.isPKColumn(updateColumn)) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_PK_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).setColumnName(updateColumn.getName().getString()).build().buildException();
                }
                final int columnPosition = position++;
                if (!updateColumns.add(new DelegateColumn(updateColumn) {

                    @Override
                    public int getPosition() {
                        return columnPosition;
                    }
                })) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.DUPLICATE_COLUMN_IN_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).setColumnName(updateColumn.getName().getString()).build().buildException();
                }
                ParseNode updateNode = columnPair.getSecond();
                compiler.setColumn(updateColumn);
                Expression updateExpression = updateNode.accept(compiler);
                // Check that updateExpression is coercible to updateColumn
                if (updateExpression.getDataType() != null && !updateExpression.getDataType().isCastableTo(updateColumn.getDataType())) {
                    throw TypeMismatchException.newException(updateExpression.getDataType(), updateColumn.getDataType(), "expression: " + updateExpression.toString() + " for column " + updateColumn);
                }
                if (compiler.isAggregate()) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.AGGREGATION_NOT_ALLOWED_IN_ON_DUP_KEY).setSchemaName(table.getSchemaName().getString()).setTableName(table.getTableName().getString()).setColumnName(updateColumn.getName().getString()).build().buildException();
                }
                updateExpressions.add(updateExpression);
            }
            PTable onDupKeyTable = PTableImpl.makePTable(table, updateColumns);
            onDupKeyBytesToBe = PhoenixIndexBuilder.serializeOnDupKeyUpdate(onDupKeyTable, updateExpressions);
        }
    }
    final byte[] onDupKeyBytes = onDupKeyBytesToBe;
    return new UpsertValuesMutationPlan(context, tableRef, nodeIndexOffset, constantExpressions, allColumns, columnIndexes, overlapViewColumns, values, addViewColumns, connection, pkSlotIndexes, useServerTimestamp, onDupKeyBytes, maxSize, maxSizeBytes);
}
Also used : PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) PTable(org.apache.phoenix.schema.PTable) DelegateColumn(org.apache.phoenix.schema.DelegateColumn) LiteralParseNode(org.apache.phoenix.parse.LiteralParseNode) BindParseNode(org.apache.phoenix.parse.BindParseNode) SequenceValueParseNode(org.apache.phoenix.parse.SequenceValueParseNode) ParseNode(org.apache.phoenix.parse.ParseNode) ConstraintViolationException(org.apache.phoenix.schema.ConstraintViolationException) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) BitSet(java.util.BitSet) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) HintNode(org.apache.phoenix.parse.HintNode) Scan(org.apache.hadoop.hbase.client.Scan) ColumnRef(org.apache.phoenix.schema.ColumnRef) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PColumnImpl(org.apache.phoenix.schema.PColumnImpl) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) UpsertColumnsValuesMismatchException(org.apache.phoenix.schema.UpsertColumnsValuesMismatchException) Pair(org.apache.hadoop.hbase.util.Pair) QueryOptimizer(org.apache.phoenix.optimize.QueryOptimizer) Hint(org.apache.phoenix.parse.HintNode.Hint) PSmallint(org.apache.phoenix.schema.types.PSmallint) ColumnName(org.apache.phoenix.parse.ColumnName) Expression(org.apache.phoenix.expression.Expression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) PName(org.apache.phoenix.schema.PName) NamedTableNode(org.apache.phoenix.parse.NamedTableNode) ConnectionQueryServices(org.apache.phoenix.query.ConnectionQueryServices) TableRef(org.apache.phoenix.schema.TableRef)
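
The checks in compile() can be observed from plain JDBC before any mutation reaches the server. A minimal sketch (not from the Phoenix sources), assuming a local connection; the table name is hypothetical and the failing statements are left commented out:

public void upsertCompileChecks() throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
         Statement stmt = conn.createStatement()) {
        stmt.execute("CREATE TABLE IF NOT EXISTS UPS_TEST ("
                + "K1 VARCHAR NOT NULL, K2 VARCHAR NOT NULL, V BIGINT "
                + "CONSTRAINT PK PRIMARY KEY (K1, K2))");
        // ConstraintViolationException: non-nullable K2 is not being set
        // stmt.execute("UPSERT INTO UPS_TEST (K1, V) VALUES ('x', 1)");
        // UpsertColumnsValuesMismatchException: 3 columns but 2 values
        // stmt.execute("UPSERT INTO UPS_TEST (K1, K2, V) VALUES ('x', 'y')");
        // CANNOT_UPDATE_PK_ON_DUP_KEY: PK columns can't be updated on duplicate key
        // stmt.execute("UPSERT INTO UPS_TEST VALUES ('x', 'y', 1) ON DUPLICATE KEY UPDATE K2 = 'z'");
        // This statement compiles; on a key collision V is incremented atomically
        stmt.execute("UPSERT INTO UPS_TEST VALUES ('x', 'y', 1) ON DUPLICATE KEY UPDATE V = V + 1");
        conn.commit();
    }
}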

Aggregations

ConstraintViolationException (org.apache.phoenix.schema.ConstraintViolationException) ×6
Connection (java.sql.Connection) ×3
PreparedStatement (java.sql.PreparedStatement) ×3
Properties (java.util.Properties) ×3
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable) ×2
Pair (org.apache.hadoop.hbase.util.Pair) ×2
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo) ×2
Expression (org.apache.phoenix.expression.Expression) ×2
LiteralExpression (org.apache.phoenix.expression.LiteralExpression) ×2
DataOutputStream (java.io.DataOutputStream) ×1
SQLException (java.sql.SQLException) ×1
BitSet (java.util.BitSet) ×1
Scan (org.apache.hadoop.hbase.client.Scan) ×1
ExpressionCompiler (org.apache.phoenix.compile.ExpressionCompiler) ×1
AggregatePlan (org.apache.phoenix.execute.AggregatePlan) ×1
PhoenixIndexBuilder (org.apache.phoenix.index.PhoenixIndexBuilder) ×1
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection) ×1
QueryOptimizer (org.apache.phoenix.optimize.QueryOptimizer) ×1
BindParseNode (org.apache.phoenix.parse.BindParseNode) ×1
ColumnName (org.apache.phoenix.parse.ColumnName) ×1