
Example 26 with IN

Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.

The class PartitionExtractor, method tryExtractBetween.

/**
 * Try to extract partitions from {@code op}, assuming that it is a BETWEEN operation or a simple range.
 *
 * @param op Sql operation.
 * @param tblModel Table model.
 * @return {@code PartitionSingleNode} if the operation is reduced to a single partition,
 *   {@code PartitionGroupNode} if it is reduced to multiple partitions, or {@code null} if the operation is
 *   neither a BETWEEN nor a simple range. {@code null} is also returned if partitions cannot be extracted from the given operation.
 * @throws IgniteCheckedException If failed.
 */
private PartitionNode tryExtractBetween(GridSqlOperation op, PartitionTableModel tblModel) throws IgniteCheckedException {
    // A between operation (or a similar range) should contain exactly two children.
    assert op.size() == 2;
    GridSqlAst left = op.child();
    GridSqlAst right = op.child(1);
    GridSqlOperationType leftOpType = retrieveOperationType(left);
    GridSqlOperationType rightOpType = retrieveOperationType(right);
    if ((GridSqlOperationType.BIGGER == rightOpType || GridSqlOperationType.BIGGER_EQUAL == rightOpType) && (GridSqlOperationType.SMALLER == leftOpType || GridSqlOperationType.SMALLER_EQUAL == leftOpType)) {
        GridSqlAst tmp = left;
        left = right;
        right = tmp;
    } else if (!((GridSqlOperationType.BIGGER == leftOpType || GridSqlOperationType.BIGGER_EQUAL == leftOpType) && (GridSqlOperationType.SMALLER == rightOpType || GridSqlOperationType.SMALLER_EQUAL == rightOpType)))
        return null;
    // Try to parse the left AST.
    GridSqlColumn leftCol;
    if (left instanceof GridSqlOperation && left.child() instanceof GridSqlColumn && (((GridSqlColumn) left.child()).column().getTable() instanceof GridH2Table))
        leftCol = left.child();
    else
        return null;
    // Try to parse the right AST.
    GridSqlColumn rightCol;
    if (right instanceof GridSqlOperation && right.child() instanceof GridSqlColumn)
        rightCol = right.child();
    else
        return null;
    GridH2Table tbl = (GridH2Table) leftCol.column().getTable();
    // Check that columns might be used for partition pruning.
    if (!tbl.isColumnForPartitionPruning(leftCol.column()))
        return null;
    // Check that both left and right AST use same column.
    if (!F.eq(leftCol.schema(), rightCol.schema()) || !F.eq(leftCol.columnName(), rightCol.columnName()) || !F.eq(leftCol.tableAlias(), rightCol.tableAlias()))
        return null;
    // Check the column type.
    if (!(leftCol.column().getType() == Value.BYTE || leftCol.column().getType() == Value.SHORT || leftCol.column().getType() == Value.INT || leftCol.column().getType() == Value.LONG))
        return null;
    // Try to parse the left AST's right operand (the value to the right of '>' or '>=').
    GridSqlConst leftConst;
    if (left.child(1) instanceof GridSqlConst)
        leftConst = left.child(1);
    else
        return null;
    // Try to parse the right AST's right operand (the value to the right of '<' or '<=').
    GridSqlConst rightConst;
    if (right.child(1) instanceof GridSqlConst)
        rightConst = right.child(1);
    else
        return null;
    long leftLongVal;
    long rightLongVal;
    try {
        leftLongVal = leftConst.value().getLong();
        rightLongVal = rightConst.value().getLong();
    } catch (Exception e) {
        return null;
    }
    // Increment left long value if '>' is used.
    if (((GridSqlOperation) left).operationType() == GridSqlOperationType.BIGGER)
        leftLongVal++;
    // Decrement right long value if '<' is used.
    if (((GridSqlOperation) right).operationType() == GridSqlOperationType.SMALLER)
        rightLongVal--;
    Set<PartitionSingleNode> parts = new HashSet<>();
    PartitionTable tbl0 = tblModel.table(leftCol.tableAlias());
    // If the table is in the ignored set, we cannot use it for partition extraction.
    if (tbl0 == null)
        return null;
    for (long i = leftLongVal; i <= rightLongVal; i++) {
        int part = partResolver.partition(i, leftCol.column().getType(), tbl0.cacheName());
        parts.add(new PartitionConstantNode(tbl0, part));
        if (parts.size() > maxPartsCntBetween)
            return null;
    }
    return parts.isEmpty() ? PartitionNoneNode.INSTANCE : parts.size() == 1 ? parts.iterator().next() : new PartitionGroupNode(parts);
}
Also used: PartitionTable (org.apache.ignite.internal.sql.optimizer.affinity.PartitionTable), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), GridSqlConst (org.apache.ignite.internal.processors.query.h2.sql.GridSqlConst), GridSqlAst (org.apache.ignite.internal.processors.query.h2.sql.GridSqlAst), GridSqlColumn (org.apache.ignite.internal.processors.query.h2.sql.GridSqlColumn), GridH2Table (org.apache.ignite.internal.processors.query.h2.opt.GridH2Table), PartitionSingleNode (org.apache.ignite.internal.sql.optimizer.affinity.PartitionSingleNode), PartitionConstantNode (org.apache.ignite.internal.sql.optimizer.affinity.PartitionConstantNode), GridSqlOperationType (org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType), GridSqlOperation (org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperation), PartitionGroupNode (org.apache.ignite.internal.sql.optimizer.affinity.PartitionGroupNode), HashSet (java.util.HashSet)
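
As a quick orientation for the range expansion above, here is a minimal, self-contained Java sketch. The partitionFor function and the MAX_PARTS cap are hypothetical stand-ins for partResolver.partition and maxPartsCntBetween; this illustrates the technique rather than the Ignite API.

import java.util.HashSet;
import java.util.Set;

// Hypothetical stand-in for PartitionExtractor's range expansion; not Ignite API.
public class BetweenExpansionSketch {
    /** Maximum number of partitions we are willing to enumerate (mirrors maxPartsCntBetween). */
    private static final int MAX_PARTS = 16;

    /** Hypothetical affinity function: maps a key value to a partition id. */
    private static int partitionFor(long key, int partitionCount) {
        return (int) Math.abs(key % partitionCount);
    }

    /**
     * @return Candidate partitions for {@code col > lower AND col <= upper} (with strictness flags), or
     *         {@code null} if the range is too wide to enumerate, in which case pruning is not worthwhile.
     */
    public static Set<Integer> expand(long lower, long upper, boolean lowerStrict, boolean upperStrict, int partitionCount) {
        long from = lowerStrict ? lower + 1 : lower; // '>' bumps the lower bound, like leftLongVal++.
        long to = upperStrict ? upper - 1 : upper;   // '<' drops the upper bound, like rightLongVal--.

        Set<Integer> parts = new HashSet<>();

        for (long i = from; i <= to; i++) {
            parts.add(partitionFor(i, partitionCount));

            if (parts.size() > MAX_PARTS)
                return null; // Too many candidate partitions: give up on pruning.
        }

        return parts;
    }

    public static void main(String[] args) {
        // col > 5 AND col <= 9 over 1024 partitions -> at most 4 candidate partitions.
        System.out.println(expand(5, 9, true, false, 1024));
    }
}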

Example 27 with IN

Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.

The class H2TreeIndex, method onIndexRangeRequest.

/**
 * @param node Requesting node.
 * @param msg Request message.
 */
private void onIndexRangeRequest(final ClusterNode node, final GridH2IndexRangeRequest msg) {
    // We deliberately do not use try-with-resources: the catch block must also be executed in the context of this span.
    TraceSurroundings trace = MTC.support(ctx.tracing().create(SQL_IDX_RANGE_REQ, MTC.span()));
    Span span = MTC.span();
    try {
        span.addTag(SQL_IDX, () -> idxName);
        span.addTag(SQL_TABLE, () -> tblName);
        GridH2IndexRangeResponse res = new GridH2IndexRangeResponse();
        res.originNodeId(msg.originNodeId());
        res.queryId(msg.queryId());
        res.originSegmentId(msg.originSegmentId());
        res.segment(msg.segment());
        res.batchLookupId(msg.batchLookupId());
        QueryContext qctx = qryCtxRegistry.getShared(msg.originNodeId(), msg.queryId(), msg.originSegmentId());
        if (qctx == null)
            res.status(STATUS_NOT_FOUND);
        else {
            DistributedJoinContext joinCtx = qctx.distributedJoinContext();
            assert joinCtx != null;
            try {
                RangeSource src;
                if (msg.bounds() != null) {
                    // This is the first request containing all the search rows.
                    assert !msg.bounds().isEmpty() : "empty bounds";
                    src = new RangeSource(this, msg.bounds(), msg.segment(), idxQryContext(qctx));
                } else {
                    // This is a request to fetch the next portion of data.
                    src = joinCtx.getSource(node.id(), msg.segment(), msg.batchLookupId());
                    assert src != null;
                }
                List<GridH2RowRange> ranges = new ArrayList<>();
                int maxRows = joinCtx.pageSize();
                assert maxRows > 0 : maxRows;
                while (maxRows > 0) {
                    GridH2RowRange range = src.next(maxRows);
                    if (range == null)
                        break;
                    ranges.add(range);
                    if (range.rows() != null)
                        maxRows -= range.rows().size();
                }
                assert !ranges.isEmpty();
                if (src.hasMoreRows()) {
                    // Save source for future fetches.
                    if (msg.bounds() != null)
                        joinCtx.putSource(node.id(), msg.segment(), msg.batchLookupId(), src);
                } else if (msg.bounds() == null) {
                    // Drop saved source.
                    joinCtx.putSource(node.id(), msg.segment(), msg.batchLookupId(), null);
                }
                res.ranges(ranges);
                res.status(STATUS_OK);
                span.addTag(SQL_IDX_RANGE_ROWS, () -> Integer.toString(ranges.stream().mapToInt(GridH2RowRange::rowsSize).sum()));
            } catch (Throwable th) {
                span.addTag(ERROR, th::getMessage);
                U.error(log, "Failed to process request: " + msg, th);
                res.error(th.getClass() + ": " + th.getMessage());
                res.status(STATUS_ERROR);
            }
        }
        send(singletonList(node), res);
    } catch (Throwable th) {
        span.addTag(ERROR, th::getMessage);
        throw th;
    } finally {
        if (trace != null)
            trace.close();
    }
}
Also used: GridH2IndexRangeResponse (org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2IndexRangeResponse), DistributedJoinContext (org.apache.ignite.internal.processors.query.h2.opt.join.DistributedJoinContext), RangeSource (org.apache.ignite.internal.processors.query.h2.opt.join.RangeSource), ArrayList (java.util.ArrayList), GridH2RowRange (org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2RowRange), IndexQueryContext (org.apache.ignite.internal.cache.query.index.sorted.inline.IndexQueryContext), QueryContext (org.apache.ignite.internal.processors.query.h2.opt.QueryContext), Span (org.apache.ignite.internal.processors.tracing.Span), TraceSurroundings (org.apache.ignite.internal.processors.tracing.MTC.TraceSurroundings)
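
The heart of the handler is the loop that drains at most joinCtx.pageSize() rows per response. The sketch below reproduces that draining logic with hypothetical RowRange and RowRangeSource interfaces standing in for GridH2RowRange and RangeSource; it folds the null-rows check into rowsSize() returning 0, but is otherwise the same idea.

import java.util.ArrayList;
import java.util.List;

// Hypothetical simplification of the paging loop in onIndexRangeRequest; RowRange and RowRangeSource are
// stand-ins for GridH2RowRange and RangeSource, not real Ignite types.
public class RangePagingSketch {
    interface RowRange {
        /** @return Number of rows carried by this range (0 for a marker range without rows). */
        int rowsSize();
    }

    interface RowRangeSource {
        /** @return Next range holding at most {@code maxRows} rows, or {@code null} if exhausted. */
        RowRange next(int maxRows);

        /** @return {@code true} if more rows remain and the source must be kept for the next request. */
        boolean hasMoreRows();
    }

    /** Drains at most {@code pageSize} rows from the source, mirroring the while (maxRows > 0) loop. */
    static List<RowRange> drainPage(RowRangeSource src, int pageSize) {
        List<RowRange> ranges = new ArrayList<>();

        int maxRows = pageSize;

        while (maxRows > 0) {
            RowRange range = src.next(maxRows);

            if (range == null)
                break; // Source exhausted before the page was filled.

            ranges.add(range);

            maxRows -= range.rowsSize();
        }

        return ranges;
    }
}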

Example 28 with IN

Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.

The class UpdatePlan, method processRow.

/**
 * Convert a row into a key-value pair.
 *
 * @param row Row to process.
 * @throws IgniteCheckedException If failed.
 */
public IgniteBiTuple<?, ?> processRow(List<?> row) throws IgniteCheckedException {
    if (mode != BULK_LOAD && row.size() != colNames.length)
        throw new IgniteSQLException("Not enough values in a row: " + row.size() + " instead of " + colNames.length, IgniteQueryErrorCode.ENTRY_PROCESSING);
    GridH2RowDescriptor rowDesc = tbl.rowDescriptor();
    GridQueryTypeDescriptor desc = rowDesc.type();
    GridCacheContext cctx = rowDesc.context();
    Object key = keySupplier.apply(row);
    if (QueryUtils.isSqlType(desc.keyClass())) {
        assert keyColIdx != -1;
        key = DmlUtils.convert(key, rowDesc, desc.keyClass(), colTypes[keyColIdx], colNames[keyColIdx]);
    }
    Object val = valSupplier.apply(row);
    if (QueryUtils.isSqlType(desc.valueClass())) {
        assert valColIdx != -1;
        val = DmlUtils.convert(val, rowDesc, desc.valueClass(), colTypes[valColIdx], colNames[valColIdx]);
    }
    if (key == null) {
        if (F.isEmpty(desc.keyFieldName()))
            throw new IgniteSQLException("Key for INSERT, COPY, or MERGE must not be null", IgniteQueryErrorCode.NULL_KEY);
        else
            throw new IgniteSQLException("Null value is not allowed for column '" + desc.keyFieldName() + "'", IgniteQueryErrorCode.NULL_KEY);
    }
    if (val == null) {
        if (F.isEmpty(desc.valueFieldName()))
            throw new IgniteSQLException("Value for INSERT, COPY, MERGE, or UPDATE must not be null", IgniteQueryErrorCode.NULL_VALUE);
        else
            throw new IgniteSQLException("Null value is not allowed for column '" + desc.valueFieldName() + "'", IgniteQueryErrorCode.NULL_VALUE);
    }
    int actualColCnt = Math.min(colNames.length, row.size());
    Map<String, Object> newColVals = new HashMap<>();
    for (int i = 0; i < actualColCnt; i++) {
        if (i == keyColIdx || i == valColIdx)
            continue;
        String colName = colNames[i];
        GridQueryProperty prop = desc.property(colName);
        assert prop != null;
        Class<?> expCls = prop.type();
        newColVals.put(colName, DmlUtils.convert(row.get(i), rowDesc, expCls, colTypes[i], colNames[i]));
    }
    desc.setDefaults(key, val);
    // We update columns in the order specified by the table for a reason - table's
    // column order preserves their precedence for correct update of nested properties.
    Column[] tblCols = tbl.getColumns();
    // The first two columns are _key and _val. Skip them.
    for (int i = QueryUtils.DEFAULT_COLUMNS_COUNT; i < tblCols.length; i++) {
        if (tbl.rowDescriptor().isKeyValueOrVersionColumn(i))
            continue;
        String colName = tblCols[i].getName();
        if (!newColVals.containsKey(colName))
            continue;
        Object colVal = newColVals.get(colName);
        desc.setValue(colName, key, val, colVal);
    }
    if (cctx.binaryMarshaller()) {
        if (key instanceof BinaryObjectBuilder)
            key = ((BinaryObjectBuilder) key).build();
        if (val instanceof BinaryObjectBuilder)
            val = ((BinaryObjectBuilder) val).build();
    }
    desc.validateKeyAndValue(key, val);
    return new IgniteBiTuple<>(key, val);
}
Also used: GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext), HashMap (java.util.HashMap), IgniteBiTuple (org.apache.ignite.lang.IgniteBiTuple), GridQueryTypeDescriptor (org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor), GridQueryProperty (org.apache.ignite.internal.processors.query.GridQueryProperty), GridH2RowDescriptor (org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor), Column (org.h2.table.Column), IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException), BinaryObject (org.apache.ignite.binary.BinaryObject), BinaryObjectBuilder (org.apache.ignite.binary.BinaryObjectBuilder)
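
The column-mapping step in the middle of processRow (building newColVals while skipping the key and value columns) can be isolated into the plain-Java sketch below. colNames, keyColIdx and valColIdx mirror the plan fields of the same names; the DmlUtils.convert type conversion is deliberately left out.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch of the column-mapping step inside processRow: copy every row value that is not
// the key or value column itself into a name -> value map. No Ignite types are used.
public class RowMappingSketch {
    static Map<String, Object> mapColumns(List<?> row, String[] colNames, int keyColIdx, int valColIdx) {
        int actualColCnt = Math.min(colNames.length, row.size());

        Map<String, Object> newColVals = new HashMap<>();

        for (int i = 0; i < actualColCnt; i++) {
            if (i == keyColIdx || i == valColIdx)
                continue; // _key and _val are materialized separately, not as regular columns.

            newColVals.put(colNames[i], row.get(i));
        }

        return newColVals;
    }

    public static void main(String[] args) {
        // INSERT INTO person (_key, name, age) VALUES (1, 'Ann', 30): key column is index 0, no explicit _val.
        System.out.println(mapColumns(List.of(1, "Ann", 30), new String[] {"_KEY", "NAME", "AGE"}, 0, -1));
    }
}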

Example 29 with IN

Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.

The class UpdatePlan, method createRows.

/**
 * Extract rows from the plan without performing any query.
 *
 * @param args Original query arguments.
 * @return {@link List} of rows from the plan for a single query.
 * For example, if we have multiple args in a query: <br/>
 * {@code INSERT INTO person VALUES (k1, v1), (k2, v2), (k3, v3);} <br/>
 * we will get a {@link List} of {@link List} with items {@code {[k1, v1], [k2, v2], [k3, v3]}}.
 * @throws IgniteCheckedException if failed.
 */
public List<List<?>> createRows(Object[] args) throws IgniteCheckedException {
    assert rowsNum > 0 && !F.isEmpty(colNames);
    List<List<?>> res = new ArrayList<>(rowsNum);
    GridH2RowDescriptor desc = tbl.rowDescriptor();
    extractArgsValues(args, res, desc);
    return res;
}
Also used: GridH2RowDescriptor (org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor), ArrayList (java.util.ArrayList), List (java.util.List)
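
extractArgsValues itself is not shown in this example, so the sketch below is only a hypothetical illustration of the row layout it produces: flat statement arguments are split into rowsNum lists of colsPerRow values each. The real method additionally handles embedded constants and per-column type conversion.

import java.util.ArrayList;
import java.util.List;

// Hypothetical illustration of how flat statement arguments could be split into per-row value lists for
// INSERT INTO person VALUES (?, ?), (?, ?), (?, ?). Not the real extractArgsValues.
public class CreateRowsSketch {
    static List<List<?>> splitIntoRows(Object[] args, int rowsNum, int colsPerRow) {
        List<List<?>> res = new ArrayList<>(rowsNum);

        for (int row = 0; row < rowsNum; row++) {
            List<Object> vals = new ArrayList<>(colsPerRow);

            for (int col = 0; col < colsPerRow; col++)
                vals.add(args[row * colsPerRow + col]);

            res.add(vals);
        }

        return res;
    }

    public static void main(String[] args) {
        // Three rows of (key, value): [[k1, v1], [k2, v2], [k3, v3]].
        System.out.println(splitIntoRows(new Object[] {"k1", "v1", "k2", "v2", "k3", "v3"}, 3, 2));
    }
}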

Example 30 with IN

Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.

The class UpdatePlanBuilder, method verifyDmlColumns.

/**
 * Checks that DML query (insert, merge, update, bulk load aka copy) columns: <br/>
 * 1) doesn't contain both entire key (_key or alias) and columns referring to part of the key; <br/>
 * 2) doesn't contain both entire value (_val or alias) and columns referring to part of the value. <br/>
 *
 * @param tab Updated table.
 * @param affectedCols Table column names affected by the DML query. Their order should match the order in the
 * DML statement, purely so that the error message lists the columns in the same order.
 * @throws IgniteSQLException if check failed.
 */
private static void verifyDmlColumns(GridH2Table tab, Collection<Column> affectedCols) {
    GridH2RowDescriptor desc = tab.rowDescriptor();
    // Whether _key (_val) or its alias exists in the update columns.
    String keyColName = null;
    String valColName = null;
    // Whether fields that are part of the key (value) exist in the updated columns.
    boolean hasKeyProps = false;
    boolean hasValProps = false;
    for (Column col : affectedCols) {
        int colId = col.getColumnId();
        // Along the way, check that _key (_val) and its alias are not both specified.
        if (desc.isKeyColumn(colId)) {
            if (keyColName == null)
                keyColName = col.getName();
            else
                throw new IgniteSQLException("Columns " + keyColName + " and " + col + " both refer to entire cache key object.", IgniteQueryErrorCode.PARSING);
        } else if (desc.isValueColumn(colId)) {
            if (valColName == null)
                valColName = col.getName();
            else
                throw new IgniteSQLException("Columns " + valColName + " and " + col + " both refer to entire cache value object.", IgniteQueryErrorCode.PARSING);
        } else {
            // Column ids 0..2 are _key, _val, _ver
            assert colId >= QueryUtils.DEFAULT_COLUMNS_COUNT : "Unexpected column [name=" + col + ", id=" + colId + "].";
            if (desc.isColumnKeyProperty(colId - QueryUtils.DEFAULT_COLUMNS_COUNT))
                hasKeyProps = true;
            else
                hasValProps = true;
        }
        // Also check the invariants to fail fast.
        boolean hasEntireKeyCol = keyColName != null;
        boolean hasEntireValcol = valColName != null;
        if (hasEntireKeyCol && hasKeyProps)
            throw new IgniteSQLException("Column " + keyColName + " refers to entire key cache object. " + "It must not be mixed with other columns that refer to parts of key.", IgniteQueryErrorCode.PARSING);
        if (hasEntireValcol && hasValProps)
            throw new IgniteSQLException("Column " + valColName + " refers to entire value cache object. " + "It must not be mixed with other columns that refer to parts of value.", IgniteQueryErrorCode.PARSING);
        if (!ALLOW_KEY_VAL_UPDATES) {
            if (desc.isKeyColumn(colId) && !QueryUtils.isSqlType(desc.type().keyClass())) {
                throw new IgniteSQLException("Update of composite key column is not supported", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
            }
            if (desc.isValueColumn(colId) && !QueryUtils.isSqlType(desc.type().valueClass())) {
                throw new IgniteSQLException("Update of composite value column is not supported", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
            }
        }
    }
}
Also used: GridH2RowDescriptor (org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor), GridSqlColumn (org.apache.ignite.internal.processors.query.h2.sql.GridSqlColumn), Column (org.h2.table.Column), IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException)
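
The two invariants enforced above (the whole-key column must not be mixed with key-field columns, and the whole-value column must not be mixed with value-field columns) can be expressed in a stripped-down, hypothetical form. Column metadata here is reduced to plain name sets, whereas the real code consults the H2 row descriptor.

import java.util.List;
import java.util.Set;

// Hypothetical, stripped-down version of the verifyDmlColumns invariants; no H2 or Ignite types are involved.
public class DmlColumnCheckSketch {
    static void verify(List<String> affectedCols, Set<String> keyFields, Set<String> valFields) {
        boolean hasEntireKeyCol = false;
        boolean hasEntireValCol = false;
        boolean hasKeyProps = false;
        boolean hasValProps = false;

        for (String col : affectedCols) {
            if (col.equalsIgnoreCase("_KEY"))
                hasEntireKeyCol = true;
            else if (col.equalsIgnoreCase("_VAL"))
                hasEntireValCol = true;
            else if (keyFields.contains(col))
                hasKeyProps = true;
            else if (valFields.contains(col))
                hasValProps = true;

            // Fail fast, just like the original loop does.
            if (hasEntireKeyCol && hasKeyProps)
                throw new IllegalArgumentException("_KEY must not be mixed with columns that are parts of the key.");

            if (hasEntireValCol && hasValProps)
                throw new IllegalArgumentException("_VAL must not be mixed with columns that are parts of the value.");
        }
    }

    public static void main(String[] args) {
        // Throws: _KEY and ID (a key field) appear together.
        verify(List.of("_KEY", "ID", "NAME"), Set.of("ID"), Set.of("NAME"));
    }
}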

Aggregations

IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 35
ArrayList (java.util.ArrayList): 29
IgniteException (org.apache.ignite.IgniteException): 26
IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException): 26
SQLException (java.sql.SQLException): 21
List (java.util.List): 21
GridH2Table (org.apache.ignite.internal.processors.query.h2.opt.GridH2Table): 21
GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext): 19
CacheException (javax.cache.CacheException): 15
QueryCancelledException (org.apache.ignite.cache.query.QueryCancelledException): 13
GridH2RowDescriptor (org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor): 13
Column (org.h2.table.Column): 13
IgniteH2Indexing (org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing): 10
IgniteBiTuple (org.apache.ignite.lang.IgniteBiTuple): 10
ResultSet (java.sql.ResultSet): 9
HashMap (java.util.HashMap): 9
CacheDataRow (org.apache.ignite.internal.processors.cache.persistence.CacheDataRow): 9
GridSqlSelect (org.apache.ignite.internal.processors.query.h2.sql.GridSqlSelect): 9
PreparedStatement (java.sql.PreparedStatement): 8
HashSet (java.util.HashSet): 8