Example 6 with Update

use of org.h2.command.dml.Update in project ignite by apache.

the class GridH2Table method doUpdate.

/**
 * For testing only.
 *
 * @param row Row.
 * @param del If given row should be deleted from table.
 * @return {@code True} if operation succeeded.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("LockAcquiredButNotSafelyReleased")
boolean doUpdate(final GridH2Row row, boolean del) throws IgniteCheckedException {
    // Here we assume that each key can't be updated concurrently, i.e. the case where different indexes
    // are updated from different threads with different rows for the same key is impossible.
    GridUnsafeMemory mem = desc == null ? null : desc.memory();
    lock(false);
    if (mem != null)
        desc.guard().begin();
    try {
        ensureNotDestroyed();
        GridH2IndexBase pk = pk();
        if (!del) {
            assert rowFactory == null || row.link != 0 : row;
            // Put to PK.
            GridH2Row old = pk.put(row);
            if (old == null)
                size.increment();
            int len = idxs.size();
            int i = pkIndexPos;
            // Start from 3 because 0 - Scan (don't need to update), 1 - PK hash (already updated), 2 - PK (already updated).
            while (++i < len) {
                if (!(idxs.get(i) instanceof GridH2IndexBase))
                    continue;
                GridH2IndexBase idx = index(i);
                addToIndex(idx, pk, row, old, false);
            }
            for (GridH2IndexBase idx : tmpIdxs.values())
                addToIndex(idx, pk, row, old, true);
        } else {
            //  index(1) is PK, get full row from there (search row here contains only key but no other columns).
            GridH2Row old = pk.remove(row);
            if (old != null) {
                // Start from 3 because 0 - Scan (don't need to update), 1 - PK hash (already updated), 2 - PK (already updated).
                for (int i = pkIndexPos + 1, len = idxs.size(); i < len; i++) {
                    if (!(idxs.get(i) instanceof GridH2IndexBase))
                        continue;
                    Row res = index(i).remove(old);
                    assert eq(pk, res, old) : "\n" + old + "\n" + res + "\n" + i + " -> " + index(i).getName();
                }
                for (GridH2IndexBase idx : tmpIdxs.values())
                    idx.remove(old);
                size.decrement();
            } else
                return false;
        }
        // The snapshot is not actual after update.
        if (actualSnapshot != null)
            actualSnapshot.set(pk.segmentForRow(row), null);
        return true;
    } finally {
        unlock(false);
        if (mem != null)
            desc.guard().end();
    }
}
Also used : Row(org.h2.result.Row) SearchRow(org.h2.result.SearchRow) GridUnsafeMemory(org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory)
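
The method above follows a fixed update discipline: take the table lock, update the primary index first, propagate the same row (or its removal) to every secondary index, invalidate the cached snapshot, and release the lock in a finally block. Below is a minimal, self-contained sketch of that discipline with simplified stand-in types (a plain map as the PK, a ReentrantReadWriteLock instead of Ignite's table lock); it is illustrative only and does not use Ignite's actual classes.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Minimal sketch of the doUpdate pattern: PK first, then every secondary index, under a lock. */
public class MiniTable {
    /** Simplified row: a key plus an opaque value. */
    public static final class Row {
        final Object key;
        final Object val;

        Row(Object key, Object val) {
            this.key = key;
            this.val = val;
        }
    }

    /** Simplified secondary index keyed by the same key as the PK. */
    public interface Index {
        void put(Row row);

        void remove(Row row);
    }

    private final Map<Object, Row> pk = new ConcurrentHashMap<>();
    private final List<Index> secondaryIdxs = new ArrayList<>();
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final LongAdder size = new LongAdder();
    private volatile Object cachedSnapshot; // stand-in for actualSnapshot

    /** Registers a secondary index; call before the table is used. */
    public void addIndex(Index idx) {
        secondaryIdxs.add(idx);
    }

    /** @return {@code true} if the operation changed the table, mirroring doUpdate's contract. */
    public boolean doUpdate(Row row, boolean del) {
        // Shared table lock, analogous to lock(false) above.
        lock.readLock().lock();
        try {
            if (!del) {
                // 1. PK first, then every secondary index sees the same row.
                Row old = pk.put(row.key, row);
                if (old == null)
                    size.increment();
                for (Index idx : secondaryIdxs)
                    idx.put(row);
            }
            else {
                // The search row carries only the key; the removed PK entry is the full old row.
                Row old = pk.remove(row.key);
                if (old == null)
                    return false;
                for (Index idx : secondaryIdxs)
                    idx.remove(old);
                size.decrement();
            }
            // Any cached snapshot is stale after an update.
            cachedSnapshot = null;
            return true;
        }
        finally {
            // Always released, as in the finally block above.
            lock.readLock().unlock();
        }
    }
}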

Example 7 with Update

use of org.h2.command.dml.Update in project ignite by apache.

the class GridSqlQueryParser method processExtraParam.

/**
 * @param name Param name.
 * @param val Param value.
 * @param res Table params to update.
 */
private static void processExtraParam(String name, String val, GridSqlCreateTable res) {
    assert !F.isEmpty(name);
    switch(name) {
        case PARAM_TEMPLATE:
            ensureNotEmpty(name, val);
            res.templateName(val);
            break;
        case PARAM_BACKUPS:
            ensureNotEmpty(name, val);
            int backups = parseIntParam(PARAM_BACKUPS, val);
            if (backups < 0)
                throw new IgniteSQLException("\"" + PARAM_BACKUPS + "\" cannot be negative: " + backups, IgniteQueryErrorCode.PARSING);
            res.backups(backups);
            break;
        case PARAM_ATOMICITY:
            ensureNotEmpty(name, val);
            CacheAtomicityMode atomicityMode;
            if (CacheAtomicityMode.TRANSACTIONAL.name().equalsIgnoreCase(val))
                atomicityMode = CacheAtomicityMode.TRANSACTIONAL;
            else if (CacheAtomicityMode.ATOMIC.name().equalsIgnoreCase(val))
                atomicityMode = CacheAtomicityMode.ATOMIC;
            else
                throw new IgniteSQLException("Invalid value of \"" + PARAM_ATOMICITY + "\" parameter " + "(should be either TRANSACTIONAL or ATOMIC): " + val, IgniteQueryErrorCode.PARSING);
            res.atomicityMode(atomicityMode);
            break;
        case PARAM_CACHE_NAME:
            ensureNotEmpty(name, val);
            res.cacheName(val);
            break;
        case PARAM_KEY_TYPE:
            ensureNotEmpty(name, val);
            res.keyTypeName(val);
            break;
        case PARAM_VAL_TYPE:
            ensureNotEmpty(name, val);
            res.valueTypeName(val);
            break;
        case PARAM_CACHE_GROUP_OLD:
        case PARAM_CACHE_GROUP:
            ensureNotEmpty(name, val);
            res.cacheGroup(val);
            break;
        case PARAM_AFFINITY_KEY_OLD:
        case PARAM_AFFINITY_KEY:
            ensureNotEmpty(name, val);
            String affColName = null;
            // Either strip column name off its quotes, or uppercase it.
            if (val.startsWith("'")) {
                if (val.length() == 1 || !val.endsWith("'"))
                    throw new IgniteSQLException("Affinity key column name does not have trailing quote: " + val, IgniteQueryErrorCode.PARSING);
                val = val.substring(1, val.length() - 1);
                ensureNotEmpty(name, val);
                affColName = val;
            } else {
                for (String colName : res.columns().keySet()) {
                    if (val.equalsIgnoreCase(colName)) {
                        if (affColName != null)
                            throw new IgniteSQLException("Ambiguous affinity column name, use single quotes " + "for case sensitivity: " + val, IgniteQueryErrorCode.PARSING);
                        affColName = colName;
                    }
                }
            }
            if (affColName == null || !res.columns().containsKey(affColName))
                throw new IgniteSQLException("Affinity key column with given name not found: " + val, IgniteQueryErrorCode.PARSING);
            if (!res.primaryKeyColumns().contains(affColName))
                throw new IgniteSQLException("Affinity key column must be one of key columns: " + affColName, IgniteQueryErrorCode.PARSING);
            res.affinityKey(affColName);
            break;
        case PARAM_WRITE_SYNC:
            ensureNotEmpty(name, val);
            CacheWriteSynchronizationMode writeSyncMode;
            if (CacheWriteSynchronizationMode.FULL_ASYNC.name().equalsIgnoreCase(val))
                writeSyncMode = CacheWriteSynchronizationMode.FULL_ASYNC;
            else if (CacheWriteSynchronizationMode.FULL_SYNC.name().equalsIgnoreCase(val))
                writeSyncMode = CacheWriteSynchronizationMode.FULL_SYNC;
            else if (CacheWriteSynchronizationMode.PRIMARY_SYNC.name().equalsIgnoreCase(val))
                writeSyncMode = CacheWriteSynchronizationMode.PRIMARY_SYNC;
            else
                throw new IgniteSQLException("Invalid value of \"" + PARAM_WRITE_SYNC + "\" parameter " + "(should be FULL_SYNC, FULL_ASYNC, or PRIMARY_SYNC): " + val, IgniteQueryErrorCode.PARSING);
            res.writeSynchronizationMode(writeSyncMode);
            break;
        case PARAM_WRAP_KEY:
            {
                res.wrapKey(F.isEmpty(val) || Boolean.parseBoolean(val));
                break;
            }
        case PARAM_WRAP_VALUE:
            res.wrapValue(F.isEmpty(val) || Boolean.parseBoolean(val));
            break;
        case PARAM_DATA_REGION:
            ensureNotEmpty(name, val);
            res.dataRegionName(val);
            break;
        default:
            throw new IgniteSQLException("Unsupported parameter: " + name, IgniteQueryErrorCode.PARSING);
    }
}
Also used : CacheWriteSynchronizationMode(org.apache.ignite.cache.CacheWriteSynchronizationMode) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException) CacheAtomicityMode(org.apache.ignite.cache.CacheAtomicityMode) AlterTableAddConstraint(org.h2.command.ddl.AlterTableAddConstraint)
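
processExtraParam handles one name/value pair at a time; upstream, the WITH clause of CREATE TABLE arrives as a single comma-separated string. The sketch below shows, under that assumption, how such a string is typically split into pairs before each one is dispatched to a handler like the switch above; the parse method, parameter names and error messages are illustrative, not Ignite's exact ones.

import java.util.HashMap;
import java.util.Map;

/** Minimal sketch: split a WITH "k=v,..." string into pairs and validate a couple of them. */
public class WithParamsSketch {
    /** Splits "name=value" pairs; a bare name (no '=') is treated as a flag with an empty value. */
    public static Map<String, String> parse(String withClause) {
        Map<String, String> params = new HashMap<>();

        for (String pair : withClause.split(",")) {
            int eq = pair.indexOf('=');
            String name = (eq < 0 ? pair : pair.substring(0, eq)).trim().toLowerCase();
            String val = eq < 0 ? "" : pair.substring(eq + 1).trim();

            if (name.isEmpty())
                throw new IllegalArgumentException("Empty parameter name in: " + withClause);

            params.put(name, val);
        }

        return params;
    }

    public static void main(String[] args) {
        Map<String, String> p = parse("template=partitioned, backups=1, atomicity=transactional");

        // Mirror the validation above: a negative backup count is rejected at parse time.
        int backups = Integer.parseInt(p.get("backups"));
        if (backups < 0)
            throw new IllegalArgumentException("\"backups\" cannot be negative: " + backups);

        System.out.println(p);
    }
}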

Example 8 with Update

use of org.h2.command.dml.Update in project ignite by apache.

the class UpdatePlan method processRowForUpdate.

/**
 * Converts a row into a value.
 *
 * @param row Row to process.
 * @return Tuple containing [key, old value, new value].
 * @throws IgniteCheckedException If failed.
 */
public T3<Object, Object, Object> processRowForUpdate(List<?> row) throws IgniteCheckedException {
    GridH2RowDescriptor rowDesc = tbl.rowDescriptor();
    GridQueryTypeDescriptor desc = rowDesc.type();
    GridCacheContext cctx = rowDesc.context();
    boolean hasNewVal = (valColIdx != -1);
    boolean hasProps = !hasNewVal || colNames.length > 1;
    Object key = row.get(0);
    Object oldVal = row.get(1);
    if (cctx.binaryMarshaller() && !(oldVal instanceof BinaryObject))
        oldVal = cctx.grid().binary().toBinary(oldVal);
    Object newVal;
    Map<String, Object> newColVals = new HashMap<>();
    for (int i = 0; i < colNames.length; i++) {
        if (hasNewVal && i == valColIdx - 2)
            continue;
        GridQueryProperty prop = tbl.rowDescriptor().type().property(colNames[i]);
        assert prop != null : "Unknown property: " + colNames[i];
        newColVals.put(colNames[i], DmlUtils.convert(row.get(i + 2), rowDesc, prop.type(), colTypes[i]));
    }
    newVal = valSupplier.apply(row);
    if (newVal == null)
        throw new IgniteSQLException("New value for UPDATE must not be null", IgniteQueryErrorCode.NULL_VALUE);
    // Skip the key, value and version columns - that's why we start from the column after the default ones.
    for (int i = 0; i < tbl.getColumns().length - DEFAULT_COLUMNS_COUNT; i++) {
        Column c = tbl.getColumn(i + DEFAULT_COLUMNS_COUNT);
        if (rowDesc.isKeyValueOrVersionColumn(c.getColumnId()))
            continue;
        GridQueryProperty prop = desc.property(c.getName());
        if (prop.key())
            // Don't get values of key's columns - we won't use them anyway
            continue;
        boolean hasNewColVal = newColVals.containsKey(c.getName());
        if (!hasNewColVal)
            continue;
        Object colVal = newColVals.get(c.getName());
        // UPDATE currently does not allow modifying the key or its fields, so it is safe to pass null as the key.
        rowDesc.setColumnValue(null, newVal, colVal, i);
    }
    if (cctx.binaryMarshaller() && hasProps) {
        assert newVal instanceof BinaryObjectBuilder;
        newVal = ((BinaryObjectBuilder) newVal).build();
    }
    desc.validateKeyAndValue(key, newVal);
    return new T3<>(key, oldVal, newVal);
}
Also used : GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) HashMap(java.util.HashMap) GridQueryTypeDescriptor(org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor) GridQueryProperty(org.apache.ignite.internal.processors.query.GridQueryProperty) GridH2RowDescriptor(org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor) BinaryObject(org.apache.ignite.binary.BinaryObject) Column(org.h2.table.Column) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException) BinaryObject(org.apache.ignite.binary.BinaryObject) BinaryObjectBuilder(org.apache.ignite.binary.BinaryObjectBuilder) T3(org.apache.ignite.internal.util.typedef.T3)
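
The row layout that processRowForUpdate relies on is positional: index 0 is the key, index 1 is the current value, and indexes 2..N carry the new column values in the order of colNames. The sketch below makes that offset arithmetic concrete, with a Map-backed value standing in for Ignite's BinaryObjectBuilder; class and method names are illustrative, not Ignite's.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Minimal sketch of the [key, old value, new column values...] row layout used above. */
public class UpdateRowSketch {
    /** @return a 3-element array [key, old value, new value], analogous to the T3 tuple above. */
    public static Object[] processRowForUpdate(List<?> row, String[] colNames) {
        Object key = row.get(0);    // column 0: key
        Object oldVal = row.get(1); // column 1: current value

        // Start the new value from a copy of the old one, then overlay the updated columns.
        @SuppressWarnings("unchecked")
        Map<String, Object> newVal = new HashMap<>((Map<String, Object>)oldVal);

        for (int i = 0; i < colNames.length; i++)
            newVal.put(colNames[i], row.get(i + 2)); // columns 2..N: updated values

        return new Object[] {key, oldVal, newVal};
    }

    public static void main(String[] args) {
        Map<String, Object> oldVal = new HashMap<>();
        oldVal.put("name", "Ann");
        oldVal.put("age", 30);

        // Update a single column: "age" moves from 30 to 31, "name" is carried over.
        Object[] t = processRowForUpdate(List.of(1, oldVal, 31), new String[] {"age"});

        System.out.println(Arrays.toString(t));
    }
}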

Example 9 with Update

use of org.h2.command.dml.Update in project ignite by apache.

the class UpdatePlan method processRow.

/**
 * Converts a row into a key-value pair.
 *
 * @param row Row to process.
 * @return Key-value pair.
 * @throws IgniteCheckedException If failed.
 */
public IgniteBiTuple<?, ?> processRow(List<?> row) throws IgniteCheckedException {
    if (mode != BULK_LOAD && row.size() != colNames.length)
        throw new IgniteSQLException("Not enough values in a row: " + row.size() + " instead of " + colNames.length, IgniteQueryErrorCode.ENTRY_PROCESSING);
    GridH2RowDescriptor rowDesc = tbl.rowDescriptor();
    GridQueryTypeDescriptor desc = rowDesc.type();
    GridCacheContext cctx = rowDesc.context();
    Object key = keySupplier.apply(row);
    if (QueryUtils.isSqlType(desc.keyClass())) {
        assert keyColIdx != -1;
        key = DmlUtils.convert(key, rowDesc, desc.keyClass(), colTypes[keyColIdx]);
    }
    Object val = valSupplier.apply(row);
    if (QueryUtils.isSqlType(desc.valueClass())) {
        assert valColIdx != -1;
        val = DmlUtils.convert(val, rowDesc, desc.valueClass(), colTypes[valColIdx]);
    }
    if (key == null) {
        if (F.isEmpty(desc.keyFieldName()))
            throw new IgniteSQLException("Key for INSERT, COPY, or MERGE must not be null", IgniteQueryErrorCode.NULL_KEY);
        else
            throw new IgniteSQLException("Null value is not allowed for column '" + desc.keyFieldName() + "'", IgniteQueryErrorCode.NULL_KEY);
    }
    if (val == null) {
        if (F.isEmpty(desc.valueFieldName()))
            throw new IgniteSQLException("Value for INSERT, COPY, MERGE, or UPDATE must not be null", IgniteQueryErrorCode.NULL_VALUE);
        else
            throw new IgniteSQLException("Null value is not allowed for column '" + desc.valueFieldName() + "'", IgniteQueryErrorCode.NULL_VALUE);
    }
    int actualColCnt = Math.min(colNames.length, row.size());
    Map<String, Object> newColVals = new HashMap<>();
    for (int i = 0; i < actualColCnt; i++) {
        if (i == keyColIdx || i == valColIdx)
            continue;
        String colName = colNames[i];
        GridQueryProperty prop = desc.property(colName);
        assert prop != null;
        Class<?> expCls = prop.type();
        newColVals.put(colName, DmlUtils.convert(row.get(i), rowDesc, expCls, colTypes[i]));
    }
    desc.setDefaults(key, val);
    // We update columns in the order specified by the table because the table's column order
    // preserves their precedence, which is required to correctly update nested properties.
    Column[] tblCols = tbl.getColumns();
    // First 3 columns are _key, _val and _ver. Skip 'em.
    for (int i = DEFAULT_COLUMNS_COUNT; i < tblCols.length; i++) {
        if (tbl.rowDescriptor().isKeyValueOrVersionColumn(i))
            continue;
        String colName = tblCols[i].getName();
        if (!newColVals.containsKey(colName))
            continue;
        Object colVal = newColVals.get(colName);
        desc.setValue(colName, key, val, colVal);
    }
    if (cctx.binaryMarshaller()) {
        if (key instanceof BinaryObjectBuilder)
            key = ((BinaryObjectBuilder) key).build();
        if (val instanceof BinaryObjectBuilder)
            val = ((BinaryObjectBuilder) val).build();
    }
    desc.validateKeyAndValue(key, val);
    return new IgniteBiTuple<>(key, val);
}
Also used : GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) HashMap(java.util.HashMap) IgniteBiTuple(org.apache.ignite.lang.IgniteBiTuple) GridQueryTypeDescriptor(org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor) GridQueryProperty(org.apache.ignite.internal.processors.query.GridQueryProperty) GridH2RowDescriptor(org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor) Column(org.h2.table.Column) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException) BinaryObject(org.apache.ignite.binary.BinaryObject) BinaryObjectBuilder(org.apache.ignite.binary.BinaryObjectBuilder)
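
processRow's contract is: one flat row of column values, known positions for the key and value columns, and hard null checks on both before the pair is returned; every remaining column becomes a field of the value. The sketch below captures just that contract, using a Map as the value object; column conversion, defaults and BinaryObject handling are omitted, and all names are illustrative rather than Ignite's.

import java.util.AbstractMap.SimpleEntry;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Minimal sketch of processRow's row-to-pair contract: positions, null checks, field overlay. */
public class InsertRowSketch {
    public static Map.Entry<Object, Object> processRow(List<?> row, String[] colNames,
        int keyColIdx, int valColIdx) {
        if (row.size() != colNames.length)
            throw new IllegalArgumentException(
                "Not enough values in a row: " + row.size() + " instead of " + colNames.length);

        Object key = keyColIdx >= 0 ? row.get(keyColIdx) : null;
        Object val = valColIdx >= 0 ? row.get(valColIdx) : new HashMap<String, Object>();

        // Both halves of the pair are mandatory, mirroring the NULL_KEY / NULL_VALUE checks above.
        if (key == null)
            throw new IllegalArgumentException("Key for INSERT, COPY, or MERGE must not be null");
        if (val == null)
            throw new IllegalArgumentException("Value for INSERT, COPY, MERGE, or UPDATE must not be null");

        // Remaining columns become fields of the value object when it is map-like.
        if (val instanceof Map) {
            @SuppressWarnings("unchecked")
            Map<String, Object> fields = (Map<String, Object>)val;

            for (int i = 0; i < colNames.length; i++) {
                if (i == keyColIdx || i == valColIdx)
                    continue;

                fields.put(colNames[i], row.get(i));
            }
        }

        return new SimpleEntry<>(key, val);
    }
}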

Example 10 with Update

use of org.h2.command.dml.Update in project ignite by apache.

the class GridReduceQueryExecutor method update.

/**
 * @param schemaName Schema name.
 * @param cacheIds Cache ids.
 * @param selectQry Select query.
 * @param params SQL parameters.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param pageSize Page size.
 * @param timeoutMillis Timeout.
 * @param parts Partitions.
 * @param isReplicatedOnly Whether query uses only replicated caches.
 * @param cancel Cancel state.
 * @return Update result, or {@code null} when some map node doesn't support distributed DML.
 */
public UpdateResult update(String schemaName, List<Integer> cacheIds, String selectQry, Object[] params, boolean enforceJoinOrder, int pageSize, int timeoutMillis, final int[] parts, boolean isReplicatedOnly, GridQueryCancel cancel) {
    AffinityTopologyVersion topVer = h2.readyTopologyVersion();
    NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);
    final long reqId = qryIdGen.incrementAndGet();
    final GridRunningQueryInfo qryInfo = new GridRunningQueryInfo(reqId, selectQry, GridCacheQueryType.SQL_FIELDS, schemaName, U.currentTimeMillis(), cancel, false);
    Collection<ClusterNode> nodes = nodesParts.nodes();
    if (nodes == null)
        throw new CacheException("Failed to determine nodes participating in the update. " + "Explanation (Retry update once topology recovers).");
    if (isReplicatedOnly) {
        ClusterNode locNode = ctx.discovery().localNode();
        if (nodes.contains(locNode))
            nodes = singletonList(locNode);
        else
            nodes = singletonList(F.rand(nodes));
    }
    for (ClusterNode n : nodes) {
        if (!n.version().greaterThanEqual(2, 3, 0)) {
            log.warning("Server-side DML optimization is skipped because map node does not support it. " + "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "].");
            return null;
        }
    }
    final DistributedUpdateRun r = new DistributedUpdateRun(nodes.size(), qryInfo);
    int flags = enforceJoinOrder ? GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0;
    if (isReplicatedOnly)
        flags |= GridH2QueryRequest.FLAG_REPLICATED;
    GridH2DmlRequest req = new GridH2DmlRequest().requestId(reqId).topologyVersion(topVer).caches(cacheIds).schemaName(schemaName).query(selectQry).pageSize(pageSize).parameters(params).timeout(timeoutMillis).flags(flags);
    updRuns.put(reqId, r);
    boolean release = false;
    try {
        Map<ClusterNode, IntArray> partsMap = (nodesParts.queryPartitionsMap() != null) ? nodesParts.queryPartitionsMap() : nodesParts.partitionsMap();
        ExplicitPartitionsSpecializer partsSpec = (parts == null) ? null : new ExplicitPartitionsSpecializer(partsMap);
        final Collection<ClusterNode> finalNodes = nodes;
        cancel.set(new Runnable() {

            @Override
            public void run() {
                r.future().onCancelled();
                send(finalNodes, new GridQueryCancelRequest(reqId), null, false);
            }
        });
        // send() logs the debug message
        if (send(nodes, req, partsSpec, false))
            return r.future().get();
        throw new CacheException("Failed to send update request to participating nodes.");
    } catch (IgniteCheckedException | RuntimeException e) {
        release = true;
        U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e);
        throw new CacheException("Failed to run update. " + e.getMessage(), e);
    } finally {
        if (release)
            send(nodes, new GridQueryCancelRequest(reqId), null, false);
        if (!updRuns.remove(reqId, r))
            U.warn(log, "Update run was already removed: " + reqId);
    }
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) GridH2DmlRequest(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest) GridQueryCancelRequest(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) CacheException(javax.cache.CacheException) GridRunningQueryInfo(org.apache.ignite.internal.processors.query.GridRunningQueryInfo) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IntArray(org.h2.util.IntArray)
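
Stripped of the topology and partition-mapping details, the flow above is: register a run under a fresh request id, fan the DML request out to the participating nodes, block on the run's future for the aggregated result, and on failure broadcast a cancel while always deregistering the run in finally. The sketch below shows only that skeleton, with simplified Node/message stand-ins and a CompletableFuture in place of DistributedUpdateRun; it is not Ignite's messaging API.

import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

/** Minimal sketch of the register/fan-out/await/cancel skeleton of the update() method above. */
public class DistributedUpdateSketch {
    /** Stand-in for a cluster node that can receive a message. */
    public interface Node {
        boolean send(Object msg);
    }

    private final AtomicLong reqIdGen = new AtomicLong();

    /** Active runs by request id; a node response handler would complete the matching future. */
    private final Map<Long, CompletableFuture<Long>> updRuns = new ConcurrentHashMap<>();

    /** @return the number of updated rows reported by the map nodes. */
    public long update(List<Node> nodes, Object dmlRequest) {
        long reqId = reqIdGen.incrementAndGet();
        CompletableFuture<Long> run = new CompletableFuture<>();
        updRuns.put(reqId, run);

        boolean cancelOnExit = false;

        try {
            for (Node n : nodes) {
                if (!n.send(dmlRequest))
                    throw new RuntimeException("Failed to send update request to participating nodes.");
            }

            // Completed by the response handler once every node has reported its result.
            return run.get();
        }
        catch (Exception e) {
            cancelOnExit = true;

            throw new RuntimeException("Failed to run update. " + e.getMessage(), e);
        }
        finally {
            if (cancelOnExit) {
                // Best-effort cancel broadcast, mirroring the GridQueryCancelRequest above.
                for (Node n : nodes)
                    n.send("CANCEL:" + reqId);
            }

            updRuns.remove(reqId, run);
        }
    }
}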

Aggregations

SQLException (java.sql.SQLException): 44
DbException (org.h2.message.DbException): 40
Database (org.h2.engine.Database): 39
Connection (java.sql.Connection): 37
PreparedStatement (java.sql.PreparedStatement): 35
Value (org.h2.value.Value): 34
ResultSet (java.sql.ResultSet): 32
Statement (java.sql.Statement): 31
Column (org.h2.table.Column): 30
Table (org.h2.table.Table): 23
JdbcConnection (org.h2.jdbc.JdbcConnection): 22
Expression (org.h2.expression.Expression): 19
StatementBuilder (org.h2.util.StatementBuilder): 14
ValueExpression (org.h2.expression.ValueExpression): 13
ValueString (org.h2.value.ValueString): 13
ArrayList (java.util.ArrayList): 10
Constraint (org.h2.constraint.Constraint): 10
Index (org.h2.index.Index): 10
IndexColumn (org.h2.table.IndexColumn): 10
Task (org.h2.util.Task): 10