Example 16 with In

Use of org.h2.dev.util.BinaryArithmeticStream.In in project ignite by apache.

The class GridReduceQueryExecutor, method update.

/**
 * @param schemaName Schema name.
 * @param cacheIds Cache ids.
 * @param selectQry Select query.
 * @param params SQL parameters.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param pageSize Page size.
 * @param timeoutMillis Timeout.
 * @param parts Partitions.
 * @param isReplicatedOnly Whether query uses only replicated caches.
 * @param cancel Cancel state.
 * @return Update result, or {@code null} when some map node doesn't support distributed DML.
 */
public UpdateResult update(String schemaName, List<Integer> cacheIds, String selectQry, Object[] params,
    boolean enforceJoinOrder, int pageSize, int timeoutMillis, final int[] parts, boolean isReplicatedOnly,
    GridQueryCancel cancel) {
    AffinityTopologyVersion topVer = h2.readyTopologyVersion();
    NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);
    final long reqId = qryIdGen.incrementAndGet();
    final GridRunningQueryInfo qryInfo = new GridRunningQueryInfo(reqId, selectQry, GridCacheQueryType.SQL_FIELDS, schemaName, U.currentTimeMillis(), cancel, false);
    Collection<ClusterNode> nodes = nodesParts.nodes();
    if (nodes == null)
        throw new CacheException("Failed to determine nodes participating in the update. " + "Explanation (Retry update once topology recovers).");
    if (isReplicatedOnly) {
        ClusterNode locNode = ctx.discovery().localNode();
        if (nodes.contains(locNode))
            nodes = singletonList(locNode);
        else
            nodes = singletonList(F.rand(nodes));
    }
    for (ClusterNode n : nodes) {
        if (!n.version().greaterThanEqual(2, 3, 0)) {
            log.warning("Server-side DML optimization is skipped because map node does not support it. " + "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "].");
            return null;
        }
    }
    final DistributedUpdateRun r = new DistributedUpdateRun(nodes.size(), qryInfo);
    int flags = enforceJoinOrder ? GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0;
    if (isReplicatedOnly)
        flags |= GridH2QueryRequest.FLAG_REPLICATED;
    GridH2DmlRequest req = new GridH2DmlRequest()
        .requestId(reqId)
        .topologyVersion(topVer)
        .caches(cacheIds)
        .schemaName(schemaName)
        .query(selectQry)
        .pageSize(pageSize)
        .parameters(params)
        .timeout(timeoutMillis)
        .flags(flags);
    updRuns.put(reqId, r);
    boolean release = false;
    try {
        Map<ClusterNode, IntArray> partsMap = (nodesParts.queryPartitionsMap() != null) ? nodesParts.queryPartitionsMap() : nodesParts.partitionsMap();
        ExplicitPartitionsSpecializer partsSpec = (parts == null) ? null : new ExplicitPartitionsSpecializer(partsMap);
        final Collection<ClusterNode> finalNodes = nodes;
        cancel.set(new Runnable() {

            @Override
            public void run() {
                r.future().onCancelled();
                send(finalNodes, new GridQueryCancelRequest(reqId), null, false);
            }
        });
        // send() logs the debug message
        if (send(nodes, req, partsSpec, false))
            return r.future().get();
        throw new CacheException("Failed to send update request to participating nodes.");
    } catch (IgniteCheckedException | RuntimeException e) {
        release = true;
        U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e);
        throw new CacheException("Failed to run update. " + e.getMessage(), e);
    } finally {
        if (release)
            send(nodes, new GridQueryCancelRequest(reqId), null, false);
        if (!updRuns.remove(reqId, r))
            U.warn(log, "Update run was already removed: " + reqId);
    }
}
Also used: ClusterNode(org.apache.ignite.cluster.ClusterNode) GridH2DmlRequest(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest) GridQueryCancelRequest(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) CacheException(javax.cache.CacheException) GridRunningQueryInfo(org.apache.ignite.internal.processors.query.GridRunningQueryInfo) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IntArray(org.h2.util.IntArray)
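
The notable detail in update() is the cancel hook: cancel.set(...) installs a Runnable that fails the local future and broadcasts a GridQueryCancelRequest to the map nodes. Below is a minimal, self-contained sketch of that pattern in plain Java; CancelHook and the CompletableFuture are illustrative stand-ins for Ignite's GridQueryCancel and DistributedUpdateRun, not the real API.

import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;

final class CancelHook {
    private final AtomicReference<Runnable> hook = new AtomicReference<>();

    void set(Runnable r) {
        hook.set(r);
    }

    void cancel() {
        Runnable r = hook.getAndSet(null);
        if (r != null)
            r.run(); // the hook fires at most once
    }
}

class CancelDemo {
    public static void main(String[] args) {
        CompletableFuture<String> fut = new CompletableFuture<>();
        CancelHook cancel = new CancelHook();

        // Analogue of cancel.set(...) in update(): fail the local future and,
        // in Ignite, also broadcast a GridQueryCancelRequest to the map nodes.
        cancel.set(() -> fut.completeExceptionally(new CancellationException("query cancelled")));

        cancel.cancel();
        System.out.println(fut.isCompletedExceptionally()); // prints: true
    }
}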

Example 17 with In

Use of org.h2.dev.util.BinaryArithmeticStream.In in project ignite by apache.

The class GridReduceQueryExecutor, method partitionedUnstableDataNodes.

/**
 * Calculates partition mapping for partitioned cache on unstable topology.
 *
 * @param cacheIds Cache IDs.
 * @return Partition mapping or {@code null} if we can't calculate it due to repartitioning and we need to retry.
 */
@SuppressWarnings("unchecked")
private Map<ClusterNode, IntArray> partitionedUnstableDataNodes(List<Integer> cacheIds) {
    // If the main cache is replicated, just replace it with the first partitioned.
    GridCacheContext<?, ?> cctx = findFirstPartitioned(cacheIds);
    final int partsCnt = cctx.affinity().partitions();
    if (cacheIds.size() > 1) {
        // Check correct number of partitions for partitioned caches.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;
            int parts = extraCctx.affinity().partitions();
            if (parts != partsCnt)
                throw new CacheException("Number of partitions must be the same for correct collocation [cache1=" + cctx.name() + ", parts1=" + partsCnt + ", cache2=" + extraCctx.name() + ", parts2=" + parts + "]");
        }
    }
    Set<ClusterNode>[] partLocs = new Set[partsCnt];
    // Fill partition locations for main cache.
    for (int p = 0; p < partsCnt; p++) {
        List<ClusterNode> owners = cctx.topology().owners(p);
        if (F.isEmpty(owners)) {
            // Handle special case: no mapping is configured for a partition.
            if (F.isEmpty(cctx.affinity().assignment(NONE).get(p))) {
                // Mark unmapped partition.
                partLocs[p] = UNMAPPED_PARTS;
                continue;
            } else if (!F.isEmpty(dataNodes(cctx.groupId(), NONE)))
                // Retry.
                return null;
            throw new CacheException("Failed to find data nodes [cache=" + cctx.name() + ", part=" + p + "]");
        }
        partLocs[p] = new HashSet<>(owners);
    }
    if (cacheIds.size() > 1) {
        // We need this for logical collocation between different partitioned caches with the same affinity.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            // This is possible if we have replaced a replicated cache with a partitioned one earlier.
            if (cctx == extraCctx)
                continue;
            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;
            for (int p = 0, parts = extraCctx.affinity().partitions(); p < parts; p++) {
                List<ClusterNode> owners = extraCctx.topology().owners(p);
                if (partLocs[p] == UNMAPPED_PARTS)
                    // Skip unmapped partitions.
                    continue;
                if (F.isEmpty(owners)) {
                    if (!F.isEmpty(dataNodes(extraCctx.groupId(), NONE)))
                        // Retry.
                        return null;
                    throw new CacheException("Failed to find data nodes [cache=" + extraCctx.name() + ", part=" + p + "]");
                }
                if (partLocs[p] == null)
                    partLocs[p] = new HashSet<>(owners);
                else {
                    // Intersection of owners.
                    partLocs[p].retainAll(owners);
                    if (partLocs[p].isEmpty())
                        // Intersection is empty -> retry.
                        return null;
                }
            }
        }
        // Filter nodes where not all the replicated caches loaded.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            if (!extraCctx.isReplicated())
                continue;
            Set<ClusterNode> dataNodes = replicatedUnstableDataNodes(extraCctx);
            if (F.isEmpty(dataNodes))
                // Retry.
                return null;
            for (Set<ClusterNode> partLoc : partLocs) {
                if (partLoc == UNMAPPED_PARTS)
                    // Skip unmapped partition.
                    continue;
                partLoc.retainAll(dataNodes);
                if (partLoc.isEmpty())
                    // Retry.
                    return null;
            }
        }
    }
    // Collect the final partitions mapping.
    Map<ClusterNode, IntArray> res = new HashMap<>();
    // Here partitions in all IntArray's will be sorted in ascending order, this is important.
    for (int p = 0; p < partLocs.length; p++) {
        Set<ClusterNode> pl = partLocs[p];
        // Skip unmapped partitions.
        if (pl == UNMAPPED_PARTS)
            continue;
        assert !F.isEmpty(pl) : pl;
        ClusterNode n = pl.size() == 1 ? F.first(pl) : F.rand(pl);
        IntArray parts = res.get(n);
        if (parts == null)
            res.put(n, parts = new IntArray());
        parts.add(p);
    }
    return res;
}
Also used: ClusterNode(org.apache.ignite.cluster.ClusterNode) ResultSet(java.sql.ResultSet) Set(java.util.Set) HashSet(java.util.HashSet) CacheException(javax.cache.CacheException) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) IntArray(org.h2.util.IntArray)
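
The heart of this method is the owner-intersection step: for each partition, retainAll() narrows the owner set to nodes that own that partition in every co-located cache, and an empty intersection means a rebalance is in flight, so the caller must retry. A hedged sketch of just that step follows, with node IDs as plain strings standing in for ClusterNode:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class OwnerIntersection {
    /**
     * @param ownersPerCache Per-cache list of owner sets, indexed by partition (at least one cache).
     * @return Common owners per partition, or null to signal "retry later".
     */
    static List<Set<String>> intersect(List<List<Set<String>>> ownersPerCache, int parts) {
        List<Set<String>> res = new ArrayList<>(parts);
        for (int p = 0; p < parts; p++) {
            Set<String> common = null;
            for (List<Set<String>> cacheOwners : ownersPerCache) {
                if (common == null)
                    common = new HashSet<>(cacheOwners.get(p)); // copy, like new HashSet<>(owners)
                else
                    common.retainAll(cacheOwners.get(p));       // like partLocs[p].retainAll(owners)
                if (common.isEmpty())
                    return null; // intersection is empty -> retry
            }
            res.add(common);
        }
        return res;
    }

    public static void main(String[] args) {
        List<Set<String>> cacheA = List.of(Set.of("n1", "n2"), Set.of("n2", "n3"));
        List<Set<String>> cacheB = List.of(Set.of("n2", "n3"), Set.of("n3"));
        System.out.println(intersect(List.of(cacheA, cacheB), 2)); // [[n2], [n3]]
    }
}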

Example 18 with In

Use of org.h2.dev.util.BinaryArithmeticStream.In in project ignite by apache.

The class GridH2Table, method removeChildrenAndResources.

/**
 * {@inheritDoc}
 */
@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
@Override
public void removeChildrenAndResources(Session ses) {
    lock(true);
    try {
        super.removeChildrenAndResources(ses);
        // Clear all user indexes registered in schema.
        while (idxs.size() > sysIdxsCnt) {
            Index idx = idxs.get(sysIdxsCnt);
            if (idx.getName() != null && idx.getSchema().findIndex(ses, idx.getName()) == idx) {
                // This call implicitly removes both idx and its proxy, if any, from idxs.
                database.removeSchemaObject(ses, idx);
                // We have to call destroy here if we are who has removed this index from the table.
                if (idx instanceof GridH2IndexBase)
                    ((GridH2IndexBase) idx).destroy(rmIndex);
            }
        }
        if (SysProperties.CHECK) {
            for (SchemaObject obj : database.getAllSchemaObjects(DbObject.INDEX)) {
                Index idx = (Index) obj;
                if (idx.getTable() == this)
                    DbException.throwInternalError("index not dropped: " + idx.getName());
            }
        }
        database.removeMeta(ses, getId());
        invalidate();
    } finally {
        unlock(true);
    }
}
Also used: SchemaObject(org.h2.schema.SchemaObject) Index(org.h2.index.Index) SpatialIndex(org.h2.index.SpatialIndex) H2TreeIndex(org.apache.ignite.internal.processors.query.h2.database.H2TreeIndex)
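
The snippet leans on a strict lock discipline: lock(true) takes the table's exclusive lock, and the finally block guarantees unlock(true) even when an index drop throws. A small sketch of that discipline, assuming the boolean selects exclusive vs. shared mode as it does in GridH2Table; ReentrantReadWriteLock stands in for the table's own lock implementation:

import java.util.concurrent.locks.ReentrantReadWriteLock;

class TableLock {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    void lock(boolean exclusive) {
        if (exclusive)
            lock.writeLock().lock();
        else
            lock.readLock().lock();
    }

    void unlock(boolean exclusive) {
        if (exclusive)
            lock.writeLock().unlock();
        else
            lock.readLock().unlock();
    }

    void removeChildrenAndResources() {
        lock(true);
        try {
            // ... drop user indexes, remove metadata, invalidate the table ...
        } finally {
            unlock(true); // always released, even if a drop throws
        }
    }
}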

Example 19 with In

Use of org.h2.dev.util.BinaryArithmeticStream.In in project ignite by apache.

The class GridMapQueryExecutor, method onQueryRequest0.

/**
 * @param node Node that authored the request.
 * @param reqId Request ID.
 * @param segmentId Index segment ID.
 * @param schemaName Schema name.
 * @param qrys Queries to execute.
 * @param cacheIds Caches which will be affected by these queries.
 * @param topVer Topology version.
 * @param partsMap Partitions map for unstable topology.
 * @param parts Explicit partitions for current node.
 * @param pageSize Page size.
 * @param distributedJoinMode Query distributed join mode.
 * @param enforceJoinOrder Enforce join order flag.
 * @param replicated Whether the query targets only replicated caches.
 * @param timeout Query timeout in milliseconds.
 * @param params Query parameters.
 * @param lazy Lazy (streaming) execution flag.
 */
private void onQueryRequest0(final ClusterNode node, final long reqId, final int segmentId,
    final String schemaName, final Collection<GridCacheSqlQuery> qrys, final List<Integer> cacheIds,
    final AffinityTopologyVersion topVer, final Map<UUID, int[]> partsMap, final int[] parts,
    final int pageSize, final DistributedJoinMode distributedJoinMode, final boolean enforceJoinOrder,
    final boolean replicated, final int timeout, final Object[] params, boolean lazy) {
    if (lazy && MapQueryLazyWorker.currentWorker() == null) {
        // Lazy queries must be re-submitted to dedicated workers.
        MapQueryLazyWorkerKey key = new MapQueryLazyWorkerKey(node.id(), reqId, segmentId);
        MapQueryLazyWorker worker = new MapQueryLazyWorker(ctx.igniteInstanceName(), key, log, this);
        worker.submit(new Runnable() {

            @Override
            public void run() {
                onQueryRequest0(node, reqId, segmentId, schemaName, qrys, cacheIds, topVer, partsMap, parts, pageSize, distributedJoinMode, enforceJoinOrder, replicated, timeout, params, true);
            }
        });
        if (lazyWorkerBusyLock.enterBusy()) {
            try {
                MapQueryLazyWorker oldWorker = lazyWorkers.put(key, worker);
                if (oldWorker != null)
                    oldWorker.stop();
                IgniteThread thread = new IgniteThread(worker);
                thread.start();
            } finally {
                lazyWorkerBusyLock.leaveBusy();
            }
        } else
            log.info("Ignored query request (node is stopping) [nodeId=" + node.id() + ", reqId=" + reqId + ']');
        return;
    }
    // Prepare to run queries.
    GridCacheContext<?, ?> mainCctx = !F.isEmpty(cacheIds) ? ctx.cache().context().cacheContext(cacheIds.get(0)) : null;
    MapNodeResults nodeRess = resultsForNode(node.id());
    MapQueryResults qr = null;
    List<GridReservable> reserved = new ArrayList<>();
    try {
        if (topVer != null) {
            // Reserve primary for topology version or explicit partitions.
            if (!reservePartitions(cacheIds, topVer, parts, reserved)) {
                // Unregister lazy worker because re-try may never reach this node again.
                if (lazy)
                    stopAndUnregisterCurrentLazyWorker();
                sendRetry(node, reqId, segmentId);
                return;
            }
        }
        qr = new MapQueryResults(h2, reqId, qrys.size(), mainCctx, MapQueryLazyWorker.currentWorker());
        if (nodeRess.put(reqId, segmentId, qr) != null)
            throw new IllegalStateException();
        // Prepare query context.
        GridH2QueryContext qctx = new GridH2QueryContext(ctx.localNodeId(), node.id(), reqId, segmentId,
            replicated ? REPLICATED : MAP)
            .filter(h2.backupFilter(topVer, parts))
            .partitionsMap(partsMap)
            .distributedJoinMode(distributedJoinMode)
            .pageSize(pageSize)
            .topologyVersion(topVer)
            .reservations(reserved);
        Connection conn = h2.connectionForSchema(schemaName);
        H2Utils.setupConnection(conn, distributedJoinMode != OFF, enforceJoinOrder);
        GridH2QueryContext.set(qctx);
        // qctx is set, we have to release reservations inside of it.
        reserved = null;
        try {
            if (nodeRess.cancelled(reqId)) {
                GridH2QueryContext.clear(ctx.localNodeId(), node.id(), reqId, qctx.type());
                nodeRess.cancelRequest(reqId);
                throw new QueryCancelledException();
            }
            // Run queries.
            int qryIdx = 0;
            boolean evt = mainCctx != null && mainCctx.events().isRecordable(EVT_CACHE_QUERY_EXECUTED);
            for (GridCacheSqlQuery qry : qrys) {
                ResultSet rs = null;
                // If we are not the target node for this replicated query, just ignore it.
                if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
                    rs = h2.executeSqlQueryWithTimer(conn, qry.query(), F.asList(qry.parameters(params)), true, timeout, qr.queryCancel(qryIdx));
                    if (evt) {
                        ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.", EVT_CACHE_QUERY_EXECUTED, CacheQueryType.SQL.name(), mainCctx.name(), null, qry.query(), null, null, params, node.id(), null));
                    }
                    assert rs instanceof JdbcResultSet : rs.getClass();
                }
                qr.addResult(qryIdx, qry, node.id(), rs, params);
                if (qr.cancelled()) {
                    qr.result(qryIdx).close();
                    throw new QueryCancelledException();
                }
                // Send the first page.
                sendNextPage(nodeRess, node, qr, qryIdx, segmentId, pageSize);
                qryIdx++;
            }
            // All request results are in the memory in result set already, so it's ok to release partitions.
            if (!lazy)
                releaseReservations();
        } catch (Throwable e) {
            releaseReservations();
            throw e;
        }
    } catch (Throwable e) {
        if (qr != null) {
            nodeRess.remove(reqId, segmentId, qr);
            qr.cancel(false);
        }
        // Unregister worker after possible cancellation.
        if (lazy)
            stopAndUnregisterCurrentLazyWorker();
        if (X.hasCause(e, GridH2RetryException.class))
            sendRetry(node, reqId, segmentId);
        else {
            U.error(log, "Failed to execute local query.", e);
            sendError(node, reqId, e);
            if (e instanceof Error)
                throw (Error) e;
        }
    } finally {
        if (reserved != null) {
            // Release reserved partitions.
            for (int i = 0; i < reserved.size(); i++)
                reserved.get(i).release();
        }
    }
}
Also used: ArrayList(java.util.ArrayList) Connection(java.sql.Connection) GridH2RetryException(org.apache.ignite.internal.processors.query.h2.opt.GridH2RetryException) GridReservable(org.apache.ignite.internal.processors.cache.distributed.dht.GridReservable) ResultSet(java.sql.ResultSet) JdbcResultSet(org.h2.jdbc.JdbcResultSet) IgniteThread(org.apache.ignite.thread.IgniteThread) GridCacheSqlQuery(org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery) QueryCancelledException(org.apache.ignite.cache.query.QueryCancelledException) GridH2QueryContext(org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext)
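
The first branch shows the re-submission idiom: a lazy query arriving on an ordinary thread is wrapped in a Runnable, queued on a dedicated MapQueryLazyWorker, and the handler simply returns. A minimal sketch of the same idiom using a single-thread executor and a ThreadLocal marker; all names here are illustrative, not Ignite API:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

class LazyDispatch {
    private static final ThreadLocal<Boolean> ON_WORKER = ThreadLocal.withInitial(() -> false);

    private static final ExecutorService WORKER = Executors.newSingleThreadExecutor(r ->
        new Thread(() -> {
            ON_WORKER.set(true); // mark the dedicated worker thread
            r.run();
        }, "lazy-worker"));

    static void handle(String req, boolean lazy) {
        if (lazy && !ON_WORKER.get()) {
            // Not on the worker yet: queue the same call and return, like
            // worker.submit(new Runnable() {...}) in onQueryRequest0.
            WORKER.submit(() -> handle(req, true));
            return;
        }
        System.out.println(Thread.currentThread().getName() + " executes " + req);
    }

    public static void main(String[] args) throws InterruptedException {
        handle("SELECT 1", true); // prints: lazy-worker executes SELECT 1
        WORKER.shutdown();
        WORKER.awaitTermination(5, TimeUnit.SECONDS);
    }
}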

Example 20 with In

Use of org.h2.dev.util.BinaryArithmeticStream.In in project ignite by apache.

The class GridH2Table, method commitUserIndex.

/**
 * Promote temporary index to make it usable in queries.
 *
 * @param ses H2 session.
 * @param idxName Index name.
 * @return Temporary index with given name.
 */
private Index commitUserIndex(Session ses, String idxName) {
    lock(true);
    try {
        ensureNotDestroyed();
        Index idx = tmpIdxs.remove(idxName);
        assert idx != null;
        Index cloneIdx = createDuplicateIndexIfNeeded(idx);
        ArrayList<Index> newIdxs = new ArrayList<>(idxs.size() + ((cloneIdx == null) ? 1 : 2));
        newIdxs.addAll(idxs);
        newIdxs.add(idx);
        if (cloneIdx != null)
            newIdxs.add(cloneIdx);
        idxs = newIdxs;
        database.addSchemaObject(ses, idx);
        if (cloneIdx != null)
            database.addSchemaObject(ses, cloneIdx);
        setModified();
        return idx;
    } finally {
        unlock(true);
    }
}
Also used: ArrayList(java.util.ArrayList) Index(org.h2.index.Index) SpatialIndex(org.h2.index.SpatialIndex) H2TreeIndex(org.apache.ignite.internal.processors.query.h2.database.H2TreeIndex)
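
commitUserIndex() updates idxs copy-on-write: a fresh ArrayList is built under the exclusive lock and then published with a single reference assignment, so concurrent readers always iterate a consistent snapshot. A stripped-down sketch of that publish pattern; synchronized stands in for lock(true) and strings stand in for Index objects:

import java.util.ArrayList;
import java.util.List;

class IndexRegistry {
    private volatile List<String> idxs = new ArrayList<>();

    synchronized void commit(String idxName) {
        List<String> newIdxs = new ArrayList<>(idxs.size() + 1); // like idxs.size() + 1 or + 2 above
        newIdxs.addAll(idxs);
        newIdxs.add(idxName);
        idxs = newIdxs; // single volatile write publishes the new snapshot
    }

    List<String> snapshot() {
        return idxs; // readers never block on the writer lock
    }
}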

Aggregations

SQLException (java.sql.SQLException): 63
Connection (java.sql.Connection): 59
DbException (org.h2.message.DbException): 56
PreparedStatement (java.sql.PreparedStatement): 54
ResultSet (java.sql.ResultSet): 47
Statement (java.sql.Statement): 44
Value (org.h2.value.Value): 40
IOException (java.io.IOException): 39
ByteArrayInputStream (java.io.ByteArrayInputStream): 30
InputStream (java.io.InputStream): 29
Column (org.h2.table.Column): 24
ArrayList (java.util.ArrayList): 23
SimpleResultSet (org.h2.tools.SimpleResultSet): 23
Random (java.util.Random): 19
Expression (org.h2.expression.Expression): 18
JdbcConnection (org.h2.jdbc.JdbcConnection): 18
Index (org.h2.index.Index): 16
ValueString (org.h2.value.ValueString): 16
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 15
IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException): 15