Example 6 with Cache

Use of org.h2.util.Cache in the Apache Ignite project.

The class IgniteH2Indexing, method queryLocalSqlFields.

/**
 * Queries individual fields (generally used by JDBC drivers).
 *
 * @param schemaName Schema name.
 * @param qry Query.
 * @param params Query parameters.
 * @param filter Cache name and key filter.
 * @param enforceJoinOrder Enforce join order of tables in the query.
 * @param timeout Query timeout in milliseconds.
 * @param cancel Query cancel.
 * @return Query result.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("unchecked")
GridQueryFieldsResult queryLocalSqlFields(final String schemaName, final String qry, @Nullable final Collection<Object> params, final IndexingQueryFilter filter, boolean enforceJoinOrder, final int timeout, final GridQueryCancel cancel) throws IgniteCheckedException {
    final Connection conn = connectionForSchema(schemaName);
    H2Utils.setupConnection(conn, false, enforceJoinOrder);
    final PreparedStatement stmt = preparedStatementWithParams(conn, qry, params, true);
    if (GridSqlQueryParser.checkMultipleStatements(stmt))
        throw new IgniteSQLException("Multiple statements queries are not supported for local queries");
    Prepared p = GridSqlQueryParser.prepared(stmt);
    if (DmlStatementsProcessor.isDmlStatement(p)) {
        SqlFieldsQuery fldsQry = new SqlFieldsQuery(qry);
        if (params != null)
            fldsQry.setArgs(params.toArray());
        fldsQry.setEnforceJoinOrder(enforceJoinOrder);
        fldsQry.setTimeout(timeout, TimeUnit.MILLISECONDS);
        return dmlProc.updateSqlFieldsLocal(schemaName, conn, p, fldsQry, filter, cancel);
    } else if (DdlStatementsProcessor.isDdlStatement(p))
        throw new IgniteSQLException("DDL statements are supported for the whole cluster only", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
    List<GridQueryFieldMetadata> meta;
    try {
        meta = H2Utils.meta(stmt.getMetaData());
    } catch (SQLException e) {
        throw new IgniteCheckedException("Cannot prepare query metadata", e);
    }
    final GridH2QueryContext ctx = new GridH2QueryContext(nodeId, nodeId, 0, LOCAL).filter(filter).distributedJoinMode(OFF);
    return new GridQueryFieldsResultAdapter(meta, null) {

        @Override
        public GridCloseableIterator<List<?>> iterator() throws IgniteCheckedException {
            assert GridH2QueryContext.get() == null;
            GridH2QueryContext.set(ctx);
            GridRunningQueryInfo run = new GridRunningQueryInfo(qryIdGen.incrementAndGet(), qry, SQL_FIELDS, schemaName, U.currentTimeMillis(), cancel, true);
            runs.putIfAbsent(run.id(), run);
            try {
                ResultSet rs = executeSqlQueryWithTimer(stmt, conn, qry, params, timeout, cancel);
                return new H2FieldsIterator(rs);
            } finally {
                GridH2QueryContext.clearThreadLocal();
                runs.remove(run.id());
            }
        }
    };
}
Also used : GridQueryFieldsResultAdapter(org.apache.ignite.internal.processors.query.GridQueryFieldsResultAdapter) SQLException(java.sql.SQLException) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException) Connection(java.sql.Connection) Prepared(org.h2.command.Prepared) GridRunningQueryInfo(org.apache.ignite.internal.processors.query.GridRunningQueryInfo) GridQueryFieldMetadata(org.apache.ignite.internal.processors.query.GridQueryFieldMetadata) PreparedStatement(java.sql.PreparedStatement) SqlFieldsQuery(org.apache.ignite.cache.query.SqlFieldsQuery) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) ResultSet(java.sql.ResultSet) ArrayList(java.util.ArrayList) List(java.util.List) GridH2QueryContext(org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext)
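For context, this internal method is reached from the public API when a fields query is marked local. Below is a minimal usage sketch, assuming a hypothetical cache named "personCache" with a Person table; it illustrates the public SqlFieldsQuery entry point and is not part of the Ignite source above.

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.FieldsQueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class LocalFieldsQueryExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Hypothetical cache and table; setLocal(true) keeps execution on this
            // node, which is the path that ends up in queryLocalSqlFields.
            SqlFieldsQuery qry = new SqlFieldsQuery("SELECT name, age FROM Person WHERE age > ?");
            qry.setArgs(30);
            qry.setLocal(true);

            try (FieldsQueryCursor<List<?>> cur = ignite.cache("personCache").query(qry)) {
                for (List<?> row : cur)
                    System.out.println(row);
            }
        }
    }
}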

Example 7 with Cache

Use of org.h2.util.Cache in the Apache Ignite project.

The class IgniteH2Indexing, method unregisterCache.

/**
 * {@inheritDoc}
 */
@Override
public void unregisterCache(GridCacheContext cctx, boolean rmvIdx) {
    rowCache.onCacheUnregistered(cctx);
    String cacheName = cctx.name();
    String schemaName = schema(cacheName);
    H2Schema schema = schemas.get(schemaName);
    if (schema != null) {
        mapQryExec.onCacheStop(cacheName);
        dmlProc.onCacheStop(cacheName);
        // Remove this mapping only after callback to DML proc - it needs that mapping internally
        cacheName2schema.remove(cacheName);
        // Drop tables.
        Collection<H2TableDescriptor> rmvTbls = new HashSet<>();
        for (H2TableDescriptor tbl : schema.tables()) {
            if (F.eq(tbl.cache().name(), cacheName)) {
                try {
                    tbl.table().setRemoveIndexOnDestroy(rmvIdx);
                    dropTable(tbl);
                } catch (IgniteCheckedException e) {
                    U.error(log, "Failed to drop table on cache stop (will ignore): " + tbl.fullTableName(), e);
                }
                schema.drop(tbl);
                rmvTbls.add(tbl);
            }
        }
        if (!isDefaultSchema(schemaName)) {
            synchronized (schemaMux) {
                if (schema.decrementUsageCount() == 0) {
                    schemas.remove(schemaName);
                    try {
                        dropSchema(schemaName);
                    } catch (IgniteCheckedException e) {
                        U.error(log, "Failed to drop schema on cache stop (will ignore): " + cacheName, e);
                    }
                }
            }
        }
        stmtCache.clear();
        for (H2TableDescriptor tbl : rmvTbls) {
            for (Index idx : tbl.table().getIndexes())
                idx.close(null);
        }
        int cacheId = CU.cacheId(cacheName);
        for (Iterator<Map.Entry<H2TwoStepCachedQueryKey, H2TwoStepCachedQuery>> it = twoStepCache.entrySet().iterator(); it.hasNext(); ) {
            Map.Entry<H2TwoStepCachedQueryKey, H2TwoStepCachedQuery> e = it.next();
            GridCacheTwoStepQuery qry = e.getValue().query();
            if (!F.isEmpty(qry.cacheIds()) && qry.cacheIds().contains(cacheId))
                it.remove();
        }
    }
}
Also used : GridCacheTwoStepQuery(org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery) Index(org.h2.index.Index) H2TreeIndex(org.apache.ignite.internal.processors.query.h2.database.H2TreeIndex) IgniteSystemProperties.getString(org.apache.ignite.IgniteSystemProperties.getString) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) GridBoundedConcurrentLinkedHashMap(org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashMap) LinkedHashSet(java.util.LinkedHashSet) HashSet(java.util.HashSet)
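unregisterCache is driven by cache lifecycle events rather than called directly. A minimal sketch of the public-API action that leads to it, assuming a hypothetical cache named "tempCache":

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;

public class DestroyCacheExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("tempCache");
            cache.put(1, "one");

            // Destroying the cache stops it cluster-wide; on each node the indexing
            // module then unregisters the cache, dropping its H2 tables and purging
            // cached statements and two-step query plans, as in the method above.
            ignite.destroyCache("tempCache");
        }
    }
}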

Example 8 with Cache

Use of org.h2.util.Cache in the Apache Ignite project.

The class GridH2RowDescriptor, method wrap.

/**
 * Wraps object to respective {@link Value}.
 *
 * @param obj Object.
 * @param type Value type.
 * @return Value.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("ConstantConditions")
public Value wrap(Object obj, int type) throws IgniteCheckedException {
    assert obj != null;
    if (obj instanceof CacheObject) {
        // Handle cache object.
        CacheObject co = (CacheObject) obj;
        if (type == Value.JAVA_OBJECT)
            return new GridH2ValueCacheObject(co, idx.objectContext());
        obj = co.value(idx.objectContext(), false);
    }
    switch(type) {
        case Value.BOOLEAN:
            return ValueBoolean.get((Boolean) obj);
        case Value.BYTE:
            return ValueByte.get((Byte) obj);
        case Value.SHORT:
            return ValueShort.get((Short) obj);
        case Value.INT:
            return ValueInt.get((Integer) obj);
        case Value.FLOAT:
            return ValueFloat.get((Float) obj);
        case Value.LONG:
            return ValueLong.get((Long) obj);
        case Value.DOUBLE:
            return ValueDouble.get((Double) obj);
        case Value.UUID:
            UUID uuid = (UUID) obj;
            return ValueUuid.get(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits());
        case Value.DATE:
            if (LocalDateTimeUtils.isLocalDate(obj.getClass()))
                return LocalDateTimeUtils.localDateToDateValue(obj);
            return ValueDate.get((Date) obj);
        case Value.TIME:
            if (LocalDateTimeUtils.isLocalTime(obj.getClass()))
                return LocalDateTimeUtils.localTimeToTimeValue(obj);
            return ValueTime.get((Time) obj);
        case Value.TIMESTAMP:
            if (obj instanceof java.util.Date && !(obj instanceof Timestamp))
                obj = new Timestamp(((java.util.Date) obj).getTime());
            if (LocalDateTimeUtils.isLocalDateTime(obj.getClass()))
                return LocalDateTimeUtils.localDateTimeToValue(obj);
            return ValueTimestamp.get((Timestamp) obj);
        case Value.DECIMAL:
            return ValueDecimal.get((BigDecimal) obj);
        case Value.STRING:
            return ValueString.get(obj.toString());
        case Value.BYTES:
            return ValueBytes.get((byte[]) obj);
        case Value.JAVA_OBJECT:
            return ValueJavaObject.getNoCopy(obj, null, null);
        case Value.ARRAY:
            Object[] arr = (Object[]) obj;
            Value[] valArr = new Value[arr.length];
            for (int i = 0; i < arr.length; i++) {
                Object o = arr[i];
                valArr[i] = o == null ? ValueNull.INSTANCE : wrap(o, DataType.getTypeFromClass(o.getClass()));
            }
            return ValueArray.get(valArr);
        case Value.GEOMETRY:
            return ValueGeometry.getFromGeometry(obj);
    }
    throw new IgniteCheckedException("Failed to wrap value[type=" + type + ", value=" + obj + "]");
}
Also used : IgniteCheckedException(org.apache.ignite.IgniteCheckedException) Value(org.h2.value.Value) ValueJavaObject(org.h2.value.ValueJavaObject) CacheObject(org.apache.ignite.internal.processors.cache.CacheObject) UUID(java.util.UUID) Timestamp(java.sql.Timestamp) ValueTimestamp(org.h2.value.ValueTimestamp)
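The switch above is essentially a bridge from Java types to H2's Value hierarchy. Below is a minimal standalone sketch of the same idea against plain H2 1.4.x APIs, covering only a handful of types; it is a simplified illustration, not Ignite code.

import java.math.BigDecimal;
import org.h2.value.DataType;
import org.h2.value.Value;
import org.h2.value.ValueDecimal;
import org.h2.value.ValueInt;
import org.h2.value.ValueNull;
import org.h2.value.ValueString;

public class H2WrapSketch {
    /** Wraps a few Java types into H2 Values; anything else falls back to SQL NULL. */
    static Value wrapSimple(Object obj) {
        if (obj == null)
            return ValueNull.INSTANCE;

        // Resolve the H2 value type code for the runtime class, as the
        // ARRAY branch above does via DataType.getTypeFromClass.
        int type = DataType.getTypeFromClass(obj.getClass());

        switch (type) {
            case Value.INT:
                return ValueInt.get((Integer)obj);
            case Value.STRING:
                return ValueString.get((String)obj);
            case Value.DECIMAL:
                return ValueDecimal.get((BigDecimal)obj);
            default:
                return ValueNull.INSTANCE; // simplified fallback
        }
    }

    public static void main(String[] args) {
        System.out.println(wrapSimple(42).getSQL());                    // 42
        System.out.println(wrapSimple("abc").getSQL());                 // 'abc'
        System.out.println(wrapSimple(new BigDecimal("1.5")).getSQL()); // 1.5
    }
}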

Example 9 with Cache

Use of org.h2.util.Cache in the Apache Ignite project.

The class GridReduceQueryExecutor, method update.

/**
 * @param schemaName Schema name.
 * @param cacheIds Cache ids.
 * @param selectQry Select query.
 * @param params SQL parameters.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param pageSize Page size.
 * @param timeoutMillis Timeout.
 * @param parts Partitions.
 * @param isReplicatedOnly Whether query uses only replicated caches.
 * @param cancel Cancel state.
 * @return Update result, or {@code null} when some map node doesn't support distributed DML.
 */
public UpdateResult update(String schemaName, List<Integer> cacheIds, String selectQry, Object[] params, boolean enforceJoinOrder, int pageSize, int timeoutMillis, final int[] parts, boolean isReplicatedOnly, GridQueryCancel cancel) {
    AffinityTopologyVersion topVer = h2.readyTopologyVersion();
    NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);
    final long reqId = qryIdGen.incrementAndGet();
    final GridRunningQueryInfo qryInfo = new GridRunningQueryInfo(reqId, selectQry, GridCacheQueryType.SQL_FIELDS, schemaName, U.currentTimeMillis(), cancel, false);
    Collection<ClusterNode> nodes = nodesParts.nodes();
    if (nodes == null)
        throw new CacheException("Failed to determine nodes participating in the update. " + "Explanation (Retry update once topology recovers).");
    if (isReplicatedOnly) {
        ClusterNode locNode = ctx.discovery().localNode();
        if (nodes.contains(locNode))
            nodes = singletonList(locNode);
        else
            nodes = singletonList(F.rand(nodes));
    }
    for (ClusterNode n : nodes) {
        if (!n.version().greaterThanEqual(2, 3, 0)) {
            log.warning("Server-side DML optimization is skipped because map node does not support it. " + "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "].");
            return null;
        }
    }
    final DistributedUpdateRun r = new DistributedUpdateRun(nodes.size(), qryInfo);
    int flags = enforceJoinOrder ? GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0;
    if (isReplicatedOnly)
        flags |= GridH2QueryRequest.FLAG_REPLICATED;
    GridH2DmlRequest req = new GridH2DmlRequest().requestId(reqId).topologyVersion(topVer).caches(cacheIds).schemaName(schemaName).query(selectQry).pageSize(pageSize).parameters(params).timeout(timeoutMillis).flags(flags);
    updRuns.put(reqId, r);
    boolean release = false;
    try {
        Map<ClusterNode, IntArray> partsMap = (nodesParts.queryPartitionsMap() != null) ? nodesParts.queryPartitionsMap() : nodesParts.partitionsMap();
        ExplicitPartitionsSpecializer partsSpec = (parts == null) ? null : new ExplicitPartitionsSpecializer(partsMap);
        final Collection<ClusterNode> finalNodes = nodes;
        cancel.set(new Runnable() {

            @Override
            public void run() {
                r.future().onCancelled();
                send(finalNodes, new GridQueryCancelRequest(reqId), null, false);
            }
        });
        // send() logs the debug message
        if (send(nodes, req, partsSpec, false))
            return r.future().get();
        throw new CacheException("Failed to send update request to participating nodes.");
    } catch (IgniteCheckedException | RuntimeException e) {
        release = true;
        U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e);
        throw new CacheException("Failed to run update. " + e.getMessage(), e);
    } finally {
        if (release)
            send(nodes, new GridQueryCancelRequest(reqId), null, false);
        if (!updRuns.remove(reqId, r))
            U.warn(log, "Update run was already removed: " + reqId);
    }
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) GridH2DmlRequest(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest) GridQueryCancelRequest(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) CacheException(javax.cache.CacheException) GridRunningQueryInfo(org.apache.ignite.internal.processors.query.GridRunningQueryInfo) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IntArray(org.h2.util.IntArray)
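This reduce-side path executes when a DML statement runs over partitioned data. A minimal sketch of the public entry point, again assuming a hypothetical "personCache" with a Person table:

import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class DistributedDmlExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            SqlFieldsQuery dml = new SqlFieldsQuery("UPDATE Person SET age = age + 1 WHERE age < ?");
            dml.setArgs(30);
            dml.setTimeout(5_000, TimeUnit.MILLISECONDS);

            // For DML statements the cursor yields a single row holding the update count.
            List<List<?>> res = ignite.cache("personCache").query(dml).getAll();
            System.out.println("Updated rows: " + res.get(0).get(0));
        }
    }
}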

Example 10 with Cache

Use of org.h2.util.Cache in the Apache Ignite project.

The class GridReduceQueryExecutor, method partitionedUnstableDataNodes.

/**
 * Calculates partition mapping for partitioned cache on unstable topology.
 *
 * @param cacheIds Cache IDs.
 * @return Partition mapping or {@code null} if we can't calculate it due to repartitioning and we need to retry.
 */
@SuppressWarnings("unchecked")
private Map<ClusterNode, IntArray> partitionedUnstableDataNodes(List<Integer> cacheIds) {
    // If the main cache is replicated, just replace it with the first partitioned.
    GridCacheContext<?, ?> cctx = findFirstPartitioned(cacheIds);
    final int partsCnt = cctx.affinity().partitions();
    if (cacheIds.size() > 1) {
        // Check correct number of partitions for partitioned caches.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;
            int parts = extraCctx.affinity().partitions();
            if (parts != partsCnt)
                throw new CacheException("Number of partitions must be the same for correct collocation [cache1=" + cctx.name() + ", parts1=" + partsCnt + ", cache2=" + extraCctx.name() + ", parts2=" + parts + "]");
        }
    }
    Set<ClusterNode>[] partLocs = new Set[partsCnt];
    // Fill partition locations for main cache.
    for (int p = 0; p < partsCnt; p++) {
        List<ClusterNode> owners = cctx.topology().owners(p);
        if (F.isEmpty(owners)) {
            // Handle special case: no mapping is configured for a partition.
            if (F.isEmpty(cctx.affinity().assignment(NONE).get(p))) {
                // Mark unmapped partition.
                partLocs[p] = UNMAPPED_PARTS;
                continue;
            } else if (!F.isEmpty(dataNodes(cctx.groupId(), NONE)))
                // Retry.
                return null;
            throw new CacheException("Failed to find data nodes [cache=" + cctx.name() + ", part=" + p + "]");
        }
        partLocs[p] = new HashSet<>(owners);
    }
    if (cacheIds.size() > 1) {
        // We need this for logical collocation between different partitioned caches with the same affinity.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            // This is possible if we have replaced a replicated cache with a partitioned one earlier.
            if (cctx == extraCctx)
                continue;
            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;
            for (int p = 0, parts = extraCctx.affinity().partitions(); p < parts; p++) {
                List<ClusterNode> owners = extraCctx.topology().owners(p);
                if (partLocs[p] == UNMAPPED_PARTS)
                    // Skip unmapped partitions.
                    continue;
                if (F.isEmpty(owners)) {
                    if (!F.isEmpty(dataNodes(extraCctx.groupId(), NONE)))
                        // Retry.
                        return null;
                    throw new CacheException("Failed to find data nodes [cache=" + extraCctx.name() + ", part=" + p + "]");
                }
                if (partLocs[p] == null)
                    partLocs[p] = new HashSet<>(owners);
                else {
                    // Intersection of owners.
                    partLocs[p].retainAll(owners);
                    if (partLocs[p].isEmpty())
                        // Intersection is empty -> retry.
                        return null;
                }
            }
        }
        // Filter nodes where not all the replicated caches loaded.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            if (!extraCctx.isReplicated())
                continue;
            Set<ClusterNode> dataNodes = replicatedUnstableDataNodes(extraCctx);
            if (F.isEmpty(dataNodes))
                // Retry.
                return null;
            for (Set<ClusterNode> partLoc : partLocs) {
                if (partLoc == UNMAPPED_PARTS)
                    // Skip unmapped partition.
                    continue;
                partLoc.retainAll(dataNodes);
                if (partLoc.isEmpty())
                    // Retry.
                    return null;
            }
        }
    }
    // Collect the final partitions mapping.
    Map<ClusterNode, IntArray> res = new HashMap<>();
    // Here, partitions in all IntArrays will be sorted in ascending order; this is important.
    for (int p = 0; p < partLocs.length; p++) {
        Set<ClusterNode> pl = partLocs[p];
        // Skip unmapped partitions.
        if (pl == UNMAPPED_PARTS)
            continue;
        assert !F.isEmpty(pl) : pl;
        ClusterNode n = pl.size() == 1 ? F.first(pl) : F.rand(pl);
        IntArray parts = res.get(n);
        if (parts == null)
            res.put(n, parts = new IntArray());
        parts.add(p);
    }
    return res;
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) ResultSet(java.sql.ResultSet) Set(java.util.Set) HashSet(java.util.HashSet) CacheException(javax.cache.CacheException) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) IntArray(org.h2.util.IntArray)
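The heart of this method is per-partition set intersection: each cache's owner set for a partition is intersected with the accumulated one, and an empty result signals a retry. A self-contained sketch of that arithmetic, with nodes simplified to strings (illustrative only, not Ignite code):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class OwnerIntersectionSketch {
    /**
     * Intersects per-partition owner sets of two caches. Returns null to signal
     * "retry" when any intersection turns out empty, mirroring the method above.
     */
    static List<Set<String>> intersectOwners(List<Set<String>> a, List<Set<String>> b) {
        List<Set<String>> res = new ArrayList<>(a.size());

        for (int p = 0; p < a.size(); p++) {
            Set<String> owners = new HashSet<>(a.get(p));

            // Keep only nodes that own partition p in both caches.
            owners.retainAll(b.get(p));

            if (owners.isEmpty())
                return null; // topology moved under us -> caller retries

            res.add(owners);
        }

        return res;
    }

    public static void main(String[] args) {
        List<Set<String>> c1 = Arrays.asList(
            new HashSet<>(Arrays.asList("A", "B")),
            new HashSet<>(Arrays.asList("B", "C")));
        List<Set<String>> c2 = Arrays.asList(
            new HashSet<>(Arrays.asList("B")),
            new HashSet<>(Arrays.asList("C", "A")));

        System.out.println(intersectOwners(c1, c2)); // [[B], [C]]
    }
}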

Aggregations

IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 12
Connection (java.sql.Connection): 11
PreparedStatement (java.sql.PreparedStatement): 10
IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException): 10
ResultSet (java.sql.ResultSet): 9
SQLException (java.sql.SQLException): 9
CacheException (javax.cache.CacheException): 9
Statement (java.sql.Statement): 7
ArrayList (java.util.ArrayList): 7
IgniteException (org.apache.ignite.IgniteException): 7
List (java.util.List): 5
UUID (java.util.UUID): 5
IgniteSystemProperties.getString (org.apache.ignite.IgniteSystemProperties.getString): 5
ClusterNode (org.apache.ignite.cluster.ClusterNode): 5
Prepared (org.h2.command.Prepared): 5
IntArray (org.h2.util.IntArray): 5
HashMap (java.util.HashMap): 4
LinkedHashMap (java.util.LinkedHashMap): 4
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4
BinaryObject (org.apache.ignite.binary.BinaryObject): 4