Example 6 with Data

Use of org.h2.store.Data in project ignite by apache.

The class GridH2KeyValueRowOffheap, method incrementRefCount.

/** {@inheritDoc} */
@SuppressWarnings({ "NonPrivateFieldAccessedInSynchronizedContext" })
@Override
public void incrementRefCount() {
    long p = ptr;
    GridUnsafeMemory mem = desc.memory();
    if (p == 0) {
        // Serialize data to offheap memory.
        Value key = peekValue(KEY_COL);
        Value val = peekValue(VAL_COL);
        assert key != null;
        assert val != null;
        Data data = Data.create(null, new byte[SIZE_CALCULATOR.getValueLen(key)]);
        data.writeValue(key);
        int keySize = data.length();
        p = mem.allocate(keySize + OFFSET_KEY);
        // We don't need any synchronization or volatility here because we publish via
        // volatile write to tree node.
        mem.writeInt(p, 1);
        mem.writeLong(p + OFFSET_EXPIRATION, expirationTime);
        mem.writeInt(p + OFFSET_KEY_SIZE, keySize);
        mem.writeBytes(p + OFFSET_KEY, data.getBytes(), 0, keySize);
        data = Data.create(null, new byte[SIZE_CALCULATOR.getValueLen(val)]);
        data.writeValue(val);
        int valSize = data.length();
        long valPtr = mem.allocate(valSize + OFFSET_VALUE);
        mem.writeInt(valPtr, valSize);
        mem.writeBytes(valPtr + OFFSET_VALUE, data.getBytes(), 0, valSize);
        mem.writeLongVolatile(p + OFFSET_VALUE_REF, valPtr);
        ptr = p;
        desc.cache(this);
    } else {
        for (;;) {
            int cnt = mem.readIntVolatile(p);
            assert cnt > 0 : cnt;
            if (mem.casInt(p, cnt, cnt + 1))
                break;
        }
    }
}
Also used: Value (org.h2.value.Value), Data (org.h2.store.Data), GridUnsafeMemory (org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory)
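
For readers new to org.h2.store.Data itself, the following is a minimal, self-contained round trip: write a Value into an exactly-sized buffer and read it back. This is a sketch assuming H2 1.4.x, where Data is still a public class; DataRoundTrip and the sizer variable are illustrative names, not part of the Ignite code above.

import org.h2.store.Data;
import org.h2.value.Value;
import org.h2.value.ValueString;

public class DataRoundTrip {
    public static void main(String[] args) {
        Value val = ValueString.get("hello");

        // Size the target buffer exactly, much like the Ignite code above
        // does with its SIZE_CALCULATOR instance.
        Data sizer = Data.create(null, 0);
        Data data = Data.create(null, new byte[sizer.getValueLen(val)]);

        data.writeValue(val);
        int len = data.length(); // bytes actually written

        data.setPos(0); // rewind before reading back
        Value read = data.readValue();

        assert read.getString().equals("hello");
        assert data.length() == len;
    }
}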

Example 7 with Data

Use of org.h2.store.Data in project jackrabbit-oak by apache.

The class BlobCache, method addGeneration.

@Override
public void addGeneration(int generation, boolean readOnly) {
    CacheMap<Long, byte[]> d = cache.openMap(generation, "data", new MVMap.Builder<Long, byte[]>());
    data.addReadMap(generation, d);
    CacheMap<String, byte[]> m = cache.openMap(generation, "meta", new MVMap.Builder<String, byte[]>());
    meta.addReadMap(generation, m);
    if (!readOnly) {
        // The order is important: if we switched the data map first, we could
        // end up with the data in store 1 but the metadata in store 2, which
        // could result in a data block not being found if store 1 is removed
        // later on.
        meta.setWriteMap(m);
        data.setWriteMap(d);
    }
    if (streamStore == null) {
        streamStore = new StreamStore(data);
    }
}
Also used: StreamStore (org.h2.mvstore.StreamStore), MVMap (org.h2.mvstore.MVMap)
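
The StreamStore created on the last line is what actually chunks binary streams into the map. Below is a minimal sketch of that mechanism in isolation, using an in-memory MVStore; StreamStoreSketch and the variable names are illustrative.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.h2.mvstore.MVMap;
import org.h2.mvstore.MVStore;
import org.h2.mvstore.StreamStore;

public class StreamStoreSketch {
    public static void main(String[] args) throws IOException {
        // A null file name opens a purely in-memory store.
        MVStore store = MVStore.open(null);
        MVMap<Long, byte[]> blocks = store.openMap("data");

        StreamStore streamStore = new StreamStore(blocks);

        // put() splits the stream into blocks stored in the map and
        // returns a small id that references them.
        byte[] id = streamStore.put(new ByteArrayInputStream(new byte[] { 1, 2, 3 }));

        try (InputStream in = streamStore.get(id)) {
            assert in.read() == 1;
        }

        store.close();
    }
}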

Example 8 with Data

Use of org.h2.store.Data in project ignite by apache.

The class GridReduceQueryExecutor, method partitionedUnstableDataNodes.

/**
 * Calculates partition mapping for partitioned cache on unstable topology.
 *
 * @param cacheIds Cache IDs.
 * @return Partition mapping or {@code null} if we can't calculate it due to repartitioning and we need to retry.
 */
@SuppressWarnings("unchecked")
private Map<ClusterNode, IntArray> partitionedUnstableDataNodes(List<Integer> cacheIds) {
    // If the main cache is replicated, just replace it with the first partitioned.
    GridCacheContext<?, ?> cctx = findFirstPartitioned(cacheIds);
    final int partsCnt = cctx.affinity().partitions();
    if (cacheIds.size() > 1) {
        // Check correct number of partitions for partitioned caches.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;
            int parts = extraCctx.affinity().partitions();
            if (parts != partsCnt)
                throw new CacheException("Number of partitions must be the same for correct collocation [cache1=" + cctx.name() + ", parts1=" + partsCnt + ", cache2=" + extraCctx.name() + ", parts2=" + parts + "]");
        }
    }
    Set<ClusterNode>[] partLocs = new Set[partsCnt];
    // Fill partition locations for main cache.
    for (int p = 0; p < partsCnt; p++) {
        List<ClusterNode> owners = cctx.topology().owners(p);
        if (F.isEmpty(owners)) {
            // Handle special case: no mapping is configured for a partition.
            if (F.isEmpty(cctx.affinity().assignment(NONE).get(p))) {
                // Mark unmapped partition.
                partLocs[p] = UNMAPPED_PARTS;
                continue;
            } else if (!F.isEmpty(dataNodes(cctx.groupId(), NONE)))
                // Retry.
                return null;
            throw new CacheException("Failed to find data nodes [cache=" + cctx.name() + ", part=" + p + "]");
        }
        partLocs[p] = new HashSet<>(owners);
    }
    if (cacheIds.size() > 1) {
        // We need this for logical collocation between different partitioned caches with the same affinity.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            // This is possible if we have replaced a replicated cache with a partitioned one earlier.
            if (cctx == extraCctx)
                continue;
            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;
            for (int p = 0, parts = extraCctx.affinity().partitions(); p < parts; p++) {
                List<ClusterNode> owners = extraCctx.topology().owners(p);
                if (partLocs[p] == UNMAPPED_PARTS)
                    // Skip unmapped partitions.
                    continue;
                if (F.isEmpty(owners)) {
                    if (!F.isEmpty(dataNodes(extraCctx.groupId(), NONE)))
                        // Retry.
                        return null;
                    throw new CacheException("Failed to find data nodes [cache=" + extraCctx.name() + ", part=" + p + "]");
                }
                if (partLocs[p] == null)
                    partLocs[p] = new HashSet<>(owners);
                else {
                    // Intersection of owners.
                    partLocs[p].retainAll(owners);
                    if (partLocs[p].isEmpty())
                        // Intersection is empty -> retry.
                        return null;
                }
            }
        }
        // Filter nodes where not all the replicated caches loaded.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);
            if (!extraCctx.isReplicated())
                continue;
            Set<ClusterNode> dataNodes = replicatedUnstableDataNodes(extraCctx);
            if (F.isEmpty(dataNodes))
                // Retry.
                return null;
            for (Set<ClusterNode> partLoc : partLocs) {
                if (partLoc == UNMAPPED_PARTS)
                    // Skip unmapped partition.
                    continue;
                partLoc.retainAll(dataNodes);
                if (partLoc.isEmpty())
                    // Retry.
                    return null;
            }
        }
    }
    // Collect the final partitions mapping.
    Map<ClusterNode, IntArray> res = new HashMap<>();
    // Here partitions in all IntArray's will be sorted in ascending order, this is important.
    for (int p = 0; p < partLocs.length; p++) {
        Set<ClusterNode> pl = partLocs[p];
        // Skip unmapped partitions.
        if (pl == UNMAPPED_PARTS)
            continue;
        assert !F.isEmpty(pl) : pl;
        ClusterNode n = pl.size() == 1 ? F.first(pl) : F.rand(pl);
        IntArray parts = res.get(n);
        if (parts == null)
            res.put(n, parts = new IntArray());
        parts.add(p);
    }
    return res;
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), ResultSet (java.sql.ResultSet), Set (java.util.Set), HashSet (java.util.HashSet), CacheException (javax.cache.CacheException), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), IntArray (org.h2.util.IntArray)
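
The heart of the method is the owner-intersection step: every additional partitioned cache must narrow each partition's candidate node set, and an empty intersection means the topology is shifting and the caller must retry. A stripped-down sketch of just that step in plain Java (OwnerIntersection and retainOwners are hypothetical names):

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class OwnerIntersection {
    /**
     * Narrows per-partition owner sets by the owners reported for another cache.
     * Returns false if any intersection becomes empty, meaning the caller
     * should return null and retry, as the method above does.
     */
    static <N> boolean retainOwners(Set<N>[] partLocs, List<Set<N>> extraOwners) {
        for (int p = 0; p < partLocs.length; p++) {
            Set<N> owners = new HashSet<>(extraOwners.get(p));

            if (partLocs[p] == null)
                partLocs[p] = owners;
            else {
                partLocs[p].retainAll(owners);

                if (partLocs[p].isEmpty())
                    return false; // Intersection is empty -> retry.
            }
        }
        return true;
    }
}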

Example 9 with Data

Use of org.h2.store.Data in project ignite by apache.

The class GridH2Table, method addColumns.

/**
 * Add new columns to this table.
 *
 * @param cols Columns to add.
 * @param ifNotExists Ignore this command if {@code cols} contains a single column and a column with the given name already exists.
 */
public void addColumns(List<QueryField> cols, boolean ifNotExists) {
    assert !ifNotExists || cols.size() == 1;
    lock(true);
    try {
        int pos = columns.length;
        Column[] newCols = new Column[columns.length + cols.size()];
        // First, let's copy existing columns to new array
        System.arraycopy(columns, 0, newCols, 0, columns.length);
        // And now, let's add new columns
        for (QueryField col : cols) {
            if (doesColumnExist(col.name())) {
                if (ifNotExists && cols.size() == 1)
                    return;
                else
                    throw new IgniteSQLException("Column already exists [tblName=" + getName() + ", colName=" + col.name() + ']');
            }
            try {
                Column c = new Column(col.name(), DataType.getTypeFromClass(Class.forName(col.typeName())));
                c.setNullable(col.isNullable());
                newCols[pos++] = c;
            } catch (ClassNotFoundException e) {
                throw new IgniteSQLException("H2 data type not found for class: " + col.typeName(), e);
            }
        }
        setColumns(newCols);
        desc.refreshMetadataFromTypeDescriptor();
        setModified();
    } finally {
        unlock(true);
    }
}
Also used: QueryField (org.apache.ignite.internal.processors.query.QueryField), Column (org.h2.table.Column), IndexColumn (org.h2.table.IndexColumn), IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException)
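
The pivotal line is the Class-to-H2-type mapping. Here is the same conversion in isolation, as a sketch assuming H2 1.4.x, where a Column is constructed from an int type code; ColumnFromClass is an illustrative name.

import org.h2.table.Column;
import org.h2.value.DataType;

public class ColumnFromClass {
    public static void main(String[] args) throws ClassNotFoundException {
        // Resolve the value class by name, exactly as addColumns() does.
        Class<?> cls = Class.forName("java.lang.Integer");

        // Map the Java class to H2's internal type code.
        int type = DataType.getTypeFromClass(cls);

        Column c = new Column("AGE", type);
        c.setNullable(true);

        System.out.println(c.getCreateSQL()); // e.g. "AGE" INT
    }
}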

Example 10 with Data

Use of org.h2.store.Data in project ignite by apache.

The class GridReduceQueryExecutor, method stableDataNodes.

/**
 * @param isReplicatedOnly Whether the query must involve only replicated caches.
 * @param topVer Topology version.
 * @param cacheIds Participating cache IDs.
 * @param parts Partitions.
 * @return Data nodes or {@code null} if repartitioning started and we need to retry.
 */
private Map<ClusterNode, IntArray> stableDataNodes(boolean isReplicatedOnly, AffinityTopologyVersion topVer, List<Integer> cacheIds, int[] parts) {
    GridCacheContext<?, ?> cctx = cacheContext(cacheIds.get(0));
    Map<ClusterNode, IntArray> map = stableDataNodesMap(topVer, cctx, parts);
    Set<ClusterNode> nodes = map.keySet();
    if (F.isEmpty(map))
        throw new CacheException("Failed to find data nodes for cache: " + cctx.name());
    for (int i = 1; i < cacheIds.size(); i++) {
        GridCacheContext<?, ?> extraCctx = cacheContext(cacheIds.get(i));
        String extraCacheName = extraCctx.name();
        if (extraCctx.isLocal())
            // No consistency guarantees for local caches.
            continue;
        if (isReplicatedOnly && !extraCctx.isReplicated())
            throw new CacheException("Queries running on replicated cache should not contain JOINs " + "with partitioned tables [replicatedCache=" + cctx.name() + ", partitionedCache=" + extraCacheName + "]");
        Set<ClusterNode> extraNodes = stableDataNodesMap(topVer, extraCctx, parts).keySet();
        if (F.isEmpty(extraNodes))
            throw new CacheException("Failed to find data nodes for cache: " + extraCacheName);
        boolean disjoint;
        if (extraCctx.isReplicated()) {
            if (isReplicatedOnly) {
                nodes.retainAll(extraNodes);
                disjoint = map.isEmpty();
            } else
                disjoint = !extraNodes.containsAll(nodes);
        } else
            disjoint = !extraNodes.equals(nodes);
        if (disjoint) {
            if (isPreloadingActive(cacheIds))
                // Retry.
                return null;
            else
                throw new CacheException("Caches have distinct sets of data nodes [cache1=" + cctx.name() + ", cache2=" + extraCacheName + "]");
        }
    }
    return map;
}
Also used: ClusterNode (org.apache.ignite.cluster.ClusterNode), IntArray (org.h2.util.IntArray), CacheException (javax.cache.CacheException)
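
The disjointness rules are easy to miss in the branching above: a replicated extra cache only needs to cover all mapped nodes (or, in replicated-only mode, the node sets are intersected), while a partitioned extra cache must sit on exactly the same nodes. The same decision, extracted into a sketch in plain Java (DisjointCheck and its parameters are hypothetical):

import java.util.Set;

public class DisjointCheck {
    /**
     * Returns true when the extra cache's node set is incompatible with the
     * main node set, i.e. the query cannot be mapped on a stable topology.
     */
    static <N> boolean disjoint(Set<N> nodes, Set<N> extraNodes,
        boolean extraReplicated, boolean replicatedOnly) {
        if (extraReplicated) {
            if (replicatedOnly) {
                nodes.retainAll(extraNodes); // Mutates the main set, as the original does.
                return nodes.isEmpty();
            }
            // A replicated cache is fine as long as it covers all mapped nodes.
            return !extraNodes.containsAll(nodes);
        }
        // Two partitioned caches must be collocated on exactly the same nodes.
        return !extraNodes.equals(nodes);
    }
}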

Aggregations

IgniteException (org.apache.ignite.IgniteException): 6
ResultSet (java.sql.ResultSet): 5
SQLException (java.sql.SQLException): 5
LinkedHashMap (java.util.LinkedHashMap): 5
CacheException (javax.cache.CacheException): 5
IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException): 5
ArrayList (java.util.ArrayList): 4
List (java.util.List): 4
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 4
Column (org.h2.table.Column): 4
HashMap (java.util.HashMap): 3
UUID (java.util.UUID): 3
ClusterNode (org.apache.ignite.cluster.ClusterNode): 3
GridQueryCacheObjectsIterator (org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator): 3
Index (org.h2.index.Index): 3
SimpleResultSet (org.h2.tools.SimpleResultSet): 3
IOException (java.io.IOException): 2
StringReader (java.io.StringReader): 2
PreparedStatement (java.sql.PreparedStatement): 2
Collections.singletonList (java.util.Collections.singletonList): 2