Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
The class GridCacheConfigurationConsistencySelfTest, method testCacheUtilsCheckAttributeMismatch.
/**
 * @throws Exception If failed.
 */
public void testCacheUtilsCheckAttributeMismatch() throws Exception {
    Ignite ignite = startGrid(1);

    final ClusterNode node = ignite.cluster().localNode();
    final GridStringLogger strLog = new GridStringLogger(false, log);

    CU.checkAttributeMismatch(strLog, "cache", node.id(), "cacheMode", "Cache mode", LOCAL, PARTITIONED, false);

    assertTrue("No expected message in log: " + strLog.toString(), strLog.toString().contains("Cache mode mismatch"));

    strLog.reset();

    GridTestUtils.assertThrows(log, new Callable<Void>() {
        /** {@inheritDoc} */
        @Override public Void call() throws Exception {
            CU.checkAttributeMismatch(strLog, "cache", node.id(), "cacheMode", "Cache mode", LOCAL, PARTITIONED, true);

            return null;
        }
    }, IgniteCheckedException.class, "Cache mode mismatch");

    final CacheConfiguration cfg1 = defaultCacheConfiguration();
    cfg1.setCacheMode(LOCAL);

    final CacheConfiguration cfg2 = defaultCacheConfiguration();
    cfg2.setCacheMode(PARTITIONED);

    CU.checkAttributeMismatch(strLog, cfg1, cfg2, node.id(), new T2<>("cacheMode", "Cache mode"), false);

    assertTrue("No expected message in log: " + strLog.toString(), strLog.toString().contains("Cache mode mismatch"));

    GridTestUtils.assertThrows(log, new Callable<Void>() {
        /** {@inheritDoc} */
        @Override public Void call() throws Exception {
            CU.checkAttributeMismatch(strLog, cfg1, cfg2, node.id(), new T2<>("cacheMode", "Cache mode"), true);

            return null;
        }
    }, IgniteCheckedException.class, "Cache mode mismatch");
}
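For context, the T2 argument passed to the second checkAttributeMismatch overload above is simply a 2-tuple pairing the configuration property name with the human-readable label used in the warning or exception text. A minimal sketch (not taken from the test itself), using only the get1()/get2() accessors T2 inherits from IgniteBiTuple:

// Minimal sketch: T2 used as an (attribute name, attribute description) pair.
T2<String, String> attr = new T2<>("cacheMode", "Cache mode");

String propName = attr.get1(); // "cacheMode": name of the configuration property being compared.
String propLbl = attr.get2();  // "Cache mode": label used when reporting a mismatch.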
Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
The class IgniteCachePeekModesAbstractTest, method offheapKeys.
/**
 * @param nodeIdx Node index.
 * @return Tuple with primary and backup keys.
 */
private T2<List<Integer>, List<Integer>> offheapKeys(int nodeIdx) {
    GridCacheAdapter<Integer, String> internalCache =
        ((IgniteKernal)ignite(nodeIdx)).context().cache().internalCache(DEFAULT_CACHE_NAME);

    // TODO GG-11148.
    Iterator<Map.Entry<Integer, String>> offheapIt = Collections.EMPTY_MAP.entrySet().iterator();
    // if (internalCache.context().isNear())
    //     offheapIt = internalCache.context().near().dht().context().swap().lazyOffHeapIterator(false);
    // else
    //     offheapIt = internalCache.context().swap().lazyOffHeapIterator(false);

    Affinity aff = ignite(nodeIdx).affinity(DEFAULT_CACHE_NAME);

    ClusterNode node = ignite(nodeIdx).cluster().localNode();

    List<Integer> primary = new ArrayList<>();
    List<Integer> backups = new ArrayList<>();

    while (offheapIt.hasNext()) {
        Map.Entry<Integer, String> e = offheapIt.next();

        if (aff.isPrimary(node, e.getKey()))
            primary.add(e.getKey());
        else {
            assertTrue(aff.isBackup(node, e.getKey()));

            backups.add(e.getKey());
        }
    }

    return new T2<>(primary, backups);
}
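Here T2 serves as a lightweight way to return two values from one method. A hypothetical caller inside the same test class (the node index 0 is just an illustrative choice) would unpack the tuple like this:

// Hypothetical caller: split the returned tuple back into primary and backup key lists.
T2<List<Integer>, List<Integer>> keys = offheapKeys(0);

List<Integer> primaryKeys = keys.get1(); // Keys for which the node is primary.
List<Integer> backupKeys = keys.get2();  // Keys for which the node is a backup.

log.info("Primary keys: " + primaryKeys.size() + ", backup keys: " + backupKeys.size());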
Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
The class GridCachePartitionExchangeManager, method createPartitionsSingleMessage.
/**
 * @param exchangeId Exchange ID.
 * @param clientOnlyExchange Client exchange flag.
 * @param sndCounters {@code True} if need to send partition update counters.
 * @param newCntrMap {@code True} if possible to use {@link CachePartitionPartialCountersMap}.
 * @param exchActions Exchange actions used to skip cache groups that are being stopped (may be {@code null}).
 * @return Message.
 */
public GridDhtPartitionsSingleMessage createPartitionsSingleMessage(@Nullable GridDhtPartitionExchangeId exchangeId,
    boolean clientOnlyExchange,
    boolean sndCounters,
    boolean newCntrMap,
    ExchangeActions exchActions) {
    GridDhtPartitionsSingleMessage m = new GridDhtPartitionsSingleMessage(exchangeId, clientOnlyExchange, cctx.versions().last(), true);

    Map<Object, T2<Integer, GridPartitionStateMap>> dupData = new HashMap<>();

    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        if (!grp.isLocal() && (exchActions == null || !exchActions.cacheGroupStopping(grp.groupId()))) {
            GridDhtPartitionMap locMap = grp.topology().localPartitionMap();

            addPartitionMap(m, dupData, true, grp.groupId(), locMap, grp.affinity().similarAffinityKey());

            if (sndCounters) {
                CachePartitionPartialCountersMap cntrsMap = grp.topology().localUpdateCounters(true);

                m.addPartitionUpdateCounters(grp.groupId(),
                    newCntrMap ? cntrsMap : CachePartitionPartialCountersMap.toCountersMap(cntrsMap));
            }
        }
    }

    for (GridClientPartitionTopology top : clientTops.values()) {
        if (m.partitions() != null && m.partitions().containsKey(top.groupId()))
            continue;

        GridDhtPartitionMap locMap = top.localPartitionMap();

        addPartitionMap(m, dupData, true, top.groupId(), locMap, top.similarAffinityKey());

        if (sndCounters) {
            CachePartitionPartialCountersMap cntrsMap = top.localUpdateCounters(true);

            m.addPartitionUpdateCounters(top.groupId(),
                newCntrMap ? cntrsMap : CachePartitionPartialCountersMap.toCountersMap(cntrsMap));
        }
    }

    return m;
}
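A hypothetical call site, simplified for illustration (cctx is assumed to be the shared cache context, which exposes this manager via cctx.exchange(), and exchId an exchange ID already in scope):

// Sketch only: build a single-partitions message that carries update counters in the newer format.
GridDhtPartitionsSingleMessage msg = cctx.exchange().createPartitionsSingleMessage(
    exchId, // Exchange ID, or null outside of an exchange.
    false,  // clientOnlyExchange.
    true,   // sndCounters: include partition update counters.
    true,   // newCntrMap: counters may be sent as CachePartitionPartialCountersMap.
    null);  // exchActions: no cache groups are being stopped.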
Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
The class GridCachePartitionExchangeManager, method addFullPartitionsMap.
/**
 * @param m Message.
 * @param dupData Duplicated data map.
 * @param compress {@code True} if need to check for duplicated partition state data.
 * @param grpId Cache group ID.
 * @param map Map to add.
 * @param affKey Cache affinity key.
 */
private void addFullPartitionsMap(GridDhtPartitionsFullMessage m,
    Map<Object, T2<Integer, GridDhtPartitionFullMap>> dupData,
    boolean compress,
    Integer grpId,
    GridDhtPartitionFullMap map,
    Object affKey) {
    assert map != null;

    Integer dupDataCache = null;

    if (compress && affKey != null && !m.containsGroup(grpId)) {
        T2<Integer, GridDhtPartitionFullMap> state0 = dupData.get(affKey);

        if (state0 != null && state0.get2().partitionStateEquals(map)) {
            GridDhtPartitionFullMap map0 = new GridDhtPartitionFullMap(map.nodeId(), map.nodeOrder(), map.updateSequence());

            for (Map.Entry<UUID, GridDhtPartitionMap> e : map.entrySet())
                map0.put(e.getKey(), e.getValue().emptyCopy());

            map = map0;

            dupDataCache = state0.get1();
        }
        else
            dupData.put(affKey, new T2<>(grpId, map));
    }

    m.addFullPartitionsMap(grpId, map, dupDataCache);
}
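The T2 here records, per similar-affinity key, which cache group first contributed a full map (get1()) together with that map (get2()); a later group whose partition state is equal sends only an empty copy plus a reference to the first group's ID. A simplified sketch of the same deduplication idea, hypothetical and using plain collections instead of Ignite's internal partition map types (affKey, grpId and partStates are illustrative values):

Map<Object, T2<Integer, List<String>>> dupData = new HashMap<>();

Object affKey = "similar-affinity-key"; // Hypothetical affinity key shared by several groups.
int grpId = 42;                         // Hypothetical cache group ID.
List<String> partStates = Arrays.asList("OWNING", "OWNING", "MOVING");

Integer dupDataGrp = null;

T2<Integer, List<String>> prev = dupData.get(affKey);

if (prev != null && prev.get2().equals(partStates))
    dupDataGrp = prev.get1(); // Equal state: reference the group that already supplied the data.
else
    dupData.put(affKey, new T2<>(grpId, partStates)); // First group for this key: remember its data.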
Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
The class GridCachePartitionExchangeManager, method createPartitionsFullMessage.
/**
 * @param compress {@code True} if it is possible to compress the message (compression works properly only if
 *      prepareMarshall/finishUnmarshall methods are called).
 * @param newCntrMap {@code True} if possible to use {@link CachePartitionFullCountersMap}.
 * @param exchId Non-null exchange ID if message is created for exchange.
 * @param lastVer Last version.
 * @param partHistSuppliers Partition history suppliers map.
 * @param partsToReload Partitions to reload map.
 * @return Message.
 */
public GridDhtPartitionsFullMessage createPartitionsFullMessage(boolean compress,
    boolean newCntrMap,
    @Nullable final GridDhtPartitionExchangeId exchId,
    @Nullable GridCacheVersion lastVer,
    @Nullable IgniteDhtPartitionHistorySuppliersMap partHistSuppliers,
    @Nullable IgniteDhtPartitionsToReloadMap partsToReload) {
    final GridDhtPartitionsFullMessage m = new GridDhtPartitionsFullMessage(exchId,
        lastVer,
        exchId != null ? exchId.topologyVersion() : AffinityTopologyVersion.NONE,
        partHistSuppliers,
        partsToReload);

    m.compress(compress);

    final Map<Object, T2<Integer, GridDhtPartitionFullMap>> dupData = new HashMap<>();

    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        if (!grp.isLocal()) {
            if (exchId != null) {
                AffinityTopologyVersion startTopVer = grp.localStartVersion();

                if (startTopVer.compareTo(exchId.topologyVersion()) > 0)
                    continue;
            }

            GridAffinityAssignmentCache affCache = grp.affinity();

            GridDhtPartitionFullMap locMap = grp.topology().partitionMap(true);

            if (locMap != null) {
                addFullPartitionsMap(m, dupData, compress, grp.groupId(), locMap, affCache.similarAffinityKey());
            }

            if (exchId != null) {
                CachePartitionFullCountersMap cntrsMap = grp.topology().fullUpdateCounters();

                if (newCntrMap)
                    m.addPartitionUpdateCounters(grp.groupId(), cntrsMap);
                else {
                    m.addPartitionUpdateCounters(grp.groupId(), CachePartitionFullCountersMap.toCountersMap(cntrsMap));
                }
            }
        }
    }

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
        GridDhtPartitionFullMap map = top.partitionMap(true);

        if (map != null) {
            addFullPartitionsMap(m, dupData, compress, top.groupId(), map, top.similarAffinityKey());
        }

        if (exchId != null) {
            CachePartitionFullCountersMap cntrsMap = top.fullUpdateCounters();

            if (newCntrMap)
                m.addPartitionUpdateCounters(top.groupId(), cntrsMap);
            else
                m.addPartitionUpdateCounters(top.groupId(), CachePartitionFullCountersMap.toCountersMap(cntrsMap));
        }
    }

    return m;
}
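As with the single-message variant, a hypothetical call site can be sketched from the signature above (cctx, exchId and lastVer are assumed to be in scope; the dupData map of T2 entries is local to one call, so deduplication only applies across the groups packed into that one message):

// Sketch only: build a full-partitions message with compression and the newer counters format.
GridDhtPartitionsFullMessage fullMsg = cctx.exchange().createPartitionsFullMessage(
    true,    // compress: identical partition state maps are deduplicated via the dupData T2 entries.
    true,    // newCntrMap: counters may be sent as CachePartitionFullCountersMap.
    exchId,  // Exchange ID (null if the message is not bound to an exchange).
    lastVer, // Last cache version.
    null,    // partHistSuppliers.
    null);   // partsToReload.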