Use of org.apache.ignite.internal.util.lang.IgnitePair in the Apache Ignite project.
Example: the getCacheAndTypeId method of the IgniteIndexReader class.
/**
 * Tries to get cache id and type id from index tree name.
 *
 * @param name Index name.
 * @return Pair of cache id and type id.
 */
public static IgnitePair<Integer> getCacheAndTypeId(String name) {
    return CACHE_TYPE_IDS.computeIfAbsent(name, idxName -> {
        // Preferred form: the name carries both a cache id and a type id.
        Matcher cacheTypeMatcher = CACHE_TYPE_ID_SEACH_PATTERN.matcher(idxName);

        if (cacheTypeMatcher.find()) {
            return new IgnitePair<>(
                parseInt(cacheTypeMatcher.group("id")),
                parseInt(cacheTypeMatcher.group("typeId"))
            );
        }

        // Fallback: only a cache id is present; type id defaults to 0.
        Matcher cacheMatcher = CACHE_ID_SEACH_PATTERN.matcher(idxName);

        if (cacheMatcher.find())
            return new IgnitePair<>(parseInt(cacheMatcher.group("id")), 0);

        // Name matched neither pattern.
        return new IgnitePair<>(0, 0);
    });
}
Use of org.apache.ignite.internal.util.lang.IgnitePair in the Apache Ignite project.
Example: the times(Matrix) method of the SparseBlockDistributedMatrix class.
/**
 * {@inheritDoc}
 */
@SuppressWarnings({ "unchecked" })
@Override
public Matrix times(final Matrix mtx) {
    if (mtx == null)
        throw new IllegalArgumentException("The matrix should be not null.");

    // A (n x m) * B (m x k) requires A's column count to equal B's row count.
    if (columnSize() != mtx.rowSize())
        throw new CardinalityException(columnSize(), mtx.rowSize());

    SparseBlockDistributedMatrix matrixA = this;
    // NOTE(review): assumes mtx is also block-distributed — a plain Matrix would fail this cast.
    SparseBlockDistributedMatrix matrixB = (SparseBlockDistributedMatrix) mtx;

    String cacheName = this.storage().cacheName();

    // Result matrix C with A's row count and B's column count.
    SparseBlockDistributedMatrix matrixC = new SparseBlockDistributedMatrix(matrixA.rowSize(), matrixB.columnSize());

    // Broadcast the computation to every node holding blocks of the shared cache.
    CacheUtils.bcast(cacheName, () -> {
        Ignite ignite = Ignition.localIgnite();
        Affinity<MatrixBlockKey> affinity = ignite.affinity(cacheName);

        IgniteCache<MatrixBlockKey, MatrixBlockEntry> cache = ignite.getOrCreateCache(cacheName);

        ClusterNode locNode = ignite.cluster().localNode();

        BlockMatrixStorage storageC = matrixC.storage();

        // Each node computes only the C blocks mapped to it by affinity.
        Map<ClusterNode, Collection<MatrixBlockKey>> keysCToNodes = affinity.mapKeysToNodes(storageC.getAllKeys());
        Collection<MatrixBlockKey> locKeys = keysCToNodes.get(locNode);

        // This node owns no blocks of C — nothing to do here.
        if (locKeys == null)
            return;

        // compute Cij locally on each node
        // TODO: IGNITE:5114, exec in parallel
        locKeys.forEach(key -> {
            long newBlockIdRow = key.blockRowId();
            long newBlockIdCol = key.blockColId();

            IgnitePair<Long> newBlockId = new IgnitePair<>(newBlockIdRow, newBlockIdCol);

            MatrixBlockEntry blockC = null;

            // Block row of A and block column of B needed for C(i,j);
            // presumably aligned so aRow.get(i) pairs with bCol.get(i) — confirm against storage impl.
            List<MatrixBlockEntry> aRow = matrixA.storage().getRowForBlock(newBlockId);
            List<MatrixBlockEntry> bCol = matrixB.storage().getColForBlock(newBlockId);

            // C(i,j) = sum over k of A(i,k) * B(k,j), accumulated block by block.
            for (int i = 0; i < aRow.size(); i++) {
                MatrixBlockEntry blockA = aRow.get(i);
                MatrixBlockEntry blockB = bCol.get(i);

                MatrixBlockEntry tmpBlock = new MatrixBlockEntry(blockA.times(blockB));

                blockC = blockC == null ? tmpBlock : new MatrixBlockEntry(blockC.plus(tmpBlock));
            }

            // Store the finished C block back into the distributed cache.
            cache.put(storageC.getCacheKey(newBlockIdRow, newBlockIdCol), blockC);
        });
    });

    return matrixC;
}
Use of org.apache.ignite.internal.util.lang.IgnitePair in the Apache Ignite project.
Example: the times(Vector) overload of the SparseBlockDistributedMatrix class.
/**
 * {@inheritDoc}
 */
@SuppressWarnings({ "unchecked" })
@Override
public Vector times(final Vector vec) {
    if (vec == null)
        throw new IllegalArgumentException("The vector should be not null.");

    // A (n x m) * v (m) requires the vector length to equal the column count.
    if (columnSize() != vec.size())
        throw new CardinalityException(columnSize(), vec.size());

    SparseBlockDistributedMatrix matrixA = this;
    // NOTE(review): assumes vec is block-distributed — a plain Vector would fail this cast.
    SparseBlockDistributedVector vectorB = (SparseBlockDistributedVector) vec;

    String cacheName = this.storage().cacheName();

    // Result vector C with A's row count.
    SparseBlockDistributedVector vectorC = new SparseBlockDistributedVector(matrixA.rowSize());

    // Broadcast the computation to every node holding blocks of the shared cache.
    CacheUtils.bcast(cacheName, () -> {
        Ignite ignite = Ignition.localIgnite();
        Affinity<VectorBlockKey> affinity = ignite.affinity(cacheName);

        IgniteCache<VectorBlockKey, VectorBlockEntry> cache = ignite.getOrCreateCache(cacheName);

        ClusterNode locNode = ignite.cluster().localNode();

        BlockVectorStorage storageC = vectorC.storage();

        // Each node computes only the C blocks mapped to it by affinity.
        Map<ClusterNode, Collection<VectorBlockKey>> keysCToNodes = affinity.mapKeysToNodes(storageC.getAllKeys());
        Collection<VectorBlockKey> locKeys = keysCToNodes.get(locNode);

        // This node owns no blocks of C — nothing to do here.
        if (locKeys == null)
            return;

        // compute Cij locally on each node
        // TODO: IGNITE:5114, exec in parallel
        locKeys.forEach(key -> {
            long newBlockId = key.blockId();

            // Matrix row lookup needs a (row, col) pair; the column component is fixed at 0.
            IgnitePair<Long> newBlockIdForMtx = new IgnitePair<>(newBlockId, 0L);

            VectorBlockEntry blockC = null;

            // Block row of A and the vector's blocks; presumably aligned so
            // aRow.get(i) pairs with bCol.get(i) — confirm against storage impl.
            List<MatrixBlockEntry> aRow = matrixA.storage().getRowForBlock(newBlockIdForMtx);
            List<VectorBlockEntry> bCol = vectorB.storage().getColForBlock(newBlockId);

            // C(i) = sum over k of A(i,k) * v(k), accumulated block by block.
            for (int i = 0; i < aRow.size(); i++) {
                MatrixBlockEntry blockA = aRow.get(i);
                VectorBlockEntry blockB = bCol.get(i);

                VectorBlockEntry tmpBlock = new VectorBlockEntry(blockA.times(blockB));

                blockC = blockC == null ? tmpBlock : new VectorBlockEntry(blockC.plus(tmpBlock));
            }

            // Store the finished C block back into the distributed cache.
            cache.put(storageC.getCacheKey(newBlockId), blockC);
        });
    });

    return vectorC;
}
Use of org.apache.ignite.internal.util.lang.IgnitePair in the Apache Ignite project.
Example: the printTraversalResults method of the IgniteIndexReader class.
/**
 * Prints traversal info: per-tree page statistics, traversal errors, and
 * cross-index size-consistency checks.
 *
 * @param prefix Prefix prepended to every printed line.
 * @param treeInfos Tree traversal info, keyed by index tree name.
 */
private void printTraversalResults(String prefix, Map<String, TreeTraversalInfo> treeInfos) {
    print("\n" + prefix + "Tree traversal results");

    // Total page count per page IO class, accumulated across all trees.
    Map<Class, Long> totalStat = new HashMap<>();

    AtomicInteger totalErr = new AtomicInteger(0);

    // Map (cacheId, typeId) -> (map idxName -> size))
    Map<IgnitePair<Integer>, Map<String, Long>> cacheIdxSizes = new HashMap<>();

    treeInfos.forEach((idxName, validationInfo) -> {
        print(prefix + "-----");
        print(prefix + "Index tree: " + idxName);
        print(prefix + "-- Page stat:");

        validationInfo.ioStat.forEach((cls, cnt) -> {
            print(prefix + cls.getSimpleName() + ": " + cnt);

            // Accumulate the actual page count. The previous code added 1 per tree,
            // which made the "Total pages found in trees" summary below count
            // (tree, page class) pairs instead of pages.
            totalStat.compute(cls, (k, v) -> v == null ? cnt : v + cnt);
        });

        print(prefix + "-- Count of items found in leaf pages: " + validationInfo.itemStorage.size());

        printErrors(prefix, "Errors:", "No errors occurred while traversing.", "Page id=%s, exceptions:", true, validationInfo.errors);

        totalErr.addAndGet(validationInfo.errors.size());

        // Group index sizes by (cacheId, typeId) so indexes of the same cache/type can be compared.
        cacheIdxSizes.computeIfAbsent(getCacheAndTypeId(idxName), k -> new HashMap<>()).put(idxName, validationInfo.itemStorage.size());
    });

    print(prefix + "---");

    printPageStat(prefix, "Total page stat collected during trees traversal:", totalStat);

    print("");

    AtomicBoolean sizeConsistencyErrorsFound = new AtomicBoolean(false);

    // All indexes over the same (cacheId, typeId) must report the same size.
    cacheIdxSizes.forEach((cacheTypeId, idxSizes) -> {
        if (idxSizes.values().stream().distinct().count() > 1) {
            sizeConsistencyErrorsFound.set(true);

            totalErr.incrementAndGet();

            printErr("Index size inconsistency: cacheId=" + cacheTypeId.get1() + ", typeId=" + cacheTypeId.get2());

            idxSizes.forEach((name, size) -> printErr("     Index name: " + name + ", size=" + size));
        }
    });

    if (!sizeConsistencyErrorsFound.get())
        print(prefix + "No index size consistency errors found.");

    print("");
    print(prefix + "Total trees: " + treeInfos.keySet().size());
    print(prefix + "Total pages found in trees: " + totalStat.values().stream().mapToLong(a -> a).sum());
    print(prefix + "Total errors during trees traversal: " + totalErr.get());
    print("");
    print("------------------");
}
Use of org.apache.ignite.internal.util.lang.IgnitePair in the Apache Ignite project.
Example: the handleToCollection test method of the BinaryMarshallerSelfTest class.
/**
 * Checks {@link BinaryBuilderReader#parseValue()} for object that contains handles to collection.
 *
 * @throws Exception If failed.
 */
@Test
public void handleToCollection() throws Exception {
    // Pairs of (collection field, field holding a handle to that collection).
    final IgnitePair<String>[] fldPairs = new IgnitePair[] {
        new IgnitePair<>("lst", "hndLst"),
        new IgnitePair<>("linkedLst", "hndLinkedLst"),
        new IgnitePair<>("map", "hndMap"),
        new IgnitePair<>("linkedMap", "hndLinkedMap")
    };

    BinaryMarshaller marsh = binaryMarshaller();

    BinaryObject binObj = marshal(new HandleToCollections(), marsh);

    for (int iter = 0; iter < 10; ++iter) {
        BinaryObjectBuilder builder = binObj.toBuilder();

        // The previous iteration's builder set "a" to iter - 1; verify it round-tripped.
        if (iter > 0)
            assertEquals(iter - 1, (int)binObj.field("a"));

        builder.setField("a", iter);

        for (IgnitePair<String> pair : fldPairs) {
            // Different orders to read collection and handle to collection.
            Object col;
            Object colHnd;

            if (iter % 2 == 1) {
                colHnd = builder.getField(pair.get2());
                col = builder.getField(pair.get1());
            }
            else {
                col = builder.getField(pair.get1());
                colHnd = builder.getField(pair.get2());
            }

            // Must be assertSame but now BinaryObjectBuilder doesn't support handle to collection.
            // Now we check only that BinaryObjectBuilder#getField doesn't crash and returns valid collection.
            assertEquals("Check: " + pair, col, colHnd);
        }

        binObj = builder.build();
    }
}
Aggregations