Use of org.apache.ignite.lang.IgniteBiPredicate in project ignite by apache.
The class CustomersClusterizationExample, method computeMeanEntropy.
/**
* Computes mean entropy in clusters.
*
* @param cache Dataset cache.
* @param filter Test dataset filter.
* @param vectorizer Upstream vectorizer.
* @param mdl KMeans model.
* @return Score.
*/
private static double computeMeanEntropy(IgniteCache<Integer, Vector> cache, IgniteBiPredicate<Integer, Vector> filter, Vectorizer<Integer, Vector, Integer, Double> vectorizer, KMeansModel mdl) {
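// Per-cluster label histogram: cluster index -> (label -> number of occurrences).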
Map<Integer, Map<Integer, AtomicInteger>> clusterUniqueLbCounts = new HashMap<>();
try (QueryCursor<Cache.Entry<Integer, Vector>> cursor = cache.query(new ScanQuery<>(filter))) {
for (Cache.Entry<Integer, Vector> ent : cursor) {
LabeledVector<Double> vec = vectorizer.apply(ent.getKey(), ent.getValue());
int cluster = mdl.predict(vec.features());
int ch = vec.label().intValue();
if (!clusterUniqueLbCounts.containsKey(cluster))
clusterUniqueLbCounts.put(cluster, new HashMap<>());
if (!clusterUniqueLbCounts.get(cluster).containsKey(ch))
clusterUniqueLbCounts.get(cluster).put(ch, new AtomicInteger());
clusterUniqueLbCounts.get(cluster).get(ch).incrementAndGet();
}
}
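// Average the Shannon entropy of the per-cluster label distributions (unweighted mean over clusters).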
double sumOfClusterEntropies = 0.0;
for (Integer cluster : clusterUniqueLbCounts.keySet()) {
Map<Integer, AtomicInteger> lbCounters = clusterUniqueLbCounts.get(cluster);
int sizeOfCluster = lbCounters.values().stream().mapToInt(AtomicInteger::get).sum();
double entropyInCluster = lbCounters.values().stream().mapToDouble(AtomicInteger::get).map(lblsCount -> lblsCount / sizeOfCluster).map(lblProb -> -lblProb * Math.log(lblProb)).sum();
sumOfClusterEntropies += entropyInCluster;
}
return sumOfClusterEntropies / clusterUniqueLbCounts.size();
}
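In this example the IgniteBiPredicate is used as the ScanQuery filter that selects which rows of the dataset cache take part in scoring. A minimal sketch of a caller, assuming a hypothetical key-based train/test split (the split rule, the variable names and the imports are illustrative, not taken from the example above):
import org.apache.ignite.lang.IgniteBiPredicate;
import org.apache.ignite.ml.math.primitives.vector.Vector;
// Hypothetical test-set filter: IgniteBiPredicate is a functional interface,
// so a lambda can be passed wherever the filter argument is expected.
IgniteBiPredicate<Integer, Vector> testFilter = (key, row) -> key % 10 >= 7;
// double meanEntropy = computeMeanEntropy(cache, testFilter, vectorizer, mdl);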
Use of org.apache.ignite.lang.IgniteBiPredicate in project ignite by apache.
The class GridDhtCacheAdapter, method localLoadCache.
/**
* {@inheritDoc}
*/
@Override
public void localLoadCache(final IgniteBiPredicate<K, V> p, Object[] args) throws IgniteCheckedException {
if (ctx.store().isLocal()) {
super.localLoadCache(p, args);
return;
}
// TODO IGNITE-7954
MvccUtils.verifyMvccOperationSupport(ctx, "Load");
final AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();
// Version for all loaded entries.
final GridCacheVersion ver0 = ctx.shared().versions().nextForLoad(topVer.topologyVersion());
final boolean replicate = ctx.isDrEnabled();
CacheOperationContext opCtx = ctx.operationContextPerCall();
ExpiryPolicy plc0 = opCtx != null ? opCtx.expiry() : null;
final ExpiryPolicy plc = plc0 != null ? plc0 : ctx.expiry();
final IgniteBiPredicate<K, V> pred;
if (p != null) {
ctx.kernalContext().resource().injectGeneric(p);
pred = SecurityUtils.sandboxedProxy(ctx.kernalContext(), IgniteBiPredicate.class, p);
} else
pred = null;
try {
ctx.store().loadCache(new CI3<KeyCacheObject, Object, GridCacheVersion>() {
@Override
public void apply(KeyCacheObject key, Object val, @Nullable GridCacheVersion ver) {
assert ver == null;
loadEntry(key, val, ver0, pred, topVer, replicate, plc);
}
}, args);
} finally {
if (p instanceof PlatformCacheEntryFilter)
((PlatformCacheEntryFilter) p).onClose();
}
}
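On the public API side, the predicate received here typically originates from IgniteCache.localLoadCache(p, args), where it decides which entries read from the underlying CacheStore are kept in the cache. A hedged sketch of that caller side (the cache name, the Person value type and the age check are illustrative assumptions):
// Assumes a cache named "persons" backed by a configured CacheStore.
IgniteCache<Long, Person> cache = ignite.cache("persons");
// Load only the entries the predicate accepts into the local node; a null predicate would keep everything.
cache.localLoadCache((key, person) -> person.getAge() >= 18);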
Use of org.apache.ignite.lang.IgniteBiPredicate in project ignite by apache.
The class GridCacheDatabaseSharedManager, method performBinaryMemoryRestore.
/**
 * @param status Checkpoint status.
 * @param cacheGroupsPredicate Cache groups to restore.
 * @param recordTypePredicate Filter of WAL record types (and their pointers) to replay.
 * @param finalizeState Whether to finalize the checkpoint state after binary recovery.
 * @return Binary restore state, or {@code null} if {@code finalizeState} is {@code false}.
 * @throws IgniteCheckedException If failed.
 * @throws StorageException In case I/O error occurred during operations with storage.
 */
private RestoreBinaryState performBinaryMemoryRestore(CheckpointStatus status, IgnitePredicate<Integer> cacheGroupsPredicate, IgniteBiPredicate<WALRecord.RecordType, WALPointer> recordTypePredicate, boolean finalizeState) throws IgniteCheckedException {
if (log.isInfoEnabled())
log.info("Checking memory state [lastValidPos=" + status.endPtr + ", lastMarked=" + status.startPtr + ", lastCheckpointId=" + status.cpStartId + ']');
WALPointer recPtr = status.endPtr;
boolean apply = status.needRestoreMemory();
try {
WALRecord startRec = !CheckpointStatus.NULL_PTR.equals(status.startPtr) || apply ? cctx.wal().read(status.startPtr) : null;
if (apply) {
if (finalizeState)
U.quietAndWarn(log, "Ignite node stopped in the middle of checkpoint. Will restore memory state and " + "finish checkpoint on node start.");
cctx.cache().cacheGroupDescriptors().forEach((grpId, desc) -> {
if (!cacheGroupsPredicate.apply(grpId))
return;
try {
DataRegion region = cctx.database().dataRegion(desc.config().getDataRegionName());
if (region == null || !cctx.isLazyMemoryAllocation(region))
return;
region.pageMemory().start();
} catch (IgniteCheckedException e) {
throw new IgniteException(e);
}
});
cctx.pageStore().beginRecover();
if (!(startRec instanceof CheckpointRecord))
throw new StorageException("Checkpoint marker doesn't point to checkpoint record " + "[ptr=" + status.startPtr + ", rec=" + startRec + "]");
WALPointer cpMark = ((CheckpointRecord) startRec).checkpointMark();
if (cpMark != null) {
if (log.isInfoEnabled())
log.info("Restoring checkpoint after logical recovery, will start physical recovery from " + "back pointer: " + cpMark);
recPtr = cpMark;
}
} else
cctx.wal().notchLastCheckpointPtr(status.startPtr);
} catch (NoSuchElementException e) {
throw new StorageException("Failed to read checkpoint record from WAL, persistence consistency " + "cannot be guaranteed. Make sure configuration points to correct WAL folders and WAL folder is " + "properly mounted [ptr=" + status.startPtr + ", walPath=" + persistenceCfg.getWalPath() + ", walArchive=" + persistenceCfg.getWalArchivePath() + "]");
}
AtomicReference<Throwable> applyError = new AtomicReference<>();
StripedExecutor exec = cctx.kernalContext().pools().getStripedExecutorService();
Semaphore semaphore = new Semaphore(semaphorePertmits(exec));
long start = U.currentTimeMillis();
long lastArchivedSegment = cctx.wal().lastArchivedSegment();
WALIterator it = cctx.wal().replay(recPtr, recordTypePredicate);
RestoreBinaryState restoreBinaryState = new RestoreBinaryState(status, it, lastArchivedSegment, cacheGroupsPredicate);
AtomicLong applied = new AtomicLong();
try {
while (restoreBinaryState.hasNext()) {
if (applyError.get() != null)
break;
WALRecord rec = restoreBinaryState.next();
if (rec == null)
break;
switch(rec.type()) {
case PAGE_RECORD:
if (restoreBinaryState.needApplyBinaryUpdate()) {
PageSnapshot pageSnapshot = (PageSnapshot) rec;
// Here we do not require tag check because we may be applying memory changes after
// several repetitive restarts and the same pages may have changed several times.
int groupId = pageSnapshot.fullPageId().groupId();
int partId = partId(pageSnapshot.fullPageId().pageId());
if (skipRemovedIndexUpdates(groupId, partId))
break;
stripedApplyPage((pageMem) -> {
try {
applyPageSnapshot(pageMem, pageSnapshot);
applied.incrementAndGet();
} catch (Throwable t) {
U.error(log, "Failed to apply page snapshot. rec=[" + pageSnapshot + ']');
applyError.compareAndSet(null, (t instanceof IgniteCheckedException) ? (IgniteCheckedException) t : new IgniteCheckedException("Failed to apply page snapshot", t));
}
}, groupId, partId, exec, semaphore);
}
break;
case PART_META_UPDATE_STATE:
PartitionMetaStateRecord metaStateRecord = (PartitionMetaStateRecord) rec;
{
int groupId = metaStateRecord.groupId();
int partId = metaStateRecord.partitionId();
stripedApplyPage((pageMem) -> {
GridDhtPartitionState state = fromOrdinal(metaStateRecord.state());
if (state == null || state == GridDhtPartitionState.EVICTED)
schedulePartitionDestroy(groupId, partId);
else {
try {
cancelOrWaitPartitionDestroy(groupId, partId);
} catch (Throwable t) {
U.error(log, "Failed to cancel or wait partition destroy. rec=[" + metaStateRecord + ']');
applyError.compareAndSet(null, (t instanceof IgniteCheckedException) ? (IgniteCheckedException) t : new IgniteCheckedException("Failed to cancel or wait partition destroy", t));
}
}
}, groupId, partId, exec, semaphore);
}
break;
case PARTITION_DESTROY:
PartitionDestroyRecord destroyRecord = (PartitionDestroyRecord) rec;
{
int groupId = destroyRecord.groupId();
int partId = destroyRecord.partitionId();
stripedApplyPage((pageMem) -> {
pageMem.invalidate(groupId, partId);
schedulePartitionDestroy(groupId, partId);
}, groupId, partId, exec, semaphore);
}
break;
default:
if (restoreBinaryState.needApplyBinaryUpdate() && rec instanceof PageDeltaRecord) {
PageDeltaRecord pageDelta = (PageDeltaRecord) rec;
int groupId = pageDelta.groupId();
int partId = partId(pageDelta.pageId());
if (skipRemovedIndexUpdates(groupId, partId))
break;
stripedApplyPage((pageMem) -> {
try {
applyPageDelta(pageMem, pageDelta, true);
applied.incrementAndGet();
} catch (Throwable t) {
U.error(log, "Failed to apply page delta. rec=[" + pageDelta + ']');
applyError.compareAndSet(null, (t instanceof IgniteCheckedException) ? (IgniteCheckedException) t : new IgniteCheckedException("Failed to apply page delta", t));
}
}, groupId, partId, exec, semaphore);
}
}
}
} finally {
it.close();
awaitApplyComplete(exec, applyError);
}
if (!finalizeState)
return null;
WALPointer lastReadPtr = restoreBinaryState.lastReadRecordPointer();
if (status.needRestoreMemory()) {
if (restoreBinaryState.needApplyBinaryUpdate())
throw new StorageException("Failed to restore memory state (checkpoint marker is present " + "on disk, but checkpoint record is missed in WAL) " + "[cpStatus=" + status + ", lastRead=" + lastReadPtr + "]");
if (log.isInfoEnabled())
log.info("Finished applying memory changes [changesApplied=" + applied + ", time=" + (U.currentTimeMillis() - start) + " ms]");
finalizeCheckpointOnRecovery(status.cpStartTs, status.cpStartId, status.startPtr, exec);
}
return restoreBinaryState;
}
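Here the IgniteBiPredicate<WALRecord.RecordType, WALPointer> is handed to cctx.wal().replay(...) to restrict which WAL records the iterator returns during recovery. An illustrative predicate of that shape (the restriction itself is hypothetical; real callers build their own filters):
// Only replay full page snapshots; other record types are skipped by the WAL iterator.
IgniteBiPredicate<WALRecord.RecordType, WALPointer> pageSnapshotsOnly =
    (type, ptr) -> type == WALRecord.RecordType.PAGE_RECORD;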
Use of org.apache.ignite.lang.IgniteBiPredicate in project ignite by apache.
The class GridCommandHandlerTest, method testKillHangingRemoteTransactions.
/**
* Simulates uncommitted backup transactions and tests rolling them back using the control utility.
*/
@Test
public void testKillHangingRemoteTransactions() throws Exception {
final int cnt = 3;
startGridsMultiThreaded(cnt);
Ignite[] clients = new Ignite[] { startGrid("client1"), startGrid("client2"), startGrid("client3"), startGrid("client4") };
clients[0].getOrCreateCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME).setBackups(2).setAtomicityMode(TRANSACTIONAL).setWriteSynchronizationMode(FULL_SYNC).setAffinity(new RendezvousAffinityFunction(false, 64)));
awaitPartitionMapExchange();
for (Ignite client : clients) {
assertTrue(client.configuration().isClientMode());
assertNotNull(client.cache(DEFAULT_CACHE_NAME));
}
LongAdder progress = new LongAdder();
AtomicInteger idx = new AtomicInteger();
int tc = clients.length;
CountDownLatch lockLatch = new CountDownLatch(1);
CountDownLatch commitLatch = new CountDownLatch(1);
Ignite prim = primaryNode(0L, DEFAULT_CACHE_NAME);
TestRecordingCommunicationSpi primSpi = TestRecordingCommunicationSpi.spi(prim);
primSpi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
@Override
public boolean apply(ClusterNode node, Message message) {
return message instanceof GridDhtTxFinishRequest;
}
});
Set<IgniteUuid> xidSet = new GridConcurrentHashSet<>();
IgniteInternalFuture<?> fut = multithreadedAsync(new Runnable() {
@Override
public void run() {
int id = idx.getAndIncrement();
Ignite client = clients[id];
try (Transaction tx = client.transactions().txStart(PESSIMISTIC, READ_COMMITTED, 0, 1)) {
xidSet.add(tx.xid());
IgniteCache<Long, Long> cache = client.cache(DEFAULT_CACHE_NAME);
if (id != 0)
U.awaitQuiet(lockLatch);
cache.invoke(0L, new IncrementClosure(), null);
if (id == 0) {
lockLatch.countDown();
U.awaitQuiet(commitLatch);
// Wait until candidates are enqueued.
doSleep(500);
}
tx.commit();
} catch (Exception e) {
assertTrue(X.hasCause(e, TransactionTimeoutException.class));
}
progress.increment();
}
}, tc, "invoke-thread");
U.awaitQuiet(lockLatch);
commitLatch.countDown();
primSpi.waitForBlocked(clients.length);
// Unblock only the finish messages coming from clients 2 to 4.
primSpi.stopBlock(true, blockedMsg -> {
GridIoMessage iom = blockedMsg.ioMessage();
Message m = iom.message();
if (m instanceof GridDhtTxFinishRequest) {
GridDhtTxFinishRequest r = (GridDhtTxFinishRequest) m;
return !r.nearNodeId().equals(clients[0].cluster().localNode().id());
}
return true;
});
// Wait until the queue is stable.
for (Ignite ignite : G.allGrids()) {
if (ignite.configuration().isClientMode())
continue;
Collection<IgniteInternalTx> txs = ((IgniteEx) ignite).context().cache().context().tm().activeTransactions();
waitForCondition(new GridAbsPredicate() {
@Override
public boolean apply() {
for (IgniteInternalTx tx : txs) if (!tx.local()) {
IgniteTxEntry entry = tx.writeEntries().iterator().next();
GridCacheEntryEx cached = entry.cached();
Collection<GridCacheMvccCandidate> candidates = cached.remoteMvccSnapshot();
if (candidates.size() != clients.length)
return false;
}
return true;
}
}, 10_000);
}
CommandHandler h = new CommandHandler();
// Check listing.
validate(h, map -> {
for (int i = 0; i < cnt; i++) {
IgniteEx grid = grid(i);
// Skip primary.
if (grid.localNode().id().equals(prim.cluster().localNode().id()))
continue;
VisorTxTaskResult res = map.get(grid.localNode());
List<VisorTxInfo> infos = res.getInfos().stream().filter(info -> xidSet.contains(info.getNearXid())).collect(Collectors.toList());
// Validate queue length on backups.
assertEquals(clients.length, infos.size());
}
}, "--tx");
// Check kill.
validate(h, map -> {
// No-op.
}, "--tx", "--kill");
// Wait for all remote txs to finish.
for (Ignite ignite : G.allGrids()) {
if (ignite.configuration().isClientMode())
continue;
Collection<IgniteInternalTx> txs = ((IgniteEx) ignite).context().cache().context().tm().activeTransactions();
for (IgniteInternalTx tx : txs) if (!tx.local())
tx.finishFuture().get();
}
// Unblock finish message from client1.
primSpi.stopBlock(true);
fut.get();
Long cur = (Long) clients[0].cache(DEFAULT_CACHE_NAME).get(0L);
assertEquals(tc - 1, cur.longValue());
checkUserFutures();
}
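Because IgniteBiPredicate is a functional interface, the anonymous blocking filter registered on the primary's TestRecordingCommunicationSpi above can also be written as a lambda; an equivalent form of the same filter:
// Hold back every GridDhtTxFinishRequest leaving the primary node until stopBlock() is called.
primSpi.blockMessages((node, msg) -> msg instanceof GridDhtTxFinishRequest);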
Use of org.apache.ignite.lang.IgniteBiPredicate in project ignite by apache.
The class CacheGroupsMetricsRebalanceTest, method testCacheGroupRebalance.
/**
* @throws Exception If failed.
*/
@Test
public void testCacheGroupRebalance() throws Exception {
IgniteEx ignite0 = startGrid(0);
List<String> cacheNames = Arrays.asList(CACHE4, CACHE5);
int allKeysCount = 0;
for (String cacheName : cacheNames) {
Map<Integer, Long> data = new Random().ints(KEYS_COUNT).distinct().boxed().collect(Collectors.toMap(i -> i, i -> (long) i));
ignite0.getOrCreateCache(cacheName).putAll(data);
allKeysCount += data.size();
}
TestRecordingCommunicationSpi.spi(ignite0).blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
@Override
public boolean apply(ClusterNode node, Message msg) {
return (msg instanceof GridDhtPartitionSupplyMessage) && CU.cacheId(GROUP2) == ((GridCacheGroupIdMessage) msg).groupId();
}
});
IgniteEx ignite1 = startGrid(1);
TestRecordingCommunicationSpi.spi(ignite0).waitForBlocked();
MetricRegistry mreg = ignite1.context().metric().registry(metricName(CACHE_GROUP_METRICS_PREFIX, GROUP2));
LongMetric startTime = mreg.findMetric("RebalancingStartTime");
LongMetric lastCancelledTime = mreg.findMetric("RebalancingLastCancelledTime");
LongMetric endTime = mreg.findMetric("RebalancingEndTime");
LongMetric partitionsLeft = mreg.findMetric("RebalancingPartitionsLeft");
IntMetric partitionsTotal = mreg.findMetric("RebalancingPartitionsTotal");
LongMetric receivedKeys = mreg.findMetric("RebalancingReceivedKeys");
LongMetric receivedBytes = mreg.findMetric("RebalancingReceivedBytes");
ObjectGauge<Map<UUID, Long>> fullReceivedKeys = mreg.findMetric("RebalancingFullReceivedKeys");
ObjectGauge<Map<UUID, Long>> histReceivedKeys = mreg.findMetric("RebalancingHistReceivedKeys");
ObjectGauge<Map<UUID, Long>> fullReceivedBytes = mreg.findMetric("RebalancingFullReceivedBytes");
ObjectGauge<Map<UUID, Long>> histReceivedBytes = mreg.findMetric("RebalancingHistReceivedBytes");
assertEquals("During the start of the rebalancing, the number of partitions in the metric should be " + "equal to the number of partitions in the cache group.", DFLT_PARTITION_COUNT, partitionsLeft.value());
assertEquals("The total number of partitions in the metric should be " + "equal to the number of partitions in the cache group.", DFLT_PARTITION_COUNT, partitionsTotal.value());
long rebalancingStartTime = startTime.value();
assertNotSame("During rebalancing start, the start time metric must be determined.", -1, startTime.value());
assertEquals("Rebalancing last cancelled time must be undefined.", -1, lastCancelledTime.value());
assertEquals("Before the rebalancing is completed, the end time metric must be undefined.", -1, endTime.value());
ToLongFunction<Map<UUID, Long>> sumFunc = map -> map.values().stream().mapToLong(Long::longValue).sum();
String zeroReceivedKeysMsg = "Until a partition supply message has been delivered, keys cannot be received.";
assertEquals(zeroReceivedKeysMsg, 0, receivedKeys.value());
assertEquals(zeroReceivedKeysMsg, 0, sumFunc.applyAsLong(fullReceivedKeys.value()));
assertEquals(zeroReceivedKeysMsg, 0, sumFunc.applyAsLong(histReceivedKeys.value()));
String zeroReceivedBytesMsg = "Until a partition supply message has been delivered, bytes cannot be received.";
assertEquals(zeroReceivedBytesMsg, 0, receivedBytes.value());
assertEquals(zeroReceivedBytesMsg, 0, sumFunc.applyAsLong(fullReceivedBytes.value()));
assertEquals(zeroReceivedBytesMsg, 0, sumFunc.applyAsLong(histReceivedBytes.value()));
checkSuppliers(Arrays.asList(ignite0.localNode().id()), fullReceivedKeys, histReceivedKeys, fullReceivedBytes, histReceivedBytes);
TestRecordingCommunicationSpi.spi(ignite0).stopBlock();
for (String cacheName : cacheNames) ignite1.context().cache().internalCache(cacheName).preloader().rebalanceFuture().get();
assertEquals("After completion of rebalancing, there are no partitions of the cache group that are" + " left to rebalance.", 0, partitionsLeft.value());
assertEquals("After completion of rebalancing, the total number of partitions in the metric should be" + " equal to the number of partitions in the cache group.", DFLT_PARTITION_COUNT, partitionsTotal.value());
assertEquals("After the rebalancing is ended, the rebalancing start time must be equal to the start time " + "measured immediately after the rebalancing start.", rebalancingStartTime, startTime.value());
assertEquals("Rebalancing last cancelled time must be undefined.", -1, lastCancelledTime.value());
waitForCondition(() -> endTime.value() != -1, 1000);
assertTrue("Rebalancing end time must be determined and must be longer than the start time " + "[RebalancingStartTime=" + rebalancingStartTime + ", RebalancingEndTime=" + endTime.value() + "].", rebalancingStartTime < endTime.value());
String wrongReceivedKeyCntMsg = "The number of currently rebalanced keys for the whole cache group should " + "be equal to the number of entries in the caches.";
assertEquals(wrongReceivedKeyCntMsg, allKeysCount, receivedKeys.value());
assertEquals(wrongReceivedKeyCntMsg, allKeysCount, sumFunc.applyAsLong(fullReceivedKeys.value()));
assertEquals(0, sumFunc.applyAsLong(histReceivedKeys.value()));
int estimateByteCnt = allKeysCount * (Integer.BYTES + Long.BYTES);
String wrongReceivedByteCntMsg = "The number of currently rebalanced bytes of this cache group was expected " + "to be more than " + estimateByteCnt + " bytes.";
assertTrue(wrongReceivedByteCntMsg, receivedBytes.value() > estimateByteCnt);
assertTrue(wrongReceivedByteCntMsg, sumFunc.applyAsLong(fullReceivedBytes.value()) > estimateByteCnt);
assertEquals(0, sumFunc.applyAsLong(histReceivedBytes.value()));
checkSuppliers(Arrays.asList(ignite0.localNode().id()), fullReceivedKeys, histReceivedKeys, fullReceivedBytes, histReceivedBytes);
}
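The blocking predicate in this test freezes rebalancing of GROUP2 by intercepting its partition supply messages. A hedged, reusable form of the same filter with the cache group name as a parameter (the helper itself is illustrative and not part of the test):
// Builds a blocking filter for supply messages belonging to a single cache group,
// using the same CU.cacheId / GridCacheGroupIdMessage check as the test above.
private static IgniteBiPredicate<ClusterNode, Message> supplyMessagesOf(String grpName) {
    int grpId = CU.cacheId(grpName);
    return (node, msg) -> msg instanceof GridDhtPartitionSupplyMessage && ((GridCacheGroupIdMessage) msg).groupId() == grpId;
}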