Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project ignite by apache.
Class IgnitePersistentStoreTest, method loadCacheTest.
/**
 * Tests that cache data is correctly loaded from a Cassandra table via loadCache(): by CQL query string, by statement, and with no arguments.
 */
@Test
public void loadCacheTest() {
Ignition.stopAll(true);
LOGGER.info("Running loadCache test");
LOGGER.info("Filling Cassandra table with test data");
CacheStore store = CacheStoreHelper.createCacheStore("personTypes", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml"), CassandraHelper.getAdminDataSrc());
Collection<CacheEntryImpl<PersonId, Person>> entries = TestsHelper.generatePersonIdsPersonsEntries();
// noinspection unchecked
store.writeAll(entries);
LOGGER.info("Cassandra table filled with test data");
LOGGER.info("Running loadCache test");
try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
CacheConfiguration<PersonId, Person> ccfg = new CacheConfiguration<>("cache3");
IgniteCache<PersonId, Person> personCache3 = ignite.getOrCreateCache(ccfg);
int size = personCache3.size(CachePeekMode.ALL);
LOGGER.info("Initial cache size " + size);
LOGGER.info("Loading cache data from Cassandra table");
String qry = "select * from test1.pojo_test3 limit 3";
personCache3.loadCache(null, qry);
size = personCache3.size(CachePeekMode.ALL);
Assert.assertEquals("Cache data was incorrectly loaded from Cassandra table by '" + qry + "'", 3, size);
personCache3.clear();
personCache3.loadCache(null, new SimpleStatement(qry));
size = personCache3.size(CachePeekMode.ALL);
Assert.assertEquals("Cache data was incorrectly loaded from Cassandra table by statement", 3, size);
personCache3.clear();
personCache3.loadCache(null);
size = personCache3.size(CachePeekMode.ALL);
Assert.assertEquals("Cache data was incorrectly loaded from Cassandra. " + "Expected number of records is " + TestsHelper.getBulkOperationSize() + ", but loaded number of records is " + size, TestsHelper.getBulkOperationSize(), size);
LOGGER.info("Cache data loaded from Cassandra table");
}
LOGGER.info("loadCache test passed");
}
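For reference, the same Cassandra-backed load path can be driven outside the test harness. A minimal sketch, assuming the ignite-config.xml used above declares the "cache3" cache together with its Cassandra cache store (error handling omitted):

try (Ignite ignite = Ignition.start("org/apache/ignite/tests/persistence/pojo/ignite-config.xml")) {
    IgniteCache<PersonId, Person> cache = ignite.getOrCreateCache(new CacheConfiguration<PersonId, Person>("cache3"));

    // Load at most three rows with an explicit CQL query.
    cache.loadCache(null, "select * from test1.pojo_test3 limit 3");

    // Or reload the whole table when no arguments are passed.
    cache.clear();
    cache.loadCache(null);
}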
Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project ignite by apache.
Class GridCacheWriteBehindStoreAbstractSelfTest, method runPutGetRemoveMultithreaded.
/**
* Performs multiple put, get and remove operations on a store from several threads. After
* all threads have finished, returns the total set of keys that should be
* in the underlying store.
*
* @param threadCnt Count of threads that should update keys.
* @param keysPerThread Count of unique keys assigned to a thread.
* @return Set of keys that were put into the store in total.
* @throws Exception If failed.
*/
protected Set<Integer> runPutGetRemoveMultithreaded(int threadCnt, final int keysPerThread) throws Exception {
final ConcurrentMap<String, Set<Integer>> perThread = new ConcurrentHashMap<>();
final AtomicBoolean running = new AtomicBoolean(true);
final AtomicInteger cntr = new AtomicInteger();
final AtomicInteger operations = new AtomicInteger();
IgniteInternalFuture<?> fut = multithreadedAsync(new Runnable() {
@SuppressWarnings({ "NullableProblems" })
@Override
public void run() {
// Initialize key set for this thread.
Set<Integer> set = new HashSet<>();
Set<Integer> old = perThread.putIfAbsent(Thread.currentThread().getName(), set);
if (old != null)
set = old;
List<Integer> original = new ArrayList<>();
Random rnd = new Random();
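// Reserve a contiguous range of keys unique to this thread via the shared counter.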
for (int i = 0; i < keysPerThread; i++) original.add(cntr.getAndIncrement());
try {
while (running.get()) {
int op = rnd.nextInt(3);
int idx = rnd.nextInt(keysPerThread);
int key = original.get(idx);
switch(op) {
case 0:
store.write(new CacheEntryImpl<>(key, "val" + key));
set.add(key);
operations.incrementAndGet();
break;
case 1:
store.delete(key);
set.remove(key);
operations.incrementAndGet();
break;
case 2:
default:
store.write(new CacheEntryImpl<>(key, "broken"));
String val = store.load(key);
assertEquals("Invalid intermediate value: " + val, "broken", val);
store.write(new CacheEntryImpl<>(key, "val" + key));
set.add(key);
// Two writes and one load are performed here, hence three operations are counted.
operations.incrementAndGet();
operations.incrementAndGet();
operations.incrementAndGet();
break;
}
}
} catch (Exception e) {
error("Unexpected exception in put thread", e);
assert false;
}
}
}, threadCnt, "put");
U.sleep(10000);
running.set(false);
fut.get();
log().info(">>> " + operations + " operations performed in total");
Set<Integer> total = new HashSet<>();
for (Set<Integer> threadVals : perThread.values()) {
total.addAll(threadVals);
}
return total;
}
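A typical caller of this helper shuts down the write-behind store and then checks the underlying store against the returned key set. A minimal sketch, assuming the delegate store and shutdownStore() helper that the concrete self tests below use:

Set<Integer> expKeys = runPutGetRemoveMultithreaded(10, 10);

// Drain the write-behind buffer so all pending updates reach the delegate store.
shutdownStore();

assertEquals("Invalid store size", expKeys.size(), delegate.getMap().size());

for (Integer key : expKeys)
    assertEquals("Invalid value stored", "val" + key, delegate.getMap().get(key));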
Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project ignite by apache.
Class GridCacheWriteBehindStoreSelfTest, method testContinuousPut.
/**
* Tests store behaviour under continuous puts of the same keys with different values.
*
* @param writeCoalescing Write coalescing flag for cache.
* @throws Exception If failed.
*/
private void testContinuousPut(boolean writeCoalescing) throws Exception {
initStore(2, writeCoalescing);
try {
final AtomicBoolean running = new AtomicBoolean(true);
final AtomicInteger actualPutCnt = new AtomicInteger();
IgniteInternalFuture<?> fut = multithreadedAsync(new Runnable() {
@SuppressWarnings({ "NullableProblems" })
@Override
public void run() {
try {
while (running.get()) {
for (int i = 0; i < CACHE_SIZE; i++) {
store.write(new CacheEntryImpl<>(i, "val-0"));
actualPutCnt.incrementAndGet();
store.write(new CacheEntryImpl<>(i, "val" + i));
actualPutCnt.incrementAndGet();
}
}
} catch (Exception e) {
error("Unexpected exception in put thread", e);
assert false;
}
}
}, 1, "put");
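// Let the flusher run for at least two flush cycles while puts are in flight, then stop the writer and allow the buffer to drain.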
U.sleep(FLUSH_FREQUENCY * 2 + 500);
running.set(false);
U.sleep(FLUSH_FREQUENCY * 2 + 500);
int delegatePutCnt = delegate.getPutAllCount();
fut.get();
log().info(">>> [putCnt = " + actualPutCnt.get() + ", delegatePutCnt=" + delegatePutCnt + "]");
assertTrue("No puts were made to the underlying store", delegatePutCnt > 0);
if (store.getWriteCoalescing()) {
assertTrue("Too many puts were made to the underlying store", delegatePutCnt < actualPutCnt.get() / 10);
} else {
assertTrue("Too few puts cnt=" + actualPutCnt.get() + " << storePutCnt=" + delegatePutCnt, delegatePutCnt > actualPutCnt.get() / 2);
}
} finally {
shutdownStore();
}
// These checks must be done after the store shut down
assertEquals("Invalid store size", CACHE_SIZE, delegate.getMap().size());
for (int i = 0; i < CACHE_SIZE; i++) assertEquals("Invalid value stored", "val" + i, delegate.getMap().get(i));
}
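The coalescing flag is presumably exercised by a pair of public test methods delegating to this private helper. A sketch of such wrappers (the method names are illustrative, not necessarily the ones in the actual test class):

/** @throws Exception If failed. */
@Test
public void testContinuousPutWithCoalescing() throws Exception {
    testContinuousPut(true);
}

/** @throws Exception If failed. */
@Test
public void testContinuousPutWithoutCoalescing() throws Exception {
    testContinuousPut(false);
}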
Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project ignite by apache.
Class GridCacheWriteBehindStoreSelfTest, method testShutdown.
/**
* Tests that all values that were put into the store will be written to the underlying store
* after shutdown is called.
*
* @param writeCoalescing Write coalescing flag.
* @throws Exception If failed.
*/
private void testShutdown(boolean writeCoalescing) throws Exception {
initStore(2, writeCoalescing);
try {
final AtomicBoolean running = new AtomicBoolean(true);
IgniteInternalFuture<?> fut = multithreadedAsync(new Runnable() {
@SuppressWarnings({ "NullableProblems" })
@Override
public void run() {
try {
while (running.get()) {
for (int i = 0; i < CACHE_SIZE; i++) {
store.write(new CacheEntryImpl<>(i, "val-0"));
store.write(new CacheEntryImpl<>(i, "val" + i));
}
}
} catch (Exception e) {
error("Unexpected exception in put thread", e);
assert false;
}
}
}, 1, "put");
U.sleep(300);
running.set(false);
fut.get();
} finally {
shutdownStore();
}
// These checks must be done after the store shut down
assertEquals("Invalid store size", CACHE_SIZE, delegate.getMap().size());
for (int i = 0; i < CACHE_SIZE; i++) assertEquals("Invalid value stored", "val" + i, delegate.getMap().get(i));
}
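For completeness, the write-behind behaviour exercised by these tests is normally enabled through cache configuration rather than by instantiating the store directly. A minimal sketch, where MyCacheStore stands in for an arbitrary CacheStore implementation:

CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("writeBehindCache");

ccfg.setCacheStoreFactory(FactoryBuilder.factoryOf(MyCacheStore.class)); // MyCacheStore is illustrative
ccfg.setWriteThrough(true);
ccfg.setWriteBehindEnabled(true);
ccfg.setWriteBehindFlushFrequency(2000); // flush buffered updates every 2 seconds
ccfg.setWriteBehindFlushSize(1024);      // or as soon as 1024 updates are buffered
ccfg.setWriteBehindCoalescing(true);     // collapse repeated updates of the same key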
Use of org.apache.ignite.internal.processors.cache.CacheEntryImpl in project ignite by apache.
Class ColumnDecisionTreeTrainer, method doTrain.
/**
 * Builds the decision tree for the given input: repeatedly applies the best available split until no split yields sufficient information gain, then computes leaf values.
 */
@NotNull
private DecisionTreeModel doTrain(ColumnDecisionTreeTrainerInput input, UUID uuid) {
RootNode root = new RootNode();
// List containing setters of leaves of the tree.
List<TreeTip> tips = new LinkedList<>();
tips.add(new TreeTip(root::setSplit, 0));
int curDepth = 0;
int regsCnt = 1;
int featuresCnt = input.featuresCount();
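// Seed the split cache with a zero-gain placeholder for every feature of this training session.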
IntStream.range(0, featuresCnt).mapToObj(fIdx -> SplitCache.key(fIdx, input.affinityKey(fIdx, ignite), uuid)).forEach(k -> SplitCache.getOrCreate(ignite).put(k, new IgniteBiTuple<>(0, 0.0)));
updateSplitCache(0, regsCnt, featuresCnt, ig -> i -> input.affinityKey(i, ig), uuid);
// Keep splitting while the best remaining split yields sufficient information gain; stop otherwise.
while (true) {
long before = System.currentTimeMillis();
IgniteBiTuple<Integer, IgniteBiTuple<Integer, Double>> b = findBestSplitIndexForFeatures(featuresCnt, input::affinityKey, uuid);
long findBestRegIdx = System.currentTimeMillis() - before;
Integer bestFeatureIdx = b.get1();
Integer regIdx = b.get2().get1();
Double bestInfoGain = b.get2().get2();
if (regIdx >= 0 && bestInfoGain > MIN_INFO_GAIN) {
before = System.currentTimeMillis();
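// Recompute the detailed split info for the winning (feature, region) pair on the node that owns that feature's data.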
SplitInfo bi = ignite.compute().affinityCall(ProjectionsCache.CACHE_NAME, input.affinityKey(bestFeatureIdx, ignite), () -> {
TrainingContext<ContinuousRegionInfo> ctx = ContextCache.getOrCreate(ignite).get(uuid);
Ignite ignite = Ignition.localIgnite();
RegionKey key = ProjectionsCache.key(bestFeatureIdx, regIdx / BLOCK_SIZE, input.affinityKey(bestFeatureIdx, Ignition.localIgnite()), uuid);
RegionProjection reg = ProjectionsCache.getOrCreate(ignite).localPeek(key).get(regIdx % BLOCK_SIZE);
return ctx.featureProcessor(bestFeatureIdx).findBestSplit(reg, ctx.values(bestFeatureIdx, ignite), ctx.labels(), regIdx);
});
long findBestSplit = System.currentTimeMillis() - before;
IndexAndSplitInfo best = new IndexAndSplitInfo(bestFeatureIdx, bi);
regsCnt++;
if (log.isDebugEnabled())
log.debug("Globally best: " + best.info + " idx time: " + findBestRegIdx + ", calculate best: " + findBestSplit + " fi: " + best.featureIdx + ", regs: " + regsCnt);
// Request bitset for split region.
int ind = best.info.regionIndex();
SparseBitSet bs = ignite.compute().affinityCall(ProjectionsCache.CACHE_NAME, input.affinityKey(bestFeatureIdx, ignite), () -> {
Ignite ignite = Ignition.localIgnite();
IgniteCache<FeatureKey, double[]> featuresCache = FeaturesCache.getOrCreate(ignite);
IgniteCache<UUID, TrainingContext<D>> ctxCache = ContextCache.getOrCreate(ignite);
TrainingContext ctx = ctxCache.localPeek(uuid);
double[] values = featuresCache.localPeek(getFeatureCacheKey(bestFeatureIdx, uuid, input.affinityKey(bestFeatureIdx, Ignition.localIgnite())));
RegionKey key = ProjectionsCache.key(bestFeatureIdx, regIdx / BLOCK_SIZE, input.affinityKey(bestFeatureIdx, Ignition.localIgnite()), uuid);
RegionProjection reg = ProjectionsCache.getOrCreate(ignite).localPeek(key).get(regIdx % BLOCK_SIZE);
return ctx.featureProcessor(bestFeatureIdx).calculateOwnershipBitSet(reg, values, best.info);
});
SplitNode sn = best.info.createSplitNode(best.featureIdx);
TreeTip tipToSplit = tips.get(ind);
tipToSplit.leafSetter.accept(sn);
tipToSplit.leafSetter = sn::setLeft;
int d = tipToSplit.depth++;
tips.add(new TreeTip(sn::setRight, d));
if (d > curDepth) {
curDepth = d;
if (log.isDebugEnabled()) {
log.debug("Depth: " + curDepth);
log.debug("Cache size: " + prjsCache.size(CachePeekMode.PRIMARY));
}
}
before = System.currentTimeMillis();
// Perform split on all feature vectors.
IgniteSupplier<Set<RegionKey>> bestRegsKeys = () -> IntStream.range(0, featuresCnt).mapToObj(fIdx -> ProjectionsCache.key(fIdx, ind / BLOCK_SIZE, input.affinityKey(fIdx, Ignition.localIgnite()), uuid)).collect(Collectors.toSet());
int rc = regsCnt;
// Perform split.
CacheUtils.update(prjsCache.getName(), ignite, (Ignite ign, Cache.Entry<RegionKey, List<RegionProjection>> e) -> {
RegionKey k = e.getKey();
List<RegionProjection> leftBlock = e.getValue();
int fIdx = k.featureIdx();
int idxInBlock = ind % BLOCK_SIZE;
IgniteCache<UUID, TrainingContext<D>> ctxCache = ContextCache.getOrCreate(ign);
TrainingContext<D> ctx = ctxCache.get(uuid);
RegionProjection targetRegProj = leftBlock.get(idxInBlock);
IgniteBiTuple<RegionProjection, RegionProjection> regs = ctx.performSplit(input, bs, fIdx, best.featureIdx, targetRegProj, best.info.leftData(), best.info.rightData(), ign);
RegionProjection left = regs.get1();
RegionProjection right = regs.get2();
leftBlock.set(idxInBlock, left);
RegionKey rightKey = ProjectionsCache.key(fIdx, (rc - 1) / BLOCK_SIZE, input.affinityKey(fIdx, ign), uuid);
IgniteCache<RegionKey, List<RegionProjection>> c = ProjectionsCache.getOrCreate(ign);
List<RegionProjection> rightBlock = rightKey.equals(k) ? leftBlock : c.localPeek(rightKey);
if (rightBlock == null) {
List<RegionProjection> newBlock = new ArrayList<>(BLOCK_SIZE);
newBlock.add(right);
return Stream.of(new CacheEntryImpl<>(k, leftBlock), new CacheEntryImpl<>(rightKey, newBlock));
} else {
rightBlock.add(right);
// When the right region ends up in the same block as the left one, a single updated entry is enough.
return rightKey.equals(k) ? Stream.of(new CacheEntryImpl<>(k, leftBlock)) : Stream.of(new CacheEntryImpl<>(k, leftBlock), new CacheEntryImpl<>(rightKey, rightBlock));
}
}, bestRegsKeys);
if (log.isDebugEnabled())
log.debug("Update of projections cache time: " + (System.currentTimeMillis() - before));
before = System.currentTimeMillis();
updateSplitCache(ind, rc, featuresCnt, ig -> i -> input.affinityKey(i, ig), uuid);
if (log.isDebugEnabled())
log.debug("Update of split cache time: " + (System.currentTimeMillis() - before));
} else {
if (log.isDebugEnabled())
log.debug("Best split [bestFeatureIdx=" + bestFeatureIdx + ", bestInfoGain=" + bestInfoGain + "]");
break;
}
}
int rc = regsCnt;
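// Lazily iterate the region blocks of feature 0; they enumerate every region exactly once.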
IgniteSupplier<Iterable<Cache.Entry<RegionKey, List<RegionProjection>>>> featZeroRegs = () -> {
IgniteCache<RegionKey, List<RegionProjection>> projsCache = ProjectionsCache.getOrCreate(Ignition.localIgnite());
return () -> IntStream.range(0, (rc - 1) / BLOCK_SIZE + 1).mapToObj(rBIdx -> ProjectionsCache.key(0, rBIdx, input.affinityKey(0, Ignition.localIgnite()), uuid)).map(k -> (Cache.Entry<RegionKey, List<RegionProjection>>) new CacheEntryImpl<>(k, projsCache.localPeek(k))).iterator();
};
Map<Integer, Double> vals = CacheUtils.reduce(prjsCache.getName(), ignite, (TrainingContext ctx, Cache.Entry<RegionKey, List<RegionProjection>> e, Map<Integer, Double> m) -> {
int regBlockIdx = e.getKey().regionBlockIndex();
if (e.getValue() != null) {
for (int i = 0; i < e.getValue().size(); i++) {
int regIdx = regBlockIdx * BLOCK_SIZE + i;
RegionProjection reg = e.getValue().get(i);
Double res = regCalc.apply(Arrays.stream(reg.sampleIndexes()).mapToDouble(s -> ctx.labels()[s]));
m.put(regIdx, res);
}
}
return m;
}, () -> ContextCache.getOrCreate(Ignition.localIgnite()).get(uuid), featZeroRegs, (infos, infos2) -> {
Map<Integer, Double> res = new HashMap<>();
res.putAll(infos);
res.putAll(infos2);
return res;
}, HashMap::new);
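// Turn every tree tip into a leaf holding the value computed for its region.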
int i = 0;
for (TreeTip tip : tips) {
tip.leafSetter.accept(new Leaf(vals.get(i)));
i++;
}
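// Drop all per-training caches for this session before returning the model.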
ProjectionsCache.clear(featuresCnt, rc, input::affinityKey, uuid, ignite);
ContextCache.getOrCreate(ignite).remove(uuid);
FeaturesCache.clear(featuresCnt, input::affinityKey, uuid, ignite);
SplitCache.clear(featuresCnt, input::affinityKey, uuid, ignite);
return new DecisionTreeModel(root.s);
}
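Across all of the snippets above, CacheEntryImpl serves simply as a concrete, detached javax.cache.Cache.Entry holder: entries are built locally, written in bulk, returned from update closures, or assembled into iterables for reduce steps. Note that the class lives in Ignite's internal package, so consuming code should refer to it through the public Cache.Entry interface. A self-contained sketch of that basic usage (key and value types are illustrative):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import javax.cache.Cache;
import org.apache.ignite.internal.processors.cache.CacheEntryImpl;

public class CacheEntryImplSketch {
    public static void main(String[] args) {
        // Build detached Cache.Entry instances without touching a running cache.
        List<Cache.Entry<Integer, String>> entries = IntStream.range(0, 3)
            .mapToObj(i -> (Cache.Entry<Integer, String>)new CacheEntryImpl<>(i, "val" + i))
            .collect(Collectors.toList());

        // Collections like this are what store.writeAll(...) and the update/reduce closures above consume.
        entries.forEach(e -> System.out.println(e.getKey() + " -> " + e.getValue()));
    }
}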