Use of org.apache.ignite.cache.store.CacheStore in project ignite by apache.
From the class GridCacheProcessor, method createCache:
/**
 * Creates and fully wires a {@link GridCacheContext} for the given configuration:
 * prepares and instantiates the configured cache store, validates the configuration,
 * creates all per-cache managers, builds the cache adapter matching the configured
 * cache mode / atomicity mode, and — for near-enabled caches — creates a second
 * (DHT) context that shares selected managers with the near cache.
 *
 * @param cfg Cache configuration to use to create cache.
 * @param pluginMgr Cache plugin manager.
 * @param desc Cache descriptor.
 * @param locStartTopVer Current topology version.
 * @param cacheObjCtx Cache object context.
 * @param affNode {@code True} if local node affinity node.
 * @param updatesAllowed Updates allowed flag.
 * @return Cache context.
 * @throws IgniteCheckedException If failed to create cache.
 */
private GridCacheContext createCache(CacheConfiguration<?, ?> cfg, @Nullable CachePluginManager pluginMgr, DynamicCacheDescriptor desc, AffinityTopologyVersion locStartTopVer, CacheObjectContext cacheObjCtx, boolean affNode, boolean updatesAllowed) throws IgniteCheckedException {
assert cfg != null;
// A composite loader/writer store factory is unwrapped so that the underlying
// loader and writer factories are each prepared (resources injected) individually.
if (cfg.getCacheStoreFactory() instanceof GridCacheLoaderWriterStoreFactory) {
GridCacheLoaderWriterStoreFactory factory = (GridCacheLoaderWriterStoreFactory) cfg.getCacheStoreFactory();
prepare(cfg, factory.loaderFactory(), false);
prepare(cfg, factory.writerFactory(), false);
} else
prepare(cfg, cfg.getCacheStoreFactory(), false);
// Instantiate the configured store (may be null when no store factory is set).
CacheStore cfgStore = cfg.getCacheStoreFactory() != null ? cfg.getCacheStoreFactory().create() : null;
validate(ctx.config(), cfg, desc.cacheType(), cfgStore);
// Fall back to a default plugin manager when the caller did not supply one.
if (pluginMgr == null)
pluginMgr = new CachePluginManager(ctx, cfg);
pluginMgr.validate();
sharedCtx.jta().registerCache(cfg);
// Skip suggestions for internal caches.
if (desc.cacheType().userCache())
suggestOptimizations(cfg, cfgStore != null);
// Prepare (inject resources into) either the unwrapped loader/writer pair
// or the store itself.
Collection<Object> toPrepare = new ArrayList<>();
if (cfgStore instanceof GridCacheLoaderWriterStore) {
toPrepare.add(((GridCacheLoaderWriterStore) cfgStore).loader());
toPrepare.add(((GridCacheLoaderWriterStore) cfgStore).writer());
} else
toPrepare.add(cfgStore);
prepare(cfg, toPrepare);
// Notify lifecycle-aware components (store, interceptors, etc.) of cache start.
U.startLifecycleAware(lifecycleAwares(cfg, cfgStore));
boolean nearEnabled = GridCacheUtils.isNearEnabled(cfg);
// Create per-cache managers. NOTE(review): on-heap eviction manager is used when
// near cache or on-heap caching is enabled, off-heap eviction manager otherwise.
GridCacheAffinityManager affMgr = new GridCacheAffinityManager();
GridCacheEventManager evtMgr = new GridCacheEventManager();
CacheEvictionManager evictMgr = (nearEnabled || cfg.isOnheapCacheEnabled()) ? new GridCacheEvictionManager() : new CacheOffheapEvictionManager();
GridCacheQueryManager qryMgr = queryManager(cfg);
CacheContinuousQueryManager contQryMgr = new CacheContinuousQueryManager();
CacheDataStructuresManager dataStructuresMgr = new CacheDataStructuresManager();
GridCacheTtlManager ttlMgr = new GridCacheTtlManager();
// Pluggable components may be overridden by cache plugins.
CacheConflictResolutionManager rslvrMgr = pluginMgr.createComponent(CacheConflictResolutionManager.class);
GridCacheDrManager drMgr = pluginMgr.createComponent(GridCacheDrManager.class);
CacheStoreManager storeMgr = pluginMgr.createComponent(CacheStoreManager.class);
IgniteCacheOffheapManager offheapMgr = pluginMgr.createComponent(IgniteCacheOffheapManager.class);
storeMgr.initialize(cfgStore, sesHolders);
// Resolve memory policy and the free/reuse lists bound to it.
String memPlcName = cfg.getMemoryPolicyName();
MemoryPolicy memPlc = sharedCtx.database().memoryPolicy(memPlcName);
FreeList freeList = sharedCtx.database().freeList(memPlcName);
ReuseList reuseList = sharedCtx.database().reuseList(memPlcName);
GridCacheContext<?, ?> cacheCtx = new GridCacheContext(ctx, sharedCtx, cfg, desc.cacheType(), locStartTopVer, desc.receivedFrom(), affNode, updatesAllowed, memPlc, freeList, reuseList, /*
* Managers in starting order!
* ===========================
*/
evtMgr, storeMgr, evictMgr, qryMgr, contQryMgr, dataStructuresMgr, ttlMgr, drMgr, offheapMgr, rslvrMgr, pluginMgr, affMgr);
cacheCtx.cacheObjectContext(cacheObjCtx);
// Pick the cache adapter implementation based on cache mode and atomicity mode.
GridCacheAdapter cache = null;
switch(cfg.getCacheMode()) {
case LOCAL:
{
switch(cfg.getAtomicityMode()) {
case TRANSACTIONAL:
{
cache = new GridLocalCache(cacheCtx);
break;
}
case ATOMIC:
{
cache = new GridLocalAtomicCache(cacheCtx);
break;
}
default:
{
assert false : "Invalid cache atomicity mode: " + cfg.getAtomicityMode();
}
}
break;
}
case PARTITIONED:
case REPLICATED:
{
if (nearEnabled) {
switch(cfg.getAtomicityMode()) {
case TRANSACTIONAL:
{
cache = new GridNearTransactionalCache(cacheCtx);
break;
}
case ATOMIC:
{
cache = new GridNearAtomicCache(cacheCtx);
break;
}
default:
{
assert false : "Invalid cache atomicity mode: " + cfg.getAtomicityMode();
}
}
} else {
// Non-affinity nodes get a no-storage map — they route but do not store entries.
switch(cfg.getAtomicityMode()) {
case TRANSACTIONAL:
{
cache = cacheCtx.affinityNode() ? new GridDhtColocatedCache(cacheCtx) : new GridDhtColocatedCache(cacheCtx, new GridNoStorageCacheMap(cacheCtx));
break;
}
case ATOMIC:
{
cache = cacheCtx.affinityNode() ? new GridDhtAtomicCache(cacheCtx) : new GridDhtAtomicCache(cacheCtx, new GridNoStorageCacheMap(cacheCtx));
break;
}
default:
{
assert false : "Invalid cache atomicity mode: " + cfg.getAtomicityMode();
}
}
}
break;
}
default:
{
assert false : "Invalid cache mode: " + cfg.getCacheMode();
}
}
cacheCtx.cache(cache);
// The near (or only) context is what gets returned; the DHT context below is internal.
GridCacheContext<?, ?> ret = cacheCtx;
/*
* Create DHT cache.
* ================
*/
if (cfg.getCacheMode() != LOCAL && nearEnabled) {
/*
* Specifically don't create the following managers
* here and reuse the one from Near cache:
* 1. GridCacheVersionManager
* 2. GridCacheIoManager
* 3. GridCacheDeploymentManager
* 4. GridCacheQueryManager (note, that we start it for DHT cache though).
* 5. CacheContinuousQueryManager (note, that we start it for DHT cache though).
* 6. GridCacheDgcManager
* 7. GridCacheTtlManager.
* ===============================================
*/
evictMgr = cfg.isOnheapCacheEnabled() ? new GridCacheEvictionManager() : new CacheOffheapEvictionManager();
evtMgr = new GridCacheEventManager();
pluginMgr = new CachePluginManager(ctx, cfg);
drMgr = pluginMgr.createComponent(GridCacheDrManager.class);
cacheCtx = new GridCacheContext(ctx, sharedCtx, cfg, desc.cacheType(), locStartTopVer, desc.receivedFrom(), affNode, true, memPlc, freeList, reuseList, /*
* Managers in starting order!
* ===========================
*/
evtMgr, storeMgr, evictMgr, qryMgr, contQryMgr, dataStructuresMgr, ttlMgr, drMgr, offheapMgr, rslvrMgr, pluginMgr, affMgr);
cacheCtx.cacheObjectContext(cacheObjCtx);
GridDhtCacheAdapter dht = null;
switch(cfg.getAtomicityMode()) {
case TRANSACTIONAL:
{
assert cache instanceof GridNearTransactionalCache;
GridNearTransactionalCache near = (GridNearTransactionalCache) cache;
GridDhtCache dhtCache = cacheCtx.affinityNode() ? new GridDhtCache(cacheCtx) : new GridDhtCache(cacheCtx, new GridNoStorageCacheMap(cacheCtx));
// Cross-link the near and DHT caches.
dhtCache.near(near);
near.dht(dhtCache);
dht = dhtCache;
break;
}
case ATOMIC:
{
assert cache instanceof GridNearAtomicCache;
GridNearAtomicCache near = (GridNearAtomicCache) cache;
GridDhtAtomicCache dhtCache = cacheCtx.affinityNode() ? new GridDhtAtomicCache(cacheCtx) : new GridDhtAtomicCache(cacheCtx, new GridNoStorageCacheMap(cacheCtx));
dhtCache.near(near);
near.dht(dhtCache);
dht = dhtCache;
break;
}
default:
{
assert false : "Invalid cache atomicity mode: " + cfg.getAtomicityMode();
}
}
cacheCtx.cache(dht);
}
// Register management beans for user caches only (utility/system caches are skipped).
if (!CU.isUtilityCache(cache.name()) && !CU.isSystemCache(cache.name())) {
registerMbean(cache.localMxBean(), cache.name(), false);
registerMbean(cache.clusterMxBean(), cache.name(), false);
}
return ret;
}
Use of org.apache.ignite.cache.store.CacheStore in project ignite by apache.
From the class CassandraDirectPersistenceTest, method primitiveStrategyTest:
/**
 * Verifies the PRIMITIVE persistence strategy: single and bulk write, read and
 * delete operations for Long and String key/value types against Cassandra,
 * including lookups of keys known to be absent.
 */
@Test
@SuppressWarnings("unchecked")
public void primitiveStrategyTest() {
    CacheStore longStore = CacheStoreHelper.createCacheStore("longTypes", new ClassPathResource("org/apache/ignite/tests/persistence/primitive/persistence-settings-1.xml"), CassandraHelper.getAdminDataSrc());
    CacheStore strStore = CacheStoreHelper.createCacheStore("stringTypes", new ClassPathResource("org/apache/ignite/tests/persistence/primitive/persistence-settings-2.xml"), CassandraHelper.getAdminDataSrc());
    Collection<CacheEntryImpl<Long, Long>> longEntries = TestsHelper.generateLongsEntries();
    Collection<CacheEntryImpl<String, String>> strEntries = TestsHelper.generateStringsEntries();
    // Real keys plus four negative keys that are guaranteed to be absent from Cassandra.
    Collection<Long> fakeLongKeys = TestsHelper.getKeys(longEntries);
    Collection<String> fakeStrKeys = TestsHelper.getKeys(strEntries);
    for (int i = 1; i <= 4; i++) {
        fakeLongKeys.add((long) -i);
        fakeStrKeys.add(String.valueOf(-i));
    }
    LOGGER.info("Running PRIMITIVE strategy write tests");
    LOGGER.info("Running single write operation tests");
    longStore.write(longEntries.iterator().next());
    strStore.write(strEntries.iterator().next());
    LOGGER.info("Single write operation tests passed");
    LOGGER.info("Running bulk write operation tests");
    longStore.writeAll(longEntries);
    strStore.writeAll(strEntries);
    LOGGER.info("Bulk write operation tests passed");
    LOGGER.info("PRIMITIVE strategy write tests passed");
    LOGGER.info("Running PRIMITIVE strategy read tests");
    LOGGER.info("Running single read operation tests");
    LOGGER.info("Running real keys read tests");
    Long loadedLong = (Long) longStore.load(longEntries.iterator().next().getKey());
    if (!longEntries.iterator().next().getValue().equals(loadedLong))
        throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");
    String loadedStr = (String) strStore.load(strEntries.iterator().next().getKey());
    if (!strEntries.iterator().next().getValue().equals(loadedStr))
        throw new RuntimeException("String values were incorrectly deserialized from Cassandra");
    LOGGER.info("Running fake keys read tests");
    // Absent keys must produce null, not a phantom value.
    loadedLong = (Long) longStore.load(-1L);
    if (loadedLong != null)
        throw new RuntimeException("Long value with fake key '-1' was found in Cassandra");
    loadedStr = (String) strStore.load("-1");
    if (loadedStr != null)
        throw new RuntimeException("String value with fake key '-1' was found in Cassandra");
    LOGGER.info("Single read operation tests passed");
    LOGGER.info("Running bulk read operation tests");
    LOGGER.info("Running real keys read tests");
    Map loadedLongs = longStore.loadAll(TestsHelper.getKeys(longEntries));
    if (!TestsHelper.checkCollectionsEqual(loadedLongs, longEntries))
        throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");
    Map loadedStrs = strStore.loadAll(TestsHelper.getKeys(strEntries));
    if (!TestsHelper.checkCollectionsEqual(loadedStrs, strEntries))
        throw new RuntimeException("String values were incorrectly deserialized from Cassandra");
    LOGGER.info("Running fake keys read tests");
    // loadAll() over a mix of real and fake keys must return exactly the real entries.
    loadedLongs = longStore.loadAll(fakeLongKeys);
    if (!TestsHelper.checkCollectionsEqual(loadedLongs, longEntries))
        throw new RuntimeException("Long values were incorrectly deserialized from Cassandra");
    loadedStrs = strStore.loadAll(fakeStrKeys);
    if (!TestsHelper.checkCollectionsEqual(loadedStrs, strEntries))
        throw new RuntimeException("String values were incorrectly deserialized from Cassandra");
    LOGGER.info("Bulk read operation tests passed");
    LOGGER.info("PRIMITIVE strategy read tests passed");
    LOGGER.info("Running PRIMITIVE strategy delete tests");
    LOGGER.info("Deleting real keys");
    longStore.delete(longEntries.iterator().next().getKey());
    longStore.deleteAll(TestsHelper.getKeys(longEntries));
    strStore.delete(strEntries.iterator().next().getKey());
    strStore.deleteAll(TestsHelper.getKeys(strEntries));
    LOGGER.info("Deleting fake keys");
    // Deleting non-existent keys must not fail.
    longStore.delete(-1L);
    strStore.delete("-1");
    longStore.deleteAll(fakeLongKeys);
    strStore.deleteAll(fakeStrKeys);
    LOGGER.info("PRIMITIVE strategy delete tests passed");
}
Use of org.apache.ignite.cache.store.CacheStore in project ignite by apache.
From the class CassandraDirectPersistenceTest, method pojoStrategyTest:
/**
 * Verifies the POJO persistence strategy: single and bulk write, read and delete
 * operations for Person, Product and ProductOrder objects persisted to Cassandra
 * under several different persistence-settings mappings.
 */
@Test
@SuppressWarnings("unchecked")
public void pojoStrategyTest() {
    CacheStore personStore1 = CacheStoreHelper.createCacheStore("longTypes", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-1.xml"), CassandraHelper.getAdminDataSrc());
    CacheStore personStore2 = CacheStoreHelper.createCacheStore("personTypes", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-2.xml"), CassandraHelper.getAdminDataSrc());
    CacheStore personStore3 = CacheStoreHelper.createCacheStore("personTypes", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-3.xml"), CassandraHelper.getAdminDataSrc());
    CacheStore personStore4 = CacheStoreHelper.createCacheStore("persons", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/persistence-settings-4.xml"), CassandraHelper.getAdminDataSrc());
    CacheStore prodStore = CacheStoreHelper.createCacheStore("product", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/product.xml"), CassandraHelper.getAdminDataSrc());
    CacheStore ordStore = CacheStoreHelper.createCacheStore("order", new ClassPathResource("org/apache/ignite/tests/persistence/pojo/order.xml"), CassandraHelper.getAdminDataSrc());
    Collection<CacheEntryImpl<Long, Person>> personEntries1 = TestsHelper.generateLongsPersonsEntries();
    Collection<CacheEntryImpl<PersonId, Person>> personEntries2 = TestsHelper.generatePersonIdsPersonsEntries();
    Collection<CacheEntryImpl<PersonId, Person>> personEntries3 = TestsHelper.generatePersonIdsPersonsEntries();
    Collection<CacheEntryImpl<Long, Product>> prodEntries = TestsHelper.generateProductEntries();
    Collection<CacheEntryImpl<Long, ProductOrder>> ordEntries = TestsHelper.generateOrderEntries();
    // First entry of each collection, used for all single-operation tests below.
    CacheEntryImpl<Long, Person> firstPerson1 = personEntries1.iterator().next();
    CacheEntryImpl<PersonId, Person> firstPerson2 = personEntries2.iterator().next();
    CacheEntryImpl<PersonId, Person> firstPerson3 = personEntries3.iterator().next();
    CacheEntryImpl<Long, Product> firstProd = prodEntries.iterator().next();
    CacheEntryImpl<Long, ProductOrder> firstOrd = ordEntries.iterator().next();
    LOGGER.info("Running POJO strategy write tests");
    LOGGER.info("Running single write operation tests");
    personStore1.write(firstPerson1);
    personStore2.write(firstPerson2);
    personStore3.write(firstPerson3);
    personStore4.write(firstPerson3);
    prodStore.write(firstProd);
    ordStore.write(firstOrd);
    LOGGER.info("Single write operation tests passed");
    LOGGER.info("Running bulk write operation tests");
    personStore1.writeAll(personEntries1);
    personStore2.writeAll(personEntries2);
    personStore3.writeAll(personEntries3);
    personStore4.writeAll(personEntries3);
    prodStore.writeAll(prodEntries);
    ordStore.writeAll(ordEntries);
    LOGGER.info("Bulk write operation tests passed");
    LOGGER.info("POJO strategy write tests passed");
    LOGGER.info("Running POJO strategy read tests");
    LOGGER.info("Running single read operation tests");
    // Stores 1 and 2 map only primitive fields; stores 3 and 4 round-trip the full object.
    Person loadedPerson = (Person) personStore1.load(firstPerson1.getKey());
    if (!firstPerson1.getValue().equalsPrimitiveFields(loadedPerson))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
    loadedPerson = (Person) personStore2.load(firstPerson2.getKey());
    if (!firstPerson2.getValue().equalsPrimitiveFields(loadedPerson))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
    loadedPerson = (Person) personStore3.load(firstPerson3.getKey());
    if (!firstPerson3.getValue().equals(loadedPerson))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
    loadedPerson = (Person) personStore4.load(firstPerson3.getKey());
    if (!firstPerson3.getValue().equals(loadedPerson))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
    Product loadedProd = (Product) prodStore.load(firstProd.getKey());
    if (!firstProd.getValue().equals(loadedProd))
        throw new RuntimeException("Product values were incorrectly deserialized from Cassandra");
    ProductOrder loadedOrd = (ProductOrder) ordStore.load(firstOrd.getKey());
    if (!firstOrd.getValue().equals(loadedOrd))
        throw new RuntimeException("Order values were incorrectly deserialized from Cassandra");
    LOGGER.info("Single read operation tests passed");
    LOGGER.info("Running bulk read operation tests");
    Map loadedPersons = personStore1.loadAll(TestsHelper.getKeys(personEntries1));
    if (!TestsHelper.checkPersonCollectionsEqual(loadedPersons, personEntries1, true))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
    loadedPersons = personStore2.loadAll(TestsHelper.getKeys(personEntries2));
    if (!TestsHelper.checkPersonCollectionsEqual(loadedPersons, personEntries2, true))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
    loadedPersons = personStore3.loadAll(TestsHelper.getKeys(personEntries3));
    if (!TestsHelper.checkPersonCollectionsEqual(loadedPersons, personEntries3, false))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
    loadedPersons = personStore4.loadAll(TestsHelper.getKeys(personEntries3));
    if (!TestsHelper.checkPersonCollectionsEqual(loadedPersons, personEntries3, false))
        throw new RuntimeException("Person values were incorrectly deserialized from Cassandra");
    Map loadedProds = prodStore.loadAll(TestsHelper.getKeys(prodEntries));
    if (!TestsHelper.checkProductCollectionsEqual(loadedProds, prodEntries))
        throw new RuntimeException("Product values were incorrectly deserialized from Cassandra");
    Map loadedOrds = ordStore.loadAll(TestsHelper.getKeys(ordEntries));
    if (!TestsHelper.checkOrderCollectionsEqual(loadedOrds, ordEntries))
        throw new RuntimeException("Order values were incorrectly deserialized from Cassandra");
    LOGGER.info("Bulk read operation tests passed");
    LOGGER.info("POJO strategy read tests passed");
    LOGGER.info("Running POJO strategy delete tests");
    personStore1.delete(firstPerson1.getKey());
    personStore1.deleteAll(TestsHelper.getKeys(personEntries1));
    personStore2.delete(firstPerson2.getKey());
    personStore2.deleteAll(TestsHelper.getKeys(personEntries2));
    personStore3.delete(firstPerson3.getKey());
    personStore3.deleteAll(TestsHelper.getKeys(personEntries3));
    personStore4.delete(firstPerson3.getKey());
    personStore4.deleteAll(TestsHelper.getKeys(personEntries3));
    prodStore.delete(firstProd.getKey());
    prodStore.deleteAll(TestsHelper.getKeys(prodEntries));
    ordStore.delete(firstOrd.getKey());
    ordStore.deleteAll(TestsHelper.getKeys(ordEntries));
    LOGGER.info("POJO strategy delete tests passed");
}
Use of org.apache.ignite.cache.store.CacheStore in project ignite by apache.
From the class GridCacheProcessor, method createCacheContext:
/**
 * Creates and fully wires a {@link GridCacheContext} within the given cache group:
 * prepares and instantiates the configured cache store, validates the configuration,
 * creates all per-cache managers, builds the cache adapter matching the configured
 * cache mode / atomicity mode, and — for near-enabled caches — creates a second
 * (DHT) context that shares selected managers with the near cache.
 *
 * @param cfg Cache configuration to use to create cache.
 * @param grp Cache group.
 * @param pluginMgr Cache plugin manager.
 * @param desc Cache descriptor.
 * @param locStartTopVer Current topology version.
 * @param cacheObjCtx Cache object context.
 * @param affNode {@code True} if local node affinity node.
 * @param updatesAllowed Updates allowed flag.
 * @param disabledAfterStart If true, then we will discard restarting state from proxies. If false then we will
 * change state of proxies to restarting
 * @param recoveryMode {@code True} if the cache is being started in recovery mode.
 * @return Cache context.
 * @throws IgniteCheckedException If failed to create cache.
 */
private GridCacheContext<?, ?> createCacheContext(CacheConfiguration<?, ?> cfg, CacheGroupContext grp, @Nullable CachePluginManager pluginMgr, DynamicCacheDescriptor desc, AffinityTopologyVersion locStartTopVer, CacheObjectContext cacheObjCtx, boolean affNode, boolean updatesAllowed, boolean disabledAfterStart, boolean recoveryMode) throws IgniteCheckedException {
assert cfg != null;
// A composite loader/writer store factory is unwrapped so that the underlying
// loader and writer factories are each prepared (resources injected) individually.
if (cfg.getCacheStoreFactory() instanceof GridCacheLoaderWriterStoreFactory) {
GridCacheLoaderWriterStoreFactory factory = (GridCacheLoaderWriterStoreFactory) cfg.getCacheStoreFactory();
prepare(cfg, factory.loaderFactory(), false);
prepare(cfg, factory.writerFactory(), false);
} else
prepare(cfg, cfg.getCacheStoreFactory(), false);
// Instantiate the configured store (may be null when no store factory is set).
CacheStore cfgStore = cfg.getCacheStoreFactory() != null ? cfg.getCacheStoreFactory().create() : null;
// Validation callback converts assertParameter() failures into returned exceptions.
ValidationOnNodeJoinUtils.validate(ctx.config(), cfg, desc.cacheType(), cfgStore, ctx, log, (x, y) -> {
try {
assertParameter(x, y);
} catch (IgniteCheckedException ex) {
return ex;
}
return null;
});
// Fall back to a default plugin manager when the caller did not supply one.
if (pluginMgr == null)
pluginMgr = new CachePluginManager(ctx, cfg);
pluginMgr.validate();
// MVCC caches need the MVCC coordinator running before cache start (skipped in recovery).
if (!recoveryMode && cfg.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT && grp.affinityNode())
sharedCtx.coordinators().ensureStarted();
sharedCtx.jta().registerCache(cfg);
// Skip suggestions for internal caches.
if (desc.cacheType().userCache())
suggestOptimizations(cfg, cfgStore != null);
// Prepare (inject resources into) either the unwrapped loader/writer pair
// or the store itself.
Collection<Object> toPrepare = new ArrayList<>();
if (cfgStore instanceof GridCacheLoaderWriterStore) {
toPrepare.add(((GridCacheLoaderWriterStore) cfgStore).loader());
toPrepare.add(((GridCacheLoaderWriterStore) cfgStore).writer());
} else
toPrepare.add(cfgStore);
prepare(cfg, toPrepare);
// Notify lifecycle-aware components (store, interceptors, etc.) of cache start.
U.startLifecycleAware(lifecycleAwares(grp, cfg, cfgStore));
boolean nearEnabled = GridCacheUtils.isNearEnabled(cfg);
// Create per-cache managers. NOTE(review): on-heap eviction manager is used when
// near cache or on-heap caching is enabled, off-heap eviction manager otherwise.
CacheCompressionManager compressMgr = new CacheCompressionManager();
GridCacheAffinityManager affMgr = new GridCacheAffinityManager();
GridCacheEventManager evtMgr = new GridCacheEventManager();
CacheEvictionManager evictMgr = (nearEnabled || cfg.isOnheapCacheEnabled()) ? new GridCacheEvictionManager() : new CacheOffheapEvictionManager();
GridCacheQueryManager qryMgr = cfg.getCacheMode() == LOCAL ? new GridCacheLocalQueryManager() : new GridCacheDistributedQueryManager();
CacheContinuousQueryManager contQryMgr = new CacheContinuousQueryManager();
CacheDataStructuresManager dataStructuresMgr = new CacheDataStructuresManager();
GridCacheTtlManager ttlMgr = new GridCacheTtlManager();
// Pluggable components may be overridden by cache plugins.
CacheConflictResolutionManager rslvrMgr = pluginMgr.createComponent(CacheConflictResolutionManager.class);
GridCacheDrManager drMgr = pluginMgr.createComponent(GridCacheDrManager.class);
CacheStoreManager storeMgr = pluginMgr.createComponent(CacheStoreManager.class);
PlatformCacheManager platformMgr = ctx.platform().cacheManager();
// A non-null store is initialized under the initialization protector to guard
// against concurrent initialization of the same store instance.
if (cfgStore == null)
storeMgr.initialize(cfgStore, sesHolders);
else
initializationProtector.protect(cfgStore, () -> storeMgr.initialize(cfgStore, sesHolders));
GridCacheContext<?, ?> cacheCtx = new GridCacheContext(ctx, sharedCtx, cfg, grp, desc.cacheType(), locStartTopVer, desc.deploymentId(), affNode, updatesAllowed, desc.cacheConfiguration().isStatisticsEnabled(), recoveryMode, /*
* Managers in starting order!
* ===========================
*/
compressMgr, evtMgr, storeMgr, evictMgr, qryMgr, contQryMgr, dataStructuresMgr, ttlMgr, drMgr, rslvrMgr, pluginMgr, affMgr, platformMgr);
cacheCtx.cacheObjectContext(cacheObjCtx);
// Pick the cache adapter implementation based on cache mode and atomicity mode.
GridCacheAdapter cache = null;
switch(cfg.getCacheMode()) {
case LOCAL:
{
switch(cfg.getAtomicityMode()) {
case TRANSACTIONAL:
case TRANSACTIONAL_SNAPSHOT:
{
cache = new GridLocalCache(cacheCtx);
break;
}
case ATOMIC:
{
cache = new GridLocalAtomicCache(cacheCtx);
break;
}
default:
{
assert false : "Invalid cache atomicity mode: " + cfg.getAtomicityMode();
}
}
break;
}
case PARTITIONED:
case REPLICATED:
{
if (nearEnabled) {
switch(cfg.getAtomicityMode()) {
case TRANSACTIONAL:
case TRANSACTIONAL_SNAPSHOT:
{
cache = new GridNearTransactionalCache(cacheCtx);
break;
}
case ATOMIC:
{
cache = new GridNearAtomicCache(cacheCtx);
break;
}
default:
{
assert false : "Invalid cache atomicity mode: " + cfg.getAtomicityMode();
}
}
} else {
// Non-affinity nodes get a no-storage map — they route but do not store entries.
switch(cfg.getAtomicityMode()) {
case TRANSACTIONAL:
case TRANSACTIONAL_SNAPSHOT:
{
cache = cacheCtx.affinityNode() ? new GridDhtColocatedCache(cacheCtx) : new GridDhtColocatedCache(cacheCtx, new GridNoStorageCacheMap());
break;
}
case ATOMIC:
{
cache = cacheCtx.affinityNode() ? new GridDhtAtomicCache(cacheCtx) : new GridDhtAtomicCache(cacheCtx, new GridNoStorageCacheMap());
break;
}
default:
{
assert false : "Invalid cache atomicity mode: " + cfg.getAtomicityMode();
}
}
}
break;
}
default:
{
assert false : "Invalid cache mode: " + cfg.getCacheMode();
}
}
cache.active(!disabledAfterStart);
cacheCtx.cache(cache);
// The near (or only) context is what gets returned; the DHT context below is internal.
GridCacheContext<?, ?> ret = cacheCtx;
/*
* Create DHT cache.
* ================
*/
if (cfg.getCacheMode() != LOCAL && nearEnabled) {
/*
* Specifically don't create the following managers
* here and reuse the one from Near cache:
* 1. GridCacheVersionManager
* 2. GridCacheIoManager
* 3. GridCacheDeploymentManager
* 4. GridCacheQueryManager (note, that we start it for DHT cache though).
* 5. CacheContinuousQueryManager (note, that we start it for DHT cache though).
* 6. GridCacheDgcManager
* 7. GridCacheTtlManager.
* 8. PlatformCacheManager.
* ===============================================
*/
evictMgr = cfg.isOnheapCacheEnabled() ? new GridCacheEvictionManager() : new CacheOffheapEvictionManager();
evtMgr = new GridCacheEventManager();
pluginMgr = new CachePluginManager(ctx, cfg);
drMgr = pluginMgr.createComponent(GridCacheDrManager.class);
cacheCtx = new GridCacheContext(ctx, sharedCtx, cfg, grp, desc.cacheType(), locStartTopVer, desc.deploymentId(), affNode, true, desc.cacheConfiguration().isStatisticsEnabled(), recoveryMode, /*
* Managers in starting order!
* ===========================
*/
compressMgr, evtMgr, storeMgr, evictMgr, qryMgr, contQryMgr, dataStructuresMgr, ttlMgr, drMgr, rslvrMgr, pluginMgr, affMgr, platformMgr);
cacheCtx.cacheObjectContext(cacheObjCtx);
GridDhtCacheAdapter dht = null;
switch(cfg.getAtomicityMode()) {
case TRANSACTIONAL:
case TRANSACTIONAL_SNAPSHOT:
{
assert cache instanceof GridNearTransactionalCache;
GridNearTransactionalCache near = (GridNearTransactionalCache) cache;
GridDhtCache dhtCache = cacheCtx.affinityNode() ? new GridDhtCache(cacheCtx) : new GridDhtCache(cacheCtx, new GridNoStorageCacheMap());
// Cross-link the near and DHT caches.
dhtCache.near(near);
near.dht(dhtCache);
dht = dhtCache;
break;
}
case ATOMIC:
{
assert cache instanceof GridNearAtomicCache;
GridNearAtomicCache near = (GridNearAtomicCache) cache;
GridDhtAtomicCache dhtCache = cacheCtx.affinityNode() ? new GridDhtAtomicCache(cacheCtx) : new GridDhtAtomicCache(cacheCtx, new GridNoStorageCacheMap());
dhtCache.near(near);
near.dht(dhtCache);
dht = dhtCache;
break;
}
default:
{
assert false : "Invalid cache atomicity mode: " + cfg.getAtomicityMode();
}
}
cacheCtx.cache(dht);
}
// Register management beans for user caches only (utility/system caches are skipped).
if (!CU.isUtilityCache(cache.name()) && !CU.isSystemCache(cache.name())) {
registerMbean(cache.localMxBean(), cache.name(), false);
registerMbean(cache.clusterMxBean(), cache.name(), false);
}
return ret;
}
Use of org.apache.ignite.cache.store.CacheStore in project ignite by apache.
From the class ClientAbstractSelfTest, method cacheConfiguration:
/**
 * Builds a cache configuration for the given cache name: picks the cache mode from
 * the name, enables read/write-through against a shared in-memory store, and adds
 * one backup for partitioned caches.
 *
 * @param cacheName Cache name.
 * @return Cache configuration.
 * @throws Exception In case of error.
 */
@SuppressWarnings("unchecked")
private static CacheConfiguration cacheConfiguration(@NotNull final String cacheName) throws Exception {
    CacheConfiguration cfg = defaultCacheConfiguration();
    // Cache mode is encoded in the cache name.
    if (DEFAULT_CACHE_NAME.equals(cacheName) || CACHE_NAME.equals(cacheName))
        cfg.setCacheMode(LOCAL);
    else if ("replicated".equals(cacheName))
        cfg.setCacheMode(REPLICATED);
    else
        cfg.setCacheMode(PARTITIONED);
    cfg.setName(cacheName);
    cfg.setWriteSynchronizationMode(FULL_SYNC);
    // All configurations for the same cache name share a single HashMapStore instance.
    cfg.setCacheStoreFactory(new Factory<CacheStore>() {
        @Override
        public CacheStore create() {
            synchronized (cacheStores) {
                HashMapStore store = cacheStores.get(cacheName);
                if (store != null)
                    return store;
                store = new HashMapStore();
                cacheStores.put(cacheName, store);
                return store;
            }
        }
    });
    cfg.setWriteThrough(true);
    cfg.setReadThrough(true);
    cfg.setLoadPreviousValue(true);
    if (cfg.getCacheMode() == PARTITIONED)
        cfg.setBackups(1);
    return cfg;
}
Aggregations