Use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
The class GridCacheCommandHandler, method executeCommand.
/**
 * Executes command on flagged cache projection. Checks {@code destId} to find out whether the command can be
 * executed locally or must be routed to a remote node.
 *
 * @param destId Target node ID for the operation. If {@code null}, the operation can be executed anywhere.
 * @param clientId Client ID.
 * @param cacheName Cache name.
 * @param skipStore Skip store flag.
 * @param key Key to set affinity mapping in the response.
 * @param op Operation to perform.
 * @return Operation result in future.
 * @throws IgniteCheckedException If failed.
 */
private IgniteInternalFuture<GridRestResponse> executeCommand(
    @Nullable UUID destId,
    UUID clientId,
    final String cacheName,
    final boolean skipStore,
    final Object key,
    final CacheProjectionCommand op
) throws IgniteCheckedException {
    final boolean locExec = destId == null || destId.equals(ctx.localNodeId()) ||
        replicatedCacheAvailable(cacheName);

    if (locExec) {
        IgniteInternalCache<?, ?> prj = localCache(cacheName).forSubjectId(clientId).setSkipStore(skipStore);

        return op.apply((IgniteInternalCache<Object, Object>)prj, ctx)
            .chain(resultWrapper((IgniteInternalCache<Object, Object>)prj, key));
    }
    else {
        ClusterGroup prj = ctx.grid().cluster().forPredicate(F.nodeForNodeId(destId));

        ctx.task().setThreadContext(TC_NO_FAILOVER, true);

        return ctx.closure().callAsync(
            BALANCE,
            new FlaggedCacheOperationCallable(clientId, cacheName, skipStore, op, key),
            prj.nodes());
    }
}
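The routing decision above reduces to: execute in place when the destination is local (or a usable replicated cache exists), otherwise ship a closure to the target node. A minimal standalone sketch of that pattern, using hypothetical names (LocalOrRemoteExecutor, forwardToNode) and plain CompletableFuture instead of Ignite's internal futures:

import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

/** Minimal sketch of the route-or-execute-locally pattern; names are illustrative, not Ignite API. */
class LocalOrRemoteExecutor {
    private final UUID localNodeId = UUID.randomUUID();

    /** Executes op immediately when destId is null or matches this node; otherwise forwards it. */
    <T> CompletableFuture<T> execute(UUID destId, Function<UUID, T> op) {
        boolean locExec = destId == null || destId.equals(localNodeId);

        if (locExec)
            return CompletableFuture.completedFuture(op.apply(localNodeId));

        // In the real handler this becomes a closure call routed to the destination node.
        return forwardToNode(destId, op);
    }

    private <T> CompletableFuture<T> forwardToNode(UUID destId, Function<UUID, T> op) {
        // Stand-in for ctx.closure().callAsync(...): run asynchronously "on" the target node.
        return CompletableFuture.supplyAsync(() -> op.apply(destId));
    }
}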
Use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
The class GridServiceProcessor, method serviceEntries.
/**
 * @param p Entry predicate used when executing the query from a client node.
 * @return Service deployment entries.
 */
@SuppressWarnings("unchecked")
private Iterator<Cache.Entry<Object, Object>> serviceEntries(IgniteBiPredicate<Object, Object> p) {
    try {
        IgniteInternalCache<Object, Object> cache = serviceCache();

        GridCacheQueryManager qryMgr = cache.context().queries();

        CacheQuery<Map.Entry<Object, Object>> qry = qryMgr.createScanQuery(p, null, false);

        qry.keepAll(false);

        if (!cache.context().affinityNode()) {
            ClusterNode oldestSrvNode = ctx.discovery().oldestAliveServerNode(AffinityTopologyVersion.NONE);

            if (oldestSrvNode == null)
                return new GridEmptyIterator<>();

            qry.projection(ctx.cluster().get().forNode(oldestSrvNode));
        }
        else
            qry.projection(ctx.cluster().get().forLocal());

        GridCloseableIterator<Map.Entry<Object, Object>> iter = qry.executeScanQuery();

        return cache.context().itHolder().iterator(iter,
            new CacheIteratorConverter<Cache.Entry<Object, Object>, Map.Entry<Object, Object>>() {
                @Override protected Cache.Entry<Object, Object> convert(Map.Entry<Object, Object> e) {
                    // CacheQueryEntry implements both Map.Entry and Cache.Entry interfaces.
                    return (Cache.Entry<Object, Object>)e;
                }

                @Override protected void remove(Cache.Entry<Object, Object> item) {
                    throw new UnsupportedOperationException();
                }
            });
    }
    catch (IgniteCheckedException e) {
        throw new IgniteException(e);
    }
}
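The anonymous CacheIteratorConverter above adapts Map.Entry elements to Cache.Entry lazily, as they are consumed. A minimal sketch of the same lazy-adapter idea with a hypothetical ConvertingIterator, outside any Ignite API:

import java.util.Iterator;
import java.util.function.Function;

/** Minimal sketch of the converter pattern above: adapt Iterator<F> to Iterator<T> element by element. */
final class ConvertingIterator<F, T> implements Iterator<T> {
    private final Iterator<F> delegate;
    private final Function<F, T> conv;

    ConvertingIterator(Iterator<F> delegate, Function<F, T> conv) {
        this.delegate = delegate;
        this.conv = conv;
    }

    @Override public boolean hasNext() {
        return delegate.hasNext();
    }

    @Override public T next() {
        // Conversion happens on demand, so the underlying query results are never materialized twice.
        return conv.apply(delegate.next());
    }

    @Override public void remove() {
        // Mirrors the snippet: removal through the converted view is not supported.
        throw new UnsupportedOperationException();
    }
}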
Use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
The class DataStructuresProcessor, method compatibleCache.
/**
 * @param cfg Collection configuration.
 * @param grpName Group name.
 * @return Compatible cache.
 * @throws IgniteCheckedException If failed.
 */
@Nullable private IgniteInternalCache compatibleCache(CollectionConfiguration cfg, String grpName)
    throws IgniteCheckedException {
    String cacheName = DS_CACHE_NAME_PREFIX + cfg.getAtomicityMode() + "_" + cfg.getCacheMode() + "_" +
        cfg.getBackups() + "@" + grpName;

    IgniteInternalCache cache = ctx.cache().cache(cacheName);

    if (cache == null) {
        ctx.cache().dynamicStartCache(cacheConfiguration(cfg, cacheName, grpName),
            cacheName,
            null,
            CacheType.DATA_STRUCTURES,
            false,
            false,
            true,
            true).get();
    }
    else {
        IgnitePredicate<ClusterNode> cacheNodeFilter = cache.context().group().nodeFilter();

        String clsName1 = cacheNodeFilter != null ? cacheNodeFilter.getClass().getName() :
            CacheConfiguration.IgniteAllNodesPredicate.class.getName();
        String clsName2 = cfg.getNodeFilter() != null ? cfg.getNodeFilter().getClass().getName() :
            CacheConfiguration.IgniteAllNodesPredicate.class.getName();

        if (!clsName1.equals(clsName2))
            throw new IgniteCheckedException("Could not add collection to group " + grpName +
                " because of different node filters [existing=" + clsName1 + ", new=" + clsName2 + "]");
    }

    cache = ctx.cache().getOrStartCache(cacheName);

    assert cache != null;

    return cache;
}
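The cache name is derived from the collection settings, so collections with the same atomicity mode, cache mode, backup count, and group end up sharing one underlying cache. A minimal sketch of that naming scheme (the prefix value here is illustrative; the real constant lives in DataStructuresProcessor):

/** Minimal sketch of the name scheme above: one cache per (atomicity, mode, backups, group) combination. */
class DsCacheNames {
    // Illustrative prefix, not the actual Ignite constant.
    static final String DS_CACHE_NAME_PREFIX = "datastructures_";

    static String cacheName(String atomicityMode, String cacheMode, int backups, String grpName) {
        return DS_CACHE_NAME_PREFIX + atomicityMode + "_" + cacheMode + "_" + backups + "@" + grpName;
    }

    public static void main(String[] args) {
        // Collections with identical settings map to the same name and therefore share a cache:
        System.out.println(cacheName("ATOMIC", "PARTITIONED", 1, "default"));
        // -> datastructures_ATOMIC_PARTITIONED_1@default
    }
}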
Use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
The class IgfsSizeSelfTest, method check.
/**
* Ensure that IGFS cache size is calculated correctly.
*
* @throws Exception If failed.
*/
private void check() throws Exception {
    startUp();

    // Ensure that cache was marked as IGFS data cache.
    for (int i = 0; i < GRID_CNT; i++) {
        IgniteEx g = grid(i);

        IgniteInternalCache cache =
            g.cachex(g.igfsx(IGFS_NAME).configuration().getDataCacheConfiguration().getName()).cache();

        assert cache.isIgfsDataCache();
    }

    // Perform writes.
    Collection<IgfsFile> files = write();

    // Check sizes.
    Map<UUID, Integer> expSizes = new HashMap<>(GRID_CNT, 1.0f);

    for (IgfsFile file : files) {
        for (IgfsBlock block : file.blocks()) {
            Collection<UUID> ids = primaryOrBackups(block.key());

            for (UUID id : ids) {
                if (expSizes.get(id) == null)
                    expSizes.put(id, block.length());
                else
                    expSizes.put(id, expSizes.get(id) + block.length());
            }
        }
    }

    for (int i = 0; i < GRID_CNT; i++) {
        UUID id = grid(i).localNode().id();

        GridCacheAdapter<IgfsBlockKey, byte[]> cache = cache(id);

        int expSize = expSizes.get(id) != null ? expSizes.get(id) : 0;

        assert expSize == cache.igfsDataSpaceUsed();
    }

    // Perform reads which could potentially be non-local.
    byte[] buf = new byte[BLOCK_SIZE];

    for (IgfsFile file : files) {
        for (int i = 0; i < GRID_CNT; i++) {
            int total = 0;

            IgfsInputStream is = igfs(i).open(file.path());

            while (true) {
                int read = is.read(buf);

                if (read == -1)
                    break;
                else
                    total += read;
            }

            assert total == file.length() : "Not enough bytes read: [expected=" + file.length() +
                ", actual=" + total + ']';

            is.close();
        }
    }

    // Check sizes after read.
    if (cacheMode == PARTITIONED) {
        // No changes since the previous check for co-located cache.
        for (int i = 0; i < GRID_CNT; i++) {
            UUID id = grid(i).localNode().id();

            GridCacheAdapter<IgfsBlockKey, byte[]> cache = cache(id);

            int expSize = expSizes.get(id) != null ? expSizes.get(id) : 0;

            assert expSize == cache.igfsDataSpaceUsed();
        }
    }
    else {
        // All data must exist on each cache.
        int totalSize = 0;

        for (IgfsFile file : files)
            totalSize += file.length();

        for (int i = 0; i < GRID_CNT; i++) {
            UUID id = grid(i).localNode().id();

            GridCacheAdapter<IgfsBlockKey, byte[]> cache = cache(id);

            assertEquals(totalSize, cache.igfsDataSpaceUsed());
        }
    }

    // Delete data and ensure that all counters are 0 now.
    for (IgfsFile file : files) {
        igfs(0).delete(file.path(), false);

        // Await for actual delete to occur.
        for (IgfsBlock block : file.blocks()) {
            for (int i = 0; i < GRID_CNT; i++) {
                while (localPeek(cache(grid(i).localNode().id()), block.key()) != null)
                    U.sleep(100);
            }
        }
    }

    for (int i = 0; i < GRID_CNT; i++) {
        GridCacheAdapter<IgfsBlockKey, byte[]> cache = cache(grid(i).localNode().id());

        assert 0 == cache.igfsDataSpaceUsed() : "Size counter is not 0: " + cache.igfsDataSpaceUsed();
    }
}
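The expected-size accumulation at the top of the test uses an explicit null check before adding to the map; with Map.merge the same update is a one-liner. A minimal standalone sketch (node IDs and block lengths invented for illustration):

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

/** Minimal sketch of the per-node size accumulation above, using Map.merge instead of explicit null checks. */
class ExpectedSizes {
    public static void main(String[] args) {
        Map<UUID, Integer> expSizes = new HashMap<>();

        UUID node = UUID.randomUUID();

        // Equivalent to: if absent, put(len); else put(old + len).
        expSizes.merge(node, 128, Integer::sum);
        expSizes.merge(node, 256, Integer::sum);

        System.out.println(expSizes.get(node)); // 384
    }
}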
Use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
The class ViewCacheClosure, method collectSequences.
/**
 * Collects atomic sequences whose names match the given pattern.
 *
 * @param ctx Kernal context.
 * @param compiled Compiled name pattern.
 * @param cacheInfo Cache info collection to fill.
 * @throws IgniteCheckedException If failed.
 */
private void collectSequences(GridKernalContext ctx, Pattern compiled, List<CacheInfo> cacheInfo)
    throws IgniteCheckedException {
    String dsCacheName = DataStructuresProcessor.ATOMICS_CACHE_NAME + "@default-ds-group";

    IgniteInternalCache<GridCacheInternalKey, AtomicDataStructureValue> cache0 = ctx.cache().cache(dsCacheName);

    final Iterator<Cache.Entry<GridCacheInternalKey, AtomicDataStructureValue>> iter =
        cache0.scanIterator(false, null);

    while (iter.hasNext()) {
        Cache.Entry<GridCacheInternalKey, AtomicDataStructureValue> entry = iter.next();

        final AtomicDataStructureValue val = entry.getValue();

        if (val.type() == DataStructureType.ATOMIC_SEQ) {
            final String name = entry.getKey().name();

            if (compiled.matcher(name).find()) {
                CacheInfo ci = new CacheInfo();

                ci.setSeqName(name);
                ci.setSeqVal(((GridCacheAtomicSequenceValue)val).get());

                cacheInfo.add(ci);
            }
        }
    }
}
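The loop above is a scan-and-filter: iterate all entries, keep those of the right type whose name matches the pattern. A minimal standalone sketch of the filtering step, with a plain Map standing in for the cache (names here are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;

/** Minimal sketch of the scan-and-filter step above, outside any Ignite API. */
class NameFilter {
    static List<String> matchingNames(Map<String, Long> seqValues, Pattern compiled) {
        List<String> res = new ArrayList<>();

        for (Map.Entry<String, Long> entry : seqValues.entrySet()) {
            // Pattern.find() matches a substring, so "seq" matches "my-seq-1".
            if (compiled.matcher(entry.getKey()).find())
                res.add(entry.getKey() + "=" + entry.getValue());
        }

        return res;
    }

    public static void main(String[] args) {
        System.out.println(matchingNames(Map.of("my-seq-1", 42L, "other", 7L), Pattern.compile("seq")));
        // -> [my-seq-1=42]
    }
}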