Use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
The class DataStructuresProcessor, method onKernalStart0.
/** */
private void onKernalStart0(boolean activeOnStart) {
    if (!activeOnStart && ctx.state().active())
        ctx.event().addLocalEventListener(lsnr, EVT_NODE_LEFT, EVT_NODE_FAILED);

    utilityCache = (IgniteInternalCache)ctx.cache().utilityCache();
    utilityDataCache = (IgniteInternalCache)ctx.cache().utilityCache();

    assert utilityCache != null;

    if (atomicCfg != null) {
        IgniteInternalCache atomicsCache = ctx.cache().atomicsCache();

        assert atomicsCache != null;

        // All data-structure views are backed by the same atomics cache.
        dsView = atomicsCache;
        cntDownLatchView = atomicsCache;
        semView = atomicsCache;
        reentrantLockView = atomicsCache;
        atomicLongView = atomicsCache;
        atomicRefView = atomicsCache;
        atomicStampedView = atomicsCache;
        seqView = atomicsCache;

        dsCacheCtx = atomicsCache.context();
    }

    initLatch.countDown();
}
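A minimal usage sketch, for context: once onKernalStart0 has wired atomicLongView over the atomics cache, a public-API call such as Ignite#atomicLong resolves through it. The configuration path below is the stock Ignite example config and the counter name is illustrative; neither comes from the code above.

try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
    // create == true: the atomic is lazily created in the atomics cache
    // that onKernalStart0 exposed through atomicLongView.
    IgniteAtomicLong cntr = ignite.atomicLong("myCounter", 0, true);

    cntr.incrementAndGet();
}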
Use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
The class IgfsMetaManager, method createFileOrDirectory.
/**
* Create file or directory.
*
* @param dir Directory flag.
* @param pathIds Path IDs.
* @param lockInfos Lock infos.
* @param dirProps Directory properties.
* @param fileProps File properties.
* @param blockSize Block size.
* @param affKey Affinity key.
* @param evictExclude Evict exclude flag.
* @param secondaryCtx Secondary file system create context.
* @param secondaryOutHolder Secondary output stream holder.
* @return Result.
* @throws IgniteCheckedException If failed.
*/
@SuppressWarnings("unchecked")
private IgfsPathsCreateResult createFileOrDirectory(
    boolean dir,
    IgfsPathIds pathIds,
    Map<IgniteUuid, IgfsEntryInfo> lockInfos,
    Map<String, String> dirProps,
    Map<String, String> fileProps,
    int blockSize,
    @Nullable IgniteUuid affKey,
    boolean evictExclude,
    @Nullable IgfsSecondaryFileSystemCreateContext secondaryCtx,
    @Nullable T1<OutputStream> secondaryOutHolder
) throws IgniteCheckedException {
    // This is our starting point.
    int lastExistingIdx = pathIds.lastExistingIndex();
    IgfsEntryInfo lastExistingInfo = lockInfos.get(pathIds.lastExistingId());

    int curIdx = lastExistingIdx + 1;
    String curPart = pathIds.part(curIdx);
    IgniteUuid curId = pathIds.surrogateId(curIdx);

    // If the current info already contains an entry with the same name as its child,
    // then something has changed concurrently. We must retry because we cannot get
    // the info of this unexpected element due to possible deadlocks.
    if (lastExistingInfo.hasChild(curPart))
        return null;

    // Create entry in the secondary file system if needed.
    if (secondaryCtx != null) {
        assert secondaryOutHolder != null;

        secondaryOutHolder.set(secondaryCtx.create());
    }

    Map<IgniteUuid, EntryProcessor> procMap = new HashMap<>();

    // First step: add new entry to the last existing element.
    procMap.put(lastExistingInfo.id(), new IgfsMetaDirectoryListingAddProcessor(curPart,
        new IgfsListingEntry(curId, dir || !pathIds.isLastIndex(curIdx))));

    // Events support.
    IgfsPath lastCreatedPath = pathIds.lastExistingPath();

    List<IgfsPath> createdPaths = new ArrayList<>(pathIds.count() - curIdx);

    // Second step: create middle directories.
    long curTime = System.currentTimeMillis();

    while (curIdx < pathIds.count() - 1) {
        lastCreatedPath = new IgfsPath(lastCreatedPath, curPart);

        int nextIdx = curIdx + 1;

        String nextPart = pathIds.part(nextIdx);
        IgniteUuid nextId = pathIds.surrogateId(nextIdx);

        long accessTime;
        long modificationTime;
        Map<String, String> props;

        if (secondaryCtx != null) {
            accessTime = 0L;
            modificationTime = 0L;
            props = null;
        }
        else {
            accessTime = curTime;
            modificationTime = curTime;
            props = dirProps;
        }

        procMap.put(curId, new IgfsMetaDirectoryCreateProcessor(accessTime, modificationTime, props,
            nextPart, new IgfsListingEntry(nextId, dir || !pathIds.isLastIndex(nextIdx))));

        // Save event.
        createdPaths.add(lastCreatedPath);

        // Advance things further.
        curIdx++;

        curPart = nextPart;
        curId = nextId;
    }

    // Third step: create leaf.
    if (dir) {
        long accessTime;
        long modificationTime;
        Map<String, String> props;

        if (secondaryCtx != null) {
            accessTime = 0L;
            modificationTime = 0L;
            props = null;
        }
        else {
            accessTime = curTime;
            modificationTime = curTime;
            props = dirProps;
        }

        procMap.put(curId, new IgfsMetaDirectoryCreateProcessor(accessTime, modificationTime, props));
    }
    else {
        long newAccessTime;
        long newModificationTime;
        Map<String, String> newProps;
        long newLen;
        int newBlockSize;

        if (secondaryCtx != null) {
            newAccessTime = 0L;
            newModificationTime = 0L;
            newProps = null;
        }
        else {
            newAccessTime = curTime;
            newModificationTime = curTime;
            newProps = fileProps;
        }

        newLen = 0L;
        newBlockSize = blockSize;

        procMap.put(curId, new IgfsMetaFileCreateProcessor(newAccessTime, newModificationTime, newProps,
            newBlockSize, affKey, createFileLockId(false), evictExclude, newLen));
    }

    createdPaths.add(pathIds.path());

    // Execute cache operations.
    Map<Object, EntryProcessorResult> invokeRes = ((IgniteInternalCache)id2InfoPrj).invokeAll(procMap);

    IgfsEntryInfo info = (IgfsEntryInfo)invokeRes.get(curId).get();

    return new IgfsPathsCreateResult(createdPaths, info);
}
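IgniteInternalCache#invokeAll applies the whole processor map as one batch, which is what lets the method create a chain of directories plus a leaf in a single cache operation. A minimal sketch of the same batch pattern against the public IgniteCache API, with a hypothetical cache name and a trivial increment processor:

IgniteCache<Integer, Long> cache = ignite.getOrCreateCache("myCache"); // Hypothetical cache.

// CacheEntryProcessor extends Serializable, so the lambda can be shipped to remote nodes.
CacheEntryProcessor<Integer, Long, Long> inc = (entry, args) -> {
    Long cur = entry.getValue();

    entry.setValue(cur == null ? 1L : cur + 1);

    return entry.getValue();
};

Map<Integer, EntryProcessor<Integer, Long, Long>> procMap = new HashMap<>();

for (int key = 0; key < 3; key++)
    procMap.put(key, inc);

// One batch invocation; per-key results are read back the same way as invokeRes.get(curId) above.
Map<Integer, EntryProcessorResult<Long>> res = cache.invokeAll(procMap);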
Use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
The class IgniteDrDataStreamerCacheUpdater, method receive.
/** {@inheritDoc} */
@Override public void receive(IgniteCache<KeyCacheObject, CacheObject> cache0,
    Collection<Map.Entry<KeyCacheObject, CacheObject>> col) {
    try {
        String cacheName = cache0.getConfiguration(CacheConfiguration.class).getName();

        GridKernalContext ctx = ((IgniteKernal)cache0.unwrap(Ignite.class)).context();
        IgniteLogger log = ctx.log(IgniteDrDataStreamerCacheUpdater.class);

        GridCacheAdapter internalCache = ctx.cache().internalCache(cacheName);

        CacheOperationContext opCtx = ((IgniteCacheProxy)cache0).context().operationContextPerCall();

        IgniteInternalCache cache = opCtx != null ?
            new GridCacheProxyImpl(internalCache.context(), internalCache, opCtx) : internalCache;

        assert !F.isEmpty(col);

        if (log.isDebugEnabled())
            log.debug("Running DR put job [nodeId=" + ctx.localNodeId() + ", cacheName=" + cacheName + ']');

        CacheObjectContext cacheObjCtx = cache.context().cacheObjectContext();

        for (Map.Entry<KeyCacheObject, CacheObject> entry0 : col) {
            GridCacheRawVersionedEntry entry = (GridCacheRawVersionedEntry)entry0;

            entry.unmarshal(cacheObjCtx, ctx.config().getMarshaller());

            KeyCacheObject key = entry.getKey();

            // Ensure that the receiver does not get special-purpose values for TTL and expire time.
            assert entry.ttl() != CU.TTL_NOT_CHANGED && entry.ttl() != CU.TTL_ZERO && entry.ttl() >= 0;
            assert entry.expireTime() != CU.EXPIRE_TIME_CALCULATE && entry.expireTime() >= 0;

            CacheObject cacheVal = entry.getValue();

            // Null value means removal; a non-eternal TTL carries expiration info along.
            GridCacheDrInfo val = cacheVal != null ?
                (entry.ttl() != CU.TTL_ETERNAL ?
                    new GridCacheDrExpirationInfo(cacheVal, entry.version(), entry.ttl(), entry.expireTime()) :
                    new GridCacheDrInfo(cacheVal, entry.version())) :
                null;

            if (val == null)
                cache.removeAllConflict(Collections.singletonMap(key, entry.version()));
            else
                cache.putAllConflict(Collections.singletonMap(key, val));
        }

        if (log.isDebugEnabled())
            log.debug("DR put job finished [nodeId=" + ctx.localNodeId() + ", cacheName=" + cacheName + ']');
    }
    catch (IgniteCheckedException e) {
        throw U.convertException(e);
    }
}
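IgniteDrDataStreamerCacheUpdater implements the public StreamReceiver contract (org.apache.ignite.stream), which is how a data streamer hands it batches of entries. A simplified, hypothetical receiver over plain types is sketched below; it mirrors only the put/remove branching above, without the DR conflict-resolution machinery. Such a receiver would be attached with IgniteDataStreamer#receiver before calling addData.

import java.util.Collection;
import java.util.Map;

import org.apache.ignite.IgniteCache;
import org.apache.ignite.IgniteException;
import org.apache.ignite.stream.StreamReceiver;

// Hypothetical simplified receiver; not the DR implementation above.
public class SimplePutRemoveReceiver implements StreamReceiver<Integer, String> {
    /** {@inheritDoc} */
    @Override public void receive(IgniteCache<Integer, String> cache,
        Collection<Map.Entry<Integer, String>> entries) throws IgniteException {
        for (Map.Entry<Integer, String> e : entries) {
            if (e.getValue() == null)
                cache.remove(e.getKey()); // Mirrors the removeAllConflict branch.
            else
                cache.put(e.getKey(), e.getValue()); // Mirrors the putAllConflict branch.
        }
    }
}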
Use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
The class CacheDataStructuresManager, method set0.
/**
 * @param name Set name.
 * @param collocated Collocation flag.
 * @param create If {@code true}, the set is created when it is not already in the cache.
 * @param separated Separated cache flag.
 * @return Set.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("unchecked")
@Nullable private <T> IgniteSet<T> set0(String name, boolean collocated, boolean create, boolean separated)
    throws IgniteCheckedException {
    cctx.gate().enter();

    try {
        GridCacheSetHeaderKey key = new GridCacheSetHeaderKey(name);

        GridCacheSetHeader hdr;

        IgniteInternalCache cache = cctx.cache().withNoRetries();

        if (create) {
            hdr = new GridCacheSetHeader(IgniteUuid.randomUuid(), collocated, separated);

            GridCacheSetHeader old = (GridCacheSetHeader)cache.getAndPutIfAbsent(key, hdr);

            if (old != null)
                hdr = old;
        }
        else
            hdr = (GridCacheSetHeader)cache.get(key);

        if (hdr == null)
            return null;

        GridCacheSetProxy<T> set = setsMap.get(hdr.id());

        if (set == null) {
            GridCacheSetProxy<T> old = setsMap.putIfAbsent(hdr.id(),
                set = new GridCacheSetProxy<>(cctx, new GridCacheSetImpl<T>(cctx, name, hdr)));

            if (old != null)
                set = old;
        }

        return set;
    }
    finally {
        cctx.gate().leave();
    }
}
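set0 is the internal endpoint of the public Ignite#set call. A minimal sketch of that public path follows; the set name and configuration values are illustrative, and an already started ignite instance is assumed.

CollectionConfiguration colCfg = new CollectionConfiguration();

colCfg.setCollocated(true);

// A non-null configuration maps to create == true: the header is stored via getAndPutIfAbsent above.
IgniteSet<String> set = ignite.set("mySet", colCfg);

set.add("a");

// A null configuration maps to create == false: returns the existing set, or null if none exists.
IgniteSet<String> existing = ignite.set("mySet", null);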
Use of org.apache.ignite.internal.processors.cache.IgniteInternalCache in project ignite by apache.
The class GridCacheQuerySqlMetadataJobV2, method call.
/** {@inheritDoc} */
@Override public Collection<GridCacheQueryManager.CacheSqlMetadata> call() {
    final GridKernalContext ctx = ((IgniteKernal)ignite).context();

    Collection<String> cacheNames = F.viewReadOnly(ctx.cache().caches(),
        new C1<IgniteInternalCache<?, ?>, String>() {
            @Override public String apply(IgniteInternalCache<?, ?> c) {
                return c.name();
            }
        },
        new P1<IgniteInternalCache<?, ?>>() {
            @Override public boolean apply(IgniteInternalCache<?, ?> c) {
                return !CU.isSystemCache(c.name()) && !DataStructuresProcessor.isDataStructureCache(c.name());
            }
        });

    return F.transform(cacheNames, new C1<String, GridCacheQueryManager.CacheSqlMetadata>() {
        @Override public GridCacheQueryManager.CacheSqlMetadata apply(String cacheName) {
            Collection<GridQueryTypeDescriptor> types = ctx.query().types(cacheName);

            Collection<String> names = U.newHashSet(types.size());

            Map<String, String> keyClasses = U.newHashMap(types.size());
            Map<String, String> valClasses = U.newHashMap(types.size());
            Map<String, Map<String, String>> fields = U.newHashMap(types.size());
            Map<String, Collection<GridCacheSqlIndexMetadata>> indexes = U.newHashMap(types.size());
            Map<String, Set<String>> notNullFields = U.newHashMap(types.size());

            for (GridQueryTypeDescriptor type : types) {
                // Filter internal types (e.g., data structures).
                if (type.name().startsWith("GridCache"))
                    continue;

                names.add(type.name());

                keyClasses.put(type.name(), type.keyClass().getName());
                valClasses.put(type.name(), type.valueClass().getName());

                int size = type.fields().isEmpty() ? NO_FIELDS_COLUMNS_COUNT : type.fields().size();

                Map<String, String> fieldsMap = U.newLinkedHashMap(size);
                HashSet<String> notNullFieldsSet = U.newHashSet(1);

                // _KEY and _VAL are not included in GridIndexingTypeDescriptor.valueFields.
                if (type.fields().isEmpty()) {
                    fieldsMap.put("_KEY", type.keyClass().getName());
                    fieldsMap.put("_VAL", type.valueClass().getName());
                }

                for (Map.Entry<String, Class<?>> e : type.fields().entrySet()) {
                    String fieldName = e.getKey();

                    fieldsMap.put(fieldName.toUpperCase(), e.getValue().getName());

                    if (type.property(fieldName).notNull())
                        notNullFieldsSet.add(fieldName.toUpperCase());
                }

                fields.put(type.name(), fieldsMap);
                notNullFields.put(type.name(), notNullFieldsSet);

                Map<String, GridQueryIndexDescriptor> idxs = type.indexes();

                Collection<GridCacheSqlIndexMetadata> indexesCol = new ArrayList<>(idxs.size());

                for (Map.Entry<String, GridQueryIndexDescriptor> e : idxs.entrySet()) {
                    GridQueryIndexDescriptor desc = e.getValue();

                    // Add only SQL indexes.
                    if (desc.type() == QueryIndexType.SORTED) {
                        Collection<String> idxFields = new LinkedList<>();
                        Collection<String> descendings = new LinkedList<>();

                        for (String idxField : e.getValue().fields()) {
                            String idxFieldUpper = idxField.toUpperCase();

                            idxFields.add(idxFieldUpper);

                            if (desc.descending(idxField))
                                descendings.add(idxFieldUpper);
                        }

                        indexesCol.add(new GridCacheQueryManager.CacheSqlIndexMetadata(e.getKey().toUpperCase(),
                            idxFields, descendings, false));
                    }
                }

                indexes.put(type.name(), indexesCol);
            }

            return new GridCacheQuerySqlMetadataV2(cacheName, names, keyClasses, valClasses, fields,
                indexes, notNullFields);
        }
    });
}
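The method leans on Ignite's internal GridFunc helpers (typedef F): viewReadOnly produces a read-only filtered and transformed view of a collection, and F.transform maps one collection into another. A standalone sketch of the same idiom with illustrative data, assuming the usual typedef imports (F, C1, P1):

Collection<Integer> nums = Arrays.asList(1, 2, 3, 4);

// Read-only view of the even numbers, each rendered as a string.
Collection<String> evens = F.viewReadOnly(nums,
    new C1<Integer, String>() {
        @Override public String apply(Integer i) {
            return "n" + i;
        }
    },
    new P1<Integer>() {
        @Override public boolean apply(Integer i) {
            return i % 2 == 0;
        }
    });

// evens now contains ["n2", "n4"].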