Use of org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal in project ignite by apache.
The class GridCacheAdapter, method asyncOp.
/**
 * @param tx Transaction.
 * @param op Cache operation.
 * @param opCtx Cache operation context.
 * @param retry Retry flag.
 * @param <T> Return type.
 * @return Future.
 */
@SuppressWarnings("unchecked")
protected <T> IgniteInternalFuture<T> asyncOp(GridNearTxLocal tx, final AsyncOp<T> op,
    final CacheOperationContext opCtx, final boolean retry) {
    IgniteInternalFuture<T> fail = asyncOpAcquire(retry);

    if (fail != null)
        return fail;

    FutureHolder holder = lastFut.get();

    holder.lock();

    try {
        IgniteInternalFuture fut = holder.future();

        final GridNearTxLocal tx0 = tx;

        if (fut != null && !fut.isDone()) {
            IgniteInternalFuture<T> f = new GridEmbeddedFuture(fut, new IgniteOutClosure<IgniteInternalFuture>() {
                @Override public IgniteInternalFuture<T> apply() {
                    if (ctx.kernalContext().isStopping())
                        return new GridFinishedFuture<>(
                            new IgniteCheckedException("Operation has been cancelled (node is stopping)."));

                    try {
                        return op.op(tx0, opCtx).chain(new CX1<IgniteInternalFuture<T>, T>() {
                            @Override public T applyx(IgniteInternalFuture<T> tFut) throws IgniteCheckedException {
                                try {
                                    return tFut.get();
                                }
                                catch (IgniteTxRollbackCheckedException | NodeStoppingException e) {
                                    throw e;
                                }
                                catch (IgniteCheckedException e1) {
                                    try {
                                        tx0.rollbackNearTxLocalAsync();
                                    }
                                    catch (Throwable e2) {
                                        if (e1 != e2)
                                            e1.addSuppressed(e2);
                                    }

                                    throw e1;
                                }
                                finally {
                                    ctx.shared().txContextReset();
                                }
                            }
                        });
                    }
                    finally {
                        // It is necessary to clear tx context in this thread as well.
                        ctx.shared().txContextReset();
                    }
                }
            });

            saveFuture(holder, f, retry);

            return f;
        }

        IgniteInternalFuture<T> f;

        try {
            f = op.op(tx, opCtx).chain(new CX1<IgniteInternalFuture<T>, T>() {
                @Override public T applyx(IgniteInternalFuture<T> tFut) throws IgniteCheckedException {
                    try {
                        return tFut.get();
                    }
                    catch (IgniteTxRollbackCheckedException | NodeStoppingException e) {
                        throw e;
                    }
                    catch (IgniteCheckedException e1) {
                        try {
                            tx0.rollbackNearTxLocalAsync();
                        }
                        catch (Throwable e2) {
                            if (e2 != e1)
                                e1.addSuppressed(e2);
                        }

                        throw e1;
                    }
                    finally {
                        ctx.shared().txContextReset();
                    }
                }
            });
        }
        finally {
            // It is necessary to clear tx context in this thread as well.
            ctx.shared().txContextReset();
        }

        saveFuture(holder, f, retry);

        if (tx.implicit())
            ctx.tm().resetContext();

        return f;
    }
    finally {
        holder.unlock();
    }
}
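The interesting part here is how per-thread operation ordering is enforced: each thread keeps its last started future in a FutureHolder, and a new operation is chained behind it via GridEmbeddedFuture whenever the previous one has not completed. Below is a minimal, self-contained sketch of the same ordering idea using plain CompletableFuture; the class and member names are illustrative stand-ins, not Ignite's actual types.

import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

/** Minimal sketch: serialize async operations started by the same thread. */
public class PerThreadOpChain {
    /** Last future started by the current thread (plays the role of lastFut/FutureHolder). */
    private final ThreadLocal<CompletableFuture<?>> lastFut =
        ThreadLocal.withInitial(() -> CompletableFuture.completedFuture(null));

    /** Starts {@code op} immediately, or after the previous operation of this thread completes. */
    public <T> CompletableFuture<T> asyncOp(Supplier<CompletableFuture<T>> op) {
        CompletableFuture<?> prev = lastFut.get();

        // Chain behind the previous future if it is still running; its outcome is ignored,
        // since the new operation must start either way (mirrors GridEmbeddedFuture above).
        CompletableFuture<T> f = prev.isDone()
            ? op.get()
            : prev.handle((r, e) -> (Void)null).thenCompose(ignored -> op.get());

        lastFut.set(f);

        return f;
    }
}

Note that, as in the Ignite code, ordering is per thread only: two different threads may still have their operations interleave.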
Use of org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal in project ignite by apache.
The class IgfsDataManager, method spreadBlocks.
/**
 * Moves all colocated blocks in range to non-colocated keys.
 *
 * @param fileInfo File info to move data for.
 * @param range Range to move.
 */
public void spreadBlocks(IgfsEntryInfo fileInfo, IgfsFileAffinityRange range) {
    long startIdx = range.startOffset() / fileInfo.blockSize();
    long endIdx = range.endOffset() / fileInfo.blockSize();

    try {
        try (IgniteDataStreamer<IgfsBlockKey, byte[]> ldr = dataStreamer()) {
            long bytesProcessed = 0;

            for (long idx = startIdx; idx <= endIdx; idx++) {
                IgfsBlockKey colocatedKey = new IgfsBlockKey(fileInfo.id(), range.affinityKey(),
                    fileInfo.evictExclude(), idx);

                IgfsBlockKey key = new IgfsBlockKey(fileInfo.id(), null, fileInfo.evictExclude(), idx);

                // Most of the time this should be a local get.
                byte[] block = dataCachePrj.get(colocatedKey);

                if (block != null) {
                    // A partial block must be updated in a pessimistic transaction.
                    if (block.length != fileInfo.blockSize()) {
                        try (GridNearTxLocal tx = dataCachePrj.txStartEx(PESSIMISTIC, REPEATABLE_READ)) {
                            Map<IgfsBlockKey, byte[]> vals = dataCachePrj.getAll(F.asList(colocatedKey, key));

                            byte[] val = vals.get(colocatedKey);

                            if (val != null) {
                                putBlock(fileInfo.blockSize(), key, val);

                                tx.commit();
                            }
                            else {
                                // File is being concurrently deleted.
                                if (log.isDebugEnabled())
                                    log.debug("Failed to find colocated file block for spread (will ignore) " +
                                        "[fileInfo=" + fileInfo + ", range=" + range + ", startIdx=" + startIdx +
                                        ", endIdx=" + endIdx + ", idx=" + idx + ']');
                            }
                        }
                    }
                    else
                        ldr.addData(key, block);

                    bytesProcessed += block.length;

                    if (bytesProcessed >= igfsCtx.configuration().getFragmentizerThrottlingBlockLength()) {
                        ldr.flush();

                        bytesProcessed = 0;

                        U.sleep(igfsCtx.configuration().getFragmentizerThrottlingDelay());
                    }
                }
                else if (log.isDebugEnabled())
                    log.debug("Failed to find colocated file block for spread (will ignore) " +
                        "[fileInfo=" + fileInfo + ", range=" + range + ", startIdx=" + startIdx +
                        ", endIdx=" + endIdx + ", idx=" + idx + ']');
            }
        }
    }
    catch (IgniteCheckedException e) {
        log.error("Failed to clean up file range [fileInfo=" + fileInfo + ", range=" + range + ']', e);
    }
}
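The throttling at the end of the loop is worth noting: the streamer is flushed and the thread pauses once getFragmentizerThrottlingBlockLength() bytes have been pushed, so fragmentation traffic cannot saturate the cluster. The following standalone sketch shows that flush-and-sleep pattern against the public IgniteDataStreamer API; the cache name, threshold, and delay values are assumptions for illustration, not Ignite defaults.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;

public class ThrottledStreaming {
    /** Streams blocks, flushing and pausing after each ~16 MB to throttle the load. */
    static void streamThrottled(Ignite ignite, byte[][] blocks) throws InterruptedException {
        final long throttleBytes = 16L * 1024 * 1024; // assumed threshold
        final long throttleDelayMs = 200;             // assumed pause

        try (IgniteDataStreamer<Long, byte[]> ldr = ignite.dataStreamer("dataCache")) {
            long bytesProcessed = 0;

            for (int idx = 0; idx < blocks.length; idx++) {
                ldr.addData((long)idx, blocks[idx]);

                bytesProcessed += blocks[idx].length;

                if (bytesProcessed >= throttleBytes) {
                    // Wait until everything buffered so far has been sent, then back off.
                    ldr.flush();

                    bytesProcessed = 0;

                    Thread.sleep(throttleDelayMs);
                }
            }
        }
    }
}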
Use of org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal in project ignite by apache.
The class IgfsDataManager, method processPartialBlockWrite.
/**
 * If a partial block write is attempted, both colocated and non-colocated keys are locked and the data is
 * appended to the correct block.
 *
 * @param fileId File ID.
 * @param colocatedKey Block key.
 * @param startOff Data start offset within the block.
 * @param data Data to write.
 * @param blockSize The block size.
 * @throws IgniteCheckedException If update failed.
 */
private void processPartialBlockWrite(IgniteUuid fileId, IgfsBlockKey colocatedKey, int startOff, byte[] data,
    int blockSize) throws IgniteCheckedException {
    // No affinity key present, just concat and return.
    if (colocatedKey.affinityKey() == null) {
        dataCachePrj.invoke(colocatedKey, new UpdateProcessor(startOff, data));

        return;
    }

    // If writing from block beginning, just put and return.
    if (startOff == 0) {
        putBlock(blockSize, colocatedKey, data);

        return;
    }

    // Create non-colocated key.
    IgfsBlockKey key = new IgfsBlockKey(colocatedKey.getFileId(), null, colocatedKey.evictExclude(),
        colocatedKey.blockId());

    try (GridNearTxLocal tx = dataCachePrj.txStartEx(PESSIMISTIC, REPEATABLE_READ)) {
        // Lock keys.
        Map<IgfsBlockKey, byte[]> vals = dataCachePrj.getAll(F.asList(colocatedKey, key));

        boolean hasVal = false;

        UpdateProcessor transformClos = new UpdateProcessor(startOff, data);

        if (vals.get(colocatedKey) != null) {
            dataCachePrj.invoke(colocatedKey, transformClos);

            hasVal = true;
        }

        if (vals.get(key) != null) {
            dataCachePrj.invoke(key, transformClos);

            hasVal = true;
        }

        if (!hasVal)
            throw new IgniteCheckedException("Failed to write partial block (no previous data was found in cache) " +
                "[key=" + colocatedKey + ", relaxedKey=" + key + ", startOff=" + startOff +
                ", dataLen=" + data.length + ']');

        tx.commit();
    }
}
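The getAll call here is not just a read: inside a PESSIMISTIC/REPEATABLE_READ transaction it acquires locks on both keys before either one is updated. Below is a sketch of the same lock-then-update pattern using the public transaction API rather than the internal IgniteInternalCache/GridNearTxLocal used above; the key and value types and the append logic are illustrative.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.transactions.Transaction;

import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;

public class LockThenUpdate {
    /** Appends {@code tail} to whichever of the two keys holds data, failing if neither does. */
    static void appendToEither(Ignite ignite, IgniteCache<String, byte[]> cache,
        String key1, String key2, byte[] tail) {
        try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
            // In a pessimistic transaction this read acquires locks on both keys.
            Map<String, byte[]> vals = cache.getAll(new HashSet<>(Arrays.asList(key1, key2)));

            boolean hasVal = false;

            for (String k : new String[] {key1, key2}) {
                byte[] old = vals.get(k);

                if (old != null) {
                    byte[] upd = Arrays.copyOf(old, old.length + tail.length);

                    System.arraycopy(tail, 0, upd, old.length, tail.length);

                    cache.put(k, upd);

                    hasVal = true;
                }
            }

            if (!hasVal)
                throw new IllegalStateException("No previous data found for either key.");

            tx.commit();
        }
    }
}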
Use of org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal in project ignite by apache.
The class IgfsMetaManager, method synchronizeAndExecute.
/**
 * Synchronizes the file system structure and then executes the provided task. All these actions are performed
 * within the transaction.
 *
 * @param task Task to execute.
 * @param fs File system.
 * @param strict Whether paths must be re-created strictly.
 * @param extraLockIds Additional IDs to lock (optional).
 * @param paths Paths to synchronize.
 * @return Result of task execution.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings({ "Contract", "ConstantConditions" })
private <T> T synchronizeAndExecute(SynchronizationTask<T> task, IgfsSecondaryFileSystem fs, boolean strict, @Nullable Collection<IgniteUuid> extraLockIds, IgfsPath... paths) throws IgniteCheckedException {
assert task != null;
assert fs != null;
assert paths != null && paths.length > 0;
// Sort paths so that we know in which order to synchronize them.
if (paths.length > 1)
Arrays.sort(paths);
boolean finished = false;
T res = null;
while (!finished) {
// Obtain existing IDs outside the transaction.
List<List<IgniteUuid>> pathIds = new ArrayList<>(paths.length);
for (IgfsPath path : paths) pathIds.add(idsForPath(path));
// Start pessimistic.
try (GridNearTxLocal tx = startTx()) {
// Lock the very first existing parents and possibly the leaf as well.
Map<IgfsPath, IgfsPath> pathToParent = new HashMap<>();
Map<IgfsPath, IgniteUuid> pathToId = new HashMap<>();
for (int i = 0; i < paths.length; i++) {
IgfsPath path = paths[i];
// Determine the very first existing parent
List<IgniteUuid> ids = pathIds.get(i);
if (ids.size() > 1) {
// The path is not root.
IgfsPath parentPath = path.parent();
IgniteUuid parentId = ids.get(ids.size() - 2);
for (int j = ids.size() - 3; j >= 0; j--) {
if (parentId != null)
break;
else {
parentPath = parentPath.parent();
parentId = ids.get(j);
}
}
assert parentPath != null && parentId != null;
pathToParent.put(path, parentPath);
pathToId.put(parentPath, parentId);
}
IgniteUuid pathId = ids.get(ids.size() - 1);
if (pathId != null)
pathToId.put(path, pathId);
}
IgniteUuid[] lockArr = new IgniteUuid[extraLockIds == null ? pathToId.size() : pathToId.size() + extraLockIds.size()];
int idx = 0;
for (IgniteUuid id : pathToId.values()) lockArr[idx++] = id;
if (extraLockIds != null) {
for (IgniteUuid id : extraLockIds) lockArr[idx++] = id;
}
Map<IgniteUuid, IgfsEntryInfo> idToInfo = lockIds(lockArr);
if (extraLockIds != null) {
for (IgniteUuid id : extraLockIds) idToInfo.remove(id);
}
// Ensure that locked IDs still point to expected paths.
IgfsPath changed = null;
for (Map.Entry<IgfsPath, IgniteUuid> entry : pathToId.entrySet()) {
if (!idToInfo.containsKey(entry.getValue()) || !F.eq(entry.getValue(), fileId(entry.getKey(), true))) {
changed = entry.getKey();
break;
}
}
if (changed != null) {
finished = true;
throw fsException(new IgfsConcurrentModificationException("File system entry has been " + "modified concurrently: " + changed));
} else {
boolean newParents = false;
// Check whether any new parents appeared before we have obtained the locks.
for (int i = 0; i < paths.length; i++) {
List<IgniteUuid> newIds = fileIds(paths[i], true);
if (!pathIds.get(i).equals(newIds)) {
newParents = true;
break;
}
}
if (newParents)
// Release all locks and try again.
continue;
else {
// Perform synchronization.
Map<IgfsPath, IgfsEntryInfo> infos = new HashMap<>();
TreeMap<IgfsPath, IgfsEntryInfo> created = new TreeMap<>();
for (IgfsPath path : paths) {
IgfsPath parentPath = path.parent();
if (pathToId.containsKey(path)) {
infos.put(path, info(pathToId.get(path)));
if (parentPath != null)
infos.put(parentPath, info(pathToId.get(parentPath)));
} else {
IgfsPath firstParentPath = pathToParent.get(path);
assert firstParentPath != null;
assert pathToId.get(firstParentPath) != null;
IgfsEntryInfo info = synchronize(fs, firstParentPath, idToInfo.get(pathToId.get(firstParentPath)), path, strict, created);
assert strict && info != null || !strict;
if (info != null)
infos.put(path, info);
if (parentPath != null) {
if (parentPath.equals(firstParentPath))
infos.put(firstParentPath, idToInfo.get(pathToId.get(firstParentPath)));
else {
assert strict && created.get(parentPath) != null || !strict;
if (created.get(parentPath) != null)
infos.put(parentPath, created.get(parentPath));
else {
// Put the last created path.
infos.put(created.lastKey(), created.get(created.lastKey()));
}
}
}
}
}
// Finally, execute the task.
finished = true;
try {
res = task.onSuccess(infos);
} catch (Exception e) {
res = task.onFailure(e);
}
}
}
tx.commit();
} catch (IgniteCheckedException e) {
if (!finished) {
finished = true;
res = task.onFailure(e);
} else
throw e;
}
}
return res;
}
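Structurally this is an optimistic validate-and-retry loop: path IDs are snapshotted outside the transaction, locks are taken inside it, the snapshot is re-validated, and the whole iteration is retried (the continue) if new parents appeared in between. The condensed sketch below shows that shape with the snapshot and task abstracted behind hypothetical functional interfaces; it is not IgfsMetaManager's actual API.

import java.util.List;
import java.util.function.Function;
import java.util.function.Supplier;

import org.apache.ignite.Ignite;
import org.apache.ignite.lang.IgniteUuid;
import org.apache.ignite.transactions.Transaction;

import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;

public class ValidateAndRetry {
    /** Retries the task until the pre-transaction snapshot is still valid once locks are held. */
    static <T> T retryUntilStable(Ignite ignite, Supplier<List<IgniteUuid>> snapshot,
        Function<List<IgniteUuid>, T> task) {
        while (true) {
            // 1. Read the current state outside any transaction (cheap, unlocked).
            List<IgniteUuid> ids = snapshot.get();

            try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
                // 2. Re-read inside the transaction; a pessimistic cache read here would
                // also acquire the locks, like lockIds() in the method above.
                if (!ids.equals(snapshot.get()))
                    continue; // Snapshot went stale: roll back (via close()) and start over.

                // 3. Snapshot is stable under locks: safe to run the task.
                T res = task.apply(ids);

                tx.commit();

                return res;
            }
        }
    }
}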
Use of org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal in project ignite by apache.
The class DataStructuresProcessor, method getAtomic.
/**
 * @param c Closure creating data structure instance.
 * @param cfg Optional custom configuration or {@code null} to use the default one.
 * @param name Data structure name.
 * @param type Data structure type.
 * @param create Create flag.
 * @param cls Expected data structure class.
 * @return Data structure instance.
 * @throws IgniteCheckedException If failed.
 */
@Nullable private <T extends GridCacheRemovable> T getAtomic(final AtomicAccessor<T> c,
    @Nullable AtomicConfiguration cfg,
    final String name,
    final DataStructureType type,
    final boolean create,
    Class<? extends T> cls) throws IgniteCheckedException {
    A.notNull(name, "name");

    awaitInitialization();

    if (cfg == null) {
        checkAtomicsConfiguration();

        cfg = dfltAtomicCfg;
    }

    final String grpName;

    if (type.isVolatile())
        grpName = DEFAULT_VOLATILE_DS_GROUP_NAME;
    else if (cfg.getGroupName() != null)
        grpName = cfg.getGroupName();
    else
        grpName = DEFAULT_DS_GROUP_NAME;

    String cacheName = ATOMICS_CACHE_NAME + "@" + grpName;

    IgniteInternalCache<GridCacheInternalKey, AtomicDataStructureValue> cache0 = ctx.cache().cache(cacheName);

    if (cache0 == null) {
        if (!create && ctx.cache().cacheDescriptor(cacheName) == null)
            return null;

        ctx.cache().dynamicStartCache(cacheConfiguration(cfg, cacheName, grpName), cacheName, null,
            CacheType.DATA_STRUCTURES, false, false, true, true).get();

        cache0 = ctx.cache().cache(cacheName);

        assert cache0 != null;
    }

    final IgniteInternalCache<GridCacheInternalKey, AtomicDataStructureValue> cache = cache0;

    startQuery(cache.context());

    final GridCacheInternalKey key = new GridCacheInternalKeyImpl(name, grpName);

    // Check the type of the structure received by key from the local cache.
    T dataStructure = cast(dsMap.get(key), cls);

    if (dataStructure != null)
        return dataStructure;

    return retryTopologySafe(new IgniteOutClosureX<T>() {
        @Override public T applyx() throws IgniteCheckedException {
            cache.context().gate().enter();

            try (GridNearTxLocal tx = cache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) {
                AtomicDataStructureValue val = cache.get(key);

                if (isObsolete(val))
                    val = null;

                if (val == null && !create)
                    return null;

                if (val != null) {
                    if (val.type() != type)
                        throw new IgniteCheckedException("Another data structure with the same name already created " +
                            "[name=" + name + ", newType=" + type + ", existingType=" + val.type() + ']');
                }

                T2<T, ? extends AtomicDataStructureValue> ret;

                try {
                    ret = c.get(key, val, cache);

                    dsMap.put(key, ret.get1());

                    if (ret.get2() != null)
                        cache.put(key, ret.get2());

                    tx.commit();
                }
                catch (Error | Exception e) {
                    dsMap.remove(key);

                    U.error(log, "Failed to make datastructure: " + name, e);

                    throw e;
                }

                return ret.get1();
            }
            finally {
                cache.context().gate().leave();
            }
        }
    });
}
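From user code this create-or-get path is reached through the public data structure factory methods, such as Ignite.atomicLong(name, initVal, create), which delegate to DataStructuresProcessor. A small usage sketch (cluster configuration omitted; names are illustrative):

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteAtomicLong;
import org.apache.ignite.Ignition;

public class AtomicLongExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // create = true: returns the existing structure, or creates it inside
            // the pessimistic transaction shown in getAtomic() above.
            IgniteAtomicLong cntr = ignite.atomicLong("my-counter", 0, true);

            System.out.println("Value: " + cntr.incrementAndGet());

            // create = false: returns null if the structure does not exist yet.
            IgniteAtomicLong absent = ignite.atomicLong("no-such-counter", 0, false);

            System.out.println("Absent: " + (absent == null));
        }
    }
}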