Example usage of org.apache.ignite.lang.IgniteUuid in the Apache Ignite project:
class GridDhtCacheAdapter, method endMultiUpdate.
/**
 * Releases the multi-update lock held by the current thread and completes the
 * associated multi-update future.
 *
 * @throws IgniteCheckedException If no multi-update is in progress for this thread
 *      (never started, or already released).
 */
public void endMultiUpdate() throws IgniteCheckedException {
    IgniteBiTuple<IgniteUuid, GridDhtTopologyFuture> holder = multiTxHolder.get();

    if (holder == null)
        throw new IgniteCheckedException("Multi-update was not started or released twice.");

    ctx.group().topology().readLock();

    try {
        IgniteUuid id = holder.get1();

        // Clear the per-thread state before completing the future.
        multiTxHolder.set(null);

        MultiUpdateFuture fut = multiTxFuts.remove(id);

        // Finish future.
        fut.onDone(id);
    }
    finally {
        ctx.group().topology().readUnlock();
    }
}
Example usage of org.apache.ignite.lang.IgniteUuid in the Apache Ignite project:
class IgfsMetaManager, method fileIds.
/**
 * Resolves the file ID of every component of the given path, possibly skipping an
 * existing transaction. The result is never empty: the root ID is always the first
 * element, while any subsequent element may be {@code null} when the corresponding
 * file does not exist.
 *
 * @param path Path.
 * @param skipTx Whether to skip existing transaction.
 * @return Collection of file IDs for components of specified path.
 * @throws IgniteCheckedException If failed.
 */
private List<IgniteUuid> fileIds(IgfsPath path, boolean skipTx) throws IgniteCheckedException {
    assert path != null;

    Collection<String> components = path.components();

    // One slot per path component plus the mandatory root entry.
    List<IgniteUuid> res = new ArrayList<>(components.size() + 1);

    res.add(IgfsUtils.ROOT_ID);

    IgniteUuid curId = IgfsUtils.ROOT_ID;

    for (String name : components) {
        assert !name.isEmpty();

        // Once a component is missing, every deeper component resolves to null as well.
        curId = curId == null ? null : fileId(curId, name, skipTx);

        res.add(curId);
    }

    return res;
}
Example usage of org.apache.ignite.lang.IgniteUuid in the Apache Ignite project:
class IgfsMetaManager, method synchronizeAndExecute.
/**
 * Synchronizes the file system structure with the secondary file system and then executes
 * the provided task. All of these actions are performed within a single transaction; the
 * whole procedure is retried from scratch when new parent directories appear concurrently.
 *
 * @param task Task to execute.
 * @param fs File system.
 * @param strict Whether paths must be re-created strictly.
 * @param extraLockIds Additional IDs to lock (optional).
 * @param paths Paths to synchronize.
 * @return Result of task execution.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings({ "Contract", "ConstantConditions" })
private <T> T synchronizeAndExecute(SynchronizationTask<T> task, IgfsSecondaryFileSystem fs, boolean strict, @Nullable Collection<IgniteUuid> extraLockIds, IgfsPath... paths) throws IgniteCheckedException {
assert task != null;
assert fs != null;
assert paths != null && paths.length > 0;
// Sort paths so that we know in which order to synchronize them.
if (paths.length > 1)
Arrays.sort(paths);
boolean finished = false;
T res = null;
// Retry loop: restarted (via 'continue' below) whenever the directory structure changed
// between the optimistic ID lookup and lock acquisition.
while (!finished) {
// Obtain existing IDs outside the transaction.
List<List<IgniteUuid>> pathIds = new ArrayList<>(paths.length);
for (IgfsPath path : paths) pathIds.add(idsForPath(path));
// Start pessimistic.
try (GridNearTxLocal tx = startTx()) {
// Lock the very first existing parents and possibly the leaf as well.
Map<IgfsPath, IgfsPath> pathToParent = new HashMap<>();
Map<IgfsPath, IgniteUuid> pathToId = new HashMap<>();
for (int i = 0; i < paths.length; i++) {
IgfsPath path = paths[i];
// Determine the very first existing parent
List<IgniteUuid> ids = pathIds.get(i);
if (ids.size() > 1) {
// The path is not root.
IgfsPath parentPath = path.parent();
IgniteUuid parentId = ids.get(ids.size() - 2);
// Walk up the path until a component with a non-null ID (i.e. one that
// actually exists) is found.
for (int j = ids.size() - 3; j >= 0; j--) {
if (parentId != null)
break;
else {
parentPath = parentPath.parent();
parentId = ids.get(j);
}
}
assert parentPath != null && parentId != null;
pathToParent.put(path, parentPath);
pathToId.put(parentPath, parentId);
}
// Also lock the leaf itself when it already exists.
IgniteUuid pathId = ids.get(ids.size() - 1);
if (pathId != null)
pathToId.put(path, pathId);
}
IgniteUuid[] lockArr = new IgniteUuid[extraLockIds == null ? pathToId.size() : pathToId.size() + extraLockIds.size()];
int idx = 0;
for (IgniteUuid id : pathToId.values()) lockArr[idx++] = id;
if (extraLockIds != null) {
for (IgniteUuid id : extraLockIds) lockArr[idx++] = id;
}
Map<IgniteUuid, IgfsEntryInfo> idToInfo = lockIds(lockArr);
// Extra IDs were locked only for safety; they are not part of the result map.
if (extraLockIds != null) {
for (IgniteUuid id : extraLockIds) idToInfo.remove(id);
}
// Ensure that locked IDs still point to expected paths.
IgfsPath changed = null;
for (Map.Entry<IgfsPath, IgniteUuid> entry : pathToId.entrySet()) {
if (!idToInfo.containsKey(entry.getValue()) || !F.eq(entry.getValue(), fileId(entry.getKey(), true))) {
changed = entry.getKey();
break;
}
}
if (changed != null) {
// An ID now maps to a different path: report instead of retrying.
finished = true;
throw fsException(new IgfsConcurrentModificationException("File system entry has been " + "modified concurrently: " + changed));
} else {
boolean newParents = false;
// Check whether any new parents appeared before we have obtained the locks.
for (int i = 0; i < paths.length; i++) {
List<IgniteUuid> newIds = fileIds(paths[i], true);
if (!pathIds.get(i).equals(newIds)) {
newParents = true;
break;
}
}
if (newParents)
// Release all locks and try again.
continue;
else {
// Perform synchronization.
Map<IgfsPath, IgfsEntryInfo> infos = new HashMap<>();
TreeMap<IgfsPath, IgfsEntryInfo> created = new TreeMap<>();
for (IgfsPath path : paths) {
IgfsPath parentPath = path.parent();
if (pathToId.containsKey(path)) {
// The path already exists: reuse the locked entries directly.
infos.put(path, info(pathToId.get(path)));
if (parentPath != null)
infos.put(parentPath, info(pathToId.get(parentPath)));
} else {
// The path is missing: re-create it starting from its first existing parent.
IgfsPath firstParentPath = pathToParent.get(path);
assert firstParentPath != null;
assert pathToId.get(firstParentPath) != null;
IgfsEntryInfo info = synchronize(fs, firstParentPath, idToInfo.get(pathToId.get(firstParentPath)), path, strict, created);
// In strict mode the full path must have been re-created.
assert strict && info != null || !strict;
if (info != null)
infos.put(path, info);
if (parentPath != null) {
if (parentPath.equals(firstParentPath))
infos.put(firstParentPath, idToInfo.get(pathToId.get(firstParentPath)));
else {
assert strict && created.get(parentPath) != null || !strict;
if (created.get(parentPath) != null)
infos.put(parentPath, created.get(parentPath));
else {
// Put the last created path.
infos.put(created.lastKey(), created.get(created.lastKey()));
}
}
}
}
}
// Finally, execute the task.
finished = true;
try {
res = task.onSuccess(infos);
} catch (Exception e) {
res = task.onFailure(e);
}
}
}
tx.commit();
} catch (IgniteCheckedException e) {
// If the task has not run yet, give it a chance to handle the failure; otherwise a
// result was already produced and the (commit) failure must propagate as-is.
if (!finished) {
finished = true;
res = task.onFailure(e);
} else
throw e;
}
}
return res;
}
Example usage of org.apache.ignite.lang.IgniteUuid in the Apache Ignite project:
class IgfsMetaManager, method lockIds.
/**
 * Locks the given file IDs and fetches their entry infos.
 *
 * @param fileIds File IDs (sorted).
 * @return Map with lock info.
 * @throws IgniteCheckedException If failed.
 */
private Map<IgniteUuid, IgfsEntryInfo> lockIds(Collection<IgniteUuid> fileIds) throws IgniteCheckedException {
    assert isSorted(fileIds);

    validTxState(true);

    boolean debugEnabled = log.isDebugEnabled();

    if (debugEnabled)
        log.debug("Locking file ids: " + fileIds);

    // Lock files and get their infos.
    Map<IgniteUuid, IgfsEntryInfo> infos = getInfos(fileIds);

    if (debugEnabled)
        log.debug("Locked file ids: " + fileIds);

    // System directories (root/trash) may not exist yet; materialize any that are missing.
    for (IgniteUuid id : fileIds) {
        if (IgfsUtils.isRootOrTrashId(id) && !infos.containsKey(id))
            infos.put(id, createSystemDirectoryIfAbsent(id));
    }

    // Returns detail's map for locked IDs.
    return infos;
}
Example usage of org.apache.ignite.lang.IgniteUuid in the Apache Ignite project:
class IgfsMetaManager, method transferEntry.
/**
 * Transfers a listing entry from one directory to another. When the source and
 * destination directories are the same, the entry is simply renamed in place.
 *
 * @param entry Entry to be transferred.
 * @param srcId Source ID.
 * @param srcName Source name.
 * @param destId Destination ID.
 * @param destName Destination name.
 * @throws IgniteCheckedException If failed.
 */
private void transferEntry(IgfsListingEntry entry, IgniteUuid srcId, String srcName, IgniteUuid destId, String destName) throws IgniteCheckedException {
    validTxState(true);

    if (!F.eq(srcId, destId)) {
        // Different directories: remove from the source listing, add to the destination.
        Map<IgniteUuid, EntryProcessor<IgniteUuid, IgfsEntryInfo, Void>> procs = new HashMap<>();

        procs.put(srcId, new IgfsMetaDirectoryListingRemoveProcessor(srcName, entry.fileId()));
        procs.put(destId, new IgfsMetaDirectoryListingAddProcessor(destName, entry));

        id2InfoPrj.invokeAll(procs);
    }
    else
        // Same directory: an in-place rename is sufficient.
        id2InfoPrj.invoke(srcId, new IgfsMetaDirectoryListingRenameProcessor(srcName, destName));
}
Aggregations