Use of org.apache.ignite.igfs.IgfsException in project ignite by apache.
The class LocalFileSystemUtils, method updateProperties:
/**
* Update file properties.
*
* @param file File.
* @param grp Group.
* @param perm Permissions.
*/
public static void updateProperties(File file, String grp, String perm) {
    PosixFileAttributeView attrs = Files.getFileAttributeView(file.toPath(), PosixFileAttributeView.class);

    if (attrs == null)
        throw new UnsupportedOperationException("Posix file attributes not available");

    if (grp != null) {
        try {
            UserPrincipalLookupService lookupService = FileSystems.getDefault().getUserPrincipalLookupService();

            GroupPrincipal grp0 = lookupService.lookupPrincipalByGroupName(grp);

            attrs.setGroup(grp0);
        }
        catch (IOException e) {
            throw new IgfsException("Update the '" + IgfsUtils.PROP_GROUP_NAME + "' property is failed.", e);
        }
    }

    if (perm != null) {
        int perm0 = Integer.parseInt(perm, 8);

        Set<PosixFilePermission> permSet = new HashSet<>(9);

        for (int i = 0; i < LocalFileSystemUtils.POSIX_PERMISSIONS.length; ++i) {
            if ((perm0 & (1 << i)) != 0)
                permSet.add(LocalFileSystemUtils.POSIX_PERMISSIONS[i]);
        }

        try {
            attrs.setPermissions(permSet);
        }
        catch (IOException e) {
            throw new IgfsException("Update the '" + IgfsUtils.PROP_PERMISSION + "' property is failed.", e);
        }
    }
}
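The loop above tests bit i of the parsed octal permission string against the i-th entry of LocalFileSystemUtils.POSIX_PERMISSIONS. Below is a minimal standalone sketch of that mapping; the bit ordering of the lookup array (OTHERS_EXECUTE at bit 0 up to OWNER_READ at bit 8) and the class name PermissionBitsDemo are assumptions for illustration, not necessarily what the real constant uses.

import java.nio.file.attribute.PosixFilePermission;
import java.util.EnumSet;
import java.util.Set;

public class PermissionBitsDemo {
    // Assumed ordering: index i corresponds to bit i of the parsed octal value.
    private static final PosixFilePermission[] PERMISSIONS = {
        PosixFilePermission.OTHERS_EXECUTE, PosixFilePermission.OTHERS_WRITE, PosixFilePermission.OTHERS_READ,
        PosixFilePermission.GROUP_EXECUTE, PosixFilePermission.GROUP_WRITE, PosixFilePermission.GROUP_READ,
        PosixFilePermission.OWNER_EXECUTE, PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_READ
    };

    public static void main(String[] args) {
        // "644" parsed as octal is 420, i.e. bits 2, 5, 7 and 8 are set.
        int bits = Integer.parseInt("644", 8);

        Set<PosixFilePermission> permSet = EnumSet.noneOf(PosixFilePermission.class);

        for (int i = 0; i < PERMISSIONS.length; ++i) {
            if ((bits & (1 << i)) != 0)
                permSet.add(PERMISSIONS[i]);
        }

        // With the assumed ordering this prints owner read/write, group read, others read.
        System.out.println(permSet);
    }
}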
Use of org.apache.ignite.igfs.IgfsException in project ignite by apache.
The class IgfsUtils, method toIgfsException:
/**
* Converts any passed exception to IGFS exception.
*
* @param err Initial exception.
* @return Converted IGFS exception.
*/
@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
public static IgfsException toIgfsException(Throwable err) {
    IgfsException err0 = err instanceof IgfsException ? (IgfsException) err : null;

    IgfsException igfsErr = X.cause(err, IgfsException.class);

    while (igfsErr != null && igfsErr != err0) {
        err0 = igfsErr;

        igfsErr = X.cause(err, IgfsException.class);
    }

    // If initial exception is already IGFS exception and no inner stuff exists, just return it unchanged.
    if (err0 != err) {
        if (err0 != null)
            // Dealing with a kind of IGFS error, wrap it once again, preserving message and root cause.
            err0 = newIgfsException(err0.getClass(), err0.getMessage(), err0);
        else {
            if (err instanceof ClusterTopologyServerNotFoundException)
                err0 = new IgfsException("Cache server nodes not found.", err);
            else
                // Unknown error nature.
                err0 = new IgfsException("Generic IGFS error occurred.", err);
        }
    }

    return err0;
}
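The helper walks the cause chain looking for an IgfsException via X.cause(...) and, unless the original error was already the outermost IGFS exception, wraps the result once more so callers always receive an IgfsException with the root cause preserved. Below is a JDK-only sketch of the cause-chain lookup idea; it is not Ignite's X.cause implementation, and the names CauseChainDemo and firstCauseOf are hypothetical.

import java.io.IOException;

public class CauseChainDemo {
    // Walk the cause chain and return the first (outermost) throwable of the
    // requested type, or null if the chain contains none.
    static <T extends Throwable> T firstCauseOf(Throwable err, Class<T> cls) {
        for (Throwable t = err; t != null; t = t.getCause()) {
            if (cls.isInstance(t))
                return cls.cast(t);
        }

        return null;
    }

    public static void main(String[] args) {
        Throwable chain = new RuntimeException("outer",
            new IOException("middle", new IllegalStateException("root")));

        // Finds the IOException buried inside the RuntimeException.
        System.out.println(firstCauseOf(chain, IOException.class));

        // No ArithmeticException anywhere in the chain, so this prints null.
        System.out.println(firstCauseOf(chain, ArithmeticException.class));
    }
}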
Use of org.apache.ignite.igfs.IgfsException in project ignite by apache.
The class LocalIgfsSecondaryFileSystem, method rename:
/**
* {@inheritDoc}
*/
@Override
public void rename(IgfsPath src, IgfsPath dest) {
    File srcFile = fileForPath(src);
    File destFile = fileForPath(dest);

    if (!srcFile.exists())
        throw new IgfsPathNotFoundException("Failed to perform rename because source path not found: " + src);

    if (srcFile.isDirectory() && destFile.isFile())
        throw new IgfsPathIsNotDirectoryException("Failed to perform rename because source path is a " +
            "directory and destination path is a file [src=" + src + ", dest=" + dest + ']');

    try {
        if (destFile.isDirectory())
            Files.move(srcFile.toPath(), destFile.toPath().resolve(srcFile.getName()));
        else if (!srcFile.renameTo(destFile))
            throw new IgfsException("Failed to perform rename (underlying file system returned false) " +
                "[src=" + src + ", dest=" + dest + ']');
    }
    catch (IOException e) {
        throw handleSecondaryFsError(e, "Failed to rename [src=" + src + ", dest=" + dest + ']');
    }
}
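The try block distinguishes two cases: if the destination is an existing directory, the source is moved into it under its own name; otherwise a plain File.renameTo is attempted. Below is a self-contained sketch of the same two-branch semantics using only java.nio; the scratch layout and the class name RenameDemo are made up for illustration.

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

public class RenameDemo {
    public static void main(String[] args) throws IOException {
        // Hypothetical scratch layout, created up front so the sketch actually runs.
        File workDir = Files.createTempDirectory("igfs-demo").toFile();

        File src = new File(workDir, "a.txt");
        Files.createFile(src.toPath());

        File dest = new File(workDir, "dir");
        Files.createDirectory(dest.toPath());

        if (dest.isDirectory())
            // The file is moved *into* the directory: <workDir>/dir/a.txt.
            Files.move(src.toPath(), dest.toPath().resolve(src.getName()));
        else if (!src.renameTo(dest))
            throw new IOException("Rename failed: " + src + " -> " + dest);
    }
}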
Use of org.apache.ignite.igfs.IgfsException in project ignite by apache.
The class IgfsDataManager, method affinity0:
/**
* Calculates non-colocated affinity for given file info and given region of file.
*
* @param info File info.
* @param start Start offset.
* @param len Length.
* @param maxLen Maximum allowed split length.
* @param res Result collection to add regions to.
*/
private void affinity0(IgfsEntryInfo info, long start, long len, long maxLen, Deque<IgfsBlockLocation> res) {
    long firstGrpIdx = start / grpBlockSize;
    long limitGrpIdx = (start + len + grpBlockSize - 1) / grpBlockSize;

    if (limitGrpIdx - firstGrpIdx > Integer.MAX_VALUE)
        throw new IgfsException("Failed to get affinity (range is too wide)" +
            " [info=" + info + ", start=" + start + ", len=" + len + ']');

    if (log.isDebugEnabled())
        log.debug("Mapping file region [fileInfo=" + info + ", start=" + start + ", len=" + len + ']');

    for (long grpIdx = firstGrpIdx; grpIdx < limitGrpIdx; grpIdx++) {
        // Boundaries of the block.
        long blockStart;
        long blockLen;

        // The first block.
        if (grpIdx == firstGrpIdx) {
            blockStart = start % grpBlockSize;
            blockLen = Math.min(grpBlockSize - blockStart, len);
        }
        // The last block.
        else if (grpIdx == limitGrpIdx - 1) {
            blockStart = 0;
            blockLen = (start + len - 1) % grpBlockSize + 1;
        }
        // Other blocks.
        else {
            blockStart = 0;
            blockLen = grpBlockSize;
        }

        // Affinity for the first block in the group.
        IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), grpIdx * grpSize);

        Collection<ClusterNode> affNodes = dataCache.affinity().mapKeyToPrimaryAndBackups(key);

        if (log.isDebugEnabled())
            log.debug("Mapped key to nodes [key=" + key + ", nodes=" + F.nodeIds(affNodes) + ", blockStart=" +
                blockStart + ", blockLen=" + blockLen + ']');

        IgfsBlockLocation last = res.peekLast();

        // Merge with previous affinity block location?
        if (last != null && equal(last.nodeIds(), F.viewReadOnly(affNodes, F.node2id()))) {
            // Remove previous incomplete value.
            res.removeLast();

            // Update affinity block location with merged one.
            splitBlocks(last.start(), last.length() + blockLen, maxLen, affNodes, res);
        }
        else
            splitBlocks(grpIdx * grpBlockSize + blockStart, blockLen, maxLen, affNodes, res);
    }

    if (log.isDebugEnabled())
        log.debug("Calculated file affinity [info=" + info + ", start=" + start + ", len=" + len + ", res=" + res + ']');
}
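A small worked example of the group arithmetic: firstGrpIdx is the group containing the first byte of the region, and limitGrpIdx is the exclusive upper bound, rounded up to a whole group. The sketch below uses a hypothetical 1 MB group block size rather than a value taken from any IGFS configuration.

public class AffinityRangeDemo {
    public static void main(String[] args) {
        long grpBlockSize = 1024 * 1024;  // Hypothetical: 1 MB per affinity group.
        long start = 2_621_440;           // Region starts 2.5 MB into the file.
        long len = 2_097_152;             // Region is 2 MB long.

        // Same arithmetic as affinity0().
        long firstGrpIdx = start / grpBlockSize;                              // 2
        long limitGrpIdx = (start + len + grpBlockSize - 1) / grpBlockSize;   // 5

        // The region therefore touches groups 2, 3 and 4.
        System.out.println("groups " + firstGrpIdx + " .. " + (limitGrpIdx - 1));
    }
}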
Use of org.apache.ignite.igfs.IgfsException in project ignite by apache.
The class IgfsMetaManager, method softDelete:
/**
* Move path to the trash directory.
*
* @param path Path.
* @param recursive Recursive flag.
* @param secondaryFs Secondary file system (optional).
* @return ID of an entry located directly under the trash directory.
* @throws IgniteCheckedException If failed.
*/
IgfsDeleteResult softDelete(final IgfsPath path, final boolean recursive,
    @Nullable IgfsSecondaryFileSystem secondaryFs) throws IgniteCheckedException {
    while (true) {
        if (busyLock.enterBusy()) {
            try {
                validTxState(false);

                IgfsPathIds pathIds = pathIds(path);

                if (!pathIds.allExists() && secondaryFs == null)
                    return new IgfsDeleteResult(false, null);

                IgniteUuid victimId = pathIds.lastId();
                String victimName = pathIds.lastPart();

                if (IgfsUtils.isRootId(victimId))
                    throw new IgfsException("Cannot remove root directory");

                // Prepare IDs to lock.
                SortedSet<IgniteUuid> allIds = new TreeSet<>(PATH_ID_SORTING_COMPARATOR);

                pathIds.addExistingIds(allIds, relaxed);

                IgniteUuid trashId = IgfsUtils.randomTrashId();

                allIds.add(trashId);

                try (GridNearTxLocal tx = startTx()) {
                    // Lock participants.
                    Map<IgniteUuid, IgfsEntryInfo> lockInfos = lockIds(allIds);

                    if (secondaryFs != null && isRetryForSecondary(pathIds, lockInfos))
                        continue;

                    // Ensure that all participants are still in place.
                    if (!pathIds.allExists() || !pathIds.verifyIntegrity(lockInfos, relaxed)) {
                        // For DUAL mode we will try to update the underlying FS still. Note we do that inside TX.
                        if (secondaryFs != null) {
                            boolean res = secondaryFs.delete(path, recursive);

                            return new IgfsDeleteResult(res, null);
                        }
                        else
                            return new IgfsDeleteResult(false, null);
                    }

                    IgfsEntryInfo victimInfo = lockInfos.get(victimId);

                    // Cannot delete non-empty directory if recursive flag is not set.
                    if (!recursive && victimInfo.hasChildren())
                        throw new IgfsDirectoryNotEmptyException("Failed to remove directory (directory is not " +
                            "empty and recursive flag is not set).");

                    // Prepare trash data.
                    IgfsEntryInfo trashInfo = lockInfos.get(trashId);

                    final String trashName = IgfsUtils.composeNameForTrash(path, victimId);

                    assert !trashInfo.hasChild(trashName) : "Failed to add file name into the " +
                        "destination directory (file already exists) [destName=" + trashName + ']';

                    IgniteUuid parentId = pathIds.lastParentId();
                    IgfsEntryInfo parentInfo = lockInfos.get(parentId);

                    // Propagate call to the secondary file system.
                    if (secondaryFs != null && !secondaryFs.delete(path, recursive))
                        return new IgfsDeleteResult(false, null);

                    transferEntry(parentInfo.listing().get(victimName), parentId, victimName, trashId, trashName);

                    tx.commit();

                    signalDeleteWorker();

                    return new IgfsDeleteResult(true, victimInfo);
                }
            }
            finally {
                busyLock.leaveBusy();
            }
        }
        else
            throw new IllegalStateException("Failed to perform soft delete because Grid is " +
                "stopping [path=" + path + ']');
    }
}
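Before the transaction starts, softDelete collects every participating entry ID (the existing path IDs plus a random trash directory ID) into a SortedSet ordered by PATH_ID_SORTING_COMPARATOR and only then calls lockIds, presumably so that concurrent operations always acquire their locks in the same order and cannot deadlock. Below is a minimal standalone sketch of that lock-ordering pattern using JDK UUIDs and ReentrantLocks instead of Ignite's IgniteUuid and cache entry locks; every name in it is hypothetical.

import java.util.SortedSet;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantLock;

public class LockOrderingDemo {
    private static final ConcurrentMap<UUID, ReentrantLock> LOCKS = new ConcurrentHashMap<>();

    // IDs are locked in their sorted order, so two concurrent operations can
    // never grab the same pair of entries in opposite orders.
    static void lockAll(SortedSet<UUID> ids) {
        for (UUID id : ids)
            LOCKS.computeIfAbsent(id, k -> new ReentrantLock()).lock();
    }

    static void unlockAll(SortedSet<UUID> ids) {
        for (UUID id : ids)
            LOCKS.get(id).unlock();
    }

    public static void main(String[] args) {
        SortedSet<UUID> ids = new TreeSet<>();

        ids.add(UUID.randomUUID());  // E.g. the parent directory entry.
        ids.add(UUID.randomUUID());  // E.g. the victim entry.
        ids.add(UUID.randomUUID());  // E.g. the trash directory.

        lockAll(ids);

        try {
            // ... move the victim entry under the trash directory here ...
        }
        finally {
            unlockAll(ids);
        }
    }
}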