Use of org.apache.ignite.igfs.IgfsException in project ignite by apache.
Class IgfsMetaManager, method create.
/**
* Create a file.
*
* @param path Path.
* @param dirProps Directory properties.
* @param overwrite Overwrite flag.
* @param blockSize Block size.
* @param affKey Affinity key.
* @param evictExclude Evict exclude flag.
* @param fileProps File properties.
* @param secondaryCtx Secondary file system create context.
* @return Operation result.
* @throws IgniteCheckedException If failed.
*/
IgfsCreateResult create(final IgfsPath path, Map<String, String> dirProps, final boolean overwrite,
    final int blockSize, @Nullable final IgniteUuid affKey, final boolean evictExclude,
    @Nullable Map<String, String> fileProps, @Nullable IgfsSecondaryFileSystemCreateContext secondaryCtx)
    throws IgniteCheckedException {
    validTxState(false);

    while (true) {
        if (busyLock.enterBusy()) {
            OutputStream secondaryOut = null;

            try {
                // Prepare path IDs.
                IgfsPathIds pathIds = pathIds(path);

                // Prepare lock IDs.
                Set<IgniteUuid> lockIds = new TreeSet<>(PATH_ID_SORTING_COMPARATOR);

                pathIds.addExistingIds(lockIds, relaxed);
                pathIds.addSurrogateIds(lockIds);

                // In overwrite mode we also lock ID of potential replacement as well as trash ID.
                IgniteUuid overwriteId = IgniteUuid.randomUuid();
                IgniteUuid trashId = IgfsUtils.randomTrashId();

                if (overwrite) {
                    lockIds.add(overwriteId);

                    // Trash ID is only added if we suspect conflict.
                    if (pathIds.allExists())
                        lockIds.add(trashId);
                }

                // Start TX.
                try (GridNearTxLocal tx = startTx()) {
                    Map<IgniteUuid, IgfsEntryInfo> lockInfos = lockIds(lockIds);

                    if (secondaryCtx != null && isRetryForSecondary(pathIds, lockInfos))
                        continue;

                    if (!pathIds.verifyIntegrity(lockInfos, relaxed))
                        // Directory structure changed concurrently. So we simply re-try.
                        continue;

                    if (pathIds.allExists()) {
                        // All participants found.
                        IgfsEntryInfo oldInfo = lockInfos.get(pathIds.lastId());

                        // Check: is it a file?
                        if (!oldInfo.isFile())
                            throw new IgfsPathIsDirectoryException("Failed to create a file: " + path);

                        // Check: can we overwrite it?
                        if (!overwrite)
                            throw new IgfsPathAlreadyExistsException("Failed to create a file: " + path);

                        // Check if file already opened for write.
                        if (oldInfo.lockId() != null)
                            throw new IgfsException("File is already opened for write: " + path);

                        // At this point file can be re-created safely.

                        // Add existing to trash listing.
                        IgniteUuid oldId = pathIds.lastId();

                        id2InfoPrj.invoke(trashId, new IgfsMetaDirectoryListingAddProcessor(
                            IgfsUtils.composeNameForTrash(path, oldId), new IgfsListingEntry(oldInfo)));

                        // Replace ID in parent directory.
                        String name = pathIds.lastPart();
                        IgniteUuid parentId = pathIds.lastParentId();

                        id2InfoPrj.invoke(parentId, new IgfsMetaDirectoryListingReplaceProcessor(name, overwriteId));

                        // Create the file.
                        IgniteUuid newLockId = createFileLockId(false);

                        long newAccessTime;
                        long newModificationTime;
                        Map<String, String> newProps;
                        long newLen;
                        int newBlockSize;

                        if (secondaryCtx != null) {
                            secondaryOut = secondaryCtx.create();

                            newAccessTime = 0L;
                            newModificationTime = 0L;
                            newProps = null;
                        }
                        else {
                            newAccessTime = System.currentTimeMillis();
                            newModificationTime = newAccessTime;
                            newProps = fileProps;
                        }

                        newLen = 0L;
                        newBlockSize = blockSize;

                        IgfsEntryInfo newInfo = invokeAndGet(overwriteId, new IgfsMetaFileCreateProcessor(
                            newAccessTime, newModificationTime, newProps, newBlockSize, affKey, newLockId,
                            evictExclude, newLen));

                        // Prepare result and commit.
                        tx.commit();

                        IgfsUtils.sendEvents(igfsCtx.kernalContext(), path, EventType.EVT_IGFS_FILE_OPENED_WRITE);

                        return new IgfsCreateResult(newInfo, secondaryOut);
                    }
                    else {
                        // Create file and parent folders.
                        T1<OutputStream> secondaryOutHolder = null;

                        if (secondaryCtx != null)
                            secondaryOutHolder = new T1<>();

                        IgfsPathsCreateResult res;

                        try {
                            res = createFile(pathIds, lockInfos, dirProps, fileProps, blockSize, affKey,
                                evictExclude, secondaryCtx, secondaryOutHolder);
                        }
                        finally {
                            if (secondaryOutHolder != null)
                                secondaryOut = secondaryOutHolder.get();
                        }

                        if (res == null)
                            continue;

                        // Commit.
                        tx.commit();

                        // Generate events.
                        generateCreateEvents(res.createdPaths(), true);

                        return new IgfsCreateResult(res.info(), secondaryOut);
                    }
                }
            }
            catch (IgniteException | IgniteCheckedException e) {
                U.closeQuiet(secondaryOut);

                throw e;
            }
            catch (Exception e) {
                U.closeQuiet(secondaryOut);

                throw new IgniteCheckedException("Create failed due to unexpected exception: " + path, e);
            }
            finally {
                busyLock.leaveBusy();
            }
        }
        else
            throw new IllegalStateException("Failed to create file because Grid is stopping. [path=" + path + ']');
    }
}
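From the public API these metadata checks surface to callers as unchecked IgfsException subclasses (IgfsPathAlreadyExistsException, IgfsPathIsDirectoryException, or a plain IgfsException for the write-lock conflict). Below is a minimal caller-side sketch, not part of the Ignite sources: the IGFS instance name "igfs", the config file name and the path are illustrative assumptions.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.Ignition;
import org.apache.ignite.igfs.IgfsException;
import org.apache.ignite.igfs.IgfsOutputStream;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsCreateExample {
    public static void main(String[] args) throws Exception {
        // Assumed config file and IGFS instance name; adjust to your setup.
        try (Ignite ignite = Ignition.start("example-igfs.xml")) {
            IgniteFileSystem fs = ignite.fileSystem("igfs");

            IgfsPath path = new IgfsPath("/tmp/example.txt");

            // overwrite == false: an existing file fails with IgfsPathAlreadyExistsException;
            // a concurrent writer holding the file lock surfaces as a plain IgfsException.
            try (IgfsOutputStream out = fs.create(path, false)) {
                out.write("hello".getBytes());
            }
            catch (IgfsException e) {
                System.err.println("Create failed: " + e.getMessage());
            }
        }
    }
}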
Use of org.apache.ignite.igfs.IgfsException in project ignite by apache.
Class IgfsMetaManager, method append.
/**
* Append routine.
*
* @param path Path.
* @param dirProps Directory properties.
* @param create Create flag.
* @param blockSize Block size.
* @param affKey Affinity key.
* @param evictExclude Evict exclude flag.
* @param fileProps File properties.
* @return Resulting info.
* @throws IgniteCheckedException If failed.
*/
IgfsEntryInfo append(final IgfsPath path, Map<String, String> dirProps, final boolean create,
    final int blockSize, @Nullable final IgniteUuid affKey, final boolean evictExclude,
    @Nullable Map<String, String> fileProps) throws IgniteCheckedException {
    validTxState(false);

    while (true) {
        if (busyLock.enterBusy()) {
            try {
                // Prepare path IDs.
                IgfsPathIds pathIds = pathIds(path);

                // Fail-fast: create flag is not specified and some paths are missing.
                if (!pathIds.allExists() && !create)
                    throw new IgfsPathNotFoundException("Failed to append because file is not found: " + path);

                // Prepare lock IDs.
                Set<IgniteUuid> lockIds = new TreeSet<>(PATH_ID_SORTING_COMPARATOR);

                pathIds.addExistingIds(lockIds, relaxed);
                pathIds.addSurrogateIds(lockIds);

                // Start TX.
                try (GridNearTxLocal tx = startTx()) {
                    Map<IgniteUuid, IgfsEntryInfo> lockInfos = lockIds(lockIds);

                    if (!pathIds.verifyIntegrity(lockInfos, relaxed))
                        // Directory structure changed concurrently. So we simply re-try.
                        continue;

                    if (pathIds.allExists()) {
                        // All participants are found. Simply open the stream.
                        IgfsEntryInfo info = lockInfos.get(pathIds.lastId());

                        // Check: is it a file?
                        if (!info.isFile())
                            throw new IgfsPathIsDirectoryException("Failed to open file for write: " + path);

                        // Check if file already opened for write.
                        if (info.lockId() != null)
                            throw new IgfsException("File is already opened for write: " + path);

                        // At this point we can open the stream safely.
                        info = invokeLock(info.id(), false);

                        tx.commit();

                        IgfsUtils.sendEvents(igfsCtx.kernalContext(), path, EventType.EVT_IGFS_FILE_OPENED_WRITE);

                        return info;
                    }
                    else {
                        // Create file and parent folders.
                        IgfsPathsCreateResult res = createFile(pathIds, lockInfos, dirProps, fileProps,
                            blockSize, affKey, evictExclude, null, null);

                        if (res == null)
                            continue;

                        // Commit.
                        tx.commit();

                        // Generate events.
                        generateCreateEvents(res.createdPaths(), true);

                        return res.info();
                    }
                }
            }
            finally {
                busyLock.leaveBusy();
            }
        }
        else
            throw new IllegalStateException("Failed to append to file because Grid is stopping: " + path);
    }
}
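The append path has the same locking contract from the caller's perspective: a file that is already opened for write surfaces as an IgfsException. A hedged caller-side sketch follows; the helper name and the print-and-swallow error policy are illustrative choices, not Ignite code.

import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsException;
import org.apache.ignite.igfs.IgfsOutputStream;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsAppendExample {
    /** Appends a line to the given IGFS file, creating the file if it does not exist. */
    public static void appendLine(IgniteFileSystem fs, IgfsPath path, String line) {
        // create == true corresponds to the 'create' flag of IgfsMetaManager.append() above.
        try (IgfsOutputStream out = fs.append(path, true)) {
            out.write((line + "\n").getBytes());
        }
        catch (IgfsException e) {
            // Thrown, for example, when the file is already opened for write.
            System.err.println("Append failed for " + path + ": " + e.getMessage());
        }
        catch (java.io.IOException e) {
            throw new RuntimeException("I/O error while appending to " + path, e);
        }
    }
}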
Use of org.apache.ignite.igfs.IgfsException in project ignite by apache.
Class IgfsAbstractSelfTest, method testMkdirs.
/**
* Test mkdirs in case both local and remote file systems have the same folder structure.
*
* @throws Exception If failed.
*/
@SuppressWarnings("ConstantConditions")
public void testMkdirs() throws Exception {
    if (!propertiesSupported())
        return;

    // mkdirs command doesn't propagate user info.
    Map<String, String> props = properties(null, null, "0555");

    igfs.mkdirs(new IgfsPath("/x"), null);
    checkExist(igfs, igfsSecondary, new IgfsPath("/x"));

    igfs.mkdirs(new IgfsPath("/k/l"), null);
    checkExist(igfs, igfsSecondary, new IgfsPath("/k/l"));

    igfs.mkdirs(new IgfsPath("/x/y"), null);
    checkExist(igfs, igfsSecondary, new IgfsPath("/x/y"));

    igfs.mkdirs(new IgfsPath("/a/b/c/d"), null);
    checkExist(igfs, igfsSecondary, new IgfsPath("/a/b/c/d"));

    igfs.mkdirs(new IgfsPath("/a/b/c/d/e"), null);
    checkExist(igfs, igfsSecondary, new IgfsPath("/a/b/c/d/e"));

    // "f" is a file.
    create(igfs, null, new IgfsPath[] { new IgfsPath("/d/f") });

    checkExist(igfs, igfsSecondary, new IgfsPath("/d/f"));
    assertTrue(igfs.info(new IgfsPath("/d/f")).isFile());

    try {
        igfs.mkdirs(new IgfsPath("/d/f"), null);

        fail("IgfsParentNotDirectoryException expected.");
    }
    catch (IgfsParentNotDirectoryException ignore) {
        // No-op.
    }
    catch (IgfsException e) {
        // Currently Ok for Hadoop fs:
        if (!getClass().getSimpleName().startsWith("Hadoop"))
            throw e;
    }

    try {
        igfs.mkdirs(new IgfsPath("/d/f/something/else"), null);

        fail("IgfsParentNotDirectoryException expected.");
    }
    catch (IgfsParentNotDirectoryException ignore) {
        // No-op.
    }
    catch (IgfsException e) {
        // Currently Ok for Hadoop fs:
        if (!getClass().getSimpleName().startsWith("Hadoop"))
            throw e;
    }

    create(igfs, paths(DIR, SUBDIR), null);

    igfs.mkdirs(SUBSUBDIR, props);

    // Ensure that directory was created and properties are propagated.
    checkExist(igfs, igfsSecondary, SUBSUBDIR);

    if (permissionsSupported()) {
        if (dual)
            // Check only permissions because user and group will always be present in Hadoop Fs.
            assertEquals(props.get(IgfsUtils.PROP_PERMISSION), igfsSecondary.permissions(SUBSUBDIR.toString()));

        // We check only permission because IGFS client adds username and group name explicitly.
        assertEquals(props.get(IgfsUtils.PROP_PERMISSION),
            igfs.info(SUBSUBDIR).properties().get(IgfsUtils.PROP_PERMISSION));
    }
}
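The scenario this test exercises, calling mkdirs on a path whose parent component is a regular file, can be handled from the public API as in the sketch below. The class name, tryMkdirs helper and the catch-and-return-false policy are illustrative assumptions, not part of the test suite.

import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsException;
import org.apache.ignite.igfs.IgfsParentNotDirectoryException;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsMkdirsExample {
    /** Tries to create a directory and reports whether a parent component turned out to be a file. */
    public static boolean tryMkdirs(IgniteFileSystem fs, String dir) {
        try {
            fs.mkdirs(new IgfsPath(dir));

            return true;
        }
        catch (IgfsParentNotDirectoryException e) {
            // One of the parent path components is an existing file.
            return false;
        }
        catch (IgfsException e) {
            // Other IGFS failures (e.g. the broader exception type Hadoop-backed file systems may throw).
            return false;
        }
    }
}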
Use of org.apache.ignite.igfs.IgfsException in project ignite by apache.
Class IgfsMetaManagerSelfTest, method expectsRenameFail.
/**
* Test expected failures for 'move file' operation.
*/
private void expectsRenameFail(final String src, final String dst) {
    Throwable err = assertThrowsInherited(log, new Callable() {
        @Override public Object call() throws Exception {
            mgr.move(new IgfsPath(src), new IgfsPath(dst));

            return null;
        }
    }, IgfsException.class, null);

    assertTrue("Unexpected cause: " + err, err instanceof IgfsException);
}
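Without the GridTestUtils helper, the same expectation can be written as a plain try/catch inside the same test class. This is a rough, illustrative equivalent only; it relies on the class's mgr field and a statically imported JUnit fail, and the method name is ours.

/** Asserts that moving 'src' to 'dst' fails with IgfsException (or a subclass). */
private void expectsRenameFailPlain(String src, String dst) throws Exception {
    try {
        mgr.move(new IgfsPath(src), new IgfsPath(dst));

        fail("IgfsException expected for move: " + src + " -> " + dst);
    }
    catch (IgfsException e) {
        // Expected: any IgfsException subclass counts as the anticipated failure.
    }
}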
Use of org.apache.ignite.igfs.IgfsException in project ignite by apache.
Class LocalIgfsSecondaryFileSystem, method usedSpaceSize.
/** {@inheritDoc} */
@Override public long usedSpaceSize() {
    Path p = fileForPath(IgfsPath.ROOT).toPath();

    try {
        LocalFileSystemSizeVisitor visitor = new LocalFileSystemSizeVisitor();

        Files.walkFileTree(p, visitor);

        return visitor.size();
    }
    catch (IOException e) {
        throw new IgfsException("Failed to calculate used space size.", e);
    }
}
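LocalFileSystemSizeVisitor is an internal Ignite helper; the same size accumulation can be expressed with only java.nio, as in this illustrative sketch (the SizeVisitor name is ours, not Ignite's).

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;

/** Accumulates the total size of all regular files under a directory tree. */
public class SizeVisitor extends SimpleFileVisitor<Path> {
    /** Running total in bytes. */
    private long size;

    /** {@inheritDoc} */
    @Override public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
        size += attrs.size();

        return FileVisitResult.CONTINUE;
    }

    /** @return Total size of visited files in bytes. */
    public long size() {
        return size;
    }

    public static void main(String[] args) throws IOException {
        SizeVisitor visitor = new SizeVisitor();

        Files.walkFileTree(Paths.get(args.length > 0 ? args[0] : "."), visitor);

        System.out.println("Used space: " + visitor.size() + " bytes");
    }
}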