use of org.apache.ignite.igfs.IgfsFile in project ignite by apache.
the class IgfsImpl method summaryRecursive.
/**
* Recursively calculates the summary (size, file and directory counts) for the given file or directory.
*
* @param file IGFS File object.
* @param sum Summary object that will collect information.
* @throws IgniteCheckedException If failed.
*/
private void summaryRecursive(IgfsFile file, IgfsPathSummary sum) throws IgniteCheckedException {
    assert file != null;
    assert sum != null;

    if (file.isDirectory()) {
        if (!F.eq(IgfsPath.ROOT, file.path()))
            sum.directoriesCount(sum.directoriesCount() + 1);

        for (IgfsFile childFile : listFiles(file.path()))
            summaryRecursive(childFile, sum);
    }
    else {
        sum.filesCount(sum.filesCount() + 1);
        sum.totalLength(sum.totalLength() + file.length());
    }
}
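For context, this recursion is normally reached through the public summary call on the file system facade. A minimal caller-side sketch (the cache name "igfs" and the path "/data" are illustrative, not taken from the snippet above):
// Hypothetical caller: obtain the IGFS facade and request a path summary, which is what
// ultimately drives summaryRecursive(...) over the directory tree.
IgniteFileSystem fs = ignite.fileSystem("igfs");          // assumes a running Ignite node with an IGFS named "igfs"

IgfsPathSummary sum = fs.summary(new IgfsPath("/data"));  // "/data" is an illustrative path

System.out.println("dirs=" + sum.directoriesCount()
    + ", files=" + sum.filesCount()
    + ", bytes=" + sum.totalLength());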
use of org.apache.ignite.igfs.IgfsFile in project ignite by apache.
the class IgfsMetaManager method onSuccessCreate.
/**
* A delegate method that performs file creation in the synchronization task.
*
* @param fs File system.
* @param path Path.
* @param simpleCreate "Simple create" flag.
* @param props Properties.
* @param overwrite Overwrite flag.
* @param bufSize Buffer size.
* @param replication Replication factor.
* @param blockSize Block size.
* @param affKey Affinity key.
* @param infos Map from paths to corresponding infos.
* @param pendingEvts Non-null collection to accumulate the fired events in.
* @param t1 A single-object tuple to hold the created output stream.
* @return Output stream descriptor.
* @throws Exception On error.
*/
IgfsCreateResult onSuccessCreate(IgfsSecondaryFileSystem fs, IgfsPath path, boolean simpleCreate,
    @Nullable final Map<String, String> props, boolean overwrite, int bufSize, short replication,
    long blockSize, IgniteUuid affKey, Map<IgfsPath, IgfsEntryInfo> infos,
    final Deque<IgfsEvent> pendingEvts, final T1<OutputStream> t1) throws Exception {
    validTxState(true);

    assert !infos.isEmpty();

    // Determine the first existing parent.
    IgfsPath parentPath = null;

    for (IgfsPath curPath : infos.keySet()) {
        if (parentPath == null || curPath.isSubDirectoryOf(parentPath))
            parentPath = curPath;
    }

    assert parentPath != null;

    IgfsEntryInfo parentInfo = infos.get(parentPath);

    // Delegate to the secondary file system.
    OutputStream out = simpleCreate ? fs.create(path, overwrite) :
        fs.create(path, bufSize, overwrite, replication, blockSize, props);

    t1.set(out);

    IgfsPath parent0 = path.parent();

    assert parent0 != null : "path.parent() is null (are we creating ROOT?): " + path;

    // If some of the parent directories were missing, synchronize again.
    if (!parentPath.equals(parent0)) {
        parentInfo = synchronize(fs, parentPath, parentInfo, parent0, true, null);

        // Fire notification about missing directories creation.
        if (evts.isRecordable(EventType.EVT_IGFS_DIR_CREATED)) {
            IgfsPath evtPath = parent0;

            while (!parentPath.equals(evtPath)) {
                pendingEvts.addFirst(new IgfsEvent(evtPath, locNode, EventType.EVT_IGFS_DIR_CREATED));

                evtPath = evtPath.parent();

                // If this fails, then ROOT does not exist.
                assert evtPath != null;
            }
        }
    }

    // Get created file info.
    IgfsFile status = fs.info(path);

    if (status == null)
        throw fsException("Failed to open output stream to the file created in " +
            "the secondary file system because it no longer exists: " + path);
    else if (status.isDirectory())
        throw fsException("Failed to open output stream to the file created in " +
            "the secondary file system because the path points to a directory: " + path);

    IgfsEntryInfo newInfo = IgfsUtils.createFile(IgniteUuid.randomUuid(), igfsCtx.configuration().getBlockSize(),
        status.length(), affKey, createFileLockId(false), igfsCtx.igfs().evictExclude(path, false),
        status.properties(), status.accessTime(), status.modificationTime());

    // Add new file info to the listing optionally removing the previous one.
    assert parentInfo != null;

    IgniteUuid oldId = putIfAbsentNonTx(parentInfo.id(), path.name(), newInfo);

    if (oldId != null) {
        IgfsEntryInfo oldInfo = info(oldId);

        // Otherwise cache is in inconsistent state.
        assert oldInfo != null;
        // The contract is that we cannot overwrite a file locked for writing:
        if (oldInfo.lockId() != null)
            throw fsException("Failed to overwrite file (file is opened for writing) [path=" + path +
                ", fileId=" + oldId + ", lockId=" + oldInfo.lockId() + ']');
        // Remove the old one.
        id2InfoPrj.remove(oldId);

        id2InfoPrj.invoke(parentInfo.id(), new IgfsMetaDirectoryListingRemoveProcessor(path.name(),
            parentInfo.listing().get(path.name()).fileId()));

        // Put new one.
        createNewEntry(newInfo, parentInfo.id(), path.name());

        igfsCtx.data().delete(oldInfo);
    }

    // Record CREATE event if needed.
    if (oldId == null && evts.isRecordable(EventType.EVT_IGFS_FILE_CREATED))
        pendingEvts.add(new IgfsEvent(path, locNode, EventType.EVT_IGFS_FILE_CREATED));

    return new IgfsCreateResult(newInfo, out);
}
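From the caller's perspective this code path is exercised by a dual-mode create. A minimal sketch, assuming an IgniteFileSystem handle fs (obtained as in the earlier sketch) backed by a secondary file system in DUAL_SYNC or DUAL_ASYNC mode; the path and payload are illustrative:
// Hypothetical dual-mode create; the file is written to IGFS and, through the synchronization
// task that calls onSuccessCreate(...), to the secondary file system as well.
IgfsPath path = new IgfsPath("/dual/newFile.txt");

try (OutputStream out = fs.create(path, /*overwrite*/ true)) {
    out.write("hello".getBytes());
}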
use of org.apache.ignite.igfs.IgfsFile in project ignite by apache.
the class IgniteHadoopFileSystem method listStatus.
/**
* {@inheritDoc}
*/
@Override
public FileStatus[] listStatus(Path f) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPath path = convert(f);

        Collection<IgfsFile> list = rmtClient.listFiles(path);

        if (list == null)
            throw new FileNotFoundException("File " + f + " does not exist.");

        List<IgfsFile> files = new ArrayList<>(list);
        FileStatus[] arr = new FileStatus[files.size()];

        for (int i = 0; i < arr.length; i++)
            arr[i] = convert(files.get(i));

        if (clientLog.isLogEnabled()) {
            String[] fileArr = new String[arr.length];

            for (int i = 0; i < arr.length; i++)
                fileArr[i] = arr[i].getPath().toString();

            clientLog.logListDirectory(path, fileArr);
        }

        return arr;
    }
    finally {
        leaveBusy();
    }
}
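A Hadoop-side sketch of driving this method; the endpoint URI, port and directory are illustrative, and it assumes core-site.xml maps the igfs:// scheme to IgniteHadoopFileSystem:
// Hypothetical Hadoop client listing an IGFS directory through IgniteHadoopFileSystem.
Configuration conf = new Configuration();

FileSystem hadoopFs = FileSystem.get(new URI("igfs://igfs@localhost:10500/"), conf);

for (FileStatus status : hadoopFs.listStatus(new Path("/logs")))
    System.out.println(status.getPath() + " len=" + status.getLen());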
use of org.apache.ignite.igfs.IgfsFile in project ignite by apache.
the class IgniteHadoopFileSystem method getFileStatus.
/**
* {@inheritDoc}
*/
@Override
public FileStatus getFileStatus(Path f) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsFile info = rmtClient.info(convert(f));

        if (info == null)
            throw new FileNotFoundException("File not found: " + f);

        return convert(info);
    }
    finally {
        leaveBusy();
    }
}
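Reusing the hadoopFs handle from the previous sketch, a single path can be probed the same way (the path is illustrative):
// Hypothetical status probe; throws FileNotFoundException when the path does not exist.
FileStatus st = hadoopFs.getFileStatus(new Path("/logs/app.log"));

System.out.println(st.isDirectory() + " " + st.getLen() + " " + st.getModificationTime());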
use of org.apache.ignite.igfs.IgfsFile in project ignite by apache.
the class IgfsImpl method open.
/**
* {@inheritDoc}
*/
@Override
public IgfsInputStream open(final IgfsPath path, final int bufSize, final int seqReadsBeforePrefetch) {
    A.notNull(path, "path");
    A.ensure(bufSize >= 0, "bufSize >= 0");
    A.ensure(seqReadsBeforePrefetch >= 0, "seqReadsBeforePrefetch >= 0");

    return safeOp(new Callable<IgfsInputStream>() {
        @Override public IgfsInputStream call() throws Exception {
            if (log.isDebugEnabled())
                log.debug("Open file for reading [path=" + path + ", bufSize=" + bufSize + ']');

            int bufSize0 = bufSize == 0 ? cfg.getBufferSize() : bufSize;

            IgfsMode mode = resolveMode(path);

            switch (mode) {
                case PRIMARY: {
                    IgfsEntryInfo info = meta.infoForPath(path);

                    if (info == null)
                        throw new IgfsPathNotFoundException("File not found: " + path);

                    if (!info.isFile())
                        throw new IgfsPathIsDirectoryException("Failed to open file (not a file): " + path);

                    // Input stream to read data from grid cache with separate blocks.
                    IgfsInputStreamImpl os = new IgfsInputStreamImpl(igfsCtx, path, info, cfg.getPrefetchBlocks(),
                        seqReadsBeforePrefetch, null, info.length(), info.blockSize(), info.blocksCount(), false);

                    IgfsUtils.sendEvents(igfsCtx.kernalContext(), path, EVT_IGFS_FILE_OPENED_READ);

                    return os;
                }

                case DUAL_ASYNC:
                case DUAL_SYNC: {
                    assert IgfsUtils.isDualMode(mode);

                    IgfsSecondaryInputStreamDescriptor desc = meta.openDual(secondaryFs, path, bufSize0);

                    IgfsEntryInfo info = desc.info();

                    IgfsInputStreamImpl os = new IgfsInputStreamImpl(igfsCtx, path, info, cfg.getPrefetchBlocks(),
                        seqReadsBeforePrefetch, desc.reader(), info.length(), info.blockSize(), info.blocksCount(),
                        false);

                    IgfsUtils.sendEvents(igfsCtx.kernalContext(), path, EVT_IGFS_FILE_OPENED_READ);

                    return os;
                }

                case PROXY: {
                    assert secondaryFs != null;

                    IgfsFile info = info(path);

                    if (info == null)
                        throw new IgfsPathNotFoundException("File not found: " + path);

                    if (!info.isFile())
                        throw new IgfsPathIsDirectoryException("Failed to open file (not a file): " + path);

                    IgfsSecondaryFileSystemPositionedReadable secReader =
                        new IgfsLazySecondaryFileSystemPositionedReadable(secondaryFs, path, bufSize);

                    long len = info.length();

                    int blockSize = info.blockSize() > 0 ? info.blockSize() : cfg.getBlockSize();

                    long blockCnt = len / blockSize;

                    if (len % blockSize != 0)
                        blockCnt++;

                    IgfsInputStream os = new IgfsInputStreamImpl(igfsCtx, path, null, cfg.getPrefetchBlocks(),
                        seqReadsBeforePrefetch, secReader, info.length(), blockSize, blockCnt, true);

                    IgfsUtils.sendEvents(igfsCtx.kernalContext(), path, EVT_IGFS_FILE_OPENED_READ);

                    return os;
                }

                default:
                    assert false : "Unexpected mode " + mode;

                    return null;
            }
        }
    });
}
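A caller-side sketch of the read path (assuming the IgniteFileSystem handle fs from the earlier sketches and an existing file; passing 0 as bufSize falls back to the configured buffer size, as shown in the method above):
// Hypothetical read of an existing IGFS file; 0 means "use the configured buffer size".
IgfsPath path = new IgfsPath("/data/part-0");

try (IgfsInputStream in = fs.open(path, /*bufSize*/ 0, /*seqReadsBeforePrefetch*/ 2)) {
    byte[] buf = new byte[8192];

    for (int read = in.read(buf); read != -1; read = in.read(buf)) {
        // Process 'read' bytes from 'buf'.
    }
}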