Use of org.apache.ignite.internal.processors.igfs.IgfsEntryInfo in project ignite by apache.
The class IgfsMetaFileUnlockProcessor, method process:
/** {@inheritDoc} */
@Override
public Void process(MutableEntry<IgniteUuid, IgfsEntryInfo> entry, Object... args) throws EntryProcessorException {
    IgfsEntryInfo oldInfo = entry.getValue();

    assert oldInfo != null;

    // Clear the lock and stamp the new modification time.
    IgfsEntryInfo newInfo = oldInfo.unlock(modificationTime);

    if (updateSpace) {
        // Record the newly written range in a copy of the file's block map
        // and grow the file length by the amount of data written.
        IgfsFileMap newMap = new IgfsFileMap(newInfo.fileMap());

        newMap.addRange(affRange);

        newInfo = newInfo.length(newInfo.length() + space).fileMap(newMap);
    }

    entry.setValue(newInfo);

    return null;
}
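This processor runs atomically against the metadata entry on the node that owns the key, so concurrent readers never observe a half-updated IgfsEntryInfo. A minimal sketch of how such a processor could be applied to the IGFS metadata cache follows; the cache name "igfs-meta" and the constructor argument order are assumptions inferred from the fields the snippet reads (modificationTime, updateSpace, space, affRange), not verified against the project.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.internal.processors.igfs.IgfsEntryInfo;
import org.apache.ignite.internal.processors.igfs.meta.IgfsMetaFileUnlockProcessor;
import org.apache.ignite.lang.IgniteUuid;

public class UnlockSketch {
    // Hypothetical call site; the cache name and constructor signature are assumptions.
    static void unlock(Ignite ignite, IgniteUuid fileId, long modTime) {
        IgniteCache<IgniteUuid, IgfsEntryInfo> metaCache = ignite.cache("igfs-meta");

        // invoke(...) executes the processor atomically on the entry for fileId;
        // here no space update is performed (updateSpace = false, space = 0, no range).
        metaCache.invoke(fileId, new IgfsMetaFileUnlockProcessor(modTime, false, 0, null));
    }
}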
Use of org.apache.ignite.internal.processors.igfs.IgfsEntryInfo in project ignite by apache.
The class IgfsMetaDirectoryListingReplaceProcessor, method process:
/** {@inheritDoc} */
@Override
public Void process(MutableEntry<IgniteUuid, IgfsEntryInfo> e, Object... args) throws EntryProcessorException {
    IgfsEntryInfo fileInfo = e.getValue();

    assert fileInfo.isDirectory();

    // Replace the entry in a copy of the directory listing, then write it back.
    Map<String, IgfsListingEntry> listing = new HashMap<>(fileInfo.listing());

    IgfsListingEntry oldEntry = listing.get(name);

    if (oldEntry == null)
        throw new IgniteException("Directory listing doesn't contain expected entry: " + name);

    listing.put(name, new IgfsListingEntry(id, oldEntry.isDirectory()));

    e.setValue(fileInfo.listing(listing));

    return null;
}
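The snippet references two fields, name and id, that are not shown: they are the processor's own state, populated from constructor arguments before the processor is shipped to the node that owns the entry. Below is a hypothetical skeleton illustrating how such a processor carries its arguments; the class name is made up, and the real Ignite processors implement Externalizable rather than Serializable.

import java.io.Serializable;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;
import org.apache.ignite.internal.processors.igfs.IgfsEntryInfo;
import org.apache.ignite.lang.IgniteUuid;

// Hypothetical skeleton; for illustration only.
public class ListingReplaceSkeleton implements EntryProcessor<IgniteUuid, IgfsEntryInfo, Void>, Serializable {
    /** Listing entry name to replace (set by the caller, shipped with the processor). */
    private final String name;

    /** New entry ID to store under that name. */
    private final IgniteUuid id;

    public ListingReplaceSkeleton(String name, IgniteUuid id) {
        this.name = name;
        this.id = id;
    }

    /** {@inheritDoc} */
    @Override
    public Void process(MutableEntry<IgniteUuid, IgfsEntryInfo> e, Object... args) throws EntryProcessorException {
        // Body as shown in the snippet above: copy the listing, swap the entry, write back.
        return null;
    }
}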
Use of org.apache.ignite.internal.processors.igfs.IgfsEntryInfo in project ignite by apache.
The class IgfsMetaFileLockProcessor, method process:
/** {@inheritDoc} */
@Override
public IgfsEntryInfo process(MutableEntry<IgniteUuid, IgfsEntryInfo> entry, Object... args) throws EntryProcessorException {
    IgfsEntryInfo oldInfo = entry.getValue();

    // Stamp the lock ID onto the entry and return the updated info to the caller.
    IgfsEntryInfo newInfo = oldInfo.lock(lockId);

    entry.setValue(newInfo);

    return newInfo;
}
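Unlike the unlock processor, this one returns the updated IgfsEntryInfo, so the caller receives the post-lock state in the same round trip as the mutation. A hedged sketch of the call site follows; the cache name and the single-argument constructor are assumptions, not taken from the project.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.internal.processors.igfs.IgfsEntryInfo;
import org.apache.ignite.internal.processors.igfs.meta.IgfsMetaFileLockProcessor;
import org.apache.ignite.lang.IgniteUuid;

public class LockSketch {
    // Hypothetical call site; the cache name and constructor signature are assumptions.
    static IgfsEntryInfo lock(Ignite ignite, IgniteUuid fileId, IgniteUuid lockId) {
        IgniteCache<IgniteUuid, IgfsEntryInfo> metaCache = ignite.cache("igfs-meta");

        // The processor's return value is shipped back to the invoking node.
        return metaCache.invoke(fileId, new IgfsMetaFileLockProcessor(lockId));
    }
}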
Use of org.apache.ignite.internal.processors.igfs.IgfsEntryInfo in project ignite by apache.
The class HadoopIgfsSecondaryFileSystemDelegateImpl, method listFiles:
/** {@inheritDoc} */
@Override
public Collection<IgfsFile> listFiles(IgfsPath path) {
    try {
        FileStatus[] statuses = fileSystemForUser().listStatus(convert(path));

        if (statuses == null)
            throw new IgfsPathNotFoundException("Failed to list files (path not found): " + path);

        Collection<IgfsFile> res = new ArrayList<>(statuses.length);

        for (FileStatus s : statuses) {
            // Wrap each Hadoop FileStatus into an IGFS entry info with a fresh ID.
            IgfsEntryInfo fsInfo = s.isDirectory() ?
                IgfsUtils.createDirectory(IgniteUuid.randomUuid(), null, properties(s), s.getAccessTime(),
                    s.getModificationTime()) :
                IgfsUtils.createFile(IgniteUuid.randomUuid(), (int)s.getBlockSize(), s.getLen(), null, null, false,
                    properties(s), s.getAccessTime(), s.getModificationTime());

            res.add(new IgfsFileImpl(new IgfsPath(path, s.getPath().getName()), fsInfo, 1));
        }

        return res;
    }
    catch (FileNotFoundException ignored) {
        throw new IgfsPathNotFoundException("Failed to list files (path not found): " + path);
    }
    catch (IOException e) {
        throw handleSecondaryFsError(e, "Failed to list statuses due to secondary file system exception: " + path);
    }
}
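Each Hadoop FileStatus is adapted into IGFS's public IgfsFile view with a freshly generated ID, since the secondary file system has no IGFS IDs of its own. A hedged usage sketch through the public IgniteFileSystem API, which routes to this delegate when a secondary file system is configured; the instance name "igfs" and the listed path are assumptions.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsFile;
import org.apache.ignite.igfs.IgfsPath;

public class ListFilesSketch {
    static void printListing(Ignite ignite) {
        // The IGFS instance name "igfs" is an assumption.
        IgniteFileSystem fs = ignite.fileSystem("igfs");

        // IgfsFile exposes path, type and length for each listed entry.
        for (IgfsFile f : fs.listFiles(new IgfsPath("/dir")))
            System.out.println(f.path() + (f.isDirectory() ? " [dir]" : " " + f.length() + " bytes"));
    }
}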
Use of org.apache.ignite.internal.processors.igfs.IgfsEntryInfo in project ignite by apache.
The class HadoopIgfsDualAbstractSelfTest, method testOpenPrefetchOverride:
/**
 * Check how prefetch override works.
 *
 * @throws Exception If failed.
 */
public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));

    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;

    IgfsOutputStream out = igfsSecondary.append(FILE, false);

    int totalWritten = 0;

    while (totalWritten < blockSize * 2 + chunk.length) {
        out.write(chunk);

        totalWritten += chunk.length;
    }

    out.close();

    awaitFileClose(igfsSecondary, FILE);

    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));

    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;

    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs@"), seqReads);

    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);

    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());

    FSDataInputStream fsIn = fs.open(file);

    final byte[] readBuf = new byte[blockSize * 2];

    fsIn.readFully(0, readBuf, 0, readBuf.length);

    // Wait for a while for prefetch to finish (if any).
    IgfsMetaManager meta = igfs.context().meta();

    IgfsEntryInfo info = meta.info(meta.fileId(FILE));

    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);

    IgniteCache<IgfsBlockKey, byte[]> dataCache = igfs.context().kernalContext().cache().jcache(
        igfs.configuration().getDataCacheConfiguration().getName());

    // Poll up to ~1 second for the prefetched third block to land in the data cache.
    for (int i = 0; i < 10; i++) {
        if (dataCache.containsKey(key))
            break;
        else
            U.sleep(100);
    }

    fsIn.close();

    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);

    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            IgfsInputStream in0 = igfs.open(FILE);

            in0.seek(blockSize * 2);

            try {
                in0.read(readBuf);
            }
            finally {
                U.closeQuiet(in0);
            }

            return null;
        }
    }, IOException.class, "Failed to read data due to secondary file system exception: /dir/subdir/file");
}
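The fixed 10 x 100 ms polling loop gives prefetch at most one second to complete and then proceeds either way. Ignite's test framework offers a condition-based wait that states the same intent more directly; the fragment below is a hedged alternative that reuses the test's dataCache and key locals and assumes GridTestUtils.waitForCondition(GridAbsPredicate, long) is available on the test classpath.

// Hedged alternative to the manual polling loop (assumes 'dataCache' and 'key'
// from the surrounding test are effectively final):
GridTestUtils.waitForCondition(new GridAbsPredicate() {
    @Override public boolean apply() {
        return dataCache.containsKey(key);
    }
}, 1000);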