Use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.
Class HadoopIgfsDualAbstractSelfTest, method testOpenPrefetchOverride.
/**
 * Check how prefetch override works.
 *
 * @throws Exception If failed.
 */
public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));

    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;

    IgfsOutputStream out = igfsSecondary.append(FILE, false);

    int totalWritten = 0;

    while (totalWritten < blockSize * 2 + chunk.length) {
        out.write(chunk);

        totalWritten += chunk.length;
    }

    out.close();

    awaitFileClose(igfsSecondary, FILE);

    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));

    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;

    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs@"), seqReads);

    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);

    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());

    FSDataInputStream fsIn = fs.open(file);

    final byte[] readBuf = new byte[blockSize * 2];

    fsIn.readFully(0, readBuf, 0, readBuf.length);

    // Wait for a while for prefetch to finish (if any).
    IgfsMetaManager meta = igfs.context().meta();

    IgfsEntryInfo info = meta.info(meta.fileId(FILE));

    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);

    IgniteCache<IgfsBlockKey, byte[]> dataCache = igfs.context().kernalContext().cache().jcache(
        igfs.configuration().getDataCacheConfiguration().getName());

    for (int i = 0; i < 10; i++) {
        if (dataCache.containsKey(key))
            break;
        else
            U.sleep(100);
    }

    fsIn.close();

    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);

    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            IgfsInputStream in0 = igfs.open(FILE);

            in0.seek(blockSize * 2);

            try {
                in0.read(readBuf);
            }
            finally {
                U.closeQuiet(in0);
            }

            return null;
        }
    }, IOException.class, "Failed to read data due to secondary file system exception: /dir/subdir/file");
}
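For orientation, here is a minimal standalone sketch of the same override outside the test harness. It assumes an Ignite release that still ships IGFS and the ignite-hadoop module (IGFS was removed in Ignite 2.8), a primary file system reachable at igfs://igfs@/, and that the literal key behind PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH follows the fs.igfs.%s.open... pattern; the URI, path, and threshold value are all illustrative.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PrefetchOverrideSketch {
    public static void main(String[] args) throws Exception {
        Configuration cfg = new Configuration();

        // Assumed literal behind PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH; "igfs@" is
        // the authority of the igfs://igfs@/ URI, exactly as in the test above.
        cfg.setInt(String.format("fs.igfs.%s.open.sequential_reads_before_prefetch", "igfs@"), 8);

        FileSystem fs = FileSystem.get(new URI("igfs://igfs@/"), cfg);

        // Fewer sequential reads than the threshold should not trigger prefetch.
        try (FSDataInputStream in = fs.open(new Path("igfs://igfs@/dir/subdir/file"))) {
            byte[] buf = new byte[8192];

            in.readFully(0, buf, 0, buf.length);
        }
    }
}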
Use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.
Class IgfsStreamsSelfTest, method testCreateFileColocated.
/**
 * @throws Exception If failed.
 */
public void testCreateFileColocated() throws Exception {
    IgfsPath path = new IgfsPath("/colocated");

    UUID uuid = UUID.randomUUID();

    IgniteUuid affKey;

    long idx = 0;

    while (true) {
        affKey = new IgniteUuid(uuid, idx);

        if (grid(0).affinity(grid(0).igfsx("igfs").configuration().getDataCacheConfiguration().getName())
            .mapKeyToNode(affKey).id().equals(grid(0).localNode().id()))
            break;

        idx++;
    }

    try (IgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) {
        // Write 15 MB, which should be enough to test distribution.
        for (int i = 0; i < 15; i++)
            out.write(new byte[1024 * 1024]);
    }

    IgfsFile info = fs.info(path);

    Collection<IgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length());

    assertEquals(1, affNodes.size());

    Collection<UUID> nodeIds = F.first(affNodes).nodeIds();

    assertEquals(1, nodeIds.size());
    assertEquals(grid(0).localNode().id(), F.first(nodeIds));
}
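The busy loop at the top of this test is the interesting part: an IgniteUuid combines a global UUID with a long counter, so incrementing the counter eventually produces a key whose partition is primary on the local node. Below is a hedged sketch of that probe as a standalone helper, assuming an Ignite instance and the IGFS data cache name are at hand (both parameters are illustrative).

import java.util.UUID;

import org.apache.ignite.Ignite;
import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.lang.IgniteUuid;

public class AffinityKeyProbe {
    /** Brute-forces an IgniteUuid that the given cache maps to the local node. */
    public static IgniteUuid localAffinityKey(Ignite ignite, String dataCacheName) {
        UUID uuid = UUID.randomUUID();

        ClusterNode locNode = ignite.cluster().localNode();

        for (long idx = 0; ; idx++) {
            IgniteUuid affKey = new IgniteUuid(uuid, idx);

            // mapKeyToNode() resolves the primary node for the key's partition.
            if (locNode.id().equals(ignite.affinity(dataCacheName).mapKeyToNode(affKey).id()))
                return affKey;
        }
    }
}

Passing the resulting key as the affKey argument of IgniteFileSystem.create(...) pins all blocks of the file to one node, which is exactly what the single-element affNodes assertion above verifies.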
Use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.
Class IgfsStreamsSelfTest, method testCreateFile.
/**
 * Test file creation.
 *
 * @param path Path to file to store.
 * @param size Size of file to store.
 * @param salt Salt for file content generation.
 * @throws Exception In case of any exception.
 */
private void testCreateFile(final IgfsPath path, final long size, final int salt) throws Exception {
    info("Create file [path=" + path + ", size=" + size + ", salt=" + salt + ']');

    final AtomicInteger cnt = new AtomicInteger(0);
    final Collection<IgfsPath> cleanUp = new ConcurrentLinkedQueue<>();

    long time = runMultiThreaded(new Callable<Object>() {
        @Override public Object call() throws Exception {
            int id = cnt.incrementAndGet();

            IgfsPath f = new IgfsPath(path.parent(), "asdf" + (id > 1 ? "-" + id : ""));

            try (IgfsOutputStream out = fs.create(f, 0, true, null, 0, 1024, null)) {
                assertNotNull(out);

                // Add all created files into the cleanup list.
                cleanUp.add(f);

                U.copy(new IgfsTestInputStream(size, salt), out);
            }

            return null;
        }
    }, WRITING_THREADS_CNT, "perform-multi-thread-writing");

    if (time > 0) {
        double rate = size * 1000. / time / 1024 / 1024;

        info(String.format("Write file [path=%s, size=%d kB, rate=%2.1f MB/s]", path,
            WRITING_THREADS_CNT * size / 1024, WRITING_THREADS_CNT * rate));
    }

    info("Read and validate saved file: " + path);

    final InputStream expIn = new IgfsTestInputStream(size, salt);
    final IgfsInputStream actIn = fs.open(path, CFG_BLOCK_SIZE * READING_THREADS_CNT * 11 / 10);

    // Validate continuous reading of the whole file.
    assertEqualStreams(expIn, actIn, size, null);

    // Validate random seek and reading.
    final Random rnd = new Random();

    runMultiThreaded(new Callable<Object>() {
        @Override public Object call() throws Exception {
            long skip = Math.abs(rnd.nextLong() % (size + 1));
            long range = Math.min(size - skip, rnd.nextInt(CFG_BLOCK_SIZE * 400));

            assertEqualStreams(new IgfsTestInputStream(size, salt), actIn, range, skip);

            return null;
        }
    }, READING_THREADS_CNT, "validate-multi-thread-reading");

    expIn.close();
    actIn.close();

    info("Get stored file info: " + path);

    IgfsFile desc = fs.info(path);

    info("Validate stored file info: " + desc);

    assertNotNull(desc);

    if (log.isDebugEnabled())
        log.debug("File descriptor: " + desc);

    Collection<IgfsBlockLocation> aff = fs.affinity(path, 0, desc.length());

    assertFalse("Affinity: " + aff, desc.length() != 0 && aff.isEmpty());

    int blockSize = desc.blockSize();

    assertEquals("File size", size, desc.length());
    assertEquals("Binary block size", CFG_BLOCK_SIZE, blockSize);
    // assertEquals("Permission", "rwxr-xr-x", desc.getPermission().toString());
    // assertEquals("Permission sticky bit marks this is file", false, desc.getPermission().getStickyBit());
    assertEquals("Type", true, desc.isFile());
    assertEquals("Type", false, desc.isDirectory());

    info("Cleanup files: " + cleanUp);

    for (IgfsPath f : cleanUp) {
        fs.delete(f, true);
        assertNull(fs.info(f));
    }
}
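Stripped of the multi-threaded harness and assertions, the write/read round trip this test exercises reduces to the following sketch. It assumes a started node whose configuration file declares an IGFS named "igfs"; the config path, file system name, and file path are illustrative.

import java.util.Arrays;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.Ignition;
import org.apache.ignite.igfs.IgfsInputStream;
import org.apache.ignite.igfs.IgfsOutputStream;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsRoundTripSketch {
    public static void main(String[] args) throws Exception {
        // "ignite-igfs.xml" stands for any config declaring a FileSystemConfiguration.
        try (Ignite ignite = Ignition.start("ignite-igfs.xml")) {
            IgniteFileSystem fs = ignite.fileSystem("igfs");

            IgfsPath path = new IgfsPath("/roundtrip");

            byte[] data = new byte[4096];
            Arrays.fill(data, (byte)42);

            // create(path, overwrite) opens a fresh IgfsOutputStream.
            try (IgfsOutputStream out = fs.create(path, true)) {
                out.write(data);
            }

            byte[] readBack = new byte[data.length];

            // IgfsInputStream extends InputStream, so a plain read loop suffices.
            try (IgfsInputStream in = fs.open(path)) {
                int off = 0;

                while (off < readBack.length) {
                    int n = in.read(readBack, off, readBack.length - off);

                    if (n < 0)
                        break;

                    off += n;
                }
            }

            if (!Arrays.equals(data, readBack))
                throw new IllegalStateException("Read-back mismatch.");
        }
    }
}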
Use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.
Class IgfsProcessorSelfTest, method testCreate.
/**
 * @throws Exception If failed.
 */
public void testCreate() throws Exception {
    IgfsPath path = path("/file");

    try (IgfsOutputStream os = igfs.create(path, false)) {
        assert os != null;

        IgfsFileImpl info = (IgfsFileImpl)igfs.info(path);

        for (int i = 0; i < nodesCount(); i++) {
            IgfsEntryInfo fileInfo =
                (IgfsEntryInfo)grid(i).cachex(metaCacheName).localPeek(info.fileId(), null, null);

            assertNotNull(fileInfo);
            assertNotNull(fileInfo.listing());
        }
    }
    finally {
        igfs.delete(path("/"), true);
    }
}
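This test peeks into the internal metadata cache on every node; from application code the same file metadata is reachable only through the public API. A minimal sketch, assuming an IGFS named "igfs" (the name and path are illustrative):

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsFile;
import org.apache.ignite.igfs.IgfsOutputStream;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsMetadataSketch {
    public static void inspect(Ignite ignite) throws Exception {
        IgniteFileSystem fs = ignite.fileSystem("igfs"); // assumed IGFS name

        IgfsPath path = new IgfsPath("/file");

        try (IgfsOutputStream os = fs.create(path, false)) {
            os.write(new byte[128]);
        }

        // info() exposes the metadata that the test verifies inside the meta cache.
        IgfsFile info = fs.info(path);

        System.out.println("exists=" + fs.exists(path)
            + ", isFile=" + info.isFile()
            + ", length=" + info.length()
            + ", blockSize=" + info.blockSize());
    }
}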
Use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.
Class IgfsSizeSelfTest, method checkOversize.
/**
 * Ensure that an exception is thrown in case of IGFS oversize.
 *
 * @throws Exception If failed.
 */
private void checkOversize() throws Exception {
    final long maxSize = 32 * 1024 * 1024;

    memIgfsdDataPlcSetter = new IgniteInClosure<IgniteConfiguration>() {
        @Override public void apply(IgniteConfiguration cfg) {
            String memPlcName = "igfsDataMemPlc";

            cfg.setDataStorageConfiguration(new DataStorageConfiguration().setDataRegionConfigurations(
                new DataRegionConfiguration().setMaxSize(maxSize).setInitialSize(maxSize).setName(memPlcName)));

            FileSystemConfiguration igfsCfg = cfg.getFileSystemConfiguration()[0];

            igfsCfg.getDataCacheConfiguration().setDataRegionName(memPlcName);

            cfg.setCacheConfiguration(new CacheConfiguration().setName("QQQ").setDataRegionName(memPlcName));
        }
    };

    startUp();

    final IgfsPath path = new IgfsPath("/file");

    final int writeChunkSize = (int)(maxSize / 1024);

    // This write is expected to be successful.
    IgfsOutputStream os = igfs(0).create(path, false);
    os.write(chunk(writeChunkSize));
    os.close();

    // This write must be successful as well.
    os = igfs(0).append(path, false);
    os.write(chunk(1));
    os.close();

    // This write must fail with an exception.
    GridTestUtils.assertThrows(log(), new Callable<Object>() {
        @Override public Object call() throws Exception {
            IgfsOutputStream osErr = igfs(0).append(path, false);

            try {
                for (int i = 0; i < maxSize / writeChunkSize * GRID_CNT; ++i)
                    osErr.write(chunk(writeChunkSize));

                osErr.close();

                return null;
            }
            catch (IOException e) {
                // Unwrap to the root cause and rethrow it.
                Throwable e0 = e;

                while (e0.getCause() != null)
                    e0 = e0.getCause();

                throw (Exception)e0;
            }
            finally {
                U.closeQuiet(osErr);
            }
        }
    }, IgniteOutOfMemoryException.class, "Not enough memory allocated");
}
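The setup closure is the essential piece: IGFS stores its data blocks in an ordinary cache, so capping that cache's data region caps the whole file system. A configuration sketch mirroring the closure above, with the region name and size left as illustrative parameters:

import org.apache.ignite.configuration.DataRegionConfiguration;
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.FileSystemConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;

public class BoundedIgfsRegionSketch {
    /** Routes the (single, assumed) IGFS data cache to a region capped at maxSize bytes. */
    public static IgniteConfiguration limitIgfsRegion(IgniteConfiguration cfg, long maxSize) {
        String memPlcName = "igfsDataMemPlc"; // illustrative region name

        cfg.setDataStorageConfiguration(new DataStorageConfiguration()
            .setDataRegionConfigurations(new DataRegionConfiguration()
                .setName(memPlcName)
                .setInitialSize(maxSize)
                .setMaxSize(maxSize)));

        // Assumes exactly one FileSystemConfiguration is present, as in the test.
        FileSystemConfiguration igfsCfg = cfg.getFileSystemConfiguration()[0];

        igfsCfg.getDataCacheConfiguration().setDataRegionName(memPlcName);

        return cfg;
    }
}

Because the region has neither persistence nor page eviction configured, writes past the cap surface as IgniteOutOfMemoryException, which is what assertThrows above expects.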