Use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.
From class IgfsStreamsSelfTest, method testCreateFileColocated.
/** @throws Exception If failed. */
public void testCreateFileColocated() throws Exception {
    IgfsPath path = new IgfsPath("/colocated");

    UUID uuid = UUID.randomUUID();

    IgniteUuid affKey;

    long idx = 0;

    while (true) {
        affKey = new IgniteUuid(uuid, idx);

        if (grid(0).affinity(grid(0).igfsx("igfs").configuration().getDataCacheConfiguration().getName())
            .mapKeyToNode(affKey).id().equals(grid(0).localNode().id()))
            break;

        idx++;
    }

    try (IgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) {
        // Write 15M, should be enough to test distribution.
        for (int i = 0; i < 15; i++)
            out.write(new byte[1024 * 1024]);
    }

    IgfsFile info = fs.info(path);

    Collection<IgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length());

    assertEquals(1, affNodes.size());

    Collection<UUID> nodeIds = F.first(affNodes).nodeIds();

    assertEquals(1, nodeIds.size());
    assertEquals(grid(0).localNode().id(), F.first(nodeIds));
}
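For contrast with the colocated variant above, the following is a minimal sketch of the simplest write path. The instance name "igfs" and the config file "igfs-config.xml" are illustrative placeholders, not names taken from the test above.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.Ignition;
import org.apache.ignite.igfs.IgfsOutputStream;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsWriteSketch {
    public static void main(String[] args) throws Exception {
        // "igfs-config.xml" and the instance name "igfs" are placeholders.
        try (Ignite ignite = Ignition.start("igfs-config.xml")) {
            IgniteFileSystem fs = ignite.fileSystem("igfs");

            // create(path, overwrite): no affinity key is passed, so file blocks
            // are spread across the data cache by the default affinity mapper.
            try (IgfsOutputStream out = fs.create(new IgfsPath("/sketch/file"), true)) {
                out.write("hello".getBytes());
            }
        }
    }
}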
Use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.
From class IgfsStreamsSelfTest, method testCreateFile.
/**
 * Test file creation.
 *
 * @param path Path to file to store.
 * @param size Size of file to store.
 * @param salt Salt for file content generation.
 * @throws Exception In case of any exception.
 */
private void testCreateFile(final IgfsPath path, final long size, final int salt) throws Exception {
    info("Create file [path=" + path + ", size=" + size + ", salt=" + salt + ']');

    final AtomicInteger cnt = new AtomicInteger(0);
    final Collection<IgfsPath> cleanUp = new ConcurrentLinkedQueue<>();

    long time = runMultiThreaded(new Callable<Object>() {
        @Override public Object call() throws Exception {
            int id = cnt.incrementAndGet();

            IgfsPath f = new IgfsPath(path.parent(), "asdf" + (id > 1 ? "-" + id : ""));

            try (IgfsOutputStream out = fs.create(f, 0, true, null, 0, 1024, null)) {
                assertNotNull(out);

                // Add all created into cleanup list.
                cleanUp.add(f);

                U.copy(new IgfsTestInputStream(size, salt), out);
            }

            return null;
        }
    }, WRITING_THREADS_CNT, "perform-multi-thread-writing");

    if (time > 0) {
        double rate = size * 1000. / time / 1024 / 1024;

        info(String.format("Write file [path=%s, size=%d kB, rate=%2.1f MB/s]", path,
            WRITING_THREADS_CNT * size / 1024, WRITING_THREADS_CNT * rate));
    }

    info("Read and validate saved file: " + path);

    final InputStream expIn = new IgfsTestInputStream(size, salt);
    final IgfsInputStream actIn = fs.open(path, CFG_BLOCK_SIZE * READING_THREADS_CNT * 11 / 10);

    // Validate continuous reading of whole file.
    assertEqualStreams(expIn, actIn, size, null);

    // Validate random seek and reading.
    final Random rnd = new Random();

    runMultiThreaded(new Callable<Object>() {
        @Override public Object call() throws Exception {
            long skip = Math.abs(rnd.nextLong() % (size + 1));
            long range = Math.min(size - skip, rnd.nextInt(CFG_BLOCK_SIZE * 400));

            assertEqualStreams(new IgfsTestInputStream(size, salt), actIn, range, skip);

            return null;
        }
    }, READING_THREADS_CNT, "validate-multi-thread-reading");

    expIn.close();
    actIn.close();

    info("Get stored file info: " + path);

    IgfsFile desc = fs.info(path);

    info("Validate stored file info: " + desc);

    assertNotNull(desc);

    if (log.isDebugEnabled())
        log.debug("File descriptor: " + desc);

    Collection<IgfsBlockLocation> aff = fs.affinity(path, 0, desc.length());

    assertFalse("Affinity: " + aff, desc.length() != 0 && aff.isEmpty());

    int blockSize = desc.blockSize();

    assertEquals("File size", size, desc.length());
    assertEquals("Binary block size", CFG_BLOCK_SIZE, blockSize);
    //assertEquals("Permission", "rwxr-xr-x", desc.getPermission().toString());
    //assertEquals("Permission sticky bit marks this is file", false, desc.getPermission().getStickyBit());
    assertEquals("Type", true, desc.isFile());
    assertEquals("Type", false, desc.isDirectory());

    info("Cleanup files: " + cleanUp);

    for (IgfsPath f : cleanUp) {
        fs.delete(f, true);
        assertNull(fs.info(f));
    }
}
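The read-back step of the test can be distilled into a small standalone check: open the file, do a positioned readFully, and compare lengths and bytes. A minimal sketch; the helper name and parameters are illustrative, not part of the test above.

import java.io.IOException;
import java.util.Arrays;

import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsInputStream;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsReadBackSketch {
    /** Reads the whole file back and compares it to the expected bytes. */
    static boolean matches(IgniteFileSystem fs, IgfsPath path, byte[] expected) throws IOException {
        byte[] actual = new byte[expected.length];

        try (IgfsInputStream in = fs.open(path)) {
            if (in.length() != expected.length)
                return false;

            in.readFully(0, actual); // Positioned read starting at offset 0.

            return Arrays.equals(expected, actual);
        }
    }
}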
Use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.
From class HadoopIgfsInProc, method writeData.
/** {@inheritDoc} */
@Override public void writeData(HadoopIgfsStreamDelegate delegate, byte[] data, int off, int len) throws IOException {
    try {
        IgfsOutputStream stream = delegate.target();

        stream.write(data, off, len);
    } catch (IllegalStateException | IOException e) {
        HadoopIgfsStreamEventListener lsnr = lsnrs.get(delegate);

        if (lsnr != null)
            lsnr.onError(e.getMessage());

        if (e instanceof IllegalStateException)
            throw new IOException("Failed to write data to IGFS stream because Grid is stopping.", e);
        else
            throw e;
    }
}
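The IllegalStateException branch exists because a stopping grid invalidates the stream with IllegalStateException, while the Hadoop-facing contract only allows IOException to escape. A hypothetical, standalone illustration of the same translation pattern (the wrapper class is not part of Ignite):

import java.io.IOException;
import java.io.OutputStream;

// Hypothetical wrapper mirroring the pattern above: re-wrap stream-invalidation
// errors so that only IOException crosses the Hadoop-facing API boundary.
final class IoTranslatingWriter {
    private final OutputStream out;

    IoTranslatingWriter(OutputStream out) {
        this.out = out;
    }

    void write(byte[] data, int off, int len) throws IOException {
        try {
            out.write(data, off, len);
        } catch (IllegalStateException e) {
            throw new IOException("Stream is no longer usable: node is stopping.", e);
        }
    }
}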
Use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.
From class HadoopExternalTaskExecutionSelfTest, method prepareTestFile.
/**
 * @param filePath File path to prepare.
 * @throws Exception If failed.
 */
private void prepareTestFile(String filePath) throws Exception {
    IgniteFileSystem igfs = grid(0).fileSystem(igfsName);

    try (IgfsOutputStream out = igfs.create(new IgfsPath(filePath), true)) {
        PrintWriter wr = new PrintWriter(new OutputStreamWriter(out));

        for (int i = 0; i < 1000; i++)
            wr.println("Hello, world: " + i);

        wr.flush();
    }
}
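Since PrintWriter accepts any OutputStream, the same file can also be produced with try-with-resources on the writer itself, letting its close() propagate to the underlying IGFS stream instead of flushing manually. A minimal sketch; the class and method names are illustrative.

import java.io.PrintWriter;

import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsTextWriteSketch {
    /** Writes numbered lines; closing the PrintWriter also closes the IGFS stream. */
    static void writeLines(IgniteFileSystem igfs, String filePath, int lines) {
        try (PrintWriter wr = new PrintWriter(igfs.create(new IgfsPath(filePath), true))) {
            for (int i = 0; i < lines; i++)
                wr.println("Hello, world: " + i);
        }
    }
}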
Use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.
From class IgfsModesSelfTest, method checkPropagation.
/**
 * Check propagation of various operations to secondary file system.
 *
 * @throws Exception If failed.
 */
private void checkPropagation() throws Exception {
    byte[] testData1 = new byte[] {0, 1, 2, 3, 4, 5, 6, 7};
    byte[] testData2 = new byte[] {8, 9, 10, 11};

    byte[] testData = Arrays.copyOf(testData1, testData1.length + testData2.length);

    U.arrayCopy(testData2, 0, testData, testData1.length, testData2.length);

    setSecondaryFs = true;

    startUp();

    boolean primaryNotUsed = mode == PROXY;
    boolean secondaryUsed = mode != PRIMARY;

    IgfsPath dir = new IgfsPath("/dir");
    IgfsPath file = new IgfsPath("/dir/file");

    // Create new directory.
    igfs.mkdirs(dir);

    // Create new file.
    IgfsOutputStream os = igfs.create(file, 1024, true, null, 0, 2048, null);

    os.write(testData1);

    os.close();

    // Re-open it and append.
    os = igfs.append(file, 1024, false, null);

    os.write(testData2);

    os.close();

    // Check file content.
    IgfsInputStream is = igfs.open(file);

    assertEquals(testData.length, is.length());

    byte[] data = new byte[testData.length];

    is.read(data, 0, testData.length);

    is.close();

    assert Arrays.equals(testData, data);

    if (secondaryUsed) {
        assert igfsSecondary.exists(dir);
        assert igfsSecondary.exists(file);

        // In ASYNC mode we wait at most 2 seconds for background writer to finish.
        for (int i = 0; i < 20; i++) {
            IgfsInputStream isSecondary = null;

            try {
                isSecondary = igfsSecondary.open(file);

                if (isSecondary.length() == testData.length)
                    break;
                else
                    U.sleep(100);
            } finally {
                U.closeQuiet(isSecondary);
            }
        }

        IgfsInputStream isSecondary = igfsSecondary.open(file);

        assertEquals(testData.length, isSecondary.length());

        isSecondary.read(data, 0, testData.length);

        assert Arrays.equals(testData, data);
    } else {
        assert !igfsSecondary.exists(dir);
        assert !igfsSecondary.exists(file);
    }

    int cacheSize = grid.cachex(grid.igfsx("igfs").configuration().getDataCacheConfiguration().getName()).size();

    if (primaryNotUsed)
        assert cacheSize == 0;
    else
        assert cacheSize != 0;

    // Now delete all.
    igfs.delete(dir, true);

    assert !igfs.exists(dir);
    assert !igfs.exists(file);
    assert !igfsSecondary.exists(dir);
    assert !igfsSecondary.exists(file);
}
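The append(file, 1024, false, null) call above reopens a file that must already exist; with create=true the same overload also covers the file-missing case. A minimal sketch, assuming an already-obtained IgniteFileSystem handle (class and method names are illustrative):

import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsOutputStream;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsAppendSketch {
    /** Appends a chunk to a file, creating the file first if it does not exist. */
    static void appendChunk(IgniteFileSystem igfs, IgfsPath path, byte[] chunk) throws Exception {
        // append(path, bufSize, create, props): create=true means "create if absent".
        try (IgfsOutputStream os = igfs.append(path, 1024, true, null)) {
            os.write(chunk);
        }
    }
}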