Use of org.apache.ignite.igfs.IgfsOutputStream in the Apache Ignite project.
From the class IgfsMapReduceExample, method writeFile.
/**
 * Copies a local file into the Ignite file system, overwriting any existing
 * file at the destination path.
 *
 * @param fs Ignite file system to write into.
 * @param fsPath Destination path in IGFS.
 * @param file Local file whose contents are copied.
 * @throws Exception In case of exception.
 */
private static void writeFile(IgniteFileSystem fs, IgfsPath fsPath, File file) throws Exception {
    System.out.println();
    System.out.println("Copying file to IGFS: " + file);

    // Both streams are closed automatically, even if a read or write fails.
    try (IgfsOutputStream os = fs.create(fsPath, true);
         FileInputStream fis = new FileInputStream(file)) {
        byte[] chunk = new byte[2048];

        // Pump the file across in fixed-size chunks until EOF (read() returns -1).
        for (int len = fis.read(chunk); len != -1; len = fis.read(chunk))
            os.write(chunk, 0, len);
    }
}
Use of org.apache.ignite.igfs.IgfsOutputStream in the Apache Ignite project.
From the class IgfsProcessorSelfTest, method checkCreateAppendLongData.
/**
 * Writes {@code cnt} chunks of patterned test data to a newly created file,
 * appends {@code cnt} more chunks of the same pattern, then reads the whole
 * file back and verifies every chunk byte-for-byte.
 *
 * @param chunkSize Chunk size.
 * @param bufSize Buffer size.
 * @param cnt Count.
 * @throws Exception If failed.
 */
private void checkCreateAppendLongData(int chunkSize, int bufSize, int cnt) throws Exception {
    IgfsPath path = new IgfsPath("/someFile");

    // Deterministic non-trivial byte pattern; (byte) cast wraps on overflow,
    // which is fine since the same pattern is used for verification.
    byte[] buf = new byte[chunkSize];

    for (int i = 0; i < buf.length; i++)
        buf[i] = (byte) (i * i);

    // Create phase. try-with-resources guarantees the stream is closed and,
    // unlike a manual close() in finally, a close-time failure cannot mask
    // an exception thrown by write() or flush().
    try (IgfsOutputStream os = igfs.create(path, bufSize, true, null, 0, 1024, null)) {
        for (int i = 0; i < cnt; i++)
            os.write(buf);

        os.flush();
    }

    // Append phase: the same pattern is written again, doubling the file length.
    try (IgfsOutputStream os = igfs.append(path, chunkSize, false, null)) {
        for (int i = 0; i < cnt; i++)
            os.write(buf);

        os.flush();
    }

    // Verification: all 2 * cnt chunks must match the original pattern.
    byte[] readBuf = new byte[chunkSize];

    try (IgfsInputStream in = igfs.open(path)) {
        long pos = 0;

        for (int k = 0; k < 2 * cnt; k++) {
            in.readFully(pos, readBuf);

            for (int i = 0; i < readBuf.length; i++)
                assertEquals(buf[i], readBuf[i]);

            pos += readBuf.length;
        }
    }
}
Use of org.apache.ignite.igfs.IgfsOutputStream in the Apache Ignite project.
From the class IgfsBackupFailoverSelfTest, method testWriteFailoverWhileStoppingMultipleNodes.
/**
 * Verifies that writes started before a topology change can continue — or fail
 * over to an append on the same path — while every IGFS node except the first
 * is stopped concurrently.
 *
 * @throws Exception If the failover scenario fails.
 */
public void testWriteFailoverWhileStoppingMultipleNodes() throws Exception {
    final IgfsImpl igfs0 = nodeDatas[0].igfsImpl;
    clear(igfs0);
    IgfsAbstractSelfTest.create(igfs0, paths(DIR, SUBDIR), null);
    // One open output stream per file; streams are intentionally kept open
    // (flushed, not closed) so write #2 can reuse them after nodes stop.
    final IgfsOutputStream[] outStreams = new IgfsOutputStream[files];
    // Create files:
    for (int f = 0; f < files; f++) {
        final byte[] data = createChunk(fileSize, f);
        IgfsOutputStream os = null;
        try {
            os = igfs0.create(filePath(f), 256, true, null, 0, -1, null);
            assert os != null;
            writeFileChunks(os, data);
        } finally {
            // Flush only — the stream stays open for the second write pass below.
            if (os != null)
                os.flush();
        }
        outStreams[f] = os;
        X.println("write #1 completed: " + f);
    }
    final AtomicBoolean stop = new AtomicBoolean();
    // Background thread: after a delay, stop every grid node but the 1st while
    // write #2 is in progress, then signal completion via 'stop'.
    GridTestUtils.runMultiThreadedAsync(new Callable() {
        @Override
        public Object call() throws Exception {
            // Some delay to ensure read is in progress.
            Thread.sleep(10_000);
            // Now stop all the nodes but the 1st:
            for (int n = 1; n < numIgfsNodes; n++) {
                stopGrid(n);
                X.println("#### grid " + n + " stopped.");
            }
            // Thread.sleep(10_000);
            stop.set(true);
            return null;
        }
    }, 1, "igfs-node-stopper");
    // Write #2:
    for (int f0 = 0; f0 < files; f0++) {
        final IgfsOutputStream os = outStreams[f0];
        assert os != null;
        final int f = f0;
        // Exactly one attempt is allowed; the failover (reopening the file in
        // append mode) happens inside the callable, not via the retry loop.
        int att = doWithRetries(1, new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                IgfsOutputStream ios = os;
                try {
                    writeChunks0(igfs0, ios, f);
                } catch (IOException ioe) {
                    log().warning("Attempt to append the data to existing stream failed: ", ioe);
                    // Original stream broke during the node stops — reopen the
                    // path in append mode and redo the write on the new stream.
                    ios = igfs0.append(filePath(f), false);
                    assert ios != null;
                    writeChunks0(igfs0, ios, f);
                }
                return null;
            }
        });
        assert att == 1;
        X.println("write #2 completed: " + f0 + " in " + att + " attempts.");
    }
    // Wait for the node-stopper thread to finish before verifying results.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            return stop.get();
        }
    }, 25_000);
    // Check files:
    for (int f = 0; f < files; f++) {
        IgfsPath path = filePath(f);
        byte[] data = createChunk(fileSize, f);
        // Check through 1st node:
        checkExist(igfs0, path);
        // Each file was written twice (write #1 + write #2), hence 2x length.
        assertEquals("File length mismatch.", data.length * 2, igfs0.size(path));
        checkFileContent(igfs0, path, data, data);
        X.println("Read test completed: " + f);
    }
}
Use of org.apache.ignite.igfs.IgfsOutputStream in the Apache Ignite project.
From the class HadoopIgfsInProc, method flush.
/**
 * {@inheritDoc}
 */
@Override
public void flush(HadoopIgfsStreamDelegate delegate) throws IOException {
    try {
        IgfsOutputStream os = delegate.target();

        os.flush();
    }
    catch (IllegalStateException | IOException e) {
        // Notify any registered stream event listener about the failure first.
        HadoopIgfsStreamEventListener lsnr = lsnrs.get(delegate);

        if (lsnr != null)
            lsnr.onError(e.getMessage());

        // IOExceptions propagate as-is; an IllegalStateException (grid is
        // stopping) is translated into the IOException this method declares.
        if (e instanceof IOException)
            throw (IOException)e;

        throw new IOException("Failed to flush data to IGFS stream because Grid is stopping.", e);
    }
}
Use of org.apache.ignite.igfs.IgfsOutputStream in the Apache Ignite project.
From the class IgfsBenchmark, method handleFile.
/**
 * {@inheritDoc}
 */
@Override
public void handleFile(String strPath) throws Exception {
    IgfsPath path = new IgfsPath(strPath);

    IgfsOutputStream out;

    try {
        out = fs.create(path, false);
    } catch (IgniteException ex) {
        System.out.println("create file " + path.toString() + " failed: " + ex);

        throw ex;
    }

    // try-with-resources closes the stream on every path and, unlike the manual
    // close() in a finally block, cannot let a close-time failure mask the
    // original write exception (it is attached as a suppressed exception).
    try (IgfsOutputStream os = out) {
        // NOTE(review): any remainder of 'size' smaller than the buffer
        // capacity is never written — presumably intentional for a fixed-chunk
        // benchmark; confirm against the benchmark spec.
        for (int i = 0; i < size / dataBufer.capacity(); i++)
            os.write(dataBufer.array());
    } catch (IOException ex) {
        System.out.println("write file " + path.toString() + " failed: " + ex);

        throw ex;
    }
}
Aggregations