Use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.
The class IgfsProcessorSelfTest, method checkCreateAppendLongData.
/**
 * @param chunkSize Chunk size.
 * @param bufSize Buffer size.
 * @param cnt Count.
 * @throws Exception If failed.
 */
private void checkCreateAppendLongData(int chunkSize, int bufSize, int cnt) throws Exception {
    IgfsPath path = new IgfsPath("/someFile");

    byte[] buf = new byte[chunkSize];

    for (int i = 0; i < buf.length; i++)
        buf[i] = (byte)(i * i);

    // Create the file and write cnt chunks.
    IgfsOutputStream os = igfs.create(path, bufSize, true, null, 0, 1024, null);

    try {
        for (int i = 0; i < cnt; i++)
            os.write(buf);

        os.flush();
    }
    finally {
        os.close();
    }

    // Append the same cnt chunks to the existing file.
    os = igfs.append(path, chunkSize, false, null);

    try {
        for (int i = 0; i < cnt; i++)
            os.write(buf);

        os.flush();
    }
    finally {
        os.close();
    }

    // Read the file back chunk by chunk and verify the contents.
    byte[] readBuf = new byte[chunkSize];

    try (IgfsInputStream in = igfs.open(path)) {
        long pos = 0;

        for (int k = 0; k < 2 * cnt; k++) {
            in.readFully(pos, readBuf);

            for (int i = 0; i < readBuf.length; i++)
                assertEquals(buf[i], readBuf[i]);

            pos += readBuf.length;
        }
    }
}
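Distilled from the test above, the following is a minimal standalone sketch of the same create/append/read-back pattern, written against the public IgniteFileSystem API rather than the internal test fixture. It assumes a node started from a Spring configuration that defines an IGFS instance named "igfs"; the config file name, class name, and variable names below are placeholders, not part of the Ignite API.

import java.util.Arrays;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.Ignition;
import org.apache.ignite.igfs.IgfsInputStream;
import org.apache.ignite.igfs.IgfsOutputStream;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsReadWriteSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder config path; it must define an IGFS instance named "igfs".
        try (Ignite ignite = Ignition.start("ignite-igfs-config.xml")) {
            IgniteFileSystem fs = ignite.fileSystem("igfs");

            IgfsPath path = new IgfsPath("/someFile");

            byte[] chunk = new byte[128];
            Arrays.fill(chunk, (byte)7);

            // Create the file (overwriting if present) and write the first chunk.
            try (IgfsOutputStream out = fs.create(path, true)) {
                out.write(chunk);
            }

            // Append a second chunk to the same file.
            try (IgfsOutputStream out = fs.append(path, false)) {
                out.write(chunk);
            }

            // Positioned reads: readFully(pos, buf) fills the whole buffer or fails.
            byte[] readBuf = new byte[chunk.length];

            try (IgfsInputStream in = fs.open(path)) {
                in.readFully(0, readBuf);            // Originally written chunk.
                in.readFully(chunk.length, readBuf); // Appended chunk.
            }
        }
    }
}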
Use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.
The class IgfsStreamsSelfTest, method testCreateFileFragmented.
/** @throws Exception If failed. */
public void testCreateFileFragmented() throws Exception {
    IgfsEx impl = (IgfsEx)grid(0).fileSystem("igfs");

    String metaCacheName = grid(0).igfsx("igfs").configuration().getMetaCacheConfiguration().getName();
    final String dataCacheName = grid(0).igfsx("igfs").configuration().getDataCacheConfiguration().getName();

    IgfsFragmentizerManager fragmentizer = impl.context().fragmentizer();

    GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", false);

    IgfsPath path = new IgfsPath("/file");

    try {
        IgniteFileSystem fs0 = grid(0).fileSystem("igfs");
        IgniteFileSystem fs1 = grid(1).fileSystem("igfs");
        IgniteFileSystem fs2 = grid(2).fileSystem("igfs");

        try (IgfsOutputStream out = fs0.create(path, 128, false, 1, CFG_GRP_SIZE,
            F.asMap(IgfsUtils.PROP_PREFER_LOCAL_WRITES, "true"))) {
            // 1.5 blocks.
            byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

            Arrays.fill(data, (byte)1);

            out.write(data);
        }

        try (IgfsOutputStream out = fs1.append(path, false)) {
            // 1.5 blocks.
            byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

            Arrays.fill(data, (byte)2);

            out.write(data);
        }

        // After this we should have the first two blocks colocated with grid 0 and the last block colocated with grid 1.
        IgfsFileImpl fileImpl = (IgfsFileImpl)fs.info(path);

        GridCacheAdapter<Object, Object> metaCache = ((IgniteKernal)grid(0)).internalCache(metaCacheName);

        IgfsEntryInfo fileInfo = (IgfsEntryInfo)metaCache.get(fileImpl.fileId());

        IgfsFileMap map = fileInfo.fileMap();

        List<IgfsFileAffinityRange> ranges = map.ranges();

        assertEquals(2, ranges.size());

        assertTrue(ranges.get(0).startOffset() == 0);
        assertTrue(ranges.get(0).endOffset() == 2 * CFG_BLOCK_SIZE - 1);

        assertTrue(ranges.get(1).startOffset() == 2 * CFG_BLOCK_SIZE);
        assertTrue(ranges.get(1).endOffset() == 3 * CFG_BLOCK_SIZE - 1);

        // Validate data read after colocated writes.
        try (IgfsInputStream in = fs2.open(path)) {
            // Validate first part of file.
            for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++)
                assertEquals((byte)1, in.read());

            // Validate second part of file.
            for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++)
                assertEquals((byte)2, in.read());

            assertEquals(-1, in.read());
        }
    }
    finally {
        GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", true);

        boolean hasData = false;

        for (int i = 0; i < NODES_CNT; i++)
            hasData |= !grid(i).cachex(dataCacheName).isEmpty();

        assertTrue(hasData);

        fs.delete(path, true);
    }

    GridTestUtils.retryAssert(log, ASSERT_RETRIES, ASSERT_RETRY_INTERVAL, new CAX() {
        @Override public void applyx() {
            for (int i = 0; i < NODES_CNT; i++)
                assertTrue(grid(i).cachex(dataCacheName).isEmpty());
        }
    });
}
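The test above verifies block placement by reaching into the internal metadata cache (IgfsFileMap, IgfsFileAffinityRange). For reference, a hedged sketch of the public way to ask where a file's blocks live is shown below, using IgniteFileSystem.affinity; the config file name, class name, and printed format are placeholders, and the snippet assumes an already existing "/file" in an IGFS instance named "igfs".

import java.util.Collection;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.Ignition;
import org.apache.ignite.igfs.IgfsBlockLocation;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsBlockPlacementSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder config; assumes an IGFS instance named "igfs" and an existing "/file".
        try (Ignite ignite = Ignition.start("ignite-igfs-config.xml")) {
            IgniteFileSystem fs = ignite.fileSystem("igfs");

            IgfsPath path = new IgfsPath("/file");

            long len = fs.info(path).length();

            // Ask IGFS which nodes host each block range of the file.
            Collection<IgfsBlockLocation> locs = fs.affinity(path, 0, len);

            for (IgfsBlockLocation loc : locs)
                System.out.println("bytes [" + loc.start() + ", " + (loc.start() + loc.length() - 1) +
                    "] -> nodes " + loc.nodeIds());
        }
    }
}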
Use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.
The class IgfsBenchmark, method handleFile.
/** {@inheritDoc} */
@Override public void handleFile(String strPath) throws Exception {
    IgfsPath path = new IgfsPath(strPath);

    IgfsInputStream in;

    try {
        in = fs.open(path);
    }
    catch (IgfsPathNotFoundException ex) {
        System.out.println("file " + path.toString() + " does not exist: " + ex);

        throw ex;
    }
    catch (IgniteException ex) {
        System.out.println("open file " + path.toString() + " failed: " + ex);

        throw ex;
    }

    try {
        // Read the file in dataBufer-sized chunks.
        for (int i = 0; i < size / dataBufer.capacity(); i++)
            in.read(dataBufer.array());
    }
    catch (IOException ex) {
        System.out.println("read file " + path.toString() + " failed: " + ex);

        throw ex;
    }
    finally {
        in.close();
    }
}
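Note that the loop above discards the return value of in.read(dataBufer.array()), so it relies on IGFS filling the buffer on every call. A more defensive variant, sketched below under the standard InputStream contract (read may return fewer bytes than requested), loops until end of stream; the helper class and method names are illustrative, not part of the benchmark.

import java.io.IOException;

import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsInputStream;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsDrainSketch {
    /** Reads the whole file at strPath and returns the number of bytes consumed. */
    public static long drain(IgniteFileSystem fs, String strPath) throws IOException {
        byte[] buf = new byte[64 * 1024];

        long total = 0;

        try (IgfsInputStream in = fs.open(new IgfsPath(strPath))) {
            int read;

            // read(byte[]) may return fewer bytes than the buffer holds; loop until EOF (-1).
            while ((read = in.read(buf)) != -1)
                total += read;
        }

        return total;
    }
}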
Use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.
The class IgfsByteDelimiterRecordResolverSelfTest, method assertSplit.
/**
 * Check split resolution.
 *
 * @param suggestedStart Suggested start.
 * @param suggestedLen Suggested length.
 * @param expStart Expected start.
 * @param expLen Expected length.
 * @param data File data.
 * @param delims Delimiters.
 * @throws Exception If failed.
 */
public void assertSplit(long suggestedStart, long suggestedLen, long expStart, long expLen, byte[] data,
    byte[]... delims) throws Exception {
    write(data);

    IgfsByteDelimiterRecordResolver rslvr = resolver(delims);

    IgfsFileRange split;

    try (IgfsInputStream is = read()) {
        split = rslvr.resolveRecords(igfs, is, split(suggestedStart, suggestedLen));
    }

    assert split != null : "Split is null.";
    assert split.start() == expStart : "Incorrect start [expected=" + expStart + ", actual=" + split.start() + ']';
    assert split.length() == expLen : "Incorrect length [expected=" + expLen + ", actual=" + split.length() + ']';
}
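For context, here is a minimal sketch of how the same resolver might be driven outside the test harness: open the file, wrap the suggested offsets in an IgfsFileRange, and let IgfsByteDelimiterRecordResolver align the range to delimiter boundaries. The helper class and method names are assumptions for illustration only; as the assertSplitNull method below shows, the resolver may also return null.

import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.igfs.IgfsInputStream;
import org.apache.ignite.igfs.IgfsPath;
import org.apache.ignite.igfs.mapreduce.IgfsFileRange;
import org.apache.ignite.igfs.mapreduce.records.IgfsByteDelimiterRecordResolver;

public class IgfsRecordResolverSketch {
    /** Aligns the suggested [start, start + len) range to the given delimiter, or returns null. */
    public static IgfsFileRange align(IgniteFileSystem fs, IgfsPath path, long start, long len,
        byte[] delim) throws Exception {
        IgfsByteDelimiterRecordResolver rslvr = new IgfsByteDelimiterRecordResolver(delim);

        try (IgfsInputStream in = fs.open(path)) {
            return rslvr.resolveRecords(fs, in, new IgfsFileRange(path, start, len));
        }
    }
}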
Use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.
The class IgfsByteDelimiterRecordResolverSelfTest, method assertSplitNull.
/**
 * Check that the split resolution resulted in {@code null}.
 *
 * @param suggestedStart Suggested start.
 * @param suggestedLen Suggested length.
 * @param data File data.
 * @param delims Delimiters.
 * @throws Exception If failed.
 */
public void assertSplitNull(long suggestedStart, long suggestedLen, byte[] data, byte[]... delims) throws Exception {
    write(data);

    IgfsByteDelimiterRecordResolver rslvr = resolver(delims);

    IgfsFileRange split;

    try (IgfsInputStream is = read()) {
        split = rslvr.resolveRecords(igfs, is, split(suggestedStart, suggestedLen));
    }

    assert split == null : "Split is not null.";
}