Example usage of org.apache.ignite.igfs.IgfsInputStream in the Apache Ignite project:
class IgfsFixedLengthRecordResolverSelfTest, method assertSplitNull.
/**
 * Check that split resolution produces {@code null} for the given suggested range.
 *
 * @param suggestedStart Suggested start.
 * @param suggestedLen Suggested length.
 * @param data File data.
 * @param len Length.
 * @throws Exception If failed.
 */
public void assertSplitNull(long suggestedStart, long suggestedLen, byte[] data, int len) throws Exception {
    // Prepare file content, then resolve records over it with a fixed-length resolver.
    write(data);

    IgfsFixedLengthRecordResolver fixedLenRslvr = resolver(len);

    IgfsFileRange resolved;

    try (IgfsInputStream in = read()) {
        resolved = fixedLenRslvr.resolveRecords(igfs, in, split(suggestedStart, suggestedLen));
    }

    assert resolved == null : "Split is not null.";
}
Example usage of org.apache.ignite.igfs.IgfsInputStream in the Apache Ignite project:
class IgfsStringDelimiterRecordResolverSelfTest, method assertSplitNull.
/**
 * Check that split resolution produces {@code null} for the given suggested range.
 *
 * @param suggestedStart Suggested start.
 * @param suggestedLen Suggested length.
 * @param data File data.
 * @param delims Delimiters.
 * @throws Exception If failed.
 */
public void assertSplitNull(long suggestedStart, long suggestedLen, byte[] data, String... delims) throws Exception {
    // Prepare file content, then resolve records over it with a string-delimiter resolver.
    write(data);

    IgfsStringDelimiterRecordResolver delimRslvr = resolver(delims);

    IgfsFileRange resolved;

    try (IgfsInputStream in = read()) {
        resolved = delimRslvr.resolveRecords(igfs, in, split(suggestedStart, suggestedLen));
    }

    assert resolved == null : "Split is not null.";
}
Example usage of org.apache.ignite.igfs.IgfsInputStream in the Apache Ignite project:
class IgfsStringDelimiterRecordResolverSelfTest, method assertSplit.
/**
 * Check split resolution: the resolved range must match the expected start and length.
 *
 * @param suggestedStart Suggested start.
 * @param suggestedLen Suggested length.
 * @param expStart Expected start.
 * @param expLen Expected length.
 * @param data File data.
 * @param delims Delimiters.
 * @throws Exception If failed.
 */
public void assertSplit(long suggestedStart, long suggestedLen, long expStart, long expLen, byte[] data, String... delims) throws Exception {
    write(data);

    // Declare the resolver with its specific string-delimiter type, consistent with
    // assertSplitNull in this class (which also obtains it via resolver(delims)).
    IgfsStringDelimiterRecordResolver rslvr = resolver(delims);

    IgfsFileRange split;

    try (IgfsInputStream is = read()) {
        split = rslvr.resolveRecords(igfs, is, split(suggestedStart, suggestedLen));
    }

    assert split != null : "Split is null.";
    assert split.start() == expStart : "Incorrect start [expected=" + expStart + ", actual=" + split.start() + ']';
    assert split.length() == expLen : "Incorrect length [expected=" + expLen + ", actual=" + split.length() + ']';
}
Example usage of org.apache.ignite.igfs.IgfsInputStream in the Apache Ignite project:
class HadoopIgfsInProc, method readData.
/** {@inheritDoc} */
@Override
public IgniteInternalFuture<byte[]> readData(HadoopIgfsStreamDelegate delegate, long pos, int len, @Nullable byte[] outBuf, int outOff, int outLen) {
    IgfsInputStream stream = delegate.target();

    try {
        byte[] res = null;

        if (outBuf != null) {
            // Space remaining in the caller-supplied buffer after the offset.
            int outTailLen = outBuf.length - outOff;

            if (len <= outTailLen)
                // Everything fits into the caller's buffer.
                stream.readFully(pos, outBuf, outOff, len);
            else {
                // Fill the tail of the caller's buffer first...
                stream.readFully(pos, outBuf, outOff, outTailLen);

                int remainderLen = len - outTailLen;

                res = new byte[remainderLen];

                // ...then continue reading the remainder from where the first read
                // stopped. Reading from 'pos' again would duplicate the bytes already
                // placed into outBuf instead of advancing through the stream.
                stream.readFully(pos + outTailLen, res, 0, remainderLen);
            }
        }
        else {
            // No caller buffer: allocate and fill a fresh array of exactly 'len' bytes.
            res = new byte[len];

            stream.readFully(pos, res, 0, len);
        }

        return new GridFinishedFuture<>(res);
    }
    catch (IllegalStateException | IOException e) {
        // Notify the registered stream listener (if any) before completing the
        // future exceptionally.
        HadoopIgfsStreamEventListener lsnr = lsnrs.get(delegate);

        if (lsnr != null)
            lsnr.onError(e.getMessage());

        return new GridFinishedFuture<>(e);
    }
}
Example usage of org.apache.ignite.igfs.IgfsInputStream in the Apache Ignite project:
class HadoopIgfsDualAbstractSelfTest, method testOpenPrefetchOverride.
/**
 * Check how prefetch override works: the "seq reads before prefetch" property is raised
 * via the Hadoop configuration, two blocks are read through the Hadoop facade, and then
 * reading the third block must fail once the secondary file system copy is deleted.
 *
 * @throws Exception If failed.
 */
public void testOpenPrefetchOverride() throws Exception {
create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));
// Write enough data to the secondary file system (more than two full blocks).
final int blockSize = IGFS_BLOCK_SIZE;
IgfsOutputStream out = igfsSecondary.append(FILE, false);
int totalWritten = 0;
while (totalWritten < blockSize * 2 + chunk.length) {
out.write(chunk);
totalWritten += chunk.length;
}
out.close();
awaitFileClose(igfsSecondary, FILE);
// Instantiate file system with overridden "seq reads before prefetch" property.
Configuration cfg = new Configuration();
cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));
int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;
cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs@"), seqReads);
FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);
// Read the first two blocks through the Hadoop FileSystem facade.
Path fsHome = new Path(PRIMARY_URI);
Path dir = new Path(fsHome, DIR.name());
Path subdir = new Path(dir, SUBDIR.name());
Path file = new Path(subdir, FILE.name());
FSDataInputStream fsIn = fs.open(file);
final byte[] readBuf = new byte[blockSize * 2];
fsIn.readFully(0, readBuf, 0, readBuf.length);
// Wait for a while for prefetch to finish (if any): poll the data cache for the
// key of block index 2 (the third block), sleeping 100 ms between attempts.
IgfsMetaManager meta = igfs.context().meta();
IgfsEntryInfo info = meta.info(meta.fileId(FILE));
IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);
IgniteCache<IgfsBlockKey, byte[]> dataCache = igfs.context().kernalContext().cache().jcache(igfs.configuration().getDataCacheConfiguration().getName());
for (int i = 0; i < 10; i++) {
if (dataCache.containsKey(key))
break;
else
U.sleep(100);
}
fsIn.close();
// Remove the file from the secondary file system, so uncached blocks become unreadable.
igfsSecondary.delete(FILE, false);
// Try reading the third block. Should fail, since with the raised prefetch threshold
// the third block is presumably not prefetched into the primary cache — TODO confirm.
GridTestUtils.assertThrows(log, new Callable<Object>() {
@Override
public Object call() throws Exception {
IgfsInputStream in0 = igfs.open(FILE);
in0.seek(blockSize * 2);
try {
in0.read(readBuf);
} finally {
U.closeQuiet(in0);
}
return null;
}
}, IOException.class, "Failed to read data due to secondary file system exception: /dir/subdir/file");
}
Aggregations (end of usage examples).