Search in sources :

Example 11 with IgfsInputStream

use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.

The class IgfsFixedLengthRecordResolverSelfTest, method assertSplitNull.

/**
 * Verifies that resolving records over the suggested range yields no split (i.e. {@code null}).
 *
 * @param suggestedStart Suggested start.
 * @param suggestedLen Suggested length.
 * @param data File data.
 * @param len Record length.
 * @throws Exception If failed.
 */
public void assertSplitNull(long suggestedStart, long suggestedLen, byte[] data, int len) throws Exception {
    write(data);

    IgfsFixedLengthRecordResolver recRslvr = resolver(len);

    IgfsFileRange res;

    // Stream is closed automatically once record resolution completes.
    try (IgfsInputStream in = read()) {
        res = recRslvr.resolveRecords(igfs, in, split(suggestedStart, suggestedLen));
    }

    assert res == null : "Split is not null.";
}
Also used : IgfsInputStream(org.apache.ignite.igfs.IgfsInputStream) IgfsFileRange(org.apache.ignite.igfs.mapreduce.IgfsFileRange) IgfsFixedLengthRecordResolver(org.apache.ignite.igfs.mapreduce.records.IgfsFixedLengthRecordResolver)

Example 12 with IgfsInputStream

use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.

The class IgfsStringDelimiterRecordResolverSelfTest, method assertSplitNull.

/**
 * Verifies that resolving records over the suggested range yields no split (i.e. {@code null}).
 *
 * @param suggestedStart Suggested start.
 * @param suggestedLen Suggested length.
 * @param data File data.
 * @param delims Delimiters.
 * @throws Exception If failed.
 */
public void assertSplitNull(long suggestedStart, long suggestedLen, byte[] data, String... delims) throws Exception {
    write(data);

    IgfsStringDelimiterRecordResolver recRslvr = resolver(delims);

    IgfsFileRange res;

    // Stream is closed automatically once record resolution completes.
    try (IgfsInputStream in = read()) {
        res = recRslvr.resolveRecords(igfs, in, split(suggestedStart, suggestedLen));
    }

    assert res == null : "Split is not null.";
}
Also used : IgfsInputStream(org.apache.ignite.igfs.IgfsInputStream) IgfsFileRange(org.apache.ignite.igfs.mapreduce.IgfsFileRange) IgfsStringDelimiterRecordResolver(org.apache.ignite.igfs.mapreduce.records.IgfsStringDelimiterRecordResolver)

Example 13 with IgfsInputStream

use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.

The class IgfsStringDelimiterRecordResolverSelfTest, method assertSplit.

/**
 * Verifies that split resolution produces the expected record boundaries.
 *
 * @param suggestedStart Suggested start.
 * @param suggestedLen Suggested length.
 * @param expStart Expected start.
 * @param expLen Expected length.
 * @param data File data.
 * @param delims Delimiters.
 * @throws Exception If failed.
 */
public void assertSplit(long suggestedStart, long suggestedLen, long expStart, long expLen, byte[] data, String... delims) throws Exception {
    write(data);

    // Declared as the byte-delimiter base type, matching what resolver(delims) is assignable to.
    IgfsByteDelimiterRecordResolver recRslvr = resolver(delims);

    IgfsFileRange res;

    // Stream is closed automatically once record resolution completes.
    try (IgfsInputStream in = read()) {
        res = recRslvr.resolveRecords(igfs, in, split(suggestedStart, suggestedLen));
    }

    assert res != null : "Split is null.";
    assert res.start() == expStart : "Incorrect start [expected=" + expStart + ", actual=" + res.start() + ']';
    assert res.length() == expLen : "Incorrect length [expected=" + expLen + ", actual=" + res.length() + ']';
}
Also used : IgfsInputStream(org.apache.ignite.igfs.IgfsInputStream) IgfsFileRange(org.apache.ignite.igfs.mapreduce.IgfsFileRange) IgfsByteDelimiterRecordResolver(org.apache.ignite.igfs.mapreduce.records.IgfsByteDelimiterRecordResolver)

Example 14 with IgfsInputStream

use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.

The class HadoopIgfsInProc, method readData.

/** {@inheritDoc} */
@Override
public IgniteInternalFuture<byte[]> readData(HadoopIgfsStreamDelegate delegate, long pos, int len, @Nullable byte[] outBuf, int outOff, int outLen) {
    // NOTE(review): outLen is part of the interface but unused here — confirm against other implementations.
    IgfsInputStream stream = delegate.target();
    try {
        // Overflow buffer: allocated only when the requested range does not fit into
        // the tail of the caller-supplied buffer (or when no buffer was given at all).
        byte[] res = null;
        if (outBuf != null) {
            int outTailLen = outBuf.length - outOff;
            if (len <= outTailLen)
                // Whole range fits into the caller's buffer.
                stream.readFully(pos, outBuf, outOff, len);
            else {
                // Fill the caller's buffer tail first...
                stream.readFully(pos, outBuf, outOff, outTailLen);
                int remainderLen = len - outTailLen;
                res = new byte[remainderLen];
                // ...then read the remainder into a fresh array.
                // FIX: the remainder starts at pos + outTailLen. Reading from 'pos' again
                // (as before) duplicated the bytes already copied into outBuf and dropped
                // the actual tail of the requested range.
                stream.readFully(pos + outTailLen, res, 0, remainderLen);
            }
        } else {
            // No caller buffer: return the whole range in the result array.
            res = new byte[len];
            stream.readFully(pos, res, 0, len);
        }
        return new GridFinishedFuture<>(res);
    } catch (IllegalStateException | IOException e) {
        // Notify the registered stream listener (if any), then complete the future exceptionally.
        HadoopIgfsStreamEventListener lsnr = lsnrs.get(delegate);
        if (lsnr != null)
            lsnr.onError(e.getMessage());
        return new GridFinishedFuture<>(e);
    }
}
Also used : IgfsInputStream(org.apache.ignite.igfs.IgfsInputStream) IOException(java.io.IOException) GridFinishedFuture(org.apache.ignite.internal.util.future.GridFinishedFuture)

Example 15 with IgfsInputStream

use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.

The class HadoopIgfsDualAbstractSelfTest, method testOpenPrefetchOverride.

/**
     * Check how prefetch override works.
     *
     * @throws Exception If failed.
     */
public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));
    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;
    IgfsOutputStream out = igfsSecondary.append(FILE, false);
    int totalWritten = 0;
    // Append whole chunks until the file spans more than two full blocks,
    // so a "third block" exists beyond the range read below.
    while (totalWritten < blockSize * 2 + chunk.length) {
        out.write(chunk);
        totalWritten += chunk.length;
    }
    out.close();
    awaitFileClose(igfsSecondary, FILE);
    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();
    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));
    // Raise the threshold above the default so the reads below should not trigger prefetch.
    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;
    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs@"), seqReads);
    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);
    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());
    FSDataInputStream fsIn = fs.open(file);
    final byte[] readBuf = new byte[blockSize * 2];
    fsIn.readFully(0, readBuf, 0, readBuf.length);
    // Wait for a while for prefetch to finish (if any).
    IgfsMetaManager meta = igfs.context().meta();
    IgfsEntryInfo info = meta.info(meta.fileId(FILE));
    // Key for block index 2 — presumably the block a prefetch would load ahead of the
    // sequential reads above; TODO confirm IgfsBlockKey block-index semantics.
    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);
    IgniteCache<IgfsBlockKey, byte[]> dataCache = igfs.context().kernalContext().cache().jcache(igfs.configuration().getDataCacheConfiguration().getName());
    // Best-effort poll (up to ~1 s) for that block appearing in the data cache;
    // the loop deliberately does not fail either way.
    for (int i = 0; i < 10; i++) {
        if (dataCache.containsKey(key))
            break;
        else
            U.sleep(100);
    }
    fsIn.close();
    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);
    // Try reading the third block. Should fail.
    // With the prefetch threshold raised, the block is expected not to be cached locally,
    // so the read must hit the now-deleted secondary file and throw.
    GridTestUtils.assertThrows(log, new Callable<Object>() {

        @Override
        public Object call() throws Exception {
            IgfsInputStream in0 = igfs.open(FILE);
            in0.seek(blockSize * 2);
            try {
                in0.read(readBuf);
            } finally {
                // Close quietly: an exception from read() is the expected outcome here.
                U.closeQuiet(in0);
            }
            return null;
        }
    }, IOException.class, "Failed to read data due to secondary file system exception: /dir/subdir/file");
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) Path(org.apache.hadoop.fs.Path) IgfsInputStream(org.apache.ignite.igfs.IgfsInputStream) IgfsIpcEndpointConfiguration(org.apache.ignite.igfs.IgfsIpcEndpointConfiguration) Configuration(org.apache.hadoop.conf.Configuration) FileSystemConfiguration(org.apache.ignite.configuration.FileSystemConfiguration) IgniteConfiguration(org.apache.ignite.configuration.IgniteConfiguration) CacheConfiguration(org.apache.ignite.configuration.CacheConfiguration) IgfsOutputStream(org.apache.ignite.igfs.IgfsOutputStream) URI(java.net.URI) IOException(java.io.IOException) IgfsMetaManager(org.apache.ignite.internal.processors.igfs.IgfsMetaManager) FileSystem(org.apache.hadoop.fs.FileSystem) IgniteHadoopIgfsSecondaryFileSystem(org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem) IgfsSecondaryFileSystem(org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem) IgfsBlockKey(org.apache.ignite.internal.processors.igfs.IgfsBlockKey) IgfsEntryInfo(org.apache.ignite.internal.processors.igfs.IgfsEntryInfo) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)

Aggregations

IgfsInputStream (org.apache.ignite.igfs.IgfsInputStream)27 IgfsPath (org.apache.ignite.igfs.IgfsPath)11 IgfsFileRange (org.apache.ignite.igfs.mapreduce.IgfsFileRange)9 IgfsOutputStream (org.apache.ignite.igfs.IgfsOutputStream)8 IOException (java.io.IOException)5 IgniteException (org.apache.ignite.IgniteException)5 IgniteFileSystem (org.apache.ignite.IgniteFileSystem)4 IgniteCheckedException (org.apache.ignite.IgniteCheckedException)3 IgfsMetrics (org.apache.ignite.igfs.IgfsMetrics)3 IgfsByteDelimiterRecordResolver (org.apache.ignite.igfs.mapreduce.records.IgfsByteDelimiterRecordResolver)3 IgfsException (org.apache.ignite.igfs.IgfsException)2 IgfsFile (org.apache.ignite.igfs.IgfsFile)2 IgfsPathNotFoundException (org.apache.ignite.igfs.IgfsPathNotFoundException)2 IgfsFixedLengthRecordResolver (org.apache.ignite.igfs.mapreduce.records.IgfsFixedLengthRecordResolver)2 IgfsNewLineRecordResolver (org.apache.ignite.igfs.mapreduce.records.IgfsNewLineRecordResolver)2 InputStream (java.io.InputStream)1 URI (java.net.URI)1 HashMap (java.util.HashMap)1 Random (java.util.Random)1 UUID (java.util.UUID)1