
Example 1 with IgfsInputStream

use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.

the class IgfsExample method read.

/**
 * Opens a file and reads its contents into a byte array.
 *
 * @param fs IGFS instance.
 * @param path File path.
 * @throws IgniteException If file can't be opened.
 * @throws IOException If data can't be read.
 */
private static void read(IgniteFileSystem fs, IgfsPath path) throws IgniteException, IOException {
    assert fs != null;
    assert path != null;
    assert fs.info(path).isFile();
    byte[] data = new byte[(int) fs.info(path).length()];
    try (IgfsInputStream in = fs.open(path)) {
        in.read(data);
    }
    System.out.println();
    System.out.println(">>> Read data from " + path + ": " + Arrays.toString(data));
}
Also used : IgfsInputStream(org.apache.ignite.igfs.IgfsInputStream)
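
For context, here is a minimal write counterpart (a hedged sketch, not part of the original IgfsExample class): it creates the file and writes a few bytes so that read() above has data to return. The helper name write and the sample payload are illustrative, and the sketch additionally needs org.apache.ignite.igfs.IgfsOutputStream on the import list.

/**
 * Creates (or overwrites) a file and writes sample data to it (illustrative sketch).
 *
 * @param fs IGFS instance.
 * @param path File path.
 * @throws IgniteException If file can't be created.
 * @throws IOException If data can't be written.
 */
private static void write(IgniteFileSystem fs, IgfsPath path) throws IgniteException, IOException {
    assert fs != null;
    assert path != null;

    // create(path, true) overwrites the file if it already exists.
    try (IgfsOutputStream out = fs.create(path, true)) {
        out.write(new byte[] { 1, 2, 3 });
    }

    System.out.println();
    System.out.println(">>> Wrote data to " + path);
}

Calling write(fs, path) before read(fs, path) would make the example self-contained.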

Example 2 with IgfsInputStream

use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.

the class IgfsMetricsSelfTest method testBlockMetrics.

/**
 * Test block metrics.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings({ "ResultOfMethodCallIgnored", "ConstantConditions" })
public void testBlockMetrics() throws Exception {
    IgfsEx igfs = (IgfsEx) igfsPrimary[0];
    IgfsPath fileRemote = new IgfsPath("/fileRemote");
    IgfsPath file1 = new IgfsPath("/primary/file1");
    IgfsPath file2 = new IgfsPath("/primary/file2");
    // Create remote file and write some data to it.
    IgfsOutputStream out = igfsSecondary.create(fileRemote, 256, true, null, 1, 256, null);
    int rmtBlockSize = igfsSecondary.info(fileRemote).blockSize();
    out.write(new byte[rmtBlockSize]);
    out.close();
    // Start metrics measuring.
    IgfsMetrics initMetrics = igfs.metrics();
    // Create empty file.
    igfs.create(file1, 256, true, null, 1, 256, null).close();
    int blockSize = igfs.info(file1).blockSize();
    checkBlockMetrics(initMetrics, igfs.metrics(), 0, 0, 0, 0, 0, 0);
    // Write two blocks to the file.
    IgfsOutputStream os = igfs.append(file1, false);
    os.write(new byte[blockSize * 2]);
    os.close();
    checkBlockMetrics(initMetrics, igfs.metrics(), 0, 0, 0, 2, 0, blockSize * 2);
    // Write one more file (one block).
    os = igfs.create(file2, 256, true, null, 1, 256, null);
    os.write(new byte[blockSize]);
    os.close();
    checkBlockMetrics(initMetrics, igfs.metrics(), 0, 0, 0, 3, 0, blockSize * 3);
    // Read data from the first file.
    IgfsInputStream is = igfs.open(file1);
    is.readFully(0, new byte[blockSize * 2]);
    is.close();
    checkBlockMetrics(initMetrics, igfs.metrics(), 2, 0, blockSize * 2, 3, 0, blockSize * 3);
    // Read data from the second file with hits.
    is = igfs.open(file2);
    is.read(new byte[blockSize]);
    is.close();
    checkBlockMetrics(initMetrics, igfs.metrics(), 3, 0, blockSize * 3, 3, 0, blockSize * 3);
    // Clear the first file.
    igfs.create(file1, true).close();
    checkBlockMetrics(initMetrics, igfs.metrics(), 3, 0, blockSize * 3, 3, 0, blockSize * 3);
    // Delete the second file.
    igfs.delete(file2, false);
    checkBlockMetrics(initMetrics, igfs.metrics(), 3, 0, blockSize * 3, 3, 0, blockSize * 3);
    // Read remote file.
    is = igfs.open(fileRemote);
    is.read(new byte[rmtBlockSize]);
    is.close();
    checkBlockMetrics(initMetrics, igfs.metrics(), 4, 1, blockSize * 3 + rmtBlockSize, 3, 0, blockSize * 3);
    // Wait for the blocks to be placed into the cache.
    U.sleep(300);
    // Read remote file again.
    is = igfs.open(fileRemote);
    is.read(new byte[rmtBlockSize]);
    is.close();
    checkBlockMetrics(initMetrics, igfs.metrics(), 5, 1, blockSize * 3 + rmtBlockSize * 2, 3, 0, blockSize * 3);
    IgfsMetrics metrics = igfs.metrics();
    assert metrics.secondarySpaceSize() == rmtBlockSize;
    // Write some data to the file working in DUAL mode.
    os = igfs.append(fileRemote, false);
    os.write(new byte[rmtBlockSize]);
    os.close();
    // An additional block is read here due to file-ending synchronization.
    checkBlockMetrics(initMetrics, igfs.metrics(), 5, 1, blockSize * 3 + rmtBlockSize * 2, 4, 1, blockSize * 3 + rmtBlockSize);
    metrics = igfs.metrics();
    assert metrics.secondarySpaceSize() == rmtBlockSize * 2;
    igfs.delete(fileRemote, false);
    U.sleep(300);
    assert igfs.metrics().secondarySpaceSize() == 0;
    // Write partial block to the first file.
    os = igfs.append(file1, false);
    os.write(new byte[blockSize / 2]);
    os.close();
    checkBlockMetrics(initMetrics, igfs.metrics(), 5, 1, blockSize * 3 + rmtBlockSize * 2, 5, 1, blockSize * 7 / 2 + rmtBlockSize);
    igfs.resetMetrics();
    metrics = igfs.metrics();
    assert metrics.blocksReadTotal() == 0;
    assert metrics.blocksReadRemote() == 0;
    assert metrics.blocksWrittenTotal() == 0;
    assert metrics.blocksWrittenRemote() == 0;
    assert metrics.bytesRead() == 0;
    assert metrics.bytesReadTime() == 0;
    assert metrics.bytesWritten() == 0;
    assert metrics.bytesWriteTime() == 0;
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) IgfsInputStream(org.apache.ignite.igfs.IgfsInputStream) IgfsMetrics(org.apache.ignite.igfs.IgfsMetrics) IgfsOutputStream(org.apache.ignite.igfs.IgfsOutputStream)
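
The checkBlockMetrics helper is not shown in this snippet. Below is a plausible reconstruction of what it verifies, assuming it compares metric deltas against the snapshot taken at the start of the test; the signature and assertion order are inferred from the calls above, not copied from the source, and JUnit assertions are assumed to be available in the test class.

/** Compares block metric deltas against expected values (hypothetical reconstruction). */
private void checkBlockMetrics(IgfsMetrics initMetrics, IgfsMetrics metrics,
    long blocksRead, long blocksReadRemote, long bytesRead,
    long blocksWrite, long blocksWriteRemote, long bytesWrite) {
    assertNotNull(metrics);

    // Read-side deltas since the initial snapshot.
    assertEquals(blocksRead, metrics.blocksReadTotal() - initMetrics.blocksReadTotal());
    assertEquals(blocksReadRemote, metrics.blocksReadRemote() - initMetrics.blocksReadRemote());
    assertEquals(bytesRead, metrics.bytesRead() - initMetrics.bytesRead());

    // Write-side deltas since the initial snapshot.
    assertEquals(blocksWrite, metrics.blocksWrittenTotal() - initMetrics.blocksWrittenTotal());
    assertEquals(blocksWriteRemote, metrics.blocksWrittenRemote() - initMetrics.blocksWrittenRemote());
    assertEquals(bytesWrite, metrics.bytesWritten() - initMetrics.bytesWritten());
}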

Example 3 with IgfsInputStream

use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.

the class IgfsStreamsSelfTest method testCreateFileFragmented.

/**
 * @throws Exception If failed.
 */
public void testCreateFileFragmented() throws Exception {
    IgfsEx impl = (IgfsEx) grid(0).fileSystem("igfs");
    String metaCacheName = grid(0).igfsx("igfs").configuration().getMetaCacheConfiguration().getName();
    final String dataCacheName = grid(0).igfsx("igfs").configuration().getDataCacheConfiguration().getName();
    IgfsFragmentizerManager fragmentizer = impl.context().fragmentizer();
    GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", false);
    IgfsPath path = new IgfsPath("/file");
    try {
        IgniteFileSystem fs0 = grid(0).fileSystem("igfs");
        IgniteFileSystem fs1 = grid(1).fileSystem("igfs");
        IgniteFileSystem fs2 = grid(2).fileSystem("igfs");
        try (IgfsOutputStream out = fs0.create(path, 128, false, 1, CFG_GRP_SIZE, F.asMap(IgfsUtils.PROP_PREFER_LOCAL_WRITES, "true"))) {
            // 1.5 blocks
            byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];
            Arrays.fill(data, (byte) 1);
            out.write(data);
        }
        try (IgfsOutputStream out = fs1.append(path, false)) {
            // 1.5 blocks.
            byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];
            Arrays.fill(data, (byte) 2);
            out.write(data);
        }
        // After this, the first two blocks should be colocated with grid 0 and the last block with grid 1.
        IgfsFileImpl fileImpl = (IgfsFileImpl) fs.info(path);
        GridCacheAdapter<Object, Object> metaCache = ((IgniteKernal) grid(0)).internalCache(metaCacheName);
        IgfsEntryInfo fileInfo = (IgfsEntryInfo) metaCache.get(fileImpl.fileId());
        IgfsFileMap map = fileInfo.fileMap();
        List<IgfsFileAffinityRange> ranges = map.ranges();
        assertEquals(2, ranges.size());
        assertTrue(ranges.get(0).startOffset() == 0);
        assertTrue(ranges.get(0).endOffset() == 2 * CFG_BLOCK_SIZE - 1);
        assertTrue(ranges.get(1).startOffset() == 2 * CFG_BLOCK_SIZE);
        assertTrue(ranges.get(1).endOffset() == 3 * CFG_BLOCK_SIZE - 1);
        // Validate data read after colocated writes.
        try (IgfsInputStream in = fs2.open(path)) {
            // Validate first part of file.
            for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 1, in.read());
            // Validate second part of file.
            for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 2, in.read());
            assertEquals(-1, in.read());
        }
    } finally {
        GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", true);
        boolean hasData = false;
        for (int i = 0; i < NODES_CNT; i++) hasData |= !grid(i).cachex(dataCacheName).isEmpty();
        assertTrue(hasData);
        fs.delete(path, true);
    }
    GridTestUtils.retryAssert(log, ASSERT_RETRIES, ASSERT_RETRY_INTERVAL, new CAX() {

        @Override
        public void applyx() {
            for (int i = 0; i < NODES_CNT; i++) assertTrue(grid(i).cachex(dataCacheName).isEmpty());
        }
    });
}
Also used : IgniteKernal(org.apache.ignite.internal.IgniteKernal) IgfsInputStream(org.apache.ignite.igfs.IgfsInputStream) IgniteFileSystem(org.apache.ignite.IgniteFileSystem) IgfsOutputStream(org.apache.ignite.igfs.IgfsOutputStream) IgfsPath(org.apache.ignite.igfs.IgfsPath) CAX(org.apache.ignite.internal.util.typedef.CAX)
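
The colocation asserted in the comment above can also be observed through the public affinity API. A short illustrative sketch follows, assuming it runs inside the same test class (so grid(0) and CFG_BLOCK_SIZE are available) and that org.apache.ignite.igfs.IgfsBlockLocation and java.util.Collection are imported.

IgniteFileSystem fs0 = grid(0).fileSystem("igfs");

// Affinity block locations for the whole 3-block file written above.
Collection<IgfsBlockLocation> locations = fs0.affinity(new IgfsPath("/file"), 0, 3L * CFG_BLOCK_SIZE);

for (IgfsBlockLocation loc : locations)
    System.out.println(">>> Block range [start=" + loc.start() + ", len=" + loc.length() +
        ", nodeIds=" + loc.nodeIds() + ']');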

Example 4 with IgfsInputStream

use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.

the class IgfsProcessorSelfTest method checkCreateAppendLongData.

/**
 * @param chunkSize Chunk size.
 * @param bufSize Buffer size.
 * @param cnt Count.
 * @throws Exception If failed.
 */
private void checkCreateAppendLongData(int chunkSize, int bufSize, int cnt) throws Exception {
    IgfsPath path = new IgfsPath("/someFile");
    byte[] buf = new byte[chunkSize];
    for (int i = 0; i < buf.length; i++) buf[i] = (byte) (i * i);
    IgfsOutputStream os = igfs.create(path, bufSize, true, null, 0, 1024, null);
    try {
        for (int i = 0; i < cnt; i++) os.write(buf);
        os.flush();
    } finally {
        os.close();
    }
    os = igfs.append(path, chunkSize, false, null);
    try {
        for (int i = 0; i < cnt; i++) os.write(buf);
        os.flush();
    } finally {
        os.close();
    }
    byte[] readBuf = new byte[chunkSize];
    try (IgfsInputStream in = igfs.open(path)) {
        long pos = 0;
        for (int k = 0; k < 2 * cnt; k++) {
            in.readFully(pos, readBuf);
            for (int i = 0; i < readBuf.length; i++) assertEquals(buf[i], readBuf[i]);
            pos += readBuf.length;
        }
    }
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) IgfsInputStream(org.apache.ignite.igfs.IgfsInputStream) IgfsOutputStream(org.apache.ignite.igfs.IgfsOutputStream)
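
For reference, a minimal hedged sketch of the positional read used above; the path and sizes are illustrative and assume a running IGFS instance named igfs and a surrounding method that declares IOException.

// readFully(pos, buf) fills the entire buffer starting at the given absolute file offset.
try (IgfsInputStream in = igfs.open(new IgfsPath("/someFile"))) {
    byte[] chunk = new byte[512];

    in.readFully(1024, chunk); // Read 512 bytes starting at offset 1024.
}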

Example 5 with IgfsInputStream

use of org.apache.ignite.igfs.IgfsInputStream in project ignite by apache.

the class IgfsIpcHandler method processPathControlRequest.

/**
 * Processes path control request.
 *
 * @param ses Session.
 * @param cmd Command.
 * @param msg Message.
 * @return Response message.
 * @throws IgniteCheckedException If failed.
 */
private IgfsMessage processPathControlRequest(final IgfsClientSession ses, final IgfsIpcCommand cmd, IgfsMessage msg) throws IgniteCheckedException {
    final IgfsPathControlRequest req = (IgfsPathControlRequest) msg;
    if (log.isDebugEnabled())
        log.debug("Processing path control request [igfsName=" + igfs.name() + ", req=" + req + ']');
    final IgfsControlResponse res = new IgfsControlResponse();
    final String userName = req.userName();
    assert userName != null;
    try {
        IgfsUserContext.doAs(userName, new IgniteOutClosure<Object>() {

            @Override
            public Void apply() {
                switch(cmd) {
                    case EXISTS:
                        res.response(igfs.exists(req.path()));
                        break;
                    case INFO:
                        res.response(igfs.info(req.path()));
                        break;
                    case PATH_SUMMARY:
                        res.response(igfs.summary(req.path()));
                        break;
                    case UPDATE:
                        res.response(igfs.update(req.path(), req.properties()));
                        break;
                    case RENAME:
                        igfs.rename(req.path(), req.destinationPath());
                        res.response(true);
                        break;
                    case DELETE:
                        res.response(igfs.delete(req.path(), req.flag()));
                        break;
                    case MAKE_DIRECTORIES:
                        igfs.mkdirs(req.path(), req.properties());
                        res.response(true);
                        break;
                    case LIST_PATHS:
                        res.paths(igfs.listPaths(req.path()));
                        break;
                    case LIST_FILES:
                        res.files(igfs.listFiles(req.path()));
                        break;
                    case SET_TIMES:
                        igfs.setTimes(req.path(), req.modificationTime(), req.accessTime());
                        res.response(true);
                        break;
                    case AFFINITY:
                        res.locations(igfs.affinity(req.path(), req.start(), req.length()));
                        break;
                    case OPEN_READ:
                        {
                            IgfsInputStream igfsIn = !req.flag() ? igfs.open(req.path(), bufSize) : igfs.open(req.path(), bufSize, req.sequentialReadsBeforePrefetch());
                            long streamId = registerResource(ses, igfsIn);
                            if (log.isDebugEnabled())
                                log.debug("Opened IGFS input stream for file read [igfsName=" + igfs.name() + ", path=" + req.path() + ", streamId=" + streamId + ", ses=" + ses + ']');
                            res.response(new IgfsInputStreamDescriptor(streamId, igfsIn.length()));
                            break;
                        }
                    case OPEN_CREATE:
                        {
                            long streamId = registerResource(ses, igfs.create(
                                req.path(),        // Path.
                                bufSize,           // Buffer size.
                                req.flag(),        // Overwrite if exists.
                                affinityKey(req),  // Affinity key based on replication factor.
                                req.replication(), // Replication factor.
                                req.blockSize(),   // Block size.
                                req.properties()   // File properties.
                            ));
                            if (log.isDebugEnabled())
                                log.debug("Opened IGFS output stream for file create [igfsName=" + igfs.name() + ", path=" + req.path() + ", streamId=" + streamId + ", ses=" + ses + ']');
                            res.response(streamId);
                            break;
                        }
                    case OPEN_APPEND:
                        {
                            long streamId = registerResource(ses, igfs.append(
                                req.path(),      // Path.
                                bufSize,         // Buffer size.
                                req.flag(),      // Create if absent.
                                req.properties() // File properties.
                            ));
                            if (log.isDebugEnabled())
                                log.debug("Opened IGFS output stream for file append [igfsName=" + igfs.name() + ", path=" + req.path() + ", streamId=" + streamId + ", ses=" + ses + ']');
                            res.response(streamId);
                            break;
                        }
                    default:
                        assert false : "Unhandled path control request command: " + cmd;
                        break;
                }
                return null;
            }
        });
    } catch (IgniteException e) {
        throw new IgniteCheckedException(e);
    }
    if (log.isDebugEnabled())
        log.debug("Finished processing path control request [igfsName=" + igfs.name() + ", req=" + req + ", res=" + res + ']');
    return res;
}
Also used : IgfsInputStream(org.apache.ignite.igfs.IgfsInputStream) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) IgfsPathControlRequest(org.apache.ignite.internal.igfs.common.IgfsPathControlRequest) IgfsControlResponse(org.apache.ignite.internal.igfs.common.IgfsControlResponse)
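
The OPEN_READ branch above maps onto the two public open() overloads of IgniteFileSystem. An illustrative client-side sketch follows; the path, buffer size, and prefetch threshold are example values, and the snippet assumes a running Ignite instance named ignite and a surrounding method that declares IOException.

IgniteFileSystem fs = ignite.fileSystem("igfs");
IgfsPath path = new IgfsPath("/someFile");

// Plain open with an explicit buffer size.
try (IgfsInputStream in = fs.open(path, 64 * 1024)) {
    in.read(new byte[1024]);
}

// Open with prefetching: read-ahead starts after 2 sequential block reads.
try (IgfsInputStream in = fs.open(path, 64 * 1024, 2)) {
    in.read(new byte[1024]);
}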

Aggregations

IgfsInputStream (org.apache.ignite.igfs.IgfsInputStream): 26 usages
IgfsPath (org.apache.ignite.igfs.IgfsPath): 10 usages
IgfsFileRange (org.apache.ignite.igfs.mapreduce.IgfsFileRange): 9 usages
IgfsOutputStream (org.apache.ignite.igfs.IgfsOutputStream): 8 usages
IOException (java.io.IOException): 4 usages
IgniteException (org.apache.ignite.IgniteException): 4 usages
IgniteFileSystem (org.apache.ignite.IgniteFileSystem): 4 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 3 usages
IgfsMetrics (org.apache.ignite.igfs.IgfsMetrics): 3 usages
IgfsByteDelimiterRecordResolver (org.apache.ignite.igfs.mapreduce.records.IgfsByteDelimiterRecordResolver): 3 usages
IgfsException (org.apache.ignite.igfs.IgfsException): 2 usages
IgfsFile (org.apache.ignite.igfs.IgfsFile): 2 usages
IgfsFixedLengthRecordResolver (org.apache.ignite.igfs.mapreduce.records.IgfsFixedLengthRecordResolver): 2 usages
IgfsNewLineRecordResolver (org.apache.ignite.igfs.mapreduce.records.IgfsNewLineRecordResolver): 2 usages
InputStream (java.io.InputStream): 1 usage
URI (java.net.URI): 1 usage
HashMap (java.util.HashMap): 1 usage
Random (java.util.Random): 1 usage
UUID (java.util.UUID): 1 usage
ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue): 1 usage