Example 41 with IgfsOutputStream

use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.

the class HadoopIgfsDualAbstractSelfTest method testOpenPrefetchOverride.

/**
 * Check how prefetch override works.
 *
 * @throws Exception If failed.
 */
public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));
    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;
    IgfsOutputStream out = igfsSecondary.append(FILE, false);
    int totalWritten = 0;
    while (totalWritten < blockSize * 2 + chunk.length) {
        out.write(chunk);
        totalWritten += chunk.length;
    }
    out.close();
    awaitFileClose(igfsSecondary, FILE);
    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();
    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));
    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;
    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs@"), seqReads);
    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);
    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());
    FSDataInputStream fsIn = fs.open(file);
    final byte[] readBuf = new byte[blockSize * 2];
    fsIn.readFully(0, readBuf, 0, readBuf.length);
    // Wait a while for the prefetch (if any) to finish.
    IgfsMetaManager meta = igfs.context().meta();
    IgfsEntryInfo info = meta.info(meta.fileId(FILE));
    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);
    IgniteCache<IgfsBlockKey, byte[]> dataCache = igfs.context().kernalContext().cache()
        .jcache(igfs.configuration().getDataCacheConfiguration().getName());
    for (int i = 0; i < 10; i++) {
        if (dataCache.containsKey(key))
            break;
        else
            U.sleep(100);
    }
    fsIn.close();
    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);
    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(log, new Callable<Object>() {

        @Override
        public Object call() throws Exception {
            IgfsInputStream in0 = igfs.open(FILE);
            in0.seek(blockSize * 2);
            try {
                in0.read(readBuf);
            } finally {
                U.closeQuiet(in0);
            }
            return null;
        }
    }, IOException.class, "Failed to read data due to secondary file system exception: /dir/subdir/file");
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) Path(org.apache.hadoop.fs.Path) IgfsInputStream(org.apache.ignite.igfs.IgfsInputStream) IgfsIpcEndpointConfiguration(org.apache.ignite.igfs.IgfsIpcEndpointConfiguration) Configuration(org.apache.hadoop.conf.Configuration) FileSystemConfiguration(org.apache.ignite.configuration.FileSystemConfiguration) IgniteConfiguration(org.apache.ignite.configuration.IgniteConfiguration) CacheConfiguration(org.apache.ignite.configuration.CacheConfiguration) IgfsOutputStream(org.apache.ignite.igfs.IgfsOutputStream) URI(java.net.URI) IOException(java.io.IOException) IgfsMetaManager(org.apache.ignite.internal.processors.igfs.IgfsMetaManager) FileSystem(org.apache.hadoop.fs.FileSystem) IgniteHadoopIgfsSecondaryFileSystem(org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem) IgfsSecondaryFileSystem(org.apache.ignite.igfs.secondary.IgfsSecondaryFileSystem) IgfsBlockKey(org.apache.ignite.internal.processors.igfs.IgfsBlockKey) IgfsEntryInfo(org.apache.ignite.internal.processors.igfs.IgfsEntryInfo) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)
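
For context, the positioned read in the test above goes through the standard Hadoop FileSystem API rather than the native IGFS one. The following is a minimal, hedged sketch of that pattern, not taken from the test itself: the endpoint URI igfs://igfs@127.0.0.1:10500/, the file path /dir/subdir/file, and the buffer size are assumptions (the test resolves them from PRIMARY_URI and its fixture constants), and the ignite-hadoop module must be on the classpath so the "igfs" scheme resolves to Ignite's Hadoop file system implementation.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IgfsHadoopPositionedReadSketch {
    public static void main(String[] args) throws Exception {
        Configuration cfg = new Configuration();
        // Hypothetical endpoint; the test derives its URI from PRIMARY_URI instead.
        URI uri = new URI("igfs://igfs@127.0.0.1:10500/");
        try (FileSystem fs = FileSystem.get(uri, cfg)) {
            Path file = new Path("/dir/subdir/file");
            // Assumes the file holds at least buf.length bytes; otherwise readFully throws EOFException.
            byte[] buf = new byte[4096];
            try (FSDataInputStream in = fs.open(file)) {
                // Positioned read: fills buf from offset 0 of the file without moving the stream's own cursor.
                in.readFully(0, buf, 0, buf.length);
            }
        }
    }
}

Note that FileSystem.get() may hand back a cached instance for the same URI, which is worth keeping in mind when overriding per-connection properties on the Configuration as the test does.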

Example 42 with IgfsOutputStream

use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.

the class IgfsStreamsSelfTest method testCreateFileColocated.

/**
 * @throws Exception If failed.
 */
public void testCreateFileColocated() throws Exception {
    IgfsPath path = new IgfsPath("/colocated");
    UUID uuid = UUID.randomUUID();
    IgniteUuid affKey;
    long idx = 0;
    while (true) {
        affKey = new IgniteUuid(uuid, idx);
        if (grid(0).affinity(grid(0).igfsx("igfs").configuration().getDataCacheConfiguration().getName())
            .mapKeyToNode(affKey).id().equals(grid(0).localNode().id()))
            break;
        idx++;
    }
    try (IgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) {
        // Write 15MB; should be enough to test distribution.
        for (int i = 0; i < 15; i++) out.write(new byte[1024 * 1024]);
    }
    IgfsFile info = fs.info(path);
    Collection<IgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length());
    assertEquals(1, affNodes.size());
    Collection<UUID> nodeIds = F.first(affNodes).nodeIds();
    assertEquals(1, nodeIds.size());
    assertEquals(grid(0).localNode().id(), F.first(nodeIds));
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) IgniteUuid(org.apache.ignite.lang.IgniteUuid) IgfsBlockLocation(org.apache.ignite.igfs.IgfsBlockLocation) UUID(java.util.UUID) IgfsOutputStream(org.apache.ignite.igfs.IgfsOutputStream) IgfsFile(org.apache.ignite.igfs.IgfsFile)

Example 43 with IgfsOutputStream

use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.

the class IgfsStreamsSelfTest method testCreateFile.

/**
 * Test file creation.
 *
 * @param path Path to file to store.
 * @param size Size of file to store.
 * @param salt Salt for file content generation.
 * @throws Exception In case of any exception.
 */
private void testCreateFile(final IgfsPath path, final long size, final int salt) throws Exception {
    info("Create file [path=" + path + ", size=" + size + ", salt=" + salt + ']');
    final AtomicInteger cnt = new AtomicInteger(0);
    final Collection<IgfsPath> cleanUp = new ConcurrentLinkedQueue<>();
    long time = runMultiThreaded(new Callable<Object>() {

        @Override
        public Object call() throws Exception {
            int id = cnt.incrementAndGet();
            IgfsPath f = new IgfsPath(path.parent(), "asdf" + (id > 1 ? "-" + id : ""));
            try (IgfsOutputStream out = fs.create(f, 0, true, null, 0, 1024, null)) {
                assertNotNull(out);
                // Add all created into cleanup list.
                cleanUp.add(f);
                U.copy(new IgfsTestInputStream(size, salt), out);
            }
            return null;
        }
    }, WRITING_THREADS_CNT, "perform-multi-thread-writing");
    if (time > 0) {
        double rate = size * 1000. / time / 1024 / 1024;
        info(String.format("Write file [path=%s, size=%d kB, rate=%2.1f MB/s]", path, WRITING_THREADS_CNT * size / 1024, WRITING_THREADS_CNT * rate));
    }
    info("Read and validate saved file: " + path);
    final InputStream expIn = new IgfsTestInputStream(size, salt);
    final IgfsInputStream actIn = fs.open(path, CFG_BLOCK_SIZE * READING_THREADS_CNT * 11 / 10);
    // Validate continuous reading of whole file.
    assertEqualStreams(expIn, actIn, size, null);
    // Validate random seek and reading.
    final Random rnd = new Random();
    runMultiThreaded(new Callable<Object>() {

        @Override
        public Object call() throws Exception {
            long skip = Math.abs(rnd.nextLong() % (size + 1));
            long range = Math.min(size - skip, rnd.nextInt(CFG_BLOCK_SIZE * 400));
            assertEqualStreams(new IgfsTestInputStream(size, salt), actIn, range, skip);
            return null;
        }
    }, READING_THREADS_CNT, "validate-multi-thread-reading");
    expIn.close();
    actIn.close();
    info("Get stored file info: " + path);
    IgfsFile desc = fs.info(path);
    info("Validate stored file info: " + desc);
    assertNotNull(desc);
    if (log.isDebugEnabled())
        log.debug("File descriptor: " + desc);
    Collection<IgfsBlockLocation> aff = fs.affinity(path, 0, desc.length());
    assertFalse("Affinity: " + aff, desc.length() != 0 && aff.isEmpty());
    int blockSize = desc.blockSize();
    assertEquals("File size", size, desc.length());
    assertEquals("Binary block size", CFG_BLOCK_SIZE, blockSize);
    // assertEquals("Permission", "rwxr-xr-x", desc.getPermission().toString());
    // assertEquals("Permission sticky bit marks this is file", false, desc.getPermission().getStickyBit());
    assertEquals("Type", true, desc.isFile());
    assertEquals("Type", false, desc.isDirectory());
    info("Cleanup files: " + cleanUp);
    for (IgfsPath f : cleanUp) {
        fs.delete(f, true);
        assertNull(fs.info(f));
    }
}
Also used : IgfsInputStream(org.apache.ignite.igfs.IgfsInputStream) InputStream(java.io.InputStream) IgfsBlockLocation(org.apache.ignite.igfs.IgfsBlockLocation) IgfsOutputStream(org.apache.ignite.igfs.IgfsOutputStream) IOException(java.io.IOException) IgfsPath(org.apache.ignite.igfs.IgfsPath) Random(java.util.Random) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) IgfsFile(org.apache.ignite.igfs.IgfsFile)

Example 44 with IgfsOutputStream

use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.

the class IgfsProcessorSelfTest method testCreate.

/**
 * @throws Exception If failed.
 */
public void testCreate() throws Exception {
    IgfsPath path = path("/file");
    try (IgfsOutputStream os = igfs.create(path, false)) {
        assert os != null;
        IgfsFileImpl info = (IgfsFileImpl) igfs.info(path);
        for (int i = 0; i < nodesCount(); i++) {
            IgfsEntryInfo fileInfo = (IgfsEntryInfo) grid(i).cachex(metaCacheName).localPeek(info.fileId(), null, null);
            assertNotNull(fileInfo);
            assertNotNull(fileInfo.listing());
        }
    } finally {
        igfs.delete(path("/"), true);
    }
}
Also used : IgfsPath(org.apache.ignite.igfs.IgfsPath) IgfsOutputStream(org.apache.ignite.igfs.IgfsOutputStream)

Example 45 with IgfsOutputStream

use of org.apache.ignite.igfs.IgfsOutputStream in project ignite by apache.

the class IgfsSizeSelfTest method checkOversize.

/**
 * Ensure that an exception is thrown in case of IGFS oversize.
 *
 * @throws Exception If failed.
 */
private void checkOversize() throws Exception {
    final long maxSize = 32 * 1024 * 1024;
    memIgfsdDataPlcSetter = new IgniteInClosure<IgniteConfiguration>() {

        @Override
        public void apply(IgniteConfiguration cfg) {
            String memPlcName = "igfsDataMemPlc";
            cfg.setDataStorageConfiguration(new DataStorageConfiguration().setDataRegionConfigurations(
                new DataRegionConfiguration().setMaxSize(maxSize).setInitialSize(maxSize).setName(memPlcName)));
            FileSystemConfiguration igfsCfg = cfg.getFileSystemConfiguration()[0];
            igfsCfg.getDataCacheConfiguration().setDataRegionName(memPlcName);
            cfg.setCacheConfiguration(new CacheConfiguration().setName("QQQ").setDataRegionName(memPlcName));
        }
    };
    startUp();
    final IgfsPath path = new IgfsPath("/file");
    final int writeChunkSize = (int) (maxSize / 1024);
    // This write is expected to be successful.
    IgfsOutputStream os = igfs(0).create(path, false);
    os.write(chunk(writeChunkSize));
    os.close();
    // This write must be successful as well.
    os = igfs(0).append(path, false);
    os.write(chunk(1));
    os.close();
    // This write must fail w/ exception.
    GridTestUtils.assertThrows(log(), new Callable<Object>() {

        @Override
        public Object call() throws Exception {
            IgfsOutputStream osErr = igfs(0).append(path, false);
            try {
                for (int i = 0; i < maxSize / writeChunkSize * GRID_CNT; ++i) osErr.write(chunk(writeChunkSize));
                osErr.close();
                return null;
            } catch (IOException e) {
                Throwable e0 = e;
                while (e0.getCause() != null) e0 = e0.getCause();
                throw (Exception) e0;
            } finally {
                U.closeQuiet(osErr);
            }
        }
    }, IgniteOutOfMemoryException.class, "Not enough memory allocated");
}
Also used : IOException(java.io.IOException) IgfsOutputStream(org.apache.ignite.igfs.IgfsOutputStream) IgniteOutOfMemoryException(org.apache.ignite.internal.mem.IgniteOutOfMemoryException) DataStorageConfiguration(org.apache.ignite.configuration.DataStorageConfiguration) IgfsPath(org.apache.ignite.igfs.IgfsPath) DataRegionConfiguration(org.apache.ignite.configuration.DataRegionConfiguration) IgniteConfiguration(org.apache.ignite.configuration.IgniteConfiguration) FileSystemConfiguration(org.apache.ignite.configuration.FileSystemConfiguration) NearCacheConfiguration(org.apache.ignite.configuration.NearCacheConfiguration) CacheConfiguration(org.apache.ignite.configuration.CacheConfiguration)

Aggregations

IgfsOutputStream (org.apache.ignite.igfs.IgfsOutputStream): 47
IgfsPath (org.apache.ignite.igfs.IgfsPath): 25
IOException (java.io.IOException): 14
IgniteException (org.apache.ignite.IgniteException): 13
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 10
IgfsInputStream (org.apache.ignite.igfs.IgfsInputStream): 8
IgniteUuid (org.apache.ignite.lang.IgniteUuid): 7
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 6
GridAbsPredicate (org.apache.ignite.internal.util.lang.GridAbsPredicate): 6
AtomicReference (java.util.concurrent.atomic.AtomicReference): 4
IgniteFileSystem (org.apache.ignite.IgniteFileSystem): 4
IgfsDirectoryNotEmptyException (org.apache.ignite.igfs.IgfsDirectoryNotEmptyException): 4
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 3
IgfsException (org.apache.ignite.igfs.IgfsException): 3
IgfsMetrics (org.apache.ignite.igfs.IgfsMetrics): 3
IgfsParentNotDirectoryException (org.apache.ignite.igfs.IgfsParentNotDirectoryException): 3
IgfsPathNotFoundException (org.apache.ignite.igfs.IgfsPathNotFoundException): 3
ArrayList (java.util.ArrayList): 2
Random (java.util.Random): 2
CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration): 2
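
Taken together, the aggregated classes above cover the basic IGFS round trip that most of these examples exercise: create or append a file through IgfsOutputStream, then read it back through IgfsInputStream. Below is a minimal, self-contained sketch of that pattern. It is not taken from the Ignite test suite; the configuration file name example-igfs.xml, the file system name "igfs", and the path /sketch/file are assumptions.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteFileSystem;
import org.apache.ignite.Ignition;
import org.apache.ignite.igfs.IgfsInputStream;
import org.apache.ignite.igfs.IgfsOutputStream;
import org.apache.ignite.igfs.IgfsPath;

public class IgfsRoundTripSketch {
    public static void main(String[] args) throws Exception {
        // Assumes a configuration file that declares a FileSystemConfiguration named "igfs".
        try (Ignite ignite = Ignition.start("example-igfs.xml")) {
            IgniteFileSystem fs = ignite.fileSystem("igfs");
            IgfsPath path = new IgfsPath("/sketch/file");

            // Create (overwrite = true) and write through IgfsOutputStream.
            try (IgfsOutputStream out = fs.create(path, true)) {
                out.write("hello igfs".getBytes());
            }

            // Append more data; create = false means the file must already exist.
            try (IgfsOutputStream out = fs.append(path, false)) {
                out.write(", appended".getBytes());
            }

            // Read the whole file back through IgfsInputStream.
            byte[] buf = new byte[(int) fs.info(path).length()];
            try (IgfsInputStream in = fs.open(path)) {
                for (int off = 0, n; off < buf.length && (n = in.read(buf, off, buf.length - off)) > 0; )
                    off += n;
            }
            System.out.println(new String(buf));
        }
    }
}

The create(path, true) overload used here is the simplest one; the fuller overload seen in testCreateFileColocated additionally takes a buffer size, an affinity key, a replication factor, a block size, and a properties map.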