
Example 71 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in the Apache Hadoop project.

Source: class TestFileCreation, method checkFileCreation.

/**
   * Tests that file creation and disk-space accounting work correctly
   * @param netIf the local interface, if any, clients should use to access DNs
   * @param useDnHostname whether the client should contact DNs by hostname
   */
public void checkFileCreation(String netIf, boolean useDnHostname) throws IOException {
    Configuration conf = new HdfsConfiguration();
    if (netIf != null) {
        conf.set(HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
    }
    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
    if (useDnHostname) {
        // Since the mini cluster only listens on the loopback we have to
        // ensure the hostname used to access DNs maps to the loopback. We
        // do this by telling the DN to advertise localhost as its hostname
        // instead of the default hostname.
        conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
    }
    if (simulatedStorage) {
        SimulatedFSDataset.setFactory(conf);
    }
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).checkDataNodeHostConfig(true).build();
    FileSystem fs = cluster.getFileSystem();
    try {
        //
        // check that / exists
        //
        Path path = new Path("/");
        System.out.println("Path : \"" + path.toString() + "\"");
        System.out.println(fs.getFileStatus(path).isDirectory());
        assertTrue("/ should be a directory", fs.getFileStatus(path).isDirectory());
        //
        // Create a directory inside /, then try to overwrite it
        //
        Path dir1 = new Path("/test_dir");
        fs.mkdirs(dir1);
        System.out.println("createFile: Creating " + dir1.getName() + " for overwrite of existing directory.");
        try {
            // Attempting to create a path that is an existing directory must
            // fail even with overwrite=true; close the stream defensively in
            // case creation unexpectedly succeeds.
            FSDataOutputStream out = fs.create(dir1, true);
            out.close();
            fail("Did not prevent directory from being overwritten.");
        } catch (FileAlreadyExistsException e) {
        // expected
        }
        //
        // Create a new file in the home directory; createFile() leaves the
        // stream open, so it is closed explicitly after writing below.
        //
        Path file1 = new Path("filestatus.dat");
        Path parent = file1.getParent();
        fs.mkdirs(parent);
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        dfs.setQuota(file1.getParent(), 100L, blockSize * 5);
        FSDataOutputStream stm = createFile(fs, file1, 1);
        // verify that file exists in FS namespace
        assertTrue(file1 + " should be a file", fs.getFileStatus(file1).isFile());
        System.out.println("Path : \"" + file1 + "\"");
        // write to file
        writeFile(stm);
        stm.close();
        // verify that file size has changed to the full size
        long len = fs.getFileStatus(file1).getLen();
        assertTrue(file1 + " should be of size " + fileSize + " but found to be of size " + len, len == fileSize);
        // verify the disk space the file occupied
        long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
        assertEquals(file1 + " should take " + fileSize + " bytes disk space " + "but found to take " + diskSpace + " bytes", fileSize, diskSpace);
        // can't check capacities for real storage since the OS file system may be changing under us.
        if (simulatedStorage) {
            DataNode dn = cluster.getDataNodes().get(0);
            FsDatasetSpi<?> dataset = DataNodeTestUtils.getFSDataset(dn);
            assertEquals(fileSize, dataset.getDfsUsed());
            assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY - fileSize, dataset.getRemaining());
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: Path(org.apache.hadoop.fs.Path) FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) Configuration(org.apache.hadoop.conf.Configuration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
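The directory-overwrite check above can be reduced to a standalone sketch. This is not from the Hadoop sources: the class name and path below are illustrative, and it assumes a Configuration that resolves to an HDFS (or MiniDFSCluster) filesystem, since create() semantics for directories can differ on other FileSystem implementations.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OverwriteDirectoryCheck {
    public static void main(String[] args) throws Exception {
        // Illustrative setup; assumes fs.defaultFS points at HDFS or a test cluster.
        FileSystem fs = FileSystem.get(new Configuration());
        Path dir = new Path("/example_dir");
        fs.mkdirs(dir);
        try {
            // overwrite=true applies to files; an existing directory must
            // still be rejected.
            FSDataOutputStream out = fs.create(dir, true);
            out.close();
            System.err.println("Unexpected: directory was overwritten");
        } catch (FileAlreadyExistsException expected) {
            System.out.println("create() refused to overwrite the directory");
        } finally {
            fs.delete(dir, true);
            fs.close();
        }
    }
}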

Example 72 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in the Apache Hadoop project.

Source: class TestFileCreation, method createFile.

// creates a file but does not close it
public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl) throws IOException {
    System.out.println("createFile: Created " + name + " with " + repl + " replica.");
    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) repl, blockSize);
    return stm;
}
Also used: FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
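The helper above relies on the five-argument create() overload, which makes the buffer size, replication factor, and block size explicit instead of taking them from filesystem defaults. Below is a minimal sketch of calling that overload directly; the class name, path, and values are illustrative, not from the Hadoop sources.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ExplicitCreateSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/tmp/explicit-create.dat");
        // create(path, overwrite, bufferSize, replication, blockSize);
        // 4096 matches the io.file.buffer.size fallback used in the helper above.
        FSDataOutputStream stm =
            fs.create(file, true, 4096, (short) 1, 128L * 1024 * 1024);
        try {
            stm.writeUTF("illustrative payload");
        } finally {
            stm.close();
            fs.close();
        }
    }
}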

Example 73 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in the Apache Hadoop project.

Source: class TestFileCreation, method testFileCreationSyncOnClose.

/**
   * Test creating a file whose data gets synced to disk when closed
   */
@Test
public void testFileCreationSyncOnClose() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFS_DATANODE_SYNCONCLOSE_KEY, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        FileSystem fs = cluster.getFileSystem();
        Path[] p = { new Path("/foo"), new Path("/bar") };
        //write 2 files at the same time
        FSDataOutputStream[] out = { fs.create(p[0]), fs.create(p[1]) };
        int i = 0;
        for (; i < 100; i++) {
            out[0].write(i);
            out[1].write(i);
        }
        out[0].close();
        for (; i < 200; i++) {
            out[1].write(i);
        }
        out[1].close();
        //verify
        FSDataInputStream[] in = { fs.open(p[0]), fs.open(p[1]) };
        for (i = 0; i < 100; i++) {
            assertEquals(i, in[0].read());
        }
        for (i = 0; i < 200; i++) {
            assertEquals(i, in[1].read());
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
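dfs.datanode.synconclose is a DataNode-side setting. A client can request a comparable per-stream durability guarantee with hsync(), which flushes client buffers and asks the DataNodes to sync the data to disk. The following is a minimal sketch of that, not from the test suite; the path is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HsyncSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        try (FSDataOutputStream out = fs.create(new Path("/tmp/hsync-demo"), true)) {
            out.write(42);
            // Ask the DataNodes to sync to disk now, rather than relying on
            // the cluster-wide dfs.datanode.synconclose behavior at close().
            out.hsync();
        }
        fs.close();
    }
}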

Example 74 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in the Apache Hadoop project.

Source: class TestHFlush, method testPipelineHeartbeat.

/** This creates a slow writer and checks to see
   * if pipeline heartbeats work correctly
   */
@Test
public void testPipelineHeartbeat() throws Exception {
    final int DATANODE_NUM = 2;
    final int fileLen = 6;
    Configuration conf = new HdfsConfiguration();
    final int timeout = 2000;
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
    final Path p = new Path("/pipelineHeartbeat/foo");
    System.out.println("p=" + p);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
    try {
        DistributedFileSystem fs = cluster.getFileSystem();
        byte[] fileContents = AppendTestUtil.initBuffer(fileLen);
        // create a new file.
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, DATANODE_NUM);
        stm.write(fileContents, 0, 1);
        Thread.sleep(timeout);
        stm.hflush();
        System.out.println("Wrote 1 byte and hflush " + p);
        // write another byte
        Thread.sleep(timeout);
        stm.write(fileContents, 1, 1);
        stm.hflush();
        stm.write(fileContents, 2, 1);
        Thread.sleep(timeout);
        stm.hflush();
        stm.write(fileContents, 3, 1);
        Thread.sleep(timeout);
        stm.write(fileContents, 4, 1);
        stm.hflush();
        stm.write(fileContents, 5, 1);
        Thread.sleep(timeout);
        stm.close();
        // verify that entire file is good
        AppendTestUtil.checkFullFile(fs, p, fileLen, fileContents, "Failed to slowly write to a file");
    } finally {
        cluster.shutdown();
    }
}
Also used: Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
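The test leans on hflush(), which pushes buffered bytes down the pipeline and makes them visible to new readers without guaranteeing they are on disk (that is what hsync() adds). A minimal sketch of that visibility behavior follows; it is not from the Hadoop sources and the path is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushVisibilitySketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/tmp/hflush-demo");
        FSDataOutputStream out = fs.create(p, true);
        out.write(7);
        // Make the byte visible to readers while the writer stays open.
        out.hflush();
        try (FSDataInputStream in = fs.open(p)) {
            System.out.println("read back: " + in.read()); // expected: 7
        }
        out.close();
        fs.close();
    }
}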

Example 75 with FSDataOutputStream

Use of org.apache.hadoop.fs.FSDataOutputStream in the Apache Hadoop project.

Source: class TestFileCreationDelete, method testFileCreationDeleteParent.

@Test
public void testFileCreationDeleteParent() throws IOException {
    Configuration conf = new HdfsConfiguration();
    // 2s
    final int MAX_IDLE_TIME = 2000;
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = null;
    try {
        cluster.waitActive();
        fs = cluster.getFileSystem();
        // create file1.
        Path dir = new Path("/foo");
        Path file1 = new Path(dir, "file1");
        FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
        System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
        TestFileCreation.writeFile(stm1, 1000);
        stm1.hflush();
        // create file2.
        Path file2 = new Path("/file2");
        FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
        System.out.println("testFileCreationDeleteParent: " + "Created file " + file2);
        TestFileCreation.writeFile(stm2, 1000);
        stm2.hflush();
        // rm dir
        fs.delete(dir, true);
        // restart cluster.
        // This ensures that leases are persisted in fsimage.
        cluster.shutdown();
        try {
            Thread.sleep(2 * MAX_IDLE_TIME);
        } catch (InterruptedException e) {
        }
        cluster = new MiniDFSCluster.Builder(conf).format(false).build();
        cluster.waitActive();
        // restart cluster yet again. This triggers the code to read in
        // persistent leases from fsimage.
        cluster.shutdown();
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
        }
        cluster = new MiniDFSCluster.Builder(conf).format(false).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        assertTrue(!fs.exists(file1));
        assertTrue(fs.exists(file2));
    } finally {
        if (fs != null) {
            fs.close();
        }
        cluster.shutdown();
    }
}
}
Also used: Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
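The shutdown-then-rebuild-with-format(false) pattern is what forces leases to round-trip through the fsimage: formatting is skipped, so the NameNode reloads the previous namespace from disk. A stripped-down sketch of that restart pattern follows; it assumes the hadoop-hdfs test artifacts (MiniDFSCluster) are on the classpath, and the class name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class RestartWithoutFormatSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        // ... create files, hold streams open, etc. ...
        cluster.shutdown();
        // format(false) reuses the existing name/data directories, so the
        // NameNode reloads the previous namespace (and leases) from fsimage.
        cluster = new MiniDFSCluster.Builder(conf).format(false).build();
        cluster.waitActive();
        cluster.shutdown();
    }
}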

Aggregations

FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 789
Path (org.apache.hadoop.fs.Path): 618
Test (org.junit.Test): 345
FileSystem (org.apache.hadoop.fs.FileSystem): 248
Configuration (org.apache.hadoop.conf.Configuration): 190
IOException (java.io.IOException): 163
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 94
IgfsPath (org.apache.ignite.igfs.IgfsPath): 78
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 66
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 65
FileStatus (org.apache.hadoop.fs.FileStatus): 57
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 45
CreateFlag (org.apache.hadoop.fs.CreateFlag): 43
FileNotFoundException (java.io.FileNotFoundException): 40
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 40
ArrayList (java.util.ArrayList): 38
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 33
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 30
Random (java.util.Random): 28
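As the counts above suggest, FSDataOutputStream almost always appears together with Path, FileSystem, and Configuration, frequently paired with FSDataInputStream for read-back. A minimal self-contained sketch of that common combination; it is not from any of the indexed sources and the path is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenReadSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/tmp/write-then-read.txt");
        // Write, closing the stream via try-with-resources.
        try (FSDataOutputStream out = fs.create(p, true)) {
            out.writeUTF("hello, hdfs");
        }
        // Read the same payload back.
        try (FSDataInputStream in = fs.open(p)) {
            System.out.println(in.readUTF());
        }
        fs.close();
    }
}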