
Example 91 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

the class TestSymlinkHdfsDisable method testSymlinkHdfsDisable.

@Test(timeout = 60000)
public void testSymlinkHdfsDisable() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // disable symlink resolution
    conf.setBoolean(CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY, false);
    // spin up minicluster, get dfs and filecontext
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
    // Create test files/links
    FileContextTestHelper helper = new FileContextTestHelper("/tmp/TestSymlinkHdfsDisable");
    Path root = helper.getTestRootPath(fc);
    Path target = new Path(root, "target");
    Path link = new Path(root, "link");
    DFSTestUtil.createFile(dfs, target, 4096, (short) 1, 0xDEADDEAD);
    fc.createSymlink(target, link, false);
    // Try to resolve links with FileSystem and FileContext
    try {
        fc.open(link);
        fail("Expected error when attempting to resolve link");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("resolution is disabled", e);
    }
    try {
        dfs.open(link);
        fail("Expected error when attempting to resolve link");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("resolution is disabled", e);
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Configuration (org.apache.hadoop.conf.Configuration), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
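
One thing worth noting about this example: unlike the tests below, it never shuts its cluster down, so the in-process NameNode and DataNode threads outlive the test. A minimal sketch of the try/finally wrapper the other examples use, built on the same builder API (the helper class and its name are hypothetical, not part of Hadoop):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniClusterLifecycle {

    interface ClusterBody {
        void run(DistributedFileSystem dfs) throws Exception;
    }

    // Hypothetical helper: run a piece of test code against a fresh
    // single-node cluster and always tear the cluster down afterwards,
    // even when an assertion fails.
    static void withCluster(ClusterBody body) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
            cluster.waitActive();                 // wait for NN and DN startup
            body.run(cluster.getFileSystem());    // hand the DFS to the test body
        } finally {
            cluster.shutdown();                   // release ports and threads
        }
    }
}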

Example 92 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

the class TestUrlStreamHandler method testDfsUrls.

/**
   * Test opening and reading from an InputStream through an hdfs:// URL.
   * <p>
   * First generate a file with some content through the FileSystem API, then
   * try to open and read the file through the URL stream API.
   * 
   * @throws IOException
   */
@Test
public void testDfsUrls() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = cluster.getFileSystem();
    // Set up our own URL stream handler factory.
    // setURLStreamHandlerFactory can be called at most once per JVM,
    // so the new URLStreamHandler stays in effect for all test cases
    // in TestUrlStreamHandler.
    FsUrlStreamHandlerFactory factory = new org.apache.hadoop.fs.FsUrlStreamHandlerFactory();
    java.net.URL.setURLStreamHandlerFactory(factory);
    Path filePath = new Path("/thefile");
    try {
        byte[] fileContent = new byte[1024];
        for (int i = 0; i < fileContent.length; ++i) fileContent[i] = (byte) i;
        // First create the file through the FileSystem API
        OutputStream os = fs.create(filePath);
        os.write(fileContent);
        os.close();
        // Second, open and read the file content through the URL API
        URI uri = fs.getUri();
        URL fileURL = new URL(uri.getScheme(), uri.getHost(), uri.getPort(), filePath.toString());
        InputStream is = fileURL.openStream();
        assertNotNull(is);
        byte[] bytes = new byte[4096];
        assertEquals(1024, is.read(bytes));
        is.close();
        for (int i = 0; i < fileContent.length; ++i) assertEquals(fileContent[i], bytes[i]);
        // Cleanup: delete the file
        fs.delete(filePath, false);
    } finally {
        fs.close();
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Configuration (org.apache.hadoop.conf.Configuration), InputStream (java.io.InputStream), OutputStream (java.io.OutputStream), URI (java.net.URI), URL (java.net.URL), Test (org.junit.Test)
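
As the comment in this test points out, java.net.URL.setURLStreamHandlerFactory may be called at most once per JVM; a second call throws an Error. When several tests might race to register the HDFS handler, a guarded registration keeps the setup idempotent. A minimal sketch (the helper class and flag are hypothetical, not part of Hadoop):

import java.net.URL;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;

public class HdfsUrlSupport {

    private static final AtomicBoolean FACTORY_SET = new AtomicBoolean(false);

    // Hypothetical helper: install the HDFS URL stream handler at most
    // once, so repeated test setup never trips the JVM's one-shot rule.
    public static void registerOnce() {
        if (FACTORY_SET.compareAndSet(false, true)) {
            URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
        }
    }
}

Note that the flag only guards callers within the same classloader; another library calling setURLStreamHandlerFactory directly would still conflict.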

Example 93 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

the class TestLoadGenerator method testLoadGenerator.

/** Test if the load generator works fine */
@Test
public void testLoadGenerator() throws Exception {
    final String TEST_SPACE_ROOT = "/test";
    final String SCRIPT_TEST_DIR = OUT_DIR.getAbsolutePath();
    String script = SCRIPT_TEST_DIR + "/" + "loadgenscript";
    String script2 = SCRIPT_TEST_DIR + "/" + "loadgenscript2";
    File scriptFile1 = new File(script);
    File scriptFile2 = new File(script2);
    FileWriter writer = new FileWriter(DIR_STRUCTURE_FILE);
    writer.write(DIR_STRUCTURE_FIRST_LINE + "\n");
    writer.write(DIR_STRUCTURE_SECOND_LINE + "\n");
    writer.close();
    writer = new FileWriter(FILE_STRUCTURE_FILE);
    writer.write(FILE_STRUCTURE_FIRST_LINE + "\n");
    writer.write(FILE_STRUCTURE_SECOND_LINE + "\n");
    writer.close();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(3).build();
    cluster.waitActive();
    try {
        DataGenerator dg = new DataGenerator();
        dg.setConf(CONF);
        String[] args = new String[] { "-inDir", OUT_DIR.getAbsolutePath(), "-root", TEST_SPACE_ROOT };
        assertEquals(0, dg.run(args));
        final int READ_PROBABILITY = 1;
        final int WRITE_PROBABILITY = 3;
        final int MAX_DELAY_BETWEEN_OPS = 7;
        final int NUM_OF_THREADS = 9;
        final int START_TIME = 11;
        final int ELAPSED_TIME = 13;
        LoadGenerator lg = new LoadGenerator();
        lg.setConf(CONF);
        args = new String[] { "-readProbability", "0.3", "-writeProbability", "0.3", "-root", TEST_SPACE_ROOT, "-maxDelayBetweenOps", "0", "-numOfThreads", "1", "-startTime", Long.toString(Time.now()), "-elapsedTime", "10" };
        assertEquals(0, lg.run(args));
        // Argument validation: each run below should fail and return -1
        String oldArg = args[READ_PROBABILITY];
        args[READ_PROBABILITY] = "1.1";
        assertEquals(-1, lg.run(args));
        args[READ_PROBABILITY] = "-1.1";
        assertEquals(-1, lg.run(args));
        args[READ_PROBABILITY] = oldArg;
        oldArg = args[WRITE_PROBABILITY];
        args[WRITE_PROBABILITY] = "1.1";
        assertEquals(-1, lg.run(args));
        args[WRITE_PROBABILITY] = "-1.1";
        assertEquals(-1, lg.run(args));
        args[WRITE_PROBABILITY] = "0.9";
        assertEquals(-1, lg.run(args));
        args[READ_PROBABILITY] = oldArg;
        oldArg = args[MAX_DELAY_BETWEEN_OPS];
        args[MAX_DELAY_BETWEEN_OPS] = "1.x1";
        assertEquals(-1, lg.run(args));
        args[MAX_DELAY_BETWEEN_OPS] = oldArg;
        oldArg = args[NUM_OF_THREADS];
        args[NUM_OF_THREADS] = "-1";
        assertEquals(-1, lg.run(args));
        args[NUM_OF_THREADS] = oldArg;
        oldArg = args[START_TIME];
        args[START_TIME] = "-1";
        assertEquals(-1, lg.run(args));
        args[START_TIME] = oldArg;
        oldArg = args[ELAPSED_TIME];
        args[ELAPSED_TIME] = "-1";
        assertEquals(-1, lg.run(args));
        args[ELAPSED_TIME] = oldArg;
        // test scripted operation
        // Test with good script
        FileWriter fw = new FileWriter(scriptFile1);
        fw.write("2 .22 .33\n");
        fw.write("3 .10 .6\n");
        fw.write("6 0 .7\n");
        fw.close();
        String[] scriptArgs = new String[] { "-root", TEST_SPACE_ROOT, "-maxDelayBetweenOps", "0", "-numOfThreads", "10", "-startTime", Long.toString(Time.now()), "-scriptFile", script };
        assertEquals(0, lg.run(scriptArgs));
        // Test with bad script
        fw = new FileWriter(scriptFile2);
        fw.write("2 .22 .33\n");
        fw.write("3 blah blah blah .6\n");
        fw.write("6 0 .7\n");
        fw.close();
        scriptArgs[scriptArgs.length - 1] = script2;
        assertEquals(-1, lg.run(scriptArgs));
    } finally {
        cluster.shutdown();
        DIR_STRUCTURE_FILE.delete();
        FILE_STRUCTURE_FILE.delete();
        scriptFile1.delete();
        scriptFile2.delete();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileWriter (java.io.FileWriter), File (java.io.File), Test (org.junit.Test)
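
The scripted runs above also reveal the script file format: one phase per line, three whitespace-separated numeric columns, with the second and third read as probabilities (the "blah blah blah" line is rejected). A minimal sketch of producing such a script, assuming that layout; the meaning of the first column (a phase duration) is inferred from the test, not shown by it:

import java.io.FileWriter;
import java.io.IOException;

public class LoadGenScripts {

    // Sketch: write a well-formed LoadGenerator script. Each line is
    // "<duration> <readProbability> <writeProbability>"; judging from the
    // CLI validation above, each probability must lie in [0, 1] and their
    // sum must not exceed 1.
    public static void writeWellFormedScript(String path) throws IOException {
        try (FileWriter fw = new FileWriter(path)) {
            fw.write("2 .22 .33\n");   // phase 1: 22% reads, 33% writes
            fw.write("3 .10 .6\n");    // phase 2: 10% reads, 60% writes
            fw.write("6 0 .7\n");      // phase 3: no reads, 70% writes
        }
    }
}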

Example 94 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

the class TestEnhancedByteBufferAccess method testFallbackRead.

/**
   * Test the {@link ByteBufferUtil#fallbackRead} function directly.
   */
@Test
public void testFallbackRead() throws Exception {
    HdfsConfiguration conf = initZeroCopyTest();
    MiniDFSCluster cluster = null;
    final Path TEST_PATH = new Path("/a");
    final int TEST_FILE_LENGTH = 16385;
    final int RANDOM_SEED = 23453;
    FSDataInputStream fsIn = null;
    DistributedFileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short) 1, RANDOM_SEED);
        try {
            DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
        } catch (InterruptedException e) {
            Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
        } catch (TimeoutException e) {
            Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        fsIn = fs.open(TEST_PATH);
        byte[] original = new byte[TEST_FILE_LENGTH];
        IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
        fsIn.close();
        fsIn = fs.open(TEST_PATH);
        testFallbackImpl(fsIn, original);
    } finally {
        if (fsIn != null)
            fsIn.close();
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), TimeoutException (java.util.concurrent.TimeoutException), Test (org.junit.Test)
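
The finally block above repeats the same null-guarded close for each resource. A small helper keeps that pattern readable; this is a sketch, not a Hadoop API (cluster.shutdown() still needs its own null check, since MiniDFSCluster is not a Closeable stream):

import java.io.Closeable;
import java.io.IOException;

public final class Cleanup {

    // Hypothetical helper mirroring the test's teardown: close a
    // possibly-null resource and swallow the IOException. Acceptable for
    // best-effort test cleanup, but not a pattern for production code.
    public static void closeQuietly(Closeable c) {
        if (c != null) {
            try {
                c.close();
            } catch (IOException ignored) {
                // nothing useful to do during teardown
            }
        }
    }
}

With it, the finally block above shrinks to closeQuietly(fsIn); closeQuietly(fs); followed by the existing null-checked cluster.shutdown().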

Example 95 with MiniDFSCluster

use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

the class TestBlocksWithNotEnoughRacks method testSufficientlySingleReplBlockUsesNewRack.

/*
   * Like the previous test, but the block starts with a single replica,
   * so unlike the previous test it does not start off needing additional
   * replicas.
   */
@Test
public void testSufficientlySingleReplBlockUsesNewRack() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 1;
    final Path filePath = new Path("/testFile");
    String[] racks = { "/rack1", "/rack1", "/rack1", "/rack2" };
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    try {
        // Create a file with one block with a replication factor of 1
        final FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
        ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
        DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
        REPLICATION_FACTOR = 2;
        NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test)
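
What makes this test work is the skewed topology: three of the four datanodes sit on /rack1 and only one on /rack2, so once the replication factor rises to 2, the default rack-aware placement policy (which wants replicas on at least two racks) can only be satisfied by the lone /rack2 node. A minimal sketch of building that topology, using the same builder calls as the test (the helper name is hypothetical):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class RackLayouts {

    // Sketch: a cluster whose topology forces cross-rack placement.
    // Any block with replication >= 2 must put one replica on the
    // single /rack2 datanode.
    public static MiniDFSCluster skewedTwoRackCluster(Configuration conf)
            throws IOException {
        String[] racks = { "/rack1", "/rack1", "/rack1", "/rack2" };
        return new MiniDFSCluster.Builder(conf)
            .numDataNodes(racks.length)
            .racks(racks)
            .build();
    }
}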

Aggregations

MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 507
Test (org.junit.Test): 429
Configuration (org.apache.hadoop.conf.Configuration): 403
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 312
Path (org.apache.hadoop.fs.Path): 290
FileSystem (org.apache.hadoop.fs.FileSystem): 211
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 183
IOException (java.io.IOException): 107
File (java.io.File): 83
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 64
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 53
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 35
RandomAccessFile (java.io.RandomAccessFile): 33
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33
URI (java.net.URI): 31
ArrayList (java.util.ArrayList): 29
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 28
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 26
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 25
HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest): 24