Example 71 with InetSocketAddress

Use of java.net.InetSocketAddress in the Apache Hadoop project.

From the class BlockReaderTestUtil, method getBlockReader.

/**
   * Get a BlockReader for the given block.
   */
public static BlockReader getBlockReader(final DistributedFileSystem fs, LocatedBlock testBlock, int offset, long lenToRead) throws IOException {
    ExtendedBlock block = testBlock.getBlock();
    DatanodeInfo[] nodes = testBlock.getLocations();
    InetSocketAddress targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
    return new BlockReaderFactory(fs.getClient().getConf())
        .setInetSocketAddress(targetAddr)
        .setBlock(block)
        .setFileName(targetAddr.toString() + ":" + block.getBlockId())
        .setBlockToken(testBlock.getBlockToken())
        .setStartOffset(offset)
        .setLength(lenToRead)
        .setVerifyChecksum(true)
        .setClientName("BlockReaderTestUtil")
        .setDatanodeInfo(nodes[0])
        .setClientCacheContext(ClientContext.getFromConf(fs.getConf()))
        .setCachingStrategy(CachingStrategy.newDefaultStrategy())
        .setConfiguration(fs.getConf())
        .setAllowShortCircuitLocalReads(true)
        .setTracer(FsTracer.get(fs.getConf()))
        .setRemotePeerFactory(new RemotePeerFactory() {

        @Override
        public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(fs.getConf()).createSocket();
            try {
                sock.connect(addr, HdfsConstants.READ_TIMEOUT);
                sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                peer = DFSUtilClient.peerFromSocket(sock);
            } finally {
                if (peer == null) {
                    IOUtils.closeQuietly(sock);
                }
            }
            return peer;
        }
    }).build();
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), InetSocketAddress (java.net.InetSocketAddress), Peer (org.apache.hadoop.hdfs.net.Peer), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Token (org.apache.hadoop.security.token.Token), RemotePeerFactory (org.apache.hadoop.hdfs.RemotePeerFactory), Socket (java.net.Socket)
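The newConnectedPeer callback above guards the raw socket: if no Peer was successfully built around it, the finally block closes it. A minimal standalone sketch of that connect-or-close pattern using only java.net; the SOCKET_TIMEOUT_MS constant is a placeholder standing in for HdfsConstants.READ_TIMEOUT:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

static Socket connectOrClose(InetSocketAddress addr) throws IOException {
    final int SOCKET_TIMEOUT_MS = 60_000; // placeholder; the example uses HdfsConstants.READ_TIMEOUT
    Socket sock = new Socket();
    boolean handedOff = false;
    try {
        sock.connect(addr, SOCKET_TIMEOUT_MS); // bounded connect
        sock.setSoTimeout(SOCKET_TIMEOUT_MS);  // bounded reads
        handedOff = true;
        return sock;
    } finally {
        if (!handedOff) {
            sock.close(); // nothing was built around the socket; release it
        }
    }
}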

Example 72 with InetSocketAddress

Use of java.net.InetSocketAddress in the Apache Hadoop project.

From the class TestSetTimes, method testTimesAtClose.

/**
   * Tests mod time change at close in DFS.
   */
@Test
public void testTimesAtClose() throws IOException {
    Configuration conf = new HdfsConfiguration();
    // 2s
    final int MAX_IDLE_TIME = 2000;
    int replicas = 1;
    // parameter initialization
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", numDatanodes, info.length);
    FileSystem fileSys = cluster.getFileSystem();
    assertTrue(fileSys instanceof DistributedFileSystem);
    try {
        // create a new file and write to it
        Path file1 = new Path("/simple.dat");
        FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
        System.out.println("Created and wrote file simple.dat");
        FileStatus statBeforeClose = fileSys.getFileStatus(file1);
        long mtimeBeforeClose = statBeforeClose.getModificationTime();
        String mdateBeforeClose = dateForm.format(new Date(mtimeBeforeClose));
        System.out.println("mtime on " + file1 + " before close is " + mdateBeforeClose + " (" + mtimeBeforeClose + ")");
        assertTrue(mtimeBeforeClose != 0);
        //close file after writing
        stm.close();
        System.out.println("Closed file.");
        FileStatus statAfterClose = fileSys.getFileStatus(file1);
        long mtimeAfterClose = statAfterClose.getModificationTime();
        String mdateAfterClose = dateForm.format(new Date(mtimeAfterClose));
        System.out.println("mtime on " + file1 + " after close is " + mdateAfterClose + " (" + mtimeAfterClose + ")");
        assertTrue(mtimeAfterClose != 0);
        assertTrue(mtimeBeforeClose != mtimeAfterClose);
        cleanupFile(fileSys, file1);
    } catch (IOException e) {
        info = client.datanodeReport(DatanodeReportType.ALL);
        printDatanodeReport(info);
        throw e;
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), FileStatus (org.apache.hadoop.fs.FileStatus), Configuration (org.apache.hadoop.conf.Configuration), InetSocketAddress (java.net.InetSocketAddress), IOException (java.io.IOException), Date (java.util.Date), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
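Both TestSetTimes examples reach the NameNode through new InetSocketAddress("localhost", port), which resolves the host eagerly at construction time. For contrast, a minimal sketch of the two construction styles java.net offers; the host names and port here are illustrative:

import java.net.InetSocketAddress;

public class EndpointSketch {
    public static void main(String[] args) {
        // Resolves "localhost" eagerly; on success the result carries an InetAddress.
        InetSocketAddress resolved = new InetSocketAddress("localhost", 8020);
        // Defers DNS resolution entirely; useful when the address is only a label.
        InetSocketAddress deferred =
            InetSocketAddress.createUnresolved("nn.example.com", 8020);
        System.out.println(resolved.isUnresolved()); // false when the lookup succeeds
        System.out.println(deferred.isUnresolved()); // always true
    }
}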

Example 73 with InetSocketAddress

Use of java.net.InetSocketAddress in the Apache Hadoop project.

From the class TestSetTimes, method testTimes.

/**
   * Tests mod & access time in DFS.
   */
@Test
public void testTimes() throws IOException {
    Configuration conf = new HdfsConfiguration();
    // 2s
    final int MAX_IDLE_TIME = 2000;
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    final int nnport = cluster.getNameNodePort();
    InetSocketAddress addr = new InetSocketAddress("localhost", nnport);
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", numDatanodes, info.length);
    FileSystem fileSys = cluster.getFileSystem();
    int replicas = 1;
    assertTrue(fileSys instanceof DistributedFileSystem);
    try {
        //
        // create file and record atime/mtime
        //
        System.out.println("Creating testdir1 and testdir1/test1.dat.");
        Path dir1 = new Path("testdir1");
        Path file1 = new Path(dir1, "test1.dat");
        FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
        FileStatus stat = fileSys.getFileStatus(file1);
        long atimeBeforeClose = stat.getAccessTime();
        String adate = dateForm.format(new Date(atimeBeforeClose));
        System.out.println("atime on " + file1 + " before close is " + adate + " (" + atimeBeforeClose + ")");
        assertTrue(atimeBeforeClose != 0);
        stm.close();
        stat = fileSys.getFileStatus(file1);
        long atime1 = stat.getAccessTime();
        long mtime1 = stat.getModificationTime();
        adate = dateForm.format(new Date(atime1));
        String mdate = dateForm.format(new Date(mtime1));
        System.out.println("atime on " + file1 + " is " + adate + " (" + atime1 + ")");
        System.out.println("mtime on " + file1 + " is " + mdate + " (" + mtime1 + ")");
        assertTrue(atime1 != 0);
        //
        // record dir times
        //
        stat = fileSys.getFileStatus(dir1);
        long mdir1 = stat.getAccessTime();
        assertTrue(mdir1 == 0);
        // set the access time to be one day in the past
        long atime2 = atime1 - (24L * 3600L * 1000L);
        fileSys.setTimes(file1, -1, atime2);
        // check new access time on file
        stat = fileSys.getFileStatus(file1);
        long atime3 = stat.getAccessTime();
        String adate3 = dateForm.format(new Date(atime3));
        System.out.println("new atime on " + file1 + " is " + adate3 + " (" + atime3 + ")");
        assertTrue(atime2 == atime3);
        assertTrue(mtime1 == stat.getModificationTime());
        // set the modification time to be 1 hour in the past
        long mtime2 = mtime1 - (3600L * 1000L);
        fileSys.setTimes(file1, mtime2, -1);
        // check new modification time on file
        stat = fileSys.getFileStatus(file1);
        long mtime3 = stat.getModificationTime();
        String mdate3 = dateForm.format(new Date(mtime3));
        System.out.println("new mtime on " + file1 + " is " + mdate3 + " (" + mtime3 + ")");
        assertTrue(atime2 == stat.getAccessTime());
        assertTrue(mtime2 == mtime3);
        long mtime4 = Time.now() - (3600L * 1000L);
        long atime4 = Time.now();
        fileSys.setTimes(dir1, mtime4, atime4);
        // check new modification time on file
        stat = fileSys.getFileStatus(dir1);
        assertTrue("Not matching the modification times", mtime4 == stat.getModificationTime());
        assertTrue("Not matching the access times", atime4 == stat.getAccessTime());
        Path nonExistingDir = new Path(dir1, "/nonExistingDir/");
        try {
            fileSys.setTimes(nonExistingDir, mtime4, atime4);
            fail("Expecting FileNotFoundException");
        } catch (FileNotFoundException e) {
            assertTrue(e.getMessage().contains("File/Directory " + nonExistingDir.toString() + " does not exist."));
        }
        // shutdown cluster and restart
        cluster.shutdown();
        try {
            Thread.sleep(2 * MAX_IDLE_TIME);
        } catch (InterruptedException e) {
            // ignored: the pause only needs to outlast the idle-connection timeout
        }
        cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
        cluster.waitActive();
        fileSys = cluster.getFileSystem();
        // verify that access times and modification times persist after a
        // cluster restart.
        System.out.println("Verifying times after cluster restart");
        stat = fileSys.getFileStatus(file1);
        assertTrue(atime2 == stat.getAccessTime());
        assertTrue(mtime3 == stat.getModificationTime());
        cleanupFile(fileSys, file1);
        cleanupFile(fileSys, dir1);
    } catch (IOException e) {
        info = client.datanodeReport(DatanodeReportType.ALL);
        printDatanodeReport(info);
        throw e;
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), FileStatus (org.apache.hadoop.fs.FileStatus), Configuration (org.apache.hadoop.conf.Configuration), InetSocketAddress (java.net.InetSocketAddress), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException), Date (java.util.Date), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
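testTimes leans on the sentinel convention of FileSystem.setTimes(path, mtime, atime): a value of -1 leaves that field unchanged, which is how the test updates access time and modification time independently. An illustrative helper built on the same convention (the helper name and one-hour delta are invented for this sketch):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Rewind only the modification time by one hour; access time is untouched.
static void rewindMtimeOneHour(FileSystem fs, Path path) throws IOException {
    long oneHourAgo = System.currentTimeMillis() - 3600L * 1000L;
    fs.setTimes(path, oneHourAgo, -1); // -1 == leave atime as it is
}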

Example 74 with InetSocketAddress

Use of java.net.InetSocketAddress in the Apache Hadoop project.

From the class TestDiskError, method testReplicationError.

/**
   * Test that when there is a failure replicating a block the temporary
   * and meta files are cleaned up and subsequent replication succeeds.
   */
@Test
public void testReplicationError() throws Exception {
    // create a file of replication factor of 1
    final Path fileName = new Path("/test.txt");
    final int fileLen = 1;
    DFSTestUtil.createFile(fs, fileName, 1, (short) 1, 1L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 1);
    // get the block belonged to the created file
    LocatedBlocks blocks = NameNodeAdapter.getBlockLocations(cluster.getNameNode(), fileName.toString(), 0, (long) fileLen);
    assertEquals("Should only find 1 block", blocks.locatedBlockCount(), 1);
    LocatedBlock block = blocks.get(0);
    // bring up a second datanode
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive();
    final int sndNode = 1;
    DataNode datanode = cluster.getDataNodes().get(sndNode);
    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(datanode);
    // replicate the block to the second datanode
    InetSocketAddress target = datanode.getXferAddress();
    Socket s = new Socket(target.getAddress(), target.getPort());
    // write the header.
    DataOutputStream out = new DataOutputStream(s.getOutputStream());
    DataChecksum checksum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512);
    new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT, BlockTokenSecretManager.DUMMY_TOKEN, "", new DatanodeInfo[0], new StorageType[0], null, BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L, checksum, CachingStrategy.newDefaultStrategy(), false, false, null);
    out.flush();
    // close the connection before sending the content of the block
    out.close();
    // the temporary block & meta files should be deleted
    String bpid = cluster.getNamesystem().getBlockPoolId();
    while (utils.getStoredReplicas(bpid).hasNext()) {
        Thread.sleep(100);
    }
    // then increase the file's replication factor
    fs.setReplication(fileName, (short) 2);
    // replication to the second datanode should now succeed
    DFSTestUtil.waitReplication(fs, fileName, (short) 2);
    // clean up the file
    fs.delete(fileName, false);
}
Also used: Path (org.apache.hadoop.fs.Path), Sender (org.apache.hadoop.hdfs.protocol.datatransfer.Sender), InetSocketAddress (java.net.InetSocketAddress), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DataOutputStream (java.io.DataOutputStream), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Socket (java.net.Socket), DataChecksum (org.apache.hadoop.util.DataChecksum), Test (org.junit.Test)
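One detail worth noting: new Socket(target.getAddress(), target.getPort()) unpacks the InetSocketAddress only for the Socket constructor to rebuild it internally, and getAddress() returns null (causing a NullPointerException) when the address is unresolved. Connecting against the address object directly sidesteps both; a minimal equivalent sketch:

import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

static DataOutputStream openXferStream(InetSocketAddress target) throws IOException {
    Socket s = new Socket();
    s.connect(target); // connects to the InetSocketAddress as-is, no unpacking
    return new DataOutputStream(s.getOutputStream());
}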

Example 75 with InetSocketAddress

Use of java.net.InetSocketAddress in the Apache Hadoop project.

From the class TestWebHDFS, method testWebHdfsOffsetAndLength.

@Test
public void testWebHdfsOffsetAndLength() throws Exception {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    final int OFFSET = 42;
    final int LENGTH = 512;
    final String PATH = "/foo";
    byte[] CONTENTS = new byte[1024];
    RANDOM.nextBytes(CONTENTS);
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        final WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        try (OutputStream os = fs.create(new Path(PATH))) {
            os.write(CONTENTS);
        }
        InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
        URL url = new URL("http", addr.getHostString(), addr.getPort(), WebHdfsFileSystem.PATH_PREFIX + PATH + "?op=OPEN" + Param.toSortedString("&", new OffsetParam((long) OFFSET), new LengthParam((long) LENGTH)));
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setInstanceFollowRedirects(true);
        Assert.assertEquals(LENGTH, conn.getContentLength());
        byte[] subContents = new byte[LENGTH];
        byte[] realContents = new byte[LENGTH];
        System.arraycopy(CONTENTS, OFFSET, subContents, 0, LENGTH);
        IOUtils.readFully(conn.getInputStream(), realContents);
        Assert.assertArrayEquals(subContents, realContents);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), OffsetParam (org.apache.hadoop.hdfs.web.resources.OffsetParam), InetSocketAddress (java.net.InetSocketAddress), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), OutputStream (java.io.OutputStream), URL (java.net.URL), HttpURLConnection (java.net.HttpURLConnection), LengthParam (org.apache.hadoop.hdfs.web.resources.LengthParam), Test (org.junit.Test), HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest)
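The URL is assembled from the NameNode's HTTP address via getHostString(), which returns the original hostname or IP literal without triggering a reverse DNS lookup, unlike getHostName(). A minimal sketch of the same composition; the example path in the comment is illustrative:

import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URL;

static URL toHttpUrl(InetSocketAddress addr, String file) throws MalformedURLException {
    // file carries the path plus query string, e.g. "/webhdfs/v1/foo?op=OPEN"
    return new URL("http", addr.getHostString(), addr.getPort(), file);
}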

Aggregations

InetSocketAddress (java.net.InetSocketAddress): 2586
Test (org.junit.Test): 595
IOException (java.io.IOException): 592
Socket (java.net.Socket): 345
InetAddress (java.net.InetAddress): 242
SocketAddress (java.net.SocketAddress): 176
ServerSocket (java.net.ServerSocket): 170
ArrayList (java.util.ArrayList): 168
Configuration (org.apache.hadoop.conf.Configuration): 140
ByteBuffer (java.nio.ByteBuffer): 129
UnknownHostException (java.net.UnknownHostException): 122
InputStream (java.io.InputStream): 102
OutputStream (java.io.OutputStream): 101
SocketChannel (java.nio.channels.SocketChannel): 101
SocketException (java.net.SocketException): 89
File (java.io.File): 88
HashMap (java.util.HashMap): 78
URI (java.net.URI): 72
Proxy (java.net.Proxy): 65
SocketTimeoutException (java.net.SocketTimeoutException): 65