Example 81 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

From the class TestFileAppend, method testAppendTwice.

/** Test two consecutive appends on a file with a full block. */
@Test
public void testAppendTwice() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    final FileSystem fs1 = cluster.getFileSystem();
    final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
    try {
        final Path p = new Path("/testAppendTwice/foo");
        final int len = 1 << 16;
        final byte[] fileContents = AppendTestUtil.initBuffer(len);
        {
            // create a new file with a full block.
            FSDataOutputStream out = fs2.create(p, true, 4096, (short) 1, len);
            out.write(fileContents, 0, len);
            out.close();
        }
        //1st append does not add any data so that the last block remains full
        //and the last block in INodeFileUnderConstruction is a BlockInfo
        //but does not have a BlockUnderConstructionFeature.
        fs2.append(p);
        //2nd append should get AlreadyBeingCreatedException
        fs1.append(p);
        Assert.fail();
    } catch (RemoteException re) {
        AppendTestUtil.LOG.info("Got an exception:", re);
        Assert.assertEquals(AlreadyBeingCreatedException.class.getName(), re.getClassName());
    } finally {
        fs2.close();
        fs1.close();
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test)
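
The assertion above identifies the failure by comparing re.getClassName() with the expected class name. A minimal alternative sketch (not part of the test; it assumes the test's existing imports plus java.io.IOException) uses RemoteException.unwrapRemoteException to turn the wire-format exception back into a typed local exception:

try {
    fs1.append(p);
    Assert.fail("second append should have been rejected");
} catch (RemoteException re) {
    // unwrapRemoteException returns the typed exception when its class matches one
    // of the supplied lookup types; otherwise it returns the RemoteException itself.
    IOException unwrapped = re.unwrapRemoteException(AlreadyBeingCreatedException.class);
    Assert.assertTrue(unwrapped instanceof AlreadyBeingCreatedException);
}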

Example 82 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

From the class TestLeaseRecovery, method testLeaseRecoveryAndAppend.

/**
   * Recover the lease on a file and append to it from another client.
   */
@Test
public void testLeaseRecoveryAndAppend() throws Exception {
    Configuration conf = new Configuration();
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        Path file = new Path("/testLeaseRecovery");
        DistributedFileSystem dfs = cluster.getFileSystem();
        // create a file with 0 bytes
        FSDataOutputStream out = dfs.create(file);
        out.hflush();
        out.hsync();
        // abort the original stream
        ((DFSOutputStream) out.getWrappedStream()).abort();
        DistributedFileSystem newdfs = (DistributedFileSystem) FileSystem.newInstance(cluster.getConfiguration(0));
        // Appending to a file whose lease is held by another client should fail
        try {
            newdfs.append(file);
            fail("Append to a file(lease is held by another client) should fail");
        } catch (RemoteException e) {
            assertTrue(e.getMessage().contains("file lease is currently owned"));
        }
        // Lease recovery on first try should be successful
        boolean recoverLease = newdfs.recoverLease(file);
        assertTrue(recoverLease);
        FSDataOutputStream append = newdfs.append(file);
        append.write("test".getBytes());
        append.close();
    } finally {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test)
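
Here the lease is expected to be recoverable on the first call because the writer was aborted, but in general recoverLease() can return false while the NameNode is still completing block recovery. A minimal retry sketch, assuming a DistributedFileSystem handle; the timeout and poll interval are arbitrary assumptions:

// Illustrative helper, not part of the test above; the 60-second budget and
// 1-second poll interval are assumptions.
static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path p)
        throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + 60_000;
    while (System.currentTimeMillis() < deadline) {
        if (dfs.recoverLease(p)) {
            return true;      // lease released; the file can now be read or appended safely
        }
        Thread.sleep(1_000);  // give the NameNode time to finish block recovery
    }
    return false;
}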

Example 83 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

From the class TestNNWithQJM, method testNewNamenodeTakesOverWriter.

@Test(timeout = 30000)
public void testNewNamenodeTakesOverWriter() throws Exception {
    File nn1Dir = new File(MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn1");
    File nn2Dir = new File(MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn2");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn1Dir.getAbsolutePath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, mjc.getQuorumJournalURI("myjournal").toString());
    // Start the cluster once to generate the dfs dirs
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).manageNameDfsDirs(false).checkExitOnShutdown(false).build();
    // Shutdown the cluster before making a copy of the namenode dir
    // to release all file locks, otherwise, the copy will fail on
    // some platforms.
    cluster.shutdown();
    try {
        // Start a second NN pointed to the same quorum.
        // We need to copy the image dir from the first NN -- or else
        // the new NN will just be rejected because of Namespace mismatch.
        FileUtil.fullyDelete(nn2Dir);
        FileUtil.copy(nn1Dir, FileSystem.getLocal(conf).getRaw(), new Path(nn2Dir.getAbsolutePath()), false, conf);
        // Start the cluster again
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageNameDfsDirs(false).checkExitOnShutdown(false).build();
        cluster.getFileSystem().mkdirs(TEST_PATH);
        Configuration conf2 = new Configuration();
        conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn2Dir.getAbsolutePath());
        conf2.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, mjc.getQuorumJournalURI("myjournal").toString());
        MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf2).numDataNodes(0).format(false).manageNameDfsDirs(false).build();
        // Check that the new cluster sees the edits made on the old cluster
        try {
            assertTrue(cluster2.getFileSystem().exists(TEST_PATH));
        } finally {
            cluster2.shutdown();
        }
        // Check that, if we try to write to the old NN, it aborts.
        try {
            cluster.getFileSystem().mkdirs(new Path("/x"));
            fail("Did not abort trying to write to a fenced NN");
        } catch (RemoteException re) {
            GenericTestUtils.assertExceptionContains("Could not sync enough journals to persistent storage", re);
        }
    } finally {
    //cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Configuration (org.apache.hadoop.conf.Configuration), RemoteException (org.apache.hadoop.ipc.RemoteException), File (java.io.File), Test (org.junit.Test)
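
The mjc.getQuorumJournalURI("myjournal") call above comes from the test's MiniJournalCluster. Outside a mini cluster, the same keys point the NameNode's edits directory at a quorum of JournalNodes; a configuration sketch with placeholder hostnames, the default JournalNode port, and an assumed journal id:

// Only the configuration keys and the qjournal:// URI format are taken from HDFS;
// the local path and hosts below are placeholders.
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, "/data/dfs/name");
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
    "qjournal://jn1.example.com:8485;jn2.example.com:8485;jn3.example.com:8485/myjournal");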

Example 84 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

From the class TestWriteReadStripedFile, method testConcatWithDifferentECPolicy.

@Test
public void testConcatWithDifferentECPolicy() throws Exception {
    final byte[] data = StripedFileTestUtil.generateBytes(blockSize * dataBlocks);
    Path nonECFile = new Path("/non_ec_file");
    DFSTestUtil.writeFile(fs, nonECFile, data);
    Path target = new Path("/ec/non_ec_file");
    fs.rename(nonECFile, target);
    int numFiles = 2;
    Path[] srcs = new Path[numFiles];
    for (int i = 0; i < numFiles; i++) {
        srcs[i] = new Path("/ec/testConcat_src_file_" + i);
        DFSTestUtil.writeFile(fs, srcs[i], data);
    }
    try {
        fs.concat(target, srcs);
        Assert.fail("non-ec file shouldn't concat with ec file");
    } catch (RemoteException e) {
        Assert.assertTrue(e.getMessage().contains("have different erasure coding policy"));
    }
}
Also used: Path (org.apache.hadoop.fs.Path), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test)
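
The test relies on the /ec directory already carrying an erasure coding policy (set up elsewhere in the test class) while /non_ec_file does not, so concat across the two must fail. A hedged sketch of preparing such a directory, assuming fs is a DistributedFileSystem and the built-in RS-6-3-1024k policy is enabled on the cluster:

// Sketch only; the policy name is an assumption and the path mirrors the test above.
fs.mkdirs(new Path("/ec"));
fs.setErasureCodingPolicy(new Path("/ec"), "RS-6-3-1024k");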

Example 85 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

From the class TestBlockManager, method testStorageWithRemainingCapacity.

/**
   * Tests that a namenode doesn't choose a datanode with full disks to 
   * store blocks.
   * @throws Exception
   */
@Test
public void testStorageWithRemainingCapacity() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = FileSystem.get(conf);
    Path file1 = null;
    try {
        cluster.waitActive();
        final FSNamesystem namesystem = cluster.getNamesystem();
        final String poolId = namesystem.getBlockPoolId();
        final DatanodeRegistration nodeReg = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
        final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
        // By default the MiniDFSCluster starts one datanode with two storages.
        // Give each storage 64K of capacity with all of it remaining; the test
        // then tries to create a 100K file.
        for (DatanodeStorageInfo storage : dd.getStorageInfos()) {
            storage.setUtilizationForTesting(65536, 0, 65536, 0);
        }
        //sum of the remaining capacity of both the storages
        dd.setRemaining(131072);
        file1 = new Path("testRemainingStorage.dat");
        try {
            DFSTestUtil.createFile(fs, file1, 102400, 102400, 102400, (short) 1, 0x1BAD5EED);
        } catch (RemoteException re) {
            GenericTestUtils.assertExceptionContains("nodes instead of " + "minReplication", re);
        }
    } finally {
        // Clean up
        assertTrue(fs.exists(file1));
        fs.delete(file1, true);
        assertTrue(!fs.exists(file1));
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), RemoteException (org.apache.hadoop.ipc.RemoteException), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test)
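
Several of the examples above only check that the RemoteException message contains an expected fragment, which is what GenericTestUtils.assertExceptionContains does for them. A purely illustrative helper with the same behaviour (not a Hadoop API):

static void assertRemoteExceptionContains(RemoteException re, String fragment) {
    String msg = String.valueOf(re.getMessage());
    if (!msg.contains(fragment)) {
        throw new AssertionError("expected message to contain \"" + fragment
            + "\" but was: " + msg);
    }
}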

Aggregations

RemoteException (org.apache.hadoop.ipc.RemoteException): 99
IOException (java.io.IOException): 53
Test (org.junit.Test): 39
Path (org.apache.hadoop.fs.Path): 36
Configuration (org.apache.hadoop.conf.Configuration): 20
FileNotFoundException (java.io.FileNotFoundException): 19
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
FileSystem (org.apache.hadoop.fs.FileSystem): 12
InterruptedIOException (java.io.InterruptedIOException): 10
AccessControlException (org.apache.hadoop.security.AccessControlException): 10
ServerName (org.apache.hadoop.hbase.ServerName): 9
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 8
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 7
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 7
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 7
EOFException (java.io.EOFException): 6
ArrayList (java.util.ArrayList): 6
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 6
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 6