Search in sources:

Example 11 with CreateFlag

use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.

The class TestDFSClientRetries, method testNotYetReplicatedErrors.

// more tests related to different failure cases can be added here.
/**
   * Verify that client will correctly give up after the specified number
   * of times trying to add a block.
   *
   * The NameNode is fully mocked: every addBlock() call fails with
   * NotReplicatedYetException (wrapped in a RemoteException, as it would be
   * over RPC), so the client's block-allocation retry loop is exercised
   * without a real cluster. The retry budget is
   * BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY; exceeding it by more than
   * the single allowed initial call + retries makes the mock itself throw,
   * failing the test.
   */
@SuppressWarnings({ "serial", "unchecked" })
@Test
public void testNotYetReplicatedErrors() throws IOException {
    final String exceptionMsg = "Nope, not replicated yet...";
    // Allow one retry (total of two calls)
    final int maxRetries = 1;
    conf.setInt(HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries);
    NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
    // Answer for addBlock(): always fails, but counts invocations so the
    // test can detect the client retrying more times than configured.
    Answer<Object> answer = new ThrowsException(new IOException()) {

        int retryCount = 0;

        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            retryCount++;
            System.out.println("addBlock has been called " + retryCount + " times");
            if (// First call was not a retry
            retryCount > maxRetries + 1)
                throw new IOException("Retried too many times: " + retryCount);
            else
                throw new RemoteException(NotReplicatedYetException.class.getName(), exceptionMsg);
        }
    };
    when(mockNN.addBlock(anyString(), anyString(), any(ExtendedBlock.class), any(DatanodeInfo[].class), anyLong(), any(String[].class), Matchers.<EnumSet<AddBlockFlag>>any())).thenAnswer(answer);
    // Stub getFileInfo()/create() with a minimal HdfsFileStatus so that
    // DFSClient.create() succeeds and the failure is confined to addBlock().
    // NOTE(review): (short) 777 is decimal, not octal 0755/0777 — the
    // permission bits are odd, but they are irrelevant to this test.
    Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString());
    Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)).when(mockNN).create(anyString(), (FsPermission) anyObject(), anyString(), (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(), anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
    final DFSClient client = new DFSClient(null, mockNN, conf, null);
    OutputStream os = client.create("testfile", true);
    // write one random byte
    os.write(20);
    try {
        // close() flushes the buffered byte, which triggers addBlock() and
        // therefore the retry loop under test.
        os.close();
    } catch (Exception e) {
        // The surfaced exception must be the NotReplicatedYetException
        // message, not the mock's "Retried too many times" IOException.
        assertTrue("Retries are not being stopped correctly: " + e.getMessage(), e.getMessage().equals(exceptionMsg));
    }
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) ThrowsException(org.mockito.internal.stubbing.answers.ThrowsException) CryptoProtocolVersion(org.apache.hadoop.crypto.CryptoProtocolVersion) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) OutputStream(java.io.OutputStream) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Matchers.anyString(org.mockito.Matchers.anyString) IOException(java.io.IOException) ThrowsException(org.mockito.internal.stubbing.answers.ThrowsException) ChecksumException(org.apache.hadoop.fs.ChecksumException) FileNotFoundException(java.io.FileNotFoundException) NotReplicatedYetException(org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException) SocketTimeoutException(java.net.SocketTimeoutException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) InvocationOnMock(org.mockito.invocation.InvocationOnMock) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Matchers.anyObject(org.mockito.Matchers.anyObject) FsPermission(org.apache.hadoop.fs.permission.FsPermission) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)

Example 12 with CreateFlag

use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.

The class TestFileCreation, method doCreateTest.

/**
 * Exercises file creation for every non-canonical test path using the
 * given creation method, against a fresh single-datanode mini cluster.
 * Direct NN RPC is expected to reject such paths; Path-based creation is
 * expected to normalize and succeed.
 */
private void doCreateTest(CreationMethod method) throws Exception {
    Configuration config = new HdfsConfiguration();
    MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    try {
        FileSystem fileSystem = miniCluster.getFileSystem();
        NamenodeProtocols nameNodeRpc = miniCluster.getNameNodeRpc();
        for (String rawPath : NON_CANONICAL_PATHS) {
            System.out.println("Creating " + rawPath + " by " + method);
            if (method == CreationMethod.DIRECT_NN_RPC) {
                try {
                    nameNodeRpc.create(rawPath, new FsPermission((short) 0755), "client", new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true, (short) 1, 128 * 1024 * 1024L, null);
                    fail("Should have thrown exception when creating '" + rawPath + "'" + " by " + method);
                } catch (InvalidPathException expected) {
                    // Over direct NN RPC there is no client-side
                    // normalization: the NN simply rejects non-canonical
                    // paths, so every one of them must fail here.
                }
            } else if (method == CreationMethod.PATH_FROM_URI || method == CreationMethod.PATH_FROM_STRING) {
                // The Path constructor normalizes the path, so unlike the
                // direct-to-NN case these creations are expected to succeed.
                Path target = (method == CreationMethod.PATH_FROM_URI)
                        ? new Path(new URI(fileSystem.getUri() + rawPath))
                        : new Path(fileSystem.getUri() + rawPath);
                FSDataOutputStream out = fileSystem.create(target);
                IOUtils.closeStream(out);
            } else {
                throw new AssertionError("bad method: " + method);
            }
        }
        miniCluster.restartNameNode();
    } finally {
        miniCluster.shutdown();
    }
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) Configuration(org.apache.hadoop.conf.Configuration) URI(java.net.URI) InvalidPathException(org.apache.hadoop.fs.InvalidPathException) FileSystem(org.apache.hadoop.fs.FileSystem) FsPermission(org.apache.hadoop.fs.permission.FsPermission) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)

Example 13 with CreateFlag

use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.

The class TestFileCreation, method testFileCreationNonRecursive.

// Worker method for testing non-recursive. Extracted to allow other
// FileSystem implementations to re-use the tests
public static void testFileCreationNonRecursive(FileSystem fs) throws IOException {
    final Path path = new Path("/" + Time.now() + "-testFileCreationNonRecursive");
    final String nonExistDir = "/non-exist-" + Time.now();
    fs.delete(new Path(nonExistDir), true);
    final EnumSet<CreateFlag> createOnly = EnumSet.of(CreateFlag.CREATE);
    // A brand-new file directly under the root must succeed.
    assertNull(createNonRecursive(fs, path, 1, createOnly));
    // A parent that exists as a regular file must be rejected.
    IOException caught = createNonRecursive(fs, new Path(path, "Create"), 1, createOnly);
    assertTrue("Create a file when parent directory exists as a file should throw ParentNotDirectoryException ", caught instanceof ParentNotDirectoryException);
    fs.delete(path, true);
    // A missing parent directory must also be rejected (non-recursive).
    final Path missingParentCreate = new Path(nonExistDir + "/testCreateNonRecursive");
    caught = createNonRecursive(fs, missingParentCreate, 1, createOnly);
    assertTrue("Create a file in a non-exist dir using createNonRecursive() should throw FileNotFoundException ", caught instanceof FileNotFoundException);
    // Repeat the three scenarios with CREATE|OVERWRITE.
    final EnumSet<CreateFlag> createOverwrite = EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
    assertNull(createNonRecursive(fs, path, 1, createOverwrite));
    caught = createNonRecursive(fs, new Path(path, "Overwrite"), 1, createOverwrite);
    assertTrue("Overwrite a file when parent directory exists as a file should throw ParentNotDirectoryException ", caught instanceof ParentNotDirectoryException);
    fs.delete(path, true);
    final Path missingParentOverwrite = new Path(nonExistDir + "/testOverwriteNonRecursive");
    caught = createNonRecursive(fs, missingParentOverwrite, 1, createOverwrite);
    assertTrue("Overwrite a file in a non-exist dir using createNonRecursive() should throw FileNotFoundException ", caught instanceof FileNotFoundException);
}
Also used : Path(org.apache.hadoop.fs.Path) CreateFlag(org.apache.hadoop.fs.CreateFlag) ParentNotDirectoryException(org.apache.hadoop.fs.ParentNotDirectoryException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException)

Example 14 with CreateFlag

use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.

The class TestLease, method testFactory.

/**
 * Verifies that DFSClients created for the same user share a single
 * LeaseRenewer, while clients created for different users get distinct
 * renewers.
 */
@SuppressWarnings("unchecked")
@Test
public void testFactory() throws Exception {
    final String[] groups = new String[] { "supergroup" };
    final UserGroupInformation[] users = new UserGroupInformation[3];
    for (int i = 0; i < users.length; i++) {
        users[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
    }
    // Stub the mock NameNode so getFileInfo() and create() both succeed
    // with a minimal file status; the stubbing must happen before any
    // client below opens a stream.
    Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)).when(mcp).getFileInfo(anyString());
    Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)).when(mcp).create(anyString(), (FsPermission) anyObject(), anyString(), (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(), anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
    final Configuration conf = new Configuration();
    // Two clients for user 0: the renewer must be shared.
    final DFSClient clientA = createDFSClientAs(users[0], conf);
    createFsOut(clientA, "/out1");
    final DFSClient clientB = createDFSClientAs(users[0], conf);
    createFsOut(clientB, "/out2");
    Assert.assertEquals(clientA.getLeaseRenewer(), clientB.getLeaseRenewer());
    // A client for user 1 must NOT share user 0's renewer...
    final DFSClient clientC = createDFSClientAs(users[1], conf);
    createFsOut(clientC, "/out3");
    Assert.assertTrue(clientA.getLeaseRenewer() != clientC.getLeaseRenewer());
    // ...but a second user-1 client shares user 1's renewer.
    final DFSClient clientD = createDFSClientAs(users[1], conf);
    createFsOut(clientD, "/out4");
    Assert.assertEquals(clientC.getLeaseRenewer(), clientD.getLeaseRenewer());
    // User 2 gets a renewer distinct from both previous users'.
    final DFSClient clientE = createDFSClientAs(users[2], conf);
    createFsOut(clientE, "/out5");
    Assert.assertTrue(clientA.getLeaseRenewer() != clientE.getLeaseRenewer());
    Assert.assertTrue(clientC.getLeaseRenewer() != clientE.getLeaseRenewer());
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) Configuration(org.apache.hadoop.conf.Configuration) CryptoProtocolVersion(org.apache.hadoop.crypto.CryptoProtocolVersion) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Matchers.anyString(org.mockito.Matchers.anyString) FsPermission(org.apache.hadoop.fs.permission.FsPermission) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)

Example 15 with CreateFlag

use of org.apache.hadoop.fs.CreateFlag in project hadoop by apache.

The class LazyPersistTestCase, method makeTestFile.

/**
 * Creates a test file of (at least) the requested length, optionally with
 * the LAZY_PERSIST flag, writing whole buffers and hsync-ing non-empty
 * files so block allocation is persisted.
 */
protected final void makeTestFile(Path path, long length, boolean isLazyPersist) throws IOException {
    final EnumSet<CreateFlag> flags = isLazyPersist
            ? EnumSet.of(CREATE, LAZY_PERSIST)
            : EnumSet.of(CREATE);
    FSDataOutputStream out = null;
    try {
        out = fs.create(path, FsPermission.getFileDefault(), flags, BUFFER_LENGTH, REPL_FACTOR, BLOCK_SIZE, null);
        // Write full buffers until at least `length` bytes are out; the
        // total written may overshoot by up to BUFFER_LENGTH - 1 bytes.
        final byte[] chunk = new byte[BUFFER_LENGTH];
        int written = 0;
        while (written < length) {
            out.write(chunk, 0, chunk.length);
            written += chunk.length;
        }
        if (length > 0) {
            // Force the data to the datanode so a block is allocated.
            out.hsync();
        }
    } finally {
        IOUtils.closeQuietly(out);
    }
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)

Aggregations

CreateFlag (org.apache.hadoop.fs.CreateFlag)18 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)9 Test (org.junit.Test)8 IOException (java.io.IOException)6 FsPermission (org.apache.hadoop.fs.permission.FsPermission)6 Path (org.apache.hadoop.fs.Path)5 Configuration (org.apache.hadoop.conf.Configuration)4 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)4 NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols)4 FileNotFoundException (java.io.FileNotFoundException)3 OutputStream (java.io.OutputStream)2 URI (java.net.URI)2 Random (java.util.Random)2 CryptoProtocolVersion (org.apache.hadoop.crypto.CryptoProtocolVersion)2 FileSystem (org.apache.hadoop.fs.FileSystem)2 DFSClient (org.apache.hadoop.hdfs.DFSClient)2 LastBlockWithStatus (org.apache.hadoop.hdfs.protocol.LastBlockWithStatus)2 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)2 RemoteException (org.apache.hadoop.ipc.RemoteException)2 Matchers.anyString (org.mockito.Matchers.anyString)2