Search in sources:

Example 16 with CreateFlag

Use of org.apache.hadoop.fs.CreateFlag in the project hadoop by apache.

From the class TestLazyPersistLockedMemory, method testWritePipelineFailure.

/**
 * Verify that locked bytes are correctly updated when the client goes
 * away unexpectedly during a write.
 */
@Test
public void testWritePipelineFailure() throws IOException, TimeoutException, InterruptedException {
    getClusterBuilder().setNumDatanodes(1).build();
    final String methodName = GenericTestUtils.getMethodName();
    final FsDatasetSpi<?> dataset = cluster.getDataNodes().get(0).getFSDataset();
    final Path file = new Path("/" + methodName + ".dat");
    final EnumSet<CreateFlag> flags = EnumSet.of(CREATE, LAZY_PERSIST);
    // Write a single byte, sync it to the datanode, then abort the client
    // stream so the writer appears to die mid-write.
    final FSDataOutputStream out = fs.create(file, FsPermission.getFileDefault(), flags, BUFFER_LENGTH, REPL_FACTOR, BLOCK_SIZE, null);
    out.write(new byte[1]);
    out.hsync();
    DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream());
    waitForLockedBytesUsed(dataset, osPageSize);
    // Deleting the file should release all locked RAM once the datanode
    // processes the next block report.
    fs.delete(file, false);
    DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(0));
    waitForLockedBytesUsed(dataset, 0);
}
Also used : Path(org.apache.hadoop.fs.Path) CreateFlag(org.apache.hadoop.fs.CreateFlag) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)

Example 17 with CreateFlag

Use of org.apache.hadoop.fs.CreateFlag in the project hadoop by apache.

From the class TestAddBlockRetry, method testRetryAddBlockWhileInChooseTarget.

/**
 * Retry addBlock() while another thread is in chooseTarget().
 * See HDFS-4452.
 */
@Test
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
    final String src = "/testRetryAddBlockWhileInChooseTarget";
    final FSNamesystem ns = cluster.getNamesystem();
    final NamenodeProtocols nn = cluster.getNameNodeRpc();
    // create file
    nn.create(src, FsPermission.getFileDefault(), "clientName", new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true, (short) 3, 1024, null);
    // start first addBlock(): validate under the read lock only, without
    // committing, to simulate being stalled inside chooseTarget().
    LOG.info("Starting first addBlock for " + src);
    LocatedBlock[] onRetryBlock = new LocatedBlock[1];
    ns.readLock();
    FSDirWriteFileOp.ValidateAddBlockResult r;
    FSPermissionChecker pc = Mockito.mock(FSPermissionChecker.class);
    try {
        r = FSDirWriteFileOp.validateAddBlock(ns, pc, src, HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, onRetryBlock);
    } finally {
        // Fixed: removed a stray empty statement (";") that followed the
        // unlock call in the original.
        ns.readUnlock();
    }
    DatanodeStorageInfo[] targets = FSDirWriteFileOp.chooseTargetForNewBlock(ns.getBlockManager(), src, null, null, null, r);
    assertNotNull("Targets must be generated", targets);
    // run second addBlock() — this one completes the allocation first.
    LOG.info("Starting second addBlock for " + src);
    nn.addBlock(src, "clientName", null, null, HdfsConstants.GRANDFATHER_INODE_ID, null, null);
    assertTrue("Penultimate block must be complete", checkFileProgress(src, false));
    LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
    assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
    LocatedBlock lb2 = lbs.get(0);
    assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
    // continue first addBlock(): storing must detect the retry and return
    // the block the second call already allocated, not a new one.
    ns.writeLock();
    LocatedBlock newBlock;
    try {
        newBlock = FSDirWriteFileOp.storeAllocatedBlock(ns, src, HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
    } finally {
        ns.writeUnlock();
    }
    assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
    // check locations: still exactly one block with the expected replication.
    lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
    assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
    LocatedBlock lb1 = lbs.get(0);
    assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
    assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) Test(org.junit.Test)

Example 18 with CreateFlag

Use of org.apache.hadoop.fs.CreateFlag in the project lucene-solr by apache.

From the class HdfsFileWriter, method getOutputStream.

/**
 * Opens an output stream for {@code path} on the given file system, using the
 * server-side defaults for buffer size, replication and block size.
 *
 * <p>The file is created (or overwritten if it already exists). If the
 * {@code HDFS_SYNC_BLOCK} system property is set to {@code true},
 * {@link CreateFlag#SYNC_BLOCK} is added so each block is synced on close.
 *
 * @param fileSystem the file system to create the file on
 * @param path       the file to create
 * @return a new output stream positioned at the start of the file
 * @throws IOException if the server defaults cannot be read or creation fails
 */
// Fixed: dropped the redundant "final" modifier — static methods cannot be
// overridden, so "final" on them is meaningless noise.
private static OutputStream getOutputStream(FileSystem fileSystem, Path path) throws IOException {
    Configuration conf = fileSystem.getConf();
    FsServerDefaults fsDefaults = fileSystem.getServerDefaults(path);
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
    // Boolean.getBoolean reads a JVM system property, not the Hadoop conf.
    if (Boolean.getBoolean(HDFS_SYNC_BLOCK)) {
        flags.add(CreateFlag.SYNC_BLOCK);
    }
    return fileSystem.create(path, FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf)), flags, fsDefaults.getFileBufferSize(), fsDefaults.getReplication(), fsDefaults.getBlockSize(), null);
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) Configuration(org.apache.hadoop.conf.Configuration) FsServerDefaults(org.apache.hadoop.fs.FsServerDefaults)

Aggregations

CreateFlag (org.apache.hadoop.fs.CreateFlag)18 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)9 Test (org.junit.Test)8 IOException (java.io.IOException)6 FsPermission (org.apache.hadoop.fs.permission.FsPermission)6 Path (org.apache.hadoop.fs.Path)5 Configuration (org.apache.hadoop.conf.Configuration)4 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)4 NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols)4 FileNotFoundException (java.io.FileNotFoundException)3 OutputStream (java.io.OutputStream)2 URI (java.net.URI)2 Random (java.util.Random)2 CryptoProtocolVersion (org.apache.hadoop.crypto.CryptoProtocolVersion)2 FileSystem (org.apache.hadoop.fs.FileSystem)2 DFSClient (org.apache.hadoop.hdfs.DFSClient)2 LastBlockWithStatus (org.apache.hadoop.hdfs.protocol.LastBlockWithStatus)2 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)2 RemoteException (org.apache.hadoop.ipc.RemoteException)2 Matchers.anyString (org.mockito.Matchers.anyString)2