
Example 51 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From the class ClientNamenodeProtocolTranslatorPB, method append. The translator packs a client-side append call into an AppendRequestProto, issues the RPC, and unpacks the response into a LastBlockWithStatus, i.e. the file's last LocatedBlock together with its HdfsFileStatus.

@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws IOException {
    // Pack the arguments into the protobuf request.
    AppendRequestProto req = AppendRequestProto.newBuilder()
        .setSrc(src)
        .setClientName(clientName)
        .setFlag(PBHelperClient.convertCreateFlag(flag))
        .build();
    try {
        AppendResponseProto res = rpcProxy.append(null, req);
        // Both fields are optional in the proto; convert only what is present.
        LocatedBlock lastBlock = res.hasBlock()
            ? PBHelperClient.convertLocatedBlockProto(res.getBlock()) : null;
        HdfsFileStatus stat = res.hasStat() ? PBHelperClient.convert(res.getStat()) : null;
        return new LastBlockWithStatus(lastBlock, stat);
    } catch (ServiceException e) {
        // Unwrap the remote exception carried inside the protobuf ServiceException.
        throw ProtobufHelper.getRemoteException(e);
    }
}
Also used: ServiceException (com.google.protobuf.ServiceException), LastBlockWithStatus (org.apache.hadoop.hdfs.protocol.LastBlockWithStatus), AppendResponseProto (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), AppendRequestProto (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto)
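For context, a hedged sketch of driving this method from the caller's side through DFSClient; the NameNode URI, file path, and client name below are illustrative assumptions, not part of the Hadoop snippet above.

import java.net.URI;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.io.EnumSetWritable;

public class AppendSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical NameNode address; the RPC issued below eventually goes
        // through the ClientNamenodeProtocolTranslatorPB.append() shown above.
        DFSClient client = new DFSClient(URI.create("hdfs://namenode:8020"), conf);
        LastBlockWithStatus lbws = client.getNamenode().append(
            "/tmp/existing-file", "example-client",
            new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
        System.out.println("last block: " + lbws.getLastBlock());
        System.out.println("file status: " + lbws.getFileStatus());
        client.close();
    }
}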

Example 52 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From the class TestDFSUpgradeFromImage, method recoverAllLeases. The helper walks a directory tree page by page via DFSClient.listPaths and, for every regular file, retries recoverLease until it succeeds or the retry budget is exhausted.

static void recoverAllLeases(DFSClient dfs, Path path) throws IOException {
    String pathStr = path.toString();
    HdfsFileStatus status = dfs.getFileInfo(pathStr);
    if (!status.isDir()) {
        // Regular file: retry lease recovery up to 10 times, one second apart.
        for (int retries = 10; retries > 0; retries--) {
            if (dfs.recoverLease(pathStr)) {
                return;
            } else {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ignored) {
                }
            }
        }
        throw new IOException("Failed to recover lease of " + path);
    }
    // Directory: page through the listing and recurse into each entry.
    byte[] prev = HdfsFileStatus.EMPTY_NAME;
    DirectoryListing dirList;
    do {
        dirList = dfs.listPaths(pathStr, prev);
        HdfsFileStatus[] files = dirList.getPartialListing();
        for (HdfsFileStatus f : files) {
            recoverAllLeases(dfs, f.getFullPath(path));
        }
        prev = dirList.getLastName();
    } while (dirList.hasMore());
}
Also used: DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), IOException (java.io.IOException)
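A hedged sketch of how a test might invoke this helper against a MiniDFSCluster; the cluster setup below is illustrative, not taken from TestDFSUpgradeFromImage itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Illustrative only: bring up a single-datanode cluster, then recover any
// outstanding leases under the root directory.
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
    cluster.waitActive();
    DFSClient dfs = cluster.getFileSystem().getClient();
    recoverAllLeases(dfs, new Path("/"));
} finally {
    cluster.shutdown();
}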

Example 53 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From the class TestNamenodeRetryCache, method testCreate. The test verifies that create calls replayed under the same RPC call identity are answered from the NameNode's retry cache with the identical HdfsFileStatus, while a genuinely new call for the now-existing file fails.

/**
   * Test for create file
   */
@Test
public void testCreate() throws Exception {
    String src = "/testNamenodeRetryCache/testCreate/file";
    // Two retried calls succeed: the retry cache replays the first result.
    newCall();
    HdfsFileStatus status = nnRpc.create(src, perm, "holder",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
        true, (short) 1, BlockSize, null);
    Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
        true, (short) 1, BlockSize, null));
    Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
        true, (short) 1, BlockSize, null));
    // A non-retried call fails: the file already exists and
    // CreateFlag.OVERWRITE is not requested.
    newCall();
    try {
        nnRpc.create(src, perm, "holder",
            new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
            true, (short) 1, BlockSize, null);
        Assert.fail("testCreate - expected exception is not thrown");
    } catch (IOException e) {
        // expected
    }
}
Also used: CreateFlag (org.apache.hadoop.fs.CreateFlag), EnumSetWritable (org.apache.hadoop.io.EnumSetWritable), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), IOException (java.io.IOException), Test (org.junit.Test)
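The test hinges on the newCall() helper, which installs a fresh Server.Call in the IPC server's thread-local; every RPC issued until the next newCall() carries the same (clientId, callId) pair and is therefore treated as a retry of one call. A hedged sketch of what such a helper looks like; the exact Server.Call constructor arguments are assumptions based on the Hadoop IPC API of this era, not copied from the test.

import org.apache.hadoop.ipc.ClientId;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;

// Fixed client id plus an incrementing call id: reusing the pair across RPCs
// makes the NameNode answer them from its retry cache.
static final byte[] CLIENT_ID = ClientId.getClientId();
static int callId = 100;

static void newCall() {
    Server.Call call = new Server.Call(++callId, 1, null, null,
        RPC.RpcKind.RPC_PROTOCOL_BUFFER, CLIENT_ID);
    Server.getCurCall().set(call);
}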

Example 54 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hbase by apache.

From the class TestHFileOutputFormat2, method getStoragePolicyNameForOldHDFSVersion. On older HDFS versions the storage-policy ID from a file's HdfsFileStatus is mapped to a policy name by scanning the configured BlockStoragePolicy set; the ID_UNSPECIFIED constant is read reflectively since, as the method name suggests, it is not exposed the same way on every HDFS release.

private String getStoragePolicyNameForOldHDFSVersion(FileSystem fs, Path path) {
    try {
        if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            HdfsFileStatus status = dfs.getClient().getFileInfo(path.toUri().getPath());
            if (null != status) {
                byte storagePolicyId = status.getStoragePolicy();
                // Read ID_UNSPECIFIED reflectively: the constant is not
                // uniformly available across HDFS versions.
                Field idUnspecified = BlockStoragePolicySuite.class.getField("ID_UNSPECIFIED");
                if (storagePolicyId != idUnspecified.getByte(BlockStoragePolicySuite.class)) {
                    // Resolve the numeric policy id to its human-readable name.
                    BlockStoragePolicy[] policies = dfs.getStoragePolicies();
                    for (BlockStoragePolicy policy : policies) {
                        if (policy.getId() == storagePolicyId) {
                            return policy.getName();
                        }
                    }
                }
            }
        }
    } catch (Throwable e) {
        LOG.warn("Failed to get block storage policy of [" + path + "]", e);
    }
    return null;
}
Also used: Field (java.lang.reflect.Field), BlockStoragePolicySuite (org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)
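On Hadoop 2.8 and later the reflection dance is unnecessary, since FileSystem exposes the policy directly. A hedged counterpart sketch; the availability of getStoragePolicy on the target Hadoop version is the assumption here, and the method name is our own.

import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

private String getStoragePolicyNameForNewHDFSVersion(FileSystem fs, Path path) {
    try {
        // Hadoop 2.8+: resolves the policy in one call, no reflection needed.
        BlockStoragePolicySpi policy = fs.getStoragePolicy(path);
        return policy == null ? null : policy.getName();
    } catch (Throwable e) {
        LOG.warn("Failed to get block storage policy of [" + path + "]", e);
        return null;
    }
}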

Example 55 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From the class NamenodeWebHdfsMethods, method chooseDatanode. WebHDFS uses this method to pick the datanode a client is redirected to: one near the client for CREATE, one holding a replica for OPEN, GETFILECHECKSUM, and APPEND, and otherwise a random node from the cluster topology.

@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode, final String path,
    final HttpOpParam.Op op, final long openOffset, final long blocksize,
    final String excludeDatanodes, final String remoteAddr) throws IOException {
    FSNamesystem fsn = namenode.getNamesystem();
    if (fsn == null) {
        throw new IOException("Namesystem has not been initialized yet.");
    }
    final BlockManager bm = fsn.getBlockManager();
    // Parse the exclude list; entries may be "host" or "host:xferPort".
    HashSet<Node> excludes = new HashSet<Node>();
    if (excludeDatanodes != null) {
        for (String host : StringUtils.getTrimmedStringCollection(excludeDatanodes)) {
            int idx = host.indexOf(":");
            if (idx != -1) {
                excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
                    host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
            } else {
                excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
            }
        }
    }
    if (op == PutOpParam.Op.CREATE) {
        // CREATE: choose a datanode near the client.
        final DatanodeDescriptor clientNode = bm.getDatanodeManager().getDatanodeByHost(remoteAddr);
        if (clientNode != null) {
            final DatanodeStorageInfo[] storages =
                bm.chooseTarget4WebHDFS(path, clientNode, excludes, blocksize);
            if (storages.length > 0) {
                return storages[0].getDatanodeDescriptor();
            }
        }
    } else if (op == GetOpParam.Op.OPEN || op == GetOpParam.Op.GETFILECHECKSUM
        || op == PostOpParam.Op.APPEND) {
        // OPEN/GETFILECHECKSUM/APPEND: choose a datanode holding a replica.
        final NamenodeProtocols np = getRPCServer(namenode);
        final HdfsFileStatus status = np.getFileInfo(path);
        if (status == null) {
            throw new FileNotFoundException("File " + path + " not found.");
        }
        final long len = status.getLen();
        if (op == GetOpParam.Op.OPEN) {
            if (openOffset < 0L || (openOffset >= len && len > 0)) {
                throw new IOException("Offset=" + openOffset + " out of the range [0, "
                    + len + "); " + op + ", path=" + path);
            }
        }
        if (len > 0) {
            // For APPEND/GETFILECHECKSUM, locate the block holding the last byte.
            final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
            final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
            final int count = locations.locatedBlockCount();
            if (count > 0) {
                return bestNode(locations.get(0).getLocations(), excludes);
            }
        }
    }
    // Fall back to a random datanode from the cluster topology.
    return (DatanodeDescriptor) bm.getDatanodeManager().getNetworkTopology()
        .chooseRandom(NodeBase.ROOT, excludes);
}
Also used: NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), Node (org.apache.hadoop.net.Node), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), HashSet (java.util.HashSet), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
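Since the method is package-visible for testing, a unit test in the same package can call it directly. A hedged sketch along the lines of Hadoop's own WebHDFS data-locality tests; the cluster wiring, file path, and chosen operation below are illustrative assumptions.

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;

// Illustrative only: ask for a datanode to host a new file's first block.
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
    .numDataNodes(3).build();
try {
    cluster.waitActive();
    NameNode nn = cluster.getNameNode();
    DatanodeInfo dn = NamenodeWebHdfsMethods.chooseDatanode(nn, "/some/file",
        PutOpParam.Op.CREATE, 0L, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT,
        null, "127.0.0.1");
    System.out.println("chosen datanode: " + dn);
} finally {
    cluster.shutdown();
}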

Aggregations

HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 124 uses
Test (org.junit.Test): 51 uses
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 34 uses
IOException (java.io.IOException): 28 uses
InetSocketAddress (java.net.InetSocketAddress): 28 uses
XDR (org.apache.hadoop.oncrpc.XDR): 28 uses
AccessControlException (org.apache.hadoop.security.AccessControlException): 26 uses
Path (org.apache.hadoop.fs.Path): 23 uses
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 23 uses
FileNotFoundException (java.io.FileNotFoundException): 16 uses
DFSClient (org.apache.hadoop.hdfs.DFSClient): 11 uses
DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing): 11 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8 uses
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 8 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 7 uses
Matchers.anyString (org.mockito.Matchers.anyString): 7 uses
Configuration (org.apache.hadoop.conf.Configuration): 6 uses