
Example 56 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project SSM by Intel-bigdata.

the class TestDBMethod method testInsetFiles.

@Test
public void testInsetFiles() throws Exception {
    Connection conn = null;
    try {
        conn = new TestDBUtil().getTestDBInstance();
        DBAdapter dbAdapter = new DBAdapter(conn);
        String pathString = "testFile";
        long length = 123L;
        boolean isDir = false;
        int blockReplication = 1;
        long blockSize = 128 * 1024L;
        long modTime = 123123123L;
        long accessTime = 123123120L;
        FsPermission perms = FsPermission.getDefault();
        String owner = "root";
        String group = "admin";
        byte[] symlink = null;
        byte[] path = DFSUtil.string2Bytes(pathString);
        long fileId = 312321L;
        int numChildren = 0;
        byte storagePolicy = 0;
    FileStatusInternal[] files = { new FileStatusInternal(length, isDir,
        blockReplication, blockSize, modTime, accessTime, perms, owner, group,
        symlink, path, "/tmp", fileId, numChildren, null, storagePolicy) };
        dbAdapter.insertFiles(files);
        HdfsFileStatus hdfsFileStatus = dbAdapter.getFile("/tmp/testFile");
        Assert.assertEquals(128 * 1024L, hdfsFileStatus.getBlockSize());
    } finally {
        if (conn != null) {
            conn.close();
        }
    }
}
Also used : Connection(java.sql.Connection) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
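
If the round trip needs closer verification, the other attributes inserted above can be asserted on the returned status as well. A minimal sketch continuing the test, assuming DBAdapter.getFile resolves owner and group names through its id-to-name cache the same way they were inserted (these extra assertions are illustrative, not part of the original test):

// Illustrative follow-up assertions on the same returned status
HdfsFileStatus status = dbAdapter.getFile("/tmp/testFile");
Assert.assertEquals(123L, status.getLen());
Assert.assertEquals(128 * 1024L, status.getBlockSize());
Assert.assertEquals("root", status.getOwner());   // assumes the owner id-name mapping survives the round trip
Assert.assertEquals("admin", status.getGroup());  // same assumption for the group
Assert.assertFalse(status.isDir());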

Example 57 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project SSM by Intel-bigdata.

the class TestWriteFileAction method testExecute.

@Test
public void testExecute() throws Exception {
    String filePath = "/testWriteFile/file";
    int size = 66560;
    writeFile(filePath, size);
    HdfsFileStatus fileStatus = dfs.getClient().getFileInfo(filePath);
    Assert.assertEquals(size, fileStatus.getLen());
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Test(org.junit.Test)
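
The writeFile helper is not shown in this snippet and presumably lives in the test's base class. A minimal sketch of what it might look like, assuming the test holds its DistributedFileSystem in the dfs field seen above (the helper body is an assumption, not the project's code):

private void writeFile(String filePath, int size) throws IOException {
    // Arbitrary content; only the resulting file length matters for the assertion above.
    byte[] buffer = new byte[size];
    new Random().nextBytes(buffer);
    // Create the file (overwriting any existing one) and write it in one shot.
    try (FSDataOutputStream out = dfs.create(new Path(filePath), true)) {
        out.write(buffer);
    }
}

This needs java.io.IOException, java.util.Random, org.apache.hadoop.fs.FSDataOutputStream and org.apache.hadoop.fs.Path on top of the imports listed above.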

Example 58 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project SSM by Intel-bigdata.

the class TestInotifyEventApplier method testApplier.

@Test
public void testApplier() throws Exception {
    DFSClient client = mock(DFSClient.class);
    Connection connection = databaseTester.getConnection().getConnection();
    Util.initializeDataBase(connection);
    DBAdapter adapter = new DBAdapter(connection);
    InotifyEventApplier applier = new InotifyEventApplier(adapter, client);
    Event.CreateEvent createEvent = new Event.CreateEvent.Builder()
        .iNodeType(Event.CreateEvent.INodeType.FILE)
        .ctime(1)
        .defaultBlockSize(1024)
        .groupName("cg1")
        .overwrite(true)
        .ownerName("user1")
        .path("/file")
        .perms(new FsPermission("777"))
        .replication(3)
        .build();
    HdfsFileStatus status1 = new HdfsFileStatus(0, false, 1, 1024, 0, 0,
        new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0],
        1010, 0, null, (byte) 0);
    when(client.getFileInfo(anyString())).thenReturn(status1);
    applier.apply(new Event[] { createEvent });
    ResultSet result1 = adapter.executeQuery("SELECT * FROM files");
    Assert.assertTrue(result1.next()); // position the cursor on the first (and only) row
    Assert.assertEquals(result1.getString("path"), "/file");
    Assert.assertEquals(result1.getLong("fid"), 1010L);
    Assert.assertEquals(result1.getShort("permission"), 511);
    Event close = new Event.CloseEvent("/file", 1024, 0);
    applier.apply(new Event[] { close });
    ResultSet result2 = adapter.executeQuery("SELECT * FROM files");
    Assert.assertTrue(result2.next());
    Assert.assertEquals(result2.getLong("length"), 1024);
    Assert.assertEquals(result2.getLong("modification_time"), 0L);
    //    Event truncate = new Event.TruncateEvent("/file", 512, 16);
    //    applier.apply(new Event[] {truncate});
    //    ResultSet result3 = adapter.executeQuery("SELECT * FROM files");
    //    Assert.assertEquals(result3.getLong("length"), 512);
    //    Assert.assertEquals(result3.getLong("modification_time"), 16L);
    Event meta = new Event.MetadataUpdateEvent.Builder()
        .path("/file")
        .metadataType(Event.MetadataUpdateEvent.MetadataType.TIMES)
        .mtime(2)
        .atime(3)
        .replication(4)
        .ownerName("user2")
        .groupName("cg2")
        .build();
    applier.apply(new Event[] { meta });
    ResultSet result4 = adapter.executeQuery("SELECT * FROM files");
    Assert.assertTrue(result4.next());
    Assert.assertEquals(result4.getLong("access_time"), 3);
    Assert.assertEquals(result4.getLong("modification_time"), 2);
    Event.CreateEvent createEvent2 = new Event.CreateEvent.Builder()
        .iNodeType(Event.CreateEvent.INodeType.DIRECTORY)
        .ctime(1)
        .groupName("cg1")
        .overwrite(true)
        .ownerName("user1")
        .path("/dir")
        .perms(new FsPermission("777"))
        .replication(3)
        .build();
    Event.CreateEvent createEvent3 = new Event.CreateEvent.Builder()
        .iNodeType(Event.CreateEvent.INodeType.FILE)
        .ctime(1)
        .groupName("cg1")
        .overwrite(true)
        .ownerName("user1")
        .path("/dir/file")
        .perms(new FsPermission("777"))
        .replication(3)
        .build();
    Event rename = new Event.RenameEvent.Builder()
        .dstPath("/dir2")
        .srcPath("/dir")
        .timestamp(5)
        .build();
    applier.apply(new Event[] { createEvent2, createEvent3, rename });
    ResultSet result5 = adapter.executeQuery("SELECT * FROM files");
    List<String> expectedPaths = Arrays.asList("/dir2", "/dir2/file", "/file");
    List<String> actualPaths = new ArrayList<>();
    while (result5.next()) {
        actualPaths.add(result5.getString("path"));
    }
    Collections.sort(actualPaths);
    Assert.assertEquals(3, actualPaths.size());
    Assert.assertTrue(actualPaths.containsAll(expectedPaths));
    Event unlink = new Event.UnlinkEvent.Builder().path("/").timestamp(6).build();
    applier.apply(new Event[] { unlink });
    ResultSet result6 = adapter.executeQuery("SELECT * FROM files");
    Assert.assertFalse(result6.next());
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Connection(java.sql.Connection) ArrayList(java.util.ArrayList) Matchers.anyString(org.mockito.Matchers.anyString) DBAdapter(org.smartdata.server.metastore.DBAdapter) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) ResultSet(java.sql.ResultSet) Event(org.apache.hadoop.hdfs.inotify.Event) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test) DBTest(org.smartdata.server.metastore.DBTest)
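
One quirk of the test: the stub returns the same status1, and therefore the same file id 1010, for every getFileInfo call. A sketch (not from the project, assumes Java 8) of an id-unique variant using Mockito's thenAnswer, in case distinct ids are needed:

// Hypothetical variant: hand out a fresh file id per getFileInfo call.
final AtomicLong nextFid = new AtomicLong(1010); // java.util.concurrent.atomic.AtomicLong
when(client.getFileInfo(anyString())).thenAnswer(invocation ->
    new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission((short) 777),
        "owner", "group", new byte[0], new byte[0],
        nextFid.getAndIncrement(), 0, null, (byte) 0));

Whether distinct ids matter depends on how DBAdapter keys the files table, so treat this purely as an illustration.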

Example 59 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project SSM by Intel-bigdata.

the class CheckStorageAction method execute.

@Override
protected void execute() {
    ActionStatus actionStatus = getActionStatus();
    actionStatus.begin();
    try {
        HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
        long length = fileStatus.getLen();
        BlockLocation[] blockLocations = dfsClient.getBlockLocations(fileName, 0, length);
        for (BlockLocation blockLocation : blockLocations) {
            StringBuilder hosts = new StringBuilder();
            hosts.append("{");
            for (String host : blockLocation.getHosts()) {
                hosts.append(host).append(" ");
            }
            hosts.append("}");
            this.resultOut.println(hosts);
        }
        actionStatus.setSuccessful(true);
    } catch (Exception e) {
        actionStatus.setSuccessful(false);
        throw new RuntimeException(e);
    } finally {
        actionStatus.end();
    }
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) BlockLocation(org.apache.hadoop.fs.BlockLocation) ActionStatus(org.smartdata.actions.ActionStatus)
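
Each printed line has the shape "{host1 host2 }", one entry per block. For context, a standalone sketch of the same lookup against a bare DFSClient, using a hypothetical path:

// Hypothetical path; dfsClient is assumed to be a connected DFSClient.
HdfsFileStatus fileStatus = dfsClient.getFileInfo("/tmp/testFile");
BlockLocation[] blockLocations =
    dfsClient.getBlockLocations("/tmp/testFile", 0, fileStatus.getLen());
for (BlockLocation blockLocation : blockLocations) {
    // Mirrors the "{host1 host2 }" layout the action writes to resultOut.
    System.out.println("{" + String.join(" ", blockLocation.getHosts()) + " }");
}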

Example 60 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project SSM by Intel-bigdata.

the class DBAdapter method convertFilesTableItem.

/**
   * Convert a query result into a list of HdfsFileStatus.
   * Note: some of the info in HdfsFileStatus is not the same
   *       as what is stored in the NN.
   *
   * @param resultSet query result over the files table; may be null
   * @return the converted statuses; empty if resultSet is null
   */
public List<HdfsFileStatus> convertFilesTableItem(ResultSet resultSet) throws SQLException {
    List<HdfsFileStatus> ret = new LinkedList<>();
    if (resultSet == null) {
        return ret;
    }
    updateCache();
    while (resultSet.next()) {
        HdfsFileStatus status = new HdfsFileStatus(
            resultSet.getLong("length"),
            resultSet.getBoolean("is_dir"),
            resultSet.getShort("block_replication"),
            resultSet.getLong("block_size"),
            resultSet.getLong("modification_time"),
            resultSet.getLong("access_time"),
            new FsPermission(resultSet.getShort("permission")),
            mapOwnerIdName.get(resultSet.getShort("oid")),
            mapGroupIdName.get(resultSet.getShort("gid")),
            null, // symlink, not tracked for now
            resultSet.getString("path").getBytes(),
            resultSet.getLong("fid"),
            0,    // childrenNum, not tracked for now, set to 0
            null, // feInfo, not tracked for now, set to null
            resultSet.getByte("sid"));
        ret.add(status);
    }
    return ret;
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FsPermission(org.apache.hadoop.fs.permission.FsPermission) LinkedList(java.util.LinkedList)
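
A minimal usage sketch for the converter, pairing it with the executeQuery call seen in Example 58 (the query string and the printing are illustrative):

ResultSet resultSet = dbAdapter.executeQuery("SELECT * FROM files");
List<HdfsFileStatus> statuses = dbAdapter.convertFilesTableItem(resultSet);
for (HdfsFileStatus status : statuses) {
    // The converter puts the full path into the name component,
    // so getLocalName() returns the full path here.
    System.out.println(status.getLocalName() + " length=" + status.getLen());
}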

Aggregations

HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 124 uses
Test (org.junit.Test): 51 uses
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 34 uses
IOException (java.io.IOException): 28 uses
InetSocketAddress (java.net.InetSocketAddress): 28 uses
XDR (org.apache.hadoop.oncrpc.XDR): 28 uses
AccessControlException (org.apache.hadoop.security.AccessControlException): 26 uses
Path (org.apache.hadoop.fs.Path): 23 uses
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 23 uses
FileNotFoundException (java.io.FileNotFoundException): 16 uses
DFSClient (org.apache.hadoop.hdfs.DFSClient): 11 uses
DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing): 11 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8 uses
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 8 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 7 uses
Matchers.anyString (org.mockito.Matchers.anyString): 7 uses
Configuration (org.apache.hadoop.conf.Configuration): 6 uses