Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project SSM by Intel-bigdata.
The class TestDBMethod, method testInsetFiles.
@Test
public void testInsetFiles() throws Exception {
  Connection conn = null;
  try {
    conn = new TestDBUtil().getTestDBInstance();
    DBAdapter dbAdapter = new DBAdapter(conn);
    String pathString = "testFile";
    long length = 123L;
    boolean isDir = false;
    int blockReplication = 1;
    long blockSize = 128 * 1024L;
    long modTime = 123123123L;
    long accessTime = 123123120L;
    FsPermission perms = FsPermission.getDefault();
    String owner = "root";
    String group = "admin";
    byte[] symlink = null;
    byte[] path = DFSUtil.string2Bytes(pathString);
    long fileId = 312321L;
    int numChildren = 0;
    byte storagePolicy = 0;
    FileStatusInternal[] files = {
        new FileStatusInternal(length, isDir, blockReplication, blockSize,
            modTime, accessTime, perms, owner, group, symlink, path,
            "/tmp", fileId, numChildren, null, storagePolicy)
    };
    dbAdapter.insertFiles(files);
    HdfsFileStatus hdfsFileStatus = dbAdapter.getFile("/tmp/testFile");
    Assert.assertEquals(128 * 1024L, hdfsFileStatus.getBlockSize());
  } finally {
    if (conn != null) {
      conn.close();
    }
  }
}
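FileStatusInternal appears to be SSM's HdfsFileStatus subclass that additionally carries the parent directory ("/tmp" above), which is why the inserted record is retrievable under the full path. A hypothetical illustration of the composition the getFile lookup relies on (not the project's code):

  // Hypothetical: parent dir + local name form the key used by dbAdapter.getFile().
  String fullPath = "/tmp" + "/" + pathString;  // "/tmp/testFile"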
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project SSM by Intel-bigdata.
The class TestWriteFileAction, method testExecute.
@Test
public void testExecute() throws Exception {
  String filePath = "/testWriteFile/file";
  int size = 66560;
  writeFile(filePath, size);
  HdfsFileStatus fileStatus = dfs.getClient().getFileInfo(filePath);
  Assert.assertEquals(size, fileStatus.getLen());
}
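The writeFile helper is not shown in this excerpt. A minimal sketch of what it might look like, assuming the test holds a MiniDFSCluster-backed DistributedFileSystem in the dfs field (the body is an assumption, not the project's code; it needs org.apache.hadoop.fs.Path, org.apache.hadoop.fs.FSDataOutputStream, and java.util.Random):

  // Hypothetical helper: writes `size` random bytes to `filePath` on the test cluster.
  private void writeFile(String filePath, int size) throws IOException {
    byte[] buffer = new byte[size];
    new Random().nextBytes(buffer);
    try (FSDataOutputStream out = dfs.create(new Path(filePath), true)) {
      out.write(buffer);
    }
  }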
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project SSM by Intel-bigdata.
The class TestInotifyEventApplier, method testApplier.
@Test
public void testApplier() throws Exception {
  DFSClient client = mock(DFSClient.class);
  Connection connection = databaseTester.getConnection().getConnection();
  Util.initializeDataBase(connection);
  DBAdapter adapter = new DBAdapter(connection);
  InotifyEventApplier applier = new InotifyEventApplier(adapter, client);
  Event.CreateEvent createEvent = new Event.CreateEvent.Builder()
      .iNodeType(Event.CreateEvent.INodeType.FILE).ctime(1).defaultBlockSize(1024)
      .groupName("cg1").overwrite(true).ownerName("user1").path("/file")
      .perms(new FsPermission("777")).replication(3).build();
  // Octal 0777 (decimal 511, rwxrwxrwx); a decimal 777 literal would not
  // match the permission assertion below.
  HdfsFileStatus status1 = new HdfsFileStatus(0, false, 1, 1024, 0, 0,
      new FsPermission((short) 0777), "owner", "group", new byte[0], new byte[0],
      1010, 0, null, (byte) 0);
  when(client.getFileInfo(anyString())).thenReturn(status1);
  applier.apply(new Event[] { createEvent });
  ResultSet result1 = adapter.executeQuery("SELECT * FROM files");
  // Position the cursor on the first row before reading columns.
  Assert.assertTrue(result1.next());
  Assert.assertEquals("/file", result1.getString("path"));
  Assert.assertEquals(1010L, result1.getLong("fid"));
  Assert.assertEquals(511, result1.getShort("permission"));
  Event close = new Event.CloseEvent("/file", 1024, 0);
  applier.apply(new Event[] { close });
  ResultSet result2 = adapter.executeQuery("SELECT * FROM files");
  Assert.assertTrue(result2.next());
  Assert.assertEquals(1024, result2.getLong("length"));
  Assert.assertEquals(0L, result2.getLong("modification_time"));
  // Event truncate = new Event.TruncateEvent("/file", 512, 16);
  // applier.apply(new Event[] { truncate });
  // ResultSet result3 = adapter.executeQuery("SELECT * FROM files");
  // Assert.assertEquals(512, result3.getLong("length"));
  // Assert.assertEquals(16L, result3.getLong("modification_time"));
  Event meta = new Event.MetadataUpdateEvent.Builder()
      .path("/file").metadataType(Event.MetadataUpdateEvent.MetadataType.TIMES)
      .mtime(2).atime(3).replication(4).ownerName("user2").groupName("cg2").build();
  applier.apply(new Event[] { meta });
  ResultSet result4 = adapter.executeQuery("SELECT * FROM files");
  Assert.assertTrue(result4.next());
  Assert.assertEquals(3, result4.getLong("access_time"));
  Assert.assertEquals(2, result4.getLong("modification_time"));
  Event.CreateEvent createEvent2 = new Event.CreateEvent.Builder()
      .iNodeType(Event.CreateEvent.INodeType.DIRECTORY).ctime(1)
      .groupName("cg1").overwrite(true).ownerName("user1").path("/dir")
      .perms(new FsPermission("777")).replication(3).build();
  Event.CreateEvent createEvent3 = new Event.CreateEvent.Builder()
      .iNodeType(Event.CreateEvent.INodeType.FILE).ctime(1)
      .groupName("cg1").overwrite(true).ownerName("user1").path("/dir/file")
      .perms(new FsPermission("777")).replication(3).build();
  Event rename = new Event.RenameEvent.Builder()
      .dstPath("/dir2").srcPath("/dir").timestamp(5).build();
  applier.apply(new Event[] { createEvent2, createEvent3, rename });
  ResultSet result5 = adapter.executeQuery("SELECT * FROM files");
  List<String> expectedPaths = Arrays.asList("/dir2", "/dir2/file", "/file");
  List<String> actualPaths = new ArrayList<>();
  while (result5.next()) {
    actualPaths.add(result5.getString("path"));
  }
  Collections.sort(actualPaths);
  Assert.assertEquals(3, actualPaths.size());
  Assert.assertTrue(actualPaths.containsAll(expectedPaths));
  Event unlink = new Event.UnlinkEvent.Builder().path("/").timestamp(6).build();
  applier.apply(new Event[] { unlink });
  ResultSet result6 = adapter.executeQuery("SELECT * FROM files");
  Assert.assertFalse(result6.next());
}
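For reference, the files-table state each assertion block above implies (reconstructed from the asserts, not dumped from an actual run):

  after createEvent:             path=/file, fid=1010, permission=511 (0777)
  after close:                   length=1024, modification_time=0
  after meta (TIMES):            access_time=3, modification_time=2
  after createEvent2/3 + rename: paths are /dir2, /dir2/file, /file
  after unlink("/"):             the table is empty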
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project SSM by Intel-bigdata.
The class CheckStorageAction, method execute.
@Override
protected void execute() {
  ActionStatus actionStatus = getActionStatus();
  actionStatus.begin();
  try {
    HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
    long length = fileStatus.getLen();
    BlockLocation[] blockLocations = dfsClient.getBlockLocations(fileName, 0, length);
    for (BlockLocation blockLocation : blockLocations) {
      StringBuilder hosts = new StringBuilder();
      hosts.append("{");
      for (String host : blockLocation.getHosts()) {
        hosts.append(host).append(" ");
      }
      hosts.append("}");
      this.resultOut.println(hosts);
    }
    actionStatus.setSuccessful(true);
  } catch (Exception e) {
    actionStatus.setSuccessful(false);
    throw new RuntimeException(e);
  } finally {
    actionStatus.end();
  }
}
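The brace-delimited host list is built with manual appends, which leaves a trailing space before the closing brace. An equivalent formatting with java.util.StringJoiner would avoid that (a sketch of an alternative, not the project's code):

  // Alternative sketch using StringJoiner: same "{host1 host2}" shape, no trailing space.
  StringJoiner hosts = new StringJoiner(" ", "{", "}");
  for (String host : blockLocation.getHosts()) {
    hosts.add(host);
  }
  this.resultOut.println(hosts.toString());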
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project SSM by Intel-bigdata.
The class DBAdapter, method convertFilesTableItem.
/**
 * Convert a query result into a list of HdfsFileStatus.
 * Note: some of the fields in HdfsFileStatus are not the same
 * as those stored in the NameNode.
 *
 * @param resultSet query result over the files table, may be null
 * @return the converted list, empty if resultSet is null
 */
public List<HdfsFileStatus> convertFilesTableItem(ResultSet resultSet) throws SQLException {
  List<HdfsFileStatus> ret = new LinkedList<>();
  if (resultSet == null) {
    return ret;
  }
  updateCache();
  while (resultSet.next()) {
    HdfsFileStatus status = new HdfsFileStatus(
        resultSet.getLong("length"),
        resultSet.getBoolean("is_dir"),
        resultSet.getShort("block_replication"),
        resultSet.getLong("block_size"),
        resultSet.getLong("modification_time"),
        resultSet.getLong("access_time"),
        new FsPermission(resultSet.getShort("permission")),
        mapOwnerIdName.get(resultSet.getShort("oid")),
        mapGroupIdName.get(resultSet.getShort("gid")),
        null, // symlink: not tracked for now
        resultSet.getString("path").getBytes(),
        resultSet.getLong("fid"),
        0,    // childrenNum: not tracked for now, set to 0
        null, // feInfo: not tracked for now, set to null
        resultSet.getByte("sid"));
    ret.add(status);
  }
  return ret;
}
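A minimal usage sketch, assuming a DBAdapter wired to an open connection; executeQuery appears above in TestInotifyEventApplier, while the loop body is illustrative:

  // Hypothetical caller: fetch all rows and convert them in one pass.
  ResultSet rs = adapter.executeQuery("SELECT * FROM files");
  List<HdfsFileStatus> statuses = adapter.convertFilesTableItem(rs);
  for (HdfsFileStatus s : statuses) {
    System.out.println(new String(s.getLocalNameInBytes()) + " len=" + s.getLen());
  }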