Example usage of org.apache.hadoop.fs.StorageType from the Intel-bigdata/SSM project.
Taken from the class TestArchiveFileAction, method testAllSsd.
// NOTE(review): the method name says "AllSsd" but the body verifies the
// ARCHIVE storage policy — presumably copy-pasted from an all-SSD test;
// consider renaming to testArchive in a follow-up.
@Test
public void testAllSsd() throws Exception {
  final String file = "/testArchive/file";
  Path dir = new Path("/testArchive");
  dfs.mkdirs(dir);
  // Start the directory on DISK ("HOT") so the mover has replicas to relocate.
  dfs.setStoragePolicy(dir, "HOT");
  // try-with-resources guarantees the stream is closed even if the write throws,
  // instead of leaking an open lease on failure.
  try (FSDataOutputStream out = dfs.create(new Path(file))) {
    out.writeChars("testArchive");
  }
  // Schedule the move of the file's replicas to ARCHIVE storage.
  ArchiveFileAction action = new ArchiveFileAction();
  action.setDfsClient(dfsClient);
  action.setContext(smartContext);
  action.init(file);
  ActionStatus status = action.getActionStatus();
  action.run();
  // Poll until the mover finishes, but bound the wait so a stuck mover
  // fails the test instead of hanging the build indefinitely.
  final long timeoutMs = 120_000L;
  final long deadline = System.currentTimeMillis() + timeoutMs;
  while (!status.isFinished()) {
    if (System.currentTimeMillis() > deadline) {
      Assert.fail("Mover did not finish within " + timeoutMs + " ms");
    }
    System.out.println("Mover running time : " + StringUtils.formatTime(status.getRunningTime()));
    Thread.sleep(1000);
  }
  // Verify the action succeeded and every replica of the first block
  // now resides on ARCHIVE storage.
  Assert.assertTrue(status.isSuccessful());
  LocatedBlock lb = dfsClient.getLocatedBlocks(file, 0).get(0);
  for (StorageType storageType : lb.getStorageTypes()) {
    // assertEquals reports expected vs. actual on failure, unlike assertTrue(==).
    Assert.assertEquals(StorageType.ARCHIVE, storageType);
  }
}
Aggregations