Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
The class TestJobHistoryParsing, method testCountersForFailedTask().
@Test(timeout = 60000)
public void testCountersForFailedTask() throws Exception {
  LOG.info("STARTING testCountersForFailedTask");
  try {
    Configuration conf = new Configuration();
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app = new MRAppWithHistoryWithFailedTask(2, 1, true, this.getClass().getName(), true);
    app.submit(conf);
    Job job = app.getContext().getAllJobs().values().iterator().next();
    JobId jobId = job.getID();
    app.waitForState(job, JobState.FAILED);
    // make sure all events are flushed
    app.waitForState(Service.STATE.STOPPED);
    JobHistory jobHistory = new JobHistory();
    jobHistory.init(conf);
    HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
    JobHistoryParser parser;
    JobInfo jobInfo;
    synchronized (fileInfo) {
      Path historyFilePath = fileInfo.getHistoryFile();
      FSDataInputStream in = null;
      FileContext fc = null;
      try {
        fc = FileContext.getFileContext(conf);
        in = fc.open(fc.makeQualified(historyFilePath));
      } catch (IOException ioe) {
        LOG.info("Cannot open history file: " + historyFilePath, ioe);
        // preserve the root cause instead of swallowing it
        throw new Exception("Cannot open history file", ioe);
      }
      parser = new JobHistoryParser(in);
      jobInfo = parser.parse();
    }
    Exception parseException = parser.getParseException();
    Assert.assertNull("Caught an unexpected exception " + parseException, parseException);
    for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
      TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
      CompletedTask ct = new CompletedTask(yarnTaskID, entry.getValue());
      Assert.assertNotNull("completed task report has null counters", ct.getReport().getCounters());
    }
    final List<String> originalDiagnostics = job.getDiagnostics();
    final String historyError = jobInfo.getErrorInfo();
    assertTrue("No original diagnostics for a failed job", originalDiagnostics != null && !originalDiagnostics.isEmpty());
    assertNotNull("No history error info for a failed job", historyError);
    for (String diagString : originalDiagnostics) {
      assertTrue(historyError.contains(diagString));
    }
  } finally {
    LOG.info("FINISHED testCountersForFailedTask");
  }
}
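The test opens the history file through a FileContext idiom that recurs in this class: qualify the path against the context's default file system, then open it. A minimal sketch of that idiom on its own; the helper name openHistoryFile is hypothetical, not part of the Hadoop source.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

// Hypothetical helper distilling the open pattern used in the test above.
static FSDataInputStream openHistoryFile(Configuration conf, Path historyFilePath) throws IOException {
  FileContext fc = FileContext.getFileContext(conf);
  // makeQualified resolves the path against fc's default URI and working directory
  return fc.open(fc.makeQualified(historyFilePath));
}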
Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
The class TestJobHistoryParsing, method testHistoryParsingForFailedAttempts().
@Test(timeout = 30000)
public void testHistoryParsingForFailedAttempts() throws Exception {
  LOG.info("STARTING testHistoryParsingForFailedAttempts");
  try {
    Configuration conf = new Configuration();
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app = new MRAppWithHistoryWithFailedAttempt(2, 1, true, this.getClass().getName(), true);
    app.submit(conf);
    Job job = app.getContext().getAllJobs().values().iterator().next();
    JobId jobId = job.getID();
    app.waitForState(job, JobState.SUCCEEDED);
    // make sure all events are flushed
    app.waitForState(Service.STATE.STOPPED);
    JobHistory jobHistory = new JobHistory();
    jobHistory.init(conf);
    HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
    JobHistoryParser parser;
    JobInfo jobInfo;
    synchronized (fileInfo) {
      Path historyFilePath = fileInfo.getHistoryFile();
      FSDataInputStream in = null;
      FileContext fc = null;
      try {
        fc = FileContext.getFileContext(conf);
        in = fc.open(fc.makeQualified(historyFilePath));
      } catch (IOException ioe) {
        LOG.info("Cannot open history file: " + historyFilePath, ioe);
        // preserve the root cause instead of swallowing it
        throw new Exception("Cannot open history file", ioe);
      }
      parser = new JobHistoryParser(in);
      jobInfo = parser.parse();
    }
    Exception parseException = parser.getParseException();
    Assert.assertNull("Caught an unexpected exception " + parseException, parseException);
    int numFailedAttempts = 0;
    Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
    for (Task task : job.getTasks().values()) {
      TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
      for (TaskAttempt taskAttempt : task.getAttempts().values()) {
        TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(TypeConverter.fromYarn(taskAttempt.getID()));
        // Verify rack-name for all task attempts (expected value first)
        Assert.assertEquals("rack-name is incorrect", RACK_NAME, taskAttemptInfo.getRackname());
        if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
          numFailedAttempts++;
        }
      }
    }
    Assert.assertEquals("Number of failed attempts doesn't match.", 2, numFailedAttempts);
  } finally {
    LOG.info("FINISHED testHistoryParsingForFailedAttempts");
  }
}
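Both tests install a custom topology resolver through NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY so that RackResolver maps every host to a known rack. A minimal sketch of such a DNSToSwitchMapping, assuming a fixed rack name; the tests' actual MyResolver may differ in detail.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.net.DNSToSwitchMapping;

// Minimal sketch of a fixed-rack resolver; the rack value is an assumption.
public class FixedRackResolver implements DNSToSwitchMapping {
  private static final String RACK_NAME = "/MyRackName"; // assumed value

  @Override
  public List<String> resolve(List<String> names) {
    List<String> racks = new ArrayList<>(names.size());
    for (int i = 0; i < names.size(); i++) {
      racks.add(RACK_NAME); // every host maps to the same rack
    }
    return racks;
  }

  @Override
  public void reloadCachedMappings() {
    // no cache to reload
  }

  @Override
  public void reloadCachedMappings(List<String> names) {
    // no cache to reload
  }
}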
Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
The class DFSTestUtil, method runOperations().
/**
 * Run a set of operations and generate all edit logs
 */
public static void runOperations(MiniDFSCluster cluster, DistributedFileSystem filesystem, Configuration conf, long blockSize, int nnIndex) throws IOException {
  // create FileContext for rename2
  FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
  // OP_ADD 0
  final Path pathFileCreate = new Path("/file_create");
  FSDataOutputStream s = filesystem.create(pathFileCreate);
  // OP_CLOSE 9
  s.close();
  // OP_APPEND 47
  FSDataOutputStream s2 = filesystem.append(pathFileCreate, 4096, null);
  s2.close();
  // OP_UPDATE_BLOCKS 25
  final String updateBlockFile = "/update_blocks";
  FSDataOutputStream fout = filesystem.create(new Path(updateBlockFile), true, 4096, (short) 1, 4096L);
  fout.write(1);
  fout.hflush();
  long fileId = ((DFSOutputStream) fout.getWrappedStream()).getFileId();
  DFSClient dfsclient = DFSClientAdapter.getDFSClient(filesystem);
  LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(updateBlockFile, 0, Integer.MAX_VALUE);
  dfsclient.getNamenode().abandonBlock(blocks.get(0).getBlock(), fileId, updateBlockFile, dfsclient.clientName);
  fout.close();
  // OP_SET_STORAGE_POLICY 45
  filesystem.setStoragePolicy(pathFileCreate, HdfsConstants.HOT_STORAGE_POLICY_NAME);
  // OP_RENAME_OLD 1
  final Path pathFileMoved = new Path("/file_moved");
  filesystem.rename(pathFileCreate, pathFileMoved);
  // OP_DELETE 2
  filesystem.delete(pathFileMoved, false);
  // OP_MKDIR 3
  Path pathDirectoryMkdir = new Path("/directory_mkdir");
  filesystem.mkdirs(pathDirectoryMkdir);
  // OP_ALLOW_SNAPSHOT 29
  filesystem.allowSnapshot(pathDirectoryMkdir);
  // OP_DISALLOW_SNAPSHOT 30
  filesystem.disallowSnapshot(pathDirectoryMkdir);
  // OP_CREATE_SNAPSHOT 26
  String ssName = "snapshot1";
  filesystem.allowSnapshot(pathDirectoryMkdir);
  filesystem.createSnapshot(pathDirectoryMkdir, ssName);
  // OP_RENAME_SNAPSHOT 28
  String ssNewName = "snapshot2";
  filesystem.renameSnapshot(pathDirectoryMkdir, ssName, ssNewName);
  // OP_DELETE_SNAPSHOT 27
  filesystem.deleteSnapshot(pathDirectoryMkdir, ssNewName);
  // OP_SET_REPLICATION 4
  s = filesystem.create(pathFileCreate);
  s.close();
  filesystem.setReplication(pathFileCreate, (short) 1);
  // OP_SET_PERMISSIONS 7
  short permission = 0777;
  filesystem.setPermission(pathFileCreate, new FsPermission(permission));
  // OP_SET_OWNER 8
  filesystem.setOwner(pathFileCreate, "newOwner", null);
  // OP_CLOSE 9 see above
  // OP_SET_GENSTAMP 10 see above
  // OP_SET_NS_QUOTA 11 obsolete
  // OP_CLEAR_NS_QUOTA 12 obsolete
  // OP_TIMES 13
  // Wed, 22 Sep 2010 22:45:27 GMT
  long mtime = 1285195527000L;
  long atime = mtime;
  filesystem.setTimes(pathFileCreate, mtime, atime);
  // OP_SET_QUOTA 14
  filesystem.setQuota(pathDirectoryMkdir, 1000L, HdfsConstants.QUOTA_DONT_SET);
  // OP_SET_QUOTA_BY_STORAGETYPE
  filesystem.setQuotaByStorageType(pathDirectoryMkdir, StorageType.SSD, 888L);
  // OP_RENAME 15
  fc.rename(pathFileCreate, pathFileMoved, Rename.NONE);
  // OP_CONCAT_DELETE 16
  Path pathConcatTarget = new Path("/file_concat_target");
  Path[] pathConcatFiles = new Path[2];
  pathConcatFiles[0] = new Path("/file_concat_0");
  pathConcatFiles[1] = new Path("/file_concat_1");
  // multiple of blocksize for concat
  long length = blockSize * 3;
  short replication = 1;
  long seed = 1;
  DFSTestUtil.createFile(filesystem, pathConcatTarget, length, replication, seed);
  DFSTestUtil.createFile(filesystem, pathConcatFiles[0], length, replication, seed);
  DFSTestUtil.createFile(filesystem, pathConcatFiles[1], length, replication, seed);
  filesystem.concat(pathConcatTarget, pathConcatFiles);
  // OP_TRUNCATE 46
  length = blockSize * 2;
  DFSTestUtil.createFile(filesystem, pathFileCreate, length, replication, seed);
  filesystem.truncate(pathFileCreate, blockSize);
  // OP_SYMLINK 17
  Path pathSymlink = new Path("/file_symlink");
  fc.createSymlink(pathConcatTarget, pathSymlink, false);
  // OP_REASSIGN_LEASE 22
  String filePath = "/hard-lease-recovery-test";
  byte[] bytes = "foo-bar-baz".getBytes();
  DFSClientAdapter.stopLeaseRenewer(filesystem);
  FSDataOutputStream leaseRecoveryPath = filesystem.create(new Path(filePath));
  leaseRecoveryPath.write(bytes);
  leaseRecoveryPath.hflush();
  // Set the hard lease timeout to 1 second.
  cluster.setLeasePeriod(60 * 1000, 1000, nnIndex);
  // wait for lease recovery to complete
  LocatedBlocks locatedBlocks;
  do {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // ignore interruption and poll again
    }
    locatedBlocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(nnIndex), filePath, 0L, bytes.length);
  } while (locatedBlocks.isUnderConstruction());
  // OP_ADD_CACHE_POOL
  filesystem.addCachePool(new CachePoolInfo("pool1"));
  // OP_MODIFY_CACHE_POOL
  filesystem.modifyCachePool(new CachePoolInfo("pool1").setLimit(99L));
  // OP_ADD_PATH_BASED_CACHE_DIRECTIVE
  long id = filesystem.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/path")).setReplication((short) 1).setPool("pool1").build(), EnumSet.of(CacheFlag.FORCE));
  // OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE
  filesystem.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setReplication((short) 2).build(), EnumSet.of(CacheFlag.FORCE));
  // OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE
  filesystem.removeCacheDirective(id);
  // OP_REMOVE_CACHE_POOL
  filesystem.removeCachePool("pool1");
  // OP_SET_ACL
  List<AclEntry> aclEntryList = Lists.newArrayList();
  aclEntryList.add(new AclEntry.Builder().setPermission(FsAction.READ_WRITE).setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER).build());
  aclEntryList.add(new AclEntry.Builder().setName("user").setPermission(FsAction.READ_WRITE).setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER).build());
  aclEntryList.add(new AclEntry.Builder().setPermission(FsAction.WRITE).setScope(AclEntryScope.ACCESS).setType(AclEntryType.GROUP).build());
  aclEntryList.add(new AclEntry.Builder().setPermission(FsAction.NONE).setScope(AclEntryScope.ACCESS).setType(AclEntryType.OTHER).build());
  filesystem.setAcl(pathConcatTarget, aclEntryList);
  // OP_SET_XATTR
  filesystem.setXAttr(pathConcatTarget, "user.a1", new byte[] { 0x31, 0x32, 0x33 });
  filesystem.setXAttr(pathConcatTarget, "user.a2", new byte[] { 0x37, 0x38, 0x39 });
  // OP_REMOVE_XATTR
  filesystem.removeXAttr(pathConcatTarget, "user.a2");
}
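A sketch of driving runOperations from a test, assuming a single-namenode MiniDFSCluster; the method name, cluster settings, and block size here are illustrative, not taken from the Hadoop source.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical driver; 1 MB keeps the block size at or above the default
// namenode minimum so the concat/truncate steps in runOperations succeed.
public void exerciseEditLog() throws IOException {
  Configuration conf = new Configuration();
  long blockSize = 1024 * 1024;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    // nnIndex 0: the only namenode in this single-NN cluster
    DFSTestUtil.runOperations(cluster, fs, conf, blockSize, 0);
  } finally {
    cluster.shutdown();
  }
}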
Use of org.apache.hadoop.fs.FileContext in project lucene-solr by apache.
The class HdfsDirectoryFactory, method renameWithOverwrite().
// perform an atomic rename if possible
public void renameWithOverwrite(Directory dir, String fileName, String toName) throws IOException {
  String hdfsDirPath = getPath(dir);
  FileContext fileContext = FileContext.getFileContext(getConf());
  fileContext.rename(new Path(hdfsDirPath + "/" + fileName), new Path(hdfsDirPath + "/" + toName), Options.Rename.OVERWRITE);
}
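Options.Rename.OVERWRITE makes the rename replace an existing destination, and FileContext.rename signals failure by throwing rather than returning false as FileSystem.rename does; on HDFS the operation is atomic, which is what the method's comment promises. A minimal standalone sketch of the same call, assuming paths on the default filesystem:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

// Minimal sketch: rename src onto dst, replacing dst if it already exists.
static void atomicRename(Configuration conf, Path src, Path dst) throws IOException {
  FileContext fc = FileContext.getFileContext(conf);
  fc.rename(src, dst, Options.Rename.OVERWRITE);
}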
Use of org.apache.hadoop.fs.FileContext in project lucene-solr by apache.
The class HdfsDirectoryFactory, method move().
@Override
public void move(Directory fromDir, Directory toDir, String fileName, IOContext ioContext) throws IOException {
  Directory baseFromDir = getBaseDir(fromDir);
  Directory baseToDir = getBaseDir(toDir);
  if (baseFromDir instanceof HdfsDirectory && baseToDir instanceof HdfsDirectory) {
    Path dir1 = ((HdfsDirectory) baseFromDir).getHdfsDirPath();
    Path dir2 = ((HdfsDirectory) baseToDir).getHdfsDirPath();
    Path file1 = new Path(dir1, fileName);
    Path file2 = new Path(dir2, fileName);
    FileContext fileContext = FileContext.getFileContext(getConf());
    fileContext.rename(file1, file2);
    return;
  }
  super.move(fromDir, toDir, fileName, ioContext);
}
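When both directories are HDFS-backed, the move is a single filesystem-level rename instead of the superclass's generic copy-based fallback. A hypothetical usage sketch, assuming an already-initialized factory and two directories it created (the file name here is illustrative):

import java.io.IOException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;

// Hypothetical helper; fromDir and toDir are assumed to have been created by
// this factory so the HdfsDirectory fast path (an HDFS rename) is taken.
static void relocateSegmentsFile(HdfsDirectoryFactory factory, Directory fromDir, Directory toDir) throws IOException {
  factory.move(fromDir, toDir, "segments_1", IOContext.DEFAULT);
}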