Use of java.io.FileNotFoundException in project groovy by apache: the class Java2GroovyProcessor, method convert.
public static String convert(String filename, String input, boolean withHeader, boolean withNewLines) throws Exception {
    JavaRecognizer parser = getJavaParser(input);
    String[] tokenNames = parser.getTokenNames();
    parser.compilationUnit();
    AST ast = parser.getAST();
    // optionally write the AST out as a FreeMind mind map (.mm),
    // which is a really nice way of seeing the AST, folding nodes etc
    if ("mindmap".equals(System.getProperty("ANTLR.AST".toLowerCase()))) {
        // uppercase to hide from jarjar
        try {
            PrintStream out = new PrintStream(new FileOutputStream(filename + ".mm"));
            Visitor visitor = new MindMapPrinter(out, tokenNames);
            AntlrASTProcessor treewalker = new PreOrderTraversal(visitor);
            treewalker.process(ast);
        } catch (FileNotFoundException e) {
            System.out.println("Cannot create " + filename + ".mm");
        }
    }
    // modify the Java AST into a Groovy AST
    modifyJavaASTintoGroovyAST(tokenNames, ast);
    String[] groovyTokenNames = getGroovyTokenNames(input);
    // groovify the fat Java-like Groovy AST
    groovifyFatJavaLikeGroovyAST(ast, groovyTokenNames);
    // now output
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    Visitor visitor = new SourcePrinter(new PrintStream(baos), groovyTokenNames, withNewLines);
    AntlrASTProcessor traverser = new SourceCodeTraversal(visitor);
    traverser.process(ast);
    String header = "";
    if (withHeader) {
        header = "/*\n" + " Automatically Converted from Java Source \n" + " \n"
                + " by java2groovy v0.0.1 Copyright Jeremy Rayner 2007\n" + " \n"
                + " !! NOT FIT FOR ANY PURPOSE !! \n"
                + " 'java2groovy' cannot be used to convert one working program into another" + " */\n\n";
    }
    return header + new String(baos.toByteArray());
}
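A minimal usage sketch for the converter above, assuming Java2GroovyProcessor is on the classpath; the input file name and the java.nio.file read are illustrative, not part of the groovy project.

import java.nio.file.Files;
import java.nio.file.Paths;

public class ConvertExample {
    public static void main(String[] args) throws Exception {
        String filename = "Foo.java"; // hypothetical input file
        String source = new String(Files.readAllBytes(Paths.get(filename)));
        // withHeader and withNewLines are passed straight through to the
        // header block and the SourcePrinter in convert() above.
        String groovy = Java2GroovyProcessor.convert(filename, source, true, true);
        System.out.println(groovy);
    }
}

Note that a FileNotFoundException inside the optional mind-map branch is caught and logged, so it never aborts the conversion itself.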
Use of java.io.FileNotFoundException in project hadoop by apache: the class UserGroupInformation, method loginUserFromSubject.
/**
 * Log in a user using the given subject.
 * @param subject the subject to use when logging in a user, or null to
 * create a new subject.
 *
 * If subject is not null, the creator of the subject is responsible for
 * renewing credentials.
 *
 * @throws IOException if login fails
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static synchronized void loginUserFromSubject(Subject subject) throws IOException {
    ensureInitialized();
    boolean externalSubject = false;
    try {
        if (subject == null) {
            subject = new Subject();
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Treat subject external: " + treatSubjectExternal
                    + ". When true, assuming keytab is managed externally since logged in from subject");
            }
            externalSubject = treatSubjectExternal;
        }
        LoginContext login = newLoginContext(authenticationMethod.getLoginAppName(), subject, new HadoopConfiguration());
        login.login();
        UserGroupInformation realUser = new UserGroupInformation(subject, externalSubject);
        realUser.setLogin(login);
        realUser.setAuthenticationMethod(authenticationMethod);
        // If the HADOOP_PROXY_USER environment variable or property
        // is specified, create a proxy user as the logged in user.
        String proxyUser = System.getenv(HADOOP_PROXY_USER);
        if (proxyUser == null) {
            proxyUser = System.getProperty(HADOOP_PROXY_USER);
        }
        loginUser = proxyUser == null ? realUser : createProxyUser(proxyUser, realUser);
        String tokenFileLocation = System.getProperty(HADOOP_TOKEN_FILES);
        if (tokenFileLocation == null) {
            tokenFileLocation = conf.get(HADOOP_TOKEN_FILES);
        }
        if (tokenFileLocation != null) {
            for (String tokenFileName : StringUtils.getTrimmedStrings(tokenFileLocation)) {
                if (tokenFileName.length() > 0) {
                    File tokenFile = new File(tokenFileName);
                    if (tokenFile.exists() && tokenFile.isFile()) {
                        Credentials cred = Credentials.readTokenStorageFile(tokenFile, conf);
                        loginUser.addCredentials(cred);
                    } else {
                        LOG.info("tokenFile(" + tokenFileName + ") does not exist");
                    }
                }
            }
        }
        String fileLocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
        if (fileLocation != null) {
            // Load the token storage file and put all of the tokens into the
            // user. Don't use the FileSystem API for reading since it has a lock
            // cycle (HADOOP-9212).
            File source = new File(fileLocation);
            LOG.debug("Reading credentials from location set in {}: {}", HADOOP_TOKEN_FILE_LOCATION, source.getCanonicalPath());
            if (!source.isFile()) {
                throw new FileNotFoundException("Source file " + source.getCanonicalPath() + " from " + HADOOP_TOKEN_FILE_LOCATION + " not found");
            }
            Credentials cred = Credentials.readTokenStorageFile(source, conf);
            LOG.debug("Loaded {} tokens", cred.numberOfTokens());
            loginUser.addCredentials(cred);
        }
        loginUser.spawnAutoRenewalThreadForUserCreds();
    } catch (LoginException le) {
        LOG.debug("failure to login", le);
        throw new KerberosAuthException(FAILURE_TO_LOGIN, le);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("UGI loginUser:" + loginUser);
    }
}
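A short sketch of the two calling patterns the Javadoc above describes; the class name is invented and the JAAS LoginContext wiring is elided for brevity.

import javax.security.auth.Subject;
import org.apache.hadoop.security.UserGroupInformation;

public class SubjectLoginExample {
    public static void main(String[] args) throws Exception {
        // Pattern 1: pass null and let UGI create and populate a fresh Subject.
        UserGroupInformation.loginUserFromSubject(null);

        // Pattern 2: pass a Subject populated by your own JAAS LoginContext.
        // Per the Javadoc, the caller then owns credential renewal.
        Subject external = new Subject();
        // ... run your own LoginContext against 'external' here ...
        UserGroupInformation.loginUserFromSubject(external);

        System.out.println("Logged in as: " + UserGroupInformation.getCurrentUser());
    }
}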
Use of java.io.FileNotFoundException in project hadoop by apache: the class TestSymlinkLocalFS, method testGetLinkStatusPartQualTarget.
/**
 * Test getLinkTarget with a partially qualified target.
 * NB: Hadoop does not support fully qualified URIs for the
 * file scheme (e.g. file://host/tmp/test).
 */
@Test(timeout = 1000)
public void testGetLinkStatusPartQualTarget() throws IOException {
    Path fileAbs = new Path(testBaseDir1() + "/file");
    Path fileQual = new Path(testURI().toString(), fileAbs);
    Path dir = new Path(testBaseDir1());
    Path link = new Path(testBaseDir1() + "/linkToFile");
    Path dirNew = new Path(testBaseDir2());
    Path linkNew = new Path(testBaseDir2() + "/linkToFile");
    wrapper.delete(dirNew, true);
    createAndWriteFile(fileQual);
    wrapper.setWorkingDirectory(dir);
    // Link target is partially qualified, we get the same back.
    wrapper.createSymlink(fileQual, link, false);
    assertEquals(fileQual, wrapper.getFileLinkStatus(link).getSymlink());
    // Because the target was specified with an absolute path, the
    // link fails to resolve after moving the parent directory.
    wrapper.rename(dir, dirNew);
    // The target is still the old path.
    assertEquals(fileQual, wrapper.getFileLinkStatus(linkNew).getSymlink());
    try {
        readFile(linkNew);
        fail("The link should be dangling now.");
    } catch (FileNotFoundException x) {
        // Expected.
    }
    // RawLocalFs only maintains the path part, not the URI, and
    // therefore does not support links to other file systems.
    Path anotherFs = new Path("hdfs://host:1000/dir/file");
    FileUtil.fullyDelete(new File(linkNew.toString()));
    try {
        wrapper.createSymlink(anotherFs, linkNew, false);
        fail("Created a local fs link to a non-local fs");
    } catch (IOException x) {
        // Expected.
    }
}
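The dangling-link failure this test asserts can be reproduced with the plain JDK, independent of Hadoop; a standalone sketch with temp paths of its own (createSymbolicLink may require extra privileges on Windows):

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.nio.file.Files;
import java.nio.file.Path;

public class DanglingLinkExample {
    public static void main(String[] args) throws Exception {
        Path dir = Files.createTempDirectory("symlinkDemo");
        Path target = Files.createFile(dir.resolve("file"));
        Path link = Files.createSymbolicLink(dir.resolve("linkToFile"), target);
        // Delete the target so the link dangles, like the rename in the test above.
        Files.delete(target);
        try (FileInputStream in = new FileInputStream(link.toFile())) {
            System.out.println("unexpectedly opened " + link);
        } catch (FileNotFoundException e) {
            // Opening a dangling symlink fails exactly like opening a missing file.
            System.out.println("dangling link: " + e.getMessage());
        }
    }
}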
Use of java.io.FileNotFoundException in project flink by apache: the class WriteSinkFunction, method cleanFile.
/**
 * Creates the target file if it does not exist and truncates it if it does.
 *
 * @param path the path to the location where the tuples are written
 */
protected void cleanFile(String path) {
    try {
        PrintWriter writer = new PrintWriter(path);
        writer.print("");
        writer.close();
    } catch (FileNotFoundException e) {
        throw new RuntimeException("An error occurred while cleaning the file: " + e.getMessage(), e);
    }
}
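A variant sketch of the same method using try-with-resources, so the writer is closed even when print throws; an alternative under the same contract, not the Flink implementation:

import java.io.FileNotFoundException;
import java.io.PrintWriter;

class FileCleaner {
    // Same behaviour as cleanFile above, but try-with-resources guarantees
    // the writer is closed even if print() throws.
    static void cleanFile(String path) {
        try (PrintWriter writer = new PrintWriter(path)) {
            // new PrintWriter(path) already truncates an existing file;
            // the empty print makes the intent explicit.
            writer.print("");
        } catch (FileNotFoundException e) {
            throw new RuntimeException("An error occurred while cleaning the file: " + e.getMessage(), e);
        }
    }
}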
Use of java.io.FileNotFoundException in project flink by apache: the class ContinuousFileMonitoringFunction, method run.
@Override
public void run(SourceFunction.SourceContext<TimestampedFileInputSplit> context) throws Exception {
    Path p = new Path(path);
    FileSystem fileSystem = FileSystem.get(p.toUri());
    if (!fileSystem.exists(p)) {
        throw new FileNotFoundException("The provided file path " + path + " does not exist.");
    }
    checkpointLock = context.getCheckpointLock();
    switch (watchType) {
        case PROCESS_CONTINUOUSLY:
            while (isRunning) {
                synchronized (checkpointLock) {
                    monitorDirAndForwardSplits(fileSystem, context);
                }
                Thread.sleep(interval);
            }
            break;
        case PROCESS_ONCE:
            synchronized (checkpointLock) {
                if (globalModificationTime == Long.MIN_VALUE) {
                    monitorDirAndForwardSplits(fileSystem, context);
                    globalModificationTime = Long.MAX_VALUE;
                }
                isRunning = false;
            }
            break;
        default:
            isRunning = false;
            throw new RuntimeException("Unknown WatchType: " + watchType);
    }
}
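The fail-fast check at the top of run, which throws FileNotFoundException before any monitoring work starts, generalizes into a small helper; a plain java.io sketch with an invented class name, not Flink's FileSystem API:

import java.io.File;
import java.io.FileNotFoundException;

class PathPrecondition {
    // Fail early with a descriptive FileNotFoundException instead of letting
    // a later read fail with a less helpful error, mirroring run() above.
    static File requireExisting(String path) throws FileNotFoundException {
        File f = new File(path);
        if (!f.exists()) {
            throw new FileNotFoundException("The provided file path " + path + " does not exist.");
        }
        return f;
    }
}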