use of java.io.PrintStream in project hadoop by apache.
the class RollingFileSystemSink method createOrAppendLogFile.
/**
 * Create a new log file and return the {@link FSDataOutputStream}. If a
 * file with the specified path already exists, open the file for append
 * instead.
 *
 * Once the file is open, update {@link #currentFSOutStream},
 * {@link #currentOutStream}, and {@link #currentFilePath}.
 *
 * @param targetFile the target path
 * @throws IOException thrown if both the create and the fallback append fail
 */
private void createOrAppendLogFile(Path targetFile) throws IOException {
  // Try the create first. If the file already exists, the create fails and
  // we fall back to append below; attempting the create is effectively a
  // test-and-set.
  try {
    currentFSOutStream = fileSystem.create(targetFile, false);
    currentOutStream = new PrintStream(currentFSOutStream, true,
        StandardCharsets.UTF_8.name());
  } catch (IOException ex) {
    // Try appending instead. If that also fails, the file either doesn't
    // actually exist yet or the operation actually failed.
    try {
      currentFSOutStream = fileSystem.append(targetFile);
      currentOutStream = new PrintStream(currentFSOutStream, true,
          StandardCharsets.UTF_8.name());
    } catch (IOException ex2) {
      // If the original create failed for a legit but transitory
      // reason, the append will fail because the file now doesn't exist,
      // resulting in a confusing stack trace. To avoid that, we set
      // the cause of the second exception to be the first exception.
      // It's still a tiny bit confusing, but it's enough
      // information that someone should be able to figure it out.
      ex2.initCause(ex);
      throw ex2;
    }
  }

  currentFilePath = targetFile;
}
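For comparison, here is a minimal, self-contained sketch of the same create-then-append pattern written against the local file system with java.nio rather than the Hadoop FileSystem API. The class name and file name are hypothetical; only the test-and-set create and the exception chaining are taken from the code above.

import java.io.IOException;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class CreateOrAppendDemo {

    // Open target for exclusive create; if the file already exists, fall
    // back to append. Attempting the create is itself the existence check.
    static PrintStream createOrAppend(Path target) throws IOException {
        try {
            // CREATE_NEW fails atomically if the file already exists.
            return new PrintStream(
                    Files.newOutputStream(target, StandardOpenOption.CREATE_NEW),
                    true, StandardCharsets.UTF_8.name());
        } catch (IOException ex) {
            try {
                // APPEND without CREATE fails if the file doesn't exist.
                return new PrintStream(
                        Files.newOutputStream(target, StandardOpenOption.APPEND),
                        true, StandardCharsets.UTF_8.name());
            } catch (IOException ex2) {
                // Chain the first failure so the stack trace explains why
                // the append was attempted, as in the Hadoop code above.
                ex2.initCause(ex);
                throw ex2;
            }
        }
    }

    public static void main(String[] args) throws IOException {
        Path log = Paths.get("demo.log"); // hypothetical file name
        try (PrintStream out = createOrAppend(log)) {
            out.println("one line of output");
        }
    }
}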
use of java.io.PrintStream in project hadoop by apache.
the class RollingFileSystemSink method scheduleFlush.
/**
 * Schedule the current interval's directory to be flushed. If this ends up
 * running after the top of the next interval, it will execute immediately.
 *
 * @param when the time the thread should run
 */
private void scheduleFlush(Date when) {
  // Capture the current output stream so the timer task closes the stream
  // that was active when the flush was scheduled.
  final PrintStream toClose = currentOutStream;

  flushTimer.schedule(new TimerTask() {
    @Override
    public void run() {
      synchronized (lock) {
        // This close may have already been done by a putMetrics() call. If it
        // has, the PrintStream will swallow the exception, which is fine.
        toClose.close();
      }

      hasFlushed = true;
    }
  }, when);
}
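As a stand-alone illustration of the scheduling idiom, the sketch below captures the stream in a final local so the TimerTask closes whatever stream was current when the flush was scheduled, even if the sink has since rolled to a new file. All names here are hypothetical stand-ins for the sink's fields.

import java.io.PrintStream;
import java.util.Date;
import java.util.Timer;
import java.util.TimerTask;

public class ScheduledCloseDemo {
    private final Timer flushTimer = new Timer("flush-timer", true);
    private final Object lock = new Object();
    private volatile boolean hasFlushed;

    // Schedule the given stream to be flushed and closed at the given time.
    void scheduleClose(final PrintStream toClose, Date when) {
        flushTimer.schedule(new TimerTask() {
            @Override
            public void run() {
                synchronized (lock) {
                    // PrintStream.close() swallows IOExceptions, so closing
                    // an already-closed stream is harmless.
                    toClose.close();
                }
                hasFlushed = true;
            }
        }, when);
    }
}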
use of java.io.PrintStream in project groovy by apache.
the class AntlrParserPlugin method outputASTInVariousFormsIfNeeded.
private void outputASTInVariousFormsIfNeeded(SourceUnit sourceUnit, SourceBuffer sourceBuffer) {
    // straight xstream output of AST
    String formatProp = System.getProperty("ANTLR.AST".toLowerCase()); // uppercase to hide from jarjar
    if ("xml".equals(formatProp)) {
        saveAsXML(sourceUnit.getName(), ast);
    }

    // 'pretty printer' output of AST
    if ("groovy".equals(formatProp)) {
        try {
            PrintStream out = new PrintStream(new FileOutputStream(sourceUnit.getName() + ".pretty.groovy"));
            Visitor visitor = new SourcePrinter(out, tokenNames);
            AntlrASTProcessor treewalker = new SourceCodeTraversal(visitor);
            treewalker.process(ast);
        } catch (FileNotFoundException e) {
            System.out.println("Cannot create " + sourceUnit.getName() + ".pretty.groovy");
        }
    }

    // mindmap output of the AST (a .mm file for a mindmap viewer),
    // which is a really nice way of seeing the AST, folding nodes etc
    if ("mindmap".equals(formatProp)) {
        try {
            PrintStream out = new PrintStream(new FileOutputStream(sourceUnit.getName() + ".mm"));
            Visitor visitor = new MindMapPrinter(out, tokenNames);
            AntlrASTProcessor treewalker = new PreOrderTraversal(visitor);
            treewalker.process(ast);
        } catch (FileNotFoundException e) {
            System.out.println("Cannot create " + sourceUnit.getName() + ".mm");
        }
    }

    // same mindmap output, but include original line/col info and source code
    if ("extendedMindmap".equals(formatProp)) {
        try {
            PrintStream out = new PrintStream(new FileOutputStream(sourceUnit.getName() + ".mm"));
            Visitor visitor = new MindMapPrinter(out, tokenNames, sourceBuffer);
            AntlrASTProcessor treewalker = new PreOrderTraversal(visitor);
            treewalker.process(ast);
        } catch (FileNotFoundException e) {
            System.out.println("Cannot create " + sourceUnit.getName() + ".mm");
        }
    }

    // html output of AST
    if ("html".equals(formatProp)) {
        try {
            PrintStream out = new PrintStream(new FileOutputStream(sourceUnit.getName() + ".html"));
            List<VisitorAdapter> v = new ArrayList<VisitorAdapter>();
            v.add(new NodeAsHTMLPrinter(out, tokenNames));
            v.add(new SourcePrinter(out, tokenNames));
            Visitor visitors = new CompositeVisitor(v);
            AntlrASTProcessor treewalker = new SourceCodeTraversal(visitors);
            treewalker.process(ast);
        } catch (FileNotFoundException e) {
            System.out.println("Cannot create " + sourceUnit.getName() + ".html");
        }
    }
}
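Each branch above opens a PrintStream over a FileOutputStream with the platform default charset and never closes it. Below is a hedged sketch, not the Groovy project's code, of the same dump-per-format pattern using try-with-resources and explicit UTF-8; the tree walk is abstracted as a Consumer, and every name is hypothetical.

import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.function.Consumer;

public class AstDumpDemo {

    // Write one dump file, closing the stream even when the walk throws.
    // In the Groovy code the walker would be an AntlrASTProcessor.
    static void dump(String baseName, String extension, Consumer<PrintStream> walker) {
        try (PrintStream out = new PrintStream(
                new FileOutputStream(baseName + extension), true,
                StandardCharsets.UTF_8.name())) {
            walker.accept(out);
        } catch (FileNotFoundException | UnsupportedEncodingException e) {
            System.out.println("Cannot create " + baseName + extension);
        }
    }

    public static void main(String[] args) {
        dump("Example", ".pretty.groovy",
                out -> out.println("// pretty-printed source would go here"));
    }
}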
use of java.io.PrintStream in project hadoop by apache.
the class StructureGenerator method output.
/**
 * Output the directory structure to a file; each line of the file contains
 * a directory name. Only the names of empty leaf directories are printed.
 */
private void output(File outFile) throws FileNotFoundException {
  System.out.println("Printing to " + outFile.toString());
  PrintStream out = new PrintStream(outFile);
  root.output(out, null);
  out.close();
}
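Note that the stream above leaks if root.output(...) throws before close() is reached. Here is a small sketch of the same write-lines-to-a-file pattern with try-with-resources; the names are hypothetical, and the PrintStream(File) constructor uses the platform default charset.

import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintStream;
import java.util.Arrays;

public class DirectoryListDemo {

    // Write each directory name on its own line, closing the stream even
    // if writing fails part way through.
    static void writeLines(File outFile, Iterable<String> dirNames) throws FileNotFoundException {
        System.out.println("Printing to " + outFile);
        try (PrintStream out = new PrintStream(outFile)) {
            for (String name : dirNames) {
                out.println(name);
            }
        }
    }

    public static void main(String[] args) throws FileNotFoundException {
        writeLines(new File("dirs.txt"), Arrays.asList("/a", "/a/b", "/c"));
    }
}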
use of java.io.PrintStream in project hadoop by apache.
the class TestFind method processArgumentsMaxDepth.
// check maximum depth is handled
@Test
public void processArgumentsMaxDepth() throws IOException {
  LinkedList<PathData> items = createDirectories();
  Find find = new Find();
  find.getOptions().setMaxDepth(1);
  find.setConf(conf);
  PrintStream out = mock(PrintStream.class);
  find.getOptions().setOut(out);
  PrintStream err = mock(PrintStream.class);
  find.getOptions().setErr(err);
  Expression expr = mock(Expression.class);
  when(expr.apply((PathData) any(), anyInt())).thenReturn(Result.PASS);
  FileStatusChecker fsCheck = mock(FileStatusChecker.class);
  Expression test = new TestExpression(expr, fsCheck);
  find.setRootExpression(test);
  find.processArguments(items);

  InOrder inOrder = inOrder(expr);
  inOrder.verify(expr).setOptions(find.getOptions());
  inOrder.verify(expr).prepare();
  inOrder.verify(expr).apply(item1, 0);
  inOrder.verify(expr).apply(item1a, 1);
  inOrder.verify(expr).apply(item1b, 1);
  inOrder.verify(expr).apply(item2, 0);
  inOrder.verify(expr).apply(item3, 0);
  inOrder.verify(expr).apply(item4, 0);
  inOrder.verify(expr).apply(item5, 0);
  inOrder.verify(expr).apply(item5a, 1);
  inOrder.verify(expr).apply(item5b, 1);
  inOrder.verify(expr).apply(item5c, 1);
  inOrder.verify(expr).apply(item5d, 1);
  inOrder.verify(expr).apply(item5e, 1);
  inOrder.verify(expr).finish();
  verifyNoMoreInteractions(expr);

  InOrder inOrderFsCheck = inOrder(fsCheck);
  inOrderFsCheck.verify(fsCheck).check(item1.stat);
  inOrderFsCheck.verify(fsCheck).check(item1a.stat);
  inOrderFsCheck.verify(fsCheck).check(item1b.stat);
  inOrderFsCheck.verify(fsCheck).check(item2.stat);
  inOrderFsCheck.verify(fsCheck).check(item3.stat);
  inOrderFsCheck.verify(fsCheck).check(item4.stat);
  inOrderFsCheck.verify(fsCheck).check(item5.stat);
  inOrderFsCheck.verify(fsCheck).check(item5a.stat);
  inOrderFsCheck.verify(fsCheck).check(item5b.stat);
  inOrderFsCheck.verify(fsCheck).check(item5c.stat);
  inOrderFsCheck.verify(fsCheck).check(item5d.stat);
  inOrderFsCheck.verify(fsCheck).check(item5e.stat);
  verifyNoMoreInteractions(fsCheck);
  verifyNoMoreInteractions(out);
  verifyNoMoreInteractions(err);
}
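The mocked PrintStreams are what let the test assert that Find wrote nothing to either stream. A minimal sketch of the idiom, assuming Mockito is on the classpath; the class and strings are hypothetical.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;

import java.io.PrintStream;

public class MockStreamDemo {
    public static void main(String[] args) {
        // A mocked PrintStream records calls instead of writing anywhere,
        // so output can be verified without capturing System.out.
        PrintStream out = mock(PrintStream.class);
        out.println("hello");
        verify(out).println("hello");
        verifyNoMoreInteractions(out);
    }
}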