Example usage of java.io.OutputStreamWriter in project Cloud9 by lintool, taken from the main method of the class CreateWordAlignmentCorpus.
/**
 * Reads a raw text file line by line, runs each whitespace-separated token
 * through the language-pair-specific alignment preprocessor, and writes the
 * preprocessed tokens back out space-separated, one line per input line.
 *
 * args[0] = two-letter ISO639 language code (paired with English),
 * args[1] = input path, args[2] = output path. Both files are read/written
 * as UTF-8. Exits with status 1 on bad usage.
 */
public static void main(String[] args) {
    if (args.length != 3) {
        System.err.println("Usage: CreateWordAlignmentCorpus <lang> <infile.txt> <outfile.txt>");
        System.err.println(" (note: lang must be a two-letter ISO639 code)");
        System.exit(1);
    }
    BufferedReader in = null;
    BufferedWriter out = null;
    try {
        in = new BufferedReader(new InputStreamReader(new FileInputStream(args[1]), "UTF8"));
        out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(args[2]), "UTF8"));
        // The source language is always paired with English ("<lang>-en").
        Language fl = Language.languageForISO639_1(args[0]);
        LanguagePair lp = LanguagePair.languageForISO639_1Pair(args[0] + "-en");
        AlignmentWordPreprocessor sawp = AlignmentWordPreprocessor.CreatePreprocessor(lp, fl, null);
        String l;
        while ((l = in.readLine()) != null) {
            String[] res = sawp.preprocessWordsForAlignment(l.split("\\s+"));
            // Join tokens with single spaces, no trailing separator.
            boolean first = true;
            for (String r : res) {
                if (first) {
                    first = false;
                } else {
                    out.write(' ');
                }
                out.write(r);
            }
            out.newLine();
        }
        out.flush();
    } catch (IOException ex) {
        ex.printStackTrace();
    } finally {
        // Close both streams on all paths; the original leaked the reader
        // entirely and leaked the writer on any exception.
        if (in != null) {
            try { in.close(); } catch (IOException ignored) { /* best effort */ }
        }
        if (out != null) {
            try { out.close(); } catch (IOException ignored) { /* best effort */ }
        }
    }
}
Example usage of java.io.OutputStreamWriter in project Cloud9 by lintool, taken from the Parse method of the class ProfileLogParser.
/**
 * Parses profiling log files and writes an aggregate timing summary.
 *
 * Each log line is decoded via {@code itemData}; JOB_START/JOB_FINISH lines
 * override the supplied start/finish times, while per-node MAP/REDUCE
 * start/finish lines are accumulated per node identifier. After all files
 * are read, totals for the pre-map, map, intermediate, reduce and
 * post-reduce phases are written to {@code output}. Nodes missing any of
 * the four timestamps are counted as incomplete and excluded from totals.
 *
 * @param theFiles   log files to parse
 * @param startTime  fallback job start time (ms) if no JOB_START line is seen
 * @param finishTime fallback job finish time (ms) if no JOB_FINISH line is seen
 * @param output     destination for the human-readable summary; closed on success
 * @throws IOException on read or write failure
 */
public void Parse(Collection<File> theFiles, long startTime, long finishTime, OutputStream output) throws IOException {
    BufferedWriter outputWriter = new BufferedWriter(new OutputStreamWriter(output));
    HashMap<String, nodeData> dataHashMap = new HashMap<String, nodeData>();
    long jobStartTime = startTime;
    long jobFinishTime = finishTime;
    for (File currFile : theFiles) {
        BufferedReader inputReader;
        try {
            inputReader = new BufferedReader(new InputStreamReader(new FileInputStream(currFile)));
        } catch (FileNotFoundException e) {
            System.err.println(e.getMessage());
            e.printStackTrace();
            System.err.println("Input file not found");
            return;
        }
        // Close each file's reader even if parsing throws; the original
        // leaked one open reader per input file.
        try {
            String currLine = inputReader.readLine();
            while (currLine != null) {
                itemData currData = new itemData(currLine);
                if (currData.itemType == INVALID) {
                    currLine = inputReader.readLine();
                    continue;
                }
                if (currData.itemType == JOB_START)
                    jobStartTime = currData.timestamp;
                else if (currData.itemType == JOB_FINISH)
                    jobFinishTime = currData.timestamp;
                else {
                    if (!(dataHashMap.containsKey(currData.identifier)))
                        dataHashMap.put(currData.identifier, new nodeData());
                    nodeData currNodeData = dataHashMap.get(currData.identifier);
                    switch(currData.itemType) {
                        case MAP_START:
                            currNodeData.mapStart = currData.timestamp;
                            break;
                        case MAP_FINISH:
                            currNodeData.mapFinish = currData.timestamp;
                            break;
                        case REDUCE_START:
                            currNodeData.reduceStart = currData.timestamp;
                            break;
                        case REDUCE_FINISH:
                            currNodeData.reduceFinish = currData.timestamp;
                            break;
                    }
                }
                currLine = inputReader.readLine();
            }
        } finally {
            inputReader.close();
        }
    }
    //Output the information
    int numNodes = 0;
    int failedReports = 0;
    long timeTaken = jobFinishTime - jobStartTime;
    long preMap = 0L;
    long mapTime = 0L;
    long intermediate = 0L;
    long reduceTime = 0L;
    long postReduce = 0L;
    for (nodeData x : dataHashMap.values()) {
        // A node with any missing phase timestamp is incomplete.
        if ((x.mapStart == 0) || (x.mapFinish == 0) || (x.reduceStart == 0) || (x.reduceFinish == 0))
            failedReports += 1;
        else {
            preMap += (x.mapStart - jobStartTime);
            mapTime += (x.mapFinish - x.mapStart);
            intermediate += (x.reduceStart - x.mapFinish);
            reduceTime += (x.reduceFinish - x.reduceStart);
            numNodes += 1;
            postReduce += (jobFinishTime - x.reduceFinish);
        }
    }
    outputWriter.write(Integer.toString(numNodes) + " total nodes reporting for " + Long.toString(timeTaken) + " milliseconds each\n");
    outputWriter.write("Total node time taken before map operations: " + Long.toString(preMap) + "\n");
    outputWriter.write("Total node time taken for map operations: " + Long.toString(mapTime) + "\n");
    outputWriter.write("Total node time taken between map and reduce operations: " + Long.toString(intermediate) + "\n");
    outputWriter.write("Total node time taken for reduce operations: " + Long.toString(reduceTime) + "\n");
    outputWriter.write("Total node time taken after reduce operations: " + Long.toString(postReduce) + "\n");
    outputWriter.write(Integer.toString(failedReports) + " nodes reporting incomplete data (not counted in above) \n");
    outputWriter.close();
}
Example usage of java.io.OutputStreamWriter in project liquibase, taken from the fixMissing method of the class MissingDataExternalFileChangeGenerator.
@Override
public Change[] fixMissing(DatabaseObject missingObject, DiffOutputControl outputControl, Database referenceDatabase, Database comparisionDatabase, ChangeGeneratorChain chain) {
Statement stmt = null;
ResultSet rs = null;
try {
Data data = (Data) missingObject;
Table table = data.getTable();
if (referenceDatabase.isLiquibaseObject(table)) {
return null;
}
String sql = "SELECT * FROM " + referenceDatabase.escapeTableName(table.getSchema().getCatalogName(), table.getSchema().getName(), table.getName());
stmt = ((JdbcConnection) referenceDatabase.getConnection()).createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
stmt.setFetchSize(100);
rs = stmt.executeQuery(sql);
List<String> columnNames = new ArrayList<String>();
for (int i = 0; i < rs.getMetaData().getColumnCount(); i++) {
columnNames.add(rs.getMetaData().getColumnName(i + 1));
}
String fileName = table.getName().toLowerCase() + ".csv";
if (dataDir != null) {
fileName = dataDir + "/" + fileName;
File parentDir = new File(dataDir);
if (!parentDir.exists()) {
parentDir.mkdirs();
}
if (!parentDir.isDirectory()) {
throw new RuntimeException(parentDir + " is not a directory");
}
}
CSVWriter outputFile = new CSVWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(fileName), LiquibaseConfiguration.getInstance().getConfiguration(GlobalConfiguration.class).getOutputEncoding())));
String[] dataTypes = new String[columnNames.size()];
String[] line = new String[columnNames.size()];
for (int i = 0; i < columnNames.size(); i++) {
line[i] = columnNames.get(i);
}
outputFile.writeNext(line);
int rowNum = 0;
while (rs.next()) {
line = new String[columnNames.size()];
for (int i = 0; i < columnNames.size(); i++) {
Object value = JdbcUtils.getResultSetValue(rs, i + 1);
if (dataTypes[i] == null && value != null) {
if (value instanceof Number) {
dataTypes[i] = "NUMERIC";
} else if (value instanceof Boolean) {
dataTypes[i] = "BOOLEAN";
} else if (value instanceof Date) {
dataTypes[i] = "DATE";
} else {
dataTypes[i] = "STRING";
}
}
if (value == null) {
line[i] = "NULL";
} else {
if (value instanceof Date) {
line[i] = new ISODateFormat().format(((Date) value));
} else {
line[i] = value.toString();
}
}
}
outputFile.writeNext(line);
rowNum++;
if (rowNum % 5000 == 0) {
outputFile.flush();
}
}
outputFile.flush();
outputFile.close();
LoadDataChange change = new LoadDataChange();
change.setFile(fileName);
change.setEncoding(LiquibaseConfiguration.getInstance().getConfiguration(GlobalConfiguration.class).getOutputEncoding());
if (outputControl.getIncludeCatalog()) {
change.setCatalogName(table.getSchema().getCatalogName());
}
if (outputControl.getIncludeSchema()) {
change.setSchemaName(table.getSchema().getName());
}
change.setTableName(table.getName());
for (int i = 0; i < columnNames.size(); i++) {
String colName = columnNames.get(i);
LoadDataColumnConfig columnConfig = new LoadDataColumnConfig();
columnConfig.setHeader(colName);
columnConfig.setName(colName);
columnConfig.setType(dataTypes[i]);
change.addColumn(columnConfig);
}
return new Change[] { change };
} catch (Exception e) {
throw new UnexpectedLiquibaseException(e);
} finally {
if (rs != null) {
try {
rs.close();
} catch (SQLException ignore) {
}
}
if (stmt != null) {
try {
stmt.close();
} catch (SQLException ignore) {
}
}
}
}
Example usage of java.io.OutputStreamWriter in project liquibase, taken from the write method of the class DefaultXmlWriter.
@Override
public void write(Document doc, OutputStream outputStream) throws IOException {
    // Serializes the DOM document to the given stream as indented XML,
    // using the globally configured output encoding.
    try {
        TransformerFactory factory = TransformerFactory.newInstance();
        try {
            // Not all TransformerFactory implementations support this attribute.
            factory.setAttribute("indent-number", 4);
        } catch (Exception e) {
            //guess we can't set it, that's ok
        }
        Transformer transformer = factory.newTransformer();
        transformer.setOutputProperty(OutputKeys.METHOD, "xml");
        transformer.setOutputProperty(OutputKeys.INDENT, "yes");
        transformer.setOutputProperty(OutputKeys.ENCODING, LiquibaseConfiguration.getInstance().getConfiguration(GlobalConfiguration.class).getOutputEncoding());
        //need to nest outputStreamWriter to get around JDK 5 bug. See http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6296446
        OutputStreamWriter writer = new OutputStreamWriter(outputStream, LiquibaseConfiguration.getInstance().getConfiguration(GlobalConfiguration.class).getOutputEncoding());
        transformer.transform(new DOMSource(doc), new StreamResult(writer));
        writer.flush();
        writer.close();
    } catch (TransformerException e) {
        // Preserve the original cause; the old code threw
        // new IOException(e.getMessage()) and lost the stack trace.
        throw new IOException(e.getMessage(), e);
    }
}
Example usage of java.io.OutputStreamWriter in project camel by apache, taken from the getXMLWriter method of the class XmlRpcDataFormat.
/**
 * Builds an XMLWriter for the exchange, targeting the given output stream.
 * The writer uses the exchange's charset, no indentation, and auto-flushing.
 *
 * @throws XmlRpcException if the exchange's charset name is not supported
 */
protected XMLWriter getXMLWriter(Exchange exchange, OutputStream outputStream) throws XmlRpcException {
    String charset = IOHelper.getCharsetName(exchange);
    XMLWriter xmlWriter = new CharSetXMLWriter();
    xmlWriter.setEncoding(charset);
    xmlWriter.setIndenting(false);
    xmlWriter.setFlushing(true);
    try {
        xmlWriter.setWriter(new BufferedWriter(new OutputStreamWriter(outputStream, charset)));
    } catch (UnsupportedEncodingException e) {
        throw new XmlRpcException("Unsupported encoding: " + charset, e);
    }
    return xmlWriter;
}
Aggregations