Use of org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonFactory in project jvm-serializers by eishay.
The class WriteResultsToJavascript, method writeOutputFile.
// ----------------------------------------------------------------------------
// Write output file.
private static void writeOutputFile(File outputFile, List<Entry> entries) throws Exit {
    // Write output.
    try {
        FileOutputStream fout = new FileOutputStream(outputFile);
        try {
            JsonFactory factory = new JsonFactory();
            factory.disable(JsonGenerator.Feature.QUOTE_FIELD_NAMES);
            JsonGenerator gen = factory.createGenerator(fout, JsonEncoding.UTF8);
            gen.useDefaultPrettyPrinter();
            fout.write("var benchmarkResults = ".getBytes("UTF-8"));
            writeJavascriptStats(gen, columns, entries);
        } finally {
            fout.close();
        }
    } catch (IOException ex) {
        throw new Exit(1, "Error writing to output file \"" + outputFile.getPath() + "\": " + ex.getMessage());
    }
}
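For reference, a minimal, self-contained sketch of the same pattern: a JavaScript variable prefix is written to the raw stream, then a generator with QUOTE_FIELD_NAMES disabled emits the object as a JS literal. The file name and field values are illustrative, and the unshaded com.fasterxml.jackson.core imports stand in for Flink's relocated org.apache.flink.shaded.jackson2 package:

import com.fasterxml.jackson.core.JsonEncoding;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class JsVarWriterSketch {
    public static void main(String[] args) throws IOException {
        JsonFactory factory = new JsonFactory();
        // Unquoted field names make the output a JavaScript object literal
        // rather than strict JSON.
        factory.disable(JsonGenerator.Feature.QUOTE_FIELD_NAMES);
        try (FileOutputStream fout = new FileOutputStream("stats.js")) {
            // The prefix goes to the raw stream before the generator emits anything.
            fout.write("var benchmarkResults = ".getBytes(StandardCharsets.UTF_8));
            JsonGenerator gen = factory.createGenerator(fout, JsonEncoding.UTF8);
            gen.useDefaultPrettyPrinter();
            gen.writeStartObject();
            gen.writeNumberField("create", 120); // hypothetical metric
            gen.writeEndObject();
            gen.close(); // flushes the generator's buffered output
        }
    }
}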
Use of org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonFactory in project hadoop by apache.
The class TestLogInfo, method writeDomainLeaveOpen.
private void writeDomainLeaveOpen(TimelineDomain domain, Path logPath) throws IOException {
    if (outStreamDomain == null) {
        outStreamDomain = PluginStoreTestUtils.createLogFile(logPath, fs);
    }
    // Write domain uses its own json generator to isolate from entity writers
    JsonGenerator jsonGeneratorLocal = new JsonFactory().createGenerator(outStreamDomain);
    jsonGeneratorLocal.setPrettyPrinter(new MinimalPrettyPrinter("\n"));
    objMapper.writeValue(jsonGeneratorLocal, domain);
    outStreamDomain.hflush();
}
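MinimalPrettyPrinter's constructor argument becomes the root-value separator, so consecutive top-level documents come out newline-delimited. Below is a hedged sketch of that technique with invented class and method names, writing several records through one generator and deliberately leaving the caller's stream open, as the test above does:

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.util.MinimalPrettyPrinter;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Map;

public class NdjsonAppender {
    private final ObjectMapper mapper = new ObjectMapper();

    /** Writes one JSON document per line without closing the caller's stream. */
    public void appendRecords(OutputStream out, Iterable<Map<String, Object>> records) throws IOException {
        JsonGenerator gen = new JsonFactory().createGenerator(out);
        // Separate consecutive root-level values with a newline.
        gen.setPrettyPrinter(new MinimalPrettyPrinter("\n"));
        for (Map<String, Object> record : records) {
            // writeValue flushes after each value by default and does not
            // close a generator that was passed in.
            mapper.writeValue(gen, record);
        }
        // The generator is intentionally not closed, so `out` stays open
        // for further appends, mirroring the "leave open" idea above.
    }
}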
Use of org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonFactory in project buck by facebook.
The class AuditRulesCommand, method printRulesToStdout.
private void printRulesToStdout(CommandRunnerParams params, List<Map<String, Object>> rawRules, final Predicate<String> includeType) throws IOException {
    Iterable<Map<String, Object>> filteredRules = FluentIterable.from(rawRules).filter(rawRule -> {
        String type = (String) rawRule.get(BuckPyFunction.TYPE_PROPERTY_NAME);
        return includeType.apply(type);
    });
    PrintStream stdOut = params.getConsole().getStdOut();
    if (json) {
        Map<String, Object> rulesKeyedByName = new HashMap<>();
        for (Map<String, Object> rawRule : filteredRules) {
            String name = (String) rawRule.get("name");
            Preconditions.checkNotNull(name);
            rulesKeyedByName.put(name, Maps.filterValues(rawRule, v -> shouldInclude(v)));
        }
        // We create a new JsonGenerator that does not close the stream.
        ObjectMapper mapper = params.getObjectMapper();
        JsonFactory factory = mapper.getFactory();
        try (JsonGenerator generator = factory.createGenerator(stdOut).disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET).useDefaultPrettyPrinter()) {
            mapper.writeValue(generator, rulesKeyedByName);
        }
        stdOut.print('\n');
    } else {
        for (Map<String, Object> rawRule : filteredRules) {
            printRuleAsPythonToStdout(stdOut, rawRule);
        }
    }
}
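The detail that matters here is disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET): without it, closing the generator at the end of the try-with-resources block would close stdout as well. A stripped-down sketch with made-up rule data:

import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public class StdoutJsonSketch {
    public static void main(String[] args) throws IOException {
        ObjectMapper mapper = new ObjectMapper();
        PrintStream stdOut = System.out;
        Map<String, Object> rulesKeyedByName = new LinkedHashMap<>();
        rulesKeyedByName.put("example_rule", Collections.singletonMap("type", "java_library"));
        // AUTO_CLOSE_TARGET is on by default; disabling it means closing the
        // generator leaves the underlying stream usable afterwards.
        try (JsonGenerator generator = mapper.getFactory().createGenerator(stdOut).disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET).useDefaultPrettyPrinter()) {
            mapper.writeValue(generator, rulesKeyedByName);
        }
        stdOut.print('\n'); // still open: only the generator was closed
    }
}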
Use of org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonFactory in project buck by facebook.
The class OfflineScribeLogger, method sendStoredLogs.
private synchronized void sendStoredLogs() {
    ImmutableSortedSet<Path> logsPaths;
    try {
        if (!filesystem.isDirectory(logDir)) {
            // No logs to submit to Scribe.
            return;
        }
        logsPaths = filesystem.getMtimeSortedMatchingDirectoryContents(logDir, LOGFILE_PATTERN);
    } catch (Exception e) {
        LOG.error(e, "Fetching stored logs list failed.");
        return;
    }
    long totalBytesToSend = 0;
    for (Path logPath : logsPaths) {
        // Stop sending if storing or closing has started.
        if (startedStoring || startedClosing) {
            break;
        }
        // Get iterator.
        Iterator<ScribeData> it;
        File logFile;
        try {
            logFile = logPath.toFile();
            totalBytesToSend += logFile.length();
            if (totalBytesToSend > maxScribeOfflineLogsBytes) {
                LOG.warn("Total size of offline logs exceeds the limit. Ceasing to send them to Scribe.");
                return;
            }
            InputStream logFileStream;
            try {
                logFileStream = new BufferedInputStream(new FileInputStream(logFile), BUFFER_SIZE);
            } catch (FileNotFoundException e) {
                LOG.info(e, "There was a problem getting stream for logfile: %s. Likely the logfile was resent and " + "deleted by a concurrent Buck command.", logPath);
                continue;
            }
            it = new ObjectMapper().readValues(new JsonFactory().createParser(logFileStream), ScribeData.class);
        } catch (Exception e) {
            LOG.error(e, "Failed to initiate reading from: %s. File may be corrupted.", logPath);
            continue;
        }
        // Read and submit.
        int scribeLinesInFile = 0;
        List<ListenableFuture<Void>> logFutures = new LinkedList<>();
        Map<String, CategoryData> logReadData = new HashMap<>();
        try {
            boolean interrupted = false;
            // Read data and build per category clusters - dispatch if needed.
            while (it.hasNext()) {
                if (startedStoring || startedClosing) {
                    interrupted = true;
                    break;
                }
                ScribeData newData = it.next();
                // Prepare map entry for new data (dispatch old data if needed).
                if (!logReadData.containsKey(newData.getCategory())) {
                    logReadData.put(newData.getCategory(), new CategoryData());
                }
                CategoryData categoryData = logReadData.get(newData.getCategory());
                if (categoryData.getLinesBytes() > CLUSTER_DISPATCH_SIZE) {
                    logFutures.add(scribeLogger.log(newData.getCategory(), categoryData.getLines()));
                    categoryData.clearData();
                }
                // Add new data to the cluster for the category.
                for (String line : newData.getLines()) {
                    categoryData.addLine(line);
                    scribeLinesInFile++;
                }
            }
            // Send remaining data from per category clusters.
            if (!interrupted) {
                for (Map.Entry<String, CategoryData> logReadDataEntry : logReadData.entrySet()) {
                    if (startedStoring || startedClosing) {
                        interrupted = true;
                        break;
                    }
                    List<String> categoryLines = logReadDataEntry.getValue().getLines();
                    if (categoryLines.size() > 0) {
                        logFutures.add(scribeLogger.log(logReadDataEntry.getKey(), categoryLines));
                    }
                }
            }
            if (interrupted) {
                LOG.info("Stopped while sending from offline log (it will not be removed): %s.", logPath);
                logFutures.clear();
                break;
            }
        } catch (Exception e) {
            LOG.error(e, "Error while reading offline log from: %s. This log will not be removed now. If this " + "error reappears in further runs, the file may be corrupted and should be deleted.", logPath);
            logFutures.clear();
            continue;
        } finally {
            logReadData.clear();
        }
        // Confirm data was successfully sent and remove logfile.
        try {
            Futures.allAsList(logFutures).get(LOG_TIMEOUT, LOG_TIMEOUT_UNIT);
            totalBytesResent.inc(logFile.length());
            totalLinesResent.inc(scribeLinesInFile);
            logfilesResent.inc();
            try {
                filesystem.deleteFileAtPathIfExists(logPath);
            } catch (Exception e) {
                LOG.error(e, "Failed to remove successfully resent offline log. Stopping sending.");
                break;
            }
        } catch (Exception e) {
            LOG.info("Failed to send all data from offline log: %s. Log will not be removed.", logPath);
            // Do not attempt to send data from further logfiles - likely there are network issues.
            break;
        } finally {
            logFutures.clear();
        }
    }
}
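The JsonFactory appears in the line that builds `it`: ObjectMapper.readValues wraps a streaming parser in an iterator over consecutive root-level JSON values, so the offline log is replayed one record at a time instead of being loaded whole. A minimal sketch of that reading pattern, with a placeholder file name and Map values standing in for ScribeData:

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;

public class LogReplaySketch {
    public static void main(String[] args) throws IOException {
        ObjectMapper mapper = new ObjectMapper();
        try (InputStream in = new BufferedInputStream(new FileInputStream("offline.log"))) {
            // readValues returns a lazy iterator over consecutive root-level
            // JSON values; nothing is bound until next() is called.
            MappingIterator<Map<String, Object>> it = mapper.readValues(
                new JsonFactory().createParser(in), new TypeReference<Map<String, Object>>() {});
            while (it.hasNext()) {
                System.out.println(it.next());
            }
        }
    }
}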
Use of org.apache.flink.shaded.jackson2.com.fasterxml.jackson.core.JsonFactory in project rest.li by linkedin.
The class TestJacksonCodec, method testNoStringIntern.
/**
 * Test to make sure that field names are not interned.
 *
 * @throws IOException
 */
@Test
public void testNoStringIntern() throws IOException {
    final String keyName = "testKey";
    final String json = "{ \"" + keyName + "\" : 1 }";
    final byte[] jsonAsBytes = json.getBytes(Data.UTF_8_CHARSET);
    {
        final JsonFactory jsonFactory = new JsonFactory();
        final JacksonDataCodec codec = new JacksonDataCodec(jsonFactory);
        // make sure intern field names is not enabled
        assertFalse(jsonFactory.isEnabled(JsonFactory.Feature.INTERN_FIELD_NAMES));
        assertTrue(jsonFactory.isEnabled(JsonFactory.Feature.CANONICALIZE_FIELD_NAMES));
        final DataMap map = codec.bytesToMap(jsonAsBytes);
        final String key = map.keySet().iterator().next();
        assertNotSame(key, keyName);
    }
    {
        final JsonFactory jsonFactory = new JsonFactory();
        final JacksonDataCodec codec = new JacksonDataCodec(jsonFactory);
        // enable intern field names
        jsonFactory.enable(JsonFactory.Feature.INTERN_FIELD_NAMES);
        assertTrue(jsonFactory.isEnabled(JsonFactory.Feature.INTERN_FIELD_NAMES));
        assertTrue(jsonFactory.isEnabled(JsonFactory.Feature.CANONICALIZE_FIELD_NAMES));
        final DataMap map = codec.bytesToMap(jsonAsBytes);
        final String key = map.keySet().iterator().next();
        assertSame(key, keyName);
    }
}
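A fresh JsonFactory has INTERN_FIELD_NAMES enabled by default, so the first assertFalse implies that the JacksonDataCodec constructor disables it on the factory it receives. The same identity effect can be observed with the core streaming API alone; a hedged sketch:

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import java.io.IOException;

public class InternDemoSketch {
    public static void main(String[] args) throws IOException {
        JsonFactory factory = new JsonFactory();
        // On by default; disabled here so parsed names are not String.intern()ed.
        factory.disable(JsonFactory.Feature.INTERN_FIELD_NAMES);
        try (JsonParser parser = factory.createParser("{ \"testKey\" : 1 }")) {
            parser.nextToken(); // START_OBJECT
            parser.nextToken(); // FIELD_NAME
            String key = parser.getCurrentName();
            System.out.println(key.equals("testKey")); // true: same characters
            System.out.println(key == "testKey");      // false: not the interned literal
        }
    }
}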