Use of com.fasterxml.jackson.databind.ObjectWriter in project gate-core by GateNLP.
Example from the class DocumentJsonUtils, method writeDocument.
/**
 * Write a substring of a GATE document to the specified
 * JsonGenerator. The specified window of document text will be
 * written as a property named "text" and the specified annotations
 * will be written as "entities", with their offsets adjusted to be
 * relative to the specified window.
 *
 * @param doc the document to write
 * @param start the start offset of the segment to write
 * @param end the end offset of the segment to write
 * @param annotationsMap the annotations to write, grouped by the
 *          entity type name under which they should appear in the
 *          generated "entities" object.
 * @param extraFeatures additional properties to add to the generated
 *          JSON. If the map includes a "text" key this will be
 *          ignored, and if it contains a key "entities" whose value
 *          is a map then these entities will be merged with the
 *          generated ones derived from the annotationsMap. This would
 *          typically be used for documents that were originally
 *          derived from Twitter data, to re-create the original JSON.
 * @param annotationTypeProperty if non-null, the annotation type will
 *          be written as a property under this name, as if it were an
 *          additional feature of each annotation.
 * @param annotationIDProperty if non-null, the annotation ID will
 *          be written as a property under this name, as if it were an
 *          additional feature of each annotation.
 * @param json the {@link JsonGenerator} to write to.
 * @throws JsonGenerationException if a problem occurs while
 *           generating the JSON
 * @throws IOException if an I/O error occurs.
 * @throws InvalidOffsetException if start or end is not a valid
 *           offset into the document's content.
 */
public static void writeDocument(Document doc, Long start, Long end, Map<String, Collection<Annotation>> annotationsMap, Map<?, ?> extraFeatures, String annotationTypeProperty, String annotationIDProperty, JsonGenerator json) throws JsonGenerationException, IOException, InvalidOffsetException {
    ObjectWriter writer = MAPPER.writer();
    json.writeStartObject();
    // escape() may change character offsets; the RepositioningInfo it
    // fills in lets us map annotation offsets into the escaped text.
    RepositioningInfo repos = new RepositioningInfo();
    String text = escape(doc.getContent().getContent(start, end).toString(), repos);
    json.writeStringField("text", text);
    json.writeFieldName("entities");
    json.writeStartObject();
    // if the extraFeatures already includes entities, merge them with
    // the new ones we create
    Object entitiesExtraFeature = (extraFeatures == null) ? null : extraFeatures.get("entities");
    Map<?, ?> entitiesMap = null;
    if (entitiesExtraFeature instanceof Map) {
        entitiesMap = (Map<?, ?>) entitiesExtraFeature;
    }
    for (Map.Entry<String, Collection<Annotation>> annsByType : annotationsMap.entrySet()) {
        String annotationType = annsByType.getKey();
        Collection<Annotation> annotations = annsByType.getValue();
        json.writeFieldName(annotationType);
        json.writeStartArray();
        for (Annotation a : annotations) {
            json.writeStartObject();
            // indices:[start, end], corrected to match the sub-range of
            // text we're writing
            json.writeArrayFieldStart("indices");
            json.writeNumber(repos.getOriginalPos(a.getStartNode().getOffset() - start, true));
            json.writeNumber(repos.getOriginalPos(a.getEndNode().getOffset() - start, false));
            // end of indices
            json.writeEndArray();
            if (annotationTypeProperty != null) {
                json.writeStringField(annotationTypeProperty, a.getType());
            }
            if (annotationIDProperty != null) {
                json.writeNumberField(annotationIDProperty, a.getId());
            }
            // other features
            for (Map.Entry<?, ?> feature : a.getFeatures().entrySet()) {
                if (annotationTypeProperty != null && annotationTypeProperty.equals(feature.getKey())) {
                    // already written above from a.getType() - skip the
                    // feature to avoid a duplicate JSON key
                    continue;
                }
                if (annotationIDProperty != null && annotationIDProperty.equals(feature.getKey())) {
                    // likewise already written above from a.getId()
                    continue;
                }
                json.writeFieldName(String.valueOf(feature.getKey()));
                writer.writeValue(json, feature.getValue());
            }
            // end of annotation
            json.writeEndObject();
        }
        // add any entities from the extraFeatures map
        if (entitiesMap != null && entitiesMap.get(annotationType) instanceof Collection) {
            for (Object ent : (Collection<?>) entitiesMap.get(annotationType)) {
                writer.writeValue(json, ent);
            }
        }
        json.writeEndArray();
    }
    if (entitiesMap != null) {
        for (Map.Entry<?, ?> entitiesEntry : entitiesMap.entrySet()) {
            if (!annotationsMap.containsKey(entitiesEntry.getKey())) {
                // not an entity type we've already seen
                json.writeFieldName(String.valueOf(entitiesEntry.getKey()));
                writer.writeValue(json, entitiesEntry.getValue());
            }
        }
    }
    // end of entities
    json.writeEndObject();
    if (extraFeatures != null) {
        for (Map.Entry<?, ?> feature : extraFeatures.entrySet()) {
            if ("text".equals(feature.getKey()) || "entities".equals(feature.getKey())) {
                // already dealt with text and entities
                continue;
            }
            json.writeFieldName(String.valueOf(feature.getKey()));
            writer.writeValue(json, feature.getValue());
        }
    }
    // end of document
    json.writeEndObject();
    // Make sure that everything we have generated is flushed to the
    // underlying OutputStream. It seems that not doing this can easily
    // lead to corrupt files that just end in the middle of a JSON
    // object. This occurs even if you flush the OutputStream instance
    // as the data never leaves the JsonGenerator
    json.flush();
}
Use of com.fasterxml.jackson.databind.ObjectWriter in project jackson-jaxrs-propertyfiltering by HubSpot.
Example from the class PropertyFilteringMessageBodyWriter, method writeTo.
@Override
public void writeTo(Object o, Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType, MultivaluedMap<String, Object> httpHeaders, OutputStream os) throws IOException {
    // Collect the property names requested via a @PropertyFiltering
    // annotation on the resource method, if one is present.
    Collection<String> filterProperties = new ArrayList<>();
    PropertyFiltering filtering = findPropertyFiltering(annotations);
    if (filtering != null) {
        filterProperties.addAll(getProperties(filtering.using()));
        filterProperties.addAll(Arrays.asList(filtering.always()));
    }

    PropertyFilter filter = createPropertyFilter(filterProperties, o, type, genericType, annotations, httpHeaders);
    if (!filter.hasFilters()) {
        // Nothing to filter - delegate straight to the unfiltered write path.
        write(o, type, genericType, annotations, mediaType, httpHeaders, os);
        return;
    }

    // Filtered path, timed: serialize the value to a JSON tree, prune the
    // tree in place, then write the pruned tree out.
    Timer.Context timing = getTimer().time();
    try {
        ObjectMapper mapper = getJsonProvider().locateMapper(type, mediaType);
        ObjectWriter writer = JsonEndpointConfig.forWriting(mapper.writer(), annotations, null).getWriter();
        JsonNode tree = valueToTree(mapper, writer, o);
        filter.filter(tree);
        write(tree, tree.getClass(), tree.getClass(), annotations, mediaType, httpHeaders, os);
    } finally {
        timing.stop();
    }
}
Use of com.fasterxml.jackson.databind.ObjectWriter in project OpenTripPlanner by opentripplanner.
Example from the class EmbedConfig, method serializedConfiguration.
/**
 * Renders the given configuration node as pretty-printed JSON.
 *
 * @param config the configuration node to serialize
 * @return the pretty-printed JSON text, or {@code null} if the node is missing
 * @throws JsonProcessingException if serialization fails
 */
private String serializedConfiguration(JsonNode config) throws JsonProcessingException {
    if (config.isMissingNode()) {
        return null;
    }
    return new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(config);
}
Use of com.fasterxml.jackson.databind.ObjectWriter in project endpoints-java by cloudendpoints.
Example from the class GetDiscoveryDocAction, method getDiscoveryDoc.
/**
 * Generates a Discovery document for each API defined by the given service
 * classes, optionally writing each document to disk.
 *
 * @param classPath Class path to load service classes and their dependencies
 * @param outputDirPath Directory to write output files into
 * @param serviceClassNames Service class names of the API
 * @param hostname The hostname to use
 * @param basePath The base path to use
 * @param outputToDisk Whether or not to output discovery docs to disk
 * @return a map from output file path to the Discovery document JSON that
 *         was (or, if {@code outputToDisk} is false, would have been)
 *         written there
 * @throws IllegalArgumentException if {@code outputDirPath} is not a directory
 */
public Map<String, String> getDiscoveryDoc(URL[] classPath, String outputDirPath, List<String> serviceClassNames, String hostname, String basePath, boolean outputToDisk) throws ClassNotFoundException, IOException, ApiConfigException {
    File outputDir = new File(outputDirPath);
    if (!outputDir.isDirectory()) {
        throw new IllegalArgumentException(outputDirPath + " is not a directory");
    }
    ClassLoader classLoader = new URLClassLoader(classPath, getClass().getClassLoader());
    ApiConfig.Factory configFactory = new ApiConfig.Factory();
    TypeLoader typeLoader = new TypeLoader(classLoader);
    SchemaRepository schemaRepository = new SchemaRepository(typeLoader);
    ApiConfigValidator validator = new ApiConfigValidator(typeLoader, schemaRepository);
    DiscoveryGenerator discoveryGenerator = new DiscoveryGenerator(typeLoader);
    List<ApiConfig> apiConfigs = Lists.newArrayListWithCapacity(serviceClassNames.size());
    ApiConfigLoader configLoader = new ApiConfigLoader(configFactory, typeLoader, new ApiConfigAnnotationReader(typeLoader.getAnnotationTypes()));
    ServiceContext serviceContext = ServiceContext.createFromHostname(hostname, ServiceContext.DEFAULT_API_NAME);
    // Load all configurations BEFORE indexing/validating them.
    // Multimaps.index takes a snapshot of the list, so building the index
    // from the (previously empty) list first made the validation a no-op.
    for (Class<?> serviceClass : loadClasses(classLoader, serviceClassNames)) {
        apiConfigs.add(configLoader.loadConfiguration(serviceContext, serviceClass));
    }
    ImmutableListMultimap<ApiKey, ApiConfig> configsByKey = Multimaps.index(apiConfigs, new Function<ApiConfig, ApiKey>() {
        @Override
        public ApiKey apply(ApiConfig input) {
            return input.getApiKey();
        }
    });
    // keySet() (distinct keys) rather than keys(): validating the same
    // key's config list once per duplicate entry is redundant work.
    for (ApiKey key : configsByKey.keySet()) {
        validator.validate(configsByKey.get(key));
    }
    DiscoveryGenerator.Result result = discoveryGenerator.writeDiscovery(apiConfigs, new DiscoveryContext().setHostname(hostname).setBasePath(basePath), schemaRepository);
    ObjectWriter writer = ObjectMapperUtil.createStandardObjectMapper().writer(new EndpointsPrettyPrinter());
    ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
    for (Map.Entry<ApiKey, RestDescription> entry : result.discoveryDocs().entrySet()) {
        ApiKey key = entry.getKey();
        String discoveryDocFilePath = outputDir + "/" + key.getName() + "-" + key.getVersion() + "-rest.discovery";
        String docString = writer.writeValueAsString(entry.getValue());
        if (outputToDisk) {
            Files.write(docString, new File(discoveryDocFilePath), UTF_8);
            System.out.println("API Discovery Document written to " + discoveryDocFilePath);
        }
        builder.put(discoveryDocFilePath, docString);
    }
    return builder.build();
}
Use of com.fasterxml.jackson.databind.ObjectWriter in project hub-fortify-ssc-integration-service by blackducksoftware.
Example from the class CSVUtils, method writeToCSV.
/**
 * Renders the list of vulnerabilities as a CSV file (with a header row).
 *
 * @param vulnerabilities the rows to write
 * @param fileName the path of the CSV file to create
 * @param delimiter the column separator to use
 * @throws JsonGenerationException if CSV generation fails
 * @throws JsonMappingException if a row cannot be mapped to CSV
 * @throws FileNotFoundException declared for compatibility; creation
 *           failures are surfaced as FileSystemNotFoundException
 * @throws UnsupportedEncodingException if UTF-8 is unsupported (never in practice)
 * @throws IOException if writing the file fails
 */
public static void writeToCSV(List<Vulnerability> vulnerabilities, String fileName, char delimiter) throws JsonGenerationException, JsonMappingException, FileNotFoundException, UnsupportedEncodingException, IOException {
    // create mapper and schema; the schema is derived from the
    // Vulnerability bean, with a header row and the caller's separator
    CsvMapper mapper = new CsvMapper();
    CsvSchema schema = mapper.schemaFor(Vulnerability.class).withHeader().withColumnSeparator(delimiter);
    ObjectWriter objectWriter = mapper.writer(schema);
    // try-with-resources guarantees the writer chain is flushed and
    // closed even on failure (the original leaked it behind
    // @SuppressWarnings("resource"), risking a truncated file)
    try (BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(new FileOutputStream(new File(fileName)), 1024);
            OutputStreamWriter writerOutputStream = new OutputStreamWriter(bufferedOutputStream, "UTF-8")) {
        objectWriter.writeValue(writerOutputStream, vulnerabilities);
    } catch (FileNotFoundException e) {
        // preserve the original contract: creation failure surfaces as
        // FileSystemNotFoundException, now with the cause attached
        FileSystemNotFoundException wrapped = new FileSystemNotFoundException(fileName + " CSV file is not created successfully");
        wrapped.initCause(e);
        throw wrapped;
    } catch (UnsupportedEncodingException e) {
        // keep the declared exception type flowing through unchanged
        throw e;
    } catch (IOException e) {
        throw new IOException("Error while rendering the vulnerabilities in CSV file::" + fileName, e);
    }
}
Aggregations