Use of iso.std.iso._20022.tech.xsd.pain_001_001.Document in project jspwiki by apache.
The class XmlUtil, method parse.
/**
* Parses the given stream and returns the requested nodes. If there's an error accessing or parsing the stream, an
* empty list is returned.
*
* @param xmlStream stream to parse.
* @param requestedNodes XPath expression selecting the requested nodes.
* @return the requested nodes of the XML stream.
*/
public static List<Element> parse(InputStream xmlStream, String requestedNodes) {
    if (xmlStream != null && StringUtils.isNotEmpty(requestedNodes)) {
        SAXBuilder builder = new SAXBuilder();
        try {
            Document doc = builder.build(xmlStream);
            XPathFactory xpfac = XPathFactory.instance();
            XPathExpression<Element> xp = xpfac.compile(requestedNodes, Filters.element());
            return xp.evaluate(doc);
        } catch (IOException ioe) {
            log.error("Couldn't load all " + xmlStream + " resources", ioe);
        } catch (JDOMException jdome) {
            log.error("error parsing " + xmlStream + " resources", jdome);
        }
    }
    return Collections.<Element>emptyList();
}
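A minimal usage sketch for the method above; the resource name and the XPath expression are assumptions chosen for illustration, not values taken from the snippet:

// Hypothetical caller; resource name and XPath are illustrative assumptions.
InputStream in = XmlUtil.class.getClassLoader().getResourceAsStream("ini/jspwiki_module.xml");
for (Element plugin : XmlUtil.parse(in, "/modules/plugin")) {
    System.out.println("module class: " + plugin.getAttributeValue("class"));
}

Because parse returns an empty list on any error, callers can iterate the result directly without null checks.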
Use of iso.std.iso._20022.tech.xsd.pain_001_001.Document in project pom-manipulation-ext by release-engineering.
The class PomIO, method write.
private void write(final Project project, final File pom, final Model model) throws ManipulationException {
    try {
        final String manifestInformation = project.isInheritanceRoot() ? ManifestUtils.getManifestInformation() : null;
        MavenJDOMWriter mjw = new MavenJDOMWriter(model);
        // We possibly could store the EOL type in the Project when we first read
        // the file but we would then have to do a dual read, then write as opposed
        // to a read, then read + write now.
        LineSeparator ls = determineEOL(pom);
        mjw.setLineSeparator(ls);
        mjw.write(model, pom, new DocumentModifier() {

            @Override
            public void postProcess(final Document doc) {
                // Only add the modified by to the top level pom.
                if (project.isExecutionRoot()) {
                    final Iterator<Content> it = doc.getContent(new ContentFilter(ContentFilter.COMMENT)).iterator();
                    while (it.hasNext()) {
                        final Comment c = (Comment) it.next();
                        if (c.toString().contains(MODIFIED_BY)) {
                            it.remove();
                        }
                    }
                    doc.addContent(Collections.<Content>singletonList(new Comment("\nModified by POM Manipulation Extension for Maven " + manifestInformation + "\n")));
                }
            }
        });
    } catch (final IOException e) {
        throw new ManipulationException("Failed to read POM for rewrite: %s. Reason: %s", e, pom, e.getMessage());
    } catch (final JDOMException e) {
        throw new ManipulationException("Failed to parse POM for rewrite: %s. Reason: %s", e, pom, e.getMessage());
    }
}
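The determineEOL helper is referenced but not shown. A minimal sketch, assuming it scans the POM for the first line terminator and maps it to a JDOM2 LineSeparator; this is an illustration, not the project's actual implementation:

// Sketch only: assumes the first line terminator found is representative of the whole file.
private static LineSeparator determineEOL(final File pom) throws ManipulationException {
    try (BufferedInputStream in = new BufferedInputStream(new FileInputStream(pom))) {
        int c;
        while ((c = in.read()) != -1) {
            if (c == '\r') {
                // Peek at the next byte to distinguish CRLF from a bare CR.
                return in.read() == '\n' ? LineSeparator.CRNL : LineSeparator.CR;
            } else if (c == '\n') {
                return LineSeparator.NL;
            }
        }
        // Assumption: fall back to the platform default when no terminator is found.
        return LineSeparator.SYSTEM;
    } catch (final IOException ioe) {
        throw new ManipulationException("Unable to determine EOL of %s", ioe, pom);
    }
}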
Use of iso.std.iso._20022.tech.xsd.pain_001_001.Document in project java-docs-samples by GoogleCloudPlatform.
The class Analyze, method analyzeEntitiesFile.
/**
* Identifies entities in the contents of the object at the given GCS {@code path}.
*/
public static void analyzeEntitiesFile(String gcsUri) throws Exception {
    // Instantiate the Language client com.google.cloud.language.v1.LanguageServiceClient
    try (LanguageServiceClient language = LanguageServiceClient.create()) {
        // set the GCS Content URI path to the file to be analyzed
        Document doc = Document.newBuilder().setGcsContentUri(gcsUri).setType(Type.PLAIN_TEXT).build();
        AnalyzeEntitiesRequest request = AnalyzeEntitiesRequest.newBuilder().setDocument(doc).setEncodingType(EncodingType.UTF16).build();
        AnalyzeEntitiesResponse response = language.analyzeEntities(request);
        // Print the response
        for (Entity entity : response.getEntitiesList()) {
            System.out.printf("Entity: %s\n", entity.getName());
            System.out.printf("Salience: %.3f\n", entity.getSalience());
            System.out.println("Metadata: ");
            for (Map.Entry<String, String> entry : entity.getMetadataMap().entrySet()) {
                System.out.printf("%s : %s\n", entry.getKey(), entry.getValue());
            }
            for (EntityMention mention : entity.getMentionsList()) {
                System.out.printf("Begin offset: %d\n", mention.getText().getBeginOffset());
                System.out.printf("Content: %s\n", mention.getText().getContent());
                System.out.printf("Type: %s\n\n", mention.getType());
            }
        }
    }
    // [END analyze_entities_gcs]
}
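A hedged invocation sketch: the gs:// path is an assumption, and the client typically resolves credentials through Application Default Credentials (for example, the GOOGLE_APPLICATION_CREDENTIALS environment variable):

// Hypothetical caller; the bucket and object name are illustrative.
Analyze.analyzeEntitiesFile("gs://my-bucket/sample-text.txt");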
Use of iso.std.iso._20022.tech.xsd.pain_001_001.Document in project java-docs-samples by GoogleCloudPlatform.
The class Analyze, method entitySentimentFile.
/**
 * Identifies the entity sentiments in the GCS-hosted file using the Language API.
 */
public static void entitySentimentFile(String gcsUri) throws Exception {
    // Instantiate the Language client com.google.cloud.language.v1.LanguageServiceClient
    try (LanguageServiceClient language = LanguageServiceClient.create()) {
        Document doc = Document.newBuilder().setGcsContentUri(gcsUri).setType(Type.PLAIN_TEXT).build();
        AnalyzeEntitySentimentRequest request = AnalyzeEntitySentimentRequest.newBuilder().setDocument(doc).setEncodingType(EncodingType.UTF16).build();
        // Detect entity sentiments in the given file
        AnalyzeEntitySentimentResponse response = language.analyzeEntitySentiment(request);
        // Print the response
        for (Entity entity : response.getEntitiesList()) {
            System.out.printf("Entity: %s\n", entity.getName());
            System.out.printf("Salience: %.3f\n", entity.getSalience());
            System.out.printf("Sentiment : %s\n", entity.getSentiment());
            for (EntityMention mention : entity.getMentionsList()) {
                System.out.printf("Begin offset: %d\n", mention.getText().getBeginOffset());
                System.out.printf("Content: %s\n", mention.getText().getContent());
                System.out.printf("Magnitude: %.3f\n", mention.getSentiment().getMagnitude());
                System.out.printf("Sentiment score : %.3f\n", mention.getSentiment().getScore());
                System.out.printf("Type: %s\n\n", mention.getType());
            }
        }
    }
    // [END entity_sentiment_file]
}
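For comparison, the same request can be built from inline text instead of a GCS URI. A minimal sketch using the builder's setContent in place of setGcsContentUri; the sample sentence is an assumption:

try (LanguageServiceClient language = LanguageServiceClient.create()) {
    // Assumption: analyze inline text rather than a GCS-hosted file.
    Document doc = Document.newBuilder().setContent("The new phone is great, but the battery is disappointing.").setType(Type.PLAIN_TEXT).build();
    AnalyzeEntitySentimentRequest request = AnalyzeEntitySentimentRequest.newBuilder().setDocument(doc).setEncodingType(EncodingType.UTF16).build();
    AnalyzeEntitySentimentResponse response = language.analyzeEntitySentiment(request);
    System.out.printf("Entities with sentiment: %d\n", response.getEntitiesCount());
}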
Use of iso.std.iso._20022.tech.xsd.pain_001_001.Document in project java-docs-samples by GoogleCloudPlatform.
The class AnalyzeBeta, method classifyFile.
/**
 * Detects categories in a GCS-hosted file using the Language Beta API.
 */
public static void classifyFile(String gcsUri) throws Exception {
    // Instantiate a beta client : com.google.cloud.language.v1beta2.LanguageServiceClient
    try (LanguageServiceClient language = LanguageServiceClient.create()) {
        // set the GCS content URI path
        Document doc = Document.newBuilder().setGcsContentUri(gcsUri).setType(Type.PLAIN_TEXT).build();
        ClassifyTextRequest request = ClassifyTextRequest.newBuilder().setDocument(doc).build();
        // detect categories in the given file
        ClassifyTextResponse response = language.classifyText(request);
        for (ClassificationCategory category : response.getCategoriesList()) {
            System.out.printf("Category name : %s, Confidence : %.3f\n", category.getName(), category.getConfidence());
        }
    }
    // [END classify_file]
}
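A hedged invocation sketch; the path is an assumption, and note that classifyText rejects documents with too little text to classify:

// Hypothetical caller; classification needs a reasonably long document.
AnalyzeBeta.classifyFile("gs://my-bucket/news-article.txt");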