Use of Document in project java-docs-samples by GoogleCloudPlatform (labeled io.atlasmap.v2.Document by the aggregator, but the snippet actually uses the com.google.cloud.language Document).
From the class Analyze, method entitySentimentFile:
/**
 * Identifies the entity sentiments in a GCS-hosted plain-text file using the Language Beta API,
 * printing each entity together with its salience, sentiment, and mentions.
 *
 * @param gcsUri the "gs://" URI of the plain-text file to analyze
 * @throws Exception if the Language service call fails
 */
public static void entitySentimentFile(String gcsUri) throws Exception {
  // Instantiate the Language client (com.google.cloud.language.v1.LanguageServiceClient);
  // try-with-resources closes the client when done.
  try (LanguageServiceClient language = LanguageServiceClient.create()) {
    // Build a plain-text Document pointing at the GCS object.
    Document gcsDoc =
        Document.newBuilder().setGcsContentUri(gcsUri).setType(Type.PLAIN_TEXT).build();
    // UTF16 encoding controls how begin offsets are computed in mentions.
    AnalyzeEntitySentimentRequest sentimentRequest =
        AnalyzeEntitySentimentRequest.newBuilder()
            .setDocument(gcsDoc)
            .setEncodingType(EncodingType.UTF16)
            .build();
    // Detect entity sentiments in the given file.
    AnalyzeEntitySentimentResponse sentimentResponse =
        language.analyzeEntitySentiment(sentimentRequest);
    // Print each entity and every mention of it found in the text.
    for (Entity detectedEntity : sentimentResponse.getEntitiesList()) {
      System.out.printf("Entity: %s\n", detectedEntity.getName());
      System.out.printf("Salience: %.3f\n", detectedEntity.getSalience());
      System.out.printf("Sentiment : %s\n", detectedEntity.getSentiment());
      for (EntityMention entityMention : detectedEntity.getMentionsList()) {
        System.out.printf("Begin offset: %d\n", entityMention.getText().getBeginOffset());
        System.out.printf("Content: %s\n", entityMention.getText().getContent());
        System.out.printf("Magnitude: %.3f\n", entityMention.getSentiment().getMagnitude());
        System.out.printf("Sentiment score : %.3f\n", entityMention.getSentiment().getScore());
        System.out.printf("Type: %s\n\n", entityMention.getType());
      }
    }
  }
  // [END entity_sentiment_file]
}
Use of Document in project java-docs-samples by GoogleCloudPlatform.
From the class AnalyzeBeta, method classifyFile:
/**
 * Detects content categories in a GCS-hosted plain-text file using the Language Beta API
 * and prints each category name with its confidence score.
 *
 * @param gcsUri the "gs://" URI of the plain-text file to classify
 * @throws Exception if the Language service call fails
 */
public static void classifyFile(String gcsUri) throws Exception {
  // Instantiate a beta client: com.google.cloud.language.v1beta2.LanguageServiceClient.
  // try-with-resources guarantees the client is closed.
  try (LanguageServiceClient language = LanguageServiceClient.create()) {
    // Set the GCS content URI path on a plain-text Document.
    Document gcsDoc =
        Document.newBuilder().setGcsContentUri(gcsUri).setType(Type.PLAIN_TEXT).build();
    ClassifyTextRequest classifyRequest =
        ClassifyTextRequest.newBuilder().setDocument(gcsDoc).build();
    // Detect categories in the given file.
    ClassifyTextResponse classifyResponse = language.classifyText(classifyRequest);
    for (ClassificationCategory detectedCategory : classifyResponse.getCategoriesList()) {
      System.out.printf(
          "Category name : %s, Confidence : %.3f\n",
          detectedCategory.getName(), detectedCategory.getConfidence());
    }
  }
  // [END classify_file]
}
Use of Document (here the JDOM org.jdom2.Document) in project qpp-conversion-tool by CMSgov.
From the class QrdaDecoderEngineTest, method topLevelNodeHasTemplateId:
@Test
void topLevelNodeHasTemplateId() {
  // Build a document whose root element directly carries the finish template id.
  Document xmlDocument = new Document();
  Element rootElement = createGenericElement();
  xmlDocument.addContent(rootElement);
  addChildToParent(rootElement, createFinishElement());

  QrdaDecoderEngine engine = new QrdaDecoderEngine(context);
  Node decoded = engine.decode(rootElement);

  // A top-level element with a template id decodes straight to that template's node type.
  assertThat(decoded.getType()).isEqualTo(TemplateId.IA_SECTION);
}
Use of Document (here the JDOM org.jdom2.Document) in project qpp-conversion-tool by CMSgov.
From the class QrdaDecoderEngineTest, method topLevelNodeDoesntHaveTemplateId:
@Test
void topLevelNodeDoesntHaveTemplateId() {
  // Build a document where the template id sits one level below the root element.
  Document xmlDocument = new Document();
  Element rootElement = createGenericElement();
  xmlDocument.addContent(rootElement);
  Element intermediateElement = createGenericElement();
  addChildToParent(rootElement, intermediateElement);
  addChildToParent(intermediateElement, createFinishElement());

  QrdaDecoderEngine engine = new QrdaDecoderEngine(context);
  Node decoded = engine.decode(rootElement);

  // Without a template id at the top level, the root decodes to a placeholder
  // whose first child carries the actual template's node type.
  assertThat(decoded.getType()).isEqualTo(TemplateId.PLACEHOLDER);
  assertThat(decoded.getChildNodes().get(0).getType()).isEqualTo(TemplateId.IA_SECTION);
}
Use of Document (here the Google Cloud Language Document) in project beam by apache.
From the class AnnotateTextIT, method analyzesLanguage:
@Test
public void analyzesLanguage() {
  // Build a plain-text Document around the test string and request syntax extraction.
  Document inputDoc =
      Document.newBuilder().setContent(TEST_STRING).setType(Document.Type.PLAIN_TEXT).build();
  AnnotateTextRequest.Features syntaxFeatures =
      AnnotateTextRequest.Features.newBuilder().setExtractSyntax(true).build();

  // Run the document through the AnnotateText transform in the test pipeline.
  PCollection<AnnotateTextResponse> annotationResults =
      testPipeline
          .apply(Create.of(inputDoc))
          .apply(AnnotateText.newBuilder().setFeatures(syntaxFeatures).build());

  // Verify each response with the custom checker, then execute the pipeline to completion.
  PAssert.that(annotationResults).satisfies(new VerifyTextAnnotationResult());
  testPipeline.run().waitUntilFinish();
}
Aggregations