Use of com.google.cloud.vision.v1p3beta1.Feature in project ddf by codice.
Class KmlMarshallerTest, method unmarshallNullStream.
/**
 * Verifies that unmarshalling a {@code null} stream yields an empty
 * {@link java.util.Optional}, so {@code Optional#get()} throws
 * {@link NoSuchElementException}.
 *
 * <p>NOTE(review): the original test asserted the feature name after
 * {@code get()}, but those lines were unreachable — {@code get()} always
 * throws here (and if it didn't, the {@code expected} clause would fail the
 * test anyway). The dead assertions were removed so the test no longer
 * implies the placemark name is verified.
 */
@Test(expected = NoSuchElementException.class)
public void unmarshallNullStream() {
    kmlMarshaller.unmarshal(null).get();
}
Use of com.google.cloud.vision.v1p3beta1.Feature in project osate-plugin by sireum.
Class Visitor, method buildEndPoint.
/**
 * Builds the AIR end point(s) for one end of a connection instance.
 *
 * <p>A {@link FeatureInstance} end yields one end point per (possibly
 * flattened) feature; a {@link ComponentInstance} end yields a single end
 * point with no feature; mode-transition ends are not yet supported.
 *
 * @param connInstEnd the connection end to translate; must be a
 *     {@code FeatureInstance} or {@code ComponentInstance}.
 * @param path current instance path (currently unused; retained for
 *     interface compatibility with callers).
 * @return an immutable list of end points (never {@code null}).
 * @throws RuntimeException if {@code connInstEnd} is a
 *     {@code ModeTransitionInstance} or an unrecognized subtype.
 */
private List<org.sireum.hamr.ir.EndPoint> buildEndPoint(ConnectionInstanceEnd connInstEnd, List<String> path) {
    List<org.sireum.hamr.ir.EndPoint> result = VisitorUtil.iList();
    final List<String> component =
        Arrays.asList(connInstEnd.getComponentInstance().getInstanceObjectPath().split("\\."));
    final Position componentPos =
        VisitorUtil.buildPosInfo(connInstEnd.getComponentInstance().getInstantiatedObjects().get(0));
    if (connInstEnd instanceof FeatureInstance) {
        FeatureInstance connElem = (FeatureInstance) connInstEnd;
        // Prefix nested feature names with their enclosing feature-group
        // names, outermost first, joined by '_'.
        String featurePre = connElem.getFeature().getName();
        FeatureInstance temp = connElem;
        while (temp.eContainer() instanceof FeatureInstance) {
            featurePre = ((FeatureInstance) temp.eContainer()).getName() + "_" + featurePre;
            temp = (FeatureInstance) temp.eContainer();
        }
        if (connElem.getCategory() == FeatureCategory.FEATURE_GROUP
            && !connElem.getFeatureInstances().isEmpty()) {
            // Non-empty feature group: flatten every nested feature instance
            // into its own end point.
            final String fp = featurePre;
            result = VisitorUtil.addAll(result,
                connElem.getFeatureInstances().stream()
                    .flatMap(fii -> flattenFeatureGroupInstance(fii, fp, component, componentPos, false).stream())
                    .collect(Collectors.toList()));
        } else {
            // Single feature. The original code duplicated this branch for
            // BUS_ACCESS vs. everything else; the only difference was how
            // 'direction' was computed, so the branches are merged here.
            // Bus accesses are bidirectional (InOut); other features use
            // their declared direction.
            final List<String> feature = VisitorUtil.add(component, featurePre);
            final Position featurePos = VisitorUtil.buildPosInfo(connElem.getInstantiatedObjects().get(0));
            final AadlASTJavaFactory.Direction direction =
                connElem.getCategory() == FeatureCategory.BUS_ACCESS
                    ? AadlASTJavaFactory.Direction.InOut
                    : handleDirection(connElem.getDirection());
            result = VisitorUtil.add(result,
                factory.endPoint(factory.name(component, componentPos), factory.name(feature, featurePos), direction));
        }
    } else if (connInstEnd instanceof ComponentInstance) {
        result = VisitorUtil.toIList(factory.endPoint(factory.name(component, componentPos), null, null));
    } else if (connInstEnd instanceof ModeTransitionInstance) {
        throw new RuntimeException("Need to handle ModeTransitionInstanceImpl: " + connInstEnd);
    } else {
        throw new RuntimeException("Unexpected: " + connInstEnd);
    }
    return result;
}
Use of com.google.cloud.vision.v1p3beta1.Feature in project java-docs-samples by GoogleCloudPlatform.
Class ImageMagick, method accept.
// [END functions_imagemagick_setup]
// [START functions_imagemagick_analyze]
/**
 * Blurs uploaded images that the Vision API flags as Adult or Violence.
 *
 * <p>Extracts the GCS bucket/object from the CloudEvent payload, runs
 * SAFE_SEARCH_DETECTION on the object, and calls {@code blur} when the
 * adult or violence likelihood is VERY_LIKELY.
 *
 * @param event the CloudEvent carrying the GCS object-finalize payload.
 */
@Override
public void accept(CloudEvent event) {
    // Extract the GCS event data from the CloudEvent's data payload.
    GcsEvent data = getEventData(event);
    // Validate parameters.
    if (data.getBucket() == null || data.getName() == null) {
        logger.severe("Error: Malformed GCS event.");
        return;
    }
    BlobInfo blobInfo = BlobInfo.newBuilder(data.getBucket(), data.getName()).build();
    // Construct URI to GCS bucket and file.
    String gcsPath = String.format("gs://%s/%s", data.getBucket(), data.getName());
    logger.info(String.format("Analyzing %s", data.getName()));
    // Construct the SafeSearch request.
    ImageSource imgSource = ImageSource.newBuilder().setImageUri(gcsPath).build();
    Image img = Image.newBuilder().setSource(imgSource).build();
    Feature feature = Feature.newBuilder().setType(Type.SAFE_SEARCH_DETECTION).build();
    AnnotateImageRequest request =
        AnnotateImageRequest.newBuilder().addFeatures(feature).setImage(img).build();
    List<AnnotateImageRequest> requests = List.of(request);
    // Numeric value of Likelihood.VERY_LIKELY in the Vision API enum.
    final int veryLikely = 5;
    // Send request to the Vision API.
    try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
        BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
        List<AnnotateImageResponse> responses = response.getResponsesList();
        for (AnnotateImageResponse res : responses) {
            if (res.hasError()) {
                // Log at SEVERE for consistency with the malformed-event
                // path above (was previously logged at INFO).
                logger.severe(String.format("Error: %s", res.getError().getMessage()));
                return;
            }
            // Get SafeSearch annotations.
            SafeSearchAnnotation annotation = res.getSafeSearchAnnotation();
            if (annotation.getAdultValue() == veryLikely
                || annotation.getViolenceValue() == veryLikely) {
                logger.info(String.format("Detected %s as inappropriate.", data.getName()));
                blur(blobInfo);
            } else {
                logger.info(String.format("Detected %s as OK.", data.getName()));
            }
        }
    } catch (IOException e) {
        logger.log(Level.SEVERE, "Error with Vision API: " + e.getMessage(), e);
    }
}
Use of com.google.cloud.vision.v1p3beta1.Feature in project java-docs-samples by GoogleCloudPlatform.
Class ImageMagick, method accept (background-function variant).
// [END functions_imagemagick_setup]
// [START functions_imagemagick_analyze]
/**
 * Blurs uploaded images that the Vision API flags as Adult or Violence.
 *
 * <p>Background-function variant: receives the GCS payload directly, runs
 * SAFE_SEARCH_DETECTION on the object, and calls {@code blur} when the
 * adult or violence likelihood is VERY_LIKELY.
 *
 * @param event the GCS object-finalize event payload.
 * @param context the function invocation context (unused here).
 */
@Override
public void accept(GcsEvent event, Context context) {
    // Validate parameters.
    if (event.getBucket() == null || event.getName() == null) {
        logger.severe("Error: Malformed GCS event.");
        return;
    }
    BlobInfo blobInfo = BlobInfo.newBuilder(event.getBucket(), event.getName()).build();
    // Construct URI to GCS bucket and file.
    String gcsPath = String.format("gs://%s/%s", event.getBucket(), event.getName());
    logger.info(String.format("Analyzing %s", event.getName()));
    // Construct the SafeSearch request.
    ImageSource imgSource = ImageSource.newBuilder().setImageUri(gcsPath).build();
    Image img = Image.newBuilder().setSource(imgSource).build();
    Feature feature = Feature.newBuilder().setType(Type.SAFE_SEARCH_DETECTION).build();
    AnnotateImageRequest request =
        AnnotateImageRequest.newBuilder().addFeatures(feature).setImage(img).build();
    List<AnnotateImageRequest> requests = List.of(request);
    // Numeric value of Likelihood.VERY_LIKELY in the Vision API enum.
    final int veryLikely = 5;
    // Send request to the Vision API.
    try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
        BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
        List<AnnotateImageResponse> responses = response.getResponsesList();
        for (AnnotateImageResponse res : responses) {
            if (res.hasError()) {
                // Log at SEVERE for consistency with the malformed-event
                // path above (was previously logged at INFO).
                logger.severe(String.format("Error: %s", res.getError().getMessage()));
                return;
            }
            // Get SafeSearch annotations.
            SafeSearchAnnotation annotation = res.getSafeSearchAnnotation();
            if (annotation.getAdultValue() == veryLikely
                || annotation.getViolenceValue() == veryLikely) {
                logger.info(String.format("Detected %s as inappropriate.", event.getName()));
                blur(blobInfo);
            } else {
                logger.info(String.format("Detected %s as OK.", event.getName()));
            }
        }
    } catch (IOException e) {
        logger.log(Level.SEVERE, "Error with Vision API: " + e.getMessage(), e);
    }
}
Use of com.google.cloud.vision.v1p3beta1.Feature in project java-vision by googleapis.
Class DetectBeta, method detectHandwrittenOcr.
// [END vision_localize_objects_gcs_beta]
// [START vision_handwritten_ocr_beta]
/**
 * Performs handwritten text detection on a local image file.
 *
 * @param filePath The path to the local file to detect handwritten text on.
 * @param out A {@link PrintStream} to write the results to.
 * @throws Exception on errors while closing the client.
 * @throws IOException on Input/Output errors.
 */
public static void detectHandwrittenOcr(String filePath, PrintStream out) throws Exception {
    List<AnnotateImageRequest> requests = new ArrayList<>();
    ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));
    Image img = Image.newBuilder().setContent(imgBytes).build();
    Feature feat = Feature.newBuilder().setType(Type.DOCUMENT_TEXT_DETECTION).build();
    // Set the language hint code for handwritten OCR.
    ImageContext imageContext =
        ImageContext.newBuilder().addLanguageHints("en-t-i0-handwrit").build();
    AnnotateImageRequest request =
        AnnotateImageRequest.newBuilder()
            .addFeatures(feat)
            .setImage(img)
            .setImageContext(imageContext)
            .build();
    requests.add(request);
    try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
        BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
        List<AnnotateImageResponse> responses = response.getResponsesList();
        // NOTE(review): the original called client.close() here, which is
        // redundant (and causes a double close) inside try-with-resources;
        // the resource block already closes the client.
        for (AnnotateImageResponse res : responses) {
            if (res.hasError()) {
                out.printf("Error: %s\n", res.getError().getMessage());
                return;
            }
            // For the full list of available annotations, see
            // http://g.co/cloud/vision/docs
            TextAnnotation annotation = res.getFullTextAnnotation();
            // Walk page -> block -> paragraph -> word -> symbol, echoing
            // per-level text and confidences. StringBuilders replace the
            // original O(n^2) string concatenation; output is unchanged.
            for (Page page : annotation.getPagesList()) {
                StringBuilder pageText = new StringBuilder();
                for (Block block : page.getBlocksList()) {
                    StringBuilder blockText = new StringBuilder();
                    for (Paragraph para : block.getParagraphsList()) {
                        StringBuilder paraText = new StringBuilder();
                        for (Word word : para.getWordsList()) {
                            StringBuilder wordText = new StringBuilder();
                            for (Symbol symbol : word.getSymbolsList()) {
                                wordText.append(symbol.getText());
                                out.format("Symbol text: %s (confidence: %f)\n",
                                    symbol.getText(), symbol.getConfidence());
                            }
                            out.format("Word text: %s (confidence: %f)\n\n",
                                wordText, word.getConfidence());
                            // Original used String.format("%s %s", ...), so
                            // every word — including the first — is preceded
                            // by a single space.
                            paraText.append(' ').append(wordText);
                        }
                        // Output example using Paragraph:
                        out.println("\nParagraph: \n" + paraText);
                        out.format("Paragraph Confidence: %f\n", para.getConfidence());
                        blockText.append(paraText);
                    }
                    pageText.append(blockText);
                }
            }
            out.println("\nComplete annotation:");
            out.println(annotation.getText());
        }
    }
}
Aggregations