
Example 26 with Image

Use of com.google.cloud.automl.v1.Image in the project java-automl by googleapis.

The class PredictionApi, method predict.

// [START automl_vision_predict]
/**
 * Demonstrates using the AutoML client to predict an image.
 *
 * @param projectId the ID of the project.
 * @param computeRegion the region name.
 * @param modelId the ID of the model to use for image classification.
 * @param filePath the local file path of the image to be classified.
 * @param scoreThreshold the confidence score. Only classifications with a confidence score above
 *     scoreThreshold are displayed.
 */
static void predict(String projectId, String computeRegion, String modelId, String filePath, String scoreThreshold) throws IOException {
    // Instantiate client for prediction service.
    try (PredictionServiceClient predictionClient = PredictionServiceClient.create()) {
        // Get the full path of the model.
        ModelName name = ModelName.of(projectId, computeRegion, modelId);
        // Read the image and assign to payload.
        ByteString content = ByteString.copyFrom(Files.readAllBytes(Paths.get(filePath)));
        Image image = Image.newBuilder().setImageBytes(content).build();
        ExamplePayload examplePayload = ExamplePayload.newBuilder().setImage(image).build();
        // Additional parameters that can be provided for prediction, e.g. the score threshold.
        Map<String, String> params = new HashMap<>();
        if (scoreThreshold != null) {
            params.put("score_threshold", scoreThreshold);
        }
        // Perform the AutoML Prediction request
        PredictResponse response = predictionClient.predict(name, examplePayload, params);
        System.out.println("Prediction results:");
        for (AnnotationPayload annotationPayload : response.getPayloadList()) {
            System.out.println("Predicted class name :" + annotationPayload.getDisplayName());
            System.out.println("Predicted class score :" + annotationPayload.getClassification().getScore());
        }
    }
}
Also used : ModelName(com.google.cloud.automl.v1.ModelName) HashMap(java.util.HashMap) ByteString(com.google.protobuf.ByteString) PredictResponse(com.google.cloud.automl.v1.PredictResponse) ExamplePayload(com.google.cloud.automl.v1.ExamplePayload) Image(com.google.cloud.automl.v1.Image) PredictionServiceClient(com.google.cloud.automl.v1.PredictionServiceClient) AnnotationPayload(com.google.cloud.automl.v1.AnnotationPayload)
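
A minimal usage sketch for the helper above; the project, region, model ID, file path, and threshold are placeholder values, not taken from the sample:

public static void main(String[] args) throws IOException {
    // All of these values are placeholders; substitute your own project, model, and image.
    String projectId = "my-project-id";
    String computeRegion = "us-central1";
    String modelId = "ICN123456789012345678";
    String filePath = "./resources/test.png";
    String scoreThreshold = "0.5";
    predict(projectId, computeRegion, modelId, filePath, scoreThreshold);
}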

Example 27 with Image

Use of com.google.cloud.automl.v1.Image in the project java-automl by googleapis.

The class ListDatasets, method listDatasets.

// List the datasets
static void listDatasets(String projectId) throws IOException {
    // the "close" method on the client to safely clean up any remaining background resources.
    try (AutoMlClient client = AutoMlClient.create()) {
        // A resource that represents Google Cloud Platform location.
        LocationName projectLocation = LocationName.of(projectId, "us-central1");
        ListDatasetsRequest request = ListDatasetsRequest.newBuilder().setParent(projectLocation.toString()).build();
        // List all the datasets available in the region.
        System.out.println("List of datasets:");
        for (Dataset dataset : client.listDatasets(request).iterateAll()) {
            // Display the dataset information
            System.out.format("\nDataset name: %s\n", dataset.getName());
            // To get the dataset ID, parse it out of the `name` field; dataset IDs are
            // required for other methods.
            // Name Form: `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`
            String[] names = dataset.getName().split("/");
            String retrievedDatasetId = names[names.length - 1];
            System.out.format("Dataset id: %s\n", retrievedDatasetId);
            System.out.format("Dataset display name: %s\n", dataset.getDisplayName());
            System.out.println("Dataset create time:");
            System.out.format("\tseconds: %s\n", dataset.getCreateTime().getSeconds());
            System.out.format("\tnanos: %s\n", dataset.getCreateTime().getNanos());
            // [END automl_language_sentiment_analysis_list_datasets]
            // [END automl_language_text_classification_list_datasets]
            // [END automl_translate_list_datasets]
            // [END automl_vision_classification_list_datasets]
            // [END automl_vision_object_detection_list_datasets]
            System.out.format("Text extraction dataset metadata: %s\n", dataset.getTextExtractionDatasetMetadata());
            // [END automl_language_entity_extraction_list_datasets]
            // [START automl_language_sentiment_analysis_list_datasets]
            System.out.format("Text sentiment dataset metadata: %s\n", dataset.getTextSentimentDatasetMetadata());
            // [END automl_language_sentiment_analysis_list_datasets]
            // [START automl_language_text_classification_list_datasets]
            System.out.format("Text classification dataset metadata: %s\n", dataset.getTextClassificationDatasetMetadata());
            // [END automl_language_text_classification_list_datasets]
            // [START automl_translate_list_datasets]
            System.out.println("Translation dataset metadata:");
            System.out.format("\tSource language code: %s\n", dataset.getTranslationDatasetMetadata().getSourceLanguageCode());
            System.out.format("\tTarget language code: %s\n", dataset.getTranslationDatasetMetadata().getTargetLanguageCode());
            // [END automl_translate_list_datasets]
            // [START automl_vision_classification_list_datasets]
            System.out.format("Image classification dataset metadata: %s\n", dataset.getImageClassificationDatasetMetadata());
            // [END automl_vision_classification_list_datasets]
            // [START automl_vision_object_detection_list_datasets]
            System.out.format("Image object detection dataset metadata: %s\n", dataset.getImageObjectDetectionDatasetMetadata());
        // [START automl_language_entity_extraction_list_datasets]
        // [START automl_language_sentiment_analysis_list_datasets]
        // [START automl_language_text_classification_list_datasets]
        // [START automl_translate_list_datasets]
        // [START automl_vision_classification_list_datasets]
        }
    }
}
Also used : Dataset(com.google.cloud.automl.v1.Dataset) AutoMlClient(com.google.cloud.automl.v1.AutoMlClient) LocationName(com.google.cloud.automl.v1.LocationName) ListDatasetsRequest(com.google.cloud.automl.v1.ListDatasetsRequest)
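
Because the comments above note that dataset IDs are needed for other calls, here is a minimal sketch of retrieving a single dataset with a parsed ID; the project and dataset IDs are placeholders, and the sketch assumes the same com.google.cloud.automl.v1 client plus its DatasetName helper:

static void getDataset(String projectId, String datasetId) throws IOException {
    try (AutoMlClient client = AutoMlClient.create()) {
        // Build the full resource name from the placeholder project and dataset IDs.
        DatasetName datasetFullId = DatasetName.of(projectId, "us-central1", datasetId);
        Dataset dataset = client.getDataset(datasetFullId);
        System.out.format("Dataset name: %s\n", dataset.getName());
        System.out.format("Dataset display name: %s\n", dataset.getDisplayName());
    }
}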

Example 28 with Image

Use of com.ibm.dtfj.image.j9.Image in the project openj9 by eclipse-openj9.

The class XMLIndexReader, method setJ9DumpData.

public void setJ9DumpData(long environ, String osType, String osSubType, String cpuType, int cpuCount, long bytesMem, int pointerSize, Image[] imageRef, ImageAddressSpace[] addressSpaceRef, ImageProcess[] processRef) {
    Builder builder = null;
    if (_stream == null) {
        // extract directly from the file
        builder = new Builder(_coreFile, _reader, environ, _fileResolvingAgent);
    } else {
        // extract using the data stream
        builder = new Builder(_coreFile, _stream, environ, _fileResolvingAgent);
    }
    _coreFile.extract(builder);
    // Jazz 4961 : chamlain : NumberFormatException opening corrupt dump
    if (cpuType == null)
        cpuType = builder.getCPUType();
    String cpuSubType = builder.getCPUSubType();
    if (osType == null)
        osType = builder.getOSType();
    long creationTime = builder.getCreationTime();
    _coreImage = new Image(osType, osSubType, cpuType, cpuSubType, cpuCount, bytesMem, creationTime);
    ImageAddressSpace addressSpace = (ImageAddressSpace) builder.getAddressSpaces().next();
    ImageProcess process = (ImageProcess) addressSpace.getCurrentProcess();
    // If not sure, use the first address space/process pair found
    for (Iterator it = builder.getAddressSpaces(); it.hasNext(); ) {
        ImageAddressSpace addressSpace1 = (ImageAddressSpace) it.next();
        final boolean vb = false;
        if (vb)
            System.out.println("address space " + addressSpace1);
        _coreImage.addAddressSpace(addressSpace1);
        for (Iterator it2 = addressSpace1.getProcesses(); it2.hasNext(); ) {
            ImageProcess process1 = (ImageProcess) it2.next();
            if (vb)
                try {
                    System.out.println("process " + process1.getID());
                } catch (DataUnavailable e) {
                    // Process ID unavailable; it is only printed for the optional debug trace above.
                } catch (CorruptDataException e) {
                    // Process ID corrupt in the dump; again only needed for the debug trace.
                }
            if (process == null || isProcessForEnvironment(environ, addressSpace1, process1)) {
                addressSpace = addressSpace1;
                process = process1;
                if (vb)
                    System.out.println("default process for Runtime");
            }
        }
    }
    if (null != process) {
        // z/OS can have 64-bit or 31-bit processes, Java only reports 64-bit or 32-bit.
        if (process.getPointerSize() != pointerSize && !(process.getPointerSize() == 31 && pointerSize == 32)) {
            System.out.println("XML and core file pointer sizes differ " + process.getPointerSize() + "!=" + pointerSize);
        }
    } else {
        throw new IllegalStateException("No process found in the dump.");
    }
    imageRef[0] = _coreImage;
    addressSpaceRef[0] = addressSpace;
    processRef[0] = process;
}
Also used : ImageAddressSpace(com.ibm.dtfj.image.j9.ImageAddressSpace) ImageProcess(com.ibm.dtfj.image.j9.ImageProcess) Builder(com.ibm.dtfj.image.j9.Builder) Iterator(java.util.Iterator) DataUnavailable(com.ibm.dtfj.image.DataUnavailable) CorruptDataException(com.ibm.dtfj.image.CorruptDataException) Image(com.ibm.dtfj.image.j9.Image)
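
For orientation, a minimal sketch of walking the resulting DTFJ hierarchy through the public com.ibm.dtfj.image interfaces rather than the j9 implementation classes; the method name dumpProcessIds and the traversal are illustrative, assuming only accessors the code above already relies on (getAddressSpaces, getProcesses, getID):

// A standalone sketch using the DTFJ interface types from com.ibm.dtfj.image.
static void dumpProcessIds(com.ibm.dtfj.image.Image image) {
    for (java.util.Iterator<?> spaces = image.getAddressSpaces(); spaces.hasNext(); ) {
        com.ibm.dtfj.image.ImageAddressSpace space = (com.ibm.dtfj.image.ImageAddressSpace) spaces.next();
        for (java.util.Iterator<?> procs = space.getProcesses(); procs.hasNext(); ) {
            com.ibm.dtfj.image.ImageProcess proc = (com.ibm.dtfj.image.ImageProcess) procs.next();
            try {
                System.out.println("Process ID: " + proc.getID());
            } catch (com.ibm.dtfj.image.DataUnavailable | com.ibm.dtfj.image.CorruptDataException e) {
                // The ID can be missing or corrupt in a damaged dump; skip this process.
            }
        }
    }
}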

Example 29 with Image

Use of com.adobe.cq.wcm.core.components.it.seljup.util.components.image.v1.Image in the project aem-core-wcm-components by adobe.

The class ImageIT, method setupBeforeEach.

@BeforeEach
public void setupBeforeEach() throws ClientException {
    imageTests = new ImageTests();
    imageTests.setup(adminClient, contextPath, label, Commons.rtImage_v1, rootPage, defaultPageTemplate, clientlibs, new Image());
}
Also used : ImageTests(com.adobe.cq.wcm.core.components.it.seljup.tests.image.ImageTests) Image(com.adobe.cq.wcm.core.components.it.seljup.util.components.image.v1.Image) BeforeEach(org.junit.jupiter.api.BeforeEach)

Example 30 with Image

Use of com.adobe.cq.wcm.core.components.it.seljup.util.components.image.v2.Image in the project aem-core-wcm-components by adobe.

The class ImageIT, method setupBeforeEach.

@BeforeEach
public void setupBeforeEach() throws ClientException {
    clientlibs = "core.wcm.components.image.v3";
    imageTests = new ImageTests();
    imageTests.setup(adminClient, contextPath, label, Commons.rtImage_v3, rootPage, defaultPageTemplate, clientlibs, new Image());
}
Also used : ImageTests(com.adobe.cq.wcm.core.components.it.seljup.tests.image.ImageTests) Image(com.adobe.cq.wcm.core.components.it.seljup.util.components.image.v2.Image) BeforeEach(org.junit.jupiter.api.BeforeEach)

Aggregations

AnnotateImageRequest (com.google.cloud.vision.v1.AnnotateImageRequest): 72
Image (com.google.cloud.vision.v1.Image): 72
Feature (com.google.cloud.vision.v1.Feature): 70
BatchAnnotateImagesResponse (com.google.cloud.vision.v1.BatchAnnotateImagesResponse): 69
ImageAnnotatorClient (com.google.cloud.vision.v1.ImageAnnotatorClient): 66
ArrayList (java.util.ArrayList): 64
AnnotateImageResponse (com.google.cloud.vision.v1.AnnotateImageResponse): 63
ByteString (com.google.protobuf.ByteString): 51
ImageSource (com.google.cloud.vision.v1.ImageSource): 39
FileInputStream (java.io.FileInputStream): 31
EntityAnnotation (com.google.cloud.vision.v1.EntityAnnotation): 27
WebImage (com.google.cloud.vision.v1.WebDetection.WebImage): 26
IOException (java.io.IOException): 17
ImageContext (com.google.cloud.vision.v1.ImageContext): 14
WebDetection (com.google.cloud.vision.v1.WebDetection): 11
LocationInfo (com.google.cloud.vision.v1.LocationInfo): 10
SafeSearchAnnotation (com.google.cloud.vision.v1.SafeSearchAnnotation): 10
Arrays (java.util.Arrays): 10
CropHint (com.google.cloud.vision.v1.CropHint): 9
Type (com.google.cloud.vision.v1.Feature.Type): 9
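
The aggregation shows that the com.google.cloud.vision.v1.Image variant almost always appears together with AnnotateImageRequest, Feature, ImageAnnotatorClient, and BatchAnnotateImagesResponse. As a hedged sketch only (the file path is a placeholder and label detection is an arbitrary feature choice; none of this code comes from the examples above), a typical combination of those classes looks roughly like this:

static void detectLabels(String filePath) throws IOException {
    // Read the image file into the request payload.
    ByteString imgBytes = ByteString.readFrom(new FileInputStream(filePath));
    Image img = Image.newBuilder().setContent(imgBytes).build();
    Feature feat = Feature.newBuilder().setType(Feature.Type.LABEL_DETECTION).build();
    AnnotateImageRequest request =
        AnnotateImageRequest.newBuilder().addFeatures(feat).setImage(img).build();
    List<AnnotateImageRequest> requests = new ArrayList<>();
    requests.add(request);
    try (ImageAnnotatorClient client = ImageAnnotatorClient.create()) {
        BatchAnnotateImagesResponse response = client.batchAnnotateImages(requests);
        for (AnnotateImageResponse res : response.getResponsesList()) {
            if (res.hasError()) {
                System.out.format("Error: %s\n", res.getError().getMessage());
                return;
            }
            for (EntityAnnotation annotation : res.getLabelAnnotationsList()) {
                System.out.format("Label: %s (score %f)\n", annotation.getDescription(), annotation.getScore());
            }
        }
    }
}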