Use of com.ibm.watson.visual_recognition.v4.model.AnalyzeResponse in project java-sdk by watson-developer-cloud.
In class VisualRecognition, method analyze:
/**
* Analyze images.
*
* <p>Analyze images by URL, by file, or both against your own collection. Make sure that
* **training_status.objects.ready** is `true` for the feature before you use a collection to
* analyze images.
*
* <p>Encode the image and .zip file names in UTF-8 if they contain non-ASCII characters. The
* service assumes UTF-8 encoding if it encounters non-ASCII characters.
*
* @param analyzeOptions the {@link AnalyzeOptions} containing the options for the call
* @return a {@link ServiceCall} with a result of type {@link AnalyzeResponse}
*/
public ServiceCall<AnalyzeResponse> analyze(AnalyzeOptions analyzeOptions) {
  com.ibm.cloud.sdk.core.util.Validator.notNull(analyzeOptions, "analyzeOptions cannot be null");
  RequestBuilder builder =
      RequestBuilder.post(RequestBuilder.resolveRequestUrl(getServiceUrl(), "/v4/analyze"));
  Map<String, String> sdkHeaders = SdkCommon.getSdkHeaders("watson_vision_combined", "v4", "analyze");
  for (Entry<String, String> header : sdkHeaders.entrySet()) {
    builder.header(header.getKey(), header.getValue());
  }
  builder.header("Accept", "application/json");
  builder.query("version", String.valueOf(this.version));
  MultipartBody.Builder multipartBuilder = new MultipartBody.Builder();
  multipartBuilder.setType(MultipartBody.FORM);
  for (String item : analyzeOptions.collectionIds()) {
    multipartBuilder.addFormDataPart("collection_ids", item);
  }
  for (String item : analyzeOptions.features()) {
    multipartBuilder.addFormDataPart("features", item);
  }
  if (analyzeOptions.imagesFile() != null) {
    for (FileWithMetadata item : analyzeOptions.imagesFile()) {
      okhttp3.RequestBody itemBody = RequestUtils.inputStreamBody(item.data(), item.contentType());
      multipartBuilder.addFormDataPart("images_file", item.filename(), itemBody);
    }
  }
  if (analyzeOptions.imageUrl() != null) {
    for (String item : analyzeOptions.imageUrl()) {
      multipartBuilder.addFormDataPart("image_url", item);
    }
  }
  if (analyzeOptions.threshold() != null) {
    multipartBuilder.addFormDataPart("threshold", String.valueOf(analyzeOptions.threshold()));
  }
  builder.body(multipartBuilder.build());
  ResponseConverter<AnalyzeResponse> responseConverter =
      ResponseConverterUtils.getValue(new com.google.gson.reflect.TypeToken<AnalyzeResponse>() {}.getType());
  return createServiceCall(builder.build(), responseConverter);
}
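For reference, a minimal sketch of how a client might invoke this operation against a live v4 instance follows. The API key, service URL, collection ID, and image URL are placeholders, and the version date is an assumption; only the AnalyzeOptions builder calls and the execute()/getResult() usage shown elsewhere on this page are taken from the SDK itself.

import com.ibm.cloud.sdk.core.security.IamAuthenticator;
import com.ibm.watson.visual_recognition.v4.VisualRecognition;
import com.ibm.watson.visual_recognition.v4.model.AnalyzeOptions;
import com.ibm.watson.visual_recognition.v4.model.AnalyzeResponse;

public class AnalyzeByUrlExample {
  public static void main(String[] args) {
    // Placeholder credentials; replace with real values for your service instance.
    IamAuthenticator authenticator = new IamAuthenticator("{apikey}");
    VisualRecognition service = new VisualRecognition("2019-02-11", authenticator);
    service.setServiceUrl("{service-url}");

    // Analyze one image by URL against a trained collection.
    AnalyzeOptions options = new AnalyzeOptions.Builder()
        .addImageUrl("https://example.com/images/dog.jpg")
        .addCollectionIds("{collection-id}")
        .addFeatures(AnalyzeOptions.Features.OBJECTS)
        .build();
    AnalyzeResponse response = service.analyze(options).execute().getResult();
    System.out.println(response);
  }
}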
Use of com.ibm.watson.visual_recognition.v4.model.AnalyzeResponse in project java-sdk by watson-developer-cloud.
In class VisualRecognitionTest, method testAnalyzeWOptions:
@Test
public void testAnalyzeWOptions() throws Throwable {
  // Schedule some responses.
  String mockResponseBody = "{\"images\": [{\"source\": {\"type\": \"file\", \"filename\": \"filename\", \"archive_filename\": \"archiveFilename\", \"source_url\": \"sourceUrl\", \"resolved_url\": \"resolvedUrl\"}, \"dimensions\": {\"height\": 6, \"width\": 5}, \"objects\": {\"collections\": [{\"collection_id\": \"collectionId\", \"objects\": [{\"object\": \"object\", \"location\": {\"top\": 3, \"left\": 4, \"width\": 5, \"height\": 6}, \"score\": 5}]}]}, \"errors\": [{\"code\": \"invalid_field\", \"message\": \"message\", \"more_info\": \"moreInfo\", \"target\": {\"type\": \"field\", \"name\": \"name\"}}]}], \"warnings\": [{\"code\": \"invalid_field\", \"message\": \"message\", \"more_info\": \"moreInfo\"}], \"trace\": \"trace\"}";
  String analyzePath = "/v4/analyze";
  server.enqueue(new MockResponse()
      .setHeader("Content-type", "application/json")
      .setResponseCode(200)
      .setBody(mockResponseBody));
  constructClientService();

  // Construct an instance of the AnalyzeOptions model
  AnalyzeOptions analyzeOptionsModel = new AnalyzeOptions.Builder()
      .collectionIds(new java.util.ArrayList<String>(java.util.Arrays.asList("testString")))
      .features(new java.util.ArrayList<String>(java.util.Arrays.asList("objects")))
      .imagesFile(mockListFileWithMetadata)
      .imageUrl(new java.util.ArrayList<String>(java.util.Arrays.asList("testString")))
      .threshold(Float.valueOf("0.15"))
      .build();

  // Invoke operation with valid options model (positive test)
  Response<AnalyzeResponse> response = visualRecognitionService.analyze(analyzeOptionsModel).execute();
  assertNotNull(response);
  AnalyzeResponse responseObj = response.getResult();
  assertNotNull(responseObj);

  // Verify the contents of the request
  RecordedRequest request = server.takeRequest();
  assertNotNull(request);
  assertEquals(request.getMethod(), "POST");

  // Check query
  Map<String, String> query = TestUtilities.parseQueryString(request);
  assertNotNull(query);
  // Get query params
  assertEquals(query.get("version"), "testString");

  // Check request path
  String parsedPath = TestUtilities.parseReqPath(request);
  assertEquals(parsedPath, analyzePath);
}
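The fixture mockListFileWithMetadata is defined elsewhere in the test class and is not shown here. A sketch of one way such a list could be built appears below; the helper name and the resource path are hypothetical, and only the FileWithMetadata.Builder calls that also appear in testAnalyzeWithFiles further down are taken as given.

import com.ibm.cloud.sdk.core.service.model.FileWithMetadata;
import java.io.File;
import java.io.FileNotFoundException;
import java.util.Collections;
import java.util.List;

// Hypothetical helper: builds a single-element list backed by a local test image.
static List<FileWithMetadata> buildMockListFileWithMetadata() throws FileNotFoundException {
  FileWithMetadata file = new FileWithMetadata.Builder()
      .data(new File("src/test/resources/placeholder-image.jpg"))  // placeholder path
      .contentType("image/jpeg")
      .build();
  return Collections.singletonList(file);
}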
Use of com.ibm.watson.visual_recognition.v4.model.AnalyzeResponse in project java-sdk by watson-developer-cloud.
In class VisualRecognitionIT, method testAnalyzeWithUrl:
/**
 * Test analyze with a URL.
 *
 * @throws FileNotFoundException the file not found exception
 */
@Test
public void testAnalyzeWithUrl() throws FileNotFoundException {
  AnalyzeOptions options = new AnalyzeOptions.Builder()
      .addImageUrl(DOG_IMAGE_URL)
      .addCollectionIds(COLLECTION_ID)
      .addFeatures(AnalyzeOptions.Features.OBJECTS)
      .build();
  AnalyzeResponse response = service.analyze(options).execute().getResult();
  assertNotNull(response);
  assertEquals(1, response.getImages().size());
}
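The Javadoc for analyze above notes that training_status.objects.ready must be true before a collection is used for analysis. A sketch of how a test or client might guard on that is shown below; GetCollectionOptions and getCollection are v4 operations, but the getter chain is assumed to mirror the training_status.objects.ready field path and should be checked against the generated model classes.

import com.ibm.watson.visual_recognition.v4.VisualRecognition;
import com.ibm.watson.visual_recognition.v4.model.Collection;
import com.ibm.watson.visual_recognition.v4.model.GetCollectionOptions;

// Assumed getter names, expected to mirror training_status.objects.ready in the API response.
static boolean collectionIsReady(VisualRecognition service, String collectionId) {
  GetCollectionOptions getOptions = new GetCollectionOptions.Builder()
      .collectionId(collectionId)
      .build();
  Collection collection = service.getCollection(getOptions).execute().getResult();
  return collection.getTrainingStatus() != null
      && collection.getTrainingStatus().getObjects() != null
      && Boolean.TRUE.equals(collection.getTrainingStatus().getObjects().isReady());
}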
Use of com.ibm.watson.visual_recognition.v4.model.AnalyzeResponse in project java-sdk by watson-developer-cloud.
In class VisualRecognitionIT, method testAnalyzeWithFiles:
/**
* Test analyze with files.
*
* @throws FileNotFoundException the file not found exception
*/
@Test
public void testAnalyzeWithFiles() throws FileNotFoundException {
  FileWithMetadata giraffeImage =
      new FileWithMetadata.Builder().data(new File(SINGLE_GIRAFFE_IMAGE_PATH)).contentType("image/jpeg").build();
  FileWithMetadata turtleImage =
      new FileWithMetadata.Builder().data(new File(SINGLE_TURTLE_IMAGE_PATH)).contentType("image/jpeg").build();
  List<FileWithMetadata> filesToAnalyze = Arrays.asList(giraffeImage, turtleImage);
  List<String> collectionIds = Collections.singletonList(COLLECTION_ID);
  AnalyzeOptions options = new AnalyzeOptions.Builder()
      .imagesFile(filesToAnalyze)
      .collectionIds(collectionIds)
      .addFeatures(AnalyzeOptions.Features.OBJECTS)
      .threshold(.5f)
      .build();
  AnalyzeResponse response = service.analyze(options).execute().getResult();
  assertNotNull(response);
  assertEquals(2, response.getImages().size());
}
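For completeness, a sketch of walking the result for detected objects follows. The model class and getter names (Image, the DetectedObjects returned by getObjects(), CollectionObjects, ObjectDetail) are assumed to mirror the JSON fields in the mock response above and should be verified against the generated v4 model classes.

import com.ibm.watson.visual_recognition.v4.model.AnalyzeResponse;
import com.ibm.watson.visual_recognition.v4.model.CollectionObjects;
import com.ibm.watson.visual_recognition.v4.model.Image;
import com.ibm.watson.visual_recognition.v4.model.ObjectDetail;

// Print each detected object and its confidence score from an AnalyzeResponse.
static void printDetectedObjects(AnalyzeResponse response) {
  for (Image image : response.getImages()) {
    if (image.getObjects() == null || image.getObjects().getCollections() == null) {
      continue;  // this image produced no object detections
    }
    for (CollectionObjects collection : image.getObjects().getCollections()) {
      for (ObjectDetail object : collection.getObjects()) {
        System.out.println(object.getObject() + " (score " + object.getScore() + ")");
      }
    }
  }
}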