Usage of com.ibm.watson.text_to_speech.v1.model.Prompts in the watson-developer-cloud/java-sdk project: class TextToSpeechTest, method testListVoicesWOptions.
// Test the listVoices operation with a valid options model parameter
@Test
public void testListVoicesWOptions() throws Throwable {
// Canned JSON payload the mock server will return for this call.
String voicesResponseJson = "{\"voices\": [{\"url\": \"url\", \"gender\": \"gender\", \"name\": \"name\", \"language\": \"language\", \"description\": \"description\", \"customizable\": true, \"supported_features\": {\"custom_pronunciation\": false, \"voice_transformation\": false}, \"customization\": {\"customization_id\": \"customizationId\", \"name\": \"name\", \"language\": \"language\", \"owner\": \"owner\", \"created\": \"created\", \"last_modified\": \"lastModified\", \"description\": \"description\", \"words\": [{\"word\": \"word\", \"translation\": \"translation\", \"part_of_speech\": \"Dosi\"}], \"prompts\": [{\"prompt\": \"prompt\", \"prompt_id\": \"promptId\", \"status\": \"status\", \"error\": \"error\", \"speaker_id\": \"speakerId\"}]}}]}";
String expectedPath = "/v1/voices";
server.enqueue(new MockResponse().setHeader("Content-type", "application/json").setResponseCode(200).setBody(voicesResponseJson));
// listVoices takes no parameters, so the options model is empty.
ListVoicesOptions options = new ListVoicesOptions();
// Execute the call and confirm a parsed result came back.
Response<Voices> voicesResponse = textToSpeechService.listVoices(options).execute();
assertNotNull(voicesResponse);
Voices voices = voicesResponse.getResult();
assertNotNull(voices);
// Inspect the request that actually reached the mock server.
RecordedRequest recordedRequest = server.takeRequest();
assertNotNull(recordedRequest);
assertEquals(recordedRequest.getMethod(), "GET");
// The request path must match the operation's endpoint exactly.
String actualPath = TestUtilities.parseReqPath(recordedRequest);
assertEquals(actualPath, expectedPath);
// This operation sends no query string.
Map<String, String> queryParams = TestUtilities.parseQueryString(recordedRequest);
assertNull(queryParams);
}
Usage of com.ibm.watson.text_to_speech.v1.model.Prompts in the watson-developer-cloud/java-sdk project: class TextToSpeechTest, method testGetCustomPromptWOptions.
// Test the getCustomPrompt operation with a valid options model parameter
@Test
public void testGetCustomPromptWOptions() throws Throwable {
// Canned JSON payload the mock server will return for this call.
String promptResponseJson = "{\"prompt\": \"prompt\", \"prompt_id\": \"promptId\", \"status\": \"status\", \"error\": \"error\", \"speaker_id\": \"speakerId\"}";
String expectedPath = "/v1/customizations/testString/prompts/testString";
server.enqueue(new MockResponse().setHeader("Content-type", "application/json").setResponseCode(200).setBody(promptResponseJson));
// Both path parameters (customization ID and prompt ID) are required.
GetCustomPromptOptions options = new GetCustomPromptOptions.Builder().customizationId("testString").promptId("testString").build();
// Execute the call and confirm a parsed result came back.
Response<Prompt> promptResponse = textToSpeechService.getCustomPrompt(options).execute();
assertNotNull(promptResponse);
Prompt prompt = promptResponse.getResult();
assertNotNull(prompt);
// Inspect the request that actually reached the mock server.
RecordedRequest recordedRequest = server.takeRequest();
assertNotNull(recordedRequest);
assertEquals(recordedRequest.getMethod(), "GET");
// The request path must embed both path parameters.
String actualPath = TestUtilities.parseReqPath(recordedRequest);
assertEquals(actualPath, expectedPath);
// This operation sends no query string.
Map<String, String> queryParams = TestUtilities.parseQueryString(recordedRequest);
assertNull(queryParams);
}
Usage of com.ibm.watson.text_to_speech.v1.model.Prompts in the watson-developer-cloud/java-sdk project: class TextToSpeechTest, method testCreateCustomModelWOptions.
// Test the createCustomModel operation with a valid options model parameter
@Test
public void testCreateCustomModelWOptions() throws Throwable {
// Canned JSON payload the mock server will return for this call.
String customModelResponseJson = "{\"customization_id\": \"customizationId\", \"name\": \"name\", \"language\": \"language\", \"owner\": \"owner\", \"created\": \"created\", \"last_modified\": \"lastModified\", \"description\": \"description\", \"words\": [{\"word\": \"word\", \"translation\": \"translation\", \"part_of_speech\": \"Dosi\"}], \"prompts\": [{\"prompt\": \"prompt\", \"prompt_id\": \"promptId\", \"status\": \"status\", \"error\": \"error\", \"speaker_id\": \"speakerId\"}]}";
String expectedPath = "/v1/customizations";
// Creation returns HTTP 201, not 200.
server.enqueue(new MockResponse().setHeader("Content-type", "application/json").setResponseCode(201).setBody(customModelResponseJson));
// Name is required; language and description are optional.
CreateCustomModelOptions options = new CreateCustomModelOptions.Builder().name("testString").language("en-US").description("testString").build();
// Execute the call and confirm a parsed result came back.
Response<CustomModel> customModelResponse = textToSpeechService.createCustomModel(options).execute();
assertNotNull(customModelResponse);
CustomModel customModel = customModelResponse.getResult();
assertNotNull(customModel);
// Inspect the request that actually reached the mock server.
RecordedRequest recordedRequest = server.takeRequest();
assertNotNull(recordedRequest);
assertEquals(recordedRequest.getMethod(), "POST");
// The request path must match the operation's endpoint exactly.
String actualPath = TestUtilities.parseReqPath(recordedRequest);
assertEquals(actualPath, expectedPath);
// This operation sends no query string.
Map<String, String> queryParams = TestUtilities.parseQueryString(recordedRequest);
assertNull(queryParams);
}
Usage of com.ibm.watson.text_to_speech.v1.model.Prompts in the watson-developer-cloud/java-sdk project: class TextToSpeech, method addCustomPrompt.
/**
 * Add a custom prompt.
 *
 * <p>Adds a custom prompt to a custom model. A prompt is defined by the text that is to be spoken,
 * the audio for that text, a unique user-specified prompt ID, and an optional speaker ID. The
 * service uses this information to generate prosodic data (not visible to the user) that it later
 * uses to produce synthesized audio on request. You must use credentials for the instance of the
 * service that owns the custom model, and you can add at most 1000 custom prompts to a single
 * custom model.
 *
 * <p>Prompt IDs must be unique within a custom model; assigning meaningful IDs (for example,
 * `goodbye` for a farewell message) is recommended. Supplying the ID of an existing prompt
 * replaces that prompt: the service reprocesses it with the new text, audio, and (if provided)
 * speaker model, and updates the associated prosody data.
 *
 * <p>The quality of a prompt is undefined if its language does not match the language of its
 * custom model; the service makes a best-effort attempt to render the text but does not validate
 * the language match.
 *
 * <p>Adding a prompt is an asynchronous operation. Processing time generally matches the length
 * of the prompt's audio. Poll the [Get a custom prompt](#getcustomprompt) method to determine when
 * the prompt becomes `available`; prompts in the `processing` or `failed` state cannot be used for
 * speech synthesis.
 *
 * <p>The text passed with a prompt must match the spoken audio as closely as possible. The
 * service can often compensate for small mismatches, but processing fails if the text and audio
 * cannot be effectively aligned.
 *
 * <p>### Evaluating a prompt
 *
 * <p>Always listen to and evaluate a prompt before production use. To synthesize only a single
 * prompt, use the SSML extension `<ibm:prompt id="goodbye"/>` (here for a prompt whose ID is
 * `goodbye`). You might need to rerecord and resubmit a prompt (up to about five times) to address
 * problems such as undetected text/audio mismatches (shorter prompts align better than long
 * ones), words the service does not recognize (add a custom word/pronunciation pair and re-create
 * the prompt), or poor audio quality or prosody detection (submit new audio).
 *
 * <p>If a prompt created without a speaker ID does not capture the intended prosody, enrolling
 * the speaker and providing a speaker ID can improve quality — especially for short prompts such
 * as "good-bye" or "thank you." Custom prompts are supported only for use with US English custom
 * models and voices.
 *
 * <p>**See also:** * [Add a custom
 * prompt](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-create#tbe-create-add-prompt)
 * * [Evaluate a custom
 * prompt](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-create#tbe-create-evaluate-prompt)
 * * [Rules for creating custom
 * prompts](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-rules#tbe-rules-prompts).
 *
 * @param addCustomPromptOptions the {@link AddCustomPromptOptions} containing the options for the
 *     call
 * @return a {@link ServiceCall} with a result of type {@link Prompt}
 */
public ServiceCall<Prompt> addCustomPrompt(AddCustomPromptOptions addCustomPromptOptions) {
com.ibm.cloud.sdk.core.util.Validator.notNull(addCustomPromptOptions, "addCustomPromptOptions cannot be null");
// Substitute both path parameters into the endpoint template.
Map<String, String> pathParams = new HashMap<String, String>();
pathParams.put("customization_id", addCustomPromptOptions.customizationId());
pathParams.put("prompt_id", addCustomPromptOptions.promptId());
RequestBuilder requestBuilder = RequestBuilder.post(RequestBuilder.resolveRequestUrl(getServiceUrl(), "/v1/customizations/{customization_id}/prompts/{prompt_id}", pathParams));
// Attach the SDK analytics headers for this operation.
for (Entry<String, String> sdkHeader : SdkCommon.getSdkHeaders("text_to_speech", "v1", "addCustomPrompt").entrySet()) {
requestBuilder.header(sdkHeader.getKey(), sdkHeader.getValue());
}
requestBuilder.header("Accept", "application/json");
// The body is multipart/form-data: a JSON "metadata" part plus the WAV audio "file" part.
MultipartBody.Builder multipart = new MultipartBody.Builder();
multipart.setType(MultipartBody.FORM);
multipart.addFormDataPart("metadata", addCustomPromptOptions.metadata().toString());
multipart.addFormDataPart("file", "filename", RequestUtils.inputStreamBody(addCustomPromptOptions.file(), "audio/wav"));
requestBuilder.body(multipart.build());
// Deserialize the JSON response into a Prompt model.
ResponseConverter<Prompt> responseConverter = ResponseConverterUtils.getValue(new com.google.gson.reflect.TypeToken<Prompt>() {
}.getType());
return createServiceCall(requestBuilder.build(), responseConverter);
}
Usage of com.ibm.watson.text_to_speech.v1.model.Prompts in the watson-developer-cloud/java-sdk project: class TextToSpeech, method getCustomPrompt.
/**
 * Get a custom prompt.
 *
 * <p>Gets information about a specified custom prompt for a specified custom model, including the
 * prompt ID, prompt text, status, and optional speaker ID. You must use credentials for the
 * instance of the service that owns the custom model. Custom prompts are supported only for use
 * with US English custom models and voices.
 *
 * <p>**See also:** [Listing custom
 * prompts](https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-tbe-custom-prompts#tbe-custom-prompts-list).
 *
 * @param getCustomPromptOptions the {@link GetCustomPromptOptions} containing the options for the
 *     call
 * @return a {@link ServiceCall} with a result of type {@link Prompt}
 */
public ServiceCall<Prompt> getCustomPrompt(GetCustomPromptOptions getCustomPromptOptions) {
com.ibm.cloud.sdk.core.util.Validator.notNull(getCustomPromptOptions, "getCustomPromptOptions cannot be null");
// Substitute both path parameters into the endpoint template.
Map<String, String> pathParams = new HashMap<String, String>();
pathParams.put("customization_id", getCustomPromptOptions.customizationId());
pathParams.put("prompt_id", getCustomPromptOptions.promptId());
RequestBuilder requestBuilder = RequestBuilder.get(RequestBuilder.resolveRequestUrl(getServiceUrl(), "/v1/customizations/{customization_id}/prompts/{prompt_id}", pathParams));
// Attach the SDK analytics headers for this operation.
for (Entry<String, String> sdkHeader : SdkCommon.getSdkHeaders("text_to_speech", "v1", "getCustomPrompt").entrySet()) {
requestBuilder.header(sdkHeader.getKey(), sdkHeader.getValue());
}
requestBuilder.header("Accept", "application/json");
// Deserialize the JSON response into a Prompt model.
ResponseConverter<Prompt> responseConverter = ResponseConverterUtils.getValue(new com.google.gson.reflect.TypeToken<Prompt>() {
}.getType());
return createServiceCall(requestBuilder.build(), responseConverter);
}
Aggregations