Use of com.amazonaws.services.rekognition.model.GetContentModerationRequest in project aws-doc-sdk-examples by awsdocs.
The snippet below shows the GetResultsModerationLabels method of the VideoDetect class.
import java.util.List;

import com.amazonaws.services.rekognition.model.ContentModerationDetection;
import com.amazonaws.services.rekognition.model.ContentModerationSortBy;
import com.amazonaws.services.rekognition.model.GetContentModerationRequest;
import com.amazonaws.services.rekognition.model.GetContentModerationResult;
import com.amazonaws.services.rekognition.model.VideoMetadata;

// Gets the results of unsafe content label detection by calling
// GetContentModeration. Analysis is started by a call to StartContentModeration.
// Note: rek (the AmazonRekognition client) and startJobId are fields of the
// enclosing VideoDetect class.
private static void GetResultsModerationLabels() throws Exception {
    int maxResults = 10;
    String paginationToken = null;
    GetContentModerationResult moderationLabelDetectionResult = null;

    do {
        // On subsequent iterations, continue from the previous page of results.
        if (moderationLabelDetectionResult != null) {
            paginationToken = moderationLabelDetectionResult.getNextToken();
        }

        moderationLabelDetectionResult = rek.getContentModeration(
                new GetContentModerationRequest()
                        .withJobId(startJobId)
                        .withNextToken(paginationToken)
                        .withSortBy(ContentModerationSortBy.TIMESTAMP)
                        .withMaxResults(maxResults));

        VideoMetadata videoMetaData = moderationLabelDetectionResult.getVideoMetadata();
        System.out.println("Format: " + videoMetaData.getFormat());
        System.out.println("Codec: " + videoMetaData.getCodec());
        System.out.println("Duration: " + videoMetaData.getDurationMillis());
        System.out.println("FrameRate: " + videoMetaData.getFrameRate());

        // Show moderated content labels, confidence and detection times.
        List<ContentModerationDetection> moderationLabelsInFrames =
                moderationLabelDetectionResult.getModerationLabels();
        for (ContentModerationDetection label : moderationLabelsInFrames) {
            long seconds = label.getTimestamp() / 1000;
            System.out.print("Sec: " + Long.toString(seconds));
            System.out.println(label.getModerationLabel().toString());
            System.out.println();
        }
    } while (moderationLabelDetectionResult != null
            && moderationLabelDetectionResult.getNextToken() != null);
}
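
The method above assumes that startJobId already holds the job ID returned by an earlier call to StartContentModeration. The sketch below shows how such a job might be started for a video stored in Amazon S3. It is an illustrative sketch, not the project's own starter method: the bucket and key parameters, the method name StartUnsafeContentDetection, and the SNS topic and IAM role ARNs are placeholders, while rek and startJobId are assumed to be the same class fields used in GetResultsModerationLabels.

import com.amazonaws.services.rekognition.model.NotificationChannel;
import com.amazonaws.services.rekognition.model.S3Object;
import com.amazonaws.services.rekognition.model.StartContentModerationRequest;
import com.amazonaws.services.rekognition.model.StartContentModerationResult;
import com.amazonaws.services.rekognition.model.Video;

// Sketch only: starts an unsafe content detection job for a video in S3 and
// records the job ID so GetResultsModerationLabels can fetch the results later.
// The bucket, key, and ARN values below are placeholders.
private static void StartUnsafeContentDetection(String bucket, String video) throws Exception {
    StartContentModerationRequest request = new StartContentModerationRequest()
            .withVideo(new Video()
                    .withS3Object(new S3Object()
                            .withBucket(bucket)
                            .withName(video)))
            // Optional: publish a completion message to an SNS topic so the
            // caller can wait for the job to finish before fetching results.
            .withNotificationChannel(new NotificationChannel()
                    .withSNSTopicArn("arn:aws:sns:us-east-1:123456789012:AmazonRekognitionTopic")
                    .withRoleArn("arn:aws:iam::123456789012:role/RekognitionSnsRole"));

    StartContentModerationResult result = rek.startContentModeration(request);
    startJobId = result.getJobId();
    System.out.println("Started content moderation job: " + startJobId);
}

In the full VideoDetect example, the caller waits for a job-completion notification (for example, by polling an SQS queue subscribed to the SNS topic) before calling GetResultsModerationLabels.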