use of org.opencastproject.mediapackage.MediaPackageReferenceImpl in project opencast by opencast.
the class SegmentPreviewsWorkflowOperationHandler method createPreviews.
/**
* Creates segment preview images from the video tracks of the media package, using the encoding profile configured for the operation, and updates the current MediaPackage.
*
* @param mediaPackage
*          the media package to extract preview images from
* @param operation
*          the workflow operation providing the configuration
* @return the operation result containing the updated mediapackage
* @throws EncoderException
* @throws ExecutionException
* @throws InterruptedException
* @throws IOException
* @throws NotFoundException
* @throws WorkflowOperationException
*/
private WorkflowOperationResult createPreviews(final MediaPackage mediaPackage, WorkflowOperationInstance operation) throws EncoderException, InterruptedException, ExecutionException, NotFoundException, MediaPackageException, IOException, WorkflowOperationException {
long totalTimeInQueue = 0;
// Read the configuration properties
String sourceVideoFlavor = StringUtils.trimToNull(operation.getConfiguration("source-flavor"));
String sourceTags = StringUtils.trimToNull(operation.getConfiguration("source-tags"));
String targetImageTags = StringUtils.trimToNull(operation.getConfiguration("target-tags"));
String targetImageFlavor = StringUtils.trimToNull(operation.getConfiguration("target-flavor"));
String encodingProfileName = StringUtils.trimToNull(operation.getConfiguration("encoding-profile"));
String referenceFlavor = StringUtils.trimToNull(operation.getConfiguration("reference-flavor"));
String referenceTags = StringUtils.trimToNull(operation.getConfiguration("reference-tags"));
// Find the encoding profile
EncodingProfile profile = composerService.getProfile(encodingProfileName);
if (profile == null)
throw new IllegalStateException("Encoding profile '" + encodingProfileName + "' was not found");
List<String> sourceTagSet = asList(sourceTags);
// Select the tracks based on the tags and flavors
Set<Track> videoTrackSet = new HashSet<>();
for (Track track : mediaPackage.getTracksByTags(sourceTagSet)) {
if (sourceVideoFlavor == null || (track.getFlavor() != null && sourceVideoFlavor.equals(track.getFlavor().toString()))) {
if (!track.hasVideo())
continue;
videoTrackSet.add(track);
}
}
if (videoTrackSet.size() == 0) {
logger.debug("Mediapackage {} has no suitable tracks to extract images based on tags {} and flavor {}", mediaPackage, sourceTags, sourceVideoFlavor);
return createResult(mediaPackage, Action.CONTINUE);
} else {
// Determine the tagset for the reference
List<String> referenceTagSet = asList(referenceTags);
// Determine the reference master
for (Track t : videoTrackSet) {
// Try to load the segments catalog
MediaPackageReference trackReference = new MediaPackageReferenceImpl(t);
Catalog[] segmentCatalogs = mediaPackage.getCatalogs(MediaPackageElements.SEGMENTS, trackReference);
Mpeg7Catalog mpeg7 = null;
if (segmentCatalogs.length > 0) {
mpeg7 = loadMpeg7Catalog(segmentCatalogs[0]);
if (segmentCatalogs.length > 1)
logger.warn("More than one segments catalog found for track {}. Resuming with the first one ({})", t, mpeg7);
} else {
logger.debug("No segments catalog found for track {}", t);
continue;
}
// Check the catalog's consistency
if (mpeg7.videoContent() == null || mpeg7.videoContent().next() == null) {
logger.info("Segments catalog {} contains no video content", mpeg7);
continue;
}
Video videoContent = mpeg7.videoContent().next();
TemporalDecomposition<? extends Segment> decomposition = videoContent.getTemporalDecomposition();
// Are there any segments?
if (decomposition == null || !decomposition.hasSegments()) {
logger.info("Segments catalog {} contains no video content", mpeg7);
continue;
}
// Is a derived track with the configured reference flavor available?
MediaPackageElement referenceMaster = getReferenceMaster(mediaPackage, t, referenceFlavor, referenceTagSet);
// Create the preview images according to the mpeg7 segments
if (t.hasVideo() && mpeg7 != null) {
Iterator<? extends Segment> segmentIterator = decomposition.segments();
List<MediaTimePoint> timePointList = new LinkedList<>();
while (segmentIterator.hasNext()) {
Segment segment = segmentIterator.next();
MediaTimePoint tp = segment.getMediaTime().getMediaTimePoint();
timePointList.add(tp);
}
// convert to time array
double[] timeArray = new double[timePointList.size()];
for (int i = 0; i < timePointList.size(); i++) timeArray[i] = (double) timePointList.get(i).getTimeInMilliseconds() / 1000;
Job job = composerService.image(t, profile.getIdentifier(), timeArray);
if (!waitForStatus(job).isSuccess()) {
throw new WorkflowOperationException("Extracting preview image from " + t + " failed");
}
// Get the latest copy
try {
job = serviceRegistry.getJob(job.getId());
} catch (ServiceRegistryException e) {
throw new WorkflowOperationException(e);
}
// add this receipt's queue time to the total
totalTimeInQueue += job.getQueueTime();
List<? extends MediaPackageElement> composedImages = MediaPackageElementParser.getArrayFromXml(job.getPayload());
Iterator<MediaTimePoint> it = timePointList.iterator();
for (MediaPackageElement element : composedImages) {
Attachment composedImage = (Attachment) element;
if (composedImage == null)
throw new IllegalStateException("Unable to compose image");
// Add the flavor, either from the operation configuration or from the composer
if (targetImageFlavor != null) {
composedImage.setFlavor(MediaPackageElementFlavor.parseFlavor(targetImageFlavor));
logger.debug("Preview image has flavor '{}'", composedImage.getFlavor());
}
// Set the mimetype
if (profile.getMimeType() != null)
composedImage.setMimeType(MimeTypes.parseMimeType(profile.getMimeType()));
// Add tags
for (String tag : asList(targetImageTags)) {
logger.trace("Tagging image with '{}'", tag);
composedImage.addTag(tag);
}
// Refer to the original track including a timestamp
MediaPackageReferenceImpl ref = new MediaPackageReferenceImpl(referenceMaster);
ref.setProperty("time", it.next().toString());
composedImage.setReference(ref);
// store new image in the mediaPackage
mediaPackage.add(composedImage);
String fileName = getFileNameFromElements(t, composedImage);
composedImage.setURI(workspace.moveTo(composedImage.getURI(), mediaPackage.getIdentifier().toString(), composedImage.getIdentifier(), fileName));
}
}
}
}
return createResult(mediaPackage, Action.CONTINUE, totalTimeInQueue);
}
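
The loop above always follows the same pattern when registering a generated preview: build a reference to the reference master, stamp the segment time on it, and add the image to the media package. A minimal sketch of that step, using only the calls shown in createPreviews; the helper class and method names are illustrative and not part of Opencast.

import org.opencastproject.mediapackage.Attachment;
import org.opencastproject.mediapackage.MediaPackage;
import org.opencastproject.mediapackage.MediaPackageElement;
import org.opencastproject.mediapackage.MediaPackageReferenceImpl;

// Hypothetical helper mirroring the preview registration step in createPreviews.
public final class PreviewReferenceExample {

  private PreviewReferenceExample() {
  }

  public static void addPreview(MediaPackage mediaPackage, MediaPackageElement referenceMaster,
          Attachment composedImage, String mediaTimePoint) {
    // Refer to the reference master (the source track or a derived track)
    MediaPackageReferenceImpl ref = new MediaPackageReferenceImpl(referenceMaster);
    // Record which point in the timeline this preview was taken from
    ref.setProperty("time", mediaTimePoint);
    composedImage.setReference(ref);
    // Adding the image prompts the media package to assign it an identifier
    mediaPackage.add(composedImage);
  }
}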
use of org.opencastproject.mediapackage.MediaPackageReferenceImpl in project opencast by opencast.
the class VideoSegmenterWorkflowOperationHandler method start.
/**
* {@inheritDoc}
*
* @see org.opencastproject.workflow.api.WorkflowOperationHandler#start(org.opencastproject.workflow.api.WorkflowInstance,
* JobContext)
*/
public WorkflowOperationResult start(final WorkflowInstance workflowInstance, JobContext context) throws WorkflowOperationException {
logger.debug("Running video segmentation on workflow {}", workflowInstance.getId());
WorkflowOperationInstance operation = workflowInstance.getCurrentOperation();
MediaPackage mediaPackage = workflowInstance.getMediaPackage();
// Find movie track to analyze
String trackFlavor = StringUtils.trimToNull(operation.getConfiguration(PROP_ANALYSIS_TRACK_FLAVOR));
List<String> targetTags = asList(operation.getConfiguration(PROP_TARGET_TAGS));
List<Track> candidates = new ArrayList<Track>();
if (trackFlavor != null)
candidates.addAll(Arrays.asList(mediaPackage.getTracks(MediaPackageElementFlavor.parseFlavor(trackFlavor))));
else
candidates.addAll(Arrays.asList(mediaPackage.getTracks(MediaPackageElements.PRESENTATION_SOURCE)));
// Remove unsupported tracks (only those containing video can be segmented)
Iterator<Track> ti = candidates.iterator();
while (ti.hasNext()) {
Track t = ti.next();
if (!t.hasVideo())
ti.remove();
}
// Found one?
if (candidates.size() == 0) {
logger.info("No matching tracks available for video segmentation in workflow {}", workflowInstance);
return createResult(Action.CONTINUE);
}
// More than one left? Let's be pragmatic...
if (candidates.size() > 1) {
logger.info("Found more than one track to segment, choosing the first one ({})", candidates.get(0));
}
Track track = candidates.get(0);
// Segment the media package
Catalog mpeg7Catalog = null;
Job job = null;
try {
job = videosegmenter.segment(track);
if (!waitForStatus(job).isSuccess()) {
throw new WorkflowOperationException("Video segmentation of " + track + " failed");
}
mpeg7Catalog = (Catalog) MediaPackageElementParser.getFromXml(job.getPayload());
mediaPackage.add(mpeg7Catalog);
mpeg7Catalog.setURI(workspace.moveTo(mpeg7Catalog.getURI(), mediaPackage.getIdentifier().toString(), mpeg7Catalog.getIdentifier(), "segments.xml"));
mpeg7Catalog.setReference(new MediaPackageReferenceImpl(track));
// Add target tags
for (String tag : targetTags) {
mpeg7Catalog.addTag(tag);
}
} catch (Exception e) {
throw new WorkflowOperationException(e);
}
logger.debug("Video segmentation completed");
return createResult(mediaPackage, Action.CONTINUE, job.getQueueTime());
}
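
The track-based reference set here is what lets downstream operations such as createPreviews above find the segments catalog again. A minimal lookup sketch, using the getCatalogs(flavor, reference) call shown earlier; the class name is illustrative.

import org.opencastproject.mediapackage.Catalog;
import org.opencastproject.mediapackage.MediaPackage;
import org.opencastproject.mediapackage.MediaPackageElements;
import org.opencastproject.mediapackage.MediaPackageReference;
import org.opencastproject.mediapackage.MediaPackageReferenceImpl;
import org.opencastproject.mediapackage.Track;

// Hypothetical helper: locate the segments catalog attached to a track by the segmenter.
public final class SegmentsCatalogLookup {

  private SegmentsCatalogLookup() {
  }

  public static Catalog findSegmentsCatalog(MediaPackage mediaPackage, Track track) {
    MediaPackageReference trackReference = new MediaPackageReferenceImpl(track);
    Catalog[] segmentCatalogs = mediaPackage.getCatalogs(MediaPackageElements.SEGMENTS, trackReference);
    return segmentCatalogs.length > 0 ? segmentCatalogs[0] : null;
  }
}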
use of org.opencastproject.mediapackage.MediaPackageReferenceImpl in project opencast by opencast.
the class DublinCoreTest method testClone.
@Test
public void testClone() {
final DublinCoreCatalog dc = DublinCores.mkOpencastEpisode().getCatalog();
final MimeType mimeType = MimeType.mimeType("text", "xml");
dc.setMimeType(MimeType.mimeType("text", "xml"));
dc.setReference(new MediaPackageReferenceImpl("type", "identifier"));
dc.setURI(uri("http://localhost"));
assertNotNull(dc.getMimeType());
assertEquals(mimeType, dc.getMimeType());
// clone
DublinCoreCatalog dcClone = (DublinCoreCatalog) dc.clone();
assertEquals("The mime type should be cloned", dc.getMimeType(), dcClone.getMimeType());
assertEquals("The flavor should be cloned", dc.getFlavor(), dcClone.getFlavor());
assertEquals("The values should be cloned", dc.getValues(), dcClone.getValues());
assertNull("The URI should not be cloned", dcClone.getURI());
assertNull("A media package reference should not be cloned.", dcClone.getReference());
}
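
For completeness, a small test-style sketch of the type/identifier constructor used above. It assumes the reference simply stores and returns the type and identifier it was constructed with; the test class name is illustrative.

import static org.junit.Assert.assertEquals;

import org.opencastproject.mediapackage.MediaPackageReference;
import org.opencastproject.mediapackage.MediaPackageReferenceImpl;

import org.junit.Test;

// Illustrative sketch of the constructor variant used in testClone.
public class MediaPackageReferenceConstructionTest {

  @Test
  public void testTypeAndIdentifierReference() {
    MediaPackageReference ref = new MediaPackageReferenceImpl("type", "identifier");
    // Assumes the constructor arguments are stored verbatim
    assertEquals("type", ref.getType());
    assertEquals("identifier", ref.getIdentifier());
  }
}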
use of org.opencastproject.mediapackage.MediaPackageReferenceImpl in project opencast by opencast.
the class PublishEngageWorkflowOperationHandler method getMediaPackageForSearchIndex.
/**
* Returns a mediapackage that only contains elements that are marked for distribution.
*
* @param current
* the current mediapackage
* @param jobs
* the distribution jobs
* @param downloadSubflavor
* flavor to be applied to elements distributed to download
* @param downloadTargetTags
* tags to be applied to elements distributed to downloads
* @param downloadElementIds
* identifiers for elements that have been distributed to downloads
* @param streamingSubflavor
* flavor to be applied to elements distributed to streaming
* @param streamingElementIds
* identifiers for elements that have been distributed to streaming
* @param streamingTargetTags
* tags to be applied to elements distributed to streaming
* @return the new mediapackage
*/
protected MediaPackage getMediaPackageForSearchIndex(MediaPackage current, List<Job> jobs, MediaPackageElementFlavor downloadSubflavor, String[] downloadTargetTags, Set<String> downloadElementIds, MediaPackageElementFlavor streamingSubflavor, Set<String> streamingElementIds, String[] streamingTargetTags) throws MediaPackageException, NotFoundException, ServiceRegistryException, WorkflowOperationException {
MediaPackage mp = (MediaPackage) current.clone();
// All the jobs have passed, let's update the mediapackage with references to the distributed elements
List<String> elementsToPublish = new ArrayList<String>();
Map<String, String> distributedElementIds = new HashMap<String, String>();
for (Job entry : jobs) {
Job job = serviceRegistry.getJob(entry.getId());
// If there is no payload, then the item has not been distributed.
if (job.getPayload() == null)
continue;
List<MediaPackageElement> distributedElements = null;
try {
distributedElements = (List<MediaPackageElement>) MediaPackageElementParser.getArrayFromXml(job.getPayload());
} catch (MediaPackageException e) {
throw new WorkflowOperationException(e);
}
// If the job finished successfully but returned no new elements, the channel simply does not support this
// kind of element. So we just keep on looping.
if (distributedElements == null || distributedElements.size() < 1)
continue;
for (MediaPackageElement distributedElement : distributedElements) {
String sourceElementId = distributedElement.getIdentifier();
if (sourceElementId != null) {
MediaPackageElement sourceElement = mp.getElementById(sourceElementId);
// Make sure the mediapackage is prompted to create a new identifier for this element
distributedElement.setIdentifier(null);
if (sourceElement != null) {
// Adjust the flavor and tags for downloadable elements
if (downloadElementIds.contains(sourceElementId)) {
if (downloadSubflavor != null) {
MediaPackageElementFlavor flavor = sourceElement.getFlavor();
if (flavor != null) {
MediaPackageElementFlavor newFlavor = new MediaPackageElementFlavor(flavor.getType(), downloadSubflavor.getSubtype());
distributedElement.setFlavor(newFlavor);
}
}
} else if (streamingElementIds.contains(sourceElementId)) {
// Adjust the flavor and tags for streaming elements
if (streamingSubflavor != null && streamingElementIds.contains(sourceElementId)) {
MediaPackageElementFlavor flavor = sourceElement.getFlavor();
if (flavor != null) {
MediaPackageElementFlavor newFlavor = new MediaPackageElementFlavor(flavor.getType(), streamingSubflavor.getSubtype());
distributedElement.setFlavor(newFlavor);
}
}
}
// Copy references from the source elements to the distributed elements
MediaPackageReference ref = sourceElement.getReference();
if (ref != null && mp.getElementByReference(ref) != null) {
MediaPackageReference newReference = (MediaPackageReference) ref.clone();
distributedElement.setReference(newReference);
}
}
}
if (isStreamingFormat(distributedElement))
applyTags(distributedElement, streamingTargetTags);
else
applyTags(distributedElement, downloadTargetTags);
// Add the new element to the mediapackage
mp.add(distributedElement);
elementsToPublish.add(distributedElement.getIdentifier());
distributedElementIds.put(sourceElementId, distributedElement.getIdentifier());
}
}
// Mark everything that is set for removal
List<MediaPackageElement> removals = new ArrayList<MediaPackageElement>();
for (MediaPackageElement element : mp.getElements()) {
if (!elementsToPublish.contains(element.getIdentifier())) {
removals.add(element);
}
}
// Translate references to the distributed artifacts
for (MediaPackageElement element : mp.getElements()) {
if (removals.contains(element))
continue;
// Is the element referencing anything?
MediaPackageReference reference = element.getReference();
if (reference == null)
continue;
// See if the element has been distributed
String distributedElementId = distributedElementIds.get(reference.getIdentifier());
if (distributedElementId == null)
continue;
MediaPackageReference translatedReference = new MediaPackageReferenceImpl(mp.getElementById(distributedElementId));
if (reference.getProperties() != null) {
translatedReference.getProperties().putAll(reference.getProperties());
}
// Set the new reference
element.setReference(translatedReference);
}
// Remove everything we don't want to add to publish
for (MediaPackageElement element : removals) {
mp.remove(element);
}
return mp;
}
use of org.opencastproject.mediapackage.MediaPackageReferenceImpl in project opencast by opencast.
the class TextAnalysisWorkflowOperationHandler method extractVideoText.
/**
* Runs the text analysis service on each of the video segments found.
*
* @param mediaPackage
* the original mediapackage
* @param operation
* the workflow operation
* @throws ExecutionException
* @throws InterruptedException
* @throws NotFoundException
* @throws WorkflowOperationException
*/
protected WorkflowOperationResult extractVideoText(final MediaPackage mediaPackage, WorkflowOperationInstance operation) throws EncoderException, InterruptedException, ExecutionException, IOException, NotFoundException, MediaPackageException, TextAnalyzerException, WorkflowOperationException, ServiceRegistryException {
long totalTimeInQueue = 0;
List<String> sourceTagSet = asList(operation.getConfiguration("source-tags"));
List<String> targetTagSet = asList(operation.getConfiguration("target-tags"));
// Select the catalogs according to the tags
Map<Catalog, Mpeg7Catalog> catalogs = loadSegmentCatalogs(mediaPackage, operation);
// Was there at least one matching catalog
if (catalogs.size() == 0) {
logger.debug("Mediapackage {} has no suitable mpeg-7 catalogs based on tags {} to to run text analysis", mediaPackage, sourceTagSet);
return createResult(mediaPackage, Action.CONTINUE);
}
// Loop over all existing segment catalogs
for (Entry<Catalog, Mpeg7Catalog> mapEntry : catalogs.entrySet()) {
Map<VideoSegment, Job> jobs = new HashMap<VideoSegment, Job>();
List<Attachment> images = new LinkedList<Attachment>();
Catalog segmentCatalog = mapEntry.getKey();
try {
MediaPackageReference catalogRef = segmentCatalog.getReference();
// Make sure we can figure out the source track
if (catalogRef == null) {
logger.info("Skipping catalog {} since we can't determine the source track", segmentCatalog);
} else if (mediaPackage.getElementByReference(catalogRef) == null) {
logger.info("Skipping catalog {} since we can't determine the source track", segmentCatalog);
} else if (!(mediaPackage.getElementByReference(catalogRef) instanceof Track)) {
logger.info("Skipping catalog {} since it's source was not a track", segmentCatalog);
}
logger.info("Analyzing mpeg-7 segments catalog {} for text", segmentCatalog);
// Create a copy that will contain the segments enriched with the video text elements
Mpeg7Catalog textCatalog = mapEntry.getValue().clone();
Track sourceTrack = mediaPackage.getTrack(catalogRef.getIdentifier());
// Load the temporal decomposition (segments)
Video videoContent = textCatalog.videoContent().next();
TemporalDecomposition<? extends Segment> decomposition = videoContent.getTemporalDecomposition();
Iterator<? extends Segment> segmentIterator = decomposition.segments();
// For every segment, try to find the still image and run text analysis on it
List<VideoSegment> videoSegments = new LinkedList<VideoSegment>();
while (segmentIterator.hasNext()) {
Segment segment = segmentIterator.next();
if ((segment instanceof VideoSegment))
videoSegments.add((VideoSegment) segment);
}
// argument array for image extraction
long[] times = new long[videoSegments.size()];
for (int i = 0; i < videoSegments.size(); i++) {
VideoSegment videoSegment = videoSegments.get(i);
MediaTimePoint segmentTimePoint = videoSegment.getMediaTime().getMediaTimePoint();
MediaDuration segmentDuration = videoSegment.getMediaTime().getMediaDuration();
// Choose a time
MediaPackageReference reference = null;
if (catalogRef == null)
reference = new MediaPackageReferenceImpl();
else
reference = new MediaPackageReferenceImpl(catalogRef.getType(), catalogRef.getIdentifier());
reference.setProperty("time", segmentTimePoint.toString());
// Have the time for the ocr image created. To circumvent problems with slowly building slides, we take the
// image that is almost at the end of the segment; it should contain the most content and is stable as well.
long startTimeSeconds = segmentTimePoint.getTimeInMilliseconds() / 1000;
long durationSeconds = segmentDuration.getDurationInMilliseconds() / 1000;
times[i] = Math.max(startTimeSeconds + durationSeconds - stabilityThreshold + 1, 0);
}
// Have the ocr image(s) created.
// TODO: Note that the way of having one image extracted after the other is suited for
// the ffmpeg-based encoder. When switching to other encoding engines such as gstreamer, it might be preferable
// to pass in all timepoints to the image extraction method at once.
SortedMap<Long, Job> extractImageJobs = new TreeMap<Long, Job>();
try {
for (long time : times) {
extractImageJobs.put(time, composer.image(sourceTrack, IMAGE_EXTRACTION_PROFILE, time));
}
if (!waitForStatus(extractImageJobs.values().toArray(new Job[extractImageJobs.size()])).isSuccess())
throw new WorkflowOperationException("Extracting scene image from " + sourceTrack + " failed");
for (Map.Entry<Long, Job> entry : extractImageJobs.entrySet()) {
Job job = serviceRegistry.getJob(entry.getValue().getId());
Attachment image = (Attachment) MediaPackageElementParser.getFromXml(job.getPayload());
images.add(image);
totalTimeInQueue += job.getQueueTime();
}
} catch (EncoderException e) {
logger.error("Error creating still image(s) from {}", sourceTrack);
throw e;
}
// Run text extraction on each of the images
Iterator<VideoSegment> it = videoSegments.iterator();
for (MediaPackageElement element : images) {
Attachment image = (Attachment) element;
VideoSegment videoSegment = it.next();
jobs.put(videoSegment, analysisService.extract(image));
}
// Wait for all jobs to be finished
if (!waitForStatus(jobs.values().toArray(new Job[jobs.size()])).isSuccess()) {
throw new WorkflowOperationException("Text extraction failed on images from " + sourceTrack);
}
// Process the text extraction results
for (Map.Entry<VideoSegment, Job> entry : jobs.entrySet()) {
Job job = serviceRegistry.getJob(entry.getValue().getId());
totalTimeInQueue += job.getQueueTime();
VideoSegment videoSegment = entry.getKey();
MediaDuration segmentDuration = videoSegment.getMediaTime().getMediaDuration();
Catalog catalog = (Catalog) MediaPackageElementParser.getFromXml(job.getPayload());
if (catalog == null) {
logger.warn("Text analysis did not return a valid mpeg7 for segment {}", videoSegment);
continue;
}
Mpeg7Catalog videoTextCatalog = loadMpeg7Catalog(catalog);
if (videoTextCatalog == null)
throw new IllegalStateException("Text analysis service did not return a valid mpeg7");
// Add the spatiotemporal decompositions from the new catalog to the existing video segments
Iterator<Video> videoTextContents = videoTextCatalog.videoContent();
if (videoTextContents == null || !videoTextContents.hasNext()) {
logger.debug("Text analysis was not able to extract any text from {}", job.getArguments().get(0));
break;
}
try {
Video textVideoContent = videoTextContents.next();
VideoSegment textVideoSegment = (VideoSegment) textVideoContent.getTemporalDecomposition().segments().next();
VideoText[] videoTexts = textVideoSegment.getSpatioTemporalDecomposition().getVideoText();
SpatioTemporalDecomposition std = videoSegment.createSpatioTemporalDecomposition(true, false);
for (VideoText videoText : videoTexts) {
MediaTime mediaTime = new MediaTimeImpl(new MediaRelTimePointImpl(0), segmentDuration);
SpatioTemporalLocator locator = new SpatioTemporalLocatorImpl(mediaTime);
videoText.setSpatioTemporalLocator(locator);
std.addVideoText(videoText);
}
} catch (Exception e) {
logger.warn("The mpeg-7 structure returned by the text analyzer is not what is expected", e);
continue;
}
}
// Put the catalog into the workspace and add it to the media package
MediaPackageElementBuilder builder = MediaPackageElementBuilderFactory.newInstance().newElementBuilder();
Catalog catalog = (Catalog) builder.newElement(MediaPackageElement.Type.Catalog, MediaPackageElements.TEXTS);
catalog.setIdentifier(null);
catalog.setReference(segmentCatalog.getReference());
// the catalog now has an ID, so we can store the file properly
mediaPackage.add(catalog);
InputStream in = mpeg7CatalogService.serialize(textCatalog);
String filename = "slidetext.xml";
URI workspaceURI = workspace.put(mediaPackage.getIdentifier().toString(), catalog.getIdentifier(), filename, in);
catalog.setURI(workspaceURI);
// Since we've enriched and stored the mpeg7 catalog, remove the original
try {
mediaPackage.remove(segmentCatalog);
workspace.delete(segmentCatalog.getURI());
} catch (Exception e) {
logger.warn("Unable to delete segment catalog {}: {}", segmentCatalog.getURI(), e);
}
// Add flavor and target tags
catalog.setFlavor(MediaPackageElements.TEXTS);
for (String tag : targetTagSet) {
catalog.addTag(tag);
}
} finally {
// Remove images that were created for text extraction
logger.debug("Removing temporary images");
for (Attachment image : images) {
try {
workspace.delete(image.getURI());
} catch (Exception e) {
logger.warn("Unable to delete temporary image {}: {}", image.getURI(), e);
}
}
// Remove the temporary text
for (Job j : jobs.values()) {
Catalog catalog = null;
try {
Job job = serviceRegistry.getJob(j.getId());
if (!Job.Status.FINISHED.equals(job.getStatus()))
continue;
catalog = (Catalog) MediaPackageElementParser.getFromXml(job.getPayload());
if (catalog != null)
workspace.delete(catalog.getURI());
} catch (Exception e) {
if (catalog != null) {
logger.warn("Unable to delete temporary text file {}: {}", catalog.getURI(), e);
} else {
logger.warn("Unable to parse textextraction payload of job {}", j.getId());
}
}
}
}
}
logger.debug("Text analysis completed");
return createResult(mediaPackage, Action.CONTINUE, totalTimeInQueue);
}
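
The per-segment reference construction near the top of this method follows a recurring pattern: reuse the catalog reference's type and identifier when present, fall back to a default reference otherwise, and stamp the segment time as a property. A minimal sketch under those assumptions; the class and method names are illustrative, and the time string stands in for segmentTimePoint.toString().

import org.opencastproject.mediapackage.MediaPackageReference;
import org.opencastproject.mediapackage.MediaPackageReferenceImpl;

// Hypothetical helper mirroring the per-segment reference construction in extractVideoText.
public final class SegmentReferenceExample {

  private SegmentReferenceExample() {
  }

  public static MediaPackageReference segmentReference(MediaPackageReference catalogRef, String segmentTime) {
    MediaPackageReference reference;
    if (catalogRef == null)
      reference = new MediaPackageReferenceImpl();
    else
      reference = new MediaPackageReferenceImpl(catalogRef.getType(), catalogRef.getIdentifier());
    // Record the segment start time so the extracted image can be matched to its segment
    reference.setProperty("time", segmentTime);
    return reference;
  }
}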