use of org.opencastproject.metadata.mpeg7.MediaTime in project opencast by opencast.
the class TextAnalyzerServiceImpl method extract.
/**
* Performs text extraction on the image and returns a catalog containing the final result in the form of an
* Mpeg7Catalog.
*
* @param job
* the job this extraction is run for
* @param image
* the element to analyze
* @return a catalog containing the resulting mpeg-7 metadata
* @throws TextAnalyzerException
* @throws MediaPackageException
*/
private Catalog extract(Job job, Attachment image) throws TextAnalyzerException, MediaPackageException {
final Attachment attachment = image;
final URI imageUrl = attachment.getURI();
File imageFile = null;
try {
Mpeg7CatalogImpl mpeg7 = Mpeg7CatalogImpl.newInstance();
logger.info("Starting text extraction from {}", imageUrl);
try {
imageFile = workspace.get(imageUrl);
} catch (NotFoundException e) {
throw new TextAnalyzerException("Image " + imageUrl + " not found in workspace", e);
} catch (IOException e) {
throw new TextAnalyzerException("Unable to access " + imageUrl + " in workspace", e);
}
VideoText[] videoTexts = analyze(imageFile, image.getIdentifier());
// Create a temporal decomposition
MediaTime mediaTime = new MediaTimeImpl(0, 0);
Video avContent = mpeg7.addVideoContent(image.getIdentifier(), mediaTime, null);
TemporalDecomposition<VideoSegment> temporalDecomposition = (TemporalDecomposition<VideoSegment>) avContent.getTemporalDecomposition();
// Add a segment
VideoSegment videoSegment = temporalDecomposition.createSegment("segment-0");
videoSegment.setMediaTime(mediaTime);
// Add the video text to the spatio-temporal decomposition of the segment
SpatioTemporalDecomposition spatioTemporalDecomposition = videoSegment.createSpatioTemporalDecomposition(true, false);
for (VideoText videoText : videoTexts) {
spatioTemporalDecomposition.addVideoText(videoText);
}
logger.info("Text extraction of {} finished, {} lines found", attachment.getURI(), videoTexts.length);
URI uri;
InputStream in;
try {
in = mpeg7CatalogService.serialize(mpeg7);
} catch (IOException e) {
throw new TextAnalyzerException("Error serializing mpeg7", e);
}
try {
uri = workspace.putInCollection(COLLECTION_ID, job.getId() + ".xml", in);
} catch (IOException e) {
throw new TextAnalyzerException("Unable to put mpeg7 into the workspace", e);
}
Catalog catalog = (Catalog) MediaPackageElementBuilderFactory.newInstance().newElementBuilder().newElement(Catalog.TYPE, MediaPackageElements.TEXTS);
catalog.setURI(uri);
logger.debug("Created MPEG7 catalog for {}", imageUrl);
return catalog;
} catch (Exception e) {
logger.warn("Error extracting text from " + imageUrl, e);
if (e instanceof TextAnalyzerException) {
throw (TextAnalyzerException) e;
} else {
throw new TextAnalyzerException(e);
}
} finally {
try {
workspace.delete(imageUrl);
} catch (Exception e) {
logger.warn("Unable to delete temporary text analysis image {}: {}", imageUrl, e);
}
}
}
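Distilled from the method above, a minimal sketch of the MediaTime usage (illustrative only: the identifier is a placeholder, the workspace and serialization plumbing is omitted, and, judging from the caption converter further below, the two long arguments of MediaTimeImpl are a start time and a duration in milliseconds):

// Build an empty MPEG-7 catalog and describe one piece of video content.
Mpeg7Catalog mpeg7 = Mpeg7CatalogImpl.newInstance();
MediaTime mediaTime = new MediaTimeImpl(0, 0);
Video video = mpeg7.addVideoContent("image-1", mediaTime, null);
// The temporal decomposition holds the segments; the cast mirrors the one used in extract().
@SuppressWarnings("unchecked")
TemporalDecomposition<VideoSegment> decomposition = (TemporalDecomposition<VideoSegment>) video.getTemporalDecomposition();
VideoSegment segment = decomposition.createSegment("segment-0");
segment.setMediaTime(mediaTime);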
use of org.opencastproject.metadata.mpeg7.MediaTime in project opencast by opencast.
the class Mpeg7CaptionConverter method exportCaption.
@Override
public void exportCaption(OutputStream outputStream, List<Caption> captions, String language) throws IOException {
Mpeg7Catalog mpeg7 = Mpeg7CatalogImpl.newInstance();
MediaTime mediaTime = new MediaTimeImpl(0, 0);
Audio audioContent = mpeg7.addAudioContent("captions", mediaTime, null);
@SuppressWarnings("unchecked") TemporalDecomposition<AudioSegment> captionDecomposition = (TemporalDecomposition<AudioSegment>) audioContent.getTemporalDecomposition();
int segmentCount = 0;
for (Caption caption : captions) {
// Get all the words/parts for the transcript
String[] words = caption.getCaption();
if (words.length == 0)
continue;
// Create a new segment
AudioSegment segment = captionDecomposition.createSegment("segment-" + segmentCount++);
Time captionST = caption.getStartTime();
Time captionET = caption.getStopTime();
// Calculate start time
Calendar startTime = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
startTime.setTimeInMillis(0);
startTime.add(Calendar.HOUR_OF_DAY, captionST.getHours());
startTime.add(Calendar.MINUTE, captionST.getMinutes());
startTime.add(Calendar.SECOND, captionST.getSeconds());
startTime.add(Calendar.MILLISECOND, captionST.getMilliseconds());
// Calculate end time
Calendar endTime = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
endTime.setTimeInMillis(0);
endTime.add(Calendar.HOUR_OF_DAY, captionET.getHours());
endTime.add(Calendar.MINUTE, captionET.getMinutes());
endTime.add(Calendar.SECOND, captionET.getSeconds());
endTime.add(Calendar.MILLISECOND, captionET.getMilliseconds());
long startTimeInMillis = startTime.getTimeInMillis();
long endTimeInMillis = endTime.getTimeInMillis();
long duration = endTimeInMillis - startTimeInMillis;
segment.setMediaTime(new MediaTimeImpl(startTimeInMillis, duration));
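// The numeric arguments of createTextAnnotation below are presumably relevance and confidence scores
// (both left at 0 here); only the language is set explicitly.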
TextAnnotation textAnnotation = segment.createTextAnnotation(0, 0, language);
// Collect all the words in the segment
StringBuffer captionLine = new StringBuffer();
// Append each word/part, separated by a blank
for (String word : words) {
if (captionLine.length() > 0)
captionLine.append(' ');
captionLine.append(word);
}
// Append the text to the annotation
textAnnotation.addFreeTextAnnotation(new FreeTextAnnotationImpl(captionLine.toString()));
}
Transformer tf = null;
try {
tf = TransformerFactory.newInstance().newTransformer();
DOMSource xmlSource = new DOMSource(mpeg7.toXml());
tf.transform(xmlSource, new StreamResult(outputStream));
} catch (TransformerConfigurationException e) {
logger.warn("Error serializing mpeg7 captions catalog: {}", e.getMessage());
throw new IOException(e);
} catch (TransformerFactoryConfigurationError e) {
logger.warn("Error serializing mpeg7 captions catalog: {}", e.getMessage());
throw new IOException(e);
} catch (TransformerException e) {
logger.warn("Error serializing mpeg7 captions catalog: {}", e.getMessage());
throw new IOException(e);
} catch (ParserConfigurationException e) {
logger.warn("Error serializing mpeg7 captions catalog: {}", e.getMessage());
throw new IOException(e);
}
}
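The Calendar arithmetic above only converts each caption Time into a millisecond offset; an equivalent, more direct conversion could look like the following sketch (toMillis is a hypothetical helper, not part of the original converter):

// Convert a caption timestamp into milliseconds from the start of the track.
private static long toMillis(Time time) {
  return ((time.getHours() * 60L + time.getMinutes()) * 60L + time.getSeconds()) * 1000L + time.getMilliseconds();
}

With it, the segment time above would simply be segment.setMediaTime(new MediaTimeImpl(toMillis(caption.getStartTime()), toMillis(caption.getStopTime()) - toMillis(caption.getStartTime()))).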
use of org.opencastproject.metadata.mpeg7.MediaTime in project opencast by opencast.
the class TextAnalysisWorkflowOperationHandler method extractVideoText.
/**
* Runs the text analysis service on each of the video segments found.
*
* @param mediaPackage
* the original mediapackage
* @param operation
* the workflow operation
* @throws ExecutionException
* @throws InterruptedException
* @throws NotFoundException
* @throws WorkflowOperationException
*/
protected WorkflowOperationResult extractVideoText(final MediaPackage mediaPackage, WorkflowOperationInstance operation) throws EncoderException, InterruptedException, ExecutionException, IOException, NotFoundException, MediaPackageException, TextAnalyzerException, WorkflowOperationException, ServiceRegistryException {
long totalTimeInQueue = 0;
List<String> sourceTagSet = asList(operation.getConfiguration("source-tags"));
List<String> targetTagSet = asList(operation.getConfiguration("target-tags"));
// Select the catalogs according to the tags
Map<Catalog, Mpeg7Catalog> catalogs = loadSegmentCatalogs(mediaPackage, operation);
// Was there at least one matching catalog?
if (catalogs.size() == 0) {
logger.debug("Mediapackage {} has no suitable mpeg-7 catalogs based on tags {} to to run text analysis", mediaPackage, sourceTagSet);
return createResult(mediaPackage, Action.CONTINUE);
}
// Loop over all existing segment catalogs
for (Entry<Catalog, Mpeg7Catalog> mapEntry : catalogs.entrySet()) {
Map<VideoSegment, Job> jobs = new HashMap<VideoSegment, Job>();
List<Attachment> images = new LinkedList<Attachment>();
Catalog segmentCatalog = mapEntry.getKey();
try {
MediaPackageReference catalogRef = segmentCatalog.getReference();
// Make sure we can figure out the source track
if (catalogRef == null) {
logger.info("Skipping catalog {} since we can't determine the source track", segmentCatalog);
} else if (mediaPackage.getElementByReference(catalogRef) == null) {
logger.info("Skipping catalog {} since we can't determine the source track", segmentCatalog);
} else if (!(mediaPackage.getElementByReference(catalogRef) instanceof Track)) {
logger.info("Skipping catalog {} since it's source was not a track", segmentCatalog);
}
logger.info("Analyzing mpeg-7 segments catalog {} for text", segmentCatalog);
// Create a copy that will contain the segments enriched with the video text elements
Mpeg7Catalog textCatalog = mapEntry.getValue().clone();
Track sourceTrack = mediaPackage.getTrack(catalogRef.getIdentifier());
// Load the temporal decomposition (segments)
Video videoContent = textCatalog.videoContent().next();
TemporalDecomposition<? extends Segment> decomposition = videoContent.getTemporalDecomposition();
Iterator<? extends Segment> segmentIterator = decomposition.segments();
// For every segment, try to find the still image and run text analysis on it
List<VideoSegment> videoSegments = new LinkedList<VideoSegment>();
while (segmentIterator.hasNext()) {
Segment segment = segmentIterator.next();
if ((segment instanceof VideoSegment))
videoSegments.add((VideoSegment) segment);
}
// argument array for image extraction
long[] times = new long[videoSegments.size()];
for (int i = 0; i < videoSegments.size(); i++) {
VideoSegment videoSegment = videoSegments.get(i);
MediaTimePoint segmentTimePoint = videoSegment.getMediaTime().getMediaTimePoint();
MediaDuration segmentDuration = videoSegment.getMediaTime().getMediaDuration();
// Choose a time
MediaPackageReference reference = null;
if (catalogRef == null)
reference = new MediaPackageReferenceImpl();
else
reference = new MediaPackageReferenceImpl(catalogRef.getType(), catalogRef.getIdentifier());
reference.setProperty("time", segmentTimePoint.toString());
// Have the time for the OCR image created. To circumvent problems with slowly building slides, we take an
// image that is almost at the end of the segment; it should contain the most content and be stable as well.
long startTimeSeconds = segmentTimePoint.getTimeInMilliseconds() / 1000;
long durationSeconds = segmentDuration.getDurationInMilliseconds() / 1000;
times[i] = Math.max(startTimeSeconds + durationSeconds - stabilityThreshold + 1, 0);
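// Worked example with assumed numbers: a segment starting at 10 000 ms with a duration of 30 000 ms and a
// stabilityThreshold of 5 (seconds) yields times[i] = max(10 + 30 - 5 + 1, 0) = 36, i.e. an image taken
// shortly before the segment ends.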
}
// Have the ocr image(s) created.
// TODO: Note that the way of having one image extracted after the other is suited for
// the ffmpeg-based encoder. When switching to other encoding engines such as gstreamer, it might be preferable
// to pass in all timepoints to the image extraction method at once.
SortedMap<Long, Job> extractImageJobs = new TreeMap<Long, Job>();
try {
for (long time : times) {
extractImageJobs.put(time, composer.image(sourceTrack, IMAGE_EXTRACTION_PROFILE, time));
}
if (!waitForStatus(extractImageJobs.values().toArray(new Job[extractImageJobs.size()])).isSuccess())
throw new WorkflowOperationException("Extracting scene image from " + sourceTrack + " failed");
for (Map.Entry<Long, Job> entry : extractImageJobs.entrySet()) {
Job job = serviceRegistry.getJob(entry.getValue().getId());
Attachment image = (Attachment) MediaPackageElementParser.getFromXml(job.getPayload());
images.add(image);
totalTimeInQueue += job.getQueueTime();
}
} catch (EncoderException e) {
logger.error("Error creating still image(s) from {}", sourceTrack);
throw e;
}
// Run text extraction on each of the images
Iterator<VideoSegment> it = videoSegments.iterator();
for (MediaPackageElement element : images) {
Attachment image = (Attachment) element;
VideoSegment videoSegment = it.next();
jobs.put(videoSegment, analysisService.extract(image));
}
// Wait for all jobs to be finished
if (!waitForStatus(jobs.values().toArray(new Job[jobs.size()])).isSuccess()) {
throw new WorkflowOperationException("Text extraction failed on images from " + sourceTrack);
}
// Process the text extraction results
for (Map.Entry<VideoSegment, Job> entry : jobs.entrySet()) {
Job job = serviceRegistry.getJob(entry.getValue().getId());
totalTimeInQueue += job.getQueueTime();
VideoSegment videoSegment = entry.getKey();
MediaDuration segmentDuration = videoSegment.getMediaTime().getMediaDuration();
Catalog catalog = (Catalog) MediaPackageElementParser.getFromXml(job.getPayload());
if (catalog == null) {
logger.warn("Text analysis did not return a valid mpeg7 for segment {}", videoSegment);
continue;
}
Mpeg7Catalog videoTextCatalog = loadMpeg7Catalog(catalog);
if (videoTextCatalog == null)
throw new IllegalStateException("Text analysis service did not return a valid mpeg7");
// Add the spatiotemporal decompositions from the new catalog to the existing video segments
Iterator<Video> videoTextContents = videoTextCatalog.videoContent();
if (videoTextContents == null || !videoTextContents.hasNext()) {
logger.debug("Text analysis was not able to extract any text from {}", job.getArguments().get(0));
break;
}
try {
Video textVideoContent = videoTextContents.next();
VideoSegment textVideoSegment = (VideoSegment) textVideoContent.getTemporalDecomposition().segments().next();
VideoText[] videoTexts = textVideoSegment.getSpatioTemporalDecomposition().getVideoText();
SpatioTemporalDecomposition std = videoSegment.createSpatioTemporalDecomposition(true, false);
for (VideoText videoText : videoTexts) {
MediaTime mediaTime = new MediaTimeImpl(new MediaRelTimePointImpl(0), segmentDuration);
SpatioTemporalLocator locator = new SpatioTemporalLocatorImpl(mediaTime);
videoText.setSpatioTemporalLocator(locator);
std.addVideoText(videoText);
}
} catch (Exception e) {
logger.warn("The mpeg-7 structure returned by the text analyzer is not what is expected", e);
continue;
}
}
// Put the catalog into the workspace and add it to the media package
MediaPackageElementBuilder builder = MediaPackageElementBuilderFactory.newInstance().newElementBuilder();
Catalog catalog = (Catalog) builder.newElement(MediaPackageElement.Type.Catalog, MediaPackageElements.TEXTS);
catalog.setIdentifier(null);
catalog.setReference(segmentCatalog.getReference());
// adding the catalog to the media package assigns it an ID, so we can then store the file properly
mediaPackage.add(catalog);
InputStream in = mpeg7CatalogService.serialize(textCatalog);
String filename = "slidetext.xml";
URI workspaceURI = workspace.put(mediaPackage.getIdentifier().toString(), catalog.getIdentifier(), filename, in);
catalog.setURI(workspaceURI);
// Since we've enriched and stored the mpeg7 catalog, remove the original
try {
mediaPackage.remove(segmentCatalog);
workspace.delete(segmentCatalog.getURI());
} catch (Exception e) {
logger.warn("Unable to delete segment catalog {}: {}", segmentCatalog.getURI(), e);
}
// Add flavor and target tags
catalog.setFlavor(MediaPackageElements.TEXTS);
for (String tag : targetTagSet) {
catalog.addTag(tag);
}
} finally {
// Remove images that were created for text extraction
logger.debug("Removing temporary images");
for (Attachment image : images) {
try {
workspace.delete(image.getURI());
} catch (Exception e) {
logger.warn("Unable to delete temporary image {}: {}", image.getURI(), e);
}
}
// Remove the temporary text
for (Job j : jobs.values()) {
Catalog catalog = null;
try {
Job job = serviceRegistry.getJob(j.getId());
if (!Job.Status.FINISHED.equals(job.getStatus()))
continue;
catalog = (Catalog) MediaPackageElementParser.getFromXml(job.getPayload());
if (catalog != null)
workspace.delete(catalog.getURI());
} catch (Exception e) {
if (catalog != null) {
logger.warn("Unable to delete temporary text file {}: {}", catalog.getURI(), e);
} else {
logger.warn("Unable to parse textextraction payload of job {}", j.getId());
}
}
}
}
}
logger.debug("Text analysis completed");
return createResult(mediaPackage, Action.CONTINUE, totalTimeInQueue);
}
use of org.opencastproject.metadata.mpeg7.MediaTime in project opencast by opencast.
the class VideoSegmenterServiceImpl method uniformSegmentation.
/**
* Creates a uniform segmentation for a given track, with prefNumber as the number of segments,
* which will all have the same length.
*
* @param track the track that is segmented
* @param segmentsNew will be filled with the list of new segments (pass null if not required)
* @param prefNumber number of generated segments
* @return Mpeg7Catalog that can later be saved in a Catalog as the end result
*/
protected Mpeg7Catalog uniformSegmentation(Track track, LinkedList<Segment> segmentsNew, int prefNumber) {
if (segmentsNew == null) {
segmentsNew = new LinkedList<Segment>();
}
MediaTime contentTime = new MediaRelTimeImpl(0, track.getDuration());
MediaLocator contentLocator = new MediaLocatorImpl(track.getURI());
Mpeg7Catalog mpeg7 = mpeg7CatalogService.newInstance();
Video videoContent = mpeg7.addVideoContent("videosegment", contentTime, contentLocator);
long segmentDuration = track.getDuration() / prefNumber;
long currentSegStart = 0;
// create "prefNumber"-many segments that all have the same length
for (int i = 1; i < prefNumber; i++) {
Segment s = videoContent.getTemporalDecomposition().createSegment("segment-" + i);
s.setMediaTime(new MediaRelTimeImpl(currentSegStart, segmentDuration));
segmentsNew.add(s);
currentSegStart += segmentDuration;
}
// add last segment separately to make sure the last segment ends exactly at the end of the track
Segment s = videoContent.getTemporalDecomposition().createSegment("segment-" + prefNumber);
s.setMediaTime(new MediaRelTimeImpl(currentSegStart, track.getDuration() - currentSegStart));
segmentsNew.add(s);
return mpeg7;
}
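A hypothetical call of the method above, assuming a track with a duration of 100 000 ms (variable names are illustrative):

LinkedList<Segment> segments = new LinkedList<Segment>();
Mpeg7Catalog catalog = uniformSegmentation(track, segments, 4);
// Integer division gives a nominal length of 25 000 ms; the loop creates the first three segments with
// that length, and the last segment is added separately so that it ends exactly at track.getDuration().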
use of org.opencastproject.metadata.mpeg7.MediaTime in project opencast by opencast.
the class VideoSegmenterServiceImpl method filterSegmentation.
/**
* Merges small subsequent segments (with high difference) into a bigger one.
*
* @param segments list of segments to be filtered
* @param track the track that is segmented
* @param segmentsNew will be filled with the list of new segments (pass null if not required)
* @param mergeThresh minimum duration for a segment in milliseconds
* @return Mpeg7Catalog that can later be saved in a Catalog as the end result
*/
protected Mpeg7Catalog filterSegmentation(LinkedList<Segment> segments, Track track, LinkedList<Segment> segmentsNew, int mergeThresh) {
if (segmentsNew == null) {
segmentsNew = new LinkedList<Segment>();
}
boolean merging = false;
MediaTime contentTime = new MediaRelTimeImpl(0, track.getDuration());
MediaLocator contentLocator = new MediaLocatorImpl(track.getURI());
Mpeg7Catalog mpeg7 = mpeg7CatalogService.newInstance();
Video videoContent = mpeg7.addVideoContent("videosegment", contentTime, contentLocator);
int segmentcount = 1;
MediaTimePoint currentSegStart = new MediaTimePointImpl();
for (Segment o : segments) {
// if the current segment is shorter than the merge threshold, start merging
if (o.getMediaTime().getMediaDuration().getDurationInMilliseconds() <= mergeThresh) {
// start merging and save beginning of new segment that will be generated
if (!merging) {
currentSegStart = o.getMediaTime().getMediaTimePoint();
merging = true;
}
// current segment is longer than merge threshold
} else {
long currentSegDuration = o.getMediaTime().getMediaDuration().getDurationInMilliseconds();
long currentSegEnd = o.getMediaTime().getMediaTimePoint().getTimeInMilliseconds() + currentSegDuration;
if (merging) {
long newDuration = o.getMediaTime().getMediaTimePoint().getTimeInMilliseconds() - currentSegStart.getTimeInMilliseconds();
// save new segment that merges all previously skipped short segments
if (newDuration >= mergeThresh) {
Segment s = videoContent.getTemporalDecomposition().createSegment("segment-" + segmentcount++);
s.setMediaTime(new MediaRelTimeImpl(currentSegStart.getTimeInMilliseconds(), newDuration));
segmentsNew.add(s);
// copy the following long segment to new list
Segment s2 = videoContent.getTemporalDecomposition().createSegment("segment-" + segmentcount++);
s2.setMediaTime(o.getMediaTime());
segmentsNew.add(s2);
// if the merged span is too short, split the new segment in the middle and merge the halves into the
// previous and following segments
} else {
long followingStartOld = o.getMediaTime().getMediaTimePoint().getTimeInMilliseconds();
long newSplit = (currentSegStart.getTimeInMilliseconds() + followingStartOld) / 2;
long followingEnd = followingStartOld + o.getMediaTime().getMediaDuration().getDurationInMilliseconds();
long followingDuration = followingEnd - newSplit;
// if at beginning, don't split, just merge to first large segment
if (segmentsNew.isEmpty()) {
Segment s = videoContent.getTemporalDecomposition().createSegment("segment-" + segmentcount++);
s.setMediaTime(new MediaRelTimeImpl(0, followingEnd));
segmentsNew.add(s);
} else {
long previousStart = segmentsNew.getLast().getMediaTime().getMediaTimePoint().getTimeInMilliseconds();
// adjust end time of previous segment to split time
segmentsNew.getLast().setMediaTime(new MediaRelTimeImpl(previousStart, newSplit - previousStart));
// create new segment starting at split time
Segment s = videoContent.getTemporalDecomposition().createSegment("segment-" + segmentcount++);
s.setMediaTime(new MediaRelTimeImpl(newSplit, followingDuration));
segmentsNew.add(s);
}
}
merging = false;
// copy segments that are long enough to new list (with corrected number)
} else {
Segment s = videoContent.getTemporalDecomposition().createSegment("segment-" + segmentcount++);
s.setMediaTime(o.getMediaTime());
segmentsNew.add(s);
}
}
}
// if there is an unfinished merging process after going through all segments
if (merging && !segmentsNew.isEmpty()) {
long newDuration = track.getDuration() - currentSegStart.getTimeInMilliseconds();
// if merged segment is long enough, create new segment
if (newDuration >= mergeThresh) {
Segment s = videoContent.getTemporalDecomposition().createSegment("segment-" + segmentcount);
s.setMediaTime(new MediaRelTimeImpl(currentSegStart.getTimeInMilliseconds(), newDuration));
segmentsNew.add(s);
// if not long enough, merge with previous segment
} else {
newDuration = track.getDuration() - segmentsNew.getLast().getMediaTime().getMediaTimePoint().getTimeInMilliseconds();
segmentsNew.getLast().setMediaTime(new MediaRelTimeImpl(segmentsNew.getLast().getMediaTime().getMediaTimePoint().getTimeInMilliseconds(), newDuration));
}
}
// if no segments were created, add a single segment spanning the whole video
if (segmentsNew.isEmpty()) {
Segment s = videoContent.getTemporalDecomposition().createSegment("segment-" + segmentcount);
s.setMediaTime(new MediaRelTimeImpl(0, track.getDuration()));
segmentsNew.add(s);
}
return mpeg7;
}
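A hypothetical call of the method above with a merge threshold of 5 000 ms (variable names are illustrative):

LinkedList<Segment> filtered = new LinkedList<Segment>();
Mpeg7Catalog catalog = filterSegmentation(rawSegments, track, filtered, 5000);
// Runs of short segments are replaced by a single merged segment; a merged run that is still shorter than
// the threshold is split in the middle and absorbed by the neighbouring long segments.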