use of com.att.aro.core.videoanalysis.pojo.VideoEvent in project VideoOptimzer by attdevsupport.
the class VideoConcurrentSessionImpl method manifestConcurrentSessions.
public List<VideoConcurrentSession> manifestConcurrentSessions(SortedMap<Double, VideoStream> videoStreamMap) {
    List<VideoConcurrentSession> concurrentSessionList = new ArrayList<>();
    if (MapUtils.isNotEmpty(videoStreamMap)) {
        for (VideoStream videoStream : videoStreamMap.values()) {
            if (videoStream.isSelected()) {
                ArrayList<Double> sessionStartTimes = new ArrayList<>();
                ArrayList<Double> sessionEndTimes = new ArrayList<>();
                ArrayList<Session> sessionList = new ArrayList<>();
                SortedMap<String, VideoEvent> videoEventList = videoStream.getVideoEventMap();
                // collect the start/end times of each distinct session in the stream
                for (VideoEvent veEntry : videoEventList.values()) {
                    Session session = veEntry.getSession();
                    if (!sessionList.contains(session)) {
                        sessionList.add(session);
                        sessionStartTimes.add(session.getSessionStartTime());
                        sessionEndTimes.add(session.getSessionEndTime());
                    }
                }
                VideoConcurrentSession videoConcurrentSession = findConcurrency(sessionStartTimes, sessionEndTimes);
                if (videoConcurrentSession != null && videoConcurrentSession.getConcurrentSessionCount() > 0) {
                    videoConcurrentSession.setVideoName(videoStream.getManifest().getVideoName());
                    concurrentSessionList.add(videoConcurrentSession);
                }
            }
        }
    }
    return concurrentSessionList;
}
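The helper findConcurrency is not part of this excerpt. As a hedged sketch of the idea it plausibly implements, the standalone snippet below computes the maximum number of session intervals that overlap at any one time; the class ConcurrencySketch and method maxConcurrentSessions are illustrative inventions, not VideoOptimzer API.

import java.util.Arrays;
import java.util.List;

public class ConcurrencySketch {

    // For each session interval, count how many sessions (itself included)
    // overlap it, and keep the maximum. Two intervals overlap when each
    // starts before the other ends.
    static int maxConcurrentSessions(List<Double> starts, List<Double> ends) {
        int max = 0;
        for (int i = 0; i < starts.size(); i++) {
            int overlapping = 0;
            for (int j = 0; j < starts.size(); j++) {
                if (starts.get(j) < ends.get(i) && starts.get(i) < ends.get(j)) {
                    overlapping++;
                }
            }
            max = Math.max(max, overlapping);
        }
        return max;
    }

    public static void main(String[] args) {
        List<Double> starts = Arrays.asList(0.0, 5.0, 20.0);
        List<Double> ends = Arrays.asList(10.0, 15.0, 30.0);
        System.out.println(maxConcurrentSessions(starts, ends)); // prints 2
    }
}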
use of com.att.aro.core.videoanalysis.pojo.VideoEvent in project VideoOptimzer by attdevsupport.
the class VideoSegmentPacingImpl method runTest.
@Override
public AbstractBestPracticeResult runTest(PacketAnalyzerResult tracedata) {
    BPResultType bpResultType = BPResultType.SELF_TEST;
    VideoChunkPacingResult result = new VideoChunkPacingResult();
    Double dlFirst = Double.MAX_VALUE;
    Double dlLast = 0D;
    int count = 0;
    init(result);
    if ((streamingVideoData = tracedata.getStreamingVideoData()) != null
            && (videoStreamCollection = streamingVideoData.getVideoStreamMap()) != null
            && MapUtils.isNotEmpty(videoStreamCollection)) {
        selectedCount = streamingVideoData.getSelectedManifestCount();
        invalidCount = streamingVideoData.getInvalidManifestCount();
        if (selectedCount == 0) {
            if (invalidCount == videoStreamCollection.size()) {
                result.setResultText(invalidManifestsFound);
            } else if (invalidCount > 0) {
                result.setResultText(noManifestsSelectedMixed);
            } else {
                result.setResultText(noManifestsSelected);
            }
            bpResultType = BPResultType.CONFIG_REQUIRED;
            result.setResultExcelText(bpResultType.getDescription());
            result.setSelfTest(false);
        } else if (selectedCount > 1) {
            result.setResultText(multipleManifestsSelected);
            bpResultType = BPResultType.CONFIG_REQUIRED;
            result.setResultExcelText(bpResultType.getDescription());
            result.setSelfTest(false);
        } else {
            for (VideoStream videoStream : videoStreamCollection.values()) {
                if (videoStream != null && videoStream.isSelected() && !videoStream.getVideoEventsBySegment().isEmpty()) {
                    for (VideoEvent videoEvent : videoStream.getVideoEventsBySegment()) {
                        if (videoEvent.isNormalSegment()) {
                            count++;
                            double dlTime = videoEvent.getDLLastTimestamp();
                            if (dlTime < dlFirst) {
                                // earliest download of a valid segment in the stream
                                dlFirst = dlTime;
                            }
                            if (dlTime > dlLast) {
                                // latest download of a valid segment in the stream
                                dlLast = dlTime;
                            }
                        }
                    }
                    break; // exactly one manifest is selected, so stop after it
                }
            }
            double segmentPacing = 0;
            if (count > 1) {
                segmentPacing = (dlLast - dlFirst) / (count - 1);
            }
            bpResultType = BPResultType.SELF_TEST;
            result.setResultText(MessageFormat.format(textResults,
                    count == 1 ? "was" : "were", count,
                    count == 1 ? "" : "different", count == 1 ? "" : "s",
                    count == 1 ? "was" : "were", segmentPacing,
                    MathUtils.equals(segmentPacing, 1.0) ? "" : "s"));
            result.setResultExcelText(MessageFormat.format(textExcelResults,
                    bpResultType.getDescription(), count,
                    count <= 1 ? "" : "different", count <= 1 ? "" : "s",
                    count <= 1 ? "was" : "were", segmentPacing,
                    segmentPacing <= 1.0 ? "" : "s"));
            result.setChunkPacing(segmentPacing);
            result.setSelfTest(true);
        }
    } else {
        result.setResultText(noData);
        bpResultType = BPResultType.NO_DATA;
        result.setResultExcelText(bpResultType.getDescription());
    }
    result.setResultType(bpResultType);
    return result;
}
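The pacing value is the average gap between consecutive segment downloads: the span from the first to the last download divided by the count - 1 gaps between them. A quick worked example with made-up timestamps:

// Illustrative numbers only: 10 normal segments, the first finishing its
// download at t = 2.0 s and the last at t = 38.0 s, give 9 gaps.
double dlFirst = 2.0;
double dlLast = 38.0;
int count = 10;
double segmentPacing = (dlLast - dlFirst) / (count - 1); // 36.0 / 9 = 4.0 s per segment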
use of com.att.aro.core.videoanalysis.pojo.VideoEvent in project VideoOptimzer by attdevsupport.
the class VideoThroughputPlot method populate.
@Override
public void populate(XYPlot plot, AROTraceData analysis) {
    if (analysis == null) {
        LOGGER.info("no trace data here");
    } else {
        XYItemRenderer videoRenderer = plot.getRenderer();
        videoRenderer.setBaseToolTipGenerator(new XYToolTipGenerator() {
            @Override
            public String generateToolTip(XYDataset dataset, int series, int item) {
                if (dataset.getSeriesCount() > 1) {
                    // series 0 holds video segments, series 1 holds audio segments
                    if (series == 0 && item < videoEventList.size()) {
                        VideoEvent videoEvent = videoEventList.get(item);
                        return getToolTip(videoEvent);
                    } else if (series == 1 && item < audioEventList.size()) {
                        VideoEvent audioEvent = audioEventList.get(item);
                        return getToolTip(audioEvent);
                    } else {
                        return "";
                    }
                } else {
                    if (item < eventList.size()) {
                        return getToolTip(eventList.get(item));
                    } else {
                        return "";
                    }
                }
            }

            private String getToolTip(VideoEvent event) {
                StringBuffer tooltipValue = new StringBuffer();
                tooltipValue.append(String.format("%.0f,%s, %.2f,%.2f,%.3f,%.3f",
                        (double) event.getSegmentID(), event.getQuality(),
                        event.getDLTimeStamp(), event.getDLLastTimestamp(),
                        event.getDuration(), getThroughput(event)));
                String[] value = tooltipValue.toString().split(",");
                return MessageFormat.format(
                        ResourceBundleHelper.getDefaultBundle().getString("videotab.throughput.tooltip"),
                        value[0], value[1], value[2], value[3], value[4], value[5]);
            }
        });
    }
    XYSeriesCollection collection = new XYSeriesCollection();
    if (!isMuxed) {
        if (optionSelected == SegmentOptions.DEFAULT) {
            collection.addSeries(videoEventSeries);
            collection.addSeries(audioEventSeries);
        } else {
            collection.addSeries(optionSelected == SegmentOptions.VIDEO ? videoEventSeries : audioEventSeries);
        }
        plot.setDataset(collection);
    } else {
        // muxed content carries audio and video together in a single series
        collection.addSeries(createMuxedSeries(!videoEventSeries.isEmpty() ? videoEventSeries : audioEventSeries));
        plot.setDataset(collection);
    }
}
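getThroughput is defined elsewhere in VideoThroughputPlot and is not shown here. A minimal sketch, assuming it divides the segment's byte size by its download window; the kilobits-per-second conversion is an assumption, not confirmed by this excerpt:

// Hypothetical sketch of getThroughput. Uses only accessors that appear
// elsewhere on this page (getSize, getDLTimeStamp, getDLLastTimestamp);
// the unit conversion to kbps is a guess.
private double getThroughput(VideoEvent event) {
    double downloadWindow = event.getDLLastTimestamp() - event.getDLTimeStamp();
    if (downloadWindow <= 0) {
        return 0d;
    }
    return event.getSize() * 8 / downloadWindow / 1000; // bytes -> kilobits per second
}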
use of com.att.aro.core.videoanalysis.pojo.VideoEvent in project VideoOptimzer by attdevsupport.
the class VideoSegmentAnalyzer method syncWithAudio.
/**
 * <pre>
 * Scans all audio events related to the given videoEvent, starting with the
 * audio event just before it. Records every audio segment associated with
 * the video segment, including partial overlaps: audio and video segments
 * often do not start at the same time.
 * </pre>
 *
 * @param startupOffset  offset applied to segment start times to map them to playback time
 * @param videoStream    contains collections of video, audio and captioning
 * @param audioStreamMap all audio in videoStream (when non-muxed); key format:
 *                       segmentStartTime:endTS (in milliseconds)
 * @param videoEvent     the video segment to receive audio linkage
 * @return the audioEvent associated with a stall, or null when no audio was linked
 */
private VideoEvent syncWithAudio(double startupOffset, VideoStream videoStream, TreeMap<String, VideoEvent> audioStreamMap, VideoEvent videoEvent) {
    VideoEvent audioEvent = null;
    String segmentStartTime = VideoStream.generateTimestampKey(videoEvent.getSegmentStartTime());
    String segmentEndTime = VideoStream.generateTimestampKey(videoEvent.getSegmentStartTime() + videoEvent.getDuration());
    String audioKeyStart = null;
    String audioKeyEnd = null;
    try {
        // lowerKey/higherKey may return null at the edges of the map;
        // the catch-all below absorbs the resulting NPE
        audioKeyStart = audioStreamMap.lowerKey(segmentStartTime);
        audioKeyEnd = audioStreamMap.higherKey(segmentEndTime);
        String key = audioKeyStart;
        while (!key.equals(audioKeyEnd)) {
            VideoEvent lastAudioEvent = audioEvent;
            VideoEvent tempEvent = audioStreamMap.get(key);
            if (tempEvent.isSelected()) {
                audioEvent = tempEvent;
                calcAudioTime(videoEvent, audioEvent);
                double audioPlaytime = audioEvent.getSegmentStartTime() + startupOffset + totalStallOffset;
                if (audioEvent.getDLLastTimestamp() > audioPlaytime) {
                    // the segment finished downloading after it was due to play: a stall
                    double stallPoint = lastAudioEvent.getSegmentStartTime() + audioEvent.getDuration() - videoPrefs.getStallPausePoint();
                    stallOffset = audioEvent.getDLLastTimestamp() - audioPlaytime + getStallRecovery();
                    audioEvent.setStallTime(stallOffset);
                    videoEvent.setStallTime(stallOffset);
                    totalStallOffset += stallOffset;
                    videoStall = new VideoStall(stallPoint);
                    videoStall.setSegmentTryingToPlay(audioEvent);
                    double resumePoint = audioEvent.getDLLastTimestamp() + getStallRecovery();
                    videoStall.setStallEndTimestamp(resumePoint);
                    stalls.add(videoStall);
                }
            }
            // advance to the first key with the next segmentStartTime prefix
            key = audioStreamMap.higherKey(StringUtils.substringBefore(key, ":") + "z");
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    return audioEvent;
}
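The key-advance line at the bottom of the loop only works because the TreeMap keys sort lexicographically. Assuming the segmentStartTime:endTS key layout described in the javadoc (the exact output of generateTimestampKey is not shown), this standalone sketch shows why appending "z" to the start-time prefix makes higherKey skip every remaining entry that shares the same start time:

import java.util.TreeMap;

public class KeyAdvanceSketch {
    public static void main(String[] args) {
        // Key format assumed from the javadoc: "segmentStartTime:endTS".
        TreeMap<String, String> audioKeys = new TreeMap<>();
        audioKeys.put("00012.345:00012845", "audio-1a");
        audioKeys.put("00012.345:00012900", "audio-1b"); // same start, later end
        audioKeys.put("00014.345:00014845", "audio-2");

        String key = audioKeys.firstKey();
        // 'z' (0x7A) sorts after ':' (0x3A), so "00012.345z" is greater than
        // every key starting "00012.345:" but less than any later start time.
        String probe = key.substring(0, key.indexOf(':')) + "z";
        System.out.println(audioKeys.higherKey(probe)); // prints 00014.345:00014845
    }
}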
use of com.att.aro.core.videoanalysis.pojo.VideoEvent in project VideoOptimzer by attdevsupport.
the class VideoSegmentAnalyzer method generateByteBufferData.
/**
* Scans VideoStream to produce/populate - VideoStream.byteBufferList -
* VideoStream.toolTipDetailMap
*
* @param videoStream
*/
private void generateByteBufferData(VideoStream videoStream) {
    VideoEvent eventPlay = null;
    Double buffer = 0D;
    double timeKey = 0D;
    this.videoStream = videoStream;
    videoStream.clearBufferOccupancyData();
    TreeMap<Double, VideoEvent> mergedPlayMap = new TreeMap<>();
    TreeMap<String, VideoEvent> mergedMap = new TreeMap<>();
    // merge video and audio events, dropping failed requests
    videoStream.getVideoEventMap().entrySet().stream()
            .filter(e -> !e.getValue().isFailedRequest())
            .forEach(e -> mergedMap.put(e.getKey(), e.getValue()));
    videoStream.getAudioEventMap().entrySet().stream()
            .filter(e -> !e.getValue().isFailedRequest())
            .forEach(e -> mergedMap.put(e.getKey(), e.getValue()));
    byteBufferList = videoStream.getByteBufferList();
    for (VideoEvent eventDL : mergedMap.values()) {
        if (eventDL.isNormalSegment()) {
            // drain any queued segments that play before this download completes
            if (timeKey > 0 && timeKey < eventDL.getEndTS()) {
                eventPlay = mergedPlayMap.get(timeKey);
                while (eventPlay != null && eventPlay.getPlayTime() <= eventDL.getEndTS()) {
                    mergedPlayMap.remove(eventPlay.getPlayTime());
                    buffer = addByteBufferPoints(buffer, eventPlay, eventPlay.getPlayTime(), -eventPlay.getSize());
                    timeKey = mergedPlayMap.isEmpty() ? 0 : mergedPlayMap.firstKey();
                    eventPlay = mergedPlayMap.isEmpty() ? null : mergedPlayMap.get(mergedPlayMap.firstKey());
                }
            }
            // queue the downloaded segment for playback and add it to the buffer
            mergedPlayMap.put(eventDL.getPlayTime(), eventDL);
            timeKey = mergedPlayMap.firstKey();
            buffer = addByteBufferPoints(buffer, eventDL, eventDL.getEndTS(), eventDL.getSize());
        }
    }
    // drain whatever is still queued after the last download
    timeKey = mergedPlayMap.isEmpty() ? 0 : mergedPlayMap.firstKey();
    while (!mergedPlayMap.isEmpty()) {
        eventPlay = mergedPlayMap.remove(timeKey);
        buffer = addByteBufferPoints(buffer, eventPlay, eventPlay.getPlayTime(), -eventPlay.getSize());
        timeKey = mergedPlayMap.isEmpty() ? 0 : mergedPlayMap.firstKey();
    }
}
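addByteBufferPoints is likewise not shown. A plausible sketch, assuming it records the buffer level just before and just after each event so the byte-buffer plot steps vertically at every download (+size) and play (-size); Point is a stand-in for whatever element type VideoStream.byteBufferList actually holds:

// Hypothetical sketch of addByteBufferPoints; not the actual VideoOptimzer code.
static class Point {
    final double x, y;
    Point(double x, double y) { this.x = x; this.y = y; }
}

private Double addByteBufferPoints(Double buffer, VideoEvent event, double timestamp, double deltaBytes) {
    // plot the level before the event, apply the delta, then plot the level
    // after it; the event parameter mirrors the call sites above, though this
    // sketch does not need it
    byteBufferList.add(new Point(timestamp, buffer));
    buffer += deltaBytes;
    byteBufferList.add(new Point(timestamp, buffer));
    return buffer;
}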