Usage example of androidx.media3.common.Metadata.Entry from the androidx "media" project: class PlaybackOutput, method dumpMetadata.
/**
 * Writes all collected {@link Metadata} objects to the given {@link Dumper}, one indexed
 * sub-block per metadata with one entry line per {@link Metadata.Entry}.
 *
 * <p>No-op when no metadata has been collected.
 */
private void dumpMetadata(Dumper dumper) {
  if (metadatas.isEmpty()) {
    return;
  }
  dumper.startBlock("MetadataOutput");
  for (int metadataIndex = 0; metadataIndex < metadatas.size(); metadataIndex++) {
    dumper.startBlock("Metadata[" + metadataIndex + "]");
    Metadata metadata = metadatas.get(metadataIndex);
    for (int entryIndex = 0; entryIndex < metadata.length(); entryIndex++) {
      dumper.add("entry[" + entryIndex + "]", getEntryAsString(metadata.get(entryIndex)));
    }
    dumper.endBlock();
  }
  dumper.endBlock();
}
Usage example of androidx.media3.common.Metadata.Entry from the androidx "media" project: class SefSlowMotionFlattener, method getMetadataInfo.
/**
 * Returns the {@link MetadataInfo} derived from the {@link Metadata} provided.
 *
 * <p>If {@code metadata} is null or contains no {@link SlowMotionData} entry, a default
 * {@link MetadataInfo} is returned. Otherwise the SMTA capture frame rate and SVC temporal
 * layer count must both be present and consistent with {@code TARGET_OUTPUT_FRAME_RATE},
 * or an {@link IllegalStateException} is thrown by the {@code checkState} calls below.
 */
private static MetadataInfo getMetadataInfo(@Nullable Metadata metadata) {
MetadataInfo metadataInfo = new MetadataInfo();
if (metadata == null) {
return metadataInfo;
}
// Scan the entries for SMTA capture information and for slow motion segment data.
for (int i = 0; i < metadata.length(); i++) {
Metadata.Entry entry = metadata.get(i);
if (entry instanceof SmtaMetadataEntry) {
SmtaMetadataEntry smtaMetadataEntry = (SmtaMetadataEntry) entry;
metadataInfo.captureFrameRate = smtaMetadataEntry.captureFrameRate;
// Highest SVC temporal layer index: layer indices are 0-based, so count - 1.
metadataInfo.inputMaxLayer = smtaMetadataEntry.svcTemporalLayerCount - 1;
} else if (entry instanceof SlowMotionData) {
metadataInfo.slowMotionData = (SlowMotionData) entry;
}
}
// The SMTA fields are only required when there are slow motion segments to flatten.
if (metadataInfo.slowMotionData == null) {
return metadataInfo;
}
checkState(metadataInfo.inputMaxLayer != C.INDEX_UNSET, "SVC temporal layer count not found.");
checkState(metadataInfo.captureFrameRate != C.RATE_UNSET, "Capture frame rate not found.");
// The capture frame rate must be a whole number and an exact multiple of the target output
// frame rate (captureFrameRate is a float, hence the % 1 integrality check).
checkState(metadataInfo.captureFrameRate % 1 == 0 && metadataInfo.captureFrameRate % TARGET_OUTPUT_FRAME_RATE == 0, "Invalid capture frame rate: " + metadataInfo.captureFrameRate);
int frameCountDivisor = (int) metadataInfo.captureFrameRate / TARGET_OUTPUT_FRAME_RATE;
int normalSpeedMaxLayer = metadataInfo.inputMaxLayer;
// Removing one temporal layer halves the frame count, so walk down the layers, halving the
// divisor each step, until the target output frame rate would be reached (divisor == 1).
while (normalSpeedMaxLayer >= 0) {
if ((frameCountDivisor & 1) == 1) {
// Set normalSpeedMaxLayer only if captureFrameRate / TARGET_OUTPUT_FRAME_RATE is a power of
// 2. Otherwise, the target output frame rate cannot be reached because removing a layer
// divides the number of frames by 2.
checkState(frameCountDivisor >> 1 == 0, "Could not compute normal speed max SVC layer for capture frame rate " + metadataInfo.captureFrameRate);
metadataInfo.normalSpeedMaxLayer = normalSpeedMaxLayer;
break;
}
frameCountDivisor >>= 1;
normalSpeedMaxLayer--;
}
return metadataInfo;
}
Usage example of androidx.media3.common.Metadata.Entry from the androidx "media" project: class SegmentSpeedProvider, method extractSlowMotionSegments.
/**
 * Collects every slow-motion {@link Segment} declared in the {@link Format}'s metadata and
 * returns them sorted by start, then end, then divisor.
 *
 * <p>Returns an empty list when the format carries no metadata or no {@link SlowMotionData}.
 */
private static ImmutableList<Segment> extractSlowMotionSegments(Format format) {
  List<Segment> collected = new ArrayList<>();
  @Nullable Metadata metadata = format.metadata;
  if (metadata != null) {
    int entryCount = metadata.length();
    for (int entryIndex = 0; entryIndex < entryCount; entryIndex++) {
      Metadata.Entry entry = metadata.get(entryIndex);
      if (entry instanceof SlowMotionData) {
        collected.addAll(((SlowMotionData) entry).segments);
      }
    }
  }
  return ImmutableList.sortedCopyOf(BY_START_THEN_END_THEN_DIVISOR, collected);
}
Usage example of androidx.media3.common.Metadata.Entry from the androidx "media" project: class MappingTrackSelector, method selectTracks.
@Override
public final TrackSelectorResult selectTracks(RendererCapabilities[] rendererCapabilities, TrackGroupArray trackGroups, MediaPeriodId periodId, Timeline timeline) throws ExoPlaybackException {
// Structures into which data will be written during the selection. The extra item at the end
// of each array is to store data associated with track groups that cannot be associated with
// any renderer.
int[] rendererTrackGroupCounts = new int[rendererCapabilities.length + 1];
TrackGroup[][] rendererTrackGroups = new TrackGroup[rendererCapabilities.length + 1][];
@Capabilities int[][][] rendererFormatSupports = new int[rendererCapabilities.length + 1][][];
// Pre-size each renderer's arrays for the worst case where every group maps to one renderer;
// they are trimmed to the actual counts further below.
for (int i = 0; i < rendererTrackGroups.length; i++) {
rendererTrackGroups[i] = new TrackGroup[trackGroups.length];
rendererFormatSupports[i] = new int[trackGroups.length][];
}
// Determine the extent to which each renderer supports mixed mimeType adaptation.
@AdaptiveSupport int[] rendererMixedMimeTypeAdaptationSupports = getMixedMimeTypeAdaptationSupports(rendererCapabilities);
// Associate each track group to a preferred renderer, and evaluate the support that the
// renderer provides for each track in the group.
for (int groupIndex = 0; groupIndex < trackGroups.length; groupIndex++) {
TrackGroup group = trackGroups.get(groupIndex);
// Associate the group to a preferred renderer.
boolean preferUnassociatedRenderer = MimeTypes.getTrackType(group.getFormat(0).sampleMimeType) == C.TRACK_TYPE_METADATA;
int rendererIndex = findRenderer(rendererCapabilities, group, rendererTrackGroupCounts, preferUnassociatedRenderer);
// Evaluate the support that the renderer provides for each track in the group. A group
// mapped past the last renderer index gets an all-zero (unsupported) capabilities array.
@Capabilities int[] rendererFormatSupport = rendererIndex == rendererCapabilities.length ? new int[group.length] : getFormatSupport(rendererCapabilities[rendererIndex], group);
// Stash the results.
int rendererTrackGroupCount = rendererTrackGroupCounts[rendererIndex];
rendererTrackGroups[rendererIndex][rendererTrackGroupCount] = group;
rendererFormatSupports[rendererIndex][rendererTrackGroupCount] = rendererFormatSupport;
rendererTrackGroupCounts[rendererIndex]++;
}
// Create a track group array for each renderer, and trim each rendererFormatSupports entry.
TrackGroupArray[] rendererTrackGroupArrays = new TrackGroupArray[rendererCapabilities.length];
String[] rendererNames = new String[rendererCapabilities.length];
int[] rendererTrackTypes = new int[rendererCapabilities.length];
for (int i = 0; i < rendererCapabilities.length; i++) {
int rendererTrackGroupCount = rendererTrackGroupCounts[i];
rendererTrackGroupArrays[i] = new TrackGroupArray(Util.nullSafeArrayCopy(rendererTrackGroups[i], rendererTrackGroupCount));
rendererFormatSupports[i] = Util.nullSafeArrayCopy(rendererFormatSupports[i], rendererTrackGroupCount);
rendererNames[i] = rendererCapabilities[i].getName();
rendererTrackTypes[i] = rendererCapabilities[i].getTrackType();
}
// Create a track group array for track groups not mapped to a renderer. These were stashed
// under the extra index at rendererCapabilities.length.
int unmappedTrackGroupCount = rendererTrackGroupCounts[rendererCapabilities.length];
TrackGroupArray unmappedTrackGroupArray = new TrackGroupArray(Util.nullSafeArrayCopy(rendererTrackGroups[rendererCapabilities.length], unmappedTrackGroupCount));
// Package up the track information and selections.
MappedTrackInfo mappedTrackInfo = new MappedTrackInfo(rendererNames, rendererTrackTypes, rendererTrackGroupArrays, rendererMixedMimeTypeAdaptationSupports, rendererFormatSupports, unmappedTrackGroupArray);
// Delegate the actual per-renderer selection to the subclass overload.
Pair<@NullableType RendererConfiguration[], @NullableType ExoTrackSelection[]> result = selectTracks(mappedTrackInfo, rendererFormatSupports, rendererMixedMimeTypeAdaptationSupports, periodId, timeline);
TracksInfo tracksInfo = buildTracksInfo(result.second, mappedTrackInfo);
return new TrackSelectorResult(result.first, result.second, tracksInfo, mappedTrackInfo);
}
Usage example of androidx.media3.common.Metadata.Entry from the androidx "media" project: class TtmlNode, method getCues.
/**
 * Builds the list of {@link Cue}s active at {@code timeUs}: first image-based cues resolved
 * from {@code imageMap}, then text-based cues gathered by traversing this node's subtree.
 */
public List<Cue> getCues(long timeUs, Map<String, TtmlStyle> globalStyles, Map<String, TtmlRegion> regionMap, Map<String, String> imageMap) {
  List<Pair<String, String>> imageOutputs = new ArrayList<>();
  traverseForImage(timeUs, regionId, imageOutputs);
  TreeMap<String, Cue.Builder> textOutputs = new TreeMap<>();
  traverseForText(timeUs, false, regionId, textOutputs);
  traverseForStyle(timeUs, globalStyles, regionMap, regionId, textOutputs);
  List<Cue> cues = new ArrayList<>();
  // Image-based cues.
  for (Pair<String, String> imageOutput : imageOutputs) {
    @Nullable String encodedBitmapData = imageMap.get(imageOutput.second);
    if (encodedBitmapData == null) {
      // The image reference cannot be resolved; skip it.
      continue;
    }
    byte[] bitmapData = Base64.decode(encodedBitmapData, Base64.DEFAULT);
    Bitmap bitmap = BitmapFactory.decodeByteArray(bitmapData, /* offset= */ 0, bitmapData.length);
    TtmlRegion region = Assertions.checkNotNull(regionMap.get(imageOutput.first));
    cues.add(
        new Cue.Builder()
            .setBitmap(bitmap)
            .setPosition(region.position)
            .setPositionAnchor(Cue.ANCHOR_TYPE_START)
            .setLine(region.line, Cue.LINE_TYPE_FRACTION)
            .setLineAnchor(region.lineAnchor)
            .setSize(region.width)
            .setBitmapHeight(region.height)
            .setVerticalType(region.verticalType)
            .build());
  }
  // Text-based cues.
  for (Map.Entry<String, Cue.Builder> textOutput : textOutputs.entrySet()) {
    TtmlRegion region = Assertions.checkNotNull(regionMap.get(textOutput.getKey()));
    Cue.Builder textCue = textOutput.getValue();
    cleanUpText((SpannableStringBuilder) Assertions.checkNotNull(textCue.getText()));
    textCue
        .setLine(region.line, region.lineType)
        .setLineAnchor(region.lineAnchor)
        .setPosition(region.position)
        .setSize(region.width)
        .setTextSize(region.textSize, region.textSizeType)
        .setVerticalType(region.verticalType);
    cues.add(textCue.build());
  }
  return cues;
}
Aggregations