use of com.amazonaws.services.s3.model.Filter in project stocator by CODAIT.
the class COSAPIClient method list.
@Override
public FileStatus[] list(String hostName, Path path, boolean fullListing, boolean prefixBased, Boolean isDirectory, boolean flatListing, PathFilter filter) throws FileNotFoundException, IOException {
LOG.debug("Native direct list status for {}", path);
ArrayList<FileStatus> tmpResult = new ArrayList<FileStatus>();
String key = pathToKey(path);
if (isDirectory != null && isDirectory.booleanValue() && !key.endsWith("/") && !path.toString().equals(hostName)) {
key = key + "/";
LOG.debug("listNativeDirect modify key to {}", key);
}
Map<String, FileStatus> emptyObjects = new HashMap<String, FileStatus>();
ListObjectsRequest request = new ListObjectsRequest();
request.setBucketName(mBucket);
request.setMaxKeys(5000);
request.setPrefix(key);
if (!flatListing) {
request.setDelimiter("/");
}
ObjectListing objectList = mClient.listObjects(request);
List<S3ObjectSummary> objectSummaries = objectList.getObjectSummaries();
List<String> commonPrefixes = objectList.getCommonPrefixes();
boolean objectScanContinue = true;
S3ObjectSummary prevObj = null;
// start FTA logic
boolean stocatorOrigin = isSparkOrigin(key, path.toString());
if (stocatorOrigin) {
LOG.debug("Stocator origin is true for {}", key);
if (!isJobSuccessful(key)) {
LOG.debug("{} created by failed Spark job. Skipped", key);
if (fModeAutomaticDelete) {
delete(hostName, new Path(key), true);
}
return new FileStatus[0];
}
}
while (objectScanContinue) {
for (S3ObjectSummary obj : objectSummaries) {
if (prevObj == null) {
prevObj = obj;
prevObj.setKey(correctPlusSign(key, prevObj.getKey()));
continue;
}
obj.setKey(correctPlusSign(key, obj.getKey()));
String objKey = obj.getKey();
String unifiedObjectName = extractUnifiedObjectName(objKey);
LOG.trace("list candidate {}, unified name {}", objKey, unifiedObjectName);
if (stocatorOrigin && !fullListing) {
LOG.trace("{} created by Spark", unifiedObjectName);
// we need to make sure there are no failed attempts
if (nameWithoutTaskID(objKey).equals(nameWithoutTaskID(prevObj.getKey()))) {
// found a failed attempt that was not aborted.
LOG.trace("Collision found between {} and {}", prevObj.getKey(), objKey);
if (prevObj.getSize() < obj.getSize()) {
LOG.trace("New candidate is {}. Removed {}", obj.getKey(), prevObj.getKey());
prevObj = obj;
}
continue;
}
}
FileStatus fs = createFileStatus(prevObj, hostName, path);
if (fs.getLen() > 0 || fullListing) {
LOG.trace("Native direct list. Adding {} size {}", fs.getPath(), fs.getLen());
if (filter == null) {
tmpResult.add(fs);
} else if (filter != null && filter.accept(fs.getPath())) {
tmpResult.add(fs);
} else {
LOG.trace("{} rejected by path filter during list. Filter {}", fs.getPath(), filter);
}
} else {
emptyObjects.put(fs.getPath().toString(), fs);
}
prevObj = obj;
}
boolean isTruncated = objectList.isTruncated();
if (isTruncated) {
objectList = mClient.listNextBatchOfObjects(objectList);
objectSummaries = objectList.getObjectSummaries();
} else {
objectScanContinue = false;
}
}
if (prevObj != null) {
FileStatus fs = createFileStatus(prevObj, hostName, path);
LOG.trace("Adding the last object from the list {}", fs.getPath());
if (fs.getLen() > 0 || fullListing) {
LOG.trace("Native direct list. Adding {} size {}", fs.getPath(), fs.getLen());
if (filter == null) {
memoryCache.putFileStatus(fs.getPath().toString(), fs);
tmpResult.add(fs);
} else if (filter != null && filter.accept(fs.getPath())) {
memoryCache.putFileStatus(fs.getPath().toString(), fs);
tmpResult.add(fs);
} else {
LOG.trace("{} rejected by path filter during list. Filter {}", fs.getPath(), filter);
}
} else if (!fs.getPath().getName().equals(HADOOP_SUCCESS)) {
emptyObjects.put(fs.getPath().toString(), fs);
}
}
// get common prefixes
for (String comPrefix : commonPrefixes) {
LOG.trace("Common prefix is {}", comPrefix);
if (emptyObjects.containsKey(keyToQualifiedPath(hostName, comPrefix).toString()) || emptyObjects.isEmpty()) {
FileStatus status = new COSFileStatus(true, false, keyToQualifiedPath(hostName, comPrefix));
LOG.trace("Match between common prefix and empty object {}. Adding to result", comPrefix);
if (filter == null) {
memoryCache.putFileStatus(status.getPath().toString(), status);
tmpResult.add(status);
} else if (filter != null && filter.accept(status.getPath())) {
memoryCache.putFileStatus(status.getPath().toString(), status);
tmpResult.add(status);
} else {
LOG.trace("Common prefix {} rejected by path filter during list. Filter {}", status.getPath(), filter);
}
}
}
return tmpResult.toArray(new FileStatus[tmpResult.size()]);
}
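The method above is essentially a paginated ListObjects loop with Stocator-specific de-duplication layered on top. For orientation, here is a minimal standalone sketch of that pagination pattern with the AWS SDK for Java v1; the bucket name, prefix, and client construction are placeholders rather than values taken from the stocator code.
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class PaginatedListingSketch {
  public static void main(String[] args) {
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    ListObjectsRequest request = new ListObjectsRequest()
        .withBucketName("example-bucket")   // placeholder bucket
        .withPrefix("data/")                // placeholder prefix
        .withDelimiter("/")                 // hierarchical listing, as in the non-flat case above
        .withMaxKeys(5000);
    ObjectListing listing = s3.listObjects(request);
    while (true) {
      for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        System.out.println(summary.getKey() + " " + summary.getSize());
      }
      if (!listing.isTruncated()) {
        break;
      }
      // fetch the next page, as the isTruncated branch above does
      listing = s3.listNextBatchOfObjects(listing);
    }
  }
}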
use of com.amazonaws.services.s3.model.Filter in project cloudbreak by hortonworks.
the class AwsNetworkService method findNonOverLappingCIDR.
public String findNonOverLappingCIDR(AuthenticatedContext ac, CloudStack stack) {
AwsNetworkView awsNetworkView = new AwsNetworkView(stack.getNetwork());
String region = ac.getCloudContext().getLocation().getRegion().value();
AmazonEc2Client ec2Client = awsClient.createEc2Client(new AwsCredentialView(ac.getCloudCredential()), region);
DescribeVpcsRequest vpcRequest = new DescribeVpcsRequest().withVpcIds(awsNetworkView.getExistingVpc());
Vpc vpc = ec2Client.describeVpcs(vpcRequest).getVpcs().get(0);
String vpcCidr = vpc.getCidrBlock();
LOGGER.debug("Subnet cidr is empty, find a non-overlapping subnet for VPC cidr: {}", vpcCidr);
DescribeSubnetsRequest request = new DescribeSubnetsRequest().withFilters(new Filter("vpc-id", singletonList(awsNetworkView.getExistingVpc())));
List<Subnet> awsSubnets = ec2Client.describeSubnets(request).getSubnets();
List<String> subnetCidrs = awsSubnets.stream().map(Subnet::getCidrBlock).collect(Collectors.toList());
LOGGER.debug("The selected VPCs: {}, has the following subnets: {}", vpc.getVpcId(), String.join(",", subnetCidrs));
return calculateSubnet(ac.getCloudContext().getName(), vpc, subnetCidrs);
}
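As a standalone reference, the sketch below shows the same server-side filter in isolation: a "vpc-id" Filter passed to DescribeSubnets to list the subnets of one VPC (AWS SDK for Java v1). The client construction and the VPC id are placeholders, not values from the cloudbreak code.
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;
import com.amazonaws.services.ec2.model.DescribeSubnetsRequest;
import com.amazonaws.services.ec2.model.Filter;
import com.amazonaws.services.ec2.model.Subnet;
import java.util.Collections;
import java.util.List;

public class SubnetFilterSketch {
  public static void main(String[] args) {
    AmazonEC2 ec2 = AmazonEC2ClientBuilder.defaultClient();
    // "vpc-id" is a filter name understood server-side by DescribeSubnets;
    // the VPC id value is a placeholder.
    DescribeSubnetsRequest request = new DescribeSubnetsRequest()
        .withFilters(new Filter("vpc-id", Collections.singletonList("vpc-0123456789abcdef0")));
    List<Subnet> subnets = ec2.describeSubnets(request).getSubnets();
    for (Subnet subnet : subnets) {
      System.out.println(subnet.getSubnetId() + " " + subnet.getCidrBlock());
    }
  }
}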
use of com.amazonaws.services.s3.model.Filter in project incubator-gobblin by apache.
the class AWSSdkClient method getInstancesForGroup.
/**
* Get list of EC2 {@link Instance}s for an auto scaling group
*
* @param groupName Auto scaling group name
* @param status Instance status (e.g. running)
* @return List of EC2 instances found for the input auto scaling group
*/
public List<Instance> getInstancesForGroup(String groupName, String status) {
final AmazonEC2 amazonEC2 = getEc2Client();
final DescribeInstancesResult instancesResult = amazonEC2.describeInstances(new DescribeInstancesRequest().withFilters(new Filter().withName("tag:aws:autoscaling:groupName").withValues(groupName)));
final List<Instance> instances = new ArrayList<>();
for (Reservation reservation : instancesResult.getReservations()) {
for (Instance instance : reservation.getInstances()) {
if (null == status || null == instance.getState() || status.equals(instance.getState().getName())) {
instances.add(instance);
LOGGER.info("Found instance: " + instance + " which qualified filter: " + status);
} else {
LOGGER.info("Found instance: " + instance + " but did not qualify for filter: " + status);
}
}
}
return instances;
}
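The tag-based Filter used above can also be exercised directly against the EC2 client. The sketch below isolates that builder-style Filter on DescribeInstances, matching on the tag that EC2 attaches to instances launched by an auto scaling group; the client construction and group name are placeholders, not part of the gobblin code.
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;
import com.amazonaws.services.ec2.model.DescribeInstancesRequest;
import com.amazonaws.services.ec2.model.Filter;
import com.amazonaws.services.ec2.model.Instance;
import com.amazonaws.services.ec2.model.Reservation;

public class AutoScalingInstancesSketch {
  public static void main(String[] args) {
    AmazonEC2 ec2 = AmazonEC2ClientBuilder.defaultClient();
    DescribeInstancesRequest request = new DescribeInstancesRequest()
        .withFilters(new Filter()
            .withName("tag:aws:autoscaling:groupName")  // tag added by auto scaling
            .withValues("example-asg"));                // placeholder group name
    for (Reservation reservation : ec2.describeInstances(request).getReservations()) {
      for (Instance instance : reservation.getInstances()) {
        System.out.println(instance.getInstanceId() + " " + instance.getState().getName());
      }
    }
  }
}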
use of com.amazonaws.services.s3.model.Filter in project kafka-connect-storage-cloud by confluentinc.
the class S3SinkConnectorConfig method newConfigDef.
public static ConfigDef newConfigDef() {
ConfigDef configDef = StorageSinkConnectorConfig.newConfigDef(FORMAT_CLASS_RECOMMENDER, AVRO_COMPRESSION_RECOMMENDER);
final String connectorGroup = "Connector";
final int latestOrderInGroup = configDef.configKeys().values().stream().filter(c -> connectorGroup.equalsIgnoreCase(c.group)).map(c -> c.orderInGroup).max(Integer::compare).orElse(0);
StorageSinkConnectorConfig.enableParquetConfig(configDef, PARQUET_COMPRESSION_RECOMMENDER, connectorGroup, latestOrderInGroup);
{
final String group = "S3";
int orderInGroup = 0;
configDef.define(S3_BUCKET_CONFIG, Type.STRING, Importance.HIGH, "The S3 Bucket.", group, ++orderInGroup, Width.LONG, "S3 Bucket");
configDef.define(S3_OBJECT_TAGGING_CONFIG, Type.BOOLEAN, S3_OBJECT_TAGGING_DEFAULT, Importance.LOW, "Tag S3 objects with start and end offsets, as well as record count.", group, ++orderInGroup, Width.LONG, "S3 Object Tagging");
configDef.define(REGION_CONFIG, Type.STRING, REGION_DEFAULT, new RegionValidator(), Importance.MEDIUM, "The AWS region to be used by the connector.", group, ++orderInGroup, Width.LONG, "AWS region", new RegionRecommender());
configDef.define(PART_SIZE_CONFIG, Type.INT, PART_SIZE_DEFAULT, new PartRange(), Importance.HIGH, "The Part Size in S3 Multi-part Uploads.", group, ++orderInGroup, Width.LONG, "S3 Part Size");
configDef.define(CREDENTIALS_PROVIDER_CLASS_CONFIG, Type.CLASS, CREDENTIALS_PROVIDER_CLASS_DEFAULT, new CredentialsProviderValidator(), Importance.LOW, "Credentials provider or provider chain to use for authentication to AWS. By default " + "the connector uses ``" + DefaultAWSCredentialsProviderChain.class.getSimpleName() + "``.", group, ++orderInGroup, Width.LONG, "AWS Credentials Provider Class");
configDef.define(AWS_ACCESS_KEY_ID_CONFIG, Type.STRING, AWS_ACCESS_KEY_ID_DEFAULT, Importance.HIGH, "The AWS access key ID used to authenticate personal AWS credentials such as IAM " + "credentials. Use only if you do not wish to authenticate by using a credentials " + "provider class via ``" + CREDENTIALS_PROVIDER_CLASS_CONFIG + "``", group, ++orderInGroup, Width.LONG, "AWS Access Key ID");
configDef.define(AWS_SECRET_ACCESS_KEY_CONFIG, Type.PASSWORD, AWS_SECRET_ACCESS_KEY_DEFAULT, Importance.HIGH, "The secret access key used to authenticate personal AWS credentials such as IAM " + "credentials. Use only if you do not wish to authenticate by using a credentials " + "provider class via ``" + CREDENTIALS_PROVIDER_CLASS_CONFIG + "``", group, ++orderInGroup, Width.LONG, "AWS Secret Access Key");
List<String> validSsea = new ArrayList<>(SSEAlgorithm.values().length + 1);
validSsea.add("");
for (SSEAlgorithm algo : SSEAlgorithm.values()) {
validSsea.add(algo.toString());
}
configDef.define(SSEA_CONFIG, Type.STRING, SSEA_DEFAULT, ConfigDef.ValidString.in(validSsea.toArray(new String[validSsea.size()])), Importance.LOW, "The S3 Server Side Encryption Algorithm.", group, ++orderInGroup, Width.LONG, "S3 Server Side Encryption Algorithm", new SseAlgorithmRecommender());
configDef.define(SSE_CUSTOMER_KEY, Type.PASSWORD, SSE_CUSTOMER_KEY_DEFAULT, Importance.LOW, "The S3 Server Side Encryption Customer-Provided Key (SSE-C).", group, ++orderInGroup, Width.LONG, "S3 Server Side Encryption Customer-Provided Key (SSE-C)");
configDef.define(SSE_KMS_KEY_ID_CONFIG, Type.STRING, SSE_KMS_KEY_ID_DEFAULT, Importance.LOW, "The name of the AWS Key Management Service (AWS-KMS) key to be used for server side " + "encryption of the S3 objects. No encryption is used when no key is provided, but" + " it is enabled when ``" + SSEAlgorithm.KMS + "`` is specified as encryption " + "algorithm with a valid key name.", group, ++orderInGroup, Width.LONG, "S3 Server Side Encryption Key", new SseKmsKeyIdRecommender());
configDef.define(ACL_CANNED_CONFIG, Type.STRING, ACL_CANNED_DEFAULT, new CannedAclValidator(), Importance.LOW, "An S3 canned ACL header value to apply when writing objects.", group, ++orderInGroup, Width.LONG, "S3 Canned ACL");
configDef.define(WAN_MODE_CONFIG, Type.BOOLEAN, WAN_MODE_DEFAULT, Importance.MEDIUM, "Use S3 accelerated endpoint.", group, ++orderInGroup, Width.LONG, "S3 accelerated endpoint enabled");
configDef.define(COMPRESSION_TYPE_CONFIG, Type.STRING, COMPRESSION_TYPE_DEFAULT, new CompressionTypeValidator(), Importance.LOW, "Compression type for files written to S3. " + "Applied when using JsonFormat or ByteArrayFormat. " + "Available values: none, gzip.", group, ++orderInGroup, Width.LONG, "Compression type");
configDef.define(COMPRESSION_LEVEL_CONFIG, Type.INT, COMPRESSION_LEVEL_DEFAULT, COMPRESSION_LEVEL_VALIDATOR, Importance.LOW, "Compression level for files written to S3. " + "Applied when using JsonFormat or ByteArrayFormat. ", group, ++orderInGroup, Width.LONG, "Compression Level", COMPRESSION_LEVEL_VALIDATOR);
configDef.define(S3_PART_RETRIES_CONFIG, Type.INT, S3_PART_RETRIES_DEFAULT, atLeast(0), Importance.MEDIUM, "Maximum number of retry attempts for failed requests. Zero means no retries. " + "The actual number of attempts is determined by the S3 client based on multiple " + "factors including, but not limited to: the value of this parameter, type of " + "exception occurred, and throttling settings of the underlying S3 client.", group, ++orderInGroup, Width.LONG, "S3 Part Upload Retries");
configDef.define(S3_RETRY_BACKOFF_CONFIG, Type.LONG, S3_RETRY_BACKOFF_DEFAULT, atLeast(0L), Importance.LOW, "How long to wait in milliseconds before attempting the first retry " + "of a failed S3 request. Upon a failure, this connector may wait up to twice as " + "long as the previous wait, up to the maximum number of retries. " + "This avoids retrying in a tight loop under failure scenarios.", group, ++orderInGroup, Width.SHORT, "Retry Backoff (ms)");
configDef.define(FORMAT_BYTEARRAY_EXTENSION_CONFIG, Type.STRING, FORMAT_BYTEARRAY_EXTENSION_DEFAULT, Importance.LOW, String.format("Output file extension for ByteArrayFormat. Defaults to ``%s``.", FORMAT_BYTEARRAY_EXTENSION_DEFAULT), group, ++orderInGroup, Width.LONG, "Output file extension for ByteArrayFormat");
configDef.define(FORMAT_BYTEARRAY_LINE_SEPARATOR_CONFIG, Type.STRING, // the default is applied in getFormatByteArrayLineSeparator().
null, Importance.LOW, "String inserted between records for ByteArrayFormat. " + "Defaults to ``System.lineSeparator()`` " + "and may contain escape sequences like ``\\n``. " + "An input record that contains the line separator will look like " + "multiple records in the output S3 object.", group, ++orderInGroup, Width.LONG, "Line separator ByteArrayFormat");
configDef.define(S3_PROXY_URL_CONFIG, Type.STRING, S3_PROXY_URL_DEFAULT, Importance.LOW, "S3 Proxy settings encoded in URL syntax. This property is meant to be used only if you" + " need to access S3 through a proxy.", group, ++orderInGroup, Width.LONG, "S3 Proxy Settings");
configDef.define(S3_PROXY_USER_CONFIG, Type.STRING, S3_PROXY_USER_DEFAULT, Importance.LOW, "S3 Proxy User. This property is meant to be used only if you" + " need to access S3 through a proxy. Using ``" + S3_PROXY_USER_CONFIG + "`` instead of embedding the username and password in ``" + S3_PROXY_URL_CONFIG + "`` allows the password to be hidden in the logs.", group, ++orderInGroup, Width.LONG, "S3 Proxy User");
configDef.define(S3_PROXY_PASS_CONFIG, Type.PASSWORD, S3_PROXY_PASS_DEFAULT, Importance.LOW, "S3 Proxy Password. This property is meant to be used only if you" + " need to access S3 through a proxy. Using ``" + S3_PROXY_PASS_CONFIG + "`` instead of embedding the username and password in ``" + S3_PROXY_URL_CONFIG + "`` allows the password to be hidden in the logs.", group, ++orderInGroup, Width.LONG, "S3 Proxy Password");
configDef.define(HEADERS_USE_EXPECT_CONTINUE_CONFIG, Type.BOOLEAN, HEADERS_USE_EXPECT_CONTINUE_DEFAULT, Importance.LOW, "Enable or disable use of the HTTP/1.1 handshake using EXPECT: 100-CONTINUE during " + "multi-part upload. If true, the client will wait for a 100 (CONTINUE) response " + "before sending the request body. Else, the client uploads the entire request " + "body without checking if the server is willing to accept the request.", group, ++orderInGroup, Width.SHORT, "S3 HTTP Send Uses Expect Continue");
configDef.define(BEHAVIOR_ON_NULL_VALUES_CONFIG, Type.STRING, BEHAVIOR_ON_NULL_VALUES_DEFAULT, BehaviorOnNullValues.VALIDATOR, Importance.LOW, "How to handle records with a null value (i.e. Kafka tombstone records)." + " Valid options are 'ignore' and 'fail'.", group, ++orderInGroup, Width.SHORT, "Behavior for null-valued records");
}
{
final String group = "Keys and Headers";
int orderInGroup = 0;
configDef.define(STORE_KAFKA_KEYS_CONFIG, Type.BOOLEAN, false, Importance.LOW, "Enable or disable writing keys to storage.", group, ++orderInGroup, Width.SHORT, "Store kafka keys", Collections.singletonList(KEYS_FORMAT_CLASS_CONFIG));
configDef.define(STORE_KAFKA_HEADERS_CONFIG, Type.BOOLEAN, false, Importance.LOW, "Enable or disable writing headers to storage.", group, ++orderInGroup, Width.SHORT, "Store kafka headers", Collections.singletonList(HEADERS_FORMAT_CLASS_CONFIG));
configDef.define(KEYS_FORMAT_CLASS_CONFIG, Type.CLASS, KEYS_FORMAT_CLASS_DEFAULT, Importance.LOW, "The format class to use when writing keys to the store.", group, ++orderInGroup, Width.NONE, "Keys format class", KEYS_FORMAT_CLASS_RECOMMENDER);
configDef.define(HEADERS_FORMAT_CLASS_CONFIG, Type.CLASS, HEADERS_FORMAT_CLASS_DEFAULT, Importance.LOW, "The format class to use when writing headers to the store.", group, ++orderInGroup, Width.NONE, "Headers format class", HEADERS_FORMAT_CLASS_RECOMMENDER);
configDef.define(S3_PATH_STYLE_ACCESS_ENABLED_CONFIG, Type.BOOLEAN, S3_PATH_STYLE_ACCESS_ENABLED_DEFAULT, Importance.LOW, "Specifies whether or not to enable path style access to the bucket used by the " + "connector", group, ++orderInGroup, Width.SHORT, "Enable Path Style Access to S3");
}
return configDef;
}
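Every define call above follows the same ConfigDef pattern: key name, type, optional default and validator, importance, documentation, group, order in group, width, and display name. The sketch below shows that pattern on its own with the Kafka ConfigDef API; the key names, default, and range validator are illustrative assumptions, not the connector's actual definitions.
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.config.ConfigDef.Width;
import java.util.Map;

public class MiniSinkConfigSketch extends AbstractConfig {
  // illustrative key names, not necessarily the connector's own constants
  static final String BUCKET_CONFIG = "s3.bucket.name";
  static final String PART_SIZE_CONFIG = "s3.part.size";

  static final ConfigDef CONFIG_DEF = new ConfigDef()
      .define(BUCKET_CONFIG, Type.STRING, Importance.HIGH,
          "The S3 bucket to write to.", "S3", 1, Width.LONG, "S3 Bucket")
      .define(PART_SIZE_CONFIG, Type.INT, 25 * 1024 * 1024,
          ConfigDef.Range.atLeast(5 * 1024 * 1024), Importance.HIGH,
          "Part size for S3 multi-part uploads.", "S3", 2, Width.LONG, "S3 Part Size");

  public MiniSinkConfigSketch(Map<String, ?> props) {
    super(CONFIG_DEF, props);
  }

  public static void main(String[] args) {
    MiniSinkConfigSketch cfg = new MiniSinkConfigSketch(Map.of(BUCKET_CONFIG, "example-bucket"));
    System.out.println(cfg.getString(BUCKET_CONFIG) + " / " + cfg.getInt(PART_SIZE_CONFIG));
  }
}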
use of com.amazonaws.services.s3.model.Filter in project opencast by opencast.
the class AwsS3DistributionServiceImpl method distributeHLSElements.
/**
* Distribute static items, create a temp directory for the playlists, modify the playlists to fix their references,
* publish the new lists, and then delete the temp files. This is used if there are any HLS playlists in the
* mediapackage; all the videos in the publication should be either HLS or progressive, but not both. However, if
* this is called with non-HLS files, it will distribute them anyway.
*
* @param channelId
* - distribution channel
* @param mediapackage
* - that holds all the files
* @param elements
* - all the elements for publication
* @param checkAvailability
* - check before pub
* @return distributed elements
* @throws DistributionException
* @throws IOException
*/
private MediaPackageElement[] distributeHLSElements(String channelId, MediaPackage mediapackage, Set<MediaPackageElement> elements, boolean checkAvailability) throws DistributionException {
List<MediaPackageElement> distributedElements = new ArrayList<MediaPackageElement>();
List<MediaPackageElement> nontrackElements = elements.stream().filter(e -> e.getElementType() != MediaPackageElement.Type.Track).collect(Collectors.toList());
// Distribute non track items
for (MediaPackageElement element : nontrackElements) {
MediaPackageElement distributedElement = distributeElement(channelId, mediapackage, element, checkAvailability);
distributedElements.add(distributedElement);
}
// Then get all tracks from mediapackage and sort them by flavor
// Each flavor is one video with multiple renditions
List<Track> trackElements = elements.stream().filter(e -> e.getElementType() == MediaPackageElement.Type.Track).map(e -> (Track) e).collect(Collectors.toList());
HashMap<MediaPackageElementFlavor, List<Track>> trackElementsMap = new HashMap<MediaPackageElementFlavor, List<Track>>();
for (Track t : trackElements) {
List<Track> l = trackElementsMap.get(t.getFlavor());
if (l == null) {
l = new ArrayList<Track>();
}
l.add(t);
trackElementsMap.put(t.getFlavor(), l);
}
Path tmpDir = null;
try {
tmpDir = Files.createTempDirectory(tmpPath, mediapackage.getIdentifier().toString());
// Run distribution one flavor at a time
for (Entry<MediaPackageElementFlavor, List<Track>> elementSet : trackElementsMap.entrySet()) {
List<Track> tracks = elementSet.getValue();
try {
List<Track> transformedTracks = new ArrayList<Track>();
// If there are playlists in this flavor
if (tracks.stream().anyMatch(AdaptivePlaylist.isHLSTrackPred)) {
// For each adaptive playlist, get all the HLS files from the track URI
// and put them into a temporary directory
List<Track> tmpTracks = new ArrayList<Track>();
for (Track t : tracks) {
Track tcopy = (Track) t.clone();
String newName = "./" + t.getURI().getPath();
Path newPath = tmpDir.resolve(newName).normalize();
Files.createDirectories(newPath.getParent());
// If this flavor is an HLS playlist and therefore has internal references
if (AdaptivePlaylist.isPlaylist(t)) {
// Get actual file
File f = workspace.get(t.getURI());
Path plcopy = Files.copy(f.toPath(), newPath);
// make it into a URI from the filesystem
tcopy.setURI(plcopy.toUri());
} else {
// new empty file; we only care about the URI
Path plcopy = Files.createFile(newPath);
tcopy.setURI(plcopy.toUri());
}
tmpTracks.add(tcopy);
}
// The playlists' references are then replaced with relative links
// replace with fixed elements
tmpTracks = AdaptivePlaylist.fixReferences(tmpTracks, tmpDir.toFile());
// after fixing them, we retrieve the new playlist files and discard the old ones
// we collect the mp4 tracks and the playlists and put them into transformedTracks
tracks.stream().filter(AdaptivePlaylist.isHLSTrackPred.negate()).forEach(t -> transformedTracks.add(t));
tmpTracks.stream().filter(AdaptivePlaylist.isHLSTrackPred).forEach(t -> transformedTracks.add(t));
} else {
// not playlists, distribute anyway
transformedTracks.addAll(tracks);
}
for (Track track : transformedTracks) {
MediaPackageElement distributedElement;
if (AdaptivePlaylist.isPlaylist(track)) {
distributedElement = distributeElement(channelId, mediapackage, track, checkAvailability, new File(track.getURI()));
} else {
distributedElement = distributeElement(channelId, mediapackage, track, checkAvailability);
}
distributedElements.add(distributedElement);
}
} catch (MediaPackageException | NotFoundException | IOException e1) {
logger.error("HLS Prepare failed for mediapackage {} in {}: {} ", elementSet.getKey(), mediapackage, e1);
throw new DistributionException("Cannot distribute " + mediapackage);
} catch (URISyntaxException e1) {
logger.error("HLS Prepare failed - Bad URI syntax {} in {}: {} ", elementSet.getKey(), mediapackage, e1);
throw new DistributionException("Cannot distribute - BAD URI syntax " + mediapackage);
}
}
} catch (IOException e2) {
throw new DistributionException("Cannot create tmp dir to process HLS:" + mediapackage + e2.getMessage());
} finally {
// Clean up temp dir
try (Stream<Path> walk = Files.walk(tmpDir)) {
walk.sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete);
} catch (IOException e) {
logger.warn("Cannot delete tmp dir for processing HLS mp {}, path {}", mediapackage, tmpPath, e);
}
}
return distributedElements.toArray(new MediaPackageElement[distributedElements.size()]);
}
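The temp-directory lifecycle used above (create a scratch directory, stage files into it, then delete the whole tree deepest-first in a finally block) is sketched below on its own; the directory prefix, staged file name, and playlist content are placeholder assumptions.
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.stream.Stream;

public class TempDirLifecycleSketch {
  public static void main(String[] args) throws IOException {
    Path tmpDir = Files.createTempDirectory("hls-staging-");
    try {
      // stage a placeholder playlist under a nested path, as the copy step above does
      Path staged = tmpDir.resolve("media/playlist.m3u8");
      Files.createDirectories(staged.getParent());
      Files.writeString(staged, "#EXTM3U\n");
      System.out.println("Staged " + staged);
    } finally {
      // reverseOrder() visits children before their parent directories
      try (Stream<Path> walk = Files.walk(tmpDir)) {
        walk.sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete);
      }
    }
  }
}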