Use of org.apache.flink.core.fs.FileStatus in project flink by apache.
The class PartitionPathUtils, method getFileStatusRecurse:
private static FileStatus[] getFileStatusRecurse(Path path, int expectLevel, FileSystem fs) {
    ArrayList<FileStatus> result = new ArrayList<>();
    try {
        FileStatus fileStatus = fs.getFileStatus(path);
        listStatusRecursively(fs, fileStatus, 0, expectLevel, result);
    } catch (IOException ignore) {
        return new FileStatus[0];
    }
    return result.toArray(new FileStatus[0]);
}
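The recursive helper listStatusRecursively is not part of the snippet above. A minimal sketch of what such a depth-bounded recursion could look like, assuming it simply collects entries once the expected partition depth is reached (the real PartitionPathUtils helper also skips hidden files, so treat this as an illustration rather than the actual implementation):

import java.io.IOException;
import java.util.List;

import org.apache.flink.core.fs.FileStatus;
import org.apache.flink.core.fs.FileSystem;

final class RecursiveListingSketch {

    // Collects entries that sit exactly at the expected directory depth;
    // directories above that depth are descended into, everything else is ignored.
    static void listStatusRecursively(FileSystem fs, FileStatus fileStatus, int level, int expectLevel, List<FileStatus> results) throws IOException {
        if (level == expectLevel) {
            results.add(fileStatus);
            return;
        }
        if (fileStatus.isDir()) {
            for (FileStatus child : fs.listStatus(fileStatus.getPath())) {
                listStatusRecursively(fs, child, level + 1, expectLevel, results);
            }
        }
    }
}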
Use of org.apache.flink.core.fs.FileStatus in project flink by apache.
The class FileCacheDirectoriesTest, method testDirectoryDownloaded:
private void testDirectoryDownloaded(DistributedCache.DistributedCacheEntry entry) throws Exception {
    JobID jobID = new JobID();
    ExecutionAttemptID attemptID = new ExecutionAttemptID();
    // copy / create the file
    final String fileName = "test_file";
    Future<Path> copyResult = fileCache.createTmpFile(fileName, entry, jobID, attemptID);
    final Path dstPath = copyResult.get();
    final FileSystem fs = dstPath.getFileSystem();
    final FileStatus fileStatus = fs.getFileStatus(dstPath);
    assertTrue(fileStatus.isDir());
    final Path cacheFile = new Path(dstPath, "cacheFile");
    assertTrue(fs.exists(cacheFile));
    final String actualContent = FileUtils.readFileUtf8(new File(cacheFile.getPath()));
    assertEquals(testFileContent, actualContent);
}
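The same FileStatus accessors used in the assertions above can be exercised directly against any Flink FileSystem; in the following sketch the directory path is only a placeholder:

import java.io.IOException;

import org.apache.flink.core.fs.FileStatus;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;

public class FileStatusInspection {

    public static void main(String[] args) throws IOException {
        // Placeholder path; point it at any directory the local file system can resolve.
        Path dir = new Path("/tmp");
        FileSystem fs = dir.getFileSystem();

        FileStatus status = fs.getFileStatus(dir);
        System.out.println("path:      " + status.getPath());
        System.out.println("directory: " + status.isDir());
        System.out.println("length:    " + status.getLen());
        System.out.println("modified:  " + status.getModificationTime());
    }
}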
Use of org.apache.flink.core.fs.FileStatus in project flink by apache.
The class FileMonitoringFunction, method listNewFiles:
private List<String> listNewFiles(FileSystem fileSystem) throws IOException {
    List<String> files = new ArrayList<String>();
    FileStatus[] statuses = fileSystem.listStatus(new Path(path));
    if (statuses == null) {
        LOG.warn("Path does not exist: {}", path);
    } else {
        for (FileStatus status : statuses) {
            Path filePath = status.getPath();
            String fileName = filePath.getName();
            long modificationTime = status.getModificationTime();
            if (!isFiltered(fileName, modificationTime)) {
                files.add(filePath.toString());
                modificationTimes.put(fileName, modificationTime);
            }
        }
    }
    return files;
}
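The isFiltered helper referenced above is not reproduced here. One plausible shape for it, assuming the only criterion is whether the recorded modification time has advanced (the actual FileMonitoringFunction also takes the configured watch type into account), is a small stateful check like this:

import java.util.HashMap;
import java.util.Map;

final class ModificationTimeFilter {

    // Last modification time observed per file name.
    private final Map<String, Long> modificationTimes = new HashMap<>();

    // A file is filtered out when it was already seen and has not been modified since.
    boolean isFiltered(String fileName, long modificationTime) {
        Long lastSeen = modificationTimes.get(fileName);
        return lastSeen != null && lastSeen >= modificationTime;
    }
}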
Use of org.apache.flink.core.fs.FileStatus in project flink by apache.
The class ContinuousFileMonitoringFunction, method getInputSplitsSortedByModTime:
/**
 * Creates the input splits to be forwarded to the downstream tasks of the {@link
 * ContinuousFileReaderOperator}. Splits are sorted <b>by modification time</b> before being
 * forwarded and only splits belonging to files in the {@code eligibleFiles} list will be
 * processed.
 *
 * @param eligibleFiles The files to process.
 */
private Map<Long, List<TimestampedFileInputSplit>> getInputSplitsSortedByModTime(Map<Path, FileStatus> eligibleFiles) throws IOException {
    Map<Long, List<TimestampedFileInputSplit>> splitsByModTime = new TreeMap<>();
    if (eligibleFiles.isEmpty()) {
        return splitsByModTime;
    }
    for (FileInputSplit split : format.createInputSplits(readerParallelism)) {
        FileStatus fileStatus = eligibleFiles.get(split.getPath());
        if (fileStatus != null) {
            Long modTime = fileStatus.getModificationTime();
            List<TimestampedFileInputSplit> splitsToForward = splitsByModTime.get(modTime);
            if (splitsToForward == null) {
                splitsToForward = new ArrayList<>();
                splitsByModTime.put(modTime, splitsToForward);
            }
            splitsToForward.add(new TimestampedFileInputSplit(modTime, split.getSplitNumber(), split.getPath(), split.getStart(), split.getLength(), split.getHostnames()));
        }
    }
    return splitsByModTime;
}
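Because splitsByModTime is a TreeMap keyed by modification time, iterating its entries yields the splits in ascending modification-time order. A hypothetical caller could drain the map like this (the println stands in for whatever the downstream operator actually does with a split):

import java.util.List;
import java.util.Map;

import org.apache.flink.streaming.api.functions.source.TimestampedFileInputSplit;

final class SplitForwardingSketch {

    // Entries arrive in ascending modification-time order, so splits of older files
    // are handled before splits of newer ones.
    static void forwardSortedSplits(Map<Long, List<TimestampedFileInputSplit>> splitsByModTime) {
        for (Map.Entry<Long, List<TimestampedFileInputSplit>> entry : splitsByModTime.entrySet()) {
            long modTime = entry.getKey();
            for (TimestampedFileInputSplit split : entry.getValue()) {
                System.out.println("forwarding split with modification time " + modTime + ": " + split);
            }
        }
    }
}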
Use of org.apache.flink.core.fs.FileStatus in project flink by apache.
The class MapRFileSystem, method listStatus:
@Override
public FileStatus[] listStatus(final Path f) throws IOException {
    final org.apache.hadoop.fs.FileStatus[] hadoopFiles = this.fs.listStatus(new org.apache.hadoop.fs.Path(f.toString()));
    final FileStatus[] files = new FileStatus[hadoopFiles.length];
    // Convert types
    for (int i = 0; i < files.length; i++) {
        files[i] = new HadoopFileStatus(hadoopFiles[i]);
    }
    return files;
}
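The conversion loop hides the Hadoop types behind Flink's own FileStatus interface via the HadoopFileStatus wrapper, so callers work purely against org.apache.flink.core.fs types regardless of the backing file system. A small usage sketch (the directory path is a placeholder):

import java.io.IOException;

import org.apache.flink.core.fs.FileStatus;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;

public class ListStatusUsage {

    public static void main(String[] args) throws IOException {
        // Placeholder directory; any path the configured file system can resolve works.
        Path dir = new Path("/tmp");
        FileSystem fs = dir.getFileSystem();

        // Only Flink's FileStatus interface is visible here, not the underlying Hadoop classes.
        for (FileStatus status : fs.listStatus(dir)) {
            System.out.println(status.getPath().getName() + " -> " + status.getLen() + " bytes");
        }
    }
}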