Use of java.nio.file.Files in project flink by apache.
The class UnalignedCheckpointStressITCase, method discoverRetainedCheckpoint.
private File discoverRetainedCheckpoint() throws Exception {
    // structure: root/attempt/checkpoint/_metadata
    File rootDir = temporaryFolder.getRoot();
    Path checkpointDir = null;
    for (int i = 0; i <= 1000 && checkpointDir == null; i++) {
        Thread.sleep(5);
        try (Stream<Path> files = Files.walk(Paths.get(rootDir.getPath()))) {
            checkpointDir =
                    files.filter(Files::isRegularFile)
                            .filter(path -> path.endsWith("_metadata"))
                            .map(Path::getParent)
                            .sorted(
                                    Comparator.comparingInt(
                                            UnalignedCheckpointStressITCase
                                                    ::getCheckpointNumberFromPath))
                            .reduce((first, second) -> second)
                            .orElse(null);
        }
    }
    if (checkpointDir == null) {
        // close the walk stream here too, so the error path does not leak directory handles
        try (Stream<Path> files = Files.walk(Paths.get(rootDir.getPath()))) {
            throw new IllegalStateException(
                    "Failed to find _metadata file among "
                            + files.collect(Collectors.toList()));
        }
    }
    return checkpointDir.toFile();
}
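The newest retained checkpoint wins because the stream is sorted by checkpoint number and reduced to its last element. That sort relies on a helper, getCheckpointNumberFromPath, which is not shown on this page. A minimal hypothetical sketch, assuming Flink's usual chk-<n> checkpoint directory naming (the real implementation in UnalignedCheckpointStressITCase may differ):

// Hypothetical sketch: parse the numeric suffix of a "chk-<n>" checkpoint
// directory so that the newest retained checkpoint sorts last.
private static int getCheckpointNumberFromPath(Path checkpointDir) {
    String name = checkpointDir.getFileName().toString(); // e.g. "chk-42"
    if (!name.startsWith("chk-")) {
        throw new IllegalArgumentException("Not a checkpoint directory: " + checkpointDir);
    }
    return Integer.parseInt(name.substring("chk-".length()));
}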
Use of java.nio.file.Files in project flink by apache.
The class JarFileChecker, method findNonBinaryFilesContainingText.
private static int findNonBinaryFilesContainingText(
        Path jar, Path jarRoot, Collection<Pattern> forbidden) throws IOException {
    try (Stream<Path> files = Files.walk(jarRoot)) {
        return files.filter(path -> !path.equals(jarRoot))
                .filter(path -> !Files.isDirectory(path))
                .filter(JarFileChecker::isNoClassFile)
                .filter(path -> !getFileName(path).equals("dependencies"))
                .filter(path -> !getFileName(path).startsWith("license"))
                .filter(path -> !getFileName(path).startsWith("notice"))
                .filter(path -> !pathStartsWith(path, "/META-INF/versions/11/javax/xml/bind"))
                .filter(path -> !isJavaxManifest(jar, path))
                .filter(path -> !pathStartsWith(path, "/org/glassfish/jersey/internal"))
                .filter(
                        path ->
                                !pathStartsWith(
                                        path, "/org/apache/pulsar/shade/org/glassfish/jersey/"))
                .map(
                        path -> {
                            try {
                                final String fileContents;
                                try {
                                    fileContents = readFile(path).toLowerCase(Locale.ROOT);
                                } catch (MalformedInputException mie) {
                                    // binary file
                                    return 0;
                                }
                                int violations = 0;
                                for (Pattern text : forbidden) {
                                    if (text.matcher(fileContents).find()) {
                                        // do not count individual violations because it can be
                                        // confusing when checking with aliases for the same
                                        // license
                                        violations = 1;
                                        LOG.error(
                                                "File '{}' in jar '{}' contains match with forbidden regex '{}'.",
                                                path,
                                                jar,
                                                text);
                                    }
                                }
                                return violations;
                            } catch (IOException e) {
                                throw new RuntimeException(
                                        String.format(
                                                "Could not read contents of file '%s' in jar '%s'.",
                                                path, jar),
                                        e);
                            }
                        })
                .reduce(Integer::sum)
                .orElse(0);
    }
}
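The check leans on several small private helpers (getFileName, pathStartsWith, readFile) that are not shown here. Hypothetical sketches of what they could look like, assuming Java 11+ for Files.readString; the actual implementations in JarFileChecker may differ:

// Assumption: the real helper lower-cases the name, since the filters above
// compare against lower-case literals like "license" and "notice".
private static String getFileName(Path path) {
    return path.getFileName().toString().toLowerCase(Locale.ROOT);
}

// String-prefix comparison on the absolute path inside the jar's zip filesystem.
private static boolean pathStartsWith(Path file, String prefix) {
    return file.toString().startsWith(prefix);
}

// Strict UTF-8 decoding: undecodable (binary) content raises
// MalformedInputException, which the caller above treats as "not a text file".
private static String readFile(Path file) throws IOException {
    return Files.readString(file, StandardCharsets.UTF_8);
}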
Use of java.nio.file.Files in project flink by apache.
The class TestBaseUtils, method getAllInvolvedFiles.
private static File[] getAllInvolvedFiles(String resultPath, final String[] excludePrefixes) {
    final File result = asFile(resultPath);
    assertTrue("Result file was not written", result.exists());
    if (result.isDirectory()) {
        try (Stream<Path> files = Files.walk(result.toPath())) {
            return files.filter(Files::isRegularFile)
                    .filter(
                            path -> {
                                for (String prefix : excludePrefixes) {
                                    // compare the file name as a string; Path#startsWith
                                    // would match whole path components, not name prefixes
                                    if (path.getFileName().toString().startsWith(prefix)) {
                                        return false;
                                    }
                                }
                                return true;
                            })
                    .map(Path::toFile)
                    .filter(file -> !file.isHidden())
                    .toArray(File[]::new);
        } catch (IOException e) {
            throw new RuntimeException("Failed to retrieve result files", e);
        }
    } else {
        return new File[] {result};
    }
}
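A hypothetical call site, with illustrative paths and prefixes only (hidden files are already dropped by the isHidden filter):

// Hypothetical usage: collect all result part files from a test output
// directory, skipping "_SUCCESS"-style marker files.
File[] resultFiles = getAllInvolvedFiles("/tmp/test-output", new String[] {"_", "."});
for (File file : resultFiles) {
    System.out.println("result part: " + file.getAbsolutePath());
}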
Use of java.nio.file.Files in project flink by apache.
The class SavepointITCase, method testTriggerSavepointAndResumeWithNoClaim.
@Test
@Ignore("Disabling this test because it regularly fails on AZP. See FLINK-25427.")
public void testTriggerSavepointAndResumeWithNoClaim() throws Exception {
    final int numTaskManagers = 2;
    final int numSlotsPerTaskManager = 2;
    final int parallelism = numTaskManagers * numSlotsPerTaskManager;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(new EmbeddedRocksDBStateBackend(true));
    env.getCheckpointConfig()
            .enableExternalizedCheckpoints(
                    CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
    env.getCheckpointConfig().setCheckpointStorage(folder.newFolder().toURI());
    env.setParallelism(parallelism);
    final SharedReference<CountDownLatch> counter =
            sharedObjects.add(new CountDownLatch(10_000));
    env.fromSequence(1, Long.MAX_VALUE)
            .keyBy(i -> i % parallelism)
            .process(
                    new KeyedProcessFunction<Long, Long, Long>() {
                        private ListState<Long> last;

                        @Override
                        public void open(Configuration parameters) {
                            // we use list state here to create sst files of a significant size
                            // if sst files do not reach certain thresholds they are not stored
                            // in files, but as a byte stream in checkpoints metadata
                            last =
                                    getRuntimeContext()
                                            .getListState(
                                                    new ListStateDescriptor<>(
                                                            "last",
                                                            BasicTypeInfo.LONG_TYPE_INFO));
                        }

                        @Override
                        public void processElement(
                                Long value,
                                KeyedProcessFunction<Long, Long, Long>.Context ctx,
                                Collector<Long> out)
                                throws Exception {
                            last.add(value);
                            out.collect(value);
                        }
                    })
            .addSink(
                    new SinkFunction<Long>() {
                        @Override
                        public void invoke(Long value) {
                            counter.consumeSync(CountDownLatch::countDown);
                        }
                    })
            .setParallelism(1);
    final JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    MiniClusterWithClientResource cluster =
            new MiniClusterWithClientResource(
                    new MiniClusterResourceConfiguration.Builder()
                            .setNumberTaskManagers(numTaskManagers)
                            .setNumberSlotsPerTaskManager(numSlotsPerTaskManager)
                            .build());
    cluster.before();
    try {
        final JobID jobID1 = new JobID();
        jobGraph.setJobID(jobID1);
        cluster.getClusterClient().submitJob(jobGraph).get();
        CommonTestUtils.waitForAllTaskRunning(cluster.getMiniCluster(), jobID1, false);
        // wait for some records to be processed before taking the checkpoint
        counter.get().await();
        final String firstCheckpoint =
                cluster.getMiniCluster().triggerCheckpoint(jobID1).get();
        cluster.getClusterClient().cancel(jobID1).get();
        jobGraph.setSavepointRestoreSettings(
                SavepointRestoreSettings.forPath(firstCheckpoint, false, RestoreMode.NO_CLAIM));
        final JobID jobID2 = new JobID();
        jobGraph.setJobID(jobID2);
        cluster.getClusterClient().submitJob(jobGraph).get();
        CommonTestUtils.waitForAllTaskRunning(cluster.getMiniCluster(), jobID2, false);
        String secondCheckpoint = cluster.getMiniCluster().triggerCheckpoint(jobID2).get();
        cluster.getClusterClient().cancel(jobID2).get();
        // delete the checkpoint we restored from
        FileUtils.deleteDirectory(Paths.get(new URI(firstCheckpoint)).getParent().toFile());
        // we should be able to restore from the second checkpoint even though it has been
        // built on top of the first checkpoint
        jobGraph.setSavepointRestoreSettings(
                SavepointRestoreSettings.forPath(secondCheckpoint, false, RestoreMode.NO_CLAIM));
        final JobID jobID3 = new JobID();
        jobGraph.setJobID(jobID3);
        cluster.getClusterClient().submitJob(jobGraph).get();
        CommonTestUtils.waitForAllTaskRunning(cluster.getMiniCluster(), jobID3, false);
    } finally {
        cluster.after();
    }
}
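On the java.nio.file.Files theme of this page: the string returned by triggerCheckpoint is a file URI pointing at the externalized checkpoint directory (the test itself converts it with Paths.get(new URI(...)) before deleting it), so its on-disk layout can be inspected directly. A hypothetical sanity check, not part of the original test, that could precede the restore:

// Hypothetical check: the externalized checkpoint should contain the
// _metadata file that the NO_CLAIM restore will read.
Path firstCheckpointDir = Paths.get(new URI(firstCheckpoint));
assertTrue(Files.isRegularFile(firstCheckpointDir.resolve("_metadata")));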
Use of java.nio.file.Files in project kafka by apache.
The class TestPlugins, method writeJar.
private static void writeJar(JarOutputStream jar, Path inputDir) throws IOException {
    final List<Path> paths;
    // close the walk stream to avoid leaking directory handles
    try (Stream<Path> walk = Files.walk(inputDir)) {
        paths =
                walk.filter(Files::isRegularFile)
                        .filter(path -> !path.toFile().getName().endsWith(".java"))
                        .collect(Collectors.toList());
    }
    for (Path path : paths) {
        try (InputStream in = new BufferedInputStream(new FileInputStream(path.toFile()))) {
            jar.putNextEntry(
                    new JarEntry(
                            inputDir.relativize(path)
                                    .toFile()
                                    .getPath()
                                    .replace(File.separator, "/")));
            byte[] buffer = new byte[1024];
            for (int count; (count = in.read(buffer)) != -1; ) {
                jar.write(buffer, 0, count);
            }
            jar.closeEntry();
        }
    }
}
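The manual read/write loop can be replaced with Files.copy(Path, OutputStream), which streams a file straight into the current jar entry. An equivalent sketch for the body of the loop:

// Equivalent sketch using Files.copy from java.nio.file: copies the whole
// file into the current jar entry without a manual buffer.
for (Path path : paths) {
    jar.putNextEntry(
            new JarEntry(inputDir.relativize(path).toString().replace(File.separator, "/")));
    Files.copy(path, jar);
    jar.closeEntry();
}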