Usage example of org.apache.flink.core.fs.FileSystem in the Apache Flink project (apache/flink):
class HadoopFsFactoryTest, method testCreateHadoopFsWithoutConfig.
@Test
public void testCreateHadoopFsWithoutConfig() throws Exception {
    // Creating an HDFS file system without supplying any Hadoop configuration
    // must still preserve the scheme, authority, and port of the requested URI.
    final URI requestedUri = URI.create("hdfs://localhost:12345/");
    final FileSystem createdFs = new HadoopFsFactory().create(requestedUri);
    final URI actualUri = createdFs.getUri();
    assertEquals(requestedUri.getScheme(), actualUri.getScheme());
    assertEquals(requestedUri.getAuthority(), actualUri.getAuthority());
    assertEquals(requestedUri.getPort(), actualUri.getPort());
}
Usage example of org.apache.flink.core.fs.FileSystem in the Apache Flink project (apache/flink):
class PrestoS3FileSystemTest, method testConfigPropagationAlternateStyle.
@Test
public void testConfigPropagationAlternateStyle() throws Exception {
    // Credentials configured with the short "s3." key style must be picked up
    // by the S3 file system obtained after FileSystem.initialize().
    final Configuration flinkConfig = new Configuration();
    flinkConfig.setString("s3.access.key", "test_access_key_id");
    flinkConfig.setString("s3.secret.key", "test_secret_access_key");
    FileSystem.initialize(flinkConfig);
    final FileSystem s3FileSystem = FileSystem.get(new URI("s3://test"));
    validateBasicCredentials(s3FileSystem);
}
Usage example of org.apache.flink.core.fs.FileSystem in the Apache Flink project (apache/flink):
class PrestoS3FileSystemTest, method testConfigPropagationWithPrestoPrefix.
@Test
public void testConfigPropagationWithPrestoPrefix() throws Exception {
    // Credentials configured with the Presto-native "presto.s3." key style must
    // also reach the S3 file system obtained after FileSystem.initialize().
    final Configuration flinkConfig = new Configuration();
    flinkConfig.setString("presto.s3.access-key", "test_access_key_id");
    flinkConfig.setString("presto.s3.secret-key", "test_secret_access_key");
    FileSystem.initialize(flinkConfig);
    final FileSystem s3FileSystem = FileSystem.get(new URI("s3://test"));
    validateBasicCredentials(s3FileSystem);
}
Usage example of org.apache.flink.core.fs.FileSystem in the Apache Flink project (apache/flink):
class CompactOperatorTest, method testCompactOperator.
@Test
public void testCompactOperator() throws Exception {
    // Holds the snapshot taken after compaction so that a second harness run
    // can restore it and perform the post-checkpoint temp-file cleanup.
    final AtomicReference<OperatorSubtaskState> state = new AtomicReference<>();
    // Seven uncompacted input files ".uncompacted-f0" .. ".uncompacted-f6"
    // with these respective record counts.
    final int[] fileSizes = {3, 2, 2, 5, 1, 5, 4};
    final Path[] files = new Path[fileSizes.length];
    for (int i = 0; i < fileSizes.length; i++) {
        files[i] = newFile(".uncompacted-f" + i, fileSizes[i]);
    }
    final FileSystem fs = files[0].getFileSystem();
    runCompact(harness -> {
        harness.setup();
        harness.open();
        harness.processElement(new CompactionUnit(0, "p0", Arrays.asList(files[0], files[1], files[4])), 0);
        harness.processElement(new CompactionUnit(1, "p0", Collections.singletonList(files[3])), 0);
        harness.processElement(new CompactionUnit(2, "p1", Arrays.asList(files[2], files[5])), 0);
        harness.processElement(new CompactionUnit(3, "p0", Collections.singletonList(files[6])), 0);
        harness.processElement(new EndCompaction(1), 0);
        state.set(harness.snapshot(2, 0));
        // The operator should emit exactly one commit message for checkpoint 1
        // that covers both partitions.
        List<PartitionCommitInfo> outputs = harness.extractOutputValues();
        Assert.assertEquals(1, outputs.size());
        Assert.assertEquals(1, outputs.get(0).getCheckpointId());
        Assert.assertEquals(Arrays.asList("p0", "p1"), outputs.get(0).getPartitions());
        // One compacted file per compaction unit must have been written,
        // named after each unit's first input file.
        for (String compacted : new String[] {"compacted-f0", "compacted-f2", "compacted-f3", "compacted-f6"}) {
            Assert.assertTrue(fs.exists(new Path(folder, compacted)));
        }
        // Spot-check the merged content of the first compacted file
        // (sorted, since the merge order of f0/f1/f4 is not asserted here).
        byte[] bytes = FileUtils.readAllBytes(new File(folder.getPath(), "compacted-f0").toPath());
        Arrays.sort(bytes);
        Assert.assertArrayEquals(new byte[] {0, 0, 0, 1, 1, 2}, bytes);
    });
    runCompact(harness -> {
        harness.setup();
        harness.initializeState(state.get());
        harness.open();
        harness.notifyOfCompletedCheckpoint(2);
        // Once the checkpoint completes, every uncompacted temp file is deleted.
        for (Path uncompacted : files) {
            Assert.assertFalse(fs.exists(uncompacted));
        }
    });
}
Usage example of org.apache.flink.core.fs.FileSystem in the Apache Flink project (apache/flink):
class CompactOperatorTest, method testUnitSelection.
@Test
public void testUnitSelection() throws Exception {
    // Two parallel subtasks (parallelism 2, indices 0 and 1). The assertions
    // below show that task 0 compacts units 0 and 2 while task 1 compacts
    // units 1 and 3, even though all units are broadcast to both tasks.
    OneInputStreamOperatorTestHarness<CoordinatorOutput, PartitionCommitInfo> task0 = create(2, 0);
    task0.setup();
    task0.open();
    OneInputStreamOperatorTestHarness<CoordinatorOutput, PartitionCommitInfo> task1 = create(2, 1);
    task1.setup();
    task1.open();
    // Seven uncompacted input files ".uncompacted-f0" .. ".uncompacted-f6"
    // with these respective record counts.
    final int[] fileSizes = {3, 2, 2, 5, 1, 5, 4};
    final Path[] files = new Path[fileSizes.length];
    for (int i = 0; i < fileSizes.length; i++) {
        files[i] = newFile(".uncompacted-f" + i, fileSizes[i]);
    }
    final FileSystem fs = files[0].getFileSystem();
    // Broadcast all four units (plus the end marker) to task 0.
    task0.processElement(new CompactionUnit(0, "p0", Arrays.asList(files[0], files[1], files[4])), 0);
    task0.processElement(new CompactionUnit(1, "p0", Collections.singletonList(files[3])), 0);
    task0.processElement(new CompactionUnit(2, "p0", Arrays.asList(files[2], files[5])), 0);
    task0.processElement(new CompactionUnit(3, "p0", Collections.singletonList(files[6])), 0);
    task0.processElement(new EndCompaction(1), 0);
    // Task 0 produced the compacted files for the units it owns...
    Assert.assertTrue(fs.exists(new Path(folder, "compacted-f0")));
    Assert.assertTrue(fs.exists(new Path(folder, "compacted-f2")));
    // ...while f3 and f6 are in the charge of the other subtask.
    Assert.assertFalse(fs.exists(new Path(folder, "compacted-f3")));
    Assert.assertFalse(fs.exists(new Path(folder, "compacted-f6")));
    // Broadcast the same units to task 1.
    task1.processElement(new CompactionUnit(0, "p0", Arrays.asList(files[0], files[1], files[4])), 0);
    task1.processElement(new CompactionUnit(1, "p0", Collections.singletonList(files[3])), 0);
    task1.processElement(new CompactionUnit(2, "p0", Arrays.asList(files[2], files[5])), 0);
    task1.processElement(new CompactionUnit(3, "p0", Collections.singletonList(files[6])), 0);
    task1.processElement(new EndCompaction(1), 0);
    // Task 1 produced the remaining compacted files.
    Assert.assertTrue(fs.exists(new Path(folder, "compacted-f3")));
    Assert.assertTrue(fs.exists(new Path(folder, "compacted-f6")));
    task0.close();
    task1.close();
}
Aggregations