Example usage of org.apache.flink.configuration.MemorySize in the Apache Flink project,
taken from class ProcessMemoryUtilsTestBase, method testConfigJvmMetaspaceSize:
@Test
public void testConfigJvmMetaspaceSize() {
    // Configure an explicit JVM Metaspace size and verify that the derived
    // process spec reports it unchanged in every configuration variant.
    final MemorySize configuredMetaspace = MemorySize.parse("50m");
    final Configuration config = new Configuration();
    config.set(options.getJvmOptions().getJvmMetaspaceOption(), configuredMetaspace);

    validateInAllConfigurations(
            config,
            processSpec -> assertThat(processSpec.getJvmMetaspaceSize(), is(configuredMetaspace)));
}
Example usage of org.apache.flink.configuration.MemorySize in the Apache Flink project,
taken from class ProcessMemoryUtilsTestBase, method testConfigJvmOverheadRangeFailure:
@Test
public void testConfigJvmOverheadRangeFailure() {
    // A JVM overhead minimum larger than the maximum is an invalid range
    // and must be rejected in every configuration variant.
    final MemorySize overheadMin = MemorySize.parse("200m");
    final MemorySize overheadMax = MemorySize.parse("50m");

    final Configuration config = new Configuration();
    config.set(options.getJvmOptions().getJvmOverheadMin(), overheadMin);
    config.set(options.getJvmOptions().getJvmOverheadMax(), overheadMax);

    validateFailInAllConfigurations(config);
}
Example usage of org.apache.flink.configuration.MemorySize in the Apache Flink project,
taken from class ProcessMemoryUtilsTestBase, method testConfigBothNewOptionAndLegacyHeapSize:
@Test
public void testConfigBothNewOptionAndLegacyHeapSize() {
    // When both the new memory option and the legacy heap option are set,
    // the value of the new option is expected to take precedence.
    final MemorySize valueOfNewOption = MemorySize.parse("1g");
    final MemorySize valueOfLegacyOption = MemorySize.parse("2g");

    final Configuration config = new Configuration();
    config.set(getNewOptionForLegacyHeapOption(), valueOfNewOption);
    config.set(legacyMemoryOptions.getHeap(), valueOfLegacyOption);

    testConfigLegacyHeapMemory(config, valueOfNewOption);
}
Example usage of org.apache.flink.configuration.MemorySize in the Apache Flink project,
taken from class LocalStreamingFileSinkTest, method testClosingWithCustomizedBucketer:
@Test
public void testClosingWithCustomizedBucketer() throws Exception {
final File outDir = TEMP_FOLDER.newFolder();
// Tiny max part size (2 bytes) so that every element rolls its part file.
final long partMaxSize = 2L;
final long inactivityInterval = 100L;
// Roll on size, rollover interval, or inactivity — all thresholds chosen so the
// test can trigger rolls deterministically via setProcessingTime below.
final RollingPolicy<Tuple2<String, Integer>, Integer> rollingPolicy = DefaultRollingPolicy.builder().withMaxPartSize(new MemorySize(partMaxSize)).withRolloverInterval(Duration.ofMillis(inactivityInterval)).withInactivityInterval(Duration.ofMillis(inactivityInterval)).build();
// Harness with a custom bucketer that buckets by the tuple's Integer field.
try (OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Object> testHarness = TestUtils.createCustomizedRescalingTestSink(outDir, 1, 0, 100L, new TupleToIntegerBucketer(), new Tuple2Encoder(), rollingPolicy, new DefaultBucketFactoryImpl<>())) {
testHarness.setup();
testHarness.open();
testHarness.setProcessingTime(0L);
// Two elements in two distinct buckets -> two in-progress part files.
testHarness.processElement(new StreamRecord<>(Tuple2.of("test1", 1), 1L));
testHarness.processElement(new StreamRecord<>(Tuple2.of("test2", 2), 1L));
TestUtils.checkLocalFs(outDir, 2, 0);
// this is to check the inactivity threshold
testHarness.setProcessingTime(101L);
TestUtils.checkLocalFs(outDir, 2, 0);
testHarness.processElement(new StreamRecord<>(Tuple2.of("test3", 3), 1L));
TestUtils.checkLocalFs(outDir, 3, 0);
// Snapshot alone must not publish files — they stay pending (3 in-progress here).
testHarness.snapshot(0L, 1L);
TestUtils.checkLocalFs(outDir, 3, 0);
// Only the checkpoint-complete notification finalizes the pending files.
testHarness.notifyOfCompletedCheckpoint(0L);
TestUtils.checkLocalFs(outDir, 0, 3);
// A fourth element opens a new in-progress file alongside the 3 finished ones.
testHarness.processElement(new StreamRecord<>(Tuple2.of("test4", 4), 10L));
TestUtils.checkLocalFs(outDir, 1, 3);
testHarness.snapshot(1L, 0L);
testHarness.notifyOfCompletedCheckpoint(1L);
}
// at close all files moved to final.
TestUtils.checkLocalFs(outDir, 0, 4);
// check file content and bucket ID.
Map<File, String> contents = TestUtils.getFileContentByPath(outDir);
for (Map.Entry<File, String> fileContents : contents.entrySet()) {
// Bucket directory name is the Integer bucket id produced by TupleToIntegerBucketer.
Integer bucketId = Integer.parseInt(fileContents.getKey().getParentFile().getName());
Assert.assertTrue(bucketId >= 1 && bucketId <= 4);
// Each bucket holds exactly one record, encoded as "test<i>@<i>\n" by Tuple2Encoder.
Assert.assertEquals(String.format("test%d@%d\n", bucketId, bucketId), fileContents.getValue());
}
}
Example usage of org.apache.flink.configuration.MemorySize in the Apache Flink project,
taken from class JobManagerProcessUtilsTest, method testOffHeapMemoryDerivedFromJvmHeapAndTotalFlinkMemory:
@Test
public void testOffHeapMemoryDerivedFromJvmHeapAndTotalFlinkMemory() {
    // Off-Heap must be derived as (Total Flink Memory - JVM Heap); the default
    // Off-Heap size is ignored, and a log message is expected to say so.
    final MemorySize heap = MemorySize.ofMebiBytes(150);
    final MemorySize defaultOffHeap = JobManagerOptions.OFF_HEAP_MEMORY.defaultValue();
    final MemorySize derivedOffHeap = MemorySize.ofMebiBytes(100).add(defaultOffHeap);
    final MemorySize totalFlink = heap.add(derivedOffHeap);

    final Configuration config = new Configuration();
    config.set(JobManagerOptions.TOTAL_FLINK_MEMORY, totalFlink);
    config.set(JobManagerOptions.JVM_HEAP_MEMORY, heap);

    final JobManagerProcessSpec spec = JobManagerProcessUtils.processSpecFromConfig(config);
    assertThat(spec.getJvmDirectMemorySize(), is(derivedOffHeap));

    // The expected message text must match the production log line exactly.
    final String expectedMessage =
            String.format(
                    "The Off-Heap Memory size (%s) is derived the configured Total Flink Memory size (%s) minus "
                            + "the configured JVM Heap Memory size (%s). The default Off-Heap Memory size (%s) is ignored.",
                    derivedOffHeap.toHumanReadableString(),
                    totalFlink.toHumanReadableString(),
                    heap.toHumanReadableString(),
                    defaultOffHeap.toHumanReadableString());
    MatcherAssert.assertThat(testLoggerResource.getMessages(), hasItem(containsString(expectedMessage)));
}
Aggregations