Use of org.apache.flink.metrics.groups.UnregisteredMetricsGroup in project flink by apache.
The class NettyShuffleUtilsTest, method createInputGate.
private SingleInputGate createInputGate(
        NettyShuffleEnvironment network,
        ResultPartitionType resultPartitionType,
        int numInputChannels) throws IOException {
    // one remote shuffle descriptor per requested input channel
    ShuffleDescriptor[] shuffleDescriptors = new NettyShuffleDescriptor[numInputChannels];
    for (int i = 0; i < numInputChannels; i++) {
        shuffleDescriptors[i] =
                createRemoteWithIdAndLocation(new IntermediateResultPartitionID(), ResourceID.generate());
    }
    InputGateDeploymentDescriptor inputGateDeploymentDescriptor =
            new InputGateDeploymentDescriptor(
                    new IntermediateDataSetID(), resultPartitionType, 0, shuffleDescriptors);
    ExecutionAttemptID consumerID = new ExecutionAttemptID();
    Collection<SingleInputGate> inputGates =
            network.createInputGates(
                    network.createShuffleIOOwnerContext("", consumerID, new UnregisteredMetricsGroup()),
                    SingleInputGateBuilder.NO_OP_PRODUCER_CHECKER,
                    Collections.singletonList(inputGateDeploymentDescriptor));
    return inputGates.iterator().next();
}
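A caller in the same test class might exercise this helper as sketched below. This is not part of the original snippet: the test name is invented, and NettyShuffleEnvironmentBuilder is the test-scoped builder used elsewhere in flink-runtime tests, so its availability here is an assumption.

@Test
public void createsOneChannelPerDescriptor() throws Exception {
    // build a local shuffle environment for the test (assumed test utility)
    try (NettyShuffleEnvironment network = new NettyShuffleEnvironmentBuilder().build()) {
        SingleInputGate gate = createInputGate(network, ResultPartitionType.PIPELINED, 3);
        // the helper builds one shuffle descriptor, hence one channel, per requested input channel
        assertEquals(3, gate.getNumberOfInputChannels());
    }
}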
Use of org.apache.flink.metrics.groups.UnregisteredMetricsGroup in project flink by apache.
The class StateBackendBenchmarkUtils, method createRocksDBKeyedStateBackend.
private static RocksDBKeyedStateBackend<Long> createRocksDBKeyedStateBackend(File rootDir) throws IOException {
    File recoveryBaseDir = prepareDirectory(recoveryDirName, rootDir);
    File dbPathFile = prepareDirectory(dbDirName, rootDir);
    ExecutionConfig executionConfig = new ExecutionConfig();
    RocksDBResourceContainer resourceContainer = new RocksDBResourceContainer();
    RocksDBKeyedStateBackendBuilder<Long> builder = new RocksDBKeyedStateBackendBuilder<>(
            "Test", Thread.currentThread().getContextClassLoader(), dbPathFile, resourceContainer,
            stateName -> resourceContainer.getColumnOptions(), null, LongSerializer.INSTANCE,
            2 /* numberOfKeyGroups */, new KeyGroupRange(0, 1), executionConfig,
            new LocalRecoveryConfig(null), EmbeddedRocksDBStateBackend.PriorityQueueStateType.ROCKSDB,
            TtlTimeProvider.DEFAULT, LatencyTrackingStateConfig.disabled(), new UnregisteredMetricsGroup(),
            Collections.emptyList(), AbstractStateBackend.getCompressionDecorator(executionConfig),
            new CloseableRegistry());
    try {
        return builder.build();
    } catch (Exception e) {
        // the container owns native RocksDB resources and must be released if the build fails
        IOUtils.closeQuietly(resourceContainer);
        throw e;
    }
}
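A minimal sketch of how this factory might be used from the same utils class. The temp-directory setup and the try/finally are illustrative assumptions, not code from the original; a successfully built backend owns the native RocksDB handle and must be disposed.

File rootDir = Files.createTempDirectory("rocksdb-benchmark").toFile();
RocksDBKeyedStateBackend<Long> backend = createRocksDBKeyedStateBackend(rootDir);
try {
    // run the benchmark body against the backend here
} finally {
    // dispose closes the native RocksDB instance and cleans up its working directory
    backend.dispose();
}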
Use of org.apache.flink.metrics.groups.UnregisteredMetricsGroup in project flink by apache.
The class RocksDBStateBackendConfigTest, method testFailWhenNoLocalStorageDir.
// ------------------------------------------------------------------------
// RocksDB local file directory initialization
// ------------------------------------------------------------------------
@Test
public void testFailWhenNoLocalStorageDir() throws Exception {
    final File targetDir = tempFolder.newFolder();
    Assume.assumeTrue("Cannot mark directory non-writable", targetDir.setWritable(false, false));
    String checkpointPath = tempFolder.newFolder().toURI().toString();
    RocksDBStateBackend rocksDbBackend = new RocksDBStateBackend(checkpointPath);
    try (MockEnvironment env = getMockEnvironment(tempFolder.newFolder())) {
        rocksDbBackend.setDbStoragePath(targetDir.getAbsolutePath());
        boolean hasFailure = false;
        try {
            rocksDbBackend.createKeyedStateBackend(
                    env, env.getJobID(), "foobar", IntSerializer.INSTANCE, 1,
                    new KeyGroupRange(0, 0),
                    new KvStateRegistry().createTaskRegistry(env.getJobID(), new JobVertexID()),
                    TtlTimeProvider.DEFAULT, new UnregisteredMetricsGroup(),
                    Collections.emptyList(), new CloseableRegistry());
        } catch (Exception e) {
            assertTrue(e.getMessage().contains("No local storage directories available"));
            assertTrue(e.getMessage().contains(targetDir.getAbsolutePath()));
            hasFailure = true;
        }
        assertTrue("We must see a failure because no storage directory is usable.", hasFailure);
    } finally {
        // noinspection ResultOfMethodCallIgnored
        targetDir.setWritable(true, false);
    }
}
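The same expectation can be written without the hasFailure flag using assertThrows, available from JUnit 4.13; whether this project's JUnit version supports it is an assumption, and the broad Exception type simply mirrors the original catch clause.

Exception e = assertThrows(Exception.class, () ->
        rocksDbBackend.createKeyedStateBackend(
                env, env.getJobID(), "foobar", IntSerializer.INSTANCE, 1,
                new KeyGroupRange(0, 0),
                new KvStateRegistry().createTaskRegistry(env.getJobID(), new JobVertexID()),
                TtlTimeProvider.DEFAULT, new UnregisteredMetricsGroup(),
                Collections.emptyList(), new CloseableRegistry()));
assertTrue(e.getMessage().contains("No local storage directories available"));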
Use of org.apache.flink.metrics.groups.UnregisteredMetricsGroup in project flink by apache.
The class RocksDBStateBackendConfigTest, method testContinueOnSomeDbDirectoriesMissing.
@Test
public void testContinueOnSomeDbDirectoriesMissing() throws Exception {
    final File targetDir1 = tempFolder.newFolder();
    final File targetDir2 = tempFolder.newFolder();
    Assume.assumeTrue("Cannot mark directory non-writable", targetDir1.setWritable(false, false));
    String checkpointPath = tempFolder.newFolder().toURI().toString();
    RocksDBStateBackend rocksDbBackend = new RocksDBStateBackend(checkpointPath);
    try (MockEnvironment env = getMockEnvironment(tempFolder.newFolder())) {
        rocksDbBackend.setDbStoragePaths(targetDir1.getAbsolutePath(), targetDir2.getAbsolutePath());
        try {
            AbstractKeyedStateBackend<Integer> keyedStateBackend =
                    rocksDbBackend.createKeyedStateBackend(
                            env, env.getJobID(), "foobar", IntSerializer.INSTANCE, 1,
                            new KeyGroupRange(0, 0),
                            new KvStateRegistry().createTaskRegistry(env.getJobID(), new JobVertexID()),
                            TtlTimeProvider.DEFAULT, new UnregisteredMetricsGroup(),
                            Collections.emptyList(), new CloseableRegistry());
            IOUtils.closeQuietly(keyedStateBackend);
            keyedStateBackend.dispose();
        } catch (Exception e) {
            e.printStackTrace();
            fail("Backend initialization failed even though some paths were available");
        }
    } finally {
        // noinspection ResultOfMethodCallIgnored
        targetDir1.setWritable(true, false);
    }
}
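The storage paths set programmatically above can also come from configuration. A sketch of the equivalent setup: setDbStoragePaths mirrors the state.backend.rocksdb.localdir option (RocksDBOptions.LOCAL_DIRECTORIES), which takes a comma-separated list; the configure call producing a new backend instance is standard for configurable state backends, though wiring it into this exact test is an assumption.

Configuration config = new Configuration();
// comma-separated list of local directories for RocksDB working files
config.setString(RocksDBOptions.LOCAL_DIRECTORIES,
        targetDir1.getAbsolutePath() + "," + targetDir2.getAbsolutePath());
RocksDBStateBackend configured = rocksDbBackend.configure(config, getClass().getClassLoader());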
Use of org.apache.flink.metrics.groups.UnregisteredMetricsGroup in project flink by apache.
The class Kafka09FetcherTest, method ensureOffsetsGetCommitted.
@Test
public void ensureOffsetsGetCommitted() throws Exception {
    // test data
    final KafkaTopicPartition testPartition1 = new KafkaTopicPartition("test", 42);
    final KafkaTopicPartition testPartition2 = new KafkaTopicPartition("another", 99);

    final Map<KafkaTopicPartition, Long> testCommitData1 = new HashMap<>();
    testCommitData1.put(testPartition1, 11L);
    testCommitData1.put(testPartition2, 18L);

    final Map<KafkaTopicPartition, Long> testCommitData2 = new HashMap<>();
    testCommitData2.put(testPartition1, 19L);
    testCommitData2.put(testPartition2, 28L);

    final BlockingQueue<Map<TopicPartition, OffsetAndMetadata>> commitStore = new LinkedBlockingQueue<>();

    // ----- the mock consumer with poll(), wakeup(), and commitAsync() calls -----
    final MultiShotLatch blockerLatch = new MultiShotLatch();
    KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {
        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) throws InterruptedException {
            blockerLatch.await();
            return ConsumerRecords.empty();
        }
    });
    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) {
            blockerLatch.trigger();
            return null;
        }
    }).when(mockConsumer).wakeup();
    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) {
            @SuppressWarnings("unchecked")
            Map<TopicPartition, OffsetAndMetadata> offsets =
                    (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
            OffsetCommitCallback callback = (OffsetCommitCallback) invocation.getArguments()[1];
            // record the committed offsets and complete the commit immediately
            commitStore.add(offsets);
            callback.onComplete(offsets, null);
            return null;
        }
    }).when(mockConsumer).commitAsync(
            Mockito.<Map<TopicPartition, OffsetAndMetadata>>any(), any(OffsetCommitCallback.class));

    // make sure the fetcher creates the mock consumer
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);

    // ----- create the test fetcher -----
    @SuppressWarnings("unchecked")
    SourceContext<String> sourceContext = mock(SourceContext.class);
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets =
            Collections.singletonMap(new KafkaTopicPartition("test", 42), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema =
            new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka09Fetcher<String> fetcher = new Kafka09Fetcher<>(
            sourceContext,
            partitionsWithInitialOffsets,
            null, /* periodic watermark extractor */
            null, /* punctuated watermark extractor */
            new TestProcessingTimeService(),
            10, /* watermark interval */
            this.getClass().getClassLoader(),
            "task_name",
            new UnregisteredMetricsGroup(),
            schema,
            new Properties(),
            0L, /* poll timeout */
            false /* use metrics */);
    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {
        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();

    // ----- trigger the first offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData1);
    Map<TopicPartition, OffsetAndMetadata> result1 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result1.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            // the committed offset is the last processed offset + 1 (see the note after this test)
            assertEquals(12L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(19L, entry.getValue().offset());
        }
    }
    // ----- trigger the second offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData2);
    Map<TopicPartition, OffsetAndMetadata> result2 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result2.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(20L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(29L, entry.getValue().offset());
        }
    }
    // ----- test done, wait till the fetcher is done for a clean shutdown -----
    fetcher.cancel();
    fetcherRunner.join();

    // check that there were no errors in the fetcher
    final Throwable caughtError = error.get();
    if (caughtError != null && !(caughtError instanceof Handover.ClosedException)) {
        throw new Exception("Exception in the fetcher", caughtError);
    }
}
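The expected values in the assertions follow Kafka's commit convention: the offset handed to commitAsync is the offset of the next record to read, i.e. the last processed offset plus one. A standalone illustration of just that convention, with numbers mirroring testCommitData1 (nothing here is Flink-specific):

long lastProcessedOffset = 11L;
// Kafka's committed offset points at the next record to consume
OffsetAndMetadata toCommit = new OffsetAndMetadata(lastProcessedOffset + 1);
assertEquals(12L, toCommit.offset());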