Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.
From the class StorageRecovery, the method getContainerModels:
/**
 * Build ContainerModels from the job config stored in the coordinator stream and put
 * the results in the containerModels map.
 */
private void getContainerModels() {
  MetricsRegistryMap metricsRegistryMap = new MetricsRegistryMap();
  CoordinatorStreamStore coordinatorStreamStore = new CoordinatorStreamStore(jobConfig, metricsRegistryMap);
  coordinatorStreamStore.init();
  try {
    Config configFromCoordinatorStream = CoordinatorStreamUtil.readConfigFromCoordinatorStream(coordinatorStreamStore);
    ChangelogStreamManager changelogStreamManager = new ChangelogStreamManager(coordinatorStreamStore);
    JobModelManager jobModelManager =
        JobModelManager.apply(configFromCoordinatorStream, changelogStreamManager.readPartitionMapping(),
            coordinatorStreamStore, metricsRegistryMap);
    JobModel jobModel = jobModelManager.jobModel();
    this.jobModel = jobModel;
    containers = jobModel.getContainers();
  } finally {
    coordinatorStreamStore.close();
  }
}
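The init()/read/close() sequence above is the standard lifecycle for a CoordinatorStreamStore. Below is a minimal standalone sketch of that lifecycle, assuming a valid JobConfig named jobConfig for a job whose coordinator stream already exists; it uses only the calls visible in the snippet above, and the "job.name" lookup is illustrative.

private void printJobNameFromCoordinatorStream(JobConfig jobConfig) {
  MetricsRegistryMap registry = new MetricsRegistryMap();
  CoordinatorStreamStore store = new CoordinatorStreamStore(jobConfig, registry);
  store.init(); // starts the underlying coordinator-stream consumer and producer
  try {
    // Read back the job config that was written to the coordinator stream at submission time.
    Config config = CoordinatorStreamUtil.readConfigFromCoordinatorStream(store);
    System.out.println("job.name = " + config.get("job.name"));
  } finally {
    store.close(); // always release the underlying system consumer/producer
  }
}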
Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.
From the class TestContainerAllocatorWithHostAffinity, the method setup:
@Before
public void setup() throws Exception {
  LocalityManager mockLocalityManager = mock(LocalityManager.class);
  when(mockLocalityManager.readLocality())
      .thenReturn(new LocalityModel(ImmutableMap.of("0", new ProcessorLocality("0", "abc"))));
  CoordinatorStreamStoreTestUtil coordinatorStreamStoreTestUtil = new CoordinatorStreamStoreTestUtil(config);
  CoordinatorStreamStore coordinatorStreamStore = coordinatorStreamStoreTestUtil.getCoordinatorStreamStore();
  coordinatorStreamStore.init();
  containerPlacementMetadataStore = new ContainerPlacementMetadataStore(coordinatorStreamStore);
  containerPlacementMetadataStore.start();
  containerManager = new ContainerManager(containerPlacementMetadataStore, state, clusterResourceManager,
      true, false, mockLocalityManager, faultDomainManager, config);
  containerAllocator = new ContainerAllocator(clusterResourceManager, config, state, true, containerManager);
  requestState = new MockContainerRequestState(clusterResourceManager, true);
  // The allocator's resourceRequestState field is private, so swap in the mock via reflection.
  Field requestStateField = containerAllocator.getClass().getDeclaredField("resourceRequestState");
  requestStateField.setAccessible(true);
  requestStateField.set(containerAllocator, requestState);
  allocatorThread = new Thread(containerAllocator);
}
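The snippet opens the store and starts the placement metadata store but shows no matching teardown. A plausible @After counterpart is sketched below; it is an assumption rather than code from the test class: it presumes ContainerPlacementMetadataStore exposes a stop() that mirrors the start() call above, and that the allocator thread is interrupted rather than left running.

@After
public void teardown() throws Exception {
  // Stop the allocator thread if a test left it running (assumed cleanup, not shown in the source).
  if (allocatorThread != null && allocatorThread.isAlive()) {
    allocatorThread.interrupt();
    allocatorThread.join();
  }
  containerPlacementMetadataStore.stop(); // assumed counterpart to start() in setup()
}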
Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.
From the class TestJobCoordinatorLaunchUtil, the method testRunClusterBasedJobCoordinator:
@Test
public void testRunClusterBasedJobCoordinator() throws Exception {
  Config originalConfig = buildOriginalConfig(ImmutableMap.of());
  JobConfig fullConfig = new JobConfig(new MapConfig(originalConfig, Collections.singletonMap("isAfterPlanning", "true")));
  Config autoSizingConfig = new MapConfig(Collections.singletonMap(JobConfig.JOB_AUTOSIZING_CONTAINER_COUNT, "10"));
  Config finalConfig = new MapConfig(autoSizingConfig, fullConfig);
  RemoteJobPlanner mockJobPlanner = mock(RemoteJobPlanner.class);
  CoordinatorStreamStore mockCoordinatorStreamStore = mock(CoordinatorStreamStore.class);
  ClusterBasedJobCoordinator mockJC = mock(ClusterBasedJobCoordinator.class);
  PowerMockito.mockStatic(CoordinatorStreamUtil.class);
  PowerMockito.doNothing().when(CoordinatorStreamUtil.class, "createCoordinatorStream", any());
  PowerMockito.doReturn(new MapConfig()).when(CoordinatorStreamUtil.class, "buildCoordinatorStreamConfig", any());
  PowerMockito.doReturn(autoSizingConfig).when(CoordinatorStreamUtil.class, "readLaunchConfigFromCoordinatorStream", any(), any());
  PowerMockito.whenNew(CoordinatorStreamStore.class).withAnyArguments().thenReturn(mockCoordinatorStreamStore);
  PowerMockito.whenNew(RemoteJobPlanner.class).withAnyArguments().thenReturn(mockJobPlanner);
  PowerMockito.whenNew(ClusterBasedJobCoordinator.class).withAnyArguments().thenReturn(mockJC);
  when(mockJobPlanner.prepareJobs()).thenReturn(Collections.singletonList(fullConfig));

  JobCoordinatorLaunchUtil.run(new MockStreamApplication(), originalConfig);

  verifyNew(ClusterBasedJobCoordinator.class).withArguments(any(MetricsRegistryMap.class), eq(mockCoordinatorStreamStore), eq(finalConfig));
  verify(mockJC, times(1)).run();
  verifyStatic(times(1));
  CoordinatorStreamUtil.createCoordinatorStream(fullConfig);
  verifyStatic(times(1));
  CoordinatorStreamUtil.writeConfigToCoordinatorStream(finalConfig, true);
}
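The whenNew(...) and mockStatic(...) calls above only take effect when the test class runs under PowerMock and the involved classes are prepared for instrumentation. Below is a sketch of the class-level setup such a test needs; the exact @PrepareForTest list is an assumption (whenNew requires preparing the class that performs the instantiation, here JobCoordinatorLaunchUtil, and mockStatic requires preparing CoordinatorStreamUtil).

@RunWith(PowerMockRunner.class)
@PrepareForTest({JobCoordinatorLaunchUtil.class, CoordinatorStreamUtil.class})
public class TestJobCoordinatorLaunchUtil {
  // ... test methods such as testRunClusterBasedJobCoordinator() above ...
}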
Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.
From the class TestStartpoint, the method testStartpointOldest:
@Test
public void testStartpointOldest() throws InterruptedException {
  publishKafkaEventsWithDelayPerEvent(inputKafkaTopic3, 0, NUM_KAFKA_EVENTS, PROCESSOR_IDS[2], Duration.ofMillis(2));
  ConcurrentHashMap<String, IncomingMessageEnvelope> recvEventsInputStartpointOldest = new ConcurrentHashMap<>();

  // Write a StartpointOldest for every partition, then release the store before the job starts.
  CoordinatorStreamStore coordinatorStreamStore = createCoordinatorStreamStore(applicationConfig1);
  coordinatorStreamStore.init();
  StartpointManager startpointManager = new StartpointManager(coordinatorStreamStore);
  startpointManager.start();
  StartpointOldest startpointOldest = new StartpointOldest();
  writeStartpoints(startpointManager, inputKafkaTopic3, ZK_TEST_PARTITION_COUNT, startpointOldest);
  startpointManager.stop();
  coordinatorStreamStore.close();

  TestTaskApplication.TaskApplicationProcessCallback processedCallback = (IncomingMessageEnvelope ime, TaskCallback callback) -> {
    try {
      String streamName = ime.getSystemStreamPartition().getStream();
      TestKafkaEvent testKafkaEvent = TestKafkaEvent.fromString((String) ime.getMessage());
      String eventIndex = testKafkaEvent.getEventData();
      if (inputKafkaTopic3.equals(streamName)) {
        recvEventsInputStartpointOldest.put(eventIndex, ime);
      } else {
        throw new RuntimeException("Unexpected input stream: " + streamName);
      }
      callback.complete();
    } catch (Exception ex) {
      callback.failure(ex);
    }
  };

  // Fetch all since consuming from oldest
  CountDownLatch processedMessagesLatchStartpointOldest = new CountDownLatch(NUM_KAFKA_EVENTS);
  CountDownLatch shutdownLatchStartpointOldest = new CountDownLatch(1);
  TestTaskApplication testTaskApplicationStartpointOldest =
      new TestTaskApplication(TEST_SYSTEM, inputKafkaTopic3, outputKafkaTopic,
          processedMessagesLatchStartpointOldest, shutdownLatchStartpointOldest, Optional.of(processedCallback));
  ApplicationRunner appRunner = ApplicationRunners.getApplicationRunner(testTaskApplicationStartpointOldest, applicationConfig3);
  executeRun(appRunner, applicationConfig3);
  assertTrue(processedMessagesLatchStartpointOldest.await(1, TimeUnit.MINUTES));
  appRunner.kill();
  appRunner.waitForFinish();
  assertTrue(shutdownLatchStartpointOldest.await(1, TimeUnit.MINUTES));
  assertEquals("Expecting to have processed all the events", NUM_KAFKA_EVENTS, recvEventsInputStartpointOldest.size());
}
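writeStartpoints is a private helper of the test class whose body is not shown here. A plausible sketch of what it does follows; the name, parameters, and TEST_SYSTEM constant come from the calls above, while the loop body is an assumption built on the public StartpointManager.writeStartpoint(SystemStreamPartition, Startpoint) API.

private static void writeStartpoints(StartpointManager startpointManager, String streamName,
    int partitionCount, Startpoint startpoint) {
  // Write the same startpoint for every partition of the input topic.
  for (int p = 0; p < partitionCount; p++) {
    SystemStreamPartition ssp = new SystemStreamPartition(TEST_SYSTEM, streamName, new Partition(p));
    startpointManager.writeStartpoint(ssp, startpoint);
  }
}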
Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.
From the class TestStartpoint, the method testStartpointUpcoming:
@Test
public void testStartpointUpcoming() throws InterruptedException {
  publishKafkaEventsWithDelayPerEvent(inputKafkaTopic4, 0, NUM_KAFKA_EVENTS, PROCESSOR_IDS[3], Duration.ofMillis(2));
  ConcurrentHashMap<String, IncomingMessageEnvelope> recvEventsInputStartpointUpcoming = new ConcurrentHashMap<>();

  CoordinatorStreamStore coordinatorStreamStore = createCoordinatorStreamStore(applicationConfig1);
  coordinatorStreamStore.init();
  StartpointManager startpointManager = new StartpointManager(coordinatorStreamStore);
  startpointManager.start();
  StartpointUpcoming startpointUpcoming = new StartpointUpcoming();
  writeStartpoints(startpointManager, inputKafkaTopic4, ZK_TEST_PARTITION_COUNT, startpointUpcoming);
  startpointManager.stop();
  coordinatorStreamStore.close();

  TestTaskApplication.TaskApplicationProcessCallback processedCallback = (IncomingMessageEnvelope ime, TaskCallback callback) -> {
    try {
      String streamName = ime.getSystemStreamPartition().getStream();
      TestKafkaEvent testKafkaEvent = TestKafkaEvent.fromString((String) ime.getMessage());
      String eventIndex = testKafkaEvent.getEventData();
      if (inputKafkaTopic4.equals(streamName)) {
        recvEventsInputStartpointUpcoming.put(eventIndex, ime);
      } else {
        throw new RuntimeException("Unexpected input stream: " + streamName);
      }
      callback.complete();
    } catch (Exception ex) {
      callback.failure(ex);
    }
  };

  // Expecting none, so just attempt a small number of fetches.
  CountDownLatch processedMessagesLatchStartpointUpcoming = new CountDownLatch(5);
  CountDownLatch shutdownLatchStartpointUpcoming = new CountDownLatch(1);
  TestTaskApplication testTaskApplicationStartpointUpcoming =
      new TestTaskApplication(TEST_SYSTEM, inputKafkaTopic4, outputKafkaTopic,
          processedMessagesLatchStartpointUpcoming, shutdownLatchStartpointUpcoming, Optional.of(processedCallback));

  // Startpoint upcoming
  ApplicationRunner appRunner = ApplicationRunners.getApplicationRunner(testTaskApplicationStartpointUpcoming, applicationConfig4);
  executeRun(appRunner, applicationConfig4);
  assertFalse("Expecting to timeout and not process any old messages.", processedMessagesLatchStartpointUpcoming.await(15, TimeUnit.SECONDS));
  assertEquals("Expecting not to process any old messages.", 0, recvEventsInputStartpointUpcoming.size());
  appRunner.kill();
  appRunner.waitForFinish();
  assertTrue(shutdownLatchStartpointUpcoming.await(1, TimeUnit.MINUTES));
}
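StartpointOldest and StartpointUpcoming are two of the Startpoint variants used across these tests; Samza also ships variants that target a concrete position. A brief sketch for orientation; the offset and timestamp values are illustrative only.

Startpoint fromOldest = new StartpointOldest(); // seek to the oldest available offset
Startpoint fromUpcoming = new StartpointUpcoming(); // skip the backlog, consume only new messages
Startpoint fromOffset = new StartpointSpecific("42"); // seek to a specific offset
Startpoint fromTime = new StartpointTimestamp(1600000000000L); // seek by message timestamp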