Example 11 with CoordinatorStreamStore

Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.

The class StorageRecovery, method getContainerModels.

/**
 * Build ContainerModels from the config read from the coordinator stream and put the results in the containerModels map.
 */
private void getContainerModels() {
    MetricsRegistryMap metricsRegistryMap = new MetricsRegistryMap();
    CoordinatorStreamStore coordinatorStreamStore = new CoordinatorStreamStore(jobConfig, metricsRegistryMap);
    coordinatorStreamStore.init();
    try {
        Config configFromCoordinatorStream = CoordinatorStreamUtil.readConfigFromCoordinatorStream(coordinatorStreamStore);
        ChangelogStreamManager changelogStreamManager = new ChangelogStreamManager(coordinatorStreamStore);
        JobModelManager jobModelManager = JobModelManager.apply(configFromCoordinatorStream, changelogStreamManager.readPartitionMapping(), coordinatorStreamStore, metricsRegistryMap);
        JobModel jobModel = jobModelManager.jobModel();
        this.jobModel = jobModel;
        containers = jobModel.getContainers();
    } finally {
        coordinatorStreamStore.close();
    }
}
Also used: CoordinatorStreamStore (org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore), SystemConfig (org.apache.samza.config.SystemConfig), StorageConfig (org.apache.samza.config.StorageConfig), SerializerConfig (org.apache.samza.config.SerializerConfig), TaskConfig (org.apache.samza.config.TaskConfig), Config (org.apache.samza.config.Config), JobModelManager (org.apache.samza.coordinator.JobModelManager), JobModel (org.apache.samza.job.model.JobModel), MetricsRegistryMap (org.apache.samza.metrics.MetricsRegistryMap)
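
Note how a single CoordinatorStreamStore instance is initialized once, shared by ChangelogStreamManager and JobModelManager, and then closed in the finally block. For readers who want to query the store directly, the sketch below shows the same init/close lifecycle around a namespace-scoped view; the NamespaceAwareCoordinatorStreamStore wrapper and the "set-config" namespace name are assumptions for illustration, not taken from the snippet above.

import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.samza.config.Config;
import org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore;
import org.apache.samza.coordinator.metadatastore.NamespaceAwareCoordinatorStreamStore;
import org.apache.samza.metrics.MetricsRegistryMap;

public class CoordinatorStoreSketch {

    // Hypothetical namespace name, used purely for illustration.
    private static final String NAMESPACE = "set-config";

    public static void dumpNamespace(Config jobConfig) {
        CoordinatorStreamStore store = new CoordinatorStreamStore(jobConfig, new MetricsRegistryMap());
        // Same lifecycle as getContainerModels(): init before use, close in a finally block.
        store.init();
        try {
            // Scope reads to a single coordinator stream message type; the underlying store is already initialized.
            NamespaceAwareCoordinatorStreamStore scoped = new NamespaceAwareCoordinatorStreamStore(store, NAMESPACE);
            for (Map.Entry<String, byte[]> entry : scoped.all().entrySet()) {
                System.out.println(entry.getKey() + " -> " + new String(entry.getValue(), StandardCharsets.UTF_8));
            }
        } finally {
            store.close();
        }
    }
}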

Example 12 with CoordinatorStreamStore

Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.

The class TestContainerAllocatorWithHostAffinity, method setup.

@Before
public void setup() throws Exception {
    LocalityManager mockLocalityManager = mock(LocalityManager.class);
    when(mockLocalityManager.readLocality()).thenReturn(new LocalityModel(ImmutableMap.of("0", new ProcessorLocality("0", "abc"))));
    CoordinatorStreamStoreTestUtil coordinatorStreamStoreTestUtil = new CoordinatorStreamStoreTestUtil(config);
    CoordinatorStreamStore coordinatorStreamStore = coordinatorStreamStoreTestUtil.getCoordinatorStreamStore();
    coordinatorStreamStore.init();
    containerPlacementMetadataStore = new ContainerPlacementMetadataStore(coordinatorStreamStore);
    containerPlacementMetadataStore.start();
    containerManager = new ContainerManager(containerPlacementMetadataStore, state, clusterResourceManager, true, false, mockLocalityManager, faultDomainManager, config);
    containerAllocator = new ContainerAllocator(clusterResourceManager, config, state, true, containerManager);
    requestState = new MockContainerRequestState(clusterResourceManager, true);
    // Inject the mock request state into the allocator via reflection, since the field is private.
    Field requestStateField = containerAllocator.getClass().getDeclaredField("resourceRequestState");
    requestStateField.setAccessible(true);
    requestStateField.set(containerAllocator, requestState);
    allocatorThread = new Thread(containerAllocator);
}
Also used: ProcessorLocality (org.apache.samza.job.model.ProcessorLocality), Field (java.lang.reflect.Field), CoordinatorStreamStore (org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore), ContainerPlacementMetadataStore (org.apache.samza.clustermanager.container.placement.ContainerPlacementMetadataStore), LocalityManager (org.apache.samza.container.LocalityManager), LocalityModel (org.apache.samza.job.model.LocalityModel), CoordinatorStreamStoreTestUtil (org.apache.samza.coordinator.metadatastore.CoordinatorStreamStoreTestUtil), Before (org.junit.Before)
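
The setup() above initializes a CoordinatorStreamStore and starts a ContainerPlacementMetadataStore, but the snippet ends before any cleanup. A minimal teardown sketch follows; it assumes org.junit.After, assumes the coordinator stream store is kept in a field next to containerPlacementMetadataStore (in the code above it is only a local variable), and assumes stop()/close() mirror the start()/init() calls in setup(), so treat it as an illustration rather than the test's actual teardown.

@After
public void teardown() {
    // Stop the placement metadata store started in setup().
    if (containerPlacementMetadataStore != null) {
        containerPlacementMetadataStore.stop();
    }
    // Close the coordinator stream store initialized in setup() (assumes it was saved in a field).
    if (coordinatorStreamStore != null) {
        coordinatorStreamStore.close();
    }
}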

Example 13 with CoordinatorStreamStore

Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.

The class TestJobCoordinatorLaunchUtil, method testRunClusterBasedJobCoordinator.

@Test
public void testRunClusterBasedJobCoordinator() throws Exception {
    Config originalConfig = buildOriginalConfig(ImmutableMap.of());
    JobConfig fullConfig = new JobConfig(new MapConfig(originalConfig, Collections.singletonMap("isAfterPlanning", "true")));
    Config autoSizingConfig = new MapConfig(Collections.singletonMap(JobConfig.JOB_AUTOSIZING_CONTAINER_COUNT, "10"));
    Config finalConfig = new MapConfig(autoSizingConfig, fullConfig);
    RemoteJobPlanner mockJobPlanner = mock(RemoteJobPlanner.class);
    CoordinatorStreamStore mockCoordinatorStreamStore = mock(CoordinatorStreamStore.class);
    ClusterBasedJobCoordinator mockJC = mock(ClusterBasedJobCoordinator.class);
    // Stub CoordinatorStreamUtil's static methods so the test never creates a real coordinator stream.
    PowerMockito.mockStatic(CoordinatorStreamUtil.class);
    PowerMockito.doNothing().when(CoordinatorStreamUtil.class, "createCoordinatorStream", any());
    PowerMockito.doReturn(new MapConfig()).when(CoordinatorStreamUtil.class, "buildCoordinatorStreamConfig", any());
    PowerMockito.doReturn(autoSizingConfig).when(CoordinatorStreamUtil.class, "readLaunchConfigFromCoordinatorStream", any(), any());
    PowerMockito.whenNew(CoordinatorStreamStore.class).withAnyArguments().thenReturn(mockCoordinatorStreamStore);
    PowerMockito.whenNew(RemoteJobPlanner.class).withAnyArguments().thenReturn(mockJobPlanner);
    PowerMockito.whenNew(ClusterBasedJobCoordinator.class).withAnyArguments().thenReturn(mockJC);
    when(mockJobPlanner.prepareJobs()).thenReturn(Collections.singletonList(fullConfig));
    JobCoordinatorLaunchUtil.run(new MockStreamApplication(), originalConfig);
    verifyNew(ClusterBasedJobCoordinator.class).withArguments(any(MetricsRegistryMap.class), eq(mockCoordinatorStreamStore), eq(finalConfig));
    verify(mockJC, times(1)).run();
    verifyStatic(times(1));
    CoordinatorStreamUtil.createCoordinatorStream(fullConfig);
    verifyStatic(times(1));
    CoordinatorStreamUtil.writeConfigToCoordinatorStream(finalConfig, true);
}
Also used: CoordinatorStreamStore (org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore), MockStreamApplication (org.apache.samza.application.MockStreamApplication), MetricsConfig (org.apache.samza.config.MetricsConfig), JobConfig (org.apache.samza.config.JobConfig), MapConfig (org.apache.samza.config.MapConfig), JobCoordinatorConfig (org.apache.samza.config.JobCoordinatorConfig), Config (org.apache.samza.config.Config), MetricsRegistryMap (org.apache.samza.metrics.MetricsRegistryMap), RemoteJobPlanner (org.apache.samza.execution.RemoteJobPlanner), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
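
The whenNew(...) and mockStatic(...) stubs above only take effect when PowerMock prepares the classes that construct or call those collaborators. A sketch of the class-level annotations that typically accompany such a test is shown below; the exact list in @PrepareForTest is an assumption for illustration, not copied from the real test class.

import org.junit.runner.RunWith;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;

// JobCoordinatorLaunchUtil is prepared so whenNew(...) can intercept the constructors it invokes;
// CoordinatorStreamUtil is prepared so its static methods can be stubbed.
@RunWith(PowerMockRunner.class)
@PrepareForTest({JobCoordinatorLaunchUtil.class, CoordinatorStreamUtil.class})
public class TestJobCoordinatorLaunchUtilSketch {
    // Test methods such as testRunClusterBasedJobCoordinator() go here.
}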

Example 14 with CoordinatorStreamStore

Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.

The class TestStartpoint, method testStartpointOldest.

@Test
public void testStartpointOldest() throws InterruptedException {
    publishKafkaEventsWithDelayPerEvent(inputKafkaTopic3, 0, NUM_KAFKA_EVENTS, PROCESSOR_IDS[2], Duration.ofMillis(2));
    ConcurrentHashMap<String, IncomingMessageEnvelope> recvEventsInputStartpointOldest = new ConcurrentHashMap<>();
    CoordinatorStreamStore coordinatorStreamStore = createCoordinatorStreamStore(applicationConfig1);
    coordinatorStreamStore.init();
    StartpointManager startpointManager = new StartpointManager(coordinatorStreamStore);
    startpointManager.start();
    StartpointOldest startpointOldest = new StartpointOldest();
    writeStartpoints(startpointManager, inputKafkaTopic3, ZK_TEST_PARTITION_COUNT, startpointOldest);
    startpointManager.stop();
    coordinatorStreamStore.close();
    TestTaskApplication.TaskApplicationProcessCallback processedCallback = (IncomingMessageEnvelope ime, TaskCallback callback) -> {
        try {
            String streamName = ime.getSystemStreamPartition().getStream();
            TestKafkaEvent testKafkaEvent = TestKafkaEvent.fromString((String) ime.getMessage());
            String eventIndex = testKafkaEvent.getEventData();
            if (inputKafkaTopic3.equals(streamName)) {
                recvEventsInputStartpointOldest.put(eventIndex, ime);
            } else {
                throw new RuntimeException("Unexpected input stream: " + streamName);
            }
            callback.complete();
        } catch (Exception ex) {
            callback.failure(ex);
        }
    };
    // Fetch all since consuming from oldest
    CountDownLatch processedMessagesLatchStartpointOldest = new CountDownLatch(NUM_KAFKA_EVENTS);
    CountDownLatch shutdownLatchStartpointOldest = new CountDownLatch(1);
    TestTaskApplication testTaskApplicationStartpointOldest = new TestTaskApplication(TEST_SYSTEM, inputKafkaTopic3, outputKafkaTopic, processedMessagesLatchStartpointOldest, shutdownLatchStartpointOldest, Optional.of(processedCallback));
    ApplicationRunner appRunner = ApplicationRunners.getApplicationRunner(testTaskApplicationStartpointOldest, applicationConfig3);
    executeRun(appRunner, applicationConfig3);
    assertTrue(processedMessagesLatchStartpointOldest.await(1, TimeUnit.MINUTES));
    appRunner.kill();
    appRunner.waitForFinish();
    assertTrue(shutdownLatchStartpointOldest.await(1, TimeUnit.MINUTES));
    assertEquals("Expecting to have processed all the events", NUM_KAFKA_EVENTS, recvEventsInputStartpointOldest.size());
}
Also used: IncomingMessageEnvelope (org.apache.samza.system.IncomingMessageEnvelope), TaskCallback (org.apache.samza.task.TaskCallback), CountDownLatch (java.util.concurrent.CountDownLatch), ExpectedException (org.junit.rules.ExpectedException), SamzaException (org.apache.samza.SamzaException), CoordinatorStreamStore (org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore), TestKafkaEvent (org.apache.samza.test.util.TestKafkaEvent), ApplicationRunner (org.apache.samza.runtime.ApplicationRunner), TestTaskApplication (org.apache.samza.test.processor.TestTaskApplication), StartpointManager (org.apache.samza.startpoint.StartpointManager), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), StartpointOldest (org.apache.samza.startpoint.StartpointOldest), Test (org.junit.Test)
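
The writeStartpoints(...) helper called above is not included in this snippet. A plausible minimal version is sketched below; it assumes org.apache.samza.Partition, org.apache.samza.system.SystemStreamPartition, the TEST_SYSTEM constant used elsewhere in the test, and a StartpointManager.writeStartpoint(SystemStreamPartition, Startpoint) method, so treat it as an illustration rather than the test's actual helper.

// Writes the same startpoint for every partition of the given topic.
private void writeStartpoints(StartpointManager startpointManager, String topic, int partitionCount, Startpoint startpoint) {
    for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
        SystemStreamPartition ssp = new SystemStreamPartition(TEST_SYSTEM, topic, new Partition(partitionId));
        startpointManager.writeStartpoint(ssp, startpoint);
    }
}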

Example 15 with CoordinatorStreamStore

Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.

The class TestStartpoint, method testStartpointUpcoming.

@Test
public void testStartpointUpcoming() throws InterruptedException {
    publishKafkaEventsWithDelayPerEvent(inputKafkaTopic4, 0, NUM_KAFKA_EVENTS, PROCESSOR_IDS[3], Duration.ofMillis(2));
    ConcurrentHashMap<String, IncomingMessageEnvelope> recvEventsInputStartpointUpcoming = new ConcurrentHashMap<>();
    CoordinatorStreamStore coordinatorStreamStore = createCoordinatorStreamStore(applicationConfig1);
    coordinatorStreamStore.init();
    StartpointManager startpointManager = new StartpointManager(coordinatorStreamStore);
    startpointManager.start();
    StartpointUpcoming startpointUpcoming = new StartpointUpcoming();
    writeStartpoints(startpointManager, inputKafkaTopic4, ZK_TEST_PARTITION_COUNT, startpointUpcoming);
    startpointManager.stop();
    coordinatorStreamStore.close();
    TestTaskApplication.TaskApplicationProcessCallback processedCallback = (IncomingMessageEnvelope ime, TaskCallback callback) -> {
        try {
            String streamName = ime.getSystemStreamPartition().getStream();
            TestKafkaEvent testKafkaEvent = TestKafkaEvent.fromString((String) ime.getMessage());
            String eventIndex = testKafkaEvent.getEventData();
            if (inputKafkaTopic4.equals(streamName)) {
                recvEventsInputStartpointUpcoming.put(eventIndex, ime);
            } else {
                throw new RuntimeException("Unexpected input stream: " + streamName);
            }
            callback.complete();
        } catch (Exception ex) {
            callback.failure(ex);
        }
    };
    // Expecting none, so just attempt a small number of fetches.
    CountDownLatch processedMessagesLatchStartpointUpcoming = new CountDownLatch(5);
    CountDownLatch shutdownLatchStartpointUpcoming = new CountDownLatch(1);
    TestTaskApplication testTaskApplicationStartpointUpcoming = new TestTaskApplication(TEST_SYSTEM, inputKafkaTopic4, outputKafkaTopic, processedMessagesLatchStartpointUpcoming, shutdownLatchStartpointUpcoming, Optional.of(processedCallback));
    // Startpoint upcoming
    ApplicationRunner appRunner = ApplicationRunners.getApplicationRunner(testTaskApplicationStartpointUpcoming, applicationConfig4);
    executeRun(appRunner, applicationConfig4);
    assertFalse("Expecting to timeout and not process any old messages.", processedMessagesLatchStartpointUpcoming.await(15, TimeUnit.SECONDS));
    assertEquals("Expecting not to process any old messages.", 0, recvEventsInputStartpointUpcoming.size());
    appRunner.kill();
    appRunner.waitForFinish();
    assertTrue(shutdownLatchStartpointUpcoming.await(1, TimeUnit.MINUTES));
}
Also used: IncomingMessageEnvelope (org.apache.samza.system.IncomingMessageEnvelope), TaskCallback (org.apache.samza.task.TaskCallback), CountDownLatch (java.util.concurrent.CountDownLatch), ExpectedException (org.junit.rules.ExpectedException), SamzaException (org.apache.samza.SamzaException), CoordinatorStreamStore (org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore), TestKafkaEvent (org.apache.samza.test.util.TestKafkaEvent), ApplicationRunner (org.apache.samza.runtime.ApplicationRunner), TestTaskApplication (org.apache.samza.test.processor.TestTaskApplication), StartpointUpcoming (org.apache.samza.startpoint.StartpointUpcoming), StartpointManager (org.apache.samza.startpoint.StartpointManager), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), Test (org.junit.Test)
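
Besides StartpointOldest and StartpointUpcoming, the startpoint API also offers timestamp- and offset-based variants. The fragment below sketches the same write pattern with a timestamp startpoint; org.apache.samza.startpoint.StartpointTimestamp and its epoch-millisecond constructor argument are assumptions for illustration, reusing the writeStartpoints helper and constants from the tests above.

// Sketch: resume from a point in time rather than from the oldest or upcoming offsets.
long oneHourAgoMillis = System.currentTimeMillis() - Duration.ofHours(1).toMillis();
StartpointTimestamp startpointTimestamp = new StartpointTimestamp(oneHourAgoMillis);
writeStartpoints(startpointManager, inputKafkaTopic4, ZK_TEST_PARTITION_COUNT, startpointTimestamp);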

Aggregations

CoordinatorStreamStore (org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore): 24
Config (org.apache.samza.config.Config): 13
JobConfig (org.apache.samza.config.JobConfig): 12
MapConfig (org.apache.samza.config.MapConfig): 12
Test (org.junit.Test): 12
JobCoordinatorConfig (org.apache.samza.config.JobCoordinatorConfig): 10
ApplicationConfig (org.apache.samza.config.ApplicationConfig): 8
MetricsRegistryMap (org.apache.samza.metrics.MetricsRegistryMap): 8
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 8
HashMap (java.util.HashMap): 7
Before (org.junit.Before): 7
SamzaException (org.apache.samza.SamzaException): 6
ImmutableMap (com.google.common.collect.ImmutableMap): 5
Map (java.util.Map): 5
StreamProcessor (org.apache.samza.processor.StreamProcessor): 5
SystemAdmins (org.apache.samza.system.SystemAdmins): 5
Optional (java.util.Optional): 4
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4
CountDownLatch (java.util.concurrent.CountDownLatch): 4
LegacyTaskApplication (org.apache.samza.application.LegacyTaskApplication): 4