Use of org.apache.samza.metrics.MetricsReporter in project samza by apache.
The class LocalContainerRunner, method run.
@Override
public void run(StreamApplication streamApp) {
  ContainerModel containerModel = jobModel.getContainers().get(containerId);
  Object taskFactory = TaskFactoryUtil.createTaskFactory(config, streamApp, this);
  container = SamzaContainer$.MODULE$.apply(
      containerModel,
      config,
      jobModel.maxChangeLogStreamPartitions,
      Util.<String, MetricsReporter>javaMapAsScalaMap(new HashMap<>()),
      taskFactory);
  container.setContainerListener(new SamzaContainerListener() {

    @Override
    public void onContainerStart() {
      log.info("Container Started");
    }

    @Override
    public void onContainerStop(boolean invokedExternally) {
      log.info("Container Stopped");
    }

    @Override
    public void onContainerFailed(Throwable t) {
      log.info("Container Failed");
      containerRunnerException = t;
    }
  });
  startContainerHeartbeatMonitor();
  container.run();
  stopContainerHeartbeatMonitor();
  if (containerRunnerException != null) {
    log.error("Container stopped with Exception. Exiting process now.", containerRunnerException);
    System.exit(1);
  }
}
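Note that the reporter map handed to SamzaContainer$.MODULE$.apply above is an empty HashMap. A minimal sketch of how that call could instead pick up reporters defined in config, reusing the MetricsReporterLoader shown further down this page; the "samza-container-" + containerId name is a hypothetical choice, not something LocalContainerRunner defines.

// Sketch only: load config-defined reporters before building the container.
// The container-name string below is an assumed convention for illustration.
Map<String, MetricsReporter> reporters =
    MetricsReporterLoader.getMetricsReporters(new MetricsConfig(config), "samza-container-" + containerId);
container = SamzaContainer$.MODULE$.apply(
    containerModel,
    config,
    jobModel.maxChangeLogStreamPartitions,
    Util.<String, MetricsReporter>javaMapAsScalaMap(reporters),
    taskFactory);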
Use of org.apache.samza.metrics.MetricsReporter in project samza by apache.
The class MetricsReporterLoader, method getMetricsReporters.
public static Map<String, MetricsReporter> getMetricsReporters(MetricsConfig metricsConfig, String containerName) {
  Map<String, MetricsReporter> metricsReporters = new HashMap<>();
  List<String> metricsReporterNames = metricsConfig.getMetricReporterNames();
  for (String metricsReporterName : metricsReporterNames) {
    String metricsFactoryClassName = metricsConfig.getMetricsFactoryClass(metricsReporterName)
        .orElseThrow(() -> new SamzaException(
            String.format("Metrics reporter %s missing .class config", metricsReporterName)));
    MetricsReporterFactory metricsReporterFactory =
        ReflectionUtil.getObj(metricsFactoryClassName, MetricsReporterFactory.class);
    metricsReporters.put(metricsReporterName,
        metricsReporterFactory.getMetricsReporter(metricsReporterName, containerName, metricsConfig));
  }
  return metricsReporters;
}
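The loader above resolves the reporter names from the metrics.reporters config and each factory class from metrics.reporter.<name>.class, then instantiates the factory by reflection. For illustration, a minimal factory/reporter pair that this loader could load might look like the sketch below; LoggingMetricsReporterFactory and LoggingMetricsReporter are hypothetical names, and only the MetricsReporterFactory and MetricsReporter interfaces come from Samza.

import org.apache.samza.config.Config;
import org.apache.samza.metrics.MetricsReporter;
import org.apache.samza.metrics.MetricsReporterFactory;
import org.apache.samza.metrics.ReadableMetricsRegistry;

// Hypothetical factory: the class name that metrics.reporter.<name>.class would point at.
public class LoggingMetricsReporterFactory implements MetricsReporterFactory {
  @Override
  public MetricsReporter getMetricsReporter(String name, String containerName, Config config) {
    return new LoggingMetricsReporter(name);
  }
}

// Hypothetical reporter: a real implementation would flush the registered registries somewhere.
class LoggingMetricsReporter implements MetricsReporter {
  private final String name;

  LoggingMetricsReporter(String name) {
    this.name = name;
  }

  @Override
  public void register(String source, ReadableMetricsRegistry registry) {
    // Remember the registry per source; a real reporter would poll or subscribe to it.
  }

  @Override
  public void start() {
    // Start a flush thread or timer here.
  }

  @Override
  public void stop() {
    // Stop the flush thread and emit a final snapshot.
  }
}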
Use of org.apache.samza.metrics.MetricsReporter in project samza by apache.
The class JobCoordinatorLaunchUtil, method runJobCoordinator.
private static void runJobCoordinator(String jobCoordinatorClassName, MetricsRegistryMap metrics,
    MetadataStore metadataStore, Config finalConfig) {
  JobCoordinatorFactory jobCoordinatorFactory =
      ReflectionUtil.getObj(jobCoordinatorClassName, JobCoordinatorFactory.class);
  JobCoordinator jobCoordinator = jobCoordinatorFactory.getJobCoordinator(
      JOB_COORDINATOR_PROCESSOR_ID_PLACEHOLDER, finalConfig, metrics, metadataStore);
  Map<String, MetricsReporter> metricsReporters = MetricsReporterLoader.getMetricsReporters(
      new MetricsConfig(finalConfig), CoordinationConstants.JOB_COORDINATOR_CONTAINER_NAME);
  metricsReporters.values().forEach(metricsReporter ->
      metricsReporter.register(CoordinationConstants.JOB_COORDINATOR_CONTAINER_NAME, metrics));
  metricsReporters.values().forEach(MetricsReporter::start);
  CountDownLatch waitForShutdownLatch = new CountDownLatch(1);
  jobCoordinator.setListener(new NoProcessorJobCoordinatorListener(waitForShutdownLatch));
  jobCoordinator.start();
  addShutdownHook(jobCoordinator);
  try {
    waitForShutdownLatch.await();
  } catch (InterruptedException e) {
    String errorMessage = "Error while waiting for coordinator to complete";
    LOG.error(errorMessage, e);
    throw new SamzaException(errorMessage, e);
  } finally {
    metricsReporters.values().forEach(MetricsReporter::stop);
  }
}
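For context, runJobCoordinator blocks on waitForShutdownLatch until the NoProcessorJobCoordinatorListener releases it. The real listener class ships with Samza and is not reproduced here; the sketch below only illustrates the latch hand-off, and it assumes JobCoordinatorListener's callbacks are onJobModelExpired, onNewJobModel, onCoordinatorStop, and onCoordinatorFailure.

import java.util.concurrent.CountDownLatch;

import org.apache.samza.coordinator.JobCoordinatorListener;
import org.apache.samza.job.model.JobModel;

// Illustrative sketch, not Samza's NoProcessorJobCoordinatorListener:
// it simply releases the latch that runJobCoordinator is waiting on.
public class LatchReleasingJobCoordinatorListener implements JobCoordinatorListener {
  private final CountDownLatch shutdownLatch;

  public LatchReleasingJobCoordinatorListener(CountDownLatch shutdownLatch) {
    this.shutdownLatch = shutdownLatch;
  }

  @Override
  public void onJobModelExpired() {
    // No processors are managed here, so there is nothing to pause.
  }

  @Override
  public void onNewJobModel(String processorId, JobModel jobModel) {
    // No processors are managed here, so there is nothing to start.
  }

  @Override
  public void onCoordinatorStop() {
    shutdownLatch.countDown(); // releases the thread awaiting in runJobCoordinator
  }

  @Override
  public void onCoordinatorFailure(Throwable t) {
    shutdownLatch.countDown(); // also release on failure so the process can exit
  }
}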
Use of org.apache.samza.metrics.MetricsReporter in project samza by apache.
The class TestJobCoordinatorLaunchUtil, method testRunJobCoordinator.
@Test
public void testRunJobCoordinator() throws Exception {
  String jobCoordinatorFactoryClass = "org.apache.samza.custom.MyJobCoordinatorFactory";
  Config originalConfig = buildOriginalConfig(
      ImmutableMap.of(JobCoordinatorConfig.JOB_COORDINATOR_FACTORY, jobCoordinatorFactoryClass));
  JobConfig fullConfig = new JobConfig(
      new MapConfig(originalConfig, Collections.singletonMap("isAfterPlanning", "true")));
  Config autoSizingConfig = new MapConfig(
      Collections.singletonMap(JobConfig.JOB_AUTOSIZING_CONTAINER_COUNT, "10"));
  Config finalConfig = new MapConfig(autoSizingConfig, fullConfig);

  RemoteJobPlanner remoteJobPlanner = mock(RemoteJobPlanner.class);
  CoordinatorStreamStore coordinatorStreamStore = mock(CoordinatorStreamStore.class);
  JobCoordinatorFactory jobCoordinatorFactory = mock(JobCoordinatorFactory.class);
  JobCoordinator jobCoordinator = mock(JobCoordinator.class);

  PowerMockito.mockStatic(CoordinatorStreamUtil.class);
  PowerMockito.doNothing().when(CoordinatorStreamUtil.class, "createCoordinatorStream", any());
  PowerMockito.doReturn(new MapConfig()).when(CoordinatorStreamUtil.class, "buildCoordinatorStreamConfig", any());
  PowerMockito.doReturn(autoSizingConfig)
      .when(CoordinatorStreamUtil.class, "readLaunchConfigFromCoordinatorStream", any(), any());
  PowerMockito.whenNew(CoordinatorStreamStore.class).withAnyArguments().thenReturn(coordinatorStreamStore);
  PowerMockito.whenNew(RemoteJobPlanner.class).withAnyArguments().thenReturn(remoteJobPlanner);
  when(remoteJobPlanner.prepareJobs()).thenReturn(Collections.singletonList(fullConfig));
  PowerMockito.mockStatic(ReflectionUtil.class);
  PowerMockito.doReturn(jobCoordinatorFactory)
      .when(ReflectionUtil.class, "getObj", jobCoordinatorFactoryClass, JobCoordinatorFactory.class);
  when(jobCoordinatorFactory.getJobCoordinator(
      eq("samza-job-coordinator"), eq(finalConfig), any(), eq(coordinatorStreamStore))).thenReturn(jobCoordinator);

  // use a latch to keep track of when shutdown hook was added to know when we should start verifications
  CountDownLatch addShutdownHookLatch = new CountDownLatch(1);
  PowerMockito.spy(JobCoordinatorLaunchUtil.class);
  PowerMockito.doAnswer(invocation -> {
    addShutdownHookLatch.countDown();
    return null;
  }).when(JobCoordinatorLaunchUtil.class, "addShutdownHook", any());

  MetricsReporter metricsReporter = mock(MetricsReporter.class);
  Map<String, MetricsReporter> metricsReporterMap = ImmutableMap.of("reporter", metricsReporter);
  PowerMockito.mockStatic(MetricsReporterLoader.class);
  PowerMockito.doReturn(metricsReporterMap)
      .when(MetricsReporterLoader.class, "getMetricsReporters", new MetricsConfig(finalConfig), "JobCoordinator");
  NoProcessorJobCoordinatorListener jobCoordinatorListener = mock(NoProcessorJobCoordinatorListener.class);
  PowerMockito.whenNew(NoProcessorJobCoordinatorListener.class).withAnyArguments().thenReturn(jobCoordinatorListener);

  Thread runThread = new Thread(() -> JobCoordinatorLaunchUtil.run(new MockStreamApplication(), originalConfig));
  runThread.start();
  // last thing before waiting for shutdown is to add shutdown hook, so do verifications once hook is added
  addShutdownHookLatch.await();

  verifyStatic();
  CoordinatorStreamUtil.createCoordinatorStream(fullConfig);
  verifyStatic();
  CoordinatorStreamUtil.writeConfigToCoordinatorStream(finalConfig, true);
  verifyStatic();
  JobCoordinatorLaunchUtil.addShutdownHook(jobCoordinator);

  InOrder inOrder = Mockito.inOrder(metricsReporter, jobCoordinator);
  inOrder.verify(metricsReporter).register(eq("JobCoordinator"), any());
  inOrder.verify(metricsReporter).start();
  ArgumentCaptor<CountDownLatch> countDownLatchArgumentCaptor = ArgumentCaptor.forClass(CountDownLatch.class);
  verifyNew(NoProcessorJobCoordinatorListener.class).withArguments(countDownLatchArgumentCaptor.capture());
  inOrder.verify(jobCoordinator).setListener(jobCoordinatorListener);
  inOrder.verify(jobCoordinator).start();

  // wait some time and then make sure the run thread is still alive
  Thread.sleep(Duration.ofMillis(500).toMillis());
  assertTrue(runThread.isAlive());
  // trigger the count down latch so that the run thread can exit
  countDownLatchArgumentCaptor.getValue().countDown();
  runThread.join(Duration.ofSeconds(10).toMillis());
  assertFalse(runThread.isAlive());
  verify(metricsReporter).stop();
}
Use of org.apache.samza.metrics.MetricsReporter in project samza by apache.
The class ContainerLaunchUtil, method run.
@VisibleForTesting
static void run(
    ApplicationDescriptorImpl<? extends ApplicationDescriptor> appDesc,
    String jobName,
    String jobId,
    String containerId,
    Optional<String> executionEnvContainerId,
    Optional<String> samzaEpochId,
    JobModel jobModel,
    Config config,
    Optional<ExternalContext> externalContextOptional) {
  CoordinatorStreamStore coordinatorStreamStore = buildCoordinatorStreamStore(config, new MetricsRegistryMap());
  coordinatorStreamStore.init();
  /*
   * We track the exit code and only trigger exit in the finally block to make sure we are able to execute all the
   * clean-up steps. A prior implementation short-circuited the exit, causing some of the clean-up steps to be missed.
   */
  int exitCode = 0;
  try {
    TaskFactory taskFactory = TaskFactoryUtil.getTaskFactory(appDesc);
    LocalityManager localityManager = new LocalityManager(
        new NamespaceAwareCoordinatorStreamStore(coordinatorStreamStore, SetContainerHostMapping.TYPE));

    // StartpointManager wraps the coordinatorStreamStore in the namespaces internally
    StartpointManager startpointManager = null;
    if (new JobConfig(config).getStartpointEnabled()) {
      startpointManager = new StartpointManager(coordinatorStreamStore);
    }

    Map<String, MetricsReporter> metricsReporters = loadMetricsReporters(appDesc, containerId, config);

    // Create the diagnostics manager and reporter, and wire them up
    Optional<DiagnosticsManager> diagnosticsManager = DiagnosticsUtil.buildDiagnosticsManager(
        jobName, jobId, jobModel, containerId, executionEnvContainerId, samzaEpochId, config);

    MetricsRegistryMap metricsRegistryMap = new MetricsRegistryMap();
    SamzaContainer container = SamzaContainer$.MODULE$.apply(
        containerId,
        jobModel,
        ScalaJavaUtil.toScalaMap(metricsReporters),
        metricsRegistryMap,
        taskFactory,
        JobContextImpl.fromConfigWithDefaults(config, jobModel),
        Option.apply(appDesc.getApplicationContainerContextFactory().orElse(null)),
        Option.apply(appDesc.getApplicationTaskContextFactory().orElse(null)),
        Option.apply(externalContextOptional.orElse(null)),
        localityManager,
        startpointManager,
        Option.apply(diagnosticsManager.orElse(null)));

    ProcessorLifecycleListener processorLifecycleListener =
        appDesc.getProcessorLifecycleListenerFactory().createInstance(new ProcessorContext() { }, config);
    ClusterBasedProcessorLifecycleListener listener =
        new ClusterBasedProcessorLifecycleListener(config, processorLifecycleListener, container::shutdown);
    container.setContainerListener(listener);

    ContainerHeartbeatMonitor heartbeatMonitor = createContainerHeartbeatMonitor(
        container, new NamespaceAwareCoordinatorStreamStore(coordinatorStreamStore, SetConfig.TYPE), config);
    if (heartbeatMonitor != null) {
      heartbeatMonitor.start();
    }

    if (new JobConfig(config).getApplicationMasterHighAvailabilityEnabled()) {
      executionEnvContainerId.ifPresent(execEnvContainerId -> {
        ExecutionContainerIdManager executionContainerIdManager = new ExecutionContainerIdManager(
            new NamespaceAwareCoordinatorStreamStore(coordinatorStreamStore, SetExecutionEnvContainerIdMapping.TYPE));
        executionContainerIdManager.writeExecutionEnvironmentContainerIdMapping(containerId, execEnvContainerId);
      });
    }

    container.run();

    if (heartbeatMonitor != null) {
      heartbeatMonitor.stop();
    }
    // override the value with what the listener returns
    if (containerRunnerException == null) {
      containerRunnerException = listener.getContainerException();
    }
    if (containerRunnerException != null) {
      log.error("Container stopped with Exception. Exiting process now.", containerRunnerException);
      exitCode = 1;
    }
  } catch (Throwable e) {
    /*
     * Two separate log statements are intended to print the entire stack trace as part of the logs. Using a
     * single log statement with a custom format would require explicitly fetching the stack trace and adding null
     * checks, which makes the code harder to read than the current choice.
     */
    log.error("Exiting the process due to", e);
    log.error("Container runner exception: ", containerRunnerException);
    exitCode = 1;
  } finally {
    coordinatorStreamStore.close();
    /*
     * Only exit in the scenario of a non-zero exit code, in order to maintain parity with the current
     * implementation, where the method completes normally when no errors are encountered.
     */
    if (exitCode != 0) {
      exitProcess(exitCode);
    }
  }
}
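The loadMetricsReporters(appDesc, containerId, config) call above is not shown on this page. As a rough, illustrative guess at what such a helper does (not the actual ContainerLaunchUtil implementation), it would combine the config-defined reporters from MetricsReporterLoader with any factories registered programmatically on the application descriptor; the getMetricsReporterFactories() accessor and the container-name convention below are assumptions.

// Illustrative sketch only: the body is a guess, not the real ContainerLaunchUtil code.
static Map<String, MetricsReporter> loadMetricsReporters(
    ApplicationDescriptorImpl<? extends ApplicationDescriptor> appDesc, String containerId, Config config) {
  String containerName = "samza-container-" + containerId; // assumed naming convention
  // Reporters declared in config (metrics.reporters / metrics.reporter.<name>.class).
  Map<String, MetricsReporter> reporters = new HashMap<>(
      MetricsReporterLoader.getMetricsReporters(new MetricsConfig(config), containerName));
  // Assumption: the descriptor exposes reporter factories registered through the high-level API.
  appDesc.getMetricsReporterFactories().forEach((name, factory) ->
      reporters.put(name, factory.getMetricsReporter(name, containerName, config)));
  return reporters;
}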