Use of org.apache.samza.coordinator.MetadataResourceUtil in project samza by apache.
The class ClusterBasedJobCoordinator, method run().
/**
 * Starts the JobCoordinator.
 */
public void run() {
  if (!isStarted.compareAndSet(false, true)) {
    LOG.warn("Attempting to start an already started job coordinator. ");
    return;
  }

  // set up JmxServer (if jmx is enabled)
  if (isJmxEnabled) {
    jmxServer = new JmxServer();
    state.jmxUrl = jmxServer.getJmxUrl();
    state.jmxTunnelingUrl = jmxServer.getTunnelingJmxUrl();
  } else {
    jmxServer = null;
  }

  try {
    // initialize JobCoordinator state
    LOG.info("Starting cluster based job coordinator");

    // write the diagnostics metadata file
    String jobName = new JobConfig(config).getName().get();
    String jobId = new JobConfig(config).getJobId();
    Optional<String> execEnvContainerId = Optional.ofNullable(System.getenv("CONTAINER_ID"));
    DiagnosticsUtil.writeMetadataFile(jobName, jobId, METRICS_SOURCE_NAME, execEnvContainerId, config);

    // create necessary checkpoint and changelog streams, if not created
    JobModel jobModel = jobModelManager.jobModel();
    MetadataResourceUtil metadataResourceUtil = new MetadataResourceUtil(jobModel, this.metrics, config);
    metadataResourceUtil.createResources();

    // create all the resources required for state backend factories
    StorageConfig storageConfig = new StorageConfig(config);
    storageConfig.getBackupFactories().forEach(stateStorageBackendBackupFactory -> {
      StateBackendFactory stateBackendFactory =
          ReflectionUtil.getObj(stateStorageBackendBackupFactory, StateBackendFactory.class);
      StateBackendAdmin stateBackendAdmin = stateBackendFactory.getAdmin(jobModel, config);
      // Create resources required for state backend admin
      stateBackendAdmin.createResources();
      // Validate resources required for state backend admin
      stateBackendAdmin.validateResources();
    });

    /*
     * We fanout startpoint if and only if
     *  1. Startpoint is enabled in configuration
     *  2. If AM HA is enabled, fanout only if startpoint enabled and job coordinator metadata changed
     */
    if (shouldFanoutStartpoint()) {
      StartpointManager startpointManager = createStartpointManager();
      startpointManager.start();
      try {
        startpointManager.fanOut(JobModelUtil.getTaskToSystemStreamPartitions(jobModel));
      } finally {
        startpointManager.stop();
      }
    }

    // Remap changelog partitions to tasks
    Map<TaskName, Integer> prevPartitionMappings = changelogStreamManager.readPartitionMapping();
    Map<TaskName, Integer> taskPartitionMappings = new HashMap<>();
    Map<String, ContainerModel> containers = jobModel.getContainers();
    for (ContainerModel containerModel : containers.values()) {
      for (TaskModel taskModel : containerModel.getTasks().values()) {
        taskPartitionMappings.put(taskModel.getTaskName(), taskModel.getChangelogPartition().getPartitionId());
      }
    }
    changelogStreamManager.updatePartitionMapping(prevPartitionMappings, taskPartitionMappings);

    containerProcessManager.start();
    systemAdmins.start();
    partitionMonitor.start();
    inputStreamRegexMonitor.ifPresent(StreamRegexMonitor::start);

    // containerPlacementRequestAllocator thread has to start after the cpm is started
    LOG.info("Starting the container placement handler thread");
    containerPlacementMetadataStore.start();
    containerPlacementRequestAllocatorThread.start();

    boolean isInterrupted = false;
    while (!containerProcessManager.shouldShutdown() && !checkAndThrowException() && !isInterrupted
        && checkcontainerPlacementRequestAllocatorThreadIsAlive()) {
      try {
        Thread.sleep(jobCoordinatorSleepInterval);
      } catch (InterruptedException e) {
        isInterrupted = true;
        LOG.error("Interrupted in job coordinator loop", e);
        Thread.currentThread().interrupt();
      }
    }
  } catch (Throwable e) {
    LOG.error("Exception thrown in the JobCoordinator loop", e);
    throw new SamzaException(e);
  } finally {
    onShutDown();
  }
}
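Stripped of the surrounding lifecycle code, the MetadataResourceUtil usage in run() boils down to two calls: construct the util with the job model, a metrics registry, and the job config, then invoke createResources() to create any missing checkpoint and changelog streams. The sketch below shows just that pattern; the wrapper class and the fresh MetricsRegistryMap (run() passes the coordinator's own registry) are illustrative assumptions, not part of the Samza source.

import org.apache.samza.config.Config;
import org.apache.samza.coordinator.MetadataResourceUtil;
import org.apache.samza.job.model.JobModel;
import org.apache.samza.metrics.MetricsRegistryMap;

// Hypothetical helper, for illustration only; not part of ClusterBasedJobCoordinator.
public final class MetadataResourceBootstrap {

  /**
   * Creates the checkpoint and changelog streams described by the job model,
   * if they do not already exist, using the same call sequence as run() above.
   */
  public static void createCoordinatorResources(JobModel jobModel, Config config) {
    MetadataResourceUtil metadataResourceUtil =
        new MetadataResourceUtil(jobModel, new MetricsRegistryMap(), config);
    metadataResourceUtil.createResources();
  }
}

In ClusterBasedJobCoordinator these two calls run once at startup, before the state backend admins are created and before the optional startpoint fan-out.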
Use of org.apache.samza.coordinator.MetadataResourceUtil in project samza by apache.
The class ZkJobCoordinator, method loadMetadataResources().
/**
 * Stores the configuration of the job in the coordinator stream.
 */
@VisibleForTesting
void loadMetadataResources(JobModel jobModel) {
  try {
    MetadataResourceUtil metadataResourceUtil = createMetadataResourceUtil(jobModel, config);
    metadataResourceUtil.createResources();

    if (coordinatorStreamStore != null) {
      // TODO: SAMZA-2273 - publish configs async
      CoordinatorStreamValueSerde jsonSerde = new CoordinatorStreamValueSerde(SetConfig.TYPE);
      NamespaceAwareCoordinatorStreamStore configStore =
          new NamespaceAwareCoordinatorStreamStore(coordinatorStreamStore, SetConfig.TYPE);
      for (Map.Entry<String, String> entry : config.entrySet()) {
        byte[] serializedValue = jsonSerde.toBytes(entry.getValue());
        configStore.put(entry.getKey(), serializedValue);
      }
      configStore.flush();

      if (new JobConfig(config).getStartpointEnabled()) {
        // fan out the startpoints
        StartpointManager startpointManager = createStartpointManager();
        startpointManager.start();
        try {
          startpointManager.fanOut(JobModelUtil.getTaskToSystemStreamPartitions(jobModel));
        } finally {
          startpointManager.stop();
        }
      }
    } else {
      LOG.warn("No metadata store registered to this job coordinator. Config not written to the metadata store and no Startpoints fan out.");
    }
  } catch (IOException ex) {
    throw new SamzaException(String.format("IO exception while loading metadata resources."), ex);
  }
}
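The write path above serializes each config entry with a CoordinatorStreamValueSerde and puts it into a NamespaceAwareCoordinatorStreamStore scoped to the SetConfig.TYPE namespace. For orientation, here is a rough sketch of the symmetric read path; it is not code from ZkJobCoordinator, and the import locations and the MetadataStore-typed parameter are assumptions inferred from the snippet.

import java.util.Map;

import org.apache.samza.coordinator.metadatastore.NamespaceAwareCoordinatorStreamStore;
import org.apache.samza.coordinator.stream.CoordinatorStreamValueSerde;
import org.apache.samza.coordinator.stream.messages.SetConfig;
import org.apache.samza.metadatastore.MetadataStore;

// Hypothetical reader, for illustration only.
public final class CoordinatorConfigReader {

  /** Prints every config entry previously published to the SetConfig namespace. */
  public static void printPublishedConfig(MetadataStore coordinatorStreamStore) {
    CoordinatorStreamValueSerde serde = new CoordinatorStreamValueSerde(SetConfig.TYPE);
    NamespaceAwareCoordinatorStreamStore configStore =
        new NamespaceAwareCoordinatorStreamStore(coordinatorStreamStore, SetConfig.TYPE);
    for (Map.Entry<String, byte[]> entry : configStore.all().entrySet()) {
      // Values were written with jsonSerde.toBytes(...) above, so fromBytes(...) recovers the strings.
      System.out.println(entry.getKey() + " = " + serde.fromBytes(entry.getValue()));
    }
  }
}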
Use of org.apache.samza.coordinator.MetadataResourceUtil in project samza by apache.
The class TestStaticResourceJobCoordinator, method testSameJobModelAsPrevious().
@Test
public void testSameJobModelAsPrevious() throws IOException {
  Config jobModelConfig = mock(Config.class);
  JobModel jobModel = setupJobModel(jobModelConfig);
  StreamPartitionCountMonitor streamPartitionCountMonitor = setupStreamPartitionCountMonitor(jobModelConfig);
  StreamRegexMonitor streamRegexMonitor = setupStreamRegexMonitor(jobModel, jobModelConfig);
  setupJobCoordinatorMetadata(jobModel, jobModelConfig, ImmutableSet.of(), true);
  setUpDiagnosticsManager(jobModel);
  MetadataResourceUtil metadataResourceUtil = metadataResourceUtil(jobModel);

  this.staticResourceJobCoordinator.start();

  assertEquals(jobModel, this.staticResourceJobCoordinator.getJobModel());
  verifyStartLifecycle();
  verify(this.staticResourceJobCoordinator).doSetLoggingContextConfig(jobModelConfig);
  verify(this.diagnosticsManager).start();
  verifyPrepareWorkerExecutionAndMonitor(jobModel, metadataResourceUtil, streamPartitionCountMonitor,
      streamRegexMonitor, null, null);
  verify(this.jobCoordinatorListener).onNewJobModel(PROCESSOR_ID, jobModel);
}
Use of org.apache.samza.coordinator.MetadataResourceUtil in project samza by apache.
The class TestStaticResourceJobCoordinator, method testNoExistingJobModel().
@Test
public void testNoExistingJobModel() throws IOException {
  Config jobModelConfig = mock(Config.class);
  JobModel jobModel = setupJobModel(jobModelConfig);
  StreamPartitionCountMonitor streamPartitionCountMonitor = setupStreamPartitionCountMonitor(jobModelConfig);
  StreamRegexMonitor streamRegexMonitor = setupStreamRegexMonitor(jobModel, jobModelConfig);
  JobCoordinatorMetadata newMetadata = setupJobCoordinatorMetadata(jobModel, jobModelConfig,
      ImmutableSet.copyOf(Arrays.asList(JobMetadataChange.values())), false);
  setUpDiagnosticsManager(jobModel);
  MetadataResourceUtil metadataResourceUtil = metadataResourceUtil(jobModel);

  this.staticResourceJobCoordinator.start();

  assertEquals(jobModel, this.staticResourceJobCoordinator.getJobModel());
  verifyStartLifecycle();
  verify(this.staticResourceJobCoordinator).doSetLoggingContextConfig(jobModelConfig);
  verify(this.diagnosticsManager).start();
  verifyPrepareWorkerExecutionAndMonitor(jobModel, metadataResourceUtil, streamPartitionCountMonitor,
      streamRegexMonitor, newMetadata, SINGLE_SSP_FANOUT);
  verify(this.jobCoordinatorListener).onNewJobModel(PROCESSOR_ID, jobModel);
}
Use of org.apache.samza.coordinator.MetadataResourceUtil in project samza by apache.
The class TestStaticResourceJobCoordinator, method testStartMissingOptionalComponents().
/**
 * Missing {@link StartpointManager}, {@link JobCoordinatorListener}, {@link StreamRegexMonitor}
 */
@Test
public void testStartMissingOptionalComponents() throws IOException {
  this.staticResourceJobCoordinator = spy(new StaticResourceJobCoordinator(PROCESSOR_ID, this.jobModelHelper,
      this.jobModelServingContext, this.coordinatorCommunication, this.jobCoordinatorMetadataManager,
      this.streamPartitionCountMonitorFactory, this.streamRegexMonitorFactory, Optional.empty(),
      this.changelogStreamManager, this.jobRestartSignal, this.metrics, this.systemAdmins, Optional.empty(),
      Optional.empty(), this.config));
  Config jobModelConfig = mock(Config.class);
  JobModel jobModel = setupJobModel(jobModelConfig);
  StreamPartitionCountMonitor streamPartitionCountMonitor = setupStreamPartitionCountMonitor(jobModelConfig);
  when(this.streamRegexMonitorFactory.build(any(), any(), any())).thenReturn(Optional.empty());
  JobCoordinatorMetadata newMetadata = setupJobCoordinatorMetadata(jobModel, jobModelConfig,
      ImmutableSet.copyOf(Arrays.asList(JobMetadataChange.values())), false);
  doReturn(Optional.empty()).when(this.staticResourceJobCoordinator).buildDiagnosticsManager(JOB_NAME, JOB_ID,
      jobModel, CoordinationConstants.JOB_COORDINATOR_CONTAINER_NAME, Optional.empty(), Optional.empty(), this.config);
  MetadataResourceUtil metadataResourceUtil = metadataResourceUtil(jobModel);

  this.staticResourceJobCoordinator.start();

  assertEquals(jobModel, this.staticResourceJobCoordinator.getJobModel());
  verify(this.systemAdmins).start();
  verify(this.staticResourceJobCoordinator).doSetLoggingContextConfig(jobModelConfig);
  verify(this.staticResourceJobCoordinator).buildDiagnosticsManager(JOB_NAME, JOB_ID, jobModel,
      CoordinationConstants.JOB_COORDINATOR_CONTAINER_NAME, Optional.empty(), Optional.empty(), this.config);
  verifyPrepareWorkerExecutionAndMonitor(jobModel, metadataResourceUtil, streamPartitionCountMonitor, null,
      newMetadata, null);
  verifyZeroInteractions(this.jobCoordinatorListener, this.startpointManager);
}