Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.
The class TestContainerAllocatorWithoutHostAffinity, method setup.
@Before
public void setup() throws Exception {
  LocalityManager mockLocalityManager = mock(LocalityManager.class);
  when(mockLocalityManager.readLocality()).thenReturn(new LocalityModel(new HashMap<>()));

  // Build a coordinator stream store from the test utility and start the placement metadata store on top of it.
  CoordinatorStreamStoreTestUtil coordinatorStreamStoreTestUtil = new CoordinatorStreamStoreTestUtil(config);
  CoordinatorStreamStore coordinatorStreamStore = coordinatorStreamStoreTestUtil.getCoordinatorStreamStore();
  coordinatorStreamStore.init();
  containerPlacementMetadataStore = new ContainerPlacementMetadataStore(coordinatorStreamStore);
  containerPlacementMetadataStore.start();

  containerAllocator = new ContainerAllocator(manager, config, state, false,
      new ContainerManager(containerPlacementMetadataStore, state, manager, false, false, mockLocalityManager, faultDomainManager, config));

  // Swap in a mock resource request state via reflection so the test can observe resource requests.
  requestState = new MockContainerRequestState(manager, false);
  Field requestStateField = containerAllocator.getClass().getDeclaredField("resourceRequestState");
  requestStateField.setAccessible(true);
  requestStateField.set(containerAllocator, requestState);

  allocatorThread = new Thread(containerAllocator);
}
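A matching teardown keeps the test from leaking the placement metadata store or the allocator thread. The sketch below is an assumption about how such a cleanup could look: it relies on ContainerPlacementMetadataStore exposing a stop() counterpart to start() and on plain java.lang.Thread handling, and it should be adjusted to whatever the test class actually keeps as fields.

@After
public void teardown() throws Exception {
  // Stop the placement metadata store so no further reads or writes hit the coordinator stream.
  if (containerPlacementMetadataStore != null) {
    containerPlacementMetadataStore.stop();
  }
  // If the test started the allocator thread, interrupt it and wait for it to exit.
  if (allocatorThread != null && allocatorThread.isAlive()) {
    allocatorThread.interrupt();
    allocatorThread.join();
  }
  // Closing the underlying CoordinatorStreamStore would additionally require keeping it as a field
  // instead of a local variable in setup().
}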
Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.
The class TestContainerProcessManager, method setup.
@Before
public void setup() throws Exception {
  server = new MockHttpServer("/", 7777, null, new ServletHolder(DefaultServlet.class));
  CoordinatorStreamStoreTestUtil coordinatorStreamStoreTestUtil = new CoordinatorStreamStoreTestUtil(config);
  CoordinatorStreamStore coordinatorStreamStore = coordinatorStreamStoreTestUtil.getCoordinatorStreamStore();
  coordinatorStreamStore.init();
  containerPlacementMetadataStore = new ContainerPlacementMetadataStore(coordinatorStreamStore);
  containerPlacementMetadataStore.start();
}
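Since the store returned by CoordinatorStreamStoreTestUtil is just a MetadataStore, a test can also read and write it directly. The following is a minimal sketch, assuming the test-backed store round-trips raw value bytes; the namespace (SetConfig.TYPE), key, and value are illustrative, and production code normally goes through higher-level managers such as LocalityManager rather than raw get/put.

// Scope keys to a single coordinator stream message type, as the SamzaTaskProxy example further down does.
NamespaceAwareCoordinatorStreamStore configStore =
    new NamespaceAwareCoordinatorStreamStore(coordinatorStreamStore, SetConfig.TYPE);
configStore.put("job.name", "test-job".getBytes(StandardCharsets.UTF_8));
configStore.flush();
byte[] rawValue = configStore.get("job.name");
String value = rawValue == null ? null : new String(rawValue, StandardCharsets.UTF_8);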
Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.
The class JobCoordinatorLaunchUtil, method run.
/**
* Run {@link ClusterBasedJobCoordinator} with full job config.
*
* @param app SamzaApplication to run.
* @param config full job config.
*/
@SuppressWarnings("rawtypes")
public static void run(SamzaApplication app, Config config) {
  // Execute planning
  ApplicationDescriptorImpl<? extends ApplicationDescriptor> appDesc =
      ApplicationDescriptorUtil.getAppDescriptor(app, config);
  RemoteJobPlanner planner = new RemoteJobPlanner(appDesc);
  List<JobConfig> jobConfigs = planner.prepareJobs();
  if (jobConfigs.size() != 1) {
    throw new SamzaException("Only a single remote job is supported.");
  }
  Config fullConfig = jobConfigs.get(0);

  // Create the coordinator stream if it does not exist before fetching the launch config from it.
  CoordinatorStreamUtil.createCoordinatorStream(fullConfig);
  MetricsRegistryMap metrics = new MetricsRegistryMap();
  MetadataStore metadataStore = new CoordinatorStreamStore(CoordinatorStreamUtil.buildCoordinatorStreamConfig(fullConfig), metrics);
  // The MetadataStore will be closed in ClusterBasedJobCoordinator#onShutDown.
  // Initialization of the MetadataStore can be moved to ClusterBasedJobCoordinator after we clean up
  // ClusterBasedJobCoordinator#createFromMetadataStore.
  metadataStore.init();

  // Read the extra launch config from the metadata store.
  Config launchConfig = CoordinatorStreamUtil.readLaunchConfigFromCoordinatorStream(fullConfig, metadataStore);
  Config finalConfig = new MapConfig(launchConfig, fullConfig);

  // This needs to be consistent with RemoteApplicationRunner#run, where JobRunner#submit is called instead of JobRunner#run.
  CoordinatorStreamUtil.writeConfigToCoordinatorStream(finalConfig, true);
  DiagnosticsUtil.createDiagnosticsStream(finalConfig);

  Optional<String> jobCoordinatorFactoryClassName =
      new JobCoordinatorConfig(config).getOptionalJobCoordinatorFactoryClassName();
  if (jobCoordinatorFactoryClassName.isPresent()) {
    runJobCoordinator(jobCoordinatorFactoryClassName.get(), metrics, metadataStore, finalConfig);
  } else {
    ClusterBasedJobCoordinator jc = new ClusterBasedJobCoordinator(metrics, metadataStore, finalConfig);
    jc.run();
  }
}
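For reference, a caller only needs a SamzaApplication and the full job config to reach this entry point. The snippet below is a hypothetical driver rather than the normal invocation path (in practice this method is invoked by the cluster job-coordinator startup flow); the application lambda and the config value are placeholders.

public static void main(String[] args) {
  Map<String, String> configMap = new HashMap<>();
  configMap.put("job.name", "my-remote-job");  // placeholder job config
  // StreamApplication is a single-method interface, so a lambda is enough for a sketch.
  StreamApplication app = appDescriptor -> { /* describe inputs, outputs and processing here */ };
  JobCoordinatorLaunchUtil.run(app, new MapConfig(configMap));
}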
Use of org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore in project samza by apache.
The class SamzaTaskProxy, method readTasksFromCoordinatorStream.
/**
* Builds list of {@link Task} from job model in coordinator stream.
* @param consumer system consumer associated with a job's coordinator stream.
* @return list of {@link Task} constructed from job model in coordinator stream.
*/
protected List<Task> readTasksFromCoordinatorStream(CoordinatorStreamSystemConsumer consumer) {
  CoordinatorStreamStore coordinatorStreamStore = new CoordinatorStreamStore(consumer.getConfig(), new MetricsRegistryMap());
  LocalityManager localityManager = new LocalityManager(coordinatorStreamStore);
  Map<String, ProcessorLocality> containerLocalities = localityManager.readLocality().getProcessorLocalities();
  TaskAssignmentManager taskAssignmentManager = new TaskAssignmentManager(
      new NamespaceAwareCoordinatorStreamStore(coordinatorStreamStore, SetTaskContainerMapping.TYPE),
      new NamespaceAwareCoordinatorStreamStore(coordinatorStreamStore, SetTaskModeMapping.TYPE));
  Map<String, String> taskNameToContainerIdMapping = taskAssignmentManager.readTaskAssignment();
  StorageConfig storageConfig = new StorageConfig(consumer.getConfig());
  List<String> storeNames = storageConfig.getStoreNames();
  // Join each task's container assignment with that container's last known host, if any.
  return taskNameToContainerIdMapping.entrySet().stream().map(entry -> {
    String hostName = Optional.ofNullable(containerLocalities.get(entry.getValue()))
        .map(ProcessorLocality::host)
        .orElse(null);
    return new Task(hostName, entry.getKey(), entry.getValue(), new ArrayList<>(), storeNames);
  }).collect(Collectors.toList());
}
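The same locality and task-assignment lookups can be done against a CoordinatorStreamStore built directly from a job config, without going through a CoordinatorStreamSystemConsumer. The sketch below only reuses constructors and calls that appear in the snippets above (CoordinatorStreamUtil.buildCoordinatorStreamConfig, LocalityManager, NamespaceAwareCoordinatorStreamStore, TaskAssignmentManager) plus the MetadataStore init/close lifecycle; jobConfig and the variable names are illustrative.

CoordinatorStreamStore store = new CoordinatorStreamStore(
    CoordinatorStreamUtil.buildCoordinatorStreamConfig(jobConfig), new MetricsRegistryMap());
store.init();
try {
  Map<String, ProcessorLocality> localities = new LocalityManager(store).readLocality().getProcessorLocalities();
  TaskAssignmentManager assignments = new TaskAssignmentManager(
      new NamespaceAwareCoordinatorStreamStore(store, SetTaskContainerMapping.TYPE),
      new NamespaceAwareCoordinatorStreamStore(store, SetTaskModeMapping.TYPE));
  Map<String, String> taskToContainer = assignments.readTaskAssignment();
  // taskToContainer and localities can now be joined exactly as in readTasksFromCoordinatorStream above.
} finally {
  store.close();
}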