Example use of org.apache.samza.metadatastore.MetadataStore in the Apache Samza project:
the method getConfigFromCoordinatorStream of class TestZkLocalApplicationRunner.
/**
 * Reads all "set-config" entries back out of the coordinator stream's metadata store
 * and materializes them as an immutable {@link MapConfig}.
 *
 * @param config job config used to locate the metadata store factory and connect to the store.
 * @return a {@link MapConfig} containing every config key/value currently in the coordinator stream.
 */
private MapConfig getConfigFromCoordinatorStream(Config config) {
  MetadataStoreFactory metadataStoreFactory =
      ReflectionUtil.getObj(new JobConfig(config).getMetadataStoreFactory(), MetadataStoreFactory.class);
  // The "set-config" namespace must match the one used by the serde below so keys/values decode consistently.
  MetadataStore metadataStore = metadataStoreFactory.getMetadataStore("set-config", config, new MetricsRegistryMap());
  metadataStore.init();
  try {
    Map<String, String> configMap = new HashMap<>();
    CoordinatorStreamValueSerde jsonSerde = new CoordinatorStreamValueSerde("set-config");
    metadataStore.all().forEach((key, value) -> {
      // Keys are JSON-serialized CoordinatorMessageKeys; we only keep the inner config key.
      CoordinatorStreamStore.CoordinatorMessageKey coordinatorMessageKey =
          CoordinatorStreamStore.deserializeCoordinatorMessageKeyFromJson(key);
      String deserializedValue = jsonSerde.fromBytes(value);
      configMap.put(coordinatorMessageKey.getKey(), deserializedValue);
    });
    return new MapConfig(configMap);
  } finally {
    // Fix: the original initialized the store but never released it; close it even if deserialization throws.
    metadataStore.close();
  }
}
Example use of org.apache.samza.metadatastore.MetadataStore in the Apache Samza project:
the method run of class JobCoordinatorLaunchUtil.
/**
 * Run {@link ClusterBasedJobCoordinator} with full job config.
 *
 * <p>Plans the application into exactly one remote job, ensures the coordinator stream exists,
 * merges the launch config stored there into the job config, persists the final config back,
 * and then starts the job coordinator (either a configured factory-based one or the default
 * {@link ClusterBasedJobCoordinator}).
 *
 * @param app SamzaApplication to run.
 * @param config full job config.
 * @throws SamzaException if planning produces anything other than exactly one job.
 */
@SuppressWarnings("rawtypes")
public static void run(SamzaApplication app, Config config) {
  // Execute planning
  ApplicationDescriptorImpl<? extends ApplicationDescriptor> appDesc =
      ApplicationDescriptorUtil.getAppDescriptor(app, config);
  RemoteJobPlanner planner = new RemoteJobPlanner(appDesc);
  List<JobConfig> jobConfigs = planner.prepareJobs();
  if (jobConfigs.size() != 1) {
    // Fix: original message was garbled ("Only support single remote job is supported.").
    throw new SamzaException("Only a single remote job is supported.");
  }
  Config fullConfig = jobConfigs.get(0);
  // Create coordinator stream if it does not exist before fetching launch config from it.
  CoordinatorStreamUtil.createCoordinatorStream(fullConfig);
  MetricsRegistryMap metrics = new MetricsRegistryMap();
  MetadataStore metadataStore =
      new CoordinatorStreamStore(CoordinatorStreamUtil.buildCoordinatorStreamConfig(fullConfig), metrics);
  // MetadataStore will be closed in ClusterBasedJobCoordinator#onShutDown.
  // Initialization of MetadataStore can be moved to ClusterBasedJobCoordinator after we clean up
  // ClusterBasedJobCoordinator#createFromMetadataStore.
  metadataStore.init();
  // Reads extra launch config from the metadata store; launch config takes precedence over fullConfig.
  Config launchConfig = CoordinatorStreamUtil.readLaunchConfigFromCoordinatorStream(fullConfig, metadataStore);
  Config finalConfig = new MapConfig(launchConfig, fullConfig);
  // This needs to be consistent with RemoteApplicationRunner#run where JobRunner#submit is called
  // instead of JobRunner#run.
  CoordinatorStreamUtil.writeConfigToCoordinatorStream(finalConfig, true);
  DiagnosticsUtil.createDiagnosticsStream(finalConfig);
  // NOTE(review): factory lookup intentionally(?) uses the original `config`, not `finalConfig` —
  // launch-config overrides of the coordinator factory would be ignored here; confirm this is intended.
  Optional<String> jobCoordinatorFactoryClassName =
      new JobCoordinatorConfig(config).getOptionalJobCoordinatorFactoryClassName();
  if (jobCoordinatorFactoryClassName.isPresent()) {
    runJobCoordinator(jobCoordinatorFactoryClassName.get(), metrics, metadataStore, finalConfig);
  } else {
    ClusterBasedJobCoordinator jc = new ClusterBasedJobCoordinator(metrics, metadataStore, finalConfig);
    jc.run();
  }
}
Aggregations