Use of io.mantisrx.runtime.parameter.Parameters in project mantis by Netflix.
The class KafkaSourceTest, method testKafkaSourceSingleConsumerReadsAllMessagesInOrderFromSinglePartition.
@Test
public void testKafkaSourceSingleConsumerReadsAllMessagesInOrderFromSinglePartition() throws InterruptedException {
    String testTopic = "testTopic" + topicNum.incrementAndGet();
    int numPartitions = 1;
    kafkaServer.createTopic(testTopic, numPartitions);
    int numMessages = 10;
    for (int i = 0; i < numMessages; i++) {
        ProducerRecord<String, String> keyedMessage = new ProducerRecord<>(testTopic, "{\"messageNum\":" + i + "}");
        kafkaServer.sendMessages(keyedMessage);
    }
    KafkaSource kafkaSource = new KafkaSource(new NoopRegistry());
    Context context = mock(Context.class);
    Parameters params = ParameterTestUtils.createParameters(
            KafkaSourceParameters.TOPIC, testTopic,
            KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
            KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, "testKafkaConsumer-" + random.nextInt());
    when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
    when(context.getWorkerInfo()).then((Answer<WorkerInfo>) invocation ->
            new WorkerInfo("testJobName", "testJobName-1", 1, 0, 1, MantisJobDurationType.Perpetual, "1.1.1.1"));
    when(context.getJobId()).then((Answer<String>) invocation -> "testJobName-1");
    Index index = new Index(0, 10);
    Observable<Observable<KafkaAckable>> sourceObs = kafkaSource.call(context, index);
    final CountDownLatch latch = new CountDownLatch(numMessages);
    final AtomicInteger counter = new AtomicInteger(0);
    sourceObs
            .flatMap(kafkaAckableObs -> kafkaAckableObs)
            .map(kafkaAckable -> {
                Optional<Map<String, Object>> parsedEvent = kafkaAckable.getKafkaData().getParsedEvent();
                assertTrue(parsedEvent.isPresent());
                assertEquals(counter.getAndIncrement(), parsedEvent.get().get("messageNum"));
                LOGGER.info("got message on topic {} consumer Id {}", parsedEvent.get(), kafkaAckable.getKafkaData().getMantisKafkaConsumerId());
                kafkaAckable.ack();
                latch.countDown();
                return parsedEvent;
            })
            .subscribe();
    assertTrue("timed out waiting to get all messages from Kafka", latch.await(10, TimeUnit.SECONDS));
    kafkaServer.deleteTopic(testTopic);
}
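The two Kafka source tests on this page stub the same three Context lookups. That stubbing could be collected into a small helper; the sketch below is hypothetical (it is not part of the Mantis test code) and substitutes plain Mockito thenReturn stubbing for the Answer-based form used above.

// Hypothetical helper, not in the Mantis tests: a mocked Context wired with the
// given Parameters plus the fixed worker/job identity these tests share.
private static Context mockContext(Parameters params) {
    Context context = mock(Context.class);
    when(context.getParameters()).thenReturn(params);
    when(context.getWorkerInfo()).thenReturn(
            new WorkerInfo("testJobName", "testJobName-1", 1, 0, 1, MantisJobDurationType.Perpetual, "1.1.1.1"));
    when(context.getJobId()).thenReturn("testJobName-1");
    return context;
}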
Use of io.mantisrx.runtime.parameter.Parameters in project mantis by Netflix.
The class MantisKafkaProducerConfig, method applyJobParamOverrides.
private static Map<String, Object> applyJobParamOverrides(Context context, Map<String, Object> parsedValues) {
    final Parameters parameters = context.getParameters();
    Map<String, Object> defaultProps = defaultProps();
    for (String key : configNames()) {
        Object value = parameters.get(KafkaSinkJobParameters.PREFIX + key, null);
        if (value != null) {
            LOGGER.info("job param override for key {} -> {}", key, value);
            parsedValues.put(key, value);
        }
    }
    final String bootstrapBrokers = (String) parameters.get(
            KafkaSinkJobParameters.PREFIX + ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
            defaultProps.get(BOOTSTRAP_SERVERS_CONFIG));
    parsedValues.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
    final String clientId = (String) parameters.get(
            KafkaSinkJobParameters.PREFIX + ProducerConfig.CLIENT_ID_CONFIG, context.getJobId());
    parsedValues.put(ProducerConfig.CLIENT_ID_CONFIG, clientId);
    return parsedValues;
}
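A hedged caller-side sketch of how these overrides behave; the wiring below is an assumption for illustration, not the actual producer initialization path in Mantis. Defaults are computed first, and any job parameter named KafkaSinkJobParameters.PREFIX plus a producer config key wins over the corresponding default.

// Assumed wiring, for illustration only: resolve the effective producer properties
// by seeding with defaults and applying prefixed job-parameter overrides.
Map<String, Object> producerProps = applyJobParamOverrides(context, new HashMap<>(defaultProps()));
// A job submitted with a parameter named
// KafkaSinkJobParameters.PREFIX + ProducerConfig.ACKS_CONFIG and value "all"
// would then surface here:
Object acks = producerProps.get(ProducerConfig.ACKS_CONFIG);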
Use of io.mantisrx.runtime.parameter.Parameters in project mantis by Netflix.
The class WorkerExecutionOperationsNetworkStage, method executeStage.
@SuppressWarnings({ "rawtypes", "unchecked" })
@Override
public void executeStage(final ExecutionDetails setup) {
    ExecuteStageRequest executionRequest = setup.getExecuteStageRequest().getRequest();
    // Initialize the schedulingInfo observable for the current job and mark it shareable so it can be reused by anyone interested in this data.
    // Observable<JobSchedulingInfo> selfSchedulingInfo = mantisMasterApi.schedulingChanges(executionRequest.getJobId()).switchMap((e) -> Observable.just(e).repeatWhen(x -> x.delay(5, TimeUnit.SECONDS))).subscribeOn(Schedulers.io()).share();
    Observable<JobSchedulingInfo> selfSchedulingInfo = mantisMasterApi.schedulingChanges(executionRequest.getJobId())
            .subscribeOn(Schedulers.io())
            .share();
    WorkerInfo workerInfo = generateWorkerInfo(
            executionRequest.getJobName(), executionRequest.getJobId(), executionRequest.getStage(),
            executionRequest.getWorkerIndex(), executionRequest.getWorkerNumber(),
            executionRequest.getDurationType(), "host", executionRequest.getWorkerPorts());
    final Observable<Integer> sourceStageTotalWorkersObs = createSourceStageTotalWorkersObservable(selfSchedulingInfo);
    RunningWorker.Builder rwBuilder = new RunningWorker.Builder()
            .job(setup.getMantisJob())
            .schedulingInfo(executionRequest.getSchedulingInfo())
            .stageTotalWorkersObservable(sourceStageTotalWorkersObs)
            .jobName(executionRequest.getJobName())
            .stageNum(executionRequest.getStage())
            .workerIndex(executionRequest.getWorkerIndex())
            .workerNum(executionRequest.getWorkerNumber())
            .totalStages(executionRequest.getTotalNumStages())
            .metricsPort(executionRequest.getMetricsPort())
            .ports(executionRequest.getPorts().iterator())
            .jobStatusObserver(setup.getStatus())
            .requestSubject(setup.getExecuteStageRequest().getRequestSubject())
            .workerInfo(workerInfo)
            .vmTaskStatusObservable(vmTaskStatusObserver)
            .hasJobMaster(executionRequest.getHasJobMaster())
            .jobId(executionRequest.getJobId());
    if (executionRequest.getStage() == 0) {
        rwBuilder = rwBuilder.stage(new JobMasterStageConfig("jobmasterconfig"));
    } else {
        rwBuilder = rwBuilder.stage((StageConfig) setup.getMantisJob().getStages().get(executionRequest.getStage() - 1));
    }
    final RunningWorker rw = rwBuilder.build();
    AtomicReference<SubscriptionStateHandler> subscriptionStateHandlerRef = new AtomicReference<>();
    if (rw.getStageNum() == rw.getTotalStagesNet()) {
        // set up the subscription state handler only for the sink (last) stage
        subscriptionStateHandlerRef.set(setupSubscriptionStateHandler(
                setup.getExecuteStageRequest().getRequest().getJobId(),
                mantisMasterApi,
                setup.getExecuteStageRequest().getRequest().getSubscriptionTimeoutSecs(),
                setup.getExecuteStageRequest().getRequest().getMinRuntimeSecs()));
    }
    logger.info("Running worker info: " + rw);
    rw.signalStartedInitiated();
    try {
        logger.info(">>>>>>>>>>>>>>>>Calling lifecycle.startup()");
        Lifecycle lifecycle = rw.getJob().getLifecycle();
        lifecycle.startup();
        ServiceLocator serviceLocator = lifecycle.getServiceLocator();
        if (lookupSpectatorRegistry) {
            try {
                final Registry spectatorRegistry = serviceLocator.service(Registry.class);
                SpectatorRegistryFactory.setRegistry(spectatorRegistry);
            } catch (Throwable t) {
                logger.error("failed to init spectator registry using service locator, falling back to {}",
                        SpectatorRegistryFactory.getRegistry().getClass().getCanonicalName());
            }
        }
        // create the job context
        Parameters parameters = ParameterUtils.createContextParameters(rw.getJob().getParameterDefinitions(), setup.getParameters());
        final Context context = generateContext(parameters, serviceLocator, workerInfo, MetricsRegistry.getInstance(), () -> {
            rw.signalCompleted();
            // wait for the completion signal to reach the master and for this worker to be killed; upon timeout, exit.
            try {
                Thread.sleep(60000);
            } catch (InterruptedException ie) {
                logger.warn("Unexpected exception sleeping: " + ie.getMessage());
            }
            System.exit(0);
        }, createWorkerMapObservable(selfSchedulingInfo, executionRequest.getJobName(), executionRequest.getJobId(), executionRequest.getDurationType()));
        // context.setPrevStageCompletedObservable(createPrevStageCompletedObservable(selfSchedulingInfo, rw.getJobId(), rw.getStageNum()));
        rw.setContext(context);
        // set up heartbeats
        heartbeatRef.set(new Heartbeat(rw.getJobId(), rw.getStageNum(), rw.getWorkerIndex(), rw.getWorkerNum()));
        final double networkMbps = executionRequest.getSchedulingInfo().forStage(rw.getStageNum()).getMachineDefinition().getNetworkMbps();
        startSendingHeartbeats(rw.getJobStatus(),
                new WorkerId(executionRequest.getJobId(), executionRequest.getWorkerIndex(), executionRequest.getWorkerNumber()).getId(),
                networkMbps);
        // execute the stage
        if (rw.getStageNum() == 0) {
            logger.info("JobId: " + rw.getJobId() + ", executing Job Master");
            final AutoScaleMetricsConfig autoScaleMetricsConfig = new AutoScaleMetricsConfig();
            // Temporary workaround to enable auto-scaling by custom metric in the Job Master. This will be revisited
            // to get the entire autoscaling config for a job as a system parameter in the JobMaster.
            final String autoScaleMetricString = (String) parameters.get(JOB_MASTER_AUTOSCALE_METRIC_SYSTEM_PARAM, "");
            if (!Strings.isNullOrEmpty(autoScaleMetricString)) {
                final List<String> tokens = Splitter.on("::").omitEmptyStrings().trimResults().splitToList(autoScaleMetricString);
                if (tokens.size() == 3) {
                    final String metricGroup = tokens.get(0);
                    final String metricName = tokens.get(1);
                    final String algo = tokens.get(2);
                    try {
                        final AutoScaleMetricsConfig.AggregationAlgo aggregationAlgo = AutoScaleMetricsConfig.AggregationAlgo.valueOf(algo);
                        logger.info("registered UserDefined auto scale metric {}:{} algo {}", metricGroup, metricName, aggregationAlgo);
                        autoScaleMetricsConfig.addUserDefinedMetric(metricGroup, metricName, aggregationAlgo);
                    } catch (IllegalArgumentException e) {
final String errorMsg = String.format("ERROR: Invalid algorithm value %s for param %s (algo should be one of %s)", autoScaleMetricsConfig, JOB_MASTER_AUTOSCALE_METRIC_SYSTEM_PARAM, Arrays.stream(AutoScaleMetricsConfig.AggregationAlgo.values()).map(a -> a.name()).collect(Collectors.toList()));
                        logger.error(errorMsg);
                        throw new RuntimeException(errorMsg);
                    }
                } else {
                    final String errorMsg = String.format("ERROR: Invalid value %s for param %s",
                            autoScaleMetricString, JOB_MASTER_AUTOSCALE_METRIC_SYSTEM_PARAM);
                    logger.error(errorMsg);
                    throw new RuntimeException(errorMsg);
                }
            } else {
                logger.info("param {} is null or empty", JOB_MASTER_AUTOSCALE_METRIC_SYSTEM_PARAM);
            }
            final JobMasterService jobMasterService = new JobMasterService(rw.getJobId(), rw.getSchedulingInfo(),
                    workerMetricsClient, autoScaleMetricsConfig, mantisMasterApi, rw.getContext(),
                    rw.getOnCompleteCallback(), rw.getOnErrorCallback(), rw.getOnTerminateCallback());
            jobMasterService.start();
            signalStarted(rw, subscriptionStateHandlerRef);
            // block until the worker terminates
            rw.waitUntilTerminate();
        } else if (rw.getStageNum() == 1 && rw.getTotalStagesNet() == 1) {
            logger.info("JobId: " + rw.getJobId() + ", single-stage job, executing the entire job");
            // single stage: execute the entire job on this machine
            PortSelector portSelector = new PortSelector() {

                @Override
                public int acquirePort() {
                    return rw.getPorts().next();
                }
            };
            RxMetrics rxMetrics = new RxMetrics();
            StageExecutors.executeSingleStageJob(rw.getJob().getSource(), rw.getStage(), rw.getJob().getSink(),
                    portSelector, rxMetrics, rw.getContext(), rw.getOnTerminateCallback(), rw.getWorkerIndex(),
                    rw.getSourceStageTotalWorkersObservable(), onSinkSubscribe, onSinkUnsubscribe,
                    rw.getOnCompleteCallback(), rw.getOnErrorCallback());
            signalStarted(rw, subscriptionStateHandlerRef);
            // block until the worker terminates
            rw.waitUntilTerminate();
        } else {
            logger.info("JobId: " + rw.getJobId() + ", executing a multi-stage job, stage: " + rw.getStageNum());
            if (rw.getStageNum() == 1) {
                // execute the source stage
                String remoteObservableName = rw.getJobId() + "_" + rw.getStageNum();
                StageSchedulingInfo currentStageSchedulingInfo = rw.getSchedulingInfo().forStage(1);
                WorkerPublisherRemoteObservable publisher = new WorkerPublisherRemoteObservable<>(rw.getPorts().next(),
                        remoteObservableName, numWorkersAtStage(selfSchedulingInfo, rw.getJobId(), rw.getStageNum() + 1),
                        rw.getJobName());
                StageExecutors.executeSource(rw.getWorkerIndex(), rw.getJob().getSource(), rw.getStage(), publisher,
                        rw.getContext(), rw.getSourceStageTotalWorkersObservable());
                logger.info("JobId: " + rw.getJobId() + " stage: " + rw.getStageNum()
                        + ", serving remote observable for source with name: " + remoteObservableName);
                RemoteRxServer server = publisher.getServer();
                RxMetrics rxMetrics = server.getMetrics();
                MetricsRegistry.getInstance().registerAndGet(rxMetrics.getCountersAndGauges());
                signalStarted(rw, subscriptionStateHandlerRef);
                logger.info("JobId: " + rw.getJobId() + " stage: " + rw.getStageNum()
                        + ", blocking until the source observable completes");
                server.blockUntilServerShutdown();
            } else {
                // execute an intermediate stage, or the last stage plus the sink
                executeNonSourceStage(selfSchedulingInfo, rw, subscriptionStateHandlerRef);
            }
        }
        logger.info("Calling lifecycle.shutdown()");
        lifecycle.shutdown();
    } catch (Throwable t) {
        rw.signalFailed(t);
        shutdownStage();
    }
}
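The Job Master branch above expects JOB_MASTER_AUTOSCALE_METRIC_SYSTEM_PARAM to hold a "::"-separated triple of metric group, metric name, and aggregation algorithm. A minimal standalone sketch of that format follows; the sample value and the AVERAGE constant are illustrative assumptions, not values taken from the Mantis source.

import com.google.common.base.Splitter;
import java.util.List;

public class AutoScaleMetricParamSketch {
    public static void main(String[] args) {
        // Illustrative value only: "<metricGroup>::<metricName>::<aggregationAlgo>"
        String sample = "myMetricGroup::processedPerSecond::AVERAGE";
        List<String> tokens = Splitter.on("::").omitEmptyStrings().trimResults().splitToList(sample);
        if (tokens.size() != 3) {
            throw new IllegalArgumentException("expected group::name::algo, got: " + sample);
        }
        System.out.println("group=" + tokens.get(0) + ", name=" + tokens.get(1) + ", algo=" + tokens.get(2));
    }
}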
Use of io.mantisrx.runtime.parameter.Parameters in project mantis by Netflix.
The class KafkaSourceTest, method testKafkaSourceMultipleConsumersReadsAllMessagesFromMultiplePartitions.
@Test
public void testKafkaSourceMultipleConsumersReadsAllMessagesFromMultiplePartitions() throws InterruptedException {
    String testTopic = "testTopic" + topicNum.incrementAndGet();
    int numPartitions = 2;
    kafkaServer.createTopic(testTopic, numPartitions);
    int numMessages = 10;
    Set<Integer> outstandingMsgs = new ConcurrentSkipListSet<>();
    for (int i = 0; i < numMessages; i++) {
        ProducerRecord<String, String> keyedMessage = new ProducerRecord<>(testTopic, "{\"messageNum\":" + i + "}");
        kafkaServer.sendMessages(keyedMessage);
        outstandingMsgs.add(i);
    }
    KafkaSource kafkaSource = new KafkaSource(new NoopRegistry());
    Context context = mock(Context.class);
    Parameters params = ParameterTestUtils.createParameters(
            KafkaSourceParameters.NUM_KAFKA_CONSUMER_PER_WORKER, 2,
            KafkaSourceParameters.TOPIC, testTopic,
            KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
            KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, "testKafkaConsumer-" + random.nextInt());
    when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
    when(context.getWorkerInfo()).then((Answer<WorkerInfo>) invocation ->
            new WorkerInfo("testJobName", "testJobName-1", 1, 0, 1, MantisJobDurationType.Perpetual, "1.1.1.1"));
    when(context.getJobId()).then((Answer<String>) invocation -> "testJobName-1");
    Index index = new Index(0, 10);
    Observable<Observable<KafkaAckable>> sourceObs = kafkaSource.call(context, index);
    final CountDownLatch latch = new CountDownLatch(numMessages);
    final Map<Integer, Integer> lastMessageNumByConsumerId = new ConcurrentHashMap<>();
    sourceObs
            .flatMap(kafkaAckableObs -> kafkaAckableObs)
            .map(kafkaAckable -> {
                Optional<Map<String, Object>> parsedEvent = kafkaAckable.getKafkaData().getParsedEvent();
                assertTrue(parsedEvent.isPresent());
                Integer messageNum = (Integer) parsedEvent.get().get("messageNum");
                assertTrue(outstandingMsgs.contains(messageNum));
                outstandingMsgs.remove(messageNum);
                int mantisKafkaConsumerId = kafkaAckable.getKafkaData().getMantisKafkaConsumerId();
                lastMessageNumByConsumerId.putIfAbsent(mantisKafkaConsumerId, -1);
                // each consumer should see strictly increasing message numbers (order is preserved within a partition)
                assertTrue(messageNum > lastMessageNumByConsumerId.get(mantisKafkaConsumerId));
                lastMessageNumByConsumerId.put(mantisKafkaConsumerId, messageNum);
                LOGGER.info("got message on topic {} consumer id {}", parsedEvent.get(), mantisKafkaConsumerId);
                kafkaAckable.ack();
                latch.countDown();
                return parsedEvent;
            })
            .doOnError(t -> {
                LOGGER.error("caught unexpected exception", t);
                fail("test failed due to unexpected error " + t.getMessage());
            })
            .subscribe();
    assertTrue("timed out waiting to get all messages from Kafka", latch.await(10, TimeUnit.SECONDS));
    assertEquals(0, outstandingMsgs.size());
    assertEquals(2, lastMessageNumByConsumerId.keySet().size());
    lastMessageNumByConsumerId.keySet().forEach(consumerId ->
            assertTrue(lastMessageNumByConsumerId.get(consumerId) >= 0));
    kafkaServer.deleteTopic(testTopic);
}
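The per-consumer ordering assertion above holds because messages are produced in increasing messageNum order, Kafka preserves order within a partition, and each of the two consumers reads from its own partition. A hypothetical condensed form of the same bookkeeping (not from the Mantis tests) using ConcurrentHashMap.merge, which keeps the per-consumer running maximum atomically:

// Hypothetical alternative bookkeeping: merge() keeps the running max per
// consumer, so an out-of-order messageNum leaves the larger previous value in
// place and the equality assertion fails.
private static final Map<Integer, Integer> lastSeen = new ConcurrentHashMap<>();

private static void checkOrdered(int consumerId, int messageNum) {
    Integer updated = lastSeen.merge(consumerId, messageNum, Math::max);
    assertEquals(messageNum, updated.intValue());
}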
Use of io.mantisrx.runtime.parameter.Parameters in project mantis by Netflix.
The class MantisKafkaSourceConfigTest, method testDefaultConsumerConfig.
@Test
public void testDefaultConsumerConfig() {
    Context context = mock(Context.class);
    Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.TOPIC, "testTopic");
    when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
    MantisKafkaSourceConfig mantisKafkaSourceConfig = new MantisKafkaSourceConfig(context);
    assertEquals(MantisKafkaSourceConfig.DEFAULT_CONSUMER_POLL_TIMEOUT_MS, mantisKafkaSourceConfig.getConsumerPollTimeoutMs());
    assertEquals(CheckpointStrategyOptions.NONE, mantisKafkaSourceConfig.getCheckpointStrategy());
    assertEquals(MantisKafkaConsumerConfig.DEFAULT_CHECKPOINT_INTERVAL_MS, mantisKafkaSourceConfig.getCheckpointIntervalMs());
    assertEquals(MantisKafkaSourceConfig.DEFAULT_MAX_BYTES_IN_PROCESSING, mantisKafkaSourceConfig.getMaxBytesInProcessing());
    assertEquals(ParserType.SIMPLE_JSON.getPropName(), mantisKafkaSourceConfig.getMessageParserType());
    assertEquals(MantisKafkaSourceConfig.DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER, mantisKafkaSourceConfig.getNumConsumerInstances());
    assertEquals(MantisKafkaSourceConfig.DEFAULT_PARSE_MSG_IN_SOURCE, mantisKafkaSourceConfig.getParseMessageInSource());
    assertEquals(MantisKafkaSourceConfig.DEFAULT_RETRY_CHECKPOINT_CHECK_DELAY_MS, mantisKafkaSourceConfig.getRetryCheckpointCheckDelayMs());
    assertEquals(MantisKafkaSourceConfig.DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN, mantisKafkaSourceConfig.getStaticPartitionAssignmentEnabled());
    assertEquals(Optional.empty(), mantisKafkaSourceConfig.getTopicPartitionCounts());
    assertEquals(Arrays.asList("testTopic"), mantisKafkaSourceConfig.getTopics());
}
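As a companion to the defaults test, here is a hedged sketch of overriding one of these settings through a prefixed job parameter, following the same convention used in the Kafka source tests above. Where the override ultimately lands is an assumption: it should flow into the wrapped Kafka consumer config rather than change the getters asserted above.

// Hedged sketch: same setup as testDefaultConsumerConfig, but with an
// auto.offset.reset override supplied as a prefixed job parameter.
Parameters params = ParameterTestUtils.createParameters(
        KafkaSourceParameters.TOPIC, "testTopic",
        KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
MantisKafkaSourceConfig config = new MantisKafkaSourceConfig(context);
// The defaults asserted in the test above remain in effect; the override targets
// the underlying Kafka ConsumerConfig instead (exact accessors not shown here).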