Usage example of io.mantisrx.server.master.store.NamedJob in the Netflix Mantis project: the initCron method of the SLA class.
// caller must lock to avoid concurrent access with destroyCron()
private void initCron(NamedJob job) throws SchedulerException {
// No-op when this SLA has no cron spec, or when a trigger is already registered
// (triggerId is set only after successful registration below).
if (!hasCronSpec || triggerId != null)
return;
// Parameterized logging defers string construction to the logging framework.
logger.info("Init'ing cron for {}", job.getName());
// Group name combines the job name with this instance's toString() so each
// SLA instance gets its own trigger group.
triggerGroup = job.getName() + "-" + this;
try {
scheduledTrigger = new CronTrigger<>(cronSpec, job.getName(), job, NamedJob.class, NamedJob.CronTriggerAction.class);
triggerId = triggerOperator.registerTrigger(triggerGroup, scheduledTrigger);
} catch (IllegalArgumentException e) {
// Invalid cron expression: rethrow as SchedulerException, preserving the cause.
throw new SchedulerException(e.getMessage(), e);
}
}
Usage example of io.mantisrx.server.master.store.NamedJob in the Netflix Mantis project: the jobClusterMetadataConversionTest method of the DataFormatAdapterTest class.
/**
 * Round-trip conversion test: builds an IJobClusterMetadata, converts it to the
 * legacy NamedJob representation, verifies every field survived, then converts
 * back and verifies the reconstructed metadata matches the original inputs.
 */
@Test
public void jobClusterMetadataConversionTest() {
String artifactName = "artifact1";
String version = "0.0.1";
List<Parameter> parameterList = new ArrayList<>();
Parameter parameter = new Parameter("param1", "value1");
parameterList.add(parameter);
List<Label> labels = new ArrayList<>();
Label label = new Label("label1", "labelvalue1");
labels.add(label);
// Uppercase 'L' suffix: lowercase 'l' is easily misread as the digit '1'.
long uAt = 1234L;
JobClusterConfig jobClusterConfig = new JobClusterConfig.Builder().withArtifactName(artifactName).withSchedulingInfo(DEFAULT_SCHED_INFO).withVersion(version).withUploadedAt(uAt).build();
String clusterName = "clusterName1";
JobOwner owner = new JobOwner("Neeraj", "Mantis", "desc", "nma@netflix.com", "repo");
boolean isReadyForMaster = true;
SLA sla = new SLA(1, 10, null, null);
JobClusterDefinitionImpl clusterDefn = new JobClusterDefinitionImpl.Builder().withJobClusterConfig(jobClusterConfig).withName(clusterName).withUser("user1").withIsReadyForJobMaster(isReadyForMaster).withOwner(owner).withMigrationConfig(WorkerMigrationConfig.DEFAULT).withSla(sla).withParameters(parameterList).withLabels(labels).build();
int lastJobCnt = 10;
boolean disabled = false;
IJobClusterMetadata clusterMeta = new JobClusterMetadataImpl.Builder().withJobClusterDefinition(clusterDefn).withLastJobCount(lastJobCnt).withIsDisabled(disabled).build();
// Forward conversion: cluster metadata -> legacy NamedJob.
NamedJob namedJob = DataFormatAdapter.convertJobClusterMetadataToNamedJob(clusterMeta);
assertEquals(disabled, namedJob.getDisabled());
assertEquals(clusterName, namedJob.getName());
assertEquals(lastJobCnt, namedJob.getLastJobCount());
assertEquals(1, namedJob.getLabels().size());
assertEquals(label, namedJob.getLabels().get(0));
assertEquals(owner, namedJob.getOwner());
assertEquals(isReadyForMaster, namedJob.getIsReadyForJobMaster());
assertEquals(WorkerMigrationConfig.DEFAULT, namedJob.getMigrationConfig());
// assert parameters
assertEquals(parameterList.size(), namedJob.getParameters().size());
assertEquals(parameter, namedJob.getParameters().get(0));
// assert sla
assertEquals(sla.getMin(), namedJob.getSla().getMin());
assertEquals(sla.getMax(), namedJob.getSla().getMax());
// assert jar info
assertEquals(1, namedJob.getJars().size());
// jar info
NamedJob.Jar jar = namedJob.getJars().get(0);
assertEquals(uAt, jar.getUploadedAt());
assertEquals(DEFAULT_SCHED_INFO, jar.getSchedulingInfo());
assertEquals(version, jar.getVersion());
assertEquals(artifactName, DataFormatAdapter.extractArtifactName(jar.getUrl()).orElse(""));
// Reverse conversion: NamedJob -> cluster metadata; verify round-trip fidelity.
IJobClusterMetadata reconvertedJobCluster = DataFormatAdapter.convertNamedJobToJobClusterMetadata(namedJob);
assertEquals(disabled, reconvertedJobCluster.isDisabled());
assertEquals(clusterName, reconvertedJobCluster.getJobClusterDefinition().getName());
assertEquals(lastJobCnt, reconvertedJobCluster.getLastJobCount());
assertEquals(1, reconvertedJobCluster.getJobClusterDefinition().getLabels().size());
assertEquals(label, reconvertedJobCluster.getJobClusterDefinition().getLabels().get(0));
assertEquals(owner, reconvertedJobCluster.getJobClusterDefinition().getOwner());
assertEquals(isReadyForMaster, reconvertedJobCluster.getJobClusterDefinition().getIsReadyForJobMaster());
assertEquals(WorkerMigrationConfig.DEFAULT, reconvertedJobCluster.getJobClusterDefinition().getWorkerMigrationConfig());
assertEquals(parameterList.size(), reconvertedJobCluster.getJobClusterDefinition().getParameters().size());
assertEquals(parameter, reconvertedJobCluster.getJobClusterDefinition().getParameters().get(0));
assertEquals(sla.getMin(), reconvertedJobCluster.getJobClusterDefinition().getSLA().getMin());
assertEquals(sla.getMax(), reconvertedJobCluster.getJobClusterDefinition().getSLA().getMax());
JobClusterConfig clusterConfig1 = reconvertedJobCluster.getJobClusterDefinition().getJobClusterConfig();
assertEquals(uAt, clusterConfig1.getUploadedAt());
assertEquals(DEFAULT_SCHED_INFO, clusterConfig1.getSchedulingInfo());
assertEquals(version, clusterConfig1.getVersion());
assertEquals(artifactName, clusterConfig1.getArtifactName());
}
Usage example of io.mantisrx.server.master.store.NamedJob in the Netflix Mantis project: the loadAllJobClusters method of the MantisStorageProviderAdapter class.
/**
 * Loads all persisted NamedJob records from the underlying storage provider and
 * converts each into an IJobClusterMetadata. Conversion is best-effort: entries
 * that fail to convert are logged and skipped rather than failing the whole load.
 *
 * @return the successfully converted job clusters (never null)
 * @throws IOException if the underlying storage provider fails to load
 */
@Override
public List<IJobClusterMetadata> loadAllJobClusters() throws IOException {
if (logger.isTraceEnabled()) {
logger.trace("Enter StorageAdapter.loadAllJobClusters");
}
List<NamedJob> namedJobList = sProvider.initNamedJobs();
// AtomicInteger counters so the lambda below can mutate them (locals captured
// by a lambda must be effectively final).
AtomicInteger failedCount = new AtomicInteger();
AtomicInteger successCount = new AtomicInteger();
// Declared at assignment: the previous eager Lists.newArrayList() initialization
// was dead code, immediately overwritten by the stream result.
List<IJobClusterMetadata> jobClusters = namedJobList.stream().map((nJob) -> {
try {
IJobClusterMetadata jobClusterMetadata = DataFormatAdapter.convertNamedJobToJobClusterMetadata(nJob);
successCount.getAndIncrement();
return jobClusterMetadata;
} catch (Exception e) {
// Best-effort: log and skip this entry; null is filtered out below.
logger.error("Exception {} converting {} ", e.getMessage(), nJob);
logger.error("Exception is", e);
failedCount.getAndIncrement();
}
return null;
}).filter((jobClusterMeta) -> jobClusterMeta != null).collect(Collectors.toList());
logger.info("Successfully read and converted {} job clusters", successCount.get());
logger.info("Failed to read and convert {} job clusters", failedCount.get());
if (logger.isTraceEnabled()) {
logger.trace("Exit StorageAdapter.loadAllJobClusters");
}
return jobClusters;
}
Aggregations