
Example 1 with NetworkResource

Use of com.hashicorp.nomad.apimodel.NetworkResource in project heron by twitter.

In the class NomadScheduler, the method getTask:

Task getTask(String taskName, int containerIndex, Resource containerResource) {
    String nomadDriver = NomadContext.getHeronNomadDriver(this.localConfig);
    Task task = new Task();
    if (nomadDriver.equals(NomadConstants.NomadDriver.RAW_EXEC.getName())) {
        getTaskSpecRawDriver(task, taskName, containerIndex);
    } else if (nomadDriver.equals(NomadConstants.NomadDriver.DOCKER.getName())) {
        getTaskSpecDockerDriver(task, taskName, containerIndex);
    } else {
        throw new IllegalArgumentException("Invalid Nomad driver specified: " + nomadDriver);
    }
    // set resource requests
    Resources resourceReqs = new Resources();
    // configure Nomad to allocate dynamic ports
    Port[] ports = new Port[NomadConstants.EXECUTOR_PORTS.size()];
    int i = 0;
    for (SchedulerUtils.ExecutorPort port : NomadConstants.EXECUTOR_PORTS.keySet()) {
        ports[i] = new Port().setLabel(port.getName().replace("-", "_"));
        i++;
    }
    NetworkResource networkResource = new NetworkResource();
    networkResource.addDynamicPorts(ports);
    // set memory requirements
    long memoryReqMb = containerResource.getRam().asMegabytes();
    resourceReqs.setMemoryMb(longToInt(memoryReqMb));
    // set CPU requirements
    double coresReq = containerResource.getCpu();
    double coresReqFreq = NomadContext.getCoreFreqMapping(this.localConfig) * coresReq;
    resourceReqs.setCpu(Integer.valueOf((int) Math.round(coresReqFreq)));
    // set disk requirements
    long diskReqMb = containerResource.getDisk().asMegabytes();
    resourceReqs.setDiskMb(longToInt(diskReqMb));
    // allocate a dynamic port for Prometheus/WebSink metrics
    String prometheusPortFile = getPrometheusMetricsFile(this.localConfig);
    if (prometheusPortFile == null) {
        LOG.severe("Failed to find port file for Prometheus metrics. " + "Please check metrics sinks configurations");
    } else {
        networkResource.addDynamicPorts(new Port().setLabel(METRICS_PORT));
        task.addEnv(NomadConstants.METRICS_PORT_FILE, prometheusPortFile);
        if (NomadContext.getHeronNomadMetricsServiceRegister(this.localConfig)) {
            // gather tags for the service
            List<String> tags = new LinkedList<>();
            tags.add(String.format("%s-%s", Runtime.topologyName(this.runtimeConfig), containerIndex));
            tags.addAll(Arrays.asList(NomadContext.getHeronNomadMetricsServiceAdditionalTags(this.localConfig)));
            // register metrics service with consul
            Service service = new Service()
                    .setName(getMetricsServiceName(Runtime.topologyName(this.runtimeConfig), containerIndex))
                    .setPortLabel(METRICS_PORT)
                    .setTags(tags)
                    .addChecks(new ServiceCheck()
                            .setType(NomadConstants.NOMAD_SERVICE_CHECK_TYPE)
                            .setPortLabel(METRICS_PORT)
                            .setInterval(TimeUnit.NANOSECONDS.convert(
                                    NomadContext.getHeronNomadMetricsServiceCheckIntervalSec(this.localConfig),
                                    TimeUnit.SECONDS))
                            .setTimeout(TimeUnit.NANOSECONDS.convert(
                                    NomadContext.getHeronNomadMetricsServiceCheckTimeoutSec(this.localConfig),
                                    TimeUnit.SECONDS)));
            task.addServices(service);
        }
    }
    resourceReqs.addNetworks(networkResource);
    task.setResources(resourceReqs);
    return task;
}
Also used: Task (com.hashicorp.nomad.apimodel.Task), ServiceCheck (com.hashicorp.nomad.apimodel.ServiceCheck), Port (com.hashicorp.nomad.apimodel.Port), SchedulerUtils (org.apache.heron.scheduler.utils.SchedulerUtils), Service (com.hashicorp.nomad.apimodel.Service), LinkedList (java.util.LinkedList), NetworkResource (com.hashicorp.nomad.apimodel.NetworkResource), Resources (com.hashicorp.nomad.apimodel.Resources)
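The distinctive piece of this first example is the optional Consul registration: when metrics-service registration is enabled, the scheduler attaches a Service with a health check bound to the dynamically allocated metrics port, with the check interval and timeout converted to nanoseconds. The sketch below isolates just that step; the class name, service name, tags, port label, check type, and timings are illustrative placeholders rather than Heron's configured values.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.hashicorp.nomad.apimodel.Service;
import com.hashicorp.nomad.apimodel.ServiceCheck;
import com.hashicorp.nomad.apimodel.Task;

public final class MetricsServiceSketch {
    static void registerMetricsService(Task task) {
        // tags let Consul consumers identify this container's metrics endpoint (placeholder values)
        List<String> tags = Arrays.asList("example-topology-0", "metrics");
        // the check targets the dynamic port labelled "metrics_port"; Nomad takes interval/timeout in nanoseconds
        ServiceCheck check = new ServiceCheck()
                .setType("tcp")
                .setPortLabel("metrics_port")
                .setInterval(TimeUnit.NANOSECONDS.convert(10, TimeUnit.SECONDS))
                .setTimeout(TimeUnit.NANOSECONDS.convert(5, TimeUnit.SECONDS));
        Service service = new Service()
                .setName("example-topology-metrics-0")
                .setPortLabel("metrics_port")
                .setTags(tags)
                .addChecks(check);
        task.addServices(service);
    }
}

Binding the check to the port label rather than a fixed port number is what lets the scheduler keep using dynamically allocated ports.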

Example 2 with NetworkResource

Use of com.hashicorp.nomad.apimodel.NetworkResource in project incubator-heron by apache.

In the class NomadScheduler, the method getTask:

Task getTask(String taskName, int containerIndex, Resource containerResource) {
    String nomadDriver = NomadContext.getHeronNomadDriver(this.localConfig);
    Task task = new Task();
    if (nomadDriver.equals(NomadConstants.NomadDriver.RAW_EXEC.getName())) {
        getTaskSpecRawDriver(task, taskName, containerIndex);
    } else if (nomadDriver.equals(NomadConstants.NomadDriver.DOCKER.getName())) {
        getTaskSpecDockerDriver(task, taskName, containerIndex);
    } else {
        throw new IllegalArgumentException("Invalid Nomad driver specified: " + nomadDriver);
    }
    // set resource requests
    Resources resourceReqs = new Resources();
    // configure Nomad to allocate dynamic ports
    Port[] ports = new Port[NomadConstants.EXECUTOR_PORTS.size()];
    int i = 0;
    for (SchedulerUtils.ExecutorPort port : NomadConstants.EXECUTOR_PORTS.keySet()) {
        ports[i] = new Port().setLabel(port.getName().replace("-", "_"));
        i++;
    }
    resourceReqs.addNetworks(new NetworkResource().addDynamicPorts(ports));
    // set memory requirements
    long memoryReqMb = containerResource.getRam().asMegabytes();
    resourceReqs.setMemoryMb(longToInt(memoryReqMb));
    // set CPU requirements
    double coresReq = containerResource.getCpu();
    double coresReqFreq = NomadContext.getCoreFreqMapping(this.localConfig) * coresReq;
    resourceReqs.setCpu(Integer.valueOf((int) Math.round(coresReqFreq)));
    // set disk requirements
    long diskReqMb = containerResource.getDisk().asMegabytes();
    resourceReqs.setDiskMb(longToInt(diskReqMb));
    task.setResources(resourceReqs);
    return task;
}
Also used: NetworkResource (com.hashicorp.nomad.apimodel.NetworkResource), Task (com.hashicorp.nomad.apimodel.Task), Port (com.hashicorp.nomad.apimodel.Port), SchedulerUtils (com.twitter.heron.scheduler.utils.SchedulerUtils), Resources (com.hashicorp.nomad.apimodel.Resources)
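This second example is the same pattern without the metrics plumbing: request dynamic ports by label, then translate the container's resource request into Nomad's units. Nomad sizes CPU in MHz rather than cores, which is why the scheduler multiplies the requested core count by a configured per-core frequency mapping before calling setCpu. A minimal sketch of that sizing step follows, assuming a hypothetical 2000 MHz per core; the class name, helper name, and port labels are illustrative.

import com.hashicorp.nomad.apimodel.NetworkResource;
import com.hashicorp.nomad.apimodel.Port;
import com.hashicorp.nomad.apimodel.Resources;

public final class ResourceSizingSketch {
    // hypothetical mapping: treat one requested core as 2000 MHz of Nomad CPU
    private static final double MHZ_PER_CORE = 2000.0;

    static Resources sizeResources(double requestedCores, int ramMb, int diskMb, String... portLabels) {
        // one dynamic port per label, e.g. "server_port", "metrics_port"
        Port[] ports = new Port[portLabels.length];
        for (int i = 0; i < portLabels.length; i++) {
            ports[i] = new Port().setLabel(portLabels[i]);
        }
        Resources resources = new Resources();
        resources.addNetworks(new NetworkResource().addDynamicPorts(ports));
        // Nomad expresses CPU in MHz, so convert cores to MHz and round to a whole number
        resources.setCpu((int) Math.round(requestedCores * MHZ_PER_CORE));
        resources.setMemoryMb(ramMb);
        resources.setDiskMb(diskMb);
        return resources;
    }
}

With requestedCores = 1.5 this asks Nomad for 3000 MHz, which mirrors what getCoreFreqMapping accomplishes in the schedulers above.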

Example 3 with NetworkResource

Use of com.hashicorp.nomad.apimodel.NetworkResource in project twister2 by DSC-SPIDAL.

In the class NomadController, the method getShellDriver:

private Task getShellDriver(JobAPI.Job job) {
    String taskName = job.getJobId();
    Task task = new Task();
    // get the job working directory
    String workingDirectory = NomadContext.workingDirectory(config);
    String jobWorkingDirectory = Paths.get(workingDirectory, job.getJobId()).toString();
    String configDirectoryName = Paths.get(workingDirectory, job.getJobId(), SchedulerContext.clusterType(config)).toString();
    String corePackageFile = SchedulerContext.temporaryPackagesPath(config) + "/" + SchedulerContext.corePackageFileName(config);
    String jobPackageFile = SchedulerContext.temporaryPackagesPath(config) + "/" + SchedulerContext.jobPackageFileName(config);
    String nomadScriptContent = getNomadScriptContent(config, configDirectoryName);
    task.setName(taskName);
    task.setDriver("raw_exec");
    task.addConfig(NomadContext.NOMAD_TASK_COMMAND, NomadContext.SHELL_CMD);
    String[] args = workerProcessCommand(workingDirectory, job);
    task.addConfig(NomadContext.NOMAD_TASK_COMMAND_ARGS, args);
    Template template = new Template();
    template.setEmbeddedTmpl(nomadScriptContent);
    template.setDestPath(NomadContext.NOMAD_SCRIPT_NAME);
    task.addTemplates(template);
    Resources resourceReqs = new Resources();
    String portNamesConfig = NomadContext.networkPortNames(config);
    String[] portNames = portNamesConfig.split(",");
    // configure Nomad to allocate dynamic ports
    Port[] ports = new Port[portNames.length];
    int i = 0;
    for (String p : portNames) {
        ports[i] = new Port().setLabel(p);
        i++;
    }
    NetworkResource networkResource = new NetworkResource();
    networkResource.addDynamicPorts(ports);
    resourceReqs.addNetworks(networkResource);
    JobAPI.ComputeResource computeResource = JobUtils.getComputeResource(job, 0);
    if (computeResource == null) {
        LOG.log(Level.SEVERE, "Error: there is no compute resource");
        return null;
    }
    int cpu = (int) computeResource.getCpu();
    int disk = (int) computeResource.getDiskGigaBytes();
    int memory = computeResource.getRamMegaBytes();
    resourceReqs.setCpu(cpu * 200);
    resourceReqs.setMemoryMb(memory);
    resourceReqs.setDiskMb(disk * 1024);
    LOG.log(Level.INFO, "Compute resources are " + cpu + " " + memory + " " + disk);
    Map<String, String> envVars = new HashMap<>();
    envVars.put(NomadContext.WORKING_DIRECTORY_ENV, NomadContext.workingDirectory(config));
    if (!NomadContext.sharedFileSystem(config)) {
        envVars.put(NomadContext.DOWNLOAD_PACKAGE_ENV, "false");
    } else {
        envVars.put(NomadContext.DOWNLOAD_PACKAGE_ENV, "true");
    }
    // pass the core and job package locations as environment variables
    envVars.put(NomadContext.CORE_PACKAGE_ENV, corePackageFile);
    envVars.put(NomadContext.JOB_PACKAGE_ENV, jobPackageFile);
    task.setEnv(envVars);
    task.setResources(resourceReqs);
    return task;
}
Also used: Task (com.hashicorp.nomad.apimodel.Task), HashMap (java.util.HashMap), Port (com.hashicorp.nomad.apimodel.Port), JobAPI (edu.iu.dsc.tws.proto.system.job.JobAPI), Template (com.hashicorp.nomad.apimodel.Template), NetworkResource (com.hashicorp.nomad.apimodel.NetworkResource), Resources (com.hashicorp.nomad.apimodel.Resources)
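The twister2 controller layers two more techniques on top of the dynamic ports: it embeds a shell script in the job via a Nomad Template, which Nomad renders to destPath inside the allocation, and runs it with the raw_exec driver. The sketch below strips that combination to its essentials; the class name, script body, paths, port label, resource sizes, and the assumed raw_exec config keys "command" and "args" are placeholders for illustration, not values taken from twister2.

import com.hashicorp.nomad.apimodel.NetworkResource;
import com.hashicorp.nomad.apimodel.Port;
import com.hashicorp.nomad.apimodel.Resources;
import com.hashicorp.nomad.apimodel.Task;
import com.hashicorp.nomad.apimodel.Template;

public final class RawExecTemplateSketch {
    static Task buildShellTask() {
        Task task = new Task();
        task.setName("worker-0");
        task.setDriver("raw_exec");
        // assumed raw_exec config keys: run a shell that executes the templated script
        task.addConfig("command", "/bin/sh");
        task.addConfig("args", new String[] {"local/run-worker.sh"});
        // embed the script body in the job; Nomad writes it to destPath inside the allocation
        Template script = new Template();
        script.setEmbeddedTmpl("#!/bin/sh\necho starting worker\n");
        script.setDestPath("local/run-worker.sh");
        task.addTemplates(script);
        // one dynamic port plus placeholder resource sizes
        Resources resources = new Resources();
        resources.addNetworks(new NetworkResource().addDynamicPorts(new Port().setLabel("worker_port")));
        resources.setCpu(200);
        resources.setMemoryMb(512);
        resources.setDiskMb(1024);
        task.setResources(resources);
        return task;
    }
}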

Aggregations

NetworkResource (com.hashicorp.nomad.apimodel.NetworkResource): 3
Port (com.hashicorp.nomad.apimodel.Port): 3
Resources (com.hashicorp.nomad.apimodel.Resources): 3
Task (com.hashicorp.nomad.apimodel.Task): 3
Service (com.hashicorp.nomad.apimodel.Service): 1
ServiceCheck (com.hashicorp.nomad.apimodel.ServiceCheck): 1
Template (com.hashicorp.nomad.apimodel.Template): 1
SchedulerUtils (com.twitter.heron.scheduler.utils.SchedulerUtils): 1
JobAPI (edu.iu.dsc.tws.proto.system.job.JobAPI): 1
HashMap (java.util.HashMap): 1
LinkedList (java.util.LinkedList): 1
SchedulerUtils (org.apache.heron.scheduler.utils.SchedulerUtils): 1