Use of org.apache.mesos.Protos.Offer in project storm-mesos by nathanmarz.
Class MesosNimbus, method allSlotsAvailableForScheduling.
@Override
public Collection<WorkerSlot> allSlotsAvailableForScheduling(Collection<SupervisorDetails> existingSupervisors, Topologies topologies, Set<String> topologiesMissingAssignments) {
    synchronized (OFFERS_LOCK) {
        LOG.info("Currently have " + _offers.size() + " offers buffered");
        if (!topologiesMissingAssignments.isEmpty()) {
            LOG.info("Topologies that need assignments: " + topologiesMissingAssignments.toString());
        }
    }
    Integer cpu = null;
    Integer mem = null;
    // it will mess up scheduling on this cluster permanently
    for (String id : topologiesMissingAssignments) {
        TopologyDetails details = topologies.getById(id);
        int tcpu = MesosCommon.topologyCpu(_conf, details);
        int tmem = MesosCommon.topologyMem(_conf, details);
        if (cpu == null || tcpu > cpu) {
            cpu = tcpu;
        }
        if (mem == null || tmem > mem) {
            mem = tmem;
        }
    }
    // need access to how many slots are currently used to limit number of slots taken up
    List<WorkerSlot> allSlots = new ArrayList<WorkerSlot>();
    if (cpu != null && mem != null) {
        synchronized (OFFERS_LOCK) {
            for (Offer offer : _offers.values()) {
                allSlots.addAll(toSlots(offer, cpu, mem));
            }
        }
    }
    LOG.info("Number of available slots: " + allSlots.size());
    return allSlots;
}
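
The toSlots helper called above is not shown on this page. The following is a minimal sketch of what such a helper could look like, assuming one WorkerSlot per offered port for as long as the offer's cpus and mem scalars still cover one topology-sized slot; the per-port policy, the hostname-as-node-id convention, and the method body are assumptions for illustration, not the project's actual implementation.

// Hypothetical sketch of toSlots: carve WorkerSlots out of a single Offer.
private List<WorkerSlot> toSlots(Offer offer, int cpu, int mem) {
    double offerCpu = 0;
    double offerMem = 0;
    List<Long> ports = new ArrayList<Long>();
    for (Resource r : offer.getResourcesList()) {
        if ("cpus".equals(r.getName())) {
            offerCpu = r.getScalar().getValue();
        } else if ("mem".equals(r.getName())) {
            offerMem = r.getScalar().getValue();
        } else if ("ports".equals(r.getName())) {
            // Flatten the offered port ranges into individual port numbers.
            for (Range range : r.getRanges().getRangeList()) {
                for (long p = range.getBegin(); p <= range.getEnd(); p++) {
                    ports.add(p);
                }
            }
        }
    }
    List<WorkerSlot> slots = new ArrayList<WorkerSlot>();
    // One slot per offered port, while the remaining cpu/mem budget still fits a slot.
    for (long port : ports) {
        if (offerCpu < cpu || offerMem < mem) {
            break;
        }
        slots.add(new WorkerSlot(offer.getHostname(), (int) port));
        offerCpu -= cpu;
        offerMem -= mem;
    }
    return slots;
}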
Use of org.apache.mesos.Protos.Offer in project jesos by groupon.
Class LocalSchedulerMessageProcessor, method frameworkResourceOffers.
@Subscribe
public void frameworkResourceOffers(final ResourceOffersMessageEnvelope envelope) {
    checkState(envelope.getRecipient().equals(context.getDriverUPID()), "Received a remote message for local delivery");
    final UPID sender = envelope.getSender();
    if (!driverIsConnected(sender)) {
        return;
    }
    final ResourceOffersMessage resourceOffersMessage = envelope.getMessage();
    final List<Offer> offers = resourceOffersMessage.getOffersList();
    final List<UPID> pids = ImmutableList.copyOf(Lists.transform(resourceOffersMessage.getPidsList(), UPID.getCreateFunction()));
    checkState(offers.size() == pids.size(), "Received %s offers but only %s pids!", offers.size(), pids.size());
    int pidIndex = 0;
    for (final Offer offer : offers) {
        context.addOffer(offer.getId(), offer.getSlaveId(), pids.get(pidIndex++));
    }
    eventBus.post(new SchedulerCallback() {

        @Override
        public Runnable getCallback(final Scheduler scheduler, final SchedulerDriver schedulerDriver) {
            return new Runnable() {

                @Override
                public void run() {
                    scheduler.resourceOffers(schedulerDriver, resourceOffersMessage.getOffersList());
                }
            };
        }
    });
}
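
Offers dispatched through scheduler.resourceOffers above eventually reach user code via the standard org.apache.mesos.Scheduler interface. Below is a minimal, self-contained sketch of a scheduler that simply logs and declines every offer it is given; the class name is made up for illustration, and a real framework would build TaskInfos and launch tasks instead.

import java.util.List;

import org.apache.mesos.Protos.ExecutorID;
import org.apache.mesos.Protos.FrameworkID;
import org.apache.mesos.Protos.MasterInfo;
import org.apache.mesos.Protos.Offer;
import org.apache.mesos.Protos.OfferID;
import org.apache.mesos.Protos.SlaveID;
import org.apache.mesos.Protos.TaskStatus;
import org.apache.mesos.Scheduler;
import org.apache.mesos.SchedulerDriver;

// Illustrative scheduler (hypothetical class name): logs each incoming offer and
// declines it so the resources return to the allocator; all other callbacks are no-ops.
public class DecliningScheduler implements Scheduler {

    @Override
    public void resourceOffers(SchedulerDriver driver, List<Offer> offers) {
        for (Offer offer : offers) {
            System.out.println("Declining offer " + offer.getId().getValue()
                    + " from " + offer.getHostname());
            driver.declineOffer(offer.getId());
        }
    }

    @Override
    public void registered(SchedulerDriver driver, FrameworkID frameworkId, MasterInfo masterInfo) { }

    @Override
    public void reregistered(SchedulerDriver driver, MasterInfo masterInfo) { }

    @Override
    public void offerRescinded(SchedulerDriver driver, OfferID offerId) { }

    @Override
    public void statusUpdate(SchedulerDriver driver, TaskStatus status) { }

    @Override
    public void frameworkMessage(SchedulerDriver driver, ExecutorID executorId, SlaveID slaveId, byte[] data) { }

    @Override
    public void disconnected(SchedulerDriver driver) { }

    @Override
    public void slaveLost(SchedulerDriver driver, SlaveID slaveId) { }

    @Override
    public void executorLost(SchedulerDriver driver, ExecutorID executorId, SlaveID slaveId, int status) { }

    @Override
    public void error(SchedulerDriver driver, String message) { }
}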
Use of org.apache.mesos.Protos.Offer in project storm-mesos by nathanmarz.
Class MesosNimbus, method assignSlots.
@Override
public void assignSlots(Topologies topologies, Map<String, Collection<WorkerSlot>> slots) {
    synchronized (OFFERS_LOCK) {
        Map<OfferID, List<TaskInfo>> toLaunch = new HashMap<OfferID, List<TaskInfo>>();
        for (String topologyId : slots.keySet()) {
            for (WorkerSlot slot : slots.get(topologyId)) {
                OfferID id = findOffer(slot);
                if (id != null) {
                    Offer offer = _offers.get(id);
                    if (!toLaunch.containsKey(id)) {
                        toLaunch.put(id, new ArrayList<TaskInfo>());
                    }
                    TopologyDetails details = topologies.getById(topologyId);
                    int cpu = MesosCommon.topologyCpu(_conf, details);
                    int mem = MesosCommon.topologyMem(_conf, details);
                    Map executorData = new HashMap();
                    executorData.put(MesosCommon.SUPERVISOR_ID, slot.getNodeId() + "-" + details.getId());
                    executorData.put(MesosCommon.ASSIGNMENT_ID, slot.getNodeId());
                    String executorDataStr = JSONValue.toJSONString(executorData);
                    LOG.info("Launching task with executor data: <" + executorDataStr + ">");
                    TaskInfo task = TaskInfo.newBuilder()
                            .setName("worker " + slot.getNodeId() + ":" + slot.getPort())
                            .setTaskId(TaskID.newBuilder()
                                    .setValue(MesosCommon.taskId(slot.getNodeId(), slot.getPort())))
                            .setSlaveId(offer.getSlaveId())
                            .setExecutor(ExecutorInfo.newBuilder()
                                    .setExecutorId(ExecutorID.newBuilder().setValue(details.getId()))
                                    .setData(ByteString.copyFromUtf8(executorDataStr))
                                    .setCommand(CommandInfo.newBuilder()
                                            .addUris(URI.newBuilder().setValue((String) _conf.get(CONF_EXECUTOR_URI)))
                                            .setValue("cd storm-mesos* && python bin/storm-mesos supervisor")))
                            .addResources(Resource.newBuilder()
                                    .setName("cpus")
                                    .setType(Type.SCALAR)
                                    .setScalar(Scalar.newBuilder().setValue(cpu)))
                            .addResources(Resource.newBuilder()
                                    .setName("mem")
                                    .setType(Type.SCALAR)
                                    .setScalar(Scalar.newBuilder().setValue(mem)))
                            .addResources(Resource.newBuilder()
                                    .setName("ports")
                                    .setType(Type.RANGES)
                                    .setRanges(Ranges.newBuilder()
                                            .addRange(Range.newBuilder()
                                                    .setBegin(slot.getPort())
                                                    .setEnd(slot.getPort()))))
                            .build();
                    toLaunch.get(id).add(task);
                }
            }
        }
        for (OfferID id : toLaunch.keySet()) {
            List<TaskInfo> tasks = toLaunch.get(id);
            LOG.info("Launching tasks for offer " + id.getValue() + "\n" + tasks.toString());
            _driver.launchTasks(id, tasks);
            _offers.remove(id);
        }
    }
}
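
The findOffer helper used at the top of the loop is not shown here either. A plausible sketch follows, assuming that a slot's node id corresponds to the offer's hostname and that an offer qualifies when one of its ports ranges contains the slot's port; both the matching policy and the body are assumptions for illustration. Returning null when nothing matches lines up with the null check on id in assignSlots above.

// Hypothetical sketch of findOffer: locate a buffered offer that can host the slot.
private OfferID findOffer(WorkerSlot slot) {
    for (Offer offer : _offers.values()) {
        // Assumed convention: the slot's node id is the offer's hostname.
        if (!offer.getHostname().equals(slot.getNodeId())) {
            continue;
        }
        for (Resource r : offer.getResourcesList()) {
            if (!"ports".equals(r.getName())) {
                continue;
            }
            for (Range range : r.getRanges().getRangeList()) {
                if (range.getBegin() <= slot.getPort() && slot.getPort() <= range.getEnd()) {
                    return offer.getId();
                }
            }
        }
    }
    // No buffered offer can host this slot.
    return null;
}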