Use of org.apache.myriad.scheduler.constraints.Constraint in project incubator-myriad by apache.
In class ByteBufferSupport, method toByteBuffer:
public static ByteBuffer toByteBuffer(NodeTask nt) {
  // Determine the size of the ByteBuffer to allocate.
  // ServiceResourceProfile.toString() is expected to return JSON; if that ever changes,
  // this serialization will break.
  byte[] profile = toBytes(nt.getProfile().toString());
  int size = profile.length + INT_SIZE;
  Constraint constraint = nt.getConstraint();
  Constraint.Type type = constraint == null ? Type.NULL : constraint.getType();
  // One int for the constraint type ordinal.
  size += INT_SIZE;
  byte[] constraintBytes = ZERO_BYTES;
  if (constraint != null) {
    constraintBytes = toBytes(constraint.toString());
    size += constraintBytes.length + INT_SIZE;
  } else {
    size += INT_SIZE;
  }
  byte[] hostname = toBytes(nt.getHostname());
  size += hostname.length + INT_SIZE;
  if (nt.getSlaveId() != null) {
    size += nt.getSlaveId().getSerializedSize() + INT_SIZE;
  } else {
    size += INT_SIZE;
  }
  if (nt.getTaskStatus() != null) {
    size += nt.getTaskStatus().getSerializedSize() + INT_SIZE;
  } else {
    size += INT_SIZE;
  }
  if (nt.getExecutorInfo() != null) {
    size += nt.getExecutorInfo().getSerializedSize() + INT_SIZE;
  } else {
    size += INT_SIZE;
  }
  byte[] taskPrefixBytes = ZERO_BYTES;
  if (nt.getTaskPrefix() != null) {
    taskPrefixBytes = toBytes(nt.getTaskPrefix());
    size += taskPrefixBytes.length + INT_SIZE;
  }
  // Allocate and populate the buffer.
  ByteBuffer bb = createBuffer(size);
  putBytes(bb, profile);
  bb.putInt(type.ordinal());
  putBytes(bb, constraintBytes);
  putBytes(bb, hostname);
  putBytes(bb, getSlaveBytes(nt));
  putBytes(bb, getTaskBytes(nt));
  putBytes(bb, getExecutorInfoBytes(nt));
  putBytes(bb, taskPrefixBytes);
  // Make sure the buffer is positioned at the beginning before returning.
  bb.rewind();
  return bb;
}
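The method above leans on a handful of helpers (INT_SIZE, ZERO_BYTES, createBuffer, toBytes, putBytes, toString). The sketch below shows one plausible, length-prefixed implementation of those helpers; the names mirror the snippet, but the bodies are illustrative assumptions, not Myriad's actual ByteBufferSupport code.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

final class ByteBufferHelpersSketch {
  static final int INT_SIZE = Integer.SIZE / Byte.SIZE; // 4-byte length prefix per field
  static final byte[] ZERO_BYTES = new byte[0];

  // Allocate a buffer sized for all of the length-prefixed fields.
  static ByteBuffer createBuffer(int size) {
    return ByteBuffer.allocate(size);
  }

  // Null-safe String -> UTF-8 bytes.
  static byte[] toBytes(String s) {
    return s == null ? ZERO_BYTES : s.getBytes(StandardCharsets.UTF_8);
  }

  // Write an int length prefix followed by the payload bytes.
  static void putBytes(ByteBuffer bb, byte[] bytes) {
    bb.putInt(bytes == null ? 0 : bytes.length);
    if (bytes != null && bytes.length > 0) {
      bb.put(bytes);
    }
  }

  // Read a length-prefixed UTF-8 String back out of the buffer.
  static String toString(ByteBuffer bb) {
    byte[] bytes = new byte[bb.getInt()];
    bb.get(bytes);
    return new String(bytes, StandardCharsets.UTF_8);
  }
}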
Use of org.apache.myriad.scheduler.constraints.Constraint in project incubator-myriad by apache.
In class ByteBufferSupport, method getConstraint:
public static Constraint getConstraint(ByteBuffer bb) {
  // Read back in the order toByteBuffer wrote the constraint segment:
  // the type ordinal first, then the length-prefixed constraint JSON.
  Constraint.Type type = Constraint.Type.values()[bb.getInt()];
  String p = toString(bb);
  switch (type) {
    case NULL:
      return null;
    case LIKE:
      if (StringUtils.isNotEmpty(p)) {
        return gson.fromJson(p, LikeConstraint.class);
      }
  }
  return null;
}
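A hedged usage sketch of the read path: fields have to be consumed in the same order toByteBuffer wrote them, so the profile JSON is read before the constraint. The package paths for NodeTask and ByteBufferSupport, and the visibility of the toString(ByteBuffer) helper, are assumptions here.

import java.nio.ByteBuffer;

import org.apache.myriad.scheduler.constraints.Constraint;
import org.apache.myriad.state.NodeTask;                 // assumed package
import org.apache.myriad.state.utils.ByteBufferSupport;  // assumed package

final class ConstraintRoundTripSketch {
  // Serializes a NodeTask and reads its Constraint back, mirroring the write order above.
  static Constraint readBackConstraint(NodeTask nodeTask) {
    ByteBuffer bb = ByteBufferSupport.toByteBuffer(nodeTask);
    String profileJson = ByteBufferSupport.toString(bb); // 1) consume the length-prefixed profile JSON
    return ByteBufferSupport.getConstraint(bb);          // 2) type ordinal + constraint JSON
  }
}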
Use of org.apache.myriad.scheduler.constraints.Constraint in project incubator-myriad by apache.
In class MyriadOperations, method flexUpCluster:
public void flexUpCluster(ServiceResourceProfile serviceResourceProfile, int instances, Constraint constraint) {
  Collection<NodeTask> nodes = new HashSet<>();
  for (int i = 0; i < instances; i++) {
    NodeTask nodeTask = new NodeTask(serviceResourceProfile, constraint);
    nodeTask.setTaskPrefix(NodeManagerConfiguration.DEFAULT_NM_TASK_PREFIX);
    nodes.add(nodeTask);
  }
  LOGGER.info("Adding {} NM instances to cluster", nodes.size());
  this.schedulerState.addNodes(nodes);
}
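A hypothetical call site for the method above, flexing up NodeManagers restricted to hosts whose name matches a pattern. The LikeConstraint constructor arguments and the package paths other than Constraint's are assumptions for illustration, not confirmed signatures.

import org.apache.myriad.scheduler.MyriadOperations;           // assumed package
import org.apache.myriad.scheduler.ServiceResourceProfile;     // assumed package
import org.apache.myriad.scheduler.constraints.Constraint;
import org.apache.myriad.scheduler.constraints.LikeConstraint; // constructor below is an assumption

final class FlexUpSketch {
  // Adds `instances` NodeManagers whose placement is constrained by a hostname pattern.
  static void flexUpOnMatchingHosts(MyriadOperations ops, ServiceResourceProfile profile,
                                    int instances, String hostnameRegex) {
    Constraint constraint = new LikeConstraint("hostname", hostnameRegex); // assumed signature
    ops.flexUpCluster(profile, instances, constraint);
  }
}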
Use of org.apache.myriad.scheduler.constraints.Constraint in project incubator-myriad by apache.
In class MyriadOperations, method flexDownCluster:
public void flexDownCluster(ServiceResourceProfile serviceResourceProfile, Constraint constraint, int numInstancesToScaleDown) {
  // Flex down Pending tasks, if any
  int numPendingTasksScaledDown = flexDownPendingTasks(serviceResourceProfile, constraint, numInstancesToScaleDown);
  // Flex down Staging tasks, if any
  int numStagingTasksScaledDown = flexDownStagingTasks(serviceResourceProfile, constraint, numInstancesToScaleDown - numPendingTasksScaledDown);
  // Flex down Active tasks, if any
  int numActiveTasksScaledDown = flexDownActiveTasks(serviceResourceProfile, constraint, numInstancesToScaleDown - numPendingTasksScaledDown - numStagingTasksScaledDown);
  if (numActiveTasksScaledDown + numStagingTasksScaledDown + numPendingTasksScaledDown == 0) {
    LOGGER.info("No Node Managers with profile '{}' and constraint '{}' found for scaling down.",
        serviceResourceProfile.getName(), constraint == null ? "null" : constraint.toString());
  } else {
    LOGGER.info("Flexed down {} active, {} staging and {} pending Node Managers with '{}' profile and constraint '{}'.",
        numActiveTasksScaledDown, numStagingTasksScaledDown, numPendingTasksScaledDown,
        serviceResourceProfile.getName(), constraint == null ? "null" : constraint.toString());
  }
}
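To make the ordering explicit: each stage is handed only the count still outstanding after the previous stage, so pending tasks are removed before staging tasks, and staging before active. Below is a minimal, project-agnostic sketch of that cascading pattern; it is not Myriad code.

import java.util.List;
import java.util.function.IntUnaryOperator;

final class FlexDownOrderSketch {
  // Each stage takes the remaining count and returns how many instances it actually removed.
  static int flexDownInOrder(int requested, List<IntUnaryOperator> stagesInPriorityOrder) {
    int remaining = requested;
    int totalRemoved = 0;
    for (IntUnaryOperator stage : stagesInPriorityOrder) {
      int removed = stage.applyAsInt(remaining);
      totalRemoved += removed;
      remaining -= removed;
    }
    return totalRemoved;
  }
}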
Use of org.apache.myriad.scheduler.constraints.Constraint in project incubator-myriad by apache.
In class ResourceOffersEventHandler, method onEvent:
@Override
public void onEvent(ResourceOffersEvent event, long sequence, boolean endOfBatch) throws Exception {
  SchedulerDriver driver = event.getDriver();
  List<Offer> offers = event.getOffers();
  // Don't process any offers until Myriad receives the "framework registered" notification.
  if (schedulerState.getFrameworkID() == null) {
    LOGGER.warn("Received {} offers, but declining them since Framework ID is not yet set", offers.size());
    for (Offer offer : offers) {
      driver.declineOffer(offer.getId());
    }
    return;
  }
  LOGGER.debug("Received offers {}", offers.size());
  LOGGER.debug("Pending tasks: {}", this.schedulerState.getPendingTaskIds());
  // Sort the offers so that Myriad uses reserved resources first.
  Collections.sort(offers, new Comparator<Offer>() {
    boolean isReserved(Offer o) {
      for (Protos.Resource resource : o.getResourcesList()) {
        if (resource.hasRole() && !Objects.equals(resource.getRole(), DEFAULT_ROLE)) {
          return true;
        }
      }
      return false;
    }

    @Override
    public int compare(Offer o1, Offer o2) {
      boolean reserved1 = isReserved(o1);
      boolean reserved2 = isReserved(o2);
      if (reserved1 == reserved2) {
        return 0;
      }
      return reserved1 ? -1 : 1;
    }
  });
  driverOperationLock.lock();
  try {
    for (Iterator<Offer> iterator = offers.iterator(); iterator.hasNext(); ) {
      Offer offer = iterator.next();
      Set<NodeTask> nodeTasks = schedulerState.getNodeTasks(offer.getSlaveId());
      for (NodeTask nodeTask : nodeTasks) {
        nodeTask.setSlaveAttributes(offer.getAttributesList());
      }
      // Track pending tasks that are missing from SchedulerState. This should not happen because
      // SchedulerState's addNodes method is synchronized, but keep the safeguard in case state
      // ever gets out of sync.
      final Set<Protos.TaskID> missingTasks = Sets.newHashSet();
      Set<Protos.TaskID> pendingTasks = schedulerState.getPendingTaskIds();
      if (CollectionUtils.isNotEmpty(pendingTasks)) {
        for (Protos.TaskID pendingTaskId : pendingTasks) {
          NodeTask taskToLaunch = schedulerState.getTask(pendingTaskId);
          if (taskToLaunch == null) {
            missingTasks.add(pendingTaskId);
            LOGGER.warn("Node task for TaskID: {} does not exist", pendingTaskId);
            continue;
          }
          String taskPrefix = taskToLaunch.getTaskPrefix();
          ServiceResourceProfile profile = taskToLaunch.getProfile();
          Constraint constraint = taskToLaunch.getConstraint();
          Set<NodeTask> launchedTasks = new HashSet<>();
          launchedTasks.addAll(schedulerState.getActiveTasksByType(taskPrefix));
          launchedTasks.addAll(schedulerState.getStagingTasksByType(taskPrefix));
          ResourceOfferContainer resourceOfferContainer = new ResourceOfferContainer(offer, taskToLaunch.getProfile(), role);
          if (SchedulerUtils.isUniqueHostname(offer, taskToLaunch, launchedTasks) && resourceOfferContainer.satisfies(taskToLaunch.getProfile(), constraint)) {
            try {
              final TaskInfo task = taskFactoryMap.get(taskPrefix).createTask(resourceOfferContainer, schedulerState.getFrameworkID().get(), pendingTaskId, taskToLaunch);
              LOGGER.info("Launching task: {} using offer: {}", task.getTaskId().getValue(), offer.getId());
              LOGGER.debug("Launching task: {} with profile: {} using offer: {}", task, profile, offer);
              driver.launchTasks(Collections.singleton(offer.getId()), Collections.singleton(task));
              schedulerState.makeTaskStaging(pendingTaskId);
              // For every NM task that is launched, its ExecutorInfo currently has to be backed up
              // in the State Store. Without it, tasks corresponding to YARN containers cannot be
              // launched. This is especially important if the RM restarts.
              taskToLaunch.setExecutorInfo(task.getExecutor());
              taskToLaunch.setHostname(offer.getHostname());
              taskToLaunch.setSlaveId(offer.getSlaveId());
              schedulerState.addTask(pendingTaskId, taskToLaunch);
              // Remove the used offer from the offers list.
              iterator.remove();
              break;
            } catch (Throwable t) {
              LOGGER.error("Exception thrown while trying to create a task for {}", taskPrefix, t);
            }
          }
        }
        for (Protos.TaskID taskId : missingTasks) {
          schedulerState.removeTask(taskId);
        }
      }
    }
    for (Offer offer : offers) {
      if (SchedulerUtils.isEligibleForFineGrainedScaling(offer.getHostname(), schedulerState)) {
        if (LOGGER.isDebugEnabled()) {
          LOGGER.debug("Picking an offer from slave with hostname {} for fine grained scaling.", offer.getHostname());
        }
        offerLifecycleMgr.addOffers(offer);
      } else {
        if (LOGGER.isDebugEnabled()) {
          LOGGER.debug("Declining offer {} from slave {}.", offer, offer.getHostname());
        }
        driver.declineOffer(offer.getId());
      }
    }
  } finally {
    driverOperationLock.unlock();
  }
}
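The anonymous comparator above can also be expressed with Comparator.comparing, which makes the reserved-first intent easy to test in isolation. A sketch assuming the same DEFAULT_ROLE convention as the handler (the "*" value is an assumption; the handler's constant is not shown above):

import java.util.Comparator;
import java.util.Objects;
import org.apache.mesos.Protos;

final class ReservedFirstSketch {
  private static final String DEFAULT_ROLE = "*"; // Mesos' unreserved role; assumed to mirror the handler's constant

  // True if any resource in the offer is reserved for a non-default role.
  static boolean isReserved(Protos.Offer offer) {
    for (Protos.Resource resource : offer.getResourcesList()) {
      if (resource.hasRole() && !Objects.equals(resource.getRole(), DEFAULT_ROLE)) {
        return true;
      }
    }
    return false;
  }

  // Offers with reserved resources sort first (false sorts before true).
  static final Comparator<Protos.Offer> RESERVED_FIRST =
      Comparator.comparing(offer -> !isReserved(offer));
}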