Use of org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator in the Apache Hadoop project:
class SLSRunner, method startAMFromRumenTraces.
/**
 * Parses workload information from Rumen trace files and schedules one AM
 * simulator per job found in the traces.
 *
 * Job timestamps are shifted so the first job's submit time becomes the
 * simulation baseline (time 0).
 *
 * @param containerResource resource to request for every task container
 * @param heartbeatInterval AM heartbeat interval handed to each simulator
 * @throws IOException if a trace file cannot be opened or read
 */
@SuppressWarnings("unchecked")
private void startAMFromRumenTraces(Resource containerResource, int heartbeatInterval) throws IOException {
  Configuration conf = new Configuration();
  // traces live on the local filesystem regardless of the cluster defaultFS
  conf.set("fs.defaultFS", "file:///");
  // submit time of the first job seen; all job times are shifted by this
  long baselineTimeMS = 0;
  for (String inputTrace : inputTraces) {
    File fin = new File(inputTrace);
    JobTraceReader reader = new JobTraceReader(new Path(fin.getAbsolutePath()), conf);
    try {
      LoggedJob job = null;
      while ((job = reader.getNext()) != null) {
        // only support MapReduce currently
        String jobType = "mapreduce";
        String user = job.getUser() == null ? "default" : job.getUser().getValue();
        String jobQueue = job.getQueue().getValue();
        String oldJobId = job.getJobID().toString();
        long jobStartTimeMS = job.getSubmitTime();
        long jobFinishTimeMS = job.getFinishTime();
        if (baselineTimeMS == 0) {
          baselineTimeMS = jobStartTimeMS;
        }
        jobStartTimeMS -= baselineTimeMS;
        jobFinishTimeMS -= baselineTimeMS;
        if (jobStartTimeMS < 0) {
          LOG.warn("Warning: reset job " + oldJobId + " start time to 0.");
          // clamp the start to 0 but keep the job's duration unchanged
          jobFinishTimeMS = jobFinishTimeMS - jobStartTimeMS;
          jobStartTimeMS = 0;
        }
        boolean isTracked = trackedApps.contains(oldJobId);
        int queueSize = queueAppNumMap.containsKey(jobQueue) ? queueAppNumMap.get(jobQueue) : 0;
        queueSize++;
        queueAppNumMap.put(jobQueue, queueSize);
        List<ContainerSimulator> containerList = new ArrayList<ContainerSimulator>();
        // task container requests: priority 10 for maps, 20 for reduces
        addTaskContainers(containerList, job.getMapTasks(), containerResource, 10, "map");
        addTaskContainers(containerList, job.getReduceTasks(), containerResource, 20, "reduce");
        // create a new AM
        AMSimulator amSim = (AMSimulator) ReflectionUtils.newInstance(amClassMap.get(jobType), conf);
        if (amSim != null) {
          amSim.init(AM_ID++, heartbeatInterval, containerList, rm, this, jobStartTimeMS, jobFinishTimeMS, user, jobQueue, isTracked, oldJobId);
          runner.schedule(amSim);
          maxRuntime = Math.max(maxRuntime, jobFinishTimeMS);
          numTasks += containerList.size();
          amMap.put(oldJobId, amSim);
        }
      }
    } finally {
      reader.close();
    }
  }
}

/**
 * Converts the last attempt of each logged task into a ContainerSimulator
 * and appends it to containerList. Tasks without any attempt are skipped.
 *
 * @param containerList destination list for the created simulators
 * @param tasks logged map or reduce tasks from the trace
 * @param containerResource resource to request for each container
 * @param priority request priority (10 for maps, 20 for reduces)
 * @param type task type string, "map" or "reduce"
 */
private void addTaskContainers(List<ContainerSimulator> containerList, List<LoggedTask> tasks, Resource containerResource, int priority, String type) {
  for (LoggedTask task : tasks) {
    if (task.getAttempts().size() == 0) {
      continue;
    }
    // use the last attempt as the representative run of this task
    LoggedTaskAttempt taskAttempt = task.getAttempts().get(task.getAttempts().size() - 1);
    String hostname = taskAttempt.getHostName().getValue();
    long containerLifeTime = taskAttempt.getFinishTime() - taskAttempt.getStartTime();
    containerList.add(new ContainerSimulator(containerResource, containerLifeTime, hostname, priority, type));
  }
}
Use of org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator in the Apache Hadoop project:
class MRAMSimulator, method init.
/**
 * Initializes this MapReduce AM simulator. Common setup is delegated to the
 * superclass; afterwards the supplied containers are partitioned into map
 * and reduce task lists, each tagged with its scheduling priority, and the
 * task totals are recorded.
 */
public void init(int id, int heartbeatInterval, List<ContainerSimulator> containerList, ResourceManager rm, SLSRunner se, long traceStartTime, long traceFinishTime, String user, String queue, boolean isTracked, String oldAppId) {
  super.init(id, heartbeatInterval, containerList, rm, se, traceStartTime, traceFinishTime, user, queue, isTracked, oldAppId);
  amtype = "mapreduce";
  // partition requested containers by task type and assign priorities
  for (ContainerSimulator container : containerList) {
    String taskType = container.getType();
    if (taskType.equals("map")) {
      container.setPriority(PRIORITY_MAP);
      allMaps.add(container);
    } else if (taskType.equals("reduce")) {
      container.setPriority(PRIORITY_REDUCE);
      allReduces.add(container);
    }
  }
  mapTotal = allMaps.size();
  reduceTotal = allReduces.size();
  totalContainers = mapTotal + reduceTotal;
  LOG.info(MessageFormat.format("Added new job with {0} mapper and {1} reducers", allMaps.size(), allReduces.size()));
}
Use of org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator in the Apache Hadoop project:
class MRAMSimulator, method processResponseQueue.
// Drains the AM's allocate-response queue: accounts for completed
// containers, detects overall job completion, and launches newly
// allocated containers as map or reduce tasks.
@Override
@SuppressWarnings("unchecked")
protected void processResponseQueue() throws Exception {
while (!responseQueue.isEmpty()) {
// take() does not block here: the loop condition guarantees non-empty
AllocateResponse response = responseQueue.take();
// check completed containers
if (!response.getCompletedContainersStatuses().isEmpty()) {
for (ContainerStatus cs : response.getCompletedContainersStatuses()) {
ContainerId containerId = cs.getContainerId();
if (cs.getExitStatus() == ContainerExitStatus.SUCCESS) {
// successful exit: figure out whether it was a mapper, a reducer,
// or this application's own AM container
if (assignedMaps.containsKey(containerId)) {
LOG.debug(MessageFormat.format("Application {0} has one" + "mapper finished ({1}).", appId, containerId));
assignedMaps.remove(containerId);
mapFinished++;
finishedContainers++;
} else if (assignedReduces.containsKey(containerId)) {
LOG.debug(MessageFormat.format("Application {0} has one" + "reducer finished ({1}).", appId, containerId));
assignedReduces.remove(containerId);
reduceFinished++;
finishedContainers++;
} else if (amContainer.getId().equals(containerId)) {
// am container released event
isFinished = true;
LOG.info(MessageFormat.format("Application {0} goes to " + "finish.", appId));
}
// NOTE(review): this check runs once per completed container, so
// lastStep() can be reached more than once within a single response
// — presumably lastStep() is idempotent; confirm.
if (mapFinished >= mapTotal && reduceFinished >= reduceTotal) {
lastStep();
}
} else {
// container to be killed
// non-success exit: move the task back to the failed-pending queue
// so it is re-requested, or note that the AM itself was killed
if (assignedMaps.containsKey(containerId)) {
LOG.debug(MessageFormat.format("Application {0} has one " + "mapper killed ({1}).", appId, containerId));
pendingFailedMaps.add(assignedMaps.remove(containerId));
} else if (assignedReduces.containsKey(containerId)) {
LOG.debug(MessageFormat.format("Application {0} has one " + "reducer killed ({1}).", appId, containerId));
pendingFailedReduces.add(assignedReduces.remove(containerId));
} else if (amContainer.getId().equals(containerId)) {
LOG.info(MessageFormat.format("Application {0}'s AM is " + "going to be killed. Waiting for rescheduling...", appId));
}
}
}
}
// check finished
// all tasks done while the AM container is still up: stop processing and
// mark the application finished so the AM container gets cleaned up
if (isAMContainerRunning && (mapFinished >= mapTotal) && (reduceFinished >= reduceTotal)) {
isAMContainerRunning = false;
LOG.debug(MessageFormat.format("Application {0} sends out event " + "to clean up its AM container.", appId));
isFinished = true;
// NOTE(review): breaking here discards any allocated containers in the
// remaining queued responses; assumed intentional once the job is done.
break;
}
// check allocated containers
// newly granted containers are assigned maps first, then reduces; the
// node-side simulator is told to run the container for its recorded
// lifetime
for (Container container : response.getAllocatedContainers()) {
if (!scheduledMaps.isEmpty()) {
ContainerSimulator cs = scheduledMaps.remove();
LOG.debug(MessageFormat.format("Application {0} starts a " + "launch a mapper ({1}).", appId, container.getId()));
assignedMaps.put(container.getId(), cs);
se.getNmMap().get(container.getNodeId()).addNewContainer(container, cs.getLifeTime());
} else if (!this.scheduledReduces.isEmpty()) {
ContainerSimulator cs = scheduledReduces.remove();
LOG.debug(MessageFormat.format("Application {0} starts a " + "launch a reducer ({1}).", appId, container.getId()));
assignedReduces.put(container.getId(), cs);
se.getNmMap().get(container.getNodeId()).addNewContainer(container, cs.getLifeTime());
}
}
}
}
Use of org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator in the Apache Hadoop project:
class MRAMSimulator, method restart.
/**
 * Restarts the application after its AM container was killed: resets the
 * finished flag, clears all pending task queues, and re-queues only the
 * tasks that have not yet completed.
 *
 * Fix: the {@code added} counter was never incremented, so the
 * {@code added >= mapTotal - mapFinished} / {@code added >= reduceTotal -
 * reduceFinished} guards only fired when zero tasks remained, and every
 * map/reduce was re-queued regardless of progress — contradicting the
 * "Only add totalMaps - finishedMaps" intent.
 */
private void restart() throws YarnException, IOException, InterruptedException {
  // clear
  isFinished = false;
  pendingFailedMaps.clear();
  pendingMaps.clear();
  pendingReduces.clear();
  pendingFailedReduces.clear();
  // Only add totalMaps - finishedMaps
  int added = 0;
  for (ContainerSimulator cs : allMaps) {
    if (added >= mapTotal - mapFinished) {
      break;
    }
    pendingMaps.add(cs);
    added++;
  }
  // And same, only add totalReduces - finishedReduces
  added = 0;
  for (ContainerSimulator cs : allReduces) {
    if (added >= reduceTotal - reduceFinished) {
      break;
    }
    pendingReduces.add(cs);
    added++;
  }
  // drop the old AM container so a new one is requested on the next step
  amContainer = null;
}
Use of org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator in the Apache Hadoop project:
class NMSimulator, method init.
/**
 * Sets up this simulated NodeManager: builds the node description from the
 * given node id string, prepares the container bookkeeping structures, and
 * registers the node with the ResourceManager.
 */
public void init(String nodeIdStr, int memory, int cores, int dispatchTime, int heartBeatInterval, ResourceManager rm) throws IOException, YarnException {
  super.init(dispatchTime, dispatchTime + 1000000L * heartBeatInterval, heartBeatInterval);
  // build the node's rack/host identity and capacity
  String[] rackAndHost = SLSUtils.getRackHostName(nodeIdStr);
  this.node = NodeInfo.newNodeInfo(rackAndHost[0], rackAndHost[1], BuilderUtils.newResource(memory, cores));
  this.rm = rm;
  // container bookkeeping: delay-queue of running simulations plus
  // thread-safe lists/maps shared with the heartbeat path
  containerQueue = new DelayQueue<ContainerSimulator>();
  runningContainers = new ConcurrentHashMap<ContainerId, ContainerSimulator>();
  amContainerList = Collections.synchronizedList(new ArrayList<ContainerId>());
  completedContainerList = Collections.synchronizedList(new ArrayList<ContainerId>());
  releasedContainerList = Collections.synchronizedList(new ArrayList<ContainerId>());
  // announce this node to the RM's resource tracker
  RegisterNodeManagerRequest registerRequest = Records.newRecord(RegisterNodeManagerRequest.class);
  registerRequest.setNodeId(node.getNodeID());
  registerRequest.setResource(node.getTotalCapability());
  registerRequest.setHttpPort(80);
  RegisterNodeManagerResponse registerResponse = rm.getResourceTrackerService().registerNodeManager(registerRequest);
  masterKey = registerResponse.getNMTokenMasterKey();
}
Aggregations