Example usage of org.apache.hyracks.api.exceptions.HyracksException in the Apache AsterixDB project: class JobManager, method prepareComplete.
@Override
public void prepareComplete(JobRun run, JobStatus status, List<Exception> exceptions) throws HyracksException {
checkJob(run);
if (status == JobStatus.FAILURE_BEFORE_EXECUTION) {
run.setPendingStatus(JobStatus.FAILURE, exceptions);
finalComplete(run);
return;
}
JobId jobId = run.getJobId();
HyracksException caughtException = null;
if (run.getPendingStatus() != null && run.getCleanupPendingNodeIds().isEmpty()) {
finalComplete(run);
return;
}
if (run.getPendingStatus() != null) {
LOGGER.warning("Ignoring duplicate cleanup for JobRun with id: " + jobId);
return;
}
Set<String> targetNodes = run.getParticipatingNodeIds();
run.getCleanupPendingNodeIds().addAll(targetNodes);
if (run.getPendingStatus() != JobStatus.FAILURE && run.getPendingStatus() != JobStatus.TERMINATED) {
run.setPendingStatus(status, exceptions);
}
if (targetNodes != null && !targetNodes.isEmpty()) {
INodeManager nodeManager = ccs.getNodeManager();
Set<String> toDelete = new HashSet<>();
for (String n : targetNodes) {
NodeControllerState ncs = nodeManager.getNodeControllerState(n);
try {
if (ncs == null) {
toDelete.add(n);
} else {
ncs.getNodeController().cleanUpJoblet(jobId, status);
}
} catch (Exception e) {
LOGGER.log(Level.SEVERE, e.getMessage(), e);
if (caughtException == null) {
caughtException = new HyracksException(e);
} else {
caughtException.addSuppressed(e);
}
}
}
targetNodes.removeAll(toDelete);
run.getCleanupPendingNodeIds().removeAll(toDelete);
if (run.getCleanupPendingNodeIds().isEmpty()) {
finalComplete(run);
}
} else {
finalComplete(run);
}
// throws caught exceptions if any
if (caughtException != null) {
throw caughtException;
}
}
Example usage of org.apache.hyracks.api.exceptions.HyracksException in the Apache AsterixDB project: class JobManagerTest, method testNullJob.
@Test
public void testNullJob() throws HyracksException {
IJobCapacityController jobCapacityController = mock(IJobCapacityController.class);
IJobManager jobManager = new JobManager(ccConfig, mockClusterControllerService(), jobCapacityController);
boolean invalidParameter = false;
try {
jobManager.add(null);
} catch (HyracksException e) {
invalidParameter = e.getErrorCode() == ErrorCode.INVALID_INPUT_PARAMETER;
}
Assert.assertTrue(invalidParameter);
Assert.assertTrue(jobManager.getRunningJobs().isEmpty());
Assert.assertTrue(jobManager.getPendingJobs().isEmpty());
}
Example usage of org.apache.hyracks.api.exceptions.HyracksException in the Apache AsterixDB project: class TaskCompleteWork, method performEvent.
@Override
protected void performEvent(TaskAttempt ta) {
try {
IJobManager jobManager = ccs.getJobManager();
JobRun run = jobManager.get(jobId);
if (statistics != null) {
JobProfile jobProfile = run.getJobProfile();
Map<String, JobletProfile> jobletProfiles = jobProfile.getJobletProfiles();
JobletProfile jobletProfile = jobletProfiles.get(nodeId);
if (jobletProfile == null) {
jobletProfile = new JobletProfile(nodeId);
jobletProfiles.put(nodeId, jobletProfile);
}
jobletProfile.getTaskProfiles().put(taId, statistics);
}
run.getExecutor().notifyTaskComplete(ta);
} catch (HyracksException e) {
e.printStackTrace();
}
}
Example usage of org.apache.hyracks.api.exceptions.HyracksException in the Apache AsterixDB project: class JobCapacityControllerTest, method test.
@Test
public void test() throws HyracksException {
IResourceManager resourceManager = makeResourceManagerWithCapacity(4294967296L, 33);
JobCapacityController capacityController = new JobCapacityController(resourceManager);
// Verifies the correctness of the allocate method.
Assert.assertTrue(capacityController.allocate(makeJobWithRequiredCapacity(4294967296L, 16)) == IJobCapacityController.JobSubmissionStatus.EXECUTE);
Assert.assertTrue(capacityController.allocate(makeJobWithRequiredCapacity(2147483648L, 16)) == IJobCapacityController.JobSubmissionStatus.QUEUE);
Assert.assertTrue(capacityController.allocate(makeJobWithRequiredCapacity(2147483648L, 32)) == IJobCapacityController.JobSubmissionStatus.QUEUE);
boolean exceedCapacity = false;
try {
capacityController.allocate(makeJobWithRequiredCapacity(2147483648L, 64));
} catch (HyracksException e) {
exceedCapacity = e.getErrorCode() == ErrorCode.JOB_REQUIREMENTS_EXCEED_CAPACITY;
}
Assert.assertTrue(exceedCapacity);
Assert.assertTrue(capacityController.allocate(makeJobWithRequiredCapacity(4294967296L, 32)) == IJobCapacityController.JobSubmissionStatus.QUEUE);
exceedCapacity = false;
try {
capacityController.allocate(makeJobWithRequiredCapacity(4294967297L, 33));
} catch (HyracksException e) {
exceedCapacity = e.getErrorCode() == ErrorCode.JOB_REQUIREMENTS_EXCEED_CAPACITY;
}
Assert.assertTrue(exceedCapacity);
// Verifies that the release method does not leak resource.
capacityController.release(makeJobWithRequiredCapacity(4294967296L, 16));
Assert.assertTrue(resourceManager.getCurrentCapacity().getAggregatedMemoryByteSize() == 4294967296L);
Assert.assertTrue(resourceManager.getCurrentCapacity().getAggregatedCores() == 33);
}
Example usage of org.apache.hyracks.api.exceptions.HyracksException in the Apache AsterixDB project: class Scheduler, method getLocationConstraints.
/**
* Set location constraints for a file scan operator with a list of file
* splits. It guarantees the maximum slots a machine can is at most one more
* than the minimum slots a machine can get.
*
* @throws HyracksDataException
*/
public String[] getLocationConstraints(InputSplit[] splits) throws HyracksException {
if (splits == null) {
/** deal the case when the splits array is null */
return new String[] {};
}
int[] workloads = new int[NCs.length];
Arrays.fill(workloads, 0);
String[] locations = new String[splits.length];
Map<String, IntWritable> locationToNumOfSplits = new HashMap<String, IntWritable>();
/**
* upper bound number of slots that a machine can get
*/
int upperBoundSlots = splits.length % workloads.length == 0 ? (splits.length / workloads.length) : (splits.length / workloads.length + 1);
/**
* lower bound number of slots that a machine can get
*/
int lowerBoundSlots = splits.length % workloads.length == 0 ? upperBoundSlots : upperBoundSlots - 1;
try {
Random random = new Random(System.currentTimeMillis());
boolean[] scheduled = new boolean[splits.length];
Arrays.fill(scheduled, false);
/**
* scan the splits and build the popularity map
* give the machines with less local splits more scheduling priority
*/
buildPopularityMap(splits, locationToNumOfSplits);
/**
* push data-local lower-bounds slots to each machine
*/
scheduleLocalSlots(splits, workloads, locations, lowerBoundSlots, random, scheduled, locationToNumOfSplits);
/**
* push data-local upper-bounds slots to each machine
*/
scheduleLocalSlots(splits, workloads, locations, upperBoundSlots, random, scheduled, locationToNumOfSplits);
int dataLocalCount = 0;
for (int i = 0; i < scheduled.length; i++) {
if (scheduled[i] == true) {
dataLocalCount++;
}
}
LOGGER.info("Data local rate: " + (scheduled.length == 0 ? 0.0 : ((float) dataLocalCount / (float) (scheduled.length))));
/**
* push non-data-local lower-bounds slots to each machine
*/
scheduleNonLocalSlots(splits, workloads, locations, lowerBoundSlots, scheduled);
/**
* push non-data-local upper-bounds slots to each machine
*/
scheduleNonLocalSlots(splits, workloads, locations, upperBoundSlots, scheduled);
return locations;
} catch (IOException e) {
throw new HyracksException(e);
}
}
Aggregations