use of org.apache.hyracks.api.exceptions.HyracksException in project asterixdb by apache.
the class JobletCleanupNotificationWork method runWork.
@Override
public void runWork() {
    IJobManager jobManager = ccs.getJobManager();
    final JobRun run = jobManager.get(jobId);
    Set<String> cleanupPendingNodes = run.getCleanupPendingNodeIds();
    if (!cleanupPendingNodes.remove(nodeId)) {
        if (LOGGER.isLoggable(Level.WARNING)) {
            LOGGER.warning(nodeId + " not in pending cleanup nodes set: " + cleanupPendingNodes + " for Job: "
                    + jobId);
        }
        return;
    }
    INodeManager nodeManager = ccs.getNodeManager();
    NodeControllerState ncs = nodeManager.getNodeControllerState(nodeId);
    if (ncs != null) {
        ncs.getActiveJobIds().remove(jobId);
    }
    if (cleanupPendingNodes.isEmpty()) {
        try {
            jobManager.finalComplete(run);
        } catch (HyracksException e) {
            // Fail the job with the exception caught during final completion.
            run.getExceptions().add(e);
            run.setStatus(JobStatus.FAILURE, run.getExceptions());
        }
    }
}
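The core of runWork is a pending-set drain: each node controller acknowledges cleanup at most once, and the job is finalized only when the last pending node reports in. A minimal, self-contained sketch of that pattern (class and method names here are illustrative, not part of the Hyracks API):

import java.util.HashSet;
import java.util.Set;

public final class CleanupTracker {
    private final Set<String> pendingNodes = new HashSet<>();

    public CleanupTracker(Set<String> nodes) {
        pendingNodes.addAll(nodes);
    }

    // Returns true when this acknowledgement was the last one outstanding.
    public boolean acknowledge(String nodeId) {
        if (!pendingNodes.remove(nodeId)) {
            // Duplicate or unknown acknowledgement; ignore it, as runWork does.
            return false;
        }
        return pendingNodes.isEmpty();
    }

    public static void main(String[] args) {
        CleanupTracker tracker = new CleanupTracker(Set.of("nc1", "nc2"));
        System.out.println(tracker.acknowledge("nc1")); // false: nc2 still pending
        System.out.println(tracker.acknowledge("nc2")); // true: last node, finalize the job
    }
}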
use of org.apache.hyracks.api.exceptions.HyracksException in project asterixdb by apache.
the class RemoveDeadNodesWork method run.
@Override
public void run() {
    try {
        INodeManager nodeManager = ccs.getNodeManager();
        Pair<Collection<String>, Collection<JobId>> result = nodeManager.removeDeadNodes();
        Collection<String> deadNodes = result.getLeft();
        Collection<JobId> affectedJobIds = result.getRight();
        int size = affectedJobIds.size();
        if (size > 0) {
            if (LOGGER.isLoggable(Level.INFO)) {
                LOGGER.info("Number of affected jobs: " + size);
            }
            IJobManager jobManager = ccs.getJobManager();
            for (JobId jobId : affectedJobIds) {
                JobRun run = jobManager.get(jobId);
                if (run != null) {
                    run.getExecutor().notifyNodeFailures(deadNodes);
                }
            }
        }
        if (!deadNodes.isEmpty()) {
            ccs.getContext().notifyNodeFailure(deadNodes);
        }
    } catch (HyracksException e) {
        LOGGER.log(Level.WARNING, "Uncaught exception on notifyNodeFailure", e);
    }
}
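removeDeadNodes() hands back two collections at once via a Pair: dead node ids on the left, affected job ids on the right. A self-contained sketch of that return convention, assuming the Pair here is org.apache.commons.lang3.tuple.Pair (consistent with the getLeft/getRight calls above); the sample data is illustrative:

import java.util.Collection;
import java.util.List;
import org.apache.commons.lang3.tuple.Pair;

public final class DeadNodesResult {
    // Stand-in for INodeManager.removeDeadNodes(): job ids are modeled as
    // plain Integers to keep the sketch self-contained.
    static Pair<Collection<String>, Collection<Integer>> removeDeadNodes() {
        return Pair.<Collection<String>, Collection<Integer>>of(List.of("nc3"), List.of(42));
    }

    public static void main(String[] args) {
        Pair<Collection<String>, Collection<Integer>> result = removeDeadNodes();
        Collection<String> deadNodes = result.getLeft();
        Collection<Integer> affectedJobIds = result.getRight();
        System.out.println("dead: " + deadNodes + ", affected jobs: " + affectedJobIds);
    }
}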
use of org.apache.hyracks.api.exceptions.HyracksException in project asterixdb by apache.
the class HDFSUtils method initializeIndexingHDFSScheduler.
public static IndexingScheduler initializeIndexingHDFSScheduler(ICCServiceContext serviceCtx)
        throws HyracksDataException {
    ICCContext ccContext = serviceCtx.getCCContext();
    IndexingScheduler scheduler = null;
    try {
        scheduler = new IndexingScheduler(ccContext.getClusterControllerInfo().getClientNetAddress(),
                ccContext.getClusterControllerInfo().getClientNetPort());
    } catch (HyracksException e) {
        throw new RuntimeDataException(ErrorCode.UTIL_HDFS_UTILS_CANNOT_OBTAIN_HDFS_SCHEDULER);
    }
    return scheduler;
}
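Note that the catch block above discards the caught HyracksException, so the original stack trace never reaches the logs. Assuming RuntimeDataException offers a constructor that accepts a cause (an assumption, not verified against the AsterixDB API), a variant that preserves it would look like:

try {
    scheduler = new IndexingScheduler(ccContext.getClusterControllerInfo().getClientNetAddress(),
            ccContext.getClusterControllerInfo().getClientNetPort());
} catch (HyracksException e) {
    // Chain the cause so the underlying failure stays visible in logs
    // (assumes an (ErrorCode, Throwable) style constructor exists).
    throw new RuntimeDataException(ErrorCode.UTIL_HDFS_UTILS_CANNOT_OBTAIN_HDFS_SCHEDULER, e);
}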
use of org.apache.hyracks.api.exceptions.HyracksException in project asterixdb by apache.
the class HDFSUtils method initializeHDFSScheduler.
public static Scheduler initializeHDFSScheduler(ICCServiceContext serviceCtx) throws HyracksDataException {
    ICCContext ccContext = serviceCtx.getCCContext();
    Scheduler scheduler = null;
    try {
        scheduler = new Scheduler(ccContext.getClusterControllerInfo().getClientNetAddress(),
                ccContext.getClusterControllerInfo().getClientNetPort());
    } catch (HyracksException e) {
        throw new RuntimeDataException(ErrorCode.UTIL_HDFS_UTILS_CANNOT_OBTAIN_HDFS_SCHEDULER);
    }
    return scheduler;
}
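Both initializers share the same wrap-and-rethrow shape: run a constructor that may throw a checked HyracksException and translate the failure into a single domain error. A self-contained sketch of that pattern; the names CheckedSupplier and orWrap are illustrative, not Hyracks APIs, and IllegalStateException stands in for RuntimeDataException:

public final class SchedulerInit {
    @FunctionalInterface
    interface CheckedSupplier<T> {
        T get() throws Exception;
    }

    // Runs the supplier, translating any checked failure into an unchecked
    // exception that keeps the original as its cause.
    static <T> T orWrap(CheckedSupplier<T> supplier, String message) {
        try {
            return supplier.get();
        } catch (Exception e) {
            throw new IllegalStateException(message, e);
        }
    }

    public static void main(String[] args) {
        // Usage sketch mirroring the two initializers above: construct the
        // scheduler inside the wrapper instead of an explicit try/catch.
        String scheduler = orWrap(() -> "scheduler@127.0.0.1:1098", "cannot obtain HDFS scheduler");
        System.out.println(scheduler);
    }
}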
use of org.apache.hyracks.api.exceptions.HyracksException in project asterixdb by apache.
the class IntersectPOperator method contributeRuntimeOperator.
@Override
public void contributeRuntimeOperator(IHyracksJobBuilder builder, JobGenContext context, ILogicalOperator op,
        IOperatorSchema opSchema, IOperatorSchema[] inputSchemas, IOperatorSchema outerPlanSchema)
        throws AlgebricksException {
    // The logical operator is expected to have already checked all schema mismatch issues.
    IntersectOperator logicalOp = (IntersectOperator) op;
    int nInput = logicalOp.getNumInput();
    int[][] compareFields = new int[nInput][];
    IBinaryComparatorFactory[] comparatorFactories = JobGenHelper.variablesToAscBinaryComparatorFactories(
            logicalOp.getInputVariables(0), context.getTypeEnvironment(op), context);
    INormalizedKeyComputerFactoryProvider nkcfProvider = context.getNormalizedKeyComputerFactoryProvider();
    INormalizedKeyComputerFactory nkcf = null;
    if (nkcfProvider != null) {
        Object type = context.getTypeEnvironment(op).getVarType(logicalOp.getInputVariables(0).get(0));
        if (type != null) {
            nkcf = nkcfProvider.getNormalizedKeyComputerFactory(type, true);
        }
    }
    for (int i = 0; i < nInput; i++) {
        compareFields[i] = JobGenHelper.variablesToFieldIndexes(logicalOp.getInputVariables(i), inputSchemas[i]);
    }
    IOperatorDescriptorRegistry spec = builder.getJobSpec();
    RecordDescriptor recordDescriptor =
            JobGenHelper.mkRecordDescriptor(context.getTypeEnvironment(op), opSchema, context);
    IntersectOperatorDescriptor opDescriptor;
    try {
        opDescriptor = new IntersectOperatorDescriptor(spec, nInput, compareFields, nkcf, comparatorFactories,
                recordDescriptor);
    } catch (HyracksException e) {
        throw new AlgebricksException(e);
    }
    contributeOpDesc(builder, (AbstractLogicalOperator) op, opDescriptor);
    for (int i = 0; i < op.getInputs().size(); i++) {
        builder.contributeGraphEdge(op.getInputs().get(i).getValue(), 0, op, i);
    }
}
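The per-branch compareFields arrays map each input's comparison variables to the physical column positions they occupy in that branch's schema. A self-contained sketch of that mapping, with variables modeled as plain ints and illustrative names (the real JobGenHelper.variablesToFieldIndexes may differ in detail):

import java.util.Arrays;
import java.util.List;

public final class FieldIndexMapping {
    // Maps each comparison variable to the column position it occupies in
    // one branch's schema (the ordered list of variables that branch produces).
    static int[] variablesToFieldIndexes(List<Integer> variables, List<Integer> schema) {
        int[] fields = new int[variables.size()];
        for (int i = 0; i < variables.size(); i++) {
            fields[i] = schema.indexOf(variables.get(i));
        }
        return fields;
    }

    public static void main(String[] args) {
        // One branch produces variables $$7, $$3, $$5; the intersect compares on $$3 and $$5.
        int[] compareFields = variablesToFieldIndexes(List.of(3, 5), List.of(7, 3, 5));
        System.out.println(Arrays.toString(compareFields)); // [1, 2]
    }
}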