Use of org.apache.hyracks.api.dataset.IDatasetPartitionManager in project asterixdb by apache.
The class AbortTasksWork, method run().
@Override
public void run() {
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("Aborting Tasks: " + jobId + ":" + tasks);
    }
    // Abort any dataset readers for this job before aborting the tasks themselves.
    IDatasetPartitionManager dpm = ncs.getDatasetPartitionManager();
    if (dpm != null) {
        dpm.abortReader(jobId);
    }
    Joblet ji = ncs.getJobletMap().get(jobId);
    if (ji != null) {
        Map<TaskAttemptId, Task> taskMap = ji.getTaskMap();
        for (TaskAttemptId taId : tasks) {
            Task task = taskMap.get(taId);
            if (task != null) {
                task.abort();
            }
        }
    } else {
        LOGGER.log(Level.WARNING,
                "Joblet couldn't be found. Tasks of job " + jobId + " have all either completed or failed");
    }
}
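For context, here is a minimal sketch of how such a work item might be scheduled on a node controller. The getWorkQueue() accessor, the AbortTasksWork constructor shape, and the package paths are assumptions inferred from the snippet above, not confirmed API; verify them against the Hyracks version in use.

import java.util.List;

import org.apache.hyracks.api.dataflow.TaskAttemptId;
import org.apache.hyracks.api.job.JobId;
import org.apache.hyracks.control.nc.NodeControllerService;
import org.apache.hyracks.control.nc.work.AbortTasksWork;

public final class AbortTasksExample {
    // Hypothetical helper: asynchronously abort the given task attempts of a job
    // by enqueuing an AbortTasksWork on the node controller's work queue.
    static void abortTasks(NodeControllerService ncs, JobId jobId, List<TaskAttemptId> attempts) {
        ncs.getWorkQueue().schedule(new AbortTasksWork(ncs, jobId, attempts));
    }
}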
Use of org.apache.hyracks.api.dataset.IDatasetPartitionManager in project asterixdb by apache.
The class ResultWriterOperatorDescriptor, method createPushRuntime().
@Override
public IOperatorNodePushable createPushRuntime(final IHyracksTaskContext ctx,
        IRecordDescriptorProvider recordDescProvider, final int partition, final int nPartitions)
        throws HyracksDataException {
    final IDatasetPartitionManager dpm = ctx.getDatasetPartitionManager();
    final IFrame frame = new VSizeFrame(ctx);
    final FrameOutputStream frameOutputStream = new FrameOutputStream(ctx.getInitialFrameSize());
    frameOutputStream.reset(frame, true);
    PrintStream printStream = new PrintStream(frameOutputStream);
    final RecordDescriptor outRecordDesc = recordDescProvider.getInputRecordDescriptor(getActivityId(), 0);
    final IResultSerializer resultSerializer =
            resultSerializerFactory.createResultSerializer(outRecordDesc, printStream);
    final FrameTupleAccessor frameTupleAccessor = new FrameTupleAccessor(outRecordDesc);
    return new AbstractUnaryInputSinkOperatorNodePushable() {
        private IFrameWriter datasetPartitionWriter;

        private boolean failed = false;

        @Override
        public void open() throws HyracksDataException {
            try {
                datasetPartitionWriter =
                        dpm.createDatasetPartitionWriter(ctx, rsId, ordered, asyncMode, partition, nPartitions);
                datasetPartitionWriter.open();
                resultSerializer.init();
            } catch (HyracksException e) {
                throw HyracksDataException.create(e);
            }
        }

        @Override
        public void nextFrame(ByteBuffer buffer) throws HyracksDataException {
            frameTupleAccessor.reset(buffer);
            for (int tIndex = 0; tIndex < frameTupleAccessor.getTupleCount(); tIndex++) {
                resultSerializer.appendTuple(frameTupleAccessor, tIndex);
                if (!frameOutputStream.appendTuple()) {
                    // The output frame is full: flush it to the partition writer,
                    // then re-serialize and re-append the tuple into the emptied frame.
                    frameOutputStream.flush(datasetPartitionWriter);
                    resultSerializer.appendTuple(frameTupleAccessor, tIndex);
                    frameOutputStream.appendTuple();
                }
            }
        }

        @Override
        public void fail() throws HyracksDataException {
            failed = true;
            datasetPartitionWriter.fail();
        }

        @Override
        public void close() throws HyracksDataException {
            try {
                // Flush any remaining buffered tuples, but only if the run did not fail.
                if (!failed && frameOutputStream.getTupleCount() > 0) {
                    frameOutputStream.flush(datasetPartitionWriter);
                }
            } catch (Exception e) {
                datasetPartitionWriter.fail();
                throw e;
            } finally {
                datasetPartitionWriter.close();
            }
        }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append("{ ");
            sb.append("\"rsId\": \"").append(rsId).append("\", ");
            sb.append("\"ordered\": ").append(ordered).append(", ");
            sb.append("\"asyncMode\": ").append(asyncMode).append(" }");
            return sb.toString();
        }
    };
}
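The nextFrame logic above is a common frame-packing idiom: serialize a tuple, try to append it to the current output frame, and if the frame is full, flush the frame to the partition writer and re-append into the emptied frame; close() flushes whatever remains. A self-contained sketch of the same idiom, using a hypothetical fixed-capacity byte buffer in place of the Hyracks frame classes:

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.function.Consumer;

public final class FramePackingSketch {
    private final int capacity;                                // stand-in for the frame size
    private final ByteArrayOutputStream frame = new ByteArrayOutputStream();
    private final Consumer<byte[]> downstream;                 // stand-in for the partition writer

    FramePackingSketch(int capacity, Consumer<byte[]> downstream) {
        this.capacity = capacity;
        this.downstream = downstream;
    }

    // Mirrors frameOutputStream.appendTuple(): returns false when the tuple no longer fits.
    private boolean tryAppend(byte[] bytes) {
        if (frame.size() + bytes.length > capacity) {
            return false;
        }
        frame.write(bytes, 0, bytes.length);
        return true;
    }

    // Mirrors the nextFrame() loop: on a full frame, flush downstream and retry once.
    // (A single tuple larger than the capacity would still be dropped here; the
    // Hyracks VSizeFrame can grow to accommodate a large tuple.)
    void append(String tuple) {
        byte[] bytes = (tuple + "\n").getBytes(StandardCharsets.UTF_8);
        if (!tryAppend(bytes)) {
            flush();
            tryAppend(bytes);
        }
    }

    // Mirrors frameOutputStream.flush(datasetPartitionWriter).
    void flush() {
        if (frame.size() > 0) {
            downstream.accept(frame.toByteArray());
            frame.reset();
        }
    }

    public static void main(String[] args) {
        FramePackingSketch sink = new FramePackingSketch(16,
                chunk -> System.out.println("flushed " + chunk.length + " bytes"));
        for (String t : List.of("alpha", "beta", "gamma", "delta")) {
            sink.append(t);
        }
        sink.flush(); // final flush, as in close() above
    }
}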
Use of org.apache.hyracks.api.dataset.IDatasetPartitionManager in project asterixdb by apache.
The class NotifyTaskFailureWork, method run().
@Override
public void run() {
    try {
        JobId jobId = task.getJobletContext().getJobId();
        // Abort any dataset readers for the failed job before reporting the failure.
        IDatasetPartitionManager dpm = ncs.getDatasetPartitionManager();
        if (dpm != null) {
            dpm.abortReader(jobId);
        }
        // Report the failure, with the collected exceptions, to the cluster controller.
        ncs.getClusterController().notifyTaskFailure(jobId, task.getTaskAttemptId(), ncs.getId(), exceptions);
        //exceptions.get(0).printStackTrace();
    } catch (Exception e) {
        e.printStackTrace();
    }
    // Whether or not the notification succeeded, drop the task from its joblet.
    task.getJoblet().removeTask(task);
}
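And a hedged sketch of how a failed task's collected exceptions might reach this work item. The constructor shape, the getWorkQueue() accessor, and the package paths are assumptions inferred from the fields used in run() above, not confirmed API:

import java.util.List;

import org.apache.hyracks.control.nc.NodeControllerService;
import org.apache.hyracks.control.nc.Task;
import org.apache.hyracks.control.nc.work.NotifyTaskFailureWork;

public final class TaskFailureExample {
    // Hypothetical helper: report a failed task, along with the exceptions it
    // accumulated, to the cluster controller via the node controller's work queue.
    static void reportFailure(NodeControllerService ncs, Task task, List<Exception> exceptions) {
        ncs.getWorkQueue().schedule(new NotifyTaskFailureWork(ncs, task, exceptions));
    }
}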