Use of org.apache.hadoop.mapreduce.JobID in project goldenorb by jzachr.
From the class VertexInput, method initialize:
/**
 * Rebuilds the serialized input split and initializes the record reader for this partition.
 */
@SuppressWarnings("unchecked")
public void initialize() {
  // rebuild the input split
  org.apache.hadoop.mapreduce.InputSplit split = null;
  DataInputBuffer splitBuffer = new DataInputBuffer();
  splitBuffer.reset(rawSplit.getBytes(), 0, rawSplit.getLength());
  SerializationFactory factory = new SerializationFactory(orbConf);
  Deserializer<? extends org.apache.hadoop.mapreduce.InputSplit> deserializer;
  try {
    deserializer = (Deserializer<? extends org.apache.hadoop.mapreduce.InputSplit>) factory
        .getDeserializer(orbConf.getClassByName(splitClass));
    deserializer.open(splitBuffer);
    split = deserializer.deserialize(null);
    JobConf job = new JobConf(orbConf);
    JobContext jobContext = new JobContext(job, new JobID(getOrbConf().getJobNumber(), 0));
    InputFormat<INPUT_KEY, INPUT_VALUE> inputFormat;
    inputFormat = (InputFormat<INPUT_KEY, INPUT_VALUE>) ReflectionUtils
        .newInstance(jobContext.getInputFormatClass(), orbConf);
    TaskAttemptContext tao = new TaskAttemptContext(job,
        new TaskAttemptID(new TaskID(jobContext.getJobID(), true, partitionID), 0));
    recordReader = inputFormat.createRecordReader(split, tao);
    recordReader.initialize(split, tao);
  } catch (ClassNotFoundException e) {
    throw new RuntimeException(e);
  } catch (IOException e) {
    throw new RuntimeException(e);
  } catch (InterruptedException e) {
    throw new RuntimeException(e);
  }
}
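This snippet targets the old Hadoop API, in which JobContext and TaskAttemptContext were concrete classes; on Hadoop 2.x and later they are interfaces, so the direct constructors above no longer compile. A minimal sketch of the equivalent setup against the Hadoop 2 API (job, getOrbConf(), and partitionID carry over from the snippet above; TaskType.MAP replaces the deprecated boolean flag in the TaskID constructor):

import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

// In Hadoop 2.x the concrete context implementations live in
// org.apache.hadoop.mapreduce.task.
JobContext jobContext = new JobContextImpl(job, new JobID(getOrbConf().getJobNumber(), 0));
TaskAttemptContext tao = new TaskAttemptContextImpl(job,
    new TaskAttemptID(new TaskID(jobContext.getJobID(), TaskType.MAP, partitionID), 0));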
Use of org.apache.hadoop.mapreduce.JobID in project goldenorb by jzachr.
From the class InputSplitAllocator, method assignInputSplits:
/**
 * This method gets the raw splits and calls another method to assign them.
 *
 * @return a Map from each OrbPartitionMember to the list of RawSplits assigned to it
 */
@SuppressWarnings({ "deprecation", "rawtypes", "unchecked" })
public Map<OrbPartitionMember, List<RawSplit>> assignInputSplits() {
  List<RawSplit> rawSplits = null;
  JobConf job = new JobConf(orbConf);
  LOG.debug(orbConf.getJobNumber().toString());
  JobContext jobContext = new JobContext(job, new JobID(orbConf.getJobNumber(), 0));
  org.apache.hadoop.mapreduce.InputFormat<?, ?> input;
  try {
    input = ReflectionUtils.newInstance(jobContext.getInputFormatClass(), orbConf);
    List<org.apache.hadoop.mapreduce.InputSplit> splits = input.getSplits(jobContext);
    rawSplits = new ArrayList<RawSplit>(splits.size());
    DataOutputBuffer buffer = new DataOutputBuffer();
    SerializationFactory factory = new SerializationFactory(orbConf);
    Serializer serializer = factory.getSerializer(splits.get(0).getClass());
    serializer.open(buffer);
    for (int i = 0; i < splits.size(); i++) {
      buffer.reset();
      serializer.serialize(splits.get(i));
      RawSplit rawSplit = new RawSplit();
      rawSplit.setClassName(splits.get(i).getClass().getName());
      rawSplit.setDataLength(splits.get(i).getLength());
      rawSplit.setBytes(buffer.getData(), 0, buffer.getLength());
      rawSplit.setLocations(splits.get(i).getLocations());
      rawSplits.add(rawSplit);
    }
  } catch (ClassNotFoundException e) {
    e.printStackTrace();
    throw new RuntimeException(e);
  } catch (IOException e) {
    e.printStackTrace();
    throw new RuntimeException(e);
  } catch (InterruptedException e) {
    e.printStackTrace();
    throw new RuntimeException(e);
  }
  return assignInputSplits(rawSplits);
}
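The two-argument JobID constructor used above takes a jobtracker identifier string and a sequence number, and the resulting ID renders in the canonical job_<identifier>_<number> form, with the number zero-padded to four digits. A quick illustration (the identifier value is made up):

import org.apache.hadoop.mapreduce.JobID;

// Construct an ID and print its canonical string form.
JobID id = new JobID("201205171429", 0);
System.out.println(id);  // job_201205171429_0000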
Use of org.apache.hadoop.mapreduce.JobID in project hbase by apache.
From the class RoundRobinTableInputFormat, method main:
/**
 * Pass the table name as an argument. Set the zk ensemble to use with the System property
 * 'hbase.zookeeper.quorum'.
 */
public static void main(String[] args) throws IOException {
  TableInputFormat tif = new RoundRobinTableInputFormat();
  final Configuration configuration = HBaseConfiguration.create();
  configuration.setBoolean("hbase.regionsizecalculator.enable", false);
  configuration.set(HConstants.ZOOKEEPER_QUORUM,
      System.getProperty(HConstants.ZOOKEEPER_QUORUM, "localhost"));
  configuration.set(TableInputFormat.INPUT_TABLE, args[0]);
  tif.setConf(configuration);
  List<InputSplit> splits = tif.getSplits(new JobContextImpl(configuration, new JobID()));
  for (InputSplit split : splits) {
    System.out.println(split);
  }
}
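A hypothetical invocation, assuming the HBase classpath is already set up and using placeholder host and table names:

java -Dhbase.zookeeper.quorum=zk1.example.com org.apache.hadoop.hbase.mapreduce.RoundRobinTableInputFormat myTable

This prints one line per split, which makes it easy to eyeball the ordering the input format produces.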
Use of org.apache.hadoop.mapreduce.JobID in project hbase by apache.
From the class MapReduceBackupCopyJob, method cancel:
@Override
public void cancel(String jobId) throws IOException {
  JobID id = JobID.forName(jobId);
  Cluster cluster = new Cluster(this.getConf());
  try {
    Job job = cluster.getJob(id);
    if (job == null) {
      LOG.error("No job found for " + id);
      // should we throw exception
      return;
    }
    if (job.isComplete() || job.isRetired()) {
      return;
    }
    job.killJob();
    LOG.debug("Killed copy job " + id);
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
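JobID.forName() is the inverse of JobID.toString(): it parses the canonical string form back into a JobID and throws IllegalArgumentException when the input does not match. A small sketch (the job ID value is made up):

import org.apache.hadoop.mapreduce.JobID;

// Round-trip a job ID through its canonical string form.
JobID id = JobID.forName("job_201205171429_0003");
System.out.println(id.getJtIdentifier());  // 201205171429
System.out.println(id.getId());            // 3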