Use of org.apache.tez.runtime.api.events.CompositeDataMovementEvent in the Apache Tez project.
Class TestOrderedPartitionedKVOutput2, method testNonStartedOutput.
@Test(timeout = 5000)
public void testNonStartedOutput() throws IOException {
  OutputContext outputContext = OutputTestHelpers.createOutputContext(conf, workingDir);
  int numPartitions = 10;
  OrderedPartitionedKVOutput output = new OrderedPartitionedKVOutput(outputContext, numPartitions);
  output.initialize();
  List<Event> events = output.close();
  assertEquals(2, events.size());
  Event event1 = events.get(0);
  assertTrue(event1 instanceof VertexManagerEvent);
  Event event2 = events.get(1);
  assertTrue(event2 instanceof CompositeDataMovementEvent);
  CompositeDataMovementEvent cdme = (CompositeDataMovementEvent) event2;
  ByteBuffer bb = cdme.getUserPayload();
  ShuffleUserPayloads.DataMovementEventPayloadProto shufflePayload =
      ShuffleUserPayloads.DataMovementEventPayloadProto.parseFrom(ByteString.copyFrom(bb));
  assertTrue(shufflePayload.hasEmptyPartitions());
  byte[] emptyPartitions =
      TezCommonUtils.decompressByteStringToByteArray(shufflePayload.getEmptyPartitions());
  BitSet emptyPartionsBitSet = TezUtilsInternal.fromByteArray(emptyPartitions);
  assertEquals(numPartitions, emptyPartionsBitSet.cardinality());
  for (int i = 0; i < numPartitions; i++) {
    assertTrue(emptyPartionsBitSet.get(i));
  }
}
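The test above pins down what an OrderedPartitionedKVOutput reports when it is closed without ever being started: a VertexManagerEvent plus a CompositeDataMovementEvent whose payload marks every partition as empty. The following is a minimal, hypothetical sketch (not taken from the Tez code base; the class name is made up) of how such an empty-partitions payload can be built and read back with the same helper methods the test uses; the import paths are the ones these helpers use in recent Tez releases.

import java.util.BitSet;

import com.google.protobuf.ByteString;

import org.apache.tez.common.TezCommonUtils;
import org.apache.tez.common.TezUtilsInternal;
import org.apache.tez.runtime.library.shuffle.impl.ShuffleUserPayloads;

public class EmptyPartitionsPayloadSketch {
  public static void main(String[] args) throws Exception {
    int numPartitions = 10;
    // Mark every partition as empty, mirroring the non-started output case above.
    BitSet emptyPartitions = new BitSet(numPartitions);
    emptyPartitions.set(0, numPartitions);

    // Serialize and compress the bitset into the shuffle payload proto.
    ByteString compressed = TezCommonUtils.compressByteArrayToByteString(
        TezUtilsInternal.toByteArray(emptyPartitions));
    ShuffleUserPayloads.DataMovementEventPayloadProto payload =
        ShuffleUserPayloads.DataMovementEventPayloadProto.newBuilder()
            .setEmptyPartitions(compressed)
            .build();

    // Decode it exactly the way the tests do.
    BitSet decoded = TezUtilsInternal.fromByteArray(
        TezCommonUtils.decompressByteStringToByteArray(payload.getEmptyPartitions()));
    System.out.println("empty partitions: " + decoded.cardinality());  // expected: 10
  }
}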
Use of org.apache.tez.runtime.api.events.CompositeDataMovementEvent in the Apache Tez project.
Class TestUnorderedPartitionedKVOutput2, method testNonStartedOutput.
@Test(timeout = 5000)
public void testNonStartedOutput() throws Exception {
  OutputContext outputContext = OutputTestHelpers.createOutputContext();
  int numPartitions = 1;
  UnorderedPartitionedKVOutput output = new UnorderedPartitionedKVOutput(outputContext, numPartitions);
  output.initialize();
  List<Event> events = output.close();
  assertEquals(1, events.size());
  Event event1 = events.get(0);
  assertTrue(event1 instanceof CompositeDataMovementEvent);
  CompositeDataMovementEvent dme = (CompositeDataMovementEvent) event1;
  ByteBuffer bb = dme.getUserPayload();
  ShuffleUserPayloads.DataMovementEventPayloadProto shufflePayload =
      ShuffleUserPayloads.DataMovementEventPayloadProto.parseFrom(ByteString.copyFrom(bb));
  assertTrue(shufflePayload.hasEmptyPartitions());
  byte[] emptyPartitions =
      TezCommonUtils.decompressByteStringToByteArray(shufflePayload.getEmptyPartitions());
  BitSet emptyPartionsBitSet = TezUtilsInternal.fromByteArray(emptyPartitions);
  assertEquals(numPartitions, emptyPartionsBitSet.cardinality());
  for (int i = 0; i < numPartitions; i++) {
    assertTrue(emptyPartionsBitSet.get(i));
  }
}
Use of org.apache.tez.runtime.api.events.CompositeDataMovementEvent in the Apache Tez project.
Class TestCompositeDataMovementEvent, method testGetEvents.
@Test(timeout = 5000)
public void testGetEvents() {
  int numOutputs = 0;
  int startIndex = 1;
  CompositeDataMovementEvent cdme2 = CompositeDataMovementEvent.create(startIndex, numOutputs, userPayload);
  for (DataMovementEvent dme : cdme2.getEvents()) {
    numOutputs++;
  }
  Assert.assertEquals(numOutputs, cdme2.getCount());
}
Use of org.apache.tez.runtime.api.events.CompositeDataMovementEvent in the Apache Tez project.
Class TestCompositeDataMovementEvent, method testGetCount.
@Test(timeout = 5000)
public void testGetCount() {
  int numPartitions = 2;
  int startIndex = 2;
  CompositeDataMovementEvent cdme1 = CompositeDataMovementEvent.create(startIndex, numPartitions, userPayload);
  Assert.assertEquals(numPartitions, cdme1.getCount());
  Assert.assertEquals(startIndex, cdme1.getSourceIndexStart());
}
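Together, the two tests above fix the contract between the factory arguments and the accessors: create(startIndex, count, payload) produces an event whose getCount() is count, whose getSourceIndexStart() is startIndex, and whose getEvents() expands into exactly count DataMovementEvents with consecutive source indices. Below is a minimal sketch of that expansion; the class name and payload bytes are made up for illustration.

import java.nio.ByteBuffer;

import org.apache.tez.runtime.api.events.CompositeDataMovementEvent;
import org.apache.tez.runtime.api.events.DataMovementEvent;

public class CompositeExpansionSketch {
  public static void main(String[] args) {
    ByteBuffer payload = ByteBuffer.wrap(new byte[] { 1, 2, 3 });
    // A composite event covering three physical outputs, starting at source index 2.
    CompositeDataMovementEvent cdme = CompositeDataMovementEvent.create(2, 3, payload);

    int expanded = 0;
    for (DataMovementEvent dme : cdme.getEvents()) {
      // Each expanded event carries the next consecutive source index,
      // starting at getSourceIndexStart().
      System.out.println("source index: " + dme.getSourceIndex());
      expanded++;
    }
    System.out.println(expanded == cdme.getCount());  // expected: true
  }
}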
Use of org.apache.tez.runtime.api.events.CompositeDataMovementEvent in the Apache Tez project.
Class TestReduceProcessor, method testReduceProcessor.
@Test(timeout = 5000)
public void testReduceProcessor() throws Exception {
  final String dagName = "mrdag0";
  String mapVertexName = MultiStageMRConfigUtil.getInitialMapVertexName();
  String reduceVertexName = MultiStageMRConfigUtil.getFinalReduceVertexName();
  JobConf jobConf = new JobConf(defaultConf);
  setUpJobConf(jobConf);
  MRHelpers.translateMRConfToTez(jobConf);
  jobConf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 0);
  jobConf.set(MRFrameworkConfigs.TASK_LOCAL_RESOURCE_DIR,
      new Path(workDir, "localized-resources").toUri().toString());
  jobConf.setBoolean(MRJobConfig.MR_TEZ_SPLITS_VIA_EVENTS, false);
  Path mapInput = new Path(workDir, "map0");
  MapUtils.generateInputSplit(localFs, workDir, jobConf, mapInput, 10);
  InputSpec mapInputSpec = new InputSpec("NullSrcVertex",
      InputDescriptor.create(MRInputLegacy.class.getName())
          .setUserPayload(UserPayload.create(ByteBuffer.wrap(
              MRRuntimeProtos.MRInputUserPayloadProto.newBuilder()
                  .setConfigurationBytes(TezUtils.createByteStringFromConf(jobConf))
                  .build().toByteArray()))),
      1);
  OutputSpec mapOutputSpec = new OutputSpec("NullDestVertex",
      OutputDescriptor.create(OrderedPartitionedKVOutput.class.getName())
          .setUserPayload(TezUtils.createUserPayloadFromConf(jobConf)),
      1);
  // Run a map
  TestUmbilical testUmbilical = new TestUmbilical();
  TezSharedExecutor sharedExecutor = new TezSharedExecutor(jobConf);
  LogicalIOProcessorRuntimeTask mapTask = MapUtils.createLogicalTask(localFs, workDir, jobConf, 0,
      mapInput, testUmbilical, dagName, mapVertexName,
      Collections.singletonList(mapInputSpec), Collections.singletonList(mapOutputSpec),
      sharedExecutor);
  mapTask.initialize();
  mapTask.run();
  mapTask.close();
  // One VME, One DME
  Assert.assertEquals(2, testUmbilical.getEvents().size());
  Assert.assertEquals(EventType.VERTEX_MANAGER_EVENT, testUmbilical.getEvents().get(0).getEventType());
  Assert.assertEquals(EventType.COMPOSITE_DATA_MOVEMENT_EVENT, testUmbilical.getEvents().get(1).getEventType());
  CompositeDataMovementEvent cdmEvent =
      (CompositeDataMovementEvent) testUmbilical.getEvents().get(1).getEvent();
  Assert.assertEquals(1, cdmEvent.getCount());
  DataMovementEvent dme = cdmEvent.getEvents().iterator().next();
  dme.setTargetIndex(0);
  LOG.info("Starting reduce...");
  JobTokenIdentifier identifier = new JobTokenIdentifier(new Text(dagName));
  JobTokenSecretManager jobTokenSecretManager = new JobTokenSecretManager();
  Token<JobTokenIdentifier> shuffleToken = new Token<JobTokenIdentifier>(identifier, jobTokenSecretManager);
  shuffleToken.setService(identifier.getJobId());
  jobConf.setOutputFormat(SequenceFileOutputFormat.class);
  jobConf.set(MRFrameworkConfigs.TASK_LOCAL_RESOURCE_DIR,
      new Path(workDir, "localized-resources").toUri().toString());
  jobConf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH, true);
  FileOutputFormat.setOutputPath(jobConf, new Path(workDir, "output"));
  ProcessorDescriptor reduceProcessorDesc = ProcessorDescriptor.create(ReduceProcessor.class.getName())
      .setUserPayload(TezUtils.createUserPayloadFromConf(jobConf));
  InputSpec reduceInputSpec = new InputSpec(mapVertexName,
      InputDescriptor.create(OrderedGroupedInputLegacy.class.getName())
          .setUserPayload(TezUtils.createUserPayloadFromConf(jobConf)),
      1);
  OutputSpec reduceOutputSpec = new OutputSpec("NullDestinationVertex",
      OutputDescriptor.create(MROutputLegacy.class.getName())
          .setUserPayload(TezUtils.createUserPayloadFromConf(jobConf)),
      1);
  // Now run a reduce
  TaskSpec taskSpec = new TaskSpec(TezTestUtils.getMockTaskAttemptId(0, 1, 0, 0), dagName,
      reduceVertexName, -1, reduceProcessorDesc,
      Collections.singletonList(reduceInputSpec), Collections.singletonList(reduceOutputSpec),
      null, null);
  Map<String, ByteBuffer> serviceConsumerMetadata = new HashMap<String, ByteBuffer>();
  String auxiliaryService = jobConf.get(TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID,
      TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID_DEFAULT);
  serviceConsumerMetadata.put(auxiliaryService, ShuffleUtils.convertJobTokenToBytes(shuffleToken));
  Map<String, String> serviceProviderEnvMap = new HashMap<String, String>();
  ByteBuffer shufflePortBb = ByteBuffer.allocate(4).putInt(0, 8000);
  AuxiliaryServiceHelper.setServiceDataIntoEnv(auxiliaryService, shufflePortBb, serviceProviderEnvMap);
  LogicalIOProcessorRuntimeTask task = new LogicalIOProcessorRuntimeTask(taskSpec, 0, jobConf,
      new String[] { workDir.toString() }, new TestUmbilical(), serviceConsumerMetadata,
      serviceProviderEnvMap, HashMultimap.<String, String>create(), null, "",
      new ExecutionContextImpl("localhost"), Runtime.getRuntime().maxMemory(), true,
      new DefaultHadoopShim(), sharedExecutor);
  List<Event> destEvents = new LinkedList<Event>();
  destEvents.add(dme);
  task.initialize();
  OrderedGroupedInputLegacy sortedOut = (OrderedGroupedInputLegacy) task.getInputs().values().iterator().next();
  sortedOut.handleEvents(destEvents);
  task.run();
  task.close();
  sharedExecutor.shutdownNow();
  // MRTask mrTask = (MRTask)t.getProcessor();
  // TODO NEWTEZ Verify the partitioner has not been created
  // Likely not applicable anymore.
  // Assert.assertNull(mrTask.getPartitioner());
  // Only a task commit happens, hence the data is still in the temporary directory.
  Path reduceOutputDir = new Path(new Path(workDir, "output"),
      "_temporary/0/" + IDConverter.toMRTaskIdForOutput(TezTestUtils.getMockTaskId(0, 1, 0)));
  Path reduceOutputFile = new Path(reduceOutputDir, "part-v001-o000-00000");
  SequenceFile.Reader reader = new SequenceFile.Reader(localFs, reduceOutputFile, jobConf);
  LongWritable key = new LongWritable();
  Text value = new Text();
  long prev = Long.MIN_VALUE;
  while (reader.next(key, value)) {
    // Keys must come back in strictly increasing order.
    if (prev != Long.MIN_VALUE) {
      Assert.assertTrue(prev < key.get());
    }
    prev = key.get();
  }
  reader.close();
}
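The end-to-end test above wires the map's CompositeDataMovementEvent to the reduce's input by hand: it pulls the single DataMovementEvent out of the composite event, addresses it to target index 0, and delivers it to the OrderedGroupedInputLegacy through handleEvents(). The sketch below shows that routing step in isolation; the class name and payload are placeholders, and in a real DAG the Tez AM performs this expansion and target addressing rather than the test code.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

import org.apache.tez.runtime.api.Event;
import org.apache.tez.runtime.api.events.CompositeDataMovementEvent;
import org.apache.tez.runtime.api.events.DataMovementEvent;

public class EventRoutingSketch {
  public static void main(String[] args) {
    // Producer side: one composite event describing two physical outputs.
    CompositeDataMovementEvent cdme =
        CompositeDataMovementEvent.create(0, 2, ByteBuffer.wrap(new byte[] { 42 }));

    // Routing step: expand the composite event and address each
    // DataMovementEvent to the consumer's input index before handing the
    // list to the consuming Input, as the test does via handleEvents().
    List<Event> destEvents = new ArrayList<Event>();
    for (DataMovementEvent dme : cdme.getEvents()) {
      dme.setTargetIndex(0);
      destEvents.add(dme);
    }
    System.out.println("events to deliver: " + destEvents.size());  // expected: 2
  }
}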