use of org.apache.hadoop.io.DataOutputBuffer in project hive by apache.
the class DagUtils method createVertex.
private Vertex createVertex(JobConf conf, MergeJoinWork mergeJoinWork, LocalResource appJarLr,
    List<LocalResource> additionalLr, FileSystem fs, Path mrScratchDir, Context ctx,
    VertexType vertexType) throws Exception {
  Utilities.setMergeWork(conf, mergeJoinWork, mrScratchDir, false);
  if (mergeJoinWork.getMainWork() instanceof MapWork) {
    List<BaseWork> mapWorkList = mergeJoinWork.getBaseWorkList();
    MapWork mapWork = (MapWork) (mergeJoinWork.getMainWork());
    Vertex mergeVx = createVertex(conf, mapWork, appJarLr, additionalLr, fs, mrScratchDir, ctx, vertexType);
    conf.setClass("mapred.input.format.class", HiveInputFormat.class, InputFormat.class);
    // mapreduce.tez.input.initializer.serialize.event.payload should be set to false
    // when using this plug-in to avoid getting a serialized event at run-time.
    conf.setBoolean("mapreduce.tez.input.initializer.serialize.event.payload", false);
    for (int i = 0; i < mapWorkList.size(); i++) {
      mapWork = (MapWork) (mapWorkList.get(i));
      conf.set(TEZ_MERGE_CURRENT_MERGE_FILE_PREFIX, mapWork.getName());
      conf.set(Utilities.INPUT_NAME, mapWork.getName());
      LOG.info("Going through each work and adding MultiMRInput");
      mergeVx.addDataSource(mapWork.getName(),
          MultiMRInput.createConfigBuilder(conf, HiveInputFormat.class).build());
    }
    VertexManagerPluginDescriptor desc =
        VertexManagerPluginDescriptor.create(CustomPartitionVertex.class.getName());
    // the +1 to the size is because of the main work.
    CustomVertexConfiguration vertexConf = new CustomVertexConfiguration(
        mergeJoinWork.getMergeJoinOperator().getConf().getNumBuckets(), vertexType,
        mergeJoinWork.getBigTableAlias(), mapWorkList.size() + 1);
    DataOutputBuffer dob = new DataOutputBuffer();
    vertexConf.write(dob);
    byte[] userPayload = dob.getData();
    desc.setUserPayload(UserPayload.create(ByteBuffer.wrap(userPayload)));
    mergeVx.setVertexManagerPlugin(desc);
    return mergeVx;
  } else {
    Vertex mergeVx = createVertex(conf, (ReduceWork) mergeJoinWork.getMainWork(),
        appJarLr, additionalLr, fs, mrScratchDir, ctx);
    return mergeVx;
  }
}
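The createVertex snippet follows a common Tez pattern: serialize a Writable configuration into a DataOutputBuffer, wrap the bytes in a ByteBuffer, and hand that to UserPayload.create for the VertexManagerPlugin. Below is a minimal, self-contained sketch of the same round trip. PluginConfig is a hypothetical stand-in for CustomVertexConfiguration; note that bounding the wrap with getLength() avoids shipping the unused tail of the buffer's backing array.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

// Hypothetical stand-in for CustomVertexConfiguration.
class PluginConfig implements Writable {
  int numBuckets;

  PluginConfig() { }

  PluginConfig(int numBuckets) { this.numBuckets = numBuckets; }

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeInt(numBuckets);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    numBuckets = in.readInt();
  }
}

class PayloadRoundTrip {
  public static void main(String[] args) throws IOException {
    // Serialize: DataOutputBuffer grows as needed and exposes its backing array.
    DataOutputBuffer dob = new DataOutputBuffer();
    new PluginConfig(32).write(dob);

    // getData() returns the whole backing array, which may be larger than the
    // valid contents, so bound the wrap with getLength().
    ByteBuffer payload = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Deserialize on the receiving side with the mirror-image DataInputBuffer.
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(dob.getData(), dob.getLength());
    PluginConfig config = new PluginConfig();
    config.readFields(dib);
    System.out.println("numBuckets = " + config.numBuckets + ", payload bytes = " + payload.remaining());
  }
}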
use of org.apache.hadoop.io.DataOutputBuffer in project hive by apache.
the class LlapTaskCommunicator method serializeCredentials.
private ByteBuffer serializeCredentials(Credentials credentials) throws IOException {
  Credentials containerCredentials = new Credentials();
  containerCredentials.addAll(credentials);
  DataOutputBuffer containerTokens_dob = new DataOutputBuffer();
  containerCredentials.writeTokenStorageToStream(containerTokens_dob);
  return ByteBuffer.wrap(containerTokens_dob.getData(), 0, containerTokens_dob.getLength());
}
use of org.apache.hadoop.io.DataOutputBuffer in project hive by apache.
the class LlapBaseInputFormat method serializeCredentials.
private ByteBuffer serializeCredentials(Credentials credentials) throws IOException {
  Credentials containerCredentials = new Credentials();
  containerCredentials.addAll(credentials);
  DataOutputBuffer containerTokens_dob = new DataOutputBuffer();
  containerCredentials.writeTokenStorageToStream(containerTokens_dob);
  return ByteBuffer.wrap(containerTokens_dob.getData(), 0, containerTokens_dob.getLength());
}
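The two serializeCredentials helpers (identical in LlapTaskCommunicator and LlapBaseInputFormat) emit Hadoop's token-storage format, and both bound the ByteBuffer with getLength() because getData() exposes the entire backing array. A hedged sketch of the receiving side follows, using the real Hadoop APIs DataInputByteBuffer.reset and Credentials.readTokenStorageStream; the helper name deserializeCredentials is illustrative, not from the Hive source.

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.security.Credentials;

class CredentialsRoundTrip {
  // Mirror image of serializeCredentials: rebuild Credentials from the ByteBuffer.
  // Callers typically pass tokens.duplicate() so the buffer's position is untouched.
  static Credentials deserializeCredentials(ByteBuffer tokens) throws IOException {
    DataInputByteBuffer dibb = new DataInputByteBuffer();
    dibb.reset(tokens);
    Credentials credentials = new Credentials();
    credentials.readTokenStorageStream(dibb);
    return credentials;
  }
}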
use of org.apache.hadoop.io.DataOutputBuffer in project hive by apache.
the class AppMasterEventOperator method initDataBuffer.
protected void initDataBuffer(boolean skipPruning) throws HiveException {
  buffer = new DataOutputBuffer();
  try {
    // add any other header info
    getConf().writeEventHeader(buffer);
    // write byte to say whether to skip pruning or not
    buffer.writeBoolean(skipPruning);
  } catch (IOException e) {
    throw new HiveException(e);
  }
}
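Here the DataOutputBuffer backs an event payload: the event header is written first, then a single boolean byte telling the consumer whether pruning was skipped. A minimal sketch of that flag's round trip, assuming nothing about the header format; the class name SkipPruningFlag is illustrative. DataInputBuffer is the mirror-image reader, and reset() lets the output buffer be reused without reallocating its backing array.

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

class SkipPruningFlag {
  public static void main(String[] args) throws IOException {
    DataOutputBuffer buffer = new DataOutputBuffer();
    // writeBoolean stores the flag as a single byte (1 = true, 0 = false).
    buffer.writeBoolean(true);

    DataInputBuffer in = new DataInputBuffer();
    in.reset(buffer.getData(), buffer.getLength());
    System.out.println("skipPruning = " + in.readBoolean());

    // reset() rewinds the write position so the same buffer can serve the next event.
    buffer.reset();
  }
}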
use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
the class TestAppManager method testRMAppSubmitWithInvalidTokens.
@Test
public void testRMAppSubmitWithInvalidTokens() throws Exception {
  // Setup invalid security tokens: the empty DataOutputBuffer yields a zero-length token buffer.
  DataOutputBuffer dob = new DataOutputBuffer();
  ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  asContext.getAMContainerSpec().setTokens(securityTokens);
  try {
    appMonitor.submitApplication(asContext, "test");
    Assert.fail("Application submission should fail because Tokens are invalid.");
  } catch (YarnException e) {
    // Exception is expected
    Assert.assertTrue("The thrown exception is not java.io.EOFException",
        e.getMessage().contains("java.io.EOFException"));
  }
  int timeoutSecs = 0;
  while ((getAppEventType() == RMAppEventType.KILL) && timeoutSecs++ < 20) {
    Thread.sleep(1000);
  }
  Assert.assertEquals("app event type sent is wrong", RMAppEventType.APP_REJECTED, getAppEventType());
  asContext.getAMContainerSpec().setTokens(null);
}
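The invalid-token setup works because the freshly created DataOutputBuffer holds zero bytes, and Credentials.readTokenStorageStream fails on its very first read (it expects a format header before any tokens), which surfaces as the java.io.EOFException the test asserts on. A small sketch reproducing that failure in isolation; the class name EmptyTokensDemo is illustrative, and the exact point of failure inside readTokenStorageStream is an implementation detail of Hadoop's token-storage format.

import java.io.EOFException;
import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.security.Credentials;

class EmptyTokensDemo {
  public static void main(String[] args) throws IOException {
    DataInputBuffer in = new DataInputBuffer();
    in.reset(new byte[0], 0);  // zero bytes, like the empty DataOutputBuffer in the test
    try {
      new Credentials().readTokenStorageStream(in);
    } catch (EOFException expected) {
      // readTokenStorageStream hits end-of-stream immediately,
      // which is why the RM rejects the submission.
      System.out.println("EOF as expected: " + expected);
    }
  }
}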