Use of org.apache.hadoop.io.DataOutputBuffer in project hive by apache.
The class DagUtils, method createEdge.
/**
 * Given a vertex group and a child vertex, createEdge creates a
 * GroupInputEdge between them.
 *
 * @param group the parent VertexGroup
 * @param vConf the job conf of one of the parent (grouped) vertices
 * @param w the child vertex
 * @param edgeProp the edge property of the connection between the two
 *                 endpoints
 */
@SuppressWarnings("rawtypes")
public GroupInputEdge createEdge(VertexGroup group, JobConf vConf, Vertex w,
    TezEdgeProperty edgeProp, VertexType vertexType) throws IOException {
  Class mergeInputClass;
  LOG.info("Creating Edge between " + group.getGroupName() + " and " + w.getName());
  EdgeType edgeType = edgeProp.getEdgeType();
  switch (edgeType) {
  case BROADCAST_EDGE:
    mergeInputClass = ConcatenatedMergedKeyValueInput.class;
    break;
  case CUSTOM_EDGE: {
    mergeInputClass = ConcatenatedMergedKeyValueInput.class;
    int numBuckets = edgeProp.getNumBuckets();
    CustomVertexConfiguration vertexConf =
        new CustomVertexConfiguration(numBuckets, vertexType);
    // Serialize the vertex configuration into the vertex manager's user payload.
    DataOutputBuffer dob = new DataOutputBuffer();
    vertexConf.write(dob);
    VertexManagerPluginDescriptor desc =
        VertexManagerPluginDescriptor.create(CustomPartitionVertex.class.getName());
    byte[] userPayloadBytes = dob.getData();
    ByteBuffer userPayload = ByteBuffer.wrap(userPayloadBytes);
    desc.setUserPayload(UserPayload.create(userPayload));
    w.setVertexManagerPlugin(desc);
    break;
  }
  case CUSTOM_SIMPLE_EDGE:
    mergeInputClass = ConcatenatedMergedKeyValueInput.class;
    break;
  case SIMPLE_EDGE:
    setupAutoReducerParallelism(edgeProp, w);
    // Intentional fall-through: SIMPLE_EDGE uses the same merged input as the default.
  default:
    mergeInputClass = TezMergedLogicalInput.class;
    break;
  }
  return GroupInputEdge.create(group, w, createEdgeProperty(edgeProp, vConf),
      InputDescriptor.create(mergeInputClass.getName()));
}
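A note on the payload construction in the CUSTOM_EDGE branch above: DataOutputBuffer.getData() returns the buffer's entire backing array, and its contents are only valid up to getLength(). Wrapping the array without a length bound, as the snippet does, is usually harmless because the reader consumes only the fields it expects, but the length-bounded wrap is the safer idiom. A minimal sketch of that variant, reusing the names from the snippet:

DataOutputBuffer dob = new DataOutputBuffer();
vertexConf.write(dob);
// Bound the wrap with getLength() so only the bytes actually written
// end up in the user payload, not the buffer's trailing slack.
ByteBuffer userPayload = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
desc.setUserPayload(UserPayload.create(userPayload));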
Use of org.apache.hadoop.io.DataOutputBuffer in project hive by apache.
The class DagUtils, method createEdgeProperty.
/*
* Helper function to create an edge property from an edge type.
*/
private EdgeProperty createEdgeProperty(TezEdgeProperty edgeProp, Configuration conf)
    throws IOException {
  MRHelpers.translateMRConfToTez(conf);
  String keyClass = conf.get(TezRuntimeConfiguration.TEZ_RUNTIME_KEY_CLASS);
  String valClass = conf.get(TezRuntimeConfiguration.TEZ_RUNTIME_VALUE_CLASS);
  String partitionerClassName = conf.get("mapred.partitioner.class");
  Map<String, String> partitionerConf;
  EdgeType edgeType = edgeProp.getEdgeType();
  switch (edgeType) {
  case BROADCAST_EDGE:
    UnorderedKVEdgeConfig et1Conf = UnorderedKVEdgeConfig
        .newBuilder(keyClass, valClass)
        .setFromConfiguration(conf)
        .setKeySerializationClass(TezBytesWritableSerialization.class.getName(), null)
        .setValueSerializationClass(TezBytesWritableSerialization.class.getName(), null)
        .build();
    return et1Conf.createDefaultBroadcastEdgeProperty();
  case CUSTOM_EDGE:
    assert partitionerClassName != null;
    partitionerConf = createPartitionerConf(partitionerClassName, conf);
    UnorderedPartitionedKVEdgeConfig et2Conf = UnorderedPartitionedKVEdgeConfig
        .newBuilder(keyClass, valClass, MRPartitioner.class.getName(), partitionerConf)
        .setFromConfiguration(conf)
        .setKeySerializationClass(TezBytesWritableSerialization.class.getName(), null)
        .setValueSerializationClass(TezBytesWritableSerialization.class.getName(), null)
        .build();
    EdgeManagerPluginDescriptor edgeDesc =
        EdgeManagerPluginDescriptor.create(CustomPartitionEdge.class.getName());
    CustomEdgeConfiguration edgeConf =
        new CustomEdgeConfiguration(edgeProp.getNumBuckets(), null);
    // Serialize the bucketing configuration into the edge manager's user payload.
    DataOutputBuffer dob = new DataOutputBuffer();
    edgeConf.write(dob);
    byte[] userPayload = dob.getData();
    edgeDesc.setUserPayload(UserPayload.create(ByteBuffer.wrap(userPayload)));
    return et2Conf.createDefaultCustomEdgeProperty(edgeDesc);
  case CUSTOM_SIMPLE_EDGE:
    assert partitionerClassName != null;
    partitionerConf = createPartitionerConf(partitionerClassName, conf);
    UnorderedPartitionedKVEdgeConfig et3Conf = UnorderedPartitionedKVEdgeConfig
        .newBuilder(keyClass, valClass, MRPartitioner.class.getName(), partitionerConf)
        .setFromConfiguration(conf)
        .setKeySerializationClass(TezBytesWritableSerialization.class.getName(), null)
        .setValueSerializationClass(TezBytesWritableSerialization.class.getName(), null)
        .build();
    return et3Conf.createDefaultEdgeProperty();
  case SIMPLE_EDGE:
  default:
    assert partitionerClassName != null;
    partitionerConf = createPartitionerConf(partitionerClassName, conf);
    OrderedPartitionedKVEdgeConfig et4Conf = OrderedPartitionedKVEdgeConfig
        .newBuilder(keyClass, valClass, MRPartitioner.class.getName(), partitionerConf)
        .setFromConfiguration(conf)
        .setKeySerializationClass(TezBytesWritableSerialization.class.getName(),
            TezBytesComparator.class.getName(), null)
        .setValueSerializationClass(TezBytesWritableSerialization.class.getName(), null)
        .build();
    return et4Conf.createDefaultEdgeProperty();
  }
}
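Both Hive methods above rely on the same pattern: implement Writable, serialize the object into a DataOutputBuffer, and hand the bytes to Tez as a user payload. Below is a self-contained sketch of that round trip using only Hadoop's io classes; the BucketCountConfig class and its field are hypothetical stand-ins for CustomVertexConfiguration and CustomEdgeConfiguration:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Writable;

public class BucketCountConfig implements Writable {

  private int numBuckets;

  public BucketCountConfig() {
    // Writables need a no-arg constructor for deserialization.
  }

  public BucketCountConfig(int numBuckets) {
    this.numBuckets = numBuckets;
  }

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeInt(numBuckets);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    numBuckets = in.readInt();
  }

  public static void main(String[] args) throws IOException {
    // Serialize: DataOutputBuffer implements DataOutput over a growable array.
    DataOutputBuffer dob = new DataOutputBuffer();
    new BucketCountConfig(32).write(dob);

    // Deserialize: reset a DataInputBuffer over the valid region of the array.
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(dob.getData(), 0, dob.getLength());
    BucketCountConfig decoded = new BucketCountConfig();
    decoded.readFields(dib); // decoded.numBuckets is now 32
  }
}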
Use of org.apache.hadoop.io.DataOutputBuffer in project weave by continuuity.
The class YarnUtils, method encodeCredentials.
public static ByteBuffer encodeCredentials(Credentials credentials) {
  try {
    DataOutputBuffer out = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(out);
    // Wrap only the valid portion of the backing array.
    return ByteBuffer.wrap(out.getData(), 0, out.getLength());
  } catch (IOException e) {
    // Shouldn't throw: writing to an in-memory buffer cannot fail with IOException.
    LOG.error("Failed to encode Credentials.", e);
    throw Throwables.propagate(e);
  }
}
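The decode direction is not shown in this snippet. A hedged sketch of what it could look like, assuming a method name of decodeCredentials (the Credentials and DataInputByteBuffer calls are standard Hadoop API):

public static Credentials decodeCredentials(ByteBuffer buffer) throws IOException {
  DataInputByteBuffer in = new DataInputByteBuffer();
  buffer.rewind();
  in.reset(buffer);
  Credentials credentials = new Credentials();
  credentials.readTokenStorageStream(in);
  buffer.rewind(); // leave the buffer readable for other consumers
  return credentials;
}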
Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
The class AMLauncher, method setupTokens.
@Private
@VisibleForTesting
protected void setupTokens(ContainerLaunchContext container, ContainerId containerID)
    throws IOException {
  Map<String, String> environment = container.getEnvironment();
  environment.put(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV,
      application.getWebProxyBase());
  // Set AppSubmitTime to be consumable by the AM.
  ApplicationId applicationId = application.getAppAttemptId().getApplicationId();
  environment.put(ApplicationConstants.APP_SUBMIT_TIME_ENV,
      String.valueOf(rmContext.getRMApps().get(applicationId).getSubmitTime()));
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = container.getTokens();
  if (tokens != null) {
    // TODO: Don't do this kind of checks everywhere.
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    // Reading advances the buffer's position; rewind so it stays readable.
    tokens.rewind();
  }
  // Add AMRMToken
  Token<AMRMTokenIdentifier> amrmToken = createAndSetAMRMToken();
  if (amrmToken != null) {
    credentials.addToken(amrmToken.getService(), amrmToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  container.setTokens(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
}
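The tokens.rewind() call above is load-bearing: readTokenStorageStream reads the ByteBuffer through to its limit, so a second pass over the same unrewound buffer sees zero remaining bytes and fails. A minimal sketch of the failure mode (variable names are illustrative):

ByteBuffer tokens = container.getTokens();
DataInputByteBuffer dibb = new DataInputByteBuffer();
dibb.reset(tokens);
new Credentials().readTokenStorageStream(dibb); // position is now at the limit
// Without the rewind, the next reset/read would find nothing left and
// readTokenStorageStream would throw EOFException, which is exactly the
// regression the test below checks for.
tokens.rewind(); // restore position 0 so later readers see the tokens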
Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
The class TestApplicationMasterLauncher, method testSetupTokens.
@Test
public void testSetupTokens() throws Exception {
  MockRM rm = new MockRM();
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 5000);
  RMApp app = rm.submitApp(2000);
  // Kick the scheduling.
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt = app.getCurrentAppAttempt();
  MyAMLauncher launcher = new MyAMLauncher(rm.getRMContext(), attempt,
      AMLauncherEventType.LAUNCH, rm.getConfig());
  DataOutputBuffer dob = new DataOutputBuffer();
  Credentials ts = new Credentials();
  ts.writeTokenStorageToStream(dob);
  ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  ContainerLaunchContext amContainer =
      ContainerLaunchContext.newInstance(null, null, null, null, securityTokens, null);
  ContainerId containerId = ContainerId.newContainerId(attempt.getAppAttemptId(), 0L);
  try {
    launcher.setupTokens(amContainer, containerId);
  } catch (Exception e) {
    // Ignore the first, deliberately injected failure.
  }
  try {
    launcher.setupTokens(amContainer, containerId);
  } catch (java.io.EOFException e) {
    // A second read of the same token buffer must not hit EOF;
    // setupTokens rewinds the buffer after reading it.
    Assert.fail("EOFException should not happen.");
  }
}