Use of org.apache.hadoop.io.DataOutputBuffer in project hive by apache.
The class ShuffleHandler, method serializeServiceData.
/**
 * A helper function to serialize the JobTokenIdentifier to be sent to the
 * ShuffleHandler as ServiceData.
 * @param jobToken the job token to be used for authentication of
 *                 shuffle data requests.
 * @return the serialized version of the jobToken.
 */
public static ByteBuffer serializeServiceData(Token<JobTokenIdentifier> jobToken)
    throws IOException {
  // TODO these bytes should be versioned
  DataOutputBuffer jobToken_dob = new DataOutputBuffer();
  jobToken.write(jobToken_dob);
  return ByteBuffer.wrap(jobToken_dob.getData(), 0, jobToken_dob.getLength());
}
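For context, a minimal sketch of the receiving side, assuming the ByteBuffer produced above is handed back unchanged. The helper name deserializeServiceData and its placement are illustrative rather than taken from the snippet above; it reads the token back using org.apache.hadoop.io.DataInputByteBuffer.

// Illustrative counterpart (name assumed): read the serialized job token back out
// of the ByteBuffer produced by serializeServiceData.
public static Token<JobTokenIdentifier> deserializeServiceData(ByteBuffer secret)
    throws IOException {
  DataInputByteBuffer in = new DataInputByteBuffer();
  in.reset(secret);
  Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>();
  jobToken.readFields(in);
  return jobToken;
}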
Use of org.apache.hadoop.io.DataOutputBuffer in project hive by apache.
The class TestInputOutputFormat, method createMockOrcFile.
/**
 * Create the binary contents of an ORC file that just has enough information
 * to test the getInputSplits.
 * @param stripeLengths the length of each stripe
 * @return the bytes of the file
 * @throws IOException
 */
static byte[] createMockOrcFile(long... stripeLengths) throws IOException {
  OrcProto.Footer.Builder footer = OrcProto.Footer.newBuilder();
  final long headerLen = 3;
  long offset = headerLen;
  DataOutputBuffer buffer = new DataOutputBuffer();
  for (long stripeLength : stripeLengths) {
    footer.addStripes(OrcProto.StripeInformation.newBuilder()
        .setOffset(offset)
        .setIndexLength(0)
        .setDataLength(stripeLength - 10)
        .setFooterLength(10)
        .setNumberOfRows(1000));
    offset += stripeLength;
  }
  // pad the buffer out to the end of the stripe data (fill is a helper defined elsewhere in this test)
  fill(buffer, offset);
  footer.addTypes(OrcProto.Type.newBuilder()
      .setKind(OrcProto.Type.Kind.STRUCT)
      .addFieldNames("col1")
      .addSubtypes(1));
  footer.addTypes(OrcProto.Type.newBuilder()
      .setKind(OrcProto.Type.Kind.STRING));
  footer.setNumberOfRows(1000 * stripeLengths.length)
      .setHeaderLength(headerLen)
      .setContentLength(offset - headerLen);
  footer.addStatistics(OrcProto.ColumnStatistics.newBuilder()
      .setNumberOfValues(1000 * stripeLengths.length).build());
  footer.addStatistics(OrcProto.ColumnStatistics.newBuilder()
      .setNumberOfValues(1000 * stripeLengths.length)
      .setStringStatistics(OrcProto.StringStatistics.newBuilder()
          .setMaximum("zzz")
          .setMinimum("aaa")
          .setSum(1000 * 3 * stripeLengths.length)
          .build())
      .build());
  footer.build().writeTo(buffer);
  int footerEnd = buffer.getLength();
  OrcProto.PostScript ps = OrcProto.PostScript.newBuilder()
      .setCompression(OrcProto.CompressionKind.NONE)
      .setFooterLength(footerEnd - offset)
      .setMagic("ORC")
      .build();
  ps.writeTo(buffer);
  // write the PostScript length as the final single byte of the file
  buffer.write(buffer.getLength() - footerEnd);
  byte[] result = new byte[buffer.getLength()];
  System.arraycopy(buffer.getData(), 0, result, 0, buffer.getLength());
  return result;
}
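As a usage note, here is a hedged sketch (not part of the Hive test; helper name hypothetical) of how the returned bytes could be picked apart again, relying only on the layout written above: the final byte holds the PostScript length, the PostScript sits immediately before it, and the Footer immediately before the PostScript. It assumes java.util.Arrays and the generated OrcProto classes are imported.

// Hypothetical read-back helper, assuming only the layout produced by createMockOrcFile.
static OrcProto.Footer readMockFooter(byte[] file) throws IOException {
  int psLen = file[file.length - 1] & 0xff;
  byte[] psBytes = Arrays.copyOfRange(file, file.length - 1 - psLen, file.length - 1);
  OrcProto.PostScript ps = OrcProto.PostScript.parseFrom(psBytes);
  int footerLen = (int) ps.getFooterLength();
  int footerStart = file.length - 1 - psLen - footerLen;
  byte[] footerBytes = Arrays.copyOfRange(file, footerStart, footerStart + footerLen);
  return OrcProto.Footer.parseFrom(footerBytes);
}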
Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
The class TestBlockToken, method testEmptyLegacyBlockTokenBytesIsLegacy.
@Test
public void testEmptyLegacyBlockTokenBytesIsLegacy() throws IOException {
  BlockTokenIdentifier emptyIdent = new BlockTokenIdentifier();
  DataOutputBuffer dob = new DataOutputBuffer(4096);
  DataInputBuffer dib = new DataInputBuffer();
  emptyIdent.writeLegacy(dob);
  byte[] emptyIdentBytes = Arrays.copyOf(dob.getData(), dob.getLength());
  BlockTokenIdentifier legacyToken = new BlockTokenIdentifier();
  BlockTokenIdentifier protobufToken = new BlockTokenIdentifier();
  BlockTokenIdentifier readToken = new BlockTokenIdentifier();
  // the legacy reader must accept the legacy bytes
  dib.reset(emptyIdentBytes, emptyIdentBytes.length);
  legacyToken.readFieldsLegacy(dib);
  // the protobuf reader must reject them
  boolean invalidProtobufMessage = false;
  try {
    dib.reset(emptyIdentBytes, emptyIdentBytes.length);
    protobufToken.readFieldsProtobuf(dib);
  } catch (IOException e) {
    invalidProtobufMessage = true;
  }
  assertTrue(invalidProtobufMessage);
  // the generic readFields() must still read the legacy bytes without throwing
  dib.reset(emptyIdentBytes, emptyIdentBytes.length);
  readToken.readFields(dib);
  assertTrue(invalidProtobufMessage);
}
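For reference, the generic DataOutputBuffer / DataInputBuffer round trip that this test builds on, shown in isolation as a self-contained sketch (class and method names hypothetical, using org.apache.hadoop.io.Text as the example Writable):

// Minimal round-trip sketch for any Writable using the reusable Hadoop buffers.
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public final class WritableRoundTrip {

  static byte[] toBytes(Writable w) throws IOException {
    DataOutputBuffer dob = new DataOutputBuffer();
    w.write(dob);
    // getData() returns the whole backing array, so trim to getLength()
    return Arrays.copyOf(dob.getData(), dob.getLength());
  }

  static <T extends Writable> T fromBytes(byte[] bytes, T reuse) throws IOException {
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(bytes, bytes.length);
    reuse.readFields(dib);
    return reuse;
  }

  public static void main(String[] args) throws IOException {
    Text copy = fromBytes(toBytes(new Text("hello")), new Text());
    System.out.println(copy); // prints "hello"
  }
}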
Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
The class RMWebServices, method createContainerLaunchContext.
/**
 * Create the ContainerLaunchContext required for the
 * ApplicationSubmissionContext. This function takes the user information and
 * generates the ByteBuffer structures required by the ContainerLaunchContext.
 *
 * @param newApp
 *          the information provided by the user
 * @return created context
 * @throws BadRequestException
 * @throws IOException
 */
protected ContainerLaunchContext createContainerLaunchContext(
    ApplicationSubmissionContextInfo newApp) throws BadRequestException, IOException {
  // create container launch context
  HashMap<String, ByteBuffer> hmap = new HashMap<String, ByteBuffer>();
  for (Map.Entry<String, String> entry : newApp.getContainerLaunchContextInfo()
      .getAuxillaryServiceData().entrySet()) {
    if (!entry.getValue().isEmpty()) {
      // auxiliary service data arrives base64-encoded; decode it before wrapping
      Base64 decoder = new Base64(0, null, true);
      byte[] data = decoder.decode(entry.getValue());
      hmap.put(entry.getKey(), ByteBuffer.wrap(data));
    }
  }
  HashMap<String, LocalResource> hlr = new HashMap<String, LocalResource>();
  for (Map.Entry<String, LocalResourceInfo> entry : newApp.getContainerLaunchContextInfo()
      .getResources().entrySet()) {
    LocalResourceInfo l = entry.getValue();
    LocalResource lr = LocalResource.newInstance(URL.fromURI(l.getUrl()), l.getType(),
        l.getVisibility(), l.getSize(), l.getTimestamp());
    hlr.put(entry.getKey(), lr);
  }
  // serialize the credentials that ship with the container
  DataOutputBuffer out = new DataOutputBuffer();
  Credentials cs = createCredentials(newApp.getContainerLaunchContextInfo().getCredentials());
  cs.writeTokenStorageToStream(out);
  // note: wraps the entire backing array, not just the getLength() bytes actually written
  ByteBuffer tokens = ByteBuffer.wrap(out.getData());
  ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(hlr,
      newApp.getContainerLaunchContextInfo().getEnvironment(),
      newApp.getContainerLaunchContextInfo().getCommands(), hmap, tokens,
      newApp.getContainerLaunchContextInfo().getAcls());
  return ctx;
}
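As a usage note, here is a minimal sketch (class and method names hypothetical) of both directions of the credentials step above: bytes written with writeTokenStorageToStream can be read back into a Credentials object, for example on the container side. Unlike the snippet above, this sketch wraps only the valid region [0, getLength()) of the DataOutputBuffer.

// Hypothetical helper class; only the Hadoop Credentials / io APIs shown are real.
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;

final class CredentialsCodec {

  // Credentials -> ByteBuffer, wrapping only the bytes actually written
  static ByteBuffer toBuffer(Credentials creds) throws IOException {
    DataOutputBuffer dob = new DataOutputBuffer();
    creds.writeTokenStorageToStream(dob);
    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  }

  // ByteBuffer -> Credentials, without disturbing the caller's buffer position
  static Credentials fromBuffer(ByteBuffer buf) throws IOException {
    byte[] bytes = new byte[buf.remaining()];
    buf.duplicate().get(bytes);
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(bytes, bytes.length);
    Credentials creds = new Credentials();
    creds.readTokenStorageStream(dib);
    return creds;
  }
}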
Use of org.apache.hadoop.io.DataOutputBuffer in project goldenorb by jzachr.
The class InputSplitAllocator, method assignInputSplits.
/**
 * Gets the raw input splits for the job and delegates to another overload of
 * assignInputSplits to assign them to partition members.
 *
 * @return a Map from each OrbPartitionMember to the list of RawSplits assigned to it
 */
@SuppressWarnings({ "deprecation", "rawtypes", "unchecked" })
public Map<OrbPartitionMember, List<RawSplit>> assignInputSplits() {
  List<RawSplit> rawSplits = null;
  JobConf job = new JobConf(orbConf);
  LOG.debug(orbConf.getJobNumber().toString());
  JobContext jobContext = new JobContext(job, new JobID(orbConf.getJobNumber(), 0));
  org.apache.hadoop.mapreduce.InputFormat<?, ?> input;
  try {
    input = ReflectionUtils.newInstance(jobContext.getInputFormatClass(), orbConf);
    List<org.apache.hadoop.mapreduce.InputSplit> splits = input.getSplits(jobContext);
    rawSplits = new ArrayList<RawSplit>(splits.size());
    DataOutputBuffer buffer = new DataOutputBuffer();
    SerializationFactory factory = new SerializationFactory(orbConf);
    Serializer serializer = factory.getSerializer(splits.get(0).getClass());
    serializer.open(buffer);
    for (int i = 0; i < splits.size(); i++) {
      // reuse the same buffer for each split; reset() rewinds it without reallocating
      buffer.reset();
      serializer.serialize(splits.get(i));
      RawSplit rawSplit = new RawSplit();
      rawSplit.setClassName(splits.get(i).getClass().getName());
      rawSplit.setDataLength(splits.get(i).getLength());
      rawSplit.setBytes(buffer.getData(), 0, buffer.getLength());
      rawSplit.setLocations(splits.get(i).getLocations());
      rawSplits.add(rawSplit);
    }
  } catch (ClassNotFoundException e) {
    e.printStackTrace();
    throw new RuntimeException(e);
  } catch (IOException e) {
    e.printStackTrace();
    throw new RuntimeException(e);
  } catch (InterruptedException e) {
    e.printStackTrace();
    throw new RuntimeException(e);
  }
  return assignInputSplits(rawSplits);
}
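To round this out, a hedged sketch (not from GoldenOrb; the helper name and signature are hypothetical) of the inverse step: a worker that receives the class name and bytes captured above could reconstruct the InputSplit with the matching Deserializer from the same SerializationFactory. It assumes the Hadoop Configuration, serialization, and io classes are imported.

// Hypothetical worker-side helper for turning a RawSplit-like record back into a split.
@SuppressWarnings("unchecked")
static org.apache.hadoop.mapreduce.InputSplit readSplit(Configuration conf,
    String className, byte[] bytes, int length) throws IOException, ClassNotFoundException {
  Class<? extends org.apache.hadoop.mapreduce.InputSplit> splitClass =
      (Class<? extends org.apache.hadoop.mapreduce.InputSplit>) conf.getClassByName(className);
  SerializationFactory factory = new SerializationFactory(conf);
  Deserializer<? extends org.apache.hadoop.mapreduce.InputSplit> deserializer =
      factory.getDeserializer(splitClass);
  DataInputBuffer in = new DataInputBuffer();
  in.reset(bytes, 0, length);
  deserializer.open(in);
  // passing null asks the deserializer to allocate a fresh split instance
  return deserializer.deserialize(null);
}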