use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
the class TestBlockToken method testEmptyLegacyBlockTokenBytesIsLegacy.
@Test
public void testEmptyLegacyBlockTokenBytesIsLegacy() throws IOException {
  BlockTokenIdentifier emptyIdent = new BlockTokenIdentifier();
  DataOutputBuffer dob = new DataOutputBuffer(4096);
  DataInputBuffer dib = new DataInputBuffer();
  emptyIdent.writeLegacy(dob);
  byte[] emptyIdentBytes = Arrays.copyOf(dob.getData(), dob.getLength());
  BlockTokenIdentifier legacyToken = new BlockTokenIdentifier();
  BlockTokenIdentifier protobufToken = new BlockTokenIdentifier();
  BlockTokenIdentifier readToken = new BlockTokenIdentifier();
  dib.reset(emptyIdentBytes, emptyIdentBytes.length);
  legacyToken.readFieldsLegacy(dib);
  boolean invalidProtobufMessage = false;
  try {
    dib.reset(emptyIdentBytes, emptyIdentBytes.length);
    protobufToken.readFieldsProtobuf(dib);
  } catch (IOException e) {
    invalidProtobufMessage = true;
  }
  assertTrue(invalidProtobufMessage);
  // readFields must accept the same bytes without throwing, falling back to
  // the legacy decoding that readFieldsLegacy used above.
  dib.reset(emptyIdentBytes, emptyIdentBytes.length);
  readToken.readFields(dib);
  assertTrue(invalidProtobufMessage);
}
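The same write-then-reset pattern works for any Writable. A minimal, self-contained sketch of it (the Text payload and the class name are illustrative, not taken from the test above):

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class BufferRoundTrip {
  public static void main(String[] args) throws IOException {
    DataOutputBuffer dob = new DataOutputBuffer();
    new Text("hello").write(dob);
    // getData() returns the backing array, which is usually larger than the
    // valid region; copy only getLength() bytes, as the test above does.
    byte[] bytes = Arrays.copyOf(dob.getData(), dob.getLength());

    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(bytes, bytes.length);  // reset() lets one buffer be reused across reads
    Text copy = new Text();
    copy.readFields(dib);
    System.out.println(copy);        // prints "hello"
  }
}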
use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
the class TestBlockToken method testLegacyBlockTokenBytesIsLegacy.
@Test
public void testLegacyBlockTokenBytesIsLegacy() throws IOException {
  final boolean useProto = false;
  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null, useProto);
  Token<BlockTokenIdentifier> token =
      sm.generateToken(block1, EnumSet.noneOf(BlockTokenIdentifier.AccessMode.class));
  final byte[] tokenBytes = token.getIdentifier();
  BlockTokenIdentifier legacyToken = new BlockTokenIdentifier();
  BlockTokenIdentifier protobufToken = new BlockTokenIdentifier();
  BlockTokenIdentifier readToken = new BlockTokenIdentifier();
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(tokenBytes, tokenBytes.length);
  legacyToken.readFieldsLegacy(dib);
  boolean invalidProtobufMessage = false;
  try {
    dib.reset(tokenBytes, tokenBytes.length);
    protobufToken.readFieldsProtobuf(dib);
  } catch (IOException e) {
    invalidProtobufMessage = true;
  }
  assertTrue(invalidProtobufMessage);
  dib.reset(tokenBytes, tokenBytes.length);
  readToken.readFields(dib);
  // Using legacy, the token parses as a legacy block token and not a protobuf one.
  assertEquals(legacyToken, readToken);
  assertNotEquals(protobufToken, readToken);
}
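Taken together, the two tests pin down a fallback dispatch: protobuf decoding rejects legacy bytes, while readFields accepts them. A hedged sketch of what such a dispatch could look like (the helper and its order of attempts are assumptions; BlockTokenIdentifier.readFields may detect the encoding differently internally):

static BlockTokenIdentifier decode(byte[] bytes) throws IOException {
  DataInputBuffer dib = new DataInputBuffer();
  BlockTokenIdentifier ident = new BlockTokenIdentifier();
  try {
    dib.reset(bytes, bytes.length);
    ident.readFieldsProtobuf(dib);   // try the newer encoding first (assumed order)
  } catch (IOException e) {
    dib.reset(bytes, bytes.length);  // rewind the same buffer and retry as legacy
    ident.readFieldsLegacy(dib);
  }
  return ident;
}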
use of org.apache.hadoop.io.DataInputBuffer in project tez by apache.
the class ContainerRunnerImpl method submitWork.
/**
 * Submit an entire work unit - containerId + TaskSpec.
 * This is intended for a task push from the AM.
 *
 * @param request the work unit to execute
 * @throws org.apache.tez.dag.api.TezException
 */
@Override
public void submitWork(SubmitWorkRequestProto request) throws TezException {
  LOG.info("Queuing work for execution: " + request);
  checkAndThrowExceptionForTests(request);
  Map<String, String> env = new HashMap<String, String>();
  env.putAll(localEnv);
  env.put(ApplicationConstants.Environment.USER.name(), request.getUser());
  String[] localDirs = new String[localDirsBase.length];
  // Set up local dirs to be application specific, and create them.
  for (int i = 0; i < localDirsBase.length; i++) {
    localDirs[i] = createAppSpecificLocalDir(localDirsBase[i],
        request.getApplicationIdString(), request.getUser());
    try {
      localFs.mkdirs(new Path(localDirs[i]));
    } catch (IOException e) {
      throw new TezException(e);
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Dirs are: " + Arrays.toString(localDirs));
  }
  // Set up workingDir. This is otherwise set up as Environment.PWD.
  // Used for re-localization, to add the user-specified configuration (conf_pb_binary_stream).
  String workingDir = localDirs[0];
  Credentials credentials = new Credentials();
  DataInputBuffer dib = new DataInputBuffer();
  byte[] tokenBytes = request.getCredentialsBinary().toByteArray();
  dib.reset(tokenBytes, tokenBytes.length);
  try {
    credentials.readTokenStorageStream(dib);
  } catch (IOException e) {
    throw new TezException(e);
  }
  Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);
  // TODO Unregistering does not happen at the moment, since there are no signals for when an app completes.
  LOG.info("Registering request with the ShuffleHandler for containerId {}", request.getContainerIdString());
  ShuffleHandler.get().registerApplication(request.getApplicationIdString(), jobToken, request.getUser());
  TaskRunnerCallable callable = new TaskRunnerCallable(request, new Configuration(getConfig()),
      new ExecutionContextImpl(localAddress.get().getHostName()), env, localDirs, workingDir,
      credentials, memoryPerExecutor, sharedExecutor);
  ListenableFuture<ContainerExecutionResult> future = executorService.submit(callable);
  Futures.addCallback(future, new TaskRunnerCallback(request, callable));
}
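The decoding above implies a matching write side in the AM. A hedged sketch of that counterpart (serializeCredentials is a hypothetical helper, not shown in this class; only writeTokenStorageToStream and ByteString.copyFrom are known APIs):

import java.io.IOException;
import com.google.protobuf.ByteString;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;

// Hypothetical helper: produces the bytes that request.getCredentialsBinary()
// returns on the receiving side.
static ByteString serializeCredentials(Credentials credentials) throws IOException {
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  // Copy only the valid region of the backing array.
  return ByteString.copyFrom(dob.getData(), 0, dob.getLength());
}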
use of org.apache.hadoop.io.DataInputBuffer in project tez by apache.
the class ContainerRunnerImpl method queueContainer.
/**
 * Submit a container which is ready for running.
 * The regular pull mechanism will be used to fetch work from the AM.
 *
 * @param request the container to queue
 * @throws TezException
 */
@Override
public void queueContainer(RunContainerRequestProto request) throws TezException {
  LOG.info("Queuing container for execution: " + request);
  Map<String, String> env = new HashMap<String, String>();
  env.putAll(localEnv);
  env.put(ApplicationConstants.Environment.USER.name(), request.getUser());
  String[] localDirs = new String[localDirsBase.length];
  // Set up local dirs to be application specific, and create them.
  for (int i = 0; i < localDirsBase.length; i++) {
    localDirs[i] = createAppSpecificLocalDir(localDirsBase[i],
        request.getApplicationIdString(), request.getUser());
    try {
      localFs.mkdirs(new Path(localDirs[i]));
    } catch (IOException e) {
      throw new TezException(e);
    }
  }
  LOG.info("Dirs for {} are {}", request.getContainerIdString(), Arrays.toString(localDirs));
  // Set up workingDir. This is otherwise set up as Environment.PWD.
  // Used for re-localization, to add the user-specified configuration (conf_pb_binary_stream).
  String workingDir = localDirs[0];
  Credentials credentials = new Credentials();
  DataInputBuffer dib = new DataInputBuffer();
  byte[] tokenBytes = request.getCredentialsBinary().toByteArray();
  dib.reset(tokenBytes, tokenBytes.length);
  try {
    credentials.readTokenStorageStream(dib);
  } catch (IOException e) {
    throw new TezException(e);
  }
  Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);
  // TODO Unregistering does not happen at the moment, since there are no signals for when an app completes.
  LOG.info("Registering request with the ShuffleHandler for containerId {}", request.getContainerIdString());
  ShuffleHandler.get().registerApplication(request.getApplicationIdString(), jobToken, request.getUser());
  ContainerRunnerCallable callable = new ContainerRunnerCallable(request, new Configuration(getConfig()),
      new ExecutionContextImpl(localAddress.get().getHostName()), env, localDirs, workingDir,
      credentials, memoryPerExecutor);
  ListenableFuture<ContainerExecutionResult> future = executorService.submit(callable);
  Futures.addCallback(future, new ContainerRunnerCallback(request, callable));
}
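queueContainer repeats submitWork's credential-decoding block verbatim. A hedged sketch of a shared helper the two methods could call instead (parseCredentials is hypothetical, not present in ContainerRunnerImpl):

// Hypothetical refactoring: one decode path for both entry points.
private static Credentials parseCredentials(ByteString credentialsBinary) throws TezException {
  Credentials credentials = new Credentials();
  DataInputBuffer dib = new DataInputBuffer();
  byte[] tokenBytes = credentialsBinary.toByteArray();
  dib.reset(tokenBytes, tokenBytes.length);
  try {
    credentials.readTokenStorageStream(dib);
  } catch (IOException e) {
    throw new TezException(e);
  }
  return credentials;
}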
use of org.apache.hadoop.io.DataInputBuffer in project tez by apache.
the class ValuesIterator method readNextKey.
/**
 * Read the next key, which may be the same as the current key.
 */
private void readNextKey() throws IOException {
  more = in.next();
  if (more) {
    DataInputBuffer nextKeyBytes = in.getKey();
    if (!in.isSameKey()) {
      keyIn.reset(nextKeyBytes.getData(), nextKeyBytes.getPosition(),
          nextKeyBytes.getLength() - nextKeyBytes.getPosition());
      nextKey = keyDeserializer.deserialize(nextKey);
      // hasMoreValues: true for the very first key, or when the new key
      // compares equal to the current one.
      hasMoreValues = (key == null) || (comparator.compare(key, nextKey) == 0);
      if (key == null || !hasMoreValues) {
        // Start of a new key group.
        if (inputKeyCounter != null) {
          inputKeyCounter.increment(1);
        }
        ++keyCtr;
      }
    } else {
      hasMoreValues = in.isSameKey();
    }
  } else {
    hasMoreValues = false;
  }
}
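readNextKey leans on a deserializer that was opened over keyIn elsewhere in the class. A self-contained sketch of that wiring (how keyIn and keyDeserializer are bound at setup is an assumption here; Text stands in for the real key type):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.SerializationFactory;

public class DeserializerOverBuffer {
  public static void main(String[] args) throws IOException {
    // Serialize a key so we have raw bytes to work with.
    DataOutputBuffer dob = new DataOutputBuffer();
    new Text("key-1").write(dob);

    // Bind a deserializer to a reusable DataInputBuffer, mirroring how
    // ValuesIterator presumably wires keyIn to keyDeserializer at setup.
    Configuration conf = new Configuration();
    SerializationFactory factory = new SerializationFactory(conf);
    Deserializer<Text> deserializer = factory.getDeserializer(Text.class);
    DataInputBuffer keyIn = new DataInputBuffer();
    deserializer.open(keyIn);

    // reset() points the buffer at a (start, length) window, the same
    // getPosition()/getLength() arithmetic used in readNextKey above.
    keyIn.reset(dob.getData(), 0, dob.getLength());
    Text key = deserializer.deserialize(null);  // null asks for a fresh object
    System.out.println(key);                    // prints "key-1"
  }
}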