Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hive by apache.
The class LlapBaseInputFormat, method constructSubmitWorkRequestProto:
private SubmitWorkRequestProto constructSubmitWorkRequestProto(SubmitWorkInfo submitWorkInfo,
    int taskNum, int attemptNum, InetSocketAddress address, Token<JobTokenIdentifier> token,
    byte[] fragmentBytes, byte[] fragmentBytesSignature, JobConf job) throws IOException {
  ApplicationId appId = submitWorkInfo.getFakeAppId();
  // This works, assuming the executor is running within YARN.
  String user = System.getenv(ApplicationConstants.Environment.USER.name());
  LOG.info("Setting user in submitWorkRequest to: " + user);
  ContainerId containerId =
      ContainerId.newInstance(ApplicationAttemptId.newInstance(appId, attemptNum), taskNum);
  // Credentials can change across DAGs. Ideally construct only once per DAG.
  Credentials credentials = new Credentials();
  TokenCache.setSessionToken(token, credentials);
  ByteBuffer credentialsBinary = serializeCredentials(credentials);
  FragmentRuntimeInfo.Builder runtimeInfo = FragmentRuntimeInfo.newBuilder();
  runtimeInfo.setCurrentAttemptStartTime(System.currentTimeMillis());
  runtimeInfo.setWithinDagPriority(0);
  runtimeInfo.setDagStartTime(submitWorkInfo.getCreationTime());
  runtimeInfo.setFirstAttemptStartTime(submitWorkInfo.getCreationTime());
  runtimeInfo.setNumSelfAndUpstreamTasks(submitWorkInfo.getVertexParallelism());
  runtimeInfo.setNumSelfAndUpstreamCompletedTasks(0);
  SubmitWorkRequestProto.Builder builder = SubmitWorkRequestProto.newBuilder();
  VertexOrBinary.Builder vertexBuilder = VertexOrBinary.newBuilder();
  vertexBuilder.setVertexBinary(ByteString.copyFrom(submitWorkInfo.getVertexBinary()));
  if (submitWorkInfo.getVertexSignature() != null) {
    // Unsecure case?
    builder.setWorkSpecSignature(ByteString.copyFrom(submitWorkInfo.getVertexSignature()));
  }
  builder.setWorkSpec(vertexBuilder.build());
  builder.setFragmentNumber(taskNum);
  builder.setAttemptNumber(attemptNum);
  builder.setContainerIdString(containerId.toString());
  builder.setAmHost(LlapUtil.getAmHostNameFromAddress(address, job));
  builder.setAmPort(address.getPort());
  builder.setCredentialsBinary(ByteString.copyFrom(credentialsBinary));
  builder.setFragmentRuntimeInfo(runtimeInfo.build());
  builder.setInitialEventBytes(ByteString.copyFrom(fragmentBytes));
  if (fragmentBytesSignature != null) {
    builder.setInitialEventSignature(ByteString.copyFrom(fragmentBytesSignature));
  }
  return builder.build();
}
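The serializeCredentials helper called above is not shown in this snippet. A minimal sketch of the usual Hadoop pattern for packing a Credentials object into a ByteBuffer follows; the method name matches the call above, but the exact body in LlapBaseInputFormat is an assumption.

private ByteBuffer serializeCredentials(Credentials credentials) throws IOException {
  // Copy the tokens into a fresh Credentials object and write them out in the
  // standard Hadoop token-storage format (DataOutputBuffer is org.apache.hadoop.io).
  Credentials containerCredentials = new Credentials();
  containerCredentials.addAll(credentials);
  DataOutputBuffer tokenBuffer = new DataOutputBuffer();
  containerCredentials.writeTokenStorageToStream(tokenBuffer);
  return ByteBuffer.wrap(tokenBuffer.getData(), 0, tokenBuffer.getLength());
}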
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
The class TestContainerManagerSecurity, method testContainerTokenWithEpoch:
/**
 * This tests whether a containerId is serialized/deserialized with epoch.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws YarnException
 */
private void testContainerTokenWithEpoch(Configuration conf)
    throws IOException, InterruptedException, YarnException {
  LOG.info("Running test for serializing/deserializing containerIds");
  NMTokenSecretManagerInRM nmTokenSecretManagerInRM =
      yarnCluster.getResourceManager().getRMContext().getNMTokenSecretManager();
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  ContainerId cId = ContainerId.newContainerId(appAttemptId, (5L << 40) | 3L);
  NodeManager nm = yarnCluster.getNodeManager(0);
  NMTokenSecretManagerInNM nmTokenSecretManagerInNM =
      nm.getNMContext().getNMTokenSecretManager();
  String user = "test";
  waitForNMToReceiveNMTokenKey(nmTokenSecretManagerInNM, nm);
  NodeId nodeId = nm.getNMContext().getNodeId();
  // Both key ids should be equal.
  Assert.assertEquals(nmTokenSecretManagerInNM.getCurrentKey().getKeyId(),
      nmTokenSecretManagerInRM.getCurrentKey().getKeyId());
  // Creating a normal Container Token
  RMContainerTokenSecretManager containerTokenSecretManager =
      yarnCluster.getResourceManager().getRMContext().getContainerTokenSecretManager();
  Resource r = Resource.newInstance(1230, 2);
  Token containerToken = containerTokenSecretManager.createContainerToken(cId, 0, nodeId, user,
      r, Priority.newInstance(0), 0);
  ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier();
  byte[] tokenIdentifierContent = containerToken.getIdentifier().array();
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
  containerTokenIdentifier.readFields(dib);
  Assert.assertEquals(cId, containerTokenIdentifier.getContainerID());
  Assert.assertEquals(cId.toString(), containerTokenIdentifier.getContainerID().toString());
  Token nmToken = nmTokenSecretManagerInRM.createNMToken(appAttemptId, nodeId, user);
  YarnRPC rpc = YarnRPC.create(conf);
  testStartContainer(rpc, appAttemptId, nodeId, containerToken, nmToken, false);
  List<ContainerId> containerIds = new LinkedList<ContainerId>();
  containerIds.add(cId);
  ContainerManagementProtocol proxy =
      getContainerManagementProtocolProxy(rpc, nmToken, nodeId, user);
  GetContainerStatusesResponse res =
      proxy.getContainerStatuses(GetContainerStatusesRequest.newInstance(containerIds));
  Assert.assertNotNull(res.getContainerStatuses().get(0));
  Assert.assertEquals(cId, res.getContainerStatuses().get(0).getContainerId());
  Assert.assertEquals(cId.toString(),
      res.getContainerStatuses().get(0).getContainerId().toString());
}
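The value passed to ContainerId.newContainerId above, (5L << 40) | 3L, packs an epoch of 5 and a sequential id of 3 into one 64-bit container id, which is what the test exercises when it round-trips the id through the token. A short sketch of how that value decomposes (the 40-bit split is the layout YARN uses for container ids; treat the exact constants as an assumption):

ContainerId cId = ContainerId.newContainerId(appAttemptId, (5L << 40) | 3L);
long raw = cId.getContainerId();           // full 64-bit id, epoch in the upper bits
long epoch = raw >> 40;                    // 5
long sequentialId = raw & 0xFFFFFFFFFFL;   // 3, the per-attempt container counter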
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
The class RemoteAppChecker, method getActiveApplications:
@Override
@Private
public Collection<ApplicationId> getActiveApplications() throws YarnException {
  try {
    List<ApplicationId> activeApps = new ArrayList<ApplicationId>();
    List<ApplicationReport> apps = client.getApplications(ACTIVE_STATES);
    for (ApplicationReport app : apps) {
      activeApps.add(app.getApplicationId());
    }
    return activeApps;
  } catch (IOException e) {
    throw new YarnException(e);
  }
}
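ACTIVE_STATES is a constant defined elsewhere in RemoteAppChecker. A plausible definition, assumed here, is the set of non-terminal application states, which YarnClient.getApplications accepts as a filter:

private static final EnumSet<YarnApplicationState> ACTIVE_STATES = EnumSet.of(
    YarnApplicationState.NEW, YarnApplicationState.NEW_SAVING,
    YarnApplicationState.SUBMITTED, YarnApplicationState.ACCEPTED,
    YarnApplicationState.RUNNING);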
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
The class TestInMemorySCMStore, method startStoreWithResources:
private Map<String, String> startStoreWithResources() throws Exception {
  Map<String, String> initialCachedResources = new HashMap<String, String>();
  int count = 10;
  for (int i = 0; i < count; i++) {
    String key = String.valueOf(i);
    String fileName = key + ".jar";
    initialCachedResources.put(key, fileName);
  }
  doReturn(new ArrayList<ApplicationId>()).when(checker).getActiveApplications();
  doReturn(initialCachedResources).when(store)
      .getInitialCachedResources(isA(FileSystem.class), isA(Configuration.class));
  this.store.init(new Configuration());
  this.store.start();
  return initialCachedResources;
}
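The checker and store fields stubbed with doReturn above are Mockito spies created in the test fixture. A rough sketch of that setup, assuming an InMemorySCMStore constructor that accepts an AppChecker (the actual TestInMemorySCMStore fixture may differ):

private InMemorySCMStore store;
private AppChecker checker;

@Before
public void setup() {
  // Spies keep the real behavior but let the test stub getActiveApplications()
  // and getInitialCachedResources() with doReturn(...).
  checker = spy(new RemoteAppChecker());
  store = spy(new InMemorySCMStore(checker));
}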
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
The class TestInMemorySCMStore, method startStoreWithApps:
private void startStoreWithApps() throws Exception {
  ArrayList<ApplicationId> list = new ArrayList<ApplicationId>();
  int count = 5;
  for (int i = 0; i < count; i++) {
    list.add(createAppId(i, i));
  }
  doReturn(list).when(checker).getActiveApplications();
  doReturn(new HashMap<String, String>()).when(store)
      .getInitialCachedResources(isA(FileSystem.class), isA(Configuration.class));
  this.store.init(new Configuration());
  this.store.start();
}
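createAppId is a small helper defined elsewhere in the test; its likely shape, assumed here (including the parameter order), simply delegates to ApplicationId.newInstance:

private ApplicationId createAppId(int id, long timestamp) {
  // ApplicationId.newInstance takes (clusterTimestamp, id).
  return ApplicationId.newInstance(timestamp, id);
}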