Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
The class TestAMRMClient, method allocateAndStartContainers.
private List<Container> allocateAndStartContainers(final AMRMClient<ContainerRequest> amClient,
    final NMClient nmClient, int num) throws YarnException, IOException {
  // set up allocation requests
  for (int i = 0; i < num; ++i) {
    amClient.addContainerRequest(new ContainerRequest(capability, nodes, racks, priority));
  }
  // send allocation requests
  amClient.allocate(0.1f);
  // let NM heartbeat to RM and trigger allocations
  triggerSchedulingWithNMHeartBeat();
  // get allocations
  AllocateResponse allocResponse = amClient.allocate(0.1f);
  List<Container> containers = allocResponse.getAllocatedContainers();
  Assert.assertEquals(num, containers.size());
  // build container launch context
  Credentials ts = new Credentials();
  DataOutputBuffer dob = new DataOutputBuffer();
  ts.writeTokenStorageToStream(dob);
  ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  // start a process long enough for increase/decrease action to take effect
  ContainerLaunchContext clc = BuilderUtils.newContainerLaunchContext(
      Collections.<String, LocalResource>emptyMap(), new HashMap<String, String>(),
      Arrays.asList("sleep", "100"), new HashMap<String, ByteBuffer>(), securityTokens,
      new HashMap<ApplicationAccessType, String>());
  // start the containers and make sure they are in RUNNING state
  try {
    for (int i = 0; i < num; i++) {
      Container container = containers.get(i);
      nmClient.startContainer(container, clc);
      // container status
      while (true) {
        ContainerStatus status = nmClient.getContainerStatus(container.getId(), container.getNodeId());
        if (status.getState() == ContainerState.RUNNING) {
          break;
        }
        sleep(10);
      }
    }
  } catch (YarnException e) {
    throw new AssertionError("Exception is not expected: " + e);
  }
  // let NM's heartbeat to RM to confirm container launch
  triggerSchedulingWithNMHeartBeat();
  return containers;
}
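The Credentials-to-ByteBuffer step above (write the token storage to a DataOutputBuffer, then wrap only the valid prefix of its backing array) recurs in several of the snippets below. As a minimal sketch, the pattern could be factored into a small helper; the method name is hypothetical, not something these classes define:

static ByteBuffer serializeCredentials(Credentials credentials) throws IOException {
  // Hypothetical helper illustrating the pattern used in the snippets on this page.
  // DataOutputBuffer exposes its backing array via getData(); only the first
  // getLength() bytes are valid, so wrap exactly that range.
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
}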
Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
The class ContainerManagerImpl, method buildAppProto.
private ContainerManagerApplicationProto buildAppProto(ApplicationId appId, String user,
    Credentials credentials, Map<ApplicationAccessType, String> appAcls,
    LogAggregationContext logAggregationContext) {
  ContainerManagerApplicationProto.Builder builder = ContainerManagerApplicationProto.newBuilder();
  builder.setId(((ApplicationIdPBImpl) appId).getProto());
  builder.setUser(user);
  if (logAggregationContext != null) {
    builder.setLogAggregationContext(((LogAggregationContextPBImpl) logAggregationContext).getProto());
  }
  builder.clearCredentials();
  if (credentials != null) {
    DataOutputBuffer dob = new DataOutputBuffer();
    try {
      credentials.writeTokenStorageToStream(dob);
      builder.setCredentials(ByteString.copyFrom(dob.getData()));
    } catch (IOException e) {
      // should not occur
      LOG.error("Cannot serialize credentials", e);
    }
  }
  builder.clearAcls();
  if (appAcls != null) {
    for (Map.Entry<ApplicationAccessType, String> acl : appAcls.entrySet()) {
      ApplicationACLMapProto p = ApplicationACLMapProto.newBuilder()
          .setAccessType(ProtoUtils.convertToProtoFormat(acl.getKey()))
          .setAcl(acl.getValue()).build();
      builder.addAcls(p);
    }
  }
  return builder.build();
}
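One detail worth noting here: ByteString.copyFrom(dob.getData()) copies the whole backing array of the DataOutputBuffer, not just the dob.getLength() bytes that were actually written, so the proto may carry unused trailing bytes. Token-storage deserialization reads a bounded structure and ignores the tail, so this is typically benign, but a length-bounded variant would look like the following sketch:

// Copy only the valid prefix of the DataOutputBuffer's backing array.
builder.setCredentials(ByteString.copyFrom(dob.getData(), 0, dob.getLength()));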
Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
The class ApplicationImpl, method buildAppProto.
static ContainerManagerApplicationProto buildAppProto(ApplicationImpl app) throws IOException {
  ContainerManagerApplicationProto.Builder builder = ContainerManagerApplicationProto.newBuilder();
  builder.setId(((ApplicationIdPBImpl) app.appId).getProto());
  builder.setUser(app.getUser());
  if (app.logAggregationContext != null) {
    builder.setLogAggregationContext(((LogAggregationContextPBImpl) app.logAggregationContext).getProto());
  }
  builder.clearCredentials();
  if (app.credentials != null) {
    DataOutputBuffer dob = new DataOutputBuffer();
    app.credentials.writeTokenStorageToStream(dob);
    builder.setCredentials(ByteString.copyFrom(dob.getData()));
  }
  builder.clearAcls();
  if (app.applicationACLs != null) {
    for (Map.Entry<ApplicationAccessType, String> acl : app.applicationACLs.entrySet()) {
      YarnProtos.ApplicationACLMapProto p = YarnProtos.ApplicationACLMapProto.newBuilder()
          .setAccessType(ProtoUtils.convertToProtoFormat(acl.getKey()))
          .setAcl(acl.getValue()).build();
      builder.addAcls(p);
    }
  }
  builder.setAppLogAggregationInitedTime(app.applicationLogInitedTimestamp);
  return builder.build();
}
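For context, a sketch of the reverse direction: a recovery path can turn the credentials bytes stored in a ContainerManagerApplicationProto back into a Credentials object using org.apache.hadoop.io.DataInputByteBuffer, the read-side counterpart of DataOutputBuffer. The method name and the hasCredentials() guard are illustrative assumptions, not copied from the class above:

static Credentials parseCredentials(ContainerManagerApplicationProto p) throws IOException {
  // Illustrative sketch: read the token storage back out of the proto's bytes field.
  Credentials credentials = new Credentials();
  if (p.hasCredentials()) {
    DataInputByteBuffer buf = new DataInputByteBuffer();
    buf.reset(p.getCredentials().asReadOnlyByteBuffer());
    credentials.readTokenStorageStream(buf);
  }
  return credentials;
}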
Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
The class MockRM, method submitApp.
public RMApp submitApp(Resource capability, String name, String user,
    Map<ApplicationAccessType, String> acls, boolean unmanaged, String queue,
    int maxAppAttempts, Credentials ts, String appType, boolean waitForAccepted,
    boolean keepContainers, boolean isAppIdProvided, ApplicationId applicationId,
    long attemptFailuresValidityInterval, LogAggregationContext logAggregationContext,
    boolean cancelTokensWhenComplete, Priority priority, String amLabel,
    Map<ApplicationTimeoutType, Long> applicationTimeouts, ByteBuffer tokensConf) throws Exception {
  ApplicationId appId = isAppIdProvided ? applicationId : null;
  ApplicationClientProtocol client = getClientRMService();
  if (!isAppIdProvided) {
    GetNewApplicationResponse resp = client.getNewApplication(Records.newRecord(GetNewApplicationRequest.class));
    appId = resp.getApplicationId();
  }
  SubmitApplicationRequest req = Records.newRecord(SubmitApplicationRequest.class);
  ApplicationSubmissionContext sub = Records.newRecord(ApplicationSubmissionContext.class);
  sub.setKeepContainersAcrossApplicationAttempts(keepContainers);
  sub.setApplicationId(appId);
  sub.setApplicationName(name);
  sub.setMaxAppAttempts(maxAppAttempts);
  if (applicationTimeouts != null && applicationTimeouts.size() > 0) {
    sub.setApplicationTimeouts(applicationTimeouts);
  }
  if (unmanaged) {
    sub.setUnmanagedAM(true);
  }
  if (queue != null) {
    sub.setQueue(queue);
  }
  if (priority != null) {
    sub.setPriority(priority);
  }
  sub.setApplicationType(appType);
  ContainerLaunchContext clc = Records.newRecord(ContainerLaunchContext.class);
  sub.setResource(capability);
  clc.setApplicationACLs(acls);
  if (ts != null && UserGroupInformation.isSecurityEnabled()) {
    DataOutputBuffer dob = new DataOutputBuffer();
    ts.writeTokenStorageToStream(dob);
    ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    clc.setTokens(securityTokens);
    clc.setTokensConf(tokensConf);
  }
  sub.setAMContainerSpec(clc);
  sub.setAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
  if (logAggregationContext != null) {
    sub.setLogAggregationContext(logAggregationContext);
  }
  sub.setCancelTokensWhenComplete(cancelTokensWhenComplete);
  ResourceRequest amResourceRequest = ResourceRequest.newInstance(
      Priority.newInstance(0), ResourceRequest.ANY, capability, 1);
  if (amLabel != null && !amLabel.isEmpty()) {
    amResourceRequest.setNodeLabelExpression(amLabel.trim());
  }
  sub.setAMContainerResourceRequest(amResourceRequest);
  req.setApplicationSubmissionContext(sub);
  UserGroupInformation fakeUser = UserGroupInformation.createUserForTesting(user, new String[] { "someGroup" });
  PrivilegedExceptionAction<SubmitApplicationResponse> action =
      new PrivilegedExceptionAction<SubmitApplicationResponse>() {
        ApplicationClientProtocol client;
        SubmitApplicationRequest req;

        @Override
        public SubmitApplicationResponse run() throws IOException, YarnException {
          try {
            return client.submitApplication(req);
          } catch (YarnException | IOException e) {
            e.printStackTrace();
            throw e;
          }
        }

        PrivilegedExceptionAction<SubmitApplicationResponse> setClientReq(
            ApplicationClientProtocol client, SubmitApplicationRequest req) {
          this.client = client;
          this.req = req;
          return this;
        }
      }.setClientReq(client, req);
  fakeUser.doAs(action);
  // make sure app is immediately available after submit
  if (waitForAccepted) {
    waitForState(appId, RMAppState.ACCEPTED);
  }
  RMApp rmApp = getRMContext().getRMApps().get(appId);
  // unmanaged AM won't go to RMAppAttemptState.SCHEDULED.
  if (waitForAccepted && !unmanaged) {
    waitForState(rmApp.getCurrentAppAttempt().getAppAttemptId(), RMAppAttemptState.SCHEDULED);
  }
  return rmApp;
}
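The anonymous PrivilegedExceptionAction with its setClientReq(...) initializer is a pre-Java-8 idiom for passing the client and request into the action. Since PrivilegedExceptionAction is a functional interface and both locals are assigned only once, the same submission could be written with a lambda capturing them directly; this is a sketch, not the method's actual code:

// Equivalent submission using a lambda that captures the effectively final locals.
SubmitApplicationResponse response = fakeUser.doAs(
    (PrivilegedExceptionAction<SubmitApplicationResponse>) () -> client.submitApplication(req));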
Use of org.apache.hadoop.io.DataOutputBuffer in project hadoop by apache.
The class TestFSCheckpointService, method checkpointCreate.
public void checkpointCreate(ByteBuffer b) throws Exception {
  int WRITES = 128;
  FileSystem fs = mock(FileSystem.class);
  DataOutputBuffer dob = new DataOutputBuffer();
  FSDataOutputStream hdfs = spy(new FSDataOutputStream(dob, null));
  // backed by array
  @SuppressWarnings("resource")
  DataOutputBuffer verif = new DataOutputBuffer();
  when(fs.create(isA(Path.class), eq((short) 1))).thenReturn(hdfs);
  when(fs.rename(isA(Path.class), isA(Path.class))).thenReturn(true);
  Path base = new Path("/chk");
  Path finalLoc = new Path("/chk/checkpoint_chk0");
  Path tmp = FSCheckpointService.tmpfile(finalLoc);
  FSCheckpointService chk = new FSCheckpointService(fs, base, new SimpleNamingService("chk0"), (short) 1);
  CheckpointWriteChannel out = chk.create();
  Random r = new Random();
  final byte[] randBytes = new byte[BUFSIZE];
  for (int i = 0; i < WRITES; ++i) {
    r.nextBytes(randBytes);
    int s = r.nextInt(BUFSIZE - 1);
    int e = r.nextInt(BUFSIZE - s) + 1;
    verif.write(randBytes, s, e);
    b.clear();
    b.put(randBytes).flip();
    b.position(s).limit(b.position() + e);
    out.write(b);
  }
  verify(fs, never()).rename(any(Path.class), eq(finalLoc));
  CheckpointID cid = chk.commit(out);
  verify(hdfs).close();
  verify(fs).rename(eq(tmp), eq(finalLoc));
  assertArrayEquals(Arrays.copyOfRange(verif.getData(), 0, verif.getLength()),
      Arrays.copyOfRange(dob.getData(), 0, dob.getLength()));
}
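This test uses DataOutputBuffer as an in-memory sink behind a mocked FileSystem: everything the checkpoint service writes through the FSDataOutputStream lands in the buffer's backing array, where it can be compared against the verification buffer. The same idea in isolation, as a minimal sketch with illustrative names:

// Back an FSDataOutputStream with a DataOutputBuffer so written bytes can be inspected in memory.
DataOutputBuffer sink = new DataOutputBuffer();
FSDataOutputStream out = new FSDataOutputStream(sink, null);
out.writeBytes("checkpoint payload");
out.close();
byte[] written = Arrays.copyOfRange(sink.getData(), 0, sink.getLength());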