Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.
From the class TestLogAggregationService, method verifyLocalFileDeletion.
private void verifyLocalFileDeletion(LogAggregationService logAggregationService) throws Exception {
logAggregationService.init(this.conf);
logAggregationService.start();
ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
// AppLogDir should be created
File app1LogDir = new File(localLogDir, application1.toString());
app1LogDir.mkdir();
logAggregationService.handle(new LogHandlerAppStartedEvent(application1, this.user, null, this.acls));
ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(application1, 1);
ContainerId container11 = createContainer(appAttemptId, 1, ContainerType.APPLICATION_MASTER);
// Simulate log-file creation
writeContainerLogs(app1LogDir, container11, new String[] { "stdout", "stderr", "syslog" });
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container11, 0));
logAggregationService.handle(new LogHandlerAppFinishedEvent(application1));
logAggregationService.stop();
assertEquals(0, logAggregationService.getNumAggregators());
// ensure filesystems were closed
verify(logAggregationService).closeFileSystems(any(UserGroupInformation.class));
verify(delSrvc).delete(eq(user), eq((Path) null), eq(new Path(app1LogDir.getAbsolutePath())));
String containerIdStr = container11.toString();
File containerLogDir = new File(app1LogDir, containerIdStr);
int count = 0;
int maxAttempts = 50;
for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
    File f = new File(containerLogDir, fileType);
    count = 0;
    while ((f.exists()) && (count < maxAttempts)) {
        count++;
        Thread.sleep(100);
    }
    Assert.assertFalse("File [" + f + "] was not deleted", f.exists());
}
count = 0;
while ((app1LogDir.exists()) && (count < maxAttempts)) {
    count++;
    Thread.sleep(100);
}
Assert.assertFalse("Directory [" + app1LogDir + "] was not deleted", app1LogDir.exists());
Path logFilePath = logAggregationService.getRemoteNodeLogFileForApp(application1, this.user);
Assert.assertTrue("Log file [" + logFilePath + "] not found", new File(logFilePath.toUri().getPath()).exists());
dispatcher.await();
ApplicationEvent[] expectedEvents = new ApplicationEvent[] {
    new ApplicationEvent(appAttemptId.getApplicationId(), ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
    new ApplicationEvent(appAttemptId.getApplicationId(), ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
};
checkEvents(appEventHandler, expectedEvents, true, "getType", "getApplicationID");
}
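The snippet relies on a writeContainerLogs helper that is not shown on this page. Below is a minimal sketch of what such a helper could look like, assuming it only needs to create the container's log directory under the application log directory and write one line into each named file; the body is an illustration, not the project's actual test helper.

// Hypothetical sketch of the writeContainerLogs helper referenced above;
// the real helper in TestLogAggregationService may differ.
// Uses java.io.File, java.io.IOException, java.io.PrintWriter.
private void writeContainerLogs(File appLogDir, ContainerId containerId, String[] fileNames) throws IOException {
    // Container log files are expected under <appLogDir>/<containerId>/<fileName>.
    File containerLogDir = new File(appLogDir, containerId.toString());
    Assert.assertTrue("Failed to create " + containerLogDir, containerLogDir.mkdirs() || containerLogDir.isDirectory());
    for (String fileName : fileNames) {
        try (PrintWriter writer = new PrintWriter(new File(containerLogDir, fileName))) {
            writer.println(containerId + " says hello from " + fileName);
        }
    }
}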
Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.
From the class TestLogAggregationService, method testLogAggregationServiceWithPatterns.
@Test(timeout = 50000)
@SuppressWarnings("unchecked")
public void testLogAggregationServiceWithPatterns() throws Exception {
LogAggregationContext logAggregationContextWithIncludePatterns = Records.newRecord(LogAggregationContext.class);
String includePattern = "stdout|syslog";
logAggregationContextWithIncludePatterns.setIncludePattern(includePattern);
LogAggregationContext logAggregationContextWithExcludePatterns = Records.newRecord(LogAggregationContext.class);
String excludePattern = "stdout|syslog";
logAggregationContextWithExcludePatterns.setExcludePattern(excludePattern);
this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, this.remoteRootLogDir.getAbsolutePath());
ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
ApplicationId application2 = BuilderUtils.newApplicationId(1234, 2);
ApplicationId application3 = BuilderUtils.newApplicationId(1234, 3);
ApplicationId application4 = BuilderUtils.newApplicationId(1234, 4);
Application mockApp = mock(Application.class);
when(mockApp.getContainers()).thenReturn(new HashMap<ContainerId, Container>());
this.context.getApplications().put(application1, mockApp);
this.context.getApplications().put(application2, mockApp);
this.context.getApplications().put(application3, mockApp);
this.context.getApplications().put(application4, mockApp);
LogAggregationService logAggregationService = new LogAggregationService(dispatcher, this.context, this.delSrvc, super.dirsHandler);
logAggregationService.init(this.conf);
logAggregationService.start();
// The LogAggregationContext for application1 has an includePattern covering
// stdout and syslog.
// After log aggregation finishes, we expect the aggregated logs for
// application1 to contain only stdout and syslog.
// AppLogDir should be created
File appLogDir1 = new File(localLogDir, application1.toString());
appLogDir1.mkdir();
logAggregationService.handle(new LogHandlerAppStartedEvent(application1, this.user, null, this.acls, logAggregationContextWithIncludePatterns));
ApplicationAttemptId appAttemptId1 = BuilderUtils.newApplicationAttemptId(application1, 1);
ContainerId container1 = createContainer(appAttemptId1, 1, ContainerType.APPLICATION_MASTER);
// Simulate log-file creation
writeContainerLogs(appLogDir1, container1, new String[] { "stdout", "stderr", "syslog" });
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container1, 0));
// The LogAggregationContext for application2 has an excludePattern covering
// stdout and syslog.
// After log aggregation finishes, we expect the aggregated logs for
// application2 to contain only stderr.
ApplicationAttemptId appAttemptId2 = BuilderUtils.newApplicationAttemptId(application2, 1);
File app2LogDir = new File(localLogDir, application2.toString());
app2LogDir.mkdir();
logAggregationContextWithExcludePatterns.setLogAggregationPolicyClassName(AMOnlyLogAggregationPolicy.class.getName());
logAggregationService.handle(new LogHandlerAppStartedEvent(application2, this.user, null, this.acls, logAggregationContextWithExcludePatterns));
ContainerId container2 = createContainer(appAttemptId2, 1, ContainerType.APPLICATION_MASTER);
writeContainerLogs(app2LogDir, container2, new String[] { "stdout", "stderr", "syslog" });
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container2, 0));
// The LogAggregationContext for application3 has an includePattern of .*.log
// and an excludePattern covering sys.log and std.log.
// After log aggregation finishes, we expect the aggregated logs for
// application3 to contain every file ending in .log except sys.log and std.log.
LogAggregationContext context1 = Records.newRecord(LogAggregationContext.class);
context1.setIncludePattern(".*.log");
context1.setExcludePattern("sys.log|std.log");
ApplicationAttemptId appAttemptId3 = BuilderUtils.newApplicationAttemptId(application3, 1);
File app3LogDir = new File(localLogDir, application3.toString());
app3LogDir.mkdir();
context1.setLogAggregationPolicyClassName(AMOnlyLogAggregationPolicy.class.getName());
logAggregationService.handle(new LogHandlerAppStartedEvent(application3, this.user, null, this.acls, context1));
ContainerId container3 = createContainer(appAttemptId3, 1, ContainerType.APPLICATION_MASTER);
writeContainerLogs(app3LogDir, container3, new String[] { "stdout", "sys.log", "std.log", "out.log", "err.log", "log" });
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container3, 0));
// The LogAggregationContext for application4 has an includePattern covering
// sys.log and std.log, and an excludePattern covering std.log.
// After log aggregation finishes, we expect the aggregated logs for
// application4 to contain only sys.log.
LogAggregationContext context2 = Records.newRecord(LogAggregationContext.class);
context2.setIncludePattern("sys.log|std.log");
context2.setExcludePattern("std.log");
ApplicationAttemptId appAttemptId4 = BuilderUtils.newApplicationAttemptId(application4, 1);
File app4LogDir = new File(localLogDir, application4.toString());
app4LogDir.mkdir();
context2.setLogAggregationPolicyClassName(AMOnlyLogAggregationPolicy.class.getName());
logAggregationService.handle(new LogHandlerAppStartedEvent(application4, this.user, null, this.acls, context2));
ContainerId container4 = createContainer(appAttemptId4, 1, ContainerType.APPLICATION_MASTER);
writeContainerLogs(app4LogDir, container4, new String[] { "stdout", "sys.log", "std.log", "out.log", "err.log", "log" });
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container4, 0));
dispatcher.await();
ApplicationEvent[] expectedInitEvents = new ApplicationEvent[] {
    new ApplicationEvent(application1, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
    new ApplicationEvent(application2, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
    new ApplicationEvent(application3, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
    new ApplicationEvent(application4, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED)
};
checkEvents(appEventHandler, expectedInitEvents, false, "getType", "getApplicationID");
reset(appEventHandler);
logAggregationService.handle(new LogHandlerAppFinishedEvent(application1));
logAggregationService.handle(new LogHandlerAppFinishedEvent(application2));
logAggregationService.handle(new LogHandlerAppFinishedEvent(application3));
logAggregationService.handle(new LogHandlerAppFinishedEvent(application4));
logAggregationService.stop();
assertEquals(0, logAggregationService.getNumAggregators());
String[] logFiles = new String[] { "stdout", "syslog" };
verifyContainerLogs(logAggregationService, application1, new ContainerId[] { container1 }, logFiles, 2, false);
logFiles = new String[] { "stderr" };
verifyContainerLogs(logAggregationService, application2, new ContainerId[] { container2 }, logFiles, 1, false);
logFiles = new String[] { "out.log", "err.log" };
verifyContainerLogs(logAggregationService, application3, new ContainerId[] { container3 }, logFiles, 2, false);
logFiles = new String[] { "sys.log" };
verifyContainerLogs(logAggregationService, application4, new ContainerId[] { container4 }, logFiles, 1, false);
dispatcher.await();
ApplicationEvent[] expectedFinishedEvents = new ApplicationEvent[] {
    new ApplicationEvent(application1, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),
    new ApplicationEvent(application2, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),
    new ApplicationEvent(application3, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),
    new ApplicationEvent(application4, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)
};
checkEvents(appEventHandler, expectedFinishedEvents, false, "getType", "getApplicationID");
}
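The four applications above exercise the include/exclude pattern semantics of LogAggregationContext: as the expected results show, an empty include pattern admits every file, and a file matching the exclude pattern is dropped even when it also matches the include pattern. The sketch below illustrates that selection rule with plain java.util.regex; the helper name and signature are invented here for illustration and this is not the NodeManager's actual aggregation code.

// Hypothetical sketch of the include/exclude filtering exercised by the test.
// Uses java.util.ArrayList, java.util.List, java.util.regex.Pattern.
static List<String> selectFilesToAggregate(List<String> fileNames, String includePattern, String excludePattern) {
    List<String> selected = new ArrayList<>();
    for (String name : fileNames) {
        // A null or empty include pattern means "include everything".
        boolean included = includePattern == null || includePattern.isEmpty() || Pattern.matches(includePattern, name);
        boolean excluded = excludePattern != null && !excludePattern.isEmpty() && Pattern.matches(excludePattern, name);
        if (included && !excluded) {
            selected.add(name);
        }
    }
    return selected;
}

With includePattern ".*.log" and excludePattern "sys.log|std.log", this keeps out.log and err.log, which matches what verifyContainerLogs expects for application3 above.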
Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.
From the class TestNMTokenSecretManagerInNM, method testRecovery.
@Test
public void testRecovery() throws IOException {
YarnConfiguration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
final NodeId nodeId = NodeId.newInstance("somehost", 1234);
final ApplicationAttemptId attempt1 = ApplicationAttemptId.newInstance(ApplicationId.newInstance(1, 1), 1);
final ApplicationAttemptId attempt2 = ApplicationAttemptId.newInstance(ApplicationId.newInstance(2, 2), 2);
NMTokenKeyGeneratorForTest keygen = new NMTokenKeyGeneratorForTest();
NMMemoryStateStoreService stateStore = new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
NMTokenSecretManagerInNM secretMgr = new NMTokenSecretManagerInNM(stateStore);
secretMgr.setNodeId(nodeId);
MasterKey currentKey = keygen.generateKey();
secretMgr.setMasterKey(currentKey);
NMTokenIdentifier attemptToken1 = getNMTokenId(secretMgr.createNMToken(attempt1, nodeId, "user1"));
NMTokenIdentifier attemptToken2 = getNMTokenId(secretMgr.createNMToken(attempt2, nodeId, "user2"));
secretMgr.appAttemptStartContainer(attemptToken1);
secretMgr.appAttemptStartContainer(attemptToken2);
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
assertNotNull(secretMgr.retrievePassword(attemptToken1));
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// restart and verify the key is still there and the tokens are still valid
secretMgr = new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey, secretMgr.getCurrentKey());
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
assertNotNull(secretMgr.retrievePassword(attemptToken1));
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// roll master key and remove an app
currentKey = keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr.appFinished(attempt1.getApplicationId());
// restart and verify the attempt1 token is still valid because the previous master key was persisted
secretMgr = new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey, secretMgr.getCurrentKey());
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
assertNotNull(secretMgr.retrievePassword(attemptToken1));
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// roll the master key again, restart, and verify the attempt1 token is now
// invalid while attempt2 is still valid because its app-specific key was persisted
currentKey = keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr = new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey, secretMgr.getCurrentKey());
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
try {
    secretMgr.retrievePassword(attemptToken1);
    fail("attempt token should not still be valid");
} catch (InvalidToken e) {
    // expected
}
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// finish the last app, restart, and verify both tokens are now invalid
secretMgr.appFinished(attempt2.getApplicationId());
secretMgr = new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey, secretMgr.getCurrentKey());
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
try {
    secretMgr.retrievePassword(attemptToken1);
    fail("attempt token should not still be valid");
} catch (InvalidToken e) {
    // expected
}
try {
    secretMgr.retrievePassword(attemptToken2);
    fail("attempt token should not still be valid");
} catch (InvalidToken e) {
    // expected
}
stateStore.close();
}
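The getNMTokenId helper used above is not shown on this page. One plausible sketch is given below, assuming it only needs to decode the identifier bytes carried by the YARN-API token record; the project's actual helper may be implemented differently (for example via ConverterUtils).

// Hypothetical sketch of the getNMTokenId helper referenced above.
// Uses java.io.ByteArrayInputStream, java.io.DataInputStream, java.nio.ByteBuffer.
private NMTokenIdentifier getNMTokenId(org.apache.hadoop.yarn.api.records.Token token) throws IOException {
    // Copy the identifier bytes out of the token record...
    ByteBuffer identifierBuffer = token.getIdentifier().duplicate();
    byte[] identifierBytes = new byte[identifierBuffer.remaining()];
    identifierBuffer.get(identifierBytes);
    // ...and let the Writable machinery rebuild the NMTokenIdentifier.
    NMTokenIdentifier identifier = new NMTokenIdentifier();
    identifier.readFields(new DataInputStream(new ByteArrayInputStream(identifierBytes)));
    return identifier;
}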
Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.
From the class TestNMWebServices, method testContainerLogs.
private void testContainerLogs(WebResource r, ContainerId containerId) throws IOException {
final String containerIdStr = containerId.toString();
final ApplicationAttemptId appAttemptId = containerId.getApplicationAttemptId();
final ApplicationId appId = appAttemptId.getApplicationId();
final String appIdStr = appId.toString();
final String filename = "logfile1";
final String logMessage = "log message\n";
nmContext.getApplications().put(appId, new ApplicationImpl(null, "user", appId, null, nmContext));
MockContainer container = new MockContainer(appAttemptId, new AsyncDispatcher(), new Configuration(), "user", appId, 1);
container.setState(ContainerState.RUNNING);
nmContext.getContainers().put(containerId, container);
// write out log file
Path path = dirsHandler.getLogPathForWrite(ContainerLaunch.getRelativeContainerLogDir(appIdStr, containerIdStr) + "/" + filename, false);
File logFile = new File(path.toUri().getPath());
logFile.deleteOnExit();
assertTrue("Failed to create log dir", logFile.getParentFile().mkdirs());
PrintWriter pw = new PrintWriter(logFile);
pw.print(logMessage);
pw.close();
// ask for it
ClientResponse response = r.path(filename).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
String responseText = response.getEntity(String.class);
String responseLogMessage = getLogContext(responseText);
assertEquals(logMessage, responseLogMessage);
int fullTextSize = responseLogMessage.getBytes().length;
// specify how many bytes we should get from the logs;
// a positive number returns the first n bytes of the
// container log
response = r.path(filename).queryParam("size", "5").accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
responseText = response.getEntity(String.class);
responseLogMessage = getLogContext(responseText);
assertEquals(5, responseLogMessage.getBytes().length);
assertEquals(new String(logMessage.getBytes(), 0, 5), responseLogMessage);
assertTrue(fullTextSize >= responseLogMessage.getBytes().length);
// specify a byte count larger than the actual file size;
// we should get the full log
response = r.path(filename).queryParam("size", "10000").accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
responseText = response.getEntity(String.class);
responseLogMessage = getLogContext(responseText);
assertEquals(fullTextSize, responseLogMessage.getBytes().length);
assertEquals(logMessage, responseLogMessage);
// specify a negative number to get the last n bytes of the
// container log
response = r.path(filename).queryParam("size", "-5").accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
responseText = response.getEntity(String.class);
responseLogMessage = getLogContext(responseText);
assertEquals(5, responseLogMessage.getBytes().length);
assertEquals(new String(logMessage.getBytes(), logMessage.getBytes().length - 5, 5), responseLogMessage);
assertTrue(fullTextSize >= responseLogMessage.getBytes().length);
response = r.path(filename).queryParam("size", "-10000").accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
responseText = response.getEntity(String.class);
responseLogMessage = getLogContext(responseText);
assertEquals("text/plain; charset=utf-8", response.getType().toString());
assertEquals(fullTextSize, responseLogMessage.getBytes().length);
assertEquals(logMessage, responseLogMessage);
// ask for it as a download
response = r.path(filename).queryParam("format", "octet-stream").accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
responseText = response.getEntity(String.class);
responseLogMessage = getLogContext(responseText);
assertEquals(logMessage, responseLogMessage);
assertEquals(200, response.getStatus());
assertEquals("application/octet-stream; charset=utf-8", response.getType().toString());
// specify an invalid format value
response = r.path(filename).queryParam("format", "123").accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
responseText = response.getEntity(String.class);
assertEquals("The valid values for the parameter : format are " + WebAppUtils.listSupportedLogContentType(), responseText);
assertEquals(400, response.getStatus());
// ask for a file that doesn't exist; the request should redirect to
// the log server
URI requestURI = r.path("uhhh").getURI();
String redirectURL = getRedirectURL(requestURI.toString());
Assert.assertNotNull(redirectURL);
assertTrue(redirectURL.contains(LOGSERVICEWSADDR));
// Get the container log files' names
WebResource r1 = resource();
response = r1.path("ws").path("v1").path("node").path("containers").path(containerIdStr).path("logs").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(200, response.getStatus());
List<ContainerLogsInfo> responseList = response.getEntity(new GenericType<List<ContainerLogsInfo>>() {
});
assertEquals(1, responseList.size());
assertEquals(ContainerLogAggregationType.LOCAL.toString(), responseList.get(0).getLogType());
List<PerContainerLogFileInfo> logMeta = responseList.get(0).getContainerLogsInfo();
assertEquals(1, logMeta.size());
assertEquals(filename, logMeta.get(0).getFileName());
// now create an aggregated log in the remote file system
File tempLogDir = new File("target", TestNMWebServices.class.getSimpleName() + "temp-log-dir");
try {
    String aggregatedLogFile = filename + "-aggregated";
    String aggregatedLogMessage = "This is an aggregated log.";
    TestContainerLogsUtils.createContainerLogFileInRemoteFS(nmContext.getConf(), FileSystem.get(nmContext.getConf()), tempLogDir.getAbsolutePath(), containerId, nmContext.getNodeId(), aggregatedLogFile, "user", aggregatedLogMessage, true);
    r1 = resource();
    response = r1.path("ws").path("v1").path("node").path("containers").path(containerIdStr).path("logs").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(200, response.getStatus());
    responseList = response.getEntity(new GenericType<List<ContainerLogsInfo>>() {
    });
    assertEquals(2, responseList.size());
    for (ContainerLogsInfo logInfo : responseList) {
        if (logInfo.getLogType().equals(ContainerLogAggregationType.AGGREGATED.toString())) {
            List<PerContainerLogFileInfo> meta = logInfo.getContainerLogsInfo();
            assertEquals(1, meta.size());
            assertEquals(aggregatedLogFile, meta.get(0).getFileName());
        } else {
            assertEquals(ContainerLogAggregationType.LOCAL.toString(), logInfo.getLogType());
            List<PerContainerLogFileInfo> meta = logInfo.getContainerLogsInfo();
            assertEquals(1, meta.size());
            assertEquals(filename, meta.get(0).getFileName());
        }
    }
    // Test whether we can get the aggregated log as well
    TestContainerLogsUtils.createContainerLogFileInRemoteFS(nmContext.getConf(), FileSystem.get(nmContext.getConf()), tempLogDir.getAbsolutePath(), containerId, nmContext.getNodeId(), filename, "user", aggregatedLogMessage, true);
    response = r.path(filename).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
    responseText = response.getEntity(String.class);
    assertTrue(responseText.contains("LogAggregationType: " + ContainerLogAggregationType.AGGREGATED));
    assertTrue(responseText.contains(aggregatedLogMessage));
    assertTrue(responseText.contains("LogAggregationType: " + ContainerLogAggregationType.LOCAL));
    assertTrue(responseText.contains(logMessage));
} finally {
    FileUtil.fullyDelete(tempLogDir);
}
// After the container completes, it is removed from nmContext
nmContext.getContainers().remove(containerId);
Assert.assertNull(nmContext.getContainers().get(containerId));
response = r.path(filename).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
responseText = response.getEntity(String.class);
assertTrue(responseText.contains(logMessage));
}
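The getRedirectURL helper used above is also not shown here. A minimal sketch of such a helper, assuming it simply issues the request with automatic redirect-following disabled and returns the Location header; the implementation below is an illustration, not the project's actual code.

// Hypothetical sketch of the getRedirectURL helper referenced above.
// Uses java.net.HttpURLConnection and java.net.URL.
private static String getRedirectURL(String url) {
    try {
        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        // Do not follow the redirect automatically; we only want to inspect it.
        conn.setInstanceFollowRedirects(false);
        if (conn.getResponseCode() == HttpURLConnection.HTTP_MOVED_TEMP) {
            return conn.getHeaderField("Location");
        }
    } catch (IOException e) {
        // fall through and signal "no redirect" to the caller
    }
    return null;
}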
Use of org.apache.hadoop.yarn.api.records.ApplicationAttemptId in project hadoop by apache.
From the class CapacityScheduler, method tryCommit.
@Override
public void tryCommit(Resource cluster, ResourceCommitRequest r) {
ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request = (ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>) r;
ApplicationAttemptId attemptId = null;
// We need to update the application's unconfirmed allocated resource when
// any container is allocated.
boolean updateUnconfirmedAllocatedResource = request.getContainersToAllocate() != null && !request.getContainersToAllocate().isEmpty();
// find the application to accept and apply the ResourceCommitRequest
if (request.anythingAllocatedOrReserved()) {
    ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode> c = request.getFirstAllocatedOrReservedContainer();
    attemptId = c.getAllocatedOrReservedContainer().getSchedulerApplicationAttempt().getApplicationAttemptId();
} else {
    if (!request.getContainersToRelease().isEmpty()) {
        attemptId = request.getContainersToRelease().get(0).getSchedulerApplicationAttempt().getApplicationAttemptId();
    }
}
if (LOG.isDebugEnabled()) {
    LOG.debug("Try to commit allocation proposal=" + request);
}
if (attemptId != null) {
    FiCaSchedulerApp app = getApplicationAttempt(attemptId);
    if (app != null) {
        if (app.accept(cluster, request)) {
            app.apply(cluster, request);
            LOG.info("Allocation proposal accepted");
        } else {
            LOG.info("Failed to accept allocation proposal");
        }
        // Update unconfirmed allocated resource.
        if (updateUnconfirmedAllocatedResource) {
            app.decUnconfirmedRes(request.getTotalAllocatedResource());
        }
    }
}
}
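Since the attempt-id lookup at the top of tryCommit is the part that actually touches ApplicationAttemptId, it can be read in isolation. The sketch below simply extracts that logic, unchanged, into a hypothetical helper; it is a refactoring illustration based on the code above, not part of CapacityScheduler.

// Hypothetical refactoring sketch of the attempt-id resolution shown above.
private ApplicationAttemptId getAttemptIdFromCommitRequest(ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request) {
    if (request.anythingAllocatedOrReserved()) {
        // Any allocated or reserved container identifies the application attempt.
        return request.getFirstAllocatedOrReservedContainer().getAllocatedOrReservedContainer().getSchedulerApplicationAttempt().getApplicationAttemptId();
    }
    if (!request.getContainersToRelease().isEmpty()) {
        // Otherwise fall back to the first container being released.
        return request.getContainersToRelease().get(0).getSchedulerApplicationAttempt().getApplicationAttemptId();
    }
    return null;
}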