Usage example of org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest from the Apache Hadoop project.
Taken from the class TestClientSCMProtocolService, method testRelease_MissingEntry.
@Test
public void testRelease_MissingEntry() throws Exception {
  // Snapshot the release counter so we can later verify it did not move.
  long releasesBefore = ClientSCMMetrics.getInstance().getCacheReleases();

  // Release a key that was never added to the store.
  ReleaseSharedCacheResourceRequest req =
      recordFactory.newRecordInstance(ReleaseSharedCacheResourceRequest.class);
  req.setAppId(createAppId(2, 2L));
  req.setResourceKey("key2");
  clientSCMProxy.release(req);

  // The store still answers (with an empty reference set) and the
  // release metric is untouched, since nothing was actually released.
  assertNotNull(store.getResourceReferences("key2"));
  assertEquals(0, store.getResourceReferences("key2").size());
  assertEquals("Client SCM metrics were updated when a release did not happen.",
      0, ClientSCMMetrics.getInstance().getCacheReleases() - releasesBefore);
}
Usage example of org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest from the Apache Hadoop project.
Taken from the class TestClientSCMProtocolService, method testRelease_ExistingEntry_NonExistantAppId.
@Test
public void testRelease_ExistingEntry_NonExistantAppId() throws Exception {
  // Seed the store with one resource referenced by application (1, 1L).
  store.addResource("key1", "foo.jar");
  store.addResourceReference("key1",
      new SharedCacheResourceReference(createAppId(1, 1L), "user"));
  assertEquals(1, store.getResourceReferences("key1").size());

  long releasesBefore = ClientSCMMetrics.getInstance().getCacheReleases();

  // Attempt the release with an application id that holds no reference
  // on the key; this must be a no-op.
  ReleaseSharedCacheResourceRequest req =
      recordFactory.newRecordInstance(ReleaseSharedCacheResourceRequest.class);
  req.setAppId(createAppId(2, 2L));
  req.setResourceKey("key1");
  clientSCMProxy.release(req);

  // The original reference survives and the metric does not move.
  assertEquals(1, store.getResourceReferences("key1").size());
  assertEquals("Client SCM metrics were updated when a release did not happen",
      0, ClientSCMMetrics.getInstance().getCacheReleases() - releasesBefore);
}
Usage example of org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest from the Apache Hadoop project.
Taken from the class TestClientSCMProtocolService, method testRelease_ExistingEntry_WithAppId.
@Test
public void testRelease_ExistingEntry_WithAppId() throws Exception {
  // Seed the store with one resource referenced by the current test user
  // under application (1, 1L).
  store.addResource("key1", "foo.jar");
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  store.addResourceReference("key1",
      new SharedCacheResourceReference(createAppId(1, 1L),
          currentUser.getShortUserName()));
  assertEquals(1, store.getResourceReferences("key1").size());

  long releasesBefore = ClientSCMMetrics.getInstance().getCacheReleases();

  // Release using the exact application id that owns the reference.
  ReleaseSharedCacheResourceRequest req =
      recordFactory.newRecordInstance(ReleaseSharedCacheResourceRequest.class);
  req.setAppId(createAppId(1, 1L));
  req.setResourceKey("key1");
  clientSCMProxy.release(req);

  // The reference is removed and exactly one release is recorded.
  assertEquals(0, store.getResourceReferences("key1").size());
  assertEquals("Client SCM metrics aren't updated.",
      1, ClientSCMMetrics.getInstance().getCacheReleases() - releasesBefore);
}
Usage example of org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest from the Apache Hadoop project.
Taken from the class SharedCacheClientImpl, method release.
/**
 * Notifies the shared cache manager that the given application no longer
 * needs the resource identified by {@code resourceKey}.
 *
 * @param applicationId the application releasing its reference
 * @param resourceKey the checksum/key of the cached resource
 * @throws YarnException if the RPC to the shared cache manager fails
 */
@Override
public void release(ApplicationId applicationId, String resourceKey) throws YarnException {
  ReleaseSharedCacheResourceRequest releaseRequest =
      Records.newRecord(ReleaseSharedCacheResourceRequest.class);
  releaseRequest.setResourceKey(resourceKey);
  releaseRequest.setAppId(applicationId);
  try {
    // The response carries no payload, so it is intentionally discarded.
    this.scmClient.release(releaseRequest);
  } catch (Exception e) {
    // The RPC layer can surface failures (e.g. connection errors) as
    // unchecked exceptions; normalize them all to YarnException while
    // preserving the original cause.
    throw new YarnException(e);
  }
}
Aggregations