Use of alluxio.exception.status.ResourceExhaustedException in project alluxio by Alluxio.
The class MemoryPageStore, method put.
@Override
public void put(PageId pageId, byte[] page) throws ResourceExhaustedException, IOException {
  PageId pageKey = getKeyFromPageId(pageId);
  try {
    // Copy the page into a fresh buffer so later changes to the caller's array do not affect
    // the cached copy.
    byte[] mPage = new byte[page.length];
    System.arraycopy(page, 0, mPage, 0, page.length);
    mPageStoreMap.put(pageKey, mPage);
  } catch (Exception e) {
    throw new IOException("Failed to put cached data in memory for page " + pageId);
  }
}
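For context, a caller of put typically distinguishes a full store from a genuine I/O failure. Below is a minimal sketch of that pattern, not code from the Alluxio project: the PutTarget interface and tryPut helper are hypothetical and only mirror the put(PageId, byte[]) signature shown above, and the package for PageId is assumed.

import java.io.IOException;

import alluxio.client.file.cache.PageId; // assumed package for PageId
import alluxio.exception.status.ResourceExhaustedException;

// Hypothetical helper mirroring the put(PageId, byte[]) signature above; it reports whether the
// store had room instead of propagating the capacity error.
final class PagePutHelper {
  interface PutTarget {
    void put(PageId pageId, byte[] page) throws ResourceExhaustedException, IOException;
  }

  static boolean tryPut(PutTarget store, PageId pageId, byte[] page) throws IOException {
    try {
      store.put(pageId, page);
      return true; // the page was stored
    } catch (ResourceExhaustedException e) {
      return false; // the store is full; the caller may evict or fall back elsewhere
    }
  }
}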
Use of alluxio.exception.status.ResourceExhaustedException in project alluxio by Alluxio.
The class LocalCacheManager, method putAttempt.
private PutResult putAttempt(PageId pageId, byte[] page, CacheContext cacheContext,
    boolean forcedToEvict) {
  LOG.debug("putInternal({},{} bytes) enters", pageId, page.length);
  PageInfo victimPageInfo = null;
  CacheScope scopeToEvict;
  ReadWriteLock pageLock = getPageLock(pageId);
  try (LockResource r = new LockResource(pageLock.writeLock())) {
    try (LockResource r2 = new LockResource(mMetaLock.writeLock())) {
      if (mMetaStore.hasPage(pageId)) {
        LOG.debug("{} is already inserted before", pageId);
        // TODO(binfan): we should return more informative result in the future
        return PutResult.OK;
      }
      scopeToEvict = checkScopeToEvict(page.length, cacheContext.getCacheScope(),
          cacheContext.getCacheQuota(), forcedToEvict);
      if (scopeToEvict == null) {
        mMetaStore.addPage(pageId, new PageInfo(pageId, page.length, cacheContext.getCacheScope()));
      } else {
        if (mQuotaEnabled) {
          victimPageInfo = ((QuotaMetaStore) mMetaStore).evict(scopeToEvict);
        } else {
          victimPageInfo = mMetaStore.evict();
        }
        if (victimPageInfo == null) {
          LOG.error("Unable to find page to evict: space used {}, page length {}, cache size {}",
              mMetaStore.bytes(), page.length, mCacheSize);
          Metrics.PUT_EVICTION_ERRORS.inc();
          return PutResult.OTHER;
        }
      }
    }
    if (scopeToEvict == null) {
      try {
        mPageStore.put(pageId, page);
        // Bytes written to the cache
        MetricsSystem.meter(MetricKey.CLIENT_CACHE_BYTES_WRITTEN_CACHE.getName()).mark(page.length);
        return PutResult.OK;
      } catch (ResourceExhaustedException e) {
        undoAddPage(pageId);
        LOG.error("Failed to add page {} to pageStore", pageId, e);
        Metrics.PUT_STORE_WRITE_NO_SPACE_ERRORS.inc();
        return PutResult.NO_SPACE_LEFT;
      } catch (IOException e) {
        undoAddPage(pageId);
        LOG.error("Failed to add page {} to pageStore", pageId, e);
        Metrics.PUT_STORE_WRITE_ERRORS.inc();
        return PutResult.OTHER;
      }
    }
  }
  Pair<ReadWriteLock, ReadWriteLock> pageLockPair =
      getPageLockPair(pageId, victimPageInfo.getPageId());
  try (LockResource r1 = new LockResource(pageLockPair.getFirst().writeLock());
      LockResource r2 = new LockResource(pageLockPair.getSecond().writeLock())) {
    // phase1: remove the victim and add the new page in the metastore, in a critical section
    // protected by the metalock. The evictor will be updated inside the metastore.
    try (LockResource r3 = new LockResource(mMetaLock.writeLock())) {
      if (mMetaStore.hasPage(pageId)) {
        return PutResult.OK;
      }
      try {
        mMetaStore.removePage(victimPageInfo.getPageId());
      } catch (PageNotFoundException e) {
        LOG.debug("Page {} is unavailable to evict, likely due to a benign race",
            victimPageInfo.getPageId());
        return PutResult.BENIGN_RACING;
      }
      // Check if we are able to insert the page after evicting the victim page
      scopeToEvict = checkScopeToEvict(page.length, cacheContext.getCacheScope(),
          cacheContext.getCacheQuota(), false);
      if (scopeToEvict == null) {
        mMetaStore.addPage(pageId, new PageInfo(pageId, page.length, cacheContext.getCacheScope()));
      }
    }
    // phase2: remove the victim and add the new page in the pagestore
    // Regardless of enoughSpace, delete the victim as it has been removed from the metastore
    PageId victim = victimPageInfo.getPageId();
    try {
      mPageStore.delete(victim);
      // Bytes evicted from the cache
      MetricsSystem.meter(MetricKey.CLIENT_CACHE_BYTES_EVICTED.getName())
          .mark(victimPageInfo.getPageSize());
      // Pages evicted from the cache
      MetricsSystem.meter(MetricKey.CLIENT_CACHE_PAGES_EVICTED.getName()).mark();
    } catch (IOException | PageNotFoundException e) {
      if (scopeToEvict == null) {
        // Failed to evict the page; remove the new page from the metastore as there will not be
        // enough space
        undoAddPage(pageId);
      }
      LOG.error("Failed to delete page {} from pageStore", pageId, e);
      Metrics.PUT_STORE_DELETE_ERRORS.inc();
      return PutResult.OTHER;
    }
    if (scopeToEvict != null) {
      return PutResult.INSUFFICIENT_SPACE_EVICTED;
    }
    try {
      mPageStore.put(pageId, page);
      // Bytes written to the cache
      MetricsSystem.meter(MetricKey.CLIENT_CACHE_BYTES_WRITTEN_CACHE.getName()).mark(page.length);
      return PutResult.OK;
    } catch (ResourceExhaustedException e) {
      undoAddPage(pageId);
      LOG.error("Failed to add page {} to pageStore", pageId, e);
      Metrics.PUT_STORE_WRITE_NO_SPACE_ERRORS.inc();
      return PutResult.NO_SPACE_LEFT;
    } catch (IOException e) {
      // Failed to add the page; remove the new page from the metastore
      undoAddPage(pageId);
      LOG.error("Failed to add page {} to pageStore", pageId, e);
      Metrics.PUT_STORE_WRITE_ERRORS.inc();
      return PutResult.OTHER;
    }
  }
}
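The PutResult constants returned above drive the retry behavior of the public put path. Below is a minimal sketch of how a caller might react to them; the PutResult enum only mirrors the constants used in putAttempt, and the Attempt interface and putWithRetry helper are illustrative rather than the actual LocalCacheManager.put logic.

// A minimal sketch of reacting to putAttempt results; not the actual LocalCacheManager.put.
enum PutResult { OK, BENIGN_RACING, INSUFFICIENT_SPACE_EVICTED, NO_SPACE_LEFT, OTHER }

final class PutRetrySketch {
  interface Attempt {
    PutResult attempt(boolean forcedToEvict);
  }

  static boolean putWithRetry(Attempt attempt, int maxRetries) {
    boolean forcedToEvict = false;
    for (int i = 0; i <= maxRetries; i++) {
      switch (attempt.attempt(forcedToEvict)) {
        case OK:
          return true; // page cached
        case BENIGN_RACING:
          break; // the victim vanished under us; simply try again
        case INSUFFICIENT_SPACE_EVICTED:
          forcedToEvict = true; // still over quota after one eviction; force eviction next round
          break;
        case NO_SPACE_LEFT:
        case OTHER:
        default:
          return false; // unrecoverable for this put
      }
    }
    return false; // retries exhausted
  }
}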
Use of alluxio.exception.status.ResourceExhaustedException in project alluxio by Alluxio.
The class UfsFallbackLocalFileDataWriter, method writeChunk.
@Override
public void writeChunk(ByteBuf chunk) throws IOException {
  if (mIsWritingToLocal) {
    long pos = mLocalFileDataWriter.pos();
    try {
      // chunk.refcount++ to ensure the chunk is not garbage-collected if writeChunk fails
      chunk.retain();
      // chunk.refcount-- inside regardless of exception
      mLocalFileDataWriter.writeChunk(chunk);
      // chunk.refcount-- on success
      chunk.release();
      return;
    } catch (ResourceExhaustedException e) {
      LOG.warn("Fallback to write to UFS for block {} due to a failure of insufficient space "
          + "on the local worker: {}", mBlockId, e.toString());
      mIsWritingToLocal = false;
    }
    try {
      if (pos == 0) {
        // Nothing has been written to the temp block, so we can cancel this failed local writer
        // and clean up the temp block.
        mLocalFileDataWriter.cancel();
      } else {
        // Note that we cannot cancel mLocalFileDataWriter now, as the cancel message may arrive
        // and clean the temp block before it is written to UFS.
        mLocalFileDataWriter.flush();
      }
      // Close the block writer. We do not close mLocalFileDataWriter, to prevent the worker from
      // completing the block, committing it, and removing it.
      // mLocalFileDataWriter.getWriter().close();
      mGrpcDataWriter = GrpcDataWriter.create(mContext, mWorkerNetAddress, mBlockId, mBlockSize,
          RequestType.UFS_FALLBACK_BLOCK, mOutStreamOptions);
      // Send the fallback init request (with the position already written locally) only
      // when there is data already written.
      if (pos > 0) {
        mGrpcDataWriter.writeFallbackInitRequest(pos);
      }
    } catch (Exception e) {
      // chunk.refcount-- on exception
      chunk.release();
      throw new IOException("Failed to switch to writing block " + mBlockId + " to UFS", e);
    }
  }
  // refcount-- inside to release the chunk
  mGrpcDataWriter.writeChunk(chunk);
}
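The retain/release calls above follow Netty's reference-counting discipline: take an extra reference before handing the chunk to a writer that releases it internally, then make sure exactly one release happens on every path. Below is a minimal sketch of that pattern; the ChunkSink interface and writeWithFallback helper are hypothetical stand-ins for the data writers.

import io.netty.buffer.ByteBuf;

// Sketch of the reference-counting pattern above: retain before a write that may fail, and make
// sure exactly one release happens on every path. ChunkSink is a hypothetical stand-in for the
// local and gRPC data writers, each of which releases the chunk internally.
final class RefCountSketch {
  interface ChunkSink {
    void write(ByteBuf chunk) throws Exception;
  }

  static void writeWithFallback(ChunkSink primary, ChunkSink fallback, ByteBuf chunk)
      throws Exception {
    chunk.retain(); // keep the chunk alive in case the primary write fails
    try {
      primary.write(chunk); // releases the primary's reference, on success or failure
      chunk.release(); // drop the extra reference taken above
      return;
    } catch (Exception e) {
      // the primary already released its reference; our retained reference keeps the buffer valid
    }
    fallback.write(chunk); // the fallback releases the remaining reference
  }
}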
Use of alluxio.exception.status.ResourceExhaustedException in project alluxio by Alluxio.
The class LocalCacheManagerTest, method noSpaceLeftPageStorePut.
@Test
public void noSpaceLeftPageStorePut() throws Exception {
  LocalPageStore pageStore = new LocalPageStore(PageStoreOptions.create(mConf).toOptions()) {
    private long mFreeBytes = PAGE_SIZE_BYTES;

    @Override
    public void delete(PageId pageId) throws IOException, PageNotFoundException {
      mFreeBytes += PAGE_SIZE_BYTES;
      super.delete(pageId);
    }

    @Override
    public void put(PageId pageId, byte[] page) throws IOException {
      if (mFreeBytes < page.length) {
        throw new ResourceExhaustedException("No space left on device");
      }
      mFreeBytes -= page.length;
      super.put(pageId, page);
    }
  };
  mCacheManager = createLocalCacheManager(mConf, mMetaStore,
      new TimeBoundPageStore(pageStore, mPageStoreOptions));
  assertTrue(mCacheManager.put(PAGE_ID1, PAGE1));
  // trigger evicting PAGE1
  assertTrue(mCacheManager.put(PAGE_ID2, PAGE2));
  assertEquals(0, mCacheManager.get(PAGE_ID1, PAGE1.length, mBuf, 0));
}
Use of alluxio.exception.status.ResourceExhaustedException in project alluxio by Alluxio.
The class JobMasterTest, method flowControl.
@Test
public void flowControl() throws Exception {
  PlanCoordinator coordinator = PowerMockito.mock(PlanCoordinator.class);
  mockStatic(PlanCoordinator.class);
  when(PlanCoordinator.create(any(CommandManager.class), any(JobServerContext.class), anyList(),
      anyLong(), any(JobConfig.class), any(Consumer.class))).thenReturn(coordinator);
  TestPlanConfig jobConfig = new TestPlanConfig("/test");
  // Fill the job master up to its configured job capacity
  for (long i = 0; i < TEST_JOB_MASTER_JOB_CAPACITY; i++) {
    mJobMaster.run(jobConfig);
  }
  // One more submission should be rejected with ResourceExhaustedException
  try {
    mJobMaster.run(jobConfig);
    Assert.fail("should not be able to run more jobs than job master capacity");
  } catch (ResourceExhaustedException e) {
    Assert.assertEquals(ExceptionMessage.JOB_MASTER_FULL_CAPACITY.getMessage(
        ServerConfiguration.get(PropertyKey.JOB_MASTER_JOB_CAPACITY)), e.getMessage());
  }
}
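The assertion above relies on the job master rejecting submissions once the configured job capacity is reached. Below is a minimal sketch of such a capacity guard, assuming only that ResourceExhaustedException accepts a message string (as used in the test higher up); the class, field, and method names are illustrative and not the actual JobMaster implementation.

import alluxio.exception.status.ResourceExhaustedException;

// Hypothetical capacity guard of the kind the test exercises: reject new jobs once the configured
// job capacity is reached. Names (mCapacity, mRunningJobs, registerJob) are illustrative.
final class JobCapacityGuardSketch {
  private final long mCapacity;
  private long mRunningJobs;

  JobCapacityGuardSketch(long capacity) {
    mCapacity = capacity;
  }

  synchronized void registerJob() throws ResourceExhaustedException {
    if (mRunningJobs >= mCapacity) {
      throw new ResourceExhaustedException("Job master is at full capacity of " + mCapacity
          + " jobs");
    }
    mRunningJobs++;
  }
}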