Use of java.util.concurrent.RejectedExecutionException in project hadoop by apache.
The class TestFileSystemOperationsWithThreads, method testDeleteThreadPoolExecuteSingleThreadFailure.
/*
 * Test case for the delete operation with multiple threads and flat listing
 * enabled, where a single thread fails to be scheduled.
 */
@Test
public void testDeleteThreadPoolExecuteSingleThreadFailure() throws Exception {
  // Spy on the Azure file system object and return the mocked thread pool.
  NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
  // Spy on a thread pool executor and link it to the Azure file system object.
  String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
  AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
      mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread",
          "Delete", path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
  // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
  Mockito.when(
      mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread",
          "Delete", path, NativeAzureFileSystem.AZURE_DELETE_THREADS))
      .thenReturn(mockThreadPoolExecutor);
  // Create a thread executor and link it to the mocked thread pool executor object.
  ThreadPoolExecutor mockThreadExecutor = Mockito.spy(mockThreadPoolExecutor.getThreadPool(7));
  Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
  // Stub the thread executor to run the first request for real and reject all later ones.
  Mockito.doCallRealMethod().doThrow(new RejectedExecutionException())
      .when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
  validateDeleteFolder(mockFs, "root");
  // Validate from the logs that threads were enabled and that unused threads are reported.
  String content = logs.getOutput();
  assertTrue(content.contains("Using thread pool for Delete operation with threads 7"));
  assertTrue(content.contains("6 threads not used for Delete operation on blob"));
}
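As an aside, the doCallRealMethod().doThrow(...) chain above stubs consecutive calls: the first execute() invokes the real pool, every later call throws. Below is a minimal standalone sketch of that behavior, not part of the Hadoop test; the class name and pool size are illustrative.

import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.mockito.Mockito;

public class RejectedExecutionStubbingDemo {
  public static void main(String[] args) throws Exception {
    ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(2);
    ThreadPoolExecutor spy = Mockito.spy(pool);
    // First call: real behavior. Every subsequent call: RejectedExecutionException.
    Mockito.doCallRealMethod()
        .doThrow(new RejectedExecutionException())
        .when(spy).execute(Mockito.any(Runnable.class));

    spy.execute(() -> System.out.println("first task runs"));   // real method
    try {
      spy.execute(() -> System.out.println("never runs"));
    } catch (RejectedExecutionException e) {
      System.out.println("second task rejected, as stubbed");
    }
    spy.shutdown();
    spy.awaitTermination(10, TimeUnit.SECONDS);
  }
}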
Use of java.util.concurrent.RejectedExecutionException in project hadoop by apache.
The class TestFileSystemOperationsWithThreads, method testDeleteThreadPoolExecuteFailure.
/*
 * Test case for the delete operation with multiple threads and flat listing
 * enabled, where every thread fails to be scheduled.
 */
@Test
public void testDeleteThreadPoolExecuteFailure() throws Exception {
  // Mock the thread pool executor to throw an exception for all requests.
  ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class);
  Mockito.doThrow(new RejectedExecutionException())
      .when(mockThreadExecutor).execute(Mockito.any(Runnable.class));
  // Spy on the Azure file system object and return the mocked thread pool.
  NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs);
  String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root")));
  AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy(
      mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread",
          "Delete", path, NativeAzureFileSystem.AZURE_DELETE_THREADS));
  Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor);
  // With a single iteration, we would have created 7 blobs, resulting in 7 threads.
  Mockito.when(
      mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread",
          "Delete", path, NativeAzureFileSystem.AZURE_DELETE_THREADS))
      .thenReturn(mockThreadPoolExecutor);
  validateDeleteFolder(mockFs, "root");
  // Validate from the logs that threads were disabled and the operation was serialized.
  String content = logs.getOutput();
  assertTrue(content.contains("Rejected execution of thread for Delete operation on blob"));
  assertTrue(content.contains("Serializing the Delete operation"));
}
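Unlike the spy in the previous test, Mockito.mock(ThreadPoolExecutor.class) has no real behavior at all, so every execute() call is rejected and no thread ever starts, which is what drives executeParallel (shown further below) into its serial fallback. A minimal sketch of that stubbing in isolation follows; the class name is hypothetical.

import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import org.mockito.Mockito;

public class AlwaysRejectDemo {
  public static void main(String[] args) {
    ThreadPoolExecutor mock = Mockito.mock(ThreadPoolExecutor.class);
    // Every execute() call throws; there is no "first real call" here.
    Mockito.doThrow(new RejectedExecutionException())
        .when(mock).execute(Mockito.any(Runnable.class));
    try {
      mock.execute(() -> { });
    } catch (RejectedExecutionException e) {
      System.out.println("every call rejected, as stubbed");
    }
  }
}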
Use of java.util.concurrent.RejectedExecutionException in project hadoop by apache.
The class NonAggregatingLogHandler, method recover.
private void recover() throws IOException {
  if (stateStore.canRecover()) {
    RecoveredLogDeleterState state = stateStore.loadLogDeleterState();
    long now = System.currentTimeMillis();
    for (Map.Entry<ApplicationId, LogDeleterProto> entry : state.getLogDeleterMap().entrySet()) {
      ApplicationId appId = entry.getKey();
      LogDeleterProto proto = entry.getValue();
      long deleteDelayMsec = proto.getDeletionTime() - now;
      if (LOG.isDebugEnabled()) {
        LOG.debug("Scheduling deletion of " + appId + " logs in " + deleteDelayMsec + " msec");
      }
      LogDeleterRunnable logDeleter = new LogDeleterRunnable(proto.getUser(), appId);
      try {
        sched.schedule(logDeleter, deleteDelayMsec, TimeUnit.MILLISECONDS);
      } catch (RejectedExecutionException e) {
        // Handling this event in local thread before starting threads
        // or after calling sched.shutdownNow().
        logDeleter.run();
      }
    }
  }
}
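The catch block shows a common degradation pattern: if the scheduler rejects the task (because the service has not started its threads yet, or has already been shut down), the task runs synchronously in the calling thread instead of being lost. A minimal standalone sketch of the pattern, with illustrative names and a forced shutdown to provoke the rejection:

import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ScheduleOrRunInlineDemo {
  public static void main(String[] args) {
    ScheduledExecutorService sched = Executors.newScheduledThreadPool(1);
    sched.shutdownNow(); // force rejection, mirroring the shutdownNow() case above

    Runnable logDeleter = () -> System.out.println("deleting logs");
    try {
      sched.schedule(logDeleter, 10, TimeUnit.MILLISECONDS);
    } catch (RejectedExecutionException e) {
      logDeleter.run(); // degrade to synchronous execution rather than dropping the task
    }
  }
}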
Use of java.util.concurrent.RejectedExecutionException in project hadoop by apache.
The class NonAggregatingLogHandler, method handle.
@SuppressWarnings("unchecked")
@Override
public void handle(LogHandlerEvent event) {
  switch (event.getType()) {
  case APPLICATION_STARTED:
    LogHandlerAppStartedEvent appStartedEvent = (LogHandlerAppStartedEvent) event;
    this.appOwners.put(appStartedEvent.getApplicationId(), appStartedEvent.getUser());
    this.dispatcher.getEventHandler().handle(
        new ApplicationEvent(appStartedEvent.getApplicationId(),
            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED));
    break;
  case CONTAINER_FINISHED:
    // Ignore
    break;
  case APPLICATION_FINISHED:
    LogHandlerAppFinishedEvent appFinishedEvent = (LogHandlerAppFinishedEvent) event;
    ApplicationId appId = appFinishedEvent.getApplicationId();
    // Schedule - so that logs are available on the UI till they're deleted.
    LOG.info("Scheduling Log Deletion for application: " + appId
        + ", with delay of " + this.deleteDelaySeconds + " seconds");
    String user = appOwners.remove(appId);
    if (user == null) {
      LOG.error("Unable to locate user for " + appId);
      // Send LOG_HANDLING_FAILED out.
      NonAggregatingLogHandler.this.dispatcher.getEventHandler().handle(
          new ApplicationEvent(appId, ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED));
      break;
    }
    LogDeleterRunnable logDeleter = new LogDeleterRunnable(user, appId);
    long deletionTimestamp = System.currentTimeMillis() + this.deleteDelaySeconds * 1000;
    LogDeleterProto deleterProto = LogDeleterProto.newBuilder()
        .setUser(user)
        .setDeletionTime(deletionTimestamp)
        .build();
    try {
      stateStore.storeLogDeleter(appId, deleterProto);
    } catch (IOException e) {
      LOG.error("Unable to record log deleter state", e);
    }
    try {
      sched.schedule(logDeleter, this.deleteDelaySeconds, TimeUnit.SECONDS);
    } catch (RejectedExecutionException e) {
      // Handling this event in local thread before starting threads
      // or after calling sched.shutdownNow().
      logDeleter.run();
    }
    break;
  default:
    // Ignore
    break;
  }
}
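One design point worth noting across the two methods above: handle() persists an absolute deletion timestamp via stateStore.storeLogDeleter(...) before scheduling the task, and recover() replays that state after a NodeManager restart by computing deleteDelayMsec as the stored time minus the current time. If the restart happens after the deadline, that delay is negative, which a ScheduledExecutorService treats as a request for immediate execution, so overdue deletions still run.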
Use of java.util.concurrent.RejectedExecutionException in project hadoop by apache.
The class AzureFileSystemThreadPoolExecutor, method executeParallel.
/**
 * Execute the file operation in parallel using threads. All threads work on a
 * single shared working set of files stored in the input 'contents'. The
 * synchronization between threads is achieved by retrieving an atomic index
 * into the array: once a thread gets an index, it retrieves the file at that
 * index and initiates the file operation. The advantage of this approach is
 * that the file operation never gets serialized on any single thread. Also,
 * the input list is not modified, so the caller can reuse it for other
 * purposes.
 *
 * This implementation treats the failure of the operation on a single file
 * as an overall operation failure. All threads bail out of their execution
 * as soon as they detect that any single thread has either hit an exception
 * or had its operation fail.
 *
 * @param contents
 *          List of blobs on which the operation is to be done.
 * @param threadOperation
 *          The actual operation to be executed by each thread on a file.
 * @return true if the operation succeeded, false if it failed.
 * @throws IOException
 */
boolean executeParallel(FileMetadata[] contents, AzureFileSystemThreadTask threadOperation)
    throws IOException {
  boolean operationStatus = false;
  boolean threadsEnabled = false;
  int threadCount = this.threadCount;
  ThreadPoolExecutor ioThreadPool = null;
  // Start time for the file operation.
  long start = Time.monotonicNow();
  // If there are fewer files than threads, reduce the thread count to the file count.
  threadCount = Math.min(contents.length, threadCount);
  if (threadCount > 1) {
    try {
      ioThreadPool = getThreadPool(threadCount);
      threadsEnabled = true;
    } catch (Exception e) {
      // The possibility of this scenario is very remote. Added as a safety net.
      LOG.warn("Failed to create thread pool with threads {} for operation {} on blob {}."
          + " Use config {} to set less number of threads. Setting config value to <= 1 will disable threads.",
          threadCount, operation, key, config);
    }
  } else {
    LOG.warn("Disabling threads for {} operation as thread count {} is <= 1",
        operation, threadCount);
  }
  if (threadsEnabled) {
    LOG.debug("Using thread pool for {} operation with threads {}", operation, threadCount);
    boolean started = false;
    AzureFileSystemThreadRunnable runnable =
        new AzureFileSystemThreadRunnable(contents, threadOperation, operation);
    // Don't start any new requests if there is an exception from any one thread.
    for (int i = 0;
        i < threadCount && runnable.lastException == null && runnable.operationStatus;
        i++) {
      try {
        ioThreadPool.execute(runnable);
        started = true;
      } catch (RejectedExecutionException ex) {
        // If a thread can't be scheduled, report the error and move on to the
        // next thread. Don't fail the operation due to this issue.
        LOG.error("Rejected execution of thread for {} operation on blob {}."
            + " Continuing with existing threads. Use config {} to set less number of threads"
            + " to avoid this error", operation, key, config);
      }
    }
    // Stop accepting any new execute requests.
    ioThreadPool.shutdown();
    try {
      // Wait for the threads to terminate. Keep the timeout at a very large value.
      ioThreadPool.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
    } catch (InterruptedException intrEx) {
      // If the current thread was interrupted, shut down all threads now.
      ioThreadPool.shutdownNow();
      // Restore the interrupted status.
      Thread.currentThread().interrupt();
      LOG.error("Threads got interrupted {} blob operation for {} ", operation, key);
    }
    int threadsNotUsed = threadCount - runnable.threadsUsed.get();
    if (threadsNotUsed > 0) {
      LOG.warn("{} threads not used for {} operation on blob {}",
          threadsNotUsed, operation, key);
    }
    if (!started) {
      // No threads started. Fall back to serial mode.
      threadsEnabled = false;
      LOG.info("Not able to schedule threads to {} blob {}. Fall back to {} blob serially.",
          operation, key, operation);
    } else {
      IOException lastException = runnable.lastException;
      // Even with no exception and a successful status, consider the operation
      // a failure if the file operations were not done on all files.
      if (lastException == null && runnable.operationStatus
          && runnable.filesProcessed.get() < contents.length) {
        LOG.error("{} failed as operation on subfolders and files failed.", operation);
        lastException = new IOException(operation
            + " failed as operation on subfolders and files failed.");
      }
      if (lastException != null) {
        // Raise the same exception.
        throw lastException;
      }
      operationStatus = runnable.operationStatus;
    }
  }
  if (!threadsEnabled) {
    // No threads. Serialize the operation. Clear any previous exceptions.
    LOG.debug("Serializing the {} operation", operation);
    for (int i = 0; i < contents.length; i++) {
      if (!threadOperation.execute(contents[i])) {
        LOG.warn("Failed to {} file {}", operation, contents[i]);
        return false;
      }
    }
    // Operation succeeded.
    operationStatus = true;
  }
  // Log the duration of the file operation.
  long end = Time.monotonicNow();
  LOG.info("Time taken for {} operation is: {} ms with threads: {}",
      operation, (end - start), threadCount);
  return operationStatus;
}
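Below is a minimal standalone sketch of the atomic-index scheme the Javadoc above describes: a single Runnable instance is submitted once per thread, and each worker claims the next file by incrementing a shared AtomicInteger, so no file is processed twice and the input array is never mutated. The class name, worker count, and file list are illustrative, not from the Hadoop code base.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIndexDemo {
  public static void main(String[] args) throws InterruptedException {
    String[] files = { "a", "b", "c", "d", "e", "f", "g" };
    AtomicInteger next = new AtomicInteger();
    // One Runnable instance shared by every worker, mirroring how a single
    // AzureFileSystemThreadRunnable is submitted once per thread above.
    Runnable worker = () -> {
      int i;
      // Atomically claim the next unprocessed index; stop when the array is exhausted.
      while ((i = next.getAndIncrement()) < files.length) {
        System.out.println(Thread.currentThread().getName() + " -> " + files[i]);
      }
    };
    ExecutorService pool = Executors.newFixedThreadPool(3);
    for (int t = 0; t < 3; t++) {
      pool.execute(worker); // same instance submitted once per thread
    }
    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.MINUTES);
  }
}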