Use of java.util.concurrent.RejectedExecutionHandler in project ddf by codice.
Class DumpCommand, method executeWithSubject:
@Override
protected Object executeWithSubject() throws Exception {
  if (FilenameUtils.getExtension(dirPath).equals("") && !dirPath.endsWith(File.separator)) {
    dirPath += File.separator;
  }

  final File dumpDir = new File(dirPath);

  if (!dumpDir.exists()) {
    printErrorMessage("Directory [" + dirPath + "] must exist.");
    console.println("If the directory does indeed exist, try putting the path in quotes.");
    return null;
  }

  if (!dumpDir.isDirectory()) {
    printErrorMessage("Path [" + dirPath + "] must be a directory.");
    return null;
  }

  if (!SERIALIZED_OBJECT_ID.matches(transformerId)) {
    transformers = getTransformers();
    if (transformers == null) {
      console.println(transformerId + " is an invalid metacard transformer.");
      return null;
    }
  }

  if (StringUtils.isNotBlank(zipFileName) && new File(dirPath + zipFileName).exists()) {
    console.println("Cannot dump Catalog. Zip file " + zipFileName + " already exists.");
    return null;
  }

  SecurityLogger.audit("Called catalog:dump command with path : {}", dirPath);

  CatalogFacade catalog = getCatalog();

  if (StringUtils.isNotBlank(zipFileName)) {
    zipArgs = new HashMap<>();
    zipArgs.put(FILE_PATH, dirPath + zipFileName);
  }

  QueryImpl query = new QueryImpl(getFilter());
  query.setRequestsTotalResultsCount(false);
  query.setPageSize(pageSize);

  Map<String, Serializable> props = new HashMap<>();
  // Avoid caching all results while dumping with native query mode
  props.put("mode", "native");

  final AtomicLong resultCount = new AtomicLong(0);
  long start = System.currentTimeMillis();

  SourceResponse response = catalog.query(new QueryRequestImpl(query, props));

  // Bounded queue plus CallerRunsPolicy: when every worker is busy and the queue is
  // full, the submitting thread runs the task itself instead of failing fast.
  BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<>(multithreaded);
  RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
  final ExecutorService executorService =
      new ThreadPoolExecutor(
          multithreaded,
          multithreaded,
          0L,
          TimeUnit.MILLISECONDS,
          blockingQueue,
          rejectedExecutionHandler);

  while (response.getResults().size() > 0) {
    response = catalog.query(new QueryRequestImpl(query, props));

    if (StringUtils.isNotBlank(zipFileName)) {
      try {
        Optional<QueryResponseTransformer> zipCompression = getZipCompression();
        if (zipCompression.isPresent()) {
          BinaryContent binaryContent = zipCompression.get().transform(response, zipArgs);
          if (binaryContent != null) {
            IOUtils.closeQuietly(binaryContent.getInputStream());
          }
          Long resultSize = (long) response.getResults().size();
          printStatus(resultCount.addAndGet(resultSize));
        }
      } catch (InvalidSyntaxException e) {
        LOGGER.info("No Zip Transformer found. Unable to export metacards to a zip file.");
      }
    } else if (multithreaded > 1) {
      final List<Result> results = new ArrayList<>(response.getResults());
      executorService.submit(
          () -> {
            boolean transformationFailed = false;
            for (final Result result : results) {
              Metacard metacard = result.getMetacard();
              try {
                exportMetacard(dumpDir, metacard);
              } catch (IOException | CatalogTransformerException e) {
                transformationFailed = true;
                LOGGER.debug("Failed to dump metacard {}", metacard.getId(), e);
                executorService.shutdownNow();
              }
              printStatus(resultCount.incrementAndGet());
            }
            if (transformationFailed) {
              LOGGER.info(
                  "One or more metacards failed to transform. Enable debug log for more details.");
            }
          });
    } else {
      for (final Result result : response.getResults()) {
        Metacard metacard = result.getMetacard();
        exportMetacard(dumpDir, metacard);
        printStatus(resultCount.incrementAndGet());
      }
    }

    if (response.getResults().size() < pageSize || pageSize == -1) {
      break;
    }

    if (pageSize > 0) {
      query.setStartIndex(query.getStartIndex() + pageSize);
    }
  }

  executorService.shutdown();

  while (!executorService.isTerminated()) {
    try {
      TimeUnit.MILLISECONDS.sleep(100);
    } catch (InterruptedException e) {
      // ignore
    }
  }

  long end = System.currentTimeMillis();
  String elapsedTime = timeFormatter.print(new Period(start, end).withMillis(0));
  console.printf(" %d file(s) dumped in %s\t%n", resultCount.get(), elapsedTime);
  LOGGER.debug("{} file(s) dumped in {}", resultCount.get(), elapsedTime);
  console.println();

  SecurityLogger.audit("Exported {} files to {}", resultCount.get(), dirPath);
  return null;
}
Use of java.util.concurrent.RejectedExecutionHandler in project ddf by codice.
Class DuplicateCommands, method duplicateInBatches:
/**
 * In batches, queries the queryFacade and ingests the resulting metacards into the
 * ingestFacade, repeating until the queryFacade returns no more metacards or maxMetacards
 * has been reached.
 *
 * @param queryFacade - the CatalogFacade to duplicate from
 * @param ingestFacade - the CatalogFacade to duplicate to
 * @param filter - the filter to query with
 */
protected void duplicateInBatches(
    CatalogFacade queryFacade, CatalogFacade ingestFacade, Filter filter) {
  AtomicInteger queryIndex = new AtomicInteger(1);

  final long originalQuerySize;
  if (maxMetacards > 0 && maxMetacards < batchSize) {
    originalQuerySize = maxMetacards;
  } else {
    originalQuerySize = batchSize;
  }

  final SourceResponse originalResponse =
      query(queryFacade, filter, queryIndex.get(), originalQuerySize);

  if (originalResponse == null) {
    return;
  }

  final long totalHits = originalResponse.getHits();
  if (totalHits <= 0) {
    LOGGER.debug("Query returned 0 hits.");
    return;
  }

  // If the maxMetacards is set, restrict the totalWanted to the number of maxMetacards
  final long totalWanted;
  if (maxMetacards > 0 && maxMetacards <= totalHits) {
    totalWanted = maxMetacards;
  } else {
    totalWanted = totalHits;
  }

  ingestMetacards(ingestFacade, getMetacardsFromSourceResponse(originalResponse));

  if (multithreaded > 1) {
    // Same backpressure pattern as in DumpCommand: a bounded queue with
    // CallerRunsPolicy makes the submitting thread run overflow tasks itself.
    BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<>(multithreaded);
    RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
    final ExecutorService executorService =
        new ThreadPoolExecutor(
            multithreaded,
            multithreaded,
            0L,
            TimeUnit.MILLISECONDS,
            blockingQueue,
            rejectedExecutionHandler);
    console.printf("Running a maximum of %d threads during replication.%n", multithreaded);

    printProgressAndFlush(start, totalWanted, ingestedCount.get());
    int index;
    while ((index = queryIndex.addAndGet(batchSize)) <= totalWanted) {
      final int i = index;
      executorService.submit(
          () -> {
            final SourceResponse response =
                query(queryFacade, filter, i, getQuerySizeFromIndex(totalWanted, i));
            if (response != null) {
              ingestMetacards(ingestFacade, getMetacardsFromSourceResponse(response));
            }
            printProgressAndFlush(start, totalWanted, ingestedCount.get());
          });
    }

    executorService.shutdown();

    while (!executorService.isTerminated()) {
      try {
        TimeUnit.SECONDS.sleep(1);
      } catch (InterruptedException e) {
        // ignore
      }
    }
  } else {
    while (queryIndex.addAndGet(batchSize) <= totalWanted) {
      printProgressAndFlush(start, totalWanted, ingestedCount.get());
      final SourceResponse response =
          query(
              queryFacade,
              filter,
              queryIndex.get(),
              getQuerySizeFromIndex(totalWanted, queryIndex.get()));
      if (response != null) {
        ingestMetacards(ingestFacade, getMetacardsFromSourceResponse(response));
      }
    }
  }

  printProgressAndFlush(start, totalWanted, ingestedCount.get());

  if (failedCount.get() > 0) {
    LOGGER.info("Not all records were ingested. [{}] failed", failedCount.get());
    if (StringUtils.isNotBlank(failedDir)) {
      try {
        writeFailedMetacards(failedMetacards);
      } catch (IOException e) {
        console.println("Error occurred while writing failed metacards to failedDir.");
      }
    }
  }
}
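Both usages wire the executor identically; the moving part specific to this method is the batch arithmetic. The helper getQuerySizeFromIndex is not shown in this listing; presumably it returns a full batch except for the last one, which must be truncated so the command never requests past totalWanted. A hedged sketch of that arithmetic (the method name comes from the code above; the body is an assumption, using the 1-based start indices the loop implies):

// Assumed implementation (helper not shown in the listing): request a full
// batch unless fewer than batchSize results remain before totalWanted.
// Example: totalWanted = 10, batchSize = 4 -> sizes 4, 4, 2 at indices 1, 5, 9.
private long getQuerySizeFromIndex(final long totalWanted, final int index) {
  return Math.min(batchSize, totalWanted - (index - 1));
}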