Use of java.util.Collections in project kie-wb-common by kiegroup.
From the class LibraryServiceImpl, method getProjectAssets:
@Override
public AssetQueryResult getProjectAssets(final ProjectAssetsQuery query) {
checkNotNull("query", query);
final boolean projectStillExists = ioService.exists(Paths.convert(query.getProject().getBranch().getPath()));
if (!projectStillExists) {
log.info("Asset lookup result: project [{}] does not exist.", projectIdentifierFrom(query));
return AssetQueryResult.nonexistent();
} else if (!indexOracle.isIndexed(query.getProject())) {
log.info("Asset lookup result: project [{}] is not indexed.", projectIdentifierFrom(query));
return AssetQueryResult.unindexed();
}
final HashSet<ValueIndexTerm> queryTerms = buildProjectAssetsQuery(query);
final PageResponse<RefactoringPageRow> findRulesByProjectQuery = refactoringQueryService.query(new RefactoringPageRequest(FindAllLibraryAssetsQuery.NAME, queryTerms, query.getStartIndex(), query.getAmount(), Boolean.TRUE));
final List<FolderItem> assets = findRulesByProjectQuery.getPageRowList().stream().map(row -> {
final Path path = (Path) row.getValue();
return new FolderItem(path, path.getFileName(), FolderItemType.FILE, false, Paths.readLockedBy(path), Collections.<String>emptyList(), explorerServiceHelper.getRestrictedOperations(path));
}).collect(Collectors.toList());
log.info("Asset lookup result: project [{}] is indexed with {} index hits.", projectIdentifierFrom(query), assets.size());
return AssetQueryResult.normal(assets.stream().map(asset -> {
AssetInfo info = null;
try {
final Map<String, Object> attributes = ioService.readAttributes(Paths.convert((Path) asset.getItem()));
final FileTime lastModifiedFileTime = (FileTime) getAttribute(LibraryService.LAST_MODIFIED_TIME, attributes).get();
final FileTime createdFileTime = (FileTime) getAttribute(LibraryService.CREATED_TIME, attributes).get();
final Date lastModifiedTime = new Date(lastModifiedFileTime.toMillis());
final Date createdTime = new Date(createdFileTime.toMillis());
info = new AssetInfo(asset, lastModifiedTime, createdTime);
} catch (NoSuchFileException nfe) {
log.debug("File '" + asset.getFileName() + "' in LibraryIndex but not VFS. Suspected deletion. Skipping.");
}
return Optional.ofNullable(info);
}).filter(Optional::isPresent).map(Optional::get).collect(Collectors.toList()));
}
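The Collections call worth noting above is Collections.<String>emptyList(), passed as the FolderItem's lock list: a shared, immutable, correctly typed empty list rather than a fresh allocation per asset. Below is a minimal, self-contained sketch of the same idiom; the Item class and its fields are illustrative stand-ins, not kie-wb-common types.

import java.util.Collections;
import java.util.List;

public class EmptyListDefaultExample {

    // Illustrative value object that carries an optional list of tags.
    static final class Item {
        private final String name;
        private final List<String> tags;

        Item(String name, List<String> tags) {
            this.name = name;
            this.tags = tags;
        }

        @Override
        public String toString() {
            return name + " " + tags;
        }
    }

    public static void main(String[] args) {
        // Collections.<String>emptyList() returns the shared immutable empty list;
        // the explicit <String> witness pins the element type where the compiler
        // cannot infer it from the target type (e.g. varargs or a ternary branch).
        Item item = new Item("rule.drl", Collections.<String>emptyList());
        System.out.println(item); // prints: rule.drl []
    }
}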
Use of java.util.Collections in project kylo by Teradata.
From the class SparkShellProxyController, method fileMetadata:
@POST
@Path(FILE_METADATA)
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation("returns filemetadata based upon the list of file paths in the dataset.")
@ApiResponses({ @ApiResponse(code = 200, message = "Returns the status of the file-metadata job.", response = TransformResponse.class), @ApiResponse(code = 400, message = "The requested data source does not exist.", response = RestResponseStatus.class), @ApiResponse(code = 500, message = "There was a problem processing the data.", response = RestResponseStatus.class) })
public Response fileMetadata(com.thinkbiganalytics.kylo.catalog.rest.model.DataSet dataSet) {
TransformRequest request = new TransformRequest();
DataSet decrypted = catalogModelTransform.decryptOptions(dataSet);
request.setScript(FileMetadataScalaScriptGenerator.getScript(DataSetUtil.getPaths(decrypted).orElseGet(Collections::emptyList), DataSetUtil.mergeTemplates(decrypted).getOptions()));
final SparkShellProcess process = getSparkShellProcess();
return getModifiedTransformResponse(() -> Optional.of(restClient.transform(process, request)), new FileMetadataTransformResponseModifier(fileMetadataTrackerService));
}
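Here the Collections usage is the method reference Collections::emptyList, handed to Optional.orElseGet so the generated script always receives a non-null path list. A minimal sketch of the same pattern follows; findPaths is a hypothetical lookup, not Kylo's DataSetUtil API.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;

public class OrElseGetEmptyListExample {

    // Illustrative lookup that only has paths when the data set is configured.
    static Optional<List<String>> findPaths(boolean configured) {
        if (configured) {
            return Optional.of(Arrays.asList("/data/a.csv", "/data/b.csv"));
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        // orElseGet takes a Supplier, so Collections::emptyList is only invoked
        // when the Optional is empty, and it returns the shared immutable list.
        List<String> paths = findPaths(false).orElseGet(Collections::emptyList);
        System.out.println(paths.size()); // prints: 0
    }
}

Because orElseGet is lazy, nothing is allocated on the hit path, and on the miss path the shared empty-list instance is returned, which is why it is usually preferred over orElse(new ArrayList<>()).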
Use of java.util.Collections in project ignite by apache.
From the class CacheAffinitySharedManager, method processClientCacheStartRequests:
/**
* @param crd Coordinator flag.
* @param msg Change request.
* @param topVer Current topology version.
* @param discoCache Discovery data cache.
* @return Map of started caches (cache ID to near enabled flag).
*/
@Nullable
private Map<Integer, Boolean> processClientCacheStartRequests(boolean crd, ClientCacheChangeDummyDiscoveryMessage msg, AffinityTopologyVersion topVer, DiscoCache discoCache) {
Map<String, DynamicCacheChangeRequest> startReqs = msg.startRequests();
List<DynamicCacheDescriptor> startDescs = clientCachesToStart(msg.requestId(), startReqs);
if (startDescs == null || startDescs.isEmpty()) {
cctx.cache().completeClientCacheChangeFuture(msg.requestId(), null);
return null;
}
Map<Integer, GridDhtAssignmentFetchFuture> fetchFuts = U.newHashMap(startDescs.size());
Map<Integer, Boolean> startedInfos = U.newHashMap(startDescs.size());
List<StartCacheInfo> startCacheInfos = startDescs.stream().map(desc -> {
DynamicCacheChangeRequest changeReq = startReqs.get(desc.cacheName());
startedInfos.put(desc.cacheId(), changeReq.nearCacheConfiguration() != null);
return new StartCacheInfo(desc.cacheConfiguration(), desc, changeReq.nearCacheConfiguration(), topVer, changeReq.disabledAfterStart(), true);
}).collect(Collectors.toList());
Set<String> startedCaches = startCacheInfos.stream().map(info -> info.getCacheDescriptor().cacheName()).collect(Collectors.toSet());
try {
cctx.cache().prepareStartCaches(startCacheInfos);
} catch (IgniteCheckedException e) {
cctx.cache().closeCaches(startedCaches, false);
cctx.cache().completeClientCacheChangeFuture(msg.requestId(), e);
return null;
}
Set<CacheGroupDescriptor> groupDescs = startDescs.stream().map(DynamicCacheDescriptor::groupDescriptor).collect(Collectors.toSet());
for (CacheGroupDescriptor grpDesc : groupDescs) {
try {
CacheGroupContext grp = cctx.cache().cacheGroup(grpDesc.groupId());
assert grp != null : grpDesc.groupId();
assert !grp.affinityNode() || grp.isLocal() : grp.cacheOrGroupName();
// Skip for local caches.
if (grp.isLocal())
continue;
CacheGroupHolder grpHolder = grpHolders.get(grp.groupId());
assert !crd || (grpHolder != null && grpHolder.affinity().idealAssignmentRaw() != null);
if (grpHolder == null)
grpHolder = getOrCreateGroupHolder(topVer, grpDesc);
// If the current node is not a client node and it has no affinity holder for this group.
if (grpHolder.nonAffNode() && !cctx.localNode().isClient()) {
GridDhtPartitionsExchangeFuture excFut = context().exchange().lastFinishedFuture();
grp.topology().updateTopologyVersion(excFut, discoCache, -1, false);
// Exchange-free cache creation: just replace the client topology with the DHT topology.
// The topology should be initialized before use.
grp.topology().beforeExchange(excFut, true, false);
grpHolder = new CacheGroupAffNodeHolder(grp, grpHolder.affinity());
grpHolders.put(grp.groupId(), grpHolder);
GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId());
if (clientTop != null) {
grp.topology().update(grpHolder.affinity().lastVersion(), clientTop.partitionMap(true), clientTop.fullUpdateCounters(), Collections.<Integer>emptySet(), null, null, null, clientTop.lostPartitions());
excFut.validate(grp);
}
assert grpHolder.affinity().lastVersion().equals(grp.affinity().lastVersion());
} else if (!crd && !fetchFuts.containsKey(grp.groupId())) {
boolean topVerLessOrNotInitialized = !grp.topology().initialized() || grp.topology().readyTopologyVersion().compareTo(topVer) < 0;
if (grp.affinity().lastVersion().compareTo(topVer) < 0 || topVerLessOrNotInitialized) {
GridDhtAssignmentFetchFuture fetchFut = new GridDhtAssignmentFetchFuture(cctx, grp.groupId(), topVer, discoCache);
fetchFut.init(true);
fetchFuts.put(grp.groupId(), fetchFut);
}
}
} catch (IgniteCheckedException e) {
cctx.cache().closeCaches(startedCaches, false);
cctx.cache().completeClientCacheChangeFuture(msg.requestId(), e);
return null;
}
}
for (GridDhtAssignmentFetchFuture fetchFut : fetchFuts.values()) {
try {
CacheGroupContext grp = cctx.cache().cacheGroup(fetchFut.groupId());
assert grp != null;
GridDhtAffinityAssignmentResponse res = fetchAffinity(topVer, null, discoCache, grp.affinity(), fetchFut);
GridDhtPartitionFullMap partMap;
if (res != null) {
partMap = res.partitionMap();
assert partMap != null : res;
} else
partMap = new GridDhtPartitionFullMap(cctx.localNodeId(), cctx.localNode().order(), 1);
GridDhtPartitionsExchangeFuture exchFut = context().exchange().lastFinishedFuture();
grp.topology().updateTopologyVersion(exchFut, discoCache, -1, false);
GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId());
Set<Integer> lostParts = clientTop == null ? null : clientTop.lostPartitions();
grp.topology().update(topVer, partMap, null, Collections.emptySet(), null, null, null, lostParts);
if (clientTop == null)
grp.topology().detectLostPartitions(topVer, exchFut);
exchFut.validate(grp);
} catch (IgniteCheckedException e) {
cctx.cache().closeCaches(startedCaches, false);
cctx.cache().completeClientCacheChangeFuture(msg.requestId(), e);
return null;
}
}
for (DynamicCacheDescriptor desc : startDescs) {
if (desc.cacheConfiguration().getCacheMode() != LOCAL) {
CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());
assert grp != null;
grp.topology().onExchangeDone(null, grp.affinity().cachedAffinity(topVer), true);
}
}
cctx.cache().initCacheProxies(topVer, null);
startReqs.keySet().forEach(req -> cctx.cache().completeProxyInitialize(req));
cctx.cache().completeClientCacheChangeFuture(msg.requestId(), null);
return startedInfos;
}
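Both topology updates above pass an empty set where the API takes a set of partition ids and this particular call has none to report: Collections.<Integer>emptySet() in the first loop and the plain Collections.emptySet() in the second. A minimal sketch of that calling convention is below; updateCounters is an illustrative method, not Ignite's topology API.

import java.util.Collections;
import java.util.Set;

public class EmptySetArgumentExample {

    // Illustrative callee that treats an empty set as a valid "nothing changed" argument,
    // so callers never need to pass null.
    static void updateCounters(Set<Integer> changedPartitions) {
        for (Integer part : changedPartitions) {
            System.out.println("partition changed: " + part);
        }
        System.out.println("processed " + changedPartitions.size() + " change(s)");
    }

    public static void main(String[] args) {
        // Older call sites spell out the witness, Collections.<Integer>emptySet();
        // with Java 8+ target typing the plain Collections.emptySet() also compiles.
        updateCounters(Collections.<Integer>emptySet());
        updateCounters(Collections.emptySet());
    }
}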
Use of java.util.Collections in project ddf by codice.
From the class DumpCommand, method executeWithSubject:
@Override
protected final Object executeWithSubject() throws Exception {
if (signer == null) {
signer = new DigitalSignature(security);
}
if (FilenameUtils.getExtension(dirPath).equals("") && !dirPath.endsWith(File.separator)) {
dirPath += File.separator;
}
final File dumpDir = new File(dirPath);
if (!dumpDir.exists()) {
printErrorMessage("Directory [" + dirPath + "] must exist.");
console.println("If the directory does indeed exist, try putting the path in quotes.");
return null;
}
if (!dumpDir.isDirectory()) {
printErrorMessage("Path [" + dirPath + "] must be a directory.");
return null;
}
if (!SERIALIZED_OBJECT_ID.matches(transformerId)) {
transformers = getTransformers();
if (transformers == null) {
console.println(transformerId + " is an invalid metacard transformer.");
return null;
}
}
if (StringUtils.isNotBlank(zipFileName) && new File(dirPath + zipFileName).exists()) {
console.println("Cannot dump Catalog. Zip file " + zipFileName + " already exists.");
return null;
}
if (StringUtils.isNotBlank(zipFileName) && !zipFileName.endsWith(".zip")) {
zipFileName = zipFileName + ".zip";
}
securityLogger.audit("Called catalog:dump command with path : {}", dirPath);
CatalogFacade catalog = getCatalog();
SortBy sort = new SortByImpl(Core.ID, SortOrder.ASCENDING);
QueryImpl query = new QueryImpl(getFilter());
query.setRequestsTotalResultsCount(true);
query.setPageSize(pageSize);
query.setSortBy(sort);
final AtomicLong resultCount = new AtomicLong(0);
long start = System.currentTimeMillis();
BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<>(multithreaded);
RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
final ExecutorService executorService = new ThreadPoolExecutor(multithreaded, multithreaded, 0L, TimeUnit.MILLISECONDS, blockingQueue, StandardThreadFactoryBuilder.newThreadFactory("dumpCommandThread"), rejectedExecutionHandler);
QueryRequest queryRequest = new QueryRequestImpl(query);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Hits for Search: {}", catalog.query(queryRequest).getHits());
}
if (StringUtils.isNotBlank(zipFileName)) {
File outputFile = new File(dirPath + zipFileName);
createZip(catalog, queryRequest, outputFile, resultCount);
String alias = AccessController.doPrivileged((PrivilegedAction<String>) () -> System.getProperty(SystemBaseUrl.EXTERNAL_HOST));
String password = AccessController.doPrivileged((PrivilegedAction<String>) () -> System.getProperty("javax.net.ssl.keyStorePassword"));
try (InputStream inputStream = new FileInputStream(outputFile)) {
byte[] signature = signer.createDigitalSignature(inputStream, alias, password);
if (signature != null) {
String epoch = Long.toString(Instant.now().getEpochSecond());
String signatureFilepath = String.format("%sdump_%s.sig", dirPath, epoch);
FileUtils.writeByteArrayToFile(new File(signatureFilepath), signature);
}
}
} else {
ResultIterable.resultIterable(catalog::query, queryRequest).stream().map(Collections::singletonList).map(result -> new SourceResponseImpl(queryRequest, result)).forEach(response -> handleResult(response, executorService, dumpDir, resultCount));
}
executorService.shutdown();
boolean interrupted = false;
try {
while (!executorService.isTerminated()) {
try {
TimeUnit.MILLISECONDS.sleep(100);
} catch (InterruptedException e) {
interrupted = true;
}
}
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
}
long end = System.currentTimeMillis();
String elapsedTime = timeFormatter.print(new Period(start, end).withMillis(0));
console.printf(" %d file(s) dumped in %s\t%n", resultCount.get(), elapsedTime);
LOGGER.debug("{} file(s) dumped in {}", resultCount.get(), elapsedTime);
console.println();
securityLogger.audit("Exported {} files to {}", resultCount.get(), dirPath);
return null;
}
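In the non-zip branch, Collections::singletonList wraps each query Result in an immutable one-element list so the list-based SourceResponseImpl constructor can be reused per result. A minimal sketch of that stream shape follows; plain strings stand in for results and none of the DDF types are used.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;

public class SingletonListStreamExample {

    public static void main(String[] args) {
        List<String> results = Arrays.asList("metacard-1", "metacard-2", "metacard-3");

        // Each element becomes its own immutable one-element batch.
        List<List<String>> batches = results.stream()
                .map(Collections::singletonList)
                .collect(Collectors.toList());

        batches.forEach(batch -> System.out.println("dispatching batch: " + batch));
    }
}

Collections.singletonList allocates a tiny fixed-size wrapper per element, which is cheaper and clearer than building a one-element ArrayList when the downstream API insists on a List.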
Use of java.util.Collections in project groovy by apache.
From the class StaticTypeCheckingVisitor, method inferReturnTypeGenerics:
/**
* If a method call returns a parameterized type, then perform additional
* inference on the return type, so that the type gets actual type arguments.
* For example, the method {@code Arrays.asList(T...)} is parameterized with
* {@code T}, which can be deduced from explicit type arguments or from the call arguments.
*
* @param method the method node
* @param arguments the method call arguments
* @param receiver the object expression type
* @param explicitTypeHints type arguments (optional), for example {@code Collections.<String>emptyList()}
*/
protected ClassNode inferReturnTypeGenerics(final ClassNode receiver, final MethodNode method, final Expression arguments, final GenericsType[] explicitTypeHints) {
ClassNode returnType = method instanceof ConstructorNode ? method.getDeclaringClass() : method.getReturnType();
if (!GenericsUtils.hasUnresolvedGenerics(returnType)) {
// GROOVY-7538: replace "Type<?>" with "Type<? extends/super X>" for any "Type<T extends/super X>"
if (getGenericsWithoutArray(returnType) != null)
returnType = boundUnboundedWildcards(returnType);
return returnType;
}
if (method instanceof ExtensionMethodNode) {
ArgumentListExpression args = getExtensionArguments(receiver, method, arguments);
MethodNode extension = ((ExtensionMethodNode) method).getExtensionMethodNode();
return inferReturnTypeGenerics(receiver, extension, args, explicitTypeHints);
}
Map<GenericsTypeName, GenericsType> context = method.isStatic() || method instanceof ConstructorNode ? null : extractPlaceHoldersVisibleToDeclaration(receiver, method, arguments);
GenericsType[] methodGenericTypes = method instanceof ConstructorNode ? method.getDeclaringClass().getGenericsTypes() : applyGenericsContext(context, method.getGenericsTypes());
if (methodGenericTypes != null) {
Map<GenericsTypeName, GenericsType> resolvedPlaceholders = new HashMap<>();
for (GenericsType gt : methodGenericTypes) resolvedPlaceholders.put(new GenericsTypeName(gt.getName()), gt);
applyGenericsConnections(extractGenericsConnectionsFromArguments(methodGenericTypes, Arrays.stream(method.getParameters()).map(param -> new Parameter(applyGenericsContext(context, param.getType()), param.getName())).toArray(Parameter[]::new), arguments, explicitTypeHints), resolvedPlaceholders);
returnType = applyGenericsContext(resolvedPlaceholders, returnType);
}
if (context != null) {
returnType = applyGenericsContext(context, returnType);
if (receiver.getGenericsTypes() == null && receiver.redirect().getGenericsTypes() != null && GenericsUtils.hasUnresolvedGenerics(returnType)) {
// GROOVY-10049: do not return "Stream<E>" for raw type "List#stream()"
returnType = returnType.getPlainNodeReference();
}
}
// 3) resolve bounds of type parameters from calling context
returnType = applyGenericsContext(extractGenericsParameterMapOfThis(typeCheckingContext), returnType);
return returnType;
}
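The Javadoc's Collections.<String>emptyList() example is exactly the explicitTypeHints case: the caller supplies the type argument, so the checker does not have to infer T from the call arguments or the target type. A minimal caller-side sketch of the two situations the visitor distinguishes is shown below, written in plain Java since the inference question is the same there.

import java.util.Collections;
import java.util.List;

public class ExplicitTypeWitnessExample {

    static int totalLength(List<String> strings) {
        return strings.stream().mapToInt(String::length).sum();
    }

    public static void main(String[] args) {
        // Target typing: T is inferred as String from the assignment context.
        List<String> inferred = Collections.emptyList();

        // Explicit type argument: T is fixed to String regardless of context;
        // this is what the explicitTypeHints parameter models for the checker.
        int total = totalLength(Collections.<String>emptyList());

        System.out.println(inferred.size() + " / " + total); // prints: 0 / 0
    }
}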