Use of java.util.stream.Stream in project spring-framework by spring-projects.
The class StreamConverterTests, method convertFromListToRawStream.
@Test
@SuppressWarnings("resource")
public void convertFromListToRawStream() throws NoSuchFieldException {
List<String> source = Arrays.asList("1", "2", "3");
TypeDescriptor rawStreamType = new TypeDescriptor(Types.class.getField("rawStream"));
Object result = this.conversionService.convert(source, rawStreamType);
assertNotNull("Converted object must not be null", result);
assertTrue("Converted object must be a stream", result instanceof Stream);
@SuppressWarnings("unchecked") Stream<Object> content = (Stream<Object>) result;
StringBuilder sb = new StringBuilder();
content.forEach(sb::append);
assertEquals("123", sb.toString());
}
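For context, here is a minimal, self-contained sketch of the same conversion outside the test harness. It assumes Spring's DefaultConversionService, which registers the collection-to-Stream converter by default; the RawStreamConversionDemo class and its rawStream field are hypothetical stand-ins for the test's Types fixture.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Stream;

import org.springframework.core.convert.TypeDescriptor;
import org.springframework.core.convert.support.DefaultConversionService;

public class RawStreamConversionDemo {

    // Hypothetical fixture field: its declared type is a raw Stream, and it
    // exists only so we can build a TypeDescriptor for the conversion target.
    @SuppressWarnings("rawtypes")
    public Stream rawStream;

    public static void main(String[] args) throws NoSuchFieldException {
        DefaultConversionService conversionService = new DefaultConversionService();
        List<String> source = Arrays.asList("1", "2", "3");
        TypeDescriptor targetType = new TypeDescriptor(RawStreamConversionDemo.class.getField("rawStream"));
        // The two-argument convert derives the source type from the object itself.
        Object result = conversionService.convert(source, targetType);
        ((Stream<?>) result).forEach(System.out::print); // prints 123
    }
}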
Use of java.util.stream.Stream in project cassandra by apache.
The class PartitionImplementationTest, method testIter.
private void testIter(Supplier<Collection<? extends Unfiltered>> contentSupplier, Row staticRow) {
NavigableSet<Clusterable> sortedContent = new TreeSet<>(metadata.comparator);
sortedContent.addAll(contentSupplier.get());
AbstractBTreePartition partition;
try (UnfilteredRowIterator iter = new Util.UnfilteredSource(metadata, Util.dk("pk"), staticRow, sortedContent.stream().map(x -> (Unfiltered) x).iterator())) {
partition = ImmutableBTreePartition.create(iter);
}
ColumnMetadata defCol = metadata.getColumn(new ColumnIdentifier("col", true));
ColumnFilter cf = ColumnFilter.selectionBuilder().add(defCol).build();
Function<? super Clusterable, ? extends Clusterable> colFilter = x -> x instanceof Row ? ((Row) x).filter(cf, metadata) : x;
Slices slices = Slices.with(metadata.comparator, Slice.make(clustering(KEY_RANGE / 4), clustering(KEY_RANGE * 3 / 4)));
Slices multiSlices = makeSlices();
// lastRow
assertRowsEqual((Row) get(sortedContent.descendingSet(), x -> x instanceof Row), partition.lastRow());
// get(static)
assertRowsEqual(staticRow, partition.getRow(Clustering.STATIC_CLUSTERING));
// get
for (int i = 0; i < KEY_RANGE; ++i) {
Clustering cl = clustering(i);
assertRowsEqual(getRow(sortedContent, cl), partition.getRow(cl));
}
// isEmpty
assertEquals(sortedContent.isEmpty() && staticRow == null, partition.isEmpty());
// hasRows
assertEquals(sortedContent.stream().anyMatch(x -> x instanceof Row), partition.hasRows());
// iterator
assertIteratorsEqual(sortedContent.stream().filter(x -> x instanceof Row).iterator(), partition.iterator());
// unfiltered iterator
assertIteratorsEqual(sortedContent.iterator(), partition.unfilteredIterator());
// unfiltered iterator with explicit column filter and slices
assertIteratorsEqual(sortedContent.iterator(), partition.unfilteredIterator(ColumnFilter.all(metadata), Slices.ALL, false));
// column-filtered
assertIteratorsEqual(sortedContent.stream().map(colFilter).iterator(), partition.unfilteredIterator(cf, Slices.ALL, false));
// sliced
assertIteratorsEqual(slice(sortedContent, slices.get(0)), partition.unfilteredIterator(ColumnFilter.all(metadata), slices, false));
assertIteratorsEqual(streamOf(slice(sortedContent, slices.get(0))).map(colFilter).iterator(), partition.unfilteredIterator(cf, slices, false));
// randomly multi-sliced
assertIteratorsEqual(slice(sortedContent, multiSlices), partition.unfilteredIterator(ColumnFilter.all(metadata), multiSlices, false));
assertIteratorsEqual(streamOf(slice(sortedContent, multiSlices)).map(colFilter).iterator(), partition.unfilteredIterator(cf, multiSlices, false));
// reversed
assertIteratorsEqual(sortedContent.descendingIterator(), partition.unfilteredIterator(ColumnFilter.all(metadata), Slices.ALL, true));
assertIteratorsEqual(sortedContent.descendingSet().stream().map(colFilter).iterator(), partition.unfilteredIterator(cf, Slices.ALL, true));
assertIteratorsEqual(invert(slice(sortedContent, slices.get(0))), partition.unfilteredIterator(ColumnFilter.all(metadata), slices, true));
assertIteratorsEqual(streamOf(invert(slice(sortedContent, slices.get(0)))).map(colFilter).iterator(), partition.unfilteredIterator(cf, slices, true));
assertIteratorsEqual(invert(slice(sortedContent, multiSlices)), partition.unfilteredIterator(ColumnFilter.all(metadata), multiSlices, true));
assertIteratorsEqual(streamOf(invert(slice(sortedContent, multiSlices))).map(colFilter).iterator(), partition.unfilteredIterator(cf, multiSlices, true));
// search iterator
testSearchIterator(sortedContent, partition, ColumnFilter.all(metadata), false);
testSearchIterator(sortedContent, partition, cf, false);
testSearchIterator(sortedContent, partition, ColumnFilter.all(metadata), true);
testSearchIterator(sortedContent, partition, cf, true);
// sliceable iter
testSlicingOfIterators(sortedContent, partition, ColumnFilter.all(metadata), false);
testSlicingOfIterators(sortedContent, partition, cf, false);
testSlicingOfIterators(sortedContent, partition, ColumnFilter.all(metadata), true);
testSlicingOfIterators(sortedContent, partition, cf, true);
}
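The assertIteratorsEqual helper is defined elsewhere in the test class. A plausible minimal sketch of the contract it enforces, using plain JUnit 4 assertions, might look like the following; note that the real helper compares Unfiltered contents structurally, so Objects.equals here is a simplification.

import java.util.Iterator;
import java.util.Objects;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

// Sketch of the contract assumed above: both iterators must yield equal
// elements in the same order and run out at the same time.
private static <T> void assertIteratorsEqual(Iterator<? extends T> expected, Iterator<? extends T> actual) {
    while (expected.hasNext()) {
        assertTrue("actual iterator ended early", actual.hasNext());
        T expectedItem = expected.next();
        T actualItem = actual.next();
        assertTrue("expected " + expectedItem + " but got " + actualItem,
                Objects.equals(expectedItem, actualItem));
    }
    assertFalse("actual iterator has extra elements", actual.hasNext());
}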
Use of java.util.stream.Stream in project keywhiz by square.
The class ClientResource, method modifyClientGroups.
/**
 * Modify the groups a client has membership in.
 *
 * @excludeParams automationClient
 * @param name Client name
 * @param request JSON request specifying which groups to add or remove
 * @return Listing of groups the client has membership in
 *
 * @responseMessage 201 Client modified successfully
 * @responseMessage 404 Client not found
 */
@Timed
@ExceptionMetered
@PUT
@Path("{name}/groups")
@Produces(APPLICATION_JSON)
public Iterable<String> modifyClientGroups(@Auth AutomationClient automationClient, @PathParam("name") String name, @Valid ModifyGroupsRequestV2 request) {
Client client = clientDAOReadWrite.getClient(name).orElseThrow(NotFoundException::new);
String user = automationClient.getName();
long clientId = client.getId();
Set<String> oldGroups = aclDAOReadWrite.getGroupsFor(client).stream().map(Group::getName).collect(toSet());
Set<String> groupsToAdd = Sets.difference(request.addGroups(), oldGroups);
Set<String> groupsToRemove = Sets.intersection(request.removeGroups(), oldGroups);
// TODO: should optimize AclDAO to use names and return only name column
groupsToGroupIds(groupsToAdd).forEach((maybeGroupId) -> maybeGroupId.ifPresent((groupId) -> aclDAOReadWrite.findAndEnrollClient(clientId, groupId, auditLog, user, new HashMap<>())));
groupsToGroupIds(groupsToRemove).forEach((maybeGroupId) -> maybeGroupId.ifPresent((groupId) -> aclDAOReadWrite.findAndEvictClient(clientId, groupId, auditLog, user, new HashMap<>())));
return aclDAOReadWrite.getGroupsFor(client).stream().map(Group::getName).collect(toSet());
}
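The core of the method is the membership diff: only groups the client does not already hold are enrolled, and only currently held groups are evicted. A small self-contained illustration of that step, using Guava's set views and hypothetical group names:

import java.util.Set;

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

public class MembershipDiffDemo {
    public static void main(String[] args) {
        Set<String> oldGroups = ImmutableSet.of("admin", "web");
        Set<String> requestedAdds = ImmutableSet.of("web", "batch");
        Set<String> requestedRemoves = ImmutableSet.of("admin", "missing");

        // Adding a group the client is already in is a no-op, as is removing
        // a group it was never in.
        Set<String> groupsToAdd = Sets.difference(requestedAdds, oldGroups);          // [batch]
        Set<String> groupsToRemove = Sets.intersection(requestedRemoves, oldGroups);  // [admin]
        System.out.println(groupsToAdd + " / " + groupsToRemove);
    }
}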
Use of java.util.stream.Stream in project CoreNLP by stanfordnlp.
The class JSONOutputter, method print.
/** {@inheritDoc} */
// It's lying; we need the "redundant" casts (as of 2014-09-08)
@SuppressWarnings("RedundantCast")
@Override
public void print(Annotation doc, OutputStream target, Options options) throws IOException {
PrintWriter writer = new PrintWriter(IOUtils.encodedOutputStreamWriter(target, options.encoding));
JSONWriter l0 = new JSONWriter(writer, options);
l0.object(l1 -> {
l1.set("docId", doc.get(CoreAnnotations.DocIDAnnotation.class));
l1.set("docDate", doc.get(CoreAnnotations.DocDateAnnotation.class));
l1.set("docSourceType", doc.get(CoreAnnotations.DocSourceTypeAnnotation.class));
l1.set("docType", doc.get(CoreAnnotations.DocTypeAnnotation.class));
l1.set("author", doc.get(CoreAnnotations.AuthorAnnotation.class));
l1.set("location", doc.get(CoreAnnotations.LocationAnnotation.class));
if (options.includeText) {
l1.set("text", doc.get(CoreAnnotations.TextAnnotation.class));
}
if (doc.get(CoreAnnotations.SentencesAnnotation.class) != null) {
l1.set("sentences", doc.get(CoreAnnotations.SentencesAnnotation.class).stream().map(sentence -> (Consumer<Writer>) (Writer l2) -> {
l2.set("id", sentence.get(CoreAnnotations.SentenceIDAnnotation.class));
l2.set("index", sentence.get(CoreAnnotations.SentenceIndexAnnotation.class));
l2.set("line", sentence.get(CoreAnnotations.LineNumberAnnotation.class));
StringWriter treeStrWriter = new StringWriter();
TreePrint treePrinter = options.constituentTreePrinter;
if (treePrinter == AnnotationOutputter.DEFAULT_CONSTITUENT_TREE_PRINTER) {
treePrinter = new TreePrint("oneline");
}
treePrinter.printTree(sentence.get(TreeCoreAnnotations.TreeAnnotation.class), new PrintWriter(treeStrWriter, true));
String treeStr = treeStrWriter.toString().trim();
if (!"SENTENCE_SKIPPED_OR_UNPARSABLE".equals(treeStr)) {
l2.set("parse", treeStr);
}
l2.set("basicDependencies", buildDependencyTree(sentence.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class)));
l2.set("enhancedDependencies", buildDependencyTree(sentence.get(SemanticGraphCoreAnnotations.EnhancedDependenciesAnnotation.class)));
l2.set("enhancedPlusPlusDependencies", buildDependencyTree(sentence.get(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class)));
Tree sentimentTree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
if (sentimentTree != null) {
int sentiment = RNNCoreAnnotations.getPredictedClass(sentimentTree);
String sentimentClass = sentence.get(SentimentCoreAnnotations.SentimentClass.class);
l2.set("sentimentValue", Integer.toString(sentiment));
l2.set("sentiment", sentimentClass.replaceAll(" ", ""));
}
Collection<RelationTriple> openIETriples = sentence.get(NaturalLogicAnnotations.RelationTriplesAnnotation.class);
if (openIETriples != null) {
l2.set("openie", openIETriples.stream().map(triple -> (Consumer<Writer>) (Writer tripleWriter) -> {
tripleWriter.set("subject", triple.subjectGloss());
tripleWriter.set("subjectSpan", Span.fromPair(triple.subjectTokenSpan()));
tripleWriter.set("relation", triple.relationGloss());
tripleWriter.set("relationSpan", Span.fromPair(triple.relationTokenSpan()));
tripleWriter.set("object", triple.objectGloss());
tripleWriter.set("objectSpan", Span.fromPair(triple.objectTokenSpan()));
}));
}
Collection<RelationTriple> kbpTriples = sentence.get(CoreAnnotations.KBPTriplesAnnotation.class);
if (kbpTriples != null) {
l2.set("kbp", kbpTriples.stream().map(triple -> (Consumer<Writer>) (Writer tripleWriter) -> {
tripleWriter.set("subject", triple.subjectGloss());
tripleWriter.set("subjectSpan", Span.fromPair(triple.subjectTokenSpan()));
tripleWriter.set("relation", triple.relationGloss());
tripleWriter.set("relationSpan", Span.fromPair(triple.relationTokenSpan()));
tripleWriter.set("object", triple.objectGloss());
tripleWriter.set("objectSpan", Span.fromPair(triple.objectTokenSpan()));
}));
}
if (sentence.get(CoreAnnotations.MentionsAnnotation.class) != null) {
Integer sentTokenBegin = sentence.get(CoreAnnotations.TokenBeginAnnotation.class);
l2.set("entitymentions", sentence.get(CoreAnnotations.MentionsAnnotation.class).stream().map(m -> (Consumer<Writer>) (Writer l3) -> {
Integer tokenBegin = m.get(CoreAnnotations.TokenBeginAnnotation.class);
Integer tokenEnd = m.get(CoreAnnotations.TokenEndAnnotation.class);
l3.set("docTokenBegin", tokenBegin);
l3.set("docTokenEnd", tokenEnd);
if (tokenBegin != null && sentTokenBegin != null) {
l3.set("tokenBegin", tokenBegin - sentTokenBegin);
}
if (tokenEnd != null && sentTokenBegin != null) {
l3.set("tokenEnd", tokenEnd - sentTokenBegin);
}
l3.set("text", m.get(CoreAnnotations.TextAnnotation.class));
l3.set("characterOffsetBegin", m.get(CoreAnnotations.CharacterOffsetBeginAnnotation.class));
l3.set("characterOffsetEnd", m.get(CoreAnnotations.CharacterOffsetEndAnnotation.class));
l3.set("ner", m.get(CoreAnnotations.NamedEntityTagAnnotation.class));
l3.set("normalizedNER", m.get(CoreAnnotations.NormalizedNamedEntityTagAnnotation.class));
l3.set("entitylink", m.get(CoreAnnotations.WikipediaEntityAnnotation.class));
Timex time = m.get(TimeAnnotations.TimexAnnotation.class);
if (time != null) {
Timex.Range range = time.range();
l3.set("timex", (Consumer<Writer>) l4 -> {
l4.set("tid", time.tid());
l4.set("type", time.timexType());
l4.set("value", time.value());
l4.set("altValue", time.altVal());
l4.set("range", (range != null) ? (Consumer<Writer>) l5 -> {
l5.set("begin", range.begin);
l5.set("end", range.end);
l5.set("duration", range.duration);
} : null);
});
}
}));
}
if (sentence.get(CoreAnnotations.TokensAnnotation.class) != null) {
l2.set("tokens", sentence.get(CoreAnnotations.TokensAnnotation.class).stream().map(token -> (Consumer<Writer>) (Writer l3) -> {
l3.set("index", token.index());
l3.set("word", token.word());
l3.set("originalText", token.originalText());
l3.set("lemma", token.lemma());
l3.set("characterOffsetBegin", token.beginPosition());
l3.set("characterOffsetEnd", token.endPosition());
l3.set("pos", token.tag());
l3.set("ner", token.ner());
l3.set("normalizedNER", token.get(CoreAnnotations.NormalizedNamedEntityTagAnnotation.class));
l3.set("speaker", token.get(CoreAnnotations.SpeakerAnnotation.class));
l3.set("truecase", token.get(CoreAnnotations.TrueCaseAnnotation.class));
l3.set("truecaseText", token.get(CoreAnnotations.TrueCaseTextAnnotation.class));
l3.set("before", token.get(CoreAnnotations.BeforeAnnotation.class));
l3.set("after", token.get(CoreAnnotations.AfterAnnotation.class));
l3.set("entitylink", token.get(CoreAnnotations.WikipediaEntityAnnotation.class));
Timex time = token.get(TimeAnnotations.TimexAnnotation.class);
if (time != null) {
Timex.Range range = time.range();
l3.set("timex", (Consumer<Writer>) l4 -> {
l4.set("tid", time.tid());
l4.set("type", time.timexType());
l4.set("value", time.value());
l4.set("altValue", time.altVal());
l4.set("range", (range != null) ? (Consumer<Writer>) l5 -> {
l5.set("begin", range.begin);
l5.set("end", range.end);
l5.set("duration", range.duration);
} : null);
});
}
}));
}
}));
}
Map<Integer, CorefChain> corefChains = doc.get(CorefCoreAnnotations.CorefChainAnnotation.class);
if (corefChains != null) {
l1.set("corefs", (Consumer<Writer>) chainWriter -> {
for (CorefChain chain : corefChains.values()) {
CorefChain.CorefMention representative = chain.getRepresentativeMention();
chainWriter.set(Integer.toString(chain.getChainID()), chain.getMentionsInTextualOrder().stream().map(mention -> (Consumer<Writer>) (Writer mentionWriter) -> {
mentionWriter.set("id", mention.mentionID);
mentionWriter.set("text", mention.mentionSpan);
mentionWriter.set("type", mention.mentionType);
mentionWriter.set("number", mention.number);
mentionWriter.set("gender", mention.gender);
mentionWriter.set("animacy", mention.animacy);
mentionWriter.set("startIndex", mention.startIndex);
mentionWriter.set("endIndex", mention.endIndex);
mentionWriter.set("headIndex", mention.headIndex);
mentionWriter.set("sentNum", mention.sentNum);
mentionWriter.set("position", Arrays.stream(mention.position.elems()).boxed().collect(Collectors.toList()));
mentionWriter.set("isRepresentativeMention", mention == representative);
}));
}
});
}
if (doc.get(CoreAnnotations.QuotationsAnnotation.class) != null) {
List<CoreMap> quotes = QuoteAnnotator.gatherQuotes(doc);
l1.set("quotes", quotes.stream().map(quote -> (Consumer<Writer>) (Writer l2) -> {
l2.set("id", quote.get(CoreAnnotations.QuotationIndexAnnotation.class));
l2.set("text", quote.get(CoreAnnotations.TextAnnotation.class));
l2.set("beginIndex", quote.get(CoreAnnotations.CharacterOffsetBeginAnnotation.class));
l2.set("endIndex", quote.get(CoreAnnotations.CharacterOffsetEndAnnotation.class));
l2.set("beginToken", quote.get(CoreAnnotations.TokenBeginAnnotation.class));
l2.set("endToken", quote.get(CoreAnnotations.TokenEndAnnotation.class));
l2.set("beginSentence", quote.get(CoreAnnotations.SentenceBeginAnnotation.class));
l2.set("endSentence", quote.get(CoreAnnotations.SentenceEndAnnotation.class));
}));
}
});
// flush
l0.writer.flush();
}
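The whole method is built on one pattern: a JSON object is expressed as a Consumer<Writer> that sets key/value pairs, and a JSON array as a Stream of such consumers. A stripped-down sketch of that pattern follows; the Writer interface here is a simplified stand-in for JSONOutputter's internal one, and the printing lambda merely stands in for the real JSONWriter traversal.

import java.util.function.Consumer;
import java.util.stream.Stream;

public class NestedWriterDemo {

    // Simplified stand-in for JSONOutputter.Writer: one method that records
    // a key/value pair at the current nesting level.
    interface Writer {
        void set(String key, Object value);
    }

    public static void main(String[] args) {
        // Objects nest by passing a Consumer<Writer> as the value; arrays are
        // Streams whose elements are themselves Consumer<Writer> instances.
        Consumer<Writer> document = l1 -> {
            l1.set("docId", "example");
            l1.set("sentences", Stream.of("first sentence", "second sentence")
                    .map(text -> (Consumer<Writer>) l2 -> l2.set("text", text)));
        };
        // A real JSONWriter would walk this structure and emit JSON; here we
        // only demonstrate how the shape is declared.
        document.accept((key, value) -> System.out.println(key + " -> " + value));
    }
}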
Use of java.util.stream.Stream in project torodb by torodb.
The class TopologyCoordinator, method lookForSyncSource.
/**
 * Looks for an optimal sync source to replicate from.
 *
 * On the first attempt, we ignore those nodes with a slave delay higher than our own, hidden
 * nodes, and nodes that are excessively lagged. The second attempt includes such nodes, in case
 * they are the only ones we can reach. This loop attempts to set 'closestIndex'.
 *
 * @param now the current time
 * @param lastOpAppliedOp the last OpTime this node has applied
 * @param onlyOptimal if true, slaves with more delay than ourselves, hidden nodes, and
 *        excessively lagged nodes are ignored
 * @param oldestSyncOpTime the oldest optime considered not excessively lagged; only used if
 *        onlyOptimal is true
 * @return the new optimal sync source, which is not {@link Optional#isPresent() present} if no
 *         one can be chosen
 */
private Optional<MemberConfig> lookForSyncSource(Instant now, Optional<OpTime> lastOpAppliedOp, boolean onlyOptimal, OpTime oldestSyncOpTime) {
OpTime lastOpApplied = lastOpAppliedOp.orElse(OpTime.EPOCH);
Stream<MemberHeartbeatData> hbCandidateStream = _hbdata.stream().filter(MemberHeartbeatData::isUp).filter(hbData -> hbData.getState().isReadable()).filter(hbData -> hbData.getOpTime().isAfter(lastOpApplied));
if (onlyOptimal) {
hbCandidateStream = hbCandidateStream.filter(hbData -> hbData.getOpTime().isEqualOrAfter(oldestSyncOpTime));
}
Stream<MemberConfig> mcCandidateStream = hbCandidateStream.map(this::getMemberConfig).filter(mc -> !isBlacklistedMember(mc, now));
if (onlyOptimal) {
mcCandidateStream = mcCandidateStream.filter(mc -> !mc.isHidden()).filter(mc -> mc.getSlaveDelay() < slaveDelaySecs);
}
// If there are several candidates, the one with the lowest ping is returned
return mcCandidateStream.reduce((MemberConfig cand1, MemberConfig cand2) -> {
long ping1 = getPing(cand1.getHostAndPort());
long ping2 = getPing(cand2.getHostAndPort());
if (ping1 < ping2) {
return cand1;
}
return cand2;
});
}
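The final reduce is a min-by-ping selection, which Stream.min with a comparator expresses more directly. Here is a self-contained sketch of that choice; the Candidate class and pingMillis field are hypothetical stand-ins for MemberConfig and the getPing lookup.

import java.util.Comparator;
import java.util.Optional;
import java.util.stream.Stream;

public class SyncSourceSelectionDemo {

    // Hypothetical stand-in for MemberConfig plus its measured ping.
    static final class Candidate {
        final String host;
        final long pingMillis;
        Candidate(String host, long pingMillis) {
            this.host = host;
            this.pingMillis = pingMillis;
        }
    }

    public static void main(String[] args) {
        Stream<Candidate> candidates = Stream.of(
                new Candidate("node-a", 40),
                new Candidate("node-b", 12),
                new Candidate("node-c", 12));
        // Like the reduce above, min on a sequential stream keeps the first of
        // equally good candidates, so node-b wins over node-c here.
        Optional<Candidate> best = candidates.min(
                Comparator.comparingLong(c -> c.pingMillis));
        best.ifPresent(c -> System.out.println(c.host)); // node-b
    }
}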