Use of java.util.SortedMap in project gerrit by GerritCodeReview.
The class ListProjects, method display.
public SortedMap<String, ProjectInfo> display(@Nullable OutputStream displayOutputStream) throws BadRequestException, PermissionBackendException {
if (groupUuid != null) {
try {
if (!groupControlFactory.controlFor(groupUuid).isVisible()) {
return Collections.emptySortedMap();
}
} catch (NoSuchGroupException ex) {
return Collections.emptySortedMap();
}
}
PrintWriter stdout = null;
if (displayOutputStream != null) {
stdout = new PrintWriter(new BufferedWriter(new OutputStreamWriter(displayOutputStream, UTF_8)));
}
if (type == FilterType.PARENT_CANDIDATES) {
// Historically, PARENT_CANDIDATES implied showDescription.
showDescription = true;
}
int foundIndex = 0;
int found = 0;
TreeMap<String, ProjectInfo> output = new TreeMap<>();
Map<String, String> hiddenNames = new HashMap<>();
Map<Project.NameKey, Boolean> accessibleParents = new HashMap<>();
PermissionBackend.WithUser perm = permissionBackend.user(currentUser);
final TreeMap<Project.NameKey, ProjectNode> treeMap = new TreeMap<>();
try {
for (final Project.NameKey projectName : filter(perm)) {
final ProjectState e = projectCache.get(projectName);
if (e == null || (!all && e.getProject().getState() == HIDDEN)) {
// If 'all' wasn't selected and the project is HIDDEN, pretend it's not present.
continue;
}
final ProjectControl pctl = e.controlFor(currentUser);
if (groupUuid != null && !pctl.getLocalGroups().contains(GroupReference.forGroup(groupsCollection.parseId(groupUuid.get())))) {
continue;
}
ProjectInfo info = new ProjectInfo();
if (showTree && !format.isJson()) {
treeMap.put(projectName, projectNodeFactory.create(pctl.getProject(), true));
continue;
}
info.name = projectName.get();
if (showTree && format.isJson()) {
ProjectState parent = Iterables.getFirst(e.parents(), null);
if (parent != null) {
if (isParentAccessible(accessibleParents, perm, parent)) {
info.parent = parent.getProject().getName();
} else {
info.parent = hiddenNames.get(parent.getProject().getName());
if (info.parent == null) {
info.parent = "?-" + (hiddenNames.size() + 1);
hiddenNames.put(parent.getProject().getName(), info.parent);
}
}
}
}
if (showDescription) {
info.description = Strings.emptyToNull(e.getProject().getDescription());
}
info.state = e.getProject().getState();
try {
if (!showBranch.isEmpty()) {
try (Repository git = repoManager.openRepository(projectName)) {
if (!type.matches(git)) {
continue;
}
List<Ref> refs = getBranchRefs(projectName, pctl);
if (!hasValidRef(refs)) {
continue;
}
for (int i = 0; i < showBranch.size(); i++) {
Ref ref = refs.get(i);
if (ref != null && ref.getObjectId() != null) {
if (info.branches == null) {
info.branches = new LinkedHashMap<>();
}
info.branches.put(showBranch.get(i), ref.getObjectId().name());
}
}
}
} else if (!showTree && type.useMatch()) {
try (Repository git = repoManager.openRepository(projectName)) {
if (!type.matches(git)) {
continue;
}
}
}
} catch (RepositoryNotFoundException err) {
// If the Git repository is gone, the project doesn't actually exist anymore.
continue;
} catch (IOException err) {
log.warn("Unexpected error reading " + projectName, err);
continue;
}
if (type != FilterType.PARENT_CANDIDATES) {
List<WebLinkInfo> links = webLinks.getProjectLinks(projectName.get());
info.webLinks = links.isEmpty() ? null : links;
}
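// Paging: skip the first 'start' matches, then stop once 'limit' results have been collected.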
if (foundIndex++ < start) {
continue;
}
if (limit > 0 && ++found > limit) {
break;
}
if (stdout == null || format.isJson()) {
output.put(info.name, info);
continue;
}
if (!showBranch.isEmpty()) {
for (String name : showBranch) {
String ref = info.branches != null ? info.branches.get(name) : null;
if (ref == null) {
// Print stub (forty '-' symbols)
ref = "----------------------------------------";
}
stdout.print(ref);
stdout.print(' ');
}
}
stdout.print(info.name);
if (info.description != null) {
// We still want to list every project as one-liners, hence escaping \n.
stdout.print(" - " + StringUtil.escapeString(info.description));
}
stdout.print('\n');
}
for (ProjectInfo info : output.values()) {
info.id = Url.encode(info.name);
info.name = null;
}
if (stdout == null) {
return output;
} else if (format.isJson()) {
format.newGson().toJson(output, new TypeToken<Map<String, ProjectInfo>>() {
}.getType(), stdout);
stdout.print('\n');
} else if (showTree && treeMap.size() > 0) {
printProjectTree(stdout, treeMap);
}
return null;
} finally {
if (stdout != null) {
stdout.flush();
}
}
}
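The pattern worth noting above: results accumulate in a TreeMap keyed by project name, so both the returned map and the text/JSON output come out in name order, and the early exits return Collections.emptySortedMap() rather than null. A minimal sketch of that pattern outside Gerrit's API (the visibility flag and string values are stand-ins, not Gerrit types):

import java.util.Collections;
import java.util.SortedMap;
import java.util.TreeMap;

public class SortedListingSketch {

    static SortedMap<String, String> listProjects(boolean callerMaySee) {
        if (!callerMaySee) {
            // Shared immutable empty map; callers get a non-null,
            // still-sorted result even in the "nothing visible" case.
            return Collections.emptySortedMap();
        }
        SortedMap<String, String> output = new TreeMap<>();
        // Insertion order does not matter; TreeMap iterates by key.
        output.put("zebra", "last alphabetically");
        output.put("alpha", "first alphabetically");
        return output;
    }

    public static void main(String[] args) {
        System.out.println(listProjects(true));  // {alpha=..., zebra=...}
        System.out.println(listProjects(false)); // {}
    }
}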
Use of java.util.SortedMap in project carbondata by apache.
The class FilterUtil, method getEndKeyForNoDictionaryDimension.
/**
 * Algorithm for getting the end key for a filter.
 * step 1: Iterate through each dimension and verify that it is not an exclude filter.
 * step 2: Initialize the end key with the last filter member value present in each filter
 * model for the respective dimension (the filter models are already sorted).
 * step 3: Since this is a no-dictionary end key, only actual values are present, so compare
 * the last filter model value with respect to the dimension data type.
 * step 4: The highest value, compared across all of a dimension's filter models, is taken
 * as that dimension's end key.
 * step 5: Create an end-key byte array comprising the highest filter member value of every
 * dimension, plus the indexes needed to read each filter value. The result is written into
 * setOfEndKeyByteArray; the method itself returns nothing.
 *
 * @param dimColResolvedFilterInfo resolved filter info for the dimension column
 * @param segmentProperties properties of the current segment
 * @param setOfEndKeyByteArray output map from dimension ordinal to end key bytes
 */
public static void getEndKeyForNoDictionaryDimension(DimColumnResolvedFilterInfo dimColResolvedFilterInfo, SegmentProperties segmentProperties, SortedMap<Integer, byte[]> setOfEndKeyByteArray) {
Map<CarbonDimension, List<DimColumnFilterInfo>> dimensionFilter = dimColResolvedFilterInfo.getDimensionResolvedFilterInstance();
// step 1
for (Map.Entry<CarbonDimension, List<DimColumnFilterInfo>> entry : dimensionFilter.entrySet()) {
if (!entry.getKey().hasEncoding(Encoding.DICTIONARY)) {
List<DimColumnFilterInfo> listOfDimColFilterInfo = entry.getValue();
if (null == listOfDimColFilterInfo) {
continue;
}
boolean isExcludePresent = false;
for (DimColumnFilterInfo info : listOfDimColFilterInfo) {
if (!info.isIncludeFilter()) {
isExcludePresent = true;
}
}
if (isExcludePresent) {
continue;
}
// in case of restructure scenarios it can happen that the filter dimension is not
// present in the current block. In those cases no need to determine the key
CarbonDimension dimensionFromCurrentBlock = CarbonUtil.getDimensionFromCurrentBlock(segmentProperties.getDimensions(), entry.getKey());
if (null == dimensionFromCurrentBlock) {
continue;
}
// step 2
byte[] noDictionaryEndKey = listOfDimColFilterInfo.get(0).getNoDictionaryFilterValuesList().get(listOfDimColFilterInfo.get(0).getNoDictionaryFilterValuesList().size() - 1);
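// steps 3 and 4: keep the largest candidate end key seen so far for this dimension's ordinal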
if (setOfEndKeyByteArray.isEmpty()) {
setOfEndKeyByteArray.put(dimensionFromCurrentBlock.getOrdinal(), noDictionaryEndKey);
} else if (null == setOfEndKeyByteArray.get(dimensionFromCurrentBlock.getOrdinal())) {
setOfEndKeyByteArray.put(dimensionFromCurrentBlock.getOrdinal(), noDictionaryEndKey);
} else if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(setOfEndKeyByteArray.get(dimensionFromCurrentBlock.getOrdinal()), noDictionaryEndKey) < 0) {
setOfEndKeyByteArray.put(dimensionFromCurrentBlock.getOrdinal(), noDictionaryEndKey);
}
}
}
}
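The SortedMap here acts as a per-ordinal "max so far" register: for each no-dictionary dimension, the largest candidate end key wins, and iterating the map later yields ordinals in ascending order. Note that the first two branches above (map empty, no entry for the ordinal) collapse into a single null check. A minimal sketch of the same idea, with a plain unsigned byte comparison standing in for ByteUtil.UnsafeComparer:

import java.util.SortedMap;
import java.util.TreeMap;

public class EndKeySketch {

    // Unsigned lexicographic comparison, standing in for
    // ByteUtil.UnsafeComparer.INSTANCE.compareTo in the snippet.
    static int compareUnsigned(byte[] a, byte[] b) {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
            int cmp = (a[i] & 0xFF) - (b[i] & 0xFF);
            if (cmp != 0) return cmp;
        }
        return a.length - b.length;
    }

    // Keep only the largest candidate end key per ordinal; the SortedMap
    // keeps ordinals in ascending order for later reads.
    static void putMax(SortedMap<Integer, byte[]> endKeys, int ordinal, byte[] candidate) {
        byte[] current = endKeys.get(ordinal);
        if (current == null || compareUnsigned(current, candidate) < 0) {
            endKeys.put(ordinal, candidate);
        }
    }

    public static void main(String[] args) {
        SortedMap<Integer, byte[]> endKeys = new TreeMap<>();
        putMax(endKeys, 2, new byte[] { 0x01 });
        putMax(endKeys, 2, new byte[] { 0x05 }); // replaces: 0x05 > 0x01
        putMax(endKeys, 2, new byte[] { 0x03 }); // ignored: 0x03 < 0x05
        System.out.println(endKeys.get(2)[0]);   // 5
    }
}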
Use of java.util.SortedMap in project beam by apache.
The class PipelineOptionsFactory, method getPropertyDescriptors.
/**
* This method is meant to emulate the behavior of {@link Introspector#getBeanInfo(Class, int)}
* to construct the list of {@link PropertyDescriptor}.
*
* <p>TODO: Swap back to using Introspector once the proxy class issue with AppEngine is
* resolved.
*/
private static List<PropertyDescriptor> getPropertyDescriptors(Set<Method> methods, Class<? extends PipelineOptions> beanClass) throws IntrospectionException {
SortedMap<String, Method> propertyNamesToGetters = new TreeMap<>();
for (Map.Entry<String, Method> entry : PipelineOptionsReflector.getPropertyNamesToGetters(methods).entries()) {
propertyNamesToGetters.put(entry.getKey(), entry.getValue());
}
List<PropertyDescriptor> descriptors = Lists.newArrayList();
List<TypeMismatch> mismatches = new ArrayList<>();
Set<String> usedDescriptors = Sets.newHashSet();
/*
* Add all the getter/setter pairs to the list of descriptors removing the getter once
* it has been paired up.
*/
for (Method method : methods) {
String methodName = method.getName();
if (!methodName.startsWith("set") || method.getParameterTypes().length != 1 || method.getReturnType() != void.class) {
continue;
}
String propertyName = Introspector.decapitalize(methodName.substring(3));
Method getterMethod = propertyNamesToGetters.remove(propertyName);
// Validate that the getter and setter property types are the same.
if (getterMethod != null) {
Type getterPropertyType = getterMethod.getGenericReturnType();
Type setterPropertyType = method.getGenericParameterTypes()[0];
if (!getterPropertyType.equals(setterPropertyType)) {
TypeMismatch mismatch = new TypeMismatch();
mismatch.propertyName = propertyName;
mismatch.getterPropertyType = getterPropertyType;
mismatch.setterPropertyType = setterPropertyType;
mismatches.add(mismatch);
continue;
}
}
// Only add each property once (i.e., one descriptor pairing its getter and setter).
if (!usedDescriptors.contains(propertyName)) {
descriptors.add(new PropertyDescriptor(propertyName, getterMethod, method));
usedDescriptors.add(propertyName);
}
}
throwForTypeMismatches(mismatches);
// Add the remaining getters with missing setters.
for (Map.Entry<String, Method> getterToMethod : propertyNamesToGetters.entrySet()) {
descriptors.add(new PropertyDescriptor(getterToMethod.getKey(), getterToMethod.getValue(), null));
}
return descriptors;
}
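The SortedMap does double duty above: it indexes getters by property name for pairing with setters, and because entries are removed as they are paired, whatever remains afterwards is exactly the set of setter-less getters, already in sorted order. A minimal, self-contained sketch of that pairing (the Options interface is an invented example, not a Beam type):

import java.beans.Introspector;
import java.lang.reflect.Method;
import java.util.SortedMap;
import java.util.TreeMap;

public class GetterSetterPairingSketch {

    interface Options {
        String getName();
        void setName(String name);
        int getCount(); // getter with no matching setter
    }

    public static void main(String[] args) {
        // Collect getters keyed by property name; the TreeMap reports any
        // leftover (setter-less) getters in a deterministic order.
        SortedMap<String, Method> getters = new TreeMap<>();
        for (Method m : Options.class.getMethods()) {
            if (m.getName().startsWith("get") && m.getParameterCount() == 0) {
                getters.put(Introspector.decapitalize(m.getName().substring(3)), m);
            }
        }
        // Pair each setter with its getter, removing the getter as we go.
        for (Method m : Options.class.getMethods()) {
            if (m.getName().startsWith("set") && m.getParameterCount() == 1) {
                String property = Introspector.decapitalize(m.getName().substring(3));
                Method getter = getters.remove(property);
                System.out.println(property + " -> getter=" + (getter != null) + ", setter=true");
            }
        }
        // Whatever is left has no setter, reported in sorted property order.
        getters.keySet().forEach(p -> System.out.println(p + " -> getter=true, setter=false"));
    }
}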
Use of java.util.SortedMap in project lucene-solr by apache.
The class Config, method complainAboutUnknownAttributes.
/**
* Logs an error and throws an exception if any of the element(s) at the given elementXpath
* contains an attribute name that is not among knownAttributes.
*/
public void complainAboutUnknownAttributes(String elementXpath, String... knownAttributes) {
SortedMap<String, SortedSet<String>> problems = new TreeMap<>();
NodeList nodeList = getNodeList(elementXpath, false);
for (int i = 0; i < nodeList.getLength(); ++i) {
Element element = (Element) nodeList.item(i);
Set<String> unknownAttributes = getUnknownAttributes(element, knownAttributes);
if (null != unknownAttributes) {
String elementName = element.getNodeName();
SortedSet<String> allUnknownAttributes = problems.get(elementName);
if (null == allUnknownAttributes) {
allUnknownAttributes = new TreeSet<>();
problems.put(elementName, allUnknownAttributes);
}
allUnknownAttributes.addAll(unknownAttributes);
}
}
if (problems.size() > 0) {
StringBuilder message = new StringBuilder();
for (Map.Entry<String, SortedSet<String>> entry : problems.entrySet()) {
if (message.length() > 0) {
message.append(", ");
}
message.append('<');
message.append(entry.getKey());
for (String attributeName : entry.getValue()) {
message.append(' ');
message.append(attributeName);
message.append("=\"...\"");
}
message.append('>');
}
message.insert(0, "Unknown attribute(s) on element(s): ");
String msg = message.toString();
SolrException.log(log, msg);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, msg);
}
}
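Two properties of the nested sorted structure matter for the error message: element names are reported in order, and each element's unknown attributes are sorted and de-duplicated, so the exception text is deterministic. A minimal sketch of building the same kind of report, using Map.computeIfAbsent to collapse the get/null-check/put sequence from the snippet (the element and attribute names are invented):

import java.util.Map;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;

public class UnknownAttributeReportSketch {
    public static void main(String[] args) {
        SortedMap<String, SortedSet<String>> problems = new TreeMap<>();
        // computeIfAbsent replaces the explicit get/null-check/put dance.
        problems.computeIfAbsent("searchComponent", k -> new TreeSet<>()).add("typoAttr");
        problems.computeIfAbsent("requestHandler", k -> new TreeSet<>()).add("bogus");
        problems.computeIfAbsent("requestHandler", k -> new TreeSet<>()).add("alsoBogus");

        StringBuilder message = new StringBuilder();
        for (Map.Entry<String, SortedSet<String>> entry : problems.entrySet()) {
            if (message.length() > 0) {
                message.append(", ");
            }
            message.append('<').append(entry.getKey());
            for (String attributeName : entry.getValue()) {
                message.append(' ').append(attributeName).append("=\"...\"");
            }
            message.append('>');
        }
        message.insert(0, "Unknown attribute(s) on element(s): ");
        // Unknown attribute(s) on element(s): <requestHandler alsoBogus="..." bogus="...">, <searchComponent typoAttr="...">
        System.out.println(message);
    }
}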
Use of java.util.SortedMap in project lucene-solr by apache.
The class TestBlockJoin, method testRandom.
public void testRandom() throws Exception {
// We build two indices at once: one normalized (which
// ToParentBlockJoinQuery/Collector,
// ToChildBlockJoinQuery can query) and the other w/
// the same docs, just fully denormalized:
final Directory dir = newDirectory();
final Directory joinDir = newDirectory();
final int maxNumChildrenPerParent = 20;
final int numParentDocs = TestUtil.nextInt(random(), 100 * RANDOM_MULTIPLIER, 300 * RANDOM_MULTIPLIER);
//final int numParentDocs = 30;
// Values for parent fields:
final String[][] parentFields = getRandomFields(numParentDocs / 2);
// Values for child fields:
final String[][] childFields = getRandomFields(numParentDocs);
final boolean doDeletes = random().nextBoolean();
final List<Integer> toDelete = new ArrayList<>();
// TODO: parallel star join, nested join cases too!
final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
final RandomIndexWriter joinW = new RandomIndexWriter(random(), joinDir);
for (int parentDocID = 0; parentDocID < numParentDocs; parentDocID++) {
Document parentDoc = new Document();
Document parentJoinDoc = new Document();
Field id = new StoredField("parentID", parentDocID);
parentDoc.add(id);
parentJoinDoc.add(id);
parentJoinDoc.add(newStringField("isParent", "x", Field.Store.NO));
id = new NumericDocValuesField("parentID", parentDocID);
parentDoc.add(id);
parentJoinDoc.add(id);
parentJoinDoc.add(newStringField("isParent", "x", Field.Store.NO));
for (int field = 0; field < parentFields.length; field++) {
if (random().nextDouble() < 0.9) {
String s = parentFields[field][random().nextInt(parentFields[field].length)];
Field f = newStringField("parent" + field, s, Field.Store.NO);
parentDoc.add(f);
parentJoinDoc.add(f);
f = new SortedDocValuesField("parent" + field, new BytesRef(s));
parentDoc.add(f);
parentJoinDoc.add(f);
}
}
if (doDeletes) {
parentDoc.add(new IntPoint("blockID", parentDocID));
parentJoinDoc.add(new IntPoint("blockID", parentDocID));
}
final List<Document> joinDocs = new ArrayList<>();
if (VERBOSE) {
StringBuilder sb = new StringBuilder();
sb.append("parentID=").append(parentDoc.get("parentID"));
for (int fieldID = 0; fieldID < parentFields.length; fieldID++) {
String s = parentDoc.get("parent" + fieldID);
if (s != null) {
sb.append(" parent" + fieldID + "=" + s);
}
}
System.out.println(" " + sb.toString());
}
final int numChildDocs = TestUtil.nextInt(random(), 1, maxNumChildrenPerParent);
for (int childDocID = 0; childDocID < numChildDocs; childDocID++) {
// Denormalize: copy all parent fields into child doc:
Document childDoc = TestUtil.cloneDocument(parentDoc);
Document joinChildDoc = new Document();
joinDocs.add(joinChildDoc);
Field childID = new StoredField("childID", childDocID);
childDoc.add(childID);
joinChildDoc.add(childID);
childID = new NumericDocValuesField("childID", childDocID);
childDoc.add(childID);
joinChildDoc.add(childID);
for (int childFieldID = 0; childFieldID < childFields.length; childFieldID++) {
if (random().nextDouble() < 0.9) {
String s = childFields[childFieldID][random().nextInt(childFields[childFieldID].length)];
Field f = newStringField("child" + childFieldID, s, Field.Store.NO);
childDoc.add(f);
joinChildDoc.add(f);
f = new SortedDocValuesField("child" + childFieldID, new BytesRef(s));
childDoc.add(f);
joinChildDoc.add(f);
}
}
if (VERBOSE) {
StringBuilder sb = new StringBuilder();
sb.append("childID=").append(joinChildDoc.get("childID"));
for (int fieldID = 0; fieldID < childFields.length; fieldID++) {
String s = joinChildDoc.get("child" + fieldID);
if (s != null) {
sb.append(" child" + fieldID + "=" + s);
}
}
System.out.println(" " + sb.toString());
}
if (doDeletes) {
joinChildDoc.add(new IntPoint("blockID", parentDocID));
}
w.addDocument(childDoc);
}
// Parent last:
joinDocs.add(parentJoinDoc);
joinW.addDocuments(joinDocs);
if (doDeletes && random().nextInt(30) == 7) {
toDelete.add(parentDocID);
}
}
if (!toDelete.isEmpty()) {
Query query = IntPoint.newSetQuery("blockID", toDelete);
w.deleteDocuments(query);
joinW.deleteDocuments(query);
}
final IndexReader r = w.getReader();
w.close();
final IndexReader joinR = joinW.getReader();
joinW.close();
if (VERBOSE) {
System.out.println("TEST: reader=" + r);
System.out.println("TEST: joinReader=" + joinR);
Bits liveDocs = MultiFields.getLiveDocs(joinR);
for (int docIDX = 0; docIDX < joinR.maxDoc(); docIDX++) {
System.out.println(" docID=" + docIDX + " doc=" + joinR.document(docIDX) + " deleted?=" + (liveDocs != null && liveDocs.get(docIDX) == false));
}
PostingsEnum parents = MultiFields.getTermDocsEnum(joinR, "isParent", new BytesRef("x"));
System.out.println("parent docIDs:");
while (parents.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
System.out.println(" " + parents.docID());
}
}
final IndexSearcher s = newSearcher(r, false);
final IndexSearcher joinS = newSearcher(joinR);
final BitSetProducer parentsFilter = new QueryBitSetProducer(new TermQuery(new Term("isParent", "x")));
CheckJoinIndex.check(joinS.getIndexReader(), parentsFilter);
final int iters = 200 * RANDOM_MULTIPLIER;
for (int iter = 0; iter < iters; iter++) {
if (VERBOSE) {
System.out.println("TEST: iter=" + (1 + iter) + " of " + iters);
}
Query childQuery;
if (random().nextInt(3) == 2) {
final int childFieldID = random().nextInt(childFields.length);
childQuery = new TermQuery(new Term("child" + childFieldID, childFields[childFieldID][random().nextInt(childFields[childFieldID].length)]));
} else if (random().nextInt(3) == 2) {
BooleanQuery.Builder bq = new BooleanQuery.Builder();
final int numClauses = TestUtil.nextInt(random(), 2, 4);
boolean didMust = false;
for (int clauseIDX = 0; clauseIDX < numClauses; clauseIDX++) {
Query clause;
BooleanClause.Occur occur;
if (!didMust && random().nextBoolean()) {
occur = random().nextBoolean() ? BooleanClause.Occur.MUST : BooleanClause.Occur.MUST_NOT;
clause = new TermQuery(randomChildTerm(childFields[0]));
didMust = true;
} else {
occur = BooleanClause.Occur.SHOULD;
final int childFieldID = TestUtil.nextInt(random(), 1, childFields.length - 1);
clause = new TermQuery(new Term("child" + childFieldID, childFields[childFieldID][random().nextInt(childFields[childFieldID].length)]));
}
bq.add(clause, occur);
}
childQuery = bq.build();
} else {
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(new TermQuery(randomChildTerm(childFields[0])), BooleanClause.Occur.MUST);
final int childFieldID = TestUtil.nextInt(random(), 1, childFields.length - 1);
bq.add(new TermQuery(new Term("child" + childFieldID, childFields[childFieldID][random().nextInt(childFields[childFieldID].length)])), random().nextBoolean() ? BooleanClause.Occur.MUST : BooleanClause.Occur.MUST_NOT);
childQuery = bq.build();
}
if (random().nextBoolean()) {
childQuery = new RandomApproximationQuery(childQuery, random());
}
final ScoreMode agg = ScoreMode.values()[random().nextInt(ScoreMode.values().length)];
final ToParentBlockJoinQuery childJoinQuery = new ToParentBlockJoinQuery(childQuery, parentsFilter, agg);
// To run against the block-join index:
final Query parentJoinQuery;
// Same query as parentJoinQuery, but to run against
// the fully denormalized index (so we can compare
// results):
final Query parentQuery;
if (random().nextBoolean()) {
parentQuery = childQuery;
parentJoinQuery = childJoinQuery;
} else {
// AND parent field w/ child field
final BooleanQuery.Builder bq = new BooleanQuery.Builder();
final Term parentTerm = randomParentTerm(parentFields[0]);
if (random().nextBoolean()) {
bq.add(childJoinQuery, BooleanClause.Occur.MUST);
bq.add(new TermQuery(parentTerm), BooleanClause.Occur.MUST);
} else {
bq.add(new TermQuery(parentTerm), BooleanClause.Occur.MUST);
bq.add(childJoinQuery, BooleanClause.Occur.MUST);
}
final BooleanQuery.Builder bq2 = new BooleanQuery.Builder();
if (random().nextBoolean()) {
bq2.add(childQuery, BooleanClause.Occur.MUST);
bq2.add(new TermQuery(parentTerm), BooleanClause.Occur.MUST);
} else {
bq2.add(new TermQuery(parentTerm), BooleanClause.Occur.MUST);
bq2.add(childQuery, BooleanClause.Occur.MUST);
}
parentJoinQuery = bq.build();
parentQuery = bq2.build();
}
final Sort parentSort = getRandomSort("parent", parentFields.length);
final Sort childSort = getRandomSort("child", childFields.length);
if (VERBOSE) {
System.out.println("\nTEST: query=" + parentQuery + " joinQuery=" + parentJoinQuery + " parentSort=" + parentSort + " childSort=" + childSort);
}
// Merge both sorts:
final List<SortField> sortFields = new ArrayList<>(Arrays.asList(parentSort.getSort()));
sortFields.addAll(Arrays.asList(childSort.getSort()));
final Sort parentAndChildSort = new Sort(sortFields.toArray(new SortField[sortFields.size()]));
final TopDocs results = s.search(parentQuery, r.numDocs(), parentAndChildSort);
if (VERBOSE) {
System.out.println("\nTEST: normal index gets " + results.totalHits + " hits; sort=" + parentAndChildSort);
final ScoreDoc[] hits = results.scoreDocs;
for (int hitIDX = 0; hitIDX < hits.length; hitIDX++) {
final Document doc = s.doc(hits[hitIDX].doc);
//System.out.println(" score=" + hits[hitIDX].score + " parentID=" + doc.get("parentID") + " childID=" + doc.get("childID") + " (docID=" + hits[hitIDX].doc + ")");
System.out.println(" parentID=" + doc.get("parentID") + " childID=" + doc.get("childID") + " (docID=" + hits[hitIDX].doc + ")");
FieldDoc fd = (FieldDoc) hits[hitIDX];
if (fd.fields != null) {
System.out.print(" " + fd.fields.length + " sort values: ");
for (Object o : fd.fields) {
if (o instanceof BytesRef) {
System.out.print(((BytesRef) o).utf8ToString() + " ");
} else {
System.out.print(o + " ");
}
}
System.out.println();
}
}
}
TopDocs joinedResults = joinS.search(parentJoinQuery, numParentDocs);
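// Group each parent hit's children under its parentID; the TreeMap keeps the groups in
// ascending parent order for the comparison against the denormalized results below.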
SortedMap<Integer, TopDocs> joinResults = new TreeMap<>();
for (ScoreDoc parentHit : joinedResults.scoreDocs) {
ParentChildrenBlockJoinQuery childrenQuery = new ParentChildrenBlockJoinQuery(parentsFilter, childQuery, parentHit.doc);
TopDocs childTopDocs = joinS.search(childrenQuery, maxNumChildrenPerParent, childSort);
final Document parentDoc = joinS.doc(parentHit.doc);
joinResults.put(Integer.valueOf(parentDoc.get("parentID")), childTopDocs);
}
final int hitsPerGroup = TestUtil.nextInt(random(), 1, 20);
if (VERBOSE) {
System.out.println("\nTEST: block join index gets " + (joinResults == null ? 0 : joinResults.size()) + " groups; hitsPerGroup=" + hitsPerGroup);
if (joinResults != null) {
for (Map.Entry<Integer, TopDocs> entry : joinResults.entrySet()) {
System.out.println(" group parentID=" + entry.getKey() + " (docID=" + entry.getKey() + ")");
for (ScoreDoc childHit : entry.getValue().scoreDocs) {
final Document doc = joinS.doc(childHit.doc);
// System.out.println(" score=" + childHit.score + " childID=" + doc.get("childID") + " (docID=" + childHit.doc + ")");
System.out.println(" childID=" + doc.get("childID") + " child0=" + doc.get("child0") + " (docID=" + childHit.doc + ")");
}
}
}
}
if (results.totalHits == 0) {
assertEquals(0, joinResults.size());
} else {
compareHits(r, joinR, results, joinResults);
TopDocs b = joinS.search(childJoinQuery, 10);
for (ScoreDoc hit : b.scoreDocs) {
Explanation explanation = joinS.explain(childJoinQuery, hit.doc);
Document document = joinS.doc(hit.doc - 1);
int childId = Integer.parseInt(document.get("childID"));
//System.out.println(" hit docID=" + hit.doc + " childId=" + childId + " parentId=" + document.get("parentID"));
assertTrue(explanation.isMatch());
assertEquals(hit.score, explanation.getValue(), 0.0f);
Matcher m = Pattern.compile("Score based on ([0-9]+) child docs in range from ([0-9]+) to ([0-9]+), best match:").matcher(explanation.getDescription());
assertTrue("Block Join description not matches", m.matches());
assertTrue("Matched children not positive", Integer.parseInt(m.group(1)) > 0);
assertEquals("Wrong child range start", hit.doc - 1 - childId, Integer.parseInt(m.group(2)));
assertEquals("Wrong child range end", hit.doc - 1, Integer.parseInt(m.group(3)));
Explanation childWeightExplanation = explanation.getDetails()[0];
if ("sum of:".equals(childWeightExplanation.getDescription())) {
childWeightExplanation = childWeightExplanation.getDetails()[0];
}
assertTrue("Wrong child weight description", childWeightExplanation.getDescription().startsWith("weight(child"));
}
}
// Test joining in the opposite direction (parent to
// child):
// Get random query against parent documents:
final Query parentQuery2;
if (random().nextInt(3) == 2) {
final int fieldID = random().nextInt(parentFields.length);
parentQuery2 = new TermQuery(new Term("parent" + fieldID, parentFields[fieldID][random().nextInt(parentFields[fieldID].length)]));
} else if (random().nextInt(3) == 2) {
BooleanQuery.Builder bq = new BooleanQuery.Builder();
final int numClauses = TestUtil.nextInt(random(), 2, 4);
boolean didMust = false;
for (int clauseIDX = 0; clauseIDX < numClauses; clauseIDX++) {
Query clause;
BooleanClause.Occur occur;
if (!didMust && random().nextBoolean()) {
occur = random().nextBoolean() ? BooleanClause.Occur.MUST : BooleanClause.Occur.MUST_NOT;
clause = new TermQuery(randomParentTerm(parentFields[0]));
didMust = true;
} else {
occur = BooleanClause.Occur.SHOULD;
final int fieldID = TestUtil.nextInt(random(), 1, parentFields.length - 1);
clause = new TermQuery(new Term("parent" + fieldID, parentFields[fieldID][random().nextInt(parentFields[fieldID].length)]));
}
bq.add(clause, occur);
}
parentQuery2 = bq.build();
} else {
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(new TermQuery(randomParentTerm(parentFields[0])), BooleanClause.Occur.MUST);
final int fieldID = TestUtil.nextInt(random(), 1, parentFields.length - 1);
bq.add(new TermQuery(new Term("parent" + fieldID, parentFields[fieldID][random().nextInt(parentFields[fieldID].length)])), random().nextBoolean() ? BooleanClause.Occur.MUST : BooleanClause.Occur.MUST_NOT);
parentQuery2 = bq.build();
}
if (VERBOSE) {
System.out.println("\nTEST: top down: parentQuery2=" + parentQuery2);
}
// Maps parent query to child docs:
final ToChildBlockJoinQuery parentJoinQuery2 = new ToChildBlockJoinQuery(parentQuery2, parentsFilter);
// To run against the block-join index:
Query childJoinQuery2;
// Same query as parentJoinQuery, but to run against
// the fully denormalized index (so we can compare
// results):
Query childQuery2;
if (random().nextBoolean()) {
childQuery2 = parentQuery2;
childJoinQuery2 = parentJoinQuery2;
} else {
final Term childTerm = randomChildTerm(childFields[0]);
if (random().nextBoolean()) {
// filtered case
childJoinQuery2 = parentJoinQuery2;
childJoinQuery2 = new BooleanQuery.Builder().add(childJoinQuery2, Occur.MUST).add(new TermQuery(childTerm), Occur.FILTER).build();
} else {
// AND child field w/ parent query:
final BooleanQuery.Builder bq = new BooleanQuery.Builder();
if (random().nextBoolean()) {
bq.add(parentJoinQuery2, BooleanClause.Occur.MUST);
bq.add(new TermQuery(childTerm), BooleanClause.Occur.MUST);
} else {
bq.add(new TermQuery(childTerm), BooleanClause.Occur.MUST);
bq.add(parentJoinQuery2, BooleanClause.Occur.MUST);
}
childJoinQuery2 = bq.build();
}
if (random().nextBoolean()) {
// filtered case
childQuery2 = parentQuery2;
childQuery2 = new BooleanQuery.Builder().add(childQuery2, Occur.MUST).add(new TermQuery(childTerm), Occur.FILTER).build();
} else {
final BooleanQuery.Builder bq2 = new BooleanQuery.Builder();
if (random().nextBoolean()) {
bq2.add(parentQuery2, BooleanClause.Occur.MUST);
bq2.add(new TermQuery(childTerm), BooleanClause.Occur.MUST);
} else {
bq2.add(new TermQuery(childTerm), BooleanClause.Occur.MUST);
bq2.add(parentQuery2, BooleanClause.Occur.MUST);
}
childQuery2 = bq2.build();
}
}
final Sort childSort2 = getRandomSort("child", childFields.length);
// Search denormalized index:
if (VERBOSE) {
System.out.println("TEST: run top down query=" + childQuery2 + " sort=" + childSort2);
}
final TopDocs results2 = s.search(childQuery2, r.numDocs(), childSort2);
if (VERBOSE) {
System.out.println(" " + results2.totalHits + " totalHits:");
for (ScoreDoc sd : results2.scoreDocs) {
final Document doc = s.doc(sd.doc);
System.out.println(" childID=" + doc.get("childID") + " parentID=" + doc.get("parentID") + " docID=" + sd.doc);
}
}
// Search join index:
if (VERBOSE) {
System.out.println("TEST: run top down join query=" + childJoinQuery2 + " sort=" + childSort2);
}
TopDocs joinResults2 = joinS.search(childJoinQuery2, joinR.numDocs(), childSort2);
if (VERBOSE) {
System.out.println(" " + joinResults2.totalHits + " totalHits:");
for (ScoreDoc sd : joinResults2.scoreDocs) {
final Document doc = joinS.doc(sd.doc);
final Document parentDoc = getParentDoc(joinR, parentsFilter, sd.doc);
System.out.println(" childID=" + doc.get("childID") + " parentID=" + parentDoc.get("parentID") + " docID=" + sd.doc);
}
}
compareChildHits(r, joinR, results2, joinResults2);
}
r.close();
joinR.close();
dir.close();
joinDir.close();
}
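Stripped of the Lucene machinery, the SortedMap in this test is just a way to group per-parent results under ascending parentID keys, so the block-join output can be compared against the denormalized index deterministically. A minimal sketch of that grouping idiom (the hit data is invented, and plain lists of doc IDs stand in for TopDocs):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

public class GroupedHitsSketch {
    public static void main(String[] args) {
        // Each hit is {parentID, childDocID}; grouping under a TreeMap
        // yields the groups in ascending parentID order regardless of
        // the order in which hits arrive.
        SortedMap<Integer, List<Integer>> joinResults = new TreeMap<>();
        int[][] hits = { { 7, 101 }, { 3, 55 }, { 7, 102 }, { 3, 56 } };
        for (int[] hit : hits) {
            joinResults.computeIfAbsent(hit[0], k -> new ArrayList<>()).add(hit[1]);
        }
        for (Map.Entry<Integer, List<Integer>> group : joinResults.entrySet()) {
            System.out.println("parentID=" + group.getKey() + " children=" + group.getValue());
        }
        // parentID=3 children=[55, 56]
        // parentID=7 children=[101, 102]
    }
}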