Example usage of org.wikidata.wdtk.datamodel.interfaces.Snak in the OpenRefine project: class WbStatementExpr, method groupSnaks.
/**
 * Groups a flat list of snaks into {@link SnakGroup}s by property id,
 * deduplicating equal snaks within each group. Groups are emitted in the
 * order in which each property id was first encountered in {@code snaks}.
 *
 * @param snaks the snaks to group; may contain duplicates
 * @return one snak group per distinct property id, in first-seen order
 */
public static List<SnakGroup> groupSnaks(List<Snak> snaks) {
    Map<PropertyIdValue, List<Snak>> snakGroups = new HashMap<>();
    // Records first-seen order of property ids, since HashMap is unordered.
    List<PropertyIdValue> propertyOrder = new ArrayList<>();
    for (Snak snak : snaks) {
        // Fetch the property id once instead of calling the getter three times.
        PropertyIdValue pid = snak.getPropertyId();
        List<Snak> group = snakGroups.computeIfAbsent(pid, key -> {
            propertyOrder.add(key);
            return new ArrayList<>();
        });
        // Deduplicate: a snak already present in its group is skipped.
        if (!group.contains(snak)) {
            group.add(snak);
        }
    }
    return propertyOrder.stream()
            .map(pid -> Datamodel.makeSnakGroup(snakGroups.get(pid)))
            .collect(Collectors.toList());
}
Example usage of org.wikidata.wdtk.datamodel.interfaces.Snak in the OpenRefine project: class SnakOnlyStatementMerger, method merge.
@Override
public Statement merge(Statement existing, Statement added) {
    // Merge qualifiers snak-by-snak: start from the existing statement's
    // snaks and append each added snak that has no match (per match())
    // among the snaks collected so far.
    List<Snak> merged = new ArrayList<>(flatten(existing.getQualifiers()));
    for (Snak candidate : flatten(added.getQualifiers())) {
        boolean alreadyPresent = false;
        for (Snak seen : merged) {
            if (match(seen, candidate)) {
                alreadyPresent = true;
                break;
            }
        }
        if (!alreadyPresent) {
            merged.add(candidate);
        }
    }
    // Rebuild the claim around the existing subject/main snak with the
    // merged qualifiers regrouped by property, then merge references.
    Claim newClaim = Datamodel.makeClaim(
            existing.getSubject(),
            existing.getMainSnak(),
            WbStatementExpr.groupSnaks(merged));
    List<Reference> mergedReferences = mergeReferences(existing.getReferences(), added.getReferences());
    return Datamodel.makeStatement(newClaim, mergedReferences, existing.getRank(), existing.getStatementId());
}
Example usage of org.wikidata.wdtk.datamodel.interfaces.Snak in the OpenRefine project: class ConflictsWithScrutinizer, method scrutinize.
@Override
public void scrutinize(TermedStatementEntityEdit update) {
    // Collect, per property id, the values asserted by the added statements.
    // Only value snaks contribute (SomeValue/NoValue main snaks are skipped).
    Map<PropertyIdValue, Set<Value>> propertyIdValueValueMap = new HashMap<>();
    for (Statement statement : update.getAddedStatements()) {
        // Fetch the main snak once; the original fetched it twice via the claim.
        Snak mainSnak = statement.getClaim().getMainSnak();
        if (mainSnak instanceof ValueSnak) {
            Value value = ((ValueSnak) mainSnak).getValue();
            // computeIfAbsent replaces the containsKey/get/put dance.
            propertyIdValueValueMap
                    .computeIfAbsent(mainSnak.getPropertyId(), k -> new HashSet<>())
                    .add(value);
        }
    }
    // For each added property, check its "conflicts with" constraints against
    // the other properties added in the same edit.
    for (PropertyIdValue propertyId : propertyIdValueValueMap.keySet()) {
        List<Statement> statementList = _fetcher.getConstraintsByType(propertyId, conflictsWithConstraintQid);
        for (Statement statement : statementList) {
            ConflictsWithConstraint constraint = new ConflictsWithConstraint(statement);
            PropertyIdValue conflictingPid = constraint.conflictingPid;
            List<Value> itemList = constraint.itemList;
            if (propertyIdValueValueMap.containsKey(conflictingPid)
                    && raiseWarning(propertyIdValueValueMap, conflictingPid, itemList)) {
                // Warning key combines both property ids so each conflicting
                // pair is aggregated separately.
                QAWarning issue = new QAWarning(type, propertyId.getId() + conflictingPid.getId(),
                        QAWarning.Severity.WARNING, 1);
                issue.setProperty("property_entity", propertyId);
                issue.setProperty("added_property_entity", conflictingPid);
                issue.setProperty("example_entity", update.getEntityId());
                addIssue(issue);
            }
        }
    }
}
Example usage of org.wikidata.wdtk.datamodel.interfaces.Snak in the OpenRefine project: class InverseConstraintScrutinizer, method scrutinize.
@Override
public void scrutinize(Statement statement, EntityIdValue entityId, boolean added) {
    if (!added) {
        // TODO support for deleted statements
        return;
    }
    // Only value snaks pointing at items are relevant for inverse constraints.
    Snak mainSnak = statement.getClaim().getMainSnak();
    if (!(mainSnak instanceof ValueSnak)) {
        return;
    }
    Value mainSnakValue = ((ValueSnak) mainSnak).getValue();
    if (mainSnakValue instanceof ItemIdValue) {
        PropertyIdValue pid = mainSnak.getPropertyId();
        PropertyIdValue inversePid = getInverseConstraint(pid);
        if (inversePid != null) {
            // Record entityId -[pid]-> target so the inverse link can be
            // checked later. computeIfAbsent avoids the double lookup and
            // explicit null check of _statements.get(pid) in the original.
            EntityIdValue targetEntityId = (EntityIdValue) mainSnakValue;
            _statements.get(pid)
                    .computeIfAbsent(entityId, k -> new HashSet<>())
                    .add(targetEntityId);
        }
    }
}
Example usage of org.wikidata.wdtk.datamodel.interfaces.Snak in the OpenRefine project: class QuickStatementsExporter, method translateStatement.
/**
 * Serializes one statement in QuickStatements syntax and writes it to
 * {@code writer}.
 *
 * @param qid       the id of the entity the statement belongs to
 * @param statement the statement to serialize
 * @param pid       the property id of the statement's main snak (kept for
 *                  interface compatibility; the main snak is read from the
 *                  claim directly)
 * @param add       true to add the statement, false to remove it
 * @param writer    destination for the QuickStatements text
 * @throws IOException if writing fails
 */
protected void translateStatement(String qid, Statement statement, String pid, boolean add, Writer writer)
        throws IOException {
    Claim claim = statement.getClaim();
    String mainSnakQS = claim.getMainSnak().accept(mainSnakPrinter);
    if (!add) {
        // According to: https://www.wikidata.org/wiki/Help:QuickStatements#Removing_statements,
        // Removing statements won't be followed by qualifiers or references.
        writer.write("- ");
        writer.write(qid + mainSnakQS);
        writer.write("\n");
    } else if (statement.getReferences().isEmpty()) {
        writeStatementWithQualifiers(qid, mainSnakQS, claim, writer);
        writer.write("\n");
    } else {
        // QuickStatements supports one reference per line, so a statement
        // with multiple references is duplicated once per reference.
        for (Reference r : statement.getReferences()) {
            writeStatementWithQualifiers(qid, mainSnakQS, claim, writer);
            for (SnakGroup g : r.getSnakGroups()) {
                translateSnakGroup(g, true, writer);
            }
            writer.write("\n");
        }
    }
}

/**
 * Writes the shared prefix of an "add" line: the entity id, the main snak
 * and all qualifier snak groups (no trailing newline). Extracted to remove
 * the duplication between the no-reference and per-reference branches.
 */
private void writeStatementWithQualifiers(String qid, String mainSnakQS, Claim claim, Writer writer)
        throws IOException {
    writer.write(qid + mainSnakQS);
    for (SnakGroup q : claim.getQualifiers()) {
        translateSnakGroup(q, false, writer);
    }
}
Aggregations