Use of com.google.common.collect.Multiset in project tez by Apache — class VertexImpl, method logLocationHints:
/**
 * Logs, at debug level, the host and rack location hints supplied for each task of a
 * vertex, followed by per-host and per-rack occurrence counts across all tasks.
 *
 * @param vertexName name of the vertex; used only to tag the log lines
 * @param locationHint vertex-level location hint; may be null, in which case a single
 *        debug line is emitted and nothing else is done
 */
private static void logLocationHints(String vertexName, VertexLocationHint locationHint) {
if (locationHint == null) {
LOG.debug("No Vertex LocationHint specified for vertex=" + vertexName);
return;
}
// Every statement below exists only to produce debug output; skip the
// O(tasks * hints) string building entirely when debug logging is disabled.
if (!LOG.isDebugEnabled()) {
return;
}
Multiset<String> hosts = HashMultiset.create();
Multiset<String> racks = HashMultiset.create();
int counter = 0;
for (TaskLocationHint taskLocationHint : locationHint.getTaskLocationHints()) {
StringBuilder sb = new StringBuilder();
if (taskLocationHint.getHosts() == null) {
sb.append("No Hosts");
} else {
sb.append("Hosts: ");
for (String host : taskLocationHint.getHosts()) {
hosts.add(host);
sb.append(host).append(", ");
}
}
if (taskLocationHint.getRacks() == null) {
sb.append("No Racks");
} else {
sb.append("Racks: ");
for (String rack : taskLocationHint.getRacks()) {
racks.add(rack);
sb.append(rack).append(", ");
}
}
LOG.debug("Vertex: " + vertexName + ", Location: " + counter + " : " + sb.toString());
counter++;
}
// Multiset.entrySet() yields one entry per distinct element together with its
// count; Entry.toString() renders as e.g. "hostname x 3".
LOG.debug("Vertex: " + vertexName + ", Host Counts");
for (Multiset.Entry<String> host : hosts.entrySet()) {
LOG.debug("Vertex: " + vertexName + ", host: " + host.toString());
}
LOG.debug("Vertex: " + vertexName + ", Rack Counts");
for (Multiset.Entry<String> rack : racks.entrySet()) {
LOG.debug("Vertex: " + vertexName + ", rack: " + rack.toString());
}
}
Use of com.google.common.collect.Multiset in project sonar-java by SonarSource — class CheckVerifier, method validateAnalyzerMessage:
/**
 * Verifies an expected issue's attributes against the analyzer message actually raised:
 * effort to fix, the primary-location text span, and — when declared — the set of lines
 * carrying secondary locations (compared as a multiset, order-insensitive).
 */
private static void validateAnalyzerMessage(Map<IssueAttribute, String> attrs, AnalyzerMessage analyzerMessage) {
Double cost = analyzerMessage.getCost();
if (cost != null) {
assertEquals(Integer.toString(cost.intValue()), attrs, IssueAttribute.EFFORT_TO_FIX);
}
AnalyzerMessage.TextSpan primarySpan = analyzerMessage.primaryLocation();
assertEquals(normalizeColumn(primarySpan.startCharacter), attrs, IssueAttribute.START_COLUMN);
assertEquals(Integer.toString(primarySpan.endLine), attrs, IssueAttribute.END_LINE);
assertEquals(normalizeColumn(primarySpan.endCharacter), attrs, IssueAttribute.END_COLUMN);
if (!attrs.containsKey(IssueAttribute.SECONDARY_LOCATIONS)) {
return;
}
// Only the first message of each flow counts as a secondary location here;
// duplicates on the same line are tracked via the multiset.
Multiset<String> actualLines = HashMultiset.create();
analyzerMessage.flows.forEach(flow -> actualLines.add(Integer.toString(flow.get(0).getLine())));
List<String> expected = Lists.newArrayList(Splitter.on(",").omitEmptyStrings().trimResults().split(attrs.get(IssueAttribute.SECONDARY_LOCATIONS)));
List<String> unexpected = new ArrayList<>();
// List.remove returns true when the element was present, covering the
// contains-then-remove pair in one call.
actualLines.forEach(line -> {
if (!expected.remove(line)) {
unexpected.add(line);
}
});
if (!expected.isEmpty() || !unexpected.isEmpty()) {
// Line is not covered by JaCoCo because of thrown exception but effectively covered in UT.
Fail.fail(String.format("Secondary locations: expected: %s unexpected:%s. In %s:%d", expected, unexpected, normalizedFilePath(analyzerMessage), analyzerMessage.getLine()));
}
}
Use of com.google.common.collect.Multiset in project ANNIS by korpling — class SemanticValidator, method checkAlternative:
/**
 * Performs semantic validation of one normalized alternative of an AQL query.
 * Checks, in order: the alternative is non-empty; a single-node alternative has no
 * binary linguistic relations; every node is reachable from the first node via joins
 * (all variables bound); no variable name is used for more than one node; and no
 * non-reflexive operator is applied to the same node on both sides.
 *
 * @param data the full query data; only used to render the normalized query in error messages
 * @param alternative the nodes of this alternative
 * @param alternativeIndex index of this alternative within the query (unused here)
 * @param queryWasNormalized if true, the normalized query text is appended to
 *        "unbound variable" errors so the user can see what was actually checked
 * @throws AnnisQLSemanticsException if any of the checks above fails
 */
public void checkAlternative(QueryData data, List<QueryNode> alternative, int alternativeIndex, boolean queryWasNormalized) {
// check if there is at least one search expression
if (alternative.isEmpty()) {
throw new AnnisQLSemanticsException("Missing search expression.");
}
// no binary linguistic relations are allowed if there is only one node
if (alternative.size() == 1) {
QueryNode n = alternative.get(0);
for (Join j : n.getOutgoingJoins()) {
if (j.getTarget() != null) {
throw new AnnisQLSemanticsException(j.getParseLocation(), "No binary linguistic relations allowed if there is only one node in query.");
}
}
}
// get all nodes connected to the first one
Multimap<Long, QueryNode> connected = calculateConnected(alternative);
Set<Long> transitiveHull = new HashSet<>();
transitiveHull.add(alternative.get(0).getId());
createTransitiveHull(alternative.get(0), connected, transitiveHull);
// TreeMultiset keeps variable names sorted while counting duplicates
Multiset<String> variableNames = TreeMultiset.create();
Set<Long> unconnectedNodes = new HashSet<>();
for (QueryNode n : alternative) {
unconnectedNodes.add(n.getId());
variableNames.add(n.getVariable());
}
// whatever survives this removal is unreachable from the first node
unconnectedNodes.removeAll(transitiveHull);
// check if each node is contained in the connected nodes
if (!unconnectedNodes.isEmpty()) {
List<AqlParseError> errors = new LinkedList<>();
for (QueryNode n : alternative) {
if (unconnectedNodes.contains(n.getId())) {
errors.add(new AqlParseError(n, "variable \"" + n.getVariable() + "\" not bound (use linguistic operators)"));
}
}
if (!errors.isEmpty()) {
if (queryWasNormalized) {
// add the normalized query as "error" so the user is able to see it
errors.add(new AqlParseError("Normalized query is: \n" + data.toAQL()));
}
throw new AnnisQLSemanticsException("Not all variables bound", errors);
}
}
// check if any variable name was given more than once
List<String> invalidNames = new LinkedList<>();
for (Multiset.Entry<String> e : variableNames.entrySet()) {
if (e.getCount() > 1) {
invalidNames.add(e.getElement());
}
}
if (!invalidNames.isEmpty()) {
throw new AnnisQLSemanticsException("The following variable names are " + "used for more than one node: " + Joiner.on(", ").join(invalidNames) + "\nNormalized Query is: \n" + data.toAQL());
}
// check that no non-reflexive operator is used with the same node as both operands
// (e.g. "node _=_ itself" is always either trivially true or meaningless)
for (QueryNode source : alternative) {
for (Join join : source.getOutgoingJoins()) {
if (join instanceof Inclusion || join instanceof SameSpan || join instanceof Overlap || join instanceof RightOverlap || join instanceof LeftOverlap || join instanceof RightAlignment || join instanceof LeftAlignment) {
if (source.equals(join.getTarget())) {
throw new AnnisQLSemanticsException(join, "Not-reflexive operator used with the same node as argument.");
}
}
}
}
}
Use of com.google.common.collect.Multiset in project atlasdb by Palantir — class SweepStatsKeyValueService, method flushWrites:
/**
 * Persists accumulated per-table write counts into the sweep priority table.
 * For tables in {@code clears} the counter is reset to this batch's count; otherwise
 * the batch count is added to the previously stored value. A fresh timestamp is
 * committed to the transaction table before the put so readers see a valid commit.
 *
 * @param writes per-table counts of writes observed since the last flush
 * @param clears tables that were truncated/dropped and whose counters must restart
 */
private void flushWrites(Multiset<TableReference> writes, Set<TableReference> clears) {
if (writes.isEmpty() && clears.isEmpty()) {
log.info("No writes to flush");
return;
}
log.info("Flushing stats for {} writes and {} clears", SafeArg.of("writes", writes.size()), SafeArg.of("clears", clears.size()));
log.trace("Flushing writes: {}", UnsafeArg.of("writes", writes));
log.trace("Flushing clears: {}", UnsafeArg.of("clears", clears));
try {
Set<TableReference> tableNames = Sets.difference(writes.elementSet(), clears);
Collection<byte[]> rows = Collections2.transform(Collections2.transform(tableNames, t -> t.getQualifiedName()), Functions.compose(Persistables.persistToBytesFunction(), SweepPriorityRow.fromFullTableNameFun()));
Map<Cell, Value> oldWriteCounts = delegate().getRows(SWEEP_PRIORITY_TABLE, rows, SweepPriorityTable.getColumnSelection(SweepPriorityNamedColumn.WRITE_COUNT), Long.MAX_VALUE);
Map<Cell, byte[]> newWriteCounts = Maps.newHashMapWithExpectedSize(writes.elementSet().size());
byte[] col = SweepPriorityNamedColumn.WRITE_COUNT.getShortName();
for (TableReference tableRef : tableNames) {
Preconditions.checkState(!tableRef.getQualifiedName().startsWith(AtlasDbConstants.NAMESPACE_PREFIX), "The sweep stats kvs should wrap the namespace mapping kvs, not the other way around.");
byte[] row = SweepPriorityRow.of(tableRef.getQualifiedName()).persistToBytes();
Cell cell = Cell.create(row, col);
Value oldValue = oldWriteCounts.get(cell);
long oldCount = oldValue == null || oldValue.getContents().length == 0 ? 0 : SweepPriorityTable.WriteCount.BYTES_HYDRATOR.hydrateFromBytes(oldValue.getContents()).getValue();
// a cleared table restarts its count; otherwise accumulate onto the stored value
long newValue = clears.contains(tableRef) ? writes.count(tableRef) : oldCount + writes.count(tableRef);
log.debug("Sweep priority for {} has {} writes (was {})", tableRef, newValue, oldCount);
newWriteCounts.put(cell, SweepPriorityTable.WriteCount.of(newValue).persistValue());
}
long timestamp = timestampService.getFreshTimestamp();
// Committing before writing is intentional, we want the start timestamp to
// show up in the transaction table before we write do our writes.
commit(timestamp);
delegate().put(SWEEP_PRIORITY_TABLE, newWriteCounts, timestamp);
} catch (RuntimeException e) {
if (Thread.interrupted()) {
// Thread.interrupted() clears the flag; restore it so callers up the
// stack can still observe the interruption.
Thread.currentThread().interrupt();
return;
}
Set<TableReference> allTableNames = delegate().getAllTableNames();
if (!allTableNames.contains(SWEEP_PRIORITY_TABLE) || !allTableNames.contains(TransactionConstants.TRANSACTION_TABLE)) {
// ignore problems when sweep or transaction tables don't exist
log.warn("Ignoring failed sweep stats flush due to ", e);
// BUG FIX: previously this fell through to log.error + rethrow, so the
// failure was never actually ignored as the comment above intends.
return;
}
log.error("Unable to flush sweep stats for writes {} and clears {}: ", writes, clears, e);
throw e;
}
}
Use of com.google.common.collect.Multiset in project ForestryMC by ForestryMC — class MultiblockControllerForestry, method onMachineAssembled:
@Override
protected void onMachineAssembled() {
super.onMachineAssembled();
// Ownership is only decided server-side; clients bail out immediately.
if (world.isRemote) {
return;
}
// Tally how many connected parts each player profile owns.
Multiset<GameProfile> ownerTally = HashMultiset.create();
for (IMultiblockComponent component : connectedParts) {
GameProfile profile = component.getOwner();
if (profile != null) {
ownerTally.add(profile);
}
}
// The majority owner is the profile with the highest tally; on a tie the
// first entry encountered during iteration wins.
GameProfile majorityOwner = null;
int bestCount = 0;
for (Multiset.Entry<GameProfile> entry : ownerTally.entrySet()) {
int tally = entry.getCount();
if (tally > bestCount) {
bestCount = tally;
majorityOwner = entry.getElement();
}
}
if (majorityOwner != null) {
getOwnerHandler().setOwner(majorityOwner);
}
}
Aggregations