use of com.google.common.collect.Collections2 in project copybara by google.
the class GitHubPrOrigin method checkRequiredStatusContextNames.
/**
 * Check that the PR has a state of "success" for each status whose context is in the list
 * provided in the `required_status_context_names` param
 */
private void checkRequiredStatusContextNames(GitHubApi api, String project, PullRequest prData)
    throws ValidationException, RepoException {
  Set<String> requiredStatusContextNames = getRequiredStatusContextNames();
  if (forceImport() || requiredStatusContextNames.isEmpty()) {
    return;
  }
  try (ProfilerTask ignore = generalOptions.profiler().start("github_api_get_combined_status")) {
    CombinedStatus combinedStatus = api.getCombinedStatus(project, prData.getHead().getSha());
    Set<String> requiredButNotPresent = Sets.newHashSet(requiredStatusContextNames);
    List<Status> successStatuses =
        combinedStatus.getStatuses().stream()
            .filter(e -> e.getState() == State.SUCCESS)
            .collect(Collectors.toList());
    requiredButNotPresent.removeAll(Collections2.transform(successStatuses, Status::getContext));
    if (!requiredButNotPresent.isEmpty()) {
      throw new EmptyChangeException(
          String.format(
              "Cannot migrate http://github.com/%s/pull/%d because the following ci labels"
                  + " have not been passed: %s",
              project, prData.getNumber(), requiredButNotPresent));
    }
  }
}
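The snippet above uses Collections2.transform to project the list of successful Status objects onto their context names before subtracting them from the required set. The following minimal, self-contained sketch reproduces that pattern outside Copybara; the Status class, the context names, and the RequiredContextCheckSketch class are hypothetical stand-ins and not part of the project.

import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Sets;

import java.util.List;
import java.util.Set;

public class RequiredContextCheckSketch {

  /** Hypothetical stand-in for the GitHub API Status type used above. */
  static final class Status {
    private final String context;
    Status(String context) { this.context = context; }
    String getContext() { return context; }
  }

  public static void main(String[] args) {
    Set<String> required = Sets.newHashSet("ci/build", "ci/test", "ci/lint");
    List<Status> successStatuses =
        ImmutableList.of(new Status("ci/build"), new Status("ci/test"));

    // Collections2.transform returns a lazy view of the context names; removeAll
    // consumes it once, so no intermediate list is materialized.
    required.removeAll(Collections2.transform(successStatuses, Status::getContext));

    // Whatever remains was required but never reported a success state.
    System.out.println(required); // prints [ci/lint]
  }
}

Because the transformed collection is a view rather than a copy, the mapping work only happens when removeAll iterates it, which is why the original method can build it inline without worrying about extra allocations.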
use of com.google.common.collect.Collections2 in project controller by opendaylight.
the class JMXGeneratorTest method generateMBEsTest.
@Test
public void generateMBEsTest() throws Exception {
  // default value for module factory file is true
  map.put(JMXGenerator.MODULE_FACTORY_FILE_BOOLEAN, "randomValue");
  jmxGenerator.setAdditionalConfig(map);
  Collection<File> files = jmxGenerator.generateSources(
      context, outputBaseDir, Collections.singleton(threadsJavaModule), m -> Optional.empty());
  assertEquals(expectedModuleFileNames, toFileNames(files));
  for (File file : files) {
    final String name = file.getName();
    if (!name.endsWith("java")) {
      continue;
    }
    MbeASTVisitor visitor = new MbeASTVisitor(EXPECTED_PACKAGE_PREFIX + ".threads.java", name);
    verifyFile(file, visitor);
    switch (name) {
      case "AbstractDynamicThreadPoolModule.java":
        assertAbstractDynamicThreadPoolModule(visitor);
        break;
      case "AsyncEventBusModuleMXBean.java":
        assertEquals("Incorrect number of generated methods", 4, visitor.methods.size());
        break;
      case "AbstractNamingThreadFactoryModuleFactory.java":
        assertAbstractNamingThreadFactoryModuleFactory(visitor);
        break;
      case "AsyncEventBusModule.java":
        assertContains(visitor.extnds, EXPECTED_PACKAGE_PREFIX + ".threads.java.AbstractAsyncEventBusModule");
        visitor.assertFields(0);
        assertEquals("Incorrect number of generated methods", 2, visitor.methods.size());
        visitor.assertConstructors(2);
        visitor.assertMethodDescriptions(0);
        visitor.assertMethodJavadocs(0);
        break;
      case "EventBusModuleFactory.java":
        assertContains(visitor.extnds, EXPECTED_PACKAGE_PREFIX + ".threads.java.AbstractEventBusModuleFactory");
        visitor.assertFields(0);
        assertEquals("Incorrect number of generated methods", 0, visitor.methods.size());
        visitor.assertConstructors(0);
        visitor.assertMethodDescriptions(0);
        visitor.assertMethodJavadocs(0);
        break;
    }
  }
  verifyXmlFiles(Collections2.filter(files, input -> input.getName().endsWith("xml")));
  // verify ModuleFactory file
  File moduleFactoryFile = JMXGenerator.concatFolders(
      generatedResourcesDir, "META-INF", "services", ModuleFactory.class.getName());
  assertTrue(moduleFactoryFile.exists());
  Set<String> lines = ImmutableSet.copyOf(Files.readLines(moduleFactoryFile, StandardCharsets.UTF_8));
  Set<String> expectedLines = ImmutableSet.of(
      EXPECTED_PACKAGE_PREFIX + ".threads.java.EventBusModuleFactory",
      EXPECTED_PACKAGE_PREFIX + ".threads.java.AsyncEventBusModuleFactory",
      EXPECTED_PACKAGE_PREFIX + ".threads.java.DynamicThreadPoolModuleFactory",
      EXPECTED_PACKAGE_PREFIX + ".threads.java.NamingThreadFactoryModuleFactory",
      EXPECTED_PACKAGE_PREFIX + ".threads.java.ThreadPoolRegistryImplModuleFactory");
  assertEquals(expectedLines, lines);
}
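In this test Collections2.filter is used only to hand verifyXmlFiles a view of the generated files whose names end in "xml". The short sketch below isolates that filter-view idiom; the file names and the FilterViewDemo class are made up for illustration and are not part of the OpenDaylight test.

import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableList;

import java.io.File;
import java.util.Collection;

public class FilterViewDemo {
  public static void main(String[] args) {
    // Hypothetical file names standing in for the generator output above.
    Collection<File> files = ImmutableList.of(
        new File("AsyncEventBusModule.java"),
        new File("module-config.xml"),
        new File("EventBusModuleFactory.java"));

    // Collections2.filter returns a live, lazily evaluated view: the predicate
    // runs as the view is iterated rather than when the view is created.
    Collection<File> xmlFiles =
        Collections2.filter(files, input -> input.getName().endsWith("xml"));

    System.out.println(xmlFiles.size()); // prints 1
  }
}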
use of com.google.common.collect.Collections2 in project atlasdb by palantir.
the class SweepStatsKeyValueService method flushWrites.
private void flushWrites(Multiset<TableReference> writes, Set<TableReference> clears) {
  if (writes.isEmpty() && clears.isEmpty()) {
    log.info("No writes to flush");
    return;
  }
  log.info("Flushing stats for {} writes and {} clears",
      SafeArg.of("writes", writes.size()), SafeArg.of("clears", clears.size()));
  log.trace("Flushing writes: {}", UnsafeArg.of("writes", writes));
  log.trace("Flushing clears: {}", UnsafeArg.of("clears", clears));
  try {
    Set<TableReference> tableNames = Sets.difference(writes.elementSet(), clears);
    Collection<byte[]> rows = Collections2.transform(
        Collections2.transform(tableNames, t -> t.getQualifiedName()),
        Functions.compose(Persistables.persistToBytesFunction(), SweepPriorityRow.fromFullTableNameFun()));
    Map<Cell, Value> oldWriteCounts = delegate().getRows(
        SWEEP_PRIORITY_TABLE, rows,
        SweepPriorityTable.getColumnSelection(SweepPriorityNamedColumn.WRITE_COUNT), Long.MAX_VALUE);
    Map<Cell, byte[]> newWriteCounts = Maps.newHashMapWithExpectedSize(writes.elementSet().size());
    byte[] col = SweepPriorityNamedColumn.WRITE_COUNT.getShortName();
    for (TableReference tableRef : tableNames) {
      Preconditions.checkState(!tableRef.getQualifiedName().startsWith(AtlasDbConstants.NAMESPACE_PREFIX),
          "The sweep stats kvs should wrap the namespace mapping kvs, not the other way around.");
      byte[] row = SweepPriorityRow.of(tableRef.getQualifiedName()).persistToBytes();
      Cell cell = Cell.create(row, col);
      Value oldValue = oldWriteCounts.get(cell);
      long oldCount = oldValue == null || oldValue.getContents().length == 0
          ? 0
          : SweepPriorityTable.WriteCount.BYTES_HYDRATOR.hydrateFromBytes(oldValue.getContents()).getValue();
      long newValue = clears.contains(tableRef) ? writes.count(tableRef) : oldCount + writes.count(tableRef);
      log.debug("Sweep priority for {} has {} writes (was {})", tableRef, newValue, oldCount);
      newWriteCounts.put(cell, SweepPriorityTable.WriteCount.of(newValue).persistValue());
    }
    long timestamp = timestampService.getFreshTimestamp();
    // Committing before writing is intentional: we want the start timestamp to
    // show up in the transaction table before we do our writes.
    commit(timestamp);
    delegate().put(SWEEP_PRIORITY_TABLE, newWriteCounts, timestamp);
  } catch (RuntimeException e) {
    if (Thread.interrupted()) {
      return;
    }
    Set<TableReference> allTableNames = delegate().getAllTableNames();
    if (!allTableNames.contains(SWEEP_PRIORITY_TABLE)
        || !allTableNames.contains(TransactionConstants.TRANSACTION_TABLE)) {
      // ignore problems when sweep or transaction tables don't exist
      log.warn("Ignoring failed sweep stats flush due to ", e);
      return;
    }
    log.error("Unable to flush sweep stats for writes {} and clears {}: ", writes, clears, e);
    throw e;
  }
}
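The rows collection above is built by nesting two Collections2.transform calls around a Functions.compose of AtlasDB-specific serializers. The sketch below shows the same compose-then-transform shape with plain strings; toRowKey, persist, and ComposedTransformSketch are hypothetical stand-ins for SweepPriorityRow.fromFullTableNameFun() and Persistables.persistToBytesFunction(), not the real AtlasDB functions.

import com.google.common.base.Function;
import com.google.common.base.Functions;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableSet;

import java.nio.charset.StandardCharsets;
import java.util.Collection;
import java.util.Set;

public class ComposedTransformSketch {
  public static void main(String[] args) {
    // Hypothetical fully qualified table names standing in for TableReference values.
    Set<String> tableNames = ImmutableSet.of("ns.table_a", "ns.table_b");

    // Stand-ins for the row-building and serialization functions used above.
    Function<String, String> toRowKey = name -> "sweep__" + name;
    Function<String, byte[]> persist = key -> key.getBytes(StandardCharsets.UTF_8);

    // Functions.compose(g, f) applies f first, then g; wrapped in
    // Collections2.transform it yields a lazy view of the serialized row keys,
    // mirroring the construction of `rows` in flushWrites.
    Collection<byte[]> rows =
        Collections2.transform(tableNames, Functions.compose(persist, toRowKey));

    rows.forEach(row -> System.out.println(new String(row, StandardCharsets.UTF_8)));
  }
}

Declaring the two functions as explicit Function variables keeps the generic type inference for Functions.compose straightforward; in the original code the same roles are filled by pre-built function objects from the generated SweepPriorityTable schema classes.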