use of org.apache.commons.lang3.StringUtils in project samza by apache.
the class JobModelHelper method getProcessorLocality.
/**
 * Retrieves and returns the processor locality of a Samza job using the provided {@link Config} and {@link LocalityManager}.
 * @param config provides the configuration defined by the user; required to connect to the storage layer.
 * @param localityManager provides the processor-to-host mapping persisted to the metadata store.
 * @return the processor locality.
 */
private static Map<String, LocationId> getProcessorLocality(Config config, LocalityManager localityManager) {
    Map<String, LocationId> containerToLocationId = new HashMap<>();
    Map<String, ProcessorLocality> existingContainerLocality = localityManager.readLocality().getProcessorLocalities();
    for (int i = 0; i < new JobConfig(config).getContainerCount(); i++) {
        String containerId = Integer.toString(i);
        LocationId locationId = Optional.ofNullable(existingContainerLocality.get(containerId))
            .map(ProcessorLocality::host)
            .filter(StringUtils::isNotEmpty)
            .map(LocationId::new)
            .orElse(new LocationId("ANY_HOST"));
        containerToLocationId.put(containerId, locationId);
    }
    return containerToLocationId;
}
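The Optional chain is the key idiom here: StringUtils::isNotEmpty inside filter turns a recorded-but-empty host into an absent value, so the ANY_HOST fallback covers both missing and blank entries. A minimal standalone sketch of the same idiom, with a hypothetical recordedHosts map standing in for the locality lookup:

import java.util.Map;
import java.util.Optional;
import org.apache.commons.lang3.StringUtils;

class HostFallback {
    static final String ANY_HOST = "ANY_HOST";

    // Returns the recorded host for the container, or ANY_HOST when the
    // entry is missing or empty (hypothetical lookup for illustration).
    static String hostOrDefault(Map<String, String> recordedHosts, String containerId) {
        return Optional.ofNullable(recordedHosts.get(containerId))
            .filter(StringUtils::isNotEmpty)
            .orElse(ANY_HOST);
    }
}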
use of org.apache.commons.lang3.StringUtils in project neo4j by neo4j.
the class ServiceAnnotationProcessor method loadIfExists.
private SortedSet<String> loadIfExists(String path) {
    final SortedSet<String> result = new TreeSet<>();
    try {
        final FileObject file = processingEnv.getFiler().getResource(CLASS_OUTPUT, "", path);
        final List<String> lines = new ArrayList<>();
        try (BufferedReader in = new BufferedReader(new InputStreamReader(file.openInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                lines.add(line);
            }
        }
        lines.stream()
            .map(s -> substringBefore(s, "#"))
            .map(String::trim)
            .filter(StringUtils::isNotEmpty)
            .forEach(result::add);
        info("Loaded existing providers: " + result);
    } catch (IOException ignore) {
        info("No existing providers loaded");
    }
    return result;
}
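The stream pipeline implements the usual grammar of META-INF/services-style registration files: everything after # is a comment, surrounding whitespace is insignificant, and blank lines are skipped. A self-contained sketch of just that parsing step:

import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import static org.apache.commons.lang3.StringUtils.substringBefore;

class ProviderFileParser {
    // Parses provider lines, dropping '#' comments, trimming whitespace,
    // and discarding lines that end up empty.
    static List<String> parse(List<String> lines) {
        return lines.stream()
            .map(line -> substringBefore(line, "#"))
            .map(String::trim)
            .filter(StringUtils::isNotEmpty)
            .collect(Collectors.toList());
    }
}

For example, parse(List.of("com.example.Impl # default", "# comment only", "")) yields [com.example.Impl].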
use of org.apache.commons.lang3.StringUtils in project flink by apache.
the class EmulatedFullTopologyTest method testFullTopology.
// ======================================================================================================
// IMPORTANT: This test makes use of things that happen in the emulated PubSub that are
// GUARANTEED to be different in the real Google-hosted PubSub, so running these tests
// against the real thing has a very high probability of failing.
// The assumptions:
// 1) The ordering of the messages is maintained. We insert a STOP_MARKER _after_ the set
//    of test measurements and assume this STOP event will arrive after the actual test
//    data, so we can stop the processing. In the real PubSub this is NOT true.
// 2) Exactly once: we assume that every message we put in comes out exactly once. In the
//    real PubSub there are a lot of situations (mostly failure/retry) where this is not true.
@Test
public void testFullTopology() throws Exception {
    // ===============================================================================
    // Step 0: The test data
    List<String> input = new ArrayList<>(Arrays.asList("One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight", "Nine", "Ten"));
    List<String> messagesToSend = new ArrayList<>(input);
    // Now add some stream-termination messages.
    // NOTE: Messages are pulled from PubSub in batches by the source, so we need enough
    // STOP_MARKERs to ensure ALL parallel tasks get at least one; otherwise at least one
    // task will not terminate and the test will not end.
    // We pull 3 at a time and run 4 parallel tasks: we need at least 12 STOP_MARKERs.
    IntStream.rangeClosed(1, 20).forEach(i -> messagesToSend.add(STOP_MARKER));
    // IMPORTANT NOTE: This way of testing uses an effect of the PubSub emulator that is
    // absolutely guaranteed NOT to work in the real PubSub: the ordering of the messages
    // is maintained in the topic. So here we can assume that if we add a stop message
    // LAST, we can terminate the test stream when we see it.
    // ===============================================================================
    // Step 1: Publish the test messages into the input topic
    Publisher publisher = pubsubHelper.createPublisher(PROJECT_NAME, INPUT_TOPIC_NAME);
    for (String s : messagesToSend) {
        publisher.publish(PubsubMessage.newBuilder().setData(ByteString.copyFromUtf8(s)).build()).get();
    }
    publisher.shutdown();
    // ===============================================================================
    // Step 2: Now we run our topology
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(100);
    env.setParallelism(4);
    env.setRestartStrategy(RestartStrategies.noRestart());
    // Silly topology: read, reverse each string, write back out.
    env.addSource(
            PubSubSource.newBuilder()
                // The stop-marker-aware schema gives the source a self-termination feature.
                .withDeserializationSchema(new SimpleStringSchemaWithStopMarkerDetection())
                .withProjectName(PROJECT_NAME)
                .withSubscriptionName(INPUT_SUBSCRIPTION_NAME)
                .withCredentials(EmulatorCredentials.getInstance())
                .withPubSubSubscriberFactory(new PubSubSubscriberFactoryForEmulator(
                    getPubSubHostPort(), PROJECT_NAME, INPUT_SUBSCRIPTION_NAME, 1, Duration.ofSeconds(1), 3))
                .build())
        .map((MapFunction<String, String>) StringUtils::reverse)
        .addSink(
            PubSubSink.newBuilder()
                .withSerializationSchema(new SimpleStringSchema())
                .withProjectName(PROJECT_NAME)
                .withTopicName(OUTPUT_TOPIC_NAME)
                .withCredentials(EmulatorCredentials.getInstance())
                .withHostAndPortForEmulator(getPubSubHostPort())
                .build());
    env.execute("Running unit test");
    // ===============================================================================
    // Step 3: All the resulting data should now be in the output topic.
    // Pull it from the output topic and verify that everything is there.
    List<ReceivedMessage> receivedMessages = pubsubHelper.pullMessages(PROJECT_NAME, OUTPUT_SUBSCRIPTION_NAME, 100);
    assertEquals("Wrong number of elements", input.size(), receivedMessages.size());
    // Extract the actual Strings from the ReceivedMessages and check the output.
    List<String> output = new ArrayList<>();
    receivedMessages.forEach(msg -> output.add(msg.getMessage().getData().toStringUtf8()));
    for (String test : input) {
        String reversedTest = org.apache.commons.lang3.StringUtils.reverse(test);
        LOG.info("Checking if \"{}\" --> \"{}\" exists", test, reversedTest);
        assertTrue("Missing " + test, output.contains(reversedTest));
    }
    // ===============================================================================
}
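The only transformation in the topology is StringUtils::reverse used as a MapFunction<String, String>, and the verification simply checks that the reversed form of every input appears in the output. A minimal sketch of that transformation and check, independent of PubSub and Flink:

import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;

class ReverseCheck {
    // Applies the same per-record transformation the topology's map operator performs.
    static List<String> reverseAll(List<String> input) {
        return input.stream().map(StringUtils::reverse).collect(Collectors.toList());
    }

    public static void main(String[] args) {
        // Prints [enO, owT, eerhT]
        System.out.println(reverseAll(List.of("One", "Two", "Three")));
    }
}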
use of org.apache.commons.lang3.StringUtils in project dhis2-core by dhis2.
the class EnrollmentTimeFieldSqlRenderer method getSqlConditionForNonDefaultBoundaries.
@Override
protected String getSqlConditionForNonDefaultBoundaries(EventQueryParams params) {
    String sql = params.getProgramIndicator().getAnalyticsPeriodBoundaries().stream()
        .filter(boundary -> boundary.isCohortDateBoundary() && !boundary.isEnrollmentHavingEventDateCohortBoundary())
        .map(boundary -> statementBuilder.getBoundaryCondition(boundary, params.getProgramIndicator(),
            params.getTimeFieldAsField(), params.getEarliestStartDate(), params.getLatestEndDate()))
        .collect(Collectors.joining(" and "));
    String sqlEventCohortBoundary = params.getProgramIndicator().hasEventDateCohortBoundary()
        ? getProgramIndicatorEventInProgramStageSql(params.getProgramIndicator(), params.getEarliestStartDate(), params.getLatestEndDate())
        : "";
    // Join the two fragments with "and", skipping whichever one is blank.
    return Stream.of(sql, sqlEventCohortBoundary).filter(StringUtils::isNotBlank).collect(Collectors.joining(" and "));
}
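The final line is a handy idiom for combining optional SQL fragments: filtering with StringUtils::isNotBlank before joining guarantees no dangling "and" when either fragment is empty. A standalone sketch:

import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.commons.lang3.StringUtils;

class SqlFragmentJoiner {
    // Joins condition fragments with "and", ignoring null or blank ones.
    static String joinConditions(String... fragments) {
        return Stream.of(fragments)
            .filter(StringUtils::isNotBlank)
            .collect(Collectors.joining(" and "));
    }

    public static void main(String[] args) {
        // Prints "a = 1 and b = 2" -- the blank fragment is dropped without a stray "and".
        System.out.println(joinConditions("a = 1", "", "b = 2"));
    }
}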
use of org.apache.commons.lang3.StringUtils in project hub-detect by blackducksoftware.
the class GradleInspectorExtractor method extract.
public Extraction extract(final File directory, final String gradleExe, final String gradleInspector, final File outputDirectory) {
    try {
        String gradleCommand = detectConfiguration.getProperty(DetectProperty.DETECT_GRADLE_BUILD_COMMAND, PropertyAuthority.None);
        final List<String> arguments = new ArrayList<>();
        if (StringUtils.isNotBlank(gradleCommand)) {
            // Remove any user-supplied "dependencies" task; it is appended explicitly below.
            gradleCommand = gradleCommand.replaceAll("dependencies", "").trim();
            Arrays.stream(gradleCommand.split(" ")).filter(StringUtils::isNotBlank).forEach(arguments::add);
        }
        arguments.add("dependencies");
        arguments.add(String.format("--init-script=%s", gradleInspector));
        arguments.add(String.format("-DGRADLEEXTRACTIONDIR=%s", outputDirectory.getCanonicalPath()));
        arguments.add("--info");
        final Executable executable = new Executable(directory, gradleExe, arguments);
        final ExecutableOutput output = executableRunner.execute(executable);
        if (output.getReturnCode() == 0) {
            final File rootProjectMetadataFile = detectFileFinder.findFile(outputDirectory, "rootProjectMetadata.txt");
            final List<File> codeLocationFiles = detectFileFinder.findFiles(outputDirectory, "*_dependencyGraph.txt");
            final List<DetectCodeLocation> codeLocations = new ArrayList<>();
            String projectName = null;
            String projectVersion = null;
            if (codeLocationFiles != null) {
                codeLocationFiles.stream()
                    .map(codeLocationFile -> gradleReportParser.parseDependencies(codeLocationFile))
                    .filter(Optional::isPresent)
                    .map(Optional::get)
                    .forEach(codeLocations::add);
                if (rootProjectMetadataFile != null) {
                    final Optional<NameVersion> projectNameVersion = gradleReportParser.parseRootProjectNameVersion(rootProjectMetadataFile);
                    if (projectNameVersion.isPresent()) {
                        projectName = projectNameVersion.get().getName();
                        projectVersion = projectNameVersion.get().getVersion();
                    }
                } else {
                    logger.warn("Gradle inspector did not create a metadata report, so no project version information was found.");
                }
            }
            return new Extraction.Builder().success(codeLocations).projectName(projectName).projectVersion(projectVersion).build();
        } else {
            return new Extraction.Builder().failure("The gradle inspector returned a non-zero exit code: " + output.getReturnCode()).build();
        }
    } catch (final Exception e) {
        return new Extraction.Builder().exception(e).build();
    }
}
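Two StringUtils guards do the real work in the argument handling: isNotBlank decides whether a user-supplied build command exists at all, and the isNotBlank filter after split(" ") drops the empty tokens that runs of spaces would otherwise produce. A standalone sketch of that tokenizing step:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;

class CommandTokenizer {
    // Splits a command string on spaces, dropping blank tokens caused by repeated whitespace.
    static List<String> tokenize(String command) {
        if (StringUtils.isBlank(command)) {
            return List.of();
        }
        return Arrays.stream(command.split(" "))
            .filter(StringUtils::isNotBlank)
            .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        // Prints [clean, build, --stacktrace]
        System.out.println(tokenize("clean  build   --stacktrace"));
    }
}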