Use of com.google.common.base.Splitter in project jackrabbit-oak by apache.
The class SimpleExcerptProvider, method getExcerpt.
static PropertyValue getExcerpt(PropertyValue value) {
    Splitter listSplitter = Splitter.on(',').trimResults().omitEmptyStrings();
    StringBuilder excerpt = new StringBuilder(EXCERPT_BEGIN);
    for (String v : listSplitter.splitToList(value.toString())) {
        excerpt.append(v);
    }
    excerpt.append(EXCERPT_END);
    return PropertyValues.newString(excerpt.toString());
}
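For reference, a minimal standalone sketch of the splitter configuration used above. The input string is a made-up example; EXCERPT_BEGIN, EXCERPT_END and PropertyValues are Oak internals and are left out here.

import com.google.common.base.Splitter;
import java.util.List;

public class SplitterTrimDemo {
    public static void main(String[] args) {
        Splitter listSplitter = Splitter.on(',').trimResults().omitEmptyStrings();
        // Whitespace around each segment is trimmed and empty segments are dropped.
        List<String> parts = listSplitter.splitToList(" foo , ,bar ,, baz ");
        System.out.println(parts); // [foo, bar, baz]
    }
}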
Use of com.google.common.base.Splitter in project cdap by caskdata.
The class SparkPackageUtils, method prepareSparkResources.
/**
 * Prepares the resources that need to be localized to the Spark client container.
 *
 * @param sparkCompat the Spark version to prepare for
 * @param locationFactory the location factory for uploading files
 * @param tempDir a temporary directory for file creation
 * @param localizeResources a map from localized name to {@link LocalizeResource} for this method to update
 * @param env the environment map to update
 * @throws IOException if the Spark resources cannot be prepared
 */
public static void prepareSparkResources(SparkCompat sparkCompat, LocationFactory locationFactory, File tempDir,
                                         Map<String, LocalizeResource> localizeResources,
                                         Map<String, String> env) throws IOException {
    Properties sparkConf = getSparkDefaultConf();

    // Localize the spark framework
    SparkFramework framework = prepareSparkFramework(sparkCompat, locationFactory, tempDir);
    framework.addLocalizeResource(localizeResources);
    framework.updateSparkConf(sparkConf);
    framework.updateSparkEnv(env);

    // Localize PySpark.
    List<String> pySparkArchives = new ArrayList<>();
    for (File archive : getLocalPySparkArchives(sparkCompat)) {
        localizeResources.put(archive.getName(), new LocalizeResource(archive));
        pySparkArchives.add(archive.getName());
    }
    // Set the PYSPARK_ARCHIVES_PATH environment variable in the YARN container.
    env.put(PYSPARK_ARCHIVES_PATH, Joiner.on(",").join(pySparkArchives));

    // Localize the spark-defaults.conf file
    File sparkDefaultConfFile = saveSparkDefaultConf(sparkConf,
        File.createTempFile(SPARK_DEFAULTS_CONF, null, tempDir));
    localizeResources.put(SPARK_DEFAULTS_CONF, new LocalizeResource(sparkDefaultConfFile));

    // Shallow copy all files under directory defined by $HADOOP_CONF_DIR and the explore conf directory
    // If $HADOOP_CONF_DIR is not defined, use the location of "yarn-site.xml" to determine the directory
    // This is part of workaround for CDAP-5019 (SPARK-13441) and CDAP-12330
    List<File> configDirs = new ArrayList<>();
    if (System.getenv().containsKey(ApplicationConstants.Environment.HADOOP_CONF_DIR.key())) {
        configDirs.add(new File(System.getenv(ApplicationConstants.Environment.HADOOP_CONF_DIR.key())));
    } else {
        URL yarnSiteLocation = SparkPackageUtils.class.getClassLoader().getResource("yarn-site.xml");
        if (yarnSiteLocation == null || !"file".equals(yarnSiteLocation.getProtocol())) {
            LOG.warn("Failed to derive HADOOP_CONF_DIR from yarn-site.xml location: {}", yarnSiteLocation);
        } else {
            configDirs.add(new File(yarnSiteLocation.getPath()).getParentFile());
        }
    }

    // Include the explore config dirs as well
    Splitter splitter = Splitter.on(File.pathSeparatorChar).omitEmptyStrings();
    for (String dir : splitter.split(System.getProperty(EXPLORE_CONF_DIRS, ""))) {
        configDirs.add(new File(dir));
    }

    if (!configDirs.isEmpty()) {
        File targetFile = File.createTempFile(LOCALIZED_CONF_DIR, ".zip", tempDir);
        Set<String> entries = new HashSet<>();
        try (ZipOutputStream output = new ZipOutputStream(new BufferedOutputStream(new FileOutputStream(targetFile)))) {
            for (File configDir : configDirs) {
                try {
                    LOG.debug("Adding files from {} to {}.zip", configDir, LOCALIZED_CONF_DIR);
                    addConfigFiles(configDir, entries, output);
                } catch (IOException e) {
                    LOG.warn("Failed to create archive from {}", configDir, e);
                }
            }
        }
        localizeResources.put(LOCALIZED_CONF_DIR, new LocalizeResource(targetFile, true));
        env.put("YARN_CONF_DIR", "$PWD/" + LOCALIZED_CONF_DIR);
    }
}
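The EXPLORE_CONF_DIRS system property above holds a path-separator-delimited list of directories. A minimal sketch of that split, using a hypothetical property name and made-up paths for the demo (the real constant lives in CDAP):

import com.google.common.base.Splitter;
import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class ConfDirSplitDemo {
    public static void main(String[] args) {
        // Hypothetical property name and value, used only for this demo.
        String raw = "/etc/hive/conf" + File.pathSeparator + File.pathSeparator + "/etc/hadoop/conf";
        System.setProperty("demo.explore.conf.dirs", raw);

        Splitter splitter = Splitter.on(File.pathSeparatorChar).omitEmptyStrings();
        List<File> configDirs = new ArrayList<>();
        for (String dir : splitter.split(System.getProperty("demo.explore.conf.dirs", ""))) {
            configDirs.add(new File(dir));
        }
        // The empty segment between the two separators is dropped by omitEmptyStrings().
        System.out.println(configDirs); // [/etc/hive/conf, /etc/hadoop/conf]
    }
}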
Use of com.google.common.base.Splitter in project ANNIS by korpling.
The class AnnisBaseRunner, method runInteractive.
protected void runInteractive() throws IOException {
    System.out.println(helloMessage + " " + VersionInfo.getReleaseName());
    System.out.println();
    System.out.println("Use \"help\" for a list of all commands.");
    System.out.println();

    ConsoleReader console = new ConsoleReader();
    File annisDir = new File(System.getProperty("user.home") + "/.annis/");
    String annisDirPath = annisDir.getAbsolutePath();
    if (!annisDir.exists()) {
        log.info("Creating directory: " + annisDirPath);
        if (!annisDir.mkdirs()) {
            log.warn("Could not create directory: " + annisDirPath);
        }
    } else if (!annisDir.isDirectory()) {
        log.warn("Could not create directory because a file with the same name already exists: " + annisDirPath);
    }

    history = new FileHistory(new File(System.getProperty("user.home") + "/.annis/shellhistory.txt"));
    console.setHistory(history);
    console.setHistoryEnabled(true);
    console.setBellEnabled(true);
    console.setExpandEvents(false);

    List<String> commands = detectAvailableCommands();
    Collections.sort(commands);
    console.addCompleter(new StringsCompleter(commands));

    Splitter argSplitter = Splitter.on(' ').limit(2);
    String line;
    StringBuilder input = new StringBuilder();
    prompt = "no corpus>";
    console.setPrompt(prompt + " ");

    while ((line = console.readLine()) != null) {
        if (line.endsWith("\\")) {
            // multi-line input
            input.append(line.substring(0, line.length() - 1)).append("\n");
            // notify the user by changing the prompt
            console.setPrompt("> ");
        } else {
            // input finished, run command
            input.append(line);
            ArrayList<String> splitted = Lists.newArrayList(argSplitter.split(input.toString()));
            String command = splitted.get(0);
            String args = "";
            if ("help".equalsIgnoreCase(command)) {
                System.out.println("Available commands:");
                System.out.println(StringUtils.join(commands, "\n"));
            } else {
                if (splitted.size() > 1) {
                    args = splitted.get(1);
                }
            }
            try {
                if (!command.isEmpty()) {
                    runCommand(command, args);
                }
            } catch (UsageException e) {
                error(e);
            }
            // reset the current prompt
            console.setPrompt(prompt + " ");
            // empty input
            input = new StringBuilder();
        }
    } // end while
}
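With limit(2), the splitter above separates the command name from the remainder of the line without splitting the argument string itself. A minimal sketch; the input line is a made-up example, not necessarily a real ANNIS command:

import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import java.util.ArrayList;

public class CommandSplitDemo {
    public static void main(String[] args) {
        Splitter argSplitter = Splitter.on(' ').limit(2);
        // At most two parts: the command and everything after the first space.
        ArrayList<String> splitted = Lists.newArrayList(argSplitter.split("corpus pcc2 tiger2"));
        System.out.println(splitted.get(0)); // corpus
        System.out.println(splitted.get(1)); // pcc2 tiger2
    }
}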
Use of com.google.common.base.Splitter in project ANNIS by korpling.
The class AnnotateSqlGenerator, method getDocumentQuery.
public String getDocumentQuery(long toplevelCorpusID, String documentName, List<String> nodeAnnotationFilter) {
    TableAccessStrategy tas = createTableAccessStrategy();
    List<String> fields = getSelectFields();

    boolean filter = false;
    Set<String> qualifiedNodeAnnos = new LinkedHashSet<>();
    Set<String> unqualifiedNodeAnnos = new LinkedHashSet<>();
    if (nodeAnnotationFilter != null) {
        Splitter namespaceSplitter = Splitter.on("::").trimResults().limit(2);
        filter = true;
        for (String anno : nodeAnnotationFilter) {
            List<String> splitted = namespaceSplitter.splitToList(anno);
            if (splitted.size() > 1) {
                qualifiedNodeAnnos.add(AnnotationConditionProvider.regexEscaper.escape(splitted.get(0))
                    + ":" + AnnotationConditionProvider.regexEscaper.escape(splitted.get(1)));
            } else {
                unqualifiedNodeAnnos.add(AnnotationConditionProvider.regexEscaper.escape(splitted.get(0)));
            }
        }
    }

    StringBuilder template = new StringBuilder();
    template.append("SELECT DISTINCT \n"
        + "\tARRAY[-1::bigint] AS key, ARRAY[''::varchar] AS key_names, 0 as matchstart, "
        + StringUtils.join(fields, ", ") + ", "
        + "c.path_name as path, c.path_name[1] as document_name\n"
        + "FROM\n"
        + "\tfacts_:top AS facts,\n"
        + "\tcorpus as c, corpus as toplevel\n"
        + "WHERE\n"
        + "\ttoplevel.id = :top AND c.name = :document_name AND "
        + tas.aliasedColumn(NODE_TABLE, "corpus_ref") + " = c.id\n"
        + "\tAND toplevel.top_level IS TRUE\n"
        + "\tAND c.pre >= toplevel.pre AND c.post <= toplevel.post\n");

    if (filter) {
        template.append("\tAND (is_token IS TRUE");
        if (!qualifiedNodeAnnos.isEmpty()) {
            String orExpr = Joiner.on(")|(").join(qualifiedNodeAnnos);
            template.append(" OR node_qannotext ~ '(^((").append(orExpr).append(")):(.*)$)' ");
        }
        if (!unqualifiedNodeAnnos.isEmpty()) {
            String orExpr = Joiner.on(")|(").join(unqualifiedNodeAnnos);
            template.append(" OR node_annotext ~ '(^((").append(orExpr).append(")):(.*)$)' ");
        }
        template.append(")\n");
    }

    template.append("ORDER BY ")
        .append(tas.aliasedColumn(COMPONENT_TABLE, "name")).append(", ")
        .append(tas.aliasedColumn(COMPONENT_TABLE, "id")).append(", ")
        .append(tas.aliasedColumn(RANK_TABLE, "pre"));

    String sql = template.toString()
        .replace(":top", "" + toplevelCorpusID)
        .replace(":document_name", sqlString(documentName));
    return sql;
}
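The splitter above separates an optional namespace from the annotation name at the first "::". A minimal sketch of that behavior, independent of the ANNIS escaping helpers; the annotation names are made-up examples:

import com.google.common.base.Splitter;

public class NamespaceSplitDemo {
    public static void main(String[] args) {
        Splitter namespaceSplitter = Splitter.on("::").trimResults().limit(2);
        // Qualified annotation: namespace and name.
        System.out.println(namespaceSplitter.splitToList("tiger::pos")); // [tiger, pos]
        // Unqualified annotation: a single element.
        System.out.println(namespaceSplitter.splitToList("pos"));        // [pos]
        // limit(2) keeps any further "::" inside the second element.
        System.out.println(namespaceSplitter.splitToList("a::b::c"));    // [a, b::c]
    }
}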
Use of com.google.common.base.Splitter in project google-cloud-intellij by GoogleCloudPlatform.
The class GoogleLoginPrefs, method addUser.
private static void addUser(String user) {
    Preferences prefs = getPrefs();
    String allUsersString = prefs.get(USERS, null);
    if (allUsersString == null) {
        prefs.put(USERS, user);
        return;
    }

    List<String> allUsers = new ArrayList<String>();
    Splitter splitter = Splitter.on(DELIMITER).omitEmptyStrings();
    for (String scope : splitter.split(allUsersString)) {
        allUsers.add(scope);
    }
    if (allUsers.contains(user)) {
        return;
    }

    Joiner joiner = Joiner.on(DELIMITER);
    prefs.put(USERS, joiner.join(allUsersString, user));
    flushPrefs(prefs);
}
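Here Splitter and Joiner are used as a pair to read and extend a delimiter-separated preference value. A minimal sketch of that round trip, assuming a comma as a hypothetical DELIMITER (the plugin's actual delimiter constant is not shown in this snippet) and made-up user names:

import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import java.util.List;

public class UserListDemo {
    // Hypothetical delimiter; the real constant lives in GoogleLoginPrefs.
    private static final String DELIMITER = ",";

    public static void main(String[] args) {
        String allUsersString = "alice@example.com,bob@example.com";

        // Parse the stored value into a list, skipping empty segments.
        List<String> allUsers =
            Lists.newArrayList(Splitter.on(DELIMITER).omitEmptyStrings().split(allUsersString));
        System.out.println(allUsers); // [alice@example.com, bob@example.com]

        // Append a new user by joining the old string with the new entry.
        String updated = Joiner.on(DELIMITER).join(allUsersString, "carol@example.com");
        System.out.println(updated); // alice@example.com,bob@example.com,carol@example.com
    }
}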