Example use of java.lang.Iterable in project atlasdb by palantir: class SchemaApiTestV2Table, method getColumn2.
/**
 * Looks up the Column2 value for each of the given row keys.
 * Every value is materialized in memory at once, so this should not be used
 * for large result sets. Keys whose row has no Column2 cell are simply
 * absent from the returned map.
 */
public Map<String, StringValue> getColumn2(Iterable<String> rowKeys) {
    // Restrict the read to the single physical column ("d") that backs Column2.
    ColumnSelection colSelection =
            ColumnSelection.create(ImmutableList.of(PtBytes.toCachedBytes("d")));

    // Wrap each raw key in its typed row object before persisting to bytes.
    List<SchemaApiTestTable.SchemaApiTestRow> rows = new ArrayList<>();
    for (String rowKey : rowKeys) {
        rows.add(SchemaApiTestTable.SchemaApiTestRow.of(rowKey));
    }

    SortedMap<byte[], RowResult<byte[]>> results =
            t.getRows(tableRef, Persistables.persistAll(rows), colSelection);

    Map<String, StringValue> column2ByKey = new HashMap<>();
    for (RowResult<byte[]> rawResult : results.values()) {
        SchemaApiTestTable.SchemaApiTestRowResult rowResult =
                SchemaApiTestTable.SchemaApiTestRowResult.of(rawResult);
        column2ByKey.put(rowResult.getRowName().getComponent1(), rowResult.getColumn2());
    }
    return column2ByKey;
}
Example use of java.lang.Iterable in project atlasdb by palantir: class SchemaApiTestV2Table, method getColumn1.
/**
 * Looks up the Column1 value for each of the given row keys.
 * All Column1 values are loaded into memory at once, so avoid calling this
 * with very large key sets. Keys whose row has no Column1 cell are omitted
 * from the result.
 */
public Map<String, Long> getColumn1(Iterable<String> rowKeys) {
    // Column1 is stored under the short column name "c".
    byte[] column1Name = PtBytes.toCachedBytes("c");
    ColumnSelection colSelection = ColumnSelection.create(ImmutableList.of(column1Name));

    // Convert each key to its typed row representation.
    List<SchemaApiTestTable.SchemaApiTestRow> rows = new ArrayList<>();
    for (String key : rowKeys) {
        rows.add(SchemaApiTestTable.SchemaApiTestRow.of(key));
    }

    SortedMap<byte[], RowResult<byte[]>> results =
            t.getRows(tableRef, Persistables.persistAll(rows), colSelection);

    Map<String, Long> column1ByKey = new HashMap<>();
    for (RowResult<byte[]> rawResult : results.values()) {
        SchemaApiTestTable.SchemaApiTestRowResult rowResult =
                SchemaApiTestTable.SchemaApiTestRowResult.of(rawResult);
        column1ByKey.put(rowResult.getRowName().getComponent1(), rowResult.getColumn1());
    }
    return column1ByKey;
}
Example use of java.lang.Iterable in project selenium_java by sergueik: class Field, method toJson.
/**
 * Converts the given value to a JSON object according to the field's
 * edit metadata type.
 *
 * @param name Field name
 * @param value New field value
 * @param editmeta Edit metadata JSON object
 *
 * @return a JSON-encoded field value
 *
 * @throws JiraException when a value is bad or field has invalid metadata
 * @throws UnsupportedOperationException when a field type isn't supported
 */
public static Object toJson(String name, Object value, JSONObject editmeta) throws JiraException, UnsupportedOperationException {
    Meta m = getFieldMetadata(name, editmeta);
    if (m.type == null) {
        throw new JiraException("Field '" + name + "' is missing metadata type");
    }

    if (m.type.equals("array")) {
        // A null array value is treated as an empty collection.
        if (value == null) {
            value = new ArrayList<Object>();
        } else if (!(value instanceof Iterable)) {
            throw new JiraException("Field '" + name + "' expects an Iterable value");
        }
        return toArray((Iterable) value, m.items, m.custom);
    } else if (m.type.equals("date")) {
        if (value == null) {
            return JSONNull.getInstance();
        }
        Date d = toDate(value);
        if (d == null) {
            throw new JiraException("Field '" + name + "' expects a date value or format is invalid");
        }
        SimpleDateFormat df = new SimpleDateFormat(DATE_FORMAT);
        return df.format(d);
    } else if (m.type.equals("datetime")) {
        if (value == null) {
            return JSONNull.getInstance();
        } else if (!(value instanceof Timestamp)) {
            throw new JiraException("Field '" + name + "' expects a Timestamp value");
        }
        SimpleDateFormat df = new SimpleDateFormat(DATETIME_FORMAT);
        return df.format(value);
    } else if (m.type.equals("issuetype") || m.type.equals("priority") || m.type.equals("user") || m.type.equals("resolution")) {
        // These entity fields are addressed by NAME unless an explicit tuple is given.
        JSONObject json = new JSONObject();
        if (value == null) {
            return JSONNull.getInstance();
        } else if (value instanceof ValueTuple) {
            ValueTuple tuple = (ValueTuple) value;
            json.put(tuple.type, tuple.value.toString());
        } else {
            json.put(ValueType.NAME.toString(), value.toString());
        }
        return json.toString();
    } else if (m.type.equals("project") || m.type.equals("issuelink")) {
        // Projects and issue links are addressed by KEY unless a tuple overrides it.
        JSONObject json = new JSONObject();
        if (value == null) {
            return JSONNull.getInstance();
        } else if (value instanceof ValueTuple) {
            ValueTuple tuple = (ValueTuple) value;
            json.put(tuple.type, tuple.value.toString());
        } else {
            json.put(ValueType.KEY.toString(), value.toString());
        }
        return json.toString();
    } else if (m.type.equals("string") || (m.type.equals("securitylevel"))) {
        if (value == null) {
            return "";
        } else if (value instanceof List) {
            return toJsonMap((List) value);
        } else if (value instanceof ValueTuple) {
            JSONObject json = new JSONObject();
            ValueTuple tuple = (ValueTuple) value;
            json.put(tuple.type, tuple.value.toString());
            return json.toString();
        }
        return value.toString();
    } else if (m.type.equals("timetracking")) {
        if (value == null) {
            return JSONNull.getInstance();
        } else if (value instanceof TimeTracking) {
            return ((TimeTracking) value).toJsonObject();
        }
        // BUG FIX: previously a non-null, non-TimeTracking value fell through to the
        // generic UnsupportedOperationException below, which misleadingly reported
        // "timetracking is not a supported field type". Report the real problem —
        // a type mismatch — consistently with the other branches.
        throw new JiraException("Field '" + name + "' expects a TimeTracking value");
    } else if (m.type.equals("number")) {
        if (!(value instanceof java.lang.Integer) && !(value instanceof java.lang.Double) && !(value instanceof java.lang.Float) && !(value instanceof java.lang.Long)) {
            throw new JiraException("Field '" + name + "' expects a Numeric value");
        }
        return value;
    }

    throw new UnsupportedOperationException(m.type + " is not a supported field type");
}
Example use of java.lang.Iterable in project learning-spark by databricks: class WordCount, method main.
/**
 * Classic Spark word count: reads a text file, splits each line on spaces,
 * counts occurrences of every word, and writes the counts as a text file.
 *
 * args[0] = Spark master URL, args[1] = input path, args[2] = output path.
 */
public static void main(String[] args) throws Exception {
    String master = args[0];
    JavaSparkContext sc = new JavaSparkContext(master, "wordcount", System.getenv("SPARK_HOME"), System.getenv("JARS"));
    try {
        JavaRDD<String> rdd = sc.textFile(args[1]);
        JavaPairRDD<String, Integer> counts = rdd.flatMap(new FlatMapFunction<String, String>() {

            public Iterable<String> call(String x) {
                return Arrays.asList(x.split(" "));
            }
        }).mapToPair(new PairFunction<String, String, Integer>() {

            public Tuple2<String, Integer> call(String x) {
                // FIX: use the diamond operator instead of a raw Tuple2.
                return new Tuple2<>(x, 1);
            }
        }).reduceByKey(new Function2<Integer, Integer, Integer>() {

            public Integer call(Integer x, Integer y) {
                return x + y;
            }
        });
        // Action: triggers evaluation of the whole pipeline.
        counts.saveAsTextFile(args[2]);
    } finally {
        // FIX: previously the context was never stopped; shut it down so the
        // driver releases its resources even if the job throws.
        sc.stop();
    }
}
A second example use of java.lang.Iterable in project learning-spark by databricks: class WordCount, method main.
/**
 * Spark word count driven by SparkConf: reads a text file, splits each line
 * on spaces, counts occurrences of every word, and saves the counts.
 *
 * args[0] = input path, args[1] = output path.
 */
public static void main(String[] args) throws Exception {
    String inputFile = args[0];
    String outputFile = args[1];
    // Create a Java Spark Context.
    SparkConf conf = new SparkConf().setAppName("wordCount");
    JavaSparkContext sc = new JavaSparkContext(conf);
    try {
        // Load our input data.
        JavaRDD<String> input = sc.textFile(inputFile);
        // Split up into words.
        JavaRDD<String> words = input.flatMap(new FlatMapFunction<String, String>() {

            public Iterable<String> call(String x) {
                return Arrays.asList(x.split(" "));
            }
        });
        // Transform into word and count.
        JavaPairRDD<String, Integer> counts = words.mapToPair(new PairFunction<String, String, Integer>() {

            public Tuple2<String, Integer> call(String x) {
                // FIX: use the diamond operator instead of a raw Tuple2.
                return new Tuple2<>(x, 1);
            }
        }).reduceByKey(new Function2<Integer, Integer, Integer>() {

            public Integer call(Integer x, Integer y) {
                return x + y;
            }
        });
        // Save the word count back out to a text file, causing evaluation.
        counts.saveAsTextFile(outputFile);
    } finally {
        // FIX: previously the context was never stopped; shut it down so the
        // driver releases its resources even if the job throws.
        sc.stop();
    }
}
Aggregations