Use of org.apache.hadoop.hbase.spark.JavaHBaseContext in project hbase by apache: the main method of the JavaHBaseBulkPutExample class.
public static void main(String[] args) {
  if (args.length < 2) {
    System.out.println("JavaHBaseBulkPutExample {tableName} {columnFamily}");
    return;
  }

  String tableName = args[0];
  String columnFamily = args[1];

  SparkConf sparkConf = new SparkConf().setAppName("JavaHBaseBulkPutExample " + tableName);
  JavaSparkContext jsc = new JavaSparkContext(sparkConf);

  try {
    List<String> list = new ArrayList<>(5);
    list.add("1," + columnFamily + ",a,1");
    list.add("2," + columnFamily + ",a,2");
    list.add("3," + columnFamily + ",a,3");
    list.add("4," + columnFamily + ",a,4");
    list.add("5," + columnFamily + ",a,5");

    JavaRDD<String> rdd = jsc.parallelize(list);

    Configuration conf = HBaseConfiguration.create();
    JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);

    hbaseContext.bulkPut(rdd, TableName.valueOf(tableName), new PutFunction());
  } finally {
    jsc.stop();
  }
}
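The bulkPut call relies on a PutFunction (a Spark org.apache.spark.api.java.function.Function) to turn each string in the RDD into a Put; that class is not shown above. A minimal sketch, assuming the rowKey,columnFamily,qualifier,value layout used when building the list:

public static class PutFunction implements Function<String, Put> {
  private static final long serialVersionUID = 1L;

  public Put call(String v) throws Exception {
    // Split "rowKey,columnFamily,qualifier,value" and build a single-cell Put.
    String[] cells = v.split(",");
    Put put = new Put(Bytes.toBytes(cells[0]));
    put.addColumn(Bytes.toBytes(cells[1]), Bytes.toBytes(cells[2]), Bytes.toBytes(cells[3]));
    return put;
  }
}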
Use of org.apache.hadoop.hbase.spark.JavaHBaseContext in project hbase by apache: the main method of the JavaHBaseBulkDeleteExample class.
public static void main(String[] args) {
  if (args.length < 1) {
    System.out.println("JavaHBaseBulkDeleteExample {tableName}");
    return;
  }

  String tableName = args[0];

  SparkConf sparkConf = new SparkConf().setAppName("JavaHBaseBulkDeleteExample " + tableName);
  JavaSparkContext jsc = new JavaSparkContext(sparkConf);

  try {
    List<byte[]> list = new ArrayList<>(5);
    list.add(Bytes.toBytes("1"));
    list.add(Bytes.toBytes("2"));
    list.add(Bytes.toBytes("3"));
    list.add(Bytes.toBytes("4"));
    list.add(Bytes.toBytes("5"));

    JavaRDD<byte[]> rdd = jsc.parallelize(list);

    Configuration conf = HBaseConfiguration.create();
    JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);

    hbaseContext.bulkDelete(rdd, TableName.valueOf(tableName), new DeleteFunction(), 4);
  } finally {
    jsc.stop();
  }
}
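Here bulkDelete maps each row key in the RDD to a Delete through a DeleteFunction (the final argument, 4, is the batch size used when flushing deletes to the table). The function is not shown above; a minimal sketch that deletes the whole row for each key:

public static class DeleteFunction implements Function<byte[], Delete> {
  private static final long serialVersionUID = 1L;

  public Delete call(byte[] v) throws Exception {
    // Each byte[] element is treated as a row key and becomes a whole-row Delete.
    return new Delete(v);
  }
}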
Use of org.apache.hadoop.hbase.spark.JavaHBaseContext in project hbase by apache: the main method of the JavaHBaseBulkGetExample class.
public static void main(String[] args) {
  if (args.length < 1) {
    System.out.println("JavaHBaseBulkGetExample {tableName}");
    return;
  }

  String tableName = args[0];

  SparkConf sparkConf = new SparkConf().setAppName("JavaHBaseBulkGetExample " + tableName);
  JavaSparkContext jsc = new JavaSparkContext(sparkConf);

  try {
    List<byte[]> list = new ArrayList<>(5);
    list.add(Bytes.toBytes("1"));
    list.add(Bytes.toBytes("2"));
    list.add(Bytes.toBytes("3"));
    list.add(Bytes.toBytes("4"));
    list.add(Bytes.toBytes("5"));

    JavaRDD<byte[]> rdd = jsc.parallelize(list);

    Configuration conf = HBaseConfiguration.create();
    JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);

    hbaseContext.bulkGet(TableName.valueOf(tableName), 2, rdd, new GetFunction(), new ResultFunction());
  } finally {
    jsc.stop();
  }
}
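bulkGet takes two functions: one that turns each RDD element into a Get (the second argument, 2, is the batch size per round trip) and one that converts each fetched Result into the output element type. Neither GetFunction nor ResultFunction is shown above; minimal sketches, assuming the output is a plain string rendering of the row:

public static class GetFunction implements Function<byte[], Get> {
  private static final long serialVersionUID = 1L;

  public Get call(byte[] v) throws Exception {
    // Each byte[] element is used directly as a row key.
    return new Get(v);
  }
}

public static class ResultFunction implements Function<Result, String> {
  private static final long serialVersionUID = 1L;

  public String call(Result result) throws Exception {
    // Render the row key followed by each cell's qualifier and value.
    StringBuilder b = new StringBuilder();
    b.append(Bytes.toString(result.getRow())).append(":");
    for (Cell cell : result.rawCells()) {
      b.append("(")
        .append(Bytes.toString(CellUtil.cloneQualifier(cell)))
        .append(",")
        .append(Bytes.toString(CellUtil.cloneValue(cell)))
        .append(")");
    }
    return b.toString();
  }
}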
Use of org.apache.hadoop.hbase.spark.JavaHBaseContext in project hbase by apache: the main method of the JavaHBaseDistributedScan class.
public static void main(String[] args) {
  if (args.length < 1) {
    System.out.println("JavaHBaseDistributedScan {tableName}");
    return;
  }

  String tableName = args[0];

  SparkConf sparkConf = new SparkConf().setAppName("JavaHBaseDistributedScan " + tableName);
  JavaSparkContext jsc = new JavaSparkContext(sparkConf);

  try {
    Configuration conf = HBaseConfiguration.create();
    JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);

    Scan scan = new Scan();
    scan.setCaching(100);

    JavaRDD<Tuple2<ImmutableBytesWritable, Result>> javaRdd =
        hbaseContext.hbaseRDD(TableName.valueOf(tableName), scan);

    List<String> results = javaRdd.map(new ScanConvertFunction()).collect();

    System.out.println("Result Size: " + results.size());
  } finally {
    jsc.stop();
  }
}
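hbaseRDD yields Tuple2 pairs of (ImmutableBytesWritable, Result), so the map step needs a ScanConvertFunction to turn each pair into something printable. That class is not shown above; a minimal sketch that extracts the row key as a string:

private static class ScanConvertFunction
    implements Function<Tuple2<ImmutableBytesWritable, Result>, String> {

  public String call(Tuple2<ImmutableBytesWritable, Result> v1) throws Exception {
    // The first element of the tuple wraps the row key; copy it out and decode it.
    return Bytes.toString(v1._1().copyBytes());
  }
}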
Use of org.apache.hadoop.hbase.spark.JavaHBaseContext in project hbase by apache: the main method of the JavaHBaseMapGetPutExample class.
public static void main(String[] args) {
  if (args.length < 1) {
    System.out.println("JavaHBaseMapGetPutExample {tableName}");
    return;
  }

  final String tableName = args[0];

  SparkConf sparkConf = new SparkConf().setAppName("JavaHBaseMapGetPutExample " + tableName);
  JavaSparkContext jsc = new JavaSparkContext(sparkConf);

  try {
    List<byte[]> list = new ArrayList<>(5);
    list.add(Bytes.toBytes("1"));
    list.add(Bytes.toBytes("2"));
    list.add(Bytes.toBytes("3"));
    list.add(Bytes.toBytes("4"));
    list.add(Bytes.toBytes("5"));

    JavaRDD<byte[]> rdd = jsc.parallelize(list);

    Configuration conf = HBaseConfiguration.create();
    JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);

    hbaseContext.foreachPartition(rdd, new VoidFunction<Tuple2<Iterator<byte[]>, Connection>>() {
      public void call(Tuple2<Iterator<byte[]>, Connection> t) throws Exception {
        Table table = t._2().getTable(TableName.valueOf(tableName));
        BufferedMutator mutator = t._2().getBufferedMutator(TableName.valueOf(tableName));

        while (t._1().hasNext()) {
          byte[] b = t._1().next();
          Result r = table.get(new Get(b));
          if (r.getExists()) {
            mutator.mutate(new Put(b));
          }
        }

        mutator.flush();
        mutator.close();
        table.close();
      }
    });
  } finally {
    jsc.stop();
  }
}
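One caveat: new Put(b) above carries no cells, and the HBase client generally rejects an empty Put when it is handed to a table or a BufferedMutator. A practical variant would add at least one column before mutating; in the sketch below the family, qualifier, and value are placeholders, not part of the original example:

Put put = new Put(b);
// Hypothetical column; the original example does not specify one.
put.addColumn(Bytes.toBytes("c"), Bytes.toBytes("exists"), Bytes.toBytes("true"));
mutator.mutate(put);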