Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project akela by mozilla-metrics.
From the class HBaseMultiScanLoader, method getNext().
/* (non-Javadoc)
 * @see org.apache.pig.LoadFunc#getNext()
 */
@Override
public Tuple getNext() throws IOException {
    try {
        if (reader.nextKeyValue()) {
            ImmutableBytesWritable rowKey = reader.getCurrentKey();
            Result result = reader.getCurrentValue();
            Tuple tuple = TupleFactory.getInstance().newTuple(columns.size() + 1);
            tuple.set(0, new DataByteArray(rowKey.get()));
            int i = 1;
            for (Pair<String, String> pair : columns) {
                byte[] v = result.getValue(pair.getFirst().getBytes(), pair.getSecond().getBytes());
                if (v != null) {
                    tuple.set(i, new DataByteArray(v));
                }
                i++;
            }
            return tuple;
        }
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
    return null;
}
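In this snippet, getNext() relies on state set up elsewhere in HBaseMultiScanLoader: a RecordReader<ImmutableBytesWritable, Result> handed to the loader by Pig, and a list of column family/qualifier pairs. A minimal sketch of that assumed setup; the field names and column-spec format below are illustrative, not necessarily the project's own:

// Assumed fields backing getNext(); names and parsing format are hypothetical.
private RecordReader<ImmutableBytesWritable, Result> reader;
private List<Pair<String, String>> columns = new ArrayList<Pair<String, String>>();

@Override
public void prepareToRead(RecordReader reader, PigSplit split) throws IOException {
    // Pig calls this once per input split before getNext() is invoked.
    this.reader = reader;
}

// e.g. "info:name,info:age" -> [(info, name), (info, age)]
private void parseColumns(String spec) {
    for (String col : spec.split(",")) {
        String[] parts = col.split(":");
        columns.add(new Pair<String, String>(parts[0], parts[1]));
    }
}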
Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project honeycomb by altamiracorp.
From the class BulkLoadMapper, method map().
@Override
public void map(LongWritable offset, Text line, Context context) {
    try {
        Row row = rowParser.parseRow(line.toString());
        List<Put> puts = mutationFactory.insert(tableId, row);
        for (Put put : puts) {
            context.write(new ImmutableBytesWritable(put.getRow()), put);
        }
        context.getCounter(Counters.ROWS).increment(1);
        context.getCounter(Counters.PUTS).increment(puts.size());
    } catch (IOException e) {
        LOG.error("CSVParser unable to parse line: " + line.toString(), e);
        context.getCounter(Counters.FAILED_ROWS).increment(1);
    } catch (IllegalArgumentException e) {
        LOG.error(format("The line %s was incorrectly formatted. Error %s", line.toString(), e.getMessage()));
        context.getCounter(Counters.FAILED_ROWS).increment(1);
    } catch (ParseException e) {
        LOG.error(format("Parsing failed on line %s with message %s", line.toString(), e.getMessage()));
        context.getCounter(Counters.FAILED_ROWS).increment(1);
    } catch (Exception e) {
        LOG.error(format("The following error %s occurred during mapping for line %s", e.getMessage(), line.toString()));
        context.getCounter(Counters.FAILED_ROWS).increment(1);
    }
}
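BulkLoadMapper emits ImmutableBytesWritable/Put pairs, which is the key/value shape HBase's bulk-load tooling expects from the map phase. As a rough illustration, a driver could wire the mapper up along these lines; the paths, table name, and job setup here are hypothetical, and the real Honeycomb driver may differ (newer HBase versions use HFileOutputFormat2 instead of HFileOutputFormat):

// Hypothetical driver sketch; not the project's actual job configuration.
Configuration conf = HBaseConfiguration.create();
Job job = Job.getInstance(conf, "honeycomb bulk load");
job.setJarByClass(BulkLoadMapper.class);
job.setMapperClass(BulkLoadMapper.class);
// Must match what BulkLoadMapper.map() writes to the context.
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
job.setMapOutputValueClass(Put.class);
FileInputFormat.addInputPath(job, new Path("/input/rows.csv"));      // hypothetical input
FileOutputFormat.setOutputPath(job, new Path("/tmp/hfile-staging")); // hypothetical staging dir
// Sets up the partitioner/reducer so the HFiles line up with region boundaries.
HFileOutputFormat.configureIncrementalLoad(job, new HTable(conf, "demo_table"));
job.waitForCompletion(true);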
Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project tdi-studio-se by Talend.
From the class HBaseStore, method run().
public static void run(String zookeeperHost, String zookeeperPort, String table, final String columns, Map<String, String> properties, TalendRDD<List<Object>> rdd, final List<Integer> keyList) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", zookeeperHost);
    conf.set("hbase.zookeeper.property.clientPort", zookeeperPort);
    conf.set("hbase.mapred.tablecolumns", columns);
    for (Entry<String, String> e : properties.entrySet()) {
        conf.set(e.getKey(), e.getValue());
    }
    TalendPairRDD<ImmutableBytesWritable, Put> hbaseRdd = rdd.mapToPair(new PairFunction<List<Object>, ImmutableBytesWritable, Put>() {

        private static final long serialVersionUID = 1L;

        public Tuple2<ImmutableBytesWritable, Put> call(List<Object> t) throws Exception {
            String key = "";
            for (int i : keyList) {
                key = key + t.get(i);
            }
            org.apache.hadoop.hbase.client.Put put = new org.apache.hadoop.hbase.client.Put(DigestUtils.md5("".equals(key) ? t.toString() : key));
            String[] cols = columns.split(" ");
            int i = 0;
            for (Object o : t) {
                if (cols.length > i) {
                    put.add(org.apache.hadoop.hbase.util.Bytes.toBytes(cols[i].split(":")[0]), org.apache.hadoop.hbase.util.Bytes.toBytes(cols[i].split(":")[1]), (o != null ? org.apache.hadoop.hbase.util.Bytes.toBytes(o.toString()) : null));
                }
                i++;
            }
            return new Tuple2<ImmutableBytesWritable, Put>(new ImmutableBytesWritable(), put);
        }
    });
    JobConf config = new JobConf(conf);
    config.set(TableOutputFormat.OUTPUT_TABLE, table);
    config.setOutputFormat(TableOutputFormat.class);
    hbaseRdd.saveAsHadoopDataset(config);
}
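Two details are worth noting: the row key is an MD5 digest of the concatenated key columns (or of the whole record when keyList is empty), and TableOutputFormat takes the row key from the Put itself, which is why an empty ImmutableBytesWritable is emitted as the pair key. A small standalone sketch of the same key derivation, with made-up values:

// Illustrative only: mirrors the row-key derivation inside the mapToPair() call above.
List<Object> record = Arrays.<Object>asList("alice", 42, "2024-01-01");
List<Integer> keyList = Arrays.asList(0, 1);                 // key columns 0 and 1
StringBuilder key = new StringBuilder();
for (int i : keyList) {
    key.append(record.get(i));                               // builds "alice42"
}
byte[] rowKey = DigestUtils.md5(key.length() == 0 ? record.toString() : key.toString());
Put put = new Put(rowKey);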
Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
From the class PhoenixRuntimeIT, method assertTenantIds().
private static void assertTenantIds(Expression e, HTableInterface htable, Filter filter, String[] tenantIds) throws IOException {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    Scan scan = new Scan();
    scan.setFilter(filter);
    ResultScanner scanner = htable.getScanner(scan);
    Result result = null;
    ResultTuple tuple;
    Set<String> actualTenantIds = Sets.newHashSetWithExpectedSize(tenantIds.length);
    Set<String> expectedTenantIds = new HashSet<>(Arrays.asList(tenantIds));
    while ((result = scanner.next()) != null) {
        tuple = new ResultTuple(result);
        e.evaluate(tuple, ptr);
        String tenantId = (String) PVarchar.INSTANCE.toObject(ptr);
        actualTenantIds.add(tenantId == null ? "" : tenantId);
    }
    assertTrue(actualTenantIds.containsAll(expectedTenantIds));
}
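Here the single ImmutableBytesWritable is reused as a pointer: each call to e.evaluate(tuple, ptr) repositions ptr over the evaluated bytes for that row rather than allocating a new array, and PVarchar.INSTANCE.toObject(ptr) decodes whatever slice the pointer currently covers. A tiny illustration of that pointer-reuse pattern outside Phoenix, with made-up data:

// ImmutableBytesWritable can be re-pointed at slices of a buffer without copying.
byte[] buf = Bytes.toBytes("tenant1tenant2");
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
ptr.set(buf, 0, 7);                        // point at "tenant1"
System.out.println(Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength()));
ptr.set(buf, 7, 7);                        // re-point at "tenant2" without a new allocation
System.out.println(Bytes.toString(ptr.get(), ptr.getOffset(), ptr.getLength()));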
Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.
From the class QueryIT, method testTimestamp().
@Test
public void testTimestamp() throws Exception {
    String updateStmt = "upsert into " + tableName + " (" + " ORGANIZATION_ID, " + " ENTITY_ID, " + " A_TIMESTAMP) " + "VALUES (?, ?, ?)";
    // Override value that was set at creation time
    String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 10);
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection upsertConn = DriverManager.getConnection(url, props);
    // Test auto commit
    upsertConn.setAutoCommit(true);
    PreparedStatement stmt = upsertConn.prepareStatement(updateStmt);
    stmt.setString(1, tenantId);
    stmt.setString(2, ROW4);
    Timestamp tsValue1 = new Timestamp(5000);
    byte[] ts1 = PTimestamp.INSTANCE.toBytes(tsValue1);
    stmt.setTimestamp(3, tsValue1);
    stmt.execute();
    url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 15);
    Connection conn1 = DriverManager.getConnection(url, props);
    analyzeTable(conn1, tableName);
    conn1.close();
    updateStmt = "upsert into " + tableName + " (" + " ORGANIZATION_ID, " + " ENTITY_ID, " + " A_TIMESTAMP," + " A_TIME) " + "VALUES (?, ?, ?, ?)";
    stmt = upsertConn.prepareStatement(updateStmt);
    stmt.setString(1, tenantId);
    stmt.setString(2, ROW5);
    Timestamp tsValue2 = new Timestamp(5000);
    tsValue2.setNanos(200);
    byte[] ts2 = PTimestamp.INSTANCE.toBytes(tsValue2);
    stmt.setTimestamp(3, tsValue2);
    stmt.setTime(4, new Time(tsValue2.getTime()));
    stmt.execute();
    upsertConn.close();
    assertTrue(compare(CompareOp.GREATER, new ImmutableBytesWritable(ts2), new ImmutableBytesWritable(ts1)));
    assertFalse(compare(CompareOp.GREATER, new ImmutableBytesWritable(ts1), new ImmutableBytesWritable(ts1)));
    String query = "SELECT entity_id, a_timestamp, a_time FROM " + tableName + " WHERE organization_id=? and a_timestamp > ?";
    // Query at a later SCN (ts + 30) so both upserts above are visible
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 30));
    Connection conn = DriverManager.getConnection(getUrl(), props);
    try {
        PreparedStatement statement = conn.prepareStatement(query);
        statement.setString(1, tenantId);
        statement.setTimestamp(2, new Timestamp(5000));
        ResultSet rs = statement.executeQuery();
        assertTrue(rs.next());
        assertEquals(rs.getString(1), ROW5);
        assertEquals(rs.getTimestamp("A_TIMESTAMP"), tsValue2);
        assertEquals(rs.getTime("A_TIME"), new Time(tsValue2.getTime()));
        assertFalse(rs.next());
    } finally {
        conn.close();
    }
}
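The two assertions compare the PTimestamp-encoded byte arrays through ImmutableBytesWritable wrappers; Phoenix's timestamp encoding is designed so that byte order tracks value order, so the value with the extra 200 nanoseconds sorts greater. The compare() helper itself is defined elsewhere in the test hierarchy; a plausible sketch, assuming it reduces to a raw byte comparison (the actual Phoenix helper may differ):

// Hypothetical sketch of the compare() helper used above; not the actual Phoenix code.
private static boolean compare(CompareOp op, ImmutableBytesWritable lhs, ImmutableBytesWritable rhs) {
    int cmp = Bytes.compareTo(lhs.get(), lhs.getOffset(), lhs.getLength(),
                              rhs.get(), rhs.getOffset(), rhs.getLength());
    switch (op) {
        case GREATER:          return cmp > 0;
        case GREATER_OR_EQUAL: return cmp >= 0;
        case LESS:             return cmp < 0;
        case LESS_OR_EQUAL:    return cmp <= 0;
        case EQUAL:            return cmp == 0;
        case NOT_EQUAL:        return cmp != 0;
        default:               throw new IllegalArgumentException("Unsupported op: " + op);
    }
}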