Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
From the class TestMapReduceExamples, method testSampleUploader:
/**
 * Test SampleUploader from examples.
 */
@SuppressWarnings("unchecked")
@Test
public void testSampleUploader() throws Exception {
  Configuration configuration = new Configuration();
  Uploader uploader = new Uploader();
  // Mock the mapper context so we can assert on the key/Put pair the mapper emits.
  Mapper<LongWritable, Text, ImmutableBytesWritable, Put>.Context ctx = mock(Context.class);
  doAnswer(new Answer<Void>() {

    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      ImmutableBytesWritable writer = (ImmutableBytesWritable) invocation.getArguments()[0];
      Put put = (Put) invocation.getArguments()[1];
      assertEquals("row", Bytes.toString(writer.get()));
      assertEquals("row", Bytes.toString(put.getRow()));
      return null;
    }
  }).when(ctx).write(any(ImmutableBytesWritable.class), any(Put.class));
  uploader.map(null, new Text("row,family,qualifier,value"), ctx);
  Path dir = util.getDataTestDirOnTestFS("testSampleUploader");
  String[] args = { dir.toString(), "simpleTable" };
  Job job = SampleUploader.configureJob(configuration, args);
  assertEquals(SequenceFileInputFormat.class, job.getInputFormatClass());
}
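
For reference, the mapper under test parses each CSV line into a Put keyed by the first field. A minimal sketch of that conversion, assuming the same "row,family,qualifier,value" layout as the test input (illustrative, not the verbatim SampleUploader code):

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

// Parse one CSV line into a Put, matching what the mocked context asserts on.
String[] fields = "row,family,qualifier,value".split(",");
Put put = new Put(Bytes.toBytes(fields[0]));
put.addColumn(Bytes.toBytes(fields[1]), Bytes.toBytes(fields[2]), Bytes.toBytes(fields[3]));
// The mapper would then emit the pair the test verifies:
// ctx.write(new ImmutableBytesWritable(put.getRow()), put);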
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
From the class RSGroupInfoManagerImpl, method flushConfigTable:
private synchronized Map<TableName, String> flushConfigTable(Map<String, RSGroupInfo> groupMap) throws IOException {
  Map<TableName, String> newTableMap = Maps.newHashMap();
  List<Mutation> mutations = Lists.newArrayList();
  // populate deletes
  for (String groupName : prevRSGroups) {
    if (!groupMap.containsKey(groupName)) {
      Delete d = new Delete(Bytes.toBytes(groupName));
      mutations.add(d);
    }
  }
  // populate puts
  for (RSGroupInfo rsGroupInfo : groupMap.values()) {
    RSGroupProtos.RSGroupInfo proto = RSGroupProtobufUtil.toProtoGroupInfo(rsGroupInfo);
    Put p = new Put(Bytes.toBytes(rsGroupInfo.getName()));
    p.addColumn(META_FAMILY_BYTES, META_QUALIFIER_BYTES, proto.toByteArray());
    mutations.add(p);
    for (TableName entry : rsGroupInfo.getTables()) {
      newTableMap.put(entry, rsGroupInfo.getName());
    }
  }
  if (mutations.size() > 0) {
    multiMutate(mutations);
  }
  return newTableMap;
}
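
The method above persists each group as one Put carrying the serialized protobuf in a single cell. A minimal round-trip sketch of that storage pattern, assuming a plain Table and illustrative family/qualifier names (the real code uses META_FAMILY_BYTES and META_QUALIFIER_BYTES):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Store a serialized blob under one cell, then read it back.
static byte[] roundTrip(Table table, String rowKey, byte[] serialized) throws IOException {
  Put put = new Put(Bytes.toBytes(rowKey));
  put.addColumn(Bytes.toBytes("m"), Bytes.toBytes("i"), serialized);
  table.put(put);
  Result result = table.get(new Get(Bytes.toBytes(rowKey)));
  return result.getValue(Bytes.toBytes("m"), Bytes.toBytes("i"));
}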
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
From the class RSGroupInfoManagerImpl, method multiMutate:
private void multiMutate(List<Mutation> mutations) throws IOException {
  CoprocessorRpcChannel channel = rsGroupTable.coprocessorService(ROW_KEY);
  MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder =
      MultiRowMutationProtos.MutateRowsRequest.newBuilder();
  for (Mutation mutation : mutations) {
    if (mutation instanceof Put) {
      mmrBuilder.addMutationRequest(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(
          org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.PUT,
          mutation));
    } else if (mutation instanceof Delete) {
      mmrBuilder.addMutationRequest(org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(
          org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.DELETE,
          mutation));
    } else {
      throw new DoNotRetryIOException("multiMutate doesn't support " + mutation.getClass().getName());
    }
  }
  MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
      MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
  try {
    service.mutateRows(null, mmrBuilder.build());
  } catch (ServiceException ex) {
    // Surface the RPC failure as an IOException.
    throw ProtobufUtil.toIOException(ex);
  }
}
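
The conversion step above can be tried in isolation. A short sketch of turning a client-side Put into the wire-format MutationProto that MutateRowsRequest carries (row and column names are illustrative):

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.util.Bytes;

// Build a Put and convert it the same way the Put branch above does.
Put put = new Put(Bytes.toBytes("row"));
put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
MutationProto proto = ProtobufUtil.toMutation(MutationProto.MutationType.PUT, put);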
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
From the class TestRemoteHTableRetries, method testCheckAndPut:
@Test
public void testCheckAndPut() throws Exception {
  testTimedOutCall(new CallExecutor() {

    @Override
    public void run() throws Exception {
      Put put = new Put(ROW_1);
      put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
      remoteTable.checkAndPut(ROW_1, COLUMN_1, QUALIFIER_1, VALUE_1, put);
    }
  });
  verify(client, times(RETRIES)).put(anyString(), anyString(), any(byte[].class));
}
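
checkAndPut applies the Put only when the current value of the named cell matches the expected one, and returns whether the mutation ran. A minimal sketch against a plain Table, with illustrative row/family/qualifier names (newer HBase versions deprecate this in favor of checkAndMutate):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Update f:q on "row" only if it still holds the expected value.
static boolean conditionalPut(Table table, byte[] expected) throws IOException {
  Put put = new Put(Bytes.toBytes("row"));
  put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("new-value"));
  return table.checkAndPut(Bytes.toBytes("row"), Bytes.toBytes("f"),
      Bytes.toBytes("q"), expected, put);
}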
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
From the class TestRemoteTable, method testDelete:
@Test
public void testDelete() throws IOException {
  Put put = new Put(ROW_3);
  put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
  put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
  remoteTable.put(put);
  Get get = new Get(ROW_3);
  get.addFamily(COLUMN_1);
  get.addFamily(COLUMN_2);
  Result result = remoteTable.get(get);
  byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
  byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1, value1));
  assertNotNull(value2);
  assertTrue(Bytes.equals(VALUE_2, value2));
  // Delete a single column: only the COLUMN_2:QUALIFIER_2 cell should disappear.
  Delete delete = new Delete(ROW_3);
  delete.addColumn(COLUMN_2, QUALIFIER_2);
  remoteTable.delete(delete);
  get = new Get(ROW_3);
  get.addFamily(COLUMN_1);
  get.addFamily(COLUMN_2);
  result = remoteTable.get(get);
  value1 = result.getValue(COLUMN_1, QUALIFIER_1);
  value2 = result.getValue(COLUMN_2, QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1, value1));
  assertNull(value2);
  // Delete the row up to timestamp 1: cells written later survive.
  delete = new Delete(ROW_3);
  delete.setTimestamp(1L);
  remoteTable.delete(delete);
  get = new Get(ROW_3);
  get.addFamily(COLUMN_1);
  get.addFamily(COLUMN_2);
  result = remoteTable.get(get);
  value1 = result.getValue(COLUMN_1, QUALIFIER_1);
  value2 = result.getValue(COLUMN_2, QUALIFIER_2);
  assertNotNull(value1);
  assertTrue(Bytes.equals(VALUE_1, value1));
  assertNull(value2);
  // Delete the whole row: nothing should remain.
  delete = new Delete(ROW_3);
  remoteTable.delete(delete);
  get = new Get(ROW_3);
  get.addFamily(COLUMN_1);
  get.addFamily(COLUMN_2);
  result = remoteTable.get(get);
  value1 = result.getValue(COLUMN_1, QUALIFIER_1);
  value2 = result.getValue(COLUMN_2, QUALIFIER_2);
  assertNull(value1);
  assertNull(value2);
}
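
The three phases above cover the main Delete scopes: a single cell, everything at or before a timestamp, and the whole row. A compact reference sketch, with illustrative family/qualifier names:

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.util.Bytes;

byte[] row = Bytes.toBytes("row");
// Narrowest: the latest version of one cell only.
Delete oneCell = new Delete(row).addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"));
// Time-bounded: every cell in the row with timestamp <= 1.
Delete upToTimestamp = new Delete(row);
upToTimestamp.setTimestamp(1L);
// Broadest: the entire row, all families and versions.
Delete wholeRow = new Delete(row);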