Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
From the class TestRowProcessorEndpoint, the method prepareTestData:
public void prepareTestData() throws Exception {
  try {
    util.getAdmin().disableTable(TABLE);
    util.getAdmin().deleteTable(TABLE);
  } catch (Exception e) {
    // ignore table not found
  }
  table = util.createTable(TABLE, FAM);
  {
    Put put = new Put(ROW);
    // B, C are friends of A
    put.addColumn(FAM, A, Bytes.add(B, C));
    // D, E, F are friends of B
    put.addColumn(FAM, B, Bytes.add(D, E, F));
    // G is a friend of C
    put.addColumn(FAM, C, G);
    table.put(put);
    rowSize = put.size();
  }
  Put put = new Put(ROW2);
  put.addColumn(FAM, D, E);
  put.addColumn(FAM, F, G);
  table.put(put);
  row2Size = put.size();
}
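To verify what prepareTestData wrote, the same row can be read back with a Get. A minimal sketch, assuming the table handle and the ROW/FAM/A byte[] fixtures from the snippet above:

// Read the friends-graph row back; assumes the same fixtures as above.
Get get = new Get(ROW);
get.addFamily(FAM);
Result result = table.get(get);
// Each qualifier (A, B, C) holds a concatenation of friend ids,
// e.g. friendsOfA should equal Bytes.add(B, C).
byte[] friendsOfA = result.getValue(FAM, A);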
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
From the class TestServerCustomProtocol, the method before:
@Before
public void before() throws Exception {
  final byte[][] SPLIT_KEYS = new byte[][] { ROW_B, ROW_C };
  Table table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);
  Put puta = new Put(ROW_A);
  puta.addColumn(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
  table.put(puta);
  Put putb = new Put(ROW_B);
  putb.addColumn(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
  table.put(putb);
  Put putc = new Put(ROW_C);
  putc.addColumn(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
  table.put(putc);
}
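The three single-row writes above can also be issued as one batch, since Table.put accepts a List&lt;Put&gt;. A minimal sketch, assuming the same table and fixtures:

// Batch the three Puts into one client call instead of three round trips.
List<Put> puts = new ArrayList<>();
for (byte[] row : new byte[][] { ROW_A, ROW_B, ROW_C }) {
  Put p = new Put(row);
  p.addColumn(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
  puts.add(p);
}
table.put(puts);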
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
From the class TestCoprocessorEndpoint, the method setupBeforeClass:
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // set configuration to indicate which coprocessors should be loaded
  Configuration conf = util.getConfiguration();
  conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000);
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(),
    ProtobufCoprocessorService.class.getName());
  conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
    ProtobufCoprocessorService.class.getName());
  util.startMiniCluster(2);
  Admin admin = util.getAdmin();
  HTableDescriptor desc = new HTableDescriptor(TEST_TABLE);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  admin.createTable(desc, new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] });
  util.waitUntilAllRegionsAssigned(TEST_TABLE);
  Table table = util.getConnection().getTable(TEST_TABLE);
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
  table.close();
}
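For loads much larger than ROWSIZE rows, the same loop is often written against a BufferedMutator, which buffers Puts client-side and flushes them in batches. A sketch under the same fixtures, not taken from the test itself:

// BufferedMutator buffers mutations and flushes on close() or flush().
try (BufferedMutator mutator = util.getConnection().getBufferedMutator(TEST_TABLE)) {
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
    mutator.mutate(put);
  }
}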
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
From the class TestMapReduceExamples, the method testSampleUploader:
/**
 * Test SampleUploader from examples
 */
@SuppressWarnings("unchecked")
@Test
public void testSampleUploader() throws Exception {
  Configuration configuration = new Configuration();
  Uploader uploader = new Uploader();
  Mapper<LongWritable, Text, ImmutableBytesWritable, Put>.Context ctx = mock(Context.class);
  doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      ImmutableBytesWritable writer = (ImmutableBytesWritable) invocation.getArguments()[0];
      Put put = (Put) invocation.getArguments()[1];
      assertEquals("row", Bytes.toString(writer.get()));
      assertEquals("row", Bytes.toString(put.getRow()));
      return null;
    }
  }).when(ctx).write(any(ImmutableBytesWritable.class), any(Put.class));
  uploader.map(null, new Text("row,family,qualifier,value"), ctx);
  Path dir = util.getDataTestDirOnTestFS("testSampleUploader");
  String[] args = { dir.toString(), "simpleTable" };
  Job job = SampleUploader.configureJob(configuration, args);
  assertEquals(SequenceFileInputFormat.class, job.getInputFormatClass());
}
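The mocked context above asserts on the Put that Uploader.map emits for the line "row,family,qualifier,value". A hedged sketch of the mapping this implies; the actual SampleUploader parsing may differ in details:

// CSV line -> Put: row key, then one column (family:qualifier = value).
String[] fields = "row,family,qualifier,value".split(",");
Put put = new Put(Bytes.toBytes(fields[0]));
put.addColumn(Bytes.toBytes(fields[1]), Bytes.toBytes(fields[2]), Bytes.toBytes(fields[3]));
// The mapper would then call ctx.write(new ImmutableBytesWritable(put.getRow()), put),
// which is exactly what the doAnswer callback above intercepts and checks.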
Use of org.apache.hadoop.hbase.client.Put in project hbase by apache.
From the class RSGroupInfoManagerImpl, the method flushConfigTable:
private synchronized Map<TableName, String> flushConfigTable(Map<String, RSGroupInfo> groupMap)
    throws IOException {
  Map<TableName, String> newTableMap = Maps.newHashMap();
  List<Mutation> mutations = Lists.newArrayList();
  // populate deletes
  for (String groupName : prevRSGroups) {
    if (!groupMap.containsKey(groupName)) {
      Delete d = new Delete(Bytes.toBytes(groupName));
      mutations.add(d);
    }
  }
  // populate puts
  for (RSGroupInfo RSGroupInfo : groupMap.values()) {
    RSGroupProtos.RSGroupInfo proto = RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo);
    Put p = new Put(Bytes.toBytes(RSGroupInfo.getName()));
    p.addColumn(META_FAMILY_BYTES, META_QUALIFIER_BYTES, proto.toByteArray());
    mutations.add(p);
    for (TableName entry : RSGroupInfo.getTables()) {
      newTableMap.put(entry, RSGroupInfo.getName());
    }
  }
  if (mutations.size() > 0) {
    multiMutate(mutations);
  }
  return newTableMap;
}
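multiMutate hands the mixed Delete/Put list to the server in a single call so the rsgroup metadata is replaced consistently. A plain client-side alternative, without that consistency guarantee, would be Table.batch; a sketch assuming a hypothetical rsGroupTable handle for the metadata table:

// Non-atomic alternative: Table.batch applies each mutation independently.
// `rsGroupTable` is a hypothetical Table handle, not part of this class.
Object[] results = new Object[mutations.size()];
rsGroupTable.batch(mutations, results);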