use of org.apache.hadoop.hbase.client.Get in project hbase by apache.
the class TestEndToEndSplitTransaction method blockUntilRegionIsOpened.
/**
 * Blocks until the given region answers a read, or until the timeout elapses.
 * Best-effort: returns silently if the region never becomes readable in time.
 */
public static void blockUntilRegionIsOpened(Configuration conf, long timeout, HRegionInfo hri) throws IOException, InterruptedException {
log("blocking until region is opened for reading:" + hri.getRegionNameAsString());
long deadline = System.currentTimeMillis() + timeout;
try (Connection conn = ConnectionFactory.createConnection(conf);
Table table = conn.getTable(hri.getTable())) {
byte[] probeRow = hri.getStartKey();
// An empty/null start key means this is the first region; probe with a key
// likely to fall inside it.
if (probeRow == null || probeRow.length <= 0) {
probeRow = new byte[] { '0' };
}
Get probe = new Get(probeRow);
while (System.currentTimeMillis() < deadline) {
try {
table.get(probe);
break;
} catch (IOException ignored) {
// Region not yet readable; retry after a short sleep.
}
Threads.sleep(10);
}
}
}
use of org.apache.hadoop.hbase.client.Get in project hbase by apache.
the class TestMobStoreScanner method testReadFromCorruptMobFilesWithReadEmptyValueOnMobCellMiss.
@Test
public void testReadFromCorruptMobFilesWithReadEmptyValueOnMobCellMiss() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
setUp(0, tableName);
createRecordAndCorruptMobFile(tableName, row1, family, qf1, Bytes.toBytes("value1"));
// Ask the server to return an empty value, rather than erroring out, when
// the backing mob cell cannot be resolved.
Get get = new Get(row1);
get.setAttribute(MobConstants.EMPTY_VALUE_ON_MOBCELL_MISS, Bytes.toBytes(true));
Cell latest = table.get(get).getColumnLatestCell(family, qf1);
Assert.assertEquals(0, latest.getValueLength());
}
use of org.apache.hadoop.hbase.client.Get in project hbase by apache.
the class TestMobStoreScanner method testGetMassive.
@Test(timeout = 60000)
public void testGetMassive() throws Exception {
setUp(defaultThreshold, TableName.valueOf(name.getMethodName()));
// Store three 25MB mob values under one row and read them back in a single
// Get; the assertion is simply that the oversized response does not blow up.
// NOTE(review): an older comment here claimed 25MB+ values fail against a
// 64MB protobuf cap, but this test expects the 3x25MB Get to succeed —
// presumably the RPC size limit was raised; confirm against current config.
byte[] bigValue = new byte[25 * 1024 * 1024];
Put put = new Put(row1);
put.addColumn(family, qf1, bigValue);
put.addColumn(family, qf2, bigValue);
put.addColumn(family, qf3, bigValue);
table.put(put);
Get g = new Get(row1);
table.get(g);
// should not have blown up.
}
use of org.apache.hadoop.hbase.client.Get in project hbase by apache.
the class TestRegionReplicas method assertGetRpc.
// Issues a Get directly through the region server's RPC services and asserts
// whether the row is (expect=true) or is not (expect=false) present.
private void assertGetRpc(HRegionInfo info, int value, boolean expect) throws IOException, org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
byte[] row = Bytes.toBytes(String.valueOf(value));
Get get = new Get(row);
ClientProtos.GetRequest getReq = RequestConverter.buildGetRequest(info.getRegionName(), get);
ClientProtos.GetResponse getResp = getRS().getRSRpcServices().get(null, getReq);
Result result = ProtobufUtil.toResult(getResp.getResult());
if (expect) {
Assert.assertArrayEquals(row, result.getValue(f, null));
} else {
// BUG FIX: the original discarded the boolean from isEmpty(), so the
// negative case asserted nothing. Actually assert emptiness.
Assert.assertTrue(result.isEmpty());
}
}
use of org.apache.hadoop.hbase.client.Get in project hbase by apache.
the class TestReplicationSmallTests method testSimplePutDelete.
/**
 * Adds a row on the source cluster, verifies it replicates to the peer,
 * deletes it, and verifies the delete replicates as well.
 * @throws Exception if the put/delete fails or replication does not complete in time
 */
@Test(timeout = 300000)
public void testSimplePutDelete() throws Exception {
LOG.info("testSimplePutDelete");
// Write a single cell to the source cluster's table.
Put put = new Put(row);
put.addColumn(famName, row, row);
htable1 = utility1.getConnection().getTable(tableName);
htable1.put(put);
Get get = new Get(row);
// Poll the peer cluster until the row appears; fail on the last retry.
for (int i = 0; i < NB_RETRIES; i++) {
if (i == NB_RETRIES - 1) {
fail("Waited too much time for put replication");
}
Result res = htable2.get(get);
if (res.isEmpty()) {
LOG.info("Row not available");
Thread.sleep(SLEEP_TIME);
} else {
// BUG FIX: JUnit's assertArrayEquals takes (expected, actual); the
// original had them swapped, yielding misleading failure messages.
assertArrayEquals(row, res.value());
break;
}
}
Delete del = new Delete(row);
htable1.delete(del);
get = new Get(row);
// Poll until the delete has replicated and the row is gone from the peer.
for (int i = 0; i < NB_RETRIES; i++) {
if (i == NB_RETRIES - 1) {
fail("Waited too much time for del replication");
}
Result res = htable2.get(get);
if (res.size() >= 1) {
LOG.info("Row not deleted");
Thread.sleep(SLEEP_TIME);
} else {
break;
}
}
}
Aggregations