Use of org.apache.hadoop.hbase.thrift2.generated.TColumnValue in project hbase by apache.
From class TestThriftHBaseServiceHandler, method testCheckAndDelete.
/**
 * Check that checkAndDelete fails while the guard cell does not exist, then put the cell
 * and check that the same checkAndDelete succeeds.
 */
@Test
public void testCheckAndDelete() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testCheckAndDelete".getBytes();
  ByteBuffer table = wrap(tableAname);

  List<TColumnValue> columnValuesA = new ArrayList<>(1);
  TColumnValue columnValueA =
    new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  columnValuesA.add(columnValueA);
  TPut putA = new TPut(wrap(rowName), columnValuesA);

  List<TColumnValue> columnValuesB = new ArrayList<>(1);
  TColumnValue columnValueB =
    new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname));
  columnValuesB.add(columnValueB);
  TPut putB = new TPut(wrap(rowName), columnValuesB);

  // put putB so that we know whether the row has been deleted or not
  handler.put(table, putB);

  TDelete delete = new TDelete(wrap(rowName));

  // the guard cell (familyA:qualifierA == valueA) does not exist yet, so the delete must fail
  assertFalse(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname),
    wrap(qualifierAname), wrap(valueAname), delete));

  TGet get = new TGet(wrap(rowName));
  TResult result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  assertTColumnValuesEqual(columnValuesB, result.getColumnValues());

  // now put the guard cell; the same checkAndDelete must succeed and remove the row
  handler.put(table, putA);
  assertTrue(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname),
    wrap(qualifierAname), wrap(valueAname), delete));
  result = handler.get(table, get);
  assertFalse(result.isSetRow());
  assertEquals(0, result.getColumnValuesSize());
}
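These snippets all lean on helpers and fixtures that the listing never shows: wrap(...) is java.nio.ByteBuffer.wrap (a static import in the test class, since the Thrift structs carry ByteBuffers rather than byte[]), and tableAname, familyAname, qualifierAname, valueAname and friends are byte[] fixtures. A minimal sketch of plausible definitions follows; the concrete fixture values here are assumptions for illustration, not the test's actual constants:

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical fixture values; the real test class defines its own.
static final byte[] tableAname = Bytes.toBytes("tableA");
static final byte[] familyAname = Bytes.toBytes("familyA");
static final byte[] familyBname = Bytes.toBytes("familyB");
static final byte[] qualifierAname = Bytes.toBytes("qualifierA");
static final byte[] qualifierBname = Bytes.toBytes("qualifierB");
static final byte[] valueAname = Bytes.toBytes("valueA");
static final byte[] valueBname = Bytes.toBytes("valueB");

// wrap(...) is ByteBuffer.wrap: Thrift binary fields are ByteBuffers, not byte[].
static ByteBuffer wrap(byte[] array) {
  return ByteBuffer.wrap(array);
}

// pad(7, (byte) 3) -> "007": zero-pads a number to the given width, so that
// string-sorted qualifiers ("col000".."col099") keep numeric order.
static String pad(int n, byte pad) {
  String res = Integer.toString(n);
  while (res.length() < pad) {
    res = "0" + res;
  }
  return res;
}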
Use of org.apache.hadoop.hbase.thrift2.generated.TColumnValue in project hbase by apache.
From class TestThriftHBaseServiceHandler, method testScanWithBatchSize.
@Test
public void testScanWithBatchSize() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  ByteBuffer table = wrap(tableAname);

  // insert a single row with 100 columns: col000/val000 .. col099/val099
  List<TColumnValue> columnValues = new ArrayList<>(100);
  for (int i = 0; i < 100; i++) {
    String colNum = pad(i, (byte) 3);
    TColumnValue columnValue = new TColumnValue(wrap(familyAname),
      wrap(("col" + colNum).getBytes()), wrap(("val" + colNum).getBytes()));
    columnValues.add(columnValue);
  }
  TPut put = new TPut(wrap("testScanWithBatchSize".getBytes()), columnValues);
  handler.put(table, put);

  // create scan instance
  TScan scan = new TScan();
  List<TColumn> columns = new ArrayList<>(1);
  TColumn column = new TColumn();
  column.setFamily(familyAname);
  columns.add(column);
  scan.setColumns(columns);
  scan.setStartRow("testScanWithBatchSize".getBytes());
  // the stop row is exclusive, so it must sort strictly after the start row
  scan.setStopRow("testScanWithBatchSize\uffff".getBytes());

  // set batch size to 10 columns per call
  scan.setBatchSize(10);

  // get scanner
  int scanId = handler.openScanner(table, scan);
  List<TResult> results = null;
  for (int i = 0; i < 10; i++) {
    // get batch for single row (10x10 is what we expect)
    results = handler.getScannerRows(scanId, 1);
    assertEquals(1, results.size());
    // check length of batch
    List<TColumnValue> cols = results.get(0).getColumnValues();
    assertEquals(10, cols.size());
    // check that the columns are returned in order
    for (int y = 0; y < 10; y++) {
      int colNum = y + (10 * i);
      String colNumPad = pad(colNum, (byte) 3);
      assertArrayEquals(("col" + colNumPad).getBytes(), cols.get(y).getQualifier());
    }
  }

  // check that we are at the end of the scan
  results = handler.getScannerRows(scanId, 1);
  assertEquals(0, results.size());

  // close scanner and check that it was indeed closed
  handler.closeScanner(scanId);
  try {
    handler.getScannerRows(scanId, 1);
    fail("Scanner id should be invalid");
  } catch (TIllegalArgument e) {
    // expected: the scanner was closed above
  }
}
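One consequence of setBatchSize worth calling out: batching splits a wide row across successive TResults, so a client that wants whole rows back must stitch the partial results together by row key. A sketch of that reassembly, assuming the same handler and an open scanId as above, plus the usual java.util and java.nio imports:

// Drain the scanner, regrouping batched partial results into whole rows.
Map<ByteBuffer, List<TColumnValue>> rows = new LinkedHashMap<>();
List<TResult> batch;
while (!(batch = handler.getScannerRows(scanId, 1)).isEmpty()) {
  for (TResult partial : batch) {
    rows.computeIfAbsent(ByteBuffer.wrap(partial.getRow()), k -> new ArrayList<>())
        .addAll(partial.getColumnValues());
  }
}
// each map entry now holds the complete column list for one row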
Use of org.apache.hadoop.hbase.thrift2.generated.TColumnValue in project hbase by apache.
From class TestThriftHBaseServiceHandler, method testAppend.
@Test
public void testAppend() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testAppend".getBytes();
  ByteBuffer table = wrap(tableAname);
  byte[] v1 = Bytes.toBytes("42");
  byte[] v2 = Bytes.toBytes("23");

  // write the initial cell value
  List<TColumnValue> columnValues = new ArrayList<>(1);
  columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v1)));
  TPut put = new TPut(wrap(rowName), columnValues);
  handler.put(table, put);

  // append v2 to the same cell
  List<TColumnValue> appendColumns = new ArrayList<>(1);
  appendColumns.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v2)));
  TAppend append = new TAppend(wrap(rowName), appendColumns);
  handler.append(table, append);

  // the cell now holds the byte-wise concatenation of v1 and v2
  TGet get = new TGet(wrap(rowName));
  TResult result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  assertEquals(1, result.getColumnValuesSize());
  TColumnValue columnValue = result.getColumnValues().get(0);
  assertArrayEquals(Bytes.add(v1, v2), columnValue.getValue());
}
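Note that append concatenates bytes rather than doing arithmetic, which is why the final assertion compares against Bytes.add(v1, v2). A quick standalone check of that expectation, using only org.apache.hadoop.hbase.util.Bytes:

byte[] v1 = Bytes.toBytes("42");
byte[] v2 = Bytes.toBytes("23");
// Bytes.add concatenates: the appended cell holds "4223", not the sum 65
assert Bytes.equals(Bytes.add(v1, v2), Bytes.toBytes("4223"));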
Use of org.apache.hadoop.hbase.thrift2.generated.TColumnValue in project hbase by apache.
From class TestThriftHBaseServiceHandler, method testDurability.
/**
 * Create TPut, TDelete, and TIncrement objects, set their durability, then call the
 * ThriftUtilities functions to get a Put, Delete, and Increment respectively. Use
 * getDurability to make sure the returned objects carry the expected durability setting.
 */
@Test
public void testDurability() throws Exception {
  byte[] rowName = "testDurability".getBytes();
  List<TColumnValue> columnValues = new ArrayList<>(1);
  columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
  List<TColumnIncrement> incrementColumns = new ArrayList<>(1);
  incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));

  // TDelete -> Delete
  TDelete tDelete = new TDelete(wrap(rowName));
  tDelete.setDurability(TDurability.SKIP_WAL);
  Delete delete = deleteFromThrift(tDelete);
  assertEquals(Durability.SKIP_WAL, delete.getDurability());
  tDelete.setDurability(TDurability.ASYNC_WAL);
  delete = deleteFromThrift(tDelete);
  assertEquals(Durability.ASYNC_WAL, delete.getDurability());
  tDelete.setDurability(TDurability.SYNC_WAL);
  delete = deleteFromThrift(tDelete);
  assertEquals(Durability.SYNC_WAL, delete.getDurability());
  tDelete.setDurability(TDurability.FSYNC_WAL);
  delete = deleteFromThrift(tDelete);
  assertEquals(Durability.FSYNC_WAL, delete.getDurability());

  // TPut -> Put
  TPut tPut = new TPut(wrap(rowName), columnValues);
  tPut.setDurability(TDurability.SKIP_WAL);
  Put put = putFromThrift(tPut);
  assertEquals(Durability.SKIP_WAL, put.getDurability());
  tPut.setDurability(TDurability.ASYNC_WAL);
  put = putFromThrift(tPut);
  assertEquals(Durability.ASYNC_WAL, put.getDurability());
  tPut.setDurability(TDurability.SYNC_WAL);
  put = putFromThrift(tPut);
  assertEquals(Durability.SYNC_WAL, put.getDurability());
  tPut.setDurability(TDurability.FSYNC_WAL);
  put = putFromThrift(tPut);
  assertEquals(Durability.FSYNC_WAL, put.getDurability());

  // TIncrement -> Increment
  TIncrement tIncrement = new TIncrement(wrap(rowName), incrementColumns);
  tIncrement.setDurability(TDurability.SKIP_WAL);
  Increment increment = incrementFromThrift(tIncrement);
  assertEquals(Durability.SKIP_WAL, increment.getDurability());
  tIncrement.setDurability(TDurability.ASYNC_WAL);
  increment = incrementFromThrift(tIncrement);
  assertEquals(Durability.ASYNC_WAL, increment.getDurability());
  tIncrement.setDurability(TDurability.SYNC_WAL);
  increment = incrementFromThrift(tIncrement);
  assertEquals(Durability.SYNC_WAL, increment.getDurability());
  tIncrement.setDurability(TDurability.FSYNC_WAL);
  increment = incrementFromThrift(tIncrement);
  assertEquals(Durability.FSYNC_WAL, increment.getDurability());
}
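The twelve assert blocks above differ only in the durability constant, so the same coverage can be expressed table-driven. A compact sketch over the same conversions, assuming java.util.EnumMap/Map and the tDelete, tPut, and tIncrement objects built above:

// Map each Thrift durability onto the client Durability it must convert to.
Map<TDurability, Durability> expected = new EnumMap<>(TDurability.class);
expected.put(TDurability.SKIP_WAL, Durability.SKIP_WAL);
expected.put(TDurability.ASYNC_WAL, Durability.ASYNC_WAL);
expected.put(TDurability.SYNC_WAL, Durability.SYNC_WAL);
expected.put(TDurability.FSYNC_WAL, Durability.FSYNC_WAL);

for (Map.Entry<TDurability, Durability> e : expected.entrySet()) {
  tDelete.setDurability(e.getKey());
  assertEquals(e.getValue(), deleteFromThrift(tDelete).getDurability());
  tPut.setDurability(e.getKey());
  assertEquals(e.getValue(), putFromThrift(tPut).getDurability());
  tIncrement.setDurability(e.getKey());
  assertEquals(e.getValue(), incrementFromThrift(tIncrement).getDurability());
}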
Use of org.apache.hadoop.hbase.thrift2.generated.TColumnValue in project hbase by apache.
From class TestThriftHBaseServiceHandler, method testMetricsPrecision.
/**
 * See HBASE-17611.
 *
 * Latency metrics were capped at ~2 seconds because an int variable was used to capture
 * the duration.
 */
@Test
public void testMetricsPrecision() throws Exception {
  byte[] rowkey = Bytes.toBytes("row1");
  byte[] family = Bytes.toBytes("f");
  byte[] col = Bytes.toBytes("c");
  // create a table whose coprocessor delays each request past the old ~2s int cap
  TableName tableName = TableName.valueOf("testMetricsPrecision");
  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addCoprocessor(DelayingRegionObserver.class.getName());
  tableDesc.addFamily(new HColumnDescriptor(family));

  Table table = null;
  try {
    table = UTIL.createTable(tableDesc, null);
    table.put(new Put(rowkey).addColumn(family, col, Bytes.toBytes("val1")));

    ThriftHBaseServiceHandler hbaseHandler = createHandler();
    ThriftMetrics metrics = getMetrics(UTIL.getConfiguration());
    THBaseService.Iface handler = ThriftHBaseServiceHandler.newInstance(hbaseHandler, metrics);
    ByteBuffer tTableName = wrap(tableName.getName());

    // check metrics latency with a successful get
    TGet tGet = new TGet(wrap(rowkey));
    TResult tResult = handler.get(tTableName, tGet);
    List<TColumnValue> expectedColumnValues = Lists.newArrayList(
      new TColumnValue(wrap(family), wrap(col), wrap(Bytes.toBytes("val1"))));
    assertArrayEquals(rowkey, tResult.getRow());
    List<TColumnValue> returnedColumnValues = tResult.getColumnValues();
    assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);

    // with long-based timing the recorded max latency exceeds the delay instead of wrapping
    metricsHelper.assertGaugeGt("get_max", 3000L, metrics.getSource());
  } finally {
    if (table != null) {
      try {
        table.close();
      } catch (IOException ignored) {
        // best-effort close; the table is deleted below regardless
      }
      UTIL.deleteTable(tableName);
    }
  }
}
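DelayingRegionObserver is referenced but never shown in this listing. A plausible minimal version, assuming the pre-2.0 BaseRegionObserver coprocessor API; the hook set, delay value, and any configuration handling of the real observer in the HBase test may differ:

public static class DelayingRegionObserver extends BaseRegionObserver {
  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx, Get get,
      List<Cell> results) throws IOException {
    try {
      // sleep past the old ~2s int cap so get_max can only exceed 3000 ms with long-based timing
      Thread.sleep(3500);
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
    }
  }
}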