Use of org.apache.hadoop.hbase.thrift2.generated.TResult in project hbase by apache.
Class TestThriftHBaseServiceHandler, method testCheckAndDelete.
/**
 * Check that checkAndDelete fails while the cell does not exist, then put the cell, and verify
 * that checkAndDelete now succeeds.
 */
@Test
public void testCheckAndDelete() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testCheckAndDelete".getBytes();
  ByteBuffer table = wrap(tableAname);

  List<TColumnValue> columnValuesA = new ArrayList<>(1);
  TColumnValue columnValueA =
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  columnValuesA.add(columnValueA);
  TPut putA = new TPut(wrap(rowName), columnValuesA);
  putA.setColumnValues(columnValuesA);

  List<TColumnValue> columnValuesB = new ArrayList<>(1);
  TColumnValue columnValueB =
      new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname));
  columnValuesB.add(columnValueB);
  TPut putB = new TPut(wrap(rowName), columnValuesB);
  putB.setColumnValues(columnValuesB);

  // put putB so that we know whether the row has been deleted or not
  handler.put(table, putB);

  TDelete delete = new TDelete(wrap(rowName));
  assertFalse(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname),
      wrap(qualifierAname), wrap(valueAname), delete));

  TGet get = new TGet(wrap(rowName));
  TResult result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  assertTColumnValuesEqual(columnValuesB, result.getColumnValues());

  handler.put(table, putA);
  assertTrue(handler.checkAndDelete(table, wrap(rowName), wrap(familyAname),
      wrap(qualifierAname), wrap(valueAname), delete));

  result = handler.get(table, get);
  assertFalse(result.isSetRow());
  assertEquals(0, result.getColumnValuesSize());
}
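These snippets lean on helpers from the surrounding test class. A minimal sketch of wrap, which appears on nearly every line (in the HBase test source this appears to be simply a static import of java.nio.ByteBuffer.wrap):

import java.nio.ByteBuffer;

// Sketch of the wrap helper assumed by these snippets: adapts a byte[] to the
// ByteBuffer parameters of the Thrift-generated API.
private static ByteBuffer wrap(byte[] array) {
  return ByteBuffer.wrap(array);
}

Note that checkAndDelete is an atomic compare-and-delete: the TDelete is applied only if the named cell currently holds the expected value, and the boolean return value reports whether the delete actually ran.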
Use of org.apache.hadoop.hbase.thrift2.generated.TResult in project hbase by apache.
Class TestThriftHBaseServiceHandler, method testScanWithBatchSize.
@Test
public void testScanWithBatchSize() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  ByteBuffer table = wrap(tableAname);

  // insert data: one row with 100 columns
  List<TColumnValue> columnValues = new ArrayList<>(100);
  for (int i = 0; i < 100; i++) {
    String colNum = pad(i, (byte) 3);
    TColumnValue columnValue = new TColumnValue(wrap(familyAname),
        wrap(("col" + colNum).getBytes()), wrap(("val" + colNum).getBytes()));
    columnValues.add(columnValue);
  }
  TPut put = new TPut(wrap("testScanWithBatchSize".getBytes()), columnValues);
  handler.put(table, put);

  // create scan instance
  TScan scan = new TScan();
  List<TColumn> columns = new ArrayList<>(1);
  TColumn column = new TColumn();
  column.setFamily(familyAname);
  columns.add(column);
  scan.setColumns(columns);
  scan.setStartRow("testScanWithBatchSize".getBytes());
  // the stop row is exclusive, so it must sort after the start row; with an
  // identical stop row the scan would return nothing
  scan.setStopRow("testScanWithBatchSize\uff00".getBytes());
  // set batch size to 10 columns per call
  scan.setBatchSize(10);

  // get scanner
  int scanId = handler.openScanner(table, scan);
  List<TResult> results = null;
  for (int i = 0; i < 10; i++) {
    // get batch for single row (10x10 is what we expect)
    results = handler.getScannerRows(scanId, 1);
    assertEquals(1, results.size());
    // check length of batch
    List<TColumnValue> cols = results.get(0).getColumnValues();
    assertEquals(10, cols.size());
    // check if the columns are returned and in order
    for (int y = 0; y < 10; y++) {
      int colNum = y + (10 * i);
      String colNumPad = pad(colNum, (byte) 3);
      assertArrayEquals(("col" + colNumPad).getBytes(), cols.get(y).getQualifier());
    }
  }

  // check that we are at the end of the scan
  results = handler.getScannerRows(scanId, 1);
  assertEquals(0, results.size());

  // close scanner and check that it was indeed closed
  handler.closeScanner(scanId);
  try {
    handler.getScannerRows(scanId, 1);
    fail("Scanner id should be invalid");
  } catch (TIllegalArgument e) {
    // expected: the scanner id is no longer valid after closeScanner
  }
}
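The qualifier arithmetic relies on a pad helper from the test class that builds fixed-width column names, so "col000" through "col099" sort in scan order. A plausible reconstruction, assuming simple zero-padding:

// Hypothetical sketch of the pad helper: pad(7, (byte) 3) returns "007".
private static String pad(int n, byte pad) {
  StringBuilder res = new StringBuilder(Integer.toString(n));
  while (res.length() < pad) {
    res.insert(0, '0');
  }
  return res.toString();
}

The batch size is what makes the 10x10 loop work: with setBatchSize(10), each getScannerRows(scanId, 1) call returns at most 10 cells of the single 100-column row, so ten calls drain the row and the eleventh returns an empty list.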
Use of org.apache.hadoop.hbase.thrift2.generated.TResult in project hbase by apache.
Class TestThriftHBaseServiceHandler, method testAppend.
@Test
public void testAppend() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testAppend".getBytes();
  ByteBuffer table = wrap(tableAname);
  byte[] v1 = Bytes.toBytes("42");
  byte[] v2 = Bytes.toBytes("23");

  List<TColumnValue> columnValues = new ArrayList<>(1);
  columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v1)));
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);
  handler.put(table, put);

  List<TColumnValue> appendColumns = new ArrayList<>(1);
  appendColumns.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v2)));
  TAppend append = new TAppend(wrap(rowName), appendColumns);
  handler.append(table, append);

  TGet get = new TGet(wrap(rowName));
  TResult result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  assertEquals(1, result.getColumnValuesSize());
  TColumnValue columnValue = result.getColumnValues().get(0);
  assertArrayEquals(Bytes.add(v1, v2), columnValue.getValue());
}
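append concatenates the supplied value onto the cell's existing contents on the server, so the final assertion expects the two byte arrays joined end to end. Bytes.add from org.apache.hadoop.hbase.util is plain array concatenation:

// Bytes.add concatenates byte arrays: "42" + "23" -> "4223".
byte[] expected = Bytes.add(Bytes.toBytes("42"), Bytes.toBytes("23"));
assertArrayEquals(Bytes.toBytes("4223"), expected);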
Use of org.apache.hadoop.hbase.thrift2.generated.TResult in project hbase by apache.
Class TestThriftHBaseServiceHandler, method testMetricsPrecision.
/**
 * See HBASE-17611.
 *
 * Latency metrics were capped at ~2 seconds due to the use of an int variable to capture the
 * duration.
 */
@Test
public void testMetricsPrecision() throws Exception {
  byte[] rowkey = Bytes.toBytes("row1");
  byte[] family = Bytes.toBytes("f");
  byte[] col = Bytes.toBytes("c");

  // create a table whose coprocessor delays every request (see DelayingRegionObserver)
  TableName tableName = TableName.valueOf("testMetricsPrecision");
  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addCoprocessor(DelayingRegionObserver.class.getName());
  tableDesc.addFamily(new HColumnDescriptor(family));

  Table table = null;
  try {
    table = UTIL.createTable(tableDesc, null);
    table.put(new Put(rowkey).addColumn(family, col, Bytes.toBytes("val1")));

    ThriftHBaseServiceHandler hbaseHandler = createHandler();
    ThriftMetrics metrics = getMetrics(UTIL.getConfiguration());
    THBaseService.Iface handler = ThriftHBaseServiceHandler.newInstance(hbaseHandler, metrics);
    ByteBuffer tTableName = wrap(tableName.getName());

    // check metrics latency with a successful get
    TGet tGet = new TGet(wrap(rowkey));
    TResult tResult = handler.get(tTableName, tGet);

    List<TColumnValue> expectedColumnValues = Lists.newArrayList(
        new TColumnValue(wrap(family), wrap(col), wrap(Bytes.toBytes("val1"))));
    assertArrayEquals(rowkey, tResult.getRow());
    List<TColumnValue> returnedColumnValues = tResult.getColumnValues();
    assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);

    // the delayed get must register a latency above the old ~2-second cap
    metricsHelper.assertGaugeGt("get_max", 3000L, metrics.getSource());
  } finally {
    if (table != null) {
      try {
        table.close();
      } catch (IOException ignored) {
      }
      UTIL.deleteTable(tableName);
    }
  }
}
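The DelayingRegionObserver registered on the table is what pushes the get latency past the asserted 3000 ms. The actual class lives in the HBase test source; a minimal sketch of the idea in the pre-2.0 coprocessor style, assuming BaseRegionObserver (details of the real class differ):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

// Hypothetical sketch: sleep in preGetOp so the measured get latency exceeds
// the ~2-second cap described in HBASE-17611.
public class DelayingRegionObserver extends BaseRegionObserver {
  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
      List<Cell> results) throws IOException {
    try {
      Thread.sleep(3000); // total request latency ends up above 3000 ms
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
    }
  }
}

The ~2-second figure in the javadoc is consistent with a duration measured in nanoseconds and stored in an int: Integer.MAX_VALUE nanoseconds is roughly 2.1 seconds, so asserting get_max above 3000 ms only passes once the metric no longer overflows.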
Use of org.apache.hadoop.hbase.thrift2.generated.TResult in project hbase by apache.
Class TestThriftHBaseServiceHandler, method testDelete.
@Test
public void testDelete() throws Exception {
  ThriftHBaseServiceHandler handler = createHandler();
  byte[] rowName = "testDelete".getBytes();
  ByteBuffer table = wrap(tableAname);

  List<TColumnValue> columnValues = new ArrayList<>(2);
  TColumnValue columnValueA =
      new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
  TColumnValue columnValueB =
      new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname));
  columnValues.add(columnValueA);
  columnValues.add(columnValueB);
  TPut put = new TPut(wrap(rowName), columnValues);
  put.setColumnValues(columnValues);
  handler.put(table, put);

  TDelete delete = new TDelete(wrap(rowName));
  List<TColumn> deleteColumns = new ArrayList<>(1);
  TColumn deleteColumn = new TColumn(wrap(familyAname));
  deleteColumn.setQualifier(qualifierAname);
  deleteColumns.add(deleteColumn);
  delete.setColumns(deleteColumns);
  handler.deleteSingle(table, delete);

  TGet get = new TGet(wrap(rowName));
  TResult result = handler.get(table, get);
  assertArrayEquals(rowName, result.getRow());
  List<TColumnValue> returnedColumnValues = result.getColumnValues();
  List<TColumnValue> expectedColumnValues = new ArrayList<>(1);
  expectedColumnValues.add(columnValueB);
  assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
}
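deleteSingle removes exactly what the TDelete describes: the TColumn above has both family and qualifier set, so only that one cell is deleted and columnValueB survives the subsequent get. For contrast, a hedged sketch of the coarser variants (hypothetical, not part of the original test):

// A TDelete with no columns removes the entire row.
TDelete deleteWholeRow = new TDelete(wrap(rowName));

// A TColumn with only the family set removes every cell in that family.
TDelete deleteFamily = new TDelete(wrap(rowName));
List<TColumn> familyOnly = new ArrayList<>(1);
familyOnly.add(new TColumn(wrap(familyAname)));
deleteFamily.setColumns(familyOnly);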