Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
From the class TestHCM, the method testDeleteRpcTimeout:
@Test
public void testDeleteRpcTimeout() throws Exception {
  HTableDescriptor hdt = TEST_UTIL.createTableDescriptor(TableName.valueOf(name.getMethodName()));
  hdt.addCoprocessor(SleepCoprocessor.class.getName());
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  try (Table t = TEST_UTIL.createTable(hdt, new byte[][] { FAM_NAM }, c)) {
    // The write RPC timeout is half the coprocessor's sleep, so every delete
    // attempt must time out; the generous operation timeout ensures the retry
    // budget, not the operation deadline, is what ends the call.
    t.setWriteRpcTimeout(SleepCoprocessor.SLEEP_TIME / 2);
    t.setOperationTimeout(SleepCoprocessor.SLEEP_TIME * 100);
    Delete d = new Delete(FAM_NAM);
    d.addColumn(FAM_NAM, FAM_NAM, 1);
    t.delete(d);
    fail("Write should not have succeeded");
  } catch (RetriesExhaustedException e) {
    // expected
  }
}
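The test relies on SleepCoprocessor, a test-only observer that stalls region-side operations long enough for the client timeout to fire. Below is a minimal sketch of what such a coprocessor could look like, assuming the HBase 1.x BaseRegionObserver API; the class name SleepingObserver is hypothetical and the actual SleepCoprocessor in the HBase test suite may hook additional operations.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Threads;

// Hypothetical sketch: sleeps before every mutation, so a write RPC whose
// timeout is below SLEEP_TIME is guaranteed to time out on the client side.
public class SleepingObserver extends BaseRegionObserver {
  public static final int SLEEP_TIME = 5000; // ms

  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put,
      WALEdit edit, Durability durability) throws IOException {
    Threads.sleep(SLEEP_TIME);
  }

  @Override
  public void preDelete(ObserverContext<RegionCoprocessorEnvironment> e, Delete delete,
      WALEdit edit, Durability durability) throws IOException {
    Threads.sleep(SLEEP_TIME);
  }
}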
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
From the class TestIncrementsFromClientSide, the method testDuplicateIncrement:
/**
 * Test the increment result when there are duplicate RPC requests.
 */
@Test
public void testDuplicateIncrement() throws Exception {
  HTableDescriptor hdt = TEST_UTIL.createTableDescriptor(TableName.valueOf(name.getMethodName()));
  Map<String, String> kvs = new HashMap<>();
  kvs.put(HConnectionTestingUtility.SleepAtFirstRpcCall.SLEEP_TIME_CONF_KEY, "2000");
  hdt.addCoprocessor(HConnectionTestingUtility.SleepAtFirstRpcCall.class.getName(), null, 1, kvs);
  TEST_UTIL.createTable(hdt, new byte[][] { ROW }).close();

  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  c.setInt(HConstants.HBASE_CLIENT_PAUSE, 50);
  // The client will retry because the RPC timeout is smaller than the sleep
  // time of the first RPC call.
  c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500);

  Connection connection = ConnectionFactory.createConnection(c);
  Table t = connection.getTable(TableName.valueOf(name.getMethodName()));
  if (t instanceof HTable) {
    HTable table = (HTable) t;
    table.setOperationTimeout(3 * 1000);
    try {
      Increment inc = new Increment(ROW);
      inc.addColumn(TEST_UTIL.fam1, QUALIFIER, 1);
      Result result = table.increment(inc);
      Cell[] cells = result.rawCells();
      assertEquals(1, cells.length);
      assertIncrementKey(cells[0], ROW, TEST_UTIL.fam1, QUALIFIER, 1);
      // Verify that the increment was applied exactly once despite the retry
      Result readResult = table.get(new Get(ROW));
      cells = readResult.rawCells();
      assertEquals(1, cells.length);
      assertIncrementKey(cells[0], ROW, TEST_UTIL.fam1, QUALIFIER, 1);
    } finally {
      table.close();
      connection.close();
    }
  }
}
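SleepAtFirstRpcCall stalls only the first RPC it sees, so the original increment outlives the client RPC timeout and is retried while the region still executes it. A sketch of such an observer, again assuming the 1.x BaseRegionObserver API; the class name, field names, and configuration key here are assumptions for illustration, not the actual HConnectionTestingUtility implementation.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Threads;

// Hypothetical sketch: stall only the first increment RPC so the client
// retries it, producing a duplicate request against the same region.
public class SleepAtFirstCallObserver extends BaseRegionObserver {
  public static final String SLEEP_TIME_CONF_KEY = "sleep.at.first.call.ms"; // assumed key
  private final AtomicLong callCount = new AtomicLong();
  private volatile long sleepTime;

  @Override
  public void start(CoprocessorEnvironment e) {
    sleepTime = e.getConfiguration().getLong(SLEEP_TIME_CONF_KEY, 2000);
  }

  @Override
  public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> e,
      Increment increment) throws IOException {
    if (callCount.incrementAndGet() == 1) {
      Threads.sleep(sleepTime); // stall only the first RPC
    }
    return null; // null means "proceed with the normal increment"
  }
}

The assertions then expect a counter value of 1 rather than 2: the retried RPC carries the same nonce as the original, which lets the server recognize and suppress the duplicate application.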
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
From the class TestFromClientSide, the method testMiscHTableStuff:
@Test
public void testMiscHTableStuff() throws IOException {
  final TableName tableAname = TableName.valueOf(name.getMethodName() + "A");
  final TableName tableBname = TableName.valueOf(name.getMethodName() + "B");
  final byte[] attrName = Bytes.toBytes("TESTATTR");
  final byte[] attrValue = Bytes.toBytes("somevalue");
  byte[] value = Bytes.toBytes("value");
  Table a = TEST_UTIL.createTable(tableAname, HConstants.CATALOG_FAMILY);
  Table b = TEST_UTIL.createTable(tableBname, HConstants.CATALOG_FAMILY);
  Put put = new Put(ROW);
  put.addColumn(HConstants.CATALOG_FAMILY, null, value);
  a.put(put);

  // open a new connection to A and a connection to B
  Table newA = TEST_UTIL.getConnection().getTable(tableAname);

  // copy data from A to B
  Scan scan = new Scan();
  scan.addFamily(HConstants.CATALOG_FAMILY);
  ResultScanner s = newA.getScanner(scan);
  try {
    for (Result r : s) {
      put = new Put(r.getRow());
      put.setDurability(Durability.SKIP_WAL);
      for (Cell kv : r.rawCells()) {
        put.add(kv);
      }
      b.put(put);
    }
  } finally {
    s.close();
  }

  // Opening a new connection to A will cause the tables to be reloaded
  Table anotherA = TEST_UTIL.getConnection().getTable(tableAname);
  Get get = new Get(ROW);
  get.addFamily(HConstants.CATALOG_FAMILY);
  anotherA.get(get);

  // We can still access A through newA because it has the table information
  // cached. And if it needs to recalibrate, that will cause the information
  // to be reloaded.

  // Test user metadata
  Admin admin = TEST_UTIL.getAdmin();
  // make a modifiable descriptor
  HTableDescriptor desc = new HTableDescriptor(a.getTableDescriptor());
  // offline the table
  admin.disableTable(tableAname);
  // add a user attribute to the HTD
  desc.setValue(attrName, attrValue);
  // add a user attribute to each HCD
  for (HColumnDescriptor c : desc.getFamilies()) {
    c.setValue(attrName, attrValue);
  }
  // update the metadata for all regions of this table
  admin.modifyTable(tableAname, desc);
  // enable the table
  admin.enableTable(tableAname);

  // Test that the attribute changes were applied
  desc = a.getTableDescriptor();
  assertEquals("wrong table descriptor returned", tableAname, desc.getTableName());
  // check the HTD attribute
  value = desc.getValue(attrName);
  assertFalse("missing HTD attribute value", value == null);
  assertFalse("HTD attribute value is incorrect", Bytes.compareTo(value, attrValue) != 0);
  // check the HCD attribute
  for (HColumnDescriptor c : desc.getFamilies()) {
    value = c.getValue(attrName);
    assertFalse("missing HCD attribute value", value == null);
    assertFalse("HCD attribute value is incorrect", Bytes.compareTo(value, attrValue) != 0);
  }
}
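HTableDescriptor is mutable, which is why the test wraps the fetched descriptor in new HTableDescriptor(...) before editing it. In HBase 2.x the descriptor classes are immutable and the same attribute update goes through the builder API; a sketch of the equivalent, assuming the HBase 2.x TableDescriptorBuilder and ColumnFamilyDescriptorBuilder classes:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: rebuild the descriptor with a table-level and a per-family attribute,
// then apply it. The table must still be disabled before modifyTable on older
// versions; 2.x can modify online.
static void tagTable(Admin admin, TableName tableName) throws IOException {
  TableDescriptor current = admin.getDescriptor(tableName);
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(current)
      .setValue(Bytes.toBytes("TESTATTR"), Bytes.toBytes("somevalue"));
  for (ColumnFamilyDescriptor cf : current.getColumnFamilies()) {
    builder.modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cf)
        .setValue(Bytes.toBytes("TESTATTR"), Bytes.toBytes("somevalue"))
        .build());
  }
  admin.modifyTable(builder.build());
}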
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
From the class TestRestoreSnapshotFromClient, the method testRestoreSchemaChange:
@Test
public void testRestoreSchemaChange() throws Exception {
  Table table = TEST_UTIL.getConnection().getTable(tableName);

  // Add one column family and put some data in it
  admin.disableTable(tableName);
  admin.addColumnFamily(tableName, getTestRestoreSchemaChangeHCD());
  admin.enableTable(tableName);
  assertEquals(2, table.getTableDescriptor().getFamilies().size());
  HTableDescriptor htd = admin.getTableDescriptor(tableName);
  assertEquals(2, htd.getFamilies().size());
  SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, TEST_FAMILY2);
  long snapshot2Rows = snapshot1Rows + 500;
  assertEquals(snapshot2Rows, countRows(table));
  assertEquals(500, countRows(table, TEST_FAMILY2));
  Set<String> fsFamilies = getFamiliesFromFS(tableName);
  assertEquals(2, fsFamilies.size());

  // Take a snapshot
  admin.disableTable(tableName);
  admin.snapshot(snapshotName2, tableName);

  // Restore the snapshot (without the cf)
  admin.restoreSnapshot(snapshotName0);
  admin.enableTable(tableName);
  assertEquals(1, table.getTableDescriptor().getFamilies().size());
  try {
    countRows(table, TEST_FAMILY2);
    fail("family '" + Bytes.toString(TEST_FAMILY2) + "' should not exist");
  } catch (NoSuchColumnFamilyException e) {
    // expected
  }
  assertEquals(snapshot0Rows, countRows(table));
  htd = admin.getTableDescriptor(tableName);
  assertEquals(1, htd.getFamilies().size());
  fsFamilies = getFamiliesFromFS(tableName);
  assertEquals(1, fsFamilies.size());

  // Restore the snapshot back (with the cf)
  admin.disableTable(tableName);
  admin.restoreSnapshot(snapshotName2);
  admin.enableTable(tableName);
  htd = admin.getTableDescriptor(tableName);
  assertEquals(2, htd.getFamilies().size());
  assertEquals(2, table.getTableDescriptor().getFamilies().size());
  assertEquals(500, countRows(table, TEST_FAMILY2));
  assertEquals(snapshot2Rows, countRows(table));
  fsFamilies = getFamiliesFromFS(tableName);
  assertEquals(2, fsFamilies.size());
  table.close();
}
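The restores above only succeed because the table is disabled first: restoreSnapshot rewrites the table's schema and region layout in place, which also removes a column family added after the snapshot was taken. A minimal sketch of the disable/restore/enable cycle the test repeats, assuming the standard Admin API; the helper method name is illustrative only.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Sketch: restoring a snapshot resets both data and schema to snapshot state.
static void restore(Admin admin, TableName tableName, String snapshotName)
    throws Exception {
  admin.disableTable(tableName);       // restore requires a disabled table
  admin.restoreSnapshot(snapshotName); // resets schema and data
  admin.enableTable(tableName);
}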
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
From the class TestPrefixTree, the method setUp:
@Before
public void setUp() throws Exception {
  TableName tableName = TableName.valueOf(getClass().getSimpleName());
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(fam).setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE));
  HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  Path path = testUtil.getDataTestDir(getClass().getSimpleName());
  region = HBaseTestingUtility.createRegionAndWAL(info, path, testUtil.getConfiguration(), htd);
}