Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class HBaseAdmin, method checkAndSyncTableDescToPeers.
/**
 * Connect to the peer cluster and check the table descriptor on the peer:
 * <ol>
 * <li>Create the same table on the peer when it does not exist.</li>
 * <li>Throw an exception if the table already has replication enabled on any of the column
 * families.</li>
 * <li>Throw an exception if the table exists on the peer cluster but the descriptors are not the
 * same.</li>
 * </ol>
 * @param tableName name of the table to sync to the peer
 * @param splits table split keys
 * @throws IOException if a remote or network exception occurs
 */
private void checkAndSyncTableDescToPeers(final TableName tableName, final byte[][] splits)
    throws IOException {
  List<ReplicationPeerDescription> peers = listReplicationPeers();
  if (peers == null || peers.isEmpty()) {
    throw new IllegalArgumentException("Found no peer cluster for replication.");
  }
  for (ReplicationPeerDescription peerDesc : peers) {
    if (needToReplicate(tableName, peerDesc)) {
      Configuration peerConf = getPeerClusterConfiguration(peerDesc);
      try (Connection conn = ConnectionFactory.createConnection(peerConf);
          Admin repHBaseAdmin = conn.getAdmin()) {
        HTableDescriptor localHtd = getTableDescriptor(tableName);
        HTableDescriptor peerHtd = null;
        if (!repHBaseAdmin.tableExists(tableName)) {
          repHBaseAdmin.createTable(localHtd, splits);
        } else {
          peerHtd = repHBaseAdmin.getTableDescriptor(tableName);
          if (peerHtd == null) {
            throw new IllegalArgumentException("Failed to get table descriptor for table "
                + tableName.getNameAsString() + " from peer cluster " + peerDesc.getPeerId());
          }
          if (!compareForReplication(peerHtd, localHtd)) {
            throw new IllegalArgumentException("Table " + tableName.getNameAsString()
                + " exists in peer cluster " + peerDesc.getPeerId()
                + ", but the table descriptors are not the same when compared with the source"
                + " cluster. Thus cannot enable the table's replication switch.");
          }
        }
      }
    }
  }
}
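For context, here is a minimal sketch of how this private helper is typically driven from the public replication entry point. The flow is simplified, and getTableSplits is an assumed helper standing in for however the source table's split keys are obtained; this is not the verbatim HBase implementation.

public void enableTableReplication(final TableName tableName) throws IOException {
  if (tableName == null) {
    throw new IllegalArgumentException("Table name cannot be null");
  }
  if (!tableExists(tableName)) {
    throw new TableNotFoundException("Table '" + tableName.getNameAsString()
        + "' does not exist.");
  }
  // Sync the descriptor (and split keys) to each peer, then flip the
  // per-family replication scope on the source table.
  byte[][] splits = getTableSplits(tableName); // assumed helper
  checkAndSyncTableDescToPeers(tableName, splits);
  setTableRep(tableName, true);
}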
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class HBaseAdmin, method setTableRep.
/**
 * Set the table's replication switch if it is not already in the requested state.
 * @param tableName name of the table
 * @param enableRep whether to enable or disable the replication switch
 * @throws IOException if a remote or network exception occurs
 */
private void setTableRep(final TableName tableName, boolean enableRep) throws IOException {
  HTableDescriptor htd = getTableDescriptor(tableName);
  ReplicationState currentReplicationState = getTableReplicationState(htd);
  if ((enableRep && currentReplicationState != ReplicationState.ENABLED)
      || (!enableRep && currentReplicationState != ReplicationState.DISABLED)) {
    for (HColumnDescriptor hcd : htd.getFamilies()) {
      hcd.setScope(enableRep ? HConstants.REPLICATION_SCOPE_GLOBAL
          : HConstants.REPLICATION_SCOPE_LOCAL);
    }
    modifyTable(tableName, htd);
  }
}
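HTableDescriptor and HColumnDescriptor are mutable classes that were later deprecated in favor of immutable descriptors. A hedged sketch of the same toggle against the builder-based API follows, assuming the hbase 2.x Admin and descriptor-builder classes:

// Sketch only: same replication toggle expressed with the 2.x builder API.
private void setTableRepWithBuilders(final TableName tableName, boolean enableRep)
    throws IOException {
  TableDescriptor htd = getDescriptor(tableName);
  int scope = enableRep ? HConstants.REPLICATION_SCOPE_GLOBAL
      : HConstants.REPLICATION_SCOPE_LOCAL;
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
  for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
    // Rebuild each family with the requested replication scope.
    builder.modifyColumnFamily(
        ColumnFamilyDescriptorBuilder.newBuilder(cfd).setScope(scope).build());
  }
  modifyTable(builder.build());
}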
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class TestScannersWithFilters, method setUpBeforeClass.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster(3);
  REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
  context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class,
      ScannerModel.class);
  marshaller = context.createMarshaller();
  unmarshaller = context.createUnmarshaller();
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
  Admin admin = TEST_UTIL.getAdmin();
  if (!admin.tableExists(TABLE)) {
    HTableDescriptor htd = new HTableDescriptor(TABLE);
    htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
    htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
    admin.createTable(htd);
    Table table = TEST_UTIL.getConnection().getTable(TABLE);
    // Insert first half
    for (byte[] ROW : ROWS_ONE) {
      Put p = new Put(ROW);
      p.setDurability(Durability.SKIP_WAL);
      for (byte[] QUALIFIER : QUALIFIERS_ONE) {
        p.addColumn(FAMILIES[0], QUALIFIER, VALUES[0]);
      }
      table.put(p);
    }
    for (byte[] ROW : ROWS_TWO) {
      Put p = new Put(ROW);
      p.setDurability(Durability.SKIP_WAL);
      for (byte[] QUALIFIER : QUALIFIERS_TWO) {
        p.addColumn(FAMILIES[1], QUALIFIER, VALUES[1]);
      }
      table.put(p);
    }
    // Insert second half (reverse families)
    for (byte[] ROW : ROWS_ONE) {
      Put p = new Put(ROW);
      p.setDurability(Durability.SKIP_WAL);
      for (byte[] QUALIFIER : QUALIFIERS_ONE) {
        p.addColumn(FAMILIES[1], QUALIFIER, VALUES[0]);
      }
      table.put(p);
    }
    for (byte[] ROW : ROWS_TWO) {
      Put p = new Put(ROW);
      p.setDurability(Durability.SKIP_WAL);
      for (byte[] QUALIFIER : QUALIFIERS_TWO) {
        p.addColumn(FAMILIES[0], QUALIFIER, VALUES[1]);
      }
      table.put(p);
    }
    // Delete the second qualifier from all rows and families
    for (byte[] ROW : ROWS_ONE) {
      Delete d = new Delete(ROW);
      d.addColumns(FAMILIES[0], QUALIFIERS_ONE[1]);
      d.addColumns(FAMILIES[1], QUALIFIERS_ONE[1]);
      table.delete(d);
    }
    for (byte[] ROW : ROWS_TWO) {
      Delete d = new Delete(ROW);
      d.addColumns(FAMILIES[0], QUALIFIERS_TWO[1]);
      d.addColumns(FAMILIES[1], QUALIFIERS_TWO[1]);
      table.delete(d);
    }
    colsPerRow -= 2;
    // Delete the second row from both groups, one column at a time
    for (byte[] QUALIFIER : QUALIFIERS_ONE) {
      Delete d = new Delete(ROWS_ONE[1]);
      d.addColumns(FAMILIES[0], QUALIFIER);
      d.addColumns(FAMILIES[1], QUALIFIER);
      table.delete(d);
    }
    for (byte[] QUALIFIER : QUALIFIERS_TWO) {
      Delete d = new Delete(ROWS_TWO[1]);
      d.addColumns(FAMILIES[0], QUALIFIER);
      d.addColumns(FAMILIES[1], QUALIFIER);
      table.delete(d);
    }
    numRows -= 2;
    table.close();
  }
}
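The setup above references static fixtures (TABLE, FAMILIES, ROWS_ONE, ROWS_TWO, QUALIFIERS_ONE, QUALIFIERS_TWO, VALUES) defined elsewhere in the test class. A sketch of their likely shape follows; the concrete values are illustrative placeholders, not the exact ones from the HBase source.

// Illustrative fixture shapes only; names match the snippet, values are assumed.
private static final TableName TABLE = TableName.valueOf("TestScannersWithFilters");
private static final byte[][] FAMILIES = {
  Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo")
};
private static final byte[][] ROWS_ONE = {
  Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-1"),
  Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3")
};
private static final byte[][] ROWS_TWO = {
  Bytes.toBytes("testRowTwo-0"), Bytes.toBytes("testRowTwo-1"),
  Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3")
};
private static final byte[][] QUALIFIERS_ONE = {
  Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"),
  Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3")
};
private static final byte[][] QUALIFIERS_TWO = {
  Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"),
  Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3")
};
private static final byte[][] VALUES = {
  Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo")
};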
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class TestTableResource, method setUpBeforeClass.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster(3);
  REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
  client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
  context = JAXBContext.newInstance(TableModel.class, TableInfoModel.class, TableListModel.class,
      TableRegionModel.class);
  Admin admin = TEST_UTIL.getAdmin();
  if (admin.tableExists(TABLE)) {
    return;
  }
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
  admin.createTable(htd);
  byte[] k = new byte[3];
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(COLUMN));
  List<Put> puts = new ArrayList<>();
  for (byte b1 = 'a'; b1 < 'z'; b1++) {
    for (byte b2 = 'a'; b2 < 'z'; b2++) {
      for (byte b3 = 'a'; b3 < 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        put.setDurability(Durability.SKIP_WAL);
        put.addColumn(famAndQf[0], famAndQf[1], k);
        puts.add(put);
      }
    }
  }
  Connection connection = TEST_UTIL.getConnection();
  Table table = connection.getTable(TABLE);
  table.put(puts);
  table.close();
  // get the initial layout (should just be one region)
  RegionLocator regionLocator = connection.getRegionLocator(TABLE);
  List<HRegionLocation> m = regionLocator.getAllRegionLocations();
  assertEquals(1, m.size());
  // tell the master to split the table
  admin.split(TABLE);
  // give some time for the split to happen
  TestEndToEndSplitTransaction.blockUntilRegionSplit(TEST_UTIL.getConfiguration(), 60000,
      m.get(0).getRegionInfo().getRegionName(), true);
  long timeout = System.currentTimeMillis() + (15 * 1000);
  while (System.currentTimeMillis() < timeout && m.size() != 2) {
    try {
      Thread.sleep(250);
    } catch (InterruptedException e) {
      LOG.warn(StringUtils.stringifyException(e));
    }
    // check again
    m = regionLocator.getAllRegionLocations();
  }
  // should have two regions now
  assertEquals(2, m.size());
  regionMap = m;
  LOG.info("regions: " + regionMap);
  regionLocator.close();
}
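As an aside, the hand-rolled sleep loop above can also be written against HBaseTestingUtility's generic waiter. A sketch, assuming the Java 8 lambda form of Waiter.Predicate:

// Poll until the table reports two region locations or 15 s elapse.
TEST_UTIL.waitFor(15000, () -> regionLocator.getAllRegionLocations().size() == 2);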
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class PerformanceEvaluation, method getTableDescriptor.
protected HTableDescriptor getTableDescriptor() {
  if (TABLE_DESCRIPTOR == null) {
    TABLE_DESCRIPTOR = new HTableDescriptor(tableName);
    HColumnDescriptor family = new HColumnDescriptor(FAMILY_NAME);
    family.setDataBlockEncoding(blockEncoding);
    family.setCompressionType(compression);
    if (inMemoryCF) {
      family.setInMemory(true);
    }
    TABLE_DESCRIPTOR.addFamily(family);
  }
  return TABLE_DESCRIPTOR;
}
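For illustration, here is a minimal sketch of how such a lazily built descriptor is usually consumed when setting up the benchmark table; the Configuration parameter and the call site are assumptions, not the actual PerformanceEvaluation code path.

// Hypothetical call site: create the table from the cached descriptor
// if it does not already exist.
void createTestTableIfMissing(Configuration conf) throws IOException {
  try (Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
    HTableDescriptor desc = getTableDescriptor();
    if (!admin.tableExists(desc.getTableName())) {
      admin.createTable(desc);
    }
  }
}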