Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class ProtobufUtil, method convertToHTableDesc.
/**
 * Converts a TableSchema to HTableDescriptor
 * @param ts A pb TableSchema instance.
 * @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
 */
public static HTableDescriptor convertToHTableDesc(final TableSchema ts) {
  List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
  HColumnDescriptor[] hcds = new HColumnDescriptor[list.size()];
  int index = 0;
  for (ColumnFamilySchema cfs : list) {
    hcds[index++] = ProtobufUtil.convertToHColumnDesc(cfs);
  }
  HTableDescriptor htd = new HTableDescriptor(ProtobufUtil.toTableName(ts.getTableName()));
  for (HColumnDescriptor hcd : hcds) {
    htd.addFamily(hcd);
  }
  for (BytesBytesPair a : ts.getAttributesList()) {
    htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
  }
  for (NameStringPair a : ts.getConfigurationList()) {
    htd.setConfiguration(a.getName(), a.getValue());
  }
  return htd;
}
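A minimal usage sketch: build a pb TableSchema by hand and convert it. This assumes the generated protobuf builders for TableSchema/ColumnFamilySchema (with the usual generated setter/adder names) and ProtobufUtil.toProtoTableName from the same module; the table and family names are illustrative.

// Hedged sketch, not the project's own code. ByteString is the protobuf
// ByteString class from whichever (possibly shaded) protobuf module is in use.
TableSchema ts = TableSchema.newBuilder()
    .setTableName(ProtobufUtil.toProtoTableName(TableName.valueOf("demo")))
    .addColumnFamilies(ColumnFamilySchema.newBuilder()
        .setName(ByteString.copyFrom(Bytes.toBytes("cf")))
        .build())
    .build();
HTableDescriptor htd = ProtobufUtil.convertToHTableDesc(ts);
assert htd.hasFamily(Bytes.toBytes("cf"));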
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class IntegrationTestBulkLoad, method installSlowingCoproc.
/**
 * Modify table {@code getTableName()} to carry {@link SlowMeCoproScanOperations}.
 */
private void installSlowingCoproc() throws IOException, InterruptedException {
  // The slowing coprocessor is only needed when region replicas are enabled.
  int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
  if (replicaCount == NUM_REPLICA_COUNT_DEFAULT) {
    return;
  }
  TableName t = getTablename();
  Admin admin = util.getAdmin();
  HTableDescriptor desc = admin.getTableDescriptor(t);
  desc.addCoprocessor(SlowMeCoproScanOperations.class.getName());
  HBaseTestingUtility.modifyTableSync(admin, desc);
}
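A hedged sketch of the same idea with the plain Admin API, without the test-utility sync helper. MyObserver is a hypothetical coprocessor class standing in for SlowMeCoproScanOperations, and connection is an open HBase Connection.

// Hedged sketch: attach a coprocessor via Admin alone.
Admin admin = connection.getAdmin();
TableName table = TableName.valueOf("demo");
HTableDescriptor desc = admin.getTableDescriptor(table);
if (!desc.hasCoprocessor(MyObserver.class.getName())) { // avoid double registration
  desc.addCoprocessor(MyObserver.class.getName());
  // modifyTable is asynchronous in this API generation; callers that need the
  // change visible should poll the descriptor (as modifyTableSync does above).
  admin.modifyTable(table, desc);
}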
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class IntegrationTestMTTR, method setupTables.
private static void setupTables() throws IOException {
  // Get the table names.
  tableName = TableName.valueOf(util.getConfiguration().get(
      "hbase.IntegrationTestMTTR.tableName", "IntegrationTestMTTR"));
  loadTableName = TableName.valueOf(util.getConfiguration().get(
      "hbase.IntegrationTestMTTR.loadTableName", "IntegrationTestMTTRLoadTestTool"));
  // Drop any leftovers from earlier runs.
  if (util.getAdmin().tableExists(tableName)) {
    util.deleteTable(tableName);
  }
  if (util.getAdmin().tableExists(loadTableName)) {
    util.deleteTable(loadTableName);
  }
  // Create the table. If this fails then fail everything.
  HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
  // Make the max file size huge so that splits don't happen during the test.
  tableDescriptor.setMaxFileSize(Long.MAX_VALUE);
  HColumnDescriptor descriptor = new HColumnDescriptor(FAMILY);
  descriptor.setMaxVersions(1);
  tableDescriptor.addFamily(descriptor);
  util.getAdmin().createTable(tableDescriptor);
  // Set up the table for LoadTestTool.
  int ret = loadTool.run(new String[] { "-tn", loadTableName.getNameAsString(), "-init_only" });
  assertEquals("Failed to initialize LoadTestTool", 0, ret);
}
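For comparison, a sketch of the same table setup with the builder API that later HBase releases use in place of HTableDescriptor/HColumnDescriptor; this assumes a 2.x-era client on the classpath, with tableName and FAMILY as above.

// Hedged sketch: equivalent creation via TableDescriptorBuilder /
// ColumnFamilyDescriptorBuilder, the replacements for the H* descriptors.
TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
    .setMaxFileSize(Long.MAX_VALUE) // keep splits from firing mid-test
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY)
        .setMaxVersions(1)
        .build())
    .build();
util.getAdmin().createTable(td);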
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class DecreaseMaxHFileSizeAction, method perform.
@Override
public void perform() throws Exception {
  HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
  Admin admin = util.getAdmin();
  HTableDescriptor htd = admin.getTableDescriptor(tableName);
  // Try and get the current value.
  long currentValue = htd.getMaxFileSize();
  // If the value is not set on the table, fall back to the cluster default.
  // If configs are really weird this might not work.
  // That's ok. We're trying to cause chaos.
  if (currentValue <= 0) {
    currentValue = context.getHBaseCluster().getConf()
        .getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE);
  }
  // Decrease by 10% at a time.
  long newValue = (long) (currentValue * 0.9);
  // We don't want to go too far below 1gb.
  // So go to about 1gb +/- 512 on each side.
  newValue = Math.max(minFileSize, newValue) - (512 - random.nextInt(1024));
  // Change the table descriptor.
  htd.setMaxFileSize(newValue);
  // Don't try the modify if we're stopping.
  if (context.isStopping()) {
    return;
  }
  // Modify the table.
  admin.modifyTable(tableName, htd);
  // Sleep some time.
  if (sleepTime > 0) {
    Thread.sleep(sleepTime);
  }
}
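The shrink/clamp/jitter arithmetic is easy to misread, so here is just that step in isolation, as a hedged sketch; nextMaxFileSize is a hypothetical helper name, and minFileSize mirrors the action's ~1gb floor field.

import java.util.Random;

// Hedged sketch: the step above, factored out for clarity.
static long nextMaxFileSize(long currentValue, long minFileSize, Random random) {
  long candidate = (long) (currentValue * 0.9); // shrink by 10%
  // Clamp to the floor, then add noise in [-512, 511] bytes so repeated
  // invocations don't pin the table to one exact value.
  return Math.max(minFileSize, candidate) - (512 - random.nextInt(1024));
}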
Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.
The class RemoveColumnAction, method perform.
@Override
public void perform() throws Exception {
  HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
  HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();
  // Leave the table alone if removing a family would drop below the protected
  // set (or below a single family when nothing is protected).
  if (columnDescriptors.length <= (protectedColumns == null ? 1 : protectedColumns.size())) {
    return;
  }
  // Re-roll until we land on a family that is not protected.
  int index = random.nextInt(columnDescriptors.length);
  while (protectedColumns != null
      && protectedColumns.contains(columnDescriptors[index].getNameAsString())) {
    index = random.nextInt(columnDescriptors.length);
  }
  byte[] colDescName = columnDescriptors[index].getName();
  LOG.debug("Performing action: Removing " + Bytes.toString(colDescName) + " from "
      + tableName.getNameAsString());
  tableDescriptor.removeFamily(colDescName);
  // Don't try the modify if we're stopping.
  if (context.isStopping()) {
    return;
  }
  admin.modifyTable(tableName, tableDescriptor);
}
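A hedged variant that filters the protected families in a single pass instead of re-rolling the random index; field names mirror the action's, and imports for java.util.List/ArrayList are assumed.

// Hedged sketch: collect removable families first, then pick one.
List<HColumnDescriptor> removable = new ArrayList<>();
for (HColumnDescriptor cfd : tableDescriptor.getColumnFamilies()) {
  if (protectedColumns == null || !protectedColumns.contains(cfd.getNameAsString())) {
    removable.add(cfd);
  }
}
// Only remove when at least one family would remain afterwards.
if (!removable.isEmpty() && tableDescriptor.getColumnFamilies().length > 1) {
  byte[] name = removable.get(random.nextInt(removable.size())).getName();
  tableDescriptor.removeFamily(name);
  admin.modifyTable(tableName, tableDescriptor);
}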