Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
In the class HFileOutputFormat2, the method configureIncrementalLoadMap:
public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDescriptor)
    throws IOException {
  Configuration conf = job.getConfiguration();
  job.setOutputKeyClass(ImmutableBytesWritable.class);
  job.setOutputValueClass(MapReduceExtendedCell.class);
  job.setOutputFormatClass(HFileOutputFormat2.class);

  ArrayList<TableDescriptor> singleTableDescriptor = new ArrayList<>(1);
  singleTableDescriptor.add(tableDescriptor);

  conf.set(OUTPUT_TABLE_NAME_CONF_KEY, tableDescriptor.getTableName().getNameAsString());
  // Serialize per-column-family attributes (compression, block size, bloom
  // type/params, data block encoding) into the job configuration.
  conf.set(COMPRESSION_FAMILIES_CONF_KEY,
    serializeColumnFamilyAttribute(compressionDetails, singleTableDescriptor));
  conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY,
    serializeColumnFamilyAttribute(blockSizeDetails, singleTableDescriptor));
  conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY,
    serializeColumnFamilyAttribute(bloomTypeDetails, singleTableDescriptor));
  conf.set(BLOOM_PARAM_FAMILIES_CONF_KEY,
    serializeColumnFamilyAttribute(bloomParamDetails, singleTableDescriptor));
  conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY,
    serializeColumnFamilyAttribute(dataBlockEncodingDetails, singleTableDescriptor));

  TableMapReduceUtil.addDependencyJars(job);
  TableMapReduceUtil.initCredentials(job);
  LOG.info("Incremental table " + tableDescriptor.getTableName() + " output configured.");
}
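A minimal driver sketch showing how configureIncrementalLoadMap might be wired up from client code; the table name "example" and the job name are hypothetical, everything else uses standard HBase 2.x client APIs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;

public class ConfigureIncrementalLoadMapDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // Fetch the live descriptor for a hypothetical table "example".
      TableDescriptor descriptor = admin.getDescriptor(TableName.valueOf("example"));
      Job job = Job.getInstance(conf, "hfile-output-prepare");
      HFileOutputFormat2.configureIncrementalLoadMap(job, descriptor);
      // ... set the mapper, input format and paths, then submit the job ...
    }
  }
}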
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
In the class IntegrationTestLoadAndVerify, the method testLoadAndVerify:
@Test
public void testLoadAndVerify() throws Exception {
  TableDescriptor tableDescriptor =
    TableDescriptorBuilder.newBuilder(TableName.valueOf(TEST_NAME))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build();

  Admin admin = getTestingUtil(getConf()).getAdmin();
  admin.createTable(tableDescriptor, Bytes.toBytes(0L), Bytes.toBytes(-1L), 40);

  doLoad(getConf(), tableDescriptor);
  doVerify(getConf(), tableDescriptor);

  // Only disable and drop the table if verification succeeded - otherwise it
  // is useful to leave it around for post-mortem analysis.
  getTestingUtil(getConf()).deleteTable(tableDescriptor.getTableName());
}
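A note on the pre-split bounds used above: Bytes.toBytes(-1L) encodes -1 in two's complement, i.e. eight 0xFF bytes, so the 40-way split between Bytes.toBytes(0L) and Bytes.toBytes(-1L) spans the entire 8-byte row-key space. A quick sketch of that encoding:

// -1L in two's complement is all ones, i.e. eight 0xFF bytes.
byte[] endKey = org.apache.hadoop.hbase.util.Bytes.toBytes(-1L);
System.out.println(org.apache.hadoop.hbase.util.Bytes.toStringBinary(endKey));
// prints: \xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF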
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
In the class IntegrationTestWithCellVisibilityLoadAndVerify, the method runTestFromCommandLine:
@Override
public int runTestFromCommandLine() throws Exception {
  IntegrationTestingUtility.setUseDistributedCluster(getConf());
  int numPresplits = getConf().getInt("loadmapper.numPresplits", 5);

  // Create a TableDescriptor for the specified table.
  TableDescriptor tableDescriptor =
    TableDescriptorBuilder.newBuilder(getTablename())
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build();

  try (Connection conn = ConnectionFactory.createConnection(getConf());
      Admin admin = conn.getAdmin()) {
    admin.createTable(tableDescriptor, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
  }

  doLoad(getConf(), tableDescriptor);
  doVerify(getConf(), tableDescriptor);
  getTestingUtil(getConf()).deleteTable(getTablename());
  return 0;
}
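Because numPresplits is read from the Configuration with a default of 5, the split count can be overridden at launch time. A sketch (the value 20 is arbitrary; the command shape assumes the usual ToolRunner entry point for these integration tests):

// Programmatic override of the presplit count:
Configuration conf = HBaseConfiguration.create();
conf.setInt("loadmapper.numPresplits", 20);
// Equivalent command-line override:
//   hbase <test-class-name> -Dloadmapper.numPresplits=20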
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
In the class ConstraintProcessor, the method start:
@Override
public void start(CoprocessorEnvironment environment) {
  // make sure we are on a region server
  if (!(environment instanceof RegionCoprocessorEnvironment)) {
    throw new IllegalArgumentException(
      "Constraints only act on regions - started in an environment that was not a region");
  }
  RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) environment;
  TableDescriptor desc = env.getRegion().getTableDescriptor();

  // load all the constraints from the HTD
  try {
    this.constraints = Constraints.getConstraints(desc, classloader);
  } catch (IOException e) {
    throw new IllegalArgumentException(e);
  }

  if (LOG.isInfoEnabled()) {
    LOG.info("Finished loading " + constraints.size() + " user Constraints on table: "
      + desc.getTableName());
  }
}
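For context, a constraint that Constraints.getConstraints can load might look like the following minimal sketch; the class NonEmptyPutConstraint is hypothetical, and attaching it to a table goes through Constraints.add on the table descriptor (the exact overload varies across HBase versions).

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.constraint.BaseConstraint;
import org.apache.hadoop.hbase.constraint.ConstraintException;

// Hypothetical constraint: reject Puts that carry no cells at all.
public class NonEmptyPutConstraint extends BaseConstraint {
  @Override
  public void check(Put put) throws ConstraintException {
    if (put.isEmpty()) {
      throw new ConstraintException("Put must contain at least one cell");
    }
  }
}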
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
In the class EnableTableProcedure, the method executeFromState:
@Override
protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableState state)
    throws InterruptedException {
  LOG.trace("{} execute state={}", this, state);
  try {
    switch (state) {
      case ENABLE_TABLE_PREPARE:
        if (prepareEnable(env)) {
          setNextState(EnableTableState.ENABLE_TABLE_PRE_OPERATION);
        } else {
          assert isFailed() : "enable should have an exception here";
          return Flow.NO_MORE_STATE;
        }
        break;
      case ENABLE_TABLE_PRE_OPERATION:
        preEnable(env, state);
        setNextState(EnableTableState.ENABLE_TABLE_SET_ENABLING_TABLE_STATE);
        break;
      case ENABLE_TABLE_SET_ENABLING_TABLE_STATE:
        setTableStateToEnabling(env, tableName);
        setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE);
        break;
      case ENABLE_TABLE_MARK_REGIONS_ONLINE:
        // Get the region replica count. If it changed since disable, we need to do
        // more work assigning.
        TableDescriptor tableDescriptor =
          env.getMasterServices().getTableDescriptors().get(tableName);
        int configuredReplicaCount = tableDescriptor.getRegionReplication();
        // Get regions for the table from memory
        List<RegionInfo> regionsOfTable =
          env.getAssignmentManager().getRegionStates().getRegionsOfTableForEnabling(tableName);
        // How many replicas do we currently have? Check regions returned from
        // in-memory state.
        int currentMaxReplica = getMaxReplicaId(regionsOfTable);
        if (currentMaxReplica == configuredReplicaCount - 1) {
          LOG.debug("No change in number of region replicas (configuredReplicaCount={});"
            + " assigning.", configuredReplicaCount);
        } else if (currentMaxReplica > (configuredReplicaCount - 1)) {
          // We have extra regions because the replica count has been decreased. Delete
          // those regions, since the table is already in the unassigned state.
          LOG.warn("The number of replicas {} is more than the region replica count {},"
            + " usually this should not happen as we will delete them in ModifyTableProcedure",
            currentMaxReplica + 1, configuredReplicaCount);
          List<RegionInfo> copyOfRegions = new ArrayList<>(regionsOfTable);
          for (RegionInfo regionInfo : copyOfRegions) {
            if (regionInfo.getReplicaId() > (configuredReplicaCount - 1)) {
              // delete the region from the regionStates
              env.getAssignmentManager().getRegionStates().deleteRegion(regionInfo);
              // remove it from the list of regions of the table
              LOG.info("Removed replica={} of {}", regionInfo.getRegionId(), regionInfo);
              regionsOfTable.remove(regionInfo);
            }
          }
        } else if (currentMaxReplica < configuredReplicaCount - 1) {
          // fewer replicas found than the configured region replication
          LOG.info("Number of replicas has increased for {}. Assigning new region replicas."
            + " The previous replica count was {}. The current replica count is {}.",
            this.tableName, currentMaxReplica + 1, configuredReplicaCount);
          regionsOfTable = RegionReplicaUtil.addReplicas(regionsOfTable, currentMaxReplica + 1,
            configuredReplicaCount);
        }
        // Assign all the table regions (including region replicas if added).
        // createAssignProcedures will try to retain old assignments if possible.
        addChildProcedure(env.getAssignmentManager().createAssignProcedures(regionsOfTable));
        setNextState(EnableTableState.ENABLE_TABLE_SET_ENABLED_TABLE_STATE);
        break;
      case ENABLE_TABLE_SET_ENABLED_TABLE_STATE:
        setTableStateToEnabled(env, tableName);
        setNextState(EnableTableState.ENABLE_TABLE_POST_OPERATION);
        break;
      case ENABLE_TABLE_POST_OPERATION:
        postEnable(env, state);
        return Flow.NO_MORE_STATE;
      default:
        throw new UnsupportedOperationException("unhandled state=" + state);
    }
  } catch (IOException e) {
    if (isRollbackSupported(state)) {
      setFailure("master-enable-table", e);
    } else {
      LOG.warn("Retriable error trying to enable table=" + tableName + " (in state=" + state
        + ")", e);
    }
  }
  return Flow.HAS_MORE_STATE;
}
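getMaxReplicaId is a private helper not shown in the excerpt; a minimal sketch consistent with its call site above (the body here is an assumption, only the usage comes from the source):

// Sketch: largest replica id among the table's regions. The default
// (primary) replica has id 0, so a table without extra replicas yields 0,
// which matches configuredReplicaCount - 1 when region replication is 1.
private static int getMaxReplicaId(List<RegionInfo> regions) {
  int max = 0;
  for (RegionInfo region : regions) {
    max = Math.max(max, region.getReplicaId());
  }
  return max;
}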