Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
In the class TestMasterMetricsWrapper, the method testOfflineRegion:
/**
 * tests online and offline region number
 */
@Test
public void testOfflineRegion() throws Exception {
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  MetricsMasterWrapperImpl info = new MetricsMasterWrapperImpl(master);
  TableName table = TableName.valueOf("testRegionNumber");
  try {
    RegionInfo hri;
    byte[] FAMILY = Bytes.toBytes("FAMILY");
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(table)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build();
    TEST_UTIL.getAdmin().createTable(tableDescriptor, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5);
    // wait till the table is assigned
    long timeoutTime = EnvironmentEdgeManager.currentTime() + 1000;
    while (true) {
      List<RegionInfo> regions =
        master.getAssignmentManager().getRegionStates().getRegionsOfTable(table);
      if (regions.size() > 3) {
        hri = regions.get(2);
        break;
      }
      long now = EnvironmentEdgeManager.currentTime();
      if (now > timeoutTime) {
        fail("Could not find an online region");
      }
      Thread.sleep(10);
    }
    PairOfSameType<Integer> regionNumberPair = info.getRegionCounts();
    assertEquals(5, regionNumberPair.getFirst().intValue());
    assertEquals(0, regionNumberPair.getSecond().intValue());
    TEST_UTIL.getAdmin().offline(hri.getRegionName());
    timeoutTime = EnvironmentEdgeManager.currentTime() + 800;
    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
    while (true) {
      if (regionStates.getRegionByStateOfTable(table).get(RegionState.State.OFFLINE).contains(hri)) {
        break;
      }
      long now = EnvironmentEdgeManager.currentTime();
      if (now > timeoutTime) {
        fail("Failed to offline the region in time");
        break;
      }
      Thread.sleep(10);
    }
    regionNumberPair = info.getRegionCounts();
    assertEquals(4, regionNumberPair.getFirst().intValue());
    assertEquals(1, regionNumberPair.getSecond().intValue());
  } finally {
    TEST_UTIL.deleteTable(table);
  }
}
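The core TableDescriptor usage in this test can be distilled into a short, standalone sketch. The snippet below is a minimal illustration (not part of the test above) of building a descriptor with TableDescriptorBuilder and creating a pre-split table through Admin; the class, table, and family names are placeholders chosen for this example.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Immutable TableDescriptor assembled through its builder, with one column family.
      TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))   // placeholder table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
        .build();
      // Pre-split into 5 regions between the "A" and "Z" row-key boundaries,
      // mirroring the createTable(desc, startKey, endKey, numRegions) call in the test.
      admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5);
    }
  }
}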
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
In the class TestMasterOperationsForRegionReplicas, the method testCreateTableWithSingleReplica:
@Test
public void testCreateTableWithSingleReplica() throws Exception {
  final int numRegions = 3;
  final int numReplica = 1;
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
      .setRegionReplication(numReplica)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")).build();
    ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
    TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
    TEST_UTIL.waitUntilNoRegionsInTransition();
    validateNumberOfRowsInMeta(tableName, numRegions, ADMIN.getConnection());
    List<RegionInfo> hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), tableName);
    assertEquals(numRegions * numReplica, hris.size());
  } finally {
    ADMIN.disableTable(tableName);
    ADMIN.deleteTable(tableName);
  }
}
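Region replication is just another attribute on the builder. The fragment below is a minimal, hypothetical helper (names, the replica count, and the Admin handle are assumptions for illustration) showing a descriptor whose regions are hosted as three replicas.

// Minimal sketch, assuming an open Admin handle; names are placeholders.
static void createReplicatedTable(Admin admin) throws IOException {
  TableDescriptor replicated = TableDescriptorBuilder
    .newBuilder(TableName.valueOf("replicated_table"))
    .setRegionReplication(3)                              // primary + 2 read replicas per region
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
    .build();
  admin.createTable(replicated);
}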
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
In the class TestEncryptionKeyRotation, the method testCFKeyRotation:
@Test
public void testCFKeyRotation() throws Exception {
  // Create the table schema
  TableDescriptorBuilder tableDescriptorBuilder =
    TableDescriptorBuilder.newBuilder(TableName.valueOf("default", name.getMethodName()));
  ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"));
  String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  columnFamilyDescriptorBuilder.setEncryptionType(algorithm);
  columnFamilyDescriptorBuilder.setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", initialCFKey));
  tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptorBuilder.build());
  TableDescriptor tableDescriptor = tableDescriptorBuilder.build();
  // Create the table and some on disk files
  createTableAndFlush(tableDescriptor);
  // Verify we have store file(s) with the initial key
  final List<Path> initialPaths = findStorefilePaths(tableDescriptor.getTableName());
  assertTrue(initialPaths.size() > 0);
  for (Path path : initialPaths) {
    assertTrue("Store file " + path + " has incorrect key",
      Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
  }
  // Update the schema with a new encryption key
  columnFamilyDescriptorBuilder.setEncryptionKey(EncryptionUtil.wrapKey(conf,
    conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()),
    secondCFKey));
  TEST_UTIL.getAdmin().modifyColumnFamily(tableDescriptor.getTableName(),
    columnFamilyDescriptorBuilder.build());
  // Need a predicate for online schema change
  Thread.sleep(5000);
  // And major compact
  TEST_UTIL.getAdmin().majorCompact(tableDescriptor.getTableName());
  // waiting for the major compaction to complete
  TEST_UTIL.waitFor(30000, new Waiter.Predicate<IOException>() {
    @Override
    public boolean evaluate() throws IOException {
      return TEST_UTIL.getAdmin().getCompactionState(tableDescriptor.getTableName()) ==
        CompactionState.NONE;
    }
  });
  List<Path> pathsAfterCompaction = findStorefilePaths(tableDescriptor.getTableName());
  assertTrue(pathsAfterCompaction.size() > 0);
  for (Path path : pathsAfterCompaction) {
    assertTrue("Store file " + path + " has incorrect key",
      Bytes.equals(secondCFKey.getEncoded(), extractHFileKey(path)));
  }
  List<Path> compactedPaths = findCompactedStorefilePaths(tableDescriptor.getTableName());
  assertTrue(compactedPaths.size() > 0);
  for (Path path : compactedPaths) {
    assertTrue("Store file " + path + " retains initial key",
      Bytes.equals(initialCFKey.getEncoded(), extractHFileKey(path)));
  }
}
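The rotation step itself boils down to rebuilding the column family descriptor with a newly wrapped key and pushing it through Admin.modifyColumnFamily, then compacting so existing HFiles are rewritten. The sketch below is a hypothetical, simplified version of that step; the "cf" family name, the "hbase" wrapping subject, and the already-generated Key are assumptions for illustration.

// Minimal sketch, assuming an open Admin handle and a new Key already generated;
// the "cf" family name and the "hbase" wrapping subject are placeholders.
static void rotateCfKey(Configuration conf, Admin admin, TableName table, Key newCfKey)
    throws IOException {
  ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
    .newBuilder(Bytes.toBytes("cf"))
    .setEncryptionType(conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES))
    .setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", newCfKey))
    .build();
  admin.modifyColumnFamily(table, cf);    // online schema change
  admin.majorCompact(table);              // rewrite existing HFiles with the new key
}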
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
In the class TestEndToEndSplitTransaction, the method testCanSplitJustAfterASplit:
/**
 * Test for HBASE-20940. This test splits a region and then opens a reference over a store file.
 * Once a store file has any open reference, it makes sure that the region can't be split.
 */
@Test
public void testCanSplitJustAfterASplit() throws Exception {
  LOG.info("Starting testCanSplitJustAfterASplit");
  byte[] fam = Bytes.toBytes("cf_split");
  CompactSplit compactSplit =
    TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getCompactSplitThread();
  TableName tableName = TableName.valueOf("CanSplitTable");
  Table source = TEST_UTIL.getConnection().getTable(tableName);
  Admin admin = TEST_UTIL.getAdmin();
  // set a large min compaction file count to avoid compaction just after splitting.
  TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam)).build();
  Map<String, StoreFileReader> scanner = Maps.newHashMap();
  try {
    admin.createTable(htd);
    TEST_UTIL.loadTable(source, fam);
    compactSplit.setCompactionsEnabled(false);
    admin.split(tableName);
    TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getHBaseCluster().getRegions(tableName).size() == 2);
    List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
    regions.stream()
      .forEach(r -> r.getStores().get(0).getStorefiles().stream()
        .filter(s -> s.isReference() && !scanner.containsKey(r.getRegionInfo().getEncodedName()))
        .forEach(sf -> {
          StoreFileReader reader = ((HStoreFile) sf).getReader();
          reader.getStoreFileScanner(true, false, false, 0, 0, false);
          scanner.put(r.getRegionInfo().getEncodedName(), reader);
          LOG.info("Got reference to file = " + sf.getPath() + ",for region = " +
            r.getRegionInfo().getEncodedName());
        }));
    assertTrue("Regions did not split properly", regions.size() > 1);
    assertTrue("Could not get reference any of the store file", scanner.size() > 1);
    compactSplit.setCompactionsEnabled(true);
    for (HRegion region : regions) {
      region.compact(true);
    }
    regions.stream()
      .filter(region -> scanner.containsKey(region.getRegionInfo().getEncodedName()))
      .forEach(r -> assertFalse("Contains an open file reference which can be split",
        r.getStores().get(0).canSplit()));
  } finally {
    scanner.values().forEach(s -> {
      try {
        s.close(true);
      } catch (IOException ioe) {
        LOG.error("Failed while closing store file", ioe);
      }
    });
    scanner.clear();
    Closeables.close(source, true);
    if (!compactSplit.isCompactionsEnabled()) {
      compactSplit.setCompactionsEnabled(true);
    }
    TEST_UTIL.deleteTableIfAny(tableName);
  }
}
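Outside of the mini-cluster internals used above, the corresponding client-side flow is just a split request followed by waiting for the region count to grow. A minimal, hypothetical sketch (the Admin handle, table name, poll interval, and 60 s budget are placeholders):

// Minimal sketch, assuming an open Admin handle; the timeout and poll interval are placeholders.
static void splitAndWait(Admin admin, TableName tableName) throws Exception {
  int before = admin.getRegions(tableName).size();
  admin.split(tableName);                                 // asynchronous split request
  long deadline = System.currentTimeMillis() + 60_000L;
  while (admin.getRegions(tableName).size() <= before) {
    if (System.currentTimeMillis() > deadline) {
      throw new IllegalStateException("Split did not complete in time");
    }
    Thread.sleep(100);
  }
}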
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
In the class TestHRegion, the method testCellTTLs:
@Test
public void testCellTTLs() throws IOException {
  IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  final byte[] row = Bytes.toBytes("testRow");
  final byte[] q1 = Bytes.toBytes("q1");
  final byte[] q2 = Bytes.toBytes("q2");
  final byte[] q3 = Bytes.toBytes("q3");
  final byte[] q4 = Bytes.toBytes("q4");
  // 10 seconds
  TableDescriptor tableDescriptor = TableDescriptorBuilder
    .newBuilder(TableName.valueOf(name.getMethodName()))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam1).setTimeToLive(10).build())
    .build();
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
  region = HBaseTestingUtil.createRegionAndWAL(
    RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(),
    TEST_UTIL.getDataTestDir(), conf, tableDescriptor);
  assertNotNull(region);
  long now = EnvironmentEdgeManager.currentTime();
  // Add a cell that will expire in 5 seconds via cell TTL
  region.put(new Put(row).add(new KeyValue(row, fam1, q1, now, HConstants.EMPTY_BYTE_ARRAY,
    new ArrayBackedTag[] {
      // TTL tags specify ts in milliseconds
      new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) })));
  // Add a cell that will expire after 10 seconds via family setting
  region.put(new Put(row).addColumn(fam1, q2, now, HConstants.EMPTY_BYTE_ARRAY));
  // Add a cell that will expire in 15 seconds via cell TTL
  region.put(new Put(row).add(new KeyValue(row, fam1, q3, now + 10000 - 1,
    HConstants.EMPTY_BYTE_ARRAY, new ArrayBackedTag[] {
      // TTL tags specify ts in milliseconds
      new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) })));
  // Add a cell that will expire in 20 seconds via family setting
  region.put(new Put(row).addColumn(fam1, q4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY));
  // Flush so we are sure store scanning gets this right
  region.flush(true);
  // A query at time T+0 should return all cells
  Result r = region.get(new Get(row));
  assertNotNull(r.getValue(fam1, q1));
  assertNotNull(r.getValue(fam1, q2));
  assertNotNull(r.getValue(fam1, q3));
  assertNotNull(r.getValue(fam1, q4));
  // Increment time to T+5 seconds
  edge.incrementTime(5000);
  r = region.get(new Get(row));
  assertNull(r.getValue(fam1, q1));
  assertNotNull(r.getValue(fam1, q2));
  assertNotNull(r.getValue(fam1, q3));
  assertNotNull(r.getValue(fam1, q4));
  // Increment time to T+10 seconds
  edge.incrementTime(5000);
  r = region.get(new Get(row));
  assertNull(r.getValue(fam1, q1));
  assertNull(r.getValue(fam1, q2));
  assertNotNull(r.getValue(fam1, q3));
  assertNotNull(r.getValue(fam1, q4));
  // Increment time to T+15 seconds
  edge.incrementTime(5000);
  r = region.get(new Get(row));
  assertNull(r.getValue(fam1, q1));
  assertNull(r.getValue(fam1, q2));
  assertNull(r.getValue(fam1, q3));
  assertNotNull(r.getValue(fam1, q4));
  // Increment time to T+20 seconds
  edge.incrementTime(10000);
  r = region.get(new Get(row));
  assertNull(r.getValue(fam1, q1));
  assertNull(r.getValue(fam1, q2));
  assertNull(r.getValue(fam1, q3));
  assertNull(r.getValue(fam1, q4));
  // Fun with disappearing increments
  // Start at 1
  region.put(new Put(row).addColumn(fam1, q1, Bytes.toBytes(1L)));
  r = region.get(new Get(row));
  byte[] val = r.getValue(fam1, q1);
  assertNotNull(val);
  assertEquals(1L, Bytes.toLong(val));
  // Increment with a TTL of 5 seconds
  Increment incr = new Increment(row).addColumn(fam1, q1, 1L);
  incr.setTTL(5000);
  region.increment(incr); // 2
  // New value should be 2
  r = region.get(new Get(row));
  val = r.getValue(fam1, q1);
  assertNotNull(val);
  assertEquals(2L, Bytes.toLong(val));
  // Increment time to T+25 seconds
  edge.incrementTime(5000);
  // Value should be back to 1
  r = region.get(new Get(row));
  val = r.getValue(fam1, q1);
  assertNotNull(val);
  assertEquals(1L, Bytes.toLong(val));
  // Increment time to T+30 seconds
  edge.incrementTime(5000);
  // Original value written at T+20 should be gone now via family TTL
  r = region.get(new Get(row));
  assertNull(r.getValue(fam1, q1));
}
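To summarize the two TTL mechanisms this test exercises: a family-level TTL is set on the ColumnFamilyDescriptor (in seconds), while a per-mutation TTL is set on the Mutation itself (in milliseconds). A minimal, hypothetical sketch; the Admin and Table handles, table and family names, and values are placeholders for illustration.

// Minimal sketch, assuming open Admin and Table handles; names and values are placeholders.
static void ttlExamples(Admin admin, Table table) throws IOException {
  // Family-level TTL: every cell in "cf" expires 10 seconds after its timestamp.
  TableDescriptor ttlTable = TableDescriptorBuilder
    .newBuilder(TableName.valueOf("ttl_table"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
      .setTimeToLive(10)                                  // seconds
      .build())
    .build();
  admin.createTable(ttlTable);

  // Per-mutation TTL: cells written by this Put expire 5 seconds after their timestamp.
  // Mutation.setTTL takes milliseconds; when both TTLs are set, the shorter lifetime wins.
  Put p = new Put(Bytes.toBytes("row1"));
  p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  p.setTTL(5000L);
  table.put(p);
}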