Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class StoreFileTrackerValidationUtils, method validatePreRestoreSnapshot.
/**
 * Makes sure restoring a snapshot does not break the current SFT setup; follows
 * StoreUtils.createStoreConfiguration.
 * @param currentTableDesc  existing table's TableDescriptor
 * @param snapshotTableDesc snapshot's TableDescriptor
 * @param baseConf          current global configuration
 * @throws RestoreSnapshotException if the restore would break the current SFT setup
 */
public static void validatePreRestoreSnapshot(TableDescriptor currentTableDesc,
    TableDescriptor snapshotTableDesc, Configuration baseConf) throws RestoreSnapshotException {
  for (ColumnFamilyDescriptor cfDesc : currentTableDesc.getColumnFamilies()) {
    ColumnFamilyDescriptor snapCFDesc = snapshotTableDesc.getColumnFamily(cfDesc.getName());
    // if there is no counterpart in the snapshot, the family will just be deleted, so its
    // config does not matter
    if (snapCFDesc != null) {
      Configuration currentCompositeConf =
        StoreUtils.createStoreConfiguration(baseConf, currentTableDesc, cfDesc);
      Configuration snapCompositeConf =
        StoreUtils.createStoreConfiguration(baseConf, snapshotTableDesc, snapCFDesc);
      Class<? extends StoreFileTracker> currentSFT =
        StoreFileTrackerFactory.getTrackerClass(currentCompositeConf);
      Class<? extends StoreFileTracker> snapSFT =
        StoreFileTrackerFactory.getTrackerClass(snapCompositeConf);
      // restoration is not possible if there is an SFT mismatch
      if (currentSFT != snapSFT) {
        throw new RestoreSnapshotException("Restoring snapshot is not possible because "
          + "the config for column family " + cfDesc.getNameAsString()
          + " has incompatible configuration. Current SFT: " + currentSFT
          + " SFT from snapshot: " + snapSFT);
      }
    }
  }
}
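For orientation, here is a minimal, hypothetical sketch of how a pre-restore caller might use this check. The wrapper class and method names are illustrative and not from the HBase source, and it assumes the validation utility lives in the regionserver storefiletracker package, as in recent HBase versions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerValidationUtils;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;

public final class RestorePreCheckExample {
  // Hypothetical helper: returns false instead of throwing when the snapshot's
  // store file tracker setup is incompatible with the live table's.
  static boolean isRestoreCompatible(TableDescriptor current, TableDescriptor snapshot,
      Configuration conf) throws IOException {
    try {
      StoreFileTrackerValidationUtils.validatePreRestoreSnapshot(current, snapshot, conf);
      return true;
    } catch (RestoreSnapshotException e) {
      return false;
    }
  }
}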
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class StoreFileTrackerValidationUtils, method checkForModifyTable.
/**
 * Pre check when modifying a table.
 * <p/>
 * The basic idea is that when you want to change the store file tracker implementation, you
 * should use {@link Trackers#MIGRATION} first and then change to the destination store file
 * tracker implementation.
 * <p/>
 * There are several rules:
 * <ul>
 * <li>For a newly added family, you should not use {@link Trackers#MIGRATION}.</li>
 * <li>For modifying a family:
 * <ul>
 * <li>If the old tracker is {@link Trackers#MIGRATION}, then:
 * <ul>
 * <li>If the new tracker is also {@link Trackers#MIGRATION}, they must have the same src and
 * dst tracker.</li>
 * <li>If the new tracker is not {@link Trackers#MIGRATION}, the new tracker must be the dst
 * tracker of the old tracker.</li>
 * </ul>
 * </li>
 * <li>If the old tracker is not {@link Trackers#MIGRATION}, then:
 * <ul>
 * <li>If the new tracker is {@link Trackers#MIGRATION}, the old tracker must be the src
 * tracker of the new tracker.</li>
 * <li>If the new tracker is not {@link Trackers#MIGRATION}, the new tracker must be the same
 * as the old tracker.</li>
 * </ul>
 * </li>
 * </ul>
 * </li>
 * </ul>
 * @throws IOException when there are check errors; the upper layer should fail the
 *                     {@code ModifyTableProcedure}.
 */
public static void checkForModifyTable(Configuration conf, TableDescriptor oldTable,
    TableDescriptor newTable) throws IOException {
  for (ColumnFamilyDescriptor newFamily : newTable.getColumnFamilies()) {
    ColumnFamilyDescriptor oldFamily = oldTable.getColumnFamily(newFamily.getName());
    if (oldFamily == null) {
      checkForNewFamily(conf, newTable, newFamily);
      continue;
    }
    Configuration oldConf = StoreUtils.createStoreConfiguration(conf, oldTable, oldFamily);
    Configuration newConf = StoreUtils.createStoreConfiguration(conf, newTable, newFamily);
    Class<? extends StoreFileTracker> oldTracker =
      StoreFileTrackerFactory.getTrackerClass(oldConf);
    Class<? extends StoreFileTracker> newTracker =
      StoreFileTrackerFactory.getTrackerClass(newConf);
    if (MigrationStoreFileTracker.class.isAssignableFrom(oldTracker)) {
      Class<? extends StoreFileTracker> oldSrcTracker =
        MigrationStoreFileTracker.getSrcTrackerClass(oldConf);
      Class<? extends StoreFileTracker> oldDstTracker =
        MigrationStoreFileTracker.getDstTrackerClass(oldConf);
      if (oldTracker.equals(newTracker)) {
        // confirm that we have the same src tracker and dst tracker
        Class<? extends StoreFileTracker> newSrcTracker =
          MigrationStoreFileTracker.getSrcTrackerClass(newConf);
        if (!oldSrcTracker.equals(newSrcTracker)) {
          throw new DoNotRetryIOException("The src tracker has been changed from "
            + StoreFileTrackerFactory.getStoreFileTrackerName(oldSrcTracker) + " to "
            + StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family "
            + newFamily.getNameAsString() + " of table " + newTable.getTableName());
        }
        Class<? extends StoreFileTracker> newDstTracker =
          MigrationStoreFileTracker.getDstTrackerClass(newConf);
        if (!oldDstTracker.equals(newDstTracker)) {
          throw new DoNotRetryIOException("The dst tracker has been changed from "
            + StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " to "
            + StoreFileTrackerFactory.getStoreFileTrackerName(newDstTracker) + " for family "
            + newFamily.getNameAsString() + " of table " + newTable.getTableName());
        }
      } else {
        // we can only change to the dst tracker
        if (!newTracker.equals(oldDstTracker)) {
          throw new DoNotRetryIOException("Should migrate tracker to "
            + StoreFileTrackerFactory.getStoreFileTrackerName(oldDstTracker) + " but got "
            + StoreFileTrackerFactory.getStoreFileTrackerName(newTracker) + " for family "
            + newFamily.getNameAsString() + " of table " + newTable.getTableName());
        }
      }
    } else {
      if (!oldTracker.equals(newTracker)) {
        // can only change to MigrationStoreFileTracker, and the src tracker must be the old
        // tracker
        if (!MigrationStoreFileTracker.class.isAssignableFrom(newTracker)) {
          throw new DoNotRetryIOException("Should change to " + Trackers.MIGRATION
            + " first when migrating from "
            + StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family "
            + newFamily.getNameAsString() + " of table " + newTable.getTableName());
        }
        Class<? extends StoreFileTracker> newSrcTracker =
          MigrationStoreFileTracker.getSrcTrackerClass(newConf);
        if (!oldTracker.equals(newSrcTracker)) {
          throw new DoNotRetryIOException("Should use src tracker "
            + StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " first but got "
            + StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker)
            + " when migrating from "
            + StoreFileTrackerFactory.getStoreFileTrackerName(oldTracker) + " for family "
            + newFamily.getNameAsString() + " of table " + newTable.getTableName());
        }
        Class<? extends StoreFileTracker> newDstTracker =
          MigrationStoreFileTracker.getDstTrackerClass(newConf);
        // the src and dst tracker should not be the same
        if (newSrcTracker.equals(newDstTracker)) {
          throw new DoNotRetryIOException("The src tracker and dst tracker are both "
            + StoreFileTrackerFactory.getStoreFileTrackerName(newSrcTracker) + " for family "
            + newFamily.getNameAsString() + " of table " + newTable.getTableName());
        }
      }
    }
  }
}
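Read together with the javadoc rules above, the intended migration flow is two successive alterations of the column family. Below is a sketch under the assumption that the SFT configuration keys match the constants in StoreFileTrackerFactory and MigrationStoreFileTracker (hbase.store.file-tracker.impl and its migration src/dst variants); the family name and tracker choices are illustrative.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class SftMigrationSketch {
  // Step 1: move the family onto the MIGRATION tracker, naming the old tracker
  // as src and the target tracker as dst.
  static ColumnFamilyDescriptor step1() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
      .setConfiguration("hbase.store.file-tracker.impl", "MIGRATION")
      .setConfiguration("hbase.store.file-tracker.migration.src.impl", "DEFAULT")
      .setConfiguration("hbase.store.file-tracker.migration.dst.impl", "FILE")
      .build();
  }

  // Step 2: after the migration pass, settle on the destination tracker alone.
  static ColumnFamilyDescriptor step2() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
      .setConfiguration("hbase.store.file-tracker.impl", "FILE")
      .build();
  }
}

Applying step 2 without step 1 would trip the "Should change to MIGRATION first" branch in the check above.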
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class TestForceCacheImportantBlocks, method testCacheBlocks.
@Test
public void testCacheBlocks() throws IOException {
  // Set index block size to be the same as normal block size.
  TEST_UTIL.getConfiguration().setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, BLOCK_SIZE);
  BlockCache blockCache = BlockCacheFactory.createBlockCache(TEST_UTIL.getConfiguration());
  ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CF))
    .setMaxVersions(MAX_VERSIONS)
    .setCompressionType(COMPRESSION_ALGORITHM)
    .setBloomFilterType(BLOOM_TYPE)
    .setBlocksize(BLOCK_SIZE)
    .setBlockCacheEnabled(cfCacheEnabled)
    .build();
  HRegion region = TEST_UTIL.createTestRegion(TABLE, cfd, blockCache);
  CacheStats stats = blockCache.getStats();
  writeTestData(region);
  assertEquals(0, stats.getHitCount());
  assertEquals(0, HFile.DATABLOCK_READ_COUNT.sum());
  // Do a single get, take count of caches. If we are NOT caching DATA blocks, the miss
  // count should go up. Otherwise, all should be cached and the miss count should not rise.
  region.get(new Get(Bytes.toBytes("row" + 0)));
  assertTrue(stats.getHitCount() > 0);
  assertTrue(HFile.DATABLOCK_READ_COUNT.sum() > 0);
  long missCount = stats.getMissCount();
  region.get(new Get(Bytes.toBytes("row" + 0)));
  if (this.cfCacheEnabled) {
    assertEquals(missCount, stats.getMissCount());
  } else {
    assertTrue(stats.getMissCount() > missCount);
  }
}
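Note that the test only toggles caching of DATA blocks per family; index and bloom blocks stay cached either way, which is why the hit count rises in both branches. A minimal sketch of the per-family knobs exercised above, with illustrative values:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CacheKnobsSketch {
  // Per-family layout and cache settings of the kind the test parameterizes over.
  static ColumnFamilyDescriptor dataCacheDisabled() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
      .setBlocksize(16 * 1024)     // HFile block size in bytes (illustrative value)
      .setBlockCacheEnabled(false) // do not cache DATA blocks on read
      .build();
  }
}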
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class TestPrefetch, method testPrefetchSetInHCDWorks.
@Test
public void testPrefetchSetInHCDWorks() {
  ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder
    .newBuilder(Bytes.toBytes("f")).setPrefetchBlocksOnOpen(true).build();
  Configuration c = HBaseConfiguration.create();
  assertFalse(c.getBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, false));
  CacheConfig cc = new CacheConfig(c, columnFamilyDescriptor, blockCache, ByteBuffAllocator.HEAP);
  assertTrue(cc.shouldPrefetchOnOpen());
}
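The assertion order shows the precedence: the global default for CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY is false, yet the family-level setPrefetchBlocksOnOpen(true) wins. A hedged counterpart sketch, flipping the global flag instead; this fragment assumes the same test context (the blockCache field and JUnit asserts) and is not from the test class:

Configuration c2 = HBaseConfiguration.create();
// Turn prefetch on cluster-wide; a family with no explicit setting inherits it.
c2.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
CacheConfig cc2 =
  new CacheConfig(c2, ColumnFamilyDescriptorBuilder.of("f"), blockCache, ByteBuffAllocator.HEAP);
assertTrue(cc2.shouldPrefetchOnOpen());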
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
The class TestColumnSeeking, method testDuplicateVersions.
@SuppressWarnings("unchecked")
@Test
public void testDuplicateVersions() throws IOException {
  String family = "Family";
  byte[] familyBytes = Bytes.toBytes("Family");
  TableName table = TableName.valueOf(name.getMethodName());
  // The later setMaxVersions(3) overrides the earlier setMaxVersions(1000).
  ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(familyBytes)
    .setMaxVersions(1000).setMaxVersions(3).build();
  TableDescriptor tableDescriptor =
    TableDescriptorBuilder.newBuilder(table).setColumnFamily(familyDescriptor).build();
  RegionInfo info = RegionInfoBuilder.newBuilder(table).build();
  // Set this so that the archiver writes to the temp dir as well.
  HRegion region = TEST_UTIL.createLocalHRegion(info, tableDescriptor);
  try {
    List<String> rows = generateRandomWords(10, "row");
    List<String> allColumns = generateRandomWords(10, "column");
    List<String> values = generateRandomWords(100, "value");
    long maxTimestamp = 2;
    double selectPercent = 0.5;
    int numberOfTests = 5;
    double flushPercentage = 0.2;
    double minorPercentage = 0.2;
    double majorPercentage = 0.2;
    double putPercentage = 0.2;
    HashMap<String, KeyValue> allKVMap = new HashMap<>();
    HashMap<String, KeyValue>[] kvMaps = new HashMap[numberOfTests];
    ArrayList<String>[] columnLists = new ArrayList[numberOfTests];
    for (int i = 0; i < numberOfTests; i++) {
      kvMaps[i] = new HashMap<>();
      columnLists[i] = new ArrayList<>();
      for (String column : allColumns) {
        if (Math.random() < selectPercent) {
          columnLists[i].add(column);
        }
      }
    }
    for (String value : values) {
      for (String row : rows) {
        Put p = new Put(Bytes.toBytes(row));
        p.setDurability(Durability.SKIP_WAL);
        for (String column : allColumns) {
          for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
            KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp, value);
            if (Math.random() < putPercentage) {
              p.add(kv);
              allKVMap.put(kv.getKeyString(), kv);
              for (int i = 0; i < numberOfTests; i++) {
                if (columnLists[i].contains(column)) {
                  kvMaps[i].put(kv.getKeyString(), kv);
                }
              }
            }
          }
        }
        region.put(p);
        if (Math.random() < flushPercentage) {
          LOG.info("Flushing... ");
          region.flush(true);
        }
        if (Math.random() < minorPercentage) {
          LOG.info("Minor compacting... ");
          region.compact(false);
        }
        if (Math.random() < majorPercentage) {
          LOG.info("Major compacting... ");
          region.compact(true);
        }
      }
    }
    for (int i = 0; i < numberOfTests + 1; i++) {
      Collection<KeyValue> kvSet;
      Scan scan = new Scan();
      scan.readAllVersions();
      if (i < numberOfTests) {
        // HBASE-7700
        if (columnLists[i].isEmpty()) {
          continue;
        }
        kvSet = kvMaps[i].values();
        for (String column : columnLists[i]) {
          scan.addColumn(familyBytes, Bytes.toBytes(column));
        }
        LOG.info("ExplicitColumns scanner");
        LOG.info("Columns: " + columnLists[i].size() + " Keys: " + kvSet.size());
      } else {
        kvSet = allKVMap.values();
        LOG.info("Wildcard scanner");
        LOG.info("Columns: " + allColumns.size() + " Keys: " + kvSet.size());
      }
      InternalScanner scanner = region.getScanner(scan);
      List<Cell> results = new ArrayList<>();
      // Drain the scanner; results accumulate across next() calls.
      while (scanner.next(results)) {
      }
      assertEquals(kvSet.size(), results.size());
      assertTrue(KeyValueTestUtil.containsIgnoreMvccVersion(results, kvSet));
    }
  } finally {
    HBaseTestingUtil.closeRegionAndWAL(region);
  }
}
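The final loop exercises both read paths against the same data: an explicit-column scan, one per generated column list, and a wildcard scan over the whole family. Stripped of the test harness, the two scan shapes look like this; family and qualifier names are illustrative:

// Explicit columns: the scanner seeks only the named qualifiers.
Scan explicit = new Scan().readAllVersions();
explicit.addColumn(Bytes.toBytes("Family"), Bytes.toBytes("columnA"));

// Wildcard: no addColumn calls, so every qualifier in the family is returned.
Scan wildcard = new Scan().readAllVersions();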