Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
From the class TestHStoreFile, the method testEmptyStoreFileRestrictKeyRanges:
@Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
  StoreFileReader reader = mock(StoreFileReader.class);
  HStore store = mock(HStore.class);
  byte[] cf = Bytes.toBytes("ty");
  ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(cf);
  when(store.getColumnFamilyDescriptor()).thenReturn(cfd);
  try (StoreFileScanner scanner =
    new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, true)) {
    Scan scan = new Scan();
    scan.setColumnFamilyTimeRange(cf, 0, 1);
    assertFalse(scanner.shouldUseScanner(scan, store, 0));
  }
}
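For reference, a minimal standalone sketch of the two APIs this test combines, ColumnFamilyDescriptorBuilder.of and Scan.setColumnFamilyTimeRange; the class name and the "cf" family below are hypothetical stand-ins, not part of the test:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnFamilyTimeRangeSketch {
  public static void main(String[] args) {
    byte[] cf = Bytes.toBytes("cf");
    // of() builds an immutable descriptor with default settings for the family.
    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(cf);
    // Restrict the scan to cells of this family with timestamps in [0, 1),
    // the same per-family time range the test asserts the scanner rejects.
    Scan scan = new Scan().setColumnFamilyTimeRange(cf, 0, 1);
    System.out.println(cfd.getNameAsString() + " -> " + scan.getColumnFamilyTimeRange());
  }
}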
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
From the class TestMiniClusterLoadSequential, the method prepareForLoadTest:
protected void prepareForLoadTest() throws IOException {
  LOG.info("Starting load test: dataBlockEncoding=" + dataBlockEncoding + ", isMultiPut=" + isMultiPut);
  numKeys = numKeys();
  Admin admin = TEST_UTIL.getAdmin();
  while (admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().size() < NUM_RS) {
    LOG.info("Sleeping until " + NUM_RS + " RSs are online");
    Threads.sleepWithoutInterrupt(1000);
  }
  admin.close();
  TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TABLE).build();
  ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder(CF)
    .setCompressionType(compression).setDataBlockEncoding(dataBlockEncoding).build();
  createPreSplitLoadTestTable(tableDescriptor, familyDescriptor);
  LoadTestDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator(CF);
  writerThreads = prepareWriterThreads(dataGen, conf, TABLE);
  readerThreads = prepareReaderThreads(dataGen, conf, TABLE, 100);
}
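The builder chain above reads compression and dataBlockEncoding from test fields; the following self-contained sketch substitutes hypothetical concrete values (GZ compression, FAST_DIFF encoding, family name "load_cf") to show the same chain in isolation:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadTestFamilySketch {
  public static void main(String[] args) {
    // Chain compression and data block encoding onto the family before build().
    ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder
      .newBuilder(Bytes.toBytes("load_cf"))
      .setCompressionType(Compression.Algorithm.GZ)
      .setDataBlockEncoding(DataBlockEncoding.FAST_DIFF)
      .build();
    System.out.println(family.getCompressionType() + " / " + family.getDataBlockEncoding());
  }
}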
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
From the class TestMajorCompactorTTL, the method modifyTTL:
protected void modifyTTL(TableName tableName) throws IOException, InterruptedException {
  // Set the TTL to 5 secs, so all the files just written above will get cleaned up on compact.
  admin.disableTable(tableName);
  utility.waitTableDisabled(tableName.getName());
  TableDescriptor descriptor = admin.getDescriptor(tableName);
  ColumnFamilyDescriptor colDesc = descriptor.getColumnFamily(FAMILY);
  ColumnFamilyDescriptorBuilder cFDB = ColumnFamilyDescriptorBuilder.newBuilder(colDesc);
  cFDB.setTimeToLive(5);
  admin.modifyColumnFamily(tableName, cFDB.build());
  admin.enableTable(tableName);
  utility.waitTableEnabled(tableName);
}
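The copy-and-modify pattern here, ColumnFamilyDescriptorBuilder.newBuilder(existing) followed by setTimeToLive, can be shown without a live cluster; in this sketch the family name and class name are hypothetical:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class TtlCopySketch {
  public static void main(String[] args) {
    // Original family with the default TTL (HConstants.FOREVER).
    ColumnFamilyDescriptor original = ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("fam"));
    // newBuilder(existing) copies every setting from the original, so only the TTL
    // changes; this is the same copy-and-modify pattern modifyTTL() applies to a live table.
    ColumnFamilyDescriptor updated = ColumnFamilyDescriptorBuilder.newBuilder(original)
      .setTimeToLive(5)
      .build();
    System.out.println(original.getTimeToLive() + " -> " + updated.getTimeToLive());
  }
}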
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
From the class TestHBaseFsckEncryption, the method setUp:
@Before
public void setUp() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  conf.setInt("hfile.format.version", 3);
  conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
  conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
  // Create the test encryption key
  SecureRandom rng = new SecureRandom();
  byte[] keyBytes = new byte[AES.KEY_LENGTH];
  rng.nextBytes(keyBytes);
  String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  cfKey = new SecretKeySpec(keyBytes, algorithm);
  // Start the minicluster
  TEST_UTIL.startMiniCluster(3);
  // Create the table
  TableDescriptorBuilder tableDescriptorBuilder =
    TableDescriptorBuilder.newBuilder(TableName.valueOf("default", "TestHBaseFsckEncryption"));
  ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder
    .newBuilder(Bytes.toBytes("cf"))
    .setEncryptionType(algorithm)
    .setEncryptionKey(EncryptionUtil.wrapKey(conf,
      conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()), cfKey))
    .build();
  tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor);
  tableDescriptor = tableDescriptorBuilder.build();
  TEST_UTIL.getAdmin().createTable(tableDescriptor);
  TEST_UTIL.waitTableAvailable(tableDescriptor.getTableName(), 5000);
}
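A trimmed sketch of just the encryption-related builder calls, runnable without a minicluster; it reuses the test's KeyProviderForTesting wiring and "hbase" master key alias, while the class name and "cf" family are hypothetical:

import java.security.SecureRandom;
import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class EncryptedFamilySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Same key-provider wiring as the test; wrapKey() needs a provider to resolve the subject.
    conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
    // Random 128-bit AES key for the column family.
    byte[] keyBytes = new byte[16];
    new SecureRandom().nextBytes(keyBytes);
    SecretKeySpec cfKey = new SecretKeySpec(keyBytes, "AES");
    // Store the CF key wrapped under the "hbase" master key, as setUp() does.
    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
      .newBuilder(Bytes.toBytes("cf"))
      .setEncryptionType("AES")
      .setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", cfKey))
      .build();
    System.out.println(cfd.getEncryptionType());
  }
}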
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
From the class TestZooKeeperTableArchiveClient, the method testArchivingOnSingleTable:
@Test
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);
  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
  // create the region
  ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM);
  HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
  List<HRegion> regions = new ArrayList<>();
  regions.add(region);
  Mockito.doReturn(regions).when(rss).getRegions();
  final CompactedHFilesDischarger compactionCleaner =
    new CompactedHFilesDischarger(100, stop, rss, false);
  loadFlushAndCompact(region, TEST_FAM);
  compactionCleaner.chore();
  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    CommonFSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());
  runCleaner(cleaner, finished, stop);
  // know the cleaner ran, so now check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);
  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
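A side note on the shorthand used for hcd above: ColumnFamilyDescriptorBuilder.of(family) is equivalent to newBuilder(family).build(), yielding an immutable descriptor with default settings. A small hypothetical sketch:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class OfVsBuilderSketch {
  public static void main(String[] args) {
    byte[] fam = Bytes.toBytes("fam");
    // of() is the one-liner; newBuilder() is the form to use when settings must be chained.
    ColumnFamilyDescriptor viaOf = ColumnFamilyDescriptorBuilder.of(fam);
    ColumnFamilyDescriptor viaBuilder = ColumnFamilyDescriptorBuilder.newBuilder(fam).build();
    System.out.println(viaOf.getNameAsString().equals(viaBuilder.getNameAsString())); // true
  }
}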