Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
From the class TestFIFOCompactionPolicy, method testSanityCheckMinVersion.
@Test
public void testSanityCheckMinVersion() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
  TEST_UTIL.startMiniCluster(1);
  Admin admin = TEST_UTIL.getAdmin();
  TableName tableName = TableName.valueOf(getClass().getSimpleName() + "-MinVersion");
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  // 1 sec TTL
  colDesc.setTimeToLive(1);
  colDesc.setMinVersions(1);
  desc.addFamily(colDesc);
  try {
    admin.createTable(desc);
    Assert.fail();
  } catch (Exception e) {
    // Expected: FIFO compaction rejects column families with MIN_VERSIONS > 0.
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
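For contrast, a minimal sketch of a descriptor that would pass the sanity check: the same FIFO configuration, but with MIN_VERSIONS left at its default of 0. This is illustrative only and reuses the tableName, family, and admin variables from the test above.

HTableDescriptor validDesc = new HTableDescriptor(tableName);
validDesc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
  FIFOCompactionPolicy.class.getName());
validDesc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
  DisabledRegionSplitPolicy.class.getName());
HColumnDescriptor validCol = new HColumnDescriptor(family);
// FIFO compaction requires a finite TTL...
validCol.setTimeToLive(1);
// ...and MIN_VERSIONS == 0, which is the default, so this create is expected to succeed.
validDesc.addFamily(validCol);
admin.createTable(validDesc);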
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
From the class TestFIFOCompactionPolicy, method prepareData.
private Store prepareData() throws IOException {
  Admin admin = TEST_UTIL.getAdmin();
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  // 1 sec TTL
  colDesc.setTimeToLive(1);
  desc.addFamily(colDesc);
  admin.createTable(desc);
  Table table = TEST_UTIL.getConnection().getTable(tableName);
  Random rand = new Random();
  TimeOffsetEnvironmentEdge edge = (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[128 * 1024];
      rand.nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    admin.flush(tableName);
    // Push mock time past the 1 sec TTL so the file just flushed is expired.
    edge.increment(1001);
  }
  return getStoreWithName(tableName);
}
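Note that prepareData casts EnvironmentEdgeManager.getDelegate() to TimeOffsetEnvironmentEdge, so a matching edge must already be installed as the global delegate. A sketch of the setup this assumes (the hook name is illustrative, not from the source):

@BeforeClass
public static void setUpEnvironmentEdge() {
  // Install a controllable clock so prepareData can age store files past the TTL.
  TimeOffsetEnvironmentEdge edge = new TimeOffsetEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
}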
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
From the class TestStore, method testDeleteExpiredStoreFiles.
/**
 * @param minVersions the MIN_VERSIONS for the column family
 */
public void testDeleteExpiredStoreFiles(int minVersions) throws Exception {
  int storeFileNum = 4;
  int ttl = 4;
  IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
  EnvironmentEdgeManagerTestHelper.injectEdge(edge);
  Configuration conf = HBaseConfiguration.create();
  // Enable expired store file deletion.
  conf.setBoolean("hbase.store.delete.expired.storefile", true);
  // Set the compaction threshold higher to avoid normal compactions.
  conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 5);
  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setMinVersions(minVersions);
  hcd.setTimeToLive(ttl);
  init(name.getMethodName() + "-" + minVersions, conf, hcd);
  long storeTtl = this.store.getScanInfo().getTtl();
  long sleepTime = storeTtl / storeFileNum;
  long timeStamp;
  // Store files will be spaced storeTtl / storeFileNum apart in time.
  for (int i = 1; i <= storeFileNum; i++) {
    LOG.info("Adding some data for the store file #" + i);
    timeStamp = EnvironmentEdgeManager.currentTime();
    this.store.add(new KeyValue(row, family, qf1, timeStamp, (byte[]) null), null);
    this.store.add(new KeyValue(row, family, qf2, timeStamp, (byte[]) null), null);
    this.store.add(new KeyValue(row, family, qf3, timeStamp, (byte[]) null), null);
    flush(i);
    edge.incrementTime(sleepTime);
  }
  // Verify the total number of store files.
  Assert.assertEquals(storeFileNum, this.store.getStorefiles().size());
  // There will be no compaction due to the threshold above; the last file is never removed.
  for (int i = 1; i <= storeFileNum - 1; i++) {
    // Verify that expired store files are deleted.
    assertNull(this.store.requestCompaction());
    Collection<StoreFile> sfs = this.store.getStorefiles();
    // Ensure i files are gone.
    if (minVersions == 0) {
      assertEquals(storeFileNum - i, sfs.size());
      // Ensure only non-expired files remain.
      for (StoreFile sf : sfs) {
        assertTrue(sf.getReader().getMaxTimestamp() >= (edge.currentTime() - storeTtl));
      }
    } else {
      assertEquals(storeFileNum, sfs.size());
    }
    // Let the next store file expire.
    edge.incrementTime(sleepTime);
  }
  assertNull(this.store.requestCompaction());
  Collection<StoreFile> sfs = this.store.getStorefiles();
  // Assert that the last expired file is not removed.
  if (minVersions == 0) {
    assertEquals(1, sfs.size());
  }
  long ts = sfs.iterator().next().getReader().getMaxTimestamp();
  assertTrue(ts < (edge.currentTime() - storeTtl));
  for (StoreFile sf : sfs) {
    sf.closeReader(true);
  }
}
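The helper is parameterized on minVersions; plausible driver tests (names illustrative, not from the source) would exercise both branches of the assertions:

@Test
public void testDeleteExpiredStoreFiles0() throws Exception {
  // MIN_VERSIONS == 0: expired store files are eligible for deletion.
  testDeleteExpiredStoreFiles(0);
}

@Test
public void testDeleteExpiredStoreFiles1() throws Exception {
  // MIN_VERSIONS > 0: store files must be kept even after the TTL passes.
  testDeleteExpiredStoreFiles(1);
}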
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
From the class TestStoreFile, method testEmptyStoreFileRestrictKeyRanges.
@Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
  StoreFileReader reader = mock(StoreFileReader.class);
  Store store = mock(Store.class);
  HColumnDescriptor hcd = mock(HColumnDescriptor.class);
  byte[] cf = Bytes.toBytes("ty");
  when(hcd.getName()).thenReturn(cf);
  when(store.getFamily()).thenReturn(hcd);
  StoreFileScanner scanner = new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, true);
  Scan scan = new Scan();
  scan.setColumnFamilyTimeRange(cf, 0, 1);
  assertFalse(scanner.shouldUseScanner(scan, store, 0));
}
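The mocks above exercise shouldUseScanner directly; in client code the same per-family time range is set on an ordinary scan. A minimal sketch, assuming a table named "demo" with a family "ty" (both names are assumptions):

try (Connection conn = ConnectionFactory.createConnection(conf);
     Table t = conn.getTable(TableName.valueOf("demo"))) {
  Scan s = new Scan();
  // Restrict only the "ty" family to timestamps in [0, 1).
  s.setColumnFamilyTimeRange(Bytes.toBytes("ty"), 0, 1);
  try (ResultScanner rs = t.getScanner(s)) {
    for (Result r : rs) {
      // Store files whose timestamps fall entirely outside the range are skipped.
    }
  }
}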
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
From the class MobSnapshotTestingUtils, method createMobTable.
private static void createMobTable(final HBaseTestingUtility util, final TableName tableName,
    final byte[][] splitKeys, int regionReplication, final byte[]... families)
    throws IOException, InterruptedException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.setRegionReplication(regionReplication);
  for (byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setMobEnabled(true);
    // A threshold of 0 sends every cell to MOB storage.
    hcd.setMobThreshold(0L);
    htd.addFamily(hcd);
  }
  util.getAdmin().createTable(htd, splitKeys);
  SnapshotTestingUtils.waitForTableToBeOnline(util, tableName);
  assertEquals((splitKeys.length + 1) * regionReplication,
    util.getAdmin().getTableRegions(tableName).size());
}
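A sketch of how the private helper might be driven from within the same utility class (split key, replication count, and family bytes are illustrative values, not from the source):

byte[][] splitKeys = new byte[][] { Bytes.toBytes("m") };
createMobTable(util, TableName.valueOf("mobTable"), splitKeys, 2, Bytes.toBytes("f"));
// One split key yields two regions; with regionReplication == 2 the
// assertion above expects (1 + 1) * 2 == 4 region replicas.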