Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
The class TestFilterFromRegionSide, method setUpBeforeClass:
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
  for (byte[] family : FAMILIES) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    htd.addFamily(hcd);
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  REGION = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);
  for (Put put : createPuts(ROWS, FAMILIES, QUALIFIERS, VALUE)) {
    REGION.put(put);
  }
}
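The family descriptors above are created with default settings. As a hedged sketch (not part of the test; the family name "cf" and the values are illustrative only, and the same htd variable and imports as above are assumed), HColumnDescriptor also exposes per-family tuning that can be applied before the family is added to the table descriptor:

// Illustrative sketch: per-family settings on an HColumnDescriptor.
HColumnDescriptor tuned = new HColumnDescriptor(Bytes.toBytes("cf")); // "cf" is a placeholder name
tuned.setMaxVersions(3);            // retain up to three versions of each cell
tuned.setBlocksize(64 * 1024);      // 64 KB HFile block size
tuned.setTimeToLive(7 * 24 * 3600); // expire cells after one week (seconds)
htd.addFamily(tuned);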
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
The class TestRegionObserverInterface, method testCompactionOverride:
/**
 * Tests overriding compaction handling via coprocessor hooks
 * @throws Exception
 */
@Test(timeout = 300000)
public void testCompactionOverride() throws Exception {
  final TableName compactTable = TableName.valueOf(name.getMethodName());
  Admin admin = util.getAdmin();
  if (admin.tableExists(compactTable)) {
    admin.disableTable(compactTable);
    admin.deleteTable(compactTable);
  }
  HTableDescriptor htd = new HTableDescriptor(compactTable);
  htd.addFamily(new HColumnDescriptor(A));
  htd.addCoprocessor(EvenOnlyCompactor.class.getName());
  admin.createTable(htd);
  Table table = util.getConnection().getTable(compactTable);
  for (long i = 1; i <= 10; i++) {
    byte[] iBytes = Bytes.toBytes(i);
    Put put = new Put(iBytes);
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(A, A, iBytes);
    table.put(put);
  }
  HRegion firstRegion = cluster.getRegions(compactTable).get(0);
  Coprocessor cp =
      firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class.getName());
  assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
  EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;
  // force a flush, then a major compaction
  long ts = System.currentTimeMillis();
  admin.flush(compactTable);
  // wait for the flush to complete
  for (int i = 0; i < 10; i++) {
    if (compactor.lastFlush >= ts) {
      break;
    }
    Thread.sleep(1000);
  }
  assertTrue("Flush didn't complete", compactor.lastFlush >= ts);
  LOG.debug("Flush complete");
  ts = compactor.lastFlush;
  admin.majorCompact(compactTable);
  // wait for the compaction to complete
  for (int i = 0; i < 30; i++) {
    if (compactor.lastCompaction >= ts) {
      break;
    }
    Thread.sleep(1000);
  }
  LOG.debug("Last compaction was at " + compactor.lastCompaction);
  assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);
  // only even rows should remain
  ResultScanner scanner = table.getScanner(new Scan());
  try {
    for (long i = 2; i <= 10; i += 2) {
      Result r = scanner.next();
      assertNotNull(r);
      assertFalse(r.isEmpty());
      byte[] iBytes = Bytes.toBytes(i);
assertArrayEquals("Row should be " + i, r.getRow(), iBytes);
assertArrayEquals("Value should be " + i, r.getValue(A, A), iBytes);
    }
  } finally {
    scanner.close();
  }
  table.close();
}
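The two sleep loops above could also be expressed with HBaseTestingUtility#waitFor and a Waiter.Predicate. The following is only a sketch of that alternative, assuming the same util, admin, compactTable and compactor variables as the test (compactor treated as effectively final); the 10-second timeout is illustrative:

// Sketch only: wait for the coprocessor to observe the flush without hand-rolled polling.
final long flushTs = System.currentTimeMillis();
admin.flush(compactTable);
util.waitFor(10000, new Waiter.Predicate<Exception>() {
  @Override
  public boolean evaluate() throws Exception {
    return compactor.lastFlush >= flushTs; // true once the flush hook has fired
  }
});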
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
The class TestFilterWrapper, method createTable:
private static void createTable() {
  assertNotNull("HBaseAdmin is not initialized successfully.", admin);
  if (admin != null) {
    HTableDescriptor desc = new HTableDescriptor(name);
    HColumnDescriptor coldef = new HColumnDescriptor(Bytes.toBytes("f1"));
    desc.addFamily(coldef);
    try {
      admin.createTable(desc);
      assertTrue("Failed to create the table", admin.tableExists(name));
    } catch (IOException e) {
      // e is never null here, so this unconditionally fails the test with the given message
      assertNull("Exception found while creating table", e);
    }
  }
}
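Unlike testCompactionOverride above, this helper assumes the table does not already exist. A hedged sketch of a more defensive variant (same admin and name fields as the test; the disable/delete pattern is borrowed from the compaction test):

// Sketch only: drop a leftover table before re-creating it.
if (admin.tableExists(name)) {
  admin.disableTable(name);
  admin.deleteTable(name);
}
HTableDescriptor desc = new HTableDescriptor(name);
desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f1")));
admin.createTable(desc);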
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
The class TestHFileOutputFormat2, method testColumnFamilySettings:
/**
 * Test that {@link HFileOutputFormat2} RecordWriter uses compression and
 * bloom filter settings from the column family descriptor
 */
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
public void testColumnFamilySettings() throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
  RecordWriter<ImmutableBytesWritable, Cell> writer = null;
  TaskAttemptContext context = null;
  Path dir = util.getDataTestDir("testColumnFamilySettings");
  // Setup table descriptor
  Table table = Mockito.mock(Table.class);
  RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
  HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
  Mockito.doReturn(htd).when(table).getTableDescriptor();
  for (HColumnDescriptor hcd : HBaseTestingUtility.generateColumnDescriptors()) {
    htd.addFamily(hcd);
  }
  // set up the table to return some mock keys
  setupMockStartKeys(regionLocator);
  try {
    // partial map red setup to get an operational writer for testing
    // We turn off the sequence file compression, because DefaultCodec
    // pollutes the GZip codec pool with an incompatible compressor.
    conf.set("io.seqfile.compression.type", "NONE");
    conf.set("hbase.fs.tmp.dir", dir.toString());
    // turn locality off to eliminate getRegionLocation fail-and-retry time when writing kvs
    conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, false);
    Job job = new Job(conf, "testLocalMRIncrementalLoad");
    job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilySettings"));
    setupRandomGeneratorMapper(job, false);
    HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
    FileOutputFormat.setOutputPath(job, dir);
    context = createTestTaskAttemptContext(job);
    HFileOutputFormat2 hof = new HFileOutputFormat2();
    writer = hof.getRecordWriter(context);
    // write out random rows
    writeRandomKeyValues(writer, context, htd.getFamiliesKeys(), ROWSPERSPLIT);
    writer.close(context);
    // Make sure that a directory was created for every CF
    FileSystem fs = dir.getFileSystem(conf);
    // commit so that the filesystem has one directory per column family
    hof.getOutputCommitter(context).commitTask(context);
    hof.getOutputCommitter(context).commitJob(context);
    FileStatus[] families = FSUtils.listStatus(fs, dir, new FSUtils.FamilyDirFilter(fs));
    assertEquals(htd.getFamilies().size(), families.length);
    for (FileStatus f : families) {
      String familyStr = f.getPath().getName();
      HColumnDescriptor hcd = htd.getFamily(Bytes.toBytes(familyStr));
      // verify that the compression on this file matches the configured compression
      Path dataFilePath = fs.listStatus(f.getPath())[0].getPath();
      Reader reader = HFile.createReader(fs, dataFilePath, new CacheConfig(conf), conf);
      Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
      byte[] bloomFilter = fileInfo.get(StoreFile.BLOOM_FILTER_TYPE_KEY);
      if (bloomFilter == null) {
        bloomFilter = Bytes.toBytes("NONE");
      }
      assertEquals("Incorrect bloom filter used for column family " + familyStr
          + " (reader: " + reader + ")",
          hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter)));
      assertEquals("Incorrect compression used for column family " + familyStr
          + " (reader: " + reader + ")",
          hcd.getCompressionType(), reader.getFileContext().getCompression());
    }
  } finally {
    dir.getFileSystem(conf).delete(dir, true);
  }
}
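For reference, the settings this test reads back from the HFiles are configured on the HColumnDescriptor itself. A minimal sketch (family name, codec, and bloom type chosen purely for illustration; generateColumnDescriptors() above produces such combinations automatically):

// Sketch only: configure compression and bloom filtering for one family.
HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("info"));
hcd.setCompressionType(Compression.Algorithm.GZ); // HFiles for this family are GZ-compressed
hcd.setBloomFilterType(BloomType.ROWCOL);         // row+column bloom filter
htd.addFamily(hcd);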
Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.
The class TestImportExport, method testWithDeletes:
@Test
public void testWithDeletes() throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  desc.addFamily(new HColumnDescriptor(FAMILYA)
      .setMaxVersions(5)
      .setKeepDeletedCells(KeepDeletedCells.TRUE));
  UTIL.getAdmin().createTable(desc);
  try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
    Put p = new Put(ROW1);
    p.addColumn(FAMILYA, QUAL, now, QUAL);
    p.addColumn(FAMILYA, QUAL, now + 1, QUAL);
    p.addColumn(FAMILYA, QUAL, now + 2, QUAL);
    p.addColumn(FAMILYA, QUAL, now + 3, QUAL);
    p.addColumn(FAMILYA, QUAL, now + 4, QUAL);
    t.put(p);
    Delete d = new Delete(ROW1, now + 3);
    t.delete(d);
    d = new Delete(ROW1);
    d.addColumns(FAMILYA, QUAL, now + 2);
    t.delete(d);
  }
  String[] args = new String[] {
      "-D" + Export.RAW_SCAN + "=true", name.getMethodName(), FQ_OUTPUT_DIR,
      // max number of key versions per key to export
      "1000" };
  assertTrue(runExport(args));
  final String IMPORT_TABLE = name.getMethodName() + "import";
  desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE));
  desc.addFamily(new HColumnDescriptor(FAMILYA)
      .setMaxVersions(5)
      .setKeepDeletedCells(KeepDeletedCells.TRUE));
  UTIL.getAdmin().createTable(desc);
  try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
    args = new String[] { IMPORT_TABLE, FQ_OUTPUT_DIR };
    assertTrue(runImport(args));
    Scan s = new Scan();
    s.setMaxVersions();
    s.setRaw(true);
    ResultScanner scanner = t.getScanner(s);
    Result r = scanner.next();
    Cell[] res = r.rawCells();
    assertTrue(CellUtil.isDeleteFamily(res[0]));
    assertEquals(now + 4, res[1].getTimestamp());
    assertEquals(now + 3, res[2].getTimestamp());
    assertTrue(CellUtil.isDelete(res[3]));
    assertEquals(now + 2, res[4].getTimestamp());
    assertEquals(now + 1, res[5].getTimestamp());
    assertEquals(now, res[6].getTimestamp());
  }
}
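HColumnDescriptor and HTableDescriptor are deprecated as of HBase 2.0 in favor of builder-style descriptors. A hedged sketch of the equivalent table creation with the 2.x client API (same FAMILYA, UTIL and name fields as the test are assumed):

// Sketch only: builder-style equivalent of the table creation above (HBase 2.x client API).
TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA)
        .setMaxVersions(5)
        .setKeepDeletedCells(KeepDeletedCells.TRUE)
        .build())
    .build();
UTIL.getAdmin().createTable(td);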