Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class TestDefaultMemStore, method testShouldFlushMeta.
@Test
public void testShouldFlushMeta() throws Exception {
// Write an edit to META and ensure that shouldFlush (which the periodic memstore
// flusher invokes) returns true after SYSTEM_CACHE_FLUSH_INTERVAL, even though
// MEMSTORE_PERIODIC_FLUSH_INTERVAL is set to a higher value.
Configuration conf = new Configuration();
conf.setInt(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, HRegion.SYSTEM_CACHE_FLUSH_INTERVAL * 10);
HBaseTestingUtil hbaseUtility = new HBaseTestingUtil(conf);
Path testDir = hbaseUtility.getDataTestDir();
EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
EnvironmentEdgeManager.injectEdge(edge);
edge.setCurrentTimeMillis(1234);
WALFactory wFactory = new WALFactory(conf, "1234");
TableDescriptors tds = new FSTableDescriptors(conf);
FSTableDescriptors.tryUpdateMetaTableDescriptor(conf);
HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir, conf,
  tds.get(TableName.META_TABLE_NAME), wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO));
// Parameterized tests add a [#] suffix to the method name; replace the [ and ] characters.
TableDescriptor desc = TableDescriptorBuilder
  .newBuilder(TableName.valueOf(name.getMethodName().replaceAll("[\\[\\]]", "_")))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("foo")).build();
RegionInfo hri = RegionInfoBuilder.newBuilder(desc.getTableName())
  .setStartKey(Bytes.toBytes("row_0200")).setEndKey(Bytes.toBytes("row_0300")).build();
HRegion r = HRegion.createHRegion(hri, testDir, conf, desc, wFactory.getWAL(hri));
addRegionToMETA(meta, r);
edge.setCurrentTimeMillis(1234 + 100);
StringBuilder sb = new StringBuilder();
assertFalse(meta.shouldFlush(sb));
edge.setCurrentTimeMillis(edge.currentTime() + HRegion.SYSTEM_CACHE_FLUSH_INTERVAL + 1);
assertTrue(meta.shouldFlush(sb));
}
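The TableDescriptor work above is the standard HBase 2.x builder pattern: descriptors are immutable, so they are assembled with TableDescriptorBuilder and ColumnFamilyDescriptorBuilder and frozen with build(). A minimal sketch of the same construction, using an illustrative table name and family that are assumptions rather than values from the test:
// A minimal sketch; "example_table" and "f" are illustrative assumptions.
TableDescriptor desc = TableDescriptorBuilder
  .newBuilder(TableName.valueOf("example_table"))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
  .build();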
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class TestGetClosestAtOrBefore, method testGetClosestRowBefore2.
/**
* For HBASE-694
*/
@Test
public void testGetClosestRowBefore2() throws IOException {
HRegion region = null;
byte[] c0 = HBaseTestingUtil.COLUMNS[0];
try {
TableName tn = TableName.valueOf(testName.getMethodName());
TableDescriptor htd = UTIL.createTableDescriptor(tn);
region = UTIL.createLocalHRegion(htd, null, null);
Put p = new Put(T10);
p.addColumn(c0, c0, T10);
region.put(p);
p = new Put(T30);
p.addColumn(c0, c0, T30);
region.put(p);
p = new Put(T40);
p.addColumn(c0, c0, T40);
region.put(p);
// try finding "035"
Result r = UTIL.getClosestRowBefore(region, T35, c0);
assertTrue(Bytes.equals(T30, r.getRow()));
region.flush(true);
// try finding "035"
r = UTIL.getClosestRowBefore(region, T35, c0);
assertTrue(Bytes.equals(T30, r.getRow()));
p = new Put(T20);
p.addColumn(c0, c0, T20);
region.put(p);
// try finding "035"
r = UTIL.getClosestRowBefore(region, T35, c0);
assertTrue(Bytes.equals(T30, r.getRow()));
region.flush(true);
// try finding "035"
r = UTIL.getClosestRowBefore(region, T35, c0);
assertTrue(Bytes.equals(T30, r.getRow()));
} finally {
if (region != null) {
try {
WAL wal = region.getWAL();
region.close();
wal.close();
} catch (Exception e) {
e.printStackTrace();
}
}
}
}
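Here the descriptor is produced by the testing utility (UTIL.createTableDescriptor) rather than built inline. Roughly the same thing can be spelled out with the builders; a sketch, where the single family name is an assumption for illustration and not necessarily what the utility actually uses:
// Sketch only; the family name "fam" is an illustrative assumption.
TableName tn = TableName.valueOf("testGetClosestRowBefore2");
TableDescriptor htd = TableDescriptorBuilder.newBuilder(tn)
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam"))
  .build();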
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class TestGetClosestAtOrBefore, method testUsingMetaAndBinary.
@Test
public void testUsingMetaAndBinary() throws IOException {
Path rootdir = UTIL.getDataTestDirOnTestFS();
// Bump up the flush size, else we bind up when using the default catalog flush of 16k.
TableDescriptors tds = new FSTableDescriptors(UTIL.getConfiguration());
FSTableDescriptors.tryUpdateMetaTableDescriptor(UTIL.getConfiguration());
TableDescriptor td = tds.get(TableName.META_TABLE_NAME);
td = TableDescriptorBuilder.newBuilder(td).setMemStoreFlushSize(64 * 1024 * 1024).build();
HRegion mr = HBaseTestingUtil.createRegionAndWAL(RegionInfoBuilder.FIRST_META_REGIONINFO, rootdir, conf, td);
try {
// Write rows for three tables 'A', 'B', and 'C'.
for (char c = 'A'; c < 'D'; c++) {
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("" + c)).build();
final int last = 128;
final int interval = 2;
for (int i = 0; i <= last; i += interval) {
RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
  .setStartKey(i == 0 ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes((byte) i))
  .setEndKey(i == last ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes((byte) i + interval))
  .build();
Put put = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
put.setDurability(Durability.SKIP_WAL);
LOG.info("Put {}", put);
mr.put(put);
}
}
InternalScanner s = mr.getScanner(new Scan());
try {
List<Cell> keys = new ArrayList<>();
while (s.next(keys)) {
LOG.info("Scan {}", keys);
keys.clear();
}
} finally {
s.close();
}
findRow(mr, 'C', 44, 44);
findRow(mr, 'C', 45, 44);
findRow(mr, 'C', 46, 46);
findRow(mr, 'C', 43, 42);
mr.flush(true);
findRow(mr, 'C', 44, 44);
findRow(mr, 'C', 45, 44);
findRow(mr, 'C', 46, 46);
findRow(mr, 'C', 43, 42);
// Now delete 'C' and make sure I don't get entries from 'B'.
byte[] firstRowInC = RegionInfo.createRegionName(TableName.valueOf("" + 'C'),
  HConstants.EMPTY_BYTE_ARRAY, HConstants.ZEROES, false);
Scan scan = new Scan().withStartRow(firstRowInC);
s = mr.getScanner(scan);
try {
List<Cell> keys = new ArrayList<>();
while (s.next(keys)) {
LOG.info("Delete {}", keys);
mr.delete(new Delete(CellUtil.cloneRow(keys.get(0))));
keys.clear();
}
} finally {
s.close();
}
// Assert we get null back (pass -1).
findRow(mr, 'C', 44, -1);
findRow(mr, 'C', 45, -1);
findRow(mr, 'C', 46, -1);
findRow(mr, 'C', 43, -1);
mr.flush(true);
findRow(mr, 'C', 44, -1);
findRow(mr, 'C', 45, -1);
findRow(mr, 'C', 46, -1);
findRow(mr, 'C', 43, -1);
} finally {
HBaseTestingUtil.closeRegionAndWAL(mr);
}
}
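Because a TableDescriptor cannot be modified in place, the test tweaks the meta table's descriptor with the copy-and-modify pattern: wrap the existing descriptor in a new builder, override one property, and build a fresh instance. A short sketch of that pattern as used above:
// Copy-and-modify sketch; td is the meta table descriptor loaded above and
// the 64 MB flush size mirrors the value the test sets.
TableDescriptor modified = TableDescriptorBuilder.newBuilder(td)
  .setMemStoreFlushSize(64 * 1024 * 1024)
  .build();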
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class TestGetClosestAtOrBefore, method testGetClosestRowBefore3.
/**
* Test file of multiple deletes and with deletes as final key.
* @see <a href="https://issues.apache.org/jira/browse/HBASE-751">HBASE-751</a>
*/
@Test
public void testGetClosestRowBefore3() throws IOException {
HRegion region = null;
byte[] c0 = HBaseTestingUtil.COLUMNS[0];
byte[] c1 = HBaseTestingUtil.COLUMNS[1];
try {
TableName tn = TableName.valueOf(testName.getMethodName());
TableDescriptor htd = UTIL.createTableDescriptor(tn);
region = UTIL.createLocalHRegion(htd, null, null);
Put p = new Put(T00);
p.addColumn(c0, c0, T00);
region.put(p);
p = new Put(T10);
p.addColumn(c0, c0, T10);
region.put(p);
p = new Put(T20);
p.addColumn(c0, c0, T20);
region.put(p);
Result r = UTIL.getClosestRowBefore(region, T20, c0);
assertTrue(Bytes.equals(T20, r.getRow()));
Delete d = new Delete(T20);
d.addColumn(c0, c0);
region.delete(d);
r = UTIL.getClosestRowBefore(region, T20, c0);
assertTrue(Bytes.equals(T10, r.getRow()));
p = new Put(T30);
p.addColumn(c0, c0, T30);
region.put(p);
r = UTIL.getClosestRowBefore(region, T30, c0);
assertTrue(Bytes.equals(T30, r.getRow()));
d = new Delete(T30);
d.addColumn(c0, c0);
region.delete(d);
r = UTIL.getClosestRowBefore(region, T30, c0);
assertTrue(Bytes.equals(T10, r.getRow()));
r = UTIL.getClosestRowBefore(region, T31, c0);
assertTrue(Bytes.equals(T10, r.getRow()));
region.flush(true);
// try finding "010" after flush
r = UTIL.getClosestRowBefore(region, T30, c0);
assertTrue(Bytes.equals(T10, r.getRow()));
r = UTIL.getClosestRowBefore(region, T31, c0);
assertTrue(Bytes.equals(T10, r.getRow()));
// Put into a different column family. Should make it so I still get t10
p = new Put(T20);
p.addColumn(c1, c1, T20);
region.put(p);
r = UTIL.getClosestRowBefore(region, T30, c0);
assertTrue(Bytes.equals(T10, r.getRow()));
r = UTIL.getClosestRowBefore(region, T31, c0);
assertTrue(Bytes.equals(T10, r.getRow()));
region.flush(true);
r = UTIL.getClosestRowBefore(region, T30, c0);
assertTrue(Bytes.equals(T10, r.getRow()));
r = UTIL.getClosestRowBefore(region, T31, c0);
assertTrue(Bytes.equals(T10, r.getRow()));
// Now try a combo of memstore and store files. Delete the t20 COLUMNS[1]
// entry in memory; make sure we get back t10 again.
d = new Delete(T20);
d.addColumn(c1, c1);
region.delete(d);
r = UTIL.getClosestRowBefore(region, T30, c0);
assertTrue(Bytes.equals(T10, r.getRow()));
// Ask for a value off the end of the file. Should return t10.
r = UTIL.getClosestRowBefore(region, T31, c0);
assertTrue(Bytes.equals(T10, r.getRow()));
region.flush(true);
r = UTIL.getClosestRowBefore(region, T31, c0);
assertTrue(Bytes.equals(T10, r.getRow()));
// OK. Let the candidate come out of the hfile, but have the delete of
// the candidate be in memory.
p = new Put(T11);
p.addColumn(c0, c0, T11);
region.put(p);
d = new Delete(T10);
d.addColumn(c1, c1);
r = UTIL.getClosestRowBefore(region, T12, c0);
assertTrue(Bytes.equals(T11, r.getRow()));
} finally {
if (region != null) {
try {
WAL wal = region.getWAL();
region.close();
wal.close();
} catch (Exception e) {
e.printStackTrace();
}
}
}
}
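The deletes above use Delete.addColumn, which removes only the newest version of the named cell; Delete.addColumns removes every version. A brief sketch contrasting the two, reusing the test's row and family constants:
// Sketch contrasting the two Delete flavors (T20 and c0 as in the test).
Delete latestOnly = new Delete(T20).addColumn(c0, c0);   // newest version of the cell
Delete allVersions = new Delete(T20).addColumns(c0, c0); // every version of the cell
region.delete(latestOnly);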
Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.
The class TestCompactionPolicy, method initialize.
/**
* Sets up a Store.
* @throws IOException if setup fails
*/
protected void initialize() throws IOException {
Path basedir = new Path(DIR);
String logName = "logs";
Path logdir = new Path(DIR, logName);
ColumnFamilyDescriptor familyDescriptor = ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("family"));
FileSystem fs = FileSystem.get(conf);
fs.delete(logdir, true);
TableDescriptor tableDescriptor = TableDescriptorBuilder
  .newBuilder(TableName.valueOf(Bytes.toBytes("table")))
  .setColumnFamily(familyDescriptor).build();
RegionInfo info = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
hlog = new FSHLog(fs, basedir, logName, conf);
hlog.init();
ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
region = HRegion.createHRegion(info, basedir, conf, tableDescriptor, hlog);
region.close();
Path tableDir = CommonFSUtils.getTableDir(basedir, tableDescriptor.getTableName());
region = new HRegion(tableDir, hlog, fs, conf, info, tableDescriptor, null);
store = new HStore(region, familyDescriptor, conf, false);
TEST_FILE = region.getRegionFileSystem().createTempName();
fs.createNewFile(TEST_FILE);
}
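initialize() uses the smallest possible descriptors: a single family and no tuning. In practice, compaction-related behavior is shaped by properties set through the same builders; a hedged sketch, where the particular values are illustrative assumptions rather than anything this test configures:
// Sketch only; these values are illustrative, not what the test configures.
ColumnFamilyDescriptor tunedFamily = ColumnFamilyDescriptorBuilder
  .newBuilder(Bytes.toBytes("family"))
  .setMaxVersions(3)
  .setBlocksize(64 * 1024)
  .build();
TableDescriptor tunedTable = TableDescriptorBuilder
  .newBuilder(TableName.valueOf("table"))
  .setColumnFamily(tunedFamily)
  .build();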