Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
Example from the class AbstractTestWALReplay, method testReplayEditsWrittenIntoWAL.
/**
 * Create an HRegion with the result of a WAL split and test we only see the good edits.
 */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
  final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
  final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
  final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
  final Path basedir = CommonFSUtils.getTableDir(hbaseRootDir, tableName);
  deleteDir(basedir);
  final TableDescriptor htd = createBasic3FamilyHTD(tableName);
  HRegion region2 = HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
  HBaseTestingUtil.closeRegionAndWAL(region2);
  final WAL wal = createWAL(this.conf, hbaseRootDir, logName);
  final byte[] rowName = tableName.getName();
  final byte[] regionName = hri.getEncodedNameAsBytes();
  // Add 1k to each family.
  final int countPerFamily = 1000;
  Set<byte[]> familyNames = new HashSet<>();
  NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (byte[] fam : htd.getColumnFamilyNames()) {
    scopes.put(fam, 0);
  }
  for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
    addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal, mvcc, scopes);
    familyNames.add(hcd.getName());
  }
  // Add a cache flush; it shouldn't have any effect.
  wal.startCacheFlush(regionName, familyNames);
  wal.completeCacheFlush(regionName, HConstants.NO_SEQNUM);
  // Add an edit to another family; it should be skipped.
  WALEdit edit = new WALEdit();
  long now = ee.currentTime();
  edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName));
  wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes),
    edit);
  // Delete the c family to verify deletes make it over.
  edit = new WALEdit();
  now = ee.currentTime();
  edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
  wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes),
    edit);
  // Sync.
  wal.sync();
  // Make a new conf and a new fs for the splitter to run on so we can take over the old wal.
  final Configuration newConf = HBaseConfiguration.create(this.conf);
  User user = HBaseTestingUtil.getDifferentUser(newConf, ".replay.wal.secondtime");
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      runWALSplit(newConf);
      FileSystem newFS = FileSystem.get(newConf);
      // 100k seems to make for about 4 flushes during HRegion#initialize.
      newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
      // Make a new wal for the new region.
      WAL newWal = createWAL(newConf, hbaseRootDir, logName);
      final AtomicInteger flushcount = new AtomicInteger(0);
      try {
        final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {
          @Override
          protected FlushResultImpl internalFlushcache(final WAL wal, final long myseqid,
            final Collection<HStore> storesToFlush, MonitoredTask status,
            boolean writeFlushWalMarker, FlushLifeCycleTracker tracker) throws IOException {
            LOG.info("InternalFlushCache Invoked");
            FlushResultImpl fs = super.internalFlushcache(wal, myseqid, storesToFlush,
              Mockito.mock(MonitoredTask.class), writeFlushWalMarker, tracker);
            flushcount.incrementAndGet();
            return fs;
          }
        };
        // The seq id this region has opened up with.
        long seqid = region.initialize();
        // The mvcc readpoint from inserting the data above.
        long writePoint = mvcc.getWritePoint();
        // We flushed during init.
        assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
        assertTrue((seqid - 1) == writePoint);
        Get get = new Get(rowName);
        Result result = region.get(get);
        // Make sure we only see the good edits.
        assertEquals(countPerFamily * (htd.getColumnFamilies().length - 1), result.size());
        region.close();
      } finally {
        newWal.close();
      }
      return null;
    }
  });
}
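The addWALEdits helper called in the loop above is not shown on this page. A minimal sketch of such a helper, assuming the test's EnvironmentEdge field ee and the same WAL append API used above (the real method in AbstractTestWALReplay may differ in its details), could look like this:

private void addWALEdits(TableName tableName, RegionInfo hri, byte[] rowName, byte[] family,
    int count, EnvironmentEdge ee, WAL wal, MultiVersionConcurrencyControl mvcc,
    NavigableMap<byte[], Integer> scopes) throws IOException {
  long txid = -1;
  for (int i = 0; i < count; i++) {
    // One cell per edit: row/family with qualifier "i" and a small value.
    byte[] qualifier = Bytes.toBytes(Integer.toString(i));
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, qualifier, ee.currentTime(), qualifier));
    txid = wal.appendData(hri,
      new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, ee.currentTime(), mvcc, scopes),
      edit);
  }
  if (txid != -1) {
    // Sync once at the end so the appended edits are durable before the split runs.
    wal.sync(txid);
  }
}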
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
Example from the class TestStoreFileTrackerValidationUtils, method testCheckSFTCompatibility.
@Test
public void testCheckSFTCompatibility() throws Exception {
  // checking default value change on different configuration levels
  Configuration conf = new Configuration();
  conf.set(StoreFileTrackerFactory.TRACKER_IMPL, "DEFAULT");
  // creating a TD with only TableDescriptor level config
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf("TableX"));
  builder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE");
  ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.of("cf");
  builder.setColumnFamily(cf);
  TableDescriptor td = builder.build();
  // creating a TD with matching ColumnFamilyDescriptor level setting
  TableDescriptorBuilder snapBuilder =
    TableDescriptorBuilder.newBuilder(TableName.valueOf("TableY"));
  snapBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE");
  ColumnFamilyDescriptorBuilder snapCFBuilder =
    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"));
  snapCFBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE");
  snapBuilder.setColumnFamily(snapCFBuilder.build());
  TableDescriptor snapTd = snapBuilder.build();
  // adding a cf config that matches the td config is fine even when it does not match the default
  StoreFileTrackerValidationUtils.validatePreRestoreSnapshot(td, snapTd, conf);
  // removing cf level config is fine when it matches the td config
  StoreFileTrackerValidationUtils.validatePreRestoreSnapshot(snapTd, td, conf);
  TableDescriptorBuilder defaultBuilder =
    TableDescriptorBuilder.newBuilder(TableName.valueOf("TableY"));
  defaultBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE");
  ColumnFamilyDescriptorBuilder defaultCFBuilder =
    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"));
  defaultCFBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "DEFAULT");
  defaultBuilder.setColumnFamily(defaultCFBuilder.build());
  TableDescriptor defaultTd = defaultBuilder.build();
  assertThrows(RestoreSnapshotException.class, () -> {
    StoreFileTrackerValidationUtils.validatePreRestoreSnapshot(td, defaultTd, conf);
  });
  assertThrows(RestoreSnapshotException.class, () -> {
    StoreFileTrackerValidationUtils.validatePreRestoreSnapshot(snapTd, defaultTd, conf);
  });
}
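As a quick sanity check (a sketch only, assuming the same JUnit assertions are statically imported in the test class), the tracker values driving the outcomes above can be read back from the built descriptors:

// td and snapTd carry FILE at the table level; defaultTd's column family pins the
// tracker back to DEFAULT, which is why the two validations above throw
// RestoreSnapshotException.
assertEquals("FILE", td.getValue(StoreFileTrackerFactory.TRACKER_IMPL));
assertEquals("FILE", snapTd.getValue(StoreFileTrackerFactory.TRACKER_IMPL));
assertEquals("FILE", defaultTd.getValue(StoreFileTrackerFactory.TRACKER_IMPL));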
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
Example from the class ThriftHBaseServiceHandler, method modifyColumnFamily.
@Override
public void modifyColumnFamily(TTableName tableName, TColumnFamilyDescriptor column)
  throws TIOError, TException {
  try {
    TableName table = tableNameFromThrift(tableName);
    ColumnFamilyDescriptor columnFamilyDescriptor = columnFamilyDescriptorFromThrift(column);
    connectionCache.getAdmin().modifyColumnFamily(table, columnFamilyDescriptor);
  } catch (IOException e) {
    throw getTIOError(e);
  }
}
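The handler converts the Thrift objects and delegates to Admin.modifyColumnFamily. For comparison, a sketch of the same operation through the plain Java client API, assuming an open Connection named conn and an existing table t1 with family cf (both hypothetical), might be:

// Illustrative only: modify a column family directly via the Java Admin API.
ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
  .newBuilder(Bytes.toBytes("cf"))
  .setMaxVersions(3) // example attribute change
  .build();
conn.getAdmin().modifyColumnFamily(TableName.valueOf("t1"), cfd);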
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
Example from the class HBaseTestingUtility, method createTable.
/**
 * Create a table.
 * @param htd table descriptor
 * @param splitRows array of split keys
 * @return A Table instance for the created table.
 * @throws IOException
 */
public Table createTable(TableDescriptor htd, byte[][] splitRows) throws IOException {
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
  if (isNewVersionBehaviorEnabled()) {
    for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
      builder.setColumnFamily(
        ColumnFamilyDescriptorBuilder.newBuilder(family).setNewVersionBehavior(true).build());
    }
  }
  if (splitRows != null) {
    getAdmin().createTable(builder.build(), splitRows);
  } else {
    getAdmin().createTable(builder.build());
  }
  // HBaseAdmin only waits for regions to appear in hbase:meta;
  // we should wait until they are assigned.
  waitUntilAllRegionsAssigned(htd.getTableName());
  return getConnection().getTable(htd.getTableName());
}
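A hypothetical call site for this helper, assuming an HBaseTestingUtility instance named TEST_UTIL, might look like:

// Sketch: create a pre-split table with one family and get a Table handle back.
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
  .build();
byte[][] splitRows = new byte[][] { Bytes.toBytes("m") }; // two regions, split at "m"
Table table = TEST_UTIL.createTable(htd, splitRows);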
Use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.
Example from the class ThriftHBaseServiceHandler, method getColumnDescriptors.
@Override
public Map<ByteBuffer, ColumnDescriptor> getColumnDescriptors(ByteBuffer tableName)
  throws IOError, TException {
  Table table = null;
  try {
    TreeMap<ByteBuffer, ColumnDescriptor> columns = new TreeMap<>();
    table = getTable(tableName);
    TableDescriptor desc = table.getDescriptor();
    for (ColumnFamilyDescriptor e : desc.getColumnFamilies()) {
      ColumnDescriptor col = ThriftUtilities.colDescFromHbase(e);
      columns.put(col.name, col);
    }
    return columns;
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw getIOError(e);
  } finally {
    closeTable(table);
  }
}
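On the client side, the same data can be fetched over the Thrift gateway. A sketch using the generated Hbase.Client, assuming a Thrift server listening on localhost:9090 and a table named t1 (both hypothetical), and the usual libthrift transport/protocol classes:

// Illustrative only: fetch the per-family descriptors of "t1" through the Thrift API.
TTransport transport = new TSocket("localhost", 9090);
transport.open();
Hbase.Client client = new Hbase.Client(new TBinaryProtocol(transport));
Map<ByteBuffer, ColumnDescriptor> families =
  client.getColumnDescriptors(ByteBuffer.wrap(Bytes.toBytes("t1")));
System.out.println("column families: " + families.size());
transport.close();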