
Example 56 with ColumnFamilyDescriptor

use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

From the class AbstractTestWALReplay, method testReplayEditsWrittenIntoWAL.

/**
 * Create an HRegion with the result of a WAL split and test that we only see the good edits.
 */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = CommonFSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);
    final TableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtil.closeRegionAndWAL(region2);
    final WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    final byte[] rowName = tableName.getName();
    final byte[] regionName = hri.getEncodedNameAsBytes();
    // Add 1k to each family.
    final int countPerFamily = 1000;
    Set<byte[]> familyNames = new HashSet<>();
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getColumnFamilyNames()) {
        scopes.put(fam, 0);
    }
    for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal, mvcc, scopes);
        familyNames.add(hcd.getName());
    }
    // Add a cache flush, shouldn't have any effect
    wal.startCacheFlush(regionName, familyNames);
    wal.completeCacheFlush(regionName, HConstants.NO_SEQNUM);
    // Add an edit to another family, should be skipped.
    WALEdit edit = new WALEdit();
    long now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName));
    wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit);
    // Delete the c family to verify deletes make it over.
    edit = new WALEdit();
    now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
    wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit);
    // Sync.
    wal.sync();
    // Make a new conf and a new fs for the splitter to run on so we can take
    // over the old WAL.
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtil.getDifferentUser(newConf, ".replay.wal.secondtime");
    user.runAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // 100k seems to make for about 4 flushes during HRegion#initialize.
            newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
            // Make a new wal for new region.
            WAL newWal = createWAL(newConf, hbaseRootDir, logName);
            final AtomicInteger flushcount = new AtomicInteger(0);
            try {
                final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {

                    @Override
                    protected FlushResultImpl internalFlushcache(final WAL wal, final long myseqid, final Collection<HStore> storesToFlush, MonitoredTask status, boolean writeFlushWalMarker, FlushLifeCycleTracker tracker) throws IOException {
                        LOG.info("InternalFlushCache Invoked");
                        FlushResultImpl fs = super.internalFlushcache(wal, myseqid, storesToFlush, Mockito.mock(MonitoredTask.class), writeFlushWalMarker, tracker);
                        flushcount.incrementAndGet();
                        return fs;
                    }
                };
                // The seq id this region has opened up with
                long seqid = region.initialize();
                // The mvcc read point resulting from inserting the data.
                long writePoint = mvcc.getWritePoint();
                // We flushed during init.
                assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
                assertTrue((seqid - 1) == writePoint);
                Get get = new Get(rowName);
                Result result = region.get(get);
                // Make sure we only see the good edits
                assertEquals(countPerFamily * (htd.getColumnFamilies().length - 1), result.size());
                region.close();
            } finally {
                newWal.close();
            }
            return null;
        }
    });
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) KeyValue(org.apache.hadoop.hbase.KeyValue) User(org.apache.hadoop.hbase.security.User) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) Result(org.apache.hadoop.hbase.client.Result) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) FileSystem(org.apache.hadoop.fs.FileSystem) HStore(org.apache.hadoop.hbase.regionserver.HStore) HashSet(java.util.HashSet) Path(org.apache.hadoop.fs.Path) IOException(java.io.IOException) TreeMap(java.util.TreeMap) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) StreamLacksCapabilityException(org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Get(org.apache.hadoop.hbase.client.Get) FlushLifeCycleTracker(org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask) Test(org.junit.Test)
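
The addWALEdits helper called inside the per-family loop above is defined elsewhere in AbstractTestWALReplay and is not reproduced here. A minimal sketch of what such a helper might look like, assuming ee is an EnvironmentEdge and reusing the same WALKeyImpl constructor and appendData/sync calls as the test; the qualifier/value scheme is purely illustrative:

private void addWALEdits(TableName tableName, RegionInfo hri, byte[] rowName, byte[] family,
        int count, EnvironmentEdge ee, WAL wal, MultiVersionConcurrencyControl mvcc,
        NavigableMap<byte[], Integer> scopes) throws IOException {
    for (int i = 0; i < count; i++) {
        // One single-cell edit per iteration; the qualifier encodes the counter (illustrative only).
        byte[] qualifier = Bytes.toBytes(i);
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(rowName, family, qualifier, ee.currentTime(), qualifier));
        wal.appendData(hri,
            new WALKeyImpl(hri.getEncodedNameAsBytes(), tableName, ee.currentTime(), mvcc, scopes),
            edit);
    }
    // Make the appended edits durable before the caller moves on.
    wal.sync();
}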

Example 57 with ColumnFamilyDescriptor

use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

From the class TestStoreFileTrackerValidationUtils, method testCheckSFTCompatibility.

@Test
public void testCheckSFTCompatibility() throws Exception {
    // checking default value change on different configuration levels
    Configuration conf = new Configuration();
    conf.set(StoreFileTrackerFactory.TRACKER_IMPL, "DEFAULT");
    // creating a TD with only TableDescriptor level config
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf("TableX"));
    builder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE");
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.of("cf");
    builder.setColumnFamily(cf);
    TableDescriptor td = builder.build();
    // creating a TD with matching ColumnFamilyDescriptor level setting
    TableDescriptorBuilder snapBuilder = TableDescriptorBuilder.newBuilder(TableName.valueOf("TableY"));
    snapBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE");
    ColumnFamilyDescriptorBuilder snapCFBuilder = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"));
    snapCFBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE");
    snapBuilder.setColumnFamily(snapCFBuilder.build());
    TableDescriptor snapTd = snapBuilder.build();
    // adding a cf config that matches the td config is fine even when it does not match the default
    StoreFileTrackerValidationUtils.validatePreRestoreSnapshot(td, snapTd, conf);
    // removing cf level config is fine when it matches the td config
    StoreFileTrackerValidationUtils.validatePreRestoreSnapshot(snapTd, td, conf);
    TableDescriptorBuilder defaultBuilder = TableDescriptorBuilder.newBuilder(TableName.valueOf("TableY"));
    defaultBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "FILE");
    ColumnFamilyDescriptorBuilder defaultCFBuilder = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"));
    defaultCFBuilder.setValue(StoreFileTrackerFactory.TRACKER_IMPL, "DEFAULT");
    defaultBuilder.setColumnFamily(defaultCFBuilder.build());
    TableDescriptor defaultTd = defaultBuilder.build();
    assertThrows(RestoreSnapshotException.class, () -> {
        StoreFileTrackerValidationUtils.validatePreRestoreSnapshot(td, defaultTd, conf);
    });
    assertThrows(RestoreSnapshotException.class, () -> {
        StoreFileTrackerValidationUtils.validatePreRestoreSnapshot(snapTd, defaultTd, conf);
    });
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
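
The validation above relies on how the store file tracker implementation is resolved: a column-family level TRACKER_IMPL setting takes precedence over the table level value, which in turn takes precedence over the cluster Configuration default. A conceptual sketch of that precedence follows; this is not the actual StoreFileTrackerFactory lookup code, just an illustration using the descriptors built in the test:

String resolveTrackerImpl(Configuration conf, TableDescriptor td, ColumnFamilyDescriptor cf) {
    // Column-family level value wins if present.
    byte[] cfLevel = cf.getValue(Bytes.toBytes(StoreFileTrackerFactory.TRACKER_IMPL));
    if (cfLevel != null) {
        return Bytes.toString(cfLevel);
    }
    // Then the table descriptor level value.
    String tableLevel = td.getValue(StoreFileTrackerFactory.TRACKER_IMPL);
    if (tableLevel != null) {
        return tableLevel;
    }
    // Finally the cluster configuration, falling back to DEFAULT.
    return conf.get(StoreFileTrackerFactory.TRACKER_IMPL, "DEFAULT");
}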

Example 58 with ColumnFamilyDescriptor

use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

From the class ThriftHBaseServiceHandler, method modifyColumnFamily.

@Override
public void modifyColumnFamily(TTableName tableName, TColumnFamilyDescriptor column) throws TIOError, TException {
    try {
        TableName table = tableNameFromThrift(tableName);
        ColumnFamilyDescriptor columnFamilyDescriptor = columnFamilyDescriptorFromThrift(column);
        connectionCache.getAdmin().modifyColumnFamily(table, columnFamilyDescriptor);
    } catch (IOException e) {
        throw getTIOError(e);
    }
}
Also used : TTableName(org.apache.hadoop.hbase.thrift2.generated.TTableName) TableName(org.apache.hadoop.hbase.TableName) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TColumnFamilyDescriptor(org.apache.hadoop.hbase.thrift2.generated.TColumnFamilyDescriptor)
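
The Thrift handler above only translates the TColumnFamilyDescriptor and delegates to Admin.modifyColumnFamily. For comparison, a minimal sketch of the same operation through the plain Java client API; the connection setup, table name, and the maxVersions change are illustrative assumptions:

try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
    // Rebuild the family descriptor with the desired change and push it to the table.
    ColumnFamilyDescriptor modified = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(5)
        .build();
    admin.modifyColumnFamily(TableName.valueOf("exampleTable"), modified);
}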

Example 59 with ColumnFamilyDescriptor

use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

From the class HBaseTestingUtility, method createTable.

/**
 * Create a table.
 * @param htd table descriptor
 * @param splitRows array of split keys
 * @return A Table instance for the created table.
 * @throws IOException
 */
public Table createTable(TableDescriptor htd, byte[][] splitRows) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
    if (isNewVersionBehaviorEnabled()) {
        for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
            builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setNewVersionBehavior(true).build());
        }
    }
    if (splitRows != null) {
        getAdmin().createTable(builder.build(), splitRows);
    } else {
        getAdmin().createTable(builder.build());
    }
    // HBaseAdmin only waits for regions to appear in hbase:meta
    // we should wait until they are assigned
    waitUntilAllRegionsAssigned(htd.getTableName());
    return getConnection().getTable(htd.getTableName());
}
Also used : TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor)
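
A short usage sketch of this helper from a test method; the HBaseTestingUtility instance (TEST_UTIL), table name, family, and split key below are illustrative assumptions:

TableDescriptor htd = TableDescriptorBuilder
    .newBuilder(TableName.valueOf("exampleTable"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
    .build();
// A single split key yields two initial regions (illustrative value).
byte[][] splitRows = new byte[][] { Bytes.toBytes("row-5000") };
Table table = TEST_UTIL.createTable(htd, splitRows);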

Example 60 with ColumnFamilyDescriptor

use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

From the class ThriftHBaseServiceHandler, method getColumnDescriptors.

@Override
public Map<ByteBuffer, ColumnDescriptor> getColumnDescriptors(ByteBuffer tableName) throws IOError, TException {
    Table table = null;
    try {
        TreeMap<ByteBuffer, ColumnDescriptor> columns = new TreeMap<>();
        table = getTable(tableName);
        TableDescriptor desc = table.getDescriptor();
        for (ColumnFamilyDescriptor e : desc.getColumnFamilies()) {
            ColumnDescriptor col = ThriftUtilities.colDescFromHbase(e);
            columns.put(col.name, col);
        }
        return columns;
    } catch (IOException e) {
        LOG.warn(e.getMessage(), e);
        throw getIOError(e);
    } finally {
        closeTable(table);
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ColumnDescriptor(org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) TreeMap(java.util.TreeMap) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) ByteBuffer(java.nio.ByteBuffer) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
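
Outside the Thrift layer, the same per-family metadata can be read with the plain client API. A minimal sketch, assuming an open Connection named connection and an existing table; the table name and the printed attributes are illustrative:

try (Table table = connection.getTable(TableName.valueOf("exampleTable"))) {
    TableDescriptor desc = table.getDescriptor();
    for (ColumnFamilyDescriptor family : desc.getColumnFamilies()) {
        // Print the family name and one of its attributes.
        System.out.println(family.getNameAsString() + " maxVersions=" + family.getMaxVersions());
    }
}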

Aggregations

ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 199 usages
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 95 usages
Test (org.junit.Test): 92 usages
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 78 usages
IOException (java.io.IOException): 44 usages
TableName (org.apache.hadoop.hbase.TableName): 44 usages
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 42 usages
Path (org.apache.hadoop.fs.Path): 41 usages
Admin (org.apache.hadoop.hbase.client.Admin): 36 usages
Configuration (org.apache.hadoop.conf.Configuration): 34 usages
ArrayList (java.util.ArrayList): 32 usages
Put (org.apache.hadoop.hbase.client.Put): 32 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 28 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 24 usages
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 22 usages
Get (org.apache.hadoop.hbase.client.Get): 20 usages
Result (org.apache.hadoop.hbase.client.Result): 19 usages
ColumnFamilyDescriptorBuilder (org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder): 17 usages
Scan (org.apache.hadoop.hbase.client.Scan): 17 usages
Table (org.apache.hadoop.hbase.client.Table): 17 usages
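
The most frequently co-occurring classes above hint at the typical end-to-end pattern: build a ColumnFamilyDescriptor, attach it to a TableDescriptor, create the table through Admin, then write and read via Table. A minimal sketch of that pattern, not taken from the indexed sources; the table name, family, qualifier, and values are illustrative:

Configuration conf = HBaseConfiguration.create();
try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
    TableName name = TableName.valueOf("exampleTable");
    // One column family is enough for the round trip below.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.of("cf");
    TableDescriptor td = TableDescriptorBuilder.newBuilder(name).setColumnFamily(cf).build();
    admin.createTable(td);
    try (Table table = connection.getTable(name)) {
        table.put(new Put(Bytes.toBytes("row1"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
        Result result = table.get(new Get(Bytes.toBytes("row1")));
        System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
    }
}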