Example 91 with ColumnFamilyDescriptor

use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

the class TestWALSplitToHFile method testWrittenViaHRegion.

/**
 * Test writing edits into an HRegion, closing it, splitting logs, opening
 * Region again.  Verify seqids.
 */
@Test
public void testWrittenViaHRegion() throws IOException, SecurityException, IllegalArgumentException, InterruptedException {
    Pair<TableDescriptor, RegionInfo> pair = setupTableAndRegion();
    TableDescriptor td = pair.getFirst();
    RegionInfo ri = pair.getSecond();
    // Write countPerFamily edits into the three families. Flush one of the
    // families during the load of edits so its seqid differs from the others,
    // to verify we do the right thing when families have different seqids.
    WAL wal = createWAL(this.conf, rootDir, logName);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal);
    long seqid = region.getOpenSeqNum();
    boolean first = true;
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        addRegionEdits(ROW, cfd.getName(), countPerFamily, this.ee, region, "x");
        if (first) {
            // Flush the first family so at least one family has a seqid different from the rest.
            region.flush(true);
            first = false;
        }
    }
    // Now assert edits made it in.
    final Get g = new Get(ROW);
    Result result = region.get(g);
    assertEquals(countPerFamily * td.getColumnFamilies().length, result.size());
    // Now close the region (without flush), split the log, reopen the region and assert
    // that replay of the log has the correct effect: our seqids are calculated correctly,
    // so all edits in the logs are seen as 'stale'/old.
    region.close(true);
    wal.shutdown();
    try {
        WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals);
    } catch (Exception e) {
        LOG.debug("Got exception", e);
    }
    WAL wal2 = createWAL(this.conf, rootDir, logName);
    HRegion region2 = HRegion.openHRegion(conf, this.fs, rootDir, ri, td, wal2);
    long seqid2 = region2.getOpenSeqNum();
    assertTrue(seqid + result.size() < seqid2);
    final Result result1b = region2.get(g);
    assertEquals(result.size(), result1b.size());
    // Next, add more edits, then 'crash' this region by splitting its WAL out from
    // under it, and assert that replay of the log adds the edits back correctly
    // when the region is opened again.
    for (ColumnFamilyDescriptor hcd : td.getColumnFamilies()) {
        addRegionEdits(ROW, hcd.getName(), countPerFamily, this.ee, region2, "y");
    }
    // Get count of edits.
    final Result result2 = region2.get(g);
    assertEquals(2 * result.size(), result2.size());
    wal2.sync();
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtil.getDifferentUser(newConf, td.getTableName().getNameAsString());
    user.runAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(conf), conf, wals);
            FileSystem newFS = FileSystem.get(newConf);
            // Make a new WAL for the new region open.
            WAL wal3 = createWAL(newConf, rootDir, logName);
            Path tableDir = CommonFSUtils.getTableDir(rootDir, td.getTableName());
            HRegion region3 = new HRegion(tableDir, wal3, newFS, newConf, ri, td, null);
            long seqid3 = region3.initialize();
            Result result3 = region3.get(g);
            // Assert that count of cells is same as before crash.
            assertEquals(result2.size(), result3.size());
            // We can't close wal2; it was appropriated when we split its logs.
            region3.close();
            wal3.close();
            return null;
        }
    });
}
Also used : Path(org.apache.hadoop.fs.Path) User(org.apache.hadoop.hbase.security.User) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) CorruptHFileException(org.apache.hadoop.hbase.io.hfile.CorruptHFileException) IOException(java.io.IOException) Result(org.apache.hadoop.hbase.client.Result) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Get(org.apache.hadoop.hbase.client.Get) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
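The skeleton these recovery tests share is worth seeing on its own: write edits, close the region without flushing, split the WAL, reopen, verify. A minimal hedged sketch, reusing this test class's helpers (createWAL, rootDir, logDir, oldLogDir, and wals are assumptions borrowed from the class, not a public API):

// Sketch only: write edits, 'crash' without flushing, split the WAL, reopen.
WAL wal = createWAL(conf, rootDir, logName);
HRegion region = HRegion.openHRegion(conf, fs, rootDir, ri, td, wal);
long openSeqBefore = region.getOpenSeqNum();
// ... put some edits here ...
region.close(true); // abort-style close: the edits survive only in the WAL
wal.shutdown();
WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(conf), conf, wals);
WAL wal2 = createWAL(conf, rootDir, logName);
HRegion reopened = HRegion.openHRegion(conf, fs, rootDir, ri, td, wal2);
// Replay happens during open; the new open seqid exceeds every replayed edit.
assertTrue(openSeqBefore < reopened.getOpenSeqNum());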

Example 92 with ColumnFamilyDescriptor

use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

the class TestWALSplitToHFile method testRecoverSequenceId.

@Test
public void testRecoverSequenceId() throws Exception {
    Pair<TableDescriptor, RegionInfo> pair = setupTableAndRegion();
    TableDescriptor td = pair.getFirst();
    RegionInfo ri = pair.getSecond();
    WAL wal = createWAL(this.conf, rootDir, logName);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal);
    Map<Integer, Map<String, Long>> seqIdMap = new HashMap<>();
    // Write data and do not flush
    for (int i = 0; i < countPerFamily; i++) {
        for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
            region.put(new Put(Bytes.toBytes(i)).addColumn(cfd.getName(), QUALIFIER, VALUE1));
            Result result = region.get(new Get(Bytes.toBytes(i)).addFamily(cfd.getName()));
            assertTrue(Bytes.equals(VALUE1, result.getValue(cfd.getName(), QUALIFIER)));
            List<Cell> cells = result.listCells();
            assertEquals(1, cells.size());
            seqIdMap.computeIfAbsent(i, r -> new HashMap<>()).put(cfd.getNameAsString(), cells.get(0).getSequenceId());
        }
    }
    // Now close the region without flush
    region.close(true);
    wal.shutdown();
    // split the log
    WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals);
    // reopen the region
    WAL wal2 = createWAL(this.conf, rootDir, logName);
    HRegion region2 = HRegion.openHRegion(conf, this.fs, rootDir, ri, td, wal2);
    // assert the seqid was recovered
    for (int i = 0; i < countPerFamily; i++) {
        for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
            Result result = region2.get(new Get(Bytes.toBytes(i)).addFamily(cfd.getName()));
            assertTrue(Bytes.equals(VALUE1, result.getValue(cfd.getName(), QUALIFIER)));
            List<Cell> cells = result.listCells();
            assertEquals(1, cells.size());
            assertEquals((long) seqIdMap.get(i).get(cfd.getNameAsString()), cells.get(0).getSequenceId());
        }
    }
}
Also used : Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Get(org.apache.hadoop.hbase.client.Get) Map(java.util.Map) HashMap(java.util.HashMap) Cell(org.apache.hadoop.hbase.Cell) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)

Example 93 with ColumnFamilyDescriptor

use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

the class HBaseTestingUtility method generateColumnDescriptors.

/**
 * Create a set of column family descriptors covering every combination of the
 * available compression, data block encoding, and bloom filter types.
 * @param prefix prefix for the generated family names
 * @return the list of column family descriptors
 */
public static List<ColumnFamilyDescriptor> generateColumnDescriptors(final String prefix) {
    List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
    long familyId = 0;
    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
        for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
            for (BloomType bloomType : BloomType.values()) {
                String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
                ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
                columnFamilyDescriptorBuilder.setCompressionType(compressionType);
                columnFamilyDescriptorBuilder.setDataBlockEncoding(encodingType);
                columnFamilyDescriptorBuilder.setBloomFilterType(bloomType);
                columnFamilyDescriptors.add(columnFamilyDescriptorBuilder.build());
                familyId++;
            }
        }
    }
    return columnFamilyDescriptors;
}
Also used : DataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding) Compression(org.apache.hadoop.hbase.io.compress.Compression) Algorithm(org.apache.hadoop.hbase.io.compress.Compression.Algorithm) BloomType(org.apache.hadoop.hbase.regionserver.BloomType) ArrayList(java.util.ArrayList) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor)
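A brief usage sketch: the generated descriptors plug straight into a TableDescriptorBuilder. The table name and the admin handle below are illustrative assumptions, not part of the original helper:

// Hypothetical usage: one table whose families span the whole
// compression/encoding/bloom matrix generated above.
TableDescriptorBuilder builder =
    TableDescriptorBuilder.newBuilder(TableName.valueOf("codecMatrixTest"));
for (ColumnFamilyDescriptor cfd : generateColumnDescriptors("t")) {
    builder.setColumnFamily(cfd);
}
admin.createTable(builder.build());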

Example 94 with ColumnFamilyDescriptor

use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project hbase by apache.

the class TestWALSplitToHFile method testPutWithSameTimestamp.

@Test
public void testPutWithSameTimestamp() throws Exception {
    Pair<TableDescriptor, RegionInfo> pair = setupTableAndRegion();
    TableDescriptor td = pair.getFirst();
    RegionInfo ri = pair.getSecond();
    WAL wal = createWAL(this.conf, rootDir, logName);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, rootDir, ri, td, wal);
    final long timestamp = this.ee.currentTime();
    // Write data and flush
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        region.put(new Put(ROW).addColumn(cfd.getName(), QUALIFIER, timestamp, VALUE1));
    }
    region.flush(true);
    // Write data with same timestamp and do not flush
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        region.put(new Put(ROW).addColumn(cfd.getName(), QUALIFIER, timestamp, VALUE2));
    }
    // Now close the region without flush
    region.close(true);
    wal.shutdown();
    // split the log
    WALSplitter.split(rootDir, logDir, oldLogDir, FileSystem.get(this.conf), this.conf, wals);
    // reopen the region
    WAL wal2 = createWAL(this.conf, rootDir, logName);
    HRegion region2 = HRegion.openHRegion(conf, this.fs, rootDir, ri, td, wal2);
    Result result2 = region2.get(new Get(ROW));
    assertEquals(td.getColumnFamilies().length, result2.size());
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
        assertTrue(Bytes.equals(VALUE2, result2.getValue(cfd.getName(), QUALIFIER)));
    }
}
Also used : HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Get(org.apache.hadoop.hbase.client.Get) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
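Why VALUE2 wins when both puts share a timestamp: cells with equal timestamps are ordered by sequence id, which WAL replay preserves, so the later put sorts first. A hedged inspection snippet (region2 and ROW come from the test above; the printout is purely illustrative):

// Dump timestamp and sequence id per cell; the surviving cell in each
// family carries the higher sequence id, hence VALUE2.
Result r = region2.get(new Get(ROW));
for (Cell cell : r.listCells()) {
    System.out.println(Bytes.toString(CellUtil.cloneFamily(cell))
        + " ts=" + cell.getTimestamp() + " seq=" + cell.getSequenceId());
}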

Example 95 with ColumnFamilyDescriptor

use of org.apache.hadoop.hbase.client.ColumnFamilyDescriptor in project gora by apache.

the class TestHBaseStore method testNewVersionBehavior.

@Test
public void testNewVersionBehavior() throws IOException {
    // This test fails in HBase 2.0.5 when NEW_VERSION_BEHAVIOR == true:
    // when persisting with a null qualifier, deleting the row does not delete the
    // column family. Once these issues are fixed, we could remove the workarounds
    // added in the HBaseDataStore#put method.
    Connection conn = ConnectionFactory.createConnection(conf);
    TableName test = TableName.valueOf("Test");
    TableDescriptorBuilder tableDescBuilder = TableDescriptorBuilder.newBuilder(test);
    ColumnFamilyDescriptorBuilder columnDescBuilder = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test-family"));
    columnDescBuilder.setNewVersionBehavior(true);
    ColumnFamilyDescriptor columnDescriptor = columnDescBuilder.build();
    tableDescBuilder.setColumnFamily(columnDescriptor);
    TableDescriptor tableDescriptor = tableDescBuilder.build();
    conn.getAdmin().createTable(tableDescriptor);
    Table table = conn.getTable(test);
    Put put = new Put(Bytes.toBytes("com.example/http"));
    put.addColumn(Bytes.toBytes("test-family"), null, Bytes.toBytes("test-value"));
    table.put(put);
    Delete del = new Delete(Bytes.toBytes("com.example/http"));
    table.delete(del);
    Get get = new Get(Bytes.toBytes("com.example/http"));
    // get.addColumn(Bytes.toBytes("test-family"), null);
    Result result = table.get(get);
    byte[] value = result.getValue(Bytes.toBytes("test-family"), null);
    if (value != null) {
        // Known failure: this should be null after the delete row operation, but
        // HBase 2.0.5 returns the stale value when NEW_VERSION_BEHAVIOR == true.
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) Get(org.apache.hadoop.hbase.client.Get) Connection(org.apache.hadoop.hbase.client.Connection) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
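One plausible workaround for the behavior described above (unverified against the HBase 2.0.5 bug, so treat it as an assumption) is to target the column family explicitly in the Delete instead of deleting the whole row:

// Hedged sketch: delete the family explicitly rather than the entire row,
// since the row-level delete is what fails to remove the family here.
Delete del = new Delete(Bytes.toBytes("com.example/http"));
del.addFamily(Bytes.toBytes("test-family"));
table.delete(del);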

Aggregations

ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 199
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 95
Test (org.junit.Test): 92
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 78
IOException (java.io.IOException): 44
TableName (org.apache.hadoop.hbase.TableName): 44
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 42
Path (org.apache.hadoop.fs.Path): 41
Admin (org.apache.hadoop.hbase.client.Admin): 36
Configuration (org.apache.hadoop.conf.Configuration): 34
ArrayList (java.util.ArrayList): 32
Put (org.apache.hadoop.hbase.client.Put): 32
FileSystem (org.apache.hadoop.fs.FileSystem): 28
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 24
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 22
Get (org.apache.hadoop.hbase.client.Get): 20
Result (org.apache.hadoop.hbase.client.Result): 19
ColumnFamilyDescriptorBuilder (org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder): 17
Scan (org.apache.hadoop.hbase.client.Scan): 17
Table (org.apache.hadoop.hbase.client.Table): 17