
Example 96 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

In the class TestLogRollAbort, the method testLogRollAfterSplitStart:

/**
   * Tests the case where a RegionServer enters a GC pause and
   * comes back online after the master has declared it dead and started to split its logs.
   * Log rolling after the master-side split has started should fail. See HBASE-2312.
   */
@Test(timeout = 300000)
public void testLogRollAfterSplitStart() throws IOException {
    LOG.info("Verify wal roll after split starts will fail.");
    String logName = ServerName.valueOf("testLogRollAfterSplitStart", 16010, System.currentTimeMillis()).toString();
    Path thisTestsDir = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(logName));
    final WALFactory wals = new WALFactory(conf, null, logName);
    try {
        // put some entries in a WAL
        TableName tableName = TableName.valueOf(this.getClass().getName());
        HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
        final WAL log = wals.getWAL(regioninfo.getEncodedNameAsBytes(), regioninfo.getTable().getNamespace());
        MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
        final int total = 20;
        for (int i = 0; i < total; i++) {
            WALEdit kvs = new WALEdit();
            kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
            HTableDescriptor htd = new HTableDescriptor(tableName);
            htd.addFamily(new HColumnDescriptor("column"));
            NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
            for (byte[] fam : htd.getFamiliesKeys()) {
                scopes.put(fam, 0);
            }
            log.append(regioninfo, new WALKey(regioninfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis(), mvcc, scopes), kvs, true);
        }
        // Send the data to HDFS datanodes and close the HDFS writer
        log.sync();
        ((AbstractFSWAL<?>) log).replaceWriter(((FSHLog) log).getOldPath(), null, null);
        /* Code taken from MasterFileSystem.getLogDirs(), which is called from MasterFileSystem.splitLog();
         * it handles RS shutdowns (as observed by the splitting process).
         */
        // rename the directory so a rogue RS doesn't create more WALs
        Path rsSplitDir = thisTestsDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
        if (!fs.rename(thisTestsDir, rsSplitDir)) {
            throw new IOException("Failed fs.rename for log split: " + thisTestsDir);
        }
        LOG.debug("Renamed region directory: " + rsSplitDir);
        LOG.debug("Processing the old log files.");
        WALSplitter.split(HBASELOGDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);
        LOG.debug("Trying to roll the WAL.");
        try {
            log.rollWriter();
            Assert.fail("rollWriter() did not throw any exception.");
        } catch (IOException ioe) {
            if (ioe.getCause() instanceof FileNotFoundException) {
                LOG.info("Got the expected exception: ", ioe.getCause());
            } else {
                Assert.fail("Unexpected exception: " + ioe);
            }
        }
    } finally {
        wals.close();
        if (fs.exists(thisTestsDir)) {
            fs.delete(thisTestsDir, true);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) WAL(org.apache.hadoop.hbase.wal.WAL) KeyValue(org.apache.hadoop.hbase.KeyValue) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) TreeMap(java.util.TreeMap) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) WALKey(org.apache.hadoop.hbase.wal.WALKey) TableName(org.apache.hadoop.hbase.TableName) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Test(org.junit.Test)

Example 97 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

In the class TestLogRolling, the method testLogRollOnPipelineRestart:

/**
   * Test that WAL is rolled when all data nodes in the pipeline have been restarted.
   * @throws Exception
   */
@Test
public void testLogRollOnPipelineRestart() throws Exception {
    LOG.info("Starting testLogRollOnPipelineRestart");
    assertTrue("This test requires WAL file replication.", fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) > 1);
    LOG.info("Replication=" + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
    // When the hbase:meta table can be opened, the region servers are running
    Table t = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    try {
        this.server = cluster.getRegionServer(0);
        // Create the test table and open it
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(getName()));
        desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
        admin.createTable(desc);
        Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
        server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName());
        HRegionInfo region = server.getOnlineRegions(desc.getTableName()).get(0).getRegionInfo();
        final WAL log = server.getWAL(region);
        final List<Path> paths = new ArrayList<>(1);
        final List<Integer> preLogRolledCalled = new ArrayList<>();
        paths.add(AbstractFSWALProvider.getCurrentFileName(log));
        log.registerWALActionsListener(new WALActionsListener.Base() {

            @Override
            public void preLogRoll(Path oldFile, Path newFile) {
                LOG.debug("preLogRoll: oldFile=" + oldFile + " newFile=" + newFile);
                preLogRolledCalled.add(new Integer(1));
            }

            @Override
            public void postLogRoll(Path oldFile, Path newFile) {
                paths.add(newFile);
            }
        });
        writeData(table, 1002);
        long curTime = System.currentTimeMillis();
        LOG.info("log.getCurrentFileName()): " + AbstractFSWALProvider.getCurrentFileName(log));
        long oldFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log);
        assertTrue("Log should have a timestamp older than now", curTime > oldFilenum && oldFilenum != -1);
        assertTrue("The log shouldn't have rolled yet", oldFilenum == AbstractFSWALProvider.extractFileNumFromWAL(log));
        // roll all datanodes in the pipeline
        dfsCluster.restartDataNodes();
        Thread.sleep(1000);
        dfsCluster.waitActive();
        LOG.info("Data Nodes restarted");
        validateData(table, 1002);
        // this write should succeed, but trigger a log roll
        writeData(table, 1003);
        long newFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log);
        assertTrue("Missing datanode should've triggered a log roll", newFilenum > oldFilenum && newFilenum > curTime);
        validateData(table, 1003);
        writeData(table, 1004);
        // roll all datanodes again
        dfsCluster.restartDataNodes();
        Thread.sleep(1000);
        dfsCluster.waitActive();
        LOG.info("Data Nodes restarted");
        validateData(table, 1004);
        // this write should succeed, but trigger a log roll
        writeData(table, 1005);
        // force a log roll to read back and verify previously written logs
        log.rollWriter(true);
        assertTrue("preLogRolledCalled has size of " + preLogRolledCalled.size(), preLogRolledCalled.size() >= 1);
        // read back the data written
        Set<String> loggedRows = new HashSet<>();
        FSUtils fsUtils = FSUtils.getInstance(fs, TEST_UTIL.getConfiguration());
        for (Path p : paths) {
            LOG.debug("recovering lease for " + p);
            fsUtils.recoverFileLease(((HFileSystem) fs).getBackingFs(), p, TEST_UTIL.getConfiguration(), null);
            LOG.debug("Reading WAL " + FSUtils.getPath(p));
            WAL.Reader reader = null;
            try {
                reader = WALFactory.createReader(fs, p, TEST_UTIL.getConfiguration());
                WAL.Entry entry;
                while ((entry = reader.next()) != null) {
                    LOG.debug("#" + entry.getKey().getSequenceId() + ": " + entry.getEdit().getCells());
                    for (Cell cell : entry.getEdit().getCells()) {
                        loggedRows.add(Bytes.toStringBinary(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
                    }
                }
            } catch (EOFException e) {
                LOG.debug("EOF reading file " + FSUtils.getPath(p));
            } finally {
                if (reader != null)
                    reader.close();
            }
        }
        // verify the written rows are there
        assertTrue(loggedRows.contains("row1002"));
        assertTrue(loggedRows.contains("row1003"));
        assertTrue(loggedRows.contains("row1004"));
        assertTrue(loggedRows.contains("row1005"));
        // flush all regions
        for (Region r : server.getOnlineRegionsLocalContext()) {
            try {
                r.flush(true);
            } catch (Exception e) {
                // This try/catch was added by HBASE-14317. It is needed
                // because this issue tightened up the semantic such that
                // a failed append could not be followed by a successful
                // sync. What is coming out here is a failed sync, a sync
                // that used to 'pass'.
                LOG.info(e);
            }
        }
        ResultScanner scanner = table.getScanner(new Scan());
        try {
            for (int i = 2; i <= 5; i++) {
                Result r = scanner.next();
                assertNotNull(r);
                assertFalse(r.isEmpty());
                assertEquals("row100" + i, Bytes.toString(r.getRow()));
            }
        } finally {
            scanner.close();
        }
        // verify that no region servers aborted
        for (JVMClusterUtil.RegionServerThread rsThread : TEST_UTIL.getHBaseCluster().getRegionServerThreads()) {
            assertFalse(rsThread.getRegionServer().isAborted());
        }
    } finally {
        if (t != null)
            t.close();
    }
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) ArrayList(java.util.ArrayList) Result(org.apache.hadoop.hbase.client.Result) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) EOFException(java.io.EOFException) Cell(org.apache.hadoop.hbase.Cell) FSUtils(org.apache.hadoop.hbase.util.FSUtils) HashSet(java.util.HashSet) Path(org.apache.hadoop.fs.Path) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) IOException(java.io.IOException) EOFException(java.io.EOFException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) JVMClusterUtil(org.apache.hadoop.hbase.util.JVMClusterUtil) Region(org.apache.hadoop.hbase.regionserver.Region) Scan(org.apache.hadoop.hbase.client.Scan) Test(org.junit.Test)

Example 98 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

In the class TestLogRolling, the method testLogRollOnDatanodeDeath:

/**
   * Tests that logs are rolled upon detecting datanode death. Requires an HDFS jar with
   * HDFS-826 and syncFs() support (HDFS-200).
   */
@Test
public void testLogRollOnDatanodeDeath() throws Exception {
    TEST_UTIL.ensureSomeRegionServersAvailable(2);
    assertTrue("This test requires WAL file replication set to 2.", fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) == 2);
    LOG.info("Replication=" + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
    this.server = cluster.getRegionServer(0);
    // Create the test table and open it
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(getName()));
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    admin.createTable(desc);
    Table table = TEST_UTIL.getConnection().getTable(desc.getTableName());
    server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName());
    HRegionInfo region = server.getOnlineRegions(desc.getTableName()).get(0).getRegionInfo();
    final FSHLog log = (FSHLog) server.getWAL(region);
    final AtomicBoolean lowReplicationHookCalled = new AtomicBoolean(false);
    log.registerWALActionsListener(new WALActionsListener.Base() {

        @Override
        public void logRollRequested(boolean lowReplication) {
            if (lowReplication) {
                lowReplicationHookCalled.lazySet(true);
            }
        }
    });
    // add up the datanode count, to ensure proper replication when we kill 1
    // This function is synchronous; when it returns, the dfs cluster is active
    // We start 3 servers and then stop 2 to avoid a directory naming conflict
    // when we stop/start a namenode later, as mentioned in HBASE-5163
    List<DataNode> existingNodes = dfsCluster.getDataNodes();
    int numDataNodes = 3;
    dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(), numDataNodes, true, null, null);
    List<DataNode> allNodes = dfsCluster.getDataNodes();
    for (int i = allNodes.size() - 1; i >= 0; i--) {
        if (existingNodes.contains(allNodes.get(i))) {
            dfsCluster.stopDataNode(i);
        }
    }
    assertTrue("DataNodes " + dfsCluster.getDataNodes().size() + " default replication " + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()), dfsCluster.getDataNodes().size() >= fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) + 1);
    writeData(table, 2);
    long curTime = System.currentTimeMillis();
    LOG.info("log.getCurrentFileName(): " + log.getCurrentFileName());
    long oldFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log);
    assertTrue("Log should have a timestamp older than now", curTime > oldFilenum && oldFilenum != -1);
    assertTrue("The log shouldn't have rolled yet", oldFilenum == AbstractFSWALProvider.extractFileNumFromWAL(log));
    final DatanodeInfo[] pipeline = log.getPipeline();
    assertTrue(pipeline.length == fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
    // kill a datanode in the pipeline to force a log roll on the next sync()
    // This function is synchronous, when it returns the node is killed.
    assertTrue(dfsCluster.stopDataNode(pipeline[0].getName()) != null);
    // this write should succeed, but trigger a log roll
    writeData(table, 2);
    long newFilenum = AbstractFSWALProvider.extractFileNumFromWAL(log);
    assertTrue("Missing datanode should've triggered a log roll", newFilenum > oldFilenum && newFilenum > curTime);
    assertTrue("The log rolling hook should have been called with the low replication flag", lowReplicationHookCalled.get());
    // write some more log data (this should use a new hdfs_out)
    writeData(table, 3);
    assertTrue("The log should not roll again.", AbstractFSWALProvider.extractFileNumFromWAL(log) == newFilenum);
    // kill another datanode in the pipeline, so the replicas will be lower than
    // the configured value 2.
    assertTrue(dfsCluster.stopDataNode(pipeline[1].getName()) != null);
    batchWriteAndWait(table, log, 3, false, 14000);
    int replication = log.getLogReplication();
    assertTrue("LowReplication Roller should've been disabled, current replication=" + replication, !log.isLowReplicationRollEnabled());
    dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(), 1, true, null, null);
    // Force a roll of the writer. The new log file will have the default replication,
    // and the LowReplication Roller will be enabled.
    log.rollWriter(true);
    batchWriteAndWait(table, log, 13, true, 10000);
    replication = log.getLogReplication();
    assertTrue("New log file should have the default replication instead of " + replication, replication == fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
    assertTrue("LowReplication Roller should've been enabled", log.isLowReplicationRollEnabled());
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Test(org.junit.Test)

Example 99 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

In the class TestReplicationBase, the method setUpBeforeClass:

/**
   * @throws java.lang.Exception
   */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    // Keep the number of edits per batch sent to the ReplicationEndpoint small enough to trigger
    // a sufficient number of events. But we don't want to go too low because
    // HBaseInterClusterReplicationEndpoint partitions entries into batches and we want
    // more than one batch sent to the peer cluster for better testing.
    conf1.setInt("replication.source.size.capacity", 102400);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setInt("zookeeper.recovery.retry", 1);
    conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setInt("replication.stats.thread.period.seconds", 5);
    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    conf1.setLong("replication.sleep.before.failover", 2000);
    conf1.setInt("replication.source.maxretriesmultiplier", 10);
    conf1.setFloat("replication.source.ratio", 1.0f);
    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    // Have to re-get conf1 in case the zk cluster location is different
    // than the default
    conf1 = utility1.getConfiguration();
    zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
    admin = new ReplicationAdmin(conf1);
    LOG.info("Setup first Zk");
    // Base conf2 on conf1 so it gets the right zk cluster.
    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
    LOG.info("Setup second Zk");
    CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1);
    utility1.startMiniCluster(2);
    // Have a bunch of slave servers, because the inter-cluster shipping logic uses the number of
    // sinks as a component in deciding the maximum number of parallel batches to send to the peer cluster.
    utility2.startMiniCluster(4);
    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
    rpc.setClusterKey(utility2.getClusterKey());
    hbaseAdmin = ConnectionFactory.createConnection(conf1).getAdmin();
    hbaseAdmin.addReplicationPeer("2", rpc);
    HTableDescriptor table = new HTableDescriptor(tableName);
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setMaxVersions(100);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    fam = new HColumnDescriptor(noRepfamName);
    table.addFamily(fam);
    scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (HColumnDescriptor f : table.getColumnFamilies()) {
        scopes.put(f.getName(), f.getScope());
    }
    Connection connection1 = ConnectionFactory.createConnection(conf1);
    Connection connection2 = ConnectionFactory.createConnection(conf2);
    try (Admin admin1 = connection1.getAdmin()) {
        admin1.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    try (Admin admin2 = connection2.getAdmin()) {
        admin2.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    utility1.waitUntilAllRegionsAssigned(tableName);
    utility2.waitUntilAllRegionsAssigned(tableName);
    htable1 = connection1.getTable(tableName);
    htable1.setWriteBufferSize(1024);
    htable2 = connection2.getTable(tableName);
}
Also used : HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) Connection(org.apache.hadoop.hbase.client.Connection) MiniZooKeeperCluster(org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster) Admin(org.apache.hadoop.hbase.client.Admin) ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) BeforeClass(org.junit.BeforeClass)

Example 100 with HTableDescriptor

Use of org.apache.hadoop.hbase.HTableDescriptor in project hbase by apache.

In the class TestCompactionWithThroughputController, the method testGetCompactionPressureForStripedStore:

/**
   * Test the logic by which compaction pressure is calculated for a striped store.
   */
@Test
public void testGetCompactionPressureForStripedStore() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, StripeStoreEngine.class.getName());
    conf.setBoolean(StripeStoreConfig.FLUSH_TO_L0_KEY, false);
    conf.setInt(StripeStoreConfig.INITIAL_STRIPE_COUNT_KEY, 2);
    conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 4);
    conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 12);
    TEST_UTIL.startMiniCluster(1);
    Connection conn = ConnectionFactory.createConnection(conf);
    try {
        HTableDescriptor htd = new HTableDescriptor(tableName);
        htd.addFamily(new HColumnDescriptor(family));
        htd.setCompactionEnabled(false);
        TEST_UTIL.getAdmin().createTable(htd);
        TEST_UTIL.waitTableAvailable(tableName);
        HStore store = (HStore) getStoreWithName(tableName);
        assertEquals(0, store.getStorefilesCount());
        assertEquals(0.0, store.getCompactionPressure(), EPSILON);
        Table table = conn.getTable(tableName);
        for (int i = 0; i < 4; i++) {
            byte[] value1 = new byte[0];
            table.put(new Put(Bytes.toBytes(i)).addColumn(family, qualifier, value1));
            byte[] value = new byte[0];
            table.put(new Put(Bytes.toBytes(100 + i)).addColumn(family, qualifier, value));
            TEST_UTIL.flush(tableName);
        }
        assertEquals(8, store.getStorefilesCount());
        assertEquals(0.0, store.getCompactionPressure(), EPSILON);
        byte[] value5 = new byte[0];
        table.put(new Put(Bytes.toBytes(4)).addColumn(family, qualifier, value5));
        byte[] value4 = new byte[0];
        table.put(new Put(Bytes.toBytes(104)).addColumn(family, qualifier, value4));
        TEST_UTIL.flush(tableName);
        assertEquals(10, store.getStorefilesCount());
        assertEquals(0.5, store.getCompactionPressure(), EPSILON);
        byte[] value3 = new byte[0];
        table.put(new Put(Bytes.toBytes(5)).addColumn(family, qualifier, value3));
        byte[] value2 = new byte[0];
        table.put(new Put(Bytes.toBytes(105)).addColumn(family, qualifier, value2));
        TEST_UTIL.flush(tableName);
        assertEquals(12, store.getStorefilesCount());
        assertEquals(1.0, store.getCompactionPressure(), EPSILON);
        byte[] value1 = new byte[0];
        table.put(new Put(Bytes.toBytes(6)).addColumn(family, qualifier, value1));
        byte[] value = new byte[0];
        table.put(new Put(Bytes.toBytes(106)).addColumn(family, qualifier, value));
        TEST_UTIL.flush(tableName);
        assertEquals(14, store.getStorefilesCount());
        assertEquals(2.0, store.getCompactionPressure(), EPSILON);
    } finally {
        conn.close();
        TEST_UTIL.shutdownMiniCluster();
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) Configuration(org.apache.hadoop.conf.Configuration) CompactionConfiguration(org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration) StripeStoreEngine(org.apache.hadoop.hbase.regionserver.StripeStoreEngine) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Connection(org.apache.hadoop.hbase.client.Connection) HStore(org.apache.hadoop.hbase.regionserver.HStore) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
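
The expected pressure values asserted above can be puzzling at first glance. The following is a minimal sketch that reproduces those numbers from the test's configuration (2 initial stripes, a compaction minimum of 4 files, 12 blocking store files). It is back-calculated from the assertions rather than copied from the HBase source, so treat the formula as an approximation: per stripe, pressure rises linearly from the compaction minimum toward the per-stripe share of the blocking limit, and jumps to 2.0 once the total store-file count exceeds the blocking limit.

public class StripePressureSketch {

    // Hedged reconstruction of the asserted pressure values; not the actual HStore/StripeStoreFileManager code.
    static double estimatedPressure(int totalStoreFiles, int stripes, int minFiles, int blockingTotal) {
        if (totalStoreFiles > blockingTotal) {
            // Past the blocking store-file count.
            return 2.0;
        }
        // Assumes store files are spread evenly across stripes, as in the test.
        int perStripe = totalStoreFiles / stripes;
        // 12 / 2 = 6 under the test configuration.
        int blockingPerStripe = blockingTotal / stripes;
        double norm = (double) (perStripe - minFiles) / (blockingPerStripe - minFiles);
        return Math.min(Math.max(norm, 0.0), 1.0);
    }

    public static void main(String[] args) {
        // Prints 0.0, 0.5, 1.0 and 2.0 for 8, 10, 12 and 14 store files,
        // matching the assertEquals calls in the test.
        for (int count : new int[] { 8, 10, 12, 14 }) {
            System.out.println(count + " store files -> pressure " + estimatedPressure(count, 2, 4, 12));
        }
    }
}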

Aggregations

HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 867
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 555
Test (org.junit.Test): 425
TableName (org.apache.hadoop.hbase.TableName): 258
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 171
IOException (java.io.IOException): 167
Put (org.apache.hadoop.hbase.client.Put): 149
Table (org.apache.hadoop.hbase.client.Table): 134
Path (org.apache.hadoop.fs.Path): 127
Admin (org.apache.hadoop.hbase.client.Admin): 121
Configuration (org.apache.hadoop.conf.Configuration): 87
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 77
ArrayList (java.util.ArrayList): 75
FileSystem (org.apache.hadoop.fs.FileSystem): 66
Result (org.apache.hadoop.hbase.client.Result): 62
Connection (org.apache.hadoop.hbase.client.Connection): 57
Scan (org.apache.hadoop.hbase.client.Scan): 51
Cell (org.apache.hadoop.hbase.Cell): 44
Delete (org.apache.hadoop.hbase.client.Delete): 44
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 43
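
Taken together, the aggregation reflects the pattern running through the examples above: an HTableDescriptor is built with one or more HColumnDescriptor families, handed to an Admin for table creation, and the resulting Table is then written to with Put. The sketch below strings the most common pieces together. The class name, table name, family name, and cell contents are hypothetical, and it assumes an HBase cluster reachable through the default client configuration; it is an illustrative sketch, not code from the hbase project.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class HTableDescriptorUsageSketch {

    public static void main(String[] args) throws IOException {
        // Hypothetical table and family names, used only for illustration.
        TableName tableName = TableName.valueOf("example_table");
        byte[] family = Bytes.toBytes("cf");
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Describe the table with a single column family and create it if absent.
            HTableDescriptor desc = new HTableDescriptor(tableName);
            desc.addFamily(new HColumnDescriptor(family));
            if (!admin.tableExists(tableName)) {
                admin.createTable(desc);
            }
            // Write one cell so the new table contains some data.
            try (Table table = connection.getTable(tableName)) {
                Put put = new Put(Bytes.toBytes("row1"));
                put.addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("value"));
                table.put(put);
            }
        }
    }
}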