Example 6 with FSTableDescriptors

Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.

From the class TestFSTableDescriptorForceCreation, method testShouldNotCreateTheSameTableDescriptorIfForcefulCreationIsFalse.

@Test
public void testShouldNotCreateTheSameTableDescriptorIfForcefulCreationIsFalse() throws IOException {
    final String name = this.name.getMethodName();
    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    // Clean up detritus left by any previous test runs.
    Path rootdir = new Path(UTIL.getDataTestDir(), name);
    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
    fstd.update(htd);
    assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
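The force flag's contract is easy to exercise in isolation. Below is a minimal, self-contained sketch (not taken from the hbase sources; the table name and scratch directory are hypothetical) showing that re-creating an existing descriptor only succeeds when forceful creation is requested:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class ForceFlagSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical scratch directory; point this at a throwaway location.
        Path rootdir = new Path("/tmp/fstd-force-sketch");
        FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
        TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("sketch")).build();
        // No descriptor on disk yet, so plain creation succeeds.
        System.out.println("first create: " + fstd.createTableDescriptor(htd, false));
        // A descriptor now exists: without force the call reports failure ...
        System.out.println("re-create, force=false: " + fstd.createTableDescriptor(htd, false));
        // ... and with force it overwrites and reports success.
        System.out.println("re-create, force=true: " + fstd.createTableDescriptor(htd, true));
    }
}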

Example 7 with FSTableDescriptors

Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.

From the class TestRestoreSnapshotHelper, method testRestore.

/**
 * Execute the restore operation
 * @param snapshotDir The snapshot directory to use as "restore source"
 * @param sd The snapshot descriptor
 * @param htdClone The TableDescriptor of the table to restore/clone.
 */
private void testRestore(final Path snapshotDir, final SnapshotDescription sd, final TableDescriptor htdClone) throws IOException {
    LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
    CommonFSUtils.logFileSystemState(fs, rootDir, LOG);
    new FSTableDescriptors(conf).createTableDescriptor(htdClone);
    RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sd, htdClone);
    helper.restoreHdfsRegions();
    LOG.debug("post-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
    CommonFSUtils.logFileSystemState(fs, rootDir, LOG);
}
Also used : FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors)
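Before restoreHdfsRegions() can do anything, a descriptor for the target table must already exist under the root directory; that is what the createTableDescriptor call above provides. A minimal sketch of just that preparatory step, assuming a vanilla configuration and a hypothetical clone-table name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class PersistDescriptorSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical clone table; in the test above this role is played by htdClone.
        TableDescriptor htdClone =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("cloneSketch")).build();
        // Persists the descriptor under the configured hbase.rootdir so the
        // restore/clone machinery can later look it up by table name.
        new FSTableDescriptors(conf).createTableDescriptor(htdClone);
    }
}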

Example 8 with FSTableDescriptors

Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.

From the class TestGetClosestAtOrBefore, method testUsingMetaAndBinary.

@Test
public void testUsingMetaAndBinary() throws IOException {
    Path rootdir = UTIL.getDataTestDirOnTestFS();
    // Raise the flush size, else we bind up when using the default catalog flush size of 16k.
    TableDescriptors tds = new FSTableDescriptors(UTIL.getConfiguration());
    FSTableDescriptors.tryUpdateMetaTableDescriptor(UTIL.getConfiguration());
    TableDescriptor td = tds.get(TableName.META_TABLE_NAME);
    td = TableDescriptorBuilder.newBuilder(td).setMemStoreFlushSize(64 * 1024 * 1024).build();
    HRegion mr = HBaseTestingUtil.createRegionAndWAL(RegionInfoBuilder.FIRST_META_REGIONINFO, rootdir, conf, td);
    try {
        // Write rows for three tables 'A', 'B', and 'C'.
        for (char c = 'A'; c < 'D'; c++) {
            TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("" + c)).build();
            final int last = 128;
            final int interval = 2;
            for (int i = 0; i <= last; i += interval) {
                RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName())
                    .setStartKey(i == 0 ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes((byte) i))
                    // Parenthesize (i + interval) before the byte cast so the end key is a
                    // single byte, matching the one-byte start keys.
                    .setEndKey(i == last ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes((byte) (i + interval)))
                    .build();
                Put put = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
                put.setDurability(Durability.SKIP_WAL);
                LOG.info("Put {}", put);
                mr.put(put);
            }
        }
        InternalScanner s = mr.getScanner(new Scan());
        try {
            List<Cell> keys = new ArrayList<>();
            while (s.next(keys)) {
                LOG.info("Scan {}", keys);
                keys.clear();
            }
        } finally {
            s.close();
        }
        findRow(mr, 'C', 44, 44);
        findRow(mr, 'C', 45, 44);
        findRow(mr, 'C', 46, 46);
        findRow(mr, 'C', 43, 42);
        mr.flush(true);
        findRow(mr, 'C', 44, 44);
        findRow(mr, 'C', 45, 44);
        findRow(mr, 'C', 46, 46);
        findRow(mr, 'C', 43, 42);
        // Now delete 'C' and make sure I don't get entries from 'B'.
        byte[] firstRowInC = RegionInfo.createRegionName(TableName.valueOf("" + 'C'), HConstants.EMPTY_BYTE_ARRAY, HConstants.ZEROES, false);
        Scan scan = new Scan().withStartRow(firstRowInC);
        s = mr.getScanner(scan);
        try {
            List<Cell> keys = new ArrayList<>();
            while (s.next(keys)) {
                LOG.info("Delete {}", keys);
                mr.delete(new Delete(CellUtil.cloneRow(keys.get(0))));
                keys.clear();
            }
        } finally {
            s.close();
        }
        // Assert we get null back (pass -1).
        findRow(mr, 'C', 44, -1);
        findRow(mr, 'C', 45, -1);
        findRow(mr, 'C', 46, -1);
        findRow(mr, 'C', 43, -1);
        mr.flush(true);
        findRow(mr, 'C', 44, -1);
        findRow(mr, 'C', 45, -1);
        findRow(mr, 'C', 46, -1);
        findRow(mr, 'C', 43, -1);
    } finally {
        HBaseTestingUtil.closeRegionAndWAL(mr);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Delete(org.apache.hadoop.hbase.client.Delete) ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) TableDescriptors(org.apache.hadoop.hbase.TableDescriptors) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
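The reason deleting table 'C' cannot surface entries from 'B' is the shape of the meta row key: it is the region name, which starts with the table name, so all of a table's rows form one contiguous lexicographic range. A small sketch (not from the test) that prints two such row keys and their ordering:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRowKeySketch {
    public static void main(String[] args) {
        byte[] firstRowInB = RegionInfo.createRegionName(
            TableName.valueOf("B"), HConstants.EMPTY_BYTE_ARRAY, HConstants.ZEROES, false);
        byte[] firstRowInC = RegionInfo.createRegionName(
            TableName.valueOf("C"), HConstants.EMPTY_BYTE_ARRAY, HConstants.ZEROES, false);
        System.out.println(Bytes.toStringBinary(firstRowInB));
        System.out.println(Bytes.toStringBinary(firstRowInC));
        // Negative: every 'B' row sorts strictly before every 'C' row.
        System.out.println(Bytes.compareTo(firstRowInB, firstRowInC));
    }
}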

Example 9 with FSTableDescriptors

Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.

From the class TestDefaultMemStore, method testShouldFlushMeta.

@Test
public void testShouldFlushMeta() throws Exception {
    // Write an edit to META and ensure that shouldFlush (which the periodic
    // memstore flusher invokes) returns true after SYSTEM_CACHE_FLUSH_INTERVAL,
    // even though MEMSTORE_PERIODIC_FLUSH_INTERVAL is set to a higher value.
    Configuration conf = new Configuration();
    conf.setInt(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, HRegion.SYSTEM_CACHE_FLUSH_INTERVAL * 10);
    HBaseTestingUtil hbaseUtility = new HBaseTestingUtil(conf);
    Path testDir = hbaseUtility.getDataTestDir();
    EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
    EnvironmentEdgeManager.injectEdge(edge);
    edge.setCurrentTimeMillis(1234);
    WALFactory wFactory = new WALFactory(conf, "1234");
    TableDescriptors tds = new FSTableDescriptors(conf);
    FSTableDescriptors.tryUpdateMetaTableDescriptor(conf);
    HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir, conf, tds.get(TableName.META_TABLE_NAME), wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO));
    // Parameterized tests add a [#] suffix; replace the '[' and ']' characters
    // so the result is a valid table name.
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf(name.getMethodName().replaceAll("[\\[\\]]", "_")))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("foo")).build();
    RegionInfo hri = RegionInfoBuilder.newBuilder(desc.getTableName())
        .setStartKey(Bytes.toBytes("row_0200")).setEndKey(Bytes.toBytes("row_0300")).build();
    HRegion r = HRegion.createHRegion(hri, testDir, conf, desc, wFactory.getWAL(hri));
    addRegionToMETA(meta, r);
    edge.setCurrentTimeMillis(1234 + 100);
    StringBuilder sb = new StringBuilder();
    assertFalse(meta.shouldFlush(sb));
    edge.setCurrentTimeMillis(edge.currentTime() + HRegion.SYSTEM_CACHE_FLUSH_INTERVAL + 1);
    assertTrue(meta.shouldFlush(sb));
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) TableDescriptors(org.apache.hadoop.hbase.TableDescriptors) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
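The test controls time through an injected EnvironmentEdge rather than sleeping. hbase-common ships ManualEnvironmentEdge, which serves the same purpose as the test-local EnvironmentEdgeForMemstoreTest above; a minimal sketch of the injection pattern:

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

public class ClockControlSketch {
    public static void main(String[] args) {
        ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
        edge.setValue(1234);
        EnvironmentEdgeManager.injectEdge(edge);
        // Every HBase-internal clock read now returns the injected value.
        System.out.println(EnvironmentEdgeManager.currentTime()); // 1234
        edge.incValue(100);
        System.out.println(EnvironmentEdgeManager.currentTime()); // 1334
        // Restore the default system-clock edge when finished.
        EnvironmentEdgeManager.reset();
    }
}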

Example 10 with FSTableDescriptors

Use of org.apache.hadoop.hbase.util.FSTableDescriptors in project hbase by apache.

From the class TestLogRollingNoCluster, method testContendedLogRolling.

/**
 * Spin up a bunch of threads and have them all append to a WAL. Roll the
 * WAL frequently to try to trigger an NPE.
 * @throws Exception if the DFS cluster, WAL, or appender threads fail
 */
@Test
public void testContendedLogRolling() throws Exception {
    TEST_UTIL.startMiniDFSCluster(3);
    Path dir = TEST_UTIL.getDataTestDirOnTestFS();
    // The implementation needs to know the 'handler' count.
    TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, NUM_THREADS);
    final Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
    conf.set(WALFactory.WAL_PROVIDER, "filesystem");
    CommonFSUtils.setRootDir(conf, dir);
    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(TEST_UTIL.getConfiguration());
    FSTableDescriptors.tryUpdateMetaTableDescriptor(TEST_UTIL.getConfiguration());
    TableDescriptor metaTableDescriptor = fsTableDescriptors.get(TableName.META_TABLE_NAME);
    conf.set("hbase.regionserver.hlog.writer.impl", HighLatencySyncWriter.class.getName());
    final WALFactory wals = new WALFactory(conf, TestLogRollingNoCluster.class.getName());
    final WAL wal = wals.getWAL(null);
    final int numThreads = NUM_THREADS;
    Appender[] appenders = new Appender[numThreads];
    try {
        for (int i = 0; i < numThreads; i++) {
            // Have each appending thread write 'count' entries
            appenders[i] = new Appender(metaTableDescriptor, wal, i, NUM_ENTRIES);
        }
        for (int i = 0; i < numThreads; i++) {
            appenders[i].start();
        }
        for (int i = 0; i < numThreads; i++) {
            // ensure that all threads are joined before closing the wal
            appenders[i].join();
        }
    } finally {
        wals.close();
    }
    for (int i = 0; i < numThreads; i++) {
        assertFalse("Error: " + appenders[i].getException(), appenders[i].isException());
    }
    TEST_UTIL.shutdownMiniDFSCluster();
}
Also used : Path(org.apache.hadoop.fs.Path) WAL(org.apache.hadoop.hbase.wal.WAL) Configuration(org.apache.hadoop.conf.Configuration) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
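The shutdown ordering is the point of the finally block: every appender is joined before wals.close() runs, so no thread appends to a closed WAL. That ordering can be shown without a cluster; the sketch below uses plain Java threads and an AtomicInteger standing in for the WAL (all names hypothetical):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class JoinBeforeCloseSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger sink = new AtomicInteger(); // stands in for the shared WAL
        List<Thread> writers = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            Thread t = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    sink.incrementAndGet(); // stands in for an appending write
                }
            });
            writers.add(t);
            t.start();
        }
        // Join every writer before "closing" the sink, mirroring the test:
        // appenders[i].join() completes before wals.close() runs.
        for (Thread t : writers) {
            t.join();
        }
        System.out.println("entries appended: " + sink.get()); // 4000
    }
}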

Aggregations

FSTableDescriptors (org.apache.hadoop.hbase.util.FSTableDescriptors) : 21
Path (org.apache.hadoop.fs.Path) : 16
Test (org.junit.Test) : 10
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor) : 6
FileSystem (org.apache.hadoop.fs.FileSystem) : 5
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) : 5
MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem) : 4
Configuration (org.apache.hadoop.conf.Configuration) : 3
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) : 3
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) : 3
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration) : 2
HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil) : 2
HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility) : 2
TableDescriptors (org.apache.hadoop.hbase.TableDescriptors) : 2
WALFactory (org.apache.hadoop.hbase.wal.WALFactory) : 2
IOException (java.io.IOException) : 1
ArrayList (java.util.ArrayList) : 1
Cell (org.apache.hadoop.hbase.Cell) : 1
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor) : 1
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster) : 1