Example 26 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache, from the before method of class TestRegionServerNoMaster.

@BeforeClass
public static void before() throws Exception {
    HTU.startMiniCluster(NB_SERVERS);
    final TableName tableName = TableName.valueOf(TestRegionServerNoMaster.class.getSimpleName());
    // Create table then get the single region for our new table.
    table = HTU.createTable(tableName, HConstants.CATALOG_FAMILY);
    Put p = new Put(row);
    p.addColumn(HConstants.CATALOG_FAMILY, row, row);
    table.put(p);
    try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {
        hri = locator.getRegionLocation(row, false).getRegionInfo();
    }
    regionName = hri.getRegionName();
    stopMasterAndAssignMeta(HTU);
}
Also used: TableName (org.apache.hadoop.hbase.TableName), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), Put (org.apache.hadoop.hbase.client.Put), BeforeClass (org.junit.BeforeClass)
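Distilled from the example above, the core lookup pattern is small: open a RegionLocator in try-with-resources and resolve a single row to its region. A minimal sketch, assuming an already-open Connection (the class and method names here are illustrative, not from the test):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLookup {
    // Resolve a row key to the name of the region that holds it.
    public static byte[] regionNameForRow(Connection connection, TableName tableName, byte[] row)
            throws IOException {
        // try-with-resources closes the locator; the shared Connection stays open for reuse
        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
            // 'false' accepts a cached location; pass 'true' to force a fresh meta lookup
            return locator.getRegionLocation(row, false).getRegionInfo().getRegionName();
        }
    }
}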

Example 27 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache, from the testRegionOffline method of class TestAccessController.

@Test(timeout = 180000)
public void testRegionOffline() throws Exception {
    List<HRegionLocation> regions;
    try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE)) {
        regions = locator.getAllRegionLocations();
    }
    HRegionLocation location = regions.get(0);
    final HRegionInfo hri = location.getRegionInfo();
    AccessTestAction action = new AccessTestAction() {

        @Override
        public Object run() throws Exception {
            ACCESS_CONTROLLER.preRegionOffline(ObserverContext.createAndPrepare(CP_ENV, null), hri);
            return null;
        }
    };
    verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN);
    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE);
}
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), HRegionLocation (org.apache.hadoop.hbase.HRegionLocation), Test (org.junit.Test)
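The getAllRegionLocations() call used above returns every region of the table in one pass, paired with the server currently hosting it. A minimal standalone sketch, again assuming an open Connection (names are illustrative):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionListing {
    // Print every region of the table together with the server hosting it.
    public static void printRegions(Connection connection, TableName tableName) throws IOException {
        List<HRegionLocation> locations;
        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
            locations = locator.getAllRegionLocations();
        }
        for (HRegionLocation location : locations) {
            System.out.println(location.getRegionInfo().getRegionNameAsString()
                + " on " + location.getServerName());
        }
    }
}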

Example 28 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache, from the beforeClass method of class TestRegionReplicaReplicationEndpointNoMaster.

@BeforeClass
public static void beforeClass() throws Exception {
    Configuration conf = HTU.getConfiguration();
    conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
    conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, false);
    // install WALObserver coprocessor for tests
    String walCoprocs = HTU.getConfiguration().get(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY);
    if (walCoprocs == null) {
        walCoprocs = WALEditCopro.class.getName();
    } else {
        walCoprocs += "," + WALEditCopro.class.getName();
    }
    HTU.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, walCoprocs);
    HTU.startMiniCluster(NB_SERVERS);
    // Create table then get the single region for our new table.
    HTableDescriptor htd = HTU.createTableDescriptor(tableName.getNameAsString());
    table = HTU.createTable(htd, new byte[][] { f }, null);
    try (RegionLocator locator = HTU.getConnection().getRegionLocator(tableName)) {
        hriPrimary = locator.getRegionLocation(row, false).getRegionInfo();
    }
    // mock a secondary region info to open
    hriSecondary = new HRegionInfo(hriPrimary.getTable(), hriPrimary.getStartKey(), hriPrimary.getEndKey(), hriPrimary.isSplit(), hriPrimary.getRegionId(), 1);
    // No master
    TestRegionServerNoMaster.stopMasterAndAssignMeta(HTU);
    rs0 = HTU.getMiniHBaseCluster().getRegionServer(0);
    rs1 = HTU.getMiniHBaseCluster().getRegionServer(1);
}
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), Configuration (org.apache.hadoop.conf.Configuration), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), BeforeClass (org.junit.BeforeClass)
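As an aside, the hand-built secondary HRegionInfo in beforeClass can also be derived with the RegionReplicaUtil helper. A sketch, hedged because the helper's signature varies across HBase versions (branch-1 accepts and returns HRegionInfo; newer lines use the RegionInfo interface):

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;

public class ReplicaInfo {
    // Sketch: derive replica 1's region info from the primary's, equivalent in
    // effect to the manual HRegionInfo constructor call in beforeClass above.
    public static HRegionInfo secondaryOf(HRegionInfo primary) {
        return RegionReplicaUtil.getRegionInfoForReplica(primary, 1);
    }
}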

Example 29 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache, from the testReplicateEntriesForHFiles method of class TestReplicationSink.

/**
   * Test replicateEntries with a bulk load entry for 25 HFiles
   */
@Test
public void testReplicateEntriesForHFiles() throws Exception {
    Path dir = TEST_UTIL.getDataTestDirOnTestFS("testReplicateEntries");
    Path familyDir = new Path(dir, Bytes.toString(FAM_NAME1));
    int numRows = 10;
    List<Path> p = new ArrayList<>(1);
    // 1. Generate 25 hfile ranges
    Random rng = new SecureRandom();
    Set<Integer> numbers = new HashSet<>();
    while (numbers.size() < 50) {
        numbers.add(rng.nextInt(1000));
    }
    List<Integer> numberList = new ArrayList<>(numbers);
    Collections.sort(numberList);
    Map<String, Long> storeFilesSize = new HashMap<>(1);
    // 2. Create 25 hfiles
    Configuration conf = TEST_UTIL.getConfiguration();
    FileSystem fs = dir.getFileSystem(conf);
    Iterator<Integer> numbersItr = numberList.iterator();
    for (int i = 0; i < 25; i++) {
        Path hfilePath = new Path(familyDir, "hfile_" + i);
        HFileTestUtil.createHFile(conf, fs, hfilePath, FAM_NAME1, FAM_NAME1, Bytes.toBytes(numbersItr.next()), Bytes.toBytes(numbersItr.next()), numRows);
        p.add(hfilePath);
        storeFilesSize.put(hfilePath.getName(), fs.getFileStatus(hfilePath).getLen());
    }
    // 3. Create a BulkLoadDescriptor and a WALEdit
    Map<byte[], List<Path>> storeFiles = new HashMap<>(1);
    storeFiles.put(FAM_NAME1, p);
    WALEdit edit = null;
    WALProtos.BulkLoadDescriptor loadDescriptor = null;
    try (Connection c = ConnectionFactory.createConnection(conf);
        RegionLocator l = c.getRegionLocator(TABLE_NAME1)) {
        HRegionInfo regionInfo = l.getAllRegionLocations().get(0).getRegionInfo();
        loadDescriptor = ProtobufUtil.toBulkLoadDescriptor(TABLE_NAME1, UnsafeByteOperations.unsafeWrap(regionInfo.getEncodedNameAsBytes()), storeFiles, storeFilesSize, 1);
        edit = WALEdit.createBulkLoadEvent(regionInfo, loadDescriptor);
    }
    List<WALEntry> entries = new ArrayList<>(1);
    // 4. Create a WALEntryBuilder
    WALEntry.Builder builder = createWALEntryBuilder(TABLE_NAME1);
    // 5. Copy the hfile to the path as it is in reality
    for (int i = 0; i < 25; i++) {
        String pathToHfileFromNS = new StringBuilder(100).append(TABLE_NAME1.getNamespaceAsString()).append(Path.SEPARATOR).append(Bytes.toString(TABLE_NAME1.getName())).append(Path.SEPARATOR).append(Bytes.toString(loadDescriptor.getEncodedRegionName().toByteArray())).append(Path.SEPARATOR).append(Bytes.toString(FAM_NAME1)).append(Path.SEPARATOR).append("hfile_" + i).toString();
        String dst = baseNamespaceDir + Path.SEPARATOR + pathToHfileFromNS;
        FileUtil.copy(fs, p.get(0), fs, new Path(dst), false, conf);
    }
    entries.add(builder.build());
    try (ResultScanner scanner = table1.getScanner(new Scan())) {
        // 6. Assert no existing data in table
        assertEquals(0, scanner.next(numRows).length);
    }
    // 7. Replicate the bulk loaded entry
    SINK.replicateEntries(entries, CellUtil.createCellScanner(edit.getCells().iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    try (ResultScanner scanner = table1.getScanner(new Scan())) {
        // 8. Assert data is replicated
        assertEquals(numRows, scanner.next(numRows).length);
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Random (java.util.Random), SecureRandom (java.security.SecureRandom), WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit), FileSystem (org.apache.hadoop.fs.FileSystem), List (java.util.List), HashSet (java.util.HashSet), Path (org.apache.hadoop.fs.Path), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), Connection (org.apache.hadoop.hbase.client.Connection), WALProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos), Scan (org.apache.hadoop.hbase.client.Scan), WALEntry (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry), Test (org.junit.Test)
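Step 5 above assembles the namespace-relative HFile path with a StringBuilder and explicit Path.SEPARATOR tokens; the same namespace/table/encodedRegion/family/fileName layout can be expressed with Path joins, which avoids separator bookkeeping. A sketch under the assumption of a default-namespace table (the helper class and method are hypothetical):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class BulkLoadPaths {
    // Hypothetical helper: namespace/table/encodedRegion/family/fileName under the base dir.
    public static Path hfilePath(Path baseNamespaceDir, TableName table,
            String encodedRegionName, byte[] family, String fileName) {
        Path p = new Path(baseNamespaceDir, table.getNamespaceAsString());
        // getQualifierAsString() matches Bytes.toString(getName()) for default-namespace tables
        p = new Path(p, table.getQualifierAsString());
        p = new Path(p, encodedRegionName);
        p = new Path(p, Bytes.toString(family));
        return new Path(p, fileName);
    }
}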

Example 30 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache, from the deleteRegion method of class OfflineMetaRebuildTestCore.

protected void deleteRegion(Configuration conf, final Table tbl, byte[] startKey, byte[] endKey) throws IOException {
    LOG.info("Before delete:");
    HTableDescriptor htd = tbl.getTableDescriptor();
    dumpMeta(htd);
    List<HRegionLocation> regions;
    try (RegionLocator rl = connection.getRegionLocator(tbl.getName())) {
        regions = rl.getAllRegionLocations();
    }
    for (HRegionLocation e : regions) {
        HRegionInfo hri = e.getRegionInfo();
        ServerName hsa = e.getServerName();
        if (Bytes.compareTo(hri.getStartKey(), startKey) == 0 && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {
            LOG.info("RegionName: " + hri.getRegionNameAsString());
            byte[] deleteRow = hri.getRegionName();
            TEST_UTIL.getAdmin().unassign(deleteRow, true);
            LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
            Path rootDir = FSUtils.getRootDir(conf);
            FileSystem fs = rootDir.getFileSystem(conf);
            Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName());
            fs.delete(p, true);
            try (Table meta = this.connection.getTable(TableName.META_TABLE_NAME)) {
                Delete delete = new Delete(deleteRow);
                meta.delete(delete);
            }
        }
        LOG.info(hri.toString() + hsa.toString());
    }
    TEST_UTIL.getMetaTableRows(htd.getTableName());
    LOG.info("After delete:");
    dumpMeta(htd);
}
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), Path (org.apache.hadoop.fs.Path), Delete (org.apache.hadoop.hbase.client.Delete), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), HRegionLocation (org.apache.hadoop.hbase.HRegionLocation), Table (org.apache.hadoop.hbase.client.Table), ServerName (org.apache.hadoop.hbase.ServerName), FileSystem (org.apache.hadoop.fs.FileSystem), HRegionFileSystem (org.apache.hadoop.hbase.regionserver.HRegionFileSystem), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
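The meta-row deletion buried inside the loop above is the piece most likely to be reused on its own; a minimal sketch, assuming an open Connection (the method name is illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;

public class MetaCleanup {
    // Remove one region's row from hbase:meta, as deleteRegion does above.
    public static void deleteMetaRow(Connection connection, byte[] regionName) throws IOException {
        try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
            meta.delete(new Delete(regionName));
        }
    }
}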

Aggregations

RegionLocator (org.apache.hadoop.hbase.client.RegionLocator): 84 uses
Table (org.apache.hadoop.hbase.client.Table): 59 uses
Test (org.junit.Test): 49 uses
TableName (org.apache.hadoop.hbase.TableName): 39 uses
Admin (org.apache.hadoop.hbase.client.Admin): 33 uses
Path (org.apache.hadoop.fs.Path): 31 uses
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation): 30 uses
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 29 uses
Connection (org.apache.hadoop.hbase.client.Connection): 25 uses
Configuration (org.apache.hadoop.conf.Configuration): 21 uses
IOException (java.io.IOException): 19 uses
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 15 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 14 uses
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 13 uses
ServerName (org.apache.hadoop.hbase.ServerName): 13 uses
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 12 uses
ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection): 10 uses
Put (org.apache.hadoop.hbase.client.Put): 10 uses
ArrayList (java.util.ArrayList): 9 uses
Result (org.apache.hadoop.hbase.client.Result): 8 uses