Example 76 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

The class WALPlayer, method createSubmittableJob.

/**
   * Sets up the actual job.
   *
   * @param args  The command line parameters.
   * @return The newly created job.
   * @throws IOException When setting up the job fails.
   */
public Job createSubmittableJob(String[] args) throws IOException {
    Configuration conf = getConf();
    setupTime(conf, WALInputFormat.START_TIME_KEY);
    setupTime(conf, WALInputFormat.END_TIME_KEY);
    String inputDirs = args[0];
    String[] tables = args[1].split(",");
    String[] tableMap;
    if (args.length > 2) {
        tableMap = args[2].split(",");
        if (tableMap.length != tables.length) {
            throw new IOException("The same number of tables and mapping must be provided.");
        }
    } else {
        // if not mapping is specified map each table to itself
        tableMap = tables;
    }
    conf.setStrings(TABLES_KEY, tables);
    conf.setStrings(TABLE_MAP_KEY, tableMap);
    Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + System.currentTimeMillis()));
    job.setJarByClass(WALPlayer.class);
    FileInputFormat.addInputPaths(job, inputDirs);
    job.setInputFormatClass(WALInputFormat.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);
    if (hfileOutPath != null) {
        LOG.debug("add incremental job :" + hfileOutPath + " from " + inputDirs);
        // the bulk HFile case
        if (tables.length != 1) {
            throw new IOException("Exactly one table must be specified for the bulk export option");
        }
        TableName tableName = TableName.valueOf(tables[0]);
        job.setMapperClass(WALKeyValueMapper.class);
        job.setReducerClass(KeyValueSortReducer.class);
        Path outputDir = new Path(hfileOutPath);
        FileOutputFormat.setOutputPath(job, outputDir);
        job.setMapOutputValueClass(KeyValue.class);
        try (Connection conn = ConnectionFactory.createConnection(conf);
            Table table = conn.getTable(tableName);
            RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
            HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
        }
        TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), com.google.common.base.Preconditions.class);
    } else {
        // output to live cluster
        job.setMapperClass(WALMapper.class);
        job.setOutputFormatClass(MultiTableOutputFormat.class);
        TableMapReduceUtil.addDependencyJars(job);
        TableMapReduceUtil.initCredentials(job);
        // No reducers.
        job.setNumReduceTasks(0);
    }
    String codecCls = WALCellCodec.getWALCellCodecClass(conf);
    try {
        TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Class.forName(codecCls));
    } catch (Exception e) {
        throw new IOException("Cannot determine wal codec class " + codecCls, e);
    }
    return job;
}
Also used: Path (org.apache.hadoop.fs.Path), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), Table (org.apache.hadoop.hbase.client.Table), Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), Connection (org.apache.hadoop.hbase.client.Connection), IOException (java.io.IOException), ParseException (java.text.ParseException), TableName (org.apache.hadoop.hbase.TableName), Job (org.apache.hadoop.mapreduce.Job)
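
WALPlayer is a Tool, so a job assembled by createSubmittableJob is normally launched through ToolRunner rather than called directly. A minimal launch sketch, assuming a hypothetical WAL directory and table names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.WALPlayer;
import org.apache.hadoop.util.ToolRunner;

public class WALPlayerLauncher {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Arguments mirror createSubmittableJob: <WAL input dirs> <tables> [<tableMappings>].
        // The path and table names below are placeholders for illustration.
        int exitCode = ToolRunner.run(conf, new WALPlayer(),
            new String[] { "/hbase/oldWALs", "sourceTable", "targetTable" });
        System.exit(exitCode);
    }
}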

Example 77 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

The class TestGlobalMemStoreSize, method testGlobalMemStore.

/**
   * Tests that the global memstore size on the region server equals the sum of
   * each region's memstore size.
   * @throws Exception
   */
@Test
public void testGlobalMemStore() throws Exception {
    // Start the cluster
    LOG.info("Starting cluster");
    Configuration conf = HBaseConfiguration.create();
    TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.startMiniCluster(1, regionServerNum);
    cluster = TEST_UTIL.getHBaseCluster();
    LOG.info("Waiting for active/ready master");
    cluster.waitForActiveAndReadyMaster();
    // Create a table with regions
    final TableName table = TableName.valueOf(name.getMethodName());
    byte[] family = Bytes.toBytes("family");
    LOG.info("Creating table with " + regionNum + " regions");
    Table ht = TEST_UTIL.createMultiRegionTable(table, family, regionNum);
    int numRegions = -1;
    try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(table)) {
        numRegions = r.getStartKeys().length;
    }
    assertEquals(regionNum, numRegions);
    waitForAllRegionsAssigned();
    for (HRegionServer server : getOnlineRegionServers()) {
        long globalMemStoreSize = 0;
        for (HRegionInfo regionInfo : ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) {
            globalMemStoreSize += server.getFromOnlineRegions(regionInfo.getEncodedName()).getMemstoreSize();
        }
        assertEquals(server.getRegionServerAccounting().getGlobalMemstoreDataSize(), globalMemStoreSize);
    }
    // check the global memstore size after flush
    int i = 0;
    for (HRegionServer server : getOnlineRegionServers()) {
        LOG.info("Starting flushes on " + server.getServerName() + ", size=" + server.getRegionServerAccounting().getGlobalMemstoreDataSize());
        for (HRegionInfo regionInfo : ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) {
            Region r = server.getFromOnlineRegions(regionInfo.getEncodedName());
            flush(r, server);
        }
        LOG.info("Post flush on " + server.getServerName());
        long now = System.currentTimeMillis();
        long timeout = now + 1000;
        while (server.getRegionServerAccounting().getGlobalMemstoreDataSize() != 0 && System.currentTimeMillis() < timeout) {
            Threads.sleep(10);
        }
        long size = server.getRegionServerAccounting().getGlobalMemstoreDataSize();
        if (size > 0) {
            // The memstore is still non-empty; see whether meta picked up edits
            // while our test was running.
            for (HRegionInfo regionInfo : ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) {
                Region r = server.getFromOnlineRegions(regionInfo.getEncodedName());
                long l = r.getMemstoreSize();
                if (l > 0) {
                    // Only meta could have edits at this stage. Give it another
                    // flush to clear them.
                    assertTrue(regionInfo.isMetaRegion());
                    LOG.info(r.toString() + " " + l + ", reflushing");
                    r.flush(true);
                }
            }
        }
        size = server.getRegionServerAccounting().getGlobalMemstoreDataSize();
        assertEquals("Server=" + server.getServerName() + ", i=" + i++, 0, size);
    }
    ht.close();
    TEST_UTIL.shutdownMiniCluster();
}
Also used: RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), Table (org.apache.hadoop.hbase.client.Table), Configuration (org.apache.hadoop.conf.Configuration), Region (org.apache.hadoop.hbase.regionserver.Region), HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer), Test (org.junit.Test)
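
The region-counting idiom from the test works in any client: getStartKeys() returns exactly one entry per region. A minimal standalone sketch, assuming a hypothetical table name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionCounter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // "myTable" is a placeholder; substitute a real table name.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("myTable"))) {
            // One start key per region, so the array length is the region count.
            System.out.println("Region count: " + locator.getStartKeys().length);
        }
    }
}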

Example 78 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

The class TestMetaTableAccessor, method testRetrying.

/**
   * Does {@link MetaTableAccessor#getRegion(Connection, byte[])} and a write
   * against hbase:meta while its hosting server is restarted, to prove that our
   * retrying works.
   * @throws IOException
   * @throws InterruptedException
   */
@Test
public void testRetrying() throws IOException, InterruptedException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    LOG.info("Started " + tableName);
    Table t = UTIL.createMultiRegionTable(tableName, HConstants.CATALOG_FAMILY);
    int regionCount = -1;
    try (RegionLocator r = UTIL.getConnection().getRegionLocator(tableName)) {
        regionCount = r.getStartKeys().length;
    }
    // Test it works getting a region from just made user table.
    final List<HRegionInfo> regions = testGettingTableRegions(connection, tableName, regionCount);
    MetaTask reader = new MetaTask(connection, "reader") {

        @Override
        void metaTask() throws Throwable {
            testGetRegion(connection, regions.get(0));
            LOG.info("Read " + regions.get(0).getEncodedName());
        }
    };
    MetaTask writer = new MetaTask(connection, "writer") {

        @Override
        void metaTask() throws Throwable {
            MetaTableAccessor.addRegionToMeta(connection, regions.get(0));
            LOG.info("Wrote " + regions.get(0).getEncodedName());
        }
    };
    reader.start();
    writer.start();
    // We're going to check how long this takes. If it takes too long, we
    // consider it a failure. We can't put the timeout in the @Test annotation
    // because we want to shut the threads down cleanly.
    final long timeOut = 180000;
    long startTime = System.currentTimeMillis();
    try {
        // Make sure reader and writer are working.
        assertTrue(reader.isProgressing());
        assertTrue(writer.isProgressing());
        // Force hbase:meta to move; the reader and writer will need to retry.
        for (int i = 0; i < 2; i++) {
            LOG.info("Restart=" + i);
            UTIL.ensureSomeRegionServersAvailable(2);
            int index = -1;
            do {
                index = UTIL.getMiniHBaseCluster().getServerWithMeta();
            } while (index == -1 && System.currentTimeMillis() < startTime + timeOut);
            if (index != -1) {
                UTIL.getMiniHBaseCluster().abortRegionServer(index);
                UTIL.getMiniHBaseCluster().waitOnRegionServer(index);
            }
        }
        assertTrue("reader: " + reader.toString(), reader.isProgressing());
        assertTrue("writer: " + writer.toString(), writer.isProgressing());
    } finally {
        reader.stop = true;
        writer.stop = true;
        reader.join();
        writer.join();
        t.close();
    }
    long exeTime = System.currentTimeMillis() - startTime;
    assertTrue("Timeout: test took " + exeTime / 1000 + " sec", exeTime < timeOut);
}
Also used: RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), Table (org.apache.hadoop.hbase.client.Table), IOException (java.io.IOException), Test (org.junit.Test)
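
The retrying being tested depends on clients re-resolving a region's location after hbase:meta moves. A minimal sketch of that lookup, assuming a hypothetical table name and row key; passing reload=true bypasses the client's cached location:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RowLocator {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Table and row key are placeholders for illustration.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("myTable"))) {
            // reload=true forces a fresh lookup instead of using the cached location,
            // which is how a client recovers after the hosting server restarts.
            HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("someRow"), true);
            System.out.println("Row is hosted on " + loc.getServerName());
        }
    }
}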

Example 79 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

The class TestRegionServerOnlineConfigChange, method setUp.

@BeforeClass
public static void setUp() throws Exception {
    conf = hbaseTestingUtility.getConfiguration();
    hbaseTestingUtility.startMiniCluster(1, 1);
    t1 = hbaseTestingUtility.createTable(TABLE1, COLUMN_FAMILY1);
    try (RegionLocator locator = hbaseTestingUtility.getConnection().getRegionLocator(TABLE1)) {
        HRegionInfo firstHRI = locator.getAllRegionLocations().get(0).getRegionInfo();
        r1name = firstHRI.getRegionName();
        rs1 = hbaseTestingUtility.getHBaseCluster().getRegionServer(hbaseTestingUtility.getHBaseCluster().getServerWith(r1name));
        r1 = rs1.getRegion(r1name);
    }
}
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), BeforeClass (org.junit.BeforeClass)
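
getAllRegionLocations(), used above to grab the first region, can equally enumerate every region of a table. A minimal sketch, assuming a hypothetical table name (getRegionInfo() matches the HBase 1.x API used throughout these examples):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLister {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // "t1" is a placeholder table name.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("t1"))) {
            for (HRegionLocation loc : locator.getAllRegionLocations()) {
                // Print each region's name alongside the server hosting it.
                System.out.println(loc.getRegionInfo().getRegionNameAsString()
                    + " on " + loc.getServerName());
            }
        }
    }
}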

Example 80 with RegionLocator

Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

The class TestScannerWithBulkload, method testBulkLoadWithParallelScan.

@Test
public void testBulkLoadWithParallelScan() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final long l = System.currentTimeMillis();
    final Admin admin = TEST_UTIL.getAdmin();
    createTable(admin, tableName);
    Scan scan = createScan();
    scan.setCaching(1);
    final Table table = init(admin, l, scan, tableName);
    // use bulkload
    final Path hfilePath = writeToHFile(l, "/temp/testBulkLoadWithParallelScan/", "/temp/testBulkLoadWithParallelScan/col/file", false);
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
    final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
    ResultScanner scanner = table.getScanner(scan);
    Result result = scanner.next();
    // Create a scanner and then do bulk load
    final CountDownLatch latch = new CountDownLatch(1);
    new Thread() {

        @Override
        public void run() {
            try {
                Put put1 = new Put(Bytes.toBytes("row5"));
                put1.add(new KeyValue(Bytes.toBytes("row5"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, Bytes.toBytes("version0")));
                table.put(put1);
                try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
                    bulkload.doBulkLoad(hfilePath, admin, table, locator);
                }
            } catch (IOException e) {
                // Swallowed deliberately (TableNotFoundException is an IOException);
                // the main thread's assertions surface a failed load.
            } finally {
                // Count down in a finally block so the main thread cannot block
                // forever on latch.await() if the put or bulk load throws.
                latch.countDown();
            }
        }
    }.start();
    latch.await();
    // By the time we call next(), the bulk-loaded files have also been added
    // to the KeyValue scanner.
    scanAfterBulkLoad(scanner, result, "version1");
    scanner.close();
    table.close();
}
Also used: Path (org.apache.hadoop.fs.Path), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), Table (org.apache.hadoop.hbase.client.Table), ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), KeyValue (org.apache.hadoop.hbase.KeyValue), Configuration (org.apache.hadoop.conf.Configuration), IOException (java.io.IOException), Admin (org.apache.hadoop.hbase.client.Admin), CountDownLatch (java.util.concurrent.CountDownLatch), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result), TableName (org.apache.hadoop.hbase.TableName), TableNotFoundException (org.apache.hadoop.hbase.TableNotFoundException), LoadIncrementalHFiles (org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles), Scan (org.apache.hadoop.hbase.client.Scan), Test (org.junit.Test)
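
Stripped of the test scaffolding, the bulk-load call follows a simple pattern: open the Admin, Table, and RegionLocator from one Connection and hand them to LoadIncrementalHFiles. A minimal sketch, assuming a hypothetical table name and an HFile directory already produced by HFileOutputFormat2:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class BulkLoader {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tableName = TableName.valueOf("myTable"); // placeholder name
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tableName);
             RegionLocator locator = conn.getRegionLocator(tableName)) {
            LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
            // "/tmp/hfiles" is a placeholder for the HFileOutputFormat2 output directory.
            loader.doBulkLoad(new Path("/tmp/hfiles"), admin, table, locator);
        }
    }
}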

Aggregations

RegionLocator (org.apache.hadoop.hbase.client.RegionLocator): 84 uses
Table (org.apache.hadoop.hbase.client.Table): 59 uses
Test (org.junit.Test): 49 uses
TableName (org.apache.hadoop.hbase.TableName): 39 uses
Admin (org.apache.hadoop.hbase.client.Admin): 33 uses
Path (org.apache.hadoop.fs.Path): 31 uses
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation): 30 uses
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 29 uses
Connection (org.apache.hadoop.hbase.client.Connection): 25 uses
Configuration (org.apache.hadoop.conf.Configuration): 21 uses
IOException (java.io.IOException): 19 uses
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 15 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 14 uses
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 13 uses
ServerName (org.apache.hadoop.hbase.ServerName): 13 uses
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 12 uses
ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection): 10 uses
Put (org.apache.hadoop.hbase.client.Put): 10 uses
ArrayList (java.util.ArrayList): 9 uses
Result (org.apache.hadoop.hbase.client.Result): 8 uses