
Example 66 with Result

Use of org.apache.hadoop.hbase.client.Result in project hbase by apache.

From the class TestDistributedLogSplitting, method testSameVersionUpdatesRecoveryWithCompaction:

@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testSameVersionUpdatesRecoveryWithCompaction() throws Exception {
    LOG.info("testSameVersionUpdatesRecoveryWithCompaction");
    conf.setLong("hbase.regionserver.hlog.blocksize", 15 * 1024);
    conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
    conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 30 * 1024);
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    startCluster(NUM_RS);
    final AtomicLong sequenceId = new AtomicLong(100);
    final int NUM_REGIONS_TO_CREATE = 40;
    final int NUM_LOG_LINES = 2000;
    // turn off load balancing to prevent regions from moving around;
    // otherwise they will consume recovered.edits
    master.balanceSwitch(false);
    List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
    final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
    Table ht = installTable(zkw, "table", "family", NUM_REGIONS_TO_CREATE);
    try {
        List<HRegionInfo> regions = null;
        HRegionServer hrs = null;
        for (int i = 0; i < NUM_RS; i++) {
            boolean isCarryingMeta = false;
            hrs = rsts.get(i).getRegionServer();
            regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
            for (HRegionInfo region : regions) {
                if (region.isMetaRegion()) {
                    isCarryingMeta = true;
                    break;
                }
            }
            if (isCarryingMeta) {
                continue;
            }
            break;
        }
        LOG.info("#regions = " + regions.size());
        Iterator<HRegionInfo> it = regions.iterator();
        while (it.hasNext()) {
            HRegionInfo region = it.next();
            if (region.isMetaTable() || region.getEncodedName().equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
                it.remove();
            }
        }
        if (regions.isEmpty())
            return;
        HRegionInfo curRegionInfo = regions.get(0);
        byte[] startRow = curRegionInfo.getStartKey();
        if (startRow == null || startRow.length == 0) {
            startRow = new byte[] { 0, 0, 0, 0, 1 };
        }
        byte[] row = Bytes.incrementBytes(startRow, 1);
        // use the last 5 bytes because HBaseTestingUtility.createMultiRegions uses 5-byte keys
        row = Arrays.copyOfRange(row, 3, 8);
        long value = 0;
        final TableName tableName = TableName.valueOf(name.getMethodName());
        byte[] family = Bytes.toBytes("family");
        byte[] qualifier = Bytes.toBytes("c1");
        long timeStamp = System.currentTimeMillis();
        HTableDescriptor htd = new HTableDescriptor(tableName);
        htd.addFamily(new HColumnDescriptor(family));
        final WAL wal = hrs.getWAL(curRegionInfo);
        for (int i = 0; i < NUM_LOG_LINES; i += 1) {
            WALEdit e = new WALEdit();
            value++;
            e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
            wal.append(curRegionInfo, new WALKey(curRegionInfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()), e, true);
        }
        wal.sync();
        wal.shutdown();
        // wait for the abort to complete
        this.abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE);
        // verify we got the last value
        LOG.info("Verification Starts...");
        Get g = new Get(row);
        Result r = ht.get(g);
        long theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
        assertEquals(value, theStoredVal);
        // after flush & compaction
        LOG.info("Verification after flush...");
        TEST_UTIL.getAdmin().flush(tableName);
        TEST_UTIL.getAdmin().compact(tableName);
        // wait for the compaction to complete
        TEST_UTIL.waitFor(30000, 200, new Waiter.Predicate<Exception>() {

            @Override
            public boolean evaluate() throws Exception {
                return (TEST_UTIL.getAdmin().getCompactionState(tableName) == CompactionState.NONE);
            }
        });
        r = ht.get(g);
        theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
        assertEquals(value, theStoredVal);
    } finally {
        if (ht != null)
            ht.close();
        if (zkw != null)
            zkw.close();
    }
}
Also used :
    WAL (org.apache.hadoop.hbase.wal.WAL)
    KeyValue (org.apache.hadoop.hbase.KeyValue)
    Result (org.apache.hadoop.hbase.client.Result)
    HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)
    WALKey (org.apache.hadoop.hbase.wal.WALKey)
    WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit)
    ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher)
    Table (org.apache.hadoop.hbase.client.Table)
    HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)
    OperationConflictException (org.apache.hadoop.hbase.exceptions.OperationConflictException)
    RegionInRecoveryException (org.apache.hadoop.hbase.exceptions.RegionInRecoveryException)
    IOException (java.io.IOException)
    TimeoutException (java.util.concurrent.TimeoutException)
    RetriesExhaustedWithDetailsException (org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException)
    ServerNotRunningYetException (org.apache.hadoop.hbase.ipc.ServerNotRunningYetException)
    HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer)
    HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
    TableName (org.apache.hadoop.hbase.TableName)
    AtomicLong (java.util.concurrent.atomic.AtomicLong)
    Get (org.apache.hadoop.hbase.client.Get)
    RegionServerThread (org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread)
    Waiter (org.apache.hadoop.hbase.Waiter)
    Ignore (org.junit.Ignore)
    Test (org.junit.Test)
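Stripped of the WAL-recovery machinery, the verification step at the end of the test is just a Get followed by Result.getValue. A minimal standalone sketch of that read-back pattern, assuming default connection settings and the table/family/qualifier names used above:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadBackSketch {
    // Read one cell back as a long, mirroring the verification in the test above.
    // The table, family, and qualifier names are illustrative assumptions.
    static long readBackAsLong(byte[] row) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("table"))) {
            Result result = table.get(new Get(row));
            byte[] cell = result.getValue(Bytes.toBytes("family"), Bytes.toBytes("c1"));
            // assumes the cell was written with Bytes.toBytes(long)
            return Bytes.toLong(cell);
        }
    }
}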

Example 67 with Result

Use of org.apache.hadoop.hbase.client.Result in project hbase by apache.

From the class TestTableSnapshotInputFormat, method verifyWithMockedMapReduce:

private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumSplits, byte[] startRow, byte[] stopRow) throws IOException, InterruptedException {
    TableSnapshotInputFormat tsif = new TableSnapshotInputFormat();
    List<InputSplit> splits = tsif.getSplits(job);
    Assert.assertEquals(expectedNumSplits, splits.size());
    HBaseTestingUtility.SeenRowTracker rowTracker = new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);
    for (int i = 0; i < splits.size(); i++) {
        // validate input split
        InputSplit split = splits.get(i);
        Assert.assertTrue(split instanceof TableSnapshotRegionSplit);
        // validate record reader
        TaskAttemptContext taskAttemptContext = mock(TaskAttemptContext.class);
        when(taskAttemptContext.getConfiguration()).thenReturn(job.getConfiguration());
        RecordReader<ImmutableBytesWritable, Result> rr = tsif.createRecordReader(split, taskAttemptContext);
        rr.initialize(split, taskAttemptContext);
        // validate we can read all the data back
        while (rr.nextKeyValue()) {
            byte[] row = rr.getCurrentKey().get();
            verifyRowFromMap(rr.getCurrentKey(), rr.getCurrentValue());
            rowTracker.addRow(row);
        }
        rr.close();
    }
    // validate all rows are seen
    rowTracker.validate();
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) Result(org.apache.hadoop.hbase.client.Result) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) TableSnapshotRegionSplit(org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat.TableSnapshotRegionSplit) InputSplit(org.apache.hadoop.mapreduce.InputSplit)
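The test drives TableSnapshotInputFormat by hand through mocked TaskAttemptContexts; a production job would normally let TableMapReduceUtil do the wiring instead. A minimal sketch, in which the snapshot name, restore directory, and mapper are assumptions rather than anything taken from the test:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotJobSketch {
    // A TableMapper receives each row key plus the full row as a Result.
    static class RowCountMapper extends TableMapper<ImmutableBytesWritable, NullWritable> {
        @Override
        protected void map(ImmutableBytesWritable key, Result value, Context context)
                throws IOException, InterruptedException {
            context.write(key, NullWritable.get()); // emit one record per row
        }
    }

    static Job createJob() throws IOException {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "snapshot-scan");
        // Wire TableSnapshotInputFormat into the job; the snapshot name and
        // restore directory below are hypothetical.
        TableMapReduceUtil.initTableSnapshotMapperJob(
            "my-snapshot",                       // snapshot to read (assumption)
            new Scan(),                          // scan over the whole key range
            RowCountMapper.class,
            ImmutableBytesWritable.class,        // map output key class
            NullWritable.class,                  // map output value class
            job,
            true,                                // ship HBase jars with the job
            new Path("/tmp/snapshot-restore"));  // scratch dir for restored files (assumption)
        return job;
    }
}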

Example 68 with Result

Use of org.apache.hadoop.hbase.client.Result in project hbase by apache.

From the class TestTimeRangeMapRed, method verify:

private void verify(final Table table) throws IOException {
    Scan scan = new Scan();
    scan.addColumn(FAMILY_NAME, COLUMN_NAME);
    scan.setMaxVersions(1);
    ResultScanner scanner = table.getScanner(scan);
    for (Result r : scanner) {
        for (Cell kv : r.listCells()) {
            log.debug(Bytes.toString(r.getRow()) + "\t"
                + Bytes.toString(CellUtil.cloneFamily(kv)) + "\t"
                + Bytes.toString(CellUtil.cloneQualifier(kv)) + "\t"
                + kv.getTimestamp() + "\t"
                + Bytes.toBoolean(CellUtil.cloneValue(kv)));
            org.junit.Assert.assertEquals(TIMESTAMP.get(kv.getTimestamp()), Bytes.toBoolean(CellUtil.cloneValue(kv)));
        }
    }
    scanner.close();
}
Also used : ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Result(org.apache.hadoop.hbase.client.Result)
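One thing to note about verify(): scanner.close() is only reached on the success path, so a failing assertion leaks the scanner. Since ResultScanner is Closeable, try-with-resources is the safer idiom. A sketch of the same walk under that assumption, with the family/qualifier passed as illustrative parameters:

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanSketch {
    // Same walk as verify() above, but the scanner is closed even if an
    // assertion or RPC failure throws mid-scan.
    static void dump(Table table, byte[] family, byte[] qualifier) throws IOException {
        Scan scan = new Scan();
        scan.addColumn(family, qualifier);
        try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result r : scanner) {
                for (Cell cell : r.listCells()) {
                    // CellUtil.clone* copy slices out of the Cell's backing array
                    System.out.println(Bytes.toString(r.getRow()) + "\t"
                        + cell.getTimestamp() + "\t"
                        + Bytes.toStringBinary(CellUtil.cloneValue(cell)));
                }
            }
        }
    }
}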

Example 69 with Result

Use of org.apache.hadoop.hbase.client.Result in project hbase by apache.

From the class TestMasterNoCluster, method testFailover:

/**
   * Test master failover.
   * Start up three fake regionservers and a master.
   * @throws IOException
   * @throws KeeperException
   * @throws InterruptedException
   * @throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException 
   */
@Test
public void testFailover() throws Exception {
    final long now = System.currentTimeMillis();
    // Names for our three servers.  Make the port numbers match the hostnames;
    // they come into use down in the mock servers when deciding how to respond.
    final ServerName sn0 = ServerName.valueOf("0.example.org", 0, now);
    final ServerName sn1 = ServerName.valueOf("1.example.org", 1, now);
    final ServerName sn2 = ServerName.valueOf("2.example.org", 2, now);
    final ServerName[] sns = new ServerName[] { sn0, sn1, sn2 };
    // Put up the mock servers
    final Configuration conf = TESTUTIL.getConfiguration();
    final MockRegionServer rs0 = new MockRegionServer(conf, sn0);
    final MockRegionServer rs1 = new MockRegionServer(conf, sn1);
    final MockRegionServer rs2 = new MockRegionServer(conf, sn2);
    // Put some data into the servers.  Make it look like sn0 has the meta.
    // Put data into sn2 so it looks like it has a few regions for a table named 't'.
    MetaTableLocator.setMetaLocation(rs0.getZooKeeper(), rs0.getServerName(), RegionState.State.OPEN);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    Result[] results = new Result[] {
        MetaMockingUtil.getMetaTableRowResult(
            new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HBaseTestingUtility.KEYS[1]),
            rs2.getServerName()),
        MetaMockingUtil.getMetaTableRowResult(
            new HRegionInfo(tableName, HBaseTestingUtility.KEYS[1], HBaseTestingUtility.KEYS[2]),
            rs2.getServerName()),
        MetaMockingUtil.getMetaTableRowResult(
            new HRegionInfo(tableName, HBaseTestingUtility.KEYS[2], HConstants.EMPTY_END_ROW),
            rs2.getServerName()) };
    rs1.setNextResults(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), results);
    // Create master.  Subclass to override a few methods so we can insert mocks
    // and get notification on transitions.  We need to fake out any rpcs the
    // master does opening/closing regions.  Also need to fake out the address
    // of the 'remote' mocked up regionservers.
    CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(TESTUTIL.getConfiguration());
    // Insert a mock for the connection; use TESTUTIL.getConfiguration rather than
    // the conf from the master, since that conf will already have a ClusterConnection
    // associated with it and the mocking of a connection below would fail.
    final ClusterConnection mockedConnection = HConnectionTestingUtility.getMockedConnectionAndDecorate(TESTUTIL.getConfiguration(), rs0, rs0, rs0.getServerName(), HRegionInfo.FIRST_META_REGIONINFO);
    HMaster master = new HMaster(conf, cp) {

        InetAddress getRemoteInetAddress(final int port, final long serverStartCode) throws UnknownHostException {
            // Return a different address depending on the port passed in.
            if (port > sns.length) {
                return super.getRemoteInetAddress(port, serverStartCode);
            }
            ServerName sn = sns[port];
            return InetAddress.getByAddress(sn.getHostname(), new byte[] { 10, 0, 0, (byte) sn.getPort() });
        }

        @Override
        void initClusterSchemaService() throws IOException, InterruptedException {
        }

        @Override
        ServerManager createServerManager(MasterServices master) throws IOException {
            ServerManager sm = super.createServerManager(master);
            // Spy on the created servermanager
            ServerManager spy = Mockito.spy(sm);
            // Fake a successful close.
            Mockito.doReturn(true).when(spy).sendRegionClose((ServerName) Mockito.any(), (HRegionInfo) Mockito.any(), (ServerName) Mockito.any());
            return spy;
        }

        @Override
        public ClusterConnection getConnection() {
            return mockedConnection;
        }

        @Override
        public ClusterConnection getClusterConnection() {
            return mockedConnection;
        }
    };
    master.start();
    try {
        // Wait till master is up ready for RPCs.
        while (!master.serviceStarted) Threads.sleep(10);
        // Fake out the master: make it look like there are regionservers out there by reporting in.
        for (int i = 0; i < sns.length; i++) {
            RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder();
            ServerName sn = ServerName.parseVersionedServerName(sns[i].getVersionedBytes());
            request.setServer(ProtobufUtil.toServerName(sn));
            request.setLoad(ServerLoad.EMPTY_SERVERLOAD.obtainServerLoadPB());
            master.getMasterRpcServices().regionServerReport(null, request.build());
        }
        // Master should now come up.
        while (!master.isInitialized()) {
            Threads.sleep(100);
        }
        assertTrue(master.isInitialized());
    } finally {
        rs0.stop("Test is done");
        rs1.stop("Test is done");
        rs2.stop("Test is done");
        master.stopMaster();
        master.join();
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) RegionServerReportRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest) CoordinatedStateManager(org.apache.hadoop.hbase.CoordinatedStateManager) Result(org.apache.hadoop.hbase.client.Result) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) ServerName(org.apache.hadoop.hbase.ServerName) Test(org.junit.Test)
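The Result[] above is hand-built with MetaMockingUtil so the mock regionserver can answer meta scans; against a live table the same shape comes back from a batched get. A minimal sketch with hypothetical row keys:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiGetSketch {
    // Fetch several rows in one round trip; the row keys are illustrative.
    static int countExisting(Table table) throws IOException {
        List<Get> gets = new ArrayList<>();
        gets.add(new Get(Bytes.toBytes("row-1")));
        gets.add(new Get(Bytes.toBytes("row-2")));
        Result[] results = table.get(gets); // one Result per Get, in request order
        int found = 0;
        for (Result r : results) {
            if (!r.isEmpty()) { // a missing row yields an empty Result, not null
                found++;
            }
        }
        return found;
    }
}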

Example 70 with Result

Use of org.apache.hadoop.hbase.client.Result in project hbase by apache.

From the class TestWalAndCompactingMemStoreFlush, method verifyEdit:

// A helper function to verify edits.
void verifyEdit(int familyNum, int putNum, Table table) throws IOException {
    Result r = table.get(createGet(familyNum, putNum));
    byte[] family = FAMILIES[familyNum - 1];
    byte[] qf = Bytes.toBytes("q" + familyNum);
    byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum);
    assertNotNull(("Missing Put#" + putNum + " for CF# " + familyNum), r.getFamilyMap(family));
    assertNotNull(("Missing Put#" + putNum + " for CF# " + familyNum), r.getFamilyMap(family).get(qf));
    assertTrue(("Incorrect value for Put#" + putNum + " for CF# " + familyNum), Arrays.equals(r.getFamilyMap(family).get(qf), val));
}
Also used : Result(org.apache.hadoop.hbase.client.Result)
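verifyEdit() reads through getFamilyMap(), which materializes every qualifier/value pair in the family; when a single cell is all that is needed, Result.getValue() is the shorter path. A small sketch contrasting the two accessors (names illustrative):

import java.util.NavigableMap;

import org.apache.hadoop.hbase.client.Result;

public class AccessorSketch {
    // One-cell accessor: returns null when the cell is absent.
    static byte[] singleCell(Result r, byte[] family, byte[] qualifier) {
        return r.getValue(family, qualifier);
    }

    // Family-wide accessor, as verifyEdit() uses: qualifier -> value for every
    // cell in the family (null when the Result itself is empty).
    static NavigableMap<byte[], byte[]> wholeFamily(Result r, byte[] family) {
        return r.getFamilyMap(family);
    }
}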

Aggregations

Result (org.apache.hadoop.hbase.client.Result): 753
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 302
Test (org.junit.Test): 295
Scan (org.apache.hadoop.hbase.client.Scan): 287
Get (org.apache.hadoop.hbase.client.Get): 282
Table (org.apache.hadoop.hbase.client.Table): 228
Cell (org.apache.hadoop.hbase.Cell): 203
Put (org.apache.hadoop.hbase.client.Put): 183
IOException (java.io.IOException): 172
ArrayList (java.util.ArrayList): 160
TableName (org.apache.hadoop.hbase.TableName): 125
Delete (org.apache.hadoop.hbase.client.Delete): 111
Connection (org.apache.hadoop.hbase.client.Connection): 102
KeyValue (org.apache.hadoop.hbase.KeyValue): 76
Configuration (org.apache.hadoop.conf.Configuration): 72
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 62
InterruptedIOException (java.io.InterruptedIOException): 59
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 50
CellScanner (org.apache.hadoop.hbase.CellScanner): 47
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 45