Example 1 with RegionEventDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor in project hbase by apache.

From class RSRpcServices, method doReplayBatchOp.

/**
   * Execute a list of Put/Delete mutations. The function returns OperationStatus instead of
   * constructing MultiResponse to save a possible loop if caller doesn't need MultiResponse.
   * @param region the region to replay the mutations against
   * @param mutations the WAL-recovered Put/Delete mutations to replay
   * @param replaySeqId the sequence id under which the batch is replayed
   * @return an array of OperationStatus which internally contains the OperationStatusCode and the
   *         exceptionMessage if any
   * @throws IOException if replaying the batch fails
   */
private OperationStatus[] doReplayBatchOp(final Region region, final List<WALSplitter.MutationReplay> mutations, long replaySeqId) throws IOException {
    long before = EnvironmentEdgeManager.currentTime();
    boolean batchContainsPuts = false, batchContainsDelete = false;
    try {
        for (Iterator<WALSplitter.MutationReplay> it = mutations.iterator(); it.hasNext(); ) {
            WALSplitter.MutationReplay m = it.next();
            if (m.type == MutationType.PUT) {
                batchContainsPuts = true;
            } else {
                batchContainsDelete = true;
            }
            NavigableMap<byte[], List<Cell>> map = m.mutation.getFamilyCellMap();
            List<Cell> metaCells = map.get(WALEdit.METAFAMILY);
            if (metaCells != null && !metaCells.isEmpty()) {
                for (Cell metaCell : metaCells) {
                    CompactionDescriptor compactionDesc = WALEdit.getCompaction(metaCell);
                    boolean isDefaultReplica = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
                    HRegion hRegion = (HRegion) region;
                    if (compactionDesc != null) {
                        // replay the compaction. Remove the files from stores only if we are the primary
                        // region replica (thus own the files)
                        hRegion.replayWALCompactionMarker(compactionDesc, !isDefaultReplica, isDefaultReplica, replaySeqId);
                        continue;
                    }
                    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(metaCell);
                    if (flushDesc != null && !isDefaultReplica) {
                        hRegion.replayWALFlushMarker(flushDesc, replaySeqId);
                        continue;
                    }
                    RegionEventDescriptor regionEvent = WALEdit.getRegionEventDescriptor(metaCell);
                    if (regionEvent != null && !isDefaultReplica) {
                        hRegion.replayWALRegionEventMarker(regionEvent);
                        continue;
                    }
                    BulkLoadDescriptor bulkLoadEvent = WALEdit.getBulkLoadDescriptor(metaCell);
                    if (bulkLoadEvent != null) {
                        hRegion.replayWALBulkLoadEventMarker(bulkLoadEvent);
                        continue;
                    }
                }
                it.remove();
            }
        }
        requestCount.add(mutations.size());
        if (!region.getRegionInfo().isMetaTable()) {
            regionServer.cacheFlusher.reclaimMemStoreMemory();
        }
        return region.batchReplay(mutations.toArray(new WALSplitter.MutationReplay[mutations.size()]), replaySeqId);
    } finally {
        if (regionServer.metricsRegionServer != null) {
            long after = EnvironmentEdgeManager.currentTime();
            if (batchContainsPuts) {
                regionServer.metricsRegionServer.updatePut(after - before);
            }
            if (batchContainsDelete) {
                regionServer.metricsRegionServer.updateDelete(after - before);
            }
        }
    }
}
Also used : BulkLoadDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) RegionEventDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor) ArrayList(java.util.ArrayList) List(java.util.List) CompactionDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor) WALSplitter(org.apache.hadoop.hbase.wal.WALSplitter) Cell(org.apache.hadoop.hbase.Cell) ByteBufferCell(org.apache.hadoop.hbase.ByteBufferCell)
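
The Javadoc above explains why the method returns OperationStatus rather than building a MultiResponse. As a minimal sketch of how a caller might consume that array — the loop and the log message are illustrative, not code from RSRpcServices:

// Illustrative consumer of the replay results. OperationStatus and
// HConstants.OperationStatusCode are real HBase types; the handling
// policy below is hypothetical.
OperationStatus[] statuses = doReplayBatchOp(region, mutations, replaySeqId);
for (int i = 0; i < statuses.length; i++) {
    if (statuses[i].getOperationStatusCode() != HConstants.OperationStatusCode.SUCCESS) {
        LOG.warn("Replay of mutation " + i + " failed: " + statuses[i].getExceptionMsg());
    }
}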

Example 2 with RegionEventDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor in project hbase by apache.

From class RSRpcServices, method doReplayBatchOp.

/**
 * Execute a list of Put/Delete mutations. The function returns OperationStatus instead of
 * constructing MultiResponse to save a possible loop if caller doesn't need MultiResponse.
 * @return an array of OperationStatus which internally contains the OperationStatusCode and the
 *         exceptionMessage if any
 * @deprecated Since 3.0.0, will be removed in 4.0.0. We do not use this method for replaying
 *             edits for secondary replicas any more, see
 *             {@link #replicateToReplica(RpcController, ReplicateWALEntryRequest)}.
 */
@Deprecated
private OperationStatus[] doReplayBatchOp(final HRegion region, final List<MutationReplay> mutations, long replaySeqId) throws IOException {
    long before = EnvironmentEdgeManager.currentTime();
    boolean batchContainsPuts = false, batchContainsDelete = false;
    try {
        for (Iterator<MutationReplay> it = mutations.iterator(); it.hasNext(); ) {
            MutationReplay m = it.next();
            if (m.getType() == MutationType.PUT) {
                batchContainsPuts = true;
            } else {
                batchContainsDelete = true;
            }
            NavigableMap<byte[], List<Cell>> map = m.mutation.getFamilyCellMap();
            List<Cell> metaCells = map.get(WALEdit.METAFAMILY);
            if (metaCells != null && !metaCells.isEmpty()) {
                for (Cell metaCell : metaCells) {
                    CompactionDescriptor compactionDesc = WALEdit.getCompaction(metaCell);
                    boolean isDefaultReplica = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
                    HRegion hRegion = region;
                    if (compactionDesc != null) {
                        // replay the compaction. Remove the files from stores only if we are the primary
                        // region replica (thus own the files)
                        hRegion.replayWALCompactionMarker(compactionDesc, !isDefaultReplica, isDefaultReplica, replaySeqId);
                        continue;
                    }
                    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(metaCell);
                    if (flushDesc != null && !isDefaultReplica) {
                        hRegion.replayWALFlushMarker(flushDesc, replaySeqId);
                        continue;
                    }
                    RegionEventDescriptor regionEvent = WALEdit.getRegionEventDescriptor(metaCell);
                    if (regionEvent != null && !isDefaultReplica) {
                        hRegion.replayWALRegionEventMarker(regionEvent);
                        continue;
                    }
                    BulkLoadDescriptor bulkLoadEvent = WALEdit.getBulkLoadDescriptor(metaCell);
                    if (bulkLoadEvent != null) {
                        hRegion.replayWALBulkLoadEventMarker(bulkLoadEvent);
                        continue;
                    }
                }
                it.remove();
            }
        }
        requestCount.increment();
        if (!region.getRegionInfo().isMetaRegion()) {
            server.getMemStoreFlusher().reclaimMemStoreMemory();
        }
        return region.batchReplay(mutations.toArray(new MutationReplay[mutations.size()]), replaySeqId);
    } finally {
        updateMutationMetrics(region, before, batchContainsPuts, batchContainsDelete);
    }
}
Also used : BulkLoadDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor) MutationReplay(org.apache.hadoop.hbase.wal.WALSplitUtil.MutationReplay) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) RegionEventDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor) ArrayList(java.util.ArrayList) List(java.util.List) ImmutableList(org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList) CompactionDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor) Cell(org.apache.hadoop.hbase.Cell) ByteBufferExtendedCell(org.apache.hadoop.hbase.ByteBufferExtendedCell)
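
For context, each MutationReplay consumed above wraps a recovered mutation together with its nonce data. A hypothetical assembly of such a batch, assuming the four-argument (type, mutation, nonceGroup, nonce) constructor of WALSplitUtil.MutationReplay; all cell contents are placeholders:

// Hypothetical assembly of a replay batch; row, family, qualifier and
// value are placeholders, and no nonces are attached.
Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
List<MutationReplay> mutations = new ArrayList<>();
mutations.add(new MutationReplay(MutationType.PUT, put, HConstants.NO_NONCE, HConstants.NO_NONCE));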

Example 3 with RegionEventDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor in project hbase by apache.

From class TestHRegionReplayEvents, method testSkippingEditsWithSmallerSeqIdAfterRegionOpenEvent.

/**
 * Tests that edits arriving for replay are skipped when their seq id is smaller than the seq id
 * of the last replayed region open event.
 */
@Test
public void testSkippingEditsWithSmallerSeqIdAfterRegionOpenEvent() throws IOException {
    putDataWithFlushes(primaryRegion, 100, 100, 0);
    int numRows = 100;
    // close the region and open again.
    primaryRegion.close();
    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
    // now replay the edits and the flush marker
    reader = createWALReaderForPrimary();
    List<RegionEventDescriptor> regionEvents = Lists.newArrayList();
    List<WAL.Entry> edits = Lists.newArrayList();
    LOG.info("-- Replaying edits and region events in secondary");
    while (true) {
        WAL.Entry entry = reader.next();
        if (entry == null) {
            break;
        }
        FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
        RegionEventDescriptor regionEventDesc = WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0));
        if (flushDesc != null) {
        // don't replay flushes
        } else if (regionEventDesc != null) {
            regionEvents.add(regionEventDesc);
        } else {
            edits.add(entry);
        }
    }
    // replay the region open of first open, but with the seqid of the second open
    // this way none of the flush files will be picked up.
    secondaryRegion.replayWALRegionEventMarker(RegionEventDescriptor.newBuilder(regionEvents.get(0)).setLogSequenceNumber(regionEvents.get(2).getLogSequenceNumber()).build());
    // replay the edits; if replay does not skip these, the following verification will NOT fail.
    for (WAL.Entry entry : edits) {
        replayEdit(secondaryRegion, entry);
    }
    boolean expectedFail = false;
    try {
        verifyData(secondaryRegion, 0, numRows, cq, families);
    } catch (AssertionError e) {
        // expected
        expectedFail = true;
    }
    if (!expectedFail) {
        fail("Should have failed this verification");
    }
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) RegionEventDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor) FlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor) StoreFlushDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor) Test(org.junit.Test)
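
The marker-rewriting trick in this test leans on the protobuf-generated builder API: RegionEventDescriptor.newBuilder(prototype) returns a builder pre-populated with every field of the prototype, so only the field overridden afterwards changes. Spelled out against the objects used above:

// Copy an existing REGION_OPEN marker but stamp it with the second
// open's sequence number; all other fields carry over unchanged.
RegionEventDescriptor prototype = regionEvents.get(0);
RegionEventDescriptor rewritten = RegionEventDescriptor.newBuilder(prototype)
    .setLogSequenceNumber(regionEvents.get(2).getLogSequenceNumber())
    .build();
assert rewritten.getEventType() == prototype.getEventType();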

Example 4 with RegionEventDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor in project hbase by apache.

From class TestHRegion, method testOpenRegionWrittenToWALForLogReplay.

@Test
public void testOpenRegionWrittenToWALForLogReplay() throws Exception {
    // similar to the above test but with distributed log replay
    final ServerName serverName = ServerName.valueOf(name.getMethodName(), 100, 42);
    final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    htd.addFamily(new HColumnDescriptor(fam1));
    htd.addFamily(new HColumnDescriptor(fam2));
    HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
    // open the region w/o rss and wal and flush some files
    HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
    assertNotNull(region);
    // create a file in fam1 for the region before opening in OpenRegionHandler
    region.put(new Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
    region.flush(true);
    HBaseTestingUtility.closeRegionAndWAL(region);
    ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
    // capture append() calls
    WAL wal = mockWAL();
    when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
    // add the region to recovering regions
    HashMap<String, Region> recoveringRegions = Maps.newHashMap();
    recoveringRegions.put(region.getRegionInfo().getEncodedName(), null);
    when(rss.getRecoveringRegions()).thenReturn(recoveringRegions);
    try {
        Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
        conf.set(HConstants.REGION_IMPL, HRegionWithSeqId.class.getName());
        region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), conf, rss, null);
        // verify that we have not appended region open event to WAL because this region is still
        // recovering
        verify(wal, times(0)).append((HRegionInfo) any(), (WALKey) any(), editCaptor.capture(), anyBoolean());
        // now put the region out of recovering state
        new FinishRegionRecoveringHandler(rss, region.getRegionInfo().getEncodedName(), "/foo").prepare().process();
        // now we should have put the entry
        verify(wal, times(1)).append((HRegionInfo) any(), (WALKey) any(), editCaptor.capture(), anyBoolean());
        WALEdit edit = editCaptor.getValue();
        assertNotNull(edit);
        assertNotNull(edit.getCells());
        assertEquals(1, edit.getCells().size());
        RegionEventDescriptor desc = WALEdit.getRegionEventDescriptor(edit.getCells().get(0));
        assertNotNull(desc);
        LOG.info("RegionEventDescriptor from WAL: " + desc);
        assertEquals(RegionEventDescriptor.EventType.REGION_OPEN, desc.getEventType());
        assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName()));
        assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(), hri.getEncodedNameAsBytes()));
        assertTrue(desc.getLogSequenceNumber() > 0);
        assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer()));
        assertEquals(2, desc.getStoresCount());
        StoreDescriptor store = desc.getStores(0);
        assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam1));
        assertEquals(store.getStoreHomeDir(), Bytes.toString(fam1));
        // 1 store file
        assertEquals(1, store.getStoreFileCount());
        // ensure path is relative
        assertFalse(store.getStoreFile(0).contains("/"));
        store = desc.getStores(1);
        assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam2));
        assertEquals(store.getStoreHomeDir(), Bytes.toString(fam2));
        // no store files
        assertEquals(0, store.getStoreFileCount());
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(region);
    }
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) MetricsWAL(org.apache.hadoop.hbase.regionserver.wal.MetricsWAL) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) StoreDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) FinishRegionRecoveringHandler(org.apache.hadoop.hbase.regionserver.handler.FinishRegionRecoveringHandler) ServerName(org.apache.hadoop.hbase.ServerName) RegionEventDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor) Test(org.junit.Test)
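
A detail worth noting: editCaptor.capture() appears in both verify calls, but the times(0) verification records nothing, so getValue() returns the WALEdit from the single real append. Stripped of the HBase specifics, the bare Mockito pattern is (names illustrative):

// Generic Mockito capture-and-verify pattern: the captor records the
// argument during verify(), and getValue() yields the last captured value.
ArgumentCaptor<WALEdit> captor = ArgumentCaptor.forClass(WALEdit.class);
verify(wal, times(1)).append(any(), any(), captor.capture(), anyBoolean());
WALEdit captured = captor.getValue();
assertNotNull(captured);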

Example 5 with RegionEventDescriptor

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor in project hbase by apache.

From class HRegion, method writeRegionCloseMarker.

private void writeRegionCloseMarker(WAL wal) throws IOException {
    Map<byte[], List<Path>> storeFiles = getStoreFiles();
    RegionEventDescriptor regionEventDesc = ProtobufUtil.toRegionEventDescriptor(RegionEventDescriptor.EventType.REGION_CLOSE, getRegionInfo(), mvcc.getReadPoint(), getRegionServerServices().getServerName(), storeFiles);
    // we do not care about the region close event on the secondary replica side, so just pass a
    // null RegionReplicationSink
    WALUtil.writeRegionEventMarker(wal, getReplicationScope(), getRegionInfo(), regionEventDesc, mvcc, null);
    // guard against tests that delete the table directory while the table is still online
    if (getWalFileSystem().exists(getWALRegionDir())) {
        WALSplitUtil.writeRegionSequenceIdFile(getWalFileSystem(), getWALRegionDir(), mvcc.getReadPoint());
    }
}
Also used : RegionEventDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor) ArrayList(java.util.ArrayList) List(java.util.List)
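
ProtobufUtil.toRegionEventDescriptor hides the underlying builder calls. Assembling a comparable message by hand looks roughly like this sketch — tableNameBytes, encodedNameBytes, and the server values are placeholders, the repeated stores field is omitted for brevity, and the field names follow the WALProtos message exercised in Example 4:

// Hand-rolled REGION_CLOSE marker; all concrete values are placeholders.
RegionEventDescriptor desc = RegionEventDescriptor.newBuilder()
    .setEventType(RegionEventDescriptor.EventType.REGION_CLOSE)
    .setTableName(ByteString.copyFrom(tableNameBytes))
    .setEncodedRegionName(ByteString.copyFrom(encodedNameBytes))
    .setLogSequenceNumber(mvcc.getReadPoint())
    .setServer(HBaseProtos.ServerName.newBuilder()
        .setHostName("rs1.example.com").setPort(16020).setStartCode(1L).build())
    .build();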

Aggregations

RegionEventDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor)12 WAL (org.apache.hadoop.hbase.wal.WAL)7 Test (org.junit.Test)7 ArrayList (java.util.ArrayList)5 List (java.util.List)5 FlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor)5 StoreDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor)4 ServerName (org.apache.hadoop.hbase.ServerName)3 StoreFlushDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor)3 Path (org.apache.hadoop.fs.Path)2 Cell (org.apache.hadoop.hbase.Cell)2 Put (org.apache.hadoop.hbase.client.Put)2 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)2 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)2 BulkLoadDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor)2 CompactionDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor)2 WALEdit (org.apache.hadoop.hbase.wal.WALEdit)2 Configuration (org.apache.hadoop.conf.Configuration)1 ByteBufferCell (org.apache.hadoop.hbase.ByteBufferCell)1 ByteBufferExtendedCell (org.apache.hadoop.hbase.ByteBufferExtendedCell)1