Example 11 with StoreDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor in project hbase by apache.

the class ReplicationSourceWALReader method sizeOfStoreFilesIncludeBulkLoad.

/**
 * Calculate the total size of the store files referenced by bulk load markers in the edit
 * @param edit edit to scan for bulk load descriptors
 * @return the total size in bytes of the referenced store files
 */
private int sizeOfStoreFilesIncludeBulkLoad(WALEdit edit) {
    List<Cell> cells = edit.getCells();
    int totalStoreFilesSize = 0;
    int totalCells = edit.size();
    for (int i = 0; i < totalCells; i++) {
        if (CellUtil.matchingQualifier(cells.get(i), WALEdit.BULK_LOAD)) {
            try {
                BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cells.get(i));
                List<StoreDescriptor> stores = bld.getStoresList();
                int totalStores = stores.size();
                for (int j = 0; j < totalStores; j++) {
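                    // getStoreFileSizeBytes() returns a long; the running total is
                    // narrowed to int here, so very large totals would overflow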
                    totalStoreFilesSize = (int) (totalStoreFilesSize + stores.get(j).getStoreFileSizeBytes());
                }
            } catch (IOException e) {
                LOG.error("Failed to deserialize bulk load entry from wal edit. " + "Size of HFiles part of cell will not be considered in replication " + "request size calculation.", e);
            }
        }
    }
    return totalStoreFilesSize;
}
Also used: BulkLoadDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor), IOException (java.io.IOException), Cell (org.apache.hadoop.hbase.Cell), StoreDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor)
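
For quick experimentation outside a running replication source, the sketch below builds a BulkLoadDescriptor by hand using the builders that protobuf generates for WALProtos, then totals the store file sizes in a long (avoiding the int narrowing in the loop above). Every field value is made up for illustration, and the shaded ByteString import assumes an HBase 2.1+ layout.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

public class BulkLoadSizeSketch {

    public static void main(String[] args) {
        // Assemble a descriptor shaped like the one a bulk-load WAL edit carries.
        // All field values here are illustrative, not taken from a real cluster.
        BulkLoadDescriptor bld = BulkLoadDescriptor.newBuilder()
            .setTableName(ProtobufUtil.toProtoTableName(TableName.valueOf("demo")))
            .setEncodedRegionName(ByteString.copyFromUtf8("0123abcd"))
            .setBulkloadSeqNum(42L)
            .addStores(StoreDescriptor.newBuilder()
                .setFamilyName(ByteString.copyFromUtf8("cf"))
                .setStoreHomeDir("cf")
                .addStoreFile("hfile-0001")
                .setStoreFileSizeBytes(64L * 1024 * 1024))
            .build();

        // Accumulate in a long so totals past Integer.MAX_VALUE do not overflow.
        long totalStoreFilesSize = 0;
        for (StoreDescriptor store : bld.getStoresList()) {
            totalStoreFilesSize += store.getStoreFileSizeBytes();
        }
        System.out.println("total store file bytes = " + totalStoreFilesSize);
    }
}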

Example 12 with StoreDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor in project hbase by apache.

the class HRegion method replayWALRegionEventMarker.

/**
 * @deprecated Since 3.0.0, will be removed in 4.0.0. Kept only for compatibility with the old
 *             region replica implementation.
 */
@Deprecated
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NN_NAKED_NOTIFY", justification = "Intentional; cleared the memstore")
void replayWALRegionEventMarker(RegionEventDescriptor regionEvent) throws IOException {
    checkTargetRegion(regionEvent.getEncodedRegionName().toByteArray(), "RegionEvent marker from WAL ", regionEvent);
    startRegionOperation(Operation.REPLAY_EVENT);
    try {
        if (ServerRegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) {
            // if primary nothing to do
            return;
        }
        if (regionEvent.getEventType() == EventType.REGION_CLOSE) {
            // nothing to do on REGION_CLOSE for now.
            return;
        }
        if (regionEvent.getEventType() != EventType.REGION_OPEN) {
            LOG.warn(getRegionInfo().getEncodedName() + " : " + "Unknown region event received, ignoring :" + TextFormat.shortDebugString(regionEvent));
            return;
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug(getRegionInfo().getEncodedName() + " : " + "Replaying region open event marker " + TextFormat.shortDebugString(regionEvent));
        }
        // we will use writestate as a coarse-grain lock for all the replay events
        synchronized (writestate) {
            // only move the watermark forward: a region open event whose sequence id is
            // smaller than the last replayed one is stale and is skipped below
            if (this.lastReplayedOpenRegionSeqId <= regionEvent.getLogSequenceNumber()) {
                this.lastReplayedOpenRegionSeqId = regionEvent.getLogSequenceNumber();
            } else {
                LOG.warn(getRegionInfo().getEncodedName() + " : " + "Skipping replaying region event :" + TextFormat.shortDebugString(regionEvent) + " because its sequence id is smaller than this regions lastReplayedOpenRegionSeqId " + " of " + lastReplayedOpenRegionSeqId);
                return;
            }
            // a region open event lists all the files the region has at the time of opening;
            // pick up all the files and drop prepared flushes and empty memstores
            for (StoreDescriptor storeDescriptor : regionEvent.getStoresList()) {
                // stores of primary may be different now
                byte[] family = storeDescriptor.getFamilyName().toByteArray();
                HStore store = getStore(family);
                if (store == null) {
                    LOG.warn(getRegionInfo().getEncodedName() + " : " + "Received a region open marker from primary, but the family is not found. " + "Ignoring. StoreDescriptor:" + storeDescriptor);
                    continue;
                }
                long storeSeqId = store.getMaxSequenceId().orElse(0L);
                List<String> storeFiles = storeDescriptor.getStoreFileList();
                try {
                    // replace the files with the new ones
                    store.refreshStoreFiles(storeFiles);
                } catch (FileNotFoundException ex) {
                    LOG.warn(getRegionInfo().getEncodedName() + " : " + "At least one of the store files: " + storeFiles + " doesn't exist any more. Skip loading the file(s)", ex);
                    continue;
                }
                if (store.getMaxSequenceId().orElse(0L) != storeSeqId) {
                    // Record latest flush time if we picked up new files
                    lastStoreFlushTimeMap.put(store, EnvironmentEdgeManager.currentTime());
                }
                if (writestate.flushing) {
                    // only drop memstore snapshots if they are smaller than last flush for the store
                    if (this.prepareFlushResult.flushOpSeqId <= regionEvent.getLogSequenceNumber()) {
                        StoreFlushContext ctx = this.prepareFlushResult.storeFlushCtxs == null ? null : this.prepareFlushResult.storeFlushCtxs.get(family);
                        if (ctx != null) {
                            MemStoreSize mss = store.getFlushableSize();
                            ctx.abort();
                            this.decrMemStoreSize(mss);
                            this.prepareFlushResult.storeFlushCtxs.remove(family);
                        }
                    }
                }
                // Drop the memstore contents if they are now smaller than the latest seen flushed file
                dropMemStoreContentsForSeqId(regionEvent.getLogSequenceNumber(), store);
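                // track the largest flushed sequence id seen across the stores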
                if (storeSeqId > this.maxFlushedSeqId) {
                    this.maxFlushedSeqId = storeSeqId;
                }
            }
            // if all stores ended up dropping their snapshots, we can safely drop the
            // prepareFlushResult
            dropPrepareFlushIfPossible();
            // advance the mvcc read point so that the new flushed file is visible.
            mvcc.await();
            // If we were waiting for observing a flush or region opening event for not showing partial
            // data after a secondary region crash, we can allow reads now.
            this.setReadsEnabled(true);
            // finally notify anyone waiting on the memstore to clear, e.g. checkResources().
            synchronized (this) {
                // FindBugs NN_NAKED_NOTIFY
                notifyAll();
            }
        }
        logRegionFiles();
    } finally {
        closeRegionOperation(Operation.REPLAY_EVENT);
    }
}
Also used: FileNotFoundException (java.io.FileNotFoundException), StoreDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor)
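
The core of the replay logic above is the watermark check on lastReplayedOpenRegionSeqId: an open event is only applied if its sequence id is at least the last one replayed, which guards a secondary replica against out-of-order WAL delivery. Below is a minimal, self-contained distillation of that gate; the class and method names are hypothetical, not HBase API.

// Hypothetical distillation of the seqId watermark used by replayWALRegionEventMarker.
final class OpenEventGate {

    private long lastReplayedOpenRegionSeqId = -1L;

    /**
     * Returns true if an open event with the given sequence id should be
     * replayed, advancing the watermark; false if the event is stale.
     */
    synchronized boolean admit(long eventSeqId) {
        if (lastReplayedOpenRegionSeqId <= eventSeqId) {
            lastReplayedOpenRegionSeqId = eventSeqId;
            return true;  // newer (or equal) event: replay it
        }
        return false;     // older WAL position: skip, as the warn branch above does
    }
}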

Example 13 with StoreDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor in project hbase by apache.

the class TestHRegion method testCloseRegionWrittenToWAL.

@Test
public void testCloseRegionWrittenToWAL() throws Exception {
    Path rootDir = new Path(dir + name.getMethodName());
    CommonFSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);
    final ServerName serverName = ServerName.valueOf("testCloseRegionWrittenToWAL", 100, 42);
    final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2)).build();
    RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
    ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
    // capture append() calls
    WAL wal = mockWAL();
    when(rss.getWAL(any(RegionInfo.class))).thenReturn(wal);
    // create and then open a region first so that it can be closed later
    region = HRegion.createHRegion(hri, rootDir, TEST_UTIL.getConfiguration(), htd, rss.getWAL(hri));
    region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null);
    // close the region
    region.close(false);
    // 2 appends: one for the region open marker, the other for the region close marker
    verify(wal, times(2)).appendMarker(any(RegionInfo.class), (WALKeyImpl) any(WALKeyImpl.class), editCaptor.capture());
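    // the second captured edit is the region close marker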
    WALEdit edit = editCaptor.getAllValues().get(1);
    assertNotNull(edit);
    assertNotNull(edit.getCells());
    assertEquals(1, edit.getCells().size());
    RegionEventDescriptor desc = WALEdit.getRegionEventDescriptor(edit.getCells().get(0));
    assertNotNull(desc);
    LOG.info("RegionEventDescriptor from WAL: " + desc);
    assertEquals(RegionEventDescriptor.EventType.REGION_CLOSE, desc.getEventType());
    assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getTableName().toBytes()));
    assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(), hri.getEncodedNameAsBytes()));
    assertTrue(desc.getLogSequenceNumber() > 0);
    assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer()));
    assertEquals(2, desc.getStoresCount());
    StoreDescriptor store = desc.getStores(0);
    assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam1));
    assertEquals(store.getStoreHomeDir(), Bytes.toString(fam1));
    // no store files
    assertEquals(0, store.getStoreFileCount());
    store = desc.getStores(1);
    assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam2));
    assertEquals(store.getStoreHomeDir(), Bytes.toString(fam2));
    // no store files
    assertEquals(0, store.getStoreFileCount());
}
Also used: Path (org.apache.hadoop.fs.Path), WAL (org.apache.hadoop.hbase.wal.WAL), WALEdit (org.apache.hadoop.hbase.wal.WALEdit), ServerName (org.apache.hadoop.hbase.ServerName), RegionEventDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor), RegionInfo (org.apache.hadoop.hbase.client.RegionInfo), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), StoreDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor), Test (org.junit.Test)
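
To do the same extraction outside a test, a small helper can scan an edit for the first region event marker; WALEdit.getRegionEventDescriptor returns null for cells that are not region event cells, so non-matching cells are simply skipped. The helper class and method names here are made up for illustration.

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
import org.apache.hadoop.hbase.wal.WALEdit;

final class WALEditMarkers {

    private WALEditMarkers() {
    }

    /** Returns the first region event descriptor in the edit, or null if none. */
    static RegionEventDescriptor firstRegionEvent(WALEdit edit) throws IOException {
        for (Cell cell : edit.getCells()) {
            RegionEventDescriptor desc = WALEdit.getRegionEventDescriptor(cell);
            if (desc != null) {
                return desc;
            }
        }
        return null;
    }
}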

Example 14 with StoreDescriptor

use of org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor in project hbase by apache.

the class TestHRegion method testOpenRegionWrittenToWAL.

@Test
public void testOpenRegionWrittenToWAL() throws Exception {
    final ServerName serverName = ServerName.valueOf(name.getMethodName(), 100, 42);
    final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2)).build();
    RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).build();
    // open the region w/o rss and wal and flush some files
    region = HBaseTestingUtil.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
    assertNotNull(region);
    // create a file in fam1 for the region before opening in OpenRegionHandler
    region.put(new Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
    region.flush(true);
    HBaseTestingUtil.closeRegionAndWAL(region);
    ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
    // capture append() calls
    WAL wal = mockWAL();
    when(rss.getWAL(any(RegionInfo.class))).thenReturn(wal);
    region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null);
    verify(wal, times(1)).appendMarker(any(RegionInfo.class), any(WALKeyImpl.class), editCaptor.capture());
    WALEdit edit = editCaptor.getValue();
    assertNotNull(edit);
    assertNotNull(edit.getCells());
    assertEquals(1, edit.getCells().size());
    RegionEventDescriptor desc = WALEdit.getRegionEventDescriptor(edit.getCells().get(0));
    assertNotNull(desc);
    LOG.info("RegionEventDescriptor from WAL: " + desc);
    assertEquals(RegionEventDescriptor.EventType.REGION_OPEN, desc.getEventType());
    assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getTableName().toBytes()));
    assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(), hri.getEncodedNameAsBytes()));
    assertTrue(desc.getLogSequenceNumber() > 0);
    assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer()));
    assertEquals(2, desc.getStoresCount());
    StoreDescriptor store = desc.getStores(0);
    assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam1));
    assertEquals(store.getStoreHomeDir(), Bytes.toString(fam1));
    // 1 store file
    assertEquals(1, store.getStoreFileCount());
    // ensure path is relative
    assertFalse(store.getStoreFile(0).contains("/"));
    store = desc.getStores(1);
    assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam2));
    assertEquals(store.getStoreHomeDir(), Bytes.toString(fam2));
    // no store files
    assertEquals(0, store.getStoreFileCount());
}
Also used: WAL (org.apache.hadoop.hbase.wal.WAL), WALEdit (org.apache.hadoop.hbase.wal.WALEdit), ServerName (org.apache.hadoop.hbase.ServerName), RegionEventDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor), RegionInfo (org.apache.hadoop.hbase.client.RegionInfo), WALKeyImpl (org.apache.hadoop.hbase.wal.WALKeyImpl), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), Put (org.apache.hadoop.hbase.client.Put), StoreDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor), Test (org.junit.Test)
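
Both tests repeat the same three store assertions, so a small helper is one way to keep them in one spot if more cases are added. It is a suggestion rather than part of the HBase test code, and it also puts the expected value first, which the inline assertEquals(store.getStoreHomeDir(), ...) calls above do not.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

final class StoreDescriptorAssertions {

    private StoreDescriptorAssertions() {
    }

    /** Asserts the descriptor belongs to the given family and lists the expected file count. */
    static void assertStore(StoreDescriptor store, byte[] family, int expectedFileCount) {
        assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), family));
        assertEquals(Bytes.toString(family), store.getStoreHomeDir());
        assertEquals(expectedFileCount, store.getStoreFileCount());
    }
}

With it, the family, home dir, and file count checks for the first store in testOpenRegionWrittenToWAL would collapse to assertStore(desc.getStores(0), fam1, 1).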

Aggregations

StoreDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor) 14
BulkLoadDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor) 8
IOException (java.io.IOException) 5
Cell (org.apache.hadoop.hbase.Cell) 5
Pair (org.apache.hadoop.hbase.util.Pair) 5
ArrayList (java.util.ArrayList) 4
WAL (org.apache.hadoop.hbase.wal.WAL) 4
Test (org.junit.Test) 4
Path (org.apache.hadoop.fs.Path) 3
ServerName (org.apache.hadoop.hbase.ServerName) 3
RegionEventDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor) 3
FileNotFoundException (java.io.FileNotFoundException) 2
List (java.util.List) 2
Put (org.apache.hadoop.hbase.client.Put) 2
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 2
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor) 2
WALEdit (org.apache.hadoop.hbase.wal.WALEdit) 2
Random (java.util.Random) 1
Configuration (org.apache.hadoop.conf.Configuration) 1
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration) 1