Example 21 with WALEdit

Use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.

From the class TestDistributedLogSplitting, method testSameVersionUpdatesRecovery:

@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testSameVersionUpdatesRecovery() throws Exception {
    LOG.info("testSameVersionUpdatesRecovery");
    conf.setLong("hbase.regionserver.hlog.blocksize", 15 * 1024);
    conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
    startCluster(NUM_RS);
    final AtomicLong sequenceId = new AtomicLong(100);
    final int NUM_REGIONS_TO_CREATE = 40;
    final int NUM_LOG_LINES = 1000;
    // Turn off load balancing to prevent regions from moving around; otherwise
    // they will consume recovered.edits
    master.balanceSwitch(false);
    List<RegionServerThread> rsts = cluster.getLiveRegionServerThreads();
    final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
    Table ht = installTable(zkw, name.getMethodName(), "family", NUM_REGIONS_TO_CREATE);
    try {
        List<HRegionInfo> regions = null;
        HRegionServer hrs = null;
        for (int i = 0; i < NUM_RS; i++) {
            boolean isCarryingMeta = false;
            hrs = rsts.get(i).getRegionServer();
            regions = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
            for (HRegionInfo region : regions) {
                if (region.isMetaRegion()) {
                    isCarryingMeta = true;
                    break;
                }
            }
            if (isCarryingMeta) {
                continue;
            }
            break;
        }
        LOG.info("#regions = " + regions.size());
        Iterator<HRegionInfo> it = regions.iterator();
        while (it.hasNext()) {
            HRegionInfo region = it.next();
            if (region.isMetaTable() || region.getEncodedName().equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
                it.remove();
            }
        }
        if (regions.isEmpty())
            return;
        HRegionInfo curRegionInfo = regions.get(0);
        byte[] startRow = curRegionInfo.getStartKey();
        if (startRow == null || startRow.length == 0) {
            startRow = new byte[] { 0, 0, 0, 0, 1 };
        }
        byte[] row = Bytes.incrementBytes(startRow, 1);
        // use the last 5 bytes because HBaseTestingUtility.createMultiRegions uses a 5-byte key
        row = Arrays.copyOfRange(row, 3, 8);
        long value = 0;
        TableName tableName = TableName.valueOf(name.getMethodName());
        byte[] family = Bytes.toBytes("family");
        byte[] qualifier = Bytes.toBytes("c1");
        long timeStamp = System.currentTimeMillis();
        HTableDescriptor htd = new HTableDescriptor(tableName);
        htd.addFamily(new HColumnDescriptor(family));
        final WAL wal = hrs.getWAL(curRegionInfo);
        for (int i = 0; i < NUM_LOG_LINES; i += 1) {
            WALEdit e = new WALEdit();
            value++;
            e.add(new KeyValue(row, family, qualifier, timeStamp, Bytes.toBytes(value)));
            wal.append(curRegionInfo, new WALKey(curRegionInfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()), e, true);
        }
        wal.sync();
        wal.shutdown();
        // wait for the abort to complete
        this.abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE);
        // verify we got the last value
        LOG.info("Verification Starts...");
        Get g = new Get(row);
        Result r = ht.get(g);
        long theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
        assertEquals(value, theStoredVal);
        // after flush
        LOG.info("Verification after flush...");
        TEST_UTIL.getAdmin().flush(tableName);
        r = ht.get(g);
        theStoredVal = Bytes.toLong(r.getValue(family, qualifier));
        assertEquals(value, theStoredVal);
    } finally {
        if (ht != null)
            ht.close();
        if (zkw != null)
            zkw.close();
    }
}
Also used: Table(org.apache.hadoop.hbase.client.Table), WAL(org.apache.hadoop.hbase.wal.WAL), KeyValue(org.apache.hadoop.hbase.KeyValue), HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), Result(org.apache.hadoop.hbase.client.Result), HRegionInfo(org.apache.hadoop.hbase.HRegionInfo), WALKey(org.apache.hadoop.hbase.wal.WALKey), TableName(org.apache.hadoop.hbase.TableName), AtomicLong(java.util.concurrent.atomic.AtomicLong), WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit), ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher), Get(org.apache.hadoop.hbase.client.Get), RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread), Ignore(org.junit.Ignore), Test(org.junit.Test)
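The heart of this test is the append loop: build a WALEdit, add a KeyValue, hand both to the WAL with a WALKey, then sync before aborting the server. A minimal sketch of just that pattern, assuming wal, regionInfo, tableName, row, family, and qualifier are already set up as in the test (the variable names are placeholders, not part of the original):

// Minimal sketch of the WALEdit append pattern used in the test above.
WALEdit edit = new WALEdit();
edit.add(new KeyValue(row, family, qualifier,
    System.currentTimeMillis(), Bytes.toBytes(42L)));
wal.append(regionInfo,
    new WALKey(regionInfo.getEncodedNameAsBytes(), tableName, System.currentTimeMillis()),
    edit, true);
// Sync so the edit is durable before the region server is aborted.
wal.sync();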

Example 22 with WALEdit

Use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.

From the class HRegion, method processRowsWithLocks:

@Override
public void processRowsWithLocks(RowProcessor<?, ?> processor, long timeout, long nonceGroup, long nonce) throws IOException {
    for (byte[] row : processor.getRowsToLock()) {
        checkRow(row, "processRowsWithLocks");
    }
    if (!processor.readOnly()) {
        checkReadOnly();
    }
    checkResources();
    startRegionOperation();
    WALEdit walEdit = new WALEdit();
    // STEP 1. Run pre-process hook
    preProcess(processor, walEdit);
    // Short circuit the read only case
    if (processor.readOnly()) {
        try {
            long now = EnvironmentEdgeManager.currentTime();
            doProcessRowWithTimeout(processor, now, this, null, null, timeout);
            processor.postProcess(this, walEdit, true);
        } finally {
            closeRegionOperation();
        }
        return;
    }
    boolean locked = false;
    List<RowLock> acquiredRowLocks = null;
    List<Mutation> mutations = new ArrayList<>();
    Collection<byte[]> rowsToLock = processor.getRowsToLock();
    // This is assigned by mvcc either explicitly below or in the guts of the WAL append
    // when it assigns the edit a sequence id (a.k.a. the mvcc write number).
    WriteEntry writeEntry = null;
    MemstoreSize memstoreSize = new MemstoreSize();
    try {
        boolean success = false;
        try {
            // STEP 2. Acquire the row lock(s)
            acquiredRowLocks = new ArrayList<>(rowsToLock.size());
            for (byte[] row : rowsToLock) {
                // Attempt to lock all involved rows, throw if any lock times out
                // use a writer lock for mixed reads and writes
                acquiredRowLocks.add(getRowLockInternal(row, false));
            }
            // STEP 3. Region lock
            lock(this.updatesLock.readLock(), acquiredRowLocks.isEmpty() ? 1 : acquiredRowLocks.size());
            locked = true;
            long now = EnvironmentEdgeManager.currentTime();
            // STEP 4. Let the processor scan the rows, generate mutations, and add to the WALEdit
            doProcessRowWithTimeout(processor, now, this, mutations, walEdit, timeout);
            if (!mutations.isEmpty()) {
                // STEP 5. Call the preBatchMutate hook
                processor.preBatchMutate(this, walEdit);
                // STEP 6. Append and sync if walEdit has data to write out.
                if (!walEdit.isEmpty()) {
                    writeEntry = doWALAppend(walEdit, getEffectiveDurability(processor.useDurability()), processor.getClusterIds(), now, nonceGroup, nonce);
                } else {
                    // We are here if WAL is being skipped.
                    writeEntry = this.mvcc.begin();
                }
                // STEP 7. Apply to memstore
                long sequenceId = writeEntry.getWriteNumber();
                for (Mutation m : mutations) {
                    // Handle any tag based cell features.
                    // TODO: Do we need to call rewriteCellTags down in applyToMemstore()? Why not before
                    // so tags go into WAL?
                    rewriteCellTags(m.getFamilyCellMap(), m);
                    for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) {
                        Cell cell = cellScanner.current();
                        if (walEdit.isEmpty()) {
                            // If walEdit is empty, we put nothing in WAL. WAL stamps Cells with sequence id.
                            // If no WAL, need to stamp it here.
                            CellUtil.setSequenceId(cell, sequenceId);
                        }
                        applyToMemstore(getHStore(cell), cell, memstoreSize);
                    }
                }
                // STEP 8. call postBatchMutate hook
                processor.postBatchMutate(this);
                // STEP 9. Complete mvcc.
                mvcc.completeAndWait(writeEntry);
                writeEntry = null;
                // STEP 10. Release region lock
                if (locked) {
                    this.updatesLock.readLock().unlock();
                    locked = false;
                }
                // STEP 11. Release row lock(s)
                releaseRowLocks(acquiredRowLocks);
            }
            success = true;
        } finally {
            // Call complete rather than completeAndWait because we probably had an error if writeEntry is still non-null
            if (writeEntry != null)
                mvcc.complete(writeEntry);
            if (locked) {
                this.updatesLock.readLock().unlock();
            }
            // release locks if some were acquired but another timed out
            releaseRowLocks(acquiredRowLocks);
        }
        // 12. Run post-process hook
        processor.postProcess(this, walEdit, success);
    } finally {
        closeRegionOperation();
        if (!mutations.isEmpty()) {
            long newSize = this.addAndGetMemstoreSize(memstoreSize);
            requestFlushIfNeeded(newSize);
        }
    }
}
Also used: WriteEntry(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry), ArrayList(java.util.ArrayList), CellScanner(org.apache.hadoop.hbase.CellScanner), WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit), Mutation(org.apache.hadoop.hbase.client.Mutation), Cell(org.apache.hadoop.hbase.Cell)
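Most of processRowsWithLocks is lock bookkeeping: every row lock and the region updatesLock acquired in STEPs 2-3 must be released on every path, and a pending mvcc write entry must be completed even on failure. A simplified, HBase-free analogue of that try/finally discipline using plain java.util.concurrent (all names here are invented for the sketch):

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch of the lock discipline in processRowsWithLocks: track whether the
// lock is held in a flag, release eagerly on success, and let finally
// release it on any exception path.
public class LockDisciplineSketch {
    private final ReentrantReadWriteLock updatesLock = new ReentrantReadWriteLock();

    public void process(Runnable work) {
        boolean locked = false;
        try {
            updatesLock.readLock().lock();   // analogue of STEP 3
            locked = true;
            work.run();                      // analogue of STEPs 4-9
            updatesLock.readLock().unlock(); // analogue of STEP 10
            locked = false;
        } finally {
            if (locked) {                    // an exception skipped the unlock above
                updatesLock.readLock().unlock();
            }
        }
    }
}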

Example 23 with WALEdit

Use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.

From the class ReplicationProtbufUtil, method buildReplicateWALEntryRequest:

/**
   * Create a new ReplicateWALEntryRequest from a list of WAL entries
   * @param entries the WAL entries to be replicated
   * @param encodedRegionName alternative region name to use if not null
   * @param replicationClusterId Id which will uniquely identify source cluster FS client
   *          configurations in the replication configuration directory
   * @param sourceBaseNamespaceDir Path to source cluster base namespace directory
   * @param sourceHFileArchiveDir Path to the source cluster hfile archive directory
   * @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values found.
   */
public static Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> buildReplicateWALEntryRequest(final Entry[] entries, byte[] encodedRegionName, String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir) {
    // Accumulate all the Cells seen in here.
    List<List<? extends Cell>> allCells = new ArrayList<>(entries.length);
    int size = 0;
    WALProtos.FamilyScope.Builder scopeBuilder = WALProtos.FamilyScope.newBuilder();
    AdminProtos.WALEntry.Builder entryBuilder = AdminProtos.WALEntry.newBuilder();
    AdminProtos.ReplicateWALEntryRequest.Builder builder = AdminProtos.ReplicateWALEntryRequest.newBuilder();
    HBaseProtos.UUID.Builder uuidBuilder = HBaseProtos.UUID.newBuilder();
    for (Entry entry : entries) {
        entryBuilder.clear();
        // TODO: this duplicates a lot in WALKey#getBuilder
        WALProtos.WALKey.Builder keyBuilder = entryBuilder.getKeyBuilder();
        WALKey key = entry.getKey();
        keyBuilder.setEncodedRegionName(UnsafeByteOperations.unsafeWrap(encodedRegionName == null ? key.getEncodedRegionName() : encodedRegionName));
        keyBuilder.setTableName(UnsafeByteOperations.unsafeWrap(key.getTablename().getName()));
        keyBuilder.setLogSequenceNumber(key.getLogSeqNum());
        keyBuilder.setWriteTime(key.getWriteTime());
        if (key.getNonce() != HConstants.NO_NONCE) {
            keyBuilder.setNonce(key.getNonce());
        }
        if (key.getNonceGroup() != HConstants.NO_NONCE) {
            keyBuilder.setNonceGroup(key.getNonceGroup());
        }
        for (UUID clusterId : key.getClusterIds()) {
            uuidBuilder.setLeastSigBits(clusterId.getLeastSignificantBits());
            uuidBuilder.setMostSigBits(clusterId.getMostSignificantBits());
            keyBuilder.addClusterIds(uuidBuilder.build());
        }
        if (key.getOrigLogSeqNum() > 0) {
            keyBuilder.setOrigSequenceNumber(key.getOrigLogSeqNum());
        }
        WALEdit edit = entry.getEdit();
        NavigableMap<byte[], Integer> scopes = key.getReplicationScopes();
        if (scopes != null && !scopes.isEmpty()) {
            for (Map.Entry<byte[], Integer> scope : scopes.entrySet()) {
                scopeBuilder.setFamily(UnsafeByteOperations.unsafeWrap(scope.getKey()));
                WALProtos.ScopeType scopeType = WALProtos.ScopeType.valueOf(scope.getValue().intValue());
                scopeBuilder.setScopeType(scopeType);
                keyBuilder.addScopes(scopeBuilder.build());
            }
        }
        List<Cell> cells = edit.getCells();
        // Add up the size. It is used later when serializing out the kvs.
        for (Cell cell : cells) {
            size += CellUtil.estimatedSerializedSizeOf(cell);
        }
        // Collect up the cells
        allCells.add(cells);
        // Write out how many cells associated with this entry.
        entryBuilder.setAssociatedCellCount(cells.size());
        builder.addEntry(entryBuilder.build());
    }
    if (replicationClusterId != null) {
        builder.setReplicationClusterId(replicationClusterId);
    }
    if (sourceBaseNamespaceDir != null) {
        builder.setSourceBaseNamespaceDirPath(sourceBaseNamespaceDir.toString());
    }
    if (sourceHFileArchiveDir != null) {
        builder.setSourceHFileArchiveDirPath(sourceHFileArchiveDir.toString());
    }
    return new Pair<>(builder.build(), getCellScanner(allCells, size));
}
Also used: ArrayList(java.util.ArrayList), WALKey(org.apache.hadoop.hbase.wal.WALKey), Entry(org.apache.hadoop.hbase.wal.WAL.Entry), WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit), List(java.util.List), UUID(java.util.UUID), Cell(org.apache.hadoop.hbase.Cell), Pair(org.apache.hadoop.hbase.util.Pair), WALProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos), Map(java.util.Map), NavigableMap(java.util.NavigableMap)
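Given the signature above, invoking the helper is straightforward; a hedged usage sketch, assuming an Entry[] named entries has already been read from a WAL (the variable names are illustrative, and the null checks inside the method show the optional arguments may be omitted):

// Sketch: build the replication RPC payload from already-read WAL entries.
Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> payload =
    ReplicationProtbufUtil.buildReplicateWALEntryRequest(
        entries, // WAL entries to replicate
        null,    // keep each entry's own encoded region name
        null,    // no replication cluster id
        null,    // no source base namespace dir
        null);   // no source hfile archive dir
AdminProtos.ReplicateWALEntryRequest request = payload.getFirst();
CellScanner cellsToShip = payload.getSecond();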

Example 24 with WALEdit

Use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.

From the class HRegion, method reckonDeltas:

/**
   * Reckon the Cells to apply to the WAL, the memstore, and to return to the Client; these sets
   * are not always the same, depending on whether we write to the WAL and on whether the amount
   * to increment is zero (in which case we write back nothing and just return the latest Cell
   * value to the client).
   *
   * @param results Fill in here what goes back to the Client if it is non-null (if null, client
   *  doesn't want results).
   * @param forMemStore Fill in here what to apply to the MemStore (by Store).
   * @return A WALEdit to apply to WAL or null if we are to skip the WAL.
   */
private WALEdit reckonDeltas(final Operation op, final Mutation mutation, final Durability effectiveDurability, final Map<Store, List<Cell>> forMemStore, final List<Cell> results) throws IOException {
    WALEdit walEdit = null;
    long now = EnvironmentEdgeManager.currentTime();
    final boolean writeToWAL = effectiveDurability != Durability.SKIP_WAL;
    // Process a Store/family at a time.
    for (Map.Entry<byte[], List<Cell>> entry : mutation.getFamilyCellMap().entrySet()) {
        final byte[] columnFamilyName = entry.getKey();
        List<Cell> deltas = entry.getValue();
        Store store = this.stores.get(columnFamilyName);
        // Reckon for the Store what to apply to WAL and MemStore.
        List<Cell> toApply = reckonDeltasByStore(store, op, mutation, effectiveDurability, now, deltas, results);
        if (!toApply.isEmpty()) {
            forMemStore.put(store, toApply);
            if (writeToWAL) {
                if (walEdit == null) {
                    walEdit = new WALEdit();
                }
                walEdit.getCells().addAll(toApply);
            }
        }
    }
    return walEdit;
}
Also used: WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit), ArrayList(java.util.ArrayList), AbstractList(java.util.AbstractList), List(java.util.List), Map(java.util.Map), TreeMap(java.util.TreeMap), ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap), NavigableMap(java.util.NavigableMap), HashMap(java.util.HashMap), ConcurrentMap(java.util.concurrent.ConcurrentMap), ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap), Cell(org.apache.hadoop.hbase.Cell)
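The pattern worth noting here is the lazy allocation: the WALEdit is only created once the first durable cell appears, so SKIP_WAL mutations never allocate one, and a null return tells the caller to skip the WAL entirely. A stripped-down sketch of the same idea (cellsToApply and writeToWAL are placeholders for this illustration):

// Sketch of the lazy-WALEdit pattern from reckonDeltas above.
WALEdit walEdit = null;
for (Cell cell : cellsToApply) {
    if (writeToWAL) {
        if (walEdit == null) {
            walEdit = new WALEdit(); // first durable cell: allocate lazily
        }
        walEdit.getCells().add(cell);
    }
}
return walEdit; // null means "skip the WAL"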

Example 25 with WALEdit

Use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.

From the class TestHRegion, method testOpenRegionWrittenToWALForLogReplay:

@Test
public void testOpenRegionWrittenToWALForLogReplay() throws Exception {
    // similar to the above test but with distributed log replay
    final ServerName serverName = ServerName.valueOf(name.getMethodName(), 100, 42);
    final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
    htd.addFamily(new HColumnDescriptor(fam1));
    htd.addFamily(new HColumnDescriptor(fam2));
    HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
    // open the region w/o rss and wal and flush some files
    HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
    assertNotNull(region);
    // create a file in fam1 for the region before opening in OpenRegionHandler
    region.put(new Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
    region.flush(true);
    HBaseTestingUtility.closeRegionAndWAL(region);
    ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
    // capture append() calls
    WAL wal = mockWAL();
    when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
    // add the region to recovering regions
    HashMap<String, Region> recoveringRegions = Maps.newHashMap();
    recoveringRegions.put(region.getRegionInfo().getEncodedName(), null);
    when(rss.getRecoveringRegions()).thenReturn(recoveringRegions);
    try {
        Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
        conf.set(HConstants.REGION_IMPL, HRegionWithSeqId.class.getName());
        region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), conf, rss, null);
        // verify that we have not appended region open event to WAL because this region is still
        // recovering
        verify(wal, times(0)).append((HRegionInfo) any(), (WALKey) any(), editCaptor.capture(), anyBoolean());
        // now put the region out of recovering state
        new FinishRegionRecoveringHandler(rss, region.getRegionInfo().getEncodedName(), "/foo").prepare().process();
        // now we should have put the entry
        verify(wal, times(1)).append((HRegionInfo) any(), (WALKey) any(), editCaptor.capture(), anyBoolean());
        WALEdit edit = editCaptor.getValue();
        assertNotNull(edit);
        assertNotNull(edit.getCells());
        assertEquals(1, edit.getCells().size());
        RegionEventDescriptor desc = WALEdit.getRegionEventDescriptor(edit.getCells().get(0));
        assertNotNull(desc);
        LOG.info("RegionEventDescriptor from WAL: " + desc);
        assertEquals(RegionEventDescriptor.EventType.REGION_OPEN, desc.getEventType());
        assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName()));
        assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(), hri.getEncodedNameAsBytes()));
        assertTrue(desc.getLogSequenceNumber() > 0);
        assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer()));
        assertEquals(2, desc.getStoresCount());
        StoreDescriptor store = desc.getStores(0);
        assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam1));
        assertEquals(Bytes.toString(fam1), store.getStoreHomeDir());
        // 1 store file
        assertEquals(1, store.getStoreFileCount());
        // ensure path is relative
        assertFalse(store.getStoreFile(0).contains("/"));
        store = desc.getStores(1);
        assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam2));
        assertEquals(Bytes.toString(fam2), store.getStoreHomeDir());
        // no store files
        assertEquals(0, store.getStoreFileCount());
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(region);
    }
}
Also used: WAL(org.apache.hadoop.hbase.wal.WAL), MetricsWAL(org.apache.hadoop.hbase.regionserver.wal.MetricsWAL), Configuration(org.apache.hadoop.conf.Configuration), HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration), HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString), Put(org.apache.hadoop.hbase.client.Put), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), StoreDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor), HRegionInfo(org.apache.hadoop.hbase.HRegionInfo), WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit), FinishRegionRecoveringHandler(org.apache.hadoop.hbase.regionserver.handler.FinishRegionRecoveringHandler), ServerName(org.apache.hadoop.hbase.ServerName), RegionEventDescriptor(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor), Test(org.junit.Test)
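The Mockito idiom this test leans on — capturing the WALEdit handed to a mocked WAL's append() — is reusable on its own; a minimal sketch, assuming a mocked wal like the one returned by the test's mockWAL() helper:

// Sketch of the ArgumentCaptor pattern from the test above: verify that
// append() ran exactly once and pull out the WALEdit it was given.
ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
verify(wal, times(1)).append(
    (HRegionInfo) any(), (WALKey) any(), editCaptor.capture(), anyBoolean());
WALEdit captured = editCaptor.getValue();
assertNotNull(captured.getCells());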

Aggregations

WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit): 29
WALKey (org.apache.hadoop.hbase.wal.WALKey): 13
Cell (org.apache.hadoop.hbase.Cell): 10
WAL (org.apache.hadoop.hbase.wal.WAL): 9
KeyValue (org.apache.hadoop.hbase.KeyValue): 8
Test (org.junit.Test): 8
ArrayList (java.util.ArrayList): 7
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 7
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 7
Put (org.apache.hadoop.hbase.client.Put): 5
IOException (java.io.IOException): 4
List (java.util.List): 4
TreeMap (java.util.TreeMap): 4
AtomicLong (java.util.concurrent.atomic.AtomicLong): 4
Path (org.apache.hadoop.fs.Path): 4
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 4
TableName (org.apache.hadoop.hbase.TableName): 4
Mutation (org.apache.hadoop.hbase.client.Mutation): 4
Entry (org.apache.hadoop.hbase.wal.WAL.Entry): 4
HashMap (java.util.HashMap): 3