
Example 1 with WALEntry

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry in project hbase by apache.

From the class RSRpcServices, method replay:

/**
 * Replay the given changes when distributedLogReplay is replaying WAL edits from a failed RS. The guarantee is
 * that the given mutations will be durable on the receiving RS if this method returns without any
 * exception.
 * @param controller the RPC controller
 * @param request the request
 * @deprecated Since 3.0.0, will be removed in 4.0.0. Not used any more, put here only for
 *             compatibility with old region replica implementation. Now we will use
 *             {@code replicateToReplica} method instead.
 */
@Deprecated
@Override
@QosPriority(priority = HConstants.REPLAY_QOS)
public ReplicateWALEntryResponse replay(final RpcController controller, final ReplicateWALEntryRequest request) throws ServiceException {
    long before = EnvironmentEdgeManager.currentTime();
    CellScanner cells = getAndReset(controller);
    try {
        checkOpen();
        List<WALEntry> entries = request.getEntryList();
        if (entries == null || entries.isEmpty()) {
            // empty input
            return ReplicateWALEntryResponse.newBuilder().build();
        }
        ByteString regionName = entries.get(0).getKey().getEncodedRegionName();
        HRegion region = server.getRegionByEncodedName(regionName.toStringUtf8());
        // Do not invoke coprocessors if this is a secondary region replica
        RegionCoprocessorHost coprocessorHost =
            ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())
                ? region.getCoprocessorHost()
                : null;
        List<Pair<WALKey, WALEdit>> walEntries = new ArrayList<>();
        // Skip adding the edits to WAL if this is a secondary region replica
        boolean isPrimary = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
        Durability durability = isPrimary ? Durability.USE_DEFAULT : Durability.SKIP_WAL;
        for (WALEntry entry : entries) {
            if (!regionName.equals(entry.getKey().getEncodedRegionName())) {
                throw new NotServingRegionException("Replay request contains entries from multiple " + "regions. First region:" + regionName.toStringUtf8() + " , other region:" + entry.getKey().getEncodedRegionName());
            }
            if (server.nonceManager != null && isPrimary) {
                long nonceGroup = entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
                long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
                server.nonceManager.reportOperationFromWal(nonceGroup, nonce, entry.getKey().getWriteTime());
            }
            Pair<WALKey, WALEdit> walEntry = (coprocessorHost == null) ? null : new Pair<>();
            List<MutationReplay> edits = WALSplitUtil.getMutationsFromWALEntry(entry, cells, walEntry, durability);
            if (coprocessorHost != null) {
                // Let region observers see (and possibly bypass) this entry before replay.
                if (coprocessorHost.preWALRestore(region.getRegionInfo(), walEntry.getFirst(), walEntry.getSecond())) {
                    // if bypass this log entry, ignore it ...
                    continue;
                }
                walEntries.add(walEntry);
            }
            if (edits != null && !edits.isEmpty()) {
                // HBASE-17924
                // sort to improve lock efficiency
                Collections.sort(edits, (v1, v2) -> Row.COMPARATOR.compare(v1.mutation, v2.mutation));
                long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) ? entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber();
                OperationStatus[] result = doReplayBatchOp(region, edits, replaySeqId);
                // check if it's a partial success
                for (int i = 0; result != null && i < result.length; i++) {
                    if (result[i] != OperationStatus.SUCCESS) {
                        throw new IOException(result[i].getExceptionMsg());
                    }
                }
            }
        }
        // sync wal at the end because ASYNC_WAL is used above
        WAL wal = region.getWAL();
        if (wal != null) {
            wal.sync();
        }
        if (coprocessorHost != null) {
            for (Pair<WALKey, WALEdit> entry : walEntries) {
                coprocessorHost.postWALRestore(region.getRegionInfo(), entry.getFirst(), entry.getSecond());
            }
        }
        return ReplicateWALEntryResponse.newBuilder().build();
    } catch (IOException ie) {
        throw new ServiceException(ie);
    } finally {
        final MetricsRegionServer metricsRegionServer = server.getMetrics();
        if (metricsRegionServer != null) {
            metricsRegionServer.updateReplay(EnvironmentEdgeManager.currentTime() - before);
        }
    }
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) ArrayList(java.util.ArrayList) MutationReplay(org.apache.hadoop.hbase.wal.WALSplitUtil.MutationReplay) CellScanner(org.apache.hadoop.hbase.CellScanner) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) Pair(org.apache.hadoop.hbase.util.Pair) NameInt64Pair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair) NameBytesPair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) Durability(org.apache.hadoop.hbase.client.Durability) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) UncheckedIOException(java.io.UncheckedIOException) ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) WALEntry(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry) QosPriority(org.apache.hadoop.hbase.ipc.QosPriority)
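For context, here is a minimal sketch of how the request consumed above can be assembled on the sending side. The field names come from the shaded Admin.proto/WAL.proto messages used in this example; the table name, region name, and sequence number are illustrative placeholders, and HBase's own replication source builds these messages from WAL.Entry objects rather than by hand.

import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

public class WALEntryRequestSketch {

    public static ReplicateWALEntryRequest buildRequest() {
        // The four fields set here are the required fields of the WALKey message.
        // The values are placeholders; a real caller takes them from the source WAL.
        WALProtos.WALKey key = WALProtos.WALKey.newBuilder()
            .setEncodedRegionName(ByteString.copyFromUtf8("1588230740"))
            .setTableName(ByteString.copyFromUtf8("test-table"))
            .setLogSequenceNumber(42L)
            .setWriteTime(System.currentTimeMillis())
            .build();
        WALEntry entry = WALEntry.newBuilder()
            .setKey(key)
            // Cells travel out of band in the RPC cell block; this count tells the
            // receiver how many cells of the CellScanner belong to this entry.
            .setAssociatedCellCount(0)
            .build();
        return ReplicateWALEntryRequest.newBuilder().addEntry(entry).build();
    }
}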

Example 2 with WALEntry

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry in project hbase by apache.

From the class RSRpcServices, method replicateToReplica:

/**
 * Replay the given changes on a secondary replica
 */
@Override
public ReplicateWALEntryResponse replicateToReplica(RpcController controller, ReplicateWALEntryRequest request) throws ServiceException {
    CellScanner cells = getAndReset(controller);
    try {
        checkOpen();
        List<WALEntry> entries = request.getEntryList();
        if (entries == null || entries.isEmpty()) {
            // empty input
            return ReplicateWALEntryResponse.newBuilder().build();
        }
        ByteString regionName = entries.get(0).getKey().getEncodedRegionName();
        HRegion region = server.getRegionByEncodedName(regionName.toStringUtf8());
        if (RegionReplicaUtil.isDefaultReplica(region.getRegionInfo())) {
            throw new DoNotRetryIOException("Should not replicate to primary replica " + region.getRegionInfo() + ", CODE BUG?");
        }
        for (WALEntry entry : entries) {
            if (!regionName.equals(entry.getKey().getEncodedRegionName())) {
                throw new NotServingRegionException("ReplicateToReplica request contains entries from multiple " + "regions. First region:" + regionName.toStringUtf8() + " , other region:" + entry.getKey().getEncodedRegionName());
            }
            region.replayWALEntry(entry, cells);
        }
        return ReplicateWALEntryResponse.newBuilder().build();
    } catch (IOException ie) {
        throw new ServiceException(ie);
    }
}
Also used : ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) WALEntry(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) UncheckedIOException(java.io.UncheckedIOException) CellScanner(org.apache.hadoop.hbase.CellScanner)
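Since replicateToReplica is an @Override of the generated AdminService interface, a caller reaches it through the admin stub of the region server hosting the secondary replica. A minimal sketch of the call, assuming the stub and controller are already in hand (HBase's region replication framework obtains them internally and attaches the entry cells to the controller so they travel in the RPC cell block rather than inside the protobuf message):

import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;

public class ReplicateToReplicaSketch {

    // Sends one replication batch to the host of a secondary replica.
    static ReplicateWALEntryResponse send(AdminProtos.AdminService.BlockingInterface admin,
        RpcController controller, ReplicateWALEntryRequest request) throws ServiceException {
        return admin.replicateToReplica(controller, request);
    }
}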

Example 3 with WALEntry

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry in project hbase by apache.

From the class TestReplicationSink, method testApplyDeleteBeforePut:

/**
 * Puts are buffered but deletes are not, so this tests that a delete is not applied ahead of
 * the buffered Put that creates the row it deletes.
 */
@Test
public void testApplyDeleteBeforePut() throws Exception {
    List<WALEntry> entries = new ArrayList<>(5);
    List<Cell> cells = new ArrayList<>();
    for (int i = 0; i < 2; i++) {
        entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    entries.add(createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily, cells));
    for (int i = 3; i < 5; i++) {
        entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    Get get = new Get(Bytes.toBytes(1));
    Result res = table1.get(get);
    assertEquals(0, res.size());
}
Also used : Get(org.apache.hadoop.hbase.client.Get) ArrayList(java.util.ArrayList) WALEntry(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry) Cell(org.apache.hadoop.hbase.Cell) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
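The createEntry helper is test-local and not shown on this page. Below is a hypothetical reconstruction of what it must do, inferred from how it is called here; the actual TestReplicationSink source may differ in detail.

// Hypothetical sketch: appends one cell of the requested type to the shared cell
// list and returns a WALEntry whose associated_cell_count says how many cells of
// the accompanying CellScanner belong to it.
private WALEntry createEntry(TableName table, int row, KeyValue.Type type, List<Cell> cells) {
    byte[] rowBytes = Bytes.toBytes(row);
    long now = EnvironmentEdgeManager.currentTime();
    KeyValue kv;
    if (type == KeyValue.Type.Put) {
        kv = new KeyValue(rowBytes, FAM_NAME1, FAM_NAME1, now, KeyValue.Type.Put, rowBytes);
    } else {
        // Delete-type cells (e.g. DeleteFamily) carry no value.
        kv = new KeyValue(rowBytes, FAM_NAME1, null, now, type);
    }
    cells.add(kv);
    return createWALEntryBuilder(table).setAssociatedCellCount(1).build();
}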

Example 4 with WALEntry

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry in project hbase by apache.

From the class TestReplicationSink, method testReplicateEntriesForHFiles:

/**
 * Test replicateEntries with a bulk load entry for 25 HFiles
 */
@Test
public void testReplicateEntriesForHFiles() throws Exception {
    Path dir = TEST_UTIL.getDataTestDirOnTestFS("testReplicateEntries");
    Path familyDir = new Path(dir, Bytes.toString(FAM_NAME1));
    int numRows = 10;
    List<Path> p = new ArrayList<>(1);
    final String hfilePrefix = "hfile-";
    // 1. Generate 25 hfile ranges
    Random rng = new SecureRandom();
    Set<Integer> numbers = new HashSet<>();
    while (numbers.size() < 50) {
        numbers.add(rng.nextInt(1000));
    }
    List<Integer> numberList = new ArrayList<>(numbers);
    Collections.sort(numberList);
    Map<String, Long> storeFilesSize = new HashMap<>(1);
    // 2. Create 25 hfiles
    Configuration conf = TEST_UTIL.getConfiguration();
    FileSystem fs = dir.getFileSystem(conf);
    Iterator<Integer> numbersItr = numberList.iterator();
    for (int i = 0; i < 25; i++) {
        Path hfilePath = new Path(familyDir, hfilePrefix + i);
        HFileTestUtil.createHFile(conf, fs, hfilePath, FAM_NAME1, FAM_NAME1, Bytes.toBytes(numbersItr.next()), Bytes.toBytes(numbersItr.next()), numRows);
        p.add(hfilePath);
        storeFilesSize.put(hfilePath.getName(), fs.getFileStatus(hfilePath).getLen());
    }
    // 3. Create a BulkLoadDescriptor and a WALEdit
    Map<byte[], List<Path>> storeFiles = new HashMap<>(1);
    storeFiles.put(FAM_NAME1, p);
    org.apache.hadoop.hbase.wal.WALEdit edit = null;
    WALProtos.BulkLoadDescriptor loadDescriptor = null;
    try (Connection c = ConnectionFactory.createConnection(conf);
        RegionLocator l = c.getRegionLocator(TABLE_NAME1)) {
        RegionInfo regionInfo = l.getAllRegionLocations().get(0).getRegion();
        loadDescriptor = ProtobufUtil.toBulkLoadDescriptor(TABLE_NAME1, UnsafeByteOperations.unsafeWrap(regionInfo.getEncodedNameAsBytes()), storeFiles, storeFilesSize, 1);
        edit = org.apache.hadoop.hbase.wal.WALEdit.createBulkLoadEvent(regionInfo, loadDescriptor);
    }
    List<WALEntry> entries = new ArrayList<>(1);
    // 4. Create a WALEntryBuilder
    WALEntry.Builder builder = createWALEntryBuilder(TABLE_NAME1);
    // 5. Copy the hfile to the path as it is in reality
    for (int i = 0; i < 25; i++) {
        String pathToHfileFromNS = new StringBuilder(100)
            .append(TABLE_NAME1.getNamespaceAsString()).append(Path.SEPARATOR)
            .append(Bytes.toString(TABLE_NAME1.getName())).append(Path.SEPARATOR)
            .append(Bytes.toString(loadDescriptor.getEncodedRegionName().toByteArray()))
            .append(Path.SEPARATOR).append(Bytes.toString(FAM_NAME1)).append(Path.SEPARATOR)
            .append(hfilePrefix + i).toString();
        String dst = baseNamespaceDir + Path.SEPARATOR + pathToHfileFromNS;
        Path dstPath = new Path(dst);
        FileUtil.copy(fs, p.get(0), fs, dstPath, false, conf);
    }
    entries.add(builder.build());
    try (ResultScanner scanner = table1.getScanner(new Scan())) {
        // 6. Assert no existing data in table
        assertEquals(0, scanner.next(numRows).length);
    }
    // 7. Replicate the bulk loaded entry
    SINK.replicateEntries(entries, CellUtil.createCellScanner(edit.getCells().iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    try (ResultScanner scanner = table1.getScanner(new Scan())) {
        // 8. Assert data is replicated
        assertEquals(numRows, scanner.next(numRows).length);
    }
    // Clean up the created hfiles or they will mess up subsequent tests
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) Random(java.util.Random) SecureRandom(java.security.SecureRandom) FileSystem(org.apache.hadoop.fs.FileSystem) List(java.util.List) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Connection(org.apache.hadoop.hbase.client.Connection) SecureRandom(java.security.SecureRandom) WALProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos) Scan(org.apache.hadoop.hbase.client.Scan) WALEntry(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry) Test(org.junit.Test)
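On the sink side, the bulk-load event created in step 3 is not a normal data cell: it is a marker cell in WALEdit's meta family whose value is the serialized BulkLoadDescriptor. A hedged sketch of unpacking such a marker follows (METAFAMILY, BULK_LOAD, and getBulkLoadDescriptor are public members of org.apache.hadoop.hbase.wal.WALEdit; verify against your HBase version):

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
import org.apache.hadoop.hbase.wal.WALEdit;

public class BulkLoadMarkerSketch {

    // Walks an edit and prints the hfiles named by any bulk-load marker it contains.
    static void dumpBulkLoadFiles(WALEdit edit) throws IOException {
        for (Cell cell : edit.getCells()) {
            if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)
                && CellUtil.matchingQualifier(cell, WALEdit.BULK_LOAD)) {
                BulkLoadDescriptor bld = WALEdit.getBulkLoadDescriptor(cell);
                for (StoreDescriptor store : bld.getStoresList()) {
                    for (String file : store.getStoreFileList()) {
                        // Store file paths are relative; the sink resolves them
                        // under the source cluster's namespace directory layout.
                        System.out.println(store.getFamilyName().toStringUtf8() + "/" + file);
                    }
                }
            }
        }
    }
}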

Example 5 with WALEntry

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry in project hbase by apache.

From the class TestReplicationSink, method testBatchSink:

/**
 * Insert a whole batch of entries and verify they all land in the table.
 */
@Test
public void testBatchSink() throws Exception {
    List<WALEntry> entries = new ArrayList<>(BATCH_SIZE);
    List<Cell> cells = new ArrayList<>();
    for (int i = 0; i < BATCH_SIZE; i++) {
        entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
    }
    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()), replicationClusterId, baseNamespaceDir, hfileArchiveDir);
    Scan scan = new Scan();
    ResultScanner scanRes = table1.getScanner(scan);
    assertEquals(BATCH_SIZE, scanRes.next(BATCH_SIZE).length);
}
Also used : ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) WALEntry(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
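Note the contract between the entries list and the CellScanner: the sink consumes cells from the scanner in order, getAssociatedCellCount() per entry, so the two must be built in lockstep. A quick sanity check of that invariant for a batch like the one above (a sketch, not part of the original test):

// Every cell handed to the scanner must be claimed by exactly one entry.
int claimed = entries.stream().mapToInt(WALEntry::getAssociatedCellCount).sum();
assertEquals(claimed, cells.size());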

Aggregations

WALEntry (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry): 15 uses
ArrayList (java.util.ArrayList): 11 uses
Cell (org.apache.hadoop.hbase.Cell): 9 uses
Test (org.junit.Test): 8 uses
Scan (org.apache.hadoop.hbase.client.Scan): 7 uses
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 6 uses
IOException (java.io.IOException): 5 uses
CellScanner (org.apache.hadoop.hbase.CellScanner): 5 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 4 uses
UncheckedIOException (java.io.UncheckedIOException): 3 uses
HashMap (java.util.HashMap): 3 uses
List (java.util.List): 3 uses
Result (org.apache.hadoop.hbase.client.Result): 3 uses
HashSet (java.util.HashSet): 2 uses
Map (java.util.Map): 2 uses
TreeMap (java.util.TreeMap): 2 uses
UUID (java.util.UUID): 2 uses
Configuration (org.apache.hadoop.conf.Configuration): 2 uses
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 2 uses
KeyValue (org.apache.hadoop.hbase.KeyValue): 2 uses