
Example 16 with WALEdit

use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.

the class TestReplicationSmallTests method testCompactionWALEdits.

/**
   * Test for HBASE-9038: Replication.scopeWALEdits would NPE if it did not filter out
   * the compaction WALEdit.
   * @throws Exception
   */
@Test(timeout = 300000)
public void testCompactionWALEdits() throws Exception {
    WALProtos.CompactionDescriptor compactionDescriptor = WALProtos.CompactionDescriptor.getDefaultInstance();
    HRegionInfo hri = new HRegionInfo(htable1.getName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    WALEdit edit = WALEdit.createCompaction(hri, compactionDescriptor);
    Replication.scopeWALEdits(new WALKey(), edit, htable1.getConfiguration(), null);
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) WALProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos) Test(org.junit.Test)
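
The test above only checks that scopeWALEdits survives a compaction marker without throwing. The fix behind HBASE-9038 hinges on recognizing such markers, which are written under WALEdit's meta family. A minimal sketch of that kind of guard, assuming WALEdit.METAFAMILY as the marker family (ScopeFilterSketch and isMetaEdit are illustrative names, not the actual Replication code):

import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

public class ScopeFilterSketch {
    // Returns true if every cell in the edit belongs to the WAL meta family,
    // which is where compaction descriptors are recorded.
    static boolean isMetaEdit(WALEdit edit) {
        List<Cell> cells = edit.getCells();
        for (Cell cell : cells) {
            if (!CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
                return false;
            }
        }
        return !cells.isEmpty();
    }
}

A scope computation that bails out early on isMetaEdit(edit) never dereferences replication scopes for compaction entries, which is the NPE path the test guards.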

Example 17 with WALEdit

use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.

the class TestRegionReplicaReplicationEndpoint method testRegionReplicaReplicationIgnoresDisabledTables.

public void testRegionReplicaReplicationIgnoresDisabledTables(boolean dropTable) throws Exception {
    // Tests that edits from a disabled or dropped table are handled correctly: such
    // entries are skipped, and edits that come after them can still be replicated
    // without problems.
    final TableName tableName = TableName.valueOf(name.getMethodName() + dropTable);
    HTableDescriptor htd = HTU.createTableDescriptor(tableName);
    int regionReplication = 3;
    htd.setRegionReplication(regionReplication);
    HTU.deleteTableIfAny(tableName);
    HTU.getAdmin().createTable(htd);
    TableName toBeDisabledTable = TableName.valueOf(dropTable ? "droppedTable" : "disabledTable");
    HTU.deleteTableIfAny(toBeDisabledTable);
    htd = HTU.createTableDescriptor(toBeDisabledTable.toString());
    htd.setRegionReplication(regionReplication);
    HTU.getAdmin().createTable(htd);
    // both tables are created, now pause replication
    ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
    admin.disablePeer(ServerRegionReplicaUtil.getReplicationPeerId());
    // now that the replication is disabled, write to the table to be dropped, then drop the table.
    Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
    Table table = connection.getTable(tableName);
    Table tableToBeDisabled = connection.getTable(toBeDisabledTable);
    HTU.loadNumericRows(tableToBeDisabled, HBaseTestingUtility.fam1, 6000, 7000);
    AtomicLong skippedEdits = new AtomicLong();
    RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink = mock(RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.class);
    when(sink.getSkippedEditsCounter()).thenReturn(skippedEdits);
    RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter sinkWriter = new RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter(sink, (ClusterConnection) connection, Executors.newSingleThreadExecutor(), Integer.MAX_VALUE);
    RegionLocator rl = connection.getRegionLocator(toBeDisabledTable);
    HRegionLocation hrl = rl.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY);
    byte[] encodedRegionName = hrl.getRegionInfo().getEncodedNameAsBytes();
    Entry entry = new Entry(new WALKey(encodedRegionName, toBeDisabledTable, 1), new WALEdit());
    // disable the table
    HTU.getAdmin().disableTable(toBeDisabledTable);
    if (dropTable) {
        HTU.getAdmin().deleteTable(toBeDisabledTable);
    }
    sinkWriter.append(toBeDisabledTable, encodedRegionName, HConstants.EMPTY_BYTE_ARRAY, Lists.newArrayList(entry, entry));
    assertEquals(2, skippedEdits.get());
    try {
        // load some data into the main table (not the disabled/dropped one)
        HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);
        // now enable the replication
        admin.enablePeer(ServerRegionReplicaUtil.getReplicationPeerId());
        verifyReplication(tableName, regionReplication, 0, 1000);
    } finally {
        admin.close();
        table.close();
        rl.close();
        tableToBeDisabled.close();
        HTU.deleteTableIfAny(toBeDisabledTable);
        connection.close();
    }
}
Also used : RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ReplicationAdmin(org.apache.hadoop.hbase.client.replication.ReplicationAdmin) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) WALKey(org.apache.hadoop.hbase.wal.WALKey) TableName(org.apache.hadoop.hbase.TableName) AtomicLong(java.util.concurrent.atomic.AtomicLong) Entry(org.apache.hadoop.hbase.wal.WAL.Entry) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit)
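
The RegionReplicaSinkWriter exercised above is expected to skip entries destined for tables that no longer exist and to account for them via the skipped-edits counter, which is what the assertEquals(2, ...) verifies. A hedged sketch of that skip-and-count pattern, with a hypothetical TableChecker standing in for the real catalog lookup (SkippingSinkSketch and all member names are illustrative):

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.wal.WAL.Entry;

class SkippingSinkSketch {
    private final AtomicLong skippedEdits = new AtomicLong();

    // Hypothetical table-existence probe standing in for a real catalog lookup.
    interface TableChecker {
        boolean tableExists(TableName table);
    }

    void append(TableChecker checker, TableName table, List<Entry> entries) {
        if (!checker.tableExists(table)) {
            // Table was dropped or disabled: count the entries instead of replicating them.
            skippedEdits.addAndGet(entries.size());
            return;
        }
        // ... otherwise hand the entries to the replication sink ...
    }

    long getSkippedEdits() {
        return skippedEdits.get();
    }
}

Appending a two-entry list for a missing table bumps the counter by 2, mirroring the assertion in the test.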

Example 18 with WALEdit

use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.

the class MultiRowMutationProcessor method preBatchMutate.

@Override
public void preBatchMutate(HRegion region, WALEdit walEdit) throws IOException {
    // TODO we should return back the status of this hook run to HRegion so that those Mutations
    // with OperationStatus as SUCCESS or FAILURE should not get applied to memstore.
    RegionCoprocessorHost coprocessorHost = region.getCoprocessorHost();
    OperationStatus[] opStatus = new OperationStatus[mutations.size()];
    Arrays.fill(opStatus, OperationStatus.NOT_RUN);
    WALEdit[] walEditsFromCP = new WALEdit[mutations.size()];
    if (coprocessorHost != null) {
        miniBatch = new MiniBatchOperationInProgress<>(mutations.toArray(new Mutation[mutations.size()]), opStatus, walEditsFromCP, 0, mutations.size());
        coprocessorHost.preBatchMutate(miniBatch);
    }
    // Apply edits to a single WALEdit
    for (int i = 0; i < mutations.size(); i++) {
        if (opStatus[i] == OperationStatus.NOT_RUN) {
            // Any other OperationStatusCode means the mutation already succeeded or failed
            // in the CP hook itself; no need to apply it to the region again.
            if (walEditsFromCP[i] != null) {
                // Add the WALEdit created by CP hook
                for (Cell walCell : walEditsFromCP[i].getCells()) {
                    walEdit.add(walCell);
                }
            }
        }
    }
}
Also used : WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) Cell(org.apache.hadoop.hbase.Cell)
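
For context, the walEditsFromCP slots merged above are filled by region coprocessors that handle a mutation themselves and hand back their own WALEdit. A hedged sketch of that side of the contract, assuming the 1.x-era RegionObserver hook shape (AuditingObserver and the audit family are illustrative, and the hook registration is omitted):

import java.io.IOException;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;

public class AuditingObserver {
    public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> ctx,
            MiniBatchOperationInProgress<Mutation> miniBatch) throws IOException {
        for (int i = 0; i < miniBatch.size(); i++) {
            // Attach an extra audit cell for this mutation; because its status stays
            // NOT_RUN, the processor's loop above copies it into the region-level edit.
            Mutation m = miniBatch.getOperation(i);
            WALEdit edit = new WALEdit();
            edit.add(new KeyValue(m.getRow(), Bytes.toBytes("audit"),
                Bytes.toBytes("q"), Bytes.toBytes("seen")));
            miniBatch.setWalEdit(i, edit);
        }
    }
}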

Example 19 with WALEdit

use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.

the class TestWALObserver method verifyWritesSeen.

private void verifyWritesSeen(final WAL log, final SampleRegionWALObserver cp, final boolean seesLegacy) throws Exception {
    HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(TEST_TABLE));
    final HTableDescriptor htd = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE));
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    Path basedir = new Path(this.hbaseRootDir, Bytes.toString(TEST_TABLE));
    deleteDir(basedir);
    fs.mkdirs(new Path(basedir, hri.getEncodedName()));
    // TEST_FAMILY[0] shall be removed from WALEdit.
    // TEST_FAMILY[1] value shall be changed.
    // TEST_FAMILY[2] shall be added to WALEdit, although it's not in the put.
    cp.setTestValues(TEST_TABLE, TEST_ROW, TEST_FAMILY[0], TEST_QUALIFIER[0], TEST_FAMILY[1], TEST_QUALIFIER[1], TEST_FAMILY[2], TEST_QUALIFIER[2]);
    assertFalse(cp.isPreWALWriteCalled());
    assertFalse(cp.isPostWALWriteCalled());
    // TEST_FAMILY[2] is not in the put, however it shall be added by the tested
    // coprocessor.
    // Use a Put to create familyMap.
    Put p = creatPutWith2Families(TEST_ROW);
    Map<byte[], List<Cell>> familyMap = p.getFamilyCellMap();
    WALEdit edit = new WALEdit();
    addFamilyMapToWALEdit(familyMap, edit);
    boolean foundFamily0 = false;
    boolean foundFamily2 = false;
    boolean modifiedFamily1 = false;
    List<Cell> cells = edit.getCells();
    for (Cell cell : cells) {
        if (Arrays.equals(CellUtil.cloneFamily(cell), TEST_FAMILY[0])) {
            foundFamily0 = true;
        }
        if (Arrays.equals(CellUtil.cloneFamily(cell), TEST_FAMILY[2])) {
            foundFamily2 = true;
        }
        if (Arrays.equals(CellUtil.cloneFamily(cell), TEST_FAMILY[1])) {
            if (!Arrays.equals(CellUtil.cloneValue(cell), TEST_VALUE[1])) {
                modifiedFamily1 = true;
            }
        }
    }
    assertTrue(foundFamily0);
    assertFalse(foundFamily2);
    assertFalse(modifiedFamily1);
    // this is where the pre- and post-WAL-write coprocessor hooks should fire.
    long now = EnvironmentEdgeManager.currentTime();
    long txid = log.append(hri, new WALKey(hri.getEncodedNameAsBytes(), hri.getTable(), now, new MultiVersionConcurrencyControl(), scopes), edit, true);
    log.sync(txid);
    // the edit should have been changed by the coprocessor by now.
    foundFamily0 = false;
    foundFamily2 = false;
    modifiedFamily1 = false;
    for (Cell cell : cells) {
        if (Arrays.equals(CellUtil.cloneFamily(cell), TEST_FAMILY[0])) {
            foundFamily0 = true;
        }
        if (Arrays.equals(CellUtil.cloneFamily(cell), TEST_FAMILY[2])) {
            foundFamily2 = true;
        }
        if (Arrays.equals(CellUtil.cloneFamily(cell), TEST_FAMILY[1])) {
            if (!Arrays.equals(CellUtil.cloneValue(cell), TEST_VALUE[1])) {
                modifiedFamily1 = true;
            }
        }
    }
    assertFalse(foundFamily0);
    assertTrue(foundFamily2);
    assertTrue(modifiedFamily1);
    assertTrue(cp.isPreWALWriteCalled());
    assertTrue(cp.isPostWALWriteCalled());
}
Also used : Path(org.apache.hadoop.fs.Path) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) TreeMap(java.util.TreeMap) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell)
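
SampleRegionWALObserver itself is not shown here. A rough sketch of what the test expects it to do during the WAL-write hook: rewrite the edit's cell list in place, dropping one family and injecting another, which is why the test's cells reference observes the changes after log.append() (class, method, and family names below are illustrative):

import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;

public class EditRewritingSketch {
    private static final byte[] DROP_FAMILY = Bytes.toBytes("f0");
    private static final byte[] ADD_FAMILY = Bytes.toBytes("f2");

    // Mutates the edit in place; WALEdit.getCells() exposes the live cell list.
    void rewrite(byte[] row, WALEdit logEdit) {
        List<Cell> cells = logEdit.getCells();
        // Remove every cell of the "dropped" family (TEST_FAMILY[0] in the test).
        cells.removeIf(cell -> CellUtil.matchingFamily(cell, DROP_FAMILY));
        // Add a cell for a family that was never in the original Put (TEST_FAMILY[2]).
        cells.add(new KeyValue(row, ADD_FAMILY, Bytes.toBytes("q"),
            Bytes.toBytes("injected")));
    }
}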

Example 20 with WALEdit

use of org.apache.hadoop.hbase.regionserver.wal.WALEdit in project hbase by apache.

the class TestDistributedLogSplitting method makeWAL.

public void makeWAL(HRegionServer hrs, List<HRegionInfo> regions, String tname, String fname, int num_edits, int edit_size, boolean cleanShutdown) throws IOException {
    TableName fullTName = TableName.valueOf(tname);
    // remove root and meta region
    regions.remove(HRegionInfo.FIRST_META_REGIONINFO);
    // using one sequenceId for edits across all regions is ok.
    final AtomicLong sequenceId = new AtomicLong(10);
    for (Iterator<HRegionInfo> iter = regions.iterator(); iter.hasNext(); ) {
        HRegionInfo regionInfo = iter.next();
        if (regionInfo.getTable().isSystemTable()) {
            iter.remove();
        }
    }
    HTableDescriptor htd = new HTableDescriptor(fullTName);
    byte[] family = Bytes.toBytes(fname);
    htd.addFamily(new HColumnDescriptor(family));
    byte[] value = new byte[edit_size];
    List<HRegionInfo> hris = new ArrayList<>();
    for (HRegionInfo region : regions) {
        if (!region.getTable().getNameAsString().equalsIgnoreCase(tname)) {
            continue;
        }
        hris.add(region);
    }
    LOG.info("Creating wal edits across " + hris.size() + " regions.");
    for (int i = 0; i < edit_size; i++) {
        value[i] = (byte) ('a' + (i % 26));
    }
    int n = hris.size();
    int[] counts = new int[n];
    // sync every ~30k to line up with desired wal rolls
    final int syncEvery = 30 * 1024 / edit_size;
    if (n > 0) {
        for (int i = 0; i < num_edits; i += 1) {
            WALEdit e = new WALEdit();
            HRegionInfo curRegionInfo = hris.get(i % n);
            final WAL log = hrs.getWAL(curRegionInfo);
            byte[] startRow = curRegionInfo.getStartKey();
            if (startRow == null || startRow.length == 0) {
                startRow = new byte[] { 0, 0, 0, 0, 1 };
            }
            byte[] row = Bytes.incrementBytes(startRow, counts[i % n]);
            // use the last 5 bytes because HBaseTestingUtility.createMultiRegions() uses
            // 5-byte row keys
            row = Arrays.copyOfRange(row, 3, 8);
            byte[] qualifier = Bytes.toBytes("c" + Integer.toString(i));
            e.add(new KeyValue(row, family, qualifier, System.currentTimeMillis(), value));
            log.append(curRegionInfo, new WALKey(curRegionInfo.getEncodedNameAsBytes(), fullTName, System.currentTimeMillis()), e, true);
            if (0 == i % syncEvery) {
                log.sync();
            }
            counts[i % n] += 1;
        }
    }
    // sync in a separate pass because the regions might share logs; shutdown is
    // idempotent, but sync will cause errors if done after shutdown.
    for (HRegionInfo info : hris) {
        final WAL log = hrs.getWAL(info);
        log.sync();
    }
    if (cleanShutdown) {
        for (HRegionInfo info : hris) {
            final WAL log = hrs.getWAL(info);
            log.shutdown();
        }
    }
    for (int i = 0; i < n; i++) {
        LOG.info("region " + hris.get(i).getRegionNameAsString() + " has " + counts[i] + " edits");
    }
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) KeyValue(org.apache.hadoop.hbase.KeyValue) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) WALKey(org.apache.hadoop.hbase.wal.WALKey) TableName(org.apache.hadoop.hbase.TableName) AtomicLong(java.util.concurrent.atomic.AtomicLong) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit)
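
To double-check what makeWAL produced, the edits can be read back with the WAL reader API. A short sketch assuming direct access to a WAL file on the filesystem (the path handling is illustrative; WALFactory.createReader and WAL.Reader are the 1.x-era reading APIs):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalReadBackSketch {
    // Counts the cells across all entries in one WAL file.
    static long countEdits(FileSystem fs, Path walFile, Configuration conf) throws IOException {
        long count = 0;
        try (WAL.Reader reader = WALFactory.createReader(fs, walFile, conf)) {
            for (WAL.Entry entry = reader.next(); entry != null; entry = reader.next()) {
                count += entry.getEdit().getCells().size();
            }
        }
        return count;
    }
}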

Aggregations

WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit): 29
WALKey (org.apache.hadoop.hbase.wal.WALKey): 13
Cell (org.apache.hadoop.hbase.Cell): 10
WAL (org.apache.hadoop.hbase.wal.WAL): 9
KeyValue (org.apache.hadoop.hbase.KeyValue): 8
Test (org.junit.Test): 8
ArrayList (java.util.ArrayList): 7
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 7
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 7
Put (org.apache.hadoop.hbase.client.Put): 5
IOException (java.io.IOException): 4
List (java.util.List): 4
TreeMap (java.util.TreeMap): 4
AtomicLong (java.util.concurrent.atomic.AtomicLong): 4
Path (org.apache.hadoop.fs.Path): 4
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 4
TableName (org.apache.hadoop.hbase.TableName): 4
Mutation (org.apache.hadoop.hbase.client.Mutation): 4
Entry (org.apache.hadoop.hbase.wal.WAL.Entry): 4
HashMap (java.util.HashMap): 3