Example 6 with WALKey

Use of org.apache.hadoop.hbase.wal.WALKey in the Apache HBase project.

From class TestWALObserver, method addWALEdits.

private void addWALEdits(final TableName tableName, final HRegionInfo hri, final byte[] rowName, final byte[] family, final int count, EnvironmentEdge ee, final WAL wal, final NavigableMap<byte[], Integer> scopes, final MultiVersionConcurrencyControl mvcc) throws IOException {
    String familyStr = Bytes.toString(family);
    long txid = -1;
    for (int j = 0; j < count; j++) {
        byte[] qualifierBytes = Bytes.toBytes(Integer.toString(j));
        byte[] columnBytes = Bytes.toBytes(familyStr + ":" + Integer.toString(j));
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(rowName, family, qualifierBytes, ee.currentTime(), columnBytes));
        // Uses WALKey instead of HLogKey on purpose; this will only work for tests
        // where we don't care about legacy coprocessors.
        txid = wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, ee.currentTime(), mvcc), edit, true);
    }
    if (-1 != txid) {
        wal.sync(txid);
    }
}
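
For context, the helper above can be driven as in the following minimal sketch. The table, row, and family names are illustrative assumptions, and hri and wal are taken to be the test's existing region info and WAL fixtures; this is not code from TestWALObserver.

// Hypothetical invocation of addWALEdits; names and scope values are illustrative.
NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
scopes.put(Bytes.toBytes("f"), HConstants.REPLICATION_SCOPE_LOCAL);
addWALEdits(TableName.valueOf("example-table"), hri, Bytes.toBytes("example-row"),
    Bytes.toBytes("f"), 3, EnvironmentEdgeManager.getDelegate(), wal, scopes,
    new MultiVersionConcurrencyControl());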
Also used: WALKey (org.apache.hadoop.hbase.wal.WALKey), KeyValue (org.apache.hadoop.hbase.KeyValue), WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit)

Example 7 with WALKey

Use of org.apache.hadoop.hbase.wal.WALKey in the Apache HBase project.

From class TestBulkLoad, method shouldBulkLoadManyFamilyHLog.

@Test
public void shouldBulkLoadManyFamilyHLog() throws IOException {
    // The mocked append() must mimic the real WAL contract: assign an MVCC write
    // entry to the WALKey, since the region waits on that entry to complete its
    // transaction.
    when(log.append(any(HRegionInfo.class), any(WALKey.class), argThat(bulkLogWalEditType(WALEdit.BULK_LOAD)), any(boolean.class))).thenAnswer(new Answer<Long>() {

        @Override
        public Long answer(InvocationOnMock invocation) {
            WALKey walKey = invocation.getArgumentAt(1, WALKey.class);
            MultiVersionConcurrencyControl mvcc = walKey.getMvcc();
            if (mvcc != null) {
                MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin();
                walKey.setWriteEntry(we);
            }
            return 1L;
        }
    });
    testRegionWithFamilies(family1, family2).bulkLoadHFiles(withFamilyPathsFor(family1, family2), false, null);
    verify(log).sync(anyLong());
}
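
The bulkLogWalEditType(...) helper used in the stubbing above is defined elsewhere in TestBulkLoad. A minimal sketch of what such a matcher could look like follows; it is an illustration under the assumption of Mockito 1.x (the same release line that provides getArgumentAt), not the test's actual implementation.

// Hypothetical matcher sketch: accepts a WALEdit whose first cell carries the
// given qualifier (e.g. WALEdit.BULK_LOAD). Illustrative only.
private static ArgumentMatcher<WALEdit> bulkLogWalEditType(final byte[] typeBytes) {
    return new ArgumentMatcher<WALEdit>() {
        @Override
        public boolean matches(Object argument) {
            WALEdit edit = (WALEdit) argument;
            return !edit.getCells().isEmpty()
                && CellUtil.matchingQualifier(edit.getCells().get(0), typeBytes);
        }
    };
}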
Also used: HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), WALKey (org.apache.hadoop.hbase.wal.WALKey), Answer (org.mockito.stubbing.Answer), InvocationOnMock (org.mockito.invocation.InvocationOnMock), Test (org.junit.Test)

Example 8 with WALKey

Use of org.apache.hadoop.hbase.wal.WALKey in the Apache HBase project.

From class TestHRegion, method testRecoveredEditsReplayCompaction.

public void testRecoveredEditsReplayCompaction(boolean mismatchedRegionName) throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    final WALFactory wals = new WALFactory(CONF, null, method);
    try {
        Path regiondir = region.getRegionFileSystem().getRegionDir();
        FileSystem fs = region.getRegionFileSystem().getFileSystem();
        byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
        long maxSeqId = 3;
        long minSeqId = 0;
        for (long i = minSeqId; i < maxSeqId; i++) {
            Put put = new Put(Bytes.toBytes(i));
            put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
            region.put(put);
            region.flush(true);
        }
        // this will create a region with 3 files
        assertEquals(3, region.getStore(family).getStorefilesCount());
        List<Path> storeFiles = new ArrayList<>(3);
        for (StoreFile sf : region.getStore(family).getStorefiles()) {
            storeFiles.add(sf.getPath());
        }
        // disable compaction completion
        CONF.setBoolean("hbase.hstore.compaction.complete", false);
        region.compactStores();
        // ensure that nothing changed
        assertEquals(3, region.getStore(family).getStorefilesCount());
        // now find the compacted file, and manually add it to the recovered edits
        Path tmpDir = new Path(region.getRegionFileSystem().getTempDir(), Bytes.toString(family));
        FileStatus[] files = FSUtils.listStatus(fs, tmpDir);
        String errorMsg = "Expected to find 1 file in the region temp directory from the compaction, could not find any";
        assertNotNull(errorMsg, files);
        assertEquals(errorMsg, 1, files.length);
        // move the file inside region dir
        Path newFile = region.getRegionFileSystem().commitStoreFile(Bytes.toString(family), files[0].getPath());
        byte[] encodedNameAsBytes = this.region.getRegionInfo().getEncodedNameAsBytes();
        byte[] fakeEncodedNameAsBytes = new byte[encodedNameAsBytes.length];
        for (int i = 0; i < encodedNameAsBytes.length; i++) {
            // Mix the byte array to have a new encodedName
            fakeEncodedNameAsBytes[i] = (byte) (encodedNameAsBytes[i] + 1);
        }
        CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(this.region.getRegionInfo(), mismatchedRegionName ? fakeEncodedNameAsBytes : null, family, storeFiles, Lists.newArrayList(newFile), region.getRegionFileSystem().getStoreDir(Bytes.toString(family)));
        WALUtil.writeCompactionMarker(region.getWAL(), this.region.getReplicationScope(), this.region.getRegionInfo(), compactionDescriptor, region.getMVCC());
        Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
        fs.create(recoveredEdits);
        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
        long time = System.nanoTime();
        writer.append(new WAL.Entry(new WALKey(regionName, tableName, 10, time, HConstants.DEFAULT_CLUSTER_ID), WALEdit.createCompaction(region.getRegionInfo(), compactionDescriptor)));
        writer.close();
        // close the region now, and reopen again
        region.getTableDesc();
        region.getRegionInfo();
        region.close();
        try {
            region = HRegion.openHRegion(region, null);
        } catch (WrongRegionException wre) {
            fail("Matching encoded region name should not have produced WrongRegionException");
        }
        // now check whether we have only one store file, the compacted one
        Collection<StoreFile> sfs = region.getStore(family).getStorefiles();
        for (StoreFile sf : sfs) {
            LOG.info(sf.getPath());
        }
        if (!mismatchedRegionName) {
            assertEquals(1, region.getStore(family).getStorefilesCount());
        }
        files = FSUtils.listStatus(fs, tmpDir);
        assertTrue("Expected to find 0 files inside " + tmpDir, files == null || files.length == 0);
        for (long i = minSeqId; i < maxSeqId; i++) {
            Get get = new Get(Bytes.toBytes(i));
            Result result = region.get(get);
            byte[] value = result.getValue(family, Bytes.toBytes(i));
            assertArrayEquals(Bytes.toBytes(i), value);
        }
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(this.region);
        this.region = null;
        wals.close();
    }
}
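
When the region reopens, the replay loop recognizes the marker written above by its metadata family and completes the interrupted compaction. A minimal sketch of that detection follows, as an illustration of the contract rather than HRegion's exact code:

// Illustration: a cell in WALEdit.METAFAMILY whose payload parses as a
// CompactionDescriptor is a compaction marker, not user data.
static CompactionDescriptor findCompactionMarker(WALEdit edit) throws IOException {
    for (Cell cell : edit.getCells()) {
        if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
            CompactionDescriptor desc = WALEdit.getCompaction(cell);
            if (desc != null) {
                return desc; // an interrupted compaction to complete on open
            }
        }
    }
    return null;
}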
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), WAL (org.apache.hadoop.hbase.wal.WAL), MetricsWAL (org.apache.hadoop.hbase.regionserver.wal.MetricsWAL), ArrayList (java.util.ArrayList), ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result), WALKey (org.apache.hadoop.hbase.wal.WALKey), FileSystem (org.apache.hadoop.fs.FileSystem), FaultyFileSystem (org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem), Writer (org.apache.hadoop.hbase.wal.WALProvider.Writer), Get (org.apache.hadoop.hbase.client.Get), WALFactory (org.apache.hadoop.hbase.wal.WALFactory), CompactionDescriptor (org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor), WALProvider (org.apache.hadoop.hbase.wal.WALProvider), AbstractFSWALProvider (org.apache.hadoop.hbase.wal.AbstractFSWALProvider)

Example 9 with WALKey

Use of org.apache.hadoop.hbase.wal.WALKey in the Apache HBase project.

From class TestHRegion, method testSkipRecoveredEditsReplay.

@Test
public void testSkipRecoveredEditsReplay() throws Exception {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, CONF, family);
    final WALFactory wals = new WALFactory(CONF, null, method);
    try {
        Path regiondir = region.getRegionFileSystem().getRegionDir();
        FileSystem fs = region.getRegionFileSystem().getFileSystem();
        byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
        Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
        long maxSeqId = 1050;
        long minSeqId = 1000;
        for (long i = minSeqId; i <= maxSeqId; i += 10) {
            Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
            fs.create(recoveredEdits);
            WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
            long time = System.nanoTime();
            WALEdit edit = new WALEdit();
            edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes.toBytes(i)));
            writer.append(new WAL.Entry(new WALKey(regionName, tableName, i, time, HConstants.DEFAULT_CLUSTER_ID), edit));
            writer.close();
        }
        MonitoredTask status = TaskMonitor.get().createStatus(method);
        Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        for (Store store : region.getStores()) {
            maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId - 1);
        }
        long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
        assertEquals(maxSeqId, seqId);
        region.getMVCC().advanceTo(seqId);
        Get get = new Get(row);
        Result result = region.get(get);
        for (long i = minSeqId; i <= maxSeqId; i += 10) {
            List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
            assertEquals(1, kvs.size());
            assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
        }
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(this.region);
        this.region = null;
        wals.close();
    }
}
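
Seeding maxSeqIdInStores with minSeqId - 1 tells the replay code that the stores contain nothing newer, so every edit written above must be replayed. A minimal sketch of the skip rule this test exercises (an illustration of the contract, not HRegion's code):

// Illustration: an edit is replayed only if its sequence id is newer than
// what the target store already persisted.
static boolean shouldReplay(long editSeqId, long maxSeqIdInStore) {
    return editSeqId > maxSeqIdInStore;
}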
Also used: Path (org.apache.hadoop.fs.Path), KeyValue (org.apache.hadoop.hbase.KeyValue), WAL (org.apache.hadoop.hbase.wal.WAL), MetricsWAL (org.apache.hadoop.hbase.regionserver.wal.MetricsWAL), TreeMap (java.util.TreeMap), Result (org.apache.hadoop.hbase.client.Result), WALKey (org.apache.hadoop.hbase.wal.WALKey), WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit), FileSystem (org.apache.hadoop.fs.FileSystem), FaultyFileSystem (org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem), Writer (org.apache.hadoop.hbase.wal.WALProvider.Writer), Get (org.apache.hadoop.hbase.client.Get), Matchers.anyLong (org.mockito.Matchers.anyLong), WALFactory (org.apache.hadoop.hbase.wal.WALFactory), Cell (org.apache.hadoop.hbase.Cell), WALProvider (org.apache.hadoop.hbase.wal.WALProvider), AbstractFSWALProvider (org.apache.hadoop.hbase.wal.AbstractFSWALProvider), MonitoredTask (org.apache.hadoop.hbase.monitoring.MonitoredTask), Test (org.junit.Test)

Example 10 with WALKey

Use of org.apache.hadoop.hbase.wal.WALKey in the Apache HBase project.

From class TestRecoveredEdits, method verifyAllEditsMadeItIn.

/**
   * @param fs Filesystem the recovered-edits file lives on.
   * @param conf Configuration used to create the WAL reader.
   * @param edits Path to the recovered-edits file to verify.
   * @param region Region the edits should have been applied to.
   * @return How many edits were seen in the file.
   * @throws IOException if the edits cannot be read or verified.
   */
private int verifyAllEditsMadeItIn(final FileSystem fs, final Configuration conf, final Path edits, final HRegion region) throws IOException {
    int count = 0;
    // Based on HRegion#replayRecoveredEdits
    WAL.Reader reader = null;
    try {
        reader = WALFactory.createReader(fs, edits, conf);
        WAL.Entry entry;
        while ((entry = reader.next()) != null) {
            WALKey key = entry.getKey();
            WALEdit val = entry.getEdit();
            count++;
            // Check this edit is for this region.
            if (!Bytes.equals(key.getEncodedRegionName(), region.getRegionInfo().getEncodedNameAsBytes())) {
                continue;
            }
            Cell previous = null;
            for (Cell cell : val.getCells()) {
                if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY))
                    continue;
                if (previous != null && CellComparator.COMPARATOR.compareRows(previous, cell) == 0)
                    continue;
                previous = cell;
                Get g = new Get(CellUtil.cloneRow(cell));
                Result r = region.get(g);
                boolean found = false;
                for (CellScanner scanner = r.cellScanner(); scanner.advance(); ) {
                    Cell current = scanner.current();
                    if (CellComparator.COMPARATOR.compareKeyIgnoresMvcc(cell, current) == 0) {
                        found = true;
                        break;
                    }
                }
                assertTrue("Failed to find " + cell, found);
            }
        }
    } finally {
        if (reader != null)
            reader.close();
    }
    return count;
}
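
A caller points the helper at a recovered-edits file and an opened region. The invocation below is a hypothetical sketch: the file name is illustrative (recovered-edits files are named by zero-padded sequence id), and fs, conf, recoveredEditsDir, and region are assumed test fixtures.

// Hypothetical usage; the file name and the >= 1 expectation are illustrative.
Path editsFile = new Path(recoveredEditsDir, "0000000000000001000");
int seen = verifyAllEditsMadeItIn(fs, conf, editsFile, region);
assertTrue("Expected at least one edit in " + editsFile, seen >= 1);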
Also used: WALKey (org.apache.hadoop.hbase.wal.WALKey), WAL (org.apache.hadoop.hbase.wal.WAL), WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit), Get (org.apache.hadoop.hbase.client.Get), Cell (org.apache.hadoop.hbase.Cell), CellScanner (org.apache.hadoop.hbase.CellScanner), Result (org.apache.hadoop.hbase.client.Result)

Aggregations

WALKey (org.apache.hadoop.hbase.wal.WALKey): 51 usages
WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit): 29 usages
Test (org.junit.Test): 26 usages
WAL (org.apache.hadoop.hbase.wal.WAL): 22 usages
TreeMap (java.util.TreeMap): 17 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 17 usages
KeyValue (org.apache.hadoop.hbase.KeyValue): 16 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 15 usages
IOException (java.io.IOException): 14 usages
Path (org.apache.hadoop.fs.Path): 14 usages
TableName (org.apache.hadoop.hbase.TableName): 12 usages
ArrayList (java.util.ArrayList): 10 usages
Cell (org.apache.hadoop.hbase.Cell): 10 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 10 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 9 usages
Get (org.apache.hadoop.hbase.client.Get): 9 usages
Result (org.apache.hadoop.hbase.client.Result): 9 usages
MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl): 8 usages
WALFactory (org.apache.hadoop.hbase.wal.WALFactory): 8 usages
Put (org.apache.hadoop.hbase.client.Put): 7 usages