Example 6 with Writer

Use of org.apache.hadoop.hbase.wal.WALProvider.Writer in project hbase by apache.

The class TestWALSplit, method testSplitWillFailIfWritingToRegionFails.

@Test(timeout = 300000, expected = IOException.class)
public void testSplitWillFailIfWritingToRegionFails() throws Exception {
    // Leave the 5th log open so we can append the "trap" entry.
    Writer writer = generateWALs(4);
    useDifferentDFSClient();
    String region = "break";
    Path regiondir = new Path(TABLEDIR, region);
    fs.mkdirs(regiondir);
    InstrumentedLogWriter.activateFailure = false;
    appendEntry(writer, TABLE_NAME, Bytes.toBytes(region), ("r" + 999).getBytes(), FAMILY, QUALIFIER, VALUE, 0);
    writer.close();
    try {
        InstrumentedLogWriter.activateFailure = true;
        WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf, wals);
    } catch (IOException e) {
        assertTrue(e.getMessage().contains("This exception is instrumented and should only be thrown for testing"));
        throw e;
    } finally {
        InstrumentedLogWriter.activateFailure = false;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) IOException(java.io.IOException) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) InstrumentedLogWriter(org.apache.hadoop.hbase.regionserver.wal.InstrumentedLogWriter) Test(org.junit.Test)
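
The trap relies on InstrumentedLogWriter, which the test presumably wires in through its configuration as the splitter's output writer class; flipping the static activateFailure flag arms a deliberate append failure. The writer itself is not part of this excerpt, so the following is a minimal sketch of the idea, with the failure condition (matching the "break" region used above) as an assumption:

import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL.Entry;

// Sketch only: the real InstrumentedLogWriter lives in
// org.apache.hadoop.hbase.regionserver.wal and may differ in detail.
public class InstrumentedLogWriter extends ProtobufLogWriter {

    public static boolean activateFailure = false;

    @Override
    public void append(Entry entry) throws IOException {
        super.append(entry);
        // Fail only when armed, and only for edits aimed at the "break" region.
        if (activateFailure
                && Bytes.toString(entry.getKey().getEncodedRegionName()).contains("break")) {
            throw new IOException("This exception is instrumented and should only be thrown for testing");
        }
    }
}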

Example 7 with Writer

Use of org.apache.hadoop.hbase.wal.WALProvider.Writer in project hbase by apache.

The class TestWALSplit, method generateWALs.

/**
   * @param writers number of WAL files to write
   * @param entries number of entries to append to each WAL
   * @param leaveOpen index to leave un-closed. -1 to close all.
   * @param regionEvents number of region-event markers to interleave
   * @return the writer that's still open, or null if all were closed.
   */
private Writer generateWALs(int writers, int entries, int leaveOpen, int regionEvents) throws IOException {
    makeRegionDirs(REGIONS);
    fs.mkdirs(WALDIR);
    Writer[] ws = new Writer[writers];
    int seq = 0;
    int numRegionEventsAdded = 0;
    for (int i = 0; i < writers; i++) {
        ws[i] = wals.createWALWriter(fs, new Path(WALDIR, WAL_FILE_PREFIX + i));
        for (int j = 0; j < entries; j++) {
            int prefix = 0;
            for (String region : REGIONS) {
                String rowKey = region + prefix++ + i + j;
                appendEntry(ws[i], TABLE_NAME, region.getBytes(), rowKey.getBytes(), FAMILY, QUALIFIER, VALUE, seq++);
                if (numRegionEventsAdded < regionEvents) {
                    numRegionEventsAdded++;
                    appendRegionEvent(ws[i], region);
                }
            }
        }
        if (i != leaveOpen) {
            ws[i].close();
            LOG.info("Closing writer " + i);
        }
    }
    if (leaveOpen < 0 || leaveOpen >= writers) {
        return null;
    }
    return ws[leaveOpen];
}
Also used : Path(org.apache.hadoop.fs.Path) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) InstrumentedLogWriter(org.apache.hadoop.hbase.regionserver.wal.InstrumentedLogWriter)
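
Other examples on this page call generateWALs with a single argument, e.g. generateWALs(4) and generateWALs(-1). That convenience overload is not shown in this excerpt; a plausible sketch is a straight delegation, where NUM_WRITERS and ENTRIES are assumed to be constants defined elsewhere in the test:

// Hypothetical overload matching the single-argument call sites above;
// NUM_WRITERS and ENTRIES are assumed test constants, and no
// region-event markers are requested.
private Writer generateWALs(int leaveOpen) throws IOException {
    return generateWALs(NUM_WRITERS, ENTRIES, leaveOpen, 0);
}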

Example 8 with Writer

Use of org.apache.hadoop.hbase.wal.WALProvider.Writer in project hbase by apache.

The class TestWALSplit, method testSplitLeavesCompactionEventsEdits.

@Test(timeout = 300000)
public void testSplitLeavesCompactionEventsEdits() throws IOException {
    HRegionInfo hri = new HRegionInfo(TABLE_NAME);
    REGIONS.clear();
    REGIONS.add(hri.getEncodedName());
    Path regionDir = new Path(FSUtils.getTableDir(HBASEDIR, TABLE_NAME), hri.getEncodedName());
    LOG.info("Creating region directory: " + regionDir);
    assertTrue(fs.mkdirs(regionDir));
    Writer writer = generateWALs(1, 10, 0, 10);
    String[] compactInputs = new String[] { "file1", "file2", "file3" };
    String compactOutput = "file4";
    appendCompactionEvent(writer, hri, compactInputs, compactOutput);
    writer.close();
    useDifferentDFSClient();
    WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf, wals);
    Path originalLog = (fs.listStatus(OLDLOGDIR))[0].getPath();
    // original log should have 10 test edits, 10 region markers, 1 compaction marker
    assertEquals(21, countWAL(originalLog));
    Path[] splitLog = getLogForRegion(HBASEDIR, TABLE_NAME, hri.getEncodedName());
    assertEquals(1, splitLog.length);
    assertFalse("split log should differ from the original (region markers are dropped)", logsAreEqual(originalLog, splitLog[0]));
    // split log should have 10 test edits plus 1 compaction marker
    assertEquals(11, countWAL(splitLog[0]));
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Path(org.apache.hadoop.fs.Path) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) InstrumentedLogWriter(org.apache.hadoop.hbase.regionserver.wal.InstrumentedLogWriter) Test(org.junit.Test)
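
The entry counts asserted above come from a countWAL helper that is not part of this excerpt. A minimal sketch, assuming the test's wals (WALFactory) and fs (FileSystem) fields:

// Sketch of a WAL entry counter: read the log start to finish and
// count the entries the reader returns.
private int countWAL(Path log) throws IOException {
    int count = 0;
    try (WAL.Reader in = wals.createReader(fs, log)) {
        while (in.next() != null) {
            count++;
        }
    }
    return count;
}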

Example 9 with Writer

Use of org.apache.hadoop.hbase.wal.WALProvider.Writer in project hbase by apache.

The class TestWALSplit, method testIOEOnOutputThread.

@Test(timeout = 300000)
public void testIOEOnOutputThread() throws Exception {
    conf.setBoolean(HBASE_SKIP_ERRORS, false);
    generateWALs(-1);
    useDifferentDFSClient();
    FileStatus[] logfiles = fs.listStatus(WALDIR);
    assertTrue("There should be some log file", logfiles != null && logfiles.length > 0);
    // wals with no entries (like the one we don't use in the factory)
    // won't cause a failure since nothing will ever be written.
    // pick the largest one since it's most likely to have entries.
    int largestLogFile = 0;
    long largestSize = 0;
    for (int i = 0; i < logfiles.length; i++) {
        if (logfiles[i].getLen() > largestSize) {
            largestLogFile = i;
            largestSize = logfiles[i].getLen();
        }
    }
    assertTrue("There should be some log greater than size 0.", 0 < largestSize);
    // Set up a splitter that will throw an IOE on the output side
    WALSplitter logSplitter = new WALSplitter(wals, conf, HBASEDIR, fs, null, null, this.mode) {

        @Override
        protected Writer createWriter(Path logfile) throws IOException {
            Writer mockWriter = Mockito.mock(Writer.class);
            Mockito.doThrow(new IOException("Injected")).when(mockWriter).append(Mockito.<Entry>any());
            return mockWriter;
        }
    };
    // Set up a background thread dumper.  Needs a thread to depend on and then we need to run
    // the thread dumping in a background thread so it does not hold up the test.
    final AtomicBoolean stop = new AtomicBoolean(false);
    final Thread someOldThread = new Thread("Some-old-thread") {

        @Override
        public void run() {
            while (!stop.get()) Threads.sleep(10);
        }
    };
    someOldThread.setDaemon(true);
    someOldThread.start();
    final Thread t = new Thread("Background-thread-dumper") {

        @Override
        public void run() {
            try {
                Threads.threadDumpingIsAlive(someOldThread);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    };
    t.setDaemon(true);
    t.start();
    try {
        logSplitter.splitLogFile(logfiles[largestLogFile], null);
        fail("Didn't throw!");
    } catch (IOException ioe) {
        assertTrue(ioe.toString().contains("Injected"));
    } finally {
        // Setting this to true will turn off the background thread dumper.
        stop.set(true);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FileStatus(org.apache.hadoop.fs.FileStatus) IOException(java.io.IOException) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) InstrumentedLogWriter(org.apache.hadoop.hbase.regionserver.wal.InstrumentedLogWriter) Test(org.junit.Test)
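
The essential trick in this example is stubbing the output-side Writer so every append fails; the rest is scaffolding for dumping threads if the split hangs. The stubbing pattern in isolation, as a sketch (the class and method names are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.wal.WAL.Entry;
import org.apache.hadoop.hbase.wal.WALProvider.Writer;
import org.mockito.Mockito;

public class FailingWriterSketch {

    // Returns a mock Writer whose append() always throws, so the first
    // edit routed to it makes the splitter's output thread fail.
    static Writer failingWriter() throws IOException {
        Writer w = Mockito.mock(Writer.class);
        Mockito.doThrow(new IOException("Injected")).when(w).append(Mockito.<Entry>any());
        return w;
    }
}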

Example 10 with Writer

Use of org.apache.hadoop.hbase.wal.WALProvider.Writer in project hbase by apache.

The class TestWALSplit, method testConcurrentSplitLogAndReplayRecoverEdit.

/**
   * @throws IOException
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-4862">HBASE-4862</a>
   */
@Test(timeout = 300000)
public void testConcurrentSplitLogAndReplayRecoverEdit() throws IOException {
    LOG.info("testConcurrentSplitLogAndReplayRecoverEdit");
    // Generate wals for our destination region
    String regionName = "r0";
    final Path regiondir = new Path(TABLEDIR, regionName);
    REGIONS.clear();
    REGIONS.add(regionName);
    generateWALs(-1);
    wals.getWAL(Bytes.toBytes(regionName), null);
    FileStatus[] logfiles = fs.listStatus(WALDIR);
    assertTrue("There should be some log file", logfiles != null && logfiles.length > 0);
    WALSplitter logSplitter = new WALSplitter(wals, conf, HBASEDIR, fs, null, null, this.mode) {

        @Override
        protected Writer createWriter(Path logfile) throws IOException {
            Writer writer = wals.createRecoveredEditsWriter(this.fs, logfile);
            // After creating the writer, simulate the region's
            // replayRecoveredEditsIfAny(), which lists this region's split
            // edit files and deletes them, excluding files with a '.temp' suffix.
            NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(fs, regiondir);
            if (files != null && !files.isEmpty()) {
                for (Path file : files) {
                    if (!this.fs.delete(file, false)) {
                        LOG.error("Failed delete of " + file);
                    } else {
                        LOG.debug("Deleted recovered.edits file=" + file);
                    }
                }
            }
            return writer;
        }
    };
    try {
        logSplitter.splitLogFile(logfiles[0], null);
    } catch (IOException e) {
        LOG.info(e);
        fail("Throws IOException when spliting " + "log, it is most likely because writing file does not " + "exist which is caused by concurrent replayRecoveredEditsIfAny()");
    }
    if (fs.exists(CORRUPTDIR) && fs.listStatus(CORRUPTDIR).length > 0) {
        fail("There are corrupt logs, most likely caused by a concurrent replayRecoveredEditsIfAny()");
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) IOException(java.io.IOException) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) InstrumentedLogWriter(org.apache.hadoop.hbase.regionserver.wal.InstrumentedLogWriter) Test(org.junit.Test)

Aggregations

Path (org.apache.hadoop.fs.Path)15 Writer (org.apache.hadoop.hbase.wal.WALProvider.Writer)15 Test (org.junit.Test)12 FileSystem (org.apache.hadoop.fs.FileSystem)9 IOException (java.io.IOException)8 ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString)8 Configuration (org.apache.hadoop.conf.Configuration)6 Put (org.apache.hadoop.hbase.client.Put)6 FaultyFileSystem (org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem)6 InstrumentedLogWriter (org.apache.hadoop.hbase.regionserver.wal.InstrumentedLogWriter)6 WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit)6 WALKey (org.apache.hadoop.hbase.wal.WALKey)6 TreeMap (java.util.TreeMap)5 Cell (org.apache.hadoop.hbase.Cell)5 MetricsWAL (org.apache.hadoop.hbase.regionserver.wal.MetricsWAL)5 AbstractFSWALProvider (org.apache.hadoop.hbase.wal.AbstractFSWALProvider)5 WAL (org.apache.hadoop.hbase.wal.WAL)5 WALFactory (org.apache.hadoop.hbase.wal.WALFactory)5 WALProvider (org.apache.hadoop.hbase.wal.WALProvider)5 FSHLog (org.apache.hadoop.hbase.regionserver.wal.FSHLog)4