Use of org.apache.hadoop.hbase.wal.WALProvider.Writer in project hbase by apache.
The class TestWALSplit, method testSplitWillFailIfWritingToRegionFails:
@Test(timeout = 300000, expected = IOException.class)
public void testSplitWillFailIfWritingToRegionFails() throws Exception {
  // Leave the 5th log open so we can append the "trap" entry below.
  Writer writer = generateWALs(4);
  useDifferentDFSClient();
  String region = "break";
  Path regiondir = new Path(TABLEDIR, region);
  fs.mkdirs(regiondir);
  InstrumentedLogWriter.activateFailure = false;
  appendEntry(writer, TABLE_NAME, Bytes.toBytes(region), ("r" + 999).getBytes(), FAMILY, QUALIFIER, VALUE, 0);
  writer.close();
  try {
    InstrumentedLogWriter.activateFailure = true;
    WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf, wals);
  } catch (IOException e) {
    assertTrue(e.getMessage().contains("This exception is instrumented and should only be thrown for testing"));
    throw e;
  } finally {
    InstrumentedLogWriter.activateFailure = false;
  }
}
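For context, InstrumentedLogWriter is a test-only writer whose append path can be tripped through the static activateFailure flag. The class lives in HBase's test sources and is not shown on this page; the following is a minimal sketch of the idea, where the base class and the exact trigger condition (matching the "break" region name) are assumptions for illustration:

import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL.Entry;

// Hypothetical sketch of a failure-injecting WAL writer; not the actual
// HBase test class. Extending ProtobufLogWriter and keying the failure
// off the "break" region name are assumptions.
public class InstrumentedLogWriter extends ProtobufLogWriter {
  public static boolean activateFailure = false;

  @Override
  public void append(Entry entry) throws IOException {
    super.append(entry);
    // Trip only on entries appended against the "break" region the test sets up.
    if (activateFailure
        && Bytes.equals(entry.getKey().getEncodedRegionName(), Bytes.toBytes("break"))) {
      throw new IOException("This exception is instrumented and should only be thrown for testing");
    }
  }
}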
Use of org.apache.hadoop.hbase.wal.WALProvider.Writer in project hbase by apache.
The class TestWALSplit, method generateWALs:
/**
 * @param writers number of WAL files to generate.
 * @param entries number of edits to append per writer and region.
 * @param leaveOpen index of the writer to leave un-closed; -1 to close all.
 * @param regionEvents number of region event markers to interleave.
 * @return the writer that's still open, or null if all were closed.
 */
private Writer generateWALs(int writers, int entries, int leaveOpen, int regionEvents) throws IOException {
  makeRegionDirs(REGIONS);
  fs.mkdirs(WALDIR);
  Writer[] ws = new Writer[writers];
  int seq = 0;
  int numRegionEventsAdded = 0;
  for (int i = 0; i < writers; i++) {
    ws[i] = wals.createWALWriter(fs, new Path(WALDIR, WAL_FILE_PREFIX + i));
    for (int j = 0; j < entries; j++) {
      int prefix = 0;
      for (String region : REGIONS) {
        String row_key = region + prefix++ + i + j;
        appendEntry(ws[i], TABLE_NAME, region.getBytes(), row_key.getBytes(), FAMILY, QUALIFIER, VALUE, seq++);
        if (numRegionEventsAdded < regionEvents) {
          numRegionEventsAdded++;
          appendRegionEvent(ws[i], region);
        }
      }
    }
    if (i != leaveOpen) {
      ws[i].close();
      LOG.info("Closing writer " + i);
    }
  }
  if (leaveOpen < 0 || leaveOpen >= writers) {
    return null;
  }
  return ws[leaveOpen];
}
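The tests on this page also call a single-argument generateWALs(int), presumably a convenience overload that delegates to this method with the suite's defaults. A sketch of that wrapper, where NUM_WRITERS and ENTRIES are assumed test-class constants:

// Assumed convenience overload: delegate to the full version using
// suite-wide defaults (NUM_WRITERS and ENTRIES are test-class constants),
// with no region event markers.
private Writer generateWALs(int leaveOpen) throws IOException {
  return generateWALs(NUM_WRITERS, ENTRIES, leaveOpen, 0);
}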
Use of org.apache.hadoop.hbase.wal.WALProvider.Writer in project hbase by apache.
The class TestWALSplit, method testSplitLeavesCompactionEventsEdits:
@Test(timeout = 300000)
public void testSplitLeavesCompactionEventsEdits() throws IOException {
  HRegionInfo hri = new HRegionInfo(TABLE_NAME);
  REGIONS.clear();
  REGIONS.add(hri.getEncodedName());
  Path regionDir = new Path(FSUtils.getTableDir(HBASEDIR, TABLE_NAME), hri.getEncodedName());
  LOG.info("Creating region directory: " + regionDir);
  assertTrue(fs.mkdirs(regionDir));
  Writer writer = generateWALs(1, 10, 0, 10);
  String[] compactInputs = new String[] { "file1", "file2", "file3" };
  String compactOutput = "file4";
  appendCompactionEvent(writer, hri, compactInputs, compactOutput);
  writer.close();
  useDifferentDFSClient();
  WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf, wals);
  Path originalLog = (fs.listStatus(OLDLOGDIR))[0].getPath();
  // The original log should have 10 test edits, 10 region markers, 1 compaction marker.
  assertEquals(21, countWAL(originalLog));
  Path[] splitLog = getLogForRegion(HBASEDIR, TABLE_NAME, hri.getEncodedName());
  assertEquals(1, splitLog.length);
  assertFalse("edits differ after split", logsAreEqual(originalLog, splitLog[0]));
  // The split log should have 10 test edits plus 1 compaction marker.
  assertEquals(11, countWAL(splitLog[0]));
}
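countWAL is another helper on this test class. A plausible implementation simply reads the log back and counts entries until end of file; the helper body below is an assumption, though WALFactory.createReader is a real HBase method:

// Plausible sketch of the countWAL helper used in the assertions above:
// open a reader on the WAL file and count entries until the reader
// returns null at end-of-file.
private int countWAL(Path log) throws IOException {
  int count = 0;
  try (WAL.Reader in = wals.createReader(fs, log)) {
    while (in.next() != null) {
      count++;
    }
  }
  return count;
}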
Use of org.apache.hadoop.hbase.wal.WALProvider.Writer in project hbase by apache.
The class TestWALSplit, method testIOEOnOutputThread:
@Test(timeout = 300000)
public void testIOEOnOutputThread() throws Exception {
  conf.setBoolean(HBASE_SKIP_ERRORS, false);
  generateWALs(-1);
  useDifferentDFSClient();
  FileStatus[] logfiles = fs.listStatus(WALDIR);
  assertTrue("There should be some log file", logfiles != null && logfiles.length > 0);
  // WALs with no entries (like the one we don't use in the factory)
  // won't cause a failure since nothing will ever be written to them.
  // Pick the largest one since it's most likely to have entries.
  int largestLogFile = 0;
  long largestSize = 0;
  for (int i = 0; i < logfiles.length; i++) {
    if (logfiles[i].getLen() > largestSize) {
      largestLogFile = i;
      largestSize = logfiles[i].getLen();
    }
  }
  assertTrue("There should be some log greater than size 0.", 0 < largestSize);
  // Set up a splitter that will throw an IOE on the output side.
  WALSplitter logSplitter = new WALSplitter(wals, conf, HBASEDIR, fs, null, null, this.mode) {
    @Override
    protected Writer createWriter(Path logfile) throws IOException {
      Writer mockWriter = Mockito.mock(Writer.class);
      Mockito.doThrow(new IOException("Injected")).when(mockWriter).append(Mockito.<Entry>any());
      return mockWriter;
    }
  };
  // Set up a background thread dumper. It needs a thread to depend on, and we
  // run the thread dumping in a background thread so it does not hold up the test.
  final AtomicBoolean stop = new AtomicBoolean(false);
  final Thread someOldThread = new Thread("Some-old-thread") {
    @Override
    public void run() {
      while (!stop.get()) Threads.sleep(10);
    }
  };
  someOldThread.setDaemon(true);
  someOldThread.start();
  final Thread t = new Thread("Background-thread-dumper") {
    @Override
    public void run() {
      try {
        Threads.threadDumpingIsAlive(someOldThread);
      } catch (InterruptedException e) {
        e.printStackTrace();
      }
    }
  };
  t.setDaemon(true);
  t.start();
  try {
    logSplitter.splitLogFile(logfiles[largestLogFile], null);
    fail("Didn't throw!");
  } catch (IOException ioe) {
    assertTrue(ioe.toString().contains("Injected"));
  } finally {
    // Setting this to true stops the marker thread, which shuts down the background thread dumper.
    stop.set(true);
  }
}
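The marker-thread-plus-dumper wiring above is boilerplate that a test class could factor out. A hypothetical helper capturing the same pattern (not part of TestWALSplit):

// Hypothetical refactoring of the pattern above: start a daemon marker
// thread plus a daemon dumper that prints periodic thread dumps while the
// marker is alive. Setting `stop` to true shuts both down.
private void startBackgroundThreadDumper(final AtomicBoolean stop) {
  final Thread marker = new Thread("Some-old-thread") {
    @Override
    public void run() {
      while (!stop.get()) Threads.sleep(10);
    }
  };
  marker.setDaemon(true);
  marker.start();
  Thread dumper = new Thread("Background-thread-dumper") {
    @Override
    public void run() {
      try {
        Threads.threadDumpingIsAlive(marker);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  };
  dumper.setDaemon(true);
  dumper.start();
}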
Use of org.apache.hadoop.hbase.wal.WALProvider.Writer in project hbase by apache.
The class TestWALSplit, method testConcurrentSplitLogAndReplayRecoverEdit:
/**
 * @throws IOException
 * @see https://issues.apache.org/jira/browse/HBASE-4862
 */
@Test(timeout = 300000)
public void testConcurrentSplitLogAndReplayRecoverEdit() throws IOException {
  LOG.info("testConcurrentSplitLogAndReplayRecoverEdit");
  // Generate WALs for our destination region.
  String regionName = "r0";
  final Path regiondir = new Path(TABLEDIR, regionName);
  REGIONS.clear();
  REGIONS.add(regionName);
  generateWALs(-1);
  wals.getWAL(Bytes.toBytes(regionName), null);
  FileStatus[] logfiles = fs.listStatus(WALDIR);
  assertTrue("There should be some log file", logfiles != null && logfiles.length > 0);
  WALSplitter logSplitter = new WALSplitter(wals, conf, HBASEDIR, fs, null, null, this.mode) {
    @Override
    protected Writer createWriter(Path logfile) throws IOException {
      Writer writer = wals.createRecoveredEditsWriter(this.fs, logfile);
      // After creating the writer, simulate the region's
      // replayRecoveredEditsIfAny(), which gets this region's split edit
      // files (excluding files with the '.temp' suffix) and deletes them.
      NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(fs, regiondir);
      if (files != null && !files.isEmpty()) {
        for (Path file : files) {
          if (!this.fs.delete(file, false)) {
            LOG.error("Failed delete of " + file);
          } else {
            LOG.debug("Deleted recovered.edits file=" + file);
          }
        }
      }
      return writer;
    }
  };
  try {
    logSplitter.splitLogFile(logfiles[0], null);
  } catch (IOException e) {
    LOG.info(e);
    fail("Throws IOException when splitting log; most likely the file being written "
        + "does not exist, caused by a concurrent replayRecoveredEditsIfAny()");
  }
  if (fs.exists(CORRUPTDIR)) {
    if (fs.listStatus(CORRUPTDIR).length > 0) {
      fail("There are some corrupt logs; most likely caused by a concurrent replayRecoveredEditsIfAny()");
    }
  }
}
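The corrupt-log check at the end of this test could be expressed as a small reusable assertion. A hypothetical helper along those lines (CORRUPTDIR is the test's corrupt-WAL directory, as used above):

// Hypothetical helper factoring out the final check above: fail if the
// splitter moved any logs into the corrupt-WAL directory.
private void assertNoCorruptLogs() throws IOException {
  if (fs.exists(CORRUPTDIR)) {
    FileStatus[] corrupt = fs.listStatus(CORRUPTDIR);
    assertEquals("Unexpected corrupt logs, most likely from a concurrent replayRecoveredEditsIfAny()",
        0, corrupt == null ? 0 : corrupt.length);
  }
}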