Use of org.apache.hadoop.hdfs.DFSInputStream in project hbase by apache.
From the class AbstractTestWALReplay, method testDatalossWhenInputError.
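This test, written for HBASE-15252, checks that WAL replay fails loudly instead of silently dropping edits when the underlying DFSInputStream errors out right after the WAL header. The fault is injected with Mockito: a spied FileSystem hands back a stream whose wrapped DFSInputStream throws from read(byte[], int, int) once the read position passes the header length.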
/**
 * Test case for https://issues.apache.org/jira/browse/HBASE-15252
 */
@Test
public void testDatalossWhenInputError() throws Exception {
  final TableName tableName = TableName.valueOf("testDatalossWhenInputError");
  final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
  final Path basedir = CommonFSUtils.getWALTableDir(conf, tableName);
  deleteDir(basedir);
  final byte[] rowName = tableName.getName();
  final int countPerFamily = 10;
  final TableDescriptor htd = createBasic1FamilyHTD(tableName);
  HRegion region1 = HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
  Path regionDir = region1.getWALRegionDir();
  HBaseTestingUtil.closeRegionAndWAL(region1);
  WAL wal = createWAL(this.conf, hbaseRootDir, logName);
  HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
  for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
    addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
  }
  // Now assert edits made it in.
  final Get g = new Get(rowName);
  Result result = region.get(g);
  assertEquals(countPerFamily * htd.getColumnFamilies().length, result.size());
  // Now close the region (without flush), split the log, reopen the region and assert that
  // replay of the log has the correct effect.
  region.close(true);
  wal.shutdown();
  runWALSplit(this.conf);
  // Here we let the DFSInputStream throw an IOException just after the WALHeader.
  Path editFile = WALSplitUtil.getSplitEditFilesSorted(this.fs, regionDir).first();
  FSDataInputStream stream = fs.open(editFile);
  stream.seek(ProtobufLogReader.PB_WAL_MAGIC.length);
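  // Initialize a real WAL reader once so that the stream position after init() tells us
  // exactly how many bytes the WAL header occupies.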
  Class<? extends AbstractFSWALProvider.Reader> logReaderClass =
    conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
      AbstractFSWALProvider.Reader.class);
  AbstractFSWALProvider.Reader reader = logReaderClass.getDeclaredConstructor().newInstance();
  reader.init(this.fs, editFile, conf, stream);
  final long headerLength = stream.getPos();
  reader.close();
  FileSystem spyFs = spy(this.fs);
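  // Stub FileSystem.open(editFile) so the stream it returns wraps a spied DFSInputStream.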
  doAnswer(new Answer<FSDataInputStream>() {
    @Override
    public FSDataInputStream answer(InvocationOnMock invocation) throws Throwable {
      FSDataInputStream stream = (FSDataInputStream) invocation.callRealMethod();
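      // FSDataInputStream extends DataInputStream, so the wrapped DFSInputStream sits in the
      // protected 'in' field inherited from FilterInputStream; pull it out via reflection.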
      Field field = FilterInputStream.class.getDeclaredField("in");
      field.setAccessible(true);
      final DFSInputStream in = (DFSInputStream) field.get(stream);
      DFSInputStream spyIn = spy(in);
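      // Count bytes handed back by read(byte[], int, int) and start throwing as soon as the
      // running total reaches the header length, i.e. just after the WALHeader is consumed.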
      doAnswer(new Answer<Integer>() {
        private long pos;

        @Override
        public Integer answer(InvocationOnMock invocation) throws Throwable {
          if (pos >= headerLength) {
            throw new IOException("read over limit");
          }
          int b = (Integer) invocation.callRealMethod();
          if (b > 0) {
            pos += b;
          }
          return b;
        }
      }).when(spyIn).read(any(byte[].class), anyInt(), anyInt());
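      // Closing the spy does not close the original stream (a Mockito spy works on a copy),
      // so forward close() and then close the wrapped DFSInputStream explicitly.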
      doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
          invocation.callRealMethod();
          in.close();
          return null;
        }
      }).when(spyIn).close();
      field.set(stream, spyIn);
      return stream;
    }
  }).when(spyFs).open(eq(editFile));
  WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
  HRegion region2;
  try {
    // Log replay should fail due to the IOException, otherwise we may lose data.
    region2 = HRegion.openHRegion(conf, spyFs, hbaseRootDir, hri, htd, wal2);
    assertEquals(result.size(), region2.get(g).size());
  } catch (IOException e) {
    assertEquals("read over limit", e.getMessage());
  }
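  // Replay again with the real (un-spied) FileSystem: all edits must now be recovered.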
  region2 = HRegion.openHRegion(conf, fs, hbaseRootDir, hri, htd, wal2);
  assertEquals(result.size(), region2.get(g).size());
}
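The fault-injection trick above is reusable outside HBase. Below is a minimal standalone sketch of the same idea, assuming Mockito 2+ on the classpath: a spy wraps a real InputStream, and its read(byte[], int, int) starts throwing once a byte limit has been crossed. The class and method names (FaultyReadSketch, failAfter) are illustrative, not part of the HBase code.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class FaultyReadSketch {

  // Returns a spy of the given stream whose read(byte[], int, int) throws an IOException
  // once 'limit' bytes have been handed out, mirroring the HBase test above.
  static InputStream failAfter(InputStream real, final long limit) {
    InputStream spyIn = spy(real);
    doAnswer(new Answer<Integer>() {
      private long pos;

      @Override
      public Integer answer(InvocationOnMock invocation) throws Throwable {
        if (pos >= limit) {
          throw new IOException("read over limit");
        }
        int b = (Integer) invocation.callRealMethod();
        if (b > 0) {
          pos += b;
        }
        return b;
      }
    }).when(spyIn).read(any(byte[].class), anyInt(), anyInt());
    return spyIn;
  }

  public static void main(String[] args) throws Exception {
    InputStream in = failAfter(new ByteArrayInputStream(new byte[64]), 16);
    byte[] buf = new byte[8];
    long total = 0;
    try {
      int n;
      while ((n = in.read(buf, 0, buf.length)) >= 0) {
        total += n;
      }
    } catch (IOException e) {
      // Expect to land here after roughly 16 bytes.
      System.out.println("failed after " + total + " bytes: " + e.getMessage());
    }
  }
}

Counting bytes inside the Answer, rather than truncating the file on disk, keeps the real read path in play right up to the failure point, which is what the HBase test needs in order to exercise the reader's error handling.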