
Example 16 with DFSInputStream

Use of org.apache.hadoop.hdfs.DFSInputStream in project hbase by apache.

From the class AbstractTestWALReplay, method testDatalossWhenInputError.

/**
 * Test case for https://issues.apache.org/jira/browse/HBASE-15252: WAL replay must not
 * silently lose data when the underlying input stream errors out.
 */
@Test
public void testDatalossWhenInputError() throws Exception {
    final TableName tableName = TableName.valueOf("testDatalossWhenInputError");
    final RegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = CommonFSUtils.getWALTableDir(conf, tableName);
    deleteDir(basedir);
    final byte[] rowName = tableName.getName();
    final int countPerFamily = 10;
    final TableDescriptor htd = createBasic1FamilyHTD(tableName);
    HRegion region1 = HBaseTestingUtil.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    Path regionDir = region1.getWALRegionDir();
    HBaseTestingUtil.closeRegionAndWAL(region1);
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
    for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
        addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
    }
    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getColumnFamilies().length, result.size());
    // Now close the region (without flush), split the log, reopen the region and assert that
    // replay of log has the correct effect.
    region.close(true);
    wal.shutdown();
    runWALSplit(this.conf);
    // Here we let the DFSInputStream throw an IOException just after the WAL header.
    Path editFile = WALSplitUtil.getSplitEditFilesSorted(this.fs, regionDir).first();
    FSDataInputStream stream = fs.open(editFile);
    stream.seek(ProtobufLogReader.PB_WAL_MAGIC.length);
    // Initializing the configured WAL reader consumes the file header, so the
    // stream position afterwards gives us the header length.
    Class<? extends AbstractFSWALProvider.Reader> logReaderClass =
        conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
            AbstractFSWALProvider.Reader.class);
    AbstractFSWALProvider.Reader reader = logReaderClass.getDeclaredConstructor().newInstance();
    reader.init(this.fs, editFile, conf, stream);
    final long headerLength = stream.getPos();
    reader.close();
    // Spy on the filesystem so that opening the edit file returns a stream whose
    // wrapped DFSInputStream fails once it has read past the header.
    FileSystem spyFs = spy(this.fs);
    doAnswer(new Answer<FSDataInputStream>() {

        @Override
        public FSDataInputStream answer(InvocationOnMock invocation) throws Throwable {
            FSDataInputStream stream = (FSDataInputStream) invocation.callRealMethod();
            // FSDataInputStream is a FilterInputStream; pull out the wrapped
            // DFSInputStream through the protected "in" field so we can spy on it.
            Field field = FilterInputStream.class.getDeclaredField("in");
            field.setAccessible(true);
            final DFSInputStream in = (DFSInputStream) field.get(stream);
            DFSInputStream spyIn = spy(in);
            // Fail reads once the running byte count passes the header length.
            doAnswer(new Answer<Integer>() {

                private long pos;

                @Override
                public Integer answer(InvocationOnMock invocation) throws Throwable {
                    if (pos >= headerLength) {
                        throw new IOException("read over limit");
                    }
                    int b = (Integer) invocation.callRealMethod();
                    if (b > 0) {
                        pos += b;
                    }
                    return b;
                }
            }).when(spyIn).read(any(byte[].class), anyInt(), anyInt());
            // When the spy is closed, close the real stream as well.
            doAnswer(new Answer<Void>() {

                @Override
                public Void answer(InvocationOnMock invocation) throws Throwable {
                    invocation.callRealMethod();
                    in.close();
                    return null;
                }
            }).when(spyIn).close();
            field.set(stream, spyIn);
            return stream;
        }
    }).when(spyFs).open(eq(editFile));
    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    HRegion region2;
    try {
        // log replay should fail due to the IOException, otherwise we may lose data.
        region2 = HRegion.openHRegion(conf, spyFs, hbaseRootDir, hri, htd, wal2);
        assertEquals(result.size(), region2.get(g).size());
    } catch (IOException e) {
        assertEquals("read over limit", e.getMessage());
    }
    // With the real filesystem, replay succeeds and recovers every edit.
    region2 = HRegion.openHRegion(conf, fs, hbaseRootDir, hri, htd, wal2);
    assertEquals(result.size(), region2.get(g).size());
}
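
The reusable idea in this test is the fault-injection pattern itself: reach into the protected "in" field of a FilterInputStream, replace the wrapped stream with a Mockito spy, and make reads fail once a byte limit is crossed. The sketch below isolates that pattern outside HBase. It is a minimal illustration under names of our own choosing (ReadLimitFaultInjection and failReadsAfter are hypothetical, not from the HBase source), assuming Mockito is on the classpath; on JDK 17 and later the reflective access additionally needs --add-opens java.base/java.io=ALL-UNNAMED.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Field;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class ReadLimitFaultInjection {

    /**
     * Hypothetical helper: replaces the stream wrapped by {@code outer} with a
     * Mockito spy that throws once {@code limit} bytes have been read through it,
     * mirroring the DFSInputStream swap in testDatalossWhenInputError above.
     */
    public static void failReadsAfter(FilterInputStream outer, long limit) throws Exception {
        // FilterInputStream keeps its delegate in the protected field "in".
        Field field = FilterInputStream.class.getDeclaredField("in");
        field.setAccessible(true);
        InputStream real = (InputStream) field.get(outer);
        InputStream spyIn = spy(real);
        doAnswer(new Answer<Integer>() {

            private long pos;

            @Override
            public Integer answer(InvocationOnMock invocation) throws Throwable {
                if (pos >= limit) {
                    throw new IOException("read over limit");
                }
                int n = (Integer) invocation.callRealMethod();
                if (n > 0) {
                    pos += n;
                }
                return n;
            }
        }).when(spyIn).read(any(byte[].class), anyInt(), anyInt());
        // Swap the spy in; subsequent reads through the outer stream hit the limit check.
        field.set(outer, spyIn);
    }

    public static void main(String[] args) throws Exception {
        // A 16-byte internal buffer over 64 bytes of data: the second refill
        // pushes the running count past the limit and trips the injected failure.
        FilterInputStream stream = new BufferedInputStream(
            new ByteArrayInputStream(new byte[64]), 16);
        failReadsAfter(stream, 16);
        byte[] buf = new byte[8];
        long total = 0;
        int n;
        try {
            while ((n = stream.read(buf, 0, buf.length)) > 0) {
                total += n;
            }
            System.out.println("read all " + total + " bytes");
        } catch (IOException e) {
            System.out.println("failed after " + total + " bytes: " + e.getMessage());
        }
    }
}

In the HBase test the same swap is applied to the FSDataInputStream returned by the spied FileSystem, and close() on the spy also closes the real DFSInputStream so the file handle is released.
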
Also used:

WAL (org.apache.hadoop.hbase.wal.WAL)
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor)
Result (org.apache.hadoop.hbase.client.Result)
Field (java.lang.reflect.Field)
FileSystem (org.apache.hadoop.fs.FileSystem)
DFSInputStream (org.apache.hadoop.hdfs.DFSInputStream)
Path (org.apache.hadoop.fs.Path)
FilterInputStream (java.io.FilterInputStream)
AbstractFSWALProvider (org.apache.hadoop.hbase.wal.AbstractFSWALProvider)
IOException (java.io.IOException)
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)
AtomicInteger (java.util.concurrent.atomic.AtomicInteger)
TableName (org.apache.hadoop.hbase.TableName)
HRegion (org.apache.hadoop.hbase.regionserver.HRegion)
Mockito.doAnswer (org.mockito.Mockito.doAnswer)
Answer (org.mockito.stubbing.Answer)
InvocationOnMock (org.mockito.invocation.InvocationOnMock)
Get (org.apache.hadoop.hbase.client.Get)
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)
Test (org.junit.Test)

Aggregations

DFSInputStream (org.apache.hadoop.hdfs.DFSInputStream): 16
IOException (java.io.IOException): 7
Test (org.junit.Test): 6
Path (org.apache.hadoop.fs.Path): 4
CompressionFileState (org.smartdata.model.CompressionFileState): 4
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 3
ActionException (org.smartdata.action.ActionException): 3
SmartDFSClient (org.smartdata.hdfs.client.SmartDFSClient): 3
FileState (org.smartdata.model.FileState): 3
Gson (com.google.gson.Gson): 2
OutputStream (java.io.OutputStream): 2
DFSStripedInputStream (org.apache.hadoop.hdfs.DFSStripedInputStream): 2
SmartConf (org.smartdata.conf.SmartConf): 2
CompactFileState (org.smartdata.model.CompactFileState): 2
NormalFileState (org.smartdata.model.NormalFileState): 2
CmdletManager (org.smartdata.server.engine.CmdletManager): 2
CacheLoader (com.google.common.cache.CacheLoader): 1
TypeToken (com.google.gson.reflect.TypeToken): 1
FilterInputStream (java.io.FilterInputStream): 1
Field (java.lang.reflect.Field): 1