Example 76 with CellScanner

Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.

From the class TestPutDeleteEtcCellIteration, method testPutConcurrentModificationOnIteration.

@Test(expected = ConcurrentModificationException.class)
public void testPutConcurrentModificationOnIteration() throws IOException {
    Put p = new Put(ROW);
    for (int i = 0; i < COUNT; i++) {
        byte[] bytes = Bytes.toBytes(i);
        p.addColumn(bytes, bytes, TIMESTAMP, bytes);
    }
    int index = 0;
    int trigger = 3;
    for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance(); ) {
        Cell cell = cellScanner.current();
        byte[] bytes = Bytes.toBytes(index++);
        // When we hit the trigger index, try inserting a new KV; mutating the Put
        // mid-iteration should provoke the expected ConcurrentModificationException.
        if (index == trigger)
            p.addColumn(bytes, bytes, TIMESTAMP, bytes);
        cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes));
    }
    assertEquals(COUNT, index);
}
Also used: KeyValue (org.apache.hadoop.hbase.KeyValue), CellScanner (org.apache.hadoop.hbase.CellScanner), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
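
The safe alternative is to finish iterating before mutating the Put. A minimal sketch, assuming the same ROW, COUNT, and TIMESTAMP constants as the test above, plus java.util.List/ArrayList and HBase's CellUtil:

Put p = new Put(ROW);
for (int i = 0; i < COUNT; i++) {
    byte[] bytes = Bytes.toBytes(i);
    p.addColumn(bytes, bytes, TIMESTAMP, bytes);
}
// Collect what to add instead of mutating p mid-iteration.
List<byte[]> pending = new ArrayList<>();
for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance(); ) {
    pending.add(CellUtil.cloneQualifier(cellScanner.current()));
}
// Safe: the scan is finished before the Put's backing map changes.
for (byte[] qualifier : pending) {
    p.addColumn(qualifier, qualifier, TIMESTAMP, qualifier);
}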

Example 77 with CellScanner

Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.

From the class TestPutDeleteEtcCellIteration, method testDeleteIteration.

@Test
public void testDeleteIteration() throws IOException {
    Delete d = new Delete(ROW);
    for (int i = 0; i < COUNT; i++) {
        byte[] bytes = Bytes.toBytes(i);
        d.addColumn(bytes, bytes, TIMESTAMP);
    }
    int index = 0;
    for (CellScanner cellScanner = d.cellScanner(); cellScanner.advance(); ) {
        Cell cell = cellScanner.current();
        byte[] bytes = Bytes.toBytes(index++);
        cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, KeyValue.Type.DeleteColumn));
    }
    assertEquals(COUNT, index);
}
Also used: KeyValue (org.apache.hadoop.hbase.KeyValue), CellScanner (org.apache.hadoop.hbase.CellScanner), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
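
Note that the cell.equals(...) call above discards its result, so the loop effectively verifies only the cell count. If the type check were made explicit, it could compare type bytes directly; a minimal sketch, assuming JUnit's assertEquals is in scope:

for (CellScanner cellScanner = d.cellScanner(); cellScanner.advance(); ) {
    // Delete#addColumn(family, qualifier, ts) produces DeleteColumn-type cells.
    assertEquals(KeyValue.Type.DeleteColumn.getCode(), cellScanner.current().getTypeByte());
}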

Example 78 with CellScanner

Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.

From the class AbstractTestFSWAL, method testFlushSequenceIdIsGreaterThanAllEditsInHFile.

/**
   * Test that a flush always gets a sequence id beyond the last edit appended. We do this by
   * slowing appends in the background ring-buffer thread while calling flush in the foreground.
   * The addition of the sync over HRegion in flush should fix an issue where flush returned
   * before all of its appends had made it out to the WAL (HBASE-11109).
   * @throws IOException
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-11109">HBASE-11109</a>
   */
@Test
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
    String testName = currentTest.getMethodName();
    final TableName tableName = TableName.valueOf(testName);
    final HRegionInfo hri = new HRegionInfo(tableName);
    final byte[] rowName = tableName.getName();
    final HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("f"));
    HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDefaultRootDirPath(), TEST_UTIL.getConfiguration(), htd);
    HBaseTestingUtility.closeRegionAndWAL(r);
    final int countPerFamily = 10;
    final AtomicBoolean goslow = new AtomicBoolean(false);
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    // subclass and doctor a method.
    AbstractFSWAL<?> wal = newSlowWAL(FS, FSUtils.getWALRootDir(CONF), DIR.toString(), testName, CONF, null, true, null, null, new Runnable() {

        @Override
        public void run() {
            if (goslow.get()) {
                LOG.debug("Sleeping 100ms before appending");
                Threads.sleep(100);
            }
        }
    });
    HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal);
    EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
    try {
        List<Put> puts = null;
        for (HColumnDescriptor hcd : htd.getFamilies()) {
            puts = TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
        }
        // Now assert edits made it in.
        final Get g = new Get(rowName);
        Result result = region.get(g);
        assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
        // Construct a WALEdit and add it a few times to the WAL.
        WALEdit edits = new WALEdit();
        for (Put p : puts) {
            CellScanner cs = p.cellScanner();
            while (cs.advance()) {
                edits.add(cs.current());
            }
        }
        // Add any old cluster id.
        List<UUID> clusterIds = new ArrayList<>(1);
        clusterIds.add(UUID.randomUUID());
        // Now make appends run slow.
        goslow.set(true);
        for (int i = 0; i < countPerFamily; i++) {
            final HRegionInfo info = region.getRegionInfo();
            final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName, System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC(), scopes);
            wal.append(info, logkey, edits, true);
            region.getMVCC().completeAndWait(logkey.getWriteEntry());
        }
        region.flush(true);
        // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
        long currentSequenceId = region.getReadPoint(null);
        // Now release the appends
        goslow.set(false);
        assertTrue(currentSequenceId >= region.getReadPoint(null));
    } finally {
        region.close(true);
        wal.close();
    }
}
Also used: ArrayList (java.util.ArrayList), CellScanner (org.apache.hadoop.hbase.CellScanner), Result (org.apache.hadoop.hbase.client.Result), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), WALKey (org.apache.hadoop.hbase.wal.WALKey), UUID (java.util.UUID), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), EnvironmentEdge (org.apache.hadoop.hbase.util.EnvironmentEdge), TreeMap (java.util.TreeMap), Put (org.apache.hadoop.hbase.client.Put), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), TableName (org.apache.hadoop.hbase.TableName), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Get (org.apache.hadoop.hbase.client.Get), Test (org.junit.Test)
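
Stripped of the WAL machinery, the CellScanner idiom at the heart of this test is copying a Put's cells into a WALEdit. A minimal sketch, assuming a populated Put p:

WALEdit edits = new WALEdit();
CellScanner cs = p.cellScanner();
// advance() positions the scanner on the next cell and returns false once exhausted.
while (cs.advance()) {
    edits.add(cs.current());
}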

Example 79 with CellScanner

Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.

From the class TestWALLockup, method testLockup16960.

/**
   * Reproduce the lockup that happens when there are no further syncs after an append fails,
   * causing an isolated sync and then an infinite wait. See HBASE-16960. If the fix is broken,
   * this test will time out because it locks up.
   * <p/>
   * Steps to reproduce:<br/>
   * 1. Trigger a server abort through dodgyWAL1<br/>
   * 2. Add a {@link DummyWALActionsListener} to dodgyWAL2 to make the ringbuffer event handler
   * thread sleep for a while, keeping {@code endOfBatch} false<br/>
   * 3. Publish a sync and then an append that throws an exception; check whether the sync
   * could return
   */
@Test(timeout = 20000)
public void testLockup16960() throws IOException {
    // A WAL that we can have throw exceptions when a flag is set.
    class DodgyFSLog extends FSHLog {

        // Set this when you want the WAL to start throwing exceptions.
        volatile boolean throwException = false;

        public DodgyFSLog(FileSystem fs, Path root, String logDir, Configuration conf) throws IOException {
            super(fs, root, logDir, conf);
        }

        @Override
        protected Writer createWriterInstance(Path path) throws IOException {
            final Writer w = super.createWriterInstance(path);
            return new Writer() {

                @Override
                public void close() throws IOException {
                    w.close();
                }

                @Override
                public void sync() throws IOException {
                    if (throwException) {
                        throw new IOException("FAKE! Failed to replace a bad datanode...SYNC");
                    }
                    w.sync();
                }

                @Override
                public void append(Entry entry) throws IOException {
                    if (throwException) {
                        throw new IOException("FAKE! Failed to replace a bad datanode...APPEND");
                    }
                    w.append(entry);
                }

                @Override
                public long getLength() {
                    return w.getLength();
                }
            };
        }

        @Override
        protected long doReplaceWriter(Path oldPath, Path newPath, Writer nextWriter) throws IOException {
            if (throwException) {
                throw new FailedLogCloseException("oldPath=" + oldPath + ", newPath=" + newPath);
            }
            long oldFileLen = 0L;
            oldFileLen = super.doReplaceWriter(oldPath, newPath, nextWriter);
            return oldFileLen;
        }
    }
    // Mocked up server and regionserver services. Needed below.
    Server server = new DummyServer(CONF, ServerName.valueOf("hostname1.example.org", 1234, 1L).toString());
    RegionServerServices services = Mockito.mock(RegionServerServices.class);
    CONF.setLong("hbase.regionserver.hlog.sync.timeout", 10000);
    // OK. Now that we have the mocked-up Server, RegionServerServices, and dodgy WALs,
    // go ahead with the test.
    FileSystem fs = FileSystem.get(CONF);
    Path rootDir = new Path(dir + getName());
    DodgyFSLog dodgyWAL1 = new DodgyFSLog(fs, rootDir, getName(), CONF);
    Path rootDir2 = new Path(dir + getName() + "2");
    final DodgyFSLog dodgyWAL2 = new DodgyFSLog(fs, rootDir2, getName() + "2", CONF);
    // Add a listener to force ringbuffer event handler sleep for a while
    dodgyWAL2.registerWALActionsListener(new DummyWALActionsListener());
    // I need a log roller running.
    LogRoller logRoller = new LogRoller(server, services);
    logRoller.addWAL(dodgyWAL1);
    logRoller.addWAL(dodgyWAL2);
    // There is no 'stop' once a logRoller is running; it just dies.
    logRoller.start();
    // Now get a region and start adding in edits.
    HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
    final HRegion region = initHRegion(tableName, null, null, dodgyWAL1);
    byte[] bytes = Bytes.toBytes(getName());
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    scopes.put(COLUMN_FAMILY_BYTES, 0);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    try {
        Put put = new Put(bytes);
        put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("1"), bytes);
        WALKey key = new WALKey(region.getRegionInfo().getEncodedNameAsBytes(), htd.getTableName(), System.currentTimeMillis(), mvcc, scopes);
        WALEdit edit = new WALEdit();
        CellScanner cellScanner = put.cellScanner();
        assertTrue(cellScanner.advance());
        edit.add(cellScanner.current());
        LOG.info("SET throwing of exception on append");
        dodgyWAL1.throwException = true;
        // This append provokes a WAL roll request
        dodgyWAL1.append(region.getRegionInfo(), key, edit, true);
        boolean exception = false;
        try {
            dodgyWAL1.sync();
        } catch (Exception e) {
            exception = true;
        }
        assertTrue("Did not get sync exception", exception);
        // The failed sync above causes the server abort.
        try {
            // Wait for the LogRoller to exit.
            Thread.sleep(50);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        final CountDownLatch latch = new CountDownLatch(1);
        // Make the RingBufferEventHandler sleep 1s so that for the following sync,
        // endOfBatch stays false.
        key = new WALKey(region.getRegionInfo().getEncodedNameAsBytes(), TableName.valueOf("sleep"), System.currentTimeMillis(), mvcc, scopes);
        dodgyWAL2.append(region.getRegionInfo(), key, edit, true);
        Thread t = new Thread("Sync") {

            @Override
            public void run() {
                try {
                    dodgyWAL2.sync();
                } catch (IOException e) {
                    LOG.info("In sync", e);
                }
                latch.countDown();
                LOG.info("Sync exiting");
            }
        };
        t.setDaemon(true);
        t.start();
        try {
            // Make sure the sync has been published.
            Thread.sleep(100);
        } catch (InterruptedException e1) {
            e1.printStackTrace();
        }
        // Make the append throw a DamagedWALException.
        key = new WALKey(region.getRegionInfo().getEncodedNameAsBytes(), TableName.valueOf("DamagedWALException"), System.currentTimeMillis(), mvcc, scopes);
        dodgyWAL2.append(region.getRegionInfo(), key, edit, true);
        while (latch.getCount() > 0) {
            Threads.sleep(100);
        }
        assertTrue(server.isAborted());
    } finally {
        if (logRoller != null) {
            logRoller.close();
        }
        try {
            if (region != null) {
                region.close();
            }
            if (dodgyWAL1 != null) {
                dodgyWAL1.close();
            }
            if (dodgyWAL2 != null) {
                dodgyWAL2.close();
            }
        } catch (Exception e) {
            LOG.info("On way out", e);
        }
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), Server (org.apache.hadoop.hbase.Server), FailedLogCloseException (org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException), CellScanner (org.apache.hadoop.hbase.CellScanner), FSHLog (org.apache.hadoop.hbase.regionserver.wal.FSHLog), WALKey (org.apache.hadoop.hbase.wal.WALKey), WALEdit (org.apache.hadoop.hbase.regionserver.wal.WALEdit), FileSystem (org.apache.hadoop.fs.FileSystem), Path (org.apache.hadoop.fs.Path), IOException (java.io.IOException), TreeMap (java.util.TreeMap), CountDownLatch (java.util.concurrent.CountDownLatch), Put (org.apache.hadoop.hbase.client.Put), DamagedWALException (org.apache.hadoop.hbase.regionserver.wal.DamagedWALException), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Writer (org.apache.hadoop.hbase.wal.WALProvider.Writer), Test (org.junit.Test)
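
The DodgyFSLog above uses a common fault-injection pattern: wrap the real implementation, delegate every call, and throw only while a volatile flag is set. A minimal sketch stripped of the WAL specifics, where SyncTarget is a hypothetical stand-in for WALProvider.Writer:

import java.io.IOException;

interface SyncTarget {
    void sync() throws IOException;
}

class FaultInjectingSyncTarget implements SyncTarget {
    private final SyncTarget delegate;
    // Toggled by the test thread; volatile so the worker thread sees the change.
    volatile boolean throwException = false;

    FaultInjectingSyncTarget(SyncTarget delegate) {
        this.delegate = delegate;
    }

    @Override
    public void sync() throws IOException {
        if (throwException) {
            throw new IOException("FAKE! injected sync failure");
        }
        delegate.sync();
    }
}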

Example 80 with CellScanner

Use of org.apache.hadoop.hbase.CellScanner in project phoenix by apache.

From the class TestUtil, method dumpTable.

public static void dumpTable(HTableInterface table) throws IOException {
    System.out.println("************ dumping " + table + " **************");
    Scan s = new Scan();
    try (ResultScanner scanner = table.getScanner(s)) {
        Result result = null;
        while ((result = scanner.next()) != null) {
            CellScanner cellScanner = result.cellScanner();
            Cell current = null;
            while (cellScanner.advance()) {
                current = cellScanner.current();
                System.out.println(current);
            }
        }
    }
    System.out.println("-----------------------------------------------");
}
Also used: ResultScanner (org.apache.hadoop.hbase.client.ResultScanner), Scan (org.apache.hadoop.hbase.client.Scan), CellScanner (org.apache.hadoop.hbase.CellScanner), Cell (org.apache.hadoop.hbase.Cell), Result (org.apache.hadoop.hbase.client.Result)
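
A Cell's default toString() typically omits the value, so a more explicit dump can help when debugging. A variant of the inner loop, a sketch assuming HBase's CellUtil and Bytes helpers:

while (cellScanner.advance()) {
    Cell c = cellScanner.current();
    // toStringBinary escapes non-printable bytes instead of garbling the console.
    System.out.println(Bytes.toStringBinary(CellUtil.cloneRow(c)) + "/"
        + Bytes.toStringBinary(CellUtil.cloneFamily(c)) + ":"
        + Bytes.toStringBinary(CellUtil.cloneQualifier(c)) + "/"
        + c.getTimestamp() + " -> "
        + Bytes.toStringBinary(CellUtil.cloneValue(c)));
}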
