
Example 1 with CancelableProgressable

Use of org.apache.hadoop.hbase.util.CancelableProgressable in project hbase by apache.

The class WALEntryStream, method recoverLease.

// For HBASE-15019: recover the WAL file's lease before attempting to read it
private void recoverLease(final Configuration conf, final Path path) {
    try {
        final FileSystem dfs = FSUtils.getCurrentFileSystem(conf);
        FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
        fsUtils.recoverFileLease(dfs, path, conf, new CancelableProgressable() {

            @Override
            public boolean progress() {
                LOG.debug("recover WAL lease: " + path);
                return true;
            }
        });
    } catch (IOException e) {
        LOG.warn("unable to recover lease for WAL: " + path, e);
    }
}
Also used : FileSystem(org.apache.hadoop.fs.FileSystem) IOException(java.io.IOException) CancelableProgressable(org.apache.hadoop.hbase.util.CancelableProgressable) FSUtils(org.apache.hadoop.hbase.util.FSUtils)
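
CancelableProgressable is a single-method callback: progress() returning true lets the operation continue, while false asks it to cancel. Below is a minimal sketch of a variant that gives up after a fixed deadline instead of retrying indefinitely; the 60-second deadline and the variable names are illustrative, not taken from the example above.

import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

// Hypothetical deadline-based reporter: returning false from progress()
// cancels the lease recovery instead of letting it retry forever.
CancelableProgressable deadlineReporter = new CancelableProgressable() {

    private final long deadline = EnvironmentEdgeManager.currentTime() + 60_000L;

    @Override
    public boolean progress() {
        // Keep going until the deadline passes, then ask the caller to cancel.
        return EnvironmentEdgeManager.currentTime() < deadline;
    }
};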

Example 2 with CancelableProgressable

Use of org.apache.hadoop.hbase.util.CancelableProgressable in project hbase by apache.

The class TestWALSplit, method testTerminationAskedByReporter.

@Test(timeout = 300000)
public void testTerminationAskedByReporter() throws IOException, CorruptedLogFileException {
    generateWALs(1, 10, -1);
    FileStatus logfile = fs.listStatus(WALDIR)[0];
    useDifferentDFSClient();
    final AtomicInteger count = new AtomicInteger();
    CancelableProgressable localReporter = new CancelableProgressable() {

        @Override
        public boolean progress() {
            count.getAndIncrement();
            return false;
        }
    };
    FileSystem spiedFs = Mockito.spy(fs);
    Mockito.doAnswer(new Answer<FSDataInputStream>() {

        @Override
        public FSDataInputStream answer(InvocationOnMock invocation) throws Throwable {
            // Sleep long enough for the report period to elapse, so the
            // reporter is polled before the real open proceeds
            Thread.sleep(1500);
            return (FSDataInputStream) invocation.callRealMethod();
        }
    }).when(spiedFs).open(Mockito.<Path>any(), Mockito.anyInt());
    try {
        conf.setInt("hbase.splitlog.report.period", 1000);
        boolean ret = WALSplitter.splitLogFile(HBASEDIR, logfile, spiedFs, conf, localReporter, null, null, this.mode, wals);
        assertFalse("Log splitting should failed", ret);
        assertTrue(count.get() > 0);
    } catch (IOException e) {
        fail("There shouldn't be any exception but: " + e.toString());
    } finally {
        // reset it back to its default value
        conf.setInt("hbase.splitlog.report.period", 59000);
    }
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InvocationOnMock(org.mockito.invocation.InvocationOnMock) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) IOException(java.io.IOException) CancelableProgressable(org.apache.hadoop.hbase.util.CancelableProgressable) Test(org.junit.Test)
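
On the consumer side, long-running split work periodically calls reporter.progress() and bails out when it returns false; that is what makes splitLogFile return false in the test above. A hedged sketch of the pattern follows (doChunkedWork and process are hypothetical names for illustration, not WALSplitter internals):

import java.util.Iterator;
import org.apache.hadoop.hbase.util.CancelableProgressable;

// Hypothetical consumer-side loop: check the reporter between units of
// work and stop cleanly when it signals cancellation.
static boolean doChunkedWork(Iterator<byte[]> chunks, CancelableProgressable reporter) {
    while (chunks.hasNext()) {
        process(chunks.next()); // hypothetical per-chunk work
        if (reporter != null && !reporter.progress()) {
            return false; // the reporter asked us to cancel
        }
    }
    return true;
}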

Example 3 with CancelableProgressable

Use of org.apache.hadoop.hbase.util.CancelableProgressable in project hbase by apache.

The class TestWALSplit, method doTestThreading.

/**
   * Sets up a log splitter with a mock reader and writer. The mock reader generates
   * a specified number of edits spread across 5 regions. The mock writer optionally
   * sleeps for each edit it is fed.
   *
   * After the split is complete, verifies that the statistics show the correct number
   * of edits output into each region.
   *
   * @param numFakeEdits number of fake edits to push through pipeline
   * @param bufferSize size of in-memory buffer
   * @param writerSlowness writer threads will sleep this many ms per edit
   */
private void doTestThreading(final int numFakeEdits, final int bufferSize, final int writerSlowness) throws Exception {
    Configuration localConf = new Configuration(conf);
    localConf.setInt("hbase.regionserver.hlog.splitlog.buffersize", bufferSize);
    // Create a fake log file (we'll override the reader to produce a stream of edits)
    Path logPath = new Path(WALDIR, WAL_FILE_PREFIX + ".fake");
    FSDataOutputStream out = fs.create(logPath);
    out.close();
    // Make region dirs for our destination regions so the output doesn't get skipped
    final List<String> regions = ImmutableList.of("r0", "r1", "r2", "r3", "r4");
    makeRegionDirs(regions);
    // Create a splitter that reads and writes the data without touching disk
    WALSplitter logSplitter = new WALSplitter(wals, localConf, HBASEDIR, fs, null, null, this.mode) {

        /* Produce a mock writer that doesn't write anywhere */
        @Override
        protected Writer createWriter(Path logfile) throws IOException {
            Writer mockWriter = Mockito.mock(Writer.class);
            Mockito.doAnswer(new Answer<Void>() {

                int expectedIndex = 0;

                @Override
                public Void answer(InvocationOnMock invocation) {
                    if (writerSlowness > 0) {
                        try {
                            Thread.sleep(writerSlowness);
                        } catch (InterruptedException ie) {
                            Thread.currentThread().interrupt();
                        }
                    }
                    Entry entry = (Entry) invocation.getArguments()[0];
                    WALEdit edit = entry.getEdit();
                    List<Cell> cells = edit.getCells();
                    assertEquals(1, cells.size());
                    Cell cell = cells.get(0);
                    // Check that the edits come in the right order.
                    assertEquals(expectedIndex, Bytes.toInt(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
                    expectedIndex++;
                    return null;
                }
            }).when(mockWriter).append(Mockito.<Entry>any());
            return mockWriter;
        }

        /* Produce a mock reader that generates fake entries */
        @Override
        protected Reader getReader(Path curLogFile, CancelableProgressable reporter) throws IOException {
            Reader mockReader = Mockito.mock(Reader.class);
            Mockito.doAnswer(new Answer<Entry>() {

                int index = 0;

                @Override
                public Entry answer(InvocationOnMock invocation) throws Throwable {
                    if (index >= numFakeEdits)
                        return null;
                    // Generate r0 through r4 in round robin fashion
                    int regionIdx = index % regions.size();
                    byte[] region = new byte[] { (byte) 'r', (byte) (0x30 + regionIdx) };
                    Entry ret = createTestEntry(TABLE_NAME, region, Bytes.toBytes((int) (index / regions.size())), FAMILY, QUALIFIER, VALUE, index);
                    index++;
                    return ret;
                }
            }).when(mockReader).next();
            return mockReader;
        }
    };
    logSplitter.splitLogFile(fs.getFileStatus(logPath), null);
    // Verify number of written edits per region
    Map<byte[], Long> outputCounts = logSplitter.outputSink.getOutputCounts();
    for (Map.Entry<byte[], Long> entry : outputCounts.entrySet()) {
        LOG.info("Got " + entry.getValue() + " output edits for region " + Bytes.toString(entry.getKey()));
        assertEquals((long) entry.getValue(), numFakeEdits / regions.size());
    }
    assertEquals("Should have as many outputs as regions", regions.size(), outputCounts.size());
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) FaultyProtobufLogReader(org.apache.hadoop.hbase.regionserver.wal.FaultyProtobufLogReader) ProtobufLogReader(org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader) Reader(org.apache.hadoop.hbase.wal.WAL.Reader) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) Entry(org.apache.hadoop.hbase.wal.WAL.Entry) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Cell(org.apache.hadoop.hbase.Cell) Path(org.apache.hadoop.fs.Path) CancelableProgressable(org.apache.hadoop.hbase.util.CancelableProgressable) InvocationOnMock(org.mockito.invocation.InvocationOnMock) AtomicLong(java.util.concurrent.atomic.AtomicLong) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) Writer(org.apache.hadoop.hbase.wal.WALProvider.Writer) InstrumentedLogWriter(org.apache.hadoop.hbase.regionserver.wal.InstrumentedLogWriter)
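
Because the helper is parameterized, one body covers several scenarios; tests in the same class invoke it along these lines (the parameter values below are illustrative, not quoted from the source):

@Test(timeout = 300000)
public void testThreading() throws Exception {
    // Large buffer, writer never sleeps: exercises the fast path.
    doTestThreading(200, 128 * 1024 * 1024, 0);
}

@Test(timeout = 300000)
public void testThreadingSlowWriterSmallBuffer() throws Exception {
    // Tiny buffer forces backpressure; writer sleeps 1 ms per edit.
    doTestThreading(200, 1024, 1);
}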

Example 4 with CancelableProgressable

Use of org.apache.hadoop.hbase.util.CancelableProgressable in project hbase by apache.

The class AsyncFSOutputHelper, method createOutput.

/**
   * Create {@link FanOutOneBlockAsyncDFSOutput} for {@link DistributedFileSystem}, and a simple
   * implementation for other {@link FileSystem} which wraps around a {@link FSDataOutputStream}.
   */
public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean overwrite, boolean createParent, short replication, long blockSize, final EventLoop eventLoop) throws IOException {
    if (fs instanceof DistributedFileSystem) {
        return FanOutOneBlockAsyncDFSOutputHelper.createOutput((DistributedFileSystem) fs, f, overwrite, createParent, replication, blockSize, eventLoop);
    }
    final FSDataOutputStream fsOut;
    int bufferSize = fs.getConf().getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
    if (createParent) {
        fsOut = fs.create(f, overwrite, bufferSize, replication, blockSize, null);
    } else {
        fsOut = fs.createNonRecursive(f, overwrite, bufferSize, replication, blockSize, null);
    }
    final ExecutorService flushExecutor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true).setNameFormat("AsyncFSOutputFlusher-" + f.toString().replace("%", "%%")).build());
    return new AsyncFSOutput() {

        private final ByteArrayOutputStream out = new ByteArrayOutputStream();

        @Override
        public void write(final byte[] b, final int off, final int len) {
            if (eventLoop.inEventLoop()) {
                out.write(b, off, len);
            } else {
                eventLoop.submit(() -> out.write(b, off, len)).syncUninterruptibly();
            }
        }

        @Override
        public void write(byte[] b) {
            write(b, 0, b.length);
        }

        @Override
        public void recoverAndClose(CancelableProgressable reporter) throws IOException {
            fsOut.close();
        }

        @Override
        public DatanodeInfo[] getPipeline() {
            return new DatanodeInfo[0];
        }

        private void flush0(CompletableFuture<Long> future, boolean sync) {
            try {
                synchronized (out) {
                    fsOut.write(out.getBuffer(), 0, out.size());
                    out.reset();
                }
            } catch (IOException e) {
                eventLoop.execute(() -> future.completeExceptionally(e));
                return;
            }
            try {
                if (sync) {
                    fsOut.hsync();
                } else {
                    fsOut.hflush();
                }
                long pos = fsOut.getPos();
                eventLoop.execute(() -> future.complete(pos));
            } catch (IOException e) {
                eventLoop.execute(() -> future.completeExceptionally(e));
            }
        }

        @Override
        public CompletableFuture<Long> flush(boolean sync) {
            CompletableFuture<Long> future = new CompletableFuture<>();
            flushExecutor.execute(() -> flush0(future, sync));
            return future;
        }

        @Override
        public void close() throws IOException {
            try {
                flushExecutor.submit(() -> {
                    synchronized (out) {
                        fsOut.write(out.getBuffer(), 0, out.size());
                        out.reset();
                    }
                    return null;
                }).get();
            } catch (InterruptedException e) {
                throw new InterruptedIOException();
            } catch (ExecutionException e) {
                Throwables.propagateIfPossible(e.getCause(), IOException.class);
                throw new IOException(e.getCause());
            } finally {
                flushExecutor.shutdown();
            }
            fsOut.close();
        }

        @Override
        public int buffered() {
            return out.size();
        }

        @Override
        public void writeInt(int i) {
            out.writeInt(i);
        }

        @Override
        public void write(ByteBuffer bb) {
            out.write(bb, bb.position(), bb.remaining());
        }
    };
}
Also used : InterruptedIOException(java.io.InterruptedIOException) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) ByteArrayOutputStream(org.apache.hadoop.hbase.io.ByteArrayOutputStream) IOException(java.io.IOException) InterruptedIOException(java.io.InterruptedIOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) CancelableProgressable(org.apache.hadoop.hbase.util.CancelableProgressable) ByteBuffer(java.nio.ByteBuffer) CompletableFuture(java.util.concurrent.CompletableFuture) ExecutorService(java.util.concurrent.ExecutorService) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) ExecutionException(java.util.concurrent.ExecutionException)
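
A hedged usage sketch for createOutput follows; the configuration, path, replication, and netty event loop wiring are assumptions for illustration, and error handling is omitted:

import io.netty.channel.EventLoop;
import io.netty.channel.nio.NioEventLoopGroup;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.Bytes;

Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
EventLoop eventLoop = new NioEventLoopGroup(1).next();
AsyncFSOutput out = AsyncFSOutputHelper.createOutput(fs, new Path("/tmp/example.out"),
    true /* overwrite */, false /* createParent */, (short) 3, 64L * 1024 * 1024, eventLoop);
out.write(Bytes.toBytes("some payload"));
long pos = out.flush(true).get(); // hsync and wait; pos is the post-flush file position
out.close();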

Example 5 with CancelableProgressable

Use of org.apache.hadoop.hbase.util.CancelableProgressable in project hbase by apache.

The class ZkSplitLogWorkerCoordination, method submitTask.

/**
   * Submit a log split task to the executor service.
   * @param curTask task to submit
   * @param mode recovery mode of the split task
   * @param curTaskZKVersion current ZK version of the task node
   * @param reportPeriod minimum interval in milliseconds between task heartbeats to ZK
   */
void submitTask(final String curTask, final RecoveryMode mode, final int curTaskZKVersion, final int reportPeriod) {
    final MutableInt zkVersion = new MutableInt(curTaskZKVersion);
    CancelableProgressable reporter = new CancelableProgressable() {

        private long last_report_at = 0;

        @Override
        public boolean progress() {
            long t = EnvironmentEdgeManager.currentTime();
            if ((t - last_report_at) > reportPeriod) {
                last_report_at = t;
                int latestZKVersion = attemptToOwnTask(false, watcher, server.getServerName(), curTask, mode, zkVersion.intValue());
                if (latestZKVersion < 0) {
                    LOG.warn("Failed to heartbeat the task" + curTask);
                    return false;
                }
                zkVersion.setValue(latestZKVersion);
            }
            return true;
        }
    };
    ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails = new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails();
    splitTaskDetails.setTaskNode(curTask);
    splitTaskDetails.setCurTaskZKVersion(zkVersion);
    WALSplitterHandler hsh = new WALSplitterHandler(server, this, splitTaskDetails, reporter, this.tasksInProgress, splitTaskExecutor, mode);
    server.getExecutorService().submit(hsh);
}
Also used : WALSplitterHandler(org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler) MutableInt(org.apache.commons.lang.mutable.MutableInt) CancelableProgressable(org.apache.hadoop.hbase.util.CancelableProgressable)
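
Distilled, the reporter above is a throttling pattern: answer progress() cheaply most of the time, and only perform the expensive heartbeat once per period. A generic sketch of that pattern (the 1-second period and expensiveHeartbeat are hypothetical):

CancelableProgressable throttled = new CancelableProgressable() {

    private long lastReportAt = 0;

    @Override
    public boolean progress() {
        long now = EnvironmentEdgeManager.currentTime();
        if (now - lastReportAt > 1000) { // hypothetical 1 s report period
            lastReportAt = now;
            return expensiveHeartbeat(); // hypothetical; false cancels the task
        }
        return true; // between reports, keep running
    }
};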

Aggregations

CancelableProgressable (org.apache.hadoop.hbase.util.CancelableProgressable): 5
IOException (java.io.IOException): 3
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2
FileSystem (org.apache.hadoop.fs.FileSystem): 2
InvocationOnMock (org.mockito.invocation.InvocationOnMock): 2
ImmutableList (com.google.common.collect.ImmutableList): 1
ImmutableMap (com.google.common.collect.ImmutableMap): 1
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 1
InterruptedIOException (java.io.InterruptedIOException): 1
ByteBuffer (java.nio.ByteBuffer): 1
ArrayList (java.util.ArrayList): 1
HashMap (java.util.HashMap): 1
List (java.util.List): 1
Map (java.util.Map): 1
CompletableFuture (java.util.concurrent.CompletableFuture): 1
ExecutionException (java.util.concurrent.ExecutionException): 1
ExecutorService (java.util.concurrent.ExecutorService): 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 1
AtomicLong (java.util.concurrent.atomic.AtomicLong): 1
MutableInt (org.apache.commons.lang.mutable.MutableInt): 1