Use of org.apache.hadoop.hbase.util.CancelableProgressable in project hbase by apache.
In the class WALEntryStream, method recoverLease:
// For HBASE-15019
private void recoverLease(final Configuration conf, final Path path) {
  try {
    final FileSystem dfs = FSUtils.getCurrentFileSystem(conf);
    FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
    fsUtils.recoverFileLease(dfs, path, conf, new CancelableProgressable() {
      @Override
      public boolean progress() {
        LOG.debug("recover WAL lease: " + path);
        return true;
      }
    });
  } catch (IOException e) {
    LOG.warn("unable to recover lease for WAL: " + path, e);
  }
}
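The snippet above implements CancelableProgressable inline as an anonymous class. As a minimal sketch (not part of the HBase source; the class name and timeout policy are assumptions), the same single-method contract can also be satisfied by a small reusable reporter that gives up once a deadline passes:

import org.apache.hadoop.hbase.util.CancelableProgressable;

// Hypothetical reporter (illustration only): keeps reporting progress until a
// caller-chosen deadline elapses, then asks the long-running operation to cancel.
public class DeadlineReporter implements CancelableProgressable {
  private final long deadlineMillis;

  public DeadlineReporter(long timeoutMillis) {
    this.deadlineMillis = System.currentTimeMillis() + timeoutMillis;
  }

  @Override
  public boolean progress() {
    // Returning false signals the caller (e.g. lease recovery) to stop waiting.
    return System.currentTimeMillis() < deadlineMillis;
  }
}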
Use of org.apache.hadoop.hbase.util.CancelableProgressable in project hbase by apache.
In the class TestWALSplit, method testTerminationAskedByReporter:
@Test(timeout = 300000)
public void testTerminationAskedByReporter() throws IOException, CorruptedLogFileException {
  generateWALs(1, 10, -1);
  FileStatus logfile = fs.listStatus(WALDIR)[0];
  useDifferentDFSClient();
  final AtomicInteger count = new AtomicInteger();
  CancelableProgressable localReporter = new CancelableProgressable() {
    @Override
    public boolean progress() {
      count.getAndIncrement();
      return false;
    }
  };
  FileSystem spiedFs = Mockito.spy(fs);
  Mockito.doAnswer(new Answer<FSDataInputStream>() {
    public FSDataInputStream answer(InvocationOnMock invocation) throws Throwable {
      // Sleep a while so the reporter's progress() gets invoked before the open completes
      Thread.sleep(1500);
      return (FSDataInputStream) invocation.callRealMethod();
    }
  }).when(spiedFs).open(Mockito.<Path>any(), Mockito.anyInt());
  try {
    conf.setInt("hbase.splitlog.report.period", 1000);
    boolean ret = WALSplitter.splitLogFile(HBASEDIR, logfile, spiedFs, conf, localReporter, null, null, this.mode, wals);
    assertFalse("Log splitting should have failed", ret);
    assertTrue(count.get() > 0);
  } catch (IOException e) {
    fail("There shouldn't be any exception but: " + e.toString());
  } finally {
    // reset it back to its default value
    conf.setInt("hbase.splitlog.report.period", 59000);
  }
}
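The test relies on the reporter returning false from progress() to abort the split. As a hedged sketch (not part of the HBase source; the class name and call budget are assumptions), the same cancel-on-progress contract can be expressed as a reporter that allows only a fixed number of progress calls before requesting cancellation:

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.util.CancelableProgressable;

// Hypothetical reporter (illustration only): permits a bounded number of progress()
// calls, then returns false to ask the caller to stop the operation.
public class BoundedReporter implements CancelableProgressable {
  private final AtomicInteger remaining;

  public BoundedReporter(int maxProgressCalls) {
    this.remaining = new AtomicInteger(maxProgressCalls);
  }

  @Override
  public boolean progress() {
    // Once the budget is exhausted, request cancellation.
    return remaining.decrementAndGet() >= 0;
  }
}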
Use of org.apache.hadoop.hbase.util.CancelableProgressable in project hbase by apache.
In the class TestWALSplit, method doTestThreading:
/**
 * Sets up a log splitter with a mock reader and writer. The mock reader generates
 * a specified number of edits spread across 5 regions. The mock writer optionally
 * sleeps for each edit it is fed.
 *
 * After the split is complete, verifies that the statistics show the correct number
 * of edits output into each region.
 *
 * @param numFakeEdits number of fake edits to push through pipeline
 * @param bufferSize size of in-memory buffer
 * @param writerSlowness writer threads will sleep this many ms per edit
 */
private void doTestThreading(final int numFakeEdits, final int bufferSize, final int writerSlowness) throws Exception {
  Configuration localConf = new Configuration(conf);
  localConf.setInt("hbase.regionserver.hlog.splitlog.buffersize", bufferSize);
  // Create a fake log file (we'll override the reader to produce a stream of edits)
  Path logPath = new Path(WALDIR, WAL_FILE_PREFIX + ".fake");
  FSDataOutputStream out = fs.create(logPath);
  out.close();
  // Make region dirs for our destination regions so the output doesn't get skipped
  final List<String> regions = ImmutableList.of("r0", "r1", "r2", "r3", "r4");
  makeRegionDirs(regions);
  // Create a splitter that reads and writes the data without touching disk
  WALSplitter logSplitter = new WALSplitter(wals, localConf, HBASEDIR, fs, null, null, this.mode) {
    /* Produce a mock writer that doesn't write anywhere */
    @Override
    protected Writer createWriter(Path logfile) throws IOException {
      Writer mockWriter = Mockito.mock(Writer.class);
      Mockito.doAnswer(new Answer<Void>() {
        int expectedIndex = 0;

        @Override
        public Void answer(InvocationOnMock invocation) {
          if (writerSlowness > 0) {
            try {
              Thread.sleep(writerSlowness);
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt();
            }
          }
          Entry entry = (Entry) invocation.getArguments()[0];
          WALEdit edit = entry.getEdit();
          List<Cell> cells = edit.getCells();
          assertEquals(1, cells.size());
          Cell cell = cells.get(0);
          // Check that the edits come in the right order.
          assertEquals(expectedIndex, Bytes.toInt(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
          expectedIndex++;
          return null;
        }
      }).when(mockWriter).append(Mockito.<Entry>any());
      return mockWriter;
    }

    /* Produce a mock reader that generates fake entries */
    @Override
    protected Reader getReader(Path curLogFile, CancelableProgressable reporter) throws IOException {
      Reader mockReader = Mockito.mock(Reader.class);
      Mockito.doAnswer(new Answer<Entry>() {
        int index = 0;

        @Override
        public Entry answer(InvocationOnMock invocation) throws Throwable {
          if (index >= numFakeEdits)
            return null;
          // Generate r0 through r4 in round robin fashion
          int regionIdx = index % regions.size();
          byte[] region = new byte[] { (byte) 'r', (byte) (0x30 + regionIdx) };
          Entry ret = createTestEntry(TABLE_NAME, region, Bytes.toBytes((int) (index / regions.size())), FAMILY, QUALIFIER, VALUE, index);
          index++;
          return ret;
        }
      }).when(mockReader).next();
      return mockReader;
    }
  };
  logSplitter.splitLogFile(fs.getFileStatus(logPath), null);
  // Verify number of written edits per region
  Map<byte[], Long> outputCounts = logSplitter.outputSink.getOutputCounts();
  for (Map.Entry<byte[], Long> entry : outputCounts.entrySet()) {
    LOG.info("Got " + entry.getValue() + " output edits for region " + Bytes.toString(entry.getKey()));
    assertEquals((long) entry.getValue(), numFakeEdits / regions.size());
  }
  assertEquals("Should have as many outputs as regions", regions.size(), outputCounts.size());
}
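The helper is parameterized by edit count, buffer size, and per-edit writer delay. As a hedged illustration (the method name and concrete values below are assumptions, not taken from the HBase test suite), a caller in the same test class could exercise different contention scenarios like this:

// Hypothetical test (values are illustrative only):
@Test(timeout = 300000)
public void testThreadingScenarios() throws Exception {
  doTestThreading(200, 1024, 0);        // small buffer, fast writers: stresses buffer handoff
  doTestThreading(200, 128 * 1024, 10); // larger buffer, writers sleeping 10 ms per edit
}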
Use of org.apache.hadoop.hbase.util.CancelableProgressable in project hbase by apache.
In the class AsyncFSOutputHelper, method createOutput:
/**
 * Create {@link FanOutOneBlockAsyncDFSOutput} for {@link DistributedFileSystem}, and a simple
 * implementation for other {@link FileSystem}s that wraps around an {@link FSDataOutputStream}.
 */
public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean overwrite, boolean createParent, short replication, long blockSize, final EventLoop eventLoop) throws IOException {
  if (fs instanceof DistributedFileSystem) {
    return FanOutOneBlockAsyncDFSOutputHelper.createOutput((DistributedFileSystem) fs, f, overwrite, createParent, replication, blockSize, eventLoop);
  }
  final FSDataOutputStream fsOut;
  int bufferSize = fs.getConf().getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
  if (createParent) {
    fsOut = fs.create(f, overwrite, bufferSize, replication, blockSize, null);
  } else {
    fsOut = fs.createNonRecursive(f, overwrite, bufferSize, replication, blockSize, null);
  }
  final ExecutorService flushExecutor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true).setNameFormat("AsyncFSOutputFlusher-" + f.toString().replace("%", "%%")).build());
  return new AsyncFSOutput() {
    private final ByteArrayOutputStream out = new ByteArrayOutputStream();

    @Override
    public void write(final byte[] b, final int off, final int len) {
      if (eventLoop.inEventLoop()) {
        out.write(b, off, len);
      } else {
        eventLoop.submit(() -> out.write(b, off, len)).syncUninterruptibly();
      }
    }

    @Override
    public void write(byte[] b) {
      write(b, 0, b.length);
    }

    @Override
    public void recoverAndClose(CancelableProgressable reporter) throws IOException {
      fsOut.close();
    }

    @Override
    public DatanodeInfo[] getPipeline() {
      return new DatanodeInfo[0];
    }

    private void flush0(CompletableFuture<Long> future, boolean sync) {
      try {
        synchronized (out) {
          fsOut.write(out.getBuffer(), 0, out.size());
          out.reset();
        }
      } catch (IOException e) {
        eventLoop.execute(() -> future.completeExceptionally(e));
        return;
      }
      try {
        if (sync) {
          fsOut.hsync();
        } else {
          fsOut.hflush();
        }
        long pos = fsOut.getPos();
        eventLoop.execute(() -> future.complete(pos));
      } catch (IOException e) {
        eventLoop.execute(() -> future.completeExceptionally(e));
      }
    }

    @Override
    public CompletableFuture<Long> flush(boolean sync) {
      CompletableFuture<Long> future = new CompletableFuture<>();
      flushExecutor.execute(() -> flush0(future, sync));
      return future;
    }

    @Override
    public void close() throws IOException {
      try {
        flushExecutor.submit(() -> {
          synchronized (out) {
            fsOut.write(out.getBuffer(), 0, out.size());
            out.reset();
          }
          return null;
        }).get();
      } catch (InterruptedException e) {
        throw new InterruptedIOException();
      } catch (ExecutionException e) {
        Throwables.propagateIfPossible(e.getCause(), IOException.class);
        throw new IOException(e.getCause());
      } finally {
        flushExecutor.shutdown();
      }
      fsOut.close();
    }

    @Override
    public int buffered() {
      return out.size();
    }

    @Override
    public void writeInt(int i) {
      out.writeInt(i);
    }

    @Override
    public void write(ByteBuffer bb) {
      out.write(bb, bb.position(), bb.remaining());
    }
  };
}
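As a hedged usage sketch (the method name, output path, and event-loop wiring are assumptions, and the Netty package may be shaded differently depending on the HBase version), a caller could obtain an AsyncFSOutput, write a small payload, and flush it asynchronously like this:

// Hypothetical caller (illustration only): assumes a Netty NioEventLoopGroup is acceptable
// as the EventLoop source and that the path /tmp/example-async-out is writable.
private static void writeExample(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  Path path = new Path("/tmp/example-async-out");
  EventLoop eventLoop = new NioEventLoopGroup(1).next();
  AsyncFSOutput output = AsyncFSOutputHelper.createOutput(fs, path, true /* overwrite */,
      true /* createParent */, fs.getDefaultReplication(path), fs.getDefaultBlockSize(path), eventLoop);
  output.write(Bytes.toBytes("example payload"));
  // flush(false) hflushes; the returned future completes on the event loop with the file position
  output.flush(false).whenComplete((pos, err) -> {
    if (err != null) {
      // handle the flush failure here
    }
  });
  output.close();
}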
Use of org.apache.hadoop.hbase.util.CancelableProgressable in project hbase by apache.
In the class ZkSplitLogWorkerCoordination, method submitTask:
/**
 * Submit a log split task to executor service
 * @param curTask task to submit
 * @param curTaskZKVersion current version of task
 */
void submitTask(final String curTask, final RecoveryMode mode, final int curTaskZKVersion, final int reportPeriod) {
  final MutableInt zkVersion = new MutableInt(curTaskZKVersion);
  CancelableProgressable reporter = new CancelableProgressable() {
    private long last_report_at = 0;

    @Override
    public boolean progress() {
      long t = EnvironmentEdgeManager.currentTime();
      if ((t - last_report_at) > reportPeriod) {
        last_report_at = t;
        int latestZKVersion = attemptToOwnTask(false, watcher, server.getServerName(), curTask, mode, zkVersion.intValue());
        if (latestZKVersion < 0) {
          LOG.warn("Failed to heartbeat the task " + curTask);
          return false;
        }
        zkVersion.setValue(latestZKVersion);
      }
      return true;
    }
  };
  ZkSplitLogWorkerCoordination.ZkSplitTaskDetails splitTaskDetails = new ZkSplitLogWorkerCoordination.ZkSplitTaskDetails();
  splitTaskDetails.setTaskNode(curTask);
  splitTaskDetails.setCurTaskZKVersion(zkVersion);
  WALSplitterHandler hsh = new WALSplitterHandler(server, this, splitTaskDetails, reporter, this.tasksInProgress, splitTaskExecutor, mode);
  server.getExecutorService().submit(hsh);
}
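The reporter above throttles its ZooKeeper heartbeat to at most once per reportPeriod and cancels the task if the heartbeat fails. As a hedged sketch (not part of the HBase source; the class name and BooleanSupplier hook are assumptions), the same throttling pattern can be expressed generically:

import java.util.function.BooleanSupplier;
import org.apache.hadoop.hbase.util.CancelableProgressable;

// Hypothetical reporter (illustration only): invokes a heartbeat callback at most once per
// period; if the heartbeat reports failure, progress() returns false to cancel the task.
public class ThrottledReporter implements CancelableProgressable {
  private final long periodMillis;
  private final BooleanSupplier heartbeat;
  private long lastReportAt = 0;

  public ThrottledReporter(long periodMillis, BooleanSupplier heartbeat) {
    this.periodMillis = periodMillis;
    this.heartbeat = heartbeat;
  }

  @Override
  public boolean progress() {
    long now = System.currentTimeMillis();
    if (now - lastReportAt > periodMillis) {
      lastReportAt = now;
      return heartbeat.getAsBoolean();
    }
    return true;
  }
}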