
Example 1 with AsyncWriter

use of org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter in project hbase by apache.

In the class AsyncFSWAL, the method appendAndSync drains queued appends into the current writer and decides whether a sync should be issued based on how much unsynced data has accumulated.

private void appendAndSync() {
    final AsyncWriter writer = this.writer;
    // maybe a sync request is not queued when we issue a sync, so check here to see if we could
    // finish some.
    finishSync();
    long newHighestProcessedAppendTxid = -1L;
    // this is used to avoid calling peekLast every time on unackedAppends; appendAndSync is single
    // threaded, so this could save us some cycles
    boolean addedToUnackedAppends = false;
    for (Iterator<FSWALEntry> iter = toWriteAppends.iterator(); iter.hasNext(); ) {
        FSWALEntry entry = iter.next();
        boolean appended;
        try {
            appended = appendEntry(writer, entry);
        } catch (IOException e) {
            throw new AssertionError("should not happen", e);
        }
        newHighestProcessedAppendTxid = entry.getTxid();
        iter.remove();
        if (appended) {
            // this is possible: when we fail to sync, we will add the unackedAppends back to
            // toWriteAppends, so here we may get an entry which is already in the unackedAppends.
            if (addedToUnackedAppends || unackedAppends.isEmpty() || getLastTxid(unackedAppends) < entry.getTxid()) {
                unackedAppends.addLast(entry);
                addedToUnackedAppends = true;
            }
            // only break out to sync once we have buffered a full batch and the current entry is
            // covered by unackedAppends; checking this way is the simplest fix, can optimize later.
            if (writer.getLength() - fileLengthAtLastSync >= batchSize && (addedToUnackedAppends || entry.getTxid() >= getLastTxid(unackedAppends))) {
                break;
            }
        }
    }
    // if we appended anything in this round, advance the highest processed append txid;
    // otherwise, use the previous transaction id.
    if (newHighestProcessedAppendTxid > 0) {
        highestProcessedAppendTxid = newHighestProcessedAppendTxid;
    } else {
        newHighestProcessedAppendTxid = highestProcessedAppendTxid;
    }
    if (writer.getLength() - fileLengthAtLastSync >= batchSize) {
        // sync because we have reached the buffer size limit.
        sync(writer);
        return;
    }
    if (writer.getLength() == fileLengthAtLastSync) {
        // we haven't written anything out, just advance highestSyncedTxid since we may only have
        // stamped some region sequence id.
        if (unackedAppends.isEmpty()) {
            highestSyncedTxid.set(highestProcessedAppendTxid);
            finishSync();
            trySetReadyForRolling();
        }
        return;
    }
    // reach here means that we have some unsynced data but haven't reached the batch size yet,
    // but we will not issue a sync directly here even if there are sync requests, because we may
    // have some new data in the ringbuffer, so let's just return here and delay the decision of
    // whether to issue a sync in the caller method.
}
Also used : AsyncWriter(org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter) IOException(java.io.IOException)
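
The AsyncWriter contract these examples implement or wrap is small: an append, an asynchronous sync returning a CompletableFuture of the synced length, two length accessors, and close (compare the anonymous subclass in Example 4). As a rough sketch, a pass-through wrapper over that interface could look like the following; DelegatingAsyncWriter is an illustrative name, not an HBase class.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.wal.WAL.Entry;
import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;

// Illustrative sketch: delegate every AsyncWriter call to an underlying writer.
// A wrapper like this is the natural starting point for fault injection
// (see Example 4) or for collecting per-call metrics.
class DelegatingAsyncWriter implements AsyncWriter {

    private final AsyncWriter delegate;

    DelegatingAsyncWriter(AsyncWriter delegate) {
        this.delegate = delegate;
    }

    @Override
    public void append(Entry entry) {
        delegate.append(entry);
    }

    @Override
    public CompletableFuture<Long> sync(boolean forceSync) {
        // completes with the synced length once the underlying sync finishes
        return delegate.sync(forceSync);
    }

    @Override
    public long getLength() {
        return delegate.getLength();
    }

    @Override
    public long getSyncedLength() {
        return delegate.getSyncedLength();
    }

    @Override
    public void close() throws IOException {
        delegate.close();
    }
}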

Example 2 with AsyncWriter

use of org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter in project hbase by apache.

In the class DualAsyncFSWAL, the method createWriterInstance creates the local writer, retries creating the remote writer with backoff, and returns a combined writer over both.

@Override
protected AsyncWriter createWriterInstance(Path path) throws IOException {
    AsyncWriter localWriter = super.createWriterInstance(path);
    // retry forever if we cannot create the remote writer, to prevent aborting the RS due to a log
    // rolling error, unless skipRemoteWAL is set to true.
    // TODO: since for now we only have one thread doing log rolling, this may block the rolling for
    // other wals
    Path remoteWAL = new Path(remoteWALDir, path.getName());
    for (int retry = 0; ; retry++) {
        if (skipRemoteWAL) {
            return localWriter;
        }
        AsyncWriter remoteWriter;
        try {
            remoteWriter = createAsyncWriter(remoteFs, remoteWAL);
        } catch (IOException e) {
            LOG.warn("create remote writer {} failed, retry = {}", remoteWAL, retry, e);
            try {
                Thread.sleep(ConnectionUtils.getPauseTime(100, retry));
            } catch (InterruptedException ie) {
                // restore the interrupt state
                Thread.currentThread().interrupt();
                Closeables.close(localWriter, true);
                throw (IOException) new InterruptedIOException().initCause(ie);
            }
            continue;
        }
        return createCombinedAsyncWriter(localWriter, remoteWriter);
    }
}
Also used : AsyncWriter(org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter) Path(org.apache.hadoop.fs.Path) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException)
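
createCombinedAsyncWriter returns a writer that mirrors every operation to both the local and the remote WAL. The real HBase implementation is not shown here; the following is only a simplified sketch of the fan-out idea, with illustrative names, where a sync completes only after both sides have synced.

import java.io.IOException;
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.wal.WAL.Entry;
import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;

// Illustrative sketch: fan every append out to both writers; the combined
// sync future completes when both underlying syncs complete, and fails if
// either of them fails.
class FanOutAsyncWriter implements AsyncWriter {

    private final AsyncWriter local;
    private final AsyncWriter remote;

    FanOutAsyncWriter(AsyncWriter local, AsyncWriter remote) {
        this.local = local;
        this.remote = remote;
    }

    @Override
    public void append(Entry entry) {
        local.append(entry);
        remote.append(entry);
    }

    @Override
    public CompletableFuture<Long> sync(boolean forceSync) {
        // report the local length; a failure on either side fails the combined future
        return local.sync(forceSync)
            .thenCombine(remote.sync(forceSync), (localLen, remoteLen) -> localLen);
    }

    @Override
    public long getLength() {
        return local.getLength();
    }

    @Override
    public long getSyncedLength() {
        return local.getSyncedLength();
    }

    @Override
    public void close() throws IOException {
        // sketch simplification: a failed local close will skip the remote close
        local.close();
        remote.close();
    }
}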

Example 3 with AsyncWriter

use of org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter in project hbase by apache.

In the class AsyncFSWAL, the method doReplaceWriter waits for a safe point, swaps in the new writer, and closes the old one asynchronously, returning its final length.

@Override
protected long doReplaceWriter(Path oldPath, Path newPath, AsyncWriter nextWriter) throws IOException {
    waitForSafePoint();
    final AsyncWriter oldWriter = this.writer;
    this.writer = nextWriter;
    if (nextWriter != null && nextWriter instanceof AsyncProtobufLogWriter) {
        this.fsOut = ((AsyncProtobufLogWriter) nextWriter).getOutput();
    }
    this.fileLengthAtLastSync = 0L;
    this.highestProcessedAppendTxidAtLastSync = 0L;
    consumeLock.lock();
    try {
        consumerScheduled.set(true);
        writerBroken = waitingRoll = false;
        eventLoop.execute(consumer);
    } finally {
        consumeLock.unlock();
    }
    long oldFileLen;
    if (oldWriter != null) {
        oldFileLen = oldWriter.getLength();
        closeExecutor.execute(() -> {
            try {
                oldWriter.close();
            } catch (IOException e) {
                LOG.warn("close old writer failed", e);
            }
        });
    } else {
        oldFileLen = 0L;
    }
    return oldFileLen;
}
Also used : AsyncWriter(org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException)
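
Note the ordering in doReplaceWriter: the old writer's length is read before its close is queued, and the close runs on closeExecutor so a slow or hanging close cannot stall the roll. A stripped-down sketch of that hand-off pattern, with made-up names:

import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Illustrative sketch: read whatever state you still need from the retired
// resource first, then let a background thread absorb the potentially slow
// close() call.
final class BackgroundCloser {

    private final ExecutorService closeExecutor = Executors.newSingleThreadExecutor();

    /** Returns the final length, captured before the asynchronous close. */
    long retire(Closeable old, long finalLength) {
        closeExecutor.execute(() -> {
            try {
                old.close();
            } catch (IOException e) {
                // log and swallow: the roll already succeeded, so a failed close
                // of the old file must not abort it
                System.err.println("close old writer failed: " + e);
            }
        });
        return finalLength;
    }
}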

Example 4 with AsyncWriter

use of org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter in project hbase by apache.

In the class TestAsyncFSWAL, the method testBrokenWriter wraps the real writer so that syncs initially fail with an injected error, then verifies that all writer threads eventually append and sync successfully despite the injected failures.

@Test
public void testBrokenWriter() throws Exception {
    RegionServerServices services = mock(RegionServerServices.class);
    when(services.getConfiguration()).thenReturn(CONF);
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("table")).setColumnFamily(ColumnFamilyDescriptorBuilder.of("row")).build();
    RegionInfo ri = RegionInfoBuilder.newBuilder(td.getTableName()).build();
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : td.getColumnFamilyNames()) {
        scopes.put(fam, 0);
    }
    long timestamp = EnvironmentEdgeManager.currentTime();
    String testName = currentTest.getMethodName();
    AtomicInteger failedCount = new AtomicInteger(0);
    try (LogRoller roller = new LogRoller(services);
        AsyncFSWAL wal = new AsyncFSWAL(FS, CommonFSUtils.getWALRootDir(CONF), DIR.toString(), testName, CONF, null, true, null, null, GROUP, CHANNEL_CLASS) {

            @Override
            protected AsyncWriter createWriterInstance(Path path) throws IOException {
                AsyncWriter writer = super.createWriterInstance(path);
                return new AsyncWriter() {

                    @Override
                    public void close() throws IOException {
                        writer.close();
                    }

                    @Override
                    public long getLength() {
                        return writer.getLength();
                    }

                    @Override
                    public long getSyncedLength() {
                        return writer.getSyncedLength();
                    }

                    @Override
                    public CompletableFuture<Long> sync(boolean forceSync) {
                        CompletableFuture<Long> result = writer.sync(forceSync);
                        if (failedCount.incrementAndGet() < 1000) {
                            CompletableFuture<Long> future = new CompletableFuture<>();
                            FutureUtils.addListener(result, (r, e) -> future.completeExceptionally(new IOException("Inject Error")));
                            return future;
                        } else {
                            return result;
                        }
                    }

                    @Override
                    public void append(Entry entry) {
                        writer.append(entry);
                    }
                };
            }
        }) {
        wal.init();
        roller.addWAL(wal);
        roller.start();
        int numThreads = 10;
        AtomicReference<Exception> error = new AtomicReference<>();
        Thread[] threads = new Thread[numThreads];
        for (int i = 0; i < numThreads; i++) {
            final int index = i;
            threads[index] = new Thread("Write-Thread-" + index) {

                @Override
                public void run() {
                    byte[] row = Bytes.toBytes("row" + index);
                    WALEdit cols = new WALEdit();
                    cols.add(new KeyValue(row, row, row, timestamp + index, row));
                    WALKeyImpl key = new WALKeyImpl(ri.getEncodedNameAsBytes(), td.getTableName(), SequenceId.NO_SEQUENCE_ID, timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE, HConstants.NO_NONCE, mvcc, scopes);
                    try {
                        wal.append(ri, key, cols, true);
                    } catch (IOException e) {
                        // should not happen
                        throw new UncheckedIOException(e);
                    }
                    try {
                        wal.sync();
                    } catch (IOException e) {
                        error.set(e);
                    }
                }
            };
        }
        for (Thread t : threads) {
            t.start();
        }
        for (Thread t : threads) {
            t.join();
        }
        assertNull(error.get());
    }
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) UncheckedIOException(java.io.UncheckedIOException) CompletableFuture(java.util.concurrent.CompletableFuture) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) Path(org.apache.hadoop.fs.Path) RegionServerServices(org.apache.hadoop.hbase.regionserver.RegionServerServices) AtomicReference(java.util.concurrent.atomic.AtomicReference) IOException(java.io.IOException) TreeMap(java.util.TreeMap) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AsyncWriter(org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter) WALKeyImpl(org.apache.hadoop.hbase.wal.WALKeyImpl) LogRoller(org.apache.hadoop.hbase.regionserver.LogRoller) Test(org.junit.Test)
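
The fault-injection trick in this test generalizes: instead of returning the real sync future, return a fresh future that is completed exceptionally once the real operation finishes. A standalone sketch using only java.util.concurrent, with whenComplete standing in for HBase's FutureUtils.addListener and illustrative names:

import java.io.IOException;
import java.util.concurrent.CompletableFuture;

final class SyncFaultInjector {

    // Illustrative sketch: while injection is on, the caller always observes a
    // failure, no matter what the real sync did. This mirrors the anonymous
    // AsyncWriter in the test above.
    static CompletableFuture<Long> failInjected(CompletableFuture<Long> real, boolean inject) {
        if (!inject) {
            return real;
        }
        CompletableFuture<Long> failed = new CompletableFuture<>();
        // let the real sync finish first, then surface an artificial error
        real.whenComplete((len, err) -> failed.completeExceptionally(new IOException("Inject Error")));
        return failed;
    }
}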

Aggregations

IOException (java.io.IOException) 4
AsyncWriter (org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter) 4
InterruptedIOException (java.io.InterruptedIOException) 2
Path (org.apache.hadoop.fs.Path) 2
UncheckedIOException (java.io.UncheckedIOException) 1
TreeMap (java.util.TreeMap) 1
CompletableFuture (java.util.concurrent.CompletableFuture) 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 1
AtomicReference (java.util.concurrent.atomic.AtomicReference) 1
KeyValue (org.apache.hadoop.hbase.KeyValue) 1
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 1
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor) 1
LogRoller (org.apache.hadoop.hbase.regionserver.LogRoller) 1
MultiVersionConcurrencyControl (org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) 1
RegionServerServices (org.apache.hadoop.hbase.regionserver.RegionServerServices) 1
WALEdit (org.apache.hadoop.hbase.wal.WALEdit) 1
WALKeyImpl (org.apache.hadoop.hbase.wal.WALKeyImpl) 1
Test (org.junit.Test) 1