
Example 31 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class TabletServerLogger, method write.

private void write(final Collection<CommitSession> sessions, boolean mincFinish, Writer writer, Retry writeRetry) throws IOException {
    // Work very hard not to lock this during calls to the outside world
    int currentLogId = logId.get();
    boolean success = false;
    while (!success) {
        try {
            // get a reference to the loggers that no other thread can touch
            DfsLogger copy = null;
            AtomicInteger currentId = new AtomicInteger(-1);
            copy = initializeLoggers(currentId);
            currentLogId = currentId.get();
            if (currentLogId == logId.get()) {
                for (CommitSession commitSession : sessions) {
                    if (commitSession.beginUpdatingLogsUsed(copy, mincFinish)) {
                        try {
                            // Scribble out a tablet definition and then write to the metadata table
                            defineTablet(commitSession, writeRetry);
                        } finally {
                            commitSession.finishUpdatingLogsUsed();
                        }
                        // Need to release
                        KeyExtent extent = commitSession.getExtent();
                        if (ReplicationConfigurationUtil.isEnabled(extent, tserver.getTableConfiguration(extent))) {
                            Status status = StatusUtil.openWithUnknownLength(System.currentTimeMillis());
                            log.debug("Writing " + ProtobufUtil.toString(status) + " to metadata table for " + copy.getFileName());
                            // Got some new WALs, note this in the metadata table
                            ReplicationTableUtil.updateFiles(tserver, commitSession.getExtent(), copy.getFileName(), status);
                        }
                    }
                }
            }
            // Make sure that the logs haven't changed out from underneath our copy
            if (currentLogId == logId.get()) {
                // write the mutation to the logs
                LoggerOperation lop = writer.write(copy);
                lop.await();
                // double-check: did the log set change?
                success = (currentLogId == logId.get());
            }
        } catch (DfsLogger.LogClosedException ex) {
            writeRetry.logRetry(log, "Logs closed while writing", ex);
        } catch (Exception t) {
            writeRetry.logRetry(log, "Failed to write to WAL", t);
            try {
                // Backoff
                writeRetry.waitForNextAttempt();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        } finally {
            writeRetry.useRetry();
        }
        // Some sort of write failure occurred. Grab the write lock and reset the logs.
        // But since multiple threads will attempt it, only attempt the reset when
        // the logs haven't changed.
        final int finalCurrent = currentLogId;
        if (!success) {
            testLockAndRun(logIdLock, new TestCallWithWriteLock() {

                @Override
                boolean test() {
                    return finalCurrent == logId.get();
                }

                @Override
                void withWriteLock() throws IOException {
                    close();
                    closeForReplication(sessions);
                }
            });
        }
    }
    // if the log gets too big or too old, reset it; grab the write lock first
    // event, tid, seq overhead
    logSizeEstimate.addAndGet(4 * 3);
    testLockAndRun(logIdLock, new TestCallWithWriteLock() {

        @Override
        boolean test() {
            return (logSizeEstimate.get() > maxSize) || ((System.currentTimeMillis() - createTime) > maxAge);
        }

        @Override
        void withWriteLock() throws IOException {
            close();
            closeForReplication(sessions);
        }
    });
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), CommitSession (org.apache.accumulo.tserver.tablet.CommitSession), IOException (java.io.IOException), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), LoggerOperation (org.apache.accumulo.tserver.log.DfsLogger.LoggerOperation), AtomicInteger (java.util.concurrent.atomic.AtomicInteger)
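
For context, the Status written here marks a WAL as open with an unknown length, so everything appended to the file remains eligible for replication. A minimal sketch of building an equivalent message directly with the protobuf builder (field names taken from the generated Replication.Status class; the exact values StatusUtil applies are an assumption):

import org.apache.accumulo.server.replication.proto.Replication.Status;

// Sketch: a Status comparable to StatusUtil.openWithUnknownLength(now).
// Nothing replicated yet (begin = 0), the end offset is unknown while the
// WAL is open (infiniteEnd = true), and the file is not yet closed.
Status status = Status.newBuilder()
    .setBegin(0)
    .setEnd(0)
    .setInfiniteEnd(true)
    .setClosed(false)
    .setCreatedTime(System.currentTimeMillis())
    .build();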

Example 32 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class AccumuloReplicaSystem, method replicateRFiles.

protected Status replicateRFiles(ClientContext peerContext, final HostAndPort peerTserver, final ReplicationTarget target, final Path p, final Status status, final long sizeLimit, final String remoteTableId, final TCredentials tcreds, final ReplicaSystemHelper helper, long timeout) throws TTransportException, AccumuloException, AccumuloSecurityException {
    try (final DataInputStream input = getRFileInputStream(p)) {
        Status lastStatus = status, currentStatus = status;
        while (true) {
            // Read and send a batch of mutations
            ReplicationStats replResult = ReplicationClient.executeServicerWithReturn(peerContext, peerTserver, new RFileClientExecReturn(target, input, p, currentStatus, sizeLimit, remoteTableId, tcreds), timeout);
            // Catch the overflow
            long newBegin = currentStatus.getBegin() + replResult.entriesConsumed;
            if (newBegin < 0) {
                newBegin = Long.MAX_VALUE;
            }
            currentStatus = Status.newBuilder(currentStatus).setBegin(newBegin).build();
            log.debug("Sent batch for replication of {} to {}, with new Status {}", p, target, ProtobufUtil.toString(currentStatus));
            // If we got a different status
            if (!currentStatus.equals(lastStatus)) {
                // If we don't have any more work, just quit
                if (!StatusUtil.isWorkRequired(currentStatus)) {
                    return currentStatus;
                } else {
                    // Otherwise, let it loop and replicate some more data
                    lastStatus = currentStatus;
                }
            } else {
                log.debug("Did not replicate any new data for {} to {}, (state was {})", p, target, ProtobufUtil.toString(lastStatus));
                // we can just not record any updates, and it will be picked up again by the work assigner
                return status;
            }
        }
    } catch (IOException e) {
        log.error("Could not create input stream from RFile, will retry", e);
        return status;
    }
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), IOException (java.io.IOException), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), DataInputStream (java.io.DataInputStream)
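
The loop above exits through StatusUtil.isWorkRequired. A hypothetical sketch of that decision, assuming the semantics implied by the fields used in this example (this is not the real StatusUtil code): with an infinite end, work remains until the overflow clamp has pinned begin at Long.MAX_VALUE; with a bounded end, work remains while begin is still behind end.

import org.apache.accumulo.server.replication.proto.Replication.Status;

// Sketch only: assumed semantics of "is there still replication work?".
static boolean workRequired(Status status) {
    if (status.getInfiniteEnd()) {
        return status.getBegin() != Long.MAX_VALUE;
    }
    return status.getBegin() < status.getEnd();
}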

Example 33 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class VolumeUtil, method updateTabletVolumes.

/**
 * This method does two things. First, it switches any volumes a tablet is using that are configured in instance.volumes.replacements. Second, if a tablet dir
 * is no longer configured for use it chooses a new tablet directory.
 */
public static TabletFiles updateTabletVolumes(AccumuloServerContext context, ZooLock zooLock, VolumeManager vm, KeyExtent extent, TabletFiles tabletFiles, boolean replicate) throws IOException {
    List<Pair<Path, Path>> replacements = ServerConstants.getVolumeReplacements();
    log.trace("Using volume replacements: {}", replacements);
    List<LogEntry> logsToRemove = new ArrayList<>();
    List<LogEntry> logsToAdd = new ArrayList<>();
    List<FileRef> filesToRemove = new ArrayList<>();
    SortedMap<FileRef, DataFileValue> filesToAdd = new TreeMap<>();
    TabletFiles ret = new TabletFiles();
    for (LogEntry logEntry : tabletFiles.logEntries) {
        LogEntry switchedLogEntry = switchVolumes(logEntry, replacements);
        if (switchedLogEntry != null) {
            logsToRemove.add(logEntry);
            logsToAdd.add(switchedLogEntry);
            ret.logEntries.add(switchedLogEntry);
            log.debug("Replacing volume {} : {} -> {}", extent, logEntry.filename, switchedLogEntry.filename);
        } else {
            ret.logEntries.add(logEntry);
        }
    }
    if (extent.isRootTablet()) {
        ret.datafiles = tabletFiles.datafiles;
    } else {
        for (Entry<FileRef, DataFileValue> entry : tabletFiles.datafiles.entrySet()) {
            String metaPath = entry.getKey().meta().toString();
            String switchedPath = switchVolume(metaPath, FileType.TABLE, replacements);
            if (switchedPath != null) {
                filesToRemove.add(entry.getKey());
                FileRef switchedRef = new FileRef(switchedPath, new Path(switchedPath));
                filesToAdd.put(switchedRef, entry.getValue());
                ret.datafiles.put(switchedRef, entry.getValue());
                log.debug("Replacing volume {} : {} -> {}", extent, metaPath, switchedPath);
            } else {
                ret.datafiles.put(entry.getKey(), entry.getValue());
            }
        }
    }
    String tabletDir = tabletFiles.dir;
    String switchedDir = switchVolume(tabletDir, FileType.TABLE, replacements);
    if (switchedDir != null) {
        log.debug("Replacing volume {} : {} -> {}", extent, tabletDir, switchedDir);
        tabletDir = switchedDir;
    }
    if (logsToRemove.size() + filesToRemove.size() > 0 || switchedDir != null) {
        MetadataTableUtil.updateTabletVolumes(extent, logsToRemove, logsToAdd, filesToRemove, filesToAdd, switchedDir, zooLock, context);
        if (replicate) {
            Status status = StatusUtil.fileClosed();
            log.debug("Tablet directory switched, need to record old log files {} {}", logsToRemove, ProtobufUtil.toString(status));
            // Before deleting these logs, we need to mark them for replication
            for (LogEntry logEntry : logsToRemove) {
                ReplicationTableUtil.updateFiles(context, extent, logEntry.filename, status);
            }
        }
    }
    ret.dir = decommisionedTabletDir(context, zooLock, vm, extent, tabletDir);
    if (extent.isRootTablet()) {
        SortedMap<FileRef, DataFileValue> copy = ret.datafiles;
        ret.datafiles = new TreeMap<>();
        for (Entry<FileRef, DataFileValue> entry : copy.entrySet()) {
            ret.datafiles.put(new FileRef(new Path(ret.dir, entry.getKey().path().getName()).toString()), entry.getValue());
        }
    }
    // this method should return the exact strings that are in the metadata table
    return ret;
}
Also used: Path (org.apache.hadoop.fs.Path), Status (org.apache.accumulo.server.replication.proto.Replication.Status), FileStatus (org.apache.hadoop.fs.FileStatus), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), ArrayList (java.util.ArrayList), TreeMap (java.util.TreeMap), LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry), Pair (org.apache.accumulo.core.util.Pair)
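
The replacement pairs consumed above come from the instance.volumes.replacements property. A minimal sketch of the prefix swap that switchVolume performs, under the assumption that a replacement applies when a path starts with the old volume URI (the real method also considers the FileType; switchVolumeSketch is a hypothetical helper, not Accumulo API):

import java.util.List;
import org.apache.accumulo.core.util.Pair;
import org.apache.hadoop.fs.Path;

// Sketch: rewrite a path's volume prefix using the configured replacement
// pairs. Returns null when no pair matches, mirroring how the caller above
// treats null as "keep the original path".
static String switchVolumeSketch(String path, List<Pair<Path, Path>> replacements) {
    for (Pair<Path, Path> pair : replacements) {
        String oldVolume = pair.getFirst().toString();
        if (path.startsWith(oldVolume)) {
            return pair.getSecond().toString() + path.substring(oldVolume.length());
        }
    }
    return null;
}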

Example 34 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class StatusCombiner, method typedReduce.

@Override
public Status typedReduce(Key key, Iterator<Status> iter) {
    Builder combined = null;
    while (iter.hasNext()) {
        Status status = iter.next();
        // Avoid building a combined Status when there is only a single message to reduce
        if (null == combined) {
            if (!iter.hasNext()) {
                if (log.isTraceEnabled()) {
                    log.trace("Returned single value: {} {}", key.toStringNoTruncate(), ProtobufUtil.toString(status));
                }
                return status;
            } else {
                combined = Status.newBuilder();
            }
        }
        // Add the new message in with the previous message(s)
        combine(combined, status);
    }
    if (log.isTraceEnabled()) {
        log.trace("Combined: {} {}", key.toStringNoTruncate(), ProtobufUtil.toString(combined.build()));
    }
    return combined.build();
}
Also used: Status (org.apache.accumulo.server.replication.proto.Replication.Status), Builder (org.apache.accumulo.server.replication.proto.Replication.Status.Builder)
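
For usage context, a combiner like this is attached to a table as an iterator so that multiple Status versions of the same key are reduced through typedReduce at scan and compaction time. A hypothetical wiring sketch using the standard iterator API (the table name, priority, and column family here are illustrative, not what Accumulo configures internally):

import java.util.Collections;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.iterators.Combiner;
import org.apache.accumulo.server.replication.StatusCombiner;
import org.apache.hadoop.io.Text;

// Sketch: register StatusCombiner on a table, limited to one column family.
static void attachStatusCombiner(Connector connector) throws Exception {
    IteratorSetting setting = new IteratorSetting(30, "statusCombiner", StatusCombiner.class);
    Combiner.setColumns(setting, Collections.singletonList(new IteratorSetting.Column(new Text("repl"))));
    connector.tableOperations().attachIterator("accumulo.replication", setting);
}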

Example 35 with Status

Use of org.apache.accumulo.server.replication.proto.Replication.Status in project accumulo by apache.

The class AccumuloReplicaSystemTest, method dontSendEmptyDataToPeer.

@Test
public void dontSendEmptyDataToPeer() throws Exception {
    Client replClient = createMock(Client.class);
    AccumuloReplicaSystem ars = createMock(AccumuloReplicaSystem.class);
    WalEdits edits = new WalEdits(Collections.emptyList());
    WalReplication walReplication = new WalReplication(edits, 0, 0, 0);
    ReplicationTarget target = new ReplicationTarget("peer", "2", Table.ID.of("1"));
    DataInputStream input = null;
    Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID().toString());
    Status status = null;
    long sizeLimit = Long.MAX_VALUE;
    String remoteTableId = target.getRemoteIdentifier();
    TCredentials tcreds = null;
    Set<Integer> tids = new HashSet<>();
    WalClientExecReturn walClientExec = ars.new WalClientExecReturn(target, input, p, status, sizeLimit, remoteTableId, tcreds, tids);
    expect(ars.getWalEdits(target, input, p, status, sizeLimit, tids)).andReturn(walReplication);
    replay(replClient, ars);
    ReplicationStats stats = walClientExec.execute(replClient);
    verify(replClient, ars);
    Assert.assertEquals(new ReplicationStats(0L, 0L, 0L), stats);
}
Also used: Path (org.apache.hadoop.fs.Path), Status (org.apache.accumulo.server.replication.proto.Replication.Status), TCredentials (org.apache.accumulo.core.security.thrift.TCredentials), WalReplication (org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.WalReplication), WalClientExecReturn (org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.WalClientExecReturn), DataInputStream (java.io.DataInputStream), WalEdits (org.apache.accumulo.core.replication.thrift.WalEdits), ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget), ReplicationStats (org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.ReplicationStats), Client (org.apache.accumulo.core.replication.thrift.ReplicationServicer.Client), HashSet (java.util.HashSet), Test (org.junit.Test)

Aggregations

Status (org.apache.accumulo.server.replication.proto.Replication.Status): 77 usages
Test (org.junit.Test): 57 usages
Mutation (org.apache.accumulo.core.data.Mutation): 30 usages
Text (org.apache.hadoop.io.Text): 29 usages
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 28 usages
Key (org.apache.accumulo.core.data.Key): 27 usages
Value (org.apache.accumulo.core.data.Value): 26 usages
Scanner (org.apache.accumulo.core.client.Scanner): 21 usages
ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget): 20 usages
Path (org.apache.hadoop.fs.Path): 17 usages
HashMap (java.util.HashMap): 14 usages
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 14 usages
Table (org.apache.accumulo.core.client.impl.Table): 14 usages
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable): 13 usages
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 12 usages
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 11 usages
Connector (org.apache.accumulo.core.client.Connector): 11 usages
InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException): 10 usages
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 10 usages
DataInputStream (java.io.DataInputStream): 9 usages