
Example 1 with LogFileKey

Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.

From the class AccumuloReplicaSystem, method getWalEdits:

protected WalReplication getWalEdits(ReplicationTarget target, DataInputStream wal, Path p, Status status, long sizeLimit, Set<Integer> desiredTids) throws IOException {
    WalEdits edits = new WalEdits();
    edits.edits = new ArrayList<>();
    long size = 0L;
    long entriesConsumed = 0L;
    long numUpdates = 0L;
    LogFileKey key = new LogFileKey();
    LogFileValue value = new LogFileValue();
    while (size < sizeLimit) {
        try {
            key.readFields(wal);
            value.readFields(wal);
        } catch (EOFException e) {
            log.debug("Caught EOFException reading {}", p);
            if (status.getInfiniteEnd() && status.getClosed()) {
                log.debug("{} is closed and has unknown length, assuming entire file has been consumed", p);
                entriesConsumed = Long.MAX_VALUE;
            }
            break;
        }
        entriesConsumed++;
        switch(key.event) {
            case DEFINE_TABLET:
                // For new DEFINE_TABLETs, we also need to record the new tids we see
                if (target.getSourceTableId().equals(key.tablet.getTableId())) {
                    desiredTids.add(key.tid);
                }
                break;
            case MUTATION:
            case MANY_MUTATIONS:
                // Only write out mutations for tids that are for the desired tablet
                if (desiredTids.contains(key.tid)) {
                    ByteArrayOutputStream baos = new ByteArrayOutputStream();
                    DataOutputStream out = new DataOutputStream(baos);
                    key.write(out);
                    // Only write out the mutations that don't have the given ReplicationTarget
                    // as a replicate source (this prevents infinite replication loops: a->b, b->a, repeat)
                    numUpdates += writeValueAvoidingReplicationCycles(out, value, target);
                    out.flush();
                    byte[] data = baos.toByteArray();
                    size += data.length;
                    edits.addToEdits(ByteBuffer.wrap(data));
                }
                break;
            default:
                log.trace("Ignorning WAL entry which doesn't contain mutations, should not have received such entries");
                break;
        }
    }
    return new WalReplication(edits, size, entriesConsumed, numUpdates);
}
Also used: DataOutputStream(java.io.DataOutputStream) EOFException(java.io.EOFException) LogFileValue(org.apache.accumulo.tserver.logger.LogFileValue) LogFileKey(org.apache.accumulo.tserver.logger.LogFileKey) ByteArrayOutputStream(java.io.ByteArrayOutputStream) WalEdits(org.apache.accumulo.core.replication.thrift.WalEdits)
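For context, the framing in the MUTATION branch can be exercised on its own. Below is a minimal sketch, not taken from the Accumulo sources, that serializes a single key the way the branch above does; it assumes LogEvents is the event enum from the same org.apache.accumulo.tserver.logger package (the switch above references its values via static imports).

protected ByteBuffer frameKey(int tid, long seq) throws IOException {
    LogFileKey key = new LogFileKey();
    // MUTATION entries carry only the tid and sequence number; the tablet
    // itself was bound to the tid by an earlier DEFINE_TABLET entry.
    key.event = LogEvents.MUTATION;
    key.tid = tid;
    key.seq = seq;
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    key.write(out);
    out.flush();
    // getWalEdits adds data.length to its running total, so sizeLimit bounds
    // the batch by serialized bytes rather than by entry count.
    byte[] data = baos.toByteArray();
    return ByteBuffer.wrap(data);
}

Note that getWalEdits checks size < sizeLimit only at the top of the loop, so a batch can overshoot the limit by up to one entry; sizeLimit is a soft cap on the payload sent to the replication peer.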

Example 2 with LogFileKey

Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.

From the class AccumuloReplicaSystem, method consumeWalPrefix:

protected Set<Integer> consumeWalPrefix(ReplicationTarget target, DataInputStream wal, Path p, Status status, long sizeLimit) throws IOException {
    Set<Integer> tids = new HashSet<>();
    LogFileKey key = new LogFileKey();
    LogFileValue value = new LogFileValue();
    // Track the tids defined earlier in the file, since mutations
    // later on might use that tid
    for (long i = 0; i < status.getBegin(); i++) {
        key.readFields(wal);
        value.readFields(wal);
        switch(key.event) {
            case DEFINE_TABLET:
                if (target.getSourceTableId().equals(key.tablet.getTableId())) {
                    tids.add(key.tid);
                }
                break;
            default:
                break;
        }
    }
    return tids;
}
Also used: LogFileValue(org.apache.accumulo.tserver.logger.LogFileValue) LogFileKey(org.apache.accumulo.tserver.logger.LogFileKey) HashSet(java.util.HashSet)
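The prefix loop above alternates key.readFields(wal) and value.readFields(wal) with no explicit framing, so it only stays aligned because each readFields consumes exactly the bytes the matching write produced. A self-contained round-trip sketch, illustrative rather than from the sources (the filename is a made-up path):

protected void roundTripKey() throws IOException {
    LogFileKey written = new LogFileKey();
    written.event = LogEvents.COMPACTION_START;
    written.seq = 7;
    written.tid = 3;
    written.filename = "/accumulo/wal/F0001.rf"; // hypothetical path, for illustration only
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    written.write(new DataOutputStream(baos));
    // Reading back with readFields restores the same fields and leaves the
    // stream positioned at the next entry, which is what keeps the loop aligned.
    LogFileKey read = new LogFileKey();
    read.readFields(new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));
    assert read.event == LogEvents.COMPACTION_START && read.tid == 3 && read.seq == 7;
}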

Example 3 with LogFileKey

Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.

From the class DfsLogger, method open:

/**
 * Opens a Write-Ahead Log file and writes the necessary header information and OPEN entry to the file. The file is ready to be used for ingest if this method
 * returns successfully. If an exception is thrown from this method, it is the caller's responsibility to ensure that {@link #close()} is called to prevent
 * leaking the file handle and/or syncing thread.
 *
 * @param address
 *          The address of the host using this WAL
 */
public synchronized void open(String address) throws IOException {
    String filename = UUID.randomUUID().toString();
    log.debug("Address is {}", address);
    String logger = Joiner.on("+").join(address.split(":"));
    log.debug("DfsLogger.open() begin");
    VolumeManager fs = conf.getFileSystem();
    VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.LOGGER);
    logPath = fs.choose(chooserEnv, ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.WAL_DIR + Path.SEPARATOR + logger + Path.SEPARATOR + filename;
    metaReference = toString();
    LoggerOperation op = null;
    try {
        short replication = (short) conf.getConfiguration().getCount(Property.TSERV_WAL_REPLICATION);
        if (replication == 0)
            replication = fs.getDefaultReplication(new Path(logPath));
        long blockSize = getWalBlockSize(conf.getConfiguration());
        if (conf.getConfiguration().getBoolean(Property.TSERV_WAL_SYNC))
            logFile = fs.createSyncable(new Path(logPath), 0, replication, blockSize);
        else
            logFile = fs.create(new Path(logPath), true, 0, replication, blockSize);
        sync = logFile.getClass().getMethod("hsync");
        flush = logFile.getClass().getMethod("hflush");
        // Initialize the crypto operations.
        org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule = org.apache.accumulo.core.security.crypto.CryptoModuleFactory.getCryptoModule(conf.getConfiguration().get(Property.CRYPTO_MODULE_CLASS));
        // Initialize the log file with a header and the crypto params used to set up this log file.
        logFile.write(LOG_FILE_HEADER_V3.getBytes(UTF_8));
        CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf.getConfiguration());
        // Immediately update to the correct cipher. Doing this here keeps the CryptoModule independent of the writers using it
        if (params.getAllOptions().get(Property.CRYPTO_WAL_CIPHER_SUITE.getKey()) != null && !params.getAllOptions().get(Property.CRYPTO_WAL_CIPHER_SUITE.getKey()).equals("")) {
            params.setCipherSuite(params.getAllOptions().get(Property.CRYPTO_WAL_CIPHER_SUITE.getKey()));
        }
        NoFlushOutputStream nfos = new NoFlushOutputStream(logFile);
        params.setPlaintextOutputStream(nfos);
        // In order to bootstrap the reading of this file later, we have to record the CryptoModule that was used to encipher it here,
        // so that the same crypto module can re-read its own parameters.
        logFile.writeUTF(conf.getConfiguration().get(Property.CRYPTO_MODULE_CLASS));
        params = cryptoModule.getEncryptingOutputStream(params);
        OutputStream encipheringOutputStream = params.getEncryptedOutputStream();
        // If the crypto module handed back the original stream, no enciphering is
        // happening and we don't need to wrap it in another data OutputStream.
        if (encipheringOutputStream == nfos) {
            log.debug("No enciphering, using raw output stream");
            encryptingLogFile = nfos;
        } else {
            log.debug("Enciphering found, wrapping in DataOutputStream");
            encryptingLogFile = new DataOutputStream(encipheringOutputStream);
        }
        LogFileKey key = new LogFileKey();
        key.event = OPEN;
        key.tserverSession = filename;
        key.filename = filename;
        op = logFileData(Collections.singletonList(new Pair<>(key, EMPTY)), Durability.SYNC);
    } catch (Exception ex) {
        if (logFile != null)
            logFile.close();
        logFile = null;
        encryptingLogFile = null;
        throw new IOException(ex);
    }
    syncThread = new Daemon(new LoggingRunnable(log, new LogSyncingTask()));
    syncThread.setName("Accumulo WALog thread " + toString());
    syncThread.start();
    op.await();
    log.debug("Got new write-ahead log: {}", this);
}
Also used: Path(org.apache.hadoop.fs.Path) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) DataOutputStream(java.io.DataOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) NoFlushOutputStream(org.apache.accumulo.core.security.crypto.NoFlushOutputStream) DFSOutputStream(org.apache.hadoop.hdfs.DFSOutputStream) OutputStream(java.io.OutputStream) LogFileKey(org.apache.accumulo.tserver.logger.LogFileKey) IOException(java.io.IOException) EOFException(java.io.EOFException) ClosedChannelException(java.nio.channels.ClosedChannelException) CryptoModule(org.apache.accumulo.core.security.crypto.CryptoModule) LoggingRunnable(org.apache.accumulo.fate.util.LoggingRunnable) CryptoModuleParameters(org.apache.accumulo.core.security.crypto.CryptoModuleParameters) Daemon(org.apache.accumulo.core.util.Daemon) VolumeChooserEnvironment(org.apache.accumulo.server.fs.VolumeChooserEnvironment)
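The sync and flush handles above are looked up reflectively because hsync and hflush were added to Hadoop's output streams at different points, and Accumulo at this time supported multiple Hadoop lines. A sketch of how such a handle is invoked later; the wrapper name is ours, not DfsLogger's:

private void invokeSync(Method syncMethod, Object stream) throws IOException {
    try {
        // hsync persists buffered WAL bytes to durable storage on filesystems
        // that support it; hflush only pushes them to the datanodes.
        syncMethod.invoke(stream);
    } catch (IllegalAccessException | InvocationTargetException e) {
        throw new IOException("WAL sync failed", e);
    }
}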

Example 4 with LogFileKey

Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.

From the class DfsLogger, method minorCompactionStarted:

public LoggerOperation minorCompactionStarted(long seq, int tid, String fqfn, Durability durability) throws IOException {
    LogFileKey key = new LogFileKey();
    key.event = COMPACTION_START;
    key.seq = seq;
    key.tid = tid;
    key.filename = fqfn;
    return logFileData(Collections.singletonList(new Pair<>(key, EMPTY)), durability);
}
Also used: LogFileKey(org.apache.accumulo.tserver.logger.LogFileKey) Pair(org.apache.accumulo.core.util.Pair)
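Log recovery pairs this entry with a matching COMPACTION_FINISH to decide which mutations were already persisted to RFiles by the minor compaction. A sketch of the companion finish record, mirroring the method above; the exact upstream signature may differ, and we assume the finish record carries no filename since it pairs with the start by tid:

public LoggerOperation minorCompactionFinished(long seq, int tid, Durability durability) throws IOException {
    LogFileKey key = new LogFileKey();
    key.event = COMPACTION_FINISH;
    key.seq = seq;
    key.tid = tid;
    // Assumption: no filename is recorded here; the finish is matched to the
    // start record for the same tid during log recovery.
    return logFileData(Collections.singletonList(new Pair<>(key, EMPTY)), durability);
}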

Example 5 with LogFileKey

Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.

From the class DfsLogger, method defineTablet:

public synchronized void defineTablet(long seq, int tid, KeyExtent tablet) throws IOException {
    // write this log to the METADATA table
    final LogFileKey key = new LogFileKey();
    key.event = DEFINE_TABLET;
    key.seq = seq;
    key.tid = tid;
    key.tablet = tablet;
    try {
        write(key, EMPTY);
    } catch (IllegalArgumentException e) {
        log.error("Signature of sync method changed. Accumulo is likely incompatible with this version of Hadoop.");
        throw new RuntimeException(e);
    }
}
Also used: LogFileKey(org.apache.accumulo.tserver.logger.LogFileKey)
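Ordering matters here: the DEFINE_TABLET entry must be written before any MUTATION or MANY_MUTATIONS entry that uses the same tid, because readers such as getWalEdits in Example 1 learn the tid-to-tablet binding only from DEFINE_TABLET entries. A hypothetical caller sketch (logger, extent, seq, and tid are assumed names, not Accumulo API):

protected void startLoggingTablet(DfsLogger logger, KeyExtent extent, long seq, int tid) throws IOException {
    // Bind the tid to the tablet's extent first...
    logger.defineTablet(seq, tid, extent);
    // ...then subsequent mutation entries for this tablet reuse the same tid.
}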

Aggregations

LogFileKey (org.apache.accumulo.tserver.logger.LogFileKey) 16 usages
LogFileValue (org.apache.accumulo.tserver.logger.LogFileValue) 10 usages
DataOutputStream (java.io.DataOutputStream) 8 usages
ServerMutation (org.apache.accumulo.server.data.ServerMutation) 7 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 6 usages
Test (org.junit.Test) 6 usages
Mutation (org.apache.accumulo.core.data.Mutation) 5 usages
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent) 5 usages
Path (org.apache.hadoop.fs.Path) 5 usages
DataInputStream (java.io.DataInputStream) 4 usages
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig) 4 usages
Status (org.apache.accumulo.server.replication.proto.Replication.Status) 4 usages
Text (org.apache.hadoop.io.Text) 4 usages
ByteArrayInputStream (java.io.ByteArrayInputStream) 3 usages
HashMap (java.util.HashMap) 3 usages
HashSet (java.util.HashSet) 3 usages
AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration) 3 usages
ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy) 3 usages
ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget) 3 usages
WalEdits (org.apache.accumulo.core.replication.thrift.WalEdits) 3 usages