Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.
The class AccumuloReplicaSystem, method getWalEdits.
protected WalReplication getWalEdits(ReplicationTarget target, DataInputStream wal, Path p, Status status, long sizeLimit, Set<Integer> desiredTids) throws IOException {
  WalEdits edits = new WalEdits();
  edits.edits = new ArrayList<>();
  long size = 0L;
  long entriesConsumed = 0L;
  long numUpdates = 0L;
  LogFileKey key = new LogFileKey();
  LogFileValue value = new LogFileValue();
  while (size < sizeLimit) {
    try {
      key.readFields(wal);
      value.readFields(wal);
    } catch (EOFException e) {
      log.debug("Caught EOFException reading {}", p);
      if (status.getInfiniteEnd() && status.getClosed()) {
        log.debug("{} is closed and has unknown length, assuming entire file has been consumed", p);
        entriesConsumed = Long.MAX_VALUE;
      }
      break;
    }
    entriesConsumed++;
    switch (key.event) {
      case DEFINE_TABLET:
        // For new DEFINE_TABLETs, we also need to record the new tids we see
        if (target.getSourceTableId().equals(key.tablet.getTableId())) {
          desiredTids.add(key.tid);
        }
        break;
      case MUTATION:
      case MANY_MUTATIONS:
        // Only write out mutations for tids that belong to the desired table
        if (desiredTids.contains(key.tid)) {
          ByteArrayOutputStream baos = new ByteArrayOutputStream();
          DataOutputStream out = new DataOutputStream(baos);
          key.write(out);
          // Only write out the mutations that don't have the given ReplicationTarget
          // as a replication source (this prevents infinite replication loops: a->b, b->a, repeat)
          numUpdates += writeValueAvoidingReplicationCycles(out, value, target);
          out.flush();
          byte[] data = baos.toByteArray();
          size += data.length;
          edits.addToEdits(ByteBuffer.wrap(data));
        }
        break;
      default:
        log.trace("Ignoring WAL entry which doesn't contain mutations; should not have received such entries");
        break;
    }
  }
  return new WalReplication(edits, size, entriesConsumed, numUpdates);
}
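The loop above relies on LogFileKey and LogFileValue following the Hadoop Writable contract: whatever write() produces, readFields() can consume from the same position in the stream. A minimal round-trip sketch under that assumption (the COMPACTION_START field choices mirror minorCompactionStarted further down; the filename is illustrative only):

LogFileKey key = new LogFileKey();
key.event = COMPACTION_START;
key.seq = 1;
key.tid = 42;
key.filename = "hdfs://namenode/accumulo/tables/1/t-0001/F0000001.rf";
// Serialize the key into a buffer...
ByteArrayOutputStream baos = new ByteArrayOutputStream();
key.write(new DataOutputStream(baos));
// ...and read it back into a fresh key.
LogFileKey copy = new LogFileKey();
copy.readFields(new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));
// copy.seq == 1, copy.tid == 42, and copy.filename matches the original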
Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.
The class AccumuloReplicaSystem, method consumeWalPrefix.
protected Set<Integer> consumeWalPrefix(ReplicationTarget target, DataInputStream wal, Path p, Status status, long sizeLimit) throws IOException {
  LogFileKey key = new LogFileKey();
  LogFileValue value = new LogFileValue();
  Set<Integer> desiredTids = new HashSet<>();
  // Read through the entries already consumed by a previous replication attempt, tracking the
  // tids defined for the source table, since mutations later in the file may reference those tids
  for (long i = 0; i < status.getBegin(); i++) {
    key.readFields(wal);
    value.readFields(wal);
    switch (key.event) {
      case DEFINE_TABLET:
        if (target.getSourceTableId().equals(key.tablet.getTableId())) {
          desiredTids.add(key.tid);
        }
        break;
      default:
        break;
    }
  }
  return desiredTids;
}
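Taken together, the two methods above suggest a two-phase read of a WAL during replication: first skip the prefix that a previous attempt already shipped (collecting the tids bound to the source table along the way), then gather new edits for exactly those tids. A hypothetical driver, for illustration only; the method name replicate is an assumption, not part of AccumuloReplicaSystem's shown API:

protected WalReplication replicate(ReplicationTarget target, DataInputStream wal, Path p, Status status, long sizeLimit) throws IOException {
  // Phase 1: fast-forward past status.getBegin() entries, remembering which
  // tids were bound to the source table by DEFINE_TABLET events.
  Set<Integer> desiredTids = consumeWalPrefix(target, wal, p, status, sizeLimit);
  // Phase 2: collect up to sizeLimit bytes of edits for those tids.
  return getWalEdits(target, wal, p, status, sizeLimit, desiredTids);
}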
Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.
The class DfsLogger, method open.
/**
 * Opens a Write-Ahead Log file and writes the necessary header information and OPEN entry to the file. The file is ready to be used for ingest if this method
 * returns successfully. If an exception is thrown from this method, it is the caller's responsibility to ensure that {@link #close()} is called to prevent
 * leaking the file handle and/or syncing thread.
 *
 * @param address
 *          The address of the host using this WAL
 */
public synchronized void open(String address) throws IOException {
  String filename = UUID.randomUUID().toString();
  log.debug("Address is {}", address);
  String logger = Joiner.on("+").join(address.split(":"));
  log.debug("DfsLogger.open() begin");
  VolumeManager fs = conf.getFileSystem();
  VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.LOGGER);
  logPath = fs.choose(chooserEnv, ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.WAL_DIR + Path.SEPARATOR + logger + Path.SEPARATOR + filename;
  metaReference = toString();
  LoggerOperation op = null;
  try {
    short replication = (short) conf.getConfiguration().getCount(Property.TSERV_WAL_REPLICATION);
    if (replication == 0)
      replication = fs.getDefaultReplication(new Path(logPath));
    long blockSize = getWalBlockSize(conf.getConfiguration());
    if (conf.getConfiguration().getBoolean(Property.TSERV_WAL_SYNC))
      logFile = fs.createSyncable(new Path(logPath), 0, replication, blockSize);
    else
      logFile = fs.create(new Path(logPath), true, 0, replication, blockSize);
    sync = logFile.getClass().getMethod("hsync");
    flush = logFile.getClass().getMethod("hflush");
    // Initialize the crypto operations.
    org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule = org.apache.accumulo.core.security.crypto.CryptoModuleFactory.getCryptoModule(conf.getConfiguration().get(Property.CRYPTO_MODULE_CLASS));
    // Initialize the log file with a header and the crypto params used to set up this log file.
    logFile.write(LOG_FILE_HEADER_V3.getBytes(UTF_8));
    CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf.getConfiguration());
    // Immediately update to the correct cipher. Doing this here keeps the CryptoModule independent of the writers using it.
    if (params.getAllOptions().get(Property.CRYPTO_WAL_CIPHER_SUITE.getKey()) != null && !params.getAllOptions().get(Property.CRYPTO_WAL_CIPHER_SUITE.getKey()).equals("")) {
      params.setCipherSuite(params.getAllOptions().get(Property.CRYPTO_WAL_CIPHER_SUITE.getKey()));
    }
    NoFlushOutputStream nfos = new NoFlushOutputStream(logFile);
    params.setPlaintextOutputStream(nfos);
    // In order to bootstrap the reading of this file later, we have to record the CryptoModule that was used to encipher it here,
    // so that the crypto module can re-read its own parameters.
    logFile.writeUTF(conf.getConfiguration().get(Property.CRYPTO_MODULE_CLASS));
    params = cryptoModule.getEncryptingOutputStream(params);
    OutputStream encipheringOutputStream = params.getEncryptedOutputStream();
    // If the module just handed back our original stream, no enciphering is happening and we don't need to wrap it in
    // another data OutputStream.
    if (encipheringOutputStream == nfos) {
      log.debug("No enciphering, using raw output stream");
      encryptingLogFile = nfos;
    } else {
      log.debug("Enciphering found, wrapping in DataOutputStream");
      encryptingLogFile = new DataOutputStream(encipheringOutputStream);
    }
    LogFileKey key = new LogFileKey();
    key.event = OPEN;
    key.tserverSession = filename;
    key.filename = filename;
    op = logFileData(Collections.singletonList(new Pair<>(key, EMPTY)), Durability.SYNC);
  } catch (Exception ex) {
    if (logFile != null)
      logFile.close();
    logFile = null;
    encryptingLogFile = null;
    throw new IOException(ex);
  }
  syncThread = new Daemon(new LoggingRunnable(log, new LogSyncingTask()));
  syncThread.setName("Accumulo WALog thread " + toString());
  syncThread.start();
  op.await();
  log.debug("Got new write-ahead log: {}", this);
}
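Per the Javadoc contract above, a caller that sees open() throw must still close() the logger. A hedged usage sketch; how a DfsLogger instance is obtained here is assumed, not taken from the project:

DfsLogger logger = ...; // obtain an unopened DfsLogger; construction details are assumed
try {
  logger.open("tserver.example.com:9997"); // hypothetical host:port address
} catch (IOException e) {
  logger.close(); // required on failure to avoid leaking the file handle and sync thread
  throw e;
}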
Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.
The class DfsLogger, method minorCompactionStarted.
public LoggerOperation minorCompactionStarted(long seq, int tid, String fqfn, Durability durability) throws IOException {
  LogFileKey key = new LogFileKey();
  key.event = COMPACTION_START;
  key.seq = seq;
  key.tid = tid;
  key.filename = fqfn;
  return logFileData(Collections.singletonList(new Pair<>(key, EMPTY)), durability);
}
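A COMPACTION_START entry is naturally paired with a finish record once the minor compaction completes. A sketch of what that counterpart could look like, assuming a COMPACTION_FINISH event exists alongside COMPACTION_START; this body is illustrative, not the project's code:

public LoggerOperation minorCompactionFinished(long seq, int tid, Durability durability) throws IOException {
  LogFileKey key = new LogFileKey();
  key.event = COMPACTION_FINISH; // assumed counterpart to COMPACTION_START
  key.seq = seq;
  key.tid = tid;
  return logFileData(Collections.singletonList(new Pair<>(key, EMPTY)), durability);
}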
Use of org.apache.accumulo.tserver.logger.LogFileKey in project accumulo by apache.
The class DfsLogger, method defineTablet.
public synchronized void defineTablet(long seq, int tid, KeyExtent tablet) throws IOException {
  // write this log to the METADATA table
  final LogFileKey key = new LogFileKey();
  key.event = DEFINE_TABLET;
  key.seq = seq;
  key.tid = tid;
  key.tablet = tablet;
  try {
    write(key, EMPTY);
  } catch (IllegalArgumentException e) {
    log.error("Signature of sync method changed. Accumulo is likely incompatible with this version of Hadoop.");
    throw new RuntimeException(e);
  }
}
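On the read side, DEFINE_TABLET is what lets a consumer map each mutation's tid back to a tablet, exactly as getWalEdits does above. A minimal sketch of that resolution loop, assuming the same Writable layout and the LogFileKey fields shown in these examples:

Map<Integer,KeyExtent> tidToTablet = new HashMap<>();
LogFileKey key = new LogFileKey();
LogFileValue value = new LogFileValue();
while (true) {
  try {
    key.readFields(wal);
    value.readFields(wal);
  } catch (EOFException e) {
    break; // end of the WAL
  }
  if (key.event == DEFINE_TABLET) {
    tidToTablet.put(key.tid, key.tablet); // bind the tid to its tablet
  } else if (key.event == MUTATION || key.event == MANY_MUTATIONS) {
    KeyExtent extent = tidToTablet.get(key.tid); // tablet these mutations belong to
    // apply the mutations carried in value to this extent ...
  }
}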