Use of org.apache.accumulo.core.crypto.CryptoEnvironmentImpl in project accumulo by apache.
Class DfsLogger, method open().
/**
 * Opens a Write-Ahead Log file and writes the necessary header information and OPEN entry to the
 * file. The file is ready to be used for ingest if this method returns successfully. If an
 * exception is thrown from this method, it is the caller's responsibility to ensure that
 * {@link #close()} is called to prevent leaking the file handle and/or syncing thread.
 *
 * @param address
 *          The address of the host using this WAL
 */
public synchronized void open(String address) throws IOException {
  String filename = UUID.randomUUID().toString();
  log.debug("Address is {}", address);
  String logger = Joiner.on("+").join(address.split(":"));

  log.debug("DfsLogger.open() begin");
  VolumeManager fs = conf.getVolumeManager();

  var chooserEnv = new VolumeChooserEnvironmentImpl(
      org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment.Scope.LOGGER, context);
  logPath = fs.choose(chooserEnv, context.getBaseUris()) + Path.SEPARATOR + Constants.WAL_DIR
      + Path.SEPARATOR + logger + Path.SEPARATOR + filename;

  metaReference = toString();
  LoggerOperation op = null;
  try {
    Path logfilePath = new Path(logPath);
    short replication = (short) conf.getConfiguration().getCount(Property.TSERV_WAL_REPLICATION);
    if (replication == 0)
      replication = fs.getDefaultReplication(logfilePath);
    long blockSize = getWalBlockSize(conf.getConfiguration());
    if (conf.getConfiguration().getBoolean(Property.TSERV_WAL_SYNC))
      logFile = fs.createSyncable(logfilePath, 0, replication, blockSize);
    else
      logFile = fs.create(logfilePath, true, 0, replication, blockSize);
    // check again that logfile can be sync'd
    if (!fs.canSyncAndFlush(logfilePath)) {
      log.warn("sync not supported for log file {}. Data loss may occur.", logPath);
    }

    // Initialize the log file with a header and its encryption
    CryptoService cryptoService = context.getCryptoService();
    logFile.write(LOG_FILE_HEADER_V4.getBytes(UTF_8));

    log.debug("Using {} for encrypting WAL {}", cryptoService.getClass().getSimpleName(),
        filename);
    CryptoEnvironment env = new CryptoEnvironmentImpl(Scope.WAL, null);
    FileEncrypter encrypter = cryptoService.getFileEncrypter(env);
    byte[] cryptoParams = encrypter.getDecryptionParameters();
    CryptoUtils.writeParams(cryptoParams, logFile);

    /**
     * Always wrap the WAL in a NoFlushOutputStream to prevent extra flushing to HDFS. The
     * {@link #write(LogFileKey, LogFileValue)} method will flush crypto data or do nothing when
     * crypto is not enabled.
     */
    OutputStream encryptedStream = encrypter.encryptStream(new NoFlushOutputStream(logFile));
    if (encryptedStream instanceof NoFlushOutputStream) {
      encryptingLogFile = (NoFlushOutputStream) encryptedStream;
    } else {
      encryptingLogFile = new DataOutputStream(encryptedStream);
    }

    LogFileKey key = new LogFileKey();
    key.event = OPEN;
    key.tserverSession = filename;
    key.filename = filename;
    op = logKeyData(key, Durability.SYNC);
  } catch (Exception ex) {
    if (logFile != null)
      logFile.close();
    logFile = null;
    encryptingLogFile = null;
    throw new IOException(ex);
  }

  syncThread = Threads.createThread("Accumulo WALog thread " + this, new LogSyncingTask());
  syncThread.start();
  op.await();
  log.debug("Got new write-ahead log: {}", this);
}
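
The Javadoc above requires the caller to invoke close() if open() throws, so the file handle and syncing thread are not leaked. A minimal sketch of that call pattern follows; the helper name openOrCleanUp is made up for illustration, and only open(String) and close() are taken from the snippet above.

static void openOrCleanUp(DfsLogger walog, String address) throws IOException {
  try {
    walog.open(address); // writes the header, crypto params, and the OPEN entry
  } catch (IOException e) {
    try {
      walog.close(); // caller's responsibility per the Javadoc above
    } catch (Exception closeEx) {
      e.addSuppressed(closeEx);
    }
    throw e;
  }
}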
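
On the read side, CryptoEnvironmentImpl (from org.apache.accumulo.core.crypto) is constructed again, this time carrying the decryption parameters that open() wrote right after the plaintext header. The following is a rough sketch of that counterpart, not code from the project: the decryptWal name and the expectedHeader parameter are invented for illustration, and CryptoUtils.readParams, the CryptoEnvironmentImpl constructor taking decryption parameters, getFileDecrypter, and decryptStream are assumed to mirror the encrypt-side calls above. Verify the exact signatures against the Accumulo version in use.

// expectedHeader: LOG_FILE_HEADER_V4.getBytes(UTF_8), i.e. the same bytes open() wrote first.
static DataInputStream decryptWal(CryptoService cryptoService, InputStream rawWal,
    byte[] expectedHeader) throws IOException {
  DataInputStream in = new DataInputStream(rawWal);
  byte[] header = new byte[expectedHeader.length];
  in.readFully(header); // consume the plaintext header written by open()
  byte[] decryptionParams = CryptoUtils.readParams(in); // assumed counterpart of writeParams
  CryptoEnvironment env = new CryptoEnvironmentImpl(Scope.WAL, decryptionParams); // assumed ctor
  FileDecrypter decrypter = cryptoService.getFileDecrypter(env);
  return new DataInputStream(decrypter.decryptStream(in));
}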