Example usage of org.apache.accumulo.core.security.crypto.NoFlushOutputStream in the Apache Accumulo project:
the DfsLogger class, method open().
/**
 * Opens a Write-Ahead Log file and writes the necessary header information and OPEN entry to the
 * file. The file is ready to be used for ingest if this method returns successfully. If an
 * exception is thrown from this method, it is the caller's responsibility to ensure that
 * {@link #close()} is called to prevent leaking the file handle and/or syncing thread.
 *
 * @param address
 *          The address of the host using this WAL, in {@code host:port} form
 * @throws IOException
 *           if the log file cannot be created, the header cannot be written, or the initial OPEN
 *           entry cannot be logged
 */
public synchronized void open(String address) throws IOException {
  String filename = UUID.randomUUID().toString();
  log.debug("Address is {}", address);
  // Colons are not usable in the path; encode host:port as host+port.
  String logger = Joiner.on("+").join(address.split(":"));
  log.debug("DfsLogger.open() begin");
  VolumeManager fs = conf.getFileSystem();
  VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.LOGGER);
  logPath = fs.choose(chooserEnv, ServerConstants.getBaseUris()) + Path.SEPARATOR
      + ServerConstants.WAL_DIR + Path.SEPARATOR + logger + Path.SEPARATOR + filename;
  metaReference = toString();
  LoggerOperation op = null;
  try {
    short replication = (short) conf.getConfiguration().getCount(Property.TSERV_WAL_REPLICATION);
    if (replication == 0)
      replication = fs.getDefaultReplication(new Path(logPath));
    long blockSize = getWalBlockSize(conf.getConfiguration());
    if (conf.getConfiguration().getBoolean(Property.TSERV_WAL_SYNC))
      logFile = fs.createSyncable(new Path(logPath), 0, replication, blockSize);
    else
      logFile = fs.create(new Path(logPath), true, 0, replication, blockSize);
    // Look up hsync/hflush reflectively; the concrete stream type (and which durability
    // methods it supports) depends on the underlying filesystem implementation.
    sync = logFile.getClass().getMethod("hsync");
    flush = logFile.getClass().getMethod("hflush");

    // Initialize the crypto operations.
    org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule =
        org.apache.accumulo.core.security.crypto.CryptoModuleFactory
            .getCryptoModule(conf.getConfiguration().get(Property.CRYPTO_MODULE_CLASS));

    // Initialize the log file with a header and the crypto params used to set up this log file.
    logFile.write(LOG_FILE_HEADER_V3.getBytes(UTF_8));

    CryptoModuleParameters params =
        CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf.getConfiguration());

    // Immediately update to the correct cipher. Doing this here keeps the CryptoModule
    // independent of the writers using it.
    String walCipherSuite = params.getAllOptions().get(Property.CRYPTO_WAL_CIPHER_SUITE.getKey());
    if (walCipherSuite != null && !walCipherSuite.equals("")) {
      params.setCipherSuite(walCipherSuite);
    }

    // Wrap in NoFlushOutputStream so the crypto layer's flushes don't force partial-block
    // writes to the underlying WAL stream.
    NoFlushOutputStream nfos = new NoFlushOutputStream(logFile);
    params.setPlaintextOutputStream(nfos);

    // In order to bootstrap the reading of this file later, we have to record the CryptoModule
    // that was used to encipher it here, so that that crypto module can re-read its own
    // parameters. NOTE: ordering matters — the plaintext header and module class name must be
    // written before the stream is switched to the (possibly enciphering) wrapper below.
    logFile.writeUTF(conf.getConfiguration().get(Property.CRYPTO_MODULE_CLASS));

    params = cryptoModule.getEncryptingOutputStream(params);
    OutputStream encipheringOutputStream = params.getEncryptedOutputStream();

    // If the module returned our own stream unchanged, no enciphering is configured; otherwise
    // wrap the enciphering stream in another data OutputStream.
    if (encipheringOutputStream == nfos) {
      log.debug("No enciphering, using raw output stream");
      encryptingLogFile = nfos;
    } else {
      log.debug("Enciphering found, wrapping in DataOutputStream");
      encryptingLogFile = new DataOutputStream(encipheringOutputStream);
    }

    // Record the OPEN entry so readers can identify this WAL and its originating session.
    LogFileKey key = new LogFileKey();
    key.event = OPEN;
    key.tserverSession = filename;
    key.filename = filename;
    op = logFileData(Collections.singletonList(new Pair<>(key, EMPTY)), Durability.SYNC);
  } catch (Exception ex) {
    // Best-effort cleanup: don't let a failed close() mask the original failure.
    if (logFile != null) {
      try {
        logFile.close();
      } catch (Exception closeEx) {
        ex.addSuppressed(closeEx);
      }
    }
    logFile = null;
    encryptingLogFile = null;
    // Preserve the original exception type instead of double-wrapping IOExceptions.
    if (ex instanceof IOException) {
      throw (IOException) ex;
    }
    throw new IOException(ex);
  }
  syncThread = new Daemon(new LoggingRunnable(log, new LogSyncingTask()));
  syncThread.setName("Accumulo WALog thread " + toString());
  syncThread.start();
  // Wait for the OPEN entry to be durably written before declaring the WAL ready.
  op.await();
  log.debug("Got new write-ahead log: {}", this);
}
Aggregations