use of org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl in project accumulo by apache.
the class FileSystemInitializer method initialize.
void initialize(VolumeManager fs, String rootTabletDirUri, String rootTabletFileUri,
    ServerContext context) throws IOException, InterruptedException, KeeperException {
  SiteConfiguration siteConfig = initConfig.getSiteConf();
  // initialize initial system tables config in zookeeper
  initSystemTablesConfig();
  Text splitPoint = MetadataSchema.TabletsSection.getRange().getEndKey().getRow();

  VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironmentImpl(
      VolumeChooserEnvironment.Scope.INIT, MetadataTable.ID, splitPoint, context);
  String tableMetadataTabletDirName = TABLE_TABLETS_TABLET_DIR;
  String tableMetadataTabletDirUri = fs.choose(chooserEnv, context.getBaseUris())
      + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID + Path.SEPARATOR
      + tableMetadataTabletDirName;

  chooserEnv = new VolumeChooserEnvironmentImpl(VolumeChooserEnvironment.Scope.INIT,
      REPL_TABLE_ID, null, context);
  String replicationTableDefaultTabletDirName =
      MetadataSchema.TabletsSection.ServerColumnFamily.DEFAULT_TABLET_DIR_NAME;
  String replicationTableDefaultTabletDirUri = fs.choose(chooserEnv, context.getBaseUris())
      + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + REPL_TABLE_ID + Path.SEPARATOR
      + replicationTableDefaultTabletDirName;

  chooserEnv = new VolumeChooserEnvironmentImpl(VolumeChooserEnvironment.Scope.INIT,
      MetadataTable.ID, null, context);
  String defaultMetadataTabletDirName =
      MetadataSchema.TabletsSection.ServerColumnFamily.DEFAULT_TABLET_DIR_NAME;
  String defaultMetadataTabletDirUri = fs.choose(chooserEnv, context.getBaseUris())
      + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID + Path.SEPARATOR
      + defaultMetadataTabletDirName;

  // create table and default tablets directories
  createDirectories(fs, rootTabletDirUri, tableMetadataTabletDirUri, defaultMetadataTabletDirUri,
      replicationTableDefaultTabletDirUri);
  String ext = FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());

  // populate the metadata tablet with info about the replication tablet
  String metadataFileName = tableMetadataTabletDirUri + Path.SEPARATOR + "0_1." + ext;
  Tablet replicationTablet =
      new Tablet(REPL_TABLE_ID, replicationTableDefaultTabletDirName, null, null);
  createMetadataFile(fs, metadataFileName, siteConfig, replicationTablet);

  // populate the root tablet with info about the metadata table's two initial tablets
  Tablet tablesTablet =
      new Tablet(MetadataTable.ID, tableMetadataTabletDirName, null, splitPoint, metadataFileName);
  Tablet defaultTablet =
      new Tablet(MetadataTable.ID, defaultMetadataTabletDirName, splitPoint, null);
  createMetadataFile(fs, rootTabletFileUri, siteConfig, tablesTablet, defaultTablet);
}
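Each of the three tablet directory URIs above is derived the same way: a VolumeChooserEnvironmentImpl is built for the INIT scope and a table, the VolumeManager picks one of the base URIs, and the table id plus tablet directory name are appended. A minimal sketch of that shared pattern, using only the calls shown above (the helper name tabletDirUri and its TableId parameter type are assumptions for illustration, not part of Accumulo):

// Hypothetical helper illustrating the URI pattern repeated in initialize() above.
private static String tabletDirUri(VolumeManager fs, ServerContext context,
    VolumeChooserEnvironment chooserEnv, TableId tableId, String tabletDirName) {
  // choose() picks a single volume from the configured base URIs for this environment
  return fs.choose(chooserEnv, context.getBaseUris()) + Constants.HDFS_TABLES_DIR
      + Path.SEPARATOR + tableId + Path.SEPARATOR + tabletDirName;
}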
use of org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl in project accumulo by apache.
the class Tablet method chooseTabletDir.
private String chooseTabletDir() throws IOException {
  VolumeChooserEnvironment chooserEnv =
      new VolumeChooserEnvironmentImpl(extent.tableId(), extent.endRow(), context);
  String dirUri = tabletServer.getVolumeManager().choose(chooserEnv, context.getBaseUris())
      + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + extent.tableId() + Path.SEPARATOR + dirName;
  checkTabletDir(new Path(dirUri));
  return dirUri;
}
use of org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl in project accumulo by apache.
the class DfsLogger method open.
/**
 * Opens a Write-Ahead Log file and writes the necessary header information and OPEN entry to the
 * file. The file is ready to be used for ingest if this method returns successfully. If an
 * exception is thrown from this method, it is the caller's responsibility to ensure that
 * {@link #close()} is called to prevent leaking the file handle and/or syncing thread.
 *
 * @param address
 *          The address of the host using this WAL
 */
public synchronized void open(String address) throws IOException {
  String filename = UUID.randomUUID().toString();
  log.debug("Address is {}", address);
  String logger = Joiner.on("+").join(address.split(":"));
  log.debug("DfsLogger.open() begin");
  VolumeManager fs = conf.getVolumeManager();
  var chooserEnv = new VolumeChooserEnvironmentImpl(
      org.apache.accumulo.core.spi.fs.VolumeChooserEnvironment.Scope.LOGGER, context);
  logPath = fs.choose(chooserEnv, context.getBaseUris()) + Path.SEPARATOR + Constants.WAL_DIR
      + Path.SEPARATOR + logger + Path.SEPARATOR + filename;
  metaReference = toString();
  LoggerOperation op = null;
  try {
    Path logfilePath = new Path(logPath);
    short replication = (short) conf.getConfiguration().getCount(Property.TSERV_WAL_REPLICATION);
    if (replication == 0)
      replication = fs.getDefaultReplication(logfilePath);
    long blockSize = getWalBlockSize(conf.getConfiguration());
    if (conf.getConfiguration().getBoolean(Property.TSERV_WAL_SYNC))
      logFile = fs.createSyncable(logfilePath, 0, replication, blockSize);
    else
      logFile = fs.create(logfilePath, true, 0, replication, blockSize);
    // check again that logfile can be sync'd
    if (!fs.canSyncAndFlush(logfilePath)) {
      log.warn("sync not supported for log file {}. Data loss may occur.", logPath);
    }
    // Initialize the log file with a header and its encryption
    CryptoService cryptoService = context.getCryptoService();
    logFile.write(LOG_FILE_HEADER_V4.getBytes(UTF_8));
    log.debug("Using {} for encrypting WAL {}", cryptoService.getClass().getSimpleName(),
        filename);
    CryptoEnvironment env = new CryptoEnvironmentImpl(Scope.WAL, null);
    FileEncrypter encrypter = cryptoService.getFileEncrypter(env);
    byte[] cryptoParams = encrypter.getDecryptionParameters();
    CryptoUtils.writeParams(cryptoParams, logFile);
    /**
     * Always wrap the WAL in a NoFlushOutputStream to prevent extra flushing to HDFS. The
     * {@link #write(LogFileKey, LogFileValue)} method will flush crypto data or do nothing when
     * crypto is not enabled.
     */
    OutputStream encryptedStream = encrypter.encryptStream(new NoFlushOutputStream(logFile));
    if (encryptedStream instanceof NoFlushOutputStream) {
      encryptingLogFile = (NoFlushOutputStream) encryptedStream;
    } else {
      encryptingLogFile = new DataOutputStream(encryptedStream);
    }
    LogFileKey key = new LogFileKey();
    key.event = OPEN;
    key.tserverSession = filename;
    key.filename = filename;
    op = logKeyData(key, Durability.SYNC);
  } catch (Exception ex) {
    if (logFile != null)
      logFile.close();
    logFile = null;
    encryptingLogFile = null;
    throw new IOException(ex);
  }
  syncThread = Threads.createThread("Accumulo WALog thread " + this, new LogSyncingTask());
  syncThread.start();
  op.await();
  log.debug("Got new write-ahead log: {}", this);
}
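Per the javadoc above, the caller owns cleanup when open() fails. A minimal sketch of that contract, not taken from the Accumulo source (the wrapper method openWal and its variable names are illustrative only):

// Illustrative sketch: close the logger if open() throws, per the documented contract.
void openWal(DfsLogger wal, String address) throws IOException {
  try {
    wal.open(address);
  } catch (IOException e) {
    wal.close();  // release the file handle and syncing thread before rethrowing
    throw e;
  }
}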
use of org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl in project accumulo by apache.
the class Initialize method doInit.
private boolean doInit(ZooReaderWriter zoo, Opts opts, VolumeManager fs,
    InitialConfiguration initConfig) {
  String instanceNamePath;
  String instanceName;
  String rootUser;
  try {
    checkInit(zoo, fs, initConfig);
    // prompt user for instance name and root password early, in case they
    // abort, we don't leave an inconsistent HDFS/ZooKeeper structure
    instanceNamePath = getInstanceNamePath(zoo, opts);
    rootUser = getRootUserName(initConfig, opts);
    // Don't prompt for a password when we're running SASL (Kerberos)
    if (initConfig.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
      opts.rootpass = UUID.randomUUID().toString().getBytes(UTF_8);
    } else {
      opts.rootpass = getRootPassword(initConfig, opts, rootUser);
    }
    // the actual disk locations of the root table and tablets
    instanceName = instanceNamePath.substring(getInstanceNamePrefix().length());
  } catch (Exception e) {
    log.error("FATAL: Problem during initialize", e);
    return false;
  }
  InstanceId iid = InstanceId.of(UUID.randomUUID());
  try (ServerContext context =
      ServerContext.initialize(initConfig.getSiteConf(), instanceName, iid)) {
    var chooserEnv = new VolumeChooserEnvironmentImpl(Scope.INIT, RootTable.ID, null, context);
    String rootTabletDirName = RootTable.ROOT_TABLET_DIR_NAME;
    String ext = FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());
    String rootTabletFileUri = new Path(fs.choose(chooserEnv, initConfig.getVolumeUris())
        + SEPARATOR + TABLE_DIR + SEPARATOR + RootTable.ID + SEPARATOR + rootTabletDirName
        + SEPARATOR + "00000_00000." + ext).toString();
    ZooKeeperInitializer zki = new ZooKeeperInitializer();
    zki.initialize(zoo, opts.clearInstanceName, iid, instanceNamePath, rootTabletDirName,
        rootTabletFileUri);
    if (!createDirs(fs, iid, initConfig.getVolumeUris())) {
      throw new IOException("Problem creating directories on " + fs.getVolumes());
    }
    var fileSystemInitializer = new FileSystemInitializer(initConfig, zoo, iid);
    var rootVol = fs.choose(chooserEnv, initConfig.getVolumeUris());
    var rootPath = new Path(rootVol + SEPARATOR + TABLE_DIR + SEPARATOR + RootTable.ID
        + SEPARATOR + rootTabletDirName);
    fileSystemInitializer.initialize(fs, rootPath.toString(), rootTabletFileUri, context);
    checkSASL(initConfig);
    initSecurity(context, opts, rootUser);
    checkUploadProps(context, initConfig, opts);
  } catch (Exception e) {
    log.error("FATAL: Problem during initialize", e);
    return false;
  }
  return true;
}
use of org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl in project accumulo by apache.
the class TabletServer method checkWalCanSync.
private static void checkWalCanSync(ServerContext context) {
  VolumeChooserEnvironment chooserEnv =
      new VolumeChooserEnvironmentImpl(VolumeChooserEnvironment.Scope.LOGGER, context);
  Set<String> prefixes;
  var options = context.getBaseUris();
  try {
    prefixes = context.getVolumeManager().choosable(chooserEnv, options);
  } catch (RuntimeException e) {
    log.warn("Unable to determine if WAL directories ({}) support sync or flush. "
        + "Data loss may occur.", Arrays.asList(options), e);
    return;
  }
  boolean warned = false;
  for (String prefix : prefixes) {
    String logPath = prefix + Path.SEPARATOR + Constants.WAL_DIR;
    if (!context.getVolumeManager().canSyncAndFlush(new Path(logPath))) {
      // pause briefly before the first warning so it is more visible at startup
      if (!warned) {
        UtilWaitThread.sleep(5000);
        warned = true;
      }
      log.warn("WAL directory ({}) implementation does not support sync or flush."
          + " Data loss may occur.", logPath);
    }
  }
}
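Taken together, the snippets above use three different VolumeChooserEnvironmentImpl constructor forms: a per-tablet form, a scoped form for initialization, and a scope-only form for write-ahead logs. The recap below collects them in one place; it is illustrative only (the method name and local variable names are not from the Accumulo source), and the arguments reuse names from the examples above:

// Recap of the constructor forms used in the surrounding examples (illustrative only).
static void volumeChooserEnvironmentForms(KeyExtent extent, Text splitPoint,
    ServerContext context) {
  // per-tablet form, as in Tablet.chooseTabletDir()
  var perTabletEnv = new VolumeChooserEnvironmentImpl(extent.tableId(), extent.endRow(), context);
  // scoped form used during initialization, as in FileSystemInitializer.initialize()
  var initEnv = new VolumeChooserEnvironmentImpl(VolumeChooserEnvironment.Scope.INIT,
      MetadataTable.ID, splitPoint, context);
  // scope-only form used for write-ahead logs, as in DfsLogger.open() and checkWalCanSync()
  var loggerEnv = new VolumeChooserEnvironmentImpl(VolumeChooserEnvironment.Scope.LOGGER, context);
}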