Usage example of org.apache.accumulo.core.data.InstanceId in the Apache Accumulo project: class Initialize, method doInit.
/**
 * Performs first-time initialization of a new Accumulo instance: prompts for the
 * instance name and root credentials, generates a fresh {@link InstanceId}, seeds
 * ZooKeeper, creates the on-disk directory layout, and sets up security.
 *
 * @param zoo        writer used to seed the instance's ZooKeeper nodes
 * @param opts       command-line options; {@code opts.rootpass} is populated here
 * @param fs         volume manager used to create the HDFS directory structure
 * @param initConfig initial site configuration
 * @return {@code true} on success; {@code false} if any step failed (the failure
 *         is logged, never rethrown)
 */
private boolean doInit(ZooReaderWriter zoo, Opts opts, VolumeManager fs, InitialConfiguration initConfig) {
  String instanceNamePath;
  String instanceName;
  String rootUser;
  try {
    checkInit(zoo, fs, initConfig);
    // prompt user for instance name and root password early, in case they
    // abort, we don't leave an inconsistent HDFS/ZooKeeper structure
    instanceNamePath = getInstanceNamePath(zoo, opts);
    rootUser = getRootUserName(initConfig, opts);
    // Don't prompt for a password when we're running SASL(Kerberos): the root
    // principal authenticates via Kerberos, so a random throwaway password suffices.
    if (initConfig.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
      opts.rootpass = UUID.randomUUID().toString().getBytes(UTF_8);
    } else {
      opts.rootpass = getRootPassword(initConfig, opts, rootUser);
    }
    // Strip the ZooKeeper path prefix to recover the bare instance name.
    instanceName = instanceNamePath.substring(getInstanceNamePrefix().length());
  } catch (Exception e) {
    log.error("FATAL: Problem during initialize", e);
    return false;
  }
  InstanceId iid = InstanceId.of(UUID.randomUUID());
  try (ServerContext context = ServerContext.initialize(initConfig.getSiteConf(), instanceName, iid)) {
    var chooserEnv = new VolumeChooserEnvironmentImpl(Scope.INIT, RootTable.ID, null, context);
    String rootTabletDirName = RootTable.ROOT_TABLET_DIR_NAME;
    String ext = FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());
    // BUG FIX: choose the root tablet's volume exactly once and derive both the
    // tablet directory and the initial file URI from it. The original called
    // fs.choose() twice (a nondeterministic chooser could pick two different
    // volumes) and omitted the SEPARATOR between RootTable.ID and
    // rootTabletDirName when building rootPath, yielding ".../tables/<id><dir>".
    String rootVol = fs.choose(chooserEnv, initConfig.getVolumeUris());
    String rootTabletDirUri =
        rootVol + SEPARATOR + TABLE_DIR + SEPARATOR + RootTable.ID + SEPARATOR + rootTabletDirName;
    String rootTabletFileUri = new Path(rootTabletDirUri + SEPARATOR + "00000_00000." + ext).toString();
    ZooKeeperInitializer zki = new ZooKeeperInitializer();
    zki.initialize(zoo, opts.clearInstanceName, iid, instanceNamePath, rootTabletDirName, rootTabletFileUri);
    if (!createDirs(fs, iid, initConfig.getVolumeUris())) {
      throw new IOException("Problem creating directories on " + fs.getVolumes());
    }
    var fileSystemInitializer = new FileSystemInitializer(initConfig, zoo, iid);
    var rootPath = new Path(rootTabletDirUri);
    fileSystemInitializer.initialize(fs, rootPath.toString(), rootTabletFileUri, context);
    checkSASL(initConfig);
    initSecurity(context, opts, rootUser);
    checkUploadProps(context, initConfig, opts);
  } catch (Exception e) {
    log.error("FATAL: Problem during initialize", e);
    return false;
  }
  return true;
}
Usage example of org.apache.accumulo.core.data.InstanceId in the Apache Accumulo project: class Initialize, method addVolumes.
/**
 * Creates the Accumulo directory structure on any configured volumes that have
 * not yet been initialized, reusing the instance id found on an already
 * initialized volume.
 *
 * @param fs         volume manager used to create directories
 * @param initConfig initial site/Hadoop configuration
 * @param serverDirs helper describing the server's directory layout
 * @return {@code true} if the uninitialized volumes were set up successfully
 */
private static boolean addVolumes(VolumeManager fs, InitialConfiguration initConfig, ServerDirs serverDirs) {
  var hadoopConf = initConfig.getHadoopConf();
  var siteConfig = initConfig.getSiteConf();
  Set<String> configuredVolumes = VolumeConfiguration.getVolumeUris(siteConfig);
  Set<String> initializedVolumes = serverDirs.checkBaseUris(hadoopConf, configuredVolumes, true);
  // Volumes that are configured but do not yet have the directory structure.
  HashSet<String> pendingVolumes = new HashSet<>(configuredVolumes);
  pendingVolumes.removeAll(initializedVolumes);
  // Read the existing instance id off an arbitrary already-initialized volume.
  Path basePath = new Path(initializedVolumes.iterator().next());
  Path iidPath = new Path(basePath, Constants.INSTANCE_ID_DIR);
  Path versionPath = new Path(basePath, Constants.VERSION_DIR);
  InstanceId iid = VolumeManager.getInstanceIDFromHdfs(iidPath, hadoopConf);
  // Warn loudly if the volume we read from is itself scheduled for replacement.
  for (Pair<Path, Path> replacement : serverDirs.getVolumeReplacements()) {
    if (basePath.equals(replacement.getFirst())) {
      log.error("{} is set to be replaced in {} and should not appear in {}." + " It is highly recommended that this property be removed as data" + " could still be written to this volume.", basePath, Property.INSTANCE_VOLUMES_REPLACEMENTS, Property.INSTANCE_VOLUMES);
    }
  }
  try {
    var versionFs = versionPath.getFileSystem(hadoopConf);
    int persistentVersion = serverDirs.getAccumuloPersistentVersion(versionFs, versionPath);
    if (persistentVersion != AccumuloDataVersion.get()) {
      // Refuse to touch data written by a different persistent-format version.
      throw new IOException("Accumulo " + Constants.VERSION + " cannot initialize data version " + persistentVersion);
    }
  } catch (IOException e) {
    log.error("Problem getting accumulo data version", e);
    return false;
  }
  return createDirs(fs, iid, pendingVolumes);
}
Usage example of org.apache.accumulo.core.data.InstanceId in the Apache Accumulo project: class ListInstances, method listInstances.
/**
 * Prints the Accumulo instances registered in the given ZooKeeper ensemble:
 * first every named instance, then (with {@code printAll}) any instance ids
 * that have no name mapping.
 *
 * @param keepers     comma-separated ZooKeeper host:port list
 * @param printAll    also print instance ids that have no registered name
 * @param printErrors print per-instance lookup errors instead of a summary count
 */
static synchronized void listInstances(String keepers, boolean printAll, boolean printErrors) {
  // Reset the shared error counter for this run.
  errors = 0;
  System.out.println("INFO : Using ZooKeepers " + keepers);
  ZooReader reader = new ZooReader(keepers, ZOOKEEPER_TIMER_MILLIS);
  ZooCache zkCache = new ZooCache(reader, null);
  TreeMap<String, InstanceId> namedInstances = getInstanceNames(reader, printErrors);
  System.out.println();
  printHeader();
  // TreeMap iteration order is sorted by name, matching the original output order.
  namedInstances.forEach((name, id) -> printInstanceInfo(zkCache, name, id, printErrors));
  // Instance ids with no name entry.
  TreeSet<InstanceId> unnamedIds = getInstanceIDs(reader, printErrors);
  unnamedIds.removeAll(namedInstances.values());
  if (printAll) {
    for (InstanceId id : unnamedIds) {
      printInstanceInfo(zkCache, null, id, printErrors);
    }
  } else if (unnamedIds.isEmpty()) {
    System.out.println();
  } else {
    System.out.println();
    System.out.println("INFO : " + unnamedIds.size() + " unnamed instances were not printed, run with --print-all to see all instances");
  }
  if (!printErrors && errors > 0) {
    System.err.println("WARN : There were " + errors + " errors, run with --print-errors to see more info");
  }
}
Usage example of org.apache.accumulo.core.data.InstanceId in the Apache Accumulo project: class UnorderedWorkAssigner, method cleanupFinishedWork.
/**
 * Iterate over the queued work to remove entries that have been completed.
 */
@Override
protected void cleanupFinishedWork() {
  final InstanceId instanceId = client.instanceOperations().getInstanceId();
  final String workQueuePrefix =
      ZooUtil.getRoot(instanceId) + ReplicationConstants.ZOO_WORK_QUEUE + "/";
  // A missing (null) node in ZooKeeper means that work item has been finished,
  // so it can be dropped from the local queue.
  queuedWork.removeIf(filename -> zooCache.get(workQueuePrefix + filename) == null);
}
Usage example of org.apache.accumulo.core.data.InstanceId in the Apache Accumulo project: class ChangeSecret, method main.
/**
 * Rotates the instance secret: verifies Accumulo is down, generates a new
 * {@link InstanceId}, rewrites the instance metadata in HDFS and ZooKeeper
 * under the new secret, and deletes the old instance entry.
 *
 * @param args command-line arguments; {@code --old}/{@code --new} are prepended
 *             so the parser always prompts for both secrets
 * @throws Exception if any step of the rotation fails
 */
public static void main(String[] args) throws Exception {
  var siteConfig = SiteConfiguration.auto();
  var hadoopConf = new Configuration();
  Opts opts = new Opts();
  ServerContext context = opts.getServerContext();
  try (var fs = context.getVolumeManager()) {
    ServerDirs serverDirs = new ServerDirs(siteConfig, hadoopConf);
    // Fail fast if we cannot write the new instance id into HDFS.
    verifyHdfsWritePermission(serverDirs, fs);
    // Prepend the password flags so the parser prompts for the old and new
    // secrets even when the user supplied no flags of their own.
    List<String> argsList = new ArrayList<>(args.length + 2);
    argsList.add("--old");
    argsList.add("--new");
    argsList.addAll(Arrays.asList(args));
    // BUG FIX: the original parsed the raw `args`, so argsList was dead code
    // and the injected --old/--new flags were silently ignored.
    opts.parseArgs(ChangeSecret.class.getName(), argsList.toArray(new String[0]));
    Span span = TraceUtil.startSpan(ChangeSecret.class, "main");
    try (Scope scope = span.makeCurrent()) {
      verifyAccumuloIsDown(context, opts.oldPass);
      final InstanceId newInstanceId = InstanceId.of(UUID.randomUUID());
      updateHdfs(serverDirs, fs, newInstanceId);
      rewriteZooKeeperInstance(context, newInstanceId, opts.oldPass, opts.newPass);
      if (opts.oldPass != null) {
        deleteInstance(context, opts.oldPass);
      }
      System.out.println("New instance id is " + newInstanceId);
      System.out.println("Be sure to put your new secret in accumulo.properties");
    } finally {
      // Always end the trace span, even when rotation fails.
      span.end();
    }
  }
}
Aggregations