use of org.apache.accumulo.server.ServerDirs in project accumulo by apache.
the class Initialize method execute.
@Override
public void execute(final String[] args) {
  boolean success = true;
  Opts opts = new Opts();
  opts.parseArgs("accumulo init", args);
  var siteConfig = SiteConfiguration.auto();
  ZooReaderWriter zoo = new ZooReaderWriter(siteConfig);
  SecurityUtil.serverLogin(siteConfig);
  Configuration hadoopConfig = new Configuration();
  InitialConfiguration initConfig = new InitialConfiguration(hadoopConfig, siteConfig);
  ServerDirs serverDirs = new ServerDirs(siteConfig, hadoopConfig);
  try (var fs = VolumeManagerImpl.get(siteConfig, hadoopConfig)) {
    if (opts.resetSecurity) {
      success = resetSecurity(initConfig, opts, fs);
    }
    if (success && opts.addVolumes) {
      success = addVolumes(fs, initConfig, serverDirs);
    }
    if (!opts.resetSecurity && !opts.addVolumes) {
      success = doInit(zoo, opts, fs, initConfig);
    }
  } catch (IOException e) {
    log.error("Problem trying to get Volume configuration", e);
    success = false;
  } finally {
    SingletonManager.setMode(Mode.CLOSED);
    if (!success) {
      System.exit(-1);
    }
  }
}
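Here ServerDirs is simply built from the site and Hadoop configuration and handed to addVolumes(); it resolves Accumulo's per-volume base directories. A minimal standalone sketch of the same construction pattern, assuming a resolvable accumulo.properties (getBaseUris() is my assumption for the accessor exposing the configured volume base URIs):

import org.apache.accumulo.core.conf.SiteConfiguration;
import org.apache.accumulo.server.ServerDirs;
import org.apache.hadoop.conf.Configuration;

public class ServerDirsExample {
  public static void main(String[] args) {
    // Same construction pattern as Initialize.execute() above
    var siteConfig = SiteConfiguration.auto();
    var hadoopConfig = new Configuration();
    ServerDirs serverDirs = new ServerDirs(siteConfig, hadoopConfig);
    // Assumed accessor: prints the base URI of each configured volume
    serverDirs.getBaseUris().forEach(System.out::println);
  }
}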
use of org.apache.accumulo.server.ServerDirs in project accumulo by apache.
the class MiniAccumuloClusterImpl method start.
/**
 * Starts Accumulo and Zookeeper processes. Can only be called once.
 */
@SuppressFBWarnings(value = "UNENCRYPTED_SOCKET",
    justification = "insecure socket used for reservation")
@Override
public synchronized void start() throws IOException, InterruptedException {
  if (config.useMiniDFS() && miniDFS == null) {
    throw new IllegalStateException("Cannot restart mini when using miniDFS");
  }
  MiniAccumuloClusterControl control = getClusterControl();
  if (config.useExistingInstance()) {
    AccumuloConfiguration acuConf = config.getAccumuloConfiguration();
    Configuration hadoopConf = config.getHadoopConfiguration();
    ServerDirs serverDirs = new ServerDirs(acuConf, hadoopConf);
    ConfigurationCopy cc = new ConfigurationCopy(acuConf);
    Path instanceIdPath;
    try (var fs = getServerContext().getVolumeManager()) {
      instanceIdPath = serverDirs.getInstanceIdLocation(fs.getFirst());
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    InstanceId instanceIdFromFile =
        VolumeManager.getInstanceIDFromHdfs(instanceIdPath, hadoopConf);
    ZooReaderWriter zrw = new ZooReaderWriter(cc.get(Property.INSTANCE_ZK_HOST),
        (int) cc.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT),
        cc.get(Property.INSTANCE_SECRET));
    String rootPath = ZooUtil.getRoot(instanceIdFromFile);
    String instanceName = null;
    try {
      for (String name : zrw.getChildren(Constants.ZROOT + Constants.ZINSTANCES)) {
        String instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + name;
        byte[] bytes = zrw.getData(instanceNamePath);
        InstanceId iid = InstanceId.of(new String(bytes, UTF_8));
        if (iid.equals(instanceIdFromFile)) {
          instanceName = name;
        }
      }
    } catch (KeeperException e) {
      throw new RuntimeException("Unable to read instance name from zookeeper.", e);
    }
    if (instanceName == null) {
      throw new RuntimeException("Unable to read instance name from zookeeper.");
    }
    config.setInstanceName(instanceName);
    if (!AccumuloStatus.isAccumuloOffline(zrw, rootPath)) {
      throw new RuntimeException("The Accumulo instance being used is already running. Aborting.");
    }
  } else {
    if (!initialized) {
      Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        try {
          MiniAccumuloClusterImpl.this.stop();
        } catch (IOException e) {
          log.error("IOException while attempting to stop the MiniAccumuloCluster.", e);
        } catch (InterruptedException e) {
          log.error("The stopping of MiniAccumuloCluster was interrupted.", e);
        }
      }));
    }
    if (!config.useExistingZooKeepers()) {
      control.start(ServerType.ZOOKEEPER);
    }
    if (!initialized) {
      if (!config.useExistingZooKeepers()) {
        // sleep a little bit to let zookeeper come up before calling init, seems to work better
        long startTime = System.currentTimeMillis();
        while (true) {
          try (Socket s = new Socket("localhost", config.getZooKeeperPort())) {
            s.setReuseAddress(true);
            s.getOutputStream().write("ruok\n".getBytes());
            s.getOutputStream().flush();
            byte[] buffer = new byte[100];
            int n = s.getInputStream().read(buffer);
            if (n >= 4 && new String(buffer, 0, 4).equals("imok")) {
              break;
            }
          } catch (Exception e) {
            if (System.currentTimeMillis() - startTime >= config.getZooKeeperStartupTime()) {
              throw new ZooKeeperBindException("Zookeeper did not start within "
                  + (config.getZooKeeperStartupTime() / 1000) + " seconds. Check the logs in "
                  + config.getLogDir() + " for errors. Last exception: " + e);
            }
            // Don't spin absurdly fast
            sleepUninterruptibly(250, TimeUnit.MILLISECONDS);
          }
        }
      }
      LinkedList<String> args = new LinkedList<>();
      args.add("--instance-name");
      args.add(config.getInstanceName());
      args.add("--user");
      args.add(config.getRootUserName());
      args.add("--clear-instance-name");
      // If we aren't using SASL, add in the root password
      final String saslEnabled =
          config.getSiteConfig().get(Property.INSTANCE_RPC_SASL_ENABLED.getKey());
      if (saslEnabled == null || !Boolean.parseBoolean(saslEnabled)) {
        args.add("--password");
        args.add(config.getRootPassword());
      }
      Process initProcess = exec(Initialize.class, args.toArray(new String[0])).getProcess();
      int ret = initProcess.waitFor();
      if (ret != 0) {
        throw new RuntimeException("Initialize process returned " + ret + ". Check the logs in "
            + config.getLogDir() + " for errors.");
      }
      initialized = true;
    }
  }
  log.info("Starting MAC against instance {} and zookeeper(s) {}.", config.getInstanceName(),
      config.getZooKeepers());
  control.start(ServerType.TABLET_SERVER);
  int ret = 0;
  for (int i = 0; i < 5; i++) {
    ret = exec(Main.class, SetGoalState.class.getName(), ManagerGoalState.NORMAL.toString())
        .getProcess().waitFor();
    if (ret == 0) {
      break;
    }
    sleepUninterruptibly(1, TimeUnit.SECONDS);
  }
  if (ret != 0) {
    throw new RuntimeException("Could not set manager goal state, process returned " + ret
        + ". Check the logs in " + config.getLogDir() + " for errors.");
  }
  control.start(ServerType.MANAGER);
  control.start(ServerType.GARBAGE_COLLECTOR);
  if (executor == null) {
    executor = Executors.newSingleThreadExecutor();
  }
  verifyUp();
}
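For callers who only need this behavior rather than the internals, the public MiniAccumuloCluster wrapper drives the same start() path. A minimal sketch, assuming a fresh, writable temporary directory:

import java.io.File;
import org.apache.accumulo.minicluster.MiniAccumuloCluster;
import org.apache.accumulo.minicluster.MiniAccumuloConfig;

public class MacStartExample {
  public static void main(String[] args) throws Exception {
    // MAC requires an empty directory it can own
    File dir = new File(System.getProperty("java.io.tmpdir"), "mac-" + System.nanoTime());
    if (!dir.mkdirs()) {
      throw new IllegalStateException("Could not create " + dir);
    }
    MiniAccumuloCluster mac = new MiniAccumuloCluster(new MiniAccumuloConfig(dir, "rootPassword"));
    mac.start(); // delegates to the MiniAccumuloClusterImpl.start() shown above
    System.out.println("instance=" + mac.getInstanceName() + " zookeepers=" + mac.getZooKeepers());
    mac.stop();
  }
}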
use of org.apache.accumulo.server.ServerDirs in project accumulo by apache.
the class AccumuloTest method testUpdateAccumuloVersion.
@Test
public void testUpdateAccumuloVersion() throws Exception {
  Volume v1 = createMock(Volume.class);
  FileSystem fs1 = createMock(FileSystem.class);
  Path baseVersion1 = new Path("hdfs://volume1/accumulo/version");
  Path oldVersion1 = new Path("hdfs://volume1/accumulo/version/7");
  Path newVersion1 = new Path("hdfs://volume1/accumulo/version/" + AccumuloDataVersion.get());
  FileStatus[] files1 = mockPersistentVersion("7");
  expect(fs1.listStatus(baseVersion1)).andReturn(files1);
  replay(fs1);
  FSDataOutputStream fsdos1 = createMock(FSDataOutputStream.class);
  expect(v1.getFileSystem()).andReturn(fs1);
  expect(v1.prefixChild(Constants.VERSION_DIR)).andReturn(baseVersion1).times(2);
  replay(v1);
  fsdos1.close();
  replay(fsdos1);
  Volume v2 = createMock(Volume.class);
  FileSystem fs2 = createMock(FileSystem.class);
  Path baseVersion2 = new Path("hdfs://volume2/accumulo/version");
  Path oldVersion2 = new Path("hdfs://volume2/accumulo/version/7");
  Path newVersion2 = new Path("hdfs://volume2/accumulo/version/" + AccumuloDataVersion.get());
  FileStatus[] files2 = mockPersistentVersion("7");
  expect(fs2.listStatus(baseVersion2)).andReturn(files2);
  replay(fs2);
  FSDataOutputStream fsdos2 = createMock(FSDataOutputStream.class);
  expect(v2.getFileSystem()).andReturn(fs2);
  expect(v2.prefixChild(Constants.VERSION_DIR)).andReturn(baseVersion2).times(2);
  replay(v2);
  fsdos2.close();
  replay(fsdos2);
  VolumeManager vm = createMock(VolumeManager.class);
  expect(vm.getVolumes()).andReturn(Sets.newHashSet(v1, v2));
  expect(vm.delete(oldVersion1)).andReturn(true);
  expect(vm.create(newVersion1)).andReturn(fsdos1);
  expect(vm.delete(oldVersion2)).andReturn(true);
  expect(vm.create(newVersion2)).andReturn(fsdos2);
  replay(vm);
  UpgradeCoordinator upgradeCoordinator = new UpgradeCoordinator();
  ServerDirs constants = new ServerDirs(DefaultConfiguration.getInstance(), new Configuration());
  upgradeCoordinator.updateAccumuloVersion(constants, vm, 7);
}
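The test leans on a mockPersistentVersion helper that this excerpt does not show. A plausible reconstruction, assuming it returns a single mocked FileStatus whose path ends with the persisted data version (the volume host below is arbitrary):

private FileStatus[] mockPersistentVersion(String version) {
  FileStatus fileStatus = createMock(FileStatus.class);
  // Only the trailing path component matters to the code under test
  expect(fileStatus.getPath())
      .andReturn(new Path("hdfs://volume1/accumulo/version/" + version)).anyTimes();
  replay(fileStatus);
  return new FileStatus[] {fileStatus};
}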
use of org.apache.accumulo.server.ServerDirs in project accumulo by apache.
the class ChangeSecret method main.
public static void main(String[] args) throws Exception {
  var siteConfig = SiteConfiguration.auto();
  var hadoopConf = new Configuration();
  Opts opts = new Opts();
  ServerContext context = opts.getServerContext();
  try (var fs = context.getVolumeManager()) {
    ServerDirs serverDirs = new ServerDirs(siteConfig, hadoopConf);
    verifyHdfsWritePermission(serverDirs, fs);
    List<String> argsList = new ArrayList<>(args.length + 2);
    argsList.add("--old");
    argsList.add("--new");
    argsList.addAll(Arrays.asList(args));
    opts.parseArgs(ChangeSecret.class.getName(), args);
    Span span = TraceUtil.startSpan(ChangeSecret.class, "main");
    try (Scope scope = span.makeCurrent()) {
      verifyAccumuloIsDown(context, opts.oldPass);
      final InstanceId newInstanceId = InstanceId.of(UUID.randomUUID());
      updateHdfs(serverDirs, fs, newInstanceId);
      rewriteZooKeeperInstance(context, newInstanceId, opts.oldPass, opts.newPass);
      if (opts.oldPass != null) {
        deleteInstance(context, opts.oldPass);
      }
      System.out.println("New instance id is " + newInstanceId);
      System.out.println("Be sure to put your new secret in accumulo.properties");
    } finally {
      span.end();
    }
  }
}
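updateHdfs() is not shown here, but ServerDirs plays the same role it does in the MiniAccumuloClusterImpl snippet: resolving the instance_id directory on each volume. A rough, illustrative sketch of that rewrite under those assumptions (helper name and file layout are mine, not the actual implementation):

// Illustrative only: replace the instance id file on every configured volume.
static void rewriteInstanceId(ServerDirs serverDirs, VolumeManager fs, InstanceId newInstanceId)
    throws IOException {
  for (Volume volume : fs.getVolumes()) {
    // Same accessor used in MiniAccumuloClusterImpl.start() above
    Path idPath = serverDirs.getInstanceIdLocation(volume);
    FileSystem volumeFs = volume.getFileSystem();
    volumeFs.delete(idPath, true);
    volumeFs.mkdirs(idPath);
    // Assumes the id is persisted as an empty file named after the uuid
    volumeFs.create(new Path(idPath, newInstanceId.canonical())).close();
  }
}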
use of org.apache.accumulo.server.ServerDirs in project accumulo by apache.
the class AccumuloTest method setUp.
@Before
public void setUp() {
  fs = createMock(FileSystem.class);
  path = createMock(Path.class);
  serverDirs = new ServerDirs(DefaultConfiguration.getInstance(), new Configuration());
}
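With that fixture in place, a test can exercise ServerDirs against a mocked Volume in the style of testUpdateAccumuloVersion above. A hedged sketch (getDataVersionLocation() is my assumption for the accessor wrapping prefixChild(Constants.VERSION_DIR)):

@Test
public void testDataVersionLocation() {
  Volume v = createMock(Volume.class);
  Path versionPath = new Path("hdfs://volume1/accumulo/version");
  expect(v.prefixChild(Constants.VERSION_DIR)).andReturn(versionPath);
  replay(v);
  // Assumed delegation to prefixChild(Constants.VERSION_DIR)
  assertEquals(versionPath, serverDirs.getDataVersionLocation(v));
  verify(v);
}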