Use of org.apache.accumulo.core.data.InstanceId in project Accumulo by Apache.
The class SequentialWorkAssigner, method cleanupFinishedWork.
/**
 * Iterate over the queued work to remove entries that have been completed.
 */
@Override
protected void cleanupFinishedWork() {
  final Iterator<Entry<String, Map<TableId, String>>> queuedWork = queuedWorkByPeerName.entrySet().iterator();
  final InstanceId instanceId = client.instanceOperations().getInstanceId();
  int elementsRemoved = 0;
  // Check the status of all the work we've queued up
  while (queuedWork.hasNext()) {
    // {peer -> {tableId -> workKey, tableId -> workKey, ... }, peer -> ...}
    Entry<String, Map<TableId, String>> workForPeer = queuedWork.next();
    // TableID to workKey (filename and ReplicationTarget)
    Map<TableId, String> queuedReplication = workForPeer.getValue();
    Iterator<Entry<TableId, String>> iter = queuedReplication.entrySet().iterator();
    // Check whether each queued replication task has finished
    while (iter.hasNext()) {
      // tableID -> workKey
      Entry<TableId, String> entry = iter.next();
      // Null equates to the work for this target was finished
      if (zooCache.get(ZooUtil.getRoot(instanceId) + ReplicationConstants.ZOO_WORK_QUEUE + "/" + entry.getValue()) == null) {
        log.debug("Removing {} from work assignment state", entry.getValue());
        iter.remove();
        elementsRemoved++;
      }
    }
  }
  log.info("Removed {} elements from internal workqueue state because the work was complete", elementsRemoved);
}
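To make the completion check concrete, here is a minimal sketch that composes the same ZooKeeper work-queue node path from an InstanceId. The instance id and work key values are made up, and the import packages follow the Accumulo 2.1 source layout, so treat both as assumptions.

import org.apache.accumulo.core.data.InstanceId;
import org.apache.accumulo.core.fate.zookeeper.ZooUtil;
import org.apache.accumulo.core.replication.ReplicationConstants;

public class WorkQueuePathSketch {
  public static void main(String[] args) {
    // Hypothetical values for illustration only
    InstanceId instanceId = InstanceId.of("11111111-2222-3333-4444-555555555555");
    String workKey = "wal-file|peer-name|remote-table|local-table";
    // The same path the assigner polls via zooCache; a missing node means the work is done
    String node = ZooUtil.getRoot(instanceId) + ReplicationConstants.ZOO_WORK_QUEUE + "/" + workKey;
    System.out.println(node);
  }
}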
Use of org.apache.accumulo.core.data.InstanceId in project Accumulo by Apache.
The class ServerDirs, method checkBaseUris.
public Set<String> checkBaseUris(Configuration hadoopConf, Set<String> configuredBaseDirs, boolean ignore) {
  // all base dirs must have same instance id and data version, any dirs that have neither should
  // be ignored
  String firstDir = null;
  InstanceId firstIid = null;
  Integer firstVersion = null;
  // preserve order from configuration (to match user expectations a bit when volumes get sent to
  // user-implemented VolumeChoosers)
  LinkedHashSet<String> baseDirsList = new LinkedHashSet<>();
  for (String baseDir : configuredBaseDirs) {
    Path path = new Path(baseDir, Constants.INSTANCE_ID_DIR);
    InstanceId currentIid;
    int currentVersion;
    try {
      currentIid = VolumeManager.getInstanceIDFromHdfs(path, hadoopConf);
      Path vpath = new Path(baseDir, Constants.VERSION_DIR);
      currentVersion = getAccumuloPersistentVersion(vpath.getFileSystem(hadoopConf), vpath);
    } catch (Exception e) {
      if (ignore) {
        continue;
      } else {
        throw new IllegalArgumentException("Accumulo volume " + path + " not initialized", e);
      }
    }
    if (firstIid == null) {
      firstIid = currentIid;
      firstDir = baseDir;
      firstVersion = currentVersion;
    } else if (!currentIid.equals(firstIid)) {
      throw new IllegalArgumentException("Configuration " + Property.INSTANCE_VOLUMES.getKey()
          + " contains paths that have different instance ids " + baseDir + " has " + currentIid
          + " and " + firstDir + " has " + firstIid);
    } else if (currentVersion != firstVersion) {
      throw new IllegalArgumentException("Configuration " + Property.INSTANCE_VOLUMES.getKey()
          + " contains paths that have different versions " + baseDir + " has " + currentVersion
          + " and " + firstDir + " has " + firstVersion);
    }
    baseDirsList.add(baseDir);
  }
  if (baseDirsList.isEmpty()) {
    throw new RuntimeException("None of the configured paths are initialized.");
  }
  return baseDirsList;
}
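A short usage sketch for this validation, assuming the 2.1 package locations for ServerDirs, SiteConfiguration, and VolumeConfiguration; all of the calls mirror ones that appear in the snippets on this page, but the class wrapper here is hypothetical.

import java.util.Set;
import org.apache.accumulo.core.conf.SiteConfiguration;
import org.apache.accumulo.core.volume.VolumeConfiguration;
import org.apache.accumulo.server.ServerDirs;
import org.apache.hadoop.conf.Configuration;

public class CheckVolumesSketch {
  public static void main(String[] args) {
    // Load the site configuration and a default Hadoop configuration
    SiteConfiguration siteConf = SiteConfiguration.auto();
    Configuration hadoopConf = new Configuration();
    ServerDirs serverDirs = new ServerDirs(siteConf, hadoopConf);
    // Validate that every configured volume shares one instance id and version;
    // pass true to skip (rather than fail on) uninitialized volumes
    Set<String> usable = serverDirs.checkBaseUris(hadoopConf,
        VolumeConfiguration.getVolumeUris(siteConf), true);
    usable.forEach(System.out::println);
  }
}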
Use of org.apache.accumulo.core.data.InstanceId in project Accumulo by Apache.
The class MiniAccumuloClusterImpl, method start.
/**
 * Starts Accumulo and Zookeeper processes. Can only be called once.
 */
@SuppressFBWarnings(value = "UNENCRYPTED_SOCKET", justification = "insecure socket used for reservation")
@Override
public synchronized void start() throws IOException, InterruptedException {
  if (config.useMiniDFS() && miniDFS == null) {
    throw new IllegalStateException("Cannot restart mini when using miniDFS");
  }
  MiniAccumuloClusterControl control = getClusterControl();
  if (config.useExistingInstance()) {
    AccumuloConfiguration acuConf = config.getAccumuloConfiguration();
    Configuration hadoopConf = config.getHadoopConfiguration();
    ServerDirs serverDirs = new ServerDirs(acuConf, hadoopConf);
    ConfigurationCopy cc = new ConfigurationCopy(acuConf);
    Path instanceIdPath;
    try (var fs = getServerContext().getVolumeManager()) {
      instanceIdPath = serverDirs.getInstanceIdLocation(fs.getFirst());
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    InstanceId instanceIdFromFile = VolumeManager.getInstanceIDFromHdfs(instanceIdPath, hadoopConf);
    ZooReaderWriter zrw = new ZooReaderWriter(cc.get(Property.INSTANCE_ZK_HOST),
        (int) cc.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT), cc.get(Property.INSTANCE_SECRET));
    String rootPath = ZooUtil.getRoot(instanceIdFromFile);
    String instanceName = null;
    try {
      for (String name : zrw.getChildren(Constants.ZROOT + Constants.ZINSTANCES)) {
        String instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + name;
        byte[] bytes = zrw.getData(instanceNamePath);
        InstanceId iid = InstanceId.of(new String(bytes, UTF_8));
        if (iid.equals(instanceIdFromFile)) {
          instanceName = name;
        }
      }
    } catch (KeeperException e) {
      throw new RuntimeException("Unable to read instance name from zookeeper.", e);
    }
    if (instanceName == null) {
      throw new RuntimeException("Unable to read instance name from zookeeper.");
    }
    config.setInstanceName(instanceName);
    if (!AccumuloStatus.isAccumuloOffline(zrw, rootPath)) {
      throw new RuntimeException("The Accumulo instance being used is already running. Aborting.");
    }
  } else {
    if (!initialized) {
      Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        try {
          MiniAccumuloClusterImpl.this.stop();
        } catch (IOException e) {
          log.error("IOException while attempting to stop the MiniAccumuloCluster.", e);
        } catch (InterruptedException e) {
          log.error("The stopping of MiniAccumuloCluster was interrupted.", e);
        }
      }));
    }
    if (!config.useExistingZooKeepers()) {
      control.start(ServerType.ZOOKEEPER);
    }
    if (!initialized) {
      if (!config.useExistingZooKeepers()) {
        // sleep a little bit to let zookeeper come up before calling init, seems to work better
        long startTime = System.currentTimeMillis();
        while (true) {
          try (Socket s = new Socket("localhost", config.getZooKeeperPort())) {
            s.setReuseAddress(true);
            s.getOutputStream().write("ruok\n".getBytes());
            s.getOutputStream().flush();
            byte[] buffer = new byte[100];
            int n = s.getInputStream().read(buffer);
            if (n >= 4 && new String(buffer, 0, 4).equals("imok")) {
              break;
            }
          } catch (Exception e) {
            if (System.currentTimeMillis() - startTime >= config.getZooKeeperStartupTime()) {
              throw new ZooKeeperBindException("Zookeeper did not start within "
                  + (config.getZooKeeperStartupTime() / 1000) + " seconds. Check the logs in "
                  + config.getLogDir() + " for errors. Last exception: " + e);
            }
            // Don't spin absurdly fast
            sleepUninterruptibly(250, TimeUnit.MILLISECONDS);
          }
        }
      }
      LinkedList<String> args = new LinkedList<>();
      args.add("--instance-name");
      args.add(config.getInstanceName());
      args.add("--user");
      args.add(config.getRootUserName());
      args.add("--clear-instance-name");
      // If we aren't using SASL, add in the root password
      final String saslEnabled = config.getSiteConfig().get(Property.INSTANCE_RPC_SASL_ENABLED.getKey());
      if (saslEnabled == null || !Boolean.parseBoolean(saslEnabled)) {
        args.add("--password");
        args.add(config.getRootPassword());
      }
      Process initProcess = exec(Initialize.class, args.toArray(new String[0])).getProcess();
      int ret = initProcess.waitFor();
      if (ret != 0) {
        throw new RuntimeException("Initialize process returned " + ret + ". Check the logs in "
            + config.getLogDir() + " for errors.");
      }
      initialized = true;
    }
  }
  log.info("Starting MAC against instance {} and zookeeper(s) {}.", config.getInstanceName(), config.getZooKeepers());
  control.start(ServerType.TABLET_SERVER);
  int ret = 0;
  for (int i = 0; i < 5; i++) {
    ret = exec(Main.class, SetGoalState.class.getName(), ManagerGoalState.NORMAL.toString()).getProcess().waitFor();
    if (ret == 0) {
      break;
    }
    sleepUninterruptibly(1, TimeUnit.SECONDS);
  }
  if (ret != 0) {
    throw new RuntimeException("Could not set manager goal state, process returned " + ret
        + ". Check the logs in " + config.getLogDir() + " for errors.");
  }
  control.start(ServerType.MANAGER);
  control.start(ServerType.GARBAGE_COLLECTOR);
  if (executor == null) {
    executor = Executors.newSingleThreadExecutor();
  }
  verifyUp();
}
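This method backs the public MiniAccumuloCluster API, so a caller normally never touches the Impl class directly. A minimal usage sketch against that public API, where the directory and root password are placeholders:

import java.io.File;
import org.apache.accumulo.minicluster.MiniAccumuloCluster;
import org.apache.accumulo.minicluster.MiniAccumuloConfig;

public class MacStartSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder working directory and root password
    MiniAccumuloConfig cfg = new MiniAccumuloConfig(new File("/tmp/mac-test"), "secret");
    MiniAccumuloCluster mac = new MiniAccumuloCluster(cfg);
    mac.start(); // first start runs Initialize; subsequent starts reuse the instance
    System.out.println("instance=" + mac.getInstanceName() + " zk=" + mac.getZooKeepers());
    mac.stop();
  }
}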
Use of org.apache.accumulo.core.data.InstanceId in project Accumulo by Apache.
The class ZooZap, method main.
public static void main(String[] args) {
  Opts opts = new Opts();
  opts.parseArgs(ZooZap.class.getName(), args);
  if (!opts.zapMaster && !opts.zapManager && !opts.zapTservers) {
    new JCommander(opts).usage();
    return;
  }
  try {
    var siteConf = SiteConfiguration.auto();
    // Login as the server on secure HDFS
    if (siteConf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
      SecurityUtil.serverLogin(siteConf);
    }
    String volDir = VolumeConfiguration.getVolumeUris(siteConf).iterator().next();
    Path instanceDir = new Path(volDir, "instance_id");
    InstanceId iid = VolumeManager.getInstanceIDFromHdfs(instanceDir, new Configuration());
    ZooReaderWriter zoo = new ZooReaderWriter(siteConf);
    if (opts.zapMaster) {
      log.warn("The -master option is deprecated. Please use -manager instead.");
    }
    if (opts.zapManager || opts.zapMaster) {
      String managerLockPath = Constants.ZROOT + "/" + iid + Constants.ZMANAGER_LOCK;
      try {
        zapDirectory(zoo, managerLockPath, opts);
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
    if (opts.zapTservers) {
      String tserversPath = Constants.ZROOT + "/" + iid + Constants.ZTSERVERS;
      try {
        List<String> children = zoo.getChildren(tserversPath);
        for (String child : children) {
          message("Deleting " + tserversPath + "/" + child + " from zookeeper", opts);
          if (opts.zapManager || opts.zapMaster) {
            zoo.recursiveDelete(tserversPath + "/" + child, NodeMissingPolicy.SKIP);
          } else {
            var zLockPath = ServiceLock.path(tserversPath + "/" + child);
            if (!zoo.getChildren(zLockPath.toString()).isEmpty()) {
              if (!ServiceLock.deleteLock(zoo, zLockPath, "tserver")) {
                message("Did not delete " + tserversPath + "/" + child, opts);
              }
            }
          }
        }
      } catch (Exception e) {
        log.error("{}", e.getMessage(), e);
      }
    }
    // Remove the tracers, we don't use them anymore.
    @SuppressWarnings("deprecation")
    String path = siteConf.get(Property.TRACE_ZK_PATH);
    try {
      zapDirectory(zoo, path, opts);
    } catch (Exception e) {
      // do nothing if the /tracers node does not exist.
    }
    if (opts.zapCoordinators) {
      final String coordinatorPath = Constants.ZROOT + "/" + iid + Constants.ZCOORDINATOR_LOCK;
      try {
        zapDirectory(zoo, coordinatorPath, opts);
      } catch (Exception e) {
        log.error("Error deleting coordinator from zookeeper, {}", e.getMessage(), e);
      }
    }
    if (opts.zapCompactors) {
      String compactorsBasepath = Constants.ZROOT + "/" + iid + Constants.ZCOMPACTORS;
      try {
        List<String> queues = zoo.getChildren(compactorsBasepath);
        for (String queue : queues) {
          message("Deleting " + compactorsBasepath + "/" + queue + " from zookeeper", opts);
          zoo.recursiveDelete(compactorsBasepath + "/" + queue, NodeMissingPolicy.SKIP);
        }
      } catch (Exception e) {
        log.error("Error deleting compactors from zookeeper, {}", e.getMessage(), e);
      }
    }
  } finally {
    SingletonManager.setMode(Mode.CLOSED);
  }
}
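Every node ZooZap deletes is rooted at the instance id read from HDFS. The sketch below prints those paths using the same constants that appear in the method above; the instance id value is a made-up example.

import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.data.InstanceId;

public class ZapPathsSketch {
  public static void main(String[] args) {
    InstanceId iid = InstanceId.of("11111111-2222-3333-4444-555555555555"); // example id only
    System.out.println(Constants.ZROOT + "/" + iid + Constants.ZMANAGER_LOCK);     // manager lock
    System.out.println(Constants.ZROOT + "/" + iid + Constants.ZTSERVERS);         // tserver entries
    System.out.println(Constants.ZROOT + "/" + iid + Constants.ZCOORDINATOR_LOCK); // coordinator lock
    System.out.println(Constants.ZROOT + "/" + iid + Constants.ZCOMPACTORS);       // compactor queues
  }
}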
Use of org.apache.accumulo.core.data.InstanceId in project Accumulo by Apache.
The class ListInstances, method getInstanceNames.
private static TreeMap<String, InstanceId> getInstanceNames(ZooReader zk, boolean printErrors) {
  String instancesPath = Constants.ZROOT + Constants.ZINSTANCES;
  TreeMap<String, InstanceId> tm = new TreeMap<>();
  List<String> names;
  try {
    names = zk.getChildren(instancesPath);
  } catch (Exception e) {
    handleException(e, printErrors);
    return tm;
  }
  for (String name : names) {
    String instanceNamePath = Constants.ZROOT + Constants.ZINSTANCES + "/" + name;
    try {
      InstanceId iid = InstanceId.of(new String(zk.getData(instanceNamePath), UTF_8));
      tm.put(name, iid);
    } catch (Exception e) {
      handleException(e, printErrors);
      tm.put(name, null);
    }
  }
  return tm;
}
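Since the method is private to ListInstances, a caller inside that class might use it as in the hedged sketch below; the quorum address and timeout are placeholders, and the ZooReader constructor shown follows the 2.1 API.

// Inside ListInstances: placeholder quorum and timeout values
ZooReader zk = new ZooReader("localhost:2181", 30_000);
TreeMap<String, InstanceId> instances = getInstanceNames(zk, true);
// A null id means the instance name node could not be read
instances.forEach((name, iid) ->
    System.out.println(name + " -> " + (iid == null ? "<unreadable>" : iid)));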