
Example 11 with InstanceId

Use of org.apache.accumulo.core.data.InstanceId in the Apache Accumulo project.

From the class Initialize, method doInit.

private boolean doInit(ZooReaderWriter zoo, Opts opts, VolumeManager fs, InitialConfiguration initConfig) {
    String instanceNamePath;
    String instanceName;
    String rootUser;
    try {
        checkInit(zoo, fs, initConfig);
        // prompt user for instance name and root password early, in case they
        // abort, we don't leave an inconsistent HDFS/ZooKeeper structure
        instanceNamePath = getInstanceNamePath(zoo, opts);
        rootUser = getRootUserName(initConfig, opts);
        // Don't prompt for a password when we're running SASL(Kerberos)
        if (initConfig.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
            opts.rootpass = UUID.randomUUID().toString().getBytes(UTF_8);
        } else {
            opts.rootpass = getRootPassword(initConfig, opts, rootUser);
        }
        // the actual disk locations of the root table and tablets
        instanceName = instanceNamePath.substring(getInstanceNamePrefix().length());
    } catch (Exception e) {
        log.error("FATAL: Problem during initialize", e);
        return false;
    }
    InstanceId iid = InstanceId.of(UUID.randomUUID());
    try (ServerContext context = ServerContext.initialize(initConfig.getSiteConf(), instanceName, iid)) {
        var chooserEnv = new VolumeChooserEnvironmentImpl(Scope.INIT, RootTable.ID, null, context);
        String rootTabletDirName = RootTable.ROOT_TABLET_DIR_NAME;
        String ext = FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());
        String rootTabletFileUri = new Path(
            fs.choose(chooserEnv, initConfig.getVolumeUris()) + SEPARATOR + TABLE_DIR + SEPARATOR
                + RootTable.ID + SEPARATOR + rootTabletDirName + SEPARATOR + "00000_00000." + ext)
                    .toString();
        ZooKeeperInitializer zki = new ZooKeeperInitializer();
        zki.initialize(zoo, opts.clearInstanceName, iid, instanceNamePath, rootTabletDirName, rootTabletFileUri);
        if (!createDirs(fs, iid, initConfig.getVolumeUris())) {
            throw new IOException("Problem creating directories on " + fs.getVolumes());
        }
        var fileSystemInitializer = new FileSystemInitializer(initConfig, zoo, iid);
        var rootVol = fs.choose(chooserEnv, initConfig.getVolumeUris());
        var rootPath = new Path(rootVol + SEPARATOR + TABLE_DIR + SEPARATOR + RootTable.ID + SEPARATOR + rootTabletDirName);
        fileSystemInitializer.initialize(fs, rootPath.toString(), rootTabletFileUri, context);
        checkSASL(initConfig);
        initSecurity(context, opts, rootUser);
        checkUploadProps(context, initConfig, opts);
    } catch (Exception e) {
        log.error("FATAL: Problem during initialize", e);
        return false;
    }
    return true;
}
Also used : Path(org.apache.hadoop.fs.Path) ServerContext(org.apache.accumulo.server.ServerContext) InstanceId(org.apache.accumulo.core.data.InstanceId) VolumeChooserEnvironmentImpl(org.apache.accumulo.server.fs.VolumeChooserEnvironmentImpl) IOException(java.io.IOException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) KeeperException(org.apache.zookeeper.KeeperException)
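For reference, the identifier at the heart of the example above comes from a single call: a fresh InstanceId is minted from a random UUID before any ZooKeeper or HDFS state is written. A minimal standalone sketch of just that step (the class name here is illustrative, not part of the project source):

import java.util.UUID;

import org.apache.accumulo.core.data.InstanceId;

public class InstanceIdExample {

    public static void main(String[] args) {
        // Mint a new instance id from a random UUID, exactly as doInit does
        // before initializing the ZooKeeper and file system layout.
        InstanceId iid = InstanceId.of(UUID.randomUUID());
        System.out.println("generated instance id: " + iid);
    }
}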

Example 12 with InstanceId

Use of org.apache.accumulo.core.data.InstanceId in the Apache Accumulo project.

From the class Initialize, method addVolumes.

private static boolean addVolumes(VolumeManager fs, InitialConfiguration initConfig, ServerDirs serverDirs) {
    var hadoopConf = initConfig.getHadoopConf();
    var siteConfig = initConfig.getSiteConf();
    Set<String> volumeURIs = VolumeConfiguration.getVolumeUris(siteConfig);
    Set<String> initializedDirs = serverDirs.checkBaseUris(hadoopConf, volumeURIs, true);
    HashSet<String> uninitializedDirs = new HashSet<>();
    uninitializedDirs.addAll(volumeURIs);
    uninitializedDirs.removeAll(initializedDirs);
    Path aBasePath = new Path(initializedDirs.iterator().next());
    Path iidPath = new Path(aBasePath, Constants.INSTANCE_ID_DIR);
    Path versionPath = new Path(aBasePath, Constants.VERSION_DIR);
    InstanceId iid = VolumeManager.getInstanceIDFromHdfs(iidPath, hadoopConf);
    for (Pair<Path, Path> replacementVolume : serverDirs.getVolumeReplacements()) {
        if (aBasePath.equals(replacementVolume.getFirst())) {
            log.error("{} is set to be replaced in {} and should not appear in {}." + " It is highly recommended that this property be removed as data" + " could still be written to this volume.", aBasePath, Property.INSTANCE_VOLUMES_REPLACEMENTS, Property.INSTANCE_VOLUMES);
        }
    }
    try {
        int persistentVersion = serverDirs.getAccumuloPersistentVersion(versionPath.getFileSystem(hadoopConf), versionPath);
        if (persistentVersion != AccumuloDataVersion.get()) {
            throw new IOException("Accumulo " + Constants.VERSION + " cannot initialize data version " + persistentVersion);
        }
    } catch (IOException e) {
        log.error("Problem getting accumulo data version", e);
        return false;
    }
    return createDirs(fs, iid, uninitializedDirs);
}
Also used : Path(org.apache.hadoop.fs.Path) InstanceId(org.apache.accumulo.core.data.InstanceId) IOException(java.io.IOException) HashSet(java.util.HashSet)
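The complementary read path used above, VolumeManager.getInstanceIDFromHdfs, recovers the id of an already initialized volume. A minimal sketch of that lookup in isolation (the base volume URI below is a placeholder, and the class name is illustrative):

import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.data.InstanceId;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class ReadInstanceIdExample {

    public static void main(String[] args) {
        // Placeholder base volume URI; substitute one of the configured instance volumes.
        Path aBasePath = new Path("hdfs://namenode:8020/accumulo");
        Path iidPath = new Path(aBasePath, Constants.INSTANCE_ID_DIR);
        // Read the instance id marker stored under the volume's instance-id directory,
        // the same call addVolumes uses above.
        InstanceId iid = VolumeManager.getInstanceIDFromHdfs(iidPath, new Configuration());
        System.out.println("existing instance id: " + iid);
    }
}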

Example 13 with InstanceId

Use of org.apache.accumulo.core.data.InstanceId in the Apache Accumulo project.

From the class ListInstances, method listInstances.

static synchronized void listInstances(String keepers, boolean printAll, boolean printErrors) {
    errors = 0;
    System.out.println("INFO : Using ZooKeepers " + keepers);
    ZooReader rdr = new ZooReader(keepers, ZOOKEEPER_TIMER_MILLIS);
    ZooCache cache = new ZooCache(rdr, null);
    TreeMap<String, InstanceId> instanceNames = getInstanceNames(rdr, printErrors);
    System.out.println();
    printHeader();
    for (Entry<String, InstanceId> entry : instanceNames.entrySet()) {
        printInstanceInfo(cache, entry.getKey(), entry.getValue(), printErrors);
    }
    TreeSet<InstanceId> instanceIds = getInstanceIDs(rdr, printErrors);
    instanceIds.removeAll(instanceNames.values());
    if (printAll) {
        for (InstanceId uuid : instanceIds) {
            printInstanceInfo(cache, null, uuid, printErrors);
        }
    } else if (!instanceIds.isEmpty()) {
        System.out.println();
        System.out.println("INFO : " + instanceIds.size()
            + " unnamed instances were not printed, run with --print-all to see all instances");
    } else {
        System.out.println();
    }
    if (!printErrors && errors > 0) {
        System.err.println("WARN : There were " + errors + " errors, run with --print-errors to see more info");
    }
}
Also used : ZooReader(org.apache.accumulo.fate.zookeeper.ZooReader) InstanceId(org.apache.accumulo.core.data.InstanceId) ZooCache(org.apache.accumulo.fate.zookeeper.ZooCache)
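listInstances resolves instance names to ids by reading the instance registry in ZooKeeper. A rough sketch of that lookup with the same ZooReader, assuming the conventional registry path constants (Constants.ZROOT and Constants.ZINSTANCES) and a placeholder connection string:

import java.nio.charset.StandardCharsets;

import org.apache.accumulo.core.Constants;
import org.apache.accumulo.core.data.InstanceId;
import org.apache.accumulo.fate.zookeeper.ZooReader;

public class InstanceNameLookupExample {

    public static void main(String[] args) throws Exception {
        // Placeholder ZooKeeper connection string and session timeout.
        ZooReader rdr = new ZooReader("zkhost:2181", 30_000);
        String instancesPath = Constants.ZROOT + Constants.ZINSTANCES;
        for (String name : rdr.getChildren(instancesPath)) {
            // Each child node is an instance name; its data is the instance id.
            byte[] data = rdr.getData(instancesPath + "/" + name);
            InstanceId iid = InstanceId.of(new String(data, StandardCharsets.UTF_8));
            System.out.println(name + " -> " + iid);
        }
    }
}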

Example 14 with InstanceId

Use of org.apache.accumulo.core.data.InstanceId in the Apache Accumulo project.

From the class UnorderedWorkAssigner, method cleanupFinishedWork.

/**
 * Iterate over the queued work to remove entries that have been completed.
 */
@Override
protected void cleanupFinishedWork() {
    final Iterator<String> work = queuedWork.iterator();
    final InstanceId instanceId = client.instanceOperations().getInstanceId();
    while (work.hasNext()) {
        String filename = work.next();
        // A null value means the work was finished (its node has been removed)
        if (zooCache.get(ZooUtil.getRoot(instanceId) + ReplicationConstants.ZOO_WORK_QUEUE + "/" + filename) == null) {
            work.remove();
        }
    }
}
Also used : InstanceId(org.apache.accumulo.core.data.InstanceId)
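For client-side code, the same identifier is available from the instance operations API, and ZooUtil.getRoot turns it into the per-instance ZooKeeper root that the queue path above is built from. A small sketch (the client properties path is a placeholder):

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.data.InstanceId;
import org.apache.accumulo.fate.zookeeper.ZooUtil;

public class ClientInstanceIdExample {

    public static void main(String[] args) {
        // Placeholder path to an accumulo-client.properties file.
        try (AccumuloClient client = Accumulo.newClient()
                .from("/path/to/accumulo-client.properties").build()) {
            InstanceId instanceId = client.instanceOperations().getInstanceId();
            // The per-instance ZooKeeper root, which the work assigner prefixes
            // onto ReplicationConstants.ZOO_WORK_QUEUE when checking for finished work.
            System.out.println("ZooKeeper root: " + ZooUtil.getRoot(instanceId));
        }
    }
}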

Example 15 with InstanceId

Use of org.apache.accumulo.core.data.InstanceId in the Apache Accumulo project.

From the class ChangeSecret, method main.

public static void main(String[] args) throws Exception {
    var siteConfig = SiteConfiguration.auto();
    var hadoopConf = new Configuration();
    Opts opts = new Opts();
    ServerContext context = opts.getServerContext();
    try (var fs = context.getVolumeManager()) {
        ServerDirs serverDirs = new ServerDirs(siteConfig, hadoopConf);
        verifyHdfsWritePermission(serverDirs, fs);
        List<String> argsList = new ArrayList<>(args.length + 2);
        argsList.add("--old");
        argsList.add("--new");
        argsList.addAll(Arrays.asList(args));
        opts.parseArgs(ChangeSecret.class.getName(), args);
        Span span = TraceUtil.startSpan(ChangeSecret.class, "main");
        try (Scope scope = span.makeCurrent()) {
            verifyAccumuloIsDown(context, opts.oldPass);
            final InstanceId newInstanceId = InstanceId.of(UUID.randomUUID());
            updateHdfs(serverDirs, fs, newInstanceId);
            rewriteZooKeeperInstance(context, newInstanceId, opts.oldPass, opts.newPass);
            if (opts.oldPass != null) {
                deleteInstance(context, opts.oldPass);
            }
            System.out.println("New instance id is " + newInstanceId);
            System.out.println("Be sure to put your new secret in accumulo.properties");
        } finally {
            span.end();
        }
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) SiteConfiguration(org.apache.accumulo.core.conf.SiteConfiguration) ServerContext(org.apache.accumulo.server.ServerContext) Scope(io.opentelemetry.context.Scope) ServerUtilOpts(org.apache.accumulo.server.cli.ServerUtilOpts) InstanceId(org.apache.accumulo.core.data.InstanceId) ArrayList(java.util.ArrayList) ServerDirs(org.apache.accumulo.server.ServerDirs) Span(io.opentelemetry.api.trace.Span)
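The value printed by "New instance id is ..." is simply the canonical UUID string wrapped by InstanceId. A tiny sketch of that relationship (assuming InstanceId exposes the canonical() accessor shared by Accumulo's other id types):

import java.util.UUID;

import org.apache.accumulo.core.data.InstanceId;

public class NewInstanceIdExample {

    public static void main(String[] args) {
        UUID uuid = UUID.randomUUID();
        InstanceId newInstanceId = InstanceId.of(uuid);
        // Printed in the same form ChangeSecret reports it.
        System.out.println("New instance id is " + newInstanceId);
        // Assumption: canonical() returns the underlying UUID string.
        System.out.println(newInstanceId.canonical().equals(uuid.toString()));
    }
}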

Aggregations

InstanceId (org.apache.accumulo.core.data.InstanceId): 23 uses
Test (org.junit.Test): 8 uses
IOException (java.io.IOException): 6 uses
ZooReaderWriter (org.apache.accumulo.fate.zookeeper.ZooReaderWriter): 6 uses
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient): 5 uses
TableId (org.apache.accumulo.core.data.TableId): 5 uses
ServerContext (org.apache.accumulo.server.ServerContext): 5 uses
Path (org.apache.hadoop.fs.Path): 5 uses
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 4 uses
KeeperException (org.apache.zookeeper.KeeperException): 4 uses
SuppressFBWarnings (edu.umd.cs.findbugs.annotations.SuppressFBWarnings): 3 uses
Socket (java.net.Socket): 3 uses
SiteConfiguration (org.apache.accumulo.core.conf.SiteConfiguration): 3 uses
ZooReader (org.apache.accumulo.fate.zookeeper.ZooReader): 3 uses
Initialize (org.apache.accumulo.server.init.Initialize): 3 uses
Configuration (org.apache.hadoop.conf.Configuration): 3 uses
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 2 uses
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 2 uses
NamespaceId (org.apache.accumulo.core.data.NamespaceId): 2 uses
AdminUtil (org.apache.accumulo.fate.AdminUtil): 2 uses