Usage of org.apache.accumulo.server.conf.ServerConfigurationFactory in the Apache Accumulo project.
The class CollectTabletStats, method main.
/**
 * Entry point: measures read performance for a sample of tablets of one table.
 * Runs several timed passes (raw file reads, reads through the system/table
 * iterator stacks, and scans through the Accumulo client API), then computes
 * per-tablet stats on a thread pool.
 *
 * @param args command-line arguments parsed into {@link CollectOptions} and
 *     {@link ScannerOpts}
 * @throws Exception on any failure to connect, locate tablets, or read files
 */
public static void main(String[] args) throws Exception {
    final CollectOptions opts = new CollectOptions();
    final ScannerOpts scanOpts = new ScannerOpts();
    opts.parseArgs(CollectTabletStats.class.getName(), args, scanOpts);

    // Optional comma-separated column restriction; empty array means all columns.
    String[] columnsTmp = new String[] {};
    if (opts.columns != null)
        columnsTmp = opts.columns.split(",");
    final String[] columns = columnsTmp;

    final VolumeManager fs = VolumeManagerImpl.get();
    Instance instance = opts.getInstance();
    final ServerConfigurationFactory sconf = new ServerConfigurationFactory(instance);
    Credentials creds = new Credentials(opts.getPrincipal(), opts.getToken());
    ClientContext context = new ClientContext(instance, creds, sconf.getSystemConfiguration());

    Table.ID tableId = Tables.getTableId(instance, opts.getTableName());
    if (tableId == null) {
        log.error("Unable to find table named {}", opts.getTableName());
        System.exit(-1);
    }

    // Pick candidate tablets (local or far, per options); need at least one per thread.
    TreeMap<KeyExtent, String> tabletLocations = new TreeMap<>();
    List<KeyExtent> candidates = findTablets(context, !opts.selectFarTablets, opts.getTableName(), tabletLocations);
    if (candidates.size() < opts.numThreads) {
        System.err.println("ERROR : Unable to find " + opts.numThreads + " " + (opts.selectFarTablets ? "far" : "local") + " tablets");
        System.exit(-1);
    }

    List<KeyExtent> tabletsToTest = selectRandomTablets(opts.numThreads, candidates);

    // Snapshot each chosen tablet's file list once so all passes read the same files.
    Map<KeyExtent, List<FileRef>> tabletFiles = new HashMap<>();
    for (KeyExtent ke : tabletsToTest) {
        List<FileRef> files = getTabletFiles(context, ke);
        tabletFiles.put(ke, files);
    }

    System.out.println();
    System.out.println("run location : " + InetAddress.getLocalHost().getHostName() + "/" + InetAddress.getLocalHost().getHostAddress());
    System.out.println("num threads : " + opts.numThreads);
    System.out.println("table : " + opts.getTableName());
    System.out.println("table id : " + tableId);

    for (KeyExtent ke : tabletsToTest) {
        System.out.println("\t *** Information about tablet " + ke.getUUID() + " *** ");
        System.out.println("\t\t# files in tablet : " + tabletFiles.get(ke).size());
        System.out.println("\t\ttablet location : " + tabletLocations.get(ke));
        reportHdfsBlockLocations(tabletFiles.get(ke));
    }

    // printf (not println) so the %n conversions are rendered as newlines.
    System.out.printf("%n*** RUNNING TEST ***%n");

    ExecutorService threadPool = Executors.newFixedThreadPool(opts.numThreads);

    // Pass 1: read tablet files directly, no iterator stack.
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        for (final KeyExtent ke : tabletsToTest) {
            final List<FileRef> files = tabletFiles.get(ke);
            Test test = new Test(ke) {
                @Override
                public int runTest() throws Exception {
                    return readFiles(fs, sconf.getSystemConfiguration(), files, ke, columns);
                }
            };
            tests.add(test);
        }
        runTest("read files", tests, opts.numThreads, threadPool);
    }

    // Pass 2: read through the system iterator stack (useTableIterators == false).
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        for (final KeyExtent ke : tabletsToTest) {
            final List<FileRef> files = tabletFiles.get(ke);
            Test test = new Test(ke) {
                @Override
                public int runTest() throws Exception {
                    return readFilesUsingIterStack(fs, sconf, files, opts.auths, ke, columns, false);
                }
            };
            tests.add(test);
        }
        runTest("read tablet files w/ system iter stack", tests, opts.numThreads, threadPool);
    }

    // Pass 3: same but with the table's configured iterators as well.
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        for (final KeyExtent ke : tabletsToTest) {
            final List<FileRef> files = tabletFiles.get(ke);
            Test test = new Test(ke) {
                @Override
                public int runTest() throws Exception {
                    return readFilesUsingIterStack(fs, sconf, files, opts.auths, ke, columns, true);
                }
            };
            tests.add(test);
        }
        runTest("read tablet files w/ table iter stack", tests, opts.numThreads, threadPool);
    }

    // Pass 4: full client-side scan through Accumulo.
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        final Connector conn = opts.getConnector();
        for (final KeyExtent ke : tabletsToTest) {
            Test test = new Test(ke) {
                @Override
                public int runTest() throws Exception {
                    return scanTablet(conn, opts.getTableName(), opts.auths, scanOpts.scanBatchSize, ke.getPrevEndRow(), ke.getEndRow(), columns);
                }
            };
            tests.add(test);
        }
        runTest("read tablet data through accumulo", tests, opts.numThreads, threadPool);
    }

    // Finally, compute per-tablet stats asynchronously; shutdown() lets the
    // already-submitted tasks finish before the pool's threads exit.
    for (final KeyExtent ke : tabletsToTest) {
        final Connector conn = opts.getConnector();
        threadPool.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    calcTabletStats(conn, opts.getTableName(), opts.auths, scanOpts.scanBatchSize, ke, columns);
                } catch (Exception e) {
                    log.error("Failed to calculate tablet stats.", e);
                }
            }
        });
    }

    threadPool.shutdown();
}
Usage of org.apache.accumulo.server.conf.ServerConfigurationFactory in the Apache Accumulo project.
The class Initialize, method initialize.
/**
 * Performs instance initialization: seeds ZooKeeper, creates the filesystem
 * layout for the root tablet, establishes Kerberos credentials when SASL RPC
 * is enabled, initializes security with the given root user, and optionally
 * uploads accumulo-site.xml properties to ZooKeeper.
 *
 * @param opts parsed initialization options
 * @param instanceNamePath ZooKeeper path for the instance name
 * @param fs volume manager used to create the on-disk layout
 * @param rootUser principal to set up as the root user
 * @return {@code true} on success; {@code false} if any step fails (the
 *     failure is logged at FATAL level)
 */
private boolean initialize(Opts opts, String instanceNamePath, VolumeManager fs, String rootUser) {
    UUID uuid = UUID.randomUUID();
    // the actual disk locations of the root table and tablets
    String[] configuredVolumes = VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance());
    VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.INIT);
    final String rootTabletDir = new Path(fs.choose(chooserEnv, configuredVolumes) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + RootTable.ID + RootTable.ROOT_TABLET_LOCATION).toString();
    try {
        initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTabletDir);
    } catch (Exception e) {
        log.error("FATAL: Failed to initialize zookeeper", e);
        return false;
    }
    try {
        initFileSystem(opts, fs, uuid, rootTabletDir);
    } catch (Exception e) {
        // "FATAL:" prefix kept consistent with the other fatal log messages in this method.
        log.error("FATAL: Failed to initialize filesystem", e);
        if (SiteConfiguration.getInstance().get(Property.INSTANCE_VOLUMES).trim().equals("")) {
            Configuration fsConf = CachedConfiguration.getInstance();
            final String defaultFsUri = "file:///";
            String fsDefaultName = fsConf.get("fs.default.name", defaultFsUri), fsDefaultFS = fsConf.get("fs.defaultFS", defaultFsUri);
            // Try to determine when we couldn't find an appropriate core-site.xml on the classpath
            if (defaultFsUri.equals(fsDefaultName) && defaultFsUri.equals(fsDefaultFS)) {
                log.error("FATAL: Default filesystem value ('fs.defaultFS' or 'fs.default.name') of '{}' was found in the Hadoop configuration", defaultFsUri);
                log.error("FATAL: Please ensure that the Hadoop core-site.xml is on the classpath using 'general.classpaths' in accumulo-site.xml");
            }
        }
        return false;
    }
    final Instance instance = HdfsZooInstance.getInstance();
    final ServerConfigurationFactory confFactory = new ServerConfigurationFactory(instance);
    // If they did not, fall back to the credentials present in accumulo-site.xml that the servers will use themselves.
    try {
        final SiteConfiguration siteConf = confFactory.getSiteConfiguration();
        if (siteConf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
            final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            // We don't have any valid creds to talk to HDFS
            if (!ugi.hasKerberosCredentials()) {
                final String accumuloKeytab = siteConf.get(Property.GENERAL_KERBEROS_KEYTAB), accumuloPrincipal = siteConf.get(Property.GENERAL_KERBEROS_PRINCIPAL);
                // Fail if the site configuration doesn't contain appropriate credentials to login as servers
                if (StringUtils.isBlank(accumuloKeytab) || StringUtils.isBlank(accumuloPrincipal)) {
                    log.error("FATAL: No Kerberos credentials provided, and Accumulo is not properly configured for server login");
                    return false;
                }
                log.info("Logging in as {} with {}", accumuloPrincipal, accumuloKeytab);
                // Login using the keytab as the 'accumulo' user
                UserGroupInformation.loginUserFromKeytab(accumuloPrincipal, accumuloKeytab);
            }
        }
    } catch (IOException e) {
        log.error("FATAL: Failed to get the Kerberos user", e);
        return false;
    }
    try {
        AccumuloServerContext context = new AccumuloServerContext(instance, confFactory);
        initSecurity(context, opts, uuid.toString(), rootUser);
    } catch (Exception e) {
        log.error("FATAL: Failed to initialize security", e);
        return false;
    }
    if (opts.uploadAccumuloSite) {
        try {
            log.info("Uploading properties in accumulo-site.xml to Zookeeper. Properties that cannot be set in Zookeeper will be skipped:");
            Map<String, String> entries = new TreeMap<>();
            SiteConfiguration.getInstance().getProperties(entries, x -> true, false);
            for (Map.Entry<String, String> entry : entries.entrySet()) {
                String key = entry.getKey();
                String value = entry.getValue();
                // Only properties that are valid ZooKeeper-settable keys are uploaded.
                if (Property.isValidZooPropertyKey(key)) {
                    SystemPropUtil.setSystemProperty(key, value);
                    log.info("Uploaded - {} = {}", key, Property.isSensitive(key) ? "<hidden>" : value);
                } else {
                    log.info("Skipped - {} = {}", key, Property.isSensitive(key) ? "<hidden>" : value);
                }
            }
        } catch (Exception e) {
            log.error("FATAL: Failed to upload accumulo-site.xml to Zookeeper", e);
            return false;
        }
    }
    return true;
}
Usage of org.apache.accumulo.server.conf.ServerConfigurationFactory in the Apache Accumulo project.
The class PreferredVolumeChooser, method loadConfFactory.
// visible (not private) for testing
ServerConfigurationFactory loadConfFactory() {
    // Single-check lazy initialization: read the field into a local exactly once
    // so the method always returns a consistent, non-null reference.
    ServerConfigurationFactory factory = lazyConfFactory;
    if (factory != null) {
        return factory;
    }
    // Under contention, racing threads may each construct a factory and the last
    // write wins; the throwaway initializations are an accepted cost of this idiom.
    factory = new ServerConfigurationFactory(HdfsZooInstance.getInstance());
    lazyConfFactory = factory;
    return factory;
}
Usage of org.apache.accumulo.server.conf.ServerConfigurationFactory in the Apache Accumulo project.
The class LoginProperties, method execute.
/**
 * Prints the authentication token types supported by the configured
 * {@link Authenticator}, along with the properties each token type accepts.
 *
 * @param args unused command-line arguments
 * @throws Exception if the authenticator class cannot be loaded or instantiated
 */
@Override
public void execute(String[] args) throws Exception {
    // Resolve the configured authenticator implementation via the Accumulo VFS class loader.
    AccumuloConfiguration config = new ServerConfigurationFactory(HdfsZooInstance.getInstance()).getSystemConfiguration();
    Authenticator authenticator = AccumuloVFSClassLoader.getClassLoader()
        .loadClass(config.get(Property.INSTANCE_SECURITY_AUTHENTICATOR))
        .asSubclass(Authenticator.class).newInstance();
    System.out.println("Supported token types for " + authenticator.getClass().getName() + " are : ");
    // One instantiation per token type is enough to query its accepted properties;
    // the previous extra pass that collected properties into an unused list was removed.
    for (Class<? extends AuthenticationToken> tokenType : authenticator.getSupportedTokenTypes()) {
        System.out.println("\t" + tokenType.getName() + ", which accepts the following properties : ");
        for (TokenProperty tokenProperty : tokenType.newInstance().getProperties()) {
            System.out.println("\t\t" + tokenProperty);
        }
        System.out.println();
    }
}
Usage of org.apache.accumulo.server.conf.ServerConfigurationFactory in the Apache Accumulo project.
The class RandomizeVolumes, method main.
/**
 * Entry point: randomizes the volumes used by a table's files, exiting with
 * the status of the randomization (or 4 on unexpected failure).
 *
 * @param args command-line arguments parsed into {@link ClientOnRequiredTable}
 * @throws AccumuloException if client setup fails
 * @throws AccumuloSecurityException if authentication fails
 */
public static void main(String[] args) throws AccumuloException, AccumuloSecurityException {
    ClientOnRequiredTable opts = new ClientOnRequiredTable();
    opts.parseArgs(RandomizeVolumes.class.getName(), args);
    final Connector connector;
    if (opts.getToken() != null) {
        // A token was supplied on the command line: connect as that user.
        connector = opts.getConnector();
    } else {
        // No token: fall back to the server's own context and credentials.
        Instance instance = opts.getInstance();
        ServerConfigurationFactory confFactory = new ServerConfigurationFactory(instance);
        connector = new AccumuloServerContext(instance, confFactory).getConnector();
    }
    try {
        System.exit(randomize(connector, opts.getTableName()));
    } catch (Exception ex) {
        log.error("{}", ex.getMessage(), ex);
        System.exit(4);
    }
}
Aggregations