Use of org.apache.accumulo.server.AccumuloServerContext in project accumulo by apache.
From the class TotalQueuedIT, method getSyncs:
private long getSyncs() throws Exception {
  Connector c = getConnector();
  ServerConfigurationFactory confFactory = new ServerConfigurationFactory(c.getInstance());
  AccumuloServerContext context = new AccumuloServerContext(c.getInstance(), confFactory);
  // Ask the tablet servers for their status over Thrift; the sync count of the first
  // server reached is returned.
  for (String address : c.instanceOperations().getTabletServers()) {
    TabletClientService.Client client = ThriftUtil.getTServerClient(HostAndPort.fromString(address), context);
    TabletServerStatus status = client.getTabletServerStatus(null, context.rpcCreds());
    return status.syncs;
  }
  return 0;
}
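getSyncs() stops at the first tablet server it can reach. For a cluster-wide figure, a variant that totals the counter across every server could look like the sketch below; it uses only the calls shown above, plus ThriftUtil.returnClient (assumed available, as it is used elsewhere in the project) to hand the Thrift client back, and is not part of TotalQueuedIT itself.

private long getTotalSyncs() throws Exception {
  Connector c = getConnector();
  ServerConfigurationFactory confFactory = new ServerConfigurationFactory(c.getInstance());
  AccumuloServerContext context = new AccumuloServerContext(c.getInstance(), confFactory);
  long total = 0;
  for (String address : c.instanceOperations().getTabletServers()) {
    TabletClientService.Client client = ThriftUtil.getTServerClient(HostAndPort.fromString(address), context);
    try {
      // Same status RPC as above, but accumulate instead of returning early
      total += client.getTabletServerStatus(null, context.rpcCreds()).syncs;
    } finally {
      ThriftUtil.returnClient(client);
    }
  }
  return total;
}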
Use of org.apache.accumulo.server.AccumuloServerContext in project accumulo by apache.
From the class Initialize, method initialize:
private boolean initialize(Opts opts, String instanceNamePath, VolumeManager fs, String rootUser) {
  UUID uuid = UUID.randomUUID();
  // the actual disk locations of the root table and tablets
  String[] configuredVolumes = VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance());
  VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(ChooserScope.INIT);
  final String rootTabletDir = new Path(fs.choose(chooserEnv, configuredVolumes) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + RootTable.ID + RootTable.ROOT_TABLET_LOCATION).toString();
  try {
    initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTabletDir);
  } catch (Exception e) {
    log.error("FATAL: Failed to initialize zookeeper", e);
    return false;
  }
  try {
    initFileSystem(opts, fs, uuid, rootTabletDir);
  } catch (Exception e) {
log.error("FATAL Failed to initialize filesystem", e);
    if (SiteConfiguration.getInstance().get(Property.INSTANCE_VOLUMES).trim().equals("")) {
      Configuration fsConf = CachedConfiguration.getInstance();
      final String defaultFsUri = "file:///";
      String fsDefaultName = fsConf.get("fs.default.name", defaultFsUri), fsDefaultFS = fsConf.get("fs.defaultFS", defaultFsUri);
      // Try to determine when we couldn't find an appropriate core-site.xml on the classpath
      if (defaultFsUri.equals(fsDefaultName) && defaultFsUri.equals(fsDefaultFS)) {
        log.error("FATAL: Default filesystem value ('fs.defaultFS' or 'fs.default.name') of '{}' was found in the Hadoop configuration", defaultFsUri);
        log.error("FATAL: Please ensure that the Hadoop core-site.xml is on the classpath using 'general.classpaths' in accumulo-site.xml");
      }
    }
    return false;
  }
  final Instance instance = HdfsZooInstance.getInstance();
  final ServerConfigurationFactory confFactory = new ServerConfigurationFactory(instance);
  // If SASL is enabled and the user running init has no Kerberos credentials of their own,
  // fall back to the credentials present in accumulo-site.xml that the servers will use themselves.
  try {
    final SiteConfiguration siteConf = confFactory.getSiteConfiguration();
    if (siteConf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
      final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
      // We don't have any valid creds to talk to HDFS
      if (!ugi.hasKerberosCredentials()) {
        final String accumuloKeytab = siteConf.get(Property.GENERAL_KERBEROS_KEYTAB), accumuloPrincipal = siteConf.get(Property.GENERAL_KERBEROS_PRINCIPAL);
        // Fail if the site configuration doesn't contain appropriate credentials to login as servers
        if (StringUtils.isBlank(accumuloKeytab) || StringUtils.isBlank(accumuloPrincipal)) {
          log.error("FATAL: No Kerberos credentials provided, and Accumulo is not properly configured for server login");
          return false;
        }
        log.info("Logging in as {} with {}", accumuloPrincipal, accumuloKeytab);
        // Login using the keytab as the 'accumulo' user
        UserGroupInformation.loginUserFromKeytab(accumuloPrincipal, accumuloKeytab);
      }
    }
  } catch (IOException e) {
    log.error("FATAL: Failed to get the Kerberos user", e);
    return false;
  }
  try {
    AccumuloServerContext context = new AccumuloServerContext(instance, confFactory);
    initSecurity(context, opts, uuid.toString(), rootUser);
  } catch (Exception e) {
    log.error("FATAL: Failed to initialize security", e);
    return false;
  }
  if (opts.uploadAccumuloSite) {
    try {
      log.info("Uploading properties in accumulo-site.xml to Zookeeper. Properties that cannot be set in Zookeeper will be skipped:");
      Map<String, String> entries = new TreeMap<>();
      SiteConfiguration.getInstance().getProperties(entries, x -> true, false);
      for (Map.Entry<String, String> entry : entries.entrySet()) {
        String key = entry.getKey();
        String value = entry.getValue();
        if (Property.isValidZooPropertyKey(key)) {
          SystemPropUtil.setSystemProperty(key, value);
          log.info("Uploaded - {} = {}", key, Property.isSensitive(key) ? "<hidden>" : value);
        } else {
          log.info("Skipped - {} = {}", key, Property.isSensitive(key) ? "<hidden>" : value);
        }
      }
    } catch (Exception e) {
      log.error("FATAL: Failed to upload accumulo-site.xml to Zookeeper", e);
      return false;
    }
  }
  return true;
}
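The Kerberos fallback in the middle of initialize() carries the most reasoning: only when SASL is enabled and the current user has no ticket does Accumulo log in from the keytab named in accumulo-site.xml. Below is a minimal, self-contained sketch of that decision, assuming only Hadoop's UserGroupInformation API; the class and method names (KerberosFallbackSketch, ensureServerLogin) are made up for illustration and are not part of Initialize.

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;

class KerberosFallbackSketch {
  // Returns true if the process ends up with usable Kerberos credentials (or none are needed).
  static boolean ensureServerLogin(boolean saslEnabled, String principal, String keytab) {
    if (!saslEnabled) {
      return true; // nothing to do when RPC SASL is disabled
    }
    try {
      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
      if (ugi.hasKerberosCredentials()) {
        return true; // the user running init is already logged in
      }
      if (principal == null || principal.isEmpty() || keytab == null || keytab.isEmpty()) {
        return false; // no server credentials configured in accumulo-site.xml
      }
      // Fall back to the server's own keytab, as initialize() does above
      UserGroupInformation.loginUserFromKeytab(principal, keytab);
      return true;
    } catch (IOException e) {
      return false;
    }
  }
}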
Use of org.apache.accumulo.server.AccumuloServerContext in project accumulo by apache.
From the class RandomizeVolumes, method main:
public static void main(String[] args) throws AccumuloException, AccumuloSecurityException {
  ClientOnRequiredTable opts = new ClientOnRequiredTable();
  opts.parseArgs(RandomizeVolumes.class.getName(), args);
  Connector c;
  if (opts.getToken() == null) {
    // No client token was supplied, so connect with the server's own credentials
    Instance instance = opts.getInstance();
    AccumuloServerContext context = new AccumuloServerContext(instance, new ServerConfigurationFactory(instance));
    c = context.getConnector();
  } else {
    c = opts.getConnector();
  }
  try {
    int status = randomize(c, opts.getTableName());
    System.exit(status);
  } catch (Exception ex) {
    log.error("{}", ex.getMessage(), ex);
    System.exit(4);
  }
}
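A note on the two branches above: when no token is supplied, the tool builds an AccumuloServerContext and obtains a Connector with the server's own credentials rather than performing a client login; otherwise the ordinary client Connector from the parsed options is used. The same randomize step is also called programmatically in the Admin.execute snippet further down, where an existing Connector is passed in directly as RandomizeVolumes.randomize(context.getConnector(), tableName).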
Use of org.apache.accumulo.server.AccumuloServerContext in project accumulo by apache.
From the class Admin, method execute:
@Override
public void execute(final String[] args) {
  boolean everything;
  AdminOpts opts = new AdminOpts();
  JCommander cl = new JCommander(opts);
  cl.setProgramName("accumulo admin");
  CheckTabletsCommand checkTabletsCommand = new CheckTabletsCommand();
  cl.addCommand("checkTablets", checkTabletsCommand);
  ListInstancesCommand listIntancesOpts = new ListInstancesCommand();
  cl.addCommand("listInstances", listIntancesOpts);
  PingCommand pingCommand = new PingCommand();
  cl.addCommand("ping", pingCommand);
  DumpConfigCommand dumpConfigCommand = new DumpConfigCommand();
  cl.addCommand("dumpConfig", dumpConfigCommand);
  VolumesCommand volumesCommand = new VolumesCommand();
  cl.addCommand("volumes", volumesCommand);
  StopCommand stopOpts = new StopCommand();
  cl.addCommand("stop", stopOpts);
  StopAllCommand stopAllOpts = new StopAllCommand();
  cl.addCommand("stopAll", stopAllOpts);
  StopMasterCommand stopMasterOpts = new StopMasterCommand();
  cl.addCommand("stopMaster", stopMasterOpts);
  RandomizeVolumesCommand randomizeVolumesOpts = new RandomizeVolumesCommand();
  cl.addCommand("randomizeVolumes", randomizeVolumesOpts);
  cl.parse(args);
  if (opts.help || cl.getParsedCommand() == null) {
    cl.usage();
    return;
  }
  AccumuloConfiguration siteConf = SiteConfiguration.getInstance();
  // Login as the server on secure HDFS
  if (siteConf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
    SecurityUtil.serverLogin(siteConf);
  }
  Instance instance = opts.getInstance();
  ServerConfigurationFactory confFactory = new ServerConfigurationFactory(instance);
  try {
    ClientContext context = new AccumuloServerContext(instance, confFactory);
    int rc = 0;
    if (cl.getParsedCommand().equals("listInstances")) {
      ListInstances.listInstances(instance.getZooKeepers(), listIntancesOpts.printAll, listIntancesOpts.printErrors);
    } else if (cl.getParsedCommand().equals("ping")) {
      if (ping(context, pingCommand.args) != 0)
        rc = 4;
    } else if (cl.getParsedCommand().equals("checkTablets")) {
      System.out.println("\n*** Looking for offline tablets ***\n");
      if (FindOfflineTablets.findOffline(context, checkTabletsCommand.tableName) != 0)
        rc = 5;
      System.out.println("\n*** Looking for missing files ***\n");
      if (checkTabletsCommand.tableName == null) {
        if (RemoveEntriesForMissingFiles.checkAllTables(context, checkTabletsCommand.fixFiles) != 0)
          rc = 6;
      } else {
        if (RemoveEntriesForMissingFiles.checkTable(context, checkTabletsCommand.tableName, checkTabletsCommand.fixFiles) != 0)
          rc = 6;
      }
    } else if (cl.getParsedCommand().equals("stop")) {
      stopTabletServer(context, stopOpts.args, opts.force);
    } else if (cl.getParsedCommand().equals("dumpConfig")) {
      printConfig(context, dumpConfigCommand);
    } else if (cl.getParsedCommand().equals("volumes")) {
      ListVolumesUsed.listVolumes(context);
    } else if (cl.getParsedCommand().equals("randomizeVolumes")) {
      rc = RandomizeVolumes.randomize(context.getConnector(), randomizeVolumesOpts.tableName);
    } else {
      everything = cl.getParsedCommand().equals("stopAll");
      if (everything)
        flushAll(context);
      stopServer(context, everything);
    }
    if (rc != 0)
      System.exit(rc);
  } catch (AccumuloException e) {
    log.error("{}", e.getMessage(), e);
    System.exit(1);
  } catch (AccumuloSecurityException e) {
    log.error("{}", e.getMessage(), e);
    System.exit(2);
  } catch (Exception e) {
    log.error("{}", e.getMessage(), e);
    System.exit(3);
  }
}
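Putting the registrations above together: the program name is set to "accumulo admin" and each subcommand is added by name, so invocations take the form accumulo admin <subcommand> [options], for example accumulo admin ping or accumulo admin stopAll; if no subcommand is parsed, or the help option is set, JCommander's usage text is printed and the method returns. The exit code then encodes the failure mode: 4 for a failed ping, 5 for offline tablets, 6 for missing files, and 1, 2, or 3 for AccumuloException, AccumuloSecurityException, or any other exception, respectively.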
Use of org.apache.accumulo.server.AccumuloServerContext in project accumulo by apache.
From the class WebViewsIT, method testGetTablesConstraintPassing:
/**
 * Test path tables/{tableID} which passes constraints. Once the constraints pass, the underlying
 * logic is executed, so we need to mock a certain amount of it.
 *
 * Note: If you get test failures here due to a 500 server response, it's likely that an underlying
 * class or method call was added/modified and needs mocking.
 *
 * Note: To get the proper response code back, you need to make sure Jersey has a registered
 * MessageBodyWriter capable of serializing/writing the object returned from your endpoint. We're
 * using a simple stubbed-out inner class, HashMapWriter, for this.
 *
 * @throws Exception
 *           not expected
 */
@Test
public void testGetTablesConstraintPassing() throws Exception {
  Instance instanceMock = EasyMock.createMock(Instance.class);
  expect(instanceMock.getInstanceID()).andReturn("foo").anyTimes();
  AccumuloServerContext contextMock = EasyMock.createMock(AccumuloServerContext.class);
  expect(contextMock.getInstance()).andReturn(instanceMock).anyTimes();
  expect(contextMock.getConfiguration()).andReturn(DefaultConfiguration.getInstance()).anyTimes();
  PowerMock.mockStatic(Monitor.class);
  expect(Monitor.getContext()).andReturn(contextMock).anyTimes();
  PowerMock.mockStatic(Tables.class);
  expect(Tables.getTableName(instanceMock, Table.ID.of("foo"))).andReturn("bar");
  PowerMock.replayAll();
  org.easymock.EasyMock.replay(instanceMock, contextMock);
  // Using the mocks we can verify that the getModel method gets called via the debugger;
  // however, it's difficult to continue to mock through the Jersey MVC code for the properly
  // built response. Our silly HashMapWriter, registered in the configure method, gets wired in and used here.
  Response output = target("tables/foo").request().get();
  Assert.assertEquals("should return status 200", 200, output.getStatus());
  String responseBody = output.readEntity(String.class);
  Assert.assertTrue(responseBody.contains("tableID=foo") && responseBody.contains("table=bar"));
}
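The HashMapWriter that the comment refers to is not shown in this snippet. The assertions on "tableID=foo" and "table=bar" suggest it simply writes out the model map's toString() form; a minimal JAX-RS MessageBodyWriter along those lines (an illustrative sketch, not necessarily the project's actual inner class) would be:

import java.io.IOException;
import java.io.OutputStream;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyWriter;
import javax.ws.rs.ext.Provider;

@Provider
public class HashMapWriter implements MessageBodyWriter<HashMap<?,?>> {

  @Override
  public boolean isWriteable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
    return HashMap.class.isAssignableFrom(type);
  }

  @Override
  public long getSize(HashMap<?,?> map, Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
    return -1; // let the container compute the content length
  }

  @Override
  public void writeTo(HashMap<?,?> map, Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType,
      MultivaluedMap<String,Object> httpHeaders, OutputStream entityStream) throws IOException, WebApplicationException {
    // HashMap.toString() renders entries as "{tableID=foo, table=bar}", which is what the test checks for
    entityStream.write(map.toString().getBytes(StandardCharsets.UTF_8));
  }
}

Registering such a writer with the test's ResourceConfig is what lets Jersey serialize the endpoint's model and return a 200 instead of a 500.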