Use of org.apache.accumulo.server.conf.ServerConfigurationFactory in project accumulo by apache.
The class Master, method main:
public static void main(String[] args) throws Exception {
  try {
    final String app = "master";
    ServerOpts opts = new ServerOpts();
    opts.parseArgs(app, args);
    // Perform the server (Kerberos) login using the site configuration.
    SecurityUtil.serverLogin(SiteConfiguration.getInstance());
    String hostname = opts.getAddress();
    Instance instance = HdfsZooInstance.getInstance();
    // Resolve server-side configuration for this instance.
    ServerConfigurationFactory conf = new ServerConfigurationFactory(instance);
    VolumeManager fs = VolumeManagerImpl.get();
    MetricsSystemHelper.configure(Master.class.getSimpleName());
    Accumulo.init(fs, instance, conf, app);
    Master master = new Master(instance, conf, fs, hostname);
    // Enable distributed tracing with the resolved system configuration.
    DistributedTrace.enable(hostname, app, conf.getSystemConfiguration());
    master.run();
  } catch (Exception ex) {
    log.error("Unexpected exception, exiting", ex);
    System.exit(1);
  } finally {
    DistributedTrace.disable();
  }
}
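Every server entry point in this section follows the same bootstrap shape: obtain the HDFS/ZooKeeper instance, wrap it in a ServerConfigurationFactory, and hand the resolved system configuration to DistributedTrace before running the service. A condensed, hypothetical sketch of that shape, using only calls shown in the snippets here (the "localhost" and "myservice" literals are placeholders, not values from the project):

  // Condensed sketch of the shared bootstrap pattern; not a verbatim Accumulo entry point.
  Instance instance = HdfsZooInstance.getInstance();
  ServerConfigurationFactory conf = new ServerConfigurationFactory(instance);
  try {
    DistributedTrace.enable("localhost", "myservice", conf.getSystemConfiguration());
    // ... start and run the service ...
  } finally {
    DistributedTrace.disable();
  }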
Use of org.apache.accumulo.server.conf.ServerConfigurationFactory in project accumulo by apache.
The class SimpleGarbageCollector, method main:
public static void main(String[] args) throws IOException {
  final String app = "gc";
  Opts opts = new Opts();
  opts.parseArgs(app, args);
  SecurityUtil.serverLogin(SiteConfiguration.getInstance());
  Instance instance = HdfsZooInstance.getInstance();
  // Resolve server-side configuration for this instance.
  ServerConfigurationFactory conf = new ServerConfigurationFactory(instance);
  log.info("Version " + Constants.VERSION);
  log.info("Instance " + instance.getInstanceID());
  final VolumeManager fs = VolumeManagerImpl.get();
  MetricsSystemHelper.configure(SimpleGarbageCollector.class.getSimpleName());
  Accumulo.init(fs, instance, conf, app);
  SimpleGarbageCollector gc = new SimpleGarbageCollector(opts, instance, fs, conf);
  DistributedTrace.enable(opts.getAddress(), app, conf.getSystemConfiguration());
  try {
    gc.run();
  } finally {
    DistributedTrace.disable();
  }
}
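Once the factory (conf above) is constructed, individual properties are read from the AccumuloConfiguration it returns, as the mocked get(Property)/getBoolean(Property) calls in the test further down illustrate. A hedged sketch of such a read; Property.GC_CYCLE_DELAY and getTimeInMillis() are assumed here for illustration and are not taken from the snippet above:

  // Hypothetical illustration: reading a gc-related property from the resolved system config.
  AccumuloConfiguration systemConfig = conf.getSystemConfiguration();
  long cycleDelay = systemConfig.getTimeInMillis(Property.GC_CYCLE_DELAY); // assumed property/getter
  log.info("Using gc cycle delay of {} ms", cycleDelay);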
Use of org.apache.accumulo.server.conf.ServerConfigurationFactory in project accumulo by apache.
The class ZombieTServer, method main:
public static void main(String[] args) throws Exception {
  // Pick a quasi-random port for this fake tablet server.
  Random random = new Random(System.currentTimeMillis() % 1000);
  int port = random.nextInt(30000) + 2000;
  Instance instance = HdfsZooInstance.getInstance();
  AccumuloServerContext context = new AccumuloServerContext(instance, new ServerConfigurationFactory(instance));
  TransactionWatcher watcher = new TransactionWatcher();
  final ThriftClientHandler tch = new ThriftClientHandler(context, watcher);
  Processor<Iface> processor = new Processor<>(tch);
  ServerAddress serverPort = TServerUtils.startTServer(context.getConfiguration(), ThriftServerType.CUSTOM_HS_HA, processor, "ZombieTServer", "walking dead", 2, 1,
      1000, 10 * 1024 * 1024, null, null, -1, HostAndPort.fromParts("0.0.0.0", port));
  String addressString = serverPort.address.toString();
  // Register this server under the tablet servers node in ZooKeeper and try to acquire its lock.
  String zPath = ZooUtil.getRoot(context.getInstance()) + Constants.ZTSERVERS + "/" + addressString;
  ZooReaderWriter zoo = ZooReaderWriter.getInstance();
  zoo.putPersistentData(zPath, new byte[] {}, NodeExistsPolicy.SKIP);
  ZooLock zlock = new ZooLock(zPath);
  LockWatcher lw = new LockWatcher() {

    @Override
    public void lostLock(final LockLossReason reason) {
      try {
        tch.halt(Tracer.traceInfo(), null, null);
      } catch (Exception ex) {
        log.error("Exception", ex);
        System.exit(1);
      }
    }

    @Override
    public void unableToMonitorLockNode(Throwable e) {
      try {
        tch.halt(Tracer.traceInfo(), null, null);
      } catch (Exception ex) {
        log.error("Exception", ex);
        System.exit(1);
      }
    }
  };
  byte[] lockContent = new ServerServices(addressString, Service.TSERV_CLIENT).toString().getBytes(UTF_8);
  if (zlock.tryLock(lw, lockContent)) {
    log.debug("Obtained tablet server lock {}", zlock.getLockPath());
  }
  // modify metadata
  synchronized (tch) {
    while (!tch.halted) {
      tch.wait();
    }
  }
  System.exit(0);
}
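ZombieTServer and SplitRecoveryIT below both wrap the factory in an AccumuloServerContext rather than using it directly. A minimal sketch of that construction; the AccumuloConfiguration return type of getConfiguration() is an assumption based on how it is passed to TServerUtils.startTServer() above:

  // Sketch: building a server context from a ServerConfigurationFactory.
  Instance instance = HdfsZooInstance.getInstance();
  ServerConfigurationFactory factory = new ServerConfigurationFactory(instance);
  AccumuloServerContext context = new AccumuloServerContext(instance, factory);
  // The context hands the resolved configuration to callers such as TServerUtils.startTServer().
  AccumuloConfiguration tserverConf = context.getConfiguration();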
Use of org.apache.accumulo.server.conf.ServerConfigurationFactory in project accumulo by apache.
The class CloseWriteAheadLogReferencesIT, method setupEasyMockStuff:
@Before
public void setupEasyMockStuff() {
  Instance mockInst = createMock(Instance.class);
  SiteConfiguration siteConfig = EasyMock.createMock(SiteConfiguration.class);
  expect(mockInst.getInstanceID()).andReturn(testName.getMethodName()).anyTimes();
  expect(mockInst.getZooKeepers()).andReturn("localhost").anyTimes();
  expect(mockInst.getZooKeepersSessionTimeOut()).andReturn(30000).anyTimes();
  final AccumuloConfiguration systemConf = new ConfigurationCopy(new HashMap<>());
  ServerConfigurationFactory factory = createMock(ServerConfigurationFactory.class);
  expect(factory.getSystemConfiguration()).andReturn(systemConf).anyTimes();
  expect(factory.getSiteConfiguration()).andReturn(siteConfig).anyTimes();
  // Just make the SiteConfiguration delegate to our AccumuloConfiguration
  // Presently, we only need get(Property) and iterator().
  EasyMock.expect(siteConfig.get(EasyMock.anyObject(Property.class))).andAnswer(new IAnswer<String>() {

    @Override
    public String answer() {
      Object[] args = EasyMock.getCurrentArguments();
      return systemConf.get((Property) args[0]);
    }
  }).anyTimes();
  EasyMock.expect(siteConfig.getBoolean(EasyMock.anyObject(Property.class))).andAnswer(new IAnswer<Boolean>() {

    @Override
    public Boolean answer() {
      Object[] args = EasyMock.getCurrentArguments();
      return systemConf.getBoolean((Property) args[0]);
    }
  }).anyTimes();
  EasyMock.expect(siteConfig.iterator()).andAnswer(new IAnswer<Iterator<Entry<String,String>>>() {

    @Override
    public Iterator<Entry<String,String>> answer() {
      return systemConf.iterator();
    }
  }).anyTimes();
  replay(mockInst, factory, siteConfig);
  refs = new WrappedCloseWriteAheadLogReferences(new AccumuloServerContext(mockInst, factory));
}
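Because IAnswer has a single answer() method, the same delegation can be expressed more compactly with Java 8 lambdas and a method reference. This equivalent form is a sketch, not the project's code:

  // Sketch: lambda/method-reference form of the IAnswer stubs above.
  EasyMock.expect(siteConfig.get(EasyMock.anyObject(Property.class)))
      .andAnswer(() -> systemConf.get((Property) EasyMock.getCurrentArguments()[0])).anyTimes();
  EasyMock.expect(siteConfig.getBoolean(EasyMock.anyObject(Property.class)))
      .andAnswer(() -> systemConf.getBoolean((Property) EasyMock.getCurrentArguments()[0])).anyTimes();
  EasyMock.expect(siteConfig.iterator()).andAnswer(systemConf::iterator).anyTimes();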
Use of org.apache.accumulo.server.conf.ServerConfigurationFactory in project accumulo by apache.
The class SplitRecoveryIT, method run:
private void run() throws Exception {
  Instance inst = HdfsZooInstance.getInstance();
  AccumuloServerContext c = new AccumuloServerContext(inst, new ServerConfigurationFactory(inst));
  String zPath = ZooUtil.getRoot(inst) + "/testLock";
  IZooReaderWriter zoo = ZooReaderWriter.getInstance();
  zoo.putPersistentData(zPath, new byte[0], NodeExistsPolicy.OVERWRITE);
  ZooLock zl = new ZooLock(zPath);
  boolean gotLock = zl.tryLock(new LockWatcher() {

    @Override
    public void lostLock(LockLossReason reason) {
      System.exit(-1);
    }

    @Override
    public void unableToMonitorLockNode(Throwable e) {
      System.exit(-1);
    }
  }, "foo".getBytes(UTF_8));
  if (!gotLock) {
    System.err.println("Failed to get lock " + zPath);
  }
  // run test for a table with one tablet
  runSplitRecoveryTest(c, 0, "sp", 0, zl, nke("foo0", null, null));
  runSplitRecoveryTest(c, 1, "sp", 0, zl, nke("foo1", null, null));
  // run test for tables with two tablets, run test on first and last tablet
  runSplitRecoveryTest(c, 0, "k", 0, zl, nke("foo2", "m", null), nke("foo2", null, "m"));
  runSplitRecoveryTest(c, 1, "k", 0, zl, nke("foo3", "m", null), nke("foo3", null, "m"));
  runSplitRecoveryTest(c, 0, "o", 1, zl, nke("foo4", "m", null), nke("foo4", null, "m"));
  runSplitRecoveryTest(c, 1, "o", 1, zl, nke("foo5", "m", null), nke("foo5", null, "m"));
  // run test for table w/ three tablets, run test on middle tablet
  runSplitRecoveryTest(c, 0, "o", 1, zl, nke("foo6", "m", null), nke("foo6", "r", "m"), nke("foo6", null, "r"));
  runSplitRecoveryTest(c, 1, "o", 1, zl, nke("foo7", "m", null), nke("foo7", "r", "m"), nke("foo7", null, "r"));
  // run test for table w/ three tablets, run test on first
  runSplitRecoveryTest(c, 0, "g", 0, zl, nke("foo8", "m", null), nke("foo8", "r", "m"), nke("foo8", null, "r"));
  runSplitRecoveryTest(c, 1, "g", 0, zl, nke("foo9", "m", null), nke("foo9", "r", "m"), nke("foo9", null, "r"));
  // run test for table w/ three tablets, run test on last tablet
  runSplitRecoveryTest(c, 0, "w", 2, zl, nke("fooa", "m", null), nke("fooa", "r", "m"), nke("fooa", null, "r"));
  runSplitRecoveryTest(c, 1, "w", 2, zl, nke("foob", "m", null), nke("foob", "r", "m"), nke("foob", null, "r"));
}