Use of org.apache.accumulo.server.master.state.RootTabletStateStore in project accumulo by apache.
Class Master, method run().
public void run() throws IOException, InterruptedException, KeeperException {
  final String zroot = ZooUtil.getRoot(getInstance());
  // ACCUMULO-4424 Put up the Thrift servers before getting the lock as a sign of process health when a hot-standby
  //
  // Start the Master's Client service
  clientHandler = new MasterClientServiceHandler(this);
  // Ensure that calls before the master gets the lock fail
  Iface haProxy = HighlyAvailableServiceWrapper.service(clientHandler, this);
  Iface rpcProxy = RpcWrapper.service(haProxy);
  final Processor<Iface> processor;
  if (ThriftServerType.SASL == getThriftServerType()) {
    Iface tcredsProxy = TCredentialsUpdatingWrapper.service(rpcProxy, clientHandler.getClass(), getConfiguration());
    processor = new Processor<>(tcredsProxy);
  } else {
    processor = new Processor<>(rpcProxy);
  }
  ServerAddress sa = TServerUtils.startServer(this, hostname, Property.MASTER_CLIENTPORT, processor, "Master", "Master Client Service Handler", null, Property.MASTER_MINTHREADS, Property.MASTER_THREADCHECK, Property.GENERAL_MAX_MESSAGE_SIZE);
  clientService = sa.server;
  log.info("Started Master client service at {}", sa.address);
  // Start the replication coordinator which assigns tservers to service replication requests
  MasterReplicationCoordinator impl = new MasterReplicationCoordinator(this);
  ReplicationCoordinator.Iface haReplicationProxy = HighlyAvailableServiceWrapper.service(impl, this);
  ReplicationCoordinator.Processor<ReplicationCoordinator.Iface> replicationCoordinatorProcessor = new ReplicationCoordinator.Processor<>(RpcWrapper.service(haReplicationProxy));
  ServerAddress replAddress = TServerUtils.startServer(this, hostname, Property.MASTER_REPLICATION_COORDINATOR_PORT, replicationCoordinatorProcessor, "Master Replication Coordinator", "Replication Coordinator", null, Property.MASTER_REPLICATION_COORDINATOR_MINTHREADS, Property.MASTER_REPLICATION_COORDINATOR_THREADCHECK, Property.GENERAL_MAX_MESSAGE_SIZE);
  log.info("Started replication coordinator service at " + replAddress.address);
  // block until we can obtain the ZK lock for the master
  getMasterLock(zroot + Constants.ZMASTER_LOCK);
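  // Everything past this point runs only on the master holding the lock:
  // set up write-ahead log recovery and observe table state changes.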
  recoveryManager = new RecoveryManager(this);
  TableManager.getInstance().addObserver(this);
  StatusThread statusThread = new StatusThread();
  statusThread.start();
  MigrationCleanupThread migrationCleanupThread = new MigrationCleanupThread();
  migrationCleanupThread.start();
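  // Track tablet servers coming and going so tablets can be assigned and balanced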
  tserverSet.startListeningForTabletServerChanges();
  ZooReaderWriter zReaderWriter = ZooReaderWriter.getInstance();
  zReaderWriter.getChildren(zroot + Constants.ZRECOVERY, new Watcher() {
    @Override
    public void process(WatchedEvent event) {
      nextEvent.event("Noticed recovery changes", event.getType());
      try {
        // watcher only fires once, add it back
        ZooReaderWriter.getInstance().getChildren(zroot + Constants.ZRECOVERY, this);
      } catch (Exception e) {
        log.error("Failed to add log recovery watcher back", e);
      }
    }
  });
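  // Three watchers, chained in dependency order: user tablets (read from the metadata
  // table), metadata tablets (read from the root tablet via RootTabletStateStore), and
  // the root tablet itself (read from ZooKeeper).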
  watchers.add(new TabletGroupWatcher(this, new MetaDataStateStore(this, this), null) {
    @Override
    boolean canSuspendTablets() {
      // Always allow user data tablets to enter suspended state.
      return true;
    }
  });
  watchers.add(new TabletGroupWatcher(this, new RootTabletStateStore(this, this), watchers.get(0)) {
    @Override
    boolean canSuspendTablets() {
      // Allow metadata tablets to enter suspended state only if so configured. Generally we want metadata tablets to
      // be immediately reassigned, even if there's a global table.suspension.duration setting.
      return getConfiguration().getBoolean(Property.MASTER_METADATA_SUSPENDABLE);
    }
  });
  watchers.add(new TabletGroupWatcher(this, new ZooTabletStateStore(new ZooStore(zroot)), watchers.get(1)) {
    @Override
    boolean canSuspendTablets() {
      // Never allow root tablet to enter suspended state.
      return false;
    }
  });
  for (TabletGroupWatcher watcher : watchers) {
    watcher.start();
  }
  // Once we are sure the upgrade is complete, we can safely allow fate use.
  waitForMetadataUpgrade.await();
  try {
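    // Wrap the FATE ZooStore in an AgeOffStore so completed transactions are
    // removed once they are eight hours old (1000 * 60 * 60 * 8 ms).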
    final AgeOffStore<Master> store = new AgeOffStore<>(new org.apache.accumulo.fate.ZooStore<Master>(ZooUtil.getRoot(getInstance()) + Constants.ZFATE, ZooReaderWriter.getInstance()), 1000 * 60 * 60 * 8);
    int threads = getConfiguration().getCount(Property.MASTER_FATE_THREADPOOL_SIZE);
    fate = new Fate<>(this, store);
    fate.startTransactionRunners(threads);
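    // Run the age-off pass roughly once a minute (every 63 seconds).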
    SimpleTimer.getInstance(getConfiguration()).schedule(new Runnable() {
      @Override
      public void run() {
        store.ageOff();
      }
    }, 63000, 63000);
  } catch (KeeperException | InterruptedException e) {
    throw new IOException(e);
  }
  ZooKeeperInitialization.ensureZooKeeperInitialized(zReaderWriter, zroot);
  // Make sure that we have a secret key (either a new one or an old one from ZK) before we start
  // the master client service.
  if (null != authenticationTokenKeyManager && null != keyDistributor) {
    log.info("Starting delegation-token key manager");
    keyDistributor.initialize();
    authenticationTokenKeyManager.start();
    boolean logged = false;
    while (!authenticationTokenKeyManager.isInitialized()) {
      // Print out a status message when we start waiting for the key manager to get initialized
      if (!logged) {
        log.info("Waiting for AuthenticationTokenKeyManager to be initialized");
        logged = true;
      }
      sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
    }
    // And log when we are initialized
    log.info("AuthenticationTokenSecretManager is initialized");
  }
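  // Advertise this master's client service address via the ZooKeeper lock data so
  // clients and tablet servers can find the active master.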
  String address = sa.address.toString();
  log.info("Setting master lock data to {}", address);
  masterLock.replaceLockData(address.getBytes());
  while (!clientService.isServing()) {
    sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
  }
  // Start the daemon to scan the replication table and make units of work
  replicationWorkDriver = new ReplicationDriver(this);
  replicationWorkDriver.start();
  // Start the daemon to assign work to tservers to replicate to our peers
  try {
    replicationWorkAssigner = new WorkDriver(this);
  } catch (AccumuloException | AccumuloSecurityException e) {
    log.error("Caught exception trying to initialize replication WorkDriver", e);
    throw new RuntimeException(e);
  }
  replicationWorkAssigner.start();
  // Advertise the port we used so peers don't have to be told what it is
  ZooReaderWriter.getInstance().putPersistentData(ZooUtil.getRoot(getInstance()) + Constants.ZMASTER_REPLICATION_COORDINATOR_ADDR, replAddress.address.toString().getBytes(UTF_8), NodeExistsPolicy.OVERWRITE);
  // Register replication metrics
  MasterMetricsFactory factory = new MasterMetricsFactory(getConfiguration(), this);
  Metrics replicationMetrics = factory.createReplicationMetrics();
  try {
    replicationMetrics.register();
  } catch (Exception e) {
    log.error("Failed to register replication metrics", e);
  }
  // The master is fully initialized. Clients are allowed to connect now.
  masterInitialized.set(true);
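  // Block while the client service is running; when it stops serving, begin an
  // orderly shutdown.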
  while (clientService.isServing()) {
    sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
  }
  log.info("Shutting down fate.");
  fate.shutdown();
  log.info("Shutting down timekeeping.");
  timeKeeper.shutdown();
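  // Give each background thread a bounded share of MAX_CLEANUP_WAIT_TIME to exit.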
  final long deadline = System.currentTimeMillis() + MAX_CLEANUP_WAIT_TIME;
  statusThread.join(remaining(deadline));
  replicationWorkAssigner.join(remaining(deadline));
  replicationWorkDriver.join(remaining(deadline));
  replAddress.server.stop();
  // Signal that we want it to stop, and wait for it to do so.
  if (authenticationTokenKeyManager != null) {
    authenticationTokenKeyManager.gracefulStop();
    authenticationTokenKeyManager.join(remaining(deadline));
  }
  // quit, even if the tablet servers somehow jam up and the watchers don't stop
  for (TabletGroupWatcher watcher : watchers) {
    watcher.join(remaining(deadline));
  }
  log.info("exiting");
}
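Each state store above, including RootTabletStateStore, is an Iterable over TabletLocationState records; that is how a TabletGroupWatcher consumes it when deciding which tablets to assign, unassign, or suspend. A minimal sketch of reading such a store directly follows; the context variable and the tallying logic are illustrative, not part of the Accumulo API shown above.

// Minimal sketch: iterate a tablet state store and tally assignment state.
// RootTabletStateStore(context, null) mirrors the constructor used in the test below.
TabletStateStore store = new RootTabletStateStore(context, null);
int assigned = 0, needsAssignment = 0;
for (TabletLocationState tls : store) {
  if (tls != null && tls.current != null) {
    assigned++; // tablet currently hosted by a tablet server
  } else {
    needsAssignment++; // no current location; the master must (re)assign it
  }
}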
Use of org.apache.accumulo.server.master.state.RootTabletStateStore in project accumulo by apache.
Class MasterRepairsDualAssignmentIT, method test().
@Test
public void test() throws Exception {
  // make some tablets, spread 'em around
  Connector c = getConnector();
  ClientContext context = new ClientContext(c.getInstance(), new Credentials("root", new PasswordToken(ROOT_PASSWORD)), getClientConfig());
  String table = this.getUniqueNames(1)[0];
  c.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
  c.securityOperations().grantTablePermission("root", RootTable.NAME, TablePermission.WRITE);
  c.tableOperations().create(table);
  SortedSet<Text> partitions = new TreeSet<>();
  for (String part : "a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")) {
    partitions.add(new Text(part));
  }
  c.tableOperations().addSplits(table, partitions);
  // scan the metadata table and get the two table location states
  Set<TServerInstance> states = new HashSet<>();
  Set<TabletLocationState> oldLocations = new HashSet<>();
  MetaDataStateStore store = new MetaDataStateStore(context, null);
  while (states.size() < 2) {
    UtilWaitThread.sleep(250);
    oldLocations.clear();
    for (TabletLocationState tls : store) {
      if (tls.current != null) {
        states.add(tls.current);
        oldLocations.add(tls);
      }
    }
  }
  assertEquals(2, states.size());
  // Kill a tablet server... we don't care which one... wait for everything to be reassigned
  cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
  Set<TServerInstance> replStates = new HashSet<>();
  // Find out which tablet server remains
  while (true) {
    UtilWaitThread.sleep(1000);
    states.clear();
    replStates.clear();
    boolean allAssigned = true;
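    // Classify every tablet: hosted somewhere, the (unhosted) replication table
    // tablet, or still waiting for assignment.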
    for (TabletLocationState tls : store) {
      if (tls != null && tls.current != null) {
        states.add(tls.current);
      } else if (tls != null && tls.extent.equals(new KeyExtent(ReplicationTable.ID, null, null))) {
        replStates.add(tls.current);
      } else {
        allAssigned = false;
      }
    }
    System.out.println(states + " size " + states.size() + " allAssigned " + allAssigned);
    if (states.size() != 2 && allAssigned)
      break;
  }
  assertEquals(1, replStates.size());
  assertEquals(1, states.size());
  // pick an assigned tablet and assign it to the old tablet
  TabletLocationState moved = null;
  for (TabletLocationState old : oldLocations) {
    if (!states.contains(old.current)) {
      moved = old;
    }
  }
  assertNotEquals(null, moved);
  // throw a mutation in as if we were the dying tablet server
  BatchWriter bw = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
  Mutation assignment = new Mutation(moved.extent.getMetadataEntry());
  moved.current.putLocation(assignment);
  bw.addMutation(assignment);
  bw.close();
  // wait for the master to fix the problem
  waitForCleanStore(store);
  // now jam up the metadata table
  bw = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
  assignment = new Mutation(new KeyExtent(MetadataTable.ID, null, null).getMetadataEntry());
  moved.current.putLocation(assignment);
  bw.addMutation(assignment);
  bw.close();
  waitForCleanStore(new RootTabletStateStore(context, null));
}
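The waitForCleanStore helper is defined elsewhere in the test class; because RootTabletStateStore extends MetaDataStateStore, the same helper serves both calls above. A plausible minimal version is sketched below as a hypothetical reconstruction (the real helper may differ; Iterators is Guava's com.google.common.collect.Iterators): it repeatedly scans the store until a full pass completes without a bad-state exception, which is how the test observes that the master repaired the dual assignment.

// Hypothetical sketch of waitForCleanStore: keep scanning the state store until
// the master has repaired the bad assignment and a full pass no longer throws.
private void waitForCleanStore(MetaDataStateStore store) {
  while (true) {
    try (ClosableIterator<TabletLocationState> iter = store.iterator()) {
      Iterators.size(iter); // drain the iterator; a conflicting tablet state throws here
    } catch (Exception ex) {
      System.out.println(ex);
      UtilWaitThread.sleep(250);
      continue;
    }
    break;
  }
}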