Use of org.apache.accumulo.server.conf.TableConfiguration in project accumulo by apache.
The class CollectTabletStats, method readFilesUsingIterStack:
private static int readFilesUsingIterStack(VolumeManager fs, ServerConfigurationFactory aconf, List<FileRef> files, Authorizations auths, KeyExtent ke, String[] columns, boolean useTableIterators) throws Exception {
  SortedKeyValueIterator<Key, Value> reader;
  List<SortedKeyValueIterator<Key, Value>> readers = new ArrayList<>(files.size());
  for (FileRef file : files) {
    FileSystem ns = fs.getVolumeByPath(file.path()).getFileSystem();
    readers.add(FileOperations.getInstance().newReaderBuilder().forFile(file.path().toString(), ns, ns.getConf())
        .withTableConfiguration(aconf.getSystemConfiguration()).build());
  }
  List<IterInfo> emptyIterinfo = Collections.emptyList();
  Map<String, Map<String, String>> emptySsio = Collections.emptyMap();
  TableConfiguration tconf = aconf.getTableConfiguration(ke.getTableId());
  reader = createScanIterator(ke, readers, auths, new byte[] {}, new HashSet<>(), emptyIterinfo, emptySsio, useTableIterators, tconf);
  HashSet<ByteSequence> columnSet = createColumnBSS(columns);
  reader.seek(new Range(ke.getPrevEndRow(), false, ke.getEndRow(), true), columnSet, !columnSet.isEmpty());
  int count = 0;
  while (reader.hasTop()) {
    count++;
    reader.next();
  }
  return count;
}
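The helper above resolves configuration at two scopes: the RFile readers are opened against the system configuration, while the optional table iterator stack is driven by the per-table TableConfiguration. Below is a minimal sketch of those two lookups in isolation; it assumes the two configuration objects were obtained from a ServerConfigurationFactory exactly as in the method above, and the property choices are only illustrative.

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.server.conf.TableConfiguration;

class ConfigScopeSketch {
  // sysConf would come from aconf.getSystemConfiguration() and tconf from
  // aconf.getTableConfiguration(ke.getTableId()), as shown in the method above.
  static void printScopes(AccumuloConfiguration sysConf, TableConfiguration tconf) {
    // A system-scoped property, resolved without any table context.
    System.out.println("max open scan files: " + sysConf.get(Property.TSERV_SCAN_MAX_OPENFILES));
    // Table-scoped properties fall back to system/site/default values when unset on the table.
    System.out.println("compression: " + tconf.get(Property.TABLE_FILE_COMPRESSION_TYPE));
    System.out.println("majc ratio: " + tconf.get(Property.TABLE_MAJC_RATIO));
  }
}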
Use of org.apache.accumulo.server.conf.TableConfiguration in project accumulo by apache.
The class ReplicationUtil, method getReplicationTargets:
public Set<ReplicationTarget> getReplicationTargets() {
  // The total set of configured targets
  final Set<ReplicationTarget> allConfiguredTargets = new HashSet<>();
  final Map<String, Table.ID> tableNameToId = Tables.getNameToIdMap(context.getInstance());
  for (String table : tableNameToId.keySet()) {
    if (MetadataTable.NAME.equals(table) || RootTable.NAME.equals(table)) {
      continue;
    }
    Table.ID localId = tableNameToId.get(table);
    if (null == localId) {
      log.trace("Could not determine ID for {}", table);
      continue;
    }
    TableConfiguration tableConf = context.getServerConfigurationFactory().getTableConfiguration(localId);
    if (null == tableConf) {
      log.trace("Could not get configuration for table {} (it no longer exists)", table);
      continue;
    }
    for (Entry<String, String> prop : tableConf.getAllPropertiesWithPrefix(Property.TABLE_REPLICATION_TARGET).entrySet()) {
      String peerName = prop.getKey().substring(Property.TABLE_REPLICATION_TARGET.getKey().length());
      String remoteIdentifier = prop.getValue();
      ReplicationTarget target = new ReplicationTarget(peerName, remoteIdentifier, localId);
      allConfiguredTargets.add(target);
    }
  }
  return allConfiguredTargets;
}
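Replication targets are stored as table properties of the form table.replication.target.<peerName> = <remote table id>, which is why the loop above strips the Property.TABLE_REPLICATION_TARGET prefix to recover the peer name. As a hedged sketch, a target that this scan would pick up could be configured through the public client API roughly as follows (the table name, peer name, and remote identifier passed in are hypothetical examples):

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.conf.Property;

class ReplicationTargetSetup {
  // Sketch only: callers supply their own table name, peer name, and remote table id.
  static void addTarget(Connector conn, String tableName, String peerName, String remoteTableId) throws Exception {
    // Targets live in table properties: table.replication.target.<peerName> = <remote table id>.
    String key = Property.TABLE_REPLICATION_TARGET.getKey() + peerName;
    conn.tableOperations().setProperty(tableName, key, remoteTableId);
  }
}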
Use of org.apache.accumulo.server.conf.TableConfiguration in project accumulo by apache.
The class TabletGroupWatcher, method run:
@Override
public void run() {
  Thread.currentThread().setName("Watching " + store.name());
  int[] oldCounts = new int[TabletState.values().length];
  EventCoordinator.Listener eventListener = this.master.nextEvent.getListener();
  WalStateManager wals = new WalStateManager(master.getInstance(), ZooReaderWriter.getInstance());
  while (this.master.stillMaster()) {
    // slow things down a little, otherwise we spam the logs when there are many wake-up events
    sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    masterState = master.getMasterState();
    int totalUnloaded = 0;
    int unloaded = 0;
    ClosableIterator<TabletLocationState> iter = null;
    try {
      Map<Table.ID, MergeStats> mergeStatsCache = new HashMap<>();
      Map<Table.ID, MergeStats> currentMerges = new HashMap<>();
      for (MergeInfo merge : master.merges()) {
        if (merge.getExtent() != null) {
          currentMerges.put(merge.getExtent().getTableId(), new MergeStats(merge));
        }
      }
      // Get the current status for the current list of tservers
      SortedMap<TServerInstance, TabletServerStatus> currentTServers = new TreeMap<>();
      for (TServerInstance entry : this.master.tserverSet.getCurrentServers()) {
        currentTServers.put(entry, this.master.tserverStatus.get(entry));
      }
      if (currentTServers.size() == 0) {
        eventListener.waitForEvents(Master.TIME_TO_WAIT_BETWEEN_SCANS);
        synchronized (this) {
          lastScanServers = ImmutableSortedSet.of();
        }
        continue;
      }
      // Don't move tablets to servers that are shutting down
      SortedMap<TServerInstance, TabletServerStatus> destinations = new TreeMap<>(currentTServers);
      destinations.keySet().removeAll(this.master.serversToShutdown);
      List<Assignment> assignments = new ArrayList<>();
      List<Assignment> assigned = new ArrayList<>();
      List<TabletLocationState> assignedToDeadServers = new ArrayList<>();
      List<TabletLocationState> suspendedToGoneServers = new ArrayList<>();
      Map<KeyExtent, TServerInstance> unassigned = new HashMap<>();
      Map<TServerInstance, List<Path>> logsForDeadServers = new TreeMap<>();
      MasterState masterState = master.getMasterState();
      int[] counts = new int[TabletState.values().length];
      stats.begin();
      // Walk through the tablets in our store, and work tablets
      // towards their goal
      iter = store.iterator();
      while (iter.hasNext()) {
        TabletLocationState tls = iter.next();
        if (tls == null) {
          continue;
        }
        Master.log.debug("{} location State: {}", store.name(), tls);
        // ignore entries for tables that do not exist in zookeeper
        if (TableManager.getInstance().getTableState(tls.extent.getTableId()) == null)
          continue;
        if (Master.log.isTraceEnabled())
          Master.log.trace("{} walogs {}", tls, tls.walogs.size());
        // Don't overwhelm the tablet servers with work
        if (unassigned.size() + unloaded > Master.MAX_TSERVER_WORK_CHUNK * currentTServers.size()) {
          flushChanges(destinations, assignments, assigned, assignedToDeadServers, logsForDeadServers, suspendedToGoneServers, unassigned);
          assignments.clear();
          assigned.clear();
          assignedToDeadServers.clear();
          suspendedToGoneServers.clear();
          unassigned.clear();
          unloaded = 0;
          eventListener.waitForEvents(Master.TIME_TO_WAIT_BETWEEN_SCANS);
        }
        Table.ID tableId = tls.extent.getTableId();
        TableConfiguration tableConf = this.master.getConfigurationFactory().getTableConfiguration(tableId);
        MergeStats mergeStats = mergeStatsCache.get(tableId);
        if (mergeStats == null) {
          mergeStats = currentMerges.get(tableId);
          if (mergeStats == null) {
            mergeStats = new MergeStats(new MergeInfo());
          }
          mergeStatsCache.put(tableId, mergeStats);
        }
        TabletGoalState goal = this.master.getGoalState(tls, mergeStats.getMergeInfo());
        TServerInstance server = tls.getServer();
        TabletState state = tls.getState(currentTServers.keySet());
        if (Master.log.isTraceEnabled()) {
          Master.log.trace("Goal state {} current {} for {}", goal, state, tls.extent);
        }
        stats.update(tableId, state);
        mergeStats.update(tls.extent, state, tls.chopped, !tls.walogs.isEmpty());
        sendChopRequest(mergeStats.getMergeInfo(), state, tls);
        sendSplitRequest(mergeStats.getMergeInfo(), state, tls);
        // Always follow through with assignments
        if (state == TabletState.ASSIGNED) {
          goal = TabletGoalState.HOSTED;
        }
        // if we are shutting down all the tabletservers, we have to do it in order
        if (goal == TabletGoalState.SUSPENDED && state == TabletState.HOSTED) {
          if (this.master.serversToShutdown.equals(currentTServers.keySet())) {
            if (dependentWatcher != null && dependentWatcher.assignedOrHosted() > 0) {
              goal = TabletGoalState.HOSTED;
            }
          }
        }
        if (goal == TabletGoalState.HOSTED) {
          if (state != TabletState.HOSTED && !tls.walogs.isEmpty()) {
            if (this.master.recoveryManager.recoverLogs(tls.extent, tls.walogs))
              continue;
          }
          switch (state) {
            case HOSTED:
              if (server.equals(this.master.migrations.get(tls.extent)))
                this.master.migrations.remove(tls.extent);
              break;
            case ASSIGNED_TO_DEAD_SERVER:
              assignedToDeadServers.add(tls);
              if (server.equals(this.master.migrations.get(tls.extent)))
                this.master.migrations.remove(tls.extent);
              TServerInstance tserver = tls.futureOrCurrent();
              if (!logsForDeadServers.containsKey(tserver)) {
                logsForDeadServers.put(tserver, wals.getWalsInUse(tserver));
              }
              break;
            case SUSPENDED:
              if (master.getSteadyTime() - tls.suspend.suspensionTime < tableConf.getTimeInMillis(Property.TABLE_SUSPEND_DURATION)) {
                // Tablet is suspended. See if its tablet server is back.
                TServerInstance returnInstance = null;
                Iterator<TServerInstance> find = destinations.tailMap(new TServerInstance(tls.suspend.server, " ")).keySet().iterator();
                if (find.hasNext()) {
                  TServerInstance found = find.next();
                  if (found.getLocation().equals(tls.suspend.server)) {
                    returnInstance = found;
                  }
                }
                // Old tablet server is back. Return this tablet to its previous owner.
                if (returnInstance != null) {
                  assignments.add(new Assignment(tls.extent, returnInstance));
                } else {
                  // leave suspended, don't ask for a new assignment.
                }
              } else {
                // Treat as unassigned, ask for a new assignment.
                unassigned.put(tls.extent, server);
              }
              break;
            case UNASSIGNED:
              // maybe it's a finishing migration
              TServerInstance dest = this.master.migrations.get(tls.extent);
              if (dest != null) {
                // if destination is still good, assign it
                if (destinations.keySet().contains(dest)) {
                  assignments.add(new Assignment(tls.extent, dest));
                } else {
                  // get rid of this migration
                  this.master.migrations.remove(tls.extent);
                  unassigned.put(tls.extent, server);
                }
              } else {
                unassigned.put(tls.extent, server);
              }
              break;
            case ASSIGNED:
              // Send another reminder
              assigned.add(new Assignment(tls.extent, tls.future));
              break;
          }
        } else {
          switch (state) {
            case SUSPENDED:
              // Request a move to UNASSIGNED, so as to allow balancing to continue.
              suspendedToGoneServers.add(tls);
              cancelOfflineTableMigrations(tls);
              break;
            case UNASSIGNED:
              cancelOfflineTableMigrations(tls);
              break;
            case ASSIGNED_TO_DEAD_SERVER:
              assignedToDeadServers.add(tls);
              if (!logsForDeadServers.containsKey(tls.futureOrCurrent())) {
                logsForDeadServers.put(tls.futureOrCurrent(), wals.getWalsInUse(tls.futureOrCurrent()));
              }
              break;
            case HOSTED:
              TServerConnection conn = this.master.tserverSet.getConnection(server);
              if (conn != null) {
                conn.unloadTablet(this.master.masterLock, tls.extent, goal.howUnload(), master.getSteadyTime());
                unloaded++;
                totalUnloaded++;
              } else {
                Master.log.warn("Could not connect to server {}", server);
              }
              break;
            case ASSIGNED:
              break;
          }
        }
        counts[state.ordinal()]++;
      }
      flushChanges(destinations, assignments, assigned, assignedToDeadServers, logsForDeadServers, suspendedToGoneServers, unassigned);
      // provide stats after flushing changes to avoid race conditions w/ delete table
      stats.end(masterState);
      // Report changes
      for (TabletState state : TabletState.values()) {
        int i = state.ordinal();
        if (counts[i] > 0 && counts[i] != oldCounts[i]) {
          this.master.nextEvent.event("[%s]: %d tablets are %s", store.name(), counts[i], state.name());
        }
      }
      Master.log.debug(String.format("[%s]: scan time %.2f seconds", store.name(), stats.getScanTime() / 1000.));
      oldCounts = counts;
      if (totalUnloaded > 0) {
        this.master.nextEvent.event("[%s]: %d tablets unloaded", store.name(), totalUnloaded);
      }
      updateMergeState(mergeStatsCache);
      synchronized (this) {
        lastScanServers = ImmutableSortedSet.copyOf(currentTServers.keySet());
      }
      if (this.master.tserverSet.getCurrentServers().equals(currentTServers.keySet())) {
        Master.log.debug(String.format("[%s] sleeping for %.2f seconds", store.name(), Master.TIME_TO_WAIT_BETWEEN_SCANS / 1000.));
        eventListener.waitForEvents(Master.TIME_TO_WAIT_BETWEEN_SCANS);
      } else {
        Master.log.info("Detected change in current tserver set, re-running state machine.");
      }
    } catch (Exception ex) {
      Master.log.error("Error processing table state for store " + store.name(), ex);
      if (ex.getCause() != null && ex.getCause() instanceof BadLocationStateException) {
        repairMetadata(((BadLocationStateException) ex.getCause()).getEncodedEndRow());
      } else {
        sleepUninterruptibly(Master.WAIT_BETWEEN_ERRORS, TimeUnit.MILLISECONDS);
      }
    } finally {
      if (iter != null) {
        try {
          iter.close();
        } catch (IOException ex) {
          Master.log.warn("Error closing TabletLocationState iterator: " + ex, ex);
        }
      }
    }
  }
}
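In the SUSPENDED case above, the per-table TableConfiguration supplies Property.TABLE_SUSPEND_DURATION, which bounds how long the watcher keeps a suspended tablet waiting for its original tablet server before treating it as unassigned. The same check restated in isolation, as a sketch that assumes both timestamps are steady-clock milliseconds exactly as in the loop above:

import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.server.conf.TableConfiguration;

class SuspendWindowSketch {
  // Mirrors the SUSPENDED branch above: true means "keep waiting for the old
  // tserver to return", false means "treat as unassigned and request a new assignment".
  static boolean stillSuspended(TableConfiguration tableConf, long steadyTimeMillis, long suspensionTimeMillis) {
    long suspendDuration = tableConf.getTimeInMillis(Property.TABLE_SUSPEND_DURATION);
    return steadyTimeMillis - suspensionTimeMillis < suspendDuration;
  }
}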
Use of org.apache.accumulo.server.conf.TableConfiguration in project accumulo by apache.
The class WorkMaker, method run:
public void run() {
  if (!ReplicationTable.isOnline(conn)) {
    log.debug("Replication table is not yet online");
    return;
  }
  Span span = Trace.start("replicationWorkMaker");
  try {
    final Scanner s;
    try {
      s = ReplicationTable.getScanner(conn);
      if (null == writer) {
        setBatchWriter(ReplicationTable.getBatchWriter(conn));
      }
    } catch (ReplicationTableOfflineException e) {
      log.warn("Replication table was online, but not anymore");
      writer = null;
      return;
    }
    // Only pull records about data that has been ingested and is ready for replication
    StatusSection.limit(s);
    TableConfiguration tableConf;
    Text file = new Text();
    for (Entry<Key, Value> entry : s) {
      // Extract the useful bits from the status key
      ReplicationSchema.StatusSection.getFile(entry.getKey(), file);
      Table.ID tableId = ReplicationSchema.StatusSection.getTableId(entry.getKey());
      log.debug("Processing replication status record for {} on table {}", file, tableId);
      Status status;
      try {
        status = Status.parseFrom(entry.getValue().get());
      } catch (InvalidProtocolBufferException e) {
        log.error("Could not parse protobuf for {} from table {}", file, tableId);
        continue;
      }
      // TODO put this into a filter on the server side
      if (!shouldCreateWork(status)) {
        log.debug("Not creating work: {}", status.toString());
        continue;
      }
      // Get the table configuration for the table specified by the status record
      tableConf = context.getServerConfigurationFactory().getTableConfiguration(tableId);
      // getTableConfiguration returns null if the table no longer exists
      if (null == tableConf) {
        continue;
      }
      // Pull the relevant replication targets
      // TODO Cache this instead of pulling it every time
      // -- Another scanner over the WorkSection can make this relatively cheap
      Map<String, String> replicationTargets = getReplicationTargets(tableConf);
      if (!replicationTargets.isEmpty()) {
        Span workSpan = Trace.start("createWorkMutations");
        try {
          addWorkRecord(file, entry.getValue(), replicationTargets, tableId);
        } finally {
          workSpan.stop();
        }
      } else {
        log.warn("No configured targets for table with ID {}", tableId);
      }
    }
  } finally {
    span.stop();
  }
}
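The call to getReplicationTargets(tableConf) is not shown in this snippet; it returns a map from peer name to remote table identifier, the same data that ReplicationUtil.getReplicationTargets derives earlier on this page. The following is only a plausible sketch of such a helper, modeled on that earlier loop, and not the project's actual implementation:

import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.server.conf.TableConfiguration;

class ReplicationTargetsSketch {
  // Hypothetical stand-in for WorkMaker.getReplicationTargets(TableConfiguration).
  static Map<String, String> getReplicationTargets(TableConfiguration tableConf) {
    Map<String, String> targets = new HashMap<>();
    int prefixLength = Property.TABLE_REPLICATION_TARGET.getKey().length();
    for (Entry<String, String> prop : tableConf.getAllPropertiesWithPrefix(Property.TABLE_REPLICATION_TARGET).entrySet()) {
      // Key layout: table.replication.target.<peerName> = <remote table id>.
      targets.put(prop.getKey().substring(prefixLength), prop.getValue());
    }
    return targets;
  }
}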
Use of org.apache.accumulo.server.conf.TableConfiguration in project accumulo by apache.
The class WriteExportFiles, method exportConfig:
private static void exportConfig(AccumuloServerContext context, Table.ID tableID, ZipOutputStream zipOut, DataOutputStream dataOut)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException, IOException {
  Connector conn = context.getConnector();
  DefaultConfiguration defaultConfig = DefaultConfiguration.getInstance();
  Map<String, String> siteConfig = conn.instanceOperations().getSiteConfiguration();
  Map<String, String> systemConfig = conn.instanceOperations().getSystemConfiguration();
  TableConfiguration tableConfig = context.getServerConfigurationFactory().getTableConfiguration(tableID);
  OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8);
  // only write props that differ from the defaults and from the higher-level (site/system) configuration
  zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_TABLE_CONFIG_FILE));
  for (Entry<String, String> prop : tableConfig) {
    if (prop.getKey().startsWith(Property.TABLE_PREFIX.getKey())) {
      Property key = Property.getPropertyByKey(prop.getKey());
      if (key == null || !defaultConfig.get(key).equals(prop.getValue())) {
        if (!prop.getValue().equals(siteConfig.get(prop.getKey())) && !prop.getValue().equals(systemConfig.get(prop.getKey()))) {
          osw.append(prop.getKey() + "=" + prop.getValue() + "\n");
        }
      }
    }
  }
  osw.flush();
}
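The nested conditionals above implement a simple export filter: a property is written only if it is table-scoped and its value is not already supplied by the default, site, or system configuration. For readability, here is the same test restated as a standalone predicate; the helper name and shape are ours, not the project's:

import java.util.Map;
import java.util.Map.Entry;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.conf.Property;

class ExportFilterSketch {
  // Hypothetical restatement of the filter inside exportConfig above.
  static boolean shouldExport(Entry<String, String> prop, DefaultConfiguration defaultConfig,
      Map<String, String> siteConfig, Map<String, String> systemConfig) {
    if (!prop.getKey().startsWith(Property.TABLE_PREFIX.getKey())) {
      return false; // not a table-scoped property
    }
    Property key = Property.getPropertyByKey(prop.getKey());
    if (key != null && defaultConfig.get(key).equals(prop.getValue())) {
      return false; // the value is just the default
    }
    // skip values already provided by the site or system configuration
    return !prop.getValue().equals(siteConfig.get(prop.getKey()))
        && !prop.getValue().equals(systemConfig.get(prop.getKey()));
  }
}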