Use of org.apache.accumulo.core.trace.Span in project accumulo by apache.
In class AccumuloReplicaSystem, method getWalStream.
public DataInputStream getWalStream(Path p, FSDataInputStream input) throws IOException {
  Span span = Trace.start("Read WAL header");
  span.data("file", p.toString());
  try {
    DFSLoggerInputStreams streams = DfsLogger.readHeaderAndReturnStream(input, conf);
    return streams.getDecryptingInputStream();
  } finally {
    span.stop();
  }
}
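Every usage in this listing follows the same discipline: start a span with Trace.start, optionally attach key/value annotations with Span.data, and stop the span in a finally block so it is closed even when the traced work throws. Below is a minimal, self-contained sketch of that idiom, using only the Trace.start, Span.data, and Span.stop calls seen in these snippets; the class name and the doWork method are hypothetical.

import java.io.IOException;

import org.apache.accumulo.core.trace.Span;
import org.apache.accumulo.core.trace.Trace;

public class SpanIdiomSketch {

  // Hypothetical traced operation; the span bookkeeping is the point.
  public void tracedOperation() throws IOException {
    Span span = Trace.start("example:operation");
    // Annotations appear alongside the span in the collected trace.
    span.data("detail", "some-value");
    try {
      doWork();
    } finally {
      // Always stop the span, even if doWork() throws.
      span.stop();
    }
  }

  private void doWork() throws IOException {
    // hypothetical work being timed
  }
}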
Use of org.apache.accumulo.core.trace.Span in project accumulo by apache.
In class Tablet, method minorCompact.
DataFileValue minorCompact(VolumeManager fs, InMemoryMap memTable, FileRef tmpDatafile, FileRef newDatafile, FileRef mergeFile, boolean hasQueueTime, long queued, CommitSession commitSession, long flushId, MinorCompactionReason mincReason) {
  boolean failed = false;
  long start = System.currentTimeMillis();
  timer.incrementStatusMinor();
  long count = 0;
  String oldName = Thread.currentThread().getName();
  try {
    Thread.currentThread().setName("Minor compacting " + this.extent);
    Span span = Trace.start("write");
    CompactionStats stats;
    try {
      count = memTable.getNumEntries();
      DataFileValue dfv = null;
      if (mergeFile != null)
        dfv = getDatafileManager().getDatafileSizes().get(mergeFile);
      MinorCompactor compactor = new MinorCompactor(tabletServer, this, memTable, mergeFile, dfv, tmpDatafile, mincReason, tableConfiguration);
      stats = compactor.call();
    } finally {
      span.stop();
    }
    span = Trace.start("bringOnline");
    try {
      getDatafileManager().bringMinorCompactionOnline(tmpDatafile, newDatafile, mergeFile, new DataFileValue(stats.getFileSize(), stats.getEntriesWritten()), commitSession, flushId);
    } finally {
      span.stop();
    }
    return new DataFileValue(stats.getFileSize(), stats.getEntriesWritten());
  } catch (Exception | Error e) {
    failed = true;
    throw new RuntimeException(e);
  } finally {
    Thread.currentThread().setName(oldName);
    try {
      getTabletMemory().finalizeMinC();
    } catch (Throwable t) {
      log.error("Failed to free tablet memory", t);
    }
    if (!failed) {
      lastMinorCompactionFinishTime = System.currentTimeMillis();
    }
    Metrics minCMetrics = getTabletServer().getMinCMetrics();
    if (minCMetrics.isEnabled())
      minCMetrics.add(TabletServerMinCMetrics.MINC, (lastMinorCompactionFinishTime - start));
    if (hasQueueTime) {
      timer.updateTime(Operation.MINOR, queued, start, count, failed);
      if (minCMetrics.isEnabled())
        minCMetrics.add(TabletServerMinCMetrics.QUEUE, (start - queued));
    } else
      timer.updateTime(Operation.MINOR, start, count, failed);
  }
}
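Note how minorCompact reuses a single Span variable for two back-to-back phases, "write" and "bringOnline", each guarded by its own try/finally. A stripped-down sketch of that sequential-phase pattern follows; the phase methods are hypothetical placeholders.

import org.apache.accumulo.core.trace.Span;
import org.apache.accumulo.core.trace.Trace;

public class SequentialPhasesSketch {

  public void compact() {
    // Phase 1: time the write on its own span.
    Span span = Trace.start("write");
    try {
      writePhase();
    } finally {
      span.stop();
    }
    // Phase 2: reuse the variable for a fresh span, as minorCompact does.
    span = Trace.start("bringOnline");
    try {
      bringOnlinePhase();
    } finally {
      span.stop();
    }
  }

  private void writePhase() { /* hypothetical */ }

  private void bringOnlinePhase() { /* hypothetical */ }
}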
Use of org.apache.accumulo.core.trace.Span in project accumulo by apache.
In class Tablet, method _majorCompact.
private CompactionStats _majorCompact(MajorCompactionReason reason) throws IOException, CompactionCanceledException {
  long t1, t2, t3;
  Pair<Long, UserCompactionConfig> compactionId = null;
  CompactionStrategy strategy = null;
  Map<FileRef, Pair<Key, Key>> firstAndLastKeys = null;
  if (reason == MajorCompactionReason.USER) {
    try {
      compactionId = getCompactionID();
      strategy = createCompactionStrategy(compactionId.getSecond().getCompactionStrategy());
    } catch (NoNodeException e) {
      throw new RuntimeException(e);
    }
  } else if (reason == MajorCompactionReason.NORMAL || reason == MajorCompactionReason.IDLE) {
    strategy = Property.createTableInstanceFromPropertyName(tableConfiguration, Property.TABLE_COMPACTION_STRATEGY, CompactionStrategy.class, new DefaultCompactionStrategy());
    strategy.init(Property.getCompactionStrategyOptions(tableConfiguration));
  } else if (reason == MajorCompactionReason.CHOP) {
    firstAndLastKeys = getFirstAndLastKeys(getDatafileManager().getDatafileSizes());
  } else {
    throw new IllegalArgumentException("Unknown compaction reason " + reason);
  }
  if (strategy != null) {
    BlockCache sc = tabletResources.getTabletServerResourceManager().getSummaryCache();
    BlockCache ic = tabletResources.getTabletServerResourceManager().getIndexCache();
    MajorCompactionRequest request = new MajorCompactionRequest(extent, reason, getTabletServer().getFileSystem(), tableConfiguration, sc, ic);
    request.setFiles(getDatafileManager().getDatafileSizes());
    strategy.gatherInformation(request);
  }
  Map<FileRef, DataFileValue> filesToCompact = null;
  int maxFilesToCompact = tableConfiguration.getCount(Property.TSERV_MAJC_THREAD_MAXOPEN);
  CompactionStats majCStats = new CompactionStats();
  CompactionPlan plan = null;
  boolean propogateDeletes = false;
  boolean updateCompactionID = false;
  synchronized (this) {
    // plan all the work that needs to be done in the sync block... then do the actual work
    // outside the sync block
    t1 = System.currentTimeMillis();
    majorCompactionState = CompactionState.WAITING_TO_START;
    getTabletMemory().waitForMinC();
    t2 = System.currentTimeMillis();
    majorCompactionState = CompactionState.IN_PROGRESS;
    notifyAll();
    VolumeManager fs = getTabletServer().getFileSystem();
    if (extent.isRootTablet()) {
      // very important that we call this before doing major compaction,
      // otherwise deleted compacted files could possibly be brought back
      // at some point if the file they were compacted to was legitimately
      // removed by a major compaction
      RootFiles.cleanupReplacement(fs, fs.listStatus(this.location), false);
    }
    SortedMap<FileRef, DataFileValue> allFiles = getDatafileManager().getDatafileSizes();
    List<FileRef> inputFiles = new ArrayList<>();
    if (reason == MajorCompactionReason.CHOP) {
      // enforce rules: files with keys outside our range need to be compacted
      inputFiles.addAll(findChopFiles(extent, firstAndLastKeys, allFiles.keySet()));
    } else {
      MajorCompactionRequest request = new MajorCompactionRequest(extent, reason, tableConfiguration);
      request.setFiles(allFiles);
      plan = strategy.getCompactionPlan(request);
      if (plan != null) {
        plan.validate(allFiles.keySet());
        inputFiles.addAll(plan.inputFiles);
      }
    }
    if (inputFiles.isEmpty()) {
      if (reason == MajorCompactionReason.USER) {
        if (compactionId.getSecond().getIterators().isEmpty()) {
          log.debug("No-op major compaction by USER on 0 input files because no iterators present.");
          lastCompactID = compactionId.getFirst();
          updateCompactionID = true;
        } else {
          log.debug("Major compaction by USER on 0 input files with iterators.");
          filesToCompact = new HashMap<>();
        }
      } else {
        return majCStats;
      }
    } else {
      // if no original files will exist at the end of the compaction, we do not have to propagate deletes
      Set<FileRef> droppedFiles = new HashSet<>();
      droppedFiles.addAll(inputFiles);
      if (plan != null)
        droppedFiles.addAll(plan.deleteFiles);
      propogateDeletes = !(droppedFiles.equals(allFiles.keySet()));
      log.debug("Major compaction plan: {} propogate deletes : {}", plan, propogateDeletes);
      filesToCompact = new HashMap<>(allFiles);
      filesToCompact.keySet().retainAll(inputFiles);
      getDatafileManager().reserveMajorCompactingFiles(filesToCompact.keySet());
    }
    t3 = System.currentTimeMillis();
  }
  try {
    log.debug(String.format("MajC initiate lock %.2f secs, wait %.2f secs", (t3 - t2) / 1000.0, (t2 - t1) / 1000.0));
    if (updateCompactionID) {
      MetadataTableUtil.updateTabletCompactID(extent, compactionId.getFirst(), tabletServer, getTabletServer().getLock());
      return majCStats;
    }
    if (!propogateDeletes && compactionId == null) {
      // compacting everything, so update the compaction id in metadata
      try {
        compactionId = getCompactionID();
        if (compactionId.getSecond().getCompactionStrategy() != null) {
          compactionId = null;
          // TODO maybe return unless chop?
        }
      } catch (NoNodeException e) {
        throw new RuntimeException(e);
      }
    }
    List<IteratorSetting> compactionIterators = new ArrayList<>();
    if (compactionId != null) {
      if (reason == MajorCompactionReason.USER) {
        if (getCompactionCancelID() >= compactionId.getFirst()) {
          // compaction was canceled
          return majCStats;
        }
        compactionIterators = compactionId.getSecond().getIterators();
        synchronized (this) {
          if (lastCompactID >= compactionId.getFirst())
            // already compacted
            return majCStats;
        }
      }
    }
    // ACCUMULO-3645 run loop at least once, even if filesToCompact.isEmpty()
    do {
      int numToCompact = maxFilesToCompact;
      if (filesToCompact.size() > maxFilesToCompact && filesToCompact.size() < 2 * maxFilesToCompact) {
        // on the second-to-last compaction pass, compact the minimum number of files possible
        numToCompact = filesToCompact.size() - maxFilesToCompact + 1;
      }
      Set<FileRef> smallestFiles = removeSmallest(filesToCompact, numToCompact);
      FileRef fileName = getNextMapFilename((filesToCompact.size() == 0 && !propogateDeletes) ? "A" : "C");
      FileRef compactTmpName = new FileRef(fileName.path().toString() + "_tmp");
      AccumuloConfiguration tableConf = createTableConfiguration(tableConfiguration, plan);
      Span span = Trace.start("compactFiles");
      try {
        CompactionEnv cenv = new CompactionEnv() {

          @Override
          public boolean isCompactionEnabled() {
            return Tablet.this.isCompactionEnabled();
          }

          @Override
          public IteratorScope getIteratorScope() {
            return IteratorScope.majc;
          }

          @Override
          public RateLimiter getReadLimiter() {
            return getTabletServer().getMajorCompactionReadLimiter();
          }

          @Override
          public RateLimiter getWriteLimiter() {
            return getTabletServer().getMajorCompactionWriteLimiter();
          }
        };
        HashMap<FileRef, DataFileValue> copy = new HashMap<>(getDatafileManager().getDatafileSizes());
        if (!copy.keySet().containsAll(smallestFiles))
          throw new IllegalStateException("Cannot find data file values for " + smallestFiles);
        copy.keySet().retainAll(smallestFiles);
        log.debug("Starting MajC {} ({}) {} --> {} {}", extent, reason, copy.keySet(), compactTmpName, compactionIterators);
        // always propagate deletes, unless last batch
        boolean lastBatch = filesToCompact.isEmpty();
        Compactor compactor = new Compactor(tabletServer, this, copy, null, compactTmpName, lastBatch ? propogateDeletes : true, cenv, compactionIterators, reason.ordinal(), tableConf);
        CompactionStats mcs = compactor.call();
        span.data("files", "" + smallestFiles.size());
        span.data("read", "" + mcs.getEntriesRead());
        span.data("written", "" + mcs.getEntriesWritten());
        majCStats.add(mcs);
        if (lastBatch && plan != null && plan.deleteFiles != null) {
          smallestFiles.addAll(plan.deleteFiles);
        }
        getDatafileManager().bringMajorCompactionOnline(smallestFiles, compactTmpName, fileName, filesToCompact.size() == 0 && compactionId != null ? compactionId.getFirst() : null, new DataFileValue(mcs.getFileSize(), mcs.getEntriesWritten()));
        // when the compaction produces a file with zero entries it will be deleted, so do not want
        // to add the deleted file
        if (filesToCompact.size() > 0 && mcs.getEntriesWritten() > 0) {
          filesToCompact.put(fileName, new DataFileValue(mcs.getFileSize(), mcs.getEntriesWritten()));
        }
      } finally {
        span.stop();
      }
    } while (filesToCompact.size() > 0);
    return majCStats;
  } finally {
    synchronized (Tablet.this) {
      getDatafileManager().clearMajorCompactingFile();
    }
  }
}
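A detail worth noticing in _majorCompact: the span's annotations are attached after the compaction finishes but before span.stop(), so the recorded span carries the result counters. A reduced sketch of that ordering follows; CompactionResult and runCompaction are hypothetical stand-ins for Compactor and its stats.

import org.apache.accumulo.core.trace.Span;
import org.apache.accumulo.core.trace.Trace;

public class AnnotatedSpanSketch {

  // Hypothetical container for compaction results.
  static class CompactionResult {
    long entriesRead;
    long entriesWritten;
  }

  public void compactFiles(int fileCount) {
    Span span = Trace.start("compactFiles");
    try {
      CompactionResult result = runCompaction();
      // Attach counters while the span is still open; Span.data takes strings.
      span.data("files", "" + fileCount);
      span.data("read", "" + result.entriesRead);
      span.data("written", "" + result.entriesWritten);
    } finally {
      span.stop();
    }
  }

  private CompactionResult runCompaction() {
    // hypothetical work
    return new CompactionResult();
  }
}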
Use of org.apache.accumulo.core.trace.Span in project accumulo by apache.
In class RpcClientInvocationHandler, method invoke.
@Override
public Object invoke(Object obj, Method method, Object[] args) throws Throwable {
  if (args == null || args.length < 1 || args[0] != null) {
    return method.invoke(instance, args);
  }
  Class<?> klass = method.getParameterTypes()[0];
  if (TInfo.class.isAssignableFrom(klass)) {
    args[0] = Tracer.traceInfo();
  }
  Span span = Trace.start("client:" + method.getName());
  try {
    return method.invoke(instance, args);
  } catch (InvocationTargetException ex) {
    throw ex.getCause();
  } finally {
    span.stop();
  }
}
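A handler like this is attached to a Thrift client interface with java.lang.reflect.Proxy. The sketch below shows a simplified, hypothetical version that keeps only the span bookkeeping and exception unwrapping; the TInfo injection from the real handler is omitted.

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

import org.apache.accumulo.core.trace.Span;
import org.apache.accumulo.core.trace.Trace;

public final class TracingProxySketch {

  static class TracingHandler implements InvocationHandler {
    private final Object instance;

    TracingHandler(Object instance) {
      this.instance = instance;
    }

    @Override
    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
      Span span = Trace.start("client:" + method.getName());
      try {
        return method.invoke(instance, args);
      } catch (InvocationTargetException ex) {
        // Unwrap so callers see the underlying exception, not reflection's wrapper.
        throw ex.getCause();
      } finally {
        span.stop();
      }
    }
  }

  // Wrap any interface implementation so every call gets a client-side span.
  @SuppressWarnings("unchecked")
  public static <T> T wrap(Class<T> iface, T delegate) {
    return (T) Proxy.newProxyInstance(iface.getClassLoader(), new Class<?>[] {iface},
        new TracingHandler(delegate));
  }
}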
Use of org.apache.accumulo.core.trace.Span in project accumulo by apache.
In class ThriftScanner, method scan.
public static List<KeyValue> scan(ClientContext context, ScanState scanState, int timeOut) throws ScanTimedOutException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
  TabletLocation loc = null;
  Instance instance = context.getInstance();
  long startTime = System.currentTimeMillis();
  String lastError = null;
  String error = null;
  int tooManyFilesCount = 0;
  long sleepMillis = 100;
  final long maxSleepTime = context.getConfiguration().getTimeInMillis(Property.GENERAL_MAX_SCANNER_RETRY_PERIOD);
  List<KeyValue> results = null;
  Span span = Trace.start("scan");
  try {
    while (results == null && !scanState.finished) {
      if (Thread.currentThread().isInterrupted()) {
        throw new AccumuloException("Thread interrupted");
      }
      if ((System.currentTimeMillis() - startTime) / 1000.0 > timeOut)
        throw new ScanTimedOutException();
      while (loc == null) {
        long currentTime = System.currentTimeMillis();
        if ((currentTime - startTime) / 1000.0 > timeOut)
          throw new ScanTimedOutException();
        Span locateSpan = Trace.start("scan:locateTablet");
        try {
          loc = TabletLocator.getLocator(context, scanState.tableId).locateTablet(context, scanState.startRow, scanState.skipStartRow, false);
          if (loc == null) {
            if (!Tables.exists(instance, scanState.tableId))
              throw new TableDeletedException(scanState.tableId.canonicalID());
            else if (Tables.getTableState(instance, scanState.tableId) == TableState.OFFLINE)
              throw new TableOfflineException(instance, scanState.tableId.canonicalID());
            error = "Failed to locate tablet for table : " + scanState.tableId + " row : " + scanState.startRow;
            if (!error.equals(lastError))
              log.debug("{}", error);
            else if (log.isTraceEnabled())
              log.trace("{}", error);
            lastError = error;
            sleepMillis = pause(sleepMillis, maxSleepTime);
          } else {
            // when a tablet splits we do want to continue scanning the low child
            // of the split if we are already past it
            Range dataRange = loc.tablet_extent.toDataRange();
            if (scanState.range.getStartKey() != null && dataRange.afterEndKey(scanState.range.getStartKey())) {
              // go to the next tablet
              scanState.startRow = loc.tablet_extent.getEndRow();
              scanState.skipStartRow = true;
              loc = null;
            } else if (scanState.range.getEndKey() != null && dataRange.beforeStartKey(scanState.range.getEndKey())) {
              // should not happen
              throw new RuntimeException("Unexpected tablet, extent : " + loc.tablet_extent + " range : " + scanState.range + " startRow : " + scanState.startRow);
            }
          }
        } catch (AccumuloServerException e) {
          log.debug("Scan failed, server side exception : {}", e.getMessage());
          throw e;
        } catch (AccumuloException e) {
          error = "exception from tablet loc " + e.getMessage();
          if (!error.equals(lastError))
            log.debug("{}", error);
          else if (log.isTraceEnabled())
            log.trace("{}", error);
          lastError = error;
          sleepMillis = pause(sleepMillis, maxSleepTime);
        } finally {
          locateSpan.stop();
        }
      }
      Span scanLocation = Trace.start("scan:location");
      scanLocation.data("tserver", loc.tablet_location);
      try {
        results = scan(loc, scanState, context);
      } catch (AccumuloSecurityException e) {
        Tables.clearCache(instance);
        if (!Tables.exists(instance, scanState.tableId))
          throw new TableDeletedException(scanState.tableId.canonicalID());
        e.setTableInfo(Tables.getPrintableTableInfoFromId(instance, scanState.tableId));
        throw e;
      } catch (TApplicationException tae) {
        throw new AccumuloServerException(loc.tablet_location, tae);
      } catch (TSampleNotPresentException tsnpe) {
        String message = "Table " + Tables.getPrintableTableInfoFromId(instance, scanState.tableId) + " does not have sampling configured or built";
        throw new SampleNotPresentException(message, tsnpe);
      } catch (NotServingTabletException e) {
        error = "Scan failed, not serving tablet " + loc;
        if (!error.equals(lastError))
          log.debug("{}", error);
        else if (log.isTraceEnabled())
          log.trace("{}", error);
        lastError = error;
        TabletLocator.getLocator(context, scanState.tableId).invalidateCache(loc.tablet_extent);
        loc = null;
        // no need to try the current scan id somewhere else
        scanState.scanID = null;
        if (scanState.isolated)
          throw new IsolationException();
        sleepMillis = pause(sleepMillis, maxSleepTime);
      } catch (NoSuchScanIDException e) {
        error = "Scan failed, no such scan id " + scanState.scanID + " " + loc;
        if (!error.equals(lastError))
          log.debug("{}", error);
        else if (log.isTraceEnabled())
          log.trace("{}", error);
        lastError = error;
        if (scanState.isolated)
          throw new IsolationException();
        scanState.scanID = null;
      } catch (TooManyFilesException e) {
        error = "Tablet has too many files " + loc + " retrying...";
        if (!error.equals(lastError)) {
          log.debug("{}", error);
          tooManyFilesCount = 0;
        } else {
          tooManyFilesCount++;
          if (tooManyFilesCount == 300)
            log.warn("{}", error);
          else if (log.isTraceEnabled())
            log.trace("{}", error);
        }
        lastError = error;
        // not sure what state the scan session on the server side is
        // in after this occurs, so let's be cautious and start a new
        // scan session
        scanState.scanID = null;
        if (scanState.isolated)
          throw new IsolationException();
        sleepMillis = pause(sleepMillis, maxSleepTime);
      } catch (TException e) {
        TabletLocator.getLocator(context, scanState.tableId).invalidateCache(context.getInstance(), loc.tablet_location);
        error = "Scan failed, thrift error " + e.getClass().getName() + " " + e.getMessage() + " " + loc;
        if (!error.equals(lastError))
          log.debug("{}", error);
        else if (log.isTraceEnabled())
          log.trace("{}", error);
        lastError = error;
        loc = null;
        // do not want to continue using the same scan id; if a timeout occurred, it could cause a batch
        // to be skipped, because a thread on the server side may still be processing the timed-out
        // continue scan
        scanState.scanID = null;
        if (scanState.isolated)
          throw new IsolationException();
        sleepMillis = pause(sleepMillis, maxSleepTime);
      } finally {
        scanLocation.stop();
      }
    }
    if (results != null && results.size() == 0 && scanState.finished) {
      results = null;
    }
    return results;
  } catch (InterruptedException ex) {
    throw new AccumuloException(ex);
  } finally {
    span.stop();
  }
}
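ThriftScanner.scan layers its spans: one long-lived "scan" span wraps the whole retry loop, while each attempt opens short-lived child spans for tablet location and for the read itself. A skeletal sketch of that structure follows; locateTablet, readBatch, done, and server are hypothetical placeholders.

import org.apache.accumulo.core.trace.Span;
import org.apache.accumulo.core.trace.Trace;

public class NestedSpansSketch {

  private boolean done = false;
  private String server = "host:9997"; // hypothetical tablet server address

  public void scan() {
    // Outer span covers the entire retry loop.
    Span span = Trace.start("scan");
    try {
      while (!done) {
        // Child span for locating the tablet.
        Span locateSpan = Trace.start("scan:locateTablet");
        try {
          locateTablet();
        } finally {
          locateSpan.stop();
        }
        // Child span for the read, annotated with the server contacted.
        Span scanLocation = Trace.start("scan:location");
        scanLocation.data("tserver", server);
        try {
          readBatch();
        } finally {
          scanLocation.stop();
        }
      }
    } finally {
      span.stop();
    }
  }

  private void locateTablet() { /* hypothetical */ }

  private void readBatch() {
    // hypothetical read; finish after one batch so the sketch terminates
    done = true;
  }
}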