Use of org.apache.accumulo.server.problems.ProblemReport in project accumulo by apache.
The class MinorCompactor, method call():
@Override
public CompactionStats call() {
  final String outputFileName = getOutputFile();
  log.debug("Begin minor compaction {} {}", outputFileName, getExtent());
  // output to new MapFile with a temporary name
  int sleepTime = 100;
  double growthFactor = 4;
  // 3 minutes
  int maxSleepTime = 1000 * 60 * 3;
  boolean reportedProblem = false;
  runningCompactions.add(this);
  try {
    do {
      try {
        CompactionStats ret = super.call();
        if (reportedProblem) {
          ProblemReports.getInstance(tabletServer).deleteProblemReport(getExtent().getTableId(), ProblemType.FILE_WRITE, outputFileName);
        }
        return ret;
      } catch (IOException e) {
        log.warn("MinC failed ({}) to create {} retrying ...", e.getMessage(), outputFileName);
        ProblemReports.getInstance(tabletServer).report(new ProblemReport(getExtent().getTableId(), ProblemType.FILE_WRITE, outputFileName, e));
        reportedProblem = true;
      } catch (RuntimeException e) {
        // if this is coming from a user iterator, it is possible that the user could change
        // the iterator config and that the minor compaction would succeed
        log.warn("MinC failed ({}) to create {} retrying ...", e.getMessage(), outputFileName, e);
        ProblemReports.getInstance(tabletServer).report(new ProblemReport(getExtent().getTableId(), ProblemType.FILE_WRITE, outputFileName, e));
        reportedProblem = true;
      } catch (CompactionCanceledException e) {
        throw new IllegalStateException(e);
      }
      Random random = new Random();
      int sleep = sleepTime + random.nextInt(sleepTime);
      log.debug("MinC failed sleeping {} ms before retrying", sleep);
      sleepUninterruptibly(sleep, TimeUnit.MILLISECONDS);
      sleepTime = (int) Math.round(Math.min(maxSleepTime, sleepTime * growthFactor));
      // clean up
      try {
        if (getFileSystem().exists(new Path(outputFileName))) {
          getFileSystem().deleteRecursively(new Path(outputFileName));
        }
      } catch (IOException e) {
        log.warn("Failed to delete failed MinC file {} {}", outputFileName, e.getMessage());
      }
      if (isTableDeleting())
        return new CompactionStats(0, 0);
    } while (true);
  } finally {
    thread = null;
    runningCompactions.remove(this);
  }
}
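The method above retries a failed minor compaction with exponential backoff and jitter: the sleep grows by a factor of 4 per attempt, is capped at 3 minutes, and a random component keeps concurrently failing compactions from retrying in lockstep. Below is a minimal standalone sketch of that backoff pattern; the RetryableTask interface and runWithBackoff name are hypothetical, not Accumulo APIs.

import java.util.Random;
import java.util.concurrent.TimeUnit;

public class BackoffSketch {

  // Hypothetical stand-in for the work the compactor retries.
  interface RetryableTask<T> {
    T run() throws Exception;
  }

  static <T> T runWithBackoff(RetryableTask<T> task) throws InterruptedException {
    int sleepTime = 100;                    // initial sleep in ms
    final int maxSleepTime = 1000 * 60 * 3; // cap at 3 minutes
    final double growthFactor = 4;
    Random random = new Random();
    while (true) {
      try {
        return task.run();
      } catch (Exception e) {
        // add jitter: sleep somewhere between sleepTime and 2 * sleepTime ms
        int sleep = sleepTime + random.nextInt(sleepTime);
        TimeUnit.MILLISECONDS.sleep(sleep);
        // grow the base interval geometrically, but never past the cap
        sleepTime = (int) Math.round(Math.min(maxSleepTime, sleepTime * growthFactor));
      }
    }
  }
}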
Use of org.apache.accumulo.server.problems.ProblemReport in project accumulo by apache.
The class Tablet, method completeClose():
synchronized void completeClose(boolean saveState, boolean completeClose) throws IOException {
  if (!isClosing() || isCloseComplete() || closeCompleting) {
    throw new IllegalStateException("closeState = " + closeState);
  }
  log.debug("completeClose(saveState={} completeClose={}) {}", saveState, completeClose, getExtent());
  // ensure this method is only called once, also guards against multiple
  // threads entering the method at the same time
  closeCompleting = true;
  closeState = CloseState.CLOSED;
  // modify dataSourceDeletions so scans will try to switch data sources and fail because the tablet is closed
  dataSourceDeletions.incrementAndGet();
  for (ScanDataSource activeScan : activeScans) {
    activeScan.interrupt();
  }
  // wait for reads and writes to complete
  while (writesInProgress > 0 || activeScans.size() > 0) {
    try {
      this.wait(50);
    } catch (InterruptedException e) {
      log.error(e.toString());
    }
  }
  getTabletMemory().waitForMinC();
  if (saveState && getTabletMemory().getMemTable().getNumEntries() > 0) {
    try {
      prepareForMinC(getFlushID(), MinorCompactionReason.CLOSE).run();
    } catch (NoNodeException e) {
      throw new RuntimeException(e);
    }
  }
  if (saveState) {
    // at this point all tablet data is flushed, so do a consistency check
    RuntimeException err = null;
    for (int i = 0; i < 5; i++) {
      try {
        closeConsistencyCheck();
        err = null;
      } catch (RuntimeException t) {
        err = t;
        log.error("Consistency check fails, retrying", t);
        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
      }
    }
    if (err != null) {
      ProblemReports.getInstance(tabletServer).report(new ProblemReport(extent.getTableId(), ProblemType.TABLET_LOAD, this.extent.toString(), err));
      log.error("Tablet closed consistency check has failed for {} giving up and closing", this.extent);
    }
  }
  try {
    getTabletMemory().getMemTable().delete(0);
  } catch (Throwable t) {
    log.error("Failed to delete mem table : " + t.getMessage(), t);
  }
  getTabletMemory().close();
  // close map files
  getTabletResources().close();
  log.debug("TABLET_HIST {} closed", extent);
  tableConfiguration.getNamespaceConfiguration().removeObserver(configObserver);
  tableConfiguration.removeObserver(configObserver);
  if (completeClose)
    closeState = CloseState.COMPLETE;
}
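Note how the consistency check above remembers only the outcome of the final attempt: err is cleared whenever closeConsistencyCheck() succeeds and overwritten on each failure, so a ProblemReport is filed only when the check is still failing after five tries. A self-contained sketch of that last-error retry shape, with checkConsistency() as a hypothetical placeholder:

import java.util.concurrent.TimeUnit;

public class BoundedRetrySketch {

  // Hypothetical placeholder for a check that may fail transiently.
  static void checkConsistency() {
    // throws RuntimeException when the state is inconsistent
  }

  static RuntimeException tryCheck(int attempts, long sleepMs) throws InterruptedException {
    RuntimeException err = null;
    for (int i = 0; i < attempts; i++) {
      try {
        checkConsistency();
        err = null; // this attempt succeeded; forget earlier failures
      } catch (RuntimeException t) {
        err = t;    // remember only the most recent failure
        TimeUnit.MILLISECONDS.sleep(sleepMs);
      }
    }
    return err; // non-null only if the final attempt failed
  }
}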
Use of org.apache.accumulo.server.problems.ProblemReport in project accumulo by apache.
The class FileManager, method reserveReaders():
private Map<FileSKVIterator, String> reserveReaders(KeyExtent tablet, Collection<String> files, boolean continueOnFailure) throws IOException {
  if (!tablet.isMeta() && files.size() >= maxOpen) {
    throw new IllegalArgumentException("requested files exceeds max open");
  }
  if (files.size() == 0) {
    return Collections.emptyMap();
  }
  List<String> filesToOpen = null;
  List<FileSKVIterator> filesToClose = Collections.emptyList();
  Map<FileSKVIterator, String> readersReserved = new HashMap<>();
  if (!tablet.isMeta()) {
    filePermits.acquireUninterruptibly(files.size());
  }
  // a synch block
  synchronized (this) {
    filesToOpen = takeOpenFiles(files, readersReserved);
    if (!filesToOpen.isEmpty()) {
      int numOpen = countReaders(openFiles);
      if (filesToOpen.size() + numOpen + reservedReaders.size() > maxOpen) {
        filesToClose = takeLRUOpenFiles((filesToOpen.size() + numOpen + reservedReaders.size()) - maxOpen);
      }
    }
  }
  // close files before opening files to ensure we stay under resource
  // limitations
  closeReaders(filesToClose);
  // open any files that need to be opened
  for (String file : filesToOpen) {
    try {
      if (!file.contains(":"))
        throw new IllegalArgumentException("Expected uri, got : " + file);
      Path path = new Path(file);
      FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
      // log.debug("Opening "+file + " path " + path);
      FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
          .forFile(path.toString(), ns, ns.getConf())
          .withTableConfiguration(context.getServerConfigurationFactory().getTableConfiguration(tablet.getTableId()))
          .withBlockCache(dataCache, indexCache)
          .build();
      readersReserved.put(reader, file);
    } catch (Exception e) {
      ProblemReports.getInstance(context).report(new ProblemReport(tablet.getTableId(), ProblemType.FILE_READ, file, e));
      if (continueOnFailure) {
        // release the permit for the file that failed to open
        if (!tablet.isMeta()) {
          filePermits.release(1);
        }
        log.warn("Failed to open file {} {} continuing...", file, e.getMessage(), e);
      } else {
        // close whatever files were opened
        closeReaders(readersReserved.keySet());
        if (!tablet.isMeta()) {
          filePermits.release(files.size());
        }
        log.error("Failed to open file {} {}", file, e.getMessage());
        throw new IOException("Failed to open " + file, e);
      }
    }
  }
  synchronized (this) {
    // update set of reserved readers
    reservedReaders.putAll(readersReserved);
  }
  return readersReserved;
}
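The permit accounting here is worth noting: one semaphore permit is acquired per requested file up front, a single permit is returned for each file that fails to open in continue-on-failure mode, and all permits are returned when the whole reservation is abandoned. A minimal sketch of that pattern using java.util.concurrent.Semaphore; the PermitSketch class and openFile() method are hypothetical placeholders:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Semaphore;

public class PermitSketch {

  private final Semaphore filePermits = new Semaphore(100); // cap on concurrently open files

  // Hypothetical placeholder for opening one file.
  private AutoCloseable openFile(String file) throws IOException {
    throw new IOException("not implemented in this sketch");
  }

  List<AutoCloseable> reserve(List<String> files, boolean continueOnFailure) throws IOException {
    filePermits.acquireUninterruptibly(files.size()); // one permit per requested file
    List<AutoCloseable> opened = new ArrayList<>();
    for (String file : files) {
      try {
        opened.add(openFile(file));
      } catch (IOException e) {
        if (continueOnFailure) {
          filePermits.release(1); // give back the permit for just this file
        } else {
          // abandon the reservation: close what was opened, return all permits
          for (AutoCloseable c : opened) {
            try { c.close(); } catch (Exception ignored) {}
          }
          filePermits.release(files.size());
          throw e;
        }
      }
    }
    return opened;
  }
}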
Use of org.apache.accumulo.server.problems.ProblemReport in project accumulo by apache.
The class ProblemsResource, method getDetails():
/**
 * Generates a list of the problem details as a JSON object
 *
 * @return problem details list
 */
@GET
@Path("details")
public ProblemDetail getDetails() {
  ProblemDetail problems = new ProblemDetail();
  if (Monitor.getProblemException() == null) {
    for (Entry<Table.ID, Map<ProblemType, Integer>> entry : Monitor.getProblemSummary().entrySet()) {
      ArrayList<ProblemReport> problemReports = new ArrayList<>();
      Iterator<ProblemReport> iter = entry.getKey() == null
          ? ProblemReports.getInstance(Monitor.getContext()).iterator()
          : ProblemReports.getInstance(Monitor.getContext()).iterator(entry.getKey());
      while (iter.hasNext())
        problemReports.add(iter.next());
      for (ProblemReport pr : problemReports) {
        String tableName = Tables.getPrintableTableInfoFromId(HdfsZooInstance.getInstance(), pr.getTableId());
        problems.addProblemDetail(new ProblemDetailInformation(tableName, entry.getKey(), pr.getProblemType().name(), pr.getServer(), pr.getTime(), pr.getResource(), pr.getException()));
      }
    }
  }
  return problems;
}
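For context, this is a JAX-RS resource method: @GET and @Path("details") bind getDetails() to an HTTP GET on the monitor's problems endpoint, and the returned POJO is serialized to JSON by the framework. A minimal sketch of that same shape, assuming the javax.ws.rs API; the resource and POJO names here are hypothetical:

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;

// Hypothetical resource illustrating the JAX-RS shape used above.
@Path("/problems")
public class ExampleResource {

  @GET
  @Path("details")
  @Produces(MediaType.APPLICATION_JSON)
  public ExampleDetail getDetails() {
    ExampleDetail detail = new ExampleDetail();
    detail.message = "populated from a problem-report iterator in the real code";
    return detail;
  }

  // Simple POJO serialized to JSON by the JAX-RS provider.
  public static class ExampleDetail {
    public String message;
  }
}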
Use of org.apache.accumulo.server.problems.ProblemReport in project accumulo by apache.
The class Compactor, method openMapDataFiles():
private List<SortedKeyValueIterator<Key, Value>> openMapDataFiles(String lgName, ArrayList<FileSKVIterator> readers) throws IOException {
  List<SortedKeyValueIterator<Key, Value>> iters = new ArrayList<>(filesToCompact.size());
  for (FileRef mapFile : filesToCompact.keySet()) {
    try {
      FileOperations fileFactory = FileOperations.getInstance();
      FileSystem fs = this.fs.getVolumeByPath(mapFile.path()).getFileSystem();
      FileSKVIterator reader;
      reader = fileFactory.newReaderBuilder()
          .forFile(mapFile.path().toString(), fs, fs.getConf())
          .withTableConfiguration(acuTableConf)
          .withRateLimiter(env.getReadLimiter())
          .build();
      readers.add(reader);
      SortedKeyValueIterator<Key, Value> iter = new ProblemReportingIterator(context, extent.getTableId(), mapFile.path().toString(), false, reader);
      if (filesToCompact.get(mapFile).isTimeSet()) {
        iter = new TimeSettingIterator(iter, filesToCompact.get(mapFile).getTime());
      }
      iters.add(iter);
    } catch (Throwable e) {
      ProblemReports.getInstance(context).report(new ProblemReport(extent.getTableId(), ProblemType.FILE_READ, mapFile.path().toString(), e));
      log.warn("Some problem opening map file {} {}", mapFile, e.getMessage(), e);
      // failed to open some map file... close the ones that were opened
      for (FileSKVIterator reader : readers) {
        try {
          reader.close();
        } catch (Throwable e2) {
          log.warn("Failed to close map file", e2);
        }
      }
      readers.clear();
      if (e instanceof IOException)
        throw (IOException) e;
      throw new IOException("Failed to open map data files", e);
    }
  }
  return iters;
}
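If any file fails to open, the method closes every reader opened so far before rethrowing, so a partial open never leaks file handles into the caller's readers list. A generic sketch of that all-or-nothing open, with Resource and open() as hypothetical placeholders:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class OpenAllSketch {

  // Hypothetical resource type standing in for FileSKVIterator.
  interface Resource extends AutoCloseable {}

  // Hypothetical placeholder for opening a single file.
  static Resource open(String file) throws IOException {
    throw new IOException("not implemented in this sketch");
  }

  static List<Resource> openAll(List<String> files) throws IOException {
    List<Resource> opened = new ArrayList<>();
    for (String file : files) {
      try {
        opened.add(open(file));
      } catch (Throwable e) {
        // one file failed to open: close everything opened so far
        for (Resource r : opened) {
          try {
            r.close();
          } catch (Exception e2) {
            // best effort; the original failure is the one worth reporting
          }
        }
        opened.clear();
        if (e instanceof IOException)
          throw (IOException) e;
        throw new IOException("Failed to open " + file, e);
      }
    }
    return opened;
  }
}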