Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by Apache.
Class ThriftClientHandler, method loadTablet.
@Override
public void loadTablet(TInfo tinfo, TCredentials credentials, String lock, final TKeyExtent textent) {
  try {
    checkPermission(credentials, lock, "loadTablet");
  } catch (ThriftSecurityException e) {
    log.error("Caller doesn't have permission to load a tablet", e);
    throw new RuntimeException(e);
  }

  final KeyExtent extent = KeyExtent.fromThrift(textent);

  synchronized (server.unopenedTablets) {
    synchronized (server.openingTablets) {
      synchronized (server.onlineTablets) {
        // Checking if the current tablet is in any of the sets
        // below is not a strong enough check to catch all overlapping tablets
        // when splits and fix splits are occurring
        Set<KeyExtent> unopenedOverlapping =
            KeyExtent.findOverlapping(extent, server.unopenedTablets);
        Set<KeyExtent> openingOverlapping =
            KeyExtent.findOverlapping(extent, server.openingTablets);
        Set<KeyExtent> onlineOverlapping =
            KeyExtent.findOverlapping(extent, server.getOnlineTablets());

        Set<KeyExtent> all = new HashSet<>();
        all.addAll(unopenedOverlapping);
        all.addAll(openingOverlapping);
        all.addAll(onlineOverlapping);

        if (!all.isEmpty()) {
          // ignore any tablets that have recently split, for error logging
          for (KeyExtent e2 : onlineOverlapping) {
            Tablet tablet = server.getOnlineTablet(e2);
            if (System.currentTimeMillis() - tablet.getSplitCreationTime()
                < RECENTLY_SPLIT_MILLIES) {
              all.remove(e2);
            }
          }

          // ignore self, for error logging
          all.remove(extent);

          if (!all.isEmpty()) {
            log.error(
                "Tablet {} overlaps a previously assigned tablet, possibly due to a recent split. "
                    + "Overlapping tablets: Unopened: {}, Opening: {}, Online: {}",
                extent, unopenedOverlapping, openingOverlapping, onlineOverlapping);
          }
          return;
        }

        server.unopenedTablets.add(extent);
      }
    }
  }

  TabletLogger.loading(extent, server.getTabletSession());

  final AssignmentHandler ah = new AssignmentHandler(server, extent);
  if (extent.isRootTablet()) {
    Threads.createThread("Root Tablet Assignment", () -> {
      ah.run();
      if (server.getOnlineTablets().containsKey(extent)) {
        log.info("Root tablet loaded: {}", extent);
      } else {
        log.info("Root tablet failed to load");
      }
    }).start();
  } else {
    if (extent.isMeta()) {
      server.resourceManager.addMetaDataAssignment(extent, log, ah);
    } else {
      server.resourceManager.addAssignment(extent, log, ah);
    }
  }
}
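The overlap guard above is built on KeyExtent.findOverlapping, run against the unopened, opening, and online tablet collections while all three locks are held. Below is a minimal standalone sketch of that check, assuming the public KeyExtent(TableId, Text endRow, Text prevEndRow) constructor and a SortedSet overload of findOverlapping like the ones used above; the table id and split points are made up for illustration.

import java.util.Set;
import java.util.TreeSet;

import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.hadoop.io.Text;

public class OverlapCheckSketch {
  public static void main(String[] args) {
    // Two adjacent tablets of a made-up table "1a": (-inf, m] and (m, +inf)
    TreeSet<KeyExtent> online = new TreeSet<>();
    online.add(new KeyExtent(TableId.of("1a"), new Text("m"), null));
    online.add(new KeyExtent(TableId.of("1a"), null, new Text("m")));

    // A candidate tablet covering (g, t] overlaps both tablets above,
    // so a load request for it would be rejected by the guard in loadTablet
    KeyExtent candidate = new KeyExtent(TableId.of("1a"), new Text("t"), new Text("g"));
    Set<KeyExtent> overlapping = KeyExtent.findOverlapping(candidate, online);
    System.out.println(overlapping);
  }
}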
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by Apache.
Class ThriftClientHandler, method writeConditionalMutations.
private void writeConditionalMutations(Map<KeyExtent, List<ServerConditionalMutation>> updates,
    ArrayList<TCMResult> results, ConditionalSession sess) {
  Set<Entry<KeyExtent, List<ServerConditionalMutation>>> es = updates.entrySet();

  Map<CommitSession, List<Mutation>> sendables = new HashMap<>();
  Map<CommitSession, TabletMutations> loggables = new HashMap<>();

  boolean sessionCanceled = sess.interruptFlag.get();

  Span span = TraceUtil.startSpan(this.getClass(), "writeConditionalMutations::prep");
  try (Scope scope = span.makeCurrent()) {
    long t1 = System.currentTimeMillis();
    for (Entry<KeyExtent, List<ServerConditionalMutation>> entry : es) {
      final Tablet tablet = server.getOnlineTablet(entry.getKey());

      if (tablet == null || tablet.isClosed() || sessionCanceled) {
        addMutationsAsTCMResults(results, entry.getValue(), TCMStatus.IGNORED);
      } else {
        final Durability durability =
            DurabilityImpl.resolveDurabilty(sess.durability, tablet.getDurability());

        @SuppressWarnings("unchecked")
        List<Mutation> mutations = (List<Mutation>) (List<? extends Mutation>) entry.getValue();
        if (!mutations.isEmpty()) {

          PreparedMutations prepared = tablet.prepareMutationsForCommit(
              new TservConstraintEnv(server.getContext(), security, sess.credentials), mutations);

          if (prepared.tabletClosed()) {
            addMutationsAsTCMResults(results, mutations, TCMStatus.IGNORED);
          } else {
            if (!prepared.getNonViolators().isEmpty()) {
              // Only log and commit mutations that did not violate constraints.
              List<Mutation> validMutations = prepared.getNonViolators();
              addMutationsAsTCMResults(results, validMutations, TCMStatus.ACCEPTED);
              CommitSession session = prepared.getCommitSession();
              if (durability != Durability.NONE) {
                loggables.put(session, new TabletMutations(session, validMutations, durability));
              }
              sendables.put(session, validMutations);
            }

            if (!prepared.getViolators().isEmpty()) {
              addMutationsAsTCMResults(results, prepared.getViolators(), TCMStatus.VIOLATED);
            }
          }
        }
      }
    }

    long t2 = System.currentTimeMillis();
    updateAvgPrepTime(t2 - t1, es.size());
  } catch (Exception e) {
    TraceUtil.setException(span, e, true);
    throw e;
  } finally {
    span.end();
  }

  Span span2 = TraceUtil.startSpan(this.getClass(), "writeConditionalMutations::wal");
  try (Scope scope = span2.makeCurrent()) {
    while (!loggables.isEmpty()) {
      try {
        long t1 = System.currentTimeMillis();
        server.logger.logManyTablets(loggables);
        long t2 = System.currentTimeMillis();
        updateWalogWriteTime(t2 - t1);
        break;
      } catch (IOException | FSError ex) {
        TraceUtil.setException(span2, ex, false);
        log.warn("logging mutations failed, retrying");
      } catch (Exception t) {
        log.error("Unknown exception logging mutations, counts for"
            + " mutations in flight not decremented!", t);
        throw new RuntimeException(t);
      }
    }
  } catch (Exception e) {
    TraceUtil.setException(span2, e, true);
    throw e;
  } finally {
    span2.end();
  }

  Span span3 = TraceUtil.startSpan(this.getClass(), "writeConditionalMutations::commit");
  try (Scope scope = span3.makeCurrent()) {
    long t1 = System.currentTimeMillis();
    sendables.forEach(CommitSession::commit);
    long t2 = System.currentTimeMillis();
    updateAvgCommitTime(t2 - t1, sendables.size());
  } catch (Exception e) {
    TraceUtil.setException(span3, e, true);
    throw e;
  } finally {
    span3.end();
  }
}
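The method is organized as three traced phases, prep, wal, and commit, each wrapped in its own span. Below is a minimal sketch of that tracing pattern, reusing only the TraceUtil, Span, and Scope calls that appear above and assuming TraceUtil is the core trace utility the handler imports; the helper class and phase names are hypothetical.

import io.opentelemetry.api.trace.Span;
import io.opentelemetry.context.Scope;
import org.apache.accumulo.core.trace.TraceUtil;

public class SpanPhaseSketch {

  // Runs one named phase inside its own span, mirroring the
  // prep -> wal -> commit structure of writeConditionalMutations.
  static void runPhase(String name, Runnable work) {
    Span span = TraceUtil.startSpan(SpanPhaseSketch.class, name);
    try (Scope scope = span.makeCurrent()) {
      work.run();
    } catch (RuntimeException e) {
      // record the failure on the span before rethrowing, as the handler does
      TraceUtil.setException(span, e, true);
      throw e;
    } finally {
      span.end();
    }
  }

  public static void main(String[] args) {
    runPhase("example::prep", () -> System.out.println("prepare mutations"));
    runPhase("example::wal", () -> System.out.println("write ahead log"));
    runPhase("example::commit", () -> System.out.println("commit to tablets"));
  }
}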
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by Apache.
Class ThriftClientHandler, method flush.
private void flush(UpdateSession us) {

  int mutationCount = 0;
  Map<CommitSession, List<Mutation>> sendables = new HashMap<>();
  Map<CommitSession, TabletMutations> loggables = new HashMap<>();
  Throwable error = null;

  long pt1 = System.currentTimeMillis();

  boolean containsMetadataTablet = false;
  for (Tablet tablet : us.queuedMutations.keySet()) {
    if (tablet.getExtent().isMeta()) {
      containsMetadataTablet = true;
    }
  }

  if (!containsMetadataTablet && !us.queuedMutations.isEmpty()) {
    server.resourceManager.waitUntilCommitsAreEnabled();
  }

  Span span = TraceUtil.startSpan(this.getClass(), "flush::prep");
  try (Scope scope = span.makeCurrent()) {
    for (Entry<Tablet, ? extends List<Mutation>> entry : us.queuedMutations.entrySet()) {
      Tablet tablet = entry.getKey();
      Durability durability =
          DurabilityImpl.resolveDurabilty(us.durability, tablet.getDurability());
      List<Mutation> mutations = entry.getValue();
      if (!mutations.isEmpty()) {
        try {
          server.updateMetrics.addMutationArraySize(mutations.size());

          PreparedMutations prepared = tablet.prepareMutationsForCommit(us.cenv, mutations);

          if (prepared.tabletClosed()) {
            if (us.currentTablet == tablet) {
              us.currentTablet = null;
            }
            us.failures.put(tablet.getExtent(), us.successfulCommits.get(tablet));
          } else {
            if (!prepared.getNonViolators().isEmpty()) {
              List<Mutation> validMutations = prepared.getNonViolators();
              CommitSession session = prepared.getCommitSession();
              if (durability != Durability.NONE) {
                loggables.put(session, new TabletMutations(session, validMutations, durability));
              }
              sendables.put(session, validMutations);
            }

            if (!prepared.getViolations().isEmpty()) {
              us.violations.add(prepared.getViolations());
              server.updateMetrics.addConstraintViolations(0);
            }
            // Use the size of the original mutation list, regardless of how many mutations
            // did not violate constraints.
            mutationCount += mutations.size();
          }
        } catch (Exception t) {
          error = t;
          log.error("Unexpected error preparing for commit", error);
          TraceUtil.setException(span, t, false);
          break;
        }
      }
    }
  } catch (Exception e) {
    TraceUtil.setException(span, e, true);
    throw e;
  } finally {
    span.end();
  }

  long pt2 = System.currentTimeMillis();
  us.prepareTimes.addStat(pt2 - pt1);
  updateAvgPrepTime(pt2 - pt1, us.queuedMutations.size());

  if (error != null) {
    sendables.forEach((commitSession, value) -> commitSession.abortCommit());
    throw new RuntimeException(error);
  }
  try {
    Span span2 = TraceUtil.startSpan(this.getClass(), "flush::wal");
    try (Scope scope = span2.makeCurrent()) {
      while (true) {
        try {
          long t1 = System.currentTimeMillis();
          server.logger.logManyTablets(loggables);
          long t2 = System.currentTimeMillis();
          us.walogTimes.addStat(t2 - t1);
          updateWalogWriteTime((t2 - t1));
          break;
        } catch (IOException | FSError ex) {
          log.warn("logging mutations failed, retrying");
        } catch (Exception t) {
          log.error("Unknown exception logging mutations, counts"
              + " for mutations in flight not decremented!", t);
          throw new RuntimeException(t);
        }
      }
    } catch (Exception e) {
      TraceUtil.setException(span2, e, true);
      throw e;
    } finally {
      span2.end();
    }

    Span span3 = TraceUtil.startSpan(this.getClass(), "flush::commit");
    try (Scope scope = span3.makeCurrent()) {
      long t1 = System.currentTimeMillis();
      sendables.forEach((commitSession, mutations) -> {
        commitSession.commit(mutations);
        KeyExtent extent = commitSession.getExtent();

        if (us.currentTablet != null && extent == us.currentTablet.getExtent()) {
          // because constraint violations may filter out some
          // mutations, for proper accounting with the client code,
          // need to increment the count based on the original
          // number of mutations from the client NOT the filtered number
          us.successfulCommits.increment(us.currentTablet,
              us.queuedMutations.get(us.currentTablet).size());
        }
      });
      long t2 = System.currentTimeMillis();

      us.flushTime += (t2 - pt1);
      us.commitTimes.addStat(t2 - t1);

      updateAvgCommitTime(t2 - t1, sendables.size());
    } finally {
      span3.end();
    }
  } finally {
    us.queuedMutations.clear();
    if (us.currentTablet != null) {
      us.queuedMutations.put(us.currentTablet, new ArrayList<>());
    }
    server.updateTotalQueuedMutationSize(-us.queuedMutationSize);
    us.queuedMutationSize = 0;
  }
  us.totalUpdates += mutationCount;
}
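Both flush and writeConditionalMutations wrap the call to server.logger.logManyTablets in a loop that retries on IOException or FSError but rethrows anything else. Below is a minimal, self-contained sketch of that retry idiom, using a hypothetical WalWriter interface in place of the tablet server's logger.

import java.io.IOException;

public class WalRetrySketch {

  // Hypothetical stand-in for server.logger.logManyTablets(loggables)
  interface WalWriter {
    void logBatch() throws IOException;
  }

  // Retries indefinitely on I/O failures and rethrows anything else,
  // mirroring the while (true) loop in flush::wal above.
  static void writeWithRetry(WalWriter writer) {
    while (true) {
      try {
        writer.logBatch();
        return;
      } catch (IOException ex) {
        System.err.println("logging mutations failed, retrying: " + ex);
      } catch (RuntimeException t) {
        // unknown failures are not retried; propagate to the caller
        throw t;
      }
    }
  }
}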
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by Apache.
Class TabletServerLogger, method write.
private void write(final Collection<CommitSession> sessions, boolean mincFinish, Writer writer,
    Retry writeRetry) throws IOException {

  // Work very hard not to lock this during calls to the outside world
  int currentLogId = logId.get();

  boolean success = false;
  while (!success) {
    try {
      // get a reference to the loggers that no other thread can touch
      AtomicInteger currentId = new AtomicInteger(-1);
      DfsLogger copy = initializeLoggers(currentId);
      currentLogId = currentId.get();

      if (currentLogId == logId.get()) {
        for (CommitSession commitSession : sessions) {
          if (commitSession.beginUpdatingLogsUsed(copy, mincFinish)) {
            try {
              // Scribble out a tablet definition and then write to the metadata table
              write(singletonList(commitSession), false,
                  logger -> logger.defineTablet(commitSession), writeRetry);
            } finally {
              commitSession.finishUpdatingLogsUsed();
            }

            // Need to release
            KeyExtent extent = commitSession.getExtent();
            @SuppressWarnings("deprecation")
            boolean replicationEnabled =
                org.apache.accumulo.core.replication.ReplicationConfigurationUtil
                    .isEnabled(extent, tserver.getTableConfiguration(extent));
            if (replicationEnabled) {
              @SuppressWarnings("deprecation")
              Status status = org.apache.accumulo.server.replication.StatusUtil
                  .openWithUnknownLength(System.currentTimeMillis());
              log.debug("Writing " + ProtobufUtil.toString(status) + " to metadata table for "
                  + copy.getFileName());
              // Got some new WALs, note this in the metadata table
              ReplicationTableUtil.updateFiles(tserver.getContext(), commitSession.getExtent(),
                  copy.getFileName(), status);
            }
          }
        }
      }

      // Make sure that the logs haven't changed out from underneath our copy
      if (currentLogId == logId.get()) {

        // write the mutation to the logs
        LoggerOperation lop = writer.write(copy);
        lop.await();

        // double-check: did the log set change?
        success = (currentLogId == logId.get());
      }
    } catch (DfsLogger.LogClosedException | ClosedChannelException ex) {
      writeRetry.logRetry(log, "Logs closed while writing", ex);
    } catch (Exception t) {
      writeRetry.logRetry(log, "Failed to write to WAL", t);
      try {
        // Backoff
        writeRetry.waitForNextAttempt();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      }
    } finally {
      writeRetry.useRetry();
    }

    // Some sort of write failure occurred. Grab the write lock and reset the logs.
    // But since multiple threads will attempt it, only attempt the reset when
    // the logs haven't changed.
    final int finalCurrent = currentLogId;
    if (!success) {
      testLockAndRun(logIdLock, new TestCallWithWriteLock() {
        @Override
        boolean test() {
          return finalCurrent == logId.get();
        }

        @Override
        void withWriteLock() throws IOException {
          close();
        }
      });
    }
  }

  // if the log gets too big or too old, reset it .. grab the write lock first
  // event, tid, seq overhead
  logSizeEstimate.addAndGet(4 * 3);
  testLockAndRun(logIdLock, new TestCallWithWriteLock() {
    @Override
    boolean test() {
      return (logSizeEstimate.get() > maxSize)
          || ((System.currentTimeMillis() - createTime) > maxAge);
    }

    @Override
    void withWriteLock() throws IOException {
      close();
    }
  });
}
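The write loop above is an optimistic check: it records the current log id, performs the write, and only counts it as successful if the id is unchanged afterwards, otherwise the logs are closed and the loop retries. Below is a stripped-down sketch of that check using plain JDK types; the class and method names are hypothetical.

import java.util.concurrent.atomic.AtomicInteger;

public class OptimisticLogWriteSketch {

  private final AtomicInteger logId = new AtomicInteger();

  // Mirrors the check-write-recheck pattern of TabletServerLogger.write:
  // the write only counts if the log set did not change underneath it.
  boolean tryWrite(Runnable writeToCurrentLog) {
    int observedId = logId.get();
    writeToCurrentLog.run();
    // double-check: another thread may have swapped the logs mid-write
    return observedId == logId.get();
  }

  // Called when the logs are reset; in-flight writes observe the change and retry.
  void resetLogs() {
    logId.incrementAndGet();
  }
}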
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by Apache.
Class ThriftClientHandler, method unloadTablet.
@Override
public void unloadTablet(TInfo tinfo, TCredentials credentials, String lock, TKeyExtent textent,
    TUnloadTabletGoal goal, long requestTime) {
  try {
    checkPermission(credentials, lock, "unloadTablet");
  } catch (ThriftSecurityException e) {
    log.error("Caller doesn't have permission to unload a tablet", e);
    throw new RuntimeException(e);
  }

  KeyExtent extent = KeyExtent.fromThrift(textent);

  server.resourceManager.addMigration(extent,
      new UnloadTabletHandler(server, extent, goal, requestTime));
}
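Like loadTablet, unloadTablet converts the Thrift extent into the internal KeyExtent with KeyExtent.fromThrift. Below is a small round-trip sketch, assuming TKeyExtent lives in org.apache.accumulo.core.dataImpl.thrift and that KeyExtent exposes a matching toThrift(); the table id and row boundaries are made up.

import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent;
import org.apache.hadoop.io.Text;

public class ThriftRoundTripSketch {
  public static void main(String[] args) {
    // Tablet covering (f, q] of a made-up table id
    KeyExtent extent = new KeyExtent(TableId.of("2b"), new Text("q"), new Text("f"));

    // Serialize for the RPC layer and convert back, as loadTablet/unloadTablet do
    TKeyExtent textent = extent.toThrift();
    KeyExtent roundTripped = KeyExtent.fromThrift(textent);

    System.out.println(extent.equals(roundTripped)); // expected: true
  }
}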