use of org.apache.solr.common.SolrException in project lucene-solr by apache.
the class DefaultSolrCoreState method doRecovery.
@Override
public void doRecovery(CoreContainer cc, CoreDescriptor cd) {
Runnable recoveryTask = new Runnable() {
@Override
public void run() {
MDCLoggingContext.setCoreDescriptor(cc, cd);
try {
if (SKIP_AUTO_RECOVERY) {
log.warn("Skipping recovery according to sys prop solrcloud.skip.autorecovery");
return;
}
// check before we grab the lock
if (cc.isShutDown()) {
log.warn("Skipping recovery because Solr is shutdown");
return;
}
// if we can't get the lock, another recovery is running
// we check to see if there is already one waiting to go
// after the current one, and if there is, bail
boolean locked = recoveryLock.tryLock();
try {
if (!locked) {
if (recoveryWaiting.get() > 0) {
return;
}
// another recovery is running: queue up behind it and cancel it so ours can start sooner
recoveryWaiting.incrementAndGet();
cancelRecovery();
} else {
recoveryWaiting.incrementAndGet();
}
recoveryLock.lock();
try {
recoveryWaiting.decrementAndGet();
// to be air tight we must also check after lock
if (cc.isShutDown()) {
log.warn("Skipping recovery because Solr is shutdown");
return;
}
log.info("Running recovery");
recoveryThrottle.minimumWaitBetweenActions();
recoveryThrottle.markAttemptingAction();
recoveryStrat = recoveryStrategyBuilder.create(cc, cd, DefaultSolrCoreState.this);
recoveryStrat.setRecoveringAfterStartup(recoveringAfterStartup);
Future<?> future = cc.getUpdateShardHandler().getRecoveryExecutor().submit(recoveryStrat);
try {
future.get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new SolrException(ErrorCode.SERVER_ERROR, e);
} catch (ExecutionException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, e);
}
} finally {
recoveryLock.unlock();
}
} finally {
if (locked)
recoveryLock.unlock();
}
} finally {
MDCLoggingContext.clear();
}
}
};
try {
// we make recovery requests async - that async request may
// have to 'wait in line' a bit or bail if a recovery is
// already queued up - the recovery execution itself is run
// in another thread on another 'recovery' executor.
// The update executor is interrupted on shutdown and should
// not do disk IO.
// The recovery executor is not interrupted on shutdown.
//
// avoid deadlock: we can't use the recovery executor here
cc.getUpdateShardHandler().getUpdateExecutor().submit(recoveryTask);
} catch (RejectedExecutionException e) {
// fine, we are shutting down
}
}
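The SolrException usage worth noting here is the Future.get() handling: both InterruptedException and ExecutionException are converted into an unchecked SolrException with ErrorCode.SERVER_ERROR, and the thread's interrupt flag is restored first. Below is a minimal, self-contained sketch of that wrapping pattern; the RecoveryWrapDemo class, the awaitTask helper, and the executor setup are illustrative assumptions, only SolrException and ErrorCode come from org.apache.solr.common.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;

public class RecoveryWrapDemo {

  // Wait for an async task and rethrow any failure as a SolrException,
  // mirroring the Future.get() handling in doRecovery above.
  static void awaitTask(Future<?> future) {
    try {
      future.get();
    } catch (InterruptedException e) {
      // restore the interrupt flag before converting to an unchecked SolrException
      Thread.currentThread().interrupt();
      throw new SolrException(ErrorCode.SERVER_ERROR, e);
    } catch (ExecutionException e) {
      throw new SolrException(ErrorCode.SERVER_ERROR, e);
    }
  }

  public static void main(String[] args) {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    try {
      Future<?> f = executor.submit(() -> { /* stand-in for recovery work */ });
      awaitTask(f);
      System.out.println("task finished without error");
    } finally {
      executor.shutdown();
    }
  }
}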
use of org.apache.solr.common.SolrException in project lucene-solr by apache.
the class HdfsTransactionLog method lookup.
/* This method is thread safe */
@Override
public Object lookup(long pos) {
// A negative position can result from a log replay (which does not re-log, but does
// update the version map). This is OK since the node won't be ACTIVE when this happens.
if (pos < 0)
return null;
try {
// make sure any unflushed buffer has been flushed
ensureFlushed();
FSDataFastInputStream dis = new FSDataFastInputStream(fs.open(tlogFile), pos);
try {
dis.seek(pos);
LogCodec codec = new LogCodec(resolver);
return codec.readVal(new FastInputStream(dis));
} finally {
dis.close();
}
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "pos=" + pos, e);
}
}
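Here the IOException from reading the transaction log is rethrown as a SERVER_ERROR SolrException whose message records the failing position ("pos=" + pos), so the context survives even though the original exception becomes the cause. A minimal sketch of that pattern, with a hypothetical readAt helper standing in for the real tlog read:

import java.io.IOException;
import org.apache.solr.common.SolrException;

public class LookupErrorDemo {

  // Illustrative stand-in for a tlog read that fails with an IOException.
  static Object readAt(long pos) {
    try {
      throw new IOException("simulated read failure");
    } catch (IOException e) {
      // same wrapping as lookup(): keep the failing position in the message
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "pos=" + pos, e);
    }
  }

  public static void main(String[] args) {
    try {
      readAt(42L);
    } catch (SolrException e) {
      // prints the HTTP-style code (500) plus the position that failed
      System.out.println(e.code() + " " + e.getMessage());
    }
  }
}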
use of org.apache.solr.common.SolrException in project lucene-solr by apache.
the class HdfsTransactionLog method writeCommit.
@Override
public long writeCommit(CommitUpdateCommand cmd, int flags) {
LogCodec codec = new LogCodec(resolver);
synchronized (this) {
try {
// if we had flushed, this should be equal to channel.position()
long pos = fos.size();
if (pos == 0) {
writeLogHeader(codec);
pos = fos.size();
}
codec.init(fos);
codec.writeTag(JavaBinCodec.ARR, 3);
// should just take one byte
codec.writeInt(UpdateLog.COMMIT | flags);
codec.writeLong(cmd.getVersion());
// ensure these bytes are (almost) last in the file
codec.writeStr(END_MESSAGE);
endRecord(pos);
// flush since this will be the last record in a log file
ensureFlushed();
// now the commit command is written we will never write to this log again
closeOutput();
return pos;
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
}
}
}
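writeCommit wraps any IOException in a SolrException without an extra message, so the detail travels in the cause while the ErrorCode carries the HTTP-style status. The sketch below uses a hypothetical writeCommitRecord stand-in and shows a caller mapping the numeric code back to the ErrorCode enum with ErrorCode.getErrorCode; only the SolrException API is real here.

import java.io.IOException;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;

public class CommitErrorDemo {

  // Illustrative stand-in for writeCommit(): any IOException from the log
  // becomes a SERVER_ERROR SolrException, keeping the original as the cause.
  static long writeCommitRecord() {
    try {
      throw new IOException("simulated tlog write failure");
    } catch (IOException e) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
    }
  }

  public static void main(String[] args) {
    try {
      writeCommitRecord();
    } catch (SolrException e) {
      // map the numeric code back to the ErrorCode enum and inspect the cause
      ErrorCode code = ErrorCode.getErrorCode(e.code());
      System.out.println(code + " caused by " + e.getCause());
    }
  }
}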
use of org.apache.solr.common.SolrException in project lucene-solr by apache.
the class HdfsUpdateLog method init.
@Override
public void init(UpdateHandler uhandler, SolrCore core) {
// ulogDir from CoreDescriptor overrides
String ulogDir = core.getCoreDescriptor().getUlogDir();
this.uhandler = uhandler;
synchronized (fsLock) {
// just like dataDir, we do not allow moving the tlog dir on reload
if (fs == null) {
if (ulogDir != null) {
dataDir = ulogDir;
}
if (dataDir == null || dataDir.length() == 0) {
dataDir = core.getDataDir();
}
if (!core.getDirectoryFactory().isAbsolute(dataDir)) {
try {
dataDir = core.getDirectoryFactory().getDataHome(core.getCoreDescriptor());
} catch (IOException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, e);
}
}
try {
fs = FileSystem.get(new Path(dataDir).toUri(), getConf());
} catch (IOException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, e);
}
} else {
if (debug) {
log.debug("UpdateHandler init: tlogDir=" + tlogDir + ", next id=" + id, " this is a reopen or double init ... nothing else to do.");
}
versionInfo.reload();
return;
}
}
tlogDir = new Path(dataDir, TLOG_NAME);
while (true) {
try {
if (!fs.exists(tlogDir)) {
boolean success = fs.mkdirs(tlogDir);
if (!success) {
throw new RuntimeException("Could not create directory:" + tlogDir);
}
} else {
// To check for safe mode
fs.mkdirs(tlogDir);
}
break;
} catch (RemoteException e) {
if (e.getClassName().equals("org.apache.hadoop.hdfs.server.namenode.SafeModeException")) {
log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
try {
Thread.sleep(5000);
} catch (InterruptedException e1) {
Thread.interrupted();
}
continue;
}
throw new RuntimeException("Problem creating directory: " + tlogDir, e);
} catch (IOException e) {
throw new RuntimeException("Problem creating directory: " + tlogDir, e);
}
}
tlogFiles = getLogList(fs, tlogDir);
// add 1 since we will create a new log for the next update
id = getLastLogId() + 1;
if (debug) {
log.debug("UpdateHandler init: tlogDir=" + tlogDir + ", existing tlogs=" + Arrays.asList(tlogFiles) + ", next id=" + id);
}
TransactionLog oldLog = null;
for (String oldLogName : tlogFiles) {
Path f = new Path(tlogDir, oldLogName);
try {
oldLog = new HdfsTransactionLog(fs, f, null, true, tlogDfsReplication);
// don't remove old logs on startup since more than one may be uncapped
addOldLog(oldLog, false);
} catch (Exception e) {
INIT_FAILED_LOGS_COUNT.incrementAndGet();
SolrException.log(log, "Failure to open existing log file (non fatal) " + f, e);
try {
fs.delete(f, false);
} catch (IOException e1) {
throw new RuntimeException(e1);
}
}
}
// Record the two newest logs (oldest first) at startup for potential tlog
// recovery; at abnormal close both "tlog" and "prevTlog" may have been left uncapped.
for (TransactionLog ll : logs) {
if (newestLogsOnStartup.size() < 2) {
newestLogsOnStartup.addFirst(ll);
} else {
// We're never going to modify old non-recovery logs - no need to hold their output open
log.info("Closing output for old non-recovery log " + ll);
ll.closeOutput();
}
}
try {
versionInfo = new VersionInfo(this, numVersionBuckets);
} catch (SolrException e) {
log.error("Unable to use updateLog: " + e.getMessage(), e);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to use updateLog: " + e.getMessage(), e);
}
// these startingVersions assume that we successfully recover from all non-complete tlogs
try (RecentUpdates startingUpdates = getRecentUpdates()) {
startingVersions = startingUpdates.getVersions(getNumRecordsToKeep());
startingOperation = startingUpdates.getLatestOperation();
// populate recent deletes list (since we can't get that info from the index)
for (int i = startingUpdates.deleteList.size() - 1; i >= 0; i--) {
DeleteUpdate du = startingUpdates.deleteList.get(i);
oldDeletes.put(new BytesRef(du.id), new LogPtr(-1, du.version));
}
// populate recent deleteByQuery commands
for (int i = startingUpdates.deleteByQueryList.size() - 1; i >= 0; i--) {
Update update = startingUpdates.deleteByQueryList.get(i);
List<Object> dbq = (List<Object>) update.log.lookup(update.pointer);
long version = (Long) dbq.get(1);
String q = (String) dbq.get(2);
trackDeleteByQuery(q, version);
}
}
// initialize metrics
core.getCoreMetricManager().registerMetricProducer(SolrInfoBean.Category.TLOG.toString(), this);
}
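init uses SolrException in two distinct ways: the static SolrException.log helper records a non-fatal problem with an old tlog file and processing continues, while a failure to create the VersionInfo is re-wrapped in a new SolrException with a more specific message and rethrown. A minimal sketch of both patterns, assuming an slf4j Logger is available; the InitErrorDemo class, the file path, and the error messages are illustrative:

import java.lang.invoke.MethodHandles;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class InitErrorDemo {

  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  public static void main(String[] args) {
    // Non-fatal problem: log it through the SolrException helper and carry on,
    // as init() does for an unreadable old tlog file.
    SolrException.log(log, "Failure to open existing log file (non fatal) /tmp/example-tlog",
        new RuntimeException("simulated open failure"));

    // Fatal problem: re-wrap with a more specific message, as init() does when
    // the VersionInfo cannot be created.
    try {
      throw new SolrException(ErrorCode.SERVER_ERROR, "simulated VersionInfo failure");
    } catch (SolrException e) {
      log.error("Unable to use updateLog: " + e.getMessage(), e);
      SolrException wrapped =
          new SolrException(ErrorCode.SERVER_ERROR, "Unable to use updateLog: " + e.getMessage(), e);
      System.out.println(wrapped.code() + " " + wrapped.getMessage());
    }
  }
}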
use of org.apache.solr.common.SolrException in project lucene-solr by apache.
the class DirectUpdateHandler2 method getQuery.
private Query getQuery(DeleteUpdateCommand cmd) {
Query q;
try {
// move this higher in the stack?
QParser parser = QParser.getParser(cmd.getQuery(), cmd.req);
q = parser.getQuery();
q = QueryUtils.makeQueryable(q);
// Make sure not to delete newer versions
if (ulog != null && cmd.getVersion() != 0 && cmd.getVersion() != -Long.MAX_VALUE) {
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(q, Occur.MUST);
SchemaField sf = ulog.getVersionInfo().getVersionField();
ValueSource vs = sf.getType().getValueSource(sf, null);
ValueSourceRangeFilter filt = new ValueSourceRangeFilter(vs, Long.toString(Math.abs(cmd.getVersion())), null, true, true);
FunctionRangeQuery range = new FunctionRangeQuery(filt);
// formulated in the "MUST_NOT" sense so we can delete docs w/o a version (some tests depend on this...)
bq.add(range, Occur.MUST_NOT);
q = bq.build();
}
return q;
} catch (SyntaxError e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
}
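A SyntaxError in the delete-by-query string is the client's fault, so getQuery reports it as BAD_REQUEST rather than SERVER_ERROR, which Solr turns into an HTTP 400 response. A small sketch of that client-error/server-error distinction follows; the parseDeleteQuery helper is hypothetical (QParser and SyntaxError are left out to keep it self-contained), only SolrException and ErrorCode are from Solr.

import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;

public class DeleteByQueryErrorDemo {

  // Illustrative stand-in for getQuery(): a malformed delete-by-query is the
  // caller's fault, so it is reported as BAD_REQUEST (HTTP 400), not SERVER_ERROR.
  static void parseDeleteQuery(String q) {
    if (q == null || q.trim().isEmpty()) {
      throw new SolrException(ErrorCode.BAD_REQUEST, "deleteByQuery has no query: " + q);
    }
    // a real implementation would hand q to a query parser here
  }

  public static void main(String[] args) {
    try {
      parseDeleteQuery("   ");
    } catch (SolrException e) {
      System.out.println(e.code() + " " + e.getMessage()); // prints: 400 deleteByQuery has no query: ...
    }
  }
}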