use of org.apache.lucene.store.LockObtainFailedException in project lucene-solr by apache.
the class HdfsLockFactoryTest method testBasic.
@Test
public void testBasic() throws IOException {
  String uri = HdfsTestUtil.getURI(dfsCluster);
  Path lockPath = new Path(uri, "/basedir/lock");
  Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster);
  HdfsDirectory dir = new HdfsDirectory(lockPath, conf);
  try (Lock lock = dir.obtainLock("testlock")) {
    assert lock != null;
    try (Lock lock2 = dir.obtainLock("testlock")) {
      assert lock2 != null;
      fail("Locking should fail");
    } catch (LockObtainFailedException lofe) {
      // pass
    }
  }
  // now repeat after close()
  try (Lock lock = dir.obtainLock("testlock")) {
    assert lock != null;
    try (Lock lock2 = dir.obtainLock("testlock")) {
      assert lock2 != null;
      fail("Locking should fail");
    } catch (LockObtainFailedException lofe) {
      // pass
    }
  }
  dir.close();
}
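The pattern the test exercises is not HDFS-specific: any Lucene Directory whose lock factory detects contention throws LockObtainFailedException on the second obtainLock. A minimal local sketch of the same double-obtain check, assuming Lucene 6+ and an illustrative path /tmp/lockdemo (the path and lock name are not taken from the test above):

import java.io.IOException;
import java.nio.file.Paths;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;

public class LocalLockDemo {
  public static void main(String[] args) throws IOException {
    // FSDirectory defaults to NativeFSLockFactory, which also signals
    // contention with LockObtainFailedException.
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/lockdemo"))) {
      try (Lock first = dir.obtainLock("demo.lock")) {
        try (Lock second = dir.obtainLock("demo.lock")) {
          System.out.println("unexpected: second lock acquired");
        } catch (LockObtainFailedException expected) {
          System.out.println("second obtainLock failed as expected: " + expected.getMessage());
        }
      } // first lock is released here; a fresh obtainLock would now succeed
    }
  }
}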
use of org.apache.lucene.store.LockObtainFailedException in project lucene-solr by apache.
the class HdfsLockFactory method obtainLock.
@Override
public Lock obtainLock(Directory dir, String lockName) throws IOException {
  if (!(dir instanceof HdfsDirectory)) {
    throw new UnsupportedOperationException("HdfsLockFactory can only be used with HdfsDirectory subclasses, got: " + dir);
  }
  final HdfsDirectory hdfsDir = (HdfsDirectory) dir;
  final Configuration conf = hdfsDir.getConfiguration();
  final Path lockPath = hdfsDir.getHdfsDirPath();
  final Path lockFile = new Path(lockPath, lockName);
  FSDataOutputStream file = null;
  final FileSystem fs = FileSystem.get(lockPath.toUri(), conf);
  while (true) {
    try {
      if (!fs.exists(lockPath)) {
        boolean success = fs.mkdirs(lockPath);
        if (!success) {
          throw new RuntimeException("Could not create directory: " + lockPath);
        }
      } else {
        // just to check for safe mode
        fs.mkdirs(lockPath);
      }
      file = fs.create(lockFile, false);
      break;
    } catch (FileAlreadyExistsException e) {
      throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
    } catch (RemoteException e) {
      if (e.getClassName().equals("org.apache.hadoop.hdfs.server.namenode.SafeModeException")) {
        log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
        try {
          Thread.sleep(5000);
        } catch (InterruptedException e1) {
          Thread.interrupted();
        }
        continue;
      }
      throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
    } catch (IOException e) {
      throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
    } finally {
      IOUtils.closeQuietly(file);
    }
  }
  return new HdfsLock(conf, lockFile);
}
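The heart of this method is atomic exclusive creation: fs.create(lockFile, false) fails if the file already exists, and that failure is translated into LockObtainFailedException; the SafeModeException branch merely retries until the NameNode leaves safe mode. A minimal local-filesystem sketch of the same create-a-marker-file idiom, with illustrative names (CreateFileLockSketch and /tmp/demo.lock are not from the Solr code):

import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.lucene.store.LockObtainFailedException;

public class CreateFileLockSketch {
  // Takes the lock by atomically creating a marker file, mirroring
  // fs.create(lockFile, false) in the HDFS code above.
  static void obtain(Path lockFile) throws IOException {
    try {
      Files.createFile(lockFile); // atomic: throws if the file already exists
    } catch (FileAlreadyExistsException e) {
      throw new LockObtainFailedException("Cannot obtain lock file: " + lockFile, e);
    }
  }

  public static void main(String[] args) throws IOException {
    Path lock = Paths.get("/tmp/demo.lock");
    obtain(lock); // succeeds
    try {
      obtain(lock); // second attempt fails
    } catch (LockObtainFailedException expected) {
      System.out.println(expected.getMessage());
    } finally {
      Files.deleteIfExists(lock); // releasing the lock = deleting the marker
    }
  }
}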
use of org.apache.lucene.store.LockObtainFailedException in project bigbluebutton by bigbluebutton.
the class Index method startIndex.
public void startIndex(String uid) {
  try {
    IndexReader.unlock(FSDirectory.getDirectory(ConfigHandler.indexPath));
    if (logger.isInfoEnabled()) {
      logger.info("index file path " + ConfigHandler.indexPath);
    }
    reader = IndexReader.open(ConfigHandler.indexPath);
    TermEnum uidIter = reader.terms(new Term("uid"));
    while (uidIter.term() != null) {
      if (uid.equalsIgnoreCase(uidIter.term().text())) {
        reader.deleteDocuments(uidIter.term());
      }
      uidIter.next();
    }
    reader.close();
  } catch (CorruptIndexException e) {
    // TODO Auto-generated catch block
    e.printStackTrace();
  } catch (LockObtainFailedException e) {
    // TODO Auto-generated catch block
    e.printStackTrace();
  } catch (IOException e) {
    // TODO Auto-generated catch block
    e.printStackTrace();
  }
  try {
    writer = new IndexWriter(ConfigHandler.indexPath, new StandardAnalyzer(), new IndexWriter.MaxFieldLength(1000000));
  } catch (CorruptIndexException e) {
    // TODO Auto-generated catch block
    e.printStackTrace();
  } catch (LockObtainFailedException e) {
    // TODO Auto-generated catch block
    e.printStackTrace();
  } catch (IOException e) {
    // TODO Auto-generated catch block
    e.printStackTrace();
  }
}
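This snippet is written against the long-removed Lucene 2.x/3.x API (IndexReader.unlock, string index paths, TermEnum, MaxFieldLength). A rough sketch of the same delete-by-uid flow on the modern API, offered as an approximation rather than a drop-in replacement: deletes now go through IndexWriter, and note that deleteDocuments matches the uid term exactly, where the original compared case-insensitively.

import java.io.IOException;
import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.LockObtainFailedException;

public void startIndex(String uid) {
  try (Directory dir = FSDirectory.open(Paths.get(ConfigHandler.indexPath));
       IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
    // the writer holds write.lock itself; there is no IndexReader.unlock anymore
    writer.deleteDocuments(new Term("uid", uid)); // exact-match delete
    writer.commit();
  } catch (LockObtainFailedException e) {
    // another process (or an unclosed writer) already holds write.lock
    e.printStackTrace();
  } catch (IOException e) {
    e.printStackTrace();
  }
}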
use of org.apache.lucene.store.LockObtainFailedException in project elasticsearch by elastic.
the class IndicesService method deleteIndexStoreIfDeletionAllowed.
private void deleteIndexStoreIfDeletionAllowed(final String reason, final Index index, final IndexSettings indexSettings, final IndexDeletionAllowedPredicate predicate) throws IOException {
    boolean success = false;
    try {
        // we are trying to delete the index store here - not a big deal if the lock can't be obtained:
        // the store metadata gets wiped anyway even without the lock; this is just best effort, since
        // every shard deletes its content under the shard lock it owns.
        logger.debug("{} deleting index store reason [{}]", index, reason);
        if (predicate.apply(index, indexSettings)) {
            // it's safe to delete all index metadata and shard data
            nodeEnv.deleteIndexDirectorySafe(index, 0, indexSettings);
        }
        success = true;
    } catch (LockObtainFailedException ex) {
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to delete index store - at least one shard is still locked", index), ex);
    } catch (Exception ex) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to delete index", index), ex);
    } finally {
        if (success == false) {
            addPendingDelete(index, indexSettings);
        }
        // this is purely protective, to make sure this index doesn't get re-imported as a dangling index.
        // in the future we should write a tombstone rather than wiping the metadata.
        MetaDataStateFormat.deleteMetaState(nodeEnv.indexPaths(index));
    }
}
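Stripped of Elasticsearch specifics, the control flow is a best-effort delete with a durable fallback: try under the lock, record a pending delete if anything goes wrong, and always wipe the metadata. A minimal self-contained sketch of that shape, with hypothetical helpers (tryDeleteUnderLock, recordPendingDelete and wipeMetadata are illustrative names, not Elasticsearch API):

import org.apache.lucene.store.LockObtainFailedException;

public class BestEffortDeleteSketch {
  // Hypothetical stand-ins, not Elasticsearch API:
  static void tryDeleteUnderLock(String index) throws LockObtainFailedException { /* ... */ }
  static void recordPendingDelete(String index) { /* ... */ }
  static void wipeMetadata(String index) { /* ... */ }

  static void deleteIndexStoreBestEffort(String index) {
    boolean success = false;
    try {
      tryDeleteUnderLock(index);    // may throw LockObtainFailedException
      success = true;
    } catch (LockObtainFailedException ex) {
      // expected when a shard still holds its lock; fall through to the retry path
    } finally {
      if (success == false) {
        recordPendingDelete(index); // retried later, cf. processPendingDeletes below
      }
      wipeMetadata(index);          // always, so the index can't be re-imported as dangling
    }
  }
}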
use of org.apache.lucene.store.LockObtainFailedException in project elasticsearch by elastic.
the class IndicesClusterStateService method deleteIndices.
/**
 * Deletes indices (with shard data).
 *
 * @param event cluster change event
 */
private void deleteIndices(final ClusterChangedEvent event) {
    final ClusterState previousState = event.previousState();
    final ClusterState state = event.state();
    final String localNodeId = state.nodes().getLocalNodeId();
    assert localNodeId != null;
    for (Index index : event.indicesDeleted()) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] cleaning index, no longer part of the metadata", index);
        }
        AllocatedIndex<? extends Shard> indexService = indicesService.indexService(index);
        final IndexSettings indexSettings;
        if (indexService != null) {
            indexSettings = indexService.getIndexSettings();
            indicesService.removeIndex(index, DELETED, "index no longer part of the metadata");
        } else if (previousState.metaData().hasIndex(index.getName())) {
            // The deleted index was part of the previous cluster state, but not loaded on the local node
            final IndexMetaData metaData = previousState.metaData().index(index);
            indexSettings = new IndexSettings(metaData, settings);
            indicesService.deleteUnassignedIndex("deleted index was not assigned to local node", metaData, state);
        } else {
            // asserting that the previous cluster state is not initialized/recovered
            assert previousState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK);
            final IndexMetaData metaData = indicesService.verifyIndexIsDeleted(index, event.state());
            if (metaData != null) {
                indexSettings = new IndexSettings(metaData, settings);
            } else {
                indexSettings = null;
            }
        }
        if (indexSettings != null) {
            threadPool.generic().execute(new AbstractRunnable() {

                @Override
                public void onFailure(Exception e) {
                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e);
                }

                @Override
                protected void doRun() throws Exception {
                    try {
                        // we wait until we can lock the index / all shards on the node, and then we ack the delete of the
                        // store to the master. If we can't acquire the locks here immediately, there might be a shard of this
                        // index still holding on to the lock due to a "currently canceled recovery" or so. The shard will
                        // delete itself BEFORE the lock is released, so it's guaranteed to be deleted by the time we get the lock.
                        indicesService.processPendingDeletes(index, indexSettings, new TimeValue(30, TimeUnit.MINUTES));
                    } catch (LockObtainFailedException exc) {
                        logger.warn("[{}] failed to lock all shards for index - timed out after 30 minutes", index);
                    } catch (InterruptedException e) {
                        logger.warn("[{}] failed to lock all shards for index - interrupted", index);
                    }
                }
            });
        }
    }
}
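The 30-minute value passed to processPendingDeletes is a bounded wait: keep trying to take the shard locks until the deadline, then surface LockObtainFailedException so the caller can log and move on. A minimal self-contained sketch of that shape, using a ReentrantLock as a stand-in for the per-shard locks (PendingDeleteWaitSketch and acquireWithTimeout are illustrative, not Elasticsearch code):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.lucene.store.LockObtainFailedException;

public class PendingDeleteWaitSketch {
    private static final ReentrantLock shardLock = new ReentrantLock(); // stand-in for per-shard locks

    // Bounded wait: block up to the timeout, then surface LockObtainFailedException,
    // mirroring the shape of the 30-minute processPendingDeletes call above.
    static void acquireWithTimeout(long timeout, TimeUnit unit)
            throws LockObtainFailedException, InterruptedException {
        if (shardLock.tryLock(timeout, unit) == false) {
            throw new LockObtainFailedException("timed out waiting for shard lock");
        }
    }

    public static void main(String[] args) throws Exception {
        try {
            acquireWithTimeout(30, TimeUnit.MINUTES);
            // ... delete shard contents under the lock ...
        } catch (LockObtainFailedException exc) {
            System.out.println("failed to lock all shards - " + exc.getMessage());
        } finally {
            if (shardLock.isHeldByCurrentThread()) {
                shardLock.unlock();
            }
        }
    }
}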