Use of org.apache.solr.update.DeleteUpdateCommand in project lucene-solr by apache.
In class DistributedUpdateProcessor, method isLeader:
// internal helper method to tell if we are the leader for an add or deleteById update
boolean isLeader(UpdateCommand cmd) {
    updateCommand = cmd;
    if (zkEnabled) {
        zkCheck();
        if (cmd instanceof AddUpdateCommand) {
            AddUpdateCommand acmd = (AddUpdateCommand) cmd;
            nodes = setupRequest(acmd.getHashableId(), acmd.getSolrInputDocument());
        } else if (cmd instanceof DeleteUpdateCommand) {
            DeleteUpdateCommand dcmd = (DeleteUpdateCommand) cmd;
            nodes = setupRequest(dcmd.getId(), null);
        }
    } else {
        isLeader = getNonZkLeaderAssumption(req);
    }
    return isLeader;
}
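For context, both flavours of delete surface here: a delete-by-id carries a unique key and is routed like an add, while a delete-by-query has no id to hash. A minimal client-side sketch of what produces each command type via SolrJ (the endpoint URL and collection name are assumptions; adjust for your deployment):

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;

public class DeleteCommandKinds {
    public static void main(String[] args) throws Exception {
        // Assumed endpoint and core name; purely illustrative.
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
            // Arrives server-side as a DeleteUpdateCommand carrying an id:
            // isLeader() above routes it via setupRequest(dcmd.getId(), null).
            client.deleteById("doc42");

            // Arrives as a DeleteUpdateCommand carrying a query instead of an id;
            // per the comment on isLeader(), delete-by-query takes a different
            // distributed path than add/deleteById.
            client.deleteByQuery("type_s:stale");

            client.commit();
        }
    }
}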
Use of org.apache.solr.update.DeleteUpdateCommand in project lucene-solr by apache.
In class DistributedUpdateProcessor, method versionAdd:
/**
 * @return whether or not to drop this cmd
 * @throws IOException If there is a low-level I/O error.
 */
protected boolean versionAdd(AddUpdateCommand cmd) throws IOException {
    BytesRef idBytes = cmd.getIndexedId();
    if (idBytes == null) {
        super.processAdd(cmd);
        return false;
    }
    if (vinfo == null) {
        if (AtomicUpdateDocumentMerger.isAtomicUpdate(cmd)) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                "Atomic document updates are not supported unless <updateLog/> is configured");
        } else {
            super.processAdd(cmd);
            return false;
        }
    }

    // This is only the hash for the bucket, and must be based only on the uniqueKey (i.e. do not use a pluggable hash here)
    int bucketHash = Hash.murmurhash3_x86_32(idBytes.bytes, idBytes.offset, idBytes.length, 0);

    // at this point, there is an update we need to try and apply.
    // we may or may not be the leader.

    // Find any existing version in the document
    // TODO: don't reuse update commands any more!
    long versionOnUpdate = cmd.getVersion();
    if (versionOnUpdate == 0) {
        SolrInputField versionField = cmd.getSolrInputDocument().getField(CommonParams.VERSION_FIELD);
        if (versionField != null) {
            Object o = versionField.getValue();
            versionOnUpdate = o instanceof Number ? ((Number) o).longValue() : Long.parseLong(o.toString());
        } else {
            // Find the version
            String versionOnUpdateS = req.getParams().get(CommonParams.VERSION_FIELD);
            versionOnUpdate = versionOnUpdateS == null ? 0 : Long.parseLong(versionOnUpdateS);
        }
    }

    boolean isReplayOrPeersync = (cmd.getFlags() & (UpdateCommand.REPLAY | UpdateCommand.PEER_SYNC)) != 0;
    boolean leaderLogic = isLeader && !isReplayOrPeersync;
    boolean forwardedFromCollection = cmd.getReq().getParams().get(DISTRIB_FROM_COLLECTION) != null;
    VersionBucket bucket = vinfo.bucket(bucketHash);

    long dependentVersionFound = -1;
    // if this is an in-place update, check and wait if we should be waiting for a previous update
    // (on which this update depends), before entering the synchronized block
    if (!leaderLogic && cmd.isInPlaceUpdate()) {
        dependentVersionFound = waitForDependentUpdates(cmd, versionOnUpdate, isReplayOrPeersync, bucket);
        if (dependentVersionFound == -1) {
            // it means the document has been deleted by now at the leader. drop this update
            return true;
        }
    }

    vinfo.lockForUpdate();
    try {
        synchronized (bucket) {
            // just in case anyone is waiting, let them know that we have a new update
            bucket.notifyAll();

            // we obtain the version when synchronized and then do the add so we can ensure that
            // if version1 < version2 then version1 is actually added before version2.

            // even if we don't store the version field, synchronizing on the bucket
            // will enable us to know what version happened first, and thus enable
            // realtime-get to work reliably.
            // TODO: if versions aren't stored, do we need to set on the cmd anyway for some reason?
            // there may be other reasons in the future for a version on the commands

            boolean checkDeleteByQueries = false;

            if (versionsStored) {
                long bucketVersion = bucket.highest;

                if (leaderLogic) {
                    if (forwardedFromCollection && ulog.getState() == UpdateLog.State.ACTIVE) {
                        // forwarded from a collection but we are not buffering, so strip the original version and apply our own
                        // see SOLR-5308
                        log.info("Removing version field from doc: " + cmd.getPrintableId());
                        cmd.solrDoc.remove(CommonParams.VERSION_FIELD);
                        versionOnUpdate = 0;
                    }

                    boolean updated = getUpdatedDocument(cmd, versionOnUpdate);

                    // leaders can also be in buffering state during "migrate" API call, see SOLR-5308
                    if (forwardedFromCollection && ulog.getState() != UpdateLog.State.ACTIVE && isReplayOrPeersync == false) {
                        // we're not in an active state, and this update isn't from a replay, so buffer it.
                        log.info("Leader logic applied but update log is buffering: " + cmd.getPrintableId());
                        cmd.setFlags(cmd.getFlags() | UpdateCommand.BUFFERING);
                        ulog.add(cmd);
                        return true;
                    }

                    if (versionOnUpdate != 0) {
                        Long lastVersion = vinfo.lookupVersion(cmd.getIndexedId());
                        long foundVersion = lastVersion == null ? -1 : lastVersion;
                        if (versionOnUpdate == foundVersion || (versionOnUpdate < 0 && foundVersion < 0) || (versionOnUpdate == 1 && foundVersion > 0)) {
                            // we're ok if versions match, or if both are negative (all missing docs are equal), or if cmd
                            // specified it must exist (versionOnUpdate==1) and it does.
                        } else {
                            throw new SolrException(ErrorCode.CONFLICT, "version conflict for " + cmd.getPrintableId()
                                + " expected=" + versionOnUpdate + " actual=" + foundVersion);
                        }
                    }

                    long version = vinfo.getNewClock();
                    cmd.setVersion(version);
                    cmd.getSolrInputDocument().setField(CommonParams.VERSION_FIELD, version);
                    bucket.updateHighest(version);
                } else {
                    // The leader forwarded us this update.
                    cmd.setVersion(versionOnUpdate);

                    if (ulog.getState() != UpdateLog.State.ACTIVE && isReplayOrPeersync == false) {
                        // we're not in an active state, and this update isn't from a replay, so buffer it.
                        cmd.setFlags(cmd.getFlags() | UpdateCommand.BUFFERING);
                        ulog.add(cmd);
                        return true;
                    }

                    if (cmd.isInPlaceUpdate()) {
                        long prev = cmd.prevVersion;
                        Long lastVersion = vinfo.lookupVersion(cmd.getIndexedId());
                        if (lastVersion == null || Math.abs(lastVersion) < prev) {
                            // this was checked for (in waitForDependentUpdates()) before entering the synchronized block.
                            // So we shouldn't be here, unless what must've happened is:
                            // by the time the synchronized block was entered, the prev update was deleted by DBQ. Since
                            // that update is now not in the index, vinfo.lookupVersion() is possibly giving us a version
                            // from the deleted list (which might be older than the prev update!)
                            UpdateCommand fetchedFromLeader = fetchFullUpdateFromLeader(cmd, versionOnUpdate);

                            if (fetchedFromLeader instanceof DeleteUpdateCommand) {
                                log.info("In-place update of {} failed to find valid lastVersion to apply to, and the document"
                                    + " was deleted at the leader subsequently.", idBytes.utf8ToString());
                                versionDelete((DeleteUpdateCommand) fetchedFromLeader);
                                return true;
                            } else {
                                assert fetchedFromLeader instanceof AddUpdateCommand;
                                // Newer document was fetched from the leader. Apply that document instead of this current in-place update.
                                log.info("In-place update of {} failed to find valid lastVersion to apply to, forced to fetch full doc from leader: {}",
                                    idBytes.utf8ToString(), fetchedFromLeader);

                                // Make this update a non-inplace update containing the full document obtained from the leader
                                cmd.solrDoc = ((AddUpdateCommand) fetchedFromLeader).solrDoc;
                                cmd.prevVersion = -1;
                                cmd.setVersion((long) cmd.solrDoc.getFieldValue(CommonParams.VERSION_FIELD));
                                assert cmd.isInPlaceUpdate() == false;
                            }
                        } else {
                            if (lastVersion != null && Math.abs(lastVersion) > prev) {
                                // this means we got a newer full doc update, in which case it makes no sense to apply the older
                                // in-place update. Drop this update
                                log.info("Update was applied on version: " + prev + ", but last version I have is: "
                                    + lastVersion + ". Dropping current update.");
                                return true;
                            } else {
                                // We're good, we should apply this update. First, update the bucket's highest.
                                if (bucketVersion != 0 && bucketVersion < versionOnUpdate) {
                                    bucket.updateHighest(versionOnUpdate);
                                }
                            }
                        }
                    }

                    if (!cmd.isInPlaceUpdate()) {
                        // if we aren't the leader, then we need to check that updates were not re-ordered
                        if (bucketVersion != 0 && bucketVersion < versionOnUpdate) {
                            // we're OK... this update has a version higher than anything we've seen
                            // in this bucket so far, so we know that no reordering has yet occurred.
                            bucket.updateHighest(versionOnUpdate);
                        } else {
                            // there have been updates higher than the current update. we need to check
                            // the specific version for this id.
                            Long lastVersion = vinfo.lookupVersion(cmd.getIndexedId());
                            if (lastVersion != null && Math.abs(lastVersion) >= versionOnUpdate) {
                                // This update is a repeat, or was reordered. We need to drop this update.
                                log.debug("Dropping add update due to version {}", idBytes.utf8ToString());
                                return true;
                            }
                            // also need to re-apply newer deleteByQuery commands
                            checkDeleteByQueries = true;
                        }
                    }

                    if (replicaType == Replica.Type.TLOG && (cmd.getFlags() & UpdateCommand.REPLAY) == 0) {
                        cmd.setFlags(cmd.getFlags() | UpdateCommand.IGNORE_INDEXWRITER);
                    }
                }
            }

            boolean willDistrib = isLeader && nodes != null && nodes.size() > 0;

            SolrInputDocument clonedDoc = null;
            if (willDistrib && cloneRequiredOnLeader) {
                clonedDoc = cmd.solrDoc.deepCopy();
            }

            // TODO: possibly set checkDeleteByQueries as a flag on the command?
            doLocalAdd(cmd);

            if (willDistrib && cloneRequiredOnLeader) {
                cmd.solrDoc = clonedDoc;
            }
        } // end synchronized (bucket)
    } finally {
        vinfo.unlockForUpdate();
    }
    return false;
}
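The version checks above implement Solr's documented optimistic concurrency rules: a positive _version_ must match the stored version exactly, _version_=1 asserts the document already exists, a negative value asserts it does not, and 0 skips the check entirely. A small SolrJ sketch exercising those rules (the URL and core name are assumptions):

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.SolrInputDocument;

public class OptimisticConcurrencyExample {
    public static void main(String[] args) throws Exception {
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
            SolrInputDocument doc = new SolrInputDocument();
            doc.addField("id", "doc42");
            doc.addField("title_s", "first write");
            // Negative version: the add succeeds only if doc42 does NOT already exist
            // (the "versionOnUpdate < 0 && foundVersion < 0" branch in versionAdd).
            doc.addField("_version_", -1L);
            client.add(doc);
            client.commit();

            SolrInputDocument update = new SolrInputDocument();
            update.addField("id", "doc42");
            update.addField("title_s", "second write");
            // _version_ == 1: the doc must exist, but any stored version is accepted
            // (the "versionOnUpdate == 1 && foundVersion > 0" branch in versionAdd).
            update.addField("_version_", 1L);
            client.add(update);
            client.commit();
            // Supplying a stale positive _version_ instead would make versionAdd throw
            // SolrException(ErrorCode.CONFLICT), surfacing to the client as an HTTP 409.
        }
    }
}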
Use of org.apache.solr.update.DeleteUpdateCommand in project SearchServices by Alfresco.
In class SolrInformationServer, method deleteByQuery:
private void deleteByQuery(String query) throws IOException {
    SolrQueryRequest request = null;
    UpdateRequestProcessor processor = null;
    try {
        request = getLocalSolrQueryRequest();
        processor = this.core.getUpdateProcessingChain(null).createProcessor(request, new SolrQueryResponse());
        DeleteUpdateCommand delDocCmd = new DeleteUpdateCommand(request);
        delDocCmd.setQuery(query);
        processor.processDelete(delDocCmd);
    } finally {
        if (processor != null) {
            processor.finish();
        }
        if (request != null) {
            request.close();
        }
    }
}
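Callers of a helper like this typically assemble the query from a field name and an escaped value, so user-supplied input cannot break the query syntax. A hypothetical caller, using SolrJ's ClientUtils for escaping (the OWNER field and the value are illustrative, not taken from SolrInformationServer):

import org.apache.solr.client.solrj.util.ClientUtils;

// Hypothetical caller: delete all documents whose OWNER field matches a user-supplied value.
String owner = "jsmith (contractor)";
String query = "OWNER:" + ClientUtils.escapeQueryChars(owner);
deleteByQuery(query); // the private helper shown above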
Use of org.apache.solr.update.DeleteUpdateCommand in project SearchServices by Alfresco.
In class SolrInformationServer, method deleteErrorNode:
private void deleteErrorNode(UpdateRequestProcessor processor, SolrQueryRequest request, Node node) throws IOException {
    String errorDocId = PREFIX_ERROR + node.getId();
    DeleteUpdateCommand delErrorDocCmd = new DeleteUpdateCommand(request);
    delErrorDocCmd.setId(errorDocId);
    processor.processDelete(delErrorDocCmd);
}
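deleteErrorNode issues a delete-by-id for the synthetic error marker keyed by PREFIX_ERROR plus the node id. For symmetry, here is a hedged sketch of how such a marker document might be written in the first place; the field names are assumptions, not the project's actual schema:

// Hypothetical counterpart to deleteErrorNode: writing an error marker doc.
AddUpdateCommand addErrorDocCmd = new AddUpdateCommand(request);
SolrInputDocument errorDoc = new SolrInputDocument();
errorDoc.addField("id", PREFIX_ERROR + node.getId()); // same synthetic id scheme
errorDoc.addField("DOC_TYPE", "ErrorNode");           // assumed doc-type field
addErrorDocCmd.solrDoc = errorDoc;
processor.processAdd(addErrorDocCmd);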
Use of org.apache.solr.update.DeleteUpdateCommand in project SearchServices by Alfresco.
In class SolrInformationServer, method indexNodes:
@Override
public void indexNodes(List<Node> nodes, boolean overwrite, boolean cascade) throws IOException, AuthenticationException, JSONException {
    SolrQueryRequest request = null;
    UpdateRequestProcessor processor = null;
    try {
        request = getLocalSolrQueryRequest();
        processor = this.core.getUpdateProcessingChain(null).createProcessor(request, new SolrQueryResponse());
        Map<Long, Node> nodeIdsToNodes = new HashMap<>();
        EnumMap<SolrApiNodeStatus, List<Long>> nodeStatusToNodeIds = new EnumMap<>(SolrApiNodeStatus.class);
        categorizeNodes(nodes, nodeIdsToNodes, nodeStatusToNodeIds);
        List<Long> deletedNodeIds = mapNullToEmptyList(nodeStatusToNodeIds.get(SolrApiNodeStatus.DELETED));
        List<Long> shardDeletedNodeIds = mapNullToEmptyList(nodeStatusToNodeIds.get(SolrApiNodeStatus.NON_SHARD_DELETED));
        List<Long> shardUpdatedNodeIds = mapNullToEmptyList(nodeStatusToNodeIds.get(SolrApiNodeStatus.NON_SHARD_UPDATED));
        List<Long> unknownNodeIds = mapNullToEmptyList(nodeStatusToNodeIds.get(SolrApiNodeStatus.UNKNOWN));
        List<Long> updatedNodeIds = mapNullToEmptyList(nodeStatusToNodeIds.get(SolrApiNodeStatus.UPDATED));

        if (!deletedNodeIds.isEmpty() || !shardDeletedNodeIds.isEmpty() || !shardUpdatedNodeIds.isEmpty() || !unknownNodeIds.isEmpty()) {
            // fix up any secondary paths
            List<NodeMetaData> nodeMetaDatas = new ArrayList<>();

            // For all deleted nodes, fake the node metadata
            for (Long deletedNodeId : deletedNodeIds) {
                Node node = nodeIdsToNodes.get(deletedNodeId);
                NodeMetaData nodeMetaData = createDeletedNodeMetaData(node);
                nodeMetaDatas.add(nodeMetaData);
            }

            if (!unknownNodeIds.isEmpty()) {
                NodeMetaDataParameters nmdp = new NodeMetaDataParameters();
                nmdp.setNodeIds(unknownNodeIds);
                nodeMetaDatas.addAll(repositoryClient.getNodesMetaData(nmdp, Integer.MAX_VALUE));
            }
            for (NodeMetaData nodeMetaData : nodeMetaDatas) {
                Node node = nodeIdsToNodes.get(nodeMetaData.getId());
                // Guard before dereferencing: the original code null-checked nodeMetaData only
                // after it had already been used, which was a dead check.
                if (node == null || nodeMetaData.getTxnId() > node.getTxnId()) {
                    // it will be indexed later
                    continue;
                }
                try {
                    // Lock the node to ensure that no other trackers work with this node until this code completes.
                    if (!spinLock(nodeMetaData.getId(), 120000)) {
                        // We haven't acquired the lock in over 2 minutes. This really shouldn't happen unless something has gone wrong.
                        throw new Exception("Unable to acquire lock on nodeId:" + nodeMetaData.getId());
                    }
                    solrContentStore.removeDocFromContentStore(nodeMetaData);
                } finally {
                    unlock(nodeMetaData.getId());
                }
            }
            if (log.isDebugEnabled()) {
                log.debug(".. deleting");
            }
            DeleteUpdateCommand delDocCmd = new DeleteUpdateCommand(request);
            String query = this.cloud.getQuery(FIELD_DBID, OR, deletedNodeIds, shardDeletedNodeIds, shardUpdatedNodeIds, unknownNodeIds);
            delDocCmd.setQuery(query);
            processor.processDelete(delDocCmd);
        }

        if (!updatedNodeIds.isEmpty() || !unknownNodeIds.isEmpty() || !shardUpdatedNodeIds.isEmpty()) {
            log.info(".. updating");
            NodeMetaDataParameters nmdp = new NodeMetaDataParameters();
            List<Long> nodeIds = new LinkedList<>();
            nodeIds.addAll(updatedNodeIds);
            nodeIds.addAll(unknownNodeIds);
            nodeIds.addAll(shardUpdatedNodeIds);
            nmdp.setNodeIds(nodeIds);

            // Fetches bulk metadata
            List<NodeMetaData> nodeMetaDatas = repositoryClient.getNodesMetaData(nmdp, Integer.MAX_VALUE);

            NEXT_NODE: for (NodeMetaData nodeMetaData : nodeMetaDatas) {
                long start = System.nanoTime();
                Node node = nodeIdsToNodes.get(nodeMetaData.getId());
                long nodeId = node.getId();
                try {
                    // Lock the node to ensure that no other trackers work with this node until this code completes.
                    if (!spinLock(nodeId, 120000)) {
                        // We haven't acquired the lock in over 2 minutes. This really shouldn't happen unless something has gone wrong.
                        throw new Exception("Unable to acquire lock on nodeId:" + nodeId);
                    }
                    if (nodeMetaData.getTxnId() > node.getTxnId()) {
                        // it will be indexed later
                        continue;
                    }
                    if (nodeIdsToNodes.get(nodeMetaData.getId()).getStatus() == SolrApiNodeStatus.NON_SHARD_UPDATED) {
                        if (nodeMetaData.getProperties().get(ContentModel.PROP_CASCADE_TX) != null) {
                            indexNonShardCascade(nodeMetaData);
                        }
                        continue;
                    }
                    AddUpdateCommand addDocCmd = new AddUpdateCommand(request);
                    addDocCmd.overwrite = overwrite;

                    // check index control
                    Map<QName, PropertyValue> properties = nodeMetaData.getProperties();
                    StringPropertyValue pValue = (StringPropertyValue) properties.get(ContentModel.PROP_IS_INDEXED);
                    if (pValue != null) {
                        boolean isIndexed = Boolean.parseBoolean(pValue.getValue());
                        if (!isIndexed) {
                            if (log.isDebugEnabled()) {
                                log.debug(".. clearing unindexed");
                            }
                            deleteNode(processor, request, node);
                            SolrInputDocument doc = createNewDoc(nodeMetaData, DOC_TYPE_UNINDEXED_NODE);
                            addDocCmd.solrDoc = doc;
                            if (recordUnindexedNodes) {
                                solrContentStore.storeDocOnSolrContentStore(nodeMetaData, doc);
                                processor.processAdd(addDocCmd);
                            }
                            long end = System.nanoTime();
                            this.trackerStats.addNodeTime(end - start);
                            continue NEXT_NODE;
                        }
                    }

                    // Make sure any unindexed or error doc is removed.
                    if (log.isDebugEnabled()) {
                        log.debug(".. deleting node " + node.getId());
                    }
                    deleteNode(processor, request, node);
                    SolrInputDocument doc = createNewDoc(nodeMetaData, DOC_TYPE_NODE);
                    addToNewDocAndCache(nodeMetaData, doc);
                    addDocCmd.solrDoc = doc;
                    processor.processAdd(addDocCmd);
                    long end = System.nanoTime();
                    this.trackerStats.addNodeTime(end - start);
                } finally {
                    // release the lock on the node so other trackers can access the node.
                    unlock(nodeId);
                }
            } // Ends iteration over nodeMetaDatas
        } // Ends checking for the existence of updated or unknown node ids
    } catch (Exception e) {
        log.error("SolrInformationServer problem", e);
        // Bulk version failed, so do one at a time.
        for (Node node : nodes) {
            this.indexNode(node, true);
        }
    } finally {
        if (processor != null) {
            processor.finish();
        }
        if (request != null) {
            request.close();
        }
    }
}
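The bulk delete in indexNodes hinges on cloud.getQuery joining several id lists into a single FIELD_DBID query, so one delete-by-query replaces many individual deleteById calls. The exact string it produces is not shown in this extract, so the stand-in below is an assumption about its shape, useful mainly to illustrate the idea:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public final class IdQueryBuilder {
    // Hypothetical stand-in for cloud.getQuery(FIELD_DBID, OR, ...):
    // joins all ids into one boolean OR clause, e.g. DBID:(12 OR 13 OR 14).
    // Callers are expected to pass at least one non-empty list.
    @SafeVarargs
    public static String buildIdQuery(String field, List<Long>... idLists) {
        String ids = Stream.of(idLists)
                .flatMap(List::stream)
                .map(String::valueOf)
                .collect(Collectors.joining(" OR "));
        return field + ":(" + ids + ")";
    }
}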