Example 11 with CloudDescriptor

use of org.apache.solr.cloud.CloudDescriptor in project lucene-solr by apache.

the class RealTimeGetComponent method process.

@Override
public void process(ResponseBuilder rb) throws IOException {
    SolrQueryRequest req = rb.req;
    SolrQueryResponse rsp = rb.rsp;
    SolrParams params = req.getParams();
    CloudDescriptor cloudDesc = req.getCore().getCoreDescriptor().getCloudDescriptor();
    if (cloudDesc != null) {
        Replica.Type replicaType = cloudDesc.getReplicaType();
        if (replicaType != null) {
            if (replicaType == Replica.Type.PULL) {
                throw new SolrException(ErrorCode.BAD_REQUEST, String.format(Locale.ROOT, "%s can't handle realtime get requests. Replicas of type %s do not support this type of request", cloudDesc.getCoreNodeName(), Replica.Type.PULL));
            }
            // non-leader TLOG replicas should not respond to distrib /get requests, but internal requests are OK
        }
    }
    if (!params.getBool(COMPONENT_NAME, true)) {
        return;
    }
    // This seems rather kludgey; maybe there is a better way to indicate
    // that a replica can support handling version ranges
    String val = params.get("checkCanHandleVersionRanges");
    if (val != null) {
        rb.rsp.add("canHandleVersionRanges", true);
        return;
    }
    val = params.get("getFingerprint");
    if (val != null) {
        processGetFingeprint(rb);
        return;
    }
    val = params.get("getVersions");
    if (val != null) {
        processGetVersions(rb);
        return;
    }
    val = params.get("getUpdates");
    if (val != null) {
        // solrcloud_debug
        if (log.isDebugEnabled()) {
            try {
                RefCounted<SolrIndexSearcher> searchHolder = req.getCore().getNewestSearcher(false);
                SolrIndexSearcher searcher = searchHolder.get();
                try {
                    log.debug(req.getCore().getCoreContainer().getZkController().getNodeName() + " min count to sync to (from most recent searcher view) " + searcher.search(new MatchAllDocsQuery(), 1).totalHits);
                } finally {
                    searchHolder.decref();
                }
            } catch (Exception e) {
                log.debug("Error in solrcloud_debug block", e);
            }
        }
        processGetUpdates(rb);
        return;
    }
    val = params.get("getInputDocument");
    if (val != null) {
        processGetInputDocument(rb);
        return;
    }
    final IdsRequsted reqIds = IdsRequsted.parseParams(req);
    if (reqIds.allIds.isEmpty()) {
        return;
    }
    // parse any existing filters
    try {
        String[] fqs = req.getParams().getParams(CommonParams.FQ);
        if (fqs != null && fqs.length != 0) {
            List<Query> filters = rb.getFilters();
            // if filters already exists, make a copy instead of modifying the original
            filters = filters == null ? new ArrayList<Query>(fqs.length) : new ArrayList<>(filters);
            for (String fq : fqs) {
                if (fq != null && fq.trim().length() != 0) {
                    QParser fqp = QParser.getParser(fq, req);
                    filters.add(fqp.getQuery());
                }
            }
            if (!filters.isEmpty()) {
                rb.setFilters(filters);
            }
        }
    } catch (SyntaxError e) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
    }
    final SolrCore core = req.getCore();
    SchemaField idField = core.getLatestSchema().getUniqueKeyField();
    FieldType fieldType = idField.getType();
    SolrDocumentList docList = new SolrDocumentList();
    UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
    SearcherInfo searcherInfo = new SearcherInfo(core);
    // this is initialized & set on the context *after* any searcher (re-)opening
    ResultContext resultContext = null;
    final DocTransformer transformer = rsp.getReturnFields().getTransformer();
    // true in any situation where we have to use a realtime searcher rather than returning docs
    // directly from the UpdateLog
    final boolean mustUseRealtimeSearcher =
            // if we have filters, we need to check those against the indexed form of the doc
            (rb.getFilters() != null) || ((null != transformer) && transformer.needsSolrIndexSearcher());
    try {
        BytesRefBuilder idBytes = new BytesRefBuilder();
        for (String idStr : reqIds.allIds) {
            fieldType.readableToIndexed(idStr, idBytes);
            if (ulog != null) {
                Object o = ulog.lookup(idBytes.get());
                if (o != null) {
                    // should currently be a List<Oper,Ver,Doc/Id>
                    List entry = (List) o;
                    assert entry.size() >= 3;
                    int oper = (Integer) entry.get(UpdateLog.FLAGS_IDX) & UpdateLog.OPERATION_MASK;
                    switch(oper) {
                        // fall through to ADD
                        case UpdateLog.UPDATE_INPLACE:
                        case UpdateLog.ADD:
                            if (mustUseRealtimeSearcher) {
                                // close handles to current searchers & result context
                                searcherInfo.clear();
                                resultContext = null;
                                // force open a new realtime searcher
                                ulog.openRealtimeSearcher();
                                // pretend we never found this record and fall through to use the searcher
                                o = null;
                                break;
                            }
                            SolrDocument doc;
                            if (oper == UpdateLog.ADD) {
                                doc = toSolrDoc((SolrInputDocument) entry.get(entry.size() - 1), core.getLatestSchema());
                            } else if (oper == UpdateLog.UPDATE_INPLACE) {
                                assert entry.size() == 5;
                                // For the in-place update case, we have only obtained the partial document so far.
                                // We need to resolve it to a full document to be returned to the user.
                                doc = resolveFullDocument(core, idBytes.get(), rsp.getReturnFields(), (SolrInputDocument) entry.get(entry.size() - 1), entry, null);
                                if (doc == null) {
                                    // document has been deleted as the resolve was going on
                                    break;
                                }
                            } else {
                                throw new SolrException(ErrorCode.INVALID_STATE, "Expected ADD or UPDATE_INPLACE. Got: " + oper);
                            }
                            if (transformer != null) {
                                // unknown docID
                                transformer.transform(doc, -1, 0);
                            }
                            docList.add(doc);
                            break;
                        case UpdateLog.DELETE:
                            break;
                        default:
                            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown Operation! " + oper);
                    }
                    if (o != null)
                        continue;
                }
            }
            // didn't find it in the update log, so it should be in the newest searcher opened
            searcherInfo.init();
            // don't bother with ResultContext yet, we won't need it if doc doesn't match filters
            int docid = -1;
            long segAndId = searcherInfo.getSearcher().lookupId(idBytes.get());
            if (segAndId >= 0) {
                int segid = (int) segAndId;
                LeafReaderContext ctx = searcherInfo.getSearcher().getTopReaderContext().leaves().get((int) (segAndId >> 32));
                docid = segid + ctx.docBase;
                if (rb.getFilters() != null) {
                    for (Query raw : rb.getFilters()) {
                        Query q = raw.rewrite(searcherInfo.getSearcher().getIndexReader());
                        Scorer scorer = searcherInfo.getSearcher().createWeight(q, false, 1f).scorer(ctx);
                        if (scorer == null || segid != scorer.iterator().advance(segid)) {
                            // filter doesn't match.
                            docid = -1;
                            break;
                        }
                    }
                }
            }
            if (docid < 0)
                continue;
            Document luceneDocument = searcherInfo.getSearcher().doc(docid, rsp.getReturnFields().getLuceneFieldNames());
            SolrDocument doc = toSolrDoc(luceneDocument, core.getLatestSchema());
            SolrDocumentFetcher docFetcher = searcherInfo.getSearcher().getDocFetcher();
            docFetcher.decorateDocValueFields(doc, docid, docFetcher.getNonStoredDVs(true));
            if (null != transformer) {
                if (null == resultContext) {
                    // either first pass, or we've re-opened searcher - either way now we setContext
                    resultContext = new RTGResultContext(rsp.getReturnFields(), searcherInfo.getSearcher(), req);
                    transformer.setContext(resultContext);
                }
                transformer.transform(doc, docid, 0);
            }
            docList.add(doc);
        }
    } finally {
        searcherInfo.clear();
    }
    addDocListToResponse(rb, docList);
}
Also used : ResultContext(org.apache.solr.response.ResultContext) Query(org.apache.lucene.search.Query) MatchAllDocsQuery(org.apache.lucene.search.MatchAllDocsQuery) SolrCore(org.apache.solr.core.SolrCore) DocTransformer(org.apache.solr.response.transform.DocTransformer) ArrayList(java.util.ArrayList) Scorer(org.apache.lucene.search.Scorer) Document(org.apache.lucene.document.Document) SolrInputDocument(org.apache.solr.common.SolrInputDocument) SolrDocument(org.apache.solr.common.SolrDocument) SyntaxError(org.apache.solr.search.SyntaxError) UpdateLog(org.apache.solr.update.UpdateLog) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) SolrDocumentList(org.apache.solr.common.SolrDocumentList) DocList(org.apache.solr.search.DocList) List(java.util.List) NamedList(org.apache.solr.common.util.NamedList) SolrException(org.apache.solr.common.SolrException) SolrQueryResponse(org.apache.solr.response.SolrQueryResponse) BytesRefBuilder(org.apache.lucene.util.BytesRefBuilder) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher) Replica(org.apache.solr.common.cloud.Replica) CloudDescriptor(org.apache.solr.cloud.CloudDescriptor) IOException(java.io.IOException) IndexFingerprint(org.apache.solr.update.IndexFingerprint) FieldType(org.apache.solr.schema.FieldType) SchemaField(org.apache.solr.schema.SchemaField) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) QParser(org.apache.solr.search.QParser) SolrParams(org.apache.solr.common.params.SolrParams) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams) SolrDocumentFetcher(org.apache.solr.search.SolrDocumentFetcher)
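
For orientation, this component backs Solr's realtime-get handler, normally registered at /get. A minimal SolrJ sketch of exercising the single-id path above; the base URL, core name, and document id are placeholders, not taken from the example:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class RealTimeGetClientSketch {
    public static void main(String[] args) throws Exception {
        // "collection1" and "doc1" are hypothetical names for illustration
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
            SolrQuery q = new SolrQuery();
            // route the request to the /get handler, which delegates to RealTimeGetComponent.process
            q.setRequestHandler("/get");
            q.set("id", "doc1");
            QueryResponse rsp = client.query(q);
            // for a single id the handler returns the document under the "doc" key,
            // served from the update log or a realtime searcher as decided above
            System.out.println(rsp.getResponse().get("doc"));
        }
    }
}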

Example 12 with CloudDescriptor

use of org.apache.solr.cloud.CloudDescriptor in project lucene-solr by apache.

the class RealTimeGetComponent method createSubRequests.

public int createSubRequests(ResponseBuilder rb) throws IOException {
    final IdsRequsted reqIds = IdsRequsted.parseParams(rb.req);
    if (reqIds.allIds.isEmpty()) {
        return ResponseBuilder.STAGE_DONE;
    }
    SolrParams params = rb.req.getParams();
    // TODO: handle collection=...?
    ZkController zkController = rb.req.getCore().getCoreContainer().getZkController();
    // if shards=... then use that
    if (zkController != null && params.get(ShardParams.SHARDS) == null) {
        CloudDescriptor cloudDescriptor = rb.req.getCore().getCoreDescriptor().getCloudDescriptor();
        String collection = cloudDescriptor.getCollectionName();
        ClusterState clusterState = zkController.getClusterState();
        DocCollection coll = clusterState.getCollection(collection);
        Map<String, List<String>> sliceToId = new HashMap<>();
        for (String id : reqIds.allIds) {
            Slice slice = coll.getRouter().getTargetSlice(id, null, null, params, coll);
            List<String> idsForShard = sliceToId.get(slice.getName());
            if (idsForShard == null) {
                idsForShard = new ArrayList<>(2);
                sliceToId.put(slice.getName(), idsForShard);
            }
            idsForShard.add(id);
        }
        for (Map.Entry<String, List<String>> entry : sliceToId.entrySet()) {
            String shard = entry.getKey();
            ShardRequest sreq = createShardRequest(rb, entry.getValue());
            // sreq.shards = new String[]{shard};    // TODO: would be nice if this would work...
            sreq.shards = sliceToShards(rb, collection, shard);
            sreq.actualShards = sreq.shards;
            rb.addRequest(this, sreq);
        }
    } else {
        ShardRequest sreq = createShardRequest(rb, reqIds.allIds);
        // ALL
        sreq.shards = null;
        sreq.actualShards = sreq.shards;
        rb.addRequest(this, sreq);
    }
    return ResponseBuilder.STAGE_DONE;
}
Also used : ClusterState(org.apache.solr.common.cloud.ClusterState) HashMap(java.util.HashMap) CloudDescriptor(org.apache.solr.cloud.CloudDescriptor) ZkController(org.apache.solr.cloud.ZkController) Slice(org.apache.solr.common.cloud.Slice) SolrParams(org.apache.solr.common.params.SolrParams) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams) SolrDocumentList(org.apache.solr.common.SolrDocumentList) DocList(org.apache.solr.search.DocList) List(java.util.List) ArrayList(java.util.ArrayList) NamedList(org.apache.solr.common.util.NamedList) DocCollection(org.apache.solr.common.cloud.DocCollection) Map(java.util.Map)
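
The loop above groups ids into per-slice buckets with an explicit get/put; a standalone sketch of the same bucketing pattern using computeIfAbsent, with a hypothetical route function standing in for coll.getRouter().getTargetSlice(...).getName():

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class ShardBucketingSketch {
    // groups document ids by the shard the router assigns them to
    static Map<String, List<String>> bucketByShard(Iterable<String> ids, Function<String, String> routeFn) {
        Map<String, List<String>> sliceToId = new HashMap<>();
        for (String id : ids) {
            sliceToId.computeIfAbsent(routeFn.apply(id), k -> new ArrayList<>(2)).add(id);
        }
        return sliceToId;
    }

    public static void main(String[] args) {
        // a toy router for illustration: hash each id into one of two shards
        Function<String, String> routeFn = id -> "shard" + (Math.abs(id.hashCode()) % 2 + 1);
        System.out.println(bucketByShard(List.of("a", "b", "c"), routeFn));
    }
}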

Example 13 with CloudDescriptor

use of org.apache.solr.cloud.CloudDescriptor in project lucene-solr by apache.

the class CoreSorter method init.

public CoreSorter init(CoreContainer cc) {
    this.cc = cc;
    if (cc == null || !cc.isZooKeeperAware()) {
        return this;
    }
    String myNodeName = getNodeName();
    ClusterState state = cc.getZkController().getClusterState();
    for (CloudDescriptor cloudDescriptor : getCloudDescriptors()) {
        String coll = cloudDescriptor.getCollectionName();
        String sliceName = getShardName(cloudDescriptor);
        if (shardsVsReplicaCounts.containsKey(sliceName))
            continue;
        CountsForEachShard c = new CountsForEachShard(0, 0, 0);
        for (Replica replica : getReplicas(state, coll, cloudDescriptor.getShardId())) {
            if (replica.getNodeName().equals(myNodeName)) {
                c.myReplicas++;
            } else {
                Set<String> liveNodes = state.getLiveNodes();
                if (liveNodes.contains(replica.getNodeName())) {
                    c.totalReplicasInLiveNodes++;
                } else {
                    c.totalReplicasInDownNodes++;
                }
            }
        }
        shardsVsReplicaCounts.put(sliceName, c);
    }
    return this;
}
Also used : ClusterState(org.apache.solr.common.cloud.ClusterState) Replica(org.apache.solr.common.cloud.Replica) CloudDescriptor(org.apache.solr.cloud.CloudDescriptor)
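
The counting logic in the loop is easy to restate in isolation: every replica of a shard increments exactly one of three counters depending on where it is hosted. A sketch with the class and field names mirroring the example (the standalone shape is mine, not the Solr source):

import java.util.Set;

public class ReplicaCountSketch {
    // mirrors the CountsForEachShard counters used by CoreSorter.init above
    static class CountsForEachShard {
        int myReplicas, totalReplicasInLiveNodes, totalReplicasInDownNodes;
    }

    static void classify(CountsForEachShard c, String replicaNodeName, String myNodeName, Set<String> liveNodes) {
        if (replicaNodeName.equals(myNodeName)) {
            c.myReplicas++;                   // replica hosted on this node
        } else if (liveNodes.contains(replicaNodeName)) {
            c.totalReplicasInLiveNodes++;     // hosted on another live node
        } else {
            c.totalReplicasInDownNodes++;     // hosted on a node that is down
        }
    }
}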

Example 14 with CloudDescriptor

use of org.apache.solr.cloud.CloudDescriptor in project lucene-solr by apache.

the class IndexFetcher method fetchLatestIndex.

/**
   * This command downloads all the necessary files from the master to install an index commit point. Only changed files are
   * downloaded. It also downloads the conf files (if they are modified).
   *
   * @param forceReplication force a replication in all cases
   * @param forceCoreReload force a core reload in all cases
   * @return an IndexFetchResult indicating success, failure, or that the slave is already in sync
   * @throws IOException if an exception occurs
   */
IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreReload) throws IOException, InterruptedException {
    boolean cleanupDone = false;
    boolean successfulInstall = false;
    markReplicationStart();
    Directory tmpIndexDir = null;
    String tmpIndex;
    Directory indexDir = null;
    String indexDirPath;
    boolean deleteTmpIdxDir = true;
    File tmpTlogDir = null;
    if (!solrCore.getSolrCoreState().getLastReplicateIndexSuccess()) {
        // if the last replication was not a success, we force a full replication
        // when we are a bit more confident we may want to try a partial replication
        // if the error is connection related or something, but we have to be careful
        forceReplication = true;
        LOG.info("Last replication failed, so I'll force replication");
    }
    try {
        if (fetchFromLeader) {
            assert !solrCore.isClosed() : "Replication should be stopped before closing the core";
            Replica replica = getLeaderReplica();
            CloudDescriptor cd = solrCore.getCoreDescriptor().getCloudDescriptor();
            if (cd.getCoreNodeName().equals(replica.getName())) {
                return IndexFetchResult.EXPECTING_NON_LEADER;
            }
            if (replica.getState() != Replica.State.ACTIVE) {
                LOG.info("Replica {} is leader but it's state is {}, skipping replication", replica.getName(), replica.getState());
                return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
            }
            if (!solrCore.getCoreContainer().getZkController().getClusterState().liveNodesContain(replica.getNodeName())) {
                LOG.info("Replica {} is leader but it's not hosted on a live node, skipping replication", replica.getName());
                return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
            }
            if (!replica.getCoreUrl().equals(masterUrl)) {
                masterUrl = replica.getCoreUrl();
                LOG.info("Updated masterUrl to {}", masterUrl);
            // TODO: Do we need to set forceReplication = true?
            } else {
                LOG.debug("masterUrl didn't change");
            }
        }
        // get the current 'replicatable' index version on the master
        NamedList response;
        try {
            response = getLatestVersion();
        } catch (Exception e) {
            final String errorMsg = e.toString();
            if (!Strings.isNullOrEmpty(errorMsg) && errorMsg.contains(INTERRUPT_RESPONSE_MESSAGE)) {
                LOG.warn("Master at: " + masterUrl + " is not available. Index fetch failed by interrupt. Exception: " + errorMsg);
                return new IndexFetchResult(IndexFetchResult.FAILED_BY_INTERRUPT_MESSAGE, false, e);
            } else {
                LOG.warn("Master at: " + masterUrl + " is not available. Index fetch failed by exception: " + errorMsg);
                return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
            }
        }
        long latestVersion = (Long) response.get(CMD_INDEX_VERSION);
        long latestGeneration = (Long) response.get(GENERATION);
        LOG.info("Master's generation: " + latestGeneration);
        LOG.info("Master's version: " + latestVersion);
        // TODO: make sure that getLatestCommit only returns commit points for the main index (i.e. no side-car indexes)
        IndexCommit commit = solrCore.getDeletionPolicy().getLatestCommit();
        if (commit == null) {
            // Presumably the IndexWriter hasn't been opened yet, and hence the deletion policy hasn't been updated with commit points
            RefCounted<SolrIndexSearcher> searcherRefCounted = null;
            try {
                searcherRefCounted = solrCore.getNewestSearcher(false);
                if (searcherRefCounted == null) {
                    LOG.warn("No open searcher found - fetch aborted");
                    return IndexFetchResult.NO_INDEX_COMMIT_EXIST;
                }
                commit = searcherRefCounted.get().getIndexReader().getIndexCommit();
            } finally {
                if (searcherRefCounted != null)
                    searcherRefCounted.decref();
            }
        }
        LOG.info("Slave's generation: " + commit.getGeneration());
        LOG.info("Slave's version: " + IndexDeletionPolicyWrapper.getCommitTimestamp(commit));
        if (latestVersion == 0L) {
            if (forceReplication && commit.getGeneration() != 0) {
                // since we won't get the files for an empty index,
                // we just clear ours and commit
                LOG.info("New index in Master. Deleting mine...");
                RefCounted<IndexWriter> iw = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(solrCore);
                try {
                    iw.get().deleteAll();
                } finally {
                    iw.decref();
                }
                SolrQueryRequest req = new LocalSolrQueryRequest(solrCore, new ModifiableSolrParams());
                solrCore.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
            }
            //there is nothing to be replicated
            successfulInstall = true;
            LOG.debug("Nothing to replicate, master's version is 0");
            return IndexFetchResult.MASTER_VERSION_ZERO;
        }
        // TODO: Should we be comparing timestamps (across machines) here?
        if (!forceReplication && IndexDeletionPolicyWrapper.getCommitTimestamp(commit) == latestVersion) {
            //master and slave are already in sync just return
            LOG.info("Slave in sync with master.");
            successfulInstall = true;
            return IndexFetchResult.ALREADY_IN_SYNC;
        }
        LOG.info("Starting replication process");
        // get the list of files first
        fetchFileList(latestGeneration);
        // this can happen if the commit point is deleted before we fetch the file list.
        if (filesToDownload.isEmpty()) {
            return IndexFetchResult.PEER_INDEX_COMMIT_DELETED;
        }
        LOG.info("Number of files in latest index in master: " + filesToDownload.size());
        if (tlogFilesToDownload != null) {
            LOG.info("Number of tlog files in master: " + tlogFilesToDownload.size());
        }
        // Create the sync service
        fsyncService = ExecutorUtil.newMDCAwareSingleThreadExecutor(new DefaultSolrThreadFactory("fsyncService"));
        // use a synchronized list because the list is read by other threads (to show details)
        filesDownloaded = Collections.synchronizedList(new ArrayList<Map<String, Object>>());
        // if the master's generation is older than the slave's, the indexes are not compatible to be
        // copied incrementally, so a new index directory must be created and all the files copied over
        boolean isFullCopyNeeded = IndexDeletionPolicyWrapper.getCommitTimestamp(commit) >= latestVersion || commit.getGeneration() >= latestGeneration || forceReplication;
        String timestamp = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
        String tmpIdxDirName = "index." + timestamp;
        tmpIndex = solrCore.getDataDir() + tmpIdxDirName;
        tmpIndexDir = solrCore.getDirectoryFactory().get(tmpIndex, DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
        // tmp dir for tlog files
        if (tlogFilesToDownload != null) {
            tmpTlogDir = new File(solrCore.getUpdateHandler().getUpdateLog().getLogDir(), "tlog." + timestamp);
        }
        // current index dir...
        indexDirPath = solrCore.getIndexDir();
        indexDir = solrCore.getDirectoryFactory().get(indexDirPath, DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
        try {
            // We compare all the index files from the master vs the index files on disk to see if there is a mismatch
            // in the metadata. If there is a mismatch for the same index file then we download the entire index again.
            if (!isFullCopyNeeded && isIndexStale(indexDir)) {
                isFullCopyNeeded = true;
            }
            if (!isFullCopyNeeded && !fetchFromLeader) {
                // and wait until we are able to clean up all unused lucene files
                if (solrCore.getCoreContainer().isZooKeeperAware()) {
                    solrCore.closeSearcher();
                }
                // rollback and reopen index writer and wait until all unused files
                // are successfully deleted
                solrCore.getUpdateHandler().newIndexWriter(true);
                RefCounted<IndexWriter> writer = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
                try {
                    IndexWriter indexWriter = writer.get();
                    int c = 0;
                    indexWriter.deleteUnusedFiles();
                    while (hasUnusedFiles(indexDir, commit)) {
                        indexWriter.deleteUnusedFiles();
                        LOG.info("Sleeping for 1000ms to wait for unused lucene index files to be delete-able");
                        Thread.sleep(1000);
                        c++;
                        if (c >= 30) {
                            LOG.warn("IndexFetcher unable to cleanup unused lucene index files so we must do a full copy instead");
                            isFullCopyNeeded = true;
                            break;
                        }
                    }
                    if (c > 0) {
                        LOG.info("IndexFetcher slept for " + (c * 1000) + "ms for unused lucene index files to be delete-able");
                    }
                } finally {
                    writer.decref();
                }
            }
            boolean reloadCore = false;
            try {
                // we have to be careful and do this after we know isFullCopyNeeded won't be flipped
                if (!isFullCopyNeeded) {
                    solrCore.getUpdateHandler().getSolrCoreState().closeIndexWriter(solrCore, true);
                }
                LOG.info("Starting download (fullCopy={}) to {}", isFullCopyNeeded, tmpIndexDir);
                successfulInstall = false;
                long bytesDownloaded = downloadIndexFiles(isFullCopyNeeded, indexDir, tmpIndexDir, latestGeneration);
                if (tlogFilesToDownload != null) {
                    bytesDownloaded += downloadTlogFiles(tmpTlogDir, latestGeneration);
                    // reload update log
                    reloadCore = true;
                }
                final long timeTakenSeconds = getReplicationTimeElapsed();
                final Long bytesDownloadedPerSecond = (timeTakenSeconds != 0 ? new Long(bytesDownloaded / timeTakenSeconds) : null);
                LOG.info("Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}", isFullCopyNeeded, bytesDownloaded, timeTakenSeconds, bytesDownloadedPerSecond, tmpIndexDir);
                Collection<Map<String, Object>> modifiedConfFiles = getModifiedConfFiles(confFilesToDownload);
                if (!modifiedConfFiles.isEmpty()) {
                    reloadCore = true;
                    downloadConfFiles(confFilesToDownload, latestGeneration);
                    if (isFullCopyNeeded) {
                        successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
                        if (successfulInstall)
                            deleteTmpIdxDir = false;
                    } else {
                        successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
                    }
                    if (tlogFilesToDownload != null) {
                        // move tlog files and refresh ulog only if we successfully installed a new index
                        successfulInstall &= moveTlogFiles(tmpTlogDir);
                    }
                    if (successfulInstall) {
                        if (isFullCopyNeeded) {
                            // may be closed
                            if (indexDir != null) {
                                solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
                                // Cleanup all index files not associated with any *named* snapshot.
                                solrCore.deleteNonSnapshotIndexFiles(indexDirPath);
                            }
                        }
                        LOG.info("Configuration files are modified, core will be reloaded");
                        // write the time of replication and the conf files to a file
                        logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
                    }
                } else {
                    terminateAndWaitFsyncService();
                    if (isFullCopyNeeded) {
                        successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
                        if (successfulInstall)
                            deleteTmpIdxDir = false;
                    } else {
                        successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
                    }
                    if (tlogFilesToDownload != null) {
                        // move tlog files and refresh ulog only if we successfully installed a new index
                        successfulInstall &= moveTlogFiles(tmpTlogDir);
                    }
                    if (successfulInstall) {
                        logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
                    }
                }
            } finally {
                if (!isFullCopyNeeded) {
                    solrCore.getUpdateHandler().getSolrCoreState().openIndexWriter(solrCore);
                }
            }
            // we must reload the core after we open the IW back up
            if (successfulInstall && (reloadCore || forceCoreReload)) {
                LOG.info("Reloading SolrCore {}", solrCore.getName());
                reloadCore();
            }
            if (successfulInstall) {
                if (isFullCopyNeeded) {
                    // may be closed
                    if (indexDir != null) {
                        LOG.info("removing old index directory " + indexDir);
                        solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
                        solrCore.getDirectoryFactory().remove(indexDir);
                    }
                }
                if (isFullCopyNeeded) {
                    solrCore.getUpdateHandler().newIndexWriter(isFullCopyNeeded);
                }
                openNewSearcherAndUpdateCommitPoint();
            }
            if (!isFullCopyNeeded && !forceReplication && !successfulInstall) {
                cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
                cleanupDone = true;
                // we try with a full copy of the index
                LOG.warn("Replication attempt was not successful - trying a full index replication reloadCore={}", reloadCore);
                successfulInstall = fetchLatestIndex(true, reloadCore).getSuccessful();
            }
            markReplicationStop();
            return successfulInstall ? IndexFetchResult.INDEX_FETCH_SUCCESS : IndexFetchResult.INDEX_FETCH_FAILURE;
        } catch (ReplicationHandlerException e) {
            LOG.error("User aborted Replication");
            return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
        } catch (SolrException e) {
            throw e;
        } catch (InterruptedException e) {
            throw new InterruptedException("Index fetch interrupted");
        } catch (Exception e) {
            throw new SolrException(ErrorCode.SERVER_ERROR, "Index fetch failed : ", e);
        }
    } finally {
        if (!cleanupDone) {
            cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
        }
    }
}
Also used : ArrayList(java.util.ArrayList) CommitUpdateCommand(org.apache.solr.update.CommitUpdateCommand) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams) SolrException(org.apache.solr.common.SolrException) Directory(org.apache.lucene.store.Directory) NamedList(org.apache.solr.common.util.NamedList) DefaultSolrThreadFactory(org.apache.solr.util.DefaultSolrThreadFactory) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher) Replica(org.apache.solr.common.cloud.Replica) CloudDescriptor(org.apache.solr.cloud.CloudDescriptor) NoSuchFileException(java.nio.file.NoSuchFileException) SolrServerException(org.apache.solr.client.solrj.SolrServerException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) IndexCommit(org.apache.lucene.index.IndexCommit) Date(java.util.Date) LocalSolrQueryRequest(org.apache.solr.request.LocalSolrQueryRequest) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) IndexWriter(org.apache.lucene.index.IndexWriter) File(java.io.File) SimpleDateFormat(java.text.SimpleDateFormat) Map(java.util.Map) HashMap(java.util.HashMap)
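
The isFullCopyNeeded expression is the pivotal decision in this long method and is easy to miss; distilled into a pure function it reads as follows (a sketch restating the expression above, not code from IndexFetcher):

public class FullCopyDecisionSketch {
    // a slave whose commit timestamp or generation is not strictly behind the master's
    // cannot be patched incrementally, so a full copy into a fresh index directory is
    // forced; forceReplication forces it unconditionally
    static boolean isFullCopyNeeded(long slaveCommitTimestamp, long slaveGeneration,
                                    long masterVersion, long masterGeneration,
                                    boolean forceReplication) {
        return slaveCommitTimestamp >= masterVersion
                || slaveGeneration >= masterGeneration
                || forceReplication;
    }
}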

Example 15 with CloudDescriptor

use of org.apache.solr.cloud.CloudDescriptor in project lucene-solr by apache.

the class DocExpirationUpdateProcessorFactory method iAmInChargeOfPeriodicDeletes.

/**
   * <p>
   * Helper method that returns true if the Runnable managed by this factory 
   * should be responsible for doing periodic deletes.
   * </p>
   * <p>
   * In simple standalone installations this method always returns true, 
   * but in cloud mode it will be true if and only if we are currently the leader 
   * of the (active) slice with the first name (lexicographically).
   * </p>
   * <p>
   * If this method returns false, it may have also logged a message letting the user 
   * know why we aren't attempting periodic deletion (but it will attempt not to log 
   * this excessively).
   * </p>
   */
private boolean iAmInChargeOfPeriodicDeletes() {
    ZkController zk = core.getCoreContainer().getZkController();
    if (null == zk)
        return true;
    // This is a lot simpler than doing our own "leader" election across all replicas 
    // of all shards since:
    //   a) we already have a per shard leader
    //   b) shard names must be unique
    //   c) ClusterState is already being "watched" by ZkController, no additional zk hits
    //   d) there might be multiple instances of this factory (in multiple chains) per 
    //      collection, so picking an ephemeral node name for our election would be tricky
    CloudDescriptor desc = core.getCoreDescriptor().getCloudDescriptor();
    String col = desc.getCollectionName();
    List<Slice> slices = new ArrayList<Slice>(zk.getClusterState().getActiveSlices(col));
    Collections.sort(slices, COMPARE_SLICES_BY_NAME);
    if (slices.isEmpty()) {
        log.error("Collection {} has no active Slices?", col);
        return false;
    }
    Replica firstSliceLeader = slices.get(0).getLeader();
    if (null == firstSliceLeader) {
        log.warn("Slice in charge of periodic deletes for {} does not currently have a leader", col);
        return false;
    }
    String leaderInCharge = firstSliceLeader.getName();
    String myCoreNodeName = desc.getCoreNodeName();
    boolean inChargeOfDeletesRightNow = leaderInCharge.equals(myCoreNodeName);
    if (previouslyInChargeOfDeletes && !inChargeOfDeletesRightNow) {
        // don't spam the logs constantly, just log when we know that we're not the guy
        // (the first time -- or anytime we were, but no longer are)
        log.info("Not currently in charge of periodic deletes for this collection, " + "will not trigger delete or log again until this changes");
    }
    previouslyInChargeOfDeletes = inChargeOfDeletesRightNow;
    return inChargeOfDeletesRightNow;
}
Also used : ZkController(org.apache.solr.cloud.ZkController) Slice(org.apache.solr.common.cloud.Slice) ArrayList(java.util.ArrayList) Replica(org.apache.solr.common.cloud.Replica) CloudDescriptor(org.apache.solr.cloud.CloudDescriptor)
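
COMPARE_SLICES_BY_NAME is not shown in the snippet; given how it is used (sorting so that slices.get(0) is the lexicographically first slice), it is presumably a plain name comparator along these lines (a sketch, not the verbatim source):

import java.util.Comparator;

import org.apache.solr.common.cloud.Slice;

public class SliceOrderSketch {
    // orders slices by name so the first element after sorting is the
    // lexicographically smallest shard name, e.g. "shard1"
    static final Comparator<Slice> COMPARE_SLICES_BY_NAME = Comparator.comparing(Slice::getName);
}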

Aggregations

CloudDescriptor (org.apache.solr.cloud.CloudDescriptor) 15
Replica (org.apache.solr.common.cloud.Replica) 8
SolrException (org.apache.solr.common.SolrException) 6
ClusterState (org.apache.solr.common.cloud.ClusterState) 6
Slice (org.apache.solr.common.cloud.Slice) 6
ArrayList (java.util.ArrayList) 5
ZkController (org.apache.solr.cloud.ZkController) 5
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams) 5
DocCollection (org.apache.solr.common.cloud.DocCollection) 4
SolrParams (org.apache.solr.common.params.SolrParams) 4
NamedList (org.apache.solr.common.util.NamedList) 4
HashMap (java.util.HashMap) 3
List (java.util.List) 3
SolrQueryRequest (org.apache.solr.request.SolrQueryRequest) 3
SolrIndexSearcher (org.apache.solr.search.SolrIndexSearcher) 3
File (java.io.File) 2
IOException (java.io.IOException) 2
Map (java.util.Map) 2
MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery) 2
SolrDocumentList (org.apache.solr.common.SolrDocumentList) 2