
Example 16 with CommitUpdateCommand

Use of org.apache.solr.update.CommitUpdateCommand in project lucene-solr by apache.

From class ChangedSchemaMergeTest, method testOptimizeDiffSchemas.

@Test
public void testOptimizeDiffSchemas() throws Exception {
    // load up a core (why not put it on disk?)
    CoreContainer cc = init();
    try (SolrCore changed = cc.getCore("changed")) {
        assertSimilarity(changed, simfac1);
        // add some documents
        addDoc(changed, "id", "1", "which", "15", "text", "some stuff with which");
        addDoc(changed, "id", "2", "which", "15", "text", "some stuff with which");
        addDoc(changed, "id", "3", "which", "15", "text", "some stuff with which");
        addDoc(changed, "id", "4", "which", "15", "text", "some stuff with which");
        SolrQueryRequest req = new LocalSolrQueryRequest(changed, new NamedList<>());
        changed.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
        // write the new schema out and make it current
        FileUtils.writeStringToFile(schemaFile, withoutWhich, StandardCharsets.UTF_8);
        IndexSchema iSchema = IndexSchemaFactory.buildIndexSchema("schema.xml", changed.getSolrConfig());
        changed.setLatestSchema(iSchema);
        assertSimilarity(changed, simfac2);
        // sanity check our sanity check
        assertFalse("test is broken: both simfacs are the same", simfac1.equals(simfac2));
        addDoc(changed, "id", "1", "text", "some stuff without which");
        addDoc(changed, "id", "5", "text", "some stuff without which");
        changed.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
        changed.getUpdateHandler().commit(new CommitUpdateCommand(req, true));
    } catch (Throwable e) {
        log.error("Test exception, logging so not swallowed if there is a (finally) shutdown exception: " + e.getMessage(), e);
        throw e;
    } finally {
        if (cc != null)
            cc.shutdown();
    }
}
Also used : LocalSolrQueryRequest(org.apache.solr.request.LocalSolrQueryRequest) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) CoreContainer(org.apache.solr.core.CoreContainer) SolrCore(org.apache.solr.core.SolrCore) CommitUpdateCommand(org.apache.solr.update.CommitUpdateCommand) Test(org.junit.Test)
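
The snippets in this group all funnel through the same small commit path. Below is a minimal sketch of that path, assuming only a live SolrCore named core (not the test harness above); the second CommitUpdateCommand constructor argument is the optimize flag, which is why the test ends with a true call to force a merge.

import java.io.IOException;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.update.CommitUpdateCommand;

// Sketch: hard-commit a core, then optimize it (assumes `core` is a live SolrCore).
void commitAndOptimize(SolrCore core) throws IOException {
    SolrQueryRequest req = new LocalSolrQueryRequest(core, new ModifiableSolrParams());
    try {
        // false = plain hard commit; openSearcher defaults to true, so the
        // committed documents become visible to new searchers
        core.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
        // true = optimize: additionally force-merges the index segments
        core.getUpdateHandler().commit(new CommitUpdateCommand(req, true));
    } finally {
        req.close(); // release the request and anything it acquired
    }
}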

Example 17 with CommitUpdateCommand

Use of org.apache.solr.update.CommitUpdateCommand in project lucene-solr by apache.

From class RecoveryStrategy, method doSyncOrReplicateRecovery.

// TODO: perhaps make this grab a new core each time through the loop to handle core reloads?
public final void doSyncOrReplicateRecovery(SolrCore core) throws KeeperException, InterruptedException {
    boolean replayed = false;
    boolean successfulRecovery = false;
    UpdateLog ulog;
    ulog = core.getUpdateHandler().getUpdateLog();
    if (ulog == null) {
        SolrException.log(LOG, "No UpdateLog found - cannot recover.");
        recoveryFailed(core, zkController, baseUrl, coreZkNodeName, core.getCoreDescriptor());
        return;
    }
    // we temporarily ignore peersync for tlog replicas
    boolean firstTime = replicaType != Replica.Type.TLOG;
    List<Long> recentVersions;
    try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
        recentVersions = recentUpdates.getVersions(ulog.getNumRecordsToKeep());
    } catch (Exception e) {
        SolrException.log(LOG, "Corrupt tlog - ignoring.", e);
        recentVersions = new ArrayList<>(0);
    }
    List<Long> startingVersions = ulog.getStartingVersions();
    if (startingVersions != null && recoveringAfterStartup) {
        try {
            // index of the start of the old list in the current list
            int oldIdx = 0;
            long firstStartingVersion = startingVersions.size() > 0 ? startingVersions.get(0) : 0;
            for (; oldIdx < recentVersions.size(); oldIdx++) {
                if (recentVersions.get(oldIdx) == firstStartingVersion)
                    break;
            }
            if (oldIdx > 0) {
                LOG.info("####### Found new versions added after startup: num=[{}]", oldIdx);
                LOG.info("###### currentVersions=[{}]", recentVersions);
            }
            LOG.info("###### startupVersions=[{}]", startingVersions);
        } catch (Exception e) {
            SolrException.log(LOG, "Error getting recent versions.", e);
            recentVersions = new ArrayList<>(0);
        }
    }
    if (recoveringAfterStartup) {
        // if we're recovering after startup (i.e. we have been down), then we need to know what the last versions were
        // when we went down.  We may have received updates since then.
        recentVersions = startingVersions;
        try {
            if ((ulog.getStartingOperation() & UpdateLog.FLAG_GAP) != 0) {
                // last operation at the time of startup had the GAP flag set...
                // this means we were previously doing a full index replication
                // that probably didn't complete and buffering updates in the
                // meantime.
                LOG.info("Looks like a previous replication recovery did not complete - skipping peer sync.");
                // skip peersync
                firstTime = false;
            }
        } catch (Exception e) {
            SolrException.log(LOG, "Error trying to get ulog starting operation.", e);
            // skip peersync
            firstTime = false;
        }
    }
    if (replicaType == Replica.Type.TLOG) {
        zkController.stopReplicationFromLeader(coreName);
    }
    Future<RecoveryInfo> replayFuture = null;
    while (!successfulRecovery && !Thread.currentThread().isInterrupted() && !isClosed()) {
        // don't use interruption or it will close channels though
        try {
            CloudDescriptor cloudDesc = core.getCoreDescriptor().getCloudDescriptor();
            ZkNodeProps leaderprops = zkStateReader.getLeaderRetry(cloudDesc.getCollectionName(), cloudDesc.getShardId());
            final String leaderBaseUrl = leaderprops.getStr(ZkStateReader.BASE_URL_PROP);
            final String leaderCoreName = leaderprops.getStr(ZkStateReader.CORE_NAME_PROP);
            String leaderUrl = ZkCoreNodeProps.getCoreUrl(leaderBaseUrl, leaderCoreName);
            String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
            boolean isLeader = leaderUrl.equals(ourUrl);
            if (isLeader && !cloudDesc.isLeader()) {
                throw new SolrException(ErrorCode.SERVER_ERROR, "Cloud state still says we are leader.");
            }
            if (cloudDesc.isLeader()) {
                // we are now the leader - no one else must have been suitable
                LOG.warn("We have not yet recovered - but we are now the leader!");
                LOG.info("Finished recovery process.");
                zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
                return;
            }
            LOG.info("Begin buffering updates. core=[{}]", coreName);
            ulog.bufferUpdates();
            replayed = false;
            LOG.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leaderUrl, ourUrl);
            zkController.publish(core.getCoreDescriptor(), Replica.State.RECOVERING);
            final Slice slice = zkStateReader.getClusterState().getSlice(cloudDesc.getCollectionName(), cloudDesc.getShardId());
            try {
                prevSendPreRecoveryHttpUriRequest.abort();
            } catch (NullPointerException e) {
            // okay
            }
            if (isClosed()) {
                LOG.info("RecoveryStrategy has been closed");
                break;
            }
            sendPrepRecoveryCmd(leaderBaseUrl, leaderCoreName, slice);
            if (isClosed()) {
                LOG.info("RecoveryStrategy has been closed");
                break;
            }
            // wait a while so that any updates on the leader that started before
            // it saw us in a recovering state are sure to have finished
            // (see SOLR-7141 for discussion around the current value)
            try {
                Thread.sleep(waitForUpdatesWithStaleStatePauseMilliSeconds);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            // first thing we just try to sync
            if (firstTime) {
                // only try sync the first time through the loop
                firstTime = false;
                LOG.info("Attempting to PeerSync from [{}] - recoveringAfterStartup=[{}]", leaderUrl, recoveringAfterStartup);
                // System.out.println("Attempting to PeerSync from " + leaderUrl
                // + " i am:" + zkController.getNodeName());
                PeerSync peerSync = new PeerSync(core, Collections.singletonList(leaderUrl), ulog.getNumRecordsToKeep(), false, false);
                peerSync.setStartingVersions(recentVersions);
                boolean syncSuccess = peerSync.sync().isSuccess();
                if (syncSuccess) {
                    SolrQueryRequest req = new LocalSolrQueryRequest(core, new ModifiableSolrParams());
                    // force open a new searcher
                    core.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
                    LOG.info("PeerSync stage of recovery was successful.");
                    // solrcloud_debug
                    cloudDebugLog(core, "synced");
                    LOG.info("Replaying updates buffered during PeerSync.");
                    replay(core);
                    replayed = true;
                    // sync success
                    successfulRecovery = true;
                    return;
                }
                LOG.info("PeerSync Recovery was not successful - trying replication.");
            }
            if (isClosed()) {
                LOG.info("RecoveryStrategy has been closed");
                break;
            }
            LOG.info("Starting Replication Recovery.");
            try {
                replicate(zkController.getNodeName(), core, leaderprops);
                if (isClosed()) {
                    LOG.info("RecoveryStrategy has been closed");
                    break;
                }
                replayFuture = replay(core);
                replayed = true;
                if (isClosed()) {
                    LOG.info("RecoveryStrategy has been closed");
                    break;
                }
                LOG.info("Replication Recovery was successful.");
                successfulRecovery = true;
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                LOG.warn("Recovery was interrupted", e);
                close = true;
            } catch (Exception e) {
                SolrException.log(LOG, "Error while trying to recover", e);
            }
        } catch (Exception e) {
            SolrException.log(LOG, "Error while trying to recover. core=" + coreName, e);
        } finally {
            if (!replayed) {
                // dropBufferedUpdates() currently only supports returning to ACTIVE state, which risks additional updates
                // being added w/o UpdateLog.FLAG_GAP, hence losing the info on restart that we are not up-to-date.
                // For now, ulog will simply remain in BUFFERING state, and an additional call to bufferUpdates() will
                // reset our starting point for playback.
                LOG.info("Replay not started, or was not successful... still buffering updates.");
            /** this prev code is retained in case we want to switch strategies.
          try {
            ulog.dropBufferedUpdates();
          } catch (Exception e) {
            SolrException.log(log, "", e);
          }
          **/
            }
            if (successfulRecovery) {
                LOG.info("Registering as Active after recovery.");
                try {
                    if (replicaType == Replica.Type.TLOG) {
                        zkController.startReplicationFromLeader(coreName, true);
                    }
                    zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
                } catch (Exception e) {
                    LOG.error("Could not publish as ACTIVE after succesful recovery", e);
                    successfulRecovery = false;
                }
                if (successfulRecovery) {
                    close = true;
                    recoveryListener.recovered();
                }
            }
        }
        if (!successfulRecovery) {
            // Or do a fall off retry...
            try {
                if (isClosed()) {
                    LOG.info("RecoveryStrategy has been closed");
                    break;
                }
                LOG.error("Recovery failed - trying again... (" + retries + ")");
                retries++;
                if (retries >= maxRetries) {
                    SolrException.log(LOG, "Recovery failed - max retries exceeded (" + retries + ").");
                    try {
                        recoveryFailed(core, zkController, baseUrl, coreZkNodeName, core.getCoreDescriptor());
                    } catch (Exception e) {
                        SolrException.log(LOG, "Could not publish that recovery failed", e);
                    }
                    break;
                }
            } catch (Exception e) {
                SolrException.log(LOG, "An error has occurred during recovery", e);
            }
            try {
                // Wait an exponential interval between retries, start at 5 seconds and work up to a minute.
                // If we're at attempt >= 4, there's no point computing pow(2, retries) because the result 
                // will always be the minimum of the two (12). Since we sleep in 5-second sub-intervals in
                // order to check if we were closed, 12 is chosen as the maximum loopCount (5s * 12 = 1m).
                double loopCount = retries < 4 ? Math.min(Math.pow(2, retries), 12) : 12;
                LOG.info("Wait [{}] seconds before trying to recover again (attempt={})", loopCount, retries);
                for (int i = 0; i < loopCount; i++) {
                    if (isClosed()) {
                        LOG.info("RecoveryStrategy has been closed");
                        // check if someone closed us
                        break;
                    }
                    Thread.sleep(startingRecoveryDelayMilliSeconds);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                LOG.warn("Recovery was interrupted.", e);
                close = true;
            }
        }
    }
    // if replay was skipped (possibly due to pulling a full index from the leader),
    // then we still need to update version bucket seeds after recovery
    if (successfulRecovery && replayFuture == null) {
        LOG.info("Updating version bucket highest from index after successful recovery.");
        core.seedVersionBuckets();
    }
    LOG.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
}
Also used : ZkNodeProps(org.apache.solr.common.cloud.ZkNodeProps) PeerSync(org.apache.solr.update.PeerSync) ArrayList(java.util.ArrayList) RecoveryInfo(org.apache.solr.update.UpdateLog.RecoveryInfo) CommitUpdateCommand(org.apache.solr.update.CommitUpdateCommand) SolrServerException(org.apache.solr.client.solrj.SolrServerException) SolrException(org.apache.solr.common.SolrException) ZooKeeperException(org.apache.solr.common.cloud.ZooKeeperException) SocketTimeoutException(java.net.SocketTimeoutException) KeeperException(org.apache.zookeeper.KeeperException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams) LocalSolrQueryRequest(org.apache.solr.request.LocalSolrQueryRequest) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) Slice(org.apache.solr.common.cloud.Slice) UpdateLog(org.apache.solr.update.UpdateLog)
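
The retry-wait comment inside this method carries real arithmetic, so a self-contained sketch may help; the method name here is illustrative, not part of Solr's API. The loop sleeps in 5-second sub-intervals so a concurrent close() is noticed promptly, and the interval count doubles per retry up to a cap of 12 (5s * 12 = 1 minute).

// Sketch of the backoff arithmetic quoted above (hypothetical helper name).
static int backoffIntervals(int retries) {
    // 2^0..2^3 = 1, 2, 4, 8 intervals; from the 4th retry on, always the cap of 12
    return retries < 4 ? (int) Math.min(Math.pow(2, retries), 12) : 12;
}
// retries = 0, 1, 2, 3, 4, 5, ... -> 1, 2, 4, 8, 12, 12, ... intervals
// i.e. total waits of 5s, 10s, 20s, 40s, 60s, 60s, ...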

Example 18 with CommitUpdateCommand

Use of org.apache.solr.update.CommitUpdateCommand in project lucene-solr by apache.

From class IndexFetcher, method fetchLatestIndex.

/**
   * This command downloads all the necessary files from master to install an index commit point. Only changed files are
   * downloaded. It also downloads the conf files (if they are modified).
   *
   * @param forceReplication force a replication in all cases
   * @param forceCoreReload force a core reload in all cases
   * @return an IndexFetchResult indicating the outcome, e.g. success or that the slave is already in sync
   * @throws IOException if an I/O error occurs while fetching the index
   */
IndexFetchResult fetchLatestIndex(boolean forceReplication, boolean forceCoreReload) throws IOException, InterruptedException {
    boolean cleanupDone = false;
    boolean successfulInstall = false;
    markReplicationStart();
    Directory tmpIndexDir = null;
    String tmpIndex;
    Directory indexDir = null;
    String indexDirPath;
    boolean deleteTmpIdxDir = true;
    File tmpTlogDir = null;
    if (!solrCore.getSolrCoreState().getLastReplicateIndexSuccess()) {
        // if the last replication was not a success, we force a full replication
        // when we are a bit more confident we may want to try a partial replication
        // if the error is connection related or something, but we have to be careful
        forceReplication = true;
        LOG.info("Last replication failed, so I'll force replication");
    }
    try {
        if (fetchFromLeader) {
            assert !solrCore.isClosed() : "Replication should be stopped before closing the core";
            Replica replica = getLeaderReplica();
            CloudDescriptor cd = solrCore.getCoreDescriptor().getCloudDescriptor();
            if (cd.getCoreNodeName().equals(replica.getName())) {
                return IndexFetchResult.EXPECTING_NON_LEADER;
            }
            if (replica.getState() != Replica.State.ACTIVE) {
                LOG.info("Replica {} is leader but it's state is {}, skipping replication", replica.getName(), replica.getState());
                return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
            }
            if (!solrCore.getCoreContainer().getZkController().getClusterState().liveNodesContain(replica.getNodeName())) {
                LOG.info("Replica {} is leader but it's not hosted on a live node, skipping replication", replica.getName());
                return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
            }
            if (!replica.getCoreUrl().equals(masterUrl)) {
                masterUrl = replica.getCoreUrl();
                LOG.info("Updated masterUrl to {}", masterUrl);
            // TODO: Do we need to set forceReplication = true?
            } else {
                LOG.debug("masterUrl didn't change");
            }
        }
        // get the current 'replicateable' index version on the master
        NamedList response;
        try {
            response = getLatestVersion();
        } catch (Exception e) {
            final String errorMsg = e.toString();
            if (!Strings.isNullOrEmpty(errorMsg) && errorMsg.contains(INTERRUPT_RESPONSE_MESSAGE)) {
                LOG.warn("Master at: " + masterUrl + " is not available. Index fetch failed by interrupt. Exception: " + errorMsg);
                return new IndexFetchResult(IndexFetchResult.FAILED_BY_INTERRUPT_MESSAGE, false, e);
            } else {
                LOG.warn("Master at: " + masterUrl + " is not available. Index fetch failed by exception: " + errorMsg);
                return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
            }
        }
        long latestVersion = (Long) response.get(CMD_INDEX_VERSION);
        long latestGeneration = (Long) response.get(GENERATION);
        LOG.info("Master's generation: " + latestGeneration);
        LOG.info("Master's version: " + latestVersion);
        // TODO: make sure that getLatestCommit only returns commit points for the main index (i.e. no side-car indexes)
        IndexCommit commit = solrCore.getDeletionPolicy().getLatestCommit();
        if (commit == null) {
            // Presumably the IndexWriter hasn't been opened yet, and hence the deletion policy hasn't been updated with commit points
            RefCounted<SolrIndexSearcher> searcherRefCounted = null;
            try {
                searcherRefCounted = solrCore.getNewestSearcher(false);
                if (searcherRefCounted == null) {
                    LOG.warn("No open searcher found - fetch aborted");
                    return IndexFetchResult.NO_INDEX_COMMIT_EXIST;
                }
                commit = searcherRefCounted.get().getIndexReader().getIndexCommit();
            } finally {
                if (searcherRefCounted != null)
                    searcherRefCounted.decref();
            }
        }
        LOG.info("Slave's generation: " + commit.getGeneration());
        LOG.info("Slave's version: " + IndexDeletionPolicyWrapper.getCommitTimestamp(commit));
        if (latestVersion == 0L) {
            if (forceReplication && commit.getGeneration() != 0) {
                // since we won't get the files for an empty index,
                // we just clear ours and commit
                LOG.info("New index in Master. Deleting mine...");
                RefCounted<IndexWriter> iw = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(solrCore);
                try {
                    iw.get().deleteAll();
                } finally {
                    iw.decref();
                }
                SolrQueryRequest req = new LocalSolrQueryRequest(solrCore, new ModifiableSolrParams());
                solrCore.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
            }
            //there is nothing to be replicated
            successfulInstall = true;
            LOG.debug("Nothing to replicate, master's version is 0");
            return IndexFetchResult.MASTER_VERSION_ZERO;
        }
        // TODO: Should we be comparing timestamps (across machines) here?
        if (!forceReplication && IndexDeletionPolicyWrapper.getCommitTimestamp(commit) == latestVersion) {
            //master and slave are already in sync just return
            LOG.info("Slave in sync with master.");
            successfulInstall = true;
            return IndexFetchResult.ALREADY_IN_SYNC;
        }
        LOG.info("Starting replication process");
        // get the list of files first
        fetchFileList(latestGeneration);
        // this can happen if the commit point is deleted before we fetch the file list.
        if (filesToDownload.isEmpty()) {
            return IndexFetchResult.PEER_INDEX_COMMIT_DELETED;
        }
        LOG.info("Number of files in latest index in master: " + filesToDownload.size());
        if (tlogFilesToDownload != null) {
            LOG.info("Number of tlog files in master: " + tlogFilesToDownload.size());
        }
        // Create the sync service
        fsyncService = ExecutorUtil.newMDCAwareSingleThreadExecutor(new DefaultSolrThreadFactory("fsyncService"));
        // use a synchronized list because the list is read by other threads (to show details)
        filesDownloaded = Collections.synchronizedList(new ArrayList<Map<String, Object>>());
        // if the master's generation is older than the slave's, the two indexes are not compatible to be copied,
        // so a new index directory is created and all the files are copied
        boolean isFullCopyNeeded = IndexDeletionPolicyWrapper.getCommitTimestamp(commit) >= latestVersion || commit.getGeneration() >= latestGeneration || forceReplication;
        String timestamp = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
        String tmpIdxDirName = "index." + timestamp;
        tmpIndex = solrCore.getDataDir() + tmpIdxDirName;
        tmpIndexDir = solrCore.getDirectoryFactory().get(tmpIndex, DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
        // tmp dir for tlog files
        if (tlogFilesToDownload != null) {
            tmpTlogDir = new File(solrCore.getUpdateHandler().getUpdateLog().getLogDir(), "tlog." + timestamp);
        }
        // current index dir...
        indexDirPath = solrCore.getIndexDir();
        indexDir = solrCore.getDirectoryFactory().get(indexDirPath, DirContext.DEFAULT, solrCore.getSolrConfig().indexConfig.lockType);
        try {
            // we compare the index files from the master with the index files on disk to see if there is a mismatch
            // in the metadata. If there is a mismatch for the same index file then we download the entire index again.
            if (!isFullCopyNeeded && isIndexStale(indexDir)) {
                isFullCopyNeeded = true;
            }
            if (!isFullCopyNeeded && !fetchFromLeader) {
                // a searcher might be using some flushed but not committed segments (because of
                // soft commits), so we need to close the existing searcher on the last commit
                // and wait until we are able to clean up all unused lucene files
                if (solrCore.getCoreContainer().isZooKeeperAware()) {
                    solrCore.closeSearcher();
                }
                // rollback and reopen index writer and wait until all unused files
                // are successfully deleted
                solrCore.getUpdateHandler().newIndexWriter(true);
                RefCounted<IndexWriter> writer = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
                try {
                    IndexWriter indexWriter = writer.get();
                    int c = 0;
                    indexWriter.deleteUnusedFiles();
                    while (hasUnusedFiles(indexDir, commit)) {
                        indexWriter.deleteUnusedFiles();
                        LOG.info("Sleeping for 1000ms to wait for unused lucene index files to be delete-able");
                        Thread.sleep(1000);
                        c++;
                        if (c >= 30) {
                            LOG.warn("IndexFetcher unable to cleanup unused lucene index files so we must do a full copy instead");
                            isFullCopyNeeded = true;
                            break;
                        }
                    }
                    if (c > 0) {
                        LOG.info("IndexFetcher slept for " + (c * 1000) + "ms for unused lucene index files to be delete-able");
                    }
                } finally {
                    writer.decref();
                }
            }
            boolean reloadCore = false;
            try {
                // we have to be careful and do this after we know isFullCopyNeeded won't be flipped
                if (!isFullCopyNeeded) {
                    solrCore.getUpdateHandler().getSolrCoreState().closeIndexWriter(solrCore, true);
                }
                LOG.info("Starting download (fullCopy={}) to {}", isFullCopyNeeded, tmpIndexDir);
                successfulInstall = false;
                long bytesDownloaded = downloadIndexFiles(isFullCopyNeeded, indexDir, tmpIndexDir, latestGeneration);
                if (tlogFilesToDownload != null) {
                    bytesDownloaded += downloadTlogFiles(tmpTlogDir, latestGeneration);
                    // reload update log
                    reloadCore = true;
                }
                final long timeTakenSeconds = getReplicationTimeElapsed();
                final Long bytesDownloadedPerSecond = (timeTakenSeconds != 0 ? new Long(bytesDownloaded / timeTakenSeconds) : null);
                LOG.info("Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}", isFullCopyNeeded, bytesDownloaded, timeTakenSeconds, bytesDownloadedPerSecond, tmpIndexDir);
                Collection<Map<String, Object>> modifiedConfFiles = getModifiedConfFiles(confFilesToDownload);
                if (!modifiedConfFiles.isEmpty()) {
                    reloadCore = true;
                    downloadConfFiles(confFilesToDownload, latestGeneration);
                    if (isFullCopyNeeded) {
                        successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
                        if (successfulInstall)
                            deleteTmpIdxDir = false;
                    } else {
                        successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
                    }
                    if (tlogFilesToDownload != null) {
                        // move tlog files and refresh ulog only if we successfully installed a new index
                        successfulInstall &= moveTlogFiles(tmpTlogDir);
                    }
                    if (successfulInstall) {
                        if (isFullCopyNeeded) {
                            // may be closed
                            if (indexDir != null) {
                                solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
                                // Cleanup all index files not associated with any *named* snapshot.
                                solrCore.deleteNonSnapshotIndexFiles(indexDirPath);
                            }
                        }
                        LOG.info("Configuration files are modified, core will be reloaded");
                        // write the time of replication and the conf files to a file
                        logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
                    }
                } else {
                    terminateAndWaitFsyncService();
                    if (isFullCopyNeeded) {
                        successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
                        if (successfulInstall)
                            deleteTmpIdxDir = false;
                    } else {
                        successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
                    }
                    if (tlogFilesToDownload != null) {
                        // move tlog files and refresh ulog only if we successfully installed a new index
                        successfulInstall &= moveTlogFiles(tmpTlogDir);
                    }
                    if (successfulInstall) {
                        logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
                    }
                }
            } finally {
                if (!isFullCopyNeeded) {
                    solrCore.getUpdateHandler().getSolrCoreState().openIndexWriter(solrCore);
                }
            }
            // we must reload the core after we open the IW back up
            if (successfulInstall && (reloadCore || forceCoreReload)) {
                LOG.info("Reloading SolrCore {}", solrCore.getName());
                reloadCore();
            }
            if (successfulInstall) {
                if (isFullCopyNeeded) {
                    // may be closed
                    if (indexDir != null) {
                        LOG.info("removing old index directory " + indexDir);
                        solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
                        solrCore.getDirectoryFactory().remove(indexDir);
                    }
                }
                if (isFullCopyNeeded) {
                    solrCore.getUpdateHandler().newIndexWriter(isFullCopyNeeded);
                }
                openNewSearcherAndUpdateCommitPoint();
            }
            if (!isFullCopyNeeded && !forceReplication && !successfulInstall) {
                cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
                cleanupDone = true;
                // we try with a full copy of the index
                LOG.warn("Replication attempt was not successful - trying a full index replication reloadCore={}", reloadCore);
                successfulInstall = fetchLatestIndex(true, reloadCore).getSuccessful();
            }
            markReplicationStop();
            return successfulInstall ? IndexFetchResult.INDEX_FETCH_SUCCESS : IndexFetchResult.INDEX_FETCH_FAILURE;
        } catch (ReplicationHandlerException e) {
            LOG.error("User aborted Replication");
            return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
        } catch (SolrException e) {
            throw e;
        } catch (InterruptedException e) {
            throw new InterruptedException("Index fetch interrupted");
        } catch (Exception e) {
            throw new SolrException(ErrorCode.SERVER_ERROR, "Index fetch failed : ", e);
        }
    } finally {
        if (!cleanupDone) {
            cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
        }
    }
}
Also used : ArrayList(java.util.ArrayList) CommitUpdateCommand(org.apache.solr.update.CommitUpdateCommand) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams) SolrException(org.apache.solr.common.SolrException) Directory(org.apache.lucene.store.Directory) NamedList(org.apache.solr.common.util.NamedList) DefaultSolrThreadFactory(org.apache.solr.util.DefaultSolrThreadFactory) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher) Replica(org.apache.solr.common.cloud.Replica) CloudDescriptor(org.apache.solr.cloud.CloudDescriptor) NoSuchFileException(java.nio.file.NoSuchFileException) SolrServerException(org.apache.solr.client.solrj.SolrServerException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) IndexCommit(org.apache.lucene.index.IndexCommit) Date(java.util.Date) LocalSolrQueryRequest(org.apache.solr.request.LocalSolrQueryRequest) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) IndexWriter(org.apache.lucene.index.IndexWriter) File(java.io.File) SimpleDateFormat(java.text.SimpleDateFormat) Map(java.util.Map) HashMap(java.util.HashMap)
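
The single isFullCopyNeeded expression above decides between an incremental and a full copy. As a sketch of that decision in isolation (the method and parameter names are mine for illustration):

// A full copy is needed when the slave's commit point is not strictly older than the
// master's (an equal or newer version/generation cannot be patched incrementally),
// or when replication is forced outright.
static boolean isFullCopyNeeded(long slaveVersion, long slaveGeneration,
                                long masterVersion, long masterGeneration,
                                boolean forceReplication) {
    return slaveVersion >= masterVersion
        || slaveGeneration >= masterGeneration
        || forceReplication;
}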

Example 19 with CommitUpdateCommand

Use of org.apache.solr.update.CommitUpdateCommand in project SearchServices by Alfresco.

From class AlfrescoSolrUtils, method addStoreRoot.

/**
 * Add a store root node (plus its ACL and transaction document) to the given core.
 * @param core the SolrCore to add the store root to
 * @param dataModel the Alfresco Solr data model used to build the documents
 * @param rootNodeRef the node reference of the store root
 * @param txid the transaction id
 * @param dbid the database id of the root node
 * @param acltxid the ACL transaction id
 * @param aclid the ACL id
 * @throws IOException if adding or committing the documents fails
 */
public static void addStoreRoot(SolrCore core, AlfrescoSolrDataModel dataModel, NodeRef rootNodeRef, int txid, int dbid, int acltxid, int aclid) throws IOException {
    SolrServletRequest solrQueryRequest = null;
    try {
        solrQueryRequest = new SolrServletRequest(core, null);
        AddUpdateCommand addDocCmd = new AddUpdateCommand(solrQueryRequest);
        addDocCmd.overwrite = true;
        addDocCmd.solrDoc = createDocument(dataModel, new Long(txid), new Long(dbid), rootNodeRef, ContentModel.TYPE_STOREROOT, new QName[] { ContentModel.ASPECT_ROOT }, null, null, new Long(aclid), new String[] { "/" }, "system", null, null);
        core.getUpdateHandler().addDoc(addDocCmd);
        addAcl(solrQueryRequest, core, dataModel, acltxid, aclid, 0, 0);
        AddUpdateCommand txCmd = new AddUpdateCommand(solrQueryRequest);
        txCmd.overwrite = true;
        SolrInputDocument input = new SolrInputDocument();
        String id = AlfrescoSolrDataModel.getTransactionDocumentId(new Long(txid));
        input.addField(FIELD_SOLR4_ID, id);
        input.addField(FIELD_VERSION, "0");
        input.addField(FIELD_TXID, txid);
        input.addField(FIELD_INTXID, txid);
        input.addField(FIELD_TXCOMMITTIME, (new Date()).getTime());
        input.addField(FIELD_DOC_TYPE, SolrInformationServer.DOC_TYPE_TX);
        txCmd.solrDoc = input;
        core.getUpdateHandler().addDoc(txCmd);
        core.getUpdateHandler().commit(new CommitUpdateCommand(solrQueryRequest, false));
    } finally {
        solrQueryRequest.close();
    }
}
Also used : SolrServletRequest(org.alfresco.solr.AbstractAlfrescoSolrTests.SolrServletRequest) SolrInputDocument(org.apache.solr.common.SolrInputDocument) QName(org.alfresco.service.namespace.QName) AtomicLong(java.util.concurrent.atomic.AtomicLong) CommitUpdateCommand(org.apache.solr.update.CommitUpdateCommand) AddUpdateCommand(org.apache.solr.update.AddUpdateCommand) Date(java.util.Date)
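
Stripped of the Alfresco-specific document construction, the method reduces to a plain add-then-commit sequence. A minimal sketch, assuming a live SolrCore named core and placeholder field values:

import java.io.IOException;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.update.AddUpdateCommand;
import org.apache.solr.update.CommitUpdateCommand;

void addAndCommit(SolrCore core) throws IOException {
    SolrQueryRequest req = new LocalSolrQueryRequest(core, new ModifiableSolrParams());
    try {
        AddUpdateCommand add = new AddUpdateCommand(req);
        add.overwrite = true;                  // replace any existing doc with the same id
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", "example-1");       // placeholder fields, not Alfresco's schema
        add.solrDoc = doc;
        core.getUpdateHandler().addDoc(add);
        // make the added document durable and visible
        core.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
    } finally {
        req.close();
    }
}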

Example 20 with CommitUpdateCommand

Use of org.apache.solr.update.CommitUpdateCommand in project SearchServices by Alfresco.

From class AuthDataLoad, method setup.

@BeforeClass
public static void setup() throws Exception {
    // Start test harness
    initAlfrescoCore("schema.xml");
    // Root
    NodeRef rootNodeRef = new NodeRef(new StoreRef("workspace", "SpacesStore"), createGUID());
    addStoreRoot(h.getCore(), dataModel, rootNodeRef, 1, 1, 1, 1);
    // rsp.add("StoreRootNode", 1);
    // Base
    HashMap<QName, PropertyValue> baseFolderProperties = new HashMap<QName, PropertyValue>();
    baseFolderProperties.put(ContentModel.PROP_NAME, new StringPropertyValue("Base Folder"));
    NodeRef baseFolderNodeRef = new NodeRef(new StoreRef("workspace", "SpacesStore"), createGUID());
    QName baseFolderQName = QName.createQName(NamespaceService.CONTENT_MODEL_1_0_URI, "baseFolder");
    ChildAssociationRef n01CAR = new ChildAssociationRef(ContentModel.ASSOC_CHILDREN, rootNodeRef, baseFolderQName, baseFolderNodeRef, true, 0);
    addNode(h.getCore(), dataModel, 1, 2, 1, ContentModel.TYPE_FOLDER, null, baseFolderProperties, null, "andy", new ChildAssociationRef[] { n01CAR }, new NodeRef[] { rootNodeRef }, new String[] { "/" + baseFolderQName.toString() }, baseFolderNodeRef, true);
    // Folders
    HashMap<QName, PropertyValue> folder00Properties = new HashMap<QName, PropertyValue>();
    folder00Properties.put(ContentModel.PROP_NAME, new StringPropertyValue("Folder 0"));
    NodeRef folder00NodeRef = new NodeRef(new StoreRef("workspace", "SpacesStore"), createGUID());
    QName folder00QName = QName.createQName(NamespaceService.CONTENT_MODEL_1_0_URI, "Folder 0");
    ChildAssociationRef folder00CAR = new ChildAssociationRef(ContentModel.ASSOC_CONTAINS, baseFolderNodeRef, folder00QName, folder00NodeRef, true, 0);
    addNode(h.getCore(), dataModel, 1, 3, 1, ContentModel.TYPE_FOLDER, null, folder00Properties, null, "andy", new ChildAssociationRef[] { folder00CAR }, new NodeRef[] { baseFolderNodeRef, rootNodeRef }, new String[] { "/" + baseFolderQName.toString() + "/" + folder00QName.toString() }, folder00NodeRef, true);
    for (long i = 0; i < count; i++) {
        addAcl(h.getCore(), dataModel, 10 + (int) i, 10 + (int) i, (int) (i % maxReader), (int) maxReader);
        HashMap<QName, PropertyValue> content00Properties = new HashMap<QName, PropertyValue>();
        MLTextPropertyValue desc00 = new MLTextPropertyValue();
        desc00.addValue(Locale.ENGLISH, "Doc " + i);
        desc00.addValue(Locale.US, "Doc " + i);
        content00Properties.put(ContentModel.PROP_DESCRIPTION, desc00);
        content00Properties.put(ContentModel.PROP_TITLE, desc00);
        content00Properties.put(ContentModel.PROP_CONTENT, new ContentPropertyValue(Locale.UK, 0L, "UTF-8", "text/plain", null));
        content00Properties.put(ContentModel.PROP_NAME, new StringPropertyValue("Doc " + i));
        content00Properties.put(ContentModel.PROP_CREATOR, new StringPropertyValue("Test"));
        content00Properties.put(ContentModel.PROP_MODIFIER, new StringPropertyValue("Test"));
        content00Properties.put(ContentModel.PROP_VERSION_LABEL, new StringPropertyValue("1.0"));
        content00Properties.put(ContentModel.PROP_OWNER, new StringPropertyValue("Test"));
        Date date00 = new Date();
        content00Properties.put(ContentModel.PROP_CREATED, new StringPropertyValue(DefaultTypeConverter.INSTANCE.convert(String.class, date00)));
        content00Properties.put(ContentModel.PROP_MODIFIED, new StringPropertyValue(DefaultTypeConverter.INSTANCE.convert(String.class, date00)));
        HashMap<QName, String> content00Content = new HashMap<QName, String>();
        content00Content.put(ContentModel.PROP_CONTENT, "Test doc number " + i);
        NodeRef content00NodeRef = new NodeRef(new StoreRef("workspace", "SpacesStore"), createGUID());
        QName content00QName = QName.createQName(NamespaceService.CONTENT_MODEL_1_0_URI, "Doc-" + i);
        ChildAssociationRef content00CAR = new ChildAssociationRef(ContentModel.ASSOC_CONTAINS, folder00NodeRef, content00QName, content00NodeRef, true, 0);
        addNode(h.getCore(), dataModel, 1, 10 + (int) i, 10 + (int) i, ContentModel.TYPE_CONTENT, new QName[] { ContentModel.ASPECT_OWNABLE, ContentModel.ASPECT_TITLED }, content00Properties, content00Content, "andy", new ChildAssociationRef[] { content00CAR }, new NodeRef[] { baseFolderNodeRef, rootNodeRef, folder00NodeRef }, new String[] { "/" + baseFolderQName.toString() + "/" + folder00QName.toString() + "/" + content00QName.toString() }, content00NodeRef, false);
    }
    h.getCore().getUpdateHandler().commit(new CommitUpdateCommand(req(), false));
}
Also used : StoreRef(org.alfresco.service.cmr.repository.StoreRef) StringPropertyValue(org.alfresco.solr.client.StringPropertyValue) HashMap(java.util.HashMap) QName(org.alfresco.service.namespace.QName) MLTextPropertyValue(org.alfresco.solr.client.MLTextPropertyValue) PropertyValue(org.alfresco.solr.client.PropertyValue) ContentPropertyValue(org.alfresco.solr.client.ContentPropertyValue) CommitUpdateCommand(org.apache.solr.update.CommitUpdateCommand) ChildAssociationRef(org.alfresco.service.cmr.repository.ChildAssociationRef) Date(java.util.Date) NodeRef(org.alfresco.service.cmr.repository.NodeRef) BeforeClass(org.junit.BeforeClass)
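
Note that the loop above adds all documents first and commits exactly once at the end. A sketch of that bulk-load shape, with a stand-in document builder instead of the Alfresco helpers (imports as in the previous sketch):

// Bulk-load sketch: many addDoc() calls, one commit. Committing per document would
// flush a segment and open a new searcher on every iteration, which is far slower.
void bulkLoad(SolrCore core, int count) throws IOException {
    SolrQueryRequest req = new LocalSolrQueryRequest(core, new ModifiableSolrParams());
    try {
        for (int i = 0; i < count; i++) {
            AddUpdateCommand add = new AddUpdateCommand(req);
            add.solrDoc = makeDoc(i);
            core.getUpdateHandler().addDoc(add);
        }
        core.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
    } finally {
        req.close();
    }
}

// Stand-in for the test's document construction.
SolrInputDocument makeDoc(int i) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", Integer.toString(i));
    doc.addField("text", "Test doc number " + i);
    return doc;
}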

Aggregations

CommitUpdateCommand (org.apache.solr.update.CommitUpdateCommand): 21 usages
LocalSolrQueryRequest (org.apache.solr.request.LocalSolrQueryRequest): 12 usages
SolrQueryRequest (org.apache.solr.request.SolrQueryRequest): 12 usages
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 8 usages
SolrException (org.apache.solr.common.SolrException): 7 usages
SolrQueryResponse (org.apache.solr.response.SolrQueryResponse): 6 usages
SolrCore (org.apache.solr.core.SolrCore): 5 usages
AddUpdateCommand (org.apache.solr.update.AddUpdateCommand): 5 usages
UpdateRequestProcessor (org.apache.solr.update.processor.UpdateRequestProcessor): 4 usages
Date (java.util.Date): 3 usages
SolrInputDocument (org.apache.solr.common.SolrInputDocument): 3 usages
IOException (java.io.IOException): 2 usages
ArrayList (java.util.ArrayList): 2 usages
HashMap (java.util.HashMap): 2 usages
Map (java.util.Map): 2 usages
ExecutionException (java.util.concurrent.ExecutionException): 2 usages
AtomicLong (java.util.concurrent.atomic.AtomicLong): 2 usages
QName (org.alfresco.service.namespace.QName): 2 usages
SolrServletRequest (org.alfresco.solr.AbstractAlfrescoSolrTests.SolrServletRequest): 2 usages
SolrServerException (org.apache.solr.client.solrj.SolrServerException): 2 usages