use of com.unboundid.ldif.LDIFModifyChangeRecord in project ldapsdk by pingidentity.
the class ModifyAuditLogMessage method getRevertChangeRecords.
/**
* {@inheritDoc}
*/
@Override()
@NotNull()
public List<LDIFChangeRecord> getRevertChangeRecords() throws AuditLogException {
// Iterate through the modifications backwards and construct the
// appropriate set of modifications to revert each of them.
final Modification[] mods = modifyChangeRecord.getModifications();
final Modification[] revertMods = new Modification[mods.length];
for (int i = mods.length - 1, j = 0; i >= 0; i--, j++) {
revertMods[j] = getRevertModification(mods[i]);
if (revertMods[j] == null) {
throw new AuditLogException(getLogMessageLines(), ERR_MODIFY_AUDIT_LOG_MESSAGE_MOD_NOT_REVERTIBLE.get(modifyChangeRecord.getDN(), String.valueOf(mods[i])));
}
}
return Collections.<LDIFChangeRecord>singletonList(new LDIFModifyChangeRecord(modifyChangeRecord.getDN(), revertMods));
}
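The following is a minimal usage sketch, not taken from the ldapsdk source, assuming a previously parsed ModifyAuditLogMessage named auditLogMessage and an established LDAPConnection named connection; checked-exception handling is omitted for brevity.
// Apply the revert change records to undo the logged modification. For a
// modify audit log message this is a single LDIFModifyChangeRecord whose
// modifications reverse the original ones.
for (final LDIFChangeRecord revertRecord : auditLogMessage.getRevertChangeRecords()) {
  // processChange sends the corresponding modify request to the server.
  revertRecord.processChange(connection);
}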
use of com.unboundid.ldif.LDIFModifyChangeRecord in project ldapsdk by pingidentity.
the class LDAPDiff method identifyDifferences.
/**
* Examines all of the entries in the provided set and identifies differences
* between the source and target servers. The differences will be written to
* output files, and the return value will provide information about the
* number of entries in each result category.
*
* @param sourcePool A connection pool that may be used to communicate
* with the source server. It must not be
* {@code null}.
* @param targetPool A connection pool that may be used to communicate
* with the target server. It must not be
* {@code null}.
* @param baseDN The base DN for entries to examine. It must not be
* {@code null}.
* @param schema The schema to use in processing. It may optionally
* be {@code null} if no schema is available.
* @param resultCodeRef A reference that may be updated to set the result
* code that should be returned. It must not be
* {@code null} but may be unset.
* @param dnsToExamine The set of DNs to examine. It must not be
* {@code null}.
*
* @return An array of {@code long} values that provide the number of entries
* in each result category. The array that is returned will contain
* six elements. The first will be the number of entries that were
* found to be in sync between the source and target servers. The
* second will be the number of entries that were present only in the
* target server and need to be added to the source server. The
* third will be the number of entries that were present only in the
* source server and need to be removed. The fourth will be the
* number of entries that were present in both servers but were not
* equivalent and therefore need to be modified in the source server.
* The fifth will be the number of entries that were initially
* identified but were subsequently not found in either server. The
* sixth element will be the number of errors encountered while
* attempting to examine entries.
*
* @throws LDAPException If an unrecoverable error occurs during processing.
*/
@NotNull()
private long[] identifyDifferences(@NotNull final LDAPConnectionPool sourcePool, @NotNull final LDAPConnectionPool targetPool, @NotNull final DN baseDN, @Nullable final Schema schema, @NotNull final AtomicReference<ResultCode> resultCodeRef, @NotNull final TreeSet<LDAPDiffCompactDN> dnsToExamine) throws LDAPException {
// Create LDIF writers that will be used to write the output files. We want
// to create the main output file even if we don't end up identifying any
// changes, and it's also convenient to just go ahead and create the
// temporary add and modify files now, too, even if we don't end up using
// them.
final File mergedOutputFile = outputLDIFArg.getValue();
final File addFile = new File(mergedOutputFile.getAbsolutePath() + ".add");
addFile.deleteOnExit();
final File modFile = new File(mergedOutputFile.getAbsolutePath() + ".mod");
modFile.deleteOnExit();
long inSyncCount = 0L;
long addCount = 0L;
long deleteCount = 0L;
long modifyCount = 0L;
long missingCount = 0L;
long errorCount = 0L;
ParallelProcessor<LDAPDiffCompactDN, LDAPDiffProcessorResult> parallelProcessor = null;
final String sourceHostPort = getServerHostPort("sourceHostname", "sourcePort");
final String targetHostPort = getServerHostPort("targetHostname", "targetPort");
final TreeSet<LDAPDiffCompactDN> missingEntryDNs = new TreeSet<>();
try (LDIFWriter mergedWriter = createLDIFWriter(mergedOutputFile, INFO_LDAP_DIFF_MERGED_FILE_COMMENT.get(sourceHostPort, targetHostPort));
LDIFWriter addWriter = createLDIFWriter(addFile);
LDIFWriter modWriter = createLDIFWriter(modFile)) {
// Create a parallel processor that will be used to retrieve and compare
// entries from the source and target servers.
final String[] attributes = parser.getTrailingArguments().toArray(StaticUtils.NO_STRINGS);
final LDAPDiffProcessor processor = new LDAPDiffProcessor(sourcePool, targetPool, baseDN, schema, byteForByteArg.isPresent(), attributes, missingOnlyArg.isPresent());
parallelProcessor = new ParallelProcessor<>(processor, new LDAPSDKThreadFactory("LDAPDiff Compare Processor", true), numThreadsArg.getValue(), 5);
// Define variables that will be used to monitor progress and keep track
// of information between passes.
TreeSet<LDAPDiffCompactDN> currentPassDNs = dnsToExamine;
TreeSet<LDAPDiffCompactDN> nextPassDNs = new TreeSet<>();
final TreeSet<LDAPDiffCompactDN> deletedEntryDNs = new TreeSet<>();
final List<LDAPDiffCompactDN> currentBatchOfDNs = new ArrayList<>(MAX_ENTRIES_PER_BATCH);
// Iterate through the entries, potentially making multiple passes, to
// identify the differences between the source and target servers.
for (int i = 1; i <= numPassesArg.getValue(); i++) {
final boolean isLastPass = (i == numPassesArg.getValue());
if (!quietArg.isPresent()) {
out();
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_STARTING_COMPARE_PASS.get(i, numPassesArg.getValue(), currentPassDNs.size()));
}
// Process the changes in batches until we have gone through all of the
// entries.
nextPassDNs.clear();
int differencesIdentifiedCount = 0;
int processedCurrentPassCount = 0;
final int totalCurrentPassCount = currentPassDNs.size();
final Iterator<LDAPDiffCompactDN> dnIterator = currentPassDNs.iterator();
while (dnIterator.hasNext()) {
// Build a batch of DNs.
currentBatchOfDNs.clear();
while (dnIterator.hasNext()) {
currentBatchOfDNs.add(dnIterator.next());
dnIterator.remove();
if (currentBatchOfDNs.size() >= MAX_ENTRIES_PER_BATCH) {
break;
}
}
// Process the batch of entries.
final List<Result<LDAPDiffCompactDN, LDAPDiffProcessorResult>> results;
try {
results = parallelProcessor.processAll(currentBatchOfDNs);
} catch (final Exception e) {
Debug.debugException(e);
throw new LDAPException(ResultCode.LOCAL_ERROR, ERR_LDAP_DIFF_ERROR_PROCESSING_BATCH.get(StaticUtils.getExceptionMessage(e)), e);
}
// Iterate through and handle the results.
for (final Result<LDAPDiffCompactDN, LDAPDiffProcessorResult> result : results) {
processedCurrentPassCount++;
final Throwable exception = result.getFailureCause();
if (exception != null) {
final LDAPDiffCompactDN compactDN = result.getInput();
if (!isLastPass) {
nextPassDNs.add(compactDN);
differencesIdentifiedCount++;
} else {
final LDAPException reportException;
if (exception instanceof LDAPException) {
final LDAPException caughtException = (LDAPException) exception;
reportException = new LDAPException(caughtException.getResultCode(), ERR_LDAP_DIFF_ERROR_COMPARING_ENTRY.get(compactDN.toDN(baseDN, schema).toString(), caughtException.getMessage()), caughtException.getMatchedDN(), caughtException.getReferralURLs(), caughtException.getResponseControls(), caughtException.getCause());
} else {
reportException = new LDAPException(ResultCode.LOCAL_ERROR, ERR_LDAP_DIFF_ERROR_COMPARING_ENTRY.get(compactDN.toDN(baseDN, schema).toString(), StaticUtils.getExceptionMessage(exception)), exception);
}
errorCount++;
resultCodeRef.compareAndSet(null, reportException.getResultCode());
final List<String> formattedResultLines = ResultUtils.formatResult(reportException, false, 0, (WRAP_COLUMN - 2));
final Iterator<String> resultLineIterator = formattedResultLines.iterator();
while (resultLineIterator.hasNext()) {
mergedWriter.writeComment(resultLineIterator.next(), false, (!resultLineIterator.hasNext()));
}
}
continue;
}
final LDAPDiffProcessorResult resultOutput = result.getOutput();
final ChangeType changeType = resultOutput.getChangeType();
if (changeType == null) {
// There is no change type, which means the entry is either in sync
// between the source and target servers or missing from both of them.
// If it's missing, then keep track of the DN so that it can be listed
// in a comment at the end of the LDIF file; otherwise there is nothing
// more to do for this entry.
if (resultOutput.isEntryMissing()) {
missingCount++;
missingEntryDNs.add(result.getInput());
} else {
inSyncCount++;
}
} else if (!isLastPass) {
// This entry is out of sync, but this isn't the last pass, so
// just hold on to the DN so that we'll re-examine the entry on
// the next pass.
nextPassDNs.add(result.getInput());
differencesIdentifiedCount++;
} else {
// The entry is out of sync, and this is the last pass. If the
// entry should be deleted, then capture the DN in a sorted list.
// If it's an add or modify, then write it to an appropriate
// temporary file. In each case, update the appropriate counter.
differencesIdentifiedCount++;
switch(changeType) {
case DELETE:
deletedEntryDNs.add(result.getInput());
deleteCount++;
break;
case ADD:
addWriter.writeChangeRecord(new LDIFAddChangeRecord(resultOutput.getEntry()), WARN_LDAP_DIFF_COMMENT_ADDED_ENTRY.get(targetHostPort, sourceHostPort));
addCount++;
break;
case MODIFY:
default:
modWriter.writeChangeRecord(new LDIFModifyChangeRecord(resultOutput.getDN(), resultOutput.getModifications()), WARN_LDAP_DIFF_COMMENT_MODIFIED_ENTRY.get(sourceHostPort, targetHostPort));
modifyCount++;
break;
}
}
}
// Write a progress message.
if (!quietArg.isPresent()) {
final int percentComplete = Math.round(100.0f * processedCurrentPassCount / totalCurrentPassCount);
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_COMPARE_PROGRESS.get(processedCurrentPassCount, totalCurrentPassCount, percentComplete, differencesIdentifiedCount));
}
}
// If this was the last pass, or if no differences remain to re-examine,
// then we're done. Otherwise, sleep before the next pass.
if (isLastPass) {
break;
} else if (nextPassDNs.isEmpty()) {
if (!quietArg.isPresent()) {
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_NO_NEED_FOR_ADDITIONAL_PASS.get());
}
break;
} else {
try {
final int sleepTimeSeconds = secondsBetweenPassesArg.getValue();
if (!quietArg.isPresent()) {
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_WAITING_BEFORE_NEXT_PASS.get(sleepTimeSeconds));
}
Thread.sleep(TimeUnit.SECONDS.toMillis(sleepTimeSeconds));
} catch (final Exception e) {
Debug.debugException(e);
}
}
// Swap the currentPassDNs set (which will now be empty) with the
// nextPassDNs set (which contains the DNs of entries found to be out of
// sync during the current pass) so that they are correct for the next pass.
final TreeSet<LDAPDiffCompactDN> emptyDNSet = currentPassDNs;
currentPassDNs = nextPassDNs;
nextPassDNs = emptyDNSet;
}
// If no differences were identified, then write a comment at the end of
// the LDIF file indicating that the servers are in sync.
if ((addCount == 0) && (deleteCount == 0) && (modifyCount == 0)) {
mergedWriter.writeComment(INFO_LDAP_DIFF_SERVERS_IN_SYNC.get(), true, false);
}
// If any entries need to be deleted, then write the corresponding delete
// change records directly to the merged output file (in reverse order, so
// that child entries are removed before their parents) before closing
// the writers.
if (!deletedEntryDNs.isEmpty()) {
mergedWriter.writeComment(INFO_LDAP_DIFF_COMMENT_DELETED_ENTRIES.get(), true, true);
if (!quietArg.isPresent()) {
out();
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_STARTING_DELETE_PASS.get(deleteCount));
}
int entryCount = 0;
for (final LDAPDiffCompactDN compactDN : deletedEntryDNs.descendingSet()) {
SearchResultEntry entry = null;
LDAPException ldapException = null;
final String dnString = compactDN.toDN(baseDN, schema).toString();
try {
entry = sourcePool.getEntry(dnString, attributes);
} catch (final LDAPException e) {
Debug.debugException(e);
ldapException = new LDAPException(e.getResultCode(), ERR_LDAP_DIFF_CANNOT_GET_ENTRY_TO_DELETE.get(dnString, StaticUtils.getExceptionMessage(e)), e);
}
if (entry != null) {
mergedWriter.writeComment(INFO_LDAP_DIFF_COMMENT_DELETED_ENTRY.get(sourceHostPort, targetHostPort), false, false);
mergedWriter.writeComment("", false, false);
for (final String line : entry.toLDIF(75)) {
mergedWriter.writeComment(line, false, false);
}
mergedWriter.writeChangeRecord(new LDIFDeleteChangeRecord(entry.getDN()));
} else if (ldapException != null) {
mergedWriter.writeComment(ldapException.getExceptionMessage(), false, false);
mergedWriter.writeChangeRecord(new LDIFDeleteChangeRecord(dnString));
}
entryCount++;
if ((!quietArg.isPresent()) && ((entryCount % MAX_ENTRIES_PER_BATCH) == 0)) {
final int percentComplete = Math.round(100.0f * entryCount / deleteCount);
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_DELETE_PROGRESS.get(entryCount, deleteCount, percentComplete));
}
}
if (!quietArg.isPresent()) {
final int percentComplete = Math.round(100.0f * entryCount / deleteCount);
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_DELETE_PROGRESS.get(entryCount, deleteCount, percentComplete));
}
}
} catch (final IOException e) {
Debug.debugException(e);
throw new LDAPException(ResultCode.LOCAL_ERROR, ERR_LDAP_DIFF_ERROR_WRITING_OUTPUT.get(getToolName(), StaticUtils.getExceptionMessage(e)), e);
} finally {
if (parallelProcessor != null) {
try {
parallelProcessor.shutdown();
} catch (final Exception e) {
Debug.debugException(e);
}
}
}
// If there are any modified entries, then append the contents of the
// temporary modify file to the merged change file.
if (modifyCount > 0L) {
appendFileToFile(modFile, mergedOutputFile, INFO_LDAP_DIFF_COMMENT_MODIFIED_ENTRIES.get());
modFile.delete();
}
// If there are any added entries, then append the contents of the
// temporary add file to the merged change file.
if (addCount > 0L) {
appendFileToFile(addFile, mergedOutputFile, INFO_LDAP_DIFF_COMMENT_ADDED_ENTRIES.get());
addFile.delete();
}
// If any entries were missing from both servers, then append a comment to
// the merged change file to list them.
if (!missingEntryDNs.isEmpty()) {
try (FileOutputStream outputStream = new FileOutputStream(mergedOutputFile, true);
LDIFWriter ldifWriter = new LDIFWriter(outputStream)) {
ldifWriter.writeComment(INFO_LDAP_DIFF_COMMENT_MISSING_ENTRIES.get(), true, true);
for (final LDAPDiffCompactDN missingEntryDN : missingEntryDNs) {
ldifWriter.writeComment(INFO_LDAP_DIFF_COMMENT_MISSING_ENTRY.get(missingEntryDN.toDN(baseDN, schema).toString()), false, true);
}
} catch (final Exception e) {
Debug.debugException(e);
throw new LDAPException(ResultCode.LOCAL_ERROR, ERR_LDAP_DIFF_ERROR_WRITING_OUTPUT.get(getToolName(), StaticUtils.getExceptionMessage(e)), e);
}
}
return new long[] { inSyncCount, addCount, deleteCount, modifyCount, missingCount, errorCount };
}
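An illustrative sketch, not part of the ldapsdk source, showing how a caller might unpack the six-element array described in the javadoc above; the surrounding variables are assumed to already exist.
final long[] counts = identifyDifferences(sourcePool, targetPool, baseDN, schema, resultCodeRef, dnsToExamine);
final long inSync = counts[0]; // entries in sync between the servers
final long toAdd = counts[1]; // entries to add to the source server
final long toDelete = counts[2]; // entries to remove from the source server
final long toModify = counts[3]; // entries to modify in the source server
final long missing = counts[4]; // entries missing from both servers
final long errors = counts[5]; // entries that could not be examined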
use of com.unboundid.ldif.LDIFModifyChangeRecord in project ldapsdk by pingidentity.
the class LDAPModify method handleModifyWithDN.
/**
* Handles the processing for a change record when the tool should modify an
* entry with a given DN instead of the DN contained in the change record.
*
* @param connectionPool The connection pool to use to communicate with
* the directory server.
* @param changeRecord The LDIF change record to be processed.
* @param argIdentifierString The identifier string for the argument used to
* specify the DN of the entry to modify.
* @param dn The DN of the entry to modify.
* @param modifyControls The set of controls to include in the modify
* requests.
* @param rateLimiter The fixed-rate barrier to use for rate
* limiting. It may be {@code null} if no rate
* limiting is required.
* @param rejectWriter The reject writer to use to record information
* about any failed operations.
*
* @return A result code obtained from processing.
*/
@NotNull()
private ResultCode handleModifyWithDN(@NotNull final LDAPConnectionPool connectionPool, @NotNull final LDIFChangeRecord changeRecord, @NotNull final String argIdentifierString, @NotNull final DN dn, @NotNull final List<Control> modifyControls, @Nullable final FixedRateBarrier rateLimiter, @Nullable final LDIFWriter rejectWriter) {
// If the provided change record isn't a modify change record, then that's
// an error. Reject it.
if (!(changeRecord instanceof LDIFModifyChangeRecord)) {
writeRejectedChange(rejectWriter, ERR_LDAPMODIFY_NON_MODIFY_WITH_BULK.get(argIdentifierString), changeRecord);
return ResultCode.PARAM_ERROR;
}
// Create a new modify change record with the provided DN instead of the
// original DN.
final LDIFModifyChangeRecord originalChangeRecord = (LDIFModifyChangeRecord) changeRecord;
final LDIFModifyChangeRecord updatedChangeRecord = new LDIFModifyChangeRecord(dn.toString(), originalChangeRecord.getModifications(), originalChangeRecord.getControls());
if (rateLimiter != null) {
rateLimiter.await();
}
try {
return doModify(updatedChangeRecord, modifyControls, connectionPool, null, rejectWriter);
} catch (final LDAPException le) {
Debug.debugException(le);
return le.getResultCode();
}
}
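For reference, a hedged standalone sketch (the DN and attribute values are hypothetical, not from the ldapsdk source) of building the kind of modify change record this method constructs, including re-targeting an existing record at a caller-supplied DN.
// Construct a modify change record directly from a DN and modifications.
final Modification mod = new Modification(ModificationType.REPLACE, "description", "updated description");
final LDIFModifyChangeRecord record = new LDIFModifyChangeRecord("uid=test.user,ou=People,dc=example,dc=com", mod);
// Re-target an existing modify change record at a different DN, preserving
// its modifications and controls, as the method above does.
final LDIFModifyChangeRecord retargeted = new LDIFModifyChangeRecord("uid=other.user,ou=People,dc=example,dc=com", record.getModifications(), record.getControls());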
use of com.unboundid.ldif.LDIFModifyChangeRecord in project ldapsdk by pingidentity.
the class ParallelUpdateOperationThread method run.
/**
* Operates in a loop, retrieving changes from the operation queue and
* processing them.
*/
@Override()
public void run() {
LDIFChangeRecord r = opQueue.getChangeRecord();
// Various controls that might be present on the requests.
final Control undeleteRequestControl = new UndeleteRequestControl();
while (r != null) {
if (rateLimiter != null) {
rateLimiter.await();
}
DN parsedDN = null;
DN parsedNewDN = null;
final long startTime = System.currentTimeMillis();
try {
parsedDN = r.getParsedDN();
if (r instanceof LDIFAddChangeRecord) {
final AddRequest addRequest = ((LDIFAddChangeRecord) r).toAddRequest();
addRequest.addControls(addControls);
if (allowUndelete && addRequest.hasAttribute(ATTR_UNDELETE_FROM_DN)) {
addRequest.addControl(undeleteRequestControl);
}
connectionPool.add(addRequest);
parallelUpdate.opCompletedSuccessfully(r, (System.currentTimeMillis() - startTime));
} else if (r instanceof LDIFDeleteChangeRecord) {
final DeleteRequest deleteRequest = ((LDIFDeleteChangeRecord) r).toDeleteRequest();
deleteRequest.addControls(deleteControls);
connectionPool.delete(deleteRequest);
parallelUpdate.opCompletedSuccessfully(r, (System.currentTimeMillis() - startTime));
} else if (r instanceof LDIFModifyChangeRecord) {
final ModifyRequest modifyRequest = ((LDIFModifyChangeRecord) r).toModifyRequest();
modifyRequest.addControls(modifyControls);
connectionPool.modify(modifyRequest);
parallelUpdate.opCompletedSuccessfully(r, (System.currentTimeMillis() - startTime));
} else if (r instanceof LDIFModifyDNChangeRecord) {
final LDIFModifyDNChangeRecord modifyDNChangeRecord = (LDIFModifyDNChangeRecord) r;
parsedNewDN = modifyDNChangeRecord.getNewDN();
final ModifyDNRequest modifyDNRequest = modifyDNChangeRecord.toModifyDNRequest();
modifyDNRequest.addControls(modifyDNControls);
connectionPool.modifyDN(modifyDNRequest);
parallelUpdate.opCompletedSuccessfully(r, (System.currentTimeMillis() - startTime));
} else {
// This should never happen.
r.processChange(connectionPool);
parallelUpdate.opCompletedSuccessfully(r, (System.currentTimeMillis() - startTime));
}
} catch (final LDAPException e) {
Debug.debugException(e);
parallelUpdate.opFailed(r, e, (System.currentTimeMillis() - startTime));
}
if (parsedNewDN == null) {
r = opQueue.getChangeRecord(parsedDN);
} else {
r = opQueue.getChangeRecord(parsedDN, parsedNewDN);
}
}
}
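A reduced, hedged sketch of the modify branch above, assuming the changeRecord and connectionPool variables already exist; the error handling mirrors the pattern used in the loop.
if (changeRecord instanceof LDIFModifyChangeRecord) {
  try {
    // Convert the LDIF change record to a ModifyRequest and send it over a
    // pooled connection.
    final ModifyRequest modifyRequest = ((LDIFModifyChangeRecord) changeRecord).toModifyRequest();
    connectionPool.modify(modifyRequest);
  } catch (final LDAPException e) {
    Debug.debugException(e);
  }
}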
use of com.unboundid.ldif.LDIFModifyChangeRecord in project ldapsdk by pingidentity.
the class LDAPModify method handleModifyMatchingFilter.
/**
* Handles the processing for a change record when the tool should modify
* entries matching a given filter.
*
* @param connectionPool The connection pool to use to communicate with
* the directory server.
* @param changeRecord The LDIF change record to be processed.
* @param argIdentifierString The identifier string for the argument used to
* specify the filter to use to identify the
* entries to modify.
* @param filter The filter to use to identify the entries to
* modify.
* @param searchControls The set of controls to include in the search
* request.
* @param modifyControls The set of controls to include in the modify
* requests.
* @param rateLimiter The fixed-rate barrier to use for rate
* limiting. It may be {@code null} if no rate
* limiting is required.
* @param rejectWriter The reject writer to use to record information
* about any failed operations.
*
* @return A result code obtained from processing.
*/
@NotNull()
private ResultCode handleModifyMatchingFilter(@NotNull final LDAPConnectionPool connectionPool, @NotNull final LDIFChangeRecord changeRecord, @NotNull final String argIdentifierString, @NotNull final Filter filter, @NotNull final List<Control> searchControls, @NotNull final List<Control> modifyControls, @Nullable final FixedRateBarrier rateLimiter, @Nullable final LDIFWriter rejectWriter) {
// If the provided change record isn't a modify change record, then that's
// an error. Reject it.
if (!(changeRecord instanceof LDIFModifyChangeRecord)) {
writeRejectedChange(rejectWriter, ERR_LDAPMODIFY_NON_MODIFY_WITH_BULK.get(argIdentifierString), changeRecord);
return ResultCode.PARAM_ERROR;
}
final LDIFModifyChangeRecord modifyChangeRecord = (LDIFModifyChangeRecord) changeRecord;
final HashSet<DN> processedDNs = new HashSet<>(StaticUtils.computeMapCapacity(100));
// If we need to use the simple paged results control, then we may have to
// issue multiple searches.
ASN1OctetString pagedResultsCookie = null;
long entriesProcessed = 0L;
ResultCode resultCode = ResultCode.SUCCESS;
while (true) {
// Construct the search request to send.
final LDAPModifySearchListener listener = new LDAPModifySearchListener(this, modifyChangeRecord, filter, modifyControls, connectionPool, rateLimiter, rejectWriter, processedDNs);
final SearchRequest searchRequest = new SearchRequest(listener, modifyChangeRecord.getDN(), SearchScope.SUB, filter, SearchRequest.NO_ATTRIBUTES);
searchRequest.setControls(searchControls);
if (searchPageSize.isPresent()) {
searchRequest.addControl(new SimplePagedResultsControl(searchPageSize.getValue(), pagedResultsCookie));
}
// The connection pool's automatic retry feature can't work for searches
// that return one or more entries before encountering a failure. To get
// around that, we'll check a connection out of the pool and use it to
// process the search. If an error occurs that indicates the connection
// is no longer valid, we can replace it with a newly-established
// connection and try again. The search result listener will ensure that
// no entry gets updated twice.
LDAPConnection connection;
try {
connection = connectionPool.getConnection();
} catch (final LDAPException le) {
Debug.debugException(le);
writeRejectedChange(rejectWriter, ERR_LDAPMODIFY_CANNOT_GET_SEARCH_CONNECTION.get(modifyChangeRecord.getDN(), String.valueOf(filter), StaticUtils.getExceptionMessage(le)), modifyChangeRecord, le.toLDAPResult());
return le.getResultCode();
}
SearchResult searchResult;
boolean connectionValid = false;
try {
try {
searchResult = connection.search(searchRequest);
} catch (final LDAPSearchException lse) {
searchResult = lse.getSearchResult();
}
if (searchResult.getResultCode() == ResultCode.SUCCESS) {
connectionValid = true;
} else if (searchResult.getResultCode().isConnectionUsable()) {
connectionValid = true;
writeRejectedChange(rejectWriter, ERR_LDAPMODIFY_SEARCH_FAILED.get(modifyChangeRecord.getDN(), String.valueOf(filter)), modifyChangeRecord, searchResult);
return searchResult.getResultCode();
} else if (!neverRetry.isPresent()) {
try {
connection = connectionPool.replaceDefunctConnection(connection);
} catch (final LDAPException le) {
Debug.debugException(le);
writeRejectedChange(rejectWriter, ERR_LDAPMODIFY_SEARCH_FAILED_CANNOT_RECONNECT.get(modifyChangeRecord.getDN(), String.valueOf(filter)), modifyChangeRecord, searchResult);
return searchResult.getResultCode();
}
try {
searchResult = connection.search(searchRequest);
} catch (final LDAPSearchException lse) {
Debug.debugException(lse);
searchResult = lse.getSearchResult();
}
if (searchResult.getResultCode() == ResultCode.SUCCESS) {
connectionValid = true;
} else {
connectionValid = searchResult.getResultCode().isConnectionUsable();
writeRejectedChange(rejectWriter, ERR_LDAPMODIFY_SEARCH_FAILED.get(modifyChangeRecord.getDN(), String.valueOf(filter)), modifyChangeRecord, searchResult);
return searchResult.getResultCode();
}
} else {
writeRejectedChange(rejectWriter, ERR_LDAPMODIFY_SEARCH_FAILED.get(modifyChangeRecord.getDN(), String.valueOf(filter)), modifyChangeRecord, searchResult);
return searchResult.getResultCode();
}
} finally {
if (connectionValid) {
connectionPool.releaseConnection(connection);
} else {
connectionPool.releaseDefunctConnection(connection);
}
}
// If the search result listener encountered a failure while modifying any
// of the matching entries, then update the result code to return
// accordingly.
if ((resultCode == ResultCode.SUCCESS) && (listener.getResultCode() != ResultCode.SUCCESS)) {
resultCode = listener.getResultCode();
}
// If the search used the simple paged results control then we may need to
// repeat the search to get the next page.
entriesProcessed += searchResult.getEntryCount();
if (searchPageSize.isPresent()) {
final SimplePagedResultsControl responseControl;
try {
responseControl = SimplePagedResultsControl.get(searchResult);
} catch (final LDAPException le) {
Debug.debugException(le);
writeRejectedChange(rejectWriter, ERR_LDAPMODIFY_CANNOT_DECODE_PAGED_RESULTS_CONTROL.get(modifyChangeRecord.getDN(), String.valueOf(filter)), modifyChangeRecord, le.toLDAPResult());
return le.getResultCode();
}
if (responseControl == null) {
writeRejectedChange(rejectWriter, ERR_LDAPMODIFY_MISSING_PAGED_RESULTS_RESPONSE.get(modifyChangeRecord.getDN(), String.valueOf(filter)), modifyChangeRecord);
return ResultCode.CONTROL_NOT_FOUND;
} else {
pagedResultsCookie = responseControl.getCookie();
if (responseControl.moreResultsToReturn()) {
if (verbose.isPresent()) {
commentToOut(INFO_LDAPMODIFY_SEARCH_COMPLETED_MORE_PAGES.get(modifyChangeRecord.getDN(), String.valueOf(filter), entriesProcessed));
for (final String resultLine : ResultUtils.formatResult(searchResult, true, 0, WRAP_COLUMN)) {
out(resultLine);
}
out();
}
} else {
commentToOut(INFO_LDAPMODIFY_SEARCH_COMPLETED.get(entriesProcessed, modifyChangeRecord.getDN(), String.valueOf(filter)));
if (verbose.isPresent()) {
for (final String resultLine : ResultUtils.formatResult(searchResult, true, 0, WRAP_COLUMN)) {
out(resultLine);
}
}
out();
return resultCode;
}
}
} else {
commentToOut(INFO_LDAPMODIFY_SEARCH_COMPLETED.get(entriesProcessed, modifyChangeRecord.getDN(), String.valueOf(filter)));
if (verbose.isPresent()) {
for (final String resultLine : ResultUtils.formatResult(searchResult, true, 0, WRAP_COLUMN)) {
out(resultLine);
}
}
out();
return resultCode;
}
}
}
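A simplified, hedged sketch of the simple paged results loop used above, stripped of the retry, listener, and reject-writer logic; the searchRequest, connection, and pageSize variables are assumed to exist, and checked LDAPException handling is omitted.
ASN1OctetString pagedResultsCookie = null;
while (true) {
  // Attach the paged results control with the cookie from the previous page.
  searchRequest.setControls(new SimplePagedResultsControl(pageSize, pagedResultsCookie));
  final SearchResult searchResult = connection.search(searchRequest);
  final SimplePagedResultsControl responseControl = SimplePagedResultsControl.get(searchResult);
  if ((responseControl == null) || (!responseControl.moreResultsToReturn())) {
    // Either the server did not return a paged results response or there
    // are no more pages to retrieve.
    break;
  }
  pagedResultsCookie = responseControl.getCookie();
}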