Use of com.unboundid.util.LDAPSDKThreadFactory in project ldapsdk by pingidentity.
The following usage comes from the LDAPDiff class, method identifyDifferences.
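LDAPSDKThreadFactory implements the standard java.util.concurrent.ThreadFactory interface, taking a base name for the threads it creates and a flag indicating whether they should be daemon threads, so it can back any executor and not just the LDAP SDK's ParallelProcessor. The minimal sketch below (the class name ThreadFactoryExample and the base name "Example Worker" are illustrative, not part of the ldapsdk source) shows the same construction pattern that identifyDifferences, reproduced in full afterward, hands to its ParallelProcessor:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import com.unboundid.util.LDAPSDKThreadFactory;

public final class ThreadFactoryExample {
public static void main(final String[] args) throws InterruptedException {
// Create a factory that produces daemon threads whose names are derived
// from the provided base name.
final LDAPSDKThreadFactory threadFactory = new LDAPSDKThreadFactory("Example Worker", true);
// Any java.util.concurrent executor can use the factory; identifyDifferences
// passes the same kind of factory to a ParallelProcessor instead.
final ExecutorService executor = Executors.newFixedThreadPool(4, threadFactory);
executor.submit(() -> System.out.println("Running in " + Thread.currentThread().getName()));
executor.shutdown();
executor.awaitTermination(5L, TimeUnit.SECONDS);
}
}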
/**
* Examines all of the entries in the provided set and identifies differences
* between the source and target servers. The differences will be written to
* output files, and the return value will provide information about the
* number of entries in each result category.
*
* @param sourcePool A connection pool that may be used to communicate
* with the source server. It must not be
* {@code null}.
* @param targetPool A connection pool that may be used to communicate
* with the target server. It must not be
* {@code null}.
* @param baseDN The base DN for entries to examine. It must not be
* {@code null}.
* @param schema The schema to use in processing. It may optionally
* be {@code null} if no schema is available.
* @param resultCodeRef A reference that may be updated to set the result
* code that should be returned. It must not be
* {@code null} but may be unset.
* @param dnsToExamine The set of DNs to examine. It must not be
* {@code null}.
*
* @return An array of {@code long} values that provide the number of entries
* in each result category. The array that is returned will contain
* six elements. The first will be the number of entries that were
* found to be in sync between the source and target servers. The
* second will be the number of entries that were present only in the
* target server and need to be added to the source server. The
* third will be the number of entries that were present only in the
* source server and need to be removed. The fourth will be the
* number of entries that were present in both servers but were not
* equivalent and therefore need to be modified in the source server.
* The fifth will be the number of entries that were initially
* identified but were subsequently not found in either server. The
* sixth element will be the number of errors encountered while
* attempting to examine entries.
*
* @throws LDAPException If an unrecoverable error occurs during processing.
*/
@NotNull()
private long[] identifyDifferences(@NotNull final LDAPConnectionPool sourcePool,
@NotNull final LDAPConnectionPool targetPool, @NotNull final DN baseDN,
@Nullable final Schema schema,
@NotNull final AtomicReference<ResultCode> resultCodeRef,
@NotNull final TreeSet<LDAPDiffCompactDN> dnsToExamine)
throws LDAPException {
// Create LDIF writers that will be used to write the output files. We want
// to create the main output file even if we don't end up identifying any
// changes, and it's also convenient to just go ahead and create the
// temporary add and modify files now, too, even if we don't end up using
// them.
final File mergedOutputFile = outputLDIFArg.getValue();
final File addFile = new File(mergedOutputFile.getAbsolutePath() + ".add");
addFile.deleteOnExit();
final File modFile = new File(mergedOutputFile.getAbsolutePath() + ".mod");
modFile.deleteOnExit();
long inSyncCount = 0L;
long addCount = 0L;
long deleteCount = 0L;
long modifyCount = 0L;
long missingCount = 0L;
long errorCount = 0L;
ParallelProcessor<LDAPDiffCompactDN, LDAPDiffProcessorResult> parallelProcessor = null;
final String sourceHostPort = getServerHostPort("sourceHostname", "sourcePort");
final String targetHostPort = getServerHostPort("targetHostname", "targetPort");
final TreeSet<LDAPDiffCompactDN> missingEntryDNs = new TreeSet<>();
try (LDIFWriter mergedWriter = createLDIFWriter(mergedOutputFile, INFO_LDAP_DIFF_MERGED_FILE_COMMENT.get(sourceHostPort, targetHostPort));
LDIFWriter addWriter = createLDIFWriter(addFile);
LDIFWriter modWriter = createLDIFWriter(modFile)) {
// Create a parallel processor that will be used to retrieve and compare
// entries from the source and target servers.
final String[] attributes = parser.getTrailingArguments().toArray(StaticUtils.NO_STRINGS);
final LDAPDiffProcessor processor = new LDAPDiffProcessor(sourcePool, targetPool, baseDN, schema, byteForByteArg.isPresent(), attributes, missingOnlyArg.isPresent());
parallelProcessor = new ParallelProcessor<>(processor, new LDAPSDKThreadFactory("LDAPDiff Compare Processor", true), numThreadsArg.getValue(), 5);
// Define variables that will be used to monitor progress and keep track
// of information between passes.
TreeSet<LDAPDiffCompactDN> currentPassDNs = dnsToExamine;
TreeSet<LDAPDiffCompactDN> nextPassDNs = new TreeSet<>();
final TreeSet<LDAPDiffCompactDN> deletedEntryDNs = new TreeSet<>();
final List<LDAPDiffCompactDN> currentBatchOfDNs = new ArrayList<>(MAX_ENTRIES_PER_BATCH);
// Iterate through the entries in one or more passes to identify differences
// between the source and target servers.
for (int i = 1; i <= numPassesArg.getValue(); i++) {
final boolean isLastPass = (i == numPassesArg.getValue());
if (!quietArg.isPresent()) {
out();
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_STARTING_COMPARE_PASS.get(i, numPassesArg.getValue(), currentPassDNs.size()));
}
// Process the changes in batches until we have gone through all of the
// entries.
nextPassDNs.clear();
int differencesIdentifiedCount = 0;
int processedCurrentPassCount = 0;
final int totalCurrentPassCount = currentPassDNs.size();
final Iterator<LDAPDiffCompactDN> dnIterator = currentPassDNs.iterator();
while (dnIterator.hasNext()) {
// Build a batch of DNs.
currentBatchOfDNs.clear();
while (dnIterator.hasNext()) {
currentBatchOfDNs.add(dnIterator.next());
dnIterator.remove();
if (currentBatchOfDNs.size() >= MAX_ENTRIES_PER_BATCH) {
break;
}
}
// Process the batch of entries.
final List<Result<LDAPDiffCompactDN, LDAPDiffProcessorResult>> results;
try {
results = parallelProcessor.processAll(currentBatchOfDNs);
} catch (final Exception e) {
Debug.debugException(e);
throw new LDAPException(ResultCode.LOCAL_ERROR, ERR_LDAP_DIFF_ERROR_PROCESSING_BATCH.get(StaticUtils.getExceptionMessage(e)), e);
}
// Iterate through and handle the results.
for (final Result<LDAPDiffCompactDN, LDAPDiffProcessorResult> result : results) {
processedCurrentPassCount++;
final Throwable exception = result.getFailureCause();
if (exception != null) {
final LDAPDiffCompactDN compactDN = result.getInput();
if (!isLastPass) {
nextPassDNs.add(compactDN);
differencesIdentifiedCount++;
} else {
final LDAPException reportException;
if (exception instanceof LDAPException) {
final LDAPException caughtException = (LDAPException) exception;
reportException = new LDAPException(caughtException.getResultCode(), ERR_LDAP_DIFF_ERROR_COMPARING_ENTRY.get(compactDN.toDN(baseDN, schema).toString(), caughtException.getMessage()), caughtException.getMatchedDN(), caughtException.getReferralURLs(), caughtException.getResponseControls(), caughtException.getCause());
} else {
reportException = new LDAPException(ResultCode.LOCAL_ERROR, ERR_LDAP_DIFF_ERROR_COMPARING_ENTRY.get(compactDN.toDN(baseDN, schema).toString(), StaticUtils.getExceptionMessage(exception)), exception);
}
errorCount++;
resultCodeRef.compareAndSet(null, reportException.getResultCode());
final List<String> formattedResultLines = ResultUtils.formatResult(reportException, false, 0, (WRAP_COLUMN - 2));
final Iterator<String> resultLineIterator = formattedResultLines.iterator();
while (resultLineIterator.hasNext()) {
mergedWriter.writeComment(resultLineIterator.next(), false, (!resultLineIterator.hasNext()));
}
}
continue;
}
final LDAPDiffProcessorResult resultOutput = result.getOutput();
final ChangeType changeType = resultOutput.getChangeType();
if (changeType == null) {
// This indicates that either the entry is in sync between the source
// and target servers, or that it was missing from both servers. If it
// was missing, then capture the DN for inclusion in a comment at the
// end of the LDIF file.
if (resultOutput.isEntryMissing()) {
missingCount++;
missingEntryDNs.add(result.getInput());
} else {
inSyncCount++;
}
} else if (!isLastPass) {
// This entry is out of sync, but this isn't the last pass, so
// just hold on to the DN so that we'll re-examine the entry on
// the next pass.
nextPassDNs.add(result.getInput());
differencesIdentifiedCount++;
} else {
// The entry is out of sync, and this is the last pass. If the
// entry should be deleted, then capture the DN in a sorted list.
// If it's an add or modify, then write it to an appropriate
// temporary file. In each case, update the appropriate counter.
differencesIdentifiedCount++;
switch(changeType) {
case DELETE:
deletedEntryDNs.add(result.getInput());
deleteCount++;
break;
case ADD:
addWriter.writeChangeRecord(new LDIFAddChangeRecord(resultOutput.getEntry()), WARN_LDAP_DIFF_COMMENT_ADDED_ENTRY.get(targetHostPort, sourceHostPort));
addCount++;
break;
case MODIFY:
default:
modWriter.writeChangeRecord(new LDIFModifyChangeRecord(resultOutput.getDN(), resultOutput.getModifications()), WARN_LDAP_DIFF_COMMENT_MODIFIED_ENTRY.get(sourceHostPort, targetHostPort));
modifyCount++;
break;
}
}
}
// Write a progress message.
if (!quietArg.isPresent()) {
final int percentComplete = Math.round(100.0f * processedCurrentPassCount / totalCurrentPassCount);
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_COMPARE_PROGRESS.get(processedCurrentPassCount, totalCurrentPassCount, percentComplete, differencesIdentifiedCount));
}
}
// If this wasn't the last pass and the pass identified any outstanding
// differences, then sleep before the next iteration.
if (isLastPass) {
break;
} else if (nextPassDNs.isEmpty()) {
if (!quietArg.isPresent()) {
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_NO_NEED_FOR_ADDITIONAL_PASS.get());
}
break;
} else {
try {
final int sleepTimeSeconds = secondsBetweenPassesArg.getValue();
if (!quietArg.isPresent()) {
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_WAITING_BEFORE_NEXT_PASS.get(sleepTimeSeconds));
}
Thread.sleep(TimeUnit.SECONDS.toMillis(sleepTimeSeconds));
} catch (final Exception e) {
Debug.debugException(e);
}
}
// Swap the currentPassDNs set (which will now be empty) and the
// nextPassDNs set (which contains the DNs of entries that were found
// out of sync in the current pass) so that they will be correct for the
// next pass.
final TreeSet<LDAPDiffCompactDN> emptyDNSet = currentPassDNs;
currentPassDNs = nextPassDNs;
nextPassDNs = emptyDNSet;
}
// If no differences were identified, then add a comment indicating that
// the servers are in sync to the end of the LDIF file.
if ((addCount == 0) && (deleteCount == 0) && (modifyCount == 0)) {
mergedWriter.writeComment(INFO_LDAP_DIFF_SERVERS_IN_SYNC.get(), true, false);
}
// If there are any entries to delete, then write them to the merged LDIF
// file before closing the writers.
if (!deletedEntryDNs.isEmpty()) {
mergedWriter.writeComment(INFO_LDAP_DIFF_COMMENT_DELETED_ENTRIES.get(), true, true);
if (!quietArg.isPresent()) {
out();
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_STARTING_DELETE_PASS.get(deleteCount));
}
int entryCount = 0;
for (final LDAPDiffCompactDN compactDN : deletedEntryDNs.descendingSet()) {
SearchResultEntry entry = null;
LDAPException ldapException = null;
final String dnString = compactDN.toDN(baseDN, schema).toString();
try {
entry = sourcePool.getEntry(dnString, attributes);
} catch (final LDAPException e) {
Debug.debugException(e);
ldapException = new LDAPException(e.getResultCode(), ERR_LDAP_DIFF_CANNOT_GET_ENTRY_TO_DELETE.get(dnString, StaticUtils.getExceptionMessage(e)), e);
}
if (entry != null) {
mergedWriter.writeComment(INFO_LDAP_DIFF_COMMENT_DELETED_ENTRY.get(sourceHostPort, targetHostPort), false, false);
mergedWriter.writeComment("", false, false);
for (final String line : entry.toLDIF(75)) {
mergedWriter.writeComment(line, false, false);
}
mergedWriter.writeChangeRecord(new LDIFDeleteChangeRecord(entry.getDN()));
} else if (ldapException != null) {
mergedWriter.writeComment(ldapException.getExceptionMessage(), false, false);
mergedWriter.writeChangeRecord(new LDIFDeleteChangeRecord(dnString));
}
entryCount++;
if ((!quietArg.isPresent()) && ((entryCount % MAX_ENTRIES_PER_BATCH) == 0)) {
final int percentComplete = Math.round(100.0f * entryCount / deleteCount);
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_DELETE_PROGRESS.get(entryCount, deleteCount, percentComplete));
}
}
if (!quietArg.isPresent()) {
final int percentComplete = Math.round(100.0f * entryCount / deleteCount);
wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_DELETE_PROGRESS.get(entryCount, deleteCount, percentComplete));
}
}
} catch (final IOException e) {
Debug.debugException(e);
throw new LDAPException(ResultCode.LOCAL_ERROR, ERR_LDAP_DIFF_ERROR_WRITING_OUTPUT.get(getToolName(), StaticUtils.getExceptionMessage(e)), e);
} finally {
if (parallelProcessor != null) {
try {
parallelProcessor.shutdown();
} catch (final Exception e) {
Debug.debugException(e);
}
}
}
// If there are any modified entries, then append the temporary modify
// file to the merged change file.
if (modifyCount > 0L) {
appendFileToFile(modFile, mergedOutputFile, INFO_LDAP_DIFF_COMMENT_MODIFIED_ENTRIES.get());
modFile.delete();
}
// If there are any added entries, then append the temporary add file to
// the merged change file.
if (addCount > 0L) {
appendFileToFile(addFile, mergedOutputFile, INFO_LDAP_DIFF_COMMENT_ADDED_ENTRIES.get());
addFile.delete();
}
// If there were any missing entries, then append a comment to the end of
// the LDIF file to list them.
if (!missingEntryDNs.isEmpty()) {
try (FileOutputStream outputStream = new FileOutputStream(mergedOutputFile, true);
LDIFWriter ldifWriter = new LDIFWriter(outputStream)) {
ldifWriter.writeComment(INFO_LDAP_DIFF_COMMENT_MISSING_ENTRIES.get(), true, true);
for (final LDAPDiffCompactDN missingEntryDN : missingEntryDNs) {
ldifWriter.writeComment(INFO_LDAP_DIFF_COMMENT_MISSING_ENTRY.get(missingEntryDN.toDN(baseDN, schema).toString()), false, true);
}
} catch (final Exception e) {
Debug.debugException(e);
throw new LDAPException(ResultCode.LOCAL_ERROR, ERR_LDAP_DIFF_ERROR_WRITING_OUTPUT.get(getToolName(), StaticUtils.getExceptionMessage(e)), e);
}
}
return new long[] { inSyncCount, addCount, deleteCount, modifyCount, missingCount, errorCount };
}
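The six-element array returned by this method maps positionally to the counts described in the Javadoc above. A hypothetical caller (the local variable names here are illustrative, not part of the ldapsdk source) might unpack it as follows:

final long[] counts = identifyDifferences(sourcePool, targetPool, baseDN, schema, resultCodeRef, dnsToExamine);
final long inSyncEntries = counts[0]; // Entries identical on both servers.
final long entriesToAdd = counts[1]; // Entries present only on the target server.
final long entriesToDelete = counts[2]; // Entries present only on the source server.
final long entriesToModify = counts[3]; // Entries present on both servers but not equivalent.
final long missingEntries = counts[4]; // Entries no longer found on either server.
final long examineErrors = counts[5]; // Entries that could not be examined.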