Use of com.unboundid.ldif.LDIFDeleteChangeRecord in project ldapsdk by pingidentity.
Example from the class InMemoryRequestHandler, method applyChangesFromLDIF.
/**
 * Reads LDIF change records from the given reader and applies each of them to
 * the data held by this server.  From the caller's perspective the import is
 * atomic: if any record cannot be read or applied, the server content is
 * rolled back to the state it had before this method was invoked (even if
 * some earlier records had already been applied).
 * <BR><BR>
 * It is safe to invoke this method whether or not the server is currently
 * listening for client connections.
 *
 * @param  ldifReader  The LDIF reader from which the change records will be
 *                     read.  It will always be closed before this method
 *                     returns.
 *
 * @return  The number of change records that were successfully applied.
 *
 * @throws  LDAPException  If a change record cannot be read, or if one of
 *                         the records cannot be applied to the server data.
 */
public int applyChangesFromLDIF(@NotNull final LDIFReader ldifReader) throws LDAPException {
  synchronized (entryMap) {
    // Capture the current state so that we can roll back on any failure.
    final InMemoryDirectoryServerSnapshot preImportSnapshot = createSnapshot();
    boolean rollBack = true;
    try {
      int numChangesApplied = 0;
      for (;;) {
        final LDIFChangeRecord record;
        try {
          record = ldifReader.readChangeRecord(true);
        } catch (final LDIFException le) {
          Debug.debugException(le);
          throw new LDAPException(ResultCode.LOCAL_ERROR, ERR_MEM_HANDLER_APPLY_CHANGES_FROM_LDIF_READ_ERROR.get(le.getMessage()), le);
        } catch (final Exception e) {
          Debug.debugException(e);
          throw new LDAPException(ResultCode.LOCAL_ERROR, ERR_MEM_HANDLER_APPLY_CHANGES_FROM_LDIF_READ_ERROR.get(StaticUtils.getExceptionMessage(e)), e);
        }

        // A null record means the entire LDIF source has been consumed, so
        // the import succeeded and the snapshot can be discarded.
        if (record == null) {
          rollBack = false;
          return numChangesApplied;
        }

        // Dispatch the record to the handler for its operation type.
        if (record instanceof LDIFAddChangeRecord) {
          add(((LDIFAddChangeRecord) record).toAddRequest());
        } else if (record instanceof LDIFDeleteChangeRecord) {
          delete(((LDIFDeleteChangeRecord) record).toDeleteRequest());
        } else if (record instanceof LDIFModifyChangeRecord) {
          modify(((LDIFModifyChangeRecord) record).toModifyRequest());
        } else if (record instanceof LDIFModifyDNChangeRecord) {
          modifyDN(((LDIFModifyDNChangeRecord) record).toModifyDNRequest());
        } else {
          throw new LDAPException(ResultCode.LOCAL_ERROR, ERR_MEM_HANDLER_APPLY_CHANGES_UNSUPPORTED_CHANGE.get(String.valueOf(record)));
        }

        numChangesApplied++;
      }
    } finally {
      // Always close the reader, and restore the pre-import state unless the
      // full set of changes was applied successfully.
      try {
        ldifReader.close();
      } catch (final Exception e) {
        Debug.debugException(e);
      }

      if (rollBack) {
        restoreSnapshot(preImportSnapshot);
      }
    }
  }
}
Use of com.unboundid.ldif.LDIFDeleteChangeRecord in project ldapsdk by pingidentity.
Example from the class ScrambleAttributeTransformation, method transformChangeRecord.
/**
 * {@inheritDoc}
 */
@Override()
@Nullable()
public LDIFChangeRecord transformChangeRecord(@NotNull final LDIFChangeRecord r) {
  if (r == null) {
    return null;
  }

  // Add records are handled by scrambling the embedded entry.
  if (r instanceof LDIFAddChangeRecord) {
    final LDIFAddChangeRecord addRecord = (LDIFAddChangeRecord) r;
    return new LDIFAddChangeRecord(transformEntry(addRecord.getEntryToAdd()), addRecord.getControls());
  }

  // Delete records carry only a DN, which may itself need to be scrambled.
  if (r instanceof LDIFDeleteChangeRecord) {
    if (!scrambleEntryDNs) {
      return r;
    }
    return new LDIFDeleteChangeRecord(scrambleDN(r.getDN()), r.getControls());
  }

  // Modify records need each modification examined, with values scrambled
  // for any targeted attribute.
  if (r instanceof LDIFModifyChangeRecord) {
    final LDIFModifyChangeRecord modifyRecord = (LDIFModifyChangeRecord) r;
    final Modification[] sourceMods = modifyRecord.getModifications();
    final Modification[] updatedMods = new Modification[sourceMods.length];
    for (int i = 0; i < sourceMods.length; i++) {
      final Modification sourceMod = sourceMods[i];

      // A modification without values passes through unchanged.
      if (!sourceMod.hasValue()) {
        updatedMods[i] = sourceMod;
        continue;
      }

      // A modification whose attribute isn't configured for scrambling also
      // passes through unchanged.  The lookup is done on the lowercase base
      // name, ignoring any attribute options.
      final String baseName = StaticUtils.toLowerCase(Attribute.getBaseName(sourceMod.getAttributeName()));
      if (!attributes.containsKey(baseName)) {
        updatedMods[i] = sourceMod;
        continue;
      }

      // Scramble the values just like we do for an attribute.
      final Attribute scrambledAttribute = scrambleAttribute(sourceMod.getAttribute());
      updatedMods[i] = new Modification(sourceMod.getModificationType(), sourceMod.getAttributeName(), scrambledAttribute.getRawValues());
    }

    final String targetDN = scrambleEntryDNs ? scrambleDN(modifyRecord.getDN()) : modifyRecord.getDN();
    return new LDIFModifyChangeRecord(targetDN, updatedMods, modifyRecord.getControls());
  }

  // Modify DN records may need the DN, new RDN, and new superior DN
  // components scrambled.
  if (r instanceof LDIFModifyDNChangeRecord) {
    if (!scrambleEntryDNs) {
      return r;
    }
    final LDIFModifyDNChangeRecord modDNRecord = (LDIFModifyDNChangeRecord) r;
    return new LDIFModifyDNChangeRecord(scrambleDN(modDNRecord.getDN()), scrambleDN(modDNRecord.getNewRDN()), modDNRecord.deleteOldRDN(), scrambleDN(modDNRecord.getNewSuperiorDN()), modDNRecord.getControls());
  }

  // Unrecognized change record type; return it unaltered.  This should never
  // happen.
  return r;
}
Use of com.unboundid.ldif.LDIFDeleteChangeRecord in project ldapsdk by pingidentity.
Example from the class RedactAttributeTransformation, method transformChangeRecord.
/**
 * {@inheritDoc}
 */
@Override()
@Nullable()
public LDIFChangeRecord transformChangeRecord(@NotNull final LDIFChangeRecord r) {
  if (r == null) {
    return null;
  }

  // For an add record, redact the embedded entry.
  if (r instanceof LDIFAddChangeRecord) {
    final LDIFAddChangeRecord addRecord = (LDIFAddChangeRecord) r;
    return new LDIFAddChangeRecord(transformEntry(addRecord.getEntryToAdd()), addRecord.getControls());
  }

  // For a delete record, the only content is the DN, which may itself need
  // redacting.
  if (r instanceof LDIFDeleteChangeRecord) {
    if (!redactDNAttributes) {
      return r;
    }
    final LDIFDeleteChangeRecord deleteRecord = (LDIFDeleteChangeRecord) r;
    return new LDIFDeleteChangeRecord(redactDN(deleteRecord.getDN()), deleteRecord.getControls());
  }

  // For a modify record, redact all appropriate modification values.
  if (r instanceof LDIFModifyChangeRecord) {
    final LDIFModifyChangeRecord modifyRecord = (LDIFModifyChangeRecord) r;
    final String targetDN = redactDNAttributes ? redactDN(modifyRecord.getDN()) : modifyRecord.getDN();

    final Modification[] sourceMods = modifyRecord.getModifications();
    final Modification[] redactedMods = new Modification[sourceMods.length];
    for (int i = 0; i < sourceMods.length; i++) {
      final Modification sourceMod = sourceMods[i];

      // Value-less modifications pass through unchanged.
      if (!sourceMod.hasValue()) {
        redactedMods[i] = sourceMod;
        continue;
      }

      // If the attribute isn't one we redact outright, it may still have a
      // DN syntax, in which case attribute values within its DN values are
      // redacted individually.
      final String baseName = StaticUtils.toLowerCase(Attribute.getBaseName(sourceMod.getAttributeName()));
      if (!attributes.contains(baseName)) {
        if (redactDNAttributes && (schema != null) && (MatchingRule.selectEqualityMatchingRule(baseName, schema) instanceof DistinguishedNameMatchingRule)) {
          final String[] sourceValues = sourceMod.getValues();
          final String[] redactedValues = new String[sourceValues.length];
          for (int j = 0; j < sourceValues.length; j++) {
            redactedValues[j] = redactDN(sourceValues[j]);
          }
          redactedMods[i] = new Modification(sourceMod.getModificationType(), sourceMod.getAttributeName(), redactedValues);
        } else {
          redactedMods[i] = sourceMod;
        }
        continue;
      }

      // Replace the values with placeholders.  If there are multiple values
      // and we're configured to preserve the value count, then emit numbered
      // placeholders; otherwise collapse to a single placeholder.
      final ASN1OctetString[] rawValues = sourceMod.getRawValues();
      if (preserveValueCount && (rawValues.length > 1)) {
        final ASN1OctetString[] placeholders = new ASN1OctetString[rawValues.length];
        for (int j = 0; j < placeholders.length; j++) {
          placeholders[j] = new ASN1OctetString("***REDACTED" + (j + 1) + "***");
        }
        redactedMods[i] = new Modification(sourceMod.getModificationType(), sourceMod.getAttributeName(), placeholders);
      } else {
        redactedMods[i] = new Modification(sourceMod.getModificationType(), sourceMod.getAttributeName(), "***REDACTED***");
      }
    }

    return new LDIFModifyChangeRecord(targetDN, redactedMods, modifyRecord.getControls());
  }

  // For a modify DN record, the DN, new RDN, and new superior DN may all
  // contain components that need redacting.
  if (r instanceof LDIFModifyDNChangeRecord) {
    if (!redactDNAttributes) {
      return r;
    }
    final LDIFModifyDNChangeRecord modDNRecord = (LDIFModifyDNChangeRecord) r;
    return new LDIFModifyDNChangeRecord(redactDN(modDNRecord.getDN()), redactDN(modDNRecord.getNewRDN()), modDNRecord.deleteOldRDN(), redactDN(modDNRecord.getNewSuperiorDN()), modDNRecord.getControls());
  }

  // Unrecognized change record type; return it as-is.  We should never get
  // here.
  return r;
}
Use of com.unboundid.ldif.LDIFDeleteChangeRecord in project ldapsdk by pingidentity.
Example from the class AuditLogReader, method read.
/**
 * Reads the next audit log message from the log file.
 *
 * @return  The audit log message read from the log file, or {@code null} if
 *          there are no more messages to be read.
 *
 * @throws  IOException  If an error occurs while trying to read from the
 *                       file.
 *
 * @throws  AuditLogException  If an error occurs while trying to parse the
 *                             log message.
 */
@Nullable()
public AuditLogMessage read() throws IOException, AuditLogException {
  // Accumulate lines until we reach the end of the file, or a blank line
  // that follows at least one non-comment line.
  final List<String> messageLines = new ArrayList<>(20);
  final List<String> parseableLines = new ArrayList<>(20);
  while (true) {
    final String line = reader.readLine();
    if (line == null) {
      // We've hit the end of the file.
      break;
    }

    if (!line.isEmpty()) {
      // Remember every line of the message, but only non-comment lines will
      // be handed to the LDIF parser.
      messageLines.add(line);
      if (!line.startsWith("#")) {
        parseableLines.add(line);
      }
      continue;
    }

    if (parseableLines.isEmpty()) {
      // A blank line preceded only by comments or other blank lines.  That's
      // okay; discard what we have and keep scanning for a real message.
      messageLines.clear();
    } else {
      // A blank line terminating a message; parse what we've collected.
      break;
    }
  }

  // If we only saw comments and blank lines before the end of the file, then
  // there are no more messages.
  if (parseableLines.isEmpty()) {
    return null;
  }

  // Try to parse the set of non-comment lines as an LDIF change record.  If
  // that fails, then throw a log exception.
  final LDIFChangeRecord changeRecord;
  try {
    changeRecord = LDIFReader.decodeChangeRecord(StaticUtils.toArray(parseableLines, String.class));
  } catch (final Exception e) {
    Debug.debugException(e);
    final String concatenatedLogLines = StaticUtils.concatenateStrings("[ ", "\"", ", ", "\"", " ]", messageLines);
    throw new AuditLogException(messageLines, ERR_AUDIT_LOG_READER_CANNOT_PARSE_CHANGE_RECORD.get(concatenatedLogLines, StaticUtils.getExceptionMessage(e)), e);
  }

  // Wrap the change record in the audit log message type matching its change
  // type.
  if (changeRecord instanceof LDIFAddChangeRecord) {
    return new AddAuditLogMessage(messageLines, (LDIFAddChangeRecord) changeRecord);
  }
  if (changeRecord instanceof LDIFDeleteChangeRecord) {
    return new DeleteAuditLogMessage(messageLines, (LDIFDeleteChangeRecord) changeRecord);
  }
  if (changeRecord instanceof LDIFModifyChangeRecord) {
    return new ModifyAuditLogMessage(messageLines, (LDIFModifyChangeRecord) changeRecord);
  }
  if (changeRecord instanceof LDIFModifyDNChangeRecord) {
    return new ModifyDNAuditLogMessage(messageLines, (LDIFModifyDNChangeRecord) changeRecord);
  }

  // A change type we don't know how to represent.  This should never happen.
  final String concatenatedLogLines = StaticUtils.concatenateStrings("[ ", "\"", ", ", "\"", " ]", messageLines);
  throw new AuditLogException(messageLines, ERR_AUDIT_LOG_READER_UNSUPPORTED_CHANGE_RECORD.get(concatenatedLogLines, changeRecord.getChangeType().getName()));
}
Use of com.unboundid.ldif.LDIFDeleteChangeRecord in project ldapsdk by pingidentity.
Example from the class LDAPDiff, method identifyDifferences.
/**
 * Examines all of the entries in the provided set and identifies differences
 * between the source and target servers.  The differences will be written to
 * output files, and the return value will provide information about the
 * number of entries in each result category.
 *
 * @param  sourcePool     A connection pool that may be used to communicate
 *                        with the source server.  It must not be
 *                        {@code null}.
 * @param  targetPool     A connection pool that may be used to communicate
 *                        with the target server.  It must not be
 *                        {@code null}.
 * @param  baseDN         The base DN for entries to examine.  It must not be
 *                        {@code null}.
 * @param  schema         The schema to use in processing.  It may optionally
 *                        be {@code null} if no schema is available.
 * @param  resultCodeRef  A reference that may be updated to set the result
 *                        code that should be returned.  It must not be
 *                        {@code null} but may be unset.
 * @param  dnsToExamine   The set of DNs to examine.  It must not be
 *                        {@code null}.
 *
 * @return  An array of {@code long} values that provide the number of entries
 *          in each result category.  The array that is returned will contain
 *          six elements.  The first will be the number of entries that were
 *          found to be in sync between the source and target servers.  The
 *          second will be the number of entries that were present only in the
 *          target server and need to be added to the source server.  The
 *          third will be the number of entries that were present only in the
 *          source server and need to be removed.  The fourth will be the
 *          number of entries that were present in both servers but were not
 *          equivalent and therefore need to be modified in the source server.
 *          The fifth will be the number of entries that were initially
 *          identified but were subsequently not found in either server.  The
 *          sixth element will be the number of errors encountered while
 *          attempting to examine entries.
 *
 * @throws  LDAPException  If an unrecoverable error occurs during processing.
 */
@NotNull()
private long[] identifyDifferences(@NotNull final LDAPConnectionPool sourcePool, @NotNull final LDAPConnectionPool targetPool, @NotNull final DN baseDN, @Nullable final Schema schema, @NotNull final AtomicReference<ResultCode> resultCodeRef, @NotNull final TreeSet<LDAPDiffCompactDN> dnsToExamine) throws LDAPException {
  // Create LDIF writers that will be used to write the output files.  We want
  // to create the main output file even if we don't end up identifying any
  // changes, and it's also convenient to just go ahead and create the
  // temporary add and modify files now, too, even if we don't end up using
  // them.
  final File mergedOutputFile = outputLDIFArg.getValue();

  final File addFile = new File(mergedOutputFile.getAbsolutePath() + ".add");
  addFile.deleteOnExit();

  final File modFile = new File(mergedOutputFile.getAbsolutePath() + ".mod");
  modFile.deleteOnExit();

  long inSyncCount = 0L;
  long addCount = 0L;
  long deleteCount = 0L;
  long modifyCount = 0L;
  long missingCount = 0L;
  long errorCount = 0L;

  ParallelProcessor<LDAPDiffCompactDN, LDAPDiffProcessorResult> parallelProcessor = null;

  final String sourceHostPort = getServerHostPort("sourceHostname", "sourcePort");
  final String targetHostPort = getServerHostPort("targetHostname", "targetPort");

  final TreeSet<LDAPDiffCompactDN> missingEntryDNs = new TreeSet<>();

  try (LDIFWriter mergedWriter = createLDIFWriter(mergedOutputFile, INFO_LDAP_DIFF_MERGED_FILE_COMMENT.get(sourceHostPort, targetHostPort));
       LDIFWriter addWriter = createLDIFWriter(addFile);
       LDIFWriter modWriter = createLDIFWriter(modFile)) {
    // Create a parallel processor that will be used to retrieve and compare
    // entries from the source and target servers.
    final String[] attributes = parser.getTrailingArguments().toArray(StaticUtils.NO_STRINGS);
    final LDAPDiffProcessor processor = new LDAPDiffProcessor(sourcePool, targetPool, baseDN, schema, byteForByteArg.isPresent(), attributes, missingOnlyArg.isPresent());

    parallelProcessor = new ParallelProcessor<>(processor, new LDAPSDKThreadFactory("LDAPDiff Compare Processor", true), numThreadsArg.getValue(), 5);

    // Define variables that will be used to monitor progress and keep track
    // of information between passes.
    TreeSet<LDAPDiffCompactDN> currentPassDNs = dnsToExamine;
    TreeSet<LDAPDiffCompactDN> nextPassDNs = new TreeSet<>();
    final TreeSet<LDAPDiffCompactDN> deletedEntryDNs = new TreeSet<>();
    final List<LDAPDiffCompactDN> currentBatchOfDNs = new ArrayList<>(MAX_ENTRIES_PER_BATCH);

    // Perform up to the configured number of passes over the set of DNs to
    // identify differences between the source and target servers.
    for (int i = 1; i <= numPassesArg.getValue(); i++) {
      final boolean isLastPass = (i == numPassesArg.getValue());

      if (!quietArg.isPresent()) {
        out();
        wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_STARTING_COMPARE_PASS.get(i, numPassesArg.getValue(), currentPassDNs.size()));
      }

      // Process the changes in batches until we have gone through all of the
      // entries.
      nextPassDNs.clear();
      int differencesIdentifiedCount = 0;
      int processedCurrentPassCount = 0;
      final int totalCurrentPassCount = currentPassDNs.size();
      final Iterator<LDAPDiffCompactDN> dnIterator = currentPassDNs.iterator();
      while (dnIterator.hasNext()) {
        // Build a batch of DNs, removing them from the current pass set as we
        // go.
        currentBatchOfDNs.clear();
        while (dnIterator.hasNext()) {
          currentBatchOfDNs.add(dnIterator.next());
          dnIterator.remove();

          if (currentBatchOfDNs.size() >= MAX_ENTRIES_PER_BATCH) {
            break;
          }
        }

        // Process the batch of entries.
        final List<Result<LDAPDiffCompactDN, LDAPDiffProcessorResult>> results;
        try {
          results = parallelProcessor.processAll(currentBatchOfDNs);
        } catch (final Exception e) {
          Debug.debugException(e);
          throw new LDAPException(ResultCode.LOCAL_ERROR, ERR_LDAP_DIFF_ERROR_PROCESSING_BATCH.get(StaticUtils.getExceptionMessage(e)), e);
        }

        // Iterate through and handle the results.
        for (final Result<LDAPDiffCompactDN, LDAPDiffProcessorResult> result : results) {
          processedCurrentPassCount++;

          // If the comparison attempt failed, then retry the entry on a later
          // pass if there is one; otherwise record the error.
          final Throwable exception = result.getFailureCause();
          if (exception != null) {
            final LDAPDiffCompactDN compactDN = result.getInput();
            if (!isLastPass) {
              nextPassDNs.add(compactDN);
              differencesIdentifiedCount++;
            } else {
              final LDAPException reportException;
              if (exception instanceof LDAPException) {
                final LDAPException caughtException = (LDAPException) exception;
                reportException = new LDAPException(caughtException.getResultCode(), ERR_LDAP_DIFF_ERROR_COMPARING_ENTRY.get(compactDN.toDN(baseDN, schema).toString(), caughtException.getMessage()), caughtException.getMatchedDN(), caughtException.getReferralURLs(), caughtException.getResponseControls(), caughtException.getCause());
              } else {
                reportException = new LDAPException(ResultCode.LOCAL_ERROR, ERR_LDAP_DIFF_ERROR_COMPARING_ENTRY.get(compactDN.toDN(baseDN, schema).toString(), StaticUtils.getExceptionMessage(exception)), exception);
              }

              errorCount++;
              resultCodeRef.compareAndSet(null, reportException.getResultCode());

              final List<String> formattedResultLines = ResultUtils.formatResult(reportException, false, 0, (WRAP_COLUMN - 2));
              final Iterator<String> resultLineIterator = formattedResultLines.iterator();
              while (resultLineIterator.hasNext()) {
                mergedWriter.writeComment(resultLineIterator.next(), false, (!resultLineIterator.hasNext()));
              }
            }

            continue;
          }

          final LDAPDiffProcessorResult resultOutput = result.getOutput();
          final ChangeType changeType = resultOutput.getChangeType();
          if (changeType == null) {
            // A null change type means the entry is either in sync between
            // the two servers or missing from both.  Track missing entry DNs
            // so they can be listed in a comment at the end of the LDIF file.
            // BUG FIX: an earlier revision also incremented inSyncCount
            // unconditionally here, which double-counted in-sync entries and
            // counted missing entries as in sync; exactly one counter must be
            // updated per entry.
            if (resultOutput.isEntryMissing()) {
              missingCount++;
              missingEntryDNs.add(result.getInput());
            } else {
              inSyncCount++;
            }
          } else if (!isLastPass) {
            // This entry is out of sync, but this isn't the last pass, so
            // just hold on to the DN so that we'll re-examine the entry on
            // the next pass.
            nextPassDNs.add(result.getInput());
            differencesIdentifiedCount++;
          } else {
            // The entry is out of sync, and this is the last pass.  If the
            // entry should be deleted, then capture the DN in a sorted list.
            // If it's an add or modify, then write it to an appropriate
            // temporary file.  In each case, update the appropriate counter.
            differencesIdentifiedCount++;
            switch (changeType) {
              case DELETE:
                deletedEntryDNs.add(result.getInput());
                deleteCount++;
                break;
              case ADD:
                addWriter.writeChangeRecord(new LDIFAddChangeRecord(resultOutput.getEntry()), WARN_LDAP_DIFF_COMMENT_ADDED_ENTRY.get(targetHostPort, sourceHostPort));
                addCount++;
                break;
              case MODIFY:
              default:
                modWriter.writeChangeRecord(new LDIFModifyChangeRecord(resultOutput.getDN(), resultOutput.getModifications()), WARN_LDAP_DIFF_COMMENT_MODIFIED_ENTRY.get(sourceHostPort, targetHostPort));
                modifyCount++;
                break;
            }
          }
        }

        // Write a progress message.
        if (!quietArg.isPresent()) {
          final int percentComplete = Math.round(100.0f * processedCurrentPassCount / totalCurrentPassCount);
          wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_COMPARE_PROGRESS.get(processedCurrentPassCount, totalCurrentPassCount, percentComplete, differencesIdentifiedCount));
        }
      }

      // If this was the last pass, or if no differences remain to re-examine,
      // then we're done; otherwise sleep before the next iteration.
      if (isLastPass) {
        break;
      } else if (nextPassDNs.isEmpty()) {
        if (!quietArg.isPresent()) {
          wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_NO_NEED_FOR_ADDITIONAL_PASS.get());
        }
        break;
      } else {
        try {
          final int sleepTimeSeconds = secondsBetweenPassesArg.getValue();
          if (!quietArg.isPresent()) {
            wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_WAITING_BEFORE_NEXT_PASS.get(sleepTimeSeconds));
          }
          Thread.sleep(TimeUnit.SECONDS.toMillis(sleepTimeSeconds));
        } catch (final Exception e) {
          Debug.debugException(e);
        }
      }

      // Swap currentPassDNs (which will now be empty) and nextPassDN (which
      // contains the DNs of entries that were found out of sync in the
      // current pass) sets so that they will be correct for the next pass.
      final TreeSet<LDAPDiffCompactDN> emptyDNSet = currentPassDNs;
      currentPassDNs = nextPassDNs;
      nextPassDNs = emptyDNSet;
    }

    // If no differences at all were identified, then note that in a comment
    // at the end of the LDIF file.
    if ((addCount == 0) && (deleteCount == 0) && (modifyCount == 0)) {
      mergedWriter.writeComment(INFO_LDAP_DIFF_SERVERS_IN_SYNC.get(), true, false);
    }

    // Write any deleted entries (deepest DNs first) directly through the
    // merged writer before closing the writers.
    if (!deletedEntryDNs.isEmpty()) {
      mergedWriter.writeComment(INFO_LDAP_DIFF_COMMENT_DELETED_ENTRIES.get(), true, true);

      if (!quietArg.isPresent()) {
        out();
        wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_STARTING_DELETE_PASS.get(deleteCount));
      }

      int entryCount = 0;
      for (final LDAPDiffCompactDN compactDN : deletedEntryDNs.descendingSet()) {
        SearchResultEntry entry = null;
        LDAPException ldapException = null;
        final String dnString = compactDN.toDN(baseDN, schema).toString();
        try {
          entry = sourcePool.getEntry(dnString, attributes);
        } catch (final LDAPException e) {
          Debug.debugException(e);
          ldapException = new LDAPException(e.getResultCode(), ERR_LDAP_DIFF_CANNOT_GET_ENTRY_TO_DELETE.get(dnString, StaticUtils.getExceptionMessage(e)), e);
        }

        if (entry != null) {
          mergedWriter.writeComment(INFO_LDAP_DIFF_COMMENT_DELETED_ENTRY.get(sourceHostPort, targetHostPort), false, false);
          mergedWriter.writeComment("", false, false);
          for (final String line : entry.toLDIF(75)) {
            mergedWriter.writeComment(line, false, false);
          }
          mergedWriter.writeChangeRecord(new LDIFDeleteChangeRecord(entry.getDN()));
        } else if (ldapException != null) {
          mergedWriter.writeComment(ldapException.getExceptionMessage(), false, false);
          // BUG FIX: entry is guaranteed to be null in this branch, so
          // calling entry.getDN() here would throw a NullPointerException.
          // Use the DN string computed above instead.
          mergedWriter.writeChangeRecord(new LDIFDeleteChangeRecord(dnString));
        }

        entryCount++;
        if ((!quietArg.isPresent()) && ((entryCount % MAX_ENTRIES_PER_BATCH) == 0)) {
          final int percentComplete = Math.round(100.0f * entryCount / deleteCount);
          wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_DELETE_PROGRESS.get(entryCount, deleteCount, percentComplete));
        }
      }

      if (!quietArg.isPresent()) {
        final int percentComplete = Math.round(100.0f * entryCount / deleteCount);
        wrapOut(0, WRAP_COLUMN, INFO_LDAP_DIFF_DELETE_PROGRESS.get(entryCount, deleteCount, percentComplete));
      }
    }
  } catch (final IOException e) {
    Debug.debugException(e);
    throw new LDAPException(ResultCode.LOCAL_ERROR, ERR_LDAP_DIFF_ERROR_WRITING_OUTPUT.get(getToolName(), StaticUtils.getExceptionMessage(e)), e);
  } finally {
    if (parallelProcessor != null) {
      try {
        parallelProcessor.shutdown();
      } catch (final Exception e) {
        Debug.debugException(e);
      }
    }
  }

  // If any modified entries were identified, then append the mod file to the
  // merged change file.
  // BUG FIX: the comment constants for the mod and add files were previously
  // swapped, labeling modified entries as added and vice versa.
  if (modifyCount > 0L) {
    appendFileToFile(modFile, mergedOutputFile, INFO_LDAP_DIFF_COMMENT_MODIFIED_ENTRIES.get());
    modFile.delete();
  }

  // If any added entries were identified, then append the add file to the
  // merged change file.
  if (addCount > 0L) {
    appendFileToFile(addFile, mergedOutputFile, INFO_LDAP_DIFF_COMMENT_ADDED_ENTRIES.get());
    addFile.delete();
  }

  // If any entries were missing from both servers, then append a comment
  // listing them to the merged change file.
  if (!missingEntryDNs.isEmpty()) {
    try (FileOutputStream outputStream = new FileOutputStream(mergedOutputFile, true);
         LDIFWriter ldifWriter = new LDIFWriter(outputStream)) {
      ldifWriter.writeComment(INFO_LDAP_DIFF_COMMENT_MISSING_ENTRIES.get(), true, true);
      for (final LDAPDiffCompactDN missingEntryDN : missingEntryDNs) {
        ldifWriter.writeComment(INFO_LDAP_DIFF_COMMENT_MISSING_ENTRY.get(missingEntryDN.toDN(baseDN, schema).toString()), false, true);
      }
    } catch (final Exception e) {
      Debug.debugException(e);
      throw new LDAPException(ResultCode.LOCAL_ERROR, ERR_LDAP_DIFF_ERROR_WRITING_OUTPUT.get(getToolName(), StaticUtils.getExceptionMessage(e)), e);
    }
  }

  return new long[] { inSyncCount, addCount, deleteCount, modifyCount, missingCount, errorCount };
}
Aggregations