Use of com.datastax.oss.dsbulk.executor.api.result.ReadResult in project dsbulk by datastax.
The class BulkExecutorITBase, method verifyReads:
protected void verifyReads(int expectedSuccessful, int expectedFailed, Iterable<ReadResult> actual) {
  AtomicInteger i = new AtomicInteger();
  // Successful results must carry a row; the sorted pk values must form the contiguous sequence 0..n-1.
  long actualSuccessful =
      Flux.fromIterable(actual)
          .filter(Result::isSuccess)
          .map(result -> result.getRow().orElseThrow(AssertionError::new))
          .map(row -> row.getInt("pk"))
          .sort()
          .doOnNext(pk -> {
            assertThat(pk).isEqualTo(i.get());
            i.getAndIncrement();
          })
          .count()
          .blockOptional()
          .orElse(0L);
  assertThat(actualSuccessful).isEqualTo(expectedSuccessful);
  // Failed results must carry an error and no row.
  long actualFailed =
      Flux.fromIterable(actual)
          .filter(r -> !r.isSuccess())
          .doOnNext(r -> {
            assertThat(r.getRow().isPresent()).isFalse();
            assertThat(r.getError().isPresent()).isTrue();
            BulkExecutionException error = r.getError().get();
            verifyException(error);
          })
          .count()
          .blockOptional()
          .orElse(0L);
  assertThat(actualFailed).isEqualTo(expectedFailed);
}
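A hypothetical sketch of how a subclass test might feed this helper; the table layout (100 rows with an int column "pk" valued 0..99) and the readSync call with a result consumer are assumptions for illustration, not verbatim dsbulk test code:

  // Hypothetical usage sketch (not verbatim dsbulk code): collect ReadResults
  // from a bulk executor and hand them to the helper above. Assumes a read
  // returning 100 rows with an int column "pk" valued 0..99, and an executor
  // configured not to throw on failure, so errors surface as failed ReadResults.
  List<ReadResult> results = new ArrayList<>();
  executor.readSync("SELECT pk FROM test_ks.test_table", results::add);
  verifyReads(100, 0, results);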
Use of com.datastax.oss.dsbulk.executor.api.result.ReadResult in project dsbulk by datastax.
The class LogManagerTest, method should_stop_when_unrecoverable_error_reading:
@Test
void should_stop_when_unrecoverable_error_reading() throws Exception {
  Path outputDir = Files.createTempDirectory("test");
  LogManager logManager = new LogManager(
      session, outputDir,
      ErrorThreshold.forAbsoluteValue(2), ErrorThreshold.forAbsoluteValue(0),
      true, statementFormatter, EXTENDED, rowFormatter);
  logManager.init();
  DefaultReadResult result = new DefaultReadResult(
      new BulkExecutionException(
          new DriverExecutionException(new IllegalArgumentException("error 1")),
          mockBoundStatement("SELECT 1")));
  Flux<ReadResult> stmts = Flux.just(result);
  try {
    stmts.transform(logManager.newFailedReadsHandler()).blockLast();
    fail("Expecting DriverExecutionException to be thrown");
  } catch (DriverExecutionException e) {
    assertThat(e.getCause()).isInstanceOf(IllegalArgumentException.class).hasMessage("error 1");
  }
  logManager.close();
  Path errors = logManager.getOperationDirectory().resolve("unload-errors.log");
  assertThat(errors.toFile()).exists();
  assertThat(FileUtils.listAllFilesInDirectory(logManager.getOperationDirectory()))
      .containsOnly(errors);
  List<String> lines = Files.readAllLines(errors, UTF_8);
  String content = String.join("\n", lines);
  assertThat(content)
      .doesNotContain("Resource: ")
      .doesNotContain("Source: ")
      .doesNotContain("Position: ")
      .contains("SELECT 1")
      .contains("error 1")
      .containsOnlyOnce(
          "com.datastax.oss.dsbulk.executor.api.exception.BulkExecutionException: Statement execution failed: SELECT 1");
}
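For reference, DefaultReadResult has two shapes, both visible on this page: a failed result wraps only a BulkExecutionException (as above), while a successful result wraps the statement, execution info, and a row (as in the ContinuousReadResultSubscription example further down). A sketch of inspecting both; mockExecutionInfo() and mockRow() are assumed helpers named here for illustration only:

  // Sketch: the two states of a ReadResult. The failure constructor is taken
  // from the test above, the success constructor from toPage further down;
  // mockExecutionInfo() and mockRow() are hypothetical helpers.
  ReadResult failed = new DefaultReadResult(
      new BulkExecutionException(new RuntimeException("boom"), mockBoundStatement("SELECT 1")));
  assertThat(failed.isSuccess()).isFalse();
  assertThat(failed.getRow()).isEmpty();

  ReadResult ok = new DefaultReadResult(mockBoundStatement("SELECT 1"), mockExecutionInfo(), mockRow());
  assertThat(ok.isSuccess()).isTrue();
  assertThat(ok.getRow()).isPresent();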
Use of com.datastax.oss.dsbulk.executor.api.result.ReadResult in project dsbulk by datastax.
The class LogManagerTest, method should_stop_when_sample_size_is_met_and_percentage_exceeded:
@Test
void should_stop_when_sample_size_is_met_and_percentage_exceeded() throws Exception {
  Path outputDir = Files.createTempDirectory("test");
  LogManager logManager = new LogManager(
      session, outputDir,
      ErrorThreshold.forRatio(0.01f, 100), ErrorThreshold.forAbsoluteValue(0),
      true, statementFormatter, EXTENDED, rowFormatter);
  logManager.init();
  Flux<ReadResult> stmts = Flux.just(failedReadResult1);
  try {
    stmts.repeat(101)
        .transform(logManager.newTotalItemsCounter())
        .transform(logManager.newFailedReadsHandler())
        .blockLast();
    fail("Expecting TooManyErrorsException to be thrown");
  } catch (TooManyErrorsException e) {
    assertThat(e).hasMessage("Too many errors, the maximum allowed is 1%.");
    Assertions.assertThat(((RatioErrorThreshold) e.getThreshold()).getMaxErrorRatio())
        .isEqualTo(0.01f);
  }
  logManager.close();
  Path errors = logManager.getOperationDirectory().resolve("unload-errors.log");
  assertThat(errors.toFile()).exists();
  assertThat(FileUtils.listAllFilesInDirectory(logManager.getOperationDirectory()))
      .containsOnly(errors);
  List<String> lines = Files.readAllLines(errors, UTF_8);
  assertThat(lines.stream().filter(l -> l.contains("BulkExecutionException")).count())
      .isEqualTo(100);
}
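ErrorThreshold.forRatio(0.01f, 100) pairs a maximum error ratio with a minimum sample size: as the test name suggests, the ratio is only enforced once the sample size is met, hence the repeat(101) needed to trip it. A simplified illustration of such a check (not dsbulk's RatioErrorThreshold implementation):

  // Simplified illustration of a ratio threshold gated by a minimum sample
  // size; not the actual RatioErrorThreshold implementation.
  static boolean thresholdExceeded(long errors, long total, float maxRatio, long minSample) {
    return total >= minSample && (float) errors / total > maxRatio;
  }
  // thresholdExceeded(1, 100, 0.01f, 100) -> false (ratio equals the maximum)
  // thresholdExceeded(2, 101, 0.01f, 100) -> true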
Use of com.datastax.oss.dsbulk.executor.api.result.ReadResult in project dsbulk by datastax.
The class LogManager, method appendUnmappableReadResultToDebugFile:
// row -> record failed (unload workflow)
@SuppressWarnings("BlockingMethodInNonBlockingContext")
private Mono<ErrorRecord> appendUnmappableReadResultToDebugFile(ErrorRecord record) {
  try {
    Path logFile = operationDirectory.resolve(MAPPING_ERRORS_FILE);
    PrintWriter writer = openFiles.get(logFile);
    assert writer != null;
    // Don't print the resource since it will be just cql://keyspace/table
    if (record.getSource() instanceof ReadResult) {
      ReadResult source = (ReadResult) record.getSource();
      appendStatement(source, MAPPING_ERRORS_FILE, false);
      source.getRow().ifPresent(row -> {
        writer.print("Row: ");
        String format = rowFormatter.format(row, protocolVersion, codecRegistry);
        LogManagerUtils.printAndMaybeAddNewLine(format, writer);
      });
    }
    stackTracePrinter.printStackTrace(record.getError(), writer);
    writer.println();
    writer.flush();
    return Mono.just(record);
  } catch (Exception e) {
    return Mono.error(e);
  }
}
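The try/catch shape is the usual way to adapt blocking I/O to a reactive signature: return the input in Mono.just on success, and surface any exception as Mono.error rather than throwing. A hypothetical caller, for illustration:

  // Hypothetical usage: flatMapping the method into a Reactor pipeline lets
  // the Mono.error from the catch block above terminate the whole flux;
  // errorRecords is an assumed source of ErrorRecord items.
  Flux.fromIterable(errorRecords)
      .flatMap(this::appendUnmappableReadResultToDebugFile)
      .blockLast();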
Use of com.datastax.oss.dsbulk.executor.api.result.ReadResult in project dsbulk by datastax.
The class ContinuousReadResultSubscription, method toPage:
@Override
Page toPage(ContinuousAsyncResultSet rs, ExecutionContext local) {
  Iterator<Row> rows = rs.currentPage().iterator();
  Iterator<ReadResult> results = new AbstractIterator<ReadResult>() {
    @Override
    protected ReadResult computeNext() {
      if (rows.hasNext()) {
        Row row = rows.next();
        if (listener != null) {
          listener.onRowReceived(row, local);
        }
        return new DefaultReadResult(statement, rs.getExecutionInfo(), row);
      }
      return endOfData();
    }
  };
  return new ContinuousPage(rs, results);
}
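AbstractIterator here follows Guava's contract (dsbulk may use the driver's shaded copy): computeNext() either returns the next element or calls endOfData() to finish, so rows are adapted to ReadResults lazily and the listener fires once per row actually consumed. A minimal standalone illustration of the same contract:

  // Minimal example of the Guava AbstractIterator contract used above:
  // computeNext() returns the next element or calls endOfData() to stop.
  Iterator<Integer> firstThree = new AbstractIterator<Integer>() {
    private int next = 0;

    @Override
    protected Integer computeNext() {
      return next < 3 ? next++ : endOfData();
    }
  };
  // Yields 0, 1, 2; hasNext() then returns false.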