
Example 26 with Record

Use of com.datastax.oss.dsbulk.connectors.api.Record in project dsbulk by datastax.

From class CSVConnectorEndToEndCCMIT, method cas_load_with_errors.

/**
 * Test for CAS failures (DAT-384).
 */
@Test
void cas_load_with_errors() {
    session.execute("DROP TABLE IF EXISTS test_cas");
    session.execute("CREATE TABLE test_cas (pk int, cc int, v int, PRIMARY KEY (pk, cc))");
    session.execute("INSERT INTO test_cas (pk, cc, v) VALUES (1, 1, 1)");
    session.execute("INSERT INTO test_cas (pk, cc, v) VALUES (1, 2, 2)");
    session.execute("INSERT INTO test_cas (pk, cc, v) VALUES (1, 3, 3)");
    // the two failed CAS records below will cause the entire batch to fail
    // will fail: row (pk=1, cc=1) already exists
    Record record1Failed = RecordUtils.mappedCSV("pk", "1", "cc", "1", "v", "1");
    // will fail: row (pk=1, cc=2) already exists
    Record record2Failed = RecordUtils.mappedCSV("pk", "1", "cc", "2", "v", "2");
    // would succeed alone, but will not be applied since the whole batch fails
    Record record3NotApplied = RecordUtils.mappedCSV("pk", "1", "cc", "4", "v", "4");
    MockConnector.mockReads(record1Failed, record2Failed, record3NotApplied);
    List<String> args = new ArrayList<>();
    args.add("load");
    args.add("--connector.name");
    args.add("mock");
    args.add("--schema.keyspace");
    args.add(session.getKeyspace().get().asInternal());
    args.add("--schema.query");
    args.add("INSERT INTO test_cas (pk, cc, v) VALUES (:pk, :cc, :v) IF NOT EXISTS");
    ExitStatus status = new DataStaxBulkLoader(addCommonSettings(args)).run();
    assertStatus(status, STATUS_COMPLETED_WITH_ERRORS);
    Path bad = OperationDirectory.getCurrentOperationDirectory().map(dir -> dir.resolve("paxos.bad")).orElse(null);
    assertThat(bad).exists();
    assertThat(FileUtils.readAllLines(bad))
        .containsExactly(
            record1Failed.getSource().toString(),
            record2Failed.getSource().toString(),
            record3NotApplied.getSource().toString());
    Path errors = OperationDirectory.getCurrentOperationDirectory().map(dir -> dir.resolve("paxos-errors.log")).orElse(null);
    assertThat(errors).exists();
    assertThat(FileUtils.readAllLines(errors).collect(Collectors.joining("\n")))
        .contains(
            String.format(
                "Resource: %s\n"
                    + "    Position: %d\n"
                    + "    Source: %s\n"
                    + "    INSERT INTO test_cas (pk, cc, v) VALUES (:pk, :cc, :v) IF NOT EXISTS\n"
                    + "    pk: 1\n"
                    + "    cc: 1\n"
                    + "    v: 1",
                record1Failed.getResource(),
                record1Failed.getPosition(),
                record1Failed.getSource()),
            String.format(
                "Resource: %s\n"
                    + "    Position: %d\n"
                    + "    Source: %s\n"
                    + "    INSERT INTO test_cas (pk, cc, v) VALUES (:pk, :cc, :v) IF NOT EXISTS\n"
                    + "    pk: 1\n"
                    + "    cc: 2\n"
                    + "    v: 2",
                record2Failed.getResource(),
                record2Failed.getPosition(),
                record2Failed.getSource()),
            String.format(
                "Resource: %s\n"
                    + "    Position: %d\n"
                    + "    Source: %s\n"
                    + "    INSERT INTO test_cas (pk, cc, v) VALUES (:pk, :cc, :v) IF NOT EXISTS\n"
                    + "    pk: 1\n"
                    + "    cc: 4\n"
                    + "    v: 4",
                record3NotApplied.getResource(),
                record3NotApplied.getPosition(),
                record3NotApplied.getSource()),
            "Failed conditional updates:",
            "\"[applied]\": false\npk: 1\ncc: 1\nv: 1",
            "\"[applied]\": false\npk: 1\ncc: 2\nv: 2");
    List<Row> rows = session.execute("SELECT v FROM test_cas WHERE pk = 1").all();
    assertThat(rows).hasSize(3);
    assertThat(rows.get(0).getInt(0)).isEqualTo(1);
    assertThat(rows.get(1).getInt(0)).isEqualTo(2);
    assertThat(rows.get(2).getInt(0)).isEqualTo(3);
}
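
For context, the "[applied]" flag asserted above is the driver-level signal for CAS outcomes. A minimal sketch of what one of the failed inserts looks like when issued directly through the Java driver (illustrative only, not part of the test class):

ResultSet rs = session.execute(
    "INSERT INTO test_cas (pk, cc, v) VALUES (1, 1, 1) IF NOT EXISTS");
if (!rs.wasApplied()) {
    // On a CAS failure the server returns a row echoing the existing values
    // alongside the [applied] column; this is the data dsbulk reports under
    // "Failed conditional updates" in paxos-errors.log.
    Row existing = rs.one();
    boolean applied = existing.getBoolean("[applied]"); // false
    int currentV = existing.getInt("v"); // 1: the pre-existing value wins
}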

Example 27 with Record

Use of com.datastax.oss.dsbulk.connectors.api.Record in project dsbulk by datastax.

From class SearchEndToEndCCMIT, method normal_unload_of_search_enabled_table.

/**
 * Test for DAT-365: regular unload of a search-enabled table should not contain the solr_query
 * column.
 */
@Test
void normal_unload_of_search_enabled_table() {
    session.execute("CREATE TABLE IF NOT EXISTS test_search2 (pk int, cc int, v varchar, PRIMARY KEY (pk, cc))");
    session.execute("CREATE SEARCH INDEX IF NOT EXISTS ON test_search2 WITH COLUMNS v { indexed:true };");
    session.execute("INSERT INTO test_search2 (pk, cc, v) VALUES (0, 0, 'foo')");
    session.execute("INSERT INTO test_search2 (pk, cc, v) VALUES (0, 1, 'bar')");
    session.execute("INSERT INTO test_search2 (pk, cc, v) VALUES (0, 2, 'qix')");
    // Wait until index is built
    await().atMost(ONE_MINUTE).until(() -> !session.execute("SELECT v FROM test_search2 WHERE solr_query = '{\"q\": \"v:foo\"}'").all().isEmpty());
    List<String> args = new ArrayList<>();
    args.add("unload");
    args.add("--connector.name");
    args.add("mock");
    args.add("--schema.keyspace");
    args.add(session.getKeyspace().map(CqlIdentifier::asInternal).orElseThrow(IllegalStateException::new));
    args.add("--schema.table");
    args.add("test_search2");
    ExitStatus status = new DataStaxBulkLoader(addCommonSettings(args)).run();
    assertStatus(status, STATUS_OK);
    assertThat(records).hasSize(3).satisfies(record -> {
        assertThat(record.fields()).hasSize(3);
        assertThat(record.getFieldValue(new DefaultMappedField("pk"))).isEqualTo("0");
        assertThat(record.getFieldValue(new DefaultMappedField("cc"))).isEqualTo("0");
        assertThat(record.getFieldValue(new DefaultMappedField("v"))).isEqualTo("foo");
    }, Index.atIndex(0)).satisfies(record -> {
        assertThat(record.fields()).hasSize(3);
        assertThat(record.getFieldValue(new DefaultMappedField("pk"))).isEqualTo("0");
        assertThat(record.getFieldValue(new DefaultMappedField("cc"))).isEqualTo("1");
        assertThat(record.getFieldValue(new DefaultMappedField("v"))).isEqualTo("bar");
    }, Index.atIndex(1)).satisfies(record -> {
        assertThat(record.fields()).hasSize(3);
        assertThat(record.getFieldValue(new DefaultMappedField("pk"))).isEqualTo("0");
        assertThat(record.getFieldValue(new DefaultMappedField("cc"))).isEqualTo("2");
        assertThat(record.getFieldValue(new DefaultMappedField("v"))).isEqualTo("qix");
    }, Index.atIndex(2));
}
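
The interesting part of the assertion above is what is absent: each unloaded record has exactly three fields, meaning the solr_query pseudo-column was excluded from the generated SELECT. A minimal sketch of that exclusion, assuming a plain list of column names (this is not dsbulk's actual implementation):

List<String> columns = Arrays.asList("pk", "cc", "v", "solr_query");
String select = columns.stream()
    .filter(c -> !c.equals("solr_query"))
    .collect(Collectors.joining(", ", "SELECT ", " FROM test_search2"));
// select == "SELECT pk, cc, v FROM test_search2"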

Example 28 with Record

Use of com.datastax.oss.dsbulk.connectors.api.Record in project dsbulk by datastax.

From class UnloadWorkflow, method manyWriters.

private Flux<Record> manyWriters() {
    // writeConcurrency and readConcurrency are both >= numCores / 2 here
    int actualConcurrency = Math.min(readConcurrency, writeConcurrency);
    int numThreads = Math.min(numCores * 2, actualConcurrency);
    Scheduler scheduler = Schedulers.newParallel(numThreads, new DefaultThreadFactory("workflow"));
    schedulers.add(scheduler);
    return Flux.fromIterable(readStatements).flatMap(results -> {
        Flux<Record> records =
            Flux.from(executor.readReactive(results))
                .publishOn(scheduler, 500)
                .transform(queryWarningsHandler)
                .transform(totalItemsMonitor)
                .transform(totalItemsCounter)
                .transform(failedReadResultsMonitor)
                .transform(failedReadsHandler)
                .map(readResultMapper::map)
                .transform(failedRecordsMonitor)
                .transform(unmappableRecordsHandler);
        if (actualConcurrency == writeConcurrency) {
            records = records.transform(writer);
        } else {
            // If the actual concurrency is less than the connector's desired write
            // concurrency, we need to give the connector a chance to switch writers
            // frequently so that it can redirect records to all the final destinations
            // (for example, to that many files on disk). If the connector is correctly
            // implemented, each window will be redirected to a different destination
            // in round-robin fashion.
            records = records.window(500).flatMap(window -> window.transform(writer), 1, 500);
        }
        return records.transform(failedRecordsMonitor).transform(failedRecordsHandler);
    }, actualConcurrency, 500);
}
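
The round-robin behavior described in the comment hinges on flatMap's concurrency argument: with concurrency 1, each window is drained completely before the next is subscribed, giving the connector a clean boundary at which to rotate writers. A minimal, self-contained Reactor sketch of the same pattern (illustrative values, not dsbulk code):

Flux.range(0, 2000)
    .window(500)
    .flatMap(window -> window
        // stand-in for window.transform(writer): each new subscription marks
        // the point where a well-behaved connector would switch output files
        .doOnSubscribe(s -> System.out.println("switching to next writer")),
        1, 500)
    .blockLast();
// prints "switching to next writer" four times, once per 500-record window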

Example 29 with Record

Use of com.datastax.oss.dsbulk.connectors.api.Record in project dsbulk by datastax.

From class UnloadWorkflow, method execute.

@Override
public boolean execute() {
    LOGGER.debug("{} started.", this);
    metricsManager.start();
    Flux<Record> flux;
    if (writeConcurrency == 1) {
        flux = oneWriter();
    } else if (writeConcurrency < numCores / 2 || readConcurrency < numCores / 2) {
        flux = fewWriters();
    } else {
        flux = manyWriters();
    }
    Stopwatch timer = Stopwatch.createStarted();
    flux.then().flux().transform(terminationHandler).blockLast();
    timer.stop();
    int totalErrors = logManager.getTotalErrors();
    metricsManager.stop(timer.elapsed(), totalErrors == 0);
    Duration elapsed = DurationUtils.round(timer.elapsed(), TimeUnit.SECONDS);
    String elapsedStr = elapsed.isZero() ? "less than one second" : DurationUtils.formatDuration(elapsed);
    if (totalErrors == 0) {
        LOGGER.info("{} completed successfully in {}.", this, elapsedStr);
    } else {
        LOGGER.warn("{} completed with {} errors in {}.", this, totalErrors, elapsedStr);
    }
    return totalErrors == 0;
}
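
To make the dispatch above concrete, here is the same decision extracted into a standalone helper, with example calls assuming an 8-core machine (illustrative values, not dsbulk code):

static String pickStrategy(int writeConcurrency, int readConcurrency, int numCores) {
    if (writeConcurrency == 1) return "oneWriter";
    if (writeConcurrency < numCores / 2 || readConcurrency < numCores / 2) return "fewWriters";
    return "manyWriters";
}
// pickStrategy(1, 8, 8) -> "oneWriter"
// pickStrategy(3, 8, 8) -> "fewWriters"   (write side below half the cores)
// pickStrategy(6, 2, 8) -> "fewWriters"   (read side below half the cores)
// pickStrategy(6, 6, 8) -> "manyWriters"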

Example 30 with Record

Use of com.datastax.oss.dsbulk.connectors.api.Record in project dsbulk by datastax.

From class MappedStatementPrinter, method appendRecord.

default void appendRecord(MappedStatement statement, StatementWriter out) {
    Record record = statement.getRecord();
    out.newLine()
        .indent()
        .append("Resource: ")
        .append(String.valueOf(record.getResource()))
        .newLine()
        .indent()
        .append("Position: ")
        .append(String.valueOf(record.getPosition()));
    if (record.getSource() != null) {
        out.newLine().indent().append("Source: ").append(LogManagerUtils.formatSource(record));
    }
}
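
For reference, the blocks this printer emits are what Example 26 asserts against in paxos-errors.log; for a record read from a CSV file the rendered output looks roughly like this (resource and values are hypothetical):

Resource: file:///tmp/data.csv
    Position: 1
    Source: 1,1,1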

Aggregations

Record (com.datastax.oss.dsbulk.connectors.api.Record): 54 usages
DefaultRecord (com.datastax.oss.dsbulk.connectors.api.DefaultRecord): 40 usages
Config (com.typesafe.config.Config): 39 usages
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 37 usages
Test (org.junit.jupiter.api.Test): 35 usages
ErrorRecord (com.datastax.oss.dsbulk.connectors.api.ErrorRecord): 24 usages
Path (java.nio.file.Path): 24 usages
DefaultIndexedField (com.datastax.oss.dsbulk.connectors.api.DefaultIndexedField): 10 usages
Function (java.util.function.Function): 9 usages
MethodSource (org.junit.jupiter.params.provider.MethodSource): 9 usages
DefaultMappedField (com.datastax.oss.dsbulk.connectors.api.DefaultMappedField): 8 usages
ArrayList (java.util.ArrayList): 8 usages
List (java.util.List): 8 usages
Publisher (org.reactivestreams.Publisher): 8 usages
DefaultErrorRecord (com.datastax.oss.dsbulk.connectors.api.DefaultErrorRecord): 7 usages
IOException (java.io.IOException): 7 usages
ValueSource (org.junit.jupiter.params.provider.ValueSource): 7 usages
Flux (reactor.core.publisher.Flux): 7 usages
DataStaxBulkLoader (com.datastax.oss.dsbulk.runner.DataStaxBulkLoader): 6 usages
ExitStatus (com.datastax.oss.dsbulk.runner.ExitStatus): 6 usages