Use of com.datastax.oss.dsbulk.connectors.api.RecordMetadata in project dsbulk by datastax.
Class PrometheusEndToEndSimulacronIT, method newConnectorDelegate:
@NonNull
private Connector newConnectorDelegate() {
  return new Connector() {

    @Override
    public int readConcurrency() {
      return 1;
    }

    @Override
    public boolean supports(@NonNull ConnectorFeature feature) {
      return feature == CommonConnectorFeature.INDEXED_RECORDS;
    }

    @NonNull
    @Override
    public Publisher<Publisher<Record>> read() {
      AtomicInteger counter = new AtomicInteger();
      AtomicBoolean running = new AtomicBoolean(true);
      // Emit synthetic indexed records until the timer flips `running` to false,
      // then complete the inner flux.
      return Flux.just(
          Flux.generate(
              sink -> {
                int i = counter.getAndAdd(1);
                if (i == 0) {
                  startTimer(running);
                }
                if (running.get()) {
                  Record record =
                      RecordUtils.indexedCSV("pk", "pk" + 1, "cc", "cc" + 1, "v", "v" + 1);
                  sink.next(record);
                } else {
                  sink.complete();
                }
              }));
    }

    @NonNull
    @Override
    public RecordMetadata getRecordMetadata() {
      return (fieldType, cqlType) -> GenericType.STRING;
    }

    @NonNull
    @Override
    public Function<Publisher<Record>, Publisher<Record>> write() {
      throw new UnsupportedOperationException();
    }

    @Override
    public int writeConcurrency() {
      throw new UnsupportedOperationException();
    }
  };
}
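In this test connector, the record metadata is a lambda that declares every field as a Java String, so the engine converts each emitted value from String to the target CQL type. A minimal sketch of the same idea in isolation (the variable name allStrings is illustrative, not from the project):

// Every connector field is exposed as a String; the codec layer converts it to the CQL type.
RecordMetadata allStrings = (field, cqlType) -> GenericType.STRING;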
Use of com.datastax.oss.dsbulk.connectors.api.RecordMetadata in project dsbulk by datastax.
Class UnloadWorkflow, method init:
@Override
public void init() throws Exception {
  // Initialize the settings manager for an UNLOAD operation and fetch the individual settings.
  settingsManager.init("UNLOAD", false, SchemaGenerationStrategy.READ_AND_MAP);
  executionId = settingsManager.getExecutionId();
  LogSettings logSettings = settingsManager.getLogSettings();
  DriverSettings driverSettings = settingsManager.getDriverSettings();
  ConnectorSettings connectorSettings = settingsManager.getConnectorSettings();
  SchemaSettings schemaSettings = settingsManager.getSchemaSettings();
  ExecutorSettings executorSettings = settingsManager.getExecutorSettings();
  CodecSettings codecSettings = settingsManager.getCodecSettings();
  MonitoringSettings monitoringSettings = settingsManager.getMonitoringSettings();
  EngineSettings engineSettings = settingsManager.getEngineSettings();
  engineSettings.init();
  // First verify that dry-run is off; that's unsupported for unload.
  if (engineSettings.isDryRun()) {
    throw new IllegalArgumentException("Dry-run is not supported for unload");
  }
  // No logs should be produced until the following statement returns.
  logSettings.init();
  connectorSettings.init(false);
  connector = connectorSettings.getConnector();
  connector.init();
  driverSettings.init(false);
  logSettings.logEffectiveSettings(
      settingsManager.getEffectiveBulkLoaderConfig(), driverSettings.getDriverConfig());
  codecSettings.init();
  monitoringSettings.init();
  executorSettings.init();
  // Create the codec factory, the driver session and the schema-related components.
  ConvertingCodecFactory codecFactory =
      codecSettings.createCodecFactory(
          schemaSettings.isAllowExtraFields(), schemaSettings.isAllowMissingFields());
  session =
      driverSettings.newSession(
          executionId, codecFactory.getCodecRegistry(), monitoringSettings.getRegistry());
  ClusterInformationUtils.printDebugInfoAboutCluster(session);
  schemaSettings.init(
      session,
      codecFactory,
      connector.supports(CommonConnectorFeature.INDEXED_RECORDS),
      connector.supports(CommonConnectorFeature.MAPPED_RECORDS));
  logManager = logSettings.newLogManager(session, false);
  logManager.init();
  metricsManager =
      monitoringSettings.newMetricsManager(
          false,
          false,
          logManager.getOperationDirectory(),
          logSettings.getVerbosity(),
          session.getContext().getProtocolVersion(),
          session.getContext().getCodecRegistry(),
          schemaSettings.getRowType());
  metricsManager.init();
  // The connector's record metadata drives how read results are mapped to records.
  RecordMetadata recordMetadata = connector.getRecordMetadata();
  readResultMapper =
      schemaSettings.createReadResultMapper(
          session, recordMetadata, codecFactory, logSettings.isSources());
  readStatements = schemaSettings.createReadStatements(session);
  executor =
      executorSettings.newReadExecutor(
          session, metricsManager.getExecutionListener(), schemaSettings.isSearchQuery());
  closed.set(false);
  writer = connector.write();
  // Monitors, counters and handlers for the various success, failure and logging paths.
  totalItemsMonitor = metricsManager.newTotalItemsMonitor();
  failedRecordsMonitor = metricsManager.newFailedItemsMonitor();
  failedReadResultsMonitor = metricsManager.newFailedItemsMonitor();
  failedRecordsHandler = logManager.newFailedRecordsHandler();
  totalItemsCounter = logManager.newTotalItemsCounter();
  failedReadsHandler = logManager.newFailedReadsHandler();
  queryWarningsHandler = logManager.newQueryWarningsHandler();
  unmappableRecordsHandler = logManager.newUnmappableRecordsHandler();
  terminationHandler = logManager.newTerminationHandler();
  // Compute write and read concurrency.
  numCores = Runtime.getRuntime().availableProcessors();
  if (connector.writeConcurrency() < 1) {
    throw new IllegalArgumentException(
        "Invalid write concurrency: " + connector.writeConcurrency());
  }
  writeConcurrency = connector.writeConcurrency();
  LOGGER.debug("Using write concurrency: {}", writeConcurrency);
  readConcurrency =
      Math.min(
          readStatements.size(),
          // a good readConcurrency is then numCores.
          engineSettings.getMaxConcurrentQueries().orElse(numCores));
  LOGGER.debug(
      "Using read concurrency: {} (user-supplied: {})",
      readConcurrency,
      engineSettings.getMaxConcurrentQueries().isPresent());
  schedulers = new HashSet<>();
}
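Here the connector's RecordMetadata is passed to schemaSettings.createReadResultMapper, so the mapper knows which Java type to produce for each record field when converting rows read from the database. A hedged sketch of that lookup, assuming the single abstract method of RecordMetadata takes a field and a CQL type and returns a GenericType, as the lambdas on this page suggest (the method and variable names below are assumptions, not verified against the project):

// Illustrative only: ask the connector which Java type it expects for this field,
// given the CQL type of the corresponding column, before converting the value.
GenericType<?> fieldType = recordMetadata.getFieldType(field, columnType);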
Use of com.datastax.oss.dsbulk.connectors.api.RecordMetadata in project dsbulk by datastax.
Class MockConnector, method mockWrites:
/**
 * Sets up the mock connector to emulate writes; it will store all received records as if they
 * were written to an external sink. The "written" records will appear in the returned list.
 *
 * @return the list where "written" records will be stored.
 */
public static List<Record> mockWrites() {
  List<Record> records = new ArrayList<>();
  setDelegate(
      new Connector() {

        @Override
        public void init() {}

        @Override
        public void configure(
            @NonNull Config settings, boolean read, boolean retainRecordSources) {}

        @Override
        public int readConcurrency() {
          return -1;
        }

        @Override
        public int writeConcurrency() {
          return 1;
        }

        @Override
        public boolean supports(@NonNull ConnectorFeature feature) {
          return true;
        }

        @NonNull
        @Override
        public RecordMetadata getRecordMetadata() {
          return (field, cql) -> GenericType.STRING;
        }

        @NonNull
        @Override
        public Publisher<Publisher<Record>> read() {
          return Flux::just;
        }

        @NonNull
        @Override
        public Function<Publisher<Record>, Publisher<Record>> write() {
          return upstream -> Flux.from(upstream).doOnNext(records::add);
        }
      });
  return records;
}
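A hedged sketch of how a test might consume the returned list; the workflow invocation and the expected size are illustrative, not taken from the project:

List<Record> written = MockConnector.mockWrites();
// ... run the workflow under test, which writes its records through the mock connector ...
assertThat(written).hasSize(3); // expected count depends on the test fixture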
Use of com.datastax.oss.dsbulk.connectors.api.RecordMetadata in project dsbulk by datastax.
Class MockConnector, method mockCountingWrites:
/**
 * Sets up the mock connector to emulate writes; it will acknowledge records as if they were
 * written to an external sink. The "written" records will be counted and the total number of
 * records "written" will be reflected in the returned AtomicInteger.
 *
 * @return a counter for the number of records "written".
 */
public static AtomicInteger mockCountingWrites() {
  AtomicInteger records = new AtomicInteger();
  setDelegate(
      new Connector() {

        @Override
        public void init() {}

        @Override
        public void configure(
            @NonNull Config settings, boolean read, boolean retainRecordSources) {}

        @Override
        public int readConcurrency() {
          return -1;
        }

        @Override
        public int writeConcurrency() {
          return 1;
        }

        @Override
        public boolean supports(@NonNull ConnectorFeature feature) {
          return true;
        }

        @NonNull
        @Override
        public RecordMetadata getRecordMetadata() {
          return (field, cql) -> GenericType.STRING;
        }

        @NonNull
        @Override
        public Publisher<Publisher<Record>> read() {
          return Flux::just;
        }

        @NonNull
        @Override
        public Function<Publisher<Record>, Publisher<Record>> write() {
          return upstream -> Flux.from(upstream).doOnNext(r -> records.incrementAndGet());
        }
      });
  return records;
}
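A hedged sketch of the counting variant in a test; only the total number of records is tracked, which avoids retaining the records themselves. The invocation and expected count are illustrative:

AtomicInteger writes = MockConnector.mockCountingWrites();
// ... run the workflow under test, which writes its records through the mock connector ...
assertThat(writes.get()).isEqualTo(3); // expected count depends on the test fixture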