Use of org.apache.plc4x.java.scraper.exception.ScraperException in project plc4x by apache.
Class Plc4XConsumer, method startTriggered.
private void startTriggered() throws ScraperException {
    ScraperConfiguration configuration = getScraperConfig(validateTags());
    TriggerCollector collector = new TriggerCollectorImpl(plc4XEndpoint.getPlcDriverManager());
    TriggeredScraperImpl scraper = new TriggeredScraperImpl(configuration, (job, alias, response) -> {
        try {
            Exchange exchange = plc4XEndpoint.createExchange();
            exchange.getIn().setBody(response);
            getProcessor().process(exchange);
        } catch (Exception e) {
            getExceptionHandler().handleException(e);
        }
    }, collector);
    scraper.start();
    collector.start();
}
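For orientation, here is a minimal sketch of the same triggered-scraper pattern outside Camel, using the ScraperConfigurationTriggeredImplBuilder API that the Kafka connector further down also uses (imports omitted). The source alias, connection string, job name, scrape rate, and field address are illustrative assumptions, not values from the Camel component:

// Minimal sketch (illustrative values): build a triggered scraper configuration
// programmatically and start scraping. The source alias, connection string, job name,
// scrape rate and field address below are assumptions for the example.
ScraperConfigurationTriggeredImplBuilder builder = new ScraperConfigurationTriggeredImplBuilder();
builder.addSource("machineA", "s7://192.168.1.10");
JobConfigurationTriggeredImplBuilder jobBuilder = builder.job("job1", "(SCHEDULED,1000)").source("machineA");
jobBuilder.field("counter", "%DB1:DBW0:INT");
jobBuilder.build();
ScraperConfigurationTriggeredImpl configuration = builder.build();
try {
    TriggerCollector collector = new TriggerCollectorImpl(new PooledPlcDriverManager());
    TriggeredScraperImpl scraper = new TriggeredScraperImpl(configuration,
        (jobName, sourceName, results) -> results.forEach(
            (field, value) -> System.out.printf("%s/%s %s=%s%n", jobName, sourceName, field, value)),
        collector);
    scraper.start();
    collector.start();
} catch (ScraperException e) {
    // Same failure mode as startTriggered() above, just handled locally.
    System.err.println("Unable to start the scraper: " + e.getMessage());
}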
Use of org.apache.plc4x.java.scraper.exception.ScraperException in project plc4x by apache.
Class TriggeredScraperImpl, method start.
/**
 * Start the scraping.
 */
// ToDo code-refactoring and improved testing --> PLC4X-90
@Override
public void start() {
    // Schedule all jobs
    LOGGER.info("Starting jobs...");
    // start iterating over all available jobs
    for (ScrapeJob job : jobs) {
        // iterate over all sources the job shall be performed on
        for (Map.Entry<String, String> sourceEntry : job.getSourceConnections().entrySet()) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Register task for job {} for conn {} ({}) at rate {} ms",
                    job.getJobName(), sourceEntry.getKey(), sourceEntry.getValue(), job.getScrapeRate());
            }
            // create the corresponding triggered scraper task
            TriggeredScraperTask triggeredScraperTask;
            try {
                triggeredScraperTask = new TriggeredScraperTask(driverManager,
                    job.getJobName(), sourceEntry.getKey(), sourceEntry.getValue(),
                    job.getFields(), futureTimeOut, executorService, resultHandler,
                    (TriggeredScrapeJobImpl) job, triggerCollector);
                // Add task to internal list
                if (LOGGER.isInfoEnabled()) {
                    LOGGER.info("Task {} added to scheduling", triggeredScraperTask);
                }
                registerTaskMBean(triggeredScraperTask);
                tasks.put(job, triggeredScraperTask);
                ScheduledFuture<?> future = scheduler.scheduleAtFixedRate(
                    triggeredScraperTask, 0, job.getScrapeRate(), TimeUnit.MILLISECONDS);
                // Store the handle for stopping, etc.
                scraperTaskMap.put(triggeredScraperTask, future);
            } catch (ScraperException e) {
                LOGGER.warn("Error executing the job {} for conn {} ({}) at rate {} ms",
                    job.getJobName(), sourceEntry.getKey(), sourceEntry.getValue(), job.getScrapeRate(), e);
            }
        }
    }
    // Add statistics tracker
    statisticsLogger = scheduler.scheduleAtFixedRate(() -> {
        for (Map.Entry<ScrapeJob, ScraperTask> entry : tasks.entries()) {
            DescriptiveStatistics statistics = entry.getValue().getLatencyStatistics();
            String msg = String.format(Locale.ENGLISH,
                "Job statistics (%s, %s) number of requests: %d (%d success, %.1f %% failed, %.1f %% too slow), " +
                    "min latency: %.2f ms, mean latency: %.2f ms, median: %.2f ms",
                entry.getValue().getJobName(), entry.getValue().getConnectionAlias(),
                entry.getValue().getRequestCounter(), entry.getValue().getSuccessfullRequestCounter(),
                entry.getValue().getPercentageFailed(),
                statistics.apply(new PercentageAboveThreshold(entry.getKey().getScrapeRate() * 1e6)),
                statistics.getMin() * 1e-6, statistics.getMean() * 1e-6, statistics.getPercentile(50) * 1e-6);
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(msg);
            }
        }
    }, 1_000, 1_000, TimeUnit.MILLISECONDS);
}
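The ScheduledFuture handles stored in scraperTaskMap, and the statisticsLogger handle, exist so the scraper can be shut down again. Below is a plausible sketch of the counterpart stop(), assuming those handles are the only resources start() leaves running and that the Scraper interface also declares stop(); the actual TriggeredScraperImpl implementation may differ in detail:

// Hedged sketch of a stop() counterpart to the start() method above.
@Override
public void stop() {
    // Cancel every task handle that start() registered with the scheduler.
    scraperTaskMap.values().forEach(future -> future.cancel(true));
    scraperTaskMap.clear();
    tasks.clear();
    // Stop the periodic statistics logging as well.
    if (statisticsLogger != null) {
        statisticsLogger.cancel(false);
    }
}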
Use of org.apache.plc4x.java.scraper.exception.ScraperException in project plc4x by apache.
Class Plc4xSchemaFactory, method create.
@Override
public Schema create(SchemaPlus parentSchema, String name, Map<String, Object> operand) {
    // Fetch config
    Object config = operand.get("config");
    Validate.notNull(config, "No configuration file given. Please specify operand 'config'...");
    // Load configuration from file
    ScraperConfiguration configuration;
    try {
        configuration = ScraperConfiguration.fromFile(config.toString(), ScraperConfigurationTriggeredImpl.class);
    } catch (IOException e) {
        throw new IllegalArgumentException("Unable to load configuration file!", e);
    }
    // Fetch limit
    Object limit = operand.get("limit");
    Validate.notNull(limit, "No limit for the number of rows for a table. Please specify operand 'limit'...");
    long parsedLimit;
    try {
        parsedLimit = Long.parseLong(limit.toString());
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Given limit '" + limit + "' cannot be parsed to a valid long!", e);
    }
    // Pass the configuration to the Schema
    try {
        return new Plc4xSchema(configuration, parsedLimit);
    } catch (ScraperException e) {
        LOGGER.warn("Could not evaluate Plc4xSchema", e);
        // ToDo Exception, but interface does not accept ... null is fishy
        return null;
    }
}
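In normal operation Calcite instantiates the factory from a model file and passes the operand map itself; the sketch below exercises create() directly with a hand-built map. The file name and limit are illustrative, passing null for parentSchema relies on the method above not using that argument, and the no-arg constructor is assumed (Calcite schema factories conventionally provide one):

// Illustrative only: drive the factory by hand instead of through a Calcite model file.
Map<String, Object> operand = new HashMap<>();
operand.put("config", "plc4x-scraper-config.yml");   // path is an assumption
operand.put("limit", "100");                          // max rows per table, parsed as long
Schema schema = new Plc4xSchemaFactory().create(null, "plc4x", operand);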
Use of org.apache.plc4x.java.scraper.exception.ScraperException in project plc4x by apache.
Class Plc4xSourceTask, method start.
@Override
public void start(Map<String, String> props) {
    AbstractConfig config = new AbstractConfig(CONFIG_DEF, props);
    String connectionName = config.getString(Constants.CONNECTION_NAME_CONFIG);
    String plc4xConnectionString = config.getString(Constants.CONNECTION_STRING_CONFIG);
    pollReturnInterval = config.getInt(Constants.KAFKA_POLL_RETURN_CONFIG);
    Integer bufferSize = config.getInt(Constants.BUFFER_SIZE_CONFIG);
    Map<String, String> topics = new HashMap<>();
    // Create a buffer with a capacity of BUFFER_SIZE_CONFIG elements which schedules access in a fair way.
    buffer = new ArrayBlockingQueue<>(bufferSize, true);
    ScraperConfigurationTriggeredImplBuilder builder = new ScraperConfigurationTriggeredImplBuilder();
    builder.addSource(connectionName, plc4xConnectionString);
    List<String> jobConfigs = config.getList(Constants.QUERIES_CONFIG);
    for (String jobConfig : jobConfigs) {
        String[] jobConfigSegments = jobConfig.split("\\|");
        if (jobConfigSegments.length < 4) {
            log.warn("Error in job configuration '{}'. The configuration expects at least 4 segments: " +
                "{job-name}|{topic}|{rate}(|{field-alias}#{field-address})+", jobConfig);
            continue;
        }
        String jobName = jobConfigSegments[0];
        String topic = jobConfigSegments[1];
        Integer rate = Integer.valueOf(jobConfigSegments[2]);
        JobConfigurationTriggeredImplBuilder jobBuilder =
            builder.job(jobName, String.format("(SCHEDULED,%s)", rate)).source(connectionName);
        for (int i = 3; i < jobConfigSegments.length; i++) {
            String[] fieldSegments = jobConfigSegments[i].split("#");
            if (fieldSegments.length != 2) {
                log.warn("Error in job configuration '{}'. The field segment expects a format " +
                    "{field-alias}#{field-address}, but got '{}'", jobName, jobConfigSegments[i]);
                continue;
            }
            String fieldAlias = fieldSegments[0];
            String fieldAddress = fieldSegments[1];
            jobBuilder.field(fieldAlias, fieldAddress);
            topics.put(jobName, topic);
        }
        jobBuilder.build();
    }
    ScraperConfigurationTriggeredImpl scraperConfig = builder.build();
    try {
        PlcDriverManager plcDriverManager = new PooledPlcDriverManager();
        TriggerCollector triggerCollector = new TriggerCollectorImpl(plcDriverManager);
        scraper = new TriggeredScraperImpl(scraperConfig, (jobName, sourceName, results) -> {
            try {
                Long timestamp = System.currentTimeMillis();
                Map<String, String> sourcePartition = new HashMap<>();
                sourcePartition.put("sourceName", sourceName);
                sourcePartition.put("jobName", jobName);
                Map<String, Long> sourceOffset = Collections.singletonMap("offset", timestamp);
                String topic = topics.get(jobName);
                // Prepare the key structure.
                Struct key = new Struct(KEY_SCHEMA)
                    .put(Constants.SOURCE_NAME_FIELD, sourceName)
                    .put(Constants.JOB_NAME_FIELD, jobName);
                // Build the Schema for the result struct.
                SchemaBuilder fieldSchemaBuilder = SchemaBuilder.struct()
                    .name("org.apache.plc4x.kafka.schema.Field");
                for (Map.Entry<String, Object> result : results.entrySet()) {
                    // Get field-name and -value from the results.
                    String fieldName = result.getKey();
                    Object fieldValue = result.getValue();
                    // Get the schema for the given value type.
                    Schema valueSchema = getSchema(fieldValue);
                    // Add the schema description for the current field.
                    fieldSchemaBuilder.field(fieldName, valueSchema);
                }
                Schema fieldSchema = fieldSchemaBuilder.build();
                Schema recordSchema = SchemaBuilder.struct()
                    .name("org.apache.plc4x.kafka.schema.JobResult")
                    .doc("PLC Job result. This contains all of the received PLCValues as well as a received timestamp")
                    .field(Constants.FIELDS_CONFIG, fieldSchema)
                    .field(Constants.TIMESTAMP_CONFIG, Schema.INT64_SCHEMA)
                    .field(Constants.EXPIRES_CONFIG, Schema.OPTIONAL_INT64_SCHEMA)
                    .build();
                // Build the struct itself.
                Struct fieldStruct = new Struct(fieldSchema);
                for (Map.Entry<String, Object> result : results.entrySet()) {
                    // Get field-name and -value from the results.
                    String fieldName = result.getKey();
                    Object fieldValue = result.getValue();
                    if (fieldSchema.field(fieldName).schema().type() == Schema.Type.ARRAY) {
                        fieldStruct.put(fieldName, ((List) fieldValue).stream()
                            .map(p -> ((PlcValue) p).getObject())
                            .collect(Collectors.toList()));
                    } else {
                        fieldStruct.put(fieldName, fieldValue);
                    }
                }
                Struct recordStruct = new Struct(recordSchema)
                    .put(Constants.FIELDS_CONFIG, fieldStruct)
                    .put(Constants.TIMESTAMP_CONFIG, timestamp);
                // Prepare the source-record element.
                SourceRecord sourceRecord = new SourceRecord(
                    sourcePartition, sourceOffset, topic, KEY_SCHEMA, key, recordSchema, recordStruct);
                // Add the new source-record to the buffer.
                buffer.add(sourceRecord);
            } catch (Exception e) {
                log.error("Error while parsing returned values", e);
            }
        }, triggerCollector);
        scraper.start();
        triggerCollector.start();
    } catch (ScraperException e) {
        log.error("Error starting the scraper", e);
    }
}
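For reference, a sketch of the worker properties this start() method parses, built as a map the way a test might exercise the task. The connection string, topic, rate, and field addresses are illustrative; only the Constants keys and the {job-name}|{topic}|{rate}(|{field-alias}#{field-address})+ query format come from the code above, and the task's CONFIG_DEF may require further Kafka Connect settings not shown here:

// Illustrative configuration for Plc4xSourceTask.start(); all values are assumptions.
Map<String, String> props = new HashMap<>();
props.put(Constants.CONNECTION_NAME_CONFIG, "machineA");
props.put(Constants.CONNECTION_STRING_CONFIG, "s7://192.168.1.10");
props.put(Constants.KAFKA_POLL_RETURN_CONFIG, "5000");
props.put(Constants.BUFFER_SIZE_CONFIG, "1000");
// One job named job1, publishing to topic plc-topic every 1000 ms, scraping two fields.
props.put(Constants.QUERIES_CONFIG, "job1|plc-topic|1000|running#%M0.0:BOOL|counter#%DB1:DBW0:INT");
Plc4xSourceTask task = new Plc4xSourceTask();
task.start(props);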