
Example 1 with CleansedRowResult

Use of com.thinkbiganalytics.spark.datavalidator.CleansedRowResult in project kylo by Teradata.

The call method of the PartitionLevelCountsV2 class:

@Override
public Iterator<long[]> call(Iterator<CleansedRowResult> cleansedRowResultIterator) throws Exception {
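    // Layout of the counts array: slots [0, schemaLen) hold per-column invalid counts;
    // slot schemaLen holds the valid-row count and slot schemaLen + 1 the invalid-row count.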
    long[] validationCounts = new long[schemaLen + 2];
    while (cleansedRowResultIterator.hasNext()) {
        CleansedRowResult cleansedRowResult = cleansedRowResultIterator.next();
        for (int idx = 0; idx < schemaLen; idx++) {
            if (!cleansedRowResult.isColumnValid(idx)) {
                validationCounts[idx] = validationCounts[idx] + 1L;
            }
        }
        if (cleansedRowResult.isRowValid()) {
            validationCounts[schemaLen] = validationCounts[schemaLen] + 1L;
        } else {
            validationCounts[schemaLen + 1] = validationCounts[schemaLen + 1] + 1L;
        }
    }
    List<long[]> results = new LinkedList<>();
    results.add(validationCounts);
    return results.iterator();
}
Also used : CleansedRowResult(com.thinkbiganalytics.spark.datavalidator.CleansedRowResult) LinkedList(java.util.LinkedList)
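
Because the call signature consumes an Iterator and returns an Iterator, this class matches the Spark 2.x FlatMapFunction contract and would typically be driven through mapPartitions. Below is a minimal sketch of that wiring; it assumes PartitionLevelCountsV2 implements FlatMapFunction&lt;Iterator&lt;CleansedRowResult&gt;, long[]&gt; and takes the schema length as a constructor argument (both are assumptions, since only the call method is shown above).

import org.apache.spark.api.java.JavaRDD;

import com.thinkbiganalytics.spark.datavalidator.CleansedRowResult;

public class PartitionCountsExample {

    // Hypothetical wiring; the constructor argument to PartitionLevelCountsV2 is an assumption.
    public static JavaRDD<long[]> countPerPartition(JavaRDD<CleansedRowResult> cleansed, int schemaLen) {
        // Each partition emits exactly one long[schemaLen + 2] counts array,
        // so the result has one element per partition.
        return cleansed.mapPartitions(new PartitionLevelCountsV2(schemaLen));
    }
}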

Example 2 with CleansedRowResult

Use of com.thinkbiganalytics.spark.datavalidator.CleansedRowResult in project kylo by Teradata.

The call method of the PartitionLevelCountsV1 class:

@Override
public Iterable<long[]> call(Iterator<CleansedRowResult> cleansedRowResultIterator) throws Exception {
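    // Same counting scheme as the V2 variant in Example 1; the Iterable return type
    // matches the Spark 1.x FlatMapFunction contract (V2's Iterator matches Spark 2.x).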
    long[] validationCounts = new long[schemaLen + 2];
    while (cleansedRowResultIterator.hasNext()) {
        CleansedRowResult cleansedRowResult = cleansedRowResultIterator.next();
        for (int idx = 0; idx < schemaLen; idx++) {
            if (!cleansedRowResult.isColumnValid(idx)) {
                validationCounts[idx] = validationCounts[idx] + 1L;
            }
        }
        if (cleansedRowResult.isRowValid()) {
            validationCounts[schemaLen] = validationCounts[schemaLen] + 1L;
        } else {
            validationCounts[schemaLen + 1] = validationCounts[schemaLen + 1] + 1L;
        }
    }
    List<long[]> results = new LinkedList<>();
    results.add(validationCounts);
    return results;
}
Also used : CleansedRowResult(com.thinkbiganalytics.spark.datavalidator.CleansedRowResult) LinkedList(java.util.LinkedList)
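
The V1 variant differs from Example 1 only in its return type: Iterable&lt;long[]&gt; matches the Spark 1.x FlatMapFunction contract, while Iterator&lt;long[]&gt; matches Spark 2.x. Either way, each partition contributes a single counts array, and those arrays still have to be merged into one global total. The sketch below shows that final step as a plain element-wise sum; the reducer is illustrative, not code from the project.

import org.apache.spark.api.java.JavaRDD;

public class CountsAggregationExample {

    // Element-wise sum of two equally sized count arrays.
    static long[] add(long[] left, long[] right) {
        long[] sum = new long[left.length];
        for (int i = 0; i < left.length; i++) {
            sum[i] = left[i] + right[i];
        }
        return sum;
    }

    // Collapse the one-array-per-partition results into a single
    // long[schemaLen + 2] of global validation counts.
    public static long[] totalCounts(JavaRDD<long[]> partitionCounts) {
        return partitionCounts.reduce(CountsAggregationExample::add);
    }
}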

Example 3 with CleansedRowResult

Use of com.thinkbiganalytics.spark.datavalidator.CleansedRowResult in project kylo by Teradata.

The call method of the CleanseAndValidateRow class:

@Override
public CleansedRowResult call(@Nonnull final Row row) throws Exception {
    /*
     * Cache for performance. Validators accept different parameter types (numeric, string, etc.),
     * so we need to resolve the type using reflection.
     */
    Map<Class, Class> validatorParamType = new HashMap<>();
    int nulls = hasProcessingDttm ? 1 : 0;
    // Create a placeholder for the new values plus one column for reject_reason
    Object[] newValues = new Object[dataTypes.length + 1];
    boolean rowValid = true;
    String sbRejectReason;
    List<ValidationResult> results = null;
    boolean[] columnsValid = new boolean[dataTypes.length];
    Map<Integer, Object> originalValues = new HashMap<>();
    // Iterate through columns to cleanse and validate
    for (int idx = 0; idx < dataTypes.length; idx++) {
        ValidationResult result;
        FieldPolicy fieldPolicy = policies[idx];
        HCatDataType dataType = dataTypes[idx];
        boolean columnValid = true;
        boolean isBinaryType = dataType.getConvertibleType().equals(byte[].class);
        // Extract the value (allowing for a null or missing field in odd-ball data)
        Object val = (idx == row.length() || row.isNullAt(idx) ? null : row.get(idx));
        if (dataType.isUnchecked()) {
            if (val == null) {
                nulls++;
            }
            newValues[idx] = val;
            originalValues.put(idx, val);
        } else {
            Object fieldValue = val;
            boolean isEmpty;
            if (fieldValue == null) {
                nulls++;
            }
            originalValues.put(idx, fieldValue);
            StandardizationAndValidationResult standardizationAndValidationResult = standardizeAndValidateField(fieldPolicy, fieldValue, dataType, validatorParamType);
            result = standardizationAndValidationResult.getFinalValidationResult();
            // only apply the standardized value if the validation result is valid
            fieldValue = result.isValid() ? standardizationAndValidationResult.getFieldValue() : fieldValue;
            // reevaluate the isEmpty flag
            isEmpty = ((fieldValue == null) || (StringUtils.isEmpty(fieldValue.toString())));
            // Hive will auto-convert byte[] or String fields to a target binary type;
            // any other type cannot be stored as binary, so set it to null
            if (result.isValid() && isBinaryType && !(fieldValue instanceof byte[]) && !(fieldValue instanceof String)) {
                fieldValue = null;
            } else if ((dataType.isNumeric() || isBinaryType) && isEmpty) {
                // if it's a numeric (or binary) column and the field is empty, set it to null as well
                fieldValue = null;
            }
            newValues[idx] = fieldValue;
            if (!result.isValid()) {
                rowValid = false;
                results = (results == null ? new Vector<ValidationResult>() : results);
                results.addAll(standardizationAndValidationResult.getValidationResults());
                columnValid = false;
            }
        }
        // Record whether this column was valid
        columnsValid[idx] = columnValid;
    }
    // Fail the row if every value was null; that indicates a blank line in the file.
    if (nulls >= dataTypes.length) {
        rowValid = false;
        results = (results == null ? new Vector<ValidationResult>() : results);
        results.add(ValidationResult.failRow("empty", "Row is empty"));
    }
    if (!rowValid) {
        for (int idx = 0; idx < dataTypes.length; idx++) {
            // the _invalid table's dataTypes match the source, not the destination
            if (newValues[idx] == null || originalValues.get(idx) == null || newValues[idx].getClass() != originalValues.get(idx).getClass()) {
                newValues[idx] = originalValues.get(idx);
            }
        // otherwise the data has changed, but it's still the same data type, so we can keep the newly changed value
        }
    }
    // Convert the reject reasons to JSON
    sbRejectReason = toJSONArray(results);
    // Record the results in the appended columns, moving the processing partition value last
    if (hasProcessingDttm) {
        // PROCESSING_DTTM_COL
        newValues[dataTypes.length] = newValues[dataTypes.length - 1];
        // REJECT_REASON_COL
        newValues[dataTypes.length - 1] = sbRejectReason;
    } else {
        newValues[dataTypes.length] = sbRejectReason;
    }
    return new CleansedRowResult(RowFactory.create(newValues), columnsValid, rowValid);
}
Also used : FieldPolicy(com.thinkbiganalytics.policy.FieldPolicy) BaseFieldPolicy(com.thinkbiganalytics.policy.BaseFieldPolicy) HashMap(java.util.HashMap) CleansedRowResult(com.thinkbiganalytics.spark.datavalidator.CleansedRowResult) StandardizationAndValidationResult(com.thinkbiganalytics.spark.datavalidator.StandardizationAndValidationResult) ValidationResult(com.thinkbiganalytics.policy.validation.ValidationResult) StandardizationAndValidationResult(com.thinkbiganalytics.spark.datavalidator.StandardizationAndValidationResult) HCatDataType(com.thinkbiganalytics.spark.validation.HCatDataType)
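
The CleansedRowResult built here bundles the cleansed Row with the per-column and per-row validity flags that Examples 1 and 2 read through isColumnValid and isRowValid. One natural downstream use is splitting the stream into valid and invalid rows; the sketch below assumes a getRow() accessor on CleansedRowResult, which is hypothetical (only the constructor and the validity accessors appear in these examples).

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Row;

import com.thinkbiganalytics.spark.datavalidator.CleansedRowResult;

public class SplitRowsExample {

    // isRowValid() is confirmed by Examples 1 and 2; getRow() is an assumption.
    public static JavaRDD<Row> validRows(JavaRDD<CleansedRowResult> cleansed) {
        return cleansed.filter(CleansedRowResult::isRowValid)
                       .map(result -> result.getRow());
    }

    public static JavaRDD<Row> invalidRows(JavaRDD<CleansedRowResult> cleansed) {
        return cleansed.filter(result -> !result.isRowValid())
                       .map(result -> result.getRow());
    }
}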

Aggregations

CleansedRowResult (com.thinkbiganalytics.spark.datavalidator.CleansedRowResult)3 LinkedList (java.util.LinkedList)2 BaseFieldPolicy (com.thinkbiganalytics.policy.BaseFieldPolicy)1 FieldPolicy (com.thinkbiganalytics.policy.FieldPolicy)1 ValidationResult (com.thinkbiganalytics.policy.validation.ValidationResult)1 StandardizationAndValidationResult (com.thinkbiganalytics.spark.datavalidator.StandardizationAndValidationResult)1 HCatDataType (com.thinkbiganalytics.spark.validation.HCatDataType)1 HashMap (java.util.HashMap)1