Use of com.thinkbiganalytics.discovery.schema.Field in the Kylo project by Teradata: the testDecimalParsing method of the SparkFileSchemaParserServiceTest class.
/**
 * Test to ensure the column types that have precision,scale get parsed correctly to the field.precisionScale property.
 * Parses a PARQUET query result containing a decimal(17,12) column and verifies the derived type,
 * the "precision,scale" string, and the fully-qualified type name on the resulting field.
 */
@org.junit.Test
public void testDecimalParsing() {
try {
Schema decimalSchema = parseQueryResult(decimalColumns(), SparkFileSchemaParserService.SparkFileType.PARQUET, TableSchemaType.HIVE);
assertNotNull(decimalSchema);
// Locate the decimal column by (case-insensitive) name; null if absent.
Field decimalField = decimalSchema.getFields().stream().filter(field -> field.getName().equalsIgnoreCase("decimalColumn")).findFirst().orElse(null);
assertNotNull(decimalField);
assertEquals("decimal", decimalField.getDerivedDataType());
assertEquals("17,12", decimalField.getPrecisionScale());
assertEquals("decimal(17,12)", decimalField.getDataTypeWithPrecisionAndScale());
} catch (Exception e) {
// Fail the test instead of swallowing the exception; previously a parse failure
// was only printed to stderr and the test would pass vacuously.
throw new AssertionError("Unexpected exception while parsing decimal schema", e);
}
}
Use of com.thinkbiganalytics.discovery.schema.Field in the Kylo project by Teradata: the updateFieldStringData method of the TableSetup class.
/**
 * Rebuilds the cached string representations of the table schema's fields:
 * all field names (newline-separated), nullable field names (comma-separated),
 * and primary-key field names (comma-separated). No-op accumulation when the
 * schema or its field list is null; the setters are still invoked with empty strings.
 */
@JsonIgnore
public void updateFieldStringData() {
StringBuffer allNames = new StringBuffer();
StringBuffer nullableNames = new StringBuffer();
StringBuffer primaryKeyNames = new StringBuffer();
boolean hasFields = tableSchema != null && tableSchema.getFields() != null;
if (hasFields) {
for (Field schemaField : tableSchema.getFields()) {
// Every field contributes to the full list; delimiter is a newline.
setStringBuffer(allNames, schemaField.getName(), "\n");
// Nullable and primary-key lists are comma-delimited subsets.
if (schemaField.isNullable()) {
setStringBuffer(nullableNames, schemaField.getName(), ",");
}
if (schemaField.isPrimaryKey()) {
setStringBuffer(primaryKeyNames, schemaField.getName(), ",");
}
}
}
setFieldsString(allNames.toString());
setNullableFields(nullableNames.toString());
setPrimaryKeyFields(primaryKeyNames.toString());
}
Use of com.thinkbiganalytics.discovery.schema.Field in the Kylo project by Teradata: the deriveDataTypes method of the ParserHelper class.
/**
 * Derive data types for any field that does not yet have one.
 * <p>
 * For each field with an empty derived type: the JDBC type is taken from the field's
 * native data type when present, otherwise inferred from its sample values; on failure
 * the type defaults to VARCHAR. The JDBC type is then mapped to the target platform's
 * type name and set on the field.
 *
 * @param type the target database platform (HIVE or RDBMS)
 * @param fields the fields to update in place
 */
public static void deriveDataTypes(TableSchemaType type, List<? extends Field> fields) {
for (Field field : fields) {
if (StringUtils.isEmpty(field.getDerivedDataType())) {
// Fallback type if neither the native type nor the samples yield one.
JDBCType jdbcType = JDBCType.VARCHAR;
try {
if (!StringUtils.isEmpty(field.getNativeDataType())) {
jdbcType = JDBCType.valueOf(field.getNativeDataType());
} else {
jdbcType = deriveJDBCDataType(field.getSampleValues());
}
} catch (IllegalArgumentException e) {
// Fixed SLF4J placeholder: "[?]" was literal text, so the offending
// type name was never substituted into the warning message.
log.warn("Unable to convert data type [{}] will be converted to VARCHAR", field.getNativeDataType());
}
switch(type) {
case HIVE:
String hiveType = sqlTypeToHiveType(jdbcType);
field.setDerivedDataType(hiveType);
field.setDataTypeDescriptor(hiveTypeToDescriptor(hiveType));
break;
case RDBMS:
field.setDerivedDataType(jdbcType.getName());
break; // explicit break so adding a future case cannot fall through
}
}
}
}
Use of com.thinkbiganalytics.discovery.schema.Field in the Kylo project by Teradata: the toHiveSchema method of the SparkFileSchemaParserService class.
/**
 * Converts a Spark transform query result into a Hive schema.
 * Each result column becomes a field carrying the column's name, native/derived type,
 * a type descriptor, a parsed precision/scale, and the non-null sample values from the rows.
 *
 * @param result the transform query result holding columns and sample rows
 * @param fileType the source file type, recorded in the schema's "STORED AS" clause
 * @return the populated Hive schema
 */
private DefaultHiveSchema toHiveSchema(TransformQueryResult result, SparkFileType fileType) {
DefaultHiveSchema schema = new DefaultHiveSchema();
schema.setHiveFormat("STORED AS " + fileType);
schema.setStructured(true);
ArrayList<Field> fields = new ArrayList<>();
List<? extends QueryResultColumn> columns = result.getColumns();
// Hoisted out of the column loop: the row list is loop-invariant and was
// previously re-fetched once per column.
List<List<Object>> values = result.getRows();
for (int i = 0; i < columns.size(); ++i) {
QueryResultColumn column = columns.get(i);
DefaultField field = new DefaultField();
field.setName(column.getDisplayName());
field.setNativeDataType(column.getDataType());
field.setDerivedDataType(column.getDataType());
field.setDataTypeDescriptor(ParserHelper.hiveTypeToDescriptor(column.getDataType()));
// strip the precisionScale and assign to the field property
setPrecisionAndScale(field);
// Add sample values: the i-th cell of each row belongs to this column.
for (List<Object> colMap : values) {
Object oVal = colMap.get(i);
if (oVal != null) {
field.getSampleValues().add(oVal.toString());
}
}
fields.add(field);
}
schema.setFields(fields);
return schema;
}
Use of com.thinkbiganalytics.discovery.schema.Field in the Kylo project by Teradata: the getFieldStructure method of the TableSetup class.
/**
 * Builds a newline-separated structure string for every field in the given schema,
 * pairing each field with its mapped counterpart name (source-to-target when type is
 * "FEED", target-to-source otherwise; empty string when no mapping exists).
 *
 * @param type "FEED" to look up the source-to-target map, anything else uses target-to-source
 * @param schema the schema whose fields are rendered; null-safe (returns empty string)
 * @return the concatenated field structures, or an empty string for a null/field-less schema
 */
@JsonIgnore
public String getFieldStructure(String type, TableSchema schema) {
// StringBuilder: purely local, no synchronization needed.
StringBuilder sb = new StringBuilder();
if (schema != null && schema.getFields() != null) {
for (Field field : schema.getFields()) {
// length check instead of isNotBlank(sb.toString()) — the old form copied
// the whole buffer on every iteration (accidental O(n^2)).
if (sb.length() > 0) {
sb.append("\n");
}
String otherName;
if (type.equalsIgnoreCase("FEED")) {
otherName = getSourceTargetFieldMap().getOrDefault(field.getName(), "");
} else {
otherName = getTargetSourceFieldMap().getOrDefault(field.getName(), "");
}
sb.append(field.asFieldStructure(otherName));
}
}
return sb.toString();
}
Aggregations