Use of com.teradata.jaqy.schema.SchemaInfo in project jaqy by Teradata: the class AvroExporter, method export.
/**
 * Exports the result set as an Avro container file written to {@code m_os}.
 *
 * @param rs          the result set to export
 * @param interpreter the interpreter, used for logging
 * @return the number of rows written
 * @throws Exception if metadata extraction or Avro writing fails
 */
@Override
public long export(JaqyResultSet rs, JaqyInterpreter interpreter) throws Exception {
    JaqyHelper helper = rs.getHelper();
    // Derive the Avro schema from the result set's JDBC metadata.
    SchemaInfo schemaInfo = ResultSetMetaDataUtils.getColumnInfo(rs.getMetaData().getMetaData(), helper);
    Schema schema = AvroUtils.getSchema(schemaInfo, helper);
    interpreter.getGlobals().log(Level.INFO, "schema is " + schema.toString(true));
    DatumWriter<GenericRecord> writer = new GenericDatumWriter<GenericRecord>(schema);
    // try-with-resources: previously the writer leaked (and the container
    // file was left unflushed) if create() or print() threw mid-export.
    try (DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<GenericRecord>(writer)) {
        if (m_codecFactory != null)
            dataFileWriter.setCodec(m_codecFactory);
        dataFileWriter.create(schema, m_os);
        return AvroUtils.print(dataFileWriter, schema, rs, schemaInfo);
    }
}
Use of com.teradata.jaqy.schema.SchemaInfo in project jaqy by Teradata: the class ImportTableCommand, method execute.
/**
 * Creates a staging table matching the current import's schema and then
 * switches the session into import mode with a matching parameterized INSERT.
 *
 * @param args        the staging table name (possibly split across arguments)
 * @param silent      unused here; part of the command interface
 * @param interpreter the current interpreter
 * @throws Exception if validation, DDL execution, or import setup fails
 */
@Override
public void execute(String[] args, boolean silent, JaqyInterpreter interpreter) throws Exception {
    // NOTE(review): interpreter.error() presumably aborts by throwing;
    // otherwise the dereferences below would NPE — TODO confirm.
    JaqyImporter<?> importer = interpreter.getImporter();
    if (importer == null) {
        interpreter.error("There is no current import.");
    }
    SchemaInfo schemaInfo = importer.getSchema();
    if (schemaInfo == null) {
        interpreter.error("Current import schema is not available.");
    }
    if (args.length == 0) {
        interpreter.error("Staging table name is not specified.");
    }
    // The table name may be split across several arguments; rejoin them
    // with no separator, matching the original parsing behavior.
    StringBuilder buffer = new StringBuilder();
    for (String arg : args) buffer.append(arg);
    String tableName = buffer.toString();
    SessionUtils.checkOpen(interpreter);
    Session session = interpreter.getSession();
    JaqyConnection conn = session.getConnection();
    JaqyHelper helper = conn.getHelper();
    // Generate the CREATE TABLE DDL from the import schema.
    String sql = SchemaUtils.getTableSchema(helper, schemaInfo, tableName, false);
    // DDL runs with auto-commit enabled. Restore the prior setting in a
    // finally block — previously, a failure in executeQuery() left the
    // connection with auto-commit unexpectedly turned on.
    boolean prevCommit = conn.getAutoCommit();
    if (!prevCommit)
        conn.setAutoCommit(true);
    try {
        interpreter.println("-- Table Schema --");
        interpreter.println(sql);
        session.executeQuery(sql, interpreter, 1);
    } finally {
        if (!prevCommit)
            conn.setAutoCommit(false);
    }
    // Build "INSERT INTO <table> VALUES (?,?,...)" with one placeholder per column.
    buffer.setLength(0);
    buffer.append("INSERT INTO ").append(tableName).append(" VALUES (");
    int columnCount = schemaInfo.columns.length;
    for (int i = 0; i < columnCount; ++i) {
        if (i > 0)
            buffer.append(',');
        buffer.append('?');
    }
    buffer.append(')');
    String insertSql = buffer.toString();
    interpreter.println("-- INSERTION --");
    interpreter.println(insertSql);
    try {
        session.importQuery(insertSql, interpreter);
    } finally {
        // Always return the interpreter to regular query mode.
        interpreter.setQueryMode(QueryMode.Regular);
    }
}
Use of com.teradata.jaqy.schema.SchemaInfo in project jaqy by Teradata: the class ImportSchemaCommand, method execute.
/**
 * Displays the current import's schema, either as a CREATE TABLE statement
 * (with the -s option) or as a printed result set.
 *
 * @param args        command-line options; -s selects SQL output
 * @param silent      unused here; part of the command interface
 * @param interpreter the current interpreter
 * @throws Exception if the import/schema is missing or rendering fails
 */
@Override
public void execute(String[] args, boolean silent, JaqyInterpreter interpreter) throws Exception {
    // NOTE(review): interpreter.error() presumably aborts by throwing;
    // otherwise the dereferences below would NPE — TODO confirm.
    JaqyImporter<?> importer = interpreter.getImporter();
    if (importer == null) {
        interpreter.error("There is no current import.");
    }
    SchemaInfo schemaInfo = importer.getSchema();
    if (schemaInfo == null) {
        interpreter.error("Current import schema is not available.");
    }
    boolean displaySQL = false;
    CommandLine cmdLine = getCommandLine(args);
    for (Option option : cmdLine.getOptions()) {
        switch (option.getOpt().charAt(0)) {
            case 's':
            {
                displaySQL = true;
                break;
            }
        }
    }
    SessionUtils.checkOpen(interpreter);
    Session session = interpreter.getSession();
    JaqyHelper helper = session.getConnection().getHelper();
    if (displaySQL) {
        // Placeholder table name; the user substitutes a real name.
        String sql = SchemaUtils.getTableSchema(helper, schemaInfo, "TABLENAME", false);
        interpreter.println(sql);
    } else {
        JaqyResultSet rs = SchemaUtils.getSchemaResultSet(helper, schemaInfo, false, interpreter);
        try {
            interpreter.print(rs);
        } finally {
            // fix: previously the result set leaked if print() threw.
            rs.close();
        }
    }
}
Use of com.teradata.jaqy.schema.SchemaInfo in project jaqy by Teradata: the class CSVUtils, method getSchemaInfo.
/**
 * Scans CSV records to infer a relational schema: per-column SQL type,
 * precision/scale or character length, and nullability.
 *
 * Column types are promoted monotonically during the scan:
 * NULL -> NUMERIC (parseable as BigDecimal) -> CHAR (fixed length) -> VARCHAR.
 * A negative {@code limit} enables "auto stop": the scan may end early once
 * every column has at least AUTO_STOP_MINIMUM non-null numeric/fixed-char
 * samples — but only while no column has become VARCHAR, since VARCHAR
 * columns need a full scan to find the maximum string length.
 *
 * @param headers  column names from the CSV header row, or null to
 *                 generate "col1", "col2", ... names
 * @param naValues strings to treat as NULL (e.g. "NA"), or null for none
 * @param precise  if true, columns with fractional digits become DECIMAL
 *                 rather than DOUBLE
 * @param limit    maximum rows to scan; 0 means unlimited, negative means
 *                 unlimited with auto-stop enabled
 * @return the inferred schema, or null if the iterator yielded no rows
 */
public static SchemaInfo getSchemaInfo(String[] headers, Iterator<CSVRecord> iterator, String[] naValues, boolean precise, long limit) {
// count == -1 means the column count has not been established yet;
// it is fixed by the first record's size.
int count = -1;
ScanColumnType[] columns = null;
int rowCount = 0;
boolean autoStop = false;
if (limit < 0) {
limit = Long.MAX_VALUE;
autoStop = true;
} else if (limit == 0)
limit = Long.MAX_VALUE;
boolean needScan;
while (iterator.hasNext() && rowCount < limit) {
CSVRecord record = iterator.next();
++rowCount;
int size = record.size();
needScan = false;
if (count == -1) {
// First record: initialize per-column scan state.
count = size;
columns = new ScanColumnType[count];
for (int i = 0; i < count; ++i) {
columns[i] = new ScanColumnType();
columns[i].type = Types.NULL;
columns[i].nullable = false;
columns[i].minLength = Integer.MAX_VALUE;
columns[i].maxLength = -1;
}
needScan = true;
}
// NOTE(review): records shorter than the first record would make
// record.get(i) fail here — assumes a rectangular CSV; TODO confirm.
for (int i = 0; i < count; ++i) {
String s = record.get(i);
boolean isNa = false;
if (naValues != null) {
for (String na : naValues) {
if (s.equals(na)) {
isNa = true;
break;
}
}
}
if (isNa) {
// An NA value only marks the column nullable; it does not
// affect the type or length statistics.
columns[i].nullable = true;
} else {
int len = s.length();
if (columns[i].maxLength < len)
columns[i].maxLength = len;
if (columns[i].minLength > len)
columns[i].minLength = len;
if (columns[i].type == Types.NUMERIC || columns[i].type == Types.NULL) {
try {
BigDecimal dec = new BigDecimal(s);
int precision = dec.precision();
int scale = dec.scale();
// if precision is smaller than or equal to scale, then we have leading "0."
if (precision <= scale)
precision = scale + 1;
if (columns[i].type == Types.NULL) {
columns[i].type = Types.NUMERIC;
columns[i].precision = precision;
columns[i].scale = scale;
} else {
// Mixed scales within the column: mark with
// Integer.MAX_VALUE so the final pass maps it to DOUBLE.
if (columns[i].scale != scale) {
columns[i].scale = Integer.MAX_VALUE;
}
if (columns[i].precision < precision) {
columns[i].precision = precision;
}
}
++columns[i].notNullCount;
} catch (Exception ex) {
// Not numeric: demote to a character type.
if (columns[i].minLength == columns[i].maxLength) {
// Check if we are in a fixed char column.
columns[i].type = Types.CHAR;
++columns[i].notNullCount;
} else {
columns[i].type = Types.VARCHAR;
// For varchar columns, we basically have to scan
// all the rows to find the maximum string length.
autoStop = false;
}
}
} else if (columns[i].type == Types.CHAR) {
if (columns[i].minLength == columns[i].maxLength)
++columns[i].notNullCount;
else {
columns[i].type = Types.VARCHAR;
// For varchar columns, we basically have to scan
// all the rows to find the maximum string length.
autoStop = false;
}
}
}
// Keep scanning while any column lacks enough non-null samples
// to stop early with confidence.
if (autoStop && columns[i].notNullCount < AUTO_STOP_MINIMUM) {
// For each number column, we basically need enough
// confidence to say that additional scan is not
// necessary.
needScan = true;
}
}
if (autoStop && !needScan) {
// Automatically stop if we just have numbers.
break;
}
}
if (rowCount == 0)
return null;
// Final pass: translate the accumulated scan state into column metadata.
FullColumnInfo[] columnInfos = new FullColumnInfo[count];
for (int i = 0; i < count; ++i) {
columnInfos[i] = new FullColumnInfo();
if (headers != null) {
columnInfos[i].name = headers[i];
}
// Fall back to generated names for missing/blank headers.
if (columnInfos[i].name == null || columnInfos[i].name.trim().length() == 0) {
columnInfos[i].name = "col" + (i + 1);
}
columnInfos[i].label = columnInfos[i].name;
columnInfos[i].nullable = columns[i].nullable ? ResultSetMetaData.columnNullable : ResultSetMetaData.columnNoNulls;
if (columns[i].type == Types.CHAR || columns[i].type == Types.VARCHAR) {
// For character columns, precision is the maximum observed length.
columnInfos[i].type = columns[i].type;
columnInfos[i].precision = columns[i].maxLength;
} else {
columnInfos[i].precision = columns[i].precision;
if (columns[i].scale == Integer.MAX_VALUE) {
// Mixed scales were seen: fall back to DOUBLE.
columnInfos[i].type = Types.DOUBLE;
columnInfos[i].scale = 0;
} else if (columns[i].scale <= 0 && columns[i].precision < 11) {
// Whole numbers that fit in 32 bits map to INTEGER.
columnInfos[i].type = Types.INTEGER;
columnInfos[i].scale = 0;
} else if (precise && columns[i].scale > 0) {
columnInfos[i].type = Types.DECIMAL;
columnInfos[i].scale = columns[i].scale;
} else {
columnInfos[i].type = Types.DOUBLE;
columnInfos[i].scale = 0;
}
}
}
return new SchemaInfo(columnInfos);
}
Use of com.teradata.jaqy.schema.SchemaInfo in project jaqy by Teradata: the class AvroImporter, method getSchema.
/**
 * Returns the schema of the Avro file being imported.
 *
 * Scanning the schema consumes the record iterator, so the reader is
 * closed and the file reopened afterwards to let the actual import pass
 * start from the beginning.
 *
 * @return the schema derived from the Avro file
 * @throws IOException if closing or reopening the file fails
 */
@Override
public SchemaInfo getSchema() throws IOException {
    try {
        return AvroUtils.getSchema(m_dataFileReader.getSchema(), m_iter);
    } finally {
        // fix: reset the reader even if the scan throws; previously a
        // failure left the importer with a partially-consumed iterator.
        m_dataFileReader.close();
        openFile(m_file);
    }
}
Aggregations