Usage of org.apache.hadoop.hive.ql.io.AcidOutputFormat in the Apache Hive project:
the init method of the AbstractRecordWriter class.
/**
 * Initializes this record writer for a new write-id batch on the given streaming
 * connection: captures batch/write-id state, resolves the table's filesystem,
 * computes input column names/types (appending partition keys when dynamic
 * partitioning is on), loads the ACID output format (with an optional shade-prefix
 * classloading fallback), and builds the SerDe and object inspectors.
 *
 * @param conn        active streaming connection; must not be null
 * @param minWriteId  minimum write id of the current transaction batch
 * @param maxWriteId  maximum write id of the current transaction batch
 * @param statementId statement id within the transaction
 * @throws StreamingException if the connection is null, the table location is an
 *         invalid URI, the filesystem cannot be obtained, the output format class
 *         cannot be loaded, or the SerDe cannot be created
 */
@Override
public void init(StreamingConnection conn, long minWriteId, long maxWriteId, int statementId)
    throws StreamingException {
  if (conn == null) {
    throw new StreamingException("Streaming connection cannot be null during record writer initialization");
  }
  this.conn = conn;
  this.curBatchMinWriteId = minWriteId;
  this.curBatchMaxWriteId = maxWriteId;
  this.statementId = statementId;
  this.conf = conn.getHiveConf();
  this.defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME);
  this.table = conn.getTable();
  String location = table.getSd().getLocation();
  try {
    URI uri = new URI(location);
    // newInstance (rather than FileSystem.get) gives this writer its own FS
    // handle, uncached and independent of instances shared elsewhere.
    this.fs = FileSystem.newInstance(uri, conf);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Created new filesystem instance: {}", System.identityHashCode(this.fs));
    }
  } catch (URISyntaxException e) {
    throw new StreamingException("Unable to create URI from location: " + location, e);
  } catch (IOException e) {
    throw new StreamingException("Unable to get filesystem for location: " + location, e);
  }
  this.inputColumns = table.getSd().getCols().stream().map(FieldSchema::getName).collect(Collectors.toList());
  this.inputTypes = table.getSd().getCols().stream().map(FieldSchema::getType).collect(Collectors.toList());
  if (conn.isPartitionedTable() && conn.isDynamicPartitioning()) {
    // With dynamic partitioning, partition key values arrive as trailing
    // columns of each input row, so extend the input schema with them.
    this.partitionColumns = table.getPartitionKeys().stream().map(FieldSchema::getName).collect(Collectors.toList());
    this.inputColumns.addAll(partitionColumns);
    this.inputTypes.addAll(table.getPartitionKeys().stream().map(FieldSchema::getType).collect(Collectors.toList()));
  }
  this.fullyQualifiedTableName = Warehouse.getQualifiedName(table.getDbName(), table.getTableName());
  String outFormatName = this.table.getSd().getOutputFormat();
  try {
    this.acidOutputFormat = (AcidOutputFormat<?, ?>) ReflectionUtils.newInstance(JavaUtils.loadClass(outFormatName), conf);
  } catch (Exception e) {
    // Class loading may fail when the output format is relocated under a shade
    // prefix (e.g. in uber jars); retry with the configured prefix if any.
    String shadePrefix = conf.getVar(HiveConf.ConfVars.HIVE_CLASSLOADER_SHADE_PREFIX);
    if (shadePrefix != null && !shadePrefix.trim().isEmpty()) {
      try {
        LOG.info("Shade prefix: {} specified. Using as fallback to load {}..", shadePrefix, outFormatName);
        this.acidOutputFormat = (AcidOutputFormat<?, ?>) ReflectionUtils.newInstance(JavaUtils.loadClass(shadePrefix, outFormatName), conf);
      } catch (ClassNotFoundException e1) {
        // Preserve the fallback failure instead of silently discarding e1,
        // so diagnostics show both load attempts.
        e.addSuppressed(e1);
        throw new StreamingException(e.getMessage(), e);
      }
    } else {
      throw new StreamingException(e.getMessage(), e);
    }
  }
  setupMemoryMonitoring();
  try {
    final AbstractSerDe serDe = createSerde();
    this.inputRowObjectInspector = (StructObjectInspector) serDe.getObjectInspector();
    if (conn.isPartitionedTable() && conn.isDynamicPartitioning()) {
      preparePartitioningFields();
      // Output rows must exclude the trailing dynamic-partition columns, which
      // are consumed for routing rather than written to the data files.
      int dpStartCol = inputRowObjectInspector.getAllStructFieldRefs().size() - table.getPartitionKeys().size();
      this.outputRowObjectInspector = new SubStructObjectInspector(inputRowObjectInspector, 0, dpStartCol);
    } else {
      this.outputRowObjectInspector = inputRowObjectInspector;
    }
    prepareBucketingFields();
  } catch (SerDeException e) {
    throw new StreamingException("Unable to create SerDe", e);
  }
}
Aggregations