Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
Class DDLTask, method alterTableOrSinglePartition.
private int alterTableOrSinglePartition(AlterTableDesc alterTbl, Table tbl, Partition part) throws HiveException {
  EnvironmentContext environmentContext = alterTbl.getEnvironmentContext();
  if (environmentContext == null) {
    environmentContext = new EnvironmentContext();
    alterTbl.setEnvironmentContext(environmentContext);
  }
  // no need to update stats in alter table/partition operations
  if (environmentContext.getProperties() == null || environmentContext.getProperties().get(StatsSetupConst.DO_NOT_UPDATE_STATS) == null) {
    environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
  }
  if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) {
    tbl.setDbName(Utilities.getDatabaseName(alterTbl.getNewName()));
    tbl.setTableName(Utilities.getTableName(alterTbl.getNewName()));
  } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) {
    StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
    String serializationLib = sd.getSerdeInfo().getSerializationLib();
    AvroSerdeUtils.handleAlterTableForAvro(conf, serializationLib, tbl.getTTable().getParameters());
    List<FieldSchema> oldCols = (part == null ? tbl.getColsForMetastore() : part.getColsForMetastore());
    List<FieldSchema> newCols = alterTbl.getNewCols();
    if (serializationLib.equals("org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
      console.printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
      sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
      sd.setCols(newCols);
    } else {
      // make sure the new columns do not already exist
      Iterator<FieldSchema> iterNewCols = newCols.iterator();
      while (iterNewCols.hasNext()) {
        FieldSchema newCol = iterNewCols.next();
        String newColName = newCol.getName();
        Iterator<FieldSchema> iterOldCols = oldCols.iterator();
        while (iterOldCols.hasNext()) {
          String oldColName = iterOldCols.next().getName();
          if (oldColName.equalsIgnoreCase(newColName)) {
            throw new HiveException(ErrorMsg.DUPLICATE_COLUMN_NAMES, newColName);
          }
        }
        oldCols.add(newCol);
      }
      sd.setCols(oldCols);
    }
  } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAMECOLUMN) {
    StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
    String serializationLib = sd.getSerdeInfo().getSerializationLib();
    AvroSerdeUtils.handleAlterTableForAvro(conf, serializationLib, tbl.getTTable().getParameters());
    List<FieldSchema> oldCols = (part == null ? tbl.getColsForMetastore() : part.getColsForMetastore());
    List<FieldSchema> newCols = new ArrayList<FieldSchema>();
    Iterator<FieldSchema> iterOldCols = oldCols.iterator();
    String oldName = alterTbl.getOldColName();
    String newName = alterTbl.getNewColName();
    String type = alterTbl.getNewColType();
    String comment = alterTbl.getNewColComment();
    boolean first = alterTbl.getFirst();
    String afterCol = alterTbl.getAfterCol();
    // if ORC table, restrict reordering columns as it will break schema evolution
    boolean isOrcSchemaEvolution = sd.getInputFormat().equals(OrcInputFormat.class.getName()) && isSchemaEvolutionEnabled(tbl);
    if (isOrcSchemaEvolution && (first || (afterCol != null && !afterCol.trim().isEmpty()))) {
      throw new HiveException(ErrorMsg.CANNOT_REORDER_COLUMNS, alterTbl.getOldName());
    }
    FieldSchema column = null;
    boolean found = false;
    int position = -1;
    if (first) {
      position = 0;
    }
    int i = 1;
    while (iterOldCols.hasNext()) {
      FieldSchema col = iterOldCols.next();
      String oldColName = col.getName();
      if (oldColName.equalsIgnoreCase(newName) && !oldColName.equalsIgnoreCase(oldName)) {
        throw new HiveException(ErrorMsg.DUPLICATE_COLUMN_NAMES, newName);
      } else if (oldColName.equalsIgnoreCase(oldName)) {
        col.setName(newName);
        if (type != null && !type.trim().equals("")) {
          col.setType(type);
        }
        if (comment != null) {
          col.setComment(comment);
        }
        found = true;
        if (first || (afterCol != null && !afterCol.trim().equals(""))) {
          column = col;
          continue;
        }
      }
      if (afterCol != null && !afterCol.trim().equals("") && oldColName.equalsIgnoreCase(afterCol)) {
        position = i;
      }
      i++;
      newCols.add(col);
    }
    // did not find the column to rename
    if (!found) {
      throw new HiveException(ErrorMsg.INVALID_COLUMN, oldName);
    }
    // an AFTER column was specified, but we did not find it
    if ((afterCol != null && !afterCol.trim().equals("")) && position < 0) {
      throw new HiveException(ErrorMsg.INVALID_COLUMN, afterCol);
    }
    if (position >= 0) {
      newCols.add(position, column);
    }
    sd.setCols(newCols);
  } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.REPLACECOLS) {
    StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
    // change SerDe to LazySimpleSerDe if it is columnsetSerDe
    String serializationLib = sd.getSerdeInfo().getSerializationLib();
    if (serializationLib.equals("org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
      console.printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
      sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
    } else if (!serializationLib.equals(MetadataTypedColumnsetSerDe.class.getName())
        && !serializationLib.equals(LazySimpleSerDe.class.getName())
        && !serializationLib.equals(ColumnarSerDe.class.getName())
        && !serializationLib.equals(DynamicSerDe.class.getName())
        && !serializationLib.equals(ParquetHiveSerDe.class.getName())
        && !serializationLib.equals(OrcSerde.class.getName())) {
      throw new HiveException(ErrorMsg.CANNOT_REPLACE_COLUMNS, alterTbl.getOldName());
    }
    final boolean isOrcSchemaEvolution = serializationLib.equals(OrcSerde.class.getName()) && isSchemaEvolutionEnabled(tbl);
    // adding columns and limited integer type promotion is supported for ORC schema evolution
    if (isOrcSchemaEvolution) {
      final List<FieldSchema> existingCols = sd.getCols();
      final List<FieldSchema> replaceCols = alterTbl.getNewCols();
      if (replaceCols.size() < existingCols.size()) {
        throw new HiveException(ErrorMsg.REPLACE_CANNOT_DROP_COLUMNS, alterTbl.getOldName());
      }
    }
    sd.setCols(alterTbl.getNewCols());
  } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDPROPS) {
    if (StatsSetupConst.USER.equals(environmentContext.getProperties().get(StatsSetupConst.STATS_GENERATED))) {
      environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
    }
    if (part != null) {
      part.getTPartition().getParameters().putAll(alterTbl.getProps());
    } else {
      tbl.getTTable().getParameters().putAll(alterTbl.getProps());
    }
  } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.DROPPROPS) {
    Iterator<String> keyItr = alterTbl.getProps().keySet().iterator();
    if (StatsSetupConst.USER.equals(environmentContext.getProperties().get(StatsSetupConst.STATS_GENERATED))) {
      // dropping a user-set stats parameter triggers a stats update automatically
      environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
    }
    while (keyItr.hasNext()) {
      if (part != null) {
        part.getTPartition().getParameters().remove(keyItr.next());
      } else {
        tbl.getTTable().getParameters().remove(keyItr.next());
      }
    }
  } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDEPROPS) {
    StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
    sd.getSerdeInfo().getParameters().putAll(alterTbl.getProps());
  } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDE) {
    StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
    String serdeName = alterTbl.getSerdeName();
    String oldSerdeName = sd.getSerdeInfo().getSerializationLib();
    // if ORC table, restrict changing the serde as it can break schema evolution
    if (isSchemaEvolutionEnabled(tbl) && oldSerdeName.equalsIgnoreCase(OrcSerde.class.getName())
        && !serdeName.equalsIgnoreCase(OrcSerde.class.getName())) {
      throw new HiveException(ErrorMsg.CANNOT_CHANGE_SERDE, OrcSerde.class.getSimpleName(), alterTbl.getOldName());
    }
    sd.getSerdeInfo().setSerializationLib(serdeName);
    if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) {
      sd.getSerdeInfo().getParameters().putAll(alterTbl.getProps());
    }
    if (part != null) {
      // TODO: this is a no-op; it re-sets the partition columns to their current value.
      part.getTPartition().getSd().setCols(part.getTPartition().getSd().getCols());
    } else {
      if (Table.shouldStoreFieldsInMetastore(conf, serdeName, tbl.getParameters())
          && !Table.hasMetastoreBasedSchema(conf, oldSerdeName)) {
        // The new SerDe stores fields in the metastore while the old one did not, so populate the
        // fields from the old deserializer. This may fail if some fields
        // from old SerDe are too long to be stored in metastore, but there's nothing we can do.
        try {
          Deserializer oldSerde = MetaStoreUtils.getDeserializer(conf, tbl.getTTable(), false, oldSerdeName);
          tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), oldSerde));
        } catch (MetaException ex) {
          throw new HiveException(ex);
        }
      }
    }
  } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) {
    StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
    // if ORC table, restrict changing the file format as it can break schema evolution
    if (isSchemaEvolutionEnabled(tbl) && sd.getInputFormat().equals(OrcInputFormat.class.getName())
        && !alterTbl.getInputFormat().equals(OrcInputFormat.class.getName())) {
      throw new HiveException(ErrorMsg.CANNOT_CHANGE_FILEFORMAT, "ORC", alterTbl.getOldName());
    }
    sd.setInputFormat(alterTbl.getInputFormat());
    sd.setOutputFormat(alterTbl.getOutputFormat());
    if (alterTbl.getSerdeName() != null) {
      sd.getSerdeInfo().setSerializationLib(alterTbl.getSerdeName());
    }
  } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCLUSTERSORTCOLUMN) {
    StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
    // validate sort columns and bucket columns
    List<String> columns = Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
    if (!alterTbl.isTurnOffSorting()) {
      Utilities.validateColumnNames(columns, alterTbl.getBucketColumns());
    }
    if (alterTbl.getSortColumns() != null) {
      Utilities.validateColumnNames(columns, Utilities.getColumnNamesFromSortCols(alterTbl.getSortColumns()));
    }
    if (alterTbl.isTurnOffSorting()) {
      sd.setSortCols(new ArrayList<Order>());
    } else if (alterTbl.getNumberBuckets() == -1) {
      // -1 buckets means to turn off bucketing
      sd.setBucketCols(new ArrayList<String>());
      sd.setNumBuckets(-1);
      sd.setSortCols(new ArrayList<Order>());
    } else {
      sd.setBucketCols(alterTbl.getBucketColumns());
      sd.setNumBuckets(alterTbl.getNumberBuckets());
      sd.setSortCols(alterTbl.getSortColumns());
    }
  } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) {
    StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
    String newLocation = alterTbl.getNewLocation();
    try {
      URI locUri = new URI(newLocation);
      if (!new Path(locUri).isAbsolute()) {
        throw new HiveException(ErrorMsg.BAD_LOCATION_VALUE, newLocation);
      }
      sd.setLocation(newLocation);
    } catch (URISyntaxException e) {
      throw new HiveException(e);
    }
    environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
  } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSKEWEDBY) {
    // Validation was done at compile time; no validation is needed here.
    List<String> skewedColNames = null;
    List<List<String>> skewedValues = null;
    if (alterTbl.isTurnOffSkewed()) {
      // Convert skewed table to non-skewed table.
      skewedColNames = new ArrayList<String>();
      skewedValues = new ArrayList<List<String>>();
    } else {
      skewedColNames = alterTbl.getSkewedColNames();
      skewedValues = alterTbl.getSkewedColValues();
    }
    if (null == tbl.getSkewedInfo()) {
      // Convert non-skewed table to skewed table.
      SkewedInfo skewedInfo = new SkewedInfo();
      skewedInfo.setSkewedColNames(skewedColNames);
      skewedInfo.setSkewedColValues(skewedValues);
      tbl.setSkewedInfo(skewedInfo);
    } else {
      tbl.setSkewedColNames(skewedColNames);
      tbl.setSkewedColValues(skewedValues);
    }
    tbl.setStoredAsSubDirectories(alterTbl.isStoredAsSubDirectories());
  } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERSKEWEDLOCATION) {
    // process locations one by one
    Map<List<String>, String> locMaps = alterTbl.getSkewedLocations();
    Set<List<String>> keys = locMaps.keySet();
    for (List<String> key : keys) {
      String newLocation = locMaps.get(key);
      try {
        URI locUri = new URI(newLocation);
        if (part != null) {
          List<String> slk = new ArrayList<String>(key);
          part.setSkewedValueLocationMap(slk, locUri.toString());
        } else {
          List<String> slk = new ArrayList<String>(key);
          tbl.setSkewedValueLocationMap(slk, locUri.toString());
        }
      } catch (URISyntaxException e) {
        throw new HiveException(e);
      }
    }
    environmentContext.getProperties().remove(StatsSetupConst.DO_NOT_UPDATE_STATS);
  } else if (alterTbl.getOp() == AlterTableTypes.ALTERBUCKETNUM) {
    if (part != null) {
      if (part.getBucketCount() == alterTbl.getNumberBuckets()) {
        return 0;
      }
      part.setBucketCount(alterTbl.getNumberBuckets());
    } else {
      if (tbl.getNumBuckets() == alterTbl.getNumberBuckets()) {
        return 0;
      }
      tbl.setNumBuckets(alterTbl.getNumberBuckets());
    }
  } else {
    throw new HiveException(ErrorMsg.UNSUPPORTED_ALTER_TBL_OP, alterTbl.getOp().toString());
  }
  return 0;
}
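The ADDCOLS branch above rejects any new column whose name already exists (case-insensitively) by throwing HiveException with ErrorMsg.DUPLICATE_COLUMN_NAMES. A minimal standalone sketch of that check follows; ColumnChecks and assertNoDuplicates are hypothetical names introduced here for illustration and are not part of DDLTask.

import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.metadata.HiveException;

// Hypothetical helper mirroring the duplicate-column check in the ADDCOLS branch above.
final class ColumnChecks {

  private ColumnChecks() {
  }

  // Throws HiveException(DUPLICATE_COLUMN_NAMES) if any new column name already exists.
  static void assertNoDuplicates(List<FieldSchema> oldCols, List<FieldSchema> newCols) throws HiveException {
    for (FieldSchema newCol : newCols) {
      for (FieldSchema oldCol : oldCols) {
        // column names are compared case-insensitively, as in DDLTask
        if (oldCol.getName().equalsIgnoreCase(newCol.getName())) {
          throw new HiveException(ErrorMsg.DUPLICATE_COLUMN_NAMES, newCol.getName());
        }
      }
    }
  }
}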
Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
Class DDLTask, method showLocks.
/**
 * Write a list of the current locks to a file.
 *
 * @param db
 *          the Hive object.
 * @param showLocks
 *          the locks we're interested in.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int showLocks(Hive db, ShowLocksDesc showLocks) throws HiveException {
  Context ctx = driverContext.getCtx();
  HiveTxnManager txnManager = ctx.getHiveTxnManager();
  HiveLockManager lockMgr = txnManager.getLockManager();
  if (txnManager.useNewShowLocksFormat()) {
    return showLocksNewFormat(showLocks, lockMgr);
  }
  boolean isExt = showLocks.isExt();
  if (lockMgr == null) {
    throw new HiveException("show Locks LockManager not specified");
  }
  // write the results to the file
  DataOutputStream outStream = getOutputStream(showLocks.getResFile());
  try {
    List<HiveLock> locks = null;
    if (showLocks.getTableName() == null) {
      // TODO should be doing security check here. Users should not be
      // able to see each other's locks.
      locks = lockMgr.getLocks(false, isExt);
    } else {
      locks = lockMgr.getLocks(HiveLockObject.createFrom(db, showLocks.getTableName(), showLocks.getPartSpec()), true, isExt);
    }
    Collections.sort(locks, new Comparator<HiveLock>() {

      @Override
      public int compare(HiveLock o1, HiveLock o2) {
        int cmp = o1.getHiveLockObject().getName().compareTo(o2.getHiveLockObject().getName());
        if (cmp == 0) {
          if (o1.getHiveLockMode() == o2.getHiveLockMode()) {
            return cmp;
          }
          // EXCLUSIVE locks occur before SHARED locks
          if (o1.getHiveLockMode() == HiveLockMode.EXCLUSIVE) {
            return -1;
          }
          return +1;
        }
        return cmp;
      }
    });
    Iterator<HiveLock> locksIter = locks.iterator();
    while (locksIter.hasNext()) {
      HiveLock lock = locksIter.next();
      outStream.writeBytes(lock.getHiveLockObject().getDisplayName());
      outStream.write(separator);
      outStream.writeBytes(lock.getHiveLockMode().toString());
      if (isExt) {
        HiveLockObjectData lockData = lock.getHiveLockObject().getData();
        if (lockData != null) {
          outStream.write(terminator);
          outStream.writeBytes("LOCK_QUERYID:" + lockData.getQueryId());
          outStream.write(terminator);
          outStream.writeBytes("LOCK_TIME:" + lockData.getLockTime());
          outStream.write(terminator);
          outStream.writeBytes("LOCK_MODE:" + lockData.getLockMode());
          outStream.write(terminator);
          outStream.writeBytes("LOCK_QUERYSTRING:" + lockData.getQueryStr());
        }
      }
      outStream.write(terminator);
    }
  } catch (FileNotFoundException e) {
    LOG.warn("show locks: " + stringifyException(e));
    return 1;
  } catch (IOException e) {
    LOG.warn("show locks: " + stringifyException(e));
    return 1;
  } catch (Exception e) {
    throw new HiveException(e.toString(), e);
  } finally {
    IOUtils.closeStream(outStream);
  }
  return 0;
}
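The anonymous Comparator above orders locks by lock-object name and, on ties, puts EXCLUSIVE before SHARED. The following sketch expresses the same ordering as a Comparator chain; LockOrdering is a hypothetical helper introduced here for illustration, not Hive code.

import java.util.Comparator;
import java.util.List;

import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;

// Hypothetical helper: same ordering as the anonymous Comparator in showLocks, as a lambda chain.
final class LockOrdering {

  private LockOrdering() {
  }

  static void sortByNameThenExclusiveFirst(List<HiveLock> locks) {
    locks.sort(Comparator
        // primary key: lock object name
        .comparing((HiveLock l) -> l.getHiveLockObject().getName())
        // tie-break: EXCLUSIVE before SHARED, mirroring the comparator above
        .thenComparing(l -> l.getHiveLockMode() == HiveLockMode.EXCLUSIVE ? 0 : 1));
  }
}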
Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
Class DDLTask, method showGrants.
private int showGrants(Hive db, ShowGrantDesc showGrantDesc) throws HiveException {
  HiveAuthorizer authorizer = getSessionAuthorizer(db);
  try {
    List<HivePrivilegeInfo> privInfos = authorizer.showPrivileges(
        getAuthorizationTranslator(authorizer).getHivePrincipal(showGrantDesc.getPrincipalDesc()),
        getAuthorizationTranslator(authorizer).getHivePrivilegeObject(showGrantDesc.getHiveObj()));
    boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST);
    writeToFile(writeGrantInfo(privInfos, testMode), showGrantDesc.getResFile());
  } catch (IOException e) {
    throw new HiveException("Error in show grant statement", e);
  }
  return 0;
}
Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
Class DDLTask, method alterTableAlterPart.
/**
 * Alter a partition column type in a table.
 *
 * @param db
 *          the database containing the table whose partition column is altered.
 * @param alterPartitionDesc
 *          describes the new partition column type.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 */
private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionDesc) throws HiveException {
  Table tbl = db.getTable(alterPartitionDesc.getTableName(), true);
  String tabName = alterPartitionDesc.getTableName();
  // This is checked by DDLSemanticAnalyzer
  assert (tbl.isPartitioned());
  List<FieldSchema> newPartitionKeys = new ArrayList<FieldSchema>();
  // Check that every existing partition value can be converted to the new column type
  // with a non null value before trying to alter the partition column type.
  try {
    Set<Partition> partitions = db.getAllPartitionsOf(tbl);
    int colIndex = -1;
    for (FieldSchema col : tbl.getTTable().getPartitionKeys()) {
      colIndex++;
      if (col.getName().compareTo(alterPartitionDesc.getPartKeySpec().getName()) == 0) {
        break;
      }
    }
    if (colIndex == -1 || colIndex == tbl.getTTable().getPartitionKeys().size()) {
      throw new HiveException("Cannot find partition column " + alterPartitionDesc.getPartKeySpec().getName());
    }
    TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(alterPartitionDesc.getPartKeySpec().getType());
    ObjectInspector outputOI = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType);
    Converter converter = ObjectInspectorConverters.getConverter(PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
    // For all the existing partitions, check if the value can be type cast to a non-null object
    for (Partition part : partitions) {
      if (part.getName().equals(conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME))) {
        continue;
      }
      try {
        String value = part.getValues().get(colIndex);
        Object convertedValue = converter.convert(value);
        if (convertedValue == null) {
          throw new HiveException(" Converting from " + TypeInfoFactory.stringTypeInfo + " to " + expectedType + " for value : " + value + " resulted in NULL object");
        }
      } catch (Exception e) {
        throw new HiveException("Exception while converting " + TypeInfoFactory.stringTypeInfo + " to " + expectedType + " for value : " + part.getValues().get(colIndex));
      }
    }
  } catch (Exception e) {
    throw new HiveException("Exception while checking type conversion of existing partition values to " + alterPartitionDesc.getPartKeySpec() + " : " + e.getMessage());
  }
  for (FieldSchema col : tbl.getTTable().getPartitionKeys()) {
    if (col.getName().compareTo(alterPartitionDesc.getPartKeySpec().getName()) == 0) {
      newPartitionKeys.add(alterPartitionDesc.getPartKeySpec());
    } else {
      newPartitionKeys.add(col);
    }
  }
  tbl.getTTable().setPartitionKeys(newPartitionKeys);
  try {
    db.alterTable(tabName, tbl, null);
  } catch (InvalidOperationException e) {
    throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "Unable to alter " + tabName);
  }
  work.getInputs().add(new ReadEntity(tbl));
  // We've already locked the table as the input, don't relock it as the output.
  addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
  return 0;
}
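The conversion check above builds a Converter from the Java string object inspector to the new partition column type and treats a null conversion result as an impossible cast. A small self-contained sketch of that pattern follows; PartitionValueCheck and convertsToNonNull are hypothetical names introduced here for illustration. For example, convertsToNonNull("42", "int") would return true, while convertsToNonNull("abc", "int") would return false.

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

// Hypothetical helper: can a string partition value be cast to the new column type?
final class PartitionValueCheck {

  private PartitionValueCheck() {
  }

  static boolean convertsToNonNull(String value, String newTypeName) {
    TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(newTypeName);
    ObjectInspector outputOI = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType);
    Converter converter = ObjectInspectorConverters.getConverter(
        PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
    // A null result means the value cannot be represented in the target type.
    return converter.convert(value) != null;
  }
}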
Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
Class FileSinkOperator, method closeOp.
@Override
public void closeOp(boolean abort) throws HiveException {
  row_count.set(numRows);
  LOG.info(toString() + ": records written - " + numRows);
  if (!bDynParts && !filesCreated) {
    boolean skipFiles = "tez".equalsIgnoreCase(HiveConf.getVar(hconf, ConfVars.HIVE_EXECUTION_ENGINE));
    if (skipFiles) {
      Class<?> clazz = conf.getTableInfo().getOutputFileFormatClass();
      skipFiles = !StreamingOutputFormat.class.isAssignableFrom(clazz);
    }
    if (!skipFiles) {
      createBucketFiles(fsp);
    }
  }
  lastProgressReport = System.currentTimeMillis();
  if (!abort) {
    // With ThriftJDBCBinarySerDe, rows are buffered and the remaining batch is serialized here on close
    // (the size of the buffer is kept track of in the ThriftJDBCBinarySerDe).
    if (conf.isUsingThriftJDBCBinarySerDe()) {
      try {
        recordValue = serializer.serialize(null, inputObjInspectors[0]);
        if (null != fpaths) {
          rowOutWriters = fpaths.outWriters;
          rowOutWriters[0].write(recordValue);
        }
      } catch (SerDeException | IOException e) {
        throw new HiveException(e);
      }
    }
    for (FSPaths fsp : valToPaths.values()) {
      fsp.closeWriters(abort);
      // Collect the statistics accumulated by the record writers; these are the
      // accumulated statistics which will be aggregated in case of spray writers.
      if (conf.isGatherStats() && isCollectRWStats) {
        if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID) {
          for (int idx = 0; idx < fsp.outWriters.length; idx++) {
            RecordWriter outWriter = fsp.outWriters[idx];
            if (outWriter != null) {
              SerDeStats stats = ((StatsProvidingRecordWriter) outWriter).getStats();
              if (stats != null) {
                fsp.stat.addToStat(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize());
                fsp.stat.addToStat(StatsSetupConst.ROW_COUNT, stats.getRowCount());
              }
            }
          }
        } else {
          for (int i = 0; i < fsp.updaters.length; i++) {
            if (fsp.updaters[i] != null) {
              SerDeStats stats = fsp.updaters[i].getStats();
              if (stats != null) {
                fsp.stat.addToStat(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize());
                fsp.stat.addToStat(StatsSetupConst.ROW_COUNT, stats.getRowCount());
              }
            }
          }
        }
      }
      if (isNativeTable) {
        fsp.commit(fs);
      }
    }
    // Only publish stats if this operator's flag was set to gather stats
    if (conf.isGatherStats()) {
      publishStats();
    }
  } else {
    // Abort path: an exception was thrown upstream, e.g. in map() or reduce().
    for (FSPaths fsp : valToPaths.values()) {
      fsp.abortWriters(fs, abort, !autoDelete && isNativeTable);
    }
  }
  fsp = prevFsp = null;
  super.closeOp(abort);
}