use of com.esotericsoftware.kryo.KryoException in project hive by apache.
the class MapJoinOperator method closeOp.
@Override
public void closeOp(boolean abort) throws HiveException {
  boolean spilled = false;
  for (MapJoinTableContainer container : mapJoinTables) {
    if (container != null) {
      spilled = spilled || container.hasSpill();
      container.dumpMetrics();
    }
  }
  // For Hybrid Grace Hash Join, we need to see if there is any spilled data to be processed next
  if (spilled) {
    if (!abort) {
      if (hashMapRowGetters == null) {
        hashMapRowGetters = new ReusableGetAdaptor[mapJoinTables.length];
      }
      int numPartitions = 0;
      // Find out the number of partitions for each small table (should be the same across tables)
      for (byte pos = 0; pos < mapJoinTables.length; pos++) {
        if (pos != conf.getPosBigTable()) {
          firstSmallTable = (HybridHashTableContainer) mapJoinTables[pos];
          numPartitions = firstSmallTable.getHashPartitions().length;
          break;
        }
      }
      assert numPartitions != 0 : "Number of partitions must be greater than 0!";
      if (firstSmallTable.hasSpill()) {
        spilledMapJoinTables = new MapJoinBytesTableContainer[mapJoinTables.length];
        hybridMapJoinLeftover = true;
        // Clear all in-memory partitions first
        for (byte pos = 0; pos < mapJoinTables.length; pos++) {
          MapJoinTableContainer tableContainer = mapJoinTables[pos];
          if (tableContainer != null && tableContainer instanceof HybridHashTableContainer) {
            HybridHashTableContainer hybridHtContainer = (HybridHashTableContainer) tableContainer;
            hybridHtContainer.dumpStats();
            HashPartition[] hashPartitions = hybridHtContainer.getHashPartitions();
            for (int i = 0; i < hashPartitions.length; i++) {
              if (!hashPartitions[i].isHashMapOnDisk()) {
                hybridHtContainer.setTotalInMemRowCount(hybridHtContainer.getTotalInMemRowCount()
                    - hashPartitions[i].getHashMapFromMemory().getNumValues());
                hashPartitions[i].getHashMapFromMemory().clear();
              }
            }
            assert hybridHtContainer.getTotalInMemRowCount() == 0;
          }
        }
        // Reprocess the spilled data
        for (int i = 0; i < numPartitions; i++) {
          HashPartition[] hashPartitions = firstSmallTable.getHashPartitions();
          if (hashPartitions[i].isHashMapOnDisk()) {
            try {
              // Re-process spilled data
              continueProcess(i);
            } catch (KryoException ke) {
              LOG.error("Processing the spilled data failed due to Kryo error!");
              LOG.error("Cleaning up all spilled data!");
              cleanupGraceHashJoin();
              throw new HiveException(ke);
            } catch (Exception e) {
              throw new HiveException(e);
            }
            for (byte pos = 0; pos < order.length; pos++) {
              if (pos != conf.getPosBigTable()) {
                spilledMapJoinTables[pos] = null;
              }
            }
          }
        }
      }
    }
    if (isLogInfoEnabled) {
      LOG.info("spilled: " + spilled + " abort: " + abort + ". Clearing spilled partitions.");
    }
    // Spilled tables are always loaded (no sharing), so clear them
    clearAllTableContainers();
    cache.remove(cacheKey);
  }
  // In the MapReduce case we always need to clean up, since MapReduce has no object registry
  if ((this.getExecContext() != null) && (this.getExecContext().getLocalWork() != null)
      && (this.getExecContext().getLocalWork().getInputFileChangeSensitive())
      && !(HiveConf.getVar(hconf, ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")
          && SparkUtilities.isDedicatedCluster(hconf))) {
    if (isLogInfoEnabled) {
      LOG.info("MR: Clearing all map join table containers.");
    }
    clearAllTableContainers();
  }
  this.loader = null;
  super.closeOp(abort);
}
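The pattern worth noting here: the KryoException catch releases the on-disk spill state (cleanupGraceHashJoin) before wrapping the error in a HiveException, so a failed reprocess cannot leave stale spill files behind. Below is a minimal sketch of that cleanup-then-wrap pattern; SpillStore, ProcessingException and SpillReprocessor are hypothetical names for illustration, not Hive APIs.

import com.esotericsoftware.kryo.KryoException;

// Illustrative stand-ins for Hive's spilled-table containers and HiveException.
interface SpillStore {
  void reload(int partition) throws KryoException; // Kryo-deserializes one spilled partition
  void deleteAll();                                // removes all spill files
}

class ProcessingException extends Exception {
  ProcessingException(Throwable cause) { super(cause); }
}

final class SpillReprocessor {
  private final SpillStore spills;

  SpillReprocessor(SpillStore spills) { this.spills = spills; }

  void reprocessAll(int numPartitions) throws ProcessingException {
    for (int i = 0; i < numPartitions; i++) {
      try {
        spills.reload(i);
      } catch (KryoException ke) {
        // A Kryo failure means the on-disk data is unusable; clean it up
        // before surfacing the error, as closeOp() does above.
        spills.deleteAll();
        throw new ProcessingException(ke);
      } catch (Exception e) {
        throw new ProcessingException(e);
      }
    }
  }
}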
use of com.esotericsoftware.kryo.KryoException in project Paper by pilgr.
the class DbStoragePlainFile method writeTableFile.
/**
 * Attempt to write the file and delete the backup, as atomically as
 * possible. If any exception occurs, delete the new file; the next read
 * will restore from the backup.
 *
 * @param key table key
 * @param paperTable table instance
 * @param originalFile file to write the new data to
 * @param backupFile backup file to fall back on if the write fails
 */
private <E> void writeTableFile(String key, PaperTable<E> paperTable, File originalFile, File backupFile) {
  try {
    FileOutputStream fileStream = new FileOutputStream(originalFile);
    final Output kryoOutput = new Output(fileStream);
    getKryo().writeObject(kryoOutput, paperTable);
    kryoOutput.flush();
    fileStream.flush();
    sync(fileStream);
    // Also closes the underlying file stream
    kryoOutput.close();
    // Writing was successful, delete the backup file if there is one.
    //noinspection ResultOfMethodCallIgnored
    backupFile.delete();
  } catch (IOException | KryoException e) {
    // Clean up an unsuccessfully written file
    if (originalFile.exists()) {
      if (!originalFile.delete()) {
        throw new PaperDbException("Couldn't clean up partially-written file " + originalFile, e);
      }
    }
    throw new PaperDbException("Couldn't save table: " + key + ". "
        + "Backed up table will be used on next read attempt", e);
  }
}
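The javadoc promises that a failed write is recoverable because the backup survives. A minimal sketch of the read-side counterpart that promise implies, assuming the same originalFile/backupFile pair; this is an illustration, not Paper's actual readTableFile.

import java.io.File;

final class BackupRestore {
  // If a backup exists, the last write did not complete: discard the
  // (possibly partial) original and promote the backup before reading.
  static File fileToRead(File originalFile, File backupFile) {
    if (backupFile.exists()) {
      //noinspection ResultOfMethodCallIgnored
      originalFile.delete();
      //noinspection ResultOfMethodCallIgnored
      backupFile.renameTo(originalFile);
    }
    return originalFile;
  }
}

Deleting the new file on failure is what makes this safe: a reader only ever sees either the fully synced original or the intact backup.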
use of com.esotericsoftware.kryo.KryoException in project apex-core by apache.
the class Journal method write.
final void write(Recoverable op) {
  if (replayMode.get()) {
    throw new IllegalStateException("Request to write while journal is replaying operations");
  }
  Integer classId = RecoverableOperation.getId(op.getClass());
  if (classId == null) {
    throw new IllegalArgumentException("Class not registered " + op.getClass());
  }
  while (true) {
    final Output out = output.get();
    if (out != null) {
      // need to atomically write id, operation and flush the output stream
      synchronized (out) {
        try {
          LOG.debug("WAL write {}", RecoverableOperation.get(classId));
          out.writeInt(classId);
          op.write(out);
          out.flush();
          break;
        } catch (KryoException e) {
          // The write can fail because the current stream was closed during rotation.
          // If the stream reference is unchanged the failure is genuine: rethrow.
          // Otherwise loop and retry against the replacement stream.
          if (output.get() == out) {
            throw e;
          }
        }
      }
    } else {
      LOG.warn("Journal output stream is null. Skipping write to the WAL.");
      break;
    }
  }
}
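The retry loop only makes sense if another thread can swap in a replacement stream: the catch block rethrows when output.get() == out and retries otherwise. A sketch of the rotation side that assumption implies; JournalStreams and setOutputStream are illustrative names, not apex-core's actual API.

import java.util.concurrent.atomic.AtomicReference;
import com.esotericsoftware.kryo.io.Output;

final class JournalStreams {
  private final AtomicReference<Output> output = new AtomicReference<>();

  // Swapping in a new Output makes output.get() != out in the catch block
  // above, so a writer that hit a KryoException on the old stream retries
  // instead of rethrowing.
  void setOutputStream(Output newOut) {
    Output old = output.getAndSet(newOut);
    if (old != null) {
      old.close(); // closing the old stream is what makes late writers fail
    }
  }
}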
use of com.esotericsoftware.kryo.KryoException in project beam by apache.
the class StatelessJavaSerializer method write.
@SuppressWarnings("unchecked")
public void write(Kryo kryo, Output output, Object object) {
  try {
    // Kryo's Output extends OutputStream, so standard Java serialization
    // can write directly into it
    ObjectOutputStream objectStream = new ObjectOutputStream(output);
    objectStream.writeObject(object);
    objectStream.flush();
  } catch (Exception e) {
    throw new KryoException("Error during Java serialization.", e);
  }
}
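For symmetry, the read half of this serializer would wrap an ObjectInputStream around Kryo's Input (which extends InputStream) the same way, again converting any failure to a KryoException. A hedged sketch of what that looks like; Beam's actual read method may differ in its details.

@SuppressWarnings("unchecked")
public <T> T read(Kryo kryo, Input input, Class<T> type) {
  try {
    // Input extends InputStream, so Java deserialization can read from it directly
    ObjectInputStream objectStream = new ObjectInputStream(input);
    return (T) objectStream.readObject();
  } catch (Exception e) {
    throw new KryoException("Error during Java deserialization.", e);
  }
}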