Use of com.esotericsoftware.kryo.KryoException in project cas by apereo — from the class CasKryoTranscoder, method encode:
/**
 * Serializes the given object with a pooled Kryo instance and wraps the
 * resulting bytes in a memcached {@code CachedData} payload (flags set to 0).
 *
 * @param obj the object to serialize; may be {@code null} (Kryo writes its
 *            null marker via {@code writeClassAndObject})
 * @return the serialized payload ready to hand to memcached
 * @throws KryoException wrapping any failure while borrowing a Kryo instance
 *                       or writing the object
 */
@Override
public CachedData encode(final Object obj) {
    try (CloseableKryo kryo = this.kryoPool.borrow();
         ByteArrayOutputStream buffer = new ByteArrayOutputStream();
         Output kryoOutput = new Output(buffer)) {
        if (obj != null) {
            LOGGER.trace("Writing object [{}] to memcached ", obj.getClass());
        }
        kryo.writeClassAndObject(kryoOutput, obj);
        // Push any bytes still buffered in the Output down into the stream
        // before snapshotting the byte array.
        kryoOutput.flush();
        final byte[] serialized = buffer.toByteArray();
        return new CachedData(0, serialized, serialized.length);
    } catch (final Exception e) {
        // Preserve the original failure as the cause.
        throw new KryoException(e);
    }
}
Use of com.esotericsoftware.kryo.KryoException in project hive by apache — from the class MapJoinOperator, method closeOp:
/**
 * Operator tear-down hook. For Hybrid Grace Hash Join, any small-table hash
 * partitions that were spilled to disk are reloaded and re-processed here
 * (one partition at a time) before the operator's table containers are
 * cleared and the cache entry is removed. In the map-reduce
 * input-file-change-sensitive case the containers are always cleared, since
 * MR has no object registry to share them across tasks.
 *
 * @param abort true when the task is being aborted; spilled data is then
 *              dropped rather than reprocessed
 * @throws HiveException if reprocessing a spilled partition fails
 */
@Override
public void closeOp(boolean abort) throws HiveException {
    // Detect whether any small-table container spilled rows to disk,
    // dumping per-container metrics along the way.
    boolean spilled = false;
    for (MapJoinTableContainer container : mapJoinTables) {
        if (container != null) {
            spilled = spilled || container.hasSpill();
            container.dumpMetrics();
        }
    }
    // For Hybrid Grace Hash Join, we need to see if there is any spilled data to be processed next
    if (spilled) {
        if (!abort) {
            if (hashMapRowGetters == null) {
                hashMapRowGetters = new ReusableGetAdaptor[mapJoinTables.length];
            }
            int numPartitions = 0;
            // Find out number of partitions for each small table (should be same across tables)
            for (byte pos = 0; pos < mapJoinTables.length; pos++) {
                if (pos != conf.getPosBigTable()) {
                    firstSmallTable = (HybridHashTableContainer) mapJoinTables[pos];
                    numPartitions = firstSmallTable.getHashPartitions().length;
                    break;
                }
            }
            assert numPartitions != 0 : "Number of partitions must be greater than 0!";
            // NOTE(review): firstSmallTable stays null if every position equals the
            // big-table position — presumably impossible for a valid map join, but
            // the dereference below would NPE in that case; confirm.
            if (firstSmallTable.hasSpill()) {
                spilledMapJoinTables = new MapJoinBytesTableContainer[mapJoinTables.length];
                hybridMapJoinLeftover = true;
                // Clear all in-memory partitions first
                for (byte pos = 0; pos < mapJoinTables.length; pos++) {
                    MapJoinTableContainer tableContainer = mapJoinTables[pos];
                    // NOTE(review): the null check is redundant — `instanceof` is
                    // already false for null.
                    if (tableContainer != null && tableContainer instanceof HybridHashTableContainer) {
                        HybridHashTableContainer hybridHtContainer = (HybridHashTableContainer) tableContainer;
                        hybridHtContainer.dumpStats();
                        HashPartition[] hashPartitions = hybridHtContainer.getHashPartitions();
                        // Clear all in memory partitions first
                        for (int i = 0; i < hashPartitions.length; i++) {
                            if (!hashPartitions[i].isHashMapOnDisk()) {
                                // Deduct this partition's rows from the container's
                                // in-memory row count before dropping the map.
                                hybridHtContainer.setTotalInMemRowCount(hybridHtContainer.getTotalInMemRowCount() - hashPartitions[i].getHashMapFromMemory().getNumValues());
                                hashPartitions[i].getHashMapFromMemory().clear();
                            }
                        }
                        // Every in-memory row should have been accounted for above.
                        assert hybridHtContainer.getTotalInMemRowCount() == 0;
                    }
                }
                // Reprocess the spilled data
                for (int i = 0; i < numPartitions; i++) {
                    HashPartition[] hashPartitions = firstSmallTable.getHashPartitions();
                    if (hashPartitions[i].isHashMapOnDisk()) {
                        try {
                            // Re-process spilled data
                            continueProcess(i);
                        } catch (KryoException ke) {
                            // Deserialization of the spilled partition failed;
                            // clean up all spilled data, then surface the error.
                            LOG.error("Processing the spilled data failed due to Kryo error!");
                            LOG.error("Cleaning up all spilled data!");
                            cleanupGraceHashJoin();
                            throw new HiveException(ke);
                        } catch (Exception e) {
                            throw new HiveException(e);
                        }
                        // Release the reloaded small-table references for this
                        // partition now that it has been processed.
                        for (byte pos = 0; pos < order.length; pos++) {
                            if (pos != conf.getPosBigTable())
                                spilledMapJoinTables[pos] = null;
                        }
                    }
                }
            }
        }
        if (LOG.isInfoEnabled()) {
            LOG.info("spilled: " + spilled + " abort: " + abort + ". Clearing spilled partitions.");
        }
        // spilled tables are loaded always (no sharing), so clear it
        clearAllTableContainers();
        cache.remove(cacheKey);
    }
    // in mapreduce case, we need to always clear up as mapreduce doesn't have object registry.
    if ((this.getExecContext() != null) && (this.getExecContext().getLocalWork() != null) && (this.getExecContext().getLocalWork().getInputFileChangeSensitive()) && !(HiveConf.getVar(hconf, ConfVars.HIVE_EXECUTION_ENGINE).equals("spark") && SparkUtilities.isDedicatedCluster(hconf))) {
        if (LOG.isInfoEnabled()) {
            LOG.info("MR: Clearing all map join table containers.");
        }
        clearAllTableContainers();
    }
    this.loader = null;
    super.closeOp(abort);
}
Aggregations