
Example 41 with Kryo

Use of com.esotericsoftware.kryo.Kryo in project hive by apache.

The class SerializationUtilities, method borrowKryo.

/**
   * By default, the kryo pool uses a ConcurrentLinkedQueue, which is unbounded. To facilitate
   * reuse of the kryo object, call releaseKryo() when done with the kryo instance. The class
   * loader for the kryo instance will be set to the current thread's context class loader.
   *
   * @return kryo instance
   */
public static Kryo borrowKryo() {
    Kryo kryo = kryoPool.borrow();
    kryo.setClassLoader(Thread.currentThread().getContextClassLoader());
    return kryo;
}
Also used : Kryo(com.esotericsoftware.kryo.Kryo)
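
The pool itself is not shown in this snippet. Below is a minimal sketch of how such a pool could be built and paired with a releaseKryo() counterpart, assuming the Kryo 3.x/4.x pool API (com.esotericsoftware.kryo.pool.KryoPool); the class name KryoPoolSketch is illustrative and Hive's actual pool construction in SerializationUtilities may differ.

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.pool.KryoFactory;
import com.esotericsoftware.kryo.pool.KryoPool;

public class KryoPoolSketch {
    // the factory creates fresh Kryo instances; the pool recycles released ones
    private static final KryoFactory factory = Kryo::new;
    private static final KryoPool kryoPool = new KryoPool.Builder(factory).softReferences().build();

    public static Kryo borrowKryo() {
        Kryo kryo = kryoPool.borrow();
        kryo.setClassLoader(Thread.currentThread().getContextClassLoader());
        return kryo;
    }

    public static void releaseKryo(Kryo kryo) {
        // return the instance to the pool so later borrowKryo() calls can reuse it
        kryoPool.release(kryo);
    }

    public static void main(String[] args) {
        // typical caller pattern: always release in a finally block
        Kryo kryo = borrowKryo();
        try {
            System.out.println("Borrowed: " + kryo);
        } finally {
            releaseKryo(kryo);
        }
    }
}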

Example 42 with Kryo

Use of com.esotericsoftware.kryo.Kryo in project hive by apache.

The class SerializationUtilities, method serializeObjectToKryo.

private static byte[] serializeObjectToKryo(Serializable object) {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    Output output = new Output(baos);
    Kryo kryo = borrowKryo();
    try {
        kryo.writeObject(output, object);
    } finally {
        releaseKryo(kryo);
    }
    output.close();
    return baos.toByteArray();
}
Also used : Output(com.esotericsoftware.kryo.io.Output) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Kryo(com.esotericsoftware.kryo.Kryo)

Example 43 with Kryo

Use of com.esotericsoftware.kryo.Kryo in project hive by apache.

The class SerializationUtilities, method deserializeObjectFromKryo.

private static <T extends Serializable> T deserializeObjectFromKryo(byte[] bytes, Class<T> clazz) {
    Input inp = new Input(new ByteArrayInputStream(bytes));
    Kryo kryo = borrowKryo();
    T func = null;
    try {
        func = kryo.readObject(inp, clazz);
    } finally {
        releaseKryo(kryo);
    }
    inp.close();
    return func;
}
Also used : Input(com.esotericsoftware.kryo.io.Input) ByteArrayInputStream(java.io.ByteArrayInputStream) Kryo(com.esotericsoftware.kryo.Kryo)
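
Taken together, Examples 42 and 43 form a serialize/deserialize round trip. The self-contained sketch below shows the same pattern with a plain Kryo instance instead of the pool, assuming Kryo 3.x/4.x defaults (class registration not required), which is what Hive relied on at the time.

import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
import com.esotericsoftware.kryo.io.Output;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.ArrayList;
import java.util.Arrays;

public class KryoRoundTripSketch {
    public static void main(String[] args) {
        Kryo kryo = new Kryo();

        // serialize: write the object into an in-memory buffer
        ArrayList<String> original = new ArrayList<>(Arrays.asList("a", "b", "c"));
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        Output output = new Output(baos);
        kryo.writeObject(output, original);
        output.close();
        byte[] bytes = baos.toByteArray();

        // deserialize: read the same class back from the bytes
        Input input = new Input(new ByteArrayInputStream(bytes));
        ArrayList<?> copy = kryo.readObject(input, ArrayList.class);
        input.close();
        System.out.println(copy);
    }
}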

Example 44 with Kryo

Use of com.esotericsoftware.kryo.Kryo in project hive by apache.

The class HybridHashTableContainer, method spillPartition.

/**
   * Move the hashtable of a specified partition from memory into local file system
   * @param partitionId the hashtable to be moved
   * @return amount of memory freed
   */
public long spillPartition(int partitionId) throws IOException {
    HashPartition partition = hashPartitions[partitionId];
    int inMemRowCount = partition.hashMap.getNumValues();
    if (inMemRowCount == 0) {
        LOG.warn("Trying to spill an empty hash partition! It may be due to " + "hive.auto.convert.join.noconditionaltask.size being set too low.");
    }
    File file = FileUtils.createLocalDirsTempFile(spillLocalDirs, "partition-" + partitionId + "-", null, false);
    OutputStream outputStream = new FileOutputStream(file, false);
    com.esotericsoftware.kryo.io.Output output = new com.esotericsoftware.kryo.io.Output(outputStream);
    Kryo kryo = SerializationUtilities.borrowKryo();
    try {
        LOG.info("Trying to spill hash partition " + partitionId + " ...");
        // use Kryo to serialize hashmap
        kryo.writeObject(output, partition.hashMap);
        output.close();
        outputStream.close();
    } finally {
        SerializationUtilities.releaseKryo(kryo);
    }
    partition.hashMapLocalPath = file.toPath();
    partition.hashMapOnDisk = true;
    LOG.info("Spilling hash partition " + partitionId + " (Rows: " + inMemRowCount + ", Mem size: " + partition.hashMap.memorySize() + "): " + file);
    LOG.info("Memory usage before spilling: " + memoryUsed);
    long memFreed = partition.hashMap.memorySize();
    memoryUsed -= memFreed;
    LOG.info("Memory usage after spilling: " + memoryUsed);
    partition.rowsOnDisk = inMemRowCount;
    totalInMemRowCount -= inMemRowCount;
    partition.hashMap.clear();
    partition.hashMap = null;
    return memFreed;
}
Also used : ObjectOutputStream (java.io.ObjectOutputStream), OutputStream (java.io.OutputStream), FileOutputStream (java.io.FileOutputStream), Output (org.apache.hadoop.hive.serde2.ByteStream.Output), File (java.io.File), Kryo (com.esotericsoftware.kryo.Kryo)
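
Since spillPartition writes the hash map to a local file with kryo.writeObject, the symmetric read path opens that file and calls kryo.readObject with the same class. The sketch below is a hypothetical counterpart (the method name and signature are illustrative, not Hive's actual API), assuming partition.hashMap is Hive's org.apache.hadoop.hive.ql.exec.persistence.BytesBytesMultiHashMap and that java.io.FileInputStream and java.nio.file.Path are imported.

// Hypothetical counterpart to spillPartition: read the spilled hash map back from
// the local file using the same Kryo pool. The method name is illustrative only;
// the spilled object's type is assumed to be BytesBytesMultiHashMap.
private BytesBytesMultiHashMap loadSpilledHashMap(java.nio.file.Path hashMapLocalPath) throws IOException {
    InputStream inputStream = new FileInputStream(hashMapLocalPath.toFile());
    com.esotericsoftware.kryo.io.Input input = new com.esotericsoftware.kryo.io.Input(inputStream);
    Kryo kryo = SerializationUtilities.borrowKryo();
    try {
        // mirror of kryo.writeObject(output, partition.hashMap) in spillPartition
        return kryo.readObject(input, BytesBytesMultiHashMap.class);
    } finally {
        SerializationUtilities.releaseKryo(kryo);
        input.close();
    }
}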

Example 45 with Kryo

Use of com.esotericsoftware.kryo.Kryo in project hive by apache.

The class Utilities, method setBaseWork.

private static Path setBaseWork(Configuration conf, BaseWork w, Path hiveScratchDir, String name, boolean useCache) {
    Kryo kryo = SerializationUtilities.borrowKryo();
    try {
        setPlanPath(conf, hiveScratchDir);
        Path planPath = getPlanPath(conf, name);
        setHasWork(conf, name);
        OutputStream out = null;
        final long serializedSize;
        final String planMode;
        if (HiveConf.getBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN)) {
            // add it to the conf
            ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
            try {
                out = new DeflaterOutputStream(byteOut, new Deflater(Deflater.BEST_SPEED));
                SerializationUtilities.serializePlan(kryo, w, out);
                out.close();
                out = null;
            } finally {
                IOUtils.closeStream(out);
            }
            final String serializedPlan = Base64.encodeBase64String(byteOut.toByteArray());
            serializedSize = serializedPlan.length();
            planMode = "RPC";
            conf.set(planPath.toUri().getPath(), serializedPlan);
        } else {
            // use the default file system of the conf
            FileSystem fs = planPath.getFileSystem(conf);
            try {
                out = fs.create(planPath);
                SerializationUtilities.serializePlan(kryo, w, out);
                out.close();
                out = null;
                long fileLen = fs.getFileStatus(planPath).getLen();
                serializedSize = fileLen;
                planMode = "FILE";
            } finally {
                IOUtils.closeStream(out);
            }
            // in local mode the plan can be read directly from the in-process cache,
            // so the distributed cache is only set up otherwise
            if (useCache && !ShimLoader.getHadoopShims().isLocalMode(conf)) {
                // Set up distributed cache
                if (!DistributedCache.getSymlink(conf)) {
                    DistributedCache.createSymlink(conf);
                }
                String uriWithLink = planPath.toUri().toString() + "#" + name;
                DistributedCache.addCacheFile(new URI(uriWithLink), conf);
                // set replication of the plan file to a high number. we use the same
                // replication factor as used by the hadoop jobclient for job.xml etc.
                short replication = (short) conf.getInt("mapred.submit.replication", 10);
                fs.setReplication(planPath, replication);
            }
        }
        LOG.info("Serialized plan (via {}) - name: {} size: {}", planMode, w.getName(), humanReadableByteCount(serializedSize));
        // Cache the plan in this process
        gWorkMap.get(conf).put(planPath, w);
        return planPath;
    } catch (Exception e) {
        String msg = "Error caching " + name + ": " + e;
        LOG.error(msg, e);
        throw new RuntimeException(msg, e);
    } finally {
        SerializationUtilities.releaseKryo(kryo);
    }
}
Also used : Path (org.apache.hadoop.fs.Path), ByteArrayOutputStream (java.io.ByteArrayOutputStream), DeflaterOutputStream (java.util.zip.DeflaterOutputStream), OutputStream (java.io.OutputStream), URI (java.net.URI), SQLFeatureNotSupportedException (java.sql.SQLFeatureNotSupportedException), SQLTransientException (java.sql.SQLTransientException), SQLException (java.sql.SQLException), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException), SerDeException (org.apache.hadoop.hive.serde2.SerDeException), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException), EOFException (java.io.EOFException), FileNotFoundException (java.io.FileNotFoundException), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), Deflater (java.util.zip.Deflater), FileSystem (org.apache.hadoop.fs.FileSystem), Kryo (com.esotericsoftware.kryo.Kryo)
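
setBaseWork only writes the plan; the read side reverses each step. Below is a hedged sketch of the RPC branch's counterpart. The method name getBaseWorkFromConf is hypothetical (Hive's own reader, Utilities.getBaseWork, handles both branches), and it assumes serializePlan ultimately wrote the work object with kryo.writeObject so a mirrored kryo.readObject suffices. It uses org.apache.commons.codec.binary.Base64, java.util.zip.InflaterInputStream, java.io.ByteArrayInputStream, and com.esotericsoftware.kryo.io.Input.

// Hypothetical reverse of the RPC branch: decode the Base64 plan stored in the
// conf, inflate it, and deserialize the work object. Assumes serializePlan wrote
// the object via kryo.writeObject; the FILE branch is not covered here.
private static BaseWork getBaseWorkFromConf(Configuration conf, Path planPath, Class<? extends BaseWork> clazz) throws IOException {
    Kryo kryo = SerializationUtilities.borrowKryo();
    try {
        String serializedPlan = conf.get(planPath.toUri().getPath());
        byte[] compressed = Base64.decodeBase64(serializedPlan);
        Input input = new Input(new InflaterInputStream(new ByteArrayInputStream(compressed)));
        try {
            return kryo.readObject(input, clazz);
        } finally {
            input.close();
        }
    } finally {
        SerializationUtilities.releaseKryo(kryo);
    }
}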

Aggregations

Kryo (com.esotericsoftware.kryo.Kryo): 71
Input (com.esotericsoftware.kryo.io.Input): 31
Output (com.esotericsoftware.kryo.io.Output): 29
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 17
ByteArrayInputStream (java.io.ByteArrayInputStream): 16
StdInstantiatorStrategy (org.objenesis.strategy.StdInstantiatorStrategy): 13
Test (org.junit.Test): 10
Test (org.testng.annotations.Test): 8
ArrayList (java.util.ArrayList): 6
BigIntegerSerializer (com.esotericsoftware.kryo.serializers.DefaultSerializers.BigIntegerSerializer): 5
File (java.io.File): 5
FileNotFoundException (java.io.FileNotFoundException): 5
IOException (java.io.IOException): 5
List (java.util.List): 5
Map (java.util.Map): 5
BaseTest (org.broadinstitute.hellbender.utils.test.BaseTest): 5
ArrayListSerializer (backtype.storm.serialization.types.ArrayListSerializer): 3
HashMapSerializer (backtype.storm.serialization.types.HashMapSerializer): 3
HashSetSerializer (backtype.storm.serialization.types.HashSetSerializer): 3
Serializer (com.esotericsoftware.kryo.Serializer): 3