
Example 6 with CountCumSum

Use of org.deeplearning4j.spark.text.functions.CountCumSum in the project deeplearning4j by deeplearning4j.

From the class TextPipelineTest, method testFirstIteration:

@Test
public void testFirstIteration() throws Exception {
    JavaSparkContext sc = getContext();
    JavaRDD<String> corpusRDD = getCorpusRDD(sc);
    // word2vec.setRemoveStop(false);
    Broadcast<Map<String, Object>> broadcastTokenizerVarMap = sc.broadcast(word2vec.getTokenizerVarMap());
    TextPipeline pipeline = new TextPipeline(corpusRDD, broadcastTokenizerVarMap);
    pipeline.buildVocabCache();
    pipeline.buildVocabWordListRDD();
    VocabCache<VocabWord> vocabCache = pipeline.getVocabCache();
    /*
    Huffman huffman = new Huffman(vocabCache.vocabWords());
    huffman.build();
    huffman.applyIndexes(vocabCache);
    */
    VocabWord token = vocabCache.tokenFor("strange");
    VocabWord word = vocabCache.wordFor("strange");
    log.info("Strange token: " + token);
    log.info("Strange word: " + word);
    // Get total word count and put into word2vec variable map
    Map<String, Object> word2vecVarMap = word2vec.getWord2vecVarMap();
    word2vecVarMap.put("totalWordCount", pipeline.getTotalWordCount());
    double[] expTable = word2vec.getExpTable();
    JavaRDD<AtomicLong> sentenceCountRDD = pipeline.getSentenceCountRDD();
    JavaRDD<List<VocabWord>> vocabWordListRDD = pipeline.getVocabWordListRDD();
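    // Build a running (cumulative) word count across sentences and pair each
    // sentence's VocabWord list with its cumulative count via zip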
    CountCumSum countCumSum = new CountCumSum(sentenceCountRDD);
    JavaRDD<Long> sentenceCountCumSumRDD = countCumSum.buildCumSum();
    JavaPairRDD<List<VocabWord>, Long> vocabWordListSentenceCumSumRDD = vocabWordListRDD.zip(sentenceCountCumSumRDD);
    Broadcast<Map<String, Object>> word2vecVarMapBroadcast = sc.broadcast(word2vecVarMap);
    Broadcast<double[]> expTableBroadcast = sc.broadcast(expTable);
    Iterator<Tuple2<List<VocabWord>, Long>> iterator = vocabWordListSentenceCumSumRDD.collect().iterator();
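    // Run the first word2vec training pass over the collected (sentence, cumulative count) pairs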
    FirstIterationFunctionAdapter firstIterationFunction = new FirstIterationFunctionAdapter(word2vecVarMapBroadcast, expTableBroadcast, pipeline.getBroadCastVocabCache());
    Iterable<Map.Entry<VocabWord, INDArray>> ret = firstIterationFunction.call(iterator);
    assertTrue(ret.iterator().hasNext());
}
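For readers who want to try CountCumSum outside the full TextPipelineTest fixture, here is a minimal sketch. The local SparkContext and the hard-coded per-sentence counts are illustrative stand-ins for the test's getContext() and TextPipeline.getSentenceCountRDD(); only the CountCumSum constructor and buildCumSum() shown in the test above are assumed.

import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.deeplearning4j.spark.text.functions.CountCumSum;

public class CountCumSumSketch {
    public static void main(String[] args) {
        // Local Spark context for illustration; the test uses its own getContext() helper
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("CountCumSumSketch");
        JavaSparkContext sc = new JavaSparkContext(conf);

        // Hypothetical per-sentence word counts, standing in for TextPipeline.getSentenceCountRDD()
        JavaRDD<AtomicLong> sentenceCountRDD = sc.parallelize(
                Arrays.asList(new AtomicLong(3), new AtomicLong(2), new AtomicLong(4)));

        // Build the running word count across sentences, as in the test above
        CountCumSum countCumSum = new CountCumSum(sentenceCountRDD);
        JavaRDD<Long> sentenceCountCumSumRDD = countCumSum.buildCumSum();

        // Inspect the cumulative counts (one Long per sentence)
        System.out.println(sentenceCountCumSumRDD.collect());

        sc.stop();
    }
}

The resulting JavaRDD<Long> is what the test pairs with each sentence's VocabWord list via zip() before handing the tuples to FirstIterationFunctionAdapter.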
Also used: VocabWord(org.deeplearning4j.models.word2vec.VocabWord) CountCumSum(org.deeplearning4j.spark.text.functions.CountCumSum) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) TextPipeline(org.deeplearning4j.spark.text.functions.TextPipeline) AtomicLong(java.util.concurrent.atomic.AtomicLong) Tuple2(scala.Tuple2) FirstIterationFunctionAdapter(org.deeplearning4j.spark.models.embeddings.word2vec.FirstIterationFunctionAdapter) Test(org.junit.Test)

Aggregations

AtomicLong (java.util.concurrent.atomic.AtomicLong): 6 usages
JavaSparkContext (org.apache.spark.api.java.JavaSparkContext): 6 usages
CountCumSum (org.deeplearning4j.spark.text.functions.CountCumSum): 6 usages
TextPipeline (org.deeplearning4j.spark.text.functions.TextPipeline): 6 usages
VocabWord (org.deeplearning4j.models.word2vec.VocabWord): 5 usages
Test (org.junit.Test): 5 usages
Tuple2 (scala.Tuple2): 3 usages
Pair (org.deeplearning4j.berkeley.Pair): 2 usages
ArrayList (java.util.ArrayList): 1 usage
HashMap (java.util.HashMap): 1 usage
List (java.util.List): 1 usage
Map (java.util.Map): 1 usage
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 1 usage
FlatMapFunction (org.apache.spark.api.java.function.FlatMapFunction): 1 usage
InMemoryLookupTable (org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable): 1 usage
Huffman (org.deeplearning4j.models.word2vec.Huffman): 1 usage
VocabCache (org.deeplearning4j.models.word2vec.wordstore.VocabCache): 1 usage
FirstIterationFunction (org.deeplearning4j.spark.models.embeddings.word2vec.FirstIterationFunction): 1 usage
FirstIterationFunctionAdapter (org.deeplearning4j.spark.models.embeddings.word2vec.FirstIterationFunctionAdapter): 1 usage
MapToPairFunction (org.deeplearning4j.spark.models.embeddings.word2vec.MapToPairFunction): 1 usage