Use of org.deeplearning4j.models.word2vec.VocabWord in project deeplearning4j by deeplearning4j.
Class BagOfWordsVectorizerTest, method testBagOfWordsVectorizer:
@Test
public void testBagOfWordsVectorizer() throws Exception {
    File rootDir = new ClassPathResource("rootdir").getFile();
    LabelAwareSentenceIterator iter = new LabelAwareFileSentenceIterator(rootDir);
    List<String> labels = Arrays.asList("label1", "label2");
    TokenizerFactory tokenizerFactory = new DefaultTokenizerFactory();

    BagOfWordsVectorizer vectorizer = new BagOfWordsVectorizer.Builder()
                    .setMinWordFrequency(1)
                    .setStopWords(new ArrayList<String>())
                    .setTokenizerFactory(tokenizerFactory)
                    .setIterator(iter)
                    .allowParallelTokenization(false)
                    .build();
    vectorizer.fit();

    // "file." appears in both training documents (2 of 2 docs)
    VocabWord word = vectorizer.getVocabCache().wordFor("file.");
    assumeNotNull(word);
    assertEquals(word, vectorizer.getVocabCache().tokenFor("file."));
    assertEquals(2, vectorizer.getVocabCache().totalNumberOfDocs());
    assertEquals(2, word.getSequencesCount());
    assertEquals(2, word.getElementFrequency(), 0.1);

    // "1" occurs in only one of the two documents
    VocabWord word1 = vectorizer.getVocabCache().wordFor("1");
    assertEquals(1, word1.getSequencesCount());
    assertEquals(1, word1.getElementFrequency(), 0.1);

    log.info("Labels used: " + vectorizer.getLabelsSource().getLabels());
    assertEquals(2, vectorizer.getLabelsSource().getNumberOfLabelsUsed());

    // transform(): feature values are corpus-wide token frequencies for tokens
    // present in the input, not per-sentence counts ("This" occurs once here
    // but twice across the training corpus, hence the expected value 2)
    INDArray array = vectorizer.transform("This is 2 file.");
    log.info("Transformed array: " + array);
    assertEquals(5, array.columns());
    VocabCache<VocabWord> vocabCache = vectorizer.getVocabCache();
    assertEquals(2, array.getDouble(vocabCache.tokenFor("This").getIndex()), 0.1);
    assertEquals(2, array.getDouble(vocabCache.tokenFor("is").getIndex()), 0.1);
    assertEquals(2, array.getDouble(vocabCache.tokenFor("file.").getIndex()), 0.1);
    assertEquals(0, array.getDouble(vocabCache.tokenFor("1").getIndex()), 0.1);
    assertEquals(1, array.getDouble(vocabCache.tokenFor("2").getIndex()), 0.1);

    // vectorize() pairs the same features with a one-hot label vector
    DataSet dataSet = vectorizer.vectorize("This is 2 file.", "label2");
    assertEquals(array, dataSet.getFeatureMatrix());
    INDArray labelz = dataSet.getLabels();
    log.info("Labels array: " + labelz);

    // Fixed label positions are not guaranteed, so compare argmax positions
    // instead of asserting hard-coded indices:
    // assertEquals(1.0, dataSet.getLabels().getDouble(0), 0.1);
    // assertEquals(0.0, dataSet.getLabels().getDouble(1), 0.1);
    int idx2 = ((IndexAccumulation) Nd4j.getExecutioner().exec(new IMax(labelz))).getFinalResult();

    dataSet = vectorizer.vectorize("This is 1 file.", "label1");
    assertEquals(2, dataSet.getFeatureMatrix().getDouble(vocabCache.tokenFor("This").getIndex()), 0.1);
    assertEquals(2, dataSet.getFeatureMatrix().getDouble(vocabCache.tokenFor("is").getIndex()), 0.1);
    assertEquals(2, dataSet.getFeatureMatrix().getDouble(vocabCache.tokenFor("file.").getIndex()), 0.1);
    assertEquals(1, dataSet.getFeatureMatrix().getDouble(vocabCache.tokenFor("1").getIndex()), 0.1);
    assertEquals(0, dataSet.getFeatureMatrix().getDouble(vocabCache.tokenFor("2").getIndex()), 0.1);
    // assertEquals(0.0, dataSet.getLabels().getDouble(0), 0.1);
    // assertEquals(1.0, dataSet.getLabels().getDouble(1), 0.1);
    int idx1 = ((IndexAccumulation) Nd4j.getExecutioner().exec(new IMax(dataSet.getLabels()))).getFinalResult();

    // Different labels must map to different one-hot positions
    assertNotEquals(idx2, idx1);

    // Serialization round-trip: the restored vectorizer must produce identical
    // features. The tokenizer factory is not serialized and must be re-set.
    File tempFile = File.createTempFile("fdsf", "fdfsdf");
    tempFile.deleteOnExit();
    SerializationUtils.saveObject(vectorizer, tempFile);
    BagOfWordsVectorizer vectorizer2 = SerializationUtils.readObject(tempFile);
    vectorizer2.setTokenizerFactory(tokenizerFactory);
    dataSet = vectorizer2.vectorize("This is 2 file.", "label2");
    assertEquals(array, dataSet.getFeatureMatrix());
}
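For orientation, a minimal, condensed usage sketch distilled from the test above. The corpus path and query sentence are hypothetical placeholders; only builder options exercised by the test are used.

// Minimal sketch (hypothetical corpus directory): fit a bag-of-words model
// over labeled documents, then turn a sentence into a frequency vector.
LabelAwareSentenceIterator corpusIter =
                new LabelAwareFileSentenceIterator(new File("/path/to/labeled/corpus"));
BagOfWordsVectorizer bow = new BagOfWordsVectorizer.Builder()
                .setMinWordFrequency(1)
                .setTokenizerFactory(new DefaultTokenizerFactory())
                .setIterator(corpusIter)
                .build();
bow.fit();
INDArray features = bow.transform("some query sentence"); // 1 x vocabSize row vector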
Use of org.deeplearning4j.models.word2vec.VocabWord in project deeplearning4j by deeplearning4j.
Class TfidfVectorizerTest, method testTfIdfVectorizer:
@Test
public void testTfIdfVectorizer() throws Exception {
    File rootDir = new ClassPathResource("tripledir").getFile();
    LabelAwareSentenceIterator iter = new LabelAwareFileSentenceIterator(rootDir);
    TokenizerFactory tokenizerFactory = new DefaultTokenizerFactory();

    TfidfVectorizer vectorizer = new TfidfVectorizer.Builder()
                    .setMinWordFrequency(1)
                    .setStopWords(new ArrayList<String>())
                    .setTokenizerFactory(tokenizerFactory)
                    .setIterator(iter)
                    .allowParallelTokenization(false)
                    .build();
    vectorizer.fit();

    // "file." appears in all three training documents
    VocabWord word = vectorizer.getVocabCache().wordFor("file.");
    assumeNotNull(word);
    assertEquals(word, vectorizer.getVocabCache().tokenFor("file."));
    assertEquals(3, vectorizer.getVocabCache().totalNumberOfDocs());
    assertEquals(3, word.getSequencesCount());
    assertEquals(3, word.getElementFrequency(), 0.1);

    // "1" appears in only one document
    VocabWord word1 = vectorizer.getVocabCache().wordFor("1");
    assertEquals(1, word1.getSequencesCount());
    assertEquals(1, word1.getElementFrequency(), 0.1);

    log.info("Labels used: " + vectorizer.getLabelsSource().getLabels());
    assertEquals(3, vectorizer.getLabelsSource().getNumberOfLabelsUsed());
    assertEquals(3, vectorizer.getVocabCache().totalNumberOfDocs());
    assertEquals(11, vectorizer.numWordsEncountered());

    // TF-IDF scores: rarer terms score higher; "file." occurs in every
    // document, so its IDF (and therefore its score) is zero
    INDArray vector = vectorizer.transform("This is 3 file.");
    log.info("TF-IDF vector: " + Arrays.toString(vector.data().asDouble()));
    VocabCache<VocabWord> vocabCache = vectorizer.getVocabCache();
    assertEquals(.04402, vector.getDouble(vocabCache.tokenFor("This").getIndex()), 0.001);
    assertEquals(.04402, vector.getDouble(vocabCache.tokenFor("is").getIndex()), 0.001);
    assertEquals(0.119, vector.getDouble(vocabCache.tokenFor("3").getIndex()), 0.001);
    assertEquals(0, vector.getDouble(vocabCache.tokenFor("file.").getIndex()), 0.001);

    DataSet dataSet = vectorizer.vectorize("This is 3 file.", "label3");
    // Label order is not guaranteed, so check that exactly one label position
    // is set instead of asserting fixed indices:
    // assertEquals(0.0, dataSet.getLabels().getDouble(0), 0.1);
    // assertEquals(0.0, dataSet.getLabels().getDouble(1), 0.1);
    // assertEquals(1.0, dataSet.getLabels().getDouble(2), 0.1);
    int cnt = 0;
    for (int i = 0; i < 3; i++) {
        if (dataSet.getLabels().getDouble(i) > 0.1)
            cnt++;
    }
    assertEquals(1, cnt);

    // Serialization round-trip: features must match after restoring the
    // vectorizer and re-setting the (non-serialized) tokenizer factory
    File tempFile = File.createTempFile("somefile", "Dsdas");
    tempFile.deleteOnExit();
    SerializationUtils.saveObject(vectorizer, tempFile);
    TfidfVectorizer vectorizer2 = SerializationUtils.readObject(tempFile);
    vectorizer2.setTokenizerFactory(tokenizerFactory);
    dataSet = vectorizer2.vectorize("This is 3 file.", "label2");
    assertEquals(vector, dataSet.getFeatureMatrix());
}
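The asserted scores are consistent with TF-IDF computed as relative term frequency times a base-10 IDF; the sketch below reproduces them. The exact formula is an inference from the asserted constants, not something the test states explicitly.

// Assumed scoring, inferred from the assertions above:
//   tfidf(w) = (count(w, query) / queryLength) * log10(totalDocs / docFreq(w))
double tf = 1.0 / 4.0;                          // each token occurs once in "This is 3 file."
double scoreThis = tf * Math.log10(3.0 / 2.0);  // "This" in 2 of 3 docs -> ~0.04402
double score3    = tf * Math.log10(3.0 / 1.0);  // "3" in 1 of 3 docs    -> ~0.11928
double scoreFile = tf * Math.log10(3.0 / 3.0);  // "file." in all 3 docs -> 0.0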
Use of org.deeplearning4j.models.word2vec.VocabWord in project deeplearning4j by deeplearning4j.
Class InMemoryLookupTableTest, method testConsumeOnNonEqualVocabs:
@Test
public void testConsumeOnNonEqualVocabs() throws Exception {
    TokenizerFactory t = new DefaultTokenizerFactory();
    t.setTokenPreProcessor(new CommonPreprocessor());

    // Build the source vocabulary from a plain-text corpus
    AbstractCache<VocabWord> cacheSource = new AbstractCache.Builder<VocabWord>().build();
    ClassPathResource resource = new ClassPathResource("big/raw_sentences.txt");
    BasicLineIterator underlyingIterator = new BasicLineIterator(resource.getFile());
    SentenceTransformer transformer = new SentenceTransformer.Builder()
                    .iterator(underlyingIterator)
                    .tokenizerFactory(t)
                    .build();
    AbstractSequenceIterator<VocabWord> sequenceIterator =
                    new AbstractSequenceIterator.Builder<>(transformer).build();
    VocabConstructor<VocabWord> vocabConstructor = new VocabConstructor.Builder<VocabWord>()
                    .addSource(sequenceIterator, 1)
                    .setTargetVocabCache(cacheSource)
                    .build();
    vocabConstructor.buildJointVocabulary(false, true);
    assertEquals(244, cacheSource.numWords());

    InMemoryLookupTable<VocabWord> mem1 = (InMemoryLookupTable<VocabWord>) new InMemoryLookupTable.Builder<VocabWord>()
                    .vectorLength(100).cache(cacheSource).build();
    mem1.resetWeights(true);

    // Build the target vocabulary as a merge of the source vocab plus a labeled corpus
    AbstractCache<VocabWord> cacheTarget = new AbstractCache.Builder<VocabWord>().build();
    FileLabelAwareIterator labelAwareIterator = new FileLabelAwareIterator.Builder()
                    .addSourceFolder(new ClassPathResource("/paravec/labeled").getFile())
                    .build();
    transformer = new SentenceTransformer.Builder().iterator(labelAwareIterator).tokenizerFactory(t).build();
    sequenceIterator = new AbstractSequenceIterator.Builder<>(transformer).build();
    VocabConstructor<VocabWord> vocabTransfer = new VocabConstructor.Builder<VocabWord>()
                    .addSource(sequenceIterator, 1)
                    .setTargetVocabCache(cacheTarget)
                    .build();
    vocabTransfer.buildMergedVocabulary(cacheSource, true);

    // The +3 accounts for 3 additional entries in the target VocabCache: the document labels
    assertEquals(cacheSource.numWords() + 3, cacheTarget.numWords());

    InMemoryLookupTable<VocabWord> mem2 = (InMemoryLookupTable<VocabWord>) new InMemoryLookupTable.Builder<VocabWord>()
                    .vectorLength(100).cache(cacheTarget).seed(18).build();
    mem2.resetWeights(true);

    // Before consume(): independently initialized weights differ
    assertNotEquals(mem1.vector("day"), mem2.vector("day"));

    // consume() copies the source table's weights for shared vocabulary entries
    mem2.consume(mem1);
    assertEquals(mem1.vector("day"), mem2.vector("day"));

    // The target table keeps its 3 extra (label) rows
    assertTrue(mem1.syn0.rows() < mem2.syn0.rows());
    assertEquals(mem1.syn0.rows() + 3, mem2.syn0.rows());
}
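Conceptually, the merge exercised here copies each shared word's weight row from the source table into the target table while leaving target-only rows (the labels) untouched. A simplified sketch of that semantics, not the actual dl4j implementation:

// Simplified sketch of consume()-style weight transfer (assumption: shared
// words are matched by their string label across the two caches).
for (VocabWord sourceWord : cacheSource.vocabWords()) {
    VocabWord targetWord = cacheTarget.wordFor(sourceWord.getLabel());
    if (targetWord != null) {
        mem2.syn0.putRow(targetWord.getIndex(), mem1.syn0.getRow(sourceWord.getIndex()));
    }
}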
Use of org.deeplearning4j.models.word2vec.VocabWord in project deeplearning4j by deeplearning4j.
Class InMemoryLookupTableTest, method testConsumeOnEqualVocabs:
@Test
public void testConsumeOnEqualVocabs() throws Exception {
    TokenizerFactory t = new DefaultTokenizerFactory();
    t.setTokenPreProcessor(new CommonPreprocessor());

    AbstractCache<VocabWord> cacheSource = new AbstractCache.Builder<VocabWord>().build();
    ClassPathResource resource = new ClassPathResource("big/raw_sentences.txt");
    BasicLineIterator underlyingIterator = new BasicLineIterator(resource.getFile());
    SentenceTransformer transformer = new SentenceTransformer.Builder()
                    .iterator(underlyingIterator)
                    .tokenizerFactory(t)
                    .build();
    AbstractSequenceIterator<VocabWord> sequenceIterator =
                    new AbstractSequenceIterator.Builder<>(transformer).build();
    VocabConstructor<VocabWord> vocabConstructor = new VocabConstructor.Builder<VocabWord>()
                    .addSource(sequenceIterator, 1)
                    .setTargetVocabCache(cacheSource)
                    .build();
    vocabConstructor.buildJointVocabulary(false, true);
    assertEquals(244, cacheSource.numWords());

    // Two tables over the same vocabulary, initialized with different seeds
    InMemoryLookupTable<VocabWord> mem1 = (InMemoryLookupTable<VocabWord>) new InMemoryLookupTable.Builder<VocabWord>()
                    .vectorLength(100).cache(cacheSource).seed(17).build();
    mem1.resetWeights(true);
    InMemoryLookupTable<VocabWord> mem2 = (InMemoryLookupTable<VocabWord>) new InMemoryLookupTable.Builder<VocabWord>()
                    .vectorLength(100).cache(cacheSource).seed(15).build();
    mem2.resetWeights(true);
    assertNotEquals(mem1.vector("day"), mem2.vector("day"));

    // consume() overwrites mem2's weights with mem1's for the shared vocabulary
    mem2.consume(mem1);
    assertEquals(mem1.vector("day"), mem2.vector("day"));
}
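The seed passed to the builder drives weight initialization, which is why the two tables start out different. A small sketch under the assumption that identical seeds are deterministic, the converse of what the test asserts:

// Assumption (not verified by the test above): same cache + same seed
// should yield identical initial vectors after resetWeights(true).
InMemoryLookupTable<VocabWord> a = (InMemoryLookupTable<VocabWord>) new InMemoryLookupTable.Builder<VocabWord>()
                .vectorLength(100).cache(cacheSource).seed(17).build();
a.resetWeights(true);
InMemoryLookupTable<VocabWord> b = (InMemoryLookupTable<VocabWord>) new InMemoryLookupTable.Builder<VocabWord>()
                .vectorLength(100).cache(cacheSource).seed(17).build();
b.resetWeights(true);
assertEquals(a.vector("day"), b.vector("day"));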
Use of org.deeplearning4j.models.word2vec.VocabWord in project deeplearning4j by deeplearning4j.
Class AbstractCoOccurrencesTest, method testFit1:
@Test
public void testFit1() throws Exception {
    ClassPathResource resource = new ClassPathResource("other/oneline.txt");
    File file = resource.getFile();
    AbstractCache<VocabWord> vocabCache = new AbstractCache.Builder<VocabWord>().build();
    BasicLineIterator underlyingIterator = new BasicLineIterator(file);
    TokenizerFactory t = new DefaultTokenizerFactory();
    t.setTokenPreProcessor(new CommonPreprocessor());
    SentenceTransformer transformer = new SentenceTransformer.Builder()
                    .iterator(underlyingIterator)
                    .tokenizerFactory(t)
                    .build();
    AbstractSequenceIterator<VocabWord> sequenceIterator =
                    new AbstractSequenceIterator.Builder<>(transformer).build();
    VocabConstructor<VocabWord> constructor = new VocabConstructor.Builder<VocabWord>()
                    .addSource(sequenceIterator, 1)
                    .setTargetVocabCache(vocabCache)
                    .build();
    constructor.buildJointVocabulary(false, true);

    AbstractCoOccurrences<VocabWord> coOccurrences = new AbstractCoOccurrences.Builder<VocabWord>()
                    .iterate(sequenceIterator)
                    .vocabCache(vocabCache)
                    .symmetric(false)
                    .windowSize(15)
                    .build();
    coOccurrences.fit();

    // Iterate all weighted co-occurrence pairs produced by fit()
    //List<Pair<VocabWord, VocabWord>> list = coOccurrences.i();
    Iterator<Pair<Pair<VocabWord, VocabWord>, Double>> iterator = coOccurrences.iterator();
    assertNotNull(iterator);
    int cnt = 0;
    List<Pair<VocabWord, VocabWord>> list = new ArrayList<>();
    while (iterator.hasNext()) {
        Pair<Pair<VocabWord, VocabWord>, Double> pair = iterator.next();
        list.add(pair.getFirst());
        cnt++;
    }
    log.info("CoOccurrences: " + list);

    // The one-line corpus yields exactly 16 co-occurrence pairs
    assertEquals(16, list.size());
    assertEquals(16, cnt);
}
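Beyond counting the pairs, the same iterator output can feed an aggregation step, e.g. for GloVe-style training. A short sketch using only the iterator API exercised above plus java.util; it assumes a fresh iterator is available:

// Sketch: accumulate co-occurrence weights into a map keyed by the word pair.
Map<Pair<VocabWord, VocabWord>, Double> weights = new HashMap<>();
Iterator<Pair<Pair<VocabWord, VocabWord>, Double>> it = coOccurrences.iterator();
while (it.hasNext()) {
    Pair<Pair<VocabWord, VocabWord>, Double> entry = it.next();
    weights.merge(entry.getFirst(), entry.getSecond(), Double::sum);
}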