Use of org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable in project deeplearning4j by deeplearning4j:
the class WordVectorSerializer, method loadFullModel.
/**
* This method loads a full w2v model previously saved with a writeFullModel() call.
*
* Deprecation note: please consider using readWord2VecModel() or loadStaticModel() instead.
*
* @param path path to the previously stored w2v JSON model
* @return restored Word2Vec instance
*/
@Deprecated
public static Word2Vec loadFullModel(@NonNull String path) throws FileNotFoundException {
/*
// TODO: implementation is in progress
We need to restore:
1. WeightLookupTable, including the syn0 and syn1 matrices
2. VocabCache, marked as SPECIAL to avoid accidental word removals
*/
BasicLineIterator iterator = new BasicLineIterator(new File(path));
// first 3 lines should be processed separately
String confJson = iterator.nextSentence();
log.info("Word2Vec conf. JSON: " + confJson);
VectorsConfiguration configuration = VectorsConfiguration.fromJson(confJson);
// we don't actually need expTable: it yields identical results on subsequent runs as long as its size is unchanged
String eTable = iterator.nextSentence();
String nTable = iterator.nextSentence();
if (configuration.getNegative() > 0) {
// TODO: we should probably parse negTable, but it's not required until vocab changes are introduced.
// On a predefined vocab it will reproduce exactly the same negTable; the same goes for expTable.
}
/*
Since we're restoring the vocab from a previously serialized model, minWordFrequency has already been applied
to its vocabulary, so it should NOT be truncated again. That's why minWordFrequency is set to the configuration
value, while SPECIAL is applied to each word to avoid truncation.
*/
VocabularyHolder holder = new VocabularyHolder.Builder()
        .minWordFrequency(configuration.getMinWordFrequency())
        .hugeModelExpected(configuration.isHugeModelExpected())
        .scavengerActivationThreshold(configuration.getScavengerActivationThreshold())
        .scavengerRetentionDelay(configuration.getScavengerRetentionDelay())
        .build();
AbstractCache<VocabWord> vocabCache = new AbstractCache.Builder<VocabWord>().build();
while (iterator.hasNext()) {
String wordJson = iterator.nextSentence();
VocabularyWord word = VocabularyWord.fromJson(wordJson);
word.setSpecial(true);
VocabWord vw = new VocabWord(word.getCount(), word.getWord());
// the index comes from the serialized Huffman node, preserving the original ordering
vw.setIndex(word.getHuffmanNode().getIdx());
vw.setCodeLength(word.getHuffmanNode().getLength());
vw.setPoints(arrayToList(word.getHuffmanNode().getPoint(), word.getHuffmanNode().getLength()));
vw.setCodes(arrayToList(word.getHuffmanNode().getCode(), word.getHuffmanNode().getLength()));
vocabCache.addToken(vw);
vocabCache.addWordToIndex(vw.getIndex(), vw.getLabel());
vocabCache.putVocabWord(vw.getWord());
}
// at this point the vocab is restored, and it's time to rebuild the Huffman tree
// since the word counters are equal, the Huffman tree will be equal too
//holder.updateHuffmanCodes();
// we definitely don't need the UNK word in this scenario
// holder.transferBackToVocabCache(vocabCache, false);
// now it's time to transfer the syn0/syn1/syn1Neg values
InMemoryLookupTable lookupTable = (InMemoryLookupTable) new InMemoryLookupTable.Builder()
        .negative(configuration.getNegative())
        .useAdaGrad(configuration.isUseAdaGrad())
        .lr(configuration.getLearningRate())
        .cache(vocabCache)
        .vectorLength(configuration.getLayersSize())
        .build();
// we create all arrays
lookupTable.resetWeights(true);
iterator.reset();
// we should skip the first 3 lines of the file (configuration, expTable and negTable)
iterator.nextSentence();
iterator.nextSentence();
iterator.nextSentence();
// now, for each word in the restored vocab, we transfer the actual weight values
while (iterator.hasNext()) {
String wordJson = iterator.nextSentence();
VocabularyWord word = VocabularyWord.fromJson(wordJson);
// syn0 transfer
INDArray syn0 = lookupTable.getSyn0().getRow(vocabCache.indexOf(word.getWord()));
syn0.assign(Nd4j.create(word.getSyn0()));
// syn1 transfer
// syn1 values are normally accessed via tree points, but since our goal is just deserialization, we can push them row by row
INDArray syn1 = lookupTable.getSyn1().getRow(vocabCache.indexOf(word.getWord()));
syn1.assign(Nd4j.create(word.getSyn1()));
// syn1Neg transfer
if (configuration.getNegative() > 0) {
INDArray syn1Neg = lookupTable.getSyn1Neg().getRow(vocabCache.indexOf(word.getWord()));
syn1Neg.assign(Nd4j.create(word.getSyn1Neg()));
}
}
Word2Vec vec = new Word2Vec.Builder(configuration)
        .vocabCache(vocabCache)
        .lookupTable(lookupTable)
        .resetModel(false)
        .build();
vec.setModelUtils(new BasicModelUtils());
return vec;
}
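Since loadFullModel() is deprecated, a minimal usage sketch under assumed paths, contrasting it with the recommended non-deprecated replacement:

// hypothetical paths; loadFullModel() is the deprecated call shown above,
// readWord2VecModel() is the replacement suggested by the deprecation note
Word2Vec legacy = WordVectorSerializer.loadFullModel("/tmp/w2v-full.model");
Word2Vec current = WordVectorSerializer.readWord2VecModel(new File("/tmp/w2v.zip"));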
Use of org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable in project deeplearning4j by deeplearning4j:
the class WordVectorSerializer, method writeWord2VecModel.
/**
* This method saves a Word2Vec model into a compressed zip file and sends it to the output stream.
* PLEASE NOTE: this method saves the FULL model, including syn0 AND syn1.
*
* @param vectors Word2Vec model to be saved
* @param stream output stream the compressed model is written to
*/
public static void writeWord2VecModel(Word2Vec vectors, OutputStream stream) throws IOException {
ZipOutputStream zipfile = new ZipOutputStream(new BufferedOutputStream(new CloseShieldOutputStream(stream)));
ZipEntry syn0 = new ZipEntry("syn0.txt");
zipfile.putNextEntry(syn0);
// writing out syn0
File tempFileSyn0 = File.createTempFile("word2vec", "0");
tempFileSyn0.deleteOnExit();
writeWordVectors(vectors.lookupTable(), tempFileSyn0);
BufferedInputStream fis = new BufferedInputStream(new FileInputStream(tempFileSyn0));
writeEntry(fis, zipfile);
fis.close();
// writing out syn1
File tempFileSyn1 = File.createTempFile("word2vec", "1");
tempFileSyn1.deleteOnExit();
INDArray syn1 = ((InMemoryLookupTable<VocabWord>) vectors.getLookupTable()).getSyn1();
if (syn1 != null)
try (PrintWriter writer = new PrintWriter(new FileWriter(tempFileSyn1))) {
for (int x = 0; x < syn1.rows(); x++) {
INDArray row = syn1.getRow(x);
StringBuilder builder = new StringBuilder();
for (int i = 0; i < row.length(); i++) {
builder.append(row.getDouble(i)).append(" ");
}
writer.println(builder.toString().trim());
}
}
ZipEntry zSyn1 = new ZipEntry("syn1.txt");
zipfile.putNextEntry(zSyn1);
fis = new BufferedInputStream(new FileInputStream(tempFileSyn1));
writeEntry(fis, zipfile);
fis.close();
// writing out syn1Neg
File tempFileSyn1Neg = File.createTempFile("word2vec", "n");
tempFileSyn1Neg.deleteOnExit();
INDArray syn1Neg = ((InMemoryLookupTable<VocabWord>) vectors.getLookupTable()).getSyn1Neg();
if (syn1Neg != null)
try (PrintWriter writer = new PrintWriter(new FileWriter(tempFileSyn1Neg))) {
for (int x = 0; x < syn1Neg.rows(); x++) {
INDArray row = syn1Neg.getRow(x);
StringBuilder builder = new StringBuilder();
for (int i = 0; i < row.length(); i++) {
builder.append(row.getDouble(i)).append(" ");
}
writer.println(builder.toString().trim());
}
}
ZipEntry zSyn1Neg = new ZipEntry("syn1Neg.txt");
zipfile.putNextEntry(zSyn1Neg);
fis = new BufferedInputStream(new FileInputStream(tempFileSyn1Neg));
writeEntry(fis, zipfile);
fis.close();
File tempFileCodes = File.createTempFile("word2vec", "h");
tempFileCodes.deleteOnExit();
ZipEntry hC = new ZipEntry("codes.txt");
zipfile.putNextEntry(hC);
// writing out huffman tree codes
try (PrintWriter writer = new PrintWriter(new FileWriter(tempFileCodes))) {
for (int i = 0; i < vectors.getVocab().numWords(); i++) {
VocabWord word = vectors.getVocab().elementAtIndex(i);
StringBuilder builder = new StringBuilder(encodeB64(word.getLabel())).append(" ");
for (int code : word.getCodes()) {
builder.append(code).append(" ");
}
writer.println(builder.toString().trim());
}
}
fis = new BufferedInputStream(new FileInputStream(tempFileCodes));
writeEntry(fis, zipfile);
fis.close();
File tempFileHuffman = File.createTempFile("word2vec", "h");
tempFileHuffman.deleteOnExit();
ZipEntry hP = new ZipEntry("huffman.txt");
zipfile.putNextEntry(hP);
// writing out huffman tree points
try (PrintWriter writer = new PrintWriter(new FileWriter(tempFileHuffman))) {
for (int i = 0; i < vectors.getVocab().numWords(); i++) {
VocabWord word = vectors.getVocab().elementAtIndex(i);
StringBuilder builder = new StringBuilder(encodeB64(word.getLabel())).append(" ");
for (int point : word.getPoints()) {
builder.append(point).append(" ");
}
writer.println(builder.toString().trim());
}
}
fis = new BufferedInputStream(new FileInputStream(tempFileHuffman));
writeEntry(fis, zipfile);
fis.close();
File tempFileFreqs = File.createTempFile("word2vec", "f");
tempFileFreqs.deleteOnExit();
ZipEntry hF = new ZipEntry("frequencies.txt");
zipfile.putNextEntry(hF);
// writing out word frequencies
try (PrintWriter writer = new PrintWriter(new FileWriter(tempFileFreqs))) {
for (int i = 0; i < vectors.getVocab().numWords(); i++) {
VocabWord word = vectors.getVocab().elementAtIndex(i);
StringBuilder builder = new StringBuilder(encodeB64(word.getLabel())).append(" ")
        .append(word.getElementFrequency()).append(" ")
        .append(vectors.getVocab().docAppearedIn(word.getLabel()));
writer.println(builder.toString().trim());
}
}
fis = new BufferedInputStream(new FileInputStream(tempFileFreqs));
writeEntry(fis, zipfile);
fis.close();
ZipEntry config = new ZipEntry("config.json");
zipfile.putNextEntry(config);
writeEntry(new ByteArrayInputStream(vectors.getConfiguration().toJson().getBytes()), zipfile);
zipfile.flush();
zipfile.close();
try {
tempFileCodes.delete();
tempFileFreqs.delete();
tempFileHuffman.delete();
tempFileSyn0.delete();
tempFileSyn1.delete();
tempFileSyn1Neg.delete();
} catch (Exception e) {
// best-effort cleanup; the temp files are registered with deleteOnExit() anyway
}
}
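A minimal sketch of calling the method above; the target file name is an assumption, and vec stands for an already trained Word2Vec model:

try (OutputStream os = new BufferedOutputStream(new FileOutputStream("/tmp/w2v-full.zip"))) {
    // produces one zip archive containing syn0.txt, syn1.txt, syn1Neg.txt,
    // codes.txt, huffman.txt, frequencies.txt and config.json
    WordVectorSerializer.writeWord2VecModel(vec, os);
}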
Use of org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable in project deeplearning4j by deeplearning4j:
the class WordVectorSerializer, method loadTxt.
/**
* Loads an in-memory cache from the given path (sets syn0 and the vocab).
*
* @param vectorsFile the path of the file to load
* @return a Pair holding the lookup table and the vocab cache
* @throws FileNotFoundException if the input file does not exist
*/
public static Pair<InMemoryLookupTable, VocabCache> loadTxt(File vectorsFile) throws FileNotFoundException, UnsupportedEncodingException {
BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(vectorsFile), "UTF-8"));
AbstractCache cache = new AbstractCache<>();
LineIterator iter = IOUtils.lineIterator(reader);
String line = null;
boolean hasHeader = false;
if (iter.hasNext()) {
// read the first line and decide whether it's a header
line = iter.nextLine();
// a line without spaces can only be a header
if (!line.contains(" ")) {
log.debug("Skipping first line");
hasHeader = true;
} else {
// check whether this looks like proper word vectors, i.e. a word at position 0 followed by a bunch of floats
String[] split = line.split(" ");
try {
long[] header = new long[split.length];
for (int x = 0; x < split.length; x++) {
header[x] = Long.parseLong(split[x]);
}
if (split.length < 4)
hasHeader = true;
// header[2] - number of documents <-- DL4J-only value
if (split.length == 3)
cache.incrementTotalDocCount(header[2]);
printOutProjectedMemoryUse(header[0], (int) header[1], 1);
hasHeader = true;
try {
reader.close();
} catch (Exception ex) {
// best-effort close; the file is reopened below since a header was detected
}
} catch (Exception e) {
// if any conversion exception hits, the first line is a regular vector line, not a header
hasHeader = false;
}
}
}
// reposition the reader one line past the header
if (hasHeader) {
line = "";
iter.close();
reader = new BufferedReader(new InputStreamReader(new FileInputStream(vectorsFile), "UTF-8"));
iter = IOUtils.lineIterator(reader);
iter.nextLine();
}
List<INDArray> arrays = new ArrayList<>();
while (iter.hasNext()) {
if (line.isEmpty())
line = iter.nextLine();
String[] split = line.split(" ");
String word = decodeB64(split[0]);
VocabWord word1 = new VocabWord(1.0, word);
word1.setIndex(cache.numWords());
cache.addToken(word1);
cache.addWordToIndex(word1.getIndex(), word);
cache.putVocabWord(word);
float[] vector = new float[split.length - 1];
for (int i = 1; i < split.length; i++) {
vector[i - 1] = Float.parseFloat(split[i]);
}
INDArray row = Nd4j.create(vector);
arrays.add(row);
// clear the buffer so the next iteration pulls a fresh line
line = "";
}
INDArray syn = Nd4j.vstack(arrays);
InMemoryLookupTable lookupTable = (InMemoryLookupTable) new InMemoryLookupTable.Builder()
        .vectorLength(arrays.get(0).columns())
        .useAdaGrad(false)
        .cache(cache)
        .useHierarchicSoftmax(false)
        .build();
if (Nd4j.ENFORCE_NUMERICAL_STABILITY)
Nd4j.clearNans(syn);
lookupTable.setSyn0(syn);
iter.close();
try {
reader.close();
} catch (Exception e) {
// best-effort close
}
return new Pair<>(lookupTable, (VocabCache) cache);
}
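A hedged usage sketch for loadTxt(); the path is an assumption, and the file is expected to hold one word per line followed by its space-separated float vector, with an optional all-numeric header line:

// illustrative file contents (values are made up):
//   day 0.11 -0.42 0.73
//   night 0.08 -0.38 0.69
Pair<InMemoryLookupTable, VocabCache> pair = WordVectorSerializer.loadTxt(new File("/tmp/vectors.txt"));
InMemoryLookupTable lookupTable = pair.getFirst();
VocabCache vocabCache = pair.getSecond();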
Use of org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable in project deeplearning4j by deeplearning4j:
the class WordVectorSerializer, method readTextModel.
/**
* Reads a word2vec model stored in the classic text format (optionally gzipped).
*
* @param modelFile text model file to read
* @return restored Word2Vec instance
* @throws IOException if the file cannot be read
* @throws NumberFormatException if the header or a vector value cannot be parsed
*/
private static Word2Vec readTextModel(File modelFile) throws IOException, NumberFormatException {
InMemoryLookupTable lookupTable;
VocabCache cache;
INDArray syn0;
Word2Vec ret = new Word2Vec();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(
        GzipUtils.isCompressedFilename(modelFile.getName())
                ? new GZIPInputStream(new FileInputStream(modelFile))
                : new FileInputStream(modelFile),
        "UTF-8"))) {
String line = reader.readLine();
String[] initial = line.split(" ");
int words = Integer.parseInt(initial[0]);
int layerSize = Integer.parseInt(initial[1]);
syn0 = Nd4j.create(words, layerSize);
cache = new InMemoryLookupCache(false);
int currLine = 0;
while ((line = reader.readLine()) != null) {
String[] split = line.split(" ");
assert split.length == layerSize + 1;
String word = split[0].replaceAll(whitespaceReplacement, " ");
float[] vector = new float[split.length - 1];
for (int i = 1; i < split.length; i++) {
vector[i - 1] = Float.parseFloat(split[i]);
}
syn0.putRow(currLine, Nd4j.create(vector));
cache.addWordToIndex(cache.numWords(), word);
cache.addToken(new VocabWord(1, word));
cache.putVocabWord(word);
currLine++;
}
lookupTable = (InMemoryLookupTable) new InMemoryLookupTable.Builder()
        .cache(cache)
        .vectorLength(layerSize)
        .build();
lookupTable.setSyn0(syn0);
ret.setVocab(cache);
ret.setLookupTable(lookupTable);
}
return ret;
}
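readTextModel() is private, but the layout it parses is the classic word2vec text format. A small sketch (path and values are made up, JDK classes only) producing a compatible gzipped file:

try (PrintWriter w = new PrintWriter(new OutputStreamWriter(
        new GZIPOutputStream(new FileOutputStream("/tmp/tiny-w2v.txt.gz")), "UTF-8"))) {
    w.println("2 3"); // header: number of words, layer size
    w.println("day 0.11 -0.42 0.73");   // one row per word: word, then layerSize floats
    w.println("night 0.08 -0.38 0.69");
}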
Use of org.deeplearning4j.models.embeddings.inmemory.InMemoryLookupTable in project deeplearning4j by deeplearning4j:
the class WordVectorSerializer, method readWord2VecFromText.
/**
* This method allows you to read a Word2Vec model from externally originated vectors and syn1,
* so technically this method is compatible with any other w2v implementation.
*
* @param vectors text file with words and their weights, aka syn0
* @param hs text file with the HS layers, aka syn1
* @param h_codes text file with Huffman tree codes
* @param h_points text file with Huffman tree points
* @param configuration configuration of the model being restored
* @return restored Word2Vec instance
*/
public static Word2Vec readWord2VecFromText(@NonNull File vectors, @NonNull File hs, @NonNull File h_codes,
        @NonNull File h_points, @NonNull VectorsConfiguration configuration) throws IOException {
// first we load syn0
Pair<InMemoryLookupTable, VocabCache> pair = loadTxt(vectors);
InMemoryLookupTable lookupTable = pair.getFirst();
lookupTable.setNegative(configuration.getNegative());
if (configuration.getNegative() > 0)
lookupTable.initNegative();
VocabCache<VocabWord> vocab = (VocabCache<VocabWord>) pair.getSecond();
// now we load syn1
BufferedReader reader = new BufferedReader(new FileReader(hs));
String line = null;
List<INDArray> rows = new ArrayList<>();
while ((line = reader.readLine()) != null) {
String[] split = line.split(" ");
double[] array = new double[split.length];
for (int i = 0; i < split.length; i++) {
array[i] = Double.parseDouble(split[i]);
}
rows.add(Nd4j.create(array));
}
reader.close();
// it's possible to have a full model without syn1
if (rows.size() > 0) {
INDArray syn1 = Nd4j.vstack(rows);
lookupTable.setSyn1(syn1);
}
// now we transform mappings into huffman tree points
reader = new BufferedReader(new FileReader(h_points));
while ((line = reader.readLine()) != null) {
String[] split = line.split(" ");
VocabWord word = vocab.wordFor(decodeB64(split[0]));
List<Integer> points = new ArrayList<>();
for (int i = 1; i < split.length; i++) {
points.add(Integer.parseInt(split[i]));
}
word.setPoints(points);
}
reader.close();
// now we transform mappings into huffman tree codes
reader = new BufferedReader(new FileReader(h_codes));
while ((line = reader.readLine()) != null) {
String[] split = line.split(" ");
VocabWord word = vocab.wordFor(decodeB64(split[0]));
List<Byte> codes = new ArrayList<>();
for (int i = 1; i < split.length; i++) {
codes.add(Byte.parseByte(split[i]));
}
word.setCodes(codes);
word.setCodeLength((short) codes.size());
}
reader.close();
Word2Vec.Builder builder = new Word2Vec.Builder(configuration)
        .vocabCache(vocab)
        .lookupTable(lookupTable)
        .resetModel(false);
TokenizerFactory factory = getTokenizerFactory(configuration);
if (factory != null)
builder.tokenizerFactory(factory);
Word2Vec w2v = builder.build();
return w2v;
}
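A hedged usage sketch for the method above; the file names mirror the entries produced by writeWord2VecModel() (after unzipping the archive) and are assumptions here:

VectorsConfiguration conf = VectorsConfiguration.fromJson(
        FileUtils.readFileToString(new File("/tmp/config.json"))); // commons-io helper
Word2Vec w2v = WordVectorSerializer.readWord2VecFromText(
        new File("/tmp/syn0.txt"), new File("/tmp/syn1.txt"),
        new File("/tmp/codes.txt"), new File("/tmp/huffman.txt"), conf);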