Use of com.ichi2.libanki.Media in the project AnkiChinaAndroid by ankichinateam.
Class AnkiChinaSyncer, method uploadFileAsync:
/**
 * Uploads pending media files to the Anki China server in zip batches, recursing from the
 * success callback until {@code mediaNeedUploadZip2AnkiChina} yields no more file names.
 *
 * @param needUploadFileRecord map of media file name -> uploaded flag; entries are flipped to
 *                             {@code true} after their batch upload succeeds
 * @param token                auth token forwarded to the upload endpoint
 * @param percent              progress increment added per uploaded file when updating the dialog
 */
private void uploadFileAsync(Map<String, Boolean> needUploadFileRecord, String token, double percent) {
// Nothing left to upload.
if (needUploadFileRecord.size() == 0) {
return;
}
// Build the next batch: first = temporary zip file, second = names of the files it contains.
Pair<File, List<String>> changesZip = CollectionHelper.getInstance().getColSafe(AnkiDroidApp.getInstance()).getMedia().mediaNeedUploadZip2AnkiChina(needUploadFileRecord);
File zip = changesZip.first;
List<String> fnames = changesZip.second;
if (fnames.size() != 0) {
List<String> zipPath = new ArrayList<>();
zipPath.add(zip.getAbsolutePath());
OKHttpUtils.doPostRequest(Consts.ANKI_CHINA_BASE + Consts.API_VERSION + "napi/sync/uploadFile", mCurrentSession, zipPath, uploadProgress, zip.getAbsolutePath(), token, new OKHttpUtils.MyCallBack() {
@Override
public void onFailure(Call call, final IOException e) {
Timber.i("upload error------>%s %s", zip.getAbsolutePath(), e.getMessage());
updateDialogMessage(SYNCING_ERROR, ERROR_NETWORK);
// NOTE(review): failure bumps the counter once per batch, while onResponse bumps it once
// per file in the batch — confirm that maybeCompleted()'s bookkeeping expects this asymmetry.
mUploadResultEndCount++;
// maybeCompleted();
}
@Override
public void onResponse(Call call, Object tag, Response response) throws IOException {
// Server reply is only logged; success is assumed for every file in the batch.
final JSONObject object = new JSONObject(response.body().string());
Timber.e("upload media result:%s", object.toString());
Timber.i("upload media result---->%s", tag);
for (String s : fnames) {
// Mark each file of this batch as uploaded and advance the progress dialog.
needUploadFileRecord.put(s, true);
mUploadResultEndCount++;
updateDialogProgress(SYNCING_MEDIA, "", mCurrentProgress + percent);
}
// The temporary batch zip is no longer needed.
zip.delete();
// If the overall sync isn't finished, upload the next batch.
if (!maybeCompleted()) {
uploadFileAsync(needUploadFileRecord, token, percent);
}
}
});
}
}
Use of com.ichi2.libanki.Media in the project AnkiChinaAndroid by ankichinateam.
Class Anki2Importer, method _importNotes:
/**
* Notes
* ***********************************************************
*/
/**
 * Imports notes from the source collection into the destination collection.
 * Builds a guid -> (id, mod, mid) map of existing destination notes, then streams the source
 * notes, adding new ones (with uniquified ids and bumped usn) and updating duplicates when
 * allowed and the incoming note is newer with an identical note type. Work is flushed in
 * chunks of 1000 rows to limit memory use, inside a single destination transaction.
 */
private void _importNotes() {
// build guid -> (id,mod,mid) hash & map of existing note ids
mNotes = new HashMap<>();
Set<Long> existing = new HashSet<>();
Cursor cur = null;
try {
cur = mDst.getDb().getDatabase().query("select id, guid, mod, mid from notes", null);
while (cur.moveToNext()) {
long id = cur.getLong(0);
String guid = cur.getString(1);
long mod = cur.getLong(2);
long mid = cur.getLong(3);
mNotes.put(guid, new Object[] { id, mod, mid });
existing.add(id);
}
} finally {
if (cur != null) {
cur.close();
}
}
// we may need to rewrite the guid if the model schemas don't match,
// so we need to keep track of the changes for the card import stage
mChangedGuids = new HashMap<>();
// we ignore updates to changed schemas. we need to note the ignored
// guids, so we avoid importing invalid cards
mIgnoredGuids = new HashMap<>();
// iterate over source collection
ArrayList<Object[]> add = new ArrayList<>();
int totalAddCount = 0;
final int thresExecAdd = 1000;
ArrayList<Object[]> update = new ArrayList<>();
int totalUpdateCount = 0;
final int thresExecUpdate = 1000;
ArrayList<Long> dirty = new ArrayList<>();
int totalDirtyCount = 0;
final int thresExecDirty = 1000;
int usn = mDst.usn();
int dupes = 0;
ArrayList<String> dupesIgnored = new ArrayList<>();
try {
mDst.getDb().getDatabase().beginTransaction();
cur = mSrc.getDb().getDatabase().query("select * from notes", null);
// Counters for progress updates
int total = cur.getCount();
boolean largeCollection = total > 200;
int onePercent = total / 100;
int i = 0;
while (cur.moveToNext()) {
// turn the db result into a mutable list
// NOTE(review): indices follow the notes table column order; the GUID/MID/MOD
// constants used below are defined elsewhere in this class — confirm they match.
Object[] note = new Object[] { cur.getLong(0), cur.getString(1), cur.getLong(2), cur.getLong(3), cur.getInt(4), cur.getString(5), cur.getString(6), cur.getString(7), cur.getLong(8), cur.getInt(9), cur.getString(10) };
boolean shouldAdd = _uniquifyNote(note);
if (shouldAdd) {
// ensure id is unique
while (existing.contains(note[0])) {
// bump the candidate id until it no longer collides with an existing note id
note[0] = ((Long) note[0]) + 999;
}
existing.add((Long) note[0]);
// bump usn
note[4] = usn;
// update media references in case of dupes (note[6] holds the flds column)
note[6] = _mungeMedia((Long) note[MID], (String) note[6]);
add.add(note);
dirty.add((Long) note[0]);
// note we have the added guid
mNotes.put((String) note[GUID], new Object[] { note[0], note[3], note[MID] });
} else {
// a duplicate or changed schema - safe to update?
dupes += 1;
if (mAllowUpdate) {
Object[] n = mNotes.get(note[GUID]);
long oldNid = (Long) n[0];
long oldMod = (Long) n[1];
long oldMid = (Long) n[2];
// will update if incoming note more recent
if (oldMod < (Long) note[MOD]) {
// safe if note types identical
if (oldMid == (Long) note[MID]) {
// incoming note should use existing id
note[0] = oldNid;
note[4] = usn;
note[6] = _mungeMedia((Long) note[MID], (String) note[6]);
update.add(note);
dirty.add((Long) note[0]);
} else {
// note type differs: record it for the log and skip its cards later
dupesIgnored.add(String.format("%s: %s", mCol.getModels().get(oldMid).getString("name"), ((String) note[6]).replace("\u001f", ",")));
mIgnoredGuids.put((String) note[GUID], true);
}
}
}
}
i++;
// add to col partially, so as to avoid OOM
if (add.size() >= thresExecAdd) {
totalAddCount += add.size();
addNotes(add);
add.clear();
Timber.d("add notes: %d", totalAddCount);
}
// add to col partially, so as to avoid OOM
if (update.size() >= thresExecUpdate) {
totalUpdateCount += update.size();
updateNotes(update);
update.clear();
Timber.d("update notes: %d", totalUpdateCount);
}
// flush field-cache/tag registration partially, so as to avoid OOM
if (dirty.size() >= thresExecDirty) {
totalDirtyCount += dirty.size();
long[] das = Utils.collection2Array(dirty);
mDst.updateFieldCache(das);
mDst.getTags().registerNotes(das);
dirty.clear();
Timber.d("dirty notes: %d", totalDirtyCount);
}
if (total != 0 && (!largeCollection || i % onePercent == 0)) {
// Calls to publishProgress are reasonably expensive due to res.getString()
publishProgress(i * 100 / total, 0, 0);
}
}
publishProgress(100, 0, 0);
// summarize partial add/update/dirty results for total values
totalAddCount += add.size();
totalUpdateCount += update.size();
totalDirtyCount += dirty.size();
if (dupes > 0) {
mLog.add(getRes().getString(R.string.import_update_details, totalUpdateCount, dupes));
if (dupesIgnored.size() > 0) {
mLog.add(getRes().getString(R.string.import_update_ignored));
}
}
// export info for calling code
mDupes = dupes;
mAdded = totalAddCount;
mUpdated = totalUpdateCount;
Timber.d("add notes total: %d", totalAddCount);
Timber.d("update notes total: %d", totalUpdateCount);
Timber.d("dirty notes total: %d", totalDirtyCount);
// add to col (for last chunk)
addNotes(add);
add.clear();
updateNotes(update);
update.clear();
mDst.getDb().getDatabase().setTransactionSuccessful();
} finally {
if (cur != null) {
cur.close();
}
if (mDst.getDb().getDatabase().inTransaction()) {
try {
mDst.getDb().getDatabase().endTransaction();
} catch (Exception e) {
Timber.w(e);
}
}
}
// flush the last chunk of dirty notes (outside the transaction, matching the partial flushes)
long[] das = Utils.collection2Array(dirty);
mDst.updateFieldCache(das);
mDst.getTags().registerNotes(das);
}
Use of com.ichi2.libanki.Media in the project AnkiChinaAndroid by ankichinateam.
Class AnkiPackageImporter, method run:
/**
 * Imports an .apkg (or encrypted .card) package: decrypts if needed, unzips the collection
 * and media map into a temporary directory, validates the extracted collection, runs the
 * anki2 importer on it, then extracts any static media ("_"- or "latex-"-prefixed files)
 * that are not already present. Temporary files are cleaned up in the finally block.
 *
 * @throws ImportExportException propagated from the underlying anki2 import
 */
@Override
public void run() throws ImportExportException {
publishProgress(0, 0, 0);
File tempDir = new File(new File(mCol.getPath()).getParent(), "tmpzip");
// self.col into Anki.
Collection tmpCol;
Timber.d("Attempting to import package %s", mFile);
String tmpApkgPath = "";
// .card files are AES-encrypted apkg packages; decrypt to a sibling .apkg first.
if (mFile.endsWith(".card")) {
tmpApkgPath = mFile.replace(".card", ".apkg");
AESUtil.decryptionFile(mFile, tmpApkgPath);
mFile = tmpApkgPath;
}
try {
// We extract the zip contents into a temporary directory and do a little more
// validation than the desktop client to ensure the extracted collection is an apkg.
String colname = "collection.anki21";
try {
// extract the deck from the zip file
try {
mZip = new ZipFile(new File(mFile));
} catch (FileNotFoundException fileNotFound) {
// The cache can be cleared between copying the file in and importing. This is temporary
if (fileNotFound.getMessage().contains("ENOENT")) {
mLog.add(getRes().getString(R.string.import_log_file_cache_cleared));
return;
}
// displays: failed to unzip
throw fileNotFound;
}
// v2 scheduler? fall back to the v1 collection filename if anki21 is absent
if (mZip.getEntry(colname) == null) {
colname = CollectionHelper.COLLECTION_FILENAME;
}
// Make sure we have sufficient free space
long uncompressedSize = Utils.calculateUncompressedSize(mZip);
long availableSpace = Utils.determineBytesAvailable(mCol.getPath());
Timber.d("Total uncompressed size will be: %d", uncompressedSize);
Timber.d("Total available size is: %d", availableSpace);
if (uncompressedSize > availableSpace) {
Timber.e("Not enough space to unzip, need %d, available %d", uncompressedSize, availableSpace);
mLog.add(getRes().getString(R.string.import_log_insufficient_space, uncompressedSize, availableSpace));
return;
}
// The filename that we extract should be collection.anki2
// Importing collection.anki21 fails due to some media regexes expecting collection.anki2.
// We follow how Anki does it and fix the problem here.
HashMap<String, String> mediaToFileNameMap = new HashMap<>();
mediaToFileNameMap.put(colname, CollectionHelper.COLLECTION_FILENAME);
Utils.unzipFiles(mZip, tempDir.getAbsolutePath(), new String[] { colname, "media" }, mediaToFileNameMap);
colname = CollectionHelper.COLLECTION_FILENAME;
} catch (IOException e) {
Timber.e(e, "Failed to unzip apkg.");
AnkiDroidApp.sendExceptionReport(e, "AnkiPackageImporter::run() - unzip");
mLog.add(getRes().getString(R.string.import_log_failed_unzip, e.getLocalizedMessage()));
return;
}
String colpath = new File(tempDir, colname).getAbsolutePath();
if (!(new File(colpath)).exists()) {
mLog.add(getRes().getString(R.string.import_log_failed_copy_to, colpath));
return;
}
// Open the extracted collection just long enough to validate it.
tmpCol = Storage.Collection(mContext, colpath);
try {
if (!tmpCol.validCollection()) {
mLog.add(getRes().getString(R.string.import_log_failed_validate));
return;
}
} finally {
if (tmpCol != null) {
tmpCol.close();
}
}
mFile = colpath;
// we need the media dict in advance, and we'll need a map of fname ->
// number to use during the import
File mediaMapFile = new File(tempDir, "media");
mNameToNum = new HashMap<>();
// NOTE(review): tmpCol was closed above but its media dir is read here — confirm
// Media.dir() is safe to call on a closed collection.
String dirPath = tmpCol.getMedia().dir();
File dir = new File(dirPath);
// We need the opposite mapping in AnkiDroid since our extraction method requires it.
Map<String, String> numToName = new HashMap<>();
try (JsonReader jr = new JsonReader(new FileReader(mediaMapFile))) {
jr.beginObject();
// v in anki
String name;
// k in anki
String num;
while (jr.hasNext()) {
num = jr.nextName();
name = jr.nextString();
// Reject zip entries that would escape the media directory (path traversal).
File file = new File(dir, name);
if (!Utils.isInside(file, dir)) {
throw (new RuntimeException("Invalid file"));
}
// NOTE(review): nfcNormalized returns a new string; the result is discarded here,
// so this call has no effect — confirm whether num should be reassigned.
Utils.nfcNormalized(num);
mNameToNum.put(name, num);
numToName.put(num, name);
}
jr.endObject();
} catch (FileNotFoundException e) {
Timber.e("Apkg did not contain a media dict. No media will be imported.");
} catch (IOException e) {
Timber.e("Malformed media dict. Media import will be incomplete.");
}
// run anki2 importer
super.run();
// import static media
for (Map.Entry<String, String> entry : mNameToNum.entrySet()) {
String file = entry.getKey();
String c = entry.getValue();
// Only "_"-prefixed (template) and "latex-" files are copied here; note media is
// handled by the anki2 importer itself.
if (!file.startsWith("_") && !file.startsWith("latex-")) {
continue;
}
File path = new File(mCol.getMedia().dir(), Utils.nfcNormalized(file));
if (!path.exists()) {
try {
Utils.unzipFiles(mZip, mCol.getMedia().dir(), new String[] { c }, numToName);
} catch (IOException e) {
Timber.e("Failed to extract static media file. Ignoring.");
}
}
}
} finally {
long availableSpace = Utils.determineBytesAvailable(mCol.getPath());
Timber.d("Total available size is: %d", availableSpace);
// Clean up our temporary files
if (tempDir.exists()) {
BackupManager.removeDir(tempDir);
}
}
publishProgress(100, 100, 100);
// if(!tmpApkgPath.isEmpty()){
// new File(tmpApkgPath).delete();
// }
}
Use of com.ichi2.libanki.Media in the project AnkiChinaAndroid by ankichinateam.
Class RemoteMediaServer, method downloadFiles:
/**
* args: files
* <br>
* This method returns a ZipFile with the OPEN_DELETE flag, ensuring that the file on disk will
* be automatically deleted when the stream is closed.
*/
/**
 * args: files
 * <br>
 * Requests the given media files from the sync server and saves the reply as a zip
 * archive on disk. The returned ZipFile is opened with the OPEN_DELETE flag, so the
 * underlying file is removed automatically when the stream is closed.
 */
public ZipFile downloadFiles(List<String> top) throws UnknownHttpResponseException {
Response response = null;
try {
JSONObject payload = new JSONObject().put("files", new JSONArray(top));
response = super.req("downloadFiles", HttpSyncer.getInputStream(Utils.jsonToString(payload)));
// Place the temporary archive alongside the collection file.
String archivePath = mCol.getPath().replaceFirst("collection\\.anki2$", "tmpSyncFromServer.zip");
// retrieve contents and save to file on disk:
super.writeToFile(response.body().byteStream(), archivePath);
return new ZipFile(new File(archivePath), ZipFile.OPEN_READ | ZipFile.OPEN_DELETE);
} catch (IOException | NullPointerException e) {
Timber.e(e, "Failed to download requested media files");
throw new RuntimeException(e);
} finally {
if (response != null && response.body() != null) {
response.body().close();
}
}
}
Use of com.ichi2.libanki.Media in the project AnkiChinaAndroid by ankichinateam.
Class ZipFile (exporter), method exportInto:
/**
* Export source database into new destination database Note: The following python syntax isn't supported in
* Android: for row in mSrc.db.execute("select * from cards where id in "+ids2str(cids)): therefore we use a
* different method for copying tables
*
* @param path String path to destination database
* @throws JSONException
* @throws IOException
*/
/**
 * Export source database into new destination database Note: The following python syntax isn't supported in
 * Android: for row in mSrc.db.execute("select * from cards where id in "+ids2str(cids)): therefore we use a
 * different method for copying tables
 * <p>
 * Copies the selected cards, their notes, revlog (when scheduling is included), used
 * models, decks and deck configs into a fresh collection at {@code path}, then gathers
 * the media file names referenced by the exported notes into {@code mMediaFiles}.
 *
 * @param path String path to destination database
 * @param context Android context used to create the destination collection
 * @throws JSONException
 * @throws IOException
 */
public void exportInto(String path, Context context) throws JSONException, IOException, ImportExportException {
// create a new collection at the target (delete any stale file first)
new File(path).delete();
Collection dst = Storage.Collection(context, path);
mSrc = mCol;
// find cards
Long[] cids = cardIds();
// attach dst to src so we can copy data between them. This isn't done in original libanki as Python more
// flexible
dst.close();
Timber.d("Attach DB");
mSrc.getDb().getDatabase().execSQL("ATTACH '" + path + "' AS DST_DB");
// copy cards, noting used nids (as unique set)
Timber.d("Copy cards");
mSrc.getDb().getDatabase().execSQL("INSERT INTO DST_DB.cards select * from cards where id in " + Utils.ids2str(cids));
Set<Long> nids = new HashSet<>(mSrc.getDb().queryLongList("select nid from cards where id in " + Utils.ids2str(cids)));
// notes
Timber.d("Copy notes");
ArrayList<Long> uniqueNids = new ArrayList<>(nids);
String strnids = Utils.ids2str(uniqueNids);
mSrc.getDb().getDatabase().execSQL("INSERT INTO DST_DB.notes select * from notes where id in " + strnids);
// remove system tags if not exporting scheduling info
if (!mIncludeSched) {
Timber.d("Stripping system tags from list");
ArrayList<String> srcTags = mSrc.getDb().queryStringList("select tags from notes where id in " + strnids);
ArrayList<Object[]> args = new ArrayList<>(srcTags.size());
// NOTE(review): the same Object[] instance is reused and re-added each iteration, so all
// entries alias the last row's values — confirm executeMany copies/binds per-add, otherwise
// only the final (tags, nid) pair takes effect.
Object[] arg = new Object[2];
for (int row = 0; row < srcTags.size(); row++) {
arg[0] = removeSystemTags(srcTags.get(row));
arg[1] = uniqueNids.get(row);
args.add(row, arg);
}
mSrc.getDb().executeMany("UPDATE DST_DB.notes set tags=? where id=?", args);
}
// models used by the notes
Timber.d("Finding models used by notes");
ArrayList<Long> mids = mSrc.getDb().queryLongList("select distinct mid from DST_DB.notes where id in " + strnids);
// card history and revlog
if (mIncludeSched) {
Timber.d("Copy history and revlog");
mSrc.getDb().getDatabase().execSQL("insert into DST_DB.revlog select * from revlog where cid in " + Utils.ids2str(cids));
// reopen collection to destination database (different from original python code)
mSrc.getDb().getDatabase().execSQL("DETACH DST_DB");
dst.reopen();
} else {
Timber.d("Detaching destination db and reopening");
// first reopen collection to destination database (different from original python code)
mSrc.getDb().getDatabase().execSQL("DETACH DST_DB");
dst.reopen();
// then need to reset card state
Timber.d("Resetting cards");
dst.getSched().resetCards(cids);
}
// models - start with zero
Timber.d("Copy models");
for (Model m : mSrc.getModels().all()) {
if (mids.contains(m.getLong("id"))) {
Timber.d("Copy models:%s", m.getLong("id"));
dst.getModels().update(m);
}
}
// debug: list the models that ended up in the destination
for (Model m : dst.getModels().all()) {
Timber.d("check dst model:%s", m.getLong("id"));
}
// decks
Timber.d("Copy decks");
ArrayList<Long> dids = new ArrayList<>();
// when exporting a single deck, include it and all its child decks
if (mDid != null) {
dids.add(mDid);
for (Long x : mSrc.getDecks().children(mDid).values()) {
dids.add(x);
}
}
JSONObject dconfs = new JSONObject();
for (Deck d : mSrc.getDecks().all()) {
// skip the default deck
if ("1".equals(d.getString("id"))) {
continue;
}
if (mDid != null && !dids.contains(d.getLong("id"))) {
continue;
}
// remember non-default configs of static decks so they can be copied below
if (d.getInt("dyn") != 1 && d.getLong("conf") != 1L) {
if (mIncludeSched) {
dconfs.put(Long.toString(d.getLong("conf")), true);
}
}
Deck destinationDeck = d.deepClone();
if (!mIncludeSched) {
// scheduling not included, so reset deck settings to default
destinationDeck.put("conf", 1);
}
dst.getDecks().update(destinationDeck);
}
// copy used deck confs
Timber.d("Copy deck options");
for (DeckConfig dc : mSrc.getDecks().allConf()) {
if (dconfs.has(dc.getString("id"))) {
dst.getDecks().updateConf(dc);
}
}
// find used media
Timber.d("Find used media");
JSONObject media = new JSONObject();
mMediaDir = mSrc.getMedia().dir();
if (mIncludeMedia) {
ArrayList<Long> mid = mSrc.getDb().queryLongList("select mid from notes where id in " + strnids);
ArrayList<String> flds = mSrc.getDb().queryStringList("select flds from notes where id in " + strnids);
// collect every media reference from the exported notes' fields
for (int idx = 0; idx < mid.size(); idx++) {
for (String file : mSrc.getMedia().filesInStr(mid.get(idx), flds.get(idx))) {
// skip files in subdirs
if (file.contains(File.separator)) {
continue;
}
media.put(file, true);
}
}
if (mMediaDir != null) {
// NOTE(review): File.listFiles() returns null if the dir vanished or is unreadable;
// that would NPE here — confirm the media dir is guaranteed to exist at this point.
for (File f : new File(mMediaDir).listFiles()) {
if (f.isDirectory()) {
continue;
}
String fname = f.getName();
if (fname.startsWith("_")) {
// Loop through every model that will be exported, and check if it contains a reference to f
for (JSONObject model : mSrc.getModels().all()) {
if (_modelHasMedia(model, fname)) {
media.put(fname, true);
break;
}
}
}
}
}
}
// flatten the collected media names into the list used when zipping the package
JSONArray keys = media.names();
if (keys != null) {
for (int i = 0; i < keys.length(); i++) {
mMediaFiles.add(keys.getString(i));
}
}
Timber.d("Cleanup");
dst.setCrt(mSrc.getCrt());
// todo: tags?
mCount = dst.cardCount();
dst.setMod();
postExport();
dst.close();
}
Aggregations