Use of com.biglybt.core.logging.LogEvent in project BiglyBT by BiglySoftware.
Class CacheFileWithCache, method multiBlockFlush:
protected void multiBlockFlush(List multi_block_entries, long multi_block_start, long multi_block_next, boolean release_entries) throws CacheFileManagerException {
    boolean write_ok = false;
    try {
        if (TRACE)
            Logger.log(new LogEvent(torrent, LOGID, "multiBlockFlush: writing " + multi_block_entries.size() + " entries, [" + multi_block_start + "," + multi_block_next + "," + release_entries + "]"));
        DirectByteBuffer[] buffers = new DirectByteBuffer[multi_block_entries.size()];
        long expected_per_entry_write = 0;
        for (int i = 0; i < buffers.length; i++) {
            CacheEntry entry = (CacheEntry) multi_block_entries.get(i);
            // sanity check - we should always be flushing entire entries
            DirectByteBuffer buffer = entry.getBuffer();
            if (buffer.limit(SS_CACHE) - buffer.position(SS_CACHE) != entry.getLength()) {
                throw (new CacheFileManagerException(this, "flush: inconsistent entry length, position wrong"));
            }
            expected_per_entry_write += entry.getLength();
            buffers[i] = buffer;
        }
        long expected_overall_write = multi_block_next - multi_block_start;
        if (expected_per_entry_write != expected_overall_write) {
            throw (new CacheFileManagerException(this, "flush: inconsistent write length, entrys = " + expected_per_entry_write + " overall = " + expected_overall_write));
        }
        getFMFile().write(buffers, multi_block_start);
        manager.fileBytesWritten(expected_overall_write);
        // bytes_written += expected_overall_write;
        write_ok = true;
    } catch (FMFileManagerException e) {
        throw (new CacheFileManagerException(this, "flush fails", e));
    } finally {
        for (int i = 0; i < multi_block_entries.size(); i++) {
            CacheEntry entry = (CacheEntry) multi_block_entries.get(i);
            if (release_entries) {
                manager.releaseCacheSpace(entry);
            } else {
                entry.resetBufferPosition();
                if (write_ok) {
                    entry.setClean();
                }
            }
        }
    }
}
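The method gathers the buffers of a run of contiguous dirty cache entries, verifies that the per-entry lengths sum to the requested file range, performs one gather write, and only then marks the entries clean. Below is a minimal sketch of that shape using plain java.nio types; FileChannel stands in for BiglyBT's FMFile and the entry bookkeeping in the finally block is simplified.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.util.List;

    final class GatherFlushSketch {

        static void flush(FileChannel channel, List<ByteBuffer> entryBuffers,
                          long rangeStart, long rangeEnd) throws IOException {
            boolean writeOk = false;
            try {
                long perEntryTotal = 0;
                ByteBuffer[] buffers = new ByteBuffer[entryBuffers.size()];
                for (int i = 0; i < buffers.length; i++) {
                    ByteBuffer b = entryBuffers.get(i);
                    perEntryTotal += b.remaining();   // limit - position, as in the sanity check
                    buffers[i] = b;
                }
                if (perEntryTotal != rangeEnd - rangeStart) {
                    throw new IOException("flush: inconsistent write length");
                }
                channel.position(rangeStart);
                long written = 0;
                while (written < perEntryTotal) {     // a gather write may be partial
                    written += channel.write(buffers);
                }
                writeOk = true;
            } finally {
                // the real method either releases each entry's cache space or resets its
                // buffer position and, when the write succeeded, marks the entry clean
                if (writeOk) {
                    for (ByteBuffer b : entryBuffers) {
                        b.rewind();
                    }
                }
            }
        }
    }

The point of the sanity checks is that a gather write trusts each buffer's position and limit, so an entry whose buffer window did not match its recorded length would silently shift every byte written after it.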
Use of com.biglybt.core.logging.LogEvent in project BiglyBT by BiglySoftware.
Class DMCheckerImpl, method stop:
@Override
public void stop() {
    int check_wait;
    int read_wait;
    try {
        this_mon.enter();
        if (stopped || !started) {
            return;
        }
        // when we exit here we guarantee that all file usage operations have completed
        // i.e. writes and checks (checks being doubly async)
        stopped = true;
        read_wait = async_reads;
        check_wait = async_checks;
    } finally {
        this_mon.exit();
    }
    long log_time = SystemTime.getCurrentTime();
    for (int i = 0; i < read_wait; i++) {
        long now = SystemTime.getCurrentTime();
        if (now < log_time) {
            log_time = now;
        } else {
            if (now - log_time > 1000) {
                log_time = now;
                if (Logger.isEnabled()) {
                    Logger.log(new LogEvent(disk_manager, LOGID, "Waiting for check-reads to complete - " + (read_wait - i) + " remaining"));
                }
            }
        }
        async_read_sem.reserve();
    }
    log_time = SystemTime.getCurrentTime();
    for (int i = 0; i < check_wait; i++) {
        long now = SystemTime.getCurrentTime();
        if (now < log_time) {
            log_time = now;
        } else {
            if (now - log_time > 1000) {
                log_time = now;
                if (Logger.isEnabled()) {
                    Logger.log(new LogEvent(disk_manager, LOGID, "Waiting for checks to complete - " + (check_wait - i) + " remaining"));
                }
            }
        }
        async_check_sem.reserve();
    }
}
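Both loops follow the same shutdown pattern: snapshot the number of outstanding async operations while holding the monitor, then reserve the semaphore once per operation, logging progress at most once per second and re-syncing if the system clock moves backwards. A minimal sketch of that wait loop, with java.util.concurrent.Semaphore standing in for BiglyBT's AESemaphore and System.out in place of the Logger (the method name and parameters are illustrative):

    import java.util.concurrent.Semaphore;

    final class DrainSketch {

        static void awaitCompletions(Semaphore done, int outstanding, String what)
                throws InterruptedException {
            long logTime = System.currentTimeMillis();
            for (int i = 0; i < outstanding; i++) {
                long now = System.currentTimeMillis();
                if (now < logTime) {
                    logTime = now;            // clock went backwards, resync the throttle
                } else if (now - logTime > 1000) {
                    logTime = now;
                    System.out.println("Waiting for " + what + " to complete - "
                            + (outstanding - i) + " remaining");
                }
                done.acquire();               // one release() is expected per completed operation
            }
        }
    }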
Use of com.biglybt.core.logging.LogEvent in project BiglyBT by BiglySoftware.
Class DMWriterImpl, method stop:
@Override
public void stop() {
    int write_wait;
    try {
        this_mon.enter();
        if (stopped || !started) {
            return;
        }
        stopped = true;
        write_wait = async_writes;
    } finally {
        this_mon.exit();
    }
    // wait for writes
    long log_time = SystemTime.getCurrentTime();
    for (int i = 0; i < write_wait; i++) {
        long now = SystemTime.getCurrentTime();
        if (now < log_time) {
            log_time = now;
        } else {
            if (now - log_time > 1000) {
                log_time = now;
                if (Logger.isEnabled()) {
                    Logger.log(new LogEvent(disk_manager, LOGID, "Waiting for writes to complete - " + (write_wait - i) + " remaining"));
                }
            }
        }
        async_write_sem.reserve();
    }
}
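DMWriterImpl.stop is the single-semaphore version of the same drain. It is only correct if the async_writes counter and async_write_sem are kept in step elsewhere: dispatch bumps the counter under the monitor, and the completion path signals the semaphore once stop has taken its snapshot. A hedged sketch of one such pairing follows; dispatchWrite and onWriteComplete are hypothetical names, not BiglyBT's API.

    import java.util.concurrent.Semaphore;

    final class WriterLifecycleSketch {

        private final Object monitor = new Object();
        private final Semaphore writeDone = new Semaphore(0);
        private int asyncWrites;
        private boolean stopped;

        boolean dispatchWrite(Runnable write) {
            synchronized (monitor) {
                if (stopped) {
                    return false;             // refuse new work once stop() has run
                }
                asyncWrites++;
            }
            // submit 'write' to an executor; its completion handler must call onWriteComplete()
            return true;
        }

        void onWriteComplete() {
            boolean signal;
            synchronized (monitor) {
                asyncWrites--;
                signal = stopped;             // only stop() waits, so only signal after it has run
            }
            if (signal) {
                writeDone.release();
            }
        }

        void stop() throws InterruptedException {
            int writeWait;
            synchronized (monitor) {
                if (stopped) {
                    return;
                }
                stopped = true;
                writeWait = asyncWrites;      // snapshot taken while no new writes can start
            }
            for (int i = 0; i < writeWait; i++) {
                writeDone.acquire();          // one permit per write outstanding at stop time
            }
        }
    }

Whether BiglyBT signals unconditionally or only after stop is a detail of its AESemaphore usage; the sketch just shows one arrangement under which reserving write_wait permits is guaranteed to terminate.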
Use of com.biglybt.core.logging.LogEvent in project BiglyBT by BiglySoftware.
Class DiskManagerRequestImpl, method requestStarts:
@Override
public void requestStarts() {
    if (DEBUG) {
        try {
            int id;
            synchronized (DiskManagerRequestImpl.class) {
                id = next_id++;
            }
            name = getName() + " [" + id + "]";
            start_time = SystemTime.getCurrentTime();
            Logger.log(new LogEvent(LOGID, "DMRequest start: " + name));
        } catch (Throwable e) {
        }
    }
}
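The debug path tags each request with a process-wide sequence number allocated inside a synchronized block and records the start time so a later log line can report how long the request took. A sketch of the same idea using AtomicLong instead of class-level synchronization; the requestEnds counterpart and the field names are illustrative assumptions, not the project's code.

    import java.util.concurrent.atomic.AtomicLong;

    final class RequestTraceSketch {

        private static final boolean DEBUG = true;
        private static final AtomicLong NEXT_ID = new AtomicLong();

        private String name = "read";              // the real class derives this from getName()
        private long startTime;

        void requestStarts() {
            if (DEBUG) {
                long id = NEXT_ID.incrementAndGet();   // lock-free equivalent of next_id++
                name = name + " [" + id + "]";
                startTime = System.currentTimeMillis();
                System.out.println("DMRequest start: " + name);
            }
        }

        void requestEnds() {
            if (DEBUG) {
                long elapsed = System.currentTimeMillis() - startTime;
                System.out.println("DMRequest end: " + name + ", elapsed " + elapsed + " ms");
            }
        }
    }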
Use of com.biglybt.core.logging.LogEvent in project BiglyBT by BiglySoftware.
Class IpFilterAutoLoaderImpl, method loadOtherFilters:
protected void loadOtherFilters(boolean allowAsyncDownloading, boolean loadOldWhileAsyncDownloading) {
    int p2bVersion = -1;
    try {
        class_mon.enter();
        List new_ipRanges = new ArrayList(1024);
        InputStream fin = null;
        BufferedInputStream bin = null;
        boolean isURL = false;
        try {
            // open the file
            String file = COConfigurationManager.getStringParameter(CFG_AUTOLOAD_FILE);
            Logger.log(new LogEvent(LOGID, "IP Filter file: " + file));
            File filtersFile = new File(file);
            if (filtersFile.exists()) {
                isURL = false;
            } else {
                if (!UrlUtils.isURL(file)) {
                    return;
                }
                isURL = true;
                filtersFile = FileUtil.getUserFile("ipfilter.dl");
                if (filtersFile.exists()) {
                    if (allowAsyncDownloading) {
                        Logger.log(new LogEvent(LOGID, "Downloading " + file + " async"));
                        downloadFiltersAsync(new URL(file));
                        if (!loadOldWhileAsyncDownloading) {
                            return;
                        }
                    }
                } else {
                    // no old dl, download sync now
                    Logger.log(new LogEvent(LOGID, "sync Downloading " + file));
                    try {
                        ResourceDownloader rd = ResourceDownloaderFactoryImpl.getSingleton().create(new URL(file));
                        fin = rd.download();
                        FileUtil.copyFile(fin, filtersFile);
                        setNextAutoDownload(true);
                    } catch (ResourceDownloaderException e) {
                        return;
                    }
                }
            }
            fin = new FileInputStream(filtersFile);
            bin = new BufferedInputStream(fin, 16384);
            // extract (g)zip'd file and open that
            byte[] headerBytes = new byte[2];
            bin.mark(3);
            bin.read(headerBytes, 0, 2);
            bin.reset();
            if (headerBytes[1] == (byte) 0x8b && headerBytes[0] == 0x1f) {
                GZIPInputStream gzip = new GZIPInputStream(bin);
                filtersFile = FileUtil.getUserFile("ipfilter.ext");
                FileUtil.copyFile(gzip, filtersFile);
                fin = new FileInputStream(filtersFile);
                bin = new BufferedInputStream(fin, 16384);
            } else if (headerBytes[0] == 0x50 && headerBytes[1] == 0x4b) {
                ZipInputStream zip = new ZipInputStream(bin);
                ZipEntry zipEntry = zip.getNextEntry();
                // Skip small files
                while (zipEntry != null && zipEntry.getSize() < 1024 * 1024) {
                    zipEntry = zip.getNextEntry();
                }
                if (zipEntry == null) {
                    return;
                }
                filtersFile = FileUtil.getUserFile("ipfilter.ext");
                FileUtil.copyFile(zip, filtersFile);
                fin = new FileInputStream(filtersFile);
                bin = new BufferedInputStream(fin, 16384);
            }
            bin.mark(8);
            p2bVersion = getP2BFileVersion(bin);
            if (p2bVersion < 1 || p2bVersion > 3) {
                bin.reset();
                loadDATFilters(bin);
                return;
            }
            byte[] descBytes = new byte[255];
            byte[] ipBytes = new byte[4];
            String encoding = p2bVersion == 1 ? "ISO-8859-1" : "UTF-8";
            if (p2bVersion == 1 || p2bVersion == 2) {
                while (true) {
                    String description = readString(bin, descBytes, encoding);
                    int read = bin.read(ipBytes);
                    if (read < 4) {
                        break;
                    }
                    int startIp = ByteFormatter.byteArrayToInt(ipBytes);
                    read = bin.read(ipBytes);
                    if (read < 4) {
                        break;
                    }
                    int endIp = ByteFormatter.byteArrayToInt(ipBytes);
                    IpRangeImpl ipRange = new IpRangeImpl(description, startIp, endIp, true);
                    ipRange.setAddedToRangeList(true);
                    new_ipRanges.add(ipRange);
                }
            } else {
                // version 3
                int read = bin.read(ipBytes);
                if (read < 4) {
                    return;
                }
                int numDescs = ByteFormatter.byteArrayToInt(ipBytes);
                String[] descs = new String[numDescs];
                for (int i = 0; i < numDescs; i++) {
                    descs[i] = readString(bin, descBytes, encoding);
                }
                read = bin.read(ipBytes);
                if (read < 4) {
                    return;
                }
                int numRanges = ByteFormatter.byteArrayToInt(ipBytes);
                for (int i = 0; i < numRanges; i++) {
                    read = bin.read(ipBytes);
                    if (read < 4) {
                        return;
                    }
                    int descIdx = ByteFormatter.byteArrayToInt(ipBytes);
                    read = bin.read(ipBytes);
                    if (read < 4) {
                        return;
                    }
                    int startIp = ByteFormatter.byteArrayToInt(ipBytes);
                    read = bin.read(ipBytes);
                    if (read < 4) {
                        return;
                    }
                    int endIp = ByteFormatter.byteArrayToInt(ipBytes);
                    String description = descIdx < descs.length && descIdx >= 0 ? descs[descIdx] : "";
                    IpRangeImpl ipRange = new IpRangeImpl(description, startIp, endIp, true);
                    ipRange.setAddedToRangeList(true);
                    new_ipRanges.add(ipRange);
                }
            }
        } catch (IOException e) {
            Debug.out(e);
        } finally {
            if (bin != null) {
                try {
                    bin.close();
                } catch (Throwable e) {
                }
            }
            if (fin != null) {
                try {
                    fin.close();
                } catch (Throwable e) {
                }
            }
            Iterator it = new_ipRanges.iterator();
            while (it.hasNext()) {
                ((IpRange) it.next()).checkValid();
            }
            ipFilter.markAsUpToDate();
            if (!isURL) {
                setFileReloadTimer();
            }
        }
    } finally {
        class_mon.exit();
    }
}
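After the (g)zip detour, the interesting part is the P2B record parsing: for versions 1 and 2 each record is a null-terminated description followed by a 4-byte start IP and a 4-byte end IP, while version 3 stores a description table up front and records reference it by index. Below is a minimal, standalone sketch of the version 1/2 loop using plain JDK streams; readNullTerminated is a hypothetical helper standing in for the class's readString, and big-endian byte order is assumed to match ByteFormatter.byteArrayToInt.

    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.Charset;
    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.List;

    final class P2BSketch {

        static final class Range {
            final String description;
            final int startIp;
            final int endIp;
            Range(String description, int startIp, int endIp) {
                this.description = description;
                this.startIp = startIp;
                this.endIp = endIp;
            }
        }

        // Parse version 1/2 records: a null-terminated description followed by two
        // 4-byte IPs. Version 1 descriptions are ISO-8859-1, version 2 are UTF-8.
        static List<Range> readV1V2(InputStream in, int p2bVersion) throws IOException {
            DataInputStream din = new DataInputStream(in);
            Charset charset = p2bVersion == 1 ? StandardCharsets.ISO_8859_1 : StandardCharsets.UTF_8;
            List<Range> ranges = new ArrayList<>();
            while (true) {
                String description = readNullTerminated(din, charset);
                int startIp, endIp;
                try {
                    startIp = din.readInt();   // 4 bytes, assumed big-endian as in the original
                    endIp = din.readInt();
                } catch (EOFException e) {
                    break;                     // no more complete records
                }
                ranges.add(new Range(description, startIp, endIp));
            }
            return ranges;
        }

        private static String readNullTerminated(DataInputStream in, Charset cs) throws IOException {
            ByteArrayOutputStream buf = new ByteArrayOutputStream();
            int b;
            while ((b = in.read()) > 0) {
                buf.write(b);
            }
            return new String(buf.toByteArray(), cs);
        }
    }

A call such as P2BSketch.readV1V2(bufferedStream, 2) would be made after the version sniffing, assuming the stream is positioned just past the P2B header as it is when getP2BFileVersion returns.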