Use of com.biglybt.core.logging.LogEvent in project BiglyBT by BiglySoftware.
Example 1: the class VirtualNonBlockingServerChannelSelector, method start.
/**
 * Start the server and begin accepting incoming connections.
 *
 * Opens one non-blocking ServerSocketChannel for each port in the range
 * [start_port, start_port + num_ports) and registers each channel with the
 * singleton accept selector. A failure to bind one port is logged and
 * alerted but does not prevent the remaining ports from being bound.
 * No-op if the server is already running. Guarded by this_mon.
 */
@Override
public void start() {
	try {
		this_mon.enter();

		if (!isRunning()) {
			for (int i = start_port; i < start_port + num_ports; i++) {
				try {
					final ServerSocketChannel server_channel = ServerSocketChannel.open();

					server_channels.add(server_channel);

					server_channel.socket().setReuseAddress(true);

					if (receive_buffer_size > 0)
						server_channel.socket().setReceiveBufferSize(receive_buffer_size);

					// accept backlog of 1024 pending connections
					server_channel.socket().bind(new InetSocketAddress(bind_address, i), 1024);

					if (Logger.isEnabled())
						// fix: include the bound port in the message - when several ports are
						// opened in this loop the address alone doesn't identify the socket
						Logger.log(new LogEvent(LOGID, "TCP incoming server socket " + bind_address + ":" + i));

					server_channel.configureBlocking(false);

					VirtualAcceptSelector.getSingleton().register(server_channel, new VirtualAcceptSelector.AcceptListener() {
						@Override
						public void newConnectionAccepted(SocketChannel channel) {
							// record liveness so stale-accept monitoring sees activity
							last_accept_time = SystemTime.getCurrentTime();

							listener.newConnectionAccepted(server_channel, channel);
						}
					});
				} catch (Throwable t) {
					Debug.out(t);

					Logger.log(new LogAlert(LogAlert.UNREPEATABLE, "ERROR, unable to bind TCP incoming server socket to " + i, t));
				}
			}

			// init to now
			last_accept_time = SystemTime.getCurrentTime();
		}
	} finally {
		this_mon.exit();
	}
}
Use of com.biglybt.core.logging.LogEvent in project BiglyBT by BiglySoftware.
Example 2: the class TRTrackerServerProcessor, method processTrackerRequest.
/**
 * Processes a single tracker request (announce, query, scrape or full scrape)
 * and fills in the supplied output parameters.
 *
 * @param _server       the owning tracker server (cached in the 'server' field)
 * @param request       raw request string, passed through to pre-processing and export
 * @param root_out      output: root_out[0] receives the bencode-ready response map
 * @param peer_out      output: peer_out[0] receives the contacting peer (announce only)
 * @param _request_type one of the TRTrackerServerRequest.RT_* constants
 * @param hashes        torrent hash(es): exactly one for announce/query, one or more for scrape
 * @return the torrent the request resolved to, or null (e.g. multi-hash scrape)
 * @throws TRTrackerServerException on any validation or authorisation failure
 */
protected TRTrackerServerTorrentImpl processTrackerRequest(TRTrackerServerImpl _server, String request, // output
Map[] root_out, // output
TRTrackerServerPeerImpl[] peer_out, int _request_type, byte[][] hashes, String link, String scrape_flags, HashWrapper peer_id, boolean no_peer_id, byte compact_mode, String key, String event, boolean stop_to_queue, int port, int udp_port, int http_port, String real_ip_address, String original_client_ip_address, long downloaded, long uploaded, long left, int num_want, byte crypto_level, byte az_ver, int up_speed, DHTNetworkPosition network_position) throws TRTrackerServerException {
server = _server;
request_type = _request_type;
// refuse requests until the server has fully initialised
if (!server.isReady()) {
throw (new TRTrackerServerException("Tracker initialising, please wait"));
}
// high-precision start time, used for request timing/stats elsewhere
start = SystemTime.getHighPrecisionCounter();
// NOTE(review): identity (!=) comparison on Strings - presumably the address
// helpers return the *same* instance when no override applied; confirm before
// converting to !equals(), as content-equal but distinct instances would differ
boolean ip_override = real_ip_address != original_client_ip_address;
boolean loopback = TRTrackerUtils.isLoopback(real_ip_address);
if (loopback) {
// any override is purely for routing purposes for loopback connections and we don't
// want to apply the ip-override precedence rules against us
ip_override = false;
}
// translate any 127.0.0.1 local addresses back to the tracker address. Note this
// fixes up .i2p and onion addresses back to their real values when needed
String client_ip_address = TRTrackerUtils.adjustHostFromHosting(original_client_ip_address);
// same identity-comparison convention as above: adjustHostFromHosting appears
// to return the original instance when no adjustment was made
if (client_ip_address != original_client_ip_address) {
if (Logger.isEnabled()) {
Logger.log(new LogEvent(LogIDs.TRACKER, " address adjusted: original=" + original_client_ip_address + ", real=" + real_ip_address + ", adjusted=" + client_ip_address + ", loopback=" + loopback));
}
}
// network restriction check (Public / I2P / Tor etc.)
if (!TRTrackerServerImpl.getAllNetworksSupported()) {
String network = AENetworkClassifier.categoriseAddress(client_ip_address);
String[] permitted_networks = TRTrackerServerImpl.getPermittedNetworks();
boolean ok = false;
for (int i = 0; i < permitted_networks.length; i++) {
// NOTE(review): == on Strings relies on AENetworkClassifier returning
// interned network constants - TODO confirm; equals() would be safer
if (network == permitted_networks[i]) {
ok = true;
break;
}
}
if (!ok) {
throw (new TRTrackerServerException("Network '" + network + "' not supported"));
}
}
TRTrackerServerTorrentImpl torrent = null;
if (request_type != TRTrackerServerRequest.RT_FULL_SCRAPE) {
// ---- announce handling ----
if (request_type == TRTrackerServerRequest.RT_ANNOUNCE) {
if (hashes == null || hashes.length == 0) {
throw (new TRTrackerServerException("Hash missing from request "));
}
if (hashes.length != 1) {
throw (new TRTrackerServerException("Too many hashes for announce"));
}
byte[] hash = hashes[0];
torrent = server.getTorrent(hash);
if (torrent == null) {
// unknown torrent: only acceptable if the tracker is open ("public")
if (!COConfigurationManager.getBooleanParameter("Tracker Public Enable")) {
throw (new TRTrackerServerException("Torrent unauthorised"));
} else {
try {
torrent = (TRTrackerServerTorrentImpl) server.permit(real_ip_address, hash, false);
} catch (Throwable e) {
throw (new TRTrackerServerException("Torrent unauthorised", e));
}
}
}
if (peer_id == null) {
throw (new TRTrackerServerException("peer_id missing from request"));
}
// "stop to queue": a stopping peer may be kept as a queued (lightweight) entry
boolean queue_it = stop_to_queue;
if (queue_it) {
Set biased = server.getBiasedPeers();
if (biased != null && biased.contains(real_ip_address)) {
// biased peers get to queue whatever
} else {
if (loopback || ip_override) {
queue_it = false;
}
}
}
long interval;
long min_interval;
if (queue_it) {
// when queued we use the scrape timeouts as it is scrape operations that
// will keep the entry alive from this point on
interval = server.getScrapeRetryInterval(torrent);
min_interval = server.getMinScrapeRetryInterval();
} else {
interval = server.getAnnounceRetryInterval(torrent);
min_interval = server.getMinAnnounceRetryInterval();
// seeds (left == 0) may announce less frequently
if (left == 0) {
long mult = server.getSeedAnnounceIntervalMultiplier();
interval *= mult;
min_interval *= mult;
}
}
// record/update the peer contact on the torrent
TRTrackerServerPeerImpl peer = torrent.peerContact(request, event, peer_id, port, udp_port, http_port, crypto_level, az_ver, real_ip_address, client_ip_address, ip_override, loopback, key, uploaded, downloaded, left, interval, up_speed, network_position);
if (queue_it) {
torrent.peerQueued(client_ip_address, port, udp_port, http_port, crypto_level, az_ver, interval, left == 0);
}
HashMap pre_map = new HashMap();
TRTrackerServerPeer pre_process_peer = peer;
if (pre_process_peer == null) {
// can be null for stop events received without a previous start
pre_process_peer = new lightweightPeer(client_ip_address, port, peer_id);
}
server.preProcess(pre_process_peer, torrent, request_type, request, pre_map);
// set num_want to 0 for stopped events as no point in returning peers
boolean stopped = event != null && event.equalsIgnoreCase("stopped");
root_out[0] = torrent.exportAnnounceToMap(client_ip_address, pre_map, peer, left > 0, stopped ? 0 : num_want, interval, min_interval, no_peer_id, compact_mode, crypto_level, network_position);
peer_out[0] = peer;
// ---- query handling (announce-like response without registering a peer) ----
} else if (request_type == TRTrackerServerRequest.RT_QUERY) {
if (link == null) {
if (hashes == null || hashes.length == 0) {
throw (new TRTrackerServerException("Hash missing from request "));
}
if (hashes.length != 1) {
throw (new TRTrackerServerException("Too many hashes for query"));
}
byte[] hash = hashes[0];
torrent = server.getTorrent(hash);
} else {
torrent = server.getTorrent(link);
}
if (torrent == null) {
throw (new TRTrackerServerException("Torrent unauthorised"));
}
long interval = server.getAnnounceRetryInterval(torrent);
root_out[0] = torrent.exportAnnounceToMap(client_ip_address, new HashMap(), null, true, num_want, interval, server.getMinAnnounceRetryInterval(), true, compact_mode, crypto_level, network_position);
// ---- (multi-hash) scrape handling ----
} else {
if (hashes == null || hashes.length == 0) {
throw (new TRTrackerServerException("Hash missing from request "));
}
boolean local_scrape = client_ip_address.equals("127.0.0.1");
long max_interval = server.getMinScrapeRetryInterval();
Map root = new HashMap();
root_out[0] = root;
Map files = new ByteEncodedKeyHashMap();
root.put("files", files);
// per-hash scrape flags ('Q' = queue); must match hash count to be usable
char[] scrape_chars = scrape_flags == null ? null : scrape_flags.toCharArray();
if (scrape_chars != null && scrape_chars.length != hashes.length) {
scrape_chars = null;
}
for (int i = 0; i < hashes.length; i++) {
byte[] hash = hashes[i];
String str_hash;
try {
str_hash = new String(hash, Constants.BYTE_ENCODING);
// skip duplicate hashes after the first entry
if (i > 0 && files.get(str_hash) != null) {
continue;
}
} catch (UnsupportedEncodingException e) {
continue;
}
torrent = server.getTorrent(hash);
if (torrent == null) {
if (!COConfigurationManager.getBooleanParameter("Tracker Public Enable")) {
continue;
} else {
try {
torrent = (TRTrackerServerTorrentImpl) server.permit(real_ip_address, hash, false);
} catch (Throwable e) {
continue;
}
}
}
// response interval is the max over all scraped torrents
long interval = server.getScrapeRetryInterval(torrent);
if (interval > max_interval) {
max_interval = interval;
}
if (scrape_chars != null && (QUEUE_TEST || !(loopback || ip_override))) {
if (scrape_chars[i] == 'Q') {
torrent.peerQueued(client_ip_address, port, udp_port, http_port, crypto_level, az_ver, (int) interval, true);
}
}
// redirected torrents are only reported for single-hash scrapes
if (torrent.getRedirects() != null) {
if (hashes.length > 1) {
continue;
}
}
server.preProcess(new lightweightPeer(client_ip_address, port, peer_id), torrent, request_type, request, null);
// we don't cache local scrapes as if we do this causes the hosting of
// torrents to retrieve old values initially. Not a fatal error but not
// the best behaviour as the (local) seed isn't initially visible.
Map hash_entry = torrent.exportScrapeToMap(request, client_ip_address, !local_scrape);
// System.out.println( "tracker - encoding: " + ByteFormatter.nicePrint(torrent_hash) + " -> " + ByteFormatter.nicePrint( str_hash.getBytes( Constants.BYTE_ENCODING )));
files.put(str_hash, hash_entry);
}
if (hashes.length > 1) {
// no specific torrent
torrent = null;
}
// System.out.println( "scrape: hashes = " + hashes.length + ", files = " + files.size() + ", tim = " + max_interval );
addScrapeInterval(max_interval, root);
}
// ---- full scrape: every non-redirected torrent on the tracker ----
} else {
if (!TRTrackerServerImpl.isFullScrapeEnabled()) {
throw (new TRTrackerServerException("Full scrape disabled"));
}
Map files = new ByteEncodedKeyHashMap();
TRTrackerServerTorrentImpl[] torrents = server.getTorrents();
for (int i = 0; i < torrents.length; i++) {
TRTrackerServerTorrentImpl this_torrent = torrents[i];
if (this_torrent.getRedirects() != null) {
// this torrent is redirected, don't report it in a full scrape
continue;
}
server.preProcess(new lightweightPeer(client_ip_address, port, peer_id), this_torrent, request_type, request, null);
byte[] torrent_hash = this_torrent.getHash().getHash();
try {
String str_hash = new String(torrent_hash, Constants.BYTE_ENCODING);
// System.out.println( "tracker - encoding: " + ByteFormatter.nicePrint(torrent_hash) + " -> " + ByteFormatter.nicePrint( str_hash.getBytes( Constants.BYTE_ENCODING )));
Map hash_entry = this_torrent.exportScrapeToMap(request, client_ip_address, true);
files.put(str_hash, hash_entry);
} catch (UnsupportedEncodingException e) {
throw (new TRTrackerServerException("Encoding error", e));
}
}
Map root = new HashMap();
root_out[0] = root;
addScrapeInterval(null, root);
root.put("files", files);
}
return (torrent);
}
Use of com.biglybt.core.logging.LogEvent in project BiglyBT by BiglySoftware.
Example 3: the class FileUtil, method readResilientFile.
/**
 * Reads and bdecodes a "resilient" config file, falling back to its
 * ".saving" backup and then to recovery-mode decoding when the primary
 * file is missing, empty or corrupt. Corrupt files are preserved by
 * copying them to a ".bad"/".badN" sibling before the fallback proceeds.
 *
 * @param original_file_name name used purely for log messages
 * @param parent_dir         directory containing the file
 * @param file_name          concrete file to try (may already be the ".saving" backup)
 * @param fail_count         number of prior failures (drives final "no usable file" logging)
 * @param recovery_mode      when true, decode partially-recoverable data and suppress logging
 * @param skip_key_intern    when true, decoded map keys are not interned
 * @return the decoded map, or null if neither the file nor its backup is usable
 */
private static Map readResilientFile(String original_file_name, File parent_dir, String file_name, int fail_count, boolean recovery_mode, boolean skip_key_intern) {
// logging in here is only done during "non-recovery" mode to prevent subsequent recovery
// attempts logging everything a second time.
// recovery-mode allows the decoding process to "succeed" with a partially recovered file
boolean using_backup = file_name.endsWith(".saving");
File file = new File(parent_dir, file_name);
// <= 1 byte can't be a valid bencoded map, treat as missing
if ((!file.exists()) || file.length() <= 1L) {
if (using_backup) {
if (!recovery_mode) {
if (fail_count == 1) {
Debug.out("Load of '" + original_file_name + "' fails, no usable file or backup");
} else {
// drop this log, it doesn't really help to inform about the failure to
// find a .saving file
// if (Logger.isEnabled())
// Logger.log(new LogEvent(LOGID, LogEvent.LT_ERROR, "Load of '"
// + file_name + "' fails, file not found"));
}
}
return (null);
}
if (!recovery_mode) {
// kinda confusing log this as we get it under "normal" circumstances (loading a config
// file that doesn't exist legitimately, e.g. shares or bad-ips
// if (Logger.isEnabled())
// Logger.log(new LogEvent(LOGID, LogEvent.LT_ERROR, "Load of '"
// + file_name + "' failed, " + "file not found or 0-sized."));
}
// primary missing: retry against the ".saving" backup
// NOTE(review): skip_key_intern is hard-coded to true here rather than
// propagated - presumably intentional for the rare backup path; confirm
return (readResilientFile(original_file_name, parent_dir, file_name + ".saving", 0, recovery_mode, true));
}
BufferedInputStream bin = null;
try {
// the open itself is retried a few times - transient sharing/AV locks
int retry_limit = 5;
while (true) {
try {
bin = new BufferedInputStream(new FileInputStream(file), 16384);
break;
} catch (IOException e) {
if (--retry_limit == 0) {
throw (e);
}
if (Logger.isEnabled())
Logger.log(new LogEvent(LOGID, "Failed to open '" + file.toString() + "', retrying", e));
// InterruptedException here propagates to the outer catch (Throwable)
Thread.sleep(500);
}
}
BDecoder decoder = new BDecoder();
if (recovery_mode) {
decoder.setRecoveryMode(true);
}
Map res = decoder.decodeStream(bin, !skip_key_intern);
if (using_backup && !recovery_mode) {
Debug.out("Load of '" + original_file_name + "' had to revert to backup file");
}
return (res);
} catch (Throwable e) {
Debug.printStackTrace(e);
// close early so the file can be copied/renamed below (also nulled so the
// finally block doesn't close twice)
try {
if (bin != null) {
bin.close();
bin = null;
}
} catch (Exception x) {
Debug.printStackTrace(x);
}
if (!recovery_mode) {
// Occurs when file is there but b0rked
// copy it in case it actually contains useful data, so it won't be overwritten next save
File bad;
int bad_id = 0;
// find the first unused ".bad"/".badN" name
while (true) {
File test = new File(parent_dir, file.getName() + ".bad" + (bad_id == 0 ? "" : ("" + bad_id)));
if (!test.exists()) {
bad = test;
break;
}
bad_id++;
}
if (Logger.isEnabled())
Logger.log(new LogEvent(LOGID, LogEvent.LT_WARNING, "Read of '" + original_file_name + "' failed, decoding error. " + "Renaming to " + bad.getName()));
// copy it so its left in place for possible recovery
copyFile(file, bad);
}
if (using_backup) {
if (!recovery_mode) {
Debug.out("Load of '" + original_file_name + "' fails, no usable file or backup");
}
return (null);
}
// corrupt primary: fall back to the backup, fail_count=1 so a final failure is logged
return (readResilientFile(original_file_name, parent_dir, file_name + ".saving", 1, recovery_mode, true));
} finally {
try {
if (bin != null) {
bin.close();
}
} catch (Exception e) {
Debug.printStackTrace(e);
}
}
}
Use of com.biglybt.core.logging.LogEvent in project BiglyBT by BiglySoftware.
Example 4: the class TRBlockingServerProcessor, method runSupport.
/**
 * Main loop for a blocking tracker-server connection: reads HTTP requests
 * from the socket (GET/HEAD/POST, with content-length or chunked POST
 * bodies), dispatches each to processRequest(), and keeps the connection
 * open across requests when keep-alive is in effect. Exits and closes the
 * socket on any error or when keep-alive ends.
 */
@Override
public void runSupport() {
// System.out.println( "Processor starts: " + socket.getRemoteSocketAddress());
boolean keep_alive = getServer().isKeepAliveEnabled();
try {
InputStream is = new BufferedInputStream(socket.getInputStream());
// one iteration per HTTP request on this connection
while (true) {
setTaskState("entry");
try {
if (keep_alive) {
// longer timeout between keep-alive requests; per-request timeouts re-enabled later
socket.setSoTimeout(KEEP_ALIVE_SOCKET_TIMEOUT);
setTimeoutsDisabled(true);
} else {
socket.setSoTimeout(SOCKET_TIMEOUT);
}
} catch (Throwable e) {
// e.printStackTrace();
}
setTaskState("reading header");
try {
byte[] buffer = new byte[16 * 1024];
int header_pos = 0;
// read byte-at-a-time until the blank-line header terminator
// NOTE(review): terminator test is CR,FF,CR,FF - 'FF' here is presumably
// the line-feed constant ('\n') despite the name; confirm constant values
while (header_pos < buffer.length) {
int len = is.read(buffer, header_pos, 1);
if (len != 1) {
throw (new Exception("Premature end of stream reading header"));
}
header_pos++;
if (header_pos >= 4 && buffer[header_pos - 4] == CR && buffer[header_pos - 3] == FF && buffer[header_pos - 2] == CR && buffer[header_pos - 1] == FF) {
break;
}
}
String header = new String(buffer, 0, header_pos, Constants.BYTE_ENCODING);
if (Logger.isEnabled()) {
// log only the request line, not the full header block
String log_str = header;
int pos = log_str.indexOf(NL);
if (pos != -1) {
log_str = log_str.substring(0, pos);
}
Logger.log(new LogEvent(LOGID, "Tracker Server: received header '" + log_str + "' from " + socket.getRemoteSocketAddress()));
}
// System.out.println( "got header:" + header );
InputStream post_is = null;
File post_file = null;
String lowercase_header;
boolean head = false;
int url_start;
// dispatch on HTTP method; url_start is the offset of the URL in the request line
if (header.startsWith("GET ")) {
timeout_ticks = 1;
lowercase_header = header.toLowerCase();
url_start = 4;
} else if (header.startsWith("HEAD ")) {
timeout_ticks = 1;
lowercase_header = header.toLowerCase();
url_start = 5;
head = true;
} else if (header.startsWith("POST ")) {
// POSTs may take longer to process; 0 means timeouts disabled entirely
timeout_ticks = TRTrackerServerTCP.PROCESSING_POST_MULTIPLIER;
if (timeout_ticks == 0) {
setTimeoutsDisabled(true);
}
setTaskState("reading content");
lowercase_header = header.toLowerCase();
url_start = 5;
String cl_str = getHeaderField(header, lowercase_header, "content-length:");
boolean chunk_read = false;
if (cl_str == null) {
// no content-length: body may be chunked, otherwise treated as empty
String transfer_encoding_str = getHeaderField(header, lowercase_header, "transfer-encoding: ");
chunk_read = transfer_encoding_str != null && transfer_encoding_str.equalsIgnoreCase("chunked");
cl_str = "0";
}
int content_length = Integer.parseInt(cl_str);
ByteArrayOutputStream baos = null;
FileOutputStream fos = null;
try {
OutputStream data_os;
// small bodies buffered in memory, large ones spooled to a temp file
if (content_length <= 256 * 1024) {
baos = new ByteArrayOutputStream();
data_os = baos;
} else {
post_file = AETemporaryFileHandler.createTempFile();
post_file.deleteOnExit();
fos = new FileOutputStream(post_file);
data_os = fos;
}
if (chunk_read) {
// hand-rolled chunked transfer decoding
while (true) {
// Read Chunk Size and CRLF
int chunkSize = -1;
// parse the hex chunk-size line
while (true) {
int val = is.read();
if (val == -1) {
throw (new TRTrackerServerException("premature end of input stream (chunksize)"));
}
if (val == '\n') {
break;
}
if (val != '\r') {
if (chunkSize == -1) {
chunkSize = 0;
} else {
chunkSize <<= 4;
}
chunkSize += Character.digit(val, 16);
}
}
if (chunkSize == -1) {
throw (new TRTrackerServerException("invalid chunk size"));
}
if (chunkSize == 0) {
// terminating chunk, clean up last CRLF
boolean bad = is.read() == -1 || is.read() == -1;
if (bad) {
throw (new TRTrackerServerException("premature end of input stream (NoTerminatingChunk)"));
}
break;
}
// Read Chunk data
while (chunkSize > 0) {
int len = is.read(buffer, 0, Math.min(chunkSize, buffer.length));
if (len < 0) {
throw (new TRTrackerServerException("premature end of input stream"));
}
data_os.write(buffer, 0, len);
chunkSize -= len;
}
// Cleanup Chunk Terminator CRLF
boolean bad = is.read() == -1 || is.read() == -1;
if (bad) {
throw (new TRTrackerServerException("premature end of input stream (NoChunkEndMarker)"));
}
}
}
// content-length body (0 when chunked, so this is a no-op then)
while (content_length > 0) {
int len = is.read(buffer, 0, Math.min(content_length, buffer.length));
if (len < 0) {
throw (new TRTrackerServerException("premature end of input stream"));
}
data_os.write(buffer, 0, len);
content_length -= len;
}
if (baos != null) {
post_is = new ByteArrayInputStream(baos.toByteArray());
} else {
fos.close();
fos = null;
post_is = new BufferedInputStream(new FileInputStream(post_file), 256 * 1024);
}
// System.out.println( "TRTrackerServerProcessorTCP: request data = " + baos.size());
} finally {
// tidy up open streams
if (baos != null) {
try {
baos.close();
} catch (Throwable e) {
}
}
if (fos != null) {
try {
fos.close();
} catch (Throwable e) {
}
}
// we've errored so delete any temp file
if (post_is == null && post_file != null) {
post_file.delete();
}
}
} else {
// unknown method: locate the URL after the first space
int pos = header.indexOf(' ');
if (pos == -1) {
throw (new TRTrackerServerException("header doesn't have space in right place"));
}
timeout_ticks = 1;
lowercase_header = header.toLowerCase();
url_start = pos + 1;
}
setTaskState("processing request");
current_request = header;
try {
if (post_is == null) {
// set up a default input stream
post_is = new ByteArrayInputStream(new byte[0]);
}
int url_end = header.indexOf(" ", url_start);
if (url_end == -1) {
throw (new TRTrackerServerException("header doesn't have space in right place"));
}
String url = header.substring(url_start, url_end).trim();
int nl_pos = header.indexOf(NL, url_end);
if (nl_pos == -1) {
throw (new TRTrackerServerException("header doesn't have nl in right place"));
}
String http_ver = header.substring(url_end, nl_pos).trim();
// keep-alive semantics: HTTP/1.0 defaults to close, "Connection: close" always closes
String con_str = getHeaderField(header, lowercase_header, "connection:");
if (con_str == null) {
if (http_ver.equalsIgnoreCase("HTTP/1.0")) {
keep_alive = false;
}
} else if (con_str.equalsIgnoreCase("close")) {
keep_alive = false;
}
InetSocketAddress local_sa = (InetSocketAddress) socket.getLocalSocketAddress();
InetSocketAddress remote_sa = (InetSocketAddress) socket.getRemoteSocketAddress();
// map proxied remote addresses back to the real client address
AEProxyAddressMapper.AppliedPortMapping applied_mapping = proxy_address_mapper.applyPortMapping(remote_sa.getAddress(), remote_sa.getPort());
remote_sa = applied_mapping.getAddress();
if (head) {
// HEAD: capture the full response, then send only the header section
ByteArrayOutputStream head_response = new ByteArrayOutputStream(4096);
if (!processRequest(header, lowercase_header, url, local_sa, remote_sa, false, keep_alive, post_is, head_response, null)) {
keep_alive = false;
}
byte[] head_data = head_response.toByteArray();
int header_length = head_data.length;
// find the header/body boundary (CR,FF,CR,FF as above)
for (int i = 3; i < head_data.length; i++) {
if (head_data[i - 3] == CR && head_data[i - 2] == FF && head_data[i - 1] == CR && head_data[i] == FF) {
header_length = i + 1;
break;
}
}
setTaskState("writing head response");
socket.getOutputStream().write(head_data, 0, header_length);
socket.getOutputStream().flush();
} else {
// GET/POST: stream the response directly to the socket
if (!processRequest(header, lowercase_header, url, local_sa, remote_sa, false, keep_alive, post_is, socket.getOutputStream(), null)) {
keep_alive = false;
}
}
} finally {
if (post_is != null) {
post_is.close();
}
if (post_file != null) {
post_file.delete();
}
}
} catch (Throwable e) {
// any per-request failure terminates the connection
keep_alive = false;
// e.printStackTrace();
}
if (!keep_alive) {
break;
}
}
} catch (Throwable e) {
} finally {
setTaskState("final socket close");
try {
socket.close();
} catch (Throwable e) {
// e.printStackTrace();
}
// System.out.println( "Processor ends: " + socket.getRemoteSocketAddress());
}
}
Use of com.biglybt.core.logging.LogEvent in project BiglyBT by BiglySoftware.
Example 5: the class TRTrackerServerUDP, method recvLoop.
/**
 * Receive loop for the UDP tracker listener. Blocks on the socket and hands
 * each received datagram off to the processing thread pool, unless the
 * sender is blocked by the IP filter. Receive failures are logged; if they
 * persist (the socket never worked, or a very long failure run) an alert is
 * raised and the loop terminates. Normal exit is via the 'closed' flag.
 */
protected void recvLoop(DatagramSocket socket, InetSocketAddress address) {
	long ok_count = 0;
	long consecutive_failures = 0;

	while (!closed) {
		try {
			byte[] data = new byte[PRUDPPacket.MAX_PACKET_SIZE];

			DatagramPacket incoming = new DatagramPacket(data, data.length, address);

			socket.receive(incoming);

			ok_count++;
			consecutive_failures = 0;

			String sender_ip = incoming.getAddress().getHostAddress();

			if (ip_filter.isInRange(sender_ip, "Tracker", null)) {
				continue; // sender blocked by the IP filter - drop the packet
			}

			thread_pool.run(new TRTrackerServerProcessorUDP(this, socket, incoming));

		} catch (Throwable e) {
			if (closed) {
				continue; // shutdown in progress - loop condition will exit
			}

			consecutive_failures++;

			Logger.log(new LogEvent(LOGID, "TRTrackerServer: receive failed on port " + port, e));

			boolean never_worked = consecutive_failures > 100 && ok_count == 0;

			if (never_worked || consecutive_failures > 1000) {
				// looks like its not going to work...
				// some kind of socket problem
				Logger.logTextResource(new LogAlert(LogAlert.UNREPEATABLE, LogAlert.AT_ERROR, "Network.alert.acceptfail"), new String[] { "" + port, "UDP" });

				break;
			}
		}
	}
}
End of aggregated examples.