use of java.net.HttpURLConnection in project druid by druid-io.
the class JettyTest method testGzipCompression.
@Test
public void testGzipCompression() throws Exception {
    final URL url = new URL("http://localhost:" + port + "/default");
    final HttpURLConnection get = (HttpURLConnection) url.openConnection();
    get.setRequestProperty("Accept-Encoding", "gzip");
    Assert.assertEquals("gzip", get.getContentEncoding());
    final HttpURLConnection post = (HttpURLConnection) url.openConnection();
    post.setRequestProperty("Accept-Encoding", "gzip");
    post.setRequestMethod("POST");
    Assert.assertEquals("gzip", post.getContentEncoding());
    final HttpURLConnection getNoGzip = (HttpURLConnection) url.openConnection();
    Assert.assertNotEquals("gzip", getNoGzip.getContentEncoding());
    final HttpURLConnection postNoGzip = (HttpURLConnection) url.openConnection();
    postNoGzip.setRequestMethod("POST");
    Assert.assertNotEquals("gzip", postNoGzip.getContentEncoding());
}
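The test only asserts the Content-Encoding header. A minimal client-side sketch (not part of the druid test; the endpoint parameter is a placeholder) of how a caller would actually decompress such a response with GZIPInputStream:

import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.zip.GZIPInputStream;

public class GzipClientSketch {
    public static String fetchGzipped(String endpoint) throws Exception {
        HttpURLConnection conn = (HttpURLConnection) new URL(endpoint).openConnection();
        conn.setRequestProperty("Accept-Encoding", "gzip");
        // Wrap the raw stream only if the server actually compressed the response.
        InputStream body = "gzip".equals(conn.getContentEncoding())
                ? new GZIPInputStream(conn.getInputStream())
                : conn.getInputStream();
        StringBuilder sb = new StringBuilder();
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(body))) {
            String line;
            while ((line = reader.readLine()) != null) {
                sb.append(line).append('\n');
            }
        }
        return sb.toString();
    }
}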
use of java.net.HttpURLConnection in project UltimateAndroid by cymcsg.
the class HttpUtils_Deprecated method getImageFromUrl.
/**
 * Gets an image from the network.
 *
 * @param path the URL of the image
 * @return the image data as a byte array, or null if the request fails
 * @throws Exception
 */
public byte[] getImageFromUrl(String path) throws Exception {
    URL url = new URL(path);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setConnectTimeout(12 * 1000);
    conn.setRequestMethod("GET");
    // Check the status code before opening the body stream; getInputStream()
    // throws for HTTP error responses.
    if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
        InputStream inStream = conn.getInputStream();
        try {
            return readStream(inStream);
        } finally {
            inStream.close();
        }
    }
    return null;
}
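The snippet depends on a readStream helper that is not shown on this page. A minimal sketch of what such a helper typically looks like, assuming java.io.ByteArrayOutputStream (the actual implementation in HttpUtils_Deprecated may differ):

// Hypothetical helper: copies the whole stream into a byte array.
// The real HttpUtils_Deprecated.readStream may differ from this sketch.
private static byte[] readStream(InputStream inStream) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] buffer = new byte[1024];
    int len;
    while ((len = inStream.read(buffer)) != -1) {
        out.write(buffer, 0, len);
    }
    return out.toByteArray();
}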
use of java.net.HttpURLConnection in project UltimateAndroid by cymcsg.
the class HttpUtils_Deprecated method getImageStreamFromUrl.
/**
 * Gets an image from the network.
 *
 * @param path the URL of the image
 * @return an InputStream over the image data, or null if the request fails
 * @throws Exception
 */
public static InputStream getImageStreamFromUrl(String path) throws Exception {
    URL url = new URL(path);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setConnectTimeout(12 * 1000);
    conn.setRequestMethod("GET");
    if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
        return conn.getInputStream();
    }
    return null;
}
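An illustrative caller (not part of HttpUtils_Deprecated; imageUrl is a placeholder) that decodes the returned stream into an Android Bitmap via android.graphics.BitmapFactory:

InputStream in = HttpUtils_Deprecated.getImageStreamFromUrl(imageUrl);
Bitmap bitmap = null;
if (in != null) {
    try {
        // BitmapFactory.decodeStream reads the image bytes into a Bitmap.
        bitmap = BitmapFactory.decodeStream(in);
    } finally {
        in.close();
    }
}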
use of java.net.HttpURLConnection in project UltimateAndroid by cymcsg.
the class CacheUtils method getImageURI.
public static Uri getImageURI(String path, File cache) throws Exception {
    String name = MD5Utils.getMD5(path) + path.substring(path.lastIndexOf("."));
    File file = new File(cache, name);
    if (file.exists()) {
        // Uri.fromFile(file) returns the URI of the local file
        return Uri.fromFile(file);
    } else {
        // fetch the image from the network
        URL url = new URL(path);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setConnectTimeout(5000);
        conn.setRequestMethod("GET");
        conn.setDoInput(true);
        if (conn.getResponseCode() == 200) {
            InputStream is = conn.getInputStream();
            FileOutputStream fos = new FileOutputStream(file);
            byte[] buffer = new byte[1024];
            int len = 0;
            while ((len = is.read(buffer)) != -1) {
                fos.write(buffer, 0, len);
            }
            is.close();
            fos.close();
            return Uri.fromFile(file);
        }
    }
    return null;
}
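The cache file name is built with MD5Utils.getMD5, which is not shown on this page. A minimal sketch of such a helper, assuming it hex-encodes a java.security.MessageDigest MD5 digest (the real MD5Utils may differ):

// Hypothetical MD5 helper: hex-encodes the MD5 digest of the input string.
public static String getMD5(String input) throws NoSuchAlgorithmException {
    MessageDigest md = MessageDigest.getInstance("MD5");
    StringBuilder hex = new StringBuilder();
    for (byte b : md.digest(input.getBytes())) {
        hex.append(String.format("%02x", b));
    }
    return hex.toString();
}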
use of java.net.HttpURLConnection in project lucida by claritylab.
the class WebDocumentFetcher method apply.
/**
* Fetches the top <code>MAX_DOCS</code> documents containing the given
* search engine snippets. The original snippets are dropped.
*
* @param results array of <code>Result</code> objects containing snippets
* @return array of <code>Result</code> objects containing entire documents
*/
public Result[] apply(Result[] results) {
    // documents containing the search engine snippets
    docs = new ArrayList<Result>();
    // start document fetchers
    HashSet<String> urls = new HashSet<String>();
    for (Result result : results) {
        // only apply this filter to results for the semantic parsing approach
        Query query = result.getQuery();
        Predicate[] ps = query.getAnalyzedQuestion().getPredicates();
        if (!query.extractWith(FactoidsFromPredicatesFilter.ID) || ps.length == 0 || result.getScore() > Float.NEGATIVE_INFINITY)
            continue;
        // if the result is not a web document then just make a copy
        if (!result.getDocID().contains(":")) {
            Result newResult = result.getCopy();
            newResult.setScore(0);
            docs.add(newResult);
            continue;
        }
        // fetch at most MAX_DOCS documents
        if (urls.size() >= MAX_DOCS)
            break;
        String url = result.getDocID();
        // no forbidden document type
        if (url.matches("(?i).*?" + FORBIDDEN_DOCS))
            continue;
        // only HTTP connections
        try {
            URLConnection conn = (new URL(url)).openConnection();
            if (!(conn instanceof HttpURLConnection))
                continue;
        } catch (IOException e) {
            continue;
        }
        // no duplicate documents
        if (!urls.add(url))
            continue;
        // if caching is enabled, try to read the document from the cache
        if (CACHING) {
            FileCache cache = new FileCache(CACHE_DIR);
            String[] entries = cache.read(url);
            if (entries != null) {
                StringBuilder sb = new StringBuilder();
                for (String entry : entries) {
                    sb.append(entry);
                    sb.append("\n");
                }
                String docText = sb.toString();
                Result doc = new Result(docText, result.getQuery(), url, result.getHitPos());
                doc.setScore(0);
                docs.add(doc);
                continue;
            }
        }
        (new WebDocumentFetcher()).start(this, result);
    }
    // wait until all fetchers are done
    waitForDocs();
    // keep old results
    Result[] newResults = docs.toArray(new Result[docs.size()]);
    Result[] allResults = new Result[results.length + newResults.length];
    for (int i = 0; i < results.length; i++) allResults[i] = results[i];
    for (int i = 0; i < newResults.length; i++) allResults[results.length + i] = newResults[i];
    return allResults;
}
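The two copy loops at the end concatenate the old and new result arrays; an equivalent, slightly more idiomatic form using System.arraycopy (same behavior, shown only for comparison):

Result[] allResults = new Result[results.length + newResults.length];
System.arraycopy(results, 0, allResults, 0, results.length);
System.arraycopy(newResults, 0, allResults, results.length, newResults.length);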