Use of org.apache.commons.compress.compressors.CompressorStreamFactory in the project logging-log4j2 (by apache):
the class RollingAppenderSizeTest, method testAppender.
/**
 * Verifies size-based rollover: logs 500 events, asserts that rolled files with the
 * configured {@code fileExtension} were produced, and — for the formats Commons Compress
 * can decompress — round-trips each rolled file and checks every line is a test message.
 */
@Test
public void testAppender() throws Exception {
    final Path path = Paths.get(DIR, "rollingtest.log");
    // With createOnDemand the file must not exist before the first log event.
    if (Files.exists(path) && createOnDemand) {
        Assert.fail(String.format("Unexpected file: %s (%s bytes)", path, Files.getAttribute(path, "size")));
    }
    for (int i = 0; i < 500; ++i) {
        logger.debug("This is test message number " + i);
    }
    try {
        // Give the async compression actions a moment to run.
        Thread.sleep(100);
    } catch (final InterruptedException ie) {
        // Restore the interrupt flag instead of silently swallowing it.
        Thread.currentThread().interrupt();
    }
    final File dir = new File(DIR);
    assertTrue("Directory not created", dir.exists() && dir.listFiles().length > 0);
    final File[] files = dir.listFiles();
    assertNotNull(files);
    assertThat(files, hasItemInArray(that(hasName(that(endsWith(fileExtension))))));
    final FileExtension ext = FileExtension.lookup(fileExtension);
    if (ext == null || FileExtension.ZIP == ext || FileExtension.PACK200 == ext) {
        // Apache Commons Compress cannot deflate zip? TODO test decompressing these formats
        return;
    }
    // Stop the context to make sure all files are compressed and closed. Trying to remedy failures in CI builds.
    if (!loggerContextRule.getLoggerContext().stop(30, TimeUnit.SECONDS)) {
        System.err.println("Could not stop cleanly " + loggerContextRule + " for " + this);
    }
    for (final File file : files) {
        if (!file.getName().endsWith(fileExtension)) {
            continue;
        }
        try (FileInputStream fis = new FileInputStream(file)) {
            CompressorInputStream in = null;
            try {
                try {
                    in = new CompressorStreamFactory().createCompressorInputStream(ext.name().toLowerCase(), fis);
                } catch (final CompressorException ce) {
                    ce.printStackTrace();
                    // Fixed typo: "intput" -> "input".
                    fail("Error creating input stream from " + file.toString() + ": " + ce.getMessage());
                }
                assertNotNull("No input stream for " + file.getName(), in);
                final ByteArrayOutputStream baos = new ByteArrayOutputStream();
                try {
                    IOUtils.copy(in, baos);
                } catch (final Exception ex) {
                    ex.printStackTrace();
                    fail("Unable to decompress " + file.getAbsolutePath());
                }
                // Every decompressed line must be one of the messages logged above.
                final String text = new String(baos.toByteArray(), Charset.defaultCharset());
                final String[] lines = text.split("[\\r\\n]+");
                for (final String line : lines) {
                    assertTrue(line.contains("DEBUG o.a.l.l.c.a.r.RollingAppenderSizeTest [main] This is test message number"));
                }
            } finally {
                // CompressorInputStream is created outside try-with-resources because its
                // construction can fail; Closer tolerates a null stream.
                Closer.close(in);
            }
        }
    }
}
Use of org.apache.commons.compress.compressors.CompressorStreamFactory in the project lucene-solr (by apache):
the class StreamUtilsTest, method rawBzip2File.
/**
 * Creates a bzip2-compressed test file named {@code testfile.<ext>} under the test
 * directory and returns its path.
 *
 * <p>The compressor stream is closed via try-with-resources — the original leaked the
 * stream, which can leave the bzip2 trailer unflushed and the file truncated.
 *
 * @param ext file extension to append (e.g. "bz2")
 * @return path of the freshly written compressed file
 */
private Path rawBzip2File(String ext) throws Exception {
    Path f = testDir.resolve("testfile." + ext);
    try (OutputStream os = new CompressorStreamFactory()
            .createCompressorOutputStream(CompressorStreamFactory.BZIP2, Files.newOutputStream(f))) {
        writeText(os);
    }
    return f;
}
Use of org.apache.commons.compress.compressors.CompressorStreamFactory in the project tika (by apache):
the class CompressorParserTest, method testCoverage.
/**
 * Ensures CompressorParser declares support for every compressor stream name that
 * CompressorStreamFactory can read, except the types deliberately listed in NOT_COVERED.
 * This guards against silently missing coverage when a commons-compress upgrade
 * introduces a new stream type.
 */
@Test
public void testCoverage() throws Exception {
    //test that the package parser covers all inputstreams handled
    //by CompressorStreamFactory. When we update commons-compress, and they add
    //a new stream type, we want to make sure that we're handling it.
    final CompressorStreamFactory streamFactory = new CompressorStreamFactory(true, 1000);
    final CompressorParser parser = new CompressorParser();
    final ParseContext context = new ParseContext();
    for (final String streamName : streamFactory.getInputStreamCompressorNames()) {
        final MediaType mediaType = CompressorParser.getMediaType(streamName);
        if (NOT_COVERED.contains(mediaType)) {
            continue;
        }
        //name of the missing stream
        if (MediaType.OCTET_STREAM.equals(mediaType)) {
            fail("getting octet-stream for: " + streamName);
        }
        if (!parser.getSupportedTypes(context).contains(mediaType)) {
            fail("CompressorParser should support: " + mediaType.toString());
        }
    }
}
Use of org.apache.commons.compress.compressors.CompressorStreamFactory in the project gitblit (by gitblit):
the class PtServlet, method doGet.
/**
 * Serves the "pt" ticket tool as a download: Windows clients receive a zip containing
 * pt.py plus a pt.cmd launcher; all other clients receive a tar.gz with the execute
 * bit preserved on the script.
 *
 * <p>Fixes over the previous version: the user-agent probe no longer relies on an
 * empty catch around a pointless {@code .toString()} (the header may be null); the
 * custom-script FileInputStream is closed; archive/compressor streams are closed via
 * try-with-resources even on exception paths; the method is decomposed into helpers.
 */
@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
    try {
        response.setContentType("application/octet-stream");
        response.setDateHeader("Last-Modified", lastModified);
        response.setHeader("Cache-Control", "none");
        response.setHeader("Pragma", "no-cache");
        response.setDateHeader("Expires", 0);

        // getHeader may return null when the client sends no user-agent.
        final String userAgent = request.getHeader("user-agent");
        final boolean windows = userAgent != null && userAgent.toLowerCase().contains("windows");

        final byte[] pyBytes = loadScript();
        if (windows) {
            writeZip(response, pyBytes);
        } else {
            writeTarGz(response, pyBytes);
        }
    } catch (Exception e) {
        // Preserve the original best-effort behavior: log and return whatever was written.
        e.printStackTrace();
    }
}

/** Loads the Python script: a custom ${baseFolder}/pt.py if present, else the bundled default. */
private byte[] loadScript() throws Exception {
    File file = runtimeManager.getFileOrFolder("tickets.pt", "${baseFolder}/pt.py");
    if (file.exists()) {
        // custom script — close the stream (previously leaked)
        try (FileInputStream is = new FileInputStream(file)) {
            return readAll(is);
        }
    }
    // default script
    return readAll(getClass().getResourceAsStream("/pt.py"));
}

/** Writes a zip archive (pt.py, pt.cmd launcher, readme) to the response. */
private void writeZip(HttpServletResponse response, byte[] pyBytes) throws Exception {
    // windows: download zip file with pt.py and pt.cmd
    response.setHeader("Content-Disposition", "attachment; filename=\"pt.zip\"");
    OutputStream os = response.getOutputStream();
    try (ZipArchiveOutputStream zos = new ZipArchiveOutputStream(os)) {
        // add the Python script
        addZipEntry(zos, "pt.py", pyBytes, FileMode.EXECUTABLE_FILE.getBits());
        // add a Python launch cmd file
        addZipEntry(zos, "pt.cmd", readAll(getClass().getResourceAsStream("/pt.cmd")),
                FileMode.REGULAR_FILE.getBits());
        // add a brief readme
        addZipEntry(zos, "readme.txt", readAll(getClass().getResourceAsStream("/pt.txt")),
                FileMode.REGULAR_FILE.getBits());
        zos.finish();
    }
    os.flush();
}

/** Appends one entry with the given unix mode and the servlet's lastModified timestamp. */
private void addZipEntry(ZipArchiveOutputStream zos, String name, byte[] content, int unixMode)
        throws IOException {
    ZipArchiveEntry entry = new ZipArchiveEntry(name);
    entry.setSize(content.length);
    entry.setUnixMode(unixMode);
    entry.setTime(lastModified);
    zos.putArchiveEntry(entry);
    zos.write(content);
    zos.closeArchiveEntry();
}

/**
 * Writes a tar.gz archive (executable pt script plus README) to the response.
 * Declares {@code throws Exception} because the compressor factory throws a checked
 * CompressorException; the caller handles all failures in one place.
 */
private void writeTarGz(HttpServletResponse response, byte[] pyBytes) throws Exception {
    // unix: download a tar.gz file with pt.py set with execute permissions
    response.setHeader("Content-Disposition", "attachment; filename=\"pt.tar.gz\"");
    OutputStream os = response.getOutputStream();
    try (CompressorOutputStream cos = new CompressorStreamFactory()
            .createCompressorOutputStream(CompressorStreamFactory.GZIP, os);
         TarArchiveOutputStream tos = new TarArchiveOutputStream(cos)) {
        tos.setAddPaxHeadersForNonAsciiNames(true);
        tos.setLongFileMode(TarArchiveOutputStream.LONGFILE_POSIX);
        // add the Python script
        addTarEntry(tos, "pt", pyBytes, FileMode.EXECUTABLE_FILE.getBits());
        // add a brief readme
        addTarEntry(tos, "README", readAll(getClass().getResourceAsStream("/pt.txt")),
                FileMode.REGULAR_FILE.getBits());
        tos.finish();
    }
    os.flush();
}

/** Appends one tar entry with the given mode and the servlet's lastModified timestamp. */
private void addTarEntry(TarArchiveOutputStream tos, String name, byte[] content, int mode)
        throws IOException {
    TarArchiveEntry entry = new TarArchiveEntry(name);
    entry.setMode(mode);
    entry.setModTime(lastModified);
    entry.setSize(content.length);
    tos.putArchiveEntry(entry);
    tos.write(content);
    tos.closeArchiveEntry();
}
Use of org.apache.commons.compress.compressors.CompressorStreamFactory in the project languagetool (by languagetool-org):
the class WikipediaSentenceExtractor, method extract.
/**
 * Streams sentences from a compressed Wikipedia XML dump to stdout, skipping
 * unwanted sentences and reporting progress on stderr every 1000 exported sentences.
 * The compression format is auto-detected by CompressorStreamFactory.
 */
private void extract(Language language, String xmlDumpPath) throws IOException, CompressorException {
    try (FileInputStream fileStream = new FileInputStream(xmlDumpPath);
         BufferedInputStream bufferedStream = new BufferedInputStream(fileStream);
         CompressorInputStream dumpStream = new CompressorStreamFactory().createCompressorInputStream(bufferedStream)) {
        final WikipediaSentenceSource source = new WikipediaSentenceSource(dumpStream, language);
        int exportedCount = 0;
        while (source.hasNext()) {
            final String sentence = source.next().getText();
            if (!skipSentence(sentence)) {
                System.out.println(sentence);
                exportedCount++;
                if (exportedCount % 1000 == 0) {
                    System.err.println("Exporting sentence #" + exportedCount + "...");
                }
            }
        }
    }
}
Aggregations