use of org.apache.commons.compress.archivers.ArchiveInputStream in project stanbol by apache.
the class StandaloneManagedSolrServer method updateIndex.
@Override
public IndexMetadata updateIndex(String name, String parsedResourceName, Properties properties) throws IOException {
    if (name == null || name.isEmpty()) {
        throw new IllegalArgumentException("The parsed index name MUST NOT be NULL nor empty!");
    }
    String resourceName;
    if (!ConfigUtils.isValidSolrIndexFileName(parsedResourceName)) {
        log.debug("add SolrIndexFileExtension to parsed indexArchive {}", parsedResourceName);
        resourceName = ConfigUtils.appandSolrIndexFileExtension(parsedResourceName, null);
    } else {
        resourceName = parsedResourceName;
    }
    Map<String, String> comments = new HashMap<String, String>();
    if (properties != null) {
        for (Entry<Object, Object> prop : properties.entrySet()) {
            comments.put(prop.getKey().toString(), prop.getValue().toString());
        }
    }
    // ask the registered DataFileProviders until one can resolve the resource
    InputStream is = null;
    for (Iterator<DataFileProvider> it = dataFileProviders.iterator(); is == null && it.hasNext(); ) {
        DataFileProvider dfp = it.next();
        try {
            is = dfp.getInputStream(null, resourceName, comments);
        } catch (IOException e) {
            // not found; try the next provider
        }
    }
    if (is != null || new File(managedSolrDir, parsedResourceName).isDirectory()) {
        ArchiveInputStream ais;
        try {
            ais = ManagementUtils.getArchiveInputStream(resourceName, is);
        } catch (ArchiveException e) {
            throw new IOException("Unable to open ArchiveInputStream for resource '" + resourceName + "'!", e);
        }
        IndexMetadata metadata = new IndexMetadata();
        if (properties != null) {
            metadata.putAll(properties);
        }
        metadata.setIndexName(name);
        metadata.setServerName(DEFAULT_SERVER_NAME);
        metadata.setSynchronized(false);
        metadata.setState(ManagedIndexState.ACTIVE);
        metadata.setArchive(resourceName);
        return updateCore(metadata, ais);
    } else {
        return null;
    }
}
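ManagementUtils.getArchiveInputStream is not shown in this excerpt. A minimal sketch of what such a helper typically looks like with Commons Compress auto-detection; the class name, the fallback behavior, and the two-stage detection of compression plus archive format are assumptions, not the Stanbol implementation:

import java.io.BufferedInputStream;
import java.io.InputStream;
import org.apache.commons.compress.archivers.ArchiveException;
import org.apache.commons.compress.archivers.ArchiveInputStream;
import org.apache.commons.compress.archivers.ArchiveStreamFactory;
import org.apache.commons.compress.compressors.CompressorException;
import org.apache.commons.compress.compressors.CompressorStreamFactory;

public final class ArchiveStreams {

    /**
     * Hypothetical helper: wraps the stream so that both a compression layer
     * (gz, bz2, ...) and the archive format (tar, zip, ...) are auto-detected.
     */
    public static ArchiveInputStream open(InputStream in) throws ArchiveException {
        // detection requires a mark-supported stream
        InputStream buffered = new BufferedInputStream(in);
        try {
            // first try to unwrap a compression layer (e.g. the .gz of a .tar.gz)
            buffered = new BufferedInputStream(
                new CompressorStreamFactory().createCompressorInputStream(buffered));
        } catch (CompressorException e) {
            // not compressed; treat the stream as a plain archive
        }
        return new ArchiveStreamFactory().createArchiveInputStream(buffered);
    }
}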
use of org.apache.commons.compress.archivers.ArchiveInputStream in project quickutil by quickutil.
the class CompressUtil method decompressTarGz.
/**
 * Decompresses a tar.gz archive.
 *
 * @param sourcePath path of the tar.gz file to decompress
 * @param targetPath directory the entries are extracted into (may be null)
 * @return the path of the root directory after decompression
 */
public static String decompressTarGz(String sourcePath, String targetPath) {
    String rootPath = null;
    try (FileInputStream fInput = new FileInputStream(sourcePath);
            BufferedInputStream bufInput = new BufferedInputStream(fInput);
            GZIPInputStream gzipInput = new GZIPInputStream(bufInput);
            ArchiveInputStream archiveInput = new ArchiveStreamFactory().createArchiveInputStream("tar", gzipInput)) {
        // entries of the tar archive
        TarArchiveEntry entry;
        boolean isRootPath = true;
        while ((entry = (TarArchiveEntry) archiveInput.getNextEntry()) != null) {
            String entryName = entry.getName();
            // rebase the entry onto the target path
            if (targetPath != null) {
                entryName = targetPath + File.separator + entryName;
            }
            // the first entry is the root directory of the archive
            if (isRootPath) {
                rootPath = entryName;
                isRootPath = false;
            }
            if (entry.isDirectory()) {
                FileUtil.mkdirByFile(entryName);
            } else if (entry.isFile()) {
                FileUtil.stream2file(archiveInput, entryName, false);
            }
        }
    } catch (Exception e) {
        LOGGER.error(Symbol.BLANK, e);
    }
    return rootPath;
}
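A minimal usage sketch; the archive path and target directory here are illustrative:

// assuming ./data/backup.tar.gz exists; both paths are made up for illustration
String root = CompressUtil.decompressTarGz("./data/backup.tar.gz", "./extracted");
System.out.println("Archive root extracted to: " + root);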
use of org.apache.commons.compress.archivers.ArchiveInputStream in project structr by structr.
the class UnarchiveCommand method unarchive.
private void unarchive(final SecurityContext securityContext, final File file, final String parentFolderId) throws ArchiveException, IOException, FrameworkException {
    final App app = StructrApp.getInstance(securityContext);
    final InputStream is;
    Folder existingParentFolder = null;
    final String fileName = file.getName();
    try (final Tx tx = app.tx()) {
        // search for an existing parent folder
        existingParentFolder = app.get(Folder.class, parentFolderId);
        String parentFolderName = null;
        String msgString = "Unarchiving file {}";
        if (existingParentFolder != null) {
            parentFolderName = existingParentFolder.getName();
            msgString += " into existing folder {}.";
        }
        logger.info(msgString, new Object[] { fileName, parentFolderName });
        is = file.getInputStream();
        tx.success();
        if (is == null) {
            getWebSocket().send(MessageBuilder.status().code(400).message("Could not get input stream from file ".concat(fileName)).build(), true);
            return;
        }
    }
    final BufferedInputStream bufferedIs = new BufferedInputStream(is);
    switch (ArchiveStreamFactory.detect(bufferedIs)) {
        // 7z doesn't support streaming
        case ArchiveStreamFactory.SEVEN_Z: {
            int overallCount = 0;
            logger.info("7-Zip archive format detected");
            try (final Tx outertx = app.tx()) {
                SevenZFile sevenZFile = new SevenZFile(file.getFileOnDisk());
                SevenZArchiveEntry sevenZEntry = sevenZFile.getNextEntry();
                while (sevenZEntry != null) {
                    // commit in batches of 50 entries to keep transactions small
                    try (final Tx tx = app.tx(true, true, false)) {
                        int count = 0;
                        while (sevenZEntry != null && count++ < 50) {
                            final String entryPath = "/" + PathHelper.clean(sevenZEntry.getName());
                            logger.info("Entry path: {}", entryPath);
                            if (sevenZEntry.isDirectory()) {
                                handleDirectory(securityContext, existingParentFolder, entryPath);
                            } else {
                                byte[] buf = new byte[(int) sevenZEntry.getSize()];
                                sevenZFile.read(buf, 0, buf.length);
                                try (final ByteArrayInputStream in = new ByteArrayInputStream(buf)) {
                                    handleFile(securityContext, in, existingParentFolder, entryPath);
                                }
                            }
                            sevenZEntry = sevenZFile.getNextEntry();
                            overallCount++;
                        }
                        logger.info("Committing transaction after {} entries.", overallCount);
                        tx.success();
                    }
                }
                logger.info("Unarchived {} files.", overallCount);
                outertx.success();
            }
            break;
        }
        // ZIP needs special treatment to support the "unsupported feature data descriptor" case
        case ArchiveStreamFactory.ZIP: {
            logger.info("Zip archive format detected");
            try (final ZipArchiveInputStream in = new ZipArchiveInputStream(bufferedIs, null, false, true)) {
                handleArchiveInputStream(in, app, securityContext, existingParentFolder);
            }
            break;
        }
        default: {
            logger.info("Default archive format detected");
            try (final ArchiveInputStream in = new ArchiveStreamFactory().createArchiveInputStream(bufferedIs)) {
                handleArchiveInputStream(in, app, securityContext, existingParentFolder);
            }
        }
    }
    getWebSocket().send(MessageBuilder.finished().callback(callback).data("success", true).data("filename", fileName).build(), true);
}
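The four-argument ZipArchiveInputStream constructor is the key detail in the ZIP branch: its last argument, allowStoredEntriesWithDataDescriptor, accepts stored entries that use a data descriptor, which the generic factory path rejects. A self-contained sketch of the same pattern; the file path is illustrative:

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.compress.archivers.ArchiveEntry;
import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;

public class ZipDataDescriptorDemo {

    public static void main(String[] args) throws IOException {
        // "upload.zip" is an illustrative path, not a file from the project
        try (InputStream raw = new BufferedInputStream(new FileInputStream("upload.zip"));
                // args: stream, entry-name encoding (null = default), useUnicodeExtraFields,
                // allowStoredEntriesWithDataDescriptor
                ZipArchiveInputStream zip = new ZipArchiveInputStream(raw, null, false, true)) {
            ArchiveEntry entry;
            while ((entry = zip.getNextEntry()) != null) {
                System.out.println(entry.getName());
            }
        }
    }
}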
use of org.apache.commons.compress.archivers.ArchiveInputStream in project BWAPI4J by OpenBW.
the class DummyDataUtils method readMultiLineIntegerArraysFromArchiveFile.
public static List<List<Integer>> readMultiLineIntegerArraysFromArchiveFile(final String archiveFilename, final String mapHash, final String regex) throws IOException {
    final InputStream inputStream = createInputStreamForDummyDataSet(archiveFilename);
    final String mapShortHash = determineMapShortHash(mapHash);
    try (final ArchiveInputStream tarIn = new TarArchiveInputStream(new BZip2CompressorInputStream(inputStream));
            final BufferedReader buffer = new BufferedReader(new InputStreamReader(tarIn))) {
        // position the stream at the entry that matches the short map hash
        final ArchiveEntry nextEntry = getArchiveEntry(tarIn, mapShortHash);
        Assert.assertNotNull(nextEntry);
        final List<List<Integer>> data = new ArrayList<>();
        String line;
        while ((line = buffer.readLine()) != null) {
            if (line.isEmpty()) {
                continue;
            }
            final String[] tokens = line.split(regex);
            final List<Integer> intTokens = new ArrayList<>();
            for (final String token : tokens) {
                final String tokenTrimmed = token.trim();
                if (tokenTrimmed.isEmpty()) {
                    continue;
                }
                intTokens.add(Integer.parseInt(tokenTrimmed));
            }
            data.add(intTokens);
        }
        int valuesReadCount = 0;
        for (final List<Integer> list : data) {
            valuesReadCount += list.size();
        }
        logger.debug("Read " + valuesReadCount + " values from " + archiveFilename);
        return data;
    }
}
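getArchiveEntry is not part of this excerpt. A plausible sketch of such a helper, assuming it advances the tar stream until an entry name contains the short map hash; the matching rule is a guess:

// Hypothetical helper: advances the archive stream entry by entry and stops at
// the first entry whose name contains the given short map hash, or returns
// null if no entry matches.
private static ArchiveEntry getArchiveEntry(final ArchiveInputStream in, final String mapShortHash) throws IOException {
    ArchiveEntry entry;
    while ((entry = in.getNextEntry()) != null) {
        if (entry.getName().contains(mapShortHash)) {
            return entry; // the stream is now positioned at this entry's data
        }
    }
    return null;
}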
use of org.apache.commons.compress.archivers.ArchiveInputStream in project BWAPI4J by OpenBW.
the class DummyDataUtils method readMultiLinesAsStringTokensFromArchiveFile.
public static List<List<String>> readMultiLinesAsStringTokensFromArchiveFile(final String archiveFilename, final String mapHash, final String regex) throws IOException {
    final InputStream inputStream = createInputStreamForDummyDataSet(archiveFilename);
    final String mapShortHash = determineMapShortHash(mapHash);
    try (final ArchiveInputStream tarIn = new TarArchiveInputStream(new BZip2CompressorInputStream(inputStream));
            final BufferedReader buffer = new BufferedReader(new InputStreamReader(tarIn))) {
        final ArchiveEntry nextEntry = getArchiveEntry(tarIn, mapShortHash);
        Assert.assertNotNull(nextEntry);
        final List<List<String>> data = new ArrayList<>();
        String line;
        while ((line = buffer.readLine()) != null) {
            if (line.isEmpty()) {
                continue;
            }
            final String[] tokens = line.split(regex);
            final List<String> strTokens = new ArrayList<>();
            for (final String token : tokens) {
                final String tokenTrimmed = token.trim();
                if (tokenTrimmed.isEmpty()) {
                    continue;
                }
                strTokens.add(tokenTrimmed);
            }
            data.add(strTokens);
        }
        int valuesReadCount = 0;
        for (final List<String> list : data) {
            valuesReadCount += list.size();
        }
        logger.debug("Read " + valuesReadCount + " values from " + archiveFilename);
        return data;
    }
}
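The two readers above differ only in how tokens are converted. A hypothetical refactoring sketch that factors the shared parsing into one generic helper; all names here are invented for illustration:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

// Both methods could delegate token conversion to a Function:
// Integer::parseInt for the integer variant, Function.identity()
// for the string variant.
static <T> List<List<T>> parseLines(final List<String> lines, final String regex, final Function<String, T> converter) {
    final List<List<T>> data = new ArrayList<>();
    for (final String line : lines) {
        if (line.isEmpty()) {
            continue;
        }
        final List<T> row = new ArrayList<>();
        for (final String token : line.split(regex)) {
            final String trimmed = token.trim();
            if (!trimmed.isEmpty()) {
                row.add(converter.apply(trimmed));
            }
        }
        data.add(row);
    }
    return data;
}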