Use of org.apache.commons.compress.archivers.ArchiveInputStream in project ozone by apache — class TarContainerPacker, method unpackContainerData.
/**
 * Given an input stream (tar file) extract the data to the specified
 * directories.
 *
 * @param container container which defines the destination structure.
 * @param input the input stream.
 * @return the raw bytes of the container descriptor entry, or {@code null}
 *         if the archive contained no descriptor entry.
 * @throws IOException if the stream cannot be uncompressed or read.
 */
@Override
public byte[] unpackContainerData(Container<KeyValueContainerData> container, InputStream input) throws IOException {
    KeyValueContainerData containerData = container.getContainerData();
    Path dbRoot = containerData.getDbFile().toPath();
    Path chunksRoot = Paths.get(containerData.getChunksPath());
    byte[] descriptorFileContent = null;
    try (InputStream decompressed = decompress(input);
            ArchiveInputStream archiveInput = untar(decompressed)) {
        // Walk every entry of the tar archive and route it to its destination.
        for (ArchiveEntry entry = archiveInput.getNextEntry(); entry != null; entry = archiveInput.getNextEntry()) {
            String name = entry.getName();
            long size = entry.getSize();
            if (name.startsWith(DB_DIR_NAME + "/")) {
                // DB entries land under the container's database root.
                String relative = name.substring(DB_DIR_NAME.length() + 1);
                extractEntry(entry, archiveInput, size, dbRoot, dbRoot.resolve(relative));
            } else if (name.startsWith(CHUNKS_DIR_NAME + "/")) {
                // Chunk entries land under the container's chunks root.
                String relative = name.substring(CHUNKS_DIR_NAME.length() + 1);
                extractEntry(entry, archiveInput, size, chunksRoot, chunksRoot.resolve(relative));
            } else if (CONTAINER_FILE_NAME.equals(name)) {
                // The descriptor is not written to disk here; it is returned to
                // the caller and unpacked in a separate step by
                // unpackContainerDescriptor.
                descriptorFileContent = readEntry(archiveInput, size);
            } else {
                // Anything else means a malformed/unexpected archive.
                throw new IllegalArgumentException("Unknown entry in the tar file: " + name);
            }
        }
        return descriptorFileContent;
    } catch (CompressorException e) {
        throw new IOException("Can't uncompress the given container: " + container.getContainerData().getContainerID(), e);
    }
}
Use of org.apache.commons.compress.archivers.ArchiveInputStream in project hopsworks by logicalclocks — class WebDriverFactory, method extractZip.
/**
 * Extracts a zip archive into the given destination directory.
 *
 * <p>Fixes over the previous version: streams are closed via
 * try-with-resources even on failure, directory entries are created instead
 * of being opened as files, parent directories of nested entries are created
 * before writing, and entry names are validated against "zip slip" path
 * traversal (an entry like {@code ../../evil} must not escape the
 * destination directory).
 *
 * @param sourceFilePath the zip file to extract
 * @param destinationFilePath the directory to extract into
 * @throws IOException if the archive cannot be read or an entry escapes the
 *         destination directory
 * @throws ArchiveException if the stream is not a readable zip archive
 */
public static void extractZip(File sourceFilePath, File destinationFilePath) throws IOException, ArchiveException {
    String destinationRoot = destinationFilePath.getCanonicalPath() + File.separator;
    try (InputStream is = new FileInputStream(sourceFilePath);
            ArchiveInputStream in = new ArchiveStreamFactory().createArchiveInputStream("zip", is)) {
        ZipArchiveEntry entry;
        while ((entry = (ZipArchiveEntry) in.getNextEntry()) != null) {
            File outFile = new File(destinationFilePath, entry.getName());
            // Zip-slip guard: the resolved path must stay inside the destination.
            if (!outFile.getCanonicalPath().startsWith(destinationRoot)) {
                throw new IOException("Archive entry escapes destination directory: " + entry.getName());
            }
            if (entry.isDirectory()) {
                outFile.mkdirs();
                continue;
            }
            // Nested entries may arrive before their directory entry (or without one).
            File parent = outFile.getParentFile();
            if (parent != null) {
                parent.mkdirs();
            }
            try (OutputStream out = new FileOutputStream(outFile)) {
                IOUtils.copy(in, out);
            }
        }
    }
}
Use of org.apache.commons.compress.archivers.ArchiveInputStream in project jreleaser by jreleaser — class FileUtils, method unpackArchiveCompressed.
/**
 * Unpacks a compressed tar archive ({@code src}) into {@code dest}.
 *
 * <p>The destination is cleared first. The compressor is chosen from the
 * file extension via {@link FileType}; the decompressed stream is then read
 * as a tar archive. When {@code removeRootEntry} is true, the archive's
 * root directory (named after the file without its extension) is stripped
 * from extracted paths.
 *
 * @param src the compressed tar archive to unpack
 * @param dest the directory to unpack into (cleared beforehand)
 * @param removeRootEntry whether to strip the archive's root directory entry
 * @throws IOException if reading or extracting fails
 */
public static void unpackArchiveCompressed(Path src, Path dest, boolean removeRootEntry) throws IOException {
    deleteFiles(dest, true);
    String fileName = src.getFileName().toString();
    String baseName = getFilename(fileName, FileType.getSupportedExtensions());
    // Extension including the leading dot, e.g. ".tar.gz" -> format "tar.gz".
    String extension = fileName.substring(baseName.length());
    FileType fileType = FileType.of(extension.substring(1));
    String rootEntryName = removeRootEntry ? baseName + "/" : "";
    try (InputStream source = Files.newInputStream(src);
            InputStream buffered = new BufferedInputStream(source);
            InputStream decompressed = resolveCompressorInputStream(fileType, buffered);
            ArchiveInputStream archive = new TarArchiveInputStream(decompressed)) {
        unpackArchive(rootEntryName, dest.toFile(), archive);
    }
}
Use of org.apache.commons.compress.archivers.ArchiveInputStream in project packr by libgdx — class ArchiveUtils, method extractGenericArchive.
/**
 * Extracts an archive using {@link ArchiveStreamFactory#createArchiveInputStream(InputStream)} with no special handling of symbolic links or file
 * permissions.
 *
 * <p>Fixes over the previous version: the created {@link ArchiveInputStream}
 * is closed via try-with-resources, and entry paths are normalized and
 * checked against "zip slip" path traversal so a malicious entry such as
 * {@code ../../evil} cannot escape the extraction directory.
 *
 * @param inputStream the archive input stream
 * @param extractToDirectory the directory to extract the archive into
 * @throws ArchiveException if an archive error occurs
 * @throws IOException if an IO error occurs, or an entry escapes the extraction directory
 */
private static void extractGenericArchive(InputStream inputStream, Path extractToDirectory) throws ArchiveException, IOException {
    Path extractRoot = extractToDirectory.normalize();
    try (ArchiveInputStream archiveInputStream = new ArchiveStreamFactory().createArchiveInputStream(inputStream)) {
        ArchiveEntry entry;
        while ((entry = archiveInputStream.getNextEntry()) != null) {
            if (!archiveInputStream.canReadEntryData(entry)) {
                LOG.error("Failed to read archive entry " + entry);
                continue;
            }
            // Zip-slip guard: after normalization the entry must still live
            // under the extraction root.
            Path entryExtractPath = extractRoot.resolve(getEntryAsPath(entry)).normalize();
            if (!entryExtractPath.startsWith(extractRoot)) {
                throw new IOException("Archive entry escapes extraction directory: " + entry.getName());
            }
            if (entry.isDirectory()) {
                Files.createDirectories(entryExtractPath);
            } else {
                Files.createDirectories(entryExtractPath.getParent());
                Files.copy(archiveInputStream, entryExtractPath, StandardCopyOption.REPLACE_EXISTING);
            }
            // Preserve the entry's recorded modification time on the extracted file.
            Files.setLastModifiedTime(entryExtractPath, FileTime.fromMillis(entry.getLastModifiedDate().getTime()));
        }
    }
}
Use of org.apache.commons.compress.archivers.ArchiveInputStream in project stanbol by apache — class IndexInstallTask, method execute.
/**
 * Installs a managed Solr index from the installable resource.
 *
 * <p>The resource must carry an index name attribute; the target server is
 * looked up by the (optional) server-name attribute. Depending on the
 * archive-format attribute the resource is either a properties file that
 * references an index archive by name, or the index archive stream itself.
 * On any failure the resource is marked {@code IGNORED}; on success,
 * {@code INSTALLED}.
 */
@Override
public void execute(InstallationContext ctx) {
    String indexName = (String) getResource().getAttribute(INDEX_NAME);
    if (indexName == null) {
        // NOTE(review): this message says "remove" although this task installs
        // an index — likely copied from the uninstall task; confirm and fix
        // the wording separately. Also note the two concatenated literals are
        // missing a separating space.
        log.error("Unable to remove Managed Index because the required Property '{}'" + "used to define the name of the Index is missing", INDEX_NAME);
        setFinishedState(ResourceState.IGNORED);
    } else {
        String serverName = (String) getResource().getAttribute(ManagedIndexConstants.SERVER_NAME);
        // serverName == null selects the default managed server.
        ManagedSolrServer server = managedServers.get(serverName);
        if (server == null) {
            log.warn("Unable to install Managed Solr Index {} because the {} " + "Server {} is currently not active!", new Object[] { indexName, serverName == null ? "default" : "", serverName != null ? serverName : "" });
            // needs still to be installed
            setFinishedState(ResourceState.IGNORED);
        } else {
            // we have an index name and a server to install it ...
            // ... let's do the work
            String archiveFormat = (String) getResource().getAttribute(PROPERTY_ARCHIVE_FORMAT);
            InputStream is = null;
            try {
                is = getResource().getInputStream();
                if ("properties".equals(archiveFormat)) {
                    // The resource is a properties file referencing the index
                    // archive (not the archive itself).
                    InputStreamReader reader = new InputStreamReader(is, "UTF-8");
                    Properties props = new Properties();
                    try {
                        props.load(reader);
                    } finally {
                        IOUtils.closeQuietly(reader);
                    }
                    // TODO install to the right server!
                    String indexPath = props.getProperty(INDEX_ARCHIVES);
                    if (indexPath == null) {
                        // Fall back to the conventional archive name derived
                        // from the index name.
                        indexPath = indexName + '.' + ConfigUtils.SOLR_INDEX_ARCHIVE_EXTENSION;
                        log.info("Property \"" + INDEX_ARCHIVES + "\" not present within the SolrIndex references file. Will use the default name \"" + indexPath + "\"");
                    }
                    server.updateIndex(indexName, indexPath, props);
                    setFinishedState(ResourceState.INSTALLED);
                } else {
                    // The resource stream IS the index archive; wrap it in the
                    // appropriate ArchiveInputStream for the declared format.
                    ArchiveInputStream ais = null;
                    try {
                        ais = ConfigUtils.getArchiveInputStream(archiveFormat, is);
                        server.updateIndex(indexName, ais);
                        // we are done ... set the state to installed!
                        setFinishedState(ResourceState.INSTALLED);
                    } finally {
                        IOUtils.closeQuietly(ais);
                    }
                }
                // now we can copy the core!
            } catch (Exception e) {
                // NOTE(review): "arviceFormat" in the message below is a typo
                // for "archiveFormat" (runtime string left unchanged here).
                String message = String.format("Unable to install SolrIndexArchive for index name '%s'!" + " (resource=%s, arviceFormat=%s)", indexName, getResource().getURL(), archiveFormat);
                log.error(message, e);
                ctx.log("%s! Reason: %s", message, e.getMessage());
                setFinishedState(ResourceState.IGNORED);
            } finally {
                IOUtils.closeQuietly(is);
            }
        }
    }
}
Aggregations