use of org.apache.commons.compress.archivers.tar.TarArchiveOutputStream in project alien4cloud by alien4cloud.
the class FileUtil method tar.
/**
* Recursively tar file
*
* @param inputPath the file or directory to archive
* @param outputPath where to write the archive file
* @param gZipped whether to compress the tar stream with the gzip algorithm
* @param childrenOnly if inputPath is a directory and childrenOnly is true, the archive contains all of its children; otherwise the archive contains a
* single root entry which is the inputPath itself
*/
public static void tar(Path inputPath, Path outputPath, boolean gZipped, boolean childrenOnly) throws IOException {
if (!Files.exists(inputPath)) {
throw new FileNotFoundException("File not found " + inputPath);
}
touch(outputPath);
OutputStream outputStream = new BufferedOutputStream(Files.newOutputStream(outputPath));
if (gZipped) {
outputStream = new GzipCompressorOutputStream(outputStream);
}
TarArchiveOutputStream tarArchiveOutputStream = new TarArchiveOutputStream(outputStream);
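// LONGFILE_POSIX stores entry names longer than 100 characters in PAX extended headers instead of truncating them.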
tarArchiveOutputStream.setLongFileMode(TarArchiveOutputStream.LONGFILE_POSIX);
try {
if (!Files.isDirectory(inputPath)) {
putTarEntry(tarArchiveOutputStream, new TarArchiveEntry(inputPath.getFileName().toString()), inputPath);
} else {
Path sourcePath = inputPath;
if (!childrenOnly) {
// In order to have the directory itself as the root entry
sourcePath = inputPath.getParent();
}
Files.walkFileTree(inputPath, new TarDirWalker(sourcePath, tarArchiveOutputStream));
}
tarArchiveOutputStream.flush();
} finally {
Closeables.close(tarArchiveOutputStream, true);
}
}
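For illustration only, a minimal caller of this helper might look like the sketch below. The paths and archive name are hypothetical, and it assumes the alien4cloud FileUtil class shown above is on the classpath.

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;

public class FileUtilTarExample {
    public static void main(String[] args) throws IOException {
        // Hypothetical locations; point these at an existing directory.
        Path input = Paths.get("/tmp/my-topology");
        Path output = Paths.get("/tmp/my-topology.tar.gz");
        // gZipped = true compresses the tar stream; childrenOnly = false keeps
        // the directory itself as the single root entry of the archive.
        FileUtil.tar(input, output, true, false);
    }
}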
use of org.apache.commons.compress.archivers.tar.TarArchiveOutputStream in project docker-client by spotify.
the class CompressedDirectory method create.
/**
* This method creates a gzip tarball of the specified directory. File permissions will be
* retained. The file will be created in a temporary directory using the {@link
* Files#createTempFile(String, String, java.nio.file.attribute.FileAttribute[])} method. The
* returned object is auto-closeable, and upon closing it, the archive file will be deleted.
*
* @param directory the directory to compress
* @return a CompressedDirectory handle for the generated archive file
* @throws IOException if the compressed directory could not be created.
*/
public static CompressedDirectory create(final Path directory) throws IOException {
final Path file = Files.createTempFile("docker-client-", ".tar.gz");
final Path dockerIgnorePath = directory.resolve(".dockerignore");
final ImmutableList<DockerIgnorePathMatcher> ignoreMatchers = parseDockerIgnore(dockerIgnorePath);
try (final OutputStream fileOut = Files.newOutputStream(file);
final GzipCompressorOutputStream gzipOut = new GzipCompressorOutputStream(fileOut);
final TarArchiveOutputStream tarOut = new TarArchiveOutputStream(gzipOut)) {
tarOut.setLongFileMode(LONGFILE_POSIX);
tarOut.setBigNumberMode(BIGNUMBER_POSIX);
Files.walkFileTree(directory, EnumSet.of(FileVisitOption.FOLLOW_LINKS), Integer.MAX_VALUE, new Visitor(directory, ignoreMatchers, tarOut));
} catch (Throwable t) {
// If an error occurs, delete the temporary file before rethrowing the exception.
try {
Files.delete(file);
} catch (IOException e) {
// So we don't lose track of the reason the file was deleted... might be important
t.addSuppressed(e);
}
throw t;
}
return new CompressedDirectory(file);
}
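A hedged usage sketch follows: closing the returned object deletes the temporary archive, per the javadoc above. The build-context path is hypothetical, and the file() accessor used to print the archive location is assumed from the surrounding docker-client code rather than shown on this page.

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;

public class CompressedDirectoryExample {
    public static void main(String[] args) throws IOException {
        Path buildContext = Paths.get("/tmp/docker-build-context"); // hypothetical directory
        // The .tar.gz is removed automatically when the try block exits.
        try (CompressedDirectory compressed = CompressedDirectory.create(buildContext)) {
            System.out.println("Created archive: " + compressed.file()); // file() is an assumed accessor
        }
    }
}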
use of org.apache.commons.compress.archivers.tar.TarArchiveOutputStream in project karaf by apache.
the class ArchiveMojo method archive.
public File archive(File source, File dest, Artifact artifact) throws // ArchiverException,
IOException {
String serverName = null;
if (targetFile != null) {
serverName = targetFile.getName();
} else {
serverName = artifact.getArtifactId() + "-" + artifact.getVersion();
}
dest = new File(dest, serverName + "." + artifact.getType());
String prefix = "";
if (usePathPrefix) {
prefix = pathPrefix.trim();
if (prefix.length() > 0 && !prefix.endsWith("/")) {
prefix += "/";
}
}
if ("tar.gz".equals(artifact.getType())) {
try (OutputStream fOut = Files.newOutputStream(dest.toPath());
OutputStream bOut = new BufferedOutputStream(fOut);
OutputStream gzOut = new GzipCompressorOutputStream(bOut);
TarArchiveOutputStream tOut = new TarArchiveOutputStream(gzOut);
DirectoryStream<Path> children = Files.newDirectoryStream(source.toPath())) {
tOut.setLongFileMode(TarArchiveOutputStream.LONGFILE_POSIX);
tOut.setBigNumberMode(TarArchiveOutputStream.BIGNUMBER_POSIX);
for (Path child : children) {
addFileToTarGz(tOut, child, prefix);
}
}
} else if ("zip".equals(artifact.getType())) {
try (OutputStream fOut = Files.newOutputStream(dest.toPath());
OutputStream bOut = new BufferedOutputStream(fOut);
ZipArchiveOutputStream tOut = new ZipArchiveOutputStream(bOut);
DirectoryStream<Path> children = Files.newDirectoryStream(source.toPath())) {
for (Path child : children) {
addFileToZip(tOut, child, prefix);
}
}
} else {
throw new IllegalArgumentException("Unknown target type: " + artifact.getType());
}
return dest;
}
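The addFileToTarGz and addFileToZip helpers invoked above are not included on this page. Below is a rough sketch of what a recursive tar helper of that shape typically does; the body is an illustration under that assumption, not the Karaf implementation, which may also handle details such as file modes and symbolic links that are omitted here.

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;

final class TarGzHelperSketch {

    // Recursively adds 'path' (file or directory) to the tar stream under 'prefix'.
    static void addFileToTarGz(TarArchiveOutputStream tOut, Path path, String prefix) throws IOException {
        String entryName = prefix + path.getFileName();
        TarArchiveEntry entry = new TarArchiveEntry(path.toFile(), entryName);
        tOut.putArchiveEntry(entry);
        if (Files.isDirectory(path)) {
            tOut.closeArchiveEntry();
            try (DirectoryStream<Path> children = Files.newDirectoryStream(path)) {
                for (Path child : children) {
                    addFileToTarGz(tOut, child, entryName + "/");
                }
            }
        } else {
            try (InputStream in = Files.newInputStream(path)) {
                in.transferTo(tOut); // copy file contents into the archive entry
            }
            tOut.closeArchiveEntry();
        }
    }
}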
use of org.apache.commons.compress.archivers.tar.TarArchiveOutputStream in project testcontainers-java by testcontainers.
the class ImageFromDockerfile method resolve.
@Override
protected final String resolve() {
Logger logger = DockerLoggerFactory.getLogger(dockerImageName);
DockerClient dockerClient = DockerClientFactory.instance().client();
try {
if (deleteOnExit) {
ResourceReaper.instance().registerImageForCleanup(dockerImageName);
}
BuildImageResultCallback resultCallback = new BuildImageResultCallback() {
@Override
public void onNext(BuildResponseItem item) {
super.onNext(item);
if (item.isErrorIndicated()) {
logger.error(item.getErrorDetail().getMessage());
} else {
logger.debug(StringUtils.chomp(item.getStream(), "\n"));
}
}
};
// We have to use pipes to avoid high memory consumption since users might want to build really big images
@Cleanup PipedInputStream in = new PipedInputStream();
@Cleanup PipedOutputStream out = new PipedOutputStream(in);
BuildImageCmd buildImageCmd = dockerClient.buildImageCmd(in);
configure(buildImageCmd);
Map<String, String> labels = new HashMap<>();
if (buildImageCmd.getLabels() != null) {
labels.putAll(buildImageCmd.getLabels());
}
labels.putAll(DockerClientFactory.DEFAULT_LABELS);
// noinspection deprecation
labels.putAll(ResourceReaper.instance().getLabels());
buildImageCmd.withLabels(labels);
prePullDependencyImages(dependencyImageNames);
BuildImageResultCallback exec = buildImageCmd.exec(resultCallback);
long bytesToDockerDaemon = 0;
// To build an image, we have to send the context to Docker in TAR archive format
try (TarArchiveOutputStream tarArchive = new TarArchiveOutputStream(new GZIPOutputStream(out))) {
tarArchive.setLongFileMode(TarArchiveOutputStream.LONGFILE_POSIX);
tarArchive.setBigNumberMode(TarArchiveOutputStream.BIGNUMBER_POSIX);
for (Map.Entry<String, Transferable> entry : transferables.entrySet()) {
Transferable transferable = entry.getValue();
final String destination = entry.getKey();
transferable.transferTo(tarArchive, destination);
bytesToDockerDaemon += transferable.getSize();
}
tarArchive.finish();
}
log.info("Transferred {} to Docker daemon", FileUtils.byteCountToDisplaySize(bytesToDockerDaemon));
// warn if >50MB sent to docker daemon
if (bytesToDockerDaemon > FileUtils.ONE_MB * 50)
log.warn("A large amount of data was sent to the Docker daemon ({}). Consider using a .dockerignore file for better performance.", FileUtils.byteCountToDisplaySize(bytesToDockerDaemon));
exec.awaitImageId();
return dockerImageName;
} catch (IOException e) {
throw new RuntimeException("Can't close DockerClient", e);
}
}
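resolve() is not usually called by user code; testcontainers invokes it lazily when the image name is needed, typically when a container built from an ImageFromDockerfile starts. A short usage sketch follows; the base image tag and command are arbitrary choices, not taken from this page.

import org.testcontainers.containers.GenericContainer;
import org.testcontainers.images.builder.ImageFromDockerfile;

public class ImageFromDockerfileExample {
    public static void main(String[] args) {
        try (GenericContainer<?> container = new GenericContainer<>(
                new ImageFromDockerfile()
                        .withDockerfileFromBuilder(builder -> builder
                                .from("alpine:3.16")      // arbitrary base image
                                .cmd("sleep", "infinity")
                                .build()))) {
            container.start();
            System.out.println("Started container " + container.getContainerId());
        }
    }
}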
use of org.apache.commons.compress.archivers.tar.TarArchiveOutputStream in project testcontainers-java by testcontainers.
the class ContainerState method copyFileToContainer.
/**
* Copies a file to the container.
*
* @param transferable file which is copied into the container
* @param containerPath destination path inside the container
*/
@SneakyThrows(IOException.class)
default void copyFileToContainer(Transferable transferable, String containerPath) {
if (getContainerId() == null) {
throw new IllegalStateException("copyFileToContainer can only be used with created / running container");
}
try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
TarArchiveOutputStream tarArchive = new TarArchiveOutputStream(byteArrayOutputStream)) {
tarArchive.setLongFileMode(TarArchiveOutputStream.LONGFILE_POSIX);
tarArchive.setBigNumberMode(TarArchiveOutputStream.BIGNUMBER_POSIX);
transferable.transferTo(tarArchive, containerPath);
tarArchive.finish();
DockerClientFactory.instance().client().copyArchiveToContainerCmd(getContainerId()).withTarInputStream(new ByteArrayInputStream(byteArrayOutputStream.toByteArray())).withRemotePath("/").exec();
}
}
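A usage sketch of the default method above; the image tag, payload, and destination path are arbitrary examples.

import java.nio.charset.StandardCharsets;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.images.builder.Transferable;

public class CopyFileToContainerExample {
    public static void main(String[] args) {
        try (GenericContainer<?> container = new GenericContainer<>("alpine:3.16")
                .withCommand("sleep", "infinity")) {
            container.start();
            // Wrap an in-memory payload as a Transferable and copy it into the running container.
            container.copyFileToContainer(
                    Transferable.of("hello from the host".getBytes(StandardCharsets.UTF_8)),
                    "/tmp/greeting.txt");
        }
    }
}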