use of org.apache.commons.io.input.CloseShieldInputStream in project gradle by gradle.
the class TarTaskOutputPacker method unpack.
private UnpackResult unpack(SortedSet<ResolvedTaskOutputFilePropertySpec> propertySpecs, TarArchiveInputStream tarInput, TaskOutputOriginReader readOriginAction) throws IOException {
    Map<String, ResolvedTaskOutputFilePropertySpec> propertySpecsMap = Maps.uniqueIndex(propertySpecs, new Function<TaskFilePropertySpec, String>() {

        @Override
        public String apply(TaskFilePropertySpec propertySpec) {
            return propertySpec.getPropertyName();
        }
    });
    TarArchiveEntry tarEntry;
    OriginTaskExecutionMetadata originMetadata = null;
    ImmutableListMultimap.Builder<String, FileSnapshot> propertyFileSnapshots = ImmutableListMultimap.builder();
    long entries = 0;
    while ((tarEntry = tarInput.getNextTarEntry()) != null) {
        ++entries;
        String path = tarEntry.getName();
        if (path.equals(METADATA_PATH)) {
            // handle origin metadata
            originMetadata = readOriginAction.execute(new CloseShieldInputStream(tarInput));
        } else {
            // handle output property
            Matcher matcher = PROPERTY_PATH.matcher(path);
            if (!matcher.matches()) {
                throw new IllegalStateException("Cached result format error, invalid contents: " + path);
            }
            String propertyName = unescape(matcher.group(2));
            ResolvedTaskOutputFilePropertySpec propertySpec = propertySpecsMap.get(propertyName);
            if (propertySpec == null) {
                throw new IllegalStateException(String.format("No output property '%s' registered", propertyName));
            }
            boolean outputMissing = matcher.group(1) != null;
            String childPath = matcher.group(3);
            unpackPropertyEntry(propertySpec, tarInput, tarEntry, childPath, outputMissing, propertyFileSnapshots);
        }
    }
    if (originMetadata == null) {
        throw new IllegalStateException("Cached result format error, no origin metadata was found.");
    }
    return new UnpackResult(originMetadata, entries, propertyFileSnapshots.build());
}
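The wrapper matters because readOriginAction receives the shared TarArchiveInputStream: if the origin reader closed the stream it was handed, the loop could no longer read the remaining tar entries. A minimal, self-contained sketch of the no-op close behavior, using a hypothetical consumer rather than Gradle's actual origin reader:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.IOUtils;
import org.apache.commons.io.input.CloseShieldInputStream;

public class CloseShieldSketch {

    // Hypothetical consumer that, like many readers and parsers,
    // closes whatever stream it is handed.
    static String readHeaderAndClose(InputStream in) throws IOException {
        try (InputStream closing = in) {
            byte[] header = new byte[7];
            IOUtils.readFully(closing, header);
            return new String(header, StandardCharsets.UTF_8);
        }
    }

    public static void main(String[] args) throws IOException {
        InputStream shared = new ByteArrayInputStream("entry-1entry-2".getBytes(StandardCharsets.UTF_8));
        // The shield forwards reads but swallows close(), so the consumer's
        // close() never reaches `shared`.
        readHeaderAndClose(new CloseShieldInputStream(shared));
        // The shared stream is still open and positioned after the consumed bytes;
        // with a real archive stream this is what keeps the remaining entries readable.
        System.out.println(IOUtils.toString(shared, StandardCharsets.UTF_8)); // prints "entry-2"
        shared.close();
    }
}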
use of org.apache.commons.io.input.CloseShieldInputStream in project xwiki-platform by xwiki.
the class EmbeddedSolrInstance method validateAndInitializeHomeDirectory.
/**
 * Checks rights, creates paths and adds default config XML files if they don't already exist.
 *
 * @param solrHome the directory to use as Solr home.
 * @throws IllegalArgumentException if the provided directory is not usable (is a file, is not writable, etc.).
 * @throws IOException if the XML files are not copied successfully.
 */
private void validateAndInitializeHomeDirectory(String solrHome) throws IllegalArgumentException, IOException {
    // Validate and create the directory if it does not already exist.
    File solrHomeDirectory = new File(solrHome);
    if (solrHomeDirectory.exists()) {
        // Exists but is unusable.
        if (!solrHomeDirectory.isDirectory() || !solrHomeDirectory.canWrite() || !solrHomeDirectory.canRead()) {
            throw new IllegalArgumentException(String.format("The given path [%s] must be a readable and writable directory", solrHomeDirectory));
        }
    } else {
        // Create the home directory
        if (!solrHomeDirectory.mkdirs()) {
            // Does not exist and can not be created.
            throw new IllegalArgumentException(String.format("The given path [%s] could not be created due to an invalid value %s", solrHomeDirectory, "or to insufficient filesystem permissions"));
        }
        // Initialize the Solr home with the default configuration files since the folder did not already exist.
        // Add the configuration files required by Solr.
        InputStream stream = this.solrConfiguration.getHomeDirectoryConfiguration();
        try (ZipInputStream zstream = new ZipInputStream(stream)) {
            for (ZipEntry entry = zstream.getNextEntry(); entry != null; entry = zstream.getNextEntry()) {
                if (entry.isDirectory()) {
                    File destinationDirectory = new File(solrHomeDirectory, entry.getName());
                    destinationDirectory.mkdirs();
                } else {
                    File destinationFile = new File(solrHomeDirectory, entry.getName());
                    FileUtils.copyInputStreamToFile(new CloseShieldInputStream(zstream), destinationFile);
                }
            }
        }
    }
}
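What makes the shield necessary in this loop is that FileUtils.copyInputStreamToFile closes the stream it is handed once the copy completes; without the wrapper, extracting the first file entry would close the ZipInputStream and the next getNextEntry() call would fail. A minimal sketch of the same extraction pattern outside XWiki, with hypothetical names:

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;

import org.apache.commons.io.FileUtils;
import org.apache.commons.io.input.CloseShieldInputStream;

public class ZipExtractSketch {

    // Extract every entry of a zip stream into targetDir. Each file entry is
    // wrapped in a CloseShieldInputStream so copyInputStreamToFile cannot
    // close the ZipInputStream that still has entries left to read.
    static void extract(InputStream zipSource, File targetDir) throws IOException {
        try (ZipInputStream zip = new ZipInputStream(zipSource)) {
            for (ZipEntry entry = zip.getNextEntry(); entry != null; entry = zip.getNextEntry()) {
                File destination = new File(targetDir, entry.getName());
                if (entry.isDirectory()) {
                    destination.mkdirs();
                } else {
                    FileUtils.copyInputStreamToFile(new CloseShieldInputStream(zip), destination);
                }
            }
        }
    }
}

Commons IO 2.5 and newer also provide FileUtils.copyToFile, which leaves the source stream open, so that variant would not need the shield.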
use of org.apache.commons.io.input.CloseShieldInputStream in project xwiki-platform by xwiki.
the class XarPackage method readDescriptor.
/**
 * Read the XML descriptor of a XAR package (usually named package.xml).
 *
 * @param stream the input stream to the XML file to parse
 * @throws XarException when failing to parse the descriptor
 * @throws IOException when failing to read the file
 */
public void readDescriptor(InputStream stream) throws XarException, IOException {
    DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
    DocumentBuilder dBuilder;
    try {
        dBuilder = dbFactory.newDocumentBuilder();
    } catch (ParserConfigurationException e) {
        throw new XarException("Failed to create a new Document builder", e);
    }
    Document doc;
    try {
        // DocumentBuilder#parse closes the passed stream, which is not what we want
        doc = dBuilder.parse(new CloseShieldInputStream(stream));
    } catch (SAXException e) {
        throw new XarException("Failed to parse XML document", e);
    }
    // Normalize the document
    doc.getDocumentElement().normalize();
    // Read the document
    NodeList children = doc.getChildNodes();
    for (int i = 0; i < children.getLength(); ++i) {
        Node node = children.item(i);
        if (node.getNodeType() == Node.ELEMENT_NODE) {
            Element element = (Element) node;
            if (element.getTagName().equals(XarModel.ELEMENT_PACKAGE)) {
                readDescriptorPackage(element);
                break;
            }
        }
    }
}
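The shield pays off on the calling side: the descriptor is typically one entry inside a .xar (zip) archive, and the same stream must keep serving the remaining entries after the descriptor has been parsed, even though DocumentBuilder#parse closes the stream it is given. A hypothetical caller sketch (not actual XWiki code; the XarPackage import location is assumed):

import java.io.InputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;

import org.xwiki.xar.XarPackage; // assumed package location

public class XarDescriptorSketch {

    void readXar(XarPackage xarPackage, InputStream xarSource) throws Exception {
        try (ZipInputStream zip = new ZipInputStream(xarSource)) {
            for (ZipEntry entry = zip.getNextEntry(); entry != null; entry = zip.getNextEntry()) {
                if ("package.xml".equals(entry.getName())) {
                    // readDescriptor shields `zip`, so parsing the descriptor
                    // does not close the archive we are still iterating.
                    xarPackage.readDescriptor(zip);
                } else {
                    // ... read the wiki document stored in this entry ...
                }
            }
        }
    }
}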
use of org.apache.commons.io.input.CloseShieldInputStream in project xwiki-platform by xwiki.
the class Package method Import.
/**
 * Load this package in memory from an InputStream. It may be installed later using {@link #install(XWikiContext)}.
 *
 * @param file an InputStream of a zipped package file
 * @param context current XWikiContext
 * @return an empty string (the return value is not used)
 * @throws IOException while reading the ZipFile
 * @throws XWikiException when package content is broken
 * @since 2.3M2
 */
public String Import(InputStream file, XWikiContext context) throws IOException, XWikiException {
    ZipArchiveInputStream zis;
    ArchiveEntry entry;
    Document description = null;
    try {
        zis = new ZipArchiveInputStream(file, XAR_FILENAME_ENCODING, false);
        List<XWikiDocument> docsToLoad = new LinkedList<XWikiDocument>();
        /*
         * Loop 1: Cycle through the zip input stream and load all of the documents; when we find the
         * package.xml file we put it aside so that we only include documents which are listed in it.
         */
        while ((entry = zis.getNextEntry()) != null) {
            if (entry.isDirectory() || (entry.getName().indexOf("META-INF") != -1)) {
                // Skip directories and META-INF (we use that directory to put meta data such as LICENSE/NOTICE files).
                continue;
            } else if (entry.getName().compareTo(DefaultPackageFileName) == 0) {
                // The entry is the manifest (package.xml). Read this differently.
                description = fromXml(new CloseShieldInputStream(zis));
            } else {
                XWikiDocument doc = null;
                try {
                    doc = readFromXML(new CloseShieldInputStream(zis));
                } catch (Throwable e) {
                    LOGGER.warn("Failed to parse document [{}] from XML during import, thus it will not be installed. The error was: {}",
                        entry.getName(), ExceptionUtils.getRootCauseMessage(e));
                    // It will be listed in the "failed documents" section after the import.
                    addToErrors(entry.getName().replaceAll("/", "."), context);
                    continue;
                }
                // If no filter throws an exception, add it to the list to import.
                try {
                    this.filter(doc, context);
                    docsToLoad.add(doc);
                } catch (ExcludeDocumentException e) {
                    LOGGER.info("Skipping the document '" + doc.getDocumentReference() + "'");
                }
            }
        }
        // Make sure a manifest was included in the package...
        if (description == null) {
            throw new PackageException(XWikiException.ERROR_XWIKI_UNKNOWN, "Could not find the package definition");
        }
        /*
         * Loop 2: Cycle through the list of documents and if they are in the manifest then add them, otherwise log
         * a warning and add them to the skipped list.
         */
        for (XWikiDocument doc : docsToLoad) {
            if (documentExistInPackageFile(doc.getFullName(), doc.getLanguage(), description)) {
                this.add(doc, context);
            } else {
                LOGGER.warn("Document " + doc.getDocumentReference() + " does not exist in the package definition."
                    + " It will not be installed.");
                // It will be listed in the "skipped documents" section after the import.
                addToSkipped(doc.getFullName(), context);
            }
        }
        updateFileInfos(description);
    } catch (DocumentException e) {
        throw new PackageException(XWikiException.ERROR_XWIKI_UNKNOWN, "Error when reading the XML");
    }
    return "";
}
use of org.apache.commons.io.input.CloseShieldInputStream in project gradle by gradle.
the class LineEndingNormalizingInputStreamHasher method hash.
/**
 * Returns empty if the file is detected to be a binary file.
 */
private Optional<HashCode> hash(InputStream inputStream) throws IOException {
    PrimitiveHasher hasher = Hashing.newPrimitiveHasher();
    hasher.putHash(SIGNATURE);
    try (BufferedInputStream input = new BufferedInputStream(new CloseShieldInputStream(inputStream), BUFFER_SIZE)) {
        int peekAhead = -1;
        while (true) {
            // If there is something left over in the peekAhead buffer, use that
            int next = peekAhead;
            // If the peekAhead buffer is empty, get the next byte from the input stream
            if (next != -1) {
                peekAhead = -1;
            } else {
                next = input.read();
            }
            // If both the peekAhead buffer and the input stream are empty, we're done
            if (next == -1) {
                break;
            }
            // Bust out if we detect a binary file
            if (isControlCharacter(next)) {
                return Optional.empty();
            }
            // If the next bytes are '\r' or '\r\n', replace them with '\n'
            if (next == '\r') {
                peekAhead = input.read();
                if (peekAhead == '\n') {
                    peekAhead = -1;
                }
                next = '\n';
            }
            hasher.putByte((byte) next);
        }
    }
    return Optional.of(hasher.hash());
}
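The shield here addresses close propagation: the BufferedInputStream is opened in a try-with-resources block, and closing it would otherwise close the caller-supplied inputStream, whose lifecycle belongs to the caller. A minimal sketch of the pattern with hypothetical names:

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.commons.io.input.CloseShieldInputStream;

public class BufferedShieldSketch {

    // Count the remaining bytes of a caller-owned stream. Decorator streams
    // propagate close() to whatever they wrap, so the CloseShieldInputStream
    // between them keeps the try-with-resources from closing callerOwned.
    static int countBytes(InputStream callerOwned) throws IOException {
        int count = 0;
        try (BufferedInputStream buffered = new BufferedInputStream(new CloseShieldInputStream(callerOwned), 8192)) {
            while (buffered.read() != -1) {
                count++;
            }
        }
        // callerOwned has not been closed; its owner remains responsible for closing it.
        return count;
    }
}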