Use of java.io.BufferedInputStream in project weave by continuuity.
In class AbstractWeaveService, the method handleSecureStoreUpdate:
/**
 * Attempts to handle a secure store update.
 *
 * @param message The message received
 * @return {@code true} if the message requests a secure store update, {@code false} otherwise.
 */
protected final boolean handleSecureStoreUpdate(Message message) {
  if (!SystemMessages.SECURE_STORE_UPDATED.equals(message)) {
    return false;
  }
  // If not in secure mode, simply ignore the message.
  if (!UserGroupInformation.isSecurityEnabled()) {
    return true;
  }
  try {
    Credentials credentials = new Credentials();
    Location location = getSecureStoreLocation();
    DataInputStream input = new DataInputStream(new BufferedInputStream(location.getInputStream()));
    try {
      credentials.readTokenStorageStream(input);
    } finally {
      input.close();
    }
    UserGroupInformation.getCurrentUser().addCredentials(credentials);
    this.credentials = credentials;
    LOG.info("Secure store updated from {}.", location.toURI());
  } catch (Throwable t) {
    LOG.error("Failed to update secure store.", t);
  }
  return true;
}
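The explicit try/finally around the DataInputStream above predates try-with-resources. Below is a minimal sketch of the same token-reading pattern using try-with-resources; it assumes a plain java.io.File source rather than weave's Location abstraction, and the CredentialsReader class name is hypothetical.

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.security.Credentials;

final class CredentialsReader {
  // Reads a Hadoop token storage file through a buffered stream; the
  // try-with-resources block closes the stream even if reading fails.
  static Credentials read(File file) throws IOException {
    Credentials credentials = new Credentials();
    try (DataInputStream input =
        new DataInputStream(new BufferedInputStream(new FileInputStream(file)))) {
      credentials.readTokenStorageStream(input);
    }
    return credentials;
  }
}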
Use of java.io.BufferedInputStream in project buck by facebook.
In class HttpDownloader, the method fetch:
@Override
public boolean fetch(BuckEventBus eventBus, URI uri, Optional<PasswordAuthentication> authentication, Path output) throws IOException {
  if (!("https".equals(uri.getScheme()) || "http".equals(uri.getScheme()))) {
    return false;
  }
  DownloadEvent.Started started = DownloadEvent.started(uri);
  eventBus.post(started);
  try {
    HttpURLConnection connection = createConnection(uri);
    if (authentication.isPresent()) {
      if ("https".equals(uri.getScheme()) && connection instanceof HttpsURLConnection) {
        PasswordAuthentication p = authentication.get();
        String authStr = p.getUserName() + ":" + new String(p.getPassword());
        String authEncoded = BaseEncoding.base64().encode(authStr.getBytes(StandardCharsets.UTF_8));
        connection.addRequestProperty("Authorization", "Basic " + authEncoded);
      } else {
        LOG.info("Refusing to send basic authentication over plain http.");
        return false;
      }
    }
    if (HttpURLConnection.HTTP_OK != connection.getResponseCode()) {
      LOG.info("Unable to download %s: %s", uri, connection.getResponseMessage());
      return false;
    }
    long contentLength = connection.getContentLengthLong();
    try (InputStream is = new BufferedInputStream(connection.getInputStream());
        OutputStream os = new BufferedOutputStream(Files.newOutputStream(output))) {
      long read = 0;
      while (true) {
        int r = is.read();
        read++;
        if (r == -1) {
          break;
        }
        if (read % PROGRESS_REPORT_EVERY_N_BYTES == 0) {
          eventBus.post(new DownloadProgressEvent(uri, contentLength, read));
        }
        os.write(r);
      }
    }
    return true;
  } finally {
    eventBus.post(DownloadEvent.finished(started));
  }
}
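Note that the copy loop above reads one byte per call, which keeps the progress accounting exact down to the byte but pays a method call per byte even through the buffer. As a point of comparison (not buck's actual code), a chunked variant might look like the sketch below; the StreamCopy class name is hypothetical.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

final class StreamCopy {
  // Copies all bytes from in to out in 8 KiB chunks and returns the total
  // copied. Chunked reads amortize the per-call overhead that remains even
  // when the underlying stream is buffered; progress events would then be
  // posted per chunk rather than per byte.
  static long copy(InputStream in, OutputStream out) throws IOException {
    byte[] buf = new byte[8192];
    long total = 0;
    int n;
    while ((n = in.read(buf)) != -1) {
      out.write(buf, 0, n);
      total += n;
    }
    return total;
  }
}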
Use of java.io.BufferedInputStream in project buck by facebook.
In class FatJar, the method load:
/**
 * @return the {@link FatJar} object deserialized from the resource name via {@code loader}.
 */
public static FatJar load(ClassLoader loader) throws XMLStreamException, JAXBException, IOException {
  InputStream inputStream = loader.getResourceAsStream(FAT_JAR_INFO_RESOURCE);
  try {
    BufferedInputStream bufferedInputStream = new BufferedInputStream(inputStream);
    try {
      XMLEventReader xmlEventReader = XMLInputFactory.newFactory().createXMLEventReader(bufferedInputStream);
      JAXBContext context = JAXBContext.newInstance(FatJar.class);
      Unmarshaller unmarshaller = context.createUnmarshaller();
      JAXBElement<FatJar> jaxbElementA = unmarshaller.unmarshal(xmlEventReader, FatJar.class);
      return jaxbElementA.getValue();
    } finally {
      bufferedInputStream.close();
    }
  } finally {
    inputStream.close();
  }
}
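The nested try/finally blocks above can be collapsed with try-with-resources, which also guards against getResourceAsStream returning null when the resource is absent. A sketch of such a rewrite follows; the FatJarLoader and loadSketch names and the explicit resource parameter are assumptions, not buck's API.

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;

final class FatJarLoader {
  // Hypothetical rewrite; FatJar is the JAXB-annotated class from the snippet above.
  static FatJar loadSketch(ClassLoader loader, String resource)
      throws XMLStreamException, JAXBException, IOException {
    InputStream raw = loader.getResourceAsStream(resource);
    if (raw == null) {
      throw new IOException("Resource not found: " + resource);
    }
    // Closing the BufferedInputStream also closes the wrapped resource stream.
    try (InputStream in = new BufferedInputStream(raw)) {
      return JAXBContext.newInstance(FatJar.class)
          .createUnmarshaller()
          .unmarshal(XMLInputFactory.newFactory().createXMLEventReader(in), FatJar.class)
          .getValue();
    }
  }
}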
Use of java.io.BufferedInputStream in project buck by facebook.
In class AppleInfoPlistParsing, the method getBundleIdFromPlistStream:
/**
 * Extracts the bundle ID (CFBundleIdentifier) from an Info.plist, returning it if present.
 */
public static Optional<String> getBundleIdFromPlistStream(InputStream inputStream) throws IOException {
  NSDictionary infoPlist;
  try (BufferedInputStream bufferedInputStream = new BufferedInputStream(inputStream)) {
    try {
      infoPlist = (NSDictionary) PropertyListParser.parse(bufferedInputStream);
    } catch (Exception e) {
      throw new IOException(e);
    }
  }
  NSObject bundleId = infoPlist.objectForKey("CFBundleIdentifier");
  if (bundleId == null) {
    return Optional.empty();
  } else {
    return Optional.of(bundleId.toString());
  }
}
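A hypothetical caller, showing how the parser is typically fed from a file on disk; the BundleIdExample class is illustrative and not part of buck.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Optional;

final class BundleIdExample {
  public static void main(String[] args) throws IOException {
    // Expects the path to an Info.plist as the first argument.
    try (InputStream in = new FileInputStream(args[0])) {
      Optional<String> bundleId = AppleInfoPlistParsing.getBundleIdFromPlistStream(in);
      System.out.println(bundleId.orElse("<no CFBundleIdentifier>"));
    }
  }
}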
Use of java.io.BufferedInputStream in project buck by facebook.
In class OfflineScribeLogger, the method sendStoredLogs:
private synchronized void sendStoredLogs() {
  ImmutableSortedSet<Path> logsPaths;
  try {
    if (!filesystem.isDirectory(logDir)) {
      // No logs to submit to Scribe.
      return;
    }
    logsPaths = filesystem.getMtimeSortedMatchingDirectoryContents(logDir, LOGFILE_PATTERN);
  } catch (Exception e) {
    LOG.error(e, "Fetching stored logs list failed.");
    return;
  }
  long totalBytesToSend = 0;
  for (Path logPath : logsPaths) {
    // Sending should cease if storing or closing has been initiated.
    if (startedStoring || startedClosing) {
      break;
    }
    // Get iterator.
    Iterator<ScribeData> it;
    File logFile;
    try {
      logFile = logPath.toFile();
      totalBytesToSend += logFile.length();
      if (totalBytesToSend > maxScribeOfflineLogsBytes) {
        LOG.warn("Total size of offline logs exceeds the limit. Ceasing to send them to Scribe.");
        return;
      }
      InputStream logFileStream;
      try {
        logFileStream = new BufferedInputStream(new FileInputStream(logFile), BUFFER_SIZE);
      } catch (FileNotFoundException e) {
        LOG.info(e, "There was a problem getting stream for logfile: %s. Likely logfile was resent and " + "deleted by a concurrent Buck command.", logPath);
        continue;
      }
      it = new ObjectMapper().readValues(new JsonFactory().createParser(logFileStream), ScribeData.class);
    } catch (Exception e) {
      LOG.error(e, "Failed to initiate reading from: %s. File may be corrupted.", logPath);
      continue;
    }
    // Read and submit.
    int scribeLinesInFile = 0;
    List<ListenableFuture<Void>> logFutures = new LinkedList<>();
    Map<String, CategoryData> logReadData = new HashMap<>();
    try {
      boolean interrupted = false;
      // Read data and build per-category clusters - dispatch if needed.
      while (it.hasNext()) {
        if (startedStoring || startedClosing) {
          interrupted = true;
          break;
        }
        ScribeData newData = it.next();
        // Prepare map entry for new data (dispatch old data if needed).
        if (!logReadData.containsKey(newData.getCategory())) {
          logReadData.put(newData.getCategory(), new CategoryData());
        }
        CategoryData categoryData = logReadData.get(newData.getCategory());
        if (categoryData.getLinesBytes() > CLUSTER_DISPATCH_SIZE) {
          logFutures.add(scribeLogger.log(newData.getCategory(), categoryData.getLines()));
          categoryData.clearData();
        }
        // Add new data to the cluster for the category.
        for (String line : newData.getLines()) {
          categoryData.addLine(line);
          scribeLinesInFile++;
        }
      }
      // Send remaining data from per-category clusters.
      if (!interrupted) {
        for (Map.Entry<String, CategoryData> logReadDataEntry : logReadData.entrySet()) {
          if (startedStoring || startedClosing) {
            interrupted = true;
            break;
          }
          List<String> categoryLines = logReadDataEntry.getValue().getLines();
          if (categoryLines.size() > 0) {
            logFutures.add(scribeLogger.log(logReadDataEntry.getKey(), categoryLines));
          }
        }
      }
      if (interrupted) {
        LOG.info("Stopped while sending from offline log (it will not be removed): %s.", logPath);
        logFutures.clear();
        break;
      }
    } catch (Exception e) {
      LOG.error(e, "Error while reading offline log from: %s. This log will not be removed now. If this " + "error reappears in further runs, the file may be corrupted and should be deleted.", logPath);
      logFutures.clear();
      continue;
    } finally {
      logReadData.clear();
    }
    // Confirm data was successfully sent and remove logfile.
    try {
      Futures.allAsList(logFutures).get(LOG_TIMEOUT, LOG_TIMEOUT_UNIT);
      totalBytesResent.inc(logFile.length());
      totalLinesResent.inc(scribeLinesInFile);
      logfilesResent.inc();
      try {
        filesystem.deleteFileAtPathIfExists(logPath);
      } catch (Exception e) {
        LOG.error(e, "Failed to remove successfully resent offline log. Stopping sending.");
        break;
      }
    } catch (Exception e) {
      LOG.info("Failed to send all data from offline log: %s. Log will not be removed.", logPath);
      // Do not attempt to send data from further logfiles - likely there are network issues.
      break;
    } finally {
      logFutures.clear();
    }
  }
}
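The key streaming move above is Jackson's ObjectMapper.readValues, which yields an iterator that deserializes one ScribeData value per next() call, so a large offline log never has to be held in memory at once. A minimal sketch of that pattern follows; the JsonLogReader class is hypothetical, and any JSON-bound POJO works in place of ScribeData.

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

final class JsonLogReader {
  // Opens a buffered stream over a file of concatenated JSON values and
  // returns a lazy iterator. Note: with a caller-supplied parser, Jackson
  // does not manage the stream for you, so close it once iteration is done.
  static <T> MappingIterator<T> openValues(String path, Class<T> type) throws IOException {
    InputStream in = new BufferedInputStream(new FileInputStream(path));
    return new ObjectMapper().readValues(new JsonFactory().createParser(in), type);
  }
}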