Use of org.apache.storm.blobstore.InputStreamWithMeta in project storm by apache:
class Utils, method downloadResourcesAsSupervisorAttempt.
/**
 * Downloads a blob from the blob store to a local file and verifies that the number of
 * bytes copied matches the blob's advertised length.
 *
 * @param cb        client blob store to read from
 * @param key       key of the blob to download
 * @param localFile destination path on the local filesystem
 * @return true if the complete blob was downloaded; false on any failure (the partial
 *         file is deleted in that case)
 */
private static boolean downloadResourcesAsSupervisorAttempt(ClientBlobStore cb, String key, String localFile) {
    boolean isSuccess = false;
    try (FileOutputStream out = new FileOutputStream(localFile);
            InputStreamWithMeta in = cb.getBlob(key)) {
        long fileSize = in.getFileLength();
        byte[] buffer = new byte[1024];
        int len;
        // Accumulate in a long: an int counter overflows for blobs larger than 2 GB and
        // would report a spurious size mismatch even though the download succeeded.
        long downloadFileSize = 0;
        while ((len = in.read(buffer)) >= 0) {
            out.write(buffer, 0, len);
            downloadFileSize += len;
        }
        // Success only if every advertised byte actually arrived.
        isSuccess = (fileSize == downloadFileSize);
    } catch (TException | IOException e) {
        LOG.error("An exception happened while downloading {} from blob store.", localFile, e);
    }
    if (!isSuccess) {
        // Remove any partially written file so callers never observe a truncated blob.
        try {
            Files.deleteIfExists(Paths.get(localFile));
        } catch (IOException ex) {
            LOG.error("Failed trying to delete the partially downloaded {}", localFile, ex);
        }
    }
    return isSuccess;
}
Use of org.apache.storm.blobstore.InputStreamWithMeta in project storm by apache:
class Nimbus, method beginBlobDownload.
/**
 * Begins a blob download session: opens the blob stream, registers it in
 * {@code blobDownloaders} under a fresh session id, and returns the session handle with
 * the blob's version and total size.
 *
 * @param key key of the blob to download
 * @return the download session descriptor (session id, version, data size)
 * @throws AuthorizationException if the caller may not read the blob
 * @throws KeyNotFoundException   if the key does not exist
 * @throws TException             on other thrift-layer failures
 */
@SuppressWarnings("deprecation")
@Override
public BeginDownloadResult beginBlobDownload(String key) throws AuthorizationException, KeyNotFoundException, TException {
    InputStreamWithMeta is = null;
    try {
        is = blobStore.getBlob(key, getSubject());
        String sessionId = Utils.uuid();
        BeginDownloadResult ret = new BeginDownloadResult(is.getVersion(), sessionId);
        ret.set_data_size(is.getFileLength());
        blobDownloaders.put(sessionId, new BufferInputStream(is, (int) conf.getOrDefault(Config.STORM_BLOBSTORE_INPUTSTREAM_BUFFER_SIZE_BYTES, 65536)));
        LOG.info("Created download session {} for {}", sessionId, key);
        return ret;
    } catch (Exception e) {
        LOG.warn("begin blob download exception.", e);
        // The stream was opened but never handed off to a download session:
        // close it here so a failed begin does not leak the blob stream.
        if (is != null) {
            try {
                is.close();
            } catch (IOException ignored) {
                // best effort: the original failure below is the one that matters
            }
        }
        if (e instanceof TException) {
            throw (TException) e;
        }
        throw new RuntimeException(e);
    }
}
Use of org.apache.storm.blobstore.InputStreamWithMeta in project storm by apache:
class Localizer, method downloadBlob.
/**
 * Downloads a blob to a version-suffixed local file (optionally uncompressing it),
 * records the blob version in a side file, and atomically flips the {@code key.current}
 * symlink to the new version. Retries up to {@code _blobDownloadRetries} times on
 * retriable failures, cleaning up partial downloads between attempts.
 *
 * @param conf       topology/cluster configuration
 * @param key        blob key to download
 * @param localFile  base local path (version suffix and symlink names derive from it)
 * @param user       user who must have READ access to the blob
 * @param uncompress whether to unpack the downloaded archive
 * @param isUpdate   true if replacing an existing local version (keeps the version file
 *                   on failure in that case)
 * @return descriptor of the localized resource
 * @throws AuthorizationException if the user may not read the blob (not retried)
 * @throws KeyNotFoundException   if the key disappears (retried, then rethrown)
 * @throws IOException            on persistent I/O failure after all retries
 */
private LocalizedResource downloadBlob(Map conf, String key, File localFile, String user, boolean uncompress, boolean isUpdate) throws AuthorizationException, KeyNotFoundException, IOException {
    ClientBlobStore blobstore = null;
    try {
        blobstore = getClientBlobStore();
        long nimbusBlobVersion = Utils.nimbusVersionOfBlob(key, blobstore);
        long oldVersion = Utils.localVersionOfBlob(localFile.toString());
        int numTries = 0;
        String localizedPath = localFile.toString();
        String localFileWithVersion = Utils.constructBlobWithVersionFileName(localFile.toString(), nimbusBlobVersion);
        String localVersionFile = Utils.constructVersionFileName(localFile.toString());
        String downloadFile = localFileWithVersion;
        if (uncompress) {
            // we need to download to temp file and then unpack into the one requested
            downloadFile = new File(localFile.getParent(), TO_UNCOMPRESS + localFile.getName()).toString();
        }
        while (numTries < _blobDownloadRetries) {
            numTries++;
            try {
                // Check access before touching the filesystem; authorization failures
                // are non-retriable.
                if (!Utils.canUserReadBlob(blobstore.getBlobMeta(key), user)) {
                    throw new AuthorizationException(user + " does not have READ access to " + key);
                }
                // try-with-resources guarantees both streams close on every path;
                // previously the blob input stream leaked whenever the copy failed.
                try (FileOutputStream out = new FileOutputStream(downloadFile);
                        InputStreamWithMeta in = blobstore.getBlob(key)) {
                    byte[] buffer = new byte[1024];
                    int len;
                    while ((len = in.read(buffer)) >= 0) {
                        out.write(buffer, 0, len);
                    }
                }
                if (uncompress) {
                    Utils.unpack(new File(downloadFile), new File(localFileWithVersion));
                    LOG.debug("uncompressed " + downloadFile + " to: " + localFileWithVersion);
                }
                // Next write the version.
                LOG.info("Blob: " + key + " updated with new Nimbus-provided version: " + nimbusBlobVersion + " local version was: " + oldVersion);
                // The false parameter ensures overwriting the version file, not appending
                try (PrintWriter writer = new PrintWriter(new BufferedWriter(new FileWriter(localVersionFile, false)))) {
                    writer.println(nimbusBlobVersion);
                }
                try {
                    setBlobPermissions(conf, user, localFileWithVersion);
                    setBlobPermissions(conf, user, localVersionFile);
                    // Update the key.current symlink. First create tmp symlink and do
                    // move of tmp to current so that the operation is atomic.
                    String tmpUuidLocal = java.util.UUID.randomUUID().toString();
                    LOG.debug("Creating a symlink @" + localFile + "." + tmpUuidLocal + " , " + "linking to: " + localFile + "." + nimbusBlobVersion);
                    File uuidSymlink = new File(localFile + "." + tmpUuidLocal);
                    Files.createSymbolicLink(uuidSymlink.toPath(), Paths.get(Utils.constructBlobWithVersionFileName(localFile.toString(), nimbusBlobVersion)));
                    File currentSymlink = new File(Utils.constructBlobCurrentSymlinkName(localFile.toString()));
                    Files.move(uuidSymlink.toPath(), currentSymlink.toPath(), ATOMIC_MOVE);
                } catch (IOException e) {
                    // restore the old version to the version file so a half-applied
                    // update is not recorded as current
                    try (PrintWriter restoreWriter = new PrintWriter(new BufferedWriter(new FileWriter(localVersionFile, false)))) {
                        restoreWriter.println(oldVersion);
                    } catch (IOException ignore) {
                        // best effort: the original failure is rethrown below
                    }
                    throw e;
                }
                String oldBlobFile = localFile + "." + oldVersion;
                try {
                    // anyone trying to read it.
                    if ((oldVersion != -1) && (oldVersion != nimbusBlobVersion)) {
                        LOG.info("Removing an old blob file:" + oldBlobFile);
                        Files.delete(Paths.get(oldBlobFile));
                    }
                } catch (IOException e) {
                    // At this point we have downloaded everything and moved symlinks. If the remove of
                    // old fails just log an error
                    LOG.error("Exception removing old blob version: " + oldBlobFile);
                }
                break;
            } catch (AuthorizationException ae) {
                // we consider this non-retriable exceptions
                new File(downloadFile).delete();
                throw ae;
            } catch (IOException | KeyNotFoundException e) {
                // Clean up the partial download before retrying or rethrowing.
                new File(downloadFile).delete();
                if (uncompress) {
                    try {
                        FileUtils.deleteDirectory(new File(localFileWithVersion));
                    } catch (IOException ignore) {
                        // best effort cleanup of the partially unpacked directory
                    }
                }
                if (!isUpdate) {
                    // don't want to remove existing version file if its an update
                    new File(localVersionFile).delete();
                }
                if (numTries < _blobDownloadRetries) {
                    LOG.error("Failed to download blob, retrying", e);
                } else {
                    throw e;
                }
            }
        }
        return new LocalizedResource(key, localizedPath, uncompress);
    } finally {
        if (blobstore != null) {
            blobstore.shutdown();
        }
    }
}
Use of org.apache.storm.blobstore.InputStreamWithMeta in project storm by apache:
class Zookeeper, method leaderLatchListenerImpl.
/**
 * Builds a leader latch listener invoked when this nimbus gains or loses leadership.
 * On gaining leadership it verifies that the code/jar/conf blobs for every active
 * topology, plus their declared dependency blobs, are available in the local blob
 * store; if anything is missing it relinquishes leadership by closing the latch.
 */
public static LeaderLatchListener leaderLatchListenerImpl(final Map conf, final CuratorFramework zk, final BlobStore blobStore, final LeaderLatch leaderLatch) throws UnknownHostException {
    final String hostName = InetAddress.getLocalHost().getCanonicalHostName();
    return new LeaderLatchListener() {

        final String STORM_JAR_SUFFIX = "-stormjar.jar";
        final String STORM_CODE_SUFFIX = "-stormcode.ser";
        final String STORM_CONF_SUFFIX = "-stormconf.ser";

        @Override
        public void isLeader() {
            Set<String> activeTopologyIds = new TreeSet<>(Zookeeper.getChildren(zk, conf.get(Config.STORM_ZOOKEEPER_ROOT) + ClusterUtils.STORMS_SUBTREE, false));
            Set<String> activeTopologyBlobKeys = populateTopologyBlobKeys(activeTopologyIds);
            Set<String> activeTopologyCodeKeys = filterTopologyCodeKeys(activeTopologyBlobKeys);
            Set<String> allLocalBlobKeys = Sets.newHashSet(blobStore.listKeys());
            Set<String> allLocalTopologyBlobKeys = filterTopologyBlobKeys(allLocalBlobKeys);
            // this finds all active topologies blob keys from all local topology blob keys
            Sets.SetView<String> diffTopology = Sets.difference(activeTopologyBlobKeys, allLocalTopologyBlobKeys);
            LOG.info("active-topology-blobs [{}] local-topology-blobs [{}] diff-topology-blobs [{}]", generateJoinedString(activeTopologyIds), generateJoinedString(allLocalTopologyBlobKeys), generateJoinedString(diffTopology));
            if (diffTopology.isEmpty()) {
                Set<String> activeTopologyDependencies = getTopologyDependencyKeys(activeTopologyCodeKeys);
                // this finds all dependency blob keys from active topologies from all local blob keys
                Sets.SetView<String> diffDependencies = Sets.difference(activeTopologyDependencies, allLocalBlobKeys);
                LOG.info("active-topology-dependencies [{}] local-blobs [{}] diff-topology-dependencies [{}]", generateJoinedString(activeTopologyDependencies), generateJoinedString(allLocalBlobKeys), generateJoinedString(diffDependencies));
                if (diffDependencies.isEmpty()) {
                    LOG.info("Accepting leadership, all active topologies and corresponding dependencies found locally.");
                } else {
                    LOG.info("Code for all active topologies is available locally, but some dependencies are not found locally, giving up leadership.");
                    closeLatch();
                }
            } else {
                LOG.info("code for all active topologies not available locally, giving up leadership.");
                closeLatch();
            }
        }

        @Override
        public void notLeader() {
            LOG.info("{} lost leadership.", hostName);
        }

        // Renders a key set as a comma-separated string for logging.
        private String generateJoinedString(Set<String> activeTopologyIds) {
            return Joiner.on(",").join(activeTopologyIds);
        }

        // Expands each topology id into its three expected blob keys (jar, code, conf).
        private Set<String> populateTopologyBlobKeys(Set<String> activeTopologyIds) {
            Set<String> activeTopologyBlobKeys = new TreeSet<>();
            for (String activeTopologyId : activeTopologyIds) {
                activeTopologyBlobKeys.add(activeTopologyId + STORM_JAR_SUFFIX);
                activeTopologyBlobKeys.add(activeTopologyId + STORM_CODE_SUFFIX);
                activeTopologyBlobKeys.add(activeTopologyId + STORM_CONF_SUFFIX);
            }
            return activeTopologyBlobKeys;
        }

        // Keeps only keys that look like topology blobs (jar/code/conf suffixes).
        private Set<String> filterTopologyBlobKeys(Set<String> blobKeys) {
            Set<String> topologyBlobKeys = new HashSet<>();
            for (String blobKey : blobKeys) {
                if (blobKey.endsWith(STORM_JAR_SUFFIX) || blobKey.endsWith(STORM_CODE_SUFFIX) || blobKey.endsWith(STORM_CONF_SUFFIX)) {
                    topologyBlobKeys.add(blobKey);
                }
            }
            return topologyBlobKeys;
        }

        // Keeps only topology code (stormcode.ser) keys.
        private Set<String> filterTopologyCodeKeys(Set<String> blobKeys) {
            Set<String> topologyCodeKeys = new HashSet<>();
            for (String blobKey : blobKeys) {
                if (blobKey.endsWith(STORM_CODE_SUFFIX)) {
                    topologyCodeKeys.add(blobKey);
                }
            }
            return topologyCodeKeys;
        }

        /**
         * Reads each topology's serialized StormTopology from the blob store and
         * collects all declared dependency jar/artifact blob keys.
         */
        private Set<String> getTopologyDependencyKeys(Set<String> activeTopologyCodeKeys) {
            Set<String> activeTopologyDependencies = new TreeSet<>();
            Subject subject = ReqContext.context().subject();
            for (String activeTopologyCodeKey : activeTopologyCodeKeys) {
                // try-with-resources closes the blob stream on every path; the previous
                // version leaked it (compare LeaderListenerCallback's implementation).
                try (InputStreamWithMeta blob = blobStore.getBlob(activeTopologyCodeKey, subject)) {
                    // Cast directly instead of the deprecated new Long(...).intValue();
                    // semantics are identical (truncating long-to-int conversion).
                    byte[] blobContent = IOUtils.readFully(blob, (int) blob.getFileLength());
                    StormTopology stormCode = Utils.deserialize(blobContent, StormTopology.class);
                    if (stormCode.is_set_dependency_jars()) {
                        activeTopologyDependencies.addAll(stormCode.get_dependency_jars());
                    }
                    if (stormCode.is_set_dependency_artifacts()) {
                        activeTopologyDependencies.addAll(stormCode.get_dependency_artifacts());
                    }
                } catch (AuthorizationException | KeyNotFoundException | IOException e) {
                    LOG.error("Exception occurs while reading blob for key: " + activeTopologyCodeKey + ", exception: " + e, e);
                    throw new RuntimeException("Exception occurs while reading blob for key: " + activeTopologyCodeKey + ", exception: " + e, e);
                }
            }
            return activeTopologyDependencies;
        }

        // Gives up leadership; failure to close the latch is fatal for this listener.
        private void closeLatch() {
            try {
                leaderLatch.close();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    };
}
Use of org.apache.storm.blobstore.InputStreamWithMeta in project storm by apache:
class LeaderListenerCallback, method getTopologyDependencyKeys.
/**
 * Reads each topology's serialized StormTopology blob and collects all declared
 * dependency jar/artifact blob keys.
 *
 * @param activeTopologyCodeKeys keys of the stormcode.ser blobs of active topologies
 * @return union of dependency jar and artifact keys across all given topologies
 * @throws RuntimeException wrapping any authorization, missing-key, or I/O failure
 */
private Set<String> getTopologyDependencyKeys(Set<String> activeTopologyCodeKeys) {
    Set<String> activeTopologyDependencies = new TreeSet<>();
    Subject subject = ReqContext.context().subject();
    for (String activeTopologyCodeKey : activeTopologyCodeKeys) {
        // try-with-resources closes the blob stream on both success and failure
        try (InputStreamWithMeta blob = blobStore.getBlob(activeTopologyCodeKey, subject)) {
            // Cast directly instead of the deprecated new Long(...).intValue();
            // semantics are identical (truncating long-to-int conversion).
            byte[] blobContent = IOUtils.readFully(blob, (int) blob.getFileLength());
            StormTopology stormCode = Utils.deserialize(blobContent, StormTopology.class);
            if (stormCode.is_set_dependency_jars()) {
                activeTopologyDependencies.addAll(stormCode.get_dependency_jars());
            }
            if (stormCode.is_set_dependency_artifacts()) {
                activeTopologyDependencies.addAll(stormCode.get_dependency_artifacts());
            }
        } catch (AuthorizationException | KeyNotFoundException | IOException e) {
            LOG.error("Exception occurs while reading blob for key: " + activeTopologyCodeKey + ", exception: " + e, e);
            throw new RuntimeException("Exception occurs while reading blob for key: " + activeTopologyCodeKey + ", exception: " + e, e);
        }
    }
    return activeTopologyDependencies;
}
Aggregations