Use of org.apache.storm.generated.KeyAlreadyExistsException in project storm by apache.
The class BlobStoreAPIWordCountTopology, method main.
public static void main(String[] args) {
    prepare();
    BlobStoreAPIWordCountTopology wc = new BlobStoreAPIWordCountTopology();
    try {
        File file = createFile(fileName);
        // Creating the blob again before launching the topology
        createBlobWithContent(key, store, file);
        // Blobstore launch command with topology blobstore map.
        // Here we are giving it a local name so that we can read from the file:
        // bin/storm jar examples/storm-starter/storm-starter-topologies-0.11.0-SNAPSHOT.jar
        // org.apache.storm.starter.BlobStoreAPIWordCountTopology bl -c
        // topology.blobstore.map='{"key":{"localname":"blacklist.txt", "uncompress":"false"}}'
        wc.buildAndLaunchWordCountTopology(args);
        // Update the file a few times, once every 5 seconds
        for (int i = 0; i < 10; i++) {
            updateBlobWithContent(key, store, updateFile(file));
            Utils.sleep(5000);
        }
    } catch (KeyAlreadyExistsException kae) {
        LOG.info("Key already exists {}", kae);
    } catch (AuthorizationException | KeyNotFoundException | IOException exp) {
        throw new RuntimeException(exp);
    }
}
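The createBlobWithContent and updateBlobWithContent helpers are not shown above. A minimal sketch of how they could be written against the ClientBlobStore API follows; the class name, the "o::rwa" ACL string, and the use of Files.readAllBytes are illustrative choices, not taken from the topology source.

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.LinkedList;
import java.util.List;

import org.apache.storm.blobstore.AtomicOutputStream;
import org.apache.storm.blobstore.BlobStoreAclHandler;
import org.apache.storm.blobstore.ClientBlobStore;
import org.apache.storm.generated.AccessControl;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.KeyAlreadyExistsException;
import org.apache.storm.generated.KeyNotFoundException;
import org.apache.storm.generated.SettableBlobMeta;

public class BlobContentHelpersSketch {
    // Create the blob under "key" with a world read/write/admin ACL and write the
    // file contents into it. Throws KeyAlreadyExistsException if the key is already
    // present, which main() above logs and ignores.
    static void createBlobWithContent(String key, ClientBlobStore store, File file)
            throws AuthorizationException, KeyAlreadyExistsException, IOException {
        List<AccessControl> acls = new LinkedList<>();
        acls.add(BlobStoreAclHandler.parseAccessControl("o::rwa")); // "other: read/write/admin"
        AtomicOutputStream out = store.createBlob(key, new SettableBlobMeta(acls));
        out.write(Files.readAllBytes(file.toPath()));
        out.close(); // close() commits the blob atomically
    }

    // Replace the existing blob's contents with the updated file.
    static void updateBlobWithContent(String key, ClientBlobStore store, File file)
            throws AuthorizationException, KeyNotFoundException, IOException {
        AtomicOutputStream out = store.updateBlob(key);
        out.write(Files.readAllBytes(file.toPath()));
        out.close();
    }
}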
Use of org.apache.storm.generated.KeyAlreadyExistsException in project storm by apache.
The class HdfsBlobStore, method createBlob.
@Override
public AtomicOutputStream createBlob(String key, SettableBlobMeta meta, Subject who) throws AuthorizationException, KeyAlreadyExistsException {
    if (meta.get_replication_factor() <= 0) {
        meta.set_replication_factor((int) conf.get(Config.STORM_BLOBSTORE_REPLICATION_FACTOR));
    }
    who = checkAndGetSubject(who);
    validateKey(key);
    _aclHandler.normalizeSettableBlobMeta(key, meta, who, READ | WRITE | ADMIN);
    BlobStoreAclHandler.validateSettableACLs(key, meta.get_acl());
    _aclHandler.hasPermissions(meta.get_acl(), READ | WRITE | ADMIN, who, key);
    if (_hbs.exists(DATA_PREFIX + key)) {
        throw new KeyAlreadyExistsException(key);
    }
    BlobStoreFileOutputStream mOut = null;
    try {
        BlobStoreFile metaFile = _hbs.write(META_PREFIX + key, true);
        metaFile.setMetadata(meta);
        mOut = new BlobStoreFileOutputStream(metaFile);
        mOut.write(Utils.thriftSerialize(meta));
        mOut.close();
        mOut = null;
        BlobStoreFile dataFile = _hbs.write(DATA_PREFIX + key, true);
        dataFile.setMetadata(meta);
        return new BlobStoreFileOutputStream(dataFile);
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        if (mOut != null) {
            try {
                mOut.cancel();
            } catch (IOException e) {
                // Ignored
            }
        }
    }
}
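For context, a hedged caller-side sketch of the create-or-update pattern this method enables: attempt createBlob and fall back to updateBlob when KeyAlreadyExistsException reports that the key is taken. The putBlob method and its parameters are assumptions for illustration, not part of HdfsBlobStore.

import java.io.IOException;
import javax.security.auth.Subject;

import org.apache.storm.blobstore.AtomicOutputStream;
import org.apache.storm.blobstore.BlobStore;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.KeyAlreadyExistsException;
import org.apache.storm.generated.KeyNotFoundException;
import org.apache.storm.generated.SettableBlobMeta;

public class CreateOrUpdateBlobSketch {
    static void putBlob(BlobStore store, String key, byte[] payload, SettableBlobMeta meta, Subject who)
            throws AuthorizationException, KeyNotFoundException, IOException {
        AtomicOutputStream out;
        try {
            // Fails with KeyAlreadyExistsException if DATA_PREFIX + key is already present.
            out = store.createBlob(key, meta, who);
        } catch (KeyAlreadyExistsException e) {
            // The key is taken: overwrite the existing blob instead.
            out = store.updateBlob(key, who);
        }
        try {
            out.write(payload);
            out.close(); // commits the write atomically
        } catch (IOException e) {
            out.cancel(); // discards the partially written blob
            throw e;
        }
    }
}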
Use of org.apache.storm.generated.KeyAlreadyExistsException in project storm by apache.
The class DependencyUploader, method uploadFiles.
public List<String> uploadFiles(List<File> dependencies, boolean cleanupIfFails) throws IOException, AuthorizationException {
    checkFilesExist(dependencies);
    List<String> keys = new ArrayList<>(dependencies.size());
    try {
        for (File dependency : dependencies) {
            String fileName = dependency.getName();
            String key = BlobStoreUtils.generateDependencyBlobKey(BlobStoreUtils.applyUUIDToFileName(fileName));
            try {
                uploadDependencyToBlobStore(key, dependency);
            } catch (KeyAlreadyExistsException e) {
                // it should never happen, since we apply a UUID to the file name
                throw new RuntimeException(e);
            }
            keys.add(key);
        }
    } catch (Throwable e) {
        if (getBlobStore() != null && cleanupIfFails) {
            deleteBlobs(keys);
        }
        throw new RuntimeException(e);
    }
    return keys;
}
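A hedged usage sketch of uploadFiles, assuming a DependencyUploader that has already been given its configuration and initialized; the helper class, method name, and jar paths below are placeholders, not from the source.

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.storm.dependency.DependencyUploader;
import org.apache.storm.generated.AuthorizationException;

public class UploadFilesSketch {
    // "uploader" is assumed to be already configured and initialized.
    static List<String> uploadLocalJars(DependencyUploader uploader) throws IOException, AuthorizationException {
        List<File> jars = Arrays.asList(
                new File("deps/my-dep-1.0.jar"),
                new File("deps/other-dep-2.3.jar"));
        // cleanupIfFails = true: blobs already created for this call are deleted again
        // if a later upload fails, so no half-uploaded dependency set is left behind.
        return uploader.uploadFiles(jars, true);
    }
}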
Use of org.apache.storm.generated.KeyAlreadyExistsException in project storm by apache.
The class DependencyUploader, method uploadArtifacts.
public List<String> uploadArtifacts(Map<String, File> artifacts) {
    checkFilesExist(artifacts.values());
    List<String> keys = new ArrayList<>(artifacts.size());
    try {
        for (Map.Entry<String, File> artifactToFile : artifacts.entrySet()) {
            String artifact = artifactToFile.getKey();
            File dependency = artifactToFile.getValue();
            String key = BlobStoreUtils.generateDependencyBlobKey(convertArtifactToJarFileName(artifact));
            try {
                uploadDependencyToBlobStore(key, dependency);
            } catch (KeyAlreadyExistsException e) {
                // we lost the race, but it doesn't matter: the blob already exists
            }
            keys.add(key);
        }
    } catch (Throwable e) {
        throw new RuntimeException(e);
    }
    return keys;
}
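A similar hedged sketch for uploadArtifacts; the artifact coordinate and jar path are placeholders. Because the blob key is derived deterministically from the coordinates here, losing the race to another submitter is harmless, which is why the KeyAlreadyExistsException is swallowed rather than rethrown.

import java.io.File;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import org.apache.storm.dependency.DependencyUploader;

public class UploadArtifactsSketch {
    // "uploader" is assumed to be already configured and initialized.
    static List<String> uploadCommonsLang(DependencyUploader uploader) {
        Map<String, File> artifacts = new LinkedHashMap<>();
        // Coordinates and the local path are illustrative only.
        artifacts.put("org.apache.commons:commons-lang3:3.12.0",
                new File("/tmp/commons-lang3-3.12.0.jar"));
        // Unlike uploadFiles(), no UUID is embedded in the key, so two topologies
        // submitting the same artifact can race; the loser reuses the existing blob.
        return uploader.uploadArtifacts(artifacts);
    }
}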
Use of org.apache.storm.generated.KeyAlreadyExistsException in project storm by apache.
The class BlobStoreUtils, method downloadMissingBlob.
// Download missing blobs from potential nimbodes
public static boolean downloadMissingBlob(Map<String, Object> conf, BlobStore blobStore, String key, Set<NimbusInfo> nimbusInfos) throws TTransportException {
    ReadableBlobMeta rbm;
    ClientBlobStore remoteBlobStore;
    InputStreamWithMeta in;
    boolean isSuccess = false;
    LOG.debug("Download blob NimbusInfos {}", nimbusInfos);
    for (NimbusInfo nimbusInfo : nimbusInfos) {
        if (isSuccess) {
            break;
        }
        LOG.debug("Download blob key: {}, NimbusInfo {}", key, nimbusInfo);
        try (NimbusClient client = new NimbusClient(conf, nimbusInfo.getHost(), nimbusInfo.getPort(), null)) {
            rbm = client.getClient().getBlobMeta(key);
            remoteBlobStore = new NimbusBlobStore();
            remoteBlobStore.setClient(conf, client);
            in = remoteBlobStore.getBlob(key);
            blobStore.createBlob(key, in, rbm.get_settable(), getNimbusSubject());
            // Verify the key now exists in the local blob store before declaring success;
            // if it already existed, createBlob above throws KeyAlreadyExistsException instead.
            Iterator<String> keyIterator = blobStore.listKeys();
            while (keyIterator.hasNext()) {
                if (keyIterator.next().equals(key)) {
                    LOG.debug("Success creating key, {}", key);
                    isSuccess = true;
                    break;
                }
            }
        } catch (IOException | AuthorizationException exception) {
            throw new RuntimeException(exception);
        } catch (KeyAlreadyExistsException kae) {
            LOG.info("KeyAlreadyExistsException Key: {} {}", key, kae);
        } catch (KeyNotFoundException knf) {
            // Catching and logging KeyNotFoundException because, if
            // there is a subsequent update and delete, the non-leader
            // nimbodes might throw an exception.
            LOG.info("KeyNotFoundException Key: {} {}", key, knf);
        } catch (Exception exp) {
            // Logging an exception raised while the client is connecting
            LOG.error("Exception {}", exp);
        }
    }
    if (!isSuccess) {
        LOG.error("Could not download the blob with key: {}", key);
    }
    return isSuccess;
}
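A hedged sketch of how a Nimbus-side caller might drive this utility for a set of locally missing keys; the class, method, and variable names are illustrative, and the broad throws Exception clause simply stands in for the TTransportException the utility declares (its package differs between Storm versions).

import java.util.Map;
import java.util.Set;

import org.apache.storm.blobstore.BlobStore;
import org.apache.storm.blobstore.BlobStoreUtils;
import org.apache.storm.nimbus.NimbusInfo;

public class BlobSyncSketch {
    // Try to pull every locally missing key from the other Nimbus nodes; keys that
    // cannot be fetched from any of them are reported and left for the next sync round.
    static void syncMissingBlobs(Map<String, Object> conf, BlobStore localStore,
                                 Set<String> missingKeys, Set<NimbusInfo> otherNimbuses) throws Exception {
        for (String key : missingKeys) {
            if (!BlobStoreUtils.downloadMissingBlob(conf, localStore, key, otherNimbuses)) {
                System.err.println("Blob " + key + " not available on any Nimbus yet; retrying on the next sync");
            }
        }
    }
}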