Example 21 with AuthorizationException

Use of org.apache.storm.generated.AuthorizationException in project storm by apache.

From class ReturnResultsReducer, method complete():

@Override
public void complete(ReturnResultsState state, TridentCollector collector) {
    // only one of the multireducers will receive the tuples
    if (state.returnInfo != null) {
        String result = JSONValue.toJSONString(state.results);
        Map retMap = null;
        try {
            retMap = (Map) JSONValue.parseWithException(state.returnInfo);
        } catch (ParseException e) {
            collector.reportError(e);
            return;
        }
        final String host = (String) retMap.get("host");
        final int port = Utils.getInt(retMap.get("port"));
        String id = (String) retMap.get("id");
        DistributedRPCInvocations.Iface client;
        if (local) {
            client = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(host);
        } else {
            List server = new ArrayList() {

                {
                    add(host);
                    add(port);
                }
            };
            if (!_clients.containsKey(server)) {
                try {
                    _clients.put(server, new DRPCInvocationsClient(conf, host, port));
                } catch (TTransportException ex) {
                    throw new RuntimeException(ex);
                }
            }
            client = _clients.get(server);
        }
        try {
            client.result(id, result);
        } catch (AuthorizationException aze) {
            collector.reportError(aze);
        } catch (TException e) {
            collector.reportError(e);
        }
    }
}
Also used: TException (org.apache.thrift.TException), AuthorizationException (org.apache.storm.generated.AuthorizationException), ArrayList (java.util.ArrayList), List (java.util.List), HashMap (java.util.HashMap), Map (java.util.Map), DistributedRPCInvocations (org.apache.storm.generated.DistributedRPCInvocations), TTransportException (org.apache.thrift.transport.TTransportException), DRPCInvocationsClient (org.apache.storm.drpc.DRPCInvocationsClient), ParseException (org.json.simple.parser.ParseException)
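
For context, the returnInfo string parsed above is the JSON map that DRPCSpout attaches to each request tuple (see Example 22 below); complete() reads back the same three keys the spout wrote. A minimal round-trip sketch using the same json-simple calls; the id, host, and port values are purely illustrative:

import java.util.HashMap;
import java.util.Map;
import org.json.simple.JSONValue;
import org.json.simple.parser.ParseException;

public class ReturnInfoRoundTrip {
    public static void main(String[] args) throws ParseException {
        // Build the map the way DRPCSpout does before emitting it.
        Map<String, Object> returnInfo = new HashMap<>();
        returnInfo.put("id", "request-1");          // hypothetical request id
        returnInfo.put("host", "drpc.example.com"); // hypothetical DRPC host
        returnInfo.put("port", 3772);               // illustrative port
        String encoded = JSONValue.toJSONString(returnInfo);

        // ReturnResultsReducer parses the string back into a raw Map.
        Map decoded = (Map) JSONValue.parseWithException(encoded);
        System.out.println(decoded.get("host") + ":" + decoded.get("port"));
    }
}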

Example 22 with AuthorizationException

Use of org.apache.storm.generated.AuthorizationException in project storm by apache.

From class DRPCSpout, method nextTuple():

@Override
public void nextTuple() {
    boolean gotRequest = false;
    if (_local_drpc_id == null) {
        int size = 0;
        synchronized (_clients) {
            //This will only ever grow, so no need to worry about falling off the end
            size = _clients.size();
        }
        for (int i = 0; i < size; i++) {
            DRPCInvocationsClient client;
            synchronized (_clients) {
                client = _clients.get(i);
            }
            if (!client.isConnected()) {
                LOG.warn("DRPCInvocationsClient [{}:{}] is not connected.", client.getHost(), client.getPort());
                reconnectAsync(client);
                continue;
            }
            try {
                DRPCRequest req = client.fetchRequest(_function);
                if (req.get_request_id().length() > 0) {
                    Map returnInfo = new HashMap();
                    returnInfo.put("id", req.get_request_id());
                    returnInfo.put("host", client.getHost());
                    returnInfo.put("port", client.getPort());
                    gotRequest = true;
                    _collector.emit(new Values(req.get_func_args(), JSONValue.toJSONString(returnInfo)), new DRPCMessageId(req.get_request_id(), i));
                    break;
                }
            } catch (AuthorizationException aze) {
                reconnectAsync(client);
                LOG.error("Not authorized to fetch DRPC result from DRPC server", aze);
            } catch (TException e) {
                reconnectAsync(client);
                LOG.error("Failed to fetch DRPC result from DRPC server", e);
            } catch (Exception e) {
                LOG.error("Failed to fetch DRPC result from DRPC server", e);
            }
        }
        checkFutures();
    } else {
        DistributedRPCInvocations.Iface drpc = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(_local_drpc_id);
        if (drpc != null) {
            // can happen during shutdown of drpc while topology is still up
            try {
                DRPCRequest req = drpc.fetchRequest(_function);
                if (req.get_request_id().length() > 0) {
                    Map returnInfo = new HashMap();
                    returnInfo.put("id", req.get_request_id());
                    returnInfo.put("host", _local_drpc_id);
                    returnInfo.put("port", 0);
                    gotRequest = true;
                    _collector.emit(new Values(req.get_func_args(), JSONValue.toJSONString(returnInfo)), new DRPCMessageId(req.get_request_id(), 0));
                }
            } catch (AuthorizationException aze) {
                throw new RuntimeException(aze);
            } catch (TException e) {
                throw new RuntimeException(e);
            }
        }
    }
    if (!gotRequest) {
        Utils.sleep(1);
    }
}
Also used: TException (org.apache.thrift.TException), AuthorizationException (org.apache.storm.generated.AuthorizationException), HashMap (java.util.HashMap), Map (java.util.Map), Values (org.apache.storm.tuple.Values), DistributedRPCInvocations (org.apache.storm.generated.DistributedRPCInvocations), DRPCRequest (org.apache.storm.generated.DRPCRequest)
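
When _local_drpc_id is non-null, the spout bypasses the remote clients and resolves an in-process DRPC handler through ServiceRegistry. A minimal sketch of that lookup, assuming the Storm 1.x LocalDRPC, which registers itself with ServiceRegistry on construction:

import org.apache.storm.LocalDRPC;
import org.apache.storm.generated.DistributedRPCInvocations;
import org.apache.storm.utils.ServiceRegistry;

public class LocalDrpcLookupSketch {
    public static void main(String[] args) {
        // getServiceId() returns the handle DRPCSpout stores as _local_drpc_id.
        LocalDRPC drpc = new LocalDRPC();
        String localDrpcId = drpc.getServiceId();

        // The same lookup nextTuple() performs in local mode; null is possible
        // while the DRPC server shuts down with the topology still up.
        DistributedRPCInvocations.Iface iface =
                (DistributedRPCInvocations.Iface) ServiceRegistry.getService(localDrpcId);
        System.out.println("resolved local DRPC handler: " + (iface != null));
        drpc.shutdown();
    }
}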

Example 23 with AuthorizationException

Use of org.apache.storm.generated.AuthorizationException in project storm by apache.

From class Localizer, method getBlobs():

/**
   * Returns the requested blobs from the existing cache or, if they are not
   * cached yet, downloads them in parallel (up to
   * SUPERVISOR_BLOBSTORE_DOWNLOAD_THREAD_COUNT at a time) and blocks until
   * all of them have been downloaded.
   */
public synchronized List<LocalizedResource> getBlobs(List<LocalResource> localResources, String user, String topo, File userFileDir) throws AuthorizationException, KeyNotFoundException, IOException {
    LocalizedResourceSet newSet = new LocalizedResourceSet(user);
    LocalizedResourceSet lrsrcSet = _userRsrc.putIfAbsent(user, newSet);
    if (lrsrcSet == null) {
        lrsrcSet = newSet;
    }
    ArrayList<LocalizedResource> results = new ArrayList<>();
    ArrayList<Callable<LocalizedResource>> downloads = new ArrayList<>();
    ClientBlobStore blobstore = null;
    try {
        blobstore = getClientBlobStore();
        for (LocalResource localResource : localResources) {
            String key = localResource.getBlobName();
            boolean uncompress = localResource.shouldUncompress();
            LocalizedResource lrsrc = lrsrcSet.get(key, localResource.shouldUncompress());
            boolean isUpdate = false;
            if ((lrsrc != null) && (lrsrc.isUncompressed() == localResource.shouldUncompress()) && (isLocalizedResourceDownloaded(lrsrc))) {
                if (isLocalizedResourceUpToDate(lrsrc, blobstore)) {
                    LOG.debug("blob already exists: {}", key);
                    lrsrc.addReference(topo);
                    results.add(lrsrc);
                    continue;
                }
                LOG.debug("blob exists but isn't up to date: {}", key);
                isUpdate = true;
            }
            // go off to blobstore and get it
            // assume dir passed in exists and has correct permission
            LOG.debug("fetching blob: {}", key);
            File downloadDir = getCacheDirForFiles(userFileDir);
            File localFile = new File(downloadDir, key);
            if (uncompress) {
                // for compressed file, download to archives dir
                downloadDir = getCacheDirForArchives(userFileDir);
                localFile = new File(downloadDir, key);
            }
            downloadDir.mkdir();
            downloads.add(new DownloadBlob(this, _conf, key, localFile, user, uncompress, isUpdate));
        }
    } finally {
        if (blobstore != null) {
            blobstore.shutdown();
        }
    }
    try {
        List<Future<LocalizedResource>> futures = _execService.invokeAll(downloads);
        for (Future<LocalizedResource> futureRsrc : futures) {
            LocalizedResource lrsrc = futureRsrc.get();
            lrsrc.addReference(topo);
            lrsrcSet.add(lrsrc.getKey(), lrsrc, lrsrc.isUncompressed());
            results.add(lrsrc);
        }
    } catch (ExecutionException e) {
        if (e.getCause() instanceof AuthorizationException) {
            throw (AuthorizationException) e.getCause();
        } else if (e.getCause() instanceof KeyNotFoundException) {
            throw (KeyNotFoundException) e.getCause();
        } else {
            throw new IOException("Error getting blobs", e);
        }
    } catch (RejectedExecutionException re) {
        throw new IOException("RejectedExecutionException: ", re);
    } catch (InterruptedException ie) {
        throw new IOException("Interrupted Exception", ie);
    }
    return results;
}
Also used: ClientBlobStore (org.apache.storm.blobstore.ClientBlobStore), AuthorizationException (org.apache.storm.generated.AuthorizationException), KeyNotFoundException (org.apache.storm.generated.KeyNotFoundException), ArrayList (java.util.ArrayList), IOException (java.io.IOException), File (java.io.File), Callable (java.util.concurrent.Callable), Future (java.util.concurrent.Future), ExecutionException (java.util.concurrent.ExecutionException), RejectedExecutionException (java.util.concurrent.RejectedExecutionException)
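
A sketch of how a supervisor-side caller might drive getBlobs, assuming the LocalResource(keyName, shouldUncompress) constructor implied by the getBlobName()/shouldUncompress() accessors above; the keys, user, and directory are hypothetical:

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.KeyNotFoundException;
import org.apache.storm.localizer.LocalResource;
import org.apache.storm.localizer.LocalizedResource;
import org.apache.storm.localizer.Localizer;

public class GetBlobsSketch {
    static List<LocalizedResource> fetch(Localizer localizer, String user, String topo)
            throws AuthorizationException, KeyNotFoundException, IOException {
        // One plain blob and one archive to be unpacked after download.
        List<LocalResource> wanted = Arrays.asList(
                new LocalResource("my-config-blob", false),
                new LocalResource("my-archive-blob", true));
        File userFileDir = new File("/tmp/storm-local/user-files"); // illustrative path
        return localizer.getBlobs(wanted, user, topo, userFileDir);
    }
}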

Example 24 with AuthorizationException

Use of org.apache.storm.generated.AuthorizationException in project storm by apache.

From class Localizer, method downloadBlob():

private LocalizedResource downloadBlob(Map conf, String key, File localFile, String user, boolean uncompress, boolean isUpdate) throws AuthorizationException, KeyNotFoundException, IOException {
    ClientBlobStore blobstore = null;
    try {
        blobstore = getClientBlobStore();
        long nimbusBlobVersion = Utils.nimbusVersionOfBlob(key, blobstore);
        long oldVersion = Utils.localVersionOfBlob(localFile.toString());
        FileOutputStream out = null;
        PrintWriter writer = null;
        int numTries = 0;
        String localizedPath = localFile.toString();
        String localFileWithVersion = Utils.constructBlobWithVersionFileName(localFile.toString(), nimbusBlobVersion);
        String localVersionFile = Utils.constructVersionFileName(localFile.toString());
        String downloadFile = localFileWithVersion;
        if (uncompress) {
            // we need to download to a temp file and then unpack into the one requested
            downloadFile = new File(localFile.getParent(), TO_UNCOMPRESS + localFile.getName()).toString();
        }
        while (numTries < _blobDownloadRetries) {
            out = new FileOutputStream(downloadFile);
            numTries++;
            try {
                if (!Utils.canUserReadBlob(blobstore.getBlobMeta(key), user)) {
                    throw new AuthorizationException(user + " does not have READ access to " + key);
                }
                InputStreamWithMeta in = blobstore.getBlob(key);
                byte[] buffer = new byte[1024];
                int len;
                while ((len = in.read(buffer)) >= 0) {
                    out.write(buffer, 0, len);
                }
                out.close();
                in.close();
                if (uncompress) {
                    Utils.unpack(new File(downloadFile), new File(localFileWithVersion));
                    LOG.debug("uncompressed " + downloadFile + " to: " + localFileWithVersion);
                }
                // Next write the version.
                LOG.info("Blob: " + key + " updated with new Nimbus-provided version: " + nimbusBlobVersion + " local version was: " + oldVersion);
                // The false parameter ensures overwriting the version file, not appending
                writer = new PrintWriter(new BufferedWriter(new FileWriter(localVersionFile, false)));
                writer.println(nimbusBlobVersion);
                writer.close();
                try {
                    setBlobPermissions(conf, user, localFileWithVersion);
                    setBlobPermissions(conf, user, localVersionFile);
                    // Update the key.current symlink. First create tmp symlink and do
                    // move of tmp to current so that the operation is atomic.
                    String tmp_uuid_local = java.util.UUID.randomUUID().toString();
                    LOG.debug("Creating a symlink @" + localFile + "." + tmp_uuid_local + " , " + "linking to: " + localFile + "." + nimbusBlobVersion);
                    File uuid_symlink = new File(localFile + "." + tmp_uuid_local);
                    Files.createSymbolicLink(uuid_symlink.toPath(), Paths.get(Utils.constructBlobWithVersionFileName(localFile.toString(), nimbusBlobVersion)));
                    File current_symlink = new File(Utils.constructBlobCurrentSymlinkName(localFile.toString()));
                    Files.move(uuid_symlink.toPath(), current_symlink.toPath(), ATOMIC_MOVE);
                } catch (IOException e) {
                    // restore the old version to the file
                    try {
                        PrintWriter restoreWriter = new PrintWriter(new BufferedWriter(new FileWriter(localVersionFile, false)));
                        restoreWriter.println(oldVersion);
                        restoreWriter.close();
                    } catch (IOException ignore) {
                    }
                    throw e;
                }
                String oldBlobFile = localFile + "." + oldVersion;
                try {
                    // remove the old version now that key.current points at the new
                    // one; the delete can race with anyone trying to read it.
                    if ((oldVersion != -1) && (oldVersion != nimbusBlobVersion)) {
                        LOG.info("Removing an old blob file:" + oldBlobFile);
                        Files.delete(Paths.get(oldBlobFile));
                    }
                } catch (IOException e) {
                    // At this point we have downloaded everything and moved the symlinks.
                    // If removing the old version fails, just log an error.
                    LOG.error("Exception removing old blob version: " + oldBlobFile);
                }
                break;
            } catch (AuthorizationException ae) {
                // we consider this a non-retriable exception
                if (out != null) {
                    out.close();
                }
                new File(downloadFile).delete();
                throw ae;
            } catch (IOException | KeyNotFoundException e) {
                if (out != null) {
                    out.close();
                }
                if (writer != null) {
                    writer.close();
                }
                new File(downloadFile).delete();
                if (uncompress) {
                    try {
                        FileUtils.deleteDirectory(new File(localFileWithVersion));
                    } catch (IOException ignore) {
                    }
                }
                if (!isUpdate) {
                    // don't want to remove the existing version file if it's an update
                    new File(localVersionFile).delete();
                }
                if (numTries < _blobDownloadRetries) {
                    LOG.error("Failed to download blob, retrying", e);
                } else {
                    throw e;
                }
            }
        }
        return new LocalizedResource(key, localizedPath, uncompress);
    } finally {
        if (blobstore != null) {
            blobstore.shutdown();
        }
    }
}
Also used: ClientBlobStore (org.apache.storm.blobstore.ClientBlobStore), AuthorizationException (org.apache.storm.generated.AuthorizationException), KeyNotFoundException (org.apache.storm.generated.KeyNotFoundException), InputStreamWithMeta (org.apache.storm.blobstore.InputStreamWithMeta), IOException (java.io.IOException), File (java.io.File), FileWriter (java.io.FileWriter), FileOutputStream (java.io.FileOutputStream), BufferedWriter (java.io.BufferedWriter), PrintWriter (java.io.PrintWriter)
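
The symlink update above is a general NIO pattern worth isolating: create a symlink under a unique temporary name, then Files.move it onto the stable name with ATOMIC_MOVE so readers never see a missing or half-written link. A self-contained sketch with illustrative paths:

import static java.nio.file.StandardCopyOption.ATOMIC_MOVE;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.UUID;

public class AtomicSymlinkSwap {
    public static void main(String[] args) throws IOException {
        Path target = Paths.get("/tmp/blob.7");        // versioned payload (illustrative)
        Path current = Paths.get("/tmp/blob.current"); // stable name readers follow
        Path tmp = Paths.get("/tmp/blob." + UUID.randomUUID()); // unique temp symlink

        Files.createSymbolicLink(tmp, target);
        // ATOMIC_MOVE replaces "current" in a single step, so a concurrent
        // reader observes either the old link or the new one, never neither.
        Files.move(tmp, current, ATOMIC_MOVE);
    }
}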

Example 25 with AuthorizationException

Use of org.apache.storm.generated.AuthorizationException in project storm by apache.

From class TridentKafkaClientWordCountNamedTopics, method run():

protected void run(String[] args) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, InterruptedException {
    if (args.length > 0 && Arrays.stream(args).anyMatch(option -> option.equals("-h"))) {
        System.out.printf("Usage: java %s [%s] [%s] [%s] [%s]\n", getClass().getName(), "broker_host:broker_port", "topic1", "topic2", "topology_name");
    } else {
        final String brokerUrl = args.length > 0 ? args[0] : KAFKA_LOCAL_BROKER;
        final String topic1 = args.length > 1 ? args[1] : TOPIC_1;
        final String topic2 = args.length > 2 ? args[2] : TOPIC_2;
        System.out.printf("Running with broker_url: [%s], topics: [%s, %s]\n", brokerUrl, topic1, topic2);
        Config tpConf = LocalSubmitter.defaultConfig(true);
        if (args.length == 4) {
            //Submit Remote
            // Producers
            StormSubmitter.submitTopology(topic1 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, topic1));
            StormSubmitter.submitTopology(topic2 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, topic2));
            // Consumer
            StormSubmitter.submitTopology("topics-consumer", tpConf, TridentKafkaConsumerTopology.newTopology(newKafkaTridentSpoutOpaque()));
            // Print results to console, which also causes the print filter in the consumer topology to print the results in the worker log
            Thread.sleep(2000);
            DrpcResultsPrinter.remoteClient().printResults(60, 1, TimeUnit.SECONDS);
        } else {
            //Submit Local
            final LocalSubmitter localSubmitter = LocalSubmitter.newInstance();
            final String topic1Tp = "topic1-producer";
            final String topic2Tp = "topic2-producer";
            final String consTpName = "topics-consumer";
            try {
                // Producers
                localSubmitter.submit(topic1Tp, tpConf, KafkaProducerTopology.newTopology(brokerUrl, topic1));
                localSubmitter.submit(topic2Tp, tpConf, KafkaProducerTopology.newTopology(brokerUrl, topic2));
                // Consumer
                try {
                    localSubmitter.submit(consTpName, tpConf, TridentKafkaConsumerTopology.newTopology(localSubmitter.getDrpc(), newKafkaTridentSpoutOpaque()));
                    // print
                    localSubmitter.printResults(15, 1, TimeUnit.SECONDS);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            } finally {
                // kill
                localSubmitter.kill(topic1Tp);
                localSubmitter.kill(topic2Tp);
                localSubmitter.kill(consTpName);
                // shutdown
                localSubmitter.shutdown();
            }
        }
    }
    // Kill all the non daemon threads
    System.exit(0);
}
Also used: StormSubmitter (org.apache.storm.StormSubmitter), Config (org.apache.storm.Config), AlreadyAliveException (org.apache.storm.generated.AlreadyAliveException), InvalidTopologyException (org.apache.storm.generated.InvalidTopologyException), AuthorizationException (org.apache.storm.generated.AuthorizationException), Arrays (java.util.Arrays), List (java.util.List), Serializable (java.io.Serializable), TimeUnit (java.util.concurrent.TimeUnit), Fields (org.apache.storm.tuple.Fields), Values (org.apache.storm.tuple.Values), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Func (org.apache.storm.kafka.spout.Func), KafkaSpoutConfig (org.apache.storm.kafka.spout.KafkaSpoutConfig), KafkaSpoutRetryService (org.apache.storm.kafka.spout.KafkaSpoutRetryService), KafkaSpoutRetryExponentialBackoff (org.apache.storm.kafka.spout.KafkaSpoutRetryExponentialBackoff), TimeInterval (org.apache.storm.kafka.spout.KafkaSpoutRetryExponentialBackoff.TimeInterval), EARLIEST (org.apache.storm.kafka.spout.KafkaSpoutConfig.FirstPollOffsetStrategy.EARLIEST), KafkaTridentSpoutOpaque (org.apache.storm.kafka.spout.trident.KafkaTridentSpoutOpaque)
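
run() declares the submission exceptions and lets them propagate; a caller that prefers to handle them can catch each of the three types submitTopology throws. A minimal sketch around the same StormSubmitter call used above; the reporting strategy is just an example:

import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.generated.StormTopology;

public class SubmitWithHandling {
    static void submitOrReport(String name, Config conf, StormTopology topology) {
        try {
            StormSubmitter.submitTopology(name, conf, topology);
        } catch (AuthorizationException e) {
            // an ACL/credentials problem; retrying without operator action won't help
            System.err.println("not authorized to submit " + name + ": " + e);
        } catch (AlreadyAliveException e) {
            System.err.println("topology " + name + " is already running");
        } catch (InvalidTopologyException e) {
            System.err.println("topology " + name + " failed validation: " + e);
        }
    }
}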

Aggregations

AuthorizationException (org.apache.storm.generated.AuthorizationException): 36
KeyNotFoundException (org.apache.storm.generated.KeyNotFoundException): 26
IOException (java.io.IOException): 25
KeyAlreadyExistsException (org.apache.storm.generated.KeyAlreadyExistsException): 21
TException (org.apache.thrift.TException): 21
AlreadyAliveException (org.apache.storm.generated.AlreadyAliveException): 20
InvalidTopologyException (org.apache.storm.generated.InvalidTopologyException): 20
InterruptedIOException (java.io.InterruptedIOException): 18
BindException (java.net.BindException): 18
NotAliveException (org.apache.storm.generated.NotAliveException): 18
ArrayList (java.util.ArrayList): 10
HashMap (java.util.HashMap): 10
List (java.util.List): 8
Map (java.util.Map): 8
IStormClusterState (org.apache.storm.cluster.IStormClusterState): 7
ImmutableMap (com.google.common.collect.ImmutableMap): 4
File (java.io.File): 4
NodeInfo (org.apache.storm.generated.NodeInfo): 4
BufferInputStream (org.apache.storm.utils.BufferInputStream): 4
TimeCacheMap (org.apache.storm.utils.TimeCacheMap): 4