use of com.google.common.util.concurrent.UncheckedExecutionException in project streamline by hortonworks.
the class StormTopologyMetricsImpl method getTopologyInfo.
private Map<String, ?> getTopologyInfo(String topologyId, String asUser) {
    LOG.debug("[START] getTopologyInfo - topology id: {}, asUser: {}", topologyId, asUser);
    Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        Map<String, ?> responseMap;
        try {
            responseMap = topologyRetrieveCache.get(new ImmutablePair<>(topologyId, asUser));
        } catch (ExecutionException e) {
            // the cache loader threw a checked exception; rethrow its cause unchecked
            if (e.getCause() != null) {
                throw new RuntimeException(e.getCause());
            } else {
                throw new RuntimeException(e);
            }
        } catch (UncheckedExecutionException e) {
            // the cache loader threw an unchecked exception; unwrap it when possible
            Throwable cause = e.getCause();
            if (cause instanceof RuntimeException) {
                throw (RuntimeException) cause;
            } else {
                throw new RuntimeException(e);
            }
        }
        LOG.debug("[END] getTopologyInfo - topology id: {}, elapsed: {} ms", topologyId, stopwatch.elapsed(TimeUnit.MILLISECONDS));
        return responseMap;
    } finally {
        stopwatch.stop();
    }
}
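The nested catch blocks mirror the two failure modes of a Guava LoadingCache lookup: get(key) wraps checked loader exceptions in ExecutionException and unchecked ones in UncheckedExecutionException. A minimal sketch of how topologyRetrieveCache might be declared, assuming a CacheBuilder-backed cache keyed by (topologyId, asUser); fetchTopologyInfoFromStorm is a hypothetical stand-in for the real Storm REST call:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.tuple.ImmutablePair;

// Hypothetical declaration; the real cache configuration may differ.
private final LoadingCache<ImmutablePair<String, String>, Map<String, ?>> topologyRetrieveCache =
        CacheBuilder.newBuilder()
                .expireAfterWrite(10, TimeUnit.SECONDS)
                .build(new CacheLoader<ImmutablePair<String, String>, Map<String, ?>>() {
                    @Override
                    public Map<String, ?> load(ImmutablePair<String, String> key) throws Exception {
                        // A checked exception thrown here surfaces as ExecutionException at get();
                        // a RuntimeException surfaces as UncheckedExecutionException.
                        return fetchTopologyInfoFromStorm(key.getLeft(), key.getRight());
                    }
                });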
use of com.google.common.util.concurrent.UncheckedExecutionException in project meghanada-server by mopemope.
the class LocationSearcher method getFieldLocationFromProject.
private Optional<Location> getFieldLocationFromProject(final String fqcn, final String fieldName, final File file) {
    try {
        final Source declaringClassSrc = getSource(project, file);
        final String path = declaringClassSrc.getFile().getPath();
        return declaringClassSrc.getClassScopes().stream()
                .map(cs -> getMatchField(cs, fqcn, fieldName))
                .filter(Optional::isPresent)
                .map(optional -> {
                    final Variable variable = optional.get();
                    return new Location(path, variable.range.begin.line, variable.range.begin.column);
                })
                .findFirst();
    } catch (Exception e) {
        throw new UncheckedExecutionException(e);
    }
}
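Any failure while reading the source is rethrown as UncheckedExecutionException, so the Optional-returning lookup stays free of checked exceptions. A minimal caller-side sketch, assuming the caller wants to recover the original cause (the logger name is hypothetical):

try {
    Optional<Location> location = getFieldLocationFromProject(fqcn, fieldName, file);
    location.ifPresent(loc -> log.debug("found field at {}", loc));
} catch (UncheckedExecutionException e) {
    // getCause() returns the original exception wrapped by getFieldLocationFromProject
    Throwable cause = e.getCause();
    log.warn("field location lookup failed: {}", cause.getMessage());
}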
use of com.google.common.util.concurrent.UncheckedExecutionException in project meghanada-server by mopemope.
the class CachedASMReflector method reflect.
public List<MemberDescriptor> reflect(final String className) {
    final ClassName cn = new ClassName(className);
    // check type parameter
    final String classWithoutTP = cn.getName();
    final GlobalCache globalCache = GlobalCache.getInstance();
    try {
        final List<MemberDescriptor> members = new ArrayList<>(16);
        List<MemberDescriptor> list = globalCache.getMemberDescriptors(classWithoutTP);
        for (final MemberDescriptor md : list) {
            members.add(md.clone());
        }
        if (cn.hasTypeParameter()) {
            return this.replaceMembers(classWithoutTP, className, members);
        }
        return members;
    } catch (ExecutionException e) {
        throw new UncheckedExecutionException(e);
    }
}
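Here reflect() converts the checked ExecutionException from GlobalCache into Guava's unchecked variant, so callers are not forced to handle a cache failure they cannot recover from. The same effect is available directly from a Guava LoadingCache via getUnchecked, which wraps loader failures in UncheckedExecutionException; a minimal sketch, with MemberLoader::loadMembers as a hypothetical loader function:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.List;

LoadingCache<String, List<MemberDescriptor>> memberCache =
        CacheBuilder.newBuilder()
                .maximumSize(1_000)
                .build(CacheLoader.from(MemberLoader::loadMembers)); // hypothetical loader

// Throws UncheckedExecutionException (instead of checked ExecutionException) if loading fails.
List<MemberDescriptor> members = memberCache.getUnchecked("java.lang.String");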
use of com.google.common.util.concurrent.UncheckedExecutionException in project spf4j by zolyfarkas.
the class SpecificRecordOpenTypeMapping method typeFromSpecificRecord.
private static CompositeType typeFromSpecificRecord(final SpecificRecordBase r, final JMXBeanMappingSupplier typeMapper) throws NotSerializableException {
    Schema schema = r.getSchema();
    List<Schema.Field> fields = schema.getFields();
    int size = fields.size();
    String[] names = new String[size];
    String[] descriptions = new String[size];
    OpenType<?>[] types = new OpenType<?>[size];
    for (Schema.Field field : fields) {
        int pos = field.pos();
        names[pos] = field.name();
        descriptions[pos] = field.doc();
        types[pos] = typeMapper.get(getGenericType(field.schema())).getOpenType();
    }
    try {
        return new CompositeType(schema.getFullName(), schema.getDoc(), names, descriptions, types);
    } catch (OpenDataException ex) {
        throw new UncheckedExecutionException(ex);
    }
}
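The CompositeType constructor declares the checked OpenDataException (thrown for null, empty, or duplicate item names), which this mapper rethrows unchecked. A small standalone sketch of the same pattern for a hypothetical two-field record:

import com.google.common.util.concurrent.UncheckedExecutionException;
import javax.management.openmbean.CompositeType;
import javax.management.openmbean.OpenDataException;
import javax.management.openmbean.OpenType;
import javax.management.openmbean.SimpleType;

static CompositeType userRecordType() {
    try {
        return new CompositeType(
                "example.User",                              // type name (hypothetical record)
                "A user record",                             // type description
                new String[] {"name", "age"},                // item names
                new String[] {"user name", "age in years"},  // item descriptions
                new OpenType<?>[] {SimpleType.STRING, SimpleType.INTEGER});
    } catch (OpenDataException ex) {
        // the caller cannot do anything useful with a malformed open type, so rethrow unchecked
        throw new UncheckedExecutionException(ex);
    }
}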
use of com.google.common.util.concurrent.UncheckedExecutionException in project incubator-gobblin by apache.
the class HiveSource method createWorkunitsForPartitionedTable.
protected void createWorkunitsForPartitionedTable(HiveDataset hiveDataset, AutoReturnableObject<IMetaStoreClient> client) throws IOException {
    boolean setLineageInfo = false;
    long tableProcessTime = new DateTime().getMillis();
    this.watermarker.onTableProcessBegin(hiveDataset.getTable(), tableProcessTime);
    Optional<String> partitionFilter = Optional.absent();
    // If the table is date partitioned, use the partition name to filter partitions older than lookback
    if (hiveDataset.getProperties().containsKey(LookbackPartitionFilterGenerator.PARTITION_COLUMN) && hiveDataset.getProperties().containsKey(LookbackPartitionFilterGenerator.DATETIME_FORMAT) && hiveDataset.getProperties().containsKey(LookbackPartitionFilterGenerator.LOOKBACK)) {
        partitionFilter = Optional.of(new LookbackPartitionFilterGenerator(hiveDataset.getProperties()).getFilter(hiveDataset));
        log.info(String.format("Getting partitions for %s using partition filter %s", hiveDataset.getTable().getCompleteName(), partitionFilter.get()));
    }
    List<Partition> sourcePartitions = HiveUtils.getPartitions(client.get(), hiveDataset.getTable(), partitionFilter);
    for (Partition sourcePartition : sourcePartitions) {
        if (isOlderThanLookback(sourcePartition)) {
            continue;
        }
        LongWatermark lowWatermark = watermarker.getPreviousHighWatermark(sourcePartition);
        try {
            if (!shouldCreateWorkUnit(new Path(sourcePartition.getLocation()))) {
                log.info(String.format("Not creating workunit for partition %s as partition path %s contains data path tokens to ignore %s", sourcePartition.getCompleteName(), sourcePartition.getLocation(), this.ignoreDataPathIdentifierList));
                continue;
            }
            long updateTime = this.updateProvider.getUpdateTime(sourcePartition);
            if (shouldCreateWorkunit(sourcePartition, lowWatermark)) {
                log.debug(String.format("Processing partition: %s", sourcePartition));
                long partitionProcessTime = new DateTime().getMillis();
                this.watermarker.onPartitionProcessBegin(sourcePartition, partitionProcessTime, updateTime);
                LongWatermark expectedPartitionHighWatermark = this.watermarker.getExpectedHighWatermark(sourcePartition, tableProcessTime, partitionProcessTime);
                HiveWorkUnit hiveWorkUnit = workUnitForPartition(hiveDataset, sourcePartition);
                hiveWorkUnit.setWatermarkInterval(new WatermarkInterval(lowWatermark, expectedPartitionHighWatermark));
                EventWorkunitUtils.setPartitionSlaEventMetadata(hiveWorkUnit, hiveDataset.getTable(), sourcePartition, updateTime, lowWatermark.getValue(), this.beginGetWorkunitsTime);
                if (hiveDataset instanceof ConvertibleHiveDataset && !setLineageInfo) {
                    setLineageInfo((ConvertibleHiveDataset) hiveDataset, hiveWorkUnit, this.sharedJobBroker);
                    log.info("Added lineage event for dataset " + hiveDataset.getUrn());
                    // Add lineage information only once per hive table
                    setLineageInfo = true;
                }
                workunits.add(hiveWorkUnit);
                log.info(String.format("Creating workunit for partition %s as updateTime %s is greater than low watermark %s", sourcePartition.getCompleteName(), updateTime, lowWatermark.getValue()));
            } else {
                // If watermark tracking at a partition level is necessary, create a dummy workunit for this partition here.
                log.info(String.format("Not creating workunit for partition %s as updateTime %s is lesser than low watermark %s", sourcePartition.getCompleteName(), updateTime, lowWatermark.getValue()));
            }
        } catch (UpdateNotFoundException e) {
            log.error(String.format("Not creating workunit for %s as update time was not found. %s", sourcePartition.getCompleteName(), e.getMessage()));
        } catch (SchemaNotFoundException e) {
            log.error(String.format("Not creating workunit for %s as schema was not found. %s", sourcePartition.getCompleteName(), e.getMessage()));
        } catch (UncheckedExecutionException e) {
            log.error(String.format("Not creating workunit for %s because an unchecked exception occurred. %s", sourcePartition.getCompleteName(), e.getMessage()));
        }
    }
}
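The final catch treats UncheckedExecutionException as a per-partition failure: the partition is logged and skipped rather than failing the whole table scan. One way such an exception could reach this loop is if the update-time lookup were backed by a Guava cache; a minimal sketch with hypothetical names (updateTimeCache, readUpdateTimeFromMetastore), not the project's actual implementation:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.apache.hadoop.hive.ql.metadata.Partition;

// Hypothetical cache-backed update-time provider.
private final LoadingCache<Partition, Long> updateTimeCache = CacheBuilder.newBuilder()
        .build(new CacheLoader<Partition, Long>() {
            @Override
            public Long load(Partition partition) {
                // any RuntimeException thrown here reaches getUnchecked() callers
                // wrapped in UncheckedExecutionException
                return readUpdateTimeFromMetastore(partition); // hypothetical helper
            }
        });

long getUpdateTime(Partition partition) {
    return updateTimeCache.getUnchecked(partition);
}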