Usage of scala.collection.immutable.Map$ in the Apache iceberg project: the SparkTableUtil.getPartitions method.
/**
 * Returns all partitions in the table.
 *
 * @param spark a Spark session
 * @param tableIdent a table identifier
 * @param partitionFilter partition filter, or null if no filter
 * @return all table's partitions
 */
public static List<SparkPartition> getPartitions(SparkSession spark, TableIdentifier tableIdent, Map<String, String> partitionFilter) {
try {
SessionCatalog sessionCatalog = spark.sessionState().catalog();
CatalogTable table = sessionCatalog.getTableMetadata(tableIdent);
// Translate the Java filter map into an Option<scala.immutable.Map>;
// a null or empty filter maps to Option.empty() ("no filter").
Option<scala.collection.immutable.Map<String, String>> filterOption;
if (partitionFilter == null || partitionFilter.isEmpty()) {
filterOption = Option.empty();
} else {
Builder<Tuple2<String, String>, scala.collection.immutable.Map<String, String>> mapBuilder = Map$.MODULE$.<String, String>newBuilder();
for (Map.Entry<String, String> entry : partitionFilter.entrySet()) {
mapBuilder.$plus$eq(Tuple2.apply(entry.getKey(), entry.getValue()));
}
filterOption = Option.apply(mapBuilder.result());
}
Seq<CatalogTablePartition> catalogPartitions = sessionCatalog.listPartitions(tableIdent, filterOption).toIndexedSeq();
// Convert each catalog partition into the Iceberg-facing SparkPartition representation.
return JavaConverters.seqAsJavaListConverter(catalogPartitions).asJava().stream().map(partition -> toSparkPartition(partition, table)).collect(Collectors.toList());
} catch (NoSuchDatabaseException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unknown table: %s. Database not found in catalog.", tableIdent);
} catch (NoSuchTableException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unknown table: %s. Table not found in catalog.", tableIdent);
}
}
Usage of scala.collection.immutable.Map$ in the Apache iceberg project: the SparkTableUtil.getPartitionsByFilter method.
/**
 * Returns partitions that match the specified 'predicate'.
 *
 * @param spark a Spark session
 * @param tableIdent a table identifier
 * @param predicateExpr a predicate expression on partition columns
 * @return matching table's partitions
 */
public static List<SparkPartition> getPartitionsByFilter(SparkSession spark, TableIdentifier tableIdent, Expression predicateExpr) {
try {
SessionCatalog sessionCatalog = spark.sessionState().catalog();
CatalogTable table = sessionCatalog.getTableMetadata(tableIdent);
// Resolve attribute references against the table before handing the predicate to the catalog.
Expression resolved = predicateExpr.resolved() ? predicateExpr : resolveAttrs(spark, tableIdent.quotedString(), predicateExpr);
// The catalog API takes a Scala Seq of predicates; wrap our single expression.
Seq<Expression> predicates = JavaConverters.collectionAsScalaIterableConverter(ImmutableList.of(resolved)).asScala().toIndexedSeq();
Seq<CatalogTablePartition> catalogPartitions = sessionCatalog.listPartitionsByFilter(tableIdent, predicates).toIndexedSeq();
// Convert each matching catalog partition into the Iceberg-facing SparkPartition representation.
return JavaConverters.seqAsJavaListConverter(catalogPartitions).asJava().stream().map(partition -> toSparkPartition(partition, table)).collect(Collectors.toList());
} catch (NoSuchDatabaseException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unknown table: %s. Database not found in catalog.", tableIdent);
} catch (NoSuchTableException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unknown table: %s. Table not found in catalog.", tableIdent);
}
}
Usage of scala.collection.immutable.Map$ in the Pinterest secor project: the OstrichAdminService.start method.
/**
 * Starts the Ostrich admin HTTP service on the configured port and, best-effort,
 * publishes the build revision from build.properties as a stats label.
 */
public void start() {
Duration[] defaultLatchIntervals = { Duration.apply(1, TimeUnit.MINUTES) };
// Register the Prometheus scrape endpoint only when it is enabled; otherwise expose no custom handlers.
Map<String, CustomHttpHandler> handlers = mPrometheusEnabled ? new Map.Map1<>("/prometheus", new PrometheusHandler()) : Map$.MODULE$.empty();
@SuppressWarnings("deprecation") AdminServiceFactory adminServiceFactory = new AdminServiceFactory(this.mPort, 20, List$.MODULE$.<StatsFactory>empty(), Option.<String>empty(), List$.MODULE$.<Regex>empty(), handlers, JavaConversions.asScalaBuffer(Arrays.asList(defaultLatchIntervals)).toList());
RuntimeEnvironment runtimeEnvironment = new RuntimeEnvironment(this);
adminServiceFactory.apply(runtimeEnvironment);
// Best-effort: failure to read build metadata must not prevent the admin service from starting,
// so all errors (including a missing resource, which NPEs on getResource) are logged and swallowed.
try (java.io.InputStream in = this.getClass().getResource("build.properties").openStream()) {
Properties properties = new Properties();
properties.load(in);
String buildRevision = properties.getProperty("build_revision", "unknown");
LOG.info("build.properties build_revision: {}", buildRevision);
StatsUtil.setLabel("secor.build_revision", buildRevision);
} catch (Throwable t) {
LOG.error("Failed to load properties from build.properties", t);
}
}
Usage of scala.collection.immutable.Map$ in the parent project by Daytime-Don-t-Know-Dark-Night: the Jdbcs.onDuplicateUpdate method.
/**
 * Builds an "on duplicate key update" clause: the caller-supplied assignments in
 * {@code sql2} are kept verbatim, and every remaining schema field (not listed in
 * {@code exclude} and not already assigned) gets a {@code `col`=values(`col`)} entry.
 *
 * @param schema the table schema whose fields drive the generated assignments
 * @param sql2 comma-separated explicit assignments of the form {@code col=expr}
 * @param exclude field names to omit from the generated assignments
 * @return the complete "on duplicate key update ..." clause
 */
public static String onDuplicateUpdate(StructType schema, String sql2, String... exclude) {
Set<String> skipped = Arrays.stream(exclude).collect(Collectors.toSet());
Iterable<String> explicitAssignments = Splitter.on(",").trimResults().omitEmptyStrings().split(sql2);
// Each explicit assignment must look like "lhs=rhs" with exactly one '=' before the end.
Pattern assignmentPattern = Pattern.compile("([^=]+)=[^=]+$");
for (String assignment : explicitAssignments) {
Matcher matcher = assignmentPattern.matcher(assignment);
Preconditions.checkArgument(matcher.matches(), "format error in " + assignment);
// Columns assigned explicitly are skipped when generating the default assignments.
skipped.add(CharMatcher.anyOf("`").trimFrom(matcher.group(1)));
}
List<String> generated = Arrays.stream(schema.fields()).filter(field -> !skipped.contains(field.name())).map(field -> String.format("`%s`=values(`%s`)", field.name(), field.name())).collect(Collectors.toList());
// Sanity check: explicit + generated + excluded must account for every schema field exactly once.
Preconditions.checkArgument(Iterables.size(explicitAssignments) + generated.size() + exclude.length == schema.size());
return "on duplicate key update " + Joiner.on(",").join(Iterables.concat(explicitAssignments, generated));
}
Usage of scala.collection.immutable.Map$ in the Teradata kylo project: the JdbcRelationProviderTest.testWithClassLoader method.
/**
 * Verify creating a JdbcRelation using a custom class loader.
 */
@Test
@SuppressWarnings("unchecked")
public void testWithClassLoader() {
// Build the data-source options expected by the JDBC relation provider.
final Map<String, String> options = (Map<String, String>) Map$.MODULE$.<String, String>newBuilder().$plus$eq(new Tuple2<>("dbtable", "mytable")).$plus$eq(new Tuple2<>("url", "jdbc:h2:mem:spark")).result();
// Create the relation inside the custom class loader's thread context.
final DataSourceResourceLoader loader = new DataSourceResourceLoader(Mockito.mock(SparkContext.class), Thread.currentThread().getContextClassLoader());
final AtomicReference<JdbcRelation> result = new AtomicReference<>();
loader.runWithThreadContext(new Runnable() {
@Override
public void run() {
try (final Connection conn = DriverManager.getConnection("jdbc:h2:mem:spark");
final Statement stmt = conn.createStatement()) {
// The in-memory table must exist before the relation resolves its schema.
stmt.execute("CREATE TABLE mytable (col1 VARCHAR)");
final JdbcRelationProvider provider = new JdbcRelationProvider();
result.set((JdbcRelation) provider.createRelation(Mockito.mock(SQLContext.class), options));
} catch (final Exception e) {
Throwables.propagate(e);
}
}
});
Assert.assertNotNull("Expected relation to be created", result.get());
Assert.assertEquals(JDBCRelation.class, result.get().getDelegate().getClass());
Assert.assertEquals(loader, result.get().getLoader());
}
Aggregations