use of org.apache.beam.vendor.calcite.v1_28_0.com.google.common.collect.ImmutableMultimap in project presto by prestodb.
the class HiveTableOperations method commit.
@Override
public void commit(@Nullable TableMetadata base, TableMetadata metadata) {
    requireNonNull(metadata, "metadata is null");
    // if the metadata is already out of date, reject it
    if (!Objects.equals(base, current())) {
        throw new CommitFailedException("Cannot commit: stale table metadata for %s", getSchemaTableName());
    }
    // if the metadata is not changed, return early
    if (Objects.equals(base, metadata)) {
        return;
    }
    String newMetadataLocation = writeNewMetadata(metadata, version + 1);
    Table table;
    // getting a process-level lock per table to avoid concurrent commit attempts to the same table from the same
    // JVM process, which would result in unnecessary and costly HMS lock acquisition requests
    Optional<Long> lockId = Optional.empty();
    ReentrantLock tableLevelMutex = commitLockCache.getUnchecked(database + "." + tableName);
    tableLevelMutex.lock();
    try {
        try {
            lockId = Optional.of(metastore.lock(metastoreContext, database, tableName));
            if (base == null) {
                String tableComment = metadata.properties().get(TABLE_COMMENT);
                Map<String, String> parameters = new HashMap<>();
                parameters.put("EXTERNAL", "TRUE");
                parameters.put(TABLE_TYPE_PROP, ICEBERG_TABLE_TYPE_VALUE);
                parameters.put(METADATA_LOCATION, newMetadataLocation);
                if (tableComment != null) {
                    parameters.put(TABLE_COMMENT, tableComment);
                }
                Table.Builder builder = Table.builder()
                        .setDatabaseName(database)
                        .setTableName(tableName)
                        .setOwner(owner.orElseThrow(() -> new IllegalStateException("Owner not set")))
                        .setTableType(PrestoTableType.EXTERNAL_TABLE)
                        .setDataColumns(toHiveColumns(metadata.schema().columns()))
                        .withStorage(storage -> storage.setLocation(metadata.location()))
                        .withStorage(storage -> storage.setStorageFormat(STORAGE_FORMAT))
                        .setParameters(parameters);
                table = builder.build();
            }
            else {
                Table currentTable = getTable();
                checkState(currentMetadataLocation != null, "No current metadata location for existing table");
                String metadataLocation = currentTable.getParameters().get(METADATA_LOCATION);
                if (!currentMetadataLocation.equals(metadataLocation)) {
                    throw new CommitFailedException("Metadata location [%s] is not same as table metadata location [%s] for %s", currentMetadataLocation, metadataLocation, getSchemaTableName());
                }
                table = Table.builder(currentTable)
                        .setDataColumns(toHiveColumns(metadata.schema().columns()))
                        .withStorage(storage -> storage.setLocation(metadata.location()))
                        .setParameter(METADATA_LOCATION, newMetadataLocation)
                        .setParameter(PREVIOUS_METADATA_LOCATION, currentMetadataLocation)
                        .build();
            }
        }
        catch (RuntimeException e) {
            try {
                io().deleteFile(newMetadataLocation);
            }
            catch (RuntimeException exception) {
                e.addSuppressed(exception);
            }
            throw e;
        }
        PrestoPrincipal owner = new PrestoPrincipal(USER, table.getOwner());
        PrincipalPrivileges privileges = new PrincipalPrivileges(
                ImmutableMultimap.<String, HivePrivilegeInfo>builder()
                        .put(table.getOwner(), new HivePrivilegeInfo(SELECT, true, owner, owner))
                        .put(table.getOwner(), new HivePrivilegeInfo(INSERT, true, owner, owner))
                        .put(table.getOwner(), new HivePrivilegeInfo(UPDATE, true, owner, owner))
                        .put(table.getOwner(), new HivePrivilegeInfo(DELETE, true, owner, owner))
                        .build(),
                ImmutableMultimap.of());
        if (base == null) {
            metastore.createTable(metastoreContext, table, privileges);
        }
        else {
            metastore.replaceTable(metastoreContext, database, tableName, table, privileges);
        }
    }
    finally {
        shouldRefresh = true;
        try {
            lockId.ifPresent(id -> metastore.unlock(metastoreContext, id));
        }
        catch (Exception e) {
            log.error(e, "Failed to unlock: %s", lockId.orElse(null));
        }
        finally {
            tableLevelMutex.unlock();
        }
    }
}
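For reference, a minimal self-contained sketch of the ImmutableMultimap pattern used for the privileges map above, with plain Strings standing in for Presto's HivePrivilegeInfo (the owner name and privilege strings here are hypothetical): repeated puts under the same key accumulate values rather than overwrite them.

import com.google.common.collect.ImmutableMultimap;

public class OwnerPrivilegesSketch {
    public static void main(String[] args) {
        String owner = "alice"; // hypothetical owner name
        // one owner key, four privilege values; duplicate keys are kept, not replaced
        ImmutableMultimap<String, String> privileges = ImmutableMultimap.<String, String>builder()
                .put(owner, "SELECT")
                .put(owner, "INSERT")
                .put(owner, "UPDATE")
                .put(owner, "DELETE")
                .build();
        System.out.println(privileges.get(owner)); // [SELECT, INSERT, UPDATE, DELETE]
    }
}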
use of org.apache.beam.vendor.calcite.v1_28_0.com.google.common.collect.ImmutableMultimap in project presto by prestodb.
the class JoinGraph method joinWith.
private JoinGraph joinWith(JoinGraph other, List<JoinNode.EquiJoinClause> joinClauses, Context context, PlanNodeId newRoot) {
    for (PlanNode node : other.nodes) {
        checkState(!edges.containsKey(node.getId()), format("Node [%s] appeared in two JoinGraphs", node));
    }
    List<PlanNode> nodes = ImmutableList.<PlanNode>builder()
            .addAll(this.nodes)
            .addAll(other.nodes)
            .build();
    ImmutableMultimap.Builder<PlanNodeId, Edge> edges = ImmutableMultimap.<PlanNodeId, Edge>builder()
            .putAll(this.edges)
            .putAll(other.edges);
    List<RowExpression> joinedFilters = ImmutableList.<RowExpression>builder()
            .addAll(this.filters)
            .addAll(other.filters)
            .build();
    for (JoinNode.EquiJoinClause edge : joinClauses) {
        VariableReferenceExpression leftVariable = edge.getLeft();
        VariableReferenceExpression rightVariable = edge.getRight();
        checkState(context.containsVariable(leftVariable));
        checkState(context.containsVariable(rightVariable));
        PlanNode left = context.getVariableSource(leftVariable);
        PlanNode right = context.getVariableSource(rightVariable);
        edges.put(left.getId(), new Edge(right, leftVariable, rightVariable));
        edges.put(right.getId(), new Edge(left, rightVariable, leftVariable));
    }
    return new JoinGraph(nodes, edges.build(), newRoot, joinedFilters, Optional.empty());
}
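A hedged sketch, with Strings standing in for PlanNodeId and Edge, of the merge-then-extend pattern above: putAll copies both sub-graphs' adjacency multimaps into one builder, and each equi-join clause contributes one edge in each direction.

import com.google.common.collect.ImmutableMultimap;

public class JoinEdgesSketch {
    public static void main(String[] args) {
        ImmutableMultimap<String, String> leftEdges = ImmutableMultimap.of("a", "b", "b", "a");
        ImmutableMultimap<String, String> rightEdges = ImmutableMultimap.of("c", "d", "d", "c");
        // merge the two sub-graphs' edges into one builder
        ImmutableMultimap.Builder<String, String> edges = ImmutableMultimap.<String, String>builder()
                .putAll(leftEdges)
                .putAll(rightEdges);
        // a join clause between "b" and "c" adds a symmetric pair of edges
        edges.put("b", "c");
        edges.put("c", "b");
        System.out.println(edges.build().get("b")); // [a, c]
    }
}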
use of org.apache.beam.vendor.calcite.v1_28_0.com.google.common.collect.ImmutableMultimap in project presto by prestodb.
the class PlanRemotePojections method dedupVariables.
private static List<ProjectionContext> dedupVariables(List<ProjectionContext> projectionContexts) {
    ImmutableList.Builder<ProjectionContext> deduppedProjectionContexts = ImmutableList.builder();
    Set<VariableReferenceExpression> originalVariable = projectionContexts.get(projectionContexts.size() - 1).getProjections().keySet();
    SymbolMapper mapper = null;
    for (int i = 0; i < projectionContexts.size(); i++) {
        Map<VariableReferenceExpression, RowExpression> projections = projectionContexts.get(i).getProjections();
        // Apply mapping from previous projection
        if (mapper != null) {
            ImmutableMap.Builder<VariableReferenceExpression, RowExpression> newProjections = ImmutableMap.builder();
            for (Map.Entry<VariableReferenceExpression, RowExpression> entry : projections.entrySet()) {
                newProjections.put(entry.getKey(), mapper.map(entry.getValue()));
            }
            projections = newProjections.build();
        }
        // Dedup
        ImmutableMultimap.Builder<RowExpression, VariableReferenceExpression> reverseProjectionsBuilder = ImmutableMultimap.builder();
        projections.forEach((key, value) -> reverseProjectionsBuilder.put(value, key));
        ImmutableMultimap<RowExpression, VariableReferenceExpression> reverseProjections = reverseProjectionsBuilder.build();
        if (reverseProjections.keySet().size() == projectionContexts.get(i).getProjections().size()
                && reverseProjections.keySet().stream().noneMatch(VariableReferenceExpression.class::isInstance)) {
            // No duplication
            deduppedProjectionContexts.add(new ProjectionContext(projections, projectionContexts.get(i).isRemote()));
            mapper = null;
        }
        else {
            SymbolMapper.Builder mapperBuilder = SymbolMapper.builder();
            ImmutableMap.Builder<VariableReferenceExpression, RowExpression> dedupedProjectionsBuilder = ImmutableMap.builder();
            for (RowExpression key : reverseProjections.keySet()) {
                List<VariableReferenceExpression> values = ImmutableList.copyOf(reverseProjections.get(key));
                if (key instanceof VariableReferenceExpression) {
                    values.forEach(variable -> mapperBuilder.put(variable, (VariableReferenceExpression) key));
                    dedupedProjectionsBuilder.put((VariableReferenceExpression) key, key);
                }
                else if (values.size() > 1) {
                    // Consolidate to one variable, prefer variables from original plan
                    List<VariableReferenceExpression> fromOriginal = originalVariable.stream().filter(values::contains).collect(toImmutableList());
                    VariableReferenceExpression variable = fromOriginal.isEmpty() ? values.get(0) : getOnlyElement(fromOriginal);
                    for (int j = 0; j < values.size(); j++) {
                        if (!values.get(j).equals(variable)) {
                            mapperBuilder.put(values.get(j), variable);
                        }
                    }
                    dedupedProjectionsBuilder.put(variable, key);
                }
                else {
                    checkState(values.size() == 1, "Expect only 1 value");
                    dedupedProjectionsBuilder.put(values.get(0), key);
                }
            }
            deduppedProjectionContexts.add(new ProjectionContext(dedupedProjectionsBuilder.build(), projectionContexts.get(i).isRemote()));
            mapper = mapperBuilder.build();
        }
    }
    return deduppedProjectionContexts.build();
}
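The dedup step above hinges on inverting a projection map into an ImmutableMultimap keyed by expression. A minimal sketch with Strings in place of RowExpression and VariableReferenceExpression (the expressions shown are hypothetical):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMultimap;
import java.util.Map;

public class ReverseProjectionsSketch {
    public static void main(String[] args) {
        // two variables computed from the same expression, plus one distinct expression
        Map<String, String> projections = ImmutableMap.of("a", "x + 1", "b", "x + 1", "c", "y");
        ImmutableMultimap.Builder<String, String> reverse = ImmutableMultimap.builder();
        projections.forEach((variable, expression) -> reverse.put(expression, variable));
        ImmutableMultimap<String, String> byExpression = reverse.build();
        // fewer distinct expressions than projections means there are duplicates to consolidate
        boolean hasDuplicates = byExpression.keySet().size() < projections.size();
        System.out.println(hasDuplicates); // true
        System.out.println(byExpression.get("x + 1")); // [a, b] -> keep one variable, remap the other
    }
}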
use of org.apache.beam.vendor.calcite.v1_28_0.com.google.common.collect.ImmutableMultimap in project querydsl by querydsl.
the class GroupBy4Test method test6.
@Test
public void test6() {
    List<Table> data = Lists.newArrayList();
    data.add(new Table("1", "abc", "111"));
    data.add(new Table("1", "pqr", "222"));
    data.add(new Table("2", "abc", "333"));
    data.add(new Table("2", "pqr", "444"));
    data.add(new Table("3", "abc", "555"));
    data.add(new Table("3", "pqr", "666"));
    data.add(new Table("3", "pqr", "777"));
    QGroupBy4Test_Table table = QGroupBy4Test_Table.table;
    Map<String, Multimap<String, String>> transform = CollQueryFactory.from(table, data)
            .transform(groupBy(table.col1).as(GuavaGroupBy.multimap(table.col2, table.col3)));
    ImmutableMap<String, Multimap<String, String>> expected = ImmutableMap.<String, Multimap<String, String>>builder()
            .put("1", HashMultimap.create(ImmutableMultimap.<String, String>builder().putAll("abc", "111").putAll("pqr", "222").build()))
            .put("2", HashMultimap.create(ImmutableMultimap.<String, String>builder().putAll("abc", "333").putAll("pqr", "444").build()))
            .put("3", HashMultimap.create(ImmutableMultimap.<String, String>builder().putAll("abc", "555").putAll("pqr", "666", "777").build()))
            .build();
    assertEquals(expected, transform);
}
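A small hedged sketch of the two Guava details the expected map above relies on: putAll(key, values...) attaches several values to one key in a single call, and HashMultimap.create(...) copies the result into a mutable multimap whose equality does not depend on value ordering.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.Multimap;

public class ExpectedGroupSketch {
    public static void main(String[] args) {
        ImmutableMultimap<String, String> group3 = ImmutableMultimap.<String, String>builder()
                .putAll("abc", "555")
                .putAll("pqr", "666", "777") // varargs form: one key, two values
                .build();
        // copy into a HashMultimap, the same shape the test compares against
        Multimap<String, String> comparable = HashMultimap.create(group3);
        System.out.println(comparable.get("pqr").size()); // 2 values under one key
    }
}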
use of org.apache.beam.vendor.calcite.v1_28_0.com.google.common.collect.ImmutableMultimap in project cassandra by apache.
the class NetworkTopologyStrategyTest method calculateNaturalEndpoints.
// Copy of older endpoints calculation algorithm for comparison
public static List<InetAddressAndPort> calculateNaturalEndpoints(Token searchToken, TokenMetadata tokenMetadata, Map<String, Integer> datacenters, IEndpointSnitch snitch) {
    // we want to preserve insertion order so that the first added endpoint becomes primary
    Set<InetAddressAndPort> replicas = new LinkedHashSet<>();
    // replicas we have found in each DC
    Map<String, Set<InetAddressAndPort>> dcReplicas = new HashMap<>(datacenters.size());
    for (Map.Entry<String, Integer> dc : datacenters.entrySet())
        dcReplicas.put(dc.getKey(), new HashSet<InetAddressAndPort>(dc.getValue()));
    Topology topology = tokenMetadata.getTopology();
    // all endpoints in each DC, so we can check when we have exhausted all the members of a DC
    Multimap<String, InetAddressAndPort> allEndpoints = topology.getDatacenterEndpoints();
    // all racks in a DC so we can check when we have exhausted all racks in a DC
    Map<String, ImmutableMultimap<String, InetAddressAndPort>> racks = topology.getDatacenterRacks();
    assert !allEndpoints.isEmpty() && !racks.isEmpty() : "not aware of any cluster members";
    // tracks the racks we have already placed replicas in
    Map<String, Set<String>> seenRacks = new HashMap<>(datacenters.size());
    for (Map.Entry<String, Integer> dc : datacenters.entrySet())
        seenRacks.put(dc.getKey(), new HashSet<String>());
    // tracks the endpoints that we skipped over while looking for unique racks
    // when we relax the rack uniqueness we can append this to the current result so we don't have to wind back the iterator
    Map<String, Set<InetAddressAndPort>> skippedDcEndpoints = new HashMap<>(datacenters.size());
    for (Map.Entry<String, Integer> dc : datacenters.entrySet())
        skippedDcEndpoints.put(dc.getKey(), new LinkedHashSet<InetAddressAndPort>());
    Iterator<Token> tokenIter = TokenMetadata.ringIterator(tokenMetadata.sortedTokens(), searchToken, false);
    while (tokenIter.hasNext() && !hasSufficientReplicas(dcReplicas, allEndpoints, datacenters)) {
        Token next = tokenIter.next();
        InetAddressAndPort ep = tokenMetadata.getEndpoint(next);
        String dc = snitch.getDatacenter(ep);
        // have we already found all replicas for this dc?
        if (!datacenters.containsKey(dc) || hasSufficientReplicas(dc, dcReplicas, allEndpoints, datacenters))
            continue;
        // can we skip checking the rack?
        if (seenRacks.get(dc).size() == racks.get(dc).keySet().size()) {
            dcReplicas.get(dc).add(ep);
            replicas.add(ep);
        }
        else {
            String rack = snitch.getRack(ep);
            // is this a new rack?
            if (seenRacks.get(dc).contains(rack)) {
                skippedDcEndpoints.get(dc).add(ep);
            }
            else {
                dcReplicas.get(dc).add(ep);
                replicas.add(ep);
                seenRacks.get(dc).add(rack);
                // if we've run out of distinct racks, add the hosts we skipped past already (up to RF)
                if (seenRacks.get(dc).size() == racks.get(dc).keySet().size()) {
                    Iterator<InetAddressAndPort> skippedIt = skippedDcEndpoints.get(dc).iterator();
                    while (skippedIt.hasNext() && !hasSufficientReplicas(dc, dcReplicas, allEndpoints, datacenters)) {
                        InetAddressAndPort nextSkipped = skippedIt.next();
                        dcReplicas.get(dc).add(nextSkipped);
                        replicas.add(nextSkipped);
                    }
                }
            }
        }
    }
    return new ArrayList<InetAddressAndPort>(replicas);
}
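A hedged sketch of the dc -> (rack -> endpoints) shape returned by topology.getDatacenterRacks() above, with plain String addresses standing in for InetAddressAndPort (the datacenter, rack, and address names are hypothetical); keySet().size() on the inner multimap is the distinct-rack count the loop compares against seenRacks.

import com.google.common.collect.ImmutableMultimap;
import java.util.HashMap;
import java.util.Map;

public class RackTopologySketch {
    public static void main(String[] args) {
        // dc -> (rack -> endpoints)
        Map<String, ImmutableMultimap<String, String>> racks = new HashMap<>();
        racks.put("dc1", ImmutableMultimap.<String, String>builder()
                .putAll("rack1", "10.0.0.1", "10.0.0.2")
                .putAll("rack2", "10.0.0.3")
                .build());
        // number of distinct racks in dc1
        System.out.println(racks.get("dc1").keySet().size()); // 2
    }
}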