Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.config.CalciteConnectionConfig in project calcite by apache.
The class Schemas, method makeContext.
/**
* Creates a context for the purposes of preparing a statement.
*
* @param connection Connection
* @param schema Schema
* @param schemaPath Path wherein to look for functions
* @param objectPath Path of the object being analyzed (usually a view),
* or null
* @param propValues Connection properties
* @return Context
*/
private static CalcitePrepare.Context makeContext(CalciteConnection connection,
    CalciteSchema schema, List<String> schemaPath, List<String> objectPath,
    final ImmutableMap<CalciteConnectionProperty, String> propValues) {
if (connection == null) {
final CalcitePrepare.Context context0 = CalcitePrepare.Dummy.peek();
final CalciteConnectionConfig config = mutate(context0.config(), propValues);
return makeContext(config, context0.getTypeFactory(), context0.getDataContext(), schema, schemaPath, objectPath);
} else {
final CalciteConnectionConfig config = mutate(connection.config(), propValues);
return makeContext(config, connection.getTypeFactory(), createDataContext(connection, schema.root().plus()), schema, schemaPath, objectPath);
}
}
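The helper mutate is not included in this excerpt; it overlays the supplied property values on top of an existing connection config. A minimal sketch of what it presumably does, assuming CalciteConnectionConfigImpl.set(CalciteConnectionProperty, String) returns a copy of the config with one property overridden:

// Sketch only: the real mutate in Schemas may differ in detail.
private static CalciteConnectionConfig mutate(CalciteConnectionConfig config,
    ImmutableMap<CalciteConnectionProperty, String> propValues) {
  CalciteConnectionConfigImpl config2 = (CalciteConnectionConfigImpl) config;
  for (Map.Entry<CalciteConnectionProperty, String> entry : propValues.entrySet()) {
    config2 = config2.set(entry.getKey(), entry.getValue());
  }
  return config2;
}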
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.config.CalciteConnectionConfig in project calcite by apache.
The class FilesTableFunction, method eval.
/**
* Evaluates the function.
*
* @param path Directory in which to start the search. Typically '.'
* @return Table that can be inspected, planned, and evaluated
*/
public static ScannableTable eval(final String path) {
return new ScannableTable() {
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
  return typeFactory.builder()
      .add("access_time", SqlTypeName.TIMESTAMP)  // %A@ sec since epoch
      .add("block_count", SqlTypeName.INTEGER)    // %b in 512B blocks
      .add("change_time", SqlTypeName.TIMESTAMP)  // %C@ sec since epoch
      .add("depth", SqlTypeName.INTEGER)          // %d depth in directory tree
      .add("device", SqlTypeName.INTEGER)         // %D device number
      .add("file_name", SqlTypeName.VARCHAR)      // %f file name, sans dirs
      .add("fstype", SqlTypeName.VARCHAR)         // %F file system type
      .add("gname", SqlTypeName.VARCHAR)          // %g group name
      .add("gid", SqlTypeName.INTEGER)            // %G numeric group id
      .add("dir_name", SqlTypeName.VARCHAR)       // %h leading dirs
      .add("inode", SqlTypeName.BIGINT)           // %i inode number
      .add("link", SqlTypeName.VARCHAR)           // %l object of sym link
      .add("perm", SqlTypeName.CHAR, 4)           // %#m permission octal
      .add("hard", SqlTypeName.INTEGER)           // %n number of hard links
      .add("path", SqlTypeName.VARCHAR)           // %P file's name
      .add("size", SqlTypeName.BIGINT)            // %s file's size in bytes
      .add("mod_time", SqlTypeName.TIMESTAMP)     // %T@ seconds since epoch
      .add("user", SqlTypeName.VARCHAR)           // %u user name
      .add("uid", SqlTypeName.INTEGER)            // %U numeric user id
      .add("type", SqlTypeName.CHAR, 1)           // %Y file type
      .build();
  // Fields in Linux find that are currently ignored:
  // %y file type (not following sym links)
  // %k block count in 1KB blocks
  // %p file name (including argument)
}
private Enumerable<String> sourceLinux() {
final String[] args = { "find", path, "-printf", ""
    + "%A@\\0"  // access_time
    + "%b\\0"   // block_count
    + "%C@\\0"  // change_time
    + "%d\\0"   // depth
    + "%D\\0"   // device
    + "%f\\0"   // file_name
    + "%F\\0"   // fstype
    + "%g\\0"   // gname
    + "%G\\0"   // gid
    + "%h\\0"   // dir_name
    + "%i\\0"   // inode
    + "%l\\0"   // link
    + "%#m\\0"  // perm
    + "%n\\0"   // hard
    + "%P\\0"   // path
    + "%s\\0"   // size
    + "%T@\\0"  // mod_time
    + "%u\\0"   // user
    + "%U\\0"   // uid
    + "%Y\\0" };  // type
return Processes.processLines('\0', args);
}
private Enumerable<String> sourceMacOs() {
if (path.contains("'")) {
// no injection monkey business
throw new IllegalArgumentException();
}
final String[] args = { "/bin/sh", "-c",
    "find '" + path + "' | xargs stat -f "
        + "%a%n"        // access_time
        + "%b%n"        // block_count
        + "%c%n"        // change_time
        + "0%n"         // depth: not supported by macOS stat
        + "%Hd%n"       // device: we only use the high part of "H,L" device
        + "filename%n"  // filename: not supported by macOS stat
        + "fstype%n"    // fstype: not supported by macOS stat
        + "%Sg%n"       // gname
        + "%g%n"        // gid
        + "dir_name%n"  // dir_name: not supported by macOS stat
        + "%i%n"        // inode
        + "%Y%n"        // link
        + "%Lp%n"       // perm
        + "%l%n"        // hard
        + "%SN%n"       // path
        + "%z%n"        // size
        + "%m%n"        // mod_time
        + "%Su%n"       // user
        + "%u%n"        // uid
        + "%LT%n" };    // type
return Processes.processLines('\n', args);
}
public Enumerable<Object[]> scan(DataContext root) {
final RelDataType rowType = getRowType(root.getTypeFactory());
final List<String> fieldNames = ImmutableList.copyOf(rowType.getFieldNames());
final String osName = System.getProperty("os.name");
final String osVersion = System.getProperty("os.version");
Util.discard(osVersion);
final Enumerable<String> enumerable;
switch (osName) {
case "Mac OS X":  // tested on version 10.12.5
  enumerable = sourceMacOs();
  break;
default:
  enumerable = sourceLinux();
}
return new AbstractEnumerable<Object[]>() {
public Enumerator<Object[]> enumerator() {
final Enumerator<String> e = enumerable.enumerator();
return new Enumerator<Object[]>() {
Object[] current;
public Object[] current() {
return current;
}
public boolean moveNext() {
current = new Object[fieldNames.size()];
for (int i = 0; i < current.length; i++) {
if (!e.moveNext()) {
return false;
}
final String v = e.current();
try {
current[i] = field(fieldNames.get(i), v);
} catch (RuntimeException e) {
throw new RuntimeException("while parsing value [" + v + "] of field [" + fieldNames.get(i) + "] in line [" + Arrays.toString(current) + "]", e);
}
}
switch(osName) {
case "Mac OS X":
// Strip leading "./"
String path = (String) current[14];
if (path.equals(".")) {
current[14] = path = "";
// depth
current[3] = 0;
} else if (path.startsWith("./")) {
current[14] = path = path.substring(2);
// depth
current[3] = count(path, '/') + 1;
} else {
// depth
current[3] = count(path, '/');
}
final int slash = path.lastIndexOf('/');
if (slash >= 0) {
// filename
current[5] = path.substring(slash + 1);
// dir_name
current[9] = path.substring(0, slash);
} else {
// filename
current[5] = path;
// dir_name
current[9] = "";
}
// Make type values more like those on Linux
final String type = (String) current[19];
current[19] = type.equals("/") ? "d" : type.equals("") || type.equals("*") ? "f" : type.equals("@") ? "l" : type;
}
return true;
}
private int count(String s, char c) {
int n = 0;
for (int i = 0, len = s.length(); i < len; i++) {
if (s.charAt(i) == c) {
++n;
}
}
return n;
}
public void reset() {
throw new UnsupportedOperationException();
}
public void close() {
e.close();
}
private Object field(String field, String value) {
switch(field) {
case "block_count":
case "depth":
case "device":
case "gid":
case "uid":
case "hard":
return Integer.valueOf(value);
case "inode":
case "size":
return Long.valueOf(value);
case "access_time":
case "change_time":
case "mod_time":
return new BigDecimal(value).multiply(THOUSAND).longValue();
default:
return value;
}
}
};
}
};
}
public Statistic getStatistic() {
return Statistics.of(1000d, ImmutableList.of(ImmutableBitSet.of(1)));
}
public Schema.TableType getJdbcTableType() {
return Schema.TableType.TABLE;
}
public boolean isRolledUp(String column) {
return false;
}
public boolean rolledUpColumnValidInsideAgg(String column, SqlCall call, SqlNode parent, CalciteConnectionConfig config) {
return true;
}
};
}
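Because eval returns a ScannableTable, it can be registered as a table function and queried over JDBC. A minimal sketch, assuming the un-vendored org.apache.calcite packages and illustrative schema/function names (OS, FILES) that are not part of the excerpt:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

import org.apache.calcite.jdbc.CalciteConnection;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.schema.impl.AbstractSchema;
import org.apache.calcite.schema.impl.TableFunctionImpl;
// plus an import for the FilesTableFunction class shown above

public class FilesExample {
  public static void main(String[] args) throws Exception {
    try (Connection connection = DriverManager.getConnection("jdbc:calcite:")) {
      CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class);
      // Register the static eval(String) method as a table function named FILES
      // in an illustrative sub-schema named OS.
      SchemaPlus os = calciteConnection.getRootSchema().add("OS", new AbstractSchema());
      os.add("FILES", TableFunctionImpl.create(FilesTableFunction.class, "eval"));
      try (Statement statement = connection.createStatement();
           ResultSet resultSet = statement.executeQuery(
               "select \"path\", \"size\" from table(os.files('.')) where \"size\" > 100000")) {
        while (resultSet.next()) {
          System.out.println(resultSet.getString(1) + " " + resultSet.getLong(2));
        }
      }
    }
  }
}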
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.config.CalciteConnectionConfig in project calcite by apache.
The class StdinTableFunction, method eval.
public static ScannableTable eval(boolean b) {
return new ScannableTable() {
public Enumerable<Object[]> scan(DataContext root) {
final InputStream is = DataContext.Variable.STDIN.get(root);
return new AbstractEnumerable<Object[]>() {
final InputStreamReader in = new InputStreamReader(is, StandardCharsets.UTF_8);
final BufferedReader br = new BufferedReader(in);
public Enumerator<Object[]> enumerator() {
return new Enumerator<Object[]>() {
String line;
int i;
public Object[] current() {
if (line == null) {
throw new NoSuchElementException();
}
return new Object[] { i, line };
}
public boolean moveNext() {
try {
line = br.readLine();
++i;
return line != null;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public void reset() {
throw new UnsupportedOperationException();
}
public void close() {
try {
br.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
};
}
};
}
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
return typeFactory.builder().add("ordinal", SqlTypeName.INTEGER).add("line", SqlTypeName.VARCHAR).build();
}
public Statistic getStatistic() {
return Statistics.of(1000d, ImmutableList.of(ImmutableBitSet.of(1)));
}
public Schema.TableType getJdbcTableType() {
return Schema.TableType.TABLE;
}
public boolean isRolledUp(String column) {
return false;
}
public boolean rolledUpColumnValidInsideAgg(String column, SqlCall call, SqlNode parent, CalciteConnectionConfig config) {
return true;
}
};
}
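Note that scan reads its input from the DataContext rather than from System.in directly: DataContext.Variable.STDIN.get(root) is a typed lookup of whatever InputStream the host application bound under that variable's name, and how that binding happens is outside this excerpt. Roughly, assuming Variable exposes its canonical name via the camelName field, the call amounts to:

// Rough equivalent of DataContext.Variable.STDIN.get(root) in scan() above:
// a lookup by the variable's canonical name, cast to the expected type.
InputStream is = (InputStream) root.get(DataContext.Variable.STDIN.camelName);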
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.config.CalciteConnectionConfig in project calcite by apache.
The class VmstatTableFunction, method eval.
public static ScannableTable eval(boolean b) {
return new ScannableTable() {
public Enumerable<Object[]> scan(DataContext root) {
final RelDataType rowType = getRowType(root.getTypeFactory());
final List<String> fieldNames = ImmutableList.copyOf(rowType.getFieldNames());
final String[] args;
final String osName = System.getProperty("os.name");
final String osVersion = System.getProperty("os.version");
Util.discard(osVersion);
// Could do this here too..
switch (osName) {
case "Mac OS X":  // tested on version 10.11.6
  args = new String[] { "/bin/sh", "-c",
      "vm_stat | tail -n +2 | awk '{print $NF}' | sed 's/\\.//' | tr '\\n' ' '" };
  break;
default:
  args = new String[] { "/bin/sh", "-c", "vmstat -n | tail -n +3" };
}
return Processes.processLines(args).select(new Function1<String, Object[]>() {
public Object[] apply(String line) {
final String[] fields = line.trim().split("\\s+");
final Object[] values = new Object[fieldNames.size()];
for (int i = 0; i < values.length; i++) {
try {
values[i] = field(fieldNames.get(i), fields[i]);
} catch (RuntimeException e) {
e.printStackTrace(System.out);
throw new RuntimeException("while parsing value [" + fields[i] + "] of field [" + fieldNames.get(i) + "] in line [" + line + "]");
}
}
return values;
}
private Object field(String field, String value) {
if (value.isEmpty()) {
return 0;
}
if (value.endsWith(".")) {
  // strip the trailing "." that macOS vm_stat prints after each number
  return Long.parseLong(value.substring(0, value.length() - 1));
}
return Long.parseLong(value);
}
});
}
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
final String osName = System.getProperty("os.name");
final RelDataTypeFactory.Builder builder = typeFactory.builder();
switch(osName) {
case "Mac OS X":
return builder
    .add("pages_free", SqlTypeName.BIGINT)
    .add("pages_active", SqlTypeName.BIGINT)
    .add("pages_inactive", SqlTypeName.BIGINT)
    .add("pages_speculative", SqlTypeName.BIGINT)
    .add("pages_throttled", SqlTypeName.BIGINT)
    .add("pages_wired_down", SqlTypeName.BIGINT)
    .add("pages_purgeable", SqlTypeName.BIGINT)
    .add("translation_faults", SqlTypeName.BIGINT)
    .add("pages_copy_on_write", SqlTypeName.BIGINT)
    .add("pages_zero_filed", SqlTypeName.BIGINT)
    .add("pages_reactivated", SqlTypeName.BIGINT)
    .add("pages_purged", SqlTypeName.BIGINT)
    .add("pages_file_backed", SqlTypeName.BIGINT)
    .add("pages_anonymous", SqlTypeName.BIGINT)
    .add("pages_stored_compressor", SqlTypeName.BIGINT)
    .add("pages_occupied_compressor", SqlTypeName.BIGINT)
    .add("decompressions", SqlTypeName.BIGINT)
    .add("compressions", SqlTypeName.BIGINT)
    .add("pageins", SqlTypeName.BIGINT)
    .add("pageouts", SqlTypeName.BIGINT)
    .add("swapins", SqlTypeName.BIGINT)
    .add("swapouts", SqlTypeName.BIGINT)
    .build();
default:
return builder
    .add("proc_r", SqlTypeName.BIGINT)
    .add("proc_b", SqlTypeName.BIGINT)
    .add("mem_swpd", SqlTypeName.BIGINT)
    .add("mem_free", SqlTypeName.BIGINT)
    .add("mem_buff", SqlTypeName.BIGINT)
    .add("mem_cache", SqlTypeName.BIGINT)
    .add("swap_si", SqlTypeName.BIGINT)
    .add("swap_so", SqlTypeName.BIGINT)
    .add("io_bi", SqlTypeName.BIGINT)
    .add("io_bo", SqlTypeName.BIGINT)
    .add("system_in", SqlTypeName.BIGINT)
    .add("system_cs", SqlTypeName.BIGINT)
    .add("cpu_us", SqlTypeName.BIGINT)
    .add("cpu_sy", SqlTypeName.BIGINT)
    .add("cpu_id", SqlTypeName.BIGINT)
    .add("cpu_wa", SqlTypeName.BIGINT)
    .add("cpu_st", SqlTypeName.BIGINT)
    .build();
}
}
public Statistic getStatistic() {
return Statistics.of(1000d, ImmutableList.of(ImmutableBitSet.of(1)));
}
public Schema.TableType getJdbcTableType() {
return Schema.TableType.TABLE;
}
public boolean isRolledUp(String column) {
return false;
}
public boolean rolledUpColumnValidInsideAgg(String column, SqlCall call, SqlNode parent, CalciteConnectionConfig config) {
return true;
}
};
}
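Registered the same way as the FILES sketch earlier (again assuming un-vendored packages and the illustrative OS schema, and reusing the connection and statement from that sketch), the function could be queried like this; the cpu_* columns come from the Linux row type shown above:

// Illustrative only; builds on the registration sketch after FilesTableFunction.eval.
os.add("VMSTAT", TableFunctionImpl.create(VmstatTableFunction.class, "eval"));
try (Statement statement = connection.createStatement();
     ResultSet resultSet = statement.executeQuery(
         "select \"cpu_us\", \"cpu_sy\", \"cpu_id\" from table(os.vmstat(true))")) {
  while (resultSet.next()) {
    System.out.println(resultSet.getLong(1) + " " + resultSet.getLong(2) + " " + resultSet.getLong(3));
  }
}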
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.config.CalciteConnectionConfig in project calcite by apache.
The class VolcanoPlanner, method registerMaterializations.
private void registerMaterializations() {
// Avoid using materializations while populating materializations!
final CalciteConnectionConfig config = context.unwrap(CalciteConnectionConfig.class);
if (config == null || !config.materializationsEnabled()) {
return;
}
// Register rels using materialized views.
final List<Pair<RelNode, List<RelOptMaterialization>>> materializationUses = RelOptMaterializations.useMaterializedViews(originalRoot, materializations);
for (Pair<RelNode, List<RelOptMaterialization>> use : materializationUses) {
RelNode rel = use.left;
Hook.SUB.run(rel);
registerImpl(rel, root.set);
}
// Register table rels of materialized views for which no substitution was found
// in the root rel transformation but which may still prove useful.
final Set<RelOptMaterialization> applicableMaterializations = new HashSet<>(RelOptMaterializations.getApplicableMaterializations(originalRoot, materializations));
for (Pair<RelNode, List<RelOptMaterialization>> use : materializationUses) {
applicableMaterializations.removeAll(use.right);
}
for (RelOptMaterialization materialization : applicableMaterializations) {
RelSubset subset = registerImpl(materialization.queryRel, null);
RelNode tableRel2 = RelOptUtil.createCastRel(materialization.tableRel, materialization.queryRel.getRowType(), true);
registerImpl(tableRel2, subset.set);
}
// Register rels using lattices.
final List<Pair<RelNode, RelOptLattice>> latticeUses = RelOptMaterializations.useLattices(originalRoot, ImmutableList.copyOf(latticeByName.values()));
if (!latticeUses.isEmpty()) {
RelNode rel = latticeUses.get(0).left;
Hook.SUB.run(rel);
registerImpl(rel, root.set);
}
}
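The early return is driven by CalciteConnectionConfig.materializationsEnabled(), which in a normal JDBC setup comes from the materializationsEnabled connection property. A minimal sketch of enabling it, assuming the standard Calcite JDBC URL:

// Sketch: turn materialization use on for a connection.
Properties info = new Properties();
info.setProperty(CalciteConnectionProperty.MATERIALIZATIONS_ENABLED.camelName(), "true");
Connection connection = DriverManager.getConnection("jdbc:calcite:", info);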