Java source code examples: org.apache.calcite.prepare.CalciteCatalogReader
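The examples below appear to be drawn from open-source projects (Apache Flink, Apache Storm SQL, and others) and illustrate common ways a CalciteCatalogReader is constructed and then plugged into SQL parsing, validation, and SQL-to-RelNode conversion.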
Example 1
/**
 * Creates a catalog reader that contains a single {@link Table} with temporary table name
 * and specified {@code rowType}.
 *
 * @param rowType table row type
 * @return the {@link CalciteCatalogReader} instance
 */
private static CalciteCatalogReader createSingleTableCatalogReader(
        boolean lenientCaseSensitivity,
        FrameworkConfig config,
        FlinkTypeFactory typeFactory,
        RelDataType rowType) {
    // connection properties
    boolean caseSensitive = !lenientCaseSensitivity && config.getParserConfig().caseSensitive();
    Properties properties = new Properties();
    properties.put(
            CalciteConnectionProperty.CASE_SENSITIVE.camelName(),
            String.valueOf(caseSensitive));
    CalciteConnectionConfig connectionConfig = new CalciteConnectionConfigImpl(properties);

    // prepare root schema
    final RowTypeSpecifiedTable table = new RowTypeSpecifiedTable(rowType);
    final Map<String, Table> tableMap = Collections.singletonMap(TEMPORARY_TABLE_NAME, table);
    CalciteSchema schema = CalciteSchemaBuilder.asRootSchema(new TableSpecifiedSchema(tableMap));

    return new FlinkCalciteCatalogReader(
            schema,
            new ArrayList<>(new ArrayList<>()),
            typeFactory,
            connectionConfig);
}
Example 2
private RelNode toRel(RelOptCluster cluster, SchemaPlus rootSchema,
        SchemaPlus defaultSchema, String sql) throws SqlParseException {
    final SqlParser parser = SqlParser.create(sql, SqlParser.Config.DEFAULT);
    final SqlNode parsed = parser.parseStmt();
    final CalciteCatalogReader catalogReader = new CalciteCatalogReader(
            CalciteSchema.from(rootSchema),
            CalciteSchema.from(defaultSchema).path(null),
            new JavaTypeFactoryImpl(), new CalciteConnectionConfigImpl(new Properties()));
    final SqlValidator validator = new ValidatorForTest(SqlStdOperatorTable.instance(),
            catalogReader, new JavaTypeFactoryImpl(), SqlConformanceEnum.DEFAULT);
    final SqlNode validated = validator.validate(parsed);
    final SqlToRelConverter.Config config = SqlToRelConverter.configBuilder()
            .withTrimUnusedFields(true)
            .withExpand(true)
            .withDecorrelationEnabled(true)
            .build();
    final SqlToRelConverter converter = new SqlToRelConverter(
            (rowType, queryString, schemaPath, viewPath) -> {
                throw new UnsupportedOperationException("cannot expand view");
            }, validator, catalogReader, cluster, StandardConvertletTable.INSTANCE, config);
    return converter.convertQuery(validated, false, true).rel;
}
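For orientation, the construction pattern shared by Examples 2, 10, 12 and 13 can be reduced to the following minimal, self-contained sketch. It is an illustrative assumption (empty root schema; the class and method names here are invented), not code from any of the projects quoted on this page:

import java.util.Collections;
import java.util.Properties;

import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.config.CalciteConnectionProperty;
import org.apache.calcite.jdbc.CalciteSchema;
import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.prepare.CalciteCatalogReader;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.tools.Frameworks;

public final class CatalogReaderSketch {

    public static CalciteCatalogReader createCatalogReader() {
        // Empty root schema; a real application would register tables and sub-schemas first.
        SchemaPlus rootSchema = Frameworks.createRootSchema(true);
        RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();

        // Connection properties drive name matching (case sensitivity, quoting, etc.).
        Properties properties = new Properties();
        properties.setProperty(
                CalciteConnectionProperty.CASE_SENSITIVE.camelName(), "false");
        CalciteConnectionConfig connectionConfig = new CalciteConnectionConfigImpl(properties);

        return new CalciteCatalogReader(
                CalciteSchema.from(rootSchema),   // root schema for name resolution
                Collections.emptyList(),          // default schema path (here: the root)
                typeFactory,
                connectionConfig);
    }
}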
Example 3
@Override
public RelRoot expandView(RelDataType rowType, String queryString,
        List<String> schemaPath, List<String> viewPath) {
    if (planner == null) {
        ready();
    }
    SqlParser parser = SqlParser.create(queryString, parserConfig);
    SqlNode sqlNode;
    try {
        sqlNode = parser.parseQuery();
    } catch (SqlParseException e) {
        throw new RuntimeException("parse failed", e);
    }

    final CalciteCatalogReader catalogReader =
            createCatalogReader().withSchemaPath(schemaPath);
    final SqlValidator validator = createSqlValidator(catalogReader);

    final RexBuilder rexBuilder = createRexBuilder();
    final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
    final SqlToRelConverter.Config config = SqlToRelConverter
            .configBuilder()
            .withConfig(sqlToRelConverterConfig)
            .withTrimUnusedFields(false)
            .build();
    final SqlToRelConverter sqlToRelConverter =
            new SqlToRelConverter(this, validator,
                    catalogReader, cluster, convertletTable, config);

    final RelRoot root =
            sqlToRelConverter.convertQuery(sqlNode, true, false);
    final RelRoot root2 =
            root.withRel(sqlToRelConverter.flattenTypes(root.rel, true));
    final RelBuilder relBuilder =
            config.getRelBuilderFactory().create(cluster, null);
    return root2.withRel(
            RelDecorrelator.decorrelateQuery(root.rel, relBuilder));
}
Example 4
private SqlValidator createSqlValidator(CalciteCatalogReader catalogReader) {
    final SqlOperatorTable opTab =
            ChainedSqlOperatorTable.of(operatorTable, catalogReader);
    return new CalciteSqlValidator(opTab,
            catalogReader,
            typeFactory,
            sqlValidatorConfig
                    .withDefaultNullCollation(connectionConfig.defaultNullCollation())
                    .withLenientOperatorLookup(connectionConfig.lenientOperatorLookup())
                    .withSqlConformance(connectionConfig.conformance())
                    .withIdentifierExpansion(true));
}
Example 5
/**
 * Builds a Calcite framework configuration for Calcite to parse SQL and generate a relational
 * tree out of it.
 * @return FrameworkConfig
 */
private FrameworkConfig buildFrameWorkConfig()
{
    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(SqlStdOperatorTable.instance());
    sqlOperatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema), false,
            Collections.<String>emptyList(), typeFactory));
    return Frameworks.newConfigBuilder().defaultSchema(schema)
            .parserConfig(SqlParser.configBuilder().setLex(Lex.MYSQL).build())
            .operatorTable(new ChainedSqlOperatorTable(sqlOperatorTables)).build();
}
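Note that Examples 5, 6, 7, 9 and 16 use an older CalciteCatalogReader constructor that takes a boolean caseSensitive flag and no connection config; newer Calcite releases deprecate that overload in favour of the config-based constructor seen in the other examples. A rough modern equivalent of the reader built above would be the following sketch (illustrative only, not code from the original project):

Properties properties = new Properties();
properties.setProperty(CalciteConnectionProperty.CASE_SENSITIVE.camelName(), "false");
CalciteCatalogReader catalogReader = new CalciteCatalogReader(
        CalciteSchema.from(schema),
        Collections.emptyList(),          // default schema path
        typeFactory,
        new CalciteConnectionConfigImpl(properties));
SqlOperatorTable opTab =
        ChainedSqlOperatorTable.of(SqlStdOperatorTable.instance(), catalogReader);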
Example 6
private FrameworkConfig buildFrameWorkConfig() {
    if (hasUdf) {
        List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
        sqlOperatorTables.add(SqlStdOperatorTable.instance());
        sqlOperatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema),
                false,
                Collections.<String>emptyList(), typeFactory));
        return Frameworks.newConfigBuilder().defaultSchema(schema)
                .operatorTable(new ChainedSqlOperatorTable(sqlOperatorTables)).build();
    } else {
        return Frameworks.newConfigBuilder().defaultSchema(schema).build();
    }
}
Example 7
public static CalciteState sqlOverDummyTable(String sql)
        throws RelConversionException, ValidationException, SqlParseException {
    SchemaPlus schema = Frameworks.createRootSchema(true);
    JavaTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
    StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
            .field("ID", SqlTypeName.INTEGER)
            .field("NAME", typeFactory.createType(String.class))
            .field("ADDR", typeFactory.createType(String.class))
            .build();
    Table table = streamableTable.stream();
    schema.add("FOO", table);
    schema.add("BAR", table);
    schema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));
    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(SqlStdOperatorTable.instance());
    sqlOperatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema),
            false,
            Collections.<String>emptyList(), typeFactory));
    SqlOperatorTable chainedSqlOperatorTable = new ChainedSqlOperatorTable(sqlOperatorTables);
    FrameworkConfig config = Frameworks.newConfigBuilder().defaultSchema(
            schema).operatorTable(chainedSqlOperatorTable).build();
    Planner planner = Frameworks.getPlanner(config);
    SqlNode parse = planner.parse(sql);
    SqlNode validate = planner.validate(parse);
    RelNode tree = planner.convert(validate);
    System.out.println(RelOptUtil.toString(tree, SqlExplainLevel.ALL_ATTRIBUTES));
    return new CalciteState(schema, tree);
}
Example 8
public QuarkTileTable(QuarkTile quarkTile, CalciteCatalogReader calciteCatalogReader,
        RelDataType relDataType, Path path, QuarkTable backingTable) {
    this.quarkTile = quarkTile;
    this.backingTable = backingTable;
    this.relOptTable = RelOptTableImpl.create(
            calciteCatalogReader,
            relDataType,
            this,
            path);
}
Example 9
private CalciteCatalogReader createCatalogReader(QueryContext context) {
    return new CalciteCatalogReader(
            CalciteSchema.from(context.getRootSchema()),
            false,
            context.getDefaultSchemaPath(),
            context.getTypeFactory());
}
Example 10
private static CalciteCatalogReader getCatalogReader(
        CalcitePrepare.Context context, boolean mutable) {
    return new CalciteCatalogReader(
            mutable ? context.getMutableRootSchema() : context.getRootSchema(),
            ImmutableList.of(),
            context.getTypeFactory(),
            new CalciteConnectionConfigImpl(new Properties()));
}
Example 11
/**
 * Creates a catalog reader that contains a single {@link Table} with temporary table name
 * and specified {@code rowType}.
 *
 * <p>Make this method public so that other systems can also use it.
 *
 * @param caseSensitive whether to match case sensitively
 * @param tableName table name to register with
 * @param typeFactory type factory
 * @param rowType table row type
 * @return the {@link CalciteCatalogReader} instance
 */
public static CalciteCatalogReader createSingleTableCatalogReader(
        boolean caseSensitive,
        String tableName,
        RelDataTypeFactory typeFactory,
        RelDataType rowType) {
    // connection properties
    Properties properties = new Properties();
    properties.put(
            CalciteConnectionProperty.CASE_SENSITIVE.camelName(),
            String.valueOf(caseSensitive));
    CalciteConnectionConfig connectionConfig = new CalciteConnectionConfigImpl(properties);

    // prepare root schema
    final ExplicitRowTypeTable table = new ExplicitRowTypeTable(rowType);
    final Map<String, Table> tableMap = Collections.singletonMap(tableName, table);
    CalciteSchema schema = CalciteSchema.createRootSchema(
            false,
            false,
            "",
            new ExplicitTableSchema(tableMap));

    return new CalciteCatalogReader(
            schema,
            new ArrayList<>(new ArrayList<>()),
            typeFactory,
            connectionConfig);
}
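A hypothetical usage sketch for the method above, assuming it is accessible on the classpath and that the config-based SqlValidatorUtil.newValidator overload (Calcite 1.23+) is available; the table name and row type here are invented for illustration:

static SqlNode validateAgainstRowType(String sql) throws SqlParseException {
    RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
    RelDataType rowType = typeFactory.builder()
            .add("ID", SqlTypeName.INTEGER)
            .add("NAME", SqlTypeName.VARCHAR)
            .build();
    // Catalog reader exposing a single table "T" with the row type above.
    CalciteCatalogReader catalogReader =
            createSingleTableCatalogReader(false, "T", typeFactory, rowType);
    SqlValidator validator = SqlValidatorUtil.newValidator(
            SqlStdOperatorTable.instance(), catalogReader, typeFactory,
            SqlValidator.Config.DEFAULT);
    return validator.validate(SqlParser.create(sql).parseQuery());
}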
Example 12
private static RelNode run(PropAction action, RuleSet rules)
        throws Exception {
    FrameworkConfig config = Frameworks.newConfigBuilder()
            .ruleSets(rules).build();
    final Properties info = new Properties();
    final Connection connection = DriverManager
            .getConnection("jdbc:calcite:", info);
    final CalciteServerStatement statement = connection
            .createStatement().unwrap(CalciteServerStatement.class);
    final CalcitePrepare.Context prepareContext =
            statement.createPrepareContext();
    final JavaTypeFactory typeFactory = prepareContext.getTypeFactory();
    CalciteCatalogReader catalogReader =
            new CalciteCatalogReader(prepareContext.getRootSchema(),
                    prepareContext.getDefaultSchemaPath(),
                    typeFactory,
                    prepareContext.config());
    final RexBuilder rexBuilder = new RexBuilder(typeFactory);
    final RelOptPlanner planner = new VolcanoPlanner(config.getCostFactory(),
            config.getContext());

    // set up rules before we generate cluster
    planner.clearRelTraitDefs();
    planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
    planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

    planner.clear();
    for (RelOptRule r : rules) {
        planner.addRule(r);
    }

    final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
    return action.apply(cluster, catalogReader,
            prepareContext.getRootSchema().plus());
}
Example 13
public TableEnv(TableConfig tableConfig) {
    try {
        this.tableConfig = tableConfig;
        SqlParser.Config sqlParserConfig = tableConfig.getSqlParserConfig() != null
                ? tableConfig.getSqlParserConfig()
                : SqlParser.configBuilder().setCaseSensitive(false).build();
        SqlOperatorTable sqlStdOperatorTable = tableConfig.getSqlOperatorTable() != null
                ? tableConfig.getSqlOperatorTable()
                : ChainedSqlOperatorTable.of(SqlStdOperatorTable.instance());
        CalciteConnectionConfig calciteConnectionConfig =
                tableConfig.getCalciteConnectionConfig() != null
                        ? tableConfig.getCalciteConnectionConfig()
                        : createDefaultConnectionConfig(sqlParserConfig);
        RelDataTypeSystem typeSystem = tableConfig.getRelDataTypeSystem() != null
                ? tableConfig.getRelDataTypeSystem()
                : calciteConnectionConfig.typeSystem(RelDataTypeSystem.class,
                        RelDataTypeSystem.DEFAULT);
        SqlRexConvertletTable convertletTable = tableConfig.getConvertletTable() != null
                ? tableConfig.getConvertletTable()
                : StandardConvertletTable.INSTANCE;
        RexExecutor rexExecutor = tableConfig.getRexExecutor() != null
                ? tableConfig.getRexExecutor()
                : RexUtil.EXECUTOR;
        this.calciteCatalogReader = new CalciteCatalogReader(
                CalciteSchema.from(rootSchema),
                CalciteSchema.from(rootSchema).path(null),
                new JavaTypeFactoryImpl(typeSystem),
                calciteConnectionConfig);
        this.frameworkConfig = createFrameworkConfig(sqlParserConfig,
                ChainedSqlOperatorTable.of(sqlStdOperatorTable, calciteCatalogReader),
                convertletTable, calciteConnectionConfig, typeSystem, rexExecutor);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Example 14
public CalciteCatalogReader createCatalogReader() {
    return createCalciteCatalogReader();
}
Example 15
CalciteSqlValidator(SqlOperatorTable opTab,
        CalciteCatalogReader catalogReader, JavaTypeFactory typeFactory,
        Config config) {
    super(opTab, catalogReader, typeFactory, config);
}
Example 16
public static CalciteState sqlOverNestedTable(String sql)
        throws RelConversionException, ValidationException, SqlParseException {
    SchemaPlus schema = Frameworks.createRootSchema(true);
    JavaTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
    StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
            .field("ID", SqlTypeName.INTEGER)
            .field("MAPFIELD",
                    typeFactory.createTypeWithNullability(
                            typeFactory.createMapType(
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.INTEGER), true)),
                            true))
            .field("NESTEDMAPFIELD",
                    typeFactory.createTypeWithNullability(
                            typeFactory.createMapType(
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createMapType(
                                                    typeFactory.createTypeWithNullability(
                                                            typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                                    typeFactory.createTypeWithNullability(
                                                            typeFactory.createSqlType(SqlTypeName.INTEGER), true)),
                                            true)),
                            true))
            .field("ARRAYFIELD",
                    typeFactory.createTypeWithNullability(
                            typeFactory.createArrayType(
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.INTEGER), true), -1L),
                            true))
            .build();
    Table table = streamableTable.stream();
    schema.add("FOO", table);
    schema.add("BAR", table);
    schema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));
    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(SqlStdOperatorTable.instance());
    sqlOperatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema),
            false,
            Collections.<String>emptyList(), typeFactory));
    SqlOperatorTable chainedSqlOperatorTable = new ChainedSqlOperatorTable(sqlOperatorTables);
    FrameworkConfig config = Frameworks.newConfigBuilder().defaultSchema(
            schema).operatorTable(chainedSqlOperatorTable).build();
    Planner planner = Frameworks.getPlanner(config);
    SqlNode parse = planner.parse(sql);
    SqlNode validate = planner.validate(parse);
    RelNode tree = planner.convert(validate);
    System.out.println(RelOptUtil.toString(tree, SqlExplainLevel.ALL_ATTRIBUTES));
    return new CalciteState(schema, tree);
}