public class HoodieTableSource extends java.lang.Object implements org.apache.flink.table.connector.source.ScanTableSource, org.apache.flink.table.connector.source.abilities.SupportsPartitionPushDown, org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown, org.apache.flink.table.connector.source.abilities.SupportsLimitPushDown, org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown
Nested classes/interfaces inherited from interface org.apache.flink.table.connector.source.ScanTableSource: org.apache.flink.table.connector.source.ScanTableSource.ScanContext, org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider

| Constructor and Description |
|---|
HoodieTableSource(org.apache.flink.table.catalog.ResolvedSchema schema,
org.apache.hadoop.fs.Path path,
List<String> partitionKeys,
String defaultPartName,
org.apache.flink.configuration.Configuration conf) |
HoodieTableSource(org.apache.flink.table.catalog.ResolvedSchema schema,
org.apache.hadoop.fs.Path path,
List<String> partitionKeys,
String defaultPartName,
org.apache.flink.configuration.Configuration conf,
List<Map<String,String>> requiredPartitions,
int[] requiredPos,
Long limit,
List<org.apache.flink.table.expressions.ResolvedExpression> filters) |
| Modifier and Type | Method and Description |
|---|---|
org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown.Result |
applyFilters(List<org.apache.flink.table.expressions.ResolvedExpression> filters) |
void |
applyLimit(long limit) |
void |
applyPartitions(List<Map<String,String>> partitions) |
void |
applyProjection(int[][] projections) |
String |
asSummaryString() |
org.apache.flink.table.connector.source.DynamicTableSource |
copy() |
org.apache.flink.table.connector.ChangelogMode |
getChangelogMode() |
org.apache.flink.configuration.Configuration |
getConf() |
org.apache.flink.api.common.io.InputFormat<org.apache.flink.table.data.RowData,?> |
getInputFormat() |
org.apache.flink.api.common.io.InputFormat<org.apache.flink.table.data.RowData,?> |
getInputFormat(boolean isStreaming) |
HoodieTableMetaClient |
getMetaClient() |
org.apache.hadoop.fs.FileStatus[] |
getReadFiles()
Get the reader paths with partition path expanded.
|
org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider |
getScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanContext scanContext) |
org.apache.avro.Schema |
getTableAvroSchema() |
Optional<List<Map<String,String>>> |
listPartitions() |
void |
reset()
Reset the state of the table source.
|
boolean |
supportsNestedProjection() |
public HoodieTableSource(org.apache.flink.table.catalog.ResolvedSchema schema,
org.apache.hadoop.fs.Path path,
List<String> partitionKeys,
String defaultPartName,
org.apache.flink.configuration.Configuration conf)
public HoodieTableSource(org.apache.flink.table.catalog.ResolvedSchema schema,
org.apache.hadoop.fs.Path path,
List<String> partitionKeys,
String defaultPartName,
org.apache.flink.configuration.Configuration conf,
@Nullable
List<Map<String,String>> requiredPartitions,
@Nullable
int[] requiredPos,
@Nullable
Long limit,
@Nullable
List<org.apache.flink.table.expressions.ResolvedExpression> filters)
public org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider getScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanContext scanContext)
getScanRuntimeProvider in interface org.apache.flink.table.connector.source.ScanTableSource

public org.apache.flink.table.connector.ChangelogMode getChangelogMode()
getChangelogMode in interface org.apache.flink.table.connector.source.ScanTableSource

public org.apache.flink.table.connector.source.DynamicTableSource copy()
copy in interface org.apache.flink.table.connector.source.DynamicTableSource

public String asSummaryString()
asSummaryString in interface org.apache.flink.table.connector.source.DynamicTableSource

public org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown.Result applyFilters(List<org.apache.flink.table.expressions.ResolvedExpression> filters)
applyFilters in interface org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown

public Optional<List<Map<String,String>>> listPartitions()
listPartitions in interface org.apache.flink.table.connector.source.abilities.SupportsPartitionPushDown

public void applyPartitions(List<Map<String,String>> partitions)
applyPartitions in interface org.apache.flink.table.connector.source.abilities.SupportsPartitionPushDown

public boolean supportsNestedProjection()
supportsNestedProjection in interface org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown

public void applyProjection(int[][] projections)
applyProjection in interface org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown

public void applyLimit(long limit)
applyLimit in interface org.apache.flink.table.connector.source.abilities.SupportsLimitPushDown

public org.apache.flink.api.common.io.InputFormat<org.apache.flink.table.data.RowData,?> getInputFormat()
@VisibleForTesting public org.apache.flink.api.common.io.InputFormat<org.apache.flink.table.data.RowData,?> getInputFormat(boolean isStreaming)
@VisibleForTesting public org.apache.avro.Schema getTableAvroSchema()
@VisibleForTesting public HoodieTableMetaClient getMetaClient()
@VisibleForTesting public org.apache.flink.configuration.Configuration getConf()
@VisibleForTesting public void reset()
@VisibleForTesting public org.apache.hadoop.fs.FileStatus[] getReadFiles()
Copyright © 2022 The Apache Software Foundation. All rights reserved.