public class HoodieTableSource extends Object implements org.apache.flink.table.connector.source.ScanTableSource, org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown, org.apache.flink.table.connector.source.abilities.SupportsLimitPushDown, org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown

Nested classes/interfaces inherited from interface org.apache.flink.table.connector.source.ScanTableSource: org.apache.flink.table.connector.source.ScanTableSource.ScanContext, org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider

| Constructor and Description |
|---|
| HoodieTableSource(org.apache.flink.table.catalog.ResolvedSchema schema, org.apache.hadoop.fs.Path path, List<String> partitionKeys, String defaultPartName, org.apache.flink.configuration.Configuration conf) |
| HoodieTableSource(org.apache.flink.table.catalog.ResolvedSchema schema, org.apache.hadoop.fs.Path path, List<String> partitionKeys, String defaultPartName, org.apache.flink.configuration.Configuration conf, List<ExpressionPredicates.Predicate> predicates, DataPruner dataPruner, PartitionPruners.PartitionPruner partitionPruner, int dataBucket, int[] requiredPos, Long limit, HoodieTableMetaClient metaClient, InternalSchemaManager internalSchemaManager) |
| Modifier and Type | Method and Description |
|---|---|
| org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown.Result | applyFilters(List<org.apache.flink.table.expressions.ResolvedExpression> filters) |
| void | applyLimit(long limit) |
| void | applyProjection(int[][] projections) |
| String | asSummaryString() |
| org.apache.flink.table.connector.source.DynamicTableSource | copy() |
| org.apache.flink.table.connector.ChangelogMode | getChangelogMode() |
| org.apache.flink.configuration.Configuration | getConf() |
| int | getDataBucket() |
| DataPruner | getDataPruner() |
| org.apache.flink.api.common.io.InputFormat<org.apache.flink.table.data.RowData,?> | getInputFormat() |
| org.apache.flink.api.common.io.InputFormat<org.apache.flink.table.data.RowData,?> | getInputFormat(boolean isStreaming) |
| HoodieTableMetaClient | getMetaClient() |
| List<ExpressionPredicates.Predicate> | getPredicates() |
| org.apache.hadoop.fs.FileStatus[] | getReadFiles() — Get the reader paths with partition path expanded. |
| org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider | getScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanContext scanContext) |
| org.apache.avro.Schema | getTableAvroSchema() |
| void | reset() — Reset the state of the table source. |
| boolean | supportsNestedProjection() |
public HoodieTableSource(org.apache.flink.table.catalog.ResolvedSchema schema,
org.apache.hadoop.fs.Path path,
List<String> partitionKeys,
String defaultPartName,
org.apache.flink.configuration.Configuration conf)
public HoodieTableSource(org.apache.flink.table.catalog.ResolvedSchema schema,
org.apache.hadoop.fs.Path path,
List<String> partitionKeys,
String defaultPartName,
org.apache.flink.configuration.Configuration conf,
@Nullable
List<ExpressionPredicates.Predicate> predicates,
@Nullable
DataPruner dataPruner,
@Nullable
PartitionPruners.PartitionPruner partitionPruner,
int dataBucket,
@Nullable
int[] requiredPos,
@Nullable
Long limit,
@Nullable
HoodieTableMetaClient metaClient,
@Nullable
InternalSchemaManager internalSchemaManager)
public org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider getScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanContext scanContext)
getScanRuntimeProvider in interface org.apache.flink.table.connector.source.ScanTableSource

public org.apache.flink.table.connector.ChangelogMode getChangelogMode()
getChangelogMode in interface org.apache.flink.table.connector.source.ScanTableSource

public org.apache.flink.table.connector.source.DynamicTableSource copy()
copy in interface org.apache.flink.table.connector.source.DynamicTableSource

public String asSummaryString()
asSummaryString in interface org.apache.flink.table.connector.source.DynamicTableSource

public org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown.Result applyFilters(List<org.apache.flink.table.expressions.ResolvedExpression> filters)
applyFilters in interface org.apache.flink.table.connector.source.abilities.SupportsFilterPushDown

public boolean supportsNestedProjection()
supportsNestedProjection in interface org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown

public void applyProjection(int[][] projections)
applyProjection in interface org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown

public void applyLimit(long limit)
applyLimit in interface org.apache.flink.table.connector.source.abilities.SupportsLimitPushDown

public org.apache.flink.api.common.io.InputFormat<org.apache.flink.table.data.RowData,?> getInputFormat()
@VisibleForTesting public org.apache.flink.api.common.io.InputFormat<org.apache.flink.table.data.RowData,?> getInputFormat(boolean isStreaming)
@VisibleForTesting public org.apache.avro.Schema getTableAvroSchema()
@VisibleForTesting public HoodieTableMetaClient getMetaClient()
@VisibleForTesting public org.apache.flink.configuration.Configuration getConf()
@VisibleForTesting public void reset()
@VisibleForTesting public org.apache.hadoop.fs.FileStatus[] getReadFiles()
@VisibleForTesting public List<ExpressionPredicates.Predicate> getPredicates()
@VisibleForTesting public DataPruner getDataPruner()
@VisibleForTesting public int getDataBucket()
Copyright © 2023 The Apache Software Foundation. All rights reserved.