| Package | Description |
|---|---|
| org.apache.hudi.source | |
| org.apache.hudi.table.format | |
| org.apache.hudi.table.format.cdc | |
| org.apache.hudi.table.format.mor | |
| Modifier and Type | Method and Description |
|---|---|
static org.apache.flink.streaming.api.operators.OneInputStreamOperatorFactory<MergeOnReadInputSplit,org.apache.flink.table.data.RowData> |
StreamReadOperator.factory(MergeOnReadInputFormat format) |
List<MergeOnReadInputSplit> |
IncrementalInputSplits.Result.getInputSplits() |
| Modifier and Type | Method and Description |
|---|---|
static IncrementalInputSplits.Result |
IncrementalInputSplits.Result.instance(List<MergeOnReadInputSplit> inputSplits,
String endInstant) |
static IncrementalInputSplits.Result |
IncrementalInputSplits.Result.instance(List<MergeOnReadInputSplit> inputSplits,
String endInstant,
String offset) |
void |
StreamReadMonitoringFunction.monitorDirAndForwardSplits(org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext<MergeOnReadInputSplit> context) |
void |
StreamReadOperator.processElement(org.apache.flink.streaming.runtime.streamrecord.StreamRecord<MergeOnReadInputSplit> element) |
void |
StreamReadMonitoringFunction.run(org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext<MergeOnReadInputSplit> context) |
| Modifier and Type | Method and Description |
|---|---|
static HoodieMergedLogRecordScanner |
FormatUtils.logScanner(MergeOnReadInputSplit split,
org.apache.avro.Schema logSchema,
InternalSchema internalSchema,
org.apache.flink.configuration.Configuration flinkConf,
org.apache.hadoop.conf.Configuration hadoopConf) |
| Constructor and Description |
|---|
BoundedMemoryRecords(MergeOnReadInputSplit split,
org.apache.avro.Schema logSchema,
InternalSchema internalSchema,
org.apache.hadoop.conf.Configuration hadoopConf,
org.apache.flink.configuration.Configuration flinkConf) |
| Modifier and Type | Class and Description |
|---|---|
| class | CdcInputSplit — Represents an input split of the source; in practice, a data bucket. |
| Modifier and Type | Method and Description |
|---|---|
static MergeOnReadInputSplit |
CdcInputFormat.fileSlice2Split(String tablePath,
FileSlice fileSlice,
long maxCompactionMemoryInBytes) |
| Modifier and Type | Method and Description |
|---|---|
protected ClosableIterator<org.apache.flink.table.data.RowData> |
CdcInputFormat.initIterator(MergeOnReadInputSplit split) |
| Modifier and Type | Method and Description |
|---|---|
MergeOnReadInputSplit[] |
MergeOnReadInputFormat.createInputSplits(int minNumSplits) |
| Modifier and Type | Method and Description |
|---|---|
List<MergeOnReadInputSplit> |
MergeOnReadTableState.getInputSplits() |
| Modifier and Type | Method and Description |
|---|---|
protected ClosableIterator<org.apache.flink.table.data.RowData> |
MergeOnReadInputFormat.getFullLogFileIterator(MergeOnReadInputSplit split) |
org.apache.flink.core.io.InputSplitAssigner |
MergeOnReadInputFormat.getInputSplitAssigner(MergeOnReadInputSplit[] mergeOnReadInputSplits) |
protected ClosableIterator<org.apache.flink.table.data.RowData> |
MergeOnReadInputFormat.initIterator(MergeOnReadInputSplit split) |
void |
MergeOnReadInputFormat.open(MergeOnReadInputSplit split) |
| Constructor and Description |
|---|
MergeIterator(org.apache.flink.configuration.Configuration flinkConf,
org.apache.hadoop.conf.Configuration hadoopConf,
MergeOnReadInputSplit split,
org.apache.flink.table.types.logical.RowType tableRowType,
org.apache.flink.table.types.logical.RowType requiredRowType,
org.apache.avro.Schema tableSchema,
InternalSchema querySchema,
Option<RowDataProjection> projection,
Option<Function<org.apache.avro.generic.IndexedRecord,org.apache.avro.generic.GenericRecord>> avroProjection,
boolean emitDelete,
int operationPos,
ClosableIterator<org.apache.flink.table.data.RowData> nested) |
MergeIterator(org.apache.flink.configuration.Configuration flinkConf,
org.apache.hadoop.conf.Configuration hadoopConf,
MergeOnReadInputSplit split,
org.apache.flink.table.types.logical.RowType tableRowType,
org.apache.flink.table.types.logical.RowType requiredRowType,
org.apache.avro.Schema tableSchema,
org.apache.avro.Schema requiredSchema,
InternalSchema querySchema,
int[] requiredPos,
boolean emitDelete,
int operationPos,
ClosableIterator<org.apache.flink.table.data.RowData> nested) |
| Constructor and Description |
|---|
MergeOnReadTableState(org.apache.flink.table.types.logical.RowType rowType,
org.apache.flink.table.types.logical.RowType requiredRowType,
String avroSchema,
String requiredAvroSchema,
List<MergeOnReadInputSplit> inputSplits,
String[] pkFields) |
Copyright © 2023 The Apache Software Foundation. All rights reserved.