public abstract class LongHybridHashTable extends BaseHybridHashTable

See LongHashPartition. TODO: add a min/max long filter and a Bloom filter to spilled
partitions.
Fields inherited from class BaseHybridHashTable: buildRowCount, buildSpillRetBufferNumbers, buildSpillReturnBuffers, closed, compressionBlockSize, compressionCodecFactory, compressionEnabled, currentEnumerator, currentRecursionDepth, currentSpilledBuildSide, currentSpilledProbeSide, initPartitionFanOut, internalPool, ioManager, LOG, MAX_NUM_PARTITIONS, MAX_RECURSION_DEPTH, numSpillFiles, segmentSize, segmentSizeBits, segmentSizeMask, spillInBytes, totalNumBuffers, tryDistinctBuildRow

| Constructor and Description |
|---|
LongHybridHashTable(Object owner,
boolean compressionEnabled,
int compressionBlockSize,
BinaryRowDataSerializer buildSideSerializer,
BinaryRowDataSerializer probeSideSerializer,
org.apache.flink.runtime.memory.MemoryManager memManager,
long reservedMemorySize,
org.apache.flink.runtime.io.disk.iomanager.IOManager ioManager,
int avgRecordLen,
long buildRowCount) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
clearPartitions() |
void |
close()
Closes the hash table.
|
int |
compressionBlockSize() |
org.apache.flink.runtime.io.compression.BlockCompressionFactory |
compressionCodecFactory() |
boolean |
compressionEnabled() |
void |
endBuild() |
void |
free() |
RowIterator<org.apache.flink.table.data.binary.BinaryRowData> |
get(long probeKey)
This method is only used for operator fusion codegen to get build row from hash table.
|
abstract long |
getBuildLongKey(org.apache.flink.table.data.RowData row)
For code gen get build side long key.
|
LongHashPartition.MatchIterator |
getBuildSideIterator() |
org.apache.flink.table.data.RowData |
getCurrentProbeRow() |
List<LongHashPartition> |
getPartitionsPendingForSMJ() |
abstract long |
getProbeLongKey(org.apache.flink.table.data.RowData row)
For code gen get probe side long key.
|
RowIterator |
getSpilledPartitionBuildSideIter(LongHashPartition p) |
ProbeIterator |
getSpilledPartitionProbeSideIter(LongHashPartition p) |
void |
insertIntoProbeBuffer(org.apache.flink.table.data.RowData probeRecord)
If the probe row corresponding partition has been spilled to disk, just call this method
spill probe row to disk.
|
boolean |
nextMatching() |
abstract org.apache.flink.table.data.binary.BinaryRowData |
probeToBinary(org.apache.flink.table.data.RowData row)
For code gen probe side to BinaryRowData.
|
void |
putBuildRow(org.apache.flink.table.data.binary.BinaryRowData row) |
int |
spillPartition() |
boolean |
tryProbe(org.apache.flink.table.data.RowData record) |
Methods inherited from class BaseHybridHashTable: createInputView, ensureNumBuffersReturned, freeCurrent, freePages, getNextBuffer, getNextBuffers, getNotNullNextBuffer, getNumSpillFiles, getSpillInBytes, getUsedMemoryInBytes, hash, maxInitBufferOfBucketArea, maxNumPartition, nextSegment, pageSize, readAllBuffers, releaseMemoryCacheForSMJ, remainBuffers, returnAll, returnPage

public LongHybridHashTable(Object owner, boolean compressionEnabled, int compressionBlockSize, BinaryRowDataSerializer buildSideSerializer, BinaryRowDataSerializer probeSideSerializer, org.apache.flink.runtime.memory.MemoryManager memManager, long reservedMemorySize, org.apache.flink.runtime.io.disk.iomanager.IOManager ioManager, int avgRecordLen, long buildRowCount)
public void putBuildRow(org.apache.flink.table.data.binary.BinaryRowData row)
throws IOException
Throws: IOException

public void endBuild()
                throws IOException
Throws: IOException

@Nullable
public final RowIterator<org.apache.flink.table.data.binary.BinaryRowData> get(long probeKey) throws IOException
Throws: IOException

public final void insertIntoProbeBuffer(org.apache.flink.table.data.RowData probeRecord)
                throws IOException
Note: this must be called only after the get(long) method.
Throws: IOException

public boolean tryProbe(org.apache.flink.table.data.RowData record)
                throws IOException
Throws: IOException

public boolean nextMatching()
                throws IOException
Throws: IOException

public org.apache.flink.table.data.RowData getCurrentProbeRow()
public LongHashPartition.MatchIterator getBuildSideIterator()
public void close()
Overrides: close in class BaseHybridHashTable

public void free()
Overrides: free in class BaseHybridHashTable

public abstract long getBuildLongKey(org.apache.flink.table.data.RowData row)
public abstract long getProbeLongKey(org.apache.flink.table.data.RowData row)
public abstract org.apache.flink.table.data.binary.BinaryRowData probeToBinary(org.apache.flink.table.data.RowData row)
public int spillPartition()
throws IOException
Overrides: spillPartition in class BaseHybridHashTable
Throws: IOException

public List<LongHashPartition> getPartitionsPendingForSMJ()
public RowIterator getSpilledPartitionBuildSideIter(LongHashPartition p) throws IOException
Throws: IOException

public ProbeIterator getSpilledPartitionProbeSideIter(LongHashPartition p) throws IOException
Throws: IOException

protected void clearPartitions()
Overrides: clearPartitions in class BaseHybridHashTable

public boolean compressionEnabled()
public org.apache.flink.runtime.io.compression.BlockCompressionFactory compressionCodecFactory()
public int compressionBlockSize()
Copyright © 2014–2024 The Apache Software Foundation. All rights reserved.