public class HiveTableOperations
extends org.apache.iceberg.BaseMetastoreTableOperations
| Modifier and Type | Field and Description |
|---|---|
static java.lang.String |
HIVE_TABLE_PROPERTY_MAX_SIZE |
static long |
HIVE_TABLE_PROPERTY_MAX_SIZE_DEFAULT |
static java.lang.String |
NO_LOCK_EXPECTED_KEY |
static java.lang.String |
NO_LOCK_EXPECTED_VALUE |
| Modifier | Constructor and Description |
|---|---|
protected |
HiveTableOperations(org.apache.hadoop.conf.Configuration conf,
org.apache.iceberg.ClientPool<org.apache.hadoop.hive.metastore.IMetaStoreClient,org.apache.thrift.TException> metaClients,
org.apache.iceberg.io.FileIO fileIO,
java.lang.String catalogName,
java.lang.String database,
java.lang.String table) |
| Modifier and Type | Method and Description |
|---|---|
static void |
cleanupMetadata(org.apache.iceberg.io.FileIO io,
java.lang.String commitStatus,
java.lang.String metadataLocation) |
java.lang.String |
database() |
protected void |
doCommit(org.apache.iceberg.TableMetadata base,
org.apache.iceberg.TableMetadata metadata) |
protected void |
doRefresh() |
default boolean |
exposeInHmsProperties() |
default java.util.Map<java.lang.String,java.lang.String> |
hmsEnvContext(java.lang.String metadataLocation) |
org.apache.iceberg.io.FileIO |
io() |
long |
maxHiveTablePropertySize() |
org.apache.iceberg.ClientPool<org.apache.hadoop.hive.metastore.IMetaStoreClient,org.apache.thrift.TException> |
metaClients() |
default org.apache.hadoop.hive.metastore.api.Table |
newHmsTable(java.lang.String hmsTableOwner) |
default void |
persistTable(org.apache.hadoop.hive.metastore.api.Table hmsTable,
boolean updateHiveTable,
java.lang.String metadataLocation) |
default void |
setField(java.util.Map<java.lang.String,java.lang.String> parameters,
java.lang.String key,
java.lang.String value) |
default void |
setSchema(org.apache.iceberg.TableMetadata metadata,
java.util.Map<java.lang.String,java.lang.String> parameters) |
static org.apache.hadoop.hive.metastore.api.StorageDescriptor |
storageDescriptor(org.apache.iceberg.TableMetadata metadata,
boolean hiveEngineEnabled) |
java.lang.String |
table() |
protected java.lang.String |
tableName() |
org.apache.hadoop.hive.metastore.TableType |
tableType() |
static java.lang.String |
translateToIcebergProp(java.lang.String hmsProp)
Provides key translation where necessary between Iceberg and HMS props.
|
static void |
validateTableIsIceberg(org.apache.hadoop.hive.metastore.api.Table table,
java.lang.String fullName) |
checkCommitStatus, commit, current, currentMetadataLocation, currentVersion, disableRefresh, locationProvider, metadataFileLocation, refresh, refreshFromMetadataLocation, refreshFromMetadataLocation, refreshFromMetadataLocation, refreshFromMetadataLocation, requestRefresh, temp, writeNewMetadata, writeNewMetadataIfRequired
public static final java.lang.String HIVE_TABLE_PROPERTY_MAX_SIZE
public static final long HIVE_TABLE_PROPERTY_MAX_SIZE_DEFAULT
public static final java.lang.String NO_LOCK_EXPECTED_KEY
public static final java.lang.String NO_LOCK_EXPECTED_VALUE
protected HiveTableOperations(org.apache.hadoop.conf.Configuration conf,
org.apache.iceberg.ClientPool<org.apache.hadoop.hive.metastore.IMetaStoreClient,org.apache.thrift.TException> metaClients,
org.apache.iceberg.io.FileIO fileIO,
java.lang.String catalogName,
java.lang.String database,
java.lang.String table)
public static java.lang.String translateToIcebergProp(java.lang.String hmsProp)
Example: Deleting data files upon DROP TABLE is enabled using gc.enabled=true in Iceberg and external.table.purge=true in Hive. Hive and Iceberg users are unaware of each other's control flags, therefore inconsistent behaviour can occur from e.g. a Hive user's point of view if external.table.purge=true is set on the HMS table but gc.enabled=false is set on the Iceberg table, resulting in no data file deletion.
hmsProp - The HMS property that should be translated to an Iceberg property
protected java.lang.String tableName()
tableName in class org.apache.iceberg.BaseMetastoreTableOperations
public org.apache.iceberg.io.FileIO io()
io in interface org.apache.iceberg.TableOperations
protected void doRefresh()
doRefresh in class org.apache.iceberg.BaseMetastoreTableOperations
protected void doCommit(org.apache.iceberg.TableMetadata base,
org.apache.iceberg.TableMetadata metadata)
doCommit in class org.apache.iceberg.BaseMetastoreTableOperations
public long maxHiveTablePropertySize()
public java.lang.String database()
public java.lang.String table()
public org.apache.hadoop.hive.metastore.TableType tableType()
public org.apache.iceberg.ClientPool<org.apache.hadoop.hive.metastore.IMetaStoreClient,org.apache.thrift.TException> metaClients()
public java.util.Map<java.lang.String,java.lang.String> hmsEnvContext(java.lang.String metadataLocation)
public boolean exposeInHmsProperties()
public void setSchema(org.apache.iceberg.TableMetadata metadata,
java.util.Map<java.lang.String,java.lang.String> parameters)
public void setField(java.util.Map<java.lang.String,java.lang.String> parameters,
java.lang.String key,
java.lang.String value)
public static void validateTableIsIceberg(org.apache.hadoop.hive.metastore.api.Table table,
java.lang.String fullName)
public void persistTable(org.apache.hadoop.hive.metastore.api.Table hmsTable,
boolean updateHiveTable,
java.lang.String metadataLocation)
throws org.apache.thrift.TException,
java.lang.InterruptedException
org.apache.thrift.TException
java.lang.InterruptedException
public static org.apache.hadoop.hive.metastore.api.StorageDescriptor storageDescriptor(org.apache.iceberg.TableMetadata metadata,
boolean hiveEngineEnabled)
public static void cleanupMetadata(org.apache.iceberg.io.FileIO io,
java.lang.String commitStatus,
java.lang.String metadataLocation)
public org.apache.hadoop.hive.metastore.api.Table newHmsTable(java.lang.String hmsTableOwner)