public class CqlStorage extends AbstractCassandraStorage

Nested classes/interfaces inherited from class AbstractCassandraStorage:
AbstractCassandraStorage.MarshallerType

Fields inherited from class AbstractCassandraStorage:
column_family, conf, DEFAULT_INPUT_FORMAT, DEFAULT_OUTPUT_FORMAT, inputFormatClass, keyspace, loadSignature, outputFormatClass, PARTITION_FILTER_SIGNATURE, partitionerClass, password, PIG_INITIAL_ADDRESS, PIG_INPUT_FORMAT, PIG_INPUT_INITIAL_ADDRESS, PIG_INPUT_PARTITIONER, PIG_INPUT_RPC_PORT, PIG_INPUT_SPLIT_SIZE, PIG_OUTPUT_FORMAT, PIG_OUTPUT_INITIAL_ADDRESS, PIG_OUTPUT_PARTITIONER, PIG_OUTPUT_RPC_PORT, PIG_PARTITIONER, PIG_RPC_PORT, splitSize, storeSignature, usePartitionFilter, username

| Constructor and Description |
|---|
CqlStorage() |
CqlStorage(int pageSize) |
| Modifier and Type | Method and Description |
|---|---|
protected java.util.List<ColumnDef> |
getColumnMetadata(Cassandra.Client client,
boolean cql3Table)
include key columns
|
org.apache.pig.data.Tuple |
getNext()
get next row
|
org.apache.pig.ResourceSchema |
getSchema(java.lang.String location,
org.apache.hadoop.mapreduce.Job job)
schema: (value, value, value) where keys are in the front.
|
void |
prepareToRead(org.apache.hadoop.mapreduce.RecordReader reader,
org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit split) |
void |
prepareToWrite(org.apache.hadoop.mapreduce.RecordWriter writer) |
void |
putNext(org.apache.pig.data.Tuple t)
output: (((name, value), (name, value)), (value ...
|
void |
setLocation(java.lang.String location,
org.apache.hadoop.mapreduce.Job job)
set read configuration settings
|
void |
setPartitionFilter(org.apache.pig.Expression partitionFilter) |
void |
setStoreLocation(java.lang.String location,
org.apache.hadoop.mapreduce.Job job)
set store configuration settings
|
Methods inherited from class AbstractCassandraStorage:
cfdefFromString, cfdefToString, checkSchema, cleanupOnFailure, columnToTuple, composeComposite, getCfDef, getCfDef, getColumnMeta, getDefaultMarshallers, getFullyQualifiedClassName, getIndexes, getIndexType, getInputFormat, getKeysMeta, getOutputFormat, getPartitionKeys, getPigType, getQueryMap, getStatistics, getValidatorMap, initSchema, objToBB, parseType, relativeToAbsolutePath, relToAbsPathForStoreLocation, setConnectionInformation, setStoreFuncUDFContextSignature, setTupleValue, setUDFContextSignature

public CqlStorage()
public CqlStorage(int pageSize)
Parameters:
pageSize - limit number of CQL rows to fetch in a thrift request

public void prepareToRead(org.apache.hadoop.mapreduce.RecordReader reader,
org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit split)
Specified by:
prepareToRead in class org.apache.pig.LoadFunc

public org.apache.pig.data.Tuple getNext()
throws java.io.IOException
Specified by:
getNext in class org.apache.pig.LoadFunc
Throws:
java.io.IOException

public void setLocation(java.lang.String location,
org.apache.hadoop.mapreduce.Job job)
throws java.io.IOException
Specified by:
setLocation in class org.apache.pig.LoadFunc
Throws:
java.io.IOException

public void setStoreLocation(java.lang.String location,
org.apache.hadoop.mapreduce.Job job)
throws java.io.IOException
Throws:
java.io.IOException

public org.apache.pig.ResourceSchema getSchema(java.lang.String location,
org.apache.hadoop.mapreduce.Job job)
throws java.io.IOException
Throws:
java.io.IOException

public void setPartitionFilter(org.apache.pig.Expression partitionFilter)
public void prepareToWrite(org.apache.hadoop.mapreduce.RecordWriter writer)
public void putNext(org.apache.pig.data.Tuple t)
throws java.io.IOException
Throws:
java.io.IOException

protected java.util.List<ColumnDef> getColumnMetadata(Cassandra.Client client,
                                                      boolean cql3Table)
                                               throws InvalidRequestException,
                                                      UnavailableException,
                                                      TimedOutException,
                                                      SchemaDisagreementException,
                                                      org.apache.thrift.TException,
                                                      java.nio.charset.CharacterCodingException
Overrides:
getColumnMetadata in class AbstractCassandraStorage
Throws:
InvalidRequestException, UnavailableException, TimedOutException, SchemaDisagreementException, org.apache.thrift.TException, java.nio.charset.CharacterCodingException

Copyright © 2013 The Apache Software Foundation