#include <arith_codec.h>
|
| void | InitEncoder () |
| | Initialises the Encoder.
|
| |
| void | EncodeSymbol (const bool symbol, const int context_num) |
| | Encodes a symbol and writes to output.
|
| |
| void | EncodeUInt (const unsigned int value, const int bin1, const int max_bin) |
| |
| void | EncodeSInt (const int value, const int bin1, const int max_bin) |
| |
| void | FlushEncoder () |
| | flushes the output of the encoder.
|
| |
| int | ByteCount () const |
| |
| void | InitDecoder (int num_bytes) |
| | Initialise the Decoder.
|
| |
| bool | DecodeSymbol (int context_num) |
| | Decodes a symbol given a context number.
|
| |
| unsigned int | DecodeUInt (const int bin1, const int max_bin) |
| |
| int | DecodeSInt (const int bin1, const int max_bin) |
| |
◆ ArithCodecBase() [1/2]
| dirac::ArithCodecBase::ArithCodecBase |
( |
ByteIO * | p_byteio, |
|
|
size_t | number_of_contexts ) |
Creates an ArithCodec object to decode input based on a set of parameters.
- Parameters
-
| p_byteio | input/output for encoded bits |
| number_of_contexts | the number of contexts used |
◆ ~ArithCodecBase()
| virtual dirac::ArithCodecBase::~ArithCodecBase |
( |
| ) |
|
|
virtual |
Destructor is virtual as this class is abstract.
◆ ArithCodecBase() [2/2]
◆ ByteCount()
| int dirac::ArithCodecBase::ByteCount |
( |
| ) |
const |
|
protected |
◆ DecodeSInt()
| int dirac::ArithCodecBase::DecodeSInt |
( |
const int | bin1, |
|
|
const int | max_bin ) |
|
inline protected
◆ DecodeSymbol()
| bool dirac::ArithCodecBase::DecodeSymbol |
( |
int | context_num | ) |
|
|
inline protected
◆ DecodeUInt()
| unsigned int dirac::ArithCodecBase::DecodeUInt |
( |
const int | bin1, |
|
|
const int | max_bin ) |
|
inline protected
◆ EncodeSInt()
| void dirac::ArithCodecBase::EncodeSInt |
( |
const int | value, |
|
|
const int | bin1, |
|
|
const int | max_bin ) |
|
inline protected
◆ EncodeSymbol()
| void dirac::ArithCodecBase::EncodeSymbol |
( |
const bool | symbol, |
|
|
const int | context_num ) |
|
inline protected
◆ EncodeUInt()
| void dirac::ArithCodecBase::EncodeUInt |
( |
const unsigned int | value, |
|
|
const int | bin1, |
|
|
const int | max_bin ) |
|
inline protected
◆ FlushEncoder()
| void dirac::ArithCodecBase::FlushEncoder |
( |
| ) |
|
|
protected |
◆ InitDecoder()
| void dirac::ArithCodecBase::InitDecoder |
( |
int | num_bytes | ) |
|
|
protected |
◆ InitEncoder()
| void dirac::ArithCodecBase::InitEncoder |
( |
| ) |
|
|
protected |
◆ InputBit()
| bool dirac::ArithCodecBase::InputBit |
( |
| ) |
|
|
inline private
◆ operator=()
◆ ReadAllData()
| void dirac::ArithCodecBase::ReadAllData |
( |
int | num_bytes | ) |
|
|
private |
◆ m_byteio
| ByteIO* dirac::ArithCodecBase::m_byteio |
|
private |
◆ m_code
| unsigned int dirac::ArithCodecBase::m_code |
|
private |
◆ m_context_list
| std::vector<Context> dirac::ArithCodecBase::m_context_list |
|
protected |
◆ m_data_ptr
| char* dirac::ArithCodecBase::m_data_ptr |
|
private |
◆ m_decode_data_ptr
| char* dirac::ArithCodecBase::m_decode_data_ptr |
|
private |
◆ m_input_bits_left
| int dirac::ArithCodecBase::m_input_bits_left |
|
private |
◆ m_low_code
| unsigned int dirac::ArithCodecBase::m_low_code |
|
private |
◆ m_range
| unsigned int dirac::ArithCodecBase::m_range |
|
private |
◆ m_scount
| unsigned int dirac::ArithCodecBase::m_scount |
|
private |
◆ m_underflow
| int dirac::ArithCodecBase::m_underflow |
|
private |
The documentation for this class was generated from the following file:
© 2004 British Broadcasting Corporation.
Dirac code licensed under the Mozilla Public License (MPL) Version 1.1.
HTML documentation generated by Dimitri van Heesch's
excellent Doxygen tool.