summaryrefslogtreecommitdiff
path: root/gpr/source/lib/vc5_decoder
diff options
context:
space:
mode:
authorluxagraf <sng@luxagraf.net>2023-06-15 15:58:59 -0500
committerluxagraf <sng@luxagraf.net>2023-06-15 15:58:59 -0500
commitab987e10f154f5536bb8fd936ae0966e909fa969 (patch)
tree9de5076f38b71ececb1bc94f8d9d19170898d603 /gpr/source/lib/vc5_decoder
added all my scriptssynced/master
Diffstat (limited to 'gpr/source/lib/vc5_decoder')
-rw-r--r--gpr/source/lib/vc5_decoder/CMakeLists.txt22
-rwxr-xr-xgpr/source/lib/vc5_decoder/codebooks.c36
-rwxr-xr-xgpr/source/lib/vc5_decoder/codebooks.h50
-rwxr-xr-xgpr/source/lib/vc5_decoder/component.c111
-rwxr-xr-xgpr/source/lib/vc5_decoder/component.h36
-rwxr-xr-xgpr/source/lib/vc5_decoder/decoder.c2310
-rwxr-xr-xgpr/source/lib/vc5_decoder/decoder.h336
-rwxr-xr-xgpr/source/lib/vc5_decoder/dequantize.c88
-rwxr-xr-xgpr/source/lib/vc5_decoder/dequantize.h36
-rwxr-xr-xgpr/source/lib/vc5_decoder/headers.h49
-rwxr-xr-xgpr/source/lib/vc5_decoder/inverse.c1188
-rwxr-xr-xgpr/source/lib/vc5_decoder/inverse.h51
-rwxr-xr-xgpr/source/lib/vc5_decoder/parameters.c47
-rwxr-xr-xgpr/source/lib/vc5_decoder/parameters.h119
-rwxr-xr-xgpr/source/lib/vc5_decoder/raw.c135
-rwxr-xr-xgpr/source/lib/vc5_decoder/raw.h35
-rwxr-xr-xgpr/source/lib/vc5_decoder/syntax.c116
-rwxr-xr-xgpr/source/lib/vc5_decoder/syntax.h44
-rwxr-xr-xgpr/source/lib/vc5_decoder/vc5_decoder.c132
-rwxr-xr-xgpr/source/lib/vc5_decoder/vc5_decoder.h84
-rwxr-xr-xgpr/source/lib/vc5_decoder/vlc.c109
-rwxr-xr-xgpr/source/lib/vc5_decoder/vlc.h103
-rwxr-xr-xgpr/source/lib/vc5_decoder/wavelet.c173
-rwxr-xr-xgpr/source/lib/vc5_decoder/wavelet.h42
24 files changed, 5452 insertions, 0 deletions
diff --git a/gpr/source/lib/vc5_decoder/CMakeLists.txt b/gpr/source/lib/vc5_decoder/CMakeLists.txt
new file mode 100644
index 0000000..d78522d
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/CMakeLists.txt
@@ -0,0 +1,22 @@
+# Build configuration for the vc5_decoder static library.
+
+# library
+set( LIB_NAME vc5_decoder )
+
+# get source files
+# NOTE(review): file(GLOB) does not re-run when files are added or removed;
+# CMake documentation recommends listing sources explicitly -- confirm
+# before relying on incremental builds picking up new files.
+file( GLOB SRC_FILES "*.c" )
+
+# get include files
+file( GLOB INC_FILES "*.h" )
+
+# add include files from other folders
+include_directories( "../vc5_common" )
+include_directories( "../common/private" )
+include_directories( "../common/public" )
+
+
+# library
+# Headers are listed alongside sources so IDE project generators show them.
+add_library( ${LIB_NAME} STATIC ${SRC_FILES} ${INC_FILES} )
+
+target_link_libraries( ${LIB_NAME} )
+
+# set the folder where to place the projects
+set_target_properties( ${LIB_NAME} PROPERTIES FOLDER lib )
diff --git a/gpr/source/lib/vc5_decoder/codebooks.c b/gpr/source/lib/vc5_decoder/codebooks.c
new file mode 100755
index 0000000..cfd40d4
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/codebooks.c
@@ -0,0 +1,36 @@
+/*! @file codebooks.c
+ *
+ * @brief Definition of the codeset (entropy codebook tables) used by the decoder.
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+// table17.inc provides the static definition of the codebook referenced below
+#include "table17.inc"
+
+/*!
+ @brief Define the codeset used by the reference codec
+
+ The baseline codec only supports codebook #17.
+
+ Codebook #17 is intended to be used with cubic companding
+ (see @ref FillMagnitudeEncodingTable and @ref ComputeCubicTable).
+ */
+DECODER_CODESET decoder_codeset_17 = {
+ "Codebook set 17 from data by David Newman with tables automatically generated for the FSM decoder", // title
+ (const CODEBOOK *)&table17, // codebook for runs and magnitudes
+ CODESET_FLAGS_COMPANDING_CUBIC, // values were cubic-companded when encoded
+};
diff --git a/gpr/source/lib/vc5_decoder/codebooks.h b/gpr/source/lib/vc5_decoder/codebooks.h
new file mode 100755
index 0000000..94670ee
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/codebooks.h
@@ -0,0 +1,50 @@
+/*! @file codebooks.h
+ *
+ * @brief Declarations for the codesets used by the decoder.
+ * The collection of codebooks that are used by the decoder is called a codeset.
+ * The codebook in each codeset is derived from the master codebook that is
+ * included in the codec by including the table for the codebook. The encoder
+ * uses specialized codebooks for coefficient magnitudes and runs of zeros that
+ * are derived from the master codebook.
+
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEBOOKS_H
+#define CODEBOOKS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ /*!
+ @brief A codeset: the collection of codebook tables used by the decoder,
+ plus flags describing how the encoded values were companded.
+ */
+ typedef struct decoder_codeset {
+
+ const char *title; //!< Identifying string for the codeset
+
+ const CODEBOOK *codebook; //!< Codebook for runs and magnitudes
+
+ uint32_t flags; //!< Encoding flags (see the codeset flags)
+
+ } DECODER_CODESET;
+
+ //TODO: Need to support other codesets in the reference decoder?
+ // The only codeset supported by the baseline codec (defined in codebooks.c)
+ extern DECODER_CODESET decoder_codeset_17;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // CODEBOOKS_H
diff --git a/gpr/source/lib/vc5_decoder/component.c b/gpr/source/lib/vc5_decoder/component.c
new file mode 100755
index 0000000..c224dc5
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/component.c
@@ -0,0 +1,111 @@
+/*! @file component.c
+ *
+ * @brief Code for parsing the inverse component transform and inverse component permutation.
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+/*!
+ @brief Parse the inverse component transform chunk from the bitstream.
+
+ Reads a component_count x component_count matrix of 8-bit values followed
+ by an 8-bit offset and 8-bit scale per row, then skips the padding that
+ aligns the chunk payload to a segment boundary. The parsed values are
+ currently discarded (see the TODO markers below).
+
+ @param decoder Decoder whose codec state supplies the component count
+ @param stream Bitstream positioned at the start of the chunk payload
+ @param chunk_size Size of the chunk payload in segments (not bytes)
+ */
+CODEC_ERROR ParseInverseComponentTransform(DECODER *decoder, BITSTREAM *stream, size_t chunk_size)
+{
+ //CODEC_ERROR error = CODEC_ERROR_OKAY;
+ CODEC_STATE *codec = &decoder->codec;
+ int component_count = codec->channel_count;
+ int padding;
+ int i;
+
+#if VC5_ENABLED_PART(VC5_PART_COLOR_SAMPLING)
+ if (IsPartEnabled(decoder->enabled_parts, VC5_PART_COLOR_SAMPLING))
+ {
+ // Recompute the number of components to account for color difference component subsampling
+ component_count = codec->pattern_width * codec->pattern_height + 2;
+ }
+#endif
+
+ // Compute the padding (in bytes) from the end of the component transform to the end of the chunk payload
+ // NOTE(review): padding goes negative if the chunk is smaller than the
+ // transform it claims to carry -- confirm upstream validation of chunk_size.
+ padding = (int)((chunk_size * sizeof(SEGMENT)) - ((component_count + 2) * component_count) * sizeof(uint8_t));
+
+ for (i = 0; i < component_count; i++)
+ {
+ int offset;
+ int scale;
+ int j;
+
+ // One row of the transform matrix (one byte per entry)
+ for (j = 0; j < component_count; j++)
+ {
+ int matrix_index = i * component_count + j;
+ int matrix_value = GetBits(stream, 8);
+
+ //TODO: Need to save the value in the codec state
+ (void)matrix_index;
+ (void)matrix_value;
+ }
+
+ // Each matrix row is followed by its offset and scale bytes
+ offset = GetBits(stream, 8);
+ scale = GetBits(stream, 8);
+
+ //TODO: Need to save the offset and scale in the codec state
+ (void)offset;
+ (void)scale;
+ }
+
+ // Skip the padding at the end of the chunk payload
+ GetBits(stream, 8 * padding);
+
+ // Should be at the end of the last segment in the chunk
+ assert(IsAlignedSegment(stream));
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Parse the inverse component permutation chunk from the bitstream.
+
+ Reads one 8-bit permutation index per component, then skips the padding
+ that aligns the chunk payload to a segment boundary. The parsed indices
+ are currently discarded (see the TODO marker below).
+
+ @param decoder Decoder whose codec state supplies the component count
+ @param stream Bitstream positioned at the start of the chunk payload
+ @param chunk_size Size of the chunk payload in segments (not bytes)
+ */
+CODEC_ERROR ParseInverseComponentPermutation(DECODER *decoder, BITSTREAM *stream, size_t chunk_size)
+{
+ //CODEC_ERROR error = CODEC_ERROR_OKAY;
+ CODEC_STATE *codec = &decoder->codec;
+ int component_count = codec->channel_count;
+ int padding;
+ int i;
+
+#if VC5_ENABLED_PART(VC5_PART_COLOR_SAMPLING)
+ if (IsPartEnabled(decoder->enabled_parts, VC5_PART_COLOR_SAMPLING))
+ {
+ // Recompute the number of components to account for color difference component subsampling
+ component_count = codec->pattern_width * codec->pattern_height + 2;
+ }
+#endif
+
+ // Compute the padding (in bytes) from the end of the component transform to the end of the chunk payload
+ padding = (int)((chunk_size * sizeof(SEGMENT)) - component_count * sizeof(uint8_t));
+
+ for (i = 0; i < component_count; i++)
+ {
+ int value;
+
+ value = GetBits(stream, 8);
+
+ //TODO: Need to save the permutation index in the codec state
+ (void)value;
+ }
+
+ // Skip the padding at the end of the chunk payload
+ GetBits(stream, 8 * padding);
+
+ // Should be at the end of the last segment in the chunk
+ assert(IsAlignedSegment(stream));
+
+ return CODEC_ERROR_OKAY;
+}
diff --git a/gpr/source/lib/vc5_decoder/component.h b/gpr/source/lib/vc5_decoder/component.h
new file mode 100755
index 0000000..db208b0
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/component.h
@@ -0,0 +1,36 @@
+/*! @file component.h
+ *
+ * @brief Declaration of routines for the inverse component transform and permutation.
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef COMPONENT_H
+#define COMPONENT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ //! Parse the inverse component transform chunk (matrix, offsets, scales)
+ CODEC_ERROR ParseInverseComponentTransform(DECODER *decoder, BITSTREAM *stream, size_t chunk_size);
+
+ //! Parse the inverse component permutation chunk (one index per component)
+ CODEC_ERROR ParseInverseComponentPermutation(DECODER *decoder, BITSTREAM *stream, size_t chunk_size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // COMPONENT_H
diff --git a/gpr/source/lib/vc5_decoder/decoder.c b/gpr/source/lib/vc5_decoder/decoder.c
new file mode 100755
index 0000000..73aa3b8
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/decoder.c
@@ -0,0 +1,2310 @@
+/*! @file decoder.c
+ *
+ * @brief Implementation of core decoder functions and data structure
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+/*!
+ @brief Advance the bitstream to the next byte boundary
+
+ The residual bits (zero to seven) that precede the next byte-aligned
+ position are discarded from the bitstream buffer.
+ */
+static CODEC_ERROR AlignBitsByte(BITSTREAM *bitstream)
+{
+ // Number of residual bits before the next byte boundary
+ BITCOUNT residual_bits = bitstream->count % 8;
+
+ // Discard the residual bits (a zero count is harmless)
+ GetBits(bitstream, residual_bits);
+
+ // The bitstream must now be byte aligned
+ assert((bitstream->count % 8) == 0);
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Advance the bitstream to the next word boundary
+
+ The partial word held in the bitstream buffer is flushed; nothing is
+ done when the buffer is completely empty or completely full.
+ */
+static CODEC_ERROR AlignBitsWord(BITSTREAM *bitstream)
+{
+ BITCOUNT buffered_bits = bitstream->count;
+
+ // Already aligned when the buffer is completely empty or completely full
+ if (buffered_bits <= 0 || buffered_bits >= bit_word_count) {
+ return CODEC_ERROR_OKAY;
+ }
+
+ // Flush the partial word from the bitstream buffer
+ GetBits(bitstream, buffered_bits);
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Align the bitstream to the next tag value pair
+
+ Byte-aligns the bitstream, then discards whole bytes until the total
+ number of bytes consumed (buffered bits plus bytes already read from
+ the underlying stream) is a multiple of the segment size.
+ */
+static CODEC_ERROR AlignBitsSegment(BITSTREAM *bitstream)
+{
+ STREAM *stream = bitstream->stream;
+ size_t byte_count;
+
+ // Byte align the bitstream
+ AlignBitsByte(bitstream);
+ assert((bitstream->count % 8) == 0);
+
+ // Compute the number of bytes in the bit buffer
+ byte_count = bitstream->count / 8;
+
+ // Add the number of bytes read from the stream
+ byte_count += stream->byte_count;
+
+ // Discard bytes until the next segment (TAGVALUE) boundary is reached
+ while ((byte_count % sizeof(TAGVALUE)) != 0)
+ {
+ GetBits(bitstream, 8);
+ byte_count++;
+ }
+
+ // The bitstream should be aligned to the next segment
+ assert((bitstream->count == 0) || (bitstream->count == bit_word_count));
+ assert((byte_count % sizeof(TAGVALUE)) == 0);
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Initialize the decoder data structure
+
+ Acts as a C++-style constructor: the decoder is cleared to default
+ values that are later replaced by the parameters used to prepare the
+ decoder (see @ref PrepareDecoder). Additional preparation is still
+ required before the decoder can decode a sample.
+ */
+CODEC_ERROR InitDecoder(DECODER *decoder, const gpr_allocator *allocator)
+{
+ assert(decoder != NULL);
+ if (decoder == NULL) {
+ return CODEC_ERROR_NULLPTR;
+ }
+
+ // Reset every field to its default (zero) value
+ memset(decoder, 0, sizeof(DECODER));
+
+ // Record the allocator used for all subsequent decoder allocations
+ decoder->allocator = (gpr_allocator *)allocator;
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Release resources allocated by the decoder
+ Note that this routine does not close the logfile.
+ Return values of the release helpers are intentionally ignored
+ (ReleaseDecoderBuffers is currently unimplemented and reports so).
+ */
+CODEC_ERROR ReleaseDecoder(DECODER *decoder)
+{
+ // Free the wavelet transforms and decoding buffers
+ ReleaseDecoderTransforms(decoder);
+ ReleaseDecoderBuffers(decoder);
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Decode the bitstream encoded into the byte stream into separate component arrays
+
+ Convenience wrapper for callers that hold a byte stream (file or memory
+ buffer). The real entry point for decoding is @ref DecodingProcess; the
+ parameters structure stands in for information that a media container or
+ host application would normally provide. The unpacked image must already
+ be initialized by the caller.
+ */
+CODEC_ERROR DecodeStream(STREAM *stream, UNPACKED_IMAGE *unpacked_image, const DECODER_PARAMETERS *parameters)
+{
+ BITSTREAM bitstream;
+ DECODER decoder;
+ CODEC_ERROR status;
+
+ // Prepare the bitstream and bind it to the byte stream
+ InitBitstream(&bitstream);
+ status = AttachBitstream(&bitstream, stream);
+ if (status != CODEC_ERROR_OKAY) {
+ return status;
+ }
+
+ // Decode the bitstream sample into the unpacked image
+ status = DecodingProcess(&decoder, &bitstream, unpacked_image, parameters);
+
+ // Free decoder and bitstream resources regardless of the outcome
+ ReleaseDecoder(&decoder);
+ ReleaseBitstream(&bitstream);
+
+ return status;
+}
+
+/*!
+ @brief Decode the bitstream encoded into the byte stream
+ This is a convenience routine for applications that use the byte
+ stream data structure for samples stored in a file or memory buffer.
+ The main entry point for decoding a bitstream is @ref DecodingProcess.
+ The parameters data structure is intended to simulate information that
+ may be available to the decoder from the media container or an external
+ application.
+
+ @param stream Byte stream containing the encoded sample
+ @param packed_image Receives the packed output (GPR_RGB_RESOLUTION_NONE only)
+ @param rgb_image Receives the RGB output (reduced-resolution modes)
+ @param parameters Decoding parameters, including the requested RGB resolution
+ */
+CODEC_ERROR DecodeImage(STREAM *stream, IMAGE *packed_image, RGB_IMAGE *rgb_image, DECODER_PARAMETERS *parameters)
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+ BITSTREAM bitstream;
+ DECODER decoder;
+ DIMENSION packed_width;
+ DIMENSION packed_height;
+ PIXEL_FORMAT packed_format;
+
+ SetupDecoderLogCurve();
+
+ // The unpacked image will hold the component arrays decoded from the bitstream
+ UNPACKED_IMAGE unpacked_image;
+
+ // Initialize the bitstream data structure
+ InitBitstream(&bitstream);
+
+ // Bind the bitstream to the byte stream
+ error = AttachBitstream(&bitstream, stream);
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+ // The component arrays will be allocated after the bitstream is decoded
+ InitUnpackedImage(&unpacked_image);
+
+ // Decode the bitstream sample into an image buffer
+ error = DecodingProcess(&decoder, &bitstream, &unpacked_image, parameters);
+
+ if( error != CODEC_ERROR_OKAY )
+ {
+ // BUG FIX: the original code returned here without releasing the
+ // decoder or the bitstream, leaking both on any decoding error
+ ReleaseDecoder(&decoder);
+ ReleaseBitstream(&bitstream);
+ return error;
+ }
+
+ switch (parameters->rgb_resolution) {
+
+ case GPR_RGB_RESOLUTION_NONE:
+ // The dimensions and format for the output of the image packing process
+ SetOutputImageFormat(&decoder, parameters, &packed_width, &packed_height, &packed_format);
+
+ // Allocate the image buffer for output of the image packing process
+ AllocImage(decoder.allocator, packed_image, packed_width, packed_height, packed_format);
+
+ // Pack the component arrays into the output image
+ ImageRepackingProcess(&unpacked_image, packed_image, parameters);
+ break;
+
+ case GPR_RGB_RESOLUTION_HALF:
+ // Convert the decoded component arrays directly to RGB
+ WaveletToRGB(parameters->allocator, (PIXEL*)unpacked_image.component_array_list[0].data, (PIXEL*)unpacked_image.component_array_list[1].data, (PIXEL*)unpacked_image.component_array_list[2].data,
+ unpacked_image.component_array_list[2].width, unpacked_image.component_array_list[2].height, unpacked_image.component_array_list[2].pitch / 2,
+ rgb_image, 12, parameters->rgb_bits, &parameters->rgb_gain );
+ break;
+
+ case GPR_RGB_RESOLUTION_QUARTER:
+ // Convert the lowpass band of the first wavelet level to RGB
+ WaveletToRGB(parameters->allocator, decoder.transform[0].wavelet[0]->data[0], decoder.transform[1].wavelet[0]->data[0], decoder.transform[2].wavelet[0]->data[0],
+ decoder.transform[2].wavelet[0]->width, decoder.transform[2].wavelet[0]->height, decoder.transform[2].wavelet[0]->width,
+ rgb_image, 14, parameters->rgb_bits, &parameters->rgb_gain );
+ break;
+
+ case GPR_RGB_RESOLUTION_EIGHTH:
+ // Convert the lowpass band of the second wavelet level to RGB
+ WaveletToRGB(parameters->allocator, decoder.transform[0].wavelet[1]->data[0], decoder.transform[1].wavelet[1]->data[0], decoder.transform[2].wavelet[1]->data[0],
+ decoder.transform[2].wavelet[1]->width, decoder.transform[2].wavelet[1]->height, decoder.transform[2].wavelet[1]->width,
+ rgb_image, 14, parameters->rgb_bits, &parameters->rgb_gain );
+ break;
+
+ case GPR_RGB_RESOLUTION_SIXTEENTH:
+ // Convert the lowpass band of the third wavelet level to RGB
+ WaveletToRGB(parameters->allocator, decoder.transform[0].wavelet[2]->data[0], decoder.transform[1].wavelet[2]->data[0], decoder.transform[2].wavelet[2]->data[0],
+ decoder.transform[2].wavelet[2]->width, decoder.transform[2].wavelet[2]->height, decoder.transform[2].wavelet[2]->width,
+ rgb_image, 14, parameters->rgb_bits, &parameters->rgb_gain );
+ break;
+
+ default:
+ // BUG FIX: the original code returned here, leaking the component
+ // arrays, the decoder, and the bitstream; record the error and fall
+ // through to the common cleanup below instead
+ error = CODEC_ERROR_UNSUPPORTED_FORMAT;
+ break;
+ }
+
+ ReleaseComponentArrays( &parameters->allocator, &unpacked_image, unpacked_image.component_count );
+
+ // Release any resources allocated by the decoder
+ ReleaseDecoder(&decoder);
+
+ // Release any resources allocated by the bitstream
+ ReleaseBitstream(&bitstream);
+
+ // BUG FIX: propagate the result instead of always returning CODEC_ERROR_OKAY
+ return error;
+}
+
+/*!
+ @brief Initialize the decoder using the specified parameters
+
+ Installs the allocator, enabled-parts mask, codec state, and optional
+ layer/section settings from the parameters into the decoder.
+ @todo Add more error checking to this top-level routine
+ */
+CODEC_ERROR PrepareDecoder(DECODER *decoder, const DECODER_PARAMETERS *parameters)
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+ // BUG FIX: the original code dereferenced parameters before its NULL
+ // check (and outside it at the sections clause), so the check could
+ // never protect anything; validate the argument up front instead
+ assert(parameters != NULL);
+ if (parameters == NULL) {
+ return CODEC_ERROR_NULLPTR;
+ }
+
+ // Initialize the decoder data structure
+ error = InitDecoder(decoder, &parameters->allocator);
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+ // Set the mask that specifies which parts of the VC-5 standard are supported
+ decoder->enabled_parts = parameters->enabled_parts;
+
+ // Verify that the enabled parts are correct
+ error = VerifyEnabledParts(decoder->enabled_parts);
+ assert(error == CODEC_ERROR_OKAY);
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+ // Initialize the codec state (allocation routines use the codec state)
+ error = PrepareDecoderState(decoder, parameters);
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+ decoder->layer_count = (uint_fast8_t)parameters->layer_count;
+ decoder->progressive = parameters->progressive;
+ decoder->top_field_first = parameters->top_field_first;
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+ if (IsPartEnabled(decoder->enabled_parts, VC5_PART_SECTIONS))
+ {
+ decoder->section_flag = parameters->section_flag;
+ }
+#endif
+
+ // By default, decode every subband; reduced-resolution decodes lower this
+ decoder->subbands_to_decode = MAX_SUBBAND_COUNT;
+
+ return error;
+}
+
+/*!
+ @brief Decode a VC-5 bitstream to an ordered set of component arrays
+ This is the main entry point for decoding a sample. The decoder must
+ have been initialized by a call to @ref PrepareDecoder.
+ The bitstream must be initialized and bound to a byte stream before
+ calling this routine. The unpacked output image will be initialized by this
+ routine to hold the decoded component arrays represented in the bitstream.
+ @todo When the VC-5 part for layers is defined, should be able to pass a mask
+ indicating which layers must be decoded
+ */
+CODEC_ERROR DecodingProcess(DECODER *decoder, BITSTREAM *stream, UNPACKED_IMAGE *image, const DECODER_PARAMETERS *parameters)
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+ TAGVALUE segment;
+
+ // Initialize the decoder with a default allocator
+ // BUG FIX: the original code ignored the result of PrepareDecoder
+ error = PrepareDecoder(decoder, parameters);
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+ // Get the bitstream start marker
+ segment = GetSegment(stream);
+ if (segment.longword != StartMarkerSegment)
+ {
+ return CODEC_ERROR_MISSING_START_MARKER;
+ }
+
+ // Set up number of subbands to decode (fewer subbands are needed for
+ // reduced-resolution RGB output)
+ if( parameters->rgb_resolution == GPR_RGB_RESOLUTION_SIXTEENTH )
+ {
+ decoder->subbands_to_decode = 1;
+ }
+ else if( parameters->rgb_resolution == GPR_RGB_RESOLUTION_EIGHTH )
+ {
+ decoder->subbands_to_decode = 4;
+ }
+ else if( parameters->rgb_resolution == GPR_RGB_RESOLUTION_QUARTER )
+ {
+ decoder->subbands_to_decode = 7;
+ }
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+ // Decode each layer in the sample
+ if (decoder->layer_count > 1)
+ {
+ IMAGE decoded_image[MAX_LAYER_COUNT];
+ PIXEL_FORMAT decoded_format = decoder->output.format;
+ int layer_index;
+
+ for (layer_index = 0; layer_index < decoder->layer_count; layer_index++)
+ {
+ DIMENSION layer_width = LayerWidth(decoder, decoder->output.width);
+ DIMENSION layer_height = LayerHeight(decoder, decoder->output.height);
+
+ // Allocate an image for this layer
+ AllocImage(decoder->allocator, &decoded_image[layer_index], layer_width, layer_height, decoded_format);
+
+ // Decode the layer into its own image
+ error = DecodeSampleLayer(decoder, stream, &decoded_image[layer_index]);
+ if (error != CODEC_ERROR_OKAY) {
+ break;
+ }
+ }
+
+ if (error == CODEC_ERROR_OKAY)
+ {
+ // The decoded image in each layer is composited into the output image
+ error = ReconstructSampleFrame(decoder, decoded_image, decoder->layer_count, image);
+ }
+
+ // Free the images used for decoding each layer
+ for (layer_index = 0; layer_index < decoder->layer_count; layer_index++)
+ {
+ ReleaseImage(decoder->allocator, &decoded_image[layer_index]);
+ }
+ }
+ else
+#endif
+ {
+ // A VC-5 Part 1 bitstream can only contain a single layer (encoded image)
+ error = DecodeSingleImage(decoder, stream, image, parameters);
+ }
+
+ // Done decoding all layers in the sample and computing the output image
+ return error;
+}
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+/*!
+ @brief Decode the portion of a sample that corresponds to a single layer
+ Samples can contain multiple subsamples. Each subsample may correspond to
+ a different view. For example, an encoded video sample may contain both the
+ left and right subsamples in a stereo pair.
+ Subsamples have been called tracks or channels, but this terminology can be
+ confused with separate video tracks in a multimedia container or the color
+ planes that are called channels elsewhere in this codec.
+ The subsamples are decoded separately and composited to form a single image
+ that is the output of the complete process of decoding a single video sample.
+ For this reason, the subsamples are called layers.
+ @todo Okay to call a subsample a layer?
+ */
+CODEC_ERROR DecodeSampleLayer(DECODER *decoder, BITSTREAM *input, UNPACKED_IMAGE *image)
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+ // Initialize the codec state (including the dimensions of the first wavelet band)
+ PrepareDecoderState(decoder, NULL);
+
+ // Reset the flags in the wavelet transforms
+ PrepareDecoderTransforms(decoder);
+
+ // Process tag value pairs until the layer has been decoded
+ for (;;)
+ {
+ TAGVALUE segment;
+
+ // Read the next tag value pair from the bitstream
+ segment = GetSegment(input);
+ assert(input->error == BITSTREAM_ERROR_OKAY);
+ if (input->error != BITSTREAM_ERROR_OKAY) {
+ // BUG FIX: removed the unreachable break that followed this return
+ decoder->error = CodecErrorBitstream(input->error);
+ return decoder->error;
+ }
+
+ error = UpdateCodecState(decoder, input, segment);
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+ // Processed all wavelet bands in all channels?
+ if (IsLayerComplete(decoder)) break;
+
+ }
+
+ // Parsed the bitstream without errors?
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+ // Reconstruct the output image using the last decoded wavelet in each channel
+ return ReconstructLayerImage(decoder, image);
+}
+#endif
+
+/*!
+ @brief Decode the bitstream into a list of component arrays
+
+ Processes tag value pairs until decoding is complete, then reconstructs
+ the unpacked image from the final wavelets unless a reduced-resolution
+ RGB output was requested (in which case the caller reads the wavelets
+ directly from the decoder transforms).
+ */
+CODEC_ERROR DecodeSingleImage(DECODER *decoder, BITSTREAM *input, UNPACKED_IMAGE *image, const DECODER_PARAMETERS *parameters)
+{
+ TIMESTAMP("[BEG]", 2)
+
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+ CODEC_STATE *codec = &decoder->codec;
+
+ // Process tag value pairs until the layer has been decoded
+ for (;;)
+ {
+ TAGVALUE segment;
+
+ // Read the next tag value pair from the bitstream
+ segment = GetSegment(input);
+ assert(input->error == BITSTREAM_ERROR_OKAY);
+ if (input->error != BITSTREAM_ERROR_OKAY) {
+ // BUG FIX: removed the unreachable break that followed this return
+ decoder->error = CodecErrorBitstream(input->error);
+ return decoder->error;
+ }
+
+ error = UpdateCodecState(decoder, input, segment);
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+ // Processed all wavelet bands in all channels?
+ if ( IsDecodingComplete(decoder) && codec->header == false ) {
+ break;
+ }
+ }
+
+ // Parsed the bitstream without errors?
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+ TIMESTAMP("[END]", 2)
+
+ if( parameters->rgb_resolution == GPR_RGB_RESOLUTION_NONE ||
+ parameters->rgb_resolution == GPR_RGB_RESOLUTION_HALF ||
+ parameters->rgb_resolution == GPR_RGB_RESOLUTION_FULL )
+ {
+ // Reconstruct the output image using the last decoded wavelet in each channel
+ error = ReconstructUnpackedImage(decoder, image);
+ }
+
+ return error;
+}
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+/*!
+ @brief Set the channel dimensions from the image dimensions and format
+ This routine is used to set the channel dimensions and other channel-specific
+ parameters after the bitstream header has been parsed. The dimensions of
+ each channel can only be set using the parameters present in the bitstream
+ header if the bitstream conforms to VC-5 Part 3.
+
+ @param decoder Decoder whose codec state supplies the image parameters
+ @param channel_number Index of the channel whose dimensions are set
+ */
+CODEC_ERROR SetImageChannelParameters(DECODER *decoder, int channel_number)
+{
+ CODEC_STATE *codec = &decoder->codec;
+ IMAGE_FORMAT image_format = codec->image_format;
+ DIMENSION image_width = codec->image_width;
+ DIMENSION image_height = codec->image_height;
+ DIMENSION pattern_width = codec->pattern_width;
+ DIMENSION pattern_height = codec->pattern_height;
+
+ // Are the image dimensions valid?
+ if (image_width == 0 || image_height == 0)
+ {
+ // Cannot set the channel dimensions without valid image dimensions
+ return CODEC_ERROR_IMAGE_DIMENSIONS;
+ }
+
+ // Are the pattern dimensions valid?
+ if (pattern_width == 0 || pattern_height == 0)
+ {
+ // The channel dimensions may depend on the pattern dimensions
+ return CODEC_ERROR_PATTERN_DIMENSIONS;
+ }
+
+ switch (image_format)
+ {
+ case IMAGE_FORMAT_RAW:
+ // The pattern width and height must be two
+ assert(pattern_width == 2 && pattern_height == 2);
+
+ // The image dimensions must be divisible by the pattern dimensions
+ //assert((image_width % 2) == 0 && (image_height % 2) == 0);
+
+ // Each channel holds one component of the 2x2 pattern, so each
+ // channel is half the image size in both dimensions
+ decoder->channel[channel_number].width = image_width / 2;
+ decoder->channel[channel_number].height = image_height / 2;
+ break;
+
+ default:
+ // Cannot set the channel dimensions without a valid image format
+ return CODEC_ERROR_BAD_IMAGE_FORMAT;
+ break;
+ }
+
+ //TODO: Is the default bits per component the correct value to use?
+ decoder->channel[channel_number].bits_per_component = codec->bits_per_component;
+ decoder->channel[channel_number].initialized = true;
+
+ return CODEC_ERROR_OKAY;
+}
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+/*!
+ @brief Allocate all of the wavelets used during decoding
+ This routine allocates all of the wavelets in the wavelet tree that
+ may be used during decoding.
+ This routine is used to preallocate the wavelets before decoding begins.
+ The wavelet bands are allocated on demand if not preallocated.
+ By default, the wavelet bands are encoded into the bitstream with the bands
+ from the wavelet at the highest level (smallest wavelet) first so that the
+ bands can be processed by the decoder in the order as the sample is decoded.
+ */
+CODEC_ERROR AllocDecoderTransforms(DECODER *decoder)
+{
+ CODEC_ERROR result;
+ // Use the default allocator for the decoder
+
+ gpr_allocator *allocator = decoder->allocator;
+ int channel_number;
+ int wavelet_index;
+
+ int channel_count;
+ int wavelet_count;
+
+ assert(decoder != NULL);
+ if (! (decoder != NULL)) {
+ return CODEC_ERROR_NULLPTR;
+ }
+
+ channel_count = decoder->codec.channel_count;
+ wavelet_count = decoder->wavelet_count;
+
+ for (channel_number = 0; channel_number < channel_count; channel_number++)
+ {
+ DIMENSION wavelet_width;
+ DIMENSION wavelet_height;
+
+ // Set the channel dimensions using the information obtained from the bitstream header
+ result = SetImageChannelParameters(decoder, channel_number);
+ if( result != CODEC_ERROR_OKAY )
+ {
+ assert(0);
+ return result;
+ }
+
+ // Check that the channel dimensions and other parameters have been set
+ assert(decoder->channel[channel_number].initialized);
+
+ // The dimensions of the wavelet at level zero are equal to the channel dimensions
+ wavelet_width = decoder->channel[channel_number].width;
+ wavelet_height = decoder->channel[channel_number].height;
+
+ for (wavelet_index = 0; wavelet_index < wavelet_count; wavelet_index++)
+ {
+ WAVELET *wavelet;
+
+ // Pad the wavelet width if necessary
+ if ((wavelet_width % 2) != 0) {
+ wavelet_width++;
+ }
+
+ // Pad the wavelet height if necessary
+ if ((wavelet_height % 2) != 0) {
+ wavelet_height++;
+ }
+
+ // Dimensions of the current wavelet must be divisible by two
+ assert((wavelet_width % 2) == 0 && (wavelet_height % 2) == 0);
+
+ // Reduce the dimensions of the next wavelet by half
+ wavelet_width /= 2;
+ wavelet_height /= 2;
+
+ // NOTE(review): CreateWavelet can presumably fail and return NULL,
+ // which is stored without a check -- confirm allocation-failure policy
+ wavelet = CreateWavelet(allocator, wavelet_width, wavelet_height);
+ decoder->transform[channel_number].wavelet[wavelet_index] = wavelet;
+ }
+ }
+
+ return CODEC_ERROR_OKAY;
+}
+#endif
+
+/*!
+ @brief Free every wavelet in every channel transform owned by the decoder
+ */
+CODEC_ERROR ReleaseDecoderTransforms(DECODER *decoder)
+{
+ const int channel_count = decoder->codec.channel_count;
+ int channel;
+ int level;
+
+ for (channel = 0; channel < channel_count; channel++)
+ {
+ for (level = 0; level < decoder->wavelet_count; level++)
+ {
+ // Free the wavelet at this level of the channel transform
+ DeleteWavelet(decoder->allocator, decoder->transform[channel].wavelet[level]);
+ }
+ }
+
+ return CODEC_ERROR_OKAY;
+}
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
/*!
	@brief Allocate all of the buffers required for decoding

	This routine is intended to preallocate the decoding buffers (not the
	wavelet images in the wavelet tree, which are allocated by
	@ref AllocDecoderTransforms) before decoding begins.

	Currently unimplemented: the reference decoder allocates scratch buffers
	on demand inside each routine that needs scratch space and frees them at
	the end of that routine, so this preallocation hook always returns
	CODEC_ERROR_UNIMPLEMENTED.

	@todo Should it be an error if the buffers are not preallocated?
 */
CODEC_ERROR AllocDecoderBuffers(DECODER *decoder)
{
    (void)decoder;
    return CODEC_ERROR_UNIMPLEMENTED;
}
+#endif
+
/*!
	@brief Free any buffers allocated by the decoder

	Counterpart to @ref AllocDecoderBuffers; unimplemented for the same
	reason (scratch buffers are allocated and freed on demand), so this
	always returns CODEC_ERROR_UNIMPLEMENTED.
 */
CODEC_ERROR ReleaseDecoderBuffers(DECODER *decoder)
{
    (void)decoder;
    return CODEC_ERROR_UNIMPLEMENTED;
}
+
+/*!
+ @brief Allocate the wavelets for the specified channel
+ */
+CODEC_ERROR AllocateChannelWavelets(DECODER *decoder, int channel_number)
+{
+ // Use the default allocator for the decoder
+ gpr_allocator *allocator = decoder->allocator;
+ int wavelet_index;
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+ // Use the channel dimensions computed from the image dimension and image format
+ DIMENSION channel_width = decoder->channel[channel_number].width;
+ DIMENSION channel_height = decoder->channel[channel_number].height;
+#else
+ // Use the channel dimensions from the current codec state
+ DIMENSION channel_width = decoder->codec.channel_width;
+ DIMENSION channel_height = decoder->codec.channel_height;
+#endif
+
+ // Round up the wavelet dimensions to an even number
+ DIMENSION wavelet_width = ((channel_width % 2) == 0) ? channel_width / 2 : (channel_width + 1) / 2;
+ DIMENSION wavelet_height = ((channel_height % 2) == 0) ? channel_height / 2 : (channel_height + 1) / 2;
+
+ //TODO: Check for errors before the code that initializes the local variables
+ assert(decoder != NULL);
+ if (! (decoder != NULL)) {
+ return CODEC_ERROR_NULLPTR;
+ }
+
+ for (wavelet_index = 0; wavelet_index < decoder->wavelet_count; wavelet_index++)
+ {
+ WAVELET *wavelet = decoder->transform[channel_number].wavelet[wavelet_index];
+
+ // Has a wavelet already been created?
+ if (wavelet != NULL)
+ {
+ // Is the wavelet the correct size?
+ if (wavelet_width != wavelet->width ||
+ wavelet_height != wavelet->height)
+ {
+ // Deallocate the wavelet
+ DeleteWavelet(allocator, wavelet);
+
+ wavelet = NULL;
+ }
+ }
+
+ if (wavelet == NULL)
+ {
+ wavelet = CreateWavelet(allocator, wavelet_width, wavelet_height);
+ assert(wavelet != NULL);
+
+ decoder->transform[channel_number].wavelet[wavelet_index] = wavelet;
+ }
+
+ // Pad the wavelet width if necessary
+ if ((wavelet_width % 2) != 0) {
+ wavelet_width++;
+ }
+
+ // Pad the wavelet height if necessary
+ if ((wavelet_height % 2) != 0) {
+ wavelet_height++;
+ }
+
+ // Dimensions of the current wavelet must be divisible by two
+ assert((wavelet_width % 2) == 0 && (wavelet_height % 2) == 0);
+
+ // Reduce the dimensions of the next wavelet by half
+ wavelet_width /= 2;
+ wavelet_height /= 2;
+ }
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Initialize the codec state before starting to decode a bitstream
+ */
+CODEC_ERROR PrepareDecoderState(DECODER *decoder, const DECODER_PARAMETERS *parameters)
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+ CODEC_STATE *codec = &decoder->codec;
+
+ // Set the parameters that control the decoding process
+ decoder->wavelet_count = 3;
+
+ // The wavelets and decoding buffers have not been allocated
+ decoder->memory_allocated = false;
+
+ // Clear the table of information about each decoded channel
+ memset(decoder->channel, 0, sizeof(decoder->channel));
+
+ // Set the codebook
+ decoder->codebook = (CODEBOOK *)decoder_codeset_17.codebook;
+
+ // Initialize the codec state with the default parameter values
+ error = PrepareCodecState(codec);
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+ // Initialize the codec state with the external parameter values
+ codec->image_width = parameters->input.width;
+ codec->image_height = parameters->input.height;
+
+ //TODO: Initialize other parameters with external values?
+
+ // The default channel dimensions are the image dimensions
+ codec->channel_width = codec->image_width;
+ codec->channel_height = codec->image_height;
+
+ return error;
+}
+
+/*!
+ @brief Prepare the decoder transforms for the next layer
+ Each wavelet in the decoder transforms contain flags that indicate
+ whether the wavelet bands must be decoded. These flags must be reset
+ before decoding the next layer.
+ */
+CODEC_ERROR PrepareDecoderTransforms(DECODER *decoder)
+{
+ int channel_count = decoder->codec.channel_count;
+ int channel_index;
+
+ for (channel_index = 0; channel_index < channel_count; channel_index++)
+ {
+ int wavelet_count = decoder->wavelet_count;
+ int wavelet_index;
+
+ for (wavelet_index = 0; wavelet_index < wavelet_count; wavelet_index++)
+ {
+ WAVELET *wavelet = decoder->transform[channel_index].wavelet[wavelet_index];
+ wavelet->valid_band_mask = 0;
+ }
+ }
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Pack the component arrays into the output image
+
+ The decoding process outputs a set of component arrays that does not correspond
+ to any common image format. The image repacking process converts the ordered
+ set of component arrays output by the decoding processing into a packed image.
+ The image repacking process is not normative in VC-5 Part 1.
+ */
+CODEC_ERROR ImageRepackingProcess(const UNPACKED_IMAGE *unpacked_image,
+ PACKED_IMAGE *packed_image,
+ const DECODER_PARAMETERS *parameters)
+{
+ DIMENSION output_width = packed_image->width;
+ DIMENSION output_height = packed_image->height;
+ size_t output_pitch = packed_image->pitch;
+ PIXEL_FORMAT output_format = packed_image->format;
+ PIXEL *output_buffer = packed_image->buffer;
+ ENABLED_PARTS enabled_parts = parameters->enabled_parts;
+
+ (void)parameters;
+
+ // The dimensions must be in units of Bayer pattern elements
+ output_width /= 2;
+ output_height /= 2;
+ output_pitch *= 2;
+
+ switch (output_format)
+ {
+ case PIXEL_FORMAT_RAW_RGGB_12:
+ case PIXEL_FORMAT_RAW_GBRG_12:
+ return PackComponentsToRAW(unpacked_image, output_buffer, output_pitch,
+ output_width, output_height, enabled_parts, 12, output_format );
+
+ case PIXEL_FORMAT_RAW_RGGB_14:
+ case PIXEL_FORMAT_RAW_GBRG_14:
+ return PackComponentsToRAW(unpacked_image, output_buffer, output_pitch,
+ output_width, output_height, enabled_parts, 14, output_format );
+ break;
+
+ case PIXEL_FORMAT_RAW_RGGB_16:
+ return PackComponentsToRAW(unpacked_image, output_buffer, output_pitch,
+ output_width, output_height, enabled_parts, 16, output_format );
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ // Unsupported output image format
+ return CODEC_ERROR_UNSUPPORTED_FORMAT;
+}
+
+/*!
+ @brief Compute default parameters for the repacked image
+ */
+CODEC_ERROR SetOutputImageFormat(DECODER *decoder,
+ const DECODER_PARAMETERS *parameters,
+ DIMENSION *width_out,
+ DIMENSION *height_out,
+ PIXEL_FORMAT *format_out)
+{
+ // The image dimensions are in units of samples
+ DIMENSION output_width = decoder->codec.image_width;
+ DIMENSION output_height = decoder->codec.image_height;
+
+ PIXEL_FORMAT output_format = PIXEL_FORMAT_UNKNOWN;
+
+ // Override the pixel format with the format passed as a parameter
+ if (parameters->output.format != PIXEL_FORMAT_UNKNOWN) {
+ output_format = parameters->output.format;
+ }
+ assert(output_format != PIXEL_FORMAT_UNKNOWN);
+
+ if (width_out != NULL) {
+ *width_out = output_width;
+ }
+
+ if (height_out != NULL) {
+ *height_out = output_height;
+ }
+
+ if (format_out != NULL) {
+ *format_out = output_format;
+ }
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Return true if the lowpass bands in all channels are valid
+ */
+bool ChannelLowpassBandsAllValid(const DECODER *decoder, int index)
+{
+ int channel_count = decoder->codec.channel_count;
+ int channel;
+ for (channel = 0; channel < channel_count; channel++)
+ {
+ WAVELET *wavelet = decoder->transform[channel].wavelet[index];
+ if ((wavelet->valid_band_mask & BandValidMask(0)) == 0) {
+ return false;
+ }
+ }
+
+ // All channels have valid lowpass bands at the specified level
+ return true;
+}
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+
+/*
+ @brief Return true if the tag identifies a section header
+ */
+bool IsSectionHeader(TAGWORD tag)
+{
+ switch (tag)
+ {
+ case CODEC_TAG_ImageSectionTag:
+ case CODEC_TAG_HeaderSectionTag:
+ case CODEC_TAG_LayerSectionTag:
+ case CODEC_TAG_ChannelSectionTag:
+ case CODEC_TAG_WaveletSectionTag:
+ case CODEC_TAG_SubbandSectionTag:
+ return true;
+
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+/*
+ @brief Map the tag for a section header to the section number
+ */
+CODEC_ERROR GetSectionNumber(TAGWORD tag, int *section_number_out)
+{
+ int section_number = 0;
+
+ switch (tag)
+ {
+ case CODEC_TAG_ImageSectionTag:
+ section_number = 1;
+ break;
+
+ case CODEC_TAG_HeaderSectionTag:
+ section_number = 2;
+ break;
+
+ case CODEC_TAG_LayerSectionTag:
+ section_number = 3;
+ break;
+
+ case CODEC_TAG_ChannelSectionTag:
+ section_number = 4;
+ break;
+
+ case CODEC_TAG_WaveletSectionTag:
+ section_number = 5;
+ break;
+
+ case CODEC_TAG_SubbandSectionTag:
+ section_number = 6;
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ if (section_number_out != NULL) {
+ *section_number_out = section_number;
+ }
+
+ if (section_number > 0) {
+ return CODEC_ERROR_OKAY;
+ }
+
+ return CODEC_ERROR_BAD_SECTION_TAG;
+}
+
/*!
	@brief Write section information to the section log file

	Appends one line recording the section number and its length (in
	segments) to the open log file.  The caller is responsible for passing
	a valid, writable stream; the fprintf result is not checked.
 */
CODEC_ERROR WriteSectionInformation(FILE *logfile, int section_number, int section_length)
{
    fprintf(logfile, "Section: %d, length: %d\n", section_number, section_length);
    return CODEC_ERROR_OKAY;
}
+
+#endif
+
+/*!
+ @brief Skip the payload in a chunk
+
+ A chunk is a tag value pair where the value specifies the length
+ of a payload. If the tag is a negative number, then the payload
+ can be skipped without affecting the decoding process.
+ */
+static CODEC_ERROR SkipPayload(BITSTREAM *bitstream, int chunk_size)
+{
+ // The chunk size is in units of 32-bit words
+ size_t size = 4 * chunk_size;
+
+ // This routine assumes that the bit buffer is empty
+ assert(bitstream->count == 0);
+
+ // Skip the specified number of bytes in the stream
+ return SkipBytes(bitstream->stream, size);
+}
+
+/*!
+ @brief Parse the unique image identifier in a small chunk payload
+
+ @todo Should the UMID instance number be a parameter to this routine?
+ */
+static CODEC_ERROR ParseUniqueImageIdentifier(DECODER *decoder, BITSTREAM *stream, size_t identifier_length)
+{
+ const int UMID_length_byte = 0x13;
+ const int UMID_instance_number = 0;
+
+ // Total length of the unique image identifier chunk payload (in segments)
+ const int identifier_chunk_payload_length = UMID_length + sequence_number_length;
+
+ uint8_t byte_array[12];
+ BITWORD length_byte;
+ BITWORD instance_number;
+
+ // Check that the chunk payload has the correct length (in segments)
+ if (identifier_length != identifier_chunk_payload_length) {
+ return CODEC_ERROR_SYNTAX_ERROR;
+ }
+
+ // The unique image identifier chunk should begin with a UMID label
+ GetByteArray(stream, byte_array, sizeof(byte_array));
+ if (memcmp(byte_array, UMID_label, sizeof(UMID_label)) != 0) {
+ return CODEC_ERROR_UMID_LABEL;
+ }
+
+ // Check the UMID length byte
+ length_byte = GetBits(stream, 8);
+ if (length_byte != UMID_length_byte) {
+ return CODEC_ERROR_SYNTAX_ERROR;
+ }
+
+ // Check the UMID instance number
+ instance_number = GetBits(stream, 24);
+ if (instance_number != UMID_instance_number) {
+ return CODEC_ERROR_SYNTAX_ERROR;
+ }
+
+ // Read the image sequence identifier
+ GetByteArray(stream, decoder->image_sequence_identifier, sizeof(decoder->image_sequence_identifier));
+
+ // Read the image sequence number
+ decoder->image_sequence_number = GetBits(stream, 32);
+
+ return CODEC_ERROR_OKAY;
+}
+
/*!
	@brief Update the codec state with the specified tag value pair

	When a segment (tag value pair) is encountered in the bitstream of an
	encoded sample, it may imply some change in the codec state. For example,
	when a tag for the encoded format is read from the bitstream, the encoded
	format entry in the codec state may change.

	Some tags require that additional information must be read from the
	bitstream and more segments may be encountered, leading to additional
	changes in the codec state.

	A tag may identify a single parameter and the parameter value must be updated
	in the codec state with the new value specified in the segment, but a tag may
	also imply that other parameter values must be updated. For example, the tag
	that marks the first encounter with a wavelet at a lower level in the wavelet
	tree implies that the width and height of wavelet bands that may be encoded in
	the remainder of the sample must be doubled.

	It is not necessary for the encoder to insert segments into the bitstream if the
	codec state change represented by an encoded tag and value can be deduced from
	earlier segments in the bitstream and the codec state can be changed at a time
	during decoding that is functionally the same as when the state change would have
	been performed by an explicitly encoded tag and value.

	@todo Need to check that parameters found in the sample are consistent with
	the decoding parameters used to initialize the codec state.
 */
CODEC_ERROR UpdateCodecState(DECODER *decoder, BITSTREAM *stream, TAGVALUE segment)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
    CODEC_STATE *codec = &decoder->codec;
    ENABLED_PARTS enabled_parts = decoder->enabled_parts;
    bool optional = false;
    int chunk_size = 0;
    TAGWORD tag = segment.tuple.tag;
    TAGWORD value = segment.tuple.value;

    // The enabled parts variable may not be used depending on the compile-time options
    (void)enabled_parts;

    // Assume that the next syntax element is not a tag-value pair for a header parameter
    codec->header = false;

    // Assume that the next syntax element is not a codeblock (large chunk element)
    codec->codeblock = false;

    // Is this an optional tag? (optional tags are encoded as negative numbers)
    if (tag < 0) {
        tag = RequiredTag(tag);
        optional = true;
    }

    switch (tag)
    {
    case CODEC_TAG_ChannelCount:  // Number of channels in the transform
        assert(0 < value && value <= MAX_CHANNEL_COUNT);
        codec->channel_count = (uint_least8_t)value;
        codec->header = true;
        break;

    case CODEC_TAG_ImageWidth:  // Width of the image
        codec->image_width = value;
        codec->header = true;

        // The image width is the default width of the next channel in the bitstream
        codec->channel_width = value;
        break;

    case CODEC_TAG_ImageHeight:  // Height of the image
        codec->image_height = value;
        codec->header = true;

        // The image height is the default height of the next channel in the bitstream
        codec->channel_height = value;
        break;

    case CODEC_TAG_SubbandNumber:  // Subband number of this wavelet band
        codec->subband_number = value;
        break;

    case CODEC_TAG_Quantization:  // Quantization applied to band
        codec->band.quantization = value;
        break;

    case CODEC_TAG_LowpassPrecision:  // Number of bits per lowpass coefficient
        if (! (PRECISION_MIN <= value && value <= PRECISION_MAX)) {
            return CODEC_ERROR_LOWPASS_PRECISION;
        }
        codec->lowpass_precision = (PRECISION)value;
        break;

    case CODEC_TAG_ChannelNumber:  // Channel number
        codec->channel_number = value;
        break;

    case CODEC_TAG_BitsPerComponent:  // Number of bits in the video source
        codec->bits_per_component = (PRECISION)value;
        //error = SetDecoderBitsPerComponent(decoder, codec->channel_number, codec->bits_per_component);
        break;

    case CODEC_TAG_PrescaleShift:
        // Unpack the prescale shifts for each wavelet level from the value
        UpdatePrescaleTable(codec, value);
        break;

#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
    case CODEC_TAG_ImageFormat:
        if (IsPartEnabled(enabled_parts, VC5_PART_IMAGE_FORMATS))
        {
            codec->image_format = (IMAGE_FORMAT)value;
            codec->header = true;
        }
        else
        {
            // The image format shall not be present in the bitstream
            assert(0);
            error = CODEC_ERROR_BITSTREAM_SYNTAX;
        }
        break;

    case CODEC_TAG_PatternWidth:
        if (IsPartEnabled(enabled_parts, VC5_PART_IMAGE_FORMATS))
        {
            codec->pattern_width = (DIMENSION)value;
            codec->header = true;
        }
        else
        {
            // The pattern width shall not be present in the bitstream
            assert(0);
            error = CODEC_ERROR_BITSTREAM_SYNTAX;
        }
        break;

    case CODEC_TAG_PatternHeight:
        if (IsPartEnabled(enabled_parts, VC5_PART_IMAGE_FORMATS))
        {
            codec->pattern_height = (DIMENSION)value;
            codec->header = true;
        }
        else
        {
            // The pattern height shall not be present in the bitstream
            assert(0);
            error = CODEC_ERROR_BITSTREAM_SYNTAX;
        }
        break;

    case CODEC_TAG_ComponentsPerSample:
        if (IsPartEnabled(enabled_parts, VC5_PART_IMAGE_FORMATS))
        {
            codec->components_per_sample = (DIMENSION)value;
            codec->header = true;
        }
        else
        {
            // The components per sample shall not be present in the bitstream
            assert(0);
            error = CODEC_ERROR_BITSTREAM_SYNTAX;
        }
        break;

    case CODEC_TAG_MaxBitsPerComponent:
        if (IsPartEnabled(enabled_parts, VC5_PART_IMAGE_FORMATS))
        {
            codec->max_bits_per_component = (PRECISION)value;
            codec->header = true;
        }
        else
        {
            // The maximum bits per component shall not be present in the bitstream
            assert(0);
            error = CODEC_ERROR_BITSTREAM_SYNTAX;
        }
        break;
#endif

    case CODEC_TAG_ChannelWidth:
#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
        if (IsPartEnabled(enabled_parts, VC5_PART_IMAGE_FORMATS))
        {
            // The channel width shall not be present in the bitstream
            assert(0);
            error = CODEC_ERROR_BITSTREAM_SYNTAX;
        }
        else
#endif
        {
            // The channel width may be present in the bitstream
            codec->channel_width = (DIMENSION)value;
        }
        break;

    case CODEC_TAG_ChannelHeight:
#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
        if (IsPartEnabled(enabled_parts, VC5_PART_IMAGE_FORMATS))
        {
            // The channel height shall not be present in the bitstream
            assert(0);
            error = CODEC_ERROR_BITSTREAM_SYNTAX;
        }
        else
#endif
        {
            // The channel height may be present in the bitstream
            codec->channel_height = (DIMENSION)value;
        }
        break;

    default:  // Unknown tag or the tag identifies a chunk

        //TODO: Check for chunk tags that are not defined in VC-5 Part 1

        // Does this tag indicate a chunk of data?
        if (tag & CODEC_TAG_CHUNK_MASK)
        {
            // Does this chunk have a 24-bit size?
            if(tag & CODEC_TAG_LARGE_CHUNK)
            {
                // The chunk size includes the low byte in the tag
                chunk_size = (value & 0xFFFF);
                chunk_size += ((tag & 0xFF) << 16);
            }
            else
            {
                // The chunk size is specified by the value
                chunk_size = (value & 0xFFFF);
            }
        }

        // Is this a codeblock?
        if ((tag & CODEC_TAG_LargeCodeblock) == CODEC_TAG_LargeCodeblock)
        {
            codec->codeblock = true;
        }

        // Is this chunk a unique image identifier?
        else if (tag == CODEC_TAG_UniqueImageIdentifier)
        {
            // The unique image identifier should be optional
            assert(optional);
            if (! optional) {
                return CODEC_ERROR_SYNTAX_ERROR;
            }

            // Parse the unique image identifier
            error = ParseUniqueImageIdentifier(decoder, stream, chunk_size);
        }

        // Is this chunk an inverse component transform?
        else if (tag == CODEC_TAG_InverseTransform)
        {
            // The inverse component transform should not be optional
            assert(!optional);
            if (optional) {
                return CODEC_ERROR_SYNTAX_ERROR;
            }

            // Parse the inverse component transform
            error = ParseInverseComponentTransform(decoder, stream, chunk_size);
        }

        // Is this chunk an inverse component permutation?
        else if (tag == CODEC_TAG_InversePermutation)
        {
            // The inverse component permutation should not be optional
            assert(!optional);
            if (optional) {
                return CODEC_ERROR_SYNTAX_ERROR;
            }

            // Parse the inverse component permutation
            error = ParseInverseComponentPermutation(decoder, stream, chunk_size);
        }

        // Is this chunk a 16-bit inverse component transform?
        else if (tag == CODEC_TAG_InverseTransform16)
        {
            // The 16-bit inverse component transform is not supported
            assert(0);
            return CODEC_ERROR_UNIMPLEMENTED;
        }
#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
        // Is this a section header?
        else if (IsPartEnabled(enabled_parts, VC5_PART_SECTIONS) && decoder->section_flag && IsSectionHeader(tag))
        {
            int section_number;

            // Section headers are optional tag-value pairs
            optional = true;

            // Is this a bitstream header section?
            if (tag == CODEC_TAG_HeaderSectionTag)
            {
                // Handle this tag-value pair as if it was a bitstream header parameter
                codec->header = true;
            }

            // Convert the tag to a section number
            GetSectionNumber(tag, &section_number);

            // Record the section number and length (in segments)
            codec->section_number = section_number;
            codec->section_length = chunk_size;

            if( decoder->section_logfile )
            {
                // Write the section information to the log file
                WriteSectionInformation(decoder->section_logfile, section_number, chunk_size);
            }
        }
#endif
        else
        {
            // Does this chunk have a 24-bit chunk payload size?
            if (tag & CODEC_TAG_LARGE_CHUNK)
            {
                // Unknown large chunks are treated as optional with no skippable payload
                optional = true;
                chunk_size = 0;
            }

            assert(optional);
            if (!optional)
            {
                error = CODEC_ERROR_BITSTREAM_SYNTAX;
            }
            else if (chunk_size > 0)
            {
                // Skip processing the payload of this optional chunk element
                SkipPayload(stream, chunk_size);
            }
        }
        break;
    }

    // Encountered an error while processing the tag?
    if (error != CODEC_ERROR_OKAY)
    {
        return error;
    }

    //TODO: Check that bitstreams with missplaced header parameters fail to decode

    //if (IsHeaderParameter(tag))
    if (codec->header)
    {
        if (optional)
        {
#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
            if (tag == CODEC_TAG_HeaderSectionTag)
            {
                // Okay for the bitstream header to contain an optional section header tag-value pair
            }
#if VC5_ENABLED_PART(VC5_PART_LAYERS)
            else if (!IsPartEnabled(enabled_parts, VC5_PART_LAYERS))
            {
                // A header parameter cannot be optional
                error = CODEC_ERROR_REQUIRED_PARAMETER;
            }
#endif
#endif
            // NOTE(review): when both SECTIONS and LAYERS are compiled in, this second
            // check runs even for CODEC_TAG_HeaderSectionTag and appears to override the
            // allowance granted just above — confirm whether that is intended upstream.
#if VC5_ENABLED_PART(VC5_PART_LAYERS)
            if (!IsPartEnabled(enabled_parts, VC5_PART_LAYERS))
            {
                // A header parameter cannot be optional
                error = CODEC_ERROR_REQUIRED_PARAMETER;
            }
#endif
        }
        else if (decoder->header_finished)
        {
            // Should not encounter a header parameter after the header has been parsed
            error = CODEC_ERROR_BITSTREAM_SYNTAX;
        }
        else
        {
            // Record that this header parameter has been decoded
            error = UpdateHeaderParameter(decoder, tag);
        }
    }
    else if (!decoder->header_finished)
    {
        // There should be no more header parameters in the bitstream
        decoder->header_finished = true;
    }

#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
    // The wavelets and buffers can be allocated after the bitstream header has been parsed
    if (IsPartEnabled(enabled_parts, VC5_PART_IMAGE_FORMATS) &&
        decoder->header_finished &&
        !decoder->memory_allocated)
    {
        // Allocate space for the wavelet transforms
        AllocDecoderTransforms(decoder);

        // Allocate all buffers required for decoding
        AllocDecoderBuffers(decoder);

        // Reset the flags in the wavelet transforms
        PrepareDecoderTransforms(decoder);

        // The wavelet transforms and decoding buffers have been allocated
        decoder->memory_allocated = true;
    }
#endif

    // Found a codeblock element?
    if (codec->codeblock)
    {
        const int channel_number = codec->channel_number;

        // Have the channel dimensions been initialized?
        if (!decoder->channel[channel_number].initialized)
        {
            // Record the channel dimensions and component precision
            decoder->channel[channel_number].width = codec->channel_width;
            decoder->channel[channel_number].height = codec->channel_height;

            // Initialize the dimensions of this channel
            decoder->channel[channel_number].initialized = true;

            //TODO: Allocate space for the wavelet transforms and decoding buffers
        }

        // Is this the first codeblock encountered in the bitstream for this channel?
        if (!decoder->channel[channel_number].found_first_codeblock)
        {
            // Remember the number of bits per component in this and higher numbered channel
            decoder->channel[codec->channel_number].bits_per_component = codec->bits_per_component;

            // Found the first codeblock in the channel
            decoder->channel[channel_number].found_first_codeblock = true;
        }

        {
            // NOTE(review): this inner declaration shadows the outer `codec` variable;
            // both refer to &decoder->codec, so behavior is unchanged.
            CODEC_STATE *codec = &decoder->codec;

            const int subband_number = codec->subband_number;

            if( subband_number < decoder->subbands_to_decode )
            {
                // Decode the subband into its wavelet band
                error = DecodeChannelSubband(decoder, stream, chunk_size);
            }
            else
            {
                // Skip decoding of subband
                error = SkipPayload(stream, chunk_size);

                WAVELET* wavelet = decoder->transform[channel_number].wavelet[SubbandWaveletIndex(subband_number)];
                wavelet->valid_band_mask = 0xF;
            }

            // Set the subband number for the next band expected in the bitstream
            codec->subband_number++;

            // Was the subband successfully decoded?
            if (error == CODEC_ERROR_OKAY)
            {
                // Record that this subband has been decoded successfully
                SetDecodedBandMask(codec, subband_number);
            }

            // Done decoding all subbands in this channel?
            if (codec->subband_number == codec->subband_count)
            {
                // Advance to the next channel
                codec->channel_number++;

                // Reset the subband number
                codec->subband_number = 0;
            }
        }

    }

    return error;
}
+
+/*!
+ @brief Return true if the tag corresponds to a bitstream header parameter
+ */
+bool IsHeaderParameter(TAGWORD tag)
+{
+ switch (tag)
+ {
+ case CODEC_TAG_ImageWidth:
+ case CODEC_TAG_ImageHeight:
+ case CODEC_TAG_ChannelCount:
+ case CODEC_TAG_SubbandCount:
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+ case CODEC_TAG_ImageFormat:
+ case CODEC_TAG_PatternWidth:
+ case CODEC_TAG_PatternHeight:
+ case CODEC_TAG_ComponentsPerSample:
+ case CODEC_TAG_MaxBitsPerComponent:
+#endif
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/*!
+ @brief Return the header mask that corresponds to the header tag
+ */
+uint16_t GetHeaderMask(TAGWORD tag)
+{
+ uint16_t header_mask = 0;
+
+ switch (tag)
+ {
+ case CODEC_TAG_ImageWidth:
+ header_mask = BITSTREAM_HEADER_FLAGS_IMAGE_WIDTH;
+ break;
+
+ case CODEC_TAG_ImageHeight:
+ header_mask = BITSTREAM_HEADER_FLAGS_IMAGE_HEIGHT;
+ break;
+
+ case CODEC_TAG_ChannelCount:
+ header_mask = BITSTREAM_HEADER_FLAGS_CHANNEL_COUNT;
+ break;
+
+ case CODEC_TAG_SubbandCount:
+ header_mask = BITSTREAM_HEADER_FLAGS_SUBBAND_COUNT;
+ break;
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+ case CODEC_TAG_ImageFormat:
+ header_mask = BITSTREAM_HEADER_FLAGS_IMAGE_FORMAT;
+ break;
+#endif
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+ case CODEC_TAG_PatternWidth:
+ header_mask = BITSTREAM_HEADER_FLAGS_PATTERN_WIDTH;
+ break;
+#endif
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+ case CODEC_TAG_PatternHeight:
+ header_mask = BITSTREAM_HEADER_FLAGS_PATTERN_HEIGHT;
+ break;
+#endif
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+ case CODEC_TAG_ComponentsPerSample:
+ header_mask = BITSTREAM_HEADER_FLAGS_COMPONENTS_PER_SAMPLE;
+ break;
+#endif
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+ case CODEC_TAG_MaxBitsPerComponent:
+ header_mask = BITSTREAM_HEADER_FLAGS_MAX_BITS_PER_COMPONENT;
+ break;
+#endif
+
+ default:
+ assert(0);
+ break;
+ }
+
+ return header_mask;
+}
+
+/*!
+ @brief Record that a header parameter was found in the bitstream.
+ The tag-value pair that corresponds to a header parameters must occur
+ in the bitstream header and must occur at most once in the bitstream.
+ */
+CODEC_ERROR UpdateHeaderParameter(DECODER *decoder, TAGWORD tag)
+{
+ uint16_t header_mask = 0;
+
+ if (!IsHeaderParameter(tag)) {
+ return CODEC_ERROR_UNEXPECTED;
+ }
+
+ header_mask = GetHeaderMask(tag);
+
+ if (header_mask == 0) {
+ return CODEC_ERROR_UNEXPECTED;
+ }
+
+ if (decoder->header_mask & header_mask) {
+ // The header parameter should occur at most once
+ return CODEC_ERROR_DUPLICATE_HEADER_PARAMETER;
+ }
+
+ // Record this encounter with the header parameter
+ decoder->header_mask |= header_mask;
+
+ return CODEC_ERROR_OKAY;
+}
+
+#if VC5_ENABLED_PART(VC5_PART_COLOR_SAMPLING)
/*!
	@brief Adjust the width of the layer (if necessary)

	Note that all layers have the same dimensions so the layer index is not
	passed as an argument to this routine.

	All layers have the same width as the encoded width, so this routine
	returns its input unchanged; the decoder argument is accepted only for
	interface symmetry with @ref LayerHeight.
 */
DIMENSION LayerWidth(DECODER *decoder, DIMENSION width)
{
    //CODEC_STATE *codec = &decoder->codec;
    (void)decoder;
    return width;
}
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_COLOR_SAMPLING)
+/*!
+ @brief Adjust the height of the layer to account for interlaced frames
+ Note that all layers have the same dimensions so the layer index is not
+ passed as an argument to this routine.
+ */
+DIMENSION LayerHeight(DECODER *decoder, DIMENSION height)
+{
+ CODEC_STATE *codec = &decoder->codec;
+
+ if (codec->progressive == 0)
+ {
+ height /= 2;
+ }
+
+ return height;
+}
+#endif
+
+/*!
+ @brief Decode the specified wavelet subband
+ After decoded the specified subband, the routine checks whether all bands
+ in the current wavelet have been decoded and if so the inverse transform is
+ applied to the wavelet to reconstruct the lowpass band in the wavelet at the
+ next lower level.
+ */
+CODEC_ERROR DecodeChannelSubband(DECODER *decoder, BITSTREAM *input, size_t chunk_size)
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+ CODEC_STATE *codec = &decoder->codec;
+
+ const int channel_number = codec->channel_number;
+ const int subband_number = codec->subband_number;
+
+ // Get the index to the wavelet corresponding to this subband
+ const int index = SubbandWaveletIndex(subband_number);
+
+ // Get the index of the wavelet band corresponding to this subband
+ const int band = SubbandBandIndex(subband_number);
+
+ // Wavelet containing the band to decode
+ WAVELET *wavelet = NULL;
+
+ //TODO: Need to check that the codeblock matches the chunk size
+ (void)chunk_size;
+
+ // Allocate the wavelets for this channel if not already allocated
+ AllocateChannelWavelets(decoder, channel_number);
+
+ // Is this a highpass band?
+ if (subband_number > 0)
+ {
+ // Decode a highpass band
+
+ // Get the wavelet that contains the highpass band
+ wavelet = decoder->transform[channel_number].wavelet[index];
+
+ // The wavelets are preallocated
+ assert(wavelet != NULL);
+
+ error = DecodeHighpassBand(decoder, input, wavelet, band);
+ if (error == CODEC_ERROR_OKAY)
+ {
+ // Update the wavelet band valid flags
+ UpdateWaveletValidBandMask(wavelet, band);
+ }
+
+ // Save the quantization factor
+ wavelet->quant[band] = codec->band.quantization;
+ }
+ else
+ {
+ // Decode a lowpass band
+
+ // Get the wavelet that contains the lowpass band
+ wavelet = decoder->transform[channel_number].wavelet[index];
+
+ // The lowpass band must be subband zero
+ assert(subband_number == 0);
+
+ // The lowpass data is always stored in wavelet band zero
+ assert(band == 0);
+
+ // The wavelets are preallocated
+ assert(wavelet != NULL);
+
+ error = DecodeLowpassBand(decoder, input, wavelet);
+ if (error == CODEC_ERROR_OKAY)
+ {
+ // Update the wavelet band valid flags
+ UpdateWaveletValidBandMask(wavelet, band);
+ }
+ }
+
+ // Ready to invert this wavelet to get the lowpass band in the lower wavelet?
+ if (BandsAllValid(wavelet))
+ {
+ // Apply the inverse wavelet transform to reconstruct the lower level wavelet
+ error = ReconstructWaveletBand(decoder, channel_number, wavelet, index);
+ }
+
+ return error;
+}
+
+/*!
+ @brief Invert the wavelet to reconstruct a lowpass band
+ The bands in the wavelet at one level are used to compute the lowpass
+ band in the wavelet at the next lower level in the transform. Wavelet
+ levels are numbered starting at zero for the original image. The
+ reference codec for the baseline profile uses the classic wavelet
+ tree where each wavelet at a high level depends only on the wavelet
+ at the next lower level and each wavelet is a spatial wavelet with
+ four bands.
+ This routine is called during decoding after all bands in a wavelet
+ have been decoded and the lowpass band in the wavelet at the next
+ lower level can be computed by applying the inverse wavelet transform.
+ This routine is not called for the wavelet at level one to reconstruct the
+ decoded component arrays. Special routines are used to compute each component
+ array using the wavelet at level one in each channel.
+
+ See @ref ReconstructUnpackedImage.
+ */
+CODEC_ERROR ReconstructWaveletBand(DECODER *decoder, int channel, WAVELET *wavelet, int index)
+{
+    // Prescale shift that was applied to this wavelet level before encoding
+    PRESCALE prescale = decoder->codec.prescale_table[index];
+
+    // Is the current wavelet at a higher level than wavelet level one?
+    // (Level one is handled separately by the component reconstruction routines.)
+    if (index > 0)
+    {
+        // Reconstruct the lowpass band in the wavelet one level below this one
+        const int lowpass_index = index - 1;
+        WAVELET *lowpass;
+        int lowpass_width;
+        int lowpass_height;
+
+        lowpass = decoder->transform[channel].wavelet[lowpass_index];
+        assert(lowpass != NULL);
+        if (! (lowpass != NULL)) {
+            // The wavelet tree was not fully allocated
+            return CODEC_ERROR_UNEXPECTED;
+        }
+
+        lowpass_width = lowpass->width;
+        lowpass_height = lowpass->height;
+
+        // Check that the reconstructed wavelet is valid
+        if( lowpass_width <= 0 || lowpass_height <= 0 )
+        {
+            assert(false);
+            return CODEC_ERROR_IMAGE_DIMENSIONS;
+        }
+
+        // Check that the lowpass band has not already been reconstructed
+        assert((lowpass->valid_band_mask & BandValidMask(0)) == 0);
+
+        // Check that all of the wavelet bands have been decoded
+        assert(BandsAllValid(wavelet));
+
+        // Invert this wavelet to compute the lowpass band of the lower wavelet
+        TransformInverseSpatialQuantLowpass(decoder->allocator, wavelet, lowpass, prescale);
+
+        // Mark lowpass band zero of the lower wavelet as valid
+        UpdateWaveletValidBandMask(lowpass, 0);
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Set the bit for the specified subband in the decoded band mask
+ The decoded subband mask is used to track which subbands have been
+ decoded in the current channel. It is reset at the start of each
+ channel.
+ */
+CODEC_ERROR SetDecodedBandMask(CODEC_STATE *codec, int subband)
+{
+    // Silently ignore subband indices outside the valid range
+    if (subband < 0 || subband >= MAX_SUBBAND_COUNT) {
+        return CODEC_ERROR_OKAY;
+    }
+
+    // Record that this subband has been decoded in the current channel
+    codec->decoded_subband_mask |= (1 << subband);
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Decoded the lowpass band from the bitstream
+ The wavelet at the highest level is passes as an argument.
+ This routine decodes lowpass band in the bitstream into the
+ lowpass band of the wavelet.
+ */
+CODEC_ERROR DecodeLowpassBand(DECODER *decoder, BITSTREAM *stream, WAVELET *wavelet)
+{
+    CODEC_ERROR error = CODEC_ERROR_OKAY;
+    CODEC_STATE *codec = &decoder->codec;
+
+    int lowpass_band_width;             // Lowpass band dimensions
+    int lowpass_band_height;
+    int lowpass_band_pitch;             // Row stride in units of pixels (not bytes)
+    PIXEL *lowpass_band_ptr;            // Pointer into the lowpass band
+
+    PRECISION lowpass_precision;        // Number of bits per lowpass coefficient
+
+    int row, column;
+
+    lowpass_band_width = wavelet->width;
+    lowpass_band_height = wavelet->height;
+    lowpass_band_pitch = wavelet->pitch/sizeof(PIXEL);
+    // The lowpass data is always stored in wavelet band zero
+    lowpass_band_ptr = wavelet->data[0];
+
+    // The coefficient precision was parsed earlier into the codec state
+    lowpass_precision = codec->lowpass_precision;
+
+    // Decode each row in the lowpass image
+    for (row = 0; row < lowpass_band_height; row++)
+    {
+        for (column = 0; column < lowpass_band_width; column++)
+        {
+            // Lowpass coefficients are stored as fixed-width fields (no entropy coding)
+            COEFFICIENT lowpass_value = (COEFFICIENT)GetBits(stream, lowpass_precision);
+            //assert(0 <= lowpass_value && lowpass_value <= COEFFICIENT_MAX);
+
+            //if (lowpass_value > COEFFICIENT_MAX) {
+            //    lowpass_value = COEFFICIENT_MAX;
+            //}
+
+            lowpass_band_ptr[column] = lowpass_value;
+        }
+
+        // Advance to the next row in the lowpass image
+        lowpass_band_ptr += lowpass_band_pitch;
+    }
+
+    // Align the bitstream to the next tag value pair
+    AlignBitsSegment(stream);
+
+    // Return indication of lowpass decoding success
+    return error;
+}
+
+/*!
+ @brief Decode the highpass band from the bitstream
+ The specified wavelet band is decoded from the bitstream
+ using the codebook and encoding method specified in the
+ bitstream.
+ */
+CODEC_ERROR DecodeHighpassBand(DECODER *decoder, BITSTREAM *stream, WAVELET *wavelet, int band)
+{
+    CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+    // Get the highpass band dimensions
+    DIMENSION width = wavelet->width;   //codec->band.width;
+    DIMENSION height = wavelet->height; //codec->band.height;
+
+    // Check that the band index is in range
+    assert(0 <= band && band < wavelet->band_count);
+
+    // Encoded coefficients start on a tag boundary
+    AlignBitsSegment(stream);
+
+    // Decode this subband using run-length decoding with the active codebook
+    error = DecodeBandRuns(stream, decoder->codebook, wavelet->data[band], width, height, wavelet->pitch);
+    assert(error == CODEC_ERROR_OKAY);
+
+    // Return failure if a problem was encountered while reading the band coefficients
+    if (error != CODEC_ERROR_OKAY) {
+        return error;
+    }
+
+    // The encoded band coefficients end on a bitstream word boundary
+    // to avoid interference with the marker for the coefficient band trailer
+    AlignBitsWord(stream);
+
+    // Decode the band trailer
+    error = DecodeBandTrailer(stream);
+    // Record the trailer status as the decoder's most recent error
+    decoder->error = error;
+    assert(error == CODEC_ERROR_OKAY);
+    return error;
+}
+
+/*!
+ @brief Decode the highpass band from the bitstream
+ The highpass band in the bitstream is decoded using the specified
+ codebook. This routine assumes that the highpass band was encoded
+ using the run lengths encoding method which is the default for all
+ current codec implementations.
+ The encoded highpass band consists of signed values and runs of zeros.
+ Each codebook entry specifies either an unsigned magnitude with a run
+ length of one or a run of zeros. The unsigned magnitude is immediately
+ followed by the sign bit.
+ Unsigned magnitudes always have a run length of one.
+ Note that runs of zeros can straddle end of line boundaries.
+ The end of the highpass band is marked by a special codeword.
+ Special codewords in the codebook have a run length of zero.
+ The value indicates the type or purpose of the special codeword.
+ */
+CODEC_ERROR DecodeBandRuns(BITSTREAM *stream, CODEBOOK *codebook, PIXEL *data,
+                           DIMENSION width, DIMENSION height, DIMENSION pitch)
+{
+    CODEC_ERROR error = CODEC_ERROR_OKAY;
+    size_t data_count;                  // Number of coefficients still to be filled
+    size_t row_padding;                 // Pixels of padding at the end of each row
+    int row = 0;
+    int column = 0;
+    int index = 0;                      // Output position in the band (in pixels)
+    //BITWORD special;
+    RUN run = RUN_INITIALIZER;
+
+    // Convert the pitch to units of pixels
+    pitch /= sizeof(PIXEL);
+
+    // Check that the band dimensions are reasonable
+    assert(width <= pitch);
+
+    // Compute the number of pixels encoded into the band
+    data_count = height * width;
+    row_padding = pitch - width;
+
+    while (data_count > 0)
+    {
+        // Get the next run length and value
+        error = GetRun(stream, codebook, &run);
+        if (error != CODEC_ERROR_OKAY) {
+            return error;
+        }
+
+        // Check that the run does not extend past the end of the band
+        assert(run.count <= data_count);
+        if (run.count > data_count) {
+            // A corrupt or malicious bitstream could carry a run longer than
+            // the remaining band: writing it would overflow the band buffer
+            // and underflow the size_t counter. The assert above vanishes in
+            // release builds (NDEBUG), so fail explicitly here.
+            return CODEC_ERROR_UNEXPECTED;
+        }
+
+        // Copy the value into the specified number of pixels in the band
+        while (run.count > 0)
+        {
+            // Reached the end of the column?
+            if (column == width)
+            {
+                // Need to pad the end of the row?
+                if (row_padding > 0)
+                {
+                    int count;
+                    for (count = 0; (size_t)count < row_padding; count++) {
+                        data[index++] = 0;
+                    }
+                }
+
+                // Advance to the next row
+                row++;
+                column = 0;
+            }
+
+            data[index++] = (PIXEL)run.value;
+            column++;
+            run.count--;
+            data_count--;
+        }
+    }
+
+    // The last run should have ended at the end of the band
+    assert(data_count == 0 && run.count == 0);
+
+    // Check for the special codeword that marks the end of the highpass band
+    error = GetRlv(stream, codebook, &run);
+    if (error == CODEC_ERROR_OKAY) {
+        if (! (run.count == 0 || run.value == SPECIAL_MARKER_BAND_END)) {
+            error = CODEC_ERROR_BAND_END_MARKER;
+        }
+    }
+
+    return error;
+}
+
+/*!
+ @brief Decode the band trailer that follows a highpass band
+ This routine aligns the bitstream to a tag value boundary.
+ Currently the band trailer does not perform any function beyond
+ preparing the bitstream for reading the next tag value pair.
+ */
+CODEC_ERROR DecodeBandTrailer(BITSTREAM *stream)
+{
+    // The band trailer carries no payload; its only effect is to realign
+    // the bitstream to the next tag-value segment boundary
+    AlignBitsSegment(stream);
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Return true if the bitstream has been completely decoded
+ The end of sample flag is set to true when enough of the sample has been read
+ from the bitstream to allow the output frame to be fully reconstructed. Any
+ remaining bits in the sample can be ignored and it may be the case that further
+ reads from the bitstream will result in an error.
+ The end of sample flag is set when the tag for the frame trailer is found, but
+ may be set when sufficient subbands have been decoded to allow the frame to be
+ reconstructed at the desired resolution. For example, it is not an error if
+ bands at level one in the wavelet tree are not present in the bitstream when
+ decoding to half resolution. The decoder should set the end of sample flag as
+ soon as it is no longer necessary to read further information from the sample.
+
+ @todo Rename this routine to end of image or end of bitstream?
+ */
+bool EndOfSample(DECODER *decoder)
+{
+    // The flag is maintained in the codec state while parsing the bitstream
+    return decoder->codec.end_of_sample;
+}
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+/*!
+    @brief Return true if the layer has been completely read from the bitstream
+    The end of the sample implies the end of the current layer.
+ */
+bool EndOfLayer(DECODER *decoder)
+{
+    return (decoder->codec.end_of_layer || decoder->codec.end_of_sample);
+}
+#endif
+
+/*!
+ @brief Return true if the entire bitstream header has been decoded
+ The bitstream header has been completely decoded when at least one
+ non-header parameter has been encountered in the bitstream and all
+ of the required header parameters have been decoded.
+
+ @todo Create a bitstream that can be used to test this predicate.
+ */
+bool IsHeaderComplete(DECODER *decoder)
+{
+    // Complete only when the non-header portion of the bitstream has been
+    // reached and every required header parameter has been decoded
+    return (decoder->header_finished &&
+            ((decoder->header_mask & BITSTREAM_HEADER_FLAGS_REQUIRED) == BITSTREAM_HEADER_FLAGS_REQUIRED));
+}
+
+/*!
+ @brief Return true if all channels in the bitstream have been processed
+ It is only necessary to test the bands in the largest wavelet in each
+ channel since its lowpass band would not be finished if the wavelets
+ at the higher levels were incomplete.
+ */
+bool IsDecodingComplete(DECODER *decoder)
+{
+    const int channel_count = decoder->codec.channel_count;
+    int channel_index;
+
+    for (channel_index = 0; channel_index < channel_count; channel_index++)
+    {
+        // Only the largest wavelet needs to be checked: its lowpass band
+        // cannot be valid unless every higher level was already completed
+        WAVELET *wavelet = decoder->transform[channel_index].wavelet[0];
+
+        // Incomplete if the wavelet is missing or any band is unprocessed
+        if (wavelet == NULL || !AllBandsValid(wavelet)) {
+            return false;
+        }
+    }
+
+    // All bands in all wavelets in all channels are done
+    return true;
+}
+
+/*!
+ @brief Perform the final wavelet transform in each channel to compute the component arrays
+ Each channel is decoded and the lowpass and highpass bands are used to reconstruct the
+ lowpass band in the wavelet at the next lower level by applying the inverse wavelet filter.
+ Highpass band decoding and computation of the inverse wavelet transform in each channel
+ stops when the wavelet at the level immediately above the output frame is computed.
+ This routine performs the final wavelet transform in each channel and combines the channels
+ into a single output frame. Note that this routine is called for each layer in a sample,
+ producing an output frame for each layer. The output frames for each layer must be combine
+ by an image compositing operation into a single output frame for the fully decoded sample.
+ */
+CODEC_ERROR ReconstructUnpackedImage(DECODER *decoder, UNPACKED_IMAGE *image)
+{
+    TIMESTAMP("[BEG]", 2)
+
+    CODEC_ERROR error = CODEC_ERROR_OKAY;
+    gpr_allocator *allocator = decoder->allocator;
+
+    int channel_count = decoder->codec.channel_count;
+    int channel_number;
+
+    // Check for enough space in the local array allocations
+    //assert(channel_count <= MAX_CHANNEL_COUNT);
+
+    // Allocate the vector of component arrays (one entry per channel)
+    size_t size = channel_count * sizeof(COMPONENT_ARRAY);
+    image->component_array_list = allocator->Alloc(size);
+    if (image->component_array_list == NULL) {
+        return CODEC_ERROR_OUTOFMEMORY;
+    }
+
+    // Clear the component array information so that the state is consistent
+    image->component_count = 0;
+    memset(image->component_array_list, 0, size);
+
+    for (channel_number = 0; channel_number < channel_count; channel_number++)
+    {
+        // Get the dimensions of this channel
+        DIMENSION channel_width = decoder->channel[channel_number].width;
+        DIMENSION channel_height = decoder->channel[channel_number].height;
+        PRECISION bits_per_component = decoder->channel[channel_number].bits_per_component;
+
+        // Amount of prescaling applied to the component array values before encoding
+        // (entry zero of the prescale table corresponds to this final inverse transform)
+        PRESCALE prescale = decoder->codec.prescale_table[0];
+
+        // Allocate the component array for this channel
+        error = AllocateComponentArray(allocator,
+                                       &image->component_array_list[channel_number],
+                                       channel_width,
+                                       channel_height,
+                                       bits_per_component);
+
+        // NOTE(review): on failure the component arrays allocated in earlier
+        // iterations are not freed here; presumably the caller releases the
+        // partially constructed image -- confirm
+        if (error != CODEC_ERROR_OKAY) {
+            return error;
+        }
+
+        // Invert the level-one wavelet directly into this channel's component array
+        error = TransformInverseSpatialQuantArray(allocator,
+                                                  decoder->transform[channel_number].wavelet[0],
+                                                  image->component_array_list[channel_number].data,
+                                                  channel_width,
+                                                  channel_height,
+                                                  image->component_array_list[channel_number].pitch,
+                                                  prescale);
+        if (error != CODEC_ERROR_OKAY) {
+            return error;
+        }
+    }
+
+    // One component array is output by the decoding process per channel in the bitstream
+    image->component_count = channel_count;
+
+    TIMESTAMP("[END]", 2)
+
+    return error;
+}
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+/*!
+ @brief Perform the final wavelet transform in each channel to compute the output frame
+ Each channel is decoded and the lowpass and highpass bands are used to reconstruct the
+ lowpass band in the wavelet at the next lower level by applying the inverse wavelet filter.
+ Highpass band decoding and computation of the inverse wavelet transform in each channel
+ stops when the wavelet at the level immediately above the output frame is computed.
+ This routine performs the final wavelet transform in each channel and combines the channels
+ into a single output frame. Note that this routine is called for each layer in a sample,
+ producing an output frame for each layer. The output frames for each layer must be combine
+ by an image compositing operation into a single output frame for the fully decoded sample.
+ Refer to @ref ReconstructSampleFrame for the details of how the frames from each layer
+ are combined to produce the output frame for the decoded sample.
+ */
+CODEC_ERROR ReconstructLayerImage(DECODER *decoder, IMAGE *image)
+{
+    CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+    DIMENSION decoded_width = decoder->decoded.width;
+    DIMENSION decoded_height = decoder->decoded.height;
+    int channel_count = decoder->codec.channel_count;
+
+    //DIMENSION layer_width = LayerWidth(decoder, decoded_width);
+    //DIMENSION layer_height = LayerHeight(decoder, decoded_height);
+    DIMENSION layer_width = decoded_width;
+    DIMENSION layer_height = decoded_height;
+
+    //TODO: Adjust the layer width to account for chroma sampling
+
+    // Allocate a buffer for the intermediate output from each wavelet transform
+    // (channels are interleaved within each row, hence the channel_count factor)
+    size_t decoded_frame_pitch = layer_width * channel_count * sizeof(PIXEL);
+    size_t decoded_frame_size = layer_height * decoded_frame_pitch;
+    PIXEL *decoded_frame_buffer = (PIXEL *)Alloc(decoder->allocator, decoded_frame_size);
+    if (decoded_frame_buffer == NULL) {
+        return CODEC_ERROR_OUTOFMEMORY;
+    }
+
+    // NOTE(review): decoder.h declares TransformInverseSpatialQuantBuffer with
+    // four parameters (output_width and output_pitch) but only three arguments
+    // are passed here -- confirm that the prototype and call site agree
+    error = TransformInverseSpatialQuantBuffer(decoder, decoded_frame_buffer, (DIMENSION)decoded_frame_pitch);
+    if (error == CODEC_ERROR_OKAY)
+    {
+        // Pack the decoded frame into the output format
+        error = PackOutputImage(decoded_frame_buffer, decoded_frame_pitch, decoder->encoded.format, image);
+    }
+
+    // Free the buffer for the decoded frame
+    Free(decoder->allocator, decoded_frame_buffer);
+
+    return error;
+}
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+/*!
+ @brief Combine multiple decoded frames from each layer into a single output frame
+ An encoded sample may contain multiple sub-samples called layers. For example,
+ there may be two sub-samples (layers) for the left and right frames in a stereo pair.
+ Note that the baseline profile supports only one layer per sample.
+ Each layer is decoded independently to produce an output frame for that layer. The
+ CineForm codec does not support dependent sub-samples in any of the existing profiles.
+
+ This routine forms a composite frame for the output of the completely decoded sample
+ from the individual frames obtained by decoding each layer. It is contemplated that
+ any image compositing algorithm could be used to combine decoded layers, although the
+ most sophisticated algorithms might be reserved for the most advanced profiles.
+ The dimensions of the output frame could be much larger than the dimensions of any
+ of the frames decoded from individual layers. Compositing could overlay the frames
+ from the individual layers with an arbitrary spatial offset applied to the frame from
+ each layer, creating a collage from frames decoded from the individual layers. Typical
+ applications may use only the most elementary compositing operations.
+ */
+CODEC_ERROR ReconstructSampleFrame(DECODER *decoder, IMAGE image_array[], int frame_count, IMAGE *output_image)
+{
+    // The layer images are treated as fields of an interlaced frame, so the
+    // output frame is twice the height of each decoded field
+    DIMENSION frame_width = image_array[0].width;
+    DIMENSION field_height = image_array[0].height;
+    DIMENSION frame_height = 2 * field_height;
+    PIXEL_FORMAT frame_format = image_array[0].format;
+
+    // NOTE(review): the result of AllocImage is not checked here; a failed
+    // allocation would only surface inside ComposeFields -- confirm
+    AllocImage(decoder->allocator, output_image, frame_width, frame_height, frame_format);
+
+    // Interleave the fields from each layer into the output frame
+    return ComposeFields(image_array, frame_count, output_image);
+}
+#endif
+
+
diff --git a/gpr/source/lib/vc5_decoder/decoder.h b/gpr/source/lib/vc5_decoder/decoder.h
new file mode 100755
index 0000000..013aa31
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/decoder.h
@@ -0,0 +1,336 @@
+/*! @file decoder.h
+ *
+ * @brief Core decoder functions and data structure
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DECODER_H
+#define DECODER_H
+
+/*!
+ Data structure for the buffers and information used by
+ the decoder.
+
+ The decoder data structure contains information that will be
+ used by the decoder for decoding every sample in the sequence.
+ Information that varies during decoding, such as the current
+ subband index or the dimensions of the bands in the wavelet that
+ is being decoded, is stored in the codec state.
+
+ @todo Consider changing the transform data structure to use a
+ vector of wavelets rather than a vector of wavelet pointers.
+
+ @todo Remove unused substructures
+
+ @todo Dynamically allocate the vector of wavelet trees based on the
+ actual number of channels rather than the maximum channel count.
+
+ @todo Need to handle the cases where header parameters are provided
+ by the application instead of being in the bitstream.
+ */
+typedef struct _decoder
+{
+    CODEC_ERROR error;          //!< Error code from the most recent codec operation
+    gpr_allocator *allocator;   //!< Memory allocator used to allocate all dynamic data
+    CODEC_STATE codec;          //!< Information gathered while decoding the current sample
+
+    //! Parts of the VC-5 standard that are supported at runtime by the codec implementation
+    ENABLED_PARTS enabled_parts;
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    uint64_t frame_number;      //!< Every sample in a clip has a unique frame number
+#endif
+
+    uint16_t header_mask;       //!< Track which header parameters have been decoded
+    bool header_finished;       //!< Finished decoding the bitstream header?
+    bool memory_allocated;      //!< True if memory for decoding has been allocated
+
+    //! Dimensions of each channel found in the bitstream
+    struct _channel
+    {
+        DIMENSION width;        //!< Width of this channel
+        DIMENSION height;       //!< Height of this channel
+
+        //! Bits per component for the component array corresponding to this channel
+        uint_least8_t bits_per_component;
+
+        bool initialized;       //!< Has the channel information been initialized?
+
+        bool found_first_codeblock; //!< Has the first codeblock in the channel been found?
+
+    } channel[MAX_CHANNEL_COUNT];   //!< Information about each channel in the bitstream
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    //! Dimensions and format of the encoded image
+    struct _encoded
+    {
+        DIMENSION width;        //!< Encoded width
+        DIMENSION height;       //!< Encoded height
+        IMAGE_FORMAT format;    //!< Encoded format
+
+    } encoded;      //!< Information about the image as represented in the bitstream
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    //! Dimensions and format of the decoded image
+    struct _decoded
+    {
+        DIMENSION width;        //!< Decoded width
+        DIMENSION height;       //!< Decoded height
+        //RESOLUTION resolution;
+
+    } decoded;      //!< Information about the decoded component arrays
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    //! Dimensions and format of the frame after post-processing (see @ref ImageRepackingProcess)
+    struct _output
+    {
+        DIMENSION width;        //!< Output frame width
+        DIMENSION height;       //!< Output frame height
+        PIXEL_FORMAT format;    //!< Output frame pixel format
+
+    } output;       //!< Information about the packed image output by the image repacking process
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    //! Dimensions and format of the image output by the display process
+    struct _display
+    {
+        DIMENSION width;        //!< Output frame width
+        DIMENSION height;       //!< Output frame height
+        PIXEL_FORMAT format;    //!< Output frame pixel format
+
+    } display;      //!< Information about the displayable image output by the display process
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+    int layer_count;            //!< Number of subsamples in each sample
+#endif
+
+    int wavelet_count;          //!< Number of wavelets in each channel
+
+    //! Number of subbands that must be decoded before the output can be reconstructed
+    int subbands_to_decode;
+
+    //! Wavelet tree for each channel
+    TRANSFORM transform[MAX_CHANNEL_COUNT];
+
+    //! Pointer to the active codebook for variable-length codes
+    CODEBOOK *codebook;
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    uint8_t image_sequence_identifier[16];  //!< UUID for the unique image sequence identifier
+    uint32_t image_sequence_number;         //!< Number of the image in the image sequence
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+    bool progressive;           //!< True if the encoded frame is progressive
+    bool top_field_first;       //!< True if the top field is encoded first
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+    bool section_flag;          //!< Control whether section processing is enabled
+    FILE *section_logfile;      //!< Log file for writing section information
+#endif
+
+} DECODER;
+
+/*!
+ @brief Information that can be obtained from an bitstream header
+
+ The bitstream header consists of tag-value pairs that must occur in the bitstream
+ before the first codeblock if the parameters are present in the bitstream.
+
+ Consider organizing the values obtained from the bitstream into input parameters
+ and encoded parameters, as is done elsewhere in the decoder, even though the
+ bitstream does not have such a rigid syntax.
+ */
+typedef struct _bitstream_header
+{
+    uint16_t channel_count;         //!< Number of channels in the bitstream
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    uint64_t frame_number;          //!< Every sample in a clip has a unique frame number
+    PIXEL_FORMAT input_format;      //!< Pixel format of the frame input to the encoder
+
+    // Encoded dimensions and format of the encoded frame (including padding)
+    DIMENSION encoded_width;        //!< Width of the encoded frame
+    DIMENSION encoded_height;       //!< Height of the encoded frame
+
+    IMAGE_FORMAT encoded_format;    //!< Encoded format
+
+    // The display aperture within the encoded frame
+    DIMENSION row_offset;           //!< Top edge of the display aperture
+    DIMENSION column_offset;        //!< Left edge of the display aperture
+    DIMENSION display_width;        //!< Width of the displayable frame (if specified in the sample)
+    DIMENSION display_height;       //!< Height of the displayable frame (if specified in the sample)
+#endif
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+    DIMENSION video_channel_count;  // Number of layers?
+    DIMENSION current_video_channel;    //TODO: Find better way to handle this
+    int layer_count;                //!< Number of layers in the sample
+    bool progressive;               //!< Progressive versus interlaced frames
+    bool top_field_first;           //!< Interlaced frame with top field first
+#endif
+
+} BITSTREAM_HEADER;
+
+//! Flags that indicate which header parameters have been assigned values
+typedef enum _bitstream_header_flags
+{
+    BITSTREAM_HEADER_FLAGS_IMAGE_WIDTH = (1 << 0),
+    BITSTREAM_HEADER_FLAGS_IMAGE_HEIGHT = (1 << 1),
+    BITSTREAM_HEADER_FLAGS_CHANNEL_COUNT = (1 << 2),
+    BITSTREAM_HEADER_FLAGS_SUBBAND_COUNT = (1 << 3),
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    BITSTREAM_HEADER_FLAGS_IMAGE_FORMAT = (1 << 4),
+    BITSTREAM_HEADER_FLAGS_PATTERN_WIDTH = (1 << 5),
+    BITSTREAM_HEADER_FLAGS_PATTERN_HEIGHT = (1 << 6),
+    BITSTREAM_HEADER_FLAGS_COMPONENTS_PER_SAMPLE = (1 << 7),
+    BITSTREAM_HEADER_FLAGS_MAX_BITS_PER_COMPONENT = (1 << 8),
+
+    //! Required header parameters (all must be seen before decoding can proceed)
+    BITSTREAM_HEADER_FLAGS_REQUIRED = (BITSTREAM_HEADER_FLAGS_IMAGE_WIDTH |
+                                       BITSTREAM_HEADER_FLAGS_IMAGE_HEIGHT |
+                                       BITSTREAM_HEADER_FLAGS_IMAGE_FORMAT |
+                                       BITSTREAM_HEADER_FLAGS_PATTERN_WIDTH |
+                                       BITSTREAM_HEADER_FLAGS_PATTERN_HEIGHT |
+                                       BITSTREAM_HEADER_FLAGS_COMPONENTS_PER_SAMPLE),
+#else
+
+    //! Required header parameters (all must be seen before decoding can proceed)
+    BITSTREAM_HEADER_FLAGS_REQUIRED = (BITSTREAM_HEADER_FLAGS_IMAGE_WIDTH |
+                                       BITSTREAM_HEADER_FLAGS_IMAGE_HEIGHT),
+#endif
+
+} BITSTREAM_HEADER_FLAGS;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ CODEC_ERROR InitDecoder(DECODER *decoder, const gpr_allocator *allocator);
+
+ CODEC_ERROR SetDecoderLogfile(DECODER *decoder, FILE *logfile);
+
+ CODEC_ERROR ReleaseDecoder(DECODER *decoder);
+
+ CODEC_ERROR PrepareDecoderState(DECODER *decoder, const DECODER_PARAMETERS *parameters);
+
+ CODEC_ERROR PrepareDecoderTransforms(DECODER *decoder);
+
+ CODEC_ERROR SetOutputImageFormat(DECODER *decoder,
+ const DECODER_PARAMETERS *parameters,
+ DIMENSION *width_out,
+ DIMENSION *height_out,
+ PIXEL_FORMAT *format_out);
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+ CODEC_ERROR SetDisplayImageFormat(DECODER *decoder,
+ const DECODER_PARAMETERS *parameters,
+ DIMENSION *width_out,
+ DIMENSION *height_out,
+ PIXEL_FORMAT *format_out);
+#endif
+
+ bool ChannelLowpassBandsAllValid(const DECODER *decoder, int wavelet_index);
+
+ PIXEL_FORMAT EncodedPixelFormat(const DECODER *decoder, const DECODER_PARAMETERS *parameters);
+
+ CODEC_ERROR PackOutputImage(void *buffer, size_t pitch, int encoded_format, IMAGE *image);
+
+ CODEC_ERROR ImageRepackingProcess(const UNPACKED_IMAGE *unpacked_image,
+ PACKED_IMAGE *packed_image,
+ const DECODER_PARAMETERS *parameters);
+
+ CODEC_ERROR UpdateCodecState(DECODER *decoder, BITSTREAM *stream, TAGVALUE segment);
+
+ bool IsHeaderParameter(TAGWORD tag);
+
+ CODEC_ERROR UpdateHeaderParameter(DECODER *decoder, TAGWORD tag);
+
+ CODEC_ERROR PrepareDecoder(DECODER *decoder, const DECODER_PARAMETERS *parameters);
+
+ CODEC_ERROR AllocDecoderTransforms(DECODER *decoder);
+
+ CODEC_ERROR ReleaseDecoderTransforms(DECODER *decoder);
+
+ CODEC_ERROR AllocDecoderBuffers(DECODER *decoder);
+
+ CODEC_ERROR ReleaseDecoderBuffers(DECODER *decoder);
+
+ CODEC_ERROR AllocateChannelWavelets(DECODER *decoder, int channel);
+
+ CODEC_ERROR DecodeStream(STREAM *stream, UNPACKED_IMAGE *image, const DECODER_PARAMETERS *parameters);
+
+ CODEC_ERROR DecodeImage(STREAM *stream, IMAGE *image, RGB_IMAGE *rgb_image, DECODER_PARAMETERS *parameters);
+
+ CODEC_ERROR DecodingProcess(DECODER *decoder, BITSTREAM *stream, UNPACKED_IMAGE *image, const DECODER_PARAMETERS *parameters);
+
+ CODEC_ERROR DecodeSingleImage(DECODER *decoder, BITSTREAM *input, UNPACKED_IMAGE *image, const DECODER_PARAMETERS *parameters);
+
+ CODEC_ERROR DecodeSampleLayer(DECODER *decoder, BITSTREAM *input, IMAGE *image);
+
+ CODEC_ERROR DecodeChannelSubband(DECODER *decoder, BITSTREAM *input, size_t chunk_size);
+
+ CODEC_ERROR ReconstructWaveletBand(DECODER *decoder, int channel, WAVELET *wavelet, int index);
+
+ CODEC_ERROR ParseChannelIndex(BITSTREAM *stream, uint32_t *channel_size, int channel_count);
+
+ DIMENSION LayerWidth(DECODER *decoder, DIMENSION width);
+ DIMENSION LayerHeight(DECODER *decoder, DIMENSION height);
+
+ CODEC_ERROR ProcessSampleMarker(DECODER *decoder, BITSTREAM *stream, TAGWORD marker);
+
+ CODEC_ERROR SetDecodedBandMask(CODEC_STATE *codec, int subband);
+
+ CODEC_ERROR DecodeLowpassBand(DECODER *decoder, BITSTREAM *stream, WAVELET *wavelet);
+
+ CODEC_ERROR DecodeHighpassBand(DECODER *decoder, BITSTREAM *stream, WAVELET *wavelet, int band);
+
+ CODEC_ERROR DecodeBandRuns(BITSTREAM *stream, CODEBOOK *codebook, PIXEL *data,
+ DIMENSION width, DIMENSION height, DIMENSION pitch);
+
+ CODEC_ERROR DecodeBandTrailer(BITSTREAM *stream);
+
+ CODEC_ERROR DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *stream);
+
+ bool IsHeaderComplete(DECODER *decoder);
+
+ bool EndOfSample(DECODER *decoder);
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+ bool EndOfLayer(DECODER *decoder);
+ bool IsLayerComplete(DECODER *decoder);
+#endif
+
+ bool IsDecodingComplete(DECODER *decoder);
+
+ CODEC_ERROR ReconstructUnpackedImage(DECODER *decoder, UNPACKED_IMAGE *image);
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+ CODEC_ERROR ReconstructLayerImage(DECODER *decoder, IMAGE *image);
+#endif
+
+ CODEC_ERROR TransformInverseSpatialQuantBuffer(DECODER *decoder, void *output_buffer, DIMENSION output_width, DIMENSION output_pitch);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // DECODER_H
diff --git a/gpr/source/lib/vc5_decoder/dequantize.c b/gpr/source/lib/vc5_decoder/dequantize.c
new file mode 100755
index 0000000..48cfb64
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/dequantize.c
@@ -0,0 +1,88 @@
+/*! @file dequantize.c
+ *
+ * @brief Implementation of inverse quantization functions
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+// Not using midpoint correction in dequantization
+static const int midpoint = 0;
+
+/*!
+ @brief Dequantize a band with the specified dimensions
+
+ The companding curve is inverted and the value is multiplied by the
+ quantization value that was used by the encoder to compress the band.
+*/
+CODEC_ERROR DequantizeBandRow16s(PIXEL *input, int width, int quantization, PIXEL *output)
+{
+    int column;
+
+    // NOTE(review): this loop body duplicates the logic of DequantizedValue()
+    // below; consider delegating to it to keep the two in sync
+
+    // Undo quantization in the entire row
+    for (column = 0; column < width; column++)
+    {
+        int32_t value = input[column];
+
+        // Invert the companding curve (if any)
+        value = UncompandedValue(value);
+
+        // Dequantize the absolute value
+        // (midpoint is zero in this build, so this is a plain scaling)
+        if (value > 0)
+        {
+            value = (quantization * value) + midpoint;
+        }
+        else if (value < 0)
+        {
+            // Negate so that scaling and the midpoint offset apply to the magnitude
+            value = neg(value);
+            value = (quantization * value) + midpoint;
+            value = neg(value);
+        }
+
+        // Store the dequantized coefficient, clamped to the pixel range
+        output[column] = ClampPixel(value);
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief This function dequantizes the pixel value
+
+ The inverse companding curve is applied to convert the pixel value
+ to its quantized value and then the pixel value is multiplied by
+ the quantization parameter.
+*/
+PIXEL DequantizedValue(int32_t value, int quantization)
+{
+    // Invert the companding curve (if any) before scaling
+    value = UncompandedValue(value);
+
+    // Scale the magnitude by the quantization factor, preserving the sign;
+    // midpoint is zero in this build, so no rounding offset is applied
+    if (value > 0)
+    {
+        value = (quantization * value) + midpoint;
+    }
+    else if (value < 0)
+    {
+        value = neg((quantization * neg(value)) + midpoint);
+    }
+
+    // Clamp the result to the legal pixel range
+    return ClampPixel(value);
+}
diff --git a/gpr/source/lib/vc5_decoder/dequantize.h b/gpr/source/lib/vc5_decoder/dequantize.h
new file mode 100755
index 0000000..748bc36
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/dequantize.h
@@ -0,0 +1,36 @@
+/*! @file dequantize.h
+ *
+ * @brief Declaration of inverse quantization functions
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Guard renamed from QUANTIZE_H to DEQUANTIZE_H: the old name did not match
+// this file and could collide with an encoder-side quantize.h guard, which
+// would silently hide these declarations when both headers are included.
+#ifndef DEQUANTIZE_H
+#define DEQUANTIZE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+    //! Dequantize a row of band coefficients (see dequantize.c)
+    CODEC_ERROR DequantizeBandRow16s(PIXEL *input, int width, int quantization, PIXEL *output);
+
+    //! Dequantize a single coefficient value (see dequantize.c)
+    PIXEL DequantizedValue(int32_t value, int quantization);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // DEQUANTIZE_H
diff --git a/gpr/source/lib/vc5_decoder/headers.h b/gpr/source/lib/vc5_decoder/headers.h
new file mode 100755
index 0000000..54e17e4
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/headers.h
@@ -0,0 +1,49 @@
+/*! @file headers.h
+ *
+ * @brief This file includes all of the header files that are used by the decoder.
+ *
+ * Note that some header files are only used by the main program that
+ * calls the codec, or are only used for debugging, and are not included
+ * by this file. Only headers that are part of the reference decoder are
+ * included here.
+ *
+ * Including a single header file in all reference decoder source files
+ * ensures that all modules see the same header files in the same order.
+ *
+ * This file can be used for creating a pre-compiled header if the
+ * compiler supports that capability.
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HEADERS_H
+#define HEADERS_H
+
+#include "common.h"
+
+#include "vlc.h"
+#include "raw.h"
+#include "bitstream.h"
+#include "dequantize.h"
+#include "parameters.h"
+#include "inverse.h"
+#include "codebooks.h"
+#include "wavelet.h"
+#include "syntax.h"
+#include "decoder.h"
+#include "component.h"
+#include "vc5_decoder.h"
+
+#endif // HEADERS_H
diff --git a/gpr/source/lib/vc5_decoder/inverse.c b/gpr/source/lib/vc5_decoder/inverse.c
new file mode 100755
index 0000000..cacc7be
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/inverse.c
@@ -0,0 +1,1188 @@
+/*! @file inverse.c
+ *
+ * @brief Implementation of the inverse wavelet transforms.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+//! Rounding adjustment added before the divide-by-eight (arithmetic shift by 3)
+//! in the border reconstruction filters of the inverse wavelet transforms
+static const int32_t rounding = 4;
+
+/*!
+ @brief Apply the inverse horizontal wavelet transform
+ This routine applies the inverse wavelet transform to a row of
+ lowpass and highpass coefficients, producing an output row that
+ is twice as wide. Special border filters are used for the first
+ and last coefficient pairs; the interior uses the three-tap filter.
+ */
+STATIC CODEC_ERROR InvertHorizontal16s(PIXEL *lowpass, //!< Horizontal lowpass coefficients
+ PIXEL *highpass, //!< Horizontal highpass coefficients
+ PIXEL *output, //!< Row of reconstructed results
+ DIMENSION input_width, //!< Number of values in the input row
+ DIMENSION output_width //!< Number of values in the output row
+)
+{
+    const int last_column = input_width - 1;
+
+    int32_t even;
+    int32_t odd;
+
+    // Start processing at the beginning of the row
+    int column = 0;
+
+    // Process the first two output points with special filters for the left border
+    even = 0;
+    odd = 0;
+
+    // Apply the even reconstruction filter to the lowpass band
+    even += 11 * lowpass[column + 0];
+    even -= 4 * lowpass[column + 1];
+    even += 1 * lowpass[column + 2];
+    even += rounding;
+    even = DivideByShift(even, 3);
+
+    // Add the highpass correction
+    even += highpass[column];
+    even >>= 1;
+
+    // The lowpass result should be a positive number
+    //assert(0 <= even && even <= INT16_MAX);
+
+    // Apply the odd reconstruction filter to the lowpass band
+    odd += 5 * lowpass[column + 0];
+    odd += 4 * lowpass[column + 1];
+    odd -= 1 * lowpass[column + 2];
+    odd += rounding;
+    odd = DivideByShift(odd, 3);
+
+    // Subtract the highpass correction
+    odd -= highpass[column];
+    odd >>= 1;
+
+    // The lowpass result should be a positive number
+    //assert(0 <= odd && odd <= INT16_MAX);
+
+    // Store the first two output points produced by the left border filters
+    output[2 * column + 0] = clamp_uint14(even);
+    output[2 * column + 1] = clamp_uint14(odd);
+
+    // Advance to the next input column (second pair of output values)
+    column++;
+
+    // Process the rest of the columns up to the last column in the row
+    for (; column < last_column; column++)
+    {
+        int32_t even = 0; // Result of convolution with even filter
+        int32_t odd = 0; // Result of convolution with odd filter
+
+        // Apply the even reconstruction filter to the lowpass band
+
+        even += lowpass[column - 1];
+        even -= lowpass[column + 1];
+        even += 4;
+        even >>= 3;
+        even += lowpass[column + 0];
+
+        // Add the highpass correction
+        even += highpass[column];
+        even >>= 1;
+
+        // The lowpass result should be a positive number
+        //assert(0 <= even && even <= INT16_MAX);
+
+        // Place the even result in the even column
+        //output[2 * column + 0] = clamp_uint12(even);
+        output[2 * column + 0] = clamp_uint14(even);
+
+        // Apply the odd reconstruction filter to the lowpass band
+        odd -= lowpass[column - 1];
+        odd += lowpass[column + 1];
+        odd += 4;
+        odd >>= 3;
+        odd += lowpass[column + 0];
+
+        // Subtract the highpass correction
+        odd -= highpass[column];
+        odd >>= 1;
+
+        // The lowpass result should be a positive number
+        //assert(0 <= odd && odd <= INT16_MAX);
+
+        // Place the odd result in the odd column
+        //output[2 * column + 1] = clamp_uint14(odd);
+        output[2 * column + 1] = clamp_uint14(odd);
+    }
+
+    // Should have exited the loop at the column for right border processing
+    assert(column == last_column);
+
+    // Process the last two output points with special filters for the right border
+    even = 0;
+    odd = 0;
+
+    // Apply the even reconstruction filter to the lowpass band
+    even += 5 * lowpass[column + 0];
+    even += 4 * lowpass[column - 1];
+    even -= 1 * lowpass[column - 2];
+    even += rounding;
+    even = DivideByShift(even, 3);
+
+    // Add the highpass correction
+    even += highpass[column];
+    even >>= 1;
+
+    // The lowpass result should be a positive number
+    //assert(0 <= even && even <= INT16_MAX);
+
+    // Place the even result in the even column
+    output[2 * column + 0] = clamp_uint14(even);
+
+    // Skip the final odd output when the output row is narrower than
+    // twice the input row (odd output width)
+    if (2 * column + 1 < output_width)
+    {
+        // Apply the odd reconstruction filter to the lowpass band
+        odd += 11 * lowpass[column + 0];
+        odd -= 4 * lowpass[column - 1];
+        odd += 1 * lowpass[column - 2];
+        odd += rounding;
+        odd = DivideByShift(odd, 3);
+
+        // Subtract the highpass correction
+        odd -= highpass[column];
+        odd >>= 1;
+
+        // The lowpass result should be a positive number
+        //assert(0 <= odd && odd <= INT16_MAX);
+
+        // Place the odd result in the odd column
+        output[2 * column + 1] = clamp_uint14(odd);
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Apply the inverse horizontal wavelet transform
+ This routine is similar to @ref InvertHorizontal16s, but a scale factor
+ that was applied during encoding is removed from the output values.
+ Note that the final divide-by-two of the plain transform is omitted
+ here; a descale of 2 therefore maps to a left shift of one.
+ */
+STATIC CODEC_ERROR InvertHorizontalDescale16s(PIXEL *lowpass, PIXEL *highpass, PIXEL *output,
+ DIMENSION input_width, DIMENSION output_width,
+ int descale)
+{
+    const int last_column = input_width - 1;
+
+    // Start processing at the beginning of the row
+    int column = 0;
+
+    int descale_shift = 0;
+
+    int32_t even;
+    int32_t odd;
+
+    /*
+     The implementation of the inverse filter includes descaling by a factor of two
+     because the last division by two in the computation of the even and odd results
+     that is performed using a right arithmetic shift has been omitted from the code.
+     */
+    if (descale == 2) {
+        descale_shift = 1;
+    }
+
+    // Check that the descaling value is reasonable
+    assert(descale_shift >= 0);
+
+    // Process the first two output points with special filters for the left border
+    even = 0;
+    odd = 0;
+
+    // Apply the even reconstruction filter to the lowpass band
+    even += 11 * lowpass[column + 0];
+    even -= 4 * lowpass[column + 1];
+    even += 1 * lowpass[column + 2];
+    even += rounding;
+    even = DivideByShift(even, 3);
+
+    // Add the highpass correction
+    even += highpass[column];
+
+    // Remove any scaling used during encoding
+    // NOTE(review): left-shifting a negative intermediate is undefined behavior
+    // in C; confirm the coefficients here are non-negative, or shift the
+    // magnitude instead
+    even <<= descale_shift;
+
+    // The lowpass result should be a positive number
+    //assert(0 <= even && even <= INT16_MAX);
+
+    // Apply the odd reconstruction filter to the lowpass band
+    odd += 5 * lowpass[column + 0];
+    odd += 4 * lowpass[column + 1];
+    odd -= 1 * lowpass[column + 2];
+    odd += rounding;
+    odd = DivideByShift(odd, 3);
+
+    // Subtract the highpass correction
+    odd -= highpass[column];
+
+    // Remove any scaling used during encoding
+    odd <<= descale_shift;
+
+    // The lowpass result should be a positive number
+    //assert(0 <= odd && odd <= INT16_MAX);
+
+    // Store the first two output points produced by the left border filters
+    output[2 * column + 0] = ClampPixel(even);
+    output[2 * column + 1] = ClampPixel(odd);
+
+    // Advance to the next input column (second pair of output values)
+    column++;
+
+    // Process the rest of the columns up to the last column in the row
+    for (; column < last_column; column++)
+    {
+        int32_t even = 0; // Result of convolution with even filter
+        int32_t odd = 0; // Result of convolution with odd filter
+
+        // Apply the even reconstruction filter to the lowpass band
+        even += lowpass[column - 1];
+        even -= lowpass[column + 1];
+        even += 4;
+        even >>= 3;
+        even += lowpass[column + 0];
+
+        // Add the highpass correction
+        even += highpass[column];
+
+        // Remove any scaling used during encoding
+        even <<= descale_shift;
+
+        // The lowpass result should be a positive number
+        //assert(0 <= even && even <= INT16_MAX);
+
+        // Place the even result in the even column
+        output[2 * column + 0] = ClampPixel(even);
+
+        // Apply the odd reconstruction filter to the lowpass band
+        odd -= lowpass[column - 1];
+        odd += lowpass[column + 1];
+        odd += 4;
+        odd >>= 3;
+        odd += lowpass[column + 0];
+
+        // Subtract the highpass correction
+        odd -= highpass[column];
+
+        // Remove any scaling used during encoding
+        odd <<= descale_shift;
+
+        // The lowpass result should be a positive number
+        //assert(0 <= odd && odd <= INT16_MAX);
+
+        // Place the odd result in the odd column
+        output[2 * column + 1] = ClampPixel(odd);
+    }
+
+    // Should have exited the loop at the column for right border processing
+    assert(column == last_column);
+
+    // Process the last two output points with special filters for the right border
+    even = 0;
+    odd = 0;
+
+    // Apply the even reconstruction filter to the lowpass band
+    even += 5 * lowpass[column + 0];
+    even += 4 * lowpass[column - 1];
+    even -= 1 * lowpass[column - 2];
+    even += rounding;
+    even = DivideByShift(even, 3);
+
+    // Add the highpass correction
+    even += highpass[column];
+
+    // Remove any scaling used during encoding
+    even <<= descale_shift;
+
+    // The lowpass result should be a positive number
+    //assert(0 <= even && even <= INT16_MAX);
+
+    // Place the even result in the even column
+    output[2 * column + 0] = ClampPixel(even);
+
+    // Skip the final odd output when the output row has an odd width
+    if (2 * column + 1 < output_width)
+    {
+        // Apply the odd reconstruction filter to the lowpass band
+        odd += 11 * lowpass[column + 0];
+        odd -= 4 * lowpass[column - 1];
+        odd += 1 * lowpass[column - 2];
+        odd += rounding;
+        odd = DivideByShift(odd, 3);
+
+        // Subtract the highpass correction
+        odd -= highpass[column];
+
+        // Remove any scaling used during encoding
+        odd <<= descale_shift;
+
+        // The lowpass result should be a positive number
+        //assert(0 <= odd && odd <= INT16_MAX);
+
+        // Place the odd result in the odd column
+        output[2 * column + 1] = ClampPixel(odd);
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Apply the inverse spatial wavelet filter
+ Dequantize the coefficients in the highpass bands and apply the
+ inverse spatial wavelet filter to compute a lowpass band that
+ has twice the width and height of the input bands.
+ The inverse vertical filter is applied to the upper and lower bands
+ on the left and the upper and lower bands on the right. The inverse
+ horizontal filter is applied to the left and right (lowpass and highpass)
+ results from the vertical inverse. Each application of the inverse
+ vertical filter produces two output rows and each application of the
+ inverse horizontal filter produces an output row that is twice as wide.
+ The inverse wavelet filter is a three tap filter.
+
+ For the even output values, add and subtract the off-center values,
+ add the rounding correction, and divide by eight, then add the center
+ value, add the highpass coefficient, and divide by two.
+
+ For the odd output values, the add and subtract operations for the
+ off-center values are reversed and the highpass coefficient is subtracted.
+ Divisions are implemented by right arithmetic shifts.
+ Special formulas for the inverse vertical filter are applied to the top
+ and bottom rows.
+ */
+CODEC_ERROR InvertSpatialQuant16s(gpr_allocator *allocator,
+ PIXEL *lowlow_band, int lowlow_pitch,
+ PIXEL *lowhigh_band, int lowhigh_pitch,
+ PIXEL *highlow_band, int highlow_pitch,
+ PIXEL *highhigh_band, int highhigh_pitch,
+ PIXEL *output_image, int output_pitch,
+ DIMENSION input_width, DIMENSION input_height,
+ DIMENSION output_width, DIMENSION output_height,
+ QUANT quantization[])
+{
+    PIXEL *lowlow = (PIXEL *)lowlow_band;
+    PIXEL *lowhigh = lowhigh_band;
+    PIXEL *highlow = highlow_band;
+    PIXEL *highhigh = highhigh_band;
+    PIXEL *output = output_image;
+    PIXEL *even_lowpass;
+    PIXEL *even_highpass;
+    PIXEL *odd_lowpass;
+    PIXEL *odd_highpass;
+    PIXEL *even_output;
+    PIXEL *odd_output;
+    size_t buffer_row_size;
+    int last_row = input_height - 1;
+    int row, column;
+
+    PIXEL *lowhigh_row[3];
+
+    PIXEL *lowhigh_line[3];
+    PIXEL *highlow_line;
+    PIXEL *highhigh_line;
+
+    QUANT highlow_quantization = quantization[HL_BAND];
+    QUANT lowhigh_quantization = quantization[LH_BAND];
+    QUANT highhigh_quantization = quantization[HH_BAND];
+
+    // Compute positions within the temporary buffer for each row of horizontal lowpass
+    // and highpass intermediate coefficients computed by the vertical inverse transform
+    buffer_row_size = input_width * sizeof(PIXEL);
+
+    // Compute the positions of the even and odd rows of coefficients
+    // NOTE(review): allocation results are not checked for NULL before use;
+    // confirm the allocator aborts on failure or add checks (CERT MEM32-C)
+    even_lowpass = (PIXEL *)allocator->Alloc(buffer_row_size);
+    even_highpass = (PIXEL *)allocator->Alloc(buffer_row_size);
+    odd_lowpass = (PIXEL *)allocator->Alloc(buffer_row_size);
+    odd_highpass = (PIXEL *)allocator->Alloc(buffer_row_size);
+
+    // Compute the positions of the dequantized highpass rows
+    lowhigh_line[0] = (PIXEL *)allocator->Alloc(buffer_row_size);
+    lowhigh_line[1] = (PIXEL *)allocator->Alloc(buffer_row_size);
+    lowhigh_line[2] = (PIXEL *)allocator->Alloc(buffer_row_size);
+    highlow_line = (PIXEL *)allocator->Alloc(buffer_row_size);
+    highhigh_line = (PIXEL *)allocator->Alloc(buffer_row_size);
+
+    // Convert pitch from bytes to pixels
+    lowlow_pitch /= sizeof(PIXEL);
+    lowhigh_pitch /= sizeof(PIXEL);
+    highlow_pitch /= sizeof(PIXEL);
+    highhigh_pitch /= sizeof(PIXEL);
+    output_pitch /= sizeof(PIXEL);
+
+    // Initialize the pointers to the even and odd output rows
+    even_output = output;
+    odd_output = output + output_pitch;
+
+    // Apply the vertical border filter to the first row
+    row = 0;
+
+    // Set pointers to the first three rows in the first highpass band
+    lowhigh_row[0] = lowhigh + 0 * lowhigh_pitch;
+    lowhigh_row[1] = lowhigh + 1 * lowhigh_pitch;
+    lowhigh_row[2] = lowhigh + 2 * lowhigh_pitch;
+
+    // Dequantize three rows of highpass coefficients in the first highpass band
+    DequantizeBandRow16s(lowhigh_row[0], input_width, lowhigh_quantization, lowhigh_line[0]);
+    DequantizeBandRow16s(lowhigh_row[1], input_width, lowhigh_quantization, lowhigh_line[1]);
+    DequantizeBandRow16s(lowhigh_row[2], input_width, lowhigh_quantization, lowhigh_line[2]);
+
+    // Dequantize one row of coefficients each in the second and third highpass bands
+    DequantizeBandRow16s(highlow, input_width, highlow_quantization, highlow_line);
+    DequantizeBandRow16s(highhigh, input_width, highhigh_quantization, highhigh_line);
+
+    for (column = 0; column < input_width; column++)
+    {
+        int32_t even = 0; // Result of convolution with even filter
+        int32_t odd = 0; // Result of convolution with odd filter
+
+
+        /***** Compute the vertical inverse for the left two bands *****/
+
+        // Apply the even reconstruction filter to the lowpass band
+        even += 11 * lowlow[column + 0 * lowlow_pitch];
+        even -= 4 * lowlow[column + 1 * lowlow_pitch];
+        even += 1 * lowlow[column + 2 * lowlow_pitch];
+        even += rounding;
+        even = DivideByShift(even, 3);
+
+        // Add the highpass correction
+        even += highlow_line[column];
+        even >>= 1;
+
+        // The inverse of the left two bands should be a positive number
+        //assert(0 <= even && even <= INT16_MAX);
+
+        // Place the even result in the even row
+        even_lowpass[column] = ClampPixel(even);
+
+        // Apply the odd reconstruction filter to the lowpass band
+        odd += 5 * lowlow[column + 0 * lowlow_pitch];
+        odd += 4 * lowlow[column + 1 * lowlow_pitch];
+        odd -= 1 * lowlow[column + 2 * lowlow_pitch];
+        odd += rounding;
+        odd = DivideByShift(odd, 3);
+
+        // Subtract the highpass correction
+        odd -= highlow_line[column];
+        odd >>= 1;
+
+        // The inverse of the left two bands should be a positive number
+        //assert(0 <= odd && odd <= INT16_MAX);
+
+        // Place the odd result in the odd row
+        odd_lowpass[column] = ClampPixel(odd);
+
+
+        /***** Compute the vertical inverse for the right two bands *****/
+
+        even = 0;
+        odd = 0;
+
+        // Apply the even reconstruction filter to the lowpass band
+        even += 11 * lowhigh_line[0][column];
+        even -= 4 * lowhigh_line[1][column];
+        even += 1 * lowhigh_line[2][column];
+        even += rounding;
+        even = DivideByShift(even, 3);
+
+        // Add the highpass correction
+        even += highhigh_line[column];
+        even >>= 1;
+
+        // Place the even result in the even row
+        even_highpass[column] = ClampPixel(even);
+
+        // Apply the odd reconstruction filter to the lowpass band
+        odd += 5 * lowhigh_line[0][column];
+        odd += 4 * lowhigh_line[1][column];
+        odd -= 1 * lowhigh_line[2][column];
+        odd += rounding;
+        odd = DivideByShift(odd, 3);
+
+        // Subtract the highpass correction
+        odd -= highhigh_line[column];
+        odd >>= 1;
+
+        // Place the odd result in the odd row
+        odd_highpass[column] = ClampPixel(odd);
+    }
+
+    // Apply the inverse horizontal transform to the even and odd rows
+    InvertHorizontal16s(even_lowpass, even_highpass, even_output, input_width, output_width);
+    InvertHorizontal16s(odd_lowpass, odd_highpass, odd_output, input_width, output_width);
+
+    // Advance to the next pair of even and odd output rows
+    even_output += 2 * output_pitch;
+    odd_output += 2 * output_pitch;
+
+    // Always advance the highpass row pointers
+    highlow += highlow_pitch;
+    highhigh += highhigh_pitch;
+
+    // Advance the row index
+    row++;
+
+    // Process the middle rows using the interior reconstruction filters
+    for (; row < last_row; row++)
+    {
+        // Dequantize one row from each of the two highpass bands
+        DequantizeBandRow16s(highlow, input_width, highlow_quantization, highlow_line);
+        DequantizeBandRow16s(highhigh, input_width, highhigh_quantization, highhigh_line);
+
+        // Process the entire row
+        for (column = 0; column < input_width; column++)
+        {
+            int32_t even = 0; // Result of convolution with even filter
+            int32_t odd = 0; // Result of convolution with odd filter
+
+
+            /***** Compute the vertical inverse for the left two bands *****/
+
+            // Apply the even reconstruction filter to the lowpass band
+            even += lowlow[column + 0 * lowlow_pitch];
+            even -= lowlow[column + 2 * lowlow_pitch];
+            even += 4;
+            even >>= 3;
+            even += lowlow[column + 1 * lowlow_pitch];
+
+            // Add the highpass correction
+            even += highlow_line[column];
+            even >>= 1;
+
+            // The inverse of the left two bands should be a positive number
+            //assert(0 <= even && even <= INT16_MAX);
+
+            // Place the even result in the even row
+            even_lowpass[column] = ClampPixel(even);
+
+            // Apply the odd reconstruction filter to the lowpass band
+            odd -= lowlow[column + 0 * lowlow_pitch];
+            odd += lowlow[column + 2 * lowlow_pitch];
+            odd += 4;
+            odd >>= 3;
+            odd += lowlow[column + 1 * lowlow_pitch];
+
+            // Subtract the highpass correction
+            odd -= highlow_line[column];
+            odd >>= 1;
+
+            // The inverse of the left two bands should be a positive number
+            //assert(0 <= odd && odd <= INT16_MAX);
+
+            // Place the odd result in the odd row
+            odd_lowpass[column] = ClampPixel(odd);
+
+
+            /***** Compute the vertical inverse for the right two bands *****/
+
+            even = 0;
+            odd = 0;
+
+            // Apply the even reconstruction filter to the lowpass band
+            even += lowhigh_line[0][column];
+            even -= lowhigh_line[2][column];
+            even += 4;
+            even >>= 3;
+            even += lowhigh_line[1][column];
+
+            // Add the highpass correction
+            even += highhigh_line[column];
+            even >>= 1;
+
+            // Place the even result in the even row
+            even_highpass[column] = ClampPixel(even);
+
+            // Apply the odd reconstruction filter to the lowpass band
+            odd -= lowhigh_line[0][column];
+            odd += lowhigh_line[2][column];
+            odd += 4;
+            odd >>= 3;
+            odd += lowhigh_line[1][column];
+
+            // Subtract the highpass correction
+            odd -= highhigh_line[column];
+            odd >>= 1;
+
+            // Place the odd result in the odd row
+            odd_highpass[column] = ClampPixel(odd);
+        }
+
+        // Apply the inverse horizontal transform to the even and odd rows and descale the results
+        InvertHorizontal16s(even_lowpass, even_highpass, even_output, input_width, output_width);
+        InvertHorizontal16s(odd_lowpass, odd_highpass, odd_output, input_width, output_width);
+
+        // Advance to the next input row in each band
+        lowlow += lowlow_pitch;
+        lowhigh += lowhigh_pitch;
+        highlow += highlow_pitch;
+        highhigh += highhigh_pitch;
+
+        // Advance to the next pair of even and odd output rows
+        even_output += 2 * output_pitch;
+        odd_output += 2 * output_pitch;
+
+        if (row < last_row - 1)
+        {
+            // Compute the address of the next row in the lowhigh band
+            PIXEL *lowhigh_row_ptr = (lowhigh + 2 * lowhigh_pitch);
+            //PIXEL *lowhigh_row_ptr = (lowhigh + lowhigh_pitch);
+
+            // Shift the rows in the buffer of dequantized lowhigh bands
+            PIXEL *temp = lowhigh_line[0];
+            lowhigh_line[0] = lowhigh_line[1];
+            lowhigh_line[1] = lowhigh_line[2];
+            lowhigh_line[2] = temp;
+
+            // Undo quantization for the next row in the lowhigh band
+            DequantizeBandRow16s(lowhigh_row_ptr, input_width, lowhigh_quantization, lowhigh_line[2]);
+        }
+    }
+
+    // Should have exited the loop at the last row
+    assert(row == last_row);
+
+    // Advance the lowlow pointer to the last row in the band
+    lowlow += lowlow_pitch;
+
+    // Check that the band pointers are on the last row in each wavelet band
+    assert(lowlow == (lowlow_band + last_row * lowlow_pitch));
+
+    assert(highlow == (highlow_band + last_row * highlow_pitch));
+    assert(highhigh == (highhigh_band + last_row * highhigh_pitch));
+
+    // Undo quantization for the highlow and highhigh bands
+    DequantizeBandRow16s(highlow, input_width, highlow_quantization, highlow_line);
+    DequantizeBandRow16s(highhigh, input_width, highhigh_quantization, highhigh_line);
+
+    // Apply the vertical border filter to the last row
+    for (column = 0; column < input_width; column++)
+    {
+        int32_t even = 0; // Result of convolution with even filter
+        int32_t odd = 0; // Result of convolution with odd filter
+
+
+        /***** Compute the vertical inverse for the left two bands *****/
+
+        // Apply the even reconstruction filter to the lowpass band
+        even += 5 * lowlow[column + 0 * lowlow_pitch];
+        even += 4 * lowlow[column - 1 * lowlow_pitch];
+        even -= 1 * lowlow[column - 2 * lowlow_pitch];
+        even += 4;
+        even = DivideByShift(even, 3);
+
+        // Add the highpass correction
+        even += highlow_line[column];
+        even >>= 1;
+
+        // The inverse of the left two bands should be a positive number
+        //assert(0 <= even && even <= INT16_MAX);
+
+        // Place the even result in the even row
+        even_lowpass[column] = ClampPixel(even);
+
+        // Apply the odd reconstruction filter to the lowpass band
+        odd += 11 * lowlow[column + 0 * lowlow_pitch];
+        odd -= 4 * lowlow[column - 1 * lowlow_pitch];
+        odd += 1 * lowlow[column - 2 * lowlow_pitch];
+        odd += 4;
+        odd = DivideByShift(odd, 3);
+
+        // Subtract the highpass correction
+        odd -= highlow_line[column];
+        odd >>= 1;
+
+        // The inverse of the left two bands should be a positive number
+        //assert(0 <= odd && odd <= INT16_MAX);
+
+        // Place the odd result in the odd row
+        odd_lowpass[column] = ClampPixel(odd);
+
+
+        /***** Compute the vertical inverse for the right two bands *****/
+
+        even = 0;
+        odd = 0;
+
+        // Apply the even reconstruction filter to the lowpass band
+        even += 5 * lowhigh_line[2][column];
+        even += 4 * lowhigh_line[1][column];
+        even -= 1 * lowhigh_line[0][column];
+        even += 4;
+        even = DivideByShift(even, 3);
+
+        // Add the highpass correction
+        even += highhigh_line[column];
+        even >>= 1;
+
+        // Place the even result in the even row
+        even_highpass[column] = ClampPixel(even);
+
+        // Apply the odd reconstruction filter to the lowpass band
+        odd += 11 * lowhigh_line[2][column];
+        odd -= 4 * lowhigh_line[1][column];
+        odd += 1 * lowhigh_line[0][column];
+        odd += 4;
+        odd = DivideByShift(odd, 3);
+
+        // Subtract the highpass correction
+        odd -= highhigh_line[column];
+        odd >>= 1;
+
+        // Place the odd result in the odd row
+        odd_highpass[column] = ClampPixel(odd);
+    }
+
+    // Apply the inverse horizontal transform to the even and odd rows and descale the results
+    InvertHorizontal16s(even_lowpass, even_highpass, even_output, input_width, output_width);
+
+    // Is the output wavelet shorter than twice the height of the input wavelet?
+    if (2 * row + 1 < output_height) {
+        InvertHorizontal16s(odd_lowpass, odd_highpass, odd_output, input_width, output_width);
+    }
+
+    // Free the scratch buffers
+    allocator->Free(even_lowpass);
+    allocator->Free(even_highpass);
+    allocator->Free(odd_lowpass);
+    allocator->Free(odd_highpass);
+
+    allocator->Free(lowhigh_line[0]);
+    allocator->Free(lowhigh_line[1]);
+    allocator->Free(lowhigh_line[2]);
+    allocator->Free(highlow_line);
+    allocator->Free(highhigh_line);
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Apply the inverse spatial transform with descaling
+ This routine is similar to @ref InvertSpatialQuant16s, but a scale factor
+ that was applied during encoding is removed from the output values.
+ */
+CODEC_ERROR InvertSpatialQuantDescale16s(gpr_allocator *allocator,
+ PIXEL *lowlow_band, int lowlow_pitch,
+ PIXEL *lowhigh_band, int lowhigh_pitch,
+ PIXEL *highlow_band, int highlow_pitch,
+ PIXEL *highhigh_band, int highhigh_pitch,
+ PIXEL *output_image, int output_pitch,
+ DIMENSION input_width, DIMENSION input_height,
+ DIMENSION output_width, DIMENSION output_height,
+ int descale, QUANT quantization[])
+{
+ PIXEL *lowlow = lowlow_band;
+ PIXEL *lowhigh = lowhigh_band;
+ PIXEL *highlow = highlow_band;
+ PIXEL *highhigh = highhigh_band;
+ PIXEL *output = output_image;
+ PIXEL *even_lowpass;
+ PIXEL *even_highpass;
+ PIXEL *odd_lowpass;
+ PIXEL *odd_highpass;
+ PIXEL *even_output;
+ PIXEL *odd_output;
+ size_t buffer_row_size;
+ int last_row = input_height - 1;
+ int row, column;
+
+ PIXEL *lowhigh_row[3];
+
+ PIXEL *lowhigh_line[3];
+ PIXEL *highlow_line;
+ PIXEL *highhigh_line;
+
+ QUANT highlow_quantization = quantization[HL_BAND];
+ QUANT lowhigh_quantization = quantization[LH_BAND];
+ QUANT highhigh_quantization = quantization[HH_BAND];
+
+ // Compute positions within the temporary buffer for each row of horizontal lowpass
+ // and highpass intermediate coefficients computed by the vertical inverse transform
+ buffer_row_size = input_width * sizeof(PIXEL);
+
+ // Allocate space for the even and odd rows of results from the inverse vertical filter
+ even_lowpass = (PIXEL *)allocator->Alloc(buffer_row_size);
+ even_highpass = (PIXEL *)allocator->Alloc(buffer_row_size);
+ odd_lowpass = (PIXEL *)allocator->Alloc(buffer_row_size);
+ odd_highpass = (PIXEL *)allocator->Alloc(buffer_row_size);
+
+ // Allocate scratch space for the dequantized highpass coefficients
+ lowhigh_line[0] = (PIXEL *)allocator->Alloc(buffer_row_size);
+ lowhigh_line[1] = (PIXEL *)allocator->Alloc(buffer_row_size);
+ lowhigh_line[2] = (PIXEL *)allocator->Alloc(buffer_row_size);
+ highlow_line = (PIXEL *)allocator->Alloc(buffer_row_size);
+ highhigh_line = (PIXEL *)allocator->Alloc(buffer_row_size);
+
+ // Convert pitch from bytes to pixels
+ lowlow_pitch /= sizeof(PIXEL);
+ lowhigh_pitch /= sizeof(PIXEL);
+ highlow_pitch /= sizeof(PIXEL);
+ highhigh_pitch /= sizeof(PIXEL);
+ output_pitch /= sizeof(PIXEL);
+
+ // Initialize the pointers to the even and odd output rows
+ even_output = output;
+ odd_output = output + output_pitch;
+
+ // Apply the vertical border filter to the first row
+ row = 0;
+
+ // Set pointers to the first three rows in the first highpass band
+ lowhigh_row[0] = lowhigh + 0 * lowhigh_pitch;
+ lowhigh_row[1] = lowhigh + 1 * lowhigh_pitch;
+ lowhigh_row[2] = lowhigh + 2 * lowhigh_pitch;
+
+ // Dequantize three rows of highpass coefficients in the first highpass band
+ DequantizeBandRow16s(lowhigh_row[0], input_width, lowhigh_quantization, lowhigh_line[0]);
+ DequantizeBandRow16s(lowhigh_row[1], input_width, lowhigh_quantization, lowhigh_line[1]);
+ DequantizeBandRow16s(lowhigh_row[2], input_width, lowhigh_quantization, lowhigh_line[2]);
+
+ // Dequantize one row of coefficients each in the second and third highpass bands
+ DequantizeBandRow16s(highlow, input_width, highlow_quantization, highlow_line);
+ DequantizeBandRow16s(highhigh, input_width, highhigh_quantization, highhigh_line);
+
+ for (column = 0; column < input_width; column++)
+ {
+ int32_t even = 0; // Result of convolution with even filter
+ int32_t odd = 0; // Result of convolution with odd filter
+
+
+ /***** Compute the vertical inverse for the left two bands *****/
+
+ // Apply the even reconstruction filter to the lowpass band
+ even += 11 * lowlow[column + 0 * lowlow_pitch];
+ even -= 4 * lowlow[column + 1 * lowlow_pitch];
+ even += 1 * lowlow[column + 2 * lowlow_pitch];
+ even += rounding;
+ even = DivideByShift(even, 3);
+
+ // Add the highpass correction
+ even += highlow_line[column];
+ even = DivideByShift(even, 1);
+
+ // The inverse of the left two bands should be a positive number
+ //assert(0 <= even && even <= INT16_MAX);
+
+ // Place the even result in the even row
+ even_lowpass[column] = ClampPixel(even);
+
+ // Apply the odd reconstruction filter to the lowpass band
+ odd += 5 * lowlow[column + 0 * lowlow_pitch];
+ odd += 4 * lowlow[column + 1 * lowlow_pitch];
+ odd -= 1 * lowlow[column + 2 * lowlow_pitch];
+ odd += rounding;
+ odd = DivideByShift(odd, 3);
+
+ // Subtract the highpass correction
+ odd -= highlow_line[column];
+ odd = DivideByShift(odd, 1);
+
+ // The inverse of the left two bands should be a positive number
+ //assert(0 <= odd && odd <= INT16_MAX);
+
+ // Place the odd result in the odd row
+ odd_lowpass[column] = ClampPixel(odd);
+
+
+ /***** Compute the vertical inverse for the right two bands *****/
+
+ even = 0;
+ odd = 0;
+
+ // Apply the even reconstruction filter to the lowpass band
+ even += 11 * lowhigh_line[0][column];
+ even -= 4 * lowhigh_line[1][column];
+ even += 1 * lowhigh_line[2][column];
+ even += rounding;
+ even = DivideByShift(even, 3);
+
+ // Add the highpass correction
+ even += highhigh_line[column];
+ even = DivideByShift(even, 1);
+
+ // Place the even result in the even row
+ even_highpass[column] = ClampPixel(even);
+
+ // Apply the odd reconstruction filter to the lowpass band
+ odd += 5 * lowhigh_line[0][column];
+ odd += 4 * lowhigh_line[1][column];
+ odd -= 1 * lowhigh_line[2][column];
+ odd += rounding;
+ odd = DivideByShift(odd, 3);
+
+ // Subtract the highpass correction
+ odd -= highhigh_line[column];
+ odd = DivideByShift(odd, 1);
+
+ // Place the odd result in the odd row
+ odd_highpass[column] = ClampPixel(odd);
+ }
+
+ // Apply the inverse horizontal transform to the even and odd rows and descale the results
+ InvertHorizontalDescale16s(even_lowpass, even_highpass, even_output,
+ input_width, output_width, descale);
+
+ InvertHorizontalDescale16s(odd_lowpass, odd_highpass, odd_output,
+ input_width, output_width, descale);
+
+ // Advance to the next pair of even and odd output rows
+ even_output += 2 * output_pitch;
+ odd_output += 2 * output_pitch;
+
+ // Always advance the highpass row pointers
+ highlow += highlow_pitch;
+ highhigh += highhigh_pitch;
+
+ // Advance the row index
+ row++;
+
+ // Process the middle rows using the interior reconstruction filters
+ for (; row < last_row; row++)
+ {
+ // Dequantize one row from each of the two highpass bands
+ DequantizeBandRow16s(highlow, input_width, highlow_quantization, highlow_line);
+ DequantizeBandRow16s(highhigh, input_width, highhigh_quantization, highhigh_line);
+
+ // Process the entire row
+ for (column = 0; column < input_width; column++)
+ {
+ int32_t even = 0; // Result of convolution with even filter
+ int32_t odd = 0; // Result of convolution with odd filter
+
+
+ /***** Compute the vertical inverse for the left two bands *****/
+
+ // Apply the even reconstruction filter to the lowpass band
+ even += lowlow[column + 0 * lowlow_pitch];
+ even -= lowlow[column + 2 * lowlow_pitch];
+ even += 4;
+ even >>= 3;
+ even += lowlow[column + 1 * lowlow_pitch];
+
+ // Add the highpass correction
+ even += highlow_line[column];
+ even = DivideByShift(even, 1);
+
+ // The inverse of the left two bands should be a positive number
+ //assert(0 <= even && even <= INT16_MAX);
+
+ // Place the even result in the even row
+ even_lowpass[column] = ClampPixel(even);
+
+ // Apply the odd reconstruction filter to the lowpass band
+ odd -= lowlow[column + 0 * lowlow_pitch];
+ odd += lowlow[column + 2 * lowlow_pitch];
+ odd += 4;
+ odd >>= 3;
+ odd += lowlow[column + 1 * lowlow_pitch];
+
+ // Subtract the highpass correction
+ odd -= highlow_line[column];
+ odd = DivideByShift(odd, 1);
+
+ // The inverse of the left two bands should be a positive number
+ //assert(0 <= odd && odd <= INT16_MAX);
+
+ // Place the odd result in the odd row
+ odd_lowpass[column] = ClampPixel(odd);
+
+
+ /***** Compute the vertical inverse for the right two bands *****/
+
+ even = 0;
+ odd = 0;
+
+ // Apply the even reconstruction filter to the lowpass band
+ even += lowhigh_line[0][column];
+ even -= lowhigh_line[2][column];
+ even += 4;
+ even >>= 3;
+ even += lowhigh_line[1][column];
+
+ // Add the highpass correction
+ even += highhigh_line[column];
+ even = DivideByShift(even, 1);
+
+ // Place the even result in the even row
+ even_highpass[column] = ClampPixel(even);
+
+ // Apply the odd reconstruction filter to the lowpass band
+ odd -= lowhigh_line[0][column];
+ odd += lowhigh_line[2][column];
+ odd += 4;
+ odd >>= 3;
+ odd += lowhigh_line[1][column];
+
+ // Subtract the highpass correction
+ odd -= highhigh_line[column];
+ odd = DivideByShift(odd, 1);
+
+ // Place the odd result in the odd row
+ odd_highpass[column] = ClampPixel(odd);
+ }
+
+ // Apply the inverse horizontal transform to the even and odd rows and descale the results
+ InvertHorizontalDescale16s(even_lowpass, even_highpass, even_output,
+ input_width, output_width, descale);
+
+ InvertHorizontalDescale16s(odd_lowpass, odd_highpass, odd_output,
+ input_width, output_width, descale);
+
+ // Advance to the next input row in each band
+ lowlow += lowlow_pitch;
+ lowhigh += lowhigh_pitch;
+ highlow += highlow_pitch;
+ highhigh += highhigh_pitch;
+
+ // Advance to the next pair of even and odd output rows
+ even_output += 2 * output_pitch;
+ odd_output += 2 * output_pitch;
+
+ if (row < last_row - 1)
+ {
+ // Compute the address of the next row in the lowhigh band
+ PIXEL *lowhigh_row_ptr = (lowhigh + 2 * lowhigh_pitch);
+
+ // Shift the rows in the buffer of dequantized lowhigh bands
+ PIXEL *temp = lowhigh_line[0];
+ lowhigh_line[0] = lowhigh_line[1];
+ lowhigh_line[1] = lowhigh_line[2];
+ lowhigh_line[2] = temp;
+
+ // Undo quantization for the next row in the lowhigh band
+ DequantizeBandRow16s(lowhigh_row_ptr, input_width, lowhigh_quantization, lowhigh_line[2]);
+ }
+ }
+
+ // Should have exited the loop at the last row
+ assert(row == last_row);
+
+ // Advance the lowlow pointer to the last row in the band
+ lowlow += lowlow_pitch;
+
+ // Check that the band pointers are on the last row in each wavelet band
+ assert(lowlow == (lowlow_band + last_row * lowlow_pitch));
+
+ assert(highlow == (highlow_band + last_row * highlow_pitch));
+ assert(highhigh == (highhigh_band + last_row * highhigh_pitch));
+
+ // Undo quantization for the highlow and highhigh bands
+ DequantizeBandRow16s(highlow, input_width, highlow_quantization, highlow_line);
+ DequantizeBandRow16s(highhigh, input_width, highhigh_quantization, highhigh_line);
+
+ // Apply the vertical border filter to the last row
+ for (column = 0; column < input_width; column++)
+ {
+ int32_t even = 0; // Result of convolution with even filter
+ int32_t odd = 0; // Result of convolution with odd filter
+
+
+ /***** Compute the vertical inverse for the left two bands *****/
+
+ // Apply the even reconstruction filter to the lowpass band
+ even += 5 * lowlow[column + 0 * lowlow_pitch];
+ even += 4 * lowlow[column - 1 * lowlow_pitch];
+ even -= 1 * lowlow[column - 2 * lowlow_pitch];
+ even += rounding;
+ even = DivideByShift(even, 3);
+
+ // Add the highpass correction
+ even += highlow_line[column];
+ even = DivideByShift(even, 1);
+
+ // The inverse of the left two bands should be a positive number
+ //assert(0 <= even && even <= INT16_MAX);
+
+ // Place the even result in the even row
+ even_lowpass[column] = ClampPixel(even);
+
+ // Apply the odd reconstruction filter to the lowpass band
+ odd += 11 * lowlow[column + 0 * lowlow_pitch];
+ odd -= 4 * lowlow[column - 1 * lowlow_pitch];
+ odd += 1 * lowlow[column - 2 * lowlow_pitch];
+ odd += rounding;
+ odd = DivideByShift(odd, 3);
+
+ // Subtract the highpass correction
+ odd -= highlow_line[column];
+ odd = DivideByShift(odd, 1);
+
+ // The inverse of the left two bands should be a positive number
+ //assert(0 <= odd && odd <= INT16_MAX);
+
+ // Place the odd result in the odd row
+ odd_lowpass[column] = ClampPixel(odd);
+
+
+ /***** Compute the vertical inverse for the right two bands *****/
+
+ even = 0;
+ odd = 0;
+
+ // Apply the even reconstruction filter to the lowpass band
+ even += 5 * lowhigh_line[2][column];
+ even += 4 * lowhigh_line[1][column];
+ even -= 1 * lowhigh_line[0][column];
+ even += rounding;
+ even = DivideByShift(even, 3);
+
+ // Add the highpass correction
+ even += highhigh_line[column];
+ even = DivideByShift(even, 1);
+
+ // Place the even result in the even row
+ even_highpass[column] = ClampPixel(even);
+
+ // Apply the odd reconstruction filter to the lowpass band
+ odd += 11 * lowhigh_line[2][column];
+ odd -= 4 * lowhigh_line[1][column];
+ odd += 1 * lowhigh_line[0][column];
+ odd += rounding;
+ odd = DivideByShift(odd, 3);
+
+ // Subtract the highpass correction
+ odd -= highhigh_line[column];
+ odd = DivideByShift(odd, 1);
+
+ // Place the odd result in the odd row
+ odd_highpass[column] = ClampPixel(odd);
+ }
+
+ // Apply the inverse horizontal transform to the even and odd rows and descale the results
+ InvertHorizontalDescale16s(even_lowpass, even_highpass, even_output,
+ input_width, output_width, descale);
+
+ // Is the output wavelet shorter than twice the height of the input wavelet?
+ if (2 * row + 1 < output_height) {
+ InvertHorizontalDescale16s(odd_lowpass, odd_highpass, odd_output,
+ input_width, output_width, descale);
+ }
+
+ // Free the scratch buffers
+ allocator->Free(even_lowpass);
+ allocator->Free(even_highpass);
+ allocator->Free(odd_lowpass);
+ allocator->Free(odd_highpass);
+
+ allocator->Free(lowhigh_line[0]);
+ allocator->Free(lowhigh_line[1]);
+ allocator->Free(lowhigh_line[2]);
+ allocator->Free(highlow_line);
+ allocator->Free(highhigh_line);
+
+ return CODEC_ERROR_OKAY;
+}
+
diff --git a/gpr/source/lib/vc5_decoder/inverse.h b/gpr/source/lib/vc5_decoder/inverse.h
new file mode 100755
index 0000000..0d8d77c
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/inverse.h
@@ -0,0 +1,51 @@
+/*! @file inverse.h
+ *
+ * @brief Declaration of the inverse wavelet transform functions.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INVERSE_H
+#define INVERSE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ CODEC_ERROR InvertSpatialQuant16s(gpr_allocator *allocator,
+ PIXEL *lowlow_band, int lowlow_pitch,
+ PIXEL *lowhigh_band, int lowhigh_pitch,
+ PIXEL *highlow_band, int highlow_pitch,
+ PIXEL *highhigh_band, int highhigh_pitch,
+ PIXEL *output_image, int output_pitch,
+ DIMENSION input_width, DIMENSION input_height,
+ DIMENSION output_width, DIMENSION output_height,
+ QUANT quantization[]);
+
+ CODEC_ERROR InvertSpatialQuantDescale16s(gpr_allocator *allocator,
+ PIXEL *lowlow_band, int lowlow_pitch,
+ PIXEL *lowhigh_band, int lowhigh_pitch,
+ PIXEL *highlow_band, int highlow_pitch,
+ PIXEL *highhigh_band, int highhigh_pitch,
+ PIXEL *output_image, int output_pitch,
+ DIMENSION input_width, DIMENSION input_height,
+ DIMENSION output_width, DIMENSION output_height,
+ //ROI roi, PIXEL *buffer, size_t buffer_size,
+ int descale, QUANT quantization[]);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // INVERSE_H
diff --git a/gpr/source/lib/vc5_decoder/parameters.c b/gpr/source/lib/vc5_decoder/parameters.c
new file mode 100755
index 0000000..f8ef2cd
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/parameters.c
@@ -0,0 +1,47 @@
+/*! @file parameters.c
+ *
+ * @brief Implementation of the data structure used to pass decoding
+ * parameters to the decoder.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+//! Current version number of the parameters data structure
+#define PARAMETERS_VERSION 0
+
+/*!
+ @brief Initialize the parameters data structure to its default values
+
+ The version number of the parameters data structure must be
+ incremented whenever a change is made to the definition of
+ the parameters data structure.
+
+ @param parameters Non-NULL pointer to the parameters block to initialize.
+ @return CODEC_ERROR_OKAY (this routine cannot fail).
+*/
+CODEC_ERROR InitDecoderParameters(DECODER_PARAMETERS *parameters)
+{
+ // Zero every field first so members not set below have a defined value
+ memset(parameters, 0, sizeof(DECODER_PARAMETERS));
+ // NOTE(review): this hard-coded 1 disagrees with the PARAMETERS_VERSION
+ // macro (0) defined above, which is otherwise unused — confirm which value
+ // is the intended current version and make the macro the single source of truth.
+ parameters->version = 1;
+ parameters->verbose_flag = false;
+
+ // Enable the parts of the VC-5 standard compiled into this library
+ parameters->enabled_parts = VC5_ENABLED_PARTS;
+
+ parameters->output.format = PIXEL_FORMAT_RAW_DEFAULT;
+
+ // No RGB preview output unless the caller requests one
+ parameters->rgb_resolution = GPR_RGB_RESOLUTION_NONE;
+
+ gpr_rgb_gain_set_defaults(&parameters->rgb_gain);
+
+ return CODEC_ERROR_OKAY;
+}
diff --git a/gpr/source/lib/vc5_decoder/parameters.h b/gpr/source/lib/vc5_decoder/parameters.h
new file mode 100755
index 0000000..d0bbb74
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/parameters.h
@@ -0,0 +1,119 @@
+/*! @file parameters.h
+ *
+ * @brief Declare a data structure for holding a table of parameters used
+ * during decoding to override the default decoding behavior.
+
+ * The decoder can be initialized using the dimensions of the encoded frame
+ * obtained from an external source such as a media container and the pixel
+ * format of the decoded frame. The encoded sample will be decoded to the
+ * dimensions of the encoded frame without at the full encoded resolution
+ * without scaling. The decoded frames will have the specified pixel format,
+ * but this assumes that the encoded dimensions used during initialization
+ * are the same as the actual encoded dimensions and that the pixel format of
+ * the decoded frames is a valid pixel format.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PARAMETERS_H
+#define PARAMETERS_H
+
+#include "vc5_decoder.h"
+
+/*!
+ @brief Declaration of a data structure for passing decoding parameters to the decoder
+
+ Initialize an instance with @ref InitDecoderParameters before overriding
+ individual fields.
+*/
+typedef struct _decoder_parameters
+{
+ uint32_t version; //!< Version number for this definition of the parameters
+
+ uint32_t enabled_parts; //!< Parts of the VC-5 standard that are enabled
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+ int layer_count; //!< Number of layers in the sample
+ bool progressive; //!< True if the frame is progressive
+ bool top_field_first; //!< True if interlaced with top field first
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+ bool section_flag; //!< True if decoding sections element in the bitstream
+#endif
+
+ //! Dimensions and format of the output of the image unpacking process
+ struct _input_parameters
+ {
+ DIMENSION width;
+ DIMENSION height;
+ // PIXEL_FORMAT format;
+ } input; //!< Dimensions and format of the unpacked image
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+ //! Dimensions and format of the output of the decoding process
+ struct _decoded_parameters
+ {
+ DIMENSION width;
+ DIMENSION height;
+ PIXEL_FORMAT format;
+ } decoded; //!< Decoded image dimensions and pixel format
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_ELEMENTARY)
+ //! Dimensions and format of the output of the image repacking process
+ struct _output_parameters
+ {
+ DIMENSION width;
+ DIMENSION height;
+ PIXEL_FORMAT format;
+ } output; //!< Repacked output image dimensions and pixel format
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+ //! Dimensions and format of the displayable image
+ struct _display_parameters
+ {
+ DIMENSION width;
+ DIMENSION height;
+ PIXEL_FORMAT format;
+ } display; //!< Displayable image dimensions and pixel format
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_METADATA)
+ //! Metadata that controls decoding
+ METADATA metadata;
+#endif
+
+ //! Flag that controls verbose output
+ bool verbose_flag;
+
+ GPR_RGB_RESOLUTION rgb_resolution; //!< Resolution of the optional RGB output (GPR_RGB_RESOLUTION_NONE disables it)
+
+ int rgb_bits; //!< Bit depth of the RGB output
+
+ gpr_rgb_gain rgb_gain; //!< RGB gain values applied when producing the RGB output
+
+ gpr_allocator allocator; //!< Memory allocation callbacks used by the decoder
+
+} DECODER_PARAMETERS;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ CODEC_ERROR InitDecoderParameters(DECODER_PARAMETERS *parameters);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PARAMETERS_H
diff --git a/gpr/source/lib/vc5_decoder/raw.c b/gpr/source/lib/vc5_decoder/raw.c
new file mode 100755
index 0000000..7cbac6f
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/raw.c
@@ -0,0 +1,135 @@
+/*! @file raw.c
+ *
+ * @brief Definition of routines for packing a row of pixels into a RAW image.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+/*!
+ @brief Pack the component arrays into an output image
+
+ The inverse component transform for Bayer images (VC-5 Part 3)
+ is applied to the component arrays before combining the values
+ into a packed image.
+ */
+CODEC_ERROR PackComponentsToRAW(const UNPACKED_IMAGE *image,
+ PIXEL *output_buffer, size_t output_pitch,
+ DIMENSION width, DIMENSION height,
+ ENABLED_PARTS enabled_parts, uint16_t output_bit_depth, PIXEL_FORMAT output_format )
+{
+ // Define pointers to the rows for each input component
+ COMPONENT_VALUE *GS_input_buffer;
+ COMPONENT_VALUE *RG_input_buffer;
+ COMPONENT_VALUE *BG_input_buffer;
+ COMPONENT_VALUE *GD_input_buffer;
+
+ // Define pointers to the rows for each output component
+ uint16_t *output_row1_ptr;
+ uint16_t *output_row2_ptr;
+
+ //size_t input_quarter_pitch;
+ size_t output_half_pitch;
+
+ int row;
+
+ GS_input_buffer = image->component_array_list[0].data;
+ RG_input_buffer = image->component_array_list[1].data;
+ BG_input_buffer = image->component_array_list[2].data;
+ GD_input_buffer = image->component_array_list[3].data;
+
+ // Compute the distance between the half rows in the Bayer grid
+ output_half_pitch = output_pitch / 2;
+
+ for (row = 0; row < height; row++)
+ {
+ COMPONENT_VALUE *GS_input_row_ptr = (COMPONENT_VALUE *)((uintptr_t)GS_input_buffer + row * image->component_array_list[0].pitch);
+ COMPONENT_VALUE *RG_input_row_ptr = (COMPONENT_VALUE *)((uintptr_t)RG_input_buffer + row * image->component_array_list[1].pitch);
+ COMPONENT_VALUE *BG_input_row_ptr = (COMPONENT_VALUE *)((uintptr_t)BG_input_buffer + row * image->component_array_list[2].pitch);
+ COMPONENT_VALUE *GD_input_row_ptr = (COMPONENT_VALUE *)((uintptr_t)GD_input_buffer + row * image->component_array_list[3].pitch);
+
+ uint8_t *output_row_ptr = (uint8_t *)output_buffer + row * output_pitch;
+
+ const int32_t midpoint = 2048;
+
+ int column;
+
+ output_row1_ptr = (uint16_t *)output_row_ptr;
+ output_row2_ptr = (uint16_t *)(output_row_ptr + output_half_pitch);
+
+ // Pack the rows of Bayer components into the BYR4 pattern
+ for (column = 0; column < width; column++)
+ {
+ int32_t GS, RG, BG, GD;
+ int32_t R, G1, G2, B;
+
+ GS = GS_input_row_ptr[column];
+ RG = RG_input_row_ptr[column];
+ BG = BG_input_row_ptr[column];
+ GD = GD_input_row_ptr[column];
+
+ // Convert unsigned values to signed values
+ GD -= midpoint;
+ RG -= midpoint;
+ BG -= midpoint;
+
+ R = (RG << 1) + GS;
+ B = (BG << 1) + GS;
+ G1 = GS + GD;
+ G2 = GS - GD;
+
+ R = clamp_uint(R, 12);
+ G1 = clamp_uint(G1, 12);
+ G2 = clamp_uint(G2, 12);
+ B = clamp_uint(B, 12);
+
+ // Apply inverse protune log curve
+ R = DecoderLogCurve[R];
+ B = DecoderLogCurve[B];
+ G1 = DecoderLogCurve[G1];
+ G2 = DecoderLogCurve[G2];
+
+ R >>= (16 - output_bit_depth);
+ B >>= (16 - output_bit_depth);
+ G1 >>= (16 - output_bit_depth);
+ G2 >>= (16 - output_bit_depth);
+
+ switch (output_format)
+ {
+ case PIXEL_FORMAT_RAW_RGGB_12:
+ case PIXEL_FORMAT_RAW_RGGB_14:
+ output_row1_ptr[2 * column + 0] = (uint16_t)R;
+ output_row1_ptr[2 * column + 1] = (uint16_t)G1;
+ output_row2_ptr[2 * column + 0] = (uint16_t)G2;
+ output_row2_ptr[2 * column + 1] = (uint16_t)B;
+ break;
+
+ case PIXEL_FORMAT_RAW_GBRG_12:
+ case PIXEL_FORMAT_RAW_GBRG_14:
+ output_row1_ptr[2 * column + 0] = (uint16_t)G1;
+ output_row1_ptr[2 * column + 1] = (uint16_t)B;
+ output_row2_ptr[2 * column + 0] = (uint16_t)R;
+ output_row2_ptr[2 * column + 1] = (uint16_t)G2;
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+ }
+ }
+
+ return CODEC_ERROR_OKAY;
+}
diff --git a/gpr/source/lib/vc5_decoder/raw.h b/gpr/source/lib/vc5_decoder/raw.h
new file mode 100755
index 0000000..7bca498
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/raw.h
@@ -0,0 +1,35 @@
+/*! @file raw.h
+ *
+ * @brief Declaration of routines for packing a row of pixels into a RAW image.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RAW_H
+#define RAW_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ CODEC_ERROR PackComponentsToRAW(const UNPACKED_IMAGE *image,
+ PIXEL *output_buffer, size_t output_pitch,
+ DIMENSION width, DIMENSION height,
+ ENABLED_PARTS enabled_parts, uint16_t output_bit_depth, PIXEL_FORMAT output_format );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // RAW_H
diff --git a/gpr/source/lib/vc5_decoder/syntax.c b/gpr/source/lib/vc5_decoder/syntax.c
new file mode 100755
index 0000000..f0c4b58
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/syntax.c
@@ -0,0 +1,116 @@
+/*! @file syntax.c
+ *
+ * @brief Implementation of functions for parsing the bitstream syntax of encoded samples.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+//! Size of a tag or value (in bits)
+#define BITSTREAM_TAG_SIZE 16
+
+/*!
+ @brief Read the next tag-valie pair from the bitstream.
+
+ The next tag is read from the bitstream and the next value that
+ immediately follows the tag in the bitstreeam are read from the
+ bitstream.
+
+ The value may be the length of the payload in bytes or the value
+ may be a single scalar. This routine only reads the next tag and
+ value and does not intepret the tag or value and does not read any
+ data that may follow the segment in the bitstream.
+
+ The tag and value are interpreted by @ref UpdateCodecState and that
+ routine may read additional information from the bitstream.
+
+ If the value is the length of the payload then it encodes the number
+ of bytes in the segment payload, not counting the segment header.
+*/
+TAGVALUE GetSegment(BITSTREAM *stream)
+{
+ TAGVALUE segment;
+ segment.tuple.tag = (TAGWORD)GetBits(stream, 16);
+ segment.tuple.value = (TAGWORD)GetBits(stream, 16);
+ return segment;
+}
+
+/*!
+ @brief Read the specified tag from the bitstream and return the value
+*/
+TAGWORD GetValue(BITSTREAM *stream, int tag)
+{
+ TAGVALUE segment = GetTagValue(stream);
+
+ assert(stream->error == BITSTREAM_ERROR_OKAY);
+ if (stream->error == BITSTREAM_ERROR_OKAY) {
+ assert(segment.tuple.tag == tag);
+ if (segment.tuple.tag == tag) {
+ return segment.tuple.value;
+ }
+ else {
+ stream->error = BITSTREAM_ERROR_BADTAG;
+ }
+ }
+
+ // An error has occurred so return zero (error code was set above)
+ return 0;
+}
+
+/*!
+ @brief Read the next tag value pair from the bitstream
+*/
+TAGVALUE GetTagValue(BITSTREAM *stream)
+{
+ TAGVALUE segment = GetSegment(stream);
+ while (segment.tuple.tag < 0) {
+ segment = GetSegment(stream);
+ }
+
+ return segment;
+}
+
+/*!
+ @brief Return true if the tag is optional
+*/
+bool IsTagOptional(TAGWORD tag)
+{
+ return (tag < 0);
+}
+
+/*!
+ @brief Return true if the tag is required
+*/
+bool IsTagRequired(TAGWORD tag)
+{
+ return (tag >= 0);
+}
+
+/*!
+ @brief Return true if a valid tag read from the bitstream
+*/
+bool IsValidSegment(BITSTREAM *stream, TAGVALUE segment, TAGWORD tag)
+{
+ return (stream->error == BITSTREAM_ERROR_OKAY &&
+ segment.tuple.tag == tag);
+}
+
+/*!
+ @brief Return true if the tag value pair has the specified tag and value
+*/
+bool IsTagValue(TAGVALUE segment, int tag, TAGWORD value)
+{
+ return (segment.tuple.tag == tag && segment.tuple.value == value);
+}
diff --git a/gpr/source/lib/vc5_decoder/syntax.h b/gpr/source/lib/vc5_decoder/syntax.h
new file mode 100755
index 0000000..d5c4099
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/syntax.h
@@ -0,0 +1,44 @@
+/*! @file syntax.h
+ *
+ * @brief Declare functions for parsing the bitstream syntax of encoded samples.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DECODER_SYNTAX_H
+#define DECODER_SYNTAX_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ TAGVALUE GetSegment(BITSTREAM *stream);
+
+ TAGWORD GetValue(BITSTREAM *stream, int tag);
+
+ TAGVALUE GetTagValue(BITSTREAM *stream);
+
+ bool IsTagOptional(TAGWORD tag);
+
+ bool IsTagRequired(TAGWORD tag);
+
+ bool IsValidSegment(BITSTREAM *stream, TAGVALUE segment, TAGWORD tag);
+
+ bool IsTagValue(TAGVALUE segment, int tag, TAGWORD value);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // DECODER_SYNTAX_H
diff --git a/gpr/source/lib/vc5_decoder/vc5_decoder.c b/gpr/source/lib/vc5_decoder/vc5_decoder.c
new file mode 100755
index 0000000..f36ad4c
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/vc5_decoder.c
@@ -0,0 +1,132 @@
+/*! @file vc5_decoder.c
+ *
+ * @brief Implementation of the top level vc5 decoder API.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+void vc5_decoder_parameters_set_default(vc5_decoder_parameters* decoding_parameters)
+{
+ decoding_parameters->enabled_parts = VC5_ENABLED_PARTS;
+
+ decoding_parameters->pixel_format = VC5_DECODER_PIXEL_FORMAT_DEFAULT;
+
+ decoding_parameters->rgb_resolution = VC5_DECODER_RGB_RESOLUTION_DEFAULT;
+ decoding_parameters->rgb_bits = 8;
+
+ gpr_rgb_gain_set_defaults(&decoding_parameters->rgb_gain);
+}
+
+/*!
+ @brief Decode a VC-5 bitstream into a raw Bayer image and/or an RGB image
+
+ @param decoding_parameters Decoder settings (enabled parts, pixel format,
+ RGB output options, memory callbacks); must be non-NULL.
+ @param vc5_buffer Encoded VC-5 input; must be non-NULL.
+ @param raw_buffer Receives the decoded raw image, or NULL if no raw output is wanted.
+ @param rgb_buffer Receives the RGB image, or NULL to disable RGB output.
+ @return CODEC_ERROR_OKAY on success; a codec error code otherwise.
+
+ On success, ownership of the decoded buffers is transferred to the caller
+ through raw_buffer and rgb_buffer.
+*/
+CODEC_ERROR vc5_decoder_process(const vc5_decoder_parameters* decoding_parameters, /* vc5 decoding parameters */
+ const gpr_buffer* vc5_buffer, /* vc5 input buffer. */
+ gpr_buffer* raw_buffer, /* raw output buffer. */
+ gpr_rgb_buffer* rgb_buffer) /* rgb output buffer */
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+ IMAGE output_image;
+ // Clear the members of the image data structure
+ InitImage(&output_image);
+
+ STREAM input;
+ DECODER_PARAMETERS parameters;
+
+ // Initialize the parameters passed to the decoder
+ InitDecoderParameters(&parameters);
+
+ // Copy the caller's settings into the internal parameter block
+ parameters.enabled_parts = decoding_parameters->enabled_parts;
+
+ parameters.rgb_resolution = decoding_parameters->rgb_resolution;
+ parameters.rgb_bits = decoding_parameters->rgb_bits;
+ parameters.rgb_gain = decoding_parameters->rgb_gain;
+
+ // Without an output buffer for the RGB image, RGB decoding is disabled
+ if( rgb_buffer == NULL )
+ {
+ parameters.rgb_resolution = GPR_RGB_RESOLUTION_NONE;
+ }
+
+ // Install the caller-supplied memory allocation callbacks
+ parameters.allocator.Alloc = decoding_parameters->mem_alloc;
+ parameters.allocator.Free = decoding_parameters->mem_free;
+
+ // Map the public pixel format enumeration onto the internal pixel format
+ switch( decoding_parameters->pixel_format )
+ {
+ case VC5_DECODER_PIXEL_FORMAT_RGGB_12:
+ parameters.output.format = PIXEL_FORMAT_RAW_RGGB_12;
+ break;
+
+ case VC5_DECODER_PIXEL_FORMAT_RGGB_14:
+ parameters.output.format = PIXEL_FORMAT_RAW_RGGB_14;
+ break;
+
+ case VC5_DECODER_PIXEL_FORMAT_GBRG_12:
+ parameters.output.format = PIXEL_FORMAT_RAW_GBRG_12;
+ break;
+
+ case VC5_DECODER_PIXEL_FORMAT_GBRG_14:
+ parameters.output.format = PIXEL_FORMAT_RAW_GBRG_14;
+ break;
+
+ default:
+ // NOTE(review): in release builds (NDEBUG) this assert is a no-op and
+ // decoding proceeds with the format set by InitDecoderParameters —
+ // confirm an explicit error return is not wanted here.
+ assert(0);
+ }
+
+ // Check that the enabled parts are correct
+ error = CheckEnabledParts(&parameters.enabled_parts);
+ if (error != CODEC_ERROR_OKAY) {
+ return CODEC_ERROR_ENABLED_PARTS;
+ }
+
+ // Wrap the caller's input buffer in a bitstream
+ error = OpenStreamBuffer(&input, vc5_buffer->buffer, vc5_buffer->size );
+ if (error != CODEC_ERROR_OKAY) {
+ fprintf(stderr, "Could not open input vc5 stream\n" );
+ return error;
+ }
+
+ RGB_IMAGE rgb_image;
+ InitRGBImage(&rgb_image);
+
+ // NOTE(review): on failure this returns without releasing anything the
+ // decoder may have allocated into output_image/rgb_image — verify
+ // DecodeImage cleans up after itself on error.
+ error = DecodeImage(&input, &output_image, &rgb_image, &parameters);
+ if (error != CODEC_ERROR_OKAY) {
+ fprintf(stderr, "Could not decode input vc5 bitstream. Error number %d\n", error );
+ return error;
+ }
+
+ // Hand the decoded RGB image (if any) to the caller
+ if( parameters.rgb_resolution != GPR_RGB_RESOLUTION_NONE )
+ {
+ assert( rgb_buffer);
+
+ rgb_buffer->buffer = rgb_image.buffer;
+ rgb_buffer->size = rgb_image.size;
+ rgb_buffer->width = rgb_image.width;
+ rgb_buffer->height = rgb_image.height;
+ }
+
+ // Hand the decoded raw image (if requested) to the caller
+ if( raw_buffer )
+ {
+ assert( output_image.buffer );
+ assert( output_image.size == output_image.width * output_image.height * sizeof(unsigned short) );
+
+ raw_buffer->buffer = output_image.buffer;
+ raw_buffer->size = output_image.size;
+ }
+ else
+ {
+ // Nothing should be returned in output_image since we do not want output raw image
+ assert( output_image.buffer == NULL );
+ }
+
+ return error;
+}
diff --git a/gpr/source/lib/vc5_decoder/vc5_decoder.h b/gpr/source/lib/vc5_decoder/vc5_decoder.h
new file mode 100755
index 0000000..c87111a
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/vc5_decoder.h
@@ -0,0 +1,84 @@
+/*! @file vc5_decoder.h
+ *
+ * @brief Declaration of the top level vc5 decoder API.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VC5_DECODER_H
+#define VC5_DECODER_H
+
+#include "error.h"
+#include "types.h"
+#include "gpr_buffer.h"
+#include "gpr_rgb_buffer.h"
+#include "vc5_common.h"
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+    /*!
+        @brief Bayer pattern ordering for vc5 decoder processing
+
+        Each value selects both the Bayer channel ordering (RGGB or GBRG)
+        and the sample bit depth of the packed 16-bit raw output.
+    */
+    typedef enum
+    {
+        VC5_DECODER_PIXEL_FORMAT_RGGB_12 = 0,        // RGGB 12bit pixels packed into 16bits
+
+        VC5_DECODER_PIXEL_FORMAT_RGGB_14 = 1,        // RGGB 14bit pixels packed into 16bits
+
+        VC5_DECODER_PIXEL_FORMAT_GBRG_12 = 2,        // GBRG 12bit pixels packed into 16bits
+
+        VC5_DECODER_PIXEL_FORMAT_GBRG_14 = 3,        // GBRG 14bit pixels packed into 16bits
+
+        VC5_DECODER_PIXEL_FORMAT_DEFAULT = VC5_DECODER_PIXEL_FORMAT_RGGB_14,
+
+    } VC5_DECODER_PIXEL_FORMAT;
+
+ #define VC5_DECODER_RGB_RESOLUTION_DEFAULT GPR_RGB_RESOLUTION_QUARTER
+
+    /*!
+        @brief vc5 decoder parameters
+
+        Filled in by vc5_decoder_parameters_set_default() and then customized
+        by the caller before invoking vc5_decoder_process().
+    */
+    typedef struct
+    {
+        ENABLED_PARTS enabled_parts;                // Mask of enabled VC-5 parts (validated by CheckEnabledParts in the decoder)
+
+        VC5_DECODER_PIXEL_FORMAT pixel_format;      // Bayer ordering pattern and bit depth of the raw output (Default: VC5_DECODER_PIXEL_FORMAT_RGGB_14)
+
+        GPR_RGB_RESOLUTION rgb_resolution;          // Resolution of the optional RGB output (GPR_RGB_RESOLUTION_NONE disables it)
+
+        int rgb_bits;                               // Bit depth used for the RGB output — NOTE(review): confirm valid range against the RGB conversion code
+
+        gpr_rgb_gain rgb_gain;                      // Per-channel gains applied when producing the RGB output — presumably white-balance gains; verify against caller
+
+        gpr_malloc mem_alloc;                       // Callback function to allocate memory
+
+        gpr_free mem_free;                          // Callback function to free memory
+
+    } vc5_decoder_parameters;
+
+ void vc5_decoder_parameters_set_default(vc5_decoder_parameters* decoding_parameters);
+
+ CODEC_ERROR vc5_decoder_process(const vc5_decoder_parameters* decoding_parameters, /* vc5 decoding parameters */
+ const gpr_buffer* vc5_buffer, /* vc5 input buffer. */
+ gpr_buffer* raw_buffer, /* raw output buffer. */
+ gpr_rgb_buffer* rgb_buffer); /* rgb output buffer */
+
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif // VC5_DECODER_H
diff --git a/gpr/source/lib/vc5_decoder/vlc.c b/gpr/source/lib/vc5_decoder/vlc.c
new file mode 100755
index 0000000..646fc59
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/vlc.c
@@ -0,0 +1,109 @@
+/*! @file vlc.c
+ *
+ * @brief Implementation of routines to parse a variable-length encoded bitstream.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+/*!
+    @brief Parse a run length coded magnitude in the bitstream
+
+    The codebook entries are assumed to be ordered by non-decreasing codeword
+    size.  Bits are pulled from the stream only when the next group of
+    codebook entries requires a longer codeword than has been read so far,
+    so exactly the bits of the matched codeword are consumed.  On success
+    the run count and unsigned magnitude are stored in @p run; the optional
+    sign suffix is left in the stream for the caller (see GetRun).
+*/
+CODEC_ERROR GetRlv(BITSTREAM *stream, CODEBOOK *codebook, RUN *run)
+{
+    BITWORD bitstream_bits = 0;        // Buffer of bits read from the stream
+    BITCOUNT bitstream_count = 0;    // Number of bits read from the stream
+
+    // Get the length of the codebook and initialize a pointer to its entries
+    // (the RLV entries immediately follow the CODEBOOK header in memory)
+    int codebook_length = codebook->length;
+    RLV *codebook_entry = (RLV *)((uint8_t *)codebook + sizeof(CODEBOOK));
+
+    // Index into the codebook
+    int codeword_index = 0;
+
+    // Search the codebook for the run length and value
+    while (codeword_index < codebook_length)
+    {
+        // Get the size of the current word in the codebook
+        BITCOUNT codeword_count = codebook_entry[codeword_index].size;
+
+        // Need to read more bits from the stream?
+        if (bitstream_count < codeword_count)
+        {
+            // Calculate the number of additional bits to read from the stream
+            BITCOUNT read_count = codeword_count - bitstream_count;
+            bitstream_bits = AddBits(stream, bitstream_bits, read_count);
+            bitstream_count = codeword_count;
+        }
+
+        // Examine the run length table entries that have the same bit field length
+        for (; (codeword_index < codebook_length) && (bitstream_count == codebook_entry[codeword_index].size);
+             codeword_index++) {
+            if (bitstream_bits == codebook_entry[codeword_index].bits) {
+                // Matched a codeword: report its run length and unsigned magnitude
+                run->count = codebook_entry[codeword_index].count;
+                run->value = codebook_entry[codeword_index].value;
+                goto found;
+            }
+        }
+    }
+
+    // Did not find a matching code in the codebook
+    return CODEC_ERROR_NOTFOUND;
+
+found:
+
+    // Found a valid codeword in the bitstream
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    Parse a run length coded signed value in the bitstream
+
+    Decodes the unsigned magnitude with GetRlv() and, for non-zero
+    magnitudes, consumes the one-bit sign suffix that follows the
+    codeword to restore the signed coefficient value.
+*/
+CODEC_ERROR GetRun(BITSTREAM *stream, CODEBOOK *codebook, RUN *run)
+{
+    // Decode the unsigned magnitude portion of the run
+    CODEC_ERROR error = GetRlv(stream, codebook, run);
+    if (error != CODEC_ERROR_OKAY) {
+        return error;
+    }
+
+    int32_t magnitude = run->value;
+
+    // A zero magnitude carries no sign suffix
+    if (magnitude == 0) {
+        return CODEC_ERROR_OKAY;
+    }
+
+    // Something is wrong if the decoded magnitude is already negative
+    assert(magnitude > 0);
+
+    // Read the codeword for the sign that follows a non-zero magnitude
+    BITWORD sign = GetBits(stream, VLC_SIGNCODE_SIZE);
+
+    // Negate the magnitude if the codeword signalled a negative value
+    if (sign == VLC_NEGATIVE_CODE) {
+        magnitude = neg(magnitude);
+    }
+
+    // Return the signed value of the coefficient
+    run->value = magnitude;
+
+    return CODEC_ERROR_OKAY;
+}
diff --git a/gpr/source/lib/vc5_decoder/vlc.h b/gpr/source/lib/vc5_decoder/vlc.h
new file mode 100755
index 0000000..6ec41f6
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/vlc.h
@@ -0,0 +1,103 @@
+/*! @file vlc.h
+ *
+ * @brief Declaration of the data structures for variable-length decoding
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VLC_H
+#define VLC_H
+
+// Codewords for the sign bits that follow a non-zero value
+#define VLC_POSITIVE_CODE 0x0 //!< Code that indicates a positive value
+#define VLC_NEGATIVE_CODE 0x1 //!< Code that indicates a negative value
+#define VLC_SIGNCODE_SIZE 1 //!< Size of the code for sign suffix
+
+/*!
+ @brief Codebook entries for arbitrary runs and values
+
+ The codebook data structure allows runs of an arbitrary value,
+ but all codec implementations only use runs of zeros. The
+ codeword for a non-zero value is followed by the sign bit.
+
+ @todo Could add the sign bit to the magnitude entries in this
+ table if it improves performance or makes the code more clear.
+*/
+typedef struct _rlv {
+    uint_fast8_t size;        //!< Size of code word in bits
+    uint32_t bits;            //!< Code word bits right justified
+    uint32_t count;           //!< Run length
+    int32_t value;            //!< Run value: unsigned magnitude (the sign bit follows separately in the stream)
+} RLV;
+
+/*!
+ @brief Declaration of a codebook
+
+ This data structure is often called the master codebook to distinguish
+ it from the encoding tables that are derived from the codebook. The
+ codebook has a header that is immediately followed by the codebook entries.
+ Each entry is an @ref RLV data structure that contains the codeword and
+ the size of the codeword in bits. Each codeword represent a run length
+ and value. The current codec implementation only supports runs of zeros,
+ so the run length is one for non-zero values. A non-zero value is an
+ unsigned coefficient magnitude. Special codewords that mark significant
+ locations in the bitstream are indicated by a run length of zero and the
+ value indicates the type of marker.
+
+ The codebook is generated by a separate program that takes as input a table
+ of the frequencies of coefficient magnitudes and runs of zeros.
+*/
+typedef struct _codebook
+{
+ uint32_t length; //!< Number of entries in the code book
+ // The length is followed by the RLV entries
+} CODEBOOK;
+
+//! Macro used to define the codebook generated by the Huffman program
+#define RLVTABLE(n) \
+ static struct \
+ { \
+ uint32_t length; \
+ RLV entries[n]; \
+ }
+
+/*!
+ @brief Structure returned by the run length decoding routines
+
+ The value returned may be signed if the routine that was called
+ to parse the bitstream found a run of a non-zero value and then
+ parsed the sign bit that follows the magnitude.
+*/
+typedef struct _run {
+    uint32_t count;        //!< Run length count
+    int32_t value;         //!< Run value: unsigned magnitude after GetRlv, signed after GetRun applies the sign suffix
+} RUN;
+
+//! Initializer for the run length data structure
+#define RUN_INITIALIZER {0, 0}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ CODEC_ERROR GetRlv(BITSTREAM *stream, CODEBOOK *codebook, RUN *run);
+ CODEC_ERROR GetRun(BITSTREAM *stream, CODEBOOK *codebook, RUN *run);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // VLC_H
diff --git a/gpr/source/lib/vc5_decoder/wavelet.c b/gpr/source/lib/vc5_decoder/wavelet.c
new file mode 100755
index 0000000..a60eb0a
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/wavelet.c
@@ -0,0 +1,173 @@
+/*! @file wavelet.c
+ *
+ * @brief Implementation of the module for wavelet data structures and transforms
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+/*!
+    @brief Apply the inverse wavelet transform to reconstruct a lowpass band
+
+    This routine reconstructs the lowpass band in the output wavelet from the
+    decoded bands in the input wavelet.  The prescale argument is used to undo
+    scaling that may have been performed during encoding to prevent overflow.
+
+    @param allocator Allocator used by the inverse transform routines
+    @param input Decoded wavelet providing the four input bands
+    @param output Wavelet that receives the reconstructed lowpass band
+    @param prescale Prescale shift applied during encoding (0 = none, 2 = lowpass prescale)
+
+    @todo Replace the two different routines for different prescale shifts with
+    a single routine that can handle any prescale shift.
+*/
+CODEC_ERROR TransformInverseSpatialQuantLowpass(gpr_allocator *allocator, WAVELET *input, WAVELET *output, uint16_t prescale)
+{
+    // Check that valid input and output wavelets have been provided.
+    // These checks must precede any dereference of the pointers (the
+    // original code read input->width before asserting input != NULL).
+    assert(input != NULL);
+    assert(output != NULL);
+    assert(input->data[0] != NULL);
+    assert(input->data[1] != NULL);
+    assert(input->data[2] != NULL);
+    assert(input->data[3] != NULL);
+
+    // Check that the output image is a gray image or a lowpass wavelet band
+    assert(output->data[0] != NULL);
+
+    // Dimensions of each wavelet band
+    DIMENSION input_width = input->width;
+    DIMENSION input_height = input->height;
+
+    // The output width may be less than twice the input width if the output width is odd
+    DIMENSION output_width = output->width;
+
+    // The output height may be less than twice the input height if the output height is odd
+    DIMENSION output_height = output->height;
+
+    // Check for valid quantization values (a zero lowpass quant is treated as unity)
+    if (input->quant[0] == 0) {
+        input->quant[0] = 1;
+    }
+
+    assert(input->quant[0] > 0);
+    assert(input->quant[1] > 0);
+    assert(input->quant[2] > 0);
+    assert(input->quant[3] > 0);
+
+    if (prescale > 1)
+    {
+        // This is a spatial transform for the lowpass temporal band
+        assert(prescale == 2);
+
+        // Apply the inverse spatial transform, undoing the prescale shift
+        InvertSpatialQuantDescale16s(allocator,
+                                     (PIXEL *)input->data[0], input->pitch,
+                                     (PIXEL *)input->data[1], input->pitch,
+                                     (PIXEL *)input->data[2], input->pitch,
+                                     (PIXEL *)input->data[3], input->pitch,
+                                     output->data[0], output->pitch,
+                                     input_width, input_height,
+                                     output_width, output_height,
+                                     prescale, input->quant);
+    }
+    else
+    {
+        // This case does not handle any prescaling applied during encoding
+        assert(prescale == 0);
+
+        // Apply the inverse spatial transform for a lowpass band that is not prescaled
+        InvertSpatialQuant16s(allocator,
+                              (PIXEL *)input->data[0], input->pitch,
+                              (PIXEL *)input->data[1], input->pitch,
+                              (PIXEL *)input->data[2], input->pitch,
+                              (PIXEL *)input->data[3], input->pitch,
+                              output->data[0], output->pitch,
+                              input_width, input_height,
+                              output_width, output_height,
+                              input->quant);
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Apply the inverse wavelet transform to reconstruct a component array
+
+    This routine reconstructs the component array in the output buffer from the
+    decoded bands in the input wavelet.  The prescale argument is used to undo
+    scaling that may have been performed during encoding to prevent overflow.
+
+    @param allocator Allocator used by the inverse transform routines
+    @param input Decoded wavelet providing the four input bands
+    @param output_buffer Destination buffer for the reconstructed component values
+    @param output_width Width of the output component array in samples
+    @param output_height Height of the output component array in rows
+    @param output_pitch Byte pitch between output rows
+    @param prescale Prescale shift applied during encoding (0 = none, 2 = lowpass prescale)
+*/
+CODEC_ERROR TransformInverseSpatialQuantArray(gpr_allocator *allocator,
+                                              WAVELET *input,
+                                              COMPONENT_VALUE *output_buffer,
+                                              DIMENSION output_width,
+                                              DIMENSION output_height,
+                                              size_t output_pitch,
+                                              PRESCALE prescale)
+{
+    // Check that a valid input wavelet has been provided.  These checks must
+    // precede any dereference of the pointer (the original code read
+    // input->width before asserting input != NULL).
+    assert(input != NULL);
+    assert(input->data[0] != NULL);
+    assert(input->data[1] != NULL);
+    assert(input->data[2] != NULL);
+    assert(input->data[3] != NULL);
+
+    // Dimensions of each wavelet band
+    DIMENSION input_width = input->width;
+    DIMENSION input_height = input->height;
+
+    // Check for valid quantization values (a zero lowpass quant is treated as unity)
+    if (input->quant[0] == 0) {
+        input->quant[0] = 1;
+    }
+
+    assert(input->quant[0] > 0);
+    assert(input->quant[1] > 0);
+    assert(input->quant[2] > 0);
+    assert(input->quant[3] > 0);
+
+    // Check that a valid output buffer description has been provided
+    assert(output_width > 0 && output_height > 0 && output_pitch > 0 && output_buffer != NULL);
+
+    if (prescale > 1)
+    {
+        // This is a spatial transform for the lowpass temporal band
+        assert(prescale == 2);
+
+        // Apply the inverse spatial transform, undoing the prescale shift
+        InvertSpatialQuantDescale16s(allocator,
+                                     (PIXEL *)input->data[0], input->pitch,
+                                     (PIXEL *)input->data[1], input->pitch,
+                                     (PIXEL *)input->data[2], input->pitch,
+                                     (PIXEL *)input->data[3], input->pitch,
+                                     (PIXEL *)output_buffer, (int)output_pitch,
+                                     input_width, input_height,
+                                     output_width, output_height,
+                                     prescale, input->quant);
+    }
+    else
+    {
+        // This case does not handle any prescaling applied during encoding
+        assert(prescale == 0);
+
+        // Apply the inverse spatial transform for a lowpass band that is not prescaled
+        InvertSpatialQuant16s(allocator,
+                              (PIXEL *)input->data[0], input->pitch,
+                              (PIXEL *)input->data[1], input->pitch,
+                              (PIXEL *)input->data[2], input->pitch,
+                              (PIXEL *)input->data[3], input->pitch,
+                              (PIXEL *)output_buffer, (int)output_pitch,
+                              input_width, input_height,
+                              output_width, output_height,
+                              input->quant);
+    }
+
+    return CODEC_ERROR_OKAY;
+}
diff --git a/gpr/source/lib/vc5_decoder/wavelet.h b/gpr/source/lib/vc5_decoder/wavelet.h
new file mode 100755
index 0000000..4f2d2fb
--- /dev/null
+++ b/gpr/source/lib/vc5_decoder/wavelet.h
@@ -0,0 +1,42 @@
+/*! @file wavelet.h
+ *
+ * @brief Declare of wavelet decoding functions
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Guard renamed from _DECODER_WAVELET_H: identifiers beginning with an
+// underscore followed by an uppercase letter are reserved for the
+// implementation (C standard, 7.1.3).
+#ifndef DECODER_WAVELET_H
+#define DECODER_WAVELET_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+CODEC_ERROR TransformInverseSpatialQuantLowpass(gpr_allocator *allocator, WAVELET *input, WAVELET *output, uint16_t prescale);
+
+CODEC_ERROR TransformInverseSpatialQuantArray( gpr_allocator *allocator,
+                                               WAVELET *input,
+                                               COMPONENT_VALUE *output_buffer,
+                                               DIMENSION output_width,
+                                               DIMENSION output_height,
+                                               size_t output_pitch,
+                                               PRESCALE prescale);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // DECODER_WAVELET_H