summaryrefslogtreecommitdiff
path: root/gpr/source/lib/vc5_encoder
diff options
context:
space:
mode:
Diffstat (limited to 'gpr/source/lib/vc5_encoder')
-rw-r--r--gpr/source/lib/vc5_encoder/CMakeLists.txt21
-rwxr-xr-xgpr/source/lib/vc5_encoder/codebooks.c425
-rwxr-xr-xgpr/source/lib/vc5_encoder/codebooks.h71
-rwxr-xr-xgpr/source/lib/vc5_encoder/component.c403
-rwxr-xr-xgpr/source/lib/vc5_encoder/component.h100
-rwxr-xr-xgpr/source/lib/vc5_encoder/encoder.c2555
-rwxr-xr-xgpr/source/lib/vc5_encoder/encoder.h333
-rwxr-xr-xgpr/source/lib/vc5_encoder/forward.c851
-rwxr-xr-xgpr/source/lib/vc5_encoder/forward.h38
-rwxr-xr-xgpr/source/lib/vc5_encoder/headers.h50
-rwxr-xr-xgpr/source/lib/vc5_encoder/parameters.c86
-rwxr-xr-xgpr/source/lib/vc5_encoder/parameters.h139
-rwxr-xr-xgpr/source/lib/vc5_encoder/raw.c621
-rwxr-xr-xgpr/source/lib/vc5_encoder/raw.h36
-rwxr-xr-xgpr/source/lib/vc5_encoder/sections.c293
-rwxr-xr-xgpr/source/lib/vc5_encoder/sections.h91
-rwxr-xr-xgpr/source/lib/vc5_encoder/syntax.c276
-rwxr-xr-xgpr/source/lib/vc5_encoder/syntax.h44
-rwxr-xr-xgpr/source/lib/vc5_encoder/vc5_encoder.c164
-rwxr-xr-xgpr/source/lib/vc5_encoder/vc5_encoder.h103
-rwxr-xr-xgpr/source/lib/vc5_encoder/vlc.c94
-rwxr-xr-xgpr/source/lib/vc5_encoder/vlc.h138
22 files changed, 6932 insertions, 0 deletions
diff --git a/gpr/source/lib/vc5_encoder/CMakeLists.txt b/gpr/source/lib/vc5_encoder/CMakeLists.txt
new file mode 100644
index 0000000..9dc143b
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/CMakeLists.txt
@@ -0,0 +1,21 @@
+# Name of the static library produced by this folder
+set( LIB_NAME vc5_encoder )
+
+# Collect all C source files in this folder
+file( GLOB SRC_FILES "*.c" )
+
+# Collect all header files in this folder (listed so IDE projects show them)
+file( GLOB INC_FILES "*.h" )
+
+# Add include files from other folders
+include_directories( "../vc5_common" )
+include_directories( "../common/private" )
+include_directories( "../common/public" )
+
+# Build a static library from the sources and headers
+add_library( ${LIB_NAME} STATIC ${SRC_FILES} ${INC_FILES} )
+
+target_link_libraries( ${LIB_NAME} )
+
+# Set the folder where to place the projects
+set_target_properties( ${LIB_NAME} PROPERTIES FOLDER lib )
diff --git a/gpr/source/lib/vc5_encoder/codebooks.c b/gpr/source/lib/vc5_encoder/codebooks.c
new file mode 100755
index 0000000..c013f4d
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/codebooks.c
@@ -0,0 +1,425 @@
+/*! @file codebooks.c
+ *
+ * @brief Implementation of the routines for computing the encoding tables from a codebook.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+#include "table17.inc"
+
+//! Length of the codebook for runs of zeros
+#define RUNS_TABLE_LENGTH 3072
+
+
+/*!
+ @brief Define the codeset used by the reference codec
+
+ The baseline codec only supports codebook #17.
+
+ Codebook #17 is intended to be used with cubic companding
+ (see @ref FillMagnitudeEncodingTable and @ref ComputeCubicTable).
+ */
+ENCODER_CODESET encoder_codeset_17 = {
+    "Codebook set 17 from data by David Newman with tables automatically generated for the FSM decoder",
+    (const CODEBOOK *)&table17,
+    CODESET_FLAGS_COMPANDING_CUBIC,
+    NULL,           // mags_table: filled in by PrepareCodebooks
+    NULL,           // runs_table: filled in by PrepareCodebooks
+};
+
+/*!
+    @brief Initialize the codeset by creating more efficient tables for encoding
+
+    This routine takes the original codebook in the codeset and creates a table
+    of codewords for runs of zeros, indexed by the run length, and a table for
+    coefficient magnitudes, indexed by the coefficient magnitude.  This allows
+    runs of zeros and non-zero coefficients to be entropy coded using a simple
+    table lookup.
+
+    The tables are allocated with the provided allocator and stored in the
+    codeset; they must be released with @ref ReleaseCodebooks.  On failure the
+    routine frees everything it allocated and leaves the codeset tables unset.
+
+    @param allocator Allocator used for the new tables
+    @param cs Codeset containing the source codebook; receives the new tables
+    @return CODEC_ERROR_OKAY on success, otherwise an error code
+*/
+CODEC_ERROR PrepareCodebooks(const gpr_allocator *allocator, ENCODER_CODESET *cs)
+{
+    CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+    // Create a new table for runs of zeros with the default length
+    const int runs_table_length = RUNS_TABLE_LENGTH;
+
+    // The run length codes are stored immediately after the codebook header
+    RLV *old_codes = (RLV *)(((uint8_t *)cs->codebook) + sizeof(CODEBOOK));
+    int old_length = cs->codebook->length;
+
+    size_t runs_table_size = runs_table_length * sizeof(RLC) + sizeof(RUNS_TABLE);
+
+    RUNS_TABLE *runs_table;
+    RLC *new_codes;
+    int new_length = runs_table_length;
+
+    // Set the length of the table for encoding coefficient magnitudes
+    int mags_table_shift = 8;
+    int mags_table_length;
+    size_t mags_table_size;
+    MAGS_TABLE *mags_table;
+    VLE *mags_table_entries;
+
+    // Allocate the indexable table of run length codes
+    // (the original code used this allocation without checking for failure)
+    runs_table = allocator->Alloc(runs_table_size);
+    if (runs_table == NULL) {
+        return CODEC_ERROR_OUTOFMEMORY;
+    }
+    new_codes = (RLC *)(((uint8_t *)runs_table) + sizeof(RUNS_TABLE));
+
+    // Use a larger table if companding
+    if (CompandingParameter() > 0) {
+        //mags_table_shift = 11;
+        mags_table_shift = 10;
+    }
+
+    // Allocate the table for encoding coefficient magnitudes
+    mags_table_length = (1 << mags_table_shift);
+    mags_table_size = mags_table_length * sizeof(VLE) + sizeof(MAGS_TABLE);
+    mags_table = allocator->Alloc(mags_table_size);
+    if (mags_table == NULL) {
+        allocator->Free( (void *)runs_table);
+        return CODEC_ERROR_OUTOFMEMORY;
+    }
+
+    mags_table_entries = (VLE *)(((uint8_t *)mags_table) + sizeof(MAGS_TABLE));
+
+    // Create a more efficient codebook for encoding runs of zeros
+    error = ComputeRunLengthCodeTable(allocator,
+        old_codes, old_length, new_codes, new_length);
+    if (error != CODEC_ERROR_OKAY) {
+        // Free both tables (the original code leaked mags_table on this path)
+        allocator->Free( (void *)runs_table);
+        allocator->Free( (void *)mags_table);
+        return error;
+    }
+
+    // Store the new table for runs of zeros in the codeset
+    runs_table->length = runs_table_length;
+    cs->runs_table = runs_table;
+
+    error = FillMagnitudeEncodingTable(cs->codebook, mags_table_entries, mags_table_length, cs->flags);
+    if (error != CODEC_ERROR_OKAY) {
+        // Free both tables and clear the stale pointer stored in the codeset
+        allocator->Free( (void *)runs_table);
+        allocator->Free( (void *)mags_table);
+        cs->runs_table = NULL;
+        return error;
+    }
+
+    mags_table->length = mags_table_length;
+    cs->mags_table = mags_table;
+
+    // The codebooks have been initialized successfully
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Free all data structures allocated for the codebooks
+
+    Releases the runs and mags tables that were created by PrepareCodebooks.
+*/
+CODEC_ERROR ReleaseCodebooks(gpr_allocator *allocator, ENCODER_CODESET *cs)
+{
+    // Release the table of codes for runs of zeros
+    allocator->Free( (void *)cs->runs_table );
+
+    // Release the table of codes for coefficient magnitudes
+    allocator->Free( (void *)cs->mags_table );
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Compute a table of codewords for runs of zeros
+
+    The output table is indexed by the length of the run of zeros.  The input
+    codebook entries for runs of zeros are copied into a temporary codebook,
+    sorted into decreasing run length, and then used to fill the lookup table.
+
+    @param allocator Allocator used for the temporary sorted codebook
+    @param input_codes Entries from the original codebook
+    @param input_length Number of entries in the original codebook
+    @param output_codes Lookup table indexed by run length
+    @param output_length Number of entries in the lookup table
+    @return CODEC_ERROR_OKAY on success, otherwise an error code
+*/
+CODEC_ERROR ComputeRunLengthCodeTable(const gpr_allocator *allocator,
+                                      RLV *input_codes, int input_length,
+                                      RLC *output_codes, int output_length)
+{
+    // Need enough space for the codebook and the code for a single value
+    int runs_codebook_length = input_length + 1;
+    size_t runs_codebook_size = runs_codebook_length * sizeof(RLC);
+    RLC *runs_codebook = (RLC *)allocator->Alloc(runs_codebook_size);
+    bool single_zero_run_flag = false;
+    int input_index;
+    int runs_codebook_count = 0;
+
+    // The original code used this allocation without checking for failure
+    if (runs_codebook == NULL) {
+        return CODEC_ERROR_OUTOFMEMORY;
+    }
+
+    // Copy the codes for runs of zeros into the temporary codebook for sorting
+    for (input_index = 0; input_index < input_length; input_index++)
+    {
+        uint32_t count = input_codes[input_index].count;
+        int32_t value = input_codes[input_index].value;
+
+        // Is this code for a run of zeros?
+        if (value != 0 || count == 0) {
+            // Skip codebook entries for coefficient magnitudes and special codes
+            continue;
+        }
+
+        // Is this code for a single zero
+        if (count == 1 && value == 0) {
+            single_zero_run_flag = true;
+        }
+
+        // Check that the temporary runs codebook is not full
+        assert(runs_codebook_count < runs_codebook_length);
+
+        // Copy the code into the temporary runs codebook
+        runs_codebook[runs_codebook_count].size = input_codes[input_index].size;
+        runs_codebook[runs_codebook_count].bits = input_codes[input_index].bits;
+        runs_codebook[runs_codebook_count].count = count;
+
+        // Check the codebook entry
+        assert(runs_codebook[runs_codebook_count].size > 0);
+        assert(runs_codebook[runs_codebook_count].count > 0);
+
+        // Increment the count of codes in the temporary runs codebook
+        runs_codebook_count++;
+    }
+
+    // The runs codebook must include a run of a single zero; without it the
+    // table fill below would read runs_codebook[-1] when the codebook is empty,
+    // so fail immediately instead of continuing with a bad table
+    if (single_zero_run_flag == false || runs_codebook_count == 0)
+    {
+        assert(false);
+        allocator->Free(runs_codebook);
+        return CODEC_ERROR_UNEXPECTED;
+    }
+
+    // Sort the codewords into decreasing run length
+    SortDecreasingRunLength(runs_codebook, runs_codebook_count);
+
+    // The last code must be for a single run
+    assert(runs_codebook[runs_codebook_count - 1].count == 1);
+
+    // Fill the lookup table with codes for runs indexed by the run length
+    FillRunLengthEncodingTable(runs_codebook, runs_codebook_count, output_codes, output_length);
+
+    // Free the space used for the sorted codewords
+    allocator->Free(runs_codebook);
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Sort the codebook into decreasing length of the run
+
+    A simple bubble sort is used because the table is small and is usually
+    already in order.
+*/
+CODEC_ERROR SortDecreasingRunLength(RLC *codebook, int length)
+{
+    int i;
+    int j;
+
+    for (i = 0; i < length; i++)
+    {
+        for (j = i + 1; j < length; j++)
+        {
+            // Should not have more than one codebook entry with the same run length
+            assert(codebook[i].count != codebook[j].count);
+
+            // Swap the two entries if they are out of order
+            if (codebook[i].count < codebook[j].count)
+            {
+                RLC swap;
+
+                swap.size = codebook[i].size;
+                swap.bits = codebook[i].bits;
+                swap.count = codebook[i].count;
+
+                codebook[i].size = codebook[j].size;
+                codebook[i].bits = codebook[j].bits;
+                codebook[i].count = codebook[j].count;
+
+                codebook[j].size = swap.size;
+                codebook[j].bits = swap.bits;
+                codebook[j].count = swap.count;
+            }
+        }
+
+        // The entries processed so far should be in decreasing order
+        assert(i == 0 || codebook[i-1].count > codebook[i].count);
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Use a sparse run length code table to create an indexable table for faster encoding
+
+    Each output entry holds a composite codeword formed by concatenating codebook
+    codewords until the run length equal to the table index is covered, or until
+    the composite codeword no longer fits in a single bit word.  The count field
+    of each entry records how many zeros the entry actually encodes, which may be
+    less than the index when the codeword space runs out.
+
+    @param codebook Sparse table of run length codes sorted into decreasing run length
+    @param codebook_length Number of entries in the sparse codebook
+    @param table Output lookup table indexed by the run length
+    @param table_length Number of entries in the output lookup table
+*/
+CODEC_ERROR FillRunLengthEncodingTable(RLC *codebook, int codebook_length, RLC *table, int table_length)
+{
+    int i;      // Index into the lookup table
+    int j;      // Index into the codebook
+
+    // Use all of the bits except the sign bit for the codewords
+    int max_code_size = bit_word_count - 1;
+
+    // Check that the input codes are sorted into decreasing run length
+    for (j = 1; j < codebook_length; j++)
+    {
+        RLC *previous = &codebook[j-1];
+        RLC *current = &codebook[j];
+
+        assert(previous->count > current->count);
+        if (! (previous->count > current->count)) {
+            return CODEC_ERROR_UNEXPECTED;
+        }
+    }
+
+    // The last input code should be the code for a single zero
+    assert(codebook[codebook_length - 1].count == 1);
+
+    // Create the shortest codeword for each table entry
+    for (i = 0; i < table_length; i++)
+    {
+        int length = i;             // Length of the run for this table entry
+        uint32_t codeword = 0;      // Composite codeword for this run length
+        int codesize = 0;           // Number of bits in the composite codeword
+        int remaining;              // Remaining run length not covered by the codeword
+
+        remaining = length;
+
+        // Greedily consume the run using the longest codes first
+        // (the codebook is sorted into decreasing run length)
+        for (j = 0; j < codebook_length; j++)
+        {
+            int repetition;         // Number of times the codeword is used
+            int k;
+
+            // Nothing to do if the remaining run length is zero
+            if (remaining == 0) break;
+
+            // The number of times that the codeword is used is the number
+            // of times that it divides evenly into the remaining run length
+            repetition = remaining / codebook[j].count;
+
+            // Append the codes to the end of the composite codeword
+            for (k = 0; k < repetition; k++)
+            {
+                // Terminate the loop if the codeword will not fit
+                if (codebook[j].size > (max_code_size - codesize))
+                {
+                    if (codesize)
+                    {
+                        // Account for the copies appended so far, then finish this entry
+                        remaining -= (k * codebook[j].count);
+                        goto next;
+                    }
+                    else
+                    {
+                        break;
+                    }
+                }
+
+                // Shift the codeword to make room for the appended codes
+                codeword <<= codebook[j].size;
+
+                // Insert the codeword from the codebook at the end of the composite codeword
+                codeword |= codebook[j].bits;
+
+                // Increment the number of bits in the composite codeword
+                codesize += codebook[j].size;
+            }
+
+            // Reduce the run length by the amount that was consumed by the repeated codeword
+            remaining -= (k * codebook[j].count);
+        }
+
+next:
+        // Should have covered the entire run if the last codeword would fit
+        //assert(remaining == 0 || (max_code_size - codesize) < codebook[codebook_length - 1].size);
+
+        // Store the composite run length in the lookup table
+        table[i].bits = codeword;
+        table[i].size = codesize;
+        table[i].count = length - remaining;    // Number of zeros actually encoded by this entry
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Fill lookup table for encoding coefficient magnitudes
+
+    The table for encoding coefficient magnitudes is indexed by the magnitude.
+    Each entry is a codeword and the size in bits.  Depending on the codeset
+    flags, a companding curve is applied to the magnitude before looking up its
+    codeword, and magnitudes beyond the largest value in the codebook are
+    clamped to that maximum.
+
+    @param codebook Codebook containing codes for each coefficient magnitude
+    @param mags_table_entry Output array of codewords indexed by magnitude
+    @param mags_table_length Number of entries in the output array
+    @param flags Codeset flags selecting the companding curve
+
+    @todo Implement cubic companding
+*/
+CODEC_ERROR FillMagnitudeEncodingTable(const CODEBOOK *codebook, VLE *mags_table_entry, int mags_table_length, uint32_t flags)
+{
+    // Get the length of the codebook and a pointer to the entries
+    //int codebook_length = codebook->length;
+    RLV *codebook_entry = (RLV *)((uint8_t *)codebook + sizeof(CODEBOOK));
+
+    int32_t maximum_magnitude_value = 0;
+
+    uint32_t codebook_index;
+
+    // Table for the cubic companding curve (filled in below when enabled)
+    int16_t cubic_table[1025];
+    int cubic_table_length = sizeof(cubic_table) / sizeof(cubic_table[0]);
+
+    int mags_table_index;
+
+    // Find the maximum coefficient magnitude in the codebook
+    for (codebook_index = 0; codebook_index < codebook->length; codebook_index++)
+    {
+        // Does this codebook entry correspond to a coefficient magnitude?
+        // (entries with count == 1 encode a single value rather than a run)
+        if (codebook_entry[codebook_index].count == 1)
+        {
+            int32_t codebook_value = codebook_entry[codebook_index].value;
+            if (maximum_magnitude_value < codebook_value) {
+                maximum_magnitude_value = codebook_value;
+            }
+        }
+    }
+    assert(maximum_magnitude_value > 0);
+
+    if (flags & CODESET_FLAGS_COMPANDING_CUBIC)
+    {
+        ComputeCubicTable(cubic_table, cubic_table_length, maximum_magnitude_value);
+    }
+
+    // Fill each table entry with the codeword for that (signed) value
+    for (mags_table_index = 0; mags_table_index < mags_table_length; mags_table_index++)
+    {
+        // Compute the magnitude that corresponds to this index
+        int32_t magnitude = mags_table_index;
+        uint32_t codeword;
+        int codesize;
+
+        // Apply the companding curve
+        if (flags & CODESET_FLAGS_COMPANDING_CUBIC)
+        {
+            // Apply a cubic companding curve
+            assert(magnitude < cubic_table_length);
+            magnitude = cubic_table[magnitude];
+        }
+        else if (flags & CODESET_FLAGS_COMPANDING_NONE)
+        {
+            // Do not apply a companding curve
+        }
+        else
+        {
+            // Apply an old-style companding curve
+            magnitude = CompandedValue(magnitude);
+        }
+
+        // Is the magnitude larger than the number of entries in the codebook?
+        if (magnitude > maximum_magnitude_value) {
+            magnitude = maximum_magnitude_value;
+        }
+
+        // Find the codebook entry corresponding to this coefficient magnitude
+        // (linear search; the codebook is small enough that this happens only once at setup)
+        codeword = 0;
+        codesize = 0;
+        for (codebook_index = 0; codebook_index < codebook->length; codebook_index++)
+        {
+            if (codebook_entry[codebook_index].value == magnitude)
+            {
+                assert(codebook_entry[codebook_index].count == 1);
+                codeword = codebook_entry[codebook_index].bits;
+                codesize = codebook_entry[codebook_index].size;
+                break;
+            }
+        }
+        // Every magnitude must have a codeword of a sensible size
+        assert(0 < codesize && codesize <= 32);
+
+        mags_table_entry[mags_table_index].bits = codeword;
+        mags_table_entry[mags_table_index].size = codesize;
+
+    }
+
+    return CODEC_ERROR_OKAY;
+}
diff --git a/gpr/source/lib/vc5_encoder/codebooks.h b/gpr/source/lib/vc5_encoder/codebooks.h
new file mode 100755
index 0000000..dc8757a
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/codebooks.h
@@ -0,0 +1,71 @@
+/*! @file codebooks.h
+ *
+ * @brief Declaration of the routines for computing the encoding tables from a codebook.
+
+ * A codebook contains the variable-length codes for coefficient magnitudes, runs
+ * of zeros, and special codewords that mark entropy codec locations in the bitstream.
+
+ * The codebook is used to create tables and simplify entropy coding of signed values
+ * and runs of zeros.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEBOOKS_H
+#define CODEBOOKS_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+    /*!
+        Codeset data structure that is used by the encoder
+
+        The codeset bundles a codebook with the lookup tables derived from it
+        by @ref PrepareCodebooks and released by @ref ReleaseCodebooks.
+    */
+    typedef struct encoder_codeset {
+
+        const char *title;                  //!< Identifying string for the codeset
+
+        const CODEBOOK *codebook;           //!< Codebook for runs and magnitudes
+
+        uint32_t flags;                     //!< Encoding flags (see the codeset flags)
+
+        const MAGS_TABLE *mags_table;       //!< Table for encoding coefficient magnitudes
+
+        const RUNS_TABLE *runs_table;       //!< Table for encoding runs of zeros
+
+    } ENCODER_CODESET;
+
+ extern ENCODER_CODESET encoder_codeset_17;
+
+ CODEC_ERROR PrepareCodebooks(const gpr_allocator *allocator, ENCODER_CODESET *cs);
+
+ CODEC_ERROR ReleaseCodebooks(gpr_allocator *allocator, ENCODER_CODESET *cs);
+
+ CODEC_ERROR ComputeRunLengthCodeTable(const gpr_allocator *allocator,
+ RLV *input_codes, int input_length,
+ RLC *output_codes, int output_length);
+
+ CODEC_ERROR SortDecreasingRunLength(RLC *codebook, int length);
+
+ CODEC_ERROR FillRunLengthEncodingTable(RLC *codebook, int codebook_length,
+ RLC *table, int table_length);
+
+ CODEC_ERROR FillMagnitudeEncodingTable(const CODEBOOK *codebook, VLE *table, int size, uint32_t flags);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // CODEBOOKS_H
diff --git a/gpr/source/lib/vc5_encoder/component.c b/gpr/source/lib/vc5_encoder/component.c
new file mode 100755
index 0000000..72c33ed
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/component.c
@@ -0,0 +1,403 @@
+/*! @file component.c
+ *
+ * @brief Implementation of the inverse component transform and inverse component permutation.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+/*!
+    @brief Initialize a component transform
+
+    Clears the component count and the pointers to the transform arrays.
+    Returns an error if the transform argument is null.
+*/
+CODEC_ERROR InitComponentTransform(COMPONENT_TRANSFORM *transform)
+{
+    // Cannot initialize a null transform
+    if (transform == NULL) {
+        return CODEC_ERROR_UNEXPECTED;
+    }
+
+    transform->component_count = 0;
+    transform->transform_matrix = NULL;
+    transform->transform_offset = NULL;
+    transform->transform_scale = NULL;
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Initialize a component permutation
+
+    Clears the component count and the pointer to the permutation array.
+    Returns an error if the permutation argument is null.
+*/
+CODEC_ERROR InitComponentPermutation(COMPONENT_PERMUTATION *permutation)
+{
+    // Cannot initialize a null permutation
+    if (permutation == NULL) {
+        return CODEC_ERROR_UNEXPECTED;
+    }
+
+    permutation->component_count = 0;
+    permutation->permutation_array = NULL;
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Allocate the arrays in a component transform
+
+    The allocated arrays in the component transform are initialized to all zeros.
+    If any allocation fails, the arrays that were allocated are freed and the
+    transform is left cleared (the original code leaked the partial allocations).
+
+    @param allocator Allocator used for the transform arrays
+    @param transform Transform that receives the allocated arrays
+    @param component_count Number of components in the transform
+    @return CODEC_ERROR_OKAY on success, otherwise CODEC_ERROR_OUTOFMEMORY
+*/
+CODEC_ERROR AllocateComponentTransform(gpr_allocator *allocator, COMPONENT_TRANSFORM *transform, int component_count)
+{
+    size_t transform_matrix_size = component_count * component_count * sizeof(uint16_t);
+    size_t transform_offset_size = component_count * sizeof(uint16_t);
+    size_t transform_scale_size = component_count * sizeof(uint16_t);
+
+    transform->transform_matrix = (uint16_t *)allocator->Alloc(transform_matrix_size);
+    transform->transform_offset = (uint16_t *)allocator->Alloc(transform_offset_size);
+    transform->transform_scale = (uint16_t *)allocator->Alloc(transform_scale_size);
+
+    if (transform->transform_matrix == NULL ||
+        transform->transform_offset == NULL ||
+        transform->transform_scale == NULL) {
+
+        // Clean up whichever arrays were successfully allocated
+        if (transform->transform_matrix != NULL) allocator->Free(transform->transform_matrix);
+        if (transform->transform_offset != NULL) allocator->Free(transform->transform_offset);
+        if (transform->transform_scale != NULL) allocator->Free(transform->transform_scale);
+
+        // Leave the transform in a cleared state so stale pointers cannot be reused
+        transform->transform_matrix = NULL;
+        transform->transform_offset = NULL;
+        transform->transform_scale = NULL;
+        transform->component_count = 0;
+
+        return CODEC_ERROR_OUTOFMEMORY;
+    }
+
+    transform->component_count = component_count;
+
+    memset(transform->transform_matrix, 0, transform_matrix_size);
+    memset(transform->transform_offset, 0, transform_offset_size);
+    memset(transform->transform_scale, 0, transform_scale_size);
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Allocate the arrays in a component permutation
+
+    The allocated permutation array is initialized to all zeros.
+*/
+CODEC_ERROR AllocateComponentPermutation(gpr_allocator *allocator, COMPONENT_PERMUTATION *permutation, int component_count)
+{
+    const size_t array_size = component_count * sizeof(uint16_t);
+
+    // Allocate the permutation array and fail without touching the count
+    uint16_t *array = (uint16_t *)allocator->Alloc(array_size);
+    if (array == NULL) {
+        return CODEC_ERROR_OUTOFMEMORY;
+    }
+
+    // Clear the array and store it in the permutation
+    memset(array, 0, array_size);
+    permutation->permutation_array = array;
+    permutation->component_count = component_count;
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Release the arrays in a component transform
+
+    The transform is cleared after the arrays are freed so that the stale
+    pointers cannot be reused.  A null transform is ignored.
+*/
+CODEC_ERROR ReleaseComponentTransform(gpr_allocator *allocator, COMPONENT_TRANSFORM *transform)
+{
+    // Nothing to release for a null transform
+    if (transform == NULL) {
+        return CODEC_ERROR_OKAY;
+    }
+
+    allocator->Free(transform->transform_matrix);
+    allocator->Free(transform->transform_offset);
+    allocator->Free(transform->transform_scale);
+
+    memset(transform, 0, sizeof(COMPONENT_TRANSFORM));
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Release the arrays in a component permutation
+
+    The permutation is cleared after the array is freed so that the stale
+    pointer cannot be reused.  A null permutation is ignored.
+*/
+CODEC_ERROR ReleaseComponentPermutation(gpr_allocator *allocator, COMPONENT_PERMUTATION *permutation)
+{
+    // Nothing to release for a null permutation
+    if (permutation == NULL) {
+        return CODEC_ERROR_OKAY;
+    }
+
+    allocator->Free(permutation->permutation_array);
+    memset(permutation, 0, sizeof(COMPONENT_PERMUTATION));
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Initialize a component transform to the identity transform
+
+    The arrays are allocated filled with zeros, then each element on the
+    diagonal of the transform matrix is set to one.
+*/
+CODEC_ERROR InitComponentTransformIdentity(COMPONENT_TRANSFORM *transform, int component_count, gpr_allocator *allocator)
+{
+    CODEC_ERROR error;
+    int diagonal;
+
+    InitComponentTransform(transform);
+
+    error = AllocateComponentTransform(allocator, transform, component_count);
+    if (error != CODEC_ERROR_OKAY) {
+        return error;
+    }
+
+    // Set the diagonal elements of the transform matrix to one
+    for (diagonal = 0; diagonal < component_count; diagonal++)
+    {
+        transform->transform_matrix[diagonal * component_count + diagonal] = 1;
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Initialize a component permutation to the identity permutation
+
+    Each entry of the permutation array maps a component onto itself.
+*/
+CODEC_ERROR InitComponentPermutationIdentity(COMPONENT_PERMUTATION *permutation, int component_count, gpr_allocator *allocator)
+{
+    CODEC_ERROR error;
+    int index;
+
+    InitComponentPermutation(permutation);
+
+    error = AllocateComponentPermutation(allocator, permutation, component_count);
+    if (error != CODEC_ERROR_OKAY) {
+        return error;
+    }
+
+    // Map every component onto itself
+    for (index = 0; index < component_count; index++)
+    {
+        permutation->permutation_array[index] = index;
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+
+/*!
+    @brief Initialize a component transform to known values for testing
+
+    The matrix is filled with the linear index of each element, and each row
+    receives a distinct offset and scale factor.
+*/
+CODEC_ERROR InitComponentTransformTesting(COMPONENT_TRANSFORM *transform, int component_count, gpr_allocator *allocator)
+{
+    CODEC_ERROR error;
+    int row;
+
+    InitComponentTransform(transform);
+
+    error = AllocateComponentTransform(allocator, transform, component_count);
+    if (error != CODEC_ERROR_OKAY) {
+        return error;
+    }
+
+    for (row = 0; row < component_count; row++)
+    {
+        int column;
+
+        // Fill the row with the linear index of each matrix element
+        for (column = 0; column < component_count; column++)
+        {
+            int array_index = row * component_count + column;
+            transform->transform_matrix[array_index] = array_index;
+        }
+
+        // Use distinct values for the offset and scale of each row
+        transform->transform_offset[row] = (component_count - row);
+        transform->transform_scale[row] = row + 1;
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Initialize a component permutation to known values for testing
+
+    The permutation array is filled with the component indices in reverse order.
+*/
+CODEC_ERROR InitComponentPermutationTesting(COMPONENT_PERMUTATION *permutation, int component_count, gpr_allocator *allocator)
+{
+    CODEC_ERROR error;
+    int index;
+
+    InitComponentPermutation(permutation);
+
+    error = AllocateComponentPermutation(allocator, permutation, component_count);
+    if (error != CODEC_ERROR_OKAY) {
+        return error;
+    }
+
+    // Reverse the order of the components
+    for (index = 0; index < component_count; index++)
+    {
+        permutation->permutation_array[index] = component_count - index - 1;
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Return true if the component transform is the identity transform
+
+    The identity transform, as built by @ref InitComponentTransformIdentity,
+    has ones on the diagonal of the matrix, zeros elsewhere, and all offsets
+    and scale factors zero.
+
+    @param transform Transform to test (a null transform counts as identity)
+    @return true if the transform is the identity transform
+*/
+bool IsComponentTransformIdentity(COMPONENT_TRANSFORM *transform)
+{
+    int component_count;
+    int component_row;
+    int component_column;
+
+    // A null transform is equivalent to the identity transform
+    if (transform == NULL) return true;
+
+    component_count = transform->component_count;
+
+    for (component_row = 0; component_row < component_count; component_row++)
+    {
+        // Is the transform matrix the identity matrix?
+        for (component_column = 0; component_column < component_count; component_column++)
+        {
+            // Compute the index to the component element in the transform matrix
+            int array_index = component_row * component_count + component_column;
+
+            if (component_row == component_column)
+            {
+                if (transform->transform_matrix[array_index] != 1) return false;
+            }
+            else
+            {
+                if (transform->transform_matrix[array_index] != 0) return false;
+            }
+        }
+
+        // Is the transform offset for this component zero?
+        // (the original code compared the array pointer to zero, not the element)
+        if (transform->transform_offset[component_row] != 0) return false;
+
+        // Is the scale factor for this component zero?
+        if (transform->transform_scale[component_row] != 0) return false;
+    }
+
+    // The component transform is the identity transform
+    return true;
+}
+
+/*!
+    @brief Return true if the component permutation is the identity permutation
+
+    A null permutation is treated as the identity permutation.
+*/
+bool IsComponentPermutationIdentity(COMPONENT_PERMUTATION *permutation)
+{
+    int index;
+
+    // A null permutation is equivalent to the identity permutation
+    if (permutation == NULL) {
+        return true;
+    }
+
+    // Every component must map onto itself
+    for (index = 0; index < permutation->component_count; index++)
+    {
+        if (permutation->permutation_array[index] != index) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+/*!
+    @brief Write the component transform to the bitstream
+
+    The transform is written as a small chunk element containing, for each
+    component, one row of the transform matrix followed by the offset and the
+    scale factor, each as a single byte.  The payload is padded with zeros to
+    the next segment boundary.
+
+    @todo Use the InverseTransform16 syntax element if any values are larger than a single byte
+*/
+CODEC_ERROR WriteComponentTransform(COMPONENT_TRANSFORM *transform, BITSTREAM *stream)
+{
+    if (transform != NULL)
+    {
+        const int component_count = transform->component_count;
+        const size_t chunk_payload_size = (component_count * component_count + 2 * component_count) * sizeof(uint8_t);
+        // Pad only up to the next segment boundary; the original expression
+        // wrote a full extra segment when the payload was already aligned
+        const size_t chunk_payload_padding = (sizeof(SEGMENT) - (chunk_payload_size % sizeof(SEGMENT))) % sizeof(SEGMENT);
+        const int chunk_payload_length = (int)((chunk_payload_size + sizeof(SEGMENT) - 1) / sizeof(SEGMENT));
+        int i;
+
+        // Write the tag value pair for the small chunk element for the component transform
+        PutTagPair(stream, CODEC_TAG_InverseTransform, chunk_payload_length);
+
+        for (i = 0; i < component_count; i++)
+        {
+            int offset_value;
+            int scale_value;
+
+            // Write the row at this index in the transform matrix
+            int j;
+
+            for (j = 0; j < component_count; j++)
+            {
+                int array_index = i * component_count + j;
+                int array_value = transform->transform_matrix[array_index];
+
+                assert(INT8_MIN <= array_value && array_value <= INT8_MAX);
+                PutBits(stream, array_value, 8);
+            }
+
+            // Write the offset
+            offset_value = transform->transform_offset[i];
+            assert(INT8_MIN <= offset_value && offset_value <= INT8_MAX);
+            PutBits(stream, offset_value, 8);
+
+            // Write the scale
+            scale_value = transform->transform_scale[i];
+            assert(0 <= scale_value && scale_value <= UINT8_MAX);
+            PutBits(stream, scale_value, 8);
+        }
+
+        // Pad the remainder of the chunk payload with zeros
+        for (i = 0; i < (int)chunk_payload_padding; i++) {
+            PutBits(stream, 0, 8);
+        }
+
+        // Check that the bitstream is aligned on a segment boundary
+        assert(IsAlignedSegment(stream));
+        if (! (IsAlignedSegment(stream))) {
+            return CODEC_ERROR_UNEXPECTED;
+        }
+
+        return CODEC_ERROR_OKAY;
+    }
+
+    return CODEC_ERROR_UNEXPECTED;
+}
+
+/*!
+    @brief Write the component permutation to the bitstream
+
+    The permutation is written as a small chunk element containing one byte
+    per component, padded with zeros to the next segment boundary.
+*/
+CODEC_ERROR WriteComponentPermutation(COMPONENT_PERMUTATION *permutation, BITSTREAM *stream)
+{
+    if (permutation != NULL)
+    {
+        const int component_count = permutation->component_count;
+        const size_t chunk_payload_size = component_count * sizeof(uint8_t);
+        // Pad only up to the next segment boundary; the original expression
+        // wrote a full extra segment when the payload was already aligned
+        const size_t chunk_payload_padding = (sizeof(SEGMENT) - (chunk_payload_size % sizeof(SEGMENT))) % sizeof(SEGMENT);
+        const int chunk_payload_length = (int)((chunk_payload_size + sizeof(SEGMENT) - 1) / sizeof(SEGMENT));
+        int i;
+
+        // Write the tag value pair for the small chunk element for the component permutation
+        PutTagPair(stream, CODEC_TAG_InversePermutation, chunk_payload_length);
+
+        for (i = 0; i < component_count; i++)
+        {
+            uint8_t value = (uint8_t)permutation->permutation_array[i];
+            PutBits(stream, value, 8);
+        }
+
+        // Pad the remainder of the chunk payload with zeros
+        for (i = 0; i < (int)chunk_payload_padding; i++) {
+            PutBits(stream, 0, 8);
+        }
+
+        // Check that the bitstream is aligned on a segment boundary
+        assert(IsAlignedSegment(stream));
+        if (! (IsAlignedSegment(stream))) {
+            return CODEC_ERROR_UNEXPECTED;
+        }
+
+        return CODEC_ERROR_OKAY;
+    }
+
+    return CODEC_ERROR_UNEXPECTED;
+}
diff --git a/gpr/source/lib/vc5_encoder/component.h b/gpr/source/lib/vc5_encoder/component.h
new file mode 100755
index 0000000..1c194c4
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/component.h
@@ -0,0 +1,100 @@
+/*! @file component.h
+ *
+ * @brief Declaration of routines for the inverse component transform and permutation.
+
+ * A component transform applies a small matrix, together with per-component
+ * offsets and scale factors, across the channels of the image.
+
+ * A component permutation reorders the channels of the image.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef COMPONENT_H
+#define COMPONENT_H
+
+/*!
+    @brief Data structure for the component transform (16 bit precision)
+ */
+typedef struct _component_transform
+{
+    uint16_t component_count;       //!< Number of components in the transform
+    uint16_t *transform_matrix;     //!< component_count x component_count matrix (row major)
+    uint16_t *transform_offset;     //!< One offset per component
+    uint16_t *transform_scale;      //!< One scale factor per component
+
+} COMPONENT_TRANSFORM;
+
+/*!
+    @brief Data structure for the component permutation
+ */
+typedef struct _component_permutation
+{
+    uint16_t component_count;       //!< Number of components in the permutation
+    uint16_t *permutation_array;    //!< One entry per component giving its new position
+
+} COMPONENT_PERMUTATION;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ CODEC_ERROR InitComponentTransform(COMPONENT_TRANSFORM *transform);
+
+ CODEC_ERROR InitComponentPermutation(COMPONENT_PERMUTATION *permutation);
+
+ CODEC_ERROR AllocateComponentTransform(gpr_allocator *allocator,
+ COMPONENT_TRANSFORM *transform,
+ int component_count);
+
+ CODEC_ERROR AllocateComponentPermutation(gpr_allocator *allocator,
+ COMPONENT_PERMUTATION *permutation,
+ int component_count);
+
+ CODEC_ERROR ReleaseComponentTransform(gpr_allocator *allocator,
+ COMPONENT_TRANSFORM *transform);
+
+ CODEC_ERROR ReleaseComponentPermutation(gpr_allocator *allocator,
+ COMPONENT_PERMUTATION *permutation);
+
+ CODEC_ERROR InitComponentTransformIdentity(COMPONENT_TRANSFORM *transform,
+ int component_count,
+ gpr_allocator *allocator);
+
+ CODEC_ERROR InitComponentPermutationIdentity(COMPONENT_PERMUTATION *permutation,
+ int component_count,
+ gpr_allocator *allocator);
+
+ CODEC_ERROR InitComponentTransformTesting(COMPONENT_TRANSFORM *transform,
+ int component_count,
+ gpr_allocator *allocator);
+
+ CODEC_ERROR InitComponentPermutationTesting(COMPONENT_PERMUTATION *permutation,
+ int component_count,
+ gpr_allocator *allocator);
+
+ bool IsComponentTransformIdentity(COMPONENT_TRANSFORM *transform);
+
+ bool IsComponentPermutationIdentity(COMPONENT_PERMUTATION *permutation);
+
+ CODEC_ERROR WriteComponentTransform(COMPONENT_TRANSFORM *transform, BITSTREAM *stream);
+
+ CODEC_ERROR WriteComponentPermutation(COMPONENT_PERMUTATION *permutation, BITSTREAM *stream);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // COMPONENT_H
diff --git a/gpr/source/lib/vc5_encoder/encoder.c b/gpr/source/lib/vc5_encoder/encoder.c
new file mode 100755
index 0000000..0701a4d
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/encoder.c
@@ -0,0 +1,2555 @@
+/*! @file encoder.c
+ *
+ * @brief Implementation of functions for encoding samples.
+ *
+ * Encoded samples must be aligned on a four byte boundary.
+ * Any constraints on the alignment of data within the sample
+ * are handled by padding the sample to the correct alignment.
+ *
+ * Note that the encoded dimensions are the actual dimensions of each channel
+ * (or the first channel in the case of 4:2:2 sampling) in the encoded sample.
+ * The display offsets and dimensions specify the portion of the encoded frame
+ * that should be displayed, but in the case of a Bayer image the display
+ * dimensions are doubled to account for the effects of the demosaic filter.
+ * If a Bayer image is encoded to Bayer format (no demosaic filter applied),
+ * then the encoded dimensions will be the same as grid of Bayer quads, less
+ * any padding required during encoding, but the display dimensions and
+ * offset will be reported as if a demosaic filter were applied to scale the
+ * encoded frame to the display dimensions (doubling the width and height).
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+#if ENABLED(NEON)
+#include <arm_neon.h>
+#endif
+
+/*!
+ @brief Align the bitstream to a byte boundary
+
+ Enough bits are written to the bitstream to align the
+ bitstream to the next byte.
+ */
+static CODEC_ERROR AlignBitsByte(BITSTREAM *bitstream)
+{
+    // Number of bits past the most recent byte boundary
+    BITCOUNT remainder = bitstream->count % 8;
+
+    if (bitstream->count > 0 && remainder != 0)
+    {
+        // Pad the bitstream with zero bits up to the next byte boundary
+        PutBits(bitstream, 0, 8 - remainder);
+    }
+
+    assert((bitstream->count % 8) == 0);
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Align the bitstream to the next segment
+
+ The corresponding function in the existing codec flushes the bitstream.
+
+ @todo Is it necessary to flush the bitstream (and the associated byte stream)
+ after aligning the bitstream to a segment boundary?
+ */
+static CODEC_ERROR AlignBitsSegment(BITSTREAM *bitstream)
+{
+    STREAM *stream = bitstream->stream;
+    size_t total_bytes;
+
+    // Start from a byte boundary
+    AlignBitsByte(bitstream);
+    assert((bitstream->count % 8) == 0);
+
+    // Total bytes = bytes buffered in the bitstream plus bytes already written to the stream
+    total_bytes = (bitstream->count / 8) + stream->byte_count;
+
+    // Emit zero bytes until the total is a multiple of the segment (tag-value pair) size
+    for (; (total_bytes % sizeof(TAGVALUE)) != 0; total_bytes++)
+    {
+        PutBits(bitstream, 0, 8);
+    }
+
+    // The bitstream should now be aligned to the next segment
+    assert((bitstream->count == 0) || (bitstream->count == bit_word_count));
+    assert((total_bytes % sizeof(TAGVALUE)) == 0);
+
+    return CODEC_ERROR_OKAY;
+}
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+/*!
+ @brief Set default values for the pattern element structure
+
+ Some image formats imply specific parameters for the dimensions of the
+ pattern elements and the number of components per sample. If the pattern
+ element structure has not been fully specified by the command-line
+ arguments, then missing values can be filled in from the default values
+ for the image format.
+ */
+bool SetImageFormatDefaults(ENCODER *encoder)
+{
+    switch (encoder->image_format)
+    {
+    // NOTE(review): every statement from here down to the first case label is
+    // unreachable -- a switch transfers control directly to a matching case label.
+    // A case label for some non-RAW image format appears to be missing; confirm
+    // the intended format before relying on this branch.
+#if VC5_ENABLED_PART(VC5_PART_COLOR_SAMPLING)
+    if (IsPartEnabled(encoder->enabled_parts, VC5_PART_COLOR_SAMPLING))
+    {
+        // The components per sample parameter is not applicable to VC-5 Part 4 bitstreams
+        assert(encoder->components_per_sample == 0);
+        encoder->components_per_sample = 0;
+    }
+    else
+    {
+        // Set the default components per sample assuming no alpha channel
+        if (encoder->components_per_sample == 0) {
+            encoder->components_per_sample = 3;
+        }
+    }
+#else
+    // Set the default components per sample assuming no alpha channel
+    if (encoder->components_per_sample == 0) {
+        encoder->components_per_sample = 3;
+    }
+#endif
+    return true;
+
+    case IMAGE_FORMAT_RAW:
+        // A Bayer image uses a 2x2 pattern with one component per sample;
+        // fill in only the values the caller left unspecified (zero)
+        if (encoder->pattern_width == 0) {
+            encoder->pattern_width = 2;
+        }
+
+        if (encoder->pattern_height == 0) {
+            encoder->pattern_height = 2;
+        }
+
+        if (encoder->components_per_sample == 0) {
+            encoder->components_per_sample = 1;
+        }
+
+        return true;
+
+    default:
+        // Unable to set default values for the pattern elements
+        return false;
+    }
+}
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+/*!
+ @brief Check for inconsistent values for the parameters specified on the command-line
+
+ This routine looks for inconsistencies between the image format, the dimensions of the
+ pattern elements, and the number of components per sample.
+ */
+bool CheckImageFormatParameters(ENCODER *encoder)
+{
+    // Only the Bayer (RAW) image format can be verified
+    if (encoder->image_format != IMAGE_FORMAT_RAW) {
+        // Cannot verify the parameters for an unknown image format
+        return false;
+    }
+
+    // A Bayer image must use a 2x2 pattern with one component per sample
+    return (encoder->pattern_width == 2) &&
+           (encoder->pattern_height == 2) &&
+           (encoder->components_per_sample == 1);
+}
+#endif
+
+/*!
+ @brief Prepare the encoder state
+*/
+CODEC_ERROR PrepareEncoderState(ENCODER *encoder,
+                                const UNPACKED_IMAGE *image,
+                                const ENCODER_PARAMETERS *parameters)
+{
+    CODEC_STATE *codec = &encoder->codec;
+    int channel_count = image->component_count;
+    int channel_number;
+
+    // Set the default value for the number of bits per lowpass coefficient
+    PRECISION lowpass_precision = 16;
+
+    // An explicit lowpass precision in the parameters overrides the default
+    if (parameters->encoded.lowpass_precision > 0) {
+        lowpass_precision = parameters->encoded.lowpass_precision;
+    }
+
+    for (channel_number = 0; channel_number < channel_count; channel_number++)
+    {
+        DIMENSION width = image->component_array_list[channel_number].width;
+        DIMENSION height = image->component_array_list[channel_number].height;
+        PRECISION bits_per_component = image->component_array_list[channel_number].bits_per_component;
+
+        // Copy the component array parameters into the encoder state
+        encoder->channel[channel_number].width = width;
+        encoder->channel[channel_number].height = height;
+        encoder->channel[channel_number].bits_per_component = bits_per_component;
+
+        // The lowpass bands in all channels are encoded with the same precision
+        encoder->channel[channel_number].lowpass_precision = lowpass_precision;
+    }
+
+    // Record the number of channels in the encoder state
+    encoder->channel_count = channel_count;
+
+    // The encoder uses three wavelet transform levels for each channel
+    encoder->wavelet_count = 3;
+
+    // Set the channel encoding order
+    if (parameters->channel_order_count > 0)
+    {
+        // Use the channel order specified by the encoding parameters
+        encoder->channel_order_count = parameters->channel_order_count;
+        memcpy(encoder->channel_order_table, parameters->channel_order_table, sizeof(encoder->channel_order_table));
+    }
+    else
+    {
+        // Use the default channel encoding order (identity: channels in array order)
+        for (channel_number = 0; channel_number < channel_count; channel_number++)
+        {
+            encoder->channel_order_table[channel_number] = channel_number;
+        }
+        encoder->channel_order_count = channel_count;
+    }
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    // The actual image dimensions are reported in the bitstream header (VC-5 Part 3)
+    encoder->image_width = parameters->input.width;
+    encoder->image_height = parameters->input.height;
+    encoder->pattern_width = parameters->pattern_width;
+    encoder->pattern_height = parameters->pattern_height;
+    encoder->components_per_sample = parameters->components_per_sample;
+    encoder->image_format = parameters->encoded.format;
+    encoder->max_bits_per_component = MaxBitsPerComponent(image);
+
+    // Set default parameters for the image format
+    SetImageFormatDefaults(encoder);
+
+    if (!CheckImageFormatParameters(encoder)) {
+        return CODEC_ERROR_BAD_IMAGE_FORMAT;
+    }
+#else
+    // The dimensions of the image is the maximum of the channel dimensions (VC-5 Part 1)
+    GetMaximumChannelDimensions(image, &encoder->image_width, &encoder->image_height);
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+    // Interlaced images are encoded as separate layers
+    encoder->progressive = parameters->progressive;
+    encoder->top_field_first = TRUE;
+    encoder->frame_inverted = FALSE;
+    // NOTE(review): this unconditionally overwrites the progressive flag copied from
+    // the parameters three lines above -- confirm which assignment is intended
+    encoder->progressive = 1;
+
+    // Set the number of layers (sub-samples) in the encoded sample
+    encoder->layer_count = parameters->layer_count;
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+    // by default, all sections are enabled
+    encoder->enabled_sections = parameters->enabled_sections;
+#endif
+
+    // Initialize the codec state with the default parameters used by the decoding process
+    return PrepareCodecState(codec);
+}
+
+/*!
+ @brief Initialize the encoder data structure
+
+ This routine performs the same function as a C++ constructor.
+ The encoder is initialized with default values that are replaced
+ by the parameters used to prepare the encoder (see @ref PrepareEncoder).
+
+ This routine does not perform all of the initializations required
+ to prepare the encoder data structure for decoding a sample.
+*/
+CODEC_ERROR InitEncoder(ENCODER *encoder, const gpr_allocator *allocator, const VERSION *version)
+{
+    assert(encoder != NULL);
+    if (encoder == NULL) {
+        return CODEC_ERROR_NULLPTR;
+    }
+
+    // Start from a zeroed encoder state
+    memset(encoder, 0, sizeof(ENCODER));
+
+    // Assign a memory allocator to the encoder
+    encoder->allocator = (gpr_allocator *)allocator;
+
+    // Write debugging information to standard output
+    encoder->logfile = stdout;
+
+    if (version != NULL)
+    {
+        // Store the version number in the encoder
+        encoder->version = *version;
+    }
+    // Otherwise the version number remains zero from the memset above
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Encode the image into the output stream
+
+ This is a convenience routine for applications that use a byte stream to
+ represent a memory buffer or binary file that will store the encoded image.
+
+ The image is unpacked into a set of component arrays by the image unpacking
+ process invoked by calling the routine @ref ImageUnpackingProcess. The image
+ unpacking process is informative and is not part of the VC-5 standard.
+
+ The main entry point for encoding the component arrays output by the image
+ unpacking process is @ref EncodingProcess.
+*/
+CODEC_ERROR EncodeImage(IMAGE *image, STREAM *stream, RGB_IMAGE *rgb_image, ENCODER_PARAMETERS *parameters)
+{
+    CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+    // Allocate data structures for the encoder state and the bitstream
+    ENCODER encoder;
+    BITSTREAM bitstream;
+
+    UNPACKED_IMAGE unpacked_image;
+
+    SetupEncoderLogCurve();
+
+    // Unpack the image into a set of component arrays
+    error = ImageUnpackingProcess(image, &unpacked_image, parameters, &parameters->allocator);
+    if (error != CODEC_ERROR_OKAY) {
+        return error;
+    }
+
+    // Initialize the bitstream data structure
+    InitBitstream(&bitstream);
+
+    // Bind the bitstream to the byte stream
+    error = AttachBitstream(&bitstream, stream);
+    if (error != CODEC_ERROR_OKAY) {
+        // Bug fix: the component arrays were previously leaked on this error path
+        ReleaseComponentArrays(&parameters->allocator, &unpacked_image, unpacked_image.component_count);
+        return error;
+    }
+
+    // Encode the component arrays into the bitstream
+    error = EncodingProcess(&encoder, &unpacked_image, &bitstream, parameters);
+
+    if (error == CODEC_ERROR_OKAY &&
+        rgb_image != NULL && parameters->rgb_resolution == GPR_RGB_RESOLUTION_SIXTEENTH)
+    {
+        // Thumbnail: convert the third-level lowpass bands to a small RGB image
+        SetupDecoderLogCurve();
+
+        WaveletToRGB(parameters->allocator,
+                     encoder.transform[0].wavelet[2]->data[LL_BAND], encoder.transform[1].wavelet[2]->data[LL_BAND], encoder.transform[2].wavelet[2]->data[LL_BAND],
+                     encoder.transform[0].wavelet[2]->width, encoder.transform[0].wavelet[2]->height, encoder.transform[0].wavelet[2]->width,
+                     rgb_image, 14, 8, &parameters->rgb_gain );
+    }
+
+    // Release the component arrays even when encoding failed (bug fix: previously
+    // an encoding error returned immediately and leaked the arrays, the bitstream,
+    // and any encoder resources)
+    {
+        CODEC_ERROR release_error =
+            ReleaseComponentArrays(&parameters->allocator, &unpacked_image, unpacked_image.component_count);
+        if (error == CODEC_ERROR_OKAY) {
+            error = release_error;
+        }
+    }
+
+    // Release any resources allocated by the bitstream
+    ReleaseBitstream(&bitstream);
+
+    // Release any resources allocated by the encoder
+    ReleaseEncoder(&encoder);
+
+    return error;
+}
+
+/*!
+ @brief Reference implementation of the VC-5 encoding process.
+
+ The encoder takes input image in the form of a list of component arrays
+ produced by the image unpacking process and encodes the image into the
+ bitstream.
+
+ External parameters are used to initialize the encoder state.
+
+ The encoder state determines how the image is encoded into the bitstream.
+*/
+CODEC_ERROR EncodingProcess(ENCODER *encoder,
+                            const UNPACKED_IMAGE *image,
+                            BITSTREAM *bitstream,
+                            const ENCODER_PARAMETERS *parameters)
+{
+    CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+    // Initialize the encoder using the parameters provided by the application
+    error = PrepareEncoder(encoder, image, parameters);
+    assert(error == CODEC_ERROR_OKAY);
+    if (! (error == CODEC_ERROR_OKAY)) {
+        return error;
+    }
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    if (encoder->image_format == IMAGE_FORMAT_UNKNOWN) {
+        return CODEC_ERROR_BAD_IMAGE_FORMAT;
+    }
+    if ( parameters->verbose_flag )
+    {
+        LogPrint("Pattern width: %d\n", encoder->pattern_width);
+        LogPrint("Pattern height: %d\n", encoder->pattern_height);
+
+        if (!IsPartEnabled(encoder->enabled_parts, VC5_PART_COLOR_SAMPLING)) {
+            LogPrint("Components per sample: %d\n", encoder->components_per_sample);
+        }
+        LogPrint("Internal precision: %d\n", encoder->internal_precision);
+
+        LogPrint("\n");
+    }
+#endif
+
+    // Write the bitstream start marker
+    PutBitstreamStartMarker(bitstream);
+
+    // Allocate six pairs of lowpass and highpass buffers for each channel
+    AllocateEncoderHorizontalBuffers(encoder);
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+
+    // Bug fix: this branch previously used '.' member access on the encoder pointer,
+    // passed '&bitstream' (BITSTREAM **) where a BITSTREAM * is expected, and called
+    // EncodeMultipleImages, which is not defined (EncodeMultipleFrames is).
+    if (!IsPartEnabled(encoder->enabled_parts, VC5_PART_LAYERS) || encoder->layer_count == 1)
+    {
+        // Encode the image as a single layer in the sample
+        error = EncodeSingleImage(encoder, image, bitstream);
+    }
+    else
+    {
+        // Each layer encodes a separate frame
+        IMAGE *image_array[MAX_LAYER_COUNT];
+        memset(image_array, 0, sizeof(image_array));
+
+        // The encoding parameters must include a decompositor
+        assert (parameters->decompositor != NULL);
+
+        // Decompose the frame into individual frames for each layer
+        error = parameters->decompositor(image, image_array, encoder->layer_count);
+        if (error != CODEC_ERROR_OKAY) {
+            return error;
+        }
+
+        // Encode each frame as a separate layer in the sample
+        error = EncodeMultipleFrames(encoder, image_array, encoder->layer_count, bitstream);
+    }
+#else
+
+    // Encode one image into the bitstream
+    error = EncodeSingleImage(encoder, image, bitstream);
+
+#endif
+
+    DeallocateEncoderHorizontalBuffers(encoder);
+
+    return error;
+}
+
+/*!
+ @brief Initialize the encoder using the specified parameters
+
+ It is important to use the correct encoded image dimensions (including padding)
+ and the correct encoded format to initialize the encoder. The decoded image
+ dimensions must be adjusted to account for a lower decoded resolution if applicable.
+
+ It is expected that the parameters data structure may change over time
+ with additional or different fields, depending on the codec profile or
+ changes made to the codec during further development. The parameters
+ data structure may have a version number or may evolve into a dictionary
+ of key-value pairs with missing keys indicating that a default value
+ should be used.
+
+ @todo Add more error checking to this top-level routine
+*/
+CODEC_ERROR PrepareEncoder(ENCODER *encoder,
+                           const UNPACKED_IMAGE *image,
+                           const ENCODER_PARAMETERS *parameters)
+{
+    CODEC_ERROR error = CODEC_ERROR_OKAY;
+    VERSION version = VERSION_INITIALIZER(VC5_VERSION_MAJOR, VC5_VERSION_MINOR, VC5_VERSION_REVISION, 0);
+    PRECISION max_bits_per_component = MaxBitsPerComponent(image);
+
+    // Initialize the encoder data structure
+    // (bug fix: the error result was previously ignored)
+    error = InitEncoder(encoder, &parameters->allocator, &version);
+    if (error != CODEC_ERROR_OKAY) {
+        return error;
+    }
+
+    // Set the mask that specifies which parts of the VC-5 standard are supported
+    encoder->enabled_parts = parameters->enabled_parts;
+
+    // Verify that the enabled parts are correct
+    error = VerifyEnabledParts(encoder->enabled_parts);
+    assert(error == CODEC_ERROR_OKAY);
+    if (! (error == CODEC_ERROR_OKAY)) {
+        return error;
+    }
+
+    // Remember the internal precision used by the image unpacking process
+    encoder->internal_precision = minimum(max_bits_per_component, default_internal_precision);
+
+    // Initialize the encoding parameters and the codec state
+    // (bug fix: this can fail with CODEC_ERROR_BAD_IMAGE_FORMAT, which was previously ignored)
+    error = PrepareEncoderState(encoder, image, parameters);
+    if (error != CODEC_ERROR_OKAY) {
+        return error;
+    }
+
+    // Allocate the wavelet transforms
+    AllocEncoderTransforms(encoder);
+
+    // Initialize the quantizer
+    SetEncoderQuantization(encoder, parameters);
+
+    // Initialize the wavelet transforms
+    PrepareEncoderTransforms(encoder);
+
+    // Allocate the scratch buffers used for encoding
+    AllocEncoderBuffers(encoder);
+
+    // Initialize the encoding tables for magnitudes and runs of zeros
+    error = PrepareCodebooks(&parameters->allocator, &encoder_codeset_17 );
+    if (error != CODEC_ERROR_OKAY) {
+        return error;
+    }
+
+    // Select the codebook for encoding
+    encoder->codeset = &encoder_codeset_17;
+
+    // The encoder is ready to encode a sample
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Free all resources allocated by the encoder
+*/
+CODEC_ERROR ReleaseEncoder(ENCODER *encoder)
+{
+    // A null encoder has nothing to release
+    if (encoder == NULL) {
+        return CODEC_ERROR_OKAY;
+    }
+
+    gpr_allocator *allocator = encoder->allocator;
+    int channel_index;
+
+    // Free the encoding tables
+    ReleaseCodebooks(allocator, encoder->codeset);
+
+    // Free the wavelet tree for each channel
+    for (channel_index = 0; channel_index < MAX_CHANNEL_COUNT; channel_index++)
+    {
+        ReleaseTransform(allocator, &encoder->transform[channel_index]);
+    }
+
+    //TODO: Free the encoding buffers
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Encode a single image into the bitstream
+
+ This is the main entry point for encoding a single image into the bitstream.
+ The encoder must have been initialized by a call to @ref PrepareEncoder.
+
+ The unpacked image is the set of component arrays output by the image unpacking
+ process. The bitstream must be initialized and bound to a byte stream before
+ calling this routine.
+*/
+CODEC_ERROR EncodeSingleImage(ENCODER *encoder, const UNPACKED_IMAGE *image, BITSTREAM *stream)
+{
+    CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+    // Write the sample header that is common to all layers
+    error = EncodeBitstreamHeader(encoder, stream);
+
+    // Write the sample extension header to the bitstream
+    if (error == CODEC_ERROR_OKAY) {
+        error = EncodeExtensionHeader(encoder, stream);
+    }
+
+    // Encode each component array as a separate channel in the bitstream
+    if (error == CODEC_ERROR_OKAY) {
+        error = EncodeMultipleChannels(encoder, image, stream);
+    }
+
+    // Finish the encoded sample after the last layer
+    if (error == CODEC_ERROR_OKAY) {
+        error = EncodeBitstreamTrailer(encoder, stream);
+    }
+
+    assert(error == CODEC_ERROR_OKAY);
+    if (error != CODEC_ERROR_OKAY) {
+        return error;
+    }
+
+    // Force any data remaining in the bitstream to be written into the sample
+    FlushBitstream(stream);
+
+    return error;
+}
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+/*!
+ @brief Encode multiple frames as separate layers in a sample
+
+ The encoder must have been initialized by a call to @ref PrepareEncoder.
+
+ The bitstream must be initialized and bound to a byte stream before
+ calling this routine.
+*/
+CODEC_ERROR EncodeMultipleFrames(ENCODER *encoder, IMAGE *image_array[], int frame_count, BITSTREAM *stream)
+{
+    CODEC_ERROR error = CODEC_ERROR_OKAY;
+    //CODEC_STATE *codec = &encoder->codec;
+
+    int layer_index;
+
+    // The number of frames must match the number of layers in the sample
+    assert(frame_count == encoder->layer_count);
+
+    // Initialize the codec state
+    // NOTE(review): PrepareEncoderState is defined above with three parameters
+    // (encoder, image, parameters); this one-argument call cannot compile if
+    // VC5_PART_LAYERS is enabled -- this path appears to be stale and untested
+    PrepareEncoderState(encoder);
+
+    // Write the bitstream start marker
+    error = PutBitstreamStartMarker(stream);
+    assert(error == CODEC_ERROR_OKAY);
+    if (! (error == CODEC_ERROR_OKAY)) {
+        return error;
+    }
+
+    // Write the bitstream header that is common to all layers
+    error = EncodeBitstreamHeader(encoder, stream);
+    assert(error == CODEC_ERROR_OKAY);
+    if (! (error == CODEC_ERROR_OKAY)) {
+        return error;
+    }
+
+    // Write the extension header to the bitstream
+    error = EncodeExtensionHeader(encoder, stream);
+    assert(error == CODEC_ERROR_OKAY);
+    if (! (error == CODEC_ERROR_OKAY)) {
+        return error;
+    }
+
+    // Encode each frame in a separate layer in the sample
+    for (layer_index = 0; layer_index < frame_count; layer_index++)
+    {
+        error = EncodeLayer(encoder, image_array[layer_index]->buffer, image_array[layer_index]->pitch, stream);
+        assert(error == CODEC_ERROR_OKAY);
+        if (! (error == CODEC_ERROR_OKAY)) {
+            return error;
+        }
+    }
+
+    error = EncodeSampleExtensionTrailer(encoder, stream);
+    assert(error == CODEC_ERROR_OKAY);
+    if (! (error == CODEC_ERROR_OKAY)) {
+        return error;
+    }
+
+    // Finish the encoded sample after the last layer
+    error = EncodeSampleTrailer(encoder, stream);
+    assert(error == CODEC_ERROR_OKAY);
+    if (! (error == CODEC_ERROR_OKAY)) {
+        return error;
+    }
+
+    // Force any data remaining in the bitstream to be written into the sample
+    FlushBitstream(stream);
+
+    // Check that the sample offset stack has been emptied
+    assert(stream->sample_offset_count == 0);
+
+    //TODO: Any resources need to be released?
+
+    // Done encoding all layers in the sample
+    return error;
+}
+#endif
+
+/*!
+ @brief Initialize the wavelet transforms for encoding
+*/
+CODEC_ERROR PrepareEncoderTransforms(ENCODER *encoder)
+{
+    int channel_index;
+
+    // Set the prescale and quantization in each wavelet transform
+    for (channel_index = 0; channel_index < encoder->channel_count; channel_index++)
+    {
+        TRANSFORM *transform = &encoder->transform[channel_index];
+        int bits_per_component = encoder->channel[channel_index].bits_per_component;
+
+        // Set the prescaling (may be used in setting the quantization)
+        SetTransformPrescale(transform, bits_per_component);
+
+        //TODO: Are the wavelet scale factors still used?
+
+        // Must set the transform scale if not calling SetTransformQuantization
+        SetTransformScale(transform);
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Unpack the image into component arrays for encoding
+*/
+CODEC_ERROR ImageUnpackingProcess(const PACKED_IMAGE *input,
+                                  UNPACKED_IMAGE *output,
+                                  const ENCODER_PARAMETERS *parameters,
+                                  gpr_allocator *allocator)
+{
+    ENABLED_PARTS enabled_parts = parameters->enabled_parts;
+    int channel_count;
+    DIMENSION max_channel_width;
+    DIMENSION max_channel_height;
+    int bits_per_component;
+
+    // The configuration of component arrays is determined by the image format
+    switch (input->format)
+    {
+        case PIXEL_FORMAT_RAW_RGGB_12:
+        case PIXEL_FORMAT_RAW_RGGB_12P:
+        case PIXEL_FORMAT_RAW_RGGB_14:
+        case PIXEL_FORMAT_RAW_GBRG_12:
+        case PIXEL_FORMAT_RAW_GBRG_12P:
+        case PIXEL_FORMAT_RAW_RGGB_16:
+            // Each 2x2 Bayer quad becomes one sample in each of the four channels
+            channel_count = 4;
+            max_channel_width = input->width / 2;
+            max_channel_height = input->height / 2;
+            // NOTE(review): 12 bits is used for all raw formats, including the
+            // 14- and 16-bit variants -- confirm this internal precision is intended
+            bits_per_component = 12;
+            break;
+
+        default:
+            assert(0);
+            return CODEC_ERROR_PIXEL_FORMAT;
+    }
+
+    // Allocate space for the component arrays
+    AllocateComponentArrays(allocator, output, channel_count, max_channel_width, max_channel_height,
+                            input->format, bits_per_component);
+
+    // Unpack the packed image into the component arrays
+    switch (input->format)
+    {
+        case PIXEL_FORMAT_RAW_RGGB_14:
+            UnpackImage_14(input, output, enabled_parts, true );
+            break;
+
+        case PIXEL_FORMAT_RAW_RGGB_12:
+            UnpackImage_12(input, output, enabled_parts, true );
+            break;
+
+        case PIXEL_FORMAT_RAW_GBRG_12:
+            UnpackImage_12(input, output, enabled_parts, false );
+            break;
+
+        case PIXEL_FORMAT_RAW_RGGB_12P:
+            UnpackImage_12P(input, output, enabled_parts, true );
+            break;
+
+        case PIXEL_FORMAT_RAW_GBRG_12P:
+            UnpackImage_12P(input, output, enabled_parts, false );
+            break;
+
+        default:
+            // Bug fix: PIXEL_FORMAT_RAW_RGGB_16 is accepted by the first switch but
+            // has no unpacker here; release the component arrays allocated above so
+            // they are not leaked on this error path
+            assert(0);
+            ReleaseComponentArrays(allocator, output, channel_count);
+            return CODEC_ERROR_PIXEL_FORMAT;
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+
+
+/*!
+ @brief Insert the header segments that are common to all samples
+
+ This code was derived from PutVideoIntraFrameHeader in the current codec.
+
+ @todo Need to output the channel size table.
+*/
+CODEC_ERROR EncodeBitstreamHeader(ENCODER *encoder, BITSTREAM *stream)
+{
+    CODEC_STATE *codec = &encoder->codec;
+
+    //TAGWORD subband_count = 10;
+    TAGWORD image_width = encoder->image_width;
+    TAGWORD image_height = encoder->image_height;
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    TAGWORD image_format = encoder->image_format;
+    TAGWORD pattern_width = encoder->pattern_width;
+    TAGWORD pattern_height = encoder->pattern_height;
+    TAGWORD components_per_sample = encoder->components_per_sample;
+    TAGWORD max_bits_per_component = encoder->max_bits_per_component;
+    TAGWORD default_bits_per_component = max_bits_per_component;
+#else
+    TAGWORD default_bits_per_component = encoder->internal_precision;
+#endif
+
+    // Align the start of the header on a segment boundary
+    AlignBitsSegment(stream);
+
+    // The bitstream should be aligned to a segment boundary
+    assert(IsAlignedSegment(stream));
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+    if (IsSectionEnabled(encoder, SECTION_NUMBER_HEADER))
+    {
+        // Write the section header for the bitstream header into the bitstream
+        BeginHeaderSection(encoder, stream);
+    }
+#endif
+
+    // Output the number of channels, but only when it differs from the
+    // decoder's current default recorded in the codec state
+    if (encoder->channel_count != codec->channel_count) {
+        PutTagPair(stream, CODEC_TAG_ChannelCount, encoder->channel_count);
+        codec->channel_count = encoder->channel_count;
+    }
+
+    // Inform the decoder of the maximum component array dimensions
+    PutTagPair(stream, CODEC_TAG_ImageWidth, image_width);
+    PutTagPair(stream, CODEC_TAG_ImageHeight, image_height);
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    if (IsPartEnabled(encoder->enabled_parts, VC5_PART_IMAGE_FORMATS))
+    {
+        // Write the pattern element parameters required by VC-5 Part 3
+        PutTagPair(stream, CODEC_TAG_ImageFormat, image_format);
+        PutTagPair(stream, CODEC_TAG_PatternWidth, pattern_width);
+        PutTagPair(stream, CODEC_TAG_PatternHeight, pattern_height);
+        PutTagPair(stream, CODEC_TAG_ComponentsPerSample, components_per_sample);
+        PutTagPair(stream, CODEC_TAG_MaxBitsPerComponent, max_bits_per_component);
+    }
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+    if (IsPartEnabled(encoder->enabled_parts, VC5_PART_LAYERS))
+    {
+        // Output the number of layers in the sample (optional for backward compatibility)
+        //PutTagPairOptional(stream, CODEC_TAG_LAYER_COUNT, layer_count);
+    }
+#endif
+
+    // Record the image dimensions in the codec state
+    codec->image_width = image_width;
+    codec->image_height = image_height;
+
+    // The image dimensions determine the default channel dimensions
+    codec->channel_width = image_width;
+    codec->channel_height = image_height;
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    if (IsPartEnabled(encoder->enabled_parts, VC5_PART_IMAGE_FORMATS))
+    {
+        // Record the pattern element parameters in the codec state
+        codec->image_format = image_format;
+        codec->pattern_width = pattern_width;
+        codec->pattern_height = pattern_height;
+        codec->components_per_sample = components_per_sample;
+        codec->max_bits_per_component = (PRECISION)max_bits_per_component;
+    }
+#endif
+
+    // This parameter is the default precision for each channel
+    codec->bits_per_component = default_bits_per_component;
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+    if (IsSectionEnabled(encoder, SECTION_NUMBER_HEADER))
+    {
+        // Make sure that the bitstream is aligned to a segment boundary
+        AlignBitsSegment(stream);
+
+        // Update the section header with the actual size of the bitstream header section
+        EndSection(stream);
+    }
+#endif
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Write the trailer at the end of the encoded sample
+
+ This routine updates the sample size segment in the sample extension header
+ with the actual size of the encoded sample. The size of the encoded sample
+ does not include the size of the sample header or trailer.
+
+ Note that the trailer may not be necessary as the decoder may stop
+ reading from the sample after it has decoded all of the information
+ required to reconstruct the frame.
+
+ This code was derived from PutVideoIntraFrameTrailer in the current codec.
+*/
+CODEC_ERROR EncodeBitstreamTrailer(ENCODER *encoder, BITSTREAM *stream)
+{
+    // Pad the bitstream so the encoded sample ends on a segment boundary
+    AlignBitsSegment(stream);
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Write the unique image identifier
+
+ @todo Should the UMID instance number be a parameter to this routine?
+ */
+static CODEC_ERROR WriteUniqueImageIdentifier(ENCODER *encoder, BITSTREAM *stream)
+{
+    // UMID length byte 0x13 -- presumably per SMPTE ST 330 for a basic UMID; confirm against the spec
+    const int UMID_length_byte = 0x13;
+    const int UMID_instance_number = 0;
+
+    // Total length of the unique image identifier chunk payload (in segments)
+    // (UMID_length and sequence_number_length are constants defined elsewhere in this library)
+    const int identifier_chunk_payload_length = UMID_length + sequence_number_length;
+
+    // Write the tag value pair for the small chunk element for the unique image identifier
+    PutTagPairOptional(stream, CODEC_TAG_UniqueImageIdentifier, identifier_chunk_payload_length);
+
+    // Write the UMID label
+    PutByteArray(stream, UMID_label, sizeof(UMID_label));
+
+    // Write the UMID length byte
+    PutBits(stream, UMID_length_byte, 8);
+
+    // Write the UMID instance number
+    PutBits(stream, UMID_instance_number, 24);
+
+    // Write the image sequence identifier
+    PutByteArray(stream, encoder->image_sequence_identifier, sizeof(encoder->image_sequence_identifier));
+
+    // Write the image sequence number
+    PutLong(stream, encoder->image_sequence_number);
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Write extra information that follows the sample header into the bitstream
+
+ This routine writes metadata into the sample header extension.
+
+ Metadata includes the unique GUID for each video clip, the number of each video frame,
+ and the timecode (if available). The GUID and frame number pair uniquely identify each
+ frame in the encoded clip.
+
+ This routine also outputs additional information that describes the characteristics of
+ the encoded video in the GOP extension and sample flags.
+
+ The size of the sample extension header is provided by the sample size segment.
+*/
+CODEC_ERROR EncodeExtensionHeader(ENCODER *encoder, BITSTREAM *stream)
+{
+    ENABLED_PARTS enabled_parts = encoder->enabled_parts;
+
+    // Encode the transform prescale for the first channel (assume all channels are the same)
+    TAGWORD prescale_shift = PackTransformPrescale(&encoder->transform[0]);
+
+    // The tag-value pair is required if the encoder is not using the default prescale values
+    if (IsTransformPrescaleDefault(&encoder->transform[0], encoder->internal_precision))
+    {
+        PutTagPairOptional(stream, CODEC_TAG_PrescaleShift, prescale_shift);
+    }
+    else
+    {
+        PutTagPair(stream, CODEC_TAG_PrescaleShift, prescale_shift);
+    }
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    if (IsPartEnabled(enabled_parts, VC5_PART_IMAGE_FORMATS))
+    {
+        // Identify this image within the encoded sequence
+        WriteUniqueImageIdentifier(encoder, stream);
+
+        // The component transform and permutation are only written when non-trivial
+        if (!IsComponentTransformIdentity(encoder->component_transform))
+        {
+            WriteComponentTransform(encoder->component_transform, stream);
+        }
+
+        if (!IsComponentPermutationIdentity(encoder->component_permutation))
+        {
+            WriteComponentPermutation(encoder->component_permutation, stream);
+        }
+    }
+#endif
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Write the sample extension trailer into the bitstream
+
+ This routine must be called after encoding the sample and before writing the
+ sample trailer, but must only be called if the sample extension header was
+ written into the bitstream.
+*/
+CODEC_ERROR EncodeExtensionTrailer(ENCODER *encoder, BITSTREAM *stream)
+{
+ return CODEC_ERROR_OKAY;
+}
+
+static int32_t GetMultiplier(QUANT divisor)
+{
+ switch (divisor)
+ {
+ case 1:
+ return (uint32_t)(1 << 16);
+
+ case 12:
+ return (1 << 16) / 12;
+
+ case 24:
+ return (1 << 16) / 24;
+
+ case 32:
+ return (1 << 16) / 32;
+
+ case 48:
+ return (1 << 16) / 48;
+
+ case 96:
+ return (1 << 16) / 96;
+
+ case 144:
+ return (1 << 16) / 144;
+
+ default:
+ return (uint32_t)(1 << 16) / divisor;
+ };
+}
+
+/*!
+ @brief Compute the rounding value for quantization
+ */
+static QUANT QuantizerMidpoint(QUANT correction, QUANT divisor)
+{
+ int32_t midpoint = 0;
+
+ if (correction == 2)
+ {
+ midpoint = divisor >> 1;
+
+ // CFEncode_Premphasis_Original
+ if (midpoint) {
+ midpoint--;
+ }
+ }
+ else if (correction > 2 && correction < 9)
+ {
+ midpoint = divisor / correction;
+ }
+
+ return midpoint;
+}
+
+static void GetQuantizationParameters(int32_t midpoint_prequant, QUANT quant[], int32_t* midpoints, int32_t* multipliers)
+{
+ int i;
+ for (i = 0; i < 4; i++)
+ {
+ midpoints[i] = QuantizerMidpoint(midpoint_prequant, quant[i]);
+ multipliers[i] = GetMultiplier(quant[i]);
+ }
+}
+
+/*!
+ @brief Shift the buffers of horizontal highpass results
+
+ The encoder contains six rows of horizontal lowpass and highpass results
+ for each channel. This routine shifts the buffers by two rows to make
+ room for two more rows of horizontal results for each channel.
+ */
+static void ShiftHorizontalResultBuffers(PIXEL **buffer)
+{
+ PIXEL *buffer01[2];
+
+ memcpy( buffer01, buffer + 0, sizeof(PIXEL*) * 2 );
+
+ memmove( buffer + 0, buffer + 2, sizeof(PIXEL*) * (ROW_BUFFER_COUNT - 2) );
+
+ memcpy( buffer + 4, buffer01, sizeof(PIXEL*) * 2 );
+}
+
/*!
    @brief Per-stage state for the recursive forward wavelet transform

    One instance describes a single wavelet stage: where to read the input
    rows, where to write the four output bands, and the quantization
    parameters applied while writing the highpass results.
*/
typedef struct _recursive_transform_data
{
    PIXEL *input_ptr;                   //!< Base address of the stage input (image plane or previous lowpass band)
    DIMENSION input_width;              //!< Input width in pixels
    DIMENSION input_height;             //!< Input height in rows
    DIMENSION input_pitch;              //!< Input row pitch in bytes (used in byte-address arithmetic)

    PIXEL *output_ptr[MAX_BAND_COUNT];  //!< Base address of each output wavelet band
    DIMENSION output_width;             //!< Output wavelet width in pixels
    DIMENSION output_pitch;             //!< Output row pitch in bytes

    int32_t prescale;                   //!< Prescale value passed to the horizontal filter (shift amount — see FilterHorizontalRow)

    int32_t* midpoints;                 //!< Quantization rounding value for each band (from QuantizerMidpoint)
    int32_t* multipliers;               //!< 16-bit fixed-point reciprocal of the quantizer for each band (from GetMultiplier)

    PIXEL **lowpass_buffer;             //!< Rows of intermediate horizontal lowpass results
    PIXEL **highpass_buffer;            //!< Rows of intermediate horizontal highpass results

} RECURSIVE_TRANSFORM_DATA;

// NOTE(review): this macro appears unused in the visible code — confirm before removing
#define RECURSIVE 1
+
/*!
    @brief Apply one stage of the forward wavelet transform and recurse to later stages

    Input rows are filtered horizontally into the row buffers, then the
    buffered rows are filtered vertically two input rows at a time to
    produce the output bands.  The top and bottom rows are handled as
    special cases for the image boundary.  After this stage finishes, the
    routine recurses into the next wavelet stage until MAX_WAVELET_COUNT
    stages have been processed.

    @param transform_data  Array of per-stage state, indexed by wavelet stage
    @param wavelet_stage   Index of the wavelet stage to compute
    @param start_row       First input row to process (zero processes the top row)
    @param end_row         Last input row to process (clamped to the last middle row)
*/
static void ForwardWaveletTransformRecursive(RECURSIVE_TRANSFORM_DATA *transform_data, int wavelet_stage, uint32_t start_row, uint32_t end_row)
{
    uint32_t input_row_index = start_row;

    // Unpack the state for this wavelet stage
    PIXEL *input_ptr = transform_data[wavelet_stage].input_ptr;
    DIMENSION input_width = transform_data[wavelet_stage].input_width;
    DIMENSION input_height = transform_data[wavelet_stage].input_height;
    DIMENSION input_pitch = transform_data[wavelet_stage].input_pitch;

    PIXEL **output_ptr = transform_data[wavelet_stage].output_ptr;
    DIMENSION output_width = transform_data[wavelet_stage].output_width;
    DIMENSION output_pitch = transform_data[wavelet_stage].output_pitch;

    int32_t* midpoints = transform_data[wavelet_stage].midpoints;
    int32_t* multipliers = transform_data[wavelet_stage].multipliers;

    PIXEL **lowpass_buffer = transform_data[wavelet_stage].lowpass_buffer;
    PIXEL **highpass_buffer = transform_data[wavelet_stage].highpass_buffer;

    int32_t prescale = transform_data[wavelet_stage].prescale;

    // Last even-numbered input row that starts a vertical filter pair
    // NOTE(review): assumes input_height >= 4 — smaller inputs would wrap these unsigned values; confirm upstream guarantees
    uint32_t bottom_input_row = ((input_height % 2) == 0) ? input_height - 2 : input_height - 1;

    uint32_t last_middle_row = bottom_input_row - 2;

    // Never process beyond the last middle row in the loop below
    end_row = minimum( last_middle_row, end_row);

    // --- TOP ROW
    if( input_row_index == 0 )
    {
        int row;

        // Prime the row buffers with the first ROW_BUFFER_COUNT rows of horizontal results
        for (row = 0; row < ROW_BUFFER_COUNT; row++)
        {
            PIXEL *input_row_ptr = (PIXEL *)((uintptr_t)input_ptr + row * input_pitch);

            FilterHorizontalRow(input_row_ptr, lowpass_buffer[row], highpass_buffer[row], input_width, prescale);
        }

        // Process the first row as a special case for the boundary condition
        FilterVerticalTopRow(lowpass_buffer, highpass_buffer, output_ptr, output_width, output_pitch, midpoints, multipliers, input_row_index );
        input_row_index += 2;
    }

    // --- MIDDLE ROWS
    for (; input_row_index <= end_row; input_row_index += 2)
    {
        // Check for errors in the row calculation
        assert((input_row_index % 2) == 0);

        FilterVerticalMiddleRow(lowpass_buffer, highpass_buffer, output_ptr, output_width, output_pitch, midpoints, multipliers, input_row_index );

        if (input_row_index < last_middle_row)
        {
            int row;

            // Recycle the two oldest buffered rows to make room for two new ones
            ShiftHorizontalResultBuffers(lowpass_buffer);
            ShiftHorizontalResultBuffers(highpass_buffer);

            // Get two more rows of horizontal lowpass and highpass results
            for (row = 4; row < ROW_BUFFER_COUNT; row++)
            {
                // Clamp to the last input row (repeats the bottom row at the boundary)
                int next_input_row = minimum( input_row_index + row, input_height - 1 );

                PIXEL *input_row_ptr = (PIXEL *)((uintptr_t)input_ptr + next_input_row * input_pitch);

                FilterHorizontalRow(input_row_ptr, lowpass_buffer[row], highpass_buffer[row], input_width, prescale);
            }
        }
    }

    // --- BOTTOM ROW
    if( input_row_index == bottom_input_row )
    {
        // Process the last row as a special case for the boundary condition
        FilterVerticalBottomRow(lowpass_buffer, highpass_buffer, output_ptr, output_width, output_pitch, midpoints, multipliers, input_row_index );
    }

    // Recurse into the next wavelet stage (its input is this stage's lowpass band)
    if( wavelet_stage < (MAX_WAVELET_COUNT - 1) )
    {
        ForwardWaveletTransformRecursive( transform_data, wavelet_stage + 1, 0, 0xFFFF );
    }
}
+
+static void SetRecursiveTransformData(RECURSIVE_TRANSFORM_DATA* transform_data,
+ const TRANSFORM *transform,
+ const COMPONENT_ARRAY *input_image_component,
+ int32_t midpoints[MAX_BAND_COUNT], int32_t multipliers[MAX_BAND_COUNT],
+ PIXEL *lowpass_buffer[MAX_WAVELET_COUNT][ROW_BUFFER_COUNT],
+ PIXEL *highpass_buffer[MAX_WAVELET_COUNT][ROW_BUFFER_COUNT],
+ int midpoint_prequant, int wavelet_stage )
+{
+ int i;
+
+ if( wavelet_stage == 0 )
+ {
+ transform_data->input_width = input_image_component->width;
+ transform_data->input_height = input_image_component->height;
+ transform_data->input_pitch = input_image_component->pitch;
+ transform_data->input_ptr = (PIXEL*)input_image_component->data;
+ }
+ else
+ {
+ WAVELET *input_wavelet = transform->wavelet[wavelet_stage - 1];
+
+ transform_data->input_width = input_wavelet->width;
+ transform_data->input_height = input_wavelet->height;
+ transform_data->input_pitch = input_wavelet->pitch;
+ transform_data->input_ptr = WaveletRowAddress(input_wavelet, LL_BAND, 0);
+ }
+
+ WAVELET *output_wavelet = transform->wavelet[wavelet_stage];
+ assert(output_wavelet);
+
+ transform_data->output_width = output_wavelet->width;
+ transform_data->output_pitch = output_wavelet->pitch;
+
+ for (i = 0; i < MAX_BAND_COUNT; i++)
+ {
+ transform_data->output_ptr[i] = output_wavelet->data[i];
+ }
+
+ transform_data->lowpass_buffer = lowpass_buffer[wavelet_stage];
+ transform_data->highpass_buffer = highpass_buffer[wavelet_stage];
+ transform_data->prescale = transform->prescale[wavelet_stage];
+
+ GetQuantizationParameters(midpoint_prequant, output_wavelet->quant, midpoints, multipliers );
+
+ transform_data->midpoints = midpoints;
+ transform_data->multipliers = multipliers;
+}
+
+static void ForwardWaveletTransform(TRANSFORM *transform, const COMPONENT_ARRAY *input_image_component, PIXEL *lowpass_buffer[MAX_WAVELET_COUNT][ROW_BUFFER_COUNT], PIXEL *highpass_buffer[MAX_WAVELET_COUNT][ROW_BUFFER_COUNT], int midpoint_prequant)
+{
+ RECURSIVE_TRANSFORM_DATA transform_data[MAX_WAVELET_COUNT];
+
+ int32_t midpoints[MAX_WAVELET_COUNT][MAX_BAND_COUNT]; //!< Midpoint value for each band (applied during quantization)
+ int32_t multipliers[MAX_WAVELET_COUNT][MAX_BAND_COUNT]; //!< Multiplier value for each band (applied during quantization)
+
+ SetRecursiveTransformData( &transform_data[0], transform, input_image_component, midpoints[0], multipliers[0], lowpass_buffer, highpass_buffer, midpoint_prequant, 0 );
+ SetRecursiveTransformData( &transform_data[1], transform, input_image_component, midpoints[1], multipliers[1], lowpass_buffer, highpass_buffer, midpoint_prequant, 1 );
+ SetRecursiveTransformData( &transform_data[2], transform, input_image_component, midpoints[2], multipliers[2], lowpass_buffer, highpass_buffer, midpoint_prequant, 2 );
+
+ ForwardWaveletTransformRecursive( transform_data, 0, 0, 0xFFFF );
+}
+
/*!
    @brief Encode the portion of a sample that corresponds to a single layer

    Samples can contain multiple subsamples.  Each subsample may correspond to
    a different view.  For example, an encoded video sample may contain both the
    left and right subsamples in a stereo pair.

    Subsamples have been called tracks or channels, but this terminology can be
    confused with separate video tracks in a multimedia container or the color
    planes that are called channels elsewhere in this codec.

    The subsamples are decoded separately and composited to form a single frame
    that is the output of the complete process of decoding a single video sample.
    For this reason, the subsamples are called layers.

    For each channel, this routine computes the forward wavelet tree and then
    writes the channel header, the encoded subbands, and the channel trailer
    into the bitstream.

    @todo Need to reset the codec state for each layer?
*/
CODEC_ERROR EncodeMultipleChannels(ENCODER *encoder, const UNPACKED_IMAGE *image, BITSTREAM *stream)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;

    int channel_count;
    int channel_index;

    channel_count = encoder->channel_count;

#if VC5_ENABLED_PART(VC5_PART_LAYERS)
    if (IsPartEnabled(encoder->enabled_parts, VC5_PART_LAYERS))
    {
        // Write the tag value pairs that precede the encoded wavelet tree
        error = EncodeLayerHeader(encoder, stream);
        if (error != CODEC_ERROR_OKAY) {
            return error;
        }
    }
#endif


    CODEC_STATE *codec = &encoder->codec;

    // Compute the wavelet transform tree for each channel
    for (channel_index = 0; channel_index < channel_count; channel_index++)
    {
        int channel_number;

        ForwardWaveletTransform(&encoder->transform[channel_index], &image->component_array_list[channel_index], encoder->lowpass_buffer, encoder->highpass_buffer, encoder->midpoint_prequant );

        // Channels are written in the order given by the channel order table
        channel_number = encoder->channel_order_table[channel_index];

        // Encode the tag value pairs in the header for this channel
        error = EncodeChannelHeader(encoder, channel_number, stream);
        if (error != CODEC_ERROR_OKAY) {
            return error;
        }

        // Encode the lowpass and highpass bands in the wavelet tree for this channel
        error = EncodeChannelSubbands(encoder, channel_number, stream);
        if (error != CODEC_ERROR_OKAY) {
            return error;
        }

        // Encode the tag value pairs in the trailer for this channel
        error = EncodeChannelTrailer(encoder, channel_number, stream);
        if (error != CODEC_ERROR_OKAY) {
            return error;
        }

        // Check that the bitstream is aligned to a segment boundary
        assert(IsAlignedSegment(stream));

        // Update the codec state for the next channel in the bitstream
        codec->channel_number = (channel_number + 1);
        codec->subband_number = 0;
    }

#if VC5_ENABLED_PART(VC5_PART_LAYERS)
    if (IsPartEnabled(encoder->enabled_parts, VC5_PART_LAYERS))
    {
        // Write the tag value pairs that follow the encoded wavelet tree
        error = EncodeLayerTrailer(encoder, stream);
        if (error != CODEC_ERROR_OKAY) {
            return error;
        }
        //TODO: Need to align the bitstream between layers?
    }
#endif

    return CODEC_ERROR_OKAY;
}
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+/*!
+ @brief Write the sample layer header
+
+ The baseline profile only supports a single layer so the layer header
+ and trailer are not required.
+*/
+CODEC_ERROR EncodeLayerHeader(ENCODER *encoder, BITSTREAM *stream)
+{
+ //TODO: Write the tag-value pair for the layer number
+
+ return CODEC_ERROR_OKAY;
+}
+#endif
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+/*!
+ @brief Write the sample layer trailer
+
+ The baseline profile only supports a single layer so the layer header
+ and trailer are not required.
+
+ If more than one layer is present, the layers must be terminated by a
+ layer trailer. Otherwise, the decoder will continue to parse tag-value
+ pairs that belong to the next layer.
+*/
+CODEC_ERROR EncodeLayerTrailer(ENCODER *encoder, BITSTREAM *stream)
+{
+ // The value in the layer trailer tag-value pair is not used
+ PutTagPairOptional(stream, CODEC_TAG_LAYER_TRAILER, 0);
+
+ return CODEC_ERROR_OKAY;
+}
+#endif
+
+/*!
+ @brief Encode the channel into the bistream
+
+ This routine encodes all of the subbands (lowpass and highpass) in the
+ wavelet tree for the specified channel into the bitstream.
+*/
+CODEC_ERROR EncodeChannelWavelets(ENCODER *encoder, BITSTREAM *stream)
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+ CODEC_STATE *codec = &encoder->codec;
+
+ int channel_count;
+ int channel_index;
+
+ // Get the number of channels in the encoder wavelet transform
+ channel_count = encoder->channel_count;
+
+ // Compute the remaining wavelet transforms for each channel
+ //for (channel_index = 0; channel_index < channel_count; channel_index++)
+ for (channel_index = 0; channel_index < channel_count; channel_index++)
+ {
+ int channel_number = encoder->channel_order_table[channel_index];
+
+ // Encode the tag value pairs in the header for this channel
+ error = EncodeChannelHeader(encoder, channel_number, stream);
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+ // Encode the lowpass and highpass bands in the wavelet tree for this channel
+ error = EncodeChannelSubbands(encoder, channel_number, stream);
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+ // Encode the tag value pairs in the trailer for this channel
+ error = EncodeChannelTrailer(encoder, channel_number, stream);
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+ // Check that the bitstream is alligned to a segment boundary
+ assert(IsAlignedSegment(stream));
+
+ // Update the codec state for the next channel in the bitstream
+ //codec->channel_number++;
+ codec->channel_number = (channel_number + 1);
+ codec->subband_number = 0;
+ }
+
+ return CODEC_ERROR_OKAY;
+}
+
/*!
    @brief Write the channel header into the bitstream

    The channel header separates channels in the encoded layer.  A channel header
    is not required before the first encoded channel because the codec state is
    initialized for decoding the first channel.

    Tag-value pairs are written only for values that differ from the current
    codec state, and the codec state is updated to match what was written.

    The first channel is channel number zero.
*/
CODEC_ERROR EncodeChannelHeader(ENCODER *encoder,
                                int channel_number,
                                BITSTREAM *stream)
{
    CODEC_STATE *codec = &encoder->codec;
    DIMENSION channel_width = encoder->channel[channel_number].width;
    DIMENSION channel_height = encoder->channel[channel_number].height;
    int bits_per_component = encoder->channel[channel_number].bits_per_component;

    // Channel headers must start on a segment boundary
    AlignBitsSegment(stream);

#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
    if (IsSectionEnabled(encoder, SECTION_NUMBER_CHANNEL))
    {
        // Write the channel section header into the bitstream
        BeginChannelSection(encoder, stream);
    }
#endif

    // Write the channel number if it does not match the codec state
    if (channel_number != codec->channel_number)
    {
        PutTagPair(stream, CODEC_TAG_ChannelNumber, channel_number);
        codec->channel_number = channel_number;
    }

#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
    if (IsPartEnabled(encoder->enabled_parts, VC5_PART_IMAGE_FORMATS))
    {
        // The decoder will derive the channel width and height from the image dimensions and format
        codec->channel_width = channel_width;
        codec->channel_height = channel_height;
    }
    else
#endif
    {
        // Write the component array width if it does not match the codec state
        if (channel_width != codec->channel_width)
        {
            PutTagPair(stream, CODEC_TAG_ChannelWidth, channel_width);
            codec->channel_width = channel_width;
        }

        // Write the component array height if it does not match the codec state
        if (channel_height != codec->channel_height)
        {
            PutTagPair(stream, CODEC_TAG_ChannelHeight, channel_height);
            codec->channel_height = channel_height;
        }
    }

    // Write the component array precision if it does not match the codec state
    if (bits_per_component != codec->bits_per_component)
    {
        PutTagPair(stream, CODEC_TAG_BitsPerComponent, bits_per_component);
        codec->bits_per_component = bits_per_component;
    }

    return CODEC_ERROR_OKAY;
}
+
/*!
    @brief Write the encoded subbands for this channel into the bitstream

    This routine writes the encoded subbands in the wavelet tree for this channel
    into the bitstream: the lowpass band of the wavelet at the highest level
    first, followed by the highpass bands of every wavelet in order of subband
    number (from the highest wavelet level down to level zero).
*/
CODEC_ERROR EncodeChannelSubbands(ENCODER *encoder, int channel_number, BITSTREAM *stream)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;

    int wavelet_count = encoder->wavelet_count;
    int last_wavelet_index = wavelet_count - 1;
    int wavelet_index;

    // Subband numbering starts at zero with the lowpass band
    int subband = 0;

    // Start with the lowpass band in the wavelet at the highest level
    WAVELET *wavelet = encoder->transform[channel_number].wavelet[last_wavelet_index];

    // Check that the bitstream is aligned on a segment boundary
    assert(IsAlignedSegment(stream));

#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
    if (IsSectionEnabled(encoder, SECTION_NUMBER_WAVELET))
    {
        // Write the wavelet section header into the bitstream
        BeginWaveletSection(encoder, stream);
    }
#endif

    // Encode the lowpass subband in this channel
    error = EncodeLowpassBand(encoder, wavelet, channel_number, stream);
    if (error != CODEC_ERROR_OKAY) {
        return error;
    }

    // Advance to the first highpass subband
    subband++;

    // Encode the highpass bands in order of subband number
    for (wavelet_index = last_wavelet_index; wavelet_index >= 0; wavelet_index--)
    {
        int band_index;

        wavelet = encoder->transform[channel_number].wavelet[wavelet_index];

#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
        if (IsSectionEnabled(encoder, SECTION_NUMBER_WAVELET))
        {
            // The section for the highest wavelet was opened before the lowpass band
            if (wavelet_index < last_wavelet_index)
            {
                // Write the wavelet section header into the bitstream
                BeginWaveletSection(encoder, stream);
            }
        }
#endif
        // Encode the highpass bands in this wavelet (band zero is the lowpass band)
        for (band_index = 1; band_index < wavelet->band_count; band_index++)
        {
            error = EncodeHighpassBand(encoder, wavelet, band_index, subband, stream);
            if (error != CODEC_ERROR_OKAY) {
                return error;
            }

            // Advance to the next subband
            subband++;
        }

#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
        if (IsSectionEnabled(encoder, SECTION_NUMBER_WAVELET))
        {
            // Make sure that the bitstream is aligned to a segment boundary
            AlignBitsSegment(stream);

            // Update the section header with the actual size of the wavelet section
            EndSection(stream);
        }
#endif
    }

    return CODEC_ERROR_OKAY;
}
+
+/*!
+ @brief Write the channel trailer into the bitstream
+
+ A channel trailer is not required as the channel header functions as a marker
+ between channels in the bitstream.
+
+ It may be necessary to update the channel size in a sample size segment written
+ into the channel header if the channel header includes a sample size segment in
+ the future.
+*/
+CODEC_ERROR EncodeChannelTrailer(ENCODER *encoder, int channel, BITSTREAM *stream)
+{
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+ if (IsSectionEnabled(encoder, SECTION_NUMBER_CHANNEL))
+ {
+ // Make sure that the bitstream is aligned to a segment boundary
+ AlignBitsSegment(stream);
+
+ // Update the section header with the actual size of the channel section
+ EndSection(stream);
+ }
+#endif
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Allocate intermediate buffers for the horizontal transform results
+
+ @todo Need to return an error code if allocation fails
+*/
+CODEC_ERROR AllocateEncoderHorizontalBuffers(ENCODER *encoder)
+{
+ gpr_allocator *allocator = encoder->allocator;
+ int channel_index;
+ int wavelet_index;
+ int channel_count = encoder->channel_count;
+
+ int buffer_width = 0;
+
+ for (channel_index = 0; channel_index < channel_count; channel_index++)
+ {
+ buffer_width = maximum(buffer_width, encoder->channel[channel_index].width );
+ }
+
+ buffer_width = ((buffer_width % 2) == 0) ? buffer_width / 2 : (buffer_width + 1) / 2;
+
+ for (wavelet_index = 0; wavelet_index < MAX_WAVELET_COUNT; wavelet_index++)
+ {
+ int row;
+
+ int channel_width = encoder->transform[0].wavelet[wavelet_index]->width;
+
+ for (row = 0; row < ROW_BUFFER_COUNT; row++)
+ {
+ PIXEL *lowpass_buffer = allocator->Alloc(channel_width * sizeof(PIXEL) * 2);
+ PIXEL *highpass_buffer = lowpass_buffer + channel_width;
+
+ assert(lowpass_buffer != NULL);
+ if (! (lowpass_buffer != NULL)) {
+ return CODEC_ERROR_OUTOFMEMORY;
+ }
+
+ encoder->lowpass_buffer[wavelet_index][row] = lowpass_buffer;
+ encoder->highpass_buffer[wavelet_index][row] = highpass_buffer;
+ }
+ }
+
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Deallocate the intermediate buffers for the horizontal transform results
+
+ It is possible to avoid reallocating the buffers for the horizontal transform
+ results if the buffers were not deallocated between encoded frames. In this case,
+ it would be necessary to call this routine inside @ref ReleaseEncoder and it would
+ also be necessary to modify @ref AllocateEncoderHorizontalBuffers to not allocate
+ the buffers if they are already allocated.
+*/
+CODEC_ERROR DeallocateEncoderHorizontalBuffers(ENCODER *encoder)
+{
+ gpr_allocator *allocator = encoder->allocator;
+
+ int wavelet_index;
+
+ for (wavelet_index = 0; wavelet_index < MAX_WAVELET_COUNT; wavelet_index++)
+ {
+ int row;
+
+ for (row = 0; row < ROW_BUFFER_COUNT; row++)
+ {
+ allocator->Free(encoder->lowpass_buffer[wavelet_index][row]);
+ }
+ }
+
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Allocate buffers used for computing the forward wavelet transform
+*/
+CODEC_ERROR AllocateHorizontalBuffers(gpr_allocator *allocator,
+ PIXEL *lowpass_buffer[],
+ PIXEL *highpass_buffer[],
+ int buffer_width)
+{
+ const size_t row_buffer_size = buffer_width * sizeof(PIXEL);
+
+ int row;
+
+ for (row = 0; row < ROW_BUFFER_COUNT; row++)
+ {
+ lowpass_buffer[row] = allocator->Alloc(row_buffer_size);
+ highpass_buffer[row] = allocator->Alloc(row_buffer_size);
+
+ // Check that the memory allocation was successful
+ assert(lowpass_buffer[row] != NULL);
+ if (! (lowpass_buffer[row] != NULL)) {
+ return CODEC_ERROR_OUTOFMEMORY;
+ }
+ assert(highpass_buffer[row] != NULL);
+ if (! (highpass_buffer[row] != NULL)) {
+ return CODEC_ERROR_OUTOFMEMORY;
+ }
+ }
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Deallocate buffers used for computing the forward wavelet transform
+*/
+CODEC_ERROR DeallocateHorizontalBuffers(gpr_allocator *allocator,
+ PIXEL *lowpass_buffer[],
+ PIXEL *highpass_buffer[])
+{
+ int row;
+
+ for (row = 0; row < ROW_BUFFER_COUNT; row++)
+ {
+ allocator->Free(lowpass_buffer[row]);
+ allocator->Free(highpass_buffer[row]);
+ }
+
+ return CODEC_ERROR_OKAY;
+}
+
/*!
    @brief Allocate all of the wavelets used during encoding

    This routine allocates all of the wavelets in the wavelet tree that
    may be used during encoding.

    This routine is used to preallocate the wavelets before encoding begins.
    Otherwise, the wavelet bands are allocated on demand.

    By default, the wavelet bands are encoded into the bitstream with the bands
    from the wavelet at the highest level (smallest wavelet) first so that the
    bands can be processed by the encoder in the order as the sample is decoded.

    @todo Do not allocate wavelets for resolutions that are larger than the
    decoded resolution.  At lower resolutions, the depth of the wavelet tree
    can be reduced and the highpass bands in the unused wavelets do not have
    to be decoded.

    @todo Should it be an error if the wavelets are not preallocated?
*/
CODEC_ERROR AllocEncoderTransforms(ENCODER *encoder)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;

    // Use the default allocator for the encoder
    gpr_allocator *allocator = encoder->allocator;
    int channel_index;
    int wavelet_index;

    assert(encoder != NULL);
    if (! (encoder != NULL)) {
        return CODEC_ERROR_NULLPTR;
    }

    // Check that the encoded dimensions are valid
    //assert((encoder->encoded.width % (1 << encoder->wavelet_count)) == 0);

    for (channel_index = 0; channel_index < encoder->channel_count; channel_index++)
    {
        // The wavelet at level zero has the same dimensions as the encoded frame
        DIMENSION wavelet_width = 0;
        DIMENSION wavelet_height = 0;
        error = GetChannelDimensions(encoder, channel_index, &wavelet_width, &wavelet_height);
        assert(wavelet_width > 0 && wavelet_height > 0);
        if (error != CODEC_ERROR_OKAY) {
            return error;
        }

        for (wavelet_index = 0; wavelet_index < encoder->wavelet_count; wavelet_index++)
        {
            WAVELET *wavelet = NULL;

            // Pad the wavelet width if not divisible by two
            if ((wavelet_width % 2) != 0) {
                wavelet_width++;
            }

            // Pad the wavelet height if not divisible by two
            if ((wavelet_height % 2) != 0) {
                wavelet_height++;
            }

            // Reduce the dimensions of the next wavelet by half
            wavelet_width /= 2;
            wavelet_height /= 2;

            // Allocate the wavelet
            // NOTE(review): wavelets allocated before a failure are not freed here —
            // presumably released by the encoder teardown path; confirm
            wavelet = CreateWavelet(allocator, wavelet_width, wavelet_height);
            if (wavelet == NULL) {
                return CODEC_ERROR_OUTOFMEMORY;
            }

            // Add the wavelet to the transform
            encoder->transform[channel_index].wavelet[wavelet_index] = wavelet;
        }
    }

    return CODEC_ERROR_OKAY;
}
+
+/*!
+ @brief Allocate all of the buffers required for encoding
+
+ This routine allocates buffers required for encoding, not including
+ the wavelet images in the wavelet tree which are allocated by
+ @ref AllocEncoderTransforms
+
+ This routine is used to preallocate buffers before encoding begins.
+ If the buffers are allocated on demand if not preallocated.
+
+ The encoding parameters, including the encoded frame dimensions,
+ resolution of the decoded frame, and the decoded pixel format, are
+ taken into account when the buffers are allocated. For example,
+ buffer space that is only used when encoding to full resolution will
+ not be allocated if the frame is decoded to a smaller size.
+
+ Note that it is not an error to preallocate more buffer space than
+ what is strictly required for encoding. For example, it is okay to
+ allocate buffer space required for full frame encoding even if the
+ encoded sample will be decoded at lower resolution. In many applications,
+ it is simpler to preallocate the maximum buffer space that may be needed.
+
+ Currently, the reference encoder allocates scratch buffers as required
+ by each routine that needs scratch space and the scratch buffers are
+ deallocated at the end each routine that allocates scratch space.
+ A custom memory allocator can make this scheme efficient. See comments
+ in the documentation for the memory allocator module.
+
+ @todo Should it be an error if the buffers are not preallocated?
+*/
+CODEC_ERROR AllocEncoderBuffers(ENCODER *encoder)
+{
+ (void)encoder;
+ return CODEC_ERROR_UNIMPLEMENTED;
+}
+
+/*!
+ @brief Set the quantization parameters in the encoder
+
+ This routine computes the parameters in the quantizer used by
+ the encoder based based on the quality setting and the desired
+ bitrate. The quantization parameters are adjsuted to compensate
+ for the precision of the input pixels.
+
+ Note that the baseline profile does not support quantization to
+ achieve a desired bitrate.
+
+*/
+CODEC_ERROR SetEncoderQuantization(ENCODER *encoder,
+ const ENCODER_PARAMETERS *parameters)
+{
+ int channel_count = encoder->channel_count;
+ int channel_number;
+
+ const int quant_table_length = sizeof(parameters->quant_table)/sizeof(parameters->quant_table[0]);
+
+ // Set the midpoint prequant parameter
+ encoder->midpoint_prequant = 2;
+
+ // Set the quantization table in each channel
+ for (channel_number = 0; channel_number < channel_count; channel_number++)
+ {
+ SetTransformQuantTable(encoder, channel_number, parameters->quant_table, quant_table_length);
+ }
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Copy the quantization table into the wavelet bands
+*/
+CODEC_ERROR SetTransformQuantTable(ENCODER *encoder, int channel, const QUANT table[], int table_length)
+{
+ int wavelet_count = encoder->wavelet_count;
+ int wavelet_index;
+ int subband;
+
+ // All lowpass bands use the quantization for subband zero
+ for (wavelet_index = 0; wavelet_index < wavelet_count; wavelet_index++)
+ {
+ WAVELET *wavelet = encoder->transform[channel].wavelet[wavelet_index];
+ wavelet->quant[0] = table[0];
+ }
+
+ // Store the quantization values for the highpass bands in each wavelet
+ for (subband = 1; subband < table_length; subband++)
+ {
+ int wavelet_index = SubbandWaveletIndex(subband);
+ int band_index = SubbandBandIndex(subband);
+ WAVELET *wavelet;
+
+ assert(0 <= wavelet_index && wavelet_index < wavelet_count);
+ assert(0 <= band_index && band_index <= MAX_BAND_COUNT);
+
+ // Store the quantization value for this subband
+ wavelet = encoder->transform[channel].wavelet[wavelet_index];
+ wavelet->quant[band_index] = table[subband];
+ }
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Return the encoded dimensions for the specified channel
+
+ The encoded dimensions for each channel may differ due to color
+ difference component sampling.
+*/
+CODEC_ERROR GetChannelDimensions(ENCODER *encoder,
+ int channel_number,
+ DIMENSION *channel_width_out,
+ DIMENSION *channel_height_out)
+{
+ DIMENSION channel_width = 0;
+ DIMENSION channel_height = 0;
+
+ assert(encoder != NULL && channel_width_out != NULL && channel_height_out != NULL);
+ if (! (encoder != NULL && channel_width_out != NULL && channel_height_out != NULL)) {
+ return CODEC_ERROR_NULLPTR;
+ }
+
+ assert(0 <= channel_number && channel_number < encoder->channel_count);
+ if (! (0 <= channel_number && channel_number < encoder->channel_count)) {
+ return CODEC_ERROR_UNEXPECTED;
+ }
+
+ // Clear the output dimensions in case this routine terminates early
+ *channel_width_out = 0;
+ *channel_height_out = 0;
+
+ channel_width = encoder->channel[channel_number].width;
+ channel_height = encoder->channel[channel_number].height;
+
+ *channel_width_out = channel_width;
+ *channel_height_out = channel_height;
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Adjust the height of encoded layer
+
+ Interleaved frames are encoded as separate layers with half the height.
+*/
+DIMENSION EncodedLayerHeight(ENCODER *encoder, DIMENSION height)
+{
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+ assert(encoder != NULL);
+ if (encoder->progressive == 0) {
+ height /= 2;
+ }
+#endif
+
+ return height;
+}
+
+/*!
+ @brief Compute the dimensions of the image as reported by the ImageWidth and ImageHeight parameters
+
+ The image width is the maximum width of all component arrays and the image height is the maximum height
+ of all component arrays.
+*/
+CODEC_ERROR GetMaximumChannelDimensions(const UNPACKED_IMAGE *image, DIMENSION *width_out, DIMENSION *height_out)
+{
+ DIMENSION width = 0;
+ DIMENSION height = 0;
+ int channel_number;
+
+ if (image == NULL) {
+ return CODEC_ERROR_UNEXPECTED;
+ }
+
+ for (channel_number = 0; channel_number < image->component_count; channel_number++)
+ {
+ if (width < image->component_array_list[channel_number].width) {
+ width = image->component_array_list[channel_number].width;
+ }
+
+ if (height < image->component_array_list[channel_number].height) {
+ height = image->component_array_list[channel_number].height;
+ }
+ }
+
+ if (width_out != NULL) {
+ *width_out = width;
+ }
+
+ if (height_out != NULL) {
+ *height_out = height;
+ }
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Set the bit for the specified subband in the decoded band mask
+
+ The decoded subband mask is used to track which subbands have been
+ decoded in teh current channel. It is reset at the start of each
+ channel.
+
+ The decoded subband mask is used when decoding a sample at less
+ than full resolution. The mask indicates when enough subbands
+ have been decoded for a channel and that remaining portion of the
+ encoded sample for the current channel may be skipped.
+*/
+CODEC_ERROR SetEncodedBandMask(CODEC_STATE *codec, int subband)
+{
+ if (0 <= subband && subband < MAX_SUBBAND_COUNT) {
+ codec->decoded_subband_mask |= (1 << subband);
+ }
+ return CODEC_ERROR_OKAY;
+}
+
+
+/*!
+ @brief Encoded the lowpass band from the bitstream
+
+ The wavelet at the highest level is passes as an argument.
+ This routine decodes lowpass band in the bitstream into the
+ lowpass band of the wavelet.
+*/
+CODEC_ERROR EncodeLowpassBand(ENCODER *encoder, WAVELET *wavelet, int channel_number, BITSTREAM *stream)
+{
+ CODEC_STATE *codec = &encoder->codec;
+ //FILE *logfile = encoder->logfile;
+ //int subband = 0;
+ //int level = encoder->wavelet_count;
+ int width = wavelet->width;
+ int height = wavelet->height;
+ uint8_t *lowpass_row_ptr;
+ int lowpass_pitch;
+ int row;
+
+ PRECISION lowpass_precision = encoder->channel[channel_number].lowpass_precision;
+
+ lowpass_row_ptr = (uint8_t *)wavelet->data[LL_BAND];
+ lowpass_pitch = wavelet->pitch;
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+ if (IsSectionEnabled(encoder, SECTION_NUMBER_SUBBAND))
+ {
+ // Make sure that the bitstream is aligned to a segment boundary
+ AlignBitsSegment(stream);
+
+ // Write the channel section header into the bitstream
+ BeginSubbandSection(encoder, stream);
+ }
+#endif
+
+ // Write the tag-value pairs for the lowpass band to the bitstream
+ PutVideoLowpassHeader(encoder, channel_number, stream);
+
+ // Check that the bitstream is tag aligned before writing the pixels
+ assert(IsAlignedSegment(stream));
+
+ for (row = 0; row < height; row++)
+ {
+ uint16_t *lowpass = (uint16_t *)lowpass_row_ptr;
+ int column;
+
+ for (column = 0; column < width; column++)
+ {
+ BITWORD coefficient = lowpass[column];
+ //assert(0 <= lowpass[column] && lowpass[column] <= COEFFICIENT_MAX);
+ assert(lowpass[column] <= COEFFICIENT_MAX);
+ assert(coefficient <= COEFFICIENT_MAX);
+ PutBits(stream, coefficient, lowpass_precision);
+ }
+
+ lowpass_row_ptr += lowpass_pitch;
+ }
+
+ // Align the bitstream to a segment boundary
+ AlignBitsSegment(stream);
+
+ PutVideoLowpassTrailer(stream);
+
+ // Update the subband number in the codec state
+ codec->subband_number++;
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+ if (IsSectionEnabled(encoder, SECTION_NUMBER_SUBBAND))
+ {
+ // Make sure that the bitstream is aligned to a segment boundary
+ AlignBitsSegment(stream);
+
+ // Update the section header with the actual size of the subband section
+ EndSection(stream);
+ }
+#endif
+
+ return CODEC_ERROR_OKAY;
+}
+
+CODEC_ERROR PutVideoSubbandHeader(ENCODER *encoder, int subband_number, QUANT quantization, BITSTREAM *stream)
+{
+ CODEC_STATE *codec = &encoder->codec;
+
+ if (subband_number != codec->subband_number) {
+ PutTagPair(stream, CODEC_TAG_SubbandNumber, subband_number);
+ codec->subband_number = subband_number;
+ }
+
+ if (quantization != codec->band.quantization) {
+ PutTagPair(stream, CODEC_TAG_Quantization, quantization);
+ codec->band.quantization = quantization;
+ }
+
+ // Write the chunk header for the codeblock
+ PushSampleSize(stream, CODEC_TAG_LargeCodeblock);
+
+ return CODEC_ERROR_OKAY;
+}
+
+/*!
+ @brief Encode the highpass band into the bitstream
+
+ The specified wavelet band is decoded from the bitstream
+ using the codebook and encoding method specified in the
+ bitstream.
+*/
+CODEC_ERROR EncodeHighpassBand(ENCODER *encoder, WAVELET *wavelet, int band, int subband, BITSTREAM *stream)
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+ CODEC_STATE *codec = &encoder->codec;
+
+ DIMENSION band_width = wavelet->width;
+ DIMENSION band_height = wavelet->height;
+
+ void *band_data = wavelet->data[band];
+ DIMENSION band_pitch = wavelet->pitch;
+
+ QUANT quantization = wavelet->quant[band];
+ //uint16_t scale = wavelet->scale[band];
+
+ //int divisor = 0;
+ //int peaks_coding = 0;
+
+ ENCODER_CODESET *codeset = encoder->codeset;
+
+ //int encoding_method = BAND_ENCODING_RUNLENGTHS;
+
+ // Check that the band header starts on a tag boundary
+ assert(IsAlignedTag(stream));
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+ if (IsSectionEnabled(encoder, SECTION_NUMBER_SUBBAND))
+ {
+ // Make sure that the bitstream is aligned to a segment boundary
+ AlignBitsSegment(stream);
+
+ // Write the channel section header into the bitstream
+ BeginSubbandSection(encoder, stream);
+ }
+#endif
+
+ // Output the tag-value pairs for this subband
+ PutVideoSubbandHeader(encoder, subband, quantization, stream);
+
+ // Encode the highpass coefficients for this subband into the bitstream
+ error = EncodeHighpassBandRowRuns(stream, codeset, band_data, band_width, band_height, band_pitch);
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+
+ // Align the bitstream to a segment boundary
+ AlignBitsSegment(stream);
+
+ // Output the band trailer
+ PutVideoSubbandTrailer(encoder, stream);
+
+ // Update the subband number in the codec state
+ codec->subband_number++;
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+ if (IsSectionEnabled(encoder, SECTION_NUMBER_SUBBAND))
+ {
+ // Make sure that the bitstream is aligned to a segment boundary
+ AlignBitsSegment(stream);
+
+ // Update the section header with the actual size of the subband section
+ EndSection(stream);
+ }
+#endif
+
+ return CODEC_ERROR_OKAY;
+}
+
+STATIC_INLINE void write_bits(uint8_t** buffer, uint32_t bits)
+{
+ uint32_t word = Swap32(bits);
+ *( (uint32_t*)(*buffer) ) = word;
+}
+
/*!
    @brief Append a run of zero bits to the bit accumulator

    Because the appended bits are all zero, nothing needs to be OR-ed into
    the accumulator word: only the bit count advances until the 32-bit
    word is full.  When the run does not fit in the space remaining, the
    full accumulator word is flushed to the buffer and the leftover part
    of the run becomes the start of a fresh (all-zero) accumulator word.

    @return Updated accumulator state (packed bits and bit count).

    NOTE(review): assumes size <= bit_word_count so at most one flush is
    needed — the caller in EncodeHighpassBandRowRuns only uses this for
    runs shorter than 12 bits; confirm before reusing elsewhere.
*/
STATIC_INLINE VLE PutZeroBits(uint8_t** buffer, VLE stream_bits, uint_fast8_t size )
{
    // Bits still free in the current 32-bit accumulator word
    BITCOUNT unused_bit_count = bit_word_count - stream_bits.size;

    if ( size > unused_bit_count )
    {
        // The part of the run that fits just fills the word with zeros
        if (stream_bits.size < bit_word_count)
        {
            size -= unused_bit_count;
        }

        // Flush the full word and start a new all-zero word with the remainder
        write_bits(buffer, stream_bits.bits);
        *buffer += 4;

        stream_bits.size = size;
        stream_bits.bits = 0;
    }
    else
    {
        // The whole run fits; only the bit count changes
        stream_bits.size += size;
    }

    return stream_bits;
}
+
/*!
    @brief Append a variable-length codeword to the bit accumulator

    Bits are packed most-significant-first into a 32-bit accumulator word.
    When the codeword does not fit in the space remaining, the word is
    filled with the high part of the codeword, flushed to the buffer, and
    the low part of the codeword starts the next accumulator word.

    @return Updated accumulator state (packed bits and bit count).

    NOTE(review): assumes size <= bit_word_count so at most one flush is
    needed — codebook entries fit in one word; confirm before reusing.
*/
STATIC_INLINE VLE PutBitsCore(uint8_t** buffer, VLE stream_bits, uint32_t bits, uint_fast8_t size )
{
    // Bits still free in the current 32-bit accumulator word
    BITCOUNT unused_bit_count = bit_word_count - stream_bits.size;

    if ( size > unused_bit_count)
    {
        // Fill the remainder of the word with the high bits of the codeword
        if (stream_bits.size < bit_word_count)
        {
            stream_bits.bits |= (bits >> (size - unused_bit_count));
            size -= unused_bit_count;
        }

        // Flush the full word to the buffer
        write_bits(buffer, stream_bits.bits);
        *buffer += 4;

        // The low bits of the codeword start the next word (left justified)
        stream_bits.size = size;
        stream_bits.bits = bits << (bit_word_count - size);
    }
    else
    {
        // The whole codeword fits below the bits already packed
        stream_bits.bits |= (bits << (unused_bit_count - size));
        stream_bits.size += size;
    }

    return stream_bits;
}
+
/*!
    @brief Append a magnitude codeword followed by a one-bit sign

    The codeword is appended with PutBitsCore, then a single sign bit is
    appended: 0 for positive values, 1 for negative values.  If the
    accumulator word is exactly full after the codeword, it is flushed
    and the sign bit becomes the first (most significant) bit of the
    next accumulator word.

    @return Updated accumulator state (packed bits and bit count).
*/
STATIC_INLINE VLE PutBitsCoreWithSign(uint8_t** buffer, VLE stream_bits, uint32_t bits, uint_fast8_t size, bool positive )
{
    stream_bits = PutBitsCore( buffer, stream_bits, bits, size );

    // Bits still free after appending the magnitude codeword
    BITCOUNT unused_bit_count = bit_word_count - stream_bits.size;

    if ( unused_bit_count == 0 )
    {
        // Word is full: flush it and start a new word holding only the sign bit
        write_bits(buffer, stream_bits.bits);
        *buffer += 4;

        stream_bits.size = 1;

        if( positive == false )
            stream_bits.bits = 1 << (bit_word_count - 1);
        else
            stream_bits.bits = 0;
    }
    else
    {
        // Append the sign bit after the magnitude codeword
        stream_bits.size += 1;

        if( positive == false )
            stream_bits.bits |= (1 << (unused_bit_count - 1));
    }

    return stream_bits;
}
+
+/*!
+ @brief Encode the highpass band from the bitstream
+
+ This routine does not encode runs of zeros across row boundaries.
+*/
+CODEC_ERROR EncodeHighpassBandRowRuns(BITSTREAM *stream, ENCODER_CODESET *codeset, PIXEL *data,
+ DIMENSION width, DIMENSION height, DIMENSION pitch)
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+ int row_padding;
+ int row = 0;
+ //int column = 0;
+ //size_t index = 0;
+
+ // The encoder uses the codebooks for magnitudes and runs of zeros
+ const MAGS_TABLE *mags_table = codeset->mags_table;
+ const RUNS_TABLE *runs_table = codeset->runs_table;
+ uint32_t runs_table_length = runs_table->length;
+ RLC *rlc = (RLC *)((uint8_t *)runs_table + sizeof(RUNS_TABLE));
+
+ // The band is terminated by the band end codeword in the codebook
+ const CODEBOOK *codebook = codeset->codebook;
+
+ PIXEL *rowptr = data;
+
+ // Convert the pitch to units of pixels
+ assert((pitch % sizeof(PIXEL)) == 0);
+ pitch /= sizeof(PIXEL);
+
+ // Check that the band dimensions are reasonable
+ assert(width <= pitch);
+
+ // Compute the number of values of padding at the end of each row
+ row_padding = pitch - width;
+
+ VLE *mags_table_entry = (VLE *)((uint8_t *)mags_table + sizeof(MAGS_TABLE));
+
+ VLE stream_bits;
+
+ stream_bits.bits = stream->buffer;
+ stream_bits.size = stream->count;
+
+ struct _stream *bit_stream = stream->stream;
+
+ int mags_table_length_minus_1 = mags_table->length - 1;
+
+ uint8_t* stream_buffer = (uint8_t *)bit_stream->location.memory.buffer + bit_stream->byte_count;
+ uint8_t* stream_buffer_orig = stream_buffer;
+
+ uint32_t count = 0;
+ for (row = 0; row < height; row++)
+ {
+ uint32_t index = 0; // Start at the beginning of the row
+
+ // Search the row for runs of zeros and nonzero values
+ while (1)
+ {
+ // Loop invariant
+ assert(index < width);
+
+ {
+ PIXEL* start = rowptr + index;
+ PIXEL* end = rowptr + width;
+
+ for (; *(start) == 0 && start != end; start++)
+ {
+
+ }
+
+ uint32_t x = start - (rowptr + index);
+
+ index += x;
+ count += x;
+ }
+
+ // Need to output a value?
+ if (index < width)
+ {
+ while (count > 0)
+ {
+ if( count < 12 )
+ {
+ stream_bits = PutZeroBits(&stream_buffer, stream_bits, count );
+ break;
+ }
+ else
+ {
+ uint32_t count_index = minimum(count, runs_table_length - 1);
+ assert(count_index < runs_table->length);
+
+ RLC rlc_val = rlc[count_index];
+
+ stream_bits = PutBitsCore(&stream_buffer, stream_bits, rlc_val.bits, rlc_val.size );
+
+ // Reduce the length of the run by the amount output
+ count -= rlc_val.count;
+ }
+ }
+
+ count = 0;
+
+ // The value zero is run length coded and handled by another routine
+ {
+ PIXEL value = rowptr[index++];
+ assert(value != 0);
+
+ PIXEL abs_value = minimum( abs(value), mags_table_length_minus_1 );
+
+ stream_bits = PutBitsCoreWithSign(&stream_buffer, stream_bits, mags_table_entry[abs_value].bits, mags_table_entry[abs_value].size, value > 0 );
+ }
+ }
+
+ // Add the end of row padding to the encoded length
+ if (index == width)
+ {
+ count += row_padding;
+ break;
+ }
+ }
+
+ // Should have processed the entire row
+ assert(index == width);
+
+ // Advance to the next row
+ rowptr += pitch;
+ }
+
+ stream->count = stream_bits.size;
+ stream->buffer = stream_bits.bits;
+ bit_stream->byte_count += (stream_buffer - stream_buffer_orig);
+
+ // // Need to output a pending run of zeros?
+ if (count > 0)
+ {
+ error = PutZeros(stream, runs_table, count);
+ if (error != CODEC_ERROR_OKAY) {
+ return error;
+ }
+ }
+
+ // Insert the special codeword that marks the end of the highpass band
+ error = PutSpecial(stream, codebook, SPECIAL_MARKER_BAND_END);
+
+ return error;
+}
+
+CODEC_ERROR PutVideoSubbandTrailer(ENCODER *encoder, BITSTREAM *stream)
+{
+ // Set the size of the large chunk for the highpass band codeblock
+ PopSampleSize(stream);
+
+ return CODEC_ERROR_OKAY;
+}
+
/*!
    @brief Read the segment at the specified offset in the bitstream

    This routine is used to read a segment that was written at an earlier
    location in the encoded sample. This allows the encoder to update, rather
    than overwrite, a segment that has already been written. Typically, this is
    done to insert the size or offset to a portion of the sample (syntax
    element) into a segment that acts as an index to the syntax element.
 */
CODEC_ERROR GetSampleOffsetSegment(BITSTREAM *bitstream, uint32_t offset, TAGVALUE *segment)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
    uint32_t buffer;

    // Read one 32-bit segment from the underlying stream at the given byte offset
    error = GetBlock(bitstream->stream, &buffer, sizeof(buffer), offset);
    if (error != CODEC_ERROR_OKAY) {
        return error;
    }

    // Translate the segment to native byte order
    segment->longword = Swap32(buffer);

    return CODEC_ERROR_OKAY;
}
+
+/*!
+ @brief Write the lowpass band header into the bitstream
+
+ Each channel is encoded separately, so the lowpass band (subband zero)
+ is the lowpass band in the wavelet at the highest level for each channel.
+
+ The last element in the lowpass band header is a segment that contains the
+ size of this subband. The actual size is updated when the lowpass trailer
+ is written (see @ref PutVideoLowpassTrailer).
+
 The lowpass start code is used to uniquely identify the start of the lowpass
 band header and is used by the decoder to navigate to the next channel in the
 bitstream.
+
+ @todo Consider writing a composite lowpass band for all channels with
+ interleaved rows to facilitate access to the thumbnail image in the
+ encoded sample.
+ */
+CODEC_ERROR PutVideoLowpassHeader(ENCODER *encoder, int channel_number, BITSTREAM *stream)
+{
+ CODEC_STATE *codec = &encoder->codec;
+ PRECISION lowpass_precision = encoder->channel[channel_number].lowpass_precision;
+
+ // Output the subband number
+ if (codec->subband_number != 0)
+ {
+ PutTagPair(stream, CODEC_TAG_SubbandNumber, 0);
+ codec->subband_number = 0;
+ }
+
+ // Output the lowpass precision
+ //if (encoder->lowpass.precision != codec->lowpass.precision)
+ if (lowpass_precision != codec->lowpass_precision)
+ {
+ PutTagPair(stream, CODEC_TAG_LowpassPrecision, lowpass_precision);
+ codec->lowpass_precision = lowpass_precision;
+ }
+
+ // Write the chunk header for the codeblock
+ PushSampleSize(stream, CODEC_TAG_LargeCodeblock);
+
+ return CODEC_ERROR_OKAY;
+}
+
diff --git a/gpr/source/lib/vc5_encoder/encoder.h b/gpr/source/lib/vc5_encoder/encoder.h
new file mode 100755
index 0000000..1048628
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/encoder.h
@@ -0,0 +1,333 @@
+/*! @file encoder.h
+ *
+ * @brief Declaration of the data structures and constants used for core encoding.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ENCODER_H
+#define ENCODER_H
+
/*!
    @brief Data structure for the pixel or picture aspect ratio

    The ratio is stored as the pair x:y (numerator:denominator).

    @todo Should the members of the aspect ratio data structure be unsigned?
*/
typedef struct _aspect_ratio
{
    int16_t x; //!< Numerator of the aspect ratio
    int16_t y; //!< Denominator of the aspect ratio

} ASPECT_RATIO;
+
/*!
    @brief Data structure for the buffers and information used by the encoder

    The encoder data structure contains information that will be
    used by the encoder for encoding every sample in the sequence.
    Information that varies during encoding, such as the current
    subband index or the dimensions of the bands in the wavelet that
    is being encoded, is stored in the codec state.

    The encoded dimensions are the width and height of the array of pixels
    for each encoded channel (image plane), including padding added to
    satisfy the requirements of the wavelet transforms.  In the case
    of 4:2:2 sampling, the encoded width and height are for the luma channel.

    The display dimensions are the width and height of the display aperture,
    the displayable portion of the decoded image with padding removed.

    The display dimensions can include a row and column offset to trim
    top rows and left columns from the decoded image prior to display.

    The decoded dimensions equal the encoded dimensions at full resolution
    and are reduced by a power of two if decoded to a lower resolution.
    The decoded dimensions are derived from the encoded dimensions and the
    decoded resolution.

    The decoded dimensions are used to allocate the wavelet tree for the
    lowpass and highpass coefficients decoded from the bitstream.  It is
    not necessary to allocate wavelets for larger resolutions than the
    decoded resolution.

    For Bayer encoded images, the encoded dimensions are half the width
    and height of the input dimensions (after windowing).  Typically,
    media containers report the display dimensions as twice the encoded
    dimensions since a demosaic algorithm must be applied to produce a
    displayable image that looks right to most people.

    @todo Consider changing the transform data structure to use a
    vector of wavelets rather than a vector of wavelet pointers.
*/
typedef struct _encoder
{
    // CODEC codec; //!< Common fields for both the encoder and decoder

    FILE *logfile; //!< File for writing debugging information
    CODEC_ERROR error; //!< Error code from the most recent codec operation
    gpr_allocator *allocator; //!< Memory allocator used to allocate all dynamic data
    CODEC_STATE codec; //!< Information gathered while encoding the current sample
    VERSION version; //!< Codec version (major, minor, revision, build)

    //! Parts of the VC-5 standard that are supported at runtime by the codec implementation
    ENABLED_PARTS enabled_parts;

    uint64_t frame_number; //!< Every sample in a clip has a unique frame number

    //! Number of color channels in the input and encoded images
    uint_fast8_t channel_count;

    //! Number of wavelet transforms in each channel
    uint_fast8_t wavelet_count;

    //! Internal precision used by this encoder
    PRECISION internal_precision;

#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
    IMAGE_FORMAT image_format; //!< Type of the image represented by the bitstream
    DIMENSION image_width; //!< Number of samples per row in the image represented by the bitstream
    DIMENSION image_height; //!< Number of rows of samples in the image represented by the bitstream
    DIMENSION pattern_width; //!< Number of samples per row in each pattern element
    DIMENSION pattern_height; //!< Number of rows of samples in each pattern element
    DIMENSION components_per_sample; //!< Number of components per sample in the image
    DIMENSION max_bits_per_component; //!< Upper bound on the number of significant bits per component value
#else
    DIMENSION image_width; //!< Upper bound on the width of each channel
    DIMENSION image_height; //!< Upper bound on the height of each channel
#endif

#if VC5_ENABLED_PART(VC5_PART_LAYERS)
    //! Progressive frame flag
    BOOLEAN progressive;

    // Interlaced frame with the top field encoded first
    BOOLEAN top_field_first;

    // The encoded frame is upside down (not used)
    BOOLEAN frame_inverted;
#endif

    struct _channel
    {
        DIMENSION width; //!< Width of the next channel in the bitstream
        DIMENSION height; //!< Height of the next channel in the bitstream

        //! Precision of the component array for the next channel in the bitstream
        PRECISION bits_per_component;

        //! Number of bits per lowpass coefficient
        PRECISION lowpass_precision;

    } channel[MAX_CHANNEL_COUNT]; //!< Information about each channel

    //! Dimensions and format of the image that was input to the encoder
    struct _input
    {
        DIMENSION width; //!< Width of the image input to the encoder
        DIMENSION height; //!< Height of the image input to the encoder
        PIXEL_FORMAT format; //!< Pixel format of the image input to the encoder

    } input; //!< Information about the image input to the encoder

#if VC5_ENABLED_PART(VC5_PART_LAYERS)
    uint_least8_t layer_count; //!< Number of subsamples in each sample
#endif

    //! Wavelet tree for each channel
    TRANSFORM transform[MAX_CHANNEL_COUNT];

    //! Codebook to use for encoding
    ENCODER_CODESET *codeset;

    //! Scratch buffer for unpacking the input image
    PIXEL *unpacked_buffer[MAX_CHANNEL_COUNT];

    //! Parameter that controls the amount of rounding before quantization
    int midpoint_prequant;

    //! Table for the order in which channels are encoded into the bitstream
    CHANNEL channel_order_table[MAX_CHANNEL_COUNT];

    //! Number of entries in the channel order table (may be less than the channel count)
    int channel_order_count;

#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
    uint8_t image_sequence_identifier[16]; //!< UUID used for the unique image identifier
    uint32_t image_sequence_number; //!< Number of the image in the encoded sequence
#endif

#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
    COMPONENT_TRANSFORM *component_transform; //!< Optional inverse component transform to record in the bitstream
    COMPONENT_PERMUTATION *component_permutation; //!< Optional component permutation to record in the bitstream
#endif

    //! Six rows of horizontal lowpass results for each channel
    PIXEL *lowpass_buffer[MAX_WAVELET_COUNT][ROW_BUFFER_COUNT];

    //! Six rows of horizontal highpass results for each channel
    PIXEL *highpass_buffer[MAX_WAVELET_COUNT][ROW_BUFFER_COUNT];

#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
    ENABLED_SECTIONS enabled_sections; //!< Bitstream sections enabled for this encoder
#endif

} ENCODER;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ CODEC_ERROR InitEncoder(ENCODER *encoder, const gpr_allocator *allocator, const VERSION *version);
+
+ //TAGWORD PackedEncoderVersion(ENCODER *encoder);
+
+ CODEC_ERROR PrepareEncoder(ENCODER *encoder,
+ const UNPACKED_IMAGE *image,
+ const ENCODER_PARAMETERS *parameters);
+
+ CODEC_ERROR PrepareEncoderState(ENCODER *encoder,
+ const UNPACKED_IMAGE *image,
+ const ENCODER_PARAMETERS *parameters);
+
+ CODEC_ERROR SetInputChannelFormats(ENCODER *encoder, ENCODER_PARAMETERS *parameters);
+
+ CODEC_ERROR ReleaseEncoder(ENCODER *encoder);
+
+ CODEC_ERROR AllocEncoderTransforms(ENCODER *encoder);
+
+ CODEC_ERROR AllocEncoderBuffers(ENCODER *encoder);
+
+ //CODEC_ERROR EncodeStream(IMAGE *image, STREAM *stream, PARAMETERS *parameters);
+ CODEC_ERROR EncodeImage(IMAGE *image, STREAM *stream, RGB_IMAGE *rgb_image, ENCODER_PARAMETERS *parameters);
+
+ //CODEC_ERROR EncodeSingleImage(ENCODER *encoder, IMAGE *image, BITSTREAM *stream);
+ CODEC_ERROR EncodingProcess(ENCODER *encoder,
+ const UNPACKED_IMAGE *image,
+ BITSTREAM *stream,
+ const ENCODER_PARAMETERS *parameters);
+
+ CODEC_ERROR EncodeSingleImage(ENCODER *encoder, const UNPACKED_IMAGE *image, BITSTREAM *stream);
+
+ CODEC_ERROR EncodeSingleChannel(ENCODER *encoder, void *buffer, size_t pitch, BITSTREAM *stream);
+
+ //CODEC_ERROR EncodeMultipleImages(ENCODER *encoder, IMAGE *image_array[], int frame_count, BITSTREAM *stream);
+
+ CODEC_ERROR PrepareEncoderTransforms(ENCODER *encoder);
+
+ //CODEC_ERROR ImageUnpackingProcess(ENCODER *encoder, IMAGE *image);
+ CODEC_ERROR ImageUnpackingProcess(const PACKED_IMAGE *packed_image,
+ UNPACKED_IMAGE *unpacked_image,
+ const ENCODER_PARAMETERS *parameters,
+ gpr_allocator *allocator);
+
+ CODEC_ERROR UnpackImage(const PACKED_IMAGE *input, UNPACKED_IMAGE *output, ENABLED_PARTS enabled_parts);
+
+ CODEC_ERROR PreprocessImageRow(uint8_t *input, DIMENSION image_width, uint8_t *output);
+
+ CODEC_ERROR UnpackImageRow(uint8_t *input_row_ptr,
+ DIMENSION image_width,
+ PIXEL_FORMAT pixel_format,
+ PIXEL *output_row_ptr[],
+ PRECISION bits_per_component[],
+ int channel_count,
+ ENABLED_PARTS enabled_parts,
+ int raw_shift);
+
+ CODEC_ERROR EncodeBitstreamHeader(ENCODER *encoder, BITSTREAM *bitstream);
+
+ CODEC_ERROR EncodeBitstreamTrailer(ENCODER *encoder, BITSTREAM *bitstream);
+
+ CODEC_ERROR EncodeExtensionHeader(ENCODER *encoder, BITSTREAM *bitstream);
+
+ CODEC_ERROR EncodeExtensionTrailer(ENCODER *encoder, BITSTREAM *bitstream);
+
+ //CODEC_ERROR EncodeLayer(ENCODER *encoder, void *buffer, size_t pitch, BITSTREAM *stream);
+ CODEC_ERROR EncodeMultipleChannels(ENCODER *encoder, const UNPACKED_IMAGE *image, BITSTREAM *stream);
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+ CODEC_ERROR EncodeLayerHeader(ENCODER *encoder, BITSTREAM *bitstream);
+ CODEC_ERROR EncodeLayerTrailer(ENCODER *encoder, BITSTREAM *bitstream);
+#endif
+
+ CODEC_ERROR SetEncoderQuantization(ENCODER *encoder,
+ const ENCODER_PARAMETERS *parameters);
+
+ CODEC_ERROR SetTransformQuantTable(ENCODER *encoder, int channel, const QUANT table[], int length);
+
+ CODEC_ERROR GetChannelDimensions(ENCODER *encoder,
+ int channel_number,
+ DIMENSION *channel_width_out,
+ DIMENSION *channel_height_out);
+
+ CODEC_ERROR GetMaximumChannelDimensions(const UNPACKED_IMAGE *image, DIMENSION *width_out, DIMENSION *height_out);
+
+ DIMENSION EncodedLayerHeight(ENCODER *encoder, DIMENSION height);
+
+ CODEC_ERROR SetEncodedBandMask(CODEC_STATE *codec, int subband);
+
+ CODEC_ERROR EncodeChannelSubbands(ENCODER *encoder, int channel, BITSTREAM *stream);
+
+ CODEC_ERROR EncodeChannelHeader(ENCODER *encoder,
+ int channel_number,
+ BITSTREAM *stream);
+
+ CODEC_ERROR EncodeChannelTrailer(ENCODER *encoder, int channel, BITSTREAM *stream);
+
+ //CODEC_ERROR EncodeLayerChannels(ENCODER *encoder, BITSTREAM *stream);
+ CODEC_ERROR EncodeChannelWavelets(ENCODER *encoder, BITSTREAM *stream);
+
+ CODEC_ERROR PutVideoLowpassHeader(ENCODER *encoder, int channel_number, BITSTREAM *stream);
+
+ CODEC_ERROR PutVideoSubbandHeader(ENCODER *encoder, int subband, QUANT quantization, BITSTREAM *stream);
+ CODEC_ERROR PutVideoSubbandTrailer(ENCODER *encoder, BITSTREAM *stream);
+
+ CODEC_ERROR TransformForwardSpatialQuantFrame(ENCODER *encoder, void *buffer, size_t pitch);
+
+ CODEC_ERROR AllocateEncoderHorizontalBuffers(ENCODER *encoder);
+
+ CODEC_ERROR DeallocateEncoderHorizontalBuffers(ENCODER *encoder);
+
+ CODEC_ERROR AllocateEncoderUnpackingBuffers(ENCODER *encoder, int frame_width);
+
+ CODEC_ERROR DeallocateEncoderUnpackingBuffers(ENCODER *encoder);
+
+ CODEC_ERROR AllocateHorizontalBuffers(gpr_allocator *allocator,
+ PIXEL *lowpass_buffer[],
+ PIXEL *highpass_buffer[],
+ int buffer_width);
+
+ CODEC_ERROR DeallocateHorizontalBuffers(gpr_allocator *allocator,
+ PIXEL *lowpass_buffer[],
+ PIXEL *highpass_buffer[]);
+
+
+ CODEC_ERROR PadWaveletBands(ENCODER *encoder, WAVELET *wavelet);
+
+ CODEC_ERROR EncodeLowpassBand(ENCODER *encoder, WAVELET *wavelet, int channel_number, BITSTREAM *stream);
+
+ CODEC_ERROR EncodeHighpassBand(ENCODER *encoder, WAVELET *wavelet, int band, int subband, BITSTREAM *stream);
+
+ CODEC_ERROR EncodeHighpassBandLongRuns(BITSTREAM *stream, ENCODER_CODESET *codeset, PIXEL *data,
+ DIMENSION width, DIMENSION height, DIMENSION pitch);
+
+ CODEC_ERROR EncodeHighpassBandRowRuns(BITSTREAM *stream, ENCODER_CODESET *codeset, PIXEL *data,
+ DIMENSION width, DIMENSION height, DIMENSION pitch);
+
+ CODEC_ERROR GetSampleOffsetSegment(BITSTREAM *bitstream, uint32_t offset, TAGVALUE *segment_out);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // ENCODER_H
diff --git a/gpr/source/lib/vc5_encoder/forward.c b/gpr/source/lib/vc5_encoder/forward.c
new file mode 100755
index 0000000..ab54dd8
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/forward.c
@@ -0,0 +1,851 @@
+/*! @file forward.c
+ *
+ * @brief Implementation of the forward wavelet transform functions.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+#if ENABLED(NEON)
+#include <arm_neon.h>
+#endif
+
+//! Rounding added to the highpass sum before division
+static const int32_t rounding = 4;
+
+/*!
+    @brief Quantize a single wavelet coefficient.
+
+    Computes sign(value) * (((|value| + midpoint) * multiplier) >> 16), where
+    multiplier is the 16.16 fixed-point reciprocal of the quantization divisor,
+    so the division becomes a multiply and shift.  The result is clamped to the
+    pixel range by ClampPixel.
+*/
+STATIC_INLINE PIXEL QuantizeValue(int16_t value, int32_t midpoint, int32_t multiplier )
+{
+    int less_than_zero = 0;
+    int negate_less_than_zero = 0;
+
+    // Use a 32-bit intermediate: abs(value) + midpoint and the subsequent
+    // fixed-point product can exceed the int16_t range.  The original code
+    // stored the intermediate in an int16_t, which could truncate large
+    // coefficients before ClampPixel ever saw them.
+    int32_t x = abs(value) + midpoint;
+
+    if( value < 0 )
+    {
+        // Two's complement negate applied after quantizing the magnitude:
+        // (x ^ -1) + 1 == -x
+        less_than_zero = -1;
+        negate_less_than_zero = 1;
+    }
+
+    // Multiply by the 16.16 reciprocal and take the integer part
+    x = (x * multiplier) >> 16;
+
+    // Restore the sign of the original coefficient
+    x = x ^ less_than_zero;
+    x = x + negate_less_than_zero;
+
+    return ClampPixel(x);
+}
+
+/*!
+    @brief Vertical border filter core (scalar) for eight columns.
+
+    Applies the top- or bottom-border 2-6 wavelet formulas to six rows of
+    coefficients, producing eight lowpass and eight highpass outputs that
+    start at the given column.
+*/
+static void FilterVerticalTopBottom_Core_8x_C_(PIXEL *coefficients[], int column, int16_t* highpass, int16_t* lowpass, bool top )
+{
+    // Six-tap border filters (divided by 8 after rounding)
+    static const int border_taps_top[6]    = { 5, -11, 4, 4, -1, -1 };
+    static const int border_taps_bottom[6] = { 1, 1, -4, -4, 11, -5 };
+
+    const int *taps = top ? border_taps_top : border_taps_bottom;
+
+    // The lowpass output sums the two rows nearest the border
+    const int lowpass_row = top ? 0 : 4;
+
+    int i;
+
+    for (i = 0; i < 8; i++)
+    {
+        int tap;
+        int32_t accumulator = rounding;     // rounding is added before the divide
+
+        // Lowpass: sum of the border row pair
+        lowpass[i] = coefficients[lowpass_row][column + i] + coefficients[lowpass_row + 1][column + i];
+
+        // Highpass: six-tap border filter followed by a rounded divide by 8
+        for (tap = 0; tap < 6; tap++)
+        {
+            accumulator += taps[tap] * coefficients[tap][column + i];
+        }
+
+        highpass[i] = DivideByShift(accumulator, 3);
+    }
+}
+
+#if ENABLED(NEON)
+
+//! Lane mask selecting the odd (second-of-pair) 16-bit lanes; used with vbslq
+//! to negate them so a pairwise add yields the difference x[2i] - x[2i+1]
+static const uint16x8_t mask = {0x0000, 0xFFFF,0x0000,0xFFFF,0x0000,0xFFFF,0x0000, 0xFFFF};
+
+#define HorizontalFilter_Prescale2_4x HorizontalFilter_Prescale2_4x_NEON_
+/*!
+    @brief NEON horizontal 2-6 wavelet filter with a prescale shift of two.
+
+    Produces four lowpass and four highpass coefficients from one window of the
+    input row.  Each input sample is first divided by four with rounding (the
+    prescale) before filtering.
+
+    NOTE(review): loads 16 input pixels starting at input -- assumes the caller
+    guarantees that much readable data (see the loop bounds in FilterHorizontalRow).
+*/
+void HorizontalFilter_Prescale2_4x_NEON_(PIXEL *input, PIXEL* lowpass, PIXEL* highpass )
+{
+    const int prescale_rounding = 3;
+    const int prescale = 2;
+
+    int32x4_t __pairwise_sum_0_7, __highpass;
+    int32x4_t __diff;
+    int16x8_t __input_2_9;
+
+    {
+        const int16x8_t __prescale_rounding = vdupq_n_s16 (prescale_rounding);
+        const int16x8_t __shift = vdupq_n_s16 (-prescale);
+
+        int16x8_t __input_0_7 = vld1q_s16( input );
+        __input_2_9 = vld1q_s16( input + 2 );
+        int16x8_t __input_8_15 = vld1q_s16( input + 8 );
+
+        // Prescale: (x + 3) >> 2 (vshlq with a negative count shifts right)
+        __input_0_7 = vaddq_s16( __input_0_7, __prescale_rounding );
+        __input_0_7 = vshlq_s16( __input_0_7, __shift );
+
+        __input_8_15 = vaddq_s16( __input_8_15, __prescale_rounding );
+        __input_8_15 = vshlq_s16( __input_8_15, __shift );
+
+        // Pairwise sums produce the x[2i] + x[2i+1] filter terms
+        __pairwise_sum_0_7 = vpaddlq_s16(__input_0_7);
+        int32x4_t __pairwise_sum_8_15 = vpaddlq_s16(__input_8_15);
+
+        // Negate the odd lanes so the pairwise sum yields x[2i] - x[2i+1],
+        // then shift by one 32-bit lane to align each difference with the
+        // center pair of its filter window
+        __input_0_7 = vbslq_s16(mask, vnegq_s16(__input_0_7), __input_0_7);
+        __input_8_15 = vbslq_s16(mask, vnegq_s16(__input_8_15), __input_8_15);
+        __diff = vextq_s32(vpaddlq_s16( __input_0_7 ), vpaddlq_s16( __input_8_15 ), 1);
+
+        // Pair sums two positions ahead of each filter window
+        __highpass = vcombine_s32( vget_high_s32(__pairwise_sum_0_7), vget_low_s32(__pairwise_sum_8_15) );
+    }
+
+    // High pass band
+    {
+        const int32x4_t __rounding = vdupq_n_s32(rounding);
+
+        // (sum ahead - sum behind + rounding) / 8 + center difference
+        __highpass = vsubq_s32( __highpass, __pairwise_sum_0_7 );
+        __highpass = vaddq_s32( __highpass, __rounding );
+        __highpass = vshrq_n_s32( __highpass, 3 );
+        __highpass = vqaddq_s32( __highpass, __diff ); // Don't need to clamp because we are using a saturating instruction
+
+        vst1_s16(highpass, vmovn_s32(__highpass) );
+    }
+
+    // Low pass band
+    {
+        const int32x4_t __prescale_rounding = vdupq_n_s32(prescale_rounding);
+        const int32x4_t __shift = vdupq_n_s32(-prescale);
+
+        // Lowpass is the prescaled sum of each center pair
+        int32x4_t __pairwise_sum_2_9 = vpaddlq_s16(__input_2_9);
+
+        __pairwise_sum_2_9 = vaddq_s32(__pairwise_sum_2_9, __prescale_rounding);
+        __pairwise_sum_2_9 = vshlq_s32(__pairwise_sum_2_9, __shift);
+
+        vst1_s16(lowpass, vmovn_s32(__pairwise_sum_2_9) );
+    }
+}
+
+#define HorizontalFilter_Prescale0_4x HorizontalFilter_Prescale0_4x_NEON_
+/*!
+    @brief NEON horizontal 2-6 wavelet filter with no prescale.
+
+    Identical to HorizontalFilter_Prescale2_4x_NEON_ except that the inputs are
+    used as-is (prescale of zero).  Produces four lowpass and four highpass
+    coefficients per call.
+
+    NOTE(review): loads 16 input pixels starting at input -- assumes the caller
+    guarantees that much readable data (see the loop bounds in FilterHorizontalRow).
+*/
+void HorizontalFilter_Prescale0_4x_NEON_(PIXEL *input, PIXEL* lowpass, PIXEL* highpass )
+{
+    int32x4_t __pairwise_sum_0_7, __highpass;
+    int32x4_t __diff;
+    int16x8_t __input_2_9;
+
+    {
+        int16x8_t __input_0_7 = vld1q_s16( input );
+        __input_2_9 = vld1q_s16( input + 2 );
+        int16x8_t __input_8_15 = vld1q_s16( input + 8 );
+
+        // Pairwise sums produce the x[2i] + x[2i+1] filter terms
+        __pairwise_sum_0_7 = vpaddlq_s16(__input_0_7);
+        int32x4_t __pairwise_sum_8_15 = vpaddlq_s16(__input_8_15);
+
+        // Negate the odd lanes so the pairwise sum yields x[2i] - x[2i+1],
+        // then shift by one 32-bit lane to align each difference with the
+        // center pair of its filter window
+        __input_0_7 = vbslq_s16(mask, vnegq_s16(__input_0_7), __input_0_7);
+        __input_8_15 = vbslq_s16(mask, vnegq_s16(__input_8_15), __input_8_15);
+        __diff = vextq_s32(vpaddlq_s16( __input_0_7 ), vpaddlq_s16( __input_8_15 ), 1);
+
+        // Pair sums two positions ahead of each filter window
+        __highpass = vcombine_s32( vget_high_s32(__pairwise_sum_0_7), vget_low_s32(__pairwise_sum_8_15) );
+    }
+
+    // High pass band
+    {
+        const int32x4_t __rounding = vdupq_n_s32(rounding);
+
+        // (sum ahead - sum behind + rounding) / 8 + center difference
+        __highpass = vsubq_s32( __highpass, __pairwise_sum_0_7 );
+        __highpass = vaddq_s32( __highpass, __rounding );
+        __highpass = vshrq_n_s32( __highpass, 3 );
+        __highpass = vqaddq_s32( __highpass, __diff ); // Don't need to clamp because we are using a saturating instruction
+
+        vst1_s16(highpass, vmovn_s32(__highpass) );
+    }
+
+    // Low pass band
+    {
+        // Lowpass is simply the sum of each center pair
+        int32x4_t __pairwise_sum_2_9 = vpaddlq_s16(__input_2_9);
+
+        vst1_s16(lowpass, vmovn_s32(__pairwise_sum_2_9) );
+    }
+}
+
+/*!
+    @brief Quantize eight wavelet coefficients at once using NEON.
+
+    Vector counterpart of QuantizeValue: computes
+    sign(x) * (((|x| + midpoint) * multiplier) >> 16) per lane and stores the
+    eight results in output.
+
+    NOTE(review): unlike the scalar path, there is no final ClampPixel here --
+    presumably the 16.16 multiplier guarantees the result fits in 16 bits;
+    confirm against the quantizer setup.
+*/
+void QuantizeBand_8x_NEON_(int16_t* wavelet_band, int16_t midpoint, int32_t multiplier, PIXEL *output )
+{
+    int16x8_t __wavelet_band = vld1q_s16( wavelet_band );
+
+    // |x| + midpoint
+    int16x8_t __wavelet_band_abs = vaddq_s16( vabsq_s16(__wavelet_band), vdupq_n_s16( midpoint ) );
+
+    int32x4_t __multipliers = vdupq_n_s32( multiplier );
+
+    // Widen to 32 bits, multiply by the 16.16 fixed-point reciprocal, and
+    // narrow back, keeping the integer part (>> 16)
+    int32x4_t __value_high = vmovl_s16( vget_high_s16(__wavelet_band_abs) );
+    __value_high = vmulq_s32( __value_high, __multipliers );
+
+    int32x4_t __value_low = vmovl_s16( vget_low_s16(__wavelet_band_abs) );
+    __value_low = vmulq_s32( __value_low, __multipliers );
+
+    int16x8_t __multiplied = vcombine_s16( vshrn_n_s32( __value_low, 16 ), vshrn_n_s32( __value_high, 16 ) );
+
+    // Renamed from 'mask' to avoid shadowing the file-scope lane-mask constant
+    uint16x8_t __sign_mask = vcltq_s16(__wavelet_band, vdupq_n_s16(0) );
+    int16x8_t __neg_output = vnegq_s16(__multiplied);
+
+    // Select the negated result for lanes whose input was negative
+    int16x8_t __result = vbslq_s16( __sign_mask, __neg_output, __multiplied );
+
+    vst1q_s16(output, __result);
+}
+
+/*!
+    @brief NEON vertical 2-6 filter core for eight columns of an interior row.
+
+    coefficients[0..5] point to six consecutive rows.  Writes eight lowpass
+    values (row2 + row3) and eight highpass values
+    ((row5 - row0 + row4 - row1 + rounding) / 8 + row2 - row3) starting at column.
+*/
+void FilterVerticalMiddle_Core_8x_NEON_(PIXEL *coefficients[], int column, int16_t* highpass, int16_t* lowpass )
+{
+    int16x8_t __highpass, __highpass_50, __highpass_14;
+
+    {
+        // Outer tap pair: row5 - row0
+        int16x8_t __row_0 = vld1q_s16( &coefficients[0][column] );
+        int16x8_t __row_5 = vld1q_s16( &coefficients[5][column] );
+
+        __highpass_50 = vsubq_s16( __row_5, __row_0 );
+    }
+
+    {
+        // Inner tap pair: row4 - row1
+        int16x8_t __row_1 = vld1q_s16( &coefficients[1][column] );
+        int16x8_t __row_4 = vld1q_s16( &coefficients[4][column] );
+
+        __highpass_14 = vsubq_s16( __row_4, __row_1 );
+    }
+
+    {
+        // Rounded divide by 8
+        int16x8_t __rounding = vdupq_n_s16 (rounding);
+
+        __highpass = vaddq_s16( __highpass_50, __highpass_14 );
+        __highpass = vaddq_s16( __highpass, __rounding );
+        __highpass = vshrq_n_s16(__highpass, 3);
+    }
+
+    {
+        // Center pair contributes the difference to the highpass band and
+        // the sum to the lowpass band
+        int16x8_t __row_2 = vld1q_s16( &coefficients[2][column] );
+        int16x8_t __row_3 = vld1q_s16( &coefficients[3][column] );
+
+        int16x8_t __diff_23 = vsubq_s16( __row_2, __row_3 );
+        int16x8_t __sum_23 = vaddq_s16( __row_2, __row_3 );
+
+        __highpass = vaddq_s16( __highpass, __diff_23 );
+
+        vst1q_s16(lowpass, __sum_23);
+    }
+
+    vst1q_s16(highpass, __highpass);
+
+}
+
+#define FilterVerticalMiddle_8x FilterVerticalMiddle_8x_NEON_
+/*!
+    @brief Filter eight columns of an interior row and quantize all four bands.
+*/
+void FilterVerticalMiddle_8x_NEON_(PIXEL *lowpass[], PIXEL *highpass[], int column, int32_t* midpoints, int32_t* multipliers, PIXEL *result[])
+{
+    int16_t filtered_low[8];
+    int16_t filtered_high[8];
+
+    // Horizontal-highpass input yields the LH (vertical lowpass) and HH bands
+    FilterVerticalMiddle_Core_8x_NEON_( highpass, column, filtered_high, filtered_low );
+    QuantizeBand_8x_NEON_( filtered_low, midpoints[LH_BAND], multipliers[LH_BAND], result[LH_BAND] + column );
+    QuantizeBand_8x_NEON_( filtered_high, midpoints[HH_BAND], multipliers[HH_BAND], result[HH_BAND] + column );
+
+    // Horizontal-lowpass input yields the LL and HL bands
+    FilterVerticalMiddle_Core_8x_NEON_( lowpass, column, filtered_high, filtered_low );
+    QuantizeBand_8x_NEON_( filtered_low, midpoints[LL_BAND], multipliers[LL_BAND], result[LL_BAND] + column );
+    QuantizeBand_8x_NEON_( filtered_high, midpoints[HL_BAND], multipliers[HL_BAND], result[HL_BAND] + column );
+}
+
+#define FilterVerticalTopBottom_8x FilterVerticalTopBottom_8x_NEON_
+/*!
+    @brief Filter eight border-row columns and quantize all four bands.
+
+    The border filter core is scalar; only the quantization is vectorized.
+*/
+void FilterVerticalTopBottom_8x_NEON_(PIXEL *lowpass[], PIXEL *highpass[], int column, int32_t* midpoints, int32_t* multipliers, PIXEL *result[], bool top )
+{
+    int16_t filtered_low[8];
+    int16_t filtered_high[8];
+
+    // Horizontal-highpass input yields the LH and HH bands
+    FilterVerticalTopBottom_Core_8x_C_( highpass, column, filtered_high, filtered_low, top );
+    QuantizeBand_8x_NEON_( filtered_low, midpoints[LH_BAND], multipliers[LH_BAND], result[LH_BAND] + column );
+    QuantizeBand_8x_NEON_( filtered_high, midpoints[HH_BAND], multipliers[HH_BAND], result[HH_BAND] + column );
+
+    // Horizontal-lowpass input yields the LL and HL bands
+    FilterVerticalTopBottom_Core_8x_C_( lowpass, column, filtered_high, filtered_low, top );
+    QuantizeBand_8x_NEON_( filtered_low, midpoints[LL_BAND], multipliers[LL_BAND], result[LL_BAND] + column );
+    QuantizeBand_8x_NEON_( filtered_high, midpoints[HL_BAND], multipliers[HL_BAND], result[HL_BAND] + column );
+}
+
+#else
+
+#define HorizontalFilter_Prescale2_4x HorizontalFilter_Prescale2_4x_C_
+/*!
+    @brief Scalar horizontal 2-6 wavelet filter with a prescale shift of two.
+
+    Reads twelve input pixels and emits four lowpass and four highpass
+    coefficients.  Each input is divided by four with rounding before the
+    highpass filter is applied.
+*/
+void HorizontalFilter_Prescale2_4x_C_(PIXEL *input, PIXEL* lowpass, PIXEL* highpass )
+{
+    PIXEL prescaled[12];
+    int i;
+
+    // Prescale: (x + 3) >> 2
+    for ( i = 0; i < 12; i++)
+    {
+        prescaled[i] = (input[i] + 3) >> 2;
+    }
+
+    // Highpass: (pair two ahead - pair behind + rounding) / 8 + center difference
+    for ( i = 0; i < 4; i++)
+    {
+        const int base = 2 * i;
+
+        int32_t sum = (prescaled[base + 4] + prescaled[base + 5])
+                    - (prescaled[base + 0] + prescaled[base + 1]);
+
+        sum += rounding;
+        sum = DivideByShift(sum, 3);
+        sum += prescaled[base + 2] - prescaled[base + 3];
+
+        highpass[i] = ClampPixel(sum);
+    }
+
+    // Lowpass: prescaled sum of each center pair (computed from the raw input)
+    for ( i = 0; i < 4; i++)
+    {
+        const int base = 2 * i;
+
+        lowpass[i] = (input[base + 2] + input[base + 3] + 3) >> 2;
+    }
+}
+
+#define HorizontalFilter_Prescale0_4x HorizontalFilter_Prescale0_4x_C_
+/*!
+    @brief Scalar horizontal 2-6 wavelet filter with no prescale.
+
+    Reads twelve input pixels and emits four lowpass and four highpass
+    coefficients; the inputs are used as-is (prescale of zero).
+*/
+void HorizontalFilter_Prescale0_4x_C_(PIXEL *input, PIXEL* lowpass, PIXEL* highpass )
+{
+    PIXEL window[12];
+    int i;
+
+    // Work from a local copy of the filter window
+    memcpy(window, input, sizeof(PIXEL) * 12);
+
+    // Highpass: (pair two ahead - pair behind + rounding) / 8 + center difference
+    for ( i = 0; i < 4; i++)
+    {
+        const int base = 2 * i;
+
+        int32_t sum = (window[base + 4] + window[base + 5])
+                    - (window[base + 0] + window[base + 1]);
+
+        sum += rounding;
+        sum = DivideByShift(sum, 3);
+        sum += window[base + 2] - window[base + 3];
+
+        highpass[i] = ClampPixel(sum);
+    }
+
+    // Lowpass: sum of each center pair
+    for ( i = 0; i < 4; i++)
+    {
+        const int base = 2 * i;
+
+        lowpass[i] = input[base + 2] + input[base + 3];
+    }
+}
+
+/*!
+    @brief Scalar vertical 2-6 filter core for eight columns of an interior row.
+
+    coefficients[0..5] point to six consecutive rows.  Writes eight lowpass
+    values (row2 + row3) and eight highpass values
+    ((row5 - row0 + row4 - row1 + rounding) / 8 + row2 - row3) starting at column.
+*/
+void FilterVerticalMiddle_Core_8x_C_(PIXEL *coefficients[], int column, int16_t* highpass, int16_t* lowpass )
+{
+    const PIXEL *row_0 = coefficients[0] + column;
+    const PIXEL *row_1 = coefficients[1] + column;
+    const PIXEL *row_2 = coefficients[2] + column;
+    const PIXEL *row_3 = coefficients[3] + column;
+    const PIXEL *row_4 = coefficients[4] + column;
+    const PIXEL *row_5 = coefficients[5] + column;
+
+    int i;
+
+    for (i = 0; i < 8; i++)
+    {
+        // Outer and inner tap pairs, then the rounded divide by 8
+        highpass[i] = row_5[i] - row_0[i];
+        highpass[i] += row_4[i] - row_1[i];
+        highpass[i] += rounding;
+        highpass[i] = DivideByShift(highpass[i], 3);
+
+        // The center pair contributes its difference to the highpass band
+        // and its sum to the lowpass band
+        highpass[i] += row_2[i] - row_3[i];
+        lowpass[i] = row_2[i] + row_3[i];
+    }
+}
+
+
+#define FilterVerticalMiddle_8x FilterVerticalMiddle_8x_C_
+/*!
+    @brief Filter eight columns of an interior row and quantize all four bands.
+*/
+void FilterVerticalMiddle_8x_C_(PIXEL *lowpass[], PIXEL *highpass[], int column, int32_t* midpoints, int32_t* multipliers, PIXEL *result[])
+{
+    int16_t band_LL[8];
+    int16_t band_HL[8];
+    int16_t band_LH[8];
+    int16_t band_HH[8];
+
+    int i;
+
+    // Horizontal-highpass input yields LH/HH; horizontal-lowpass yields LL/HL
+    FilterVerticalMiddle_Core_8x_C_( highpass, column, band_HH, band_LH);
+    FilterVerticalMiddle_Core_8x_C_( lowpass, column, band_HL, band_LL);
+
+    // Quantize each band into its output row
+    for (i = 0; i < 8; i++)
+    {
+        const int out = column + i;
+
+        result[LL_BAND][out] = QuantizeValue( band_LL[i], midpoints[LL_BAND], multipliers[LL_BAND] );
+        result[LH_BAND][out] = QuantizeValue( band_LH[i], midpoints[LH_BAND], multipliers[LH_BAND] );
+        result[HL_BAND][out] = QuantizeValue( band_HL[i], midpoints[HL_BAND], multipliers[HL_BAND] );
+        result[HH_BAND][out] = QuantizeValue( band_HH[i], midpoints[HH_BAND], multipliers[HH_BAND] );
+    }
+}
+
+#define FilterVerticalTopBottom_8x FilterVerticalTopBottom_8x_C_
+/*!
+    @brief Filter eight border-row columns and quantize all four bands.
+*/
+void FilterVerticalTopBottom_8x_C_(PIXEL *lowpass[], PIXEL *highpass[], int column, int32_t* midpoints, int32_t* multipliers, PIXEL *result[], bool top )
+{
+    int16_t band_LL[8];
+    int16_t band_HL[8];
+    int16_t band_LH[8];
+    int16_t band_HH[8];
+
+    int i;
+
+    // Horizontal-highpass input yields LH/HH; horizontal-lowpass yields LL/HL
+    FilterVerticalTopBottom_Core_8x_C_( highpass, column, band_HH, band_LH, top );
+    FilterVerticalTopBottom_Core_8x_C_( lowpass, column, band_HL, band_LL, top );
+
+    // Quantize each band into its output row
+    for (i = 0; i < 8; i++)
+    {
+        const int out = column + i;
+
+        result[LL_BAND][out] = QuantizeValue( band_LL[i], midpoints[LL_BAND], multipliers[LL_BAND] );
+        result[LH_BAND][out] = QuantizeValue( band_LH[i], midpoints[LH_BAND], multipliers[LH_BAND] );
+        result[HL_BAND][out] = QuantizeValue( band_HL[i], midpoints[HL_BAND], multipliers[HL_BAND] );
+        result[HH_BAND][out] = QuantizeValue( band_HH[i], midpoints[HH_BAND], multipliers[HH_BAND] );
+    }
+}
+
+#endif
+
+/*!
+    @brief Six-tap interior highpass filter for one output coefficient.
+
+    Applies the taps {-1, -1, +8, -8, +1, +1} to six consecutive (optionally
+    prescaled) input pixels, adds the rounding constant, divides by 8, and
+    clamps the result to the pixel range.
+*/
+static PIXEL HorizontalHighPassFilter_Middle(PIXEL *input, int prescale_rounding, int prescale)
+{
+    int32_t sum;
+
+    if( prescale == 0 )
+    {
+        // Multiply by 8 instead of shifting left: left-shifting a negative
+        // coefficient is undefined behavior in C, and wavelet coefficients
+        // can be negative.  The compiler emits the same shift for the
+        // non-negative case.
+        sum = -input[0] - input[1] + (input[2] * 8) - (input[3] * 8) + input[4] + input[5];
+    }
+    else
+    {
+        // Prescale each tap input before filtering
+        sum = 0;
+        sum -= (input[0] + prescale_rounding) >> prescale;
+        sum -= (input[1] + prescale_rounding) >> prescale;
+        sum += ((input[2] + prescale_rounding) >> prescale) * 8;
+        sum -= ((input[3] + prescale_rounding) >> prescale) * 8;
+        sum += (input[4] + prescale_rounding) >> prescale;
+        sum += (input[5] + prescale_rounding) >> prescale;
+    }
+
+    sum += rounding;
+    sum = DivideByShift(sum, 3);
+    return ClampPixel(sum);
+}
+
+/*!
+    @brief General six-tap highpass filter with caller-supplied taps.
+
+    Used for the left and right image borders, where the boundary-condition
+    tap sets differ from the interior filter.  The result is divided by 8
+    with rounding and clamped to the pixel range.
+*/
+static PIXEL HorizontalHighPassFilter(PIXEL *input, PIXEL *multipliers, int prescale_rounding, int prescale)
+{
+    int32_t total = 0;
+    int tap;
+
+    for (tap = 0; tap < 6; tap++)
+    {
+        int32_t sample = input[tap];
+
+        // Optionally prescale the sample before applying the tap weight
+        if( prescale != 0 )
+        {
+            sample = (sample + prescale_rounding) >> prescale;
+        }
+
+        total += multipliers[tap] * sample;
+    }
+
+    total += rounding;
+    total = DivideByShift(total, 3);
+    return ClampPixel(total);
+}
+
+/*!
+    @brief Apply the horizontal wavelet filter to a row of pixels
+
+    Computes one level of the horizontal 2-6 wavelet decomposition of a row of
+    width pixels, writing lowpass and highpass coefficients at half resolution.
+    Each input value is prescaled (shifted right by prescale with rounding)
+    before filtering; only prescale values 0 and 2 are supported (see assert).
+
+    The left and right borders use boundary-condition filter taps; the interior
+    is processed eight input columns at a time by the 4x helper routines, with
+    a scalar loop covering the remainder.
+*/
+CODEC_ERROR FilterHorizontalRow(PIXEL *input, PIXEL *lowpass, PIXEL *highpass, int width, int prescale)
+{
+    int column = 2;
+
+    //uint16_t *input = (uint16_t *)input_buffer;
+
+    //TODO: Check that the rounding is correct for all prescale values
+    int prescale_rounding = (1 << prescale) - 1;
+
+    // Last column handled by the main loops; the right border is handled separately
+    const int last_input_column = ((width % 2) == 0) ? width - 2 : width - 1;
+
+    // Largest span the eight-column helper routines can process safely
+    int last_input_column_tight = ( (last_input_column - 4) / 8) * 8;
+
+    //TODO Test this routine with other prescale values
+    assert(prescale == 0 || prescale == 2);
+
+    /***** Process the left border using the formula for boundary conditions *****/
+
+    // Compute the lowpass coefficient
+    lowpass[0] = (input[0] + input[1] + prescale_rounding) >> prescale;
+
+    {
+        // Boundary-condition taps for the first highpass coefficient
+        PIXEL coefficients[6] = { 5, -11, 4, 4, -1, -1 };
+        highpass[0] = HorizontalHighPassFilter(input, coefficients, prescale_rounding, prescale );
+    }
+
+    if( prescale == 2 )
+    {
+        /***** Process the internal pixels using the normal wavelet formula *****/
+        for (; column < last_input_column_tight; column += 8) //
+        {
+            // Column index should always be divisible by two
+            assert((column % 2) == 0);
+
+            HorizontalFilter_Prescale2_4x( input + column - 2, &lowpass[column/2], &highpass[column/2] );
+        }
+    }
+    else if( prescale == 0 )
+    {
+        /***** Process the internal pixels using the normal wavelet formula *****/
+        for (; column < last_input_column_tight; column += 8) //
+        {
+            // Column index should always be divisible by two
+            assert((column % 2) == 0);
+
+            HorizontalFilter_Prescale0_4x( input + column - 2, &lowpass[column/2], &highpass[column/2] );
+        }
+    }
+    else
+    {
+        // Only prescale values 0 and 2 are supported (see the assert above)
+        assert(0);
+    }
+
+    // Scalar loop for the interior columns not covered by the 4x helpers
+    for (; column < last_input_column; column += 2)
+    {
+        // Column index should always be divisible by two
+        assert((column % 2) == 0);
+
+        // Compute the lowpass coefficient
+        lowpass[column/2] = (input[column + 0] + input[column + 1] + prescale_rounding) >> prescale;
+
+        // Initialize the sum for computing the highpass coefficient
+        if ((column + 3) < width)
+        {
+            highpass[column/2] = HorizontalHighPassFilter_Middle(input + column - 2, prescale_rounding, prescale );
+        }
+        else
+        {
+            // Too close to the right edge for the full filter window:
+            // substitute the last column for the missing tap
+            int32_t sum = 0;
+
+            sum -= (input[column - 2] + prescale_rounding) >> prescale;
+            sum -= (input[column - 1] + prescale_rounding) >> prescale;
+            sum += (input[column + 2] + prescale_rounding) >> prescale;
+
+            // Duplicate the value in the last column
+            sum += (input[column + 2] + prescale_rounding) >> prescale;
+            sum += rounding;
+            sum = DivideByShift(sum, 3);
+            sum += (input[column + 0] + prescale_rounding) >> prescale;
+            sum -= (input[column + 1] + prescale_rounding) >> prescale;
+            highpass[column/2] = ClampPixel(sum);
+        }
+    }
+
+    // Should have exited the loop at the last column
+    assert(column == last_input_column);
+
+    /***** Process the right border using the formula for boundary conditions *****/
+
+    // Compute the lowpass coefficient
+    if ((column + 1) < width)
+    {
+        // Boundary-condition taps for the last highpass coefficient
+        PIXEL coefficients[6] = { 1, 1, -4, -4, 11, -5 };
+        highpass[column/2] = HorizontalHighPassFilter(input + column - 4, coefficients, prescale_rounding, prescale );
+
+        // Use the value in the last column
+        lowpass[column/2] = (input[column + 0] + input[column + 1] + prescale_rounding) >> prescale;
+    }
+    else
+    {
+        int32_t sum = 0;
+        // Duplicate the value in the last column
+        sum -= 5 * ((input[column + 0] + prescale_rounding) >> prescale);
+
+        sum += 11 * ((input[column + 0] + prescale_rounding) >> prescale);
+        sum -= 4 * ((input[column - 1] + prescale_rounding) >> prescale);
+        sum -= 4 * ((input[column - 2] + prescale_rounding) >> prescale);
+        sum += 1 * ((input[column - 3] + prescale_rounding) >> prescale);
+        sum += 1 * ((input[column - 4] + prescale_rounding) >> prescale);
+        sum += rounding;
+        sum = DivideByShift(sum, 3);
+
+        highpass[column/2] = ClampPixel(sum);
+
+        // Duplicate the value in the last column
+        lowpass[column/2] = (input[column + 0] + input[column + 0] + prescale_rounding) >> prescale;
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Scalar interior vertical filter for a single column.
+
+    Produces and quantizes one coefficient in each of the four wavelet bands:
+    LL/HL from the horizontal lowpass rows, LH/HH from the highpass rows.
+*/
+static void FilterVerticalMiddle_1x(PIXEL *lowpass[], PIXEL *highpass[], int column, int32_t* midpoints, int32_t* multipliers, PIXEL *result[])
+{
+    int pass;
+
+    for (pass = 0; pass < 2; pass++)
+    {
+        // First pass consumes the lowpass rows, second pass the highpass rows
+        PIXEL **rows = (pass == 0) ? lowpass : highpass;
+        const int low_band  = (pass == 0) ? LL_BAND : LH_BAND;
+        const int high_band = (pass == 0) ? HL_BAND : HH_BAND;
+
+        const PIXEL outer_sum = rows[0][column] + rows[1][column];
+        const PIXEL center_a  = rows[2][column];
+        const PIXEL center_b  = rows[3][column];
+        const PIXEL inner_sum = rows[4][column] + rows[5][column];
+
+        // Highpass: (inner - outer + 8 * center difference + rounding) / 8
+        int32_t sum = inner_sum - outer_sum;
+        sum += 8 * (center_a - center_b);
+        sum = DivideByShift(sum + rounding, 3);
+
+        // Lowpass is the sum of the center pair
+        result[low_band][column]  = QuantizeValue( center_a + center_b, midpoints[low_band], multipliers[low_band] );
+        result[high_band][column] = QuantizeValue( sum, midpoints[high_band], multipliers[high_band] );
+    }
+}
+
+/*!
+    @brief Scalar border vertical filter for a single column.
+
+    Applies the top- or bottom-border formulas and quantizes one coefficient
+    in each of the four wavelet bands.
+*/
+static void FilterVerticalTopBottom_1x(PIXEL *lowpass[], PIXEL *highpass[], int column, int32_t* midpoints, int32_t* multipliers, PIXEL *result[], bool top )
+{
+    // Six-tap border filters (divided by 8 after rounding)
+    static const int border_taps_top[6]    = { 5, -11, 4, 4, -1, -1 };
+    static const int border_taps_bottom[6] = { 1, 1, -4, -4, 11, -5 };
+
+    const int *taps = top ? border_taps_top : border_taps_bottom;
+
+    // The lowpass outputs sum the two rows nearest the border
+    const int lowpass_row = top ? 0 : 4;
+
+    int32_t sum_L = rounding;   // rounding is added before the divide
+    int32_t sum_H = rounding;
+    int tap;
+
+    // Lowpass vertical filter applied to the lowpass and highpass horizontal results
+    result[LL_BAND][column] = QuantizeValue( lowpass[lowpass_row][column] + lowpass[lowpass_row + 1][column], midpoints[LL_BAND], multipliers[LL_BAND] );
+    result[LH_BAND][column] = QuantizeValue( highpass[lowpass_row][column] + highpass[lowpass_row + 1][column], midpoints[LH_BAND], multipliers[LH_BAND] );
+
+    // Highpass vertical filter applied to both sets of horizontal results
+    for (tap = 0; tap < 6; tap++)
+    {
+        sum_L += taps[tap] * lowpass[tap][column];
+        sum_H += taps[tap] * highpass[tap][column];
+    }
+
+    result[HL_BAND][column] = QuantizeValue( DivideByShift(sum_L, 3), midpoints[HL_BAND], multipliers[HL_BAND] );
+    result[HH_BAND][column] = QuantizeValue( DivideByShift(sum_H, 3), midpoints[HH_BAND], multipliers[HH_BAND] );
+}
+
+/*!
+    @brief Apply the vertical wavelet filter to the first row
+
+    Uses the top-border filter formulas.  Results are written directly into
+    row zero of each output band; midpoints and multipliers supply the
+    quantization midpoint offset and 16.16 reciprocal for each band.
+*/
+CODEC_ERROR FilterVerticalTopRow(PIXEL **lowpass, PIXEL **highpass, PIXEL **output, int wavelet_width, int wavelet_pitch, int32_t midpoints[], int32_t multipliers[], int input_row )
+{
+    // Width rounded down to a multiple of eight for the vectorizable path
+    const int vector_width = (wavelet_width / 8) * 8;
+    int column;
+
+    // The top-row filter only applies to the first input row
+    assert(input_row == 0);
+
+    // Eight columns per iteration
+    for (column = 0; column < vector_width; column += 8)
+    {
+        FilterVerticalTopBottom_8x( lowpass, highpass, column, midpoints, multipliers, output, true );
+
+        assert(output[LL_BAND][column] >= 0);
+    }
+
+    // Scalar path for any remaining columns
+    for (; column < wavelet_width; column++)
+    {
+        FilterVerticalTopBottom_1x( lowpass, highpass, column, midpoints, multipliers, output, true );
+
+        assert(output[LL_BAND][column] >= 0);
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+
+/*!
+    @brief Apply the vertical wavelet filter to the last row
+
+    Uses the bottom-border filter formulas.  The quantized results are written
+    to output row input_row / 2 in each of the four wavelet bands.
+*/
+CODEC_ERROR FilterVerticalBottomRow(PIXEL **lowpass, PIXEL **highpass, PIXEL **output, int wavelet_width, int wavelet_pitch, int32_t midpoints[], int32_t multipliers[], int input_row )
+{
+    PIXEL *band_rows[MAX_BAND_COUNT];
+
+    // Width rounded down to a multiple of eight for the vectorizable path
+    const int vector_width = (wavelet_width / 8) * 8;
+
+    // Two input rows produce one output row
+    const int output_row = input_row / 2;
+
+    int band;
+    int column;
+
+    // Locate the output row within each wavelet band (pitch is in bytes)
+    for (band = 0; band < MAX_BAND_COUNT; band++)
+    {
+        band_rows[band] = (PIXEL *)((uint8_t *)output[band] + output_row * wavelet_pitch);
+    }
+
+    // Eight columns per iteration
+    for (column = 0; column < vector_width; column += 8)
+    {
+        FilterVerticalTopBottom_8x( lowpass, highpass, column, midpoints, multipliers, band_rows, false );
+
+        assert(band_rows[LL_BAND][column] >= 0);
+    }
+
+    // Scalar path for any remaining columns
+    for (; column < wavelet_width; column++)
+    {
+        FilterVerticalTopBottom_1x( lowpass, highpass, column, midpoints, multipliers, band_rows, false );
+
+        assert(band_rows[LL_BAND][column] >= 0);
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Apply the vertical wavelet filter to a middle row
+
+    Uses the interior-row filter formulas.  Two input rows produce one output
+    row (input_row / 2) in each of the four quantized wavelet bands.
+*/
+CODEC_ERROR FilterVerticalMiddleRow(PIXEL **lowpass, PIXEL **highpass, PIXEL **output, int wavelet_width, int wavelet_pitch, int32_t midpoints[], int32_t multipliers[], int input_row )
+{
+    PIXEL *band_rows[MAX_BAND_COUNT];
+
+    // Width rounded down to a multiple of eight for the vectorizable path
+    const int vector_width = (wavelet_width / 8) * 8;
+
+    // Two input rows produce one output row
+    const int output_row = input_row / 2;
+
+    int band;
+    int column;
+
+    // Locate the output row within each wavelet band (pitch is in bytes)
+    for (band = 0; band < MAX_BAND_COUNT; band++)
+    {
+        band_rows[band] = (PIXEL *)((uint8_t *)output[band] + output_row * wavelet_pitch);
+    }
+
+    // Eight columns per iteration
+    for (column = 0; column < vector_width; column += 8)
+    {
+        FilterVerticalMiddle_8x(lowpass, highpass, column, midpoints, multipliers, band_rows);
+    }
+
+    // Scalar path for any remaining columns
+    for (; column < wavelet_width; column += 1)
+    {
+        FilterVerticalMiddle_1x(lowpass, highpass, column, midpoints, multipliers, band_rows);
+
+        assert(band_rows[LL_BAND][column] >= 0);
+    }
+
+    return CODEC_ERROR_OKAY;
+}
+
diff --git a/gpr/source/lib/vc5_encoder/forward.h b/gpr/source/lib/vc5_encoder/forward.h
new file mode 100755
index 0000000..74e2369
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/forward.h
@@ -0,0 +1,38 @@
+/*! @file forward.h
+ *
+ * @brief Declaration of the forward wavelet transform functions.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FORWARD_H
+#define FORWARD_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+    //! Apply the horizontal 2-6 wavelet filter to one row of width pixels,
+    //! prescaling each input by the given shift before filtering
+    CODEC_ERROR FilterHorizontalRow(PIXEL *input, PIXEL *lowpass, PIXEL *highpass, int width, int wavelet_pitch_unused_see_source, int prescale);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // FORWARD_H
diff --git a/gpr/source/lib/vc5_encoder/headers.h b/gpr/source/lib/vc5_encoder/headers.h
new file mode 100755
index 0000000..bdf8ad9
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/headers.h
@@ -0,0 +1,50 @@
+/*! @file headers.h
+ *
+ * @brief This file includes all of the header files that are used by the encoder.
+ *
+ * Note that some header files are only used by the main program that
+ * calls the codec or are only used for debugging are not included by this file.
+ * Only headers that are part of the reference encoder are included by this file.
+ *
+ * Including a single header file in all reference encoder source files
+ * ensures that all modules see the same header files in the same order.
+ *
+ * This file can be used for creating a pre-compiled header if the
+ * compiler supports that capability.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HEADERS_H
+#define HEADERS_H
+
+#include "common.h"
+
+#include "vlc.h"
+#include "bitstream.h"
+#include "raw.h"
+#include "codebooks.h"
+#include "component.h"
+#include "syntax.h"
+#include "forward.h"
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+#include "sections.h"
+#endif
+
+#include "parameters.h"
+#include "encoder.h"
+#include "vc5_encoder.h"
+
+#endif // HEADERS_H
diff --git a/gpr/source/lib/vc5_encoder/parameters.c b/gpr/source/lib/vc5_encoder/parameters.c
new file mode 100755
index 0000000..2158a71
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/parameters.c
@@ -0,0 +1,86 @@
+/*! @file parameters.c
+ *
+ * @brief Implementation of the data structure used to pass parameters
+ * to the encoder.
+ *
+ * The parameters data structure is currently a simple struct, but
+ * fields may be added, removed, or replaced. A version number is
+ * included in the parameters data structure to allow decoders to
+ * adapt to changes.
+ *
+ * It is contemplated that future implementations may use a dictionary
+ * of key-value pairs which would allow the decoder to determine whether
+ * a parameter is present.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+//! Current version number of the parameters data structure
+#define PARAMETERS_VERSION 1
+
+/*!
+    @brief Initialize the parameters data structure
+
+    Clears the structure and fills in the default encoder settings: the
+    parameters version, lowpass precision, input dimensions, enabled parts,
+    and the default quantization table.
+
+    The version number of the parameters data structure must be incremented
+    whenever a change is made to the definition of the parameters data
+    structure.
+
+    @todo Special initialization required by the metadata?
+*/
+CODEC_ERROR InitEncoderParameters(ENCODER_PARAMETERS *parameters)
+{
+    // Default quantization: the CineForm Filmscan-2 quality table
+    static const QUANT default_quant_table[] = {1, 24, 24, 12, 24, 24, 12, 32, 32, 48};
+
+    memset(parameters, 0, sizeof(ENCODER_PARAMETERS));
+    parameters->version = PARAMETERS_VERSION;
+
+    // Set the default value for the number of bits per lowpass coefficient
+    parameters->encoded.lowpass_precision = 16;
+
+    // Default input dimensions
+    parameters->input.width = 4000;
+    parameters->input.height = 3000;
+
+#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
+    // The maximum number of bits per component is the internal precision
+    //parameters->max_bits_per_component = internal_precision;
+#endif
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+    parameters->enabled_sections = VC5_ENABLED_SECTIONS;
+#endif
+
+    // The elementary bitstream is always enabled
+    parameters->enabled_parts = VC5_ENABLED_PARTS;
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+    parameters->layer_count = 1;
+    parameters->progressive = 1;
+#endif
+
+    // Install the default quantization table
+    memcpy(parameters->quant_table, default_quant_table, sizeof(parameters->quant_table));
+
+    parameters->verbose_flag = false;
+
+    gpr_rgb_gain_set_defaults(&parameters->rgb_gain);
+
+    parameters->rgb_resolution = VC5_ENCODER_RGB_RESOLUTION_DEFAULT;
+
+    return CODEC_ERROR_OKAY;
+}
+
diff --git a/gpr/source/lib/vc5_encoder/parameters.h b/gpr/source/lib/vc5_encoder/parameters.h
new file mode 100755
index 0000000..b2cac04
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/parameters.h
@@ -0,0 +1,139 @@
+/*! @file parameters.h
+ *
+ * @brief Declare a data structure for holding a table of parameters that is passed
+ * to the encoder during initialization.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PARAMETERS_H
+#define PARAMETERS_H
+
+#include "vc5_encoder.h"
+
+/*!
+ @brief Function prototype for a decompositor
+
+ A decompositor is the opposite of an image composition operator:
+ It decomposes a frame into one or more frames.
+
+ For example, an interlaced frame can be decomposed into fields or two frames
+ arranged side-by-side within a single frame can be decomposed into individual
+ frames.
+
+ Each layer in an encoded sample may correspond to a separate input frame.
+ For convenience, the reference codec stores the input to the encoder in a
+ separate file with one frame per file if the encoded sample has a single layer.
+ To allow the reference encoder to store all of the input frames that are
+ encoded as separate layers in an encoded sample in a single file, multiple
+ frames are stored in the file (often using over-under frame packing). The
+ decomposer unpacks multiple frames in a single frame into individual frames
+ for encoding with one frame per layer.
+*/
+
//! Decomposes one packed input frame into an array of frame_count individual frames
typedef CODEC_ERROR (* DECOMPOSITOR)(IMAGE *packed_image, IMAGE *image_array[], int frame_count);
+
/*!
	@brief Declaration of a data structure for passing parameters to the encoder

	The encoded dimensions are the width and height of the planes of pixels as
	represented internally in the encoded sample.  In the case where the planes
	have different dimensions (for example YUV with 4:2:2 sampling), the first
	encoded plane (corresponding to the luma plane, for example) is reported.
*/
typedef struct _encoder_parameters
{
    uint32_t version;       //!< Version number for this definition of the parameters

    // BAYER_ORDERING bayer_ordering;

    ENABLED_PARTS enabled_parts;        //!< Parts of the VC-5 standard that are enabled

    //! Data structure for the input frame dimensions and format
    struct _input_parameters
    {
        DIMENSION width;        //!< Width of the frames input to the encoder
        DIMENSION height;       //!< Height of the frames input to the encoder
        PIXEL_FORMAT format;    //!< Pixel format of the input frames
        PRECISION precision;    //!< Bits per component in the input image

    } input;            //!< Dimensions and format of the input frame

    //! Data structure for the encoded representation of the image
    struct _encoded_parameters
    {
#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
        DIMENSION width;        //!< Width of the encoded frame
        DIMENSION height;       //!< Height of the encoded frame
        IMAGE_FORMAT format;    //!< Internal format of the encoded image
        PRECISION precision;    //!< Encoded precision of the image after scaling
#endif
        //! Number of bits used to encode lowpass coefficients
        PRECISION lowpass_precision;

    } encoded;          //!< Encoded frame dimensions and the encoded format

    //! Array of quantization values indexed by the subband number
    QUANT quant_table[MAX_SUBBAND_COUNT];

#if VC5_ENABLED_PART(VC5_PART_METADATA)
    //! Metadata that controls decoding (currently not used)
    METADATA metadata;
#endif
#if VC5_ENABLED_PART(VC5_PART_LAYERS)
    int layer_count;            //!< Number of layers in each encoded sample
    int progressive;            //!< Non-zero for progressive (non-interlaced) input -- TODO confirm
    DECOMPOSITOR decompositor;  //!< Optional routine that splits a packed frame into per-layer frames
#endif

#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
    ENABLED_SECTIONS enabled_sections;  //!< Bit mask of VC-5 Part 6 section types that are enabled
#endif

#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
    // Definition of the pattern elements
    DIMENSION pattern_width;            //!< Width of each pattern element (samples)
    DIMENSION pattern_height;           //!< Height of each pattern element (samples)
    DIMENSION components_per_sample;    //!< Number of components in each sample of the pattern element
    //PRECISION max_bits_per_component;
#endif

    //! Table for the order in which channels are encoded into the bitstream
    CHANNEL channel_order_table[MAX_CHANNEL_COUNT];

    //! Number of entries in the channel order table (may be less than the channel count)
    int channel_order_count;

    //! Flag that controls verbose output
    bool verbose_flag;

    //! Memory allocator callbacks used by the encoder
    gpr_allocator allocator;

    //! Requested resolution for the RGB output -- presumably a preview; verify against caller
    GPR_RGB_RESOLUTION rgb_resolution;

    //! RGB gain values (defaults set by gpr_rgb_gain_set_defaults)
    gpr_rgb_gain rgb_gain;

} ENCODER_PARAMETERS;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ CODEC_ERROR InitEncoderParameters(ENCODER_PARAMETERS *parameters);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PARAMETERS_H
diff --git a/gpr/source/lib/vc5_encoder/raw.c b/gpr/source/lib/vc5_encoder/raw.c
new file mode 100755
index 0000000..cc1a302
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/raw.c
@@ -0,0 +1,621 @@
+/*! @file raw.c
+ *
+ * @brief Implementation of routines for packing RAW image to a row of pixels.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+#if ENABLED(NEON)
+#include <arm_neon.h>
+#endif
+
+/** ------------------- **/
+/** 14 BIT INPUT FORMAT **/
+/** ------------------- **/
+
+static void UnpackPixel_14(uint16_t *input_row1_ptr, uint16_t *input_row2_ptr, int column, PIXEL *output_buffer[], bool rggb )
+{
+ uint16_t R1, G1, G2, B1;
+ uint16_t GS, GD, RG, BG;
+
+ uint16_t *GS_output_row_ptr = (uint16_t *)output_buffer[0];
+ uint16_t *GD_output_row_ptr = (uint16_t *)output_buffer[3];
+ uint16_t *RG_output_row_ptr = (uint16_t *)output_buffer[1];
+ uint16_t *BG_output_row_ptr = (uint16_t *)output_buffer[2];
+
+ const int internal_precision = 12;
+ const int32_t midpoint = (1 << (internal_precision - 1));
+
+ if( rggb )
+ {
+ R1 = input_row1_ptr[2 * column + 0];
+ G1 = input_row1_ptr[2 * column + 1];
+ G2 = input_row2_ptr[2 * column + 0];
+ B1 = input_row2_ptr[2 * column + 1];
+ }
+ else
+ {
+ G1 = input_row1_ptr[2 * column + 0];
+ B1 = input_row1_ptr[2 * column + 1];
+ R1 = input_row2_ptr[2 * column + 0];
+ G2 = input_row2_ptr[2 * column + 1];
+ }
+
+ // Apply protune log curve
+ R1 = EncoderLogCurve[ R1 >> 2 ];
+ G1 = EncoderLogCurve[ G1 >> 2 ];
+ G2 = EncoderLogCurve[ G2 >> 2 ];
+ B1 = EncoderLogCurve[ B1 >> 2 ];
+
+ // Difference the green components and subtract green from the red and blue components
+ GS = (G1 + G2) >> 1;
+ GD = (G1 - G2 + 2 * midpoint) >> 1;
+ RG = (R1 - GS + 2 * midpoint) >> 1;
+ BG = (B1 - GS + 2 * midpoint) >> 1;
+
+ GS_output_row_ptr[column] = clamp_uint(GS, internal_precision);
+ GD_output_row_ptr[column] = clamp_uint(GD, internal_precision);
+ RG_output_row_ptr[column] = clamp_uint(RG, internal_precision);
+ BG_output_row_ptr[column] = clamp_uint(BG, internal_precision);
+}
+
#if ENABLED(NEON)

#define UnpackPixel_14_8x UnpackPixel_14_8x_NEON_
/*!
	@brief NEON version of UnpackPixel_14: unpacks eight pattern elements at once.

	Reads 16 input values (eight 2x2 Bayer quads) from each of the two rows,
	applies the protune log curve through a scalar gather (the table lookup
	cannot be vectorized), then computes GS/GD/RG/BG with halving adds:
	vhaddq_s16(a, b) computes (a + b) >> 1, matching the scalar path.

	NOTE(review): unlike the scalar UnpackPixel_14, this path does not clamp
	the results with clamp_uint -- presumably the log-curve output always fits
	in internal_precision bits; confirm against the table generator.
*/
static void UnpackPixel_14_8x_NEON_(uint16_t *input_row1_ptr, uint16_t *input_row2_ptr, int column, PIXEL *output_buffer[], bool rggb )
{
    int i;
    uint16x8x2_t row_1, row_2;

    const int internal_precision = 12;
    const int32_t midpoint = (1 << (internal_precision - 1));

    // Apply protune log curve
    {
        uint16_t input_row1_12b[16];
        uint16_t input_row2_12b[16];

        for (i = 0; i < 16; ++i)
        {
            // 14-bit input is shifted down to the 12-bit table index
            input_row1_12b[i] = EncoderLogCurve[ input_row1_ptr[2 * column + i] >> 2 ];
            input_row2_12b[i] = EncoderLogCurve[ input_row2_ptr[2 * column + i] >> 2 ];
        }

        // De-interleave even/odd columns into val[0]/val[1]
        row_1 = vld2q_u16( input_row1_12b );
        row_2 = vld2q_u16( input_row2_12b );
    }

    int16x8_t R1, G1, G2, B1;

    if( rggb )
    {
        R1 = vreinterpretq_s16_u16( row_1.val[0] );
        G1 = vreinterpretq_s16_u16( row_1.val[1] );
        G2 = vreinterpretq_s16_u16( row_2.val[0] );
        B1 = vreinterpretq_s16_u16( row_2.val[1] );
    }
    else
    {
        G1 = vreinterpretq_s16_u16( row_1.val[0] );
        B1 = vreinterpretq_s16_u16( row_1.val[1] );
        R1 = vreinterpretq_s16_u16( row_2.val[0] );
        G2 = vreinterpretq_s16_u16( row_2.val[1] );
    }

    int16x8_t GS, GD, RG, BG;

    // GS = (G1 + G2) >> 1
    GS = vhaddq_s16(G1, G2);
    vst1q_s16( output_buffer[0] + column, GS );

    {
        const int16x8_t __midpoint_x2 = vdupq_n_s16(midpoint * 2);

        // GD = (G1 - G2 + 2 * midpoint) >> 1
        GD = vsubq_s16(G1, G2);
        GD = vhaddq_s16(GD, __midpoint_x2);
        vst1q_s16( output_buffer[3] + column, GD );

        // Reuse GS as (2 * midpoint - GS) so the halving adds below compute
        // (R1 - GS + 2 * midpoint) >> 1 and (B1 - GS + 2 * midpoint) >> 1
        GS = vsubq_s16( __midpoint_x2, GS );
    }

    RG = vhaddq_s16(R1, GS);
    vst1q_s16( output_buffer[1] + column, RG );

    BG = vhaddq_s16(B1, GS);
    vst1q_s16( output_buffer[2] + column, BG );
}

#else

#define UnpackPixel_14_8x UnpackPixel_14_8x_C_
/*!
	@brief Portable fallback: unpack eight pattern elements with the scalar routine.
*/
static void UnpackPixel_14_8x_C_(uint16_t *input_row1_ptr, uint16_t *input_row2_ptr, int column, PIXEL *output_buffer[], bool rggb )
{
    int i;
    for ( i = 0; i < 8; i++)
    {
        UnpackPixel_14(input_row1_ptr, input_row2_ptr, column + i, output_buffer, rggb );
    }
}

#endif
+
+void UnpackImage_14(const PACKED_IMAGE *input, UNPACKED_IMAGE *output, ENABLED_PARTS enabled_parts, bool rggb )
+{
+ uint8_t *input_buffer = (uint8_t *)input->buffer + input->offset;
+
+ const DIMENSION input_width = input->width / 2;
+ const DIMENSION input_width_m8 = (input_width / 8) * 8;
+
+ const DIMENSION input_height = input->height / 2;
+
+ size_t input_pitch = input->pitch;
+
+ PIXEL *output_row_ptr_array[MAX_CHANNEL_COUNT];
+ uint32_t output_row_ptr_array_pitch[MAX_CHANNEL_COUNT];
+
+ uint16_t *input_row_ptr = (uint16_t*)input_buffer;
+
+ int channel_number;
+
+ int row;
+
+ for (channel_number = 0; channel_number < MAX_CHANNEL_COUNT; channel_number++)
+ {
+ output_row_ptr_array[channel_number] = (PIXEL *)(output->component_array_list[channel_number].data);
+
+ // output->component_array_list[channel_number].pitch is pitch in bytes, so we need to convert it to pitch in PIXELS
+ output_row_ptr_array_pitch[channel_number] = (output->component_array_list[channel_number].pitch / sizeof(PIXEL));
+ }
+
+ for (row = 0; row < input_height; row++)
+ {
+ uint16_t* input_row2_ptr = input_row_ptr + (input_pitch / sizeof(uint16_t));
+
+ int column = 0;
+
+ // Unpack the row of Bayer components from the BYR4 pattern elements
+ for (; column < input_width_m8; column+= 8)
+ {
+ UnpackPixel_14_8x(input_row_ptr, input_row2_ptr, column, output_row_ptr_array, rggb );
+ }
+
+ // Unpack the row of Bayer components from the BYR4 pattern elements
+ for (; column < input_width; column++)
+ {
+ UnpackPixel_14(input_row_ptr, input_row2_ptr, column, output_row_ptr_array, rggb );
+ }
+
+ input_row_ptr += input_pitch;
+
+ for (channel_number = 0; channel_number < MAX_CHANNEL_COUNT; channel_number++)
+ {
+ output_row_ptr_array[channel_number] += output_row_ptr_array_pitch[channel_number];
+ }
+ }
+}
+
+/** ------------------- **/
+/** 12 BIT INPUT FORMAT **/
+/** ------------------- **/
+
+static void UnpackPixel_12(uint16_t *input_row1_ptr, uint16_t *input_row2_ptr, int column, PIXEL *output_buffer[], bool rggb )
+{
+ uint16_t R1, G1, G2, B1;
+ uint16_t GS, GD, RG, BG;
+
+ uint16_t *GS_output_row_ptr = (uint16_t *)output_buffer[0];
+ uint16_t *GD_output_row_ptr = (uint16_t *)output_buffer[3];
+ uint16_t *RG_output_row_ptr = (uint16_t *)output_buffer[1];
+ uint16_t *BG_output_row_ptr = (uint16_t *)output_buffer[2];
+
+ const int internal_precision = 12;
+ const int32_t midpoint = (1 << (internal_precision - 1));
+
+ if( rggb )
+ {
+ R1 = input_row1_ptr[2 * column + 0];
+ G1 = input_row1_ptr[2 * column + 1];
+ G2 = input_row2_ptr[2 * column + 0];
+ B1 = input_row2_ptr[2 * column + 1];
+ }
+ else
+ {
+ G1 = input_row1_ptr[2 * column + 0];
+ B1 = input_row1_ptr[2 * column + 1];
+ R1 = input_row2_ptr[2 * column + 0];
+ G2 = input_row2_ptr[2 * column + 1];
+ }
+
+ // Apply protune log curve
+ R1 = EncoderLogCurve[ R1 ];
+ G1 = EncoderLogCurve[ G1 ];
+ G2 = EncoderLogCurve[ G2 ];
+ B1 = EncoderLogCurve[ B1 ];
+
+ // Difference the green components and subtract green from the red and blue components
+ GS = (G1 + G2) >> 1;
+ GD = (G1 - G2 + 2 * midpoint) >> 1;
+ RG = (R1 - GS + 2 * midpoint) >> 1;
+ BG = (B1 - GS + 2 * midpoint) >> 1;
+
+ GS_output_row_ptr[column] = clamp_uint(GS, internal_precision);
+ GD_output_row_ptr[column] = clamp_uint(GD, internal_precision);
+ RG_output_row_ptr[column] = clamp_uint(RG, internal_precision);
+ BG_output_row_ptr[column] = clamp_uint(BG, internal_precision);
+}
+
#if ENABLED(NEON)

#define UnpackPixel_12_8x UnpackPixel_12_8x_NEON_
/*!
	@brief NEON version of UnpackPixel_12: unpacks eight pattern elements at once.

	Identical to UnpackPixel_14_8x_NEON_ except the 12-bit input indexes the
	log-curve table directly (no >> 2).  vhaddq_s16(a, b) computes (a + b) >> 1,
	matching the scalar path.

	NOTE(review): unlike the scalar UnpackPixel_12, this path does not clamp
	the results with clamp_uint -- presumably the log-curve output always fits
	in internal_precision bits; confirm against the table generator.
*/
static void UnpackPixel_12_8x_NEON_(uint16_t *input_row1_ptr, uint16_t *input_row2_ptr, int column, PIXEL *output_buffer[], bool rggb )
{
    int i;
    uint16x8x2_t row_1, row_2;

    const int internal_precision = 12;
    const int32_t midpoint = (1 << (internal_precision - 1));

    // Apply protune log curve
    {
        uint16_t input_row1_12b[16];
        uint16_t input_row2_12b[16];

        for (i = 0; i < 16; ++i)
        {
            input_row1_12b[i] = EncoderLogCurve[ input_row1_ptr[2 * column + i] ];
            input_row2_12b[i] = EncoderLogCurve[ input_row2_ptr[2 * column + i] ];
        }

        // De-interleave even/odd columns into val[0]/val[1]
        row_1 = vld2q_u16( input_row1_12b );
        row_2 = vld2q_u16( input_row2_12b );
    }

    int16x8_t R1, G1, G2, B1;

    if( rggb )
    {
        R1 = vreinterpretq_s16_u16( row_1.val[0] );
        G1 = vreinterpretq_s16_u16( row_1.val[1] );
        G2 = vreinterpretq_s16_u16( row_2.val[0] );
        B1 = vreinterpretq_s16_u16( row_2.val[1] );
    }
    else
    {
        G1 = vreinterpretq_s16_u16( row_1.val[0] );
        B1 = vreinterpretq_s16_u16( row_1.val[1] );
        R1 = vreinterpretq_s16_u16( row_2.val[0] );
        G2 = vreinterpretq_s16_u16( row_2.val[1] );
    }

    int16x8_t GS, GD, RG, BG;

    // GS = (G1 + G2) >> 1
    GS = vhaddq_s16(G1, G2);
    vst1q_s16( output_buffer[0] + column, GS );

    {
        const int16x8_t __midpoint_x2 = vdupq_n_s16(midpoint * 2);

        // GD = (G1 - G2 + 2 * midpoint) >> 1
        GD = vsubq_s16(G1, G2);
        GD = vhaddq_s16(GD, __midpoint_x2);
        vst1q_s16( output_buffer[3] + column, GD );

        // Reuse GS as (2 * midpoint - GS) so the halving adds below compute
        // (R1 - GS + 2 * midpoint) >> 1 and (B1 - GS + 2 * midpoint) >> 1
        GS = vsubq_s16( __midpoint_x2, GS );
    }

    RG = vhaddq_s16(R1, GS);
    vst1q_s16( output_buffer[1] + column, RG );

    BG = vhaddq_s16(B1, GS);
    vst1q_s16( output_buffer[2] + column, BG );
}

#else

#define UnpackPixel_12_8x UnpackPixel_12_8x_C_
/*!
	@brief Portable fallback: unpack eight pattern elements with the scalar routine.
*/
static void UnpackPixel_12_8x_C_(uint16_t *input_row1_ptr, uint16_t *input_row2_ptr, int column, PIXEL *output_buffer[], bool rggb )
{
    int i;
    for ( i = 0; i < 8; i++)
    {
        UnpackPixel_12(input_row1_ptr, input_row2_ptr, column + i, output_buffer, rggb );
    }
}

#endif
+
+void UnpackImage_12(const PACKED_IMAGE *input, UNPACKED_IMAGE *output, ENABLED_PARTS enabled_parts, bool rggb )
+{
+ uint8_t *input_buffer = (uint8_t *)input->buffer + input->offset;
+
+ const DIMENSION input_width = input->width / 2;
+ const DIMENSION input_width_m8 = (input_width / 8) * 8;
+ const DIMENSION input_height = input->height / 2;
+
+ size_t input_pitch = input->pitch;
+
+ PIXEL *output_row_ptr_array[MAX_CHANNEL_COUNT];
+ uint32_t output_row_ptr_array_pitch[MAX_CHANNEL_COUNT];
+
+ uint16_t *input_row_ptr = (uint16_t*)input_buffer;
+
+ int channel_number;
+
+ int row;
+
+ for (channel_number = 0; channel_number < MAX_CHANNEL_COUNT; channel_number++)
+ {
+ output_row_ptr_array[channel_number] = (PIXEL *)(output->component_array_list[channel_number].data);
+
+ // output->component_array_list[channel_number].pitch is pitch in bytes, so we need to convert it to pitch in PIXELS
+ output_row_ptr_array_pitch[channel_number] = (output->component_array_list[channel_number].pitch / sizeof(PIXEL));
+ }
+
+ for (row = 0; row < input_height; row++)
+ {
+ uint16_t* input_row2_ptr = input_row_ptr + (input_pitch / sizeof(uint16_t));
+
+ int column = 0;
+
+ // Unpack the row of Bayer components from the BYR4 pattern elements
+ for (; column < input_width_m8; column+= 8)
+ {
+ UnpackPixel_12_8x(input_row_ptr, input_row2_ptr, column, output_row_ptr_array, rggb );
+ }
+
+ // Unpack the row of Bayer components from the BYR4 pattern elements
+ for (; column < input_width; column++)
+ {
+ UnpackPixel_12(input_row_ptr, input_row2_ptr, column, output_row_ptr_array, rggb );
+ }
+
+ input_row_ptr += input_pitch;
+
+ for (channel_number = 0; channel_number < MAX_CHANNEL_COUNT; channel_number++)
+ {
+ output_row_ptr_array[channel_number] += output_row_ptr_array_pitch[channel_number];
+ }
+ }
+}
+
+/** -------------------------- **/
+/** 12 bit PACKED INPUT FORMAT **/
+/** -------------------------- **/
+
/*!
	@brief Unpack one pattern element from packed 12-bit (12P) RAW input.

	The 12P format packs two 12-bit values into three bytes:
	  value0 = byte_0 | (low nibble of byte_1) << 8
	  value1 = (byte_2 << 4) | (high nibble of byte_1) >> 4
	The 2x2 Bayer quad is read from two adjacent input rows, passed through
	the protune log curve, and converted to the GS/RG/BG/GD representation.
*/
static void UnpackPixel_12P(uint16_t *input_row1_ptr, uint16_t *input_row2_ptr, int column, PIXEL *output_buffer[], bool rggb )
{
    uint16_t R1, G1, G2, B1;
    uint16_t GS, GD, RG, BG;

    const int internal_precision = 12;
    const int32_t midpoint = (1 << (internal_precision - 1));

    // Three packed bytes per pattern-element column
    const unsigned int byte_offset = (column * 3);

    { // read first row data
        uint8_t* row1_ptr = (uint8_t*)input_row1_ptr;

        unsigned char byte_0 = row1_ptr[byte_offset + 0];
        unsigned char byte_1 = row1_ptr[byte_offset + 1];
        unsigned char byte_2 = row1_ptr[byte_offset + 2];

        if( rggb )
        {
            R1 = (byte_0) + ((byte_1 & 0x0f) << 8);
            G1 = (byte_2 << 4) + ((byte_1 & 0xf0) >> 4);
        }
        else
        {
            G1 = (byte_0) + ((byte_1 & 0x0f) << 8);
            B1 = (byte_2 << 4) + ((byte_1 & 0xf0) >> 4);
        }
    }

    { // read second row data
        uint8_t* row2_ptr = (uint8_t*)input_row2_ptr;

        unsigned char byte_0 = row2_ptr[byte_offset + 0];
        unsigned char byte_1 = row2_ptr[byte_offset + 1];
        unsigned char byte_2 = row2_ptr[byte_offset + 2];

        if( rggb )
        {
            G2 = (byte_0) + ((byte_1 & 0x0f) << 8);
            B1 = (byte_2 << 4) + ((byte_1 & 0xf0) >> 4);
        }
        else
        {
            R1 = (byte_0) + ((byte_1 & 0x0f) << 8);
            G2 = (byte_2 << 4) + ((byte_1 & 0xf0) >> 4);
        }
    }

    // Apply protune log curve (12-bit values index the table directly)
    G1 = EncoderLogCurve[ G1 ];
    B1 = EncoderLogCurve[ B1 ];
    R1 = EncoderLogCurve[ R1 ];
    G2 = EncoderLogCurve[ G2 ];

    // difference the green components and subtract green from the red and blue components
    GS = (G1 + G2) >> 1;
    GD = (G1 - G2 + 2 * midpoint) >> 1;
    RG = (R1 - GS + 2 * midpoint) >> 1;
    BG = (B1 - GS + 2 * midpoint) >> 1;

    { // write output
        uint16_t *GS_output_row_ptr = (uint16_t *)output_buffer[0];
        uint16_t *GD_output_row_ptr = (uint16_t *)output_buffer[3];
        uint16_t *RG_output_row_ptr = (uint16_t *)output_buffer[1];
        uint16_t *BG_output_row_ptr = (uint16_t *)output_buffer[2];

        GS_output_row_ptr[column] = clamp_uint(GS, internal_precision);
        GD_output_row_ptr[column] = clamp_uint(GD, internal_precision);
        RG_output_row_ptr[column] = clamp_uint(RG, internal_precision);
        BG_output_row_ptr[column] = clamp_uint(BG, internal_precision);
    }
}
+
#if ENABLED(NEON)

#define UnpackPixel_12P_8x UnpackPixel_12P_8x_NEON_
/*!
	@brief NEON version of UnpackPixel_12P: unpacks eight packed pattern elements.

	vld3_u8 de-interleaves the 3-bytes-per-2-values packing into the three byte
	planes; the shift/widen sequences reconstruct the two 12-bit values per
	element (low value = byte0 | low nibble of byte1 << 8; high value =
	byte2 << 4 | byte1 >> 4).  The per-lane indexing g1[i] is a GCC/Clang
	vector extension used for the scalar table lookup.

	NOTE(review): unlike the scalar UnpackPixel_12P, this path does not clamp
	the results with clamp_uint -- confirm the log-curve output always fits in
	internal_precision bits.
*/
static void UnpackPixel_12P_8x_NEON_(uint16_t *input_row1_ptr, uint16_t *input_row2_ptr, int column, PIXEL *output_buffer[], bool rggb )
{
    int i;
    uint16x8_t g1, b1, r1, g2;

    const int internal_precision = 12;
    const int32_t midpoint = (1 << (internal_precision - 1));
    const unsigned int byte_offset = (column * 3)/2;

    // Apply protune log curve
    {
        uint8_t* row1_ptr = (uint8_t*) &input_row1_ptr[byte_offset]; //Taken care: input_row1_ptr is 16-bit pointer. So halving it
        uint8x8x3_t __byte012 = vld3_u8(row1_ptr); //Make sure you move only 24 bytes at a time
        uint8x8_t __byte0 = __byte012.val[0];
        uint8x8_t __byte1 = __byte012.val[1];
        uint8x8_t __byte2 = __byte012.val[2];

        if( rggb )
        {
            // low 12-bit value: byte0 | (low nibble of byte1) << 8
            r1 = vaddw_u8(vshll_n_u8(vshl_n_u8(__byte1, 4), 4), __byte0);
            // high 12-bit value: byte2 << 4 | byte1 >> 4
            g1 = vaddw_u8(vshll_n_u8(__byte2, 4), vshr_n_u8(__byte1, 4));
        }
        else
        {
            g1 = vaddw_u8(vshll_n_u8(vshl_n_u8(__byte1, 4), 4), __byte0);
            b1 = vaddw_u8(vshll_n_u8(__byte2, 4), vshr_n_u8(__byte1, 4));
        }
    }
    {
        uint8_t* row2_ptr = (uint8_t*) &input_row2_ptr[byte_offset];
        uint8x8x3_t __byte012 = vld3_u8(row2_ptr);
        uint8x8_t __byte0 = __byte012.val[0];
        uint8x8_t __byte1 = __byte012.val[1];
        uint8x8_t __byte2 = __byte012.val[2];

        if( rggb )
        {
            g2 = vaddw_u8(vshll_n_u8(vshl_n_u8(__byte1, 4), 4), __byte0);
            b1 = vaddw_u8(vshll_n_u8(__byte2, 4), vshr_n_u8(__byte1, 4));
        }
        else
        {
            r1 = vaddw_u8(vshll_n_u8(vshl_n_u8(__byte1, 4), 4), __byte0);
            g2 = vaddw_u8(vshll_n_u8(__byte2, 4), vshr_n_u8(__byte1, 4));
        }
    }

    // Scalar gather through the log-curve table (lookup cannot be vectorized)
    for(i = 0; i < 8; i++)
    {
        g1[i] = EncoderLogCurve[g1[i]];
        b1[i] = EncoderLogCurve[b1[i]];
        r1[i] = EncoderLogCurve[r1[i]];
        g2[i] = EncoderLogCurve[g2[i]];
    }

    int16x8_t R1, G1, G2, B1;

    G1 = vreinterpretq_s16_u16( g1 );
    B1 = vreinterpretq_s16_u16( b1 );
    R1 = vreinterpretq_s16_u16( r1 );
    G2 = vreinterpretq_s16_u16( g2 );

    int16x8_t GS, GD, RG, BG;

    // GS = (G1 + G2) >> 1 (halving add)
    GS = vhaddq_s16(G1, G2);
    vst1q_s16( output_buffer[0] + column, GS );

    {
        const int16x8_t __midpoint_x2 = vdupq_n_s16(midpoint * 2);

        // GD = (G1 - G2 + 2 * midpoint) >> 1
        GD = vsubq_s16(G1, G2);
        GD = vhaddq_s16(GD, __midpoint_x2);
        vst1q_s16( output_buffer[3] + column, GD );
        // Reuse GS as (2 * midpoint - GS) for the RG/BG halving adds below
        GS = vsubq_s16( __midpoint_x2, GS );
    }

    RG = vhaddq_s16(R1, GS);
    vst1q_s16( output_buffer[1] + column, RG );

    BG = vhaddq_s16(B1, GS);
    vst1q_s16( output_buffer[2] + column, BG );
}

#else

#define UnpackPixel_12P_8x UnpackPixel_12P_8x_C_
/*!
	@brief Portable fallback: unpack eight pattern elements with the scalar routine.
*/
static void UnpackPixel_12P_8x_C_(uint16_t *input_row1_ptr, uint16_t *input_row2_ptr, int column, PIXEL *output_buffer[], bool rggb )
{
    int i;
    for ( i = 0; i < 8; i++)
    {
        UnpackPixel_12P(input_row1_ptr, input_row2_ptr, column + i, output_buffer, rggb );
    }
}

#endif
+
+void UnpackImage_12P(const PACKED_IMAGE *input, UNPACKED_IMAGE *output, ENABLED_PARTS enabled_parts, bool rggb )
+{
+ uint8_t *input_buffer = (uint8_t *)input->buffer + input->offset;
+
+ const DIMENSION input_width = input->width / 2;
+ const DIMENSION input_width_m8 = (input_width / 8) * 8;
+ const DIMENSION input_height = input->height / 2;
+
+ size_t input_pitch = input->pitch;
+
+ PIXEL *output_row_ptr_array[MAX_CHANNEL_COUNT];
+ uint32_t output_row_ptr_array_pitch[MAX_CHANNEL_COUNT];
+
+ uint16_t *input_row_ptr = (uint16_t*)input_buffer;
+
+ int channel_number;
+
+ int row;
+
+ for (channel_number = 0; channel_number < MAX_CHANNEL_COUNT; channel_number++)
+ {
+ output_row_ptr_array[channel_number] = (PIXEL *)(output->component_array_list[channel_number].data);
+
+ output_row_ptr_array_pitch[channel_number] = (output->component_array_list[channel_number].pitch / sizeof(PIXEL));
+ }
+
+ for (row = 0; row < input_height; row++)
+ {
+ uint16_t* input_row2_ptr = input_row_ptr + (input_pitch / sizeof(uint16_t));
+
+ int column = 0;
+
+ // Unpack the row of Bayer components from the BYR4 pattern elements
+ for (; column < input_width_m8; column+= 8)
+ {
+ UnpackPixel_12P_8x(input_row_ptr, input_row2_ptr, column, output_row_ptr_array, rggb );
+ }
+
+ // Unpack the row of Bayer components from the BYR4 pattern elements
+ for (; column < input_width; column++)
+ {
+ UnpackPixel_12P(input_row_ptr, input_row2_ptr, column, output_row_ptr_array, rggb );
+ }
+
+ input_row_ptr += input_pitch;
+
+ for (channel_number = 0; channel_number < MAX_CHANNEL_COUNT; channel_number++)
+ {
+ output_row_ptr_array[channel_number] += output_row_ptr_array_pitch[channel_number];
+ }
+ }
+}
+
diff --git a/gpr/source/lib/vc5_encoder/raw.h b/gpr/source/lib/vc5_encoder/raw.h
new file mode 100755
index 0000000..894d60e
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/raw.h
@@ -0,0 +1,36 @@
+/*! @file raw.h
+ *
+ * @brief Declaration of routines for packing RAW image to a row of pixels.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RAW_H
+#define RAW_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ void UnpackImage_14(const PACKED_IMAGE *input, UNPACKED_IMAGE *output, ENABLED_PARTS enabled_parts, bool rggb );
+
+ void UnpackImage_12(const PACKED_IMAGE *input, UNPACKED_IMAGE *output, ENABLED_PARTS enabled_parts, bool rggb );
+
+ void UnpackImage_12P(const PACKED_IMAGE *input, UNPACKED_IMAGE *output, ENABLED_PARTS enabled_parts, bool rggb );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // RAW_H
diff --git a/gpr/source/lib/vc5_encoder/sections.c b/gpr/source/lib/vc5_encoder/sections.c
new file mode 100755
index 0000000..47efc0d
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/sections.c
@@ -0,0 +1,293 @@
+/*! @file sections.c
+ *
+ * @brief Implementation of code for encoding sections
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+
/*
	@brief Write codec state parameters used for decoding the section into the bitstream

	A section element may be decoded independently from other sections of the same type.
	Concurrent decoding implies that all codec state parameters needed to decode a section
	element be present in that section element.

	In principle, it is only necessary to write the codec state parameters that may be changed
	as other section elements are decoded independently. This sample encoder takes the simple
	approach and writes all non-header codec state parameters into the bitstream.
 */
static CODEC_ERROR PutCodecState(ENCODER *encoder, BITSTREAM *stream, SECTION_NUMBER section_number)
{
    CODEC_STATE *codec = &encoder->codec;
    TAGWORD prescale_shift = 0;

    switch (section_number)
    {
        case SECTION_NUMBER_IMAGE:
            // Image sections are not implemented (see BeginImageSection)
            assert(0);
            break;

        case SECTION_NUMBER_HEADER:
            // No codec state parameters to be written into the bitstream
            break;

        case SECTION_NUMBER_CHANNEL:
            // Encode the transform prescale for the first channel (assume all channels are the same)
            prescale_shift = PackTransformPrescale(&encoder->transform[0]);

            PutTagPair(stream, CODEC_TAG_ChannelNumber, codec->channel_number);
            PutTagPair(stream, CODEC_TAG_SubbandNumber, codec->subband_number);
            PutTagPair(stream, CODEC_TAG_LowpassPrecision, codec->lowpass_precision);
            PutTagPair(stream, CODEC_TAG_Quantization, codec->band.quantization);
            PutTagPair(stream, CODEC_TAG_PrescaleShift, prescale_shift);

#if VC5_ENABLED_PART(VC5_PART_IMAGE_FORMATS)
            // Channel dimensions are only explicit in the bitstream when image formats are not in use
            if (!IsPartEnabled(encoder->enabled_parts, VC5_PART_IMAGE_FORMATS))
            {
                PutTagPair(stream, CODEC_TAG_ChannelWidth, codec->channel_width);
                PutTagPair(stream, CODEC_TAG_ChannelHeight, codec->channel_height);
            }
#endif
#if VC5_ENABLED_PART(VC5_PART_LAYERS)
            if (IsPartEnabled(encoder->enabled_parts, VC5_PART_LAYERS))
            {
                PutTagPair(stream, CODEC_TAG_LayerNumber, codec->layer_number);
            }
#endif
            break;

        case SECTION_NUMBER_WAVELET:
            PutTagPair(stream, CODEC_TAG_ChannelNumber, codec->channel_number);
            PutTagPair(stream, CODEC_TAG_SubbandNumber, codec->subband_number);
            PutTagPair(stream, CODEC_TAG_LowpassPrecision, codec->lowpass_precision);
            //PutTagPair(stream, CODEC_TAG_Quantization, codec->band.quantization);
            //PutTagPair(stream, CODEC_TAG_PrescaleShift, prescale_shift);
            break;

        case SECTION_NUMBER_SUBBAND:
            PutTagPair(stream, CODEC_TAG_ChannelNumber, codec->channel_number);
            PutTagPair(stream, CODEC_TAG_SubbandNumber, codec->subband_number);
            PutTagPair(stream, CODEC_TAG_LowpassPrecision, codec->lowpass_precision);
            PutTagPair(stream, CODEC_TAG_Quantization, codec->band.quantization);
            //PutTagPair(stream, CODEC_TAG_PrescaleShift, prescale_shift);
            break;

        default:
            // NOTE(review): SECTION_NUMBER_LAYER has no case above, so
            // BeginLayerSection (which passes SECTION_NUMBER_LAYER) lands here,
            // asserting in debug builds and returning CODEC_ERROR_UNEXPECTED in
            // release builds -- confirm whether layer sections need their own case
            assert(0);
            return CODEC_ERROR_UNEXPECTED;
    }

    return CODEC_ERROR_OKAY;
}
+
+/*
+ @brief Return true if specified type of section is enabled
+ */
+bool IsSectionEnabled(ENCODER *encoder, SECTION_NUMBER section_number)
+{
+ if (IsPartEnabled(encoder->enabled_parts, VC5_PART_SECTIONS))
+ {
+ if (SECTION_NUMBER_MINIMUM <= section_number && section_number <= SECTION_NUMBER_MAXIMUM)
+ {
+ uint32_t section_mask = SECTION_NUMBER_MASK(section_number);
+
+ if (encoder->enabled_sections & section_mask) {
+ return true;
+ }
+ }
+ }
+
+ // None of the predefined VC-5 sections are enabled
+ return false;
+}
+
+/*
+ @brief Start a new section with the specified tag
+
+ The location of the the tag-value pair that marks the beginning of the new
+ section is pushed onto a stack so that the tag-value pair can be updated with
+ the actual size of the section when the section is ended by a call to the
+ @ref EndSection function.
+ */
+CODEC_ERROR BeginSection(BITSTREAM *bitstream, TAGWORD tag)
+{
+ return PushSampleSize(bitstream, tag);
+}
+
+/*
+ @brief End a section
+
+ Update the tag-value pair that marks the section with the actual size of the section.
+ */
+CODEC_ERROR EndSection(BITSTREAM *bitstream)
+{
+ return PopSampleSize(bitstream);
+}
+
/*!
	@brief Write an image section header into the bitstream

	NOTE(review): image sections are not implemented -- this stub asserts in
	debug builds yet still returns CODEC_ERROR_OKAY in release builds; confirm
	whether an error code should be returned instead.
 */
CODEC_ERROR BeginImageSection(struct _encoder *encoder, BITSTREAM *stream)
{
    assert(0);
    return CODEC_ERROR_OKAY;
}
+
+/*
+ @brief Write a section header for the bitstream header into the bitstream
+ */
+CODEC_ERROR BeginHeaderSection(struct _encoder *encoder, BITSTREAM *stream)
+{
+ // Write the section header for the bitstream header into the bitstream
+ return BeginSection(stream, CODEC_TAG_HeaderSectionTag);
+}
+
+/*
+ @brief Write a layer section header into the bitstream
+
+ Any codec state parameters that are required to decode the layer must be explicitly
+ written into the bitstream so that the layer sections and be decoded concurrently.
+ */
+CODEC_ERROR BeginLayerSection(struct _encoder *encoder, BITSTREAM *stream)
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+ // Duplicate all codec state parameters required for decoding the layer
+ PutCodecState(encoder, stream, SECTION_NUMBER_LAYER);
+
+ // Write the section header for the layer into the bitstream
+ error = BeginSection(stream, CODEC_TAG_LayerSectionTag);
+
+ return error;
+}
+
+/*
+ @brief Write a channel section header into the bitstream
+
+ Any codec state parameters that are required to decode the channel must be explicitly
+ written into the bitstream so that the channel sections and be decoded concurrently.
+ */
+CODEC_ERROR BeginChannelSection(ENCODER *encoder, BITSTREAM *stream)
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+ // Duplicate all codec state parameters required for decoding the channel
+ PutCodecState(encoder, stream, SECTION_NUMBER_CHANNEL);
+
+ // Write the section header for the channel into the bitstream
+ error = BeginSection(stream, CODEC_TAG_ChannelSectionTag);
+
+ return error;
+}
+
+/*
+ @brief Write a wavelet section header into the bitstream
+
+ Any codec state parameters that are required to decode the wavelet must be explicitly
+ written into the bitstream so that the wavelet sections and be decoded concurrently.
+ */
+CODEC_ERROR BeginWaveletSection(struct _encoder *encoder, BITSTREAM *stream)
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+ // Duplicate all codec state parameters required for decoding the wavelet
+ PutCodecState(encoder, stream, SECTION_NUMBER_WAVELET);
+
+ // Write the section header for the wavelet into the bitstream
+ error = BeginSection(stream, CODEC_TAG_WaveletSectionTag);
+
+ return error;
+}
+
+/*
+ @brief Write a subband section header into the bitstream
+
+ Any codec state parameters that are required to decode the subband must be explicitly
+ written into the bitstream so that the subband sections and be decoded concurrently.
+ */
+CODEC_ERROR BeginSubbandSection(struct _encoder *encoder, BITSTREAM *stream)
+{
+ CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+ // Duplicate all codec state parameters required for decoding the subband
+ PutCodecState(encoder, stream, SECTION_NUMBER_SUBBAND);
+
+ // Write the section header for the subband into the bitstream
+ error = BeginSection(stream, CODEC_TAG_SubbandSectionTag);
+
+ return error;
+}
+
+/*!
+ @brief Set the flags that indicate which sections in VC-5 Part 6 are enabled
+
+ The argument is a list of comma-separated integers for the section numbers
+ in the VC-5 Part 2 conformance specification that are enabled for this invocation
+ of the encoder.
+
+ Note: Enabling sections at runtime has no effect unless support for sections
+ is compiled into the program by enabling the corresponding compile-time switch
+ for VC-5 part 6 (sections).
+ */
+bool GetEnabledSections(const char *string, uint32_t *enabled_sections_out)
+{
+ if (string != NULL && enabled_sections_out != NULL)
+ {
+ // No sections are enabled by default
+ ENABLED_SECTIONS enabled_sections = 0;
+
+ const char *p = string;
+ assert(p != NULL);
+ while (*p != '\0')
+ {
+ char *q = NULL;
+ long section_number;
+ if (!isdigit((int)*p)) break;
+ section_number = strtol(p, &q, 10);
+
+ // Is the section number in bounds?
+ if (SECTION_NUMBER_MINIMUM <= section_number && section_number <= SECTION_NUMBER_MAXIMUM)
+ {
+ // Set the bit that corresponds to this section number
+ enabled_sections |= SECTION_NUMBER_MASK(section_number);
+
+ // Advance to the next section number in the command-line argument
+ p = (*q != '\0') ? q + 1 : q;
+ }
+ else
+ {
+ // Invalid section number
+ assert(0);
+ return false;
+ }
+ }
+
+ // Return the bit mask for the enabled sections
+ *enabled_sections_out = enabled_sections;
+
+ // Should have parsed all section numbers in the argument string
+ assert(*p == '\0');
+ return true;
+ }
+
+ // Invalid input arguments
+ return false;
+}
+
+
+#endif
diff --git a/gpr/source/lib/vc5_encoder/sections.h b/gpr/source/lib/vc5_encoder/sections.h
new file mode 100755
index 0000000..602dd81
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/sections.h
@@ -0,0 +1,91 @@
+/*! @file sections.h
+ *
+ * @brief Declaration of routines for handling sections in the encoder.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SECTIONS_H
+#define SECTIONS_H
+
+/*!
+    @brief Enumeration of the predefined section numbers
+
+    The predefined section numbers are defined in ST 2073-2.
+ */
+typedef enum _section_number
+{
+    SECTION_NUMBER_IMAGE = 1,       //!< Image section
+    SECTION_NUMBER_HEADER = 2,      //!< Bitstream header section
+    SECTION_NUMBER_LAYER = 3,       //!< Layer section
+    SECTION_NUMBER_CHANNEL = 4,     //!< Channel section
+    SECTION_NUMBER_WAVELET = 5,     //!< Wavelet section
+    SECTION_NUMBER_SUBBAND = 6,     //!< Subband section
+
+    //TODO: Add more section number definitions as required
+
+    //! Modify the smallest and largest section numbers as more sections are added
+    SECTION_NUMBER_MINIMUM = SECTION_NUMBER_IMAGE,
+    SECTION_NUMBER_MAXIMUM = SECTION_NUMBER_SUBBAND,
+
+} SECTION_NUMBER;
+
+/*!
+    @brief Macro for creating a section number bit mask from a section number
+
+    The macro does not check that the section number argument is valid
+    (it must be between SECTION_NUMBER_MINIMUM and SECTION_NUMBER_MAXIMUM).
+ */
+#define SECTION_NUMBER_MASK(section_number) (1U << ((section_number) - 1))
+
+/*!
+    @brief Data type for the bit mask that represents enabled sections
+
+    The bit mask indicates which section numbers defined in ST 2073-2 are enabled
+    at runtime.
+ */
+typedef uint32_t ENABLED_SECTIONS;
+
+//! Default set of enabled sections: channel, wavelet, and subband sections
+#define VC5_ENABLED_SECTIONS (SECTION_NUMBER_MASK(SECTION_NUMBER_CHANNEL) | \
+                              SECTION_NUMBER_MASK(SECTION_NUMBER_WAVELET) | \
+                              SECTION_NUMBER_MASK(SECTION_NUMBER_SUBBAND))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+    //! Return true if the specified section number is enabled for this encoder
+    bool IsSectionEnabled(struct _encoder *encoder, SECTION_NUMBER section_number);
+
+    //! Write a section header with the specified tag into the bitstream
+    CODEC_ERROR BeginSection(BITSTREAM *bitstream, TAGWORD tag);
+
+    //! Close the most recently opened section
+    CODEC_ERROR EndSection(BITSTREAM *bitstream);
+
+    //! Begin an image section in the bitstream
+    CODEC_ERROR BeginImageSection(struct _encoder *encoder, BITSTREAM *stream);
+
+    //! Begin a bitstream header section
+    CODEC_ERROR BeginHeaderSection(struct _encoder *encoder, BITSTREAM *stream);
+
+    //! Begin a layer section
+    CODEC_ERROR BeginLayerSection(struct _encoder *encoder, BITSTREAM *stream);
+
+    //! Begin a channel section
+    CODEC_ERROR BeginChannelSection(struct _encoder *encoder, BITSTREAM *stream);
+
+    //! Begin a wavelet section
+    CODEC_ERROR BeginWaveletSection(struct _encoder *encoder, BITSTREAM *stream);
+
+    //! Begin a subband section (duplicates the codec state, then writes the section header)
+    CODEC_ERROR BeginSubbandSection(struct _encoder *encoder, BITSTREAM *stream);
+
+    //! Parse a comma-separated list of section numbers into a bit mask of enabled sections
+    bool GetEnabledSections(const char *string, uint32_t *enabled_sections_out);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // SECTIONS_H
diff --git a/gpr/source/lib/vc5_encoder/syntax.c b/gpr/source/lib/vc5_encoder/syntax.c
new file mode 100755
index 0000000..14afdf1
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/syntax.c
@@ -0,0 +1,276 @@
+/*! @file syntax.c
+ *
+ * @brief Implementation of functions for writing bitstream syntax of encoded samples.
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+
+/*!
+    @brief Pop the top value from the sample offset stack
+
+    The stack must not be empty when this routine is called.
+ */
+static uint32_t PopSampleOffsetStack(BITSTREAM *bitstream)
+{
+    // The stack must contain at least one saved offset
+    assert(bitstream->sample_offset_count > 0);
+
+    bitstream->sample_offset_count--;
+    return bitstream->sample_offset_stack[bitstream->sample_offset_count];
+}
+
+#endif
+
+/*!
+    @brief Write a segment at the specified offset in the bitstream
+
+    The segment at the specified offset in the bitstream is overwritten by the new
+    segment provided as an argument.  Typically this is done to update a segment that
+    is intended to provide the size or offset to a syntax element in the encoded sample.
+ */
+CODEC_ERROR PutSampleOffsetSegment(BITSTREAM *bitstream, uint32_t offset, TAGVALUE segment)
+{
+    // Segments are stored in the bitstream in network (big endian) byte order
+    uint32_t word = Swap32(segment.longword);
+
+    // Overwrites are only permitted on segment boundaries
+    assert((offset % sizeof(TAGVALUE)) == 0);
+
+    // Overwrite the segment at the specified location in the underlying byte stream
+    return PutBlock(bitstream->stream, &word, sizeof(word), offset);
+}
+
+//! Return true if the tag marks an optional tag value pair (optional tags are negated)
+static bool IsTagOptional(TAGWORD tag)
+{
+    return tag < 0;
+}
+
+/*!
+    @brief Write the trailer for the lowpass band into the bitstream
+
+    This routine writes a marker into the bitstream that can aid in debugging,
+    but the most important function is to update the segment that contains the
+    size of this subband with the actual size of the lowpass band.
+ */
+CODEC_ERROR PutVideoLowpassTrailer(BITSTREAM *stream)
+{
+    // Check that the bitstream is tag aligned before writing the pixels
+    assert(IsAlignedSegment(stream));
+
+    // Set the size of the large chunk for the lowpass band codeblock
+    // NOTE(review): the return value of PopSampleSize is ignored here; PopSampleSize
+    // returns CODEC_ERROR_UNEXPECTED when VC5_PART_SECTIONS is disabled or the sample
+    // offset stack is empty — confirm that ignoring it is intended
+    PopSampleSize(stream);
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Write the next tag value pair to the bitstream
+
+    The tag half and the value half of the segment are output as two
+    separate bit fields of tagword_count bits each.
+
+    @todo Change the code to use the @ref PutLong function?
+ */
+CODEC_ERROR PutTagValue(BITSTREAM *stream, TAGVALUE segment)
+{
+    // Output the tag half of the segment
+    CODEC_ERROR error = PutBits(stream, segment.tuple.tag, tagword_count);
+    if (error != CODEC_ERROR_OKAY) {
+        return error;
+    }
+
+    // Output the value half of the segment
+    return PutBits(stream, segment.tuple.value, tagword_count);
+}
+
+/*!
+    @brief Write a required tag value pair
+
+    The tag is packed into the high word of a segment and the value into
+    the low word, then the segment is output with a single call to PutLong.
+
+    @todo Should the tag value pair be output on a segment boundary?
+ */
+CODEC_ERROR PutTagPair(BITSTREAM *stream, int tag, int value)
+{
+    uint32_t segment;
+
+    // The bitstream should be aligned on a tag word boundary
+    assert(IsAlignedTag(stream));
+
+    // The value must fit within a tag word
+    assert(((uint32_t)value & ~(uint32_t)CODEC_TAG_MASK) == 0);
+
+    // Pack the tag into the high word and the masked value into the low word
+    segment = ((uint32_t)tag << 16) | ((uint32_t)value & CODEC_TAG_MASK);
+    PutLong(stream, segment);
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Write an optional tag value pair
+
+    The tag is negated to mark the tag value pair as optional in the bitstream.
+
+    @todo Should the tag value pair be output on a segment boundary?
+ */
+CODEC_ERROR PutTagPairOptional(BITSTREAM *stream, int tag, int value)
+{
+    // The bitstream should be aligned on a tag word boundary
+    assert(IsAlignedTag(stream));
+
+    // The value must fit within a tag word
+    assert(((uint32_t)value & ~(uint32_t)CODEC_TAG_MASK) == 0);
+
+    // Negate the tag to mark the tag value pair as optional
+    tag = neg(tag);
+
+    PutLong(stream, ((uint32_t)tag << 16) | (value & CODEC_TAG_MASK));
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Write a tag value pair that specifies the size of a syntax element
+
+    The routine pushes the current position in the bitstream onto the sample offset
+    stack and writes a tag value pair for the size of the current syntax element.
+    The routine @ref PopSampleSize overwrites the segment with a tag value pair
+    that contains the actual size of the syntax element.
+
+    This routine corresponds to the routine SizeTagPush in the current codec implementation.
+
+    NOTE(review): the overflow checks below are asserts only; in release builds a full
+    stack or an oversized position would proceed silently — confirm this is acceptable.
+ */
+CODEC_ERROR PushSampleSize(BITSTREAM *bitstream, TAGWORD tag)
+{
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+
+    size_t position = GetBitstreamPosition(bitstream);
+
+    // Check for stack overflow
+    assert(bitstream->sample_offset_count < MAX_SAMPLE_OFFSET_COUNT);
+
+    // Check that the bitstream position can be pushed onto the stack
+    assert(position <= UINT32_MAX);
+
+    // Push the current sample offset onto the stack
+    bitstream->sample_offset_stack[bitstream->sample_offset_count++] = (uint32_t)position;
+
+#endif
+
+    // Write a tag value pair for the size of this chunk (placeholder value of zero)
+    PutTagPairOptional(bitstream, tag, 0);
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Update a sample size segment with the actual size of the syntax element
+
+    This routine pops the offset in the bitstream to the most recent tag value pair
+    that was written into the bitstream from the sample offset stack and overwrites
+    the segment with the tag value pair that contains the actual size of the syntax
+    element.
+
+    This routine corresponds to the routine SizeTagPop in the current codec implementation.
+
+    NOTE(review): when VC5_PART_SECTIONS is compiled out, or the sample offset stack is
+    empty, this routine returns CODEC_ERROR_UNEXPECTED; callers such as
+    PutVideoLowpassTrailer ignore the return value — confirm this is intended.
+ */
+CODEC_ERROR PopSampleSize(BITSTREAM *bitstream)
+{
+    CODEC_ERROR error = CODEC_ERROR_OKAY;
+
+#if VC5_ENABLED_PART(VC5_PART_SECTIONS)
+
+    if (bitstream->sample_offset_count > 0)
+    {
+        TAGVALUE segment;
+        TAGWORD tag;
+
+        uint32_t current_offset;
+        uint32_t previous_offset;
+
+        uint32_t chunk_size;
+
+        size_t position = GetBitstreamPosition(bitstream);
+
+        // Get the offset to the current position in the bitstream
+        assert(position <= UINT32_MAX);
+        current_offset = (uint32_t)position;
+
+        // Pop the offset for this chunk from the sample offset stack
+        previous_offset = PopSampleOffsetStack(bitstream);
+
+        // The chunk header must precede the current position
+        assert(previous_offset < current_offset);
+
+        // Get the segment for the chunk written at the most recent offset
+        error = GetSampleOffsetSegment(bitstream, previous_offset, &segment);
+        if (error != CODEC_ERROR_OKAY) {
+            return error;
+        }
+
+        // Get the tag for the chunk segment
+        tag = segment.tuple.tag;
+
+        // Should be an optional tag-value pair
+        assert(IsTagOptional(tag));
+        if (! (IsTagOptional(tag))) {
+            return CODEC_ERROR_UNEXPECTED;
+        }
+
+        // Convert the tag to required
+        tag = RequiredTag(tag);
+
+        // Compute the size of the current chunk in bytes (includes the chunk header segment)
+        chunk_size = current_offset - previous_offset;
+
+        if (chunk_size >= 4)
+        {
+            // The chunk payload should contain an integer number of segments
+            assert((chunk_size % sizeof(TAGVALUE)) == 0);
+
+            // Compute the number of segments in the chunk payload (excluding the header)
+            chunk_size = (chunk_size / sizeof(TAGVALUE)) - 1;
+        }
+        else
+        {
+            // The chunk contains only the header segment, so the payload is empty
+            chunk_size = 0;
+        }
+
+        // Does this chunk have a 24-bit size field?
+        if (tag & CODEC_TAG_LARGE_CHUNK)
+        {
+            // Add the most significant eight bits of the size to the tag
+            tag |= ((chunk_size >> 16) & 0xFF);
+        }
+
+        // The segment value is the least significant 16 bits of the payload size
+        chunk_size &= 0xFFFF;
+
+        // Update the segment with the optional tag and chunk size
+        segment.tuple.tag = OptionalTag(tag);
+        segment.tuple.value = chunk_size;
+
+        return PutSampleOffsetSegment(bitstream, previous_offset, segment);
+    }
+#endif
+
+    return CODEC_ERROR_UNEXPECTED;
+}
+
+/*!
+    @brief Write the bitstream start marker
+ */
+CODEC_ERROR PutBitstreamStartMarker(BITSTREAM *stream)
+{
+    assert(stream != NULL);
+    if (stream == NULL) {
+        return CODEC_ERROR_UNEXPECTED;
+    }
+
+    return PutLong(stream, StartMarkerSegment);
+}
diff --git a/gpr/source/lib/vc5_encoder/syntax.h b/gpr/source/lib/vc5_encoder/syntax.h
new file mode 100755
index 0000000..3a85fe7
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/syntax.h
@@ -0,0 +1,44 @@
+/*! @file syntax.h
+ *
+ * @brief Declare functions for writing bitstream syntax of encoded samples.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ENCODER_SYNTAX_H
+#define ENCODER_SYNTAX_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+    //! Overwrite the segment at the specified byte offset in the bitstream
+    CODEC_ERROR PutSampleOffsetSegment(BITSTREAM *bitstream, uint32_t offset, TAGVALUE segment);
+
+    //! Write the trailer for the lowpass band into the bitstream
+    CODEC_ERROR PutVideoLowpassTrailer(BITSTREAM *stream);
+
+    //! Write the trailer for an encoded band (definition not in syntax.c — confirm definition site)
+    CODEC_ERROR PutVideoBandTrailer(BITSTREAM *stream);
+
+    //! Write the next tag value pair to the bitstream
+    CODEC_ERROR PutTagValue(BITSTREAM *stream, TAGVALUE segment);
+
+    //! Write the bitstream start marker
+    CODEC_ERROR PutBitstreamStartMarker(BITSTREAM *stream);
+
+    //! Write a placeholder size segment and push its offset onto the sample offset stack
+    CODEC_ERROR PushSampleSize(BITSTREAM *bitstream, TAGWORD tag);
+
+    //! Pop the most recent size segment offset and patch it with the actual chunk size
+    CODEC_ERROR PopSampleSize(BITSTREAM *bitstream);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // ENCODER_SYNTAX_H
diff --git a/gpr/source/lib/vc5_encoder/vc5_encoder.c b/gpr/source/lib/vc5_encoder/vc5_encoder.c
new file mode 100755
index 0000000..c28d904
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/vc5_encoder.c
@@ -0,0 +1,164 @@
+/*! @file vc5_encoder.c
+ *
+ * @brief Implementation of the top level vc5 encoder data structures and functions.
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+/*!
+    @brief Initialize the encoder parameters with their default values
+
+    Defaults: all compiled-in VC-5 parts enabled, a 4000x3000 input image with a
+    pitch of 4000 components, the default pixel format and quality setting, and
+    the C runtime malloc/free as the memory allocator callbacks.
+
+    A NULL argument is rejected (assert in debug builds, no-op in release builds).
+ */
+void vc5_encoder_parameters_set_default(vc5_encoder_parameters* encoding_parameters)
+{
+    assert(encoding_parameters != NULL);
+    if (encoding_parameters == NULL) {
+        return;
+    }
+
+    encoding_parameters->enabled_parts = VC5_ENABLED_PARTS;
+    encoding_parameters->input_width = 4000;
+    encoding_parameters->input_height = 3000;
+    encoding_parameters->input_pitch = 4000;
+
+    encoding_parameters->pixel_format = VC5_ENCODER_PIXEL_FORMAT_DEFAULT;
+    encoding_parameters->quality_setting = VC5_ENCODER_QUALITY_SETTING_DEFAULT;
+
+    encoding_parameters->mem_alloc = malloc;
+    encoding_parameters->mem_free = free;
+}
+
+/*!
+    @brief Encode a raw Bayer image into a VC-5 bitstream
+
+    Allocates the output buffer with the caller-supplied allocator, encodes the
+    raw input image, and optionally returns a thumbnail RGB image.  On failure
+    the allocated output buffer is released and vc5_buffer->buffer is set to NULL.
+ */
+CODEC_ERROR vc5_encoder_process(const vc5_encoder_parameters* encoding_parameters, /* vc5 encoding parameters */
+                                const gpr_buffer* raw_buffer, /* raw input buffer. */
+                                gpr_buffer* vc5_buffer, /* vc5 output buffer. */
+                                gpr_rgb_buffer* rgb_buffer) /* rgb output buffer. */
+{
+    CODEC_ERROR error = CODEC_ERROR_OKAY;
+    IMAGE image;
+    ENCODER_PARAMETERS parameters;
+
+    STREAM bitstream_file;
+
+    // Upper bound on the size of the encoded sample (bytes)
+    const int max_vc5_buffer_size = 10000000;
+
+    // Initialize the data structure for passing parameters to the encoder
+    InitEncoderParameters(&parameters);
+
+    {
+        // Quantization tables indexed by quality setting (one row per setting)
+        QUANT quant_table[VC5_ENCODER_QUALITY_SETTING_COUNT][sizeof(parameters.quant_table) / sizeof(parameters.quant_table[0])] = {
+            {1, 24, 24, 12, 64, 64, 48, 512, 512, 768}, // CineForm Low
+            {1, 24, 24, 12, 48, 48, 32, 256, 256, 384}, // CineForm Medium
+            {1, 24, 24, 12, 32, 32, 24, 128, 128, 192}, // CineForm High
+            {1, 24, 24, 12, 24, 24, 12, 96, 96, 144}, // CineForm Filmscan-1
+            {1, 24, 24, 12, 24, 24, 12, 64, 64, 96}, // CineForm Filmscan-X
+            {1, 24, 24, 12, 24, 24, 12, 32, 32, 48} // CineForm Filmscan-2
+        };
+
+        if( encoding_parameters->quality_setting < VC5_ENCODER_QUALITY_SETTING_COUNT )
+        {
+            memcpy(parameters.quant_table, quant_table[encoding_parameters->quality_setting], sizeof(parameters.quant_table));
+        }
+    }
+
+    parameters.enabled_parts = encoding_parameters->enabled_parts;
+    parameters.encoded.format = IMAGE_FORMAT_RAW;
+
+#if VC5_ENABLED_PART(VC5_PART_LAYERS)
+    // Test interlaced encoding using one layer per field
+    parameters.layer_count = 2;
+    parameters.progressive = 0;
+    parameters.decompositor = DecomposeFields;
+#endif
+
+    parameters.allocator.Alloc = encoding_parameters->mem_alloc;
+    parameters.allocator.Free = encoding_parameters->mem_free;
+
+    // Check that the enabled parts are correct
+    error = CheckEnabledParts(&parameters.enabled_parts);
+    if (error != CODEC_ERROR_OKAY) {
+        return error;
+    }
+
+    image.buffer = raw_buffer->buffer;
+
+    image.width = encoding_parameters->input_width;
+    image.height = encoding_parameters->input_height;
+    image.pitch = encoding_parameters->input_pitch;
+    // NOTE(review): assumes two bytes per component; the 12-bit packed formats occupy less — confirm
+    image.size = image.width * image.height * 2;
+    image.offset = 0;
+
+    switch( encoding_parameters->pixel_format )
+    {
+        case VC5_ENCODER_PIXEL_FORMAT_RGGB_12:
+            image.format = PIXEL_FORMAT_RAW_RGGB_12;
+            break;
+
+        case VC5_ENCODER_PIXEL_FORMAT_RGGB_12P:
+            image.format = PIXEL_FORMAT_RAW_RGGB_12P;
+            break;
+
+        case VC5_ENCODER_PIXEL_FORMAT_RGGB_14:
+            image.format = PIXEL_FORMAT_RAW_RGGB_14;
+            break;
+
+        case VC5_ENCODER_PIXEL_FORMAT_GBRG_12:
+            image.format = PIXEL_FORMAT_RAW_GBRG_12;
+            break;
+
+        case VC5_ENCODER_PIXEL_FORMAT_GBRG_12P:
+            image.format = PIXEL_FORMAT_RAW_GBRG_12P;
+            break;
+
+        default:
+            // Fail instead of proceeding with an uninitialized image format
+            assert(0);
+            return CODEC_ERROR_UNEXPECTED;
+    }
+
+    // Set the dimensions and pixel format of the packed input image
+    {
+        parameters.input.width = image.width;
+        parameters.input.height = image.height;
+        parameters.input.format = image.format;
+    }
+
+    // Allocate the buffer that receives the encoded sample
+    vc5_buffer->buffer = encoding_parameters->mem_alloc( max_vc5_buffer_size );
+    if (vc5_buffer->buffer == NULL) {
+        return CODEC_ERROR_UNEXPECTED;
+    }
+
+    // Open a stream to the output buffer
+    error = CreateStreamBuffer(&bitstream_file, vc5_buffer->buffer, max_vc5_buffer_size );
+    if (error != CODEC_ERROR_OKAY) {
+        // Release the output buffer so it is not leaked on failure
+        encoding_parameters->mem_free(vc5_buffer->buffer);
+        vc5_buffer->buffer = NULL;
+        return error;
+    }
+
+    RGB_IMAGE rgb_image;
+    InitRGBImage(&rgb_image);
+
+    // Encode the image into the byte stream
+    error = EncodeImage(&image, &bitstream_file, &rgb_image, &parameters);
+    if (error != CODEC_ERROR_OKAY) {
+        // Release the output buffer so it is not leaked on failure
+        encoding_parameters->mem_free(vc5_buffer->buffer);
+        vc5_buffer->buffer = NULL;
+        return error;
+    }
+
+    if( rgb_buffer )
+    {
+        rgb_buffer->buffer = rgb_image.buffer;
+        rgb_buffer->size = rgb_image.size;
+        rgb_buffer->width = rgb_image.width;
+        rgb_buffer->height = rgb_image.height;
+    }
+
+    vc5_buffer->size = bitstream_file.byte_count;
+
+    return CODEC_ERROR_OKAY;
+}
diff --git a/gpr/source/lib/vc5_encoder/vc5_encoder.h b/gpr/source/lib/vc5_encoder/vc5_encoder.h
new file mode 100755
index 0000000..35a9b16
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/vc5_encoder.h
@@ -0,0 +1,103 @@
+/*! @file vc5_encoder.h
+ *
+ * @brief Declaration of the top level vc5 encoder API.
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VC5_ENCODER_H
+#define VC5_ENCODER_H
+
+#include "error.h"
+#include "types.h"
+#include "gpr_buffer.h"
+#include "gpr_rgb_buffer.h"
+#include "vc5_common.h"
+
+#ifdef __cplusplus
+    extern "C" {
+#endif
+
+    /*!
+        @brief Bayer pattern ordering and bit packing for vc5 encoder processing
+     */
+    typedef enum
+    {
+        VC5_ENCODER_PIXEL_FORMAT_RGGB_12 = 0,   // RGGB 12bit pixels packed into 16bits
+
+        VC5_ENCODER_PIXEL_FORMAT_RGGB_12P = 1,  // RGGB 12bit pixels packed into 12bits
+
+        VC5_ENCODER_PIXEL_FORMAT_RGGB_14 = 2,   // RGGB 14bit pixels packed into 16bits
+
+        VC5_ENCODER_PIXEL_FORMAT_GBRG_12 = 3,   // GBRG 12bit pixels packed into 16bits
+
+        VC5_ENCODER_PIXEL_FORMAT_GBRG_12P = 4,  // GBRG 12bit pixels packed into 12bits
+
+        VC5_ENCODER_PIXEL_FORMAT_DEFAULT = VC5_ENCODER_PIXEL_FORMAT_RGGB_14,
+
+    } VC5_ENCODER_PIXEL_FORMAT;
+
+    #define VC5_ENCODER_RGB_RESOLUTION_DEFAULT GPR_RGB_RESOLUTION_SIXTEENTH
+
+    /*!
+        @brief Quality setting of the VC5 encoder
+     */
+    typedef enum
+    {
+        VC5_ENCODER_QUALITY_SETTING_LOW = 0,        // Low (Lowest Quality)
+        VC5_ENCODER_QUALITY_SETTING_MEDIUM = 1,     // Medium
+        VC5_ENCODER_QUALITY_SETTING_HIGH = 2,       // High
+        VC5_ENCODER_QUALITY_SETTING_FS1 = 3,        // Film Scan 1
+        VC5_ENCODER_QUALITY_SETTING_FSX = 4,        // Film Scan X
+        VC5_ENCODER_QUALITY_SETTING_FS2 = 5,        // Film Scan 2 (Highest Quality)
+
+        VC5_ENCODER_QUALITY_SETTING_COUNT = 6,
+
+        VC5_ENCODER_QUALITY_SETTING_DEFAULT = VC5_ENCODER_QUALITY_SETTING_FSX,
+
+    } VC5_ENCODER_QUALITY_SETTING;
+
+    /*!
+        @brief vc5 encoder parameters
+     */
+    typedef struct
+    {
+        ENABLED_PARTS enabled_parts;                    // Mask of VC-5 parts enabled for this encode
+
+        unsigned int input_width;                       // Image Width in Components (Default: 4000)
+        unsigned int input_height;                      // Image Height in Components (Default: 3000)
+        int input_pitch;                                // Image Buffer Stride in Components (Default: 4000)
+
+        VC5_ENCODER_PIXEL_FORMAT pixel_format;          // Input pixel format (Default: VC5_ENCODER_PIXEL_FORMAT_RGGB_14)
+
+        VC5_ENCODER_QUALITY_SETTING quality_setting;    // Quality setting of the encoder (Default: VC5_ENCODER_QUALITY_SETTING_FSX)
+
+        gpr_malloc mem_alloc;                           // Callback function to allocate memory
+
+        gpr_free mem_free;                              // Callback function to free memory
+
+    } vc5_encoder_parameters;
+
+    //! Initialize the encoder parameters with their default values
+    void vc5_encoder_parameters_set_default(vc5_encoder_parameters* encoding_parameters);
+
+    //! Encode a raw image buffer into a vc5 bitstream (and optionally an rgb preview image)
+    CODEC_ERROR vc5_encoder_process(const vc5_encoder_parameters* encoding_parameters, /* vc5 encoding parameters */
+                                    const gpr_buffer* raw_buffer, /* raw input buffer. */
+                                    gpr_buffer* vc5_buffer, /* vc5 output buffer. */
+                                    gpr_rgb_buffer* rgb_buffer); /* rgb output buffer. */
+
+#ifdef __cplusplus
+    }
+#endif
+
+#endif // VC5_ENCODER_H
diff --git a/gpr/source/lib/vc5_encoder/vlc.c b/gpr/source/lib/vc5_encoder/vlc.c
new file mode 100755
index 0000000..38a0ecb
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/vlc.c
@@ -0,0 +1,94 @@
+/*! @file vlc.c
+ *
+ * @brief Implementation of routines to insert variable length codes into the bitstream
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "headers.h"
+
+/*!
+    @brief Write the codewords for a run of zeros into the bitstream
+
+    The codebook contains codewords for a few runs of zeros.  This routine writes
+    multiple codewords into the bitstream until the specified number of zeros has
+    been written into the bitstream.
+*/
+CODEC_ERROR PutZeros(BITSTREAM *stream, const RUNS_TABLE *runs_table, uint32_t count)
+{
+    // Get the length of the codebook and a pointer to the entries
+    uint32_t length = runs_table->length;
+    RLC *rlc = (RLC *)((uint8_t *)runs_table + sizeof(RUNS_TABLE));
+    int index;
+
+    // Output one or more run lengths until the run is finished
+    while (count > 0)
+    {
+        // Index into the codebook to get a run length code that covers most of the run
+        index = (count < length) ? count : length - 1;
+
+        // Output the run length code
+        PutBits(stream, rlc[index].bits, rlc[index].size);
+
+        // Reduce the length of the run by the amount output
+        // NOTE(review): assumes 0 < rlc[index].count <= count for every reachable entry;
+        // a table violating this would loop forever or wrap the unsigned count — confirm
+        // the runs table invariants
+        count -= rlc[index].count;
+    }
+
+    // Should have output enough runs to cover the run of zeros
+    assert(count == 0);
+
+    return CODEC_ERROR_OKAY;
+}
+
+/*!
+    @brief Insert a special codeword into the bitstream
+
+    The codebook contains special codewords in addition to the codebook
+    entries for coefficient magnitudes and runs of zeros.  Special codewords
+    are used to mark important locations in the bitstream.  Currently,
+    the only special codeword is the one that marks the end of an encoded
+    band.
+*/
+CODEC_ERROR PutSpecial(BITSTREAM *stream, const CODEBOOK *codebook, SPECIAL_MARKER marker)
+{
+    const int entry_count = codebook->length;
+    const RLV *entries = (const RLV *)((const uint8_t *)codebook + sizeof(CODEBOOK));
+    int i;
+
+    // Search the codebook for the special code that corresponds to the marker
+    for (i = 0; i < entry_count; i++)
+    {
+        // Special codes have a zero run length and the marker type as the value
+        if (entries[i].count == 0 && entries[i].value == (int32_t)marker) {
+            break;
+        }
+    }
+
+    assert(i < entry_count);
+    if (i >= entry_count) {
+        // Did not find the special code for the marker in the codebook
+        return CODEC_ERROR_INVALID_MARKER;
+    }
+
+    // Output the codeword for the special marker
+    PutBits(stream, entries[i].bits, entries[i].size);
+
+    return CODEC_ERROR_OKAY;
+}
diff --git a/gpr/source/lib/vc5_encoder/vlc.h b/gpr/source/lib/vc5_encoder/vlc.h
new file mode 100755
index 0000000..4947060
--- /dev/null
+++ b/gpr/source/lib/vc5_encoder/vlc.h
@@ -0,0 +1,138 @@
+/*! @file vlc.h
+ *
+ * @brief Declaration of the data structures for variable-length encoding
+ *
+ * @version 1.0.0
+ *
+ * (C) Copyright 2018 GoPro Inc (http://gopro.com/).
+ *
+ * Licensed under either:
+ * - Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
+ * - MIT license, http://opensource.org/licenses/MIT
+ * at your option.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VLC_H
+#define VLC_H
+
+/*!
+    @brief Codebook entries for arbitrary runs and values
+
+    The codebook data structure allows runs of an arbitrary value,
+    but all codec implementations only use runs of zeros.  The
+    codeword for a non-zero value is followed by the sign bit.
+
+    @todo Could add the sign bit to the magnitude entries in this
+    table if it improves performance or makes the code more clear.
+*/
+typedef struct _rlv {
+    uint_fast8_t size;  //!< Size of code word in bits
+    uint32_t bits;      //!< Code word bits right justified
+    uint32_t count;     //!< Run length
+    int32_t value;      //!< Run value (coefficient magnitude, or the marker type for special codes)
+} RLV;
+
+/*!
+    @brief Declaration of a codebook
+
+    This data structure is often called the master codebook to distinguish
+    it from the encoding tables that are derived from the codebook.  The
+    codebook has a header that is immediately followed by the codebook entries.
+    Each entry is an @ref RLV data structure that contains the codeword and
+    the size of the codeword in bits.  Each codeword represents a run length
+    and value.  The current codec implementation only supports runs of zeros,
+    so the run length is one for non-zero values.  A non-zero value is an
+    unsigned coefficient magnitude.  Special codewords that mark significant
+    locations in the bitstream are indicated by a run length of zero and the
+    value indicates the type of marker.
+
+    The codebook is generated by a separate program that takes as input a table
+    of the frequencies of coefficient magnitudes and runs of zeros.
+*/
+typedef struct _codebook
+{
+    uint32_t length;    //!< Number of entries in the code book
+    // The length is followed by the RLV entries
+} CODEBOOK;
+
+//! Macro used to define the codebook generated by the Huffman program
+#define RLVTABLE(n) \
+    static struct \
+    { \
+        uint32_t length; \
+        RLV entries[n]; \
+    }
+
+/*!
+    @brief Table of codewords for coefficient magnitudes
+
+    The entries in this table are indexed by the coefficient magnitude.
+
+    This table is derived from the master codebook by sorting the entries
+    for coefficient magnitudes into increasing order.  Each entry in the
+    table is a codeword and its size in bits.
+*/
+typedef struct _magnitude_table
+{
+    uint32_t length;    //!< Number of entries in the encoding table
+    // The length is followed by the VLE entries
+} MAGS_TABLE;
+
+/*!
+    @brief Entry in the table for encoding coefficients magnitudes
+
+    Each entry is the codeword and its size in bits.  The typename VLE
+    stands for variable length encoding to distinguish this entry from
+    the data structures for variable length coding in general.
+*/
+typedef struct _vle {
+    uint_fast8_t size;  //!< Size of code word in bits
+    uint32_t bits;      //!< Code word bits (right justified)
+} VLE;
+
+/*!
+    @brief Table of codewords for runs of zeros
+
+    The entries in this table are indexed by the length of the run of zeros.
+
+    This table is derived from the master codebook by concatenating codewords
+    for runs of zeros to form a codeword for a run with the specified length.
+
+    Each entry in the table is a codeword, its size in bits, and the number
+    of zeros that are not included in the run represented by the codeword.
+*/
+typedef struct _runs_table
+{
+    uint32_t length;    //!< Number of entries in the encoding table
+    // The length is followed by the RLC entries
+} RUNS_TABLE;
+
+/*!
+    @brief Entry in the table for encoding runs of zeros
+*/
+typedef struct _rlc {       // Codebook entries for runs of zeros
+    uint_fast8_t size;      //!< Size of code word in bits
+    uint32_t count;         //!< Remaining length of the run
+    uint32_t bits;          //!< Code word bits (right justified)
+} RLC;
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+    //! Write the codewords for a run of zeros into the bitstream
+    CODEC_ERROR PutZeros(BITSTREAM *stream, const RUNS_TABLE *runs_table, uint32_t count);
+
+    //! Write the special codeword for the marker into the bitstream
+    CODEC_ERROR PutSpecial(BITSTREAM *stream, const CODEBOOK *codebook, SPECIAL_MARKER marker);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // VLC_H