Merge pull request #53833 from akien-mga/remove-webm-support

Rémi Verschelde 2021-10-15 17:33:06 +02:00 committed by GitHub
commit 1efe7093be
634 changed files with 6 additions and 176465 deletions

View File

@ -235,16 +235,6 @@ Copyright: 1995-2019, The PNG Reference Library Authors.
1995-1996, Guy Eric Schalnat, Group 42, Inc.
License: Zlib
Files: ./thirdparty/libsimplewebm/
Comment: libsimplewebm
Copyright: 2016, Błażej Szczygieł
License: Expat
Files: ./thirdparty/libsimplewebm/libwebm/
Comment: The WebM Project
Copyright: 2010, Google Inc.
License: BSD-3-clause
Files: ./thirdparty/libtheora/
Comment: OggTheora
Copyright: 2002-2009, Xiph.org Foundation
@ -255,17 +245,6 @@ Comment: OggVorbis
Copyright: 2002-2015, Xiph.org Foundation
License: BSD-3-clause
Files: ./thirdparty/libvpx/
Comment: The WebM Project
Copyright: 2010, The WebM Project authors.
License: BSD-3-clause
Files: ./thirdparty/libvpx/third_party/android/cpu-features.c
./thirdparty/libvpx/third_party/android/cpu-features.h
Comment: The Android Open Source Project
Copyright: 2010, The Android Open Source Project
License: BSD-2-clause
Files: ./thirdparty/libwebp/
Comment: WebP codec
Copyright: 2010, Google Inc.
@ -388,14 +367,6 @@ Comment: Intel Open Image Denoise
Copyright: 2009-2019, Intel Corporation
License: Apache-2.0
Files: ./thirdparty/opus/
Comment: Opus
Copyright: 2001-2011, Xiph.Org, Skype Limited, Octasic,
Jean-Marc Valin, Timothy B. Terriberry,
CSIRO, Gregory Maxwell, Mark Borgerding,
Erik de Castro Lopo
License: BSD-3-clause
Files: ./thirdparty/pcre2/
Comment: PCRE2
Copyright: 1997-2020, University of Cambridge

View File

@ -166,12 +166,10 @@ opts.Add(BoolVariable("builtin_libogg", "Use the built-in libogg library", True)
opts.Add(BoolVariable("builtin_libpng", "Use the built-in libpng library", True))
opts.Add(BoolVariable("builtin_libtheora", "Use the built-in libtheora library", True))
opts.Add(BoolVariable("builtin_libvorbis", "Use the built-in libvorbis library", True))
opts.Add(BoolVariable("builtin_libvpx", "Use the built-in libvpx library", True))
opts.Add(BoolVariable("builtin_libwebp", "Use the built-in libwebp library", True))
opts.Add(BoolVariable("builtin_wslay", "Use the built-in wslay library", True))
opts.Add(BoolVariable("builtin_mbedtls", "Use the built-in mbedTLS library", True))
opts.Add(BoolVariable("builtin_miniupnpc", "Use the built-in miniupnpc library", True))
opts.Add(BoolVariable("builtin_opus", "Use the built-in Opus library", True))
opts.Add(BoolVariable("builtin_pcre2", "Use the built-in PCRE2 library", True))
opts.Add(BoolVariable("builtin_pcre2_with_jit", "Use JIT compiler for the built-in PCRE2 library", True))
opts.Add(BoolVariable("builtin_recast", "Use the built-in Recast library", True))

View File

@ -5,9 +5,9 @@
</brief_description>
<description>
Control node for playing video streams using [VideoStream] resources.
Supported video formats are [url=https://www.webmproject.org/]WebM[/url] ([code].webm[/code], [VideoStreamWebm]), [url=https://www.theora.org/]Ogg Theora[/url] ([code].ogv[/code], [VideoStreamTheora]), and any format exposed via a GDNative plugin using [VideoStreamGDNative].
Supported video formats are [url=https://www.theora.org/]Ogg Theora[/url] ([code].ogv[/code], [VideoStreamTheora]) and any format exposed via a GDNative plugin using [VideoStreamGDNative].
[b]Note:[/b] Due to a bug, VideoPlayer does not support localization remapping yet.
[b]Warning:[/b] On HTML5, video playback [i]will[/i] perform poorly due to missing architecture-specific assembly optimizations, especially for VP8/VP9.
[b]Warning:[/b] On HTML5, video playback [i]will[/i] perform poorly due to missing architecture-specific assembly optimizations.
</description>
<tutorials>
</tutorials>
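For readers skimming this diff, here is a minimal engine-side C++ sketch of the playback path that remains after this change: load an Ogg Theora clip and hand it to a VideoPlayer. The node pointer and resource path are assumptions for illustration only; the calls mirror the VideoStream API visible elsewhere in this commit.

// Hypothetical usage sketch (not part of the diff). Assumes "player" points to a
// VideoPlayer node already in the scene tree and that "res://intro.ogv" exists;
// error handling is omitted.
Ref<VideoStream> stream = ResourceLoader::load("res://intro.ogv");
if (stream.is_valid()) {
	player->set_stream(stream); // Any VideoStream works here, e.g. a VideoStreamTheora.
	player->play();
}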

View File

@ -3,9 +3,6 @@
Import("env")
Import("env_modules")
# Only kept to build the thirdparty library used by the theora and webm
# modules.
env_ogg = env_modules.Clone()
# Thirdparty source files

View File

@ -1,252 +0,0 @@
#!/usr/bin/env python
Import("env")
Import("env_modules")
# Only kept to build the thirdparty library used by the webm module.
# AudioStreamOpus was dropped in 3.0 due to incompatibility with the new audio
# engine. If you want to port it, fetch it from the Git history.
env_opus = env_modules.Clone()
# Thirdparty source files
thirdparty_obj = []
# Thirdparty source files
if env["builtin_opus"]:
thirdparty_dir = "#thirdparty/opus/"
thirdparty_sources = [
# Sync with opus_sources.mk
"opus.c",
"opus_decoder.c",
"opus_encoder.c",
"opus_multistream.c",
"opus_multistream_encoder.c",
"opus_multistream_decoder.c",
"repacketizer.c",
"analysis.c",
"mlp.c",
"mlp_data.c",
# Sync with libopusfile Makefile.am
"info.c",
"internal.c",
"opusfile.c",
"stream.c",
# Sync with celt_sources.mk
"celt/bands.c",
"celt/celt.c",
"celt/celt_encoder.c",
"celt/celt_decoder.c",
"celt/cwrs.c",
"celt/entcode.c",
"celt/entdec.c",
"celt/entenc.c",
"celt/kiss_fft.c",
"celt/laplace.c",
"celt/mathops.c",
"celt/mdct.c",
"celt/modes.c",
"celt/pitch.c",
"celt/celt_lpc.c",
"celt/quant_bands.c",
"celt/rate.c",
"celt/vq.c",
# "celt/arm/arm_celt_map.c",
# "celt/arm/armcpu.c",
# "celt/arm/celt_ne10_fft.c",
# "celt/arm/celt_ne10_mdct.c",
# "celt/arm/celt_neon_intr.c",
# Sync with silk_sources.mk
"silk/CNG.c",
"silk/code_signs.c",
"silk/init_decoder.c",
"silk/decode_core.c",
"silk/decode_frame.c",
"silk/decode_parameters.c",
"silk/decode_indices.c",
"silk/decode_pulses.c",
"silk/decoder_set_fs.c",
"silk/dec_API.c",
"silk/enc_API.c",
"silk/encode_indices.c",
"silk/encode_pulses.c",
"silk/gain_quant.c",
"silk/interpolate.c",
"silk/LP_variable_cutoff.c",
"silk/NLSF_decode.c",
"silk/NSQ.c",
"silk/NSQ_del_dec.c",
"silk/PLC.c",
"silk/shell_coder.c",
"silk/tables_gain.c",
"silk/tables_LTP.c",
"silk/tables_NLSF_CB_NB_MB.c",
"silk/tables_NLSF_CB_WB.c",
"silk/tables_other.c",
"silk/tables_pitch_lag.c",
"silk/tables_pulses_per_block.c",
"silk/VAD.c",
"silk/control_audio_bandwidth.c",
"silk/quant_LTP_gains.c",
"silk/VQ_WMat_EC.c",
"silk/HP_variable_cutoff.c",
"silk/NLSF_encode.c",
"silk/NLSF_VQ.c",
"silk/NLSF_unpack.c",
"silk/NLSF_del_dec_quant.c",
"silk/process_NLSFs.c",
"silk/stereo_LR_to_MS.c",
"silk/stereo_MS_to_LR.c",
"silk/check_control_input.c",
"silk/control_SNR.c",
"silk/init_encoder.c",
"silk/control_codec.c",
"silk/A2NLSF.c",
"silk/ana_filt_bank_1.c",
"silk/biquad_alt.c",
"silk/bwexpander_32.c",
"silk/bwexpander.c",
"silk/debug.c",
"silk/decode_pitch.c",
"silk/inner_prod_aligned.c",
"silk/lin2log.c",
"silk/log2lin.c",
"silk/LPC_analysis_filter.c",
"silk/LPC_inv_pred_gain.c",
"silk/table_LSF_cos.c",
"silk/NLSF2A.c",
"silk/NLSF_stabilize.c",
"silk/NLSF_VQ_weights_laroia.c",
"silk/pitch_est_tables.c",
"silk/resampler.c",
"silk/resampler_down2_3.c",
"silk/resampler_down2.c",
"silk/resampler_private_AR2.c",
"silk/resampler_private_down_FIR.c",
"silk/resampler_private_IIR_FIR.c",
"silk/resampler_private_up2_HQ.c",
"silk/resampler_rom.c",
"silk/sigm_Q15.c",
"silk/sort.c",
"silk/sum_sqr_shift.c",
"silk/stereo_decode_pred.c",
"silk/stereo_encode_pred.c",
"silk/stereo_find_predictor.c",
"silk/stereo_quant_pred.c",
]
opus_sources_silk = []
if env["platform"] in ["android", "iphone", "javascript"]:
env_opus.Append(CPPDEFINES=["FIXED_POINT"])
opus_sources_silk = [
"silk/fixed/LTP_analysis_filter_FIX.c",
"silk/fixed/LTP_scale_ctrl_FIX.c",
"silk/fixed/corrMatrix_FIX.c",
"silk/fixed/encode_frame_FIX.c",
"silk/fixed/find_LPC_FIX.c",
"silk/fixed/find_LTP_FIX.c",
"silk/fixed/find_pitch_lags_FIX.c",
"silk/fixed/find_pred_coefs_FIX.c",
"silk/fixed/noise_shape_analysis_FIX.c",
"silk/fixed/prefilter_FIX.c",
"silk/fixed/process_gains_FIX.c",
"silk/fixed/regularize_correlations_FIX.c",
"silk/fixed/residual_energy16_FIX.c",
"silk/fixed/residual_energy_FIX.c",
"silk/fixed/solve_LS_FIX.c",
"silk/fixed/warped_autocorrelation_FIX.c",
"silk/fixed/apply_sine_window_FIX.c",
"silk/fixed/autocorr_FIX.c",
"silk/fixed/burg_modified_FIX.c",
"silk/fixed/k2a_FIX.c",
"silk/fixed/k2a_Q16_FIX.c",
"silk/fixed/pitch_analysis_core_FIX.c",
"silk/fixed/vector_ops_FIX.c",
"silk/fixed/schur64_FIX.c",
"silk/fixed/schur_FIX.c",
]
else:
opus_sources_silk = [
"silk/float/apply_sine_window_FLP.c",
"silk/float/corrMatrix_FLP.c",
"silk/float/encode_frame_FLP.c",
"silk/float/find_LPC_FLP.c",
"silk/float/find_LTP_FLP.c",
"silk/float/find_pitch_lags_FLP.c",
"silk/float/find_pred_coefs_FLP.c",
"silk/float/LPC_analysis_filter_FLP.c",
"silk/float/LTP_analysis_filter_FLP.c",
"silk/float/LTP_scale_ctrl_FLP.c",
"silk/float/noise_shape_analysis_FLP.c",
"silk/float/prefilter_FLP.c",
"silk/float/process_gains_FLP.c",
"silk/float/regularize_correlations_FLP.c",
"silk/float/residual_energy_FLP.c",
"silk/float/solve_LS_FLP.c",
"silk/float/warped_autocorrelation_FLP.c",
"silk/float/wrappers_FLP.c",
"silk/float/autocorrelation_FLP.c",
"silk/float/burg_modified_FLP.c",
"silk/float/bwexpander_FLP.c",
"silk/float/energy_FLP.c",
"silk/float/inner_product_FLP.c",
"silk/float/k2a_FLP.c",
"silk/float/levinsondurbin_FLP.c",
"silk/float/LPC_inv_pred_gain_FLP.c",
"silk/float/pitch_analysis_core_FLP.c",
"silk/float/scale_copy_vector_FLP.c",
"silk/float/scale_vector_FLP.c",
"silk/float/schur_FLP.c",
"silk/float/sort_FLP.c",
]
thirdparty_sources = [thirdparty_dir + file for file in thirdparty_sources + opus_sources_silk]
# also requires libogg
if env["builtin_libogg"]:
env_opus.Prepend(CPPPATH=["#thirdparty/libogg"])
env_opus.Append(CPPDEFINES=["HAVE_CONFIG_H"])
thirdparty_include_paths = [
"",
"celt",
"opus",
"silk",
"silk/fixed",
"silk/float",
]
env_opus.Prepend(CPPPATH=[thirdparty_dir + "/" + dir for dir in thirdparty_include_paths])
if env["platform"] == "android":
if "android_arch" in env and env["android_arch"] == "armv7":
env_opus.Append(CPPDEFINES=["OPUS_ARM_OPT"])
elif "android_arch" in env and env["android_arch"] == "arm64v8":
env_opus.Append(CPPDEFINES=["OPUS_ARM64_OPT"])
elif env["platform"] == "iphone":
if "arch" in env and env["arch"] == "arm":
env_opus.Append(CPPDEFINES=["OPUS_ARM_OPT"])
elif "arch" in env and env["arch"] == "arm64":
env_opus.Append(CPPDEFINES=["OPUS_ARM64_OPT"])
elif env["platform"] == "osx":
if "arch" in env and env["arch"] == "arm64":
env_opus.Append(CPPDEFINES=["OPUS_ARM64_OPT"])
env_thirdparty = env_opus.Clone()
env_thirdparty.disable_warnings()
env_thirdparty.add_source_files(thirdparty_obj, thirdparty_sources)
env.modules_sources += thirdparty_obj
# Godot source files
module_obj = []
env_opus.add_source_files(module_obj, "*.cpp")
env.modules_sources += module_obj
# Needed to force rebuilding the module files when the thirdparty library is updated.
env.Depends(module_obj, thirdparty_obj)

View File

@ -1,6 +0,0 @@
def can_build(env, platform):
return env.module_check_dependencies("opus", ["ogg"])
def configure(env):
pass

View File

@ -1,37 +0,0 @@
/*************************************************************************/
/* register_types.cpp */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#include "register_types.h"
// Dummy module as libvorbis is needed by other modules (theora ...)
void register_opus_types() {}
void unregister_opus_types() {}

View File

@ -1,37 +0,0 @@
/*************************************************************************/
/* register_types.h */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#ifndef OPUS_REGISTER_TYPES_H
#define OPUS_REGISTER_TYPES_H
void register_opus_types();
void unregister_opus_types();
#endif // OPUS_REGISTER_TYPES_H

View File

@ -4,7 +4,7 @@
[VideoStream] resource for Ogg Theora videos.
</brief_description>
<description>
[VideoStream] resource handling the [url=https://www.theora.org/]Ogg Theora[/url] video format with [code].ogv[/code] extension. The Theora codec is less efficient than [VideoStreamWebm]'s VP8 and VP9, but it requires less CPU resources to decode. The Theora codec is decoded on the CPU.
[VideoStream] resource handling the [url=https://www.theora.org/]Ogg Theora[/url] video format with [code].ogv[/code] extension. The Theora codec is decoded on the CPU.
[b]Note:[/b] While Ogg Theora videos can also have an [code].ogg[/code] extension, you will have to rename the extension to [code].ogv[/code] to use those videos within Godot.
</description>
<tutorials>
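As a companion to the note above about the [code].ogv[/code] extension, a hedged sketch of constructing the stream resource directly instead of going through ResourceLoader; the file path is an assumed example, and set_file() follows the same accessor pattern as the VideoStreamWebm binding removed by this commit.

// Hypothetical sketch (not part of the diff): build a VideoStreamTheora by hand.
// The loader only recognizes ".ogv", so an ".ogg" video must be renamed on disk first.
Ref<VideoStreamTheora> theora_stream;
theora_stream.instantiate();
theora_stream->set_file("res://cutscene.ogv"); // assumed example path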

View File

@ -603,7 +603,7 @@ float VideoStreamPlaybackTheora::get_playback_position() const {
};
void VideoStreamPlaybackTheora::seek(float p_time) {
WARN_PRINT_ONCE("Seeking in Theora and WebM videos is not implemented yet (it's only supported for GDNative-provided video streams).");
WARN_PRINT_ONCE("Seeking in Theora videos is not implemented yet (it's only supported for GDNative-provided video streams).");
}
void VideoStreamPlaybackTheora::set_mix_callback(AudioMixCallback p_callback, void *p_userdata) {

View File

@ -1,48 +0,0 @@
#!/usr/bin/env python
Import("env")
Import("env_modules")
env_webm = env_modules.Clone()
# Thirdparty source files
thirdparty_obj = []
thirdparty_dir = "#thirdparty/libsimplewebm/"
thirdparty_sources = [
"libwebm/mkvparser/mkvparser.cc",
"OpusVorbisDecoder.cpp",
"VPXDecoder.cpp",
"WebMDemuxer.cpp",
]
thirdparty_sources = [thirdparty_dir + file for file in thirdparty_sources]
env_webm.Prepend(CPPPATH=[thirdparty_dir, thirdparty_dir + "libwebm/"])
# also requires libogg, libvorbis and libopus
if env["builtin_libogg"]:
env_webm.Prepend(CPPPATH=["#thirdparty/libogg"])
if env["builtin_libvorbis"]:
env_webm.Prepend(CPPPATH=["#thirdparty/libvorbis"])
if env["builtin_opus"]:
env_webm.Prepend(CPPPATH=["#thirdparty/opus"])
if env["builtin_libvpx"]:
env_webm.Prepend(CPPPATH=["#thirdparty/libvpx"])
SConscript("libvpx/SCsub")
env_thirdparty = env_webm.Clone()
env_thirdparty.disable_warnings()
env_thirdparty.add_source_files(thirdparty_obj, thirdparty_sources)
env.modules_sources += thirdparty_obj
# Godot source files
module_obj = []
env_webm.add_source_files(module_obj, "*.cpp")
env.modules_sources += module_obj
# Needed to force rebuilding the module files when the thirdparty library is updated.
env.Depends(module_obj, thirdparty_obj)

View File

@ -1,19 +0,0 @@
def can_build(env, platform):
if platform in ["iphone"]:
return False
return env.module_check_dependencies("webm", ["ogg", "opus", "vorbis"])
def configure(env):
pass
def get_doc_classes():
return [
"VideoStreamWebm",
]
def get_doc_path():
return "doc_classes"

View File

@ -1,28 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>
<class name="VideoStreamWebm" inherits="VideoStream" version="4.0">
<brief_description>
[VideoStream] resource for WebM videos.
</brief_description>
<description>
[VideoStream] resource handling the [url=https://www.webmproject.org/]WebM[/url] video format with [code].webm[/code] extension. Both the VP8 and VP9 codecs are supported. The VP8 and VP9 codecs are more efficient than [VideoStreamTheora], but they require more CPU resources to decode (especially VP9). Both the VP8 and VP9 codecs are decoded on the CPU.
[b]Note:[/b] Alpha channel (also known as transparency) is not supported. The video will always appear to have a black background, even if it originally contains an alpha channel.
[b]Note:[/b] There are known bugs and performance issues with WebM video playback in Godot. If you run into problems, try using the Ogg Theora format instead: [VideoStreamTheora]
</description>
<tutorials>
</tutorials>
<methods>
<method name="get_file">
<return type="String" />
<description>
Returns the WebM video file handled by this [VideoStreamWebm].
</description>
</method>
<method name="set_file">
<return type="void" />
<argument index="0" name="file" type="String" />
<description>
Sets the WebM video file that this [VideoStreamWebm] resource handles. The [code]file[/code] name should have the [code].webm[/code] extension.
</description>
</method>
</methods>
</class>

View File

@ -1,382 +0,0 @@
#!/usr/bin/env python
Import("env")
Import("env_modules")
# Thirdparty sources
libvpx_dir = "#thirdparty/libvpx/"
libvpx_sources = [
"vp8/vp8_dx_iface.c",
"vp8/common/generic/systemdependent.c",
"vp8/common/alloccommon.c",
"vp8/common/blockd.c",
"vp8/common/copy_c.c",
"vp8/common/debugmodes.c",
"vp8/common/dequantize.c",
"vp8/common/entropy.c",
"vp8/common/entropymode.c",
"vp8/common/entropymv.c",
"vp8/common/extend.c",
"vp8/common/filter.c",
"vp8/common/findnearmv.c",
"vp8/common/idct_blk.c",
"vp8/common/idctllm.c",
"vp8/common/loopfilter_filters.c",
"vp8/common/mbpitch.c",
"vp8/common/modecont.c",
"vp8/common/quant_common.c",
"vp8/common/reconinter.c",
"vp8/common/reconintra.c",
"vp8/common/reconintra4x4.c",
"vp8/common/rtcd.c",
"vp8/common/setupintrarecon.c",
"vp8/common/swapyv12buffer.c",
"vp8/common/treecoder.c",
"vp8/common/vp8_loopfilter.c",
"vp8/decoder/dboolhuff.c",
"vp8/decoder/decodeframe.c",
"vp8/decoder/decodemv.c",
"vp8/decoder/detokenize.c",
"vp8/decoder/onyxd_if.c",
"vp9/vp9_dx_iface.c",
"vp9/common/vp9_alloccommon.c",
"vp9/common/vp9_blockd.c",
"vp9/common/vp9_common_data.c",
"vp9/common/vp9_debugmodes.c",
"vp9/common/vp9_entropy.c",
"vp9/common/vp9_entropymode.c",
"vp9/common/vp9_entropymv.c",
"vp9/common/vp9_filter.c",
"vp9/common/vp9_frame_buffers.c",
"vp9/common/vp9_idct.c",
"vp9/common/vp9_loopfilter.c",
"vp9/common/vp9_mvref_common.c",
"vp9/common/vp9_pred_common.c",
"vp9/common/vp9_quant_common.c",
"vp9/common/vp9_reconinter.c",
"vp9/common/vp9_reconintra.c",
"vp9/common/vp9_rtcd.c",
"vp9/common/vp9_scale.c",
"vp9/common/vp9_scan.c",
"vp9/common/vp9_seg_common.c",
"vp9/common/vp9_thread_common.c",
"vp9/common/vp9_tile_common.c",
"vp9/decoder/vp9_decodeframe.c",
"vp9/decoder/vp9_decodemv.c",
"vp9/decoder/vp9_decoder.c",
"vp9/decoder/vp9_detokenize.c",
"vp9/decoder/vp9_dsubexp.c",
"vp9/decoder/vp9_dthread.c",
"vpx/src/vpx_codec.c",
"vpx/src/vpx_decoder.c",
"vpx/src/vpx_image.c",
"vpx/src/vpx_psnr.c",
"vpx_dsp/bitreader.c",
"vpx_dsp/bitreader_buffer.c",
"vpx_dsp/intrapred.c",
"vpx_dsp/inv_txfm.c",
"vpx_dsp/loopfilter.c",
"vpx_dsp/prob.c",
"vpx_dsp/vpx_convolve.c",
"vpx_dsp/vpx_dsp_rtcd.c",
"vpx_mem/vpx_mem.c",
"vpx_scale/vpx_scale_rtcd.c",
"vpx_scale/generic/yv12config.c",
"vpx_scale/generic/yv12extend.c",
"vpx_util/vpx_thread.c",
]
libvpx_sources_mt = [
"vp8/decoder/threading.c",
]
libvpx_sources_intrin_x86 = [
"vp8/common/x86/filter_x86.c",
"vp8/common/x86/loopfilter_x86.c",
"vp8/common/x86/vp8_asm_stubs.c",
"vpx_dsp/x86/vpx_asm_stubs.c",
]
libvpx_sources_intrin_x86_mmx = [
"vp8/common/x86/idct_blk_mmx.c",
]
libvpx_sources_intrin_x86_sse2 = [
"vp8/common/x86/idct_blk_sse2.c",
"vp9/common/x86/vp9_idct_intrin_sse2.c",
"vpx_dsp/x86/inv_txfm_sse2.c",
"vpx_dsp/x86/loopfilter_sse2.c",
]
libvpx_sources_intrin_x86_ssse3 = [
"vpx_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c",
]
libvpx_sources_intrin_x86_avx2 = [
"vpx_dsp/x86/loopfilter_avx2.c",
"vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c",
]
libvpx_sources_x86asm = [
"vp8/common/x86/copy_sse2.asm",
"vp8/common/x86/copy_sse3.asm",
"vp8/common/x86/dequantize_mmx.asm",
"vp8/common/x86/idctllm_mmx.asm",
"vp8/common/x86/idctllm_sse2.asm",
"vp8/common/x86/iwalsh_mmx.asm",
"vp8/common/x86/iwalsh_sse2.asm",
"vp8/common/x86/loopfilter_sse2.asm",
"vp8/common/x86/recon_mmx.asm",
"vp8/common/x86/recon_sse2.asm",
"vp8/common/x86/subpixel_mmx.asm",
"vp8/common/x86/subpixel_sse2.asm",
"vp8/common/x86/subpixel_ssse3.asm",
"vp8/common/x86/vp8_loopfilter_mmx.asm",
"vpx_dsp/x86/intrapred_sse2.asm",
"vpx_dsp/x86/intrapred_ssse3.asm",
"vpx_dsp/x86/inv_wht_sse2.asm",
"vpx_dsp/x86/vpx_convolve_copy_sse2.asm",
"vpx_dsp/x86/vpx_subpixel_8t_sse2.asm",
"vpx_dsp/x86/vpx_subpixel_8t_ssse3.asm",
"vpx_dsp/x86/vpx_subpixel_bilinear_sse2.asm",
"vpx_dsp/x86/vpx_subpixel_bilinear_ssse3.asm",
"vpx_ports/emms.asm",
]
libvpx_sources_x86_64asm = [
"vp8/common/x86/loopfilter_block_sse2_x86_64.asm",
"vpx_dsp/x86/inv_txfm_ssse3_x86_64.asm",
]
libvpx_sources_arm = [
"vpx_ports/arm_cpudetect.c",
"vp8/common/arm/loopfilter_arm.c",
]
libvpx_sources_arm_neon = [
"vp8/common/arm/neon/bilinearpredict_neon.c",
"vp8/common/arm/neon/copymem_neon.c",
"vp8/common/arm/neon/dc_only_idct_add_neon.c",
"vp8/common/arm/neon/dequant_idct_neon.c",
"vp8/common/arm/neon/dequantizeb_neon.c",
"vp8/common/arm/neon/idct_blk_neon.c",
"vp8/common/arm/neon/idct_dequant_0_2x_neon.c",
"vp8/common/arm/neon/idct_dequant_full_2x_neon.c",
"vp8/common/arm/neon/iwalsh_neon.c",
"vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c",
"vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c",
"vp8/common/arm/neon/mbloopfilter_neon.c",
"vp8/common/arm/neon/shortidct4x4llm_neon.c",
"vp8/common/arm/neon/sixtappredict_neon.c",
"vp8/common/arm/neon/vp8_loopfilter_neon.c",
"vp9/common/arm/neon/vp9_iht4x4_add_neon.c",
"vp9/common/arm/neon/vp9_iht8x8_add_neon.c",
"vpx_dsp/arm/idct16x16_1_add_neon.c",
"vpx_dsp/arm/idct16x16_add_neon.c",
"vpx_dsp/arm/idct16x16_neon.c",
"vpx_dsp/arm/idct32x32_1_add_neon.c",
"vpx_dsp/arm/idct32x32_add_neon.c",
"vpx_dsp/arm/idct4x4_1_add_neon.c",
"vpx_dsp/arm/idct4x4_add_neon.c",
"vpx_dsp/arm/idct8x8_1_add_neon.c",
"vpx_dsp/arm/idct8x8_add_neon.c",
"vpx_dsp/arm/intrapred_neon.c",
"vpx_dsp/arm/loopfilter_16_neon.c",
"vpx_dsp/arm/loopfilter_4_neon.c",
"vpx_dsp/arm/loopfilter_8_neon.c",
"vpx_dsp/arm/loopfilter_neon.c",
"vpx_dsp/arm/vpx_convolve8_avg_neon.c",
"vpx_dsp/arm/vpx_convolve8_neon.c",
"vpx_dsp/arm/vpx_convolve_avg_neon.c",
"vpx_dsp/arm/vpx_convolve_copy_neon.c",
"vpx_dsp/arm/vpx_convolve_neon.c",
]
libvpx_sources_arm_neon_gas = [
"vpx_dsp/arm/gas/intrapred_neon_asm.s",
"vpx_dsp/arm/gas/loopfilter_mb_neon.s",
"vpx_dsp/arm/gas/save_reg_neon.s",
]
libvpx_sources_arm_neon_armasm_ms = [
"vpx_dsp/arm/armasm_ms/intrapred_neon_asm.asm",
"vpx_dsp/arm/armasm_ms/loopfilter_mb_neon.asm",
"vpx_dsp/arm/armasm_ms/save_reg_neon.asm",
]
libvpx_sources_arm_neon_gas_apple = [
"vpx_dsp/arm/gas_apple/intrapred_neon_asm.s",
"vpx_dsp/arm/gas_apple/loopfilter_mb_neon.s",
"vpx_dsp/arm/gas_apple/save_reg_neon.s",
]
libvpx_sources = [libvpx_dir + file for file in libvpx_sources]
libvpx_sources_mt = [libvpx_dir + file for file in libvpx_sources_mt]
libvpx_sources_intrin_x86 = [libvpx_dir + file for file in libvpx_sources_intrin_x86]
libvpx_sources_intrin_x86_mmx = [libvpx_dir + file for file in libvpx_sources_intrin_x86_mmx]
libvpx_sources_intrin_x86_sse2 = [libvpx_dir + file for file in libvpx_sources_intrin_x86_sse2]
libvpx_sources_intrin_x86_ssse3 = [libvpx_dir + file for file in libvpx_sources_intrin_x86_ssse3]
libvpx_sources_intrin_x86_avx2 = [libvpx_dir + file for file in libvpx_sources_intrin_x86_avx2]
libvpx_sources_x86asm = [libvpx_dir + file for file in libvpx_sources_x86asm]
libvpx_sources_x86_64asm = [libvpx_dir + file for file in libvpx_sources_x86_64asm]
libvpx_sources_arm = [libvpx_dir + file for file in libvpx_sources_arm]
libvpx_sources_arm_neon = [libvpx_dir + file for file in libvpx_sources_arm_neon]
libvpx_sources_arm_neon_gas = [libvpx_dir + file for file in libvpx_sources_arm_neon_gas]
libvpx_sources_arm_neon_armasm_ms = [libvpx_dir + file for file in libvpx_sources_arm_neon_armasm_ms]
libvpx_sources_arm_neon_gas_apple = [libvpx_dir + file for file in libvpx_sources_arm_neon_gas_apple]
env_libvpx = env_modules.Clone()
env_libvpx.disable_warnings()
env_libvpx.Prepend(CPPPATH=[libvpx_dir])
webm_multithread = env["platform"] != "javascript"
cpu_bits = env["bits"]
webm_cpu_x86 = False
webm_cpu_arm = False
if env["platform"] == "uwp":
if "arm" in env["PROGSUFFIX"]:
webm_cpu_arm = True
else:
webm_cpu_x86 = True
else:
import platform
is_x11_or_server_arm = env["platform"] == "linuxbsd" and (
platform.machine().startswith("arm") or platform.machine().startswith("aarch")
)
is_macos_x86 = env["platform"] == "osx" and ("arch" in env and (env["arch"] != "arm64"))
is_ios_x86 = env["platform"] == "iphone" and ("arch" in env and env["arch"].startswith("x86"))
is_android_x86 = env["platform"] == "android" and env["android_arch"].startswith("x86")
if is_android_x86:
cpu_bits = "32" if env["android_arch"] == "x86" else "64"
webm_cpu_x86 = (
not is_x11_or_server_arm
and (cpu_bits == "32" or cpu_bits == "64")
and (
env["platform"] == "windows"
or env["platform"] == "linuxbsd"
or env["platform"] == "haiku"
or is_macos_x86
or is_android_x86
or is_ios_x86
)
)
webm_cpu_arm = (
is_x11_or_server_arm
or (not is_macos_x86 and env["platform"] == "osx")
or (not is_ios_x86 and env["platform"] == "iphone")
or (not is_android_x86 and env["platform"] == "android")
)
if webm_cpu_x86:
import subprocess
import os
yasm_paths = [
"yasm",
"../../../yasm",
]
yasm_found = False
devnull = open(os.devnull)
for yasm_path in yasm_paths:
try:
yasm_found = True
subprocess.Popen([yasm_path, "--version"], stdout=devnull, stderr=devnull).communicate()
except Exception:
yasm_found = False
if yasm_found:
break
if not yasm_found:
webm_cpu_x86 = False
print("YASM is necessary for WebM SIMD optimizations.")
webm_simd_optimizations = False
if webm_cpu_x86:
if env["platform"] == "windows" or env["platform"] == "uwp":
env_libvpx["ASFORMAT"] = "win"
elif env["platform"] == "osx" or env["platform"] == "iphone":
env_libvpx["ASFORMAT"] = "macho"
else:
env_libvpx["ASFORMAT"] = "elf"
env_libvpx["ASFORMAT"] += cpu_bits
env_libvpx["AS"] = "yasm"
env_libvpx["ASFLAGS"] = "-I" + libvpx_dir[1:] + " -f $ASFORMAT -D $ASCPU"
env_libvpx["ASCOM"] = "$AS $ASFLAGS -o $TARGET $SOURCES"
if cpu_bits == "32":
env_libvpx["ASCPU"] = "X86_32"
elif cpu_bits == "64":
env_libvpx["ASCPU"] = "X86_64"
env_libvpx.Append(CPPDEFINES=["WEBM_X86ASM"])
webm_simd_optimizations = True
if webm_cpu_arm:
if env["platform"] == "iphone":
env_libvpx["ASFLAGS"] = "-arch armv7"
elif env["platform"] == "android" and env["android_arch"] == "armv7" or env["platform"] == "linuxbsd":
env_libvpx["ASFLAGS"] = "-mfpu=neon"
elif env["platform"] == "uwp":
env_libvpx["AS"] = "armasm"
env_libvpx["ASFLAGS"] = ""
env_libvpx["ASCOM"] = "$AS $ASFLAGS -o $TARGET $SOURCES"
env_libvpx.Append(CPPDEFINES=["WEBM_ARMASM"])
webm_simd_optimizations = True
if webm_simd_optimizations == False:
print("WebM SIMD optimizations are disabled. Check if your CPU architecture, CPU bits or platform are supported!")
env_libvpx.add_source_files(env.modules_sources, libvpx_sources)
if webm_multithread:
env_libvpx.add_source_files(env.modules_sources, libvpx_sources_mt)
if webm_cpu_x86:
is_clang_or_gcc = (
("gcc" in os.path.basename(env["CC"])) or ("clang" in os.path.basename(env["CC"])) or ("osxcross" in env)
)
env_libvpx_mmx = env_libvpx.Clone()
if cpu_bits == "32" and is_clang_or_gcc:
env_libvpx_mmx.Append(CCFLAGS=["-mmmx"])
env_libvpx_mmx.add_source_files(env.modules_sources, libvpx_sources_intrin_x86_mmx)
env_libvpx_sse2 = env_libvpx.Clone()
if cpu_bits == "32" and is_clang_or_gcc:
env_libvpx_sse2.Append(CCFLAGS=["-msse2"])
env_libvpx_sse2.add_source_files(env.modules_sources, libvpx_sources_intrin_x86_sse2)
env_libvpx_ssse3 = env_libvpx.Clone()
if is_clang_or_gcc:
env_libvpx_ssse3.Append(CCFLAGS=["-mssse3"])
env_libvpx_ssse3.add_source_files(env.modules_sources, libvpx_sources_intrin_x86_ssse3)
env_libvpx_avx2 = env_libvpx.Clone()
if is_clang_or_gcc:
env_libvpx_avx2.Append(CCFLAGS=["-mavx2"])
env_libvpx_avx2.add_source_files(env.modules_sources, libvpx_sources_intrin_x86_avx2)
env_libvpx.add_source_files(env.modules_sources, libvpx_sources_intrin_x86)
env_libvpx.add_source_files(env.modules_sources, libvpx_sources_x86asm)
if cpu_bits == "64":
env_libvpx.add_source_files(env.modules_sources, libvpx_sources_x86_64asm)
elif webm_cpu_arm:
env_libvpx.add_source_files(env.modules_sources, libvpx_sources_arm)
if env["platform"] == "android":
env_libvpx.Prepend(CPPPATH=[libvpx_dir + "third_party/android"])
env_libvpx.add_source_files(env.modules_sources, [libvpx_dir + "third_party/android/cpu-features.c"])
env_libvpx_neon = env_libvpx.Clone()
env_libvpx_neon.add_source_files(env.modules_sources, libvpx_sources_arm_neon)
if env["platform"] == "uwp":
env_libvpx.add_source_files(env.modules_sources, libvpx_sources_arm_neon_armasm_ms)
elif env["platform"] == "iphone":
env_libvpx.add_source_files(env.modules_sources, libvpx_sources_arm_neon_gas_apple)
elif (is_x11_or_server_arm and cpu_bits == "32") or (
env["platform"] == "android" and not env["android_arch"] == "arm64v8"
):
env_libvpx.add_source_files(env.modules_sources, libvpx_sources_arm_neon_gas)

View File

@ -1,47 +0,0 @@
/*************************************************************************/
/* register_types.cpp */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#include "register_types.h"
#include "video_stream_webm.h"
static Ref<ResourceFormatLoaderWebm> resource_loader_webm;
void register_webm_types() {
resource_loader_webm.instantiate();
ResourceLoader::add_resource_format_loader(resource_loader_webm, true);
GDREGISTER_CLASS(VideoStreamWebm);
}
void unregister_webm_types() {
ResourceLoader::remove_resource_format_loader(resource_loader_webm);
resource_loader_webm.unref();
}

View File

@ -1,37 +0,0 @@
/*************************************************************************/
/* register_types.h */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#ifndef WEBM_REGISTER_TYPES_H
#define WEBM_REGISTER_TYPES_H
void register_webm_types();
void unregister_webm_types();
#endif // WEBM_REGISTER_TYPES_H

View File

@ -1,469 +0,0 @@
/*************************************************************************/
/* video_stream_webm.cpp */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#include "video_stream_webm.h"
#include "core/config/project_settings.h"
#include "core/io/file_access.h"
#include "core/os/os.h"
#include "servers/audio_server.h"
#include "thirdparty/misc/yuv2rgb.h"
// libsimplewebm
#include <OpusVorbisDecoder.hpp>
#include <VPXDecoder.hpp>
// libvpx
#include <vpx/vpx_image.h>
// libwebm
#include <mkvparser/mkvparser.h>
class MkvReader : public mkvparser::IMkvReader {
public:
MkvReader(const String &p_file) {
file = FileAccess::open(p_file, FileAccess::READ);
ERR_FAIL_COND_MSG(!file, "Failed loading resource: '" + p_file + "'.");
}
~MkvReader() {
if (file) {
memdelete(file);
}
}
virtual int Read(long long pos, long len, unsigned char *buf) {
if (file) {
if (file->get_position() != (uint64_t)pos) {
file->seek(pos);
}
if (file->get_buffer(buf, len) == (uint64_t)len) {
return 0;
}
}
return -1;
}
virtual int Length(long long *total, long long *available) {
if (file) {
const uint64_t len = file->get_length();
if (total) {
*total = len;
}
if (available) {
*available = len;
}
return 0;
}
return -1;
}
private:
FileAccess *file;
};
/**/
VideoStreamPlaybackWebm::VideoStreamPlaybackWebm() :
texture(memnew(ImageTexture)) {}
VideoStreamPlaybackWebm::~VideoStreamPlaybackWebm() {
delete_pointers();
}
bool VideoStreamPlaybackWebm::open_file(const String &p_file) {
file_name = p_file;
webm = memnew(WebMDemuxer(new MkvReader(file_name), 0, audio_track));
if (webm->isOpen()) {
video = memnew(VPXDecoder(*webm, OS::get_singleton()->get_processor_count()));
if (video->isOpen()) {
audio = memnew(OpusVorbisDecoder(*webm));
if (audio->isOpen()) {
audio_frame = memnew(WebMFrame);
pcm = (float *)memalloc(sizeof(float) * audio->getBufferSamples() * webm->getChannels());
} else {
memdelete(audio);
audio = nullptr;
}
frame_data.resize((webm->getWidth() * webm->getHeight()) << 2);
Ref<Image> img;
img.instantiate();
img->create(webm->getWidth(), webm->getHeight(), false, Image::FORMAT_RGBA8);
texture->create_from_image(img);
return true;
}
memdelete(video);
video = nullptr;
}
memdelete(webm);
webm = nullptr;
return false;
}
void VideoStreamPlaybackWebm::stop() {
if (playing) {
delete_pointers();
pcm = nullptr;
audio_frame = nullptr;
video_frames = nullptr;
video = nullptr;
audio = nullptr;
open_file(file_name); //Should not fail here...
video_frames_capacity = video_frames_pos = 0;
num_decoded_samples = 0;
samples_offset = -1;
video_frame_delay = video_pos = 0.0;
}
time = 0.0;
playing = false;
}
void VideoStreamPlaybackWebm::play() {
stop();
delay_compensation = ProjectSettings::get_singleton()->get("audio/video/video_delay_compensation_ms");
delay_compensation /= 1000.0;
playing = true;
}
bool VideoStreamPlaybackWebm::is_playing() const {
return playing;
}
void VideoStreamPlaybackWebm::set_paused(bool p_paused) {
paused = p_paused;
}
bool VideoStreamPlaybackWebm::is_paused() const {
return paused;
}
void VideoStreamPlaybackWebm::set_loop(bool p_enable) {
//Empty
}
bool VideoStreamPlaybackWebm::has_loop() const {
return false;
}
float VideoStreamPlaybackWebm::get_length() const {
if (webm) {
return webm->getLength();
}
return 0.0f;
}
float VideoStreamPlaybackWebm::get_playback_position() const {
return video_pos;
}
void VideoStreamPlaybackWebm::seek(float p_time) {
WARN_PRINT_ONCE("Seeking in Theora and WebM videos is not implemented yet (it's only supported for GDNative-provided video streams).");
}
void VideoStreamPlaybackWebm::set_audio_track(int p_idx) {
audio_track = p_idx;
}
Ref<Texture2D> VideoStreamPlaybackWebm::get_texture() const {
return texture;
}
void VideoStreamPlaybackWebm::update(float p_delta) {
if ((!playing || paused) || !video) {
return;
}
time += p_delta;
if (time < video_pos) {
return;
}
bool audio_buffer_full = false;
if (samples_offset > -1) {
//Mix remaining samples
const int to_read = num_decoded_samples - samples_offset;
const int mixed = mix_callback(mix_udata, pcm + samples_offset * webm->getChannels(), to_read);
if (mixed != to_read) {
samples_offset += mixed;
audio_buffer_full = true;
} else {
samples_offset = -1;
}
}
const bool hasAudio = (audio && mix_callback);
while ((hasAudio && !audio_buffer_full && !has_enough_video_frames()) ||
(!hasAudio && video_frames_pos == 0)) {
if (hasAudio && !audio_buffer_full && audio_frame->isValid() &&
audio->getPCMF(*audio_frame, pcm, num_decoded_samples) && num_decoded_samples > 0) {
const int mixed = mix_callback(mix_udata, pcm, num_decoded_samples);
if (mixed != num_decoded_samples) {
samples_offset = mixed;
audio_buffer_full = true;
}
}
WebMFrame *video_frame;
if (video_frames_pos >= video_frames_capacity) {
WebMFrame **video_frames_new = (WebMFrame **)memrealloc(video_frames, ++video_frames_capacity * sizeof(void *));
ERR_FAIL_COND(!video_frames_new); //Out of memory
(video_frames = video_frames_new)[video_frames_capacity - 1] = memnew(WebMFrame);
}
video_frame = video_frames[video_frames_pos];
if (!webm->readFrame(video_frame, audio_frame)) { //This will invalidate frames
break; //Can't demux, EOS?
}
if (video_frame->isValid()) {
++video_frames_pos;
}
};
bool video_frame_done = false;
while (video_frames_pos > 0 && !video_frame_done) {
WebMFrame *video_frame = video_frames[0];
// It seems VPXDecoder::decode has to be executed even though we might skip this frame
if (video->decode(*video_frame)) {
VPXDecoder::IMAGE_ERROR err;
VPXDecoder::Image image;
if (should_process(*video_frame)) {
if ((err = video->getImage(image)) != VPXDecoder::NO_FRAME) {
if (err == VPXDecoder::NO_ERROR && image.w == webm->getWidth() && image.h == webm->getHeight()) {
uint8_t *w = frame_data.ptrw();
bool converted = false;
if (image.chromaShiftW == 0 && image.chromaShiftH == 0 && image.cs == VPX_CS_SRGB) {
uint8_t *wp = w;
unsigned char *rRow = image.planes[2];
unsigned char *gRow = image.planes[0];
unsigned char *bRow = image.planes[1];
for (int i = 0; i < image.h; i++) {
for (int j = 0; j < image.w; j++) {
*wp++ = rRow[j];
*wp++ = gRow[j];
*wp++ = bRow[j];
*wp++ = 255;
}
rRow += image.linesize[2];
gRow += image.linesize[0];
bRow += image.linesize[1];
}
converted = true;
} else if (image.chromaShiftW == 1 && image.chromaShiftH == 1) {
yuv420_2_rgb8888(w, image.planes[0], image.planes[1], image.planes[2], image.w, image.h, image.linesize[0], image.linesize[1], image.w << 2);
//libyuv::I420ToARGB(image.planes[0], image.linesize[0], image.planes[2], image.linesize[2], image.planes[1], image.linesize[1], w.ptr(), image.w << 2, image.w, image.h);
converted = true;
} else if (image.chromaShiftW == 1 && image.chromaShiftH == 0) {
yuv422_2_rgb8888(w, image.planes[0], image.planes[1], image.planes[2], image.w, image.h, image.linesize[0], image.linesize[1], image.w << 2);
//libyuv::I422ToARGB(image.planes[0], image.linesize[0], image.planes[2], image.linesize[2], image.planes[1], image.linesize[1], w.ptr(), image.w << 2, image.w, image.h);
converted = true;
} else if (image.chromaShiftW == 0 && image.chromaShiftH == 0) {
yuv444_2_rgb8888(w, image.planes[0], image.planes[1], image.planes[2], image.w, image.h, image.linesize[0], image.linesize[1], image.w << 2);
//libyuv::I444ToARGB(image.planes[0], image.linesize[0], image.planes[2], image.linesize[2], image.planes[1], image.linesize[1], w.ptr(), image.w << 2, image.w, image.h);
converted = true;
} else if (image.chromaShiftW == 2 && image.chromaShiftH == 0) {
//libyuv::I411ToARGB(image.planes[0], image.linesize[0], image.planes[2], image.linesize[2] image.planes[1], image.linesize[1], w.ptr(), image.w << 2, image.w, image.h);
//converted = true;
}
if (converted) {
Ref<Image> img = memnew(Image(image.w, image.h, 0, Image::FORMAT_RGBA8, frame_data));
texture->update(img); //Zero copy send to rendering server
video_frame_done = true;
}
}
}
}
}
video_pos = video_frame->time;
memmove(video_frames, video_frames + 1, (--video_frames_pos) * sizeof(void *));
video_frames[video_frames_pos] = video_frame;
}
if (video_frames_pos == 0 && webm->isEOS()) {
stop();
}
}
void VideoStreamPlaybackWebm::set_mix_callback(VideoStreamPlayback::AudioMixCallback p_callback, void *p_userdata) {
mix_callback = p_callback;
mix_udata = p_userdata;
}
int VideoStreamPlaybackWebm::get_channels() const {
if (audio) {
return webm->getChannels();
}
return 0;
}
int VideoStreamPlaybackWebm::get_mix_rate() const {
if (audio) {
return webm->getSampleRate();
}
return 0;
}
inline bool VideoStreamPlaybackWebm::has_enough_video_frames() const {
if (video_frames_pos > 0) {
// FIXME: AudioServer output latency was fixed in af9bb0e, previously it used to
// systematically return 0. Now that it gives a proper latency, it broke this
// code where the delay compensation likely never really worked.
//const double audio_delay = AudioServer::get_singleton()->get_output_latency();
const double video_time = video_frames[video_frames_pos - 1]->time;
return video_time >= time + /* audio_delay + */ delay_compensation;
}
return false;
}
bool VideoStreamPlaybackWebm::should_process(WebMFrame &video_frame) {
// FIXME: AudioServer output latency was fixed in af9bb0e, previously it used to
// systematically return 0. Now that it gives a proper latency, it broke this
// code where the delay compensation likely never really worked.
//const double audio_delay = AudioServer::get_singleton()->get_output_latency();
return video_frame.time >= time + /* audio_delay + */ delay_compensation;
}
void VideoStreamPlaybackWebm::delete_pointers() {
if (pcm) {
memfree(pcm);
}
if (audio_frame) {
memdelete(audio_frame);
}
if (video_frames) {
for (int i = 0; i < video_frames_capacity; ++i) {
memdelete(video_frames[i]);
}
memfree(video_frames);
}
if (video) {
memdelete(video);
}
if (audio) {
memdelete(audio);
}
if (webm) {
memdelete(webm);
}
}
/**/
VideoStreamWebm::VideoStreamWebm() {}
Ref<VideoStreamPlayback> VideoStreamWebm::instance_playback() {
Ref<VideoStreamPlaybackWebm> pb = memnew(VideoStreamPlaybackWebm);
pb->set_audio_track(audio_track);
if (pb->open_file(file)) {
return pb;
}
return nullptr;
}
void VideoStreamWebm::set_file(const String &p_file) {
file = p_file;
}
String VideoStreamWebm::get_file() {
return file;
}
void VideoStreamWebm::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_file", "file"), &VideoStreamWebm::set_file);
ClassDB::bind_method(D_METHOD("get_file"), &VideoStreamWebm::get_file);
ADD_PROPERTY(PropertyInfo(Variant::STRING, "file", PROPERTY_HINT_NONE, "", PROPERTY_USAGE_NOEDITOR | PROPERTY_USAGE_INTERNAL), "set_file", "get_file");
}
void VideoStreamWebm::set_audio_track(int p_track) {
audio_track = p_track;
}
////////////
RES ResourceFormatLoaderWebm::load(const String &p_path, const String &p_original_path, Error *r_error, bool p_use_sub_threads, float *r_progress, CacheMode p_cache_mode) {
FileAccess *f = FileAccess::open(p_path, FileAccess::READ);
if (!f) {
if (r_error) {
*r_error = ERR_CANT_OPEN;
}
return RES();
}
VideoStreamWebm *stream = memnew(VideoStreamWebm);
stream->set_file(p_path);
Ref<VideoStreamWebm> webm_stream = Ref<VideoStreamWebm>(stream);
if (r_error) {
*r_error = OK;
}
f->close();
memdelete(f);
return webm_stream;
}
void ResourceFormatLoaderWebm::get_recognized_extensions(List<String> *p_extensions) const {
p_extensions->push_back("webm");
}
bool ResourceFormatLoaderWebm::handles_type(const String &p_type) const {
return ClassDB::is_parent_class(p_type, "VideoStream");
}
String ResourceFormatLoaderWebm::get_resource_type(const String &p_path) const {
String el = p_path.get_extension().to_lower();
if (el == "webm") {
return "VideoStreamWebm";
}
return "";
}

View File

@ -1,135 +0,0 @@
/*************************************************************************/
/* video_stream_webm.h */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#ifndef VIDEO_STREAM_WEBM_H
#define VIDEO_STREAM_WEBM_H
#include "core/io/resource_loader.h"
#include "scene/resources/video_stream.h"
class WebMFrame;
class WebMDemuxer;
class VPXDecoder;
class OpusVorbisDecoder;
class VideoStreamPlaybackWebm : public VideoStreamPlayback {
GDCLASS(VideoStreamPlaybackWebm, VideoStreamPlayback);
String file_name;
int audio_track = 0;
WebMDemuxer *webm = nullptr;
VPXDecoder *video = nullptr;
OpusVorbisDecoder *audio = nullptr;
WebMFrame **video_frames = nullptr, *audio_frame = nullptr;
int video_frames_pos = 0, video_frames_capacity = 0;
int num_decoded_samples = 0, samples_offset = -1;
AudioMixCallback mix_callback = nullptr;
void *mix_udata = nullptr;
bool playing = false, paused = false;
double delay_compensation = 0.0;
double time = 0.0, video_frame_delay = 0.0, video_pos = 0.0;
Vector<uint8_t> frame_data;
Ref<ImageTexture> texture;
float *pcm = nullptr;
public:
VideoStreamPlaybackWebm();
~VideoStreamPlaybackWebm();
bool open_file(const String &p_file);
virtual void stop() override;
virtual void play() override;
virtual bool is_playing() const override;
virtual void set_paused(bool p_paused) override;
virtual bool is_paused() const override;
virtual void set_loop(bool p_enable) override;
virtual bool has_loop() const override;
virtual float get_length() const override;
virtual float get_playback_position() const override;
virtual void seek(float p_time) override;
virtual void set_audio_track(int p_idx) override;
virtual Ref<Texture2D> get_texture() const override;
virtual void update(float p_delta) override;
virtual void set_mix_callback(AudioMixCallback p_callback, void *p_userdata) override;
virtual int get_channels() const override;
virtual int get_mix_rate() const override;
private:
inline bool has_enough_video_frames() const;
bool should_process(WebMFrame &video_frame);
void delete_pointers();
};
/**/
class VideoStreamWebm : public VideoStream {
GDCLASS(VideoStreamWebm, VideoStream);
String file;
int audio_track = 0;
protected:
static void _bind_methods();
public:
VideoStreamWebm();
virtual Ref<VideoStreamPlayback> instance_playback() override;
virtual void set_file(const String &p_file);
String get_file();
virtual void set_audio_track(int p_track) override;
};
class ResourceFormatLoaderWebm : public ResourceFormatLoader {
public:
virtual RES load(const String &p_path, const String &p_original_path = "", Error *r_error = nullptr, bool p_use_sub_threads = false, float *r_progress = nullptr, CacheMode p_cache_mode = CACHE_MODE_REUSE);
virtual void get_recognized_extensions(List<String> *p_extensions) const;
virtual bool handles_type(const String &p_type) const;
virtual String get_resource_type(const String &p_path) const;
};
#endif // VIDEO_STREAM_WEBM_H

View File

@ -288,17 +288,10 @@ def configure(env):
if any(platform.machine() in s for s in list_of_x86):
env["x86_libtheora_opt_gcc"] = True
if not env["builtin_libvpx"]:
env.ParseConfig("pkg-config vpx --cflags --libs")
if not env["builtin_libvorbis"]:
env["builtin_libogg"] = False # Needed to link against system libvorbis
env.ParseConfig("pkg-config vorbis vorbisfile --cflags --libs")
if not env["builtin_opus"]:
env["builtin_libogg"] = False # Needed to link against system opus
env.ParseConfig("pkg-config opus opusfile --cflags --libs")
if not env["builtin_libogg"]:
env.ParseConfig("pkg-config ogg --cflags --libs")

View File

@ -94,7 +94,6 @@ def configure(env):
env["AR"] = mpprefix + "/libexec/llvm-" + mpclangver + "/bin/llvm-ar"
env["RANLIB"] = mpprefix + "/libexec/llvm-" + mpclangver + "/bin/llvm-ranlib"
env["AS"] = mpprefix + "/libexec/llvm-" + mpclangver + "/bin/llvm-as"
env.Append(CPPDEFINES=["__MACPORTS__"]) # hack to fix libvpx MM256_BROADCASTSI128_SI256 define
else:
env["CC"] = "clang"
env["CXX"] = "clang++"
@ -122,7 +121,6 @@ def configure(env):
env["AR"] = basecmd + "ar"
env["RANLIB"] = basecmd + "ranlib"
env["AS"] = basecmd + "as"
env.Append(CPPDEFINES=["__MACPORTS__"]) # hack to fix libvpx MM256_BROADCASTSI128_SI256 define
if env["use_ubsan"] or env["use_asan"] or env["use_tsan"]:
env.extra_suffix += "s"

View File

@ -29,9 +29,9 @@
/*************************************************************************/
#include "video_player.h"
#include "scene/scene_string_names.h"
#include "core/os/os.h"
#include "scene/scene_string_names.h"
#include "servers/audio_server.h"
int VideoPlayer::sp_get_channel_count() const {
@ -55,7 +55,7 @@ bool VideoPlayer::mix(AudioFrame *p_buffer, int p_frames) {
return false;
}
// Called from main thread (eg VideoStreamPlaybackWebm::update)
// Called from main thread (e.g. VideoStreamPlaybackTheora::update).
int VideoPlayer::_audio_mix_callback(void *p_udata, const float *p_data, int p_frames) {
ERR_FAIL_NULL_V(p_udata, 0);
ERR_FAIL_NULL_V(p_data, 0);

thirdparty/README.md (vendored, 53 changed lines)
View File

@ -255,25 +255,6 @@ Files extracted from upstream source:
- `LICENSE`
## libsimplewebm
- Upstream: https://github.com/zaps166/libsimplewebm
- Version: git (fe57fd3cfe6c0af4c6af110b1f84a90cf191d943, 2019)
- License: MIT (main), BSD-3-Clause (libwebm)
This contains libwebm, but the version in use is updated from the one used by libsimplewebm,
and may have *unmarked* alterations from that.
Files extracted from upstream source:
- all the .cpp, .hpp files in the main folder except `example.cpp`
- LICENSE
Important: Some files have Godot-made changes.
They are marked with `// -- GODOT start --` and `// -- GODOT end --`
comments.
## libtheora
- Upstream: https://www.theora.org
@ -303,23 +284,6 @@ Files extracted from upstream source:
- COPYING
## libvpx
- Upstream: https://chromium.googlesource.com/webm/libvpx/
- Version: 1.6.0 (2016)
- License: BSD-3-Clause
Files extracted from upstream source:
TODO.
Important: File `libvpx/vpx_dsp/x86/vpx_subpixel_8t_intrin_avx2.c` has
Godot-made change marked with `// -- GODOT --` comments.
The files `libvpx/third_party/android/cpu-features.{c,h}` were copied
from the Android NDK r18.
## libwebp
- Upstream: https://chromium.googlesource.com/webm/libwebp/
@ -531,23 +495,6 @@ Patch files are provided in `oidn/patches/`.
- scripts/resource_to_cpp.py (used in modules/denoise/resource_to_cpp.py)
## opus
- Upstream: https://opus-codec.org
- Version: 1.1.5 (opus) and 0.8 (opusfile) (2017)
- License: BSD-3-Clause
Files extracted from upstream source:
- all .c and .h files in src/ (both opus and opusfile)
- all .h files in include/ (both opus and opusfile) as opus/
- remove unused `opus_demo.c`,
- remove `http.c`, `wincerts.c` and `winerrno.h` (part of
unused libopusurl)
- celt/ and silk/ subfolders
- COPYING
## pcre2
- Upstream: http://www.pcre.org

View File

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2016 Błażej Szczygieł
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,264 +0,0 @@
/*
MIT License
Copyright (c) 2016 Błażej Szczygieł
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "OpusVorbisDecoder.hpp"
#include <vorbis/codec.h>
#include <opus/opus.h>
#include <string.h>
struct VorbisDecoder
{
vorbis_info info;
vorbis_dsp_state dspState;
vorbis_block block;
ogg_packet op;
bool hasDSPState, hasBlock;
};
/**/
OpusVorbisDecoder::OpusVorbisDecoder(const WebMDemuxer &demuxer) :
m_vorbis(NULL), m_opus(NULL),
m_numSamples(0)
{
switch (demuxer.getAudioCodec())
{
case WebMDemuxer::AUDIO_VORBIS:
m_channels = demuxer.getChannels();
if (openVorbis(demuxer))
return;
break;
case WebMDemuxer::AUDIO_OPUS:
m_channels = demuxer.getChannels();
if (openOpus(demuxer))
return;
break;
default:
return;
}
close();
}
OpusVorbisDecoder::~OpusVorbisDecoder()
{
close();
}
bool OpusVorbisDecoder::isOpen() const
{
return (m_vorbis || m_opus);
}
bool OpusVorbisDecoder::getPCMS16(WebMFrame &frame, short *buffer, int &numOutSamples)
{
if (m_vorbis)
{
m_vorbis->op.packet = frame.buffer;
m_vorbis->op.bytes = frame.bufferSize;
if (vorbis_synthesis(&m_vorbis->block, &m_vorbis->op))
return false;
if (vorbis_synthesis_blockin(&m_vorbis->dspState, &m_vorbis->block))
return false;
const int maxSamples = getBufferSamples();
int samplesCount, count = 0;
float **pcm;
while ((samplesCount = vorbis_synthesis_pcmout(&m_vorbis->dspState, &pcm)))
{
const int toConvert = samplesCount <= maxSamples ? samplesCount : maxSamples;
for (int c = 0; c < m_channels; ++c)
{
float *samples = pcm[c];
for (int i = 0, j = c; i < toConvert; ++i, j += m_channels)
{
int sample = samples[i] * 32767.0f;
if (sample > 32767)
sample = 32767;
else if (sample < -32768)
sample = -32768;
buffer[count + j] = sample;
}
}
vorbis_synthesis_read(&m_vorbis->dspState, toConvert);
count += toConvert;
}
numOutSamples = count;
return true;
}
else if (m_opus)
{
const int samples = opus_decode(m_opus, frame.buffer, frame.bufferSize, buffer, m_numSamples, 0);
if (samples >= 0)
{
numOutSamples = samples;
return true;
}
}
return false;
}
// -- GODOT begin --
bool OpusVorbisDecoder::getPCMF(WebMFrame &frame, float *buffer, int &numOutSamples) {
if (m_vorbis) {
m_vorbis->op.packet = frame.buffer;
m_vorbis->op.bytes = frame.bufferSize;
if (vorbis_synthesis(&m_vorbis->block, &m_vorbis->op))
return false;
if (vorbis_synthesis_blockin(&m_vorbis->dspState, &m_vorbis->block))
return false;
const int maxSamples = getBufferSamples();
int samplesCount, count = 0;
float **pcm;
while ((samplesCount = vorbis_synthesis_pcmout(&m_vorbis->dspState, &pcm))) {
const int toConvert = samplesCount <= maxSamples ? samplesCount : maxSamples;
for (int c = 0; c < m_channels; ++c) {
float *samples = pcm[c];
for (int i = 0, j = c; i < toConvert; ++i, j += m_channels) {
buffer[count + j] = samples[i];
}
}
vorbis_synthesis_read(&m_vorbis->dspState, toConvert);
count += toConvert;
}
numOutSamples = count;
return true;
} else if (m_opus) {
const int samples = opus_decode_float(m_opus, frame.buffer, frame.bufferSize, buffer, m_numSamples, 0);
if (samples >= 0) {
numOutSamples = samples;
return true;
}
}
return false;
}
// -- GODOT end --
bool OpusVorbisDecoder::openVorbis(const WebMDemuxer &demuxer)
{
size_t extradataSize = 0;
const unsigned char *extradata = demuxer.getAudioExtradata(extradataSize);
if (extradataSize < 3 || !extradata || extradata[0] != 2)
return false;
size_t headerSize[3] = {0};
size_t offset = 1;
/* Calculate three headers sizes */
for (int i = 0; i < 2; ++i)
{
for (;;)
{
if (offset >= extradataSize)
return false;
headerSize[i] += extradata[offset];
if (extradata[offset++] < 0xFF)
break;
}
}
headerSize[2] = extradataSize - (headerSize[0] + headerSize[1] + offset);
if (headerSize[0] + headerSize[1] + headerSize[2] + offset != extradataSize)
return false;
ogg_packet op[3];
memset(op, 0, sizeof op);
op[0].packet = (unsigned char *)extradata + offset;
op[0].bytes = headerSize[0];
op[0].b_o_s = 1;
op[1].packet = (unsigned char *)extradata + offset + headerSize[0];
op[1].bytes = headerSize[1];
op[2].packet = (unsigned char *)extradata + offset + headerSize[0] + headerSize[1];
op[2].bytes = headerSize[2];
m_vorbis = new VorbisDecoder;
m_vorbis->hasDSPState = m_vorbis->hasBlock = false;
vorbis_info_init(&m_vorbis->info);
/* Upload three Vorbis headers into libvorbis */
vorbis_comment vc;
vorbis_comment_init(&vc);
for (int i = 0; i < 3; ++i)
{
if (vorbis_synthesis_headerin(&m_vorbis->info, &vc, &op[i]))
{
vorbis_comment_clear(&vc);
return false;
}
}
vorbis_comment_clear(&vc);
if (vorbis_synthesis_init(&m_vorbis->dspState, &m_vorbis->info))
return false;
m_vorbis->hasDSPState = true;
if (m_vorbis->info.channels != m_channels || m_vorbis->info.rate != demuxer.getSampleRate())
return false;
if (vorbis_block_init(&m_vorbis->dspState, &m_vorbis->block))
return false;
m_vorbis->hasBlock = true;
memset(&m_vorbis->op, 0, sizeof m_vorbis->op);
m_numSamples = 4096 / m_channels;
return true;
}
bool OpusVorbisDecoder::openOpus(const WebMDemuxer &demuxer)
{
int opusErr = 0;
m_opus = opus_decoder_create(demuxer.getSampleRate(), m_channels, &opusErr);
if (!opusErr)
{
m_numSamples = demuxer.getSampleRate() * 0.06 + 0.5; //Maximum frame size (for 60 ms frame)
return true;
}
return false;
}
void OpusVorbisDecoder::close()
{
if (m_vorbis)
{
if (m_vorbis->hasBlock)
vorbis_block_clear(&m_vorbis->block);
if (m_vorbis->hasDSPState)
vorbis_dsp_clear(&m_vorbis->dspState);
vorbis_info_clear(&m_vorbis->info);
delete m_vorbis;
}
if (m_opus)
opus_decoder_destroy(m_opus);
}
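The least obvious step in the deleted decoder above is how openVorbis() unpacks the Matroska CodecPrivate blob: byte 0 must be 2 (three laced Vorbis headers), the first two header sizes are stored as runs of bytes summed until a byte below 0xFF is reached, and the third size is whatever remains. A self-contained restatement of that lacing parse, offered as a sketch rather than a claim about any particular container implementation:

#include <cstddef>

// Parse Xiph lacing as done by openVorbis() above; sizes[0..2] receive the
// three header sizes, and false is returned on malformed input.
static bool parse_xiph_lacing(const unsigned char *data, size_t size, size_t sizes[3]) {
	if (size < 3 || !data || data[0] != 2)
		return false;
	size_t offset = 1;
	sizes[0] = sizes[1] = sizes[2] = 0;
	for (int i = 0; i < 2; ++i) {
		for (;;) {
			if (offset >= size)
				return false;
			sizes[i] += data[offset];
			if (data[offset++] < 0xFF)
				break; // A byte below 0xFF terminates this length.
		}
	}
	if (sizes[0] + sizes[1] + offset > size)
		return false;
	sizes[2] = size - (sizes[0] + sizes[1] + offset);
	return true;
}

For example, a blob beginning 0x02 0x1E 0xFF 0x2A decodes to header sizes 30 and 255 + 42 = 297, with the rest of the blob forming the third (setup) header.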

View File

@ -1,65 +0,0 @@
/*
MIT License
Copyright (c) 2016 Błażej Szczygieł
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef OPUSVORBISDECODER_HPP
#define OPUSVORBISDECODER_HPP
#include "WebMDemuxer.hpp"
struct VorbisDecoder;
struct OpusDecoder;
class OpusVorbisDecoder
{
OpusVorbisDecoder(const OpusVorbisDecoder &);
void operator =(const OpusVorbisDecoder &);
public:
OpusVorbisDecoder(const WebMDemuxer &demuxer);
~OpusVorbisDecoder();
bool isOpen() const;
inline int getBufferSamples() const
{
return m_numSamples;
}
bool getPCMS16(WebMFrame &frame, short *buffer, int &numOutSamples);
// -- GODOT begin --
bool getPCMF(WebMFrame &frame, float *buffer, int &numOutSamples);
// -- GODOT end --
private:
bool openVorbis(const WebMDemuxer &demuxer);
bool openOpus(const WebMDemuxer &demuxer);
void close();
VorbisDecoder *m_vorbis;
OpusDecoder *m_opus;
int m_numSamples;
int m_channels;
};
#endif // OPUSVORBISDECODER_HPP
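One detail this header leaves implicit: getBufferSamples() is a per-channel count, so the interleaved buffer handed to getPCMS16()/getPCMF() needs at least samples * channels entries. A hedged sizing sketch (it assumes the decoder was opened from a demuxer that reported the channel count):

#include <vector>
#include "OpusVorbisDecoder.hpp"

// Decode one audio frame into an appropriately sized interleaved buffer.
static bool decode_audio(OpusVorbisDecoder &decoder, WebMFrame &frame, int channels,
		std::vector<short> &pcm, int &out_samples) {
	pcm.resize((size_t)decoder.getBufferSamples() * channels);
	return decoder.getPCMS16(frame, pcm.data(), out_samples);
}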

View File

@ -1,154 +0,0 @@
/*
MIT License
Copyright (c) 2016 Błażej Szczygieł
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "VPXDecoder.hpp"
#include <vpx/vpx_decoder.h>
#include <vpx/vp8dx.h>
#include <stdlib.h>
#include <string.h>
VPXDecoder::VPXDecoder(const WebMDemuxer &demuxer, unsigned threads) :
m_ctx(NULL),
m_iter(NULL),
m_delay(0),
m_last_space(VPX_CS_UNKNOWN)
{
if (threads > 8)
threads = 8;
else if (threads < 1)
threads = 1;
const vpx_codec_dec_cfg_t codecCfg = {
threads,
0,
0
};
vpx_codec_iface_t *codecIface = NULL;
switch (demuxer.getVideoCodec())
{
case WebMDemuxer::VIDEO_VP8:
codecIface = vpx_codec_vp8_dx();
break;
case WebMDemuxer::VIDEO_VP9:
codecIface = vpx_codec_vp9_dx();
m_delay = threads - 1;
break;
default:
return;
}
m_ctx = new vpx_codec_ctx_t;
if (vpx_codec_dec_init(m_ctx, codecIface, &codecCfg, m_delay > 0 ? VPX_CODEC_USE_FRAME_THREADING : 0))
{
delete m_ctx;
m_ctx = NULL;
}
}
VPXDecoder::~VPXDecoder()
{
if (m_ctx)
{
vpx_codec_destroy(m_ctx);
delete m_ctx;
}
}
bool VPXDecoder::decode(const WebMFrame &frame)
{
m_iter = NULL;
return !vpx_codec_decode(m_ctx, frame.buffer, frame.bufferSize, NULL, 0);
}
VPXDecoder::IMAGE_ERROR VPXDecoder::getImage(Image &image)
{
IMAGE_ERROR err = NO_FRAME;
if (vpx_image_t *img = vpx_codec_get_frame(m_ctx, &m_iter))
{
// It seems to be a common problem that UNKNOWN comes up a lot, yet FFMPEG is somehow getting accurate colour-space information.
// After checking FFMPEG code, *they're* getting colour-space information, so I'm assuming something like this is going on.
// It appears to work, at least.
if (img->cs != VPX_CS_UNKNOWN)
m_last_space = img->cs;
if ((img->fmt & VPX_IMG_FMT_PLANAR) && !(img->fmt & (VPX_IMG_FMT_HAS_ALPHA | VPX_IMG_FMT_HIGHBITDEPTH)))
{
if (img->stride[0] && img->stride[1] && img->stride[2])
{
const int uPlane = !!(img->fmt & VPX_IMG_FMT_UV_FLIP) + 1;
const int vPlane = !(img->fmt & VPX_IMG_FMT_UV_FLIP) + 1;
image.w = img->d_w;
image.h = img->d_h;
image.cs = m_last_space;
image.chromaShiftW = img->x_chroma_shift;
image.chromaShiftH = img->y_chroma_shift;
image.planes[0] = img->planes[0];
image.planes[1] = img->planes[uPlane];
image.planes[2] = img->planes[vPlane];
image.linesize[0] = img->stride[0];
image.linesize[1] = img->stride[uPlane];
image.linesize[2] = img->stride[vPlane];
err = NO_ERROR;
}
}
else
{
err = UNSUPPORTED_FRAME;
}
}
return err;
}
/**/
// -- GODOT begin --
#if 0
// -- GODOT end --
static inline int ceilRshift(int val, int shift)
{
return (val + (1 << shift) - 1) >> shift;
}
int VPXDecoder::Image::getWidth(int plane) const
{
if (!plane)
return w;
return ceilRshift(w, chromaShiftW);
}
int VPXDecoder::Image::getHeight(int plane) const
{
if (!plane)
return h;
return ceilRshift(h, chromaShiftH);
}
// -- GODOT begin --
#endif
// -- GODOT end --
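Although Godot compiled the helpers above out with #if 0, the arithmetic they capture still matters to anyone consuming VPXDecoder::Image: a chroma plane's dimension is the luma dimension right-shifted by the chroma shift, rounded up. A tiny worked check:

#include <cassert>

static inline int ceil_rshift(int val, int shift) {
	return (val + (1 << shift) - 1) >> shift;
}

int main() {
	// 4:2:0 content has a chroma shift of 1 in both directions.
	assert(ceil_rshift(1920, 1) == 960); // Even luma width halves exactly.
	assert(ceil_rshift(1921, 1) == 961); // Odd luma width rounds up.
	assert(ceil_rshift(1080, 1) == 540);
	return 0;
}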

View File

@ -1,86 +0,0 @@
/*
MIT License
Copyright (c) 2016 Błażej Szczygieł
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef VPXDECODER_HPP
#define VPXDECODER_HPP
#include "WebMDemuxer.hpp"
struct vpx_codec_ctx;
class VPXDecoder
{
VPXDecoder(const VPXDecoder &);
void operator =(const VPXDecoder &);
public:
class Image
{
public:
// -- GODOT begin --
#if 0
// -- GODOT end --
int getWidth(int plane) const;
int getHeight(int plane) const;
// -- GODOT begin --
#endif
// -- GODOT end --
int w, h;
int cs;
int chromaShiftW, chromaShiftH;
unsigned char *planes[3];
int linesize[3];
};
enum IMAGE_ERROR
{
UNSUPPORTED_FRAME = -1,
NO_ERROR,
NO_FRAME
};
VPXDecoder(const WebMDemuxer &demuxer, unsigned threads = 1);
~VPXDecoder();
inline bool isOpen() const
{
return (bool)m_ctx;
}
inline int getFramesDelay() const
{
return m_delay;
}
bool decode(const WebMFrame &frame);
IMAGE_ERROR getImage(Image &image); //The data is NOT copied! Only 3-plane, 8-bit images are supported.
private:
vpx_codec_ctx *m_ctx;
const void *m_iter;
int m_delay;
int m_last_space;
};
#endif // VPXDECODER_HPP

View File

@ -1,241 +0,0 @@
/*
MIT License
Copyright (c) 2016 Błażej Szczygieł
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "WebMDemuxer.hpp"
#include "mkvparser/mkvparser.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
WebMFrame::WebMFrame() :
bufferSize(0), bufferCapacity(0),
buffer(NULL),
time(0),
key(false)
{}
WebMFrame::~WebMFrame()
{
free(buffer);
}
/**/
WebMDemuxer::WebMDemuxer(mkvparser::IMkvReader *reader, int videoTrack, int audioTrack) :
m_reader(reader),
m_segment(NULL),
m_cluster(NULL), m_block(NULL), m_blockEntry(NULL),
m_blockFrameIndex(0),
m_videoTrack(NULL), m_vCodec(NO_VIDEO),
m_audioTrack(NULL), m_aCodec(NO_AUDIO),
m_isOpen(false),
m_eos(false)
{
long long pos = 0;
if (mkvparser::EBMLHeader().Parse(m_reader, pos))
return;
if (mkvparser::Segment::CreateInstance(m_reader, pos, m_segment))
return;
if (m_segment->Load() < 0)
return;
const mkvparser::Tracks *tracks = m_segment->GetTracks();
const unsigned long tracksCount = tracks->GetTracksCount();
int currVideoTrack = -1, currAudioTrack = -1;
for (unsigned long i = 0; i < tracksCount; ++i)
{
const mkvparser::Track *track = tracks->GetTrackByIndex(i);
if (const char *codecId = track->GetCodecId())
{
if ((!m_videoTrack || currVideoTrack != videoTrack) && track->GetType() == mkvparser::Track::kVideo)
{
if (!strcmp(codecId, "V_VP8"))
m_vCodec = VIDEO_VP8;
else if (!strcmp(codecId, "V_VP9"))
m_vCodec = VIDEO_VP9;
if (m_vCodec != NO_VIDEO)
m_videoTrack = static_cast<const mkvparser::VideoTrack *>(track);
++currVideoTrack;
}
if ((!m_audioTrack || currAudioTrack != audioTrack) && track->GetType() == mkvparser::Track::kAudio)
{
if (!strcmp(codecId, "A_VORBIS"))
m_aCodec = AUDIO_VORBIS;
else if (!strcmp(codecId, "A_OPUS"))
m_aCodec = AUDIO_OPUS;
if (m_aCodec != NO_AUDIO)
m_audioTrack = static_cast<const mkvparser::AudioTrack *>(track);
++currAudioTrack;
}
}
}
if (!m_videoTrack && !m_audioTrack)
return;
m_isOpen = true;
}
WebMDemuxer::~WebMDemuxer()
{
delete m_segment;
delete m_reader;
}
double WebMDemuxer::getLength() const
{
return m_segment->GetDuration() / 1e9;
}
WebMDemuxer::VIDEO_CODEC WebMDemuxer::getVideoCodec() const
{
return m_vCodec;
}
int WebMDemuxer::getWidth() const
{
return m_videoTrack->GetWidth();
}
int WebMDemuxer::getHeight() const
{
return m_videoTrack->GetHeight();
}
WebMDemuxer::AUDIO_CODEC WebMDemuxer::getAudioCodec() const
{
return m_aCodec;
}
const unsigned char *WebMDemuxer::getAudioExtradata(size_t &size) const
{
return m_audioTrack->GetCodecPrivate(size);
}
double WebMDemuxer::getSampleRate() const
{
return m_audioTrack->GetSamplingRate();
}
int WebMDemuxer::getChannels() const
{
return m_audioTrack->GetChannels();
}
int WebMDemuxer::getAudioDepth() const
{
return m_audioTrack->GetBitDepth();
}
bool WebMDemuxer::readFrame(WebMFrame *videoFrame, WebMFrame *audioFrame)
{
const long videoTrackNumber = (videoFrame && m_videoTrack) ? m_videoTrack->GetNumber() : 0;
const long audioTrackNumber = (audioFrame && m_audioTrack) ? m_audioTrack->GetNumber() : 0;
bool blockEntryEOS = false;
if (videoFrame)
videoFrame->bufferSize = 0;
if (audioFrame)
audioFrame->bufferSize = 0;
if (videoTrackNumber == 0 && audioTrackNumber == 0)
return false;
if (m_eos)
return false;
if (!m_cluster)
m_cluster = m_segment->GetFirst();
do
{
bool getNewBlock = false;
long status = 0;
if (!m_blockEntry && !blockEntryEOS)
{
status = m_cluster->GetFirst(m_blockEntry);
getNewBlock = true;
}
else if (blockEntryEOS || m_blockEntry->EOS())
{
m_cluster = m_segment->GetNext(m_cluster);
if (!m_cluster || m_cluster->EOS())
{
m_eos = true;
return false;
}
status = m_cluster->GetFirst(m_blockEntry);
blockEntryEOS = false;
getNewBlock = true;
}
else if (!m_block || m_blockFrameIndex == m_block->GetFrameCount() || notSupportedTrackNumber(videoTrackNumber, audioTrackNumber))
{
status = m_cluster->GetNext(m_blockEntry, m_blockEntry);
if (!m_blockEntry || m_blockEntry->EOS())
{
blockEntryEOS = true;
continue;
}
getNewBlock = true;
}
if (status || !m_blockEntry)
return false;
if (getNewBlock)
{
m_block = m_blockEntry->GetBlock();
m_blockFrameIndex = 0;
}
} while (blockEntryEOS || notSupportedTrackNumber(videoTrackNumber, audioTrackNumber));
WebMFrame *frame = NULL;
const long trackNumber = m_block->GetTrackNumber();
if (trackNumber == videoTrackNumber)
frame = videoFrame;
else if (trackNumber == audioTrackNumber)
frame = audioFrame;
else
{
//Should not be possible
assert(trackNumber == videoTrackNumber || trackNumber == audioTrackNumber);
return false;
}
const mkvparser::Block::Frame &blockFrame = m_block->GetFrame(m_blockFrameIndex++);
if (blockFrame.len > frame->bufferCapacity)
{
unsigned char *newBuff = (unsigned char *)realloc(frame->buffer, frame->bufferCapacity = blockFrame.len);
if (newBuff)
frame->buffer = newBuff;
else // Out of memory
return false;
}
frame->bufferSize = blockFrame.len;
frame->time = m_block->GetTime(m_cluster) / 1e9;
frame->key = m_block->IsKey();
return !blockFrame.Read(m_reader, frame->buffer);
}
inline bool WebMDemuxer::notSupportedTrackNumber(long videoTrackNumber, long audioTrackNumber) const
{
const long trackNumber = m_block->GetTrackNumber();
return (trackNumber != videoTrackNumber && trackNumber != audioTrackNumber);
}

View File

@ -1,125 +0,0 @@
/*
MIT License
Copyright (c) 2016 Błażej Szczygieł
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef WEBMDEMUXER_HPP
#define WEBMDEMUXER_HPP
#include <stddef.h>
namespace mkvparser {
class IMkvReader;
class Segment;
class Cluster;
class Block;
class BlockEntry;
class VideoTrack;
class AudioTrack;
}
class WebMFrame
{
WebMFrame(const WebMFrame &);
void operator =(const WebMFrame &);
public:
WebMFrame();
~WebMFrame();
inline bool isValid() const
{
return bufferSize > 0;
}
long bufferSize, bufferCapacity;
unsigned char *buffer;
double time;
bool key;
};
class WebMDemuxer
{
WebMDemuxer(const WebMDemuxer &);
void operator =(const WebMDemuxer &);
public:
enum VIDEO_CODEC
{
NO_VIDEO,
VIDEO_VP8,
VIDEO_VP9
};
enum AUDIO_CODEC
{
NO_AUDIO,
AUDIO_VORBIS,
AUDIO_OPUS
};
WebMDemuxer(mkvparser::IMkvReader *reader, int videoTrack = 0, int audioTrack = 0);
~WebMDemuxer();
inline bool isOpen() const
{
return m_isOpen;
}
inline bool isEOS() const
{
return m_eos;
}
double getLength() const;
VIDEO_CODEC getVideoCodec() const;
int getWidth() const;
int getHeight() const;
AUDIO_CODEC getAudioCodec() const;
const unsigned char *getAudioExtradata(size_t &size) const; // Needed for Vorbis
double getSampleRate() const;
int getChannels() const;
int getAudioDepth() const;
bool readFrame(WebMFrame *videoFrame, WebMFrame *audioFrame);
private:
inline bool notSupportedTrackNumber(long videoTrackNumber, long audioTrackNumber) const;
mkvparser::IMkvReader *m_reader;
mkvparser::Segment *m_segment;
const mkvparser::Cluster *m_cluster;
const mkvparser::Block *m_block;
const mkvparser::BlockEntry *m_blockEntry;
int m_blockFrameIndex;
const mkvparser::VideoTrack *m_videoTrack;
VIDEO_CODEC m_vCodec;
const mkvparser::AudioTrack *m_audioTrack;
AUDIO_CODEC m_aCodec;
bool m_isOpen;
bool m_eos;
};
#endif // WEBMDEMUXER_HPP
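Taken together with the decoder headers earlier in this diff, the removed libsimplewebm API was driven roughly as follows. This is a hedged sketch, not the actual Godot module code: the caller must supply its own mkvparser::IMkvReader (the trimmed libwebm copy dropped the stock file reader), and error handling is elided.

#include "OpusVorbisDecoder.hpp"
#include "VPXDecoder.hpp"
#include "WebMDemuxer.hpp"

static void demux_and_decode(mkvparser::IMkvReader *reader) {
	WebMDemuxer demuxer(reader); // The demuxer deletes the reader in its destructor.
	if (!demuxer.isOpen())
		return;
	VPXDecoder video(demuxer, /*threads=*/4);
	OpusVorbisDecoder audio(demuxer);
	WebMFrame video_frame, audio_frame;
	VPXDecoder::Image image;
	while (demuxer.readFrame(&video_frame, &audio_frame)) {
		if (video.isOpen() && video_frame.isValid() && video.decode(video_frame)) {
			// With VP9 frame threading, up to getFramesDelay() frames can lag behind.
			while (video.getImage(image) == VPXDecoder::NO_ERROR) {
				// image.planes[0..2] hold Y/U/V with image.linesize[] strides.
			}
		}
		if (audio.isOpen() && audio_frame.isValid()) {
			// Hand the frame to getPCMS16()/getPCMF() as sketched after OpusVorbisDecoder.hpp.
		}
	}
}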

View File

@ -1,4 +0,0 @@
# Names should be added to this file like so:
# Name or Organization <email address>
Google Inc.

View File

@ -1,30 +0,0 @@
Copyright (c) 2010, Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Google nor the names of its contributors may
be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,23 +0,0 @@
Additional IP Rights Grant (Patents)
------------------------------------
"These implementations" means the copyrightable works that implement the WebM
codecs distributed by Google as part of the WebM Project.
Google hereby grants to you a perpetual, worldwide, non-exclusive, no-charge,
royalty-free, irrevocable (except as stated in this section) patent license to
make, have made, use, offer to sell, sell, import, transfer, and otherwise
run, modify and propagate the contents of these implementations of WebM, where
such license applies only to those patent claims, both currently owned by
Google and acquired in the future, licensable by Google that are necessarily
infringed by these implementations of WebM. This grant does not include claims
that would be infringed only as a consequence of further modification of these
implementations. If you or your agent or exclusive licensee institute or order
or agree to the institution of patent litigation or any other patent
enforcement activity against any entity (including a cross-claim or
counterclaim in a lawsuit) alleging that any of these implementations of WebM
or any code incorporated within any of these implementations of WebM
constitute direct or contributory patent infringement, or inducement of
patent infringement, then any patent rights granted to you under this License
for these implementations of WebM shall terminate as of the date such
litigation is filed.

View File

@ -1,11 +0,0 @@
URL: https://chromium.googlesource.com/webm/libwebm
Version: d7c62173ff6b4a5e0a2f86683a5b67db98cf09bf
License: BSD
License File: LICENSE.txt
Description:
libwebm is used to handle WebM container I/O.
Local Changes:
* Removed: "mkvmuxer", "hdr_util", "file_util", "mkv_reader".
* Make "~IMkvReader()" public.

View File

@ -1,192 +0,0 @@
// Copyright (c) 2012 The WebM project authors. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
#ifndef COMMON_WEBMIDS_H_
#define COMMON_WEBMIDS_H_
namespace libwebm {
enum MkvId {
kMkvEBML = 0x1A45DFA3,
kMkvEBMLVersion = 0x4286,
kMkvEBMLReadVersion = 0x42F7,
kMkvEBMLMaxIDLength = 0x42F2,
kMkvEBMLMaxSizeLength = 0x42F3,
kMkvDocType = 0x4282,
kMkvDocTypeVersion = 0x4287,
kMkvDocTypeReadVersion = 0x4285,
kMkvVoid = 0xEC,
kMkvSignatureSlot = 0x1B538667,
kMkvSignatureAlgo = 0x7E8A,
kMkvSignatureHash = 0x7E9A,
kMkvSignaturePublicKey = 0x7EA5,
kMkvSignature = 0x7EB5,
kMkvSignatureElements = 0x7E5B,
kMkvSignatureElementList = 0x7E7B,
kMkvSignedElement = 0x6532,
// segment
kMkvSegment = 0x18538067,
// Meta Seek Information
kMkvSeekHead = 0x114D9B74,
kMkvSeek = 0x4DBB,
kMkvSeekID = 0x53AB,
kMkvSeekPosition = 0x53AC,
// Segment Information
kMkvInfo = 0x1549A966,
kMkvTimecodeScale = 0x2AD7B1,
kMkvDuration = 0x4489,
kMkvDateUTC = 0x4461,
kMkvTitle = 0x7BA9,
kMkvMuxingApp = 0x4D80,
kMkvWritingApp = 0x5741,
// Cluster
kMkvCluster = 0x1F43B675,
kMkvTimecode = 0xE7,
kMkvPrevSize = 0xAB,
kMkvBlockGroup = 0xA0,
kMkvBlock = 0xA1,
kMkvBlockDuration = 0x9B,
kMkvReferenceBlock = 0xFB,
kMkvLaceNumber = 0xCC,
kMkvSimpleBlock = 0xA3,
kMkvBlockAdditions = 0x75A1,
kMkvBlockMore = 0xA6,
kMkvBlockAddID = 0xEE,
kMkvBlockAdditional = 0xA5,
kMkvDiscardPadding = 0x75A2,
// Track
kMkvTracks = 0x1654AE6B,
kMkvTrackEntry = 0xAE,
kMkvTrackNumber = 0xD7,
kMkvTrackUID = 0x73C5,
kMkvTrackType = 0x83,
kMkvFlagEnabled = 0xB9,
kMkvFlagDefault = 0x88,
kMkvFlagForced = 0x55AA,
kMkvFlagLacing = 0x9C,
kMkvDefaultDuration = 0x23E383,
kMkvMaxBlockAdditionID = 0x55EE,
kMkvName = 0x536E,
kMkvLanguage = 0x22B59C,
kMkvCodecID = 0x86,
kMkvCodecPrivate = 0x63A2,
kMkvCodecName = 0x258688,
kMkvCodecDelay = 0x56AA,
kMkvSeekPreRoll = 0x56BB,
// video
kMkvVideo = 0xE0,
kMkvFlagInterlaced = 0x9A,
kMkvStereoMode = 0x53B8,
kMkvAlphaMode = 0x53C0,
kMkvPixelWidth = 0xB0,
kMkvPixelHeight = 0xBA,
kMkvPixelCropBottom = 0x54AA,
kMkvPixelCropTop = 0x54BB,
kMkvPixelCropLeft = 0x54CC,
kMkvPixelCropRight = 0x54DD,
kMkvDisplayWidth = 0x54B0,
kMkvDisplayHeight = 0x54BA,
kMkvDisplayUnit = 0x54B2,
kMkvAspectRatioType = 0x54B3,
kMkvFrameRate = 0x2383E3,
// end video
// colour
kMkvColour = 0x55B0,
kMkvMatrixCoefficients = 0x55B1,
kMkvBitsPerChannel = 0x55B2,
kMkvChromaSubsamplingHorz = 0x55B3,
kMkvChromaSubsamplingVert = 0x55B4,
kMkvCbSubsamplingHorz = 0x55B5,
kMkvCbSubsamplingVert = 0x55B6,
kMkvChromaSitingHorz = 0x55B7,
kMkvChromaSitingVert = 0x55B8,
kMkvRange = 0x55B9,
kMkvTransferCharacteristics = 0x55BA,
kMkvPrimaries = 0x55BB,
kMkvMaxCLL = 0x55BC,
kMkvMaxFALL = 0x55BD,
// mastering metadata
kMkvMasteringMetadata = 0x55D0,
kMkvPrimaryRChromaticityX = 0x55D1,
kMkvPrimaryRChromaticityY = 0x55D2,
kMkvPrimaryGChromaticityX = 0x55D3,
kMkvPrimaryGChromaticityY = 0x55D4,
kMkvPrimaryBChromaticityX = 0x55D5,
kMkvPrimaryBChromaticityY = 0x55D6,
kMkvWhitePointChromaticityX = 0x55D7,
kMkvWhitePointChromaticityY = 0x55D8,
kMkvLuminanceMax = 0x55D9,
kMkvLuminanceMin = 0x55DA,
// end mastering metadata
// end colour
// projection
kMkvProjection = 0x7670,
kMkvProjectionType = 0x7671,
kMkvProjectionPrivate = 0x7672,
kMkvProjectionPoseYaw = 0x7673,
kMkvProjectionPosePitch = 0x7674,
kMkvProjectionPoseRoll = 0x7675,
// end projection
// audio
kMkvAudio = 0xE1,
kMkvSamplingFrequency = 0xB5,
kMkvOutputSamplingFrequency = 0x78B5,
kMkvChannels = 0x9F,
kMkvBitDepth = 0x6264,
// end audio
// ContentEncodings
kMkvContentEncodings = 0x6D80,
kMkvContentEncoding = 0x6240,
kMkvContentEncodingOrder = 0x5031,
kMkvContentEncodingScope = 0x5032,
kMkvContentEncodingType = 0x5033,
kMkvContentCompression = 0x5034,
kMkvContentCompAlgo = 0x4254,
kMkvContentCompSettings = 0x4255,
kMkvContentEncryption = 0x5035,
kMkvContentEncAlgo = 0x47E1,
kMkvContentEncKeyID = 0x47E2,
kMkvContentSignature = 0x47E3,
kMkvContentSigKeyID = 0x47E4,
kMkvContentSigAlgo = 0x47E5,
kMkvContentSigHashAlgo = 0x47E6,
kMkvContentEncAESSettings = 0x47E7,
kMkvAESSettingsCipherMode = 0x47E8,
kMkvAESSettingsCipherInitData = 0x47E9,
// end ContentEncodings
// Cueing Data
kMkvCues = 0x1C53BB6B,
kMkvCuePoint = 0xBB,
kMkvCueTime = 0xB3,
kMkvCueTrackPositions = 0xB7,
kMkvCueTrack = 0xF7,
kMkvCueClusterPosition = 0xF1,
kMkvCueBlockNumber = 0x5378,
// Chapters
kMkvChapters = 0x1043A770,
kMkvEditionEntry = 0x45B9,
kMkvChapterAtom = 0xB6,
kMkvChapterUID = 0x73C4,
kMkvChapterStringUID = 0x5654,
kMkvChapterTimeStart = 0x91,
kMkvChapterTimeEnd = 0x92,
kMkvChapterDisplay = 0x80,
kMkvChapString = 0x85,
kMkvChapLanguage = 0x437C,
kMkvChapCountry = 0x437E,
// Tags
kMkvTags = 0x1254C367,
kMkvTag = 0x7373,
kMkvSimpleTag = 0x67C8,
kMkvTagName = 0x45A3,
kMkvTagString = 0x4487
};
} // namespace libwebm
#endif // COMMON_WEBMIDS_H_

View File

@ -1,28 +0,0 @@
// Copyright (c) 2012 The WebM project authors. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
#ifndef MKVMUXER_MKVMUXERTYPES_H_
#define MKVMUXER_MKVMUXERTYPES_H_
namespace mkvmuxer {
typedef unsigned char uint8;
typedef short int16;
typedef int int32;
typedef unsigned int uint32;
typedef long long int64;
typedef unsigned long long uint64;
} // namespace mkvmuxer
// Copied from Chromium basictypes.h
// A macro to disallow the copy constructor and operator= functions
// This should be used in the private: declarations for a class
#define LIBWEBM_DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
#endif // MKVMUXER_MKVMUXERTYPES_HPP_

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,142 +0,0 @@
# This file is automatically generated from the git commit history
# by tools/gen_authors.sh.
Aaron Watry <awatry@gmail.com>
Abo Talib Mahfoodh <ab.mahfoodh@gmail.com>
Adam Xu <adam@xuyaowu.com>
Adrian Grange <agrange@google.com>
Aex Converse <aconverse@google.com>
Ahmad Sharif <asharif@google.com>
Alexander Voronov <avoronov@graphics.cs.msu.ru>
Alexis Ballier <aballier@gentoo.org>
Alok Ahuja <waveletcoeff@gmail.com>
Alpha Lam <hclam@google.com>
A.Mahfoodh <ab.mahfoodh@gmail.com>
Ami Fischman <fischman@chromium.org>
Andoni Morales Alastruey <ylatuya@gmail.com>
Andres Mejia <mcitadel@gmail.com>
Andrew Russell <anrussell@google.com>
Angie Chiang <angiebird@google.com>
Aron Rosenberg <arosenberg@logitech.com>
Attila Nagy <attilanagy@google.com>
Brion Vibber <bvibber@wikimedia.org>
changjun.yang <changjun.yang@intel.com>
Charles 'Buck' Krasic <ckrasic@google.com>
chm <chm@rock-chips.com>
Christian Duvivier <cduvivier@google.com>
Daniele Castagna <dcastagna@chromium.org>
Daniel Kang <ddkang@google.com>
Deb Mukherjee <debargha@google.com>
Dim Temp <dimtemp0@gmail.com>
Dmitry Kovalev <dkovalev@google.com>
Dragan Mrdjan <dmrdjan@mips.com>
Ed Baker <edward.baker@intel.com>
Ehsan Akhgari <ehsan.akhgari@gmail.com>
Erik Niemeyer <erik.a.niemeyer@intel.com>
Fabio Pedretti <fabio.ped@libero.it>
Frank Galligan <fgalligan@google.com>
Fredrik Söderquist <fs@opera.com>
Fritz Koenig <frkoenig@google.com>
Gaute Strokkenes <gaute.strokkenes@broadcom.com>
Geza Lore <gezalore@gmail.com>
Ghislain MARY <ghislainmary2@gmail.com>
Giuseppe Scrivano <gscrivano@gnu.org>
Gordana Cmiljanovic <gordana.cmiljanovic@imgtec.com>
Guillaume Martres <gmartres@google.com>
Guillermo Ballester Valor <gbvalor@gmail.com>
Hangyu Kuang <hkuang@google.com>
Hanno Böck <hanno@hboeck.de>
Henrik Lundin <hlundin@google.com>
Hui Su <huisu@google.com>
Ivan Maltz <ivanmaltz@google.com>
Jacek Caban <cjacek@gmail.com>
Jacky Chen <jackychen@google.com>
James Berry <jamesberry@google.com>
James Yu <james.yu@linaro.org>
James Zern <jzern@google.com>
Jan Gerber <j@mailb.org>
Jan Kratochvil <jan.kratochvil@redhat.com>
Janne Salonen <jsalonen@google.com>
Jean-Yves Avenard <jyavenard@mozilla.com>
Jeff Faust <jfaust@google.com>
Jeff Muizelaar <jmuizelaar@mozilla.com>
Jeff Petkau <jpet@chromium.org>
Jia Jia <jia.jia@linaro.org>
Jian Zhou <zhoujian@google.com>
Jim Bankoski <jimbankoski@google.com>
Jingning Han <jingning@google.com>
Joey Parrish <joeyparrish@google.com>
Johann Koenig <johannkoenig@google.com>
John Koleszar <jkoleszar@google.com>
Johnny Klonaris <google@jawknee.com>
John Stark <jhnstrk@gmail.com>
Joshua Bleecher Snyder <josh@treelinelabs.com>
Joshua Litt <joshualitt@google.com>
Julia Robson <juliamrobson@gmail.com>
Justin Clift <justin@salasaga.org>
Justin Lebar <justin.lebar@gmail.com>
KO Myung-Hun <komh@chollian.net>
Lawrence Velázquez <larryv@macports.org>
Linfeng Zhang <linfengz@google.com>
Lou Quillio <louquillio@google.com>
Luca Barbato <lu_zero@gentoo.org>
Makoto Kato <makoto.kt@gmail.com>
Mans Rullgard <mans@mansr.com>
Marco Paniconi <marpan@google.com>
Mark Mentovai <mark@chromium.org>
Martin Ettl <ettl.martin78@googlemail.com>
Martin Storsjo <martin@martin.st>
Matthew Heaney <matthewjheaney@chromium.org>
Michael Kohler <michaelkohler@live.com>
Mike Frysinger <vapier@chromium.org>
Mike Hommey <mhommey@mozilla.com>
Mikhal Shemer <mikhal@google.com>
Minghai Shang <minghai@google.com>
Morton Jonuschat <yabawock@gmail.com>
Nico Weber <thakis@chromium.org>
Parag Salasakar <img.mips1@gmail.com>
Pascal Massimino <pascal.massimino@gmail.com>
Patrik Westin <patrik.westin@gmail.com>
Paul Wilkins <paulwilkins@google.com>
Pavol Rusnak <stick@gk2.sk>
Paweł Hajdan <phajdan@google.com>
Pengchong Jin <pengchong@google.com>
Peter de Rivaz <peter.derivaz@gmail.com>
Philip Jägenstedt <philipj@opera.com>
Priit Laes <plaes@plaes.org>
Rafael Ávila de Espíndola <rafael.espindola@gmail.com>
Rafaël Carré <funman@videolan.org>
Ralph Giles <giles@xiph.org>
Rob Bradford <rob@linux.intel.com>
Ronald S. Bultje <rsbultje@gmail.com>
Rui Ueyama <ruiu@google.com>
Sami Pietilä <samipietila@google.com>
Sasi Inguva <isasi@google.com>
Scott Graham <scottmg@chromium.org>
Scott LaVarnway <slavarnway@google.com>
Sean McGovern <gseanmcg@gmail.com>
Sergey Kolomenkin <kolomenkin@gmail.com>
Sergey Ulanov <sergeyu@chromium.org>
Shimon Doodkin <helpmepro1@gmail.com>
Shunyao Li <shunyaoli@google.com>
Stefan Holmer <holmer@google.com>
Suman Sunkara <sunkaras@google.com>
Taekhyun Kim <takim@nvidia.com>
Takanori MATSUURA <t.matsuu@gmail.com>
Tamar Levy <tamar.levy@intel.com>
Tao Bai <michaelbai@chromium.org>
Tero Rintaluoma <teror@google.com>
Thijs Vermeir <thijsvermeir@gmail.com>
Tim Kopp <tkopp@google.com>
Timothy B. Terriberry <tterribe@xiph.org>
Tom Finegan <tomfinegan@google.com>
Vignesh Venkatasubramanian <vigneshv@google.com>
Yaowu Xu <yaowu@google.com>
Yi Luo <luoyi@google.com>
Yongzhe Wang <yongzhe@google.com>
Yunqing Wang <yunqingwang@google.com>
Yury Gitman <yuryg@google.com>
Zoe Liu <zoeliu@google.com>
Google Inc.
The Mozilla Foundation
The Xiph.Org Foundation

View File

@ -1,654 +0,0 @@
2016-07-20 v1.6.0 "Khaki Campbell Duck"
This release improves upon the VP9 encoder and speeds up the encoding and
decoding processes.
- Upgrading:
This release is ABI incompatible with 1.5.0 due to a new 'color_range' enum
in vpx_image and some minor changes to the VP8_COMP structure.
The default key frame interval for VP9 has changed from 128 to 9999.
- Enhancement:
A core focus has been performance for low end Intel processors. SSSE3
instructions such as 'pshufb' have been avoided and instructions have been
reordered to better accommodate the more constrained pipelines.
As a result, devices based on Celeron processors have seen substantial
decoding improvements. From Indian Runner Duck to Javan Whistling Duck,
decoding speed improved between 10 and 30%. Between Javan Whistling Duck
and Khaki Campbell Duck, it improved another 10 to 15%.
While Celeron benefited most, Core-i5 also improved 5% and 10% between the
respective releases.
Realtime performance for WebRTC for both speed and quality has received a
lot of attention.
- Bug Fixes:
A number of fuzzing issues, found variously by Mozilla, Chromium and others,
have been fixed and we strongly recommend updating.
2015-11-09 v1.5.0 "Javan Whistling Duck"
This release improves upon the VP9 encoder and speeds up the encoding and
decoding processes.
- Upgrading:
This release is ABI incompatible with 1.4.0. It drops deprecated VP8
controls and adds a variety of VP9 controls for testing.
The vpxenc utility now prefers VP9 by default.
- Enhancements:
Faster VP9 encoding and decoding
Smaller library size by combining functions used by VP8 and VP9
- Bug Fixes:
A variety of fuzzing issues
2015-04-03 v1.4.0 "Indian Runner Duck"
This release includes significant improvements to the VP9 codec.
- Upgrading:
This release is ABI incompatible with 1.3.0. It drops the compatibility
layer, requiring VPX_IMG_FMT_* instead of IMG_FMT_*, and adds several codec
controls for VP9.
- Enhancements:
Faster VP9 encoding and decoding
Multithreaded VP9 decoding (tile and frame-based)
Multithreaded VP9 encoding - on by default
YUV 4:2:2 and 4:4:4 support in VP9
10 and 12bit support in VP9
64bit ARM support by replacing ARM assembly with intrinsics
- Bug Fixes:
Fixes a VP9 bitstream issue in Profile 1. This only affected non-YUV 4:2:0
files.
- Known Issues:
Frame Parallel decoding fails for segmented and non-420 files.
2013-11-15 v1.3.0 "Forest"
This release introduces the VP9 codec in a backward-compatible way.
All existing users of VP8 can continue to use the library without
modification. However, some VP8 options do not map to VP9 in the same manner.
The VP9 encoder in this release is not feature complete. Users interested in
the encoder are advised to use the git master branch and discuss issues on
libvpx mailing lists.
- Upgrading:
This release is ABI and API compatible with Duclair (v1.0.0). Users
of older releases should refer to the Upgrading notes in this document
for that release.
- Enhancements:
Get rid of bashisms in the main build scripts
Added usage info on command line options
Add lossless compression mode
Dll build of libvpx
Add additional Mac OS X targets: 10.7, 10.8 and 10.9 (darwin11-13)
Add option to disable documentation
configure: add --enable-external-build support
make: support V=1 as short form of verbose=yes
configure: support mingw-w64
configure: support hardfloat armv7 CHOSTS
configure: add support for android x86
Add estimated completion time to vpxenc
Don't exit on decode errors in vpxenc
vpxenc: support scaling prior to encoding
vpxdec: support scaling output
vpxenc: improve progress indicators with --skip
msvs: Don't link to winmm.lib
Add a new script for producing vcxproj files
Produce Visual Studio 10 and 11 project files
Produce Windows Phone project files
msvs-build: use msbuild for vs >= 2005
configure: default configure log to config.log
Add encoding option --static-thresh
- Speed:
Miscellaneous speed optimizations for VP8 and VP9.
- Quality:
In general, quality is consistent with the Eider release.
- Bug Fixes:
This release represents approximately a year of engineering effort,
and contains multiple bug fixes. Please refer to git history for details.
2012-12-21 v1.2.0
This release acts as a checkpoint for a large amount of internal refactoring
and testing. It also contains a number of small bugfixes, so all users are
encouraged to upgrade.
- Upgrading:
This release is ABI and API compatible with Duclair (v1.0.0). Users
of older releases should refer to the Upgrading notes in this
document for that release.
- Enhancements:
VP8 optimizations for MIPS dspr2
vpxenc: add -quiet option
- Speed:
Encoder and decoder speed is consistent with the Eider release.
- Quality:
In general, quality is consistent with the Eider release.
Minor tweaks to ARNR filtering
Minor improvements to real time encoding with multiple temporal layers
- Bug Fixes:
Fixes multithreaded encoder race condition in loopfilter
Fixes multi-resolution threaded encoding
Fix potential encoder dead-lock after picture resize
2012-05-09 v1.1.0 "Eider"
This introduces a number of enhancements, mostly focused on real-time
encoding. In addition, it fixes a decoder bug (first introduced in
Duclair) so all users of that release are encouraged to upgrade.
- Upgrading:
This release is ABI and API compatible with Duclair (v1.0.0). Users
of older releases should refer to the Upgrading notes in this
document for that release.
This release introduces a new temporal denoiser, controlled by the
VP8E_SET_NOISE_SENSITIVITY control. The temporal denoiser does not
currently take a strength parameter, so the control is effectively
a boolean - zero (off) or non-zero (on). For compatibility with
existing applications, the values accepted are the same as those
for the spatial denoiser (0-6). The temporal denoiser is enabled
by default, and the older spatial denoiser may be restored by
configuring with --disable-temporal-denoising. The temporal denoiser
is more computationally intensive than the spatial one.
This release removes support for a legacy, decode only API that was
supported, but deprecated, at the initial release of libvpx
(v0.9.0). This is not expected to have any impact. If you are
impacted, you can apply a reversion to commit 2bf8fb58 locally.
Please update to the latest libvpx API if you are affected.
- Enhancements:
Adds a motion compensated temporal denoiser to the encoder, which
gives higher quality than the older spatial denoiser. (See above
for notes on upgrading).
In addition, support for new compilers and platforms were added,
including:
improved support for XCode
Android x86 NDK build
OS/2 support
SunCC support
Changing resolution with vpx_codec_enc_config_set() is now
supported. Previously, reinitializing the codec was required to
change the input resolution.
The vpxenc application has initial support for producing multiple
encodes from the same input in one call. Resizing is not yet
supported, but varying other codec parameters is. Use -- to
delineate output streams. Options persist from one stream to the
next.
Also, the vpxenc application will now use a keyframe interval of
5 seconds by default. Use the --kf-max-dist option to override.
- Speed:
Decoder performance improved 2.5% versus Duclair. Encoder speed is
consistent with Duclair for most material. Two pass encoding of
slideshow-like material will see significant improvements.
Large realtime encoding speed gains at a small quality expense are
possible by configuring the on-the-fly bitpacking experiment with
--enable-onthefly-bitpacking. Realtime encoder can be up to 13%
faster (ARM) depending on the number of threads and bitrate
settings. This technique sees constant gain over the 5-16 speed
range. For VC style input the loss seen is up to 0.2dB. See commit
52cf4dca for further details.
- Quality:
On the whole, quality is consistent with the Duclair release. Some
tweaks:
Reduced blockiness in easy sections by applying a penalty to
intra modes.
Improved quality of static sections (like slideshows) with
two pass encoding.
Improved keyframe sizing with multiple temporal layers
- Bug Fixes:
Corrected alt-ref contribution to frame rate for visible updates
to the alt-ref buffer. This affected applications making manual
usage of the frame reference flags, or temporal layers.
Additional constraints were added to disable multi-frame quality
enhancement (MFQE) in sections of the frame where there is motion.
(#392)
Fixed corruption issues when vpx_codec_enc_config_set() was called
with spatial resampling enabled.
Fixed a decoder error introduced in Duclair where the segmentation
map was not being reinitialized on keyframes (#378)
2012-01-27 v1.0.0 "Duclair"
Our fourth named release, focused on performance and features related to
real-time encoding. It also fixes a decoder crash bug introduced in
v0.9.7, so all users of that release are encouraged to upgrade.
- Upgrading:
This release is ABI incompatible with prior releases of libvpx, so the
"major" version number has been bumped to 1. You must recompile your
applications against the latest version of the libvpx headers. The
API remains compatible, and this should not require code changes in most
applications.
- Enhancements:
This release introduces several substantial new features to the encoder,
of particular interest to real time streaming applications.
Temporal scalability allows the encoder to produce a stream that can
be decimated to different frame rates, with independent rate targetting
for each substream.
Multiframe quality enhancement postprocessing can make visual quality
more consistent in the presence of frames that are substantially
different quality than the surrounding frames, as in the temporal
scalability case and in some forced keyframe scenarios.
Multiple-resolution encoding support allows the encoding of the
same content at different resolutions faster than encoding them
separately.
- Speed:
Optimization targets for this release included the decoder and the real-
time modes of the encoder. Decoder speed on x86 has improved 10.5% with
this release. Encoder improvements followed a curve where speeds 1-3
improved 4.0%-1.5%, speeds 4-8 improved <1%, and speeds 9-16 improved
1.5% to 10.5%, respectively. "Best" mode speed is consistent with the
Cayuga release.
- Quality:
Encoder quality in the single stream case is consistent with the Cayuga
release.
- Bug Fixes:
This release fixes an OOB read decoder crash bug present in v0.9.7
related to the clamping of motion vectors in SPLITMV blocks. This
behavior could be triggered by corrupt input or by starting
decoding from a P-frame.
2011-08-15 v0.9.7-p1 "Cayuga" patch 1
This is an incremental bugfix release against Cayuga. All users of that
release are strongly encouraged to upgrade.
- Fix potential OOB reads (cdae03a)
An unbounded out of bounds read was discovered when the
decoder was requested to perform error concealment (new in
Cayuga) given a frame with corrupt partition sizes.
A bounded out of bounds read was discovered affecting all
versions of libvpx. Given a multipartition input frame that
is truncated between the mode/mv partition and the first
residual partition (in the block of partition offsets), up
to 3 extra bytes could have been read from the source buffer.
The code will not take any action regardless of the contents
of these undefined bytes, as the truncated buffer is detected
immediately following the read based on the calculated
starting position of the coefficient partition.
- Fix potential error concealment crash when the very first frame
is missing or corrupt (a609be5)
- Fix significant artifacts in error concealment (a4c2211, 99d870a)
- Revert 1-pass CBR rate control changes (e961317)
Further testing showed this change produced undesirable visual
artifacts, rolling back for now.
2011-08-02 v0.9.7 "Cayuga"
Our third named release, focused on a faster, higher quality, encoder.
- Upgrading:
This release is backwards compatible with Aylesbury (v0.9.5) and
Bali (v0.9.6). Users of older releases should refer to the Upgrading
notes in this document for that release.
- Enhancements:
Stereo 3D format support for vpxenc
Runtime detection of available processor cores.
Allow specifying --end-usage by enum name
vpxdec: test for frame corruption
vpxenc: add quantizer histogram display
vpxenc: add rate histogram display
Set VPX_FRAME_IS_DROPPABLE
update configure for ios sdk 4.3
Avoid text relocations in ARM vp8 decoder
Generate a vpx.pc file for pkg-config.
New ways of passing encoded data between encoder and decoder.
- Speed:
This release includes across-the-board speed improvements to the
encoder. On x86, these measure at approximately 11.5% in Best mode,
21.5% in Good mode (speed 0), and 22.5% in Realtime mode (speed 6).
On ARM Cortex A9 with Neon extensions, real-time encoding of video
telephony content is 35% faster than Bali on single core and 48%
faster on multi-core. On the NVidia Tegra2 platform, real time
encoding is 40% faster than Bali.
Decoder speed was not a priority for this release, but improved
approximately 8.4% on x86.
Reduce motion vector search on alt-ref frame.
Encoder loopfilter running in its own thread
Reworked loopfilter to precalculate more parameters
SSE2/SSSE3 optimizations for build_predictors_mbuv{,_s}().
Make hor UV predict ~2x faster (73 vs 132 cycles) using SSSE3.
Removed redundant checks
Reduced structure sizes
utilize preload in ARMv6 MC/LPF/Copy routines
ARM optimized quantization, dfct, variance, subtract
Increase chrow row alignment to 16 bytes.
disable trellis optimization for first pass
Write SSSE3 sub-pixel filter function
Improve SSE2 half-pixel filter functions
Add vp8_sub_pixel_variance16x8_ssse3 function
Reduce unnecessary distortion computation
Use diamond search to replace full search
Preload reference area in sub-pixel motion search (real-time mode)
- Quality:
This release focused primarily on one-pass use cases, including
video conferencing. Low latency data rate control was significantly
improved, improving streamability over bandwidth constrained links.
Added support for error concealment, allowing frames to maintain
visual quality in the presence of substantial packet loss.
Add rc_max_intra_bitrate_pct control
Limit size of initial keyframe in one-pass.
Improve framerate adaptation
Improved 1-pass CBR rate control
Improved KF insertion after fades to still.
Improved key frame detection.
Improved activity masking (lower PSNR impact for same SSIM boost)
Improved interaction between GF and ARFs
Adding error-concealment to the decoder.
Adding support for independent partitions
Adjusted rate-distortion constants
- Bug Fixes:
Removed firstpass motion map
Fix parallel make install
Fix multithreaded encoding for 1 MB wide frame
Fixed iwalsh_neon build problems with RVDS4.1
Fix semaphore emulation, spin-wait intrinsics on Windows
Fix build with xcode4 and simplify GLOBAL.
Mark ARM asm objects as allowing a non-executable stack.
Fix vpxenc encoding incorrect webm file header on big endian
2011-03-07 v0.9.6 "Bali"
Our second named release, focused on a faster, higher quality, encoder.
- Upgrading:
This release is backwards compatible with Aylesbury (v0.9.5). Users
of older releases should refer to the Upgrading notes in this
document for that release.
- Enhancements:
vpxenc --psnr shows a summary when encode completes
--tune=ssim option to enable activity masking
improved postproc visualizations for development
updated support for Apple iOS to SDK 4.2
query decoder to determine which reference frames were updated
implemented error tracking in the decoder
fix pipe support on windows
- Speed:
Primary focus was on good quality mode, speed 0. Average improvement
on x86 about 40%, up to 100% on user-generated content at that speed.
Best quality mode speed improved 35%, and realtime speed 10-20%. This
release also saw significant improvement in realtime encoding speed
on ARM platforms.
Improved encoder threading
Don't pick encoder filter level when loopfilter is disabled.
Avoid double copying of key frames into alt and golden buffer
FDCT optimizations.
x86 sse2 temporal filter
SSSE3 version of fast quantizer
vp8_rd_pick_best_mbsegmentation code restructure
Adjusted breakout RD for SPLITMV
Changed segmentation check order
Improved rd_pick_intra4x4block
Adds armv6 optimized variance calculation
ARMv6 optimized sad16x16
ARMv6 optimized half pixel variance calculations
Full search SAD function optimization in SSE4.1
Improve MV prediction accuracy to achieve performance gain
Improve MV prediction in vp8_pick_inter_mode() for speed>3
- Quality:
Best quality mode improved PSNR 6.3%, and SSIM 6.1%. This release
also includes support for "activity masking," which greatly improves
SSIM at the expense of PSNR. For now, this feature is available with
the --tune=ssim option. Further experimentation in this area
is ongoing. This release also introduces a new rate control mode
called "CQ," which changes the allocation of bits within a clip to
the sections where they will have the most visual impact.
Tuning for the more exact quantizer.
Relax rate control for last few frames
CQ Mode
Limit key frame quantizer for forced key frames.
KF/GF Pulsing
Add simple version of activity masking.
make rdmult adaptive for intra in quantizer RDO
cap the best quantizer for 2nd order DC
change the threshold of DC check for encode breakout
- Bug Fixes:
Fix crash on Sparc Solaris.
Fix counter of fixed keyframe distance
ARNR filter pointer update bug fix
Fixed use of motion percentage in KF/GF group calc
Changed condition for using RD in Intra Mode
Fix encoder real-time only configuration.
Fix ARM encoder crash with multiple token partitions
Fixed bug first cluster timecode of webm file is wrong.
Fixed various encoder bugs with odd-sized images
vp8e_get_preview fixed when spatial resampling enabled
quantizer: fix assertion in fast quantizer path
Allocate source buffers to be multiples of 16
Fix for manual Golden frame frequency
Fix drastic undershoot in long form content
2010-10-28 v0.9.5 "Aylesbury"
Our first named release, focused on a faster decoder, and a better encoder.
- Upgrading:
This release incorporates backwards-incompatible changes to the
ivfenc and ivfdec tools. These tools are now called vpxenc and vpxdec.
vpxdec
* the -q (quiet) option has been removed, and replaced with
-v (verbose). The output is quiet by default. Use -v to see
the version number of the binary.
* The default behavior is now to write output to a single file
instead of individual frames. The -y option has been removed.
Y4M output is the default.
* For raw I420/YV12 output instead of Y4M, the --i420 or --yv12
options must be specified.
$ ivfdec -o OUTPUT INPUT
$ vpxdec --i420 -o OUTPUT INPUT
* If an output file is not specified, the default is to write
Y4M to stdout. This makes piping more natural.
$ ivfdec -y -o - INPUT | ...
$ vpxdec INPUT | ...
* The output file has additional flexibility for formatting the
filename. It supports escape characters for constructing a
filename from the width, height, and sequence number. This
replaces the -p option. To get the equivalent:
$ ivfdec -p frame INPUT
$ vpxdec --i420 -o frame-%wx%h-%4.i420 INPUT
vpxenc
* The output file must be specified with -o, rather than as the
last argument.
$ ivfenc <options> INPUT OUTPUT
$ vpxenc <options> -o OUTPUT INPUT
* The output defaults to webm. To get IVF output, use the --ivf
option.
$ ivfenc <options> INPUT OUTPUT.ivf
$ vpxenc <options> -o OUTPUT.ivf --ivf INPUT
- Enhancements:
ivfenc and ivfdec have been renamed to vpxenc, vpxdec.
vpxdec supports .webm input
vpxdec writes .y4m by default
vpxenc writes .webm output by default
vpxenc --psnr now shows the average/overall PSNR at the end
ARM platforms now support runtime cpu detection
vpxdec visualizations added for motion vectors, block modes, references
vpxdec now silent by default
vpxdec --progress shows frame-by-frame timing information
vpxenc supports the distinction between --fps and --timebase
NASM is now a supported assembler
configure: enable PIC for shared libs by default
configure: add --enable-small
configure: support for ppc32-linux-gcc
configure: support for sparc-solaris-gcc
- Bugs:
Improve handling of invalid frames
Fix valgrind errors in the NEON loop filters.
Fix loopfilter delta zero transitions
Fix valgrind errors in vp8_sixtap_predict8x4_armv6().
Build fixes for darwin-icc
- Speed:
20-40% (average 28%) improvement in libvpx decoder speed,
including:
Rewrite vp8_short_walsh4x4_sse2()
Optimizations on the loopfilters.
Miscellaneous improvements for Atom
Add 4-tap version of 2nd-pass ARMv6 MC filter.
Improved multithread utilization
Better instruction choices on x86
reorder data to use wider instructions
Update NEON wide idcts
Make block access to frame buffer sequential
Improved subset block search
Bilinear subpixel optimizations for ssse3.
Decrease memory footprint
Encoder speed improvements (percentage gain not measured):
Skip unnecessary search of identical frames
Add SSE2 subtract functions
Improve bounds checking in vp8_diamond_search_sadx4()
Added vp8_fast_quantize_b_sse2
- Quality:
Over 7% overall PSNR improvement (6.3% SSIM) in "best" quality
encoding mode, and up to 60% improvement on very noisy, still
or slow moving source video
Motion compensated temporal filter for Alt-Ref Noise Reduction
Improved use of trellis quantization on 2nd order Y blocks
Tune effect of motion on KF/GF boost in two pass
Allow coefficient optimization for good quality speed 0.
Improved control of active min quantizer for two pass.
Enable ARFs for non-lagged compress
2010-09-02 v0.9.2
- Enhancements:
Disable frame dropping by default
Improved multithreaded performance
Improved Force Key Frame Behaviour
Increased rate control buffer level precision
Fix bug in 1st pass motion compensation
ivfenc: correct fixed kf interval, --disable-kf
- Speed:
Changed above and left context data layout
Rework idct calling structure.
Removed unnecessary MB_MODE_INFO copies
x86: SSSE3 sixtap prediction
Reworked IDCT to include reconstruction (add) step
Swap alt/gold/new/last frame buffer ptrs instead of copying.
Improve SSE2 loopfilter functions
Change bitreader to use a larger window.
Avoid loopfilter reinitialization when possible
- Quality:
Normalize quantizer's zero bin and rounding factors
Add trellis quantization.
Make the quantizer exact.
Updates to ARNR filtering algorithm
Fix breakout thresh computation for golden & AltRef frames
Redo the forward 4x4 dct
Improve the accuracy of forward walsh-hadamard transform
Further adjustment of RD behaviour with Q and Zbin.
- Build System:
Allow linking of libs built with MinGW to MSVC
Fix target auto-detection on mingw32
Allow --cpu= to work for x86.
configure: pass original arguments through to make dist
Fix builds without runtime CPU detection
msvs: fix install of codec sources
msvs: Change devenv.com command line for better msys support
msvs: Add vs9 targets.
Add x86_64-linux-icc target
- Bugs:
Potential crashes on older MinGW builds
Fix two-pass framerate for Y4M input.
Fixed simple loop filter and other crashes on ARM v6
arm: fix missing dependency with --enable-shared
configure: support directories containing .o
Replace pinsrw (SSE) with MMX instructions
apple: include proper mach primitives
Fixed rate control bug with long key frame interval.
Fix DSO link errors on x86-64 when not using a version script
Fixed buffer selection for UV in AltRef filtering
2010-06-17 v0.9.1
- Enhancements:
* ivfenc/ivfdec now support YUV4MPEG2 input and pipe I/O
* Speed optimizations
- Bugfixes:
* Rate control
* Prevent out-of-bounds accesses on invalid data
- Build system updates:
* Detect toolchain to be used automatically for native builds
* Support building shared libraries
* Better autotools emulation (--prefix, --libdir, DESTDIR)
- Updated LICENSE
* http://webmproject.blogspot.com/2010/06/changes-to-webm-open-source-license.html
2010-05-18 v0.9.0
- Initial open source release. Welcome to WebM and VP8!

View File

@ -1,31 +0,0 @@
Copyright (c) 2010, The WebM Project authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Google, nor the WebM Project, nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,23 +0,0 @@
Additional IP Rights Grant (Patents)
------------------------------------
"These implementations" means the copyrightable works that implement the WebM
codecs distributed by Google as part of the WebM Project.
Google hereby grants to you a perpetual, worldwide, non-exclusive, no-charge,
royalty-free, irrevocable (except as stated in this section) patent license to
make, have made, use, offer to sell, sell, import, transfer, and otherwise
run, modify and propagate the contents of these implementations of WebM, where
such license applies only to those patent claims, both currently owned by
Google and acquired in the future, licensable by Google that are necessarily
infringed by these implementations of WebM. This grant does not include claims
that would be infringed only as a consequence of further modification of these
implementations. If you or your agent or exclusive licensee institute or order
or agree to the institution of patent litigation or any other patent
enforcement activity against any entity (including a cross-claim or
counterclaim in a lawsuit) alleging that any of these implementations of WebM
or any code incorporated within any of these implementations of WebM
constitute direct or contributory patent infringement, or inducement of
patent infringement, then any patent rights granted to you under this License
for these implementations of WebM shall terminate as of the date such
litigation is filed.

View File

@ -1,240 +0,0 @@
#ifndef VP8_RTCD_H_
#define VP8_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
#else
#define RTCD_EXTERN extern
#endif
/*
* VP8
*/
struct blockd;
struct loop_filter_info;
#ifdef __cplusplus
extern "C" {
#endif
void vp8_bilinear_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict16x16_neon(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict4x4_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_c
void vp8_bilinear_predict8x4_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict8x4_neon(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_bilinear_predict8x4)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict8x8_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict8x8_neon(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_clear_system_state_c();
#define vp8_clear_system_state vp8_clear_system_state_c
void vp8_copy_mem16x16_c(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
void vp8_copy_mem16x16_neon(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_copy_mem16x16)(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
void vp8_copy_mem8x4_c(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
void vp8_copy_mem8x4_neon(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_copy_mem8x4)(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
void vp8_copy_mem8x8_c(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
void vp8_copy_mem8x8_neon(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_copy_mem8x8)(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
void vp8_dc_only_idct_add_c(short input, unsigned char *pred, int pred_stride, unsigned char *dst, int dst_stride);
void vp8_dc_only_idct_add_neon(short input, unsigned char *pred, int pred_stride, unsigned char *dst, int dst_stride);
RTCD_EXTERN void (*vp8_dc_only_idct_add)(short input, unsigned char *pred, int pred_stride, unsigned char *dst, int dst_stride);
void vp8_dequant_idct_add_c(short *input, short *dq, unsigned char *output, int stride);
void vp8_dequant_idct_add_neon(short *input, short *dq, unsigned char *output, int stride);
RTCD_EXTERN void (*vp8_dequant_idct_add)(short *input, short *dq, unsigned char *output, int stride);
void vp8_dequant_idct_add_uv_block_c(short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs);
void vp8_dequant_idct_add_uv_block_neon(short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs);
RTCD_EXTERN void (*vp8_dequant_idct_add_uv_block)(short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs);
void vp8_dequant_idct_add_y_block_c(short *q, short *dq, unsigned char *dst, int stride, char *eobs);
void vp8_dequant_idct_add_y_block_neon(short *q, short *dq, unsigned char *dst, int stride, char *eobs);
RTCD_EXTERN void (*vp8_dequant_idct_add_y_block)(short *q, short *dq, unsigned char *dst, int stride, char *eobs);
void vp8_dequantize_b_c(struct blockd*, short *dqc);
void vp8_dequantize_b_neon(struct blockd*, short *dqc);
RTCD_EXTERN void (*vp8_dequantize_b)(struct blockd*, short *dqc);
void vp8_loop_filter_bh_c(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_bh_neon(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
RTCD_EXTERN void (*vp8_loop_filter_bh)(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_bv_c(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_bv_neon(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
RTCD_EXTERN void (*vp8_loop_filter_bv)(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_mbh_c(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_mbh_neon(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
RTCD_EXTERN void (*vp8_loop_filter_mbh)(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_mbv_c(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_mbv_neon(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
RTCD_EXTERN void (*vp8_loop_filter_mbv)(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_bhs_c(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_bhs_neon(unsigned char *y, int ystride, const unsigned char *blimit);
RTCD_EXTERN void (*vp8_loop_filter_simple_bh)(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_bvs_c(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_bvs_neon(unsigned char *y, int ystride, const unsigned char *blimit);
RTCD_EXTERN void (*vp8_loop_filter_simple_bv)(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_simple_horizontal_edge_c(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_mbhs_neon(unsigned char *y, int ystride, const unsigned char *blimit);
RTCD_EXTERN void (*vp8_loop_filter_simple_mbh)(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_simple_vertical_edge_c(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_mbvs_neon(unsigned char *y, int ystride, const unsigned char *blimit);
RTCD_EXTERN void (*vp8_loop_filter_simple_mbv)(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_short_idct4x4llm_c(short *input, unsigned char *pred, int pitch, unsigned char *dst, int dst_stride);
void vp8_short_idct4x4llm_neon(short *input, unsigned char *pred, int pitch, unsigned char *dst, int dst_stride);
RTCD_EXTERN void (*vp8_short_idct4x4llm)(short *input, unsigned char *pred, int pitch, unsigned char *dst, int dst_stride);
void vp8_short_inv_walsh4x4_c(short *input, short *output);
void vp8_short_inv_walsh4x4_neon(short *input, short *output);
RTCD_EXTERN void (*vp8_short_inv_walsh4x4)(short *input, short *output);
void vp8_short_inv_walsh4x4_1_c(short *input, short *output);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_sixtap_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict16x16_neon(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict4x4_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_c
void vp8_sixtap_predict8x4_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict8x4_neon(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict8x8_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict8x8_neon(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_rtcd(void);
#ifdef RTCD_C
#include "vpx_ports/arm.h"
static void setup_rtcd_internal(void)
{
int flags = arm_cpu_caps();
vp8_bilinear_predict16x16 = vp8_bilinear_predict16x16_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_bilinear_predict16x16 = vp8_bilinear_predict16x16_neon;
#endif
vp8_bilinear_predict8x4 = vp8_bilinear_predict8x4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_bilinear_predict8x4 = vp8_bilinear_predict8x4_neon;
#endif
vp8_bilinear_predict8x8 = vp8_bilinear_predict8x8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_bilinear_predict8x8 = vp8_bilinear_predict8x8_neon;
#endif
vp8_copy_mem16x16 = vp8_copy_mem16x16_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_copy_mem16x16 = vp8_copy_mem16x16_neon;
#endif
vp8_copy_mem8x4 = vp8_copy_mem8x4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_copy_mem8x4 = vp8_copy_mem8x4_neon;
#endif
vp8_copy_mem8x8 = vp8_copy_mem8x8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_copy_mem8x8 = vp8_copy_mem8x8_neon;
#endif
vp8_dc_only_idct_add = vp8_dc_only_idct_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_dc_only_idct_add = vp8_dc_only_idct_add_neon;
#endif
vp8_dequant_idct_add = vp8_dequant_idct_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_dequant_idct_add = vp8_dequant_idct_add_neon;
#endif
vp8_dequant_idct_add_uv_block = vp8_dequant_idct_add_uv_block_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_dequant_idct_add_uv_block = vp8_dequant_idct_add_uv_block_neon;
#endif
vp8_dequant_idct_add_y_block = vp8_dequant_idct_add_y_block_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_dequant_idct_add_y_block = vp8_dequant_idct_add_y_block_neon;
#endif
vp8_dequantize_b = vp8_dequantize_b_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_dequantize_b = vp8_dequantize_b_neon;
#endif
vp8_loop_filter_bh = vp8_loop_filter_bh_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_loop_filter_bh = vp8_loop_filter_bh_neon;
#endif
vp8_loop_filter_bv = vp8_loop_filter_bv_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_loop_filter_bv = vp8_loop_filter_bv_neon;
#endif
vp8_loop_filter_mbh = vp8_loop_filter_mbh_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_loop_filter_mbh = vp8_loop_filter_mbh_neon;
#endif
vp8_loop_filter_mbv = vp8_loop_filter_mbv_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_loop_filter_mbv = vp8_loop_filter_mbv_neon;
#endif
vp8_loop_filter_simple_bh = vp8_loop_filter_bhs_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_loop_filter_simple_bh = vp8_loop_filter_bhs_neon;
#endif
vp8_loop_filter_simple_bv = vp8_loop_filter_bvs_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_loop_filter_simple_bv = vp8_loop_filter_bvs_neon;
#endif
vp8_loop_filter_simple_mbh = vp8_loop_filter_simple_horizontal_edge_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_loop_filter_simple_mbh = vp8_loop_filter_mbhs_neon;
#endif
vp8_loop_filter_simple_mbv = vp8_loop_filter_simple_vertical_edge_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_loop_filter_simple_mbv = vp8_loop_filter_mbvs_neon;
#endif
vp8_short_idct4x4llm = vp8_short_idct4x4llm_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_short_idct4x4llm = vp8_short_idct4x4llm_neon;
#endif
vp8_short_inv_walsh4x4 = vp8_short_inv_walsh4x4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_short_inv_walsh4x4 = vp8_short_inv_walsh4x4_neon;
#endif
vp8_sixtap_predict16x16 = vp8_sixtap_predict16x16_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_sixtap_predict16x16 = vp8_sixtap_predict16x16_neon;
#endif
vp8_sixtap_predict8x4 = vp8_sixtap_predict8x4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_sixtap_predict8x4 = vp8_sixtap_predict8x4_neon;
#endif
vp8_sixtap_predict8x8 = vp8_sixtap_predict8x8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp8_sixtap_predict8x8 = vp8_sixtap_predict8x8_neon;
#endif
}
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif

View File

@ -1,117 +0,0 @@
#ifndef VP8_RTCD_H_
#define VP8_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
#else
#define RTCD_EXTERN extern
#endif
/*
* VP8
*/
struct blockd;
struct loop_filter_info;
#ifdef __cplusplus
extern "C" {
#endif
void vp8_bilinear_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
#define vp8_bilinear_predict16x16 vp8_bilinear_predict16x16_c
void vp8_bilinear_predict4x4_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
#define vp8_bilinear_predict4x4 vp8_bilinear_predict4x4_c
void vp8_bilinear_predict8x4_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
#define vp8_bilinear_predict8x4 vp8_bilinear_predict8x4_c
void vp8_bilinear_predict8x8_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
#define vp8_bilinear_predict8x8 vp8_bilinear_predict8x8_c
void vp8_clear_system_state_c();
#define vp8_clear_system_state vp8_clear_system_state_c
void vp8_copy_mem16x16_c(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
#define vp8_copy_mem16x16 vp8_copy_mem16x16_c
void vp8_copy_mem8x4_c(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
#define vp8_copy_mem8x4 vp8_copy_mem8x4_c
void vp8_copy_mem8x8_c(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
#define vp8_copy_mem8x8 vp8_copy_mem8x8_c
void vp8_dc_only_idct_add_c(short input, unsigned char *pred, int pred_stride, unsigned char *dst, int dst_stride);
#define vp8_dc_only_idct_add vp8_dc_only_idct_add_c
void vp8_dequant_idct_add_c(short *input, short *dq, unsigned char *output, int stride);
#define vp8_dequant_idct_add vp8_dequant_idct_add_c
void vp8_dequant_idct_add_uv_block_c(short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs);
#define vp8_dequant_idct_add_uv_block vp8_dequant_idct_add_uv_block_c
void vp8_dequant_idct_add_y_block_c(short *q, short *dq, unsigned char *dst, int stride, char *eobs);
#define vp8_dequant_idct_add_y_block vp8_dequant_idct_add_y_block_c
void vp8_dequantize_b_c(struct blockd*, short *dqc);
#define vp8_dequantize_b vp8_dequantize_b_c
void vp8_loop_filter_bh_c(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
#define vp8_loop_filter_bh vp8_loop_filter_bh_c
void vp8_loop_filter_bv_c(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
#define vp8_loop_filter_bv vp8_loop_filter_bv_c
void vp8_loop_filter_mbh_c(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
#define vp8_loop_filter_mbh vp8_loop_filter_mbh_c
void vp8_loop_filter_mbv_c(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
#define vp8_loop_filter_mbv vp8_loop_filter_mbv_c
void vp8_loop_filter_bhs_c(unsigned char *y, int ystride, const unsigned char *blimit);
#define vp8_loop_filter_simple_bh vp8_loop_filter_bhs_c
void vp8_loop_filter_bvs_c(unsigned char *y, int ystride, const unsigned char *blimit);
#define vp8_loop_filter_simple_bv vp8_loop_filter_bvs_c
void vp8_loop_filter_simple_horizontal_edge_c(unsigned char *y, int ystride, const unsigned char *blimit);
#define vp8_loop_filter_simple_mbh vp8_loop_filter_simple_horizontal_edge_c
void vp8_loop_filter_simple_vertical_edge_c(unsigned char *y, int ystride, const unsigned char *blimit);
#define vp8_loop_filter_simple_mbv vp8_loop_filter_simple_vertical_edge_c
void vp8_short_idct4x4llm_c(short *input, unsigned char *pred, int pitch, unsigned char *dst, int dst_stride);
#define vp8_short_idct4x4llm vp8_short_idct4x4llm_c
void vp8_short_inv_walsh4x4_c(short *input, short *output);
#define vp8_short_inv_walsh4x4 vp8_short_inv_walsh4x4_c
void vp8_short_inv_walsh4x4_1_c(short *input, short *output);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_sixtap_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
#define vp8_sixtap_predict16x16 vp8_sixtap_predict16x16_c
void vp8_sixtap_predict4x4_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
#define vp8_sixtap_predict4x4 vp8_sixtap_predict4x4_c
void vp8_sixtap_predict8x4_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
#define vp8_sixtap_predict8x4 vp8_sixtap_predict8x4_c
void vp8_sixtap_predict8x8_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
#define vp8_sixtap_predict8x8 vp8_sixtap_predict8x8_c
void vp8_rtcd(void);
#ifdef RTCD_C
static void setup_rtcd_internal(void)
{
}
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif

View File

@ -1,247 +0,0 @@
#ifndef VP8_RTCD_H_
#define VP8_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
#else
#define RTCD_EXTERN extern
#endif
/*
* VP8
*/
struct blockd;
struct loop_filter_info;
#ifdef __cplusplus
extern "C" {
#endif
void vp8_bilinear_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict16x16_mmx(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict16x16_sse2(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict16x16_ssse3(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_bilinear_predict16x16)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict4x4_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict4x4_mmx(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_bilinear_predict4x4)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict8x4_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict8x4_mmx(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_bilinear_predict8x4)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict8x8_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict8x8_mmx(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict8x8_sse2(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_bilinear_predict8x8_ssse3(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_bilinear_predict8x8)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_clear_system_state_c();
void vpx_reset_mmx_state();
RTCD_EXTERN void (*vp8_clear_system_state)();
void vp8_copy_mem16x16_c(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
void vp8_copy_mem16x16_mmx(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
void vp8_copy_mem16x16_sse2(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_copy_mem16x16)(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
void vp8_copy_mem8x4_c(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
void vp8_copy_mem8x4_mmx(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_copy_mem8x4)(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
void vp8_copy_mem8x8_c(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
void vp8_copy_mem8x8_mmx(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_copy_mem8x8)(unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch);
void vp8_dc_only_idct_add_c(short input, unsigned char *pred, int pred_stride, unsigned char *dst, int dst_stride);
void vp8_dc_only_idct_add_mmx(short input, unsigned char *pred, int pred_stride, unsigned char *dst, int dst_stride);
RTCD_EXTERN void (*vp8_dc_only_idct_add)(short input, unsigned char *pred, int pred_stride, unsigned char *dst, int dst_stride);
void vp8_dequant_idct_add_c(short *input, short *dq, unsigned char *output, int stride);
void vp8_dequant_idct_add_mmx(short *input, short *dq, unsigned char *output, int stride);
RTCD_EXTERN void (*vp8_dequant_idct_add)(short *input, short *dq, unsigned char *output, int stride);
void vp8_dequant_idct_add_uv_block_c(short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs);
void vp8_dequant_idct_add_uv_block_mmx(short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs);
void vp8_dequant_idct_add_uv_block_sse2(short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs);
RTCD_EXTERN void (*vp8_dequant_idct_add_uv_block)(short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs);
void vp8_dequant_idct_add_y_block_c(short *q, short *dq, unsigned char *dst, int stride, char *eobs);
void vp8_dequant_idct_add_y_block_mmx(short *q, short *dq, unsigned char *dst, int stride, char *eobs);
void vp8_dequant_idct_add_y_block_sse2(short *q, short *dq, unsigned char *dst, int stride, char *eobs);
RTCD_EXTERN void (*vp8_dequant_idct_add_y_block)(short *q, short *dq, unsigned char *dst, int stride, char *eobs);
void vp8_dequantize_b_c(struct blockd*, short *dqc);
void vp8_dequantize_b_mmx(struct blockd*, short *dqc);
RTCD_EXTERN void (*vp8_dequantize_b)(struct blockd*, short *dqc);
void vp8_loop_filter_bh_c(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_bh_mmx(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_bh_sse2(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
RTCD_EXTERN void (*vp8_loop_filter_bh)(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_bv_c(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_bv_mmx(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_bv_sse2(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
RTCD_EXTERN void (*vp8_loop_filter_bv)(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_mbh_c(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_mbh_mmx(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_mbh_sse2(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
RTCD_EXTERN void (*vp8_loop_filter_mbh)(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_mbv_c(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_mbv_mmx(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_mbv_sse2(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
RTCD_EXTERN void (*vp8_loop_filter_mbv)(unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi);
void vp8_loop_filter_bhs_c(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_bhs_mmx(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_bhs_sse2(unsigned char *y, int ystride, const unsigned char *blimit);
RTCD_EXTERN void (*vp8_loop_filter_simple_bh)(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_bvs_c(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_bvs_mmx(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_bvs_sse2(unsigned char *y, int ystride, const unsigned char *blimit);
RTCD_EXTERN void (*vp8_loop_filter_simple_bv)(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_simple_horizontal_edge_c(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_simple_horizontal_edge_mmx(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_simple_horizontal_edge_sse2(unsigned char *y, int ystride, const unsigned char *blimit);
RTCD_EXTERN void (*vp8_loop_filter_simple_mbh)(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_simple_vertical_edge_c(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_simple_vertical_edge_mmx(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_loop_filter_simple_vertical_edge_sse2(unsigned char *y, int ystride, const unsigned char *blimit);
RTCD_EXTERN void (*vp8_loop_filter_simple_mbv)(unsigned char *y, int ystride, const unsigned char *blimit);
void vp8_short_idct4x4llm_c(short *input, unsigned char *pred, int pitch, unsigned char *dst, int dst_stride);
void vp8_short_idct4x4llm_mmx(short *input, unsigned char *pred, int pitch, unsigned char *dst, int dst_stride);
RTCD_EXTERN void (*vp8_short_idct4x4llm)(short *input, unsigned char *pred, int pitch, unsigned char *dst, int dst_stride);
void vp8_short_inv_walsh4x4_c(short *input, short *output);
void vp8_short_inv_walsh4x4_mmx(short *input, short *output);
void vp8_short_inv_walsh4x4_sse2(short *input, short *output);
RTCD_EXTERN void (*vp8_short_inv_walsh4x4)(short *input, short *output);
void vp8_short_inv_walsh4x4_1_c(short *input, short *output);
#define vp8_short_inv_walsh4x4_1 vp8_short_inv_walsh4x4_1_c
void vp8_sixtap_predict16x16_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict16x16_mmx(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict16x16_sse2(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict16x16_ssse3(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_sixtap_predict16x16)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict4x4_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict4x4_mmx(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict4x4_ssse3(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_sixtap_predict4x4)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict8x4_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict8x4_mmx(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict8x4_sse2(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict8x4_ssse3(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_sixtap_predict8x4)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict8x8_c(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict8x8_mmx(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict8x8_sse2(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_sixtap_predict8x8_ssse3(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
RTCD_EXTERN void (*vp8_sixtap_predict8x8)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
void vp8_rtcd(void);
#ifdef RTCD_C
#include "vpx_ports/x86.h"
static void setup_rtcd_internal(void)
{
int flags = x86_simd_caps();
vp8_bilinear_predict16x16 = vp8_bilinear_predict16x16_c;
if (flags & HAS_MMX) vp8_bilinear_predict16x16 = vp8_bilinear_predict16x16_mmx;
if (flags & HAS_SSE2) vp8_bilinear_predict16x16 = vp8_bilinear_predict16x16_sse2;
if (flags & HAS_SSSE3) vp8_bilinear_predict16x16 = vp8_bilinear_predict16x16_ssse3;
vp8_bilinear_predict4x4 = vp8_bilinear_predict4x4_c;
if (flags & HAS_MMX) vp8_bilinear_predict4x4 = vp8_bilinear_predict4x4_mmx;
vp8_bilinear_predict8x4 = vp8_bilinear_predict8x4_c;
if (flags & HAS_MMX) vp8_bilinear_predict8x4 = vp8_bilinear_predict8x4_mmx;
vp8_bilinear_predict8x8 = vp8_bilinear_predict8x8_c;
if (flags & HAS_MMX) vp8_bilinear_predict8x8 = vp8_bilinear_predict8x8_mmx;
if (flags & HAS_SSE2) vp8_bilinear_predict8x8 = vp8_bilinear_predict8x8_sse2;
if (flags & HAS_SSSE3) vp8_bilinear_predict8x8 = vp8_bilinear_predict8x8_ssse3;
vp8_clear_system_state = vp8_clear_system_state_c;
if (flags & HAS_MMX) vp8_clear_system_state = vpx_reset_mmx_state;
vp8_copy_mem16x16 = vp8_copy_mem16x16_c;
if (flags & HAS_MMX) vp8_copy_mem16x16 = vp8_copy_mem16x16_mmx;
if (flags & HAS_SSE2) vp8_copy_mem16x16 = vp8_copy_mem16x16_sse2;
vp8_copy_mem8x4 = vp8_copy_mem8x4_c;
if (flags & HAS_MMX) vp8_copy_mem8x4 = vp8_copy_mem8x4_mmx;
vp8_copy_mem8x8 = vp8_copy_mem8x8_c;
if (flags & HAS_MMX) vp8_copy_mem8x8 = vp8_copy_mem8x8_mmx;
vp8_dc_only_idct_add = vp8_dc_only_idct_add_c;
if (flags & HAS_MMX) vp8_dc_only_idct_add = vp8_dc_only_idct_add_mmx;
vp8_dequant_idct_add = vp8_dequant_idct_add_c;
if (flags & HAS_MMX) vp8_dequant_idct_add = vp8_dequant_idct_add_mmx;
vp8_dequant_idct_add_uv_block = vp8_dequant_idct_add_uv_block_c;
if (flags & HAS_MMX) vp8_dequant_idct_add_uv_block = vp8_dequant_idct_add_uv_block_mmx;
if (flags & HAS_SSE2) vp8_dequant_idct_add_uv_block = vp8_dequant_idct_add_uv_block_sse2;
vp8_dequant_idct_add_y_block = vp8_dequant_idct_add_y_block_c;
if (flags & HAS_MMX) vp8_dequant_idct_add_y_block = vp8_dequant_idct_add_y_block_mmx;
if (flags & HAS_SSE2) vp8_dequant_idct_add_y_block = vp8_dequant_idct_add_y_block_sse2;
vp8_dequantize_b = vp8_dequantize_b_c;
if (flags & HAS_MMX) vp8_dequantize_b = vp8_dequantize_b_mmx;
vp8_loop_filter_bh = vp8_loop_filter_bh_c;
if (flags & HAS_MMX) vp8_loop_filter_bh = vp8_loop_filter_bh_mmx;
if (flags & HAS_SSE2) vp8_loop_filter_bh = vp8_loop_filter_bh_sse2;
vp8_loop_filter_bv = vp8_loop_filter_bv_c;
if (flags & HAS_MMX) vp8_loop_filter_bv = vp8_loop_filter_bv_mmx;
if (flags & HAS_SSE2) vp8_loop_filter_bv = vp8_loop_filter_bv_sse2;
vp8_loop_filter_mbh = vp8_loop_filter_mbh_c;
if (flags & HAS_MMX) vp8_loop_filter_mbh = vp8_loop_filter_mbh_mmx;
if (flags & HAS_SSE2) vp8_loop_filter_mbh = vp8_loop_filter_mbh_sse2;
vp8_loop_filter_mbv = vp8_loop_filter_mbv_c;
if (flags & HAS_MMX) vp8_loop_filter_mbv = vp8_loop_filter_mbv_mmx;
if (flags & HAS_SSE2) vp8_loop_filter_mbv = vp8_loop_filter_mbv_sse2;
vp8_loop_filter_simple_bh = vp8_loop_filter_bhs_c;
if (flags & HAS_MMX) vp8_loop_filter_simple_bh = vp8_loop_filter_bhs_mmx;
if (flags & HAS_SSE2) vp8_loop_filter_simple_bh = vp8_loop_filter_bhs_sse2;
vp8_loop_filter_simple_bv = vp8_loop_filter_bvs_c;
if (flags & HAS_MMX) vp8_loop_filter_simple_bv = vp8_loop_filter_bvs_mmx;
if (flags & HAS_SSE2) vp8_loop_filter_simple_bv = vp8_loop_filter_bvs_sse2;
vp8_loop_filter_simple_mbh = vp8_loop_filter_simple_horizontal_edge_c;
if (flags & HAS_MMX) vp8_loop_filter_simple_mbh = vp8_loop_filter_simple_horizontal_edge_mmx;
if (flags & HAS_SSE2) vp8_loop_filter_simple_mbh = vp8_loop_filter_simple_horizontal_edge_sse2;
vp8_loop_filter_simple_mbv = vp8_loop_filter_simple_vertical_edge_c;
if (flags & HAS_MMX) vp8_loop_filter_simple_mbv = vp8_loop_filter_simple_vertical_edge_mmx;
if (flags & HAS_SSE2) vp8_loop_filter_simple_mbv = vp8_loop_filter_simple_vertical_edge_sse2;
vp8_short_idct4x4llm = vp8_short_idct4x4llm_c;
if (flags & HAS_MMX) vp8_short_idct4x4llm = vp8_short_idct4x4llm_mmx;
vp8_short_inv_walsh4x4 = vp8_short_inv_walsh4x4_c;
if (flags & HAS_MMX) vp8_short_inv_walsh4x4 = vp8_short_inv_walsh4x4_mmx;
if (flags & HAS_SSE2) vp8_short_inv_walsh4x4 = vp8_short_inv_walsh4x4_sse2;
vp8_sixtap_predict16x16 = vp8_sixtap_predict16x16_c;
if (flags & HAS_MMX) vp8_sixtap_predict16x16 = vp8_sixtap_predict16x16_mmx;
if (flags & HAS_SSE2) vp8_sixtap_predict16x16 = vp8_sixtap_predict16x16_sse2;
if (flags & HAS_SSSE3) vp8_sixtap_predict16x16 = vp8_sixtap_predict16x16_ssse3;
vp8_sixtap_predict4x4 = vp8_sixtap_predict4x4_c;
if (flags & HAS_MMX) vp8_sixtap_predict4x4 = vp8_sixtap_predict4x4_mmx;
if (flags & HAS_SSSE3) vp8_sixtap_predict4x4 = vp8_sixtap_predict4x4_ssse3;
vp8_sixtap_predict8x4 = vp8_sixtap_predict8x4_c;
if (flags & HAS_MMX) vp8_sixtap_predict8x4 = vp8_sixtap_predict8x4_mmx;
if (flags & HAS_SSE2) vp8_sixtap_predict8x4 = vp8_sixtap_predict8x4_sse2;
if (flags & HAS_SSSE3) vp8_sixtap_predict8x4 = vp8_sixtap_predict8x4_ssse3;
vp8_sixtap_predict8x8 = vp8_sixtap_predict8x8_c;
if (flags & HAS_MMX) vp8_sixtap_predict8x8 = vp8_sixtap_predict8x8_mmx;
if (flags & HAS_SSE2) vp8_sixtap_predict8x8 = vp8_sixtap_predict8x8_sse2;
if (flags & HAS_SSSE3) vp8_sixtap_predict8x8 = vp8_sixtap_predict8x8_ssse3;
}
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif

View File

@ -1,54 +0,0 @@
#ifndef VP9_RTCD_H_
#define VP9_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
#else
#define RTCD_EXTERN extern
#endif
/*
* VP9
*/
#include "vp9/common/vp9_common.h"
#ifdef __cplusplus
extern "C" {
#endif
void vp9_iht16x16_256_add_c(const tran_low_t *input, uint8_t *output, int pitch, int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_c
void vp9_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
void vp9_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
RTCD_EXTERN void (*vp9_iht4x4_16_add)(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
void vp9_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
void vp9_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
RTCD_EXTERN void (*vp9_iht8x8_64_add)(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
void vp9_rtcd(void);
#ifdef RTCD_C
#include "vpx_ports/arm.h"
static void setup_rtcd_internal(void)
{
int flags = arm_cpu_caps();
vp9_iht4x4_16_add = vp9_iht4x4_16_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp9_iht4x4_16_add = vp9_iht4x4_16_add_neon;
#endif
vp9_iht8x8_64_add = vp9_iht8x8_64_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vp9_iht8x8_64_add = vp9_iht8x8_64_add_neon;
#endif
}
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif

View File

@ -1,41 +0,0 @@
#ifndef VP9_RTCD_H_
#define VP9_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
#else
#define RTCD_EXTERN extern
#endif
/*
* VP9
*/
#include "vp9/common/vp9_common.h"
#ifdef __cplusplus
extern "C" {
#endif
void vp9_iht16x16_256_add_c(const tran_low_t *input, uint8_t *output, int pitch, int tx_type);
#define vp9_iht16x16_256_add vp9_iht16x16_256_add_c
void vp9_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
#define vp9_iht4x4_16_add vp9_iht4x4_16_add_c
void vp9_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
#define vp9_iht8x8_64_add vp9_iht8x8_64_add_c
void vp9_rtcd(void);
#ifdef RTCD_C
static void setup_rtcd_internal(void)
{
}
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif

View File

@ -1,55 +0,0 @@
#ifndef VP9_RTCD_H_
#define VP9_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
#else
#define RTCD_EXTERN extern
#endif
/*
* VP9
*/
#include "vp9/common/vp9_common.h"
#ifdef __cplusplus
extern "C" {
#endif
void vp9_iht16x16_256_add_c(const tran_low_t *input, uint8_t *output, int pitch, int tx_type);
void vp9_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *output, int pitch, int tx_type);
RTCD_EXTERN void (*vp9_iht16x16_256_add)(const tran_low_t *input, uint8_t *output, int pitch, int tx_type);
void vp9_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
void vp9_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
RTCD_EXTERN void (*vp9_iht4x4_16_add)(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
void vp9_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
void vp9_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
RTCD_EXTERN void (*vp9_iht8x8_64_add)(const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type);
void vp9_rtcd(void);
#ifdef RTCD_C
#include "vpx_ports/x86.h"
static void setup_rtcd_internal(void)
{
int flags = x86_simd_caps();
vp9_iht16x16_256_add = vp9_iht16x16_256_add_c;
if (flags & HAS_SSE2) vp9_iht16x16_256_add = vp9_iht16x16_256_add_sse2;
vp9_iht4x4_16_add = vp9_iht4x4_16_add_c;
if (flags & HAS_SSE2) vp9_iht4x4_16_add = vp9_iht4x4_16_add_sse2;
vp9_iht8x8_64_add = vp9_iht8x8_64_add_c;
if (flags & HAS_SSE2) vp9_iht8x8_64_add = vp9_iht8x8_64_add_sse2;
}
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif

View File

@ -1,678 +0,0 @@
#ifndef VPX_DSP_RTCD_H_
#define VPX_DSP_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
#else
#define RTCD_EXTERN extern
#endif
/*
* DSP
*/
#include "vpx/vpx_integer.h"
#include "vpx_dsp/vpx_dsp_common.h"
#ifdef __cplusplus
extern "C" {
#endif
void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve8)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve8_avg)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve8_avg_horiz)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve8_avg_vert)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve8_horiz)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve8_vert)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve_avg_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve_avg)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve_copy_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve_copy)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_d117_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d135_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d135_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d207e_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207e_predictor_16x16 vpx_d207e_predictor_16x16_c
void vpx_d207e_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207e_predictor_32x32 vpx_d207e_predictor_32x32_c
void vpx_d207e_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207e_predictor_4x4 vpx_d207e_predictor_4x4_c
void vpx_d207e_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207e_predictor_8x8 vpx_d207e_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d45_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_c
void vpx_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d45_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d45_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45e_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45e_predictor_16x16 vpx_d45e_predictor_16x16_c
void vpx_d45e_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45e_predictor_32x32 vpx_d45e_predictor_32x32_c
void vpx_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d45e_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45e_predictor_8x8 vpx_d45e_predictor_8x8_c
void vpx_d63_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63e_predictor_16x16 vpx_d63e_predictor_16x16_c
void vpx_d63e_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63e_predictor_32x32 vpx_d63e_predictor_32x32_c
void vpx_d63e_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_d63e_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63e_predictor_8x8 vpx_d63e_predictor_8x8_c
void vpx_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63f_predictor_4x4 vpx_d63f_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_128_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_128_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_128_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_128_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_left_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_left_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_left_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_left_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_top_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_top_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_top_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_top_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_h_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_h_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_h_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_h_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_he_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct16x16_10_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct16x16_10_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct16x16_1_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct16x16_1_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct16x16_256_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct16x16_256_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_1024_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct32x32_1024_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_1024_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct32x32_135_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_1_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct32x32_1_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_1024_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct32x32_34_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct4x4_16_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct4x4_16_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct4x4_1_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct4x4_1_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct8x8_12_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct8x8_12_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct8x8_1_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct8x8_1_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct8x8_64_add_neon(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct8x8_64_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_iwht4x4_16_add vpx_iwht4x4_16_add_c
void vpx_iwht4x4_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_iwht4x4_1_add vpx_iwht4x4_1_add_c
void vpx_lpf_horizontal_4_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_4_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_horizontal_4)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_horizontal_4_dual_neon(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
RTCD_EXTERN void (*vpx_lpf_horizontal_4_dual)(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_horizontal_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_8_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_horizontal_8)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_horizontal_8_dual_neon(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
RTCD_EXTERN void (*vpx_lpf_horizontal_8_dual)(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_horizontal_edge_16_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_edge_16_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_horizontal_edge_16)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_edge_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_edge_8_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_horizontal_edge_8)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_16_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_16_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_vertical_16)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_16_dual_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_16_dual_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_vertical_16_dual)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_4_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_vertical_4)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_vertical_4_dual_neon(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
RTCD_EXTERN void (*vpx_lpf_vertical_4_dual)(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_8_neon(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_vertical_8)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_vertical_8_dual_neon(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
RTCD_EXTERN void (*vpx_lpf_vertical_8_dual)(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_2d vpx_scaled_2d_c
void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_avg_2d vpx_scaled_avg_2d_c
void vpx_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_avg_horiz vpx_scaled_avg_horiz_c
void vpx_scaled_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_avg_vert vpx_scaled_avg_vert_c
void vpx_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_horiz vpx_scaled_horiz_c
void vpx_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_vert vpx_scaled_vert_c
void vpx_tm_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_tm_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_tm_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_tm_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_tm_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_v_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_v_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_v_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_v_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_ve_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
void vpx_dsp_rtcd(void);
#ifdef RTCD_C
#include "vpx_ports/arm.h"
static void setup_rtcd_internal(void)
{
int flags = arm_cpu_caps();
vpx_convolve8 = vpx_convolve8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_convolve8 = vpx_convolve8_neon;
#endif
vpx_convolve8_avg = vpx_convolve8_avg_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_convolve8_avg = vpx_convolve8_avg_neon;
#endif
vpx_convolve8_avg_horiz = vpx_convolve8_avg_horiz_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_convolve8_avg_horiz = vpx_convolve8_avg_horiz_neon;
#endif
vpx_convolve8_avg_vert = vpx_convolve8_avg_vert_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_convolve8_avg_vert = vpx_convolve8_avg_vert_neon;
#endif
vpx_convolve8_horiz = vpx_convolve8_horiz_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_convolve8_horiz = vpx_convolve8_horiz_neon;
#endif
vpx_convolve8_vert = vpx_convolve8_vert_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_convolve8_vert = vpx_convolve8_vert_neon;
#endif
vpx_convolve_avg = vpx_convolve_avg_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_convolve_avg = vpx_convolve_avg_neon;
#endif
vpx_convolve_copy = vpx_convolve_copy_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_convolve_copy = vpx_convolve_copy_neon;
#endif
vpx_d135_predictor_4x4 = vpx_d135_predictor_4x4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_d135_predictor_4x4 = vpx_d135_predictor_4x4_neon;
#endif
vpx_d45_predictor_16x16 = vpx_d45_predictor_16x16_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_d45_predictor_16x16 = vpx_d45_predictor_16x16_neon;
#endif
vpx_d45_predictor_4x4 = vpx_d45_predictor_4x4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_d45_predictor_4x4 = vpx_d45_predictor_4x4_neon;
#endif
vpx_d45_predictor_8x8 = vpx_d45_predictor_8x8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_d45_predictor_8x8 = vpx_d45_predictor_8x8_neon;
#endif
vpx_dc_128_predictor_16x16 = vpx_dc_128_predictor_16x16_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_128_predictor_16x16 = vpx_dc_128_predictor_16x16_neon;
#endif
vpx_dc_128_predictor_32x32 = vpx_dc_128_predictor_32x32_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_128_predictor_32x32 = vpx_dc_128_predictor_32x32_neon;
#endif
vpx_dc_128_predictor_4x4 = vpx_dc_128_predictor_4x4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_128_predictor_4x4 = vpx_dc_128_predictor_4x4_neon;
#endif
vpx_dc_128_predictor_8x8 = vpx_dc_128_predictor_8x8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_128_predictor_8x8 = vpx_dc_128_predictor_8x8_neon;
#endif
vpx_dc_left_predictor_16x16 = vpx_dc_left_predictor_16x16_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_left_predictor_16x16 = vpx_dc_left_predictor_16x16_neon;
#endif
vpx_dc_left_predictor_32x32 = vpx_dc_left_predictor_32x32_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_left_predictor_32x32 = vpx_dc_left_predictor_32x32_neon;
#endif
vpx_dc_left_predictor_4x4 = vpx_dc_left_predictor_4x4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_left_predictor_4x4 = vpx_dc_left_predictor_4x4_neon;
#endif
vpx_dc_left_predictor_8x8 = vpx_dc_left_predictor_8x8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_left_predictor_8x8 = vpx_dc_left_predictor_8x8_neon;
#endif
vpx_dc_predictor_16x16 = vpx_dc_predictor_16x16_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_predictor_16x16 = vpx_dc_predictor_16x16_neon;
#endif
vpx_dc_predictor_32x32 = vpx_dc_predictor_32x32_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_predictor_32x32 = vpx_dc_predictor_32x32_neon;
#endif
vpx_dc_predictor_4x4 = vpx_dc_predictor_4x4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_predictor_4x4 = vpx_dc_predictor_4x4_neon;
#endif
vpx_dc_predictor_8x8 = vpx_dc_predictor_8x8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_predictor_8x8 = vpx_dc_predictor_8x8_neon;
#endif
vpx_dc_top_predictor_16x16 = vpx_dc_top_predictor_16x16_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_top_predictor_16x16 = vpx_dc_top_predictor_16x16_neon;
#endif
vpx_dc_top_predictor_32x32 = vpx_dc_top_predictor_32x32_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_top_predictor_32x32 = vpx_dc_top_predictor_32x32_neon;
#endif
vpx_dc_top_predictor_4x4 = vpx_dc_top_predictor_4x4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_top_predictor_4x4 = vpx_dc_top_predictor_4x4_neon;
#endif
vpx_dc_top_predictor_8x8 = vpx_dc_top_predictor_8x8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_dc_top_predictor_8x8 = vpx_dc_top_predictor_8x8_neon;
#endif
vpx_h_predictor_16x16 = vpx_h_predictor_16x16_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_h_predictor_16x16 = vpx_h_predictor_16x16_neon;
#endif
vpx_h_predictor_32x32 = vpx_h_predictor_32x32_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_h_predictor_32x32 = vpx_h_predictor_32x32_neon;
#endif
vpx_h_predictor_4x4 = vpx_h_predictor_4x4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_h_predictor_4x4 = vpx_h_predictor_4x4_neon;
#endif
vpx_h_predictor_8x8 = vpx_h_predictor_8x8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_h_predictor_8x8 = vpx_h_predictor_8x8_neon;
#endif
vpx_idct16x16_10_add = vpx_idct16x16_10_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_idct16x16_10_add = vpx_idct16x16_10_add_neon;
#endif
vpx_idct16x16_1_add = vpx_idct16x16_1_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_idct16x16_1_add = vpx_idct16x16_1_add_neon;
#endif
vpx_idct16x16_256_add = vpx_idct16x16_256_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_idct16x16_256_add = vpx_idct16x16_256_add_neon;
#endif
vpx_idct32x32_1024_add = vpx_idct32x32_1024_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_idct32x32_1024_add = vpx_idct32x32_1024_add_neon;
#endif
vpx_idct32x32_135_add = vpx_idct32x32_135_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_idct32x32_135_add = vpx_idct32x32_1024_add_neon;
#endif
vpx_idct32x32_1_add = vpx_idct32x32_1_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_idct32x32_1_add = vpx_idct32x32_1_add_neon;
#endif
vpx_idct32x32_34_add = vpx_idct32x32_34_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_idct32x32_34_add = vpx_idct32x32_1024_add_neon;
#endif
vpx_idct4x4_16_add = vpx_idct4x4_16_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_idct4x4_16_add = vpx_idct4x4_16_add_neon;
#endif
vpx_idct4x4_1_add = vpx_idct4x4_1_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_idct4x4_1_add = vpx_idct4x4_1_add_neon;
#endif
vpx_idct8x8_12_add = vpx_idct8x8_12_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_idct8x8_12_add = vpx_idct8x8_12_add_neon;
#endif
vpx_idct8x8_1_add = vpx_idct8x8_1_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_idct8x8_1_add = vpx_idct8x8_1_add_neon;
#endif
vpx_idct8x8_64_add = vpx_idct8x8_64_add_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_idct8x8_64_add = vpx_idct8x8_64_add_neon;
#endif
vpx_lpf_horizontal_4 = vpx_lpf_horizontal_4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_lpf_horizontal_4 = vpx_lpf_horizontal_4_neon;
#endif
vpx_lpf_horizontal_4_dual = vpx_lpf_horizontal_4_dual_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_lpf_horizontal_4_dual = vpx_lpf_horizontal_4_dual_neon;
#endif
vpx_lpf_horizontal_8 = vpx_lpf_horizontal_8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_lpf_horizontal_8 = vpx_lpf_horizontal_8_neon;
#endif
vpx_lpf_horizontal_8_dual = vpx_lpf_horizontal_8_dual_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_lpf_horizontal_8_dual = vpx_lpf_horizontal_8_dual_neon;
#endif
vpx_lpf_horizontal_edge_16 = vpx_lpf_horizontal_edge_16_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_lpf_horizontal_edge_16 = vpx_lpf_horizontal_edge_16_neon;
#endif
vpx_lpf_horizontal_edge_8 = vpx_lpf_horizontal_edge_8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_lpf_horizontal_edge_8 = vpx_lpf_horizontal_edge_8_neon;
#endif
vpx_lpf_vertical_16 = vpx_lpf_vertical_16_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_lpf_vertical_16 = vpx_lpf_vertical_16_neon;
#endif
vpx_lpf_vertical_16_dual = vpx_lpf_vertical_16_dual_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_lpf_vertical_16_dual = vpx_lpf_vertical_16_dual_neon;
#endif
vpx_lpf_vertical_4 = vpx_lpf_vertical_4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_lpf_vertical_4 = vpx_lpf_vertical_4_neon;
#endif
vpx_lpf_vertical_4_dual = vpx_lpf_vertical_4_dual_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_lpf_vertical_4_dual = vpx_lpf_vertical_4_dual_neon;
#endif
vpx_lpf_vertical_8 = vpx_lpf_vertical_8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_lpf_vertical_8 = vpx_lpf_vertical_8_neon;
#endif
vpx_lpf_vertical_8_dual = vpx_lpf_vertical_8_dual_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_lpf_vertical_8_dual = vpx_lpf_vertical_8_dual_neon;
#endif
vpx_tm_predictor_16x16 = vpx_tm_predictor_16x16_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_tm_predictor_16x16 = vpx_tm_predictor_16x16_neon;
#endif
vpx_tm_predictor_32x32 = vpx_tm_predictor_32x32_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_tm_predictor_32x32 = vpx_tm_predictor_32x32_neon;
#endif
vpx_tm_predictor_4x4 = vpx_tm_predictor_4x4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_tm_predictor_4x4 = vpx_tm_predictor_4x4_neon;
#endif
vpx_tm_predictor_8x8 = vpx_tm_predictor_8x8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_tm_predictor_8x8 = vpx_tm_predictor_8x8_neon;
#endif
vpx_v_predictor_16x16 = vpx_v_predictor_16x16_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_v_predictor_16x16 = vpx_v_predictor_16x16_neon;
#endif
vpx_v_predictor_32x32 = vpx_v_predictor_32x32_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_v_predictor_32x32 = vpx_v_predictor_32x32_neon;
#endif
vpx_v_predictor_4x4 = vpx_v_predictor_4x4_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_v_predictor_4x4 = vpx_v_predictor_4x4_neon;
#endif
vpx_v_predictor_8x8 = vpx_v_predictor_8x8_c;
#if HAVE_NEON
if (flags & HAS_NEON) vpx_v_predictor_8x8 = vpx_v_predictor_8x8_neon;
#endif
}
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif
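For context, the removed header above is libvpx's run-time CPU detection (RTCD) table for ARM: each vpx_* symbol is either a #define alias of the portable _c kernel or an RTCD_EXTERN function pointer that setup_rtcd_internal() rebinds to the NEON kernel when arm_cpu_caps() reports HAS_NEON. The sketch below is not libvpx code; it is a minimal, self-contained illustration of that dispatch pattern, with my_cpu_caps(), MY_HAS_NEON and the add_one_* kernels as hypothetical stand-ins for arm_cpu_caps(), HAS_NEON and the _c/_neon variants.

/* Illustrative sketch only (not part of libvpx or this diff). */
#include <stdint.h>
#include <stdio.h>

#define MY_HAS_NEON 0x1  /* hypothetical stand-in for HAS_NEON */

/* Portable C fallback, always available. */
static void add_one_c(uint8_t *buf, int n) {
    for (int i = 0; i < n; ++i) buf[i] += 1;
}

/* Stand-in for a NEON kernel; produces the same result as the C path. */
static void add_one_neon(uint8_t *buf, int n) {
    for (int i = 0; i < n; ++i) buf[i] += 1;
}

/* RTCD_EXTERN-style function pointer the rest of the code calls through. */
static void (*add_one)(uint8_t *buf, int n);

/* Hypothetical stand-in for arm_cpu_caps(): report detected CPU features. */
static int my_cpu_caps(void) { return MY_HAS_NEON; }

/* Mirrors setup_rtcd_internal(): default to C, upgrade when the flag is set. */
static void setup_dispatch(void) {
    int flags = my_cpu_caps();
    add_one = add_one_c;
    if (flags & MY_HAS_NEON) add_one = add_one_neon;
}

int main(void) {
    uint8_t buf[4] = {0, 1, 2, 3};
    setup_dispatch();          /* must run once before any dispatched call */
    add_one(buf, 4);           /* resolves to the NEON stand-in here */
    printf("%d %d %d %d\n", buf[0], buf[1], buf[2], buf[3]); /* 1 2 3 4 */
    return 0;
}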

View File

@ -1,355 +0,0 @@
#ifndef VPX_DSP_RTCD_H_
#define VPX_DSP_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
#else
#define RTCD_EXTERN extern
#endif
/*
* DSP
*/
#include "vpx/vpx_integer.h"
#include "vpx_dsp/vpx_dsp_common.h"
#ifdef __cplusplus
extern "C" {
#endif
void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_convolve8 vpx_convolve8_c
void vpx_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_convolve8_avg vpx_convolve8_avg_c
void vpx_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_convolve8_avg_horiz vpx_convolve8_avg_horiz_c
void vpx_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_convolve8_avg_vert vpx_convolve8_avg_vert_c
void vpx_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_convolve8_horiz vpx_convolve8_horiz_c
void vpx_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_convolve8_vert vpx_convolve8_vert_c
void vpx_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_convolve_avg vpx_convolve_avg_c
void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_convolve_copy vpx_convolve_copy_c
void vpx_d117_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c
void vpx_d135_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d153_predictor_16x16 vpx_d153_predictor_16x16_c
void vpx_d153_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d153_predictor_32x32 vpx_d153_predictor_32x32_c
void vpx_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d153_predictor_4x4 vpx_d153_predictor_4x4_c
void vpx_d153_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d153_predictor_8x8 vpx_d153_predictor_8x8_c
void vpx_d207_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207_predictor_16x16 vpx_d207_predictor_16x16_c
void vpx_d207_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207_predictor_32x32 vpx_d207_predictor_32x32_c
void vpx_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207_predictor_4x4 vpx_d207_predictor_4x4_c
void vpx_d207_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207_predictor_8x8 vpx_d207_predictor_8x8_c
void vpx_d207e_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207e_predictor_16x16 vpx_d207e_predictor_16x16_c
void vpx_d207e_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207e_predictor_32x32 vpx_d207e_predictor_32x32_c
void vpx_d207e_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207e_predictor_4x4 vpx_d207e_predictor_4x4_c
void vpx_d207e_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207e_predictor_8x8 vpx_d207e_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45_predictor_16x16 vpx_d45_predictor_16x16_c
void vpx_d45_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45_predictor_32x32 vpx_d45_predictor_32x32_c
void vpx_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45_predictor_4x4 vpx_d45_predictor_4x4_c
void vpx_d45_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45_predictor_8x8 vpx_d45_predictor_8x8_c
void vpx_d45e_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45e_predictor_16x16 vpx_d45e_predictor_16x16_c
void vpx_d45e_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45e_predictor_32x32 vpx_d45e_predictor_32x32_c
void vpx_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d45e_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45e_predictor_8x8 vpx_d45e_predictor_8x8_c
void vpx_d63_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63_predictor_16x16 vpx_d63_predictor_16x16_c
void vpx_d63_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63_predictor_32x32 vpx_d63_predictor_32x32_c
void vpx_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63_predictor_4x4 vpx_d63_predictor_4x4_c
void vpx_d63_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63_predictor_8x8 vpx_d63_predictor_8x8_c
void vpx_d63e_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63e_predictor_16x16 vpx_d63e_predictor_16x16_c
void vpx_d63e_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63e_predictor_32x32 vpx_d63e_predictor_32x32_c
void vpx_d63e_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_d63e_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63e_predictor_8x8 vpx_d63e_predictor_8x8_c
void vpx_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63f_predictor_4x4 vpx_d63f_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_128_predictor_16x16 vpx_dc_128_predictor_16x16_c
void vpx_dc_128_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_128_predictor_32x32 vpx_dc_128_predictor_32x32_c
void vpx_dc_128_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_128_predictor_4x4 vpx_dc_128_predictor_4x4_c
void vpx_dc_128_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_128_predictor_8x8 vpx_dc_128_predictor_8x8_c
void vpx_dc_left_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_left_predictor_16x16 vpx_dc_left_predictor_16x16_c
void vpx_dc_left_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_left_predictor_32x32 vpx_dc_left_predictor_32x32_c
void vpx_dc_left_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_left_predictor_4x4 vpx_dc_left_predictor_4x4_c
void vpx_dc_left_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_left_predictor_8x8 vpx_dc_left_predictor_8x8_c
void vpx_dc_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_predictor_16x16 vpx_dc_predictor_16x16_c
void vpx_dc_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_predictor_32x32 vpx_dc_predictor_32x32_c
void vpx_dc_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_predictor_4x4 vpx_dc_predictor_4x4_c
void vpx_dc_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_predictor_8x8 vpx_dc_predictor_8x8_c
void vpx_dc_top_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_top_predictor_16x16 vpx_dc_top_predictor_16x16_c
void vpx_dc_top_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_top_predictor_32x32 vpx_dc_top_predictor_32x32_c
void vpx_dc_top_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_top_predictor_4x4 vpx_dc_top_predictor_4x4_c
void vpx_dc_top_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_dc_top_predictor_8x8 vpx_dc_top_predictor_8x8_c
void vpx_h_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_h_predictor_16x16 vpx_h_predictor_16x16_c
void vpx_h_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_h_predictor_32x32 vpx_h_predictor_32x32_c
void vpx_h_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_h_predictor_4x4 vpx_h_predictor_4x4_c
void vpx_h_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_h_predictor_8x8 vpx_h_predictor_8x8_c
void vpx_he_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_idct16x16_10_add vpx_idct16x16_10_add_c
void vpx_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_idct16x16_1_add vpx_idct16x16_1_add_c
void vpx_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_idct16x16_256_add vpx_idct16x16_256_add_c
void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_idct32x32_1024_add vpx_idct32x32_1024_add_c
void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_idct32x32_135_add vpx_idct32x32_135_add_c
void vpx_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_idct32x32_1_add vpx_idct32x32_1_add_c
void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_idct32x32_34_add vpx_idct32x32_34_add_c
void vpx_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_idct4x4_16_add vpx_idct4x4_16_add_c
void vpx_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_idct4x4_1_add vpx_idct4x4_1_add_c
void vpx_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_idct8x8_12_add vpx_idct8x8_12_add_c
void vpx_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_idct8x8_1_add vpx_idct8x8_1_add_c
void vpx_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_idct8x8_64_add vpx_idct8x8_64_add_c
void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_iwht4x4_16_add vpx_iwht4x4_16_add_c
void vpx_iwht4x4_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_iwht4x4_1_add vpx_iwht4x4_1_add_c
void vpx_lpf_horizontal_4_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
#define vpx_lpf_horizontal_4 vpx_lpf_horizontal_4_c
void vpx_lpf_horizontal_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
#define vpx_lpf_horizontal_4_dual vpx_lpf_horizontal_4_dual_c
void vpx_lpf_horizontal_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
#define vpx_lpf_horizontal_8 vpx_lpf_horizontal_8_c
void vpx_lpf_horizontal_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
#define vpx_lpf_horizontal_8_dual vpx_lpf_horizontal_8_dual_c
void vpx_lpf_horizontal_edge_16_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
#define vpx_lpf_horizontal_edge_16 vpx_lpf_horizontal_edge_16_c
void vpx_lpf_horizontal_edge_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
#define vpx_lpf_horizontal_edge_8 vpx_lpf_horizontal_edge_8_c
void vpx_lpf_vertical_16_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
#define vpx_lpf_vertical_16 vpx_lpf_vertical_16_c
void vpx_lpf_vertical_16_dual_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
#define vpx_lpf_vertical_16_dual vpx_lpf_vertical_16_dual_c
void vpx_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
#define vpx_lpf_vertical_4 vpx_lpf_vertical_4_c
void vpx_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
#define vpx_lpf_vertical_4_dual vpx_lpf_vertical_4_dual_c
void vpx_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
#define vpx_lpf_vertical_8 vpx_lpf_vertical_8_c
void vpx_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
#define vpx_lpf_vertical_8_dual vpx_lpf_vertical_8_dual_c
void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_2d vpx_scaled_2d_c
void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_avg_2d vpx_scaled_avg_2d_c
void vpx_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_avg_horiz vpx_scaled_avg_horiz_c
void vpx_scaled_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_avg_vert vpx_scaled_avg_vert_c
void vpx_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_horiz vpx_scaled_horiz_c
void vpx_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_vert vpx_scaled_vert_c
void vpx_tm_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_tm_predictor_16x16 vpx_tm_predictor_16x16_c
void vpx_tm_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_tm_predictor_32x32 vpx_tm_predictor_32x32_c
void vpx_tm_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_tm_predictor_4x4 vpx_tm_predictor_4x4_c
void vpx_tm_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_tm_predictor_8x8 vpx_tm_predictor_8x8_c
void vpx_v_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_v_predictor_16x16 vpx_v_predictor_16x16_c
void vpx_v_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_v_predictor_32x32 vpx_v_predictor_32x32_c
void vpx_v_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_v_predictor_4x4 vpx_v_predictor_4x4_c
void vpx_v_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_v_predictor_8x8 vpx_v_predictor_8x8_c
void vpx_ve_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
void vpx_dsp_rtcd(void);
#ifdef RTCD_C
static void setup_rtcd_internal(void)
{
}
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif
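By contrast, the generic header above targets builds with no runtime-selectable SIMD: every symbol is a plain #define alias of the _c kernel and setup_rtcd_internal() is empty, so calls resolve at compile time with no function-pointer indirection. The snippet below is a hypothetical illustration of that aliasing style, not libvpx code.

/* Illustrative sketch only (not part of libvpx or this diff). */
#include <stdint.h>

/* Portable C kernel, the only implementation available on this target. */
static void copy_row_c(uint8_t *dst, const uint8_t *src, int n) {
    for (int i = 0; i < n; ++i) dst[i] = src[i];
}

/* Compile-time alias in the style of "#define vpx_convolve_copy vpx_convolve_copy_c". */
#define copy_row copy_row_c

int main(void) {
    uint8_t src[4] = {1, 2, 3, 4}, dst[4];
    copy_row(dst, src, 4); /* expands to copy_row_c(dst, src, 4); no dispatch */
    return (dst[0] == 1) ? 0 : 1;
}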

View File

@ -1,604 +0,0 @@
#ifndef VPX_DSP_RTCD_H_
#define VPX_DSP_RTCD_H_
#ifdef RTCD_C
#define RTCD_EXTERN
#else
#define RTCD_EXTERN extern
#endif
/*
* DSP
*/
#include "vpx/vpx_integer.h"
#include "vpx_dsp/vpx_dsp_common.h"
#ifdef __cplusplus
extern "C" {
#endif
void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_sse2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve8)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve8_avg)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve8_avg_horiz)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_vert_sse2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_avg_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve8_avg_vert)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_horiz_sse2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve8_horiz)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_vert_sse2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve8_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve8_vert)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve_avg_sse2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve_avg)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_convolve_copy_sse2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_convolve_copy)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_d117_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d117_predictor_16x16 vpx_d117_predictor_16x16_c
void vpx_d117_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d117_predictor_32x32 vpx_d117_predictor_32x32_c
void vpx_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d117_predictor_4x4 vpx_d117_predictor_4x4_c
void vpx_d117_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d117_predictor_8x8 vpx_d117_predictor_8x8_c
void vpx_d135_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d135_predictor_16x16 vpx_d135_predictor_16x16_c
void vpx_d135_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d135_predictor_32x32 vpx_d135_predictor_32x32_c
void vpx_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d135_predictor_4x4 vpx_d135_predictor_4x4_c
void vpx_d135_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d135_predictor_8x8 vpx_d135_predictor_8x8_c
void vpx_d153_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d153_predictor_16x16_ssse3(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d153_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d153_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d153_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d153_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d153_predictor_4x4_ssse3(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d153_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d153_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d153_predictor_8x8_ssse3(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d153_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d207_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d207_predictor_16x16_ssse3(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d207_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d207_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d207_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d207_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d207_predictor_4x4_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d207_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d207_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d207_predictor_8x8_ssse3(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d207_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d207e_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207e_predictor_16x16 vpx_d207e_predictor_16x16_c
void vpx_d207e_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207e_predictor_32x32 vpx_d207e_predictor_32x32_c
void vpx_d207e_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207e_predictor_4x4 vpx_d207e_predictor_4x4_c
void vpx_d207e_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d207e_predictor_8x8 vpx_d207e_predictor_8x8_c
void vpx_d45_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45_predictor_16x16_ssse3(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d45_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d45_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45_predictor_4x4_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d45_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45_predictor_8x8_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d45_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d45e_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45e_predictor_16x16 vpx_d45e_predictor_16x16_c
void vpx_d45e_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45e_predictor_32x32 vpx_d45e_predictor_32x32_c
void vpx_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45e_predictor_4x4 vpx_d45e_predictor_4x4_c
void vpx_d45e_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d45e_predictor_8x8 vpx_d45e_predictor_8x8_c
void vpx_d63_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d63_predictor_16x16_ssse3(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d63_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d63_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d63_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d63_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d63_predictor_4x4_ssse3(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d63_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d63_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d63_predictor_8x8_ssse3(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_d63_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_d63e_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63e_predictor_16x16 vpx_d63e_predictor_16x16_c
void vpx_d63e_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63e_predictor_32x32 vpx_d63e_predictor_32x32_c
void vpx_d63e_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63e_predictor_4x4 vpx_d63e_predictor_4x4_c
void vpx_d63e_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63e_predictor_8x8 vpx_d63e_predictor_8x8_c
void vpx_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_d63f_predictor_4x4 vpx_d63f_predictor_4x4_c
void vpx_dc_128_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_16x16_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_128_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_32x32_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_128_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_4x4_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_128_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_128_predictor_8x8_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_128_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_16x16_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_left_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_32x32_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_left_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_4x4_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_left_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_left_predictor_8x8_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_left_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_16x16_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_32x32_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_4x4_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_predictor_8x8_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_16x16_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_top_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_32x32_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_top_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_4x4_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_top_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_dc_top_predictor_8x8_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_dc_top_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_16x16_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_h_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_32x32_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_h_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_4x4_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_h_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_h_predictor_8x8_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_h_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_he_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_he_predictor_4x4 vpx_he_predictor_4x4_c
void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct16x16_10_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct16x16_1_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct16x16_1_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct16x16_256_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct32x32_1024_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct32x32_135_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct32x32_1_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct32x32_34_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct4x4_16_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct4x4_1_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct4x4_1_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct8x8_12_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct8x8_1_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_idct8x8_64_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_iwht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int dest_stride);
RTCD_EXTERN void (*vpx_iwht4x4_16_add)(const tran_low_t *input, uint8_t *dest, int dest_stride);
void vpx_iwht4x4_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride);
#define vpx_iwht4x4_1_add vpx_iwht4x4_1_add_c
void vpx_lpf_horizontal_4_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_4_sse2(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_horizontal_4)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_horizontal_4_dual_sse2(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
RTCD_EXTERN void (*vpx_lpf_horizontal_4_dual)(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_horizontal_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_8_sse2(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_horizontal_8)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
RTCD_EXTERN void (*vpx_lpf_horizontal_8_dual)(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_horizontal_edge_16_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_edge_16_sse2(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_horizontal_edge_16)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_edge_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_horizontal_edge_8_sse2(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_horizontal_edge_8)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_16_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_16_sse2(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_vertical_16)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_16_dual_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_16_dual_sse2(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_vertical_16_dual)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_4_sse2(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_vertical_4)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_vertical_4_dual_sse2(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
RTCD_EXTERN void (*vpx_lpf_vertical_4_dual)(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_8_sse2(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
RTCD_EXTERN void (*vpx_lpf_vertical_8)(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh);
void vpx_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_lpf_vertical_8_dual_sse2(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
RTCD_EXTERN void (*vpx_lpf_vertical_8_dual)(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1);
void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
RTCD_EXTERN void (*vpx_scaled_2d)(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_avg_2d vpx_scaled_avg_2d_c
void vpx_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_avg_horiz vpx_scaled_avg_horiz_c
void vpx_scaled_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_avg_vert vpx_scaled_avg_vert_c
void vpx_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_horiz vpx_scaled_horiz_c
void vpx_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h);
#define vpx_scaled_vert vpx_scaled_vert_c
void vpx_tm_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_16x16_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_tm_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_32x32_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_tm_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_4x4_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_tm_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_tm_predictor_8x8_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_tm_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_16x16_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_16x16_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_v_predictor_16x16)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_32x32_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_32x32_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_v_predictor_32x32)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_4x4_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_v_predictor_4x4)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_8x8_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_v_predictor_8x8_sse2(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
RTCD_EXTERN void (*vpx_v_predictor_8x8)(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
void vpx_ve_predictor_4x4_c(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left);
#define vpx_ve_predictor_4x4 vpx_ve_predictor_4x4_c
void vpx_dsp_rtcd(void);
#ifdef RTCD_C
#include "vpx_ports/x86.h"
static void setup_rtcd_internal(void)
{
int flags = x86_simd_caps();
vpx_convolve8 = vpx_convolve8_c;
if (flags & HAS_SSE2) vpx_convolve8 = vpx_convolve8_sse2;
if (flags & HAS_SSSE3) vpx_convolve8 = vpx_convolve8_ssse3;
vpx_convolve8_avg = vpx_convolve8_avg_c;
if (flags & HAS_SSE2) vpx_convolve8_avg = vpx_convolve8_avg_sse2;
if (flags & HAS_SSSE3) vpx_convolve8_avg = vpx_convolve8_avg_ssse3;
vpx_convolve8_avg_horiz = vpx_convolve8_avg_horiz_c;
if (flags & HAS_SSE2) vpx_convolve8_avg_horiz = vpx_convolve8_avg_horiz_sse2;
if (flags & HAS_SSSE3) vpx_convolve8_avg_horiz = vpx_convolve8_avg_horiz_ssse3;
vpx_convolve8_avg_vert = vpx_convolve8_avg_vert_c;
if (flags & HAS_SSE2) vpx_convolve8_avg_vert = vpx_convolve8_avg_vert_sse2;
if (flags & HAS_SSSE3) vpx_convolve8_avg_vert = vpx_convolve8_avg_vert_ssse3;
vpx_convolve8_horiz = vpx_convolve8_horiz_c;
if (flags & HAS_SSE2) vpx_convolve8_horiz = vpx_convolve8_horiz_sse2;
if (flags & HAS_SSSE3) vpx_convolve8_horiz = vpx_convolve8_horiz_ssse3;
vpx_convolve8_vert = vpx_convolve8_vert_c;
if (flags & HAS_SSE2) vpx_convolve8_vert = vpx_convolve8_vert_sse2;
if (flags & HAS_SSSE3) vpx_convolve8_vert = vpx_convolve8_vert_ssse3;
vpx_convolve_avg = vpx_convolve_avg_c;
if (flags & HAS_SSE2) vpx_convolve_avg = vpx_convolve_avg_sse2;
vpx_convolve_copy = vpx_convolve_copy_c;
if (flags & HAS_SSE2) vpx_convolve_copy = vpx_convolve_copy_sse2;
vpx_d153_predictor_16x16 = vpx_d153_predictor_16x16_c;
if (flags & HAS_SSSE3) vpx_d153_predictor_16x16 = vpx_d153_predictor_16x16_ssse3;
vpx_d153_predictor_32x32 = vpx_d153_predictor_32x32_c;
if (flags & HAS_SSSE3) vpx_d153_predictor_32x32 = vpx_d153_predictor_32x32_ssse3;
vpx_d153_predictor_4x4 = vpx_d153_predictor_4x4_c;
if (flags & HAS_SSSE3) vpx_d153_predictor_4x4 = vpx_d153_predictor_4x4_ssse3;
vpx_d153_predictor_8x8 = vpx_d153_predictor_8x8_c;
if (flags & HAS_SSSE3) vpx_d153_predictor_8x8 = vpx_d153_predictor_8x8_ssse3;
vpx_d207_predictor_16x16 = vpx_d207_predictor_16x16_c;
if (flags & HAS_SSSE3) vpx_d207_predictor_16x16 = vpx_d207_predictor_16x16_ssse3;
vpx_d207_predictor_32x32 = vpx_d207_predictor_32x32_c;
if (flags & HAS_SSSE3) vpx_d207_predictor_32x32 = vpx_d207_predictor_32x32_ssse3;
vpx_d207_predictor_4x4 = vpx_d207_predictor_4x4_c;
if (flags & HAS_SSE2) vpx_d207_predictor_4x4 = vpx_d207_predictor_4x4_sse2;
vpx_d207_predictor_8x8 = vpx_d207_predictor_8x8_c;
if (flags & HAS_SSSE3) vpx_d207_predictor_8x8 = vpx_d207_predictor_8x8_ssse3;
vpx_d45_predictor_16x16 = vpx_d45_predictor_16x16_c;
if (flags & HAS_SSSE3) vpx_d45_predictor_16x16 = vpx_d45_predictor_16x16_ssse3;
vpx_d45_predictor_32x32 = vpx_d45_predictor_32x32_c;
if (flags & HAS_SSSE3) vpx_d45_predictor_32x32 = vpx_d45_predictor_32x32_ssse3;
vpx_d45_predictor_4x4 = vpx_d45_predictor_4x4_c;
if (flags & HAS_SSE2) vpx_d45_predictor_4x4 = vpx_d45_predictor_4x4_sse2;
vpx_d45_predictor_8x8 = vpx_d45_predictor_8x8_c;
if (flags & HAS_SSE2) vpx_d45_predictor_8x8 = vpx_d45_predictor_8x8_sse2;
vpx_d63_predictor_16x16 = vpx_d63_predictor_16x16_c;
if (flags & HAS_SSSE3) vpx_d63_predictor_16x16 = vpx_d63_predictor_16x16_ssse3;
vpx_d63_predictor_32x32 = vpx_d63_predictor_32x32_c;
if (flags & HAS_SSSE3) vpx_d63_predictor_32x32 = vpx_d63_predictor_32x32_ssse3;
vpx_d63_predictor_4x4 = vpx_d63_predictor_4x4_c;
if (flags & HAS_SSSE3) vpx_d63_predictor_4x4 = vpx_d63_predictor_4x4_ssse3;
vpx_d63_predictor_8x8 = vpx_d63_predictor_8x8_c;
if (flags & HAS_SSSE3) vpx_d63_predictor_8x8 = vpx_d63_predictor_8x8_ssse3;
vpx_dc_128_predictor_16x16 = vpx_dc_128_predictor_16x16_c;
if (flags & HAS_SSE2) vpx_dc_128_predictor_16x16 = vpx_dc_128_predictor_16x16_sse2;
vpx_dc_128_predictor_32x32 = vpx_dc_128_predictor_32x32_c;
if (flags & HAS_SSE2) vpx_dc_128_predictor_32x32 = vpx_dc_128_predictor_32x32_sse2;
vpx_dc_128_predictor_4x4 = vpx_dc_128_predictor_4x4_c;
if (flags & HAS_SSE2) vpx_dc_128_predictor_4x4 = vpx_dc_128_predictor_4x4_sse2;
vpx_dc_128_predictor_8x8 = vpx_dc_128_predictor_8x8_c;
if (flags & HAS_SSE2) vpx_dc_128_predictor_8x8 = vpx_dc_128_predictor_8x8_sse2;
vpx_dc_left_predictor_16x16 = vpx_dc_left_predictor_16x16_c;
if (flags & HAS_SSE2) vpx_dc_left_predictor_16x16 = vpx_dc_left_predictor_16x16_sse2;
vpx_dc_left_predictor_32x32 = vpx_dc_left_predictor_32x32_c;
if (flags & HAS_SSE2) vpx_dc_left_predictor_32x32 = vpx_dc_left_predictor_32x32_sse2;
vpx_dc_left_predictor_4x4 = vpx_dc_left_predictor_4x4_c;
if (flags & HAS_SSE2) vpx_dc_left_predictor_4x4 = vpx_dc_left_predictor_4x4_sse2;
vpx_dc_left_predictor_8x8 = vpx_dc_left_predictor_8x8_c;
if (flags & HAS_SSE2) vpx_dc_left_predictor_8x8 = vpx_dc_left_predictor_8x8_sse2;
vpx_dc_predictor_16x16 = vpx_dc_predictor_16x16_c;
if (flags & HAS_SSE2) vpx_dc_predictor_16x16 = vpx_dc_predictor_16x16_sse2;
vpx_dc_predictor_32x32 = vpx_dc_predictor_32x32_c;
if (flags & HAS_SSE2) vpx_dc_predictor_32x32 = vpx_dc_predictor_32x32_sse2;
vpx_dc_predictor_4x4 = vpx_dc_predictor_4x4_c;
if (flags & HAS_SSE2) vpx_dc_predictor_4x4 = vpx_dc_predictor_4x4_sse2;
vpx_dc_predictor_8x8 = vpx_dc_predictor_8x8_c;
if (flags & HAS_SSE2) vpx_dc_predictor_8x8 = vpx_dc_predictor_8x8_sse2;
vpx_dc_top_predictor_16x16 = vpx_dc_top_predictor_16x16_c;
if (flags & HAS_SSE2) vpx_dc_top_predictor_16x16 = vpx_dc_top_predictor_16x16_sse2;
vpx_dc_top_predictor_32x32 = vpx_dc_top_predictor_32x32_c;
if (flags & HAS_SSE2) vpx_dc_top_predictor_32x32 = vpx_dc_top_predictor_32x32_sse2;
vpx_dc_top_predictor_4x4 = vpx_dc_top_predictor_4x4_c;
if (flags & HAS_SSE2) vpx_dc_top_predictor_4x4 = vpx_dc_top_predictor_4x4_sse2;
vpx_dc_top_predictor_8x8 = vpx_dc_top_predictor_8x8_c;
if (flags & HAS_SSE2) vpx_dc_top_predictor_8x8 = vpx_dc_top_predictor_8x8_sse2;
vpx_h_predictor_16x16 = vpx_h_predictor_16x16_c;
if (flags & HAS_SSE2) vpx_h_predictor_16x16 = vpx_h_predictor_16x16_sse2;
vpx_h_predictor_32x32 = vpx_h_predictor_32x32_c;
if (flags & HAS_SSE2) vpx_h_predictor_32x32 = vpx_h_predictor_32x32_sse2;
vpx_h_predictor_4x4 = vpx_h_predictor_4x4_c;
if (flags & HAS_SSE2) vpx_h_predictor_4x4 = vpx_h_predictor_4x4_sse2;
vpx_h_predictor_8x8 = vpx_h_predictor_8x8_c;
if (flags & HAS_SSE2) vpx_h_predictor_8x8 = vpx_h_predictor_8x8_sse2;
vpx_idct16x16_10_add = vpx_idct16x16_10_add_c;
if (flags & HAS_SSE2) vpx_idct16x16_10_add = vpx_idct16x16_10_add_sse2;
vpx_idct16x16_1_add = vpx_idct16x16_1_add_c;
if (flags & HAS_SSE2) vpx_idct16x16_1_add = vpx_idct16x16_1_add_sse2;
vpx_idct16x16_256_add = vpx_idct16x16_256_add_c;
if (flags & HAS_SSE2) vpx_idct16x16_256_add = vpx_idct16x16_256_add_sse2;
vpx_idct32x32_1024_add = vpx_idct32x32_1024_add_c;
if (flags & HAS_SSE2) vpx_idct32x32_1024_add = vpx_idct32x32_1024_add_sse2;
vpx_idct32x32_135_add = vpx_idct32x32_135_add_c;
if (flags & HAS_SSE2) vpx_idct32x32_135_add = vpx_idct32x32_1024_add_sse2;
vpx_idct32x32_1_add = vpx_idct32x32_1_add_c;
if (flags & HAS_SSE2) vpx_idct32x32_1_add = vpx_idct32x32_1_add_sse2;
vpx_idct32x32_34_add = vpx_idct32x32_34_add_c;
if (flags & HAS_SSE2) vpx_idct32x32_34_add = vpx_idct32x32_34_add_sse2;
vpx_idct4x4_16_add = vpx_idct4x4_16_add_c;
if (flags & HAS_SSE2) vpx_idct4x4_16_add = vpx_idct4x4_16_add_sse2;
vpx_idct4x4_1_add = vpx_idct4x4_1_add_c;
if (flags & HAS_SSE2) vpx_idct4x4_1_add = vpx_idct4x4_1_add_sse2;
vpx_idct8x8_12_add = vpx_idct8x8_12_add_c;
if (flags & HAS_SSE2) vpx_idct8x8_12_add = vpx_idct8x8_12_add_sse2;
vpx_idct8x8_1_add = vpx_idct8x8_1_add_c;
if (flags & HAS_SSE2) vpx_idct8x8_1_add = vpx_idct8x8_1_add_sse2;
vpx_idct8x8_64_add = vpx_idct8x8_64_add_c;
if (flags & HAS_SSE2) vpx_idct8x8_64_add = vpx_idct8x8_64_add_sse2;
vpx_iwht4x4_16_add = vpx_iwht4x4_16_add_c;
if (flags & HAS_SSE2) vpx_iwht4x4_16_add = vpx_iwht4x4_16_add_sse2;
vpx_lpf_horizontal_4 = vpx_lpf_horizontal_4_c;
if (flags & HAS_SSE2) vpx_lpf_horizontal_4 = vpx_lpf_horizontal_4_sse2;
vpx_lpf_horizontal_4_dual = vpx_lpf_horizontal_4_dual_c;
if (flags & HAS_SSE2) vpx_lpf_horizontal_4_dual = vpx_lpf_horizontal_4_dual_sse2;
vpx_lpf_horizontal_8 = vpx_lpf_horizontal_8_c;
if (flags & HAS_SSE2) vpx_lpf_horizontal_8 = vpx_lpf_horizontal_8_sse2;
vpx_lpf_horizontal_8_dual = vpx_lpf_horizontal_8_dual_c;
if (flags & HAS_SSE2) vpx_lpf_horizontal_8_dual = vpx_lpf_horizontal_8_dual_sse2;
vpx_lpf_horizontal_edge_16 = vpx_lpf_horizontal_edge_16_c;
if (flags & HAS_SSE2) vpx_lpf_horizontal_edge_16 = vpx_lpf_horizontal_edge_16_sse2;
vpx_lpf_horizontal_edge_8 = vpx_lpf_horizontal_edge_8_c;
if (flags & HAS_SSE2) vpx_lpf_horizontal_edge_8 = vpx_lpf_horizontal_edge_8_sse2;
vpx_lpf_vertical_16 = vpx_lpf_vertical_16_c;
if (flags & HAS_SSE2) vpx_lpf_vertical_16 = vpx_lpf_vertical_16_sse2;
vpx_lpf_vertical_16_dual = vpx_lpf_vertical_16_dual_c;
if (flags & HAS_SSE2) vpx_lpf_vertical_16_dual = vpx_lpf_vertical_16_dual_sse2;
vpx_lpf_vertical_4 = vpx_lpf_vertical_4_c;
if (flags & HAS_SSE2) vpx_lpf_vertical_4 = vpx_lpf_vertical_4_sse2;
vpx_lpf_vertical_4_dual = vpx_lpf_vertical_4_dual_c;
if (flags & HAS_SSE2) vpx_lpf_vertical_4_dual = vpx_lpf_vertical_4_dual_sse2;
vpx_lpf_vertical_8 = vpx_lpf_vertical_8_c;
if (flags & HAS_SSE2) vpx_lpf_vertical_8 = vpx_lpf_vertical_8_sse2;
vpx_lpf_vertical_8_dual = vpx_lpf_vertical_8_dual_c;
if (flags & HAS_SSE2) vpx_lpf_vertical_8_dual = vpx_lpf_vertical_8_dual_sse2;
vpx_scaled_2d = vpx_scaled_2d_c;
if (flags & HAS_SSSE3) vpx_scaled_2d = vpx_scaled_2d_ssse3;
vpx_tm_predictor_16x16 = vpx_tm_predictor_16x16_c;
if (flags & HAS_SSE2) vpx_tm_predictor_16x16 = vpx_tm_predictor_16x16_sse2;
vpx_tm_predictor_32x32 = vpx_tm_predictor_32x32_c;
if (flags & HAS_SSE2) vpx_tm_predictor_32x32 = vpx_tm_predictor_32x32_sse2;
vpx_tm_predictor_4x4 = vpx_tm_predictor_4x4_c;
if (flags & HAS_SSE2) vpx_tm_predictor_4x4 = vpx_tm_predictor_4x4_sse2;
vpx_tm_predictor_8x8 = vpx_tm_predictor_8x8_c;
if (flags & HAS_SSE2) vpx_tm_predictor_8x8 = vpx_tm_predictor_8x8_sse2;
vpx_v_predictor_16x16 = vpx_v_predictor_16x16_c;
if (flags & HAS_SSE2) vpx_v_predictor_16x16 = vpx_v_predictor_16x16_sse2;
vpx_v_predictor_32x32 = vpx_v_predictor_32x32_c;
if (flags & HAS_SSE2) vpx_v_predictor_32x32 = vpx_v_predictor_32x32_sse2;
vpx_v_predictor_4x4 = vpx_v_predictor_4x4_c;
if (flags & HAS_SSE2) vpx_v_predictor_4x4 = vpx_v_predictor_4x4_sse2;
vpx_v_predictor_8x8 = vpx_v_predictor_8x8_c;
if (flags & HAS_SSE2) vpx_v_predictor_8x8 = vpx_v_predictor_8x8_sse2;
}
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif
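
The header above is libvpx's generated run-time CPU detection (RTCD) table for x86: each kernel is exposed as a function pointer that defaults to the portable `_c` implementation and is rebound once, in setup_rtcd_internal(), to the `_sse2`/`_ssse3` variant whenever x86_simd_caps() reports the matching flag. The following is a minimal, self-contained sketch of that same dispatch pattern; the names add_kernel, simd_caps and HAS_SIMD are illustrative stand-ins, not libvpx API.

#include <stdint.h>

#define HAS_SIMD (1 << 0)                 /* stand-in for HAS_SSE2 / HAS_SSSE3 */

static int simd_caps(void) { return 0; }  /* stand-in for x86_simd_caps() */

/* Portable fallback kernel. */
static void add_c(const uint8_t *a, const uint8_t *b, uint8_t *out, int n)
{
    int i;
    for (i = 0; i < n; ++i) out[i] = (uint8_t)(a[i] + b[i]);
}

/* Placeholder SIMD kernel; a real one would use intrinsics or assembly. */
static void add_simd(const uint8_t *a, const uint8_t *b, uint8_t *out, int n)
{
    add_c(a, b, out, n);
}

/* Dispatch pointer, analogous to the RTCD_EXTERN pointers above. */
static void (*add_kernel)(const uint8_t *, const uint8_t *, uint8_t *, int) = add_c;

/* Analogue of setup_rtcd_internal(): bind each pointer once at startup. */
static void setup_kernels(void)
{
    int flags = simd_caps();
    add_kernel = add_c;
    if (flags & HAS_SIMD) add_kernel = add_simd;
}

Callers then invoke add_kernel() unconditionally; the feature check is paid once rather than per call, which is why libvpx routes every hot path through these pointers.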

File diff suppressed because it is too large

View File

@@ -1,323 +0,0 @@
/*
* Copyright (C) 2010 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef CPU_FEATURES_H
#define CPU_FEATURES_H
#include <sys/cdefs.h>
#include <stdint.h>
__BEGIN_DECLS
/* A list of valid values returned by android_getCpuFamily().
* They describe the CPU Architecture of the current process.
*/
typedef enum {
ANDROID_CPU_FAMILY_UNKNOWN = 0,
ANDROID_CPU_FAMILY_ARM,
ANDROID_CPU_FAMILY_X86,
ANDROID_CPU_FAMILY_MIPS,
ANDROID_CPU_FAMILY_ARM64,
ANDROID_CPU_FAMILY_X86_64,
ANDROID_CPU_FAMILY_MIPS64,
ANDROID_CPU_FAMILY_MAX /* do not remove */
} AndroidCpuFamily;
/* Return the CPU family of the current process.
*
* Note that this matches the bitness of the current process. I.e. when
* running a 32-bit binary on a 64-bit capable CPU, this will return the
* 32-bit CPU family value.
*/
extern AndroidCpuFamily android_getCpuFamily(void);
/* Return a bitmap describing a set of optional CPU features that are
* supported by the current device's CPU. The exact bit-flags returned
* depend on the value returned by android_getCpuFamily(). See the
* documentation for the ANDROID_CPU_*_FEATURE_* flags below for details.
*/
extern uint64_t android_getCpuFeatures(void);
/* The list of feature flags for ANDROID_CPU_FAMILY_ARM that can be
* recognized by the library (see note below for 64-bit ARM). Value details
* are:
*
* VFPv2:
* CPU supports the VFPv2 instruction set. Many, but not all, ARMv6 CPUs
* support these instructions. VFPv2 is a subset of VFPv3 so this will
* be set whenever VFPv3 is set too.
*
* ARMv7:
* CPU supports the ARMv7-A basic instruction set.
* This feature is mandated by the 'armeabi-v7a' ABI.
*
* VFPv3:
* CPU supports the VFPv3-D16 instruction set, providing hardware FPU
* support for single and double precision floating point registers.
* Note that only 16 FPU registers are available by default, unless
* the D32 bit is set too. This feature is also mandated by the
* 'armeabi-v7a' ABI.
*
* VFP_D32:
* CPU VFP optional extension that provides 32 FPU registers,
* instead of 16. Note that ARM mandates this feature if the 'NEON'
* feature is implemented by the CPU.
*
* NEON:
* CPU FPU supports "ARM Advanced SIMD" instructions, also known as
* NEON. Note that this mandates the VFP_D32 feature as well, per the
* ARM Architecture specification.
*
* VFP_FP16:
* Half-precision floating-point VFP extension. If set, the CPU
* supports instructions to perform floating-point operations on
* 16-bit registers. This is part of the VFPv4 specification, but
* not mandated by any Android ABI.
*
* VFP_FMA:
* Fused multiply-accumulate VFP instructions extension. Also part of
* the VFPv4 specification, but not mandated by any Android ABI.
*
* NEON_FMA:
* Fused multiply-accumulate NEON instructions extension. Optional
* extension from the VFPv4 specification, but not mandated by any
* Android ABI.
*
* IDIV_ARM:
* Integer division available in ARM mode. Only available
* on recent CPUs (e.g. Cortex-A15).
*
* IDIV_THUMB2:
* Integer division available in Thumb-2 mode. Only available
* on recent CPUs (e.g. Cortex-A15).
*
* iWMMXt:
* Optional extension that adds MMX registers and operations to an
* ARM CPU. This is only available on a few XScale-based CPU designs
* sold by Marvell. Pretty rare in practice.
*
* AES:
* CPU supports AES instructions. These instructions are only
* available for 32-bit applications running on an ARMv8 CPU.
*
* CRC32:
* CPU supports CRC32 instructions. These instructions are only
* available for 32-bit applications running on an ARMv8 CPU.
*
* SHA2:
* CPU supports SHA2 instructions. These instructions are only
* available for 32-bit applications running on an ARMv8 CPU.
*
* SHA1:
* CPU supports SHA1 instructions. These instructions are only
* available for 32-bit applications running on an ARMv8 CPU.
*
* PMULL:
* CPU supports 64-bit PMULL and PMULL2 instructions. These
* instructions are only available for 32-bit applications
* running on an ARMv8 CPU.
*
* If you want to tell the compiler to generate code that targets one of
* the feature sets above, you should probably use one of the following
* flags (for more details, see technical note at the end of this file):
*
* -mfpu=vfp
* -mfpu=vfpv2
* These are equivalent and tell GCC to use VFPv2 instructions for
* floating-point operations. Use this if you want your code to
* run on *some* ARMv6 devices, and any ARMv7-A device supported
* by Android.
*
* Generated code requires VFPv2 feature.
*
* -mfpu=vfpv3-d16
* Tell GCC to use VFPv3 instructions (using only 16 FPU registers).
* This should be generic code that runs on any CPU that supports the
* 'armeabi-v7a' Android ABI. Note that no ARMv6 CPU supports this.
*
* Generated code requires VFPv3 feature.
*
* -mfpu=vfpv3
* Tell GCC to use VFPv3 instructions with 32 FPU registers.
* Generated code requires VFPv3|VFP_D32 features.
*
* -mfpu=neon
* Tell GCC to use VFPv3 instructions with 32 FPU registers, and
* also support NEON intrinsics (see <arm_neon.h>).
* Generated code requires VFPv3|VFP_D32|NEON features.
*
* -mfpu=vfpv4-d16
* Generated code requires VFPv3|VFP_FP16|VFP_FMA features.
*
* -mfpu=vfpv4
* Generated code requires VFPv3|VFP_FP16|VFP_FMA|VFP_D32 features.
*
* -mfpu=neon-vfpv4
* Generated code requires VFPv3|VFP_FP16|VFP_FMA|VFP_D32|NEON|NEON_FMA
* features.
*
* -mcpu=cortex-a7
* -mcpu=cortex-a15
* Generated code requires VFPv3|VFP_FP16|VFP_FMA|VFP_D32|
* NEON|NEON_FMA|IDIV_ARM|IDIV_THUMB2
* This flag implies -mfpu=neon-vfpv4.
*
* -mcpu=iwmmxt
* Allows the use of iWMMXt intrinsics with GCC.
*
* IMPORTANT NOTE: These flags should only be tested when
* android_getCpuFamily() returns ANDROID_CPU_FAMILY_ARM, i.e. this is a
* 32-bit process.
*
* When running a 64-bit ARM process on an ARMv8 CPU,
* android_getCpuFeatures() will return a different set of bitflags
*/
enum {
ANDROID_CPU_ARM_FEATURE_ARMv7 = (1 << 0),
ANDROID_CPU_ARM_FEATURE_VFPv3 = (1 << 1),
ANDROID_CPU_ARM_FEATURE_NEON = (1 << 2),
ANDROID_CPU_ARM_FEATURE_LDREX_STREX = (1 << 3),
ANDROID_CPU_ARM_FEATURE_VFPv2 = (1 << 4),
ANDROID_CPU_ARM_FEATURE_VFP_D32 = (1 << 5),
ANDROID_CPU_ARM_FEATURE_VFP_FP16 = (1 << 6),
ANDROID_CPU_ARM_FEATURE_VFP_FMA = (1 << 7),
ANDROID_CPU_ARM_FEATURE_NEON_FMA = (1 << 8),
ANDROID_CPU_ARM_FEATURE_IDIV_ARM = (1 << 9),
ANDROID_CPU_ARM_FEATURE_IDIV_THUMB2 = (1 << 10),
ANDROID_CPU_ARM_FEATURE_iWMMXt = (1 << 11),
ANDROID_CPU_ARM_FEATURE_AES = (1 << 12),
ANDROID_CPU_ARM_FEATURE_PMULL = (1 << 13),
ANDROID_CPU_ARM_FEATURE_SHA1 = (1 << 14),
ANDROID_CPU_ARM_FEATURE_SHA2 = (1 << 15),
ANDROID_CPU_ARM_FEATURE_CRC32 = (1 << 16),
};
/* The bit flags corresponding to the output of android_getCpuFeatures()
* when android_getCpuFamily() returns ANDROID_CPU_FAMILY_ARM64. Value details
* are:
*
* FP:
* CPU has Floating-point unit.
*
* ASIMD:
* CPU has Advanced SIMD unit.
*
* AES:
* CPU supports AES instructions.
*
* CRC32:
* CPU supports CRC32 instructions.
*
* SHA2:
* CPU supports SHA2 instructions.
*
* SHA1:
* CPU supports SHA1 instructions.
*
* PMULL:
* CPU supports 64-bit PMULL and PMULL2 instructions.
*/
enum {
ANDROID_CPU_ARM64_FEATURE_FP = (1 << 0),
ANDROID_CPU_ARM64_FEATURE_ASIMD = (1 << 1),
ANDROID_CPU_ARM64_FEATURE_AES = (1 << 2),
ANDROID_CPU_ARM64_FEATURE_PMULL = (1 << 3),
ANDROID_CPU_ARM64_FEATURE_SHA1 = (1 << 4),
ANDROID_CPU_ARM64_FEATURE_SHA2 = (1 << 5),
ANDROID_CPU_ARM64_FEATURE_CRC32 = (1 << 6),
};
/* The bit flags corresponding to the output of android_getCpuFeatures()
* when android_getCpuFamily() returns ANDROID_CPU_FAMILY_X86 or
* ANDROID_CPU_FAMILY_X86_64.
*/
enum {
ANDROID_CPU_X86_FEATURE_SSSE3 = (1 << 0),
ANDROID_CPU_X86_FEATURE_POPCNT = (1 << 1),
ANDROID_CPU_X86_FEATURE_MOVBE = (1 << 2),
ANDROID_CPU_X86_FEATURE_SSE4_1 = (1 << 3),
ANDROID_CPU_X86_FEATURE_SSE4_2 = (1 << 4),
ANDROID_CPU_X86_FEATURE_AES_NI = (1 << 5),
ANDROID_CPU_X86_FEATURE_AVX = (1 << 6),
ANDROID_CPU_X86_FEATURE_RDRAND = (1 << 7),
ANDROID_CPU_X86_FEATURE_AVX2 = (1 << 8),
ANDROID_CPU_X86_FEATURE_SHA_NI = (1 << 9),
};
/* The bit flags corresponding to the output of android_getCpuFeatures()
* when android_getCpuFamily() returns ANDROID_CPU_FAMILY_MIPS
* or ANDROID_CPU_FAMILY_MIPS64. Values are:
*
* R6:
* CPU executes MIPS Release 6 instructions natively, and
* supports obsoleted R1..R5 instructions only via kernel traps.
*
* MSA:
* CPU supports Mips SIMD Architecture instructions.
*/
enum {
ANDROID_CPU_MIPS_FEATURE_R6 = (1 << 0),
ANDROID_CPU_MIPS_FEATURE_MSA = (1 << 1),
};
/* Return the number of CPU cores detected on this device. */
extern int android_getCpuCount(void);
/* The following is used to force the CPU count and features
* mask in sandboxed processes. Under 4.1 and higher, these processes
* cannot access /proc, which is the only way to get information from
* the kernel about the current hardware (at least on ARM).
*
* It _must_ be called only once, and before any android_getCpuXXX
* function; any other case will fail.
*
* This function returns 1 on success, and 0 on failure.
*/
extern int android_setCpu(int cpu_count,
uint64_t cpu_features);
#ifdef __arm__
/* Retrieve the ARM 32-bit CPUID value from the kernel.
* Note that this cannot work on sandboxed processes under 4.1 and
* higher, unless you called android_setCpuArm() before.
*/
extern uint32_t android_getCpuIdArm(void);
/* An ARM-specific variant of android_setCpu() that also allows you
* to set the ARM CPUID field.
*/
extern int android_setCpuArm(int cpu_count,
uint64_t cpu_features,
uint32_t cpu_id);
#endif
__END_DECLS
#endif /* CPU_FEATURES_H */
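
Since cpu-features.h above only declares the query functions and flag enums, here is a brief caller-side sketch of how they are typically combined; the helper names and the include path are assumptions for illustration, not part of the header.

#include <cpu-features.h>   /* assumed include path for the header above */

/* NEON is only meaningful when the reported family is 32-bit ARM. */
static int device_has_neon(void)
{
    if (android_getCpuFamily() != ANDROID_CPU_FAMILY_ARM)
        return 0;
    return (android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON) != 0;
}

/* The x86 flags apply to both the 32-bit and 64-bit x86 families. */
static int device_has_ssse3(void)
{
    AndroidCpuFamily family = android_getCpuFamily();
    if (family != ANDROID_CPU_FAMILY_X86 && family != ANDROID_CPU_FAMILY_X86_64)
        return 0;
    return (android_getCpuFeatures() & ANDROID_CPU_X86_FEATURE_SSSE3) != 0;
}

The family check matters because the same bit positions are reused across the per-family enums, so the bitmap returned by android_getCpuFeatures() is only interpretable against the enum that matches android_getCpuFamily().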

View File

@@ -1,18 +0,0 @@
Copyright (C) 2005-2012 x264 project
Authors: Loren Merritt <lorenm@u.washington.edu>
Anton Mitrofanov <BugMaster@narod.ru>
Jason Garrett-Glaser <darkshikari@gmail.com>
Henrik Gramner <hengar-6@student.ltu.se>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

View File

@@ -1,20 +0,0 @@
URL: https://git.videolan.org/git/x264.git
Version: d23d18655249944c1ca894b451e2c82c7a584c62
License: ISC
License File: LICENSE
Description:
x264/libav's framework for x86 assembly. Contains a variety of macros and
defines that help automatically allow assembly to work cross-platform.
Local Modifications:
Get configuration from vpx_config.asm.
Prefix functions with vpx by default.
Manage name mangling (prefixing with '_') manually because 'PREFIX' does not
exist in libvpx.
Expand PIC default to macho64 and respect CONFIG_PIC from libvpx
Set 'private_extern' visibility for macho targets.
Copy PIC 'GLOBAL' macros from x86_abi_support.asm
Use .text instead of .rodata on macho to avoid broken tables in PIC mode.
Use .text with no alignment for aout
Only use 'hidden' visibility with Chromium

File diff suppressed because it is too large

View File

@@ -1,190 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "vpx_config.h"
#include "alloccommon.h"
#include "blockd.h"
#include "vpx_mem/vpx_mem.h"
#include "onyxc_int.h"
#include "findnearmv.h"
#include "entropymode.h"
#include "systemdependent.h"
void vp8_de_alloc_frame_buffers(VP8_COMMON *oci)
{
int i;
for (i = 0; i < NUM_YV12_BUFFERS; i++)
vp8_yv12_de_alloc_frame_buffer(&oci->yv12_fb[i]);
vp8_yv12_de_alloc_frame_buffer(&oci->temp_scale_frame);
#if CONFIG_POSTPROC
vp8_yv12_de_alloc_frame_buffer(&oci->post_proc_buffer);
if (oci->post_proc_buffer_int_used)
vp8_yv12_de_alloc_frame_buffer(&oci->post_proc_buffer_int);
vpx_free(oci->pp_limits_buffer);
oci->pp_limits_buffer = NULL;
#endif
vpx_free(oci->above_context);
vpx_free(oci->mip);
#if CONFIG_ERROR_CONCEALMENT
vpx_free(oci->prev_mip);
oci->prev_mip = NULL;
#endif
oci->above_context = NULL;
oci->mip = NULL;
}
int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height)
{
int i;
vp8_de_alloc_frame_buffers(oci);
/* our internal buffers are always multiples of 16 */
if ((width & 0xf) != 0)
width += 16 - (width & 0xf);
if ((height & 0xf) != 0)
height += 16 - (height & 0xf);
for (i = 0; i < NUM_YV12_BUFFERS; i++)
{
oci->fb_idx_ref_cnt[i] = 0;
oci->yv12_fb[i].flags = 0;
if (vp8_yv12_alloc_frame_buffer(&oci->yv12_fb[i], width, height, VP8BORDERINPIXELS) < 0)
goto allocation_fail;
}
oci->new_fb_idx = 0;
oci->lst_fb_idx = 1;
oci->gld_fb_idx = 2;
oci->alt_fb_idx = 3;
oci->fb_idx_ref_cnt[0] = 1;
oci->fb_idx_ref_cnt[1] = 1;
oci->fb_idx_ref_cnt[2] = 1;
oci->fb_idx_ref_cnt[3] = 1;
if (vp8_yv12_alloc_frame_buffer(&oci->temp_scale_frame, width, 16, VP8BORDERINPIXELS) < 0)
goto allocation_fail;
oci->mb_rows = height >> 4;
oci->mb_cols = width >> 4;
oci->MBs = oci->mb_rows * oci->mb_cols;
oci->mode_info_stride = oci->mb_cols + 1;
oci->mip = vpx_calloc((oci->mb_cols + 1) * (oci->mb_rows + 1), sizeof(MODE_INFO));
if (!oci->mip)
goto allocation_fail;
oci->mi = oci->mip + oci->mode_info_stride + 1;
/* Allocation of previous mode info will be done in vp8_decode_frame()
* as it is decoder-only data */
oci->above_context = vpx_calloc(sizeof(ENTROPY_CONTEXT_PLANES) * oci->mb_cols, 1);
if (!oci->above_context)
goto allocation_fail;
#if CONFIG_POSTPROC
if (vp8_yv12_alloc_frame_buffer(&oci->post_proc_buffer, width, height, VP8BORDERINPIXELS) < 0)
goto allocation_fail;
oci->post_proc_buffer_int_used = 0;
memset(&oci->postproc_state, 0, sizeof(oci->postproc_state));
memset(oci->post_proc_buffer.buffer_alloc, 128,
oci->post_proc_buffer.frame_size);
/* Allocate buffer to store post-processing filter coefficients.
*
* Note: Round up mb_cols to support SIMD reads
*/
oci->pp_limits_buffer = vpx_memalign(16, 24 * ((oci->mb_cols + 1) & ~1));
if (!oci->pp_limits_buffer)
goto allocation_fail;
#endif
return 0;
allocation_fail:
vp8_de_alloc_frame_buffers(oci);
return 1;
}
void vp8_setup_version(VP8_COMMON *cm)
{
switch (cm->version)
{
case 0:
cm->no_lpf = 0;
cm->filter_type = NORMAL_LOOPFILTER;
cm->use_bilinear_mc_filter = 0;
cm->full_pixel = 0;
break;
case 1:
cm->no_lpf = 0;
cm->filter_type = SIMPLE_LOOPFILTER;
cm->use_bilinear_mc_filter = 1;
cm->full_pixel = 0;
break;
case 2:
cm->no_lpf = 1;
cm->filter_type = NORMAL_LOOPFILTER;
cm->use_bilinear_mc_filter = 1;
cm->full_pixel = 0;
break;
case 3:
cm->no_lpf = 1;
cm->filter_type = SIMPLE_LOOPFILTER;
cm->use_bilinear_mc_filter = 1;
cm->full_pixel = 1;
break;
default:
/*4,5,6,7 are reserved for future use*/
cm->no_lpf = 0;
cm->filter_type = NORMAL_LOOPFILTER;
cm->use_bilinear_mc_filter = 0;
cm->full_pixel = 0;
break;
}
}
void vp8_create_common(VP8_COMMON *oci)
{
vp8_machine_specific_config(oci);
vp8_init_mbmode_probs(oci);
vp8_default_bmode_probs(oci->fc.bmode_prob);
oci->mb_no_coeff_skip = 1;
oci->no_lpf = 0;
oci->filter_type = NORMAL_LOOPFILTER;
oci->use_bilinear_mc_filter = 0;
oci->full_pixel = 0;
oci->multi_token_partition = ONE_PARTITION;
oci->clamp_type = RECON_CLAMP_REQUIRED;
/* Initialize reference frame sign bias structure to defaults */
memset(oci->ref_frame_sign_bias, 0, sizeof(oci->ref_frame_sign_bias));
/* Disable buffer-to-buffer copying by default */
oci->copy_buffer_to_gf = 0;
oci->copy_buffer_to_arf = 0;
}
void vp8_remove_common(VP8_COMMON *oci)
{
vp8_de_alloc_frame_buffers(oci);
}

View File

@ -1,31 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_ALLOCCOMMON_H_
#define VP8_COMMON_ALLOCCOMMON_H_
#include "onyxc_int.h"
#ifdef __cplusplus
extern "C" {
#endif
void vp8_create_common(VP8_COMMON *oci);
void vp8_remove_common(VP8_COMMON *oci);
void vp8_de_alloc_frame_buffers(VP8_COMMON *oci);
int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height);
void vp8_setup_version(VP8_COMMON *oci);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_ALLOCCOMMON_H_

View File

@ -1,181 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "vp8/common/loopfilter.h"
#include "vp8/common/onyxc_int.h"
#define prototype_loopfilter(sym) \
void sym(unsigned char *src, int pitch, const unsigned char *blimit,\
const unsigned char *limit, const unsigned char *thresh, int count)
#if HAVE_MEDIA
extern prototype_loopfilter(vp8_loop_filter_horizontal_edge_armv6);
extern prototype_loopfilter(vp8_loop_filter_vertical_edge_armv6);
extern prototype_loopfilter(vp8_mbloop_filter_horizontal_edge_armv6);
extern prototype_loopfilter(vp8_mbloop_filter_vertical_edge_armv6);
#endif
#if HAVE_NEON
typedef void loopfilter_y_neon(unsigned char *src, int pitch,
unsigned char blimit, unsigned char limit, unsigned char thresh);
typedef void loopfilter_uv_neon(unsigned char *u, int pitch,
unsigned char blimit, unsigned char limit, unsigned char thresh,
unsigned char *v);
extern loopfilter_y_neon vp8_loop_filter_horizontal_edge_y_neon;
extern loopfilter_y_neon vp8_loop_filter_vertical_edge_y_neon;
extern loopfilter_uv_neon vp8_loop_filter_horizontal_edge_uv_neon;
extern loopfilter_uv_neon vp8_loop_filter_vertical_edge_uv_neon;
extern loopfilter_y_neon vp8_mbloop_filter_horizontal_edge_y_neon;
extern loopfilter_y_neon vp8_mbloop_filter_vertical_edge_y_neon;
extern loopfilter_uv_neon vp8_mbloop_filter_horizontal_edge_uv_neon;
extern loopfilter_uv_neon vp8_mbloop_filter_vertical_edge_uv_neon;
#endif
#if HAVE_MEDIA
/* ARMV6/MEDIA loopfilter functions*/
/* Horizontal MB filtering */
void vp8_loop_filter_mbh_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
int y_stride, int uv_stride, loop_filter_info *lfi)
{
vp8_mbloop_filter_horizontal_edge_armv6(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp8_mbloop_filter_horizontal_edge_armv6(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp8_mbloop_filter_horizontal_edge_armv6(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
}
/* Vertical MB Filtering */
void vp8_loop_filter_mbv_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
int y_stride, int uv_stride, loop_filter_info *lfi)
{
vp8_mbloop_filter_vertical_edge_armv6(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp8_mbloop_filter_vertical_edge_armv6(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp8_mbloop_filter_vertical_edge_armv6(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
}
/* Horizontal B Filtering */
void vp8_loop_filter_bh_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
int y_stride, int uv_stride, loop_filter_info *lfi)
{
vp8_loop_filter_horizontal_edge_armv6(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_horizontal_edge_armv6(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_horizontal_edge_armv6(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp8_loop_filter_horizontal_edge_armv6(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp8_loop_filter_horizontal_edge_armv6(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
void vp8_loop_filter_bhs_armv6(unsigned char *y_ptr, int y_stride,
const unsigned char *blimit)
{
vp8_loop_filter_simple_horizontal_edge_armv6(y_ptr + 4 * y_stride, y_stride, blimit);
vp8_loop_filter_simple_horizontal_edge_armv6(y_ptr + 8 * y_stride, y_stride, blimit);
vp8_loop_filter_simple_horizontal_edge_armv6(y_ptr + 12 * y_stride, y_stride, blimit);
}
/* Vertical B Filtering */
void vp8_loop_filter_bv_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
int y_stride, int uv_stride, loop_filter_info *lfi)
{
vp8_loop_filter_vertical_edge_armv6(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_vertical_edge_armv6(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_vertical_edge_armv6(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp8_loop_filter_vertical_edge_armv6(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp8_loop_filter_vertical_edge_armv6(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
void vp8_loop_filter_bvs_armv6(unsigned char *y_ptr, int y_stride,
const unsigned char *blimit)
{
vp8_loop_filter_simple_vertical_edge_armv6(y_ptr + 4, y_stride, blimit);
vp8_loop_filter_simple_vertical_edge_armv6(y_ptr + 8, y_stride, blimit);
vp8_loop_filter_simple_vertical_edge_armv6(y_ptr + 12, y_stride, blimit);
}
#endif
#if HAVE_NEON
/* NEON loopfilter functions */
/* Horizontal MB filtering */
void vp8_loop_filter_mbh_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
int y_stride, int uv_stride, loop_filter_info *lfi)
{
unsigned char mblim = *lfi->mblim;
unsigned char lim = *lfi->lim;
unsigned char hev_thr = *lfi->hev_thr;
vp8_mbloop_filter_horizontal_edge_y_neon(y_ptr, y_stride, mblim, lim, hev_thr);
if (u_ptr)
vp8_mbloop_filter_horizontal_edge_uv_neon(u_ptr, uv_stride, mblim, lim, hev_thr, v_ptr);
}
/* Vertical MB Filtering */
void vp8_loop_filter_mbv_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
int y_stride, int uv_stride, loop_filter_info *lfi)
{
unsigned char mblim = *lfi->mblim;
unsigned char lim = *lfi->lim;
unsigned char hev_thr = *lfi->hev_thr;
vp8_mbloop_filter_vertical_edge_y_neon(y_ptr, y_stride, mblim, lim, hev_thr);
if (u_ptr)
vp8_mbloop_filter_vertical_edge_uv_neon(u_ptr, uv_stride, mblim, lim, hev_thr, v_ptr);
}
/* Horizontal B Filtering */
void vp8_loop_filter_bh_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
int y_stride, int uv_stride, loop_filter_info *lfi)
{
unsigned char blim = *lfi->blim;
unsigned char lim = *lfi->lim;
unsigned char hev_thr = *lfi->hev_thr;
vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 4 * y_stride, y_stride, blim, lim, hev_thr);
vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 8 * y_stride, y_stride, blim, lim, hev_thr);
vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 12 * y_stride, y_stride, blim, lim, hev_thr);
if (u_ptr)
vp8_loop_filter_horizontal_edge_uv_neon(u_ptr + 4 * uv_stride, uv_stride, blim, lim, hev_thr, v_ptr + 4 * uv_stride);
}
/* Vertical B Filtering */
void vp8_loop_filter_bv_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
int y_stride, int uv_stride, loop_filter_info *lfi)
{
unsigned char blim = *lfi->blim;
unsigned char lim = *lfi->lim;
unsigned char hev_thr = *lfi->hev_thr;
vp8_loop_filter_vertical_edge_y_neon(y_ptr + 4, y_stride, blim, lim, hev_thr);
vp8_loop_filter_vertical_edge_y_neon(y_ptr + 8, y_stride, blim, lim, hev_thr);
vp8_loop_filter_vertical_edge_y_neon(y_ptr + 12, y_stride, blim, lim, hev_thr);
if (u_ptr)
vp8_loop_filter_vertical_edge_uv_neon(u_ptr + 4, uv_stride, blim, lim, hev_thr, v_ptr + 4);
}
#endif

View File

@ -1,591 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
static const uint8_t bifilter4_coeff[8][2] = {
{128, 0},
{112, 16},
{ 96, 32},
{ 80, 48},
{ 64, 64},
{ 48, 80},
{ 32, 96},
{ 16, 112}
};
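/* Each row of bifilter4_coeff is a pair of bilinear tap weights that sums to
 * 128; a filtered pixel is (a * w0 + b * w1 + 64) >> 7, which the code below
 * realizes with vmull_u8/vmlal_u8 followed by the rounding narrowing shift
 * vqrshrn_n_u16(..., 7). */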
void vp8_bilinear_predict8x4_neon(
unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
unsigned char *dst_ptr,
int dst_pitch) {
uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8;
uint8x8_t d7u8, d9u8, d11u8, d22u8, d23u8, d24u8, d25u8, d26u8;
uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8;
uint16x8_t q1u16, q2u16, q3u16, q4u16;
uint16x8_t q6u16, q7u16, q8u16, q9u16, q10u16;
if (xoffset == 0) { // skip_1stpass_filter
d22u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
d23u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
d24u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
d25u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
d26u8 = vld1_u8(src_ptr);
} else {
q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q3u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q4u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q5u8 = vld1q_u8(src_ptr);
d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);
d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1);
d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
d11u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
q6u16 = vmlal_u8(q6u16, d3u8, d1u8);
q7u16 = vmlal_u8(q7u16, d5u8, d1u8);
q8u16 = vmlal_u8(q8u16, d7u8, d1u8);
q9u16 = vmlal_u8(q9u16, d9u8, d1u8);
q10u16 = vmlal_u8(q10u16, d11u8, d1u8);
d22u8 = vqrshrn_n_u16(q6u16, 7);
d23u8 = vqrshrn_n_u16(q7u16, 7);
d24u8 = vqrshrn_n_u16(q8u16, 7);
d25u8 = vqrshrn_n_u16(q9u16, 7);
d26u8 = vqrshrn_n_u16(q10u16, 7);
}
// secondpass_filter
if (yoffset == 0) { // skip_2ndpass_filter
vst1_u8((uint8_t *)dst_ptr, d22u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d23u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d24u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d25u8);
} else {
d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
q1u16 = vmull_u8(d22u8, d0u8);
q2u16 = vmull_u8(d23u8, d0u8);
q3u16 = vmull_u8(d24u8, d0u8);
q4u16 = vmull_u8(d25u8, d0u8);
q1u16 = vmlal_u8(q1u16, d23u8, d1u8);
q2u16 = vmlal_u8(q2u16, d24u8, d1u8);
q3u16 = vmlal_u8(q3u16, d25u8, d1u8);
q4u16 = vmlal_u8(q4u16, d26u8, d1u8);
d2u8 = vqrshrn_n_u16(q1u16, 7);
d3u8 = vqrshrn_n_u16(q2u16, 7);
d4u8 = vqrshrn_n_u16(q3u16, 7);
d5u8 = vqrshrn_n_u16(q4u16, 7);
vst1_u8((uint8_t *)dst_ptr, d2u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d3u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d4u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d5u8);
}
return;
}
void vp8_bilinear_predict8x8_neon(
unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
unsigned char *dst_ptr,
int dst_pitch) {
uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8, d11u8;
uint8x8_t d22u8, d23u8, d24u8, d25u8, d26u8, d27u8, d28u8, d29u8, d30u8;
uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8;
uint16x8_t q1u16, q2u16, q3u16, q4u16, q5u16;
uint16x8_t q6u16, q7u16, q8u16, q9u16, q10u16;
if (xoffset == 0) { // skip_1stpass_filter
d22u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
d23u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
d24u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
d25u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
d26u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
d27u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
d28u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
d29u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
d30u8 = vld1_u8(src_ptr);
} else {
q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q3u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q4u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);
d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1);
d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
q6u16 = vmlal_u8(q6u16, d3u8, d1u8);
q7u16 = vmlal_u8(q7u16, d5u8, d1u8);
q8u16 = vmlal_u8(q8u16, d7u8, d1u8);
q9u16 = vmlal_u8(q9u16, d9u8, d1u8);
d22u8 = vqrshrn_n_u16(q6u16, 7);
d23u8 = vqrshrn_n_u16(q7u16, 7);
d24u8 = vqrshrn_n_u16(q8u16, 7);
d25u8 = vqrshrn_n_u16(q9u16, 7);
// first_pass filtering on the remaining 5 lines of data
q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q3u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q4u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q5u8 = vld1q_u8(src_ptr);
q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);
d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1);
d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
d11u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
q6u16 = vmlal_u8(q6u16, d3u8, d1u8);
q7u16 = vmlal_u8(q7u16, d5u8, d1u8);
q8u16 = vmlal_u8(q8u16, d7u8, d1u8);
q9u16 = vmlal_u8(q9u16, d9u8, d1u8);
q10u16 = vmlal_u8(q10u16, d11u8, d1u8);
d26u8 = vqrshrn_n_u16(q6u16, 7);
d27u8 = vqrshrn_n_u16(q7u16, 7);
d28u8 = vqrshrn_n_u16(q8u16, 7);
d29u8 = vqrshrn_n_u16(q9u16, 7);
d30u8 = vqrshrn_n_u16(q10u16, 7);
}
// secondpass_filter
if (yoffset == 0) { // skip_2ndpass_filter
vst1_u8((uint8_t *)dst_ptr, d22u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d23u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d24u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d25u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d26u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d27u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d28u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d29u8);
} else {
d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
q1u16 = vmull_u8(d22u8, d0u8);
q2u16 = vmull_u8(d23u8, d0u8);
q3u16 = vmull_u8(d24u8, d0u8);
q4u16 = vmull_u8(d25u8, d0u8);
q5u16 = vmull_u8(d26u8, d0u8);
q6u16 = vmull_u8(d27u8, d0u8);
q7u16 = vmull_u8(d28u8, d0u8);
q8u16 = vmull_u8(d29u8, d0u8);
q1u16 = vmlal_u8(q1u16, d23u8, d1u8);
q2u16 = vmlal_u8(q2u16, d24u8, d1u8);
q3u16 = vmlal_u8(q3u16, d25u8, d1u8);
q4u16 = vmlal_u8(q4u16, d26u8, d1u8);
q5u16 = vmlal_u8(q5u16, d27u8, d1u8);
q6u16 = vmlal_u8(q6u16, d28u8, d1u8);
q7u16 = vmlal_u8(q7u16, d29u8, d1u8);
q8u16 = vmlal_u8(q8u16, d30u8, d1u8);
d2u8 = vqrshrn_n_u16(q1u16, 7);
d3u8 = vqrshrn_n_u16(q2u16, 7);
d4u8 = vqrshrn_n_u16(q3u16, 7);
d5u8 = vqrshrn_n_u16(q4u16, 7);
d6u8 = vqrshrn_n_u16(q5u16, 7);
d7u8 = vqrshrn_n_u16(q6u16, 7);
d8u8 = vqrshrn_n_u16(q7u16, 7);
d9u8 = vqrshrn_n_u16(q8u16, 7);
vst1_u8((uint8_t *)dst_ptr, d2u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d3u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d4u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d5u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d6u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d7u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d8u8); dst_ptr += dst_pitch;
vst1_u8((uint8_t *)dst_ptr, d9u8);
}
return;
}
void vp8_bilinear_predict16x16_neon(
unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
unsigned char *dst_ptr,
int dst_pitch) {
int i;
unsigned char tmp[272];
unsigned char *tmpp;
uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d16u8, d17u8, d18u8;
uint8x8_t d19u8, d20u8, d21u8;
uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8, q10u8;
uint8x16_t q11u8, q12u8, q13u8, q14u8, q15u8;
uint16x8_t q1u16, q2u16, q3u16, q4u16, q5u16, q6u16, q7u16, q8u16;
uint16x8_t q9u16, q10u16, q11u16, q12u16, q13u16, q14u16;
if (xoffset == 0) { // secondpass_bfilter16x16_only
d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
q11u8 = vld1q_u8(src_ptr);
src_ptr += src_pixels_per_line;
for (i = 4; i > 0; i--) {
q12u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q13u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q14u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q15u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8);
q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8);
q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8);
q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8);
q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8);
q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8);
q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8);
q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8);
q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8);
q2u16 = vmlal_u8(q2u16, vget_high_u8(q12u8), d1u8);
q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8);
q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8);
q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8);
q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8);
q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8);
q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8);
d2u8 = vqrshrn_n_u16(q1u16, 7);
d3u8 = vqrshrn_n_u16(q2u16, 7);
d4u8 = vqrshrn_n_u16(q3u16, 7);
d5u8 = vqrshrn_n_u16(q4u16, 7);
d6u8 = vqrshrn_n_u16(q5u16, 7);
d7u8 = vqrshrn_n_u16(q6u16, 7);
d8u8 = vqrshrn_n_u16(q7u16, 7);
d9u8 = vqrshrn_n_u16(q8u16, 7);
q1u8 = vcombine_u8(d2u8, d3u8);
q2u8 = vcombine_u8(d4u8, d5u8);
q3u8 = vcombine_u8(d6u8, d7u8);
q4u8 = vcombine_u8(d8u8, d9u8);
q11u8 = q15u8;
vst1q_u8((uint8_t *)dst_ptr, q1u8); dst_ptr += dst_pitch;
vst1q_u8((uint8_t *)dst_ptr, q2u8); dst_ptr += dst_pitch;
vst1q_u8((uint8_t *)dst_ptr, q3u8); dst_ptr += dst_pitch;
vst1q_u8((uint8_t *)dst_ptr, q4u8); dst_ptr += dst_pitch;
}
return;
}
if (yoffset == 0) { // firstpass_bfilter16x16_only
d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
for (i = 4; i > 0 ; i--) {
d2u8 = vld1_u8(src_ptr);
d3u8 = vld1_u8(src_ptr + 8);
d4u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
d5u8 = vld1_u8(src_ptr);
d6u8 = vld1_u8(src_ptr + 8);
d7u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
d8u8 = vld1_u8(src_ptr);
d9u8 = vld1_u8(src_ptr + 8);
d10u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
d11u8 = vld1_u8(src_ptr);
d12u8 = vld1_u8(src_ptr + 8);
d13u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
q7u16 = vmull_u8(d2u8, d0u8);
q8u16 = vmull_u8(d3u8, d0u8);
q9u16 = vmull_u8(d5u8, d0u8);
q10u16 = vmull_u8(d6u8, d0u8);
q11u16 = vmull_u8(d8u8, d0u8);
q12u16 = vmull_u8(d9u8, d0u8);
q13u16 = vmull_u8(d11u8, d0u8);
q14u16 = vmull_u8(d12u8, d0u8);
d2u8 = vext_u8(d2u8, d3u8, 1);
d5u8 = vext_u8(d5u8, d6u8, 1);
d8u8 = vext_u8(d8u8, d9u8, 1);
d11u8 = vext_u8(d11u8, d12u8, 1);
q7u16 = vmlal_u8(q7u16, d2u8, d1u8);
q9u16 = vmlal_u8(q9u16, d5u8, d1u8);
q11u16 = vmlal_u8(q11u16, d8u8, d1u8);
q13u16 = vmlal_u8(q13u16, d11u8, d1u8);
d3u8 = vext_u8(d3u8, d4u8, 1);
d6u8 = vext_u8(d6u8, d7u8, 1);
d9u8 = vext_u8(d9u8, d10u8, 1);
d12u8 = vext_u8(d12u8, d13u8, 1);
q8u16 = vmlal_u8(q8u16, d3u8, d1u8);
q10u16 = vmlal_u8(q10u16, d6u8, d1u8);
q12u16 = vmlal_u8(q12u16, d9u8, d1u8);
q14u16 = vmlal_u8(q14u16, d12u8, d1u8);
d14u8 = vqrshrn_n_u16(q7u16, 7);
d15u8 = vqrshrn_n_u16(q8u16, 7);
d16u8 = vqrshrn_n_u16(q9u16, 7);
d17u8 = vqrshrn_n_u16(q10u16, 7);
d18u8 = vqrshrn_n_u16(q11u16, 7);
d19u8 = vqrshrn_n_u16(q12u16, 7);
d20u8 = vqrshrn_n_u16(q13u16, 7);
d21u8 = vqrshrn_n_u16(q14u16, 7);
q7u8 = vcombine_u8(d14u8, d15u8);
q8u8 = vcombine_u8(d16u8, d17u8);
q9u8 = vcombine_u8(d18u8, d19u8);
q10u8 = vcombine_u8(d20u8, d21u8);
vst1q_u8((uint8_t *)dst_ptr, q7u8); dst_ptr += dst_pitch;
vst1q_u8((uint8_t *)dst_ptr, q8u8); dst_ptr += dst_pitch;
vst1q_u8((uint8_t *)dst_ptr, q9u8); dst_ptr += dst_pitch;
vst1q_u8((uint8_t *)dst_ptr, q10u8); dst_ptr += dst_pitch;
}
return;
}
d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
d2u8 = vld1_u8(src_ptr);
d3u8 = vld1_u8(src_ptr + 8);
d4u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
d5u8 = vld1_u8(src_ptr);
d6u8 = vld1_u8(src_ptr + 8);
d7u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
d8u8 = vld1_u8(src_ptr);
d9u8 = vld1_u8(src_ptr + 8);
d10u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
d11u8 = vld1_u8(src_ptr);
d12u8 = vld1_u8(src_ptr + 8);
d13u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
// First Pass: output_height lines x output_width columns (17x16)
tmpp = tmp;
for (i = 3; i > 0; i--) {
q7u16 = vmull_u8(d2u8, d0u8);
q8u16 = vmull_u8(d3u8, d0u8);
q9u16 = vmull_u8(d5u8, d0u8);
q10u16 = vmull_u8(d6u8, d0u8);
q11u16 = vmull_u8(d8u8, d0u8);
q12u16 = vmull_u8(d9u8, d0u8);
q13u16 = vmull_u8(d11u8, d0u8);
q14u16 = vmull_u8(d12u8, d0u8);
d2u8 = vext_u8(d2u8, d3u8, 1);
d5u8 = vext_u8(d5u8, d6u8, 1);
d8u8 = vext_u8(d8u8, d9u8, 1);
d11u8 = vext_u8(d11u8, d12u8, 1);
q7u16 = vmlal_u8(q7u16, d2u8, d1u8);
q9u16 = vmlal_u8(q9u16, d5u8, d1u8);
q11u16 = vmlal_u8(q11u16, d8u8, d1u8);
q13u16 = vmlal_u8(q13u16, d11u8, d1u8);
d3u8 = vext_u8(d3u8, d4u8, 1);
d6u8 = vext_u8(d6u8, d7u8, 1);
d9u8 = vext_u8(d9u8, d10u8, 1);
d12u8 = vext_u8(d12u8, d13u8, 1);
q8u16 = vmlal_u8(q8u16, d3u8, d1u8);
q10u16 = vmlal_u8(q10u16, d6u8, d1u8);
q12u16 = vmlal_u8(q12u16, d9u8, d1u8);
q14u16 = vmlal_u8(q14u16, d12u8, d1u8);
d14u8 = vqrshrn_n_u16(q7u16, 7);
d15u8 = vqrshrn_n_u16(q8u16, 7);
d16u8 = vqrshrn_n_u16(q9u16, 7);
d17u8 = vqrshrn_n_u16(q10u16, 7);
d18u8 = vqrshrn_n_u16(q11u16, 7);
d19u8 = vqrshrn_n_u16(q12u16, 7);
d20u8 = vqrshrn_n_u16(q13u16, 7);
d21u8 = vqrshrn_n_u16(q14u16, 7);
d2u8 = vld1_u8(src_ptr);
d3u8 = vld1_u8(src_ptr + 8);
d4u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
d5u8 = vld1_u8(src_ptr);
d6u8 = vld1_u8(src_ptr + 8);
d7u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
d8u8 = vld1_u8(src_ptr);
d9u8 = vld1_u8(src_ptr + 8);
d10u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
d11u8 = vld1_u8(src_ptr);
d12u8 = vld1_u8(src_ptr + 8);
d13u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
q7u8 = vcombine_u8(d14u8, d15u8);
q8u8 = vcombine_u8(d16u8, d17u8);
q9u8 = vcombine_u8(d18u8, d19u8);
q10u8 = vcombine_u8(d20u8, d21u8);
vst1q_u8((uint8_t *)tmpp, q7u8); tmpp += 16;
vst1q_u8((uint8_t *)tmpp, q8u8); tmpp += 16;
vst1q_u8((uint8_t *)tmpp, q9u8); tmpp += 16;
vst1q_u8((uint8_t *)tmpp, q10u8); tmpp += 16;
}
// First-pass filtering for the remaining 5 lines
d14u8 = vld1_u8(src_ptr);
d15u8 = vld1_u8(src_ptr + 8);
d16u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
q9u16 = vmull_u8(d2u8, d0u8);
q10u16 = vmull_u8(d3u8, d0u8);
q11u16 = vmull_u8(d5u8, d0u8);
q12u16 = vmull_u8(d6u8, d0u8);
q13u16 = vmull_u8(d8u8, d0u8);
q14u16 = vmull_u8(d9u8, d0u8);
d2u8 = vext_u8(d2u8, d3u8, 1);
d5u8 = vext_u8(d5u8, d6u8, 1);
d8u8 = vext_u8(d8u8, d9u8, 1);
q9u16 = vmlal_u8(q9u16, d2u8, d1u8);
q11u16 = vmlal_u8(q11u16, d5u8, d1u8);
q13u16 = vmlal_u8(q13u16, d8u8, d1u8);
d3u8 = vext_u8(d3u8, d4u8, 1);
d6u8 = vext_u8(d6u8, d7u8, 1);
d9u8 = vext_u8(d9u8, d10u8, 1);
q10u16 = vmlal_u8(q10u16, d3u8, d1u8);
q12u16 = vmlal_u8(q12u16, d6u8, d1u8);
q14u16 = vmlal_u8(q14u16, d9u8, d1u8);
q1u16 = vmull_u8(d11u8, d0u8);
q2u16 = vmull_u8(d12u8, d0u8);
q3u16 = vmull_u8(d14u8, d0u8);
q4u16 = vmull_u8(d15u8, d0u8);
d11u8 = vext_u8(d11u8, d12u8, 1);
d14u8 = vext_u8(d14u8, d15u8, 1);
q1u16 = vmlal_u8(q1u16, d11u8, d1u8);
q3u16 = vmlal_u8(q3u16, d14u8, d1u8);
d12u8 = vext_u8(d12u8, d13u8, 1);
d15u8 = vext_u8(d15u8, d16u8, 1);
q2u16 = vmlal_u8(q2u16, d12u8, d1u8);
q4u16 = vmlal_u8(q4u16, d15u8, d1u8);
d10u8 = vqrshrn_n_u16(q9u16, 7);
d11u8 = vqrshrn_n_u16(q10u16, 7);
d12u8 = vqrshrn_n_u16(q11u16, 7);
d13u8 = vqrshrn_n_u16(q12u16, 7);
d14u8 = vqrshrn_n_u16(q13u16, 7);
d15u8 = vqrshrn_n_u16(q14u16, 7);
d16u8 = vqrshrn_n_u16(q1u16, 7);
d17u8 = vqrshrn_n_u16(q2u16, 7);
d18u8 = vqrshrn_n_u16(q3u16, 7);
d19u8 = vqrshrn_n_u16(q4u16, 7);
q5u8 = vcombine_u8(d10u8, d11u8);
q6u8 = vcombine_u8(d12u8, d13u8);
q7u8 = vcombine_u8(d14u8, d15u8);
q8u8 = vcombine_u8(d16u8, d17u8);
q9u8 = vcombine_u8(d18u8, d19u8);
vst1q_u8((uint8_t *)tmpp, q5u8); tmpp += 16;
vst1q_u8((uint8_t *)tmpp, q6u8); tmpp += 16;
vst1q_u8((uint8_t *)tmpp, q7u8); tmpp += 16;
vst1q_u8((uint8_t *)tmpp, q8u8); tmpp += 16;
vst1q_u8((uint8_t *)tmpp, q9u8);
// secondpass_filter
d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
tmpp = tmp;
q11u8 = vld1q_u8(tmpp);
tmpp += 16;
for (i = 4; i > 0; i--) {
q12u8 = vld1q_u8(tmpp); tmpp += 16;
q13u8 = vld1q_u8(tmpp); tmpp += 16;
q14u8 = vld1q_u8(tmpp); tmpp += 16;
q15u8 = vld1q_u8(tmpp); tmpp += 16;
q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8);
q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8);
q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8);
q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8);
q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8);
q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8);
q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8);
q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8);
q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8);
q2u16 = vmlal_u8(q2u16, vget_high_u8(q12u8), d1u8);
q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8);
q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8);
q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8);
q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8);
q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8);
q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8);
d2u8 = vqrshrn_n_u16(q1u16, 7);
d3u8 = vqrshrn_n_u16(q2u16, 7);
d4u8 = vqrshrn_n_u16(q3u16, 7);
d5u8 = vqrshrn_n_u16(q4u16, 7);
d6u8 = vqrshrn_n_u16(q5u16, 7);
d7u8 = vqrshrn_n_u16(q6u16, 7);
d8u8 = vqrshrn_n_u16(q7u16, 7);
d9u8 = vqrshrn_n_u16(q8u16, 7);
q1u8 = vcombine_u8(d2u8, d3u8);
q2u8 = vcombine_u8(d4u8, d5u8);
q3u8 = vcombine_u8(d6u8, d7u8);
q4u8 = vcombine_u8(d8u8, d9u8);
q11u8 = q15u8;
vst1q_u8((uint8_t *)dst_ptr, q1u8); dst_ptr += dst_pitch;
vst1q_u8((uint8_t *)dst_ptr, q2u8); dst_ptr += dst_pitch;
vst1q_u8((uint8_t *)dst_ptr, q3u8); dst_ptr += dst_pitch;
vst1q_u8((uint8_t *)dst_ptr, q4u8); dst_ptr += dst_pitch;
}
return;
}

View File

@ -1,59 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
void vp8_copy_mem8x4_neon(
unsigned char *src,
int src_stride,
unsigned char *dst,
int dst_stride) {
uint8x8_t vtmp;
int r;
for (r = 0; r < 4; r++) {
vtmp = vld1_u8(src);
vst1_u8(dst, vtmp);
src += src_stride;
dst += dst_stride;
}
}
void vp8_copy_mem8x8_neon(
unsigned char *src,
int src_stride,
unsigned char *dst,
int dst_stride) {
uint8x8_t vtmp;
int r;
for (r = 0; r < 8; r++) {
vtmp = vld1_u8(src);
vst1_u8(dst, vtmp);
src += src_stride;
dst += dst_stride;
}
}
void vp8_copy_mem16x16_neon(
unsigned char *src,
int src_stride,
unsigned char *dst,
int dst_stride) {
int r;
uint8x16_t qtmp;
for (r = 0; r < 16; r++) {
qtmp = vld1q_u8(src);
vst1q_u8(dst, qtmp);
src += src_stride;
dst += dst_stride;
}
}

View File

@ -1,42 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
void vp8_dc_only_idct_add_neon(
int16_t input_dc,
unsigned char *pred_ptr,
int pred_stride,
unsigned char *dst_ptr,
int dst_stride) {
int i;
uint16_t a1 = ((input_dc + 4) >> 3);
uint32x2_t d2u32 = vdup_n_u32(0);
uint8x8_t d2u8;
uint16x8_t q1u16;
uint16x8_t qAdd;
qAdd = vdupq_n_u16(a1);
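/* For a DC-only block every sample of the 4x4 inverse transform equals
 * (input_dc + 4) >> 3, so qAdd is simply added to all 16 predictor pixels
 * and the sums are saturated back to 8 bits. */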
for (i = 0; i < 2; i++) {
d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 0);
pred_ptr += pred_stride;
d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 1);
pred_ptr += pred_stride;
q1u16 = vaddw_u8(qAdd, vreinterpret_u8_u32(d2u32));
d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 0);
dst_ptr += dst_stride;
vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 1);
dst_ptr += dst_stride;
}
}

View File

@ -1,142 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
static const int16_t cospi8sqrt2minus1 = 20091;
static const int16_t sinpi8sqrt2 = 35468;
void vp8_dequant_idct_add_neon(
int16_t *input,
int16_t *dq,
unsigned char *dst,
int stride) {
unsigned char *dst0;
int32x2_t d14, d15;
int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
int16x8_t q1, q2, q3, q4, q5, q6;
int16x8_t qEmpty = vdupq_n_s16(0);
int32x2x2_t d2tmp0, d2tmp1;
int16x4x2_t d2tmp2, d2tmp3;
d14 = d15 = vdup_n_s32(0);
// load input
q3 = vld1q_s16(input);
vst1q_s16(input, qEmpty);
input += 8;
q4 = vld1q_s16(input);
vst1q_s16(input, qEmpty);
// load dq
q5 = vld1q_s16(dq);
dq += 8;
q6 = vld1q_s16(dq);
// load src from dst
dst0 = dst;
d14 = vld1_lane_s32((const int32_t *)dst0, d14, 0);
dst0 += stride;
d14 = vld1_lane_s32((const int32_t *)dst0, d14, 1);
dst0 += stride;
d15 = vld1_lane_s32((const int32_t *)dst0, d15, 0);
dst0 += stride;
d15 = vld1_lane_s32((const int32_t *)dst0, d15, 1);
q1 = vreinterpretq_s16_u16(vmulq_u16(vreinterpretq_u16_s16(q3),
vreinterpretq_u16_s16(q5)));
q2 = vreinterpretq_s16_u16(vmulq_u16(vreinterpretq_u16_s16(q4),
vreinterpretq_u16_s16(q6)));
d12 = vqadd_s16(vget_low_s16(q1), vget_low_s16(q2));
d13 = vqsub_s16(vget_low_s16(q1), vget_low_s16(q2));
q2 = vcombine_s16(vget_high_s16(q1), vget_high_s16(q2));
q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);
q3 = vshrq_n_s16(q3, 1);
q4 = vshrq_n_s16(q4, 1);
q3 = vqaddq_s16(q3, q2);
q4 = vqaddq_s16(q4, q2);
d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));
d2 = vqadd_s16(d12, d11);
d3 = vqadd_s16(d13, d10);
d4 = vqsub_s16(d13, d10);
d5 = vqsub_s16(d12, d11);
d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
vreinterpret_s16_s32(d2tmp1.val[0]));
d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
vreinterpret_s16_s32(d2tmp1.val[1]));
// loop 2
q2 = vcombine_s16(d2tmp2.val[1], d2tmp3.val[1]);
q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);
d12 = vqadd_s16(d2tmp2.val[0], d2tmp3.val[0]);
d13 = vqsub_s16(d2tmp2.val[0], d2tmp3.val[0]);
q3 = vshrq_n_s16(q3, 1);
q4 = vshrq_n_s16(q4, 1);
q3 = vqaddq_s16(q3, q2);
q4 = vqaddq_s16(q4, q2);
d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));
d2 = vqadd_s16(d12, d11);
d3 = vqadd_s16(d13, d10);
d4 = vqsub_s16(d13, d10);
d5 = vqsub_s16(d12, d11);
d2 = vrshr_n_s16(d2, 3);
d3 = vrshr_n_s16(d3, 3);
d4 = vrshr_n_s16(d4, 3);
d5 = vrshr_n_s16(d5, 3);
d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
vreinterpret_s16_s32(d2tmp1.val[0]));
d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
vreinterpret_s16_s32(d2tmp1.val[1]));
q1 = vcombine_s16(d2tmp2.val[0], d2tmp2.val[1]);
q2 = vcombine_s16(d2tmp3.val[0], d2tmp3.val[1]);
q1 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q1),
vreinterpret_u8_s32(d14)));
q2 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2),
vreinterpret_u8_s32(d15)));
d14 = vreinterpret_s32_u8(vqmovun_s16(q1));
d15 = vreinterpret_s32_u8(vqmovun_s16(q2));
dst0 = dst;
vst1_lane_s32((int32_t *)dst0, d14, 0);
dst0 += stride;
vst1_lane_s32((int32_t *)dst0, d14, 1);
dst0 += stride;
vst1_lane_s32((int32_t *)dst0, d15, 0);
dst0 += stride;
vst1_lane_s32((int32_t *)dst0, d15, 1);
return;
}

View File

@ -1,25 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
#include "vp8/common/blockd.h"
void vp8_dequantize_b_neon(BLOCKD *d, short *DQC) {
int16x8x2_t qQ, qDQC, qDQ;
qQ = vld2q_s16(d->qcoeff);
qDQC = vld2q_s16(DQC);
qDQ.val[0] = vmulq_s16(qQ.val[0], qDQC.val[0]);
qDQ.val[1] = vmulq_s16(qQ.val[1], qDQC.val[1]);
vst2q_s16(d->dqcoeff, qDQ);
}

View File

@ -1,96 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "vpx_config.h"
#include "vp8_rtcd.h"
/* place these declarations here because we don't want to maintain them
* outside of this scope
*/
void idct_dequant_full_2x_neon(short *q, short *dq,
unsigned char *dst, int stride);
void idct_dequant_0_2x_neon(short *q, short dq,
unsigned char *dst, int stride);
void vp8_dequant_idct_add_y_block_neon(short *q, short *dq,
unsigned char *dst,
int stride, char *eobs)
{
int i;
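/* eobs holds one end-of-block count per 4x4 block; reading a pair as a short
 * and masking with 0xfefe tests whether either of the two adjacent blocks has
 * coefficients beyond its DC term and therefore needs the full 2x IDCT. */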
for (i = 0; i < 4; i++)
{
if (((short *)(eobs))[0])
{
if (((short *)eobs)[0] & 0xfefe)
idct_dequant_full_2x_neon (q, dq, dst, stride);
else
idct_dequant_0_2x_neon (q, dq[0], dst, stride);
}
if (((short *)(eobs))[1])
{
if (((short *)eobs)[1] & 0xfefe)
idct_dequant_full_2x_neon (q+32, dq, dst+8, stride);
else
idct_dequant_0_2x_neon (q+32, dq[0], dst+8, stride);
}
q += 64;
dst += 4*stride;
eobs += 4;
}
}
void vp8_dequant_idct_add_uv_block_neon(short *q, short *dq,
unsigned char *dstu,
unsigned char *dstv,
int stride, char *eobs)
{
if (((short *)(eobs))[0])
{
if (((short *)eobs)[0] & 0xfefe)
idct_dequant_full_2x_neon (q, dq, dstu, stride);
else
idct_dequant_0_2x_neon (q, dq[0], dstu, stride);
}
q += 32;
dstu += 4*stride;
if (((short *)(eobs))[1])
{
if (((short *)eobs)[1] & 0xfefe)
idct_dequant_full_2x_neon (q, dq, dstu, stride);
else
idct_dequant_0_2x_neon (q, dq[0], dstu, stride);
}
q += 32;
if (((short *)(eobs))[2])
{
if (((short *)eobs)[2] & 0xfefe)
idct_dequant_full_2x_neon (q, dq, dstv, stride);
else
idct_dequant_0_2x_neon (q, dq[0], dstv, stride);
}
q += 32;
dstv += 4*stride;
if (((short *)(eobs))[3])
{
if (((short *)eobs)[3] & 0xfefe)
idct_dequant_full_2x_neon (q, dq, dstv, stride);
else
idct_dequant_0_2x_neon (q, dq[0], dstv, stride);
}
}

View File

@ -1,63 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
void idct_dequant_0_2x_neon(
int16_t *q,
int16_t dq,
unsigned char *dst,
int stride) {
unsigned char *dst0;
int i, a0, a1;
int16x8x2_t q2Add;
int32x2_t d2s32 = vdup_n_s32(0),
d4s32 = vdup_n_s32(0);
uint8x8_t d2u8, d4u8;
uint16x8_t q1u16, q2u16;
a0 = ((q[0] * dq) + 4) >> 3;
a1 = ((q[16] * dq) + 4) >> 3;
q[0] = q[16] = 0;
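/* Only the DC terms of the two adjacent blocks are non-zero here, so the
 * dequantized, rounded DC values a0 and a1 are simply added to every pixel
 * of their respective 4x4 blocks below. */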
q2Add.val[0] = vdupq_n_s16((int16_t)a0);
q2Add.val[1] = vdupq_n_s16((int16_t)a1);
for (i = 0; i < 2; i++, dst += 4) {
dst0 = dst;
d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 0);
dst0 += stride;
d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 1);
dst0 += stride;
d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 0);
dst0 += stride;
d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 1);
q1u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
vreinterpret_u8_s32(d2s32));
q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
vreinterpret_u8_s32(d4s32));
d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
d4u8 = vqmovun_s16(vreinterpretq_s16_u16(q2u16));
d2s32 = vreinterpret_s32_u8(d2u8);
d4s32 = vreinterpret_s32_u8(d4u8);
dst0 = dst;
vst1_lane_s32((int32_t *)dst0, d2s32, 0);
dst0 += stride;
vst1_lane_s32((int32_t *)dst0, d2s32, 1);
dst0 += stride;
vst1_lane_s32((int32_t *)dst0, d4s32, 0);
dst0 += stride;
vst1_lane_s32((int32_t *)dst0, d4s32, 1);
}
return;
}

View File

@ -1,185 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
static const int16_t cospi8sqrt2minus1 = 20091;
static const int16_t sinpi8sqrt2 = 17734;
// because the lowest bit in 0x8a8c is 0, we can pre-shift this
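// (vqdmulhq_n_s16 computes (a * b * 2) >> 16, so the halved constant 17734
// yields (a * 35468) >> 16 directly, with no separate shift needed.)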
void idct_dequant_full_2x_neon(
int16_t *q,
int16_t *dq,
unsigned char *dst,
int stride) {
unsigned char *dst0, *dst1;
int32x2_t d28, d29, d30, d31;
int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;
int16x8_t qEmpty = vdupq_n_s16(0);
int32x4x2_t q2tmp0, q2tmp1;
int16x8x2_t q2tmp2, q2tmp3;
int16x4_t dLow0, dLow1, dHigh0, dHigh1;
d28 = d29 = d30 = d31 = vdup_n_s32(0);
// load dq
q0 = vld1q_s16(dq);
dq += 8;
q1 = vld1q_s16(dq);
// load q
q2 = vld1q_s16(q);
vst1q_s16(q, qEmpty);
q += 8;
q3 = vld1q_s16(q);
vst1q_s16(q, qEmpty);
q += 8;
q4 = vld1q_s16(q);
vst1q_s16(q, qEmpty);
q += 8;
q5 = vld1q_s16(q);
vst1q_s16(q, qEmpty);
// load src from dst
dst0 = dst;
dst1 = dst + 4;
d28 = vld1_lane_s32((const int32_t *)dst0, d28, 0);
dst0 += stride;
d28 = vld1_lane_s32((const int32_t *)dst1, d28, 1);
dst1 += stride;
d29 = vld1_lane_s32((const int32_t *)dst0, d29, 0);
dst0 += stride;
d29 = vld1_lane_s32((const int32_t *)dst1, d29, 1);
dst1 += stride;
d30 = vld1_lane_s32((const int32_t *)dst0, d30, 0);
dst0 += stride;
d30 = vld1_lane_s32((const int32_t *)dst1, d30, 1);
dst1 += stride;
d31 = vld1_lane_s32((const int32_t *)dst0, d31, 0);
d31 = vld1_lane_s32((const int32_t *)dst1, d31, 1);
q2 = vmulq_s16(q2, q0);
q3 = vmulq_s16(q3, q1);
q4 = vmulq_s16(q4, q0);
q5 = vmulq_s16(q5, q1);
// vswp
dLow0 = vget_low_s16(q2);
dHigh0 = vget_high_s16(q2);
dLow1 = vget_low_s16(q4);
dHigh1 = vget_high_s16(q4);
q2 = vcombine_s16(dLow0, dLow1);
q4 = vcombine_s16(dHigh0, dHigh1);
dLow0 = vget_low_s16(q3);
dHigh0 = vget_high_s16(q3);
dLow1 = vget_low_s16(q5);
dHigh1 = vget_high_s16(q5);
q3 = vcombine_s16(dLow0, dLow1);
q5 = vcombine_s16(dHigh0, dHigh1);
q6 = vqdmulhq_n_s16(q4, sinpi8sqrt2);
q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2);
q8 = vqdmulhq_n_s16(q4, cospi8sqrt2minus1);
q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1);
q10 = vqaddq_s16(q2, q3);
q11 = vqsubq_s16(q2, q3);
q8 = vshrq_n_s16(q8, 1);
q9 = vshrq_n_s16(q9, 1);
q4 = vqaddq_s16(q4, q8);
q5 = vqaddq_s16(q5, q9);
q2 = vqsubq_s16(q6, q5);
q3 = vqaddq_s16(q7, q4);
q4 = vqaddq_s16(q10, q3);
q5 = vqaddq_s16(q11, q2);
q6 = vqsubq_s16(q11, q2);
q7 = vqsubq_s16(q10, q3);
q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
vreinterpretq_s16_s32(q2tmp1.val[0]));
q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
vreinterpretq_s16_s32(q2tmp1.val[1]));
// loop 2
q8 = vqdmulhq_n_s16(q2tmp2.val[1], sinpi8sqrt2);
q9 = vqdmulhq_n_s16(q2tmp3.val[1], sinpi8sqrt2);
q10 = vqdmulhq_n_s16(q2tmp2.val[1], cospi8sqrt2minus1);
q11 = vqdmulhq_n_s16(q2tmp3.val[1], cospi8sqrt2minus1);
q2 = vqaddq_s16(q2tmp2.val[0], q2tmp3.val[0]);
q3 = vqsubq_s16(q2tmp2.val[0], q2tmp3.val[0]);
q10 = vshrq_n_s16(q10, 1);
q11 = vshrq_n_s16(q11, 1);
q10 = vqaddq_s16(q2tmp2.val[1], q10);
q11 = vqaddq_s16(q2tmp3.val[1], q11);
q8 = vqsubq_s16(q8, q11);
q9 = vqaddq_s16(q9, q10);
q4 = vqaddq_s16(q2, q9);
q5 = vqaddq_s16(q3, q8);
q6 = vqsubq_s16(q3, q8);
q7 = vqsubq_s16(q2, q9);
q4 = vrshrq_n_s16(q4, 3);
q5 = vrshrq_n_s16(q5, 3);
q6 = vrshrq_n_s16(q6, 3);
q7 = vrshrq_n_s16(q7, 3);
q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
vreinterpretq_s16_s32(q2tmp1.val[0]));
q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
vreinterpretq_s16_s32(q2tmp1.val[1]));
q4 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[0]),
vreinterpret_u8_s32(d28)));
q5 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[1]),
vreinterpret_u8_s32(d29)));
q6 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[0]),
vreinterpret_u8_s32(d30)));
q7 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[1]),
vreinterpret_u8_s32(d31)));
d28 = vreinterpret_s32_u8(vqmovun_s16(q4));
d29 = vreinterpret_s32_u8(vqmovun_s16(q5));
d30 = vreinterpret_s32_u8(vqmovun_s16(q6));
d31 = vreinterpret_s32_u8(vqmovun_s16(q7));
dst0 = dst;
dst1 = dst + 4;
vst1_lane_s32((int32_t *)dst0, d28, 0);
dst0 += stride;
vst1_lane_s32((int32_t *)dst1, d28, 1);
dst1 += stride;
vst1_lane_s32((int32_t *)dst0, d29, 0);
dst0 += stride;
vst1_lane_s32((int32_t *)dst1, d29, 1);
dst1 += stride;
vst1_lane_s32((int32_t *)dst0, d30, 0);
dst0 += stride;
vst1_lane_s32((int32_t *)dst1, d30, 1);
dst1 += stride;
vst1_lane_s32((int32_t *)dst0, d31, 0);
vst1_lane_s32((int32_t *)dst1, d31, 1);
return;
}

View File

@ -1,102 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
void vp8_short_inv_walsh4x4_neon(
int16_t *input,
int16_t *mb_dqcoeff) {
int16x8_t q0s16, q1s16, q2s16, q3s16;
int16x4_t d4s16, d5s16, d6s16, d7s16;
int16x4x2_t v2tmp0, v2tmp1;
int32x2x2_t v2tmp2, v2tmp3;
int16x8_t qAdd3;
q0s16 = vld1q_s16(input);
q1s16 = vld1q_s16(input + 8);
// 1st for loop
d4s16 = vadd_s16(vget_low_s16(q0s16), vget_high_s16(q1s16));
d6s16 = vadd_s16(vget_high_s16(q0s16), vget_low_s16(q1s16));
d5s16 = vsub_s16(vget_low_s16(q0s16), vget_high_s16(q1s16));
d7s16 = vsub_s16(vget_high_s16(q0s16), vget_low_s16(q1s16));
q2s16 = vcombine_s16(d4s16, d5s16);
q3s16 = vcombine_s16(d6s16, d7s16);
q0s16 = vaddq_s16(q2s16, q3s16);
q1s16 = vsubq_s16(q2s16, q3s16);
v2tmp2 = vtrn_s32(vreinterpret_s32_s16(vget_low_s16(q0s16)),
vreinterpret_s32_s16(vget_low_s16(q1s16)));
v2tmp3 = vtrn_s32(vreinterpret_s32_s16(vget_high_s16(q0s16)),
vreinterpret_s32_s16(vget_high_s16(q1s16)));
v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]),
vreinterpret_s16_s32(v2tmp3.val[0]));
v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]),
vreinterpret_s16_s32(v2tmp3.val[1]));
// 2nd for loop
d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]);
d6s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]);
d5s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]);
d7s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]);
q2s16 = vcombine_s16(d4s16, d5s16);
q3s16 = vcombine_s16(d6s16, d7s16);
qAdd3 = vdupq_n_s16(3);
q0s16 = vaddq_s16(q2s16, q3s16);
q1s16 = vsubq_s16(q2s16, q3s16);
q0s16 = vaddq_s16(q0s16, qAdd3);
q1s16 = vaddq_s16(q1s16, qAdd3);
q0s16 = vshrq_n_s16(q0s16, 3);
q1s16 = vshrq_n_s16(q1s16, 3);
// store
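// Each of the 16 results is the DC coefficient of one 4x4 sub-block of the
// macroblock, hence the stride of 16 between successive stores below.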
vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 0);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 0);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 0);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 0);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 1);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 1);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 1);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 1);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 2);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 2);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 2);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 2);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16), 3);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 3);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16), 3);
mb_dqcoeff += 16;
vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 3);
mb_dqcoeff += 16;
return;
}

View File

@ -1,111 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
#include "./vpx_config.h"
static INLINE void vp8_loop_filter_simple_horizontal_edge_neon(
unsigned char *s,
int p,
const unsigned char *blimit) {
uint8_t *sp;
uint8x16_t qblimit, q0u8;
uint8x16_t q5u8, q6u8, q7u8, q8u8, q9u8, q10u8, q14u8, q15u8;
int16x8_t q2s16, q3s16, q13s16;
int8x8_t d8s8, d9s8;
int8x16_t q2s8, q3s8, q4s8, q10s8, q11s8, q14s8;
qblimit = vdupq_n_u8(*blimit);
sp = s - (p << 1);
q5u8 = vld1q_u8(sp);
sp += p;
q6u8 = vld1q_u8(sp);
sp += p;
q7u8 = vld1q_u8(sp);
sp += p;
q8u8 = vld1q_u8(sp);
q15u8 = vabdq_u8(q6u8, q7u8);
q14u8 = vabdq_u8(q5u8, q8u8);
q15u8 = vqaddq_u8(q15u8, q15u8);
q14u8 = vshrq_n_u8(q14u8, 1);
q0u8 = vdupq_n_u8(0x80);
q13s16 = vdupq_n_s16(3);
q15u8 = vqaddq_u8(q15u8, q14u8);
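/* q15u8 now holds the simple-filter edge measure 2 * |p0 - q0| + |p1 - q1| / 2;
 * the vcgeq_u8 against blimit below turns it into an all-ones mask for the
 * pixel columns that get filtered. */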
q5u8 = veorq_u8(q5u8, q0u8);
q6u8 = veorq_u8(q6u8, q0u8);
q7u8 = veorq_u8(q7u8, q0u8);
q8u8 = veorq_u8(q8u8, q0u8);
q15u8 = vcgeq_u8(qblimit, q15u8);
q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7u8)),
vget_low_s8(vreinterpretq_s8_u8(q6u8)));
q3s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7u8)),
vget_high_s8(vreinterpretq_s8_u8(q6u8)));
q4s8 = vqsubq_s8(vreinterpretq_s8_u8(q5u8),
vreinterpretq_s8_u8(q8u8));
q2s16 = vmulq_s16(q2s16, q13s16);
q3s16 = vmulq_s16(q3s16, q13s16);
q10u8 = vdupq_n_u8(3);
q9u8 = vdupq_n_u8(4);
q2s16 = vaddw_s8(q2s16, vget_low_s8(q4s8));
q3s16 = vaddw_s8(q3s16, vget_high_s8(q4s8));
d8s8 = vqmovn_s16(q2s16);
d9s8 = vqmovn_s16(q3s16);
q4s8 = vcombine_s8(d8s8, d9s8);
q14s8 = vandq_s8(q4s8, vreinterpretq_s8_u8(q15u8));
q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q10u8));
q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q9u8));
q2s8 = vshrq_n_s8(q2s8, 3);
q3s8 = vshrq_n_s8(q3s8, 3);
q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6u8), q2s8);
q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7u8), q3s8);
q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
vst1q_u8(s, q7u8);
s -= p;
vst1q_u8(s, q6u8);
return;
}
void vp8_loop_filter_bhs_neon(
unsigned char *y_ptr,
int y_stride,
const unsigned char *blimit) {
y_ptr += y_stride * 4;
vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
y_ptr += y_stride * 4;
vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
y_ptr += y_stride * 4;
vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
return;
}
void vp8_loop_filter_mbhs_neon(
unsigned char *y_ptr,
int y_stride,
const unsigned char *blimit) {
vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
return;
}

View File

@ -1,283 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
#include "./vpx_config.h"
#include "vpx_ports/arm.h"
#ifdef VPX_INCOMPATIBLE_GCC
static INLINE void write_2x4(unsigned char *dst, int pitch,
const uint8x8x2_t result) {
/*
* uint8x8x2_t result
00 01 02 03 | 04 05 06 07
10 11 12 13 | 14 15 16 17
---
* after vtrn_u8
00 10 02 12 | 04 14 06 16
01 11 03 13 | 05 15 07 17
*/
const uint8x8x2_t r01_u8 = vtrn_u8(result.val[0],
result.val[1]);
const uint16x4_t x_0_4 = vreinterpret_u16_u8(r01_u8.val[0]);
const uint16x4_t x_1_5 = vreinterpret_u16_u8(r01_u8.val[1]);
vst1_lane_u16((uint16_t *)dst, x_0_4, 0);
dst += pitch;
vst1_lane_u16((uint16_t *)dst, x_1_5, 0);
dst += pitch;
vst1_lane_u16((uint16_t *)dst, x_0_4, 1);
dst += pitch;
vst1_lane_u16((uint16_t *)dst, x_1_5, 1);
dst += pitch;
vst1_lane_u16((uint16_t *)dst, x_0_4, 2);
dst += pitch;
vst1_lane_u16((uint16_t *)dst, x_1_5, 2);
dst += pitch;
vst1_lane_u16((uint16_t *)dst, x_0_4, 3);
dst += pitch;
vst1_lane_u16((uint16_t *)dst, x_1_5, 3);
}
static INLINE void write_2x8(unsigned char *dst, int pitch,
const uint8x8x2_t result,
const uint8x8x2_t result2) {
write_2x4(dst, pitch, result);
dst += pitch * 8;
write_2x4(dst, pitch, result2);
}
#else
static INLINE void write_2x8(unsigned char *dst, int pitch,
const uint8x8x2_t result,
const uint8x8x2_t result2) {
vst2_lane_u8(dst, result, 0);
dst += pitch;
vst2_lane_u8(dst, result, 1);
dst += pitch;
vst2_lane_u8(dst, result, 2);
dst += pitch;
vst2_lane_u8(dst, result, 3);
dst += pitch;
vst2_lane_u8(dst, result, 4);
dst += pitch;
vst2_lane_u8(dst, result, 5);
dst += pitch;
vst2_lane_u8(dst, result, 6);
dst += pitch;
vst2_lane_u8(dst, result, 7);
dst += pitch;
vst2_lane_u8(dst, result2, 0);
dst += pitch;
vst2_lane_u8(dst, result2, 1);
dst += pitch;
vst2_lane_u8(dst, result2, 2);
dst += pitch;
vst2_lane_u8(dst, result2, 3);
dst += pitch;
vst2_lane_u8(dst, result2, 4);
dst += pitch;
vst2_lane_u8(dst, result2, 5);
dst += pitch;
vst2_lane_u8(dst, result2, 6);
dst += pitch;
vst2_lane_u8(dst, result2, 7);
}
#endif // VPX_INCOMPATIBLE_GCC
#ifdef VPX_INCOMPATIBLE_GCC
static INLINE
uint8x8x4_t read_4x8(unsigned char *src, int pitch) {
uint8x8x4_t x;
const uint8x8_t a = vld1_u8(src);
const uint8x8_t b = vld1_u8(src + pitch * 1);
const uint8x8_t c = vld1_u8(src + pitch * 2);
const uint8x8_t d = vld1_u8(src + pitch * 3);
const uint8x8_t e = vld1_u8(src + pitch * 4);
const uint8x8_t f = vld1_u8(src + pitch * 5);
const uint8x8_t g = vld1_u8(src + pitch * 6);
const uint8x8_t h = vld1_u8(src + pitch * 7);
const uint32x2x2_t r04_u32 = vtrn_u32(vreinterpret_u32_u8(a),
vreinterpret_u32_u8(e));
const uint32x2x2_t r15_u32 = vtrn_u32(vreinterpret_u32_u8(b),
vreinterpret_u32_u8(f));
const uint32x2x2_t r26_u32 = vtrn_u32(vreinterpret_u32_u8(c),
vreinterpret_u32_u8(g));
const uint32x2x2_t r37_u32 = vtrn_u32(vreinterpret_u32_u8(d),
vreinterpret_u32_u8(h));
const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u32(r04_u32.val[0]),
vreinterpret_u16_u32(r26_u32.val[0]));
const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u32(r15_u32.val[0]),
vreinterpret_u16_u32(r37_u32.val[0]));
const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
vreinterpret_u8_u16(r13_u16.val[0]));
const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
vreinterpret_u8_u16(r13_u16.val[1]));
/*
* after vtrn_u32
00 01 02 03 | 40 41 42 43
10 11 12 13 | 50 51 52 53
20 21 22 23 | 60 61 62 63
30 31 32 33 | 70 71 72 73
---
* after vtrn_u16
00 01 20 21 | 40 41 60 61
02 03 22 23 | 42 43 62 63
10 11 30 31 | 50 51 70 71
12 13 32 33 | 52 53 72 73
00 01 20 21 | 40 41 60 61
10 11 30 31 | 50 51 70 71
02 03 22 23 | 42 43 62 63
12 13 32 33 | 52 53 72 73
---
* after vtrn_u8
00 10 20 30 | 40 50 60 70
01 11 21 31 | 41 51 61 71
02 12 22 32 | 42 52 62 72
03 13 23 33 | 43 53 63 73
*/
x.val[0] = r01_u8.val[0];
x.val[1] = r01_u8.val[1];
x.val[2] = r23_u8.val[0];
x.val[3] = r23_u8.val[1];
return x;
}
#else
static INLINE
uint8x8x4_t read_4x8(unsigned char *src, int pitch) {
uint8x8x4_t x;
x.val[0] = x.val[1] = x.val[2] = x.val[3] = vdup_n_u8(0);
x = vld4_lane_u8(src, x, 0);
src += pitch;
x = vld4_lane_u8(src, x, 1);
src += pitch;
x = vld4_lane_u8(src, x, 2);
src += pitch;
x = vld4_lane_u8(src, x, 3);
src += pitch;
x = vld4_lane_u8(src, x, 4);
src += pitch;
x = vld4_lane_u8(src, x, 5);
src += pitch;
x = vld4_lane_u8(src, x, 6);
src += pitch;
x = vld4_lane_u8(src, x, 7);
return x;
}
#endif // VPX_INCOMPATIBLE_GCC
static INLINE void vp8_loop_filter_simple_vertical_edge_neon(
unsigned char *s,
int p,
const unsigned char *blimit) {
unsigned char *src1;
uint8x16_t qblimit, q0u8;
uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q11u8, q12u8, q14u8, q15u8;
int16x8_t q2s16, q13s16, q11s16;
int8x8_t d28s8, d29s8;
int8x16_t q2s8, q3s8, q10s8, q11s8, q14s8;
uint8x8x4_t d0u8x4; // d6, d7, d8, d9
uint8x8x4_t d1u8x4; // d10, d11, d12, d13
uint8x8x2_t d2u8x2; // d12, d13
uint8x8x2_t d3u8x2; // d14, d15
qblimit = vdupq_n_u8(*blimit);
src1 = s - 2;
d0u8x4 = read_4x8(src1, p);
src1 += p * 8;
d1u8x4 = read_4x8(src1, p);
q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]); // d6 d10
q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]); // d8 d12
q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]); // d7 d11
q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]); // d9 d13
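// q3u8/q5u8/q4u8/q6u8 now hold the p1/p0/q0/q1 columns; the simple-filter mask
// computed below is 2 * |p0 - q0| + |p1 - q1| / 2 <= blimit.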
q15u8 = vabdq_u8(q5u8, q4u8);
q14u8 = vabdq_u8(q3u8, q6u8);
q15u8 = vqaddq_u8(q15u8, q15u8);
q14u8 = vshrq_n_u8(q14u8, 1);
q0u8 = vdupq_n_u8(0x80);
q11s16 = vdupq_n_s16(3);
q15u8 = vqaddq_u8(q15u8, q14u8);
q3u8 = veorq_u8(q3u8, q0u8);
q4u8 = veorq_u8(q4u8, q0u8);
q5u8 = veorq_u8(q5u8, q0u8);
q6u8 = veorq_u8(q6u8, q0u8);
q15u8 = vcgeq_u8(qblimit, q15u8);
q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)),
vget_low_s8(vreinterpretq_s8_u8(q5u8)));
q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)),
vget_high_s8(vreinterpretq_s8_u8(q5u8)));
q14s8 = vqsubq_s8(vreinterpretq_s8_u8(q3u8),
vreinterpretq_s8_u8(q6u8));
q2s16 = vmulq_s16(q2s16, q11s16);
q13s16 = vmulq_s16(q13s16, q11s16);
q11u8 = vdupq_n_u8(3);
q12u8 = vdupq_n_u8(4);
q2s16 = vaddw_s8(q2s16, vget_low_s8(q14s8));
q13s16 = vaddw_s8(q13s16, vget_high_s8(q14s8));
d28s8 = vqmovn_s16(q2s16);
d29s8 = vqmovn_s16(q13s16);
q14s8 = vcombine_s8(d28s8, d29s8);
q14s8 = vandq_s8(q14s8, vreinterpretq_s8_u8(q15u8));
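// Common 4-tap filter: filter = clamp(3 * (q0 - p0) + (p1 - q1)), masked above.
// Filter2 = clamp(filter + 3) >> 3 is added to p0; Filter1 = clamp(filter + 4) >> 3 is subtracted from q0.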
q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q11u8));
q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q12u8));
q2s8 = vshrq_n_s8(q2s8, 3);
q14s8 = vshrq_n_s8(q3s8, 3);
q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q5u8), q2s8);
q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q4u8), q14s8);
q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
d2u8x2.val[0] = vget_low_u8(q6u8); // d12
d2u8x2.val[1] = vget_low_u8(q7u8); // d14
d3u8x2.val[0] = vget_high_u8(q6u8); // d13
d3u8x2.val[1] = vget_high_u8(q7u8); // d15
src1 = s - 1;
write_2x8(src1, p, d2u8x2, d3u8x2);
}
void vp8_loop_filter_bvs_neon(
unsigned char *y_ptr,
int y_stride,
const unsigned char *blimit) {
y_ptr += 4;
vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
y_ptr += 4;
vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
y_ptr += 4;
vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
return;
}
void vp8_loop_filter_mbvs_neon(
unsigned char *y_ptr,
int y_stride,
const unsigned char *blimit) {
vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
return;
}

View File

@ -1,625 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
#include "./vpx_config.h"
static INLINE void vp8_mbloop_filter_neon(
uint8x16_t qblimit, // mblimit
uint8x16_t qlimit, // limit
uint8x16_t qthresh, // thresh
uint8x16_t q3, // p3
uint8x16_t q4, // p2
uint8x16_t q5, // p1
uint8x16_t q6, // p0
uint8x16_t q7, // q0
uint8x16_t q8, // q1
uint8x16_t q9, // q2
uint8x16_t q10, // q3
uint8x16_t *q4r, // p2
uint8x16_t *q5r, // p1
uint8x16_t *q6r, // p0
uint8x16_t *q7r, // q0
uint8x16_t *q8r, // q1
uint8x16_t *q9r) { // q2
uint8x16_t q0u8, q1u8, q11u8, q12u8, q13u8, q14u8, q15u8;
int16x8_t q0s16, q2s16, q11s16, q12s16, q13s16, q14s16, q15s16;
int8x16_t q1s8, q6s8, q7s8, q2s8, q11s8, q13s8;
uint16x8_t q0u16, q11u16, q12u16, q13u16, q14u16, q15u16;
int8x16_t q0s8, q12s8, q14s8, q15s8;
int8x8_t d0, d1, d2, d3, d4, d5, d24, d25, d28, d29;
q11u8 = vabdq_u8(q3, q4);
q12u8 = vabdq_u8(q4, q5);
q13u8 = vabdq_u8(q5, q6);
q14u8 = vabdq_u8(q8, q7);
q1u8 = vabdq_u8(q9, q8);
q0u8 = vabdq_u8(q10, q9);
q11u8 = vmaxq_u8(q11u8, q12u8);
q12u8 = vmaxq_u8(q13u8, q14u8);
q1u8 = vmaxq_u8(q1u8, q0u8);
q15u8 = vmaxq_u8(q11u8, q12u8);
q12u8 = vabdq_u8(q6, q7);
// vp8_hevmask
q13u8 = vcgtq_u8(q13u8, qthresh);
q14u8 = vcgtq_u8(q14u8, qthresh);
q15u8 = vmaxq_u8(q15u8, q1u8);
q15u8 = vcgeq_u8(qlimit, q15u8);
q1u8 = vabdq_u8(q5, q8);
q12u8 = vqaddq_u8(q12u8, q12u8);
// vp8_filter() function
// convert to signed
q0u8 = vdupq_n_u8(0x80);
q9 = veorq_u8(q9, q0u8);
q8 = veorq_u8(q8, q0u8);
q7 = veorq_u8(q7, q0u8);
q6 = veorq_u8(q6, q0u8);
q5 = veorq_u8(q5, q0u8);
q4 = veorq_u8(q4, q0u8);
q1u8 = vshrq_n_u8(q1u8, 1);
q12u8 = vqaddq_u8(q12u8, q1u8);
q14u8 = vorrq_u8(q13u8, q14u8);
q12u8 = vcgeq_u8(qblimit, q12u8);
q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
vget_low_s8(vreinterpretq_s8_u8(q6)));
q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
vget_high_s8(vreinterpretq_s8_u8(q6)));
q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5),
vreinterpretq_s8_u8(q8));
q11s16 = vdupq_n_s16(3);
q2s16 = vmulq_s16(q2s16, q11s16);
q13s16 = vmulq_s16(q13s16, q11s16);
q15u8 = vandq_u8(q15u8, q12u8);
q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8));
q13s16 = vaddw_s8(q13s16, vget_high_s8(q1s8));
q12u8 = vdupq_n_u8(3);
q11u8 = vdupq_n_u8(4);
// vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
d2 = vqmovn_s16(q2s16);
d3 = vqmovn_s16(q13s16);
q1s8 = vcombine_s8(d2, d3);
q1s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q15u8));
q13s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
q2s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q11u8));
q13s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q12u8));
q2s8 = vshrq_n_s8(q2s8, 3);
q13s8 = vshrq_n_s8(q13s8, 3);
q7s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q2s8);
q6s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q13s8);
q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
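// Wide filter for non-hev pixels: a = (9 * f + 63) >> 7, (18 * f + 63) >> 7 and (27 * f + 63) >> 7
// are added to p2/p1/p0 and subtracted from q2/q1/q0 respectively (the 63 bias plus >> 7 rounds to nearest).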
q0u16 = q11u16 = q12u16 = q13u16 = q14u16 = q15u16 = vdupq_n_u16(63);
d5 = vdup_n_s8(9);
d4 = vdup_n_s8(18);
q0s16 = vmlal_s8(vreinterpretq_s16_u16(q0u16), vget_low_s8(q1s8), d5);
q11s16 = vmlal_s8(vreinterpretq_s16_u16(q11u16), vget_high_s8(q1s8), d5);
d5 = vdup_n_s8(27);
q12s16 = vmlal_s8(vreinterpretq_s16_u16(q12u16), vget_low_s8(q1s8), d4);
q13s16 = vmlal_s8(vreinterpretq_s16_u16(q13u16), vget_high_s8(q1s8), d4);
q14s16 = vmlal_s8(vreinterpretq_s16_u16(q14u16), vget_low_s8(q1s8), d5);
q15s16 = vmlal_s8(vreinterpretq_s16_u16(q15u16), vget_high_s8(q1s8), d5);
d0 = vqshrn_n_s16(q0s16 , 7);
d1 = vqshrn_n_s16(q11s16, 7);
d24 = vqshrn_n_s16(q12s16, 7);
d25 = vqshrn_n_s16(q13s16, 7);
d28 = vqshrn_n_s16(q14s16, 7);
d29 = vqshrn_n_s16(q15s16, 7);
q0s8 = vcombine_s8(d0, d1);
q12s8 = vcombine_s8(d24, d25);
q14s8 = vcombine_s8(d28, d29);
q11s8 = vqsubq_s8(vreinterpretq_s8_u8(q9), q0s8);
q0s8 = vqaddq_s8(vreinterpretq_s8_u8(q4), q0s8);
q13s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q12s8);
q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8);
q15s8 = vqsubq_s8((q7s8), q14s8);
q14s8 = vqaddq_s8((q6s8), q14s8);
q1u8 = vdupq_n_u8(0x80);
*q9r = veorq_u8(vreinterpretq_u8_s8(q11s8), q1u8);
*q8r = veorq_u8(vreinterpretq_u8_s8(q13s8), q1u8);
*q7r = veorq_u8(vreinterpretq_u8_s8(q15s8), q1u8);
*q6r = veorq_u8(vreinterpretq_u8_s8(q14s8), q1u8);
*q5r = veorq_u8(vreinterpretq_u8_s8(q12s8), q1u8);
*q4r = veorq_u8(vreinterpretq_u8_s8(q0s8), q1u8);
return;
}
void vp8_mbloop_filter_horizontal_edge_y_neon(
unsigned char *src,
int pitch,
unsigned char blimit,
unsigned char limit,
unsigned char thresh) {
uint8x16_t qblimit, qlimit, qthresh, q3, q4;
uint8x16_t q5, q6, q7, q8, q9, q10;
qblimit = vdupq_n_u8(blimit);
qlimit = vdupq_n_u8(limit);
qthresh = vdupq_n_u8(thresh);
src -= (pitch << 2);
q3 = vld1q_u8(src);
src += pitch;
q4 = vld1q_u8(src);
src += pitch;
q5 = vld1q_u8(src);
src += pitch;
q6 = vld1q_u8(src);
src += pitch;
q7 = vld1q_u8(src);
src += pitch;
q8 = vld1q_u8(src);
src += pitch;
q9 = vld1q_u8(src);
src += pitch;
q10 = vld1q_u8(src);
vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
q5, q6, q7, q8, q9, q10,
&q4, &q5, &q6, &q7, &q8, &q9);
src -= (pitch * 6);
vst1q_u8(src, q4);
src += pitch;
vst1q_u8(src, q5);
src += pitch;
vst1q_u8(src, q6);
src += pitch;
vst1q_u8(src, q7);
src += pitch;
vst1q_u8(src, q8);
src += pitch;
vst1q_u8(src, q9);
return;
}
void vp8_mbloop_filter_horizontal_edge_uv_neon(
unsigned char *u,
int pitch,
unsigned char blimit,
unsigned char limit,
unsigned char thresh,
unsigned char *v) {
uint8x16_t qblimit, qlimit, qthresh, q3, q4;
uint8x16_t q5, q6, q7, q8, q9, q10;
uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
uint8x8_t d15, d16, d17, d18, d19, d20, d21;
qblimit = vdupq_n_u8(blimit);
qlimit = vdupq_n_u8(limit);
qthresh = vdupq_n_u8(thresh);
u -= (pitch << 2);
v -= (pitch << 2);
d6 = vld1_u8(u);
u += pitch;
d7 = vld1_u8(v);
v += pitch;
d8 = vld1_u8(u);
u += pitch;
d9 = vld1_u8(v);
v += pitch;
d10 = vld1_u8(u);
u += pitch;
d11 = vld1_u8(v);
v += pitch;
d12 = vld1_u8(u);
u += pitch;
d13 = vld1_u8(v);
v += pitch;
d14 = vld1_u8(u);
u += pitch;
d15 = vld1_u8(v);
v += pitch;
d16 = vld1_u8(u);
u += pitch;
d17 = vld1_u8(v);
v += pitch;
d18 = vld1_u8(u);
u += pitch;
d19 = vld1_u8(v);
v += pitch;
d20 = vld1_u8(u);
d21 = vld1_u8(v);
q3 = vcombine_u8(d6, d7);
q4 = vcombine_u8(d8, d9);
q5 = vcombine_u8(d10, d11);
q6 = vcombine_u8(d12, d13);
q7 = vcombine_u8(d14, d15);
q8 = vcombine_u8(d16, d17);
q9 = vcombine_u8(d18, d19);
q10 = vcombine_u8(d20, d21);
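// Each q register holds a U row in its low half and the matching V row in its high half,
// so both chroma planes are filtered in a single pass.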
vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
q5, q6, q7, q8, q9, q10,
&q4, &q5, &q6, &q7, &q8, &q9);
u -= (pitch * 6);
v -= (pitch * 6);
vst1_u8(u, vget_low_u8(q4));
u += pitch;
vst1_u8(v, vget_high_u8(q4));
v += pitch;
vst1_u8(u, vget_low_u8(q5));
u += pitch;
vst1_u8(v, vget_high_u8(q5));
v += pitch;
vst1_u8(u, vget_low_u8(q6));
u += pitch;
vst1_u8(v, vget_high_u8(q6));
v += pitch;
vst1_u8(u, vget_low_u8(q7));
u += pitch;
vst1_u8(v, vget_high_u8(q7));
v += pitch;
vst1_u8(u, vget_low_u8(q8));
u += pitch;
vst1_u8(v, vget_high_u8(q8));
v += pitch;
vst1_u8(u, vget_low_u8(q9));
vst1_u8(v, vget_high_u8(q9));
return;
}
void vp8_mbloop_filter_vertical_edge_y_neon(
unsigned char *src,
int pitch,
unsigned char blimit,
unsigned char limit,
unsigned char thresh) {
unsigned char *s1, *s2;
uint8x16_t qblimit, qlimit, qthresh, q3, q4;
uint8x16_t q5, q6, q7, q8, q9, q10;
uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
uint8x8_t d15, d16, d17, d18, d19, d20, d21;
uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
qblimit = vdupq_n_u8(blimit);
qlimit = vdupq_n_u8(limit);
qthresh = vdupq_n_u8(thresh);
s1 = src - 4;
s2 = s1 + 8 * pitch;
d6 = vld1_u8(s1);
s1 += pitch;
d7 = vld1_u8(s2);
s2 += pitch;
d8 = vld1_u8(s1);
s1 += pitch;
d9 = vld1_u8(s2);
s2 += pitch;
d10 = vld1_u8(s1);
s1 += pitch;
d11 = vld1_u8(s2);
s2 += pitch;
d12 = vld1_u8(s1);
s1 += pitch;
d13 = vld1_u8(s2);
s2 += pitch;
d14 = vld1_u8(s1);
s1 += pitch;
d15 = vld1_u8(s2);
s2 += pitch;
d16 = vld1_u8(s1);
s1 += pitch;
d17 = vld1_u8(s2);
s2 += pitch;
d18 = vld1_u8(s1);
s1 += pitch;
d19 = vld1_u8(s2);
s2 += pitch;
d20 = vld1_u8(s1);
d21 = vld1_u8(s2);
q3 = vcombine_u8(d6, d7);
q4 = vcombine_u8(d8, d9);
q5 = vcombine_u8(d10, d11);
q6 = vcombine_u8(d12, d13);
q7 = vcombine_u8(d14, d15);
q8 = vcombine_u8(d16, d17);
q9 = vcombine_u8(d18, d19);
q10 = vcombine_u8(d20, d21);
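// Transpose the 16 loaded rows of 8 pixels into 8 vectors of columns (p3 .. q3)
// using vtrn on 32-, 16- and 8-bit lanes.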
q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
vreinterpretq_u16_u32(q2tmp2.val[0]));
q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
vreinterpretq_u16_u32(q2tmp3.val[0]));
q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
vreinterpretq_u16_u32(q2tmp2.val[1]));
q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
vreinterpretq_u16_u32(q2tmp3.val[1]));
q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
vreinterpretq_u8_u16(q2tmp5.val[0]));
q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
vreinterpretq_u8_u16(q2tmp5.val[1]));
q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
vreinterpretq_u8_u16(q2tmp7.val[0]));
q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
vreinterpretq_u8_u16(q2tmp7.val[1]));
q3 = q2tmp8.val[0];
q4 = q2tmp8.val[1];
q5 = q2tmp9.val[0];
q6 = q2tmp9.val[1];
q7 = q2tmp10.val[0];
q8 = q2tmp10.val[1];
q9 = q2tmp11.val[0];
q10 = q2tmp11.val[1];
vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
q5, q6, q7, q8, q9, q10,
&q4, &q5, &q6, &q7, &q8, &q9);
q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
vreinterpretq_u16_u32(q2tmp2.val[0]));
q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
vreinterpretq_u16_u32(q2tmp3.val[0]));
q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
vreinterpretq_u16_u32(q2tmp2.val[1]));
q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
vreinterpretq_u16_u32(q2tmp3.val[1]));
q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
vreinterpretq_u8_u16(q2tmp5.val[0]));
q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
vreinterpretq_u8_u16(q2tmp5.val[1]));
q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
vreinterpretq_u8_u16(q2tmp7.val[0]));
q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
vreinterpretq_u8_u16(q2tmp7.val[1]));
q3 = q2tmp8.val[0];
q4 = q2tmp8.val[1];
q5 = q2tmp9.val[0];
q6 = q2tmp9.val[1];
q7 = q2tmp10.val[0];
q8 = q2tmp10.val[1];
q9 = q2tmp11.val[0];
q10 = q2tmp11.val[1];
s1 -= 7 * pitch;
s2 -= 7 * pitch;
vst1_u8(s1, vget_low_u8(q3));
s1 += pitch;
vst1_u8(s2, vget_high_u8(q3));
s2 += pitch;
vst1_u8(s1, vget_low_u8(q4));
s1 += pitch;
vst1_u8(s2, vget_high_u8(q4));
s2 += pitch;
vst1_u8(s1, vget_low_u8(q5));
s1 += pitch;
vst1_u8(s2, vget_high_u8(q5));
s2 += pitch;
vst1_u8(s1, vget_low_u8(q6));
s1 += pitch;
vst1_u8(s2, vget_high_u8(q6));
s2 += pitch;
vst1_u8(s1, vget_low_u8(q7));
s1 += pitch;
vst1_u8(s2, vget_high_u8(q7));
s2 += pitch;
vst1_u8(s1, vget_low_u8(q8));
s1 += pitch;
vst1_u8(s2, vget_high_u8(q8));
s2 += pitch;
vst1_u8(s1, vget_low_u8(q9));
s1 += pitch;
vst1_u8(s2, vget_high_u8(q9));
s2 += pitch;
vst1_u8(s1, vget_low_u8(q10));
vst1_u8(s2, vget_high_u8(q10));
return;
}
void vp8_mbloop_filter_vertical_edge_uv_neon(
unsigned char *u,
int pitch,
unsigned char blimit,
unsigned char limit,
unsigned char thresh,
unsigned char *v) {
unsigned char *us, *ud;
unsigned char *vs, *vd;
uint8x16_t qblimit, qlimit, qthresh, q3, q4;
uint8x16_t q5, q6, q7, q8, q9, q10;
uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
uint8x8_t d15, d16, d17, d18, d19, d20, d21;
uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
qblimit = vdupq_n_u8(blimit);
qlimit = vdupq_n_u8(limit);
qthresh = vdupq_n_u8(thresh);
us = u - 4;
vs = v - 4;
d6 = vld1_u8(us);
us += pitch;
d7 = vld1_u8(vs);
vs += pitch;
d8 = vld1_u8(us);
us += pitch;
d9 = vld1_u8(vs);
vs += pitch;
d10 = vld1_u8(us);
us += pitch;
d11 = vld1_u8(vs);
vs += pitch;
d12 = vld1_u8(us);
us += pitch;
d13 = vld1_u8(vs);
vs += pitch;
d14 = vld1_u8(us);
us += pitch;
d15 = vld1_u8(vs);
vs += pitch;
d16 = vld1_u8(us);
us += pitch;
d17 = vld1_u8(vs);
vs += pitch;
d18 = vld1_u8(us);
us += pitch;
d19 = vld1_u8(vs);
vs += pitch;
d20 = vld1_u8(us);
d21 = vld1_u8(vs);
q3 = vcombine_u8(d6, d7);
q4 = vcombine_u8(d8, d9);
q5 = vcombine_u8(d10, d11);
q6 = vcombine_u8(d12, d13);
q7 = vcombine_u8(d14, d15);
q8 = vcombine_u8(d16, d17);
q9 = vcombine_u8(d18, d19);
q10 = vcombine_u8(d20, d21);
q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
vreinterpretq_u16_u32(q2tmp2.val[0]));
q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
vreinterpretq_u16_u32(q2tmp3.val[0]));
q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
vreinterpretq_u16_u32(q2tmp2.val[1]));
q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
vreinterpretq_u16_u32(q2tmp3.val[1]));
q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
vreinterpretq_u8_u16(q2tmp5.val[0]));
q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
vreinterpretq_u8_u16(q2tmp5.val[1]));
q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
vreinterpretq_u8_u16(q2tmp7.val[0]));
q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
vreinterpretq_u8_u16(q2tmp7.val[1]));
q3 = q2tmp8.val[0];
q4 = q2tmp8.val[1];
q5 = q2tmp9.val[0];
q6 = q2tmp9.val[1];
q7 = q2tmp10.val[0];
q8 = q2tmp10.val[1];
q9 = q2tmp11.val[0];
q10 = q2tmp11.val[1];
vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
q5, q6, q7, q8, q9, q10,
&q4, &q5, &q6, &q7, &q8, &q9);
q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
vreinterpretq_u16_u32(q2tmp2.val[0]));
q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
vreinterpretq_u16_u32(q2tmp3.val[0]));
q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
vreinterpretq_u16_u32(q2tmp2.val[1]));
q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
vreinterpretq_u16_u32(q2tmp3.val[1]));
q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
vreinterpretq_u8_u16(q2tmp5.val[0]));
q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
vreinterpretq_u8_u16(q2tmp5.val[1]));
q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
vreinterpretq_u8_u16(q2tmp7.val[0]));
q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
vreinterpretq_u8_u16(q2tmp7.val[1]));
q3 = q2tmp8.val[0];
q4 = q2tmp8.val[1];
q5 = q2tmp9.val[0];
q6 = q2tmp9.val[1];
q7 = q2tmp10.val[0];
q8 = q2tmp10.val[1];
q9 = q2tmp11.val[0];
q10 = q2tmp11.val[1];
ud = u - 4;
vst1_u8(ud, vget_low_u8(q3));
ud += pitch;
vst1_u8(ud, vget_low_u8(q4));
ud += pitch;
vst1_u8(ud, vget_low_u8(q5));
ud += pitch;
vst1_u8(ud, vget_low_u8(q6));
ud += pitch;
vst1_u8(ud, vget_low_u8(q7));
ud += pitch;
vst1_u8(ud, vget_low_u8(q8));
ud += pitch;
vst1_u8(ud, vget_low_u8(q9));
ud += pitch;
vst1_u8(ud, vget_low_u8(q10));
vd = v - 4;
vst1_u8(vd, vget_high_u8(q3));
vd += pitch;
vst1_u8(vd, vget_high_u8(q4));
vd += pitch;
vst1_u8(vd, vget_high_u8(q5));
vd += pitch;
vst1_u8(vd, vget_high_u8(q6));
vd += pitch;
vst1_u8(vd, vget_high_u8(q7));
vd += pitch;
vst1_u8(vd, vget_high_u8(q8));
vd += pitch;
vst1_u8(vd, vget_high_u8(q9));
vd += pitch;
vst1_u8(vd, vget_high_u8(q10));
return;
}

View File

@ -1,123 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
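// Q16 fixed-point constants for the VP8 inverse DCT:
// cospi8sqrt2minus1 = round((sqrt(2) * cos(pi/8) - 1) * 65536) = 20091
// sinpi8sqrt2       = round( sqrt(2) * sin(pi/8)      * 65536) = 35468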
static const int16_t cospi8sqrt2minus1 = 20091;
static const int16_t sinpi8sqrt2 = 35468;
void vp8_short_idct4x4llm_neon(
int16_t *input,
unsigned char *pred_ptr,
int pred_stride,
unsigned char *dst_ptr,
int dst_stride) {
int i;
uint32x2_t d6u32 = vdup_n_u32(0);
uint8x8_t d1u8;
int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
uint16x8_t q1u16;
int16x8_t q1s16, q2s16, q3s16, q4s16;
int32x2x2_t v2tmp0, v2tmp1;
int16x4x2_t v2tmp2, v2tmp3;
d2 = vld1_s16(input);
d3 = vld1_s16(input + 4);
d4 = vld1_s16(input + 8);
d5 = vld1_s16(input + 12);
// 1st for loop
q1s16 = vcombine_s16(d2, d4); // Swap d3 d4 here
q2s16 = vcombine_s16(d3, d5);
q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);
d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // a1
d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // b1
q3s16 = vshrq_n_s16(q3s16, 1);
q4s16 = vshrq_n_s16(q4s16, 1);
q3s16 = vqaddq_s16(q3s16, q2s16);
q4s16 = vqaddq_s16(q4s16, q2s16);
d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16)); // c1
d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16)); // d1
d2 = vqadd_s16(d12, d11);
d3 = vqadd_s16(d13, d10);
d4 = vqsub_s16(d13, d10);
d5 = vqsub_s16(d12, d11);
v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
vreinterpret_s16_s32(v2tmp1.val[0]));
v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
vreinterpret_s16_s32(v2tmp1.val[1]));
// 2nd for loop
q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp3.val[0]);
q2s16 = vcombine_s16(v2tmp2.val[1], v2tmp3.val[1]);
q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);
d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // a1
d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16)); // b1
q3s16 = vshrq_n_s16(q3s16, 1);
q4s16 = vshrq_n_s16(q4s16, 1);
q3s16 = vqaddq_s16(q3s16, q2s16);
q4s16 = vqaddq_s16(q4s16, q2s16);
d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16)); // c1
d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16)); // d1
d2 = vqadd_s16(d12, d11);
d3 = vqadd_s16(d13, d10);
d4 = vqsub_s16(d13, d10);
d5 = vqsub_s16(d12, d11);
d2 = vrshr_n_s16(d2, 3);
d3 = vrshr_n_s16(d3, 3);
d4 = vrshr_n_s16(d4, 3);
d5 = vrshr_n_s16(d5, 3);
v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
vreinterpret_s16_s32(v2tmp1.val[0]));
v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
vreinterpret_s16_s32(v2tmp1.val[1]));
q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp2.val[1]);
q2s16 = vcombine_s16(v2tmp3.val[0], v2tmp3.val[1]);
// add the inverse transform result to the prediction and store, two rows per iteration
for (i = 0; i < 2; i++, q1s16 = q2s16) {
d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 0);
pred_ptr += pred_stride;
d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 1);
pred_ptr += pred_stride;
q1u16 = vaddw_u8(vreinterpretq_u16_s16(q1s16),
vreinterpret_u8_u32(d6u32));
d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 0);
dst_ptr += dst_stride;
vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 1);
dst_ptr += dst_stride;
}
return;
}

File diff suppressed because it is too large

View File

@ -1,550 +0,0 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
#include "./vpx_config.h"
#include "vpx_ports/arm.h"
static INLINE void vp8_loop_filter_neon(
uint8x16_t qblimit, // flimit
uint8x16_t qlimit, // limit
uint8x16_t qthresh, // thresh
uint8x16_t q3, // p3
uint8x16_t q4, // p2
uint8x16_t q5, // p1
uint8x16_t q6, // p0
uint8x16_t q7, // q0
uint8x16_t q8, // q1
uint8x16_t q9, // q2
uint8x16_t q10, // q3
uint8x16_t *q5r, // p1
uint8x16_t *q6r, // p0
uint8x16_t *q7r, // q0
uint8x16_t *q8r) { // q1
uint8x16_t q0u8, q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8;
int16x8_t q2s16, q11s16;
uint16x8_t q4u16;
int8x16_t q1s8, q2s8, q10s8, q11s8, q12s8, q13s8;
int8x8_t d2s8, d3s8;
q11u8 = vabdq_u8(q3, q4);
q12u8 = vabdq_u8(q4, q5);
q13u8 = vabdq_u8(q5, q6);
q14u8 = vabdq_u8(q8, q7);
q3 = vabdq_u8(q9, q8);
q4 = vabdq_u8(q10, q9);
q11u8 = vmaxq_u8(q11u8, q12u8);
q12u8 = vmaxq_u8(q13u8, q14u8);
q3 = vmaxq_u8(q3, q4);
q15u8 = vmaxq_u8(q11u8, q12u8);
q9 = vabdq_u8(q6, q7);
// vp8_hevmask
q13u8 = vcgtq_u8(q13u8, qthresh);
q14u8 = vcgtq_u8(q14u8, qthresh);
q15u8 = vmaxq_u8(q15u8, q3);
q2u8 = vabdq_u8(q5, q8);
q9 = vqaddq_u8(q9, q9);
q15u8 = vcgeq_u8(qlimit, q15u8);
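// q15u8 is the filter mask so far: max(|p3-p2|, |p2-p1|, |p1-p0|, |q1-q0|, |q2-q1|, |q3-q2|) <= limit;
// it is combined below with 2 * |p0 - q0| + |p1 - q1| / 2 <= blimit. q13u8/q14u8 form the hev mask.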
// vp8_filter() function
// convert to signed
q10 = vdupq_n_u8(0x80);
q8 = veorq_u8(q8, q10);
q7 = veorq_u8(q7, q10);
q6 = veorq_u8(q6, q10);
q5 = veorq_u8(q5, q10);
q2u8 = vshrq_n_u8(q2u8, 1);
q9 = vqaddq_u8(q9, q2u8);
q10 = vdupq_n_u8(3);
q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
vget_low_s8(vreinterpretq_s8_u8(q6)));
q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
vget_high_s8(vreinterpretq_s8_u8(q6)));
q9 = vcgeq_u8(qblimit, q9);
q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5),
vreinterpretq_s8_u8(q8));
q14u8 = vorrq_u8(q13u8, q14u8);
q4u16 = vmovl_u8(vget_low_u8(q10));
q2s16 = vmulq_s16(q2s16, vreinterpretq_s16_u16(q4u16));
q11s16 = vmulq_s16(q11s16, vreinterpretq_s16_u16(q4u16));
q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q14u8);
q15u8 = vandq_u8(q15u8, q9);
q1s8 = vreinterpretq_s8_u8(q1u8);
q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8));
q11s16 = vaddw_s8(q11s16, vget_high_s8(q1s8));
q9 = vdupq_n_u8(4);
// vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
d2s8 = vqmovn_s16(q2s16);
d3s8 = vqmovn_s16(q11s16);
q1s8 = vcombine_s8(d2s8, d3s8);
q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q15u8);
q1s8 = vreinterpretq_s8_u8(q1u8);
q2s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q10));
q1s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q9));
q2s8 = vshrq_n_s8(q2s8, 3);
q1s8 = vshrq_n_s8(q1s8, 3);
q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q2s8);
q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q1s8);
q1s8 = vrshrq_n_s8(q1s8, 1);
q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8);
q12s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q1s8);
q0u8 = vdupq_n_u8(0x80);
*q8r = veorq_u8(vreinterpretq_u8_s8(q12s8), q0u8);
*q7r = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
*q6r = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
*q5r = veorq_u8(vreinterpretq_u8_s8(q13s8), q0u8);
return;
}
void vp8_loop_filter_horizontal_edge_y_neon(
unsigned char *src,
int pitch,
unsigned char blimit,
unsigned char limit,
unsigned char thresh) {
uint8x16_t qblimit, qlimit, qthresh, q3, q4;
uint8x16_t q5, q6, q7, q8, q9, q10;
qblimit = vdupq_n_u8(blimit);
qlimit = vdupq_n_u8(limit);
qthresh = vdupq_n_u8(thresh);
src -= (pitch << 2);
q3 = vld1q_u8(src);
src += pitch;
q4 = vld1q_u8(src);
src += pitch;
q5 = vld1q_u8(src);
src += pitch;
q6 = vld1q_u8(src);
src += pitch;
q7 = vld1q_u8(src);
src += pitch;
q8 = vld1q_u8(src);
src += pitch;
q9 = vld1q_u8(src);
src += pitch;
q10 = vld1q_u8(src);
vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
q5, q6, q7, q8, q9, q10,
&q5, &q6, &q7, &q8);
src -= (pitch * 5);
vst1q_u8(src, q5);
src += pitch;
vst1q_u8(src, q6);
src += pitch;
vst1q_u8(src, q7);
src += pitch;
vst1q_u8(src, q8);
return;
}
void vp8_loop_filter_horizontal_edge_uv_neon(
unsigned char *u,
int pitch,
unsigned char blimit,
unsigned char limit,
unsigned char thresh,
unsigned char *v) {
uint8x16_t qblimit, qlimit, qthresh, q3, q4;
uint8x16_t q5, q6, q7, q8, q9, q10;
uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
uint8x8_t d15, d16, d17, d18, d19, d20, d21;
qblimit = vdupq_n_u8(blimit);
qlimit = vdupq_n_u8(limit);
qthresh = vdupq_n_u8(thresh);
u -= (pitch << 2);
v -= (pitch << 2);
d6 = vld1_u8(u);
u += pitch;
d7 = vld1_u8(v);
v += pitch;
d8 = vld1_u8(u);
u += pitch;
d9 = vld1_u8(v);
v += pitch;
d10 = vld1_u8(u);
u += pitch;
d11 = vld1_u8(v);
v += pitch;
d12 = vld1_u8(u);
u += pitch;
d13 = vld1_u8(v);
v += pitch;
d14 = vld1_u8(u);
u += pitch;
d15 = vld1_u8(v);
v += pitch;
d16 = vld1_u8(u);
u += pitch;
d17 = vld1_u8(v);
v += pitch;
d18 = vld1_u8(u);
u += pitch;
d19 = vld1_u8(v);
v += pitch;
d20 = vld1_u8(u);
d21 = vld1_u8(v);
q3 = vcombine_u8(d6, d7);
q4 = vcombine_u8(d8, d9);
q5 = vcombine_u8(d10, d11);
q6 = vcombine_u8(d12, d13);
q7 = vcombine_u8(d14, d15);
q8 = vcombine_u8(d16, d17);
q9 = vcombine_u8(d18, d19);
q10 = vcombine_u8(d20, d21);
vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
q5, q6, q7, q8, q9, q10,
&q5, &q6, &q7, &q8);
u -= (pitch * 5);
vst1_u8(u, vget_low_u8(q5));
u += pitch;
vst1_u8(u, vget_low_u8(q6));
u += pitch;
vst1_u8(u, vget_low_u8(q7));
u += pitch;
vst1_u8(u, vget_low_u8(q8));
v -= (pitch * 5);
vst1_u8(v, vget_high_u8(q5));
v += pitch;
vst1_u8(v, vget_high_u8(q6));
v += pitch;
vst1_u8(v, vget_high_u8(q7));
v += pitch;
vst1_u8(v, vget_high_u8(q8));
return;
}
static INLINE void write_4x8(unsigned char *dst, int pitch,
const uint8x8x4_t result) {
#ifdef VPX_INCOMPATIBLE_GCC
/*
* uint8x8x4_t result
00 01 02 03 | 04 05 06 07
10 11 12 13 | 14 15 16 17
20 21 22 23 | 24 25 26 27
30 31 32 33 | 34 35 36 37
---
* after vtrn_u16
00 01 20 21 | 04 05 24 25
02 03 22 23 | 06 07 26 27
10 11 30 31 | 14 15 34 35
12 13 32 33 | 16 17 36 37
---
* after vtrn_u8
00 10 20 30 | 04 14 24 34
01 11 21 31 | 05 15 25 35
02 12 22 32 | 06 16 26 36
03 13 23 33 | 07 17 27 37
*/
const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[0]),
vreinterpret_u16_u8(result.val[2]));
const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[1]),
vreinterpret_u16_u8(result.val[3]));
const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
vreinterpret_u8_u16(r13_u16.val[0]));
const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
vreinterpret_u8_u16(r13_u16.val[1]));
const uint32x2_t x_0_4 = vreinterpret_u32_u8(r01_u8.val[0]);
const uint32x2_t x_1_5 = vreinterpret_u32_u8(r01_u8.val[1]);
const uint32x2_t x_2_6 = vreinterpret_u32_u8(r23_u8.val[0]);
const uint32x2_t x_3_7 = vreinterpret_u32_u8(r23_u8.val[1]);
vst1_lane_u32((uint32_t *)dst, x_0_4, 0);
dst += pitch;
vst1_lane_u32((uint32_t *)dst, x_1_5, 0);
dst += pitch;
vst1_lane_u32((uint32_t *)dst, x_2_6, 0);
dst += pitch;
vst1_lane_u32((uint32_t *)dst, x_3_7, 0);
dst += pitch;
vst1_lane_u32((uint32_t *)dst, x_0_4, 1);
dst += pitch;
vst1_lane_u32((uint32_t *)dst, x_1_5, 1);
dst += pitch;
vst1_lane_u32((uint32_t *)dst, x_2_6, 1);
dst += pitch;
vst1_lane_u32((uint32_t *)dst, x_3_7, 1);
#else
vst4_lane_u8(dst, result, 0);
dst += pitch;
vst4_lane_u8(dst, result, 1);
dst += pitch;
vst4_lane_u8(dst, result, 2);
dst += pitch;
vst4_lane_u8(dst, result, 3);
dst += pitch;
vst4_lane_u8(dst, result, 4);
dst += pitch;
vst4_lane_u8(dst, result, 5);
dst += pitch;
vst4_lane_u8(dst, result, 6);
dst += pitch;
vst4_lane_u8(dst, result, 7);
#endif // VPX_INCOMPATIBLE_GCC
}
void vp8_loop_filter_vertical_edge_y_neon(
unsigned char *src,
int pitch,
unsigned char blimit,
unsigned char limit,
unsigned char thresh) {
unsigned char *s, *d;
uint8x16_t qblimit, qlimit, qthresh, q3, q4;
uint8x16_t q5, q6, q7, q8, q9, q10;
uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
uint8x8_t d15, d16, d17, d18, d19, d20, d21;
uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
uint8x8x4_t q4ResultH, q4ResultL;
qblimit = vdupq_n_u8(blimit);
qlimit = vdupq_n_u8(limit);
qthresh = vdupq_n_u8(thresh);
s = src - 4;
d6 = vld1_u8(s);
s += pitch;
d8 = vld1_u8(s);
s += pitch;
d10 = vld1_u8(s);
s += pitch;
d12 = vld1_u8(s);
s += pitch;
d14 = vld1_u8(s);
s += pitch;
d16 = vld1_u8(s);
s += pitch;
d18 = vld1_u8(s);
s += pitch;
d20 = vld1_u8(s);
s += pitch;
d7 = vld1_u8(s);
s += pitch;
d9 = vld1_u8(s);
s += pitch;
d11 = vld1_u8(s);
s += pitch;
d13 = vld1_u8(s);
s += pitch;
d15 = vld1_u8(s);
s += pitch;
d17 = vld1_u8(s);
s += pitch;
d19 = vld1_u8(s);
s += pitch;
d21 = vld1_u8(s);
q3 = vcombine_u8(d6, d7);
q4 = vcombine_u8(d8, d9);
q5 = vcombine_u8(d10, d11);
q6 = vcombine_u8(d12, d13);
q7 = vcombine_u8(d14, d15);
q8 = vcombine_u8(d16, d17);
q9 = vcombine_u8(d18, d19);
q10 = vcombine_u8(d20, d21);
q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
vreinterpretq_u16_u32(q2tmp2.val[0]));
q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
vreinterpretq_u16_u32(q2tmp3.val[0]));
q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
vreinterpretq_u16_u32(q2tmp2.val[1]));
q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
vreinterpretq_u16_u32(q2tmp3.val[1]));
q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
vreinterpretq_u8_u16(q2tmp5.val[0]));
q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
vreinterpretq_u8_u16(q2tmp5.val[1]));
q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
vreinterpretq_u8_u16(q2tmp7.val[0]));
q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
vreinterpretq_u8_u16(q2tmp7.val[1]));
q3 = q2tmp8.val[0];
q4 = q2tmp8.val[1];
q5 = q2tmp9.val[0];
q6 = q2tmp9.val[1];
q7 = q2tmp10.val[0];
q8 = q2tmp10.val[1];
q9 = q2tmp11.val[0];
q10 = q2tmp11.val[1];
vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
q5, q6, q7, q8, q9, q10,
&q5, &q6, &q7, &q8);
q4ResultL.val[0] = vget_low_u8(q5); // d10
q4ResultL.val[1] = vget_low_u8(q6); // d12
q4ResultL.val[2] = vget_low_u8(q7); // d14
q4ResultL.val[3] = vget_low_u8(q8); // d16
q4ResultH.val[0] = vget_high_u8(q5); // d11
q4ResultH.val[1] = vget_high_u8(q6); // d13
q4ResultH.val[2] = vget_high_u8(q7); // d15
q4ResultH.val[3] = vget_high_u8(q8); // d17
d = src - 2;
write_4x8(d, pitch, q4ResultL);
d += pitch * 8;
write_4x8(d, pitch, q4ResultH);
}
void vp8_loop_filter_vertical_edge_uv_neon(
unsigned char *u,
int pitch,
unsigned char blimit,
unsigned char limit,
unsigned char thresh,
unsigned char *v) {
unsigned char *us, *ud;
unsigned char *vs, *vd;
uint8x16_t qblimit, qlimit, qthresh, q3, q4;
uint8x16_t q5, q6, q7, q8, q9, q10;
uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
uint8x8_t d15, d16, d17, d18, d19, d20, d21;
uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
uint8x8x4_t q4ResultH, q4ResultL;
qblimit = vdupq_n_u8(blimit);
qlimit = vdupq_n_u8(limit);
qthresh = vdupq_n_u8(thresh);
us = u - 4;
d6 = vld1_u8(us);
us += pitch;
d8 = vld1_u8(us);
us += pitch;
d10 = vld1_u8(us);
us += pitch;
d12 = vld1_u8(us);
us += pitch;
d14 = vld1_u8(us);
us += pitch;
d16 = vld1_u8(us);
us += pitch;
d18 = vld1_u8(us);
us += pitch;
d20 = vld1_u8(us);
vs = v - 4;
d7 = vld1_u8(vs);
vs += pitch;
d9 = vld1_u8(vs);
vs += pitch;
d11 = vld1_u8(vs);
vs += pitch;
d13 = vld1_u8(vs);
vs += pitch;
d15 = vld1_u8(vs);
vs += pitch;
d17 = vld1_u8(vs);
vs += pitch;
d19 = vld1_u8(vs);
vs += pitch;
d21 = vld1_u8(vs);
q3 = vcombine_u8(d6, d7);
q4 = vcombine_u8(d8, d9);
q5 = vcombine_u8(d10, d11);
q6 = vcombine_u8(d12, d13);
q7 = vcombine_u8(d14, d15);
q8 = vcombine_u8(d16, d17);
q9 = vcombine_u8(d18, d19);
q10 = vcombine_u8(d20, d21);
q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
vreinterpretq_u16_u32(q2tmp2.val[0]));
q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
vreinterpretq_u16_u32(q2tmp3.val[0]));
q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
vreinterpretq_u16_u32(q2tmp2.val[1]));
q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
vreinterpretq_u16_u32(q2tmp3.val[1]));
q2tmp8 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
vreinterpretq_u8_u16(q2tmp5.val[0]));
q2tmp9 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
vreinterpretq_u8_u16(q2tmp5.val[1]));
q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
vreinterpretq_u8_u16(q2tmp7.val[0]));
q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
vreinterpretq_u8_u16(q2tmp7.val[1]));
q3 = q2tmp8.val[0];
q4 = q2tmp8.val[1];
q5 = q2tmp9.val[0];
q6 = q2tmp9.val[1];
q7 = q2tmp10.val[0];
q8 = q2tmp10.val[1];
q9 = q2tmp11.val[0];
q10 = q2tmp11.val[1];
vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
q5, q6, q7, q8, q9, q10,
&q5, &q6, &q7, &q8);
q4ResultL.val[0] = vget_low_u8(q5); // d10
q4ResultL.val[1] = vget_low_u8(q6); // d12
q4ResultL.val[2] = vget_low_u8(q7); // d14
q4ResultL.val[3] = vget_low_u8(q8); // d16
ud = u - 2;
write_4x8(ud, pitch, q4ResultL);
q4ResultH.val[0] = vget_high_u8(q5); // d11
q4ResultH.val[1] = vget_high_u8(q6); // d13
q4ResultH.val[2] = vget_high_u8(q7); // d15
q4ResultH.val[3] = vget_high_u8(q8); // d17
vd = v - 2;
write_4x8(vd, pitch, q4ResultH);
}

View File

@ -1,22 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "blockd.h"
#include "vpx_mem/vpx_mem.h"
const unsigned char vp8_block2left[25] =
{
0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
};
const unsigned char vp8_block2above[25] =
{
0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8
};

View File

@ -1,312 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_BLOCKD_H_
#define VP8_COMMON_BLOCKD_H_
void vpx_log(const char *format, ...);
#include "vpx_config.h"
#include "vpx_scale/yv12config.h"
#include "mv.h"
#include "treecoder.h"
#include "vpx_ports/mem.h"
#ifdef __cplusplus
extern "C" {
#endif
/*#define DCPRED 1*/
#define DCPREDSIMTHRESH 0
#define DCPREDCNTTHRESH 3
#define MB_FEATURE_TREE_PROBS 3
#define MAX_MB_SEGMENTS 4
#define MAX_REF_LF_DELTAS 4
#define MAX_MODE_LF_DELTAS 4
/* Segment Feature Masks */
#define SEGMENT_DELTADATA 0
#define SEGMENT_ABSDATA 1
typedef struct
{
int r, c;
} POS;
#define PLANE_TYPE_Y_NO_DC 0
#define PLANE_TYPE_Y2 1
#define PLANE_TYPE_UV 2
#define PLANE_TYPE_Y_WITH_DC 3
typedef char ENTROPY_CONTEXT;
typedef struct
{
ENTROPY_CONTEXT y1[4];
ENTROPY_CONTEXT u[2];
ENTROPY_CONTEXT v[2];
ENTROPY_CONTEXT y2;
} ENTROPY_CONTEXT_PLANES;
extern const unsigned char vp8_block2left[25];
extern const unsigned char vp8_block2above[25];
#define VP8_COMBINEENTROPYCONTEXTS( Dest, A, B) \
Dest = (A)+(B);
typedef enum
{
KEY_FRAME = 0,
INTER_FRAME = 1
} FRAME_TYPE;
typedef enum
{
DC_PRED, /* average of above and left pixels */
V_PRED, /* vertical prediction */
H_PRED, /* horizontal prediction */
TM_PRED, /* Truemotion prediction */
B_PRED, /* block based prediction, each block has its own prediction mode */
NEARESTMV,
NEARMV,
ZEROMV,
NEWMV,
SPLITMV,
MB_MODE_COUNT
} MB_PREDICTION_MODE;
/* Macroblock level features */
typedef enum
{
MB_LVL_ALT_Q = 0, /* Use alternate Quantizer .... */
MB_LVL_ALT_LF = 1, /* Use alternate loop filter value... */
MB_LVL_MAX = 2 /* Number of MB level features supported */
} MB_LVL_FEATURES;
/* Segment Feature Masks */
#define SEGMENT_ALTQ 0x01
#define SEGMENT_ALT_LF 0x02
#define VP8_YMODES (B_PRED + 1)
#define VP8_UV_MODES (TM_PRED + 1)
#define VP8_MVREFS (1 + SPLITMV - NEARESTMV)
typedef enum
{
B_DC_PRED, /* average of above and left pixels */
B_TM_PRED,
B_VE_PRED, /* vertical prediction */
B_HE_PRED, /* horizontal prediction */
B_LD_PRED,
B_RD_PRED,
B_VR_PRED,
B_VL_PRED,
B_HD_PRED,
B_HU_PRED,
LEFT4X4,
ABOVE4X4,
ZERO4X4,
NEW4X4,
B_MODE_COUNT
} B_PREDICTION_MODE;
#define VP8_BINTRAMODES (B_HU_PRED + 1) /* 10 */
#define VP8_SUBMVREFS (1 + NEW4X4 - LEFT4X4)
/* For keyframes, intra block modes are predicted by the (already decoded)
modes for the Y blocks to the left and above us; for interframes, there
is a single probability table. */
union b_mode_info
{
B_PREDICTION_MODE as_mode;
int_mv mv;
};
typedef enum
{
INTRA_FRAME = 0,
LAST_FRAME = 1,
GOLDEN_FRAME = 2,
ALTREF_FRAME = 3,
MAX_REF_FRAMES = 4
} MV_REFERENCE_FRAME;
typedef struct
{
uint8_t mode, uv_mode;
uint8_t ref_frame;
uint8_t is_4x4;
int_mv mv;
uint8_t partitioning;
uint8_t mb_skip_coeff; /* does this mb have coefficients at all; 1 = no coefficients, 0 = need to decode tokens */
uint8_t need_to_clamp_mvs;
uint8_t segment_id; /* Which set of segmentation parameters should be used for this MB */
} MB_MODE_INFO;
typedef struct modeinfo
{
MB_MODE_INFO mbmi;
union b_mode_info bmi[16];
} MODE_INFO;
#if CONFIG_MULTI_RES_ENCODING
/* The mb-level information that needs to be stored for the higher-resolution encoder */
typedef struct
{
MB_PREDICTION_MODE mode;
MV_REFERENCE_FRAME ref_frame;
int_mv mv;
int dissim; /* dissimilarity level of the macroblock */
} LOWER_RES_MB_INFO;
/* The frame-level information that needs to be stored for the higher-resolution encoder */
typedef struct
{
FRAME_TYPE frame_type;
int is_frame_dropped;
// The frame rate for the lowest resolution.
double low_res_framerate;
/* The frame number of each reference frame */
unsigned int low_res_ref_frames[MAX_REF_FRAMES];
// The video frame counter value for the key frame, for lowest resolution.
unsigned int key_frame_counter_value;
LOWER_RES_MB_INFO *mb_info;
} LOWER_RES_FRAME_INFO;
#endif
typedef struct blockd
{
short *qcoeff;
short *dqcoeff;
unsigned char *predictor;
short *dequant;
int offset;
char *eob;
union b_mode_info bmi;
} BLOCKD;
typedef void (*vp8_subpix_fn_t)(unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch);
typedef struct macroblockd
{
DECLARE_ALIGNED(16, unsigned char, predictor[384]);
DECLARE_ALIGNED(16, short, qcoeff[400]);
DECLARE_ALIGNED(16, short, dqcoeff[400]);
DECLARE_ALIGNED(16, char, eobs[25]);
DECLARE_ALIGNED(16, short, dequant_y1[16]);
DECLARE_ALIGNED(16, short, dequant_y1_dc[16]);
DECLARE_ALIGNED(16, short, dequant_y2[16]);
DECLARE_ALIGNED(16, short, dequant_uv[16]);
/* 16 Y blocks, 4 U, 4 V, 1 DC 2nd order block, each with 16 entries. */
BLOCKD block[25];
int fullpixel_mask;
YV12_BUFFER_CONFIG pre; /* Filtered copy of previous frame reconstruction */
YV12_BUFFER_CONFIG dst;
MODE_INFO *mode_info_context;
int mode_info_stride;
FRAME_TYPE frame_type;
int up_available;
int left_available;
unsigned char *recon_above[3];
unsigned char *recon_left[3];
int recon_left_stride[2];
/* Y,U,V,Y2 */
ENTROPY_CONTEXT_PLANES *above_context;
ENTROPY_CONTEXT_PLANES *left_context;
/* 0 indicates segmentation at MB level is not enabled. Otherwise the individual bits indicate which features are active. */
unsigned char segmentation_enabled;
/* 0 (do not update) 1 (update) the macroblock segmentation map. */
unsigned char update_mb_segmentation_map;
/* 0 (do not update) 1 (update) the macroblock segmentation feature data. */
unsigned char update_mb_segmentation_data;
/* 0 (delta coded) 1 (absolute): how the macroblock segmentation feature data is coded. */
unsigned char mb_segement_abs_delta;
/* Per frame flags that define which MB level features (such as quantizer or loop filter level) */
/* are enabled, and when enabled, the probabilities used to decode the per MB flags in MB_MODE_INFO */
vp8_prob mb_segment_tree_probs[MB_FEATURE_TREE_PROBS]; /* Probability Tree used to code Segment number */
signed char segment_feature_data[MB_LVL_MAX][MAX_MB_SEGMENTS]; /* Segment parameters */
/* mode_based Loop filter adjustment */
unsigned char mode_ref_lf_delta_enabled;
unsigned char mode_ref_lf_delta_update;
/* Delta values have the range +/- MAX_LOOP_FILTER */
signed char last_ref_lf_deltas[MAX_REF_LF_DELTAS]; /* 0 = Intra, Last, GF, ARF */
signed char ref_lf_deltas[MAX_REF_LF_DELTAS]; /* 0 = Intra, Last, GF, ARF */
signed char last_mode_lf_deltas[MAX_MODE_LF_DELTAS]; /* 0 = BPRED, ZERO_MV, MV, SPLIT */
signed char mode_lf_deltas[MAX_MODE_LF_DELTAS]; /* 0 = BPRED, ZERO_MV, MV, SPLIT */
/* Distance of MB away from frame edges */
int mb_to_left_edge;
int mb_to_right_edge;
int mb_to_top_edge;
int mb_to_bottom_edge;
vp8_subpix_fn_t subpixel_predict;
vp8_subpix_fn_t subpixel_predict8x4;
vp8_subpix_fn_t subpixel_predict8x8;
vp8_subpix_fn_t subpixel_predict16x16;
void *current_bc;
int corrupted;
#if ARCH_X86 || ARCH_X86_64
/* This is an intermediate buffer currently used in sub-pixel motion search
* to keep a copy of the reference area. This buffer can be used for other
* purposes.
*/
DECLARE_ALIGNED(32, unsigned char, y_buf[22*32]);
#endif
} MACROBLOCKD;
extern void vp8_build_block_doffsets(MACROBLOCKD *x);
extern void vp8_setup_block_dptrs(MACROBLOCKD *x);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_BLOCKD_H_

View File

@ -1,197 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_COEFUPDATEPROBS_H_
#define VP8_COMMON_COEFUPDATEPROBS_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Update probabilities for the nodes in the token entropy tree.
Generated file included by entropy.c */
const vp8_prob vp8_coef_update_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES] =
{
{
{
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255, },
{249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255, },
{234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255, },
{250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255, },
{254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
},
{
{
{217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255, },
{234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255, },
},
{
{255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
{250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
},
{
{
{186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255, },
{234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255, },
{251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255, },
},
{
{255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255, },
},
{
{255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
},
{
{
{248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255, },
{248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
{246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
{252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255, },
},
{
{255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255, },
{248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
{253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
{252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255, },
{250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
{
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, },
},
},
};
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_COEFUPDATEPROBS_H_

View File

@ -1,48 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_COMMON_H_
#define VP8_COMMON_COMMON_H_
#include <assert.h>
/* Interface header for common constant data structures and lookup tables */
#include "vpx_mem/vpx_mem.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Only need this for fixed-size arrays, for structs just assign. */
#define vp8_copy( Dest, Src) { \
assert( sizeof( Dest) == sizeof( Src)); \
memcpy( Dest, Src, sizeof( Src)); \
}
/* Use this for variably-sized arrays. */
#define vp8_copy_array( Dest, Src, N) { \
assert( sizeof( *Dest) == sizeof( *Src)); \
memcpy( Dest, Src, N * sizeof( *Src)); \
}
#define vp8_zero( Dest) memset( &Dest, 0, sizeof( Dest));
#define vp8_zero_array( Dest, N) memset( Dest, 0, N * sizeof( *Dest));
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_COMMON_H_

View File

@ -1,32 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <string.h>
#include "./vp8_rtcd.h"
#include "vpx/vpx_integer.h"
/* Copy 2 macroblocks to a buffer */
void vp8_copy32xn_c(const unsigned char *src_ptr, int src_stride,
unsigned char *dst_ptr, int dst_stride,
int height)
{
int r;
for (r = 0; r < height; r++)
{
memcpy(dst_ptr, src_ptr, 32);
src_ptr += src_stride;
dst_ptr += dst_stride;
}
}

View File

@ -1,155 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#include "blockd.h"
void vp8_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols, int frame)
{
int mb_row;
int mb_col;
int mb_index = 0;
FILE *mvs = fopen("mvs.stt", "a");
/* print out the macroblock Y modes */
mb_index = 0;
fprintf(mvs, "Mb Modes for Frame %d\n", frame);
for (mb_row = 0; mb_row < rows; mb_row++)
{
for (mb_col = 0; mb_col < cols; mb_col++)
{
fprintf(mvs, "%2d ", mi[mb_index].mbmi.mode);
mb_index++;
}
fprintf(mvs, "\n");
mb_index++;
}
fprintf(mvs, "\n");
mb_index = 0;
fprintf(mvs, "Mb mv ref for Frame %d\n", frame);
for (mb_row = 0; mb_row < rows; mb_row++)
{
for (mb_col = 0; mb_col < cols; mb_col++)
{
fprintf(mvs, "%2d ", mi[mb_index].mbmi.ref_frame);
mb_index++;
}
fprintf(mvs, "\n");
mb_index++;
}
fprintf(mvs, "\n");
/* print out the macroblock UV modes */
mb_index = 0;
fprintf(mvs, "UV Modes for Frame %d\n", frame);
for (mb_row = 0; mb_row < rows; mb_row++)
{
for (mb_col = 0; mb_col < cols; mb_col++)
{
fprintf(mvs, "%2d ", mi[mb_index].mbmi.uv_mode);
mb_index++;
}
mb_index++;
fprintf(mvs, "\n");
}
fprintf(mvs, "\n");
/* print out the block modes */
fprintf(mvs, "Mbs for Frame %d\n", frame);
{
int b_row;
for (b_row = 0; b_row < 4 * rows; b_row++)
{
int b_col;
int bindex;
for (b_col = 0; b_col < 4 * cols; b_col++)
{
mb_index = (b_row >> 2) * (cols + 1) + (b_col >> 2);
bindex = (b_row & 3) * 4 + (b_col & 3);
if (mi[mb_index].mbmi.mode == B_PRED)
fprintf(mvs, "%2d ", mi[mb_index].bmi[bindex].as_mode);
else
fprintf(mvs, "xx ");
}
fprintf(mvs, "\n");
}
}
fprintf(mvs, "\n");
/* print out the macroblock mvs */
mb_index = 0;
fprintf(mvs, "MVs for Frame %d\n", frame);
for (mb_row = 0; mb_row < rows; mb_row++)
{
for (mb_col = 0; mb_col < cols; mb_col++)
{
fprintf(mvs, "%5d:%-5d", mi[mb_index].mbmi.mv.as_mv.row / 2, mi[mb_index].mbmi.mv.as_mv.col / 2);
mb_index++;
}
mb_index++;
fprintf(mvs, "\n");
}
fprintf(mvs, "\n");
/* print out the block modes */
fprintf(mvs, "MVs for Frame %d\n", frame);
{
int b_row;
for (b_row = 0; b_row < 4 * rows; b_row++)
{
int b_col;
int bindex;
for (b_col = 0; b_col < 4 * cols; b_col++)
{
mb_index = (b_row >> 2) * (cols + 1) + (b_col >> 2);
bindex = (b_row & 3) * 4 + (b_col & 3);
fprintf(mvs, "%3d:%-3d ", mi[mb_index].bmi[bindex].mv.as_mv.row, mi[mb_index].bmi[bindex].mv.as_mv.col);
}
fprintf(mvs, "\n");
}
}
fprintf(mvs, "\n");
fclose(mvs);
}

View File

@ -1,200 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_DEFAULT_COEF_PROBS_H_
#define VP8_COMMON_DEFAULT_COEF_PROBS_H_
#ifdef __cplusplus
extern "C" {
#endif
/*Generated file, included by entropy.c*/
static const vp8_prob default_coef_probs [BLOCK_TYPES]
[COEF_BANDS]
[PREV_COEF_CONTEXTS]
[ENTROPY_NODES] =
{
{ /* Block Type ( 0 ) */
{ /* Coeff Band ( 0 )*/
{ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }
},
{ /* Coeff Band ( 1 )*/
{ 253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128 },
{ 189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128 },
{ 106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128 }
},
{ /* Coeff Band ( 2 )*/
{ 1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128 },
{ 181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128 },
{ 78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128 }
},
{ /* Coeff Band ( 3 )*/
{ 1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128 },
{ 184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128 },
{ 77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128 }
},
{ /* Coeff Band ( 4 )*/
{ 1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128 },
{ 170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128 },
{ 37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128 }
},
{ /* Coeff Band ( 5 )*/
{ 1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128 },
{ 207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128 },
{ 102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128 }
},
{ /* Coeff Band ( 6 )*/
{ 1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128 },
{ 177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128 },
{ 80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128 }
},
{ /* Coeff Band ( 7 )*/
{ 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }
}
},
{ /* Block Type ( 1 ) */
{ /* Coeff Band ( 0 )*/
{ 198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62 },
{ 131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1 },
{ 68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128 }
},
{ /* Coeff Band ( 1 )*/
{ 1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128 },
{ 184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128 },
{ 81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128 }
},
{ /* Coeff Band ( 2 )*/
{ 1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128 },
{ 99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128 },
{ 23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128 }
},
{ /* Coeff Band ( 3 )*/
{ 1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128 },
{ 109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128 },
{ 44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128 }
},
{ /* Coeff Band ( 4 )*/
{ 1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128 },
{ 94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128 },
{ 22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128 }
},
{ /* Coeff Band ( 5 )*/
{ 1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128 },
{ 124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128 },
{ 35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128 }
},
{ /* Coeff Band ( 6 )*/
{ 1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128 },
{ 121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128 },
{ 45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128 }
},
{ /* Coeff Band ( 7 )*/
{ 1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128 },
{ 203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128 },
{ 137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128 }
}
},
{ /* Block Type ( 2 ) */
{ /* Coeff Band ( 0 )*/
{ 253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128 },
{ 175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128 },
{ 73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128 }
},
{ /* Coeff Band ( 1 )*/
{ 1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128 },
{ 239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128 },
{ 155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128 }
},
{ /* Coeff Band ( 2 )*/
{ 1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128 },
{ 201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128 },
{ 69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128 }
},
{ /* Coeff Band ( 3 )*/
{ 1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128 },
{ 223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128 },
{ 141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128 }
},
{ /* Coeff Band ( 4 )*/
{ 1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128 },
{ 190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128 },
{ 149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }
},
{ /* Coeff Band ( 5 )*/
{ 1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128 }
},
{ /* Coeff Band ( 6 )*/
{ 1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128 },
{ 213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128 },
{ 55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128 }
},
{ /* Coeff Band ( 7 )*/
{ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }
}
},
{ /* Block Type ( 3 ) */
{ /* Coeff Band ( 0 )*/
{ 202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255 },
{ 126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128 },
{ 61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128 }
},
{ /* Coeff Band ( 1 )*/
{ 1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128 },
{ 166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128 },
{ 39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128 }
},
{ /* Coeff Band ( 2 )*/
{ 1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128 },
{ 124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128 },
{ 24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128 }
},
{ /* Coeff Band ( 3 )*/
{ 1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128 },
{ 149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128 },
{ 28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128 }
},
{ /* Coeff Band ( 4 )*/
{ 1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128 },
{ 123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128 },
{ 20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128 }
},
{ /* Coeff Band ( 5 )*/
{ 1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128 },
{ 168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128 },
{ 47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128 }
},
{ /* Coeff Band ( 6 )*/
{ 1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128 },
{ 141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128 },
{ 42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128 }
},
{ /* Coeff Band ( 7 )*/
{ 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 },
{ 238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }
}
}
};
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_DEFAULT_COEF_PROBS_H_

View File

@ -1,43 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "vp8/common/blockd.h"
#include "vpx_mem/vpx_mem.h"
void vp8_dequantize_b_c(BLOCKD *d, short *DQC)
{
int i;
short *DQ = d->dqcoeff;
short *Q = d->qcoeff;
for (i = 0; i < 16; i++)
{
DQ[i] = Q[i] * DQC[i];
}
}
void vp8_dequant_idct_add_c(short *input, short *dq,
unsigned char *dest, int stride)
{
int i;
for (i = 0; i < 16; i++)
{
input[i] = dq[i] * input[i];
}
vp8_short_idct4x4llm_c(input, dest, stride, dest, stride);
memset(input, 0, 32);
}

View File

@ -1,188 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "entropy.h"
#include "blockd.h"
#include "onyxc_int.h"
#include "vpx_mem/vpx_mem.h"
#include "coefupdateprobs.h"
DECLARE_ALIGNED(16, const unsigned char, vp8_norm[256]) =
{
0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
DECLARE_ALIGNED(16, const unsigned char, vp8_coef_bands[16]) =
{ 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7};
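/* vp8_coef_bands maps each of the 16 zig-zag positions of a 4x4 block onto
   the 8 coefficient bands that form the middle dimension of the coefficient
   probability tables. */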
DECLARE_ALIGNED(16, const unsigned char,
vp8_prev_token_class[MAX_ENTROPY_TOKENS]) =
{ 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0};
DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]) =
{
0, 1, 4, 8,
5, 2, 3, 6,
9, 12, 13, 10,
7, 11, 14, 15,
};
DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]) =
{
1, 2, 6, 7,
3, 5, 8, 13,
4, 9, 12, 14,
10, 11, 15, 16
};
/* vp8_default_zig_zag_mask generated with:
void vp8_init_scan_order_mask()
{
int i;
for (i = 0; i < 16; i++)
{
vp8_default_zig_zag_mask[vp8_default_zig_zag1d[i]] = 1 << i;
}
}
*/
DECLARE_ALIGNED(16, const short, vp8_default_zig_zag_mask[16]) =
{
1, 2, 32, 64,
4, 16, 128, 4096,
8, 256, 2048, 8192,
512, 1024, 16384, -32768
};
const int vp8_mb_feature_data_bits[MB_LVL_MAX] = {7, 6};
/* Array indices are identical to previously-existing CONTEXT_NODE indices */
const vp8_tree_index vp8_coef_tree[ 22] = /* corresponding _CONTEXT_NODEs */
{
-DCT_EOB_TOKEN, 2, /* 0 = EOB */
-ZERO_TOKEN, 4, /* 1 = ZERO */
-ONE_TOKEN, 6, /* 2 = ONE */
8, 12, /* 3 = LOW_VAL */
-TWO_TOKEN, 10, /* 4 = TWO */
-THREE_TOKEN, -FOUR_TOKEN, /* 5 = THREE */
14, 16, /* 6 = HIGH_LOW */
-DCT_VAL_CATEGORY1, -DCT_VAL_CATEGORY2, /* 7 = CAT_ONE */
18, 20, /* 8 = CAT_THREEFOUR */
-DCT_VAL_CATEGORY3, -DCT_VAL_CATEGORY4, /* 9 = CAT_THREE */
-DCT_VAL_CATEGORY5, -DCT_VAL_CATEGORY6 /* 10 = CAT_FIVE */
};
/* vp8_coef_encodings generated with:
vp8_tokens_from_tree(vp8_coef_encodings, vp8_coef_tree);
*/
vp8_token vp8_coef_encodings[MAX_ENTROPY_TOKENS] =
{
{2, 2},
{6, 3},
{28, 5},
{58, 6},
{59, 6},
{60, 6},
{61, 6},
{124, 7},
{125, 7},
{126, 7},
{127, 7},
{0, 1}
};
/* Trees for extra bits. Probabilities are constant and
do not depend on previously encoded bits */
static const vp8_prob Pcat1[] = { 159};
static const vp8_prob Pcat2[] = { 165, 145};
static const vp8_prob Pcat3[] = { 173, 148, 140};
static const vp8_prob Pcat4[] = { 176, 155, 140, 135};
static const vp8_prob Pcat5[] = { 180, 157, 141, 134, 130};
static const vp8_prob Pcat6[] =
{ 254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129};
/* tree index tables generated with:
void init_bit_tree(vp8_tree_index *p, int n)
{
int i = 0;
while (++i < n)
{
p[0] = p[1] = i << 1;
p += 2;
}
p[0] = p[1] = 0;
}
void init_bit_trees()
{
init_bit_tree(cat1, 1);
init_bit_tree(cat2, 2);
init_bit_tree(cat3, 3);
init_bit_tree(cat4, 4);
init_bit_tree(cat5, 5);
init_bit_tree(cat6, 11);
}
*/
static const vp8_tree_index cat1[2] = { 0, 0 };
static const vp8_tree_index cat2[4] = { 2, 2, 0, 0 };
static const vp8_tree_index cat3[6] = { 2, 2, 4, 4, 0, 0 };
static const vp8_tree_index cat4[8] = { 2, 2, 4, 4, 6, 6, 0, 0 };
static const vp8_tree_index cat5[10] = { 2, 2, 4, 4, 6, 6, 8, 8, 0, 0 };
static const vp8_tree_index cat6[22] = { 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12,
14, 14, 16, 16, 18, 18, 20, 20, 0, 0 };
const vp8_extra_bit_struct vp8_extra_bits[12] =
{
{ 0, 0, 0, 0},
{ 0, 0, 0, 1},
{ 0, 0, 0, 2},
{ 0, 0, 0, 3},
{ 0, 0, 0, 4},
{ cat1, Pcat1, 1, 5},
{ cat2, Pcat2, 2, 7},
{ cat3, Pcat3, 3, 11},
{ cat4, Pcat4, 4, 19},
{ cat5, Pcat5, 5, 35},
{ cat6, Pcat6, 11, 67},
{ 0, 0, 0, 0}
};
#include "default_coef_probs.h"
void vp8_default_coef_probs(VP8_COMMON *pc)
{
memcpy(pc->fc.coef_probs, default_coef_probs, sizeof(default_coef_probs));
}

View File

@ -1,109 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_ENTROPY_H_
#define VP8_COMMON_ENTROPY_H_
#include "treecoder.h"
#include "blockd.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Coefficient token alphabet */
#define ZERO_TOKEN 0 /* 0 Extra Bits 0+0 */
#define ONE_TOKEN 1 /* 1 Extra Bits 0+1 */
#define TWO_TOKEN 2 /* 2 Extra Bits 0+1 */
#define THREE_TOKEN 3 /* 3 Extra Bits 0+1 */
#define FOUR_TOKEN 4 /* 4 Extra Bits 0+1 */
#define DCT_VAL_CATEGORY1 5 /* 5-6 Extra Bits 1+1 */
#define DCT_VAL_CATEGORY2 6 /* 7-10 Extra Bits 2+1 */
#define DCT_VAL_CATEGORY3 7 /* 11-18 Extra Bits 3+1 */
#define DCT_VAL_CATEGORY4 8 /* 19-34 Extra Bits 4+1 */
#define DCT_VAL_CATEGORY5 9 /* 35-66 Extra Bits 5+1 */
#define DCT_VAL_CATEGORY6 10 /* 67+ Extra Bits 11+1 */
#define DCT_EOB_TOKEN 11 /* EOB Extra Bits 0+0 */
#define MAX_ENTROPY_TOKENS 12
#define ENTROPY_NODES 11
extern const vp8_tree_index vp8_coef_tree[];
extern const struct vp8_token_struct vp8_coef_encodings[MAX_ENTROPY_TOKENS];
typedef struct
{
vp8_tree_p tree;
const vp8_prob *prob;
int Len;
int base_val;
} vp8_extra_bit_struct;
extern const vp8_extra_bit_struct vp8_extra_bits[12]; /* indexed by token value */
#define PROB_UPDATE_BASELINE_COST 7
#define MAX_PROB 255
#define DCT_MAX_VALUE 2048
/* Coefficients are predicted via a 3-dimensional probability table. */
/* Outside dimension. 0 = Y no DC, 1 = Y2, 2 = UV, 3 = Y with DC */
#define BLOCK_TYPES 4
/* Middle dimension is a coarsening of the coefficient's
position within the 4x4 DCT. */
#define COEF_BANDS 8
extern DECLARE_ALIGNED(16, const unsigned char, vp8_coef_bands[16]);
/* Inside dimension is 3-valued measure of nearby complexity, that is,
the extent to which nearby coefficients are nonzero. For the first
coefficient (DC, unless block type is 0), we look at the (already encoded)
blocks above and to the left of the current block. The context index is
then the number (0,1,or 2) of these blocks having nonzero coefficients.
After decoding a coefficient, the measure is roughly the size of the
most recently decoded coefficient (0 for 0, 1 for 1, 2 for >1).
Note that the intuitive meaning of this measure changes as coefficients
are decoded, e.g., prior to the first token, a zero means that my neighbors
are empty while, after the first token, because of the use of end-of-block,
a zero means we just decoded a zero and hence guarantees that a non-zero
coefficient will appear later in this block. However, this shift
in meaning is perfectly OK because our context depends also on the
coefficient band (and since zigzag positions 0, 1, and 2 are in
distinct bands). */
/*# define DC_TOKEN_CONTEXTS 3*/ /* 00, 0!0, !0!0 */
# define PREV_COEF_CONTEXTS 3
extern DECLARE_ALIGNED(16, const unsigned char, vp8_prev_token_class[MAX_ENTROPY_TOKENS]);
extern const vp8_prob vp8_coef_update_probs [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
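/* Lookup sketch: the node probabilities that drive vp8_coef_tree for one
   coefficient are coef_probs[block_type][vp8_coef_bands[position]][context],
   an ENTROPY_NODES-long array using the same
   [BLOCK_TYPES][COEF_BANDS][PREV_COEF_CONTEXTS][ENTROPY_NODES] layout as
   vp8_coef_update_probs above. */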
struct VP8Common;
void vp8_default_coef_probs(struct VP8Common *);
extern DECLARE_ALIGNED(16, const int, vp8_default_zig_zag1d[16]);
extern DECLARE_ALIGNED(16, const short, vp8_default_inv_zig_zag[16]);
extern DECLARE_ALIGNED(16, const short, vp8_default_zig_zag_mask[16]);
extern const int vp8_mb_feature_data_bits[MB_LVL_MAX];
void vp8_coef_tree_initialize(void);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_ENTROPY_H_

View File

@ -1,171 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#define USE_PREBUILT_TABLES
#include "entropymode.h"
#include "entropy.h"
#include "vpx_mem/vpx_mem.h"
#include "vp8_entropymodedata.h"
int vp8_mv_cont(const int_mv *l, const int_mv *a)
{
int lez = (l->as_int == 0);
int aez = (a->as_int == 0);
int lea = (l->as_int == a->as_int);
if (lea && lez)
return SUBMVREF_LEFT_ABOVE_ZED;
if (lea)
return SUBMVREF_LEFT_ABOVE_SAME;
if (aez)
return SUBMVREF_ABOVE_ZED;
if (lez)
return SUBMVREF_LEFT_ZED;
return SUBMVREF_NORMAL;
}
static const vp8_prob sub_mv_ref_prob [VP8_SUBMVREFS-1] = { 180, 162, 25};
const vp8_prob vp8_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS-1] =
{
{ 147, 136, 18 },
{ 106, 145, 1 },
{ 179, 121, 1 },
{ 223, 1 , 34 },
{ 208, 1 , 1 }
};
const vp8_mbsplit vp8_mbsplits [VP8_NUMMBSPLITS] =
{
{
0, 0, 0, 0,
0, 0, 0, 0,
1, 1, 1, 1,
1, 1, 1, 1,
},
{
0, 0, 1, 1,
0, 0, 1, 1,
0, 0, 1, 1,
0, 0, 1, 1,
},
{
0, 0, 1, 1,
0, 0, 1, 1,
2, 2, 3, 3,
2, 2, 3, 3,
},
{
0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15,
}
};
const int vp8_mbsplit_count [VP8_NUMMBSPLITS] = { 2, 2, 4, 16};
const vp8_prob vp8_mbsplit_probs [VP8_NUMMBSPLITS-1] = { 110, 111, 150};
/* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
const vp8_tree_index vp8_bmode_tree[18] = /* INTRAMODECONTEXTNODE value */
{
-B_DC_PRED, 2, /* 0 = DC_NODE */
-B_TM_PRED, 4, /* 1 = TM_NODE */
-B_VE_PRED, 6, /* 2 = VE_NODE */
8, 12, /* 3 = COM_NODE */
-B_HE_PRED, 10, /* 4 = HE_NODE */
-B_RD_PRED, -B_VR_PRED, /* 5 = RD_NODE */
-B_LD_PRED, 14, /* 6 = LD_NODE */
-B_VL_PRED, 16, /* 7 = VL_NODE */
-B_HD_PRED, -B_HU_PRED /* 8 = HD_NODE */
};
/* Again, these trees use the same probability indices as their
explicitly-programmed predecessors. */
const vp8_tree_index vp8_ymode_tree[8] =
{
-DC_PRED, 2,
4, 6,
-V_PRED, -H_PRED,
-TM_PRED, -B_PRED
};
const vp8_tree_index vp8_kf_ymode_tree[8] =
{
-B_PRED, 2,
4, 6,
-DC_PRED, -V_PRED,
-H_PRED, -TM_PRED
};
const vp8_tree_index vp8_uv_mode_tree[6] =
{
-DC_PRED, 2,
-V_PRED, 4,
-H_PRED, -TM_PRED
};
const vp8_tree_index vp8_mbsplit_tree[6] =
{
-3, 2,
-2, 4,
-0, -1
};
const vp8_tree_index vp8_mv_ref_tree[8] =
{
-ZEROMV, 2,
-NEARESTMV, 4,
-NEARMV, 6,
-NEWMV, -SPLITMV
};
const vp8_tree_index vp8_sub_mv_ref_tree[6] =
{
-LEFT4X4, 2,
-ABOVE4X4, 4,
-ZERO4X4, -NEW4X4
};
const vp8_tree_index vp8_small_mvtree [14] =
{
2, 8,
4, 6,
-0, -1,
-2, -3,
10, 12,
-4, -5,
-6, -7
};
void vp8_init_mbmode_probs(VP8_COMMON *x)
{
memcpy(x->fc.ymode_prob, vp8_ymode_prob, sizeof(vp8_ymode_prob));
memcpy(x->fc.uv_mode_prob, vp8_uv_mode_prob, sizeof(vp8_uv_mode_prob));
memcpy(x->fc.sub_mv_ref_prob, sub_mv_ref_prob, sizeof(sub_mv_ref_prob));
}
void vp8_default_bmode_probs(vp8_prob p [VP8_BINTRAMODES-1])
{
memcpy(p, vp8_bmode_prob, sizeof(vp8_bmode_prob));
}

View File

@ -1,88 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_ENTROPYMODE_H_
#define VP8_COMMON_ENTROPYMODE_H_
#include "onyxc_int.h"
#include "treecoder.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum
{
SUBMVREF_NORMAL,
SUBMVREF_LEFT_ZED,
SUBMVREF_ABOVE_ZED,
SUBMVREF_LEFT_ABOVE_SAME,
SUBMVREF_LEFT_ABOVE_ZED
} sumvfref_t;
typedef int vp8_mbsplit[16];
#define VP8_NUMMBSPLITS 4
extern const vp8_mbsplit vp8_mbsplits [VP8_NUMMBSPLITS];
extern const int vp8_mbsplit_count [VP8_NUMMBSPLITS]; /* # of subsets */
extern const vp8_prob vp8_mbsplit_probs [VP8_NUMMBSPLITS-1];
extern int vp8_mv_cont(const int_mv *l, const int_mv *a);
#define SUBMVREF_COUNT 5
extern const vp8_prob vp8_sub_mv_ref_prob2 [SUBMVREF_COUNT][VP8_SUBMVREFS-1];
extern const unsigned int vp8_kf_default_bmode_counts [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES];
extern const vp8_tree_index vp8_bmode_tree[];
extern const vp8_tree_index vp8_ymode_tree[];
extern const vp8_tree_index vp8_kf_ymode_tree[];
extern const vp8_tree_index vp8_uv_mode_tree[];
extern const vp8_tree_index vp8_mbsplit_tree[];
extern const vp8_tree_index vp8_mv_ref_tree[];
extern const vp8_tree_index vp8_sub_mv_ref_tree[];
extern const struct vp8_token_struct vp8_bmode_encodings[VP8_BINTRAMODES];
extern const struct vp8_token_struct vp8_ymode_encodings[VP8_YMODES];
extern const struct vp8_token_struct vp8_kf_ymode_encodings[VP8_YMODES];
extern const struct vp8_token_struct vp8_uv_mode_encodings[VP8_UV_MODES];
extern const struct vp8_token_struct vp8_mbsplit_encodings[VP8_NUMMBSPLITS];
/* Inter mode values do not start at zero */
extern const struct vp8_token_struct vp8_mv_ref_encoding_array[VP8_MVREFS];
extern const struct vp8_token_struct vp8_sub_mv_ref_encoding_array[VP8_SUBMVREFS];
extern const vp8_tree_index vp8_small_mvtree[];
extern const struct vp8_token_struct vp8_small_mvencodings[8];
/* Key frame default mode probs */
extern const vp8_prob vp8_kf_bmode_prob[VP8_BINTRAMODES][VP8_BINTRAMODES]
[VP8_BINTRAMODES-1];
extern const vp8_prob vp8_kf_uv_mode_prob[VP8_UV_MODES-1];
extern const vp8_prob vp8_kf_ymode_prob[VP8_YMODES-1];
void vp8_init_mbmode_probs(VP8_COMMON *x);
void vp8_default_bmode_probs(vp8_prob dest [VP8_BINTRAMODES-1]);
void vp8_kf_default_bmode_probs(vp8_prob dest [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES-1]);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_ENTROPYMODE_H_

View File

@ -1,49 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "entropymv.h"
const MV_CONTEXT vp8_mv_update_probs[2] =
{
{{
237,
246,
253, 253, 254, 254, 254, 254, 254,
254, 254, 254, 254, 254, 250, 250, 252, 254, 254
}},
{{
231,
243,
245, 253, 254, 254, 254, 254, 254,
254, 254, 254, 254, 254, 251, 251, 254, 254, 254
}}
};
const MV_CONTEXT vp8_default_mv_context[2] =
{
{{
/* row */
162, /* is short */
128, /* sign */
225, 146, 172, 147, 214, 39, 156, /* short tree */
128, 129, 132, 75, 145, 178, 206, 239, 254, 254 /* long bits */
}},
{{
/* same for column */
164, /* is short */
128,
204, 170, 119, 235, 140, 230, 228,
128, 130, 130, 74, 148, 180, 203, 236, 254, 254 /* long bits */
}}
};

View File

@ -1,52 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_ENTROPYMV_H_
#define VP8_COMMON_ENTROPYMV_H_
#include "treecoder.h"
#ifdef __cplusplus
extern "C" {
#endif
enum
{
mv_max = 1023, /* max absolute value of a MV component */
MVvals = (2 * mv_max) + 1, /* # possible values "" */
mvfp_max = 255, /* max absolute value of a full pixel MV component */
MVfpvals = (2 * mvfp_max) +1, /* # possible full pixel MV values */
mvlong_width = 10, /* Large MVs have 9 bit magnitudes */
mvnum_short = 8, /* magnitudes 0 through 7 */
/* probability offsets for coding each MV component */
mvpis_short = 0, /* short (<= 7) vs long (>= 8) */
MVPsign, /* sign for non-zero */
MVPshort, /* 8 short values = 7-position tree */
MVPbits = MVPshort + mvnum_short - 1, /* mvlong_width long value bits */
MVPcount = MVPbits + mvlong_width /* (with independent probabilities) */
};
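/* MVPcount works out to 19 probabilities per component: 1 is-short flag,
   1 sign, 7 internal nodes of the 8-leaf short-magnitude tree, and 10 long
   magnitude bits, matching the 19-entry initializers in entropymv.c. */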
typedef struct mv_context
{
vp8_prob prob[MVPcount]; /* often come in row, col pairs */
} MV_CONTEXT;
extern const MV_CONTEXT vp8_mv_update_probs[2], vp8_default_mv_context[2];
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_ENTROPYMV_H_

View File

@ -1,188 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "extend.h"
#include "vpx_mem/vpx_mem.h"
static void copy_and_extend_plane
(
unsigned char *s, /* source */
int sp, /* source pitch */
unsigned char *d, /* destination */
int dp, /* destination pitch */
int h, /* height */
int w, /* width */
int et, /* extend top border */
int el, /* extend left border */
int eb, /* extend bottom border */
int er /* extend right border */
)
{
int i;
unsigned char *src_ptr1, *src_ptr2;
unsigned char *dest_ptr1, *dest_ptr2;
int linesize;
/* copy the left and right most columns out */
src_ptr1 = s;
src_ptr2 = s + w - 1;
dest_ptr1 = d - el;
dest_ptr2 = d + w;
for (i = 0; i < h; i++)
{
memset(dest_ptr1, src_ptr1[0], el);
memcpy(dest_ptr1 + el, src_ptr1, w);
memset(dest_ptr2, src_ptr2[0], er);
src_ptr1 += sp;
src_ptr2 += sp;
dest_ptr1 += dp;
dest_ptr2 += dp;
}
/* Now copy the top and bottom lines into each line of the respective
* borders
*/
src_ptr1 = d - el;
src_ptr2 = d + dp * (h - 1) - el;
dest_ptr1 = d + dp * (-et) - el;
dest_ptr2 = d + dp * (h) - el;
linesize = el + er + w;
for (i = 0; i < et; i++)
{
memcpy(dest_ptr1, src_ptr1, linesize);
dest_ptr1 += dp;
}
for (i = 0; i < eb; i++)
{
memcpy(dest_ptr2, src_ptr2, linesize);
dest_ptr2 += dp;
}
}
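/* The bottom/right extension widths below absorb any difference between the
   destination and source dimensions in addition to the nominal border. */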
void vp8_copy_and_extend_frame(YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst)
{
int et = dst->border;
int el = dst->border;
int eb = dst->border + dst->y_height - src->y_height;
int er = dst->border + dst->y_width - src->y_width;
copy_and_extend_plane(src->y_buffer, src->y_stride,
dst->y_buffer, dst->y_stride,
src->y_height, src->y_width,
et, el, eb, er);
et = dst->border >> 1;
el = dst->border >> 1;
eb = (dst->border >> 1) + dst->uv_height - src->uv_height;
er = (dst->border >> 1) + dst->uv_width - src->uv_width;
copy_and_extend_plane(src->u_buffer, src->uv_stride,
dst->u_buffer, dst->uv_stride,
src->uv_height, src->uv_width,
et, el, eb, er);
copy_and_extend_plane(src->v_buffer, src->uv_stride,
dst->v_buffer, dst->uv_stride,
src->uv_height, src->uv_width,
et, el, eb, er);
}
void vp8_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst,
int srcy, int srcx,
int srch, int srcw)
{
int et = dst->border;
int el = dst->border;
int eb = dst->border + dst->y_height - src->y_height;
int er = dst->border + dst->y_width - src->y_width;
int src_y_offset = srcy * src->y_stride + srcx;
int dst_y_offset = srcy * dst->y_stride + srcx;
int src_uv_offset = ((srcy * src->uv_stride) >> 1) + (srcx >> 1);
int dst_uv_offset = ((srcy * dst->uv_stride) >> 1) + (srcx >> 1);
/* If the side is not touching the boundary then don't extend. */
if (srcy)
et = 0;
if (srcx)
el = 0;
if (srcy + srch != src->y_height)
eb = 0;
if (srcx + srcw != src->y_width)
er = 0;
copy_and_extend_plane(src->y_buffer + src_y_offset,
src->y_stride,
dst->y_buffer + dst_y_offset,
dst->y_stride,
srch, srcw,
et, el, eb, er);
et = (et + 1) >> 1;
el = (el + 1) >> 1;
eb = (eb + 1) >> 1;
er = (er + 1) >> 1;
srch = (srch + 1) >> 1;
srcw = (srcw + 1) >> 1;
copy_and_extend_plane(src->u_buffer + src_uv_offset,
src->uv_stride,
dst->u_buffer + dst_uv_offset,
dst->uv_stride,
srch, srcw,
et, el, eb, er);
copy_and_extend_plane(src->v_buffer + src_uv_offset,
src->uv_stride,
dst->v_buffer + dst_uv_offset,
dst->uv_stride,
srch, srcw,
et, el, eb, er);
}
/* note the extension is only for the last row, for intra prediction purposes */
void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf,
unsigned char *YPtr,
unsigned char *UPtr,
unsigned char *VPtr)
{
int i;
YPtr += ybf->y_stride * 14;
UPtr += ybf->uv_stride * 6;
VPtr += ybf->uv_stride * 6;
for (i = 0; i < 4; i++)
{
YPtr[i] = YPtr[-1];
UPtr[i] = UPtr[-1];
VPtr[i] = VPtr[-1];
}
YPtr += ybf->y_stride;
UPtr += ybf->uv_stride;
VPtr += ybf->uv_stride;
for (i = 0; i < 4; i++)
{
YPtr[i] = YPtr[-1];
UPtr[i] = UPtr[-1];
VPtr[i] = VPtr[-1];
}
}

View File

@ -1,33 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_EXTEND_H_
#define VP8_COMMON_EXTEND_H_
#include "vpx_scale/yv12config.h"
#ifdef __cplusplus
extern "C" {
#endif
void vp8_extend_mb_row(YV12_BUFFER_CONFIG *ybf, unsigned char *YPtr, unsigned char *UPtr, unsigned char *VPtr);
void vp8_copy_and_extend_frame(YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst);
void vp8_copy_and_extend_frame_with_rect(YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst,
int srcy, int srcx,
int srch, int srcw);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_EXTEND_H_

View File

@ -1,493 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "filter.h"
#include "./vp8_rtcd.h"
DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[8][2]) =
{
{ 128, 0 },
{ 112, 16 },
{ 96, 32 },
{ 80, 48 },
{ 64, 64 },
{ 48, 80 },
{ 32, 96 },
{ 16, 112 }
};
DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters[8][6]) =
{
{ 0, 0, 128, 0, 0, 0 }, /* note that 1/8 pel positions are just as per alpha -0.5 bicubic */
{ 0, -6, 123, 12, -1, 0 },
{ 2, -11, 108, 36, -8, 1 }, /* New 1/4 pel 6 tap filter */
{ 0, -9, 93, 50, -6, 0 },
{ 3, -16, 77, 77, -16, 3 }, /* New 1/2 pel 6 tap filter */
{ 0, -6, 50, 93, -9, 0 },
{ 1, -8, 36, 108, -11, 2 }, /* New 1/4 pel 6 tap filter */
{ 0, -1, 12, 123, -6, 0 },
};
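/* Each row of taps in both tables sums to VP8_FILTER_WEIGHT (128), i.e. unity
   gain in Q7; the row index is the 1/8-pel sub-pixel offset. */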
static void filter_block2d_first_pass
(
unsigned char *src_ptr,
int *output_ptr,
unsigned int src_pixels_per_line,
unsigned int pixel_step,
unsigned int output_height,
unsigned int output_width,
const short *vp8_filter
)
{
unsigned int i, j;
int Temp;
for (i = 0; i < output_height; i++)
{
for (j = 0; j < output_width; j++)
{
Temp = ((int)src_ptr[-2 * (int)pixel_step] * vp8_filter[0]) +
((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) +
((int)src_ptr[0] * vp8_filter[2]) +
((int)src_ptr[pixel_step] * vp8_filter[3]) +
((int)src_ptr[2*pixel_step] * vp8_filter[4]) +
((int)src_ptr[3*pixel_step] * vp8_filter[5]) +
(VP8_FILTER_WEIGHT >> 1); /* Rounding */
/* Normalize back to 0-255 */
Temp = Temp >> VP8_FILTER_SHIFT;
if (Temp < 0)
Temp = 0;
else if (Temp > 255)
Temp = 255;
output_ptr[j] = Temp;
src_ptr++;
}
/* Next row... */
src_ptr += src_pixels_per_line - output_width;
output_ptr += output_width;
}
}
static void filter_block2d_second_pass
(
int *src_ptr,
unsigned char *output_ptr,
int output_pitch,
unsigned int src_pixels_per_line,
unsigned int pixel_step,
unsigned int output_height,
unsigned int output_width,
const short *vp8_filter
)
{
unsigned int i, j;
int Temp;
for (i = 0; i < output_height; i++)
{
for (j = 0; j < output_width; j++)
{
/* Apply filter */
Temp = ((int)src_ptr[-2 * (int)pixel_step] * vp8_filter[0]) +
((int)src_ptr[-1 * (int)pixel_step] * vp8_filter[1]) +
((int)src_ptr[0] * vp8_filter[2]) +
((int)src_ptr[pixel_step] * vp8_filter[3]) +
((int)src_ptr[2*pixel_step] * vp8_filter[4]) +
((int)src_ptr[3*pixel_step] * vp8_filter[5]) +
(VP8_FILTER_WEIGHT >> 1); /* Rounding */
/* Normalize back to 0-255 */
Temp = Temp >> VP8_FILTER_SHIFT;
if (Temp < 0)
Temp = 0;
else if (Temp > 255)
Temp = 255;
output_ptr[j] = (unsigned char)Temp;
src_ptr++;
}
/* Start next row */
src_ptr += src_pixels_per_line - output_width;
output_ptr += output_pitch;
}
}
static void filter_block2d
(
unsigned char *src_ptr,
unsigned char *output_ptr,
unsigned int src_pixels_per_line,
int output_pitch,
const short *HFilter,
const short *VFilter
)
{
int FData[9*4]; /* Temp data buffer used in filtering */
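/* 9 input rows are filtered horizontally: the 4 output rows plus the 2 rows
   above and 3 rows below needed by the 6-tap vertical filter (the second
   pass therefore starts at FData + 8, i.e. row 2). */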
/* First filter 1-D horizontally... */
filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 1, 9, 4, HFilter);
/* then filter vertically... */
filter_block2d_second_pass(FData + 8, output_ptr, output_pitch, 4, 4, 4, 4, VFilter);
}
void vp8_sixtap_predict4x4_c
(
unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
unsigned char *dst_ptr,
int dst_pitch
)
{
const short *HFilter;
const short *VFilter;
HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */
VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */
filter_block2d(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter);
}
void vp8_sixtap_predict8x8_c
(
unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
unsigned char *dst_ptr,
int dst_pitch
)
{
const short *HFilter;
const short *VFilter;
int FData[13*16]; /* Temp data buffer used in filtering */
HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */
VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */
/* First filter 1-D horizontally... */
filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 1, 13, 8, HFilter);
/* then filter vertically... */
filter_block2d_second_pass(FData + 16, dst_ptr, dst_pitch, 8, 8, 8, 8, VFilter);
}
void vp8_sixtap_predict8x4_c
(
unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
unsigned char *dst_ptr,
int dst_pitch
)
{
const short *HFilter;
const short *VFilter;
int FData[13*16]; /* Temp data buffer used in filtering */
HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */
VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */
/* First filter 1-D horizontally... */
filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 1, 9, 8, HFilter);
/* then filter vertically... */
filter_block2d_second_pass(FData + 16, dst_ptr, dst_pitch, 8, 8, 4, 8, VFilter);
}
void vp8_sixtap_predict16x16_c
(
unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
unsigned char *dst_ptr,
int dst_pitch
)
{
const short *HFilter;
const short *VFilter;
int FData[21*24]; /* Temp data buffer used in filtering */
HFilter = vp8_sub_pel_filters[xoffset]; /* 6 tap */
VFilter = vp8_sub_pel_filters[yoffset]; /* 6 tap */
/* First filter 1-D horizontally... */
filter_block2d_first_pass(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 1, 21, 16, HFilter);
/* then filter vertically... */
filter_block2d_second_pass(FData + 32, dst_ptr, dst_pitch, 16, 16, 16, 16, VFilter);
}
/****************************************************************************
*
* ROUTINE : filter_block2d_bil_first_pass
*
* INPUTS : UINT8 *src_ptr : Pointer to source block.
* UINT32 src_stride : Stride of source block.
* UINT32 height : Block height.
* UINT32 width : Block width.
* INT16 *vp8_filter : Array of 2 bi-linear filter taps.
*
* OUTPUTS : UINT16 *dst_ptr : Pointer to filtered block.
*
* RETURNS : void
*
* FUNCTION : Applies a 1-D 2-tap bi-linear filter to the source block
* in the horizontal direction to produce the filtered output
* block. Used to implement first-pass of 2-D separable filter.
*
* SPECIAL NOTES : Produces UINT16 output to retain precision for next pass.
* Two filter taps should sum to VP8_FILTER_WEIGHT.
*
****************************************************************************/
static void filter_block2d_bil_first_pass
(
unsigned char *src_ptr,
unsigned short *dst_ptr,
unsigned int src_stride,
unsigned int height,
unsigned int width,
const short *vp8_filter
)
{
unsigned int i, j;
for (i = 0; i < height; i++)
{
for (j = 0; j < width; j++)
{
/* Apply bilinear filter */
dst_ptr[j] = (((int)src_ptr[0] * vp8_filter[0]) +
((int)src_ptr[1] * vp8_filter[1]) +
(VP8_FILTER_WEIGHT / 2)) >> VP8_FILTER_SHIFT;
src_ptr++;
}
/* Next row... */
src_ptr += src_stride - width;
dst_ptr += width;
}
}
/****************************************************************************
*
* ROUTINE : filter_block2d_bil_second_pass
*
* INPUTS : UINT16 *src_ptr : Pointer to source block.
* UINT32 dst_pitch : Destination block pitch.
* UINT32 height : Block height.
* UINT32 width : Block width.
* INT16 *vp8_filter : Array of 2 bi-linear filter taps.
*
* OUTPUTS : UINT8 *dst_ptr : Pointer to filtered block.
*
* RETURNS : void
*
* FUNCTION : Applies a 1-D 2-tap bi-linear filter to the source block
* in the vertical direction to produce the filtered output
* block. Used to implement second-pass of 2-D separable filter.
*
* SPECIAL NOTES : Requires 16-bit input as produced by filter_block2d_bil_first_pass.
* Two filter taps should sum to VP8_FILTER_WEIGHT.
*
****************************************************************************/
static void filter_block2d_bil_second_pass
(
unsigned short *src_ptr,
unsigned char *dst_ptr,
int dst_pitch,
unsigned int height,
unsigned int width,
const short *vp8_filter
)
{
unsigned int i, j;
int Temp;
for (i = 0; i < height; i++)
{
for (j = 0; j < width; j++)
{
/* Apply filter */
Temp = ((int)src_ptr[0] * vp8_filter[0]) +
((int)src_ptr[width] * vp8_filter[1]) +
(VP8_FILTER_WEIGHT / 2);
dst_ptr[j] = (unsigned char)(Temp >> VP8_FILTER_SHIFT);
src_ptr++;
}
/* Next row... */
dst_ptr += dst_pitch;
}
}
/****************************************************************************
*
* ROUTINE : filter_block2d_bil
*
* INPUTS : UINT8 *src_ptr : Pointer to source block.
* UINT32 src_pitch : Stride of source block.
* UINT32 dst_pitch : Stride of destination block.
* INT16 *HFilter : Array of 2 horizontal filter taps.
* INT16 *VFilter : Array of 2 vertical filter taps.
* INT32 Width : Block width
* INT32 Height : Block height
*
* OUTPUTS : UINT8 *dst_ptr : Pointer to filtered block.
*
* RETURNS : void
*
* FUNCTION : 2-D filters an input block by applying a 2-tap
* bi-linear filter horizontally followed by a 2-tap
* bi-linear filter vertically on the result.
*
* SPECIAL NOTES : The largest block size that can be handled here is 16x16
*
****************************************************************************/
static void filter_block2d_bil
(
unsigned char *src_ptr,
unsigned char *dst_ptr,
unsigned int src_pitch,
unsigned int dst_pitch,
const short *HFilter,
const short *VFilter,
int Width,
int Height
)
{
unsigned short FData[17*16]; /* Temp data buffer used in filtering */
/* First filter 1-D horizontally... */
filter_block2d_bil_first_pass(src_ptr, FData, src_pitch, Height + 1, Width, HFilter);
/* then 1-D vertically... */
filter_block2d_bil_second_pass(FData, dst_ptr, dst_pitch, Height, Width, VFilter);
}
void vp8_bilinear_predict4x4_c
(
unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
unsigned char *dst_ptr,
int dst_pitch
)
{
const short *HFilter;
const short *VFilter;
HFilter = vp8_bilinear_filters[xoffset];
VFilter = vp8_bilinear_filters[yoffset];
#if 0
{
int i;
unsigned char temp1[16];
unsigned char temp2[16];
bilinear_predict4x4_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, temp1, 4);
filter_block2d_bil(src_ptr, temp2, src_pixels_per_line, 4, HFilter, VFilter, 4, 4);
for (i = 0; i < 16; i++)
{
if (temp1[i] != temp2[i])
{
bilinear_predict4x4_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, temp1, 4);
filter_block2d_bil(src_ptr, temp2, src_pixels_per_line, 4, HFilter, VFilter, 4, 4);
}
}
}
#endif
filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 4, 4);
}
void vp8_bilinear_predict8x8_c
(
unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
unsigned char *dst_ptr,
int dst_pitch
)
{
const short *HFilter;
const short *VFilter;
HFilter = vp8_bilinear_filters[xoffset];
VFilter = vp8_bilinear_filters[yoffset];
filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 8);
}
void vp8_bilinear_predict8x4_c
(
unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
unsigned char *dst_ptr,
int dst_pitch
)
{
const short *HFilter;
const short *VFilter;
HFilter = vp8_bilinear_filters[xoffset];
VFilter = vp8_bilinear_filters[yoffset];
filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 4);
}
void vp8_bilinear_predict16x16_c
(
unsigned char *src_ptr,
int src_pixels_per_line,
int xoffset,
int yoffset,
unsigned char *dst_ptr,
int dst_pitch
)
{
const short *HFilter;
const short *VFilter;
HFilter = vp8_bilinear_filters[xoffset];
VFilter = vp8_bilinear_filters[yoffset];
filter_block2d_bil(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 16, 16);
}

View File

@ -1,32 +0,0 @@
/*
* Copyright (c) 2011 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_FILTER_H_
#define VP8_COMMON_FILTER_H_
#include "vpx_ports/mem.h"
#ifdef __cplusplus
extern "C" {
#endif
#define BLOCK_HEIGHT_WIDTH 4
#define VP8_FILTER_WEIGHT 128
#define VP8_FILTER_SHIFT 7
extern DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[8][2]);
extern DECLARE_ALIGNED(16, const short, vp8_sub_pel_filters[8][6]);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_FILTER_H_

View File

@ -1,193 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "findnearmv.h"
const unsigned char vp8_mbsplit_offset[4][16] = {
{ 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{ 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{ 0, 2, 8, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
};
/* Predict motion vectors using those from already-decoded nearby blocks.
Note that we only consider one 4x4 subblock from each candidate 16x16
macroblock. */
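/* cnt[] vote weights: the above and left neighbours count 2, the above-left
   neighbour counts 1. Inter neighbours with a zero MV add their weight to the
   CNT_INTRA (zero) slot; intra neighbours add nothing. */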
void vp8_find_near_mvs
(
MACROBLOCKD *xd,
const MODE_INFO *here,
int_mv *nearest,
int_mv *nearby,
int_mv *best_mv,
int cnt[4],
int refframe,
int *ref_frame_sign_bias
)
{
const MODE_INFO *above = here - xd->mode_info_stride;
const MODE_INFO *left = here - 1;
const MODE_INFO *aboveleft = above - 1;
int_mv near_mvs[4];
int_mv *mv = near_mvs;
int *cntx = cnt;
enum {CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV};
/* Zero accumulators */
mv[0].as_int = mv[1].as_int = mv[2].as_int = 0;
cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0;
/* Process above */
if (above->mbmi.ref_frame != INTRA_FRAME)
{
if (above->mbmi.mv.as_int)
{
(++mv)->as_int = above->mbmi.mv.as_int;
mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe, mv, ref_frame_sign_bias);
++cntx;
}
*cntx += 2;
}
/* Process left */
if (left->mbmi.ref_frame != INTRA_FRAME)
{
if (left->mbmi.mv.as_int)
{
int_mv this_mv;
this_mv.as_int = left->mbmi.mv.as_int;
mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], refframe, &this_mv, ref_frame_sign_bias);
if (this_mv.as_int != mv->as_int)
{
(++mv)->as_int = this_mv.as_int;
++cntx;
}
*cntx += 2;
}
else
cnt[CNT_INTRA] += 2;
}
/* Process above left */
if (aboveleft->mbmi.ref_frame != INTRA_FRAME)
{
if (aboveleft->mbmi.mv.as_int)
{
int_mv this_mv;
this_mv.as_int = aboveleft->mbmi.mv.as_int;
mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], refframe, &this_mv, ref_frame_sign_bias);
if (this_mv.as_int != mv->as_int)
{
(++mv)->as_int = this_mv.as_int;
++cntx;
}
*cntx += 1;
}
else
cnt[CNT_INTRA] += 1;
}
/* If we have three distinct MV's ... */
if (cnt[CNT_SPLITMV])
{
/* See if above-left MV can be merged with NEAREST */
if (mv->as_int == near_mvs[CNT_NEAREST].as_int)
cnt[CNT_NEAREST] += 1;
}
cnt[CNT_SPLITMV] = ((above->mbmi.mode == SPLITMV)
+ (left->mbmi.mode == SPLITMV)) * 2
+ (aboveleft->mbmi.mode == SPLITMV);
/* Swap near and nearest if necessary */
if (cnt[CNT_NEAR] > cnt[CNT_NEAREST])
{
int tmp;
tmp = cnt[CNT_NEAREST];
cnt[CNT_NEAREST] = cnt[CNT_NEAR];
cnt[CNT_NEAR] = tmp;
tmp = near_mvs[CNT_NEAREST].as_int;
near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int;
near_mvs[CNT_NEAR].as_int = tmp;
}
/* Use near_mvs[0] to store the "best" MV */
if (cnt[CNT_NEAREST] >= cnt[CNT_INTRA])
near_mvs[CNT_INTRA] = near_mvs[CNT_NEAREST];
/* Set up return values */
best_mv->as_int = near_mvs[0].as_int;
nearest->as_int = near_mvs[CNT_NEAREST].as_int;
nearby->as_int = near_mvs[CNT_NEAR].as_int;
}
static void invert_and_clamp_mvs(int_mv *inv, int_mv *src, MACROBLOCKD *xd)
{
inv->as_mv.row = src->as_mv.row * -1;
inv->as_mv.col = src->as_mv.col * -1;
vp8_clamp_mv2(inv, xd);
vp8_clamp_mv2(src, xd);
}
int vp8_find_near_mvs_bias
(
MACROBLOCKD *xd,
const MODE_INFO *here,
int_mv mode_mv_sb[2][MB_MODE_COUNT],
int_mv best_mv_sb[2],
int cnt[4],
int refframe,
int *ref_frame_sign_bias
)
{
int sign_bias = ref_frame_sign_bias[refframe];
vp8_find_near_mvs(xd,
here,
&mode_mv_sb[sign_bias][NEARESTMV],
&mode_mv_sb[sign_bias][NEARMV],
&best_mv_sb[sign_bias],
cnt,
refframe,
ref_frame_sign_bias);
invert_and_clamp_mvs(&mode_mv_sb[!sign_bias][NEARESTMV],
&mode_mv_sb[sign_bias][NEARESTMV], xd);
invert_and_clamp_mvs(&mode_mv_sb[!sign_bias][NEARMV],
&mode_mv_sb[sign_bias][NEARMV], xd);
invert_and_clamp_mvs(&best_mv_sb[!sign_bias],
&best_mv_sb[sign_bias], xd);
return sign_bias;
}
vp8_prob *vp8_mv_ref_probs(
vp8_prob p[VP8_MVREFS-1], const int near_mv_ref_ct[4]
)
{
p[0] = vp8_mode_contexts [near_mv_ref_ct[0]] [0];
p[1] = vp8_mode_contexts [near_mv_ref_ct[1]] [1];
p[2] = vp8_mode_contexts [near_mv_ref_ct[2]] [2];
p[3] = vp8_mode_contexts [near_mv_ref_ct[3]] [3];
/*p[3] = vp8_mode_contexts [near_mv_ref_ct[1] + near_mv_ref_ct[2] + near_mv_ref_ct[3]] [3];*/
return p;
}

View File

@ -1,195 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_FINDNEARMV_H_
#define VP8_COMMON_FINDNEARMV_H_
#include "./vpx_config.h"
#include "mv.h"
#include "blockd.h"
#include "modecont.h"
#include "treecoder.h"
#ifdef __cplusplus
extern "C" {
#endif
static INLINE void mv_bias(int refmb_ref_frame_sign_bias, int refframe,
int_mv *mvp, const int *ref_frame_sign_bias)
{
if (refmb_ref_frame_sign_bias != ref_frame_sign_bias[refframe])
{
mvp->as_mv.row *= -1;
mvp->as_mv.col *= -1;
}
}
#define LEFT_TOP_MARGIN (16 << 3)
#define RIGHT_BOTTOM_MARGIN (16 << 3)
static INLINE void vp8_clamp_mv2(int_mv *mv, const MACROBLOCKD *xd)
{
if (mv->as_mv.col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN))
mv->as_mv.col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
else if (mv->as_mv.col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN)
mv->as_mv.col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
if (mv->as_mv.row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN))
mv->as_mv.row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
else if (mv->as_mv.row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN)
mv->as_mv.row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
}
static INLINE void vp8_clamp_mv(int_mv *mv, int mb_to_left_edge,
int mb_to_right_edge, int mb_to_top_edge,
int mb_to_bottom_edge)
{
mv->as_mv.col = (mv->as_mv.col < mb_to_left_edge) ?
mb_to_left_edge : mv->as_mv.col;
mv->as_mv.col = (mv->as_mv.col > mb_to_right_edge) ?
mb_to_right_edge : mv->as_mv.col;
mv->as_mv.row = (mv->as_mv.row < mb_to_top_edge) ?
mb_to_top_edge : mv->as_mv.row;
mv->as_mv.row = (mv->as_mv.row > mb_to_bottom_edge) ?
mb_to_bottom_edge : mv->as_mv.row;
}
static INLINE unsigned int vp8_check_mv_bounds(int_mv *mv, int mb_to_left_edge,
int mb_to_right_edge,
int mb_to_top_edge,
int mb_to_bottom_edge)
{
unsigned int need_to_clamp;
need_to_clamp = (mv->as_mv.col < mb_to_left_edge);
need_to_clamp |= (mv->as_mv.col > mb_to_right_edge);
need_to_clamp |= (mv->as_mv.row < mb_to_top_edge);
need_to_clamp |= (mv->as_mv.row > mb_to_bottom_edge);
return need_to_clamp;
}
void vp8_find_near_mvs
(
MACROBLOCKD *xd,
const MODE_INFO *here,
int_mv *nearest, int_mv *nearby, int_mv *best,
int near_mv_ref_cts[4],
int refframe,
int *ref_frame_sign_bias
);
int vp8_find_near_mvs_bias
(
MACROBLOCKD *xd,
const MODE_INFO *here,
int_mv mode_mv_sb[2][MB_MODE_COUNT],
int_mv best_mv_sb[2],
int cnt[4],
int refframe,
int *ref_frame_sign_bias
);
vp8_prob *vp8_mv_ref_probs(
vp8_prob p[VP8_MVREFS-1], const int near_mv_ref_ct[4]
);
extern const unsigned char vp8_mbsplit_offset[4][16];
static INLINE uint32_t left_block_mv(const MODE_INFO *cur_mb, int b)
{
if (!(b & 3))
{
/* On L edge, get from MB to left of us */
--cur_mb;
if(cur_mb->mbmi.mode != SPLITMV)
return cur_mb->mbmi.mv.as_int;
b += 4;
}
return (cur_mb->bmi + b - 1)->mv.as_int;
}
static INLINE uint32_t above_block_mv(const MODE_INFO *cur_mb, int b,
int mi_stride)
{
if (!(b >> 2))
{
/* On top edge, get from MB above us */
cur_mb -= mi_stride;
if(cur_mb->mbmi.mode != SPLITMV)
return cur_mb->mbmi.mv.as_int;
b += 16;
}
return (cur_mb->bmi + (b - 4))->mv.as_int;
}
static INLINE B_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b)
{
if (!(b & 3))
{
/* On L edge, get from MB to left of us */
--cur_mb;
switch (cur_mb->mbmi.mode)
{
case B_PRED:
return (cur_mb->bmi + b + 3)->as_mode;
case DC_PRED:
return B_DC_PRED;
case V_PRED:
return B_VE_PRED;
case H_PRED:
return B_HE_PRED;
case TM_PRED:
return B_TM_PRED;
default:
return B_DC_PRED;
}
}
return (cur_mb->bmi + b - 1)->as_mode;
}
static INLINE B_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mb, int b,
int mi_stride)
{
if (!(b >> 2))
{
/* On top edge, get from MB above us */
cur_mb -= mi_stride;
switch (cur_mb->mbmi.mode)
{
case B_PRED:
return (cur_mb->bmi + b + 12)->as_mode;
case DC_PRED:
return B_DC_PRED;
case V_PRED:
return B_VE_PRED;
case H_PRED:
return B_HE_PRED;
case TM_PRED:
return B_TM_PRED;
default:
return B_DC_PRED;
}
}
return (cur_mb->bmi + b - 4)->as_mode;
}
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_FINDNEARMV_H_

View File

@ -1,106 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "vpx_config.h"
#include "vp8_rtcd.h"
#if ARCH_ARM
#include "vpx_ports/arm.h"
#elif ARCH_X86 || ARCH_X86_64
#include "vpx_ports/x86.h"
#endif
#include "vp8/common/onyxc_int.h"
#include "vp8/common/systemdependent.h"
#if CONFIG_MULTITHREAD
#if HAVE_UNISTD_H && !defined(__OS2__)
#include <unistd.h>
#elif defined(_WIN32)
#include <windows.h>
typedef void (WINAPI *PGNSI)(LPSYSTEM_INFO);
#elif defined(__OS2__)
#define INCL_DOS
#define INCL_DOSSPINLOCK
#include <os2.h>
#endif
#endif
#if CONFIG_MULTITHREAD
static int get_cpu_count()
{
int core_count = 16;
#if HAVE_UNISTD_H && !defined(__OS2__)
#if defined(_SC_NPROCESSORS_ONLN)
core_count = sysconf(_SC_NPROCESSORS_ONLN);
#elif defined(_SC_NPROC_ONLN)
core_count = sysconf(_SC_NPROC_ONLN);
#endif
#elif defined(_WIN32)
{
#if _WIN32_WINNT >= 0x0501
SYSTEM_INFO sysinfo;
GetNativeSystemInfo(&sysinfo);
#else
PGNSI pGNSI;
SYSTEM_INFO sysinfo;
/* Call GetNativeSystemInfo if supported or
* GetSystemInfo otherwise. */
pGNSI = (PGNSI) GetProcAddress(
GetModuleHandle(TEXT("kernel32.dll")), "GetNativeSystemInfo");
if (pGNSI != NULL)
pGNSI(&sysinfo);
else
GetSystemInfo(&sysinfo);
#endif
core_count = sysinfo.dwNumberOfProcessors;
}
#elif defined(__OS2__)
{
ULONG proc_id;
ULONG status;
core_count = 0;
for (proc_id = 1; ; proc_id++)
{
if (DosGetProcessorStatus(proc_id, &status))
break;
if (status == PROC_ONLINE)
core_count++;
}
}
#else
/* other platforms */
#endif
return core_count > 0 ? core_count : 1;
}
#endif
void vp8_clear_system_state_c() {}
void vp8_machine_specific_config(VP8_COMMON *ctx)
{
#if CONFIG_MULTITHREAD
ctx->processor_core_count = get_cpu_count();
#else
(void)ctx;
#endif /* CONFIG_MULTITHREAD */
#if ARCH_ARM
ctx->cpu_caps = arm_cpu_caps();
#elif ARCH_X86 || ARCH_X86_64
ctx->cpu_caps = x86_simd_caps();
#endif
}

View File

@ -1,51 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_HEADER_H_
#define VP8_COMMON_HEADER_H_
#ifdef __cplusplus
extern "C" {
#endif
/* 24 bits total */
typedef struct
{
unsigned int type: 1;
unsigned int version: 3;
unsigned int show_frame: 1;
/* Allow 2^20 bytes = 8 megabits for first partition */
unsigned int first_partition_length_in_bytes: 19;
#ifdef PACKET_TESTING
unsigned int frame_number;
unsigned int update_gold: 1;
unsigned int uses_gold: 1;
unsigned int update_last: 1;
unsigned int uses_last: 1;
#endif
} VP8_HEADER;
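/* The non-PACKET_TESTING fields sum to 1 + 3 + 1 + 19 = 24 bits, i.e. the
   3-byte VP8_HEADER_SIZE defined below. */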
#ifdef PACKET_TESTING
#define VP8_HEADER_SIZE 8
#else
#define VP8_HEADER_SIZE 3
#endif
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_HEADER_H_

View File

@ -1,90 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "vpx_config.h"
#include "vp8_rtcd.h"
#include "vpx_mem/vpx_mem.h"
void vp8_dequant_idct_add_c(short *input, short *dq,
unsigned char *dest, int stride);
void vp8_dc_only_idct_add_c(short input_dc, unsigned char * pred,
int pred_stride, unsigned char *dst_ptr,
int dst_stride);
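/* Each 4x4 block takes the full dequantize + IDCT path only when its
   end-of-block marker is past the DC coefficient; otherwise the cheaper
   DC-only add is used and the leading coefficients are cleared. */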
void vp8_dequant_idct_add_y_block_c
(short *q, short *dq,
unsigned char *dst, int stride, char *eobs)
{
int i, j;
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
if (*eobs++ > 1)
vp8_dequant_idct_add_c (q, dq, dst, stride);
else
{
vp8_dc_only_idct_add_c (q[0]*dq[0], dst, stride, dst, stride);
memset(q, 0, 2 * sizeof(q[0]));
}
q += 16;
dst += 4;
}
dst += 4*stride - 16;
}
}
void vp8_dequant_idct_add_uv_block_c
(short *q, short *dq,
unsigned char *dstu, unsigned char *dstv, int stride, char *eobs)
{
int i, j;
for (i = 0; i < 2; i++)
{
for (j = 0; j < 2; j++)
{
if (*eobs++ > 1)
vp8_dequant_idct_add_c (q, dq, dstu, stride);
else
{
vp8_dc_only_idct_add_c (q[0]*dq[0], dstu, stride, dstu, stride);
memset(q, 0, 2 * sizeof(q[0]));
}
q += 16;
dstu += 4;
}
dstu += 4*stride - 8;
}
for (i = 0; i < 2; i++)
{
for (j = 0; j < 2; j++)
{
if (*eobs++ > 1)
vp8_dequant_idct_add_c (q, dq, dstv, stride);
else
{
vp8_dc_only_idct_add_c (q[0]*dq[0], dstv, stride, dstv, stride);
memset(q, 0, 2 * sizeof(q[0]));
}
q += 16;
dstv += 4;
}
dstv += 4*stride - 8;
}
}

View File

@ -1,205 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "./vp8_rtcd.h"
/****************************************************************************
* Notes:
*
* This implementation makes use of 16-bit fixed-point versions of two multiply
* constants:
* 1. sqrt(2) * cos (pi/8)
* 2. sqrt(2) * sin (pi/8)
* Because the first constant is bigger than 1, to maintain the same 16-bit
* fixed-point precision as the second one, we use a trick of
* x * a = x + x*(a-1)
* so
* x * sqrt(2) * cos (pi/8) = x + x * (sqrt(2) *cos(pi/8)-1).
**************************************************************************/
static const int cospi8sqrt2minus1 = 20091;
static const int sinpi8sqrt2 = 35468;
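/* Editorial aside, not part of the removed source: both values are Q16
 * fixed-point encodings of the constants described above,
 *   sqrt(2) * cos(pi/8) - 1 = 0.3065630...  ->  round(0.3065630 * 65536) = 20091
 *   sqrt(2) * sin(pi/8)     = 0.5411961...  ->  round(0.5411961 * 65536) = 35468
 * so x * sqrt(2) * cos(pi/8) is computed below as x + ((x * 20091) >> 16) and
 * x * sqrt(2) * sin(pi/8) as (x * 35468) >> 16; subtracting 1 from the first
 * constant keeps both multipliers under 2^16.
 */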
void vp8_short_idct4x4llm_c(short *input, unsigned char *pred_ptr,
int pred_stride, unsigned char *dst_ptr,
int dst_stride)
{
int i;
int r, c;
int a1, b1, c1, d1;
short output[16];
short *ip = input;
short *op = output;
int temp1, temp2;
int shortpitch = 4;
for (i = 0; i < 4; i++)
{
a1 = ip[0] + ip[8];
b1 = ip[0] - ip[8];
temp1 = (ip[4] * sinpi8sqrt2) >> 16;
temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16);
c1 = temp1 - temp2;
temp1 = ip[4] + ((ip[4] * cospi8sqrt2minus1) >> 16);
temp2 = (ip[12] * sinpi8sqrt2) >> 16;
d1 = temp1 + temp2;
op[shortpitch*0] = a1 + d1;
op[shortpitch*3] = a1 - d1;
op[shortpitch*1] = b1 + c1;
op[shortpitch*2] = b1 - c1;
ip++;
op++;
}
ip = output;
op = output;
for (i = 0; i < 4; i++)
{
a1 = ip[0] + ip[2];
b1 = ip[0] - ip[2];
temp1 = (ip[1] * sinpi8sqrt2) >> 16;
temp2 = ip[3] + ((ip[3] * cospi8sqrt2minus1) >> 16);
c1 = temp1 - temp2;
temp1 = ip[1] + ((ip[1] * cospi8sqrt2minus1) >> 16);
temp2 = (ip[3] * sinpi8sqrt2) >> 16;
d1 = temp1 + temp2;
op[0] = (a1 + d1 + 4) >> 3;
op[3] = (a1 - d1 + 4) >> 3;
op[1] = (b1 + c1 + 4) >> 3;
op[2] = (b1 - c1 + 4) >> 3;
ip += shortpitch;
op += shortpitch;
}
ip = output;
for (r = 0; r < 4; r++)
{
for (c = 0; c < 4; c++)
{
int a = ip[c] + pred_ptr[c] ;
if (a < 0)
a = 0;
if (a > 255)
a = 255;
dst_ptr[c] = (unsigned char) a ;
}
ip += 4;
dst_ptr += dst_stride;
pred_ptr += pred_stride;
}
}
void vp8_dc_only_idct_add_c(short input_dc, unsigned char *pred_ptr,
int pred_stride, unsigned char *dst_ptr,
int dst_stride)
{
int a1 = ((input_dc + 4) >> 3);
int r, c;
for (r = 0; r < 4; r++)
{
for (c = 0; c < 4; c++)
{
int a = a1 + pred_ptr[c] ;
if (a < 0)
a = 0;
if (a > 255)
a = 255;
dst_ptr[c] = (unsigned char) a ;
}
dst_ptr += dst_stride;
pred_ptr += pred_stride;
}
}
void vp8_short_inv_walsh4x4_c(short *input, short *mb_dqcoeff)
{
short output[16];
int i;
int a1, b1, c1, d1;
int a2, b2, c2, d2;
short *ip = input;
short *op = output;
for (i = 0; i < 4; i++)
{
a1 = ip[0] + ip[12];
b1 = ip[4] + ip[8];
c1 = ip[4] - ip[8];
d1 = ip[0] - ip[12];
op[0] = a1 + b1;
op[4] = c1 + d1;
op[8] = a1 - b1;
op[12] = d1 - c1;
ip++;
op++;
}
ip = output;
op = output;
for (i = 0; i < 4; i++)
{
a1 = ip[0] + ip[3];
b1 = ip[1] + ip[2];
c1 = ip[1] - ip[2];
d1 = ip[0] - ip[3];
a2 = a1 + b1;
b2 = c1 + d1;
c2 = a1 - b1;
d2 = d1 - c1;
op[0] = (a2 + 3) >> 3;
op[1] = (b2 + 3) >> 3;
op[2] = (c2 + 3) >> 3;
op[3] = (d2 + 3) >> 3;
ip += 4;
op += 4;
}
for(i = 0; i < 16; i++)
{
mb_dqcoeff[i * 16] = output[i];
}
}
void vp8_short_inv_walsh4x4_1_c(short *input, short *mb_dqcoeff)
{
int i;
int a1;
a1 = ((input[0] + 3) >> 3);
for(i = 0; i < 16; i++)
{
mb_dqcoeff[i * 16] = a1;
}
}
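Editorial aside on the two Walsh-Hadamard routines above: the inverse WHT recovers the 16 luma DC values of a macroblock from the second-order block, and the mb_dqcoeff[i * 16] stores scatter each result into the DC slot of the i-th 4x4 luma sub-block (each sub-block's 16 coefficients are stored contiguously, hence the stride of 16). vp8_short_inv_walsh4x4_1_c is the shortcut for a second-order block whose only coded coefficient is its own DC, in which case every sub-block receives the same value a1 = (input[0] + 3) >> 3.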

View File

@ -1,70 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_INVTRANS_H_
#define VP8_COMMON_INVTRANS_H_
#include "./vpx_config.h"
#include "vp8_rtcd.h"
#include "blockd.h"
#include "onyxc_int.h"
#if CONFIG_MULTITHREAD
#include "vpx_mem/vpx_mem.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
static void eob_adjust(char *eobs, short *diff)
{
/* eob adjust.... the idct can only skip if both the dc and eob are zero */
int js;
for(js = 0; js < 16; js++)
{
if((eobs[js] == 0) && (diff[0] != 0))
eobs[js]++;
diff+=16;
}
}
static INLINE void vp8_inverse_transform_mby(MACROBLOCKD *xd)
{
short *DQC = xd->dequant_y1;
if (xd->mode_info_context->mbmi.mode != SPLITMV)
{
/* do 2nd order transform on the dc block */
if (xd->eobs[24] > 1)
{
vp8_short_inv_walsh4x4
(&xd->block[24].dqcoeff[0], xd->qcoeff);
}
else
{
vp8_short_inv_walsh4x4_1
(&xd->block[24].dqcoeff[0], xd->qcoeff);
}
eob_adjust(xd->eobs, xd->qcoeff);
DQC = xd->dequant_y1_dc;
}
vp8_dequant_idct_add_y_block
(xd->qcoeff, DQC,
xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs);
}
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_INVTRANS_H_
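Editorial aside on eob_adjust above: optimized implementations of vp8_dequant_idct_add_y_block may skip a 4x4 sub-block entirely when its end-of-block count is zero, which is only safe if its DC is also zero, as the comment notes. After the inverse WHT has written fresh DC values into qcoeff, eob_adjust therefore bumps any zero eob to 1 when the corresponding DC is non-zero. For example, if eobs[5] == 0 but qcoeff[5 * 16] is now non-zero, eobs[5] becomes 1, so the DC-only add still runs for that sub-block instead of being skipped.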

View File

@ -1,113 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP8_COMMON_LOOPFILTER_H_
#define VP8_COMMON_LOOPFILTER_H_
#include "vpx_ports/mem.h"
#include "vpx_config.h"
#include "vp8_rtcd.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MAX_LOOP_FILTER 63
/* fraction of total macroblock rows to be used in fast filter level picking */
/* has to be > 2 */
#define PARTIAL_FRAME_FRACTION 8
typedef enum
{
NORMAL_LOOPFILTER = 0,
SIMPLE_LOOPFILTER = 1
} LOOPFILTERTYPE;
#if ARCH_ARM
#define SIMD_WIDTH 1
#else
#define SIMD_WIDTH 16
#endif
/* Need to align this structure so when it is declared and
* passed it can be loaded into vector registers.
*/
typedef struct
{
DECLARE_ALIGNED(SIMD_WIDTH, unsigned char, mblim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]);
DECLARE_ALIGNED(SIMD_WIDTH, unsigned char, blim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]);
DECLARE_ALIGNED(SIMD_WIDTH, unsigned char, lim[MAX_LOOP_FILTER + 1][SIMD_WIDTH]);
DECLARE_ALIGNED(SIMD_WIDTH, unsigned char, hev_thr[4][SIMD_WIDTH]);
unsigned char lvl[4][4][4];
unsigned char hev_thr_lut[2][MAX_LOOP_FILTER + 1];
unsigned char mode_lf_lut[10];
} loop_filter_info_n;
typedef struct loop_filter_info
{
const unsigned char * mblim;
const unsigned char * blim;
const unsigned char * lim;
const unsigned char * hev_thr;
} loop_filter_info;
typedef void loop_filter_uvfunction
(
unsigned char *u, /* source pointer */
int p, /* pitch */
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh,
unsigned char *v
);
/* assorted loopfilter functions which get used elsewhere */
struct VP8Common;
struct macroblockd;
struct modeinfo;
void vp8_loop_filter_init(struct VP8Common *cm);
void vp8_loop_filter_frame_init(struct VP8Common *cm,
struct macroblockd *mbd,
int default_filt_lvl);
void vp8_loop_filter_frame(struct VP8Common *cm, struct macroblockd *mbd,
int frame_type);
void vp8_loop_filter_partial_frame(struct VP8Common *cm,
struct macroblockd *mbd,
int default_filt_lvl);
void vp8_loop_filter_frame_yonly(struct VP8Common *cm,
struct macroblockd *mbd,
int default_filt_lvl);
void vp8_loop_filter_update_sharpness(loop_filter_info_n *lfi,
int sharpness_lvl);
void vp8_loop_filter_row_normal(struct VP8Common *cm,
struct modeinfo *mode_info_context,
int mb_row, int post_ystride, int post_uvstride,
unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr);
void vp8_loop_filter_row_simple(struct VP8Common *cm,
struct modeinfo *mode_info_context,
int mb_row, int post_ystride, int post_uvstride,
unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VP8_COMMON_LOOPFILTER_H_
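Editorial aside: each threshold in loop_filter_info_n is stored SIMD_WIDTH times per filter level so that a single aligned vector load can fetch it, and loop_filter_info is the lightweight per-call view that points into those rows. A minimal sketch of how such a view might be populated for one macroblock (the function and variable names are illustrative, not part of the removed sources):

/* Illustrative only: select the threshold rows for a given filter level. */
static void lf_info_select(const loop_filter_info_n *n, int filter_level,
                           int hev_index, loop_filter_info *out)
{
    out->mblim   = n->mblim[filter_level];
    out->blim    = n->blim[filter_level];
    out->lim     = n->lim[filter_level];
    out->hev_thr = n->hev_thr[hev_index];
}

Here hev_index would come from the hev_thr_lut table indexed by frame type and filter level.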

View File

@ -1,430 +0,0 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdlib.h>
#include "loopfilter.h"
#include "onyxc_int.h"
typedef unsigned char uc;
static signed char vp8_signed_char_clamp(int t)
{
t = (t < -128 ? -128 : t);
t = (t > 127 ? 127 : t);
return (signed char) t;
}
/* should we apply any filter at all ( 11111111 yes, 00000000 no) */
static signed char vp8_filter_mask(uc limit, uc blimit,
uc p3, uc p2, uc p1, uc p0,
uc q0, uc q1, uc q2, uc q3)
{
signed char mask = 0;
mask |= (abs(p3 - p2) > limit);
mask |= (abs(p2 - p1) > limit);
mask |= (abs(p1 - p0) > limit);
mask |= (abs(q1 - q0) > limit);
mask |= (abs(q2 - q1) > limit);
mask |= (abs(q3 - q2) > limit);
mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit);
return mask - 1;
}
/* is there high variance internal edge ( 11111111 yes, 00000000 no) */
static signed char vp8_hevmask(uc thresh, uc p1, uc p0, uc q0, uc q1)
{
signed char hev = 0;
hev |= (abs(p1 - p0) > thresh) * -1;
hev |= (abs(q1 - q0) > thresh) * -1;
return hev;
}
static void vp8_filter(signed char mask, uc hev, uc *op1,
uc *op0, uc *oq0, uc *oq1)
{
signed char ps0, qs0;
signed char ps1, qs1;
signed char filter_value, Filter1, Filter2;
signed char u;
ps1 = (signed char) * op1 ^ 0x80;
ps0 = (signed char) * op0 ^ 0x80;
qs0 = (signed char) * oq0 ^ 0x80;
qs1 = (signed char) * oq1 ^ 0x80;
/* add outer taps if we have high edge variance */
filter_value = vp8_signed_char_clamp(ps1 - qs1);
filter_value &= hev;
/* inner taps */
filter_value = vp8_signed_char_clamp(filter_value + 3 * (qs0 - ps0));
filter_value &= mask;
/* save bottom 3 bits so that we round one side +4 and the other +3
* if it equals 4 we'll set to adjust by -1 to account for the fact
* we'd round 3 the other way
*/
Filter1 = vp8_signed_char_clamp(filter_value + 4);
Filter2 = vp8_signed_char_clamp(filter_value + 3);
Filter1 >>= 3;
Filter2 >>= 3;
u = vp8_signed_char_clamp(qs0 - Filter1);
*oq0 = u ^ 0x80;
u = vp8_signed_char_clamp(ps0 + Filter2);
*op0 = u ^ 0x80;
filter_value = Filter1;
/* outer tap adjustments */
filter_value += 1;
filter_value >>= 1;
filter_value &= ~hev;
u = vp8_signed_char_clamp(qs1 - filter_value);
*oq1 = u ^ 0x80;
u = vp8_signed_char_clamp(ps1 + filter_value);
*op1 = u ^ 0x80;
}
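/* Editorial aside, not part of the removed source: a worked example of the
 * +4/+3 rounding above. With filter_value = 12, Filter1 = (12 + 4) >> 3 = 2
 * is subtracted from q0 and Filter2 = (12 + 3) >> 3 = 1 is added to p0, so
 * the step across the edge shrinks by 3, i.e. about 2 * 12 / 8, with the
 * rounding error split between the two sides instead of biasing one of them.
 */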
void vp8_loop_filter_horizontal_edge_c
(
unsigned char *s,
int p, /* pitch */
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh,
int count
)
{
int hev = 0; /* high edge variance */
signed char mask = 0;
int i = 0;
/* loop filter designed to work using chars so that we can make maximum use
* of 8 bit simd instructions.
*/
do
{
mask = vp8_filter_mask(limit[0], blimit[0],
s[-4*p], s[-3*p], s[-2*p], s[-1*p],
s[0*p], s[1*p], s[2*p], s[3*p]);
hev = vp8_hevmask(thresh[0], s[-2*p], s[-1*p], s[0*p], s[1*p]);
vp8_filter(mask, hev, s - 2 * p, s - 1 * p, s, s + 1 * p);
++s;
}
while (++i < count * 8);
}
void vp8_loop_filter_vertical_edge_c
(
unsigned char *s,
int p,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh,
int count
)
{
int hev = 0; /* high edge variance */
signed char mask = 0;
int i = 0;
/* loop filter designed to work using chars so that we can make maximum use
* of 8 bit simd instructions.
*/
do
{
mask = vp8_filter_mask(limit[0], blimit[0],
s[-4], s[-3], s[-2], s[-1], s[0], s[1], s[2], s[3]);
hev = vp8_hevmask(thresh[0], s[-2], s[-1], s[0], s[1]);
vp8_filter(mask, hev, s - 2, s - 1, s, s + 1);
s += p;
}
while (++i < count * 8);
}
static void vp8_mbfilter(signed char mask, uc hev,
uc *op2, uc *op1, uc *op0, uc *oq0, uc *oq1, uc *oq2)
{
signed char s, u;
signed char filter_value, Filter1, Filter2;
signed char ps2 = (signed char) * op2 ^ 0x80;
signed char ps1 = (signed char) * op1 ^ 0x80;
signed char ps0 = (signed char) * op0 ^ 0x80;
signed char qs0 = (signed char) * oq0 ^ 0x80;
signed char qs1 = (signed char) * oq1 ^ 0x80;
signed char qs2 = (signed char) * oq2 ^ 0x80;
/* add outer taps if we have high edge variance */
filter_value = vp8_signed_char_clamp(ps1 - qs1);
filter_value = vp8_signed_char_clamp(filter_value + 3 * (qs0 - ps0));
filter_value &= mask;
Filter2 = filter_value;
Filter2 &= hev;
/* save bottom 3 bits so that we round one side +4 and the other +3 */
Filter1 = vp8_signed_char_clamp(Filter2 + 4);
Filter2 = vp8_signed_char_clamp(Filter2 + 3);
Filter1 >>= 3;
Filter2 >>= 3;
qs0 = vp8_signed_char_clamp(qs0 - Filter1);
ps0 = vp8_signed_char_clamp(ps0 + Filter2);
/* only apply wider filter if not high edge variance */
filter_value &= ~hev;
Filter2 = filter_value;
/* roughly 3/7th difference across boundary */
u = vp8_signed_char_clamp((63 + Filter2 * 27) >> 7);
s = vp8_signed_char_clamp(qs0 - u);
*oq0 = s ^ 0x80;
s = vp8_signed_char_clamp(ps0 + u);
*op0 = s ^ 0x80;
/* roughly 2/7th difference across boundary */
u = vp8_signed_char_clamp((63 + Filter2 * 18) >> 7);
s = vp8_signed_char_clamp(qs1 - u);
*oq1 = s ^ 0x80;
s = vp8_signed_char_clamp(ps1 + u);
*op1 = s ^ 0x80;
/* roughly 1/7th difference across boundary */
u = vp8_signed_char_clamp((63 + Filter2 * 9) >> 7);
s = vp8_signed_char_clamp(qs2 - u);
*oq2 = s ^ 0x80;
s = vp8_signed_char_clamp(ps2 + u);
*op2 = s ^ 0x80;
}
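/* Editorial aside, not part of the removed source: the "roughly x/7th" steps
 * above follow from the integer weights. Each u computed as (63 + f * w) >> 7
 * is subtracted on one side of the edge and added on the other, so the gap
 * across that pixel pair shrinks by about 2 * w / 128 = w / 64 of the filter
 * value f:
 *   w = 27 -> 27/64 = 0.42 (about 3/7) for q0/p0,
 *   w = 18 -> 18/64 = 0.28 (about 2/7) for q1/p1,
 *   w =  9 ->  9/64 = 0.14 (about 1/7) for q2/p2.
 */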
void vp8_mbloop_filter_horizontal_edge_c
(
unsigned char *s,
int p,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh,
int count
)
{
signed char hev = 0; /* high edge variance */
signed char mask = 0;
int i = 0;
/* loop filter designed to work using chars so that we can make maximum use
* of 8 bit simd instructions.
*/
do
{
mask = vp8_filter_mask(limit[0], blimit[0],
s[-4*p], s[-3*p], s[-2*p], s[-1*p],
s[0*p], s[1*p], s[2*p], s[3*p]);
hev = vp8_hevmask(thresh[0], s[-2*p], s[-1*p], s[0*p], s[1*p]);
vp8_mbfilter(mask, hev, s - 3 * p, s - 2 * p, s - 1 * p, s, s + 1 * p, s + 2 * p);
++s;
}
while (++i < count * 8);
}
void vp8_mbloop_filter_vertical_edge_c
(
unsigned char *s,
int p,
const unsigned char *blimit,
const unsigned char *limit,
const unsigned char *thresh,
int count
)
{
signed char hev = 0; /* high edge variance */
signed char mask = 0;
int i = 0;
do
{
mask = vp8_filter_mask(limit[0], blimit[0],
s[-4], s[-3], s[-2], s[-1], s[0], s[1], s[2], s[3]);
hev = vp8_hevmask(thresh[0], s[-2], s[-1], s[0], s[1]);
vp8_mbfilter(mask, hev, s - 3, s - 2, s - 1, s, s + 1, s + 2);
s += p;
}
while (++i < count * 8);
}
/* should we apply any filter at all ( 11111111 yes, 00000000 no) */
static signed char vp8_simple_filter_mask(uc blimit, uc p1, uc p0, uc q0, uc q1)
{
/* Why does this cause problems for win32?
* error C2143: syntax error : missing ';' before 'type'
* (void) limit;
*/
signed char mask = (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit) * -1;
return mask;
}
static void vp8_simple_filter(signed char mask, uc *op1, uc *op0, uc *oq0, uc *oq1)
{
signed char filter_value, Filter1, Filter2;
signed char p1 = (signed char) * op1 ^ 0x80;
signed char p0 = (signed char) * op0 ^ 0x80;
signed char q0 = (signed char) * oq0 ^ 0x80;
signed char q1 = (signed char) * oq1 ^ 0x80;
signed char u;
filter_value = vp8_signed_char_clamp(p1 - q1);
filter_value = vp8_signed_char_clamp(filter_value + 3 * (q0 - p0));
filter_value &= mask;
/* save bottom 3 bits so that we round one side +4 and the other +3 */
Filter1 = vp8_signed_char_clamp(filter_value + 4);
Filter1 >>= 3;
u = vp8_signed_char_clamp(q0 - Filter1);
*oq0 = u ^ 0x80;
Filter2 = vp8_signed_char_clamp(filter_value + 3);
Filter2 >>= 3;
u = vp8_signed_char_clamp(p0 + Filter2);
*op0 = u ^ 0x80;
}
void vp8_loop_filter_simple_horizontal_edge_c
(
unsigned char *s,
int p,
const unsigned char *blimit
)
{
signed char mask = 0;
int i = 0;
do
{
mask = vp8_simple_filter_mask(blimit[0], s[-2*p], s[-1*p], s[0*p], s[1*p]);
vp8_simple_filter(mask, s - 2 * p, s - 1 * p, s, s + 1 * p);
++s;
}
while (++i < 16);
}
void vp8_loop_filter_simple_vertical_edge_c
(
unsigned char *s,
int p,
const unsigned char *blimit
)
{
signed char mask = 0;
int i = 0;
do
{
mask = vp8_simple_filter_mask(blimit[0], s[-2], s[-1], s[0], s[1]);
vp8_simple_filter(mask, s - 2, s - 1, s, s + 1);
s += p;
}
while (++i < 16);
}
/* Horizontal MB filtering */
void vp8_loop_filter_mbh_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
loop_filter_info *lfi)
{
vp8_mbloop_filter_horizontal_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp8_mbloop_filter_horizontal_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp8_mbloop_filter_horizontal_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
}
/* Vertical MB Filtering */
void vp8_loop_filter_mbv_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
loop_filter_info *lfi)
{
vp8_mbloop_filter_vertical_edge_c(y_ptr, y_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp8_mbloop_filter_vertical_edge_c(u_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp8_mbloop_filter_vertical_edge_c(v_ptr, uv_stride, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
}
/* Horizontal B Filtering */
void vp8_loop_filter_bh_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
loop_filter_info *lfi)
{
vp8_loop_filter_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp8_loop_filter_horizontal_edge_c(u_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp8_loop_filter_horizontal_edge_c(v_ptr + 4 * uv_stride, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
void vp8_loop_filter_bhs_c(unsigned char *y_ptr, int y_stride,
const unsigned char *blimit)
{
vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 4 * y_stride, y_stride, blimit);
vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 8 * y_stride, y_stride, blimit);
vp8_loop_filter_simple_horizontal_edge_c(y_ptr + 12 * y_stride, y_stride, blimit);
}
/* Vertical B Filtering */
void vp8_loop_filter_bv_c(unsigned char *y_ptr, unsigned char *u_ptr,
unsigned char *v_ptr, int y_stride, int uv_stride,
loop_filter_info *lfi)
{
vp8_loop_filter_vertical_edge_c(y_ptr + 4, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_vertical_edge_c(y_ptr + 8, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
vp8_loop_filter_vertical_edge_c(y_ptr + 12, y_stride, lfi->blim, lfi->lim, lfi->hev_thr, 2);
if (u_ptr)
vp8_loop_filter_vertical_edge_c(u_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
if (v_ptr)
vp8_loop_filter_vertical_edge_c(v_ptr + 4, uv_stride, lfi->blim, lfi->lim, lfi->hev_thr, 1);
}
void vp8_loop_filter_bvs_c(unsigned char *y_ptr, int y_stride,
const unsigned char *blimit)
{
vp8_loop_filter_simple_vertical_edge_c(y_ptr + 4, y_stride, blimit);
vp8_loop_filter_simple_vertical_edge_c(y_ptr + 8, y_stride, blimit);
vp8_loop_filter_simple_vertical_edge_c(y_ptr + 12, y_stride, blimit);
}
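Editorial aside on the wrappers above: the mbh/mbv variants filter the macroblock's outer edge (offset 0), while the bh/bv variants filter the three interior edges of the 16x16 luma block at offsets 4, 8 and 12, plus the single interior edge of each 8x8 chroma plane at offset 4. The count argument of the edge functions is the edge length in units of 8 pixels, hence count = 2 for a 16-pixel luma edge and count = 1 for an 8-pixel chroma edge (the do/while loops run count * 8 times).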

Some files were not shown because too many files have changed in this diff.