Use remote packages for aom and libyuv

Author: dijunkun
Date: 2024-05-22 16:17:05 +08:00
parent c95a2a32dc
commit fdb8819926
115 changed files with 36 additions and 98518 deletions

thirdparty/aom/xmake.lua vendored (new file)

@@ -0,0 +1,23 @@
package("aom")
    set_homepage("https://aomedia.googlesource.com/aom/")
    set_description("AV1 Codec Library")
    set_license("BSD-3-Clause")

    set_urls("https://aomedia.googlesource.com/aom.git")
    add_versions("v3.9.0", "6cab58c3925e0f4138e15a4ed510161ea83b6db1")

    add_deps("cmake")

    if is_os("windows") then
        add_defines("_CRT_SECURE_NO_WARNINGS")
    end

    on_install("windows", "linux", "macosx", function (package)
        local configs = {"-DENABLE_EXAMPLES=OFF", "-DENABLE_TESTS=OFF", "-DENABLE_TOOLS=OFF", "-DENABLE_DOCS=OFF"}
        table.insert(configs, "-DCMAKE_BUILD_TYPE=" .. (package:debug() and "Debug" or "Release"))
        import("package.tools.cmake").install(package, configs)
    end)

    on_test(function (package)
        assert(package:has_cfuncs("aom_codec_version", {includes = "aom/aom_codec.h"}))
    end)
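In a consuming xmake project, this package would typically be pulled in with add_requires("aom") plus add_packages("aom") on the target. The on_test hook above only probes that aom_codec_version links; the same check as a standalone C program (a sketch, assuming the installed package's headers and library are on the compiler's search paths, e.g. cc check_aom.c -laom):

/* check_aom.c - hand-rolled equivalent of the package's on_test probe. */
#include <stdio.h>
#include <aom/aom_codec.h>

int main(void) {
  /* aom_codec_version() returns the packed numeric version;
     aom_codec_version_str() is the readable form, e.g. "3.9.0". */
  printf("aom %s (0x%06x)\n", aom_codec_version_str(), aom_codec_version());
  return 0;
}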

thirdparty/libyuv/.clang-format vendored

@@ -1,6 +0,0 @@
# Defines the Chromium style for automatic reformatting.
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Chromium
---
Language: Java
BasedOnStyle: Google

thirdparty/libyuv/.gitignore vendored

@@ -1,36 +0,0 @@
*.pyc
.landmines
pin-log.txt
/base
/build
/buildtools
/google_apis
/links
/links.db
/ios
/mojo
/native_client
/net
/out
/source/out
/sde-avx-sse-transition-out.txt
/testing
/third_party
/tools
# Files generated by CMake build
cmake_install.cmake
CMakeCache.txt
CMakeFiles/
yuvconvert
libgtest.a
libyuv.a
libyuv_unittest
# Files generated by winarm.mk build
libyuv_arm.lib
source/*.o
# Files generated by perf
perf.data
perf.data.old

thirdparty/libyuv/.gn vendored

@@ -1,36 +0,0 @@
# Copyright 2015 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("//build/dotfile_settings.gni")
# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"
# The secondary source root is a parallel directory tree where
# GN build files are placed when they can not be placed directly
# in the source tree, e.g. for third party source trees.
secondary_source = "//build/secondary/"
# These are the targets to check headers for by default. The files in targets
# matching these patterns (see "gn help label_pattern" for format) will have
# their includes checked for proper dependencies when you run either
# "gn check" or "gn gen --check".
check_targets = [ "//libyuv/*" ]
# These are the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged except for gypi_to_gn calls.
exec_script_whitelist = build_dotfile_settings.exec_script_whitelist +
[ "//build_overrides/build.gni" ]
default_args = {
mac_sdk_min = "10.12"
# https://bugs.chromium.org/p/libyuv/issues/detail?id=826
ios_deployment_target = "10.0"
}

thirdparty/libyuv/.vpython vendored

@@ -1,59 +0,0 @@
# This is a vpython "spec" file.
#
# It describes patterns for python wheel dependencies of the python scripts in
# the chromium repo, particularly for dependencies that have compiled components
# (since pure-python dependencies can be easily vendored into third_party).
#
# When vpython is invoked, it finds this file and builds a python VirtualEnv,
# containing all of the dependencies described in this file, fetching them from
# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
# this never requires the end-user machine to have a working python extension
# compilation environment. All of these packages are built using:
# https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/
#
# All python scripts in the repo share this same spec, to avoid dependency
# fragmentation.
#
# If you have depot_tools installed in your $PATH, you can invoke python scripts
# in this repo by running them as you normally would run them, except
# substituting `vpython` instead of `python` on the command line, e.g.:
# vpython path/to/script.py some --arguments
#
# Read more about `vpython` and how to modify this file here:
# https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md
python_version: "2.7"
# Used by:
# third_party/catapult
wheel: <
name: "infra/python/wheels/psutil/${platform}_${py_python}_${py_abi}"
version: "version:5.2.2"
>
# Used by:
# third_party/catapult
wheel: <
name: "infra/python/wheels/pypiwin32/${vpython_platform}"
version: "version:219"
match_tag: <
platform: "win32"
>
match_tag: <
platform: "win_amd64"
>
>
# Used by:
# tools/swarming_client
wheel: <
name: "infra/python/wheels/six-py2_py3"
version: "version:1.15.0"
>
# Used by:
# build/android
wheel: <
name: "infra/python/wheels/requests-py2_py3"
version: "version:2.13.0"
>

thirdparty/libyuv/AUTHORS vendored

@@ -1,4 +0,0 @@
# Names should be added to this file like so:
# Name or Organization <email address>
Google Inc.

thirdparty/libyuv/Android.bp vendored

@@ -1,156 +0,0 @@
cc_library {
name: "libyuv",
vendor_available: true,
vndk: {
enabled: true,
},
srcs: [
"source/compare.cc",
"source/compare_common.cc",
"source/compare_gcc.cc",
"source/compare_mmi.cc",
"source/compare_msa.cc",
"source/compare_neon.cc",
"source/compare_neon64.cc",
"source/convert.cc",
"source/convert_argb.cc",
"source/convert_from.cc",
"source/convert_from_argb.cc",
"source/convert_jpeg.cc",
"source/convert_to_argb.cc",
"source/convert_to_i420.cc",
"source/cpu_id.cc",
"source/mjpeg_decoder.cc",
"source/mjpeg_validate.cc",
"source/planar_functions.cc",
"source/rotate.cc",
"source/rotate_any.cc",
"source/rotate_argb.cc",
"source/rotate_common.cc",
"source/rotate_gcc.cc",
"source/rotate_mmi.cc",
"source/rotate_msa.cc",
"source/rotate_neon.cc",
"source/rotate_neon64.cc",
"source/row_any.cc",
"source/row_common.cc",
"source/row_gcc.cc",
"source/row_mmi.cc",
"source/row_msa.cc",
"source/row_neon.cc",
"source/row_neon64.cc",
"source/scale.cc",
"source/scale_any.cc",
"source/scale_argb.cc",
"source/scale_common.cc",
"source/scale_gcc.cc",
"source/scale_mmi.cc",
"source/scale_msa.cc",
"source/scale_neon.cc",
"source/scale_neon64.cc",
"source/scale_uv.cc",
"source/video_common.cc",
],
cflags: [
"-Wall",
"-Werror",
"-Wno-unused-parameter",
"-fexceptions",
"-DHAVE_JPEG",
],
shared_libs: ["libjpeg"],
export_include_dirs: ["include"],
}
// compatibility static library until all uses of libyuv_static are replaced
// with libyuv (b/37646797)
cc_library_static {
name: "libyuv_static",
vendor_available: true,
whole_static_libs: ["libyuv"],
}
cc_test {
name: "libyuv_unittest",
static_libs: ["libyuv"],
shared_libs: ["libjpeg"],
cflags: ["-Wall", "-Werror"],
srcs: [
"unit_test/basictypes_test.cc",
"unit_test/color_test.cc",
"unit_test/compare_test.cc",
"unit_test/convert_test.cc",
"unit_test/cpu_test.cc",
"unit_test/cpu_thread_test.cc",
"unit_test/math_test.cc",
"unit_test/planar_test.cc",
"unit_test/rotate_argb_test.cc",
"unit_test/rotate_test.cc",
"unit_test/scale_argb_test.cc",
"unit_test/scale_test.cc",
"unit_test/scale_uv_test.cc",
"unit_test/unit_test.cc",
"unit_test/video_common_test.cc",
],
}
cc_test {
name: "compare",
gtest: false,
srcs: [
"util/compare.cc",
],
static_libs: ["libyuv"],
}
cc_test {
name: "i444tonv12_eg",
gtest: false,
srcs: [
"util/i444tonv12_eg.cc",
],
static_libs: ["libyuv"],
}
cc_test {
name: "cpuid",
gtest: false,
srcs: [
"util/cpuid.c",
],
static_libs: ["libyuv"],
}
cc_test {
name: "psnr",
gtest: false,
srcs: [
"util/psnr_main.cc",
"util/psnr.cc",
"util/ssim.cc",
],
static_libs: ["libyuv"],
}
cc_test {
name: "yuvconvert",
gtest: false,
srcs: [
"util/yuvconvert.cc",
],
static_libs: ["libyuv"],
shared_libs: ["libjpeg"],
}
cc_test {
name: "yuvconstants",
gtest: false,
srcs: [
"util/yuvconstants.c",
],
static_libs: ["libyuv"],
}

thirdparty/libyuv/Android.mk vendored

@@ -1,110 +0,0 @@
# This is the Android makefile for libyuv for NDK.
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
LOCAL_CPP_EXTENSION := .cc
LOCAL_SRC_FILES := \
source/compare.cc \
source/compare_common.cc \
source/compare_gcc.cc \
source/compare_mmi.cc \
source/compare_msa.cc \
source/compare_neon.cc \
source/compare_neon64.cc \
source/compare_win.cc \
source/convert.cc \
source/convert_argb.cc \
source/convert_from.cc \
source/convert_from_argb.cc \
source/convert_to_argb.cc \
source/convert_to_i420.cc \
source/cpu_id.cc \
source/planar_functions.cc \
source/rotate.cc \
source/rotate_any.cc \
source/rotate_argb.cc \
source/rotate_common.cc \
source/rotate_gcc.cc \
source/rotate_mmi.cc \
source/rotate_msa.cc \
source/rotate_neon.cc \
source/rotate_neon64.cc \
source/rotate_win.cc \
source/row_any.cc \
source/row_common.cc \
source/row_gcc.cc \
source/row_mmi.cc \
source/row_msa.cc \
source/row_neon.cc \
source/row_neon64.cc \
source/row_win.cc \
source/scale.cc \
source/scale_any.cc \
source/scale_argb.cc \
source/scale_common.cc \
source/scale_gcc.cc \
source/scale_mmi.cc \
source/scale_msa.cc \
source/scale_neon.cc \
source/scale_neon64.cc \
source/scale_uv.cc \
source/scale_win.cc \
source/video_common.cc
common_CFLAGS := -Wall -fexceptions
ifneq ($(LIBYUV_DISABLE_JPEG), "yes")
LOCAL_SRC_FILES += \
source/convert_jpeg.cc \
source/mjpeg_decoder.cc \
source/mjpeg_validate.cc
common_CFLAGS += -DHAVE_JPEG
LOCAL_SHARED_LIBRARIES := libjpeg
endif
LOCAL_CFLAGS += $(common_CFLAGS)
LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
LOCAL_C_INCLUDES += $(LOCAL_PATH)/include
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
LOCAL_MODULE := libyuv_static
LOCAL_MODULE_TAGS := optional
include $(BUILD_STATIC_LIBRARY)
include $(CLEAR_VARS)
LOCAL_WHOLE_STATIC_LIBRARIES := libyuv_static
LOCAL_MODULE := libyuv
ifneq ($(LIBYUV_DISABLE_JPEG), "yes")
LOCAL_SHARED_LIBRARIES := libjpeg
endif
include $(BUILD_SHARED_LIBRARY)
include $(CLEAR_VARS)
LOCAL_STATIC_LIBRARIES := libyuv_static
LOCAL_SHARED_LIBRARIES := libjpeg
LOCAL_MODULE_TAGS := tests
LOCAL_CPP_EXTENSION := .cc
LOCAL_C_INCLUDES += $(LOCAL_PATH)/include
LOCAL_SRC_FILES := \
unit_test/basictypes_test.cc \
unit_test/color_test.cc \
unit_test/compare_test.cc \
unit_test/convert_test.cc \
unit_test/cpu_test.cc \
unit_test/cpu_thread_test.cc \
unit_test/math_test.cc \
unit_test/planar_test.cc \
unit_test/rotate_argb_test.cc \
unit_test/rotate_test.cc \
unit_test/scale_argb_test.cc \
unit_test/scale_test.cc \
unit_test/scale_uv_test.cc \
unit_test/unit_test.cc \
unit_test/video_common_test.cc
LOCAL_MODULE := libyuv_unittest
include $(BUILD_NATIVE_TEST)

thirdparty/libyuv/BUILD.gn vendored

@@ -1,404 +0,0 @@
# Copyright 2014 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("//testing/test.gni")
import("libyuv.gni")
declare_args() {
# Set to false to disable building with absl flags.
libyuv_use_absl_flags = true
# When building a shared library using a target in WebRTC or
# Chromium projects that depends on libyuv, setting this flag
# to true makes libyuv symbols visible inside that library.
libyuv_symbols_visible = false
}
config("libyuv_config") {
include_dirs = [ "include" ]
if (is_android && current_cpu == "arm64") {
ldflags = [ "-Wl,--dynamic-linker,/system/bin/linker64" ]
}
if (is_android && current_cpu != "arm64") {
ldflags = [ "-Wl,--dynamic-linker,/system/bin/linker" ]
}
}
# This target is built when no specific target is specified on the command line.
group("default") {
testonly = true
deps = [ ":libyuv" ]
if (libyuv_include_tests) {
deps += [
":compare",
":cpuid",
":i444tonv12_eg",
":libyuv_unittest",
":psnr",
":yuvconstants",
":yuvconvert",
]
}
}
group("libyuv") {
all_dependent_configs = [ ":libyuv_config" ]
deps = []
if (is_win && target_cpu == "x64") {
# Compile with clang in order to get inline assembly
public_deps = [ ":libyuv_internal(//build/toolchain/win:win_clang_x64)" ]
} else {
public_deps = [ ":libyuv_internal" ]
}
if (libyuv_use_neon) {
deps += [ ":libyuv_neon" ]
}
if (libyuv_use_msa) {
deps += [ ":libyuv_msa" ]
}
if (libyuv_use_mmi) {
deps += [ ":libyuv_mmi" ]
}
if (!is_ios && !libyuv_disable_jpeg) {
# Make sure that clients of libyuv link with libjpeg. This can't go in
# libyuv_internal because in Windows x64 builds that will generate a clang
# build of libjpeg, and we don't want two copies.
deps += [ "//third_party:jpeg" ]
}
}
static_library("libyuv_internal") {
visibility = [ ":*" ]
sources = [
# Headers
"include/libyuv.h",
"include/libyuv/basic_types.h",
"include/libyuv/compare.h",
"include/libyuv/convert.h",
"include/libyuv/convert_argb.h",
"include/libyuv/convert_from.h",
"include/libyuv/convert_from_argb.h",
"include/libyuv/cpu_id.h",
"include/libyuv/mjpeg_decoder.h",
"include/libyuv/planar_functions.h",
"include/libyuv/rotate.h",
"include/libyuv/rotate_argb.h",
"include/libyuv/rotate_row.h",
"include/libyuv/row.h",
"include/libyuv/scale.h",
"include/libyuv/scale_argb.h",
"include/libyuv/scale_row.h",
"include/libyuv/scale_uv.h",
"include/libyuv/version.h",
"include/libyuv/video_common.h",
# Source Files
"source/compare.cc",
"source/compare_common.cc",
"source/compare_gcc.cc",
"source/compare_win.cc",
"source/convert.cc",
"source/convert_argb.cc",
"source/convert_from.cc",
"source/convert_from_argb.cc",
"source/convert_jpeg.cc",
"source/convert_to_argb.cc",
"source/convert_to_i420.cc",
"source/cpu_id.cc",
"source/mjpeg_decoder.cc",
"source/mjpeg_validate.cc",
"source/planar_functions.cc",
"source/rotate.cc",
"source/rotate_any.cc",
"source/rotate_argb.cc",
"source/rotate_common.cc",
"source/rotate_gcc.cc",
"source/rotate_win.cc",
"source/row_any.cc",
"source/row_common.cc",
"source/row_gcc.cc",
"source/row_win.cc",
"source/scale.cc",
"source/scale_any.cc",
"source/scale_argb.cc",
"source/scale_common.cc",
"source/scale_gcc.cc",
"source/scale_uv.cc",
"source/scale_win.cc",
"source/video_common.cc",
]
configs += [ ":libyuv_config" ]
defines = []
deps = []
if (libyuv_symbols_visible) {
configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
configs += [ "//build/config/gcc:symbol_visibility_default" ]
}
if (!is_ios && !libyuv_disable_jpeg) {
defines += [ "HAVE_JPEG" ]
# Needed to pull in libjpeg headers. Can't add //third_party:jpeg to deps
# because in Windows x64 build it will get compiled with clang.
deps += [ "//third_party:jpeg_includes" ]
}
# Always enable optimization for Release and NaCl builds (to workaround
# crbug.com/538243).
if (!is_debug || is_nacl) {
configs -= [ "//build/config/compiler:default_optimization" ]
# Enable optimize for speed (-O2) over size (-Os).
configs += [ "//build/config/compiler:optimize_max" ]
}
# To enable AVX2 or other cpu optimization, pass flag here
if (!is_win) {
cflags = [
# "-mpopcnt",
# "-mavx2",
# "-mfma",
"-ffp-contract=fast", # Enable fma vectorization for NEON.
]
}
if (!libyuv_use_mmi) {
defines += [ "LIBYUV_DISABLE_MMI" ]
}
}
if (libyuv_use_neon) {
static_library("libyuv_neon") {
sources = [
# ARM Source Files
"source/compare_neon.cc",
"source/compare_neon64.cc",
"source/rotate_neon.cc",
"source/rotate_neon64.cc",
"source/row_neon.cc",
"source/row_neon64.cc",
"source/scale_neon.cc",
"source/scale_neon64.cc",
]
deps = [ ":libyuv_internal" ]
public_configs = [ ":libyuv_config" ]
# Always enable optimization for Release and NaCl builds (to workaround
# crbug.com/538243).
if (!is_debug) {
configs -= [ "//build/config/compiler:default_optimization" ]
# Enable optimize for speed (-O2) over size (-Os).
# TODO(fbarchard): Consider optimize_speed which is O3.
configs += [ "//build/config/compiler:optimize_max" ]
}
if (current_cpu != "arm64") {
configs -= [ "//build/config/compiler:compiler_arm_fpu" ]
cflags = [ "-mfpu=neon" ]
}
}
}
if (libyuv_use_msa) {
static_library("libyuv_msa") {
sources = [
# MSA Source Files
"source/compare_msa.cc",
"source/rotate_msa.cc",
"source/row_msa.cc",
"source/scale_msa.cc",
]
deps = [ ":libyuv_internal" ]
public_configs = [ ":libyuv_config" ]
}
}
if (libyuv_use_mmi) {
static_library("libyuv_mmi") {
sources = [
# MMI Source Files
"source/compare_mmi.cc",
"source/rotate_mmi.cc",
"source/row_mmi.cc",
"source/scale_mmi.cc",
]
deps = [ ":libyuv_internal" ]
public_configs = [ ":libyuv_config" ]
}
}
if (libyuv_include_tests) {
config("libyuv_unittest_warnings_config") {
if (!is_win) {
cflags = [
# TODO(fbarchard): Fix sign and unused variable warnings.
"-Wno-sign-compare",
"-Wno-unused-variable",
]
}
if (is_win) {
cflags = [
"/wd4245", # signed/unsigned mismatch
"/wd4189", # local variable is initialized but not referenced
]
}
}
config("libyuv_unittest_config") {
defines = [ "GTEST_RELATIVE_PATH" ]
}
test("libyuv_unittest") {
testonly = true
sources = [
"unit_test/basictypes_test.cc",
"unit_test/color_test.cc",
"unit_test/compare_test.cc",
"unit_test/convert_test.cc",
"unit_test/cpu_test.cc",
"unit_test/cpu_thread_test.cc",
"unit_test/math_test.cc",
"unit_test/planar_test.cc",
"unit_test/rotate_argb_test.cc",
"unit_test/rotate_test.cc",
"unit_test/scale_argb_test.cc",
"unit_test/scale_test.cc",
"unit_test/scale_uv_test.cc",
"unit_test/unit_test.cc",
"unit_test/unit_test.h",
"unit_test/video_common_test.cc",
]
deps = [
":libyuv",
"//testing/gtest",
]
defines = []
if (libyuv_use_absl_flags) {
defines += [ "LIBYUV_USE_ABSL_FLAGS" ]
deps += [
"//third_party/abseil-cpp/absl/flags:flag",
"//third_party/abseil-cpp/absl/flags:parse",
]
}
configs += [ ":libyuv_unittest_warnings_config" ]
public_deps = [ "//testing/gtest" ]
public_configs = [ ":libyuv_unittest_config" ]
if (is_linux || is_chromeos) {
cflags = [ "-fexceptions" ]
}
if (is_ios) {
configs -= [ "//build/config/compiler:default_symbols" ]
configs += [ "//build/config/compiler:symbols" ]
cflags = [ "-Wno-sometimes-uninitialized" ]
}
if (!is_ios && !libyuv_disable_jpeg) {
defines += [ "HAVE_JPEG" ]
}
if (is_android) {
deps += [ "//testing/android/native_test:native_test_native_code" ]
}
# TODO(YangZhang): These lines can be removed when high accuracy
# YUV to RGB to Neon is ported.
if ((target_cpu == "armv7" || target_cpu == "armv7s" ||
(target_cpu == "arm" && arm_version >= 7) || target_cpu == "arm64") &&
(arm_use_neon || arm_optionally_use_neon)) {
defines += [ "LIBYUV_NEON" ]
}
defines += [
# Enable the following 3 macros to turn off assembly for specified CPU.
# "LIBYUV_DISABLE_X86",
# "LIBYUV_DISABLE_NEON",
# Enable the following macro to build libyuv as a shared library (dll).
# "LIBYUV_USING_SHARED_LIBRARY"
]
}
executable("compare") {
sources = [
# sources
"util/compare.cc",
]
deps = [ ":libyuv" ]
if (is_linux || is_chromeos) {
cflags = [ "-fexceptions" ]
}
}
executable("yuvconvert") {
sources = [
# sources
"util/yuvconvert.cc",
]
deps = [ ":libyuv" ]
if (is_linux || is_chromeos) {
cflags = [ "-fexceptions" ]
}
}
executable("yuvconstants") {
sources = [
# sources
"util/yuvconstants.c",
]
deps = [ ":libyuv" ]
if (is_linux || is_chromeos) {
cflags = [ "-fexceptions" ]
}
}
executable("psnr") {
sources = [
# sources
"util/psnr.cc",
"util/psnr_main.cc",
"util/ssim.cc",
]
deps = [ ":libyuv" ]
if (!is_ios && !libyuv_disable_jpeg) {
defines = [ "HAVE_JPEG" ]
}
}
executable("i444tonv12_eg") {
sources = [
# sources
"util/i444tonv12_eg.cc",
]
deps = [ ":libyuv" ]
}
executable("cpuid") {
sources = [
# sources
"util/cpuid.c",
]
deps = [ ":libyuv" ]
}
}

thirdparty/libyuv/CM_linux_packages.cmake vendored

@@ -1,69 +0,0 @@
# determine the version number from the #define in libyuv/version.h
EXECUTE_PROCESS (
COMMAND grep --perl-regex --only-matching "(?<=LIBYUV_VERSION )[0-9]+" include/libyuv/version.h
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_VARIABLE YUV_VERSION_NUMBER
OUTPUT_STRIP_TRAILING_WHITESPACE )
SET ( YUV_VER_MAJOR 0 )
SET ( YUV_VER_MINOR 0 )
SET ( YUV_VER_PATCH ${YUV_VERSION_NUMBER} )
SET ( YUV_VERSION ${YUV_VER_MAJOR}.${YUV_VER_MINOR}.${YUV_VER_PATCH} )
MESSAGE ( "Building ver.: ${YUV_VERSION}" )
# is this a 32-bit or 64-bit build?
IF ( CMAKE_SIZEOF_VOID_P EQUAL 8 )
SET ( YUV_BIT_SIZE 64 )
ELSEIF ( CMAKE_SIZEOF_VOID_P EQUAL 4 )
SET ( YUV_BIT_SIZE 32 )
ELSE ()
MESSAGE ( FATAL_ERROR "CMAKE_SIZEOF_VOID_P=${CMAKE_SIZEOF_VOID_P}" )
ENDIF ()
# detect if this is a ARM build
STRING (FIND "${CMAKE_CXX_COMPILER}" "arm-linux-gnueabihf-g++" pos)
IF ( ${pos} EQUAL -1 )
SET ( YUV_CROSS_COMPILE_FOR_ARM7 FALSE )
ELSE ()
MESSAGE ( "Cross compiling for ARM7" )
SET ( YUV_CROSS_COMPILE_FOR_ARM7 TRUE )
ENDIF ()
STRING (FIND "${CMAKE_SYSTEM_PROCESSOR}" "arm" pos)
IF ( ${pos} EQUAL -1 )
SET ( YUV_COMPILE_FOR_ARM7 FALSE )
ELSE ()
MESSAGE ( "Compiling for ARM" )
SET ( YUV_COMPILE_FOR_ARM7 TRUE )
ENDIF ()
# set up the system name, such as "x86-32", "amd-64", and "arm-32"
IF ( ${YUV_CROSS_COMPILE_FOR_ARM7} OR ${YUV_COMPILE_FOR_ARM7} )
SET ( YUV_SYSTEM_NAME "armhf-${YUV_BIT_SIZE}" )
ELSE ()
IF ( YUV_BIT_SIZE EQUAL 32 )
SET ( YUV_SYSTEM_NAME "x86-${YUV_BIT_SIZE}" )
ELSE ()
SET ( YUV_SYSTEM_NAME "amd-${YUV_BIT_SIZE}" )
ENDIF ()
ENDIF ()
MESSAGE ( "Packaging for: ${YUV_SYSTEM_NAME}" )
# define all the variables needed by CPack to create .deb and .rpm packages
SET ( CPACK_PACKAGE_VENDOR "Frank Barchard" )
SET ( CPACK_PACKAGE_CONTACT "fbarchard@chromium.org" )
SET ( CPACK_PACKAGE_VERSION ${YUV_VERSION} )
SET ( CPACK_PACKAGE_VERSION_MAJOR ${YUV_VER_MAJOR} )
SET ( CPACK_PACKAGE_VERSION_MINOR ${YUV_VER_MINOR} )
SET ( CPACK_PACKAGE_VERSION_PATCH ${YUV_VER_PATCH} )
SET ( CPACK_RESOURCE_FILE_LICENSE ${PROJECT_SOURCE_DIR}/LICENSE )
SET ( CPACK_SYSTEM_NAME "linux-${YUV_SYSTEM_NAME}" )
SET ( CPACK_PACKAGE_NAME "libyuv" )
SET ( CPACK_PACKAGE_DESCRIPTION_SUMMARY "YUV library" )
SET ( CPACK_PACKAGE_DESCRIPTION "YUV library and YUV conversion tool" )
SET ( CPACK_DEBIAN_PACKAGE_SECTION "other" )
SET ( CPACK_DEBIAN_PACKAGE_PRIORITY "optional" )
SET ( CPACK_DEBIAN_PACKAGE_MAINTAINER "Frank Barchard <fbarchard@chromium.org>" )
SET ( CPACK_GENERATOR "DEB;RPM" )
# create the .deb and .rpm files (you'll need build-essential and rpm tools)
INCLUDE( CPack )

thirdparty/libyuv/CMakeLists.txt vendored

@@ -1,86 +0,0 @@
# CMakeLists for libyuv
# Originally created for "roxlu build system" to compile libyuv on windows
# Run with -DTEST=ON to build unit tests
PROJECT(YUV C CXX) # "C" is required even for C++ projects
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
OPTION(TEST "Build unit tests" OFF)
SET(ly_base_dir ${PROJECT_SOURCE_DIR})
SET(ly_src_dir ${ly_base_dir}/source)
SET(ly_inc_dir ${ly_base_dir}/include)
SET(ly_tst_dir ${ly_base_dir}/unit_test)
SET(ly_lib_name yuv)
SET(ly_lib_static ${ly_lib_name})
SET(ly_lib_shared ${ly_lib_name}_shared)
FILE(GLOB_RECURSE ly_source_files ${ly_src_dir}/*.cc)
LIST(SORT ly_source_files)
FILE(GLOB_RECURSE ly_unittest_sources ${ly_tst_dir}/*.cc)
LIST(SORT ly_unittest_sources)
INCLUDE_DIRECTORIES(BEFORE ${ly_inc_dir})
# this creates the static library (.a)
ADD_LIBRARY(${ly_lib_static} STATIC ${ly_source_files})
# this creates the shared library (.so)
ADD_LIBRARY(${ly_lib_shared} SHARED ${ly_source_files})
SET_TARGET_PROPERTIES(${ly_lib_shared} PROPERTIES OUTPUT_NAME "${ly_lib_name}")
SET_TARGET_PROPERTIES(${ly_lib_shared} PROPERTIES PREFIX "lib")
# this creates the conversion tool
# ADD_EXECUTABLE(yuvconvert ${ly_base_dir}/util/yuvconvert.cc)
# TARGET_LINK_LIBRARIES(yuvconvert ${ly_lib_static})
# INCLUDE(FindJPEG)
# if(JPEG_FOUND)
# include_directories(${JPEG_INCLUDE_DIR})
# target_link_libraries(yuvconvert ${JPEG_LIBRARY})
# add_definitions(-DHAVE_JPEG)
# endif()
if(TEST)
find_library(GTEST_LIBRARY gtest)
if(GTEST_LIBRARY STREQUAL "GTEST_LIBRARY-NOTFOUND")
set(GTEST_SRC_DIR /usr/src/gtest CACHE STRING "Location of gtest sources")
if(EXISTS ${GTEST_SRC_DIR}/src/gtest-all.cc)
message(STATUS "building gtest from sources in ${GTEST_SRC_DIR}")
set(gtest_sources ${GTEST_SRC_DIR}/src/gtest-all.cc)
add_library(gtest STATIC ${gtest_sources})
include_directories(${GTEST_SRC_DIR})
include_directories(${GTEST_SRC_DIR}/include)
set(GTEST_LIBRARY gtest)
else()
message(FATAL_ERROR "TEST is set but unable to find gtest library")
endif()
endif()
add_executable(libyuv_unittest ${ly_unittest_sources})
target_link_libraries(libyuv_unittest ${ly_lib_name} ${GTEST_LIBRARY})
find_library(PTHREAD_LIBRARY pthread)
if(NOT PTHREAD_LIBRARY STREQUAL "PTHREAD_LIBRARY-NOTFOUND")
target_link_libraries(libyuv_unittest pthread)
endif()
if(JPEG_FOUND)
target_link_libraries(libyuv_unittest ${JPEG_LIBRARY})
endif()
if(NACL AND NACL_LIBC STREQUAL "newlib")
target_link_libraries(libyuv_unittest glibc-compat)
endif()
endif()
# install the conversion tool, .so, .a, and all the header files
# INSTALL(PROGRAMS ${CMAKE_BINARY_DIR}/yuvconvert DESTINATION bin)
INSTALL(TARGETS ${ly_lib_static} DESTINATION lib)
# INSTALL ( TARGETS ${ly_lib_shared} LIBRARY DESTINATION lib RUNTIME DESTINATION bin )
INSTALL(DIRECTORY ${PROJECT_SOURCE_DIR}/include/ DESTINATION include)
# create the .deb and .rpm packages using cpack
INCLUDE(CM_linux_packages.cmake)

thirdparty/libyuv/DIR_METADATA vendored

@@ -1,3 +0,0 @@
monorail {
component: "Internals>Images>Codecs"
}

thirdparty/libyuv/LICENSE vendored

@@ -1,29 +0,0 @@
Copyright 2011 The LibYuv Project Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Google nor the names of its contributors may
be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

thirdparty/libyuv/OWNERS vendored

@@ -1,10 +0,0 @@
mbonadei@chromium.org
fbarchard@chromium.org
magjed@chromium.org
pbos@chromium.org
per-file *.gn=mbonadei@chromium.org
per-file .gitignore=*
per-file AUTHORS=*
per-file DEPS=*
per-file PRESUBMIT.py=mbonadei@chromium.org

thirdparty/libyuv/PATENTS vendored

@@ -1,24 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the LibYuv code package.
Google hereby grants to you a perpetual, worldwide, non-exclusive,
no-charge, irrevocable (except as stated in this section) patent
license to make, have made, use, offer to sell, sell, import,
transfer, and otherwise run, modify and propagate the contents of this
implementation of the LibYuv code package, where such license applies
only to those patent claims, both currently owned by Google and
acquired in the future, licensable by Google that are necessarily
infringed by this implementation of the LibYuv code package. This
grant does not include claims that would be infringed only as a
consequence of further modification of this implementation. If you or
your agent or exclusive licensee institute or order or agree to the
institution of patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that this
implementation of the LibYuv code package or any code incorporated
within this implementation of the LibYuv code package constitutes
direct or contributory patent infringement, or inducement of patent
infringement, then any patent rights granted to you under this License
for this implementation of the LibYuv code package shall terminate as
of the date such litigation is filed.

thirdparty/libyuv/PRESUBMIT.py vendored

@@ -1,49 +0,0 @@
# Copyright 2017 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(input_api.canned_checks.RunPylint(input_api, output_api,
files_to_skip=(r'^base[\\\/].*\.py$',
r'^build[\\\/].*\.py$',
r'^buildtools[\\\/].*\.py$',
r'^ios[\\\/].*\.py$',
r'^out.*[\\\/].*\.py$',
r'^testing[\\\/].*\.py$',
r'^third_party[\\\/].*\.py$',
r'^tools[\\\/].*\.py$',
# TODO(kjellander): should arguably be checked.
r'^tools_libyuv[\\\/]valgrind[\\\/].*\.py$',
r'^xcodebuild.*[\\\/].*\.py$',),
disabled_warnings=['F0401', # Failed to import x
'E0611', # No package y in x
'W0232', # Class has no __init__ method
],
pylintrc='pylintrc'))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(
input_api.canned_checks.CheckGNFormatted(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(input_api.canned_checks.CheckOwners(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeWasUploaded(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
return results

thirdparty/libyuv/README.chromium vendored

@@ -1,8 +0,0 @@
Name: libyuv
URL: http://code.google.com/p/libyuv/
Version: 1787
License: BSD
License File: LICENSE
Description:
libyuv is an open source project that includes YUV conversion and scaling functionality.

thirdparty/libyuv/README.md vendored

@@ -1,18 +0,0 @@
**libyuv** is an open source project that includes YUV scaling and conversion functionality.
* Scale YUV to prepare content for compression, with point, bilinear or box filter.
* Convert to YUV from webcam formats for compression.
* Convert to RGB formats for rendering/effects.
* Rotate by 90/180/270 degrees to adjust for mobile devices in portrait mode.
* Optimized for SSSE3/AVX2 on x86/x64.
* Optimized for Neon on Arm.
* Optimized for MSA on Mips.
### Development
See [Getting started][1] for instructions on how to get started developing.
You can also browse the [docs directory][2] for more documentation.
[1]: ./docs/getting_started.md
[2]: ./docs/
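To make the conversion bullet concrete, here is a sketch of feeding one webcam frame through ARGBToI420 (declared in the headers further below; argb, width, and height are placeholder inputs, and width/height are assumed even):

#include <stdint.h>
#include <stdlib.h>
#include "libyuv.h"

/* Sketch: convert a width x height ARGB frame to I420 for compression. */
static void argb_to_i420(const uint8_t* argb, int width, int height) {
  uint8_t* y = (uint8_t*)malloc((size_t)width * height);
  uint8_t* u = (uint8_t*)malloc((size_t)(width / 2) * (height / 2));
  uint8_t* v = (uint8_t*)malloc((size_t)(width / 2) * (height / 2));
  ARGBToI420(argb, width * 4,  /* source pixels and stride in bytes */
             y, width,         /* Y plane: full resolution          */
             u, width / 2,     /* U plane: quarter size (4:2:0)     */
             v, width / 2,     /* V plane: quarter size (4:2:0)     */
             width, height);
  /* ... hand y/u/v to the encoder ... */
  free(y);
  free(u);
  free(v);
}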

thirdparty/libyuv/build_overrides/build.gni vendored

@@ -1,53 +0,0 @@
# Copyright 2016 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# Variable that can be used to support multiple build scenarios, like having
# Chromium specific targets in a client project's GN file etc.
build_with_chromium = false
# Some non-Chromium builds don't support building java targets.
enable_java_templates = true
# Allow using custom suppressions files (currently not used by libyuv).
asan_suppressions_file = "//build/sanitizers/asan_suppressions.cc"
lsan_suppressions_file = "//build/sanitizers/lsan_suppressions.cc"
tsan_suppressions_file = "//build/sanitizers/tsan_suppressions.cc"
msan_blacklist_path =
rebase_path("//tools_libyuv/msan/blacklist.txt", root_build_dir)
ubsan_blacklist_path =
rebase_path("//tools_libyuv/ubsan/blacklist.txt", root_build_dir)
ubsan_vptr_blacklist_path =
rebase_path("//tools_libyuv/ubsan/vptr_blacklist.txt", root_build_dir)
# For Chromium, Android 32-bit non-component, non-clang builds hit a 4GiB size
# limit, making them require symbol_level=2. WebRTC doesn't hit that problem
# so we just ignore that assert. See https://crbug.com/648948 for more info.
ignore_elf32_limitations = true
# Use bundled hermetic Xcode installation maintained by Chromium,
# except for local iOS builds where it is unsupported.
if (host_os == "mac") {
_result = exec_script("//build/mac/should_use_hermetic_xcode.py",
[ target_os ],
"value")
assert(_result != 2,
"Do not allow building targets with the default " +
"hermetic toolchain if the minimum OS version is not met.")
use_system_xcode = _result == 0
}
declare_args() {
# Tracing support requires //third_party/perfetto.
enable_base_tracing = false
use_perfetto_client_library = false
# Allows googletest to pretty-print various absl types.
# Defined here rather than in gtest.gni to match chromium.
gtest_enable_absl_printers = true
}

thirdparty/libyuv/build_overrides/gtest.gni vendored

@@ -1,19 +0,0 @@
# Copyright (c) 2016 The LibYuv project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# Include support for registering main function in multi-process tests.
gtest_include_multiprocess = true
# Include support for platform-specific operations across unit tests.
gtest_include_platform_test = true
# Exclude support for testing Objective C code on OS X and iOS.
gtest_include_objc_support = true
# Exclude support for flushing coverage files on iOS.
gtest_include_ios_coverage = true

thirdparty/libyuv/cleanup_links.py vendored

@@ -1,107 +0,0 @@
#!/usr/bin/env python
# Copyright 2017 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# This is a copy of the file from WebRTC in:
# https://chromium.googlesource.com/external/webrtc/+/master/cleanup_links.py
"""Script to cleanup symlinks created from setup_links.py.
Before 177567c518b121731e507e9b9c4049c4dc96e4c8 (#15754) we had a Chromium
checkout which we created symlinks into. In order to do clean syncs after
landing that change, this script cleans up any old symlinks, avoiding annoying
manual cleanup needed in order to complete gclient sync.
"""
import logging
import optparse
import os
import shelve
import subprocess
import sys
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
LINKS_DB = 'links'
# Version management to make future upgrades/downgrades easier to support.
SCHEMA_VERSION = 1
class WebRTCLinkSetup(object):
def __init__(self, links_db, dry_run=False):
self._dry_run = dry_run
self._links_db = links_db
def CleanupLinks(self):
logging.debug('CleanupLinks')
for source, link_path in self._links_db.iteritems():
if source == 'SCHEMA_VERSION':
continue
if os.path.islink(link_path) or sys.platform.startswith('win'):
# os.path.islink() always returns false on Windows
# See http://bugs.python.org/issue13143.
logging.debug('Removing link to %s at %s', source, link_path)
if not self._dry_run:
if os.path.exists(link_path):
if sys.platform.startswith('win') and os.path.isdir(link_path):
subprocess.check_call(['rmdir', '/q', '/s', link_path],
shell=True)
else:
os.remove(link_path)
del self._links_db[source]
def _initialize_database(filename):
links_database = shelve.open(filename)
# Wipe the database if this version of the script ends up looking at a
# newer (future) version of the links db, just to be sure.
version = links_database.get('SCHEMA_VERSION')
if version and version != SCHEMA_VERSION:
logging.info('Found database with schema version %s while this script only '
'supports %s. Wiping previous database contents.', version,
SCHEMA_VERSION)
links_database.clear()
links_database['SCHEMA_VERSION'] = SCHEMA_VERSION
return links_database
def main():
parser = optparse.OptionParser()
parser.add_option('-d', '--dry-run', action='store_true', default=False,
help='Print what would be done, but don\'t perform any '
'operations. This will automatically set logging to '
'verbose.')
parser.add_option('-v', '--verbose', action='store_const',
const=logging.DEBUG, default=logging.INFO,
help='Print verbose output for debugging.')
options, _ = parser.parse_args()
if options.dry_run:
options.verbose = logging.DEBUG
logging.basicConfig(format='%(message)s', level=options.verbose)
# Work from the root directory of the checkout.
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)
# The database file gets .db appended on some platforms.
db_filenames = [LINKS_DB, LINKS_DB + '.db']
if any(os.path.isfile(f) for f in db_filenames):
links_database = _initialize_database(LINKS_DB)
try:
symlink_creator = WebRTCLinkSetup(links_database, options.dry_run)
symlink_creator.CleanupLinks()
finally:
for f in db_filenames:
if os.path.isfile(f):
os.remove(f)
return 0
if __name__ == '__main__':
sys.exit(main())

thirdparty/libyuv/codereview.settings vendored

@@ -1,5 +0,0 @@
# This file is used by `git cl` to get repository specific information.
CODE_REVIEW_SERVER: codereview.chromium.org
GERRIT_HOST: True
PROJECT: libyuv
VIEW_VC: https://chromium.googlesource.com/libyuv/libyuv/+/

thirdparty/libyuv/download_vs_toolchain.py vendored

@@ -1,29 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2014 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# This script is used to run the vs_toolchain.py script to download the
# Visual Studio toolchain. It's just a temporary measure while waiting for the
# Chrome team to move find_depot_tools into src/build to get rid of these
# workarounds (similar one in gyp_libyuv).
import os
import sys
checkout_root = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(checkout_root, 'build'))
sys.path.insert(0, os.path.join(checkout_root, 'tools', 'find_depot_tools'))
import vs_toolchain # pylint: disable=wrong-import-position
if __name__ == '__main__':
sys.exit(vs_toolchain.main())

thirdparty/libyuv/include/libyuv.h vendored

@@ -1,33 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_H_
#define INCLUDE_LIBYUV_H_
#include "libyuv/basic_types.h"
#include "libyuv/compare.h"
#include "libyuv/convert.h"
#include "libyuv/convert_argb.h"
#include "libyuv/convert_from.h"
#include "libyuv/convert_from_argb.h"
#include "libyuv/cpu_id.h"
#include "libyuv/mjpeg_decoder.h"
#include "libyuv/planar_functions.h"
#include "libyuv/rotate.h"
#include "libyuv/rotate_argb.h"
#include "libyuv/row.h"
#include "libyuv/scale.h"
#include "libyuv/scale_argb.h"
#include "libyuv/scale_row.h"
#include "libyuv/scale_uv.h"
#include "libyuv/version.h"
#include "libyuv/video_common.h"
#endif // INCLUDE_LIBYUV_H_
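Everything in the public API comes in through this one umbrella header; a minimal consumer (a sketch) needs nothing more than:

#include <stdio.h>
#include "libyuv.h"

int main(void) {
  /* LIBYUV_VERSION comes from libyuv/version.h, pulled in above
     (1787 at this snapshot, per README.chromium). */
  printf("libyuv version %d\n", LIBYUV_VERSION);
  return 0;
}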

thirdparty/libyuv/include/libyuv/basic_types.h vendored

@@ -1,68 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_BASIC_TYPES_H_
#define INCLUDE_LIBYUV_BASIC_TYPES_H_
#include <stddef.h> // For size_t and NULL
#if !defined(INT_TYPES_DEFINED) && !defined(GG_LONGLONG)
#define INT_TYPES_DEFINED
#if defined(_MSC_VER) && (_MSC_VER < 1600)
#include <sys/types.h> // for uintptr_t on x86
typedef unsigned __int64 uint64_t;
typedef __int64 int64_t;
typedef unsigned int uint32_t;
typedef int int32_t;
typedef unsigned short uint16_t;
typedef short int16_t;
typedef unsigned char uint8_t;
typedef signed char int8_t;
#else
#include <stdint.h> // for uintptr_t and C99 types
#endif // defined(_MSC_VER) && (_MSC_VER < 1600)
// Types are deprecated. Enable this macro for legacy types.
#ifdef LIBYUV_LEGACY_TYPES
typedef uint64_t uint64;
typedef int64_t int64;
typedef uint32_t uint32;
typedef int32_t int32;
typedef uint16_t uint16;
typedef int16_t int16;
typedef uint8_t uint8;
typedef int8_t int8;
#endif // LIBYUV_LEGACY_TYPES
#endif // INT_TYPES_DEFINED
#if !defined(LIBYUV_API)
#if defined(_WIN32) || defined(__CYGWIN__)
#if defined(LIBYUV_BUILDING_SHARED_LIBRARY)
#define LIBYUV_API __declspec(dllexport)
#elif defined(LIBYUV_USING_SHARED_LIBRARY)
#define LIBYUV_API __declspec(dllimport)
#else
#define LIBYUV_API
#endif // LIBYUV_BUILDING_SHARED_LIBRARY
#elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__APPLE__) && \
(defined(LIBYUV_BUILDING_SHARED_LIBRARY) || \
defined(LIBYUV_USING_SHARED_LIBRARY))
#define LIBYUV_API __attribute__((visibility("default")))
#else
#define LIBYUV_API
#endif // __GNUC__
#endif // LIBYUV_API
// TODO(fbarchard): Remove bool macros.
#define LIBYUV_BOOL int
#define LIBYUV_FALSE 0
#define LIBYUV_TRUE 1
#endif // INCLUDE_LIBYUV_BASIC_TYPES_H_
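A translation unit that still relies on the deprecated short names opts in by defining the macro before the first include (a sketch; per the guard above, uint32 and friends only exist when LIBYUV_LEGACY_TYPES is set):

/* Sketch: enabling the legacy integer aliases. */
#define LIBYUV_LEGACY_TYPES
#include "libyuv/basic_types.h"

static uint32 frame_hash;  /* legacy alias for uint32_t */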

thirdparty/libyuv/include/libyuv/compare.h vendored

@@ -1,111 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_COMPARE_H_
#define INCLUDE_LIBYUV_COMPARE_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Compute a hash for specified memory. Seed of 5381 recommended.
LIBYUV_API
uint32_t HashDjb2(const uint8_t* src, uint64_t count, uint32_t seed);
// Hamming Distance
LIBYUV_API
uint64_t ComputeHammingDistance(const uint8_t* src_a,
const uint8_t* src_b,
int count);
// Scan an opaque argb image and return fourcc based on alpha offset.
// Returns FOURCC_ARGB, FOURCC_BGRA, or 0 if unknown.
LIBYUV_API
uint32_t ARGBDetect(const uint8_t* argb,
int stride_argb,
int width,
int height);
// Sum Square Error - used to compute Mean Square Error or PSNR.
LIBYUV_API
uint64_t ComputeSumSquareError(const uint8_t* src_a,
const uint8_t* src_b,
int count);
LIBYUV_API
uint64_t ComputeSumSquareErrorPlane(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b,
int width,
int height);
static const int kMaxPsnr = 128;
LIBYUV_API
double SumSquareErrorToPsnr(uint64_t sse, uint64_t count);
LIBYUV_API
double CalcFramePsnr(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b,
int width,
int height);
LIBYUV_API
double I420Psnr(const uint8_t* src_y_a,
int stride_y_a,
const uint8_t* src_u_a,
int stride_u_a,
const uint8_t* src_v_a,
int stride_v_a,
const uint8_t* src_y_b,
int stride_y_b,
const uint8_t* src_u_b,
int stride_u_b,
const uint8_t* src_v_b,
int stride_v_b,
int width,
int height);
LIBYUV_API
double CalcFrameSsim(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b,
int width,
int height);
LIBYUV_API
double I420Ssim(const uint8_t* src_y_a,
int stride_y_a,
const uint8_t* src_u_a,
int stride_u_a,
const uint8_t* src_v_a,
int stride_v_a,
const uint8_t* src_y_b,
int stride_y_b,
const uint8_t* src_u_b,
int stride_u_b,
const uint8_t* src_v_b,
int stride_v_b,
int width,
int height);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_COMPARE_H_
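A sketch of how these primitives combine to compare two same-sized 8-bit planes (a and b are placeholder buffers; conventionally PSNR = 10 * log10(255^2 * count / SSE), with identical inputs capped at kMaxPsnr):

#include <stdio.h>
#include "libyuv/compare.h"

static void compare_planes(const uint8_t* a, const uint8_t* b, int count) {
  /* Seed 5381 is the conventional djb2 starting value noted above. */
  if (HashDjb2(a, (uint64_t)count, 5381) ==
      HashDjb2(b, (uint64_t)count, 5381)) {
    printf("planes are bit-identical\n");
    return;
  }
  uint64_t sse = ComputeSumSquareError(a, b, count);
  double psnr = SumSquareErrorToPsnr(sse, (uint64_t)count);
  printf("sse=%llu, psnr=%.2f dB\n", (unsigned long long)sse, psnr);
}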

thirdparty/libyuv/include/libyuv/compare_row.h vendored

@@ -1,142 +0,0 @@
/*
* Copyright 2013 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_COMPARE_ROW_H_
#define INCLUDE_LIBYUV_COMPARE_ROW_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#if defined(__pnacl__) || defined(__CLR_VER) || \
(defined(__native_client__) && defined(__x86_64__)) || \
(defined(__i386__) && !defined(__SSE__) && !defined(__clang__))
#define LIBYUV_DISABLE_X86
#endif
#if defined(__native_client__)
#define LIBYUV_DISABLE_NEON
#endif
// MemorySanitizer does not support assembly code yet. http://crbug.com/344505
#if defined(__has_feature)
#if __has_feature(memory_sanitizer)
#define LIBYUV_DISABLE_X86
#endif
#endif
// Visual C 2012 required for AVX2.
#if defined(_M_IX86) && !defined(__clang__) && defined(_MSC_VER) && \
_MSC_VER >= 1700
#define VISUALC_HAS_AVX2 1
#endif // VisualStudio >= 2012
// clang >= 3.4.0 required for AVX2.
#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
#if (__clang_major__ > 3) || (__clang_major__ == 3 && (__clang_minor__ >= 4))
#define CLANG_HAS_AVX2 1
#endif // clang >= 3.4
#endif // __clang__
// The following are available for Visual C and GCC:
#if !defined(LIBYUV_DISABLE_X86) && \
(defined(__x86_64__) || defined(__i386__) || defined(_M_IX86))
#define HAS_HASHDJB2_SSE41
#define HAS_SUMSQUAREERROR_SSE2
#define HAS_HAMMINGDISTANCE_SSE42
#endif
// The following are available for Visual C and clangcl 32 bit:
#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && \
defined(_MSC_VER) && !defined(__clang__) && \
(defined(VISUALC_HAS_AVX2) || defined(CLANG_HAS_AVX2))
#define HAS_HASHDJB2_AVX2
#define HAS_SUMSQUAREERROR_AVX2
#endif
// The following are available for GCC and clangcl:
#if !defined(LIBYUV_DISABLE_X86) && \
(defined(__x86_64__) || defined(__i386__))
#define HAS_HAMMINGDISTANCE_SSSE3
#endif
// The following are available for GCC and clangcl:
#if !defined(LIBYUV_DISABLE_X86) && defined(CLANG_HAS_AVX2) && \
(defined(__x86_64__) || defined(__i386__))
#define HAS_HAMMINGDISTANCE_AVX2
#endif
// The following are available for Neon:
#if !defined(LIBYUV_DISABLE_NEON) && \
(defined(__ARM_NEON__) || defined(LIBYUV_NEON) || defined(__aarch64__))
#define HAS_SUMSQUAREERROR_NEON
#define HAS_HAMMINGDISTANCE_NEON
#endif
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#define HAS_HAMMINGDISTANCE_MSA
#define HAS_SUMSQUAREERROR_MSA
#endif
#if !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A)
#define HAS_HAMMINGDISTANCE_MMI
#define HAS_SUMSQUAREERROR_MMI
#endif
uint32_t HammingDistance_C(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t HammingDistance_SSE42(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t HammingDistance_SSSE3(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t HammingDistance_AVX2(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t HammingDistance_NEON(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t HammingDistance_MSA(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t HammingDistance_MMI(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t SumSquareError_C(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t SumSquareError_SSE2(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t SumSquareError_AVX2(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t SumSquareError_NEON(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t SumSquareError_MSA(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t SumSquareError_MMI(const uint8_t* src_a,
const uint8_t* src_b,
int count);
uint32_t HashDjb2_C(const uint8_t* src, int count, uint32_t seed);
uint32_t HashDjb2_SSE41(const uint8_t* src, int count, uint32_t seed);
uint32_t HashDjb2_AVX2(const uint8_t* src, int count, uint32_t seed);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_COMPARE_ROW_H_
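Callers never invoke these kernels directly; the library selects one per call site by combining the compile-time HAS_ gates above with run-time CPU detection. A condensed sketch of that dispatch pattern (simplified from what source/compare.cc does; TestCpuFlag and the kCpuHas* constants come from libyuv/cpu_id.h):

#include "libyuv/compare_row.h"
#include "libyuv/cpu_id.h"

static uint32_t dispatch_sum_square_error(const uint8_t* a,
                                          const uint8_t* b,
                                          int count) {
  uint32_t (*kernel)(const uint8_t*, const uint8_t*, int) = SumSquareError_C;
#if defined(HAS_SUMSQUAREERROR_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {  /* compile-time gate + run-time check */
    kernel = SumSquareError_SSE2;
  }
#endif
#if defined(HAS_SUMSQUAREERROR_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    kernel = SumSquareError_NEON;
  }
#endif
  return kernel(a, b, count);
}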

thirdparty/libyuv/include/libyuv/convert.h vendored

@@ -1,860 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_CONVERT_H_
#define INCLUDE_LIBYUV_CONVERT_H_
#include "libyuv/basic_types.h"
#include "libyuv/rotate.h" // For enum RotationMode.
// TODO(fbarchard): fix WebRTC source to include following libyuv headers:
#include "libyuv/convert_argb.h" // For WebRTC I420ToARGB. b/620
#include "libyuv/convert_from.h" // For WebRTC ConvertFromI420. b/620
#include "libyuv/planar_functions.h" // For WebRTC I420Rect, CopyPlane. b/618
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Convert I444 to I420.
LIBYUV_API
int I444ToI420(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I444 to NV12.
LIBYUV_API
int I444ToNV12(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert I444 to NV21.
LIBYUV_API
int I444ToNV21(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
// Convert I422 to I420.
LIBYUV_API
int I422ToI420(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I422 to I444.
LIBYUV_API
int I422ToI444(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I422 to NV21.
LIBYUV_API
int I422ToNV21(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
// Copy I420 to I420.
#define I420ToI420 I420Copy
LIBYUV_API
int I420Copy(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I420 to I444.
LIBYUV_API
int I420ToI444(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Copy I010 to I010
#define I010ToI010 I010Copy
#define H010ToH010 I010Copy
LIBYUV_API
int I010Copy(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert 10 bit YUV to 8 bit
#define H010ToH420 I010ToI420
LIBYUV_API
int I010ToI420(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
#define H210ToH422 I210ToI422
LIBYUV_API
int I210ToI422(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
#define H410ToH444 I410ToI444
LIBYUV_API
int I410ToI444(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
#define H012ToH420 I012ToI420
LIBYUV_API
int I012ToI420(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
#define H212ToH422 I212ToI422
LIBYUV_API
int I212ToI422(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
#define H412ToH444 I412ToI444
LIBYUV_API
int I412ToI444(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
#define I412ToI012 I410ToI010
#define H410ToH010 I410ToI010
#define H412ToH012 I410ToI010
LIBYUV_API
int I410ToI010(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
#define I212ToI012 I210ToI010
#define H210ToH010 I210ToI010
#define H212ToH012 I210ToI010
LIBYUV_API
int I210ToI010(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I010 to I410
LIBYUV_API
int I010ToI410(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I012 to I412
#define I012ToI412 I010ToI410
// Convert I210 to I410
LIBYUV_API
int I210ToI410(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I212 to I412
#define I212ToI412 I210ToI410
// Convert I010 to P010
LIBYUV_API
int I010ToP010(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert I210 to P210
LIBYUV_API
int I210ToP210(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert I012 to P012
LIBYUV_API
int I012ToP012(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert I212 to P212
LIBYUV_API
int I212ToP212(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert I400 (grey) to I420.
LIBYUV_API
int I400ToI420(const uint8_t* src_y,
int src_stride_y,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert I400 (grey) to NV21.
LIBYUV_API
int I400ToNV21(const uint8_t* src_y,
int src_stride_y,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
#define J400ToJ420 I400ToI420
// Convert NV12 to I420.
LIBYUV_API
int NV12ToI420(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert NV21 to I420.
LIBYUV_API
int NV21ToI420(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_vu,
int src_stride_vu,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert NV12 to NV24.
LIBYUV_API
int NV12ToNV24(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert NV16 to NV24.
LIBYUV_API
int NV16ToNV24(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert P010 to P410.
LIBYUV_API
int P010ToP410(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_uv,
int src_stride_uv,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert P012 to P412.
#define P012ToP412 P010ToP410
// Convert P016 to P416.
#define P016ToP416 P010ToP410
// Convert P210 to P410.
LIBYUV_API
int P210ToP410(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_uv,
int src_stride_uv,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert P212 to P412.
#define P212ToP412 P210ToP410
// Convert P216 to P416.
#define P216ToP416 P210ToP410
// Convert YUY2 to I420.
LIBYUV_API
int YUY2ToI420(const uint8_t* src_yuy2,
int src_stride_yuy2,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert UYVY to I420.
LIBYUV_API
int UYVYToI420(const uint8_t* src_uyvy,
int src_stride_uyvy,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert AYUV to NV12.
LIBYUV_API
int AYUVToNV12(const uint8_t* src_ayuv,
int src_stride_ayuv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert AYUV to NV21.
LIBYUV_API
int AYUVToNV21(const uint8_t* src_ayuv,
int src_stride_ayuv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
// Convert Android420 to I420.
LIBYUV_API
int Android420ToI420(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
int src_pixel_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// ARGB little endian (bgra in memory) to I420.
LIBYUV_API
int ARGBToI420(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// BGRA little endian (argb in memory) to I420.
LIBYUV_API
int BGRAToI420(const uint8_t* src_bgra,
int src_stride_bgra,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// ABGR little endian (rgba in memory) to I420.
LIBYUV_API
int ABGRToI420(const uint8_t* src_abgr,
int src_stride_abgr,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGBA little endian (abgr in memory) to I420.
LIBYUV_API
int RGBAToI420(const uint8_t* src_rgba,
int src_stride_rgba,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB little endian (bgr in memory) to I420.
LIBYUV_API
int RGB24ToI420(const uint8_t* src_rgb24,
int src_stride_rgb24,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB little endian (bgr in memory) to J420.
LIBYUV_API
int RGB24ToJ420(const uint8_t* src_rgb24,
int src_stride_rgb24,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB big endian (rgb in memory) to I420.
LIBYUV_API
int RAWToI420(const uint8_t* src_raw,
int src_stride_raw,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB big endian (rgb in memory) to J420.
LIBYUV_API
int RAWToJ420(const uint8_t* src_raw,
int src_stride_raw,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB16 (RGBP fourcc) little endian to I420.
LIBYUV_API
int RGB565ToI420(const uint8_t* src_rgb565,
int src_stride_rgb565,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB15 (RGBO fourcc) little endian to I420.
LIBYUV_API
int ARGB1555ToI420(const uint8_t* src_argb1555,
int src_stride_argb1555,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB12 (R444 fourcc) little endian to I420.
LIBYUV_API
int ARGB4444ToI420(const uint8_t* src_argb4444,
int src_stride_argb4444,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// RGB little endian (bgr in memory) to J400.
LIBYUV_API
int RGB24ToJ400(const uint8_t* src_rgb24,
int src_stride_rgb24,
uint8_t* dst_yj,
int dst_stride_yj,
int width,
int height);
// RGB big endian (rgb in memory) to J400.
LIBYUV_API
int RAWToJ400(const uint8_t* src_raw,
int src_stride_raw,
uint8_t* dst_yj,
int dst_stride_yj,
int width,
int height);
// "src_width"/"src_height" are provided by the capture device.
// "dst_width"/"dst_height" determine the final size after clipping.
// (A usage sketch follows MJPGSize below.)
LIBYUV_API
int MJPGToI420(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int src_width,
int src_height,
int dst_width,
int dst_height);
// JPEG to NV21
LIBYUV_API
int MJPGToNV21(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int src_width,
int src_height,
int dst_width,
int dst_height);
// JPEG to NV12
LIBYUV_API
int MJPGToNV12(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int src_width,
int src_height,
int dst_width,
int dst_height);
// Query size of MJPG in pixels.
LIBYUV_API
int MJPGSize(const uint8_t* sample,
size_t sample_size,
int* width,
int* height);
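// A minimal usage sketch (not part of the original header): query the MJPG
// dimensions with MJPGSize, then decode at that size. The malloc-based
// buffers, the helper name, and the implied <stdlib.h> are assumptions of
// this illustration; error handling for allocation is elided.
static __inline int ExampleMjpgToI420(const uint8_t* sample,
                                      size_t sample_size) {
  int w, h;
  if (MJPGSize(sample, sample_size, &w, &h) != 0) {
    return -1;  // not a parseable MJPG frame
  }
  int half_w = (w + 1) / 2;
  int half_h = (h + 1) / 2;
  uint8_t* y = (uint8_t*)malloc((size_t)w * h);
  uint8_t* u = (uint8_t*)malloc((size_t)half_w * half_h);
  uint8_t* v = (uint8_t*)malloc((size_t)half_w * half_h);
  int ret = MJPGToI420(sample, sample_size, y, w, u, half_w, v, half_w,
                       w, h,   // src size as reported by MJPGSize
                       w, h);  // same dst size, so no clipping
  free(y);
  free(u);
  free(v);  // real code would consume the planes before freeing
  return ret;
}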
// Convert camera sample to I420 with cropping, rotation and vertical flip.
// "sample_size" is needed to parse MJPG.
// "dst_stride_y" is the number of bytes in a row of the dst_y plane.
//   Normally this would be the same as dst_width, with recommended alignment
//   to 16 bytes for better efficiency.
//   If rotation of 90 or 270 is used, stride is affected. The caller should
//   allocate the I420 buffer according to rotation.
// "dst_stride_u" is the number of bytes in a row of the dst_u plane.
//   Normally this would be the same as (dst_width + 1) / 2, with
//   recommended alignment to 16 bytes for better efficiency.
//   If rotation of 90 or 270 is used, stride is affected.
// "crop_x" and "crop_y" are the starting position for cropping.
//   To center, crop_x = (src_width - dst_width) / 2 and
//   crop_y = (src_height - dst_height) / 2.
// "src_width" / "src_height" is the size of src_frame in pixels.
//   "src_height" can be negative, indicating a vertically flipped image
//   source.
// "crop_width" / "crop_height" is the size to crop the src to.
//   Must be less than or equal to src_width/src_height.
//   Cropping parameters are pre-rotation.
// "rotation" can be 0, 90, 180 or 270.
// "fourcc" is a FourCC code, e.g. 'I420' or 'YUY2'.
// Returns 0 on success; -1 for an invalid parameter. Any other non-zero
// value also indicates failure. (A usage sketch follows the declaration
// below.)
LIBYUV_API
int ConvertToI420(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int crop_x,
int crop_y,
int src_width,
int src_height,
int crop_width,
int crop_height,
enum RotationMode rotation,
uint32_t fourcc);
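// A usage sketch (hedged: the helper name is illustrative, and FOURCC_YUY2
// is defined in libyuv/video_common.h, assumed included): convert a packed
// YUY2 camera frame to I420 with no cropping and no rotation.
static __inline int ExampleYuy2FrameToI420(const uint8_t* frame,
                                           size_t frame_size,
                                           uint8_t* dst_y,
                                           uint8_t* dst_u,
                                           uint8_t* dst_v,
                                           int width,
                                           int height) {
  return ConvertToI420(frame, frame_size,
                       dst_y, width,            // Y stride = width.
                       dst_u, (width + 1) / 2,  // U stride = half width.
                       dst_v, (width + 1) / 2,  // V stride = half width.
                       0, 0,                    // crop_x, crop_y: no offset.
                       width, height,           // src size.
                       width, height,           // crop to the full frame.
                       kRotate0, FOURCC_YUY2);
}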
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_CONVERT_H_

File diff suppressed because it is too large

View File

@@ -1,203 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_CONVERT_FROM_H_
#define INCLUDE_LIBYUV_CONVERT_FROM_H_
#include "libyuv/basic_types.h"
#include "libyuv/rotate.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// See also convert.h for conversions from other formats to I420.
// Convert 8 bit YUV to 10 bit.
#define H420ToH010 I420ToI010
LIBYUV_API
int I420ToI010(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert 8 bit YUV to 12 bit.
#define H420ToH012 I420ToI012
LIBYUV_API
int I420ToI012(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height);
LIBYUV_API
int I420ToI422(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
LIBYUV_API
int I420ToI444(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Copy to I400. Source can be I420, I422, I444, I400, NV12 or NV21.
LIBYUV_API
int I400Copy(const uint8_t* src_y,
int src_stride_y,
uint8_t* dst_y,
int dst_stride_y,
int width,
int height);
LIBYUV_API
int I420ToNV12(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
LIBYUV_API
int I420ToNV21(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
LIBYUV_API
int I420ToYUY2(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_yuy2,
int dst_stride_yuy2,
int width,
int height);
LIBYUV_API
int I420ToUYVY(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_uyvy,
int dst_stride_uyvy,
int width,
int height);
// The following are from convert_argb.h
// DEPRECATED: These prototypes will be removed in the future. Use convert_argb.h
// Convert I420 to ARGB.
LIBYUV_API
int I420ToARGB(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height);
// Convert I420 to ABGR.
LIBYUV_API
int I420ToABGR(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_abgr,
int dst_stride_abgr,
int width,
int height);
// Convert I420 to specified format.
// "dst_sample_stride" is bytes in a row for the destination. Pass 0 if the
// buffer has contiguous rows. Can be negative. A multiple of 16 is optimal.
LIBYUV_API
int ConvertFromI420(const uint8_t* y,
int y_stride,
const uint8_t* u,
int u_stride,
const uint8_t* v,
int v_stride,
uint8_t* dst_sample,
int dst_sample_stride,
int width,
int height,
uint32_t fourcc);
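// A hedged sketch of the dispatch above: emit ARGB from I420. FOURCC_ARGB
// comes from libyuv/video_common.h (assumed included); passing 0 for
// dst_sample_stride means width * 4 contiguous bytes per ARGB row.
static __inline int ExampleI420ToArgbSample(const uint8_t* y, int y_stride,
                                            const uint8_t* u, int u_stride,
                                            const uint8_t* v, int v_stride,
                                            uint8_t* argb,
                                            int width, int height) {
  return ConvertFromI420(y, y_stride, u, u_stride, v, v_stride,
                         argb, 0,  // 0 stride: contiguous rows.
                         width, height, FOURCC_ARGB);
}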
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_CONVERT_FROM_H_

View File

@@ -1,335 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_CONVERT_FROM_ARGB_H_
#define INCLUDE_LIBYUV_CONVERT_FROM_ARGB_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Copy ARGB to ARGB.
#define ARGBToARGB ARGBCopy
LIBYUV_API
int ARGBCopy(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height);
// Convert ARGB To BGRA.
LIBYUV_API
int ARGBToBGRA(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_bgra,
int dst_stride_bgra,
int width,
int height);
// Convert ARGB To ABGR.
LIBYUV_API
int ARGBToABGR(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_abgr,
int dst_stride_abgr,
int width,
int height);
// Convert ARGB To RGBA.
LIBYUV_API
int ARGBToRGBA(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_rgba,
int dst_stride_rgba,
int width,
int height);
// Aliases
#define ARGBToAB30 ABGRToAR30
#define ABGRToAB30 ARGBToAR30
// Convert ABGR To AR30.
LIBYUV_API
int ABGRToAR30(const uint8_t* src_abgr,
int src_stride_abgr,
uint8_t* dst_ar30,
int dst_stride_ar30,
int width,
int height);
// Convert ARGB To AR30.
LIBYUV_API
int ARGBToAR30(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_ar30,
int dst_stride_ar30,
int width,
int height);
// Aliases
#define ABGRToRGB24 ARGBToRAW
#define ABGRToRAW ARGBToRGB24
// Convert ARGB To RGB24.
LIBYUV_API
int ARGBToRGB24(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_rgb24,
int dst_stride_rgb24,
int width,
int height);
// Convert ARGB To RAW.
LIBYUV_API
int ARGBToRAW(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_raw,
int dst_stride_raw,
int width,
int height);
// Convert ARGB To RGB565.
LIBYUV_API
int ARGBToRGB565(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_rgb565,
int dst_stride_rgb565,
int width,
int height);
// Convert ARGB To RGB565 with a 4x4 dither matrix (16 bytes).
// Values in the dither matrix from 0 to 7 are recommended.
// The matrix is laid out row-major, with the first byte at the upper left.
// (An example matrix follows the declaration below.)
// TODO(fbarchard): Consider pointer to 2d array for dither4x4.
// const uint8_t(*dither)[4][4];
LIBYUV_API
int ARGBToRGB565Dither(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_rgb565,
int dst_stride_rgb565,
const uint8_t* dither4x4,
int width,
int height);
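// A hedged example matrix (an assumption of this illustration, not shipped
// by the library): the classic 0..15 Bayer 4x4 ordered-dither matrix halved
// into the recommended 0..7 range, row-major with the first byte upper left.
static const uint8_t kExampleDither4x4[16] = {
    0, 4, 1, 5,  // row 0
    6, 2, 7, 3,  // row 1
    1, 5, 0, 4,  // row 2
    7, 3, 6, 2,  // row 3
};
// Usage: ARGBToRGB565Dither(src_argb, src_stride_argb, dst_rgb565,
//                           dst_stride_rgb565, kExampleDither4x4,
//                           width, height);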
// Convert ARGB To ARGB1555.
LIBYUV_API
int ARGBToARGB1555(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb1555,
int dst_stride_argb1555,
int width,
int height);
// Convert ARGB To ARGB4444.
LIBYUV_API
int ARGBToARGB4444(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb4444,
int dst_stride_argb4444,
int width,
int height);
// Convert ARGB To I444.
LIBYUV_API
int ARGBToI444(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert ARGB to AR64.
LIBYUV_API
int ARGBToAR64(const uint8_t* src_argb,
int src_stride_argb,
uint16_t* dst_ar64,
int dst_stride_ar64,
int width,
int height);
// Convert ABGR to AB64.
#define ABGRToAB64 ARGBToAR64
// Convert ARGB to AB64.
LIBYUV_API
int ARGBToAB64(const uint8_t* src_argb,
int src_stride_argb,
uint16_t* dst_ab64,
int dst_stride_ab64,
int width,
int height);
// Convert ABGR to AR64.
#define ABGRToAR64 ARGBToAB64
// Convert ARGB To I422.
LIBYUV_API
int ARGBToI422(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert ARGB To I420. (also in convert.h)
LIBYUV_API
int ARGBToI420(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert ARGB to J420. (JPEG full-range I420).
LIBYUV_API
int ARGBToJ420(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_yj,
int dst_stride_yj,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert ARGB to J422.
LIBYUV_API
int ARGBToJ422(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_yj,
int dst_stride_yj,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height);
// Convert ARGB to J400. (JPEG full range).
LIBYUV_API
int ARGBToJ400(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_yj,
int dst_stride_yj,
int width,
int height);
// Convert RGBA to J400. (JPEG full range).
LIBYUV_API
int RGBAToJ400(const uint8_t* src_rgba,
int src_stride_rgba,
uint8_t* dst_yj,
int dst_stride_yj,
int width,
int height);
// Convert ARGB to I400.
LIBYUV_API
int ARGBToI400(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
int width,
int height);
// Convert ARGB to G. (Reverse of J400ToARGB, which replicates G back to ARGB.)
LIBYUV_API
int ARGBToG(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_g,
int dst_stride_g,
int width,
int height);
// Convert ARGB To NV12.
LIBYUV_API
int ARGBToNV12(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert ARGB To NV21.
LIBYUV_API
int ARGBToNV21(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
// Convert ABGR To NV12.
LIBYUV_API
int ABGRToNV12(const uint8_t* src_abgr,
int src_stride_abgr,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height);
// Convert ABGR To NV21.
LIBYUV_API
int ABGRToNV21(const uint8_t* src_abgr,
int src_stride_abgr,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height);
// Convert ARGB To YUY2.
LIBYUV_API
int ARGBToYUY2(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_yuy2,
int dst_stride_yuy2,
int width,
int height);
// Convert ARGB To UYVY.
LIBYUV_API
int ARGBToUYVY(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_uyvy,
int dst_stride_uyvy,
int width,
int height);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_CONVERT_FROM_ARGB_H_

View File

@@ -1,122 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_CPU_ID_H_
#define INCLUDE_LIBYUV_CPU_ID_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Internal flag to indicate cpuid requires initialization.
static const int kCpuInitialized = 0x1;
// These flags are only valid on ARM processors.
static const int kCpuHasARM = 0x2;
static const int kCpuHasNEON = 0x4;
// 0x8 reserved for future ARM flag.
// These flags are only valid on x86 processors.
static const int kCpuHasX86 = 0x10;
static const int kCpuHasSSE2 = 0x20;
static const int kCpuHasSSSE3 = 0x40;
static const int kCpuHasSSE41 = 0x80;
static const int kCpuHasSSE42 = 0x100; // unused at this time.
static const int kCpuHasAVX = 0x200;
static const int kCpuHasAVX2 = 0x400;
static const int kCpuHasERMS = 0x800;
static const int kCpuHasFMA3 = 0x1000;
static const int kCpuHasF16C = 0x2000;
static const int kCpuHasGFNI = 0x4000;
static const int kCpuHasAVX512BW = 0x8000;
static const int kCpuHasAVX512VL = 0x10000;
static const int kCpuHasAVX512VBMI = 0x20000;
static const int kCpuHasAVX512VBMI2 = 0x40000;
static const int kCpuHasAVX512VBITALG = 0x80000;
static const int kCpuHasAVX512VPOPCNTDQ = 0x100000;
// These flags are only valid on MIPS processors.
static const int kCpuHasMIPS = 0x200000;
static const int kCpuHasMSA = 0x400000;
static const int kCpuHasMMI = 0x800000;
// Optional init function. TestCpuFlag does an auto-init.
// Returns cpu_info flags.
LIBYUV_API
int InitCpuFlags(void);
// Detect whether the CPU has SSE2, NEON, etc.
// The test_flag parameter should be one of the kCpuHas constants above.
// Returns non-zero if the instruction set is detected.
// (A dispatch sketch follows the function below.)
static __inline int TestCpuFlag(int test_flag) {
LIBYUV_API extern int cpu_info_;
#ifdef __ATOMIC_RELAXED
int cpu_info = __atomic_load_n(&cpu_info_, __ATOMIC_RELAXED);
#else
int cpu_info = cpu_info_;
#endif
return (!cpu_info ? InitCpuFlags() : cpu_info) & test_flag;
}
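// A hedged dispatch sketch: pick a SIMD row function at runtime. The
// function-pointer type and helper are illustrative, not libyuv APIs.
typedef void (*ExampleRowFn)(const uint8_t* src, uint8_t* dst, int width);
static __inline ExampleRowFn ExamplePickRowFn(ExampleRowFn neon_fn,
                                              ExampleRowFn c_fn) {
  // TestCpuFlag auto-initializes on first use, so no explicit
  // InitCpuFlags() call is needed before this.
  return TestCpuFlag(kCpuHasNEON) ? neon_fn : c_fn;
}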
// Internal function for parsing /proc/cpuinfo.
LIBYUV_API
int ArmCpuCaps(const char* cpuinfo_name);
LIBYUV_API
int MipsCpuCaps(const char* cpuinfo_name);
// For testing, allow CPU flags to be disabled.
// e.g. MaskCpuFlags(~kCpuHasSSSE3) to disable SSSE3.
// MaskCpuFlags(-1) to enable all cpu specific optimizations.
// MaskCpuFlags(1) to disable all cpu specific optimizations.
// MaskCpuFlags(0) to reset state so the next call will auto-init.
// Returns cpu_info flags. (A usage sketch follows the declaration below.)
LIBYUV_API
int MaskCpuFlags(int enable_flags);
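// A hedged test-harness sketch of the pattern described above; the
// callback-based wrapper is an illustration, not a libyuv API.
static __inline void ExampleRunWithoutSSSE3(void (*run)(void)) {
  MaskCpuFlags(~kCpuHasSSSE3);  // disable only the SSSE3 paths
  run();                        // exercise the non-SSSE3 code
  MaskCpuFlags(-1);             // restore all cpu specific optimizations
}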
// Sets the CPU flags to |cpu_flags|, bypassing the detection code. |cpu_flags|
// should be a valid combination of the kCpuHas constants above and include
// kCpuInitialized. Use this method when running in a sandboxed process where
// the detection code might fail (as it might access /proc/cpuinfo). In such
// cases the cpu_info can be obtained from a non-sandboxed process by calling
// InitCpuFlags() and passed to the sandboxed process (via command line
// parameters, IPC...) which can then call this method to initialize the CPU
// flags.
// Notes:
// - when specifying 0 for |cpu_flags|, the auto initialization is enabled
// again.
// - enabling CPU features that are not supported by the CPU will result in
// undefined behavior.
// TODO(fbarchard): consider writing a helper function that translates from
// other library CPU info to libyuv CPU info and add a .md doc that explains
// CPU detection. (A sketch of the parent/child flow follows the function
// below.)
static __inline void SetCpuFlags(int cpu_flags) {
LIBYUV_API extern int cpu_info_;
#ifdef __ATOMIC_RELAXED
__atomic_store_n(&cpu_info_, cpu_flags, __ATOMIC_RELAXED);
#else
cpu_info_ = cpu_flags;
#endif
}
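// A hedged sketch of the sandbox flow described above. The parent calls
// InitCpuFlags() and sends the result to the sandboxed child (the transport
// is an assumption of this illustration; any IPC works); the child then
// applies it without touching /proc/cpuinfo.
static __inline void ExampleApplyParentCpuFlags(int flags_from_parent) {
  // Include kCpuInitialized so the child never re-runs detection.
  SetCpuFlags(flags_from_parent | kCpuInitialized);
}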
// Low level cpuid for X86. Returns zeros on other CPUs.
// eax is the info type that you want.
// ecx is typically the cpu number, and should normally be zero.
LIBYUV_API
void CpuId(int info_eax, int info_ecx, int* cpu_info);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_CPU_ID_H_

View File

@@ -1,236 +0,0 @@
/*
* Copyright 2016 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_MACROS_MSA_H_
#define INCLUDE_LIBYUV_MACROS_MSA_H_
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#include <msa.h>
#include <stdint.h>
#if (__mips_isa_rev >= 6)
#define LW(psrc) \
({ \
const uint8_t* psrc_lw_m = (const uint8_t*)(psrc); \
uint32_t val_m; \
asm volatile("lw %[val_m], %[psrc_lw_m] \n" \
: [val_m] "=r"(val_m) \
: [psrc_lw_m] "m"(*psrc_lw_m)); \
val_m; \
})
#if (__mips == 64)
#define LD(psrc) \
({ \
const uint8_t* psrc_ld_m = (const uint8_t*)(psrc); \
uint64_t val_m = 0; \
asm volatile("ld %[val_m], %[psrc_ld_m] \n" \
: [val_m] "=r"(val_m) \
: [psrc_ld_m] "m"(*psrc_ld_m)); \
val_m; \
})
#else // !(__mips == 64)
#define LD(psrc) \
({ \
const uint8_t* psrc_ld_m = (const uint8_t*)(psrc); \
uint32_t val0_m, val1_m; \
uint64_t val_m = 0; \
val0_m = LW(psrc_ld_m); \
val1_m = LW(psrc_ld_m + 4); \
val_m = (uint64_t)(val1_m); /* NOLINT */ \
val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); /* NOLINT */ \
val_m = (uint64_t)(val_m | (uint64_t)val0_m); /* NOLINT */ \
val_m; \
})
#endif // (__mips == 64)
#define SW(val, pdst) \
({ \
uint8_t* pdst_sw_m = (uint8_t*)(pdst); /* NOLINT */ \
uint32_t val_m = (val); \
asm volatile("sw %[val_m], %[pdst_sw_m] \n" \
: [pdst_sw_m] "=m"(*pdst_sw_m) \
: [val_m] "r"(val_m)); \
})
#if (__mips == 64)
#define SD(val, pdst) \
({ \
uint8_t* pdst_sd_m = (uint8_t*)(pdst); /* NOLINT */ \
uint64_t val_m = (val); \
asm volatile("sd %[val_m], %[pdst_sd_m] \n" \
: [pdst_sd_m] "=m"(*pdst_sd_m) \
: [val_m] "r"(val_m)); \
})
#else // !(__mips == 64)
#define SD(val, pdst) \
({ \
uint8_t* pdst_sd_m = (uint8_t*)(pdst); /* NOLINT */ \
uint32_t val0_m, val1_m; \
val0_m = (uint32_t)((val)&0x00000000FFFFFFFF); \
val1_m = (uint32_t)(((val) >> 32) & 0x00000000FFFFFFFF); \
SW(val0_m, pdst_sd_m); \
SW(val1_m, pdst_sd_m + 4); \
})
#endif // !(__mips == 64)
#else // !(__mips_isa_rev >= 6)
#define LW(psrc) \
({ \
const uint8_t* psrc_lw_m = (const uint8_t*)(psrc); \
uint32_t val_m; \
asm volatile("ulw %[val_m], %[psrc_lw_m] \n" \
: [val_m] "=r"(val_m) \
: [psrc_lw_m] "m"(*psrc_lw_m)); \
val_m; \
})
#if (__mips == 64)
#define LD(psrc) \
({ \
const uint8_t* psrc_ld_m = (const uint8_t*)(psrc); \
uint64_t val_m = 0; \
asm volatile("uld %[val_m], %[psrc_ld_m] \n" \
: [val_m] "=r"(val_m) \
: [psrc_ld_m] "m"(*psrc_ld_m)); \
val_m; \
})
#else // !(__mips == 64)
#define LD(psrc) \
({ \
const uint8_t* psrc_ld_m = (const uint8_t*)(psrc); \
uint32_t val0_m, val1_m; \
uint64_t val_m = 0; \
val0_m = LW(psrc_ld_m); \
val1_m = LW(psrc_ld_m + 4); \
val_m = (uint64_t)(val1_m); /* NOLINT */ \
val_m = (uint64_t)((val_m << 32) & 0xFFFFFFFF00000000); /* NOLINT */ \
val_m = (uint64_t)(val_m | (uint64_t)val0_m); /* NOLINT */ \
val_m; \
})
#endif // (__mips == 64)
#define SW(val, pdst) \
({ \
uint8_t* pdst_sw_m = (uint8_t*)(pdst); /* NOLINT */ \
uint32_t val_m = (val); \
asm volatile("usw %[val_m], %[pdst_sw_m] \n" \
: [pdst_sw_m] "=m"(*pdst_sw_m) \
: [val_m] "r"(val_m)); \
})
#define SD(val, pdst) \
({ \
uint8_t* pdst_sd_m = (uint8_t*)(pdst); /* NOLINT */ \
uint32_t val0_m, val1_m; \
val0_m = (uint32_t)((val)&0x00000000FFFFFFFF); \
val1_m = (uint32_t)(((val) >> 32) & 0x00000000FFFFFFFF); \
SW(val0_m, pdst_sd_m); \
SW(val1_m, pdst_sd_m + 4); \
})
#endif // (__mips_isa_rev >= 6)
// TODO(fbarchard): Consider removing the __VA_ARGS__ versions.
#define LD_B(RTYPE, psrc) *((RTYPE*)(psrc)) /* NOLINT */
#define LD_UB(...) LD_B(const v16u8, __VA_ARGS__)
#define LD_H(RTYPE, psrc) *((RTYPE*)(psrc)) /* NOLINT */
#define LD_UH(...) LD_H(const v8u16, __VA_ARGS__)
#define ST_B(RTYPE, in, pdst) *((RTYPE*)(pdst)) = (in) /* NOLINT */
#define ST_UB(...) ST_B(v16u8, __VA_ARGS__)
#define ST_H(RTYPE, in, pdst) *((RTYPE*)(pdst)) = (in) /* NOLINT */
#define ST_UH(...) ST_H(v8u16, __VA_ARGS__)
/* Description : Load two vectors with 16 'byte' sized elements
Arguments : Inputs - psrc, stride
Outputs - out0, out1
Return Type - as per RTYPE
Details : Load 16 byte elements in 'out0' from (psrc)
Load 16 byte elements in 'out1' from (psrc + stride)
*/
#define LD_B2(RTYPE, psrc, stride, out0, out1) \
{ \
out0 = LD_B(RTYPE, (psrc)); \
out1 = LD_B(RTYPE, (psrc) + stride); \
}
#define LD_UB2(...) LD_B2(const v16u8, __VA_ARGS__)
#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) \
{ \
LD_B2(RTYPE, (psrc), stride, out0, out1); \
LD_B2(RTYPE, (psrc) + 2 * stride, stride, out2, out3); \
}
#define LD_UB4(...) LD_B4(const v16u8, __VA_ARGS__)
/* Description : Store two vectors with stride each having 16 'byte' sized
elements
Arguments : Inputs - in0, in1, pdst, stride
Details : Store 16 byte elements from 'in0' to (pdst)
Store 16 byte elements from 'in1' to (pdst + stride)
*/
#define ST_B2(RTYPE, in0, in1, pdst, stride) \
{ \
ST_B(RTYPE, in0, (pdst)); \
ST_B(RTYPE, in1, (pdst) + stride); \
}
#define ST_UB2(...) ST_B2(v16u8, __VA_ARGS__)
#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) \
{ \
ST_B2(RTYPE, in0, in1, (pdst), stride); \
ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride); \
}
#define ST_UB4(...) ST_B4(v16u8, __VA_ARGS__)
/* Description : Store vectors of 8 halfword elements with stride
Arguments : Inputs - in0, in1, pdst, stride
Details : Store 8 halfword elements from 'in0' to (pdst)
Store 8 halfword elements from 'in1' to (pdst + stride)
*/
#define ST_H2(RTYPE, in0, in1, pdst, stride) \
{ \
ST_H(RTYPE, in0, (pdst)); \
ST_H(RTYPE, in1, (pdst) + stride); \
}
#define ST_UH2(...) ST_H2(v8u16, __VA_ARGS__)
// TODO(fbarchard): Consider using __msa_vshf_b and __msa_ilvr_b directly.
/* Description : Shuffle byte vector elements as per mask vector
Arguments : Inputs - in0, in1, in2, in3, mask0, mask1
Outputs - out0, out1
Return Type - as per RTYPE
Details : Byte elements from 'in0' & 'in1' are copied selectively to
'out0' as per control vector 'mask0'
*/
#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) \
{ \
out0 = (RTYPE)__msa_vshf_b((v16i8)mask0, (v16i8)in1, (v16i8)in0); \
out1 = (RTYPE)__msa_vshf_b((v16i8)mask1, (v16i8)in3, (v16i8)in2); \
}
#define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)
/* Description : Interleave both left and right half of input vectors
Arguments : Inputs - in0, in1
Outputs - out0, out1
Return Type - as per RTYPE
Details : Right half of byte elements from 'in0' and 'in1' are
interleaved and written to 'out0'
*/
#define ILVRL_B2(RTYPE, in0, in1, out0, out1) \
{ \
out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \
out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \
}
#define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
#endif /* !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa) */
#endif // INCLUDE_LIBYUV_MACROS_MSA_H_

View File

@@ -1,195 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_MJPEG_DECODER_H_
#define INCLUDE_LIBYUV_MJPEG_DECODER_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
// NOTE: For a simplified public API use convert.h MJPGToI420().
struct jpeg_common_struct;
struct jpeg_decompress_struct;
struct jpeg_source_mgr;
namespace libyuv {
#ifdef __cplusplus
extern "C" {
#endif
LIBYUV_BOOL ValidateJpeg(const uint8_t* sample, size_t sample_size);
#ifdef __cplusplus
} // extern "C"
#endif
static const uint32_t kUnknownDataSize = 0xFFFFFFFF;
enum JpegSubsamplingType {
kJpegYuv420,
kJpegYuv422,
kJpegYuv444,
kJpegYuv400,
kJpegUnknown
};
struct Buffer {
const uint8_t* data;
int len;
};
struct BufferVector {
Buffer* buffers;
int len;
int pos;
};
struct SetJmpErrorMgr;
// MJPEG ("Motion JPEG") is a pseudo-standard video codec where the frames are
// simply independent JPEG images with a fixed Huffman table (which is omitted).
// It is rarely used in video transmission, but is common as a camera capture
// format, especially in Logitech devices. This class implements a decoder for
// MJPEG frames.
//
// See http://tools.ietf.org/html/rfc2435
class LIBYUV_API MJpegDecoder {
public:
typedef void (*CallbackFunction)(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows);
static const int kColorSpaceUnknown;
static const int kColorSpaceGrayscale;
static const int kColorSpaceRgb;
static const int kColorSpaceYCbCr;
static const int kColorSpaceCMYK;
static const int kColorSpaceYCCK;
MJpegDecoder();
~MJpegDecoder();
// Loads a new frame, reads its headers, and determines the uncompressed
// image format.
// Returns LIBYUV_TRUE if image looks valid and format is supported.
// If return value is LIBYUV_TRUE, then the values for all the following
// getters are populated.
// src_len is the size of the compressed mjpeg frame in bytes.
LIBYUV_BOOL LoadFrame(const uint8_t* src, size_t src_len);
// Returns width of the last loaded frame in pixels.
int GetWidth();
// Returns height of the last loaded frame in pixels.
int GetHeight();
// Returns format of the last loaded frame. The return value is one of the
// kColorSpace* constants.
int GetColorSpace();
// Number of color components in the color space.
int GetNumComponents();
// Sample factors of the n-th component.
int GetHorizSampFactor(int component);
int GetVertSampFactor(int component);
int GetHorizSubSampFactor(int component);
int GetVertSubSampFactor(int component);
// Public for testability.
int GetImageScanlinesPerImcuRow();
// Public for testability.
int GetComponentScanlinesPerImcuRow(int component);
// Width of a component in bytes.
int GetComponentWidth(int component);
// Height of a component.
int GetComponentHeight(int component);
// Width of a component in bytes with padding for DCTSIZE. Public for testing.
int GetComponentStride(int component);
// Size of a component in bytes.
int GetComponentSize(int component);
// Call this after LoadFrame() if you decide you don't want to decode it
// after all.
LIBYUV_BOOL UnloadFrame();
// Decodes the entire image into a one-buffer-per-color-component format.
// dst_width must match exactly. dst_height must be less than or equal to the
// image height; if less, the image is cropped. "planes" must have size equal
// to at least GetNumComponents() and the entries must point to
// non-overlapping buffers of size at least GetComponentSize(i). The pointers
// in planes are incremented to point past the end of the written data.
// (A usage sketch follows the class definition.)
// TODO(fbarchard): Add dst_x, dst_y to allow specific rect to be decoded.
LIBYUV_BOOL DecodeToBuffers(uint8_t** planes, int dst_width, int dst_height);
// Decodes the entire image and passes the data via repeated calls to a
// callback function. Each call will get the data for a whole number of
// image scanlines.
// TODO(fbarchard): Add dst_x, dst_y to allow specific rect to be decoded.
LIBYUV_BOOL DecodeToCallback(CallbackFunction fn,
void* opaque,
int dst_width,
int dst_height);
// The helper function which recognizes the jpeg sub-sampling type.
static JpegSubsamplingType JpegSubsamplingTypeHelper(
int* subsample_x,
int* subsample_y,
int number_of_components);
private:
void AllocOutputBuffers(int num_outbufs);
void DestroyOutputBuffers();
LIBYUV_BOOL StartDecode();
LIBYUV_BOOL FinishDecode();
void SetScanlinePointers(uint8_t** data);
LIBYUV_BOOL DecodeImcuRow();
int GetComponentScanlinePadding(int component);
// A buffer holding the input data for a frame.
Buffer buf_;
BufferVector buf_vec_;
jpeg_decompress_struct* decompress_struct_;
jpeg_source_mgr* source_mgr_;
SetJmpErrorMgr* error_mgr_;
// LIBYUV_TRUE iff at least one component has scanline padding. (i.e.,
// GetComponentScanlinePadding() != 0.)
LIBYUV_BOOL has_scanline_padding_;
// Temporaries used to point to scanline outputs.
int num_outbufs_; // Outermost size of all arrays below.
uint8_t*** scanlines_;
int* scanlines_sizes_;
// Temporary buffer used for decoding when we can't decode directly to the
// output buffers. Large enough for just one iMCU row.
uint8_t** databuf_;
int* databuf_strides_;
};
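// A hedged usage sketch (not part of the original header): decode one frame
// into caller-allocated planes. It assumes a 3-component YCbCr frame and
// that the caller sized each plane to at least GetComponentSize(i) for this
// frame; the helper name is illustrative.
static LIBYUV_BOOL ExampleDecodeFrame(const uint8_t* src, size_t src_len,
                                      uint8_t* y, uint8_t* u, uint8_t* v) {
  MJpegDecoder decoder;
  if (!decoder.LoadFrame(src, src_len)) {
    return LIBYUV_FALSE;  // invalid or unsupported JPEG
  }
  // DecodeToBuffers advances the pointers it is given, so pass copies.
  uint8_t* planes[3] = {y, u, v};
  return decoder.DecodeToBuffers(planes, decoder.GetWidth(),
                                 decoder.GetHeight());
}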
} // namespace libyuv
#endif // __cplusplus
#endif // INCLUDE_LIBYUV_MJPEG_DECODER_H_

File diff suppressed because it is too large

View File

@@ -1,182 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_ROTATE_H_
#define INCLUDE_LIBYUV_ROTATE_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Supported rotation.
typedef enum RotationMode {
kRotate0 = 0, // No rotation.
kRotate90 = 90, // Rotate 90 degrees clockwise.
kRotate180 = 180, // Rotate 180 degrees.
kRotate270 = 270, // Rotate 270 degrees clockwise.
// Deprecated.
kRotateNone = 0,
kRotateClockwise = 90,
kRotateCounterClockwise = 270,
} RotationModeEnum;
// Rotate an I420 frame. (A 90-degree sketch follows the declaration below.)
LIBYUV_API
int I420Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum RotationMode mode);
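// A hedged sketch of the stride bookkeeping: under 90 (or 270) degree
// rotation the destination is height x width, so destination strides derive
// from the source height. The helper name is illustrative.
static __inline int ExampleRotate90(const uint8_t* src_y, int src_stride_y,
                                    const uint8_t* src_u, int src_stride_u,
                                    const uint8_t* src_v, int src_stride_v,
                                    uint8_t* dst_y, uint8_t* dst_u,
                                    uint8_t* dst_v, int width, int height) {
  int dst_width = height;  // dimensions swap under 90/270 rotation
  return I420Rotate(src_y, src_stride_y, src_u, src_stride_u,
                    src_v, src_stride_v,
                    dst_y, dst_width,
                    dst_u, (dst_width + 1) / 2,
                    dst_v, (dst_width + 1) / 2,
                    width, height,  // pre-rotation source dimensions
                    kRotate90);
}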
// Rotate I444 frame.
LIBYUV_API
int I444Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum RotationMode mode);
// Rotate NV12 input and store in I420.
LIBYUV_API
int NV12ToI420Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum RotationMode mode);
// Rotate a plane by 0, 90, 180, or 270.
LIBYUV_API
int RotatePlane(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height,
enum RotationMode mode);
// Rotate planes by 90, 180, 270. Deprecated.
LIBYUV_API
void RotatePlane90(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height);
LIBYUV_API
void RotatePlane180(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height);
LIBYUV_API
void RotatePlane270(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height);
// Rotations for when U and V are interleaved.
// These functions take one input pointer and
// split the data into two buffers while
// rotating them. Deprecated.
LIBYUV_API
void RotateUV90(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height);
LIBYUV_API
void RotateUV180(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height);
LIBYUV_API
void RotateUV270(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height);
// The 90 and 270 functions are based on transposes.
// Doing a transpose while reversing the read/write order results in a
// rotation by +/- 90 degrees.
// Deprecated.
LIBYUV_API
void TransposePlane(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height);
LIBYUV_API
void TransposeUV(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_ROTATE_H_

View File

@@ -1,37 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_ROTATE_ARGB_H_
#define INCLUDE_LIBYUV_ROTATE_ARGB_H_
#include "libyuv/basic_types.h"
#include "libyuv/rotate.h" // For RotationMode.
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Rotate ARGB frame
LIBYUV_API
int ARGBRotate(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int src_width,
int src_height,
enum RotationMode mode);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_ROTATE_ARGB_H_

View File

@@ -1,224 +0,0 @@
/*
* Copyright 2013 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_ROTATE_ROW_H_
#define INCLUDE_LIBYUV_ROTATE_ROW_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#if defined(__pnacl__) || defined(__CLR_VER) || \
(defined(__native_client__) && defined(__x86_64__)) || \
(defined(__i386__) && !defined(__SSE__) && !defined(__clang__))
#define LIBYUV_DISABLE_X86
#endif
#if defined(__native_client__)
#define LIBYUV_DISABLE_NEON
#endif
// MemorySanitizer does not support assembly code yet. http://crbug.com/344505
#if defined(__has_feature)
#if __has_feature(memory_sanitizer)
#define LIBYUV_DISABLE_X86
#endif
#endif
// The following are available for Visual C 32 bit:
#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && defined(_MSC_VER) && \
!defined(__clang__)
#define HAS_TRANSPOSEWX8_SSSE3
#define HAS_TRANSPOSEUVWX8_SSE2
#endif
// The following are available for GCC 32 or 64 bit:
#if !defined(LIBYUV_DISABLE_X86) && (defined(__i386__) || defined(__x86_64__))
#define HAS_TRANSPOSEWX8_SSSE3
#endif
// The following are available for 64 bit GCC:
#if !defined(LIBYUV_DISABLE_X86) && defined(__x86_64__)
#define HAS_TRANSPOSEWX8_FAST_SSSE3
#define HAS_TRANSPOSEUVWX8_SSE2
#endif
#if !defined(LIBYUV_DISABLE_NEON) && \
(defined(__ARM_NEON__) || defined(LIBYUV_NEON) || defined(__aarch64__))
#define HAS_TRANSPOSEWX8_NEON
#define HAS_TRANSPOSEUVWX8_NEON
#endif
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#define HAS_TRANSPOSEWX16_MSA
#define HAS_TRANSPOSEUVWX16_MSA
#endif
#if !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A)
#define HAS_TRANSPOSEWX8_MMI
#define HAS_TRANSPOSEUVWX8_MMI
#endif
void TransposeWxH_C(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height);
void TransposeWx8_C(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx16_C(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_MMI(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_Fast_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx16_MSA(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_Any_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_Any_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_Any_MMI(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx8_Fast_Any_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeWx16_Any_MSA(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width);
void TransposeUVWxH_C(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height);
void TransposeUVWx8_C(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx16_C(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx8_SSE2(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx8_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx8_MMI(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx16_MSA(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx8_Any_SSE2(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx8_Any_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx8_Any_MMI(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
void TransposeUVWx16_Any_MSA(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_ROTATE_ROW_H_

File diff suppressed because it is too large

View File

@@ -1,254 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_SCALE_H_
#define INCLUDE_LIBYUV_SCALE_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Supported filtering.
typedef enum FilterMode {
kFilterNone = 0, // Point sample; Fastest.
kFilterLinear = 1, // Filter horizontally only.
kFilterBilinear = 2, // Faster than box, but lower quality scaling down.
kFilterBox = 3 // Highest quality.
} FilterModeEnum;
// Scale a YUV plane.
LIBYUV_API
void ScalePlane(const uint8_t* src,
int src_stride,
int src_width,
int src_height,
uint8_t* dst,
int dst_stride,
int dst_width,
int dst_height,
enum FilterMode filtering);
LIBYUV_API
void ScalePlane_16(const uint16_t* src,
int src_stride,
int src_width,
int src_height,
uint16_t* dst,
int dst_stride,
int dst_width,
int dst_height,
enum FilterMode filtering);
// Sample is expected to be in the low 12 bits.
LIBYUV_API
void ScalePlane_12(const uint16_t* src,
int src_stride,
int src_width,
int src_height,
uint16_t* dst,
int dst_stride,
int dst_width,
int dst_height,
enum FilterMode filtering);
// Scales a YUV 4:2:0 image from the src width and height to the
// dst width and height.
// If filtering is kFilterNone, a simple nearest-neighbor algorithm is
// used. This produces basic (blocky) quality at the fastest speed.
// If filtering is kFilterBilinear, interpolation is used to produce a better
// quality image, at the expense of speed.
// If filtering is kFilterBox, averaging is used to produce an even better
// quality image, at further expense of speed.
// Returns 0 if successful. (A usage sketch follows the declaration below.)
LIBYUV_API
int I420Scale(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
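// A hedged sketch: halve a 4:2:0 frame with the highest-quality (and
// slowest) box filter. Strides are assumed equal to the plane widths and
// the helper name is illustrative.
static __inline int ExampleHalveI420(const uint8_t* src_y,
                                     const uint8_t* src_u,
                                     const uint8_t* src_v,
                                     int src_width, int src_height,
                                     uint8_t* dst_y, uint8_t* dst_u,
                                     uint8_t* dst_v) {
  int dst_width = src_width / 2;
  int dst_height = src_height / 2;
  return I420Scale(src_y, src_width,
                   src_u, (src_width + 1) / 2,
                   src_v, (src_width + 1) / 2,
                   src_width, src_height,
                   dst_y, dst_width,
                   dst_u, (dst_width + 1) / 2,
                   dst_v, (dst_width + 1) / 2,
                   dst_width, dst_height, kFilterBox);
}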
LIBYUV_API
int I420Scale_16(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
LIBYUV_API
int I420Scale_12(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
// Scales a YUV 4:4:4 image from the src width and height to the
// dst width and height.
// If filtering is kFilterNone, a simple nearest-neighbor algorithm is
// used. This produces basic (blocky) quality at the fastest speed.
// If filtering is kFilterBilinear, interpolation is used to produce a better
// quality image, at the expense of speed.
// If filtering is kFilterBox, averaging is used to produce an even better
// quality image, at further expense of speed.
// Returns 0 if successful.
LIBYUV_API
int I444Scale(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
LIBYUV_API
int I444Scale_16(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
LIBYUV_API
int I444Scale_12(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
int src_width,
int src_height,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int dst_width,
int dst_height,
enum FilterMode filtering);
// Scales an NV12 image from the src width and height to the
// dst width and height.
// If filtering is kFilterNone, a simple nearest-neighbor algorithm is
// used. This produces basic (blocky) quality at the fastest speed.
// If filtering is kFilterBilinear, interpolation is used to produce a better
// quality image, at the expense of speed.
// kFilterBox is not supported for the UV channel and will be treated as
// bilinear.
// Returns 0 if successful.
LIBYUV_API
int NV12Scale(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
int src_width,
int src_height,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int dst_width,
int dst_height,
enum FilterMode filtering);
#ifdef __cplusplus
// Legacy API. Deprecated.
LIBYUV_API
int Scale(const uint8_t* src_y,
const uint8_t* src_u,
const uint8_t* src_v,
int src_stride_y,
int src_stride_u,
int src_stride_v,
int src_width,
int src_height,
uint8_t* dst_y,
uint8_t* dst_u,
uint8_t* dst_v,
int dst_stride_y,
int dst_stride_u,
int dst_stride_v,
int dst_width,
int dst_height,
LIBYUV_BOOL interpolate);
// For testing, allow disabling of specialized scalers.
LIBYUV_API
void SetUseReferenceImpl(LIBYUV_BOOL use);
#endif // __cplusplus
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_SCALE_H_

View File

@@ -1,76 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_SCALE_ARGB_H_
#define INCLUDE_LIBYUV_SCALE_ARGB_H_
#include "libyuv/basic_types.h"
#include "libyuv/scale.h" // For FilterMode
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
LIBYUV_API
int ARGBScale(const uint8_t* src_argb,
int src_stride_argb,
int src_width,
int src_height,
uint8_t* dst_argb,
int dst_stride_argb,
int dst_width,
int dst_height,
enum FilterMode filtering);
// Clipped scale takes the clip values as coordinates in the destination
// rectangle. (A tiling sketch follows the declaration below.)
LIBYUV_API
int ARGBScaleClip(const uint8_t* src_argb,
int src_stride_argb,
int src_width,
int src_height,
uint8_t* dst_argb,
int dst_stride_argb,
int dst_width,
int dst_height,
int clip_x,
int clip_y,
int clip_width,
int clip_height,
enum FilterMode filtering);
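// A hedged tiling sketch: render only the top-left quadrant of the scaled
// destination, leaving the rest untouched. Clip coordinates are expressed
// in destination space; the helper name is illustrative.
static __inline int ExampleScaleTopLeftTile(const uint8_t* src_argb,
                                            int src_stride_argb,
                                            int src_width, int src_height,
                                            uint8_t* dst_argb,
                                            int dst_stride_argb,
                                            int dst_width, int dst_height) {
  return ARGBScaleClip(src_argb, src_stride_argb, src_width, src_height,
                       dst_argb, dst_stride_argb, dst_width, dst_height,
                       0, 0,                           // clip_x, clip_y
                       dst_width / 2, dst_height / 2,  // clip size
                       kFilterBilinear);
}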
// Scale with YUV conversion to ARGB and clipping.
LIBYUV_API
int YUVToARGBScaleClip(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint32_t src_fourcc,
int src_width,
int src_height,
uint8_t* dst_argb,
int dst_stride_argb,
uint32_t dst_fourcc,
int dst_width,
int dst_height,
int clip_x,
int clip_y,
int clip_width,
int clip_height,
enum FilterMode filtering);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_SCALE_ARGB_H_

File diff suppressed because it is too large

View File

@@ -1,51 +0,0 @@
/*
* Copyright 2020 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_SCALE_UV_H_
#define INCLUDE_LIBYUV_SCALE_UV_H_
#include "libyuv/basic_types.h"
#include "libyuv/scale.h" // For FilterMode
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
LIBYUV_API
int UVScale(const uint8_t* src_uv,
int src_stride_uv,
int src_width,
int src_height,
uint8_t* dst_uv,
int dst_stride_uv,
int dst_width,
int dst_height,
enum FilterMode filtering);
// Scale a 16 bit UV image.
// This function is currently incomplete; it can't handle all cases.
LIBYUV_API
int UVScale_16(const uint16_t* src_uv,
int src_stride_uv,
int src_width,
int src_height,
uint16_t* dst_uv,
int dst_stride_uv,
int dst_width,
int dst_height,
enum FilterMode filtering);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_SCALE_UV_H_

View File

@@ -1,16 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef INCLUDE_LIBYUV_VERSION_H_
#define INCLUDE_LIBYUV_VERSION_H_
#define LIBYUV_VERSION 1787
#endif // INCLUDE_LIBYUV_VERSION_H_

View File

@@ -1,222 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Common definitions for video, including fourcc and VideoFormat.
#ifndef INCLUDE_LIBYUV_VIDEO_COMMON_H_
#define INCLUDE_LIBYUV_VIDEO_COMMON_H_
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
//////////////////////////////////////////////////////////////////////////////
// Definition of FourCC codes
//////////////////////////////////////////////////////////////////////////////
// Convert four characters to a FourCC code.
// Needs to be a macro, otherwise the OS X compiler complains when the
// kFormat* constants are used in a switch.
#ifdef __cplusplus
#define FOURCC(a, b, c, d) \
((static_cast<uint32_t>(a)) | (static_cast<uint32_t>(b) << 8) | \
(static_cast<uint32_t>(c) << 16) | /* NOLINT */ \
(static_cast<uint32_t>(d) << 24)) /* NOLINT */
#else
#define FOURCC(a, b, c, d) \
(((uint32_t)(a)) | ((uint32_t)(b) << 8) | /* NOLINT */ \
((uint32_t)(c) << 16) | ((uint32_t)(d) << 24)) /* NOLINT */
#endif
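// A hedged compile-time check (illustrative, C++11 only): 'I' (0x49) lands
// in the low-order byte, so FOURCC('I', '4', '2', '0') is 0x30323449.
#if defined(__cplusplus) && __cplusplus >= 201103L
static_assert(FOURCC('I', '4', '2', '0') == 0x30323449u,
              "FOURCC packs the first character into the low byte");
#endif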
// Some pages discussing FourCC codes:
// http://www.fourcc.org/yuv.php
// http://v4l2spec.bytesex.org/spec/book1.htm
// http://developer.apple.com/quicktime/icefloe/dispatch020.html
// http://msdn.microsoft.com/library/windows/desktop/dd206750.aspx#nv12
// http://people.xiph.org/~xiphmont/containers/nut/nut4cc.txt
// FourCC codes grouped according to implementation efficiency.
// Primary formats should convert in 1 efficient step.
// Secondary formats are converted in 2 steps.
// Auxiliary formats call primary converters.
enum FourCC {
// 10 Primary YUV formats: 6 planar, 2 biplanar, 2 packed.
FOURCC_I420 = FOURCC('I', '4', '2', '0'),
FOURCC_I422 = FOURCC('I', '4', '2', '2'),
FOURCC_I444 = FOURCC('I', '4', '4', '4'),
FOURCC_I400 = FOURCC('I', '4', '0', '0'),
FOURCC_NV21 = FOURCC('N', 'V', '2', '1'),
FOURCC_NV12 = FOURCC('N', 'V', '1', '2'),
FOURCC_YUY2 = FOURCC('Y', 'U', 'Y', '2'),
FOURCC_UYVY = FOURCC('U', 'Y', 'V', 'Y'),
FOURCC_I010 = FOURCC('I', '0', '1', '0'), // bt.601 10 bit 420
FOURCC_I210 = FOURCC('I', '2', '1', '0'), // bt.601 10 bit 422
// 1 Secondary YUV format: row biplanar. deprecated.
FOURCC_M420 = FOURCC('M', '4', '2', '0'),
// 13 Primary RGB formats: 4 32 bpp, 2 24 bpp, 3 16 bpp, 2 10 bpc, 2 64 bpp
FOURCC_ARGB = FOURCC('A', 'R', 'G', 'B'),
FOURCC_BGRA = FOURCC('B', 'G', 'R', 'A'),
FOURCC_ABGR = FOURCC('A', 'B', 'G', 'R'),
FOURCC_AR30 = FOURCC('A', 'R', '3', '0'), // 10 bit per channel. 2101010.
FOURCC_AB30 = FOURCC('A', 'B', '3', '0'), // ABGR version of 10 bit
FOURCC_AR64 = FOURCC('A', 'R', '6', '4'), // 16 bit per channel.
FOURCC_AB64 = FOURCC('A', 'B', '6', '4'), // ABGR version of 16 bit
FOURCC_24BG = FOURCC('2', '4', 'B', 'G'),
FOURCC_RAW = FOURCC('r', 'a', 'w', ' '),
FOURCC_RGBA = FOURCC('R', 'G', 'B', 'A'),
FOURCC_RGBP = FOURCC('R', 'G', 'B', 'P'), // rgb565 LE.
FOURCC_RGBO = FOURCC('R', 'G', 'B', 'O'), // argb1555 LE.
FOURCC_R444 = FOURCC('R', '4', '4', '4'), // argb4444 LE.
// 1 Primary Compressed YUV format.
FOURCC_MJPG = FOURCC('M', 'J', 'P', 'G'),
// 25 Auxiliary YUV variations: 3 with U and V planes swapped, 1 alias.
FOURCC_YV12 = FOURCC('Y', 'V', '1', '2'),
FOURCC_YV16 = FOURCC('Y', 'V', '1', '6'),
FOURCC_YV24 = FOURCC('Y', 'V', '2', '4'),
FOURCC_YU12 = FOURCC('Y', 'U', '1', '2'), // Linux version of I420.
FOURCC_J420 =
FOURCC('J', '4', '2', '0'), // jpeg (bt.601 full), unofficial fourcc
FOURCC_J422 =
FOURCC('J', '4', '2', '2'), // jpeg (bt.601 full), unofficial fourcc
FOURCC_J444 =
FOURCC('J', '4', '4', '4'), // jpeg (bt.601 full), unofficial fourcc
FOURCC_J400 =
FOURCC('J', '4', '0', '0'), // jpeg (bt.601 full), unofficial fourcc
FOURCC_F420 = FOURCC('F', '4', '2', '0'), // bt.709 full, unofficial fourcc
FOURCC_F422 = FOURCC('F', '4', '2', '2'), // bt.709 full, unofficial fourcc
FOURCC_F444 = FOURCC('F', '4', '4', '4'), // bt.709 full, unofficial fourcc
FOURCC_H420 = FOURCC('H', '4', '2', '0'), // bt.709, unofficial fourcc
FOURCC_H422 = FOURCC('H', '4', '2', '2'), // bt.709, unofficial fourcc
FOURCC_H444 = FOURCC('H', '4', '4', '4'), // bt.709, unofficial fourcc
FOURCC_U420 = FOURCC('U', '4', '2', '0'), // bt.2020, unofficial fourcc
FOURCC_U422 = FOURCC('U', '4', '2', '2'), // bt.2020, unofficial fourcc
FOURCC_U444 = FOURCC('U', '4', '4', '4'), // bt.2020, unofficial fourcc
FOURCC_F010 = FOURCC('F', '0', '1', '0'), // bt.709 full range 10 bit 420
FOURCC_H010 = FOURCC('H', '0', '1', '0'), // bt.709 10 bit 420
FOURCC_U010 = FOURCC('U', '0', '1', '0'), // bt.2020 10 bit 420
FOURCC_F210 = FOURCC('F', '2', '1', '0'), // bt.709 full range 10 bit 422
FOURCC_H210 = FOURCC('H', '2', '1', '0'), // bt.709 10 bit 422
FOURCC_U210 = FOURCC('U', '2', '1', '0'), // bt.2020 10 bit 422
FOURCC_P010 = FOURCC('P', '0', '1', '0'),
FOURCC_P210 = FOURCC('P', '2', '1', '0'),
// 17 Auxiliary aliases. CanonicalFourCC() maps these to canonical fourcc.
FOURCC_IYUV = FOURCC('I', 'Y', 'U', 'V'), // Alias for I420.
FOURCC_YU16 = FOURCC('Y', 'U', '1', '6'), // Alias for I422.
FOURCC_YU24 = FOURCC('Y', 'U', '2', '4'), // Alias for I444.
FOURCC_YUYV = FOURCC('Y', 'U', 'Y', 'V'), // Alias for YUY2.
FOURCC_YUVS = FOURCC('y', 'u', 'v', 's'), // Alias for YUY2 on Mac.
FOURCC_HDYC = FOURCC('H', 'D', 'Y', 'C'), // Alias for UYVY.
FOURCC_2VUY = FOURCC('2', 'v', 'u', 'y'), // Alias for UYVY on Mac.
FOURCC_JPEG = FOURCC('J', 'P', 'E', 'G'), // Alias for MJPG.
FOURCC_DMB1 = FOURCC('d', 'm', 'b', '1'), // Alias for MJPG on Mac.
FOURCC_BA81 = FOURCC('B', 'A', '8', '1'), // Alias for BGGR.
FOURCC_RGB3 = FOURCC('R', 'G', 'B', '3'), // Alias for RAW.
FOURCC_BGR3 = FOURCC('B', 'G', 'R', '3'), // Alias for 24BG.
FOURCC_CM32 = FOURCC(0, 0, 0, 32), // Alias for BGRA kCMPixelFormat_32ARGB
FOURCC_CM24 = FOURCC(0, 0, 0, 24), // Alias for RAW kCMPixelFormat_24RGB
FOURCC_L555 = FOURCC('L', '5', '5', '5'), // Alias for RGBO.
FOURCC_L565 = FOURCC('L', '5', '6', '5'), // Alias for RGBP.
FOURCC_5551 = FOURCC('5', '5', '5', '1'), // Alias for RGBO.
// deprecated formats. Not supported, but defined for backward compatibility.
FOURCC_I411 = FOURCC('I', '4', '1', '1'),
FOURCC_Q420 = FOURCC('Q', '4', '2', '0'),
FOURCC_RGGB = FOURCC('R', 'G', 'G', 'B'),
FOURCC_BGGR = FOURCC('B', 'G', 'G', 'R'),
FOURCC_GRBG = FOURCC('G', 'R', 'B', 'G'),
FOURCC_GBRG = FOURCC('G', 'B', 'R', 'G'),
FOURCC_H264 = FOURCC('H', '2', '6', '4'),
// Match any fourcc.
FOURCC_ANY = -1,
};
enum FourCCBpp {
// Canonical fourcc codes used in our code.
FOURCC_BPP_I420 = 12,
FOURCC_BPP_I422 = 16,
FOURCC_BPP_I444 = 24,
FOURCC_BPP_I411 = 12,
FOURCC_BPP_I400 = 8,
FOURCC_BPP_NV21 = 12,
FOURCC_BPP_NV12 = 12,
FOURCC_BPP_YUY2 = 16,
FOURCC_BPP_UYVY = 16,
FOURCC_BPP_M420 = 12, // deprecated
FOURCC_BPP_Q420 = 12,
FOURCC_BPP_ARGB = 32,
FOURCC_BPP_BGRA = 32,
FOURCC_BPP_ABGR = 32,
FOURCC_BPP_RGBA = 32,
FOURCC_BPP_AR30 = 32,
FOURCC_BPP_AB30 = 32,
FOURCC_BPP_AR64 = 64,
FOURCC_BPP_AB64 = 64,
FOURCC_BPP_24BG = 24,
FOURCC_BPP_RAW = 24,
FOURCC_BPP_RGBP = 16,
FOURCC_BPP_RGBO = 16,
FOURCC_BPP_R444 = 16,
FOURCC_BPP_RGGB = 8,
FOURCC_BPP_BGGR = 8,
FOURCC_BPP_GRBG = 8,
FOURCC_BPP_GBRG = 8,
FOURCC_BPP_YV12 = 12,
FOURCC_BPP_YV16 = 16,
FOURCC_BPP_YV24 = 24,
FOURCC_BPP_YU12 = 12,
FOURCC_BPP_J420 = 12,
FOURCC_BPP_J400 = 8,
FOURCC_BPP_H420 = 12,
FOURCC_BPP_H422 = 16,
FOURCC_BPP_I010 = 15,
FOURCC_BPP_I210 = 20,
FOURCC_BPP_H010 = 15,
FOURCC_BPP_H210 = 20,
FOURCC_BPP_P010 = 15,
FOURCC_BPP_P210 = 20,
FOURCC_BPP_MJPG = 0, // 0 means unknown.
FOURCC_BPP_H264 = 0,
FOURCC_BPP_IYUV = 12,
FOURCC_BPP_YU16 = 16,
FOURCC_BPP_YU24 = 24,
FOURCC_BPP_YUYV = 16,
FOURCC_BPP_YUVS = 16,
FOURCC_BPP_HDYC = 16,
FOURCC_BPP_2VUY = 16,
FOURCC_BPP_JPEG = 1,
FOURCC_BPP_DMB1 = 1,
FOURCC_BPP_BA81 = 8,
FOURCC_BPP_RGB3 = 24,
FOURCC_BPP_BGR3 = 24,
FOURCC_BPP_CM32 = 32,
FOURCC_BPP_CM24 = 24,
// Match any fourcc.
FOURCC_BPP_ANY = 0, // 0 means unknown.
};
// Converts fourcc aliases into canonical ones.
LIBYUV_API uint32_t CanonicalFourCC(uint32_t fourcc);
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // INCLUDE_LIBYUV_VIDEO_COMMON_H_

View File

@@ -1,23 +0,0 @@
# Copyright 2016 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import("//build_overrides/build.gni")
import("//build/config/arm.gni")
import("//build/config/mips.gni")
declare_args() {
libyuv_include_tests = !build_with_chromium
libyuv_disable_jpeg = false
libyuv_use_neon =
current_cpu == "arm64" ||
(current_cpu == "arm" && (arm_use_neon || arm_optionally_use_neon))
libyuv_use_msa =
(current_cpu == "mips64el" || current_cpu == "mipsel") && mips_use_msa
libyuv_use_mmi =
(current_cpu == "mips64el" || current_cpu == "mipsel") && mips_use_mmi
}

View File

@@ -1,97 +0,0 @@
# This is a generic makefile for libyuv for gcc.
# make -f linux.mk CXX=clang++
CC?=gcc
CFLAGS?=-O2 -fomit-frame-pointer
CFLAGS+=-Iinclude/
CXX?=g++
CXXFLAGS?=-O2 -fomit-frame-pointer
CXXFLAGS+=-Iinclude/
LOCAL_OBJ_FILES := \
source/compare.o \
source/compare_common.o \
source/compare_gcc.o \
source/compare_mmi.o \
source/compare_msa.o \
source/compare_neon.o \
source/compare_neon64.o \
source/compare_win.o \
source/convert.o \
source/convert_argb.o \
source/convert_from.o \
source/convert_from_argb.o \
source/convert_jpeg.o \
source/convert_to_argb.o \
source/convert_to_i420.o \
source/cpu_id.o \
source/mjpeg_decoder.o \
source/mjpeg_validate.o \
source/planar_functions.o \
source/rotate.o \
source/rotate_any.o \
source/rotate_argb.o \
source/rotate_common.o \
source/rotate_gcc.o \
source/rotate_mmi.o \
source/rotate_msa.o \
source/rotate_neon.o \
source/rotate_neon64.o \
source/rotate_win.o \
source/row_any.o \
source/row_common.o \
source/row_gcc.o \
source/row_mmi.o \
source/row_msa.o \
source/row_neon.o \
source/row_neon64.o \
source/row_win.o \
source/scale.o \
source/scale_any.o \
source/scale_argb.o \
source/scale_common.o \
source/scale_gcc.o \
source/scale_mmi.o \
source/scale_msa.o \
source/scale_neon.o \
source/scale_neon64.o \
source/scale_uv.o \
source/scale_win.o \
source/video_common.o
.cc.o:
$(CXX) -c $(CXXFLAGS) $*.cc -o $*.o
.c.o:
$(CC) -c $(CFLAGS) $*.c -o $*.o
all: libyuv.a i444tonv12_eg yuvconvert yuvconstants cpuid psnr
libyuv.a: $(LOCAL_OBJ_FILES)
$(AR) $(ARFLAGS) $@ $(LOCAL_OBJ_FILES)
# A C++ test utility that uses libyuv conversion.
yuvconvert: util/yuvconvert.cc libyuv.a
$(CXX) $(CXXFLAGS) -Iutil/ -o $@ util/yuvconvert.cc libyuv.a
# A C test utility that generates yuvconstants for yuv to rgb.
yuvconstants: util/yuvconstants.c libyuv.a
$(CXX) $(CXXFLAGS) -Iutil/ -lm -o $@ util/yuvconstants.c libyuv.a
# A standalone test utility
psnr: util/psnr.cc
$(CXX) $(CXXFLAGS) -Iutil/ -o $@ util/psnr.cc util/psnr_main.cc util/ssim.cc
# A simple conversion example.
i444tonv12_eg: util/i444tonv12_eg.cc libyuv.a
$(CXX) $(CXXFLAGS) -o $@ util/i444tonv12_eg.cc libyuv.a
# A C test utility that uses libyuv conversion from C.
# gcc 4.4 and older require -fno-exceptions to avoid link error on __gxx_personality_v0
# CC=gcc-4.4 CXXFLAGS=-fno-exceptions CXX=g++-4.4 make -f linux.mk
cpuid: util/cpuid.c libyuv.a
$(CC) $(CFLAGS) -o $@ util/cpuid.c libyuv.a
clean:
/bin/rm -f source/*.o *.ii *.s libyuv.a i444tonv12_eg yuvconvert yuvconstants cpuid psnr

View File

@@ -1,13 +0,0 @@
# This file contains all the common make variables which are useful for
# anyone depending on this library.
# Note that dependencies on NDK are not directly listed since NDK auto adds
# them.
LIBYUV_INCLUDES := $(LIBYUV_PATH)/include
LIBYUV_C_FLAGS :=
LIBYUV_CPP_FLAGS :=
LIBYUV_LDLIBS :=
LIBYUV_DEP_MODULES :=

View File

@@ -1,17 +0,0 @@
[MESSAGES CONTROL]
# Disable the message, report, category or checker with the given id(s).
# TODO(kjellander): Reduce this list to be as small as possible.
disable=I0010,I0011,bad-continuation,broad-except,duplicate-code,eval-used,exec-used,fixme,invalid-name,missing-docstring,no-init,no-member,too-few-public-methods,too-many-ancestors,too-many-arguments,too-many-branches,too-many-function-args,too-many-instance-attributes,too-many-lines,too-many-locals,too-many-public-methods,too-many-return-statements,too-many-statements
[REPORTS]
# Don't write out full reports, just messages.
reports=no
[FORMAT]
# We use two spaces for indents, instead of the usual four spaces or tab.
indent-string=' '

View File

@@ -1,440 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/compare.h"
#include <float.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#include "libyuv/cpu_id.h"
#include "libyuv/row.h"
#include "libyuv/video_common.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// A hash seed of 5381 is recommended.
LIBYUV_API
uint32_t HashDjb2(const uint8_t* src, uint64_t count, uint32_t seed) {
const int kBlockSize = 1 << 15; // 32768;
int remainder;
uint32_t (*HashDjb2_SSE)(const uint8_t* src, int count, uint32_t seed) =
HashDjb2_C;
#if defined(HAS_HASHDJB2_SSE41)
if (TestCpuFlag(kCpuHasSSE41)) {
HashDjb2_SSE = HashDjb2_SSE41;
}
#endif
#if defined(HAS_HASHDJB2_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
HashDjb2_SSE = HashDjb2_AVX2;
}
#endif
while (count >= (uint64_t)(kBlockSize)) {
seed = HashDjb2_SSE(src, kBlockSize, seed);
src += kBlockSize;
count -= kBlockSize;
}
remainder = (int)count & ~15;
if (remainder) {
seed = HashDjb2_SSE(src, remainder, seed);
src += remainder;
count -= remainder;
}
remainder = (int)count & 15;
if (remainder) {
seed = HashDjb2_C(src, remainder, seed);
}
return seed;
}
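// Worked example (illustrative, not part of the original source): for
// count = 100007 the block loop above hashes three full 32768-byte blocks
// (98304 bytes) with the SIMD kernel, the 16-byte-aligned remainder of
// 1703 & ~15 = 1696 bytes also uses the SIMD kernel, and the final
// 1703 & 15 = 7 bytes fall back to HashDjb2_C.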
static uint32_t ARGBDetectRow_C(const uint8_t* argb, int width) {
int x;
for (x = 0; x < width - 1; x += 2) {
if (argb[0] != 255) { // First byte is not Alpha of 255, so not ARGB.
return FOURCC_BGRA;
}
if (argb[3] != 255) { // Fourth byte is not Alpha of 255, so not BGRA.
return FOURCC_ARGB;
}
if (argb[4] != 255) { // Second pixel first byte is not Alpha of 255.
return FOURCC_BGRA;
}
if (argb[7] != 255) { // Second pixel fourth byte is not Alpha of 255.
return FOURCC_ARGB;
}
argb += 8;
}
if (width & 1) {
if (argb[0] != 255) { // First byte is not Alpha of 255, so not ARGB.
return FOURCC_BGRA;
}
if (argb[3] != 255) { // 4th byte is not Alpha of 255, so not BGRA.
return FOURCC_ARGB;
}
}
return 0;
}
// Scan an opaque argb image and return fourcc based on alpha offset.
// Returns FOURCC_ARGB, FOURCC_BGRA, or 0 if unknown.
LIBYUV_API
uint32_t ARGBDetect(const uint8_t* argb,
int stride_argb,
int width,
int height) {
uint32_t fourcc = 0;
int h;
// Coalesce rows.
if (stride_argb == width * 4) {
width *= height;
height = 1;
stride_argb = 0;
}
for (h = 0; h < height && fourcc == 0; ++h) {
fourcc = ARGBDetectRow_C(argb, width);
argb += stride_argb;
}
return fourcc;
}
// The NEON version accumulates in 16 bit shorts, which overflow at 65536
// bytes. So the actual maximum is one loop less: 65536 - 32 bytes.
LIBYUV_API
uint64_t ComputeHammingDistance(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
const int kBlockSize = 1 << 15; // 32768;
const int kSimdSize = 64;
// SIMD for multiples of 64, C for the remainder.
int remainder = count & (kBlockSize - 1) & ~(kSimdSize - 1);
uint64_t diff = 0;
int i;
uint32_t (*HammingDistance)(const uint8_t* src_a, const uint8_t* src_b,
int count) = HammingDistance_C;
#if defined(HAS_HAMMINGDISTANCE_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
HammingDistance = HammingDistance_NEON;
}
#endif
#if defined(HAS_HAMMINGDISTANCE_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
HammingDistance = HammingDistance_SSSE3;
}
#endif
#if defined(HAS_HAMMINGDISTANCE_SSE42)
if (TestCpuFlag(kCpuHasSSE42)) {
HammingDistance = HammingDistance_SSE42;
}
#endif
#if defined(HAS_HAMMINGDISTANCE_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
HammingDistance = HammingDistance_AVX2;
}
#endif
#if defined(HAS_HAMMINGDISTANCE_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
HammingDistance = HammingDistance_MMI;
}
#endif
#if defined(HAS_HAMMINGDISTANCE_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
HammingDistance = HammingDistance_MSA;
}
#endif
#ifdef _OPENMP
#pragma omp parallel for reduction(+ : diff)
#endif
for (i = 0; i < (count - (kBlockSize - 1)); i += kBlockSize) {
diff += HammingDistance(src_a + i, src_b + i, kBlockSize);
}
src_a += count & ~(kBlockSize - 1);
src_b += count & ~(kBlockSize - 1);
if (remainder) {
diff += HammingDistance(src_a, src_b, remainder);
src_a += remainder;
src_b += remainder;
}
remainder = count & (kSimdSize - 1);
if (remainder) {
diff += HammingDistance_C(src_a, src_b, remainder);
}
return diff;
}
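// Worked example (illustrative, not part of the original source): for
// count = 70000 the parallel loop above covers two full 32768-byte blocks
// (65536 bytes), the SIMD remainder is 4464 & ~63 = 4416 bytes, and the
// last 70000 & 63 = 48 bytes use HammingDistance_C.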
// TODO(fbarchard): Refactor into row function.
LIBYUV_API
uint64_t ComputeSumSquareError(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
// SumSquareError returns values 0 to 65535 for each squared difference.
// Up to 65536 of those can be summed and remain within a uint32_t.
// After each block of 65536 pixels, accumulate into a uint64_t.
const int kBlockSize = 65536;
int remainder = count & (kBlockSize - 1) & ~31;
uint64_t sse = 0;
int i;
uint32_t (*SumSquareError)(const uint8_t* src_a, const uint8_t* src_b,
int count) = SumSquareError_C;
#if defined(HAS_SUMSQUAREERROR_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
SumSquareError = SumSquareError_NEON;
}
#endif
#if defined(HAS_SUMSQUAREERROR_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
// Note: only used for multiples of 16, so count is not checked.
SumSquareError = SumSquareError_SSE2;
}
#endif
#if defined(HAS_SUMSQUAREERROR_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
// Note: only used for multiples of 32, so count is not checked.
SumSquareError = SumSquareError_AVX2;
}
#endif
#if defined(HAS_SUMSQUAREERROR_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
SumSquareError = SumSquareError_MMI;
}
#endif
#if defined(HAS_SUMSQUAREERROR_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
SumSquareError = SumSquareError_MSA;
}
#endif
#ifdef _OPENMP
#pragma omp parallel for reduction(+ : sse)
#endif
for (i = 0; i < (count - (kBlockSize - 1)); i += kBlockSize) {
sse += SumSquareError(src_a + i, src_b + i, kBlockSize);
}
src_a += count & ~(kBlockSize - 1);
src_b += count & ~(kBlockSize - 1);
if (remainder) {
sse += SumSquareError(src_a, src_b, remainder);
src_a += remainder;
src_b += remainder;
}
remainder = count & 31;
if (remainder) {
sse += SumSquareError_C(src_a, src_b, remainder);
}
return sse;
}
LIBYUV_API
uint64_t ComputeSumSquareErrorPlane(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b,
int width,
int height) {
uint64_t sse = 0;
int h;
// Coalesce rows.
if (stride_a == width && stride_b == width) {
width *= height;
height = 1;
stride_a = stride_b = 0;
}
for (h = 0; h < height; ++h) {
sse += ComputeSumSquareError(src_a, src_b, width);
src_a += stride_a;
src_b += stride_b;
}
return sse;
}
LIBYUV_API
double SumSquareErrorToPsnr(uint64_t sse, uint64_t count) {
double psnr;
if (sse > 0) {
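// Note (added for clarity, not in the original source): 'mse' here is the
// reciprocal of the mean squared error, so 255^2 * mse below equals the
// usual 255^2 / MSE.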
double mse = (double)count / (double)sse;
psnr = 10.0 * log10(255.0 * 255.0 * mse);
} else {
psnr = kMaxPsnr; // Limit to prevent divide by 0
}
if (psnr > kMaxPsnr) {
psnr = kMaxPsnr;
}
return psnr;
}
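// Worked example (illustrative, not part of the original source): when
// sse == count the mean squared error is 1.0 and
// psnr = 10 * log10(255^2) ~= 48.13 dB; identical planes (sse == 0) are
// clamped to kMaxPsnr instead of dividing by zero.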
LIBYUV_API
double CalcFramePsnr(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b,
int width,
int height) {
const uint64_t samples = (uint64_t)width * (uint64_t)height;
const uint64_t sse = ComputeSumSquareErrorPlane(src_a, stride_a, src_b,
stride_b, width, height);
return SumSquareErrorToPsnr(sse, samples);
}
LIBYUV_API
double I420Psnr(const uint8_t* src_y_a,
int stride_y_a,
const uint8_t* src_u_a,
int stride_u_a,
const uint8_t* src_v_a,
int stride_v_a,
const uint8_t* src_y_b,
int stride_y_b,
const uint8_t* src_u_b,
int stride_u_b,
const uint8_t* src_v_b,
int stride_v_b,
int width,
int height) {
const uint64_t sse_y = ComputeSumSquareErrorPlane(
src_y_a, stride_y_a, src_y_b, stride_y_b, width, height);
const int width_uv = (width + 1) >> 1;
const int height_uv = (height + 1) >> 1;
const uint64_t sse_u = ComputeSumSquareErrorPlane(
src_u_a, stride_u_a, src_u_b, stride_u_b, width_uv, height_uv);
const uint64_t sse_v = ComputeSumSquareErrorPlane(
src_v_a, stride_v_a, src_v_b, stride_v_b, width_uv, height_uv);
const uint64_t samples = (uint64_t)width * (uint64_t)height +
2 * ((uint64_t)width_uv * (uint64_t)height_uv);
const uint64_t sse = sse_y + sse_u + sse_v;
return SumSquareErrorToPsnr(sse, samples);
}
static const int64_t cc1 = 26634;   // 64^2 * (0.01 * 255)^2
static const int64_t cc2 = 239708;  // 64^2 * (0.03 * 255)^2
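// Worked check (illustrative, not part of the original source): with the
// standard SSIM stabilizers c1 = (0.01*255)^2 and c2 = (0.03*255)^2
// pre-scaled by 64^2 = 4096 for the integer math:
//   cc1 = 4096 * 6.5025  = 26634.24  -> 26634
//   cc2 = 4096 * 58.5225 = 239708.16 -> 239708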
static double Ssim8x8_C(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b) {
int64_t sum_a = 0;
int64_t sum_b = 0;
int64_t sum_sq_a = 0;
int64_t sum_sq_b = 0;
int64_t sum_axb = 0;
int i;
for (i = 0; i < 8; ++i) {
int j;
for (j = 0; j < 8; ++j) {
sum_a += src_a[j];
sum_b += src_b[j];
sum_sq_a += src_a[j] * src_a[j];
sum_sq_b += src_b[j] * src_b[j];
sum_axb += src_a[j] * src_b[j];
}
src_a += stride_a;
src_b += stride_b;
}
{
const int64_t count = 64;
// Scale the constants by the number of pixels.
const int64_t c1 = (cc1 * count * count) >> 12;
const int64_t c2 = (cc2 * count * count) >> 12;
const int64_t sum_a_x_sum_b = sum_a * sum_b;
const int64_t ssim_n = (2 * sum_a_x_sum_b + c1) *
(2 * count * sum_axb - 2 * sum_a_x_sum_b + c2);
const int64_t sum_a_sq = sum_a * sum_a;
const int64_t sum_b_sq = sum_b * sum_b;
const int64_t ssim_d =
(sum_a_sq + sum_b_sq + c1) *
(count * sum_sq_a - sum_a_sq + count * sum_sq_b - sum_b_sq + c2);
if (ssim_d == 0.0) {
return DBL_MAX;
}
return ssim_n * 1.0 / ssim_d;
}
}
// We use an 8x8 moving window whose starting locations lie on a 4x4 pixel
// grid. This arrangement lets the windows overlap block boundaries, which
// penalizes blocking artifacts.
LIBYUV_API
double CalcFrameSsim(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b,
int width,
int height) {
int samples = 0;
double ssim_total = 0;
double (*Ssim8x8)(const uint8_t* src_a, int stride_a, const uint8_t* src_b,
int stride_b) = Ssim8x8_C;
// Sample points start at each 4x4 location.
int i;
for (i = 0; i < height - 8; i += 4) {
int j;
for (j = 0; j < width - 8; j += 4) {
ssim_total += Ssim8x8(src_a + j, stride_a, src_b + j, stride_b);
samples++;
}
src_a += stride_a * 4;
src_b += stride_b * 4;
}
ssim_total /= samples;
return ssim_total;
}
LIBYUV_API
double I420Ssim(const uint8_t* src_y_a,
int stride_y_a,
const uint8_t* src_u_a,
int stride_u_a,
const uint8_t* src_v_a,
int stride_v_a,
const uint8_t* src_y_b,
int stride_y_b,
const uint8_t* src_u_b,
int stride_u_b,
const uint8_t* src_v_b,
int stride_v_b,
int width,
int height) {
const double ssim_y =
CalcFrameSsim(src_y_a, stride_y_a, src_y_b, stride_y_b, width, height);
const int width_uv = (width + 1) >> 1;
const int height_uv = (height + 1) >> 1;
const double ssim_u = CalcFrameSsim(src_u_a, stride_u_a, src_u_b, stride_u_b,
width_uv, height_uv);
const double ssim_v = CalcFrameSsim(src_v_a, stride_v_a, src_v_b, stride_v_b,
width_uv, height_uv);
return ssim_y * 0.8 + 0.1 * (ssim_u + ssim_v);
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

View File

@@ -1,74 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Hakmem method for hamming distance.
uint32_t HammingDistance_C(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
int i;
for (i = 0; i < count - 3; i += 4) {
uint32_t x = *((const uint32_t*)src_a) ^ *((const uint32_t*)src_b);
uint32_t u = x - ((x >> 1) & 0x55555555);
u = ((u >> 2) & 0x33333333) + (u & 0x33333333);
diff += ((((u + (u >> 4)) & 0x0f0f0f0f) * 0x01010101) >> 24);
src_a += 4;
src_b += 4;
}
for (; i < count; ++i) {
uint32_t x = *src_a ^ *src_b;
uint32_t u = x - ((x >> 1) & 0x55);
u = ((u >> 2) & 0x33) + (u & 0x33);
diff += (u + (u >> 4)) & 0x0f;
src_a += 1;
src_b += 1;
}
return diff;
}
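// Worked example (illustrative, not part of the original source): for a
// word with x = 0xFF, the reduction above gives u = 0xFF - 0x55 = 0xAA,
// then ((0xAA >> 2) & 0x33) + (0xAA & 0x33) = 0x44, and finally
// (((0x44 + 0x04) & 0x0f0f0f0f) * 0x01010101) >> 24 = 8 set bits.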
uint32_t SumSquareError_C(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t sse = 0u;
int i;
for (i = 0; i < count; ++i) {
int diff = src_a[i] - src_b[i];
sse += (uint32_t)(diff * diff);
}
return sse;
}
// A hash seed of 5381 is recommended.
// Internal C version of HashDjb2 with an int-sized count for efficiency.
uint32_t HashDjb2_C(const uint8_t* src, int count, uint32_t seed) {
uint32_t hash = seed;
int i;
for (i = 0; i < count; ++i) {
hash += (hash << 5) + src[i];
}
return hash;
}
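// Note (illustrative, not part of the original source): the update
// hash += (hash << 5) + src[i] computes hash * 33 + src[i], since
// hash + (hash << 5) == hash * 33; this is the classic djb2 recurrence.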
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

View File

@@ -1,360 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for GCC x86 and x64.
#if !defined(LIBYUV_DISABLE_X86) && \
(defined(__x86_64__) || defined(__i386__))
#if defined(__x86_64__)
uint32_t HammingDistance_SSE42(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint64_t diff = 0u;
asm volatile(
"xor %3,%3 \n"
"xor %%r8,%%r8 \n"
"xor %%r9,%%r9 \n"
"xor %%r10,%%r10 \n"
// Process 32 bytes per loop.
LABELALIGN
"1: \n"
"mov (%0),%%rcx \n"
"mov 0x8(%0),%%rdx \n"
"xor (%1),%%rcx \n"
"xor 0x8(%1),%%rdx \n"
"popcnt %%rcx,%%rcx \n"
"popcnt %%rdx,%%rdx \n"
"mov 0x10(%0),%%rsi \n"
"mov 0x18(%0),%%rdi \n"
"xor 0x10(%1),%%rsi \n"
"xor 0x18(%1),%%rdi \n"
"popcnt %%rsi,%%rsi \n"
"popcnt %%rdi,%%rdi \n"
"add $0x20,%0 \n"
"add $0x20,%1 \n"
"add %%rcx,%3 \n"
"add %%rdx,%%r8 \n"
"add %%rsi,%%r9 \n"
"add %%rdi,%%r10 \n"
"sub $0x20,%2 \n"
"jg 1b \n"
"add %%r8, %3 \n"
"add %%r9, %3 \n"
"add %%r10, %3 \n"
: "+r"(src_a), // %0
"+r"(src_b), // %1
"+r"(count), // %2
"=r"(diff) // %3
:
: "memory", "cc", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10");
return static_cast<uint32_t>(diff);
}
#else
uint32_t HammingDistance_SSE42(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
asm volatile(
// Process 16 bytes per loop.
LABELALIGN
"1: \n"
"mov (%0),%%ecx \n"
"mov 0x4(%0),%%edx \n"
"xor (%1),%%ecx \n"
"xor 0x4(%1),%%edx \n"
"popcnt %%ecx,%%ecx \n"
"add %%ecx,%3 \n"
"popcnt %%edx,%%edx \n"
"add %%edx,%3 \n"
"mov 0x8(%0),%%ecx \n"
"mov 0xc(%0),%%edx \n"
"xor 0x8(%1),%%ecx \n"
"xor 0xc(%1),%%edx \n"
"popcnt %%ecx,%%ecx \n"
"add %%ecx,%3 \n"
"popcnt %%edx,%%edx \n"
"add %%edx,%3 \n"
"add $0x10,%0 \n"
"add $0x10,%1 \n"
"sub $0x10,%2 \n"
"jg 1b \n"
: "+r"(src_a), // %0
"+r"(src_b), // %1
"+r"(count), // %2
"+r"(diff) // %3
:
: "memory", "cc", "ecx", "edx");
return diff;
}
#endif
static const vec8 kNibbleMask = {15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15};
static const vec8 kBitCount = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
uint32_t HammingDistance_SSSE3(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
asm volatile(
"movdqa %4,%%xmm2 \n"
"movdqa %5,%%xmm3 \n"
"pxor %%xmm0,%%xmm0 \n"
"pxor %%xmm1,%%xmm1 \n"
"sub %0,%1 \n"
LABELALIGN
"1: \n"
"movdqa (%0),%%xmm4 \n"
"movdqa 0x10(%0), %%xmm5 \n"
"pxor (%0,%1), %%xmm4 \n"
"movdqa %%xmm4,%%xmm6 \n"
"pand %%xmm2,%%xmm6 \n"
"psrlw $0x4,%%xmm4 \n"
"movdqa %%xmm3,%%xmm7 \n"
"pshufb %%xmm6,%%xmm7 \n"
"pand %%xmm2,%%xmm4 \n"
"movdqa %%xmm3,%%xmm6 \n"
"pshufb %%xmm4,%%xmm6 \n"
"paddb %%xmm7,%%xmm6 \n"
"pxor 0x10(%0,%1),%%xmm5 \n"
"add $0x20,%0 \n"
"movdqa %%xmm5,%%xmm4 \n"
"pand %%xmm2,%%xmm5 \n"
"psrlw $0x4,%%xmm4 \n"
"movdqa %%xmm3,%%xmm7 \n"
"pshufb %%xmm5,%%xmm7 \n"
"pand %%xmm2,%%xmm4 \n"
"movdqa %%xmm3,%%xmm5 \n"
"pshufb %%xmm4,%%xmm5 \n"
"paddb %%xmm7,%%xmm5 \n"
"paddb %%xmm5,%%xmm6 \n"
"psadbw %%xmm1,%%xmm6 \n"
"paddd %%xmm6,%%xmm0 \n"
"sub $0x20,%2 \n"
"jg 1b \n"
"pshufd $0xaa,%%xmm0,%%xmm1 \n"
"paddd %%xmm1,%%xmm0 \n"
"movd %%xmm0, %3 \n"
: "+r"(src_a), // %0
"+r"(src_b), // %1
"+r"(count), // %2
"=r"(diff) // %3
: "m"(kNibbleMask), // %4
"m"(kBitCount) // %5
: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
"xmm7");
return diff;
}
#ifdef HAS_HAMMINGDISTANCE_AVX2
uint32_t HammingDistance_AVX2(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
asm volatile(
"vbroadcastf128 %4,%%ymm2 \n"
"vbroadcastf128 %5,%%ymm3 \n"
"vpxor %%ymm0,%%ymm0,%%ymm0 \n"
"vpxor %%ymm1,%%ymm1,%%ymm1 \n"
"sub %0,%1 \n"
LABELALIGN
"1: \n"
"vmovdqa (%0),%%ymm4 \n"
"vmovdqa 0x20(%0), %%ymm5 \n"
"vpxor (%0,%1), %%ymm4, %%ymm4 \n"
"vpand %%ymm2,%%ymm4,%%ymm6 \n"
"vpsrlw $0x4,%%ymm4,%%ymm4 \n"
"vpshufb %%ymm6,%%ymm3,%%ymm6 \n"
"vpand %%ymm2,%%ymm4,%%ymm4 \n"
"vpshufb %%ymm4,%%ymm3,%%ymm4 \n"
"vpaddb %%ymm4,%%ymm6,%%ymm6 \n"
"vpxor 0x20(%0,%1),%%ymm5,%%ymm4 \n"
"add $0x40,%0 \n"
"vpand %%ymm2,%%ymm4,%%ymm5 \n"
"vpsrlw $0x4,%%ymm4,%%ymm4 \n"
"vpshufb %%ymm5,%%ymm3,%%ymm5 \n"
"vpand %%ymm2,%%ymm4,%%ymm4 \n"
"vpshufb %%ymm4,%%ymm3,%%ymm4 \n"
"vpaddb %%ymm5,%%ymm4,%%ymm4 \n"
"vpaddb %%ymm6,%%ymm4,%%ymm4 \n"
"vpsadbw %%ymm1,%%ymm4,%%ymm4 \n"
"vpaddd %%ymm0,%%ymm4,%%ymm0 \n"
"sub $0x40,%2 \n"
"jg 1b \n"
"vpermq $0xb1,%%ymm0,%%ymm1 \n"
"vpaddd %%ymm1,%%ymm0,%%ymm0 \n"
"vpermq $0xaa,%%ymm0,%%ymm1 \n"
"vpaddd %%ymm1,%%ymm0,%%ymm0 \n"
"vmovd %%xmm0, %3 \n"
"vzeroupper \n"
: "+r"(src_a), // %0
"+r"(src_b), // %1
"+r"(count), // %2
"=r"(diff) // %3
: "m"(kNibbleMask), // %4
"m"(kBitCount) // %5
: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6");
return diff;
}
#endif // HAS_HAMMINGDISTANCE_AVX2
uint32_t SumSquareError_SSE2(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t sse;
asm volatile(
"pxor %%xmm0,%%xmm0 \n"
"pxor %%xmm5,%%xmm5 \n"
LABELALIGN
"1: \n"
"movdqu (%0),%%xmm1 \n"
"lea 0x10(%0),%0 \n"
"movdqu (%1),%%xmm2 \n"
"lea 0x10(%1),%1 \n"
"movdqa %%xmm1,%%xmm3 \n"
"psubusb %%xmm2,%%xmm1 \n"
"psubusb %%xmm3,%%xmm2 \n"
"por %%xmm2,%%xmm1 \n"
"movdqa %%xmm1,%%xmm2 \n"
"punpcklbw %%xmm5,%%xmm1 \n"
"punpckhbw %%xmm5,%%xmm2 \n"
"pmaddwd %%xmm1,%%xmm1 \n"
"pmaddwd %%xmm2,%%xmm2 \n"
"paddd %%xmm1,%%xmm0 \n"
"paddd %%xmm2,%%xmm0 \n"
"sub $0x10,%2 \n"
"jg 1b \n"
"pshufd $0xee,%%xmm0,%%xmm1 \n"
"paddd %%xmm1,%%xmm0 \n"
"pshufd $0x1,%%xmm0,%%xmm1 \n"
"paddd %%xmm1,%%xmm0 \n"
"movd %%xmm0,%3 \n"
: "+r"(src_a), // %0
"+r"(src_b), // %1
"+r"(count), // %2
"=g"(sse) // %3
::"memory",
"cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm5");
return sse;
}
static const uvec32 kHash16x33 = {0x92d9e201, 0, 0, 0}; // 33 ^ 16
static const uvec32 kHashMul0 = {
0x0c3525e1, // 33 ^ 15
0xa3476dc1, // 33 ^ 14
0x3b4039a1, // 33 ^ 13
0x4f5f0981, // 33 ^ 12
};
static const uvec32 kHashMul1 = {
0x30f35d61, // 33 ^ 11
0x855cb541, // 33 ^ 10
0x040a9121, // 33 ^ 9
0x747c7101, // 33 ^ 8
};
static const uvec32 kHashMul2 = {
0xec41d4e1, // 33 ^ 7
0x4cfa3cc1, // 33 ^ 6
0x025528a1, // 33 ^ 5
0x00121881, // 33 ^ 4
};
static const uvec32 kHashMul3 = {
0x00008c61, // 33 ^ 3
0x00000441, // 33 ^ 2
0x00000021, // 33 ^ 1
0x00000001, // 33 ^ 0
};
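// Note (illustrative, not part of the original source): with these
// constants each iteration folds 16 bytes at once:
//   hash = hash * 33^16 + sum(src[i] * 33^(15 - i)) for i = 0..15,
// which equals 16 steps of the scalar djb2 update hash = hash * 33 + src[i].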
uint32_t HashDjb2_SSE41(const uint8_t* src, int count, uint32_t seed) {
uint32_t hash;
asm volatile(
"movd %2,%%xmm0 \n"
"pxor %%xmm7,%%xmm7 \n"
"movdqa %4,%%xmm6 \n"
LABELALIGN
"1: \n"
"movdqu (%0),%%xmm1 \n"
"lea 0x10(%0),%0 \n"
"pmulld %%xmm6,%%xmm0 \n"
"movdqa %5,%%xmm5 \n"
"movdqa %%xmm1,%%xmm2 \n"
"punpcklbw %%xmm7,%%xmm2 \n"
"movdqa %%xmm2,%%xmm3 \n"
"punpcklwd %%xmm7,%%xmm3 \n"
"pmulld %%xmm5,%%xmm3 \n"
"movdqa %6,%%xmm5 \n"
"movdqa %%xmm2,%%xmm4 \n"
"punpckhwd %%xmm7,%%xmm4 \n"
"pmulld %%xmm5,%%xmm4 \n"
"movdqa %7,%%xmm5 \n"
"punpckhbw %%xmm7,%%xmm1 \n"
"movdqa %%xmm1,%%xmm2 \n"
"punpcklwd %%xmm7,%%xmm2 \n"
"pmulld %%xmm5,%%xmm2 \n"
"movdqa %8,%%xmm5 \n"
"punpckhwd %%xmm7,%%xmm1 \n"
"pmulld %%xmm5,%%xmm1 \n"
"paddd %%xmm4,%%xmm3 \n"
"paddd %%xmm2,%%xmm1 \n"
"paddd %%xmm3,%%xmm1 \n"
"pshufd $0xe,%%xmm1,%%xmm2 \n"
"paddd %%xmm2,%%xmm1 \n"
"pshufd $0x1,%%xmm1,%%xmm2 \n"
"paddd %%xmm2,%%xmm1 \n"
"paddd %%xmm1,%%xmm0 \n"
"sub $0x10,%1 \n"
"jg 1b \n"
"movd %%xmm0,%3 \n"
: "+r"(src), // %0
"+r"(count), // %1
"+rm"(seed), // %2
"=g"(hash) // %3
: "m"(kHash16x33), // %4
"m"(kHashMul0), // %5
"m"(kHashMul1), // %6
"m"(kHashMul2), // %7
"m"(kHashMul3) // %8
: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
"xmm7");
return hash;
}
#endif  // !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__))
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

View File

@@ -1,123 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for MIPS MMI.
#if !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A)
// Hakmem method for hamming distance.
uint32_t HammingDistance_MMI(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
uint64_t temp = 0, temp1 = 0, ta = 0, tb = 0;
uint64_t c1 = 0x5555555555555555;
uint64_t c2 = 0x3333333333333333;
uint64_t c3 = 0x0f0f0f0f0f0f0f0f;
uint32_t c4 = 0x01010101;
uint64_t s1 = 1, s2 = 2, s3 = 4;
__asm__ volatile(
"1: \n\t"
"ldc1 %[ta], 0(%[src_a]) \n\t"
"ldc1 %[tb], 0(%[src_b]) \n\t"
"xor %[temp], %[ta], %[tb] \n\t"
"psrlw %[temp1], %[temp], %[s1] \n\t" // temp1=x>>1
"and %[temp1], %[temp1], %[c1] \n\t" // temp1&=c1
"psubw %[temp1], %[temp], %[temp1] \n\t" // x-temp1
"and %[temp], %[temp1], %[c2] \n\t" // t = (u&c2)
"psrlw %[temp1], %[temp1], %[s2] \n\t" // u>>2
"and %[temp1], %[temp1], %[c2] \n\t" // u>>2 & c2
"paddw %[temp1], %[temp1], %[temp] \n\t" // t1 = t1+t
"psrlw %[temp], %[temp1], %[s3] \n\t" // u>>4
"paddw %[temp1], %[temp1], %[temp] \n\t" // u+(u>>4)
"and %[temp1], %[temp1], %[c3] \n\t" //&c3
"dmfc1 $t0, %[temp1] \n\t"
"dsrl32 $t0, $t0, 0 \n\t "
"mul $t0, $t0, %[c4] \n\t"
"dsrl $t0, $t0, 24 \n\t"
"dadd %[diff], %[diff], $t0 \n\t"
"dmfc1 $t0, %[temp1] \n\t"
"mul $t0, $t0, %[c4] \n\t"
"dsrl $t0, $t0, 24 \n\t"
"dadd %[diff], %[diff], $t0 \n\t"
"daddiu %[src_a], %[src_a], 8 \n\t"
"daddiu %[src_b], %[src_b], 8 \n\t"
"addiu %[count], %[count], -8 \n\t"
"bgtz %[count], 1b \n\t"
"nop \n\t"
: [diff] "+r"(diff), [src_a] "+r"(src_a), [src_b] "+r"(src_b),
[count] "+r"(count), [ta] "+f"(ta), [tb] "+f"(tb), [temp] "+f"(temp),
[temp1] "+f"(temp1)
: [c1] "f"(c1), [c2] "f"(c2), [c3] "f"(c3), [c4] "r"(c4), [s1] "f"(s1),
[s2] "f"(s2), [s3] "f"(s3)
: "memory");
return diff;
}
uint32_t SumSquareError_MMI(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t sse = 0u;
uint32_t sse_hi = 0u, sse_lo = 0u;
uint64_t src1, src2;
uint64_t diff, diff_hi, diff_lo;
uint64_t sse_sum, sse_tmp;
const uint64_t mask = 0x0ULL;
__asm__ volatile(
"xor %[sse_sum], %[sse_sum], %[sse_sum] \n\t"
"1: \n\t"
"ldc1 %[src1], 0x00(%[src_a]) \n\t"
"ldc1 %[src2], 0x00(%[src_b]) \n\t"
"pasubub %[diff], %[src1], %[src2] \n\t"
"punpcklbh %[diff_lo], %[diff], %[mask] \n\t"
"punpckhbh %[diff_hi], %[diff], %[mask] \n\t"
"pmaddhw %[sse_tmp], %[diff_lo], %[diff_lo] \n\t"
"paddw %[sse_sum], %[sse_sum], %[sse_tmp] \n\t"
"pmaddhw %[sse_tmp], %[diff_hi], %[diff_hi] \n\t"
"paddw %[sse_sum], %[sse_sum], %[sse_tmp] \n\t"
"daddiu %[src_a], %[src_a], 0x08 \n\t"
"daddiu %[src_b], %[src_b], 0x08 \n\t"
"daddiu %[count], %[count], -0x08 \n\t"
"bnez %[count], 1b \n\t"
"mfc1 %[sse_lo], %[sse_sum] \n\t"
"mfhc1 %[sse_hi], %[sse_sum] \n\t"
"daddu %[sse], %[sse_hi], %[sse_lo] \n\t"
: [sse] "+&r"(sse), [diff] "=&f"(diff), [src1] "=&f"(src1),
[src2] "=&f"(src2), [diff_lo] "=&f"(diff_lo), [diff_hi] "=&f"(diff_hi),
[sse_sum] "=&f"(sse_sum), [sse_tmp] "=&f"(sse_tmp),
[sse_hi] "+&r"(sse_hi), [sse_lo] "+&r"(sse_lo)
: [src_a] "r"(src_a), [src_b] "r"(src_b), [count] "r"(count),
[mask] "f"(mask)
: "memory");
return sse;
}
#endif // !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

View File

@@ -1,97 +0,0 @@
/*
* Copyright 2017 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#include "libyuv/row.h"
// This module is for GCC MSA
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#include "libyuv/macros_msa.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
uint32_t HammingDistance_MSA(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
int i;
v16u8 src0, src1, src2, src3;
v2i64 vec0 = {0}, vec1 = {0};
for (i = 0; i < count; i += 32) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_a, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_a, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)src_b, 0);
src3 = (v16u8)__msa_ld_b((v16i8*)src_b, 16);
src0 ^= src2;
src1 ^= src3;
vec0 += __msa_pcnt_d((v2i64)src0);
vec1 += __msa_pcnt_d((v2i64)src1);
src_a += 32;
src_b += 32;
}
vec0 += vec1;
diff = (uint32_t)__msa_copy_u_w((v4i32)vec0, 0);
diff += (uint32_t)__msa_copy_u_w((v4i32)vec0, 2);
return diff;
}
uint32_t SumSquareError_MSA(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t sse = 0u;
int i;
v16u8 src0, src1, src2, src3;
v8i16 vec0, vec1, vec2, vec3;
v4i32 reg0 = {0}, reg1 = {0}, reg2 = {0}, reg3 = {0};
v2i64 tmp0;
for (i = 0; i < count; i += 32) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_a, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_a, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)src_b, 0);
src3 = (v16u8)__msa_ld_b((v16i8*)src_b, 16);
vec0 = (v8i16)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
vec1 = (v8i16)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
vec2 = (v8i16)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
vec3 = (v8i16)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
vec0 = __msa_hsub_u_h((v16u8)vec0, (v16u8)vec0);
vec1 = __msa_hsub_u_h((v16u8)vec1, (v16u8)vec1);
vec2 = __msa_hsub_u_h((v16u8)vec2, (v16u8)vec2);
vec3 = __msa_hsub_u_h((v16u8)vec3, (v16u8)vec3);
reg0 = __msa_dpadd_s_w(reg0, vec0, vec0);
reg1 = __msa_dpadd_s_w(reg1, vec1, vec1);
reg2 = __msa_dpadd_s_w(reg2, vec2, vec2);
reg3 = __msa_dpadd_s_w(reg3, vec3, vec3);
src_a += 32;
src_b += 32;
}
reg0 += reg1;
reg2 += reg3;
reg0 += reg2;
tmp0 = __msa_hadd_s_d(reg0, reg0);
sse = (uint32_t)__msa_copy_u_w((v4i32)tmp0, 0);
sse += (uint32_t)__msa_copy_u_w((v4i32)tmp0, 2);
return sse;
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)

View File

@@ -1,96 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#if !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__) && \
!defined(__aarch64__)
// Processes 256 bits at a time.
// Uses a short (16-bit) accumulator, which restricts count to 131 KB.
uint32_t HammingDistance_NEON(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff;
asm volatile(
"vmov.u16 q4, #0 \n" // accumulator
"1: \n"
"vld1.8 {q0, q1}, [%0]! \n"
"vld1.8 {q2, q3}, [%1]! \n"
"veor.32 q0, q0, q2 \n"
"veor.32 q1, q1, q3 \n"
"vcnt.i8 q0, q0 \n"
"vcnt.i8 q1, q1 \n"
"subs %2, %2, #32 \n"
"vadd.u8 q0, q0, q1 \n" // 16 byte counts
"vpadal.u8 q4, q0 \n" // 8 shorts
"bgt 1b \n"
"vpaddl.u16 q0, q4 \n" // 4 ints
"vpadd.u32 d0, d0, d1 \n"
"vpadd.u32 d0, d0, d0 \n"
"vmov.32 %3, d0[0] \n"
: "+r"(src_a), "+r"(src_b), "+r"(count), "=r"(diff)
:
: "cc", "q0", "q1", "q2", "q3", "q4");
return diff;
}
uint32_t SumSquareError_NEON(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t sse;
asm volatile(
"vmov.u8 q8, #0 \n"
"vmov.u8 q10, #0 \n"
"vmov.u8 q9, #0 \n"
"vmov.u8 q11, #0 \n"
"1: \n"
"vld1.8 {q0}, [%0]! \n"
"vld1.8 {q1}, [%1]! \n"
"subs %2, %2, #16 \n"
"vsubl.u8 q2, d0, d2 \n"
"vsubl.u8 q3, d1, d3 \n"
"vmlal.s16 q8, d4, d4 \n"
"vmlal.s16 q9, d6, d6 \n"
"vmlal.s16 q10, d5, d5 \n"
"vmlal.s16 q11, d7, d7 \n"
"bgt 1b \n"
"vadd.u32 q8, q8, q9 \n"
"vadd.u32 q10, q10, q11 \n"
"vadd.u32 q11, q8, q10 \n"
"vpaddl.u32 q1, q11 \n"
"vadd.u64 d0, d2, d3 \n"
"vmov.32 %3, d0[0] \n"
: "+r"(src_a), "+r"(src_b), "+r"(count), "=r"(sse)
:
: "memory", "cc", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
return sse;
}
#endif // defined(__ARM_NEON__) && !defined(__aarch64__)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

View File

@@ -1,94 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#if !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
// Processes 256 bits at a time.
// Uses a short (16-bit) accumulator, which restricts count to 131 KB.
uint32_t HammingDistance_NEON(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff;
asm volatile(
"movi v4.8h, #0 \n"
"1: \n"
"ld1 {v0.16b, v1.16b}, [%0], #32 \n"
"ld1 {v2.16b, v3.16b}, [%1], #32 \n"
"eor v0.16b, v0.16b, v2.16b \n"
"prfm pldl1keep, [%0, 448] \n" // prefetch 7 lines ahead
"eor v1.16b, v1.16b, v3.16b \n"
"cnt v0.16b, v0.16b \n"
"prfm pldl1keep, [%1, 448] \n"
"cnt v1.16b, v1.16b \n"
"subs %w2, %w2, #32 \n"
"add v0.16b, v0.16b, v1.16b \n"
"uadalp v4.8h, v0.16b \n"
"b.gt 1b \n"
"uaddlv s4, v4.8h \n"
"fmov %w3, s4 \n"
: "+r"(src_a), "+r"(src_b), "+r"(count), "=r"(diff)
:
: "cc", "v0", "v1", "v2", "v3", "v4");
return diff;
}
uint32_t SumSquareError_NEON(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t sse;
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"1: \n"
"ld1 {v0.16b}, [%0], #16 \n"
"ld1 {v1.16b}, [%1], #16 \n"
"subs %w2, %w2, #16 \n"
"usubl v2.8h, v0.8b, v1.8b \n"
"usubl2 v3.8h, v0.16b, v1.16b \n"
"prfm pldl1keep, [%0, 448] \n" // prefetch 7 lines ahead
"smlal v16.4s, v2.4h, v2.4h \n"
"smlal v17.4s, v3.4h, v3.4h \n"
"prfm pldl1keep, [%1, 448] \n"
"smlal2 v18.4s, v2.8h, v2.8h \n"
"smlal2 v19.4s, v3.8h, v3.8h \n"
"b.gt 1b \n"
"add v16.4s, v16.4s, v17.4s \n"
"add v18.4s, v18.4s, v19.4s \n"
"add v19.4s, v16.4s, v18.4s \n"
"addv s0, v19.4s \n"
"fmov %w3, s0 \n"
: "+r"(src_a), "+r"(src_b), "+r"(count), "=r"(sse)
:
: "cc", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19");
return sse;
}
#endif // !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

View File

@@ -1,241 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#include "libyuv/row.h"
#if defined(_MSC_VER)
#include <intrin.h> // For __popcnt
#endif
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for 32 bit Visual C x86
#if !defined(LIBYUV_DISABLE_X86) && defined(_MSC_VER) && \
!defined(__clang__) && defined(_M_IX86)
uint32_t HammingDistance_SSE42(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
int i;
for (i = 0; i < count - 3; i += 4) {
uint32_t x = *((uint32_t*)src_a) ^ *((uint32_t*)src_b); // NOLINT
src_a += 4;
src_b += 4;
diff += __popcnt(x);
}
return diff;
}
__declspec(naked) uint32_t
SumSquareError_SSE2(const uint8_t* src_a, const uint8_t* src_b, int count) {
__asm {
mov eax, [esp + 4] // src_a
mov edx, [esp + 8] // src_b
mov ecx, [esp + 12] // count
pxor xmm0, xmm0
pxor xmm5, xmm5
wloop:
movdqu xmm1, [eax]
lea eax, [eax + 16]
movdqu xmm2, [edx]
lea edx, [edx + 16]
movdqa xmm3, xmm1 // abs trick
psubusb xmm1, xmm2
psubusb xmm2, xmm3
por xmm1, xmm2
movdqa xmm2, xmm1
punpcklbw xmm1, xmm5
punpckhbw xmm2, xmm5
pmaddwd xmm1, xmm1
pmaddwd xmm2, xmm2
paddd xmm0, xmm1
paddd xmm0, xmm2
sub ecx, 16
jg wloop
pshufd xmm1, xmm0, 0xee
paddd xmm0, xmm1
pshufd xmm1, xmm0, 0x01
paddd xmm0, xmm1
movd eax, xmm0
ret
}
}
#ifdef HAS_SUMSQUAREERROR_AVX2
// C4752: found Intel(R) Advanced Vector Extensions; consider using /arch:AVX.
#pragma warning(disable : 4752)
__declspec(naked) uint32_t
SumSquareError_AVX2(const uint8_t* src_a, const uint8_t* src_b, int count) {
__asm {
mov eax, [esp + 4] // src_a
mov edx, [esp + 8] // src_b
mov ecx, [esp + 12] // count
vpxor ymm0, ymm0, ymm0 // sum
vpxor ymm5, ymm5, ymm5 // constant 0 for unpck
sub edx, eax
wloop:
vmovdqu ymm1, [eax]
vmovdqu ymm2, [eax + edx]
lea eax, [eax + 32]
vpsubusb ymm3, ymm1, ymm2 // abs difference trick
vpsubusb ymm2, ymm2, ymm1
vpor ymm1, ymm2, ymm3
vpunpcklbw ymm2, ymm1, ymm5 // u16. mutates order.
vpunpckhbw ymm1, ymm1, ymm5
vpmaddwd ymm2, ymm2, ymm2 // square + hadd to u32.
vpmaddwd ymm1, ymm1, ymm1
vpaddd ymm0, ymm0, ymm1
vpaddd ymm0, ymm0, ymm2
sub ecx, 32
jg wloop
vpshufd ymm1, ymm0, 0xee // 3, 2 + 1, 0 both lanes.
vpaddd ymm0, ymm0, ymm1
vpshufd ymm1, ymm0, 0x01 // 1 + 0 both lanes.
vpaddd ymm0, ymm0, ymm1
vpermq ymm1, ymm0, 0x02 // high + low lane.
vpaddd ymm0, ymm0, ymm1
vmovd eax, xmm0
vzeroupper
ret
}
}
#endif // HAS_SUMSQUAREERROR_AVX2
uvec32 kHash16x33 = {0x92d9e201, 0, 0, 0}; // 33 ^ 16
uvec32 kHashMul0 = {
0x0c3525e1, // 33 ^ 15
0xa3476dc1, // 33 ^ 14
0x3b4039a1, // 33 ^ 13
0x4f5f0981, // 33 ^ 12
};
uvec32 kHashMul1 = {
0x30f35d61, // 33 ^ 11
0x855cb541, // 33 ^ 10
0x040a9121, // 33 ^ 9
0x747c7101, // 33 ^ 8
};
uvec32 kHashMul2 = {
0xec41d4e1, // 33 ^ 7
0x4cfa3cc1, // 33 ^ 6
0x025528a1, // 33 ^ 5
0x00121881, // 33 ^ 4
};
uvec32 kHashMul3 = {
0x00008c61, // 33 ^ 3
0x00000441, // 33 ^ 2
0x00000021, // 33 ^ 1
0x00000001, // 33 ^ 0
};
__declspec(naked) uint32_t
HashDjb2_SSE41(const uint8_t* src, int count, uint32_t seed) {
__asm {
mov eax, [esp + 4] // src
mov ecx, [esp + 8] // count
movd xmm0, [esp + 12] // seed
pxor xmm7, xmm7 // constant 0 for unpck
movdqa xmm6, xmmword ptr kHash16x33
wloop:
movdqu xmm1, [eax] // src[0-15]
lea eax, [eax + 16]
pmulld xmm0, xmm6 // hash *= 33 ^ 16
movdqa xmm5, xmmword ptr kHashMul0
movdqa xmm2, xmm1
punpcklbw xmm2, xmm7 // src[0-7]
movdqa xmm3, xmm2
punpcklwd xmm3, xmm7 // src[0-3]
pmulld xmm3, xmm5
movdqa xmm5, xmmword ptr kHashMul1
movdqa xmm4, xmm2
punpckhwd xmm4, xmm7 // src[4-7]
pmulld xmm4, xmm5
movdqa xmm5, xmmword ptr kHashMul2
punpckhbw xmm1, xmm7 // src[8-15]
movdqa xmm2, xmm1
punpcklwd xmm2, xmm7 // src[8-11]
pmulld xmm2, xmm5
movdqa xmm5, xmmword ptr kHashMul3
punpckhwd xmm1, xmm7 // src[12-15]
pmulld xmm1, xmm5
paddd xmm3, xmm4 // add 16 results
paddd xmm1, xmm2
paddd xmm1, xmm3
pshufd xmm2, xmm1, 0x0e // upper 2 dwords
paddd xmm1, xmm2
pshufd xmm2, xmm1, 0x01
paddd xmm1, xmm2
paddd xmm0, xmm1
sub ecx, 16
jg wloop
movd eax, xmm0 // return hash
ret
}
}
// Visual C 2012 required for AVX2.
#ifdef HAS_HASHDJB2_AVX2
__declspec(naked) uint32_t
HashDjb2_AVX2(const uint8_t* src, int count, uint32_t seed) {
__asm {
mov eax, [esp + 4] // src
mov ecx, [esp + 8] // count
vmovd xmm0, [esp + 12] // seed
wloop:
vpmovzxbd xmm3, [eax] // src[0-3]
vpmulld xmm0, xmm0, xmmword ptr kHash16x33 // hash *= 33 ^ 16
vpmovzxbd xmm4, [eax + 4] // src[4-7]
vpmulld xmm3, xmm3, xmmword ptr kHashMul0
vpmovzxbd xmm2, [eax + 8] // src[8-11]
vpmulld xmm4, xmm4, xmmword ptr kHashMul1
vpmovzxbd xmm1, [eax + 12] // src[12-15]
vpmulld xmm2, xmm2, xmmword ptr kHashMul2
lea eax, [eax + 16]
vpmulld xmm1, xmm1, xmmword ptr kHashMul3
vpaddd xmm3, xmm3, xmm4 // add 16 results
vpaddd xmm1, xmm1, xmm2
vpaddd xmm1, xmm1, xmm3
vpshufd xmm2, xmm1, 0x0e // upper 2 dwords
vpaddd xmm1, xmm1,xmm2
vpshufd xmm2, xmm1, 0x01
vpaddd xmm1, xmm1, xmm2
vpaddd xmm0, xmm0, xmm1
sub ecx, 16
jg wloop
vmovd eax, xmm0 // return hash
vzeroupper
ret
}
}
#endif // HAS_HASHDJB2_AVX2
#endif // !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,855 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/convert_from.h"
#include "libyuv/basic_types.h"
#include "libyuv/convert.h" // For I420Copy
#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h"
#include "libyuv/rotate.h"
#include "libyuv/row.h"
#include "libyuv/scale.h" // For ScalePlane()
#include "libyuv/video_common.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#define SUBSAMPLE(v, a, s) (v < 0) ? (-((-v + a) >> s)) : ((v + a) >> s)
static __inline int Abs(int v) {
return v >= 0 ? v : -v;
}
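// Worked example (illustrative, not part of the original source):
// SUBSAMPLE rounds the magnitude up while preserving sign:
// SUBSAMPLE(5, 1, 1) = (5 + 1) >> 1 = 3 and
// SUBSAMPLE(-5, 1, 1) = -((5 + 1) >> 1) = -3, a ceiling halving that also
// works for the negative (inverted) heights used throughout.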
// I420 to any I4xx YUV format with mirroring.
// TODO(fbarchard): Consider kFilterNone for Y, or CopyPlane
static int I420ToI4xx(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int src_y_width,
int src_y_height,
int dst_uv_width,
int dst_uv_height) {
const int dst_y_width = Abs(src_y_width);
const int dst_y_height = Abs(src_y_height);
const int src_uv_width = SUBSAMPLE(src_y_width, 1, 1);
const int src_uv_height = SUBSAMPLE(src_y_height, 1, 1);
if (src_y_width == 0 || src_y_height == 0 || dst_uv_width <= 0 ||
dst_uv_height <= 0) {
return -1;
}
if (dst_y) {
ScalePlane(src_y, src_stride_y, src_y_width, src_y_height, dst_y,
dst_stride_y, dst_y_width, dst_y_height, kFilterBilinear);
}
ScalePlane(src_u, src_stride_u, src_uv_width, src_uv_height, dst_u,
dst_stride_u, dst_uv_width, dst_uv_height, kFilterBilinear);
ScalePlane(src_v, src_stride_v, src_uv_width, src_uv_height, dst_v,
dst_stride_v, dst_uv_width, dst_uv_height, kFilterBilinear);
return 0;
}
// Convert 8 bit YUV to 10 bit.
LIBYUV_API
int I420ToI010(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height) {
int halfwidth = (width + 1) >> 1;
int halfheight = (height + 1) >> 1;
if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
halfheight = (height + 1) >> 1;
src_y = src_y + (height - 1) * src_stride_y;
src_u = src_u + (halfheight - 1) * src_stride_u;
src_v = src_v + (halfheight - 1) * src_stride_v;
src_stride_y = -src_stride_y;
src_stride_u = -src_stride_u;
src_stride_v = -src_stride_v;
}
// Convert Y plane.
Convert8To16Plane(src_y, src_stride_y, dst_y, dst_stride_y, 1024, width,
height);
// Convert UV planes.
Convert8To16Plane(src_u, src_stride_u, dst_u, dst_stride_u, 1024, halfwidth,
halfheight);
Convert8To16Plane(src_v, src_stride_v, dst_v, dst_stride_v, 1024, halfwidth,
halfheight);
return 0;
}
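// Note (illustrative assumption, not from the original source): the scale
// argument encodes the target bit depth as a power of two: 1024 = 2^10
// here for 10-bit output, and 4096 = 2^12 in I420ToI012 below for 12-bit
// output; how 8-bit samples are mapped onto that range is left to
// Convert8To16Plane.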
// Convert 8 bit YUV to 12 bit.
LIBYUV_API
int I420ToI012(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height) {
int halfwidth = (width + 1) >> 1;
int halfheight = (height + 1) >> 1;
if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
halfheight = (height + 1) >> 1;
src_y = src_y + (height - 1) * src_stride_y;
src_u = src_u + (halfheight - 1) * src_stride_u;
src_v = src_v + (halfheight - 1) * src_stride_v;
src_stride_y = -src_stride_y;
src_stride_u = -src_stride_u;
src_stride_v = -src_stride_v;
}
// Convert Y plane.
Convert8To16Plane(src_y, src_stride_y, dst_y, dst_stride_y, 4096, width,
height);
// Convert UV planes.
Convert8To16Plane(src_u, src_stride_u, dst_u, dst_stride_u, 4096, halfwidth,
halfheight);
Convert8To16Plane(src_v, src_stride_v, dst_v, dst_stride_v, 4096, halfwidth,
halfheight);
return 0;
}
// 420 chroma is 1/2 width, 1/2 height
// 422 chroma is 1/2 width, 1x height
LIBYUV_API
int I420ToI422(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height) {
const int dst_uv_width = (Abs(width) + 1) >> 1;
const int dst_uv_height = Abs(height);
return I420ToI4xx(src_y, src_stride_y, src_u, src_stride_u, src_v,
src_stride_v, dst_y, dst_stride_y, dst_u, dst_stride_u,
dst_v, dst_stride_v, width, height, dst_uv_width,
dst_uv_height);
}
// 420 chroma is 1/2 width, 1/2 height
// 444 chroma is 1x width, 1x height
LIBYUV_API
int I420ToI444(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height) {
const int dst_uv_width = Abs(width);
const int dst_uv_height = Abs(height);
return I420ToI4xx(src_y, src_stride_y, src_u, src_stride_u, src_v,
src_stride_v, dst_y, dst_stride_y, dst_u, dst_stride_u,
dst_v, dst_stride_v, width, height, dst_uv_width,
dst_uv_height);
}
// 420 chroma to 444 chroma, 10/12 bit version
LIBYUV_API
int I010ToI410(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height) {
if (width == 0 || height == 0) {
return -1;
}
if (dst_y) {
ScalePlane_12(src_y, src_stride_y, width, height, dst_y, dst_stride_y,
Abs(width), Abs(height), kFilterBilinear);
}
ScalePlane_12(src_u, src_stride_u, SUBSAMPLE(width, 1, 1),
SUBSAMPLE(height, 1, 1), dst_u, dst_stride_u, Abs(width),
Abs(height), kFilterBilinear);
ScalePlane_12(src_v, src_stride_v, SUBSAMPLE(width, 1, 1),
SUBSAMPLE(height, 1, 1), dst_v, dst_stride_v, Abs(width),
Abs(height), kFilterBilinear);
return 0;
}
// 422 chroma to 444 chroma, 10/12 bit version
LIBYUV_API
int I210ToI410(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height) {
if (width == 0 || height == 0) {
return -1;
}
if (dst_y) {
ScalePlane_12(src_y, src_stride_y, width, height, dst_y, dst_stride_y,
Abs(width), Abs(height), kFilterBilinear);
}
ScalePlane_12(src_u, src_stride_u, SUBSAMPLE(width, 1, 1), height, dst_u,
dst_stride_u, Abs(width), Abs(height), kFilterBilinear);
ScalePlane_12(src_v, src_stride_v, SUBSAMPLE(width, 1, 1), height, dst_v,
dst_stride_v, Abs(width), Abs(height), kFilterBilinear);
return 0;
}
// 422 chroma is 1/2 width, 1x height
// 444 chroma is 1x width, 1x height
LIBYUV_API
int I422ToI444(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height) {
if (width == 0 || height == 0) {
return -1;
}
if (dst_y) {
ScalePlane(src_y, src_stride_y, width, height, dst_y, dst_stride_y,
Abs(width), Abs(height), kFilterBilinear);
}
ScalePlane(src_u, src_stride_u, SUBSAMPLE(width, 1, 1), height, dst_u,
dst_stride_u, Abs(width), Abs(height), kFilterBilinear);
ScalePlane(src_v, src_stride_v, SUBSAMPLE(width, 1, 1), height, dst_v,
dst_stride_v, Abs(width), Abs(height), kFilterBilinear);
return 0;
}
// Copy to I400. Source can be I420, I422, I444, I400, NV12, or NV21.
LIBYUV_API
int I400Copy(const uint8_t* src_y,
int src_stride_y,
uint8_t* dst_y,
int dst_stride_y,
int width,
int height) {
if (!src_y || !dst_y || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
src_y = src_y + (height - 1) * src_stride_y;
src_stride_y = -src_stride_y;
}
CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
return 0;
}
LIBYUV_API
int I422ToYUY2(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_yuy2,
int dst_stride_yuy2,
int width,
int height) {
int y;
void (*I422ToYUY2Row)(const uint8_t* src_y, const uint8_t* src_u,
const uint8_t* src_v, uint8_t* dst_yuy2, int width) =
I422ToYUY2Row_C;
if (!src_y || !src_u || !src_v || !dst_yuy2 || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
dst_stride_yuy2 = -dst_stride_yuy2;
}
// Coalesce rows.
if (src_stride_y == width && src_stride_u * 2 == width &&
src_stride_v * 2 == width && dst_stride_yuy2 == width * 2) {
width *= height;
height = 1;
src_stride_y = src_stride_u = src_stride_v = dst_stride_yuy2 = 0;
}
#if defined(HAS_I422TOYUY2ROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
if (IS_ALIGNED(width, 16)) {
I422ToYUY2Row = I422ToYUY2Row_SSE2;
}
}
#endif
#if defined(HAS_I422TOYUY2ROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
I422ToYUY2Row = I422ToYUY2Row_Any_AVX2;
if (IS_ALIGNED(width, 32)) {
I422ToYUY2Row = I422ToYUY2Row_AVX2;
}
}
#endif
#if defined(HAS_I422TOYUY2ROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
if (IS_ALIGNED(width, 16)) {
I422ToYUY2Row = I422ToYUY2Row_NEON;
}
}
#endif
for (y = 0; y < height; ++y) {
I422ToYUY2Row(src_y, src_u, src_v, dst_yuy2, width);
src_y += src_stride_y;
src_u += src_stride_u;
src_v += src_stride_v;
dst_yuy2 += dst_stride_yuy2;
}
return 0;
}
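// The "coalesce rows" test above exploits contiguity: when every stride
// equals its packed row width, the planes carry no padding, so the whole
// frame can be processed as a single row of width * height pixels with the
// strides dropped to 0, keeping the SIMD row kernels in their main loop
// longer. A minimal sketch of the same idea for a one-plane copy
// (illustrative only; would need <string.h> if compiled):
#if 0
static void SampleCoalescedCopy(const uint8_t* src, int src_stride,
                                uint8_t* dst, int dst_stride, int width,
                                int height) {
  if (src_stride == width && dst_stride == width) {
    width *= height;  // one long row covers the whole plane
    height = 1;
    src_stride = dst_stride = 0;
  }
  for (int y = 0; y < height; ++y) {
    memcpy(dst + y * dst_stride, src + y * src_stride, width);
  }
}
#endif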
LIBYUV_API
int I420ToYUY2(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_yuy2,
int dst_stride_yuy2,
int width,
int height) {
int y;
void (*I422ToYUY2Row)(const uint8_t* src_y, const uint8_t* src_u,
const uint8_t* src_v, uint8_t* dst_yuy2, int width) =
I422ToYUY2Row_C;
if (!src_y || !src_u || !src_v || !dst_yuy2 || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
dst_stride_yuy2 = -dst_stride_yuy2;
}
#if defined(HAS_I422TOYUY2ROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
if (IS_ALIGNED(width, 16)) {
I422ToYUY2Row = I422ToYUY2Row_SSE2;
}
}
#endif
#if defined(HAS_I422TOYUY2ROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
I422ToYUY2Row = I422ToYUY2Row_Any_AVX2;
if (IS_ALIGNED(width, 32)) {
I422ToYUY2Row = I422ToYUY2Row_AVX2;
}
}
#endif
#if defined(HAS_I422TOYUY2ROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
if (IS_ALIGNED(width, 16)) {
I422ToYUY2Row = I422ToYUY2Row_NEON;
}
}
#endif
#if defined(HAS_I422TOYUY2ROW_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
I422ToYUY2Row = I422ToYUY2Row_Any_MMI;
if (IS_ALIGNED(width, 8)) {
I422ToYUY2Row = I422ToYUY2Row_MMI;
}
}
#endif
#if defined(HAS_I422TOYUY2ROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
I422ToYUY2Row = I422ToYUY2Row_Any_MSA;
if (IS_ALIGNED(width, 32)) {
I422ToYUY2Row = I422ToYUY2Row_MSA;
}
}
#endif
for (y = 0; y < height - 1; y += 2) {
I422ToYUY2Row(src_y, src_u, src_v, dst_yuy2, width);
I422ToYUY2Row(src_y + src_stride_y, src_u, src_v,
dst_yuy2 + dst_stride_yuy2, width);
src_y += src_stride_y * 2;
src_u += src_stride_u;
src_v += src_stride_v;
dst_yuy2 += dst_stride_yuy2 * 2;
}
if (height & 1) {
I422ToYUY2Row(src_y, src_u, src_v, dst_yuy2, width);
}
return 0;
}
LIBYUV_API
int I422ToUYVY(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_uyvy,
int dst_stride_uyvy,
int width,
int height) {
int y;
void (*I422ToUYVYRow)(const uint8_t* src_y, const uint8_t* src_u,
const uint8_t* src_v, uint8_t* dst_uyvy, int width) =
I422ToUYVYRow_C;
if (!src_y || !src_u || !src_v || !dst_uyvy || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
dst_stride_uyvy = -dst_stride_uyvy;
}
// Coalesce rows.
if (src_stride_y == width && src_stride_u * 2 == width &&
src_stride_v * 2 == width && dst_stride_uyvy == width * 2) {
width *= height;
height = 1;
src_stride_y = src_stride_u = src_stride_v = dst_stride_uyvy = 0;
}
#if defined(HAS_I422TOUYVYROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
if (IS_ALIGNED(width, 16)) {
I422ToUYVYRow = I422ToUYVYRow_SSE2;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
I422ToUYVYRow = I422ToUYVYRow_Any_AVX2;
if (IS_ALIGNED(width, 32)) {
I422ToUYVYRow = I422ToUYVYRow_AVX2;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
if (IS_ALIGNED(width, 16)) {
I422ToUYVYRow = I422ToUYVYRow_NEON;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
I422ToUYVYRow = I422ToUYVYRow_Any_MMI;
if (IS_ALIGNED(width, 8)) {
I422ToUYVYRow = I422ToUYVYRow_MMI;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
I422ToUYVYRow = I422ToUYVYRow_Any_MSA;
if (IS_ALIGNED(width, 32)) {
I422ToUYVYRow = I422ToUYVYRow_MSA;
}
}
#endif
for (y = 0; y < height; ++y) {
I422ToUYVYRow(src_y, src_u, src_v, dst_uyvy, width);
src_y += src_stride_y;
src_u += src_stride_u;
src_v += src_stride_v;
dst_uyvy += dst_stride_uyvy;
}
return 0;
}
LIBYUV_API
int I420ToUYVY(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_uyvy,
int dst_stride_uyvy,
int width,
int height) {
int y;
void (*I422ToUYVYRow)(const uint8_t* src_y, const uint8_t* src_u,
const uint8_t* src_v, uint8_t* dst_uyvy, int width) =
I422ToUYVYRow_C;
if (!src_y || !src_u || !src_v || !dst_uyvy || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
dst_stride_uyvy = -dst_stride_uyvy;
}
#if defined(HAS_I422TOUYVYROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
if (IS_ALIGNED(width, 16)) {
I422ToUYVYRow = I422ToUYVYRow_SSE2;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
I422ToUYVYRow = I422ToUYVYRow_Any_AVX2;
if (IS_ALIGNED(width, 32)) {
I422ToUYVYRow = I422ToUYVYRow_AVX2;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
if (IS_ALIGNED(width, 16)) {
I422ToUYVYRow = I422ToUYVYRow_NEON;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
I422ToUYVYRow = I422ToUYVYRow_Any_MMI;
if (IS_ALIGNED(width, 8)) {
I422ToUYVYRow = I422ToUYVYRow_MMI;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
I422ToUYVYRow = I422ToUYVYRow_Any_MSA;
if (IS_ALIGNED(width, 32)) {
I422ToUYVYRow = I422ToUYVYRow_MSA;
}
}
#endif
for (y = 0; y < height - 1; y += 2) {
I422ToUYVYRow(src_y, src_u, src_v, dst_uyvy, width);
I422ToUYVYRow(src_y + src_stride_y, src_u, src_v,
dst_uyvy + dst_stride_uyvy, width);
src_y += src_stride_y * 2;
src_u += src_stride_u;
src_v += src_stride_v;
dst_uyvy += dst_stride_uyvy * 2;
}
if (height & 1) {
I422ToUYVYRow(src_y, src_u, src_v, dst_uyvy, width);
}
return 0;
}
LIBYUV_API
int I420ToNV12(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height) {
int halfwidth = (width + 1) / 2;
int halfheight = (height + 1) / 2;
if (!src_y || !src_u || !src_v || !dst_y || !dst_uv || width <= 0 ||
height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
halfheight = (height + 1) >> 1;
src_y = src_y + (height - 1) * src_stride_y;
src_u = src_u + (halfheight - 1) * src_stride_u;
src_v = src_v + (halfheight - 1) * src_stride_v;
src_stride_y = -src_stride_y;
src_stride_u = -src_stride_u;
src_stride_v = -src_stride_v;
}
if (dst_y) {
CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
}
MergeUVPlane(src_u, src_stride_u, src_v, src_stride_v, dst_uv, dst_stride_uv,
halfwidth, halfheight);
return 0;
}
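// Usage sketch (illustrative; buffer layout assumed): pack a 640x360 I420
// frame held in one contiguous allocation into NV12 using the function above.
#if 0
static int SampleI420ToNV12(const uint8_t* i420, uint8_t* nv12) {
  const int w = 640, h = 360, hw = (w + 1) / 2, hh = (h + 1) / 2;
  const uint8_t* src_y = i420;             // w x h luma
  const uint8_t* src_u = src_y + w * h;    // hw x hh U plane
  const uint8_t* src_v = src_u + hw * hh;  // hw x hh V plane
  uint8_t* dst_y = nv12;
  uint8_t* dst_uv = dst_y + w * h;         // interleaved UV, stride 2 * hw
  return I420ToNV12(src_y, w, src_u, hw, src_v, hw, dst_y, w, dst_uv, hw * 2,
                    w, h);
}
#endif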
LIBYUV_API
int I420ToNV21(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height) {
return I420ToNV12(src_y, src_stride_y, src_v, src_stride_v, src_u,
src_stride_u, dst_y, dst_stride_y, dst_vu, dst_stride_vu,
width, height);
}
// Convert I420 to specified format
LIBYUV_API
int ConvertFromI420(const uint8_t* y,
int y_stride,
const uint8_t* u,
int u_stride,
const uint8_t* v,
int v_stride,
uint8_t* dst_sample,
int dst_sample_stride,
int width,
int height,
uint32_t fourcc) {
uint32_t format = CanonicalFourCC(fourcc);
int r = 0;
if (!y || !u || !v || !dst_sample || width <= 0 || height == 0) {
return -1;
}
switch (format) {
// Single plane formats
case FOURCC_YUY2:
r = I420ToYUY2(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 2, width,
height);
break;
case FOURCC_UYVY:
r = I420ToUYVY(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 2, width,
height);
break;
case FOURCC_RGBP:
r = I420ToRGB565(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 2, width,
height);
break;
case FOURCC_RGBO:
r = I420ToARGB1555(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 2,
width, height);
break;
case FOURCC_R444:
r = I420ToARGB4444(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 2,
width, height);
break;
case FOURCC_24BG:
r = I420ToRGB24(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 3, width,
height);
break;
case FOURCC_RAW:
r = I420ToRAW(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 3, width,
height);
break;
case FOURCC_ARGB:
r = I420ToARGB(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 4, width,
height);
break;
case FOURCC_BGRA:
r = I420ToBGRA(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 4, width,
height);
break;
case FOURCC_ABGR:
r = I420ToABGR(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 4, width,
height);
break;
case FOURCC_RGBA:
r = I420ToRGBA(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 4, width,
height);
break;
case FOURCC_AR30:
r = I420ToAR30(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 4, width,
height);
break;
case FOURCC_I400:
r = I400Copy(y, y_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width, width,
height);
break;
case FOURCC_NV12: {
uint8_t* dst_uv = dst_sample + width * height;
r = I420ToNV12(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width, dst_uv,
dst_sample_stride ? dst_sample_stride : width, width,
height);
break;
}
case FOURCC_NV21: {
uint8_t* dst_vu = dst_sample + width * height;
r = I420ToNV21(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width, dst_vu,
dst_sample_stride ? dst_sample_stride : width, width,
height);
break;
}
// Triplanar formats
case FOURCC_I420:
case FOURCC_YV12: {
dst_sample_stride = dst_sample_stride ? dst_sample_stride : width;
int halfstride = (dst_sample_stride + 1) / 2;
int halfheight = (height + 1) / 2;
uint8_t* dst_u;
uint8_t* dst_v;
if (format == FOURCC_YV12) {
dst_v = dst_sample + dst_sample_stride * height;
dst_u = dst_v + halfstride * halfheight;
} else {
dst_u = dst_sample + dst_sample_stride * height;
dst_v = dst_u + halfstride * halfheight;
}
r = I420Copy(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride, dst_u, halfstride, dst_v, halfstride,
width, height);
break;
}
case FOURCC_I422:
case FOURCC_YV16: {
dst_sample_stride = dst_sample_stride ? dst_sample_stride : width;
int halfstride = (dst_sample_stride + 1) / 2;
uint8_t* dst_u;
uint8_t* dst_v;
if (format == FOURCC_YV16) {
dst_v = dst_sample + dst_sample_stride * height;
dst_u = dst_v + halfstride * height;
} else {
dst_u = dst_sample + dst_sample_stride * height;
dst_v = dst_u + halfstride * height;
}
r = I420ToI422(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride, dst_u, halfstride, dst_v, halfstride,
width, height);
break;
}
case FOURCC_I444:
case FOURCC_YV24: {
dst_sample_stride = dst_sample_stride ? dst_sample_stride : width;
uint8_t* dst_u;
uint8_t* dst_v;
if (format == FOURCC_YV24) {
dst_v = dst_sample + dst_sample_stride * height;
dst_u = dst_v + dst_sample_stride * height;
} else {
dst_u = dst_sample + dst_sample_stride * height;
dst_v = dst_u + dst_sample_stride * height;
}
r = I420ToI444(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride, dst_u, dst_sample_stride, dst_v,
dst_sample_stride, width, height);
break;
}
    // Formats not supported - MJPG and some RGB formats.
default:
return -1; // unknown fourcc - return failure code.
}
return r;
}
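// Usage sketch (illustrative; names assumed): write an I420 frame out as
// packed YUY2 through the FOURCC dispatcher above. A dst_sample_stride of 0
// selects the default width * 2 stride.
#if 0
static int SampleI420ToYuy2(const uint8_t* y, const uint8_t* u,
                            const uint8_t* v, uint8_t* out, int w, int h) {
  const int hw = (w + 1) / 2;
  return ConvertFromI420(y, w, u, hw, v, hw, out, 0, w, h, FOURCC_YUY2);
}
#endif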
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif


@@ -1,602 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/convert.h"
#include "libyuv/convert_argb.h"
#ifdef HAVE_JPEG
#include "libyuv/mjpeg_decoder.h"
#endif
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#ifdef HAVE_JPEG
struct I420Buffers {
uint8_t* y;
int y_stride;
uint8_t* u;
int u_stride;
uint8_t* v;
int v_stride;
int w;
int h;
};
static void JpegCopyI420(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
I420Buffers* dest = (I420Buffers*)(opaque);
I420Copy(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->y, dest->y_stride, dest->u, dest->u_stride, dest->v,
dest->v_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->u += ((rows + 1) >> 1) * dest->u_stride;
dest->v += ((rows + 1) >> 1) * dest->v_stride;
dest->h -= rows;
}
static void JpegI422ToI420(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
I420Buffers* dest = (I420Buffers*)(opaque);
I422ToI420(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->y, dest->y_stride, dest->u, dest->u_stride, dest->v,
dest->v_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->u += ((rows + 1) >> 1) * dest->u_stride;
dest->v += ((rows + 1) >> 1) * dest->v_stride;
dest->h -= rows;
}
static void JpegI444ToI420(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
I420Buffers* dest = (I420Buffers*)(opaque);
I444ToI420(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->y, dest->y_stride, dest->u, dest->u_stride, dest->v,
dest->v_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->u += ((rows + 1) >> 1) * dest->u_stride;
dest->v += ((rows + 1) >> 1) * dest->v_stride;
dest->h -= rows;
}
static void JpegI400ToI420(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
I420Buffers* dest = (I420Buffers*)(opaque);
I400ToI420(data[0], strides[0], dest->y, dest->y_stride, dest->u,
dest->u_stride, dest->v, dest->v_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->u += ((rows + 1) >> 1) * dest->u_stride;
dest->v += ((rows + 1) >> 1) * dest->v_stride;
dest->h -= rows;
}
// Query size of MJPG in pixels.
LIBYUV_API
int MJPGSize(const uint8_t* src_mjpg,
size_t src_size_mjpg,
int* width,
int* height) {
MJpegDecoder mjpeg_decoder;
LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(src_mjpg, src_size_mjpg);
if (ret) {
*width = mjpeg_decoder.GetWidth();
*height = mjpeg_decoder.GetHeight();
}
mjpeg_decoder.UnloadFrame();
return ret ? 0 : -1; // -1 for runtime failure.
}
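// Usage sketch (illustrative): probe the decoded dimensions of an MJPEG
// frame before sizing I420 planes for MJPGToI420. Error handling is
// abbreviated.
#if 0
static int SampleI420SizeForMjpg(const uint8_t* jpg, size_t jpg_size) {
  int w = 0, h = 0;
  if (MJPGSize(jpg, jpg_size, &w, &h) != 0) {
    return -1;  // not a decodable JPEG
  }
  // An I420 destination needs w*h luma bytes plus two half-size chroma planes.
  return w * h + 2 * (((w + 1) / 2) * ((h + 1) / 2));
}
#endif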
// MJPG (Motion JPEG) to I420
// TODO(fbarchard): review src_width and src_height requirement. dst_width and
// dst_height may be enough.
LIBYUV_API
int MJPGToI420(const uint8_t* src_mjpg,
size_t src_size_mjpg,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int src_width,
int src_height,
int dst_width,
int dst_height) {
if (src_size_mjpg == kUnknownDataSize) {
// ERROR: MJPEG frame size unknown
return -1;
}
// TODO(fbarchard): Port MJpeg to C.
MJpegDecoder mjpeg_decoder;
LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(src_mjpg, src_size_mjpg);
if (ret && (mjpeg_decoder.GetWidth() != src_width ||
mjpeg_decoder.GetHeight() != src_height)) {
// ERROR: MJPEG frame has unexpected dimensions
mjpeg_decoder.UnloadFrame();
return 1; // runtime failure
}
if (ret) {
I420Buffers bufs = {dst_y, dst_stride_y, dst_u, dst_stride_u,
dst_v, dst_stride_v, dst_width, dst_height};
// YUV420
if (mjpeg_decoder.GetColorSpace() == MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 2 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegCopyI420, &bufs, dst_width,
dst_height);
// YUV422
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI422ToI420, &bufs, dst_width,
dst_height);
// YUV444
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI444ToI420, &bufs, dst_width,
dst_height);
// YUV400
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceGrayscale &&
mjpeg_decoder.GetNumComponents() == 1 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI400ToI420, &bufs, dst_width,
dst_height);
} else {
// TODO(fbarchard): Implement conversion for any other
// colorspace/subsample factors that occur in practice. ERROR: Unable to
// convert MJPEG frame because format is not supported
mjpeg_decoder.UnloadFrame();
return 1;
}
}
return ret ? 0 : 1;
}
struct NV21Buffers {
uint8_t* y;
int y_stride;
uint8_t* vu;
int vu_stride;
int w;
int h;
};
static void JpegI420ToNV21(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
I420ToNV21(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->y, dest->y_stride, dest->vu, dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
static void JpegI422ToNV21(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
I422ToNV21(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->y, dest->y_stride, dest->vu, dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
static void JpegI444ToNV21(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
I444ToNV21(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->y, dest->y_stride, dest->vu, dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
static void JpegI400ToNV21(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
I400ToNV21(data[0], strides[0], dest->y, dest->y_stride, dest->vu,
dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
// MJPG (Motion JPEG) to NV21
LIBYUV_API
int MJPGToNV21(const uint8_t* src_mjpg,
size_t src_size_mjpg,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int src_width,
int src_height,
int dst_width,
int dst_height) {
if (src_size_mjpg == kUnknownDataSize) {
// ERROR: MJPEG frame size unknown
return -1;
}
// TODO(fbarchard): Port MJpeg to C.
MJpegDecoder mjpeg_decoder;
LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(src_mjpg, src_size_mjpg);
if (ret && (mjpeg_decoder.GetWidth() != src_width ||
mjpeg_decoder.GetHeight() != src_height)) {
// ERROR: MJPEG frame has unexpected dimensions
mjpeg_decoder.UnloadFrame();
return 1; // runtime failure
}
if (ret) {
NV21Buffers bufs = {dst_y, dst_stride_y, dst_vu,
dst_stride_vu, dst_width, dst_height};
// YUV420
if (mjpeg_decoder.GetColorSpace() == MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 2 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI420ToNV21, &bufs, dst_width,
dst_height);
// YUV422
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI422ToNV21, &bufs, dst_width,
dst_height);
// YUV444
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI444ToNV21, &bufs, dst_width,
dst_height);
// YUV400
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceGrayscale &&
mjpeg_decoder.GetNumComponents() == 1 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI400ToNV21, &bufs, dst_width,
dst_height);
} else {
// Unknown colorspace.
mjpeg_decoder.UnloadFrame();
return 1;
}
}
return ret ? 0 : 1;
}
static void JpegI420ToNV12(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
// Use NV21 with VU swapped.
I420ToNV21(data[0], strides[0], data[2], strides[2], data[1], strides[1],
dest->y, dest->y_stride, dest->vu, dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
static void JpegI422ToNV12(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
// Use NV21 with VU swapped.
I422ToNV21(data[0], strides[0], data[2], strides[2], data[1], strides[1],
dest->y, dest->y_stride, dest->vu, dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
static void JpegI444ToNV12(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
// Use NV21 with VU swapped.
I444ToNV21(data[0], strides[0], data[2], strides[2], data[1], strides[1],
dest->y, dest->y_stride, dest->vu, dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
static void JpegI400ToNV12(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
// Use NV21 since there is no UV plane.
I400ToNV21(data[0], strides[0], dest->y, dest->y_stride, dest->vu,
dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
// MJPG (Motion JPEG) to NV12.
LIBYUV_API
int MJPGToNV12(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int src_width,
int src_height,
int dst_width,
int dst_height) {
if (sample_size == kUnknownDataSize) {
// ERROR: MJPEG frame size unknown
return -1;
}
// TODO(fbarchard): Port MJpeg to C.
MJpegDecoder mjpeg_decoder;
LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size);
if (ret && (mjpeg_decoder.GetWidth() != src_width ||
mjpeg_decoder.GetHeight() != src_height)) {
// ERROR: MJPEG frame has unexpected dimensions
mjpeg_decoder.UnloadFrame();
return 1; // runtime failure
}
if (ret) {
// Use NV21Buffers but with UV instead of VU.
NV21Buffers bufs = {dst_y, dst_stride_y, dst_uv,
dst_stride_uv, dst_width, dst_height};
// YUV420
if (mjpeg_decoder.GetColorSpace() == MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 2 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI420ToNV12, &bufs, dst_width,
dst_height);
// YUV422
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI422ToNV12, &bufs, dst_width,
dst_height);
// YUV444
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI444ToNV12, &bufs, dst_width,
dst_height);
// YUV400
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceGrayscale &&
mjpeg_decoder.GetNumComponents() == 1 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI400ToNV12, &bufs, dst_width,
dst_height);
} else {
// Unknown colorspace.
mjpeg_decoder.UnloadFrame();
return 1;
}
}
return ret ? 0 : 1;
}
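// The NV12 helpers above reuse the NV21 writers: an NV21 packer interleaves
// its first chroma argument before its second, so swapping the U and V plane
// arguments flips the output order from VU to UV. A minimal model of the
// trick (illustrative only):
#if 0
static void SamplePackPairs(const uint8_t* a, const uint8_t* b, uint8_t* dst,
                            int pairs) {
  for (int i = 0; i < pairs; ++i) {  // emits a,b,a,b,... per pair
    dst[2 * i + 0] = a[i];
    dst[2 * i + 1] = b[i];
  }
}
// SamplePackPairs(v, u, ...) yields VU (NV21); SamplePackPairs(u, v, ...)
// yields UV (NV12) from the same code.
#endif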
struct ARGBBuffers {
uint8_t* argb;
int argb_stride;
int w;
int h;
};
static void JpegI420ToARGB(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
ARGBBuffers* dest = (ARGBBuffers*)(opaque);
I420ToARGB(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->argb, dest->argb_stride, dest->w, rows);
dest->argb += rows * dest->argb_stride;
dest->h -= rows;
}
static void JpegI422ToARGB(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
ARGBBuffers* dest = (ARGBBuffers*)(opaque);
I422ToARGB(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->argb, dest->argb_stride, dest->w, rows);
dest->argb += rows * dest->argb_stride;
dest->h -= rows;
}
static void JpegI444ToARGB(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
ARGBBuffers* dest = (ARGBBuffers*)(opaque);
I444ToARGB(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->argb, dest->argb_stride, dest->w, rows);
dest->argb += rows * dest->argb_stride;
dest->h -= rows;
}
static void JpegI400ToARGB(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
ARGBBuffers* dest = (ARGBBuffers*)(opaque);
I400ToARGB(data[0], strides[0], dest->argb, dest->argb_stride, dest->w, rows);
dest->argb += rows * dest->argb_stride;
dest->h -= rows;
}
// MJPG (Motion JPEG) to ARGB
// TODO(fbarchard): review src_width and src_height requirement. dst_width and
// dst_height may be enough.
LIBYUV_API
int MJPGToARGB(const uint8_t* src_mjpg,
size_t src_size_mjpg,
uint8_t* dst_argb,
int dst_stride_argb,
int src_width,
int src_height,
int dst_width,
int dst_height) {
if (src_size_mjpg == kUnknownDataSize) {
// ERROR: MJPEG frame size unknown
return -1;
}
// TODO(fbarchard): Port MJpeg to C.
MJpegDecoder mjpeg_decoder;
LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(src_mjpg, src_size_mjpg);
if (ret && (mjpeg_decoder.GetWidth() != src_width ||
mjpeg_decoder.GetHeight() != src_height)) {
// ERROR: MJPEG frame has unexpected dimensions
mjpeg_decoder.UnloadFrame();
return 1; // runtime failure
}
if (ret) {
ARGBBuffers bufs = {dst_argb, dst_stride_argb, dst_width, dst_height};
// YUV420
if (mjpeg_decoder.GetColorSpace() == MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 2 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI420ToARGB, &bufs, dst_width,
dst_height);
// YUV422
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI422ToARGB, &bufs, dst_width,
dst_height);
// YUV444
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI444ToARGB, &bufs, dst_width,
dst_height);
// YUV400
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceGrayscale &&
mjpeg_decoder.GetNumComponents() == 1 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI400ToARGB, &bufs, dst_width,
dst_height);
} else {
// TODO(fbarchard): Implement conversion for any other
// colorspace/subsample factors that occur in practice. ERROR: Unable to
// convert MJPEG frame because format is not supported
mjpeg_decoder.UnloadFrame();
return 1;
}
}
return ret ? 0 : 1;
}
#endif // HAVE_JPEG
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif


@@ -1,382 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/convert_argb.h"
#include "libyuv/cpu_id.h"
#ifdef HAVE_JPEG
#include "libyuv/mjpeg_decoder.h"
#endif
#include "libyuv/rotate_argb.h"
#include "libyuv/row.h"
#include "libyuv/video_common.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Convert camera sample to ARGB with cropping, rotation and vertical flip.
// src_width is used for source stride computation
// src_height is used to compute location of planes, and indicate inversion
// sample_size is measured in bytes and is the size of the frame.
// With MJPEG it is the compressed size of the frame.
// TODO(fbarchard): Add the following:
// H010ToARGB
// I010ToARGB
LIBYUV_API
int ConvertToARGB(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_argb,
int dst_stride_argb,
int crop_x,
int crop_y,
int src_width,
int src_height,
int crop_width,
int crop_height,
enum RotationMode rotation,
uint32_t fourcc) {
uint32_t format = CanonicalFourCC(fourcc);
int aligned_src_width = (src_width + 1) & ~1;
const uint8_t* src;
const uint8_t* src_uv;
int abs_src_height = (src_height < 0) ? -src_height : src_height;
int inv_crop_height = (crop_height < 0) ? -crop_height : crop_height;
int r = 0;
// One pass rotation is available for some formats. For the rest, convert
// to ARGB (with optional vertical flipping) into a temporary ARGB buffer,
// and then rotate the ARGB to the final destination buffer.
// For in-place conversion, if destination dst_argb is same as source sample,
// also enable temporary buffer.
LIBYUV_BOOL need_buf =
(rotation && format != FOURCC_ARGB) || dst_argb == sample;
uint8_t* dest_argb = dst_argb;
int dest_dst_stride_argb = dst_stride_argb;
uint8_t* rotate_buffer = NULL;
int abs_crop_height = (crop_height < 0) ? -crop_height : crop_height;
if (dst_argb == NULL || sample == NULL || src_width <= 0 || crop_width <= 0 ||
src_height == 0 || crop_height == 0) {
return -1;
}
if (src_height < 0) {
inv_crop_height = -inv_crop_height;
}
if (need_buf) {
int argb_size = crop_width * 4 * abs_crop_height;
rotate_buffer = (uint8_t*)malloc(argb_size); /* NOLINT */
if (!rotate_buffer) {
return 1; // Out of memory runtime error.
}
dst_argb = rotate_buffer;
dst_stride_argb = crop_width * 4;
}
switch (format) {
// Single plane formats
case FOURCC_YUY2:
src = sample + (aligned_src_width * crop_y + crop_x) * 2;
r = YUY2ToARGB(src, aligned_src_width * 2, dst_argb, dst_stride_argb,
crop_width, inv_crop_height);
break;
case FOURCC_UYVY:
src = sample + (aligned_src_width * crop_y + crop_x) * 2;
r = UYVYToARGB(src, aligned_src_width * 2, dst_argb, dst_stride_argb,
crop_width, inv_crop_height);
break;
case FOURCC_24BG:
src = sample + (src_width * crop_y + crop_x) * 3;
r = RGB24ToARGB(src, src_width * 3, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_RAW:
src = sample + (src_width * crop_y + crop_x) * 3;
r = RAWToARGB(src, src_width * 3, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_ARGB:
if (!need_buf && !rotation) {
src = sample + (src_width * crop_y + crop_x) * 4;
r = ARGBToARGB(src, src_width * 4, dst_argb, dst_stride_argb,
crop_width, inv_crop_height);
}
break;
case FOURCC_BGRA:
src = sample + (src_width * crop_y + crop_x) * 4;
r = BGRAToARGB(src, src_width * 4, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_ABGR:
src = sample + (src_width * crop_y + crop_x) * 4;
r = ABGRToARGB(src, src_width * 4, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_RGBA:
src = sample + (src_width * crop_y + crop_x) * 4;
r = RGBAToARGB(src, src_width * 4, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_AR30:
src = sample + (src_width * crop_y + crop_x) * 4;
r = AR30ToARGB(src, src_width * 4, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_AB30:
src = sample + (src_width * crop_y + crop_x) * 4;
r = AB30ToARGB(src, src_width * 4, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_RGBP:
src = sample + (src_width * crop_y + crop_x) * 2;
r = RGB565ToARGB(src, src_width * 2, dst_argb, dst_stride_argb,
crop_width, inv_crop_height);
break;
case FOURCC_RGBO:
src = sample + (src_width * crop_y + crop_x) * 2;
r = ARGB1555ToARGB(src, src_width * 2, dst_argb, dst_stride_argb,
crop_width, inv_crop_height);
break;
case FOURCC_R444:
src = sample + (src_width * crop_y + crop_x) * 2;
r = ARGB4444ToARGB(src, src_width * 2, dst_argb, dst_stride_argb,
crop_width, inv_crop_height);
break;
case FOURCC_I400:
src = sample + src_width * crop_y + crop_x;
r = I400ToARGB(src, src_width, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_J400:
src = sample + src_width * crop_y + crop_x;
r = J400ToARGB(src, src_width, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
// Biplanar formats
case FOURCC_NV12:
src = sample + (src_width * crop_y + crop_x);
src_uv =
sample + aligned_src_width * (abs_src_height + crop_y / 2) + crop_x;
r = NV12ToARGB(src, src_width, src_uv, aligned_src_width, dst_argb,
dst_stride_argb, crop_width, inv_crop_height);
break;
case FOURCC_NV21:
src = sample + (src_width * crop_y + crop_x);
src_uv =
sample + aligned_src_width * (abs_src_height + crop_y / 2) + crop_x;
// Call NV12 but with u and v parameters swapped.
r = NV21ToARGB(src, src_width, src_uv, aligned_src_width, dst_argb,
dst_stride_argb, crop_width, inv_crop_height);
break;
// Triplanar formats
case FOURCC_I420:
case FOURCC_YV12: {
const uint8_t* src_y = sample + (src_width * crop_y + crop_x);
const uint8_t* src_u;
const uint8_t* src_v;
int halfwidth = (src_width + 1) / 2;
int halfheight = (abs_src_height + 1) / 2;
if (format == FOURCC_YV12) {
src_v = sample + src_width * abs_src_height +
(halfwidth * crop_y + crop_x) / 2;
src_u = sample + src_width * abs_src_height +
halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
} else {
src_u = sample + src_width * abs_src_height +
(halfwidth * crop_y + crop_x) / 2;
src_v = sample + src_width * abs_src_height +
halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
}
r = I420ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_J420: {
int halfwidth = (src_width + 1) / 2;
int halfheight = (abs_src_height + 1) / 2;
const uint8_t* src_y = sample + (src_width * crop_y + crop_x);
const uint8_t* src_u = sample + src_width * abs_src_height +
(halfwidth * crop_y + crop_x) / 2;
const uint8_t* src_v = sample + src_width * abs_src_height +
halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
r = J420ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_H420: {
int halfwidth = (src_width + 1) / 2;
int halfheight = (abs_src_height + 1) / 2;
const uint8_t* src_y = sample + (src_width * crop_y + crop_x);
const uint8_t* src_u = sample + src_width * abs_src_height +
(halfwidth * crop_y + crop_x) / 2;
const uint8_t* src_v = sample + src_width * abs_src_height +
halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
r = H420ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_U420: {
int halfwidth = (src_width + 1) / 2;
int halfheight = (abs_src_height + 1) / 2;
const uint8_t* src_y = sample + (src_width * crop_y + crop_x);
const uint8_t* src_u = sample + src_width * abs_src_height +
(halfwidth * crop_y + crop_x) / 2;
const uint8_t* src_v = sample + src_width * abs_src_height +
halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
r = U420ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_I422:
case FOURCC_YV16: {
int halfwidth = (src_width + 1) / 2;
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
if (format == FOURCC_YV16) {
src_v = sample + src_width * abs_src_height + halfwidth * crop_y +
crop_x / 2;
src_u = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + crop_x / 2;
} else {
src_u = sample + src_width * abs_src_height + halfwidth * crop_y +
crop_x / 2;
src_v = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + crop_x / 2;
}
r = I422ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_J422: {
int halfwidth = (src_width + 1) / 2;
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u =
sample + src_width * abs_src_height + halfwidth * crop_y + crop_x / 2;
const uint8_t* src_v = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + crop_x / 2;
r = J422ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_H422: {
int halfwidth = (src_width + 1) / 2;
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u =
sample + src_width * abs_src_height + halfwidth * crop_y + crop_x / 2;
const uint8_t* src_v = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + crop_x / 2;
r = H422ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_U422: {
int halfwidth = (src_width + 1) / 2;
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u =
sample + src_width * abs_src_height + halfwidth * crop_y + crop_x / 2;
const uint8_t* src_v = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + crop_x / 2;
      r = U422ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
                     dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_I444:
case FOURCC_YV24: {
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
if (format == FOURCC_YV24) {
src_v = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_u = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
} else {
src_u = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_v = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
}
r = I444ToARGB(src_y, src_width, src_u, src_width, src_v, src_width,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_J444: {
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
src_u = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_v = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
r = J444ToARGB(src_y, src_width, src_u, src_width, src_v, src_width,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_H444: {
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
src_u = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_v = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
r = H444ToARGB(src_y, src_width, src_u, src_width, src_v, src_width,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_U444: {
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
src_u = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_v = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
r = U444ToARGB(src_y, src_width, src_u, src_width, src_v, src_width,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
#ifdef HAVE_JPEG
case FOURCC_MJPG:
r = MJPGToARGB(sample, sample_size, dst_argb, dst_stride_argb, src_width,
abs_src_height, crop_width, inv_crop_height);
break;
#endif
default:
r = -1; // unknown fourcc - return failure code.
}
if (need_buf) {
if (!r) {
r = ARGBRotate(dst_argb, dst_stride_argb, dest_argb, dest_dst_stride_argb,
crop_width, abs_crop_height, rotation);
}
free(rotate_buffer);
} else if (rotation) {
src = sample + (src_width * crop_y + crop_x) * 4;
r = ARGBRotate(src, src_width * 4, dst_argb, dst_stride_argb, crop_width,
inv_crop_height, rotation);
}
return r;
}
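// Usage sketch (illustrative; names assumed): decode a packed YUY2 camera
// frame to ARGB, cropping a centered 320x240 window (offsets assumed even)
// and rotating 90 degrees, so the destination is 240 pixels wide by 320 tall.
#if 0
static int SampleCameraToArgb(const uint8_t* frame, size_t frame_size,
                              uint8_t* argb, int src_w, int src_h) {
  const int crop_w = 320, crop_h = 240;
  return ConvertToARGB(frame, frame_size, argb,
                       crop_h * 4,  // post-rotation row stride in bytes
                       (src_w - crop_w) / 2, (src_h - crop_h) / 2, src_w,
                       src_h, crop_w, crop_h, kRotate90, FOURCC_YUY2);
}
#endif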
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif


@@ -1,272 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdlib.h>
#include "libyuv/convert.h"
#include "libyuv/video_common.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Convert camera sample to I420 with cropping, rotation and vertical flip.
// src_width is used for source stride computation
// src_height is used to compute location of planes, and indicate inversion
// sample_size is measured in bytes and is the size of the frame.
// With MJPEG it is the compressed size of the frame.
LIBYUV_API
int ConvertToI420(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int crop_x,
int crop_y,
int src_width,
int src_height,
int crop_width,
int crop_height,
enum RotationMode rotation,
uint32_t fourcc) {
uint32_t format = CanonicalFourCC(fourcc);
int aligned_src_width = (src_width + 1) & ~1;
const uint8_t* src;
const uint8_t* src_uv;
const int abs_src_height = (src_height < 0) ? -src_height : src_height;
// TODO(nisse): Why allow crop_height < 0?
const int abs_crop_height = (crop_height < 0) ? -crop_height : crop_height;
int r = 0;
LIBYUV_BOOL need_buf =
(rotation && format != FOURCC_I420 && format != FOURCC_NV12 &&
format != FOURCC_NV21 && format != FOURCC_YV12) ||
dst_y == sample;
uint8_t* tmp_y = dst_y;
uint8_t* tmp_u = dst_u;
uint8_t* tmp_v = dst_v;
int tmp_y_stride = dst_stride_y;
int tmp_u_stride = dst_stride_u;
int tmp_v_stride = dst_stride_v;
uint8_t* rotate_buffer = NULL;
const int inv_crop_height =
(src_height < 0) ? -abs_crop_height : abs_crop_height;
if (!dst_y || !dst_u || !dst_v || !sample || src_width <= 0 ||
crop_width <= 0 || src_height == 0 || crop_height == 0) {
return -1;
}
// One pass rotation is available for some formats. For the rest, convert
// to I420 (with optional vertical flipping) into a temporary I420 buffer,
// and then rotate the I420 to the final destination buffer.
// For in-place conversion, if destination dst_y is same as source sample,
// also enable temporary buffer.
if (need_buf) {
int y_size = crop_width * abs_crop_height;
int uv_size = ((crop_width + 1) / 2) * ((abs_crop_height + 1) / 2);
rotate_buffer = (uint8_t*)malloc(y_size + uv_size * 2); /* NOLINT */
if (!rotate_buffer) {
return 1; // Out of memory runtime error.
}
dst_y = rotate_buffer;
dst_u = dst_y + y_size;
dst_v = dst_u + uv_size;
dst_stride_y = crop_width;
dst_stride_u = dst_stride_v = ((crop_width + 1) / 2);
}
switch (format) {
// Single plane formats
case FOURCC_YUY2:
src = sample + (aligned_src_width * crop_y + crop_x) * 2;
r = YUY2ToI420(src, aligned_src_width * 2, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_UYVY:
src = sample + (aligned_src_width * crop_y + crop_x) * 2;
r = UYVYToI420(src, aligned_src_width * 2, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_RGBP:
src = sample + (src_width * crop_y + crop_x) * 2;
r = RGB565ToI420(src, src_width * 2, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_RGBO:
src = sample + (src_width * crop_y + crop_x) * 2;
r = ARGB1555ToI420(src, src_width * 2, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_R444:
src = sample + (src_width * crop_y + crop_x) * 2;
r = ARGB4444ToI420(src, src_width * 2, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_24BG:
src = sample + (src_width * crop_y + crop_x) * 3;
r = RGB24ToI420(src, src_width * 3, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_RAW:
src = sample + (src_width * crop_y + crop_x) * 3;
r = RAWToI420(src, src_width * 3, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_ARGB:
src = sample + (src_width * crop_y + crop_x) * 4;
r = ARGBToI420(src, src_width * 4, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_BGRA:
src = sample + (src_width * crop_y + crop_x) * 4;
r = BGRAToI420(src, src_width * 4, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_ABGR:
src = sample + (src_width * crop_y + crop_x) * 4;
r = ABGRToI420(src, src_width * 4, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_RGBA:
src = sample + (src_width * crop_y + crop_x) * 4;
r = RGBAToI420(src, src_width * 4, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
// TODO(fbarchard): Add AR30 and AB30
case FOURCC_I400:
src = sample + src_width * crop_y + crop_x;
r = I400ToI420(src, src_width, dst_y, dst_stride_y, dst_u, dst_stride_u,
dst_v, dst_stride_v, crop_width, inv_crop_height);
break;
// Biplanar formats
case FOURCC_NV12:
src = sample + (src_width * crop_y + crop_x);
src_uv = sample + (src_width * abs_src_height) +
((crop_y / 2) * aligned_src_width) + ((crop_x / 2) * 2);
r = NV12ToI420Rotate(src, src_width, src_uv, aligned_src_width, dst_y,
dst_stride_y, dst_u, dst_stride_u, dst_v,
dst_stride_v, crop_width, inv_crop_height, rotation);
break;
case FOURCC_NV21:
src = sample + (src_width * crop_y + crop_x);
src_uv = sample + (src_width * abs_src_height) +
((crop_y / 2) * aligned_src_width) + ((crop_x / 2) * 2);
// Call NV12 but with dst_u and dst_v parameters swapped.
r = NV12ToI420Rotate(src, src_width, src_uv, aligned_src_width, dst_y,
dst_stride_y, dst_v, dst_stride_v, dst_u,
dst_stride_u, crop_width, inv_crop_height, rotation);
break;
// Triplanar formats
case FOURCC_I420:
case FOURCC_YV12: {
const uint8_t* src_y = sample + (src_width * crop_y + crop_x);
const uint8_t* src_u;
const uint8_t* src_v;
int halfwidth = (src_width + 1) / 2;
int halfheight = (abs_src_height + 1) / 2;
if (format == FOURCC_YV12) {
src_v = sample + src_width * abs_src_height + halfwidth * (crop_y / 2) +
(crop_x / 2);
src_u = sample + src_width * abs_src_height +
halfwidth * (halfheight + (crop_y / 2)) + (crop_x / 2);
} else {
src_u = sample + src_width * abs_src_height + halfwidth * (crop_y / 2) +
(crop_x / 2);
src_v = sample + src_width * abs_src_height +
halfwidth * (halfheight + (crop_y / 2)) + (crop_x / 2);
}
r = I420Rotate(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_y, dst_stride_y, dst_u, dst_stride_u, dst_v,
dst_stride_v, crop_width, inv_crop_height, rotation);
break;
}
case FOURCC_I422:
case FOURCC_YV16: {
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
int halfwidth = (src_width + 1) / 2;
if (format == FOURCC_YV16) {
src_v = sample + src_width * abs_src_height + halfwidth * crop_y +
(crop_x / 2);
src_u = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + (crop_x / 2);
} else {
src_u = sample + src_width * abs_src_height + halfwidth * crop_y +
(crop_x / 2);
src_v = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + (crop_x / 2);
}
r = I422ToI420(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_y, dst_stride_y, dst_u, dst_stride_u, dst_v,
dst_stride_v, crop_width, inv_crop_height);
break;
}
case FOURCC_I444:
case FOURCC_YV24: {
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
if (format == FOURCC_YV24) {
src_v = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_u = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
} else {
src_u = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_v = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
}
r = I444ToI420(src_y, src_width, src_u, src_width, src_v, src_width,
dst_y, dst_stride_y, dst_u, dst_stride_u, dst_v,
dst_stride_v, crop_width, inv_crop_height);
break;
}
#ifdef HAVE_JPEG
case FOURCC_MJPG:
r = MJPGToI420(sample, sample_size, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, src_width,
abs_src_height, crop_width, inv_crop_height);
break;
#endif
default:
r = -1; // unknown fourcc - return failure code.
}
if (need_buf) {
if (!r) {
r = I420Rotate(dst_y, dst_stride_y, dst_u, dst_stride_u, dst_v,
dst_stride_v, tmp_y, tmp_y_stride, tmp_u, tmp_u_stride,
tmp_v, tmp_v_stride, crop_width, abs_crop_height,
rotation);
}
free(rotate_buffer);
}
return r;
}
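// Usage sketch (illustrative; plane layout assumed Y then U then V): convert
// a full ARGB frame to I420 with no crop or rotation.
#if 0
static int SampleArgbToI420(const uint8_t* argb, uint8_t* dst, int w, int h) {
  const int hw = (w + 1) / 2, hh = (h + 1) / 2;
  uint8_t* y = dst;
  uint8_t* u = y + w * h;
  uint8_t* v = u + hw * hh;
  return ConvertToI420(argb, (size_t)w * h * 4, y, w, u, hw, v, hw,
                       0 /* crop_x */, 0 /* crop_y */, w, h, w, h, kRotate0,
                       FOURCC_ARGB);
}
#endif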
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif


@@ -1,280 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/cpu_id.h"
#if defined(_MSC_VER)
#include <intrin.h> // For __cpuidex()
#endif
#if !defined(__pnacl__) && !defined(__CLR_VER) && \
!defined(__native_client__) && (defined(_M_IX86) || defined(_M_X64)) && \
defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
#include <immintrin.h> // For _xgetbv()
#endif
// For ArmCpuCaps(), but unit tested on all platforms
#include <stdio.h>
#include <string.h>
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// For functions that use the stack and have runtime checks for overflow,
// use SAFEBUFFERS to avoid additional check.
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219) && \
!defined(__clang__)
#define SAFEBUFFERS __declspec(safebuffers)
#else
#define SAFEBUFFERS
#endif
// cpu_info_ variable for SIMD instruction sets detected.
LIBYUV_API int cpu_info_ = 0;
// TODO(fbarchard): Consider using int for cpuid so casting is not needed.
// Low level cpuid for X86.
#if (defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__x86_64__)) && \
!defined(__pnacl__) && !defined(__CLR_VER)
LIBYUV_API
void CpuId(int info_eax, int info_ecx, int* cpu_info) {
#if defined(_MSC_VER)
// Visual C version uses intrinsic or inline x86 assembly.
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
__cpuidex(cpu_info, info_eax, info_ecx);
#elif defined(_M_IX86)
__asm {
mov eax, info_eax
mov ecx, info_ecx
mov edi, cpu_info
cpuid
mov [edi], eax
mov [edi + 4], ebx
mov [edi + 8], ecx
mov [edi + 12], edx
}
#else // Visual C but not x86
if (info_ecx == 0) {
__cpuid(cpu_info, info_eax);
} else {
cpu_info[3] = cpu_info[2] = cpu_info[1] = cpu_info[0] = 0u;
}
#endif
// GCC version uses inline x86 assembly.
#else // defined(_MSC_VER)
int info_ebx, info_edx;
asm volatile(
#if defined(__i386__) && defined(__PIC__)
// Preserve ebx for fpic 32 bit.
"mov %%ebx, %%edi \n"
"cpuid \n"
"xchg %%edi, %%ebx \n"
: "=D"(info_ebx),
#else
"cpuid \n"
: "=b"(info_ebx),
#endif // defined( __i386__) && defined(__PIC__)
"+a"(info_eax), "+c"(info_ecx), "=d"(info_edx));
cpu_info[0] = info_eax;
cpu_info[1] = info_ebx;
cpu_info[2] = info_ecx;
cpu_info[3] = info_edx;
#endif // defined(_MSC_VER)
}
#else // (defined(_M_IX86) || defined(_M_X64) ...
LIBYUV_API
void CpuId(int eax, int ecx, int* cpu_info) {
(void)eax;
(void)ecx;
cpu_info[0] = cpu_info[1] = cpu_info[2] = cpu_info[3] = 0;
}
#endif
// For VS2010 and earlier emit can be used:
// _asm _emit 0x0f _asm _emit 0x01 _asm _emit 0xd0 // For VS2010 and earlier.
// __asm {
// xor ecx, ecx // xcr 0
// xgetbv
// mov xcr0, eax
// }
// For VS2013 and earlier 32 bit, the _xgetbv(0) optimizer produces bad code.
// https://code.google.com/p/libyuv/issues/detail?id=529
#if defined(_M_IX86) && (_MSC_VER < 1900)
#pragma optimize("g", off)
#endif
#if (defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__x86_64__)) && \
!defined(__pnacl__) && !defined(__CLR_VER) && !defined(__native_client__)
// X86 CPUs have xgetbv to detect whether the OS saves the high parts of the
// YMM registers.
int GetXCR0() {
int xcr0 = 0;
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
xcr0 = (int)_xgetbv(0); // VS2010 SP1 required. NOLINT
#elif defined(__i386__) || defined(__x86_64__)
asm(".byte 0x0f, 0x01, 0xd0" : "=a"(xcr0) : "c"(0) : "%edx");
#endif // defined(__i386__) || defined(__x86_64__)
return xcr0;
}
#else
// xgetbv unavailable to query for OSSave support. Return 0.
#define GetXCR0() 0
#endif // defined(_M_IX86) || defined(_M_X64) ..
// Return optimization to previous setting.
#if defined(_M_IX86) && (_MSC_VER < 1900)
#pragma optimize("g", on)
#endif
// based on libvpx arm_cpudetect.c
// For Arm, but public to allow testing on any CPU
LIBYUV_API SAFEBUFFERS int ArmCpuCaps(const char* cpuinfo_name) {
char cpuinfo_line[512];
FILE* f = fopen(cpuinfo_name, "r");
if (!f) {
// Assume Neon if /proc/cpuinfo is unavailable.
// This will occur for Chrome sandbox for Pepper or Render process.
return kCpuHasNEON;
}
while (fgets(cpuinfo_line, sizeof(cpuinfo_line) - 1, f)) {
if (memcmp(cpuinfo_line, "Features", 8) == 0) {
char* p = strstr(cpuinfo_line, " neon");
if (p && (p[5] == ' ' || p[5] == '\n')) {
fclose(f);
return kCpuHasNEON;
}
// aarch64 uses asimd for Neon.
p = strstr(cpuinfo_line, " asimd");
if (p) {
fclose(f);
return kCpuHasNEON;
}
}
}
fclose(f);
return 0;
}
// TODO(fbarchard): Consider read_msa_ir().
LIBYUV_API SAFEBUFFERS int MipsCpuCaps(const char* cpuinfo_name) {
char cpuinfo_line[512];
int flag = 0x0;
FILE* f = fopen(cpuinfo_name, "r");
if (!f) {
// Assume nothing if /proc/cpuinfo is unavailable.
// This will occur for Chrome sandbox for Pepper or Render process.
return 0;
}
while (fgets(cpuinfo_line, sizeof(cpuinfo_line) - 1, f)) {
if (memcmp(cpuinfo_line, "cpu model", 9) == 0) {
// Work around early kernels that omit mmi from the ASEs line.
if (strstr(cpuinfo_line, "Loongson-3")) {
flag |= kCpuHasMMI;
} else if (strstr(cpuinfo_line, "Loongson-2K")) {
flag |= kCpuHasMMI | kCpuHasMSA;
}
}
if (memcmp(cpuinfo_line, "ASEs implemented", 16) == 0) {
if (strstr(cpuinfo_line, "loongson-mmi") &&
strstr(cpuinfo_line, "loongson-ext")) {
flag |= kCpuHasMMI;
}
if (strstr(cpuinfo_line, "msa")) {
flag |= kCpuHasMSA;
}
// ASEs is the last line, so we can break here.
break;
}
}
fclose(f);
return flag;
}
static SAFEBUFFERS int GetCpuFlags(void) {
int cpu_info = 0;
#if !defined(__pnacl__) && !defined(__CLR_VER) && \
(defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \
defined(_M_IX86))
int cpu_info0[4] = {0, 0, 0, 0};
int cpu_info1[4] = {0, 0, 0, 0};
int cpu_info7[4] = {0, 0, 0, 0};
CpuId(0, 0, cpu_info0);
CpuId(1, 0, cpu_info1);
if (cpu_info0[0] >= 7) {
CpuId(7, 0, cpu_info7);
}
cpu_info = kCpuHasX86 | ((cpu_info1[3] & 0x04000000) ? kCpuHasSSE2 : 0) |
((cpu_info1[2] & 0x00000200) ? kCpuHasSSSE3 : 0) |
((cpu_info1[2] & 0x00080000) ? kCpuHasSSE41 : 0) |
((cpu_info1[2] & 0x00100000) ? kCpuHasSSE42 : 0) |
((cpu_info7[1] & 0x00000200) ? kCpuHasERMS : 0);
// AVX requires that the OS save the YMM registers.
if (((cpu_info1[2] & 0x1c000000) == 0x1c000000) && // AVX and OSXSave
((GetXCR0() & 6) == 6)) { // Test OS saves YMM registers
cpu_info |= kCpuHasAVX | ((cpu_info7[1] & 0x00000020) ? kCpuHasAVX2 : 0) |
((cpu_info1[2] & 0x00001000) ? kCpuHasFMA3 : 0) |
((cpu_info1[2] & 0x20000000) ? kCpuHasF16C : 0);
// Detect AVX512bw
if ((GetXCR0() & 0xe0) == 0xe0) {
cpu_info |= (cpu_info7[1] & 0x40000000) ? kCpuHasAVX512BW : 0;
cpu_info |= (cpu_info7[1] & 0x80000000) ? kCpuHasAVX512VL : 0;
cpu_info |= (cpu_info7[2] & 0x00000002) ? kCpuHasAVX512VBMI : 0;
cpu_info |= (cpu_info7[2] & 0x00000040) ? kCpuHasAVX512VBMI2 : 0;
cpu_info |= (cpu_info7[2] & 0x00001000) ? kCpuHasAVX512VBITALG : 0;
cpu_info |= (cpu_info7[2] & 0x00004000) ? kCpuHasAVX512VPOPCNTDQ : 0;
cpu_info |= (cpu_info7[2] & 0x00000100) ? kCpuHasGFNI : 0;
}
}
#endif
#if defined(__mips__) && defined(__linux__)
cpu_info = MipsCpuCaps("/proc/cpuinfo");
cpu_info |= kCpuHasMIPS;
#endif
#if defined(__arm__) || defined(__aarch64__)
// gcc -mfpu=neon defines __ARM_NEON__
// __ARM_NEON__ generates code that requires Neon. NaCl also requires Neon.
// For Linux, /proc/cpuinfo can be tested but without that assume Neon.
#if defined(__ARM_NEON__) || defined(__native_client__) || !defined(__linux__)
cpu_info = kCpuHasNEON;
// For aarch64 (arm64), the feature list in /proc/cpuinfo is incomplete, e.g.
// there is no neon flag in it, so Neon support is hard coded below.
#endif
#if defined(__aarch64__)
cpu_info = kCpuHasNEON;
#else
// Linux arm parse text file for neon detect.
cpu_info = ArmCpuCaps("/proc/cpuinfo");
#endif
cpu_info |= kCpuHasARM;
#endif // __arm__
cpu_info |= kCpuInitialized;
return cpu_info;
}
// Note that use of this function is not thread safe.
LIBYUV_API
int MaskCpuFlags(int enable_flags) {
int cpu_info = GetCpuFlags() & enable_flags;
SetCpuFlags(cpu_info);
return cpu_info;
}
LIBYUV_API
int InitCpuFlags(void) {
return MaskCpuFlags(-1);
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
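
To make the detection flow above concrete, here is a minimal caller-side sketch. It is illustrative only and assumes nothing beyond the public cpu_id.h API used in this file (TestCpuFlag, MaskCpuFlags, and the kCpuHas*/kCpuInitialized constants):

#include "libyuv/cpu_id.h"
#include <stdio.h>

int main(void) {
  // The first TestCpuFlag() call lazily runs GetCpuFlags() and caches the
  // result in cpu_info_, so detection cost is paid once per process.
  if (libyuv::TestCpuFlag(libyuv::kCpuHasAVX2)) {
    printf("AVX2 path available\n");
  } else if (libyuv::TestCpuFlag(libyuv::kCpuHasNEON)) {
    printf("NEON path available\n");
  } else {
    printf("C fallback\n");
  }
  // For benchmarking the C paths, SIMD flags can be masked off. Keeping
  // kCpuInitialized set prevents the next TestCpuFlag() from re-detecting.
  libyuv::MaskCpuFlags(libyuv::kCpuInitialized);
  return 0;
}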

View File

@@ -1,585 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/mjpeg_decoder.h"
#ifdef HAVE_JPEG
#include <assert.h>
#if !defined(__pnacl__) && !defined(__CLR_VER) && \
!defined(COVERAGE_ENABLED) && !defined(TARGET_IPHONE_SIMULATOR)
// Must be included before jpeglib.
#include <setjmp.h>
#define HAVE_SETJMP
#if defined(_MSC_VER)
// disable warning 4324: structure was padded due to __declspec(align())
#pragma warning(disable : 4324)
#endif
#endif
#include <stdio.h> // For jpeglib.h.
// C++ build requires extern C for jpeg internals.
#ifdef __cplusplus
extern "C" {
#endif
#include <jpeglib.h>
#ifdef __cplusplus
} // extern "C"
#endif
#include "libyuv/planar_functions.h" // For CopyPlane().
namespace libyuv {
#ifdef HAVE_SETJMP
struct SetJmpErrorMgr {
jpeg_error_mgr base; // Must be at the top
jmp_buf setjmp_buffer;
};
#endif
const int MJpegDecoder::kColorSpaceUnknown = JCS_UNKNOWN;
const int MJpegDecoder::kColorSpaceGrayscale = JCS_GRAYSCALE;
const int MJpegDecoder::kColorSpaceRgb = JCS_RGB;
const int MJpegDecoder::kColorSpaceYCbCr = JCS_YCbCr;
const int MJpegDecoder::kColorSpaceCMYK = JCS_CMYK;
const int MJpegDecoder::kColorSpaceYCCK = JCS_YCCK;
// Methods that are passed to jpeglib.
boolean fill_input_buffer(jpeg_decompress_struct* cinfo);
void init_source(jpeg_decompress_struct* cinfo);
void skip_input_data(jpeg_decompress_struct* cinfo, long num_bytes); // NOLINT
void term_source(jpeg_decompress_struct* cinfo);
void ErrorHandler(jpeg_common_struct* cinfo);
void OutputHandler(jpeg_common_struct* cinfo);
MJpegDecoder::MJpegDecoder()
: has_scanline_padding_(LIBYUV_FALSE),
num_outbufs_(0),
scanlines_(NULL),
scanlines_sizes_(NULL),
databuf_(NULL),
databuf_strides_(NULL) {
decompress_struct_ = new jpeg_decompress_struct;
source_mgr_ = new jpeg_source_mgr;
#ifdef HAVE_SETJMP
error_mgr_ = new SetJmpErrorMgr;
decompress_struct_->err = jpeg_std_error(&error_mgr_->base);
// Override standard exit()-based error handler.
error_mgr_->base.error_exit = &ErrorHandler;
error_mgr_->base.output_message = &OutputHandler;
#endif
decompress_struct_->client_data = NULL;
source_mgr_->init_source = &init_source;
source_mgr_->fill_input_buffer = &fill_input_buffer;
source_mgr_->skip_input_data = &skip_input_data;
source_mgr_->resync_to_restart = &jpeg_resync_to_restart;
source_mgr_->term_source = &term_source;
jpeg_create_decompress(decompress_struct_);
decompress_struct_->src = source_mgr_;
buf_vec_.buffers = &buf_;
buf_vec_.len = 1;
}
MJpegDecoder::~MJpegDecoder() {
jpeg_destroy_decompress(decompress_struct_);
delete decompress_struct_;
delete source_mgr_;
#ifdef HAVE_SETJMP
delete error_mgr_;
#endif
DestroyOutputBuffers();
}
LIBYUV_BOOL MJpegDecoder::LoadFrame(const uint8_t* src, size_t src_len) {
if (!ValidateJpeg(src, src_len)) {
return LIBYUV_FALSE;
}
buf_.data = src;
buf_.len = static_cast<int>(src_len);
buf_vec_.pos = 0;
decompress_struct_->client_data = &buf_vec_;
#ifdef HAVE_SETJMP
if (setjmp(error_mgr_->setjmp_buffer)) {
// We called jpeg_read_header, it experienced an error, and we called
// longjmp() and rewound the stack to here. Return error.
return LIBYUV_FALSE;
}
#endif
if (jpeg_read_header(decompress_struct_, TRUE) != JPEG_HEADER_OK) {
// ERROR: Bad MJPEG header
return LIBYUV_FALSE;
}
AllocOutputBuffers(GetNumComponents());
for (int i = 0; i < num_outbufs_; ++i) {
int scanlines_size = GetComponentScanlinesPerImcuRow(i);
if (scanlines_sizes_[i] != scanlines_size) {
if (scanlines_[i]) {
delete[] scanlines_[i];  // allocated with new[], so use delete[].
}
scanlines_[i] = new uint8_t*[scanlines_size];
scanlines_sizes_[i] = scanlines_size;
}
// We allocate padding for the final scanline to pad it out to a multiple of
// DCTSIZE bytes to avoid memory errors, since jpeglib only reads full MCU
// blocks. For
// the preceding scanlines, the padding is not needed/wanted because the
// following addresses will already be valid (they are the initial bytes of
// the next scanline) and will be overwritten when jpeglib writes out that
// next scanline.
int databuf_stride = GetComponentStride(i);
int databuf_size = scanlines_size * databuf_stride;
if (databuf_strides_[i] != databuf_stride) {
if (databuf_[i]) {
delete[] databuf_[i];  // allocated with new[], so use delete[].
}
databuf_[i] = new uint8_t[databuf_size];
databuf_strides_[i] = databuf_stride;
}
if (GetComponentStride(i) != GetComponentWidth(i)) {
has_scanline_padding_ = LIBYUV_TRUE;
}
}
return LIBYUV_TRUE;
}
static int DivideAndRoundUp(int numerator, int denominator) {
return (numerator + denominator - 1) / denominator;
}
static int DivideAndRoundDown(int numerator, int denominator) {
return numerator / denominator;
}
// Returns width of the last loaded frame.
int MJpegDecoder::GetWidth() {
return decompress_struct_->image_width;
}
// Returns height of the last loaded frame.
int MJpegDecoder::GetHeight() {
return decompress_struct_->image_height;
}
// Returns format of the last loaded frame. The return value is one of the
// kColorSpace* constants.
int MJpegDecoder::GetColorSpace() {
return decompress_struct_->jpeg_color_space;
}
// Number of color components in the color space.
int MJpegDecoder::GetNumComponents() {
return decompress_struct_->num_components;
}
// Sample factors of the n-th component.
int MJpegDecoder::GetHorizSampFactor(int component) {
return decompress_struct_->comp_info[component].h_samp_factor;
}
int MJpegDecoder::GetVertSampFactor(int component) {
return decompress_struct_->comp_info[component].v_samp_factor;
}
int MJpegDecoder::GetHorizSubSampFactor(int component) {
return decompress_struct_->max_h_samp_factor / GetHorizSampFactor(component);
}
int MJpegDecoder::GetVertSubSampFactor(int component) {
return decompress_struct_->max_v_samp_factor / GetVertSampFactor(component);
}
int MJpegDecoder::GetImageScanlinesPerImcuRow() {
return decompress_struct_->max_v_samp_factor * DCTSIZE;
}
int MJpegDecoder::GetComponentScanlinesPerImcuRow(int component) {
int vs = GetVertSubSampFactor(component);
return DivideAndRoundUp(GetImageScanlinesPerImcuRow(), vs);
}
int MJpegDecoder::GetComponentWidth(int component) {
int hs = GetHorizSubSampFactor(component);
return DivideAndRoundUp(GetWidth(), hs);
}
int MJpegDecoder::GetComponentHeight(int component) {
int vs = GetVertSubSampFactor(component);
return DivideAndRoundUp(GetHeight(), vs);
}
// Get width in bytes padded out to a multiple of DCTSIZE
int MJpegDecoder::GetComponentStride(int component) {
return (GetComponentWidth(component) + DCTSIZE - 1) & ~(DCTSIZE - 1);
}
int MJpegDecoder::GetComponentSize(int component) {
return GetComponentWidth(component) * GetComponentHeight(component);
}
LIBYUV_BOOL MJpegDecoder::UnloadFrame() {
#ifdef HAVE_SETJMP
if (setjmp(error_mgr_->setjmp_buffer)) {
// We called jpeg_abort_decompress, it experienced an error, and we called
// longjmp() and rewound the stack to here. Return error.
return LIBYUV_FALSE;
}
#endif
jpeg_abort_decompress(decompress_struct_);
return LIBYUV_TRUE;
}
// TODO(fbarchard): Allow rectangle to be specified: x, y, width, height.
LIBYUV_BOOL MJpegDecoder::DecodeToBuffers(uint8_t** planes,
int dst_width,
int dst_height) {
if (dst_width != GetWidth() || dst_height > GetHeight()) {
// ERROR: Bad dimensions
return LIBYUV_FALSE;
}
#ifdef HAVE_SETJMP
if (setjmp(error_mgr_->setjmp_buffer)) {
// We called into jpeglib, it experienced an error sometime during this
// function call, and we called longjmp() and rewound the stack to here.
// Return error.
return LIBYUV_FALSE;
}
#endif
if (!StartDecode()) {
return LIBYUV_FALSE;
}
SetScanlinePointers(databuf_);
int lines_left = dst_height;
// Compute the number of lines to skip to implement vertical crop.
// TODO(fbarchard): Ensure skip is a multiple of the maximum component
// subsample, i.e. 2.
int skip = (GetHeight() - dst_height) / 2;
if (skip > 0) {
// There is no API to skip lines in the output data, so we read them
// into the temp buffer.
while (skip >= GetImageScanlinesPerImcuRow()) {
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
skip -= GetImageScanlinesPerImcuRow();
}
if (skip > 0) {
// Have a partial iMCU row left over to skip. Must read it and then
// copy the parts we want into the destination.
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
for (int i = 0; i < num_outbufs_; ++i) {
// TODO(fbarchard): Compute skip to avoid this
assert(skip % GetVertSubSampFactor(i) == 0);
int rows_to_skip = DivideAndRoundDown(skip, GetVertSubSampFactor(i));
int scanlines_to_copy =
GetComponentScanlinesPerImcuRow(i) - rows_to_skip;
int data_to_skip = rows_to_skip * GetComponentStride(i);
CopyPlane(databuf_[i] + data_to_skip, GetComponentStride(i), planes[i],
GetComponentWidth(i), GetComponentWidth(i),
scanlines_to_copy);
planes[i] += scanlines_to_copy * GetComponentWidth(i);
}
lines_left -= (GetImageScanlinesPerImcuRow() - skip);
}
}
// Read full MCUs but cropped horizontally
for (; lines_left > GetImageScanlinesPerImcuRow();
lines_left -= GetImageScanlinesPerImcuRow()) {
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
for (int i = 0; i < num_outbufs_; ++i) {
int scanlines_to_copy = GetComponentScanlinesPerImcuRow(i);
CopyPlane(databuf_[i], GetComponentStride(i), planes[i],
GetComponentWidth(i), GetComponentWidth(i), scanlines_to_copy);
planes[i] += scanlines_to_copy * GetComponentWidth(i);
}
}
if (lines_left > 0) {
// Have a partial iMCU row left over to decode.
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
for (int i = 0; i < num_outbufs_; ++i) {
int scanlines_to_copy =
DivideAndRoundUp(lines_left, GetVertSubSampFactor(i));
CopyPlane(databuf_[i], GetComponentStride(i), planes[i],
GetComponentWidth(i), GetComponentWidth(i), scanlines_to_copy);
planes[i] += scanlines_to_copy * GetComponentWidth(i);
}
}
return FinishDecode();
}
LIBYUV_BOOL MJpegDecoder::DecodeToCallback(CallbackFunction fn,
void* opaque,
int dst_width,
int dst_height) {
if (dst_width != GetWidth() || dst_height > GetHeight()) {
// ERROR: Bad dimensions
return LIBYUV_FALSE;
}
#ifdef HAVE_SETJMP
if (setjmp(error_mgr_->setjmp_buffer)) {
// We called into jpeglib, it experienced an error sometime during this
// function call, and we called longjmp() and rewound the stack to here.
// Return error.
return LIBYUV_FALSE;
}
#endif
if (!StartDecode()) {
return LIBYUV_FALSE;
}
SetScanlinePointers(databuf_);
int lines_left = dst_height;
// TODO(fbarchard): Compute the number of lines to skip to implement vertical crop
int skip = (GetHeight() - dst_height) / 2;
if (skip > 0) {
while (skip >= GetImageScanlinesPerImcuRow()) {
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
skip -= GetImageScanlinesPerImcuRow();
}
if (skip > 0) {
// Have a partial iMCU row left over to skip.
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
for (int i = 0; i < num_outbufs_; ++i) {
// TODO(fbarchard): Compute skip to avoid this
assert(skip % GetVertSubSampFactor(i) == 0);
int rows_to_skip = DivideAndRoundDown(skip, GetVertSubSampFactor(i));
int data_to_skip = rows_to_skip * GetComponentStride(i);
// Change our own data buffer pointers so we can pass them to the
// callback.
databuf_[i] += data_to_skip;
}
int scanlines_to_copy = GetImageScanlinesPerImcuRow() - skip;
(*fn)(opaque, databuf_, databuf_strides_, scanlines_to_copy);
// Now change them back.
for (int i = 0; i < num_outbufs_; ++i) {
int rows_to_skip = DivideAndRoundDown(skip, GetVertSubSampFactor(i));
int data_to_skip = rows_to_skip * GetComponentStride(i);
databuf_[i] -= data_to_skip;
}
lines_left -= scanlines_to_copy;
}
}
// Read full MCUs until we get to the crop point.
for (; lines_left >= GetImageScanlinesPerImcuRow();
lines_left -= GetImageScanlinesPerImcuRow()) {
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
(*fn)(opaque, databuf_, databuf_strides_, GetImageScanlinesPerImcuRow());
}
if (lines_left > 0) {
// Have a partial iMCU row left over to decode.
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
(*fn)(opaque, databuf_, databuf_strides_, lines_left);
}
return FinishDecode();
}
void init_source(j_decompress_ptr cinfo) {
fill_input_buffer(cinfo);
}
boolean fill_input_buffer(j_decompress_ptr cinfo) {
BufferVector* buf_vec = reinterpret_cast<BufferVector*>(cinfo->client_data);
if (buf_vec->pos >= buf_vec->len) {
// Don't assert-fail when fuzzing.
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
assert(0 && "No more data");
#endif
// ERROR: No more data
return FALSE;
}
cinfo->src->next_input_byte = buf_vec->buffers[buf_vec->pos].data;
cinfo->src->bytes_in_buffer = buf_vec->buffers[buf_vec->pos].len;
++buf_vec->pos;
return TRUE;
}
void skip_input_data(j_decompress_ptr cinfo, long num_bytes) { // NOLINT
jpeg_source_mgr* src = cinfo->src;
size_t bytes = static_cast<size_t>(num_bytes);
if (bytes > src->bytes_in_buffer) {
src->next_input_byte = nullptr;
src->bytes_in_buffer = 0;
} else {
src->next_input_byte += bytes;
src->bytes_in_buffer -= bytes;
}
}
void term_source(j_decompress_ptr cinfo) {
(void)cinfo; // Nothing to do.
}
#ifdef HAVE_SETJMP
void ErrorHandler(j_common_ptr cinfo) {
// This is called when a jpeglib command experiences an error. Unfortunately
// jpeglib's error handling model is not very flexible, because it expects the
// error handler to not return--i.e., it wants the program to terminate. To
// recover from errors we use setjmp() as shown in their example. setjmp() is
// C's rough analogue of an escape continuation: longjmp() rewinds the stack
// to the matching setjmp() call, provided that frame is still live.
// A formatted message can be output, but is unsafe for release.
#ifdef DEBUG
char buf[JMSG_LENGTH_MAX];
(*cinfo->err->format_message)(cinfo, buf);
// ERROR: Error in jpeglib: buf
#endif
SetJmpErrorMgr* mgr = reinterpret_cast<SetJmpErrorMgr*>(cinfo->err);
// This rewinds the call stack to the point of the corresponding setjmp()
// and causes it to return (for a second time) with value 1.
longjmp(mgr->setjmp_buffer, 1);
}
// Suppress fprintf warnings.
void OutputHandler(j_common_ptr cinfo) {
(void)cinfo;
}
#endif // HAVE_SETJMP
void MJpegDecoder::AllocOutputBuffers(int num_outbufs) {
if (num_outbufs != num_outbufs_) {
// We could perhaps optimize this case to resize the output buffers without
// necessarily having to delete and recreate each one, but it's not worth
// it.
DestroyOutputBuffers();
scanlines_ = new uint8_t**[num_outbufs];
scanlines_sizes_ = new int[num_outbufs];
databuf_ = new uint8_t*[num_outbufs];
databuf_strides_ = new int[num_outbufs];
for (int i = 0; i < num_outbufs; ++i) {
scanlines_[i] = NULL;
scanlines_sizes_[i] = 0;
databuf_[i] = NULL;
databuf_strides_[i] = 0;
}
num_outbufs_ = num_outbufs;
}
}
void MJpegDecoder::DestroyOutputBuffers() {
for (int i = 0; i < num_outbufs_; ++i) {
delete[] scanlines_[i];
delete[] databuf_[i];
}
delete[] scanlines_;
delete[] databuf_;
delete[] scanlines_sizes_;
delete[] databuf_strides_;
scanlines_ = NULL;
databuf_ = NULL;
scanlines_sizes_ = NULL;
databuf_strides_ = NULL;
num_outbufs_ = 0;
}
// JDCT_IFAST and disabling do_block_smoothing improve performance substantially.
LIBYUV_BOOL MJpegDecoder::StartDecode() {
decompress_struct_->raw_data_out = TRUE;
decompress_struct_->dct_method = JDCT_IFAST; // JDCT_ISLOW is default
decompress_struct_->dither_mode = JDITHER_NONE;
// Not applicable to 'raw':
decompress_struct_->do_fancy_upsampling = (boolean)(LIBYUV_FALSE);
// Only for buffered mode:
decompress_struct_->enable_2pass_quant = (boolean)(LIBYUV_FALSE);
// Blocky but fast:
decompress_struct_->do_block_smoothing = (boolean)(LIBYUV_FALSE);
if (!jpeg_start_decompress(decompress_struct_)) {
// ERROR: Couldn't start JPEG decompressor";
return LIBYUV_FALSE;
}
return LIBYUV_TRUE;
}
LIBYUV_BOOL MJpegDecoder::FinishDecode() {
// jpeglib considers it an error if we finish without decoding the whole
// image, so we call "abort" rather than "finish".
jpeg_abort_decompress(decompress_struct_);
return LIBYUV_TRUE;
}
void MJpegDecoder::SetScanlinePointers(uint8_t** data) {
for (int i = 0; i < num_outbufs_; ++i) {
uint8_t* data_i = data[i];
for (int j = 0; j < scanlines_sizes_[i]; ++j) {
scanlines_[i][j] = data_i;
data_i += GetComponentStride(i);
}
}
}
inline LIBYUV_BOOL MJpegDecoder::DecodeImcuRow() {
return (unsigned int)(GetImageScanlinesPerImcuRow()) ==
jpeg_read_raw_data(decompress_struct_, scanlines_,
GetImageScanlinesPerImcuRow());
}
// The helper function which recognizes the jpeg sub-sampling type.
JpegSubsamplingType MJpegDecoder::JpegSubsamplingTypeHelper(
int* subsample_x,
int* subsample_y,
int number_of_components) {
if (number_of_components == 3) { // Color images.
if (subsample_x[0] == 1 && subsample_y[0] == 1 && subsample_x[1] == 2 &&
subsample_y[1] == 2 && subsample_x[2] == 2 && subsample_y[2] == 2) {
return kJpegYuv420;
}
if (subsample_x[0] == 1 && subsample_y[0] == 1 && subsample_x[1] == 2 &&
subsample_y[1] == 1 && subsample_x[2] == 2 && subsample_y[2] == 1) {
return kJpegYuv422;
}
if (subsample_x[0] == 1 && subsample_y[0] == 1 && subsample_x[1] == 1 &&
subsample_y[1] == 1 && subsample_x[2] == 1 && subsample_y[2] == 1) {
return kJpegYuv444;
}
} else if (number_of_components == 1) { // Grey-scale images.
if (subsample_x[0] == 1 && subsample_y[0] == 1) {
return kJpegYuv400;
}
}
return kJpegUnknown;
}
} // namespace libyuv
#endif // HAVE_JPEG
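
As a usage sketch for the decoder above (the frame source and buffer handling are hypothetical; the member functions are the ones defined in this file):

#include "libyuv/mjpeg_decoder.h"

#include <vector>

// Sketch: decode one MJPEG frame into caller-owned component planes.
// jpeg_buf/jpeg_len stand in for a captured frame.
bool DecodeOneFrame(const uint8_t* jpeg_buf, size_t jpeg_len) {
  libyuv::MJpegDecoder decoder;
  if (!decoder.LoadFrame(jpeg_buf, jpeg_len)) {
    return false;  // ValidateJpeg or jpeg_read_header rejected the data.
  }
  const int num = decoder.GetNumComponents();
  // One buffer per color component, sized per component (subsampling aware).
  std::vector<std::vector<uint8_t>> storage(num);
  std::vector<uint8_t*> planes(num);
  for (int i = 0; i < num; ++i) {
    storage[i].resize(decoder.GetComponentSize(i));
    planes[i] = storage[i].data();
  }
  // DecodeToBuffers advances the entries of planes[] as it copies, so keep
  // ownership in storage rather than in planes.
  const bool ok = decoder.DecodeToBuffers(planes.data(), decoder.GetWidth(),
                                          decoder.GetHeight()) == LIBYUV_TRUE;
  decoder.UnloadFrame();
  return ok;
}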

View File

@@ -1,71 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/mjpeg_decoder.h"
#include <string.h> // For memchr.
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Helper function to scan for EOI marker (0xff 0xd9).
static LIBYUV_BOOL ScanEOI(const uint8_t* src_mjpg, size_t src_size_mjpg) {
if (src_size_mjpg >= 2) {
const uint8_t* end = src_mjpg + src_size_mjpg - 1;
const uint8_t* it = src_mjpg;
while (it < end) {
// TODO(fbarchard): scan for 0xd9 instead.
it = (const uint8_t*)(memchr(it, 0xff, end - it));
if (it == NULL) {
break;
}
if (it[1] == 0xd9) {
return LIBYUV_TRUE; // Success: Valid jpeg.
}
++it; // Skip over current 0xff.
}
}
// ERROR: Invalid jpeg; EOI end code not found. Size src_size_mjpg
return LIBYUV_FALSE;
}
// Helper function to validate the jpeg appears intact.
LIBYUV_BOOL ValidateJpeg(const uint8_t* src_mjpg, size_t src_size_mjpg) {
// Maximum size that ValidateJpeg will consider valid.
const size_t kMaxJpegSize = 0x7fffffffull;
const size_t kBackSearchSize = 1024;
if (src_size_mjpg < 64 || src_size_mjpg > kMaxJpegSize || !src_mjpg) {
// ERROR: Invalid jpeg size: src_size_mjpg
return LIBYUV_FALSE;
}
// SOI marker
if (src_mjpg[0] != 0xff || src_mjpg[1] != 0xd8 || src_mjpg[2] != 0xff) {
// ERROR: Invalid jpeg initial start code
return LIBYUV_FALSE;
}
// Look for the End Of Image (EOI) marker near the end of the buffer.
if (src_size_mjpg > kBackSearchSize) {
if (ScanEOI(src_mjpg + src_size_mjpg - kBackSearchSize, kBackSearchSize)) {
return LIBYUV_TRUE; // Success: Valid jpeg.
}
// Reduce search size for forward search.
src_size_mjpg = src_size_mjpg - kBackSearchSize + 1;
}
// Step over SOI marker and scan for EOI.
return ScanEOI(src_mjpg + 2, src_size_mjpg - 2);
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
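
A small usage sketch for the validator (the capture callback is hypothetical; ValidateJpeg is the function defined above, declared in libyuv/mjpeg_decoder.h):

#include "libyuv/mjpeg_decoder.h"  // declares ValidateJpeg

// Sketch: gate decoding on the cheap structural check above. USB cameras
// occasionally deliver truncated MJPEG frames, which this rejects early.
void OnCameraFrame(const uint8_t* data, size_t size) {
  if (!libyuv::ValidateJpeg(data, size)) {
    return;  // Missing SOI/EOI marker; drop the frame instead of decoding.
  }
  // ... hand the validated frame to MJpegDecoder::LoadFrame() ...
}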

File diff suppressed because it is too large

View File

@@ -1,609 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate.h"
#include "libyuv/convert.h"
#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h"
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
LIBYUV_API
void TransposePlane(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height) {
int i = height;
#if defined(HAS_TRANSPOSEWX16_MSA)
void (*TransposeWx16)(const uint8_t* src, int src_stride, uint8_t* dst,
int dst_stride, int width) = TransposeWx16_C;
#else
void (*TransposeWx8)(const uint8_t* src, int src_stride, uint8_t* dst,
int dst_stride, int width) = TransposeWx8_C;
#endif
#if defined(HAS_TRANSPOSEWX16_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
TransposeWx16 = TransposeWx16_Any_MSA;
if (IS_ALIGNED(width, 16)) {
TransposeWx16 = TransposeWx16_MSA;
}
}
#else
#if defined(HAS_TRANSPOSEWX8_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
TransposeWx8 = TransposeWx8_NEON;
}
#endif
#if defined(HAS_TRANSPOSEWX8_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
TransposeWx8 = TransposeWx8_Any_SSSE3;
if (IS_ALIGNED(width, 8)) {
TransposeWx8 = TransposeWx8_SSSE3;
}
}
#endif
#if defined(HAS_TRANSPOSEWX8_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
TransposeWx8 = TransposeWx8_MMI;
}
#endif
#if defined(HAS_TRANSPOSEWX8_FAST_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
TransposeWx8 = TransposeWx8_Fast_Any_SSSE3;
if (IS_ALIGNED(width, 16)) {
TransposeWx8 = TransposeWx8_Fast_SSSE3;
}
}
#endif
#endif /* defined(HAS_TRANSPOSEWX16_MSA) */
#if defined(HAS_TRANSPOSEWX16_MSA)
// Work across the source in 16x16 tiles
while (i >= 16) {
TransposeWx16(src, src_stride, dst, dst_stride, width);
src += 16 * src_stride; // Go down 16 rows.
dst += 16; // Move over 16 columns.
i -= 16;
}
#else
// Work across the source in 8x8 tiles
while (i >= 8) {
TransposeWx8(src, src_stride, dst, dst_stride, width);
src += 8 * src_stride; // Go down 8 rows.
dst += 8; // Move over 8 columns.
i -= 8;
}
#endif
if (i > 0) {
TransposeWxH_C(src, src_stride, dst, dst_stride, width, i);
}
}
LIBYUV_API
void RotatePlane90(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height) {
// Rotate by 90 is a transpose with the source read
// from bottom to top. So set the source pointer to the end
// of the buffer and flip the sign of the source stride.
src += src_stride * (height - 1);
src_stride = -src_stride;
TransposePlane(src, src_stride, dst, dst_stride, width, height);
}
LIBYUV_API
void RotatePlane270(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height) {
// Rotate by 270 is a transpose with the destination written
// from bottom to top. So set the destination pointer to the end
// of the buffer and flip the sign of the destination stride.
dst += dst_stride * (width - 1);
dst_stride = -dst_stride;
TransposePlane(src, src_stride, dst, dst_stride, width, height);
}
LIBYUV_API
void RotatePlane180(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height) {
// Swap first and last row and mirror the content. Uses a temporary row.
align_buffer_64(row, width);
const uint8_t* src_bot = src + src_stride * (height - 1);
uint8_t* dst_bot = dst + dst_stride * (height - 1);
int half_height = (height + 1) >> 1;
int y;
void (*MirrorRow)(const uint8_t* src, uint8_t* dst, int width) = MirrorRow_C;
void (*CopyRow)(const uint8_t* src, uint8_t* dst, int width) = CopyRow_C;
#if defined(HAS_MIRRORROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
MirrorRow = MirrorRow_Any_NEON;
if (IS_ALIGNED(width, 32)) {
MirrorRow = MirrorRow_NEON;
}
}
#endif
#if defined(HAS_MIRRORROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
MirrorRow = MirrorRow_Any_SSSE3;
if (IS_ALIGNED(width, 16)) {
MirrorRow = MirrorRow_SSSE3;
}
}
#endif
#if defined(HAS_MIRRORROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
MirrorRow = MirrorRow_Any_AVX2;
if (IS_ALIGNED(width, 32)) {
MirrorRow = MirrorRow_AVX2;
}
}
#endif
#if defined(HAS_MIRRORROW_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
MirrorRow = MirrorRow_Any_MMI;
if (IS_ALIGNED(width, 8)) {
MirrorRow = MirrorRow_MMI;
}
}
#endif
#if defined(HAS_MIRRORROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
MirrorRow = MirrorRow_Any_MSA;
if (IS_ALIGNED(width, 64)) {
MirrorRow = MirrorRow_MSA;
}
}
#endif
#if defined(HAS_COPYROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
CopyRow = IS_ALIGNED(width, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
}
#endif
#if defined(HAS_COPYROW_AVX)
if (TestCpuFlag(kCpuHasAVX)) {
CopyRow = IS_ALIGNED(width, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
}
#endif
#if defined(HAS_COPYROW_ERMS)
if (TestCpuFlag(kCpuHasERMS)) {
CopyRow = CopyRow_ERMS;
}
#endif
#if defined(HAS_COPYROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
CopyRow = IS_ALIGNED(width, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
}
#endif
#if defined(HAS_COPYROW_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
CopyRow = IS_ALIGNED(width, 8) ? CopyRow_MMI : CopyRow_Any_MMI;
}
#endif
// Odd height will harmlessly mirror the middle row twice.
for (y = 0; y < half_height; ++y) {
CopyRow(src, row, width); // Copy first row into buffer
MirrorRow(src_bot, dst, width); // Mirror last row into first row
MirrorRow(row, dst_bot, width); // Mirror buffer into last row
src += src_stride;
dst += dst_stride;
src_bot -= src_stride;
dst_bot -= dst_stride;
}
free_aligned_buffer_64(row);
}
LIBYUV_API
void TransposeUV(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height) {
int i = height;
#if defined(HAS_TRANSPOSEUVWX16_MSA)
void (*TransposeUVWx16)(const uint8_t* src, int src_stride, uint8_t* dst_a,
int dst_stride_a, uint8_t* dst_b, int dst_stride_b,
int width) = TransposeUVWx16_C;
#else
void (*TransposeUVWx8)(const uint8_t* src, int src_stride, uint8_t* dst_a,
int dst_stride_a, uint8_t* dst_b, int dst_stride_b,
int width) = TransposeUVWx8_C;
#endif
#if defined(HAS_TRANSPOSEUVWX16_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
TransposeUVWx16 = TransposeUVWx16_Any_MSA;
if (IS_ALIGNED(width, 8)) {
TransposeUVWx16 = TransposeUVWx16_MSA;
}
}
#else
#if defined(HAS_TRANSPOSEUVWX8_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
TransposeUVWx8 = TransposeUVWx8_NEON;
}
#endif
#if defined(HAS_TRANSPOSEUVWX8_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
TransposeUVWx8 = TransposeUVWx8_Any_SSE2;
if (IS_ALIGNED(width, 8)) {
TransposeUVWx8 = TransposeUVWx8_SSE2;
}
}
#endif
#if defined(HAS_TRANSPOSEUVWX8_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
TransposeUVWx8 = TransposeUVWx8_Any_MMI;
if (IS_ALIGNED(width, 4)) {
TransposeUVWx8 = TransposeUVWx8_MMI;
}
}
#endif
#endif /* defined(HAS_TRANSPOSEUVWX16_MSA) */
#if defined(HAS_TRANSPOSEUVWX16_MSA)
// Work through the source in 16x16 tiles.
while (i >= 16) {
TransposeUVWx16(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
width);
src += 16 * src_stride; // Go down 16 rows.
dst_a += 16; // Move over 16 columns.
dst_b += 16; // Move over 16 columns.
i -= 16;
}
#else
// Work through the source in 8x8 tiles.
while (i >= 8) {
TransposeUVWx8(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
width);
src += 8 * src_stride; // Go down 8 rows.
dst_a += 8; // Move over 8 columns.
dst_b += 8; // Move over 8 columns.
i -= 8;
}
#endif
if (i > 0) {
TransposeUVWxH_C(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
width, i);
}
}
LIBYUV_API
void RotateUV90(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height) {
src += src_stride * (height - 1);
src_stride = -src_stride;
TransposeUV(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b, width,
height);
}
LIBYUV_API
void RotateUV270(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height) {
dst_a += dst_stride_a * (width - 1);
dst_b += dst_stride_b * (width - 1);
dst_stride_a = -dst_stride_a;
dst_stride_b = -dst_stride_b;
TransposeUV(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b, width,
height);
}
// Rotate 180 is a horizontal and vertical flip.
LIBYUV_API
void RotateUV180(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height) {
int i;
void (*MirrorSplitUVRow)(const uint8_t* src, uint8_t* dst_u, uint8_t* dst_v,
int width) = MirrorSplitUVRow_C;
#if defined(HAS_MIRRORSPLITUVROW_NEON)
if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 16)) {
MirrorSplitUVRow = MirrorSplitUVRow_NEON;
}
#endif
#if defined(HAS_MIRRORSPLITUVROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 16)) {
MirrorSplitUVRow = MirrorSplitUVRow_SSSE3;
}
#endif
#if defined(HAS_MIRRORSPLITUVROW_MMI)
if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(width, 8)) {
MirrorSplitUVRow = MirrorSplitUVRow_MMI;
}
#endif
#if defined(HAS_MIRRORSPLITUVROW_MSA)
if (TestCpuFlag(kCpuHasMSA) && IS_ALIGNED(width, 32)) {
MirrorSplitUVRow = MirrorSplitUVRow_MSA;
}
#endif
dst_a += dst_stride_a * (height - 1);
dst_b += dst_stride_b * (height - 1);
for (i = 0; i < height; ++i) {
MirrorSplitUVRow(src, dst_a, dst_b, width);
src += src_stride;
dst_a -= dst_stride_a;
dst_b -= dst_stride_b;
}
}
LIBYUV_API
int RotatePlane(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height,
enum RotationMode mode) {
if (!src || width <= 0 || height == 0 || !dst) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
src = src + (height - 1) * src_stride;
src_stride = -src_stride;
}
switch (mode) {
case kRotate0:
// copy frame
CopyPlane(src, src_stride, dst, dst_stride, width, height);
return 0;
case kRotate90:
RotatePlane90(src, src_stride, dst, dst_stride, width, height);
return 0;
case kRotate270:
RotatePlane270(src, src_stride, dst, dst_stride, width, height);
return 0;
case kRotate180:
RotatePlane180(src, src_stride, dst, dst_stride, width, height);
return 0;
default:
break;
}
return -1;
}
LIBYUV_API
int I420Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum RotationMode mode) {
int halfwidth = (width + 1) >> 1;
int halfheight = (height + 1) >> 1;
if (!src_y || !src_u || !src_v || width <= 0 || height == 0 || !dst_y ||
!dst_u || !dst_v) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
halfheight = (height + 1) >> 1;
src_y = src_y + (height - 1) * src_stride_y;
src_u = src_u + (halfheight - 1) * src_stride_u;
src_v = src_v + (halfheight - 1) * src_stride_v;
src_stride_y = -src_stride_y;
src_stride_u = -src_stride_u;
src_stride_v = -src_stride_v;
}
switch (mode) {
case kRotate0:
// copy frame
return I420Copy(src_y, src_stride_y, src_u, src_stride_u, src_v,
src_stride_v, dst_y, dst_stride_y, dst_u, dst_stride_u,
dst_v, dst_stride_v, width, height);
case kRotate90:
RotatePlane90(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotatePlane90(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth,
halfheight);
RotatePlane90(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth,
halfheight);
return 0;
case kRotate270:
RotatePlane270(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotatePlane270(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth,
halfheight);
RotatePlane270(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth,
halfheight);
return 0;
case kRotate180:
RotatePlane180(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotatePlane180(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth,
halfheight);
RotatePlane180(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth,
halfheight);
return 0;
default:
break;
}
return -1;
}
LIBYUV_API
int I444Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum libyuv::RotationMode mode) {
if (!src_y || !src_u || !src_v || width <= 0 || height == 0 || !dst_y ||
!dst_u || !dst_v) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
src_y = src_y + (height - 1) * src_stride_y;
src_u = src_u + (height - 1) * src_stride_u;
src_v = src_v + (height - 1) * src_stride_v;
src_stride_y = -src_stride_y;
src_stride_u = -src_stride_u;
src_stride_v = -src_stride_v;
}
switch (mode) {
case libyuv::kRotate0:
// copy frame
CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height);
CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width, height);
return 0;
case libyuv::kRotate90:
RotatePlane90(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotatePlane90(src_u, src_stride_u, dst_u, dst_stride_u, width, height);
RotatePlane90(src_v, src_stride_v, dst_v, dst_stride_v, width, height);
return 0;
case libyuv::kRotate270:
RotatePlane270(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotatePlane270(src_u, src_stride_u, dst_u, dst_stride_u, width, height);
RotatePlane270(src_v, src_stride_v, dst_v, dst_stride_v, width, height);
return 0;
case libyuv::kRotate180:
RotatePlane180(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotatePlane180(src_u, src_stride_u, dst_u, dst_stride_u, width, height);
RotatePlane180(src_v, src_stride_v, dst_v, dst_stride_v, width, height);
return 0;
default:
break;
}
return -1;
}
LIBYUV_API
int NV12ToI420Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum RotationMode mode) {
int halfwidth = (width + 1) >> 1;
int halfheight = (height + 1) >> 1;
if (!src_y || !src_uv || width <= 0 || height == 0 || !dst_y || !dst_u ||
!dst_v) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
halfheight = (height + 1) >> 1;
src_y = src_y + (height - 1) * src_stride_y;
src_uv = src_uv + (halfheight - 1) * src_stride_uv;
src_stride_y = -src_stride_y;
src_stride_uv = -src_stride_uv;
}
switch (mode) {
case kRotate0:
// copy frame
return NV12ToI420(src_y, src_stride_y, src_uv, src_stride_uv, dst_y,
dst_stride_y, dst_u, dst_stride_u, dst_v, dst_stride_v,
width, height);
case kRotate90:
RotatePlane90(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotateUV90(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v,
dst_stride_v, halfwidth, halfheight);
return 0;
case kRotate270:
RotatePlane270(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotateUV270(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v,
dst_stride_v, halfwidth, halfheight);
return 0;
case kRotate180:
RotatePlane180(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotateUV180(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v,
dst_stride_v, halfwidth, halfheight);
return 0;
default:
break;
}
return -1;
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
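
To illustrate the width/height swap that kRotate90 and kRotate270 imply, a hedged sketch (tightly packed planes assumed; the helper and its buffer layout are illustrative):

#include "libyuv/rotate.h"

#include <vector>

// Sketch: rotate a tightly packed I420 frame by 90 degrees. The destination
// is height x width, so destination strides derive from the source height.
bool RotateI420By90(const uint8_t* src_y, const uint8_t* src_u,
                    const uint8_t* src_v, int width, int height,
                    std::vector<uint8_t>* dst) {
  const int dst_w = height;  // rotated width
  const int dst_h = width;   // rotated height
  const int dst_hw = (dst_w + 1) >> 1;
  const int dst_hh = (dst_h + 1) >> 1;
  dst->resize(dst_w * dst_h + 2 * dst_hw * dst_hh);
  uint8_t* dst_y = dst->data();
  uint8_t* dst_u = dst_y + dst_w * dst_h;
  uint8_t* dst_v = dst_u + dst_hw * dst_hh;
  return libyuv::I420Rotate(src_y, width, src_u, (width + 1) >> 1,
                            src_v, (width + 1) >> 1, dst_y, dst_w,
                            dst_u, dst_hw, dst_v, dst_hw, width, height,
                            libyuv::kRotate90) == 0;
}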

View File

@@ -1,79 +0,0 @@
/*
* Copyright 2015 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate.h"
#include "libyuv/rotate_row.h"
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#define TANY(NAMEANY, TPOS_SIMD, MASK) \
void NAMEANY(const uint8_t* src, int src_stride, uint8_t* dst, \
int dst_stride, int width) { \
int r = width & MASK; \
int n = width - r; \
if (n > 0) { \
TPOS_SIMD(src, src_stride, dst, dst_stride, n); \
} \
TransposeWx8_C(src + n, src_stride, dst + n * dst_stride, dst_stride, r); \
}
#ifdef HAS_TRANSPOSEWX8_NEON
TANY(TransposeWx8_Any_NEON, TransposeWx8_NEON, 7)
#endif
#ifdef HAS_TRANSPOSEWX8_SSSE3
TANY(TransposeWx8_Any_SSSE3, TransposeWx8_SSSE3, 7)
#endif
#ifdef HAS_TRANSPOSEWX8_MMI
TANY(TransposeWx8_Any_MMI, TransposeWx8_MMI, 7)
#endif
#ifdef HAS_TRANSPOSEWX8_FAST_SSSE3
TANY(TransposeWx8_Fast_Any_SSSE3, TransposeWx8_Fast_SSSE3, 15)
#endif
#ifdef HAS_TRANSPOSEWX16_MSA
TANY(TransposeWx16_Any_MSA, TransposeWx16_MSA, 15)
#endif
#undef TANY
#define TUVANY(NAMEANY, TPOS_SIMD, MASK) \
void NAMEANY(const uint8_t* src, int src_stride, uint8_t* dst_a, \
int dst_stride_a, uint8_t* dst_b, int dst_stride_b, \
int width) { \
int r = width & MASK; \
int n = width - r; \
if (n > 0) { \
TPOS_SIMD(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b, n); \
} \
TransposeUVWx8_C(src + n * 2, src_stride, dst_a + n * dst_stride_a, \
dst_stride_a, dst_b + n * dst_stride_b, dst_stride_b, r); \
}
#ifdef HAS_TRANSPOSEUVWX8_NEON
TUVANY(TransposeUVWx8_Any_NEON, TransposeUVWx8_NEON, 7)
#endif
#ifdef HAS_TRANSPOSEUVWX8_SSE2
TUVANY(TransposeUVWx8_Any_SSE2, TransposeUVWx8_SSE2, 7)
#endif
#ifdef HAS_TRANSPOSEUVWX8_MMI
TUVANY(TransposeUVWx8_Any_MMI, TransposeUVWx8_MMI, 7)
#endif
#ifdef HAS_TRANSPOSEUVWX16_MSA
TUVANY(TransposeUVWx16_Any_MSA, TransposeUVWx16_MSA, 7)
#endif
#undef TUVANY
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
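
Since the TANY/TUVANY macros hide the control flow, here is the plain textual expansion of one instantiation, TANY(TransposeWx8_Any_NEON, TransposeWx8_NEON, 7):

void TransposeWx8_Any_NEON(const uint8_t* src, int src_stride, uint8_t* dst,
                           int dst_stride, int width) {
  int r = width & 7;   // remainder columns the SIMD kernel cannot handle
  int n = width - r;   // largest multiple of 8 columns
  if (n > 0) {
    TransposeWx8_NEON(src, src_stride, dst, dst_stride, n);
  }
  // The C fallback finishes the right-edge columns. The destination offset is
  // n rows down because transposed source columns become destination rows.
  TransposeWx8_C(src + n, src_stride, dst + n * dst_stride, dst_stride, r);
}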

View File

@@ -1,243 +0,0 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate.h"
#include "libyuv/convert.h"
#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h"
#include "libyuv/row.h"
#include "libyuv/scale_row.h" /* for ScaleARGBRowDownEven_ */
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
static int ARGBTranspose(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height) {
int i;
int src_pixel_step = src_stride_argb >> 2;
void (*ScaleARGBRowDownEven)(
const uint8_t* src_argb, ptrdiff_t src_stride_argb, int src_step,
uint8_t* dst_argb, int dst_width) = ScaleARGBRowDownEven_C;
// Check stride is a multiple of 4.
if (src_stride_argb & 3) {
return -1;
}
#if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_SSE2;
if (IS_ALIGNED(height, 4)) { // Width of dest.
ScaleARGBRowDownEven = ScaleARGBRowDownEven_SSE2;
}
}
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_NEON;
if (IS_ALIGNED(height, 4)) { // Width of dest.
ScaleARGBRowDownEven = ScaleARGBRowDownEven_NEON;
}
}
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_MMI;
if (IS_ALIGNED(height, 4)) { // Width of dest.
ScaleARGBRowDownEven = ScaleARGBRowDownEven_MMI;
}
}
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_MSA;
if (IS_ALIGNED(height, 4)) { // Width of dest.
ScaleARGBRowDownEven = ScaleARGBRowDownEven_MSA;
}
}
#endif
for (i = 0; i < width; ++i) { // column of source to row of dest.
ScaleARGBRowDownEven(src_argb, 0, src_pixel_step, dst_argb, height);
dst_argb += dst_stride_argb;
src_argb += 4;
}
return 0;
}
static int ARGBRotate90(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height) {
// Rotate by 90 is an ARGBTranspose with the source read
// from bottom to top. So set the source pointer to the end
// of the buffer and flip the sign of the source stride.
src_argb += src_stride_argb * (height - 1);
src_stride_argb = -src_stride_argb;
return ARGBTranspose(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
width, height);
}
static int ARGBRotate270(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height) {
// Rotate by 270 is an ARGBTranspose with the destination written
// from bottom to top. So set the destination pointer to the end
// of the buffer and flip the sign of the destination stride.
dst_argb += dst_stride_argb * (width - 1);
dst_stride_argb = -dst_stride_argb;
return ARGBTranspose(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
width, height);
}
static int ARGBRotate180(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height) {
// Swap first and last row and mirror the content. Uses a temporary row.
align_buffer_64(row, width * 4);
const uint8_t* src_bot = src_argb + src_stride_argb * (height - 1);
uint8_t* dst_bot = dst_argb + dst_stride_argb * (height - 1);
int half_height = (height + 1) >> 1;
int y;
void (*ARGBMirrorRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) =
ARGBMirrorRow_C;
void (*CopyRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) =
CopyRow_C;
#if defined(HAS_ARGBMIRRORROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
ARGBMirrorRow = ARGBMirrorRow_Any_NEON;
if (IS_ALIGNED(width, 8)) {
ARGBMirrorRow = ARGBMirrorRow_NEON;
}
}
#endif
#if defined(HAS_ARGBMIRRORROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
ARGBMirrorRow = ARGBMirrorRow_Any_SSE2;
if (IS_ALIGNED(width, 4)) {
ARGBMirrorRow = ARGBMirrorRow_SSE2;
}
}
#endif
#if defined(HAS_ARGBMIRRORROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
ARGBMirrorRow = ARGBMirrorRow_Any_AVX2;
if (IS_ALIGNED(width, 8)) {
ARGBMirrorRow = ARGBMirrorRow_AVX2;
}
}
#endif
#if defined(HAS_ARGBMIRRORROW_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
ARGBMirrorRow = ARGBMirrorRow_Any_MMI;
if (IS_ALIGNED(width, 2)) {
ARGBMirrorRow = ARGBMirrorRow_MMI;
}
}
#endif
#if defined(HAS_ARGBMIRRORROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
ARGBMirrorRow = ARGBMirrorRow_Any_MSA;
if (IS_ALIGNED(width, 16)) {
ARGBMirrorRow = ARGBMirrorRow_MSA;
}
}
#endif
#if defined(HAS_COPYROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
}
#endif
#if defined(HAS_COPYROW_AVX)
if (TestCpuFlag(kCpuHasAVX)) {
CopyRow = IS_ALIGNED(width * 4, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
}
#endif
#if defined(HAS_COPYROW_ERMS)
if (TestCpuFlag(kCpuHasERMS)) {
CopyRow = CopyRow_ERMS;
}
#endif
#if defined(HAS_COPYROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
}
#endif
// Odd height will harmlessly mirror the middle row twice.
for (y = 0; y < half_height; ++y) {
ARGBMirrorRow(src_argb, row, width); // Mirror first row into a buffer
ARGBMirrorRow(src_bot, dst_argb, width); // Mirror last row into first row
CopyRow(row, dst_bot, width * 4); // Copy first mirrored row into last
src_argb += src_stride_argb;
dst_argb += dst_stride_argb;
src_bot -= src_stride_argb;
dst_bot -= dst_stride_argb;
}
free_aligned_buffer_64(row);
return 0;
}
LIBYUV_API
int ARGBRotate(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height,
enum RotationMode mode) {
if (!src_argb || width <= 0 || height == 0 || !dst_argb) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
src_argb = src_argb + (height - 1) * src_stride_argb;
src_stride_argb = -src_stride_argb;
}
switch (mode) {
case kRotate0:
// copy frame
return ARGBCopy(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
width, height);
case kRotate90:
return ARGBRotate90(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
width, height);
case kRotate270:
return ARGBRotate270(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
width, height);
case kRotate180:
return ARGBRotate180(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
width, height);
default:
break;
}
return -1;
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
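
A usage sketch for ARGBRotate (the helper and buffer sizing are illustrative; strides are in bytes, four per pixel):

#include "libyuv/rotate_argb.h"

#include <vector>

// Sketch: rotate a packed ARGB image 270 degrees. Width and height swap in
// the destination, so the destination stride is height * 4 bytes.
bool RotateArgb270(const uint8_t* src_argb, int width, int height,
                   std::vector<uint8_t>* dst) {
  dst->resize(static_cast<size_t>(width) * height * 4);
  return libyuv::ARGBRotate(src_argb, width * 4, dst->data(), height * 4,
                            width, height, libyuv::kRotate270) == 0;
}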

View File

@@ -1,106 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
void TransposeWx8_C(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
int i;
for (i = 0; i < width; ++i) {
dst[0] = src[0 * src_stride];
dst[1] = src[1 * src_stride];
dst[2] = src[2 * src_stride];
dst[3] = src[3 * src_stride];
dst[4] = src[4 * src_stride];
dst[5] = src[5 * src_stride];
dst[6] = src[6 * src_stride];
dst[7] = src[7 * src_stride];
++src;
dst += dst_stride;
}
}
void TransposeUVWx8_C(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
int i;
for (i = 0; i < width; ++i) {
dst_a[0] = src[0 * src_stride + 0];
dst_b[0] = src[0 * src_stride + 1];
dst_a[1] = src[1 * src_stride + 0];
dst_b[1] = src[1 * src_stride + 1];
dst_a[2] = src[2 * src_stride + 0];
dst_b[2] = src[2 * src_stride + 1];
dst_a[3] = src[3 * src_stride + 0];
dst_b[3] = src[3 * src_stride + 1];
dst_a[4] = src[4 * src_stride + 0];
dst_b[4] = src[4 * src_stride + 1];
dst_a[5] = src[5 * src_stride + 0];
dst_b[5] = src[5 * src_stride + 1];
dst_a[6] = src[6 * src_stride + 0];
dst_b[6] = src[6 * src_stride + 1];
dst_a[7] = src[7 * src_stride + 0];
dst_b[7] = src[7 * src_stride + 1];
src += 2;
dst_a += dst_stride_a;
dst_b += dst_stride_b;
}
}
void TransposeWxH_C(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height) {
int i;
for (i = 0; i < width; ++i) {
int j;
for (j = 0; j < height; ++j) {
dst[i * dst_stride + j] = src[j * src_stride + i];
}
}
}
void TransposeUVWxH_C(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height) {
int i;
for (i = 0; i < width * 2; i += 2) {
int j;
for (j = 0; j < height; ++j) {
dst_a[j + ((i >> 1) * dst_stride_a)] = src[i + (j * src_stride)];
dst_b[j + ((i >> 1) * dst_stride_b)] = src[i + (j * src_stride) + 1];
}
}
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
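
To make the index math in TransposeWxH_C concrete, a tiny self-check (a hypothetical test helper; assumes the internal libyuv/rotate_row.h header is on the include path):

#include <assert.h>
#include <stdint.h>

#include "libyuv/rotate_row.h"

// Sketch: transpose a 3-wide, 2-tall block and verify two corners.
// dst[i * dst_stride + j] = src[j * src_stride + i] for all i, j.
void TinyTransposeCheck(void) {
  const uint8_t src[2][3] = {{1, 2, 3}, {4, 5, 6}};
  uint8_t dst[3][2] = {{0}};
  libyuv::TransposeWxH_C(&src[0][0], 3, &dst[0][0], 2, 3, 2);
  assert(dst[0][0] == 1 && dst[0][1] == 4);  // first source column -> row 0
  assert(dst[2][0] == 3 && dst[2][1] == 6);  // last source column -> row 2
}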

View File

@@ -1,374 +0,0 @@
/*
* Copyright 2015 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for GCC x86 and x64.
#if !defined(LIBYUV_DISABLE_X86) && \
(defined(__x86_64__) || defined(__i386__))
// Transpose 8x8. 32 or 64 bit, but not NaCl for 64 bit.
#if defined(HAS_TRANSPOSEWX8_SSSE3)
void TransposeWx8_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
asm volatile(
// Read in the data from the source pointer.
// First round of bit swap.
LABELALIGN
"1: \n"
"movq (%0),%%xmm0 \n"
"movq (%0,%3),%%xmm1 \n"
"lea (%0,%3,2),%0 \n"
"punpcklbw %%xmm1,%%xmm0 \n"
"movq (%0),%%xmm2 \n"
"movdqa %%xmm0,%%xmm1 \n"
"palignr $0x8,%%xmm1,%%xmm1 \n"
"movq (%0,%3),%%xmm3 \n"
"lea (%0,%3,2),%0 \n"
"punpcklbw %%xmm3,%%xmm2 \n"
"movdqa %%xmm2,%%xmm3 \n"
"movq (%0),%%xmm4 \n"
"palignr $0x8,%%xmm3,%%xmm3 \n"
"movq (%0,%3),%%xmm5 \n"
"lea (%0,%3,2),%0 \n"
"punpcklbw %%xmm5,%%xmm4 \n"
"movdqa %%xmm4,%%xmm5 \n"
"movq (%0),%%xmm6 \n"
"palignr $0x8,%%xmm5,%%xmm5 \n"
"movq (%0,%3),%%xmm7 \n"
"lea (%0,%3,2),%0 \n"
"punpcklbw %%xmm7,%%xmm6 \n"
"neg %3 \n"
"movdqa %%xmm6,%%xmm7 \n"
"lea 0x8(%0,%3,8),%0 \n"
"palignr $0x8,%%xmm7,%%xmm7 \n"
"neg %3 \n"
// Second round of bit swap.
"punpcklwd %%xmm2,%%xmm0 \n"
"punpcklwd %%xmm3,%%xmm1 \n"
"movdqa %%xmm0,%%xmm2 \n"
"movdqa %%xmm1,%%xmm3 \n"
"palignr $0x8,%%xmm2,%%xmm2 \n"
"palignr $0x8,%%xmm3,%%xmm3 \n"
"punpcklwd %%xmm6,%%xmm4 \n"
"punpcklwd %%xmm7,%%xmm5 \n"
"movdqa %%xmm4,%%xmm6 \n"
"movdqa %%xmm5,%%xmm7 \n"
"palignr $0x8,%%xmm6,%%xmm6 \n"
"palignr $0x8,%%xmm7,%%xmm7 \n"
// Third round of bit swap.
// Write to the destination pointer.
"punpckldq %%xmm4,%%xmm0 \n"
"movq %%xmm0,(%1) \n"
"movdqa %%xmm0,%%xmm4 \n"
"palignr $0x8,%%xmm4,%%xmm4 \n"
"movq %%xmm4,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm6,%%xmm2 \n"
"movdqa %%xmm2,%%xmm6 \n"
"movq %%xmm2,(%1) \n"
"palignr $0x8,%%xmm6,%%xmm6 \n"
"punpckldq %%xmm5,%%xmm1 \n"
"movq %%xmm6,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"movdqa %%xmm1,%%xmm5 \n"
"movq %%xmm1,(%1) \n"
"palignr $0x8,%%xmm5,%%xmm5 \n"
"movq %%xmm5,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm7,%%xmm3 \n"
"movq %%xmm3,(%1) \n"
"movdqa %%xmm3,%%xmm7 \n"
"palignr $0x8,%%xmm7,%%xmm7 \n"
"sub $0x8,%2 \n"
"movq %%xmm7,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"jg 1b \n"
: "+r"(src), // %0
"+r"(dst), // %1
"+r"(width) // %2
: "r"((intptr_t)(src_stride)), // %3
"r"((intptr_t)(dst_stride)) // %4
: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
"xmm7");
}
#endif // defined(HAS_TRANSPOSEWX8_SSSE3)
// Transpose 16x8. 64 bit
#if defined(HAS_TRANSPOSEWX8_FAST_SSSE3)
void TransposeWx8_Fast_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
asm volatile(
// Read in the data from the source pointer.
// First round of bit swap.
LABELALIGN
"1: \n"
"movdqu (%0),%%xmm0 \n"
"movdqu (%0,%3),%%xmm1 \n"
"lea (%0,%3,2),%0 \n"
"movdqa %%xmm0,%%xmm8 \n"
"punpcklbw %%xmm1,%%xmm0 \n"
"punpckhbw %%xmm1,%%xmm8 \n"
"movdqu (%0),%%xmm2 \n"
"movdqa %%xmm0,%%xmm1 \n"
"movdqa %%xmm8,%%xmm9 \n"
"palignr $0x8,%%xmm1,%%xmm1 \n"
"palignr $0x8,%%xmm9,%%xmm9 \n"
"movdqu (%0,%3),%%xmm3 \n"
"lea (%0,%3,2),%0 \n"
"movdqa %%xmm2,%%xmm10 \n"
"punpcklbw %%xmm3,%%xmm2 \n"
"punpckhbw %%xmm3,%%xmm10 \n"
"movdqa %%xmm2,%%xmm3 \n"
"movdqa %%xmm10,%%xmm11 \n"
"movdqu (%0),%%xmm4 \n"
"palignr $0x8,%%xmm3,%%xmm3 \n"
"palignr $0x8,%%xmm11,%%xmm11 \n"
"movdqu (%0,%3),%%xmm5 \n"
"lea (%0,%3,2),%0 \n"
"movdqa %%xmm4,%%xmm12 \n"
"punpcklbw %%xmm5,%%xmm4 \n"
"punpckhbw %%xmm5,%%xmm12 \n"
"movdqa %%xmm4,%%xmm5 \n"
"movdqa %%xmm12,%%xmm13 \n"
"movdqu (%0),%%xmm6 \n"
"palignr $0x8,%%xmm5,%%xmm5 \n"
"palignr $0x8,%%xmm13,%%xmm13 \n"
"movdqu (%0,%3),%%xmm7 \n"
"lea (%0,%3,2),%0 \n"
"movdqa %%xmm6,%%xmm14 \n"
"punpcklbw %%xmm7,%%xmm6 \n"
"punpckhbw %%xmm7,%%xmm14 \n"
"neg %3 \n"
"movdqa %%xmm6,%%xmm7 \n"
"movdqa %%xmm14,%%xmm15 \n"
"lea 0x10(%0,%3,8),%0 \n"
"palignr $0x8,%%xmm7,%%xmm7 \n"
"palignr $0x8,%%xmm15,%%xmm15 \n"
"neg %3 \n"
// Second round of bit swap.
"punpcklwd %%xmm2,%%xmm0 \n"
"punpcklwd %%xmm3,%%xmm1 \n"
"movdqa %%xmm0,%%xmm2 \n"
"movdqa %%xmm1,%%xmm3 \n"
"palignr $0x8,%%xmm2,%%xmm2 \n"
"palignr $0x8,%%xmm3,%%xmm3 \n"
"punpcklwd %%xmm6,%%xmm4 \n"
"punpcklwd %%xmm7,%%xmm5 \n"
"movdqa %%xmm4,%%xmm6 \n"
"movdqa %%xmm5,%%xmm7 \n"
"palignr $0x8,%%xmm6,%%xmm6 \n"
"palignr $0x8,%%xmm7,%%xmm7 \n"
"punpcklwd %%xmm10,%%xmm8 \n"
"punpcklwd %%xmm11,%%xmm9 \n"
"movdqa %%xmm8,%%xmm10 \n"
"movdqa %%xmm9,%%xmm11 \n"
"palignr $0x8,%%xmm10,%%xmm10 \n"
"palignr $0x8,%%xmm11,%%xmm11 \n"
"punpcklwd %%xmm14,%%xmm12 \n"
"punpcklwd %%xmm15,%%xmm13 \n"
"movdqa %%xmm12,%%xmm14 \n"
"movdqa %%xmm13,%%xmm15 \n"
"palignr $0x8,%%xmm14,%%xmm14 \n"
"palignr $0x8,%%xmm15,%%xmm15 \n"
// Third round of bit swap.
// Write to the destination pointer.
"punpckldq %%xmm4,%%xmm0 \n"
"movq %%xmm0,(%1) \n"
"movdqa %%xmm0,%%xmm4 \n"
"palignr $0x8,%%xmm4,%%xmm4 \n"
"movq %%xmm4,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm6,%%xmm2 \n"
"movdqa %%xmm2,%%xmm6 \n"
"movq %%xmm2,(%1) \n"
"palignr $0x8,%%xmm6,%%xmm6 \n"
"punpckldq %%xmm5,%%xmm1 \n"
"movq %%xmm6,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"movdqa %%xmm1,%%xmm5 \n"
"movq %%xmm1,(%1) \n"
"palignr $0x8,%%xmm5,%%xmm5 \n"
"movq %%xmm5,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm7,%%xmm3 \n"
"movq %%xmm3,(%1) \n"
"movdqa %%xmm3,%%xmm7 \n"
"palignr $0x8,%%xmm7,%%xmm7 \n"
"movq %%xmm7,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm12,%%xmm8 \n"
"movq %%xmm8,(%1) \n"
"movdqa %%xmm8,%%xmm12 \n"
"palignr $0x8,%%xmm12,%%xmm12 \n"
"movq %%xmm12,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm14,%%xmm10 \n"
"movdqa %%xmm10,%%xmm14 \n"
"movq %%xmm10,(%1) \n"
"palignr $0x8,%%xmm14,%%xmm14 \n"
"punpckldq %%xmm13,%%xmm9 \n"
"movq %%xmm14,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"movdqa %%xmm9,%%xmm13 \n"
"movq %%xmm9,(%1) \n"
"palignr $0x8,%%xmm13,%%xmm13 \n"
"movq %%xmm13,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm15,%%xmm11 \n"
"movq %%xmm11,(%1) \n"
"movdqa %%xmm11,%%xmm15 \n"
"palignr $0x8,%%xmm15,%%xmm15 \n"
"sub $0x10,%2 \n"
"movq %%xmm15,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"jg 1b \n"
: "+r"(src), // %0
"+r"(dst), // %1
"+r"(width) // %2
: "r"((intptr_t)(src_stride)), // %3
"r"((intptr_t)(dst_stride)) // %4
: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
"xmm7", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14",
"xmm15");
}
#endif // defined(HAS_TRANSPOSEWX8_FAST_SSSE3)
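// Note (added): the _Fast variant is 64-bit only because it keeps a full
// 16-column working set in xmm8-xmm15, which do not exist in 32-bit mode;
// hence the "sub $0x10,%2" loop step versus "sub $0x8,%2" above.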
// Transpose UV 8x8. 64 bit.
#if defined(HAS_TRANSPOSEUVWX8_SSE2)
void TransposeUVWx8_SSE2(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
asm volatile(
// Read in the data from the source pointer.
// First round of bit swap.
LABELALIGN
"1: \n"
"movdqu (%0),%%xmm0 \n"
"movdqu (%0,%4),%%xmm1 \n"
"lea (%0,%4,2),%0 \n"
"movdqa %%xmm0,%%xmm8 \n"
"punpcklbw %%xmm1,%%xmm0 \n"
"punpckhbw %%xmm1,%%xmm8 \n"
"movdqa %%xmm8,%%xmm1 \n"
"movdqu (%0),%%xmm2 \n"
"movdqu (%0,%4),%%xmm3 \n"
"lea (%0,%4,2),%0 \n"
"movdqa %%xmm2,%%xmm8 \n"
"punpcklbw %%xmm3,%%xmm2 \n"
"punpckhbw %%xmm3,%%xmm8 \n"
"movdqa %%xmm8,%%xmm3 \n"
"movdqu (%0),%%xmm4 \n"
"movdqu (%0,%4),%%xmm5 \n"
"lea (%0,%4,2),%0 \n"
"movdqa %%xmm4,%%xmm8 \n"
"punpcklbw %%xmm5,%%xmm4 \n"
"punpckhbw %%xmm5,%%xmm8 \n"
"movdqa %%xmm8,%%xmm5 \n"
"movdqu (%0),%%xmm6 \n"
"movdqu (%0,%4),%%xmm7 \n"
"lea (%0,%4,2),%0 \n"
"movdqa %%xmm6,%%xmm8 \n"
"punpcklbw %%xmm7,%%xmm6 \n"
"neg %4 \n"
"lea 0x10(%0,%4,8),%0 \n"
"punpckhbw %%xmm7,%%xmm8 \n"
"movdqa %%xmm8,%%xmm7 \n"
"neg %4 \n"
// Second round of bit swap.
"movdqa %%xmm0,%%xmm8 \n"
"movdqa %%xmm1,%%xmm9 \n"
"punpckhwd %%xmm2,%%xmm8 \n"
"punpckhwd %%xmm3,%%xmm9 \n"
"punpcklwd %%xmm2,%%xmm0 \n"
"punpcklwd %%xmm3,%%xmm1 \n"
"movdqa %%xmm8,%%xmm2 \n"
"movdqa %%xmm9,%%xmm3 \n"
"movdqa %%xmm4,%%xmm8 \n"
"movdqa %%xmm5,%%xmm9 \n"
"punpckhwd %%xmm6,%%xmm8 \n"
"punpckhwd %%xmm7,%%xmm9 \n"
"punpcklwd %%xmm6,%%xmm4 \n"
"punpcklwd %%xmm7,%%xmm5 \n"
"movdqa %%xmm8,%%xmm6 \n"
"movdqa %%xmm9,%%xmm7 \n"
// Third round of bit swap.
// Write to the destination pointer.
"movdqa %%xmm0,%%xmm8 \n"
"punpckldq %%xmm4,%%xmm0 \n"
"movlpd %%xmm0,(%1) \n" // Write back U channel
"movhpd %%xmm0,(%2) \n" // Write back V channel
"punpckhdq %%xmm4,%%xmm8 \n"
"movlpd %%xmm8,(%1,%5) \n"
"lea (%1,%5,2),%1 \n"
"movhpd %%xmm8,(%2,%6) \n"
"lea (%2,%6,2),%2 \n"
"movdqa %%xmm2,%%xmm8 \n"
"punpckldq %%xmm6,%%xmm2 \n"
"movlpd %%xmm2,(%1) \n"
"movhpd %%xmm2,(%2) \n"
"punpckhdq %%xmm6,%%xmm8 \n"
"movlpd %%xmm8,(%1,%5) \n"
"lea (%1,%5,2),%1 \n"
"movhpd %%xmm8,(%2,%6) \n"
"lea (%2,%6,2),%2 \n"
"movdqa %%xmm1,%%xmm8 \n"
"punpckldq %%xmm5,%%xmm1 \n"
"movlpd %%xmm1,(%1) \n"
"movhpd %%xmm1,(%2) \n"
"punpckhdq %%xmm5,%%xmm8 \n"
"movlpd %%xmm8,(%1,%5) \n"
"lea (%1,%5,2),%1 \n"
"movhpd %%xmm8,(%2,%6) \n"
"lea (%2,%6,2),%2 \n"
"movdqa %%xmm3,%%xmm8 \n"
"punpckldq %%xmm7,%%xmm3 \n"
"movlpd %%xmm3,(%1) \n"
"movhpd %%xmm3,(%2) \n"
"punpckhdq %%xmm7,%%xmm8 \n"
"sub $0x8,%3 \n"
"movlpd %%xmm8,(%1,%5) \n"
"lea (%1,%5,2),%1 \n"
"movhpd %%xmm8,(%2,%6) \n"
"lea (%2,%6,2),%2 \n"
"jg 1b \n"
: "+r"(src), // %0
"+r"(dst_a), // %1
"+r"(dst_b), // %2
"+r"(width) // %3
: "r"((intptr_t)(src_stride)), // %4
"r"((intptr_t)(dst_stride_a)), // %5
"r"((intptr_t)(dst_stride_b)) // %6
: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
"xmm7", "xmm8", "xmm9");
}
#endif // defined(HAS_TRANSPOSEUVWX8_SSE2)
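// Added for illustration (hypothetical name, not in the original source): a
// scalar sketch of TransposeUVWx8, assuming `width` counts interleaved UV
// pairs. The SSE2 kernel above produces the same U/V split via the
// movlpd/movhpd stores after the third interleave round.
static void TransposeUVWx8_Sketch(const uint8_t* src, int src_stride,
                                  uint8_t* dst_a, int dst_stride_a,
                                  uint8_t* dst_b, int dst_stride_b,
                                  int width) {
  int x, y;
  for (x = 0; x < width; ++x) {
    for (y = 0; y < 8; ++y) {
      dst_a[x * dst_stride_a + y] = src[y * src_stride + x * 2];      // U plane.
      dst_b[x * dst_stride_b + y] = src[y * src_stride + x * 2 + 1];  // V plane.
    }
  }
}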
#endif // defined(__x86_64__) || defined(__i386__)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

View File

@@ -1,291 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for Mips MMI.
#if !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A)
void TransposeWx8_MMI(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
uint64_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
uint64_t tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13;
uint8_t* src_tmp = nullptr;
__asm__ volatile(
"1: \n\t"
"ldc1 %[tmp12], 0x00(%[src]) \n\t"
"dadd %[src_tmp], %[src], %[src_stride] \n\t"
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp0 = (00 10 01 11 02 12 03 13) */
"punpcklbh %[tmp0], %[tmp12], %[tmp13] \n\t"
/* tmp1 = (04 14 05 15 06 16 07 17) */
"punpckhbh %[tmp1], %[tmp12], %[tmp13] \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp12], 0x00(%[src_tmp]) \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp2 = (20 30 21 31 22 32 23 33) */
"punpcklbh %[tmp2], %[tmp12], %[tmp13] \n\t"
/* tmp3 = (24 34 25 35 26 36 27 37) */
"punpckhbh %[tmp3], %[tmp12], %[tmp13] \n\t"
/* tmp4 = (00 10 20 30 01 11 21 31) */
"punpcklhw %[tmp4], %[tmp0], %[tmp2] \n\t"
/* tmp5 = (02 12 22 32 03 13 23 33) */
"punpckhhw %[tmp5], %[tmp0], %[tmp2] \n\t"
/* tmp6 = (04 14 24 34 05 15 25 35) */
"punpcklhw %[tmp6], %[tmp1], %[tmp3] \n\t"
/* tmp7 = (06 16 26 36 07 17 27 37) */
"punpckhhw %[tmp7], %[tmp1], %[tmp3] \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp12], 0x00(%[src_tmp]) \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp0 = (40 50 41 51 42 52 43 53) */
"punpcklbh %[tmp0], %[tmp12], %[tmp13] \n\t"
/* tmp1 = (44 54 45 55 46 56 47 57) */
"punpckhbh %[tmp1], %[tmp12], %[tmp13] \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp12], 0x00(%[src_tmp]) \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp2 = (60 70 61 71 62 72 63 73) */
"punpcklbh %[tmp2], %[tmp12], %[tmp13] \n\t"
/* tmp3 = (64 74 65 75 66 76 67 77) */
"punpckhbh %[tmp3], %[tmp12], %[tmp13] \n\t"
/* tmp8 = (40 50 60 70 41 51 61 71) */
"punpcklhw %[tmp8], %[tmp0], %[tmp2] \n\t"
/* tmp9 = (42 52 62 72 43 53 63 73) */
"punpckhhw %[tmp9], %[tmp0], %[tmp2] \n\t"
/* tmp10 = (44 54 64 74 45 55 65 75) */
"punpcklhw %[tmp10], %[tmp1], %[tmp3] \n\t"
/* tmp11 = (46 56 66 76 47 57 67 77) */
"punpckhhw %[tmp11], %[tmp1], %[tmp3] \n\t"
/* tmp0 = (00 10 20 30 40 50 60 70) */
"punpcklwd %[tmp0], %[tmp4], %[tmp8] \n\t"
/* tmp1 = (01 11 21 31 41 51 61 71) */
"punpckhwd %[tmp1], %[tmp4], %[tmp8] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst]) \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst]) \n\t"
/* tmp0 = (02 12 22 32 42 52 62 72) */
"punpcklwd %[tmp0], %[tmp5], %[tmp9] \n\t"
/* tmp1 = (03 13 23 33 43 53 63 73) */
"punpckhwd %[tmp1], %[tmp5], %[tmp9] \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst]) \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst]) \n\t"
/* tmp0 = (04 14 24 34 44 54 64 74) */
"punpcklwd %[tmp0], %[tmp6], %[tmp10] \n\t"
/* tmp1 = (05 15 25 35 45 55 65 75) */
"punpckhwd %[tmp1], %[tmp6], %[tmp10] \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst]) \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst]) \n\t"
/* tmp0 = (06 16 26 36 46 56 66 76) */
"punpcklwd %[tmp0], %[tmp7], %[tmp11] \n\t"
/* tmp1 = (07 17 27 37 47 57 67 77) */
"punpckhwd %[tmp1], %[tmp7], %[tmp11] \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst]) \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst]) \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"daddi %[src], %[src], 0x08 \n\t"
"daddi %[width], %[width], -0x08 \n\t"
"bnez %[width], 1b \n\t"
: [tmp0] "=&f"(tmp0), [tmp1] "=&f"(tmp1), [tmp2] "=&f"(tmp2),
[tmp3] "=&f"(tmp3), [tmp4] "=&f"(tmp4), [tmp5] "=&f"(tmp5),
[tmp6] "=&f"(tmp6), [tmp7] "=&f"(tmp7), [tmp8] "=&f"(tmp8),
[tmp9] "=&f"(tmp9), [tmp10] "=&f"(tmp10), [tmp11] "=&f"(tmp11),
[tmp12] "=&f"(tmp12), [tmp13] "=&f"(tmp13), [dst] "+&r"(dst),
[src_tmp] "+&r"(src_tmp)
: [src] "r"(src), [width] "r"(width), [src_stride] "r"(src_stride),
[dst_stride] "r"(dst_stride)
: "memory");
}
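// Note (added): the gssdlc1/gssdrc1 pairs above appear to be the Loongson-3
// idiom for an unaligned 64-bit store from an FP register — the left/right
// halves together cover either alignment, much like the integer sdl/sdr
// instructions.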
void TransposeUVWx8_MMI(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
uint64_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
uint64_t tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13;
uint8_t* src_tmp = nullptr;
__asm__ volatile(
"1: \n\t"
/* tmp12 = (u00 v00 u01 v01 u02 v02 u03 v03) */
"ldc1 %[tmp12], 0x00(%[src]) \n\t"
"dadd %[src_tmp], %[src], %[src_stride] \n\t"
/* tmp13 = (u10 v10 u11 v11 u12 v12 u13 v13) */
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp0 = (u00 u10 v00 v10 u01 u11 v01 v11) */
"punpcklbh %[tmp0], %[tmp12], %[tmp13] \n\t"
/* tmp1 = (u02 u12 v02 v12 u03 u13 v03 v13) */
"punpckhbh %[tmp1], %[tmp12], %[tmp13] \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
/* tmp12 = (u20 v20 u21 v21 u22 v22 u23 v23) */
"ldc1 %[tmp12], 0x00(%[src_tmp]) \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
/* tmp13 = (u30 v30 u31 v31 u32 v32 u33 v33) */
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp2 = (u20 u30 v20 v30 u21 u31 v21 v31) */
"punpcklbh %[tmp2], %[tmp12], %[tmp13] \n\t"
/* tmp3 = (u22 u32 v22 v32 u23 u33 v23 v33) */
"punpckhbh %[tmp3], %[tmp12], %[tmp13] \n\t"
/* tmp4 = (u00 u10 u20 u30 v00 v10 v20 v30) */
"punpcklhw %[tmp4], %[tmp0], %[tmp2] \n\t"
/* tmp5 = (u01 u11 u21 u31 v01 v11 v21 v31) */
"punpckhhw %[tmp5], %[tmp0], %[tmp2] \n\t"
/* tmp6 = (u02 u12 u22 u32 v02 v12 v22 v32) */
"punpcklhw %[tmp6], %[tmp1], %[tmp3] \n\t"
/* tmp7 = (u03 u13 u23 u33 v03 v13 v23 v33) */
"punpckhhw %[tmp7], %[tmp1], %[tmp3] \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
/* tmp12 = (u40 v40 u41 v41 u42 v42 u43 v43) */
"ldc1 %[tmp12], 0x00(%[src_tmp]) \n\t"
/* tmp13 = (u50 v50 u51 v51 u52 v52 u53 v53) */
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp0 = (u40 u50 v40 v50 u41 u51 v41 v51) */
"punpcklbh %[tmp0], %[tmp12], %[tmp13] \n\t"
/* tmp1 = (u42 u52 v42 v52 u43 u53 v43 v53) */
"punpckhbh %[tmp1], %[tmp12], %[tmp13] \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
/* tmp12 = (u60 v60 u61 v61 u62 v62 u63 v63) */
"ldc1 %[tmp12], 0x00(%[src_tmp]) \n\t"
/* tmp13 = (u70 v70 u71 v71 u72 v72 u73 v73) */
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp2 = (u60 u70 v60 v70 u61 u71 v61 v71) */
"punpcklbh %[tmp2], %[tmp12], %[tmp13] \n\t"
/* tmp3 = (u62 u72 v62 v72 u63 u73 v63 v73) */
"punpckhbh %[tmp3], %[tmp12], %[tmp13] \n\t"
/* tmp8 = (u40 u50 u60 u70 v40 v50 v60 v70) */
"punpcklhw %[tmp8], %[tmp0], %[tmp2] \n\t"
/* tmp9 = (u41 u51 u61 u71 v41 v51 v61 v71) */
"punpckhhw %[tmp9], %[tmp0], %[tmp2] \n\t"
/* tmp10 = (u42 u52 u62 u72 v42 v52 v62 v72) */
"punpcklhw %[tmp10], %[tmp1], %[tmp3] \n\t"
/* tmp11 = (u43 u53 u63 u73 v43 v53 v63 v73) */
"punpckhhw %[tmp11], %[tmp1], %[tmp3] \n\t"
/* tmp0 = (u00 u10 u20 u30 u40 u50 u60 u70) */
"punpcklwd %[tmp0], %[tmp4], %[tmp8] \n\t"
/* tmp1 = (v00 v10 v20 v30 v40 v50 v60 v70) */
"punpckhwd %[tmp1], %[tmp4], %[tmp8] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst_a]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst_a]) \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst_b]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst_b]) \n\t"
/* tmp0 = (u01 u11 u21 u31 u41 u51 u61 u71) */
"punpcklwd %[tmp0], %[tmp5], %[tmp9] \n\t"
/* tmp1 = (v01 v11 v21 v31 v41 v51 v61 v71) */
"punpckhwd %[tmp1], %[tmp5], %[tmp9] \n\t"
"dadd %[dst_a], %[dst_a], %[dst_stride_a] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst_a]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst_a]) \n\t"
"dadd %[dst_b], %[dst_b], %[dst_stride_b] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst_b]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst_b]) \n\t"
/* tmp0 = (u02 u12 u22 u32 u42 u52 u62 u72) */
"punpcklwd %[tmp0], %[tmp6], %[tmp10] \n\t"
/* tmp1 = (v02 v12 v22 v32 v42 v52 v62 v72) */
"punpckhwd %[tmp1], %[tmp6], %[tmp10] \n\t"
"dadd %[dst_a], %[dst_a], %[dst_stride_a] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst_a]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst_a]) \n\t"
"dadd %[dst_b], %[dst_b], %[dst_stride_b] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst_b]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst_b]) \n\t"
/* tmp0 = (u03 u13 u23 u33 u43 u53 u63 u73) */
"punpcklwd %[tmp0], %[tmp7], %[tmp11] \n\t"
/* tmp1 = (v03 v13 v23 v33 v43 v53 v63 v73) */
"punpckhwd %[tmp1], %[tmp7], %[tmp11] \n\t"
"dadd %[dst_a], %[dst_a], %[dst_stride_a] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst_a]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst_a]) \n\t"
"dadd %[dst_b], %[dst_b], %[dst_stride_b] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst_b]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst_b]) \n\t"
"dadd %[dst_a], %[dst_a], %[dst_stride_a] \n\t"
"dadd %[dst_b], %[dst_b], %[dst_stride_b] \n\t"
"daddiu %[src], %[src], 0x08 \n\t"
"daddi %[width], %[width], -0x04 \n\t"
"bnez %[width], 1b \n\t"
: [tmp0] "=&f"(tmp0), [tmp1] "=&f"(tmp1), [tmp2] "=&f"(tmp2),
[tmp3] "=&f"(tmp3), [tmp4] "=&f"(tmp4), [tmp5] "=&f"(tmp5),
[tmp6] "=&f"(tmp6), [tmp7] "=&f"(tmp7), [tmp8] "=&f"(tmp8),
[tmp9] "=&f"(tmp9), [tmp10] "=&f"(tmp10), [tmp11] "=&f"(tmp11),
[tmp12] "=&f"(tmp12), [tmp13] "=&f"(tmp13), [dst_a] "+&r"(dst_a),
[dst_b] "+&r"(dst_b), [src_tmp] "+&r"(src_tmp)
: [src] "r"(src), [width] "r"(width), [dst_stride_a] "r"(dst_stride_a),
[dst_stride_b] "r"(dst_stride_b), [src_stride] "r"(src_stride)
: "memory");
}
#endif // !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

View File

@@ -1,250 +0,0 @@
/*
* Copyright 2016 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
// This module is for GCC MSA
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#include "libyuv/macros_msa.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#define ILVRL_B(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
out0 = (v16u8)__msa_ilvr_b((v16i8)in1, (v16i8)in0); \
out1 = (v16u8)__msa_ilvl_b((v16i8)in1, (v16i8)in0); \
out2 = (v16u8)__msa_ilvr_b((v16i8)in3, (v16i8)in2); \
out3 = (v16u8)__msa_ilvl_b((v16i8)in3, (v16i8)in2); \
}
#define ILVRL_H(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
out0 = (v16u8)__msa_ilvr_h((v8i16)in1, (v8i16)in0); \
out1 = (v16u8)__msa_ilvl_h((v8i16)in1, (v8i16)in0); \
out2 = (v16u8)__msa_ilvr_h((v8i16)in3, (v8i16)in2); \
out3 = (v16u8)__msa_ilvl_h((v8i16)in3, (v8i16)in2); \
}
#define ILVRL_W(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
out0 = (v16u8)__msa_ilvr_w((v4i32)in1, (v4i32)in0); \
out1 = (v16u8)__msa_ilvl_w((v4i32)in1, (v4i32)in0); \
out2 = (v16u8)__msa_ilvr_w((v4i32)in3, (v4i32)in2); \
out3 = (v16u8)__msa_ilvl_w((v4i32)in3, (v4i32)in2); \
}
#define ILVRL_D(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
out0 = (v16u8)__msa_ilvr_d((v2i64)in1, (v2i64)in0); \
out1 = (v16u8)__msa_ilvl_d((v2i64)in1, (v2i64)in0); \
out2 = (v16u8)__msa_ilvr_d((v2i64)in3, (v2i64)in2); \
out3 = (v16u8)__msa_ilvl_d((v2i64)in3, (v2i64)in2); \
}
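// Example (added): for byte rows r0 = {a0..a15} and r1 = {b0..b15},
// ILVRL_B(r0, r1, r0, r1, o0, o1, o2, o3) gives o0 = {a0,b0,a1,b1,...,a7,b7}
// and o1 = {a8,b8,...,a15,b15} — the byte-interleave stage of the transposes
// below; ILVRL_H/_W/_D repeat the pattern at 16/32/64-bit granularity.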
void TransposeWx16_C(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
TransposeWx8_C(src, src_stride, dst, dst_stride, width);
TransposeWx8_C((src + 8 * src_stride), src_stride, (dst + 8), dst_stride,
width);
}
void TransposeUVWx16_C(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
TransposeUVWx8_C(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
width);
TransposeUVWx8_C((src + 8 * src_stride), src_stride, (dst_a + 8),
dst_stride_a, (dst_b + 8), dst_stride_b, width);
}
void TransposeWx16_MSA(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
int x;
const uint8_t* s;
v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3;
v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
v16u8 res0, res1, res2, res3, res4, res5, res6, res7, res8, res9;
for (x = 0; x < width; x += 16) {
s = src;
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
ILVRL_W(reg0, reg4, reg1, reg5, res0, res1, res2, res3);
ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
res8 = (v16u8)__msa_ilvr_w((v4i32)reg4, (v4i32)reg0);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg4, (v4i32)reg0);
ILVRL_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3);
ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
dst += dst_stride * 4;
res8 = (v16u8)__msa_ilvr_w((v4i32)reg5, (v4i32)reg1);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg5, (v4i32)reg1);
ILVRL_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3);
ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
dst += dst_stride * 4;
res8 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg2);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg2);
ILVRL_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3);
ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
dst += dst_stride * 4;
res8 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg3);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg3);
ILVRL_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);
ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
src += 16;
dst += dst_stride * 4;
}
}
void TransposeUVWx16_MSA(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
int x;
const uint8_t* s;
v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3;
v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
v16u8 res0, res1, res2, res3, res4, res5, res6, res7, res8, res9;
for (x = 0; x < width; x += 8) {
s = src;
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
ILVRL_W(reg0, reg4, reg1, reg5, res0, res1, res2, res3);
ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
res8 = (v16u8)__msa_ilvr_w((v4i32)reg4, (v4i32)reg0);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg4, (v4i32)reg0);
ILVRL_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3);
ST_UB2(dst0, dst2, dst_a, dst_stride_a);
ST_UB2(dst1, dst3, dst_b, dst_stride_b);
dst_a += dst_stride_a * 2;
dst_b += dst_stride_b * 2;
res8 = (v16u8)__msa_ilvr_w((v4i32)reg5, (v4i32)reg1);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg5, (v4i32)reg1);
ILVRL_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3);
ST_UB2(dst0, dst2, dst_a, dst_stride_a);
ST_UB2(dst1, dst3, dst_b, dst_stride_b);
dst_a += dst_stride_a * 2;
dst_b += dst_stride_b * 2;
res8 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg2);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg2);
ILVRL_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3);
ST_UB2(dst0, dst2, dst_a, dst_stride_a);
ST_UB2(dst1, dst3, dst_b, dst_stride_b);
dst_a += dst_stride_a * 2;
dst_b += dst_stride_b * 2;
res8 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg3);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg3);
ILVRL_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);
ST_UB2(dst0, dst2, dst_a, dst_stride_a);
ST_UB2(dst1, dst3, dst_b, dst_stride_b);
src += 16;
dst_a += dst_stride_a * 2;
dst_b += dst_stride_b * 2;
}
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)

View File

@@ -1,418 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#if !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__) && \
!defined(__aarch64__)
static const uvec8 kVTbl4x4Transpose = {0, 4, 8, 12, 1, 5, 9, 13,
2, 6, 10, 14, 3, 7, 11, 15};
void TransposeWx8_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
const uint8_t* src_temp;
asm volatile(
      // Loops are done in blocks of 8. The loop will stop when the
      // counter reaches or drops below 0; starting the counter at w-8
      // allows for this.
"sub %5, #8 \n"
// handle 8x8 blocks. this should be the majority of the plane
"1: \n"
"mov %0, %1 \n"
"vld1.8 {d0}, [%0], %2 \n"
"vld1.8 {d1}, [%0], %2 \n"
"vld1.8 {d2}, [%0], %2 \n"
"vld1.8 {d3}, [%0], %2 \n"
"vld1.8 {d4}, [%0], %2 \n"
"vld1.8 {d5}, [%0], %2 \n"
"vld1.8 {d6}, [%0], %2 \n"
"vld1.8 {d7}, [%0] \n"
"vtrn.8 d1, d0 \n"
"vtrn.8 d3, d2 \n"
"vtrn.8 d5, d4 \n"
"vtrn.8 d7, d6 \n"
"vtrn.16 d1, d3 \n"
"vtrn.16 d0, d2 \n"
"vtrn.16 d5, d7 \n"
"vtrn.16 d4, d6 \n"
"vtrn.32 d1, d5 \n"
"vtrn.32 d0, d4 \n"
"vtrn.32 d3, d7 \n"
"vtrn.32 d2, d6 \n"
"vrev16.8 q0, q0 \n"
"vrev16.8 q1, q1 \n"
"vrev16.8 q2, q2 \n"
"vrev16.8 q3, q3 \n"
"mov %0, %3 \n"
"vst1.8 {d1}, [%0], %4 \n"
"vst1.8 {d0}, [%0], %4 \n"
"vst1.8 {d3}, [%0], %4 \n"
"vst1.8 {d2}, [%0], %4 \n"
"vst1.8 {d5}, [%0], %4 \n"
"vst1.8 {d4}, [%0], %4 \n"
"vst1.8 {d7}, [%0], %4 \n"
"vst1.8 {d6}, [%0] \n"
"add %1, #8 \n" // src += 8
"add %3, %3, %4, lsl #3 \n" // dst += 8 * dst_stride
"subs %5, #8 \n" // w -= 8
"bge 1b \n"
      // Add 8 back to the counter. If the result is 0 there are
      // no residuals.
"adds %5, #8 \n"
"beq 4f \n"
// some residual, so between 1 and 7 lines left to transpose
"cmp %5, #2 \n"
"blt 3f \n"
"cmp %5, #4 \n"
"blt 2f \n"
// 4x8 block
"mov %0, %1 \n"
"vld1.32 {d0[0]}, [%0], %2 \n"
"vld1.32 {d0[1]}, [%0], %2 \n"
"vld1.32 {d1[0]}, [%0], %2 \n"
"vld1.32 {d1[1]}, [%0], %2 \n"
"vld1.32 {d2[0]}, [%0], %2 \n"
"vld1.32 {d2[1]}, [%0], %2 \n"
"vld1.32 {d3[0]}, [%0], %2 \n"
"vld1.32 {d3[1]}, [%0] \n"
"mov %0, %3 \n"
"vld1.8 {q3}, [%6] \n"
"vtbl.8 d4, {d0, d1}, d6 \n"
"vtbl.8 d5, {d0, d1}, d7 \n"
"vtbl.8 d0, {d2, d3}, d6 \n"
"vtbl.8 d1, {d2, d3}, d7 \n"
// TODO(frkoenig): Rework shuffle above to
// write out with 4 instead of 8 writes.
"vst1.32 {d4[0]}, [%0], %4 \n"
"vst1.32 {d4[1]}, [%0], %4 \n"
"vst1.32 {d5[0]}, [%0], %4 \n"
"vst1.32 {d5[1]}, [%0] \n"
"add %0, %3, #4 \n"
"vst1.32 {d0[0]}, [%0], %4 \n"
"vst1.32 {d0[1]}, [%0], %4 \n"
"vst1.32 {d1[0]}, [%0], %4 \n"
"vst1.32 {d1[1]}, [%0] \n"
"add %1, #4 \n" // src += 4
"add %3, %3, %4, lsl #2 \n" // dst += 4 * dst_stride
"subs %5, #4 \n" // w -= 4
"beq 4f \n"
// some residual, check to see if it includes a 2x8 block,
// or less
"cmp %5, #2 \n"
"blt 3f \n"
// 2x8 block
"2: \n"
"mov %0, %1 \n"
"vld1.16 {d0[0]}, [%0], %2 \n"
"vld1.16 {d1[0]}, [%0], %2 \n"
"vld1.16 {d0[1]}, [%0], %2 \n"
"vld1.16 {d1[1]}, [%0], %2 \n"
"vld1.16 {d0[2]}, [%0], %2 \n"
"vld1.16 {d1[2]}, [%0], %2 \n"
"vld1.16 {d0[3]}, [%0], %2 \n"
"vld1.16 {d1[3]}, [%0] \n"
"vtrn.8 d0, d1 \n"
"mov %0, %3 \n"
"vst1.64 {d0}, [%0], %4 \n"
"vst1.64 {d1}, [%0] \n"
"add %1, #2 \n" // src += 2
"add %3, %3, %4, lsl #1 \n" // dst += 2 * dst_stride
"subs %5, #2 \n" // w -= 2
"beq 4f \n"
// 1x8 block
"3: \n"
"vld1.8 {d0[0]}, [%1], %2 \n"
"vld1.8 {d0[1]}, [%1], %2 \n"
"vld1.8 {d0[2]}, [%1], %2 \n"
"vld1.8 {d0[3]}, [%1], %2 \n"
"vld1.8 {d0[4]}, [%1], %2 \n"
"vld1.8 {d0[5]}, [%1], %2 \n"
"vld1.8 {d0[6]}, [%1], %2 \n"
"vld1.8 {d0[7]}, [%1] \n"
"vst1.64 {d0}, [%3] \n"
"4: \n"
: "=&r"(src_temp), // %0
"+r"(src), // %1
"+r"(src_stride), // %2
"+r"(dst), // %3
"+r"(dst_stride), // %4
"+r"(width) // %5
: "r"(&kVTbl4x4Transpose) // %6
: "memory", "cc", "q0", "q1", "q2", "q3");
}
static const uvec8 kVTbl4x4TransposeDi = {0, 8, 1, 9, 2, 10, 3, 11,
4, 12, 5, 13, 6, 14, 7, 15};
void TransposeUVWx8_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
const uint8_t* src_temp;
asm volatile(
      // Loops are done in blocks of 8. The loop will stop when the
      // counter reaches or drops below 0; starting the counter at w-8
      // allows for this.
"sub %7, #8 \n"
// handle 8x8 blocks. this should be the majority of the plane
"1: \n"
"mov %0, %1 \n"
"vld2.8 {d0, d1}, [%0], %2 \n"
"vld2.8 {d2, d3}, [%0], %2 \n"
"vld2.8 {d4, d5}, [%0], %2 \n"
"vld2.8 {d6, d7}, [%0], %2 \n"
"vld2.8 {d16, d17}, [%0], %2 \n"
"vld2.8 {d18, d19}, [%0], %2 \n"
"vld2.8 {d20, d21}, [%0], %2 \n"
"vld2.8 {d22, d23}, [%0] \n"
"vtrn.8 q1, q0 \n"
"vtrn.8 q3, q2 \n"
"vtrn.8 q9, q8 \n"
"vtrn.8 q11, q10 \n"
"vtrn.16 q1, q3 \n"
"vtrn.16 q0, q2 \n"
"vtrn.16 q9, q11 \n"
"vtrn.16 q8, q10 \n"
"vtrn.32 q1, q9 \n"
"vtrn.32 q0, q8 \n"
"vtrn.32 q3, q11 \n"
"vtrn.32 q2, q10 \n"
"vrev16.8 q0, q0 \n"
"vrev16.8 q1, q1 \n"
"vrev16.8 q2, q2 \n"
"vrev16.8 q3, q3 \n"
"vrev16.8 q8, q8 \n"
"vrev16.8 q9, q9 \n"
"vrev16.8 q10, q10 \n"
"vrev16.8 q11, q11 \n"
"mov %0, %3 \n"
"vst1.8 {d2}, [%0], %4 \n"
"vst1.8 {d0}, [%0], %4 \n"
"vst1.8 {d6}, [%0], %4 \n"
"vst1.8 {d4}, [%0], %4 \n"
"vst1.8 {d18}, [%0], %4 \n"
"vst1.8 {d16}, [%0], %4 \n"
"vst1.8 {d22}, [%0], %4 \n"
"vst1.8 {d20}, [%0] \n"
"mov %0, %5 \n"
"vst1.8 {d3}, [%0], %6 \n"
"vst1.8 {d1}, [%0], %6 \n"
"vst1.8 {d7}, [%0], %6 \n"
"vst1.8 {d5}, [%0], %6 \n"
"vst1.8 {d19}, [%0], %6 \n"
"vst1.8 {d17}, [%0], %6 \n"
"vst1.8 {d23}, [%0], %6 \n"
"vst1.8 {d21}, [%0] \n"
"add %1, #8*2 \n" // src += 8*2
"add %3, %3, %4, lsl #3 \n" // dst_a += 8 *
// dst_stride_a
"add %5, %5, %6, lsl #3 \n" // dst_b += 8 *
// dst_stride_b
"subs %7, #8 \n" // w -= 8
"bge 1b \n"
      // Add 8 back to the counter. If the result is 0 there are
      // no residuals.
"adds %7, #8 \n"
"beq 4f \n"
// some residual, so between 1 and 7 lines left to transpose
"cmp %7, #2 \n"
"blt 3f \n"
"cmp %7, #4 \n"
"blt 2f \n"
// TODO(frkoenig): Clean this up
// 4x8 block
"mov %0, %1 \n"
"vld1.64 {d0}, [%0], %2 \n"
"vld1.64 {d1}, [%0], %2 \n"
"vld1.64 {d2}, [%0], %2 \n"
"vld1.64 {d3}, [%0], %2 \n"
"vld1.64 {d4}, [%0], %2 \n"
"vld1.64 {d5}, [%0], %2 \n"
"vld1.64 {d6}, [%0], %2 \n"
"vld1.64 {d7}, [%0] \n"
"vld1.8 {q15}, [%8] \n"
"vtrn.8 q0, q1 \n"
"vtrn.8 q2, q3 \n"
"vtbl.8 d16, {d0, d1}, d30 \n"
"vtbl.8 d17, {d0, d1}, d31 \n"
"vtbl.8 d18, {d2, d3}, d30 \n"
"vtbl.8 d19, {d2, d3}, d31 \n"
"vtbl.8 d20, {d4, d5}, d30 \n"
"vtbl.8 d21, {d4, d5}, d31 \n"
"vtbl.8 d22, {d6, d7}, d30 \n"
"vtbl.8 d23, {d6, d7}, d31 \n"
"mov %0, %3 \n"
"vst1.32 {d16[0]}, [%0], %4 \n"
"vst1.32 {d16[1]}, [%0], %4 \n"
"vst1.32 {d17[0]}, [%0], %4 \n"
"vst1.32 {d17[1]}, [%0], %4 \n"
"add %0, %3, #4 \n"
"vst1.32 {d20[0]}, [%0], %4 \n"
"vst1.32 {d20[1]}, [%0], %4 \n"
"vst1.32 {d21[0]}, [%0], %4 \n"
"vst1.32 {d21[1]}, [%0] \n"
"mov %0, %5 \n"
"vst1.32 {d18[0]}, [%0], %6 \n"
"vst1.32 {d18[1]}, [%0], %6 \n"
"vst1.32 {d19[0]}, [%0], %6 \n"
"vst1.32 {d19[1]}, [%0], %6 \n"
"add %0, %5, #4 \n"
"vst1.32 {d22[0]}, [%0], %6 \n"
"vst1.32 {d22[1]}, [%0], %6 \n"
"vst1.32 {d23[0]}, [%0], %6 \n"
"vst1.32 {d23[1]}, [%0] \n"
"add %1, #4*2 \n" // src += 4 * 2
"add %3, %3, %4, lsl #2 \n" // dst_a += 4 *
// dst_stride_a
"add %5, %5, %6, lsl #2 \n" // dst_b += 4 *
// dst_stride_b
"subs %7, #4 \n" // w -= 4
"beq 4f \n"
// some residual, check to see if it includes a 2x8 block,
// or less
"cmp %7, #2 \n"
"blt 3f \n"
// 2x8 block
"2: \n"
"mov %0, %1 \n"
"vld2.16 {d0[0], d2[0]}, [%0], %2 \n"
"vld2.16 {d1[0], d3[0]}, [%0], %2 \n"
"vld2.16 {d0[1], d2[1]}, [%0], %2 \n"
"vld2.16 {d1[1], d3[1]}, [%0], %2 \n"
"vld2.16 {d0[2], d2[2]}, [%0], %2 \n"
"vld2.16 {d1[2], d3[2]}, [%0], %2 \n"
"vld2.16 {d0[3], d2[3]}, [%0], %2 \n"
"vld2.16 {d1[3], d3[3]}, [%0] \n"
"vtrn.8 d0, d1 \n"
"vtrn.8 d2, d3 \n"
"mov %0, %3 \n"
"vst1.64 {d0}, [%0], %4 \n"
"vst1.64 {d2}, [%0] \n"
"mov %0, %5 \n"
"vst1.64 {d1}, [%0], %6 \n"
"vst1.64 {d3}, [%0] \n"
"add %1, #2*2 \n" // src += 2 * 2
"add %3, %3, %4, lsl #1 \n" // dst_a += 2 *
// dst_stride_a
"add %5, %5, %6, lsl #1 \n" // dst_b += 2 *
// dst_stride_b
"subs %7, #2 \n" // w -= 2
"beq 4f \n"
// 1x8 block
"3: \n"
"vld2.8 {d0[0], d1[0]}, [%1], %2 \n"
"vld2.8 {d0[1], d1[1]}, [%1], %2 \n"
"vld2.8 {d0[2], d1[2]}, [%1], %2 \n"
"vld2.8 {d0[3], d1[3]}, [%1], %2 \n"
"vld2.8 {d0[4], d1[4]}, [%1], %2 \n"
"vld2.8 {d0[5], d1[5]}, [%1], %2 \n"
"vld2.8 {d0[6], d1[6]}, [%1], %2 \n"
"vld2.8 {d0[7], d1[7]}, [%1] \n"
"vst1.64 {d0}, [%3] \n"
"vst1.64 {d1}, [%5] \n"
"4: \n"
: "=&r"(src_temp), // %0
"+r"(src), // %1
"+r"(src_stride), // %2
"+r"(dst_a), // %3
"+r"(dst_stride_a), // %4
"+r"(dst_b), // %5
"+r"(dst_stride_b), // %6
"+r"(width) // %7
: "r"(&kVTbl4x4TransposeDi) // %8
: "memory", "cc", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
}
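// Note (added): both kernels above handle arbitrary widths — the main loop
// consumes 8 columns per pass, then the 4x8, 2x8 and 1x8 tail blocks
// transpose whatever remains, which is why the counter starts at w-8.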
#endif // defined(__ARM_NEON__) && !defined(__aarch64__)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

View File

@@ -1,443 +0,0 @@
/*
* Copyright 2014 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for GCC Neon armv8 64 bit.
#if !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
static const uvec8 kVTbl4x4Transpose = {0, 4, 8, 12, 1, 5, 9, 13,
2, 6, 10, 14, 3, 7, 11, 15};
void TransposeWx8_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
const uint8_t* src_temp;
asm volatile(
      // Loops are done in blocks of 8. The loop will stop when the
      // counter reaches or drops below 0; starting the counter at w-8
      // allows for this.
"sub %w3, %w3, #8 \n"
// handle 8x8 blocks. this should be the majority of the plane
"1: \n"
"mov %0, %1 \n"
"ld1 {v0.8b}, [%0], %5 \n"
"ld1 {v1.8b}, [%0], %5 \n"
"ld1 {v2.8b}, [%0], %5 \n"
"ld1 {v3.8b}, [%0], %5 \n"
"ld1 {v4.8b}, [%0], %5 \n"
"ld1 {v5.8b}, [%0], %5 \n"
"ld1 {v6.8b}, [%0], %5 \n"
"ld1 {v7.8b}, [%0] \n"
"mov %0, %1 \n"
"trn2 v16.8b, v0.8b, v1.8b \n"
"prfm pldl1keep, [%0, 448] \n" // prefetch 7 lines ahead
"trn1 v17.8b, v0.8b, v1.8b \n"
"add %0, %0, %5 \n"
"trn2 v18.8b, v2.8b, v3.8b \n"
"prfm pldl1keep, [%0, 448] \n" // row 1
"trn1 v19.8b, v2.8b, v3.8b \n"
"add %0, %0, %5 \n"
"trn2 v20.8b, v4.8b, v5.8b \n"
"prfm pldl1keep, [%0, 448] \n" // row 2
"trn1 v21.8b, v4.8b, v5.8b \n"
"add %0, %0, %5 \n"
"trn2 v22.8b, v6.8b, v7.8b \n"
"prfm pldl1keep, [%0, 448] \n" // row 3
"trn1 v23.8b, v6.8b, v7.8b \n"
"add %0, %0, %5 \n"
"trn2 v3.4h, v17.4h, v19.4h \n"
"prfm pldl1keep, [%0, 448] \n" // row 4
"trn1 v1.4h, v17.4h, v19.4h \n"
"add %0, %0, %5 \n"
"trn2 v2.4h, v16.4h, v18.4h \n"
"prfm pldl1keep, [%0, 448] \n" // row 5
"trn1 v0.4h, v16.4h, v18.4h \n"
"add %0, %0, %5 \n"
"trn2 v7.4h, v21.4h, v23.4h \n"
"prfm pldl1keep, [%0, 448] \n" // row 6
"trn1 v5.4h, v21.4h, v23.4h \n"
"add %0, %0, %5 \n"
"trn2 v6.4h, v20.4h, v22.4h \n"
"prfm pldl1keep, [%0, 448] \n" // row 7
"trn1 v4.4h, v20.4h, v22.4h \n"
"trn2 v21.2s, v1.2s, v5.2s \n"
"trn1 v17.2s, v1.2s, v5.2s \n"
"trn2 v20.2s, v0.2s, v4.2s \n"
"trn1 v16.2s, v0.2s, v4.2s \n"
"trn2 v23.2s, v3.2s, v7.2s \n"
"trn1 v19.2s, v3.2s, v7.2s \n"
"trn2 v22.2s, v2.2s, v6.2s \n"
"trn1 v18.2s, v2.2s, v6.2s \n"
"mov %0, %2 \n"
"st1 {v17.8b}, [%0], %6 \n"
"st1 {v16.8b}, [%0], %6 \n"
"st1 {v19.8b}, [%0], %6 \n"
"st1 {v18.8b}, [%0], %6 \n"
"st1 {v21.8b}, [%0], %6 \n"
"st1 {v20.8b}, [%0], %6 \n"
"st1 {v23.8b}, [%0], %6 \n"
"st1 {v22.8b}, [%0] \n"
"add %1, %1, #8 \n" // src += 8
"add %2, %2, %6, lsl #3 \n" // dst += 8 * dst_stride
"subs %w3, %w3, #8 \n" // w -= 8
"b.ge 1b \n"
      // Add 8 back to the counter. If the result is 0 there are
      // no residuals.
"adds %w3, %w3, #8 \n"
"b.eq 4f \n"
// some residual, so between 1 and 7 lines left to transpose
"cmp %w3, #2 \n"
"b.lt 3f \n"
"cmp %w3, #4 \n"
"b.lt 2f \n"
// 4x8 block
"mov %0, %1 \n"
"ld1 {v0.s}[0], [%0], %5 \n"
"ld1 {v0.s}[1], [%0], %5 \n"
"ld1 {v0.s}[2], [%0], %5 \n"
"ld1 {v0.s}[3], [%0], %5 \n"
"ld1 {v1.s}[0], [%0], %5 \n"
"ld1 {v1.s}[1], [%0], %5 \n"
"ld1 {v1.s}[2], [%0], %5 \n"
"ld1 {v1.s}[3], [%0] \n"
"mov %0, %2 \n"
"ld1 {v2.16b}, [%4] \n"
"tbl v3.16b, {v0.16b}, v2.16b \n"
"tbl v0.16b, {v1.16b}, v2.16b \n"
// TODO(frkoenig): Rework shuffle above to
// write out with 4 instead of 8 writes.
"st1 {v3.s}[0], [%0], %6 \n"
"st1 {v3.s}[1], [%0], %6 \n"
"st1 {v3.s}[2], [%0], %6 \n"
"st1 {v3.s}[3], [%0] \n"
"add %0, %2, #4 \n"
"st1 {v0.s}[0], [%0], %6 \n"
"st1 {v0.s}[1], [%0], %6 \n"
"st1 {v0.s}[2], [%0], %6 \n"
"st1 {v0.s}[3], [%0] \n"
"add %1, %1, #4 \n" // src += 4
"add %2, %2, %6, lsl #2 \n" // dst += 4 * dst_stride
"subs %w3, %w3, #4 \n" // w -= 4
"b.eq 4f \n"
// some residual, check to see if it includes a 2x8 block,
// or less
"cmp %w3, #2 \n"
"b.lt 3f \n"
// 2x8 block
"2: \n"
"mov %0, %1 \n"
"ld1 {v0.h}[0], [%0], %5 \n"
"ld1 {v1.h}[0], [%0], %5 \n"
"ld1 {v0.h}[1], [%0], %5 \n"
"ld1 {v1.h}[1], [%0], %5 \n"
"ld1 {v0.h}[2], [%0], %5 \n"
"ld1 {v1.h}[2], [%0], %5 \n"
"ld1 {v0.h}[3], [%0], %5 \n"
"ld1 {v1.h}[3], [%0] \n"
"trn2 v2.8b, v0.8b, v1.8b \n"
"trn1 v3.8b, v0.8b, v1.8b \n"
"mov %0, %2 \n"
"st1 {v3.8b}, [%0], %6 \n"
"st1 {v2.8b}, [%0] \n"
"add %1, %1, #2 \n" // src += 2
"add %2, %2, %6, lsl #1 \n" // dst += 2 * dst_stride
"subs %w3, %w3, #2 \n" // w -= 2
"b.eq 4f \n"
// 1x8 block
"3: \n"
"ld1 {v0.b}[0], [%1], %5 \n"
"ld1 {v0.b}[1], [%1], %5 \n"
"ld1 {v0.b}[2], [%1], %5 \n"
"ld1 {v0.b}[3], [%1], %5 \n"
"ld1 {v0.b}[4], [%1], %5 \n"
"ld1 {v0.b}[5], [%1], %5 \n"
"ld1 {v0.b}[6], [%1], %5 \n"
"ld1 {v0.b}[7], [%1] \n"
"st1 {v0.8b}, [%2] \n"
"4: \n"
: "=&r"(src_temp), // %0
"+r"(src), // %1
"+r"(dst), // %2
"+r"(width) // %3
: "r"(&kVTbl4x4Transpose), // %4
"r"(static_cast<ptrdiff_t>(src_stride)), // %5
"r"(static_cast<ptrdiff_t>(dst_stride)) // %6
: "memory", "cc", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16",
"v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
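// Note (added): the "prfm pldl1keep, [%0, 448]" instructions above prefetch
// 448 bytes — seven 64-byte cache lines — ahead of each row load,
// overlapping memory latency with the trn1/trn2 shuffle work.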
static const uint8_t kVTbl4x4TransposeDi[32] = {
0, 16, 32, 48, 2, 18, 34, 50, 4, 20, 36, 52, 6, 22, 38, 54,
1, 17, 33, 49, 3, 19, 35, 51, 5, 21, 37, 53, 7, 23, 39, 55};
void TransposeUVWx8_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
const uint8_t* src_temp;
asm volatile(
      // Loops are done in blocks of 8. The loop will stop when the
      // counter reaches or drops below 0; starting the counter at w-8
      // allows for this.
"sub %w4, %w4, #8 \n"
// handle 8x8 blocks. this should be the majority of the plane
"1: \n"
"mov %0, %1 \n"
"ld1 {v0.16b}, [%0], %5 \n"
"ld1 {v1.16b}, [%0], %5 \n"
"ld1 {v2.16b}, [%0], %5 \n"
"ld1 {v3.16b}, [%0], %5 \n"
"ld1 {v4.16b}, [%0], %5 \n"
"ld1 {v5.16b}, [%0], %5 \n"
"ld1 {v6.16b}, [%0], %5 \n"
"ld1 {v7.16b}, [%0] \n"
"mov %0, %1 \n"
"trn1 v16.16b, v0.16b, v1.16b \n"
"trn2 v17.16b, v0.16b, v1.16b \n"
"trn1 v18.16b, v2.16b, v3.16b \n"
"trn2 v19.16b, v2.16b, v3.16b \n"
"trn1 v20.16b, v4.16b, v5.16b \n"
"trn2 v21.16b, v4.16b, v5.16b \n"
"trn1 v22.16b, v6.16b, v7.16b \n"
"trn2 v23.16b, v6.16b, v7.16b \n"
"trn1 v0.8h, v16.8h, v18.8h \n"
"trn2 v1.8h, v16.8h, v18.8h \n"
"trn1 v2.8h, v20.8h, v22.8h \n"
"trn2 v3.8h, v20.8h, v22.8h \n"
"trn1 v4.8h, v17.8h, v19.8h \n"
"trn2 v5.8h, v17.8h, v19.8h \n"
"trn1 v6.8h, v21.8h, v23.8h \n"
"trn2 v7.8h, v21.8h, v23.8h \n"
"trn1 v16.4s, v0.4s, v2.4s \n"
"trn2 v17.4s, v0.4s, v2.4s \n"
"trn1 v18.4s, v1.4s, v3.4s \n"
"trn2 v19.4s, v1.4s, v3.4s \n"
"trn1 v20.4s, v4.4s, v6.4s \n"
"trn2 v21.4s, v4.4s, v6.4s \n"
"trn1 v22.4s, v5.4s, v7.4s \n"
"trn2 v23.4s, v5.4s, v7.4s \n"
"mov %0, %2 \n"
"st1 {v16.d}[0], [%0], %6 \n"
"st1 {v18.d}[0], [%0], %6 \n"
"st1 {v17.d}[0], [%0], %6 \n"
"st1 {v19.d}[0], [%0], %6 \n"
"st1 {v16.d}[1], [%0], %6 \n"
"st1 {v18.d}[1], [%0], %6 \n"
"st1 {v17.d}[1], [%0], %6 \n"
"st1 {v19.d}[1], [%0] \n"
"mov %0, %3 \n"
"st1 {v20.d}[0], [%0], %7 \n"
"st1 {v22.d}[0], [%0], %7 \n"
"st1 {v21.d}[0], [%0], %7 \n"
"st1 {v23.d}[0], [%0], %7 \n"
"st1 {v20.d}[1], [%0], %7 \n"
"st1 {v22.d}[1], [%0], %7 \n"
"st1 {v21.d}[1], [%0], %7 \n"
"st1 {v23.d}[1], [%0] \n"
"add %1, %1, #16 \n" // src += 8*2
"add %2, %2, %6, lsl #3 \n" // dst_a += 8 *
// dst_stride_a
"add %3, %3, %7, lsl #3 \n" // dst_b += 8 *
// dst_stride_b
"subs %w4, %w4, #8 \n" // w -= 8
"b.ge 1b \n"
      // Add 8 back to the counter. If the result is 0 there are
      // no residuals.
"adds %w4, %w4, #8 \n"
"b.eq 4f \n"
// some residual, so between 1 and 7 lines left to transpose
"cmp %w4, #2 \n"
"b.lt 3f \n"
"cmp %w4, #4 \n"
"b.lt 2f \n"
// TODO(frkoenig): Clean this up
// 4x8 block
"mov %0, %1 \n"
"ld1 {v0.8b}, [%0], %5 \n"
"ld1 {v1.8b}, [%0], %5 \n"
"ld1 {v2.8b}, [%0], %5 \n"
"ld1 {v3.8b}, [%0], %5 \n"
"ld1 {v4.8b}, [%0], %5 \n"
"ld1 {v5.8b}, [%0], %5 \n"
"ld1 {v6.8b}, [%0], %5 \n"
"ld1 {v7.8b}, [%0] \n"
"ld1 {v30.16b}, [%8], #16 \n"
"ld1 {v31.16b}, [%8] \n"
"tbl v16.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v30.16b \n"
"tbl v17.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v31.16b \n"
"tbl v18.16b, {v4.16b, v5.16b, v6.16b, v7.16b}, v30.16b \n"
"tbl v19.16b, {v4.16b, v5.16b, v6.16b, v7.16b}, v31.16b \n"
"mov %0, %2 \n"
"st1 {v16.s}[0], [%0], %6 \n"
"st1 {v16.s}[1], [%0], %6 \n"
"st1 {v16.s}[2], [%0], %6 \n"
"st1 {v16.s}[3], [%0], %6 \n"
"add %0, %2, #4 \n"
"st1 {v18.s}[0], [%0], %6 \n"
"st1 {v18.s}[1], [%0], %6 \n"
"st1 {v18.s}[2], [%0], %6 \n"
"st1 {v18.s}[3], [%0] \n"
"mov %0, %3 \n"
"st1 {v17.s}[0], [%0], %7 \n"
"st1 {v17.s}[1], [%0], %7 \n"
"st1 {v17.s}[2], [%0], %7 \n"
"st1 {v17.s}[3], [%0], %7 \n"
"add %0, %3, #4 \n"
"st1 {v19.s}[0], [%0], %7 \n"
"st1 {v19.s}[1], [%0], %7 \n"
"st1 {v19.s}[2], [%0], %7 \n"
"st1 {v19.s}[3], [%0] \n"
"add %1, %1, #8 \n" // src += 4 * 2
"add %2, %2, %6, lsl #2 \n" // dst_a += 4 *
// dst_stride_a
"add %3, %3, %7, lsl #2 \n" // dst_b += 4 *
// dst_stride_b
"subs %w4, %w4, #4 \n" // w -= 4
"b.eq 4f \n"
// some residual, check to see if it includes a 2x8 block,
// or less
"cmp %w4, #2 \n"
"b.lt 3f \n"
// 2x8 block
"2: \n"
"mov %0, %1 \n"
"ld2 {v0.h, v1.h}[0], [%0], %5 \n"
"ld2 {v2.h, v3.h}[0], [%0], %5 \n"
"ld2 {v0.h, v1.h}[1], [%0], %5 \n"
"ld2 {v2.h, v3.h}[1], [%0], %5 \n"
"ld2 {v0.h, v1.h}[2], [%0], %5 \n"
"ld2 {v2.h, v3.h}[2], [%0], %5 \n"
"ld2 {v0.h, v1.h}[3], [%0], %5 \n"
"ld2 {v2.h, v3.h}[3], [%0] \n"
"trn1 v4.8b, v0.8b, v2.8b \n"
"trn2 v5.8b, v0.8b, v2.8b \n"
"trn1 v6.8b, v1.8b, v3.8b \n"
"trn2 v7.8b, v1.8b, v3.8b \n"
"mov %0, %2 \n"
"st1 {v4.d}[0], [%0], %6 \n"
"st1 {v6.d}[0], [%0] \n"
"mov %0, %3 \n"
"st1 {v5.d}[0], [%0], %7 \n"
"st1 {v7.d}[0], [%0] \n"
"add %1, %1, #4 \n" // src += 2 * 2
"add %2, %2, %6, lsl #1 \n" // dst_a += 2 *
// dst_stride_a
"add %3, %3, %7, lsl #1 \n" // dst_b += 2 *
// dst_stride_b
"subs %w4, %w4, #2 \n" // w -= 2
"b.eq 4f \n"
// 1x8 block
"3: \n"
"ld2 {v0.b, v1.b}[0], [%1], %5 \n"
"ld2 {v0.b, v1.b}[1], [%1], %5 \n"
"ld2 {v0.b, v1.b}[2], [%1], %5 \n"
"ld2 {v0.b, v1.b}[3], [%1], %5 \n"
"ld2 {v0.b, v1.b}[4], [%1], %5 \n"
"ld2 {v0.b, v1.b}[5], [%1], %5 \n"
"ld2 {v0.b, v1.b}[6], [%1], %5 \n"
"ld2 {v0.b, v1.b}[7], [%1] \n"
"st1 {v0.d}[0], [%2] \n"
"st1 {v1.d}[0], [%3] \n"
"4: \n"
: "=&r"(src_temp), // %0
"+r"(src), // %1
"+r"(dst_a), // %2
"+r"(dst_b), // %3
"+r"(width) // %4
: "r"(static_cast<ptrdiff_t>(src_stride)), // %5
"r"(static_cast<ptrdiff_t>(dst_stride_a)), // %6
"r"(static_cast<ptrdiff_t>(dst_stride_b)), // %7
"r"(&kVTbl4x4TransposeDi) // %8
: "memory", "cc", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16",
"v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30", "v31");
}
#endif // !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

View File

@@ -1,253 +0,0 @@
/*
* Copyright 2013 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for 32 bit Visual C x86
#if !defined(LIBYUV_DISABLE_X86) && defined(_MSC_VER) && \
!defined(__clang__) && defined(_M_IX86)
__declspec(naked) void TransposeWx8_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
__asm {
push edi
push esi
push ebp
mov eax, [esp + 12 + 4] // src
mov edi, [esp + 12 + 8] // src_stride
mov edx, [esp + 12 + 12] // dst
mov esi, [esp + 12 + 16] // dst_stride
mov ecx, [esp + 12 + 20] // width
// Read in the data from the source pointer.
// First round of bit swap.
align 4
convertloop:
movq xmm0, qword ptr [eax]
lea ebp, [eax + 8]
movq xmm1, qword ptr [eax + edi]
lea eax, [eax + 2 * edi]
punpcklbw xmm0, xmm1
movq xmm2, qword ptr [eax]
movdqa xmm1, xmm0
palignr xmm1, xmm1, 8
movq xmm3, qword ptr [eax + edi]
lea eax, [eax + 2 * edi]
punpcklbw xmm2, xmm3
movdqa xmm3, xmm2
movq xmm4, qword ptr [eax]
palignr xmm3, xmm3, 8
movq xmm5, qword ptr [eax + edi]
punpcklbw xmm4, xmm5
lea eax, [eax + 2 * edi]
movdqa xmm5, xmm4
movq xmm6, qword ptr [eax]
palignr xmm5, xmm5, 8
movq xmm7, qword ptr [eax + edi]
punpcklbw xmm6, xmm7
mov eax, ebp
movdqa xmm7, xmm6
palignr xmm7, xmm7, 8
// Second round of bit swap.
punpcklwd xmm0, xmm2
punpcklwd xmm1, xmm3
movdqa xmm2, xmm0
movdqa xmm3, xmm1
palignr xmm2, xmm2, 8
palignr xmm3, xmm3, 8
punpcklwd xmm4, xmm6
punpcklwd xmm5, xmm7
movdqa xmm6, xmm4
movdqa xmm7, xmm5
palignr xmm6, xmm6, 8
palignr xmm7, xmm7, 8
// Third round of bit swap.
// Write to the destination pointer.
punpckldq xmm0, xmm4
movq qword ptr [edx], xmm0
movdqa xmm4, xmm0
palignr xmm4, xmm4, 8
movq qword ptr [edx + esi], xmm4
lea edx, [edx + 2 * esi]
punpckldq xmm2, xmm6
movdqa xmm6, xmm2
palignr xmm6, xmm6, 8
movq qword ptr [edx], xmm2
punpckldq xmm1, xmm5
movq qword ptr [edx + esi], xmm6
lea edx, [edx + 2 * esi]
movdqa xmm5, xmm1
movq qword ptr [edx], xmm1
palignr xmm5, xmm5, 8
punpckldq xmm3, xmm7
movq qword ptr [edx + esi], xmm5
lea edx, [edx + 2 * esi]
movq qword ptr [edx], xmm3
movdqa xmm7, xmm3
palignr xmm7, xmm7, 8
sub ecx, 8
movq qword ptr [edx + esi], xmm7
lea edx, [edx + 2 * esi]
jg convertloop
pop ebp
pop esi
pop edi
ret
}
}
__declspec(naked) void TransposeUVWx8_SSE2(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int w) {
__asm {
push ebx
push esi
push edi
push ebp
mov eax, [esp + 16 + 4] // src
mov edi, [esp + 16 + 8] // src_stride
mov edx, [esp + 16 + 12] // dst_a
mov esi, [esp + 16 + 16] // dst_stride_a
mov ebx, [esp + 16 + 20] // dst_b
mov ebp, [esp + 16 + 24] // dst_stride_b
mov ecx, esp
sub esp, 4 + 16
and esp, ~15
mov [esp + 16], ecx
mov ecx, [ecx + 16 + 28] // w
align 4
// Read in the data from the source pointer.
// First round of bit swap.
convertloop:
movdqu xmm0, [eax]
movdqu xmm1, [eax + edi]
lea eax, [eax + 2 * edi]
movdqa xmm7, xmm0 // use xmm7 as temp register.
punpcklbw xmm0, xmm1
punpckhbw xmm7, xmm1
movdqa xmm1, xmm7
movdqu xmm2, [eax]
movdqu xmm3, [eax + edi]
lea eax, [eax + 2 * edi]
movdqa xmm7, xmm2
punpcklbw xmm2, xmm3
punpckhbw xmm7, xmm3
movdqa xmm3, xmm7
movdqu xmm4, [eax]
movdqu xmm5, [eax + edi]
lea eax, [eax + 2 * edi]
movdqa xmm7, xmm4
punpcklbw xmm4, xmm5
punpckhbw xmm7, xmm5
movdqa xmm5, xmm7
movdqu xmm6, [eax]
movdqu xmm7, [eax + edi]
lea eax, [eax + 2 * edi]
movdqu [esp], xmm5 // backup xmm5
neg edi
movdqa xmm5, xmm6 // use xmm5 as temp register.
punpcklbw xmm6, xmm7
punpckhbw xmm5, xmm7
movdqa xmm7, xmm5
lea eax, [eax + 8 * edi + 16]
neg edi
// Second round of bit swap.
movdqa xmm5, xmm0
punpcklwd xmm0, xmm2
punpckhwd xmm5, xmm2
movdqa xmm2, xmm5
movdqa xmm5, xmm1
punpcklwd xmm1, xmm3
punpckhwd xmm5, xmm3
movdqa xmm3, xmm5
movdqa xmm5, xmm4
punpcklwd xmm4, xmm6
punpckhwd xmm5, xmm6
movdqa xmm6, xmm5
movdqu xmm5, [esp] // restore xmm5
movdqu [esp], xmm6 // backup xmm6
movdqa xmm6, xmm5 // use xmm6 as temp register.
punpcklwd xmm5, xmm7
punpckhwd xmm6, xmm7
movdqa xmm7, xmm6
// Third round of bit swap.
// Write to the destination pointer.
movdqa xmm6, xmm0
punpckldq xmm0, xmm4
punpckhdq xmm6, xmm4
movdqa xmm4, xmm6
movdqu xmm6, [esp] // restore xmm6
movlpd qword ptr [edx], xmm0
movhpd qword ptr [ebx], xmm0
movlpd qword ptr [edx + esi], xmm4
lea edx, [edx + 2 * esi]
movhpd qword ptr [ebx + ebp], xmm4
lea ebx, [ebx + 2 * ebp]
movdqa xmm0, xmm2 // use xmm0 as the temp register.
punpckldq xmm2, xmm6
movlpd qword ptr [edx], xmm2
movhpd qword ptr [ebx], xmm2
punpckhdq xmm0, xmm6
movlpd qword ptr [edx + esi], xmm0
lea edx, [edx + 2 * esi]
movhpd qword ptr [ebx + ebp], xmm0
lea ebx, [ebx + 2 * ebp]
movdqa xmm0, xmm1 // use xmm0 as the temp register.
punpckldq xmm1, xmm5
movlpd qword ptr [edx], xmm1
movhpd qword ptr [ebx], xmm1
punpckhdq xmm0, xmm5
movlpd qword ptr [edx + esi], xmm0
lea edx, [edx + 2 * esi]
movhpd qword ptr [ebx + ebp], xmm0
lea ebx, [ebx + 2 * ebp]
movdqa xmm0, xmm3 // use xmm0 as the temp register.
punpckldq xmm3, xmm7
movlpd qword ptr [edx], xmm3
movhpd qword ptr [ebx], xmm3
punpckhdq xmm0, xmm7
sub ecx, 8
movlpd qword ptr [edx + esi], xmm0
lea edx, [edx + 2 * esi]
movhpd qword ptr [ebx + ebp], xmm0
lea ebx, [ebx + 2 * ebp]
jg convertloop
mov esp, [esp + 16]
pop ebp
pop edi
pop esi
pop ebx
ret
}
}
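// Note (added): the 32-bit MSVC kernel above only has xmm0-xmm7, so unlike
// the 64-bit GCC path it spills xmm5/xmm6 to a 16-byte-aligned stack slot
// ("sub esp, 4 + 16" / "and esp, ~15") instead of drawing on xmm8-xmm15.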
#endif // !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,949 +0,0 @@
/*
* Copyright 2016 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <assert.h>
#include "libyuv/scale_row.h"
// This module is for GCC MSA
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#include "libyuv/macros_msa.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#define LOAD_INDEXED_DATA(srcp, indx0, out0) \
{ \
out0[0] = srcp[indx0[0]]; \
out0[1] = srcp[indx0[1]]; \
out0[2] = srcp[indx0[2]]; \
out0[3] = srcp[indx0[3]]; \
}
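// Example (added): with indx0 = {0, 3, 6, 9}, LOAD_INDEXED_DATA(src, indx0,
// out0) gathers src[0], src[3], src[6], src[9] into out0 — a scalar gather
// for the column scalers where no vector gather instruction exists.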
void ScaleARGBRowDown2_MSA(const uint8_t* src_argb,
ptrdiff_t src_stride,
uint8_t* dst_argb,
int dst_width) {
int x;
v16u8 src0, src1, dst0;
(void)src_stride;
for (x = 0; x < dst_width; x += 4) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);
dst0 = (v16u8)__msa_pckod_w((v4i32)src1, (v4i32)src0);
ST_UB(dst0, dst_argb);
src_argb += 32;
dst_argb += 16;
}
}
void ScaleARGBRowDown2Linear_MSA(const uint8_t* src_argb,
ptrdiff_t src_stride,
uint8_t* dst_argb,
int dst_width) {
int x;
v16u8 src0, src1, vec0, vec1, dst0;
(void)src_stride;
for (x = 0; x < dst_width; x += 4) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);
vec0 = (v16u8)__msa_pckev_w((v4i32)src1, (v4i32)src0);
vec1 = (v16u8)__msa_pckod_w((v4i32)src1, (v4i32)src0);
dst0 = (v16u8)__msa_aver_u_b((v16u8)vec0, (v16u8)vec1);
ST_UB(dst0, dst_argb);
src_argb += 32;
dst_argb += 16;
}
}
void ScaleARGBRowDown2Box_MSA(const uint8_t* src_argb,
ptrdiff_t src_stride,
uint8_t* dst_argb,
int dst_width) {
int x;
const uint8_t* s = src_argb;
const uint8_t* t = src_argb + src_stride;
v16u8 src0, src1, src2, src3, vec0, vec1, vec2, vec3, dst0;
v8u16 reg0, reg1, reg2, reg3;
v16i8 shuffler = {0, 4, 1, 5, 2, 6, 3, 7, 8, 12, 9, 13, 10, 14, 11, 15};
for (x = 0; x < dst_width; x += 4) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)t, 0);
src3 = (v16u8)__msa_ld_b((v16i8*)t, 16);
vec0 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src0, (v16i8)src0);
vec1 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src1, (v16i8)src1);
vec2 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src2, (v16i8)src2);
vec3 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src3, (v16i8)src3);
reg0 = __msa_hadd_u_h(vec0, vec0);
reg1 = __msa_hadd_u_h(vec1, vec1);
reg2 = __msa_hadd_u_h(vec2, vec2);
reg3 = __msa_hadd_u_h(vec3, vec3);
reg0 += reg2;
reg1 += reg3;
reg0 = (v8u16)__msa_srari_h((v8i16)reg0, 2);
reg1 = (v8u16)__msa_srari_h((v8i16)reg1, 2);
dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
ST_UB(dst0, dst_argb);
s += 32;
t += 32;
dst_argb += 16;
}
}
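// Note (added): the Box variant computes the rounded 2x2 average
// (p0 + p1 + p2 + p3 + 2) >> 2: __msa_hadd_u_h sums horizontal pairs per
// channel, the two rows are added together, and __msa_srari_h(..., 2)
// applies the rounding shift.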
void ScaleARGBRowDownEven_MSA(const uint8_t* src_argb,
ptrdiff_t src_stride,
int32_t src_stepx,
uint8_t* dst_argb,
int dst_width) {
int x;
int32_t stepx = src_stepx * 4;
int32_t data0, data1, data2, data3;
(void)src_stride;
for (x = 0; x < dst_width; x += 4) {
data0 = LW(src_argb);
data1 = LW(src_argb + stepx);
data2 = LW(src_argb + stepx * 2);
data3 = LW(src_argb + stepx * 3);
SW(data0, dst_argb);
SW(data1, dst_argb + 4);
SW(data2, dst_argb + 8);
SW(data3, dst_argb + 12);
src_argb += stepx * 4;
dst_argb += 16;
}
}
void ScaleARGBRowDownEvenBox_MSA(const uint8_t* src_argb,
ptrdiff_t src_stride,
int src_stepx,
uint8_t* dst_argb,
int dst_width) {
int x;
const uint8_t* nxt_argb = src_argb + src_stride;
int32_t stepx = src_stepx * 4;
int64_t data0, data1, data2, data3;
v16u8 src0 = {0}, src1 = {0}, src2 = {0}, src3 = {0};
v16u8 vec0, vec1, vec2, vec3;
v8u16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
v16u8 dst0;
for (x = 0; x < dst_width; x += 4) {
data0 = LD(src_argb);
data1 = LD(src_argb + stepx);
data2 = LD(src_argb + stepx * 2);
data3 = LD(src_argb + stepx * 3);
src0 = (v16u8)__msa_insert_d((v2i64)src0, 0, data0);
src0 = (v16u8)__msa_insert_d((v2i64)src0, 1, data1);
src1 = (v16u8)__msa_insert_d((v2i64)src1, 0, data2);
src1 = (v16u8)__msa_insert_d((v2i64)src1, 1, data3);
data0 = LD(nxt_argb);
data1 = LD(nxt_argb + stepx);
data2 = LD(nxt_argb + stepx * 2);
data3 = LD(nxt_argb + stepx * 3);
src2 = (v16u8)__msa_insert_d((v2i64)src2, 0, data0);
src2 = (v16u8)__msa_insert_d((v2i64)src2, 1, data1);
src3 = (v16u8)__msa_insert_d((v2i64)src3, 0, data2);
src3 = (v16u8)__msa_insert_d((v2i64)src3, 1, data3);
vec0 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
vec1 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
vec2 = (v16u8)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
vec3 = (v16u8)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
reg0 = __msa_hadd_u_h(vec0, vec0);
reg1 = __msa_hadd_u_h(vec1, vec1);
reg2 = __msa_hadd_u_h(vec2, vec2);
reg3 = __msa_hadd_u_h(vec3, vec3);
reg4 = (v8u16)__msa_pckev_d((v2i64)reg2, (v2i64)reg0);
reg5 = (v8u16)__msa_pckev_d((v2i64)reg3, (v2i64)reg1);
reg6 = (v8u16)__msa_pckod_d((v2i64)reg2, (v2i64)reg0);
reg7 = (v8u16)__msa_pckod_d((v2i64)reg3, (v2i64)reg1);
reg4 += reg6;
reg5 += reg7;
reg4 = (v8u16)__msa_srari_h((v8i16)reg4, 2);
reg5 = (v8u16)__msa_srari_h((v8i16)reg5, 2);
dst0 = (v16u8)__msa_pckev_b((v16i8)reg5, (v16i8)reg4);
ST_UB(dst0, dst_argb);
src_argb += stepx * 4;
nxt_argb += stepx * 4;
dst_argb += 16;
}
}
void ScaleRowDown2_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x;
v16u8 src0, src1, src2, src3, dst0, dst1;
(void)src_stride;
for (x = 0; x < dst_width; x += 32) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 48);
dst0 = (v16u8)__msa_pckod_b((v16i8)src1, (v16i8)src0);
dst1 = (v16u8)__msa_pckod_b((v16i8)src3, (v16i8)src2);
ST_UB2(dst0, dst1, dst, 16);
src_ptr += 64;
dst += 32;
}
}
void ScaleRowDown2Linear_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x;
v16u8 src0, src1, src2, src3, vec0, vec1, vec2, vec3, dst0, dst1;
(void)src_stride;
for (x = 0; x < dst_width; x += 32) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 48);
vec0 = (v16u8)__msa_pckev_b((v16i8)src1, (v16i8)src0);
vec2 = (v16u8)__msa_pckev_b((v16i8)src3, (v16i8)src2);
vec1 = (v16u8)__msa_pckod_b((v16i8)src1, (v16i8)src0);
vec3 = (v16u8)__msa_pckod_b((v16i8)src3, (v16i8)src2);
dst0 = __msa_aver_u_b(vec1, vec0);
dst1 = __msa_aver_u_b(vec3, vec2);
ST_UB2(dst0, dst1, dst, 16);
src_ptr += 64;
dst += 32;
}
}
void ScaleRowDown2Box_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x;
const uint8_t* s = src_ptr;
const uint8_t* t = src_ptr + src_stride;
v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0, dst1;
v8u16 vec0, vec1, vec2, vec3;
for (x = 0; x < dst_width; x += 32) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)s, 48);
src4 = (v16u8)__msa_ld_b((v16i8*)t, 0);
src5 = (v16u8)__msa_ld_b((v16i8*)t, 16);
src6 = (v16u8)__msa_ld_b((v16i8*)t, 32);
src7 = (v16u8)__msa_ld_b((v16i8*)t, 48);
vec0 = __msa_hadd_u_h(src0, src0);
vec1 = __msa_hadd_u_h(src1, src1);
vec2 = __msa_hadd_u_h(src2, src2);
vec3 = __msa_hadd_u_h(src3, src3);
vec0 += __msa_hadd_u_h(src4, src4);
vec1 += __msa_hadd_u_h(src5, src5);
vec2 += __msa_hadd_u_h(src6, src6);
vec3 += __msa_hadd_u_h(src7, src7);
vec0 = (v8u16)__msa_srari_h((v8i16)vec0, 2);
vec1 = (v8u16)__msa_srari_h((v8i16)vec1, 2);
vec2 = (v8u16)__msa_srari_h((v8i16)vec2, 2);
vec3 = (v8u16)__msa_srari_h((v8i16)vec3, 2);
dst0 = (v16u8)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
dst1 = (v16u8)__msa_pckev_b((v16i8)vec3, (v16i8)vec2);
ST_UB2(dst0, dst1, dst, 16);
s += 64;
t += 64;
dst += 32;
}
}
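
// ScaleRowDown4: point-samples one byte of every four (the even pick
// followed by an odd pick selects index 2 of each group).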
void ScaleRowDown4_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x;
v16u8 src0, src1, src2, src3, vec0, vec1, dst0;
(void)src_stride;
for (x = 0; x < dst_width; x += 16) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 48);
vec0 = (v16u8)__msa_pckev_b((v16i8)src1, (v16i8)src0);
vec1 = (v16u8)__msa_pckev_b((v16i8)src3, (v16i8)src2);
dst0 = (v16u8)__msa_pckod_b((v16i8)vec1, (v16i8)vec0);
ST_UB(dst0, dst);
src_ptr += 64;
dst += 16;
}
}
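
// ScaleRowDown4Box: sums each 4x4 block across four rows, then shifts
// right by 4 with rounding to produce the box-filtered average.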
void ScaleRowDown4Box_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x;
const uint8_t* s = src_ptr;
const uint8_t* t0 = s + src_stride;
const uint8_t* t1 = s + src_stride * 2;
const uint8_t* t2 = s + src_stride * 3;
v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0;
v8u16 vec0, vec1, vec2, vec3;
v4u32 reg0, reg1, reg2, reg3;
for (x = 0; x < dst_width; x += 16) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)s, 48);
src4 = (v16u8)__msa_ld_b((v16i8*)t0, 0);
src5 = (v16u8)__msa_ld_b((v16i8*)t0, 16);
src6 = (v16u8)__msa_ld_b((v16i8*)t0, 32);
src7 = (v16u8)__msa_ld_b((v16i8*)t0, 48);
vec0 = __msa_hadd_u_h(src0, src0);
vec1 = __msa_hadd_u_h(src1, src1);
vec2 = __msa_hadd_u_h(src2, src2);
vec3 = __msa_hadd_u_h(src3, src3);
vec0 += __msa_hadd_u_h(src4, src4);
vec1 += __msa_hadd_u_h(src5, src5);
vec2 += __msa_hadd_u_h(src6, src6);
vec3 += __msa_hadd_u_h(src7, src7);
src0 = (v16u8)__msa_ld_b((v16i8*)t1, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)t1, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)t1, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)t1, 48);
src4 = (v16u8)__msa_ld_b((v16i8*)t2, 0);
src5 = (v16u8)__msa_ld_b((v16i8*)t2, 16);
src6 = (v16u8)__msa_ld_b((v16i8*)t2, 32);
src7 = (v16u8)__msa_ld_b((v16i8*)t2, 48);
vec0 += __msa_hadd_u_h(src0, src0);
vec1 += __msa_hadd_u_h(src1, src1);
vec2 += __msa_hadd_u_h(src2, src2);
vec3 += __msa_hadd_u_h(src3, src3);
vec0 += __msa_hadd_u_h(src4, src4);
vec1 += __msa_hadd_u_h(src5, src5);
vec2 += __msa_hadd_u_h(src6, src6);
vec3 += __msa_hadd_u_h(src7, src7);
reg0 = __msa_hadd_u_w(vec0, vec0);
reg1 = __msa_hadd_u_w(vec1, vec1);
reg2 = __msa_hadd_u_w(vec2, vec2);
reg3 = __msa_hadd_u_w(vec3, vec3);
reg0 = (v4u32)__msa_srari_w((v4i32)reg0, 4);
reg1 = (v4u32)__msa_srari_w((v4i32)reg1, 4);
reg2 = (v4u32)__msa_srari_w((v4i32)reg2, 4);
reg3 = (v4u32)__msa_srari_w((v4i32)reg3, 4);
vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);
vec1 = (v8u16)__msa_pckev_h((v8i16)reg3, (v8i16)reg2);
dst0 = (v16u8)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
ST_UB(dst0, dst);
s += 64;
t0 += 64;
t1 += 64;
t2 += 64;
dst += 16;
}
}
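
// ScaleRowDown38: point-samples 3 of every 8 bytes (indices 0, 3 and 6
// of each group) via the shuffle mask, storing 12 output bytes per step.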
void ScaleRowDown38_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x, width;
uint64_t dst0;
uint32_t dst1;
v16u8 src0, src1, vec0;
v16i8 mask = {0, 3, 6, 8, 11, 14, 16, 19, 22, 24, 27, 30, 0, 0, 0, 0};
(void)src_stride;
assert(dst_width % 3 == 0);
width = dst_width / 3;
for (x = 0; x < width; x += 4) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
vec0 = (v16u8)__msa_vshf_b(mask, (v16i8)src1, (v16i8)src0);
dst0 = __msa_copy_u_d((v2i64)vec0, 0);
dst1 = __msa_copy_u_w((v4i32)vec0, 2);
SD(dst0, dst);
SW(dst1, dst + 8);
src_ptr += 32;
dst += 12;
}
}
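
// ScaleRowDown38_2_Box: averages 8x2 input blocks down to 3 pixels. Two
// outputs cover 3x2 areas (multiply by 0x2AAA ~= 65536/6, then >> 16)
// and the third covers a 2x2 area (0x4000 == 65536/4).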
void ScaleRowDown38_2_Box_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst_ptr,
int dst_width) {
int x, width;
const uint8_t* s = src_ptr;
const uint8_t* t = src_ptr + src_stride;
uint64_t dst0;
uint32_t dst1;
v16u8 src0, src1, src2, src3, out;
v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v4u32 tmp0, tmp1, tmp2, tmp3, tmp4;
v8i16 zero = {0};
v8i16 mask = {0, 1, 2, 8, 3, 4, 5, 9};
v16i8 dst_mask = {0, 2, 16, 4, 6, 18, 8, 10, 20, 12, 14, 22, 0, 0, 0, 0};
v4u32 const_0x2AAA = (v4u32)__msa_fill_w(0x2AAA);
v4u32 const_0x4000 = (v4u32)__msa_fill_w(0x4000);
assert((dst_width % 3 == 0) && (dst_width > 0));
width = dst_width / 3;
for (x = 0; x < width; x += 4) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)t, 0);
src3 = (v16u8)__msa_ld_b((v16i8*)t, 16);
vec0 = (v8u16)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
vec1 = (v8u16)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
vec2 = (v8u16)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
vec3 = (v8u16)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
vec0 = __msa_hadd_u_h((v16u8)vec0, (v16u8)vec0);
vec1 = __msa_hadd_u_h((v16u8)vec1, (v16u8)vec1);
vec2 = __msa_hadd_u_h((v16u8)vec2, (v16u8)vec2);
vec3 = __msa_hadd_u_h((v16u8)vec3, (v16u8)vec3);
vec4 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec0);
vec5 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec1);
vec6 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec2);
vec7 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec3);
vec0 = (v8u16)__msa_pckod_w((v4i32)vec1, (v4i32)vec0);
vec1 = (v8u16)__msa_pckod_w((v4i32)vec3, (v4i32)vec2);
vec0 = (v8u16)__msa_pckod_w((v4i32)vec1, (v4i32)vec0);
tmp0 = __msa_hadd_u_w(vec4, vec4);
tmp1 = __msa_hadd_u_w(vec5, vec5);
tmp2 = __msa_hadd_u_w(vec6, vec6);
tmp3 = __msa_hadd_u_w(vec7, vec7);
tmp4 = __msa_hadd_u_w(vec0, vec0);
vec0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
vec1 = (v8u16)__msa_pckev_h((v8i16)tmp3, (v8i16)tmp2);
tmp0 = __msa_hadd_u_w(vec0, vec0);
tmp1 = __msa_hadd_u_w(vec1, vec1);
tmp0 *= const_0x2AAA;
tmp1 *= const_0x2AAA;
tmp4 *= const_0x4000;
tmp0 = (v4u32)__msa_srai_w((v4i32)tmp0, 16);
tmp1 = (v4u32)__msa_srai_w((v4i32)tmp1, 16);
tmp4 = (v4u32)__msa_srai_w((v4i32)tmp4, 16);
vec0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
vec1 = (v8u16)__msa_pckev_h((v8i16)tmp4, (v8i16)tmp4);
out = (v16u8)__msa_vshf_b(dst_mask, (v16i8)vec1, (v16i8)vec0);
dst0 = __msa_copy_u_d((v2i64)out, 0);
dst1 = __msa_copy_u_w((v4i32)out, 2);
SD(dst0, dst_ptr);
SW(dst1, dst_ptr + 8);
s += 32;
t += 32;
dst_ptr += 12;
}
}
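
// ScaleRowDown38_3_Box: same layout over three source rows; 3x3 areas
// use 0x1C71 ~= 65536/9 and the trailing 2x3 area uses 0x2AAA ~= 65536/6.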
void ScaleRowDown38_3_Box_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst_ptr,
int dst_width) {
int x, width;
const uint8_t* s = src_ptr;
const uint8_t* t0 = s + src_stride;
const uint8_t* t1 = s + src_stride * 2;
uint64_t dst0;
uint32_t dst1;
v16u8 src0, src1, src2, src3, src4, src5, out;
v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v4u32 tmp0, tmp1, tmp2, tmp3, tmp4;
v8u16 zero = {0};
v8i16 mask = {0, 1, 2, 8, 3, 4, 5, 9};
v16i8 dst_mask = {0, 2, 16, 4, 6, 18, 8, 10, 20, 12, 14, 22, 0, 0, 0, 0};
v4u32 const_0x1C71 = (v4u32)__msa_fill_w(0x1C71);
v4u32 const_0x2AAA = (v4u32)__msa_fill_w(0x2AAA);
assert((dst_width % 3 == 0) && (dst_width > 0));
width = dst_width / 3;
for (x = 0; x < width; x += 4) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)t0, 0);
src3 = (v16u8)__msa_ld_b((v16i8*)t0, 16);
src4 = (v16u8)__msa_ld_b((v16i8*)t1, 0);
src5 = (v16u8)__msa_ld_b((v16i8*)t1, 16);
vec0 = (v8u16)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
vec1 = (v8u16)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
vec2 = (v8u16)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
vec3 = (v8u16)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
vec4 = (v8u16)__msa_ilvr_b((v16i8)zero, (v16i8)src4);
vec5 = (v8u16)__msa_ilvl_b((v16i8)zero, (v16i8)src4);
vec6 = (v8u16)__msa_ilvr_b((v16i8)zero, (v16i8)src5);
vec7 = (v8u16)__msa_ilvl_b((v16i8)zero, (v16i8)src5);
vec0 = __msa_hadd_u_h((v16u8)vec0, (v16u8)vec0);
vec1 = __msa_hadd_u_h((v16u8)vec1, (v16u8)vec1);
vec2 = __msa_hadd_u_h((v16u8)vec2, (v16u8)vec2);
vec3 = __msa_hadd_u_h((v16u8)vec3, (v16u8)vec3);
vec0 += __msa_hadd_u_h((v16u8)vec4, (v16u8)vec4);
vec1 += __msa_hadd_u_h((v16u8)vec5, (v16u8)vec5);
vec2 += __msa_hadd_u_h((v16u8)vec6, (v16u8)vec6);
vec3 += __msa_hadd_u_h((v16u8)vec7, (v16u8)vec7);
vec4 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec0);
vec5 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec1);
vec6 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec2);
vec7 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec3);
vec0 = (v8u16)__msa_pckod_w((v4i32)vec1, (v4i32)vec0);
vec1 = (v8u16)__msa_pckod_w((v4i32)vec3, (v4i32)vec2);
vec0 = (v8u16)__msa_pckod_w((v4i32)vec1, (v4i32)vec0);
tmp0 = __msa_hadd_u_w(vec4, vec4);
tmp1 = __msa_hadd_u_w(vec5, vec5);
tmp2 = __msa_hadd_u_w(vec6, vec6);
tmp3 = __msa_hadd_u_w(vec7, vec7);
tmp4 = __msa_hadd_u_w(vec0, vec0);
vec0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
vec1 = (v8u16)__msa_pckev_h((v8i16)tmp3, (v8i16)tmp2);
tmp0 = __msa_hadd_u_w(vec0, vec0);
tmp1 = __msa_hadd_u_w(vec1, vec1);
tmp0 *= const_0x1C71;
tmp1 *= const_0x1C71;
tmp4 *= const_0x2AAA;
tmp0 = (v4u32)__msa_srai_w((v4i32)tmp0, 16);
tmp1 = (v4u32)__msa_srai_w((v4i32)tmp1, 16);
tmp4 = (v4u32)__msa_srai_w((v4i32)tmp4, 16);
vec0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
vec1 = (v8u16)__msa_pckev_h((v8i16)tmp4, (v8i16)tmp4);
out = (v16u8)__msa_vshf_b(dst_mask, (v16i8)vec1, (v16i8)vec0);
dst0 = __msa_copy_u_d((v2i64)out, 0);
dst1 = __msa_copy_u_w((v4i32)out, 2);
SD(dst0, dst_ptr);
SW(dst1, dst_ptr + 8);
s += 32;
t0 += 32;
t1 += 32;
dst_ptr += 12;
}
}
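
// ScaleAddRow: widens 16 source bytes to 16 bits and accumulates them
// into dst_ptr.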
void ScaleAddRow_MSA(const uint8_t* src_ptr, uint16_t* dst_ptr, int src_width) {
int x;
v16u8 src0;
v8u16 dst0, dst1;
v16i8 zero = {0};
assert(src_width > 0);
for (x = 0; x < src_width; x += 16) {
src0 = LD_UB(src_ptr);
dst0 = (v8u16)__msa_ld_h((v8i16*)dst_ptr, 0);
dst1 = (v8u16)__msa_ld_h((v8i16*)dst_ptr, 16);
dst0 += (v8u16)__msa_ilvr_b(zero, (v16i8)src0);
dst1 += (v8u16)__msa_ilvl_b(zero, (v16i8)src0);
ST_UH2(dst0, dst1, dst_ptr, 8);
src_ptr += 16;
dst_ptr += 16;
}
}
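
// ScaleFilterCols: bilinear horizontal scale. x and dx are 16.16
// fixed-point; the fraction is reduced to 7 bits, so each output is
// a + (((b - a) * f + 64) >> 7) for neighbouring source bytes a, b.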
void ScaleFilterCols_MSA(uint8_t* dst_ptr,
const uint8_t* src_ptr,
int dst_width,
int x,
int dx) {
int j;
v4i32 vec_x = __msa_fill_w(x);
v4i32 vec_dx = __msa_fill_w(dx);
v4i32 vec_const = {0, 1, 2, 3};
v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
v8u16 reg0, reg1;
v16u8 dst0;
v4i32 const_0xFFFF = __msa_fill_w(0xFFFF);
v4i32 const_0x40 = __msa_fill_w(0x40);
vec0 = vec_dx * vec_const;
vec1 = vec_dx * 4;
vec_x += vec0;
for (j = 0; j < dst_width - 1; j += 16) {
vec2 = vec_x >> 16;
vec6 = vec_x & const_0xFFFF;
vec_x += vec1;
vec3 = vec_x >> 16;
vec7 = vec_x & const_0xFFFF;
vec_x += vec1;
vec4 = vec_x >> 16;
vec8 = vec_x & const_0xFFFF;
vec_x += vec1;
vec5 = vec_x >> 16;
vec9 = vec_x & const_0xFFFF;
vec_x += vec1;
vec6 >>= 9;
vec7 >>= 9;
vec8 >>= 9;
vec9 >>= 9;
LOAD_INDEXED_DATA(src_ptr, vec2, tmp0);
LOAD_INDEXED_DATA(src_ptr, vec3, tmp1);
LOAD_INDEXED_DATA(src_ptr, vec4, tmp2);
LOAD_INDEXED_DATA(src_ptr, vec5, tmp3);
vec2 += 1;
vec3 += 1;
vec4 += 1;
vec5 += 1;
LOAD_INDEXED_DATA(src_ptr, vec2, tmp4);
LOAD_INDEXED_DATA(src_ptr, vec3, tmp5);
LOAD_INDEXED_DATA(src_ptr, vec4, tmp6);
LOAD_INDEXED_DATA(src_ptr, vec5, tmp7);
tmp4 -= tmp0;
tmp5 -= tmp1;
tmp6 -= tmp2;
tmp7 -= tmp3;
tmp4 *= vec6;
tmp5 *= vec7;
tmp6 *= vec8;
tmp7 *= vec9;
tmp4 += const_0x40;
tmp5 += const_0x40;
tmp6 += const_0x40;
tmp7 += const_0x40;
tmp4 >>= 7;
tmp5 >>= 7;
tmp6 >>= 7;
tmp7 >>= 7;
tmp0 += tmp4;
tmp1 += tmp5;
tmp2 += tmp6;
tmp3 += tmp7;
reg0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
reg1 = (v8u16)__msa_pckev_h((v8i16)tmp3, (v8i16)tmp2);
dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
__msa_st_b(dst0, dst_ptr, 0);
dst_ptr += 16;
}
}
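
// For reference, a scalar sketch of the same 16.16 stepping (names are
// illustrative, not part of the original file):
//   for (j = 0; j < dst_width; ++j) {
//     int a = src_ptr[x >> 16];
//     int b = src_ptr[(x >> 16) + 1];
//     int f = (x >> 9) & 0x7f;  // 7-bit blend fraction
//     dst_ptr[j] = (uint8_t)(a + (((b - a) * f + 64) >> 7));
//     x += dx;
//   }

// ScaleARGBCols: point-samples whole 32-bit ARGB pixels at x >> 16.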
void ScaleARGBCols_MSA(uint8_t* dst_argb,
const uint8_t* src_argb,
int dst_width,
int x,
int dx) {
const uint32_t* src = (const uint32_t*)(src_argb);
uint32_t* dst = (uint32_t*)(dst_argb);
int j;
v4i32 x_vec = __msa_fill_w(x);
v4i32 dx_vec = __msa_fill_w(dx);
v4i32 const_vec = {0, 1, 2, 3};
v4i32 vec0, vec1, vec2;
v4i32 dst0;
vec0 = dx_vec * const_vec;
vec1 = dx_vec * 4;
x_vec += vec0;
for (j = 0; j < dst_width; j += 4) {
vec2 = x_vec >> 16;
x_vec += vec1;
LOAD_INDEXED_DATA(src, vec2, dst0);
__msa_st_w(dst0, dst, 0);
dst += 4;
}
}
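
// ScaleARGBFilterCols: bilinear ARGB scale; the 7-bit fraction and its
// complement are splatted across each 32-bit lane and applied to all
// four channels with unsigned dot products, then shifted right by 7.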
void ScaleARGBFilterCols_MSA(uint8_t* dst_argb,
const uint8_t* src_argb,
int dst_width,
int x,
int dx) {
const uint32_t* src = (const uint32_t*)(src_argb);
int j;
v4u32 src0, src1, src2, src3;
v4u32 vec0, vec1, vec2, vec3;
v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
v16u8 mult0, mult1, mult2, mult3;
v8u16 tmp0, tmp1, tmp2, tmp3;
v16u8 dst0, dst1;
v4u32 vec_x = (v4u32)__msa_fill_w(x);
v4u32 vec_dx = (v4u32)__msa_fill_w(dx);
v4u32 vec_const = {0, 1, 2, 3};
v16u8 const_0x7f = (v16u8)__msa_fill_b(0x7f);
vec0 = vec_dx * vec_const;
vec1 = vec_dx * 4;
vec_x += vec0;
for (j = 0; j < dst_width - 1; j += 8) {
vec2 = vec_x >> 16;
reg0 = (v16u8)(vec_x >> 9);
vec_x += vec1;
vec3 = vec_x >> 16;
reg1 = (v16u8)(vec_x >> 9);
vec_x += vec1;
reg0 = reg0 & const_0x7f;
reg1 = reg1 & const_0x7f;
reg0 = (v16u8)__msa_shf_b((v16i8)reg0, 0);
reg1 = (v16u8)__msa_shf_b((v16i8)reg1, 0);
reg2 = reg0 ^ const_0x7f;
reg3 = reg1 ^ const_0x7f;
mult0 = (v16u8)__msa_ilvr_b((v16i8)reg0, (v16i8)reg2);
mult1 = (v16u8)__msa_ilvl_b((v16i8)reg0, (v16i8)reg2);
mult2 = (v16u8)__msa_ilvr_b((v16i8)reg1, (v16i8)reg3);
mult3 = (v16u8)__msa_ilvl_b((v16i8)reg1, (v16i8)reg3);
LOAD_INDEXED_DATA(src, vec2, src0);
LOAD_INDEXED_DATA(src, vec3, src1);
vec2 += 1;
vec3 += 1;
LOAD_INDEXED_DATA(src, vec2, src2);
LOAD_INDEXED_DATA(src, vec3, src3);
reg4 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
reg5 = (v16u8)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
reg6 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
reg7 = (v16u8)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
tmp0 = __msa_dotp_u_h(reg4, mult0);
tmp1 = __msa_dotp_u_h(reg5, mult1);
tmp2 = __msa_dotp_u_h(reg6, mult2);
tmp3 = __msa_dotp_u_h(reg7, mult3);
tmp0 >>= 7;
tmp1 >>= 7;
tmp2 >>= 7;
tmp3 >>= 7;
dst0 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
dst1 = (v16u8)__msa_pckev_b((v16i8)tmp3, (v16i8)tmp2);
__msa_st_b(dst0, dst_argb, 0);
__msa_st_b(dst1, dst_argb, 16);
dst_argb += 32;
}
}
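
// ScaleRowDown34: keeps 3 of every 4 bytes (drops index 2 of each
// group), selected with three shuffle masks spanning 64 input bytes.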
void ScaleRowDown34_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x;
(void)src_stride;
v16u8 src0, src1, src2, src3;
v16u8 vec0, vec1, vec2;
v16i8 mask0 = {0, 1, 3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16, 17, 19, 20};
v16i8 mask1 = {5, 7, 8, 9, 11, 12, 13, 15, 16, 17, 19, 20, 21, 23, 24, 25};
v16i8 mask2 = {11, 12, 13, 15, 16, 17, 19, 20,
21, 23, 24, 25, 27, 28, 29, 31};
assert((dst_width % 3 == 0) && (dst_width > 0));
for (x = 0; x < dst_width; x += 48) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 48);
vec0 = (v16u8)__msa_vshf_b(mask0, (v16i8)src1, (v16i8)src0);
vec1 = (v16u8)__msa_vshf_b(mask1, (v16i8)src2, (v16i8)src1);
vec2 = (v16u8)__msa_vshf_b(mask2, (v16i8)src3, (v16i8)src2);
__msa_st_b((v16i8)vec0, dst, 0);
__msa_st_b((v16i8)vec1, dst, 16);
__msa_st_b((v16i8)vec2, dst, 32);
src_ptr += 64;
dst += 48;
}
}
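
// ScaleRowDown34_0_Box: 3/4 box filter where the two source rows are
// blended 3:1; per-output weights come from const0/1/2 with matching
// per-lane rounding shifts.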
void ScaleRowDown34_0_Box_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* d,
int dst_width) {
const uint8_t* s = src_ptr;
const uint8_t* t = src_ptr + src_stride;
int x;
v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0, dst1, dst2;
v16u8 vec0, vec1, vec2, vec3, vec4, vec5;
v16u8 vec6, vec7, vec8, vec9, vec10, vec11;
v8i16 reg0, reg1, reg2, reg3, reg4, reg5;
v8i16 reg6, reg7, reg8, reg9, reg10, reg11;
v16u8 const0 = {3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1};
v16u8 const1 = {1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1};
v16u8 const2 = {1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3};
v16i8 mask0 = {0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10};
v16i8 mask1 = {10, 11, 12, 13, 13, 14, 14, 15,
16, 17, 17, 18, 18, 19, 20, 21};
v16i8 mask2 = {5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 12, 13, 13, 14, 14, 15};
v8i16 shft0 = {2, 1, 2, 2, 1, 2, 2, 1};
v8i16 shft1 = {2, 2, 1, 2, 2, 1, 2, 2};
v8i16 shft2 = {1, 2, 2, 1, 2, 2, 1, 2};
assert((dst_width % 3 == 0) && (dst_width > 0));
for (x = 0; x < dst_width; x += 48) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)s, 48);
src4 = (v16u8)__msa_ld_b((v16i8*)t, 0);
src5 = (v16u8)__msa_ld_b((v16i8*)t, 16);
src6 = (v16u8)__msa_ld_b((v16i8*)t, 32);
src7 = (v16u8)__msa_ld_b((v16i8*)t, 48);
vec0 = (v16u8)__msa_vshf_b(mask0, (v16i8)src0, (v16i8)src0);
vec1 = (v16u8)__msa_vshf_b(mask1, (v16i8)src1, (v16i8)src0);
vec2 = (v16u8)__msa_vshf_b(mask2, (v16i8)src1, (v16i8)src1);
vec3 = (v16u8)__msa_vshf_b(mask0, (v16i8)src2, (v16i8)src2);
vec4 = (v16u8)__msa_vshf_b(mask1, (v16i8)src3, (v16i8)src2);
vec5 = (v16u8)__msa_vshf_b(mask2, (v16i8)src3, (v16i8)src3);
vec6 = (v16u8)__msa_vshf_b(mask0, (v16i8)src4, (v16i8)src4);
vec7 = (v16u8)__msa_vshf_b(mask1, (v16i8)src5, (v16i8)src4);
vec8 = (v16u8)__msa_vshf_b(mask2, (v16i8)src5, (v16i8)src5);
vec9 = (v16u8)__msa_vshf_b(mask0, (v16i8)src6, (v16i8)src6);
vec10 = (v16u8)__msa_vshf_b(mask1, (v16i8)src7, (v16i8)src6);
vec11 = (v16u8)__msa_vshf_b(mask2, (v16i8)src7, (v16i8)src7);
reg0 = (v8i16)__msa_dotp_u_h(vec0, const0);
reg1 = (v8i16)__msa_dotp_u_h(vec1, const1);
reg2 = (v8i16)__msa_dotp_u_h(vec2, const2);
reg3 = (v8i16)__msa_dotp_u_h(vec3, const0);
reg4 = (v8i16)__msa_dotp_u_h(vec4, const1);
reg5 = (v8i16)__msa_dotp_u_h(vec5, const2);
reg6 = (v8i16)__msa_dotp_u_h(vec6, const0);
reg7 = (v8i16)__msa_dotp_u_h(vec7, const1);
reg8 = (v8i16)__msa_dotp_u_h(vec8, const2);
reg9 = (v8i16)__msa_dotp_u_h(vec9, const0);
reg10 = (v8i16)__msa_dotp_u_h(vec10, const1);
reg11 = (v8i16)__msa_dotp_u_h(vec11, const2);
reg0 = __msa_srar_h(reg0, shft0);
reg1 = __msa_srar_h(reg1, shft1);
reg2 = __msa_srar_h(reg2, shft2);
reg3 = __msa_srar_h(reg3, shft0);
reg4 = __msa_srar_h(reg4, shft1);
reg5 = __msa_srar_h(reg5, shft2);
reg6 = __msa_srar_h(reg6, shft0);
reg7 = __msa_srar_h(reg7, shft1);
reg8 = __msa_srar_h(reg8, shft2);
reg9 = __msa_srar_h(reg9, shft0);
reg10 = __msa_srar_h(reg10, shft1);
reg11 = __msa_srar_h(reg11, shft2);
reg0 = reg0 * 3 + reg6;
reg1 = reg1 * 3 + reg7;
reg2 = reg2 * 3 + reg8;
reg3 = reg3 * 3 + reg9;
reg4 = reg4 * 3 + reg10;
reg5 = reg5 * 3 + reg11;
reg0 = __msa_srari_h(reg0, 2);
reg1 = __msa_srari_h(reg1, 2);
reg2 = __msa_srari_h(reg2, 2);
reg3 = __msa_srari_h(reg3, 2);
reg4 = __msa_srari_h(reg4, 2);
reg5 = __msa_srari_h(reg5, 2);
dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
dst1 = (v16u8)__msa_pckev_b((v16i8)reg3, (v16i8)reg2);
dst2 = (v16u8)__msa_pckev_b((v16i8)reg5, (v16i8)reg4);
__msa_st_b((v16i8)dst0, d, 0);
__msa_st_b((v16i8)dst1, d, 16);
__msa_st_b((v16i8)dst2, d, 32);
s += 64;
t += 64;
d += 48;
}
}
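
// ScaleRowDown34_1_Box: same filtering with the two rows blended 1:1.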
void ScaleRowDown34_1_Box_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* d,
int dst_width) {
const uint8_t* s = src_ptr;
const uint8_t* t = src_ptr + src_stride;
int x;
v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0, dst1, dst2;
v16u8 vec0, vec1, vec2, vec3, vec4, vec5;
v16u8 vec6, vec7, vec8, vec9, vec10, vec11;
v8i16 reg0, reg1, reg2, reg3, reg4, reg5;
v8i16 reg6, reg7, reg8, reg9, reg10, reg11;
v16u8 const0 = {3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1};
v16u8 const1 = {1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1};
v16u8 const2 = {1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3};
v16i8 mask0 = {0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10};
v16i8 mask1 = {10, 11, 12, 13, 13, 14, 14, 15,
16, 17, 17, 18, 18, 19, 20, 21};
v16i8 mask2 = {5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 12, 13, 13, 14, 14, 15};
v8i16 shft0 = {2, 1, 2, 2, 1, 2, 2, 1};
v8i16 shft1 = {2, 2, 1, 2, 2, 1, 2, 2};
v8i16 shft2 = {1, 2, 2, 1, 2, 2, 1, 2};
assert((dst_width % 3 == 0) && (dst_width > 0));
for (x = 0; x < dst_width; x += 48) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)s, 48);
src4 = (v16u8)__msa_ld_b((v16i8*)t, 0);
src5 = (v16u8)__msa_ld_b((v16i8*)t, 16);
src6 = (v16u8)__msa_ld_b((v16i8*)t, 32);
src7 = (v16u8)__msa_ld_b((v16i8*)t, 48);
vec0 = (v16u8)__msa_vshf_b(mask0, (v16i8)src0, (v16i8)src0);
vec1 = (v16u8)__msa_vshf_b(mask1, (v16i8)src1, (v16i8)src0);
vec2 = (v16u8)__msa_vshf_b(mask2, (v16i8)src1, (v16i8)src1);
vec3 = (v16u8)__msa_vshf_b(mask0, (v16i8)src2, (v16i8)src2);
vec4 = (v16u8)__msa_vshf_b(mask1, (v16i8)src3, (v16i8)src2);
vec5 = (v16u8)__msa_vshf_b(mask2, (v16i8)src3, (v16i8)src3);
vec6 = (v16u8)__msa_vshf_b(mask0, (v16i8)src4, (v16i8)src4);
vec7 = (v16u8)__msa_vshf_b(mask1, (v16i8)src5, (v16i8)src4);
vec8 = (v16u8)__msa_vshf_b(mask2, (v16i8)src5, (v16i8)src5);
vec9 = (v16u8)__msa_vshf_b(mask0, (v16i8)src6, (v16i8)src6);
vec10 = (v16u8)__msa_vshf_b(mask1, (v16i8)src7, (v16i8)src6);
vec11 = (v16u8)__msa_vshf_b(mask2, (v16i8)src7, (v16i8)src7);
reg0 = (v8i16)__msa_dotp_u_h(vec0, const0);
reg1 = (v8i16)__msa_dotp_u_h(vec1, const1);
reg2 = (v8i16)__msa_dotp_u_h(vec2, const2);
reg3 = (v8i16)__msa_dotp_u_h(vec3, const0);
reg4 = (v8i16)__msa_dotp_u_h(vec4, const1);
reg5 = (v8i16)__msa_dotp_u_h(vec5, const2);
reg6 = (v8i16)__msa_dotp_u_h(vec6, const0);
reg7 = (v8i16)__msa_dotp_u_h(vec7, const1);
reg8 = (v8i16)__msa_dotp_u_h(vec8, const2);
reg9 = (v8i16)__msa_dotp_u_h(vec9, const0);
reg10 = (v8i16)__msa_dotp_u_h(vec10, const1);
reg11 = (v8i16)__msa_dotp_u_h(vec11, const2);
reg0 = __msa_srar_h(reg0, shft0);
reg1 = __msa_srar_h(reg1, shft1);
reg2 = __msa_srar_h(reg2, shft2);
reg3 = __msa_srar_h(reg3, shft0);
reg4 = __msa_srar_h(reg4, shft1);
reg5 = __msa_srar_h(reg5, shft2);
reg6 = __msa_srar_h(reg6, shft0);
reg7 = __msa_srar_h(reg7, shft1);
reg8 = __msa_srar_h(reg8, shft2);
reg9 = __msa_srar_h(reg9, shft0);
reg10 = __msa_srar_h(reg10, shft1);
reg11 = __msa_srar_h(reg11, shft2);
reg0 += reg6;
reg1 += reg7;
reg2 += reg8;
reg3 += reg9;
reg4 += reg10;
reg5 += reg11;
reg0 = __msa_srari_h(reg0, 1);
reg1 = __msa_srari_h(reg1, 1);
reg2 = __msa_srari_h(reg2, 1);
reg3 = __msa_srari_h(reg3, 1);
reg4 = __msa_srari_h(reg4, 1);
reg5 = __msa_srari_h(reg5, 1);
dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
dst1 = (v16u8)__msa_pckev_b((v16i8)reg3, (v16i8)reg2);
dst2 = (v16u8)__msa_pckev_b((v16i8)reg5, (v16i8)reg4);
__msa_st_b((v16i8)dst0, d, 0);
__msa_st_b((v16i8)dst1, d, 16);
__msa_st_b((v16i8)dst2, d, 32);
s += 64;
t += 64;
d += 48;
}
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,35 +0,0 @@
#!/bin/bash
set -x
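# Profile each listed libyuv kernel with Linux perf and grep the report
# for time spent in AVX routines.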
function runbenchmark1 {
perf record /google/src/cloud/fbarchard/clean/google3/blaze-bin/third_party/libyuv/libyuv_test --gunit_filter=*$1 --libyuv_width=1280 --libyuv_height=720 --libyuv_repeat=1000 --libyuv_flags=-1 --libyuv_cpu_info=-1
perf report | grep AVX
}
runbenchmark1 ABGRToI420
runbenchmark1 Android420ToI420
runbenchmark1 ARGBToI420
runbenchmark1 Convert16To8Plane
runbenchmark1 ConvertToARGB
runbenchmark1 ConvertToI420
runbenchmark1 CopyPlane
runbenchmark1 H010ToAB30
runbenchmark1 H010ToAR30
runbenchmark1 HalfFloatPlane
runbenchmark1 I010ToAB30
runbenchmark1 I010ToAR30
runbenchmark1 I420Copy
runbenchmark1 I420Psnr
runbenchmark1 I420Scale
runbenchmark1 I420Ssim
runbenchmark1 I420ToARGB
runbenchmark1 I420ToNV12
runbenchmark1 I420ToUYVY
runbenchmark1 I422ToI420
runbenchmark1 InitCpuFlags
runbenchmark1 J420ToARGB
runbenchmark1 NV12ToARGB
runbenchmark1 NV12ToI420
runbenchmark1 NV12ToI420Rotate
runbenchmark1 SetCpuFlags
runbenchmark1 YUY2ToI420

View File

@@ -1,62 +0,0 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/video_common.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
struct FourCCAliasEntry {
uint32_t alias;
uint32_t canonical;
};
#define NUM_ALIASES 18
static const struct FourCCAliasEntry kFourCCAliases[NUM_ALIASES] = {
{FOURCC_IYUV, FOURCC_I420},
{FOURCC_YU12, FOURCC_I420},
{FOURCC_YU16, FOURCC_I422},
{FOURCC_YU24, FOURCC_I444},
{FOURCC_YUYV, FOURCC_YUY2},
{FOURCC_YUVS, FOURCC_YUY2}, // kCMPixelFormat_422YpCbCr8_yuvs
{FOURCC_HDYC, FOURCC_UYVY},
{FOURCC_2VUY, FOURCC_UYVY}, // kCMPixelFormat_422YpCbCr8
{FOURCC_JPEG, FOURCC_MJPG}, // Note: JPEG has DHT while MJPG does not.
{FOURCC_DMB1, FOURCC_MJPG},
{FOURCC_BA81, FOURCC_BGGR}, // deprecated.
{FOURCC_RGB3, FOURCC_RAW},
{FOURCC_BGR3, FOURCC_24BG},
{FOURCC_CM32, FOURCC_BGRA}, // kCMPixelFormat_32ARGB
{FOURCC_CM24, FOURCC_RAW}, // kCMPixelFormat_24RGB
{FOURCC_L555, FOURCC_RGBO}, // kCMPixelFormat_16LE555
{FOURCC_L565, FOURCC_RGBP}, // kCMPixelFormat_16LE565
{FOURCC_5551, FOURCC_RGBO}, // kCMPixelFormat_16LE5551
};
// TODO(fbarchard): Consider mapping kCMPixelFormat_32BGRA to FOURCC_ARGB.
// {FOURCC_BGRA, FOURCC_ARGB}, // kCMPixelFormat_32BGRA
LIBYUV_API
uint32_t CanonicalFourCC(uint32_t fourcc) {
int i;
for (i = 0; i < NUM_ALIASES; ++i) {
if (kFourCCAliases[i].alias == fourcc) {
return kFourCCAliases[i].canonical;
}
}
// Not an alias, so return it as-is.
return fourcc;
}
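// Example: CanonicalFourCC(FOURCC_IYUV) returns FOURCC_I420, while an
// unaliased code such as FOURCC_NV12 is returned unchanged.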
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

View File

@@ -1,9 +0,0 @@
psnr: psnr.cc ssim.cc psnr_main.cc
ifeq ($(CXX),icl)
	$(CXX) /arch:SSE2 /Ox /openmp psnr.cc ssim.cc psnr_main.cc
else
	$(CXX) -msse2 -O3 -fopenmp -static -o psnr psnr.cc ssim.cc psnr_main.cc -Wl,--strip-all
endif
# For macOS:
# /usr/local/bin/g++-7 -msse2 -O3 -fopenmp -Bstatic -o psnr psnr.cc ssim.cc psnr_main.cc
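# Build with `make psnr`; requires a compiler with SSE2 and OpenMP support.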

Some files were not shown because too many files have changed in this diff