1. Use libyuv instead of ffmpeg for NV12 <-> YUV420P conversion; 2. Build libyuv from a local package (branch stable 2021.4.28, commit eb6e7bb63738e29efd82ea3cf2a115238a89fa51)

dijunkun
2024-05-08 16:34:53 +08:00
parent e0d2ab5a9f
commit 6bc8aaabdc
126 changed files with 98754 additions and 241 deletions
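
For context, a minimal sketch of the NV12 <-> I420 (YUV420P) round trip this commit switches to. NV12ToI420 lives in the (suppressed) convert.cc below and I420ToNV12 in convert_from.cc; the frame dimensions, tight strides, and helper name here are illustrative assumptions, not part of the commit:

#include <cstdint>
#include <vector>
#include "libyuv/convert.h"       // NV12ToI420
#include "libyuv/convert_from.h"  // I420ToNV12

// Convert one tightly packed NV12 frame to I420 planes and back.
void Nv12I420RoundTrip(const uint8_t* nv12, int width, int height) {
  const int half_w = (width + 1) / 2;
  const int half_h = (height + 1) / 2;
  std::vector<uint8_t> y(width * height);
  std::vector<uint8_t> u(half_w * half_h);
  std::vector<uint8_t> v(half_w * half_h);
  // NV12 is a Y plane followed by one interleaved UV plane.
  const uint8_t* src_y = nv12;
  const uint8_t* src_uv = nv12 + width * height;
  libyuv::NV12ToI420(src_y, width, src_uv, half_w * 2,
                     y.data(), width, u.data(), half_w, v.data(), half_w,
                     width, height);
  std::vector<uint8_t> nv12_out(width * height + half_w * 2 * half_h);
  libyuv::I420ToNV12(y.data(), width, u.data(), half_w, v.data(), half_w,
                     nv12_out.data(), width,
                     nv12_out.data() + width * height, half_w * 2,
                     width, height);
}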

thirdparty/libyuv/source/compare.cc vendored Normal file

@@ -0,0 +1,440 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/compare.h"
#include <float.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#include "libyuv/cpu_id.h"
#include "libyuv/row.h"
#include "libyuv/video_common.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// hash seed of 5381 recommended.
LIBYUV_API
uint32_t HashDjb2(const uint8_t* src, uint64_t count, uint32_t seed) {
const int kBlockSize = 1 << 15; // 32768;
int remainder;
uint32_t (*HashDjb2_SSE)(const uint8_t* src, int count, uint32_t seed) =
HashDjb2_C;
#if defined(HAS_HASHDJB2_SSE41)
if (TestCpuFlag(kCpuHasSSE41)) {
HashDjb2_SSE = HashDjb2_SSE41;
}
#endif
#if defined(HAS_HASHDJB2_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
HashDjb2_SSE = HashDjb2_AVX2;
}
#endif
while (count >= (uint64_t)(kBlockSize)) {
seed = HashDjb2_SSE(src, kBlockSize, seed);
src += kBlockSize;
count -= kBlockSize;
}
remainder = (int)count & ~15;
if (remainder) {
seed = HashDjb2_SSE(src, remainder, seed);
src += remainder;
count -= remainder;
}
remainder = (int)count & 15;
if (remainder) {
seed = HashDjb2_C(src, remainder, seed);
}
return seed;
}
static uint32_t ARGBDetectRow_C(const uint8_t* argb, int width) {
int x;
for (x = 0; x < width - 1; x += 2) {
if (argb[0] != 255) { // First byte is not Alpha of 255, so not ARGB.
return FOURCC_BGRA;
}
if (argb[3] != 255) { // Fourth byte is not Alpha of 255, so not BGRA.
return FOURCC_ARGB;
}
if (argb[4] != 255) { // Second pixel first byte is not Alpha of 255.
return FOURCC_BGRA;
}
if (argb[7] != 255) { // Second pixel fourth byte is not Alpha of 255.
return FOURCC_ARGB;
}
argb += 8;
}
if (width & 1) {
if (argb[0] != 255) { // First byte is not Alpha of 255, so not ARGB.
return FOURCC_BGRA;
}
if (argb[3] != 255) { // 4th byte is not Alpha of 255, so not BGRA.
return FOURCC_ARGB;
}
}
return 0;
}
// Scan an opaque argb image and return fourcc based on alpha offset.
// Returns FOURCC_ARGB, FOURCC_BGRA, or 0 if unknown.
LIBYUV_API
uint32_t ARGBDetect(const uint8_t* argb,
int stride_argb,
int width,
int height) {
uint32_t fourcc = 0;
int h;
// Coalesce rows.
if (stride_argb == width * 4) {
width *= height;
height = 1;
stride_argb = 0;
}
for (h = 0; h < height && fourcc == 0; ++h) {
fourcc = ARGBDetectRow_C(argb, width);
argb += stride_argb;
}
return fourcc;
}
// NEON version accumulates in 16 bit shorts which overflow at 65536 bytes.
// So actual maximum is 1 less loop, which is 65536 - 32 bytes.
LIBYUV_API
uint64_t ComputeHammingDistance(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
const int kBlockSize = 1 << 15; // 32768;
const int kSimdSize = 64;
// SIMD handles multiples of 64 bytes; C handles the remainder
int remainder = count & (kBlockSize - 1) & ~(kSimdSize - 1);
uint64_t diff = 0;
int i;
uint32_t (*HammingDistance)(const uint8_t* src_a, const uint8_t* src_b,
int count) = HammingDistance_C;
#if defined(HAS_HAMMINGDISTANCE_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
HammingDistance = HammingDistance_NEON;
}
#endif
#if defined(HAS_HAMMINGDISTANCE_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
HammingDistance = HammingDistance_SSSE3;
}
#endif
#if defined(HAS_HAMMINGDISTANCE_SSE42)
if (TestCpuFlag(kCpuHasSSE42)) {
HammingDistance = HammingDistance_SSE42;
}
#endif
#if defined(HAS_HAMMINGDISTANCE_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
HammingDistance = HammingDistance_AVX2;
}
#endif
#if defined(HAS_HAMMINGDISTANCE_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
HammingDistance = HammingDistance_MMI;
}
#endif
#if defined(HAS_HAMMINGDISTANCE_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
HammingDistance = HammingDistance_MSA;
}
#endif
#ifdef _OPENMP
#pragma omp parallel for reduction(+ : diff)
#endif
for (i = 0; i < (count - (kBlockSize - 1)); i += kBlockSize) {
diff += HammingDistance(src_a + i, src_b + i, kBlockSize);
}
src_a += count & ~(kBlockSize - 1);
src_b += count & ~(kBlockSize - 1);
if (remainder) {
diff += HammingDistance(src_a, src_b, remainder);
src_a += remainder;
src_b += remainder;
}
remainder = count & (kSimdSize - 1);
if (remainder) {
diff += HammingDistance_C(src_a, src_b, remainder);
}
return diff;
}
// TODO(fbarchard): Refactor into row function.
LIBYUV_API
uint64_t ComputeSumSquareError(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
// SumSquareError returns values 0 to 65535 for each squared difference.
// Up to 65536 of those can be summed and remain within a uint32_t.
// After each block of 65536 pixels, accumulate into a uint64_t.
const int kBlockSize = 65536;
int remainder = count & (kBlockSize - 1) & ~31;
uint64_t sse = 0;
int i;
uint32_t (*SumSquareError)(const uint8_t* src_a, const uint8_t* src_b,
int count) = SumSquareError_C;
#if defined(HAS_SUMSQUAREERROR_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
SumSquareError = SumSquareError_NEON;
}
#endif
#if defined(HAS_SUMSQUAREERROR_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
// Note only used for multiples of 16 so count is not checked.
SumSquareError = SumSquareError_SSE2;
}
#endif
#if defined(HAS_SUMSQUAREERROR_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
// Note only used for multiples of 32 so count is not checked.
SumSquareError = SumSquareError_AVX2;
}
#endif
#if defined(HAS_SUMSQUAREERROR_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
SumSquareError = SumSquareError_MMI;
}
#endif
#if defined(HAS_SUMSQUAREERROR_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
SumSquareError = SumSquareError_MSA;
}
#endif
#ifdef _OPENMP
#pragma omp parallel for reduction(+ : sse)
#endif
for (i = 0; i < (count - (kBlockSize - 1)); i += kBlockSize) {
sse += SumSquareError(src_a + i, src_b + i, kBlockSize);
}
src_a += count & ~(kBlockSize - 1);
src_b += count & ~(kBlockSize - 1);
if (remainder) {
sse += SumSquareError(src_a, src_b, remainder);
src_a += remainder;
src_b += remainder;
}
remainder = count & 31;
if (remainder) {
sse += SumSquareError_C(src_a, src_b, remainder);
}
return sse;
}
LIBYUV_API
uint64_t ComputeSumSquareErrorPlane(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b,
int width,
int height) {
uint64_t sse = 0;
int h;
// Coalesce rows.
if (stride_a == width && stride_b == width) {
width *= height;
height = 1;
stride_a = stride_b = 0;
}
for (h = 0; h < height; ++h) {
sse += ComputeSumSquareError(src_a, src_b, width);
src_a += stride_a;
src_b += stride_b;
}
return sse;
}
LIBYUV_API
double SumSquareErrorToPsnr(uint64_t sse, uint64_t count) {
double psnr;
if (sse > 0) {
// Note: "mse" here is count / sse, the reciprocal of the mean squared
// error, so 255^2 * mse equals 255^2 * count / sse.
double mse = (double)count / (double)sse;
psnr = 10.0 * log10(255.0 * 255.0 * mse);
} else {
psnr = kMaxPsnr; // Limit to prevent divide by 0
}
if (psnr > kMaxPsnr) {
psnr = kMaxPsnr;
}
return psnr;
}
LIBYUV_API
double CalcFramePsnr(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b,
int width,
int height) {
const uint64_t samples = (uint64_t)width * (uint64_t)height;
const uint64_t sse = ComputeSumSquareErrorPlane(src_a, stride_a, src_b,
stride_b, width, height);
return SumSquareErrorToPsnr(sse, samples);
}
LIBYUV_API
double I420Psnr(const uint8_t* src_y_a,
int stride_y_a,
const uint8_t* src_u_a,
int stride_u_a,
const uint8_t* src_v_a,
int stride_v_a,
const uint8_t* src_y_b,
int stride_y_b,
const uint8_t* src_u_b,
int stride_u_b,
const uint8_t* src_v_b,
int stride_v_b,
int width,
int height) {
const uint64_t sse_y = ComputeSumSquareErrorPlane(
src_y_a, stride_y_a, src_y_b, stride_y_b, width, height);
const int width_uv = (width + 1) >> 1;
const int height_uv = (height + 1) >> 1;
const uint64_t sse_u = ComputeSumSquareErrorPlane(
src_u_a, stride_u_a, src_u_b, stride_u_b, width_uv, height_uv);
const uint64_t sse_v = ComputeSumSquareErrorPlane(
src_v_a, stride_v_a, src_v_b, stride_v_b, width_uv, height_uv);
const uint64_t samples = (uint64_t)width * (uint64_t)height +
2 * ((uint64_t)width_uv * (uint64_t)height_uv);
const uint64_t sse = sse_y + sse_u + sse_v;
return SumSquareErrorToPsnr(sse, samples);
}
static const int64_t cc1 = 26634; // 64^2 * (.01*255)^2
static const int64_t cc2 = 239708; // 64^2 * (.03*255)^2
static double Ssim8x8_C(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b) {
int64_t sum_a = 0;
int64_t sum_b = 0;
int64_t sum_sq_a = 0;
int64_t sum_sq_b = 0;
int64_t sum_axb = 0;
int i;
for (i = 0; i < 8; ++i) {
int j;
for (j = 0; j < 8; ++j) {
sum_a += src_a[j];
sum_b += src_b[j];
sum_sq_a += src_a[j] * src_a[j];
sum_sq_b += src_b[j] * src_b[j];
sum_axb += src_a[j] * src_b[j];
}
src_a += stride_a;
src_b += stride_b;
}
{
const int64_t count = 64;
// scale the constants by number of pixels
const int64_t c1 = (cc1 * count * count) >> 12;
const int64_t c2 = (cc2 * count * count) >> 12;
const int64_t sum_a_x_sum_b = sum_a * sum_b;
const int64_t ssim_n = (2 * sum_a_x_sum_b + c1) *
(2 * count * sum_axb - 2 * sum_a_x_sum_b + c2);
const int64_t sum_a_sq = sum_a * sum_a;
const int64_t sum_b_sq = sum_b * sum_b;
const int64_t ssim_d =
(sum_a_sq + sum_b_sq + c1) *
(count * sum_sq_a - sum_a_sq + count * sum_sq_b - sum_b_sq + c2);
if (ssim_d == 0.0) {
return DBL_MAX;
}
return ssim_n * 1.0 / ssim_d;
}
}
// We are using an 8x8 moving window with the starting location of each 8x8
// window on the 4x4 pixel grid. This arrangement allows the windows to
// overlap block boundaries to penalize blocking artifacts.
LIBYUV_API
double CalcFrameSsim(const uint8_t* src_a,
int stride_a,
const uint8_t* src_b,
int stride_b,
int width,
int height) {
int samples = 0;
double ssim_total = 0;
double (*Ssim8x8)(const uint8_t* src_a, int stride_a, const uint8_t* src_b,
int stride_b) = Ssim8x8_C;
// sample point start with each 4x4 location
int i;
for (i = 0; i < height - 8; i += 4) {
int j;
for (j = 0; j < width - 8; j += 4) {
ssim_total += Ssim8x8(src_a + j, stride_a, src_b + j, stride_b);
samples++;
}
src_a += stride_a * 4;
src_b += stride_b * 4;
}
ssim_total /= samples;
return ssim_total;
}
LIBYUV_API
double I420Ssim(const uint8_t* src_y_a,
int stride_y_a,
const uint8_t* src_u_a,
int stride_u_a,
const uint8_t* src_v_a,
int stride_v_a,
const uint8_t* src_y_b,
int stride_y_b,
const uint8_t* src_u_b,
int stride_u_b,
const uint8_t* src_v_b,
int stride_v_b,
int width,
int height) {
const double ssim_y =
CalcFrameSsim(src_y_a, stride_y_a, src_y_b, stride_y_b, width, height);
const int width_uv = (width + 1) >> 1;
const int height_uv = (height + 1) >> 1;
const double ssim_u = CalcFrameSsim(src_u_a, stride_u_a, src_u_b, stride_u_b,
width_uv, height_uv);
const double ssim_v = CalcFrameSsim(src_v_a, stride_v_a, src_v_b, stride_v_b,
width_uv, height_uv);
return ssim_y * 0.8 + 0.1 * (ssim_u + ssim_v);
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
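
A short usage sketch for the comparison API above; the plane pointers and tight strides are assumptions for illustration. I420Psnr sums the squared error over Y plus the half-resolution U and V planes, then applies SumSquareErrorToPsnr, i.e. psnr = 10 * log10(255^2 * count / sse), clamped to kMaxPsnr for identical frames:

#include <cstdint>
#include "libyuv/compare.h"

// Full-frame PSNR between two tightly packed I420 frames a_* and b_*.
double FramePsnr(const uint8_t* a_y, const uint8_t* a_u, const uint8_t* a_v,
                 const uint8_t* b_y, const uint8_t* b_u, const uint8_t* b_v,
                 int width, int height) {
  const int half_w = (width + 1) / 2;
  return libyuv::I420Psnr(a_y, width, a_u, half_w, a_v, half_w,
                          b_y, width, b_u, half_w, b_v, half_w,
                          width, height);
}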

thirdparty/libyuv/source/compare_common.cc vendored Normal file

@@ -0,0 +1,74 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Hakmem method for hamming distance.
uint32_t HammingDistance_C(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
int i;
for (i = 0; i < count - 3; i += 4) {
uint32_t x = *((const uint32_t*)src_a) ^ *((const uint32_t*)src_b);
uint32_t u = x - ((x >> 1) & 0x55555555);
u = ((u >> 2) & 0x33333333) + (u & 0x33333333);
diff += ((((u + (u >> 4)) & 0x0f0f0f0f) * 0x01010101) >> 24);
src_a += 4;
src_b += 4;
}
for (; i < count; ++i) {
uint32_t x = *src_a ^ *src_b;
uint32_t u = x - ((x >> 1) & 0x55);
u = ((u >> 2) & 0x33) + (u & 0x33);
diff += (u + (u >> 4)) & 0x0f;
src_a += 1;
src_b += 1;
}
return diff;
}
uint32_t SumSquareError_C(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t sse = 0u;
int i;
for (i = 0; i < count; ++i) {
int diff = src_a[i] - src_b[i];
sse += (uint32_t)(diff * diff);
}
return sse;
}
// hash seed of 5381 recommended.
// Internal C version of HashDjb2 with int sized count for efficiency.
uint32_t HashDjb2_C(const uint8_t* src, int count, uint32_t seed) {
uint32_t hash = seed;
int i;
for (i = 0; i < count; ++i) {
hash += (hash << 5) + src[i];
}
return hash;
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
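
HashDjb2_C above is strictly sequential (hash = hash * 33 + byte), which is what lets the dispatcher in compare.cc split input into 32 KB blocks and chain the partial result through the seed argument. A small self-check sketch (buffer contents are arbitrary; the helper name is mine):

#include <cassert>
#include <cstdint>
#include "libyuv/compare_row.h"  // HashDjb2_C

// Hashing a buffer in one pass equals hashing it in two chained passes,
// because each byte folds only into the running hash value.
void CheckHashChaining(const uint8_t* buf, int len) {
  const int split = len / 2;
  const uint32_t whole = libyuv::HashDjb2_C(buf, len, 5381);
  const uint32_t first = libyuv::HashDjb2_C(buf, split, 5381);
  assert(whole == libyuv::HashDjb2_C(buf + split, len - split, first));
}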

thirdparty/libyuv/source/compare_gcc.cc vendored Normal file

@@ -0,0 +1,360 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for GCC x86 and x64.
#if !defined(LIBYUV_DISABLE_X86) && \
(defined(__x86_64__) || defined(__i386__))
#if defined(__x86_64__)
uint32_t HammingDistance_SSE42(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint64_t diff = 0u;
asm volatile(
"xor %3,%3 \n"
"xor %%r8,%%r8 \n"
"xor %%r9,%%r9 \n"
"xor %%r10,%%r10 \n"
// Process 32 bytes per loop.
LABELALIGN
"1: \n"
"mov (%0),%%rcx \n"
"mov 0x8(%0),%%rdx \n"
"xor (%1),%%rcx \n"
"xor 0x8(%1),%%rdx \n"
"popcnt %%rcx,%%rcx \n"
"popcnt %%rdx,%%rdx \n"
"mov 0x10(%0),%%rsi \n"
"mov 0x18(%0),%%rdi \n"
"xor 0x10(%1),%%rsi \n"
"xor 0x18(%1),%%rdi \n"
"popcnt %%rsi,%%rsi \n"
"popcnt %%rdi,%%rdi \n"
"add $0x20,%0 \n"
"add $0x20,%1 \n"
"add %%rcx,%3 \n"
"add %%rdx,%%r8 \n"
"add %%rsi,%%r9 \n"
"add %%rdi,%%r10 \n"
"sub $0x20,%2 \n"
"jg 1b \n"
"add %%r8, %3 \n"
"add %%r9, %3 \n"
"add %%r10, %3 \n"
: "+r"(src_a), // %0
"+r"(src_b), // %1
"+r"(count), // %2
"=r"(diff) // %3
:
: "memory", "cc", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10");
return static_cast<uint32_t>(diff);
}
#else
uint32_t HammingDistance_SSE42(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
asm volatile(
// Process 16 bytes per loop.
LABELALIGN
"1: \n"
"mov (%0),%%ecx \n"
"mov 0x4(%0),%%edx \n"
"xor (%1),%%ecx \n"
"xor 0x4(%1),%%edx \n"
"popcnt %%ecx,%%ecx \n"
"add %%ecx,%3 \n"
"popcnt %%edx,%%edx \n"
"add %%edx,%3 \n"
"mov 0x8(%0),%%ecx \n"
"mov 0xc(%0),%%edx \n"
"xor 0x8(%1),%%ecx \n"
"xor 0xc(%1),%%edx \n"
"popcnt %%ecx,%%ecx \n"
"add %%ecx,%3 \n"
"popcnt %%edx,%%edx \n"
"add %%edx,%3 \n"
"add $0x10,%0 \n"
"add $0x10,%1 \n"
"sub $0x10,%2 \n"
"jg 1b \n"
: "+r"(src_a), // %0
"+r"(src_b), // %1
"+r"(count), // %2
"+r"(diff) // %3
:
: "memory", "cc", "ecx", "edx");
return diff;
}
#endif
static const vec8 kNibbleMask = {15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15};
static const vec8 kBitCount = {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
uint32_t HammingDistance_SSSE3(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
asm volatile(
"movdqa %4,%%xmm2 \n"
"movdqa %5,%%xmm3 \n"
"pxor %%xmm0,%%xmm0 \n"
"pxor %%xmm1,%%xmm1 \n"
"sub %0,%1 \n"
LABELALIGN
"1: \n"
"movdqa (%0),%%xmm4 \n"
"movdqa 0x10(%0), %%xmm5 \n"
"pxor (%0,%1), %%xmm4 \n"
"movdqa %%xmm4,%%xmm6 \n"
"pand %%xmm2,%%xmm6 \n"
"psrlw $0x4,%%xmm4 \n"
"movdqa %%xmm3,%%xmm7 \n"
"pshufb %%xmm6,%%xmm7 \n"
"pand %%xmm2,%%xmm4 \n"
"movdqa %%xmm3,%%xmm6 \n"
"pshufb %%xmm4,%%xmm6 \n"
"paddb %%xmm7,%%xmm6 \n"
"pxor 0x10(%0,%1),%%xmm5 \n"
"add $0x20,%0 \n"
"movdqa %%xmm5,%%xmm4 \n"
"pand %%xmm2,%%xmm5 \n"
"psrlw $0x4,%%xmm4 \n"
"movdqa %%xmm3,%%xmm7 \n"
"pshufb %%xmm5,%%xmm7 \n"
"pand %%xmm2,%%xmm4 \n"
"movdqa %%xmm3,%%xmm5 \n"
"pshufb %%xmm4,%%xmm5 \n"
"paddb %%xmm7,%%xmm5 \n"
"paddb %%xmm5,%%xmm6 \n"
"psadbw %%xmm1,%%xmm6 \n"
"paddd %%xmm6,%%xmm0 \n"
"sub $0x20,%2 \n"
"jg 1b \n"
"pshufd $0xaa,%%xmm0,%%xmm1 \n"
"paddd %%xmm1,%%xmm0 \n"
"movd %%xmm0, %3 \n"
: "+r"(src_a), // %0
"+r"(src_b), // %1
"+r"(count), // %2
"=r"(diff) // %3
: "m"(kNibbleMask), // %4
"m"(kBitCount) // %5
: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
"xmm7");
return diff;
}
#ifdef HAS_HAMMINGDISTANCE_AVX2
uint32_t HammingDistance_AVX2(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
asm volatile(
"vbroadcastf128 %4,%%ymm2 \n"
"vbroadcastf128 %5,%%ymm3 \n"
"vpxor %%ymm0,%%ymm0,%%ymm0 \n"
"vpxor %%ymm1,%%ymm1,%%ymm1 \n"
"sub %0,%1 \n"
LABELALIGN
"1: \n"
"vmovdqa (%0),%%ymm4 \n"
"vmovdqa 0x20(%0), %%ymm5 \n"
"vpxor (%0,%1), %%ymm4, %%ymm4 \n"
"vpand %%ymm2,%%ymm4,%%ymm6 \n"
"vpsrlw $0x4,%%ymm4,%%ymm4 \n"
"vpshufb %%ymm6,%%ymm3,%%ymm6 \n"
"vpand %%ymm2,%%ymm4,%%ymm4 \n"
"vpshufb %%ymm4,%%ymm3,%%ymm4 \n"
"vpaddb %%ymm4,%%ymm6,%%ymm6 \n"
"vpxor 0x20(%0,%1),%%ymm5,%%ymm4 \n"
"add $0x40,%0 \n"
"vpand %%ymm2,%%ymm4,%%ymm5 \n"
"vpsrlw $0x4,%%ymm4,%%ymm4 \n"
"vpshufb %%ymm5,%%ymm3,%%ymm5 \n"
"vpand %%ymm2,%%ymm4,%%ymm4 \n"
"vpshufb %%ymm4,%%ymm3,%%ymm4 \n"
"vpaddb %%ymm5,%%ymm4,%%ymm4 \n"
"vpaddb %%ymm6,%%ymm4,%%ymm4 \n"
"vpsadbw %%ymm1,%%ymm4,%%ymm4 \n"
"vpaddd %%ymm0,%%ymm4,%%ymm0 \n"
"sub $0x40,%2 \n"
"jg 1b \n"
"vpermq $0xb1,%%ymm0,%%ymm1 \n"
"vpaddd %%ymm1,%%ymm0,%%ymm0 \n"
"vpermq $0xaa,%%ymm0,%%ymm1 \n"
"vpaddd %%ymm1,%%ymm0,%%ymm0 \n"
"vmovd %%xmm0, %3 \n"
"vzeroupper \n"
: "+r"(src_a), // %0
"+r"(src_b), // %1
"+r"(count), // %2
"=r"(diff) // %3
: "m"(kNibbleMask), // %4
"m"(kBitCount) // %5
: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6");
return diff;
}
#endif // HAS_HAMMINGDISTANCE_AVX2
uint32_t SumSquareError_SSE2(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t sse;
asm volatile(
"pxor %%xmm0,%%xmm0 \n"
"pxor %%xmm5,%%xmm5 \n"
LABELALIGN
"1: \n"
"movdqu (%0),%%xmm1 \n"
"lea 0x10(%0),%0 \n"
"movdqu (%1),%%xmm2 \n"
"lea 0x10(%1),%1 \n"
"movdqa %%xmm1,%%xmm3 \n"
"psubusb %%xmm2,%%xmm1 \n"
"psubusb %%xmm3,%%xmm2 \n"
"por %%xmm2,%%xmm1 \n"
"movdqa %%xmm1,%%xmm2 \n"
"punpcklbw %%xmm5,%%xmm1 \n"
"punpckhbw %%xmm5,%%xmm2 \n"
"pmaddwd %%xmm1,%%xmm1 \n"
"pmaddwd %%xmm2,%%xmm2 \n"
"paddd %%xmm1,%%xmm0 \n"
"paddd %%xmm2,%%xmm0 \n"
"sub $0x10,%2 \n"
"jg 1b \n"
"pshufd $0xee,%%xmm0,%%xmm1 \n"
"paddd %%xmm1,%%xmm0 \n"
"pshufd $0x1,%%xmm0,%%xmm1 \n"
"paddd %%xmm1,%%xmm0 \n"
"movd %%xmm0,%3 \n"
: "+r"(src_a), // %0
"+r"(src_b), // %1
"+r"(count), // %2
"=g"(sse) // %3
::"memory",
"cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm5");
return sse;
}
static const uvec32 kHash16x33 = {0x92d9e201, 0, 0, 0}; // 33 ^ 16
static const uvec32 kHashMul0 = {
0x0c3525e1, // 33 ^ 15
0xa3476dc1, // 33 ^ 14
0x3b4039a1, // 33 ^ 13
0x4f5f0981, // 33 ^ 12
};
static const uvec32 kHashMul1 = {
0x30f35d61, // 33 ^ 11
0x855cb541, // 33 ^ 10
0x040a9121, // 33 ^ 9
0x747c7101, // 33 ^ 8
};
static const uvec32 kHashMul2 = {
0xec41d4e1, // 33 ^ 7
0x4cfa3cc1, // 33 ^ 6
0x025528a1, // 33 ^ 5
0x00121881, // 33 ^ 4
};
static const uvec32 kHashMul3 = {
0x00008c61, // 33 ^ 3
0x00000441, // 33 ^ 2
0x00000021, // 33 ^ 1
0x00000001, // 33 ^ 0
};
uint32_t HashDjb2_SSE41(const uint8_t* src, int count, uint32_t seed) {
uint32_t hash;
asm volatile(
"movd %2,%%xmm0 \n"
"pxor %%xmm7,%%xmm7 \n"
"movdqa %4,%%xmm6 \n"
LABELALIGN
"1: \n"
"movdqu (%0),%%xmm1 \n"
"lea 0x10(%0),%0 \n"
"pmulld %%xmm6,%%xmm0 \n"
"movdqa %5,%%xmm5 \n"
"movdqa %%xmm1,%%xmm2 \n"
"punpcklbw %%xmm7,%%xmm2 \n"
"movdqa %%xmm2,%%xmm3 \n"
"punpcklwd %%xmm7,%%xmm3 \n"
"pmulld %%xmm5,%%xmm3 \n"
"movdqa %6,%%xmm5 \n"
"movdqa %%xmm2,%%xmm4 \n"
"punpckhwd %%xmm7,%%xmm4 \n"
"pmulld %%xmm5,%%xmm4 \n"
"movdqa %7,%%xmm5 \n"
"punpckhbw %%xmm7,%%xmm1 \n"
"movdqa %%xmm1,%%xmm2 \n"
"punpcklwd %%xmm7,%%xmm2 \n"
"pmulld %%xmm5,%%xmm2 \n"
"movdqa %8,%%xmm5 \n"
"punpckhwd %%xmm7,%%xmm1 \n"
"pmulld %%xmm5,%%xmm1 \n"
"paddd %%xmm4,%%xmm3 \n"
"paddd %%xmm2,%%xmm1 \n"
"paddd %%xmm3,%%xmm1 \n"
"pshufd $0xe,%%xmm1,%%xmm2 \n"
"paddd %%xmm2,%%xmm1 \n"
"pshufd $0x1,%%xmm1,%%xmm2 \n"
"paddd %%xmm2,%%xmm1 \n"
"paddd %%xmm1,%%xmm0 \n"
"sub $0x10,%1 \n"
"jg 1b \n"
"movd %%xmm0,%3 \n"
: "+r"(src), // %0
"+r"(count), // %1
"+rm"(seed), // %2
"=g"(hash) // %3
: "m"(kHash16x33), // %4
"m"(kHashMul0), // %5
"m"(kHashMul1), // %6
"m"(kHashMul2), // %7
"m"(kHashMul3) // %8
: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
"xmm7");
return hash;
}
#endif // !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__))
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
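
The SSE4.2 paths above reduce to XOR plus popcnt over machine words. A portable scalar mirror using the GCC/Clang builtin, handy for spot-checking the assembly (the helper name is mine; like the SIMD paths it assumes count is a multiple of the word size):

#include <stdint.h>
#include <string.h>

// Scalar equivalent of HammingDistance_SSE42: XOR 8-byte words, popcount,
// and accumulate. Assumes count is a multiple of 8.
static uint32_t HammingDistance_Portable(const uint8_t* src_a,
                                         const uint8_t* src_b,
                                         int count) {
  uint32_t diff = 0;
  int i;
  for (i = 0; i < count; i += 8) {
    uint64_t a, b;
    memcpy(&a, src_a + i, 8);  // memcpy avoids unaligned-load UB
    memcpy(&b, src_b + i, 8);
    diff += (uint32_t)__builtin_popcountll(a ^ b);
  }
  return diff;
}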

thirdparty/libyuv/source/compare_mmi.cc vendored Normal file

@@ -0,0 +1,123 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for Mips MMI.
#if !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A)
// Hakmem method for hamming distance.
uint32_t HammingDistance_MMI(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
uint64_t temp = 0, temp1 = 0, ta = 0, tb = 0;
uint64_t c1 = 0x5555555555555555;
uint64_t c2 = 0x3333333333333333;
uint64_t c3 = 0x0f0f0f0f0f0f0f0f;
uint32_t c4 = 0x01010101;
uint64_t s1 = 1, s2 = 2, s3 = 4;
__asm__ volatile(
"1: \n\t"
"ldc1 %[ta], 0(%[src_a]) \n\t"
"ldc1 %[tb], 0(%[src_b]) \n\t"
"xor %[temp], %[ta], %[tb] \n\t"
"psrlw %[temp1], %[temp], %[s1] \n\t" // temp1=x>>1
"and %[temp1], %[temp1], %[c1] \n\t" // temp1&=c1
"psubw %[temp1], %[temp], %[temp1] \n\t" // x-temp1
"and %[temp], %[temp1], %[c2] \n\t" // t = (u&c2)
"psrlw %[temp1], %[temp1], %[s2] \n\t" // u>>2
"and %[temp1], %[temp1], %[c2] \n\t" // u>>2 & c2
"paddw %[temp1], %[temp1], %[temp] \n\t" // t1 = t1+t
"psrlw %[temp], %[temp1], %[s3] \n\t" // u>>4
"paddw %[temp1], %[temp1], %[temp] \n\t" // u+(u>>4)
"and %[temp1], %[temp1], %[c3] \n\t" //&c3
"dmfc1 $t0, %[temp1] \n\t"
"dsrl32 $t0, $t0, 0 \n\t "
"mul $t0, $t0, %[c4] \n\t"
"dsrl $t0, $t0, 24 \n\t"
"dadd %[diff], %[diff], $t0 \n\t"
"dmfc1 $t0, %[temp1] \n\t"
"mul $t0, $t0, %[c4] \n\t"
"dsrl $t0, $t0, 24 \n\t"
"dadd %[diff], %[diff], $t0 \n\t"
"daddiu %[src_a], %[src_a], 8 \n\t"
"daddiu %[src_b], %[src_b], 8 \n\t"
"addiu %[count], %[count], -8 \n\t"
"bgtz %[count], 1b \n\t"
"nop \n\t"
: [diff] "+r"(diff), [src_a] "+r"(src_a), [src_b] "+r"(src_b),
[count] "+r"(count), [ta] "+f"(ta), [tb] "+f"(tb), [temp] "+f"(temp),
[temp1] "+f"(temp1)
: [c1] "f"(c1), [c2] "f"(c2), [c3] "f"(c3), [c4] "r"(c4), [s1] "f"(s1),
[s2] "f"(s2), [s3] "f"(s3)
: "memory");
return diff;
}
uint32_t SumSquareError_MMI(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t sse = 0u;
uint32_t sse_hi = 0u, sse_lo = 0u;
uint64_t src1, src2;
uint64_t diff, diff_hi, diff_lo;
uint64_t sse_sum, sse_tmp;
const uint64_t mask = 0x0ULL;
__asm__ volatile(
"xor %[sse_sum], %[sse_sum], %[sse_sum] \n\t"
"1: \n\t"
"ldc1 %[src1], 0x00(%[src_a]) \n\t"
"ldc1 %[src2], 0x00(%[src_b]) \n\t"
"pasubub %[diff], %[src1], %[src2] \n\t"
"punpcklbh %[diff_lo], %[diff], %[mask] \n\t"
"punpckhbh %[diff_hi], %[diff], %[mask] \n\t"
"pmaddhw %[sse_tmp], %[diff_lo], %[diff_lo] \n\t"
"paddw %[sse_sum], %[sse_sum], %[sse_tmp] \n\t"
"pmaddhw %[sse_tmp], %[diff_hi], %[diff_hi] \n\t"
"paddw %[sse_sum], %[sse_sum], %[sse_tmp] \n\t"
"daddiu %[src_a], %[src_a], 0x08 \n\t"
"daddiu %[src_b], %[src_b], 0x08 \n\t"
"daddiu %[count], %[count], -0x08 \n\t"
"bnez %[count], 1b \n\t"
"mfc1 %[sse_lo], %[sse_sum] \n\t"
"mfhc1 %[sse_hi], %[sse_sum] \n\t"
"daddu %[sse], %[sse_hi], %[sse_lo] \n\t"
: [sse] "+&r"(sse), [diff] "=&f"(diff), [src1] "=&f"(src1),
[src2] "=&f"(src2), [diff_lo] "=&f"(diff_lo), [diff_hi] "=&f"(diff_hi),
[sse_sum] "=&f"(sse_sum), [sse_tmp] "=&f"(sse_tmp),
[sse_hi] "+&r"(sse_hi), [sse_lo] "+&r"(sse_lo)
: [src_a] "r"(src_a), [src_b] "r"(src_b), [count] "r"(count),
[mask] "f"(mask)
: "memory");
return sse;
}
#endif // !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

thirdparty/libyuv/source/compare_msa.cc vendored Normal file

@@ -0,0 +1,97 @@
/*
* Copyright 2017 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#include "libyuv/row.h"
// This module is for GCC MSA
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#include "libyuv/macros_msa.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
uint32_t HammingDistance_MSA(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
int i;
v16u8 src0, src1, src2, src3;
v2i64 vec0 = {0}, vec1 = {0};
for (i = 0; i < count; i += 32) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_a, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_a, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)src_b, 0);
src3 = (v16u8)__msa_ld_b((v16i8*)src_b, 16);
src0 ^= src2;
src1 ^= src3;
vec0 += __msa_pcnt_d((v2i64)src0);
vec1 += __msa_pcnt_d((v2i64)src1);
src_a += 32;
src_b += 32;
}
vec0 += vec1;
diff = (uint32_t)__msa_copy_u_w((v4i32)vec0, 0);
diff += (uint32_t)__msa_copy_u_w((v4i32)vec0, 2);
return diff;
}
uint32_t SumSquareError_MSA(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t sse = 0u;
int i;
v16u8 src0, src1, src2, src3;
v8i16 vec0, vec1, vec2, vec3;
v4i32 reg0 = {0}, reg1 = {0}, reg2 = {0}, reg3 = {0};
v2i64 tmp0;
for (i = 0; i < count; i += 32) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_a, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_a, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)src_b, 0);
src3 = (v16u8)__msa_ld_b((v16i8*)src_b, 16);
vec0 = (v8i16)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
vec1 = (v8i16)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
vec2 = (v8i16)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
vec3 = (v8i16)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
vec0 = __msa_hsub_u_h((v16u8)vec0, (v16u8)vec0);
vec1 = __msa_hsub_u_h((v16u8)vec1, (v16u8)vec1);
vec2 = __msa_hsub_u_h((v16u8)vec2, (v16u8)vec2);
vec3 = __msa_hsub_u_h((v16u8)vec3, (v16u8)vec3);
reg0 = __msa_dpadd_s_w(reg0, vec0, vec0);
reg1 = __msa_dpadd_s_w(reg1, vec1, vec1);
reg2 = __msa_dpadd_s_w(reg2, vec2, vec2);
reg3 = __msa_dpadd_s_w(reg3, vec3, vec3);
src_a += 32;
src_b += 32;
}
reg0 += reg1;
reg2 += reg3;
reg0 += reg2;
tmp0 = __msa_hadd_s_d(reg0, reg0);
sse = (uint32_t)__msa_copy_u_w((v4i32)tmp0, 0);
sse += (uint32_t)__msa_copy_u_w((v4i32)tmp0, 2);
return sse;
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)

thirdparty/libyuv/source/compare_neon.cc vendored Normal file

@@ -0,0 +1,96 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#if !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__) && \
!defined(__aarch64__)
// 256 bits at a time
// uses short accumulator which restricts count to 131 KB
uint32_t HammingDistance_NEON(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff;
asm volatile(
"vmov.u16 q4, #0 \n" // accumulator
"1: \n"
"vld1.8 {q0, q1}, [%0]! \n"
"vld1.8 {q2, q3}, [%1]! \n"
"veor.32 q0, q0, q2 \n"
"veor.32 q1, q1, q3 \n"
"vcnt.i8 q0, q0 \n"
"vcnt.i8 q1, q1 \n"
"subs %2, %2, #32 \n"
"vadd.u8 q0, q0, q1 \n" // 16 byte counts
"vpadal.u8 q4, q0 \n" // 8 shorts
"bgt 1b \n"
"vpaddl.u16 q0, q4 \n" // 4 ints
"vpadd.u32 d0, d0, d1 \n"
"vpadd.u32 d0, d0, d0 \n"
"vmov.32 %3, d0[0] \n"
: "+r"(src_a), "+r"(src_b), "+r"(count), "=r"(diff)
:
: "cc", "q0", "q1", "q2", "q3", "q4");
return diff;
}
uint32_t SumSquareError_NEON(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t sse;
asm volatile(
"vmov.u8 q8, #0 \n"
"vmov.u8 q10, #0 \n"
"vmov.u8 q9, #0 \n"
"vmov.u8 q11, #0 \n"
"1: \n"
"vld1.8 {q0}, [%0]! \n"
"vld1.8 {q1}, [%1]! \n"
"subs %2, %2, #16 \n"
"vsubl.u8 q2, d0, d2 \n"
"vsubl.u8 q3, d1, d3 \n"
"vmlal.s16 q8, d4, d4 \n"
"vmlal.s16 q9, d6, d6 \n"
"vmlal.s16 q10, d5, d5 \n"
"vmlal.s16 q11, d7, d7 \n"
"bgt 1b \n"
"vadd.u32 q8, q8, q9 \n"
"vadd.u32 q10, q10, q11 \n"
"vadd.u32 q11, q8, q10 \n"
"vpaddl.u32 q1, q11 \n"
"vadd.u64 d0, d2, d3 \n"
"vmov.32 %3, d0[0] \n"
: "+r"(src_a), "+r"(src_b), "+r"(count), "=r"(sse)
:
: "memory", "cc", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
return sse;
}
#endif // defined(__ARM_NEON__) && !defined(__aarch64__)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

thirdparty/libyuv/source/compare_neon64.cc vendored Normal file

@@ -0,0 +1,94 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#if !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
// 256 bits at a time
// uses short accumulator which restricts count to 131 KB
uint32_t HammingDistance_NEON(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff;
asm volatile(
"movi v4.8h, #0 \n"
"1: \n"
"ld1 {v0.16b, v1.16b}, [%0], #32 \n"
"ld1 {v2.16b, v3.16b}, [%1], #32 \n"
"eor v0.16b, v0.16b, v2.16b \n"
"prfm pldl1keep, [%0, 448] \n" // prefetch 7 lines ahead
"eor v1.16b, v1.16b, v3.16b \n"
"cnt v0.16b, v0.16b \n"
"prfm pldl1keep, [%1, 448] \n"
"cnt v1.16b, v1.16b \n"
"subs %w2, %w2, #32 \n"
"add v0.16b, v0.16b, v1.16b \n"
"uadalp v4.8h, v0.16b \n"
"b.gt 1b \n"
"uaddlv s4, v4.8h \n"
"fmov %w3, s4 \n"
: "+r"(src_a), "+r"(src_b), "+r"(count), "=r"(diff)
:
: "cc", "v0", "v1", "v2", "v3", "v4");
return diff;
}
uint32_t SumSquareError_NEON(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t sse;
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"1: \n"
"ld1 {v0.16b}, [%0], #16 \n"
"ld1 {v1.16b}, [%1], #16 \n"
"subs %w2, %w2, #16 \n"
"usubl v2.8h, v0.8b, v1.8b \n"
"usubl2 v3.8h, v0.16b, v1.16b \n"
"prfm pldl1keep, [%0, 448] \n" // prefetch 7 lines ahead
"smlal v16.4s, v2.4h, v2.4h \n"
"smlal v17.4s, v3.4h, v3.4h \n"
"prfm pldl1keep, [%1, 448] \n"
"smlal2 v18.4s, v2.8h, v2.8h \n"
"smlal2 v19.4s, v3.8h, v3.8h \n"
"b.gt 1b \n"
"add v16.4s, v16.4s, v17.4s \n"
"add v18.4s, v18.4s, v19.4s \n"
"add v19.4s, v16.4s, v18.4s \n"
"addv s0, v19.4s \n"
"fmov %w3, s0 \n"
: "+r"(src_a), "+r"(src_b), "+r"(count), "=r"(sse)
:
: "cc", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19");
return sse;
}
#endif // !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

thirdparty/libyuv/source/compare_win.cc vendored Normal file

@@ -0,0 +1,241 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/basic_types.h"
#include "libyuv/compare_row.h"
#include "libyuv/row.h"
#if defined(_MSC_VER)
#include <intrin.h> // For __popcnt
#endif
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for 32 bit Visual C x86
#if !defined(LIBYUV_DISABLE_X86) && defined(_MSC_VER) && \
!defined(__clang__) && defined(_M_IX86)
uint32_t HammingDistance_SSE42(const uint8_t* src_a,
const uint8_t* src_b,
int count) {
uint32_t diff = 0u;
int i;
for (i = 0; i < count - 3; i += 4) {
uint32_t x = *((uint32_t*)src_a) ^ *((uint32_t*)src_b); // NOLINT
src_a += 4;
src_b += 4;
diff += __popcnt(x);
}
return diff;
}
__declspec(naked) uint32_t
SumSquareError_SSE2(const uint8_t* src_a, const uint8_t* src_b, int count) {
__asm {
mov eax, [esp + 4] // src_a
mov edx, [esp + 8] // src_b
mov ecx, [esp + 12] // count
pxor xmm0, xmm0
pxor xmm5, xmm5
wloop:
movdqu xmm1, [eax]
lea eax, [eax + 16]
movdqu xmm2, [edx]
lea edx, [edx + 16]
movdqa xmm3, xmm1 // abs trick
psubusb xmm1, xmm2
psubusb xmm2, xmm3
por xmm1, xmm2
movdqa xmm2, xmm1
punpcklbw xmm1, xmm5
punpckhbw xmm2, xmm5
pmaddwd xmm1, xmm1
pmaddwd xmm2, xmm2
paddd xmm0, xmm1
paddd xmm0, xmm2
sub ecx, 16
jg wloop
pshufd xmm1, xmm0, 0xee
paddd xmm0, xmm1
pshufd xmm1, xmm0, 0x01
paddd xmm0, xmm1
movd eax, xmm0
ret
}
}
#ifdef HAS_SUMSQUAREERROR_AVX2
// C4752: found Intel(R) Advanced Vector Extensions; consider using /arch:AVX.
#pragma warning(disable : 4752)
__declspec(naked) uint32_t
SumSquareError_AVX2(const uint8_t* src_a, const uint8_t* src_b, int count) {
__asm {
mov eax, [esp + 4] // src_a
mov edx, [esp + 8] // src_b
mov ecx, [esp + 12] // count
vpxor ymm0, ymm0, ymm0 // sum
vpxor ymm5, ymm5, ymm5 // constant 0 for unpck
sub edx, eax
wloop:
vmovdqu ymm1, [eax]
vmovdqu ymm2, [eax + edx]
lea eax, [eax + 32]
vpsubusb ymm3, ymm1, ymm2 // abs difference trick
vpsubusb ymm2, ymm2, ymm1
vpor ymm1, ymm2, ymm3
vpunpcklbw ymm2, ymm1, ymm5 // u16. mutates order.
vpunpckhbw ymm1, ymm1, ymm5
vpmaddwd ymm2, ymm2, ymm2 // square + hadd to u32.
vpmaddwd ymm1, ymm1, ymm1
vpaddd ymm0, ymm0, ymm1
vpaddd ymm0, ymm0, ymm2
sub ecx, 32
jg wloop
vpshufd ymm1, ymm0, 0xee // 3, 2 + 1, 0 both lanes.
vpaddd ymm0, ymm0, ymm1
vpshufd ymm1, ymm0, 0x01 // 1 + 0 both lanes.
vpaddd ymm0, ymm0, ymm1
vpermq ymm1, ymm0, 0x02 // high + low lane.
vpaddd ymm0, ymm0, ymm1
vmovd eax, xmm0
vzeroupper
ret
}
}
#endif // HAS_SUMSQUAREERROR_AVX2
uvec32 kHash16x33 = {0x92d9e201, 0, 0, 0}; // 33 ^ 16
uvec32 kHashMul0 = {
0x0c3525e1, // 33 ^ 15
0xa3476dc1, // 33 ^ 14
0x3b4039a1, // 33 ^ 13
0x4f5f0981, // 33 ^ 12
};
uvec32 kHashMul1 = {
0x30f35d61, // 33 ^ 11
0x855cb541, // 33 ^ 10
0x040a9121, // 33 ^ 9
0x747c7101, // 33 ^ 8
};
uvec32 kHashMul2 = {
0xec41d4e1, // 33 ^ 7
0x4cfa3cc1, // 33 ^ 6
0x025528a1, // 33 ^ 5
0x00121881, // 33 ^ 4
};
uvec32 kHashMul3 = {
0x00008c61, // 33 ^ 3
0x00000441, // 33 ^ 2
0x00000021, // 33 ^ 1
0x00000001, // 33 ^ 0
};
__declspec(naked) uint32_t
HashDjb2_SSE41(const uint8_t* src, int count, uint32_t seed) {
__asm {
mov eax, [esp + 4] // src
mov ecx, [esp + 8] // count
movd xmm0, [esp + 12] // seed
pxor xmm7, xmm7 // constant 0 for unpck
movdqa xmm6, xmmword ptr kHash16x33
wloop:
movdqu xmm1, [eax] // src[0-15]
lea eax, [eax + 16]
pmulld xmm0, xmm6 // hash *= 33 ^ 16
movdqa xmm5, xmmword ptr kHashMul0
movdqa xmm2, xmm1
punpcklbw xmm2, xmm7 // src[0-7]
movdqa xmm3, xmm2
punpcklwd xmm3, xmm7 // src[0-3]
pmulld xmm3, xmm5
movdqa xmm5, xmmword ptr kHashMul1
movdqa xmm4, xmm2
punpckhwd xmm4, xmm7 // src[4-7]
pmulld xmm4, xmm5
movdqa xmm5, xmmword ptr kHashMul2
punpckhbw xmm1, xmm7 // src[8-15]
movdqa xmm2, xmm1
punpcklwd xmm2, xmm7 // src[8-11]
pmulld xmm2, xmm5
movdqa xmm5, xmmword ptr kHashMul3
punpckhwd xmm1, xmm7 // src[12-15]
pmulld xmm1, xmm5
paddd xmm3, xmm4 // add 16 results
paddd xmm1, xmm2
paddd xmm1, xmm3
pshufd xmm2, xmm1, 0x0e // upper 2 dwords
paddd xmm1, xmm2
pshufd xmm2, xmm1, 0x01
paddd xmm1, xmm2
paddd xmm0, xmm1
sub ecx, 16
jg wloop
movd eax, xmm0 // return hash
ret
}
}
// Visual C 2012 required for AVX2.
#ifdef HAS_HASHDJB2_AVX2
__declspec(naked) uint32_t
HashDjb2_AVX2(const uint8_t* src, int count, uint32_t seed) {
__asm {
mov eax, [esp + 4] // src
mov ecx, [esp + 8] // count
vmovd xmm0, [esp + 12] // seed
wloop:
vpmovzxbd xmm3, [eax] // src[0-3]
vpmulld xmm0, xmm0, xmmword ptr kHash16x33 // hash *= 33 ^ 16
vpmovzxbd xmm4, [eax + 4] // src[4-7]
vpmulld xmm3, xmm3, xmmword ptr kHashMul0
vpmovzxbd xmm2, [eax + 8] // src[8-11]
vpmulld xmm4, xmm4, xmmword ptr kHashMul1
vpmovzxbd xmm1, [eax + 12] // src[12-15]
vpmulld xmm2, xmm2, xmmword ptr kHashMul2
lea eax, [eax + 16]
vpmulld xmm1, xmm1, xmmword ptr kHashMul3
vpaddd xmm3, xmm3, xmm4 // add 16 results
vpaddd xmm1, xmm1, xmm2
vpaddd xmm1, xmm1, xmm3
vpshufd xmm2, xmm1, 0x0e // upper 2 dwords
vpaddd xmm1, xmm1,xmm2
vpshufd xmm2, xmm1, 0x01
vpaddd xmm1, xmm1, xmm2
vpaddd xmm0, xmm0, xmm1
sub ecx, 16
jg wloop
vmovd eax, xmm0 // return hash
vzeroupper
ret
}
}
#endif // HAS_HASHDJB2_AVX2
#endif // !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

thirdparty/libyuv/source/convert.cc vendored Normal file

File diff suppressed because it is too large

thirdparty/libyuv/source/convert_argb.cc vendored Normal file

File diff suppressed because it is too large

thirdparty/libyuv/source/convert_from.cc vendored Normal file

@@ -0,0 +1,855 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/convert_from.h"
#include "libyuv/basic_types.h"
#include "libyuv/convert.h" // For I420Copy
#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h"
#include "libyuv/rotate.h"
#include "libyuv/row.h"
#include "libyuv/scale.h" // For ScalePlane()
#include "libyuv/video_common.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#define SUBSAMPLE(v, a, s) (v < 0) ? (-((-v + a) >> s)) : ((v + a) >> s)
static __inline int Abs(int v) {
return v >= 0 ? v : -v;
}
// I420 To any I4xx YUV format with mirroring.
// TODO(fbarchard): Consider kFilterNone for Y, or CopyPlane
static int I420ToI4xx(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int src_y_width,
int src_y_height,
int dst_uv_width,
int dst_uv_height) {
const int dst_y_width = Abs(src_y_width);
const int dst_y_height = Abs(src_y_height);
const int src_uv_width = SUBSAMPLE(src_y_width, 1, 1);
const int src_uv_height = SUBSAMPLE(src_y_height, 1, 1);
if (src_y_width == 0 || src_y_height == 0 || dst_uv_width <= 0 ||
dst_uv_height <= 0) {
return -1;
}
if (dst_y) {
ScalePlane(src_y, src_stride_y, src_y_width, src_y_height, dst_y,
dst_stride_y, dst_y_width, dst_y_height, kFilterBilinear);
}
ScalePlane(src_u, src_stride_u, src_uv_width, src_uv_height, dst_u,
dst_stride_u, dst_uv_width, dst_uv_height, kFilterBilinear);
ScalePlane(src_v, src_stride_v, src_uv_width, src_uv_height, dst_v,
dst_stride_v, dst_uv_width, dst_uv_height, kFilterBilinear);
return 0;
}
// Convert 8 bit YUV to 10 bit.
LIBYUV_API
int I420ToI010(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height) {
int halfwidth = (width + 1) >> 1;
int halfheight = (height + 1) >> 1;
if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
halfheight = (height + 1) >> 1;
src_y = src_y + (height - 1) * src_stride_y;
src_u = src_u + (halfheight - 1) * src_stride_u;
src_v = src_v + (halfheight - 1) * src_stride_v;
src_stride_y = -src_stride_y;
src_stride_u = -src_stride_u;
src_stride_v = -src_stride_v;
}
// Convert Y plane.
Convert8To16Plane(src_y, src_stride_y, dst_y, dst_stride_y, 1024, width,
height);
// Convert UV planes.
Convert8To16Plane(src_u, src_stride_u, dst_u, dst_stride_u, 1024, halfwidth,
halfheight);
Convert8To16Plane(src_v, src_stride_v, dst_v, dst_stride_v, 1024, halfwidth,
halfheight);
return 0;
}
// Convert 8 bit YUV to 12 bit.
LIBYUV_API
int I420ToI012(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height) {
int halfwidth = (width + 1) >> 1;
int halfheight = (height + 1) >> 1;
if (!src_u || !src_v || !dst_u || !dst_v || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
halfheight = (height + 1) >> 1;
src_y = src_y + (height - 1) * src_stride_y;
src_u = src_u + (halfheight - 1) * src_stride_u;
src_v = src_v + (halfheight - 1) * src_stride_v;
src_stride_y = -src_stride_y;
src_stride_u = -src_stride_u;
src_stride_v = -src_stride_v;
}
// Convert Y plane.
Convert8To16Plane(src_y, src_stride_y, dst_y, dst_stride_y, 4096, width,
height);
// Convert UV planes.
Convert8To16Plane(src_u, src_stride_u, dst_u, dst_stride_u, 4096, halfwidth,
halfheight);
Convert8To16Plane(src_v, src_stride_v, dst_v, dst_stride_v, 4096, halfwidth,
halfheight);
return 0;
}
// 420 chroma is 1/2 width, 1/2 height
// 422 chroma is 1/2 width, 1x height
LIBYUV_API
int I420ToI422(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height) {
const int dst_uv_width = (Abs(width) + 1) >> 1;
const int dst_uv_height = Abs(height);
return I420ToI4xx(src_y, src_stride_y, src_u, src_stride_u, src_v,
src_stride_v, dst_y, dst_stride_y, dst_u, dst_stride_u,
dst_v, dst_stride_v, width, height, dst_uv_width,
dst_uv_height);
}
// 420 chroma is 1/2 width, 1/2 height
// 444 chroma is 1x width, 1x height
LIBYUV_API
int I420ToI444(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height) {
const int dst_uv_width = Abs(width);
const int dst_uv_height = Abs(height);
return I420ToI4xx(src_y, src_stride_y, src_u, src_stride_u, src_v,
src_stride_v, dst_y, dst_stride_y, dst_u, dst_stride_u,
dst_v, dst_stride_v, width, height, dst_uv_width,
dst_uv_height);
}
// 420 chroma to 444 chroma, 10/12 bit version
LIBYUV_API
int I010ToI410(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height) {
if (width == 0 || height == 0) {
return -1;
}
if (dst_y) {
ScalePlane_12(src_y, src_stride_y, width, height, dst_y, dst_stride_y,
Abs(width), Abs(height), kFilterBilinear);
}
ScalePlane_12(src_u, src_stride_u, SUBSAMPLE(width, 1, 1),
SUBSAMPLE(height, 1, 1), dst_u, dst_stride_u, Abs(width),
Abs(height), kFilterBilinear);
ScalePlane_12(src_v, src_stride_v, SUBSAMPLE(width, 1, 1),
SUBSAMPLE(height, 1, 1), dst_v, dst_stride_v, Abs(width),
Abs(height), kFilterBilinear);
return 0;
}
// 422 chroma to 444 chroma, 10/12 bit version
LIBYUV_API
int I210ToI410(const uint16_t* src_y,
int src_stride_y,
const uint16_t* src_u,
int src_stride_u,
const uint16_t* src_v,
int src_stride_v,
uint16_t* dst_y,
int dst_stride_y,
uint16_t* dst_u,
int dst_stride_u,
uint16_t* dst_v,
int dst_stride_v,
int width,
int height) {
if (width == 0 || height == 0) {
return -1;
}
if (dst_y) {
ScalePlane_12(src_y, src_stride_y, width, height, dst_y, dst_stride_y,
Abs(width), Abs(height), kFilterBilinear);
}
ScalePlane_12(src_u, src_stride_u, SUBSAMPLE(width, 1, 1), height, dst_u,
dst_stride_u, Abs(width), Abs(height), kFilterBilinear);
ScalePlane_12(src_v, src_stride_v, SUBSAMPLE(width, 1, 1), height, dst_v,
dst_stride_v, Abs(width), Abs(height), kFilterBilinear);
return 0;
}
// 422 chroma is 1/2 width, 1x height
// 444 chroma is 1x width, 1x height
LIBYUV_API
int I422ToI444(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height) {
if (width == 0 || height == 0) {
return -1;
}
if (dst_y) {
ScalePlane(src_y, src_stride_y, width, height, dst_y, dst_stride_y,
Abs(width), Abs(height), kFilterBilinear);
}
ScalePlane(src_u, src_stride_u, SUBSAMPLE(width, 1, 1), height, dst_u,
dst_stride_u, Abs(width), Abs(height), kFilterBilinear);
ScalePlane(src_v, src_stride_v, SUBSAMPLE(width, 1, 1), height, dst_v,
dst_stride_v, Abs(width), Abs(height), kFilterBilinear);
return 0;
}
// Copy to I400. Source can be I420,422,444,400,NV12,NV21
LIBYUV_API
int I400Copy(const uint8_t* src_y,
int src_stride_y,
uint8_t* dst_y,
int dst_stride_y,
int width,
int height) {
if (!src_y || !dst_y || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
src_y = src_y + (height - 1) * src_stride_y;
src_stride_y = -src_stride_y;
}
CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
return 0;
}
LIBYUV_API
int I422ToYUY2(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_yuy2,
int dst_stride_yuy2,
int width,
int height) {
int y;
void (*I422ToYUY2Row)(const uint8_t* src_y, const uint8_t* src_u,
const uint8_t* src_v, uint8_t* dst_yuy2, int width) =
I422ToYUY2Row_C;
if (!src_y || !src_u || !src_v || !dst_yuy2 || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
dst_stride_yuy2 = -dst_stride_yuy2;
}
// Coalesce rows.
if (src_stride_y == width && src_stride_u * 2 == width &&
src_stride_v * 2 == width && dst_stride_yuy2 == width * 2) {
width *= height;
height = 1;
src_stride_y = src_stride_u = src_stride_v = dst_stride_yuy2 = 0;
}
#if defined(HAS_I422TOYUY2ROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
if (IS_ALIGNED(width, 16)) {
I422ToYUY2Row = I422ToYUY2Row_SSE2;
}
}
#endif
#if defined(HAS_I422TOYUY2ROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
I422ToYUY2Row = I422ToYUY2Row_Any_AVX2;
if (IS_ALIGNED(width, 32)) {
I422ToYUY2Row = I422ToYUY2Row_AVX2;
}
}
#endif
#if defined(HAS_I422TOYUY2ROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
if (IS_ALIGNED(width, 16)) {
I422ToYUY2Row = I422ToYUY2Row_NEON;
}
}
#endif
for (y = 0; y < height; ++y) {
I422ToYUY2Row(src_y, src_u, src_v, dst_yuy2, width);
src_y += src_stride_y;
src_u += src_stride_u;
src_v += src_stride_v;
dst_yuy2 += dst_stride_yuy2;
}
return 0;
}
LIBYUV_API
int I420ToYUY2(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_yuy2,
int dst_stride_yuy2,
int width,
int height) {
int y;
void (*I422ToYUY2Row)(const uint8_t* src_y, const uint8_t* src_u,
const uint8_t* src_v, uint8_t* dst_yuy2, int width) =
I422ToYUY2Row_C;
if (!src_y || !src_u || !src_v || !dst_yuy2 || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
dst_stride_yuy2 = -dst_stride_yuy2;
}
#if defined(HAS_I422TOYUY2ROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
if (IS_ALIGNED(width, 16)) {
I422ToYUY2Row = I422ToYUY2Row_SSE2;
}
}
#endif
#if defined(HAS_I422TOYUY2ROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
I422ToYUY2Row = I422ToYUY2Row_Any_AVX2;
if (IS_ALIGNED(width, 32)) {
I422ToYUY2Row = I422ToYUY2Row_AVX2;
}
}
#endif
#if defined(HAS_I422TOYUY2ROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
if (IS_ALIGNED(width, 16)) {
I422ToYUY2Row = I422ToYUY2Row_NEON;
}
}
#endif
#if defined(HAS_I422TOYUY2ROW_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
I422ToYUY2Row = I422ToYUY2Row_Any_MMI;
if (IS_ALIGNED(width, 8)) {
I422ToYUY2Row = I422ToYUY2Row_MMI;
}
}
#endif
#if defined(HAS_I422TOYUY2ROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
I422ToYUY2Row = I422ToYUY2Row_Any_MSA;
if (IS_ALIGNED(width, 32)) {
I422ToYUY2Row = I422ToYUY2Row_MSA;
}
}
#endif
for (y = 0; y < height - 1; y += 2) {
I422ToYUY2Row(src_y, src_u, src_v, dst_yuy2, width);
I422ToYUY2Row(src_y + src_stride_y, src_u, src_v,
dst_yuy2 + dst_stride_yuy2, width);
src_y += src_stride_y * 2;
src_u += src_stride_u;
src_v += src_stride_v;
dst_yuy2 += dst_stride_yuy2 * 2;
}
if (height & 1) {
I422ToYUY2Row(src_y, src_u, src_v, dst_yuy2, width);
}
return 0;
}
LIBYUV_API
int I422ToUYVY(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_uyvy,
int dst_stride_uyvy,
int width,
int height) {
int y;
void (*I422ToUYVYRow)(const uint8_t* src_y, const uint8_t* src_u,
const uint8_t* src_v, uint8_t* dst_uyvy, int width) =
I422ToUYVYRow_C;
if (!src_y || !src_u || !src_v || !dst_uyvy || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
dst_stride_uyvy = -dst_stride_uyvy;
}
// Coalesce rows.
if (src_stride_y == width && src_stride_u * 2 == width &&
src_stride_v * 2 == width && dst_stride_uyvy == width * 2) {
width *= height;
height = 1;
src_stride_y = src_stride_u = src_stride_v = dst_stride_uyvy = 0;
}
#if defined(HAS_I422TOUYVYROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
if (IS_ALIGNED(width, 16)) {
I422ToUYVYRow = I422ToUYVYRow_SSE2;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
I422ToUYVYRow = I422ToUYVYRow_Any_AVX2;
if (IS_ALIGNED(width, 32)) {
I422ToUYVYRow = I422ToUYVYRow_AVX2;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
if (IS_ALIGNED(width, 16)) {
I422ToUYVYRow = I422ToUYVYRow_NEON;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
I422ToUYVYRow = I422ToUYVYRow_Any_MMI;
if (IS_ALIGNED(width, 8)) {
I422ToUYVYRow = I422ToUYVYRow_MMI;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
I422ToUYVYRow = I422ToUYVYRow_Any_MSA;
if (IS_ALIGNED(width, 32)) {
I422ToUYVYRow = I422ToUYVYRow_MSA;
}
}
#endif
for (y = 0; y < height; ++y) {
I422ToUYVYRow(src_y, src_u, src_v, dst_uyvy, width);
src_y += src_stride_y;
src_u += src_stride_u;
src_v += src_stride_v;
dst_uyvy += dst_stride_uyvy;
}
return 0;
}
LIBYUV_API
int I420ToUYVY(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_uyvy,
int dst_stride_uyvy,
int width,
int height) {
int y;
void (*I422ToUYVYRow)(const uint8_t* src_y, const uint8_t* src_u,
const uint8_t* src_v, uint8_t* dst_uyvy, int width) =
I422ToUYVYRow_C;
if (!src_y || !src_u || !src_v || !dst_uyvy || width <= 0 || height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
dst_stride_uyvy = -dst_stride_uyvy;
}
#if defined(HAS_I422TOUYVYROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
if (IS_ALIGNED(width, 16)) {
I422ToUYVYRow = I422ToUYVYRow_SSE2;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
I422ToUYVYRow = I422ToUYVYRow_Any_AVX2;
if (IS_ALIGNED(width, 32)) {
I422ToUYVYRow = I422ToUYVYRow_AVX2;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
if (IS_ALIGNED(width, 16)) {
I422ToUYVYRow = I422ToUYVYRow_NEON;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
I422ToUYVYRow = I422ToUYVYRow_Any_MMI;
if (IS_ALIGNED(width, 8)) {
I422ToUYVYRow = I422ToUYVYRow_MMI;
}
}
#endif
#if defined(HAS_I422TOUYVYROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
I422ToUYVYRow = I422ToUYVYRow_Any_MSA;
if (IS_ALIGNED(width, 32)) {
I422ToUYVYRow = I422ToUYVYRow_MSA;
}
}
#endif
for (y = 0; y < height - 1; y += 2) {
I422ToUYVYRow(src_y, src_u, src_v, dst_uyvy, width);
I422ToUYVYRow(src_y + src_stride_y, src_u, src_v,
dst_uyvy + dst_stride_uyvy, width);
src_y += src_stride_y * 2;
src_u += src_stride_u;
src_v += src_stride_v;
dst_uyvy += dst_stride_uyvy * 2;
}
if (height & 1) {
I422ToUYVYRow(src_y, src_u, src_v, dst_uyvy, width);
}
return 0;
}
LIBYUV_API
int I420ToNV12(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int width,
int height) {
int halfwidth = (width + 1) / 2;
int halfheight = (height + 1) / 2;
if (!src_y || !src_u || !src_v || !dst_y || !dst_uv || width <= 0 ||
height == 0) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
halfheight = (height + 1) >> 1;
src_y = src_y + (height - 1) * src_stride_y;
src_u = src_u + (halfheight - 1) * src_stride_u;
src_v = src_v + (halfheight - 1) * src_stride_v;
src_stride_y = -src_stride_y;
src_stride_u = -src_stride_u;
src_stride_v = -src_stride_v;
}
if (dst_y) {
CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
}
MergeUVPlane(src_u, src_stride_u, src_v, src_stride_v, dst_uv, dst_stride_uv,
halfwidth, halfheight);
return 0;
}
LIBYUV_API
int I420ToNV21(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int width,
int height) {
return I420ToNV12(src_y, src_stride_y, src_v, src_stride_v, src_u,
src_stride_u, dst_y, dst_stride_y, dst_vu, dst_stride_vu,
width, height);
}
// Convert I420 to specified format
LIBYUV_API
int ConvertFromI420(const uint8_t* y,
int y_stride,
const uint8_t* u,
int u_stride,
const uint8_t* v,
int v_stride,
uint8_t* dst_sample,
int dst_sample_stride,
int width,
int height,
uint32_t fourcc) {
uint32_t format = CanonicalFourCC(fourcc);
int r = 0;
if (!y || !u || !v || !dst_sample || width <= 0 || height == 0) {
return -1;
}
switch (format) {
// Single plane formats
case FOURCC_YUY2:
r = I420ToYUY2(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 2, width,
height);
break;
case FOURCC_UYVY:
r = I420ToUYVY(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 2, width,
height);
break;
case FOURCC_RGBP:
r = I420ToRGB565(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 2, width,
height);
break;
case FOURCC_RGBO:
r = I420ToARGB1555(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 2,
width, height);
break;
case FOURCC_R444:
r = I420ToARGB4444(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 2,
width, height);
break;
case FOURCC_24BG:
r = I420ToRGB24(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 3, width,
height);
break;
case FOURCC_RAW:
r = I420ToRAW(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 3, width,
height);
break;
case FOURCC_ARGB:
r = I420ToARGB(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 4, width,
height);
break;
case FOURCC_BGRA:
r = I420ToBGRA(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 4, width,
height);
break;
case FOURCC_ABGR:
r = I420ToABGR(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 4, width,
height);
break;
case FOURCC_RGBA:
r = I420ToRGBA(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 4, width,
height);
break;
case FOURCC_AR30:
r = I420ToAR30(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width * 4, width,
height);
break;
case FOURCC_I400:
r = I400Copy(y, y_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width, width,
height);
break;
case FOURCC_NV12: {
uint8_t* dst_uv = dst_sample + width * height;
r = I420ToNV12(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width, dst_uv,
dst_sample_stride ? dst_sample_stride : width, width,
height);
break;
}
case FOURCC_NV21: {
uint8_t* dst_vu = dst_sample + width * height;
r = I420ToNV21(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride ? dst_sample_stride : width, dst_vu,
dst_sample_stride ? dst_sample_stride : width, width,
height);
break;
}
// Triplanar formats
case FOURCC_I420:
case FOURCC_YV12: {
dst_sample_stride = dst_sample_stride ? dst_sample_stride : width;
int halfstride = (dst_sample_stride + 1) / 2;
int halfheight = (height + 1) / 2;
uint8_t* dst_u;
uint8_t* dst_v;
if (format == FOURCC_YV12) {
dst_v = dst_sample + dst_sample_stride * height;
dst_u = dst_v + halfstride * halfheight;
} else {
dst_u = dst_sample + dst_sample_stride * height;
dst_v = dst_u + halfstride * halfheight;
}
r = I420Copy(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride, dst_u, halfstride, dst_v, halfstride,
width, height);
break;
}
case FOURCC_I422:
case FOURCC_YV16: {
dst_sample_stride = dst_sample_stride ? dst_sample_stride : width;
int halfstride = (dst_sample_stride + 1) / 2;
uint8_t* dst_u;
uint8_t* dst_v;
if (format == FOURCC_YV16) {
dst_v = dst_sample + dst_sample_stride * height;
dst_u = dst_v + halfstride * height;
} else {
dst_u = dst_sample + dst_sample_stride * height;
dst_v = dst_u + halfstride * height;
}
r = I420ToI422(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride, dst_u, halfstride, dst_v, halfstride,
width, height);
break;
}
case FOURCC_I444:
case FOURCC_YV24: {
dst_sample_stride = dst_sample_stride ? dst_sample_stride : width;
uint8_t* dst_u;
uint8_t* dst_v;
if (format == FOURCC_YV24) {
dst_v = dst_sample + dst_sample_stride * height;
dst_u = dst_v + dst_sample_stride * height;
} else {
dst_u = dst_sample + dst_sample_stride * height;
dst_v = dst_u + dst_sample_stride * height;
}
r = I420ToI444(y, y_stride, u, u_stride, v, v_stride, dst_sample,
dst_sample_stride, dst_u, dst_sample_stride, dst_v,
dst_sample_stride, width, height);
break;
}
    // Formats not supported - MJPG, other biplanar formats, some RGB formats.
default:
return -1; // unknown fourcc - return failure code.
}
return r;
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
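Usage sketch (not part of this commit's vendored sources): ConvertFromI420
above dispatches on a FOURCC code and, when dst_sample_stride is 0, derives a
default stride from the width. A minimal sketch of driving it for this
commit's NV12 target follows; the wrapper name and buffer handling are
illustrative, not libyuv API.

#include <vector>

#include "libyuv/convert_from.h"
#include "libyuv/video_common.h"  // FOURCC_NV12

// Pack an I420 frame into one contiguous NV12 buffer.
bool I420FrameToNV12(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                     int w, int h, std::vector<uint8_t>* out) {
  int halfw = (w + 1) / 2;
  // NV12 size: Y plane plus one width-wide UV row per two Y rows.
  out->resize((size_t)w * h + (size_t)w * ((h + 1) / 2));
  // Passing 0 for dst_sample_stride lets ConvertFromI420 assume the width.
  return libyuv::ConvertFromI420(y, w, u, halfw, v, halfw, out->data(), 0, w,
                                 h, libyuv::FOURCC_NV12) == 0;
}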

File diff suppressed because it is too large

602
thirdparty/libyuv/source/convert_jpeg.cc vendored Normal file

@@ -0,0 +1,602 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/convert.h"
#include "libyuv/convert_argb.h"
#ifdef HAVE_JPEG
#include "libyuv/mjpeg_decoder.h"
#endif
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#ifdef HAVE_JPEG
struct I420Buffers {
uint8_t* y;
int y_stride;
uint8_t* u;
int u_stride;
uint8_t* v;
int v_stride;
int w;
int h;
};
static void JpegCopyI420(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
I420Buffers* dest = (I420Buffers*)(opaque);
I420Copy(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->y, dest->y_stride, dest->u, dest->u_stride, dest->v,
dest->v_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->u += ((rows + 1) >> 1) * dest->u_stride;
dest->v += ((rows + 1) >> 1) * dest->v_stride;
dest->h -= rows;
}
static void JpegI422ToI420(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
I420Buffers* dest = (I420Buffers*)(opaque);
I422ToI420(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->y, dest->y_stride, dest->u, dest->u_stride, dest->v,
dest->v_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->u += ((rows + 1) >> 1) * dest->u_stride;
dest->v += ((rows + 1) >> 1) * dest->v_stride;
dest->h -= rows;
}
static void JpegI444ToI420(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
I420Buffers* dest = (I420Buffers*)(opaque);
I444ToI420(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->y, dest->y_stride, dest->u, dest->u_stride, dest->v,
dest->v_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->u += ((rows + 1) >> 1) * dest->u_stride;
dest->v += ((rows + 1) >> 1) * dest->v_stride;
dest->h -= rows;
}
static void JpegI400ToI420(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
I420Buffers* dest = (I420Buffers*)(opaque);
I400ToI420(data[0], strides[0], dest->y, dest->y_stride, dest->u,
dest->u_stride, dest->v, dest->v_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->u += ((rows + 1) >> 1) * dest->u_stride;
dest->v += ((rows + 1) >> 1) * dest->v_stride;
dest->h -= rows;
}
// Query size of MJPG in pixels.
LIBYUV_API
int MJPGSize(const uint8_t* src_mjpg,
size_t src_size_mjpg,
int* width,
int* height) {
MJpegDecoder mjpeg_decoder;
LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(src_mjpg, src_size_mjpg);
if (ret) {
*width = mjpeg_decoder.GetWidth();
*height = mjpeg_decoder.GetHeight();
}
mjpeg_decoder.UnloadFrame();
return ret ? 0 : -1; // -1 for runtime failure.
}
// MJPG (Motion JPEG) to I420
// TODO(fbarchard): review src_width and src_height requirement. dst_width and
// dst_height may be enough.
LIBYUV_API
int MJPGToI420(const uint8_t* src_mjpg,
size_t src_size_mjpg,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int src_width,
int src_height,
int dst_width,
int dst_height) {
if (src_size_mjpg == kUnknownDataSize) {
// ERROR: MJPEG frame size unknown
return -1;
}
// TODO(fbarchard): Port MJpeg to C.
MJpegDecoder mjpeg_decoder;
LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(src_mjpg, src_size_mjpg);
if (ret && (mjpeg_decoder.GetWidth() != src_width ||
mjpeg_decoder.GetHeight() != src_height)) {
// ERROR: MJPEG frame has unexpected dimensions
mjpeg_decoder.UnloadFrame();
return 1; // runtime failure
}
if (ret) {
I420Buffers bufs = {dst_y, dst_stride_y, dst_u, dst_stride_u,
dst_v, dst_stride_v, dst_width, dst_height};
// YUV420
if (mjpeg_decoder.GetColorSpace() == MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 2 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegCopyI420, &bufs, dst_width,
dst_height);
// YUV422
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI422ToI420, &bufs, dst_width,
dst_height);
// YUV444
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI444ToI420, &bufs, dst_width,
dst_height);
// YUV400
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceGrayscale &&
mjpeg_decoder.GetNumComponents() == 1 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI400ToI420, &bufs, dst_width,
dst_height);
} else {
      // TODO(fbarchard): Implement conversion for any other
      // colorspace/subsample factors that occur in practice.
      // ERROR: Unable to convert MJPEG frame because format is not supported.
mjpeg_decoder.UnloadFrame();
return 1;
}
}
return ret ? 0 : 1;
}
struct NV21Buffers {
uint8_t* y;
int y_stride;
uint8_t* vu;
int vu_stride;
int w;
int h;
};
static void JpegI420ToNV21(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
I420ToNV21(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->y, dest->y_stride, dest->vu, dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
static void JpegI422ToNV21(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
I422ToNV21(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->y, dest->y_stride, dest->vu, dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
static void JpegI444ToNV21(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
I444ToNV21(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->y, dest->y_stride, dest->vu, dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
static void JpegI400ToNV21(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
I400ToNV21(data[0], strides[0], dest->y, dest->y_stride, dest->vu,
dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
// MJPG (Motion JPEG) to NV21
LIBYUV_API
int MJPGToNV21(const uint8_t* src_mjpg,
size_t src_size_mjpg,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_vu,
int dst_stride_vu,
int src_width,
int src_height,
int dst_width,
int dst_height) {
if (src_size_mjpg == kUnknownDataSize) {
// ERROR: MJPEG frame size unknown
return -1;
}
// TODO(fbarchard): Port MJpeg to C.
MJpegDecoder mjpeg_decoder;
LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(src_mjpg, src_size_mjpg);
if (ret && (mjpeg_decoder.GetWidth() != src_width ||
mjpeg_decoder.GetHeight() != src_height)) {
// ERROR: MJPEG frame has unexpected dimensions
mjpeg_decoder.UnloadFrame();
return 1; // runtime failure
}
if (ret) {
NV21Buffers bufs = {dst_y, dst_stride_y, dst_vu,
dst_stride_vu, dst_width, dst_height};
// YUV420
if (mjpeg_decoder.GetColorSpace() == MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 2 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI420ToNV21, &bufs, dst_width,
dst_height);
// YUV422
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI422ToNV21, &bufs, dst_width,
dst_height);
// YUV444
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI444ToNV21, &bufs, dst_width,
dst_height);
// YUV400
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceGrayscale &&
mjpeg_decoder.GetNumComponents() == 1 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI400ToNV21, &bufs, dst_width,
dst_height);
} else {
// Unknown colorspace.
mjpeg_decoder.UnloadFrame();
return 1;
}
}
return ret ? 0 : 1;
}
static void JpegI420ToNV12(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
// Use NV21 with VU swapped.
I420ToNV21(data[0], strides[0], data[2], strides[2], data[1], strides[1],
dest->y, dest->y_stride, dest->vu, dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
static void JpegI422ToNV12(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
// Use NV21 with VU swapped.
I422ToNV21(data[0], strides[0], data[2], strides[2], data[1], strides[1],
dest->y, dest->y_stride, dest->vu, dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
static void JpegI444ToNV12(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
// Use NV21 with VU swapped.
I444ToNV21(data[0], strides[0], data[2], strides[2], data[1], strides[1],
dest->y, dest->y_stride, dest->vu, dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
static void JpegI400ToNV12(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
NV21Buffers* dest = (NV21Buffers*)(opaque);
// Use NV21 since there is no UV plane.
I400ToNV21(data[0], strides[0], dest->y, dest->y_stride, dest->vu,
dest->vu_stride, dest->w, rows);
dest->y += rows * dest->y_stride;
dest->vu += ((rows + 1) >> 1) * dest->vu_stride;
dest->h -= rows;
}
// MJPG (Motion JPEG) to NV12.
LIBYUV_API
int MJPGToNV12(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_uv,
int dst_stride_uv,
int src_width,
int src_height,
int dst_width,
int dst_height) {
if (sample_size == kUnknownDataSize) {
// ERROR: MJPEG frame size unknown
return -1;
}
// TODO(fbarchard): Port MJpeg to C.
MJpegDecoder mjpeg_decoder;
LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size);
if (ret && (mjpeg_decoder.GetWidth() != src_width ||
mjpeg_decoder.GetHeight() != src_height)) {
// ERROR: MJPEG frame has unexpected dimensions
mjpeg_decoder.UnloadFrame();
return 1; // runtime failure
}
if (ret) {
// Use NV21Buffers but with UV instead of VU.
NV21Buffers bufs = {dst_y, dst_stride_y, dst_uv,
dst_stride_uv, dst_width, dst_height};
// YUV420
if (mjpeg_decoder.GetColorSpace() == MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 2 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI420ToNV12, &bufs, dst_width,
dst_height);
// YUV422
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI422ToNV12, &bufs, dst_width,
dst_height);
// YUV444
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI444ToNV12, &bufs, dst_width,
dst_height);
// YUV400
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceGrayscale &&
mjpeg_decoder.GetNumComponents() == 1 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI400ToNV12, &bufs, dst_width,
dst_height);
} else {
// Unknown colorspace.
mjpeg_decoder.UnloadFrame();
return 1;
}
}
return ret ? 0 : 1;
}
struct ARGBBuffers {
uint8_t* argb;
int argb_stride;
int w;
int h;
};
static void JpegI420ToARGB(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
ARGBBuffers* dest = (ARGBBuffers*)(opaque);
I420ToARGB(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->argb, dest->argb_stride, dest->w, rows);
dest->argb += rows * dest->argb_stride;
dest->h -= rows;
}
static void JpegI422ToARGB(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
ARGBBuffers* dest = (ARGBBuffers*)(opaque);
I422ToARGB(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->argb, dest->argb_stride, dest->w, rows);
dest->argb += rows * dest->argb_stride;
dest->h -= rows;
}
static void JpegI444ToARGB(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
ARGBBuffers* dest = (ARGBBuffers*)(opaque);
I444ToARGB(data[0], strides[0], data[1], strides[1], data[2], strides[2],
dest->argb, dest->argb_stride, dest->w, rows);
dest->argb += rows * dest->argb_stride;
dest->h -= rows;
}
static void JpegI400ToARGB(void* opaque,
const uint8_t* const* data,
const int* strides,
int rows) {
ARGBBuffers* dest = (ARGBBuffers*)(opaque);
I400ToARGB(data[0], strides[0], dest->argb, dest->argb_stride, dest->w, rows);
dest->argb += rows * dest->argb_stride;
dest->h -= rows;
}
// MJPG (Motion JPEG) to ARGB
// TODO(fbarchard): review src_width and src_height requirement. dst_width and
// dst_height may be enough.
LIBYUV_API
int MJPGToARGB(const uint8_t* src_mjpg,
size_t src_size_mjpg,
uint8_t* dst_argb,
int dst_stride_argb,
int src_width,
int src_height,
int dst_width,
int dst_height) {
if (src_size_mjpg == kUnknownDataSize) {
// ERROR: MJPEG frame size unknown
return -1;
}
// TODO(fbarchard): Port MJpeg to C.
MJpegDecoder mjpeg_decoder;
LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(src_mjpg, src_size_mjpg);
if (ret && (mjpeg_decoder.GetWidth() != src_width ||
mjpeg_decoder.GetHeight() != src_height)) {
// ERROR: MJPEG frame has unexpected dimensions
mjpeg_decoder.UnloadFrame();
return 1; // runtime failure
}
if (ret) {
ARGBBuffers bufs = {dst_argb, dst_stride_argb, dst_width, dst_height};
// YUV420
if (mjpeg_decoder.GetColorSpace() == MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 2 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI420ToARGB, &bufs, dst_width,
dst_height);
// YUV422
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI422ToARGB, &bufs, dst_width,
dst_height);
// YUV444
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceYCbCr &&
mjpeg_decoder.GetNumComponents() == 3 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1 &&
mjpeg_decoder.GetVertSampFactor(1) == 1 &&
mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
mjpeg_decoder.GetVertSampFactor(2) == 1 &&
mjpeg_decoder.GetHorizSampFactor(2) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI444ToARGB, &bufs, dst_width,
dst_height);
// YUV400
} else if (mjpeg_decoder.GetColorSpace() ==
MJpegDecoder::kColorSpaceGrayscale &&
mjpeg_decoder.GetNumComponents() == 1 &&
mjpeg_decoder.GetVertSampFactor(0) == 1 &&
mjpeg_decoder.GetHorizSampFactor(0) == 1) {
ret = mjpeg_decoder.DecodeToCallback(&JpegI400ToARGB, &bufs, dst_width,
dst_height);
} else {
      // TODO(fbarchard): Implement conversion for any other
      // colorspace/subsample factors that occur in practice.
      // ERROR: Unable to convert MJPEG frame because format is not supported.
mjpeg_decoder.UnloadFrame();
return 1;
}
}
return ret ? 0 : 1;
}
#endif // HAVE_JPEG
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
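Usage sketch (not part of this commit's vendored sources): MJPGSize and
MJPGToI420 combine into the usual two-step decode: query dimensions, allocate
planes, convert. A hedged sketch, valid only in HAVE_JPEG builds; the wrapper
below is illustrative.

#include <vector>

#include "libyuv/convert.h"  // MJPGSize(), MJPGToI420()

bool DecodeMjpegToI420(const uint8_t* jpg, size_t jpg_size,
                       std::vector<uint8_t>* i420, int* w, int* h) {
  if (libyuv::MJPGSize(jpg, jpg_size, w, h) != 0) {
    return false;  // could not parse the JPEG header
  }
  int halfw = (*w + 1) / 2;
  int halfh = (*h + 1) / 2;
  i420->resize((size_t)(*w) * (*h) + 2 * (size_t)halfw * halfh);
  uint8_t* y = i420->data();
  uint8_t* u = y + (size_t)(*w) * (*h);
  uint8_t* v = u + (size_t)halfw * halfh;
  // Source and destination dimensions match: full frame, no scaling.
  return libyuv::MJPGToI420(jpg, jpg_size, y, *w, u, halfw, v, halfw, *w, *h,
                            *w, *h) == 0;
}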

382
thirdparty/libyuv/source/convert_to_argb.cc vendored Normal file

@@ -0,0 +1,382 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/convert_argb.h"
#include "libyuv/cpu_id.h"
#ifdef HAVE_JPEG
#include "libyuv/mjpeg_decoder.h"
#endif
#include "libyuv/rotate_argb.h"
#include "libyuv/row.h"
#include "libyuv/video_common.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Convert camera sample to ARGB with cropping, rotation and vertical flip.
// src_width is used for source stride computation
// src_height is used to compute location of planes, and indicate inversion
// sample_size is measured in bytes and is the size of the frame.
// With MJPEG it is the compressed size of the frame.
// TODO(fbarchard): Add the following:
// H010ToARGB
// I010ToARGB
LIBYUV_API
int ConvertToARGB(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_argb,
int dst_stride_argb,
int crop_x,
int crop_y,
int src_width,
int src_height,
int crop_width,
int crop_height,
enum RotationMode rotation,
uint32_t fourcc) {
uint32_t format = CanonicalFourCC(fourcc);
int aligned_src_width = (src_width + 1) & ~1;
const uint8_t* src;
const uint8_t* src_uv;
int abs_src_height = (src_height < 0) ? -src_height : src_height;
int inv_crop_height = (crop_height < 0) ? -crop_height : crop_height;
int r = 0;
// One pass rotation is available for some formats. For the rest, convert
// to ARGB (with optional vertical flipping) into a temporary ARGB buffer,
// and then rotate the ARGB to the final destination buffer.
// For in-place conversion, if destination dst_argb is same as source sample,
// also enable temporary buffer.
LIBYUV_BOOL need_buf =
(rotation && format != FOURCC_ARGB) || dst_argb == sample;
uint8_t* dest_argb = dst_argb;
int dest_dst_stride_argb = dst_stride_argb;
uint8_t* rotate_buffer = NULL;
int abs_crop_height = (crop_height < 0) ? -crop_height : crop_height;
if (dst_argb == NULL || sample == NULL || src_width <= 0 || crop_width <= 0 ||
src_height == 0 || crop_height == 0) {
return -1;
}
if (src_height < 0) {
inv_crop_height = -inv_crop_height;
}
if (need_buf) {
int argb_size = crop_width * 4 * abs_crop_height;
rotate_buffer = (uint8_t*)malloc(argb_size); /* NOLINT */
if (!rotate_buffer) {
return 1; // Out of memory runtime error.
}
dst_argb = rotate_buffer;
dst_stride_argb = crop_width * 4;
}
switch (format) {
// Single plane formats
case FOURCC_YUY2:
src = sample + (aligned_src_width * crop_y + crop_x) * 2;
r = YUY2ToARGB(src, aligned_src_width * 2, dst_argb, dst_stride_argb,
crop_width, inv_crop_height);
break;
case FOURCC_UYVY:
src = sample + (aligned_src_width * crop_y + crop_x) * 2;
r = UYVYToARGB(src, aligned_src_width * 2, dst_argb, dst_stride_argb,
crop_width, inv_crop_height);
break;
case FOURCC_24BG:
src = sample + (src_width * crop_y + crop_x) * 3;
r = RGB24ToARGB(src, src_width * 3, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_RAW:
src = sample + (src_width * crop_y + crop_x) * 3;
r = RAWToARGB(src, src_width * 3, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_ARGB:
if (!need_buf && !rotation) {
src = sample + (src_width * crop_y + crop_x) * 4;
r = ARGBToARGB(src, src_width * 4, dst_argb, dst_stride_argb,
crop_width, inv_crop_height);
}
break;
case FOURCC_BGRA:
src = sample + (src_width * crop_y + crop_x) * 4;
r = BGRAToARGB(src, src_width * 4, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_ABGR:
src = sample + (src_width * crop_y + crop_x) * 4;
r = ABGRToARGB(src, src_width * 4, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_RGBA:
src = sample + (src_width * crop_y + crop_x) * 4;
r = RGBAToARGB(src, src_width * 4, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_AR30:
src = sample + (src_width * crop_y + crop_x) * 4;
r = AR30ToARGB(src, src_width * 4, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_AB30:
src = sample + (src_width * crop_y + crop_x) * 4;
r = AB30ToARGB(src, src_width * 4, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_RGBP:
src = sample + (src_width * crop_y + crop_x) * 2;
r = RGB565ToARGB(src, src_width * 2, dst_argb, dst_stride_argb,
crop_width, inv_crop_height);
break;
case FOURCC_RGBO:
src = sample + (src_width * crop_y + crop_x) * 2;
r = ARGB1555ToARGB(src, src_width * 2, dst_argb, dst_stride_argb,
crop_width, inv_crop_height);
break;
case FOURCC_R444:
src = sample + (src_width * crop_y + crop_x) * 2;
r = ARGB4444ToARGB(src, src_width * 2, dst_argb, dst_stride_argb,
crop_width, inv_crop_height);
break;
case FOURCC_I400:
src = sample + src_width * crop_y + crop_x;
r = I400ToARGB(src, src_width, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
case FOURCC_J400:
src = sample + src_width * crop_y + crop_x;
r = J400ToARGB(src, src_width, dst_argb, dst_stride_argb, crop_width,
inv_crop_height);
break;
// Biplanar formats
case FOURCC_NV12:
src = sample + (src_width * crop_y + crop_x);
src_uv =
sample + aligned_src_width * (abs_src_height + crop_y / 2) + crop_x;
r = NV12ToARGB(src, src_width, src_uv, aligned_src_width, dst_argb,
dst_stride_argb, crop_width, inv_crop_height);
break;
case FOURCC_NV21:
src = sample + (src_width * crop_y + crop_x);
src_uv =
sample + aligned_src_width * (abs_src_height + crop_y / 2) + crop_x;
      // NV21 is NV12 with the U and V order swapped; NV21ToARGB handles it.
r = NV21ToARGB(src, src_width, src_uv, aligned_src_width, dst_argb,
dst_stride_argb, crop_width, inv_crop_height);
break;
// Triplanar formats
case FOURCC_I420:
case FOURCC_YV12: {
const uint8_t* src_y = sample + (src_width * crop_y + crop_x);
const uint8_t* src_u;
const uint8_t* src_v;
int halfwidth = (src_width + 1) / 2;
int halfheight = (abs_src_height + 1) / 2;
if (format == FOURCC_YV12) {
src_v = sample + src_width * abs_src_height +
(halfwidth * crop_y + crop_x) / 2;
src_u = sample + src_width * abs_src_height +
halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
} else {
src_u = sample + src_width * abs_src_height +
(halfwidth * crop_y + crop_x) / 2;
src_v = sample + src_width * abs_src_height +
halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
}
r = I420ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_J420: {
int halfwidth = (src_width + 1) / 2;
int halfheight = (abs_src_height + 1) / 2;
const uint8_t* src_y = sample + (src_width * crop_y + crop_x);
const uint8_t* src_u = sample + src_width * abs_src_height +
(halfwidth * crop_y + crop_x) / 2;
const uint8_t* src_v = sample + src_width * abs_src_height +
halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
r = J420ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_H420: {
int halfwidth = (src_width + 1) / 2;
int halfheight = (abs_src_height + 1) / 2;
const uint8_t* src_y = sample + (src_width * crop_y + crop_x);
const uint8_t* src_u = sample + src_width * abs_src_height +
(halfwidth * crop_y + crop_x) / 2;
const uint8_t* src_v = sample + src_width * abs_src_height +
halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
r = H420ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_U420: {
int halfwidth = (src_width + 1) / 2;
int halfheight = (abs_src_height + 1) / 2;
const uint8_t* src_y = sample + (src_width * crop_y + crop_x);
const uint8_t* src_u = sample + src_width * abs_src_height +
(halfwidth * crop_y + crop_x) / 2;
const uint8_t* src_v = sample + src_width * abs_src_height +
halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
r = U420ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_I422:
case FOURCC_YV16: {
int halfwidth = (src_width + 1) / 2;
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
if (format == FOURCC_YV16) {
src_v = sample + src_width * abs_src_height + halfwidth * crop_y +
crop_x / 2;
src_u = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + crop_x / 2;
} else {
src_u = sample + src_width * abs_src_height + halfwidth * crop_y +
crop_x / 2;
src_v = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + crop_x / 2;
}
r = I422ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_J422: {
int halfwidth = (src_width + 1) / 2;
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u =
sample + src_width * abs_src_height + halfwidth * crop_y + crop_x / 2;
const uint8_t* src_v = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + crop_x / 2;
r = J422ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_H422: {
int halfwidth = (src_width + 1) / 2;
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u =
sample + src_width * abs_src_height + halfwidth * crop_y + crop_x / 2;
const uint8_t* src_v = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + crop_x / 2;
r = H422ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_U422: {
int halfwidth = (src_width + 1) / 2;
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u =
sample + src_width * abs_src_height + halfwidth * crop_y + crop_x / 2;
const uint8_t* src_v = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + crop_x / 2;
      r = U422ToARGB(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_I444:
case FOURCC_YV24: {
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
if (format == FOURCC_YV24) {
src_v = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_u = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
} else {
src_u = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_v = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
}
r = I444ToARGB(src_y, src_width, src_u, src_width, src_v, src_width,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_J444: {
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
src_u = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_v = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
r = J444ToARGB(src_y, src_width, src_u, src_width, src_v, src_width,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_H444: {
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
src_u = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_v = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
r = H444ToARGB(src_y, src_width, src_u, src_width, src_v, src_width,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
case FOURCC_U444: {
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
src_u = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_v = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
r = U444ToARGB(src_y, src_width, src_u, src_width, src_v, src_width,
dst_argb, dst_stride_argb, crop_width, inv_crop_height);
break;
}
#ifdef HAVE_JPEG
case FOURCC_MJPG:
r = MJPGToARGB(sample, sample_size, dst_argb, dst_stride_argb, src_width,
abs_src_height, crop_width, inv_crop_height);
break;
#endif
default:
r = -1; // unknown fourcc - return failure code.
}
if (need_buf) {
if (!r) {
r = ARGBRotate(dst_argb, dst_stride_argb, dest_argb, dest_dst_stride_argb,
crop_width, abs_crop_height, rotation);
}
free(rotate_buffer);
} else if (rotation) {
src = sample + (src_width * crop_y + crop_x) * 4;
r = ARGBRotate(src, src_width * 4, dst_argb, dst_stride_argb, crop_width,
inv_crop_height, rotation);
}
return r;
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
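Usage sketch (not part of this commit's vendored sources): a hedged sketch of
ConvertToARGB on a packed YUY2 camera frame with a centered crop and a
90-degree rotation; the wrapper and its names are illustrative only.

#include <vector>

#include "libyuv/convert_argb.h"  // ConvertToARGB()
#include "libyuv/rotate.h"        // kRotate90
#include "libyuv/video_common.h"  // FOURCC_YUY2

bool Yuy2ToArgbCropRotate(const uint8_t* frame, size_t frame_size, int src_w,
                          int src_h, std::vector<uint8_t>* argb) {
  const int crop_w = src_w / 2;  // center crop; for packed 4:2:2 sources,
  const int crop_h = src_h / 2;  // crop_x should land on an even pixel
  const int crop_x = (src_w - crop_w) / 2;
  const int crop_y = (src_h - crop_h) / 2;
  argb->resize((size_t)crop_w * crop_h * 4);
  // After kRotate90 the output is crop_h pixels wide and crop_w pixels tall,
  // so the destination stride is crop_h * 4 bytes.
  return libyuv::ConvertToARGB(frame, frame_size, argb->data(), crop_h * 4,
                               crop_x, crop_y, src_w, src_h, crop_w, crop_h,
                               libyuv::kRotate90, libyuv::FOURCC_YUY2) == 0;
}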

272
thirdparty/libyuv/source/convert_to_i420.cc vendored Normal file

@@ -0,0 +1,272 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdlib.h>
#include "libyuv/convert.h"
#include "libyuv/video_common.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Convert camera sample to I420 with cropping, rotation and vertical flip.
// src_width is used for source stride computation
// src_height is used to compute location of planes, and indicate inversion
// sample_size is measured in bytes and is the size of the frame.
// With MJPEG it is the compressed size of the frame.
LIBYUV_API
int ConvertToI420(const uint8_t* sample,
size_t sample_size,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int crop_x,
int crop_y,
int src_width,
int src_height,
int crop_width,
int crop_height,
enum RotationMode rotation,
uint32_t fourcc) {
uint32_t format = CanonicalFourCC(fourcc);
int aligned_src_width = (src_width + 1) & ~1;
const uint8_t* src;
const uint8_t* src_uv;
const int abs_src_height = (src_height < 0) ? -src_height : src_height;
// TODO(nisse): Why allow crop_height < 0?
const int abs_crop_height = (crop_height < 0) ? -crop_height : crop_height;
int r = 0;
LIBYUV_BOOL need_buf =
(rotation && format != FOURCC_I420 && format != FOURCC_NV12 &&
format != FOURCC_NV21 && format != FOURCC_YV12) ||
dst_y == sample;
uint8_t* tmp_y = dst_y;
uint8_t* tmp_u = dst_u;
uint8_t* tmp_v = dst_v;
int tmp_y_stride = dst_stride_y;
int tmp_u_stride = dst_stride_u;
int tmp_v_stride = dst_stride_v;
uint8_t* rotate_buffer = NULL;
const int inv_crop_height =
(src_height < 0) ? -abs_crop_height : abs_crop_height;
if (!dst_y || !dst_u || !dst_v || !sample || src_width <= 0 ||
crop_width <= 0 || src_height == 0 || crop_height == 0) {
return -1;
}
// One pass rotation is available for some formats. For the rest, convert
// to I420 (with optional vertical flipping) into a temporary I420 buffer,
// and then rotate the I420 to the final destination buffer.
// For in-place conversion, if destination dst_y is same as source sample,
// also enable temporary buffer.
if (need_buf) {
int y_size = crop_width * abs_crop_height;
int uv_size = ((crop_width + 1) / 2) * ((abs_crop_height + 1) / 2);
rotate_buffer = (uint8_t*)malloc(y_size + uv_size * 2); /* NOLINT */
if (!rotate_buffer) {
return 1; // Out of memory runtime error.
}
dst_y = rotate_buffer;
dst_u = dst_y + y_size;
dst_v = dst_u + uv_size;
dst_stride_y = crop_width;
dst_stride_u = dst_stride_v = ((crop_width + 1) / 2);
}
switch (format) {
// Single plane formats
case FOURCC_YUY2:
src = sample + (aligned_src_width * crop_y + crop_x) * 2;
r = YUY2ToI420(src, aligned_src_width * 2, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_UYVY:
src = sample + (aligned_src_width * crop_y + crop_x) * 2;
r = UYVYToI420(src, aligned_src_width * 2, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_RGBP:
src = sample + (src_width * crop_y + crop_x) * 2;
r = RGB565ToI420(src, src_width * 2, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_RGBO:
src = sample + (src_width * crop_y + crop_x) * 2;
r = ARGB1555ToI420(src, src_width * 2, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_R444:
src = sample + (src_width * crop_y + crop_x) * 2;
r = ARGB4444ToI420(src, src_width * 2, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_24BG:
src = sample + (src_width * crop_y + crop_x) * 3;
r = RGB24ToI420(src, src_width * 3, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_RAW:
src = sample + (src_width * crop_y + crop_x) * 3;
r = RAWToI420(src, src_width * 3, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_ARGB:
src = sample + (src_width * crop_y + crop_x) * 4;
r = ARGBToI420(src, src_width * 4, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_BGRA:
src = sample + (src_width * crop_y + crop_x) * 4;
r = BGRAToI420(src, src_width * 4, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_ABGR:
src = sample + (src_width * crop_y + crop_x) * 4;
r = ABGRToI420(src, src_width * 4, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
case FOURCC_RGBA:
src = sample + (src_width * crop_y + crop_x) * 4;
r = RGBAToI420(src, src_width * 4, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, crop_width,
inv_crop_height);
break;
// TODO(fbarchard): Add AR30 and AB30
case FOURCC_I400:
src = sample + src_width * crop_y + crop_x;
r = I400ToI420(src, src_width, dst_y, dst_stride_y, dst_u, dst_stride_u,
dst_v, dst_stride_v, crop_width, inv_crop_height);
break;
// Biplanar formats
case FOURCC_NV12:
src = sample + (src_width * crop_y + crop_x);
src_uv = sample + (src_width * abs_src_height) +
((crop_y / 2) * aligned_src_width) + ((crop_x / 2) * 2);
r = NV12ToI420Rotate(src, src_width, src_uv, aligned_src_width, dst_y,
dst_stride_y, dst_u, dst_stride_u, dst_v,
dst_stride_v, crop_width, inv_crop_height, rotation);
break;
case FOURCC_NV21:
src = sample + (src_width * crop_y + crop_x);
src_uv = sample + (src_width * abs_src_height) +
((crop_y / 2) * aligned_src_width) + ((crop_x / 2) * 2);
// Call NV12 but with dst_u and dst_v parameters swapped.
r = NV12ToI420Rotate(src, src_width, src_uv, aligned_src_width, dst_y,
dst_stride_y, dst_v, dst_stride_v, dst_u,
dst_stride_u, crop_width, inv_crop_height, rotation);
break;
// Triplanar formats
case FOURCC_I420:
case FOURCC_YV12: {
const uint8_t* src_y = sample + (src_width * crop_y + crop_x);
const uint8_t* src_u;
const uint8_t* src_v;
int halfwidth = (src_width + 1) / 2;
int halfheight = (abs_src_height + 1) / 2;
if (format == FOURCC_YV12) {
src_v = sample + src_width * abs_src_height + halfwidth * (crop_y / 2) +
(crop_x / 2);
src_u = sample + src_width * abs_src_height +
halfwidth * (halfheight + (crop_y / 2)) + (crop_x / 2);
} else {
src_u = sample + src_width * abs_src_height + halfwidth * (crop_y / 2) +
(crop_x / 2);
src_v = sample + src_width * abs_src_height +
halfwidth * (halfheight + (crop_y / 2)) + (crop_x / 2);
}
r = I420Rotate(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_y, dst_stride_y, dst_u, dst_stride_u, dst_v,
dst_stride_v, crop_width, inv_crop_height, rotation);
break;
}
case FOURCC_I422:
case FOURCC_YV16: {
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
int halfwidth = (src_width + 1) / 2;
if (format == FOURCC_YV16) {
src_v = sample + src_width * abs_src_height + halfwidth * crop_y +
(crop_x / 2);
src_u = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + (crop_x / 2);
} else {
src_u = sample + src_width * abs_src_height + halfwidth * crop_y +
(crop_x / 2);
src_v = sample + src_width * abs_src_height +
halfwidth * (abs_src_height + crop_y) + (crop_x / 2);
}
r = I422ToI420(src_y, src_width, src_u, halfwidth, src_v, halfwidth,
dst_y, dst_stride_y, dst_u, dst_stride_u, dst_v,
dst_stride_v, crop_width, inv_crop_height);
break;
}
case FOURCC_I444:
case FOURCC_YV24: {
const uint8_t* src_y = sample + src_width * crop_y + crop_x;
const uint8_t* src_u;
const uint8_t* src_v;
if (format == FOURCC_YV24) {
src_v = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_u = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
} else {
src_u = sample + src_width * (abs_src_height + crop_y) + crop_x;
src_v = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
}
r = I444ToI420(src_y, src_width, src_u, src_width, src_v, src_width,
dst_y, dst_stride_y, dst_u, dst_stride_u, dst_v,
dst_stride_v, crop_width, inv_crop_height);
break;
}
#ifdef HAVE_JPEG
case FOURCC_MJPG:
r = MJPGToI420(sample, sample_size, dst_y, dst_stride_y, dst_u,
dst_stride_u, dst_v, dst_stride_v, src_width,
abs_src_height, crop_width, inv_crop_height);
break;
#endif
default:
r = -1; // unknown fourcc - return failure code.
}
if (need_buf) {
if (!r) {
r = I420Rotate(dst_y, dst_stride_y, dst_u, dst_stride_u, dst_v,
dst_stride_v, tmp_y, tmp_y_stride, tmp_u, tmp_u_stride,
tmp_v, tmp_v_stride, crop_width, abs_crop_height,
rotation);
}
free(rotate_buffer);
}
return r;
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
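Usage sketch (not part of this commit's vendored sources): the reverse
direction of this commit's NV12<->I420 work feeds a full NV12 frame through
ConvertToI420, which dispatches to the NV12ToI420Rotate case above. The
wrapper below is a sketch; its names are illustrative.

#include <vector>

#include "libyuv/convert.h"       // ConvertToI420()
#include "libyuv/video_common.h"  // FOURCC_NV12

bool Nv12FrameToI420(const uint8_t* nv12, size_t nv12_size, int w, int h,
                     std::vector<uint8_t>* i420) {
  int halfw = (w + 1) / 2;
  int halfh = (h + 1) / 2;
  i420->resize((size_t)w * h + 2 * (size_t)halfw * halfh);
  uint8_t* y = i420->data();
  uint8_t* u = y + (size_t)w * h;
  uint8_t* v = u + (size_t)halfw * halfh;
  // Full frame: no crop, no rotation (kRotate0).
  return libyuv::ConvertToI420(nv12, nv12_size, y, w, u, halfw, v, halfw,
                               /*crop_x=*/0, /*crop_y=*/0, w, h, w, h,
                               libyuv::kRotate0, libyuv::FOURCC_NV12) == 0;
}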

280
thirdparty/libyuv/source/cpu_id.cc vendored Normal file

@@ -0,0 +1,280 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/cpu_id.h"
#if defined(_MSC_VER)
#include <intrin.h> // For __cpuidex()
#endif
#if !defined(__pnacl__) && !defined(__CLR_VER) && \
!defined(__native_client__) && (defined(_M_IX86) || defined(_M_X64)) && \
defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
#include <immintrin.h> // For _xgetbv()
#endif
// For ArmCpuCaps(), which is unit tested on all platforms
#include <stdio.h>
#include <string.h>
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// For functions that use the stack and have runtime checks for overflow,
// use SAFEBUFFERS to avoid the additional check.
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219) && \
!defined(__clang__)
#define SAFEBUFFERS __declspec(safebuffers)
#else
#define SAFEBUFFERS
#endif
// cpu_info_ caches the SIMD instruction sets detected at runtime.
LIBYUV_API int cpu_info_ = 0;
// TODO(fbarchard): Consider using int for cpuid so casting is not needed.
// Low level cpuid for X86.
#if (defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__x86_64__)) && \
!defined(__pnacl__) && !defined(__CLR_VER)
LIBYUV_API
void CpuId(int info_eax, int info_ecx, int* cpu_info) {
#if defined(_MSC_VER)
// Visual C version uses intrinsic or inline x86 assembly.
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
__cpuidex(cpu_info, info_eax, info_ecx);
#elif defined(_M_IX86)
__asm {
mov eax, info_eax
mov ecx, info_ecx
mov edi, cpu_info
cpuid
mov [edi], eax
mov [edi + 4], ebx
mov [edi + 8], ecx
mov [edi + 12], edx
}
#else // Visual C but not x86
if (info_ecx == 0) {
__cpuid(cpu_info, info_eax);
} else {
cpu_info[3] = cpu_info[2] = cpu_info[1] = cpu_info[0] = 0u;
}
#endif
// GCC version uses inline x86 assembly.
#else // defined(_MSC_VER)
int info_ebx, info_edx;
asm volatile(
#if defined(__i386__) && defined(__PIC__)
// Preserve ebx for fpic 32 bit.
"mov %%ebx, %%edi \n"
"cpuid \n"
"xchg %%edi, %%ebx \n"
: "=D"(info_ebx),
#else
"cpuid \n"
: "=b"(info_ebx),
#endif // defined( __i386__) && defined(__PIC__)
"+a"(info_eax), "+c"(info_ecx), "=d"(info_edx));
cpu_info[0] = info_eax;
cpu_info[1] = info_ebx;
cpu_info[2] = info_ecx;
cpu_info[3] = info_edx;
#endif // defined(_MSC_VER)
}
#else // (defined(_M_IX86) || defined(_M_X64) ...
LIBYUV_API
void CpuId(int eax, int ecx, int* cpu_info) {
(void)eax;
(void)ecx;
cpu_info[0] = cpu_info[1] = cpu_info[2] = cpu_info[3] = 0;
}
#endif
// For VS2010 and earlier emit can be used:
// _asm _emit 0x0f _asm _emit 0x01 _asm _emit 0xd0 // For VS2010 and earlier.
// __asm {
// xor ecx, ecx // xcr 0
// xgetbv
// mov xcr0, eax
// }
// For 32-bit VS2013 and earlier, the optimizer produces bad code for
// _xgetbv(0).
// https://code.google.com/p/libyuv/issues/detail?id=529
#if defined(_M_IX86) && (_MSC_VER < 1900)
#pragma optimize("g", off)
#endif
#if (defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__x86_64__)) && \
!defined(__pnacl__) && !defined(__CLR_VER) && !defined(__native_client__)
// X86 CPUs have xgetbv to detect whether the OS saves high YMM registers.
int GetXCR0() {
int xcr0 = 0;
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
xcr0 = (int)_xgetbv(0); // VS2010 SP1 required. NOLINT
#elif defined(__i386__) || defined(__x86_64__)
asm(".byte 0x0f, 0x01, 0xd0" : "=a"(xcr0) : "c"(0) : "%edx");
#endif // defined(__i386__) || defined(__x86_64__)
return xcr0;
}
#else
// xgetbv is unavailable to query for OSSave support; return 0.
#define GetXCR0() 0
#endif // defined(_M_IX86) || defined(_M_X64) ..
// Return optimization to previous setting.
#if defined(_M_IX86) && (_MSC_VER < 1900)
#pragma optimize("g", on)
#endif
// based on libvpx arm_cpudetect.c
// For Arm, but public to allow testing on any CPU
LIBYUV_API SAFEBUFFERS int ArmCpuCaps(const char* cpuinfo_name) {
char cpuinfo_line[512];
FILE* f = fopen(cpuinfo_name, "r");
if (!f) {
// Assume Neon if /proc/cpuinfo is unavailable.
    // This occurs in the Chrome sandbox for the Pepper or Render process.
return kCpuHasNEON;
}
while (fgets(cpuinfo_line, sizeof(cpuinfo_line) - 1, f)) {
if (memcmp(cpuinfo_line, "Features", 8) == 0) {
char* p = strstr(cpuinfo_line, " neon");
if (p && (p[5] == ' ' || p[5] == '\n')) {
fclose(f);
return kCpuHasNEON;
}
// aarch64 uses asimd for Neon.
p = strstr(cpuinfo_line, " asimd");
if (p) {
fclose(f);
return kCpuHasNEON;
}
}
}
fclose(f);
return 0;
}
// TODO(fbarchard): Consider read_msa_ir().
LIBYUV_API SAFEBUFFERS int MipsCpuCaps(const char* cpuinfo_name) {
char cpuinfo_line[512];
int flag = 0x0;
FILE* f = fopen(cpuinfo_name, "r");
if (!f) {
// Assume nothing if /proc/cpuinfo is unavailable.
    // This occurs in the Chrome sandbox for the Pepper or Render process.
return 0;
}
while (fgets(cpuinfo_line, sizeof(cpuinfo_line) - 1, f)) {
if (memcmp(cpuinfo_line, "cpu model", 9) == 0) {
      // Work around early kernels that omit mmi from the ASEs line.
if (strstr(cpuinfo_line, "Loongson-3")) {
flag |= kCpuHasMMI;
} else if (strstr(cpuinfo_line, "Loongson-2K")) {
flag |= kCpuHasMMI | kCpuHasMSA;
}
}
if (memcmp(cpuinfo_line, "ASEs implemented", 16) == 0) {
if (strstr(cpuinfo_line, "loongson-mmi") &&
strstr(cpuinfo_line, "loongson-ext")) {
flag |= kCpuHasMMI;
}
if (strstr(cpuinfo_line, "msa")) {
flag |= kCpuHasMSA;
}
// ASEs is the last line, so we can break here.
break;
}
}
fclose(f);
return flag;
}
static SAFEBUFFERS int GetCpuFlags(void) {
int cpu_info = 0;
#if !defined(__pnacl__) && !defined(__CLR_VER) && \
(defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || \
defined(_M_IX86))
int cpu_info0[4] = {0, 0, 0, 0};
int cpu_info1[4] = {0, 0, 0, 0};
int cpu_info7[4] = {0, 0, 0, 0};
CpuId(0, 0, cpu_info0);
CpuId(1, 0, cpu_info1);
if (cpu_info0[0] >= 7) {
CpuId(7, 0, cpu_info7);
}
cpu_info = kCpuHasX86 | ((cpu_info1[3] & 0x04000000) ? kCpuHasSSE2 : 0) |
((cpu_info1[2] & 0x00000200) ? kCpuHasSSSE3 : 0) |
((cpu_info1[2] & 0x00080000) ? kCpuHasSSE41 : 0) |
((cpu_info1[2] & 0x00100000) ? kCpuHasSSE42 : 0) |
((cpu_info7[1] & 0x00000200) ? kCpuHasERMS : 0);
  // AVX requires the OS to save YMM registers.
if (((cpu_info1[2] & 0x1c000000) == 0x1c000000) && // AVX and OSXSave
((GetXCR0() & 6) == 6)) { // Test OS saves YMM registers
cpu_info |= kCpuHasAVX | ((cpu_info7[1] & 0x00000020) ? kCpuHasAVX2 : 0) |
((cpu_info1[2] & 0x00001000) ? kCpuHasFMA3 : 0) |
((cpu_info1[2] & 0x20000000) ? kCpuHasF16C : 0);
// Detect AVX512bw
if ((GetXCR0() & 0xe0) == 0xe0) {
cpu_info |= (cpu_info7[1] & 0x40000000) ? kCpuHasAVX512BW : 0;
cpu_info |= (cpu_info7[1] & 0x80000000) ? kCpuHasAVX512VL : 0;
cpu_info |= (cpu_info7[2] & 0x00000002) ? kCpuHasAVX512VBMI : 0;
cpu_info |= (cpu_info7[2] & 0x00000040) ? kCpuHasAVX512VBMI2 : 0;
cpu_info |= (cpu_info7[2] & 0x00001000) ? kCpuHasAVX512VBITALG : 0;
cpu_info |= (cpu_info7[2] & 0x00004000) ? kCpuHasAVX512VPOPCNTDQ : 0;
cpu_info |= (cpu_info7[2] & 0x00000100) ? kCpuHasGFNI : 0;
}
}
#endif
#if defined(__mips__) && defined(__linux__)
cpu_info = MipsCpuCaps("/proc/cpuinfo");
cpu_info |= kCpuHasMIPS;
#endif
#if defined(__arm__) || defined(__aarch64__)
// gcc -mfpu=neon defines __ARM_NEON__
// __ARM_NEON__ generates code that requires Neon. NaCL also requires Neon.
// For Linux, /proc/cpuinfo can be tested but without that assume Neon.
#if defined(__ARM_NEON__) || defined(__native_client__) || !defined(__linux__)
cpu_info = kCpuHasNEON;
// For aarch64 (arm64), the /proc/cpuinfo feature list is incomplete, e.g. it
// has no neon flag, so Neon support is hard coded for aarch64 below.
#endif
#if defined(__aarch64__)
cpu_info = kCpuHasNEON;
#else
  // On Linux arm, parse the /proc/cpuinfo text file to detect Neon.
cpu_info = ArmCpuCaps("/proc/cpuinfo");
#endif
cpu_info |= kCpuHasARM;
#endif // __arm__
cpu_info |= kCpuInitialized;
return cpu_info;
}
// Note that use of this function is not thread safe.
LIBYUV_API
int MaskCpuFlags(int enable_flags) {
int cpu_info = GetCpuFlags() & enable_flags;
SetCpuFlags(cpu_info);
return cpu_info;
}
LIBYUV_API
int InitCpuFlags(void) {
return MaskCpuFlags(-1);
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
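Usage sketch (not part of this commit's vendored sources): TestCpuFlag
(declared in cpu_id.h) is the usual entry point; it lazily runs the detection
above and is what the row-function dispatchers earlier in this commit call.
A small sketch; the benchmarking use of MaskCpuFlags is illustrative.

#include <cstdio>

#include "libyuv/cpu_id.h"

int main() {
  // The first TestCpuFlag() call runs GetCpuFlags() and caches the result.
  if (libyuv::TestCpuFlag(libyuv::kCpuHasAVX2)) {
    std::printf("AVX2 row functions will be used\n");
  }
  // For A/B benchmarks, mask out one instruction set and keep the rest.
  // Note: MaskCpuFlags is not thread safe (see the comment above).
  libyuv::MaskCpuFlags(~libyuv::kCpuHasAVX2);
  return 0;
}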

585
thirdparty/libyuv/source/mjpeg_decoder.cc vendored Normal file

@@ -0,0 +1,585 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/mjpeg_decoder.h"
#ifdef HAVE_JPEG
#include <assert.h>
#if !defined(__pnacl__) && !defined(__CLR_VER) && \
!defined(COVERAGE_ENABLED) && !defined(TARGET_IPHONE_SIMULATOR)
// Must be included before jpeglib.
#include <setjmp.h>
#define HAVE_SETJMP
#if defined(_MSC_VER)
// disable warning 4324: structure was padded due to __declspec(align())
#pragma warning(disable : 4324)
#endif
#endif
#include <stdio.h> // For jpeglib.h.
// C++ build requires extern C for jpeg internals.
#ifdef __cplusplus
extern "C" {
#endif
#include <jpeglib.h>
#ifdef __cplusplus
} // extern "C"
#endif
#include "libyuv/planar_functions.h" // For CopyPlane().
namespace libyuv {
#ifdef HAVE_SETJMP
struct SetJmpErrorMgr {
jpeg_error_mgr base; // Must be at the top
jmp_buf setjmp_buffer;
};
#endif
const int MJpegDecoder::kColorSpaceUnknown = JCS_UNKNOWN;
const int MJpegDecoder::kColorSpaceGrayscale = JCS_GRAYSCALE;
const int MJpegDecoder::kColorSpaceRgb = JCS_RGB;
const int MJpegDecoder::kColorSpaceYCbCr = JCS_YCbCr;
const int MJpegDecoder::kColorSpaceCMYK = JCS_CMYK;
const int MJpegDecoder::kColorSpaceYCCK = JCS_YCCK;
// Methods that are passed to jpeglib.
boolean fill_input_buffer(jpeg_decompress_struct* cinfo);
void init_source(jpeg_decompress_struct* cinfo);
void skip_input_data(jpeg_decompress_struct* cinfo, long num_bytes); // NOLINT
void term_source(jpeg_decompress_struct* cinfo);
void ErrorHandler(jpeg_common_struct* cinfo);
void OutputHandler(jpeg_common_struct* cinfo);
MJpegDecoder::MJpegDecoder()
: has_scanline_padding_(LIBYUV_FALSE),
num_outbufs_(0),
scanlines_(NULL),
scanlines_sizes_(NULL),
databuf_(NULL),
databuf_strides_(NULL) {
decompress_struct_ = new jpeg_decompress_struct;
source_mgr_ = new jpeg_source_mgr;
#ifdef HAVE_SETJMP
error_mgr_ = new SetJmpErrorMgr;
decompress_struct_->err = jpeg_std_error(&error_mgr_->base);
// Override standard exit()-based error handler.
error_mgr_->base.error_exit = &ErrorHandler;
error_mgr_->base.output_message = &OutputHandler;
#endif
decompress_struct_->client_data = NULL;
source_mgr_->init_source = &init_source;
source_mgr_->fill_input_buffer = &fill_input_buffer;
source_mgr_->skip_input_data = &skip_input_data;
source_mgr_->resync_to_restart = &jpeg_resync_to_restart;
source_mgr_->term_source = &term_source;
jpeg_create_decompress(decompress_struct_);
decompress_struct_->src = source_mgr_;
buf_vec_.buffers = &buf_;
buf_vec_.len = 1;
}
MJpegDecoder::~MJpegDecoder() {
jpeg_destroy_decompress(decompress_struct_);
delete decompress_struct_;
delete source_mgr_;
#ifdef HAVE_SETJMP
delete error_mgr_;
#endif
DestroyOutputBuffers();
}
LIBYUV_BOOL MJpegDecoder::LoadFrame(const uint8_t* src, size_t src_len) {
if (!ValidateJpeg(src, src_len)) {
return LIBYUV_FALSE;
}
buf_.data = src;
buf_.len = static_cast<int>(src_len);
buf_vec_.pos = 0;
decompress_struct_->client_data = &buf_vec_;
#ifdef HAVE_SETJMP
if (setjmp(error_mgr_->setjmp_buffer)) {
// We called jpeg_read_header, it experienced an error, and we called
// longjmp() and rewound the stack to here. Return error.
return LIBYUV_FALSE;
}
#endif
if (jpeg_read_header(decompress_struct_, TRUE) != JPEG_HEADER_OK) {
// ERROR: Bad MJPEG header
return LIBYUV_FALSE;
}
AllocOutputBuffers(GetNumComponents());
for (int i = 0; i < num_outbufs_; ++i) {
int scanlines_size = GetComponentScanlinesPerImcuRow(i);
if (scanlines_sizes_[i] != scanlines_size) {
if (scanlines_[i]) {
        delete[] scanlines_[i];  // allocated with new[], so use delete[]
}
scanlines_[i] = new uint8_t*[scanlines_size];
scanlines_sizes_[i] = scanlines_size;
}
// We allocate padding for the final scanline to pad it up to DCTSIZE bytes
    // to avoid memory errors, since jpeglib only reads full MCU blocks. For
// the preceding scanlines, the padding is not needed/wanted because the
// following addresses will already be valid (they are the initial bytes of
// the next scanline) and will be overwritten when jpeglib writes out that
// next scanline.
int databuf_stride = GetComponentStride(i);
int databuf_size = scanlines_size * databuf_stride;
if (databuf_strides_[i] != databuf_stride) {
if (databuf_[i]) {
        delete[] databuf_[i];  // allocated with new[], so use delete[]
}
databuf_[i] = new uint8_t[databuf_size];
databuf_strides_[i] = databuf_stride;
}
if (GetComponentStride(i) != GetComponentWidth(i)) {
has_scanline_padding_ = LIBYUV_TRUE;
}
}
return LIBYUV_TRUE;
}
static int DivideAndRoundUp(int numerator, int denominator) {
return (numerator + denominator - 1) / denominator;
}
static int DivideAndRoundDown(int numerator, int denominator) {
return numerator / denominator;
}
// Returns width of the last loaded frame.
int MJpegDecoder::GetWidth() {
return decompress_struct_->image_width;
}
// Returns height of the last loaded frame.
int MJpegDecoder::GetHeight() {
return decompress_struct_->image_height;
}
// Returns format of the last loaded frame. The return value is one of the
// kColorSpace* constants.
int MJpegDecoder::GetColorSpace() {
return decompress_struct_->jpeg_color_space;
}
// Number of color components in the color space.
int MJpegDecoder::GetNumComponents() {
return decompress_struct_->num_components;
}
// Sample factors of the n-th component.
int MJpegDecoder::GetHorizSampFactor(int component) {
return decompress_struct_->comp_info[component].h_samp_factor;
}
int MJpegDecoder::GetVertSampFactor(int component) {
return decompress_struct_->comp_info[component].v_samp_factor;
}
int MJpegDecoder::GetHorizSubSampFactor(int component) {
return decompress_struct_->max_h_samp_factor / GetHorizSampFactor(component);
}
int MJpegDecoder::GetVertSubSampFactor(int component) {
return decompress_struct_->max_v_samp_factor / GetVertSampFactor(component);
}
int MJpegDecoder::GetImageScanlinesPerImcuRow() {
return decompress_struct_->max_v_samp_factor * DCTSIZE;
}
int MJpegDecoder::GetComponentScanlinesPerImcuRow(int component) {
int vs = GetVertSubSampFactor(component);
return DivideAndRoundUp(GetImageScanlinesPerImcuRow(), vs);
}
int MJpegDecoder::GetComponentWidth(int component) {
int hs = GetHorizSubSampFactor(component);
return DivideAndRoundUp(GetWidth(), hs);
}
int MJpegDecoder::GetComponentHeight(int component) {
int vs = GetVertSubSampFactor(component);
return DivideAndRoundUp(GetHeight(), vs);
}
// Get width in bytes padded out to a multiple of DCTSIZE
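// (Example: with DCTSIZE == 8, a width of 33 pads to a stride of 40, while a
// width of 40 is already aligned and stays 40.)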
int MJpegDecoder::GetComponentStride(int component) {
return (GetComponentWidth(component) + DCTSIZE - 1) & ~(DCTSIZE - 1);
}
int MJpegDecoder::GetComponentSize(int component) {
return GetComponentWidth(component) * GetComponentHeight(component);
}
LIBYUV_BOOL MJpegDecoder::UnloadFrame() {
#ifdef HAVE_SETJMP
if (setjmp(error_mgr_->setjmp_buffer)) {
// We called jpeg_abort_decompress, it experienced an error, and we called
// longjmp() and rewound the stack to here. Return error.
return LIBYUV_FALSE;
}
#endif
jpeg_abort_decompress(decompress_struct_);
return LIBYUV_TRUE;
}
// TODO(fbarchard): Allow rectangle to be specified: x, y, width, height.
LIBYUV_BOOL MJpegDecoder::DecodeToBuffers(uint8_t** planes,
int dst_width,
int dst_height) {
if (dst_width != GetWidth() || dst_height > GetHeight()) {
// ERROR: Bad dimensions
return LIBYUV_FALSE;
}
#ifdef HAVE_SETJMP
if (setjmp(error_mgr_->setjmp_buffer)) {
// We called into jpeglib, it experienced an error sometime during this
// function call, and we called longjmp() and rewound the stack to here.
// Return error.
return LIBYUV_FALSE;
}
#endif
if (!StartDecode()) {
return LIBYUV_FALSE;
}
SetScanlinePointers(databuf_);
int lines_left = dst_height;
  // Compute the number of lines to skip to implement vertical crop.
  // TODO(fbarchard): Ensure skip is a multiple of the maximum component
  // subsample, i.e. 2.
int skip = (GetHeight() - dst_height) / 2;
if (skip > 0) {
// There is no API to skip lines in the output data, so we read them
// into the temp buffer.
while (skip >= GetImageScanlinesPerImcuRow()) {
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
skip -= GetImageScanlinesPerImcuRow();
}
if (skip > 0) {
// Have a partial iMCU row left over to skip. Must read it and then
// copy the parts we want into the destination.
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
for (int i = 0; i < num_outbufs_; ++i) {
// TODO(fbarchard): Compute skip to avoid this
assert(skip % GetVertSubSampFactor(i) == 0);
int rows_to_skip = DivideAndRoundDown(skip, GetVertSubSampFactor(i));
int scanlines_to_copy =
GetComponentScanlinesPerImcuRow(i) - rows_to_skip;
int data_to_skip = rows_to_skip * GetComponentStride(i);
CopyPlane(databuf_[i] + data_to_skip, GetComponentStride(i), planes[i],
GetComponentWidth(i), GetComponentWidth(i),
scanlines_to_copy);
planes[i] += scanlines_to_copy * GetComponentWidth(i);
}
lines_left -= (GetImageScanlinesPerImcuRow() - skip);
}
}
// Read full MCUs but cropped horizontally
for (; lines_left > GetImageScanlinesPerImcuRow();
lines_left -= GetImageScanlinesPerImcuRow()) {
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
for (int i = 0; i < num_outbufs_; ++i) {
int scanlines_to_copy = GetComponentScanlinesPerImcuRow(i);
CopyPlane(databuf_[i], GetComponentStride(i), planes[i],
GetComponentWidth(i), GetComponentWidth(i), scanlines_to_copy);
planes[i] += scanlines_to_copy * GetComponentWidth(i);
}
}
if (lines_left > 0) {
// Have a partial iMCU row left over to decode.
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
for (int i = 0; i < num_outbufs_; ++i) {
int scanlines_to_copy =
DivideAndRoundUp(lines_left, GetVertSubSampFactor(i));
CopyPlane(databuf_[i], GetComponentStride(i), planes[i],
GetComponentWidth(i), GetComponentWidth(i), scanlines_to_copy);
planes[i] += scanlines_to_copy * GetComponentWidth(i);
}
}
return FinishDecode();
}
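// Worked example of the crop logic above (illustrative): decoding a 640x480
// 4:2:0 frame with dst_height == 400 gives skip == 40. iMCU rows are 16 lines
// tall, so two full rows (32 lines) are decoded into the temp buffer and
// discarded, then one more row is decoded and only its last 8 luma lines
// (4 chroma lines) are copied out, leaving lines_left == 392.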
LIBYUV_BOOL MJpegDecoder::DecodeToCallback(CallbackFunction fn,
void* opaque,
int dst_width,
int dst_height) {
if (dst_width != GetWidth() || dst_height > GetHeight()) {
// ERROR: Bad dimensions
return LIBYUV_FALSE;
}
#ifdef HAVE_SETJMP
if (setjmp(error_mgr_->setjmp_buffer)) {
// We called into jpeglib, it experienced an error sometime during this
// function call, and we called longjmp() and rewound the stack to here.
// Return error.
return LIBYUV_FALSE;
}
#endif
if (!StartDecode()) {
return LIBYUV_FALSE;
}
SetScanlinePointers(databuf_);
int lines_left = dst_height;
  // TODO(fbarchard): Compute the number of lines to skip for vertical crop.
int skip = (GetHeight() - dst_height) / 2;
if (skip > 0) {
while (skip >= GetImageScanlinesPerImcuRow()) {
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
skip -= GetImageScanlinesPerImcuRow();
}
if (skip > 0) {
// Have a partial iMCU row left over to skip.
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
for (int i = 0; i < num_outbufs_; ++i) {
// TODO(fbarchard): Compute skip to avoid this
assert(skip % GetVertSubSampFactor(i) == 0);
int rows_to_skip = DivideAndRoundDown(skip, GetVertSubSampFactor(i));
int data_to_skip = rows_to_skip * GetComponentStride(i);
// Change our own data buffer pointers so we can pass them to the
// callback.
databuf_[i] += data_to_skip;
}
int scanlines_to_copy = GetImageScanlinesPerImcuRow() - skip;
(*fn)(opaque, databuf_, databuf_strides_, scanlines_to_copy);
// Now change them back.
for (int i = 0; i < num_outbufs_; ++i) {
int rows_to_skip = DivideAndRoundDown(skip, GetVertSubSampFactor(i));
int data_to_skip = rows_to_skip * GetComponentStride(i);
databuf_[i] -= data_to_skip;
}
lines_left -= scanlines_to_copy;
}
}
// Read full MCUs until we get to the crop point.
for (; lines_left >= GetImageScanlinesPerImcuRow();
lines_left -= GetImageScanlinesPerImcuRow()) {
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
(*fn)(opaque, databuf_, databuf_strides_, GetImageScanlinesPerImcuRow());
}
if (lines_left > 0) {
// Have a partial iMCU row left over to decode.
if (!DecodeImcuRow()) {
FinishDecode();
return LIBYUV_FALSE;
}
(*fn)(opaque, databuf_, databuf_strides_, lines_left);
}
return FinishDecode();
}
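// Illustrative sketch, not part of libyuv: a DecodeToCallback() consumer.
// The callback shape is taken from the call sites above: it receives the
// opaque pointer, per-component data pointers, per-component strides, and a
// row count expressed in image scanlines (the exact const-qualification of
// CallbackFunction lives in mjpeg_decoder.h).
#if 0  // Example only; excluded from the build.
struct ExampleSink {
  uint8_t* planes[4];  // Next write position for each component.
  int widths[4];       // GetComponentWidth(i).
  int vsub[4];         // GetVertSubSampFactor(i).
  int num_components;  // GetNumComponents().
};
static void ExampleOnRows(void* opaque, const uint8_t* const* data,
                          const int* strides, int rows) {
  ExampleSink* sink = reinterpret_cast<ExampleSink*>(opaque);
  for (int i = 0; i < sink->num_components; ++i) {
    // 'rows' counts image scanlines; a subsampled plane delivers fewer rows.
    int plane_rows = (rows + sink->vsub[i] - 1) / sink->vsub[i];
    for (int r = 0; r < plane_rows; ++r) {
      for (int x = 0; x < sink->widths[i]; ++x) {
        sink->planes[i][x] = data[i][r * strides[i] + x];
      }
      sink->planes[i] += sink->widths[i];
    }
  }
}
#endif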
void init_source(j_decompress_ptr cinfo) {
fill_input_buffer(cinfo);
}
boolean fill_input_buffer(j_decompress_ptr cinfo) {
BufferVector* buf_vec = reinterpret_cast<BufferVector*>(cinfo->client_data);
if (buf_vec->pos >= buf_vec->len) {
// Don't assert-fail when fuzzing.
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
assert(0 && "No more data");
#endif
// ERROR: No more data
return FALSE;
}
cinfo->src->next_input_byte = buf_vec->buffers[buf_vec->pos].data;
cinfo->src->bytes_in_buffer = buf_vec->buffers[buf_vec->pos].len;
++buf_vec->pos;
return TRUE;
}
void skip_input_data(j_decompress_ptr cinfo, long num_bytes) { // NOLINT
jpeg_source_mgr* src = cinfo->src;
size_t bytes = static_cast<size_t>(num_bytes);
if (bytes > src->bytes_in_buffer) {
src->next_input_byte = nullptr;
src->bytes_in_buffer = 0;
} else {
src->next_input_byte += bytes;
src->bytes_in_buffer -= bytes;
}
}
void term_source(j_decompress_ptr cinfo) {
(void)cinfo; // Nothing to do.
}
#ifdef HAVE_SETJMP
void ErrorHandler(j_common_ptr cinfo) {
  // This is called when a jpeglib command experiences an error. Unfortunately
  // jpeglib's error handling model is not very flexible, because it expects
  // the error handler not to return; i.e., it wants the program to terminate.
  // To recover from errors we use setjmp() as shown in their example.
  // setjmp()/longjmp() give a limited, one-shot form of the "call with
  // current continuation" idea seen in some functional programming languages.
// A formatted message can be output, but is unsafe for release.
#ifdef DEBUG
char buf[JMSG_LENGTH_MAX];
(*cinfo->err->format_message)(cinfo, buf);
// ERROR: Error in jpeglib: buf
#endif
SetJmpErrorMgr* mgr = reinterpret_cast<SetJmpErrorMgr*>(cinfo->err);
// This rewinds the call stack to the point of the corresponding setjmp()
// and causes it to return (for a second time) with value 1.
longjmp(mgr->setjmp_buffer, 1);
}
// Suppress fprintf warnings.
void OutputHandler(j_common_ptr cinfo) {
(void)cinfo;
}
#endif // HAVE_SETJMP
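// The recovery pattern above, in miniature (illustrative, not libyuv code):
//
//   static jmp_buf env;                                    // jump target
//   void OnFatal(j_common_ptr cinfo) { longjmp(env, 1); }  // never returns
//   ...
//   if (setjmp(env)) {
//     // Re-entered here after OnFatal(); clean up and report failure.
//   } else {
//     jpeg_read_header(cinfo, TRUE);  // may invoke OnFatal() internally
//   }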
void MJpegDecoder::AllocOutputBuffers(int num_outbufs) {
if (num_outbufs != num_outbufs_) {
// We could perhaps optimize this case to resize the output buffers without
// necessarily having to delete and recreate each one, but it's not worth
// it.
DestroyOutputBuffers();
scanlines_ = new uint8_t**[num_outbufs];
scanlines_sizes_ = new int[num_outbufs];
databuf_ = new uint8_t*[num_outbufs];
databuf_strides_ = new int[num_outbufs];
for (int i = 0; i < num_outbufs; ++i) {
scanlines_[i] = NULL;
scanlines_sizes_[i] = 0;
databuf_[i] = NULL;
databuf_strides_[i] = 0;
}
num_outbufs_ = num_outbufs;
}
}
void MJpegDecoder::DestroyOutputBuffers() {
for (int i = 0; i < num_outbufs_; ++i) {
delete[] scanlines_[i];
delete[] databuf_[i];
}
delete[] scanlines_;
delete[] databuf_;
delete[] scanlines_sizes_;
delete[] databuf_strides_;
scanlines_ = NULL;
databuf_ = NULL;
scanlines_sizes_ = NULL;
databuf_strides_ = NULL;
num_outbufs_ = 0;
}
// JDCT_IFAST and disabling do_block_smoothing improve performance
// substantially.
LIBYUV_BOOL MJpegDecoder::StartDecode() {
decompress_struct_->raw_data_out = TRUE;
decompress_struct_->dct_method = JDCT_IFAST; // JDCT_ISLOW is default
decompress_struct_->dither_mode = JDITHER_NONE;
// Not applicable to 'raw':
decompress_struct_->do_fancy_upsampling = (boolean)(LIBYUV_FALSE);
// Only for buffered mode:
decompress_struct_->enable_2pass_quant = (boolean)(LIBYUV_FALSE);
// Blocky but fast:
decompress_struct_->do_block_smoothing = (boolean)(LIBYUV_FALSE);
if (!jpeg_start_decompress(decompress_struct_)) {
// ERROR: Couldn't start JPEG decompressor";
return LIBYUV_FALSE;
}
return LIBYUV_TRUE;
}
LIBYUV_BOOL MJpegDecoder::FinishDecode() {
// jpeglib considers it an error if we finish without decoding the whole
// image, so we call "abort" rather than "finish".
jpeg_abort_decompress(decompress_struct_);
return LIBYUV_TRUE;
}
void MJpegDecoder::SetScanlinePointers(uint8_t** data) {
for (int i = 0; i < num_outbufs_; ++i) {
uint8_t* data_i = data[i];
for (int j = 0; j < scanlines_sizes_[i]; ++j) {
scanlines_[i][j] = data_i;
data_i += GetComponentStride(i);
}
}
}
inline LIBYUV_BOOL MJpegDecoder::DecodeImcuRow() {
return (unsigned int)(GetImageScanlinesPerImcuRow()) ==
jpeg_read_raw_data(decompress_struct_, scanlines_,
GetImageScanlinesPerImcuRow());
}
// Helper function that recognizes the jpeg sub-sampling type.
JpegSubsamplingType MJpegDecoder::JpegSubsamplingTypeHelper(
int* subsample_x,
int* subsample_y,
int number_of_components) {
if (number_of_components == 3) { // Color images.
if (subsample_x[0] == 1 && subsample_y[0] == 1 && subsample_x[1] == 2 &&
subsample_y[1] == 2 && subsample_x[2] == 2 && subsample_y[2] == 2) {
return kJpegYuv420;
}
if (subsample_x[0] == 1 && subsample_y[0] == 1 && subsample_x[1] == 2 &&
subsample_y[1] == 1 && subsample_x[2] == 2 && subsample_y[2] == 1) {
return kJpegYuv422;
}
if (subsample_x[0] == 1 && subsample_y[0] == 1 && subsample_x[1] == 1 &&
subsample_y[1] == 1 && subsample_x[2] == 1 && subsample_y[2] == 1) {
return kJpegYuv444;
}
} else if (number_of_components == 1) { // Grey-scale images.
if (subsample_x[0] == 1 && subsample_y[0] == 1) {
return kJpegYuv400;
}
}
return kJpegUnknown;
}
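// Illustrative sketch, not part of libyuv: classifying a loaded frame with
// the helper above (assumes at most 4 components).
#if 0  // Example only; excluded from the build.
static JpegSubsamplingType ClassifyExample(MJpegDecoder* decoder) {
  int sx[4] = {0, 0, 0, 0};
  int sy[4] = {0, 0, 0, 0};
  int components = decoder->GetNumComponents();
  for (int i = 0; i < components && i < 4; ++i) {
    sx[i] = decoder->GetHorizSubSampFactor(i);
    sy[i] = decoder->GetVertSubSampFactor(i);
  }
  return MJpegDecoder::JpegSubsamplingTypeHelper(sx, sy, components);
}
#endif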
} // namespace libyuv
#endif // HAVE_JPEG

71
thirdparty/libyuv/source/mjpeg_validate.cc vendored Normal file

@@ -0,0 +1,71 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/mjpeg_decoder.h"
#include <string.h> // For memchr.
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// Helper function to scan for EOI marker (0xff 0xd9).
static LIBYUV_BOOL ScanEOI(const uint8_t* src_mjpg, size_t src_size_mjpg) {
if (src_size_mjpg >= 2) {
const uint8_t* end = src_mjpg + src_size_mjpg - 1;
const uint8_t* it = src_mjpg;
while (it < end) {
// TODO(fbarchard): scan for 0xd9 instead.
it = (const uint8_t*)(memchr(it, 0xff, end - it));
if (it == NULL) {
break;
}
if (it[1] == 0xd9) {
return LIBYUV_TRUE; // Success: Valid jpeg.
}
++it; // Skip over current 0xff.
}
}
// ERROR: Invalid jpeg end code not found. Size src_size_mjpg
return LIBYUV_FALSE;
}
// Helper function to validate the jpeg appears intact.
LIBYUV_BOOL ValidateJpeg(const uint8_t* src_mjpg, size_t src_size_mjpg) {
// Maximum size that ValidateJpeg will consider valid.
const size_t kMaxJpegSize = 0x7fffffffull;
const size_t kBackSearchSize = 1024;
if (src_size_mjpg < 64 || src_size_mjpg > kMaxJpegSize || !src_mjpg) {
// ERROR: Invalid jpeg size: src_size_mjpg
return LIBYUV_FALSE;
}
// SOI marker
if (src_mjpg[0] != 0xff || src_mjpg[1] != 0xd8 || src_mjpg[2] != 0xff) {
// ERROR: Invalid jpeg initial start code
return LIBYUV_FALSE;
}
// Look for the End Of Image (EOI) marker near the end of the buffer.
if (src_size_mjpg > kBackSearchSize) {
if (ScanEOI(src_mjpg + src_size_mjpg - kBackSearchSize, kBackSearchSize)) {
return LIBYUV_TRUE; // Success: Valid jpeg.
}
// Reduce search size for forward search.
src_size_mjpg = src_size_mjpg - kBackSearchSize + 1;
}
// Step over SOI marker and scan for EOI.
return ScanEOI(src_mjpg + 2, src_size_mjpg - 2);
}
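// Worked example (illustrative): a well-formed stream begins with the SOI
// marker ff d8 ff and ends with the EOI marker ff d9:
//   ff d8 ff e0 ... ff d9
// ValidateJpeg() also rejects any buffer shorter than 64 bytes, so marker
// fragments alone never validate.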
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

File diff suppressed because it is too large

609
thirdparty/libyuv/source/rotate.cc vendored Normal file

@@ -0,0 +1,609 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate.h"
#include "libyuv/convert.h"
#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h"
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
LIBYUV_API
void TransposePlane(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height) {
int i = height;
#if defined(HAS_TRANSPOSEWX16_MSA)
void (*TransposeWx16)(const uint8_t* src, int src_stride, uint8_t* dst,
int dst_stride, int width) = TransposeWx16_C;
#else
void (*TransposeWx8)(const uint8_t* src, int src_stride, uint8_t* dst,
int dst_stride, int width) = TransposeWx8_C;
#endif
#if defined(HAS_TRANSPOSEWX16_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
TransposeWx16 = TransposeWx16_Any_MSA;
if (IS_ALIGNED(width, 16)) {
TransposeWx16 = TransposeWx16_MSA;
}
}
#else
#if defined(HAS_TRANSPOSEWX8_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
TransposeWx8 = TransposeWx8_NEON;
}
#endif
#if defined(HAS_TRANSPOSEWX8_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
TransposeWx8 = TransposeWx8_Any_SSSE3;
if (IS_ALIGNED(width, 8)) {
TransposeWx8 = TransposeWx8_SSSE3;
}
}
#endif
#if defined(HAS_TRANSPOSEWX8_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
TransposeWx8 = TransposeWx8_MMI;
}
#endif
#if defined(HAS_TRANSPOSEWX8_FAST_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
TransposeWx8 = TransposeWx8_Fast_Any_SSSE3;
if (IS_ALIGNED(width, 16)) {
TransposeWx8 = TransposeWx8_Fast_SSSE3;
}
}
#endif
#endif /* defined(HAS_TRANSPOSEWX16_MSA) */
#if defined(HAS_TRANSPOSEWX16_MSA)
// Work across the source in 16x16 tiles
while (i >= 16) {
TransposeWx16(src, src_stride, dst, dst_stride, width);
src += 16 * src_stride; // Go down 16 rows.
dst += 16; // Move over 16 columns.
i -= 16;
}
#else
// Work across the source in 8x8 tiles
while (i >= 8) {
TransposeWx8(src, src_stride, dst, dst_stride, width);
src += 8 * src_stride; // Go down 8 rows.
dst += 8; // Move over 8 columns.
i -= 8;
}
#endif
if (i > 0) {
TransposeWxH_C(src, src_stride, dst, dst_stride, width, i);
}
}
LIBYUV_API
void RotatePlane90(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height) {
// Rotate by 90 is a transpose with the source read
// from bottom to top. So set the source pointer to the end
// of the buffer and flip the sign of the source stride.
src += src_stride * (height - 1);
src_stride = -src_stride;
TransposePlane(src, src_stride, dst, dst_stride, width, height);
}
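// (Example: for height == 4, src is advanced by 3 * src_stride to the last
// row and the negated stride makes TransposePlane read rows 3, 2, 1, 0.)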
LIBYUV_API
void RotatePlane270(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height) {
// Rotate by 270 is a transpose with the destination written
// from bottom to top. So set the destination pointer to the end
// of the buffer and flip the sign of the destination stride.
dst += dst_stride * (width - 1);
dst_stride = -dst_stride;
TransposePlane(src, src_stride, dst, dst_stride, width, height);
}
LIBYUV_API
void RotatePlane180(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height) {
// Swap first and last row and mirror the content. Uses a temporary row.
align_buffer_64(row, width);
const uint8_t* src_bot = src + src_stride * (height - 1);
uint8_t* dst_bot = dst + dst_stride * (height - 1);
int half_height = (height + 1) >> 1;
int y;
void (*MirrorRow)(const uint8_t* src, uint8_t* dst, int width) = MirrorRow_C;
void (*CopyRow)(const uint8_t* src, uint8_t* dst, int width) = CopyRow_C;
#if defined(HAS_MIRRORROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
MirrorRow = MirrorRow_Any_NEON;
if (IS_ALIGNED(width, 32)) {
MirrorRow = MirrorRow_NEON;
}
}
#endif
#if defined(HAS_MIRRORROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3)) {
MirrorRow = MirrorRow_Any_SSSE3;
if (IS_ALIGNED(width, 16)) {
MirrorRow = MirrorRow_SSSE3;
}
}
#endif
#if defined(HAS_MIRRORROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
MirrorRow = MirrorRow_Any_AVX2;
if (IS_ALIGNED(width, 32)) {
MirrorRow = MirrorRow_AVX2;
}
}
#endif
#if defined(HAS_MIRRORROW_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
MirrorRow = MirrorRow_Any_MMI;
if (IS_ALIGNED(width, 8)) {
MirrorRow = MirrorRow_MMI;
}
}
#endif
#if defined(HAS_MIRRORROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
MirrorRow = MirrorRow_Any_MSA;
if (IS_ALIGNED(width, 64)) {
MirrorRow = MirrorRow_MSA;
}
}
#endif
#if defined(HAS_COPYROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
CopyRow = IS_ALIGNED(width, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
}
#endif
#if defined(HAS_COPYROW_AVX)
if (TestCpuFlag(kCpuHasAVX)) {
CopyRow = IS_ALIGNED(width, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
}
#endif
#if defined(HAS_COPYROW_ERMS)
if (TestCpuFlag(kCpuHasERMS)) {
CopyRow = CopyRow_ERMS;
}
#endif
#if defined(HAS_COPYROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
CopyRow = IS_ALIGNED(width, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
}
#endif
#if defined(HAS_COPYROW_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
CopyRow = IS_ALIGNED(width, 8) ? CopyRow_MMI : CopyRow_Any_MMI;
}
#endif
// Odd height will harmlessly mirror the middle row twice.
for (y = 0; y < half_height; ++y) {
CopyRow(src, row, width); // Copy first row into buffer
MirrorRow(src_bot, dst, width); // Mirror last row into first row
MirrorRow(row, dst_bot, width); // Mirror buffer into last row
src += src_stride;
dst += dst_stride;
src_bot -= src_stride;
dst_bot -= dst_stride;
}
free_aligned_buffer_64(row);
}
LIBYUV_API
void TransposeUV(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height) {
int i = height;
#if defined(HAS_TRANSPOSEUVWX16_MSA)
void (*TransposeUVWx16)(const uint8_t* src, int src_stride, uint8_t* dst_a,
int dst_stride_a, uint8_t* dst_b, int dst_stride_b,
int width) = TransposeUVWx16_C;
#else
void (*TransposeUVWx8)(const uint8_t* src, int src_stride, uint8_t* dst_a,
int dst_stride_a, uint8_t* dst_b, int dst_stride_b,
int width) = TransposeUVWx8_C;
#endif
#if defined(HAS_TRANSPOSEUVWX16_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
TransposeUVWx16 = TransposeUVWx16_Any_MSA;
if (IS_ALIGNED(width, 8)) {
TransposeUVWx16 = TransposeUVWx16_MSA;
}
}
#else
#if defined(HAS_TRANSPOSEUVWX8_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
TransposeUVWx8 = TransposeUVWx8_NEON;
}
#endif
#if defined(HAS_TRANSPOSEUVWX8_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
TransposeUVWx8 = TransposeUVWx8_Any_SSE2;
if (IS_ALIGNED(width, 8)) {
TransposeUVWx8 = TransposeUVWx8_SSE2;
}
}
#endif
#if defined(HAS_TRANSPOSEUVWX8_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
TransposeUVWx8 = TransposeUVWx8_Any_MMI;
if (IS_ALIGNED(width, 4)) {
TransposeUVWx8 = TransposeUVWx8_MMI;
}
}
#endif
#endif /* defined(HAS_TRANSPOSEUVWX16_MSA) */
#if defined(HAS_TRANSPOSEUVWX16_MSA)
  // Work through the source in 16x16 tiles.
while (i >= 16) {
TransposeUVWx16(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
width);
src += 16 * src_stride; // Go down 16 rows.
    dst_a += 16;  // Move over 16 columns.
    dst_b += 16;  // Move over 16 columns.
i -= 16;
}
#else
// Work through the source in 8x8 tiles.
while (i >= 8) {
TransposeUVWx8(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
width);
src += 8 * src_stride; // Go down 8 rows.
dst_a += 8; // Move over 8 columns.
dst_b += 8; // Move over 8 columns.
i -= 8;
}
#endif
if (i > 0) {
TransposeUVWxH_C(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
width, i);
}
}
LIBYUV_API
void RotateUV90(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height) {
src += src_stride * (height - 1);
src_stride = -src_stride;
TransposeUV(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b, width,
height);
}
LIBYUV_API
void RotateUV270(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height) {
dst_a += dst_stride_a * (width - 1);
dst_b += dst_stride_b * (width - 1);
dst_stride_a = -dst_stride_a;
dst_stride_b = -dst_stride_b;
TransposeUV(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b, width,
height);
}
// Rotate 180 is a horizontal and vertical flip.
LIBYUV_API
void RotateUV180(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height) {
int i;
void (*MirrorSplitUVRow)(const uint8_t* src, uint8_t* dst_u, uint8_t* dst_v,
int width) = MirrorSplitUVRow_C;
#if defined(HAS_MIRRORSPLITUVROW_NEON)
if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 16)) {
MirrorSplitUVRow = MirrorSplitUVRow_NEON;
}
#endif
#if defined(HAS_MIRRORSPLITUVROW_SSSE3)
if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 16)) {
MirrorSplitUVRow = MirrorSplitUVRow_SSSE3;
}
#endif
#if defined(HAS_MIRRORSPLITUVROW_MMI)
if (TestCpuFlag(kCpuHasMMI) && IS_ALIGNED(width, 8)) {
MirrorSplitUVRow = MirrorSplitUVRow_MMI;
}
#endif
#if defined(HAS_MIRRORSPLITUVROW_MSA)
if (TestCpuFlag(kCpuHasMSA) && IS_ALIGNED(width, 32)) {
MirrorSplitUVRow = MirrorSplitUVRow_MSA;
}
#endif
dst_a += dst_stride_a * (height - 1);
dst_b += dst_stride_b * (height - 1);
for (i = 0; i < height; ++i) {
MirrorSplitUVRow(src, dst_a, dst_b, width);
src += src_stride;
dst_a -= dst_stride_a;
dst_b -= dst_stride_b;
}
}
LIBYUV_API
int RotatePlane(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height,
enum RotationMode mode) {
if (!src || width <= 0 || height == 0 || !dst) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
src = src + (height - 1) * src_stride;
src_stride = -src_stride;
}
switch (mode) {
case kRotate0:
// copy frame
CopyPlane(src, src_stride, dst, dst_stride, width, height);
return 0;
case kRotate90:
RotatePlane90(src, src_stride, dst, dst_stride, width, height);
return 0;
case kRotate270:
RotatePlane270(src, src_stride, dst, dst_stride, width, height);
return 0;
case kRotate180:
RotatePlane180(src, src_stride, dst, dst_stride, width, height);
return 0;
default:
break;
}
return -1;
}
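// Illustrative sketch, not part of libyuv: rotating a tightly packed
// width x height gray plane by 90 degrees. The result is height x width, so
// the destination stride is the source height.
#if 0  // Example only; excluded from the build.
static int RotateGray90Example(const uint8_t* src, int width, int height,
                               uint8_t* dst /* height * width bytes */) {
  return RotatePlane(src, /*src_stride=*/width, dst, /*dst_stride=*/height,
                     width, height, kRotate90);
}
#endif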
LIBYUV_API
int I420Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum RotationMode mode) {
int halfwidth = (width + 1) >> 1;
int halfheight = (height + 1) >> 1;
if (!src_y || !src_u || !src_v || width <= 0 || height == 0 || !dst_y ||
!dst_u || !dst_v) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
halfheight = (height + 1) >> 1;
src_y = src_y + (height - 1) * src_stride_y;
src_u = src_u + (halfheight - 1) * src_stride_u;
src_v = src_v + (halfheight - 1) * src_stride_v;
src_stride_y = -src_stride_y;
src_stride_u = -src_stride_u;
src_stride_v = -src_stride_v;
}
switch (mode) {
case kRotate0:
// copy frame
return I420Copy(src_y, src_stride_y, src_u, src_stride_u, src_v,
src_stride_v, dst_y, dst_stride_y, dst_u, dst_stride_u,
dst_v, dst_stride_v, width, height);
case kRotate90:
RotatePlane90(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotatePlane90(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth,
halfheight);
RotatePlane90(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth,
halfheight);
return 0;
case kRotate270:
RotatePlane270(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotatePlane270(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth,
halfheight);
RotatePlane270(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth,
halfheight);
return 0;
case kRotate180:
RotatePlane180(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotatePlane180(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth,
halfheight);
RotatePlane180(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth,
halfheight);
return 0;
default:
break;
}
return -1;
}
LIBYUV_API
int I444Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_u,
int src_stride_u,
const uint8_t* src_v,
int src_stride_v,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum libyuv::RotationMode mode) {
if (!src_y || !src_u || !src_v || width <= 0 || height == 0 || !dst_y ||
!dst_u || !dst_v) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
src_y = src_y + (height - 1) * src_stride_y;
src_u = src_u + (height - 1) * src_stride_u;
src_v = src_v + (height - 1) * src_stride_v;
src_stride_y = -src_stride_y;
src_stride_u = -src_stride_u;
src_stride_v = -src_stride_v;
}
switch (mode) {
case libyuv::kRotate0:
// copy frame
CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height);
CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width, height);
return 0;
case libyuv::kRotate90:
RotatePlane90(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotatePlane90(src_u, src_stride_u, dst_u, dst_stride_u, width, height);
RotatePlane90(src_v, src_stride_v, dst_v, dst_stride_v, width, height);
return 0;
case libyuv::kRotate270:
RotatePlane270(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotatePlane270(src_u, src_stride_u, dst_u, dst_stride_u, width, height);
RotatePlane270(src_v, src_stride_v, dst_v, dst_stride_v, width, height);
return 0;
case libyuv::kRotate180:
RotatePlane180(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotatePlane180(src_u, src_stride_u, dst_u, dst_stride_u, width, height);
RotatePlane180(src_v, src_stride_v, dst_v, dst_stride_v, width, height);
return 0;
default:
break;
}
return -1;
}
LIBYUV_API
int NV12ToI420Rotate(const uint8_t* src_y,
int src_stride_y,
const uint8_t* src_uv,
int src_stride_uv,
uint8_t* dst_y,
int dst_stride_y,
uint8_t* dst_u,
int dst_stride_u,
uint8_t* dst_v,
int dst_stride_v,
int width,
int height,
enum RotationMode mode) {
int halfwidth = (width + 1) >> 1;
int halfheight = (height + 1) >> 1;
if (!src_y || !src_uv || width <= 0 || height == 0 || !dst_y || !dst_u ||
!dst_v) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
halfheight = (height + 1) >> 1;
src_y = src_y + (height - 1) * src_stride_y;
src_uv = src_uv + (halfheight - 1) * src_stride_uv;
src_stride_y = -src_stride_y;
src_stride_uv = -src_stride_uv;
}
switch (mode) {
case kRotate0:
// copy frame
return NV12ToI420(src_y, src_stride_y, src_uv, src_stride_uv, dst_y,
dst_stride_y, dst_u, dst_stride_u, dst_v, dst_stride_v,
width, height);
case kRotate90:
RotatePlane90(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotateUV90(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v,
dst_stride_v, halfwidth, halfheight);
return 0;
case kRotate270:
RotatePlane270(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotateUV270(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v,
dst_stride_v, halfwidth, halfheight);
return 0;
case kRotate180:
RotatePlane180(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
RotateUV180(src_uv, src_stride_uv, dst_u, dst_stride_u, dst_v,
dst_stride_v, halfwidth, halfheight);
return 0;
default:
break;
}
return -1;
}
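// Illustrative sketch, not part of libyuv: NV12 to I420 with rotation in one
// pass, the conversion path this vendored copy exists to serve. Assumes
// tightly packed planes (Y stride == width, UV stride == 2 * halfwidth).
#if 0  // Example only; excluded from the build.
static int Nv12ToI420Rotate90Example(const uint8_t* src_y,
                                     const uint8_t* src_uv, uint8_t* dst_y,
                                     uint8_t* dst_u, uint8_t* dst_v, int width,
                                     int height) {
  int halfwidth = (width + 1) >> 1;
  int halfheight = (height + 1) >> 1;
  // After kRotate90 the image is height x width, so destination strides are
  // based on the source height.
  return NV12ToI420Rotate(src_y, width, src_uv, halfwidth * 2, dst_y, height,
                          dst_u, halfheight, dst_v, halfheight, width, height,
                          kRotate90);
}
#endif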
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

79
thirdparty/libyuv/source/rotate_any.cc vendored Normal file

@@ -0,0 +1,79 @@
/*
* Copyright 2015 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate.h"
#include "libyuv/rotate_row.h"
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#define TANY(NAMEANY, TPOS_SIMD, MASK) \
void NAMEANY(const uint8_t* src, int src_stride, uint8_t* dst, \
int dst_stride, int width) { \
int r = width & MASK; \
int n = width - r; \
if (n > 0) { \
TPOS_SIMD(src, src_stride, dst, dst_stride, n); \
} \
TransposeWx8_C(src + n, src_stride, dst + n * dst_stride, dst_stride, r); \
}
#ifdef HAS_TRANSPOSEWX8_NEON
TANY(TransposeWx8_Any_NEON, TransposeWx8_NEON, 7)
#endif
#ifdef HAS_TRANSPOSEWX8_SSSE3
TANY(TransposeWx8_Any_SSSE3, TransposeWx8_SSSE3, 7)
#endif
#ifdef HAS_TRANSPOSEWX8_MMI
TANY(TransposeWx8_Any_MMI, TransposeWx8_MMI, 7)
#endif
#ifdef HAS_TRANSPOSEWX8_FAST_SSSE3
TANY(TransposeWx8_Fast_Any_SSSE3, TransposeWx8_Fast_SSSE3, 15)
#endif
#ifdef HAS_TRANSPOSEWX16_MSA
TANY(TransposeWx16_Any_MSA, TransposeWx16_MSA, 15)
#endif
#undef TANY
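// For example, TANY(TransposeWx8_Any_NEON, TransposeWx8_NEON, 7) expands to a
// wrapper that feeds the largest multiple-of-8 prefix of 'width' to the NEON
// kernel and transposes the remaining 0-7 columns with TransposeWx8_C.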
#define TUVANY(NAMEANY, TPOS_SIMD, MASK) \
void NAMEANY(const uint8_t* src, int src_stride, uint8_t* dst_a, \
int dst_stride_a, uint8_t* dst_b, int dst_stride_b, \
int width) { \
int r = width & MASK; \
int n = width - r; \
if (n > 0) { \
TPOS_SIMD(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b, n); \
} \
TransposeUVWx8_C(src + n * 2, src_stride, dst_a + n * dst_stride_a, \
dst_stride_a, dst_b + n * dst_stride_b, dst_stride_b, r); \
}
#ifdef HAS_TRANSPOSEUVWX8_NEON
TUVANY(TransposeUVWx8_Any_NEON, TransposeUVWx8_NEON, 7)
#endif
#ifdef HAS_TRANSPOSEUVWX8_SSE2
TUVANY(TransposeUVWx8_Any_SSE2, TransposeUVWx8_SSE2, 7)
#endif
#ifdef HAS_TRANSPOSEUVWX8_MMI
TUVANY(TransposeUVWx8_Any_MMI, TransposeUVWx8_MMI, 7)
#endif
#ifdef HAS_TRANSPOSEUVWX16_MSA
TUVANY(TransposeUVWx16_Any_MSA, TransposeUVWx16_MSA, 7)
#endif
#undef TUVANY
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

243
thirdparty/libyuv/source/rotate_argb.cc vendored Normal file

@@ -0,0 +1,243 @@
/*
* Copyright 2012 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate.h"
#include "libyuv/convert.h"
#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h"
#include "libyuv/row.h"
#include "libyuv/scale_row.h" /* for ScaleARGBRowDownEven_ */
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
static int ARGBTranspose(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height) {
int i;
int src_pixel_step = src_stride_argb >> 2;
void (*ScaleARGBRowDownEven)(
const uint8_t* src_argb, ptrdiff_t src_stride_argb, int src_step,
uint8_t* dst_argb, int dst_width) = ScaleARGBRowDownEven_C;
// Check stride is a multiple of 4.
if (src_stride_argb & 3) {
return -1;
}
#if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_SSE2;
if (IS_ALIGNED(height, 4)) { // Width of dest.
ScaleARGBRowDownEven = ScaleARGBRowDownEven_SSE2;
}
}
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_NEON;
if (IS_ALIGNED(height, 4)) { // Width of dest.
ScaleARGBRowDownEven = ScaleARGBRowDownEven_NEON;
}
}
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_MMI;
if (IS_ALIGNED(height, 4)) { // Width of dest.
ScaleARGBRowDownEven = ScaleARGBRowDownEven_MMI;
}
}
#endif
#if defined(HAS_SCALEARGBROWDOWNEVEN_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_MSA;
if (IS_ALIGNED(height, 4)) { // Width of dest.
ScaleARGBRowDownEven = ScaleARGBRowDownEven_MSA;
}
}
#endif
for (i = 0; i < width; ++i) { // column of source to row of dest.
ScaleARGBRowDownEven(src_argb, 0, src_pixel_step, dst_argb, height);
dst_argb += dst_stride_argb;
src_argb += 4;
}
return 0;
}
static int ARGBRotate90(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height) {
  // Rotate by 90 is an ARGBTranspose with the source read
// from bottom to top. So set the source pointer to the end
// of the buffer and flip the sign of the source stride.
src_argb += src_stride_argb * (height - 1);
src_stride_argb = -src_stride_argb;
return ARGBTranspose(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
width, height);
}
static int ARGBRotate270(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height) {
  // Rotate by 270 is an ARGBTranspose with the destination written
// from bottom to top. So set the destination pointer to the end
// of the buffer and flip the sign of the destination stride.
dst_argb += dst_stride_argb * (width - 1);
dst_stride_argb = -dst_stride_argb;
return ARGBTranspose(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
width, height);
}
static int ARGBRotate180(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height) {
// Swap first and last row and mirror the content. Uses a temporary row.
align_buffer_64(row, width * 4);
const uint8_t* src_bot = src_argb + src_stride_argb * (height - 1);
uint8_t* dst_bot = dst_argb + dst_stride_argb * (height - 1);
int half_height = (height + 1) >> 1;
int y;
void (*ARGBMirrorRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) =
ARGBMirrorRow_C;
void (*CopyRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) =
CopyRow_C;
#if defined(HAS_ARGBMIRRORROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
ARGBMirrorRow = ARGBMirrorRow_Any_NEON;
if (IS_ALIGNED(width, 8)) {
ARGBMirrorRow = ARGBMirrorRow_NEON;
}
}
#endif
#if defined(HAS_ARGBMIRRORROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
ARGBMirrorRow = ARGBMirrorRow_Any_SSE2;
if (IS_ALIGNED(width, 4)) {
ARGBMirrorRow = ARGBMirrorRow_SSE2;
}
}
#endif
#if defined(HAS_ARGBMIRRORROW_AVX2)
if (TestCpuFlag(kCpuHasAVX2)) {
ARGBMirrorRow = ARGBMirrorRow_Any_AVX2;
if (IS_ALIGNED(width, 8)) {
ARGBMirrorRow = ARGBMirrorRow_AVX2;
}
}
#endif
#if defined(HAS_ARGBMIRRORROW_MMI)
if (TestCpuFlag(kCpuHasMMI)) {
ARGBMirrorRow = ARGBMirrorRow_Any_MMI;
if (IS_ALIGNED(width, 2)) {
ARGBMirrorRow = ARGBMirrorRow_MMI;
}
}
#endif
#if defined(HAS_ARGBMIRRORROW_MSA)
if (TestCpuFlag(kCpuHasMSA)) {
ARGBMirrorRow = ARGBMirrorRow_Any_MSA;
if (IS_ALIGNED(width, 16)) {
ARGBMirrorRow = ARGBMirrorRow_MSA;
}
}
#endif
#if defined(HAS_COPYROW_SSE2)
if (TestCpuFlag(kCpuHasSSE2)) {
CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
}
#endif
#if defined(HAS_COPYROW_AVX)
if (TestCpuFlag(kCpuHasAVX)) {
CopyRow = IS_ALIGNED(width * 4, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
}
#endif
#if defined(HAS_COPYROW_ERMS)
if (TestCpuFlag(kCpuHasERMS)) {
CopyRow = CopyRow_ERMS;
}
#endif
#if defined(HAS_COPYROW_NEON)
if (TestCpuFlag(kCpuHasNEON)) {
CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
}
#endif
// Odd height will harmlessly mirror the middle row twice.
for (y = 0; y < half_height; ++y) {
ARGBMirrorRow(src_argb, row, width); // Mirror first row into a buffer
ARGBMirrorRow(src_bot, dst_argb, width); // Mirror last row into first row
CopyRow(row, dst_bot, width * 4); // Copy first mirrored row into last
src_argb += src_stride_argb;
dst_argb += dst_stride_argb;
src_bot -= src_stride_argb;
dst_bot -= dst_stride_argb;
}
free_aligned_buffer_64(row);
return 0;
}
LIBYUV_API
int ARGBRotate(const uint8_t* src_argb,
int src_stride_argb,
uint8_t* dst_argb,
int dst_stride_argb,
int width,
int height,
enum RotationMode mode) {
if (!src_argb || width <= 0 || height == 0 || !dst_argb) {
return -1;
}
// Negative height means invert the image.
if (height < 0) {
height = -height;
src_argb = src_argb + (height - 1) * src_stride_argb;
src_stride_argb = -src_stride_argb;
}
switch (mode) {
case kRotate0:
// copy frame
return ARGBCopy(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
width, height);
case kRotate90:
return ARGBRotate90(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
width, height);
case kRotate270:
return ARGBRotate270(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
width, height);
case kRotate180:
return ARGBRotate180(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
width, height);
default:
break;
}
return -1;
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

106
thirdparty/libyuv/source/rotate_common.cc vendored Normal file

@@ -0,0 +1,106 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
void TransposeWx8_C(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
int i;
for (i = 0; i < width; ++i) {
dst[0] = src[0 * src_stride];
dst[1] = src[1 * src_stride];
dst[2] = src[2 * src_stride];
dst[3] = src[3 * src_stride];
dst[4] = src[4 * src_stride];
dst[5] = src[5 * src_stride];
dst[6] = src[6 * src_stride];
dst[7] = src[7 * src_stride];
++src;
dst += dst_stride;
}
}
void TransposeUVWx8_C(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
int i;
for (i = 0; i < width; ++i) {
dst_a[0] = src[0 * src_stride + 0];
dst_b[0] = src[0 * src_stride + 1];
dst_a[1] = src[1 * src_stride + 0];
dst_b[1] = src[1 * src_stride + 1];
dst_a[2] = src[2 * src_stride + 0];
dst_b[2] = src[2 * src_stride + 1];
dst_a[3] = src[3 * src_stride + 0];
dst_b[3] = src[3 * src_stride + 1];
dst_a[4] = src[4 * src_stride + 0];
dst_b[4] = src[4 * src_stride + 1];
dst_a[5] = src[5 * src_stride + 0];
dst_b[5] = src[5 * src_stride + 1];
dst_a[6] = src[6 * src_stride + 0];
dst_b[6] = src[6 * src_stride + 1];
dst_a[7] = src[7 * src_stride + 0];
dst_b[7] = src[7 * src_stride + 1];
src += 2;
dst_a += dst_stride_a;
dst_b += dst_stride_b;
}
}
void TransposeWxH_C(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width,
int height) {
int i;
for (i = 0; i < width; ++i) {
int j;
for (j = 0; j < height; ++j) {
dst[i * dst_stride + j] = src[j * src_stride + i];
}
}
}
void TransposeUVWxH_C(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width,
int height) {
int i;
for (i = 0; i < width * 2; i += 2) {
int j;
for (j = 0; j < height; ++j) {
dst_a[j + ((i >> 1) * dst_stride_a)] = src[i + (j * src_stride)];
dst_b[j + ((i >> 1) * dst_stride_b)] = src[i + (j * src_stride) + 1];
}
}
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

374
thirdparty/libyuv/source/rotate_gcc.cc vendored Normal file

@@ -0,0 +1,374 @@
/*
* Copyright 2015 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for GCC x86 and x64.
#if !defined(LIBYUV_DISABLE_X86) && \
(defined(__x86_64__) || defined(__i386__))
// Transpose 8x8. 32 or 64 bit, but not NaCL for 64 bit.
#if defined(HAS_TRANSPOSEWX8_SSSE3)
void TransposeWx8_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
asm volatile(
// Read in the data from the source pointer.
// First round of bit swap.
LABELALIGN
"1: \n"
"movq (%0),%%xmm0 \n"
"movq (%0,%3),%%xmm1 \n"
"lea (%0,%3,2),%0 \n"
"punpcklbw %%xmm1,%%xmm0 \n"
"movq (%0),%%xmm2 \n"
"movdqa %%xmm0,%%xmm1 \n"
"palignr $0x8,%%xmm1,%%xmm1 \n"
"movq (%0,%3),%%xmm3 \n"
"lea (%0,%3,2),%0 \n"
"punpcklbw %%xmm3,%%xmm2 \n"
"movdqa %%xmm2,%%xmm3 \n"
"movq (%0),%%xmm4 \n"
"palignr $0x8,%%xmm3,%%xmm3 \n"
"movq (%0,%3),%%xmm5 \n"
"lea (%0,%3,2),%0 \n"
"punpcklbw %%xmm5,%%xmm4 \n"
"movdqa %%xmm4,%%xmm5 \n"
"movq (%0),%%xmm6 \n"
"palignr $0x8,%%xmm5,%%xmm5 \n"
"movq (%0,%3),%%xmm7 \n"
"lea (%0,%3,2),%0 \n"
"punpcklbw %%xmm7,%%xmm6 \n"
"neg %3 \n"
"movdqa %%xmm6,%%xmm7 \n"
"lea 0x8(%0,%3,8),%0 \n"
"palignr $0x8,%%xmm7,%%xmm7 \n"
"neg %3 \n"
// Second round of bit swap.
"punpcklwd %%xmm2,%%xmm0 \n"
"punpcklwd %%xmm3,%%xmm1 \n"
"movdqa %%xmm0,%%xmm2 \n"
"movdqa %%xmm1,%%xmm3 \n"
"palignr $0x8,%%xmm2,%%xmm2 \n"
"palignr $0x8,%%xmm3,%%xmm3 \n"
"punpcklwd %%xmm6,%%xmm4 \n"
"punpcklwd %%xmm7,%%xmm5 \n"
"movdqa %%xmm4,%%xmm6 \n"
"movdqa %%xmm5,%%xmm7 \n"
"palignr $0x8,%%xmm6,%%xmm6 \n"
"palignr $0x8,%%xmm7,%%xmm7 \n"
// Third round of bit swap.
// Write to the destination pointer.
"punpckldq %%xmm4,%%xmm0 \n"
"movq %%xmm0,(%1) \n"
"movdqa %%xmm0,%%xmm4 \n"
"palignr $0x8,%%xmm4,%%xmm4 \n"
"movq %%xmm4,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm6,%%xmm2 \n"
"movdqa %%xmm2,%%xmm6 \n"
"movq %%xmm2,(%1) \n"
"palignr $0x8,%%xmm6,%%xmm6 \n"
"punpckldq %%xmm5,%%xmm1 \n"
"movq %%xmm6,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"movdqa %%xmm1,%%xmm5 \n"
"movq %%xmm1,(%1) \n"
"palignr $0x8,%%xmm5,%%xmm5 \n"
"movq %%xmm5,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm7,%%xmm3 \n"
"movq %%xmm3,(%1) \n"
"movdqa %%xmm3,%%xmm7 \n"
"palignr $0x8,%%xmm7,%%xmm7 \n"
"sub $0x8,%2 \n"
"movq %%xmm7,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"jg 1b \n"
: "+r"(src), // %0
"+r"(dst), // %1
"+r"(width) // %2
: "r"((intptr_t)(src_stride)), // %3
"r"((intptr_t)(dst_stride)) // %4
: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
"xmm7");
}
#endif // defined(HAS_TRANSPOSEWX8_SSSE3)
// Transpose 16x8. 64 bit
#if defined(HAS_TRANSPOSEWX8_FAST_SSSE3)
void TransposeWx8_Fast_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
asm volatile(
// Read in the data from the source pointer.
// First round of bit swap.
LABELALIGN
"1: \n"
"movdqu (%0),%%xmm0 \n"
"movdqu (%0,%3),%%xmm1 \n"
"lea (%0,%3,2),%0 \n"
"movdqa %%xmm0,%%xmm8 \n"
"punpcklbw %%xmm1,%%xmm0 \n"
"punpckhbw %%xmm1,%%xmm8 \n"
"movdqu (%0),%%xmm2 \n"
"movdqa %%xmm0,%%xmm1 \n"
"movdqa %%xmm8,%%xmm9 \n"
"palignr $0x8,%%xmm1,%%xmm1 \n"
"palignr $0x8,%%xmm9,%%xmm9 \n"
"movdqu (%0,%3),%%xmm3 \n"
"lea (%0,%3,2),%0 \n"
"movdqa %%xmm2,%%xmm10 \n"
"punpcklbw %%xmm3,%%xmm2 \n"
"punpckhbw %%xmm3,%%xmm10 \n"
"movdqa %%xmm2,%%xmm3 \n"
"movdqa %%xmm10,%%xmm11 \n"
"movdqu (%0),%%xmm4 \n"
"palignr $0x8,%%xmm3,%%xmm3 \n"
"palignr $0x8,%%xmm11,%%xmm11 \n"
"movdqu (%0,%3),%%xmm5 \n"
"lea (%0,%3,2),%0 \n"
"movdqa %%xmm4,%%xmm12 \n"
"punpcklbw %%xmm5,%%xmm4 \n"
"punpckhbw %%xmm5,%%xmm12 \n"
"movdqa %%xmm4,%%xmm5 \n"
"movdqa %%xmm12,%%xmm13 \n"
"movdqu (%0),%%xmm6 \n"
"palignr $0x8,%%xmm5,%%xmm5 \n"
"palignr $0x8,%%xmm13,%%xmm13 \n"
"movdqu (%0,%3),%%xmm7 \n"
"lea (%0,%3,2),%0 \n"
"movdqa %%xmm6,%%xmm14 \n"
"punpcklbw %%xmm7,%%xmm6 \n"
"punpckhbw %%xmm7,%%xmm14 \n"
"neg %3 \n"
"movdqa %%xmm6,%%xmm7 \n"
"movdqa %%xmm14,%%xmm15 \n"
"lea 0x10(%0,%3,8),%0 \n"
"palignr $0x8,%%xmm7,%%xmm7 \n"
"palignr $0x8,%%xmm15,%%xmm15 \n"
"neg %3 \n"
// Second round of bit swap.
"punpcklwd %%xmm2,%%xmm0 \n"
"punpcklwd %%xmm3,%%xmm1 \n"
"movdqa %%xmm0,%%xmm2 \n"
"movdqa %%xmm1,%%xmm3 \n"
"palignr $0x8,%%xmm2,%%xmm2 \n"
"palignr $0x8,%%xmm3,%%xmm3 \n"
"punpcklwd %%xmm6,%%xmm4 \n"
"punpcklwd %%xmm7,%%xmm5 \n"
"movdqa %%xmm4,%%xmm6 \n"
"movdqa %%xmm5,%%xmm7 \n"
"palignr $0x8,%%xmm6,%%xmm6 \n"
"palignr $0x8,%%xmm7,%%xmm7 \n"
"punpcklwd %%xmm10,%%xmm8 \n"
"punpcklwd %%xmm11,%%xmm9 \n"
"movdqa %%xmm8,%%xmm10 \n"
"movdqa %%xmm9,%%xmm11 \n"
"palignr $0x8,%%xmm10,%%xmm10 \n"
"palignr $0x8,%%xmm11,%%xmm11 \n"
"punpcklwd %%xmm14,%%xmm12 \n"
"punpcklwd %%xmm15,%%xmm13 \n"
"movdqa %%xmm12,%%xmm14 \n"
"movdqa %%xmm13,%%xmm15 \n"
"palignr $0x8,%%xmm14,%%xmm14 \n"
"palignr $0x8,%%xmm15,%%xmm15 \n"
// Third round of bit swap.
// Write to the destination pointer.
"punpckldq %%xmm4,%%xmm0 \n"
"movq %%xmm0,(%1) \n"
"movdqa %%xmm0,%%xmm4 \n"
"palignr $0x8,%%xmm4,%%xmm4 \n"
"movq %%xmm4,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm6,%%xmm2 \n"
"movdqa %%xmm2,%%xmm6 \n"
"movq %%xmm2,(%1) \n"
"palignr $0x8,%%xmm6,%%xmm6 \n"
"punpckldq %%xmm5,%%xmm1 \n"
"movq %%xmm6,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"movdqa %%xmm1,%%xmm5 \n"
"movq %%xmm1,(%1) \n"
"palignr $0x8,%%xmm5,%%xmm5 \n"
"movq %%xmm5,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm7,%%xmm3 \n"
"movq %%xmm3,(%1) \n"
"movdqa %%xmm3,%%xmm7 \n"
"palignr $0x8,%%xmm7,%%xmm7 \n"
"movq %%xmm7,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm12,%%xmm8 \n"
"movq %%xmm8,(%1) \n"
"movdqa %%xmm8,%%xmm12 \n"
"palignr $0x8,%%xmm12,%%xmm12 \n"
"movq %%xmm12,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm14,%%xmm10 \n"
"movdqa %%xmm10,%%xmm14 \n"
"movq %%xmm10,(%1) \n"
"palignr $0x8,%%xmm14,%%xmm14 \n"
"punpckldq %%xmm13,%%xmm9 \n"
"movq %%xmm14,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"movdqa %%xmm9,%%xmm13 \n"
"movq %%xmm9,(%1) \n"
"palignr $0x8,%%xmm13,%%xmm13 \n"
"movq %%xmm13,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"punpckldq %%xmm15,%%xmm11 \n"
"movq %%xmm11,(%1) \n"
"movdqa %%xmm11,%%xmm15 \n"
"palignr $0x8,%%xmm15,%%xmm15 \n"
"sub $0x10,%2 \n"
"movq %%xmm15,(%1,%4) \n"
"lea (%1,%4,2),%1 \n"
"jg 1b \n"
: "+r"(src), // %0
"+r"(dst), // %1
"+r"(width) // %2
: "r"((intptr_t)(src_stride)), // %3
"r"((intptr_t)(dst_stride)) // %4
: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
"xmm7", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14",
"xmm15");
}
#endif // defined(HAS_TRANSPOSEWX8_FAST_SSSE3)
// Transpose UV 8x8. 64 bit.
#if defined(HAS_TRANSPOSEUVWX8_SSE2)
void TransposeUVWx8_SSE2(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
asm volatile(
// Read in the data from the source pointer.
// First round of bit swap.
LABELALIGN
"1: \n"
"movdqu (%0),%%xmm0 \n"
"movdqu (%0,%4),%%xmm1 \n"
"lea (%0,%4,2),%0 \n"
"movdqa %%xmm0,%%xmm8 \n"
"punpcklbw %%xmm1,%%xmm0 \n"
"punpckhbw %%xmm1,%%xmm8 \n"
"movdqa %%xmm8,%%xmm1 \n"
"movdqu (%0),%%xmm2 \n"
"movdqu (%0,%4),%%xmm3 \n"
"lea (%0,%4,2),%0 \n"
"movdqa %%xmm2,%%xmm8 \n"
"punpcklbw %%xmm3,%%xmm2 \n"
"punpckhbw %%xmm3,%%xmm8 \n"
"movdqa %%xmm8,%%xmm3 \n"
"movdqu (%0),%%xmm4 \n"
"movdqu (%0,%4),%%xmm5 \n"
"lea (%0,%4,2),%0 \n"
"movdqa %%xmm4,%%xmm8 \n"
"punpcklbw %%xmm5,%%xmm4 \n"
"punpckhbw %%xmm5,%%xmm8 \n"
"movdqa %%xmm8,%%xmm5 \n"
"movdqu (%0),%%xmm6 \n"
"movdqu (%0,%4),%%xmm7 \n"
"lea (%0,%4,2),%0 \n"
"movdqa %%xmm6,%%xmm8 \n"
"punpcklbw %%xmm7,%%xmm6 \n"
"neg %4 \n"
"lea 0x10(%0,%4,8),%0 \n"
"punpckhbw %%xmm7,%%xmm8 \n"
"movdqa %%xmm8,%%xmm7 \n"
"neg %4 \n"
// Second round of bit swap.
"movdqa %%xmm0,%%xmm8 \n"
"movdqa %%xmm1,%%xmm9 \n"
"punpckhwd %%xmm2,%%xmm8 \n"
"punpckhwd %%xmm3,%%xmm9 \n"
"punpcklwd %%xmm2,%%xmm0 \n"
"punpcklwd %%xmm3,%%xmm1 \n"
"movdqa %%xmm8,%%xmm2 \n"
"movdqa %%xmm9,%%xmm3 \n"
"movdqa %%xmm4,%%xmm8 \n"
"movdqa %%xmm5,%%xmm9 \n"
"punpckhwd %%xmm6,%%xmm8 \n"
"punpckhwd %%xmm7,%%xmm9 \n"
"punpcklwd %%xmm6,%%xmm4 \n"
"punpcklwd %%xmm7,%%xmm5 \n"
"movdqa %%xmm8,%%xmm6 \n"
"movdqa %%xmm9,%%xmm7 \n"
// Third round of bit swap.
// Write to the destination pointer.
"movdqa %%xmm0,%%xmm8 \n"
"punpckldq %%xmm4,%%xmm0 \n"
"movlpd %%xmm0,(%1) \n" // Write back U channel
"movhpd %%xmm0,(%2) \n" // Write back V channel
"punpckhdq %%xmm4,%%xmm8 \n"
"movlpd %%xmm8,(%1,%5) \n"
"lea (%1,%5,2),%1 \n"
"movhpd %%xmm8,(%2,%6) \n"
"lea (%2,%6,2),%2 \n"
"movdqa %%xmm2,%%xmm8 \n"
"punpckldq %%xmm6,%%xmm2 \n"
"movlpd %%xmm2,(%1) \n"
"movhpd %%xmm2,(%2) \n"
"punpckhdq %%xmm6,%%xmm8 \n"
"movlpd %%xmm8,(%1,%5) \n"
"lea (%1,%5,2),%1 \n"
"movhpd %%xmm8,(%2,%6) \n"
"lea (%2,%6,2),%2 \n"
"movdqa %%xmm1,%%xmm8 \n"
"punpckldq %%xmm5,%%xmm1 \n"
"movlpd %%xmm1,(%1) \n"
"movhpd %%xmm1,(%2) \n"
"punpckhdq %%xmm5,%%xmm8 \n"
"movlpd %%xmm8,(%1,%5) \n"
"lea (%1,%5,2),%1 \n"
"movhpd %%xmm8,(%2,%6) \n"
"lea (%2,%6,2),%2 \n"
"movdqa %%xmm3,%%xmm8 \n"
"punpckldq %%xmm7,%%xmm3 \n"
"movlpd %%xmm3,(%1) \n"
"movhpd %%xmm3,(%2) \n"
"punpckhdq %%xmm7,%%xmm8 \n"
"sub $0x8,%3 \n"
"movlpd %%xmm8,(%1,%5) \n"
"lea (%1,%5,2),%1 \n"
"movhpd %%xmm8,(%2,%6) \n"
"lea (%2,%6,2),%2 \n"
"jg 1b \n"
: "+r"(src), // %0
"+r"(dst_a), // %1
"+r"(dst_b), // %2
"+r"(width) // %3
: "r"((intptr_t)(src_stride)), // %4
"r"((intptr_t)(dst_stride_a)), // %5
"r"((intptr_t)(dst_stride_b)) // %6
: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6",
"xmm7", "xmm8", "xmm9");
}
#endif // defined(HAS_TRANSPOSEUVWX8_SSE2)
#endif // defined(__x86_64__) || defined(__i386__)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

291
thirdparty/libyuv/source/rotate_mmi.cc vendored Normal file

@@ -0,0 +1,291 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for Mips MMI.
#if !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A)
void TransposeWx8_MMI(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
uint64_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
uint64_t tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13;
uint8_t* src_tmp = nullptr;
__asm__ volatile(
"1: \n\t"
"ldc1 %[tmp12], 0x00(%[src]) \n\t"
"dadd %[src_tmp], %[src], %[src_stride] \n\t"
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp0 = (00 10 01 11 02 12 03 13) */
"punpcklbh %[tmp0], %[tmp12], %[tmp13] \n\t"
/* tmp1 = (04 14 05 15 06 16 07 17) */
"punpckhbh %[tmp1], %[tmp12], %[tmp13] \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp12], 0x00(%[src_tmp]) \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp2 = (20 30 21 31 22 32 23 33) */
"punpcklbh %[tmp2], %[tmp12], %[tmp13] \n\t"
/* tmp3 = (24 34 25 35 26 36 27 37) */
"punpckhbh %[tmp3], %[tmp12], %[tmp13] \n\t"
/* tmp4 = (00 10 20 30 01 11 21 31) */
"punpcklhw %[tmp4], %[tmp0], %[tmp2] \n\t"
/* tmp5 = (02 12 22 32 03 13 23 33) */
"punpckhhw %[tmp5], %[tmp0], %[tmp2] \n\t"
/* tmp6 = (04 14 24 34 05 15 25 35) */
"punpcklhw %[tmp6], %[tmp1], %[tmp3] \n\t"
/* tmp7 = (06 16 26 36 07 17 27 37) */
"punpckhhw %[tmp7], %[tmp1], %[tmp3] \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp12], 0x00(%[src_tmp]) \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp0 = (40 50 41 51 42 52 43 53) */
"punpcklbh %[tmp0], %[tmp12], %[tmp13] \n\t"
/* tmp1 = (44 54 45 55 46 56 47 57) */
"punpckhbh %[tmp1], %[tmp12], %[tmp13] \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp12], 0x00(%[src_tmp]) \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp2 = (60 70 61 71 62 72 63 73) */
"punpcklbh %[tmp2], %[tmp12], %[tmp13] \n\t"
/* tmp3 = (64 74 65 75 66 76 67 77) */
"punpckhbh %[tmp3], %[tmp12], %[tmp13] \n\t"
/* tmp8 = (40 50 60 70 41 51 61 71) */
"punpcklhw %[tmp8], %[tmp0], %[tmp2] \n\t"
/* tmp9 = (42 52 62 72 43 53 63 73) */
"punpckhhw %[tmp9], %[tmp0], %[tmp2] \n\t"
/* tmp10 = (44 54 64 74 45 55 65 75) */
"punpcklhw %[tmp10], %[tmp1], %[tmp3] \n\t"
/* tmp11 = (46 56 66 76 47 57 67 77) */
"punpckhhw %[tmp11], %[tmp1], %[tmp3] \n\t"
/* tmp0 = (00 10 20 30 40 50 60 70) */
"punpcklwd %[tmp0], %[tmp4], %[tmp8] \n\t"
/* tmp1 = (01 11 21 31 41 51 61 71) */
"punpckhwd %[tmp1], %[tmp4], %[tmp8] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst]) \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst]) \n\t"
/* tmp0 = (02 12 22 32 42 52 62 72) */
"punpcklwd %[tmp0], %[tmp5], %[tmp9] \n\t"
/* tmp1 = (03 13 23 33 43 53 63 73) */
"punpckhwd %[tmp1], %[tmp5], %[tmp9] \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst]) \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst]) \n\t"
/* tmp0 = (04 14 24 34 44 54 64 74) */
"punpcklwd %[tmp0], %[tmp6], %[tmp10] \n\t"
/* tmp1 = (05 15 25 35 45 55 65 75) */
"punpckhwd %[tmp1], %[tmp6], %[tmp10] \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst]) \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst]) \n\t"
/* tmp0 = (06 16 26 36 46 56 66 76) */
"punpcklwd %[tmp0], %[tmp7], %[tmp11] \n\t"
/* tmp1 = (07 17 27 37 47 57 67 77) */
"punpckhwd %[tmp1], %[tmp7], %[tmp11] \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst]) \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst]) \n\t"
"dadd %[dst], %[dst], %[dst_stride] \n\t"
"daddi %[src], %[src], 0x08 \n\t"
"daddi %[width], %[width], -0x08 \n\t"
"bnez %[width], 1b \n\t"
: [tmp0] "=&f"(tmp0), [tmp1] "=&f"(tmp1), [tmp2] "=&f"(tmp2),
[tmp3] "=&f"(tmp3), [tmp4] "=&f"(tmp4), [tmp5] "=&f"(tmp5),
[tmp6] "=&f"(tmp6), [tmp7] "=&f"(tmp7), [tmp8] "=&f"(tmp8),
[tmp9] "=&f"(tmp9), [tmp10] "=&f"(tmp10), [tmp11] "=&f"(tmp11),
[tmp12] "=&f"(tmp12), [tmp13] "=&f"(tmp13), [dst] "+&r"(dst),
[src_tmp] "+&r"(src_tmp)
: [src] "r"(src), [width] "r"(width), [src_stride] "r"(src_stride),
[dst_stride] "r"(dst_stride)
: "memory");
}
void TransposeUVWx8_MMI(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
uint64_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
uint64_t tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13;
uint8_t* src_tmp = nullptr;
__asm__ volatile(
"1: \n\t"
/* tmp12 = (u00 v00 u01 v01 u02 v02 u03 v03) */
"ldc1 %[tmp12], 0x00(%[src]) \n\t"
"dadd %[src_tmp], %[src], %[src_stride] \n\t"
/* tmp13 = (u10 v10 u11 v11 u12 v12 u13 v13) */
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp0 = (u00 u10 v00 v10 u01 u11 v01 v11) */
"punpcklbh %[tmp0], %[tmp12], %[tmp13] \n\t"
/* tmp1 = (u02 u12 v02 v12 u03 u13 v03 v13) */
"punpckhbh %[tmp1], %[tmp12], %[tmp13] \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
/* tmp12 = (u20 v20 u21 v21 u22 v22 u23 v23) */
"ldc1 %[tmp12], 0x00(%[src_tmp]) \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
/* tmp13 = (u30 v30 u31 v31 u32 v32 u33 v33) */
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp2 = (u20 u30 v20 v30 u21 u31 v21 v31) */
"punpcklbh %[tmp2], %[tmp12], %[tmp13] \n\t"
/* tmp3 = (u22 u32 v22 v32 u23 u33 v23 v33) */
"punpckhbh %[tmp3], %[tmp12], %[tmp13] \n\t"
/* tmp4 = (u00 u10 u20 u30 v00 v10 v20 v30) */
"punpcklhw %[tmp4], %[tmp0], %[tmp2] \n\t"
/* tmp5 = (u01 u11 u21 u31 v01 v11 v21 v31) */
"punpckhhw %[tmp5], %[tmp0], %[tmp2] \n\t"
/* tmp6 = (u02 u12 u22 u32 v02 v12 v22 v32) */
"punpcklhw %[tmp6], %[tmp1], %[tmp3] \n\t"
/* tmp7 = (u03 u13 u23 u33 v03 v13 v23 v33) */
"punpckhhw %[tmp7], %[tmp1], %[tmp3] \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
/* tmp12 = (u40 v40 u41 v41 u42 v42 u43 v43) */
"ldc1 %[tmp12], 0x00(%[src_tmp]) \n\t"
/* tmp13 = (u50 v50 u51 v51 u52 v52 u53 v53) */
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp0 = (u40 u50 v40 v50 u41 u51 v41 v51) */
"punpcklbh %[tmp0], %[tmp12], %[tmp13] \n\t"
/* tmp1 = (u42 u52 v42 v52 u43 u53 v43 v53) */
"punpckhbh %[tmp1], %[tmp12], %[tmp13] \n\t"
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
/* tmp12 = (u60 v60 u61 v61 u62 v62 u63 v63) */
"ldc1 %[tmp12], 0x00(%[src_tmp]) \n\t"
/* tmp13 = (u70 v70 u71 v71 u72 v72 u73 v73) */
"dadd %[src_tmp], %[src_tmp], %[src_stride] \n\t"
"ldc1 %[tmp13], 0x00(%[src_tmp]) \n\t"
/* tmp2 = (u60 u70 v60 v70 u61 u71 v61 v71) */
"punpcklbh %[tmp2], %[tmp12], %[tmp13] \n\t"
/* tmp3 = (u62 u72 v62 v72 u63 u73 v63 v73) */
"punpckhbh %[tmp3], %[tmp12], %[tmp13] \n\t"
/* tmp8 = (u40 u50 u60 u70 v40 v50 v60 v70) */
"punpcklhw %[tmp8], %[tmp0], %[tmp2] \n\t"
/* tmp9 = (u41 u51 u61 u71 v41 v51 v61 v71) */
"punpckhhw %[tmp9], %[tmp0], %[tmp2] \n\t"
/* tmp10 = (u42 u52 u62 u72 v42 v52 v62 v72) */
"punpcklhw %[tmp10], %[tmp1], %[tmp3] \n\t"
/* tmp11 = (u43 u53 u63 u73 v43 v53 v63 v73) */
"punpckhhw %[tmp11], %[tmp1], %[tmp3] \n\t"
/* tmp0 = (u00 u10 u20 u30 u40 u50 u60 u70) */
"punpcklwd %[tmp0], %[tmp4], %[tmp8] \n\t"
/* tmp1 = (v00 v10 v20 v30 v40 v50 v60 v70) */
"punpckhwd %[tmp1], %[tmp4], %[tmp8] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst_a]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst_a]) \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst_b]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst_b]) \n\t"
/* tmp0 = (u01 u11 u21 u31 u41 u51 u61 u71) */
"punpcklwd %[tmp0], %[tmp5], %[tmp9] \n\t"
/* tmp1 = (v01 v11 v21 v31 v41 v51 v61 v71) */
"punpckhwd %[tmp1], %[tmp5], %[tmp9] \n\t"
"dadd %[dst_a], %[dst_a], %[dst_stride_a] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst_a]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst_a]) \n\t"
"dadd %[dst_b], %[dst_b], %[dst_stride_b] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst_b]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst_b]) \n\t"
/* tmp0 = (u02 u12 u22 u32 u42 u52 u62 u72) */
"punpcklwd %[tmp0], %[tmp6], %[tmp10] \n\t"
/* tmp1 = (v02 v12 v22 v32 v42 v52 v62 v72) */
"punpckhwd %[tmp1], %[tmp6], %[tmp10] \n\t"
"dadd %[dst_a], %[dst_a], %[dst_stride_a] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst_a]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst_a]) \n\t"
"dadd %[dst_b], %[dst_b], %[dst_stride_b] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst_b]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst_b]) \n\t"
/* tmp0 = (u03 u13 u23 u33 u43 u53 u63 u73) */
"punpcklwd %[tmp0], %[tmp7], %[tmp11] \n\t"
/* tmp1 = (v03 v13 v23 v33 v43 v53 v63 v73) */
"punpckhwd %[tmp1], %[tmp7], %[tmp11] \n\t"
"dadd %[dst_a], %[dst_a], %[dst_stride_a] \n\t"
"gssdlc1 %[tmp0], 0x07(%[dst_a]) \n\t"
"gssdrc1 %[tmp0], 0x00(%[dst_a]) \n\t"
"dadd %[dst_b], %[dst_b], %[dst_stride_b] \n\t"
"gssdlc1 %[tmp1], 0x07(%[dst_b]) \n\t"
"gssdrc1 %[tmp1], 0x00(%[dst_b]) \n\t"
"dadd %[dst_a], %[dst_a], %[dst_stride_a] \n\t"
"dadd %[dst_b], %[dst_b], %[dst_stride_b] \n\t"
"daddiu %[src], %[src], 0x08 \n\t"
"daddi %[width], %[width], -0x04 \n\t"
"bnez %[width], 1b \n\t"
: [tmp0] "=&f"(tmp0), [tmp1] "=&f"(tmp1), [tmp2] "=&f"(tmp2),
[tmp3] "=&f"(tmp3), [tmp4] "=&f"(tmp4), [tmp5] "=&f"(tmp5),
[tmp6] "=&f"(tmp6), [tmp7] "=&f"(tmp7), [tmp8] "=&f"(tmp8),
[tmp9] "=&f"(tmp9), [tmp10] "=&f"(tmp10), [tmp11] "=&f"(tmp11),
[tmp12] "=&f"(tmp12), [tmp13] "=&f"(tmp13), [dst_a] "+&r"(dst_a),
[dst_b] "+&r"(dst_b), [src_tmp] "+&r"(src_tmp)
: [src] "r"(src), [width] "r"(width), [dst_stride_a] "r"(dst_stride_a),
[dst_stride_b] "r"(dst_stride_b), [src_stride] "r"(src_stride)
: "memory");
}
#endif // !defined(LIBYUV_DISABLE_MMI) && defined(_MIPS_ARCH_LOONGSON3A)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

250
thirdparty/libyuv/source/rotate_msa.cc vendored Normal file
View File

@@ -0,0 +1,250 @@
/*
* Copyright 2016 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
// This module is for GCC MSA
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#include "libyuv/macros_msa.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#define ILVRL_B(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
out0 = (v16u8)__msa_ilvr_b((v16i8)in1, (v16i8)in0); \
out1 = (v16u8)__msa_ilvl_b((v16i8)in1, (v16i8)in0); \
out2 = (v16u8)__msa_ilvr_b((v16i8)in3, (v16i8)in2); \
out3 = (v16u8)__msa_ilvl_b((v16i8)in3, (v16i8)in2); \
}
#define ILVRL_H(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
out0 = (v16u8)__msa_ilvr_h((v8i16)in1, (v8i16)in0); \
out1 = (v16u8)__msa_ilvl_h((v8i16)in1, (v8i16)in0); \
out2 = (v16u8)__msa_ilvr_h((v8i16)in3, (v8i16)in2); \
out3 = (v16u8)__msa_ilvl_h((v8i16)in3, (v8i16)in2); \
}
#define ILVRL_W(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
out0 = (v16u8)__msa_ilvr_w((v4i32)in1, (v4i32)in0); \
out1 = (v16u8)__msa_ilvl_w((v4i32)in1, (v4i32)in0); \
out2 = (v16u8)__msa_ilvr_w((v4i32)in3, (v4i32)in2); \
out3 = (v16u8)__msa_ilvl_w((v4i32)in3, (v4i32)in2); \
}
#define ILVRL_D(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
out0 = (v16u8)__msa_ilvr_d((v2i64)in1, (v2i64)in0); \
out1 = (v16u8)__msa_ilvl_d((v2i64)in1, (v2i64)in0); \
out2 = (v16u8)__msa_ilvr_d((v2i64)in3, (v2i64)in2); \
out3 = (v16u8)__msa_ilvl_d((v2i64)in3, (v2i64)in2); \
}
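// Each ILVRL_* helper emits a right/left interleave pair: __msa_ilvr_*
// merges the low halves of two vectors element by element and
// __msa_ilvl_* the high halves, at byte (B), halfword (H), word (W) or
// doubleword (D) granularity. Chaining B -> H -> W -> D performs a full
// 16x16 byte transpose in four rounds, which is how the Wx16 routines
// below are structured.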
void TransposeWx16_C(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
TransposeWx8_C(src, src_stride, dst, dst_stride, width);
TransposeWx8_C((src + 8 * src_stride), src_stride, (dst + 8), dst_stride,
width);
}
void TransposeUVWx16_C(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
TransposeUVWx8_C(src, src_stride, dst_a, dst_stride_a, dst_b, dst_stride_b,
width);
TransposeUVWx8_C((src + 8 * src_stride), src_stride, (dst_a + 8),
dst_stride_a, (dst_b + 8), dst_stride_b, width);
}
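// Transposes a 16x16 byte tile per outer-loop step: sixteen row loads
// feed the four interleave rounds, and the results are stored four rows
// at a time via ST_UB4.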
void TransposeWx16_MSA(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
int x;
const uint8_t* s;
v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3;
v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
v16u8 res0, res1, res2, res3, res4, res5, res6, res7, res8, res9;
for (x = 0; x < width; x += 16) {
s = src;
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
ILVRL_W(reg0, reg4, reg1, reg5, res0, res1, res2, res3);
ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
res8 = (v16u8)__msa_ilvr_w((v4i32)reg4, (v4i32)reg0);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg4, (v4i32)reg0);
ILVRL_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3);
ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
dst += dst_stride * 4;
res8 = (v16u8)__msa_ilvr_w((v4i32)reg5, (v4i32)reg1);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg5, (v4i32)reg1);
ILVRL_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3);
ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
dst += dst_stride * 4;
res8 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg2);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg2);
ILVRL_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3);
ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
dst += dst_stride * 4;
res8 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg3);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg3);
ILVRL_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);
ST_UB4(dst0, dst1, dst2, dst3, dst, dst_stride);
src += 16;
dst += dst_stride * 4;
}
}
void TransposeUVWx16_MSA(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
int x;
const uint8_t* s;
v16u8 src0, src1, src2, src3, dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3;
v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
v16u8 res0, res1, res2, res3, res4, res5, res6, res7, res8, res9;
for (x = 0; x < width; x += 8) {
s = src;
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
ILVRL_W(reg0, reg4, reg1, reg5, res0, res1, res2, res3);
ILVRL_W(reg2, reg6, reg3, reg7, res4, res5, res6, res7);
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg0, reg1, reg2, reg3);
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src1 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src2 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
src3 = (v16u8)__msa_ld_b((v16i8*)s, 0);
s += src_stride;
ILVRL_B(src0, src1, src2, src3, vec0, vec1, vec2, vec3);
ILVRL_H(vec0, vec2, vec1, vec3, reg4, reg5, reg6, reg7);
res8 = (v16u8)__msa_ilvr_w((v4i32)reg4, (v4i32)reg0);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg4, (v4i32)reg0);
ILVRL_D(res0, res8, res1, res9, dst0, dst1, dst2, dst3);
ST_UB2(dst0, dst2, dst_a, dst_stride_a);
ST_UB2(dst1, dst3, dst_b, dst_stride_b);
dst_a += dst_stride_a * 2;
dst_b += dst_stride_b * 2;
res8 = (v16u8)__msa_ilvr_w((v4i32)reg5, (v4i32)reg1);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg5, (v4i32)reg1);
ILVRL_D(res2, res8, res3, res9, dst0, dst1, dst2, dst3);
ST_UB2(dst0, dst2, dst_a, dst_stride_a);
ST_UB2(dst1, dst3, dst_b, dst_stride_b);
dst_a += dst_stride_a * 2;
dst_b += dst_stride_b * 2;
res8 = (v16u8)__msa_ilvr_w((v4i32)reg6, (v4i32)reg2);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg6, (v4i32)reg2);
ILVRL_D(res4, res8, res5, res9, dst0, dst1, dst2, dst3);
ST_UB2(dst0, dst2, dst_a, dst_stride_a);
ST_UB2(dst1, dst3, dst_b, dst_stride_b);
dst_a += dst_stride_a * 2;
dst_b += dst_stride_b * 2;
res8 = (v16u8)__msa_ilvr_w((v4i32)reg7, (v4i32)reg3);
res9 = (v16u8)__msa_ilvl_w((v4i32)reg7, (v4i32)reg3);
ILVRL_D(res6, res8, res7, res9, dst0, dst1, dst2, dst3);
ST_UB2(dst0, dst2, dst_a, dst_stride_a);
ST_UB2(dst1, dst3, dst_b, dst_stride_b);
src += 16;
dst_a += dst_stride_a * 2;
dst_b += dst_stride_b * 2;
}
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)

418
thirdparty/libyuv/source/rotate_neon.cc vendored Normal file
View File

@@ -0,0 +1,418 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#if !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__) && \
!defined(__aarch64__)
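// vtbl lookup table for the 4x8 residual block: indices 0,4,8,12,...
// gather byte k from each of four 32-bit rows held in a d-register
// pair, i.e. a 4x4 byte transpose per lookup.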
static const uvec8 kVTbl4x4Transpose = {0, 4, 8, 12, 1, 5, 9, 13,
2, 6, 10, 14, 3, 7, 11, 15};
void TransposeWx8_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
const uint8_t* src_temp;
asm volatile(
// loops are on blocks of 8. loop will stop when
// counter gets to or below 0. starting the counter
      // at w-8 allows for this
"sub %5, #8 \n"
// handle 8x8 blocks. this should be the majority of the plane
"1: \n"
"mov %0, %1 \n"
"vld1.8 {d0}, [%0], %2 \n"
"vld1.8 {d1}, [%0], %2 \n"
"vld1.8 {d2}, [%0], %2 \n"
"vld1.8 {d3}, [%0], %2 \n"
"vld1.8 {d4}, [%0], %2 \n"
"vld1.8 {d5}, [%0], %2 \n"
"vld1.8 {d6}, [%0], %2 \n"
"vld1.8 {d7}, [%0] \n"
"vtrn.8 d1, d0 \n"
"vtrn.8 d3, d2 \n"
"vtrn.8 d5, d4 \n"
"vtrn.8 d7, d6 \n"
"vtrn.16 d1, d3 \n"
"vtrn.16 d0, d2 \n"
"vtrn.16 d5, d7 \n"
"vtrn.16 d4, d6 \n"
"vtrn.32 d1, d5 \n"
"vtrn.32 d0, d4 \n"
"vtrn.32 d3, d7 \n"
"vtrn.32 d2, d6 \n"
"vrev16.8 q0, q0 \n"
"vrev16.8 q1, q1 \n"
"vrev16.8 q2, q2 \n"
"vrev16.8 q3, q3 \n"
"mov %0, %3 \n"
"vst1.8 {d1}, [%0], %4 \n"
"vst1.8 {d0}, [%0], %4 \n"
"vst1.8 {d3}, [%0], %4 \n"
"vst1.8 {d2}, [%0], %4 \n"
"vst1.8 {d5}, [%0], %4 \n"
"vst1.8 {d4}, [%0], %4 \n"
"vst1.8 {d7}, [%0], %4 \n"
"vst1.8 {d6}, [%0] \n"
"add %1, #8 \n" // src += 8
"add %3, %3, %4, lsl #3 \n" // dst += 8 * dst_stride
"subs %5, #8 \n" // w -= 8
"bge 1b \n"
// add 8 back to counter. if the result is 0 there are
// no residuals.
"adds %5, #8 \n"
"beq 4f \n"
// some residual, so between 1 and 7 lines left to transpose
"cmp %5, #2 \n"
"blt 3f \n"
"cmp %5, #4 \n"
"blt 2f \n"
// 4x8 block
"mov %0, %1 \n"
"vld1.32 {d0[0]}, [%0], %2 \n"
"vld1.32 {d0[1]}, [%0], %2 \n"
"vld1.32 {d1[0]}, [%0], %2 \n"
"vld1.32 {d1[1]}, [%0], %2 \n"
"vld1.32 {d2[0]}, [%0], %2 \n"
"vld1.32 {d2[1]}, [%0], %2 \n"
"vld1.32 {d3[0]}, [%0], %2 \n"
"vld1.32 {d3[1]}, [%0] \n"
"mov %0, %3 \n"
"vld1.8 {q3}, [%6] \n"
"vtbl.8 d4, {d0, d1}, d6 \n"
"vtbl.8 d5, {d0, d1}, d7 \n"
"vtbl.8 d0, {d2, d3}, d6 \n"
"vtbl.8 d1, {d2, d3}, d7 \n"
// TODO(frkoenig): Rework shuffle above to
// write out with 4 instead of 8 writes.
"vst1.32 {d4[0]}, [%0], %4 \n"
"vst1.32 {d4[1]}, [%0], %4 \n"
"vst1.32 {d5[0]}, [%0], %4 \n"
"vst1.32 {d5[1]}, [%0] \n"
"add %0, %3, #4 \n"
"vst1.32 {d0[0]}, [%0], %4 \n"
"vst1.32 {d0[1]}, [%0], %4 \n"
"vst1.32 {d1[0]}, [%0], %4 \n"
"vst1.32 {d1[1]}, [%0] \n"
"add %1, #4 \n" // src += 4
"add %3, %3, %4, lsl #2 \n" // dst += 4 * dst_stride
"subs %5, #4 \n" // w -= 4
"beq 4f \n"
// some residual, check to see if it includes a 2x8 block,
// or less
"cmp %5, #2 \n"
"blt 3f \n"
// 2x8 block
"2: \n"
"mov %0, %1 \n"
"vld1.16 {d0[0]}, [%0], %2 \n"
"vld1.16 {d1[0]}, [%0], %2 \n"
"vld1.16 {d0[1]}, [%0], %2 \n"
"vld1.16 {d1[1]}, [%0], %2 \n"
"vld1.16 {d0[2]}, [%0], %2 \n"
"vld1.16 {d1[2]}, [%0], %2 \n"
"vld1.16 {d0[3]}, [%0], %2 \n"
"vld1.16 {d1[3]}, [%0] \n"
"vtrn.8 d0, d1 \n"
"mov %0, %3 \n"
"vst1.64 {d0}, [%0], %4 \n"
"vst1.64 {d1}, [%0] \n"
"add %1, #2 \n" // src += 2
"add %3, %3, %4, lsl #1 \n" // dst += 2 * dst_stride
"subs %5, #2 \n" // w -= 2
"beq 4f \n"
// 1x8 block
"3: \n"
"vld1.8 {d0[0]}, [%1], %2 \n"
"vld1.8 {d0[1]}, [%1], %2 \n"
"vld1.8 {d0[2]}, [%1], %2 \n"
"vld1.8 {d0[3]}, [%1], %2 \n"
"vld1.8 {d0[4]}, [%1], %2 \n"
"vld1.8 {d0[5]}, [%1], %2 \n"
"vld1.8 {d0[6]}, [%1], %2 \n"
"vld1.8 {d0[7]}, [%1] \n"
"vst1.64 {d0}, [%3] \n"
"4: \n"
: "=&r"(src_temp), // %0
"+r"(src), // %1
"+r"(src_stride), // %2
"+r"(dst), // %3
"+r"(dst_stride), // %4
"+r"(width) // %5
: "r"(&kVTbl4x4Transpose) // %6
: "memory", "cc", "q0", "q1", "q2", "q3");
}
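// vtbl table for the UV residual path: indices 0,8,1,9,... zip the
// bytes of the two d registers of the table pair, re-interleaving the
// halves produced by the vtrn.8 steps in the 4x8 block below.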
static const uvec8 kVTbl4x4TransposeDi = {0, 8, 1, 9, 2, 10, 3, 11,
4, 12, 5, 13, 6, 14, 7, 15};
void TransposeUVWx8_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
const uint8_t* src_temp;
asm volatile(
// loops are on blocks of 8. loop will stop when
// counter gets to or below 0. starting the counter
      // at w-8 allows for this
"sub %7, #8 \n"
// handle 8x8 blocks. this should be the majority of the plane
"1: \n"
"mov %0, %1 \n"
"vld2.8 {d0, d1}, [%0], %2 \n"
"vld2.8 {d2, d3}, [%0], %2 \n"
"vld2.8 {d4, d5}, [%0], %2 \n"
"vld2.8 {d6, d7}, [%0], %2 \n"
"vld2.8 {d16, d17}, [%0], %2 \n"
"vld2.8 {d18, d19}, [%0], %2 \n"
"vld2.8 {d20, d21}, [%0], %2 \n"
"vld2.8 {d22, d23}, [%0] \n"
"vtrn.8 q1, q0 \n"
"vtrn.8 q3, q2 \n"
"vtrn.8 q9, q8 \n"
"vtrn.8 q11, q10 \n"
"vtrn.16 q1, q3 \n"
"vtrn.16 q0, q2 \n"
"vtrn.16 q9, q11 \n"
"vtrn.16 q8, q10 \n"
"vtrn.32 q1, q9 \n"
"vtrn.32 q0, q8 \n"
"vtrn.32 q3, q11 \n"
"vtrn.32 q2, q10 \n"
"vrev16.8 q0, q0 \n"
"vrev16.8 q1, q1 \n"
"vrev16.8 q2, q2 \n"
"vrev16.8 q3, q3 \n"
"vrev16.8 q8, q8 \n"
"vrev16.8 q9, q9 \n"
"vrev16.8 q10, q10 \n"
"vrev16.8 q11, q11 \n"
"mov %0, %3 \n"
"vst1.8 {d2}, [%0], %4 \n"
"vst1.8 {d0}, [%0], %4 \n"
"vst1.8 {d6}, [%0], %4 \n"
"vst1.8 {d4}, [%0], %4 \n"
"vst1.8 {d18}, [%0], %4 \n"
"vst1.8 {d16}, [%0], %4 \n"
"vst1.8 {d22}, [%0], %4 \n"
"vst1.8 {d20}, [%0] \n"
"mov %0, %5 \n"
"vst1.8 {d3}, [%0], %6 \n"
"vst1.8 {d1}, [%0], %6 \n"
"vst1.8 {d7}, [%0], %6 \n"
"vst1.8 {d5}, [%0], %6 \n"
"vst1.8 {d19}, [%0], %6 \n"
"vst1.8 {d17}, [%0], %6 \n"
"vst1.8 {d23}, [%0], %6 \n"
"vst1.8 {d21}, [%0] \n"
"add %1, #8*2 \n" // src += 8*2
"add %3, %3, %4, lsl #3 \n" // dst_a += 8 *
// dst_stride_a
"add %5, %5, %6, lsl #3 \n" // dst_b += 8 *
// dst_stride_b
"subs %7, #8 \n" // w -= 8
"bge 1b \n"
// add 8 back to counter. if the result is 0 there are
// no residuals.
"adds %7, #8 \n"
"beq 4f \n"
// some residual, so between 1 and 7 lines left to transpose
"cmp %7, #2 \n"
"blt 3f \n"
"cmp %7, #4 \n"
"blt 2f \n"
// TODO(frkoenig): Clean this up
// 4x8 block
"mov %0, %1 \n"
"vld1.64 {d0}, [%0], %2 \n"
"vld1.64 {d1}, [%0], %2 \n"
"vld1.64 {d2}, [%0], %2 \n"
"vld1.64 {d3}, [%0], %2 \n"
"vld1.64 {d4}, [%0], %2 \n"
"vld1.64 {d5}, [%0], %2 \n"
"vld1.64 {d6}, [%0], %2 \n"
"vld1.64 {d7}, [%0] \n"
"vld1.8 {q15}, [%8] \n"
"vtrn.8 q0, q1 \n"
"vtrn.8 q2, q3 \n"
"vtbl.8 d16, {d0, d1}, d30 \n"
"vtbl.8 d17, {d0, d1}, d31 \n"
"vtbl.8 d18, {d2, d3}, d30 \n"
"vtbl.8 d19, {d2, d3}, d31 \n"
"vtbl.8 d20, {d4, d5}, d30 \n"
"vtbl.8 d21, {d4, d5}, d31 \n"
"vtbl.8 d22, {d6, d7}, d30 \n"
"vtbl.8 d23, {d6, d7}, d31 \n"
"mov %0, %3 \n"
"vst1.32 {d16[0]}, [%0], %4 \n"
"vst1.32 {d16[1]}, [%0], %4 \n"
"vst1.32 {d17[0]}, [%0], %4 \n"
"vst1.32 {d17[1]}, [%0], %4 \n"
"add %0, %3, #4 \n"
"vst1.32 {d20[0]}, [%0], %4 \n"
"vst1.32 {d20[1]}, [%0], %4 \n"
"vst1.32 {d21[0]}, [%0], %4 \n"
"vst1.32 {d21[1]}, [%0] \n"
"mov %0, %5 \n"
"vst1.32 {d18[0]}, [%0], %6 \n"
"vst1.32 {d18[1]}, [%0], %6 \n"
"vst1.32 {d19[0]}, [%0], %6 \n"
"vst1.32 {d19[1]}, [%0], %6 \n"
"add %0, %5, #4 \n"
"vst1.32 {d22[0]}, [%0], %6 \n"
"vst1.32 {d22[1]}, [%0], %6 \n"
"vst1.32 {d23[0]}, [%0], %6 \n"
"vst1.32 {d23[1]}, [%0] \n"
"add %1, #4*2 \n" // src += 4 * 2
"add %3, %3, %4, lsl #2 \n" // dst_a += 4 *
// dst_stride_a
"add %5, %5, %6, lsl #2 \n" // dst_b += 4 *
// dst_stride_b
"subs %7, #4 \n" // w -= 4
"beq 4f \n"
// some residual, check to see if it includes a 2x8 block,
// or less
"cmp %7, #2 \n"
"blt 3f \n"
// 2x8 block
"2: \n"
"mov %0, %1 \n"
"vld2.16 {d0[0], d2[0]}, [%0], %2 \n"
"vld2.16 {d1[0], d3[0]}, [%0], %2 \n"
"vld2.16 {d0[1], d2[1]}, [%0], %2 \n"
"vld2.16 {d1[1], d3[1]}, [%0], %2 \n"
"vld2.16 {d0[2], d2[2]}, [%0], %2 \n"
"vld2.16 {d1[2], d3[2]}, [%0], %2 \n"
"vld2.16 {d0[3], d2[3]}, [%0], %2 \n"
"vld2.16 {d1[3], d3[3]}, [%0] \n"
"vtrn.8 d0, d1 \n"
"vtrn.8 d2, d3 \n"
"mov %0, %3 \n"
"vst1.64 {d0}, [%0], %4 \n"
"vst1.64 {d2}, [%0] \n"
"mov %0, %5 \n"
"vst1.64 {d1}, [%0], %6 \n"
"vst1.64 {d3}, [%0] \n"
"add %1, #2*2 \n" // src += 2 * 2
"add %3, %3, %4, lsl #1 \n" // dst_a += 2 *
// dst_stride_a
"add %5, %5, %6, lsl #1 \n" // dst_b += 2 *
// dst_stride_b
"subs %7, #2 \n" // w -= 2
"beq 4f \n"
// 1x8 block
"3: \n"
"vld2.8 {d0[0], d1[0]}, [%1], %2 \n"
"vld2.8 {d0[1], d1[1]}, [%1], %2 \n"
"vld2.8 {d0[2], d1[2]}, [%1], %2 \n"
"vld2.8 {d0[3], d1[3]}, [%1], %2 \n"
"vld2.8 {d0[4], d1[4]}, [%1], %2 \n"
"vld2.8 {d0[5], d1[5]}, [%1], %2 \n"
"vld2.8 {d0[6], d1[6]}, [%1], %2 \n"
"vld2.8 {d0[7], d1[7]}, [%1] \n"
"vst1.64 {d0}, [%3] \n"
"vst1.64 {d1}, [%5] \n"
"4: \n"
: "=&r"(src_temp), // %0
"+r"(src), // %1
"+r"(src_stride), // %2
"+r"(dst_a), // %3
"+r"(dst_stride_a), // %4
"+r"(dst_b), // %5
"+r"(dst_stride_b), // %6
"+r"(width) // %7
: "r"(&kVTbl4x4TransposeDi) // %8
: "memory", "cc", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
}
#endif  // !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__) &&
        // !defined(__aarch64__)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

443
thirdparty/libyuv/source/rotate_neon64.cc vendored Normal file
View File

@@ -0,0 +1,443 @@
/*
* Copyright 2014 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#include "libyuv/basic_types.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for GCC Neon armv8 64 bit.
#if !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
static const uvec8 kVTbl4x4Transpose = {0, 4, 8, 12, 1, 5, 9, 13,
2, 6, 10, 14, 3, 7, 11, 15};
void TransposeWx8_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
const uint8_t* src_temp;
asm volatile(
// loops are on blocks of 8. loop will stop when
// counter gets to or below 0. starting the counter
      // at w-8 allows for this
"sub %w3, %w3, #8 \n"
// handle 8x8 blocks. this should be the majority of the plane
"1: \n"
"mov %0, %1 \n"
"ld1 {v0.8b}, [%0], %5 \n"
"ld1 {v1.8b}, [%0], %5 \n"
"ld1 {v2.8b}, [%0], %5 \n"
"ld1 {v3.8b}, [%0], %5 \n"
"ld1 {v4.8b}, [%0], %5 \n"
"ld1 {v5.8b}, [%0], %5 \n"
"ld1 {v6.8b}, [%0], %5 \n"
"ld1 {v7.8b}, [%0] \n"
"mov %0, %1 \n"
"trn2 v16.8b, v0.8b, v1.8b \n"
"prfm pldl1keep, [%0, 448] \n" // prefetch 7 lines ahead
"trn1 v17.8b, v0.8b, v1.8b \n"
"add %0, %0, %5 \n"
"trn2 v18.8b, v2.8b, v3.8b \n"
"prfm pldl1keep, [%0, 448] \n" // row 1
"trn1 v19.8b, v2.8b, v3.8b \n"
"add %0, %0, %5 \n"
"trn2 v20.8b, v4.8b, v5.8b \n"
"prfm pldl1keep, [%0, 448] \n" // row 2
"trn1 v21.8b, v4.8b, v5.8b \n"
"add %0, %0, %5 \n"
"trn2 v22.8b, v6.8b, v7.8b \n"
"prfm pldl1keep, [%0, 448] \n" // row 3
"trn1 v23.8b, v6.8b, v7.8b \n"
"add %0, %0, %5 \n"
"trn2 v3.4h, v17.4h, v19.4h \n"
"prfm pldl1keep, [%0, 448] \n" // row 4
"trn1 v1.4h, v17.4h, v19.4h \n"
"add %0, %0, %5 \n"
"trn2 v2.4h, v16.4h, v18.4h \n"
"prfm pldl1keep, [%0, 448] \n" // row 5
"trn1 v0.4h, v16.4h, v18.4h \n"
"add %0, %0, %5 \n"
"trn2 v7.4h, v21.4h, v23.4h \n"
"prfm pldl1keep, [%0, 448] \n" // row 6
"trn1 v5.4h, v21.4h, v23.4h \n"
"add %0, %0, %5 \n"
"trn2 v6.4h, v20.4h, v22.4h \n"
"prfm pldl1keep, [%0, 448] \n" // row 7
"trn1 v4.4h, v20.4h, v22.4h \n"
"trn2 v21.2s, v1.2s, v5.2s \n"
"trn1 v17.2s, v1.2s, v5.2s \n"
"trn2 v20.2s, v0.2s, v4.2s \n"
"trn1 v16.2s, v0.2s, v4.2s \n"
"trn2 v23.2s, v3.2s, v7.2s \n"
"trn1 v19.2s, v3.2s, v7.2s \n"
"trn2 v22.2s, v2.2s, v6.2s \n"
"trn1 v18.2s, v2.2s, v6.2s \n"
"mov %0, %2 \n"
"st1 {v17.8b}, [%0], %6 \n"
"st1 {v16.8b}, [%0], %6 \n"
"st1 {v19.8b}, [%0], %6 \n"
"st1 {v18.8b}, [%0], %6 \n"
"st1 {v21.8b}, [%0], %6 \n"
"st1 {v20.8b}, [%0], %6 \n"
"st1 {v23.8b}, [%0], %6 \n"
"st1 {v22.8b}, [%0] \n"
"add %1, %1, #8 \n" // src += 8
"add %2, %2, %6, lsl #3 \n" // dst += 8 * dst_stride
"subs %w3, %w3, #8 \n" // w -= 8
"b.ge 1b \n"
// add 8 back to counter. if the result is 0 there are
// no residuals.
"adds %w3, %w3, #8 \n"
"b.eq 4f \n"
// some residual, so between 1 and 7 lines left to transpose
"cmp %w3, #2 \n"
"b.lt 3f \n"
"cmp %w3, #4 \n"
"b.lt 2f \n"
// 4x8 block
"mov %0, %1 \n"
"ld1 {v0.s}[0], [%0], %5 \n"
"ld1 {v0.s}[1], [%0], %5 \n"
"ld1 {v0.s}[2], [%0], %5 \n"
"ld1 {v0.s}[3], [%0], %5 \n"
"ld1 {v1.s}[0], [%0], %5 \n"
"ld1 {v1.s}[1], [%0], %5 \n"
"ld1 {v1.s}[2], [%0], %5 \n"
"ld1 {v1.s}[3], [%0] \n"
"mov %0, %2 \n"
"ld1 {v2.16b}, [%4] \n"
"tbl v3.16b, {v0.16b}, v2.16b \n"
"tbl v0.16b, {v1.16b}, v2.16b \n"
// TODO(frkoenig): Rework shuffle above to
// write out with 4 instead of 8 writes.
"st1 {v3.s}[0], [%0], %6 \n"
"st1 {v3.s}[1], [%0], %6 \n"
"st1 {v3.s}[2], [%0], %6 \n"
"st1 {v3.s}[3], [%0] \n"
"add %0, %2, #4 \n"
"st1 {v0.s}[0], [%0], %6 \n"
"st1 {v0.s}[1], [%0], %6 \n"
"st1 {v0.s}[2], [%0], %6 \n"
"st1 {v0.s}[3], [%0] \n"
"add %1, %1, #4 \n" // src += 4
"add %2, %2, %6, lsl #2 \n" // dst += 4 * dst_stride
"subs %w3, %w3, #4 \n" // w -= 4
"b.eq 4f \n"
// some residual, check to see if it includes a 2x8 block,
// or less
"cmp %w3, #2 \n"
"b.lt 3f \n"
// 2x8 block
"2: \n"
"mov %0, %1 \n"
"ld1 {v0.h}[0], [%0], %5 \n"
"ld1 {v1.h}[0], [%0], %5 \n"
"ld1 {v0.h}[1], [%0], %5 \n"
"ld1 {v1.h}[1], [%0], %5 \n"
"ld1 {v0.h}[2], [%0], %5 \n"
"ld1 {v1.h}[2], [%0], %5 \n"
"ld1 {v0.h}[3], [%0], %5 \n"
"ld1 {v1.h}[3], [%0] \n"
"trn2 v2.8b, v0.8b, v1.8b \n"
"trn1 v3.8b, v0.8b, v1.8b \n"
"mov %0, %2 \n"
"st1 {v3.8b}, [%0], %6 \n"
"st1 {v2.8b}, [%0] \n"
"add %1, %1, #2 \n" // src += 2
"add %2, %2, %6, lsl #1 \n" // dst += 2 * dst_stride
"subs %w3, %w3, #2 \n" // w -= 2
"b.eq 4f \n"
// 1x8 block
"3: \n"
"ld1 {v0.b}[0], [%1], %5 \n"
"ld1 {v0.b}[1], [%1], %5 \n"
"ld1 {v0.b}[2], [%1], %5 \n"
"ld1 {v0.b}[3], [%1], %5 \n"
"ld1 {v0.b}[4], [%1], %5 \n"
"ld1 {v0.b}[5], [%1], %5 \n"
"ld1 {v0.b}[6], [%1], %5 \n"
"ld1 {v0.b}[7], [%1] \n"
"st1 {v0.8b}, [%2] \n"
"4: \n"
: "=&r"(src_temp), // %0
"+r"(src), // %1
"+r"(dst), // %2
"+r"(width) // %3
: "r"(&kVTbl4x4Transpose), // %4
"r"(static_cast<ptrdiff_t>(src_stride)), // %5
"r"(static_cast<ptrdiff_t>(dst_stride)) // %6
: "memory", "cc", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16",
"v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
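// 32-entry tbl table for the UV 4x8 residual block, used as two 16-byte
// lookups over the four-register table {v0-v3} holding four interleaved
// UV rows: the first half gathers the even (U) bytes column by column,
// the second half the odd (V) bytes, so each lookup transposes and
// de-interleaves one plane of four rows at once.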
static const uint8_t kVTbl4x4TransposeDi[32] = {
0, 16, 32, 48, 2, 18, 34, 50, 4, 20, 36, 52, 6, 22, 38, 54,
1, 17, 33, 49, 3, 19, 35, 51, 5, 21, 37, 53, 7, 23, 39, 55};
void TransposeUVWx8_NEON(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int width) {
const uint8_t* src_temp;
asm volatile(
// loops are on blocks of 8. loop will stop when
// counter gets to or below 0. starting the counter
      // at w-8 allows for this
"sub %w4, %w4, #8 \n"
// handle 8x8 blocks. this should be the majority of the plane
"1: \n"
"mov %0, %1 \n"
"ld1 {v0.16b}, [%0], %5 \n"
"ld1 {v1.16b}, [%0], %5 \n"
"ld1 {v2.16b}, [%0], %5 \n"
"ld1 {v3.16b}, [%0], %5 \n"
"ld1 {v4.16b}, [%0], %5 \n"
"ld1 {v5.16b}, [%0], %5 \n"
"ld1 {v6.16b}, [%0], %5 \n"
"ld1 {v7.16b}, [%0] \n"
"mov %0, %1 \n"
"trn1 v16.16b, v0.16b, v1.16b \n"
"trn2 v17.16b, v0.16b, v1.16b \n"
"trn1 v18.16b, v2.16b, v3.16b \n"
"trn2 v19.16b, v2.16b, v3.16b \n"
"trn1 v20.16b, v4.16b, v5.16b \n"
"trn2 v21.16b, v4.16b, v5.16b \n"
"trn1 v22.16b, v6.16b, v7.16b \n"
"trn2 v23.16b, v6.16b, v7.16b \n"
"trn1 v0.8h, v16.8h, v18.8h \n"
"trn2 v1.8h, v16.8h, v18.8h \n"
"trn1 v2.8h, v20.8h, v22.8h \n"
"trn2 v3.8h, v20.8h, v22.8h \n"
"trn1 v4.8h, v17.8h, v19.8h \n"
"trn2 v5.8h, v17.8h, v19.8h \n"
"trn1 v6.8h, v21.8h, v23.8h \n"
"trn2 v7.8h, v21.8h, v23.8h \n"
"trn1 v16.4s, v0.4s, v2.4s \n"
"trn2 v17.4s, v0.4s, v2.4s \n"
"trn1 v18.4s, v1.4s, v3.4s \n"
"trn2 v19.4s, v1.4s, v3.4s \n"
"trn1 v20.4s, v4.4s, v6.4s \n"
"trn2 v21.4s, v4.4s, v6.4s \n"
"trn1 v22.4s, v5.4s, v7.4s \n"
"trn2 v23.4s, v5.4s, v7.4s \n"
"mov %0, %2 \n"
"st1 {v16.d}[0], [%0], %6 \n"
"st1 {v18.d}[0], [%0], %6 \n"
"st1 {v17.d}[0], [%0], %6 \n"
"st1 {v19.d}[0], [%0], %6 \n"
"st1 {v16.d}[1], [%0], %6 \n"
"st1 {v18.d}[1], [%0], %6 \n"
"st1 {v17.d}[1], [%0], %6 \n"
"st1 {v19.d}[1], [%0] \n"
"mov %0, %3 \n"
"st1 {v20.d}[0], [%0], %7 \n"
"st1 {v22.d}[0], [%0], %7 \n"
"st1 {v21.d}[0], [%0], %7 \n"
"st1 {v23.d}[0], [%0], %7 \n"
"st1 {v20.d}[1], [%0], %7 \n"
"st1 {v22.d}[1], [%0], %7 \n"
"st1 {v21.d}[1], [%0], %7 \n"
"st1 {v23.d}[1], [%0] \n"
"add %1, %1, #16 \n" // src += 8*2
"add %2, %2, %6, lsl #3 \n" // dst_a += 8 *
// dst_stride_a
"add %3, %3, %7, lsl #3 \n" // dst_b += 8 *
// dst_stride_b
"subs %w4, %w4, #8 \n" // w -= 8
"b.ge 1b \n"
// add 8 back to counter. if the result is 0 there are
// no residuals.
"adds %w4, %w4, #8 \n"
"b.eq 4f \n"
// some residual, so between 1 and 7 lines left to transpose
"cmp %w4, #2 \n"
"b.lt 3f \n"
"cmp %w4, #4 \n"
"b.lt 2f \n"
// TODO(frkoenig): Clean this up
// 4x8 block
"mov %0, %1 \n"
"ld1 {v0.8b}, [%0], %5 \n"
"ld1 {v1.8b}, [%0], %5 \n"
"ld1 {v2.8b}, [%0], %5 \n"
"ld1 {v3.8b}, [%0], %5 \n"
"ld1 {v4.8b}, [%0], %5 \n"
"ld1 {v5.8b}, [%0], %5 \n"
"ld1 {v6.8b}, [%0], %5 \n"
"ld1 {v7.8b}, [%0] \n"
"ld1 {v30.16b}, [%8], #16 \n"
"ld1 {v31.16b}, [%8] \n"
"tbl v16.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v30.16b \n"
"tbl v17.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v31.16b \n"
"tbl v18.16b, {v4.16b, v5.16b, v6.16b, v7.16b}, v30.16b \n"
"tbl v19.16b, {v4.16b, v5.16b, v6.16b, v7.16b}, v31.16b \n"
"mov %0, %2 \n"
"st1 {v16.s}[0], [%0], %6 \n"
"st1 {v16.s}[1], [%0], %6 \n"
"st1 {v16.s}[2], [%0], %6 \n"
"st1 {v16.s}[3], [%0], %6 \n"
"add %0, %2, #4 \n"
"st1 {v18.s}[0], [%0], %6 \n"
"st1 {v18.s}[1], [%0], %6 \n"
"st1 {v18.s}[2], [%0], %6 \n"
"st1 {v18.s}[3], [%0] \n"
"mov %0, %3 \n"
"st1 {v17.s}[0], [%0], %7 \n"
"st1 {v17.s}[1], [%0], %7 \n"
"st1 {v17.s}[2], [%0], %7 \n"
"st1 {v17.s}[3], [%0], %7 \n"
"add %0, %3, #4 \n"
"st1 {v19.s}[0], [%0], %7 \n"
"st1 {v19.s}[1], [%0], %7 \n"
"st1 {v19.s}[2], [%0], %7 \n"
"st1 {v19.s}[3], [%0] \n"
"add %1, %1, #8 \n" // src += 4 * 2
"add %2, %2, %6, lsl #2 \n" // dst_a += 4 *
// dst_stride_a
"add %3, %3, %7, lsl #2 \n" // dst_b += 4 *
// dst_stride_b
"subs %w4, %w4, #4 \n" // w -= 4
"b.eq 4f \n"
// some residual, check to see if it includes a 2x8 block,
// or less
"cmp %w4, #2 \n"
"b.lt 3f \n"
// 2x8 block
"2: \n"
"mov %0, %1 \n"
"ld2 {v0.h, v1.h}[0], [%0], %5 \n"
"ld2 {v2.h, v3.h}[0], [%0], %5 \n"
"ld2 {v0.h, v1.h}[1], [%0], %5 \n"
"ld2 {v2.h, v3.h}[1], [%0], %5 \n"
"ld2 {v0.h, v1.h}[2], [%0], %5 \n"
"ld2 {v2.h, v3.h}[2], [%0], %5 \n"
"ld2 {v0.h, v1.h}[3], [%0], %5 \n"
"ld2 {v2.h, v3.h}[3], [%0] \n"
"trn1 v4.8b, v0.8b, v2.8b \n"
"trn2 v5.8b, v0.8b, v2.8b \n"
"trn1 v6.8b, v1.8b, v3.8b \n"
"trn2 v7.8b, v1.8b, v3.8b \n"
"mov %0, %2 \n"
"st1 {v4.d}[0], [%0], %6 \n"
"st1 {v6.d}[0], [%0] \n"
"mov %0, %3 \n"
"st1 {v5.d}[0], [%0], %7 \n"
"st1 {v7.d}[0], [%0] \n"
"add %1, %1, #4 \n" // src += 2 * 2
"add %2, %2, %6, lsl #1 \n" // dst_a += 2 *
// dst_stride_a
"add %3, %3, %7, lsl #1 \n" // dst_b += 2 *
// dst_stride_b
"subs %w4, %w4, #2 \n" // w -= 2
"b.eq 4f \n"
// 1x8 block
"3: \n"
"ld2 {v0.b, v1.b}[0], [%1], %5 \n"
"ld2 {v0.b, v1.b}[1], [%1], %5 \n"
"ld2 {v0.b, v1.b}[2], [%1], %5 \n"
"ld2 {v0.b, v1.b}[3], [%1], %5 \n"
"ld2 {v0.b, v1.b}[4], [%1], %5 \n"
"ld2 {v0.b, v1.b}[5], [%1], %5 \n"
"ld2 {v0.b, v1.b}[6], [%1], %5 \n"
"ld2 {v0.b, v1.b}[7], [%1] \n"
"st1 {v0.d}[0], [%2] \n"
"st1 {v1.d}[0], [%3] \n"
"4: \n"
: "=&r"(src_temp), // %0
"+r"(src), // %1
"+r"(dst_a), // %2
"+r"(dst_b), // %3
"+r"(width) // %4
: "r"(static_cast<ptrdiff_t>(src_stride)), // %5
"r"(static_cast<ptrdiff_t>(dst_stride_a)), // %6
"r"(static_cast<ptrdiff_t>(dst_stride_b)), // %7
"r"(&kVTbl4x4TransposeDi) // %8
: "memory", "cc", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16",
"v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30", "v31");
}
#endif // !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

253
thirdparty/libyuv/source/rotate_win.cc vendored Normal file
View File

@@ -0,0 +1,253 @@
/*
* Copyright 2013 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/rotate_row.h"
#include "libyuv/row.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
// This module is for 32 bit Visual C x86
#if !defined(LIBYUV_DISABLE_X86) && defined(_MSC_VER) && \
!defined(__clang__) && defined(_M_IX86)
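// __declspec(naked) functions get no compiler prologue/epilogue: the asm
// blocks read their arguments directly from esp-relative offsets and
// save/restore the callee-saved registers themselves.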
__declspec(naked) void TransposeWx8_SSSE3(const uint8_t* src,
int src_stride,
uint8_t* dst,
int dst_stride,
int width) {
__asm {
push edi
push esi
push ebp
mov eax, [esp + 12 + 4] // src
mov edi, [esp + 12 + 8] // src_stride
mov edx, [esp + 12 + 12] // dst
mov esi, [esp + 12 + 16] // dst_stride
mov ecx, [esp + 12 + 20] // width
// Read in the data from the source pointer.
// First round of bit swap.
align 4
convertloop:
movq xmm0, qword ptr [eax]
lea ebp, [eax + 8]
movq xmm1, qword ptr [eax + edi]
lea eax, [eax + 2 * edi]
punpcklbw xmm0, xmm1
movq xmm2, qword ptr [eax]
movdqa xmm1, xmm0
palignr xmm1, xmm1, 8
movq xmm3, qword ptr [eax + edi]
lea eax, [eax + 2 * edi]
punpcklbw xmm2, xmm3
movdqa xmm3, xmm2
movq xmm4, qword ptr [eax]
palignr xmm3, xmm3, 8
movq xmm5, qword ptr [eax + edi]
punpcklbw xmm4, xmm5
lea eax, [eax + 2 * edi]
movdqa xmm5, xmm4
movq xmm6, qword ptr [eax]
palignr xmm5, xmm5, 8
movq xmm7, qword ptr [eax + edi]
punpcklbw xmm6, xmm7
mov eax, ebp
movdqa xmm7, xmm6
palignr xmm7, xmm7, 8
// Second round of bit swap.
punpcklwd xmm0, xmm2
punpcklwd xmm1, xmm3
movdqa xmm2, xmm0
movdqa xmm3, xmm1
palignr xmm2, xmm2, 8
palignr xmm3, xmm3, 8
punpcklwd xmm4, xmm6
punpcklwd xmm5, xmm7
movdqa xmm6, xmm4
movdqa xmm7, xmm5
palignr xmm6, xmm6, 8
palignr xmm7, xmm7, 8
// Third round of bit swap.
// Write to the destination pointer.
punpckldq xmm0, xmm4
movq qword ptr [edx], xmm0
movdqa xmm4, xmm0
palignr xmm4, xmm4, 8
movq qword ptr [edx + esi], xmm4
lea edx, [edx + 2 * esi]
punpckldq xmm2, xmm6
movdqa xmm6, xmm2
palignr xmm6, xmm6, 8
movq qword ptr [edx], xmm2
punpckldq xmm1, xmm5
movq qword ptr [edx + esi], xmm6
lea edx, [edx + 2 * esi]
movdqa xmm5, xmm1
movq qword ptr [edx], xmm1
palignr xmm5, xmm5, 8
punpckldq xmm3, xmm7
movq qword ptr [edx + esi], xmm5
lea edx, [edx + 2 * esi]
movq qword ptr [edx], xmm3
movdqa xmm7, xmm3
palignr xmm7, xmm7, 8
sub ecx, 8
movq qword ptr [edx + esi], xmm7
lea edx, [edx + 2 * esi]
jg convertloop
pop ebp
pop esi
pop edi
ret
}
}
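// The UV transpose needs nine 128-bit temporaries but 32-bit x86 exposes
// only xmm0-xmm7, so xmm5 and later xmm6 are spilled to a 16-byte
// aligned stack slot; the esp adjustment at the top of the function
// carves out that slot and stashes the original esp at [esp + 16].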
__declspec(naked) void TransposeUVWx8_SSE2(const uint8_t* src,
int src_stride,
uint8_t* dst_a,
int dst_stride_a,
uint8_t* dst_b,
int dst_stride_b,
int w) {
__asm {
push ebx
push esi
push edi
push ebp
mov eax, [esp + 16 + 4] // src
mov edi, [esp + 16 + 8] // src_stride
mov edx, [esp + 16 + 12] // dst_a
mov esi, [esp + 16 + 16] // dst_stride_a
mov ebx, [esp + 16 + 20] // dst_b
mov ebp, [esp + 16 + 24] // dst_stride_b
mov ecx, esp
sub esp, 4 + 16
and esp, ~15
mov [esp + 16], ecx
mov ecx, [ecx + 16 + 28] // w
align 4
// Read in the data from the source pointer.
// First round of bit swap.
convertloop:
movdqu xmm0, [eax]
movdqu xmm1, [eax + edi]
lea eax, [eax + 2 * edi]
movdqa xmm7, xmm0 // use xmm7 as temp register.
punpcklbw xmm0, xmm1
punpckhbw xmm7, xmm1
movdqa xmm1, xmm7
movdqu xmm2, [eax]
movdqu xmm3, [eax + edi]
lea eax, [eax + 2 * edi]
movdqa xmm7, xmm2
punpcklbw xmm2, xmm3
punpckhbw xmm7, xmm3
movdqa xmm3, xmm7
movdqu xmm4, [eax]
movdqu xmm5, [eax + edi]
lea eax, [eax + 2 * edi]
movdqa xmm7, xmm4
punpcklbw xmm4, xmm5
punpckhbw xmm7, xmm5
movdqa xmm5, xmm7
movdqu xmm6, [eax]
movdqu xmm7, [eax + edi]
lea eax, [eax + 2 * edi]
movdqu [esp], xmm5 // backup xmm5
neg edi
movdqa xmm5, xmm6 // use xmm5 as temp register.
punpcklbw xmm6, xmm7
punpckhbw xmm5, xmm7
movdqa xmm7, xmm5
lea eax, [eax + 8 * edi + 16]
neg edi
// Second round of bit swap.
movdqa xmm5, xmm0
punpcklwd xmm0, xmm2
punpckhwd xmm5, xmm2
movdqa xmm2, xmm5
movdqa xmm5, xmm1
punpcklwd xmm1, xmm3
punpckhwd xmm5, xmm3
movdqa xmm3, xmm5
movdqa xmm5, xmm4
punpcklwd xmm4, xmm6
punpckhwd xmm5, xmm6
movdqa xmm6, xmm5
movdqu xmm5, [esp] // restore xmm5
movdqu [esp], xmm6 // backup xmm6
movdqa xmm6, xmm5 // use xmm6 as temp register.
punpcklwd xmm5, xmm7
punpckhwd xmm6, xmm7
movdqa xmm7, xmm6
// Third round of bit swap.
// Write to the destination pointer.
movdqa xmm6, xmm0
punpckldq xmm0, xmm4
punpckhdq xmm6, xmm4
movdqa xmm4, xmm6
movdqu xmm6, [esp] // restore xmm6
movlpd qword ptr [edx], xmm0
movhpd qword ptr [ebx], xmm0
movlpd qword ptr [edx + esi], xmm4
lea edx, [edx + 2 * esi]
movhpd qword ptr [ebx + ebp], xmm4
lea ebx, [ebx + 2 * ebp]
movdqa xmm0, xmm2 // use xmm0 as the temp register.
punpckldq xmm2, xmm6
movlpd qword ptr [edx], xmm2
movhpd qword ptr [ebx], xmm2
punpckhdq xmm0, xmm6
movlpd qword ptr [edx + esi], xmm0
lea edx, [edx + 2 * esi]
movhpd qword ptr [ebx + ebp], xmm0
lea ebx, [ebx + 2 * ebp]
movdqa xmm0, xmm1 // use xmm0 as the temp register.
punpckldq xmm1, xmm5
movlpd qword ptr [edx], xmm1
movhpd qword ptr [ebx], xmm1
punpckhdq xmm0, xmm5
movlpd qword ptr [edx + esi], xmm0
lea edx, [edx + 2 * esi]
movhpd qword ptr [ebx + ebp], xmm0
lea ebx, [ebx + 2 * ebp]
movdqa xmm0, xmm3 // use xmm0 as the temp register.
punpckldq xmm3, xmm7
movlpd qword ptr [edx], xmm3
movhpd qword ptr [ebx], xmm3
punpckhdq xmm0, xmm7
sub ecx, 8
movlpd qword ptr [edx + esi], xmm0
lea edx, [edx + 2 * esi]
movhpd qword ptr [ebx + ebp], xmm0
lea ebx, [ebx + 2 * ebp]
jg convertloop
mov esp, [esp + 16]
pop ebp
pop edi
pop esi
pop ebx
ret
}
}
#endif  // !defined(LIBYUV_DISABLE_X86) && defined(_MSC_VER) && defined(_M_IX86)
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif

2071
thirdparty/libyuv/source/row_any.cc vendored Normal file

File diff suppressed because it is too large Load Diff

4212
thirdparty/libyuv/source/row_common.cc vendored Normal file

File diff suppressed because it is too large Load Diff

9195
thirdparty/libyuv/source/row_gcc.cc vendored Normal file

File diff suppressed because it is too large Load Diff

7842
thirdparty/libyuv/source/row_mmi.cc vendored Normal file

File diff suppressed because it is too large Load Diff

3620
thirdparty/libyuv/source/row_msa.cc vendored Normal file

File diff suppressed because it is too large Load Diff

3577
thirdparty/libyuv/source/row_neon.cc vendored Normal file

File diff suppressed because it is too large Load Diff

3855
thirdparty/libyuv/source/row_neon64.cc vendored Normal file

File diff suppressed because it is too large Load Diff

6404
thirdparty/libyuv/source/row_win.cc vendored Normal file

File diff suppressed because it is too large Load Diff

2385
thirdparty/libyuv/source/scale.cc vendored Normal file

File diff suppressed because it is too large Load Diff

1026
thirdparty/libyuv/source/scale_any.cc vendored Normal file

File diff suppressed because it is too large Load Diff

1091
thirdparty/libyuv/source/scale_argb.cc vendored Normal file

File diff suppressed because it is too large Load Diff

1769
thirdparty/libyuv/source/scale_common.cc vendored Normal file

File diff suppressed because it is too large Load Diff

2948
thirdparty/libyuv/source/scale_gcc.cc vendored Normal file

File diff suppressed because it is too large Load Diff

1168
thirdparty/libyuv/source/scale_mmi.cc vendored Normal file

File diff suppressed because it is too large Load Diff

949
thirdparty/libyuv/source/scale_msa.cc vendored Normal file
View File

@@ -0,0 +1,949 @@
/*
* Copyright 2016 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <assert.h>
#include "libyuv/scale_row.h"
// This module is for GCC MSA
#if !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)
#include "libyuv/macros_msa.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
#define LOAD_INDEXED_DATA(srcp, indx0, out0) \
{ \
out0[0] = srcp[indx0[0]]; \
out0[1] = srcp[indx0[1]]; \
out0[2] = srcp[indx0[2]]; \
out0[3] = srcp[indx0[3]]; \
}
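// MSA has no gather instruction, so LOAD_INDEXED_DATA emulates a 4-lane
// gather with scalar indexed loads: out0[k] = srcp[indx0[k]] for each of
// the four lanes.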
void ScaleARGBRowDown2_MSA(const uint8_t* src_argb,
ptrdiff_t src_stride,
uint8_t* dst_argb,
int dst_width) {
int x;
v16u8 src0, src1, dst0;
(void)src_stride;
for (x = 0; x < dst_width; x += 4) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);
dst0 = (v16u8)__msa_pckod_w((v4i32)src1, (v4i32)src0);
ST_UB(dst0, dst_argb);
src_argb += 32;
dst_argb += 16;
}
}
void ScaleARGBRowDown2Linear_MSA(const uint8_t* src_argb,
ptrdiff_t src_stride,
uint8_t* dst_argb,
int dst_width) {
int x;
v16u8 src0, src1, vec0, vec1, dst0;
(void)src_stride;
for (x = 0; x < dst_width; x += 4) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);
vec0 = (v16u8)__msa_pckev_w((v4i32)src1, (v4i32)src0);
vec1 = (v16u8)__msa_pckod_w((v4i32)src1, (v4i32)src0);
dst0 = (v16u8)__msa_aver_u_b((v16u8)vec0, (v16u8)vec1);
ST_UB(dst0, dst_argb);
src_argb += 32;
dst_argb += 16;
}
}
void ScaleARGBRowDown2Box_MSA(const uint8_t* src_argb,
ptrdiff_t src_stride,
uint8_t* dst_argb,
int dst_width) {
int x;
const uint8_t* s = src_argb;
const uint8_t* t = src_argb + src_stride;
v16u8 src0, src1, src2, src3, vec0, vec1, vec2, vec3, dst0;
v8u16 reg0, reg1, reg2, reg3;
v16i8 shuffler = {0, 4, 1, 5, 2, 6, 3, 7, 8, 12, 9, 13, 10, 14, 11, 15};
for (x = 0; x < dst_width; x += 4) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)t, 0);
src3 = (v16u8)__msa_ld_b((v16i8*)t, 16);
vec0 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src0, (v16i8)src0);
vec1 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src1, (v16i8)src1);
vec2 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src2, (v16i8)src2);
vec3 = (v16u8)__msa_vshf_b(shuffler, (v16i8)src3, (v16i8)src3);
reg0 = __msa_hadd_u_h(vec0, vec0);
reg1 = __msa_hadd_u_h(vec1, vec1);
reg2 = __msa_hadd_u_h(vec2, vec2);
reg3 = __msa_hadd_u_h(vec3, vec3);
reg0 += reg2;
reg1 += reg3;
reg0 = (v8u16)__msa_srari_h((v8i16)reg0, 2);
reg1 = (v8u16)__msa_srari_h((v8i16)reg1, 2);
dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
ST_UB(dst0, dst_argb);
s += 32;
t += 32;
dst_argb += 16;
}
}
void ScaleARGBRowDownEven_MSA(const uint8_t* src_argb,
ptrdiff_t src_stride,
int32_t src_stepx,
uint8_t* dst_argb,
int dst_width) {
int x;
int32_t stepx = src_stepx * 4;
int32_t data0, data1, data2, data3;
(void)src_stride;
for (x = 0; x < dst_width; x += 4) {
data0 = LW(src_argb);
data1 = LW(src_argb + stepx);
data2 = LW(src_argb + stepx * 2);
data3 = LW(src_argb + stepx * 3);
SW(data0, dst_argb);
SW(data1, dst_argb + 4);
SW(data2, dst_argb + 8);
SW(data3, dst_argb + 12);
src_argb += stepx * 4;
dst_argb += 16;
}
}
void ScaleARGBRowDownEvenBox_MSA(const uint8_t* src_argb,
ptrdiff_t src_stride,
int src_stepx,
uint8_t* dst_argb,
int dst_width) {
int x;
const uint8_t* nxt_argb = src_argb + src_stride;
int32_t stepx = src_stepx * 4;
int64_t data0, data1, data2, data3;
v16u8 src0 = {0}, src1 = {0}, src2 = {0}, src3 = {0};
v16u8 vec0, vec1, vec2, vec3;
v8u16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
v16u8 dst0;
for (x = 0; x < dst_width; x += 4) {
data0 = LD(src_argb);
data1 = LD(src_argb + stepx);
data2 = LD(src_argb + stepx * 2);
data3 = LD(src_argb + stepx * 3);
src0 = (v16u8)__msa_insert_d((v2i64)src0, 0, data0);
src0 = (v16u8)__msa_insert_d((v2i64)src0, 1, data1);
src1 = (v16u8)__msa_insert_d((v2i64)src1, 0, data2);
src1 = (v16u8)__msa_insert_d((v2i64)src1, 1, data3);
data0 = LD(nxt_argb);
data1 = LD(nxt_argb + stepx);
data2 = LD(nxt_argb + stepx * 2);
data3 = LD(nxt_argb + stepx * 3);
src2 = (v16u8)__msa_insert_d((v2i64)src2, 0, data0);
src2 = (v16u8)__msa_insert_d((v2i64)src2, 1, data1);
src3 = (v16u8)__msa_insert_d((v2i64)src3, 0, data2);
src3 = (v16u8)__msa_insert_d((v2i64)src3, 1, data3);
vec0 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
vec1 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
vec2 = (v16u8)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
vec3 = (v16u8)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
reg0 = __msa_hadd_u_h(vec0, vec0);
reg1 = __msa_hadd_u_h(vec1, vec1);
reg2 = __msa_hadd_u_h(vec2, vec2);
reg3 = __msa_hadd_u_h(vec3, vec3);
reg4 = (v8u16)__msa_pckev_d((v2i64)reg2, (v2i64)reg0);
reg5 = (v8u16)__msa_pckev_d((v2i64)reg3, (v2i64)reg1);
reg6 = (v8u16)__msa_pckod_d((v2i64)reg2, (v2i64)reg0);
reg7 = (v8u16)__msa_pckod_d((v2i64)reg3, (v2i64)reg1);
reg4 += reg6;
reg5 += reg7;
reg4 = (v8u16)__msa_srari_h((v8i16)reg4, 2);
reg5 = (v8u16)__msa_srari_h((v8i16)reg5, 2);
dst0 = (v16u8)__msa_pckev_b((v16i8)reg5, (v16i8)reg4);
ST_UB(dst0, dst_argb);
src_argb += stepx * 4;
nxt_argb += stepx * 4;
dst_argb += 16;
}
}
void ScaleRowDown2_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x;
v16u8 src0, src1, src2, src3, dst0, dst1;
(void)src_stride;
for (x = 0; x < dst_width; x += 32) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 48);
dst0 = (v16u8)__msa_pckod_b((v16i8)src1, (v16i8)src0);
dst1 = (v16u8)__msa_pckod_b((v16i8)src3, (v16i8)src2);
ST_UB2(dst0, dst1, dst, 16);
src_ptr += 64;
dst += 32;
}
}
void ScaleRowDown2Linear_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x;
v16u8 src0, src1, src2, src3, vec0, vec1, vec2, vec3, dst0, dst1;
(void)src_stride;
for (x = 0; x < dst_width; x += 32) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 48);
vec0 = (v16u8)__msa_pckev_b((v16i8)src1, (v16i8)src0);
vec2 = (v16u8)__msa_pckev_b((v16i8)src3, (v16i8)src2);
vec1 = (v16u8)__msa_pckod_b((v16i8)src1, (v16i8)src0);
vec3 = (v16u8)__msa_pckod_b((v16i8)src3, (v16i8)src2);
dst0 = __msa_aver_u_b(vec1, vec0);
dst1 = __msa_aver_u_b(vec3, vec2);
ST_UB2(dst0, dst1, dst, 16);
src_ptr += 64;
dst += 32;
}
}
void ScaleRowDown2Box_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x;
const uint8_t* s = src_ptr;
const uint8_t* t = src_ptr + src_stride;
v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0, dst1;
v8u16 vec0, vec1, vec2, vec3;
for (x = 0; x < dst_width; x += 32) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)s, 48);
src4 = (v16u8)__msa_ld_b((v16i8*)t, 0);
src5 = (v16u8)__msa_ld_b((v16i8*)t, 16);
src6 = (v16u8)__msa_ld_b((v16i8*)t, 32);
src7 = (v16u8)__msa_ld_b((v16i8*)t, 48);
vec0 = __msa_hadd_u_h(src0, src0);
vec1 = __msa_hadd_u_h(src1, src1);
vec2 = __msa_hadd_u_h(src2, src2);
vec3 = __msa_hadd_u_h(src3, src3);
vec0 += __msa_hadd_u_h(src4, src4);
vec1 += __msa_hadd_u_h(src5, src5);
vec2 += __msa_hadd_u_h(src6, src6);
vec3 += __msa_hadd_u_h(src7, src7);
vec0 = (v8u16)__msa_srari_h((v8i16)vec0, 2);
vec1 = (v8u16)__msa_srari_h((v8i16)vec1, 2);
vec2 = (v8u16)__msa_srari_h((v8i16)vec2, 2);
vec3 = (v8u16)__msa_srari_h((v8i16)vec3, 2);
dst0 = (v16u8)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
dst1 = (v16u8)__msa_pckev_b((v16i8)vec3, (v16i8)vec2);
ST_UB2(dst0, dst1, dst, 16);
s += 64;
t += 64;
dst += 32;
}
}
void ScaleRowDown4_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x;
v16u8 src0, src1, src2, src3, vec0, vec1, dst0;
(void)src_stride;
for (x = 0; x < dst_width; x += 16) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 48);
vec0 = (v16u8)__msa_pckev_b((v16i8)src1, (v16i8)src0);
vec1 = (v16u8)__msa_pckev_b((v16i8)src3, (v16i8)src2);
dst0 = (v16u8)__msa_pckod_b((v16i8)vec1, (v16i8)vec0);
ST_UB(dst0, dst);
src_ptr += 64;
dst += 16;
}
}
void ScaleRowDown4Box_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x;
const uint8_t* s = src_ptr;
const uint8_t* t0 = s + src_stride;
const uint8_t* t1 = s + src_stride * 2;
const uint8_t* t2 = s + src_stride * 3;
v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0;
v8u16 vec0, vec1, vec2, vec3;
v4u32 reg0, reg1, reg2, reg3;
for (x = 0; x < dst_width; x += 16) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)s, 48);
src4 = (v16u8)__msa_ld_b((v16i8*)t0, 0);
src5 = (v16u8)__msa_ld_b((v16i8*)t0, 16);
src6 = (v16u8)__msa_ld_b((v16i8*)t0, 32);
src7 = (v16u8)__msa_ld_b((v16i8*)t0, 48);
vec0 = __msa_hadd_u_h(src0, src0);
vec1 = __msa_hadd_u_h(src1, src1);
vec2 = __msa_hadd_u_h(src2, src2);
vec3 = __msa_hadd_u_h(src3, src3);
vec0 += __msa_hadd_u_h(src4, src4);
vec1 += __msa_hadd_u_h(src5, src5);
vec2 += __msa_hadd_u_h(src6, src6);
vec3 += __msa_hadd_u_h(src7, src7);
src0 = (v16u8)__msa_ld_b((v16i8*)t1, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)t1, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)t1, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)t1, 48);
src4 = (v16u8)__msa_ld_b((v16i8*)t2, 0);
src5 = (v16u8)__msa_ld_b((v16i8*)t2, 16);
src6 = (v16u8)__msa_ld_b((v16i8*)t2, 32);
src7 = (v16u8)__msa_ld_b((v16i8*)t2, 48);
vec0 += __msa_hadd_u_h(src0, src0);
vec1 += __msa_hadd_u_h(src1, src1);
vec2 += __msa_hadd_u_h(src2, src2);
vec3 += __msa_hadd_u_h(src3, src3);
vec0 += __msa_hadd_u_h(src4, src4);
vec1 += __msa_hadd_u_h(src5, src5);
vec2 += __msa_hadd_u_h(src6, src6);
vec3 += __msa_hadd_u_h(src7, src7);
reg0 = __msa_hadd_u_w(vec0, vec0);
reg1 = __msa_hadd_u_w(vec1, vec1);
reg2 = __msa_hadd_u_w(vec2, vec2);
reg3 = __msa_hadd_u_w(vec3, vec3);
reg0 = (v4u32)__msa_srari_w((v4i32)reg0, 4);
reg1 = (v4u32)__msa_srari_w((v4i32)reg1, 4);
reg2 = (v4u32)__msa_srari_w((v4i32)reg2, 4);
reg3 = (v4u32)__msa_srari_w((v4i32)reg3, 4);
vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0);
vec1 = (v8u16)__msa_pckev_h((v8i16)reg3, (v8i16)reg2);
dst0 = (v16u8)__msa_pckev_b((v16i8)vec1, (v16i8)vec0);
ST_UB(dst0, dst);
s += 64;
t0 += 64;
t1 += 64;
t2 += 64;
dst += 16;
}
}
void ScaleRowDown38_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x, width;
uint64_t dst0;
uint32_t dst1;
v16u8 src0, src1, vec0;
v16i8 mask = {0, 3, 6, 8, 11, 14, 16, 19, 22, 24, 27, 30, 0, 0, 0, 0};
(void)src_stride;
assert(dst_width % 3 == 0);
width = dst_width / 3;
for (x = 0; x < width; x += 4) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
vec0 = (v16u8)__msa_vshf_b(mask, (v16i8)src1, (v16i8)src0);
dst0 = __msa_copy_u_d((v2i64)vec0, 0);
dst1 = __msa_copy_u_w((v4i32)vec0, 2);
SD(dst0, dst);
SW(dst1, dst + 8);
src_ptr += 32;
dst += 12;
}
}
void ScaleRowDown38_2_Box_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst_ptr,
int dst_width) {
int x, width;
const uint8_t* s = src_ptr;
const uint8_t* t = src_ptr + src_stride;
uint64_t dst0;
uint32_t dst1;
v16u8 src0, src1, src2, src3, out;
v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v4u32 tmp0, tmp1, tmp2, tmp3, tmp4;
v8i16 zero = {0};
v8i16 mask = {0, 1, 2, 8, 3, 4, 5, 9};
v16i8 dst_mask = {0, 2, 16, 4, 6, 18, 8, 10, 20, 12, 14, 22, 0, 0, 0, 0};
v4u32 const_0x2AAA = (v4u32)__msa_fill_w(0x2AAA);
v4u32 const_0x4000 = (v4u32)__msa_fill_w(0x4000);
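  // Fixed-point reciprocals for the 8 -> 3 horizontal reduction over two
  // rows: 0x2AAA ~= 65536/6 averages the two 3x2-pixel boxes and
  // 0x4000 = 65536/4 the trailing 2x2 box; the products are shifted
  // right by 16 below.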
assert((dst_width % 3 == 0) && (dst_width > 0));
width = dst_width / 3;
for (x = 0; x < width; x += 4) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)t, 0);
src3 = (v16u8)__msa_ld_b((v16i8*)t, 16);
vec0 = (v8u16)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
vec1 = (v8u16)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
vec2 = (v8u16)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
vec3 = (v8u16)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
vec0 = __msa_hadd_u_h((v16u8)vec0, (v16u8)vec0);
vec1 = __msa_hadd_u_h((v16u8)vec1, (v16u8)vec1);
vec2 = __msa_hadd_u_h((v16u8)vec2, (v16u8)vec2);
vec3 = __msa_hadd_u_h((v16u8)vec3, (v16u8)vec3);
vec4 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec0);
vec5 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec1);
vec6 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec2);
vec7 = (v8u16)__msa_vshf_h(mask, zero, (v8i16)vec3);
vec0 = (v8u16)__msa_pckod_w((v4i32)vec1, (v4i32)vec0);
vec1 = (v8u16)__msa_pckod_w((v4i32)vec3, (v4i32)vec2);
vec0 = (v8u16)__msa_pckod_w((v4i32)vec1, (v4i32)vec0);
tmp0 = __msa_hadd_u_w(vec4, vec4);
tmp1 = __msa_hadd_u_w(vec5, vec5);
tmp2 = __msa_hadd_u_w(vec6, vec6);
tmp3 = __msa_hadd_u_w(vec7, vec7);
tmp4 = __msa_hadd_u_w(vec0, vec0);
vec0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
vec1 = (v8u16)__msa_pckev_h((v8i16)tmp3, (v8i16)tmp2);
tmp0 = __msa_hadd_u_w(vec0, vec0);
tmp1 = __msa_hadd_u_w(vec1, vec1);
tmp0 *= const_0x2AAA;
tmp1 *= const_0x2AAA;
tmp4 *= const_0x4000;
tmp0 = (v4u32)__msa_srai_w((v4i32)tmp0, 16);
tmp1 = (v4u32)__msa_srai_w((v4i32)tmp1, 16);
tmp4 = (v4u32)__msa_srai_w((v4i32)tmp4, 16);
vec0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
vec1 = (v8u16)__msa_pckev_h((v8i16)tmp4, (v8i16)tmp4);
out = (v16u8)__msa_vshf_b(dst_mask, (v16i8)vec1, (v16i8)vec0);
dst0 = __msa_copy_u_d((v2i64)out, 0);
dst1 = __msa_copy_u_w((v4i32)out, 2);
SD(dst0, dst_ptr);
SW(dst1, dst_ptr + 8);
s += 32;
t += 32;
dst_ptr += 12;
}
}
void ScaleRowDown38_3_Box_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst_ptr,
int dst_width) {
int x, width;
const uint8_t* s = src_ptr;
const uint8_t* t0 = s + src_stride;
const uint8_t* t1 = s + src_stride * 2;
uint64_t dst0;
uint32_t dst1;
v16u8 src0, src1, src2, src3, src4, src5, out;
v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v4u32 tmp0, tmp1, tmp2, tmp3, tmp4;
v8u16 zero = {0};
v8i16 mask = {0, 1, 2, 8, 3, 4, 5, 9};
v16i8 dst_mask = {0, 2, 16, 4, 6, 18, 8, 10, 20, 12, 14, 22, 0, 0, 0, 0};
v4u32 const_0x1C71 = (v4u32)__msa_fill_w(0x1C71);
v4u32 const_0x2AAA = (v4u32)__msa_fill_w(0x2AAA);
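  // Same scheme over three rows: 0x1C71 ~= 65536/9 for the 3x3 boxes and
  // 0x2AAA ~= 65536/6 for the trailing 2x3 box.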
assert((dst_width % 3 == 0) && (dst_width > 0));
width = dst_width / 3;
for (x = 0; x < width; x += 4) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)t0, 0);
src3 = (v16u8)__msa_ld_b((v16i8*)t0, 16);
src4 = (v16u8)__msa_ld_b((v16i8*)t1, 0);
src5 = (v16u8)__msa_ld_b((v16i8*)t1, 16);
vec0 = (v8u16)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
vec1 = (v8u16)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
vec2 = (v8u16)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
vec3 = (v8u16)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
vec4 = (v8u16)__msa_ilvr_b((v16i8)zero, (v16i8)src4);
vec5 = (v8u16)__msa_ilvl_b((v16i8)zero, (v16i8)src4);
vec6 = (v8u16)__msa_ilvr_b((v16i8)zero, (v16i8)src5);
vec7 = (v8u16)__msa_ilvl_b((v16i8)zero, (v16i8)src5);
vec0 = __msa_hadd_u_h((v16u8)vec0, (v16u8)vec0);
vec1 = __msa_hadd_u_h((v16u8)vec1, (v16u8)vec1);
vec2 = __msa_hadd_u_h((v16u8)vec2, (v16u8)vec2);
vec3 = __msa_hadd_u_h((v16u8)vec3, (v16u8)vec3);
vec0 += __msa_hadd_u_h((v16u8)vec4, (v16u8)vec4);
vec1 += __msa_hadd_u_h((v16u8)vec5, (v16u8)vec5);
vec2 += __msa_hadd_u_h((v16u8)vec6, (v16u8)vec6);
vec3 += __msa_hadd_u_h((v16u8)vec7, (v16u8)vec7);
vec4 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec0);
vec5 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec1);
vec6 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec2);
vec7 = (v8u16)__msa_vshf_h(mask, (v8i16)zero, (v8i16)vec3);
vec0 = (v8u16)__msa_pckod_w((v4i32)vec1, (v4i32)vec0);
vec1 = (v8u16)__msa_pckod_w((v4i32)vec3, (v4i32)vec2);
vec0 = (v8u16)__msa_pckod_w((v4i32)vec1, (v4i32)vec0);
tmp0 = __msa_hadd_u_w(vec4, vec4);
tmp1 = __msa_hadd_u_w(vec5, vec5);
tmp2 = __msa_hadd_u_w(vec6, vec6);
tmp3 = __msa_hadd_u_w(vec7, vec7);
tmp4 = __msa_hadd_u_w(vec0, vec0);
vec0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
vec1 = (v8u16)__msa_pckev_h((v8i16)tmp3, (v8i16)tmp2);
tmp0 = __msa_hadd_u_w(vec0, vec0);
tmp1 = __msa_hadd_u_w(vec1, vec1);
tmp0 *= const_0x1C71;
tmp1 *= const_0x1C71;
tmp4 *= const_0x2AAA;
tmp0 = (v4u32)__msa_srai_w((v4i32)tmp0, 16);
tmp1 = (v4u32)__msa_srai_w((v4i32)tmp1, 16);
tmp4 = (v4u32)__msa_srai_w((v4i32)tmp4, 16);
vec0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
vec1 = (v8u16)__msa_pckev_h((v8i16)tmp4, (v8i16)tmp4);
out = (v16u8)__msa_vshf_b(dst_mask, (v16i8)vec1, (v16i8)vec0);
dst0 = __msa_copy_u_d((v2i64)out, 0);
dst1 = __msa_copy_u_w((v4i32)out, 2);
SD(dst0, dst_ptr);
SW(dst1, dst_ptr + 8);
s += 32;
t0 += 32;
t1 += 32;
dst_ptr += 12;
}
}
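// Widens and adds one row of bytes into 16-bit accumulators
// (dst_ptr[i] += src_ptr[i]); used to accumulate source rows for box
// filtering before the final average.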
void ScaleAddRow_MSA(const uint8_t* src_ptr, uint16_t* dst_ptr, int src_width) {
int x;
v16u8 src0;
v8u16 dst0, dst1;
v16i8 zero = {0};
assert(src_width > 0);
for (x = 0; x < src_width; x += 16) {
src0 = LD_UB(src_ptr);
dst0 = (v8u16)__msa_ld_h((v8i16*)dst_ptr, 0);
dst1 = (v8u16)__msa_ld_h((v8i16*)dst_ptr, 16);
dst0 += (v8u16)__msa_ilvr_b(zero, (v16i8)src0);
dst1 += (v8u16)__msa_ilvl_b(zero, (v16i8)src0);
ST_UH2(dst0, dst1, dst_ptr, 8);
src_ptr += 16;
dst_ptr += 16;
}
}
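// Bilinear horizontal scale with 16.16 fixed-point x/dx. Scalar sketch of
// one output pixel (the vector loop below computes 16 per iteration):
//   xi = x >> 16;                                  // integer source index
//   f = (x & 0xffff) >> 9;                         // 7-bit fraction
//   dst = src[xi] + (((src[xi + 1] - src[xi]) * f + 0x40) >> 7);
//   x += dx;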
void ScaleFilterCols_MSA(uint8_t* dst_ptr,
const uint8_t* src_ptr,
int dst_width,
int x,
int dx) {
int j;
v4i32 vec_x = __msa_fill_w(x);
v4i32 vec_dx = __msa_fill_w(dx);
v4i32 vec_const = {0, 1, 2, 3};
v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
v8u16 reg0, reg1;
v16u8 dst0;
v4i32 const_0xFFFF = __msa_fill_w(0xFFFF);
v4i32 const_0x40 = __msa_fill_w(0x40);
vec0 = vec_dx * vec_const;
vec1 = vec_dx * 4;
vec_x += vec0;
for (j = 0; j < dst_width - 1; j += 16) {
vec2 = vec_x >> 16;
vec6 = vec_x & const_0xFFFF;
vec_x += vec1;
vec3 = vec_x >> 16;
vec7 = vec_x & const_0xFFFF;
vec_x += vec1;
vec4 = vec_x >> 16;
vec8 = vec_x & const_0xFFFF;
vec_x += vec1;
vec5 = vec_x >> 16;
vec9 = vec_x & const_0xFFFF;
vec_x += vec1;
vec6 >>= 9;
vec7 >>= 9;
vec8 >>= 9;
vec9 >>= 9;
LOAD_INDEXED_DATA(src_ptr, vec2, tmp0);
LOAD_INDEXED_DATA(src_ptr, vec3, tmp1);
LOAD_INDEXED_DATA(src_ptr, vec4, tmp2);
LOAD_INDEXED_DATA(src_ptr, vec5, tmp3);
vec2 += 1;
vec3 += 1;
vec4 += 1;
vec5 += 1;
LOAD_INDEXED_DATA(src_ptr, vec2, tmp4);
LOAD_INDEXED_DATA(src_ptr, vec3, tmp5);
LOAD_INDEXED_DATA(src_ptr, vec4, tmp6);
LOAD_INDEXED_DATA(src_ptr, vec5, tmp7);
tmp4 -= tmp0;
tmp5 -= tmp1;
tmp6 -= tmp2;
tmp7 -= tmp3;
tmp4 *= vec6;
tmp5 *= vec7;
tmp6 *= vec8;
tmp7 *= vec9;
tmp4 += const_0x40;
tmp5 += const_0x40;
tmp6 += const_0x40;
tmp7 += const_0x40;
tmp4 >>= 7;
tmp5 >>= 7;
tmp6 >>= 7;
tmp7 >>= 7;
tmp0 += tmp4;
tmp1 += tmp5;
tmp2 += tmp6;
tmp3 += tmp7;
reg0 = (v8u16)__msa_pckev_h((v8i16)tmp1, (v8i16)tmp0);
reg1 = (v8u16)__msa_pckev_h((v8i16)tmp3, (v8i16)tmp2);
dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
__msa_st_b((v16i8)dst0, dst_ptr, 0);
dst_ptr += 16;
}
}
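// Nearest-neighbor ARGB column scaler: gathers the 32-bit pixel at
// index x >> 16 for each destination pixel, stepping x by dx
// (16.16 fixed point), 4 pixels per iteration.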
void ScaleARGBCols_MSA(uint8_t* dst_argb,
const uint8_t* src_argb,
int dst_width,
int x,
int dx) {
const uint32_t* src = (const uint32_t*)(src_argb);
uint32_t* dst = (uint32_t*)(dst_argb);
int j;
v4i32 x_vec = __msa_fill_w(x);
v4i32 dx_vec = __msa_fill_w(dx);
v4i32 const_vec = {0, 1, 2, 3};
v4i32 vec0, vec1, vec2;
v4i32 dst0;
vec0 = dx_vec * const_vec;
vec1 = dx_vec * 4;
x_vec += vec0;
for (j = 0; j < dst_width; j += 4) {
vec2 = x_vec >> 16;
x_vec += vec1;
LOAD_INDEXED_DATA(src, vec2, dst0);
__msa_st_w(dst0, dst, 0);
dst += 4;
}
}
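// Bilinear ARGB column scaler. The 7-bit fraction f = (x >> 9) & 0x7f is
// broadcast across the 4 bytes of each pixel, and adjacent source pixels
// are blended per byte as (src[xi] * (127 - f) + src[xi + 1] * f) >> 7.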
void ScaleARGBFilterCols_MSA(uint8_t* dst_argb,
const uint8_t* src_argb,
int dst_width,
int x,
int dx) {
const uint32_t* src = (const uint32_t*)(src_argb);
int j;
v4u32 src0, src1, src2, src3;
v4u32 vec0, vec1, vec2, vec3;
v16u8 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
v16u8 mult0, mult1, mult2, mult3;
v8u16 tmp0, tmp1, tmp2, tmp3;
v16u8 dst0, dst1;
v4u32 vec_x = (v4u32)__msa_fill_w(x);
v4u32 vec_dx = (v4u32)__msa_fill_w(dx);
v4u32 vec_const = {0, 1, 2, 3};
v16u8 const_0x7f = (v16u8)__msa_fill_b(0x7f);
vec0 = vec_dx * vec_const;
vec1 = vec_dx * 4;
vec_x += vec0;
for (j = 0; j < dst_width - 1; j += 8) {
vec2 = vec_x >> 16;
reg0 = (v16u8)(vec_x >> 9);
vec_x += vec1;
vec3 = vec_x >> 16;
reg1 = (v16u8)(vec_x >> 9);
vec_x += vec1;
reg0 = reg0 & const_0x7f;
reg1 = reg1 & const_0x7f;
reg0 = (v16u8)__msa_shf_b((v16i8)reg0, 0);
reg1 = (v16u8)__msa_shf_b((v16i8)reg1, 0);
reg2 = reg0 ^ const_0x7f;
reg3 = reg1 ^ const_0x7f;
mult0 = (v16u8)__msa_ilvr_b((v16i8)reg0, (v16i8)reg2);
mult1 = (v16u8)__msa_ilvl_b((v16i8)reg0, (v16i8)reg2);
mult2 = (v16u8)__msa_ilvr_b((v16i8)reg1, (v16i8)reg3);
mult3 = (v16u8)__msa_ilvl_b((v16i8)reg1, (v16i8)reg3);
LOAD_INDEXED_DATA(src, vec2, src0);
LOAD_INDEXED_DATA(src, vec3, src1);
vec2 += 1;
vec3 += 1;
LOAD_INDEXED_DATA(src, vec2, src2);
LOAD_INDEXED_DATA(src, vec3, src3);
reg4 = (v16u8)__msa_ilvr_b((v16i8)src2, (v16i8)src0);
reg5 = (v16u8)__msa_ilvl_b((v16i8)src2, (v16i8)src0);
reg6 = (v16u8)__msa_ilvr_b((v16i8)src3, (v16i8)src1);
reg7 = (v16u8)__msa_ilvl_b((v16i8)src3, (v16i8)src1);
tmp0 = __msa_dotp_u_h(reg4, mult0);
tmp1 = __msa_dotp_u_h(reg5, mult1);
tmp2 = __msa_dotp_u_h(reg6, mult2);
tmp3 = __msa_dotp_u_h(reg7, mult3);
tmp0 >>= 7;
tmp1 >>= 7;
tmp2 >>= 7;
tmp3 >>= 7;
dst0 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
dst1 = (v16u8)__msa_pckev_b((v16i8)tmp3, (v16i8)tmp2);
__msa_st_b((v16i8)dst0, dst_argb, 0);
__msa_st_b((v16i8)dst1, dst_argb, 16);
dst_argb += 32;
}
}
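// 3/4 point-sample scaler: keeps bytes 0, 1 and 3 of every group of 4,
// turning 64 source bytes into 48 destination bytes per iteration.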
void ScaleRowDown34_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* dst,
int dst_width) {
int x;
(void)src_stride;
v16u8 src0, src1, src2, src3;
v16u8 vec0, vec1, vec2;
v16i8 mask0 = {0, 1, 3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16, 17, 19, 20};
v16i8 mask1 = {5, 7, 8, 9, 11, 12, 13, 15, 16, 17, 19, 20, 21, 23, 24, 25};
v16i8 mask2 = {11, 12, 13, 15, 16, 17, 19, 20,
21, 23, 24, 25, 27, 28, 29, 31};
assert((dst_width % 3 == 0) && (dst_width > 0));
for (x = 0; x < dst_width; x += 48) {
src0 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)src_ptr, 48);
vec0 = (v16u8)__msa_vshf_b(mask0, (v16i8)src1, (v16i8)src0);
vec1 = (v16u8)__msa_vshf_b(mask1, (v16i8)src2, (v16i8)src1);
vec2 = (v16u8)__msa_vshf_b(mask2, (v16i8)src3, (v16i8)src2);
__msa_st_b((v16i8)vec0, dst, 0);
__msa_st_b((v16i8)vec1, dst, 16);
__msa_st_b((v16i8)vec2, dst, 32);
src_ptr += 64;
dst += 48;
}
}
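// 3/4 box scaler weighted toward the nearer row. Each 4 source columns
// filter to 3 outputs with pairwise weights (3,1)>>2, (1,1)>>1 and
// (1,3)>>2, and the two source rows are blended 3:1 with rounding:
// (3 * row0 + row1 + 2) >> 2.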
void ScaleRowDown34_0_Box_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* d,
int dst_width) {
const uint8_t* s = src_ptr;
const uint8_t* t = src_ptr + src_stride;
int x;
v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0, dst1, dst2;
v16u8 vec0, vec1, vec2, vec3, vec4, vec5;
v16u8 vec6, vec7, vec8, vec9, vec10, vec11;
v8i16 reg0, reg1, reg2, reg3, reg4, reg5;
v8i16 reg6, reg7, reg8, reg9, reg10, reg11;
v16u8 const0 = {3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1};
v16u8 const1 = {1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1};
v16u8 const2 = {1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3};
v16i8 mask0 = {0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10};
v16i8 mask1 = {10, 11, 12, 13, 13, 14, 14, 15,
16, 17, 17, 18, 18, 19, 20, 21};
v16i8 mask2 = {5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 12, 13, 13, 14, 14, 15};
v8i16 shft0 = {2, 1, 2, 2, 1, 2, 2, 1};
v8i16 shft1 = {2, 2, 1, 2, 2, 1, 2, 2};
v8i16 shft2 = {1, 2, 2, 1, 2, 2, 1, 2};
assert((dst_width % 3 == 0) && (dst_width > 0));
for (x = 0; x < dst_width; x += 48) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)s, 48);
src4 = (v16u8)__msa_ld_b((v16i8*)t, 0);
src5 = (v16u8)__msa_ld_b((v16i8*)t, 16);
src6 = (v16u8)__msa_ld_b((v16i8*)t, 32);
src7 = (v16u8)__msa_ld_b((v16i8*)t, 48);
vec0 = (v16u8)__msa_vshf_b(mask0, (v16i8)src0, (v16i8)src0);
vec1 = (v16u8)__msa_vshf_b(mask1, (v16i8)src1, (v16i8)src0);
vec2 = (v16u8)__msa_vshf_b(mask2, (v16i8)src1, (v16i8)src1);
vec3 = (v16u8)__msa_vshf_b(mask0, (v16i8)src2, (v16i8)src2);
vec4 = (v16u8)__msa_vshf_b(mask1, (v16i8)src3, (v16i8)src2);
vec5 = (v16u8)__msa_vshf_b(mask2, (v16i8)src3, (v16i8)src3);
vec6 = (v16u8)__msa_vshf_b(mask0, (v16i8)src4, (v16i8)src4);
vec7 = (v16u8)__msa_vshf_b(mask1, (v16i8)src5, (v16i8)src4);
vec8 = (v16u8)__msa_vshf_b(mask2, (v16i8)src5, (v16i8)src5);
vec9 = (v16u8)__msa_vshf_b(mask0, (v16i8)src6, (v16i8)src6);
vec10 = (v16u8)__msa_vshf_b(mask1, (v16i8)src7, (v16i8)src6);
vec11 = (v16u8)__msa_vshf_b(mask2, (v16i8)src7, (v16i8)src7);
reg0 = (v8i16)__msa_dotp_u_h(vec0, const0);
reg1 = (v8i16)__msa_dotp_u_h(vec1, const1);
reg2 = (v8i16)__msa_dotp_u_h(vec2, const2);
reg3 = (v8i16)__msa_dotp_u_h(vec3, const0);
reg4 = (v8i16)__msa_dotp_u_h(vec4, const1);
reg5 = (v8i16)__msa_dotp_u_h(vec5, const2);
reg6 = (v8i16)__msa_dotp_u_h(vec6, const0);
reg7 = (v8i16)__msa_dotp_u_h(vec7, const1);
reg8 = (v8i16)__msa_dotp_u_h(vec8, const2);
reg9 = (v8i16)__msa_dotp_u_h(vec9, const0);
reg10 = (v8i16)__msa_dotp_u_h(vec10, const1);
reg11 = (v8i16)__msa_dotp_u_h(vec11, const2);
reg0 = __msa_srar_h(reg0, shft0);
reg1 = __msa_srar_h(reg1, shft1);
reg2 = __msa_srar_h(reg2, shft2);
reg3 = __msa_srar_h(reg3, shft0);
reg4 = __msa_srar_h(reg4, shft1);
reg5 = __msa_srar_h(reg5, shft2);
reg6 = __msa_srar_h(reg6, shft0);
reg7 = __msa_srar_h(reg7, shft1);
reg8 = __msa_srar_h(reg8, shft2);
reg9 = __msa_srar_h(reg9, shft0);
reg10 = __msa_srar_h(reg10, shft1);
reg11 = __msa_srar_h(reg11, shft2);
reg0 = reg0 * 3 + reg6;
reg1 = reg1 * 3 + reg7;
reg2 = reg2 * 3 + reg8;
reg3 = reg3 * 3 + reg9;
reg4 = reg4 * 3 + reg10;
reg5 = reg5 * 3 + reg11;
reg0 = __msa_srari_h(reg0, 2);
reg1 = __msa_srari_h(reg1, 2);
reg2 = __msa_srari_h(reg2, 2);
reg3 = __msa_srari_h(reg3, 2);
reg4 = __msa_srari_h(reg4, 2);
reg5 = __msa_srari_h(reg5, 2);
dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
dst1 = (v16u8)__msa_pckev_b((v16i8)reg3, (v16i8)reg2);
dst2 = (v16u8)__msa_pckev_b((v16i8)reg5, (v16i8)reg4);
__msa_st_b((v16i8)dst0, d, 0);
__msa_st_b((v16i8)dst1, d, 16);
__msa_st_b((v16i8)dst2, d, 32);
s += 64;
t += 64;
d += 48;
}
}
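// Same 4-to-3 column filter as ScaleRowDown34_0_Box_MSA above, but the two
// source rows are averaged equally with rounding: (row0 + row1 + 1) >> 1.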
void ScaleRowDown34_1_Box_MSA(const uint8_t* src_ptr,
ptrdiff_t src_stride,
uint8_t* d,
int dst_width) {
const uint8_t* s = src_ptr;
const uint8_t* t = src_ptr + src_stride;
int x;
v16u8 src0, src1, src2, src3, src4, src5, src6, src7, dst0, dst1, dst2;
v16u8 vec0, vec1, vec2, vec3, vec4, vec5;
v16u8 vec6, vec7, vec8, vec9, vec10, vec11;
v8i16 reg0, reg1, reg2, reg3, reg4, reg5;
v8i16 reg6, reg7, reg8, reg9, reg10, reg11;
v16u8 const0 = {3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1};
v16u8 const1 = {1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1};
v16u8 const2 = {1, 1, 1, 3, 3, 1, 1, 1, 1, 3, 3, 1, 1, 1, 1, 3};
v16i8 mask0 = {0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10};
v16i8 mask1 = {10, 11, 12, 13, 13, 14, 14, 15,
16, 17, 17, 18, 18, 19, 20, 21};
v16i8 mask2 = {5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 12, 13, 13, 14, 14, 15};
v8i16 shft0 = {2, 1, 2, 2, 1, 2, 2, 1};
v8i16 shft1 = {2, 2, 1, 2, 2, 1, 2, 2};
v8i16 shft2 = {1, 2, 2, 1, 2, 2, 1, 2};
assert((dst_width % 3 == 0) && (dst_width > 0));
for (x = 0; x < dst_width; x += 48) {
src0 = (v16u8)__msa_ld_b((v16i8*)s, 0);
src1 = (v16u8)__msa_ld_b((v16i8*)s, 16);
src2 = (v16u8)__msa_ld_b((v16i8*)s, 32);
src3 = (v16u8)__msa_ld_b((v16i8*)s, 48);
src4 = (v16u8)__msa_ld_b((v16i8*)t, 0);
src5 = (v16u8)__msa_ld_b((v16i8*)t, 16);
src6 = (v16u8)__msa_ld_b((v16i8*)t, 32);
src7 = (v16u8)__msa_ld_b((v16i8*)t, 48);
vec0 = (v16u8)__msa_vshf_b(mask0, (v16i8)src0, (v16i8)src0);
vec1 = (v16u8)__msa_vshf_b(mask1, (v16i8)src1, (v16i8)src0);
vec2 = (v16u8)__msa_vshf_b(mask2, (v16i8)src1, (v16i8)src1);
vec3 = (v16u8)__msa_vshf_b(mask0, (v16i8)src2, (v16i8)src2);
vec4 = (v16u8)__msa_vshf_b(mask1, (v16i8)src3, (v16i8)src2);
vec5 = (v16u8)__msa_vshf_b(mask2, (v16i8)src3, (v16i8)src3);
vec6 = (v16u8)__msa_vshf_b(mask0, (v16i8)src4, (v16i8)src4);
vec7 = (v16u8)__msa_vshf_b(mask1, (v16i8)src5, (v16i8)src4);
vec8 = (v16u8)__msa_vshf_b(mask2, (v16i8)src5, (v16i8)src5);
vec9 = (v16u8)__msa_vshf_b(mask0, (v16i8)src6, (v16i8)src6);
vec10 = (v16u8)__msa_vshf_b(mask1, (v16i8)src7, (v16i8)src6);
vec11 = (v16u8)__msa_vshf_b(mask2, (v16i8)src7, (v16i8)src7);
reg0 = (v8i16)__msa_dotp_u_h(vec0, const0);
reg1 = (v8i16)__msa_dotp_u_h(vec1, const1);
reg2 = (v8i16)__msa_dotp_u_h(vec2, const2);
reg3 = (v8i16)__msa_dotp_u_h(vec3, const0);
reg4 = (v8i16)__msa_dotp_u_h(vec4, const1);
reg5 = (v8i16)__msa_dotp_u_h(vec5, const2);
reg6 = (v8i16)__msa_dotp_u_h(vec6, const0);
reg7 = (v8i16)__msa_dotp_u_h(vec7, const1);
reg8 = (v8i16)__msa_dotp_u_h(vec8, const2);
reg9 = (v8i16)__msa_dotp_u_h(vec9, const0);
reg10 = (v8i16)__msa_dotp_u_h(vec10, const1);
reg11 = (v8i16)__msa_dotp_u_h(vec11, const2);
reg0 = __msa_srar_h(reg0, shft0);
reg1 = __msa_srar_h(reg1, shft1);
reg2 = __msa_srar_h(reg2, shft2);
reg3 = __msa_srar_h(reg3, shft0);
reg4 = __msa_srar_h(reg4, shft1);
reg5 = __msa_srar_h(reg5, shft2);
reg6 = __msa_srar_h(reg6, shft0);
reg7 = __msa_srar_h(reg7, shft1);
reg8 = __msa_srar_h(reg8, shft2);
reg9 = __msa_srar_h(reg9, shft0);
reg10 = __msa_srar_h(reg10, shft1);
reg11 = __msa_srar_h(reg11, shft2);
reg0 += reg6;
reg1 += reg7;
reg2 += reg8;
reg3 += reg9;
reg4 += reg10;
reg5 += reg11;
reg0 = __msa_srari_h(reg0, 1);
reg1 = __msa_srari_h(reg1, 1);
reg2 = __msa_srari_h(reg2, 1);
reg3 = __msa_srari_h(reg3, 1);
reg4 = __msa_srari_h(reg4, 1);
reg5 = __msa_srari_h(reg5, 1);
dst0 = (v16u8)__msa_pckev_b((v16i8)reg1, (v16i8)reg0);
dst1 = (v16u8)__msa_pckev_b((v16i8)reg3, (v16i8)reg2);
dst2 = (v16u8)__msa_pckev_b((v16i8)reg5, (v16i8)reg4);
__msa_st_b((v16i8)dst0, d, 0);
__msa_st_b((v16i8)dst1, d, 16);
__msa_st_b((v16i8)dst2, d, 32);
s += 64;
t += 64;
d += 48;
}
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif
#endif // !defined(LIBYUV_DISABLE_MSA) && defined(__mips_msa)

1494
thirdparty/libyuv/source/scale_neon.cc vendored Normal file

File diff suppressed because it is too large

1634
thirdparty/libyuv/source/scale_neon64.cc vendored Normal file

File diff suppressed because it is too large

1197
thirdparty/libyuv/source/scale_uv.cc vendored Normal file

File diff suppressed because it is too large

1392
thirdparty/libyuv/source/scale_win.cc vendored Normal file

File diff suppressed because it is too large

35
thirdparty/libyuv/source/test.sh vendored Normal file

@@ -0,0 +1,35 @@
#!/bin/bash
set -x
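# Profile one libyuv_test benchmark (1280x720, 1000 repeats, all CPU paths
# enabled) under perf and report the AVX routines that dominate. The
# google3 binary path below is specific to the original author's checkout.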
function runbenchmark1 {
perf record /google/src/cloud/fbarchard/clean/google3/blaze-bin/third_party/libyuv/libyuv_test --gunit_filter=*$1 --libyuv_width=1280 --libyuv_height=720 --libyuv_repeat=1000 --libyuv_flags=-1 --libyuv_cpu_info=-1
perf report | grep AVX
}
runbenchmark1 ABGRToI420
runbenchmark1 Android420ToI420
runbenchmark1 ARGBToI420
runbenchmark1 Convert16To8Plane
runbenchmark1 ConvertToARGB
runbenchmark1 ConvertToI420
runbenchmark1 CopyPlane
runbenchmark1 H010ToAB30
runbenchmark1 H010ToAR30
runbenchmark1 HalfFloatPlane
runbenchmark1 I010ToAB30
runbenchmark1 I010ToAR30
runbenchmark1 I420Copy
runbenchmark1 I420Psnr
runbenchmark1 I420Scale
runbenchmark1 I420Ssim
runbenchmark1 I420ToARGB
runbenchmark1 I420ToNV12
runbenchmark1 I420ToUYVY
runbenchmark1 I422ToI420
runbenchmark1 InitCpuFlags
runbenchmark1 J420ToARGB
runbenchmark1 NV12ToARGB
runbenchmark1 NV12ToI420
runbenchmark1 NV12ToI420Rotate
runbenchmark1 SetCpuFlags
runbenchmark1 YUY2ToI420

62
thirdparty/libyuv/source/video_common.cc vendored Normal file

@@ -0,0 +1,62 @@
/*
* Copyright 2011 The LibYuv Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "libyuv/video_common.h"
#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif
struct FourCCAliasEntry {
uint32_t alias;
uint32_t canonical;
};
#define NUM_ALIASES 18
static const struct FourCCAliasEntry kFourCCAliases[NUM_ALIASES] = {
{FOURCC_IYUV, FOURCC_I420},
{FOURCC_YU12, FOURCC_I420},
{FOURCC_YU16, FOURCC_I422},
{FOURCC_YU24, FOURCC_I444},
{FOURCC_YUYV, FOURCC_YUY2},
{FOURCC_YUVS, FOURCC_YUY2}, // kCMPixelFormat_422YpCbCr8_yuvs
{FOURCC_HDYC, FOURCC_UYVY},
{FOURCC_2VUY, FOURCC_UYVY}, // kCMPixelFormat_422YpCbCr8
{FOURCC_JPEG, FOURCC_MJPG}, // Note: JPEG has DHT while MJPG does not.
{FOURCC_DMB1, FOURCC_MJPG},
{FOURCC_BA81, FOURCC_BGGR}, // deprecated.
{FOURCC_RGB3, FOURCC_RAW},
{FOURCC_BGR3, FOURCC_24BG},
{FOURCC_CM32, FOURCC_BGRA}, // kCMPixelFormat_32ARGB
{FOURCC_CM24, FOURCC_RAW}, // kCMPixelFormat_24RGB
{FOURCC_L555, FOURCC_RGBO}, // kCMPixelFormat_16LE555
{FOURCC_L565, FOURCC_RGBP}, // kCMPixelFormat_16LE565
{FOURCC_5551, FOURCC_RGBO}, // kCMPixelFormat_16LE5551
};
// TODO(fbarchard): Consider mapping kCMPixelFormat_32BGRA to FOURCC_ARGB.
// {FOURCC_BGRA, FOURCC_ARGB}, // kCMPixelFormat_32BGRA
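// Example: CanonicalFourCC(FOURCC_IYUV) returns FOURCC_I420, while a
// fourcc with no alias entry is returned unchanged.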
LIBYUV_API
uint32_t CanonicalFourCC(uint32_t fourcc) {
int i;
for (i = 0; i < NUM_ALIASES; ++i) {
if (kFourCCAliases[i].alias == fourcc) {
return kFourCCAliases[i].canonical;
}
}
// Not an alias, so return it as-is.
return fourcc;
}
#ifdef __cplusplus
} // extern "C"
} // namespace libyuv
#endif