author    Frank Barchard <fbarchard@google.com>  2017-05-25 11:36:06 -0700
committer Commit Bot <commit-bot@chromium.org>  2017-05-25 22:00:17 +0000
commit    7bffe5e1c54bc22daebd57003735e61693719ac6 (patch)
tree      1d7082355eef461021e0ce524e4bb2e7e1b88bc9 /source/cpu_id.cc
parent    ae7e2ef13ed464545a153321f377e71be9b38724 (diff)
download  libyuv-7bffe5e1c54bc22daebd57003735e61693719ac6.tar.gz
lint warning fixes for CpuID
The CpuId function is a wrapper for the intrinsic, or is implemented with inline assembly where the intrinsic is unavailable. It had been using uint32, but the intrinsics use int, which required casting and caused lint warnings. This change makes the internal implementation use int. A cast was also applied to xgetbv; that cast is simply removed and does not cause a build error.

MipsCpuCaps was using strlen to check for white space after the instruction set name. Arm does the same but with a hard-coded offset. This caused a cast from size_t to int, which produced a lint warning. The change removes the white space detection. In theory the code could be used to distinguish SSE from SSE2, in which case it would need to check that SSE is followed by a space or end of line. But this code is only used on Arm and Mips, where there is a single form of SIMD to detect, e.g. MSA for Mips. If a new instruction set with a similar name is added, the white space check could be reintroduced, but by then it is more likely the code can be rewritten to use a better form of detection, or detection removed entirely and the instructions required.

BUG=libyuv:641
TEST=try bots build on all platforms without error and lint is clean
Change-Id: I9f55f8e57bba0f78571bdddbe63b945dea3e8809
Reviewed-on: https://chromium-review.googlesource.com/514524
Commit-Queue: Frank Barchard <fbarchard@google.com>
Reviewed-by: Cheng Wang <wangcheng@google.com>
Reviewed-by: Wan-Teh Chang <wtc@chromium.org>
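For context, a minimal sketch of the kind of delimiter check being removed, using a hypothetical helper name (not the actual libyuv code):

```c
#include <string.h>

/* Hypothetical helper illustrating the check this CL drops: match `name`
 * in a /proc/cpuinfo-style line only when it is followed by a space,
 * newline, or end of string, so a search for " sse" does not also match
 * " sse2".  strlen() returns size_t, which is what forced the
 * lint-flagged cast to int in the original MipsCpuCaps code. */
static int HasWholeWordFlag(const char* cpuinfo_line, const char* name) {
  const char* p = strstr(cpuinfo_line, name);
  size_t len = strlen(name);
  return p != NULL && (p[len] == ' ' || p[len] == '\n' || p[len] == '\0');
}
```

Because Arm and Mips each expose only one SIMD feature name of interest (" asimd"/" neon" and " msa"), a plain strstr match is sufficient, and the length bookkeeping, along with its cast, can go.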
Diffstat (limited to 'source/cpu_id.cc')
-rw-r--r--  source/cpu_id.cc  28
1 file changed, 14 insertions, 14 deletions
diff --git a/source/cpu_id.cc b/source/cpu_id.cc
index b3eef701..ce165544 100644
--- a/source/cpu_id.cc
+++ b/source/cpu_id.cc
@@ -52,11 +52,11 @@ LIBYUV_API int cpu_info_ = 0;
defined(__x86_64__)) && \
!defined(__pnacl__) && !defined(__CLR_VER)
LIBYUV_API
-void CpuId(uint32 info_eax, uint32 info_ecx, uint32* cpu_info) {
+void CpuId(int info_eax, int info_ecx, int* cpu_info) {
#if defined(_MSC_VER)
// Visual C version uses intrinsic or inline x86 assembly.
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
- __cpuidex((int*)(cpu_info), info_eax, info_ecx); // NOLINT
+ __cpuidex(cpu_info, info_eax, info_ecx);
#elif defined(_M_IX86)
__asm {
mov eax, info_eax
@@ -70,14 +70,14 @@ void CpuId(uint32 info_eax, uint32 info_ecx, uint32* cpu_info) {
}
#else // Visual C but not x86
if (info_ecx == 0) {
- __cpuid((int*)(cpu_info), info_eax); // NOLINT
+ __cpuid(cpu_info, info_eax);
} else {
cpu_info[3] = cpu_info[2] = cpu_info[1] = cpu_info[0] = 0u;
}
#endif
// GCC version uses inline x86 assembly.
#else // defined(_MSC_VER)
- uint32 info_ebx, info_edx;
+ int info_ebx, info_edx;
asm volatile(
#if defined(__i386__) && defined(__PIC__)
// Preserve ebx for fpic 32 bit.
@@ -98,7 +98,7 @@ void CpuId(uint32 info_eax, uint32 info_ecx, uint32* cpu_info) {
}
#else // (defined(_M_IX86) || defined(_M_X64) ...
LIBYUV_API
-void CpuId(uint32 eax, uint32 ecx, uint32* cpu_info) {
+void CpuId(int eax, int ecx, int* cpu_info) {
(void)eax;
(void)ecx;
cpu_info[0] = cpu_info[1] = cpu_info[2] = cpu_info[3] = 0;
@@ -122,10 +122,9 @@ void CpuId(uint32 eax, uint32 ecx, uint32* cpu_info) {
!defined(__pnacl__) && !defined(__CLR_VER) && !defined(__native_client__)
// X86 CPUs have xgetbv to detect OS saves high parts of ymm registers.
int GetXCR0() {
- uint32 xcr0 = 0u;
-// VS2010 SP1 required
+ int xcr0 = 0;
#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 160040219)
- xcr0 = (uint32)(_xgetbv(0)); // NOLINT
+ xcr0 = _xgetbv(0); // VS2010 SP1 required.
#elif defined(__i386__) || defined(__x86_64__)
asm(".byte 0x0f, 0x01, 0xd0" : "=a"(xcr0) : "c"(0) : "%edx");
#endif // defined(__i386__) || defined(__x86_64__)
@@ -159,7 +158,7 @@ LIBYUV_API SAFEBUFFERS int ArmCpuCaps(const char* cpuinfo_name) {
}
// aarch64 uses asimd for Neon.
p = strstr(cpuinfo_line, " asimd");
- if (p && (p[6] == ' ' || p[6] == '\n')) {
+ if (p) {
fclose(f);
return kCpuHasNEON;
}
@@ -169,10 +168,11 @@ LIBYUV_API SAFEBUFFERS int ArmCpuCaps(const char* cpuinfo_name) {
return 0;
}
+// TODO(fbarchard): Consider read_msa_ir().
+// TODO(fbarchard): Add unittest.
LIBYUV_API SAFEBUFFERS int MipsCpuCaps(const char* cpuinfo_name,
const char ase[]) {
char cpuinfo_line[512];
- int len = (int)strlen(ase); // NOLINT
FILE* f = fopen(cpuinfo_name, "r");
if (!f) {
// ase enabled if /proc/cpuinfo is unavailable.
@@ -184,7 +184,7 @@ LIBYUV_API SAFEBUFFERS int MipsCpuCaps(const char* cpuinfo_name,
while (fgets(cpuinfo_line, sizeof(cpuinfo_line) - 1, f)) {
if (memcmp(cpuinfo_line, "ASEs implemented", 16) == 0) {
char* p = strstr(cpuinfo_line, ase);
- if (p && (p[len] == ' ' || p[len] == '\n')) {
+ if (p) {
fclose(f);
if (strcmp(ase, " msa") == 0) {
return kCpuHasMSA;
@@ -219,9 +219,9 @@ static LIBYUV_BOOL TestEnv(const char*) {
static SAFEBUFFERS int GetCpuFlags(void) {
int cpu_info = 0;
#if !defined(__pnacl__) && !defined(__CLR_VER) && defined(CPU_X86)
- uint32 cpu_info0[4] = {0, 0, 0, 0};
- uint32 cpu_info1[4] = {0, 0, 0, 0};
- uint32 cpu_info7[4] = {0, 0, 0, 0};
+ int cpu_info0[4] = {0, 0, 0, 0};
+ int cpu_info1[4] = {0, 0, 0, 0};
+ int cpu_info7[4] = {0, 0, 0, 0};
CpuId(0, 0, cpu_info0);
CpuId(1, 0, cpu_info1);
if (cpu_info0[0] >= 7) {