/*
* Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
 */
#include <stdio.h>

/* TEST_PASS and TEST_FAIL are expected from the selftest harness headers. */
#define VEC_MAX 128
#define VSX_MAX 32
#define VMX_MAX 32
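/*
 * Layout of the 128-doubleword load/store buffers used throughout
 * this file (as implied by the checks below):
 *
 *   [0..63]   - 32 VSX registers, two doublewords per register; the
 *               value under test lives in the odd doubleword
 *               (index 2 * i + 1).
 *   [64..127] - 32 VMX registers, two doublewords per register; on
 *               big endian the pair is stored in order, on little
 *               endian the two doublewords of a pair are swapped.
 */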
/*
 * unsigned long vsx[32]
 * unsigned long load[128]
 *
 * Check each VSX register value against the odd doubleword of its
 * slot in the load buffer.
 */
int validate_vsx(unsigned long *vsx, unsigned long *load)
{
int i;
for (i = 0; i < VSX_MAX; i++) {
if (vsx[i] != load[2 * i + 1]) {
printf("vsx[%d]: %lx load[%d] %lx\n",
i, vsx[i], 2 * i + 1, load[2 * i + 1]);
return TEST_FAIL;
}
}
return TEST_PASS;
}
/*
 * unsigned long vmx[32][2]
 * unsigned long load[128]
 *
 * Check each VMX register pair against the second half of the load
 * buffer; the doubleword order within a pair depends on endianness.
 */
int validate_vmx(unsigned long vmx[][2], unsigned long *load)
{
int i;
for (i = 0; i < VMX_MAX; i++) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
if ((vmx[i][0] != load[64 + 2 * i]) ||
(vmx[i][1] != load[65 + 2 * i])) {
printf("vmx[%d][0]: %lx load[%d] %lx\n",
i, vmx[i][0], 64 + 2 * i,
load[64 + 2 * i]);
printf("vmx[%d][1]: %lx load[%d] %lx\n",
i, vmx[i][1], 65 + 2 * i,
load[65 + 2 * i]);
return TEST_FAIL;
}
#else	/* In LE the two doublewords of each pair are swapped */
if ((vmx[i][0] != load[65 + 2 * i]) ||
(vmx[i][1] != load[64 + 2 * i])) {
printf("vmx[%d][0]: %lx load[%d] %lx\n",
i, vmx[i][0], 65 + 2 * i,
load[65 + 2 * i]);
printf("vmx[%d][1]: %lx load[%d] %lx\n",
i, vmx[i][1], 64 + 2 * i,
load[64 + 2 * i]);
return TEST_FAIL;
}
#endif
}
return TEST_PASS;
}
/*
 * unsigned long store[128]
 * unsigned long load[128]
 *
 * Compare the stored-out buffer with the original load buffer,
 * accounting for the LE doubleword swap in the VMX half.
 */
int compare_vsx_vmx(unsigned long *store, unsigned long *load)
{
int i;
for (i = 0; i < VSX_MAX; i++) {
		if (store[1 + 2 * i] != load[1 + 2 * i]) {
			printf("store[%d]: %lx load[%d] %lx\n",
					1 + 2 * i, store[1 + 2 * i],
					1 + 2 * i, load[1 + 2 * i]);
return TEST_FAIL;
}
}
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
for (i = 64; i < VEC_MAX; i++) {
if (store[i] != load[i]) {
printf("store[%d]: %lx load[%d] %lx\n",
i, store[i], i, load[i]);
return TEST_FAIL;
}
}
#else	/* In LE the two doublewords of each pair are swapped */
for (i = 64; i < VEC_MAX; i++) {
		if (!(i % 2) && (store[i] != load[i + 1])) {
			printf("store[%d]: %lx load[%d] %lx\n",
					i, store[i], i + 1, load[i + 1]);
			return TEST_FAIL;
		}
		if ((i % 2) && (store[i] != load[i - 1])) {
			printf("store[%d]: %lx load[%d] %lx\n",
					i, store[i], i - 1, load[i - 1]);
			return TEST_FAIL;
		}
}
#endif
return TEST_PASS;
}
/*
 * Unpack the load buffer into separate vsx[] and vmx[][] arrays,
 * following the buffer layout described at the top of this file.
 */
void load_vsx_vmx(unsigned long *load, unsigned long *vsx,
		  unsigned long vmx[][2])
{
int i;
for (i = 0; i < VSX_MAX; i++)
vsx[i] = load[1 + 2 * i];
for (i = 0; i < VMX_MAX; i++) {
vmx[i][0] = load[64 + 2 * i];
vmx[i][1] = load[65 + 2 * i];
}
}
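/*
 * Assembly helpers, implemented separately (e.g. in an accompanying
 * .S file): loadvsx() fills the VSX/VMX registers from the buffer at
 * p, storevsx() dumps them back out to it.  The exact role of the
 * tmp argument depends on the asm implementation.
 */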
void loadvsx(void *p, int tmp);
void storevsx(void *p, int tmp);
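/*
 * Minimal usage sketch, not part of the original test: one way the
 * helpers above could compose in-process.  Assumes TEST_PASS and
 * TEST_FAIL come from the selftest harness and that the asm helpers
 * behave as described above.
 */
static inline int vsx_vmx_roundtrip(unsigned long *load)
{
	unsigned long store[VEC_MAX];
	unsigned long vsx[VSX_MAX];
	unsigned long vmx[VMX_MAX][2];

	loadvsx(load, 0);	/* fill the registers from load[] */
	storevsx(store, 0);	/* dump them back out to store[] */

	/* The round trip must preserve every value. */
	if (compare_vsx_vmx(store, load) != TEST_PASS)
		return TEST_FAIL;

	/* Unpack the buffer and re-check the VSX half. */
	load_vsx_vmx(load, vsx, vmx);
	return validate_vsx(vsx, load);
}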