/*
 * RAID-6 data recovery in dual failure mode based on the XC instruction.
 *
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky
 */

#include <linux/export.h>
#include <linux/raid/pq.h>

/* XOR 256 bytes at p2 into the 256 bytes at p1 using the s390
   XC (exclusive-or character) instruction. */
static inline void xor_block(u8 *p1, u8 *p2)
{
	typedef struct { u8 _[256]; } addrtype;

	asm volatile(
		"	xc	0(256,%[p1]),0(%[p2])\n"
		: "+m" (*(addrtype *) p1) : "m" (*(addrtype *) p2),
		  [p1] "a" (p1), [p2] "a" (p2) : "cc");
}

/* Recover two failed data blocks. */
static void raid6_2data_recov_s390xc(int disks, size_t bytes, int faila,
		int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	int i;

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data pages.
	   Use the dead data pages as temporary storage for
	   delta p and delta q. */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila] = dp;
	ptrs[failb] = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
	qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]];

	/* Now do it... */
	while (bytes) {
		xor_block(dp, p);
		xor_block(dq, q);
		for (i = 0; i < 256; i++)
			dq[i] = pbmul[dp[i]] ^ qmul[dq[i]];
		xor_block(dp, dq);
		p += 256;
		q += 256;
		dp += 256;
		dq += 256;
		bytes -= 256;
	}
}

/* Recover failure of one data block plus the P block */
static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila,
		void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	int i;

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data page.
	   Use the dead data page as temporary storage for delta q. */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila] = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];

	/* Now do it... */
	while (bytes) {
		xor_block(dq, q);
		for (i = 0; i < 256; i++)
			dq[i] = qmul[dq[i]];
		xor_block(p, dq);
		p += 256;
		q += 256;
		dq += 256;
		bytes -= 256;
	}
}

const struct raid6_recov_calls raid6_recov_s390xc = {
	.data2 = raid6_2data_recov_s390xc,
	.datap = raid6_datap_recov_s390xc,
	.valid = NULL,
	.name = "s390xc",
	.priority = 1,
};
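/*
 * Illustrative sketch (not part of the original kernel file): the inner
 * loop of raid6_2data_recov_s390xc() above applies the standard RAID-6
 * two-failure algebra one byte at a time.  After the xor_block() calls,
 * delta_p holds DA ^ DB and delta_q holds the matching Q-syndrome delta;
 * the pbmul/qmul table lookups then yield the failed block B, and XOR-ing
 * that result back into delta_p recovers block A.  The helper below is a
 * hypothetical, unused example of the same computation for a single byte;
 * the name recover_two_bytes and its parameters do not exist in the
 * kernel sources.
 */
static inline void recover_two_bytes(u8 delta_p, u8 delta_q,
				     const u8 *pbmul, const u8 *qmul,
				     u8 *da, u8 *db)
{
	*db = pbmul[delta_p] ^ qmul[delta_q];	/* reconstructed data block B */
	*da = delta_p ^ *db;			/* reconstructed data block A */
}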