/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>

#include "rds.h"
#include "ib.h"

/*
 * Locking for IB rings.
 * We assume that allocation is always protected by a mutex
 * in the caller (this is a valid assumption for the current
 * implementation).
 *
 * Freeing always happens in an interrupt, and hence only
 * races with allocations, but not with other free()s.
 *
 * The interaction between allocation and freeing is that
 * the alloc code has to determine the number of free entries.
 * To this end, we maintain two counters; an allocation counter
 * and a free counter.  Both are allowed to run freely, and wrap
 * around.
 * The number of used entries is always (alloc_ctr - free_ctr) % NR.
 *
 * The current implementation makes free_ctr atomic.  When the
 * caller finds an allocation fails, it should set an "alloc fail"
 * bit and retry the allocation.  The "alloc fail" bit essentially tells
 * the CQ completion handlers to wake it up after freeing some
 * more entries.
 */

/*
 * This only happens on shutdown.
 */
DECLARE_WAIT_QUEUE_HEAD(rds_ib_ring_empty_wait);

void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr)
{
	memset(ring, 0, sizeof(*ring));
	ring->w_nr = nr;
	rdsdebug("ring %p nr %u\n", ring, ring->w_nr);
}

static inline u32 __rds_ib_ring_used(struct rds_ib_work_ring *ring)
{
	u32 diff;

	/* This assumes that atomic_t has at least as many bits as u32 */
	diff = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr);
	BUG_ON(diff > ring->w_nr);

	return diff;
}

void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr)
{
	/* We only ever get called from the connection setup code,
	 * prior to creating the QP.
	 */
	BUG_ON(__rds_ib_ring_used(ring));
	ring->w_nr = nr;
}

static int __rds_ib_ring_empty(struct rds_ib_work_ring *ring)
{
	return __rds_ib_ring_used(ring) == 0;
}

u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos)
{
	u32 ret = 0, avail;

	avail = ring->w_nr - __rds_ib_ring_used(ring);

	rdsdebug("ring %p val %u next %u free %u\n", ring, val,
		 ring->w_alloc_ptr, avail);

	if (val && avail) {
		ret = min(val, avail);
		*pos = ring->w_alloc_ptr;

		ring->w_alloc_ptr = (ring->w_alloc_ptr + ret) % ring->w_nr;
		ring->w_alloc_ctr += ret;
	}

	return ret;
}

void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val)
{
	ring->w_free_ptr = (ring->w_free_ptr + val) % ring->w_nr;
	atomic_add(val, &ring->w_free_ctr);

	if (__rds_ib_ring_empty(ring) &&
	    waitqueue_active(&rds_ib_ring_empty_wait))
		wake_up(&rds_ib_ring_empty_wait);
}

void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val)
{
	ring->w_alloc_ptr = (ring->w_alloc_ptr - val) % ring->w_nr;
	ring->w_alloc_ctr -= val;
}

int rds_ib_ring_empty(struct rds_ib_work_ring *ring)
{
	return __rds_ib_ring_empty(ring);
}

int rds_ib_ring_low(struct rds_ib_work_ring *ring)
{
	return __rds_ib_ring_used(ring) <= (ring->w_nr >> 1);
}

/*
 * returns the oldest alloced ring entry.  This will be the next one
 * freed.  This can't be called if there are none allocated.
 */
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring)
{
	return ring->w_free_ptr;
}

/*
 * returns the number of completed work requests.
 */
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest)
{
	u32 ret;

	if (oldest <= (unsigned long long) wr_id)
		ret = (unsigned long long) wr_id - oldest + 1;
	else
		ret = ring->w_nr - oldest + (unsigned long long) wr_id + 1;

	rdsdebug("ring %p ret %u wr_id %u oldest %u\n", ring, ret,
		 wr_id, oldest);
	return ret;
}
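The locking comment at the top of this file relies on the two u32 counters being allowed to wrap, with the number of in-flight entries recovered as alloc_ctr - free_ctr in modulo-2^32 arithmetic. The standalone sketch below is not part of the kernel source; the ring size and counter values are made up purely to illustrate why the subtraction stays correct across a counter wrap, provided no more than w_nr entries are ever outstanding.

/* ring_demo.c -- illustration only; compile with: cc -o ring_demo ring_demo.c */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

int main(void)
{
	uint32_t alloc_ctr = UINT32_MAX - 2;	/* about to wrap */
	uint32_t free_ctr  = UINT32_MAX - 5;	/* 3 entries currently in flight */
	const uint32_t nr  = 8;			/* hypothetical ring size */

	/* allocate 4 more entries; alloc_ctr wraps past zero here */
	alloc_ctr += 4;

	/* unsigned subtraction is modulo 2^32, so the difference is still
	 * the true number of outstanding entries (7), despite the wrap */
	uint32_t used = alloc_ctr - free_ctr;
	assert(used <= nr);

	printf("alloc_ctr=%u free_ctr=%u used=%u\n",
	       (unsigned) alloc_ctr, (unsigned) free_ctr, (unsigned) used);
	return 0;
}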
drm/i915: Check for NULL i915_vma in intel_unpin_fb_obj()
I've seen this trigger twice now, where the i915_gem_object_to_ggtt()
call in intel_unpin_fb_obj() returns NULL, resulting in an oops
immediately afterwards as the (inlined) call to i915_vma_unpin_fence()
tries to dereference it.

It seems to be some race condition where the object is going away at
shutdown time, since both times happened when shutting down the X
server.  The call chains were different:

 - VT ioctl(KDSETMODE, KD_TEXT):

    intel_cleanup_plane_fb+0x5b/0xa0 [i915]
    drm_atomic_helper_cleanup_planes+0x6f/0x90 [drm_kms_helper]
    intel_atomic_commit_tail+0x749/0xfe0 [i915]
    intel_atomic_commit+0x3cb/0x4f0 [i915]
    drm_atomic_commit+0x4b/0x50 [drm]
    restore_fbdev_mode+0x14c/0x2a0 [drm_kms_helper]
    drm_fb_helper_restore_fbdev_mode_unlocked+0x34/0x80 [drm_kms_helper]
    drm_fb_helper_set_par+0x2d/0x60 [drm_kms_helper]
    intel_fbdev_set_par+0x18/0x70 [i915]
    fb_set_var+0x236/0x460
    fbcon_blank+0x30f/0x350
    do_unblank_screen+0xd2/0x1a0
    vt_ioctl+0x507/0x12a0
    tty_ioctl+0x355/0xc30
    do_vfs_ioctl+0xa3/0x5e0
    SyS_ioctl+0x79/0x90
    entry_SYSCALL_64_fastpath+0x13/0x94

 - i915 unpin_work workqueue:

    intel_unpin_work_fn+0x58/0x140 [i915]
    process_one_work+0x1f1/0x480
    worker_thread+0x48/0x4d0
    kthread+0x101/0x140

and this patch purely papers over the issue by adding a NULL pointer
check and a WARN_ON_ONCE() to avoid the oops that would then generally
make the machine unresponsive.

Other callers of i915_gem_object_to_ggtt() seem to also check for the
returned pointer being NULL and warn about it, so this clearly has
happened before in other places.

[ Reported it originally to the i915 developers on Jan 8, applying the
  ugly workaround on my own now after triggering the problem for the
  second time with no feedback.

  This is likely to be the same bug reported as

    https://bugs.freedesktop.org/show_bug.cgi?id=98829
    https://bugs.freedesktop.org/show_bug.cgi?id=99134

  which has a patch for the underlying problem, but it hasn't gotten to
  me, so I'm applying the workaround. ]

Cc: Daniel Vetter <daniel.vetter@intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
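The workaround described above amounts to bailing out of intel_unpin_fb_obj() when the VMA lookup comes back NULL instead of dereferencing it. The fragment below is only a sketch of that shape, not the literal upstream patch: the surrounding function body is elided, and the local variable names (vma, obj, view) are assumed for illustration; only i915_gem_object_to_ggtt() and i915_vma_unpin_fence() are named in the message itself.

	/* Sketch of the described workaround, not the exact patch:
	 * guard the VMA lookup that was previously dereferenced
	 * unconditionally by the inlined i915_vma_unpin_fence(). */
	vma = i915_gem_object_to_ggtt(obj, &view);
	if (WARN_ON_ONCE(!vma))		/* warn once and bail instead of oopsing */
		return;

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);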