/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Simple descriptor-based ring. virtio 0.9 compatible event index is used for
 * signalling, unconditionally.
 */
#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/* Next - Where next entry will be written.
 * Prev - "Next" value when event triggered previously.
 * Event - Peer requested event after writing this entry.
 */
static inline bool need_event(unsigned short event,
			      unsigned short next,
			      unsigned short prev)
{
	return (unsigned short)(next - event - 1) < (unsigned short)(next - prev);
}
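/*
 * Illustrative self-check of need_event() (not part of the original file,
 * hence compiled out; the helper name below is made up).  Notification is
 * needed iff the peer's requested event index was crossed while moving
 * from prev to next; the unsigned 16-bit arithmetic keeps this correct
 * across index wraparound.
 */
#if 0
#include <assert.h>

static void need_event_selfcheck(void)
{
	/* Moved 2 -> 5; peer asked for an event after entry 3: notify. */
	assert(need_event(3, 5, 2));
	/* Moved 2 -> 5; entry 6 has not been written yet: don't notify. */
	assert(!need_event(6, 5, 2));
	/* Wraparound: moved 0xfffd -> 2; event index 0xfffe was crossed. */
	assert(need_event(0xfffe, 2, 0xfffd));
	/* No progress since the last notification: don't notify. */
	assert(!need_event(3, 5, 5));
}
#endif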
/* Design:
 * Guest adds descriptors with unique index values and DESC_HW in flags.
 * Host overwrites used descriptors with correct len, index, and DESC_HW clear.
 * Flags are always set last.
 */
#define DESC_HW 0x1

struct desc {
	unsigned short flags;
	unsigned short index;
	unsigned len;
	unsigned long long addr;
};

/* how much padding is needed to avoid false cache sharing */
#define HOST_GUEST_PADDING 0x80

/* Mostly read */
struct event {
	unsigned short kick_index;
	unsigned char reserved0[HOST_GUEST_PADDING - 2];
	unsigned short call_index;
	unsigned char reserved1[HOST_GUEST_PADDING - 2];
};

struct data {
	void *buf; /* descriptor is writeable, we can't get buf from there */
	void *data;
} *data;

struct desc *ring;
struct event *event;

struct guest {
	unsigned avail_idx;
	unsigned last_used_idx;
	unsigned num_free;
	unsigned kicked_avail_idx;
	unsigned char reserved[HOST_GUEST_PADDING - 12];
} guest;

struct host {
	/* we do not need to track last avail index
	 * unless we have more than one in flight.
	 */
	unsigned used_idx;
	unsigned called_used_idx;
	unsigned char reserved[HOST_GUEST_PADDING - 4];
} host;

/* implemented by ring */
void alloc_ring(void)
{
	int ret;
	int i;

	ret = posix_memalign((void **)&ring, 0x1000, ring_size * sizeof *ring);
	if (ret) {
		perror("Unable to allocate ring buffer.\n");
		exit(3);
	}
	event = malloc(sizeof *event);
	if (!event) {
		perror("Unable to allocate event buffer.\n");
		exit(3);
	}
	memset(event, 0, sizeof *event);
	guest.avail_idx = 0;
	guest.kicked_avail_idx = -1;
	guest.last_used_idx = 0;
	host.used_idx = 0;
	host.called_used_idx = -1;
	for (i = 0; i < ring_size; ++i) {
		struct desc desc = {
			.index = i,
		};
		ring[i] = desc;
	}
	guest.num_free = ring_size;
	data = malloc(ring_size * sizeof *data);
	if (!data) {
		perror("Unable to allocate data buffer.\n");
		exit(3);
	}
	memset(data, 0, ring_size * sizeof *data);
}

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	unsigned head, index;

	if (!guest.num_free)
		return -1;

	guest.num_free--;
	head = (ring_size - 1) & (guest.avail_idx++);

	/* Start with a write. On MESI architectures this helps
	 * avoid a shared state with consumer that is polling this descriptor.
	 */
	ring[head].addr = (unsigned long)(void *)buf;
	ring[head].len = len;
	/* read below might bypass write above. That is OK because it's just an
	 * optimization. If this happens, we will get the cache line in a
	 * shared state which is unfortunate, but probably not worth it to
	 * add an explicit full barrier to avoid this.
	 */
	barrier();
	index = ring[head].index;
	data[index].buf = buf;
	data[index].data = datap;
	/* Barrier A (for pairing) */
	smp_release();
	ring[head].flags = DESC_HW;
	return 0;
}

void *get_buf(unsigned *lenp, void **bufp)
{
	unsigned head = (ring_size - 1) & guest.last_used_idx;
	unsigned index;
	void *datap;

	if (ring[head].flags & DESC_HW)
		return NULL;

	/* Barrier B (for pairing) */
	smp_acquire();

	*lenp = ring[head].len;
	index = ring[head].index & (ring_size - 1);
	datap = data[index].data;
	*bufp = data[index].buf;
	data[index].buf = NULL;
	data[index].data = NULL;
	guest.num_free++;
	guest.last_used_idx++;
	return datap;
}

bool used_empty()
{
	unsigned head = (ring_size - 1) & guest.last_used_idx;

	return (ring[head].flags & DESC_HW);
}

void disable_call()
{
	/* Doing nothing to disable calls might cause
	 * extra interrupts, but reduces the number of cache misses.
	 */
}

bool enable_call()
{
	event->call_index = guest.last_used_idx;
	/* Flush call index write */
	/* Barrier D (for pairing) */
	smp_mb();
	return used_empty();
}

void kick_available(void)
{
	/* Flush in previous flags write */
	/* Barrier C (for pairing) */
	smp_mb();
	if (!need_event(event->kick_index,
			guest.avail_idx,
			guest.kicked_avail_idx))
		return;

	guest.kicked_avail_idx = guest.avail_idx;
	kick();
}
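/*
 * Illustrative guest-side flow (not part of the original benchmark and
 * compiled out; the real driver loop lives in the ringtest harness, and
 * the function and variable names below are made up).  It only shows how
 * the entry points above are meant to be combined.
 */
#if 0
static void example_guest_flow(void *buf, unsigned len, void *cookie)
{
	unsigned used_len;
	void *used_buf, *done;

	/* Post a buffer; kick the host only if it requested an event. */
	if (add_inbuf(len, buf, cookie) == 0)
		kick_available();

	/* Reap completions, then re-arm the call event.  enable_call()
	 * returns false if a completion raced in, so reap once more.
	 */
	do {
		while ((done = get_buf(&used_len, &used_buf)))
			/* process 'done' */;
	} while (!enable_call());
}
#endif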
/* host side */
void disable_kick()
{
	/* Doing nothing to disable kicks might cause
	 * extra interrupts, but reduces the number of cache misses.
	 */
}

bool enable_kick()
{
	event->kick_index = host.used_idx;
	/* Barrier C (for pairing) */
	smp_mb();
	return avail_empty();
}

bool avail_empty()
{
	unsigned head = (ring_size - 1) & host.used_idx;

	return !(ring[head].flags & DESC_HW);
}

bool use_buf(unsigned *lenp, void **bufp)
{
	unsigned head = (ring_size - 1) & host.used_idx;

	if (!(ring[head].flags & DESC_HW))
		return false;

	/* make sure length read below is not speculated */
	/* Barrier A (for pairing) */
	smp_acquire();

	/* simple in-order completion: we don't need
	 * to touch index at all. This also means we
	 * can just modify the descriptor in-place.
	 */
	ring[head].len--;
	/* Make sure len is valid before flags.
	 * Note: alternative is to write len and flags in one access -
	 * possible on 64 bit architectures but wmb is free on Intel anyway
	 * so I have no way to test whether it's a gain.
	 */
	/* Barrier B (for pairing) */
	smp_release();
	ring[head].flags = 0;
	host.used_idx++;
	return true;
}

void call_used(void)
{
	/* Flush in previous flags write */
	/* Barrier D (for pairing) */
	smp_mb();
	if (!need_event(event->call_index,
			host.used_idx,
			host.called_used_idx))
		return;

	host.called_used_idx = host.used_idx;
	call();
}
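/*
 * Illustrative host-side flow, mirroring the guest-side sketch above
 * (again compiled out, with made-up names): consume available descriptors
 * in order, signal the guest, then re-arm the kick event.  The labelled
 * barriers pair across the two sides: A (add_inbuf release / use_buf
 * acquire), B (use_buf release / get_buf acquire), C (kick_available /
 * enable_kick), D (call_used / enable_call).
 */
#if 0
static void example_host_flow(void)
{
	unsigned len;
	void *buf;

	do {
		while (use_buf(&len, &buf))
			/* descriptor was completed in place */;
		call_used();
	} while (!enable_kick());	/* false: new buffers raced in */
}
#endif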

y afterwards as the (inlined) call to i915_vma_unpin_fence() tries to
dereference it.  It seems to be some race condition where the object is
going away at shutdown time, since both times happened when shutting
down the X server.

The call chains were different:

 - VT ioctl(KDSETMODE, KD_TEXT):

     intel_cleanup_plane_fb+0x5b/0xa0 [i915]
     drm_atomic_helper_cleanup_planes+0x6f/0x90 [drm_kms_helper]
     intel_atomic_commit_tail+0x749/0xfe0 [i915]
     intel_atomic_commit+0x3cb/0x4f0 [i915]
     drm_atomic_commit+0x4b/0x50 [drm]
     restore_fbdev_mode+0x14c/0x2a0 [drm_kms_helper]
     drm_fb_helper_restore_fbdev_mode_unlocked+0x34/0x80 [drm_kms_helper]
     drm_fb_helper_set_par+0x2d/0x60 [drm_kms_helper]
     intel_fbdev_set_par+0x18/0x70 [i915]
     fb_set_var+0x236/0x460
     fbcon_blank+0x30f/0x350
     do_unblank_screen+0xd2/0x1a0
     vt_ioctl+0x507/0x12a0
     tty_ioctl+0x355/0xc30
     do_vfs_ioctl+0xa3/0x5e0
     SyS_ioctl+0x79/0x90
     entry_SYSCALL_64_fastpath+0x13/0x94

 - i915 unpin_work workqueue:

     intel_unpin_work_fn+0x58/0x140 [i915]
     process_one_work+0x1f1/0x480
     worker_thread+0x48/0x4d0
     kthread+0x101/0x140

and this patch purely papers over the issue by adding a NULL pointer
check and a WARN_ON_ONCE() to avoid the oops that would then generally
make the machine unresponsive.

Other callers of i915_gem_object_to_ggtt() seem to also check for the
returned pointer being NULL and warn about it, so this clearly has
happened before in other places.

[ Reported it originally to the i915 developers on Jan 8, applying the
  ugly workaround on my own now after triggering the problem for the
  second time with no feedback.

  This is likely to be the same bug reported as

    https://bugs.freedesktop.org/show_bug.cgi?id=98829
    https://bugs.freedesktop.org/show_bug.cgi?id=99134

  which has a patch for the underlying problem, but it hasn't gotten to
  me, so I'm applying the workaround. ]

Cc: Daniel Vetter <daniel.vetter@intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>