/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang
 *   Hank Janssen
 *   K. Y. Srinivasan
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	virt_mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */
static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
			       bool kick_q)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();

	/*
	 * This is the only case we need to signal: when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->write_index;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->read_index;
}
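/*
 * Illustrative sketch (not part of this file): how a channel callback
 * would typically bracket a batched read with hv_begin_read() and
 * hv_end_read(). The loop re-checks hv_end_read()'s return value
 * because clearing interrupt_mask races with the host placing new data
 * in the ring; process_one_packet() is a hypothetical helper.
 *
 *	static void example_onchannel_callback(struct vmbus_channel *chan)
 *	{
 *		struct hv_ring_buffer_info *rbi = &chan->inbound;
 *
 *		hv_begin_read(rbi);	// suppress interrupts while draining
 *		do {
 *			while (hv_get_bytes_to_read(rbi) >=
 *			       sizeof(struct vmpacket_descriptor))
 *				process_one_packet(chan);
 *		} while (hv_end_read(rbi));	// raced with host? drain again
 *	}
 */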
/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over the packet header.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer's data area. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices packed as a u64: the write index goes in
 * the upper 32 bits; the lower (read index) half is left zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from the ring buffer into a destination
 * buffer. Assumes there is enough room; handles wrap-around of the
 * source ring only!
 */
static u32 hv_copyfrom_ringbuffer(struct hv_ring_buffer_info *ring_info,
				  void *dest, u32 destlen,
				  u32 start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}

/*
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assumes there is enough room; handles wrap-around of the destination
 * ring only!
 */
static u32 hv_copyto_ringbuffer(struct hv_ring_buffer_info *ring_info,
				u32 start_write_offset,
				void *src, u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
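/*
 * Worked example of the wrap-around mapping that hv_ringbuffer_init()
 * below sets up: the (page_cnt - 1) data pages are mapped twice, back
 * to back, after the header page. With page_cnt = 4 and data pages
 * P1 P2 P3, the vmap()ed layout is
 *
 *	[header][P1][P2][P3][P1][P2][P3]
 *
 * so a packet that wraps past the end of the ring is still virtually
 * contiguous, the copy helpers above get away with a single memcpy(),
 * and only the returned offset is reduced modulo ring_datasize.
 */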
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON(sizeof(struct hv_ring_buffer) != PAGE_SIZE);

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			struct kvec *kv_list, u32 kv_count,
			bool lock, bool kick_q)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = 0;
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags = 0;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	totalbytes_towrite += sizeof(u64);

	if (lock)
		spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		if (lock)
			spin_unlock_irqrestore(&outring_info->ring_lock,
					       flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	if (lock)
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel, kick_q);

	return 0;
}
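/*
 * Illustrative write-side sketch (hypothetical caller, in the spirit of
 * how the vmbus_sendpacket() path drives this function): the caller
 * supplies the packet descriptor and payload as a kvec array, and
 * hv_ringbuffer_write() appends the u64 index trailer itself, which is
 * why it adds sizeof(u64) to the requested size.
 *
 *	struct vmpacket_descriptor desc;	// filled in by the caller
 *	struct kvec kv[2];
 *	int ret;
 *
 *	kv[0].iov_base = &desc;
 *	kv[0].iov_len  = sizeof(desc);
 *	kv[1].iov_base = payload;		// hypothetical buffer
 *	kv[1].iov_len  = payload_len;
 *
 *	ret = hv_ringbuffer_write(channel, kv, 2, true, true);
 *	if (ret == -EAGAIN)
 *		;	// ring full: back off and retry later
 */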
int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	struct vmpacket_descriptor desc;
	u32 offset;
	u32 packetlen;
	int ret = 0;
	struct hv_ring_buffer_info *inring_info = &channel->inbound;

	if (buflen <= 0)
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
	/* Make sure there is something to read */
	if (bytes_avail_toread < sizeof(desc)) {
		/*
		 * No error is set even when there is no header; drivers
		 * are expected to check buffer_actual_len instead.
		 */
		return ret;
	}

	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
						    next_read_location);

	offset = raw ? 0 : (desc.offset8 << 3);
	packetlen = (desc.len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc.trans_id;

	if (bytes_avail_toread < packetlen + offset)
		return -EAGAIN;

	if (packetlen > buflen)
		return -ENOBUFS;

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    packetlen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	hv_signal_on_read(channel);

	return ret;
}
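/*
 * Illustrative read-side sketch (hypothetical caller): three outcomes
 * need to be distinguished. A return of 0 with *buffer_actual_len == 0
 * means the ring was empty; -ENOBUFS means the supplied buffer is too
 * small (*buffer_actual_len holds the required size); 0 with a non-zero
 * length is a complete packet. The retry policy below is an assumption,
 * not something this file mandates.
 *
 *	u32 len;
 *	u64 req_id;
 *	int ret;
 *
 *	ret = hv_ringbuffer_read(channel, buf, buflen, &len, &req_id, false);
 *	if (ret == 0 && len == 0)
 *		return;		// nothing to read
 *	if (ret == -ENOBUFS)
 *		;	// retry with a buffer of at least 'len' bytes
 */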