/*
 * PCI Backend - Provides restricted access to the real PCI bus topology
 * to the frontend
 *
 *   Author: Ryan Wilson
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"

struct passthrough_dev_data {
        /* Access to dev_list must be protected by lock */
        struct list_head dev_list;
        struct mutex lock;
};

/* Look up the exported pci_dev matching (domain, bus, devfn), if any. */
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
                                               unsigned int domain,
                                               unsigned int bus,
                                               unsigned int devfn)
{
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry;
        struct pci_dev *dev = NULL;

        mutex_lock(&dev_data->lock);

        list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
                if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
                    && bus == (unsigned int)dev_entry->dev->bus->number
                    && devfn == dev_entry->dev->devfn) {
                        dev = dev_entry->dev;
                        break;
                }
        }

        mutex_unlock(&dev_data->lock);

        return dev;
}

/* Track a newly assigned device and publish it to the frontend. */
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
                                   struct pci_dev *dev,
                                   int devid, publish_pci_dev_cb publish_cb)
{
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry;
        unsigned int domain, bus, devfn;
        int err;

        dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
        if (!dev_entry)
                return -ENOMEM;
        dev_entry->dev = dev;

        mutex_lock(&dev_data->lock);
        list_add_tail(&dev_entry->list, &dev_data->dev_list);
        mutex_unlock(&dev_data->lock);

        /* Publish this device. */
        domain = (unsigned int)pci_domain_nr(dev->bus);
        bus = (unsigned int)dev->bus->number;
        devfn = dev->devfn;
        err = publish_cb(pdev, domain, bus, devfn, devid);

        return err;
}

/* Remove a single device from the list and hand it back to pcistub. */
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
                                        struct pci_dev *dev, bool lock)
{
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry, *t;
        struct pci_dev *found_dev = NULL;

        mutex_lock(&dev_data->lock);

        list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
                if (dev_entry->dev == dev) {
                        list_del(&dev_entry->list);
                        found_dev = dev_entry->dev;
                        kfree(dev_entry);
                }
        }

        mutex_unlock(&dev_data->lock);

        if (found_dev) {
                if (lock)
                        device_lock(&found_dev->dev);
                pcistub_put_pci_dev(found_dev);
                if (lock)
                        device_unlock(&found_dev->dev);
        }
}

/* Allocate and initialize the per-pdev device list. */
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
        struct passthrough_dev_data *dev_data;

        dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
        if (!dev_data)
                return -ENOMEM;

        mutex_init(&dev_data->lock);

        INIT_LIST_HEAD(&dev_data->dev_list);

        pdev->pci_dev_data = dev_data;

        return 0;
}

static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
                                         publish_pci_root_cb publish_root_cb)
{
        int err = 0;
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry, *e;
        struct pci_dev *dev;
        int found;
        unsigned int domain, bus;

        mutex_lock(&dev_data->lock);

        list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
                /* Only publish this device as a root if none of its
                 * parent bridges are exported
                 */
                found = 0;
                dev = dev_entry->dev->bus->self;
                for (; !found && dev != NULL; dev = dev->bus->self) {
                        list_for_each_entry(e, &dev_data->dev_list, list) {
                                if (dev == e->dev) {
                                        found = 1;
                                        break;
                                }
                        }
                }

                domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
                bus = (unsigned int)dev_entry->dev->bus->number;

                if (!found) {
                        err = publish_root_cb(pdev, domain, bus);
                        if (err)
                                break;
                }
        }

        mutex_unlock(&dev_data->lock);

        return err;
}

/* Drop every tracked device and free the backend's bookkeeping. */
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
        struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
        struct pci_dev_entry *dev_entry, *t;

        list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
                struct pci_dev *dev = dev_entry->dev;

                list_del(&dev_entry->list);
                device_lock(&dev->dev);
                pcistub_put_pci_dev(dev);
                device_unlock(&dev->dev);
                kfree(dev_entry);
        }

        kfree(dev_data);
        pdev->pci_dev_data = NULL;
}

/* In the passthrough backend the frontend sees the device at its real
 * (domain, bus, devfn), so simply report the physical coordinates. */
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
                                        struct xen_pcibk_device *pdev,
                                        unsigned int *domain, unsigned int *bus,
                                        unsigned int *devfn)
{
        *domain = pci_domain_nr(pcidev->bus);
        *bus = pcidev->bus->number;
        *devfn = pcidev->devfn;
        return 1;
}

const struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
        .name           = "passthrough",
        .init           = __xen_pcibk_init_devices,
        .free           = __xen_pcibk_release_devices,
        .find           = __xen_pcibk_get_pcifront_dev,
        .publish        = __xen_pcibk_publish_pci_roots,
        .release        = __xen_pcibk_release_pci_dev,
        .add            = __xen_pcibk_add_pci_dev,
        .get            = __xen_pcibk_get_pci_dev,
};
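For orientation, here is a minimal sketch of how core pciback code might dispatch through the ops table above. The wrapper function and its name are assumptions made for illustration; only the struct fields and callback signatures are taken from the file itself.

/*
 * Illustrative sketch only -- not part of passthrough.c.  It shows the
 * vtable-style dispatch: a caller holding a struct xen_pcibk_device can
 * resolve a frontend-visible (domain, bus, devfn) triple back to the
 * pci_dev that was registered earlier via the .add callback.
 */
static struct pci_dev *example_resolve_dev(struct xen_pcibk_device *pdev,
                                           unsigned int domain,
                                           unsigned int bus,
                                           unsigned int devfn)
{
        const struct xen_pcibk_backend *be = &xen_pcibk_passthrough_backend;

        /* .get walks dev_list under dev_data->lock and returns NULL if
         * the device was never exported to this frontend. */
        return be->get(pdev, domain, bus, devfn);
}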
author          2017-01-28 11:52:02 +0100
committer       Helge Deller <deller@gmx.de>    2017-01-28 21:54:23 +0100
commit          2ad5d52d42810bed95100a3d912679d8864421ec
tree            7f93e2f906b1c86f5b76c0f4c0978d41a8a29861
parent          83b5d1e3d3013dbf90645a5d07179d018c8243fa

parisc: Don't use BITS_PER_LONG in userspace-exported swab.h header
In swab.h the "#if BITS_PER_LONG > 32" breaks compiling userspace programs if
BITS_PER_LONG is #defined by userspace with the sizeof() compiler builtin.

Solve this problem by using __BITS_PER_LONG instead. Since we now #include
asm/bitsperlong.h, avoid further potential userspace pollution by moving the
#define of SHIFT_PER_LONG to bitops.h, which is not exported to userspace.

This patch unbreaks compiling qemu on hppa/parisc.

Signed-off-by: Helge Deller <deller@gmx.de>
Cc: <stable@vger.kernel.org>
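A rough illustration of the failure mode and the fix described above. This is a sketch, not the patch's actual diff; the 64-bit block is a placeholder standing in for swab.h's real contents.

/* Userspace such as qemu may define BITS_PER_LONG via a compiler builtin: */
#define BITS_PER_LONG (sizeof(long) * 8)

/*
 * The C preprocessor cannot evaluate sizeof(), so an exported header that
 * tests
 *
 *      #if BITS_PER_LONG > 32
 *
 * now fails to compile in userspace.  The uapi-safe spelling tests the
 * plain numeric macro from <asm/bitsperlong.h> instead:
 */
#include <asm/bitsperlong.h>    /* defines __BITS_PER_LONG as 32 or 64 */

#if __BITS_PER_LONG > 32
/* 64-bit byte-swap helpers would live here (placeholder) */
#endif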