/**
 * dwc3-exynos.c - Samsung EXYNOS DWC3 Specific Glue layer
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Anton Tikhomirov
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/usb/otg.h>
#include <linux/usb/usb_phy_generic.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regulator/consumer.h>

struct dwc3_exynos {
	struct platform_device	*usb2_phy;
	struct platform_device	*usb3_phy;
	struct device		*dev;

	struct clk		*clk;
	struct clk		*susp_clk;
	struct clk		*axius_clk;

	struct regulator	*vdd33;
	struct regulator	*vdd10;
};

static int dwc3_exynos_register_phys(struct dwc3_exynos *exynos)
{
	struct usb_phy_generic_platform_data pdata;
	struct platform_device	*pdev;
	int			ret;

	memset(&pdata, 0x00, sizeof(pdata));

	pdev = platform_device_alloc("usb_phy_generic", PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	exynos->usb2_phy = pdev;
	pdata.type = USB_PHY_TYPE_USB2;
	pdata.gpio_reset = -1;

	ret = platform_device_add_data(exynos->usb2_phy, &pdata, sizeof(pdata));
	if (ret)
		goto err1;

	pdev = platform_device_alloc("usb_phy_generic", PLATFORM_DEVID_AUTO);
	if (!pdev) {
		ret = -ENOMEM;
		goto err1;
	}

	exynos->usb3_phy = pdev;
	pdata.type = USB_PHY_TYPE_USB3;

	ret = platform_device_add_data(exynos->usb3_phy, &pdata, sizeof(pdata));
	if (ret)
		goto err2;

	ret = platform_device_add(exynos->usb2_phy);
	if (ret)
		goto err2;

	ret = platform_device_add(exynos->usb3_phy);
	if (ret)
		goto err3;

	return 0;

err3:
	platform_device_del(exynos->usb2_phy);

err2:
	platform_device_put(exynos->usb3_phy);

err1:
	platform_device_put(exynos->usb2_phy);

	return ret;
}

static int dwc3_exynos_remove_child(struct device *dev, void *unused)
{
	struct platform_device *pdev = to_platform_device(dev);

	platform_device_unregister(pdev);

	return 0;
}

static int dwc3_exynos_probe(struct platform_device *pdev)
{
	struct dwc3_exynos	*exynos;
	struct device		*dev = &pdev->dev;
	struct device_node	*node = dev->of_node;
	int			ret;

	exynos = devm_kzalloc(dev, sizeof(*exynos), GFP_KERNEL);
	if (!exynos)
		return -ENOMEM;

	platform_set_drvdata(pdev, exynos);

	exynos->dev = dev;

	exynos->clk = devm_clk_get(dev, "usbdrd30");
	if (IS_ERR(exynos->clk)) {
		dev_err(dev, "couldn't get clock\n");
		return -EINVAL;
	}
	clk_prepare_enable(exynos->clk);

	exynos->susp_clk = devm_clk_get(dev, "usbdrd30_susp_clk");
	if (IS_ERR(exynos->susp_clk)) {
		dev_info(dev, "no suspend clk specified\n");
		exynos->susp_clk = NULL;
	}
	clk_prepare_enable(exynos->susp_clk);

	if (of_device_is_compatible(node, "samsung,exynos7-dwusb3")) {
		exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
		if (IS_ERR(exynos->axius_clk)) {
			dev_err(dev, "no AXI UpScaler clk specified\n");
			ret = -ENODEV;
			goto axius_clk_err;
		}
		clk_prepare_enable(exynos->axius_clk);
	} else {
		exynos->axius_clk = NULL;
	}

	exynos->vdd33 = devm_regulator_get(dev, "vdd33");
	if (IS_ERR(exynos->vdd33)) {
		ret = PTR_ERR(exynos->vdd33);
		goto err2;
	}
	ret = regulator_enable(exynos->vdd33);
	if (ret) {
		dev_err(dev, "Failed to enable VDD33 supply\n");
		goto err2;
	}

	exynos->vdd10 = devm_regulator_get(dev, "vdd10");
	if (IS_ERR(exynos->vdd10)) {
		ret = PTR_ERR(exynos->vdd10);
		goto err3;
	}
	ret = regulator_enable(exynos->vdd10);
	if (ret) {
		dev_err(dev, "Failed to enable VDD10 supply\n");
supply\n"); goto err3; } ret = dwc3_exynos_register_phys(exynos); if (ret) { dev_err(dev, "couldn't register PHYs\n"); goto err4; } if (node) { ret = of_platform_populate(node, NULL, NULL, dev); if (ret) { dev_err(dev, "failed to add dwc3 core\n"); goto err5; } } else { dev_err(dev, "no device node, failed to add dwc3 core\n"); ret = -ENODEV; goto err5; } return 0; err5: platform_device_unregister(exynos->usb2_phy); platform_device_unregister(exynos->usb3_phy); err4: regulator_disable(exynos->vdd10); err3: regulator_disable(exynos->vdd33); err2: clk_disable_unprepare(exynos->axius_clk); axius_clk_err: clk_disable_unprepare(exynos->susp_clk); clk_disable_unprepare(exynos->clk); return ret; } static int dwc3_exynos_remove(struct platform_device *pdev) { struct dwc3_exynos *exynos = platform_get_drvdata(pdev); device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child); platform_device_unregister(exynos->usb2_phy); platform_device_unregister(exynos->usb3_phy); clk_disable_unprepare(exynos->axius_clk); clk_disable_unprepare(exynos->susp_clk); clk_disable_unprepare(exynos->clk); regulator_disable(exynos->vdd33); regulator_disable(exynos->vdd10); return 0; } static const struct of_device_id exynos_dwc3_match[] = { { .compatible = "samsung,exynos5250-dwusb3" }, { .compatible = "samsung,exynos7-dwusb3" }, {}, }; MODULE_DEVICE_TABLE(of, exynos_dwc3_match); #ifdef CONFIG_PM_SLEEP static int dwc3_exynos_suspend(struct device *dev) { struct dwc3_exynos *exynos = dev_get_drvdata(dev); clk_disable(exynos->axius_clk); clk_disable(exynos->clk); regulator_disable(exynos->vdd33); regulator_disable(exynos->vdd10); return 0; } static int dwc3_exynos_resume(struct device *dev) { struct dwc3_exynos *exynos = dev_get_drvdata(dev); int ret; ret = regulator_enable(exynos->vdd33); if (ret) { dev_err(dev, "Failed to enable VDD33 supply\n"); return ret; } ret = regulator_enable(exynos->vdd10); if (ret) { dev_err(dev, "Failed to enable VDD10 supply\n"); return ret; } clk_enable(exynos->clk); clk_enable(exynos->axius_clk); /* runtime set active to reflect active state. */ pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); return 0; } static const struct dev_pm_ops dwc3_exynos_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(dwc3_exynos_suspend, dwc3_exynos_resume) }; #define DEV_PM_OPS (&dwc3_exynos_dev_pm_ops) #else #define DEV_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ static struct platform_driver dwc3_exynos_driver = { .probe = dwc3_exynos_probe, .remove = dwc3_exynos_remove, .driver = { .name = "exynos-dwc3", .of_match_table = exynos_dwc3_match, .pm = DEV_PM_OPS, }, }; module_platform_driver(dwc3_exynos_driver); MODULE_ALIAS("platform:exynos-dwc3"); MODULE_AUTHOR("Anton Tikhomirov "); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("DesignWare USB3 EXYNOS Glue Layer"); henever mmci driver enables cache control by programming eMMC's EXT_CSD register, block driver may request to flush the eMMC internal caches causing mmci driver to send a MMC_SWITCH command to the card with FLUSH_CACHE operation. And because busy end interrupt may be mistakenly cleared while not yet processed, this mmc request may never complete. As a result, mmcqd task may be stuck forever. Here is an instance caught by lockup detector which shows that mmcqd task was hung while waiting for mmc_flush_cache command to complete: .. [ 240.251595] INFO: task mmcqd/1:52 blocked for more than 120 seconds. 
[ 240.257973]       Not tainted 4.1.13-00510-g9d91424 #2
[ 240.263109] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[ 240.270955] mmcqd/1         D c047504c     0    52      2 0x00000000
[ 240.277359] [<c047504c>] (__schedule) from [<c04754a0>] (schedule+0x40/0x98)
[ 240.284418] [<c04754a0>] (schedule) from [<c0477d40>] (schedule_timeout+0x148/0x188)
[ 240.292191] [<c0477d40>] (schedule_timeout) from [<c0476040>] (wait_for_common+0xa4/0x170)
[ 240.300491] [<c0476040>] (wait_for_common) from [<c02efc1c>] (mmc_wait_for_req_done+0x4c/0x13c)
[ 240.309224] [<c02efc1c>] (mmc_wait_for_req_done) from [<c02efd90>] (mmc_wait_for_cmd+0x64/0x84)
[ 240.317953] [<c02efd90>] (mmc_wait_for_cmd) from [<c02f5b14>] (__mmc_switch+0xa4/0x2a8)
[ 240.325964] [<c02f5b14>] (__mmc_switch) from [<c02f5d40>] (mmc_switch+0x28/0x30)
[ 240.333389] [<c02f5d40>] (mmc_switch) from [<c02f0984>] (mmc_flush_cache+0x54/0x80)
[ 240.341073] [<c02f0984>] (mmc_flush_cache) from [<c02ff0c4>] (mmc_blk_issue_rq+0x114/0x4e8)
[ 240.349459] [<c02ff0c4>] (mmc_blk_issue_rq) from [<c03008d4>] (mmc_queue_thread+0xc0/0x180)
[ 240.357844] [<c03008d4>] (mmc_queue_thread) from [<c003cf90>] (kthread+0xdc/0xf4)
[ 240.365339] [<c003cf90>] (kthread) from [<c0010068>] (ret_from_fork+0x14/0x2c)
..
..
[ 240.664311] INFO: task partprobe:564 blocked for more than 120 seconds.
[ 240.670943]       Not tainted 4.1.13-00510-g9d91424 #2
[ 240.676078] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[ 240.683922] partprobe       D c047504c     0   564    486 0x00000000
[ 240.690318] [<c047504c>] (__schedule) from [<c04754a0>] (schedule+0x40/0x98)
[ 240.697396] [<c04754a0>] (schedule) from [<c0477d40>] (schedule_timeout+0x148/0x188)
[ 240.705149] [<c0477d40>] (schedule_timeout) from [<c0476040>] (wait_for_common+0xa4/0x170)
[ 240.713446] [<c0476040>] (wait_for_common) from [<c01f3300>] (submit_bio_wait+0x58/0x64)
[ 240.721571] [<c01f3300>] (submit_bio_wait) from [<c01fbbd8>] (blkdev_issue_flush+0x60/0x88)
[ 240.729957] [<c01fbbd8>] (blkdev_issue_flush) from [<c010ff84>] (blkdev_fsync+0x34/0x44)
[ 240.738083] [<c010ff84>] (blkdev_fsync) from [<c0109594>] (do_fsync+0x3c/0x64)
[ 240.745319] [<c0109594>] (do_fsync) from [<c000ffc0>] (ret_fast_syscall+0x0/0x3c)
..

Here is the detailed sequence showing when this issue may happen:

1) At probe time, the mmci device is initialized and card busy detection
   based on DAT[0] monitoring is enabled.

2) Later during run time, since the card reported to support internal
   caches, an MMC_SWITCH command is sent to the eMMC device with the
   FLUSH_CACHE operation. On receiving this command, the eMMC may enter
   busy state (for a relatively short time in the case of the deadlock).

3) Then the mmci interrupt is raised and mmci_irq() is called:

   The MMCISTATUS register is read and is equal to 0x01000440, so the
   following status bits are set:
   - MCI_CMDRESPEND (= 6)
   - MCI_DATABLOCKEND (= 10)
   - MCI_ST_CARDBUSY (= 24)

   Since the MMCIMASK0 register is 0x3FF, the status variable is set to
   0x00000040 and the MCI_CMDRESPEND bit is cleared by writing the
   MMCICLEAR register.

   Then mmci_cmd_irq() is called. Considering the following conditions:
   - host->busy_status is 0,
   - this is a "busy response",
   - reading again the MMCISTATUS register gives 0x1000400,
   MMCIMASK0 is updated to unmask the MCI_ST_BUSYEND bit. Thus, MMCIMASK0
   is set to 0x010003FF and host->busy_status is set to wait for busy end
   completion.

   Back again in the status loop of mmci_irq(), we quickly go through
   mmci_data_irq() as there is no data in that case.
   And we finally go through the following test at the end of the
   while(status) loop:

	/*
	 * Don't poll for busy completion in irq context.
	 */
	if (host->variant->busy_detect && host->busy_status)
		status &= ~host->variant->busy_detect_flag;

   Because the status variable is not yet null (it is equal to 0x40), we
   do not leave interrupt context yet but loop again into the
   while(status) loop. So we run across the following steps:

   a) The MMCISTATUS register is read again and this time is equal to
      0x01000400, so the following bits are set:
      - MCI_DATABLOCKEND (= 10)
      - MCI_ST_CARDBUSY (= 24)

      Since the MMCIMASK0 register is equal to 0x010003FF:

   b) the status variable is set to 0x01000000.

   c) the MCI_ST_CARDBUSY bit is cleared by writing the MMCICLEAR
      register.

   Then mmci_cmd_irq() is called one more time. Since host->busy_status
   is set and MCI_ST_CARDBUSY is set in the status variable, we just
   return from this function.

   Back again in mmci_irq(), the status variable is set to 0 and we
   finally leave the while(status) loop. As a result we leave interrupt
   context, waiting for the busy end interrupt event.

Now, consider that busy end completion is raised IN BETWEEN steps 3.a)
and 3.c). In such a case, we may mistakenly clear the busy end interrupt
at step 3.c) while it has not yet been processed. This results in the mmc
command waiting forever for a busy end completion that will never happen.

To fix the problem, this patch implements the following changes:

Considering that the mmci seems to trigger the IRQ on both edges while
monitoring DAT0 for busy completion, and that the same status bit is used
to monitor start and end of busy detection, special care must be taken to
make sure that both start and end interrupts are always cleared one after
the other.

1) Clearing of the card busy bit is moved into the mmci_cmd_irq()
   function, where unmasking of the busy end bit is effectively handled.

2) Just before unmasking the busy end event, the busy start event is
   cleared by writing the card busy bit in the MMCICLEAR register.

3) Finally, once we are no longer busy with a command, the busy end event
   is cleared by again writing the card busy bit in the MMCICLEAR
   register.

An illustrative sketch of this ordering follows the tags below.

This patch has been tested with the ST Accordo5 machine, which is not yet
supported upstream but relies on the mmci driver.

Signed-off-by: Sarang Mairal <sarang.mairal@garmin.com>
Signed-off-by: Jean-Nicolas Graux <jean-nicolas.graux@st.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
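For illustration only, here is a minimal, hypothetical C sketch of the busy
start/end clearing order described in points 1) to 3) above. It is NOT the
actual drivers/mmc/host/mmci.c change: sketch_host, sketch_readl(),
sketch_writel(), sketch_cmd_irq() and the register offsets are stand-ins, and
a single MCI_ST_CARDBUSY bit is used where the real driver distinguishes the
variant's busy_detect_flag from its busy_detect_mask.

#include <stdint.h>

#define MMCISTATUS	0x034	/* assumed PL18x-style offsets, stand-ins only */
#define MMCICLEAR	0x038
#define MMCIMASK0	0x03c
#define MCI_ST_CARDBUSY	(1u << 24)	/* busy start/end status bit (DAT0) */

struct sketch_host {
	volatile uint8_t *base;		/* mapped controller registers */
	uint32_t busy_status;		/* non-zero while waiting for busy end */
};

static uint32_t sketch_readl(struct sketch_host *h, unsigned int off)
{
	return *(volatile uint32_t *)(h->base + off);
}

static void sketch_writel(struct sketch_host *h, uint32_t val, unsigned int off)
{
	*(volatile uint32_t *)(h->base + off) = val;
}

/* Command-IRQ path, called with the already-masked status word. */
static void sketch_cmd_irq(struct sketch_host *h, uint32_t status, int busy_resp)
{
	/*
	 * Busy response and not yet waiting: clear the busy *start* event
	 * first, then unmask the busy end interrupt and record that we are
	 * waiting (points 1 and 2 above).
	 */
	if (busy_resp && !h->busy_status &&
	    (sketch_readl(h, MMCISTATUS) & MCI_ST_CARDBUSY)) {
		sketch_writel(h, MCI_ST_CARDBUSY, MMCICLEAR);
		sketch_writel(h, sketch_readl(h, MMCIMASK0) | MCI_ST_CARDBUSY,
			      MMCIMASK0);
		h->busy_status = status;
		return;
	}

	/*
	 * Busy end seen while waiting: clear the busy *end* event and mask
	 * it again before completing the command (point 3 above).
	 */
	if (h->busy_status && (status & MCI_ST_CARDBUSY)) {
		sketch_writel(h, MCI_ST_CARDBUSY, MMCICLEAR);
		sketch_writel(h, sketch_readl(h, MMCIMASK0) & ~MCI_ST_CARDBUSY,
			      MMCIMASK0);
		h->busy_status = 0;
		/* the pending command can now be completed */
	}
}

The point of this ordering is that the busy start event is cleared before the
busy end interrupt is unmasked, so a busy end that arrives in between can no
longer be wiped out by a stale clear, which is what caused the lockup
described above.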