/*
 * UWB DRP IE management.
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uwb.h>

#include "uwb-internal.h"

/*
 * Return the reason code for a reservation's DRP IE.
 */
static int uwb_rsv_reason_code(struct uwb_rsv *rsv)
{
	static const int reason_codes[] = {
		[UWB_RSV_STATE_O_INITIATED]          = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_O_PENDING]            = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_O_MODIFIED]           = UWB_DRP_REASON_MODIFIED,
		[UWB_RSV_STATE_O_ESTABLISHED]        = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_O_TO_BE_MOVED]        = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_O_MOVE_COMBINING]     = UWB_DRP_REASON_MODIFIED,
		[UWB_RSV_STATE_O_MOVE_REDUCING]      = UWB_DRP_REASON_MODIFIED,
		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_ACCEPTED]           = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_CONFLICT]           = UWB_DRP_REASON_CONFLICT,
		[UWB_RSV_STATE_T_PENDING]            = UWB_DRP_REASON_PENDING,
		[UWB_RSV_STATE_T_DENIED]             = UWB_DRP_REASON_DENIED,
		[UWB_RSV_STATE_T_RESIZED]            = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = UWB_DRP_REASON_PENDING,
		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = UWB_DRP_REASON_DENIED,
	};

	return reason_codes[rsv->state];
}

/*
 * Return the reason code for a reservation's companion DRP IE.
 */
static int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv)
{
	static const int companion_reason_codes[] = {
		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = UWB_DRP_REASON_PENDING,
		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = UWB_DRP_REASON_DENIED,
	};

	return companion_reason_codes[rsv->state];
}

/*
 * Return the status bit for a reservation's DRP IE.
 */
int uwb_rsv_status(struct uwb_rsv *rsv)
{
	static const int statuses[] = {
		[UWB_RSV_STATE_O_INITIATED]          = 0,
		[UWB_RSV_STATE_O_PENDING]            = 0,
		[UWB_RSV_STATE_O_MODIFIED]           = 1,
		[UWB_RSV_STATE_O_ESTABLISHED]        = 1,
		[UWB_RSV_STATE_O_TO_BE_MOVED]        = 0,
		[UWB_RSV_STATE_O_MOVE_COMBINING]     = 1,
		[UWB_RSV_STATE_O_MOVE_REDUCING]      = 1,
		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = 1,
		[UWB_RSV_STATE_T_ACCEPTED]           = 1,
		[UWB_RSV_STATE_T_CONFLICT]           = 0,
		[UWB_RSV_STATE_T_PENDING]            = 0,
		[UWB_RSV_STATE_T_DENIED]             = 0,
		[UWB_RSV_STATE_T_RESIZED]            = 1,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1,
		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = 1,
		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = 1,
	};

	return statuses[rsv->state];
}
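/*
 * Illustration (added for clarity, not part of the original source): the
 * status and reason-code tables above are consumed when a reservation's
 * DRP IE is (re)built, roughly as done in uwb_drp_ie_update() below:
 *
 *	uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv));
 *	uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv));
 *
 * For example, a target reservation in UWB_RSV_STATE_T_PENDING is
 * advertised with status 0 and reason code UWB_DRP_REASON_PENDING; once
 * it moves to UWB_RSV_STATE_T_ACCEPTED the IE carries status 1 and
 * reason code UWB_DRP_REASON_ACCEPTED.
 */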
/*
 * Return the status bit for a reservation's companion DRP IE.
 */
int uwb_rsv_companion_status(struct uwb_rsv *rsv)
{
	static const int companion_statuses[] = {
		[UWB_RSV_STATE_O_MOVE_EXPANDING]     = 0,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0,
		[UWB_RSV_STATE_T_EXPANDING_PENDING]  = 0,
		[UWB_RSV_STATE_T_EXPANDING_DENIED]   = 0,
	};

	return companion_statuses[rsv->state];
}

/*
 * Allocate a DRP IE.
 *
 * To save having to free/allocate a DRP IE when its MAS changes,
 * enough memory is allocated for the maximum number of DRP
 * allocation fields.  This gives an overhead per reservation of up to
 * (UWB_NUM_ZONES - 1) * 4 = 60 octets.
 */
static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
{
	struct uwb_ie_drp *drp_ie;

	drp_ie = kzalloc(sizeof(struct uwb_ie_drp) +
			 UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc),
			 GFP_KERNEL);
	if (drp_ie)
		drp_ie->hdr.element_id = UWB_IE_DRP;
	return drp_ie;
}

/*
 * Fill a DRP IE's allocation fields from a MAS bitmap.
 */
static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
			       struct uwb_mas_bm *mas)
{
	int z, i, num_fields = 0, next = 0;
	struct uwb_drp_alloc *zones;
	__le16 current_bmp;
	DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
	DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);

	zones = drp_ie->allocs;

	bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);

	/* Determine unique MAS bitmaps in zones from bitmap. */
	for (z = 0; z < UWB_NUM_ZONES; z++) {
		bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
		if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
			bool found = false;
			current_bmp = (__le16) *tmp_mas_bm;
			for (i = 0; i < next; i++) {
				if (current_bmp == zones[i].mas_bm) {
					zones[i].zone_bm |= 1 << z;
					found = true;
					break;
				}
			}
			if (!found) {
				num_fields++;
				zones[next].zone_bm = 1 << z;
				zones[next].mas_bm = current_bmp;
				next++;
			}
		}
		bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
	}

	/* Store in format ready for transmission (le16). */
	for (i = 0; i < num_fields; i++) {
		drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
		drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
	}

	drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
		+ num_fields * sizeof(struct uwb_drp_alloc);
}
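/*
 * Worked example (added for illustration, not part of the original
 * driver): suppose a reservation owns MAS 0 and 1 of every zone, i.e.
 * bits 0, 1, 16, 17, 32, 33, ... of the 256-bit MAS bitmap.  Each zone
 * then reduces to the same 16-bit per-zone bitmap 0x0003, so the loop
 * above emits a single allocation field covering all 16 zones:
 *
 *	drp_ie->allocs[0].zone_bm = cpu_to_le16(0xffff);
 *	drp_ie->allocs[0].mas_bm  = cpu_to_le16(0x0003);
 *
 * and drp_ie->hdr.length accounts for exactly one struct uwb_drp_alloc.
 */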
/**
 * uwb_drp_ie_update - update a reservation's DRP IE
 * @rsv: the reservation
 */
int uwb_drp_ie_update(struct uwb_rsv *rsv)
{
	struct uwb_ie_drp *drp_ie;
	struct uwb_rsv_move *mv;
	int unsafe;

	if (rsv->state == UWB_RSV_STATE_NONE) {
		kfree(rsv->drp_ie);
		rsv->drp_ie = NULL;
		return 0;
	}

	unsafe = rsv->mas.unsafe ? 1 : 0;

	if (rsv->drp_ie == NULL) {
		rsv->drp_ie = uwb_drp_ie_alloc();
		if (rsv->drp_ie == NULL)
			return -ENOMEM;
	}
	drp_ie = rsv->drp_ie;

	uwb_ie_drp_set_unsafe(drp_ie, unsafe);
	uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker);
	uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv));
	uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv));
	uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv));
	uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
	uwb_ie_drp_set_type(drp_ie, rsv->type);

	if (uwb_rsv_is_owner(rsv)) {
		switch (rsv->target.type) {
		case UWB_RSV_TARGET_DEV:
			drp_ie->dev_addr = rsv->target.dev->dev_addr;
			break;
		case UWB_RSV_TARGET_DEVADDR:
			drp_ie->dev_addr = rsv->target.devaddr;
			break;
		}
	} else
		drp_ie->dev_addr = rsv->owner->dev_addr;

	uwb_drp_ie_from_bm(drp_ie, &rsv->mas);

	if (uwb_rsv_has_two_drp_ies(rsv)) {
		mv = &rsv->mv;
		if (mv->companion_drp_ie == NULL) {
			mv->companion_drp_ie = uwb_drp_ie_alloc();
			if (mv->companion_drp_ie == NULL)
				return -ENOMEM;
		}
		drp_ie = mv->companion_drp_ie;

		/* keep all the same configuration of the main drp_ie */
		memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp));

		/* FIXME: handle properly the unsafe bit */
		uwb_ie_drp_set_unsafe(drp_ie, 1);
		uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv));
		uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv));

		uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas);
	}

	rsv->ie_valid = true;
	return 0;
}

/*
 * Set MAS bits from given MAS bitmap in a single zone of large bitmap.
 *
 * We are given a zone id and the MAS bitmap of bits that need to be set in
 * this zone.  Note that this zone may already have bits set and this only
 * adds settings - we cannot simply assign the MAS bitmap contents to the
 * zone contents.  We iterate over the bits (MAS) in the zone and set the
 * bits that are set in the given MAS bitmap.
 */
static void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm)
{
	int mas;
	u16 mas_mask;

	for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
		mas_mask = 1 << mas;
		if (mas_bm & mas_mask)
			set_bit(zone * UWB_NUM_ZONES + mas, bm->bm);
	}
}
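/*
 * Illustration (added for clarity, not part of the original driver): a
 * call such as
 *
 *	uwb_drp_ie_single_zone_to_bm(bm, 2, 0x00f0);
 *
 * sets bits 36-39 of the 256-bit MAS bitmap (MAS 4 to 7 of zone 2),
 * leaving any bits already set in that zone untouched.
 */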
/**
 * uwb_drp_ie_to_bm - convert DRP allocation fields to a bitmap
 * @bm: MAS bitmap that will be populated to correspond to the
 * allocation fields in the DRP IE
 * @drp_ie: the DRP IE that contains the allocation fields.
 *
 * The input format is an array of MAS allocation fields (16 bit Zone
 * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section
 * 16.8.6.  The output is a full 256 bit MAS bitmap.
 *
 * We go over all the allocation fields, for each allocation field we
 * know which zones are impacted.  We iterate over all the zones
 * impacted and call a function that will set the correct MAS bits in
 * each zone.
 */
void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
{
	int numallocs = (drp_ie->hdr.length - 4) / 4;
	const struct uwb_drp_alloc *alloc;
	int cnt;
	u16 zone_bm, mas_bm;
	u8 zone;
	u16 zone_mask;

	bitmap_zero(bm->bm, UWB_NUM_MAS);

	for (cnt = 0; cnt < numallocs; cnt++) {
		alloc = &drp_ie->allocs[cnt];
		zone_bm = le16_to_cpu(alloc->zone_bm);
		mas_bm = le16_to_cpu(alloc->mas_bm);
		for (zone = 0; zone < UWB_NUM_ZONES; zone++) {
			zone_mask = 1 << zone;
			if (zone_bm & zone_mask)
				uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm);
		}
	}
}

  0x64)
  [ 240.721571] [<c01f3300>] (submit_bio_wait) from [<c01fbbd8>] (blkdev_issue_flush+0x60/0x88)
  [ 240.729957] [<c01fbbd8>] (blkdev_issue_flush) from [<c010ff84>] (blkdev_fsync+0x34/0x44)
  [ 240.738083] [<c010ff84>] (blkdev_fsync) from [<c0109594>] (do_fsync+0x3c/0x64)
  [ 240.745319] [<c0109594>] (do_fsync) from [<c000ffc0>] (ret_fast_syscall+0x0/0x3c)
  ..

Here is the detailed sequence showing when this issue may happen:

1) At probe time, the mmci device is initialized and card busy detection
   based on DAT[0] monitoring is enabled.

2) Later, at run time, since the card reports support for an internal
   cache, an MMC_SWITCH command is sent to the eMMC device with the
   FLUSH_CACHE operation.  On receiving this command, the eMMC may enter
   the busy state (for a relatively short time in the dead-lock case).

3) Then the mmci interrupt is raised and mmci_irq() is called:

   The MMCISTATUS register is read and is equal to 0x01000440, so the
   following status bits are set:
   - MCI_CMDRESPEND (= 6)
   - MCI_DATABLOCKEND (= 10)
   - MCI_ST_CARDBUSY (= 24)

   Since the MMCIMASK0 register is 0x3FF, the status variable is set to
   0x00000040 and the MCI_CMDRESPEND bit is cleared by writing the
   MMCICLEAR register.

   Then mmci_cmd_irq() is called.  Given the following conditions:
   - host->busy_status is 0,
   - this is a "busy response",
   - reading the MMCISTATUS register again gives 0x1000400,

   MMCIMASK0 is updated to unmask the MCI_ST_BUSYEND bit.  Thus,
   MMCIMASK0 is set to 0x010003FF and host->busy_status is set to wait
   for busy end completion.

   Back in the status loop of mmci_irq(), we quickly go through
   mmci_data_irq() as there is no data in that case, and we finally reach
   the following test at the end of the while(status) loop:

	/*
	 * Don't poll for busy completion in irq context.
	 */
	if (host->variant->busy_detect && host->busy_status)
		status &= ~host->variant->busy_detect_flag;

   Because the status variable is not yet zero (it is equal to 0x40), we
   do not leave interrupt context but loop again through the
   while(status) loop.  So we run across the following steps:

   a) The MMCISTATUS register is read again and this time is equal to
      0x01000400, so the following bits are set:
      - MCI_DATABLOCKEND (= 10)
      - MCI_ST_CARDBUSY (= 24)

      Since the MMCIMASK0 register is equal to 0x010003FF:

   b) the status variable is set to 0x01000000;

   c) the MCI_ST_CARDBUSY bit is cleared by writing the MMCICLEAR
      register.

   Then mmci_cmd_irq() is called one more time.  Since host->busy_status
   is set and MCI_ST_CARDBUSY is set in the status variable, we just
   return from this function.

   Back in mmci_irq(), the status variable is set to 0 and we finally
   leave the while(status) loop.  As a result we leave interrupt context,
   waiting for the busy end interrupt event.

Now, consider that the busy end completion is raised IN BETWEEN steps
3.a) and 3.c).  In such a case, we may mistakenly clear the busy end
interrupt at step 3.c) while it has not yet been processed.  This results
in the mmc command waiting forever for a busy end completion that will
never happen.
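The race window can be seen in a simplified sketch of the interrupt loop
(illustrative only, not the exact driver code; register and flag names
are those used in the description above):

	do {
		status = readl(host->base + MMCISTATUS);
		status &= readl(host->base + MMCIMASK0);

		/*
		 * The ST Micro variant reports both the busy start and
		 * the busy end events through the single MCI_ST_CARDBUSY
		 * bit.  If the busy end edge on DAT0 is raised at this
		 * point, the write below acknowledges it together with
		 * the event read above, before anyone has handled it:
		 * the busy end interrupt is lost and the command waits
		 * forever.
		 */
		writel(status, host->base + MMCICLEAR);

		mmci_cmd_irq(host, host->cmd, status);
		/* ... data and PIO handling elided ... */
	} while (status);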
To fix the problem, this patch implements the following changes.

Considering that the mmci seems to be triggering the IRQ on both edges
while monitoring DAT0 for busy completion, and that the same status bit
is used to monitor the start and the end of busy detection, special care
must be taken to make sure that both the start and the end interrupts
are always cleared one after the other:

1) Clearing of the card busy bit is moved into the mmci_cmd_irq()
   function, where unmasking of the busy end bit is effectively handled.

2) Just before unmasking the busy end event, the busy start event is
   cleared by writing the card busy bit to the MMCICLEAR register.

3) Finally, once we are no longer busy with a command, the busy end
   event is cleared by writing the card busy bit to the MMCICLEAR
   register again.

The resulting ordering is sketched below.
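For illustration only (a rough sketch, not the actual patch; it reuses
the register and bit names from the description above and assumes
MCI_ST_CARDBUSY is the variant's busy detect flag):

	/* On the busy start event, in mmci_cmd_irq(): */
	writel(MCI_ST_CARDBUSY, host->base + MMCICLEAR);   /* 2) clear busy start */
	writel(readl(host->base + MMCIMASK0) | MCI_ST_BUSYEND,
	       host->base + MMCIMASK0);                    /* unmask busy end */
	host->busy_status = status & MCI_CMDRESPEND;       /* now waiting for busy end */

	/* Once we are no longer busy with the command: */
	writel(MCI_ST_CARDBUSY, host->base + MMCICLEAR);   /* 3) clear busy end */
	host->busy_status = 0;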
This patch has been tested with the ST Accordo5 machine, which is not yet
supported upstream but relies on the mmci driver.

Signed-off-by: Sarang Mairal <sarang.mairal@garmin.com>
Signed-off-by: Jean-Nicolas Graux <jean-nicolas.graux@st.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>