| /* ========================================================================== |
| * |
| * The Software IS NOT an item of Licensed Software or Licensed Product under |
| * any End User Software License Agreement or Agreement for Licensed Product |
| * with Synopsys or any supplement thereto. You are permitted to use and |
| * redistribute this Software in source and binary forms, with or without |
| * modification, provided that redistributions of source code must retain this |
| * notice. You may not view, use, disclose, copy or distribute this file or |
| * any information contained herein except pursuant to this license grant from |
| * Synopsys. If you do not agree with this notice, including the disclaimer |
| * below, then you are not authorized to use the Software. |
| * |
| * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS |
| * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, |
| * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
| * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH |
| * DAMAGE. |
| * ========================================================================== */ |
| |
| #ifndef DWC_HOST_ONLY |
| #include <linux/interrupt.h> |
| #include <linux/dma-mapping.h> |
| |
| #include "dwc_otg_driver.h" |
| #include "dwc_otg_pcd.h" |
| |
| #define DEBUG_EP0 |
| |
| |
| /** @file |
| * This file contains the implementation of the PCD Interrupt handlers. |
| * |
| * The PCD handles the device interrupts. Many conditions can cause a |
| * device interrupt. When an interrupt occurs, the device interrupt |
| * service routine determines the cause of the interrupt and |
| * dispatches handling to the appropriate function. These interrupt |
| * handling functions are described below. |
| * All interrupt registers are processed from LSB to MSB. |
| */ |
| |
| /** |
| * This function prints the ep0 state for debug purposes. |
| */ |
| static void print_ep0_state(struct dwc_otg_pcd *pcd) |
| { |
| #ifdef DEBUG |
| char str[40]; |
| switch (pcd->ep0state) { |
| case EP0_DISCONNECT: |
| strcpy(str, "EP0_DISCONNECT"); |
| break; |
| case EP0_IDLE: |
| strcpy(str, "EP0_IDLE"); |
| break; |
| case EP0_IN_DATA_PHASE: |
| strcpy(str, "EP0_IN_DATA_PHASE"); |
| break; |
| case EP0_OUT_DATA_PHASE: |
| strcpy(str, "EP0_OUT_DATA_PHASE"); |
| break; |
| case EP0_IN_STATUS_PHASE: |
| strcpy(str, "EP0_IN_STATUS_PHASE"); |
| break; |
| case EP0_OUT_STATUS_PHASE: |
| strcpy(str, "EP0_OUT_STATUS_PHASE"); |
| break; |
| case EP0_STALL: |
| strcpy(str, "EP0_STALL"); |
| break; |
| default: |
| strcpy(str, "EP0_INVALID"); |
| } |
| DWC_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state); |
| #endif /* */ |
| } |
| |
| /** |
| * This function returns pointer to in ep struct with number ep_num |
| */ |
| static struct dwc_otg_pcd_ep *get_in_ep(struct dwc_otg_pcd *pcd, |
| u32 ep_num) |
| { |
| int i; |
| int num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps; |
| if (ep_num == 0) |
| return &pcd->ep0; |
| else { |
| for (i = 0; i < num_in_eps; ++i) { |
| if (pcd->in_ep[i].dwc_ep.num == ep_num) |
| return &pcd->in_ep[i]; |
| } |
| return NULL; |
| } |
| |
| } |
| |
| /** |
| * This function returns pointer to out ep struct with number ep_num |
| */ |
| static struct dwc_otg_pcd_ep *get_out_ep(struct dwc_otg_pcd *pcd, |
| u32 ep_num) |
| { |
| int i; |
| int num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps; |
| if (ep_num == 0) |
| return &pcd->ep0; |
| else { |
| for (i = 0; i < num_out_eps; ++i) { |
| if (pcd->out_ep[i].dwc_ep.num == ep_num) |
| return &pcd->out_ep[i]; |
| } |
| return NULL; |
| } |
| |
| } |
| |
| /** |
| * This functions gets a pointer to an EP from the wIndex address |
| * value of the control request. |
| */ |
| static struct dwc_otg_pcd_ep *get_ep_by_addr(struct dwc_otg_pcd *pcd, |
| u16 wIndex) |
| { |
| struct dwc_otg_pcd_ep *ep; |
| u32 ep_num = (wIndex & USB_ENDPOINT_NUMBER_MASK); |
| |
| if (ep_num == 0) { |
| ep = &pcd->ep0; |
| } else if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) { /* in ep */ |
| ep = &pcd->in_ep[ep_num - 1]; |
| } else { |
| ep = &pcd->out_ep[ep_num - 1]; |
| } |
| |
| return ep; |
| } |
| |
| |
| /** |
| * This function checks the EP request queue, if the queue is not |
| * empty the next request is started. |
| */ |
| void start_next_request(struct dwc_otg_pcd_ep *ep) |
| { |
| struct dwc_otg_pcd_request *req = NULL; |
| u32 max_transfer = |
| GET_CORE_IF(ep->pcd)->core_params->max_transfer_size; |
| |
| if (!list_empty(&ep->queue)) { |
| req = list_entry(ep->queue.next, |
| struct dwc_otg_pcd_request, queue); |
| |
| /* map virtual address to hardware */ |
| if (req->req.dma == DMA_ADDR_INVALID && req->req.length) { |
| req->req.dma = dma_map_single(ep->pcd->gadget.dev.parent, |
| req->req.buf, |
| req->req.length, |
| ep->dwc_ep.is_in |
| ? DMA_TO_DEVICE : |
| DMA_FROM_DEVICE); |
| req->mapped = 1; |
| } else { |
| dma_sync_single_for_device(ep->pcd->gadget.dev.parent, |
| req->req.dma, req->req.length, |
| ep->dwc_ep.is_in |
| ? DMA_TO_DEVICE : |
| DMA_FROM_DEVICE); |
| req->mapped = 0; |
| } |
| |
| |
| |
| /* Setup and start the Transfer */ |
| ep->dwc_ep.dma_addr = req->req.dma; |
| ep->dwc_ep.start_xfer_buff = req->req.buf; |
| ep->dwc_ep.xfer_buff = req->req.buf; |
| ep->dwc_ep.xfer_len = req->req.length; |
| ep->dwc_ep.xfer_count = 0; |
| ep->dwc_ep.sent_zlp = 0; |
| ep->dwc_ep.total_len = ep->dwc_ep.xfer_len; |
| |
| |
| ep->dwc_ep.maxxfer = max_transfer; |
| if (GET_CORE_IF(ep->pcd)->dma_desc_enable) { |
| u32 out_max_xfer = DDMA_MAX_TRANSFER_SIZE |
| - (DDMA_MAX_TRANSFER_SIZE % 4); |
| if (ep->dwc_ep.is_in) { |
| if (ep->dwc_ep.maxxfer > |
| DDMA_MAX_TRANSFER_SIZE) { |
| ep->dwc_ep.maxxfer = |
| DDMA_MAX_TRANSFER_SIZE; |
| } |
| } else { |
| if (ep->dwc_ep.maxxfer > out_max_xfer) { |
| ep->dwc_ep.maxxfer = |
| out_max_xfer; |
| } |
| } |
| } |
| if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) { |
| ep->dwc_ep.maxxfer -= |
| (ep->dwc_ep.maxxfer % ep->dwc_ep.maxpacket); |
| } |
| |
| if (req->req.zero) { |
| if ((ep->dwc_ep.total_len % |
| ep->dwc_ep.maxpacket == 0) |
| && (ep->dwc_ep.total_len != 0)) { |
| ep->dwc_ep.sent_zlp = 1; |
| } |
| } |
| |
| |
| #ifdef CONFIG_405EZ |
| /* |
| * Added-sr: 2007-07-26 |
| * |
| * When a new transfer will be started, mark this |
| * endpoint as active. This way it will be blocked |
| * for further transfers, until the current transfer |
| * is finished. |
| */ |
| ep->dwc_ep.active = 1; |
| #endif |
| dwc_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->dwc_ep); |
| } |
| } |
| |
| /** |
| * This function handles the SOF Interrupts. At this time the SOF |
| * Interrupt is disabled. |
| */ |
| static int dwc_otg_pcd_handle_sof_intr(struct dwc_otg_pcd *pcd) |
| { |
| struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd); |
| union gintsts_data gintsts; |
| |
| /* Clear interrupt */ |
| gintsts.d32 = 0; |
| gintsts.b.sofintr = 1; |
| dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32); |
| return 1; |
| } |
| |
| /** |
| * This function handles the Rx Status Queue Level Interrupt, which |
| * indicates that there is a least one packet in the Rx FIFO. The |
| * packets are moved from the FIFO to memory, where they will be |
| * processed when the Endpoint Interrupt Register indicates Transfer |
| * Complete or SETUP Phase Done. |
| * |
| * Repeat the following until the Rx Status Queue is empty: |
| * -# Read the Receive Status Pop Register (GRXSTSP) to get Packet |
| * info |
| * -# If Receive FIFO is empty then skip to step Clear the interrupt |
| * and exit |
| * -# If SETUP Packet call dwc_otg_read_setup_packet to copy the |
| * SETUP data to the buffer |
| * -# If OUT Data Packet call dwc_otg_read_packet to copy the data |
| * to the destination buffer |
| */ |
| static int dwc_otg_pcd_handle_rx_status_q_level_intr(struct dwc_otg_pcd *pcd) |
| { |
| struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd); |
| struct dwc_otg_core_global_regs __iomem *global_regs = |
| core_if->core_global_regs; |
| union gintmsk_data gintmask = {.d32 = 0}; |
| union device_grxsts_data status; |
| struct dwc_otg_pcd_ep *ep; |
| |
| #ifdef DEBUG |
| static char *dpid_str[] = { "D0", "D2", "D1", "MDATA" }; |
| |
| #endif /* */ |
| DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd); |
| |
| /* Disable the Rx Status Queue Level interrupt */ |
| gintmask.b.rxstsqlvl = 1; |
| dwc_modify_reg32(&global_regs->gintmsk, gintmask.d32, 0); |
| |
| /* Get the Status from the top of the FIFO */ |
| status.d32 = dwc_read_reg32(&global_regs->grxstsp); |
| DWC_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s " |
| "pktsts:%x Frame:%d(0x%0x)\n", status.b.epnum, |
| status.b.bcnt, dpid_str[status.b.dpid], status.b.pktsts, |
| status.b.fn, status.b.fn); |
| |
| /* Get pointer to EP structure */ |
| ep = get_out_ep(pcd, status.b.epnum); |
| |
| switch (status.b.pktsts) { |
| case DWC_DSTS_GOUT_NAK: |
| DWC_DEBUGPL(DBG_PCDV, "Global OUT NAK\n"); |
| break; |
| case DWC_STS_DATA_UPDT: |
| DWC_DEBUGPL(DBG_PCDV, "OUT Data Packet\n"); |
| if (status.b.bcnt && ep->dwc_ep.xfer_buff) { |
| dwc_otg_read_packet(core_if, ep->dwc_ep.xfer_buff, |
| status.b.bcnt); |
| ep->dwc_ep.xfer_count += status.b.bcnt; |
| ep->dwc_ep.xfer_buff += status.b.bcnt; |
| } |
| break; |
| case DWC_STS_XFER_COMP: |
| DWC_DEBUGPL(DBG_PCDV, "OUT Complete\n"); |
| break; |
| case DWC_DSTS_SETUP_COMP: |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCDV, "Setup Complete\n"); |
| #endif /* */ |
| break; |
| case DWC_DSTS_SETUP_UPDT: |
| dwc_otg_read_setup_packet(core_if, pcd->setup_pkt->d32); |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCD, |
| "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n", |
| pcd->setup_pkt->req.bRequestType, |
| pcd->setup_pkt->req.bRequest, |
| __le16_to_cpu(pcd->setup_pkt->req.wValue), |
| __le16_to_cpu(pcd->setup_pkt->req.wIndex), |
| __le16_to_cpu(pcd->setup_pkt->req.wLength)); |
| |
| #endif /* */ |
| |
| ep->dwc_ep.xfer_count += status.b.bcnt; |
| break; |
| default: |
| DWC_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n", |
| status.b.pktsts); |
| break; |
| } |
| |
| return 1; |
| } |
| |
| /** |
| * This function examines the Device IN Token Learning Queue to |
| * determine the EP number of the last IN token received. This |
| * implementation is for the Mass Storage device where there are only |
| * 2 IN EPs (Control-IN and BULK-IN). |
| * |
| * The EP numbers for the first six IN Tokens are in DTKNQR1 and there |
| * are 8 EP Numbers in each of the other possible DTKNQ Registers. |
| * |
| */ |
| static int get_ep_of_last_in_token(struct dwc_otg_core_if *core_if) |
| { |
| struct dwc_otg_dev_global_regs __iomem *dev_global_regs = |
| core_if->dev_if->dev_global_regs; |
| const u32 TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth; |
| |
| /* Number of Token Queue Registers */ |
| const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8; |
| union dtknq1_data dtknqr1; |
| u32 in_tkn_epnums[4]; |
| int ndx = 0; |
| int i = 0; |
| u32 __iomem *addr = &dev_global_regs->dtknqr1; |
| int epnum = 0; |
| |
| |
| /* Read the DTKNQ Registers */ |
| for (i = 0; i < DTKNQ_REG_CNT; i++) { |
| in_tkn_epnums[i] = dwc_read_reg32(addr); |
| DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i + 1, |
| in_tkn_epnums[i]); |
| if (addr == &dev_global_regs->dvbusdis) |
| addr = &dev_global_regs->dtknqr3_dthrctl; |
| else |
| ++addr; |
| } |
| |
| /* Copy the DTKNQR1 data to the bit field. */ |
| dtknqr1.d32 = in_tkn_epnums[0]; |
| |
| /* Get the EP numbers */ |
| in_tkn_epnums[0] = dtknqr1.b.epnums0_5; |
| ndx = dtknqr1.b.intknwptr - 1; |
| |
| if (ndx == -1) { |
| /** @todo Find a simpler way to calculate the max |
| * queue position.*/ |
| int cnt = TOKEN_Q_DEPTH; |
| if (TOKEN_Q_DEPTH <= 6) |
| cnt = TOKEN_Q_DEPTH - 1; |
| else if (TOKEN_Q_DEPTH <= 14) |
| cnt = TOKEN_Q_DEPTH - 7; |
| else if (TOKEN_Q_DEPTH <= 22) |
| cnt = TOKEN_Q_DEPTH - 15; |
| else |
| cnt = TOKEN_Q_DEPTH - 23; |
| |
| epnum = (in_tkn_epnums[DTKNQ_REG_CNT - 1] >> (cnt * 4)) & 0xF; |
| } else { |
| if (ndx <= 5) |
| epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF; |
| else if (ndx <= 13) { |
| ndx -= 6; |
| epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF; |
| } else if (ndx <= 21) { |
| ndx -= 14; |
| epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF; |
| } else if (ndx <= 29) { |
| ndx -= 22; |
| epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF; |
| } |
| } |
| |
| return epnum; |
| } |
| |
| /** |
| * This interrupt occurs when the non-periodic Tx FIFO is half-empty. |
| * The active request is checked for the next packet to be loaded into |
| * the non-periodic Tx FIFO. |
| */ |
| static int dwc_otg_pcd_handle_np_tx_fifo_empty_intr(struct dwc_otg_pcd *pcd) |
| { |
| struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd); |
| struct dwc_otg_core_global_regs __iomem *global_regs = |
| core_if->core_global_regs; |
| struct dwc_otg_dev_in_ep_regs __iomem *ep_regs; |
| union gnptxsts_data txstatus = {.d32 = 0 }; |
| #ifndef OTG_PLB_DMA |
| union gintsts_data gintsts; |
| #endif |
| int epnum = 0; |
| struct dwc_otg_pcd_ep *ep = NULL; |
| u32 len = 0; |
| int dwords; |
| |
| /* Get the epnum from the IN Token Learning Queue. */ |
| epnum = get_ep_of_last_in_token(core_if); |
| ep = get_in_ep(pcd, epnum); |
| |
| DWC_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %s(%d) \n", ep->ep.name, epnum); |
| ep_regs = core_if->dev_if->in_ep_regs[epnum]; |
| len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; |
| if (len > ep->dwc_ep.maxpacket) |
| len = ep->dwc_ep.maxpacket; |
| |
| dwords = (len + 3) / 4; |
| |
| /* While there is space in the queue and space in the FIFO and |
| * More data to tranfer, Write packets to the Tx FIFO |
| */ |
| txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts); |
| DWC_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n", txstatus.d32); |
| while (txstatus.b.nptxqspcavail > 0 |
| && txstatus.b.nptxfspcavail > dwords |
| && ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len) { |
| |
| /* Write the FIFO */ |
| #ifdef CONFIG_405EZ |
| /* |
| * Added-sr: 2007-07-26 |
| * |
| * When a new transfer will be started, mark this |
| * endpoint as active. This way it will be blocked |
| * for further transfers, until the current transfer |
| * is finished. |
| */ |
| ep->dwc_ep.active = 1; |
| #endif |
| dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0); |
| len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; |
| if (len > ep->dwc_ep.maxpacket) |
| len = ep->dwc_ep.maxpacket; |
| |
| dwords = (len + 3) / 4; |
| txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts); |
| DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", txstatus.d32); |
| } |
| DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", |
| dwc_read_reg32(&global_regs->gnptxsts)); |
| |
| /* Clear interrupt */ |
| gintsts.d32 = 0; |
| gintsts.b.nptxfempty = 1; |
| dwc_write_reg32(&global_regs->gintsts, gintsts.d32); |
| |
| return 1; |
| } |
| |
| /** |
| * This function is called when dedicated Tx FIFO Empty interrupt occurs. |
| * The active request is checked for the next packet to be loaded into |
| * appropriate Tx FIFO. |
| */ |
| static int write_empty_tx_fifo(struct dwc_otg_pcd *pcd, u32 epnum) |
| { |
| struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd); |
| struct dwc_otg_dev_if *dev_if = core_if->dev_if; |
| struct dwc_otg_dev_in_ep_regs __iomem *ep_regs; |
| union dtxfsts_data txstatus = {.d32 = 0}; |
| struct dwc_otg_pcd_ep *ep = NULL; |
| u32 len = 0; |
| int dwords; |
| ep = get_in_ep(pcd, epnum); |
| |
| DWC_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %s(%d) \n", |
| ep->ep.name, epnum); |
| ep_regs = core_if->dev_if->in_ep_regs[epnum]; |
| len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; |
| if (len > ep->dwc_ep.maxpacket) |
| len = ep->dwc_ep.maxpacket; |
| |
| dwords = (len + 3) / 4; |
| |
| /* While there is space in the queue and space in the FIFO and |
| * More data to transfer, Write packets to the Tx FIFO */ |
| txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts); |
| DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32); |
| while (txstatus.b.txfspcavail > dwords |
| && ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len |
| && ep->dwc_ep.xfer_len != 0) { |
| |
| /* Write the FIFO */ |
| dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0); |
| len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; |
| if (len > ep->dwc_ep.maxpacket) |
| len = ep->dwc_ep.maxpacket; |
| |
| dwords = (len + 3) / 4; |
| txstatus.d32 = |
| dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts); |
| DWC_DEBUGPL(DBG_PCDV, "dtxfsts[%d]=0x%08x\n", epnum, |
| txstatus.d32); |
| } |
| DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum, |
| dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts)); |
| |
| return 1; |
| } |
| |
| /** |
| * This function is called when the Device is disconnected. It stops |
| * any active requests and informs the Gadget driver of the |
| * disconnect. |
| */ |
| void dwc_otg_pcd_stop(struct dwc_otg_pcd *pcd) |
| { |
| int i, num_in_eps, num_out_eps; |
| struct dwc_otg_pcd_ep *ep; |
| union gintmsk_data intr_mask = {.d32 = 0}; |
| unsigned long flags; |
| |
| spin_lock_irqsave(&pcd->lock, flags); |
| |
| num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps; |
| num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps; |
| DWC_DEBUGPL(DBG_PCDV, "%s() \n", __func__); |
| |
| /* don't disconnect drivers more than once */ |
| if (pcd->ep0state == EP0_DISCONNECT) { |
| DWC_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__); |
| spin_unlock_irqrestore(&pcd->lock, flags); |
| return; |
| } |
| pcd->ep0state = EP0_DISCONNECT; |
| |
| /* Reset the OTG state. */ |
| dwc_otg_pcd_update_otg(pcd, 1); |
| |
| /* Disable the NP Tx Fifo Empty Interrupt. */ |
| intr_mask.b.nptxfempty = 1; |
| dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, |
| intr_mask.d32, 0); |
| |
| /* Flush the FIFOs */ |
| dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0); |
| dwc_otg_flush_rx_fifo(GET_CORE_IF(pcd)); |
| |
| /* prevent new request submissions, kill any outstanding requests */ |
| ep = &pcd->ep0; |
| dwc_otg_request_nuke(ep, &flags); |
| |
| /* prevent new request submissions, kill any outstanding requests */ |
| for (i = 0; i < num_in_eps; i++) { |
| struct dwc_otg_pcd_ep *ep = &pcd->in_ep[i]; |
| dwc_otg_request_nuke(ep, &flags); |
| } |
| |
| /* prevent new request submissions, kill any outstanding requests */ |
| for (i = 0; i < num_out_eps; i++) { |
| struct dwc_otg_pcd_ep *ep = &pcd->out_ep[i]; |
| dwc_otg_request_nuke(ep, &flags); |
| } |
| |
| /* report disconnect; the driver is already quiesced */ |
| if (pcd->driver && pcd->driver->disconnect) { |
| spin_unlock_irqrestore(&pcd->lock, flags); |
| pcd->driver->disconnect(&pcd->gadget); |
| spin_lock_irqsave(&pcd->lock, flags); |
| } |
| |
| |
| spin_unlock_irqrestore(&pcd->lock, flags); |
| } |
| |
| /** |
| * This interrupt indicates that ... |
| */ |
| static int dwc_otg_pcd_handle_i2c_intr(struct dwc_otg_pcd *pcd) |
| { |
| union gintmsk_data intr_mask = {.d32 = 0}; |
| union gintsts_data gintsts; |
| DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "i2cintr"); |
| intr_mask.b.i2cintr = 1; |
| dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, |
| intr_mask.d32, 0); |
| |
| /* Clear interrupt */ |
| gintsts.d32 = 0; |
| gintsts.b.i2cintr = 1; |
| dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, |
| gintsts.d32); |
| return 1; |
| } |
| |
| /** |
| * This interrupt indicates that ... |
| */ |
| static int dwc_otg_pcd_handle_early_suspend_intr(struct dwc_otg_pcd *pcd) |
| { |
| union gintsts_data gintsts; |
| |
| #if defined(VERBOSE) |
| DWC_PRINT("Early Suspend Detected\n"); |
| #endif /* */ |
| /* Clear interrupt */ |
| gintsts.d32 = 0; |
| gintsts.b.erlysuspend = 1; |
| dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, |
| gintsts.d32); |
| return 1; |
| } |
| |
| /** |
| * This function configures EPO to receive SETUP packets. |
| * |
| * Program the following fields in the endpoint specific registers for Control |
| * OUT EP 0, in order to receive a setup packet: |
| * |
| * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back setup packets) |
| * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back to back setup |
| * packets) |
| * In DMA mode, DOEPDMA0 Register with a memory address to store any setup |
| * packets received |
| * |
| */ |
| static void ep0_out_start(struct dwc_otg_core_if *core_if, |
| struct dwc_otg_pcd *pcd) |
| { |
| struct dwc_otg_dev_if *dev_if = core_if->dev_if; |
| union deptsiz0_data doeptsize0 = {.d32 = 0}; |
| struct dwc_otg_dev_dma_desc *dma_desc; |
| union depctl_data doepctl = {.d32 = 0 }; |
| |
| #ifdef VERBOSE |
| DWC_DEBUGPL(DBG_PCDV, "%s() doepctl0=%0x\n", __func__, |
| dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); |
| |
| #endif /* */ |
| doeptsize0.b.supcnt = 3; |
| doeptsize0.b.pktcnt = 1; |
| doeptsize0.b.xfersize = 8 * 3; |
| |
| if (core_if->dma_enable) { |
| if (!core_if->dma_desc_enable) { |
| /** |
| * put here as for Hermes mode |
| * deptisz register should not be written |
| */ |
| dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, |
| doeptsize0.d32); |
| |
| /** |
| * @todo dma needs to handle multiple |
| * setup packets (up to 3) |
| */ |
| dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma, |
| pcd->setup_pkt_dma_handle); |
| } else { |
| dev_if->setup_desc_index = |
| (dev_if->setup_desc_index + 1) & 1; |
| dma_desc = |
| dev_if->setup_desc_addr[dev_if->setup_desc_index]; |
| |
| /** DMA Descriptor Setup */ |
| dma_desc->status.b.bs = BS_HOST_BUSY; |
| dma_desc->status.b.l = 1; |
| dma_desc->status.b.ioc = 1; |
| dma_desc->status.b.bytes = pcd->ep0.dwc_ep.maxpacket; |
| dma_desc->buf = pcd->setup_pkt_dma_handle; |
| dma_desc->status.b.bs = BS_HOST_READY; |
| |
| wmb(); |
| |
| /** DOEPDMA0 Register write */ |
| dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma, |
| dev_if->dma_setup_desc_addr[dev_if-> |
| setup_desc_index]); |
| } |
| |
| } else { |
| /** |
| * put here as for Hermes mode deptisz |
| * register should not be written |
| */ |
| dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, |
| doeptsize0.d32); |
| } |
| |
| /** DOEPCTL0 Register write */ |
| doepctl.b.epena = 1; |
| doepctl.b.cnak = 1; |
| dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32); |
| |
| #ifdef VERBOSE |
| DWC_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n", |
| dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); |
| DWC_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n", |
| dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl)); |
| #endif |
| } |
| |
| /** |
| * This interrupt occurs when a USB Reset is detected. When the USB Reset |
| * Interrupt occurs the device state is set to DEFAULT and the EP0 state is set |
| * to IDLE. |
| * |
| * Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1) |
| * |
| * Unmask the following interrupt bits: |
| * - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint) |
| * - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint) |
| * - DOEPMSK.SETUP = 1 |
| * - DOEPMSK.XferCompl = 1 |
| * - DIEPMSK.XferCompl = 1 |
| * - DIEPMSK.TimeOut = 1 |
| * |
| * Program the following fields in the endpoint specific registers for Control |
| * OUT EP 0, in order to receive a setup packet |
| * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back setup packets) |
| * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back to back setup |
| * packets) |
| * |
| * - In DMA mode, DOEPDMA0 Register with a memory address to store any setup |
| * packets received |
| * |
| * At this point, all the required initialization, except for enabling |
| * the control 0 OUT endpoint is done, for receiving SETUP packets. |
| */ |
| static int dwc_otg_pcd_handle_usb_reset_intr(struct dwc_otg_pcd *pcd) |
| { |
| struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd); |
| struct dwc_otg_dev_if *dev_if = core_if->dev_if; |
| union depctl_data doepctl = {.d32 = 0}; |
| union daint_data daintmsk = {.d32 = 0}; |
| union doepint_data doepmsk = {.d32 = 0}; |
| union diepint_data diepmsk = {.d32 = 0}; |
| union dcfg_data dcfg = {.d32 = 0}; |
| union grstctl_data resetctl = {.d32 = 0}; |
| union dctl_data dctl = {.d32 = 0}; |
| int i = 0; |
| union gintsts_data gintsts = {.d32 = 0 }; |
| union pcgcctl_data power = {.d32 = 0 }; |
| |
| /* |
| * Fix for STAR 9000382324: |
| * When the High Speed device enters LPM state after |
| * successfully completing LPM transactions in USB, it |
| * erroneously detects Reset or Resume even though there |
| * is no Reset or Resume from the Host. |
| * As a result of this issue, the device core exits L1 |
| * state when the Host is still in L1. This issue occurs |
| * randomly if the PHY takes more than 2.5us to enable |
| * FS terminations after entering L1. |
| */ |
| if (core_if->lx_state == DWC_OTG_L1) { |
| union glpmcfg_data lpmcfg; |
| lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg); |
| if (!lpmcfg.b.sleep_state_resumeok) { |
| /* perform a soft disconnect as we are out of |
| * step with the host |
| */ |
| union dctl_data dctl = {.d32 = 0}; |
| dctl.b.sftdiscon = 1; |
| dwc_modify_reg32(&core_if->dev_if-> |
| dev_global_regs->dctl, |
| 0, |
| dctl.d32); |
| wmb(); |
| mdelay(1); |
| dwc_modify_reg32(&core_if->dev_if-> |
| dev_global_regs->dctl, |
| dctl.d32, |
| 0); |
| goto out; |
| } |
| } |
| |
| |
| power.d32 = dwc_read_reg32(core_if->pcgcctl); |
| if (power.b.stoppclk) { |
| power.d32 = 0; |
| power.b.stoppclk = 1; |
| dwc_modify_reg32(core_if->pcgcctl, power.d32, 0); |
| |
| power.b.pwrclmp = 1; |
| dwc_modify_reg32(core_if->pcgcctl, power.d32, 0); |
| |
| power.b.rstpdwnmodule = 1; |
| dwc_modify_reg32(core_if->pcgcctl, power.d32, 0); |
| } |
| |
| core_if->lx_state = DWC_OTG_L0; |
| DWC_PRINT("USB RESET\n"); |
| |
| /* reset the HNP settings */ |
| dwc_otg_pcd_update_otg(pcd, 1); |
| |
| /* Clear the Remote Wakeup Signalling */ |
| dctl.b.rmtwkupsig = 1; |
| dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0); |
| |
| /* Set NAK for all OUT EPs */ |
| doepctl.b.snak = 1; |
| for (i = 0; i <= dev_if->num_out_eps; i++) { |
| dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, |
| doepctl.d32); |
| } |
| |
| /* Flush the NP Tx FIFO */ |
| dwc_otg_flush_tx_fifo(core_if, 0x10); |
| |
| /* Flush the Learning Queue */ |
| resetctl.b.intknqflsh = 1; |
| dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32); |
| if (core_if->multiproc_int_enable) { |
| daintmsk.b.inep0 = 1; |
| daintmsk.b.outep0 = 1; |
| dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, |
| daintmsk.d32); |
| |
| doepmsk.b.setup = 1; |
| doepmsk.b.xfercompl = 1; |
| doepmsk.b.ahberr = 1; |
| doepmsk.b.epdisabled = 1; |
| |
| if (core_if->dma_desc_enable) { |
| doepmsk.b.stsphsercvd = 1; |
| doepmsk.b.bna = 1; |
| } |
| |
| dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[0], |
| doepmsk.d32); |
| |
| diepmsk.b.xfercompl = 1; |
| diepmsk.b.timeout = 1; |
| diepmsk.b.epdisabled = 1; |
| diepmsk.b.ahberr = 1; |
| diepmsk.b.intknepmis = 1; |
| |
| if (core_if->dma_desc_enable) |
| diepmsk.b.bna = 1; |
| |
| |
| dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], |
| diepmsk.d32); |
| } else { |
| daintmsk.b.inep0 = 1; |
| daintmsk.b.outep0 = 1; |
| dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, |
| daintmsk.d32); |
| doepmsk.b.setup = 1; |
| doepmsk.b.xfercompl = 1; |
| doepmsk.b.ahberr = 1; |
| doepmsk.b.epdisabled = 0; |
| if (core_if->dma_desc_enable) { |
| doepmsk.b.stsphsercvd = 1; |
| |
| /*doepmsk.b.bna = 1;*/ |
| |
| /* |
| * NJ dont enable BNA int until we get |
| * first request. |
| */ |
| } |
| dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32); |
| diepmsk.b.xfercompl = 1; |
| diepmsk.b.timeout = 1; |
| diepmsk.b.epdisabled = 0; |
| diepmsk.b.ahberr = 1; |
| diepmsk.b.intknepmis = 1; |
| |
| if (core_if->dma_desc_enable) |
| diepmsk.b.bna = 1; |
| |
| /*enable NAK effective interrupts */ |
| diepmsk.b.inepnakeff = 1; |
| |
| |
| dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32); |
| } |
| |
| /* Reset Device Address */ |
| dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg); |
| dcfg.b.devaddr = 0; |
| |
| /* disable nzstsouthshk bit as well */ |
| dcfg.b.nzstsouthshk = 0; |
| |
| dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32); |
| |
| /* setup EP0 to receive SETUP packets */ |
| ep0_out_start(core_if, pcd); |
| |
| out: |
| /* Clear interrupt */ |
| gintsts.d32 = 0; |
| gintsts.b.usbreset = 1; |
| dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32); |
| return 1; |
| } |
| |
| /** |
| * Get the device speed from the device status register and convert it |
| * to USB speed constant. |
| * |
| * @param core_if Programming view of DWC_otg controller. |
| */ |
| static int get_device_speed(struct dwc_otg_core_if *core_if) |
| { |
| union dsts_data dsts; |
| enum usb_device_speed speed = USB_SPEED_UNKNOWN; |
| dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); |
| switch (dsts.b.enumspd) { |
| case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ: |
| speed = USB_SPEED_HIGH; |
| break; |
| case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ: |
| case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ: |
| speed = USB_SPEED_FULL; |
| break; |
| case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ: |
| speed = USB_SPEED_LOW; |
| break; |
| } |
| return speed; |
| } |
| |
| /** |
| * Read the device status register and set the device speed in the |
| * data structure. |
| * Set up EP0 to receive SETUP packets by calling dwc_ep0_activate. |
| */ |
| static int dwc_otg_pcd_handle_enum_done_intr(struct dwc_otg_pcd *pcd) |
| { |
| struct dwc_otg_pcd_ep *ep0 = &pcd->ep0; |
| union gintsts_data gintsts; |
| union gusbcfg_data gusbcfg; |
| struct dwc_otg_core_global_regs __iomem *global_regs = |
| GET_CORE_IF(pcd)->core_global_regs; |
| uint8_t utmi16b, utmi8b; |
| |
| DWC_DEBUGPL(DBG_PCD, "SPEED ENUM\n"); |
| |
| if (GET_CORE_IF(pcd)->snpsid >= OTG_CORE_REV_2_60a) { |
| utmi16b = 6; |
| utmi8b = 9; |
| } else { |
| utmi16b = 4; |
| utmi8b = 8; |
| } |
| dwc_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->dwc_ep); |
| |
| #ifdef DEBUG_EP0 |
| print_ep0_state(pcd); |
| #endif /* */ |
| pcd->ep0state = EP0_IDLE; |
| ep0->stopped = 0; |
| pcd->gadget.speed = get_device_speed(GET_CORE_IF(pcd)); |
| |
| /* Set USB turnaround time based on device speed and PHY interface. */ |
| gusbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); |
| if (pcd->gadget.speed == USB_SPEED_HIGH) { |
| if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == |
| DWC_HWCFG2_HS_PHY_TYPE_ULPI) { |
| /* ULPI interface */ |
| gusbcfg.b.usbtrdtim = 9; |
| } |
| if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == |
| DWC_HWCFG2_HS_PHY_TYPE_UTMI) { |
| |
| /* UTMI+ interface */ |
| if (GET_CORE_IF(pcd)->hwcfg4.b. |
| utmi_phy_data_width == 0) |
| |
| gusbcfg.b.usbtrdtim = utmi8b; |
| |
| else if (GET_CORE_IF(pcd)->hwcfg4.b. |
| utmi_phy_data_width == 1) |
| |
| gusbcfg.b.usbtrdtim = utmi16b; |
| |
| else if (GET_CORE_IF(pcd)->core_params-> |
| phy_utmi_width == 8) |
| |
| gusbcfg.b.usbtrdtim = utmi8b; |
| |
| else |
| gusbcfg.b.usbtrdtim = utmi16b; |
| |
| } |
| if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == |
| DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) { |
| /* UTMI+ OR ULPI interface */ |
| if (gusbcfg.b.ulpi_utmi_sel == 1) { |
| /* ULPI interface */ |
| gusbcfg.b.usbtrdtim = 9; |
| } else { |
| /* UTMI+ interface */ |
| if (GET_CORE_IF(pcd)->core_params-> |
| phy_utmi_width == 16) { |
| gusbcfg.b.usbtrdtim = utmi16b; |
| } else |
| gusbcfg.b.usbtrdtim = utmi8b; |
| } |
| } |
| } else { |
| /* Full or low speed */ |
| gusbcfg.b.usbtrdtim = 9; |
| } |
| dwc_write_reg32(&global_regs->gusbcfg, gusbcfg.d32); |
| |
| /* Clear interrupt */ |
| gintsts.d32 = 0; |
| gintsts.b.enumdone = 1; |
| dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, |
| gintsts.d32); |
| return 1; |
| } |
| |
| /** |
| * This interrupt indicates that the ISO OUT Packet was dropped due to |
| * Rx FIFO full or Rx Status Queue Full. If this interrupt occurs |
| * read all the data from the Rx FIFO. |
| */ |
| static int |
| dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(struct dwc_otg_pcd *pcd) |
| { |
| union gintmsk_data intr_mask = {.d32 = 0}; |
| union gintsts_data gintsts; |
| DWC_PRINT("INTERRUPT Handler not implemented for %s\n", |
| "ISOC Out Dropped"); |
| intr_mask.b.isooutdrop = 1; |
| dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, |
| intr_mask.d32, 0); |
| |
| /* Clear interrupt */ |
| gintsts.d32 = 0; |
| gintsts.b.isooutdrop = 1; |
| dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, |
| gintsts.d32); |
| return 1; |
| } |
| |
| /** |
| * This interrupt indicates the end of the portion of the micro-frame |
| * for periodic transactions. If there is a periodic transaction for |
| * the next frame, load the packets into the EP periodic Tx FIFO. |
| */ |
| static int dwc_otg_pcd_handle_end_periodic_frame_intr(struct dwc_otg_pcd *pcd) |
| { |
| union gintmsk_data intr_mask = {.d32 = 0}; |
| union gintsts_data gintsts; |
| DWC_PRINT("INTERRUPT Handler not implemented for %s\n", |
| "End of Periodic Portion of Micro-Frame Interrupt"); |
| intr_mask.b.eopframe = 1; |
| dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, |
| intr_mask.d32, 0); |
| |
| /* Clear interrupt */ |
| gintsts.d32 = 0; |
| gintsts.b.eopframe = 1; |
| dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, |
| gintsts.d32); |
| return 1; |
| } |
| |
| /** |
| * This interrupt indicates that EP of the packet on the top of the |
| * non-periodic Tx FIFO does not match EP of the IN Token received. |
| * |
| * The "Device IN Token Queue" Registers are read to determine the |
| * order the IN Tokens have been received. The non-periodic Tx FIFO |
| * is flushed, so it can be reloaded in the order seen in the IN Token |
| * Queue. |
| */ |
| static int dwc_otg_pcd_handle_ep_mismatch_intr(struct dwc_otg_core_if *core_if) |
| { |
| union gintsts_data gintsts; |
| DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if); |
| |
| /* Clear interrupt */ |
| gintsts.d32 = 0; |
| gintsts.b.epmismatch = 1; |
| dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32); |
| return 1; |
| } |
| |
| /** |
| * This funcion stalls EP0. |
| */ |
| static void ep0_do_stall(struct dwc_otg_pcd *pcd, const int err_val) |
| { |
| struct dwc_otg_pcd_ep *ep0 = &pcd->ep0; |
| struct usb_ctrlrequest *ctrl = &pcd->setup_pkt->req; |
| DWC_WARN("req %02x.%02x protocol STALL; err %d\n", ctrl->bRequestType, |
| ctrl->bRequest, err_val); |
| ep0->dwc_ep.is_in = 1; |
| dwc_otg_ep_set_stall(pcd->otg_dev->core_if, &ep0->dwc_ep); |
| pcd->ep0.stopped = 1; |
| pcd->ep0state = EP0_IDLE; |
| ep0_out_start(GET_CORE_IF(pcd), pcd); |
| } |
| |
| /** |
| * This functions delegates the setup command to the gadget driver. |
| */ |
| static void do_gadget_setup(struct dwc_otg_pcd *pcd, |
| struct usb_ctrlrequest *_ctrl) |
| __releases(ep->pcd->lock) |
| __acquires(ep->pcd->lock) |
| { |
| int ret = 0; |
| if (pcd->driver && pcd->driver->setup) { |
| WARN_ON(!in_interrupt()); |
| |
| spin_unlock(&pcd->lock); |
| ret = pcd->driver->setup(&pcd->gadget, _ctrl); |
| spin_lock(&pcd->lock); |
| |
| if (ret < 0) |
| ep0_do_stall(pcd, ret); |
| |
| /** @todo This is a g_file_storage gadget driver specific |
| * workaround: a DELAYED_STATUS result from the fsg_setup |
| * routine will result in the gadget queueing a EP0 IN status |
| * phase for a two-stage control transfer. Exactly the same as |
| * a SET_CONFIGURATION/SET_INTERFACE except that this is a class |
| * specific request. Need a generic way to know when the gadget |
| * driver will queue the status phase. Can we assume when we |
| * call the gadget driver setup() function that it will always |
| * queue and require the following flag? Need to look into |
| * this. |
| */ |
| if (ret == 256 + 999) |
| pcd->request_config = 1; |
| } |
| } |
| |
| /** |
| * This function starts the Zero-Length Packet for the IN status phase |
| * of a 2 stage control transfer. |
| */ |
| static void do_setup_in_status_phase(struct dwc_otg_pcd *pcd) |
| { |
| struct dwc_otg_pcd_ep *ep0 = &pcd->ep0; |
| if (pcd->ep0state == EP0_STALL) |
| return; |
| |
| pcd->ep0state = EP0_IN_STATUS_PHASE; |
| |
| /* Prepare for more SETUP Packets */ |
| DWC_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n"); |
| ep0->dwc_ep.xfer_len = 0; |
| ep0->dwc_ep.xfer_count = 0; |
| ep0->dwc_ep.is_in = 1; |
| ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle; |
| dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); |
| |
| } |
| |
| /** |
| * This function starts the Zero-Length Packet for the OUT status phase |
| * of a 2 stage control transfer. |
| */ |
| static void do_setup_out_status_phase(struct dwc_otg_pcd *pcd) |
| { |
| struct dwc_otg_pcd_ep *ep0 = &pcd->ep0; |
| if (pcd->ep0state == EP0_STALL) { |
| DWC_DEBUGPL(DBG_PCD, "EP0 STALLED\n"); |
| return; |
| } |
| pcd->ep0state = EP0_OUT_STATUS_PHASE; |
| |
| /* Prepare for more SETUP Packets */ |
| |
| DWC_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n"); |
| ep0->dwc_ep.xfer_len = 0; |
| ep0->dwc_ep.xfer_count = 0; |
| ep0->dwc_ep.is_in = 0; |
| |
| ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle; |
| dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); |
| |
| /* Prepare for more SETUP Packets */ |
| if (GET_CORE_IF(pcd)->dma_enable == 0) |
| ep0_out_start(GET_CORE_IF(pcd), pcd); |
| |
| } |
| |
| /** |
| * Clear the EP halt (STALL) and if pending requests start the |
| * transfer. |
| */ |
| static void pcd_clear_halt(struct dwc_otg_pcd *pcd, |
| struct dwc_otg_pcd_ep *ep) |
| { |
| if (ep->dwc_ep.stall_clear_flag == 0) |
| dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep); |
| |
| /* Reactive the EP */ |
| dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep); |
| if (ep->stopped) { |
| ep->stopped = 0; |
| |
| /* If there is a request in the EP queue start it */ |
| |
| /* a tasklet to calls start_next_request(), outside of interrupt |
| * context at some time after the current time, after a |
| * clear-halt setup packet. Still need to implement ep mismatch |
| * in the future if a gadget ever uses more than one endpoint |
| * at once |
| */ |
| if (GET_CORE_IF(pcd)->dma_enable) { |
| ep->queue_sof = 1; |
| tasklet_schedule(pcd->start_xfer_tasklet); |
| } else { |
| #ifdef CONFIG_405EZ |
| /* |
| * Added-sr: 2007-07-26 |
| * |
| * To re-enable this endpoint it's important to |
| * set this next_ep number. Otherwise the endpoint |
| * will not get active again after stalling. |
| */ |
| |
| start_next_request(ep); |
| #endif |
| } |
| } |
| |
| /* Start Control Status Phase */ |
| do_setup_in_status_phase(pcd); |
| } |
| |
| /** |
| * This function is called when the SET_FEATURE TEST_MODE Setup packet |
| * is sent from the host. The Device Control register is written with |
| * the Test Mode bits set to the specified Test Mode. This is done as |
| * a tasklet so that the "Status" phase of the control transfer |
| * completes before transmitting the TEST packets. |
| * |
| * @todo This has not been tested since the tasklet struct was put |
| * into the PCD struct! |
| * |
| */ |
| static void do_test_mode(unsigned long _data) |
| { |
| union dctl_data dctl; |
| struct dwc_otg_pcd *pcd = (struct dwc_otg_pcd *) _data; |
| struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd); |
| int test_mode = pcd->test_mode; |
| |
| dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl); |
| switch (test_mode) { |
| case 1: /*TEST_J*/ |
| dctl.b.tstctl = 1; |
| break; |
| case 2: /* TEST_K*/ |
| dctl.b.tstctl = 2; |
| break; |
| case 3: /* TEST_SE0_NAK*/ |
| dctl.b.tstctl = 3; |
| break; |
| case 4: /* TEST_PACKET*/ |
| dctl.b.tstctl = 4; |
| break; |
| case 5: /* TEST_FORCE_ENABLE*/ |
| dctl.b.tstctl = 5; |
| break; |
| } |
| dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32); |
| } |
| |
| /** |
| * This function process the GET_STATUS Setup Commands. |
| */ |
| static void do_get_status(struct dwc_otg_pcd *pcd) |
| { |
| struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; |
| struct dwc_otg_pcd_ep *ep; |
| struct dwc_otg_pcd_ep *ep0 = &pcd->ep0; |
| u16 *status = pcd->status_buf; |
| |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCD, |
| "GET_STATUS %02x.%02x v%04x i%04x l%04x\n", |
| ctrl.bRequestType, ctrl.bRequest, |
| __le16_to_cpu(ctrl.wValue), __le16_to_cpu(ctrl.wIndex), |
| __le16_to_cpu(ctrl.wLength)); |
| #endif |
| |
| switch (ctrl.bRequestType & USB_RECIP_MASK) { |
| case USB_RECIP_DEVICE: |
| *status = 0x1; /* Self powered */ |
| *status |= pcd->remote_wakeup_enable << 1; |
| break; |
| |
| case USB_RECIP_INTERFACE: |
| *status = 0; |
| break; |
| |
| case USB_RECIP_ENDPOINT: |
| ep = get_ep_by_addr(pcd, __le16_to_cpu(ctrl.wIndex)); |
| if (!ep || __le16_to_cpu(ctrl.wLength) > 2) { |
| ep0_do_stall(pcd, -EOPNOTSUPP); |
| return; |
| } |
| /** @todo check for EP stall */ |
| *status = ep->stopped; |
| break; |
| } |
| pcd->ep0_pending = 1; |
| ep0->dwc_ep.start_xfer_buff = (uint8_t *) status; |
| ep0->dwc_ep.xfer_buff = (uint8_t *) status; |
| ep0->dwc_ep.dma_addr = pcd->status_buf_dma_handle; |
| ep0->dwc_ep.xfer_len = 2; |
| ep0->dwc_ep.xfer_count = 0; |
| ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len; |
| dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); |
| } |
| /** |
| * This function process the SET_FEATURE Setup Commands. |
| */ |
| static void do_set_feature(struct dwc_otg_pcd *pcd) |
| { |
| struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd); |
| struct dwc_otg_core_global_regs __iomem *global_regs = |
| core_if->core_global_regs; |
| struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; |
| struct dwc_otg_pcd_ep *ep = NULL; |
| int otg_cap_param = core_if->core_params->otg_cap; |
| union gotgctl_data gotgctl = {.d32 = 0}; |
| DWC_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n", |
| ctrl.bRequestType, ctrl.bRequest, |
| __le16_to_cpu(ctrl.wValue), __le16_to_cpu(ctrl.wIndex), |
| __le16_to_cpu(ctrl.wLength)); |
| |
| DWC_DEBUGPL(DBG_PCD, "otg_cap=%d\n", otg_cap_param); |
| switch (ctrl.bRequestType & USB_RECIP_MASK) { |
| case USB_RECIP_DEVICE: |
| switch (__le16_to_cpu(ctrl.wValue)) { |
| case USB_DEVICE_REMOTE_WAKEUP: |
| pcd->remote_wakeup_enable = 1; |
| break; |
| case USB_DEVICE_TEST_MODE: |
| |
| /* Setup the Test Mode tasklet to do the Test |
| * Packet generation after the SETUP Status |
| * phase has completed. */ |
| |
| /** @todo This has not been tested since the |
| * tasklet struct was put into the PCD |
| * struct! */ |
| pcd->test_mode_tasklet.next = NULL; |
| pcd->test_mode_tasklet.state = 0; |
| atomic_set(&pcd->test_mode_tasklet.count, 0); |
| pcd->test_mode_tasklet.func = do_test_mode; |
| pcd->test_mode_tasklet.data = (unsigned long)pcd; |
| pcd->test_mode = __le16_to_cpu(ctrl.wIndex) >> 8; |
| tasklet_schedule(&pcd->test_mode_tasklet); |
| break; |
| case USB_DEVICE_B_HNP_ENABLE: |
| DWC_DEBUGPL(DBG_PCDV, |
| "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n"); |
| |
| /* dev may initiate HNP */ |
| if (otg_cap_param == |
| DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { |
| pcd->b_hnp_enable = 1; |
| dwc_otg_pcd_update_otg(pcd, 0); |
| DWC_DEBUGPL(DBG_PCD, "Request B HNP\n"); |
| |
| /**@todo Is the gotgctl.devhnpen cleared |
| * by a USB Reset? */ |
| gotgctl.b.devhnpen = 1; |
| gotgctl.b.hnpreq = 1; |
| dwc_write_reg32(&global_regs->gotgctl, |
| gotgctl.d32); |
| } else { |
| ep0_do_stall(pcd, -EOPNOTSUPP); |
| } |
| break; |
| case USB_DEVICE_A_HNP_SUPPORT: |
| /* RH port supports HNP */ |
| DWC_DEBUGPL(DBG_PCDV, |
| "SET_FEATURE:" |
| " USB_DEVICE_A_HNP_SUPPORT\n"); |
| if (otg_cap_param == |
| DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { |
| pcd->a_hnp_support = 1; |
| dwc_otg_pcd_update_otg(pcd, 0); |
| } else { |
| ep0_do_stall(pcd, -EOPNOTSUPP); |
| } |
| break; |
| case USB_DEVICE_A_ALT_HNP_SUPPORT: |
| /* other RH port does */ |
| DWC_DEBUGPL(DBG_PCDV, |
| "SET_FEATURE: " |
| "USB_DEVICE_A_ALT_HNP_SUPPORT\n"); |
| if (otg_cap_param == |
| DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { |
| pcd->a_alt_hnp_support = 1; |
| dwc_otg_pcd_update_otg(pcd, 0); |
| } else { |
| ep0_do_stall(pcd, -EOPNOTSUPP); |
| } |
| break; |
| } |
| do_setup_in_status_phase(pcd); |
| break; |
| case USB_RECIP_INTERFACE: |
| do_gadget_setup(pcd, &ctrl); |
| break; |
| case USB_RECIP_ENDPOINT: |
| if (__le16_to_cpu(ctrl.wValue) == USB_ENDPOINT_HALT) { |
| ep = get_ep_by_addr(pcd, __le16_to_cpu(ctrl.wIndex)); |
| if (!ep) { |
| ep0_do_stall(pcd, -EOPNOTSUPP); |
| return; |
| } |
| ep->stopped = 1; |
| dwc_otg_ep_set_stall(core_if, &ep->dwc_ep); |
| } |
| do_setup_in_status_phase(pcd); |
| break; |
| } |
| } |
| |
| /** |
| * This function process the CLEAR_FEATURE Setup Commands. |
| */ |
| static void do_clear_feature(struct dwc_otg_pcd *pcd) |
| { |
| struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; |
| struct dwc_otg_pcd_ep *ep = NULL; |
| DWC_DEBUGPL(DBG_PCD, "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n", |
| ctrl.bRequestType, ctrl.bRequest, |
| __le16_to_cpu(ctrl.wValue), |
| __le16_to_cpu(ctrl.wIndex), |
| __le16_to_cpu(ctrl.wLength)); |
| |
| switch (ctrl.bRequestType & USB_RECIP_MASK) { |
| case USB_RECIP_DEVICE: |
| switch (__le16_to_cpu(ctrl.wValue)) { |
| case USB_DEVICE_REMOTE_WAKEUP: |
| pcd->remote_wakeup_enable = 0; |
| break; |
| case USB_DEVICE_TEST_MODE: |
| /** @todo Add CLEAR_FEATURE for TEST modes. */ |
| break; |
| } |
| do_setup_in_status_phase(pcd); |
| break; |
| case USB_RECIP_ENDPOINT: |
| ep = get_ep_by_addr(pcd, __le16_to_cpu(ctrl.wIndex)); |
| if (!ep) { |
| ep0_do_stall(pcd, -EOPNOTSUPP); |
| return; |
| } |
| pcd_clear_halt(pcd, ep); |
| DWC_DEBUGPL(DBG_PCD, "%s halt cleared by host\n", |
| ep->ep.name); |
| break; |
| } |
| } |
| |
| /** |
| * This function process the SET_ADDRESS Setup Commands. |
| */ |
| static void do_set_address(struct dwc_otg_pcd *pcd) |
| { |
| struct dwc_otg_dev_if *dev_if = GET_CORE_IF(pcd)->dev_if; |
| struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; |
| |
| if (ctrl.bRequestType == USB_RECIP_DEVICE) { |
| union dcfg_data dcfg = {.d32 = 0 }; |
| |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue); |
| #endif |
| dcfg.b.devaddr = __le16_to_cpu(ctrl.wValue); |
| dwc_modify_reg32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32); |
| do_setup_in_status_phase(pcd); |
| } |
| } |
| |
| /** |
| * This function processes SETUP commands. In Linux, the USB Command |
| * processing is done in two places - the first being the PCD and the |
| * second in the Gadget Driver (for example, the File-Backed Storage |
| * Gadget Driver). |
| * |
| * |
| * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are |
| * processed by pcd_setup. Calling the Function Driver's setup function from |
| * pcd_setup processes the gadget SETUP commands. |
| */ |
| static void pcd_setup(struct dwc_otg_pcd *pcd) |
| { |
| struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd); |
| struct dwc_otg_dev_if *dev_if = core_if->dev_if; |
| struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; |
| struct dwc_otg_pcd_ep *ep0 = &pcd->ep0; |
| union deptsiz0_data doeptsize0 = {.d32 = 0}; |
| |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n", |
| ctrl.bRequestType, ctrl.bRequest, __le16_to_cpu(ctrl.wValue), |
| __le16_to_cpu(ctrl.wIndex), __le16_to_cpu(ctrl.wLength)); |
| |
| #endif /* */ |
| doeptsize0.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doeptsiz); |
| |
| /** @todo handle > 1 setup packet , assert error for now */ |
| if (core_if->dma_enable && core_if->dma_desc_enable == 0 |
| && (doeptsize0.b.supcnt < 2)) { |
| DWC_ERROR("\n\n CANNOT handle > 1 setup" |
| " packet in DMA mode\n\n"); |
| } |
| |
| /* Clean up the request queue */ |
| dwc_otg_request_nuke(ep0, NULL); |
| ep0->stopped = 0; |
| if (ctrl.bRequestType & USB_DIR_IN) { |
| ep0->dwc_ep.is_in = 1; |
| pcd->ep0state = EP0_IN_DATA_PHASE; |
| } else { |
| ep0->dwc_ep.is_in = 0; |
| pcd->ep0state = EP0_OUT_DATA_PHASE; |
| } |
| if (__le16_to_cpu(ctrl.wLength) == 0) { |
| ep0->dwc_ep.is_in = 1; |
| pcd->ep0state = EP0_IN_STATUS_PHASE; |
| } |
| |
| if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) { |
| /* |
| * handle non-standard (class/vendor) |
| * requests in the gadget driver |
| */ |
| do_gadget_setup(pcd, &ctrl); |
| return; |
| } |
| |
| /** @todo NGS: Handle bad setup packet? */ |
| switch (ctrl.bRequest) { |
| case USB_REQ_GET_STATUS: |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCD, |
| "GET_STATUS %02x.%02x v%04x i%04x l%04x\n", |
| ctrl.bRequestType, ctrl.bRequest, |
| __le16_to_cpu(ctrl.wValue), |
| __le16_to_cpu(ctrl.wIndex), |
| __le16_to_cpu(ctrl.wLength)); |
| |
| #endif /* */ |
| do_get_status(pcd); |
| break; |
| |
| case USB_REQ_CLEAR_FEATURE: |
| do_clear_feature(pcd); |
| break; |
| case USB_REQ_SET_FEATURE: |
| do_set_feature(pcd); |
| break; |
| case USB_REQ_SET_ADDRESS: |
| do_set_address(pcd); |
| break; |
| case USB_REQ_SET_INTERFACE: |
| case USB_REQ_SET_CONFIGURATION: |
| do_gadget_setup(pcd, &ctrl); |
| break; |
| case USB_REQ_SYNCH_FRAME: |
| do_gadget_setup(pcd, &ctrl); |
| break; |
| default: |
| /* Call the Gadget Driver's setup functions */ |
| do_gadget_setup(pcd, &ctrl); |
| break; |
| } |
| } |
| |
| /** |
| * This function completes the ep0 control transfer. |
| */ |
| static int ep0_complete_request(struct dwc_otg_pcd_ep *ep) |
| { |
| struct dwc_otg_core_if *core_if = GET_CORE_IF(ep->pcd); |
| struct dwc_otg_dev_if *dev_if = core_if->dev_if; |
| struct dwc_otg_dev_in_ep_regs __iomem *in_ep_regs = |
| dev_if->in_ep_regs[ep->dwc_ep.num]; |
| #ifdef DEBUG_EP0 |
| struct dwc_otg_dev_out_ep_regs __iomem *out_ep_regs = |
| dev_if->out_ep_regs[ep->dwc_ep.num]; |
| #endif |
| union deptsiz0_data deptsiz; |
| union dev_dma_desc_sts desc_sts; |
| struct dwc_otg_pcd_request *req; |
| int is_last = 0; |
| struct dwc_otg_pcd *pcd = ep->pcd; |
| static int counter; /*DFX added*/ |
| counter++; |
| DWC_DEBUGPL(DBG_PCDV, "%s() %s\n", __func__, ep->ep.name); |
| |
| #if 0 |
| if (in_set_config == 1) { |
| printk(KERN_ERR "DFX ep0_complete_request in_set_config." |
| " ep0 pending: %d list empty:" |
| " %d ep.is_in: %d ep0State: %d counter: %d\n", |
| pcd->ep0_pending, |
| list_empty(&ep->queue), |
| ep->dwc_ep.is_in, |
| pcd->ep0state, |
| counter); |
| } |
| if (in_set_config == 2) { |
| printk(KERN_ERR "DFX ep0_complete_request in_set_ADDRESS. " |
| "ep0 pending: %d list empty:" |
| " %d ep.is_in: %d ep0State: %d counter: %d\n", |
| pcd->ep0_pending, |
| list_empty(&ep->queue), |
| ep->dwc_ep.is_in, |
| pcd->ep0state, |
| counter); |
| } |
| #endif |
| |
| if ((pcd->ep0_pending && list_empty(&ep->queue)) /*|| counter == 1*/) { |
| if (ep->dwc_ep.is_in) { |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n"); |
| #endif |
| do_setup_out_status_phase(pcd); |
| } else { |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n"); |
| #endif |
| do_setup_in_status_phase(pcd); |
| } |
| pcd->ep0_pending = 0; |
| return 1; |
| } |
| |
| if (list_empty(&ep->queue)) |
| return 0; |
| |
| req = list_entry(ep->queue.next, struct dwc_otg_pcd_request, queue); |
| |
| if (pcd->ep0state == EP0_OUT_STATUS_PHASE |
| || pcd->ep0state == EP0_IN_STATUS_PHASE) { |
| is_last = 1; |
| } else if (ep->dwc_ep.is_in) { |
| deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz); |
| if (core_if->dma_desc_enable != 0) |
| desc_sts = dev_if->in_desc_addr->status; |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", |
| ep->ep.name, ep->dwc_ep.xfer_len, |
| deptsiz.b.xfersize, deptsiz.b.pktcnt); |
| #endif |
| if (((core_if->dma_desc_enable == 0) |
| && (deptsiz.b.xfersize == 0)) |
| || ((core_if->dma_desc_enable != 0) |
| && (desc_sts.b.bytes == 0))) { |
| req->req.actual = ep->dwc_ep.xfer_count; |
| /* Is a Zero Len Packet needed? */ |
| if (req->req.zero) { |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n"); |
| #endif |
| req->req.zero = 0; |
| } |
| do_setup_out_status_phase(pcd); |
| } |
| } else { |
| /* ep0-OUT */ |
| #ifdef DEBUG_EP0 |
| deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz); |
| DWC_DEBUGPL(DBG_PCDV, "%s len=%d xsize=%d pktcnt=%d\n", |
| ep->ep.name, ep->dwc_ep.xfer_len, |
| deptsiz.b.xfersize, |
| deptsiz.b.pktcnt); |
| #endif |
| req->req.actual = ep->dwc_ep.xfer_count; |
| |
| /* Is a Zero Len Packet needed? */ |
| if (req->req.zero) { |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n"); |
| #endif |
| req->req.zero = 0; |
| } |
| if (core_if->dma_desc_enable == 0) |
| do_setup_in_status_phase(pcd); |
| } |
| |
| /* Complete the request */ |
| if (is_last) { |
| dwc_otg_request_done(ep, req, 0, NULL); |
| ep->dwc_ep.start_xfer_buff = NULL; |
| ep->dwc_ep.xfer_buff = NULL; |
| ep->dwc_ep.xfer_len = 0; |
| /* If there is a request in the queue start it. */ |
| if (ep->pcd->ep0_request_pending) |
| start_next_request(ep); |
| return 1; |
| } |
| return 0; |
| } |
| |
| /** |
| * This function completes the request for the EP. If there are |
| * additional requests for the EP in the queue they will be started. |
| */ |
| static void complete_ep(struct dwc_otg_pcd_ep *ep) |
| { |
| struct dwc_otg_core_if *core_if = GET_CORE_IF(ep->pcd); |
| struct dwc_otg_dev_if *dev_if = core_if->dev_if; |
| struct dwc_otg_dev_in_ep_regs __iomem *in_ep_regs = |
| dev_if->in_ep_regs[ep->dwc_ep.num]; |
| union deptsiz_data deptsiz; |
| union dev_dma_desc_sts desc_sts; |
| struct dwc_otg_pcd_request *req = NULL; |
| struct dwc_otg_dev_dma_desc *dma_desc; |
| int is_last = 0; |
| u32 byte_count = 0; |
| |
| DWC_DEBUGPL(DBG_PCDV, "%s() %s-%s\n", __func__, ep->ep.name, |
| (ep->dwc_ep.is_in ? "IN" : "OUT")); |
| |
| /* Get any pending requests */ |
| if (!list_empty(&ep->queue)) { |
| req = list_entry(ep->queue.next, struct dwc_otg_pcd_request, |
| queue); |
| if (!req) { |
| DWC_PRINT("complete_ep 0x%p, req = NULL!\n", ep); |
| return; |
| } |
| } else { |
| DWC_PRINT("complete_ep 0x%p, ep->queue empty!\n", ep); |
| return; |
| } |
| |
| if (ep->dwc_ep.is_in) { |
| deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz); |
| if (core_if->dma_enable) { |
| if (core_if->dma_desc_enable == 0) { |
| if (deptsiz.b.xfersize == 0 |
| && deptsiz.b.pktcnt == 0) { |
| byte_count = |
| ep->dwc_ep.xfer_len - |
| ep->dwc_ep.xfer_count; |
| |
| ep->dwc_ep.xfer_buff += byte_count; |
| ep->dwc_ep.dma_addr += byte_count; |
| ep->dwc_ep.xfer_count += byte_count; |
| |
| DWC_DEBUGPL(DBG_PCDV, |
| "%d-%s len=%d xfersize=%d " |
| "pktcnt=%d\n", |
| ep->dwc_ep.num, |
| (ep->dwc_ep. |
| is_in ? "IN" : "OUT"), |
| ep->dwc_ep.xfer_len, |
| deptsiz.b.xfersize, |
| deptsiz.b.pktcnt); |
| |
| if (ep->dwc_ep.xfer_len < |
| ep->dwc_ep.total_len) { |
| dwc_otg_ep_start_transfer |
| (core_if, &ep->dwc_ep); |
| } else if (ep->dwc_ep.sent_zlp) { |
| /* |
| * This fragment of code should initiate |
| * 0 length transfer in case if it is |
| * queued a transfer with size divisible |
| * to EPs max packet size and with |
| * usb_request zero field is set, which |
| * means that after data is transfered, |
| * it is also should be transfered a 0 |
| * length packet at the end. For Slave |
| * and Buffer DMA modes in this case |
| * SW has to initiate 2 transfers one |
| * with transfer size, and the second |
| * with 0 size. For Descriptor DMA mode |
| * SW is able to initiate a transfer, |
| * which will handle all the packets |
| * including the last 0 length. |
| */ |
| ep->dwc_ep.sent_zlp = 0; |
| dwc_otg_ep_start_zl_transfer(core_if, |
| &ep->dwc_ep); |
| } else |
| is_last = 1; |
| } else { |
| DWC_WARN |
| ("Incomplete transfer (%d - %s " |
| "[siz=%d pkt=%d])\n", |
| ep->dwc_ep.num, |
| (ep->dwc_ep.is_in ? "IN" : "OUT"), |
| deptsiz.b.xfersize, |
| deptsiz.b.pktcnt); |
| } |
| } else { /*not buffer dma, desc dma*/ |
| int i; |
| dma_desc = ep->dwc_ep.desc_addr; |
| byte_count = 0; |
| ep->dwc_ep.sent_zlp = 0; |
| |
| for (i = 0; i < ep->dwc_ep.desc_cnt; ++i) { |
| desc_sts = dma_desc->status; |
| byte_count += desc_sts.b.bytes; |
| dma_desc++; |
| } |
| |
| if (byte_count == 0) { |
| ep->dwc_ep.xfer_count = |
| ep->dwc_ep.total_len; |
| is_last = 1; |
| } else |
| DWC_WARN("Incomplete transfer\n"); |
| } |
| } else { /*no dma*/ |
| if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) { |
| DWC_DEBUGPL(DBG_PCDV, |
| "%d-%s len=%d xfersize=%d " |
| "pktcnt=%d\n", |
| ep->dwc_ep.num, |
| ep->dwc_ep.is_in ? "IN" : "OUT", |
| ep->dwc_ep.xfer_len, |
| deptsiz.b.xfersize, |
| deptsiz.b.pktcnt); |
| |
| /* |
| * Check if the whole transfer was completed, |
| * if no, setup transfer for next portion of |
| * data |
| */ |
| if (ep->dwc_ep.xfer_len < |
| ep->dwc_ep.total_len) { |
| dwc_otg_ep_start_transfer(core_if, |
| &ep->dwc_ep); |
| } else if (ep->dwc_ep.sent_zlp) { |
| /* |
| * This fragment of code should initiate |
| * 0 length transfer in case if it is |
| * queued a transfer with size divisible |
| * to EPs max packet size and with |
| * usb_request zero field is set, which |
| * means that after data is transfered, |
| * it is also should be transfered a 0 |
| * length packet at the end. For Slave |
| * and Buffer DMA modes in this case |
| * SW has to initiate 2 transfers one |
| * with transfer size, and the second |
| * with 0 size. For Descriptor DMA mode |
| * SW is able to initiate a transfer, |
| * which will handle all the packets |
| * including the last 0 length. |
| */ |
| ep->dwc_ep.sent_zlp = 0; |
| dwc_otg_ep_start_zl_transfer(core_if, |
| &ep-> |
| dwc_ep); |
| } else |
| is_last = 1; |
| } else { |
| DWC_WARN |
| ("Incomplete transfer (%d-%s " |
| "[siz=%d pkt=%d])\n", |
| ep->dwc_ep.num, |
| (ep->dwc_ep.is_in ? "IN" : "OUT"), |
| deptsiz.b.xfersize, deptsiz.b.pktcnt); |
| } |
| } |
| } else { /*Out Endpoint */ |
| struct dwc_otg_dev_out_ep_regs __iomem *out_ep_regs = |
| dev_if->out_ep_regs[ep->dwc_ep.num]; |
| desc_sts.d32 = 0; |
| if (core_if->dma_enable) { |
| if (core_if->dma_desc_enable) { |
| int i; |
| dma_desc = ep->dwc_ep.desc_addr; |
| byte_count = 0; |
| ep->dwc_ep.sent_zlp = 0; |
| |
| for (i = 0; i < ep->dwc_ep.desc_cnt; ++i) { |
| desc_sts = dma_desc->status; |
| byte_count += desc_sts.b.bytes; |
| dma_desc++; |
| } |
| |
| ep->dwc_ep.xfer_count = ep->dwc_ep.total_len |
| - byte_count + |
| ((4 - (ep->dwc_ep.total_len & 0x3)) & 0x3); |
| is_last = 1; |
| } else { |
| deptsiz.d32 = 0; |
| deptsiz.d32 = |
| dwc_read_reg32(&out_ep_regs->doeptsiz); |
| |
| byte_count = (ep->dwc_ep.xfer_len - |
| ep->dwc_ep.xfer_count - |
| deptsiz.b.xfersize); |
| ep->dwc_ep.xfer_buff += byte_count; |
| ep->dwc_ep.dma_addr += byte_count; |
| ep->dwc_ep.xfer_count += byte_count; |
| |
| /* |
| * Check if the whole transfer was completed, |
| * if no, setup transfer for next portion of |
| * data |
| */ |
| |
| if (ep->dwc_ep.xfer_len < |
| ep->dwc_ep.total_len) { |
| dwc_otg_ep_start_transfer(core_if, |
| &ep->dwc_ep); |
| } else if (ep->dwc_ep.sent_zlp) { |
| /* |
| * This fragment of code should initiate |
| * 0 length transfer in case if it is |
| * queued a transfer with size divisible |
| * to EPs max packet size and with |
| * usb_request zero field is set, which |
| * means that after data is transfered, |
| * it is also should be transfered a 0 |
| * length packet at the end. For Slave |
| * and Buffer DMA modes in this case |
| * SW has to initiate 2 transfers one |
| * with transfer size, and the second |
| * with 0 size. For Descriptor DMA mode |
| * SW is able to initiate a transfer, |
| * which will handle all the packets |
| * including the last 0 length. |
| */ |
| ep->dwc_ep.sent_zlp = 0; |
| dwc_otg_ep_start_zl_transfer(core_if, |
| &ep-> |
| dwc_ep); |
| } else |
| is_last = 1; |
| } |
| } else { |
| /* Check if the whole transfer was completed, |
| * if no, setup transfer for next portion of data |
| */ |
| if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) |
| dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); |
| else if (ep->dwc_ep.sent_zlp) { |
| ep->dwc_ep.sent_zlp = 0; |
| dwc_otg_ep_start_zl_transfer(core_if, |
| &ep->dwc_ep); |
| } else |
| is_last = 1; |
| } |
| |
| DWC_DEBUGPL(DBG_PCDV, |
| "addr %p, %d-%s len=%d cnt=%d xsize=%d pktcnt=%d\n", |
| &out_ep_regs->doeptsiz, ep->dwc_ep.num, |
| ep->dwc_ep.is_in ? "IN" : "OUT", |
| ep->dwc_ep.xfer_len, ep->dwc_ep.xfer_count, |
| deptsiz.b.xfersize, deptsiz.b.pktcnt); |
| } |
| |
| /* Complete the request */ |
| if (is_last) { |
| req->req.actual = ep->dwc_ep.xfer_count; |
| dwc_otg_request_done(ep, req, 0, NULL); |
| ep->dwc_ep.start_xfer_buff = NULL; |
| ep->dwc_ep.xfer_buff = NULL; |
| ep->dwc_ep.xfer_len = 0; |
| |
| /* If there is a request in the queue start it. */ |
| if (ep->request_pending) |
| start_next_request(ep); |
| |
| } |
| } |
| |
| |
| /** |
| * This function handles EP0 Control transfers. |
| * |
| * The state of the control tranfers are tracked in ep0state. |
| */ |
| static void handle_ep0(struct dwc_otg_pcd *pcd) |
| { |
| struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd); |
| struct dwc_otg_pcd_ep *ep0 = &pcd->ep0; |
| union dev_dma_desc_sts desc_sts; |
| union deptsiz0_data deptsiz; |
| u32 byte_count; |
| |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__); |
| print_ep0_state(pcd); |
| |
| #endif /* */ |
| switch (pcd->ep0state) { |
| case EP0_DISCONNECT: |
| break; |
| case EP0_IDLE: |
| pcd->request_config = 0; |
| pcd_setup(pcd); |
| break; |
| case EP0_IN_DATA_PHASE: |
| |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n", |
| ep0->dwc_ep.num, |
| (ep0->dwc_ep.is_in ? "IN" : "OUT"), |
| ep0->dwc_ep.type, ep0->dwc_ep.maxpacket); |
| |
| #endif /* */ |
| if (core_if->dma_enable != 0) { |
| /* |
| * For EP0 we can only program 1 packet at a time so we |
| * need to do the make calculations after each complete. |
| * Call write_packet to make the calculations, as in |
| * slave mode, and use those values to determine if we |
| * can complete. |
| */ |
| if (core_if->dma_desc_enable == 0) { |
| deptsiz.d32 = |
| dwc_read_reg32(&core_if->dev_if-> |
| in_ep_regs[0]->dieptsiz); |
| byte_count = |
| ep0->dwc_ep.xfer_len - deptsiz.b.xfersize; |
| } else { |
| desc_sts = |
| core_if->dev_if->in_desc_addr->status; |
| byte_count = |
| ep0->dwc_ep.xfer_len - desc_sts.b.bytes; |
| } |
| ep0->dwc_ep.xfer_count += byte_count; |
| ep0->dwc_ep.xfer_buff += byte_count; |
| ep0->dwc_ep.dma_addr += byte_count; |
| } |
| if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) { |
| dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd), |
| &ep0->dwc_ep); |
| DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); |
| } else if (ep0->dwc_ep.sent_zlp) { |
| dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd), |
| &ep0->dwc_ep); |
| ep0->dwc_ep.sent_zlp = 0; |
| DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); |
| } else { |
| ep0_complete_request(ep0); |
| DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n"); |
| } |
| break; |
| case EP0_OUT_DATA_PHASE: |
| #ifdef DEBUG_EP0 |
| DWC_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n", |
| ep0->dwc_ep.num, (ep0->dwc_ep.is_in ? "IN" : "OUT"), |
| ep0->dwc_ep.type, ep0->dwc_ep.maxpacket); |
| #endif |
| if (core_if->dma_enable != 0) { |
| if (core_if->dma_desc_enable == 0) { |
| deptsiz.d32 = |
| dwc_read_reg32(&core_if->dev_if-> |
| out_ep_regs[0]->doeptsiz); |
| byte_count = |
| ep0->dwc_ep.maxpacket - deptsiz.b.xfersize; |
| } else { |
| desc_sts = |
| core_if->dev_if->out_desc_addr->status; |
| byte_count = |
| ep0->dwc_ep.maxpacket - desc_sts.b.bytes; |
| } |
| ep0->dwc_ep.xfer_count += byte_count; |
| ep0->dwc_ep.xfer_buff += byte_count; |
| ep0->dwc_ep.dma_addr += byte_count; |
| } |
| if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) { |
| dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd), |
| &ep0->dwc_ep); |
| DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); |
| } else if (ep0->dwc_ep.sent_zlp) { |
| dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd), |
| &ep0->dwc_ep); |
| ep0->dwc_ep.sent_zlp = 0; |
| DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); |
| } else { |
| ep0_complete_request(ep0); |
| DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n"); |
| } |
| break; |
| |
| case EP0_IN_STATUS_PHASE: |
| case EP0_OUT_STATUS_PHASE: |
| DWC_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n"); |
| ep0_complete_request(ep0); |
| pcd->ep0state = EP0_IDLE; |
| ep0->stopped = 1; |
| ep0->dwc_ep.is_in = 0; /* OUT for next SETUP */ |
| |
| /* Prepare for more SETUP Packets */ |
| if (core_if->dma_enable) |
| ep0_out_start(core_if, pcd); |
| |
| break; |
| case EP0_STALL: |
| DWC_ERROR("EP0 STALLed, should not get here pcd_setup()\n"); |
| break; |
| } |
| |
| #ifdef DEBUG_EP0 |
| print_ep0_state(pcd); |
| #endif /* */ |
| } |
| |
| /** |
| * Restart transfer |
| */ |
static void restart_transfer(struct dwc_otg_pcd *pcd, const u32 _epnum)
{
	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
	union deptsiz_data dieptsiz = {.d32 = 0};

	struct dwc_otg_pcd_ep *ep;
	/* Snapshot the IN EP's remaining xfersize/pktcnt before deciding
	 * whether anything is left to (re)send. */
	dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[_epnum]->dieptsiz);
	ep = get_in_ep(pcd, _epnum);

	DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x xfer_len=%0x"
			" stopped=%d\n", ep->dwc_ep.xfer_buff,
			ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len,
			ep->stopped);

	/*
	 * If xfersize is 0 and pktcnt is not 0, the core stopped mid-transfer:
	 * resend the last packet.  start_xfer_buff must be valid or there is
	 * nothing to rewind to.
	 */
	if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0
		&& ep->dwc_ep.start_xfer_buff) {
		if (ep->dwc_ep.xfer_len <= ep->dwc_ep.maxpacket) {
			/* Single-packet transfer: restart from the very
			 * beginning of the buffer. */
			ep->dwc_ep.xfer_count = 0;
			ep->dwc_ep.xfer_buff = ep->dwc_ep.start_xfer_buff;
		} else {
			ep->dwc_ep.xfer_count -= ep->dwc_ep.maxpacket;

			/* Rewind the buffer pointer by one max-packet so the
			 * last packet is transmitted again. */
			ep->dwc_ep.xfer_buff -= ep->dwc_ep.maxpacket;
		}
		ep->stopped = 0;
		DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x "
			"xfer_len=%0x stopped=%d\n", ep->dwc_ep.xfer_buff,
			ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len,
			ep->stopped);
		/* EP0 uses its own transfer-start path. */
		if (_epnum == 0)
			dwc_otg_ep0_start_transfer(core_if, &ep->dwc_ep);
		else
			dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
	}
}
| |
| /** |
| * handle the IN EP disable interrupt. |
| */ |
static void handle_in_ep_disable_intr(struct dwc_otg_pcd *pcd,
					const u32 _epnum)
{
	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
	union deptsiz_data dieptsiz = {.d32 = 0};
	union dctl_data dctl = {.d32 = 0};
	struct dwc_otg_pcd_ep *ep;
	struct dwc_ep *dwc_ep;
	ep = get_in_ep(pcd, _epnum);
	dwc_ep = &ep->dwc_ep;

	/* For ISOC endpoints only the Tx FIFO needs flushing; no transfer
	 * restart is attempted. */
	if (dwc_ep->type == USB_ENDPOINT_XFER_ISOC) {
		dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num);
		return;
	}

	DWC_DEBUGPL(DBG_PCD, "diepctl%d=%0x\n", _epnum,
		dwc_read_reg32(&dev_if->in_ep_regs[_epnum]->diepctl));
	dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[_epnum]->dieptsiz);
	DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", dieptsiz.b.pktcnt,
			dieptsiz.b.xfersize);
	if (ep->stopped) {

		/* Flush the Tx FIFO */
		/** @todo NGS: This is not the correct FIFO */
		dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num);

		/* Clear the Global IN NP NAK */
		dctl.d32 = 0;
		dctl.b.cgnpinnak = 1;
		dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, 0);

		/* Restart the transaction if data is still pending. */
		if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0)
			restart_transfer(pcd, _epnum);
	} else {

		/* Restart the transaction if data is still pending. */
		if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0)
			restart_transfer(pcd, _epnum);
		/* NOTE(review): "STOPPED!!!" is printed on the branch where
		 * ep->stopped is false - message and condition look swapped;
		 * confirm intent before changing. */
		DWC_DEBUGPL(DBG_ANY, "STOPPED!!!\n");
	}
}
| |
| /** |
| * Handler for the IN EP timeout handshake interrupt. |
| */ |
| static void handle_in_ep_timeout_intr(struct dwc_otg_pcd *pcd, |
| const u32 _epnum) |
| { |
| struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd); |
| struct dwc_otg_dev_if *dev_if = core_if->dev_if; |
| |
| #ifdef DEBUG |
| union deptsiz_data dieptsiz = {.d32 = 0}; |
| u32 epnum = 0; |
| |
| #endif /* */ |
| union dctl_data dctl = {.d32 = 0}; |
| struct dwc_otg_pcd_ep *ep; |
| union gintmsk_data intr_mask = {.d32 = 0}; |
| ep = get_in_ep(pcd, _epnum); |
| |
| /* Disable the NP Tx Fifo Empty Interrrupt */ |
| if (!core_if->dma_enable) { |
| intr_mask.b.nptxfempty = 1; |
| dwc_modify_reg32(&core_if->core_global_regs->gintmsk, |
| intr_mask.d32, 0); |
| } |
| |
| /** @todo NGS Check EP type. |
| * Implement for Periodic EPs */ |
| /* |
| * Non-periodic EP |
| */ |
| /* Enable the Global IN NAK Effective Interrupt */ |
| intr_mask.b.ginnakeff = 1; |
| dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0, |
| intr_mask.d32); |
| |
| /* Set Global IN NAK */ |
| dctl.b.sgnpinnak = 1; |
| dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32); |
| ep->stopped = 1; |
| |
| #ifdef DEBUG |
| dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz); |
| DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", dieptsiz.b.pktcnt, |
| dieptsiz.b.xfersize); |
| |
| #endif /* */ |
| |
| #ifdef DISABLE_PERIODIC_EP |
| /* |
| * Set the NAK bit for this EP to |
| * start the disable process. |
| */ |
| diepctl.d32 = 0; |
| diepctl.b.snak = 1; |
| dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->diepctl, diepctl.d32, |
| diepctl.d32); |
| ep->disabling = 1; |
| ep->stopped = 1; |
| |
| #endif /* */ |
| } |
| |
| /** |
| * Handler for the IN EP NAK interrupt. |
| */ |
| static int handle_in_ep_nak_intr(struct dwc_otg_pcd *pcd, |
| const u32 epnum) |
| { |
| /** @todo implement ISR */ |
| struct dwc_otg_core_if *core_if; |
| union diepint_data intr_mask = {.d32 = 0 }; |
| |
| DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "IN EP NAK"); |
| core_if = GET_CORE_IF(pcd); |
| intr_mask.b.nak = 1; |
| |
| if (core_if->multiproc_int_enable) { |
| dwc_modify_reg32(&core_if->dev_if->dev_global_regs-> |
| diepeachintmsk[epnum], intr_mask.d32, 0); |
| } else { |
| dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepmsk, |
| intr_mask.d32, 0); |
| } |
| |
| return 1; |
| } |
| |
| /** |
| * Handler for the OUT EP Babble interrupt. |
| */ |
| static int handle_out_ep_babble_intr(struct dwc_otg_pcd *pcd, |
| const u32 epnum) |
| { |
| /** @todo implement ISR */ |
| struct dwc_otg_core_if *core_if; |
| union doepint_data intr_mask = {.d32 = 0 }; |
| |
| DWC_PRINT("INTERRUPT Handler not implemented for %s\n", |
| "OUT EP Babble"); |
| core_if = GET_CORE_IF(pcd); |
| intr_mask.b.babble = 1; |
| |
| if (core_if->multiproc_int_enable) { |
| dwc_modify_reg32(&core_if->dev_if->dev_global_regs-> |
| doepeachintmsk[epnum], intr_mask.d32, 0); |
| } else { |
| dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, |
| intr_mask.d32, 0); |
| } |
| |
| return 1; |
| } |
| |
| /** |
| * Handler for the OUT EP NAK interrupt. |
| */ |
| static int handle_out_ep_nak_intr(struct dwc_otg_pcd *pcd, |
| const u32 epnum) |
| { |
| /** @todo implement ISR */ |
| struct dwc_otg_core_if *core_if; |
| union doepint_data intr_mask = {.d32 = 0 }; |
| |
| DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NAK"); |
| core_if = GET_CORE_IF(pcd); |
| intr_mask.b.nak = 1; |
| |
| if (core_if->multiproc_int_enable) { |
| dwc_modify_reg32(&core_if->dev_if->dev_global_regs-> |
| doepeachintmsk[epnum], intr_mask.d32, 0); |
| } else { |
| dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, |
| intr_mask.d32, 0); |
| } |
| |
| return 1; |
| } |
| |
| /** |
| * Handler for the OUT EP NYET interrupt. |
| */ |
| static int handle_out_ep_nyet_intr(struct dwc_otg_pcd *pcd, |
| const u32 epnum) |
| { |
| /** @todo implement ISR */ |
| struct dwc_otg_core_if *core_if; |
| union doepint_data intr_mask = {.d32 = 0 }; |
| |
| DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NYET"); |
| core_if = GET_CORE_IF(pcd); |
| intr_mask.b.nyet = 1; |
| |
| if (core_if->multiproc_int_enable) { |
| dwc_modify_reg32(&core_if->dev_if->dev_global_regs-> |
| doepeachintmsk[epnum], intr_mask.d32, 0); |
| } else { |
| dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, |
| intr_mask.d32, 0); |
| } |
| |
| return 1; |
| } |
| |
| /** |
| * This interrupt indicates that an IN EP has a pending Interrupt. |
| * The sequence for handling the IN EP interrupt is shown below: |
| * -# Read the Device All Endpoint Interrupt register |
| * -# Repeat the following for each IN EP interrupt bit set (from |
| * LSB to MSB). |
| * -# Read the Device Endpoint Interrupt (DIEPINTn) register |
| * -# If "Transfer Complete" call the request complete function |
| * -# If "Endpoint Disabled" complete the EP disable procedure. |
| * -# If "AHB Error Interrupt" log error |
| * -# If "Time-out Handshake" log error |
| * -# If "IN Token Received when TxFIFO Empty" write packet to Tx |
| * FIFO. |
| * -# If "IN Token EP Mismatch" (disable, this is handled by EP |
| * Mismatch Interrupt) |
| */ |
static int dwc_otg_pcd_handle_in_ep_intr(struct dwc_otg_pcd *pcd)
{

/* Acknowledge (write-1-to-clear) a single interrupt bit in DIEPINTn. */
#define CLEAR_IN_EP_INTR(__core_if, __epnum, __intr) \
do { \
	union diepint_data diepint = {.d32 = 0}; \
	diepint.b.__intr = 1; \
	dwc_write_reg32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \
			diepint.d32); \
} while (0)

	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
	union diepint_data diepint = {.d32 = 0};
	union dctl_data dctl = {.d32 = 0 };
	union depctl_data diepctl = {.d32 = 0};
	u32 ep_intr;
	u32 epnum = 0;
	struct dwc_otg_pcd_ep *ep;
	struct dwc_ep *dwc_ep;
	u32 _empty_msk, _diepctl;
	union gintmsk_data intr_mask = {.d32 = 0};
	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd);

	/* Read in the device interrupt bits */
	ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if);

	/* Service the Device IN interrupts for each endpoint, walking the
	 * pending bits from LSB (EP0) to MSB. */
	while (ep_intr) {
		if (ep_intr & 0x1) {
			/* Get EP pointer */
			ep = get_in_ep(pcd, epnum);
			dwc_ep = &ep->dwc_ep;
			_diepctl =
				dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl);
			_empty_msk =
				dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk);
			DWC_DEBUGPL(DBG_PCDV,
					"IN EP INTERRUPT - %d\nepmty_msk "
					"- %8x diepctl - %8x\n",
					epnum, _empty_msk, _diepctl);
			DWC_DEBUGPL(DBG_PCD, "EP%d-%s: type=%d, mps=%d\n",
					dwc_ep->num,
					(dwc_ep->is_in ? "IN" : "OUT"),
					dwc_ep->type,
					dwc_ep->maxpacket);
			/* Only the enabled per-EP interrupt bits are read. */
			diepint.d32 =
				dwc_otg_read_dev_in_ep_intr(core_if, dwc_ep);
			DWC_DEBUGPL(DBG_PCDV, "EP %d Interrupt "
					"Register - 0x%x\n",
					epnum, diepint.d32);

			/* Transfer complete */
			if (diepint.b.xfercompl) {
				DWC_DEBUGPL(DBG_PCD, "EP%d IN Xfer Complete\n",
						epnum);

				/* Disable the NP Tx FIFO Empty
				 * Interrrupt */
				if (core_if->en_multiple_tx_fifo == 0) {
					intr_mask.b.nptxfempty = 1;
					dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
							intr_mask.d32, 0);
				} else {
					/*
					 * Disable the Tx FIFO Empty
					 * Interrupt for this EP
					 */
					u32 fifoemptymsk = 0x1 << dwc_ep->num;
					dwc_modify_reg32(&core_if->dev_if->
							dev_global_regs->
							dtknqr4_fifoemptymsk,
							fifoemptymsk,
							0);
				}

				/*
				 * Clear the bit in DIEPINTn
				 * for this interrupt *
				 */
				CLEAR_IN_EP_INTR(core_if, epnum, xfercompl);

				/* Complete the transfer */
				if (epnum == 0)
					handle_ep0(pcd);
				else
					complete_ep(ep);
			}

			/* Endpoint disable */
			if (diepint.b.epdisabled) {
				DWC_DEBUGPL(DBG_ANY, "EP%d IN disabled\n",
						epnum);
				handle_in_ep_disable_intr(pcd, epnum);

				/*
				 * Clear the bit in DIEPINTn
				 * for this interrupt
				 */
				CLEAR_IN_EP_INTR(core_if, epnum, epdisabled);
			}

			/* AHB Error - logged only; no recovery attempted. */
			if (diepint.b.ahberr) {
				DWC_DEBUGPL(DBG_ANY, "EP%d IN AHB Error\n",
						epnum);

				/*
				 * Clear the bit in DIEPINTn
				 * for this interrupt
				 */
				CLEAR_IN_EP_INTR(core_if, epnum, ahberr);
			}

			/* TimeOUT Handshake (non-ISOC IN EPs) */
			if (diepint.b.timeout) {
				DWC_DEBUGPL(DBG_ANY, "EP%d IN Time-out\n",
						epnum);
				handle_in_ep_timeout_intr(pcd, epnum);
				CLEAR_IN_EP_INTR(core_if, epnum, timeout);
			}

			/** IN Token received with TxF Empty.
			 *
			 * NOTE: the CONFIG_405EZ #ifdef/#else below spans
			 * brace boundaries - the closing braces differ
			 * between the two preprocessor paths.  Edit with
			 * extreme care. */
			if (diepint.b.intktxfemp) {
				DWC_DEBUGPL(DBG_ANY, "EP%d IN TKN "
						"TxFifo Empty\n", epnum);
				if (!ep->stopped && epnum != 0) {
					union diepint_data diepmsk = {.d32 = 0};
					diepmsk.b.intktxfemp = 1;
					if (core_if->multiproc_int_enable) {
						dwc_modify_reg32(&dev_if->
								dev_global_regs->
								diepeachintmsk
								[epnum],
								diepmsk.d32,
								0);
					} else {
						dwc_modify_reg32(&dev_if->
								dev_global_regs->
								diepmsk,
								diepmsk.d32,
								0);
#ifdef CONFIG_405EZ
						/*
						 * Added-sr: 2007-07-26
						 *
						 * Only start the next transfer,
						 * when currently no other
						 * transfer is
						 * active on this endpoint.
						 */
						if (dwc_ep->active == 0)
							start_next_request(ep);
					}
#else
					}
				} else if (core_if->dma_desc_enable
						&& epnum == 0
						&& pcd->ep0state ==
						EP0_OUT_STATUS_PHASE) {

					diepctl.d32 =
						dwc_read_reg32(&dev_if->
								in_ep_regs[epnum]->
								diepctl);

					/* set the disable and stall bits */
					if (diepctl.b.epena) {
						if (dwc_otg_can_disable_channel(
								core_if, dwc_ep))
							diepctl.b.epdis = 1;
					}

					diepctl.b.stall = 1;
					dwc_write_reg32(&dev_if->
							in_ep_regs[epnum]->
							diepctl, diepctl.d32);
#endif
				}
				CLEAR_IN_EP_INTR(core_if, epnum, intktxfemp);
			}

			/** IN Token Received with EP mismatch -
			 * handled by the EP Mismatch Interrupt; only ack. */
			if (diepint.b.intknepmis) {
				DWC_DEBUGPL(DBG_ANY,
						"EP%d IN TKN EP Mismatch\n",
						epnum);
				CLEAR_IN_EP_INTR(core_if, epnum, intknepmis);
			}

			/** IN Endpoint NAK Effective */
			if (diepint.b.inepnakeff) {
				DWC_DEBUGPL(DBG_ANY, "EP%d IN EP NAK "
						"Effective\n", epnum);

				/* Periodic EP - finish the disable started
				 * elsewhere, now that NAK is effective. */
				if (ep->disabling) {
					diepctl.d32 = 0;
					diepctl.b.snak = 1;
					if (dwc_otg_can_disable_channel(core_if,
								dwc_ep))
						diepctl.b.epdis = 1;
					dwc_modify_reg32(&dev_if->
							in_ep_regs[epnum]->
							diepctl,
							diepctl.d32,
							diepctl.d32);
				}
				CLEAR_IN_EP_INTR(core_if, epnum, inepnakeff);
			}

			/** IN EP Tx FIFO Empty Intr - feed the FIFO (slave
			 * mode / dedicated-FIFO path). */
			if (diepint.b.emptyintr) {
				DWC_DEBUGPL(DBG_ANY,
						"EP%d Tx FIFO Empty Intr \n",
						epnum);

				write_empty_tx_fifo(pcd, epnum);
				CLEAR_IN_EP_INTR(core_if, epnum, emptyintr);
			}
			/** IN EP BNA Intr - descriptor DMA "Buffer Not
			 * Available"; only meaningful with dma_desc_enable. */
			if (diepint.b.bna) {
				CLEAR_IN_EP_INTR(core_if, epnum, bna);
				if (core_if->dma_desc_enable) {
					dctl.d32 =
						dwc_read_reg32(&dev_if->
								dev_global_regs->
								dctl);

					/*
					 * If Global Continue on BNA is
					 * disabled - disable EP
					 */
					if (!dctl.b.gcontbna) {
						diepctl.d32 = 0;
						diepctl.b.snak = 1;
						if (dwc_otg_can_disable_channel(
								core_if, dwc_ep))
							diepctl.b.epdis = 1;
						dwc_modify_reg32(&dev_if->
								in_ep_regs[epnum]->
								diepctl,
								diepctl.d32,
								diepctl.d32);
					} else
						start_next_request(ep);
				}
			}
			/* NAK Interrupt */
			if (diepint.b.nak) {
				DWC_DEBUGPL(DBG_ANY, "EP%d IN NAK Interrupt\n",
						epnum);
				handle_in_ep_nak_intr(pcd, epnum);

				CLEAR_IN_EP_INTR(core_if, epnum, nak);
			}
		}
		epnum++;
		ep_intr >>= 1;
	}
	return 1;
#undef CLEAR_IN_EP_INTR
}
| |
| /** |
| * This interrupt indicates that an OUT EP has a pending Interrupt. |
| * The sequence for handling the OUT EP interrupt is shown below: |
| * -# Read the Device All Endpoint Interrupt register |
| * -# Repeat the following for each OUT EP interrupt bit set (from |
| * LSB to MSB). |
| * -# Read the Device Endpoint Interrupt (DOEPINTn) register |
| * -# If "Transfer Complete" call the request complete function |
| * -# If "Endpoint Disabled" complete the EP disable procedure. |
| * -# If "AHB Error Interrupt" log error |
| * -# If "Setup Phase Done" process Setup Packet (See Standard USB |
| * Command Processing) |
| */ |
static int dwc_otg_pcd_handle_out_ep_intr(struct dwc_otg_pcd *pcd)
{

/* Acknowledge (write-1-to-clear) a single interrupt bit in DOEPINTn. */
#define CLEAR_OUT_EP_INTR(__core_if, __epnum, __intr) \
do { \
	union doepint_data doepint = { .d32 = 0}; \
	doepint.b.__intr = 1; \
	dwc_write_reg32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \
			doepint.d32); \
} while (0)

	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);
	struct dwc_otg_dev_if *dev_if = core_if->dev_if;
	u32 ep_intr;
	union doepint_data doepint = {.d32 = 0};
	union dctl_data dctl = {.d32 = 0 };
	union depctl_data doepctl = {.d32 = 0 };
	u32 epnum = 0;
	struct dwc_otg_pcd_ep *ep;
	struct dwc_ep *dwc_ep;
	DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);

	/* Read in the device interrupt bits */
	ep_intr = dwc_otg_read_dev_all_out_ep_intr(core_if);
	/* Walk the pending per-EP bits from LSB (EP0) to MSB. */
	while (ep_intr) {
		if (ep_intr & 0x1) {
			/* Get EP pointer */
			ep = get_out_ep(pcd, epnum);
			dwc_ep = &ep->dwc_ep;
#ifdef VERBOSE
			DWC_DEBUGPL(DBG_PCDV, "EP%d-%s: type=%d, mps=%d\n",
					dwc_ep->num,
					(dwc_ep->is_in ? "IN" : "OUT"),
					dwc_ep->type, dwc_ep->maxpacket);

#endif /* */
			/* Only the enabled per-EP interrupt bits are read. */
			doepint.d32 =
				dwc_otg_read_dev_out_ep_intr(core_if, dwc_ep);

			/* Transfer complete - the interrupt is acked BEFORE
			 * the handler runs so a new event is not lost. */
			if (doepint.b.xfercompl) {
				DWC_DEBUGPL(DBG_PCD, "EP%d OUT Xfer Complete\n",
						epnum);

				if (epnum == 0) {
					/*
					 * Clear the bit in DOEPINTn
					 * for this interrupt
					 */
					CLEAR_OUT_EP_INTR(core_if, epnum,
							xfercompl);
					/* In descriptor-DMA mode an xfercompl
					 * while EP0 is idle is spurious. */
					if (core_if->dma_desc_enable == 0
						|| pcd->ep0state != EP0_IDLE)
						handle_ep0(pcd);
				} else {
					/*
					 * Clear the bit in DOEPINTn
					 * for this interrupt
					 */
					CLEAR_OUT_EP_INTR(core_if, epnum,
							xfercompl);
					complete_ep(ep);
				}
			}

			/* Endpoint disable - only acknowledged here. */
			if (doepint.b.epdisabled) {
				DWC_DEBUGPL(DBG_PCD, "EP%d OUT disabled\n",
						epnum);

				/*
				 * Clear the bit in DOEPINTn
				 * for this interrupt
				 * */
				CLEAR_OUT_EP_INTR(core_if, epnum, epdisabled);
			}

			/* AHB Error - logged only; no recovery attempted. */
			if (doepint.b.ahberr) {
				DWC_DEBUGPL(DBG_PCD, "EP%d OUT AHB Error\n",
						epnum);
				DWC_DEBUGPL(DBG_PCD, "EP DMA REG\t %d\n",
						dwc_read_reg32(&core_if->dev_if->
						out_ep_regs[epnum]->doepdma));
				CLEAR_OUT_EP_INTR(core_if, epnum, ahberr);
			}

			/* Setup Phase Done (control EPs) - dispatch the SETUP
			 * packet through the EP0 state machine. */
			if (doepint.b.setup) {
#ifdef DEBUG_EP0
				DWC_DEBUGPL(DBG_PCD, "EP%d SETUP Done\n",
						epnum);

#endif /* */
				CLEAR_OUT_EP_INTR(core_if, epnum, setup);
				handle_ep0(pcd);
			}
			/** OUT EP BNA Intr - descriptor DMA "Buffer Not
			 * Available"; only meaningful with dma_desc_enable. */
			if (doepint.b.bna) {
				CLEAR_OUT_EP_INTR(core_if, epnum, bna);
				if (core_if->dma_desc_enable) {
					dctl.d32 = dwc_read_reg32(&dev_if->
							dev_global_regs->
							dctl);

					/*
					 * If Global Continue on BNA is disabled
					 * - disable EP
					 */
					if (!dctl.b.gcontbna) {
						doepctl.d32 = 0;
						doepctl.b.snak = 1;
						doepctl.b.epdis = 1;
						dwc_modify_reg32(&dev_if->
								out_ep_regs[epnum]->
								doepctl,
								doepctl.d32,
								doepctl.d32);
					} else
						start_next_request(ep);

				}
			}
			/* Status phase received (descriptor DMA): prime the
			 * IN status stage of the control transfer. */
			if (doepint.b.stsphsercvd) {
				CLEAR_OUT_EP_INTR(core_if, epnum, stsphsercvd);
				if (core_if->dma_desc_enable)
					do_setup_in_status_phase(pcd);
			}
			/* Babble Interrupt */
			if (doepint.b.babble) {
				DWC_DEBUGPL(DBG_ANY, "EP%d OUT Babble\n",
						epnum);
				handle_out_ep_babble_intr(pcd, epnum);

				CLEAR_OUT_EP_INTR(core_if, epnum, babble);
			}
			/* NAK Interrupt */
			if (doepint.b.nak) {
				DWC_DEBUGPL(DBG_ANY, "EP%d OUT NAK\n", epnum);
				handle_out_ep_nak_intr(pcd, epnum);

				CLEAR_OUT_EP_INTR(core_if, epnum, nak);
			}
			/* NYET Interrupt */
			if (doepint.b.nyet) {
				DWC_DEBUGPL(DBG_ANY, "EP%d OUT NYET\n", epnum);
				handle_out_ep_nyet_intr(pcd, epnum);
				CLEAR_OUT_EP_INTR(core_if, epnum, nyet);
			}
		}
		epnum++;
		ep_intr >>= 1;
	}

	return 1;

#undef CLEAR_OUT_EP_INTR
}
| |
| /** |
| * Incomplete ISO IN Transfer Interrupt. |
| */ |
| static int dwc_otg_pcd_handle_incomplete_isoc_in_intr(struct dwc_otg_pcd *pcd) |
| { |
| union gintsts_data gintsts; |
| union gintmsk_data intr_mask = {.d32 = 0}; |
| DWC_PRINT("INTERRUPT Handler not implemented for %s\n", |
| "IN ISOC Incomplete"); |
| |
| intr_mask.b.incomplisoin = 1; |
| dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, |
| intr_mask.d32, 0); |
| |
| /* Clear interrupt */ |
| gintsts.d32 = 0; |
| gintsts.b.incomplisoin = 1; |
| dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, |
| gintsts.d32); |
| return 1; |
| } |
| |
| /** |
| * Incomplete ISO OUT Transfer Interrupt. |
| */ |
| static int dwc_otg_pcd_handle_incomplete_isoc_out_intr(struct dwc_otg_pcd *pcd) |
| { |
| |
| union gintsts_data gintsts; |
| union gintmsk_data intr_mask = {.d32 = 0 }; |
| |
| DWC_PRINT("INTERRUPT Handler not implemented for %s\n", |
| "OUT ISOC Incomplete"); |
| |
| intr_mask.b.incomplisoout = 1; |
| dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, |
| intr_mask.d32, 0); |
| |
| /* Clear interrupt */ |
| gintsts.d32 = 0; |
| gintsts.b.incomplisoout = 1; |
| dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, |
| gintsts.d32); |
| return 1; |
| } |
| |
| /** |
| * This function handles the Global IN NAK Effective interrupt. |
| * |
| */ |
| static int dwc_otg_pcd_handle_in_nak_effective(struct dwc_otg_pcd *pcd) |
| { |
| struct dwc_otg_dev_if *dev_if = GET_CORE_IF(pcd)->dev_if; |
| struct dwc_otg_pcd_ep *ep; |
| union depctl_data diepctl = {.d32 = 0}; |
| union depctl_data diepctl_rd = {.d32 = 0}; |
| union gintmsk_data intr_mask = {.d32 = 0}; |
| union gintsts_data gintsts; |
| int i; |
| DWC_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n"); |
| |
| /* Disable all active IN EPs */ |
| diepctl.b.snak = 1; |
| for (i = 0; i <= dev_if->num_in_eps; i++) { |
| ep = get_in_ep(pcd, i); |
| if (dwc_otg_can_disable_channel(GET_CORE_IF(pcd), &ep->dwc_ep)) |
| diepctl.b.epdis = 1; |
| else |
| diepctl.b.epdis = 0; |
| |
| diepctl_rd.d32 = |
| dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); |
| if (diepctl_rd.b.epena) |
| dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, |
| diepctl.d32); |
| } |
| |
| /* Disable the Global IN NAK Effective Interrupt */ |
| intr_mask.b.ginnakeff = 1; |
| dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, |
| intr_mask.d32, 0); |
| |
| /* Clear interrupt */ |
| gintsts.d32 = 0; |
| gintsts.b.ginnakeff = 1; |
| dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, |
| gintsts.d32); |
| return 1; |
| } |
| |
| /** |
| * OUT NAK Effective. |
| * |
| */ |
| static int dwc_otg_pcd_handle_out_nak_effective(struct dwc_otg_pcd *pcd) |
| { |
| union gintmsk_data intr_mask = {.d32 = 0}; |
| union gintsts_data gintsts; |
| DWC_PRINT("INTERRUPT Handler not implemented for %s\n", |
| "Global IN NAK Effective\n"); |
| |
| /* Disable the Global IN NAK Effective Interrupt */ |
| intr_mask.b.goutnakeff = 1; |
| dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, |
| intr_mask.d32, 0); |
| |
| /* Clear interrupt */ |
| gintsts.d32 = 0; |
| gintsts.b.goutnakeff = 1; |
| dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, |
| gintsts.d32); |
| return 1; |
| } |
| |
| /** |
| * PCD interrupt handler. |
| * |
| * The PCD handles the device interrupts. Many conditions can cause a |
| * device interrupt. When an interrupt occurs, the device interrupt |
| * service routine determines the cause of the interrupt and |
| * dispatches handling to the appropriate function. These interrupt |
| * handling functions are described below. |
| * |
| * All interrupt registers are processed from LSB to MSB. |
| * |
| */ |
int dwc_otg_pcd_handle_intr(struct dwc_otg_pcd *pcd)
{
	struct dwc_otg_core_if *core_if = GET_CORE_IF(pcd);

#ifdef VERBOSE
	struct dwc_otg_core_global_regs __iomem *global_regs =
		core_if->core_global_regs;
#endif /* */
	union gintsts_data gintr_status;
	/* OR of all dispatched handlers' return values; 0 means no device
	 * interrupt was serviced. */
	int retval = 0;

#ifdef VERBOSE
	DWC_DEBUGPL(DBG_ANY, "%s() gintsts=%08x gintmsk=%08x\n",
			__func__,
			dwc_read_reg32(&global_regs->gintsts),
			dwc_read_reg32(&global_regs->gintmsk));
#endif
	/* Device interrupts are only serviced in device mode; host-mode
	 * interrupts are handled elsewhere. */
	if (dwc_otg_is_device_mode(core_if)) {
		/* Must run in interrupt context; pcd->lock serializes
		 * against the gadget API entry points. */
		WARN_ON(!in_interrupt());
		spin_lock(&pcd->lock);

#ifdef VERBOSE
		DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%08x gintmsk=%08x\n",
				__func__,
				dwc_read_reg32(&global_regs->gintsts),
				dwc_read_reg32(&global_regs->gintmsk));

#endif /* */
		/* Pending AND enabled interrupt bits only; zero means the
		 * (possibly shared) IRQ was not for us. */
		gintr_status.d32 = dwc_otg_read_core_intr(core_if);
		if (!gintr_status.d32) {
			spin_unlock(&pcd->lock);
			return 0;
		}

		DWC_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n", __func__,
				gintr_status.d32);
		if (gintr_status.b.sofintr) {
			retval |=
				dwc_otg_pcd_handle_sof_intr(pcd);
		}
#ifndef OTG_PLB_DMA_TASKLET
		/* Without the PLB DMA tasklet, RX status queue and NP Tx
		 * FIFO empty are serviced up-front; with the tasklet they
		 * are deferred to the end of this function. */
		if (gintr_status.b.rxstsqlvl) {
			retval |=
				dwc_otg_pcd_handle_rx_status_q_level_intr(pcd);
		}
		if (gintr_status.b.nptxfempty) {
			retval |=
				dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd);
		}
#endif
		if (gintr_status.b.ginnakeff) {
			retval |=
				dwc_otg_pcd_handle_in_nak_effective(pcd);
		}
		if (gintr_status.b.goutnakeff) {
			retval |=
				dwc_otg_pcd_handle_out_nak_effective(pcd);
		}
		if (gintr_status.b.i2cintr) {
			retval |=
				dwc_otg_pcd_handle_i2c_intr(pcd);
		}
		if (gintr_status.b.erlysuspend) {
			retval |=
				dwc_otg_pcd_handle_early_suspend_intr(pcd);
		}
		if (gintr_status.b.usbreset) {
			retval |=
				dwc_otg_pcd_handle_usb_reset_intr(pcd);
		}
		if (gintr_status.b.enumdone) {
			retval |=
				dwc_otg_pcd_handle_enum_done_intr(pcd);
		}
		if (gintr_status.b.isooutdrop) {
			retval |=
				dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(pcd);
		}
		if (gintr_status.b.eopframe) {
			retval |=
				dwc_otg_pcd_handle_end_periodic_frame_intr(pcd);
		}
		if (gintr_status.b.epmismatch) {
			retval |=
				dwc_otg_pcd_handle_ep_mismatch_intr(core_if);
		}
		if (gintr_status.b.inepint) {
			if (!core_if->multiproc_int_enable)
				retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
		}
		if (gintr_status.b.outepintr) {
			if (!core_if->multiproc_int_enable)
				retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
		}
		if (gintr_status.b.incomplisoin) {
			retval |=
				dwc_otg_pcd_handle_incomplete_isoc_in_intr(pcd);
		}
		if (gintr_status.b.incomplisoout) {
			retval |=
				dwc_otg_pcd_handle_incomplete_isoc_out_intr(pcd);
		}
		/* In MPI mode Device Endpoints intterrupts are asserted
		 * without setting outepintr and inepint bits set, so these
		 * Interrupt handlers are called without checking these
		 * bit-fields
		 */
		if (core_if->multiproc_int_enable) {
			retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
			retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
		}
#ifdef VERBOSE
		DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%0x\n", __func__,
				dwc_read_reg32(&global_regs->gintsts));

#endif /* */
#ifdef OTG_PLB_DMA_TASKLET
		/* Tasklet build: RX status queue and NP Tx FIFO empty are
		 * serviced last; NP Tx FIFO is skipped while the DMA tasklet
		 * still owns the transfer (release_later). */
		if (gintr_status.b.rxstsqlvl) {
			retval |=
				dwc_otg_pcd_handle_rx_status_q_level_intr(pcd);
		}
		if (!atomic_read(&release_later) &&
			gintr_status.b.nptxfempty) {
			retval |=
				dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd);
		}
#endif
		spin_unlock(&pcd->lock);
	}
	return retval;
}
| |
| |
| #endif /* DWC_HOST_ONLY */ |