2011-10-05 15:11:40 +00:00
|
|
|
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */
|
|
|
|
|
|
|
|
#include <common.h>
|
|
|
|
#include <command.h>
|
|
|
|
#include <config.h>
|
|
|
|
#include <net.h>
|
|
|
|
#include <malloc.h>
|
2013-10-10 22:28:03 +00:00
|
|
|
#include <asm/byteorder.h>
|
2016-09-21 02:28:55 +00:00
|
|
|
#include <linux/errno.h>
|
2011-10-05 15:11:40 +00:00
|
|
|
#include <asm/io.h>
|
2013-10-10 22:28:00 +00:00
|
|
|
#include <asm/unaligned.h>
|
2011-10-05 15:11:40 +00:00
|
|
|
#include <linux/types.h>
|
2013-10-10 22:28:03 +00:00
|
|
|
#include <linux/usb/ch9.h>
|
|
|
|
#include <linux/usb/gadget.h>
|
2014-02-06 01:43:45 +00:00
|
|
|
#include <usb/ci_udc.h>
|
2013-10-10 22:28:03 +00:00
|
|
|
#include "../host/ehci.h"
|
2014-02-06 01:43:45 +00:00
|
|
|
#include "ci_udc.h"
|
2011-10-05 15:11:40 +00:00
|
|
|
|
2013-07-10 01:16:38 +00:00
|
|
|
/*
 * Check if the system has too long cachelines. If the cachelines are
 * longer than 128b, the driver will not be able to flush/invalidate data
 * cache over separate QH entries. We use 128b because one QH entry is
 * 64b long and there are always two QH list entries for each endpoint.
 */
#if ARCH_DMA_MINALIGN > 128
#error This driver can not work on systems with caches longer than 128b
#endif
|
|
|
|
|
2014-07-01 17:41:15 +00:00
|
|
|
/*
 * Every QTD must be individually aligned, since we can program any
 * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN,
 * and the USB HW requires 32-byte alignment. Align to both:
 */
#define ILIST_ALIGN		roundup(ARCH_DMA_MINALIGN, 32)
/* Each QTD is this size */
#define ILIST_ENT_RAW_SZ	sizeof(struct ept_queue_item)
/*
 * Align the size of the QTD too, so we can add this value to each
 * QTD's address to get another aligned address.
 */
#define ILIST_ENT_SZ		roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN)
/* For each endpoint, we need 2 QTDs, one for each of IN and OUT */
#define ILIST_SZ		(NUM_ENDPOINTS * 2 * ILIST_ENT_SZ)

/* Maximum number of bytes the HW transfers per dTD (16 KiB) */
#define EP_MAX_LENGTH_TRANSFER	0x4000
|
|
|
|
|
2011-10-05 15:11:40 +00:00
|
|
|
#ifndef DEBUG
#define DBG(x...) do {} while (0)
#else
#define DBG(x...) printf(x)
/*
 * reqname() - map a standard USB control request code to its name.
 * Used only for debug logging; unknown codes yield "*UNKNOWN*".
 */
static const char *reqname(unsigned r)
{
	static const struct {
		unsigned code;
		const char *name;
	} req_names[] = {
		{ USB_REQ_GET_STATUS,		"GET_STATUS" },
		{ USB_REQ_CLEAR_FEATURE,	"CLEAR_FEATURE" },
		{ USB_REQ_SET_FEATURE,		"SET_FEATURE" },
		{ USB_REQ_SET_ADDRESS,		"SET_ADDRESS" },
		{ USB_REQ_GET_DESCRIPTOR,	"GET_DESCRIPTOR" },
		{ USB_REQ_SET_DESCRIPTOR,	"SET_DESCRIPTOR" },
		{ USB_REQ_GET_CONFIGURATION,	"GET_CONFIGURATION" },
		{ USB_REQ_SET_CONFIGURATION,	"SET_CONFIGURATION" },
		{ USB_REQ_GET_INTERFACE,	"GET_INTERFACE" },
		{ USB_REQ_SET_INTERFACE,	"SET_INTERFACE" },
	};
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(req_names); i++) {
		if (req_names[i].code == r)
			return req_names[i].name;
	}

	return "*UNKNOWN*";
}
#endif
|
|
|
|
|
2014-05-29 20:53:01 +00:00
|
|
|
/*
 * Descriptor for endpoint 0. Not const: its bEndpointAddress is flipped
 * between USB_DIR_IN and 0 (OUT) as the control transfer progresses
 * through its stages (see flip_ep0_direction()).
 */
static struct usb_endpoint_descriptor ep0_desc = {
	.bLength =		sizeof(struct usb_endpoint_descriptor),
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
};
|
|
|
|
|
2014-02-06 01:43:45 +00:00
|
|
|
/* Forward declarations for the gadget and endpoint operation callbacks. */
static int ci_pullup(struct usb_gadget *gadget, int is_on);
static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc);
static int ci_ep_disable(struct usb_ep *ep);
static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags);
static int ci_ep_dequeue(struct usb_ep *ep, struct usb_request *req);
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags);
static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req);
|
2011-10-05 15:11:40 +00:00
|
|
|
|
2014-02-06 01:43:45 +00:00
|
|
|
/* Gadget-level operations; only pullup control is implemented. */
static struct usb_gadget_ops ci_udc_ops = {
	.pullup = ci_pullup,
};
|
|
|
|
|
2014-02-06 01:43:45 +00:00
|
|
|
/* Per-endpoint operations exposed to the gadget framework. */
static struct usb_ep_ops ci_ep_ops = {
	.enable         = ci_ep_enable,
	.disable        = ci_ep_disable,
	.queue          = ci_ep_queue,
	.dequeue	= ci_ep_dequeue,
	.alloc_request  = ci_ep_alloc_request,
	.free_request   = ci_ep_free_request,
};
|
|
|
|
|
2013-07-10 01:16:30 +00:00
|
|
|
/* Init values for USB endpoints. */
static const struct usb_ep ci_ep_init[5] = {
	[0] = {	/* EP 0 */
		.maxpacket	= 64,
		.name		= "ep0",
		.ops		= &ci_ep_ops,
	},
	[1] = {	/* EP 1 */
		.maxpacket	= 512,
		.name		= "ep1in-bulk",
		.ops		= &ci_ep_ops,
	},
	[2] = {	/* EP 2 */
		.maxpacket	= 512,
		.name		= "ep2out-bulk",
		.ops		= &ci_ep_ops,
	},
	[3] = {	/* EP 3 */
		.maxpacket	= 512,
		.name		= "ep3in-int",
		.ops		= &ci_ep_ops,
	},
	[4] = {	/* Template for all remaining endpoints */
		.maxpacket	= 512,
		.name		= "ep-",
		.ops		= &ci_ep_ops,
	},
};
|
|
|
|
|
2014-02-06 01:43:45 +00:00
|
|
|
/* Singleton driver state; this controller supports a single UDC instance. */
static struct ci_drv controller = {
	.gadget	= {
		.name	= "ci_udc",
		.ops	= &ci_udc_ops,
		.is_dualspeed = 1,	/* high-speed capable, may run full-speed */
	},
};
|
|
|
|
|
2013-07-10 01:16:39 +00:00
|
|
|
/**
|
2014-02-06 01:43:45 +00:00
|
|
|
* ci_get_qh() - return queue head for endpoint
|
2013-07-10 01:16:39 +00:00
|
|
|
* @ep_num: Endpoint number
|
|
|
|
* @dir_in: Direction of the endpoint (IN = 1, OUT = 0)
|
|
|
|
*
|
|
|
|
* This function returns the QH associated with particular endpoint
|
|
|
|
* and it's direction.
|
|
|
|
*/
|
2014-02-06 01:43:45 +00:00
|
|
|
static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in)
|
2013-07-10 01:16:39 +00:00
|
|
|
{
|
|
|
|
return &controller.epts[(ep_num * 2) + dir_in];
|
|
|
|
}
|
|
|
|
|
2013-07-10 01:16:41 +00:00
|
|
|
/**
 * ci_get_qtd() - return queue item for endpoint
 * @ep_num:	Endpoint number
 * @dir_in:	Direction of the endpoint (IN = 1, OUT = 0)
 *
 * This function returns the qTD (queue item) associated with particular
 * endpoint and it's direction. (The original comment said "QH", which was
 * a copy-paste error from ci_get_qh().)
 */
static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in)
{
	/* Two statically allocated qTDs per endpoint: OUT at even, IN at odd */
	int index = (ep_num * 2) + dir_in;
	/* Each qTD slot is ILIST_ENT_SZ bytes, i.e. padded to cache/HW alignment */
	uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ);
	return (struct ept_queue_item *)imem;
}
|
|
|
|
|
2013-07-10 01:16:42 +00:00
|
|
|
/**
|
2014-02-06 01:43:45 +00:00
|
|
|
* ci_flush_qh - flush cache over queue head
|
2013-07-10 01:16:42 +00:00
|
|
|
* @ep_num: Endpoint number
|
|
|
|
*
|
|
|
|
* This function flushes cache over QH for particular endpoint.
|
|
|
|
*/
|
2014-02-06 01:43:45 +00:00
|
|
|
static void ci_flush_qh(int ep_num)
|
2013-07-10 01:16:42 +00:00
|
|
|
{
|
2014-02-06 01:43:45 +00:00
|
|
|
struct ept_queue_head *head = ci_get_qh(ep_num, 0);
|
2015-03-17 20:46:35 +00:00
|
|
|
const unsigned long start = (unsigned long)head;
|
|
|
|
const unsigned long end = start + 2 * sizeof(*head);
|
2013-07-10 01:16:42 +00:00
|
|
|
|
|
|
|
flush_dcache_range(start, end);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2014-02-06 01:43:45 +00:00
|
|
|
* ci_invalidate_qh - invalidate cache over queue head
|
2013-07-10 01:16:42 +00:00
|
|
|
* @ep_num: Endpoint number
|
|
|
|
*
|
|
|
|
* This function invalidates cache over QH for particular endpoint.
|
|
|
|
*/
|
2014-02-06 01:43:45 +00:00
|
|
|
static void ci_invalidate_qh(int ep_num)
|
2013-07-10 01:16:42 +00:00
|
|
|
{
|
2014-02-06 01:43:45 +00:00
|
|
|
struct ept_queue_head *head = ci_get_qh(ep_num, 0);
|
2015-03-17 20:46:35 +00:00
|
|
|
unsigned long start = (unsigned long)head;
|
|
|
|
unsigned long end = start + 2 * sizeof(*head);
|
2013-07-10 01:16:42 +00:00
|
|
|
|
|
|
|
invalidate_dcache_range(start, end);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2014-02-06 01:43:45 +00:00
|
|
|
* ci_flush_qtd - flush cache over queue item
|
2013-07-10 01:16:42 +00:00
|
|
|
* @ep_num: Endpoint number
|
|
|
|
*
|
|
|
|
* This function flushes cache over qTD pair for particular endpoint.
|
|
|
|
*/
|
2014-02-06 01:43:45 +00:00
|
|
|
static void ci_flush_qtd(int ep_num)
|
2013-07-10 01:16:42 +00:00
|
|
|
{
|
2014-02-06 01:43:45 +00:00
|
|
|
struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
|
2015-03-17 20:46:35 +00:00
|
|
|
const unsigned long start = (unsigned long)item;
|
|
|
|
const unsigned long end = start + 2 * ILIST_ENT_SZ;
|
2013-07-10 01:16:42 +00:00
|
|
|
|
|
|
|
flush_dcache_range(start, end);
|
|
|
|
}
|
|
|
|
|
2015-04-29 05:12:10 +00:00
|
|
|
/**
|
|
|
|
* ci_flush_td - flush cache over queue item
|
|
|
|
* @td: td pointer
|
|
|
|
*
|
|
|
|
* This function flushes cache for particular transfer descriptor.
|
|
|
|
*/
|
|
|
|
static void ci_flush_td(struct ept_queue_item *td)
|
|
|
|
{
|
2015-07-22 21:16:20 +00:00
|
|
|
const unsigned long start = (unsigned long)td;
|
|
|
|
const unsigned long end = (unsigned long)td + ILIST_ENT_SZ;
|
2015-04-29 05:12:10 +00:00
|
|
|
flush_dcache_range(start, end);
|
|
|
|
}
|
|
|
|
|
2013-07-10 01:16:42 +00:00
|
|
|
/**
|
2014-02-06 01:43:45 +00:00
|
|
|
* ci_invalidate_qtd - invalidate cache over queue item
|
2013-07-10 01:16:42 +00:00
|
|
|
* @ep_num: Endpoint number
|
|
|
|
*
|
|
|
|
* This function invalidates cache over qTD pair for particular endpoint.
|
|
|
|
*/
|
2014-02-06 01:43:45 +00:00
|
|
|
static void ci_invalidate_qtd(int ep_num)
|
2013-07-10 01:16:42 +00:00
|
|
|
{
|
2014-02-06 01:43:45 +00:00
|
|
|
struct ept_queue_item *item = ci_get_qtd(ep_num, 0);
|
2015-03-17 20:46:35 +00:00
|
|
|
const unsigned long start = (unsigned long)item;
|
|
|
|
const unsigned long end = start + 2 * ILIST_ENT_SZ;
|
2013-07-10 01:16:42 +00:00
|
|
|
|
|
|
|
invalidate_dcache_range(start, end);
|
|
|
|
}
|
|
|
|
|
2015-04-29 05:12:10 +00:00
|
|
|
/**
|
|
|
|
* ci_invalidate_td - invalidate cache over queue item
|
|
|
|
* @td: td pointer
|
|
|
|
*
|
|
|
|
* This function invalidates cache for particular transfer descriptor.
|
|
|
|
*/
|
|
|
|
static void ci_invalidate_td(struct ept_queue_item *td)
|
|
|
|
{
|
2015-07-22 21:16:20 +00:00
|
|
|
const unsigned long start = (unsigned long)td;
|
|
|
|
const unsigned long end = start + ILIST_ENT_SZ;
|
2015-04-29 05:12:10 +00:00
|
|
|
invalidate_dcache_range(start, end);
|
|
|
|
}
|
|
|
|
|
2011-10-05 15:11:40 +00:00
|
|
|
static struct usb_request *
|
2014-02-06 01:43:45 +00:00
|
|
|
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags)
|
2011-10-05 15:11:40 +00:00
|
|
|
{
|
2014-05-29 20:53:02 +00:00
|
|
|
struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
|
2015-07-24 15:14:21 +00:00
|
|
|
int num = -1;
|
2014-05-05 23:48:11 +00:00
|
|
|
struct ci_req *ci_req;
|
|
|
|
|
2015-07-24 15:14:21 +00:00
|
|
|
if (ci_ep->desc)
|
|
|
|
num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
|
|
|
|
|
2014-05-29 20:53:02 +00:00
|
|
|
if (num == 0 && controller.ep0_req)
|
|
|
|
return &controller.ep0_req->req;
|
|
|
|
|
2014-07-01 17:41:18 +00:00
|
|
|
ci_req = calloc(1, sizeof(*ci_req));
|
2014-05-05 23:48:11 +00:00
|
|
|
if (!ci_req)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
INIT_LIST_HEAD(&ci_req->queue);
|
|
|
|
|
2014-05-29 20:53:02 +00:00
|
|
|
if (num == 0)
|
|
|
|
controller.ep0_req = ci_req;
|
|
|
|
|
2014-05-05 23:48:11 +00:00
|
|
|
return &ci_req->req;
|
2011-10-05 15:11:40 +00:00
|
|
|
}
|
|
|
|
|
2014-05-05 23:48:11 +00:00
|
|
|
static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req)
|
2011-10-05 15:11:40 +00:00
|
|
|
{
|
2014-06-10 17:02:36 +00:00
|
|
|
struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
|
|
|
|
struct ci_req *ci_req = container_of(req, struct ci_req, req);
|
2015-07-24 15:14:21 +00:00
|
|
|
int num = -1;
|
|
|
|
|
|
|
|
if (ci_ep->desc)
|
|
|
|
num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
|
2014-06-10 17:02:36 +00:00
|
|
|
|
usb: ci_udc: fix interaction with CONFIG_USB_ETH_CDC
ci_udc.c's usb_gadget_unregister_driver() doesn't call driver->unbind()
unlike other USB gadget drivers. Fix it to do this.
Without this, when ether.c's CDC Ethernet device is torn down,
eth_unbind() is never called, so dev->gadget is never set to NULL.
For some reason, usb_eth_halt() is called both at the end of the first
use of the Ethernet device, and prior to any subsequent use. Since
dev->gadget is never cleared, all calls to usb_eth_halt() attempt to
stop, disconnect, and clean up the device, resulting in double cleanup,
which hangs U-Boot on my Tegra device at least.
ci_udc allocates its own singleton EP0 request object, and cleans it up
during usb_gadget_unregister_driver(). This appears necessary when using
the USB gadget framework in U-Boot, since that does not allocate/free
the EP0 request. However, the CDC Ethernet driver *does* allocate and
free its own EP0 requests. Consequently, we must protect
ci_ep_free_request() against double-freeing the request.
Signed-off-by: Stephen Warren <swarren@nvidia.com>
2014-06-23 18:02:48 +00:00
|
|
|
if (num == 0) {
|
|
|
|
if (!controller.ep0_req)
|
|
|
|
return;
|
2014-06-10 17:02:36 +00:00
|
|
|
controller.ep0_req = 0;
|
usb: ci_udc: fix interaction with CONFIG_USB_ETH_CDC
ci_udc.c's usb_gadget_unregister_driver() doesn't call driver->unbind()
unlike other USB gadget drivers. Fix it to do this.
Without this, when ether.c's CDC Ethernet device is torn down,
eth_unbind() is never called, so dev->gadget is never set to NULL.
For some reason, usb_eth_halt() is called both at the end of the first
use of the Ethernet device, and prior to any subsequent use. Since
dev->gadget is never cleared, all calls to usb_eth_halt() attempt to
stop, disconnect, and clean up the device, resulting in double cleanup,
which hangs U-Boot on my Tegra device at least.
ci_udc allocates its own singleton EP0 request object, and cleans it up
during usb_gadget_unregister_driver(). This appears necessary when using
the USB gadget framework in U-Boot, since that does not allocate/free
the EP0 request. However, the CDC Ethernet driver *does* allocate and
free its own EP0 requests. Consequently, we must protect
ci_ep_free_request() against double-freeing the request.
Signed-off-by: Stephen Warren <swarren@nvidia.com>
2014-06-23 18:02:48 +00:00
|
|
|
}
|
2014-05-05 23:48:11 +00:00
|
|
|
|
|
|
|
if (ci_req->b_buf)
|
|
|
|
free(ci_req->b_buf);
|
|
|
|
free(ci_req);
|
2011-10-05 15:11:40 +00:00
|
|
|
}
|
|
|
|
|
2013-10-10 22:28:00 +00:00
|
|
|
/*
 * ep_enable() - program the controller to activate one endpoint direction.
 * @num:	endpoint number
 * @in:		1 for IN, 0 for OUT
 * @maxpacket:	max packet size to program into the endpoint's QH
 *
 * Sets the enable/reset/type bits in the ENDPTCTRL register for the given
 * direction. The QH config must be written and flushed to memory BEFORE
 * the register write enables the endpoint, since the HW reads the QH via
 * DMA. Endpoint 0's QH is managed by the HW and is not touched here.
 */
static void ep_enable(int num, int in, int maxpacket)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n;

	n = readl(&udc->epctrl[num]);
	if (in)
		n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK);
	else
		n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK);

	if (num != 0) {
		struct ept_queue_head *head = ci_get_qh(num, in);

		/* ZLT: HW will not auto-append zero-length packets */
		head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT;
		ci_flush_qh(num);
	}
	writel(n, &udc->epctrl[num]);
}
|
|
|
|
|
2014-02-06 01:43:45 +00:00
|
|
|
static int ci_ep_enable(struct usb_ep *ep,
|
2011-10-05 15:11:40 +00:00
|
|
|
const struct usb_endpoint_descriptor *desc)
|
|
|
|
{
|
2014-02-06 01:43:45 +00:00
|
|
|
struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
|
2011-10-05 15:11:40 +00:00
|
|
|
int num, in;
|
|
|
|
num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
|
|
|
|
in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
|
2014-02-06 01:43:45 +00:00
|
|
|
ci_ep->desc = desc;
|
2013-10-10 22:28:00 +00:00
|
|
|
|
|
|
|
if (num) {
|
|
|
|
int max = get_unaligned_le16(&desc->wMaxPacketSize);
|
|
|
|
|
|
|
|
if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL))
|
|
|
|
max = 64;
|
|
|
|
if (ep->maxpacket != max) {
|
|
|
|
DBG("%s: from %d to %d\n", __func__,
|
|
|
|
ep->maxpacket, max);
|
|
|
|
ep->maxpacket = max;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ep_enable(num, in, ep->maxpacket);
|
|
|
|
DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket);
|
2011-10-05 15:11:40 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-02-06 01:43:45 +00:00
|
|
|
static int ci_ep_disable(struct usb_ep *ep)
|
2011-10-05 15:11:40 +00:00
|
|
|
{
|
2014-02-06 01:43:45 +00:00
|
|
|
struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
|
2013-09-26 01:41:15 +00:00
|
|
|
|
2014-02-06 01:43:45 +00:00
|
|
|
ci_ep->desc = NULL;
|
2011-10-05 15:11:40 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-05-05 23:48:11 +00:00
|
|
|
static int ci_bounce(struct ci_req *ci_req, int in)
|
2013-07-10 01:16:43 +00:00
|
|
|
{
|
2014-05-05 23:48:11 +00:00
|
|
|
struct usb_request *req = &ci_req->req;
|
2015-03-17 20:46:35 +00:00
|
|
|
unsigned long addr = (unsigned long)req->buf;
|
|
|
|
unsigned long hwaddr;
|
2014-05-05 23:48:11 +00:00
|
|
|
uint32_t aligned_used_len;
|
2013-07-10 01:16:43 +00:00
|
|
|
|
|
|
|
/* Input buffer address is not aligned. */
|
|
|
|
if (addr & (ARCH_DMA_MINALIGN - 1))
|
|
|
|
goto align;
|
|
|
|
|
|
|
|
/* Input buffer length is not aligned. */
|
2014-05-05 23:48:11 +00:00
|
|
|
if (req->length & (ARCH_DMA_MINALIGN - 1))
|
2013-07-10 01:16:43 +00:00
|
|
|
goto align;
|
|
|
|
|
|
|
|
/* The buffer is well aligned, only flush cache. */
|
2014-05-05 23:48:11 +00:00
|
|
|
ci_req->hw_len = req->length;
|
|
|
|
ci_req->hw_buf = req->buf;
|
2013-07-10 01:16:43 +00:00
|
|
|
goto flush;
|
|
|
|
|
|
|
|
align:
|
2014-05-05 23:48:11 +00:00
|
|
|
if (ci_req->b_buf && req->length > ci_req->b_len) {
|
|
|
|
free(ci_req->b_buf);
|
|
|
|
ci_req->b_buf = 0;
|
|
|
|
}
|
|
|
|
if (!ci_req->b_buf) {
|
|
|
|
ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN);
|
|
|
|
ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len);
|
|
|
|
if (!ci_req->b_buf)
|
2013-07-10 01:16:43 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2014-05-05 23:48:11 +00:00
|
|
|
ci_req->hw_len = ci_req->b_len;
|
|
|
|
ci_req->hw_buf = ci_req->b_buf;
|
|
|
|
|
2013-10-10 22:28:01 +00:00
|
|
|
if (in)
|
2014-05-05 23:48:11 +00:00
|
|
|
memcpy(ci_req->hw_buf, req->buf, req->length);
|
2013-07-10 01:16:43 +00:00
|
|
|
|
|
|
|
flush:
|
2015-03-17 20:46:35 +00:00
|
|
|
hwaddr = (unsigned long)ci_req->hw_buf;
|
2014-05-05 23:48:11 +00:00
|
|
|
aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN);
|
|
|
|
flush_dcache_range(hwaddr, hwaddr + aligned_used_len);
|
2013-07-10 01:16:43 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-05-05 23:48:11 +00:00
|
|
|
static void ci_debounce(struct ci_req *ci_req, int in)
|
2013-07-10 01:16:43 +00:00
|
|
|
{
|
2014-05-05 23:48:11 +00:00
|
|
|
struct usb_request *req = &ci_req->req;
|
2015-03-17 20:46:35 +00:00
|
|
|
unsigned long addr = (unsigned long)req->buf;
|
|
|
|
unsigned long hwaddr = (unsigned long)ci_req->hw_buf;
|
2014-05-05 23:48:11 +00:00
|
|
|
uint32_t aligned_used_len;
|
2013-07-10 01:16:43 +00:00
|
|
|
|
2014-05-05 23:48:11 +00:00
|
|
|
if (in)
|
|
|
|
return;
|
|
|
|
|
|
|
|
aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN);
|
|
|
|
invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len);
|
2013-07-10 01:16:43 +00:00
|
|
|
|
2014-05-05 23:48:11 +00:00
|
|
|
if (addr == hwaddr)
|
|
|
|
return; /* not a bounce */
|
2013-07-10 01:16:43 +00:00
|
|
|
|
2014-05-05 23:48:11 +00:00
|
|
|
memcpy(req->buf, ci_req->hw_buf, req->actual);
|
2013-07-10 01:16:43 +00:00
|
|
|
}
|
|
|
|
|
2014-05-05 23:48:11 +00:00
|
|
|
/*
 * ci_ep_submit_next_request() - hand the head of an endpoint's request
 * queue to the hardware.
 *
 * Builds a chain of dTDs for the request (EP_MAX_LENGTH_TRANSFER bytes
 * per dTD; extra dTDs beyond the first are memalign()ed and freed on
 * completion), links it into the endpoint's QH, flushes everything to
 * memory, and primes the endpoint. The statement order is significant:
 * all descriptor memory must be flushed BEFORE the epprime write, since
 * the HW fetches dTDs/QHs via DMA.
 */
static void ci_ep_submit_next_request(struct ci_ep *ci_ep)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_item *item;
	struct ept_queue_head *head;
	int bit, num, len, in;
	struct ci_req *ci_req;
	u8 *buf;
	uint32_t len_left, len_this_dtd;
	struct ept_queue_item *dtd, *qtd;

	/* Mark busy before touching HW so ci_ep_queue() won't re-enter */
	ci_ep->req_primed = true;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	head = ci_get_qh(num, in);

	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);
	len = ci_req->req.length;

	/* Point the QH at the first (statically allocated) dTD */
	head->next = (unsigned long)item;
	head->info = 0;

	ci_req->dtd_count = 0;
	buf = ci_req->hw_buf;
	len_left = len;
	dtd = item;

	/* Split the transfer into dTDs of at most EP_MAX_LENGTH_TRANSFER */
	do {
		len_this_dtd = min(len_left, (unsigned)EP_MAX_LENGTH_TRANSFER);

		dtd->info = INFO_BYTES(len_this_dtd) | INFO_ACTIVE;
		/* Five page pointers cover the dTD's buffer span */
		dtd->page0 = (unsigned long)buf;
		dtd->page1 = ((unsigned long)buf & 0xfffff000) + 0x1000;
		dtd->page2 = ((unsigned long)buf & 0xfffff000) + 0x2000;
		dtd->page3 = ((unsigned long)buf & 0xfffff000) + 0x3000;
		dtd->page4 = ((unsigned long)buf & 0xfffff000) + 0x4000;

		len_left -= len_this_dtd;
		buf += len_this_dtd;

		if (len_left) {
			/*
			 * NOTE(review): memalign() result is not checked
			 * for NULL before use — TODO confirm whether OOM
			 * here is considered fatal by design.
			 */
			qtd = (struct ept_queue_item *)
			       memalign(ILIST_ALIGN, ILIST_ENT_SZ);
			dtd->next = (unsigned long)qtd;
			dtd = qtd;
			memset(dtd, 0, ILIST_ENT_SZ);
		}

		ci_req->dtd_count++;
	} while (len_left);

	item = dtd;
	/*
	 * When sending the data for an IN transaction, the attached host
	 * knows that all data for the IN is sent when one of the following
	 * occurs:
	 * a) A zero-length packet is transmitted.
	 * b) A packet with length that isn't an exact multiple of the ep's
	 *    maxpacket is transmitted.
	 * c) Enough data is sent to exactly fill the host's maximum expected
	 *    IN transaction size.
	 *
	 * One of these conditions MUST apply at the end of an IN transaction,
	 * or the transaction will not be considered complete by the host. If
	 * none of (a)..(c) already applies, then we must force (a) to apply
	 * by explicitly sending an extra zero-length packet.
	 */
	/*  IN    !a       !b                              !c */
	if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) {
		/*
		 * Each endpoint has 2 items allocated, even though typically
		 * only 1 is used at a time since either an IN or an OUT but
		 * not both is queued. For an IN transaction, item currently
		 * points at the second of these items, so we know that we
		 * can use the other to transmit the extra zero-length packet.
		 */
		struct ept_queue_item *other_item = ci_get_qtd(num, 0);
		item->next = (unsigned long)other_item;
		item = other_item;
		item->info = INFO_ACTIVE;
	}

	/* Terminate the chain and request an interrupt on completion */
	item->next = TERMINATE;
	item->info |= INFO_IOC;

	/* Flush the two statically allocated qTDs... */
	ci_flush_qtd(num);

	/* ...and every dynamically allocated dTD chained behind the first */
	item = (struct ept_queue_item *)(unsigned long)head->next;
	while (item->next != TERMINATE) {
		ci_flush_td((struct ept_queue_item *)(unsigned long)item->next);
		item = (struct ept_queue_item *)(unsigned long)item->next;
	}

	DBG("ept%d %s queue len %x, req %p, buffer %p\n",
	    num, in ? "in" : "out", len, ci_req, ci_req->hw_buf);
	ci_flush_qh(num);

	if (in)
		bit = EPT_TX(num);
	else
		bit = EPT_RX(num);

	/* Hand the endpoint to the HW; it will DMA the descriptors now */
	writel(bit, &udc->epprime);
}
|
|
|
|
|
2015-08-28 01:20:30 +00:00
|
|
|
/*
 * ci_ep_dequeue() - gadget framework callback to cancel a queued request.
 *
 * Return: 0 if the request was found and removed, -EINVAL otherwise.
 * A still-in-progress request is completed with -ECONNRESET, invoking its
 * completion callback if one is set.
 */
static int ci_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct ci_ep *ci_ep = container_of(_ep, struct ci_ep, ep);
	struct ci_req *ci_req;

	list_for_each_entry(ci_req, &ci_ep->queue, queue) {
		if (&ci_req->req == _req)
			break;
	}

	/*
	 * If the loop ran to completion, ci_req points at the list head's
	 * container, not a real request — detect "not found" that way.
	 */
	if (&ci_req->req != _req)
		return -EINVAL;

	list_del_init(&ci_req->queue);

	if (ci_req->req.status == -EINPROGRESS) {
		ci_req->req.status = -ECONNRESET;
		if (ci_req->req.complete)
			ci_req->req.complete(_ep, _req);
	}

	return 0;
}
|
|
|
|
|
2014-05-05 23:48:11 +00:00
|
|
|
/*
 * ci_ep_queue() - gadget framework callback to queue a request.
 *
 * Prepares the DMA buffer, appends the request to the endpoint's list,
 * and submits it immediately if the endpoint is idle. A second
 * simultaneous ep0 transaction is rejected with -EPROTO.
 *
 * Return: 0 on success, -EPROTO on ep0 collision, or a negative errno
 * from ci_bounce().
 */
static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	struct ci_req *ci_req = container_of(req, struct ci_req, req);
	int __maybe_unused num =
		ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	int in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	int ret;

	if (!num && ci_ep->req_primed) {
		/*
		 * The flipping of ep0 between IN and OUT relies on
		 * ci_ep_queue consuming the current IN/OUT setting
		 * immediately. If this is deferred to a later point when the
		 * req is pulled out of ci_req->queue, then the IN/OUT setting
		 * may have been changed since the req was queued, and state
		 * will get out of sync. This condition doesn't occur today,
		 * but could if bugs were introduced later, and this error
		 * check will save a lot of debugging time.
		 */
		printf("%s: ep0 transaction already in progress\n", __func__);
		return -EPROTO;
	}

	ret = ci_bounce(ci_req, in);
	if (ret)
		return ret;

	DBG("ept%d %s pre-queue req %p, buffer %p\n",
	    num, in ? "in" : "out", ci_req, ci_req->hw_buf);
	list_add_tail(&ci_req->queue, &ci_ep->queue);

	/* Idle endpoint: kick off this request right away */
	if (!ci_ep->req_primed)
		ci_ep_submit_next_request(ci_ep);

	return 0;
}
|
|
|
|
|
2014-05-29 20:53:03 +00:00
|
|
|
static void flip_ep0_direction(void)
|
|
|
|
{
|
|
|
|
if (ep0_desc.bEndpointAddress == USB_DIR_IN) {
|
2014-06-25 17:03:18 +00:00
|
|
|
DBG("%s: Flipping ep0 to OUT\n", __func__);
|
2014-05-29 20:53:03 +00:00
|
|
|
ep0_desc.bEndpointAddress = 0;
|
|
|
|
} else {
|
2014-06-25 17:03:18 +00:00
|
|
|
DBG("%s: Flipping ep0 to IN\n", __func__);
|
2014-05-29 20:53:03 +00:00
|
|
|
ep0_desc.bEndpointAddress = USB_DIR_IN;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-01 20:22:27 +00:00
|
|
|
/*
 * Finish the request at the head of an endpoint's queue after the hardware
 * has signalled completion (via the ENDPTCOMPLETE register, see udc_irq()).
 *
 * Walks the chain of transfer descriptors (dTDs) belonging to the request,
 * accumulating the number of bytes the controller did NOT transfer, frees
 * the dynamically-allocated dTDs (all but the first, which is the static
 * per-EP qTD), primes the next queued request if any, and invokes the
 * gadget-layer completion callback. For ep0, also drives the Data Stage ->
 * Status Stage transition of a control transfer.
 */
static void handle_ep_complete(struct ci_ep *ci_ep)
{
	struct ept_queue_item *item, *next_td;
	int num, in, len, j;
	struct ci_req *ci_req;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	/* The first dTD is the statically-allocated per-endpoint qTD */
	item = ci_get_qtd(num, in);
	ci_invalidate_qtd(num);
	/* The completed request is always the head of the queue */
	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);

	next_td = item;
	len = 0;
	for (j = 0; j < ci_req->dtd_count; j++) {
		/* Invalidate cache so we read what the controller wrote back */
		ci_invalidate_td(next_td);
		item = next_td;
		/* info[30:16] = bytes left untransferred in this dTD */
		len += (item->info >> 16) & 0x7fff;
		/* info[7:0] = status bits; any set bit indicates an error */
		if (item->info & 0xff)
			printf("EP%d/%s FAIL info=%x pg0=%x\n",
			       num, in ? "in" : "out", item->info, item->page0);
		/* Read the next-pointer before item may be freed below */
		if (j != ci_req->dtd_count - 1)
			next_td = (struct ept_queue_item *)(unsigned long)
				item->next;
		/* dTDs after the first were memalign()ed in ci_ep_queue */
		if (j != 0)
			free(item);
	}

	list_del_init(&ci_req->queue);
	ci_ep->req_primed = false;

	/* Keep the endpoint busy: prime the next pending request, if any */
	if (!list_empty(&ci_ep->queue))
		ci_ep_submit_next_request(ci_ep);

	/* actual = requested length minus what the controller left over */
	ci_req->req.actual = ci_req->req.length - len;
	ci_debounce(ci_req, in);

	DBG("ept%d %s req %p, complete %x\n",
	    num, in ? "in" : "out", ci_req, len);
	/*
	 * For ep0, suppress the completion callback for the Status Stage
	 * (ep0_data_phase already false); the gadget only cares about data.
	 */
	if (num != 0 || controller.ep0_data_phase)
		ci_req->req.complete(&ci_ep->ep, &ci_req->req);
	if (num == 0 && controller.ep0_data_phase) {
		/*
		 * Data Stage is complete, so flip ep0 dir for Status Stage,
		 * which always transfers a packet in the opposite direction.
		 */
		DBG("%s: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
		/* Queue the zero-length Status Stage packet */
		ci_req->req.length = 0;
		usb_ep_queue(&ci_ep->ep, &ci_req->req, 0);
	}
}
|
|
|
|
|
|
|
|
#define SETUP(type, request) (((type) << 8) | (request))
|
|
|
|
|
|
|
|
/*
 * Process a SETUP packet received on ep0.
 *
 * Copies the setup data out of the (cache-invalidated) ep0-OUT queue head,
 * acknowledges it in the controller, establishes the ep0 direction for the
 * coming Data/Status stages, handles the few requests this driver services
 * itself (CLEAR_FEATURE(endpoint), SET_ADDRESS, GET_STATUS(device)), and
 * forwards everything else to the registered gadget driver. If nobody
 * handles the request, ep0 is stalled.
 */
static void handle_setup(void)
{
	struct ci_ep *ci_ep = &controller.ep[0];
	struct ci_req *ci_req;
	struct usb_request *req;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_head *head;
	struct usb_ctrlrequest r;
	int status = 0;
	int num, in, _num, _in, i;
	char *buf;

	/* The singleton ep0 request is reused for all control responses */
	ci_req = controller.ep0_req;
	req = &ci_req->req;
	head = ci_get_qh(0, 0);	/* EP0 OUT */

	/* Invalidate so we read the setup bytes the controller DMA'd in */
	ci_invalidate_qh(0);
	memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest));
	/* Write-1-to-clear the ep0 setup-received status bit */
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(EPT_RX(0), &udc->epsetupstat);
#else
	writel(EPT_RX(0), &udc->epstat);
#endif
	DBG("handle setup %s, %x, %x index %x value %x length %x\n",
	    reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex,
	    r.wValue, r.wLength);

	/* Set EP0 dir for Data Stage based on Setup Stage data */
	if (r.bRequestType & USB_DIR_IN) {
		DBG("%s: Set ep0 to IN for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = USB_DIR_IN;
	} else {
		DBG("%s: Set ep0 to OUT for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = 0;
	}
	if (r.wLength) {
		controller.ep0_data_phase = true;
	} else {
		/* 0 length -> no Data Stage. Flip dir for Status Stage */
		DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
	}

	/* A new SETUP aborts whatever ep0 transaction was in flight */
	list_del_init(&ci_req->queue);
	ci_ep->req_primed = false;

	switch (SETUP(r.bRequestType, r.bRequest)) {
	case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE):
		/* wIndex encodes the endpoint number and direction */
		_num = r.wIndex & 15;
		_in = !!(r.wIndex & 0x80);

		if ((r.wValue == 0) && (r.wLength == 0)) {
			req->length = 0;
			/* Find the matching active endpoint and re-enable it */
			for (i = 0; i < NUM_ENDPOINTS; i++) {
				struct ci_ep *ep = &controller.ep[i];

				if (!ep->desc)
					continue;
				num = ep->desc->bEndpointAddress
						& USB_ENDPOINT_NUMBER_MASK;
				in = (ep->desc->bEndpointAddress
						& USB_DIR_IN) != 0;
				if ((num == _num) && (in == _in)) {
					ep_enable(num, in, ep->ep.maxpacket);
					/* Zero-length Status Stage reply */
					usb_ep_queue(controller.gadget.ep0,
							req, 0);
					break;
				}
			}
		}
		return;

	case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS):
		/*
		 * write address delayed (will take effect
		 * after the next IN txn)
		 */
		/* DEVICEADDR: address in [31:25], bit 24 = advance-on-IN */
		writel((r.wValue << 25) | (1 << 24), &udc->devaddr);
		req->length = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;

	case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS):
		/* Report a self-powered, non-remote-wakeup device */
		req->length = 2;
		buf = (char *)req->buf;
		buf[0] = 1 << USB_DEVICE_SELF_POWERED;
		buf[1] = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;
	}
	/* pass request up to the gadget driver */
	if (controller.driver)
		status = controller.driver->setup(&controller.gadget, &r);
	else
		status = -ENODEV;

	if (!status)
		return;
	DBG("STALL reqname %s type %x value %x, index %x\n",
	    reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex);
	/* Unhandled request: stall ep0 in both directions (TXS | RXS) */
	writel((1<<16) | (1 << 0), &udc->epctrl[0]);
}
|
|
|
|
|
|
|
|
/*
 * Quiesce the controller: clear all pending endpoint status/complete bits,
 * flush every endpoint's primed buffers, disable all non-ep0 endpoints,
 * and reset each active endpoint's queue head.
 *
 * Called on USB bus reset and on disconnect.
 */
static void stop_activity(void)
{
	int i, num, in;
	struct ept_queue_head *head;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	/* These registers are write-1-to-clear: read-back clears all bits */
	writel(readl(&udc->epcomp), &udc->epcomp);
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(readl(&udc->epsetupstat), &udc->epsetupstat);
#endif
	writel(readl(&udc->epstat), &udc->epstat);
	/* Flush (de-prime) every TX and RX endpoint */
	writel(0xffffffff, &udc->epflush);

	/* error out any pending reqs */
	for (i = 0; i < NUM_ENDPOINTS; i++) {
		/* ep0 control register must stay configured */
		if (i != 0)
			writel(0, &udc->epctrl[i]);
		if (controller.ep[i].desc) {
			num = controller.ep[i].desc->bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK;
			in = (controller.ep[i].desc->bEndpointAddress
				& USB_DIR_IN) != 0;
			head = ci_get_qh(num, in);
			/*
			 * NOTE(review): writes INFO_ACTIVE into the QH info
			 * field before flushing it to memory — presumably to
			 * mark pending work as errored/ignored; confirm
			 * against the controller reference manual.
			 */
			head->info = INFO_ACTIVE;
			ci_flush_qh(num);
		}
	}
}
|
|
|
|
|
|
|
|
/*
 * Main interrupt service routine for the device controller.
 *
 * Reads and acknowledges USBSTS, then dispatches on the individual status
 * bits: bus reset (URI), suspend (SLI), port change / speed negotiation
 * (PCI), transfer error (UEI), and transfer complete (UI). Completion
 * handling covers both ep0 SETUP packets and regular endpoint completions.
 */
void udc_irq(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n = readl(&udc->usbsts);
	/* USBSTS is write-1-to-clear: ack everything we just read */
	writel(n, &udc->usbsts);
	int bit, i, num, in;

	/* Only these five conditions are handled here */
	n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI);
	if (n == 0)
		return;

	if (n & STS_URI) {
		DBG("-- reset --\n");
		stop_activity();
	}
	if (n & STS_SLI)
		DBG("-- suspend --\n");

	if (n & STS_PCI) {
		/* Defaults assume Full Speed (64-byte max packet) */
		int max = 64;
		int speed = USB_SPEED_FULL;

		/* Extract the negotiated port speed field (2 == High Speed) */
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		bit = (readl(&udc->hostpc1_devlc) >> 25) & 3;
#else
		bit = (readl(&udc->portsc) >> 26) & 3;
#endif
		DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full");
		if (bit == 2) {
			speed = USB_SPEED_HIGH;
			max = 512;
		}
		controller.gadget.speed = speed;
		/* Clamp bulk endpoint maxpacket to the negotiated speed */
		for (i = 1; i < NUM_ENDPOINTS; i++) {
			if (controller.ep[i].ep.maxpacket > max)
				controller.ep[i].ep.maxpacket = max;
		}
	}

	if (n & STS_UEI)
		printf("<UEI %x>\n", readl(&udc->epcomp));

	if ((n & STS_UI) || (n & STS_UEI)) {
		/* First: check for a freshly-received SETUP packet on ep0 */
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		n = readl(&udc->epsetupstat);
#else
		n = readl(&udc->epstat);
#endif
		if (n & EPT_RX(0))
			handle_setup();

		/* Then: ack and dispatch per-endpoint transfer completions */
		n = readl(&udc->epcomp);
		if (n != 0)
			writel(n, &udc->epcomp);

		for (i = 0; i < NUM_ENDPOINTS && n; i++) {
			if (controller.ep[i].desc) {
				num = controller.ep[i].desc->bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK;
				in = (controller.ep[i].desc->bEndpointAddress
					& USB_DIR_IN) != 0;
				/* TX bits are the upper half, RX the lower */
				bit = (in) ? EPT_TX(num) : EPT_RX(num);
				if (n & bit)
					handle_ep_complete(&controller.ep[i]);
			}
		}
	}
}
|
|
|
|
|
2015-02-23 13:10:23 +00:00
|
|
|
int usb_gadget_handle_interrupts(int index)
|
2011-10-05 15:11:40 +00:00
|
|
|
{
|
|
|
|
u32 value;
|
2014-02-06 01:43:45 +00:00
|
|
|
struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
|
2011-10-05 15:11:40 +00:00
|
|
|
|
|
|
|
value = readl(&udc->usbsts);
|
|
|
|
if (value)
|
|
|
|
udc_irq();
|
|
|
|
|
|
|
|
return value;
|
|
|
|
}
|
|
|
|
|
2014-06-10 17:02:35 +00:00
|
|
|
void udc_disconnect(void)
|
|
|
|
{
|
|
|
|
struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
|
|
|
|
/* disable pullup */
|
|
|
|
stop_activity();
|
|
|
|
writel(USBCMD_FS2, &udc->usbcmd);
|
|
|
|
udelay(800);
|
|
|
|
if (controller.driver)
|
|
|
|
controller.driver->disconnect(&controller.gadget);
|
|
|
|
}
|
|
|
|
|
2014-02-06 01:43:45 +00:00
|
|
|
/*
 * Gadget-framework pullup operation: connect to (is_on) or disconnect
 * from (!is_on) the USB bus.
 *
 * Connecting performs the full controller bring-up sequence: reset,
 * program the endpoint list base address, select device mode, optionally
 * force full speed, flush endpoints, and finally set RUN to enable the
 * D+ pullup. Always returns 0.
 */
static int ci_pullup(struct usb_gadget *gadget, int is_on)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	if (is_on) {
		/* RESET */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd);
		udelay(200);

		/* Point the controller at the 4 KiB-aligned QH list */
		writel((unsigned long)controller.epts, &udc->epinitaddr);

		/* select DEVICE mode */
		writel(USBMODE_DEVICE, &udc->usbmode);

#if !defined(CONFIG_USB_GADGET_DUALSPEED)
		/* Port force Full-Speed Connect */
		setbits_le32(&udc->portsc, PFSC);
#endif

		/* Start with no endpoints primed */
		writel(0xffffffff, &udc->epflush);

		/* Turn on the USB connection by enabling the pullup resistor */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RUN, &udc->usbcmd);
	} else {
		udc_disconnect();
	}

	return 0;
}
|
|
|
|
|
2014-02-06 01:43:45 +00:00
|
|
|
/*
 * Allocate and initialize the controller's DMA data structures and the
 * software endpoint state.
 *
 * Allocates the queue-head (QH) list — which the hardware requires to be
 * 4096-byte aligned — and the transfer-descriptor (qTD) item pool, then
 * configures one OUT and one IN QH per endpoint and populates the gadget
 * framework's endpoint list. Finally allocates the singleton ep0 request.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (earlier
 * allocations are freed before returning).
 */
static int ci_udc_probe(void)
{
	struct ept_queue_head *head;
	int i;

	/* Two QHs (OUT + IN) per endpoint */
	const int num = 2 * NUM_ENDPOINTS;

	/* Honor both the hardware's 4 KiB rule and the cache-line size */
	const int eplist_min_align = 4096;
	const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN);
	const int eplist_raw_sz = num * sizeof(struct ept_queue_head);
	const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN);

	/* The QH list must be aligned to 4096 bytes. */
	controller.epts = memalign(eplist_align, eplist_sz);
	if (!controller.epts)
		return -ENOMEM;
	memset(controller.epts, 0, eplist_sz);

	controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ);
	if (!controller.items_mem) {
		free(controller.epts);
		return -ENOMEM;
	}
	memset(controller.items_mem, 0, ILIST_SZ);

	for (i = 0; i < 2 * NUM_ENDPOINTS; i++) {
		/*
		 * Configure QH for each endpoint. The structure of the QH list
		 * is such that each two subsequent fields, N and N+1 where N is
		 * even, in the QH list represent QH for one endpoint. The Nth
		 * entry represents OUT configuration and the N+1th entry does
		 * represent IN configuration of the endpoint.
		 */
		head = controller.epts + i;
		if (i < 2)
			/* ep0: zero-length-termination + interrupt-on-setup */
			head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE)
				| CONFIG_ZLT | CONFIG_IOS;
		else
			head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE)
				| CONFIG_ZLT;
		head->next = TERMINATE;
		head->info = 0;

		/* Flush each endpoint's pair of QHs/qTDs once, after the IN */
		if (i & 1) {
			ci_flush_qh(i / 2);
			ci_flush_qtd(i / 2);
		}
	}

	INIT_LIST_HEAD(&controller.gadget.ep_list);

	/* Init EP 0 */
	memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init));
	controller.ep[0].desc = &ep0_desc;
	INIT_LIST_HEAD(&controller.ep[0].queue);
	controller.ep[0].req_primed = false;
	controller.gadget.ep0 = &controller.ep[0].ep;
	INIT_LIST_HEAD(&controller.gadget.ep0->ep_list);

	/* Init EP 1..3 — each has its own template in ci_ep_init[] */
	for (i = 1; i < 4; i++) {
		memcpy(&controller.ep[i].ep, &ci_ep_init[i],
		       sizeof(*ci_ep_init));
		INIT_LIST_HEAD(&controller.ep[i].queue);
		controller.ep[i].req_primed = false;
		list_add_tail(&controller.ep[i].ep.ep_list,
			      &controller.gadget.ep_list);
	}

	/* Init EP 4..n — all share the generic ci_ep_init[4] template */
	for (i = 4; i < NUM_ENDPOINTS; i++) {
		memcpy(&controller.ep[i].ep, &ci_ep_init[4],
		       sizeof(*ci_ep_init));
		INIT_LIST_HEAD(&controller.ep[i].queue);
		controller.ep[i].req_primed = false;
		list_add_tail(&controller.ep[i].ep.ep_list,
			      &controller.gadget.ep_list);
	}

	/* Allocate the singleton ep0 request (stored in controller.ep0_req) */
	ci_ep_alloc_request(&controller.ep[0].ep, 0);
	if (!controller.ep0_req) {
		free(controller.items_mem);
		free(controller.epts);
		return -ENOMEM;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Gadget-framework entry point: validate and bind a gadget driver.
 *
 * Validates the driver's mandatory callbacks and declared speed, brings up
 * the low-level EHCI/controller layer, probes this UDC, and finally calls
 * driver->bind(). On success the driver is recorded in controller.driver.
 *
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): on ci_udc_probe()/bind() failure the low-level controller
 * (and, for bind(), the probe allocations) are not torn down — presumably
 * acceptable for U-Boot's one-shot usage; confirm if re-registration after
 * failure must be supported.
 */
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	int ret;

	if (!driver)
		return -EINVAL;
	if (!driver->bind || !driver->setup || !driver->disconnect)
		return -EINVAL;
	if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH)
		return -EINVAL;

	/* Bring up the controller via driver model or the legacy path */
#ifdef CONFIG_DM_USB
	ret = usb_setup_ehci_gadget(&controller.ctrl);
#else
	ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl);
#endif
	if (ret)
		return ret;

	ret = ci_udc_probe();
	if (ret) {
		DBG("udc probe failed, returned %d\n", ret);
		return ret;
	}

	ret = driver->bind(&controller.gadget);
	if (ret) {
		DBG("driver->bind() returned %d\n", ret);
		return ret;
	}
	controller.driver = driver;

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Gadget-framework entry point: unbind a gadget driver and tear down the
 * UDC's resources.
 *
 * Disconnects from the bus first, then calls driver->unbind() so the
 * gadget driver can release its own state (e.g. ether.c clears
 * dev->gadget here; skipping unbind historically caused double-cleanup
 * hangs on subsequent use). Finally frees the singleton ep0 request and
 * the QH/qTD pools allocated in ci_udc_probe().
 *
 * Always returns 0.
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	udc_disconnect();

	driver->unbind(&controller.gadget);
	controller.driver = NULL;

	/*
	 * Some gadget drivers allocate/free their own ep0 requests;
	 * ci_ep_free_request() guards against double-freeing this one.
	 */
	ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req);
	free(controller.items_mem);
	free(controller.epts);

	return 0;
}
|
2014-08-25 20:02:15 +00:00
|
|
|
|
|
|
|
bool dfu_usb_get_reset(void)
|
|
|
|
{
|
|
|
|
struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
|
|
|
|
|
|
|
|
return !!(readl(&udc->usbsts) & STS_URI);
|
|
|
|
}
|