The musb host part

This article mainly covers drivers/usb/musb/musb_host.c.

In musb_core.c, the musb_init_controller() function calls allocate_instance(), as follows:

       /* allocate */
        musb = allocate_instance(dev, plat->config, ctrl);
        if (!musb) {
                status = -ENOMEM;
                goto fail0;
        }
Inside allocate_instance(), usb_create_hcd() is called in turn, as follows:

        hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));  // musb_hc_driver is the focus of this article
        if (!hcd)
                return NULL;
usb_create_hcd() lives in drivers/usb/core/hcd.c and is used to create and initialize the HCD structure for a USB host controller driver; it is defined as follows:

/**
 * usb_create_hcd - create and initialize an HCD structure
 * @driver: HC driver that will use this hcd
 * @dev: device for this HC, stored in hcd->self.controller
 * @bus_name: value to store in hcd->self.bus_name
 * Context: !in_interrupt()
 *
 * Allocate a struct usb_hcd, with extra space at the end for the
 * HC driver's private data.  Initialize the generic members of the
 * hcd structure.
 *
 * If memory is unavailable, returns NULL.
 */
struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
                struct device *dev, const char *bus_name)
{
        struct usb_hcd *hcd;

        hcd = kzalloc(sizeof(*hcd) + driver->hcd_priv_size, GFP_KERNEL);
        if (!hcd) {
                dev_dbg (dev, "hcd alloc failed\n");
                return NULL;
        }
        dev_set_drvdata(dev, hcd);
        kref_init(&hcd->kref);

        usb_bus_init(&hcd->self);
        hcd->self.controller = dev;
        hcd->self.bus_name = bus_name;
        hcd->self.uses_dma = (dev->dma_mask != NULL);

        init_timer(&hcd->rh_timer);
        hcd->rh_timer.function = rh_timer_func;
        hcd->rh_timer.data = (unsigned long) hcd;
#ifdef CONFIG_USB_SUSPEND
        INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
        mutex_init(&hcd->bandwidth_mutex);

        hcd->driver = driver;
        hcd->product_desc = (driver->product_desc) ? driver->product_desc :
                        "USB Host Controller";
        return hcd;
}
EXPORT_SYMBOL_GPL(usb_create_hcd);
This article is not about drivers/usb/core/hcd.c, so that function will not be elaborated further.
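Note that usb_create_hcd() only allocates and initializes the hcd; registering it with usbcore is a separate step. For orientation, the usual create/register/teardown pairing in a host controller driver looks roughly like the sketch below (simplified; in musb the registration happens later, from musb_core.c in host/OTG configurations, and "irq" here is only a placeholder):

        struct usb_hcd *hcd;
        int ret;

        /* allocate hcd plus the driver-private area (struct musb in our case) */
        hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
        if (!hcd)
                return -ENOMEM;

        /* ... driver-specific setup of the private data ... */

        /* register with usbcore; this also creates the root hub */
        ret = usb_add_hcd(hcd, irq, IRQF_SHARED);       /* irq: placeholder */
        if (ret) {
                usb_put_hcd(hcd);       /* drops the reference taken by usb_create_hcd() */
                return ret;
        }

        /* teardown later: usb_remove_hcd(hcd); usb_put_hcd(hcd); */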

1. The focus of this article is musb_hc_driver, defined in drivers/usb/musb/musb_host.c, as follows:

const struct hc_driver musb_hc_driver = {
        .description            = "musb-hcd",
        .product_desc           = "MUSB HDRC host driver",          // product/vendor string
        .hcd_priv_size          = sizeof(struct musb),              // size of private-data.
        .flags                  = HCD_USB2 | HCD_MEMORY,            

        /* not using irq handler or reset hooks from usbcore, since
         * those must be shared with peripheral code for OTG configs
         */

        .start                  = musb_h_start,
        .stop                   = musb_h_stop,

        .get_frame_number       = musb_h_get_frame_number,

        .urb_enqueue            = musb_urb_enqueue,
        .urb_dequeue            = musb_urb_dequeue,
        .endpoint_disable       = musb_h_disable,

        .hub_status_data        = musb_hub_status_data,
        .hub_control            = musb_hub_control,
        .bus_suspend            = musb_bus_suspend,
        .bus_resume             = musb_bus_resume,
        /* .start_port_reset    = NULL, */
        /* .hub_irq_enable      = NULL, */
};
This is the musb implementation of struct hc_driver.
1.1 The flags field can take the following values:

        int     flags;
#define HCD_MEMORY      0x0001          /* HC regs use memory (else I/O) */
#define HCD_LOCAL_MEM   0x0002          /* HC needs local memory */
#define HCD_USB11       0x0010          /* USB 1.1 */
#define HCD_USB2        0x0020          /* USB 2.0 */
#define HCD_USB3        0x0040          /* USB 3.0 */
#define HCD_MASK        0x0070
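musb declares HCD_USB2 | HCD_MEMORY, i.e. a memory-mapped USB 2.0 controller. usbcore looks at the speed bits (masked with HCD_MASK) when it creates the root hub device; a rough illustration of that mapping (paraphrased, not the literal code in drivers/usb/core/hcd.c, and "rhdev" stands for the root hub's struct usb_device):

        switch (hcd->driver->flags & HCD_MASK) {
        case HCD_USB11:
                rhdev->speed = USB_SPEED_FULL;
                break;
        case HCD_USB2:
                rhdev->speed = USB_SPEED_HIGH;          /* musb lands here */
                break;
        case HCD_USB3:
                rhdev->speed = USB_SPEED_SUPER;
                break;
        }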
2. musb_h_start() / musb_h_stop():

2.1 static int musb_h_start(struct usb_hcd *hcd)

static int musb_h_start(struct usb_hcd *hcd)
{
        struct musb     *musb = hcd_to_musb(hcd);

        /* NOTE: musb_start() is called when the hub driver turns
         * on port power, or when (OTG) peripheral starts.
         */
        hcd->state = HC_STATE_RUNNING;
        musb->port1_status = 0;
        return 0;
}
hcd_to_musb:

static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
        return (struct musb *) (hcd->hcd_priv);
}
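Because usb_create_hcd() allocated sizeof(*hcd) + driver->hcd_priv_size in a single kzalloc(), and hcd_priv_size was set to sizeof(struct musb), the struct musb lives directly in the hcd_priv[] area at the end of struct usb_hcd; the cast above is therefore plain pointer arithmetic. The inverse helper musb_to_hcd(), which appears later in musb_hub_control(), presumably just goes the other way with container_of(); a sketch of that direction (see musb_core.h for the real definition):

        static inline struct usb_hcd *musb_to_hcd(struct musb *musb)
        {
                return container_of((void *) musb, struct usb_hcd, hcd_priv);
        }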
2.2 static void musb_h_stop(struct usb_hcd *hcd)

static void musb_h_stop(struct usb_hcd *hcd)
{
        musb_stop(hcd_to_musb(hcd));
        hcd->state = HC_STATE_HALT;
}
void musb_stop(struct musb *musb) --- defined in musb_core.c

/*
 * Make the HDRC stop (disable interrupts, etc.);
 * reversible by musb_start
 * called on gadget driver unregister
 * with controller locked, irqs blocked
 * acts as a NOP unless some role activated the hardware
 */
void musb_stop(struct musb *musb)
{
        /* stop IRQs, timers, ... */
        musb_platform_disable(musb);
        musb_generic_disable(musb);
        DBG(3, "HDRC disabled\n");

        /* FIXME
         *  - mark host and/or peripheral drivers unusable/inactive
         *  - disable DMA (and enable it in HdrcStart)
         *  - make sure we can musb_start() after musb_stop(); with
         *    OTG mode, gadget driver module rmmod/modprobe cycles that
         *  - ...
         */
        musb_platform_try_idle(musb, 0);
}
3. musb_h_get_frame_number() --- /* return current frame number */

static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
        struct musb     *musb = hcd_to_musb(hcd);

        return musb_readw(musb->mregs, MUSB_FRAME);
}
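usbcore exposes this hook to class drivers via usb_get_current_frame_number(), which ends up calling hcd->driver->get_frame_number(). A minimal, hypothetical usage sketch ("udev" is a placeholder struct usb_device pointer):

        /* e.g. inside an isochronous class driver */
        int frame = usb_get_current_frame_number(udev);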
4. musb_urb_enqueue() / musb_urb_dequeue()

4.1 musb_urb_enqueue():

static int musb_urb_enqueue(
        struct usb_hcd                  *hcd,
        struct urb                      *urb,
        gfp_t                           mem_flags)
{
        unsigned long                   flags;
        struct musb                     *musb = hcd_to_musb(hcd);
        struct usb_host_endpoint        *hep = urb->ep;
        struct musb_qh                  *qh;
        struct usb_endpoint_descriptor  *epd = &hep->desc;
        int                             ret;
        unsigned                        type_reg;
        unsigned                        interval;

        /* host role must be active */
        if (!is_host_active(musb) || !musb->is_active)
                return -ENODEV;

        spin_lock_irqsave(&musb->lock, flags);
        ret = usb_hcd_link_urb_to_ep(hcd, urb);
        qh = ret ? NULL : hep->hcpriv;
        if (qh)
                urb->hcpriv = qh;
        spin_unlock_irqrestore(&musb->lock, flags);

        /* DMA mapping was already done, if needed, and this urb is on
         * hep->urb_list now ... so we're done, unless hep wasn't yet
         * scheduled onto a live qh.
         *
         * REVISIT best to keep hep->hcpriv valid until the endpoint gets
         * disabled, testing for empty qh->ring and avoiding qh setup costs
         * except for the first urb queued after a config change.
         */
        if (qh || ret)
                return ret;

        /* Allocate and initialize qh, minimizing the work done each time
         * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
         *
         * REVISIT consider a dedicated qh kmem_cache, so it's harder
         * for bugs in other kernel code to break this driver...
         */
        qh = kzalloc(sizeof *qh, mem_flags);
        if (!qh) {
                spin_lock_irqsave(&musb->lock, flags);
                usb_hcd_unlink_urb_from_ep(hcd, urb);
                spin_unlock_irqrestore(&musb->lock, flags);
                return -ENOMEM;
        }

        qh->hep = hep;
        qh->dev = urb->dev;
        INIT_LIST_HEAD(&qh->ring);
        qh->is_ready = 1;

        qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
        qh->type = usb_endpoint_type(epd);

        /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
         * Some musb cores don't support high bandwidth ISO transfers; and
         * we don't (yet!) support high bandwidth interrupt transfers.
         */
        qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
        if (qh->hb_mult > 1) {
                int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

                if (ok)
                        ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
                                || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
                if (!ok) {
                        ret = -EMSGSIZE;
                        goto done;
                }
                qh->maxpacket &= 0x7ff;
        }

        qh->epnum = usb_endpoint_num(epd);

        /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
        qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

        /* precompute rxtype/txtype/type0 register */
        type_reg = (qh->type << 4) | qh->epnum;
        switch (urb->dev->speed) {                   // determine the transfer speed
        case USB_SPEED_LOW:
                type_reg |= 0xc0;
                break;
        case USB_SPEED_FULL:
                type_reg |= 0x80;
                break;
        default:
                type_reg |= 0x40;
        }
        qh->type_reg = type_reg;

        /* Precompute RXINTERVAL/TXINTERVAL register */
        switch (qh->type) {
        case USB_ENDPOINT_XFER_INT:                               // switch on the endpoint type
                /*
                 * Full/low speeds use the  linear encoding,
                 * high speed uses the logarithmic encoding.
                 */
                if (urb->dev->speed <= USB_SPEED_FULL) {
                        interval = max_t(u8, epd->bInterval, 1);
                        break;
                }
                /* FALLTHROUGH */
        case USB_ENDPOINT_XFER_ISOC:
                /* ISO always uses logarithmic encoding */
                interval = min_t(u8, epd->bInterval, 16);
                break;
        default:
                /* REVISIT we actually want to use NAK limits, hinting to the
                 * transfer scheduling logic to try some other qh, e.g. try
                 * for 2 msec first:
                 *
                 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
                 *
                 * The downside of disabling this is that transfer scheduling
                 * gets VERY unfair for nonperiodic transfers; a misbehaving
                 * peripheral could make that hurt.  That's perfectly normal
                 * for reads from network or serial adapters ... so we have
                 * partial NAKlimit support for bulk RX.
                 *
                 * The upside of disabling it is simpler transfer scheduling.
                 */
                interval = 0;
        }
        qh->intv_reg = interval;

        /* precompute addressing for external hub/tt ports */
        if (musb->is_multipoint) {
                struct usb_device       *parent = urb->dev->parent;

                if (parent != hcd->self.root_hub) {
                        qh->h_addr_reg = (u8) parent->devnum;

                        /* set up tt info if needed */
                        if (urb->dev->tt) {
                                qh->h_port_reg = (u8) urb->dev->ttport;
                                if (urb->dev->tt->hub)
                                        qh->h_addr_reg =
                                                (u8) urb->dev->tt->hub->devnum;
                                if (urb->dev->tt->multi)
                                        qh->h_addr_reg |= 0x80;
                        }
                }
        }

        /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
         * until we get real dma queues (with an entry for each urb/buffer),
         * we only have work to do in the former case.
         */
        spin_lock_irqsave(&musb->lock, flags);
        if (hep->hcpriv) {
                /* some concurrent activity submitted another urb to hep...
                 * odd, rare, error prone, but legal.
                 */
                kfree(qh);
                qh = NULL;
                ret = 0;
        } else {
                urb->hcpriv = qh;
                ret = musb_schedule(musb, qh,
                                epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
        }

        /* By the time control returns here, urb may be
         * completed back to the class driver. set urb->hcpriv before musb_schedule.
         */
        if (ret != 0) {
                urb->hcpriv = NULL;
                /* FIXME set urb->start_frame for iso/intr, it's tested in
                 * musb_start_urb(), but otherwise only konicawc cares ...
                 */
        }
        spin_unlock_irqrestore(&musb->lock, flags);

done:
        if (ret != 0) {
                spin_lock_irqsave(&musb->lock, flags);
                usb_hcd_unlink_urb_from_ep(hcd, urb);
                spin_unlock_irqrestore(&musb->lock, flags);
                kfree(qh);
        }
        return ret;
}
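To make the register precomputation above concrete, here is a small worked example with made-up values: a high-speed interrupt IN endpoint, endpoint number 3, bInterval 4. This is just the arithmetic spelled out, not code from the driver:

        /* hypothetical: high-speed interrupt IN, epnum = 3, bInterval = 4 */
        type_reg  = (USB_ENDPOINT_XFER_INT << 4) | 3;   /* 0x33 */
        type_reg |= 0x40;                               /* default (high speed) branch -> qh->type_reg = 0x73 */
        interval  = min_t(u8, 4, 16);                   /* logarithmic encoding: 2^(4-1) = 8 microframes -> qh->intv_reg = 4 */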
static int musb_schedule(
        struct musb             *musb,
        struct musb_qh          *qh,
        int                     is_in)
           -------------- schedules the qh onto a musb hardware endpoint

/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
        struct musb             *musb,
        struct musb_qh          *qh,
        int                     is_in)
{
        int                     idle = 0;
        int                     best_diff;
        int                     best_end, epnum;
        struct musb_hw_ep       *hw_ep = NULL;
        struct list_head        *head = NULL;
        u8                      toggle;
        u8                      txtype;
        struct urb              *urb = next_urb(qh);

        /* use fixed hardware for control and bulk */
        if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
                head = &musb->control;
                hw_ep = musb->control_ep;
                goto success;
        }

        /* else, periodic transfers get muxed to other endpoints */

        /*
         * We know this qh hasn't been scheduled, so all we need to do
         * is choose which hardware endpoint to put it on ...
         *
         * REVISIT what we really want here is a regular schedule tree
         * like e.g. OHCI uses.
         */
        best_diff = 4096;
        best_end = -1;

        for (epnum = 1, hw_ep = musb->endpoints + 1;
                        epnum < musb->nr_endpoints;
                        epnum++, hw_ep++) {
                int     diff;

                if (musb_ep_get_qh(hw_ep, is_in) != NULL)
                        continue;

                if (hw_ep == musb->bulk_ep)
                        continue;

                if (is_in)
                        diff = hw_ep->max_packet_sz_rx;
                else
                        diff = hw_ep->max_packet_sz_tx;
                diff -= (qh->maxpacket * qh->hb_mult);

                if (diff >= 0 && best_diff > diff) {

                        /*
                         * Mentor controller has a bug in that if we schedule
                         * a BULK Tx transfer on an endpoint that had earlier
                         * handled ISOC then the BULK transfer has to start on
                         * a zero toggle.  If the BULK transfer starts on a 1
                         * toggle then this transfer will fail as the mentor
                         * controller starts the Bulk transfer on a 0 toggle
                         * irrespective of the programming of the toggle bits
                         * in the TXCSR register.  Check for this condition
                         * while allocating the EP for a Tx Bulk transfer.  If
                         * so skip this EP.
                         */
                        hw_ep = musb->endpoints + epnum;
                        toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
                        txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
                                        >> 4) & 0x3;
                        if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
                                toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
                                continue;

                        best_diff = diff;
                        best_end = epnum;
                }
        }
        /* use bulk reserved ep1 if no other ep is free */
        if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
                hw_ep = musb->bulk_ep;
                if (is_in)
                        head = &musb->in_bulk;
                else
                        head = &musb->out_bulk;

                /* Enable bulk RX NAK timeout scheme when bulk requests are
                 * multiplexed.  This scheme doesn't work in high speed to full
                 * speed scenario as NAK interrupts are not coming from a
                 * full speed device connected to a high speed device.
                 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
                 * 4 (8 frame or 8ms) for FS device.
                 */
                if (is_in && qh->dev)
                        qh->intv_reg =
#ifdef CONFIG_ARCH_CARTESIO_STA2062
                /*
                 * After introducing a delay to fix a race condition affecting
                 * the toggle flag in Cartesio STA2062 SoC's USB-FS controller,
                 * timeout interval needs to be relaxed. The check on dyn_fifo
                 * is to avoid introducing delay on HS-OTG controller as well.
                 */
                                ((USB_SPEED_HIGH == qh->dev->speed) ||
                                (!musb->config->dyn_fifo)) ? 8 : 4;
#else
                                (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
#endif
                goto success;
        } else if (best_end < 0) {
                return -ENOSPC;
        }

        idle = 1;
        qh->mux = 0;
        hw_ep = musb->endpoints + best_end;
        DBG(4, "qh %p periodic slot %d\n", qh, best_end);
success:
        if (head) {
                idle = list_empty(head);
                list_add_tail(&qh->ring, head);
                qh->mux = 1;
        }
        qh->hw_ep = hw_ep;
        qh->hep->hcpriv = qh;
        if (idle)
                musb_start_urb(musb, is_in, qh);
        return 0;
}
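As a worked example of the best-fit search (made-up numbers): suppose qh->maxpacket = 64 and qh->hb_mult = 1, and two free TX endpoints offer max_packet_sz_tx of 512 and 64. The diffs are 512 - 64 = 448 and 64 - 64 = 0, so the 64-byte endpoint wins (smallest non-negative diff), leaving the larger FIFO available for transfers that actually need it.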
static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
        return is_in ? ep->in_qh : ep->out_qh;
}

static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
  --- starts the URB transfer

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
        u16                     frame;
        u32                     len;
        void __iomem            *mbase =  musb->mregs;
        struct urb              *urb = next_urb(qh);
        void                    *buf = urb->transfer_buffer;
        u32                     offset = 0;
        struct musb_hw_ep       *hw_ep = qh->hw_ep;
        unsigned                pipe = urb->pipe;
        u8                      address = usb_pipedevice(pipe);
        int                     epnum = hw_ep->epnum;

        /* initialize software qh state */
        qh->offset = 0;
        qh->segsize = 0;

        /* gather right source of data */
        switch (qh->type) {
        case USB_ENDPOINT_XFER_CONTROL:
                /* control transfers always start with SETUP */
                is_in = 0;
                musb->ep0_stage = MUSB_EP0_START;
                buf = urb->setup_packet;
                len = 8;
                break;
        case USB_ENDPOINT_XFER_ISOC:
                qh->iso_idx = 0;
                qh->frame = 0;
                offset = urb->iso_frame_desc[0].offset;
                len = urb->iso_frame_desc[0].length;
                break;
        default:                /* bulk, interrupt */
                /* actual_length may be nonzero on retry paths */
                buf = urb->transfer_buffer + urb->actual_length;
                len = urb->transfer_buffer_length - urb->actual_length;
        }

        DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
                        qh, urb, address, qh->epnum,
                        is_in ? "in" : "out",
                        ({char *s; switch (qh->type) {
                        case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
                        case USB_ENDPOINT_XFER_BULK:    s = "-bulk"; break;
                        case USB_ENDPOINT_XFER_ISOC:    s = "-iso"; break;
                        default:                        s = "-intr"; break;
                        }; s; }),
                        epnum, buf + offset, len);

        /* Configure endpoint */
        musb_ep_set_qh(hw_ep, is_in, qh);
        musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

        /* transmit may have more work: start it when it is time */
        if (is_in)
                return;

        /* determine if the time is right for a periodic transfer */
        switch (qh->type) {
        case USB_ENDPOINT_XFER_ISOC:
        case USB_ENDPOINT_XFER_INT:
                DBG(3, "check whether there's still time for periodic Tx\n");
                frame = musb_readw(mbase, MUSB_FRAME);
                /* FIXME this doesn't implement that scheduling policy ...
                 * or handle framecounter wrapping
                 */
                if ((urb->transfer_flags & URB_ISO_ASAP)
                                || (frame >= urb->start_frame)) {
                        /* REVISIT the SOF irq handler shouldn't duplicate
                         * this code; and we don't init urb->start_frame...
                         */
                        qh->frame = 0;
                        goto start;
                } else {
                        qh->frame = urb->start_frame;
                        /* enable SOF interrupt so we can count down */
                        DBG(1, "SOF for %d\n", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
                        musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
                }
                break;
        default:
start:
                DBG(4, "Start TX%d %s\n", epnum,
                        hw_ep->tx_channel ? "dma" : "pio");

                if (!hw_ep->tx_channel)
                        musb_h_tx_start(hw_ep);
                else if (is_cppi_enabled() || tusb_dma_omap())
                        musb_h_tx_dma_start(hw_ep);
        }
}
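The len = 8 used in the control case above is the size of a standard SETUP packet; urb->setup_packet points at a struct usb_ctrlrequest (include/linux/usb/ch9.h), reproduced here for reference:

        struct usb_ctrlrequest {
                __u8    bRequestType;
                __u8    bRequest;
                __le16  wValue;
                __le16  wIndex;
                __le16  wLength;
        } __attribute__ ((packed));             /* 8 bytes on the wire */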
static void musb_ep_program(struct musb *musb, u8 epnum,
                        struct urb *urb, int is_out,
                        u8 *buf, u32 offset, u32 len)

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
                        struct urb *urb, int is_out,
                        u8 *buf, u32 offset, u32 len)
{
        struct dma_controller   *dma_controller;
        struct dma_channel      *dma_channel;
        u8                      dma_ok;
        void __iomem            *mbase = musb->mregs;
        struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
        void __iomem            *epio = hw_ep->regs;
        struct musb_qh          *qh = musb_ep_get_qh(hw_ep, !is_out);
        u16                     packet_sz = qh->maxpacket;

        DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
                                "h_addr%02x h_port%02x bytes %d\n",
                        is_out ? "-->" : "<--",
                        epnum, urb, urb->dev->speed,
                        qh->addr_reg, qh->epnum, is_out ? "out" : "in",
                        qh->h_addr_reg, qh->h_port_reg,
                        len);

        musb_ep_select(mbase, epnum);

        /* candidate for DMA? */
        dma_controller = musb->dma_controller;
        if (is_dma_capable() && epnum && dma_controller) {
                dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
                if (!dma_channel) {
                        dma_channel = dma_controller->channel_alloc(
                                        dma_controller, hw_ep, is_out);
                        if (is_out)
                                hw_ep->tx_channel = dma_channel;
                        else
                                hw_ep->rx_channel = dma_channel;
                }
        } else
                dma_channel = NULL;

        /* make sure we clear DMAEnab, autoSet bits from previous run */

        /* OUT/transmit/EP0 or IN/receive? */
        if (is_out) {
                u16     csr;
                u16     int_txe;
                u16     load_count;

                csr = musb_readw(epio, MUSB_TXCSR);

                /* disable interrupt in case we flush */
                int_txe = musb_readw(mbase, MUSB_INTRTXE);
                musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

                /* general endpoint setup */
                if (epnum) {
                        /* flush all old state, set default */
                        musb_h_tx_flush_fifo(hw_ep);

                        /*
                         * We must not clear the DMAMODE bit before or in
                         * the same cycle with the DMAENAB bit, so we clear
                         * the latter first...
                         */
                        csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
                                        | MUSB_TXCSR_AUTOSET
                                        | MUSB_TXCSR_DMAENAB
                                        | MUSB_TXCSR_FRCDATATOG
                                        | MUSB_TXCSR_H_RXSTALL
                                        | MUSB_TXCSR_H_ERROR
                                        | MUSB_TXCSR_TXPKTRDY
                                        );
                        csr |= MUSB_TXCSR_MODE;

                        if (usb_gettoggle(urb->dev, qh->epnum, 1))
                                csr |= MUSB_TXCSR_H_WR_DATATOGGLE
                                        | MUSB_TXCSR_H_DATATOGGLE;
                        else
                                csr |= MUSB_TXCSR_CLRDATATOG;

                        musb_writew(epio, MUSB_TXCSR, csr);
                        /* REVISIT may need to clear FLUSHFIFO ... */
                        csr &= ~MUSB_TXCSR_DMAMODE;
                        musb_writew(epio, MUSB_TXCSR, csr);
                        csr = musb_readw(epio, MUSB_TXCSR);
                } else {
                        /* endpoint 0: just flush */
                        musb_h_ep0_flush_fifo(hw_ep);
                }

                /* target addr and (for multipoint) hub addr/port */
                if (musb->is_multipoint) {
                        musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
                        musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
                        musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
                } else
                        musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

                /* protocol/endpoint/interval/NAKlimit */
                if (epnum) {
                        musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
                        if (can_bulk_split(musb, qh->type))
                                musb_writew(epio, MUSB_TXMAXP,
                                        packet_sz
                                        | ((hw_ep->max_packet_sz_tx /
                                                packet_sz) - 1) << 11);
                        else
                                musb_writew(epio, MUSB_TXMAXP,
                                        packet_sz);
                        musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
                } else {
                        musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
                        if (musb->is_multipoint)
                                musb_writeb(epio, MUSB_TYPE0,
                                                qh->type_reg);
                }

                if (can_bulk_split(musb, qh->type))
                        load_count = min((u32) hw_ep->max_packet_sz_tx,
                                                len);
                else
                        load_count = min((u32) packet_sz, len);

                if (dma_channel && musb_tx_dma_program(dma_controller,
                                        hw_ep, qh, urb, offset, len))
                        load_count = 0;

                if (load_count) {
                        /* PIO to load FIFO */
                        qh->segsize = load_count;
                        musb_write_fifo(hw_ep, load_count, buf);
                }

                /* re-enable interrupt */
                musb_writew(mbase, MUSB_INTRTXE, int_txe);

        /* IN/receive */
        } else {
                u16     csr = 0;
#ifdef CONFIG_USB_CARTESIO_DMA
                uint16_t rx_count = musb_readw(hw_ep->regs, MUSB_RXCOUNT);
                /* In case of DMA mode 1, we are receiving the last packet even before
                   programming it. This logic checks for the received data and return
                   it to class driver. */
                if(usb_pipebulk(urb->pipe))
                {
                        csr = musb_readw(hw_ep->regs,MUSB_RXCSR);
                        if (csr & MUSB_RXCSR_RXPKTRDY)
                        {
                                uint16_t rx_count = musb_readw(hw_ep->regs, MUSB_RXCOUNT);
                                int done = false;
                                DBG(4, "reading residual rx_count = %d packet_size = %x\n", rx_count, packet_sz);
                                if(rx_count < packet_sz) {
                                        done = musb_host_packet_rx(musb,urb,epnum,false);
                                        DBG(4,"residual found done=%d\n", done);
                                        if (done) {
                                                urb->status = 0;
                                                musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
                                                DBG(4,"RX short packet exit\n");
                                                return;
                                        }
                                }
                        }
                }

                if (hw_ep->rx_reinit && (!hw_ep->is_reqpkt_clear)) {
#else
                if (hw_ep->rx_reinit) {
#endif
                        musb_rx_reinit(musb, qh, hw_ep);

                        /* init new state: toggle and NYET, maybe DMA later */
                        if (usb_gettoggle(urb->dev, qh->epnum, 0))
                                csr = MUSB_RXCSR_H_WR_DATATOGGLE
                                        | MUSB_RXCSR_H_DATATOGGLE;
                        else
                                csr = 0;
                        if (qh->type == USB_ENDPOINT_XFER_INT)
                                csr |= MUSB_RXCSR_DISNYET;

                }
#ifndef CONFIG_USB_CARTESIO_DMA
                else {
                        csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

                        if (csr & (MUSB_RXCSR_RXPKTRDY
                                        | MUSB_RXCSR_DMAENAB
                                        | MUSB_RXCSR_H_REQPKT))
                                ERR("broken !rx_reinit, ep%d csr %04x\n",
                                                hw_ep->epnum, csr);

                        /* scrub any stale state, leaving toggle alone */
                        csr &= MUSB_RXCSR_DISNYET;
                }
#endif

                /* kick things off */

                if ((is_cppi_enabled() || tusb_dma_omap() ||
                        is_cartesio_dma_enabled()) && dma_channel) {
                        /* candidate for DMA */
#ifdef CONFIG_USB_CARTESIO_DMA

                        /* len=192 special case to avoid warning in case of usb1.1 drives */
                        if((usb_pipebulk(urb->pipe)) && ((len < packet_sz) || (len == 192))) {
                                DBG(4, "Program RX in PIO mode\n");
                                dma_controller->channel_release(dma_channel);
                                hw_ep->rx_channel = NULL;
                                goto PIO;
                        }
#endif

                        if (dma_channel) {
                                dma_channel->actual_len = 0L;
                                qh->segsize = len;

                                /* unless caller treats short rx transfers as
                                 * errors, we dare not queue multiple transfers.
                                 */
                                dma_ok = dma_controller->channel_program(
                                                dma_channel, packet_sz,
                                                !(urb->transfer_flags
                                                        & URB_SHORT_NOT_OK),
                                                urb->transfer_dma + offset,
                                                qh->segsize);
                                if (!dma_ok) {
                                        dma_controller->channel_release(
                                                        dma_channel);
                                        hw_ep->rx_channel = NULL;
                                        dma_channel = NULL;
                                } else {
#ifdef CONFIG_USB_CARTESIO_DMA
                                        /* Program in DMA mode 1; issue with DMA mode 0 */
                                        csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR
                                                | MUSB_RXCSR_DMAMODE | MUSB_RXCSR_H_AUTOREQ;
#else
                                        csr |= MUSB_RXCSR_DMAENAB;
#endif
                                }
                        }
                }

                /* setting RXCSR multiple time leads to some random behaviour */
#ifdef CONFIG_USB_CARTESIO_DMA
PIO :
                if((!hw_ep->is_reqpkt_clear) || !(rx_count)) {
#endif
                csr |= MUSB_RXCSR_H_REQPKT;
                DBG(7, "RXCSR%d := %04x\n", epnum, csr);
                musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
                csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
#ifdef CONFIG_USB_CARTESIO_DMA
                }
#endif
        }
}

static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
        void __iomem    *epio = ep->regs;
        u16             csr;
        u16             lastcsr = 0;
        int             retries = 1000;

        csr = musb_readw(epio, MUSB_TXCSR);
        while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
                if (csr != lastcsr)
                        DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
                lastcsr = csr;
                csr |= MUSB_TXCSR_FLUSHFIFO;
                musb_writew(epio, MUSB_TXCSR, csr);
                csr = musb_readw(epio, MUSB_TXCSR);
                if (WARN(retries-- < 1,
                                "Could not flush host TX%d fifo: csr: %04x\n",
                                ep->epnum, csr))
                        return;
                mdelay(1);
        }
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
        void __iomem    *epio = ep->regs;
        u16             csr;
        int             retries = 5;

        /* scrub any data left in the fifo */
        do {
                csr = musb_readw(epio, MUSB_TXCSR);
                if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
                        break;
                musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
                csr = musb_readw(epio, MUSB_TXCSR);
                udelay(10);
        } while (--retries);

        WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
                        ep->epnum, csr);

        /* and reset for the next transfer */
        musb_writew(epio, MUSB_TXCSR, 0);
}
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)  -- reinitializes the RX side of the endpoint

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
        u16     csr;

        /* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
         * That always uses tx_reinit since ep0 repurposes TX register
         * offsets; the initial SETUP packet is also a kind of OUT.
         */

        /* if programmed for Tx, put it in RX mode */
        if (ep->is_shared_fifo) {
                csr = musb_readw(ep->regs, MUSB_TXCSR);
                if (csr & MUSB_TXCSR_MODE) {
                        musb_h_tx_flush_fifo(ep);
                        csr = musb_readw(ep->regs, MUSB_TXCSR);
                        musb_writew(ep->regs, MUSB_TXCSR,
                                    csr | MUSB_TXCSR_FRCDATATOG);
                }

                /*
                 * Clear the MODE bit (and everything else) to enable Rx.
                 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
                 */
                if (csr & MUSB_TXCSR_DMAMODE)
                        musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
                musb_writew(ep->regs, MUSB_TXCSR, 0);

        /* scrub all previous state, clearing toggle */
        } else {
                csr = musb_readw(ep->regs, MUSB_RXCSR);
                if (csr & MUSB_RXCSR_RXPKTRDY)
                        WARNING("rx%d, packet/%d ready?\n", ep->epnum,
                                musb_readw(ep->regs, MUSB_RXCOUNT));

                musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
        }

        /* target addr and (for multipoint) hub addr/port */
        if (musb->is_multipoint) {
                musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
                musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
                musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

        } else
                musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

        /* protocol/endpoint, interval/NAKlimit, i/o size */
        musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
        musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
        /* NOTE: bulk combining rewrites high bits of maxpacket */
        /* Set RXMAXP with the FIFO size of the endpoint
         * to disable double buffer mode.
         */
        if (musb->hwvers < MUSB_HWVERS_2000)
                musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
        else
                musb_writew(ep->regs, MUSB_RXMAXP,
                                qh->maxpacket | ((qh->hb_mult - 1) << 11));

        ep->rx_reinit = 0;
}
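As a worked example of the high-bandwidth encoding (made-up values): an ISO endpoint advertising wMaxPacketSize = 0x1400 gives qh->maxpacket = 0x400 (1024) and qh->hb_mult = 1 + 2 = 3 back in musb_urb_enqueue(), so on a >= 2.0 core the write above becomes MUSB_RXMAXP = 1024 | ((3 - 1) << 11) = 0x1400, i.e. the same bits 11..12 multiplier layout the descriptor used.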
4.2 static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)

static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
        struct musb             *musb = hcd_to_musb(hcd);
        struct musb_qh          *qh;
        unsigned long           flags;
        int                     is_in  = usb_pipein(urb->pipe);
        int                     ret;

        DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
                        usb_pipedevice(urb->pipe),
                        usb_pipeendpoint(urb->pipe),
                        is_in ? "in" : "out");

        spin_lock_irqsave(&musb->lock, flags);
        ret = usb_hcd_check_unlink_urb(hcd, urb, status);    //check whether an URB may be unlinked
        if (ret)
                goto done;

        qh = urb->hcpriv;
        if (!qh)
                goto done;

        /*
         * Any URB not actively programmed into endpoint hardware can be
         * immediately given back; that's any URB not at the head of an
         * endpoint queue, unless someday we get real DMA queues.  And even
         * if it's at the head, it might not be known to the hardware...
         *
         * Otherwise abort current transfer, pending DMA, etc.; urb->status
         * has already been updated.  This is a synchronous abort; it'd be
         * OK to hold off until after some IRQ, though.
         *
         * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
         */
        if (!qh->is_ready
                        || urb->urb_list.prev != &qh->hep->urb_list
                        || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
                int     ready = qh->is_ready;

                qh->is_ready = 0;
                musb_giveback(musb, urb, 0);
                qh->is_ready = ready;

                /* If nothing else (usually musb_giveback) is using it
                 * and its URB list has emptied, recycle this qh.
                 */
                if (ready && list_empty(&qh->hep->urb_list)) {
                        qh->hep->hcpriv = NULL;
                        list_del(&qh->ring);
                        kfree(qh);
                }
        } else
                ret = musb_cleanup_urb(urb, qh);
done:
        spin_unlock_irqrestore(&musb->lock, flags);
        return ret;
}
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)

/*
 * abort a transfer that's at the head of a hardware queue.
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
        struct musb_hw_ep       *ep = qh->hw_ep;
        void __iomem            *epio = ep->regs;
        unsigned                hw_end = ep->epnum;
        void __iomem            *regs = ep->musb->mregs;
        int                     is_in = usb_pipein(urb->pipe);
        int                     status = 0;
        u16                     csr;

        musb_ep_select(regs, hw_end);

        if (is_dma_capable()) {
                struct dma_channel      *dma;

                dma = is_in ? ep->rx_channel : ep->tx_channel;
                if (dma) {
                        status = ep->musb->dma_controller->channel_abort(dma);
                        DBG(status ? 1 : 3,
                                "abort %cX%d DMA for urb %p --> %d\n",
                                is_in ? 'R' : 'T', ep->epnum,
                                urb, status);
                        urb->actual_length += dma->actual_len;
                }
        }

        /* turn off DMA requests, discard state, stop polling ... */
        if (is_in) {
                /* giveback saves bulk toggle */
                csr = musb_h_flush_rxfifo(ep, 0);

                /* REVISIT we still get an irq; should likely clear the
                 * endpoint's irq status here to avoid bogus irqs.
                 * clearing that status is platform-specific...
                 */
        } else if (ep->epnum) {
                musb_h_tx_flush_fifo(ep);
                csr = musb_readw(epio, MUSB_TXCSR);
                csr &= ~(MUSB_TXCSR_AUTOSET
                        | MUSB_TXCSR_DMAENAB
                        | MUSB_TXCSR_H_RXSTALL
                        | MUSB_TXCSR_H_NAKTIMEOUT
                        | MUSB_TXCSR_H_ERROR
                        | MUSB_TXCSR_TXPKTRDY);
                musb_writew(epio, MUSB_TXCSR, csr);
                /* REVISIT may need to clear FLUSHFIFO ... */
                musb_writew(epio, MUSB_TXCSR, csr);
                /* flush cpu writebuffer */
                csr = musb_readw(epio, MUSB_TXCSR);
        } else  {
                musb_h_ep0_flush_fifo(ep);
        }
        if (status == 0)
                musb_advance_schedule(ep->musb, urb, ep, is_in);
        return status;
}
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
                                  struct musb_hw_ep *hw_ep, int is_in)

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
                                  struct musb_hw_ep *hw_ep, int is_in)
{
        struct musb_qh          *qh = musb_ep_get_qh(hw_ep, is_in);
        struct musb_hw_ep       *ep = qh->hw_ep;
        int                     ready = qh->is_ready;
        int                     status;

        status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

        /* save toggle eagerly, for paranoia */
        switch (qh->type) {
        case USB_ENDPOINT_XFER_BULK:
        case USB_ENDPOINT_XFER_INT:
                musb_save_toggle(qh, is_in, urb);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                if (status == 0 && urb->error_count)
                        status = -EXDEV;
                break;
        }

        qh->is_ready = 0;
        musb_giveback(musb, urb, status);
        qh->is_ready = ready;

        /* reclaim resources (and bandwidth) ASAP; deschedule it, and
         * invalidate qh as soon as list_empty(&hep->urb_list)
         */
        if (list_empty(&qh->hep->urb_list)) {
                struct list_head        *head;

                if (is_in)
                        ep->rx_reinit = 1;
                else
                        ep->tx_reinit = 1;

                /* Clobber old pointers to this qh */
                musb_ep_set_qh(ep, is_in, NULL);
                qh->hep->hcpriv = NULL;

                switch (qh->type) {

                case USB_ENDPOINT_XFER_CONTROL:
                case USB_ENDPOINT_XFER_BULK:
                        /* fifo policy for these lists, except that NAKing
                         * should rotate a qh to the end (for fairness).
                         */
                        if (qh->mux == 1) {
                                head = qh->ring.prev;
                                list_del(&qh->ring);
                                kfree(qh);
                                qh = first_qh(head);
                                break;
                        }

                case USB_ENDPOINT_XFER_ISOC:
                case USB_ENDPOINT_XFER_INT:
                        /* this is where periodic bandwidth should be
                         * de-allocated if it's tracked and allocated;
                         * and where we'd update the schedule tree...
                         */
                        kfree(qh);
                        qh = NULL;
                        break;
                }
        }

        if (qh != NULL && qh->is_ready) {
                DBG(4, "... next ep%d %cX urb %p\n",
                    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
                musb_start_urb(musb, is_in, qh);
        }
}
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)

static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
        if (is_in != 0 || ep->is_shared_fifo)
                ep->in_qh  = qh;
        if (is_in == 0 || ep->is_shared_fifo)
                ep->out_qh = qh;
}
5. static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)

/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
        u8                      is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
        unsigned long           flags;
        struct musb             *musb = hcd_to_musb(hcd);
        struct musb_qh          *qh;
        struct urb              *urb;

        spin_lock_irqsave(&musb->lock, flags);

        qh = hep->hcpriv;
        if (qh == NULL)
                goto exit;

        /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

        /* Kick the first URB off the hardware, if needed */
        qh->is_ready = 0;
        if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
                urb = next_urb(qh);

                /* make software (then hardware) stop ASAP */
                if (!urb->unlinked)
                        urb->status = -ESHUTDOWN;

                /* cleanup */
                musb_cleanup_urb(urb, qh);

                /* Then nuke all the others ... and advance the
                 * queue on hw_ep (e.g. bulk ring) when we're done.
                 */
                while (!list_empty(&hep->urb_list)) {
                        urb = next_urb(qh);
                        urb->status = -ESHUTDOWN;
                        musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
                }
        } else {
                /* Just empty the queue; the hardware is busy with
                 * other transfers, and since !qh->is_ready nothing
                 * will activate any of these as it advances.
                 */
                while (!list_empty(&hep->urb_list))
                        musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

                hep->hcpriv = NULL;
                list_del(&qh->ring);
                kfree(qh);
        }
exit:
        spin_unlock_irqrestore(&musb->lock, flags);
}
6. musb_hub_status_data() / musb_hub_control() --- the two hub-related functions

6.1 int musb_hub_status_data(struct usb_hcd *hcd, char *buf) --- defined in drivers/usb/musb/musb_virthub.c

/* Caller may or may not hold musb->lock */
int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
{
        struct musb     *musb = hcd_to_musb(hcd);
        int             retval = 0;

        /* called in_irq() via usb_hcd_poll_rh_status() */
        if (musb->port1_status & 0xffff0000) {
                *buf = 0x02;
                retval = 1;
        }
        return retval;
}
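The single byte written here is the root hub's status-change bitmap: bit 0 is the hub itself and bit n is port n, so 0x02 flags a change on port 1. The test against 0xffff0000 works because the driver keeps the wPortChange bits in the upper half of port1_status; they get set elsewhere roughly like the illustrative lines below (compare the USB_PORT_STAT_C_SUSPEND << 16 update in musb_hub_control() further down):

        /* illustrative only: how a change bit is recorded and reported */
        musb->port1_status |= USB_PORT_STAT_C_CONNECTION << 16;
        usb_hcd_poll_rh_status(musb_to_hcd(musb));      /* prompts usbcore to call musb_hub_status_data() */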
6.2 int musb_hub_control(
        struct usb_hcd  *hcd,
        u16             typeReq,
        u16             wValue,
        u16             wIndex,
        char            *buf,
        u16             wLength)

int musb_hub_control(
        struct usb_hcd  *hcd,
        u16             typeReq,
        u16             wValue,
        u16             wIndex,
        char            *buf,
        u16             wLength)
{
        struct musb     *musb = hcd_to_musb(hcd);
        u32             temp;
        int             retval = 0;
        unsigned long   flags;

        spin_lock_irqsave(&musb->lock, flags);

        if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) {
                spin_unlock_irqrestore(&musb->lock, flags);
                return -ESHUTDOWN;
        }

        /* hub features:  always zero, setting is a NOP
         * port features: reported, sometimes updated when host is active
         * no indicators
         */
        switch (typeReq) {
        case ClearHubFeature:
        case SetHubFeature:
                switch (wValue) {
                case C_HUB_OVER_CURRENT:
                case C_HUB_LOCAL_POWER:
                        break;
                default:
                        goto error;
                }
                break;
        case ClearPortFeature:
                if ((wIndex & 0xff) != 1)
                        goto error;

                switch (wValue) {
                case USB_PORT_FEAT_ENABLE:
                        break;
                case USB_PORT_FEAT_SUSPEND:
                        musb_port_suspend(musb, false);
                        break;
                case USB_PORT_FEAT_POWER:
                        if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
                                musb_set_vbus(musb, 0);
                        break;
                case USB_PORT_FEAT_C_CONNECTION:
                case USB_PORT_FEAT_C_ENABLE:
                case USB_PORT_FEAT_C_OVER_CURRENT:
                case USB_PORT_FEAT_C_RESET:
                case USB_PORT_FEAT_C_SUSPEND:
                        break;
                default:
                        goto error;
                }
                DBG(5, "clear feature %d\n", wValue);
                musb->port1_status &= ~(1 << wValue);
                break;
        case GetHubDescriptor:
                {
                struct usb_hub_descriptor *desc = (void *)buf;

                desc->bDescLength = 9;
                desc->bDescriptorType = 0x29;
                desc->bNbrPorts = 1;
                desc->wHubCharacteristics = cpu_to_le16(
                                  0x0001        /* per-port power switching */
                                | 0x0010        /* no overcurrent reporting */
                                );
                desc->bPwrOn2PwrGood = 5;       /* msec/2 */
                desc->bHubContrCurrent = 0;

                /* workaround bogus struct definition */
                desc->DeviceRemovable[0] = 0x02;        /* port 1 */
                desc->DeviceRemovable[1] = 0xff;
                }
                break;
        case GetHubStatus:
                temp = 0;
                *(__le32 *) buf = cpu_to_le32(temp);
                break;
        case GetPortStatus:
                if (wIndex != 1)
                        goto error;

                /* finish RESET signaling? */
                if ((musb->port1_status & USB_PORT_STAT_RESET)
                                && time_after_eq(jiffies, musb->rh_timer))
                        musb_port_reset(musb, false);

                /* finish RESUME signaling? */
                if ((musb->port1_status & MUSB_PORT_STAT_RESUME)
                                && time_after_eq(jiffies, musb->rh_timer)) {
                        u8              power;

                        power = musb_readb(musb->mregs, MUSB_POWER);
                        power &= ~MUSB_POWER_RESUME;
                        DBG(4, "root port resume stopped, power %02x\n",
                                        power);
                        musb_writeb(musb->mregs, MUSB_POWER, power);

                        /* ISSUE:  DaVinci (RTL 1.300) disconnects after
                         * resume of high speed peripherals (but not full
                         * speed ones).
                         */

                        musb->is_active = 1;
                        musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
                                        | MUSB_PORT_STAT_RESUME);
                        musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
                        usb_hcd_poll_rh_status(musb_to_hcd(musb));
                        /* NOTE: it might really be A_WAIT_BCON ... */
                        musb->xceiv->state = OTG_STATE_A_HOST;
                }

                put_unaligned(cpu_to_le32(musb->port1_status
                                        & ~MUSB_PORT_STAT_RESUME),
                                (__le32 *) buf);

                /* port change status is more interesting */
                DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n",
                                musb->port1_status);
                break;
        case SetPortFeature:
                if ((wIndex & 0xff) != 1)
                        goto error;

                switch (wValue) {
                case USB_PORT_FEAT_POWER:
                        /* NOTE: this controller has a strange state machine
                         * that involves "requesting sessions" according to
                         * magic side effects from incompletely-described
                         * rules about startup...
                         *
                         * This call is what really starts the host mode; be
                         * very careful about side effects if you reorder any
                         * initialization logic, e.g. for OTG, or change any
                         * logic relating to VBUS power-up.
                         */
                        if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
                                musb_start(musb);
                        break;
                case USB_PORT_FEAT_RESET:
                        musb_port_reset(musb, true);
                        break;
                case USB_PORT_FEAT_SUSPEND:
                        musb_port_suspend(musb, true);
                        break;
                case USB_PORT_FEAT_TEST:
                        if (unlikely(is_host_active(musb)))
                                goto error;

                        wIndex >>= 8;
                        switch (wIndex) {
                        case 1:
                                pr_debug("TEST_J\n");
                                temp = MUSB_TEST_J;
                                break;
                        case 2:
                                pr_debug("TEST_K\n");
                                temp = MUSB_TEST_K;
                                break;
                        case 3:
                                pr_debug("TEST_SE0_NAK\n");
                                temp = MUSB_TEST_SE0_NAK;
                                break;
                        case 4:
                                pr_debug("TEST_PACKET\n");
                                temp = MUSB_TEST_PACKET;
                                musb_load_testpacket(musb);
                                break;
                        case 5:
                                pr_debug("TEST_FORCE_ENABLE\n");
                                temp = MUSB_TEST_FORCE_HOST
                                        | MUSB_TEST_FORCE_HS;

                                musb_writeb(musb->mregs, MUSB_DEVCTL,
                                                MUSB_DEVCTL_SESSION);
                                break;
                        case 6:
                                pr_debug("TEST_FIFO_ACCESS\n");
                                temp = MUSB_TEST_FIFO_ACCESS;
                                break;
                        default:
                                goto error;
                        }
                        musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
                        break;
                default:
                        goto error;
                }
                DBG(5, "set feature %d\n", wValue);
                musb->port1_status |= 1 << wValue;
                break;

        default:
error:
                /* "protocol stall" on error */
                retval = -EPIPE;
        }
        spin_unlock_irqrestore(&musb->lock, flags);
        return retval;
}
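The 32-bit value returned for GetPortStatus follows the standard hub layout: the low 16 bits are wPortStatus and the high 16 bits are wPortChange, which is exactly why the driver keeps both halves packed into port1_status. A hedged sketch of decoding that value (variable names here are illustrative, not taken from the hub driver):

        /* illustrative decode of the value written into buf above */
        u32 portstat     = le32_to_cpu(*(__le32 *) buf);
        u16 wPortStatus  = portstat & 0xffff;           /* e.g. USB_PORT_STAT_ENABLE */
        u16 wPortChange  = portstat >> 16;              /* e.g. USB_PORT_STAT_C_CONNECTION */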
