Changeset 1d758fc in mainline


Timestamp:
2018-02-12T10:11:47Z
Author:
Ondřej Hlavatý <aearsis@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
5fe3f954
Parents:
2f762a7
git-author:
Ondřej Hlavatý <aearsis@…> (2018-02-05 03:28:50)
git-committer:
Ondřej Hlavatý <aearsis@…> (2018-02-12 10:11:47)
Message:

usb: rethinking DMA buffers

Location:
uspace
Files:
27 edited
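
In summary: dma_buffer_t no longer carries a cached physical address; it
carries a DMA policy instead, and physical addresses are computed on demand
(and cached) by dma_buffer_phys(). The batch field buffer_size becomes size,
accompanied by an offset, so transfers can start mid-buffer without copying.
A minimal before/after sketch of the shared structure, as reflected in
usbhc_iface.h and usb/dma_buffer.h below:

    /* Before: physical address resolved at allocation time and stored. */
    typedef struct dma_buffer {
            void *virt;
            uintptr_t phys;
    } dma_buffer_t;

    /* After: only the policy is stored; dma_buffer_phys() resolves
     * physical addresses on demand. */
    typedef struct dma_buffer {
            void *virt;
            dma_policy_t policy;
    } dma_buffer_t;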

  • uspace/drv/bus/usb/ehci/ehci_batch.c

    r2f762a7 r1d758fc  
    102102                : 0;
    103103
    104         const size_t size = ehci_batch->base.buffer_size;
     104        const size_t size = ehci_batch->base.size;
    105105
    106106        /* Add TD left over by the previous transfer */
     
    180180
    181181        /* Assume all data got through */
    182         ehci_batch->base.transferred_size = ehci_batch->base.buffer_size;
     182        ehci_batch->base.transferred_size = ehci_batch->base.size;
    183183
    184184        /* Check all TDs */
     
    216216        }
    217217
    218         assert(ehci_batch->base.transferred_size <= ehci_batch->base.buffer_size);
     218        assert(ehci_batch->base.transferred_size <= ehci_batch->base.size);
    219219
    220220        /* Clear TD pointers */
     
    281281        /* Data stage */
    282282        unsigned td_current = 1;
    283         size_t remain_size = ehci_batch->base.buffer_size;
     283        size_t remain_size = ehci_batch->base.size;
    284284        uintptr_t buffer = dma_buffer_phys(&ehci_batch->base.dma_buffer,
    285285            ehci_batch->data_buffer);
     
    335335
    336336        size_t td_current = 0;
    337         size_t remain_size = ehci_batch->base.buffer_size;
     337        size_t remain_size = ehci_batch->base.size;
    338338        uintptr_t buffer = dma_buffer_phys(&ehci_batch->base.dma_buffer,
    339339            ehci_batch->data_buffer);
  • uspace/drv/bus/usb/ehci/ehci_rh.c

    r2f762a7 r1d758fc  
    147147        batch->error = virthub_base_request(&instance->base, batch->target,
    148148            batch->dir, (void*) batch->setup.buffer,
    149             batch->dma_buffer.virt, batch->buffer_size,
     149            batch->dma_buffer.virt, batch->size,
    150150            &batch->transferred_size);
    151151        if (batch->error == ENAK) {
     
    206206                batch->error = virthub_base_request(&instance->base, batch->target,
    207207                    batch->dir, (void*) batch->setup.buffer,
    208                     batch->dma_buffer.virt, batch->buffer_size,
     208                    batch->dma_buffer.virt, batch->size,
    209209                    &batch->transferred_size);
    210210                usb_transfer_batch_finish(batch);
  • uspace/drv/bus/usb/ohci/ohci_batch.c

    r2f762a7 r1d758fc  
    9696                return ENOTSUP;
    9797
    98         ohci_batch->td_count = (usb_batch->buffer_size + OHCI_TD_MAX_TRANSFER - 1)
     98        ohci_batch->td_count = (usb_batch->size + OHCI_TD_MAX_TRANSFER - 1)
    9999            / OHCI_TD_MAX_TRANSFER;
    100100        /* Control transfer need Setup and Status stage */
     
    166166
    167167        /* Assume all data got through */
    168         usb_batch->transferred_size = usb_batch->buffer_size;
     168        usb_batch->transferred_size = usb_batch->size;
    169169
    170170        /* Check all TDs */
     
    212212                }
    213213        }
    214         assert(usb_batch->transferred_size <= usb_batch->buffer_size);
     214        assert(usb_batch->transferred_size <= usb_batch->size);
    215215
    216216        /* Make sure that we are leaving the right TD behind */
     
    289289        size_t td_current = 1;
    290290        const char* buffer = ohci_batch->data_buffer;
    291         size_t remain_size = ohci_batch->base.buffer_size;
     291        size_t remain_size = ohci_batch->base.size;
    292292        while (remain_size > 0) {
    293293                const size_t transfer_size =
     
    343343
    344344        size_t td_current = 0;
    345         size_t remain_size = ohci_batch->base.buffer_size;
     345        size_t remain_size = ohci_batch->base.size;
    346346        char *buffer = ohci_batch->data_buffer;
    347347        while (remain_size > 0) {
  • uspace/drv/bus/usb/ohci/ohci_rh.c

    r2f762a7 r1d758fc  
    182182        batch->error = virthub_base_request(&instance->base, batch->target,
    183183            batch->dir, &batch->setup.packet,
    184             batch->dma_buffer.virt, batch->buffer_size, &batch->transferred_size);
     184            batch->dma_buffer.virt, batch->size, &batch->transferred_size);
    185185        if (batch->error == ENAK) {
    186186                /* Lock the HC guard */
     
    233233                batch->error = virthub_base_request(&instance->base, batch->target,
    234234                    batch->dir, &batch->setup.packet,
    235                     batch->dma_buffer.virt, batch->buffer_size, &batch->transferred_size);
     235                    batch->dma_buffer.virt, batch->size, &batch->transferred_size);
    236236                usb_transfer_batch_finish(batch);
    237237        }
  • uspace/drv/bus/usb/uhci/uhci_batch.c

    r2f762a7 r1d758fc  
    9898        usb_transfer_batch_t *usb_batch = &uhci_batch->base;
    9999
    100         uhci_batch->td_count = (usb_batch->buffer_size + usb_batch->ep->max_packet_size - 1)
     100        uhci_batch->td_count = (usb_batch->size + usb_batch->ep->max_packet_size - 1)
    101101                / usb_batch->ep->max_packet_size;
    102102
     
    190190        }
    191191
    192         assert(batch->transferred_size <= batch->buffer_size);
     192        assert(batch->transferred_size <= batch->size);
    193193
    194194        return true;
     
    228228
    229229        size_t td = 0;
    230         size_t remain_size = uhci_batch->base.buffer_size;
     230        size_t remain_size = uhci_batch->base.size;
    231231        char *buffer = uhci_transfer_batch_data_buffer(uhci_batch);
    232232
     
    297297        size_t td = 1;
    298298        unsigned toggle = 1;
    299         size_t remain_size = uhci_batch->base.buffer_size;
     299        size_t remain_size = uhci_batch->base.size;
    300300        char *buffer = uhci_transfer_batch_data_buffer(uhci_batch);
    301301
  • uspace/drv/bus/usb/uhci/uhci_rh.c

    r2f762a7 r1d758fc  
    107107                batch->error = virthub_base_request(&instance->base, batch->target,
    108108                    batch->dir, (void*) batch->setup.buffer,
    109                     batch->dma_buffer.virt, batch->buffer_size, &batch->transferred_size);
     109                    batch->dma_buffer.virt, batch->size, &batch->transferred_size);
    110110                if (batch->error == ENAK)
    111111                        async_usleep(instance->base.endpoint_descriptor.poll_interval * 1000);
  • uspace/drv/bus/usb/vhc/transfer.c

    r2f762a7 r1d758fc  
    7272                        rc = usbvirt_control_read(dev,
    7373                            batch->setup.buffer, USB_SETUP_PACKET_SIZE,
    74                             batch->dma_buffer.virt, batch->buffer_size,
     74                            batch->dma_buffer.virt, batch->size,
    7575                            actual_data_size);
    7676                } else {
     
    7878                        rc = usbvirt_control_write(dev,
    7979                            batch->setup.buffer, USB_SETUP_PACKET_SIZE,
    80                             batch->dma_buffer.virt, batch->buffer_size);
     80                            batch->dma_buffer.virt, batch->size);
    8181                }
    8282        } else {
     
    8484                        rc = usbvirt_data_in(dev, batch->ep->transfer_type,
    8585                            batch->ep->endpoint,
    86                             batch->dma_buffer.virt, batch->buffer_size,
     86                            batch->dma_buffer.virt, batch->size,
    8787                            actual_data_size);
    8888                } else {
     
    9090                        rc = usbvirt_data_out(dev, batch->ep->transfer_type,
    9191                            batch->ep->endpoint,
    92                             batch->dma_buffer.virt, batch->buffer_size);
     92                            batch->dma_buffer.virt, batch->size);
    9393                }
    9494        }
     
    108108                        rc = usbvirt_ipc_send_control_read(sess,
    109109                            batch->setup.buffer, USB_SETUP_PACKET_SIZE,
    110                             batch->dma_buffer.virt, batch->buffer_size,
     110                            batch->dma_buffer.virt, batch->size,
    111111                            actual_data_size);
    112112                } else {
     
    114114                        rc = usbvirt_ipc_send_control_write(sess,
    115115                            batch->setup.buffer, USB_SETUP_PACKET_SIZE,
    116                             batch->dma_buffer.virt, batch->buffer_size);
     116                            batch->dma_buffer.virt, batch->size);
    117117                }
    118118        } else {
     
    120120                        rc = usbvirt_ipc_send_data_in(sess, batch->ep->endpoint,
    121121                            batch->ep->transfer_type,
    122                             batch->dma_buffer.virt, batch->buffer_size,
     122                            batch->dma_buffer.virt, batch->size,
    123123                            actual_data_size);
    124124                } else {
     
    126126                        rc = usbvirt_ipc_send_data_out(sess, batch->ep->endpoint,
    127127                            batch->ep->transfer_type,
    128                             batch->dma_buffer.virt, batch->buffer_size);
     128                            batch->dma_buffer.virt, batch->size);
    129129                }
    130130        }
  • uspace/drv/bus/usb/xhci/commands.c

    r2f762a7 r1d758fc  
    470470        xhci_trb_clean(&cmd->_header.trb);
    471471
    472         TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
     472        const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
     473        TRB_SET_ICTX(cmd->_header.trb, phys);
    473474
    474475        /**
     
    496497                assert(dma_buffer_is_set(&cmd->input_ctx));
    497498
    498                 TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
     499                const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
     500                TRB_SET_ICTX(cmd->_header.trb, phys);
    499501        }
    500502
     
    520522        xhci_trb_clean(&cmd->_header.trb);
    521523
    522         TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
     524        const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
     525        TRB_SET_ICTX(cmd->_header.trb, phys);
    523526
    524527        TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
     
    594597        xhci_trb_clean(&cmd->_header.trb);
    595598
    596         TRB_SET_ICTX(cmd->_header.trb, cmd->bandwidth_ctx.phys);
     599        const uintptr_t phys = dma_buffer_phys_base(&cmd->bandwidth_ctx);
     600        TRB_SET_ICTX(cmd->_header.trb, phys);
    597601
    598602        TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
  • uspace/drv/bus/usb/xhci/endpoint.c

    r2f762a7 r1d758fc  
    116116                goto err;
    117117
    118         /* Driver can handle non-contiguous buffers */
    119         ep->transfer_buffer_policy &= ~DMA_POLICY_CONTIGUOUS;
    120 
    121         /* Driver can handle buffers crossing boundaries */
    122         ep->transfer_buffer_policy &= ~DMA_POLICY_NOT_CROSSING;
     118        unsigned flags = -1U;
    123119
    124120        /* Some xHCs can handle 64-bit addresses */
    125121        xhci_bus_t *bus = bus_to_xhci_bus(ep->device->bus);
    126122        if (bus->hc->ac64)
    127                 ep->transfer_buffer_policy &= ~DMA_POLICY_4GiB;
     123                flags &= ~DMA_POLICY_4GiB;
     124
     125        /* xHCI works best if a transfer chunk of up to 64 KiB fits in one TRB */
     126        ep->transfer_buffer_policy = dma_policy_create(flags, 1 << 16);
     127
     128        /* But it can actually do full scatter-gather. */
     129        ep->required_transfer_buffer_policy = dma_policy_create(flags, PAGE_SIZE);
    128130
    129131        return EOK;
  • uspace/drv/bus/usb/xhci/hc.c

    r2f762a7 r1d758fc  
    476476                return ETIMEOUT;
    477477
    478         XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP, hc->dcbaa_dma.phys);
     478        uintptr_t dcbaa_phys = dma_buffer_phys_base(&hc->dcbaa_dma);
     479        XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP, dcbaa_phys);
    479480        XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, hc->max_slots);
    480481
     
    490491        XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
    491492        XHCI_REG_WR(intr0, XHCI_INTR_ERDP, hc->event_ring.dequeue_ptr);
    492         XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA, hc->event_ring.erst.phys);
     493
     494        const uintptr_t erstba_phys = dma_buffer_phys_base(&hc->event_ring.erst);
     495        XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA, erstba_phys);
    493496
    494497        if (hc->base.irq_cap > 0) {
     
    799802        if (err == EOK) {
    800803                dev->slot_id = cmd.slot_id;
    801                 hc->dcbaa[dev->slot_id] = host2xhci(64, dev->dev_ctx.phys);
     804                hc->dcbaa[dev->slot_id] =
     805                    host2xhci(64, dma_buffer_phys_base(&dev->dev_ctx));
    802806        }
    803807
  • uspace/drv/bus/usb/xhci/isoch.c

    r2f762a7 r1d758fc  
    176176        xhci_trb_clean(&trb);
    177177
    178         trb.parameter = it->data.phys;
     178        trb.parameter = host2xhci(64, dma_buffer_phys_base(&it->data));
    179179        TRB_CTRL_SET_XFER_LEN(trb, it->size);
    180180        TRB_CTRL_SET_TD_SIZE(trb, 0);
     
    481481
    482482        /* This shall be already checked by endpoint */
    483         assert(transfer->batch.buffer_size <= ep->base.max_transfer_size);
     483        assert(transfer->batch.size <= ep->base.max_transfer_size);
    484484
    485485        fibril_mutex_lock(&isoch->guard);
     
    521521
    522522        /* Prepare the transfer. */
    523         it->size = transfer->batch.buffer_size;
     523        it->size = transfer->batch.size;
    524524        memcpy(it->data.virt, transfer->batch.dma_buffer.virt, it->size);
    525525        it->state = ISOCH_FILLED;
     
    544544        xhci_isoch_t * const isoch = ep->isoch;
    545545
    546         if (transfer->batch.buffer_size < ep->base.max_transfer_size) {
     546        if (transfer->batch.size < ep->base.max_transfer_size) {
    547547                usb_log_error("Cannot schedule an undersized isochronous transfer.");
    548548                return ELIMIT;
  • uspace/drv/bus/usb/xhci/scratchpad.c

    r2f762a7 r1d758fc  
    7272        memset(hc->scratchpad_array.virt, 0, size);
    7373
    74         uint64_t phys_begin = hc->scratchpad_array.phys + array_size;
     74        const char *base = hc->scratchpad_array.virt + array_size;
    7575        uint64_t *array = hc->scratchpad_array.virt;
    7676
    77         for (unsigned i = 0; i < num_bufs; ++i)
    78                 array[i] = host2xhci(64, phys_begin + i * PAGE_SIZE);
     77        for (unsigned i = 0; i < num_bufs; ++i) {
     78                array[i] = host2xhci(64, dma_buffer_phys(&hc->scratchpad_array,
     79                            base + i * PAGE_SIZE));
     80        }
    7981
    80         hc->dcbaa[0] = host2xhci(64, hc->scratchpad_array.phys);
     82        hc->dcbaa[0] = host2xhci(64, dma_buffer_phys_base(&hc->scratchpad_array));
    8183
    8284        usb_log_debug("Allocated %d scratchpad buffers.", num_bufs);
  • uspace/drv/bus/usb/xhci/streams.c

    r2f762a7 r1d758fc  
    239239        data->secondary_stream_ctx_array = data->secondary_stream_ctx_dma.virt;
    240240
    241         XHCI_STREAM_DEQ_PTR_SET(*ctx, data->secondary_stream_ctx_dma.phys);
     241        XHCI_STREAM_DEQ_PTR_SET(*ctx, dma_buffer_phys_base(&data->secondary_stream_ctx_dma));
    242242        XHCI_STREAM_SCT_SET(*ctx, fnzb32(count) + 1);
    243243
     
    283283
    284284        XHCI_EP_MAX_P_STREAMS_SET(*ctx, pstreams);
    285         XHCI_EP_TR_DPTR_SET(*ctx, xhci_ep->primary_stream_ctx_dma.phys);
     285        XHCI_EP_TR_DPTR_SET(*ctx, dma_buffer_phys_base(&xhci_ep->primary_stream_ctx_dma));
    286286        XHCI_EP_LSA_SET(*ctx, lsa);
    287287}
  • uspace/drv/bus/usb/xhci/transfers.c

    r2f762a7 r1d758fc  
    126126static int calculate_trb_count(xhci_transfer_t *transfer)
    127127{
    128         const size_t size = transfer->batch.buffer_size;
     128        const size_t size = transfer->batch.size;
    129129        return (size + PAGE_SIZE - 1 )/ PAGE_SIZE;
    130130}
     
    184184                int stage_dir = REQUEST_TYPE_IS_DEVICE_TO_HOST(setup->request_type)
    185185                                        ? STAGE_IN : STAGE_OUT;
    186                 size_t remaining = transfer->batch.buffer_size;
     186                size_t remaining = transfer->batch.size;
    187187
    188188                for (size_t i = 0; i < buffer_count; ++i) {
     
    227227                const size_t buffer_count = calculate_trb_count(transfer);
    228228                xhci_trb_t trbs[buffer_count];
    229                 size_t remaining = transfer->batch.buffer_size;
     229                size_t remaining = transfer->batch.size;
    230230
    231231                for (size_t i = 0; i < buffer_count; ++i) {
     
    254254                const size_t buffer_count = calculate_trb_count(transfer);
    255255                xhci_trb_t trbs[buffer_count + 1];
    256                 size_t remaining = transfer->batch.buffer_size;
     256                size_t remaining = transfer->batch.size;
    257257
    258258                for (size_t i = 0; i < buffer_count; ++i) {
     
    278278        const size_t buffer_count = calculate_trb_count(transfer);
    279279        xhci_trb_t trbs[buffer_count];
    280         size_t remaining = transfer->batch.buffer_size;
     280        size_t remaining = transfer->batch.size;
    281281
    282282        for (size_t i = 0; i < buffer_count; ++i) {
     
    372372                case XHCI_TRBC_SUCCESS:
    373373                        batch->error = EOK;
    374                         batch->transferred_size = batch->buffer_size - TRB_TRANSFER_LENGTH(*trb);
     374                        batch->transferred_size = batch->size - TRB_TRANSFER_LENGTH(*trb);
    375375                        break;
    376376
     
    416416        }
    417417
    418         assert(batch->transferred_size <= batch->buffer_size);
     418        assert(batch->transferred_size <= batch->size);
    419419
    420420        usb_transfer_batch_finish(batch);
  • uspace/drv/bus/usb/xhci/trb_ring.c

    r2f762a7 r1d758fc  
    8989static errno_t trb_segment_alloc(trb_segment_t **segment)
    9090{
    91         dma_buffer_t dbuf;
    92 
    93         const errno_t err = dma_buffer_alloc(&dbuf, PAGE_SIZE);
     91        *segment = AS_AREA_ANY;
     92        uintptr_t phys;
     93
     94        const int err = dmamem_map_anonymous(PAGE_SIZE,
     95            DMAMEM_4GiB, AS_AREA_READ | AS_AREA_WRITE, 0,
     96            &phys, (void **) segment);
    9497        if (err)
    9598                return err;
    9699
    97         *segment = dbuf.virt;
    98100        memset(*segment, 0, PAGE_SIZE);
    99         (*segment)->phys = dbuf.phys;
     101        (*segment)->phys = phys;
    100102        usb_log_debug("Allocated new ring segment.");
    101103        return EOK;
     
    104106static void trb_segment_free(trb_segment_t *segment)
    105107{
    106         dma_buffer_t dbuf = { .virt = segment, .phys = segment->phys };
    107         dma_buffer_free(&dbuf);
     108        dmamem_unmap_anonymous(segment);
    108109}
    109110
  • uspace/lib/drv/generic/remote_usbhc.c

    r2f762a7 r1d758fc  
    210210                        ? AS_AREA_WRITE : AS_AREA_READ;
    211211
    212                 const errno_t ret = async_share_out_start(exch, req->base, flags);
     212                const errno_t ret = async_share_out_start(exch, req->buffer.virt, flags);
    213213                if (ret != EOK) {
    214214                        async_forget(opening_request);
     
    374374                return;
    375375        }
    376         if (trans->request.base != NULL) {
    377                 as_area_destroy(trans->request.base);
     376        if (trans->request.buffer.virt != NULL) {
     377                as_area_destroy(trans->request.buffer.virt);
    378378        }
    379379
     
    422422        }
    423423
    424         if ((err = async_share_out_finalize(data_callid, &trans->request.base)))
     424        if ((err = async_share_out_finalize(data_callid, &trans->request.buffer.virt)))
    425425                return err;
    426426
    427427        /*
    428          * As we're going to check the mapping, we must make sure the memory is
    429          * actually mapped. We must do it right now, because the area might be
    430          * read-only or write-only, and we may be unsure later.
     428         * As we're going to get physical addresses of the mapping, we must make
     429         * sure the memory is actually mapped. We must do it right now, because
     430         * the area might be read-only or write-only, and we may be unsure
     431         * later.
    431432         */
    432433        if (flags & AS_AREA_READ) {
    433434                char foo = 0;
    434                 volatile const char *buf = trans->request.base + trans->request.offset;
     435                volatile const char *buf = trans->request.buffer.virt + trans->request.offset;
    435436                for (size_t i = 0; i < size; i += PAGE_SIZE)
    436437                        foo += buf[i];
    437438        } else {
    438                 volatile char *buf = trans->request.base + trans->request.offset;
     439                volatile char *buf = trans->request.buffer.virt + trans->request.offset;
    439440                for (size_t i = 0; i < size; i += PAGE_SIZE)
    440441                        buf[i] = 0xff;
     
    482483        } else {
    483484                /* The value was valid on the other side, for us, its garbage. */
    484                 trans->request.base = NULL;
     485                trans->request.buffer.virt = NULL;
    485486        }
    486487
  • uspace/lib/drv/include/usbhc_iface.h

    r2f762a7 r1d758fc  
    104104
    105105// FIXME: DMA buffers shall be part of libdrv anyway.
    106 typedef unsigned dma_policy_t;
     106typedef uintptr_t dma_policy_t;
     107
     108typedef struct dma_buffer {
     109        void *virt;
     110        dma_policy_t policy;
     111} dma_buffer_t;
    107112
    108113typedef struct usb_pipe_desc {
     
    134139
    135140        /**
    136          * Base address of the buffer to share. Must be at least offset + size
    137          * large. Is patched after being transmitted over IPC, so the pointer is
    138          * still valid.
    139          *
    140          * Note that offset might be actually more than PAGE_SIZE.
     141         * The DMA buffer to share. Must be at least offset + size large. Is
     142         * patched after being transmitted over IPC, so the pointer is still
     143         * valid.
    141144         */
    142         void *base;
     145        dma_buffer_t buffer;
    143146        size_t offset;                  /**< Offset to the buffer */
    144147        size_t size;                    /**< Requested size. */
    145         dma_policy_t buffer_policy;     /**< Properties of the buffer. */
    146148} usbhc_iface_transfer_request_t;
    147149
  • uspace/lib/usb/include/usb/dma_buffer.h

    r2f762a7 r1d758fc  
    3737 * shared through IPC).
    3838 *
    39  * Note that although allocated memory is always page-aligned, the buffer itself
    40  * may be only a part of it, justifying the existence of page-alignment and
    41  * page-crossing flags.
     39 * Currently, it is possible to allocate either completely contiguous buffers
     40 * (with dmamem_map_anonymous) or arbitrary memory (with as_area_create). Should
     41 * the kernel be updated, this is a candidate for major memory usage
     42 * optimization. The other way to do it without kernel support is to build a
     43 * userspace IO vector, similar to the way QEMU does it.
    4244 *
    43  * Also, currently the buffers that are allocated are always contiguous and
    44  * page-aligned, regardless of whether the policy requires it. We blindly
    45  * believe this fact in dma_buffer_phys, which will yield wrong results if the
    46  * buffer is not contiguous.
     45 * The structures themselves are defined in usbhc_iface, because they need to be
     46 * passed through IPC.
    4747 */
    4848#ifndef LIB_USB_DMA_BUFFER
    4949#define LIB_USB_DMA_BUFFER
    5050
     51#include <as.h>
     52#include <bitops.h>
     53#include <errno.h>
    5154#include <stdint.h>
    5255#include <stdlib.h>
    5356#include <usbhc_iface.h>
    54 #include <errno.h>
    5557
    56 #define DMA_POLICY_4GiB         (1<<0)  /**< Must use only 32-bit addresses */
    57 #define DMA_POLICY_PAGE_ALIGNED (1<<1)  /**< The first pointer must be page-aligned */
    58 #define DMA_POLICY_CONTIGUOUS   (1<<2)  /**< Pages must follow each other physically */
    59 #define DMA_POLICY_NOT_CROSSING (1<<3)  /**< Buffer must not cross page boundary. (Implies buffer is no larger than page).  */
     58/**
     59 * The DMA policy describes properties of the buffer. It is used in two
     60 * different contexts. Either it represents requirements, which shall be
     61 * satisfied to avoid copying the buffer to a more strict one. Or, it is the
     62 * actual property of the buffer, which can be more strict than requested. It
     63 * always holds that more bits set means more restrictive policy, and that by
     64 * computing a bitwise OR one gets the restriction that holds for both.
     65 *
     66 * The high bits of a DMA policy represent physical contiguity. If bit i is
     67 * set, it means that chunks of size 2^(i+1) are contiguous in memory. Bit i
     68 * shall never be set while a lower bit j < i is clear.
     69 *
     70 * The above applies for i >= PAGE_WIDTH. The lower bits are used as flags.
     71 */
     72#define DMA_POLICY_FLAGS_MASK           (PAGE_SIZE - 1)
     73#define DMA_POLICY_CHUNK_SIZE_MASK      (~DMA_POLICY_FLAGS_MASK)
    6074
    61 #define DMA_POLICY_STRICT       (-1U)
    62 #define DMA_POLICY_DEFAULT      DMA_POLICY_STRICT
     75#define DMA_POLICY_4GiB (1<<0)          /**< Must use only 32-bit addresses */
    6376
    64 typedef struct dma_buffer {
    65         void *virt;
    66         uintptr_t phys;
    67 } dma_buffer_t;
     77#define DMA_POLICY_STRICT               (-1UL)
     78#define DMA_POLICY_DEFAULT              DMA_POLICY_STRICT
     79
     80extern dma_policy_t dma_policy_create(unsigned, size_t);
     81
     82/**
     83 * Get mask which defines bits of offset in chunk.
     84 */
     85static inline size_t dma_policy_chunk_mask(const dma_policy_t policy)
     86{
     87        return policy | DMA_POLICY_FLAGS_MASK;
     88}
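
    To make the encoding concrete, a small sketch (illustrative only; it
    assumes 4 KiB pages, i.e. PAGE_SIZE == 0x1000):

        #include <assert.h>
        #include <usb/dma_buffer.h>

        static void policy_example(void)
        {
                /* 64 KiB contiguous chunks, 32-bit addresses required. */
                const dma_policy_t p =
                    dma_policy_create(DMA_POLICY_4GiB, 1 << 16);

                /* The low bits (below PAGE_SIZE) carry the flags... */
                assert((p & DMA_POLICY_FLAGS_MASK) == DMA_POLICY_4GiB);

                /* ...while the high bits encode chunk_size - 1, so the
                 * offset-in-chunk mask comes out as 0xFFFF. */
                assert(dma_policy_chunk_mask(p) == (1 << 16) - 1);
        }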
    6889
    6990extern errno_t dma_buffer_alloc(dma_buffer_t *db, size_t size);
    7091extern errno_t dma_buffer_alloc_policy(dma_buffer_t *, size_t, dma_policy_t);
    7192extern void dma_buffer_free(dma_buffer_t *);
    72 extern uintptr_t dma_buffer_phys(const dma_buffer_t *, void *);
    7393
    74 extern bool dma_buffer_check_policy(const void *, size_t, const dma_policy_t);
     94extern uintptr_t dma_buffer_phys(const dma_buffer_t *, const void *);
     95
     96static inline uintptr_t dma_buffer_phys_base(const dma_buffer_t *db)
     97{
     98        return dma_buffer_phys(db, db->virt);
     99}
    75100
    76101extern errno_t dma_buffer_lock(dma_buffer_t *, void *, size_t);
    77102extern void dma_buffer_unlock(dma_buffer_t *, size_t);
    78103
    79 static inline int dma_buffer_is_set(dma_buffer_t *db)
     104extern void dma_buffer_acquire(dma_buffer_t *);
     105extern void dma_buffer_release(dma_buffer_t *);
     106
     107static inline bool dma_buffer_is_set(const dma_buffer_t *db)
    80108{
    81109        return !!db->virt;
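
    A minimal usage sketch of the resulting API (hw_write_register is a
    hypothetical driver call, standing in for whatever register the hardware
    expects the physical address in):

        #include <usb/dma_buffer.h>

        static errno_t dma_example(void)
        {
                dma_buffer_t db;
                const errno_t err = dma_buffer_alloc(&db, PAGE_SIZE);
                if (err)
                        return err;

                /* The physical address is computed on demand, not stored. */
                hw_write_register(dma_buffer_phys_base(&db));

                dma_buffer_free(&db);
                return EOK;
        }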
  • uspace/lib/usb/src/dma_buffer.c

    r2f762a7 r1d758fc  
    3939#include "usb/dma_buffer.h"
    4040
     41dma_policy_t dma_policy_create(unsigned flags, size_t chunk_size)
     42{
     43        assert((chunk_size & (chunk_size - 1)) == 0); /* Check if power of 2 */
     44        assert(chunk_size >= PAGE_SIZE || chunk_size == 0);
     45
     46        return ((chunk_size - 1) & DMA_POLICY_CHUNK_SIZE_MASK)
     47                | (flags & DMA_POLICY_FLAGS_MASK);
     48}
     49
     50/**
     51 * As the driver is typically using only a few buffers at once, we cache the
     52 * physical mapping to avoid calling the kernel unnecessarily often. This cache
     53 * is global for a task.
     54 *
     55 * TODO: "few" is currently limited to one.
     56 */
     57static struct {
     58        const void *last;
     59        uintptr_t phys;
     60} phys_mapping_cache = { 0 };
     61
     62static void cache_insert(const void *v, uintptr_t p)
     63{
     64        phys_mapping_cache.last = v;
     65        phys_mapping_cache.phys = p;
     66}
     67
     68static void cache_evict(const void *v)
     69{
     70        if (phys_mapping_cache.last == v)
     71                phys_mapping_cache.last = NULL;
     72}
     73
     74static bool cache_find(const void *v, uintptr_t *p)
     75{
     76        *p = phys_mapping_cache.phys;
     77        return phys_mapping_cache.last == v;
     78}
     79
    4180/**
    4281 * Allocate a DMA buffer.
    43  *
    44  * XXX: Currently cannot make much use of missing constraints, as it always
    45  * allocates page-aligned contiguous buffer. We rely on it in dma_buffer_phys.
    4682 *
    4783 * @param[in] db dma_buffer_t structure to fill
     
    6298        void *address = AS_AREA_ANY;
    6399
    64         const int ret = dmamem_map_anonymous(real_size,
     100        const int err = dmamem_map_anonymous(real_size,
    65101            flags, AS_AREA_READ | AS_AREA_WRITE, 0,
    66102            &phys, &address);
    67 
    68         if (ret == EOK) {
    69                 /* Access the pages to force mapping */
    70                 volatile char *buf = address;
    71                 for (size_t i = 0; i < size; i += PAGE_SIZE)
    72                         buf[i] = 0xff;
    73 
    74                 db->virt = address;
    75                 db->phys = phys;
    76         }
    77         return ret;
     103        if (err)
     104                return err;
     105
     106        /* Access the pages to force mapping */
     107        volatile char *buf = address;
     108        for (size_t i = 0; i < size; i += PAGE_SIZE)
     109                buf[i] = 0xff;
     110
     111        db->virt = address;
     112        db->policy = dma_policy_create(policy, 0);
     113        cache_insert(db->virt, phys);
     114
     115        return EOK;
    78116}
    79117
     
    102140                dmamem_unmap_anonymous(db->virt);
    103141                db->virt = NULL;
    104                 db->phys = 0;
     142                db->policy = 0;
    105143        }
    106144}
     
    112150 * @param[in] virt Pointer somewhere inside db
    113151 */
    114 uintptr_t dma_buffer_phys(const dma_buffer_t *db, void *virt)
    115 {
    116         return db->phys + (virt - db->virt);
    117 }
    118 
    119 /**
    120  * Check whether a memory area is compatible with a policy.
    121  *
    122  * Useful to skip copying when the buffer is already ready to be given to
    123  * hardware as is.
    124  *
    125  * Note that the "as_get_physical_mapping" fails when the page is not mapped
    126  * yet, and that the caller is responsible for forcing the mapping.
    127  */
    128 bool dma_buffer_check_policy(const void *buffer, size_t size, const dma_policy_t policy)
    129 {
    130         uintptr_t addr = (uintptr_t) buffer;
    131 
    132         const bool check_4gib       = !!(policy & DMA_POLICY_4GiB);
    133         const bool check_crossing   = !!(policy & DMA_POLICY_NOT_CROSSING);
    134         const bool check_alignment  = !!(policy & DMA_POLICY_PAGE_ALIGNED);
    135         const bool check_contiguous = !!(policy & DMA_POLICY_CONTIGUOUS);
    136 
    137         /* Check the two conditions that are easy */
    138         if (check_crossing && (addr + size - 1) / PAGE_SIZE != addr / PAGE_SIZE)
    139                 goto violated;
    140 
    141         if (check_alignment && ((uintptr_t) buffer) % PAGE_SIZE)
    142                 goto violated;
    143 
    144         /*
    145          * For these conditions, we need to walk through pages and check
    146          * physical address of each one
    147          */
    148         if (check_contiguous || check_4gib) {
    149                 const void *virt = buffer;
    150                 uintptr_t phys;
    151 
    152                 /* Get the mapping of the first page */
    153                 if (as_get_physical_mapping(virt, &phys))
    154                         goto error;
    155 
    156                 /* First page can already break 4GiB condition */
    157                 if (check_4gib && (phys & DMAMEM_4GiB) != 0)
    158                         goto violated;
    159 
    160                 while (size >= PAGE_SIZE) {
    161                         /* Move to the next page */
    162                         virt += PAGE_SIZE;
    163                         size -= PAGE_SIZE;
    164 
    165                         uintptr_t last_phys = phys;
    166                         if (as_get_physical_mapping(virt, &phys))
    167                                 goto error;
    168 
    169                         if (check_contiguous && (phys - last_phys) != PAGE_SIZE)
    170                                 goto violated;
    171 
    172                         if (check_4gib && (phys & DMAMEM_4GiB) != 0)
    173                                 goto violated;
    174                 }
    175         }
    176 
    177         /* All checks passed */
     152uintptr_t dma_buffer_phys(const dma_buffer_t *db, const void *virt)
     153{
     154        const size_t chunk_mask = dma_policy_chunk_mask(db->policy);
     155        const uintptr_t offset = (virt - db->virt) & chunk_mask;
     156        const void *chunk_base = virt - offset;
     157
     158        uintptr_t phys;
     159
     160        if (!cache_find(chunk_base, &phys)) {
     161                if (as_get_physical_mapping(chunk_base, &phys))
     162                        return 0;
     163                cache_insert(chunk_base, phys);
     164        }
     165
     166        return phys + offset;
     167}
     168
     169static bool dma_buffer_is_4gib(dma_buffer_t *db, size_t size)
     170{
      171        if (sizeof(uintptr_t) <= 4)
      172                return true;
      173
      174        const size_t chunk_size = dma_policy_chunk_mask(db->policy) + 1;
      175        const size_t chunks = chunk_size ? size / chunk_size : 1;
     176
     177        for (size_t c = 0; c < chunks; c++) {
     178                const void *addr = db->virt + (c * chunk_size);
     179                const uintptr_t phys = dma_buffer_phys(db, addr);
      180
     181                if ((phys & DMAMEM_4GiB) != 0)
     182                        return false;
     183        }
     184
    178185        return true;
    179 
    180 violated:
    181 error:
    182         return false;
    183186}
    184187
     
    192195errno_t dma_buffer_lock(dma_buffer_t *db, void *virt, size_t size)
    193196{
     197        assert(virt);
     198
     199        uintptr_t phys;
     200
     201        const errno_t err = dmamem_map(virt, size, 0, 0, &phys);
     202        if (err)
     203                return err;
     204
    194205        db->virt = virt;
    195         return dmamem_map(db->virt, size, 0, 0, &db->phys);
     206        db->policy = dma_policy_create(0, PAGE_SIZE);
     207        cache_insert(virt, phys);
     208
     209        unsigned flags = -1U;
     210        if (!dma_buffer_is_4gib(db, size))
     211                flags &= ~DMA_POLICY_4GiB;
     212        db->policy = dma_policy_create(flags, PAGE_SIZE);
     213
     214        return EOK;
    196215}
    197216
     
    204223                dmamem_unmap(db->virt, size);
    205224                db->virt = NULL;
    206                 db->phys = 0;
    207         }
     225                db->policy = 0;
     226        }
     227}
     228
     229/**
      230 * Must be called when the buffer is received over IPC. Clears a potentially
      231 * leftover cache entry from a different buffer mapped at the same virtual address.
      232 */
     233void dma_buffer_acquire(dma_buffer_t *db)
     234{
     235        cache_evict(db->virt);
     236}
     237
     238/**
     239 * Counterpart of acquire.
     240 */
     241void dma_buffer_release(dma_buffer_t *db)
     242{
     243        cache_evict(db->virt);
    208244}
    209245
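
    A sketch of the intended lifecycle for a buffer received over IPC
    (program_dma is a hypothetical driver call):

        /* batch->dma_buffer has just arrived over IPC; evict any stale
         * cache entry left by a previous mapping at the same address. */
        dma_buffer_acquire(&batch->dma_buffer);

        program_dma(dma_buffer_phys_base(&batch->dma_buffer));
        /* ... wait for the transfer to complete ... */

        dma_buffer_release(&batch->dma_buffer);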
  • uspace/lib/usbdev/src/pipes.c

    r2f762a7 r1d758fc  
    8383
    8484        /* Only control writes make sense without buffer */
    85         if ((t->dir != USB_DIRECTION_OUT || !t->is_control)
    86             && (t->req.base == NULL || t->req.size == 0))
     85        if ((t->dir != USB_DIRECTION_OUT || !t->is_control) && t->req.size == 0)
    8786                return EINVAL;
    8887
    8988        /* Nonzero size requires buffer */
    90         if (t->req.base == NULL && t->req.size != 0)
     89        if (!dma_buffer_is_set(&t->req.buffer) && t->req.size != 0)
    9190                return EINVAL;
    9291
     
    119118/**
    120119 * Setup the transfer request inside transfer according to dma buffer provided.
     120 *
     121 * TODO: The buffer could have been allocated as a more strict one. Currently,
     122 * we assume that the policy is just the requested one.
    121123 */
    122124static void setup_dma_buffer(transfer_t *t, void *base, void *ptr, size_t size)
    123125{
    124         t->req.base = base;
     126        t->req.buffer.virt = base;
     127        t->req.buffer.policy = t->pipe->desc.transfer_buffer_policy;
    125128        t->req.offset = ptr - base;
    126129        t->req.size = size;
    127         t->req.buffer_policy = t->pipe->desc.transfer_buffer_policy;
    128130}
    129131
     
    133135static errno_t transfer_wrap_dma(transfer_t *t, void *buf, size_t size)
    134136{
     137        if (size == 0) {
     138                setup_dma_buffer(t, NULL, NULL, 0);
     139                return transfer_common(t);
     140        }
     141
    135142        void *dma_buf = usb_pipe_alloc_buffer(t->pipe, size);
    136143        setup_dma_buffer(t, dma_buf, dma_buf, size);
     
    364371        .direction = USB_DIRECTION_BOTH,
    365372        .max_transfer_size = CTRL_PIPE_MIN_PACKET_SIZE,
     373        .transfer_buffer_policy = DMA_POLICY_STRICT,
    366374};
    367375
  • uspace/lib/usbhost/include/usb/host/bus.h

    r2f762a7 r1d758fc  
    153153int bus_device_offline(device_t *);
    154154
    155 int bus_device_send_batch(device_t *, usb_target_t,
    156     usb_direction_t direction, char *, size_t, uint64_t,
    157     usbhc_iface_transfer_callback_t, void *, const char *);
     155/**
      156 * A pro forma of a USB transfer batch. As opposed to the transfer batch, which
      157 * is supposed to be a dynamic structure, this one is static and descriptive only.
     158 * Its fields are copied to the final batch.
     159 */
     160typedef struct transfer_request {
     161        usb_target_t target;
     162        usb_direction_t dir;
     163
     164        dma_buffer_t buffer;
     165        size_t offset, size;
     166        uint64_t setup;
     167
     168        usbhc_iface_transfer_callback_t on_complete;
     169        void *arg;
     170
     171        const char *name;
     172} transfer_request_t;
     173
     174int bus_issue_transfer(device_t *, const transfer_request_t *);
    158175
    159176errno_t bus_device_send_batch_sync(device_t *, usb_target_t,
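
    A sketch of how a request is filled, mirroring the synchronous path in
    bus.c below (my_callback and my_arg are hypothetical):

        transfer_request_t request = {
                .target = target,
                .dir = USB_DIRECTION_IN,
                .offset = ((uintptr_t) data) % PAGE_SIZE,
                .size = size,
                .setup = setup_data,
                .on_complete = my_callback,
                .arg = my_arg,
                .name = "READ",
        };

        /* Lock the caller's buffer for DMA, page-aligned. */
        errno_t err = dma_buffer_lock(&request.buffer,
            data - request.offset, size);
        if (!err)
                err = bus_issue_transfer(device, &request);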
  • uspace/lib/usbhost/include/usb/host/endpoint.h

    r2f762a7 r1d758fc  
    5151typedef struct bus bus_t;
    5252typedef struct device device_t;
     53typedef struct transfer_request transfer_request_t;
    5354typedef struct usb_transfer_batch usb_transfer_batch_t;
    5455
     
    9899        /** Maximum size of one transfer */
    99100        size_t max_transfer_size;
    100         /** Policy for transfer buffers */
    101         dma_policy_t transfer_buffer_policy;
     101
     102        /* Policies for transfer buffers */
     103        dma_policy_t transfer_buffer_policy;            /**< A hint for optimal performance. */
     104        dma_policy_t required_transfer_buffer_policy;   /**< Enforced by the library. */
    102105
    103106        /**
     
    122125extern void endpoint_deactivate_locked(endpoint_t *);
    123126
    124 int endpoint_send_batch(endpoint_t *, usb_target_t, usb_direction_t,
    125     char *, size_t, uint64_t, usbhc_iface_transfer_callback_t, void *,
    126     const char *);
     127int endpoint_send_batch(endpoint_t *, const transfer_request_t *);
    127128
    128129static inline bus_t *endpoint_get_bus(endpoint_t *ep)
  • uspace/lib/usbhost/include/usb/host/usb_transfer_batch.h

    r2f762a7 r1d758fc  
    7070        } setup;
    7171
     72        /** DMA buffer with enforced policy */
     73        dma_buffer_t dma_buffer;
     74        /** Size of memory buffer */
     75        size_t offset, size;
     76
    7277        /**
    7378         * In case a bounce buffer is allocated, the original buffer must to be
    7479         * stored to be filled after the IN transaction is finished.
    7580         */
    76         char *buffer;
    77         /** Size of memory buffer */
    78         size_t buffer_size;
    79 
    80         /** DMA buffer with enforced policy */
    81         dma_buffer_t dma_buffer;
     81        char *original_buffer;
    8282        bool is_bounced;
    8383
     
    107107        usb_str_transfer_type_short((batch).ep->transfer_type), \
    108108        usb_str_direction((batch).dir), \
    109         (batch).buffer_size, (batch).ep->max_packet_size
     109        (batch).size, (batch).ep->max_packet_size
    110110
    111111/** Wrapper for bus operation. */
     
    115115void usb_transfer_batch_init(usb_transfer_batch_t *, endpoint_t *);
    116116
     117/** Buffer handling */
     118bool usb_transfer_batch_bounce_required(usb_transfer_batch_t *);
    117119errno_t usb_transfer_batch_bounce(usb_transfer_batch_t *);
    118 /** Buffer preparation */
    119 errno_t usb_transfer_batch_prepare_buffer(usb_transfer_batch_t *, char *);
    120120
    121121/** Batch finalization. */
  • uspace/lib/usbhost/src/bus.c

    r2f762a7 r1d758fc  
    4646#include <str_error.h>
    4747#include <usb/debug.h>
     48#include <usb/dma_buffer.h>
    4849
    4950#include "endpoint.h"
     
    389390                endpoint_init(ep, device, desc);
    390391        }
     392
     393        assert((ep->required_transfer_buffer_policy & ~ep->transfer_buffer_policy) == 0);
    391394
    392395        /* Bus reference */
     
    557560
    558561/**
    559  * Initiate a transfer on the bus. Finds the target endpoint, creates
    560  * a transfer batch and schedules it.
    561  *
    562  * @param device Device for which to send the batch
    563  * @param target The target of the transfer.
    564  * @param direction A direction of the transfer.
    565  * @param data A pointer to the data buffer.
    566  * @param size Size of the data buffer.
    567  * @param setup_data Data to use in the setup stage (Control communication type)
    568  * @param on_complete Callback which is called after the batch is complete
    569  * @param arg Callback parameter.
    570  * @param name Communication identifier (for nicer output).
      562 * Assert some conditions on the transfer request. As the request is an entity
      563 * of the HC driver only, we can enforce these conditions more strictly. Invalid
      564 * values coming from devices shall already be caught at the DDF interface.
     565 */
     566static void check_request(const transfer_request_t *request)
     567{
     568        assert(usb_target_is_valid(&request->target));
     569        assert(request->dir != USB_DIRECTION_BOTH);
     570        /* Non-zero offset => size is non-zero */
     571        assert(request->offset == 0 || request->size != 0);
     572        /* Non-zero size => buffer is set */
     573        assert(request->size == 0 || dma_buffer_is_set(&request->buffer));
     574        /* Non-null arg => callback is set */
     575        assert(request->arg == NULL || request->on_complete != NULL);
     576        assert(request->name);
     577}
     578
     579/**
     580 * Initiate a transfer with given device.
     581 *
    571582 * @return Error code.
    572583 */
    573 int bus_device_send_batch(device_t *device, usb_target_t target,
    574     usb_direction_t direction, char *data, size_t size, uint64_t setup_data,
    575     usbhc_iface_transfer_callback_t on_complete, void *arg, const char *name)
    576 {
    577         assert(device->address == target.address);
     584int bus_issue_transfer(device_t *device, const transfer_request_t *request)
     585{
     586        assert(device);
     587        assert(request);
     588
     589        check_request(request);
     590        assert(device->address == request->target.address);
    578591
    579592        /* Temporary reference */
    580         endpoint_t *ep = bus_find_endpoint(device, target.endpoint, direction);
     593        endpoint_t *ep = bus_find_endpoint(device, request->target.endpoint, request->dir);
    581594        if (ep == NULL) {
    582595                usb_log_error("Endpoint(%d:%d) not registered for %s.",
    583                     device->address, target.endpoint, name);
     596                    device->address, request->target.endpoint, request->name);
    584597                return ENOENT;
    585598        }
     
    587600        assert(ep->device == device);
    588601
    589         /*
    590          * This method is already callable from HC only, so we can force these
    591          * conditions harder.
    592          * Invalid values from devices shall be caught on DDF interface already.
    593          */
    594         assert(usb_target_is_valid(&target));
    595         assert(direction != USB_DIRECTION_BOTH);
    596         assert(size == 0 || data != NULL);
    597         assert(arg == NULL || on_complete != NULL);
    598         assert(name);
    599 
    600         const int err = endpoint_send_batch(ep, target, direction,
    601             data, size, setup_data, on_complete, arg, name);
     602        const int err = endpoint_send_batch(ep, request);
    602603
    603604        /* Temporary reference */
     
    650651    const char *name, size_t *transferred_size)
    651652{
     653        int err;
    652654        sync_data_t sd = { .done = false };
    653655        fibril_mutex_initialize(&sd.done_mtx);
    654656        fibril_condvar_initialize(&sd.done_cv);
    655657
    656         const int ret = bus_device_send_batch(device, target, direction,
    657             data, size, setup_data, sync_transfer_complete, &sd, name);
    658         if (ret != EOK)
    659                 return ret;
     658        transfer_request_t request = {
     659                .target = target,
     660                .dir = direction,
     661                .offset = ((uintptr_t) data) % PAGE_SIZE,
     662                .size = size,
     663                .setup = setup_data,
     664                .on_complete = sync_transfer_complete,
     665                .arg = &sd,
     666                .name = name,
     667        };
     668
     669        if (data &&
     670            (err = dma_buffer_lock(&request.buffer, data - request.offset, size)))
     671                return err;
     672
     673        if ((err = bus_issue_transfer(device, &request))) {
     674                dma_buffer_unlock(&request.buffer, size);
     675                return err;
     676        }
    660677
    661678        /*
     
    668685        fibril_mutex_unlock(&sd.done_mtx);
    669686
     687        dma_buffer_unlock(&request.buffer, size);
     688
    670689        if (transferred_size)
    671690                *transferred_size = sd.transferred_size;
  • uspace/lib/usbhost/src/ddf_helpers.c

    r2f762a7 r1d758fc  
    4646#include <usb/descriptor.h>
    4747#include <usb/usb.h>
     48#include <usb/dma_buffer.h>
    4849#include <usb_iface.h>
    4950#include <usbhc_iface.h>
     
    271272 * @return Error code.
    272273 */
    273 static errno_t transfer(ddf_fun_t *fun, const usbhc_iface_transfer_request_t *req,
     274static errno_t transfer(ddf_fun_t *fun,
     275    const usbhc_iface_transfer_request_t *ifreq,
    274276    usbhc_iface_transfer_callback_t callback, void *arg)
    275277{
     
    280282        const usb_target_t target = {{
    281283                .address = dev->address,
    282                 .endpoint = req->endpoint,
    283                 .stream = req->stream,
     284                .endpoint = ifreq->endpoint,
     285                .stream = ifreq->stream,
    284286        }};
    285287
     
    287289                return EINVAL;
    288290
    289         if (req->size > 0 && req->base == NULL)
     291        if (ifreq->offset > 0 && ifreq->size == 0)
     292                return EINVAL;
     293
     294        if (ifreq->size > 0 && !dma_buffer_is_set(&ifreq->buffer))
    290295                return EBADMEM;
    291296
     
    293298                return EBADMEM;
    294299
    295         const char *name = (req->dir == USB_DIRECTION_IN) ? "READ" : "WRITE";
    296 
    297         char *buffer = req->base + req->offset;
    298 
    299         return bus_device_send_batch(dev, target, req->dir,
    300             buffer, req->size, req->setup,
    301             callback, arg, name);
     300        const transfer_request_t request = {
     301                .target = target,
     302                .dir = ifreq->dir,
     303                .buffer = ifreq->buffer,
     304                .offset = ifreq->offset,
     305                .size = ifreq->size,
     306                .setup = ifreq->setup,
     307                .on_complete = callback,
     308                .arg = arg,
     309                .name = (ifreq->dir == USB_DIRECTION_IN) ? "READ" : "WRITE",
     310        };
     311
     312        return bus_issue_transfer(dev, &request);
    302313}
    303314
  • uspace/lib/usbhost/src/endpoint.c

    r2f762a7 r1d758fc  
    7575        ep->max_transfer_size = ep->max_packet_size * ep->packets_per_uframe;
    7676        ep->transfer_buffer_policy = DMA_POLICY_STRICT;
     77        ep->required_transfer_buffer_policy = DMA_POLICY_STRICT;
    7778}
    7879
     
    207208 *
    208209 * @param endpoint Endpoint for which to send the batch
    209  * @param target The target of the transfer.
    210  * @param direction A direction of the transfer.
    211  * @param data A pointer to the data buffer.
    212  * @param size Size of the data buffer.
    213  * @param setup_data Data to use in the setup stage (Control communication type)
    214  * @param on_complete Callback which is called after the batch is complete
    215  * @param arg Callback parameter.
    216  * @param name Communication identifier (for nicer output).
    217  */
    218 errno_t endpoint_send_batch(endpoint_t *ep, usb_target_t target,
    219     usb_direction_t direction, char *data, size_t size, uint64_t setup_data,
    220     usbhc_iface_transfer_callback_t on_complete, void *arg, const char *name)
    221 {
    222         if (!ep)
    223                 return EBADMEM;
     210 */
     211errno_t endpoint_send_batch(endpoint_t *ep, const transfer_request_t *req)
     212{
     213        assert(ep);
     214        assert(req);
    224215
    225216        if (ep->transfer_type == USB_TRANSFER_CONTROL) {
    226                 usb_log_debug("%s %d:%d %zu/%zuB, setup %#016" PRIx64, name,
    227                     target.address, target.endpoint, size, ep->max_packet_size,
    228                     setup_data);
     217                usb_log_debug("%s %d:%d %zu/%zuB, setup %#016" PRIx64, req->name,
     218                    req->target.address, req->target.endpoint,
     219                    req->size, ep->max_packet_size,
     220                    req->setup);
    229221        } else {
    230                 usb_log_debug("%s %d:%d %zu/%zuB", name, target.address,
    231                     target.endpoint, size, ep->max_packet_size);
     222                usb_log_debug("%s %d:%d %zu/%zuB", req->name,
     223                    req->target.address, req->target.endpoint,
     224                    req->size, ep->max_packet_size);
    232225        }
    233226
     
    244237        }
    245238
     239        size_t size = req->size;
    246240        /*
    247241         * Limit transfers with reserved bandwidth to the amount reserved.
    248242         * OUT transfers are rejected, IN can be just trimmed in advance.
    249243         */
    250         if ((ep->transfer_type == USB_TRANSFER_INTERRUPT || ep->transfer_type == USB_TRANSFER_ISOCHRONOUS) && size > ep->max_transfer_size) {
    251                 if (direction == USB_DIRECTION_OUT)
     244        if (size > ep->max_transfer_size &&
     245            (ep->transfer_type == USB_TRANSFER_INTERRUPT
     246             || ep->transfer_type == USB_TRANSFER_ISOCHRONOUS)) {
     247                if (req->dir == USB_DIRECTION_OUT)
    252248                        return ENOSPC;
    253249                else
    254250                        size = ep->max_transfer_size;
    255 
    256251        }
    257252
     
    266261        }
    267262
    268         batch->target = target;
    269         batch->setup.packed = setup_data;
    270         batch->dir = direction;
    271         batch->buffer_size = size;
    272 
    273         errno_t err;
    274         if ((err = usb_transfer_batch_prepare_buffer(batch, data))) {
    275                 usb_log_warning("Failed to prepare buffer for batch: %s", str_error(err));
    276                 usb_transfer_batch_destroy(batch);
    277                 return err;
    278         }
    279 
    280         batch->on_complete = on_complete;
    281         batch->on_complete_data = arg;
     263        batch->target = req->target;
     264        batch->setup.packed = req->setup;
     265        batch->dir = req->dir;
     266        batch->size = size;
     267        batch->offset = req->offset;
     268        batch->dma_buffer = req->buffer;
     269
     270        dma_buffer_acquire(&batch->dma_buffer);
     271
     272        if (batch->offset != 0) {
     273                usb_log_debug("A transfer with nonzero offset requested.");
     274                usb_transfer_batch_bounce(batch);
     275        }
     276
     277        if (usb_transfer_batch_bounce_required(batch))
     278                usb_transfer_batch_bounce(batch);
     279
     280        batch->on_complete = req->on_complete;
     281        batch->on_complete_data = req->arg;
    282282
    283283        const int ret = ops->batch_schedule(batch);
  • uspace/lib/usbhost/src/usb_transfer_batch.c

    r2f762a7 r1d758fc  
    103103}
    104104
     105bool usb_transfer_batch_bounce_required(usb_transfer_batch_t *batch)
     106{
     107        if (!batch->size)
     108                return false;
     109
     110        unsigned flags = batch->dma_buffer.policy & DMA_POLICY_FLAGS_MASK;
     111        unsigned required_flags =
     112            batch->ep->required_transfer_buffer_policy & DMA_POLICY_FLAGS_MASK;
     113
     114        if (required_flags & ~flags)
     115                return true;
     116
     117        size_t chunk_mask = dma_policy_chunk_mask(batch->dma_buffer.policy);
     118        size_t required_chunk_mask =
     119             dma_policy_chunk_mask(batch->ep->required_transfer_buffer_policy);
     120
     121        /* If the chunks are at least as large as required, we're good */
     122        if ((required_chunk_mask & ~chunk_mask) == 0)
     123                return false;
     124
     125        size_t start_chunk = batch->offset & ~chunk_mask;
     126        size_t end_chunk = (batch->offset + batch->size - 1) & ~chunk_mask;
     127
     128        /* The requested area crosses a chunk boundary */
     129        if (start_chunk != end_chunk)
     130                return true;
     131
     132        return false;
     133}
     134
    105135errno_t usb_transfer_batch_bounce(usb_transfer_batch_t *batch)
    106136{
     
    108138        assert(!batch->is_bounced);
    109139
    110         if (dma_buffer_is_set(&batch->dma_buffer))
    111                 dma_buffer_unlock(&batch->dma_buffer, batch->buffer_size);
     140        dma_buffer_release(&batch->dma_buffer);
     141
     142        batch->original_buffer = batch->dma_buffer.virt + batch->offset;
    112143
    113144        usb_log_debug("Batch(%p): Buffer cannot be used directly, "
     
    115146
    116147        const errno_t err = dma_buffer_alloc_policy(&batch->dma_buffer,
    117             batch->buffer_size, batch->ep->transfer_buffer_policy);
     148            batch->size, batch->ep->transfer_buffer_policy);
    118149        if (err)
    119150                return err;
     
    121152        /* Copy the data out */
    122153        if (batch->dir == USB_DIRECTION_OUT)
    123                 memcpy(batch->dma_buffer.virt, batch->buffer, batch->buffer_size);
     154                memcpy(batch->dma_buffer.virt,
     155                    batch->original_buffer,
     156                    batch->size);
    124157
    125158        batch->is_bounced = true;
     159        batch->offset = 0;
     160
    126161        return err;
    127 }
    128 
    129 /**
    130  * Prepare a DMA buffer according to endpoint policy.
    131  *
    132  * If the buffer is suitable to be used directly, it is. Otherwise, a bounce
    133  * buffer is created.
    134  */
    135 errno_t usb_transfer_batch_prepare_buffer(usb_transfer_batch_t *batch, char *buf)
    136 {
    137         /* Empty transfers do not need a buffer */
    138         if (batch->buffer_size == 0)
    139                 return EOK;
    140 
    141         batch->buffer = buf;
    142 
    143         const dma_policy_t policy = batch->ep->transfer_buffer_policy;
    144 
    145         /*
    146          * We don't have enough information (yet, WIP) to know if we can skip
    147          * the bounce, so check the conditions carefully.
    148          */
    149         if (!dma_buffer_check_policy(buf, batch->buffer_size, policy))
    150                 return usb_transfer_batch_bounce(batch);
    151 
    152         /* Fill the buffer with virtual address and lock it for DMA */
    153         return dma_buffer_lock(&batch->dma_buffer, buf, batch->buffer_size);
    154162}
    155163
     
    167175            batch, USB_TRANSFER_BATCH_ARGS(*batch));
    168176
    169         if (batch->error == EOK && batch->buffer_size > 0) {
    170                 if (!batch->is_bounced) {
    171                         /* Unlock the buffer for DMA */
    172                         dma_buffer_unlock(&batch->dma_buffer,
    173                             batch->buffer_size);
     177        if (batch->error == EOK && batch->size > 0) {
     178                if (batch->is_bounced) {
      179                        /* We were forced to use a bounce buffer; copy it back */
      180                        if (batch->dir == USB_DIRECTION_IN)
      181                                memcpy(batch->original_buffer,
      182                                    batch->dma_buffer.virt,
      183                                    batch->transferred_size);
     184
     185                        dma_buffer_free(&batch->dma_buffer);
    174186                }
    175187                else {
    176                         /* We we're forced to use bounce buffer, copy it back */
    177                         if (batch->dir == USB_DIRECTION_IN)
    178                                 memcpy(batch->buffer,
    179                                     batch->dma_buffer.virt,
    180                                     batch->transferred_size);
    181 
    182                         dma_buffer_free(&batch->dma_buffer);
     188                        dma_buffer_release(&batch->dma_buffer);
    183189                }
    184190        }