系统相关
首页 > 系统相关 > Linux DMA:使用DMAengine进行分散收集事务

Linux DMA:使用DMAengine进行分散收集事务

作者:互联网

我尝试使用自定义内核驱动程序中的DMAengine API来执行分散收集操作.我有一个连续的内存区域作为源,我想通过散点列表结构将其数据复制到几个分布式缓冲区中. DMA控制器是PL330之一,它支持DMAengine API(请参见PL330 DMA controller).

我的测试代码如下:

在我的驱动程序头文件(test_driver.h)中:

/* Include guard.
 * NOTE(review): identifiers beginning with a double underscore are reserved
 * for the implementation; TEST_DRIVER_H would be safer. */
#ifndef __TEST_DRIVER_H__
#define __TEST_DRIVER_H__

#include <linux/platform_device.h>
#include <linux/device.h>

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/of_dma.h>

/* Number of scatterlist entries (= number of destination buffers). */
#define SG_ENTRIES 3
/* Size in bytes of each destination buffer. */
#define BUF_SIZE 16
/* Physical address the DMA reads from (device/source buffer).
 * NOTE(review): duplicated as BARE_RAM_BASE in dma_functions.c -- confirm
 * they are meant to stay in sync. */
#define DEV_BUF 0x10000000

/* One destination buffer filled by the scatter-gather transfer. */
struct dma_block {
    void * data;  /* kmalloc()'d buffer */
    int size;     /* buffer size in bytes */
};

/* Per-device DMA bookkeeping. */
struct dma_private_info {

    struct sg_table sgt;                 /* scatterlist over 'blocks' */

    struct dma_block * blocks;           /* array of destination buffers */
    int nblocks;                         /* number of entries in 'blocks' */

    int dma_started;                     /* non-zero while a transfer is mapped */

    struct dma_chan * dma_chan;          /* channel from dma_request_slave_channel() */
    struct dma_slave_config dma_config;  /* device-to-memory slave configuration */
    struct dma_async_tx_descriptor * dma_desc;  /* prepared SG descriptor */
    dma_cookie_t cookie;                 /* cookie returned by dmaengine_submit() */
};

/* Driver instance: the platform device plus its DMA state. */
struct test_platform_device {
    struct platform_device * pdev;

    struct dma_private_info dma_priv;
};

/* Accessors for the embedded struct device / DMA private state. */
#define _get_devp(tdev) (&((tdev)->pdev->dev))
#define _get_dmapip(tdev) (&((tdev)->dma_priv))

int dma_stop(struct test_platform_device * tdev);        /* unmap + free SG buffers */
int dma_start(struct test_platform_device * tdev);       /* fire transfer, non-blocking */
int dma_start_block(struct test_platform_device * tdev); /* fire transfer, poll to completion */
int dma_init(struct test_platform_device * tdev);        /* request the DMA channel */
int dma_exit(struct test_platform_device * tdev);        /* terminate + release channel */

#endif

在我的包含dma函数(dma_functions.c)的源代码中:

#include <linux/slab.h>

#include "test_driver.h"

#define BARE_RAM_BASE 0x10000000
#define BARE_RAM_SIZE 0x10000000

struct ram_bare {
    uint32_t * __iomem map;

    uint32_t base;
    uint32_t size;
};

/*
 * Dump each DMA destination buffer word-by-word next to a direct readback
 * of the source RAM window, so the two columns can be compared in dmesg.
 *
 * NOTE(review): this is invoked from dma_callback(), i.e. tasklet (atomic)
 * context, but ioremap() may sleep -- confirm this is safe on the target
 * kernel, or move the check to process context.
 * NOTE(review): 'p' advances linearly across ALL blocks, so this assumes
 * the source data is one contiguous region starting at BARE_RAM_BASE.
 */
static void dma_sg_check(struct test_platform_device * tdev)
{
    struct dma_private_info * dma_priv = _get_dmapip(tdev);
    struct device * dev = _get_devp(tdev);
    uint32_t * buf;
    unsigned int bufsize;
    int nwords;
    int nbytes_word = sizeof(uint32_t);
    int nblocks;
    struct ram_bare ramb;
    uint32_t * p;
    int i;
    int j;

    /* Map the raw RAM window the device is expected to have copied from. */
    ramb.map = ioremap(BARE_RAM_BASE,BARE_RAM_SIZE);
    ramb.base = BARE_RAM_BASE;
    ramb.size = BARE_RAM_SIZE;

    dev_info(dev,"nblocks: %d \n",dma_priv->nblocks);

    p = ramb.map;

    nblocks = dma_priv->nblocks;

    for( i = 0 ; i < nblocks ; i++ ) {

        buf = (uint32_t *) dma_priv->blocks[i].data;
        bufsize = dma_priv->blocks[i].size;
        nwords = dma_priv->blocks[i].size/nbytes_word;

        dev_info(dev,"block[%d],size %d: ",i,bufsize);

        /* Print DMA result vs. direct RAM read for every 32-bit word. */
        for ( j = 0 ; j <  nwords; j++, p++) {
            dev_info(dev,"DMA: 0x%x, RAM: 0x%x",buf[j],ioread32(p));
        }
    }

    iounmap(ramb.map);
}

/*
 * Release everything dma_sg_init() allocated: the per-block data buffers,
 * the block descriptor array, and the scatterlist table. Always returns 0.
 */
static int dma_sg_exit(struct test_platform_device * tdev)
{
    struct dma_private_info * dma_priv = _get_dmapip(tdev);
    int i;

    /* Free each destination buffer first... */
    for (i = 0; i < dma_priv->nblocks; i++)
        kfree(dma_priv->blocks[i].data);

    /* ...then the descriptor array that referenced them. */
    kfree(dma_priv->blocks);

    /* Finally drop the scatterlist table backing store. */
    sg_free_table(&(dma_priv->sgt));

    return 0;
}

/*
 * Tear down a mapped transfer: unmap the scatterlist from the device and
 * free the SG buffers. Must only be called after a successful
 * dma_map_sg() (i.e. while dma_started is set). Always returns 0.
 */
int dma_stop(struct test_platform_device * tdev)
{
    struct dma_private_info * dma_priv = _get_dmapip(tdev);
    struct device * dev = _get_devp(tdev);
    int ret = 0;

    /* Unmap with the same nents that was passed to dma_map_sg(). */
    dma_unmap_sg(dev,dma_priv->sgt.sgl,\
        dma_priv->sgt.nents, DMA_FROM_DEVICE);

    dma_sg_exit(tdev);

    dma_priv->dma_started = 0;

    return ret;
}

/*
 * DMA completion callback (runs from the controller driver's tasklet).
 *
 * Re-queries the channel status for the submitted cookie; on completion it
 * dumps the buffers and tears the transfer down, on error it only tears
 * down. Any other status (e.g. still in progress) is silently ignored.
 *
 * NOTE(review): dma_sg_check() calls ioremap(), which may sleep; calling
 * it from atomic callback context needs to be confirmed safe.
 */
static void dma_callback(void * param)
{
    enum dma_status dma_stat;
    struct test_platform_device * tdev = (struct test_platform_device *) param;
    struct dma_private_info * dma_priv = _get_dmapip(tdev);
    struct device * dev = _get_devp(tdev);

    dev_info(dev,"Checking the DMA state....\n");

    dma_stat = dma_async_is_tx_complete(dma_priv->dma_chan,\
        dma_priv->cookie, NULL, NULL);

    if(dma_stat == DMA_COMPLETE) {
        dev_info(dev,"DMA complete! \n");
        dma_sg_check(tdev);
        dma_stop(tdev);
    } else if (unlikely(dma_stat == DMA_ERROR)) {
        dev_info(dev,"DMA error! \n");
        dma_stop(tdev);
    }
}

/*
 * Busy-poll the channel until the submitted transaction finishes, logging
 * every status transition once.
 *
 * Fixes over the previous version:
 *  - the loop now also exits on DMA_ERROR (it previously spun forever,
 *    since only DMA_COMPLETE broke the loop);
 *  - cpu_relax() is issued between polls so the CPU is not hard-spun.
 */
static void dma_busy_loop(struct test_platform_device * tdev)
{
    struct dma_private_info * dma_priv = _get_dmapip(tdev);
    struct device * dev = _get_devp(tdev);

    enum dma_status status;
    int status_change = -1;  /* last state logged; -1 = nothing yet */

    do {
        status = dma_async_is_tx_complete(dma_priv->dma_chan, dma_priv->cookie, NULL, NULL);

        switch(status) {
        case DMA_COMPLETE:
            if(status_change != 0)
                dev_info(dev,"DMA status: COMPLETE\n");
            status_change = 0;
            break;
        case DMA_PAUSED:
            if (status_change != 1)
                dev_info(dev,"DMA status: PAUSED\n");
            status_change = 1;
            break;
        case DMA_IN_PROGRESS:
            if(status_change != 2)
                dev_info(dev,"DMA status: IN PROGRESS\n");
            status_change = 2;
            break;
        case DMA_ERROR:
            if (status_change != 3)
                dev_info(dev,"DMA status: ERROR\n");
            status_change = 3;
            break;
        default:
            dev_info(dev,"DMA status: UNKNOWN\n");
            status_change = -1;
            break;
        }

        /* Be polite to the CPU/SMT sibling while polling. */
        cpu_relax();
    } while(status != DMA_COMPLETE && status != DMA_ERROR);

    if (status == DMA_COMPLETE)
        dev_info(dev,"DMA transaction completed! \n");
    else
        dev_info(dev,"DMA transaction failed! \n");
}

/*
 * Allocate the scatterlist table plus SG_ENTRIES destination buffers and
 * point each scatterlist entry at one buffer.
 *
 * Returns 0 on success or a negative errno; on failure nothing remains
 * allocated.
 *
 * Fixes over the previous version:
 *  - the buffer-unwind loop never decremented 'i', so a mid-loop kmalloc
 *    failure spun forever repeatedly kfree()ing the same pointer;
 *  - a failed blocks-array allocation jumped past sg_free_table() and
 *    leaked the scatterlist table;
 *  - a failed sg_alloc_table() no longer calls sg_free_table() on a table
 *    sg_alloc_table() already cleaned up internally;
 *  - kcalloc() replaces the cast kmalloc(n * size) (overflow-checked,
 *    zeroed, no redundant cast).
 */
static int dma_sg_init(struct test_platform_device * tdev)
{
    struct dma_private_info * dma_priv = _get_dmapip(tdev);
    struct scatterlist *sg;
    int ret;
    int i;

    ret = sg_alloc_table(&(dma_priv->sgt), SG_ENTRIES, GFP_ATOMIC);
    if (ret)
        return ret;

    dma_priv->nblocks = SG_ENTRIES;
    dma_priv->blocks = kcalloc(dma_priv->nblocks,
        sizeof(struct dma_block), GFP_ATOMIC);
    if (dma_priv->blocks == NULL) {
        ret = -ENOMEM;
        goto err_free_table;
    }

    for (i = 0; i < dma_priv->nblocks; i++) {
        dma_priv->blocks[i].size = BUF_SIZE;
        dma_priv->blocks[i].data = kmalloc(dma_priv->blocks[i].size, GFP_ATOMIC);
        if (dma_priv->blocks[i].data == NULL) {
            ret = -ENOMEM;
            goto err_free_blocks;
        }
    }

    /* Attach each allocated buffer to its scatterlist entry. */
    for_each_sg(dma_priv->sgt.sgl, sg, dma_priv->sgt.nents, i)
        sg_set_buf(sg,dma_priv->blocks[i].data,dma_priv->blocks[i].size);

    return 0;

err_free_blocks:
    /* Free only the buffers successfully allocated so far. */
    while (--i >= 0)
        kfree(dma_priv->blocks[i].data);
    kfree(dma_priv->blocks);
err_free_table:
    sg_free_table(&(dma_priv->sgt));
    return ret;
}

static int _dma_start(struct test_platform_device * tdev,int block)
{
    struct dma_private_info * dma_priv = _get_dmapip(tdev);
    struct device * dev = _get_devp(tdev);
    int ret = 0;
    int sglen;

    /* Step 1: Allocate and initialize the SG list */
    dma_sg_init(tdev);

    /* Step 2: Map the SG list */
    sglen = dma_map_sg(dev,dma_priv->sgt.sgl,\
        dma_priv->sgt.nents, DMA_FROM_DEVICE);
    if(! sglen)
        goto out2;

    /* Step 3: Configure the DMA */
    (dma_priv->dma_config).direction = DMA_DEV_TO_MEM;
    (dma_priv->dma_config).src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    (dma_priv->dma_config).src_maxburst = 1;
    (dma_priv->dma_config).src_addr = (dma_addr_t) DEV_BUF;

    dmaengine_slave_config(dma_priv->dma_chan, \
        &(dma_priv->dma_config));

    /* Step 4: Prepare the SG descriptor */
    dma_priv->dma_desc = dmaengine_prep_slave_sg(dma_priv->dma_chan, \
        dma_priv->sgt.sgl, dma_priv->sgt.nents, DMA_DEV_TO_MEM, \
        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (dma_priv->dma_desc == NULL) {
        dev_err(dev,"DMA could not assign a descriptor! \n");
        goto out1;
    }

    /* Step 5: Set the callback method */
    (dma_priv->dma_desc)->callback = dma_callback;
    (dma_priv->dma_desc)->callback_param = (void *) tdev;

    /* Step 6: Put the DMA descriptor in the queue */
    dma_priv->cookie = dmaengine_submit(dma_priv->dma_desc);

    /* Step 7: Fires the DMA transaction */
    dma_async_issue_pending(dma_priv->dma_chan);

    dma_priv->dma_started = 1;

    if(block)
        dma_busy_loop(tdev);

    return ret;

out1:
    dma_stop(tdev);
out2:
    ret = -1;

    return ret;
}

/* Launch a DMA transaction without waiting for completion. */
int dma_start(struct test_platform_device * tdev)
{
    return _dma_start(tdev, 0);
}

/* Launch a DMA transaction and busy-poll until it is done. */
int dma_start_block(struct test_platform_device * tdev)
{
    return _dma_start(tdev, 1);
}

int dma_init(struct test_platform_device * tdev)
{
    int ret = 0;
    struct dma_private_info * dma_priv = _get_dmapip(tdev);
    struct device * dev = _get_devp(tdev);

    dma_priv->dma_chan = dma_request_slave_channel(dev, \
        "dma_chan0");
    if (dma_priv->dma_chan == NULL) {
        dev_err(dev,"DMA channel busy! \n");
        ret = -1;
    }

    dma_priv->dma_started = 0;

    return ret;
}

/*
 * Undo dma_init(): abort any in-flight transaction, release its mappings
 * and buffers, and hand the DMA channel back. Always returns 0.
 */
int dma_exit(struct test_platform_device * tdev)
{
    struct dma_private_info * dma_priv = _get_dmapip(tdev);

    if (dma_priv->dma_started) {
        /* Abort whatever the controller is still doing... */
        dmaengine_terminate_all(dma_priv->dma_chan);
        /* ...then unmap and free the SG resources. */
        dma_stop(tdev);
        dma_priv->dma_started = 0;
    }

    if (dma_priv->dma_chan != NULL)
        dma_release_channel(dma_priv->dma_chan);

    return 0;
}

在我的驱动程序源文件(test_driver.c)中:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>

#include "test_driver.h"

/* When non-zero (settable at load time, read-only in sysfs), the probe
 * uses the blocking/polling start path instead of the callback path. */
static int dma_block=0;
module_param_named(dma_block, dma_block, int, 0444);

/* Single static driver instance; a second matching device would silently
 * overwrite this state. NOTE(review): confirm only one device is expected. */
static struct test_platform_device tdev;

/* Device-tree match table.
 * NOTE(review): no MODULE_DEVICE_TABLE(of, test_of_match) is declared, so
 * automatic module loading on this compatible string will not work. */
static struct of_device_id test_of_match[] = {
  { .compatible = "custom,test-driver-1.0", },
  {}
};

static int test_probe(struct platform_device *op)
{
    int ret = 0;
    struct device * dev = &(op->dev);

    const struct of_device_id *match = of_match_device(test_of_match, &op->dev);

    if (!match)
        return -EINVAL;

    tdev.pdev = op;

    dma_init(&tdev);

    if(dma_block)
        ret = dma_start_block(&tdev);
    else
        ret = dma_start(&tdev);

    if(ret) {
        dev_err(dev,"Error to start DMA transaction! \n");
    } else {
        dev_info(dev,"DMA OK! \n");
    }

    return ret;
}

/* Platform remove hook: tear down any running DMA and release the channel. */
static int test_remove(struct platform_device *op)
{
    /* All per-device state lives in the single static tdev instance. */
    dma_exit(&tdev);

    return 0;
}

/* Platform driver descriptor tying the probe/remove hooks to the
 * device-tree match table above. */
static struct platform_driver test_platform_driver = {
  .probe = test_probe,
  .remove = test_remove,
  .driver = {
    .name = "test-driver",
    .owner = THIS_MODULE,
    .of_match_table = test_of_match,
  },
};

/*
 * Module entry point: register the platform driver.
 *
 * Fix: propagate platform_driver_register()'s return value; the previous
 * version discarded it and always reported success, leaving a half-loaded
 * module when registration failed.
 */
static int test_init(void)
{
    return platform_driver_register(&test_platform_driver);
}

/* Module exit point: unregister the driver (invokes remove for bound devices). */
static void test_exit(void)
{
    platform_driver_unregister(&test_platform_driver);
}

/* Standard module entry/exit registration and metadata. */
module_init(test_init);
module_exit(test_exit);

MODULE_AUTHOR("klyone");
MODULE_DESCRIPTION("DMA SG test module");
MODULE_LICENSE("GPL");

但是,DMA从不调用我的回调函数,我也不知道为什么会发生这种情况.也许是我误会了…

有人可以帮我吗?

提前致谢.

解决方法:

警告:我没有为您提供确定的解决方案,但仅基于[基于多年编写/调试linux设备驱动程序的经验]进行调试的一些观察和建议.

我认为您认为回调未完成,因为您没有收到任何printk消息.但是,回调是唯一拥有它们的地方.但是,printk级别是否设置得足够高以查看消息?我将dev_info添加到您的模块init中,以证明它可以按预期方式打印.

另外,如果dma_start无法按预期工作,您[可能]将不会得到回调,因此我也将在其中添加一些dev_info调用(例如,在步骤7中的调用之前和之后).我还注意到,并非dma_start中的所有调用都检查错误返回[可能是正常的返回还是无效的返回,只是在您错过一次的情况下提及]

在这一点上,应该指出的是,这里确实存在两个问题:(1)您的DMA请求是否成功[并完成]启动? (2)您收到回叫了吗?

因此,我将一些代码从dma_complete拆分为(例如)dma_test_done.后者进行相同的检查,但仅打印“完成”消息.您可以在轮询模式下调用它以验证DMA完成.

因此,如果您(最终)获得完成,那么问题将归结为您为什么没有获得回调.但是,如果您还没有完成,那将是一个更根本的问题.

这使我想起.您没有显示任何调用dma_start的代码,也没有显示等待完成的方式.我假设如果您的回调工作正常,它将发出某种唤醒,基础级别将等待.或者,回调函数将执行请求解除分配/清除(即,您编写的更多代码)

在第7步中,您要调用dma_async_issue_pending,而后者应调用pl330_issue_pending. pl330_issue_pending将调用pl330_tasklet.

pl330_tasklet是一个tasklet函数,但也可以直接调用[在没有活动请求时启动DMA].

pl330_tasklet将在其“工作”队列中循环,并将所有已完成的项目移至其“已完成”队列.然后,它尝试启动新请求.然后,它在已完成的队列上循环并发出回调.

pl330_tasklet会获取回调指针,但是如果为null,它将被静默忽略.您已经设置了回调,但是最好验证设置回调的位置与pl330_tasklet将从其获取的位置相同(或传播到该位置).

当您拨打电话时,一切都可能很忙,因此没有完成的请求,没有空间来发起新的请求,因此没有任何事情可以完成.在这种情况下,稍后将再次调用pl330_tasklet.

因此,当dma_async_issue_pending返回时,可能尚未发生任何事情.对于您的情况,这很有可能.

pl330_tasklet尝试通过调用fill_queue来启动新的DMA.通过查看状态!=忙,它将检查描述符是否[不忙].因此,您可能希望验证自己的值是否正确.否则,您将永远不会得到回调(甚至没有任何DMA开始).

然后,fill_queue将尝试通过pl330_submit_req启动请求.但是,这可能会返回错误(例如队列已满),因此再次推迟了事情.

作为参考,请注意pl330_submit_req顶部的以下注释:

Submit a list of xfers after which the client wants notification.
Client is not notified after each xfer unit, just once after all
xfer units are done or some error occurs.

我要做的是开始入侵pl330.c,并添加调试消息和交叉检查.如果您的系统使pl330服务于许多其他请求,则可以通过检查设备的私有数据指针是否与您的私有数据指针匹配来限制调试消息.

特别是,您希望在请求真正开始时得到一条消息,因此可以在pl330_submit_req的末尾添加调试消息.

然后,在pl330_tasklet中添加用于请求的消息也将有所帮助.

这是两个很好的起点.但是,不要害怕根据需要添加更多的printk调用.您可能会对被称为[或未被称为]或以什么顺序感到惊讶.

更新:

If I install the kernel module with the blocking behaviour, everything is initialized well. However, the dma_busy_loop function shows that the DMA descriptor is always IN PROGRESS and the DMA transaction never completes. For this reason, the callback function is not executed. What could be happening?

做了更多的研究. Cookies只是递增的序列号.例如,如果您发出的请求被分解为[说] 10个单独的分散/收集操作[描述符],则每个请求都会获得一个唯一的cookie值. Cookie的返回值是最新/最后一批(例如10).

调用(1)dma_async_is_tx_complete时,(2)调用chan-> device-> device_tx_status,(3)是pl330_tx_status,(4)调用dma_cookie_status

旁注/提示:当我进行跟踪时,我一直在dmaengine.h和pl330.c之间来回切换.就像:看(1),它调用(2).那是在哪儿?我假设在pl330.c中.因此,我为该字符串做了grep并获得了pl330函数的名称(即(3)).因此,我去了那里,发现确实如此(4).所以…回到dmaengine.h …

但是,在进行外部调用时,您将忽略最后两个参数[设置为NULL].这些之所以有用,是因为它们返回“最后一个”和“已使用”的cookie.因此,即使您没有完全完成,这些值也可能会更改并显示部分进度.

其中之一最终应为> =“返回” cookie值. (即)整个操作应完成.因此,这将有助于区分可能发生的情况.

另外,请注意,在dmaengine.h中,在dma_async_is_tx_complete的正下方,有dma_async_is_complete.该函数根据您传递的cookie值以及“ last”和“ used” cookie值来决定是返回DMA_COMPLETE还是DMA_IN_PROGRESS.它是被动的,未在代码路径[AFAICT]中使用,但确实显示了如何自己计算完成.

标签:dma,linux-kernel,linux-device-driver,c-3,linux
来源: https://codeday.me/bug/20191027/1941474.html