Linux SPI
What is the execution flow of spi_sync()?
What is the execution flow of spi_async()?
When an SPI peripheral driver calls spi_sync(), why does the spi* kernel thread show load?
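For context, this is roughly what "calling spi_sync()" looks like from a peripheral driver (a minimal sketch; example_read is a hypothetical helper, and the data buffers must be DMA-safe, i.e. kmalloc'd rather than on the stack):

#include <linux/slab.h>
#include <linux/spi/spi.h>

/* Hypothetical helper: one full-duplex transfer, issued synchronously. */
static int example_read(struct spi_device *spi, u8 *tx, u8 *rx, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len = len,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	/* Blocks until the message completes (or fails). */
	return spi_sync(spi, &msg);
}

The rest of this note traces what happens underneath, starting from spi_sync():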
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
As shown above, __spi_sync() runs under the controller's bus_lock_mutex, so concurrent callers are serialized; there is no multi-threaded concurrency inside it.
A typical SPI controller driver implements the hardware-transfer ops:
ctlr->prepare_transfer_hardware = soc_spi_prepare_xfer_hardware;
ctlr->transfer_one = soc_spi_transfer_one;
ctlr->unprepare_transfer_hardware = soc_spi_unprepare_xfer_hardware;
and does not implement the ctlr->transfer op itself.
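A skeletal probe() wiring these up might look like this (a sketch only; soc_spi_priv and the soc_spi_* callbacks are placeholders with their bodies stubbed out):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* Placeholder driver-private state (hypothetical). */
struct soc_spi_priv {
	void __iomem *regs;
};

/* Stub callbacks; a real driver programs the controller here. */
static int soc_spi_prepare_xfer_hardware(struct spi_controller *ctlr)
{
	return 0;	/* e.g. enable clocks */
}

static int soc_spi_transfer_one(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	return 0;	/* 0: finished here; >0: completion signalled later */
}

static int soc_spi_unprepare_xfer_hardware(struct spi_controller *ctlr)
{
	return 0;	/* e.g. disable clocks */
}

static int soc_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct soc_spi_priv));
	if (!ctlr)
		return -ENOMEM;

	ctlr->prepare_transfer_hardware   = soc_spi_prepare_xfer_hardware;
	ctlr->transfer_one                = soc_spi_transfer_one;
	ctlr->unprepare_transfer_hardware = soc_spi_unprepare_xfer_hardware;
	/* ctlr->transfer stays NULL: the core installs
	 * spi_queued_transfer() in spi_register_controller(). */

	return devm_spi_register_controller(&pdev->dev, ctlr);
}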
spi_register_controller()
| spi_controller_initialize_queue()
static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}
So the core itself installs:
ctlr->transfer = spi_queued_transfer;
ctlr->transfer_one_message = spi_transfer_one_message;
and spi_init_queue() creates the message-pump worker task with kthread_run(). That task is named after the controller device (dev_name(&ctlr->dev), e.g. "spi0"), which is exactly the spi* kernel thread whose load we are asking about.
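Abridged from the same drivers/spi/spi.c generation (real-time-priority handling omitted), spi_init_queue() boils down to:

static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;

	kthread_init_worker(&ctlr->kworker);
	ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
					 "%s", dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker_task)) {
		dev_err(&ctlr->dev, "failed to create message pump task\n");
		return PTR_ERR(ctlr->kworker_task);
	}
	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	return 0;
}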
static void spi_complete(void *arg)
{
	complete(arg);
}
static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (ctlr->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (ctlr->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(ctlr, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;

	return status;
}
So the synchronous path calls, in order:
__spi_queued_transfer()
__spi_pump_messages()
wait_for_completion()
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}
__spi_sync() -> __spi_queued_transfer(spi, message, false);
The spi_message is appended to ctlr->queue.
Note that need_pump is false here, so the spi* kernel thread is NOT woken to run the pump work; __spi_sync() pumps the queue in the calling context instead:
__spi_sync()
|__spi_queued_transfer(spi, message, false);
|__spi_pump_messages(ctlr, false)---> ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
|wait_for_completion(&done);
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long long ms = 1;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&ctlr->xfer_completion);

			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = 8LL * 1000LL * xfer->len;
				do_div(ms, xfer->speed_hz);
				ms += ms + 200; /* some tolerance */

				if (ms > UINT_MAX)
					ms = UINT_MAX;

				ms = wait_for_completion_timeout(&ctlr->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs) {
			u16 us = xfer->delay_usecs;

			if (us <= 10)
				udelay(us);
			else
				usleep_range(us, us + DIV_ROUND_UP(us, 10));
		}

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_res_release(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
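Note the transfer_one() contract visible above: a return value of 0 means the transfer completed inside the callback, while > 0 means it is still in flight (IRQ or DMA driven) and the core waits on ctlr->xfer_completion with a timeout. The controller driver then signals completion, typically from its interrupt handler, through spi_finalize_current_transfer(), which in this kernel generation is simply:

void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);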
__spi_sync()
|__spi_queued_transfer(spi, message, false);
|__spi_pump_messages(ctlr, false)---> ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
____| spi_set_cs(msg->spi, true);
____| ret = ctlr->transfer_one(ctlr, msg->spi, xfer); /* SPI controller transfer op */
____| spi_set_cs(msg->spi, false);
____| spi_finalize_current_message(ctlr);
________| kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); /* key point */
________| mesg->complete(mesg->context);
|wait_for_completion(&done);
The key is:
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	spi_unmap_msg(ctlr, mesg);

	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;
	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
spi_finalize_current_message() -> kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
It unconditionally queues the pump work on ctlr->kworker, so the spi kthread wakes and runs the work.
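The work function is spi_pump_messages(), a thin wrapper that re-enters the pump with in_kthread = true:

static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}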
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&ctlr->kworker,
					   &ctlr->pump_messages);
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		if (ctlr->auto_runtime_pm) {
			pm_runtime_mark_last_busy(ctlr->dev.parent);
			pm_runtime_put_autosuspend(ctlr->dev.parent);
		}
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	ctlr->cur_msg =
		list_first_entry(&ctlr->queue, struct spi_message, queue);

	list_del_init(&ctlr->cur_msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware\n");

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(ctlr->cur_msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			ctlr->cur_msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, ctlr->cur_msg);
	if (ret) {
		ctlr->cur_msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}
Now look at __spi_sync() again with this in mind. Because spi_finalize_current_message() always re-queues the pump work, the spi* kthread wakes after every message. If the next spi_sync() has already queued its message by the time that work runs, the kthread's pump claims it first and the caller's inline __spi_pump_messages() bails out on seeing ctlr->cur_msg set; under back-to-back traffic the controller transfer for the next spi_sync() therefore very likely runs in the spi kthread's work context. Even when the queue is empty, the kthread still performs the idle teardown. That is why the spi* kernel thread shows load.
As an experiment, comment out the kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages) call in spi_finalize_current_message(): spi_sync() then executes entirely in the SPI peripheral driver's own process context, not the spi* kernel thread.
spi_async(), by contrast, relies on exactly that kthread_queue_work() kick:
spi_async() -> __spi_queued_transfer(spi, msg, true) -> list_add_tail(&msg->queue, &ctlr->queue) -> kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
The code is straightforward: append the spi_message to ctlr->queue and queue the pump work on the kworker; the actual transfer then runs in the spi* kthread.
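For completeness, a minimal sketch of an asynchronous submission (names are illustrative; the completion callback is invoked from the message pump, typically the spi* kthread, so it must not sleep):

#include <linux/completion.h>
#include <linux/spi/spi.h>

/* Hypothetical callback: runs when the message finishes. */
static void example_async_complete(void *context)
{
	struct completion *done = context;

	complete(done);		/* do not sleep here */
}

/* Submit msg and return immediately; the caller may later
 * wait_for_completion(done) or poll msg->status. */
static int example_async_xfer(struct spi_device *spi,
			      struct spi_message *msg,
			      struct completion *done)
{
	msg->complete = example_async_complete;
	msg->context = done;

	return spi_async(spi, msg);
}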
written by [email protected]
Please credit the author and the source when republishing.