1 Description
Software timers are ultimately implemented on top of a hardware timer. After each clock interrupt the kernel checks whether any timers have expired, and the handlers of expired timers run as softirq work in the bottom half. In essence, the clock interrupt handler raises the TIMER_SOFTIRQ softirq, which runs all expired timers on the current processor.
2 Structures
2.1 timer_list
The timer_list structure represents a timer in the Linux kernel. A timer is an operating-system mechanism for running a specific task at some point in the future. When the timer expires, its function member is invoked; expires is the expiry time in jiffies.
12 struct timer_list {
13 /*
14 * All fields that change during normal runtime grouped to the
15 * same cacheline
16 */
17 struct hlist_node entry;
18 unsigned long expires;
19 void (*function)(struct timer_list *);
20 u32 flags;
21
22 #ifdef CONFIG_LOCKDEP
23 struct lockdep_map lockdep_map;
24 #endif
25
26 ANDROID_KABI_RESERVE(1);
27 ANDROID_KABI_RESERVE(2);
28 };
struct hlist_node entry;
This field is the list node that links the timer into the kernel's timer lists. hlist_node is the basic building block of the kernel's hash-list implementation and allows elements to be stored and looked up efficiently. In this context it lets the kernel manage a large number of timers and find and remove them quickly when needed.
unsigned long expires;
This field is the time at which the timer expires, expressed in jiffies (the kernel tick count since boot). The kernel compares it against the current jiffies value to decide when to run the timer's callback.
void (*function)(struct timer_list *);
This is a function pointer to the callback invoked when the timer expires. The callback receives a pointer to the timer itself, which allows it to access and modify the timer's state (or reach the enclosing structure) and perform other timer-related work.
u32 flags;
This field holds the timer's flag bits. The flags encode the timer's state and attributes, such as the CPU base it is queued on and options like TIMER_DEFERRABLE or TIMER_IRQSAFE. The exact bits and their meaning depend on the kernel implementation and on how the timer is used.
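The example module later in this article uses the static DEFINE_TIMER path. For a timer embedded in a dynamically allocated driver structure, the usual pattern is timer_setup() plus from_timer() in the callback. A minimal sketch, with hypothetical names (my_dev, my_timer_fn, my_dev_start):
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
    struct timer_list my_timer;
    int count;
};

static void my_timer_fn(struct timer_list *t)
{
    /* recover the enclosing structure from the timer pointer */
    struct my_dev *dev = from_timer(dev, t, my_timer);

    dev->count++;
    /* re-arm so the callback runs again roughly one second later */
    mod_timer(&dev->my_timer, jiffies + HZ);
}

static void my_dev_start(struct my_dev *dev)
{
    timer_setup(&dev->my_timer, my_timer_fn, 0);
    dev->my_timer.expires = jiffies + HZ;    /* first expiry about 1 s from now */
    add_timer(&dev->my_timer);
}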
3 Related functions
3.1 DEFINE_TIMER
Statically defines and initializes a timer.
76 #define DEFINE_TIMER(_name, _function) \
77 struct timer_list _name = \
78 __TIMER_INITIALIZER(_function, 0)
68 #define __TIMER_INITIALIZER(_function, _flags) { \
69 .entry = { .next = TIMER_ENTRY_STATIC }, \
70 .function = (_function), \
71 .flags = (_flags), \
72 __TIMER_LOCKDEP_MAP_INITIALIZER( \
73 __FILE__ ":" __stringify(__LINE__)) \
74 }
3.2 add_timer
Prototype | void add_timer(struct timer_list *timer) |
Parameter | struct timer_list *timer | pointer to the timer_list structure to arm
Return value | none (void) |
Purpose | adds a timer to the kernel's timer subsystem so that it fires at the time stored in timer->expires
1147 void add_timer(struct timer_list *timer)
1148 {
1149 BUG_ON(timer_pending(timer));
1150 mod_timer(timer, timer->expires);
1151 }
1112 int mod_timer(struct timer_list *timer, unsigned long expires)
1113 {
1114 return __mod_timer(timer, expires, 0);
1115 }
962 static inline int
963 __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
964 {
965 struct timer_base *base, *new_base;
966 unsigned int idx = UINT_MAX;
967 unsigned long clk = 0, flags;
968 int ret = 0;
969
970 BUG_ON(!timer->function);
971
972 /*
973 * This is a common optimization triggered by the networking code - if
974 * the timer is re-modified to have the same timeout or ends up in the
975 * same array bucket then just return:
976 */
977 if (timer_pending(timer)) {
978 /*
979 * The downside of this optimization is that it can result in
980 * larger granularity than you would get from adding a new
981 * timer with this expiry.
982 */
983 long diff = timer->expires - expires;
984
985 if (!diff)
986 return 1;
987 if (options & MOD_TIMER_REDUCE && diff <= 0)
988 return 1;
989
990 /*
991 * We lock timer base and calculate the bucket index right
992 * here. If the timer ends up in the same bucket, then we
993 * just update the expiry time and avoid the whole
994 * dequeue/enqueue dance.
995 */
996 base = lock_timer_base(timer, &flags);
997 forward_timer_base(base);
998
999 if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
1000 time_before_eq(timer->expires, expires)) {
1001 ret = 1;
1002 goto out_unlock;
1003 }
1004
1005 clk = base->clk;
1006 idx = calc_wheel_index(expires, clk);
1007
1008 /*
1009 * Retrieve and compare the array index of the pending
1010 * timer. If it matches set the expiry to the new value so a
1011 * subsequent call will exit in the expires check above.
1012 */
1013 if (idx == timer_get_idx(timer)) {
1014 if (!(options & MOD_TIMER_REDUCE))
1015 timer->expires = expires;
1016 else if (time_after(timer->expires, expires))
1017 timer->expires = expires;
1018 ret = 1;
1019 goto out_unlock;
1020 }
1021 } else {
1022 base = lock_timer_base(timer, &flags);
1023 forward_timer_base(base);
1024 }
1025
1026 ret = detach_if_pending(timer, base, false);
1027 if (!ret && (options & MOD_TIMER_PENDING_ONLY))
1028 goto out_unlock;
1029
1030 new_base = get_target_base(base, timer->flags);
1031
1032 if (base != new_base) {
1033 /*
1034 * We are trying to schedule the timer on the new base.
1035 * However we can't change timer's base while it is running,
1036 * otherwise del_timer_sync() can't detect that the timer's
1037 * handler yet has not finished. This also guarantees that the
1038 * timer is serialized wrt itself.
1039 */
1040 if (likely(base->running_timer != timer)) {
1041 /* See the comment in lock_timer_base() */
1042 timer->flags |= TIMER_MIGRATING;
1043
1044 raw_spin_unlock(&base->lock);
1045 base = new_base;
1046 raw_spin_lock(&base->lock);
1047 WRITE_ONCE(timer->flags,
1048 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
1049 forward_timer_base(base);
1050 }
1051 }
1052
1053 debug_activate(timer, expires);
1054
1055 timer->expires = expires;
1056 /*
1057 * If 'idx' was calculated above and the base time did not advance
1058 * between calculating 'idx' and possibly switching the base, only
1059 * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise
1060 * we need to (re)calculate the wheel index via
1061 * internal_add_timer().
1062 */
1063 if (idx != UINT_MAX && clk == base->clk) {
1064 enqueue_timer(base, timer, idx);
1065 trigger_dyntick_cpu(base, timer);
1066 } else {
1067 internal_add_timer(base, timer);
1068 }
1069
1070 out_unlock:
1071 raw_spin_unlock_irqrestore(&base->lock, flags);
1072
1073 return ret;
1074 }
3.3 del_timer
Prototype | int del_timer(struct timer_list *timer) |
Parameter | struct timer_list *timer | pointer to the timer_list structure to deactivate
Return value | int | 1 if a pending timer was deactivated, 0 if the timer was not pending
Purpose | deletes (deactivates) a pending timer; it does not wait for a handler that is already running, so on SMP del_timer_sync() should be used before freeing the timer
1204 int del_timer(struct timer_list *timer)
1205 {
1206 struct timer_base *base;
1207 unsigned long flags;
1208 int ret = 0;
1209
1210 debug_assert_init(timer);
1211
1212 if (timer_pending(timer)) {
1213 base = lock_timer_base(timer, &flags);
1214 ret = detach_if_pending(timer, base, true);
1215 raw_spin_unlock_irqrestore(&base->lock, flags);
1216 }
1217
1218 return ret;
1219 }
3.4 msecs_to_jiffies
Prototype | unsigned long msecs_to_jiffies(const unsigned int m) |
Parameter | const unsigned int m | number of milliseconds
Return value | unsigned long | the same duration expressed as a number of jiffies
Purpose | converts a duration in milliseconds into a duration in jiffies, rounding up
361 static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
362 {
363 if (__builtin_constant_p(m)) {
364 if ((int)m < 0)
365 return MAX_JIFFY_OFFSET;
366 return _msecs_to_jiffies(m);
367 } else {
368 return __msecs_to_jiffies(m);
369 }
370 }
589 unsigned long __msecs_to_jiffies(const unsigned int m)
590 {
591 /*
592 * Negative value, means infinite timeout:
593 */
594 if ((int)m < 0)
595 return MAX_JIFFY_OFFSET;
596 return _msecs_to_jiffies(m);
597 }
306 static inline unsigned long _msecs_to_jiffies(const unsigned int m)
307 {
308 return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
309 }
6 #define MSEC_PER_SEC 1000L
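As a concrete illustration of the rounding in _msecs_to_jiffies(), assuming HZ = 100 (one jiffy is 10 ms):
/* With HZ = 100, MSEC_PER_SEC / HZ == 10, so:                     */
/*   msecs_to_jiffies(1)  == (1  + 10 - 1) / 10 == 1 jiffy         */
/*   msecs_to_jiffies(10) == (10 + 10 - 1) / 10 == 1 jiffy         */
/*   msecs_to_jiffies(15) == (15 + 10 - 1) / 10 == 2 jiffies       */
/* i.e. the result is rounded up, so a timeout is never shortened. */
unsigned long expiry = jiffies + msecs_to_jiffies(500);  /* about 0.5 s from now */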
3.5 usecs_to_jiffies
Prototype | unsigned long usecs_to_jiffies(const unsigned int u) |
Parameter | const unsigned int u | number of microseconds
Return value | unsigned long | the same duration expressed as a number of jiffies
Purpose | converts a duration in microseconds into a duration in jiffies, rounding up
408 static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
409 {
410 if (__builtin_constant_p(u)) {
411 if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
412 return MAX_JIFFY_OFFSET;
413 return _usecs_to_jiffies(u);
414 } else {
415 return __usecs_to_jiffies(u);
416 }
417 }
374 static inline unsigned long _usecs_to_jiffies(const unsigned int u)
375 {
376 return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
377 }
#define USEC_PER_SEC 1000000L
3.6 Example
Arms a timer whose handler re-arms it, so timer_function runs roughly once per second (the first expiry fires immediately because DEFINE_TIMER leaves expires at 0).
#include "linux/timer.h"
#include <linux/init.h>
#include <linux/module.h>
void timer_function(struct timer_list *timer_list);
DEFINE_TIMER(timer_test, timer_function);
void timer_function(struct timer_list *timer_list)
{
printk("timer_function\r\n");
mod_timer(&timer_test, jiffies + 1*HZ);
}
static int timer_test_init(void)
{
printk("add_timer\r\n");
add_timer(&timer_test);
return 0;
}
static void timer_test_exit(void)
{
printk("del_timer\r\n");
del_timer(&timer_test);
}
module_init(timer_test_init);
module_exit(timer_test_exit);
MODULE_LICENSE("GPL");
A test run looks like this. (Because the handler re-arms the timer, del_timer_sync() would be the safer call in the exit path on SMP; the log below was produced by the code as written.)
console:/data # insmod timer_test.ko
[ 425.884540] add_timer
[ 425.886079] timer_function
[ 427.919613] timer_function
[ 428.933020] timer_function
[ 429.946226] timer_function
[ 430.959564] timer_function
[ 431.972895] timer_function
console:/data # rmmod timer_test.ko
[ 450.540771] del_timer
4 Delayed work queue: delayed_work
For periodic tasks, besides timers, the Linux kernel offers another convenient, well-packaged mechanism that is essentially built from a workqueue plus a timer: delayed_work.
4.1 Structures
4.1.1 delayed_work
struct delayed_work is the structure the Linux kernel uses to manage deferred work. It combines a workqueue item with a timer, so that a piece of work can be scheduled to run at some point in the future.
118 struct delayed_work {
119 struct work_struct work;
120 struct timer_list timer;
121
122 /* target workqueue and CPU ->timer uses to queue ->work */
123 struct workqueue_struct *wq;
124 int cpu;
125
126 ANDROID_KABI_RESERVE(1);
127 ANDROID_KABI_RESERVE(2);
128 };
1. struct work_struct work
The embedded work item. work_struct is the kernel's basic representation of a work item; through it the work can be added to a workqueue and processed at some later time.
2. struct timer_list timer
The embedded timer. The delayed-work mechanism is built on this timer, which tracks when the delay expires and determines when the work item is queued.
3. struct workqueue_struct *wq
A pointer to the workqueue this delayed work is bound to. The workqueue is what actually schedules and executes the work; when the timer fires, the work item is added to this workqueue so it can be processed once the delay has elapsed.
4. int cpu
The CPU on which the work should be queued when the delay expires. It allows the work item to be directed to a specific CPU, which can help performance and reduce scheduling latency.
5. ANDROID_KABI_RESERVE(1)
6. ANDROID_KABI_RESERVE(2)
These two fields are reserved and exist to keep the structure compatible with future kernel versions. ANDROID_KABI_RESERVE is the Android kernel's ABI-compatibility reservation mechanism, used to avoid ABI breakage when fields are changed or added later. They hold no real data; they are placeholders that keep the structure layout stable so new fields do not affect the existing binary interface.
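Inside the work handler, the enclosing delayed_work (and from it the driver's own structure) is normally recovered with to_delayed_work() and container_of(). A minimal sketch with hypothetical names (my_ctx, my_dwork_fn):
#include <linux/workqueue.h>

struct my_ctx {
    struct delayed_work dwork;
    int value;
};

static void my_dwork_fn(struct work_struct *work)
{
    /* work points at &my_ctx.dwork.work; walk back up to the containers */
    struct delayed_work *dwork = to_delayed_work(work);
    struct my_ctx *ctx = container_of(dwork, struct my_ctx, dwork);

    ctx->value++;
}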
4.2 Related functions
4.2.1 DECLARE_DELAYED_WORK
This macro declares and initializes a delayed_work instance in one step, setting up both its work function and its timer, which removes the usual declare-and-initialize boilerplate.
206 #define DECLARE_DELAYED_WORK(n, f) \
207 struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
197 #define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
198 .work = __WORK_INITIALIZER((n).work, (f)), \
199 .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\
200 (tflags) | TIMER_IRQSAFE), \
201 }
190 #define __WORK_INITIALIZER(n, f) { \
191 .data = WORK_DATA_STATIC_INIT(), \
192 .entry = { &(n).entry, &(n).entry }, \
193 .func = (f), \
194 __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \
195 }
68 #define __TIMER_INITIALIZER(_function, _flags) { \
69 .entry = { .next = TIMER_ENTRY_STATIC }, \
70 .function = (_function), \
71 .flags = (_flags), \
72 __TIMER_LOCKDEP_MAP_INITIALIZER( \
73 __FILE__ ":" __stringify(__LINE__)) \
74 }
1518 void delayed_work_timer_fn(struct timer_list *t)
1519 {
1520 struct delayed_work *dwork = from_timer(dwork, t, timer);
1521
1522 /* should have been called from irqsafe timer with irq already off */
1523 __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1524 }
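A minimal usage sketch of this static variant (my_dwork, my_work_fn and my_module_init are hypothetical names; the dynamic INIT_DELAYED_WORK path used by the driver example in 4.3 is described next in 4.2.2):
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
    pr_info("delayed work ran\n");
}

static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

static int __init my_module_init(void)
{
    /* run my_work_fn() on system_wq about two seconds from now */
    schedule_delayed_work(&my_dwork, msecs_to_jiffies(2000));
    return 0;
}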
4.2.2 INIT_DELAYED_WORK
Prototype | INIT_DELAYED_WORK(_work, _func) |
Parameter | _work | pointer to the struct delayed_work that represents the work item to be deferred
 | _func | the function to call when the work item runs; it must match the prototype void func(struct work_struct *work)
Return value | none |
Purpose | initializes a delayed_work structure at run time so that a function (the work item) can be executed at a later point rather than immediately; it is normally used together with the kernel workqueue mechanism
277 #define INIT_DELAYED_WORK(_work, _func) \
278 __INIT_DELAYED_WORK(_work, _func, 0)
261 #define __INIT_DELAYED_WORK(_work, _func, _tflags) \
262 do { \
263 INIT_WORK(&(_work)->work, (_func)); \
264 __init_timer(&(_work)->timer, \
265 delayed_work_timer_fn, \
266 (_tflags) | TIMER_IRQSAFE); \
267 } while (0)
255 #define INIT_WORK(_work, _func) \
256 __INIT_WORK((_work), (_func), 0)
234 #ifdef CONFIG_LOCKDEP
235 #define __INIT_WORK(_work, _func, _onstack) \
236 do { \
237 static struct lock_class_key __key; \
238 \
239 __init_work((_work), _onstack); \
240 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
241 lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
242 INIT_LIST_HEAD(&(_work)->entry); \
243 (_work)->func = (_func); \
244 } while (0)
245 #else
246 #define __INIT_WORK(_work, _func, _onstack) \
247 do { \
248 __init_work((_work), _onstack); \
249 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
250 INIT_LIST_HEAD(&(_work)->entry); \
251 (_work)->func = (_func); \
252 } while (0)
253 #endif
103 #ifdef CONFIG_LOCKDEP
104 #define __init_timer(_timer, _fn, _flags) \
105 do { \
106 static struct lock_class_key __key; \
107 init_timer_key((_timer), (_fn), (_flags), #_timer, &__key);\
108 } while (0)
116 #else
117 #define __init_timer(_timer, _fn, _flags) \
118 init_timer_key((_timer), (_fn), (_flags), NULL, NULL)
121 #endif
4.2.3 schedule_delayed_work
Prototype | bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay) |
Parameter | struct delayed_work *dwork | pointer to the delayed_work structure describing the deferred work
 | unsigned long delay | delay before the work runs, in jiffies (use msecs_to_jiffies() to convert from milliseconds)
Return value | bool | true if the work item was queued, false if it was already pending
Purpose | queues a delayed work item on the system default workqueue (system_wq); after the given delay the work item is submitted for execution
631 static inline bool schedule_delayed_work(struct delayed_work *dwork,
632 unsigned long delay)
633 {
634 return queue_delayed_work(system_wq, dwork, delay);
635 }
529 static inline bool queue_delayed_work(struct workqueue_struct *wq,
530 struct delayed_work *dwork,
531 unsigned long delay)
532 {
533 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
534 }
1572 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1573 struct delayed_work *dwork, unsigned long delay)
1574 {
1575 struct work_struct *work = &dwork->work;
1576 bool ret = false;
1577 unsigned long flags;
1578
1579 /* read the comment in __queue_work() */
1580 local_irq_save(flags);
1581
1582 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1583 __queue_delayed_work(cpu, wq, dwork, delay);
1584 ret = true;
1585 }
1586
1587 local_irq_restore(flags);
1588 return ret;
1589 }
1527 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1528 struct delayed_work *dwork, unsigned long delay)
1529 {
1530 struct timer_list *timer = &dwork->timer;
1531 struct work_struct *work = &dwork->work;
1532
1533 WARN_ON_ONCE(!wq);
1534 #ifndef CONFIG_CFI_CLANG
1535 WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
1536 #endif
1537 WARN_ON_ONCE(timer_pending(timer));
1538 WARN_ON_ONCE(!list_empty(&work->entry));
1539
1540 /*
1541 * If @delay is 0, queue @dwork->work immediately. This is for
1542 * both optimization and correctness. The earliest @timer can
1543 * expire is on the closest next tick and delayed_work users depend
1544 * on that there's no such delay when @delay is 0.
1545 */
1546 if (!delay) {
1547 __queue_work(cpu, wq, &dwork->work);
1548 return;
1549 }
1550
1551 dwork->wq = wq;
1552 dwork->cpu = cpu;
1553 timer->expires = jiffies + delay;
1554
1555 if (unlikely(cpu != WORK_CPU_UNBOUND))
1556 add_timer_on(timer, cpu);
1557 else
1558 add_timer(timer);
1559 }
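schedule_delayed_work() always targets system_wq. When a dedicated queue is preferred, the same delayed work can be placed on a private workqueue with queue_delayed_work(). A sketch, assuming a delayed_work named my_dwork has already been initialized as in 4.2.1/4.2.2 (my_wq and my_setup are hypothetical names):
static struct workqueue_struct *my_wq;

static int my_setup(void)
{
    my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
    if (!my_wq)
        return -ENOMEM;

    /* queue my_dwork on the private queue, roughly 100 ms from now */
    queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
    return 0;
}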
4.2.4 cancel_delayed_work
Prototype | bool cancel_delayed_work(struct delayed_work *dwork) |
Parameter | struct delayed_work *dwork | pointer to the delayed_work structure describing the deferred work
Return value | bool | true if the pending work was cancelled, false if it was not pending
Purpose | cancels a queued delayed work item; a still-pending item is removed from the queue, but the function does not wait for a handler that is already running (use cancel_delayed_work_sync() for that)
3153 bool cancel_delayed_work(struct delayed_work *dwork)
3154 {
3155 return __cancel_work(&dwork->work, true);
3156 }
3120 static bool __cancel_work(struct work_struct *work, bool is_dwork)
3121 {
3122 unsigned long flags;
3123 int ret;
3124
3125 do {
3126 ret = try_to_grab_pending(work, is_dwork, &flags);
3127 } while (unlikely(ret == -EAGAIN));
3128
3129 if (unlikely(ret < 0))
3130 return false;
3131
3132 set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3133 local_irq_restore(flags);
3134 return ret;
3135 }
4.2.5 cancel_delayed_work_sync
Prototype | bool cancel_delayed_work_sync(struct delayed_work *dwork) |
Parameter | struct delayed_work *dwork | pointer to the delayed_work structure describing the deferred work
Return value | bool | true if the work item was pending when cancelled, false otherwise
Purpose | cancels a delayed work item and waits until any instance that is already running has finished before returning
3168 bool cancel_delayed_work_sync(struct delayed_work *dwork)
3169 {
3170 return __cancel_work_timer(&dwork->work, true);
3171 }
2990 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2991 {
2992 static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
2993 unsigned long flags;
2994 int ret;
2995
2996 do {
2997 ret = try_to_grab_pending(work, is_dwork, &flags);
2998 /*
2999 * If someone else is already canceling, wait for it to
3000 * finish. flush_work() doesn't work for PREEMPT_NONE
3001 * because we may get scheduled between @work's completion
3002 * and the other canceling task resuming and clearing
3003 * CANCELING - flush_work() will return false immediately
3004 * as @work is no longer busy, try_to_grab_pending() will
3005 * return -ENOENT as @work is still being canceled and the
3006 * other canceling task won't be able to clear CANCELING as
3007 * we're hogging the CPU.
3008 *
3009 * Let's wait for completion using a waitqueue. As this
3010 * may lead to the thundering herd problem, use a custom
3011 * wake function which matches @work along with exclusive
3012 * wait and wakeup.
3013 */
3014 if (unlikely(ret == -ENOENT)) {
3015 struct cwt_wait cwait;
3016
3017 init_wait(&cwait.wait);
3018 cwait.wait.func = cwt_wakefn;
3019 cwait.work = work;
3020
3021 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3022 TASK_UNINTERRUPTIBLE);
3023 if (work_is_canceling(work))
3024 schedule();
3025 finish_wait(&cancel_waitq, &cwait.wait);
3026 }
3027 } while (unlikely(ret < 0));
3028
3029 /* tell other tasks trying to grab @work to back off */
3030 mark_work_canceling(work);
3031 local_irq_restore(flags);
3032
3033 /*
3034 * This allows canceling during early boot. We know that @work
3035 * isn't executing.
3036 */
3037 if (wq_online)
3038 __flush_work(work, true);
3039
3040 clear_work_data(work);
3041
3042 /*
3043 * Paired with prepare_to_wait() above so that either
3044 * waitqueue_active() is visible here or !work_is_canceling() is
3045 * visible there.
3046 */
3047 smp_mb();
3048 if (waitqueue_active(&cancel_waitq))
3049 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3050
3051 return ret;
3052 }
4.3 Example
#define WATCHDOG_FEED_COUNT 1000

struct spirit_mcu {
    struct i2c_client *i2c;
    struct regmap *regmap;
    struct device_node *np;
    int feed;
    struct DeviceInfomation deviceInfo;
    struct delayed_work work;
    struct mutex m_lock;
    int userfeed;
    void (*pre_reboot)(unsigned long code);
};

/* Feed the MCU watchdog, then re-arm the delayed work so it runs again
 * WATCHDOG_FEED_COUNT milliseconds later. */
static void mcu_watchdog_feed_task(struct work_struct *work)
{
    struct spirit_mcu *spirit_mcu = container_of(work, struct spirit_mcu, work.work);

    mcu_watchdog_feed(spirit_mcu);
    schedule_delayed_work(&spirit_mcu->work, msecs_to_jiffies(WATCHDOG_FEED_COUNT));
}

static int mcu_init(struct spirit_mcu *spirit_mcu)
{
    INIT_DELAYED_WORK(&spirit_mcu->work, mcu_watchdog_feed_task);
    schedule_delayed_work(&spirit_mcu->work, msecs_to_jiffies(WATCHDOG_FEED_COUNT));
    return 0;
}

static int spirit_mcu_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
    int ret = 0;
    struct spirit_mcu *spirit_mcu;
    struct device_node *np = client->dev.of_node;

    printk("%s: probe\n", __FUNCTION__);
    spirit_mcu = devm_kzalloc(&client->dev, sizeof(struct spirit_mcu), GFP_KERNEL);
    if (!spirit_mcu)
        return -ENOMEM;

    spirit_mcu->regmap = devm_regmap_init_i2c(client, &mcu_regmap_config);
    if (IS_ERR(spirit_mcu->regmap)) {
        dev_err(&client->dev, "regmap initialization failed\n");
        return PTR_ERR(spirit_mcu->regmap);
    }

    ret = mcu_init(spirit_mcu);
    if (ret < 0) {
        printk("mcu_update_vmac_mac_addr fail\n");
    }
    return ret;
}

static const struct i2c_device_id spirit_mcu_id[] = {
    { "spirit_mcu", 0 },
    { }
};

static struct i2c_driver spirit_mcu_driver = {
    .driver = {
        .name = "spirit_mcu",
        .owner = THIS_MODULE,
    },
    .probe = spirit_mcu_probe,
    .id_table = spirit_mcu_id,
};

static int __init spirit_mcu_init(void)
{
    return i2c_add_driver(&spirit_mcu_driver);
}

static void __exit spirit_mcu_exit(void)
{
    i2c_del_driver(&spirit_mcu_driver);
}

MODULE_AUTHOR("[email protected]");
MODULE_DESCRIPTION("spirit mcu driver");
MODULE_LICENSE("GPL");

late_initcall(spirit_mcu_init);
module_exit(spirit_mcu_exit);
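Note that this example never cancels the delayed work: the feed task keeps re-arming itself, so it would continue running after the module is removed. A sketch of the missing teardown, assuming i2c_set_clientdata(client, spirit_mcu) is added in probe and .remove = spirit_mcu_remove is added to spirit_mcu_driver (cancel_delayed_work_sync() also handles the self-re-arming case):
static int spirit_mcu_remove(struct i2c_client *client)
{
    struct spirit_mcu *spirit_mcu = i2c_get_clientdata(client);

    /* stop the watchdog feed task and wait for a running instance to finish */
    cancel_delayed_work_sync(&spirit_mcu->work);
    return 0;
}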
From: https://blog.csdn.net/weixin_49406449/article/details/142213424