Character devices: open -> sys_open -> (current->task_struct->fops = kobj_map->probes[major number]->ops)
module_init: puts every driver's init function pointer into a dedicated section; at boot the kernel walks that section from its start address and calls each function in turn.
ioremap can map the same physical address range more than once (overlapping mappings are allowed).
include/linux/init.h -> #define module_init
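To make the note above concrete, here is a minimal, self-contained sketch of the built-in initcall idea; the section name, symbol names and macro are invented for illustration, and the real macros in include/linux/init.h use several .initcallN.init levels:
/* Simplified sketch of the built-in initcall mechanism. */
typedef int (*initcall_t)(void);

/* Place a pointer to the init function into a dedicated section (name assumed). */
#define my_module_init(fn) \
	static initcall_t __initcall_##fn \
	__attribute__((used, section(".my_initcall.init"))) = fn

/* Section boundaries would be provided by the linker script (assumed symbols). */
extern initcall_t __my_initcall_start[], __my_initcall_end[];

static void my_do_initcalls(void)
{
	initcall_t *call;

	/* Walk the section from its start address and call each init function in turn. */
	for (call = __my_initcall_start; call < __my_initcall_end; call++)
		(*call)();
}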
At kernel boot, a coprocessor instruction (MCR p15, 0, (Rd orr 0x2000), c1, c0, 0) sets bit 13 of the CP15 control register, moving the ARM core's exception vector base to 0xFFFF0000.
----------------------------------
CP15 Register 1: Control registers
V (bit[13]) This bit is used to select the location of the exception vectors:
0 = Normal exception vectors selected (address range 0x00000000-0x0000001C)
1 = High exception vectors selected (address range 0xFFFF0000-0xFFFF001C).
An implementation can provide an input signal that determines the state of this bit after reset.
#define __initdata __section(.init.data)
If an address really cannot be worked out, disassemble the kernel (arm-linux-gnu-objdump -D vmlinux > linux.dis).
System calls -> fs/*.c
vmlinux (ELF kernel with debug info) -> Image (raw kernel binary (D3 F0 21 E3 10 9F 10 EE)) -> zImage (kernel with self-decompression code prepended) -> uImage (kernel with a 64-byte header added)
ARM ATPCS calling convention
----------------------------------------------------------------------------------------------------------
FP: frame pointer to the current function's stacked parameters and local variables
SP: stack pointer of the current mode; also points at the arguments passed on the stack for the current call
From the fifth argument onward, arguments go on the stack, pushed left to right (the first four are passed in r0-r3)
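As an illustration of the ATPCS rules above, a hypothetical six-argument call (function names are made up):
/* Hypothetical six-argument call to show where ATPCS puts the arguments. */
long consume(long a, long b, long c, long d, long e, long f);

long produce(void)
{
	/* a..d travel in r0-r3; e and f go on the stack at the call site,
	 * e at [sp] and f at [sp, #4]; the return value comes back in r0. */
	return consume(1, 2, 3, 4, 5, 6);
}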
Kernel boot flow
----------------------------------------------------------------------------------------------------------
arch/arm/kernel/head.S->stext()->init/main.c->start_kernel()
Atomic operations
----------------------------------------------------------------------------------------------------------
Spinlocks
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	/* ldrex/strex retry loop: reload and retry if the exclusive store fails */
	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
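A minimal usage sketch of these helpers with a hypothetical open counter (older kernels pull atomic_t from <asm/atomic.h>, newer ones from <linux/atomic.h>):
#include <linux/atomic.h>	/* <asm/atomic.h> on 2.6-era kernels */

/* Hypothetical open counter kept consistent without a lock. */
static atomic_t my_open_count = ATOMIC_INIT(0);

static int my_open(void)
{
	atomic_add(1, &my_open_count);		/* or atomic_inc() */
	return 0;
}

static void my_release(void)
{
	atomic_sub(1, &my_open_count);		/* or atomic_dec() */
}

static int my_users(void)
{
	return atomic_read(&my_open_count);	/* plain read of v->counter */
}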
----------------------------------------------------------------------------------------------------------
Semaphores
void down(struct semaphore *sem)
{
unsigned long flags;
spin_lock_irqsave(&sem->lock, flags);
if (likely(sem->count > 0))
sem->count--;
else
__down(sem);//disables interrupts, puts the task on the wait list, sets it to uninterruptible sleep, re-enables interrupts and requests a reschedule.
spin_unlock_irqrestore(&sem->lock, flags);
}
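A minimal sketch of using a semaphore as a sleeping mutex (names hypothetical; the header is <asm/semaphore.h> on very old kernels):
#include <linux/semaphore.h>

/* Hypothetical semaphore used as a sleeping mutex around shared state. */
static struct semaphore my_sem;

static int __init my_init(void)
{
	sema_init(&my_sem, 1);		/* count of 1 => mutual exclusion */
	return 0;
}

static void my_critical_work(void)
{
	down(&my_sem);			/* sleeps uninterruptibly while count == 0 */
	/* ... touch the shared hardware or data ... */
	up(&my_sem);			/* __up() above wakes the first waiter */
}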
static noinline void __sched __up(struct semaphore *sem)
{
struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
struct semaphore_waiter, list);
list_del(&waiter->list);
waiter->up = 1;
wake_up_process(waiter->task);
}
Interruptible sleep vs. uninterruptible sleep
-----------------------
Two task states. The difference: when the timer interrupt checks the time slice, a task in interruptible sleep also gets its signal queue checked, while a task in uninterruptible sleep does not.
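A sketch of how the two sleep flavours look from driver code, using a hypothetical wait queue and condition:
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

/* Hypothetical wait queue and condition to show the two sleep states. */
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_condition;

static int my_wait(void)
{
	/* TASK_INTERRUPTIBLE: a signal can end the sleep early, so check for it. */
	if (wait_event_interruptible(my_wq, my_condition))
		return -ERESTARTSYS;

	/* TASK_UNINTERRUPTIBLE: signals are not looked at until the condition holds. */
	wait_event(my_wq, my_condition);
	return 0;
}

static void my_wake(void)
{
	my_condition = 1;
	wake_up(&my_wq);	/* wakes sleepers of both kinds */
}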
Interrupts and system calls
-----------------------
arch/arm/kernel/Traps.c
----------------------------------------------------------
void __init early_trap_init(void)
{
......
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); /* relocate the exception branch instructions */
memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start); /* relocate the exception handler stubs */
......
}
arch/arm/kernel/entry-armv.s
----------------------------------------------------------
.equ stubs_offset, __vectors_start + 0x200 - __stubs_start /* accounts for the relocation */
__vectors_start: /* 0xFFFF0000 (CP15 Reg1[13] == 1) */
swi 0x9F0000
b vector_und + stubs_offset
ldr pc, .LCvswi + stubs_offset /* software interrupt: system calls, signal-handler return, ... */
b vector_pabt + stubs_offset
b vector_dabt + stubs_offset
b vector_addrexcptn + stubs_offset
b vector_irq + stubs_offset /* external interrupts */
b vector_fiq + stubs_offset /* Linux does not use FIQ */
__vectors_end:
/*
Understanding stubs_offset. In (stubs_offset = __vectors_start + 0x200 - __stubs_start),
drop the "+ 0x200" for a moment: what is left is the distance from __stubs_start to
__vectors_start. Add that distance to the address of vector_irq (which sits at
__stubs_start) and the result is exactly __vectors_start; whether __stubs_start comes
before or after __vectors_start, the arithmetic still yields the correct distance
between the two. So for the instruction "b vector_irq + stubs_offset", the PC-relative
offset the assembler encodes would make the branch land on __vectors_start. Now put the
"+ 0x200" back: no matter where vector_irq ends up, the branch always lands at
(__vectors_start + 0x200). Combine this with early_trap_init() in arch/arm/kernel/Traps.c
and its memcpy() calls: once the high vectors are enabled (CP15 Reg1[13] == 1) the
exception base is vectors (0xFFFF0000), so by the reasoning above an external interrupt
branches to 0xFFFF0200, which is exactly where the IRQ stub bodies were copied, as the
memcpy((void *)vectors + 0x200, ...) line shows.
*/
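The arithmetic can be checked with a tiny host-side program; the link-time addresses below are invented, only the relationships between them matter:
#include <stdio.h>

/* Worked example of the stubs_offset arithmetic with made-up link addresses. */
int main(void)
{
	unsigned long stubs_start   = 0xc0018000;	/* hypothetical link address */
	unsigned long vectors_start = 0xc0020000;	/* hypothetical link address */
	unsigned long vector_irq    = stubs_start + 0x80; /* some stub inside the stubs */

	unsigned long stubs_offset  = vectors_start + 0x200 - stubs_start;

	/* Encoded branch target of "b vector_irq + stubs_offset" (link time). */
	unsigned long link_target   = vector_irq + stubs_offset;

	/* After both memcpy()s the same layout exists at 0xFFFF0000, so at run
	 * time the branch lands at: */
	unsigned long run_target    = 0xFFFF0000UL + (link_target - vectors_start);

	printf("stub copied to  : %#lx\n", 0xFFFF0200UL + (vector_irq - stubs_start));
	printf("branch lands at : %#lx\n", run_target);	/* the two match */
	return 0;
}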
----------------------------------------------------------
__stubs_start:
vector_irq:
sub lr, lr, #4
/* save the registers we are about to use: r0, lr_irq and spsr_irq */
stmia sp, {r0, lr} @ save r0, lr
mrs lr, spsr
str lr, [sp, #8] @ save spsr
/* arrange to return in SVC mode */
mrs r0, cpsr
eor r0, r0, #1
msr spsr_cxsf, r0
/* work out whether the interrupt hit User mode or SVC mode (User = 0000, Supervisor = 0011) */
and lr, lr, #0x0f
mov r0, sp
ldr lr, [pc, lr, lsl #2] /* (pc + lr * 4) */
movs pc, lr /* branch to the stub for the mode we came from: lr holds the low 4 mode bits; MOVS with pc as the destination also copies SPSR into CPSR. */
.long __irq_usr @ 0 (USR_26 / USR_32)
.long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
.long __irq_invalid @ 2 (IRQ_26 / IRQ_32)
.long __irq_svc @ 3 (SVC_26 / SVC_32)
.long __irq_invalid @ 4
.long __irq_invalid @ 5
.long __irq_invalid @ 6
.long __irq_invalid @ 7
.long __irq_invalid @ 8
.long __irq_invalid @ 9
.long __irq_invalid @ a
.long __irq_invalid @ b
.long __irq_invalid @ c
.long __irq_invalid @ d
.long __irq_invalid @ e
.long __irq_invalid @ f
----------------------------------------------------------
__irq_usr:
......
get_irqnr_preamble r5, lr
1:get_irqnr_and_base r0, r6, r5, lr
movne r1, sp
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
adrne lr, 1b
bne asm_do_IRQ
......
__irq_svc:
......
get_irqnr_preamble r5, lr
1:get_irqnr_and_base r0, r6, r5, lr
movne r1, sp
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
adrne lr, 1b
bne asm_do_IRQ
......
----------------------------------------------------------
arch/arm/kernel/Irq.c/asm_do_IRQ()
desc_handle_irq(irq, desc);
----------------------------------------------------------
include/asm-arm/mach/Irq.h/desc_handle_irq()
desc->handle_irq(irq, desc);
----------------------------------------------------------
kernel/irq/Chip.c/handle_level_irq()
......
action = desc->action;
action_ret = handle_IRQ_event(irq, action);
......
----------------------------------------------------------
kernel/irq/Handle.c/handle_IRQ_event()
......
local_irq_enable_in_hardirq();/* enable interrupts */
do {
/* call every handler sharing this irq number in turn */
ret = action->handler(irq, action->dev_id);/* run the handler */
if (ret == IRQ_HANDLED)
status |= action->flags;
retval |= ret;
action = action->next;
} while (action);
local_irq_disable(); /* disable interrupts */
......
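A sketch of how a driver hooks into this shared-handler loop, with a hypothetical IRQ number, device token and hardware check:
#include <linux/interrupt.h>

#define MY_IRQ 42			/* hypothetical interrupt number */
static int my_dev;			/* stand-in for the driver's device structure */

static int my_device_raised_irq(void *dev_id)
{
	return 1;			/* hypothetical "did our hardware fire?" check */
}

static irqreturn_t my_handler(int irq, void *dev_id)
{
	/* On a shared line, return IRQ_NONE if our device did not raise the
	 * interrupt so the other action->handler entries get their turn. */
	if (!my_device_raised_irq(dev_id))
		return IRQ_NONE;

	/* ... acknowledge and service the device ... */
	return IRQ_HANDLED;	/* handle_IRQ_event() then ORs action->flags into status */
}

static int my_probe(void)
{
	return request_irq(MY_IRQ, my_handler, IRQF_SHARED, "my_dev", &my_dev);
}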
----------------------------------------------------------
System calls
-----------------------
arch/arm/kernel/entry-armv.s
----------------------------------------------------------
.LCvswi:
.word vector_swi
----------------------------------------------------------
arch/arm/kernel/entry-common.S
----------------------------------------------------------
ENTRY(vector_swi)
/* build the SWI (software interrupt) context */
sub sp, sp, #72 /* reserve a full register frame on the SVC stack; 72 = S_FRAME_SIZE <=> sizeof(struct pt_regs) */
stmia sp, {r0 - r12} @ Calling r0 - r12 /* save the general registers r0-r12 */
add r8, sp, #60 /* S_PC <=> uregs[15] */
stmdb r8, {sp, lr}^ @ Calling sp, lr /* save sp_usr and lr_usr */
mrs r8, spsr @ called from non-FIQ mode, so ok. /* grab the status register at the time of the SWI */
str lr, [sp, #S_PC] @ Save calling PC /* save lr_svc (the calling PC) */
str r8, [sp, #S_PSR] @ Save CPSR /* save spsr_svc */
str r0, [sp, #S_OLD_R0] @ Save OLD_R0 /* save r0 once more as OLD_R0 */
mov fp, #0 /* zero the frame pointer r11 */
/* fetch the system call number */
ldr r7, [lr, #-4] @ get SWI instruction /* (lr_svc - 4) is the SWI instruction itself */
/* reload the CP15 control register from the saved cr_alignment value (keeps the alignment setting consistent) */
ldr ip, __cr_alignment
ldr ip, [ip]
mcr p15, 0, ip, c1, c0 @ update control register
/* enable interrupts */
msr cpsr_c, #0x00000013 /* SVC mode with IRQ enabled */
/* get the current thread_info pointer; at task-switch time sp_svc was already set to this task's kernel stack */
mov r9, sp
lsr r9, r9, #13 /* thread_info and the kernel stack share one 8 KiB union: a bit under 8 KiB of stack plus sizeof(struct thread_info) */
mov r9, r9, lsl #13
/* load the address of the system call table into r8 */
adr r8, sys_call_table @ load syscall table pointer
ldr ip, [r9, #TI_FLAGS] @ check for syscall tracing /* check whether syscall tracing is enabled */
/* keep the low 24 bits of the SWI instruction, i.e. the system call number */
bic r7, r7, #0xff000000 @ mask off SWI op-code
eor r7, r7, #__NR_SYSCALL_BASE @ check OS number
/* push the fifth and sixth arguments; if syscall tracing is on, take the tracing path */
stmdb sp!, {r4, r5} @ push fifth and sixth args
tst ip, #256 @ are we tracing syscalls?
bne __sys_trace /* bit 8 of thread_info->flags set means syscall tracing is enabled */
/* dispatch the system call */
cmp r7, #NR_syscalls @ check upper syscall limit /* bounds-check the syscall number */
adr lr, ret_fast_syscall @ return address /* after the sys_* routine we fall straight into ret_fast_syscall */
ldrcc pc, [r8, r7, lsl #2] @ call sys_* routine /* jump to the matching system call handler */
/* if the syscall number is above the limit, handle it below */
add r1, sp, #8
2:mov r8, #0 @ no longer a real syscall
cmp r7, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
eor r0, r7, #__NR_SYSCALL_BASE @ put OS number back
bcs arm_syscall
b sys_ni_syscall @ not private func
ENDPROC(vector_swi)
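For reference, a hedged user-space sketch of what triggers this path on an OABI kernel: the syscall number is encoded in the SWI immediate (__NR_SYSCALL_BASE = 0x900000, getpid = 20 there), which is exactly what vector_swi digs back out of [lr, #-4]. It must be built as ARM (not Thumb) code; on an EABI kernel the number goes in r7 with "swi 0" instead:
#include <stdio.h>

/* OABI-style getpid: 0x900014 = __NR_SYSCALL_BASE (0x900000) + 20. */
static long my_getpid_oabi(void)
{
	long pid;

	__asm__ __volatile__(
		"swi	#0x900014\n"
		"mov	%0, r0\n"	/* the kernel's return value comes back in r0 */
		: "=r" (pid)
		:
		: "r0", "memory");
	return pid;
}

int main(void)
{
	printf("pid = %ld\n", my_getpid_oabi());
	return 0;
}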
----------------------------------------------------------
mmu
----------------------------------------------------------
----------------------------------------------------------
Finding the NAND platform driver by string matching
----------------------------------------------------------
At boot the kernel prints the key string "NAND 64MiB 3,3V 8-bit". A global Source Insight
search for "NAND 64MiB 3,3V 8-bit" finds the structure array nand_flash_ids in
drivers/mtd/nand/Nand_ids.c. Looking at which files use it, the best match is the
nand_get_flash_type function in drivers/mtd/nand/Nand_base.c. Following its callers leads
to nand_scan_ident in the same file, and from there to nand_scan, also in the same file;
nand_scan is the function that identifies the NAND device. Finally, in our target
platform's driver file drivers/mtd/nand/s3c2410.c, s3c24xx_nand_probe calls nand_scan.
Registration flow for the s3c2410 NAND flash:
module_init(s3c2410_nand_init); #driver/mtd/nand/S3c2410.c
/* register a platform driver */
platform_driver_register(&s3c2410_nand_driver); #driver/mtd/nand/S3c2410.c
/* called once a matching platform device is found (or finds us) */
s3c24xx_nand_probe(dev, TYPE_S3C2410); #driver/mtd/nand/S3c2410.c
{
...
/* Using the supplied nand_chip structure, issue commands to the device and work out
 * its type; then, via the access function pointers and register addresses that
 * nand_chip provides, fill the mtd_info structure with the read/write/erase/control
 * operations for this kind of device. */
nmtd->scan_res = nand_scan(&nmtd->mtd, (sets) ? sets->nr_chips : 1);
/* register this mtd_info structure */
s3c2410_nand_add_partition(info, nmtd, sets); #driver/mtd/nand/S3c2410.c
...
}
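A stripped-down sketch of the same probe pattern against the 2.6-era MTD/NAND API; names are hypothetical and the real s3c2410 driver also handles clocks, resources and partitions:
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static int my_nand_probe(struct platform_device *pdev)
{
	struct nand_chip *chip;
	struct mtd_info *mtd;

	chip = kzalloc(sizeof(*chip) + sizeof(*mtd), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	mtd = (struct mtd_info *)(chip + 1);
	mtd->priv = chip;
	/* ... ioremap the controller, fill chip->IO_ADDR_R/W, chip->cmd_ctrl,
	 * chip->dev_ready from the platform resources ... */

	if (nand_scan(mtd, 1)) {	/* identify the chip, fill mtd's read/write/erase ops */
		kfree(chip);
		return -ENXIO;
	}

	if (add_mtd_device(mtd)) {	/* the real driver registers partitions instead */
		kfree(chip);
		return -ENODEV;
	}
	return 0;
}

static struct platform_driver my_nand_driver = {
	.probe	= my_nand_probe,
	.driver	= {
		.name	= "my-nand",
	},
};

static int __init my_nand_init(void)
{
	return platform_driver_register(&my_nand_driver);
}
module_init(my_nand_init);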
MTD framework
----------------------------------------------------------
int add_mtd_device(struct mtd_info *mtd) /* MTD device registration */ #drivers/mtd/Mtdcore.c
{
...
/* find an unused slot in mtd_table; at most MAX_MTD_DEVICES (32) entries */
for (i=0; i < MAX_MTD_DEVICES; i++)
if (!mtd_table[i]) {
struct list_head *this;
mtd_table[i] = mtd;
mtd->index = i;
mtd->usecount = 0;
...
list_for_each(this, &mtd_notifiers) {
struct mtd_notifier *not = list_entry(this, struct mtd_notifier, list);
not->add(mtd);
}
...
}
...
}
----------------------------------------------------------
Let's follow mtd_notifiers as the thread through the MTD framework /* static LIST_HEAD(mtd_notifiers); list of MTD block-device notifiers (operation classes) */
For block devices, here is the rough layering first:
------------------------------
| block transfer layer        |
------------------------------
| MTD transfer layer          |
------------------------------
| concrete block device ops   |
------------------------------
----------------------------------------------------------
static LIST_HEAD(blktrans_majors) /* list of every registered block-device operation class; MTD's mtdblock is one entry */
----------------------------------------------------------blktrans_notifier
static struct mtd_notifier blktrans_notifier = {
.add = blktrans_notify_add,
.remove = blktrans_notify_remove,
};
static void blktrans_notify_add(struct mtd_info *mtd)
{
struct list_head *this;
...
/* blktrans_notifier works by walking the blktrans_majors list of block-device operation classes, finding each registered mtd_blktrans_ops and calling its add function */
list_for_each(this, &blktrans_majors) {
struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);
tr->add_mtd(tr, mtd);
}
}
----------------------------------------------------------mtdblock_tr /* define the operations structure for the MTD class of block devices */
/* MTD transfer-layer operations structure */
static struct mtd_blktrans_ops mtdblock_tr = {
.name = "mtdblock",
.major = 31,
.part_bits = 0,
.blksize = 512,
.open = mtdblock_open,
...
.readsect = mtdblock_readsect,
.writesect = mtdblock_writesect,
.add_mtd = mtdblock_add_mtd,
...
};
/* holds the already-opened block devices together with the function pointers for their concrete operations */
static struct mtdblk_dev *mtdblks[MAX_MTD_DEVICES];
/* common add (registration) entry point for MTD block devices */
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
..
dev->mtd = mtd;
dev->devnum = mtd->index;
dev->size = mtd->size >> 9;
dev->tr = tr;
...
add_mtd_blktrans_dev(dev);
}
/* common open entry point for MTD block devices */
static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
struct mtdblk_dev *mtdblk;
struct mtd_info *mtd = mbd->mtd;
int dev = mbd->devnum;
...
mtdblk->count = 1;
mtdblk->mtd = mtd;
...
mtdblks[dev] = mtdblk;
...
}
/* common sector-write entry point for MTD block devices */
static int mtdblock_writesect(struct mtd_blktrans_dev *dev, unsigned long block, char *buf)
{
struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
...
return do_cached_write(mtdblk, block<<9, 512, buf);
}
/* common sector-read entry point for MTD block devices */
static int mtdblock_readsect(struct mtd_blktrans_dev *dev, unsigned long block, char *buf)
{
struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
return do_cached_read(mtdblk, block<<9, 512, buf);
}
----------------------------------------------------------
/* At kernel boot, the mtdblock_tr operations class is registered with blktrans_majors;
 * from then on, every time an MTD sub-device is added, mtdblock_tr's add function
 * performs the addition. */
static int __init init_mtdblock(void)
{
return register_mtd_blktrans(&mtdblock_tr);
}
/* register an MTD transfer layer */
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
...
if (!blktrans_notifier.list.next) /* done only the first time: a single notifier covers all block-device operation classes */
register_mtd_user(&blktrans_notifier);
tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
...
ret = register_blkdev(tr->major, tr->name);
...
/* mtd_blktrans_request() only wakes the mtd_blktrans_thread kernel thread.
 * blk_init_queue() allocates a request_queue and sets its make_request_fn;
 * that function merges BIOs into requests and puts them on the request queue. */
tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
...
tr->blkcore_priv->rq->queuedata = tr;
blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
tr->blkshift = ffs(tr->blksize) - 1;
/* the mtd_blktrans_thread kernel thread then uses the elevator to pull suitable requests off the queue and performs the reads/writes */
tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr, "%sd", tr->name);
...
INIT_LIST_HEAD(&tr->devs);
/* add this class of block-device operations to the blktrans_majors list */
list_add(&tr->list, &blktrans_majors);
for (i=0; i<MAX_MTD_DEVICES; i++) {
if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
tr->add_mtd(tr, mtd_table[i]);
}
...
}
----------------------------------------------------------
/* register an MTD block-device notifier by putting it on the mtd_notifiers list */
void register_mtd_user (struct mtd_notifier *new)
{
...
list_add(&new->list, &mtd_notifiers); /* static LIST_HEAD(mtd_notifiers); */
...
for (i=0; i< MAX_MTD_DEVICES; i++)
if (mtd_table[i])
new->add(mtd_table[i]);
...
}
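A minimal sketch of plugging into mtd_notifiers yourself, the same mechanism blktrans_notifier uses above (names hypothetical):
#include <linux/module.h>
#include <linux/mtd/mtd.h>

static void my_notify_add(struct mtd_info *mtd)
{
	printk(KERN_INFO "MTD %d (%s) added\n", mtd->index, mtd->name);
}

static void my_notify_remove(struct mtd_info *mtd)
{
	printk(KERN_INFO "MTD %d removed\n", mtd->index);
}

static struct mtd_notifier my_notifier = {
	.add	= my_notify_add,
	.remove	= my_notify_remove,
};

static int __init my_init(void)
{
	/* register_mtd_user() also replays .add() for every device already in mtd_table */
	register_mtd_user(&my_notifier);
	return 0;
}
module_init(my_init);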
----------------------------------------------------------
OK! At this point the overall shape of MTD is clear. Our own work mainly happens in the
MTD layer, and the MTD layer is built at kernel boot and hooked into the block-device
layer. When we register a block device, the kernel has already set up the gendisk for us,
and make_request and queue_request are already in place as well. Our mtd_info structure
is then placed into the global pointer array mtd_table, which holds every registered MTD
device. When an MTD character device is opened or mounted, its open function finds the
matching mtd_info by minor number and operates on it. Opening it as a block device
instead looks up the gendisk in bdev_map by minor number, takes the mtd_info pointer
stored in its private data and inserts it into the mtdblks pointer array, which holds the
mtd_info of every opened device. When a read or write comes in, the MTD layer turns it
into requests, queues them, and the kernel thread servicing the queue handles them.
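From user space, the whole read path can be exercised with an ordinary read of the block node (device name assumed); the request then follows the chain listed below:
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Reads the first 512-byte sector of an MTD block device; the request
 * travels sys_read -> ... -> mtdblock_readsect -> do_cached_read. */
int main(void)
{
	char buf[512];
	int fd = open("/dev/mtdblock0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (read(fd, buf, sizeof(buf)) != sizeof(buf))
		perror("read");
	close(fd);
	return 0;
}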
----------------------------------------------------------
Adding an MTD device:
add_mtd_device
(blktrans_notifier)mtd_notifiers->add
(mtd_blktrans_ops)blktrans_majors->add_mtd
add_mtd_blktrans_dev
alloc_disk
gd->fops = &mtd_blktrans_ops;
gd->queue = tr->blkcore_priv->rq;
add_disk
----------------
Opening an MTD device:
app:open
sys_open
blkdev_open
do_open
get_gendisk->fops->open(bdev->bd_inode, file);
blktrans_open
mtdblock_open
mtdblks[dev] = mtdblk;
----------------
Reading/writing an MTD device:
app:read | write
sys_read | sys_write
ll_rw_block
submit_bh
submit_bio
__make_request
add_request
mtd_blktrans_request
wake_up_process
mtd_blktrans_thread
elv_next_request
do_blktrans_request
tr->readsect | tr->writesect
struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
do_cached_write | do_cached_read
----------------------------------------------------------
tty
----------------------------------------------------------
drivers/char/tty_io.c/tty_register_driver
vty_init
console_map_init
tty1
Input: tied to the keyboard input subsystem's key values at init time
Output: takes an available vc_cons entry as the console
register_framebuffer
registered_fb[i] = fb_info;
module_init(fb_console_init);
fb_console_init
static void fbcon_start(void)
for (i = 0; i < FB_MAX; i++) {
if (registered_fb[i] != NULL) {
info_idx = i;
break;
}
}
release_console_sem();
fbcon_takeover(0);
fbcon_startup
vc_cons
con_init
currcons = fg_console = 0;
master_display_fg = vc = vc_cons[currcons].d;
tty1
write
con_write
do_con_write
insert_char
vc_cons initialization:
fbcon's entry function inserts an fb_console entry into vc_cons
static int __init fb_console_init(void)
and vc_cons in turn fetches the already-registered FB device from fb_display
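The write path listed above can be exercised from user space with a plain write to the VT node (device name assumed, needs permission on tty1); the data then goes through con_write -> do_con_write and is drawn by the framebuffer console:
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "hello from tty1\n";
	int fd = open("/dev/tty1", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/tty1");
		return 1;
	}
	write(fd, msg, strlen(msg));
	close(fd);
	return 0;
}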
----------------------------------------------------------
Author: "逆水行舟不进则退-敏少"