ARM Embedded Linux Porting Experience: Device Drivers

2009-4-15 11:01  Category: MCU / Embedded

  A device driver is the interface between the operating-system kernel and the hardware; it hides the details of the hardware from application programs. In general, a Linux device driver has to perform the following tasks:

  · initialize and release the device;

  · provide the various services the device offers;

  · handle data exchange between the kernel and the device;

  · detect and handle errors that occur while the device is operating.


  A Linux device driver is organized as a collection of functions that each carry out a different task; through these functions, operating a device under Linux becomes just like operating a file. To an application, a hardware device is simply a device file, and it can be manipulated exactly like an ordinary file with calls such as open(), close(), read() and write().


  Linux divides devices into two main classes: character devices and block devices. A character device sends and receives data one character at a time, whereas a block device transfers data in whole buffers. When a read or write request is issued to a character device, the actual hardware I/O normally follows immediately; a block device instead uses a region of system memory as a buffer: if a user process's request can be satisfied from that buffer, the requested data is returned at once, otherwise the request function is called to perform the real I/O. Block devices are aimed mainly at slow devices such as disks.
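
  To make this file-like interface concrete, here is a minimal user-space sketch that opens a device node, reads from it and closes it again. It is only an illustration: the node name /dev/mtdblock0 is an assumed example and depends on how the driver registers itself.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
  char buf[512];
  ssize_t n;
  int fd = open("/dev/mtdblock0", O_RDONLY); /* the device looks like a file */
  if (fd < 0) {
    perror("open");
    return 1;
  }
  n = read(fd, buf, sizeof(buf));            /* read data from the device */
  if (n < 0)
    perror("read");
  else
    printf("read %d bytes\n", (int)n);
  close(fd);                                 /* release the device */
  return 0;
}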


  1. Memory Allocation


  Because a Linux driver runs in the kernel, it cannot call the user-level malloc()/free() functions when it needs to allocate or release memory; it must use the kernel-level functions kmalloc()/kfree() instead. The prototype of kmalloc() is:


void *kmalloc(size_t size, int priority);
  The parameter size is the number of bytes to allocate; kmalloc() can allocate at most 128 KB at a time. The parameter priority tells kmalloc() what the calling process should do if the memory cannot be allocated immediately: GFP_KERNEL means wait, i.e. let kmalloc() arrange for some memory to be moved to the swap area to satisfy the request, while GFP_ATOMIC means do not wait and return 0 (NULL) if the memory cannot be obtained at once. The return value points to the start of the allocated memory; on error it is 0 (NULL).


  Memory allocated with kmalloc() must be released with kfree(), which is defined as:


#define kfree(n) kfree_s((n), 0)
  where the prototype of kfree_s() is:


void kfree_s(void *ptr, int size);
  The parameter ptr is the pointer to the allocated memory returned by kmalloc(), and size is the number of bytes to free; if size is 0, the kernel works out the size of the block automatically.
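
  As a small illustration of these allocation calls, the sketch below allocates a buffer inside a driver and frees it again. It assumes the kernel interface described above; the buffer size and the helper names (drv_alloc_buffer, drv_free_buffer) are purely illustrative.

#include <linux/slab.h>    /* kmalloc()/kfree() */
#include <linux/errno.h>

static char *drv_buf;      /* hypothetical driver buffer */

static int drv_alloc_buffer(void)
{
  /* GFP_KERNEL: the caller may wait while the kernel finds memory */
  drv_buf = kmalloc(1024, GFP_KERNEL);
  if (!drv_buf)
    return -ENOMEM;        /* allocation failed */
  return 0;
}

static void drv_free_buffer(void)
{
  kfree(drv_buf);          /* release what kmalloc() returned */
  drv_buf = NULL;
}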


  2. Interrupts


  Many devices involve interrupt operation, so the driver for such a device must provide an interrupt service routine for the interrupt requests raised by the hardware. Just as it registers its basic entry points, the driver also asks the kernel to associate a particular interrupt request with its interrupt service routine. In Linux this is done with the request_irq() function:


int request_irq(unsigned int irq, void (*handler)(int), unsigned long type, char *name);
  The parameter irq is the interrupt request number to claim; handler is a pointer to the interrupt service routine; type selects a normal or a fast interrupt (after a normal interrupt's service routine returns, the kernel may run the scheduler to decide which process runs next, whereas after a fast interrupt's service routine returns, the interrupted code resumes immediately; type is 0 for a normal interrupt and SA_INTERRUPT for a fast one); name is the name of the device driver.
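
  As a sketch of how a driver might claim an interrupt using the simplified prototype shown above: the IRQ number, the handler body and the name "mydev" are illustrative assumptions, and the exact request_irq()/free_irq() signatures differ between kernel versions (newer kernels pass extra arguments to both).

#define MYDEV_IRQ 5                  /* assumed interrupt number */

static void mydev_interrupt(int irq)
{
  /* acknowledge the hardware and move the data here */
}

static int mydev_setup_irq(void)
{
  /* type 0 = normal interrupt, SA_INTERRUPT = fast interrupt */
  int ret = request_irq(MYDEV_IRQ, mydev_interrupt, 0, "mydev");
  if (ret)
    return ret;                      /* IRQ busy or invalid */
  return 0;
}

static void mydev_release_irq(void)
{
  free_irq(MYDEV_IRQ);               /* give the interrupt line back */
}
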
  4. Block Device Drivers


  Writing a block device driver is a substantial undertaking, considerably harder than writing a character device driver: a simple block device often takes a thousand or more lines of code, whereas a character device can sometimes be handled in a few dozen. It therefore takes a solid grounding to carry this work through. Below is a real example, the driver for the mtdblock block device; by analysing its code we show how a block device driver is written (for reasons of space, large amounts of code have been omitted and only the essential skeleton is kept):


#include <linux/config.h>
#include <linux/devfs_fs_kernel.h>
static void mtd_notify_add(struct mtd_info* mtd);
static void mtd_notify_remove(struct mtd_info* mtd);
static struct mtd_notifier notifier = {
  mtd_notify_add,
  mtd_notify_remove,
  NULL
};
static devfs_handle_t devfs_dir_handle = NULL;
static devfs_handle_t devfs_rw_handle[MAX_MTD_DEVICES];


static struct mtdblk_dev {
  struct mtd_info *mtd; /* Locked */
  int count;
  struct semaphore cache_sem;
  unsigned char *cache_data;
  unsigned long cache_offset;
  unsigned int cache_size;
  enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
} *mtdblks[MAX_MTD_DEVICES];


static spinlock_t mtdblks_lock;
/* this lock is used just in kernels >= 2.5.x */
static spinlock_t mtdblock_lock;


static int mtd_sizes[MAX_MTD_DEVICES];
static int mtd_blksizes[MAX_MTD_DEVICES];


static void erase_callback(struct erase_info *done)
{
  wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
  wake_up(wait_q);
}


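/* erase_write(): erase the flash region starting at pos for len bytes, then
 * write buf into it; the caller sleeps on a wait queue until the erase ends. */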
static int erase_write (struct mtd_info *mtd, unsigned long pos,
int len, const char *buf)
{
  struct erase_info erase;
  DECLARE_WAITQUEUE(wait, current);
  wait_queue_head_t wait_q;
  size_t retlen;
  int ret;


  /*
  * First, let's erase the flash block.
  */


  init_waitqueue_head(&wait_q);
  erase.mtd = mtd;
  erase.callback = erase_callback;
  erase.addr = pos;
  erase.len = len;
  erase.priv = (u_long)&wait_q;


  set_current_state(TASK_INTERRUPTIBLE);
  add_wait_queue(&wait_q, &wait);


  ret = MTD_ERASE(mtd, &erase);
  if (ret) {
  set_current_state(TASK_RUNNING);
  remove_wait_queue(&wait_q, &wait);
  printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
          "on \"%s\" failed\n", pos, len, mtd->name);
  return ret;
  }


  schedule(); /* Wait for erase to finish. */
  remove_wait_queue(&wait_q, &wait);


  /*
  * Next, write the data to flash.
  */


  ret = MTD_WRITE (mtd, pos, len, &retlen, buf);
  if (ret)
  return ret;
  if (retlen != len)
  return -EIO;
  return 0;
}


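/* write_cached_data(): flush the driver's write-back cache to flash if it
 * currently holds dirty data. */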
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
  struct mtd_info *mtd = mtdblk->mtd;
  int ret;


  if (mtdblk->cache_state != STATE_DIRTY)
  return 0;


  DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
        "at 0x%lx, size 0x%x\n", mtd->name,
        mtdblk->cache_offset, mtdblk->cache_size);


  ret = erase_write (mtd, mtdblk->cache_offset,
                     mtdblk->cache_size, mtdblk->cache_data);
  if (ret)
  return ret;


  mtdblk->cache_state = STATE_EMPTY;
  return 0;
}


static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, const char *buf)
{
  …
}


static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, char *buf)
{
  …
}


static int mtdblock_open(struct inode *inode, struct file *file)
{
  …
}


static release_t mtdblock_release(struct inode *inode, struct file *file)
{
  int dev;
  struct mtdblk_dev *mtdblk;
  DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");


  if (inode == NULL)
  release_return(-ENODEV);


  dev = minor(inode->i_rdev);
  mtdblk = mtdblks[dev];


  down(&mtdblk->cache_sem);
  write_cached_data(mtdblk);
  up(&mtdblk->cache_sem);


  spin_lock(&mtdblks_lock);
  if (!--mtdblk->count) {
  /* It was the last usage. Free the device */
  mtdblks[dev] = NULL;
  spin_unlock(&mtdblks_lock);
  if (mtdblk->mtd->sync)
  mtdblk->mtd->sync(mtdblk->mtd);
  put_mtd_device(mtdblk->mtd);
  vfree(mtdblk->cache_data);
  kfree(mtdblk);
  } else {
  spin_unlock(&mtdblks_lock);
  }


  DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
 
  BLK_DEC_USE_COUNT;
  release_return(0);
}


/*
* This is a special request_fn because it is executed in a process context
* to be able to sleep independently of the caller. The
* io_request_lock (for <2.5) or queue_lock (for >=2.5) is held upon entry
* and exit. The head of our request queue is considered active so there is
* no need to dequeue requests before we are done.
*/
static void handle_mtdblock_request(void)
{
  struct request *req;
  struct mtdblk_dev *mtdblk;
  unsigned int res;


  for (;;) {
  INIT_REQUEST;
  req = CURRENT;
  spin_unlock_irq(QUEUE_LOCK(QUEUE));
  mtdblk = mtdblks[minor(req->rq_dev)];
  res = 0;


  if (minor(req->rq_dev) >= MAX_MTD_DEVICES)
  panic("%s : minor out of bound", __FUNCTION__);


  if (!IS_REQ_CMD(req))
  goto end_req;


  if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))
  goto end_req;


  // Handle the request
  switch (rq_data_dir(req))
  {
  int err;


  case READ:
  down(&mtdblk->cache_sem);
  err = do_cached_read (mtdblk, req->sector << 9,
                        req->current_nr_sectors << 9,
                        req->buffer);
  up(&mtdblk->cache_sem);
  if (!err)
  res = 1;
  break;


  case WRITE:
  // Read only device
  if ( !(mtdblk->mtd->flags & MTD_WRITEABLE) )
  break;


  // Do the write
  down(&mtdblk->cache_sem);
  err = do_cached_write (mtdblk, req->sector << 9,
                         req->current_nr_sectors << 9, req->buffer);
  up(&mtdblk->cache_sem);
  if (!err)
  res = 1;
  break;
  }


  end_req:
  spin_lock_irq(QUEUE_LOCK(QUEUE));
  end_request(res);
  }
}


 
For more details, visit the 凌阳教育 (Sunplus Education) embedded training forum at http://bbs.sunplusedu.com/showtopic-2990.aspx
