/* vmedrv.c -- AlcapDAQ project (listing extracted from Doxygen documentation) */
1 /* vmedrv.c */
2 /* VME device driver for Bit3 Model 617/618/620 on Linux 2.6.x */
3 /* Created by Enomoto Sanshiro on 28 November 1999. */
4 /* Updated by Enomoto Sanshiro on 7 September 2008. */
5 /* Updated by Enomoto Sanshiro on 24 February 2012. */
6 
7 
/* Per-subsystem compile-time tracing: when TRACE_<subsys> is defined,
 * DEBUG_<subsys>(x) expands to x (normally a printk statement), otherwise
 * it expands to nothing.  Only TRACE_CONFIG is enabled in this build. */
8 #define TRACE_CONFIG 1
9 #ifdef TRACE_CONFIG
10 #define DEBUG_CONFIG(x) x
11 #else
12 #define DEBUG_CONFIG(x)
13 #endif
14 
15 #ifdef TRACE_PIO
16 #define DEBUG_PIO(x) x
17 #else
18 #define DEBUG_PIO(x)
19 #endif
20 
21 #ifdef TRACE_MAP
22 #define DEBUG_MAP(x) x
23 #else
24 #define DEBUG_MAP(x)
25 #endif
26 
27 #ifdef TRACE_INTERRUPT
28 #define DEBUG_INT(x) x
29 #else
30 #define DEBUG_INT(x)
31 #endif
32 
33 #ifdef TRACE_DMA
34 #define DEBUG_DMA(x) x
35 #else
36 #define DEBUG_DMA(x)
37 #endif
38 
39 
40 #if defined(USE_MODVERSIONS) && USE_MODVERSIONS
41 # define MODVERSIONS
42 # include <config/modversions.h>
43 #endif
44 
45 
46 #include <linux/version.h>
47 #include <linux/kernel.h>
48 #include <linux/module.h>
49 #include <linux/fs.h>
50 #include <linux/cdev.h>
51 #include <linux/init.h>
52 #include <linux/slab.h>
53 #include <linux/ioctl.h>
54 #include <linux/mm.h>
55 #include <linux/poll.h>
56 #include <linux/sched.h>
57 #include <linux/interrupt.h>
58 #include <linux/pci.h>
59 #include <linux/ioport.h>
60 #include <linux/errno.h>
61 #include <linux/dma-mapping.h>
62 #include <asm/io.h>
63 #include <asm/uaccess.h>
64 #include <asm/segment.h>
65 #include "vmedrv.h"
66 #include "vmedrv_params.h"
67 #include "vmedrv_conf.h"
68 
69 MODULE_LICENSE("GPL");
70 MODULE_AUTHOR("Enomoto Sanshiro");
71 MODULE_DESCRIPTION("VME Driver for SBS (Bit3) 617/618/620 Bus Bridge");
72 
73 
74 #ifdef SYSTEM_FC2
75 #include "vmedrv-FC2.h"
76 #endif
77 #ifdef SYSTEM_FC3
78 #include "vmedrv-FC3.h"
79 #endif
80 
81 
/* Kernel-version compatibility switches for 2.6.x API changes:
 *  - 2.6.18 dropped the pt_regs argument from interrupt handlers
 *  - 2.6.36 removed file_operations.ioctl in favor of unlocked_ioctl
 *  - 2.6.32 replaced DMA_32BIT_MASK with DMA_BIT_MASK(32) */
82 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
83 #define USE_OLD_IRQ 1
84 #endif
85 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36))
86 #define USE_OLD_IOCTL 1
87 #endif
88 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32))
89 #define USE_OLD_DMABITMASK 1
90 #endif
91 
92 
93 
95  struct task_struct* task;
96  int irq;
97  int vector;
98  int signal_id;
103 };
104 
108 };
109 
111  int dev_id;
112  struct pci_dev* pci_dev;
113  unsigned char irq;
114  unsigned io_node_io_base;
125  void* dma_buffer;
126  unsigned long dma_buffer_size;
127  unsigned long dma_buffer_bus_address;
129 };
130 
/* Per-open-descriptor state, stored in file->private_data by open().
 * NOTE(review): this listing elides several fields (address modifiers,
 * window indices/counts, interrupt-client list head) that are referenced
 * by the functions below -- confirm against the full source. */
131 struct dev_prop_t {
134  unsigned function_code;
135  unsigned byte_swapping;
136  unsigned mapping_flags;
138  unsigned data_width;
139  unsigned transfer_method;
140  void* pio_buffer;
146 };
147 
148 
/* --- file_operations entry points --- */
149 static int vmedrv_open(struct inode* inode, struct file* filep);
150 static int vmedrv_release(struct inode* inode, struct file* filep);
151 static ssize_t vmedrv_read(struct file* filep, char* buf, size_t count, loff_t* f_pos);
152 static ssize_t vmedrv_write(struct file* filep, const char* buf, size_t count, loff_t* f_pos);
153 static loff_t vmedrv_lseek(struct file* filep, loff_t offset, int whence);
154 static long vmedrv_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
155 static int vmedrv_mmap(struct file* filep, struct vm_area_struct* vma);
156 static unsigned int vmedrv_poll(struct file* filep, poll_table* table);
157 
/* --- interrupt handler (signature changed in kernel 2.6.18) --- */
158 #ifdef USE_OLD_IRQ
159 static irqreturn_t vmedrv_interrupt(int irq, void* dev_id, struct pt_regs* regs);
160 #else
161 static irqreturn_t vmedrv_interrupt(int irq, void* dev_id);
162 #endif
163 
/* --- PCI hotplug callbacks --- */
164 static int vmedrv_pci_probe(struct pci_dev* dev, const struct pci_device_id* id);
165 static void vmedrv_pci_remove(struct pci_dev* dev);
166 
/* --- adapter configuration helpers --- */
167 static int initialize(void);
168 static int set_access_mode(struct dev_prop_t* dev_prop, int mode);
169 static int set_transfer_method(struct dev_prop_t* dev_prop, int method);
170 static int probe(struct dev_prop_t* dev_prop, __user vmedrv_word_access_t* word_access);
171 
/* --- PIO transfers and mapping-window management --- */
172 static int pio_read(struct dev_prop_t* dev_prop, char* buf, unsigned long vme_address, int count);
173 static int pio_write(struct dev_prop_t* dev_prop, const char* buf, unsigned long vme_address, int count);
174 static int prepare_pio(struct dev_prop_t* dev_prop);
175 static int allocate_windows(int number_of_windows);
176 static void free_windows(int window_index, int number_of_windows);
177 static void* map_windows(unsigned vme_address, unsigned window_index, unsigned number_of_windows, unsigned flags);
178 
/* --- interrupt enable/acknowledge/notification machinery --- */
179 static int enable_normal_interrupt(void);
180 static int disable_normal_interrupt(void);
181 static int enable_error_interrupt(void);
182 static int disable_error_interrupt(void);
183 static void save_interrupt_flags(void);
184 static void restore_interrupt_flags(void);
185 static int acknowledge_error_interrupt(unsigned local_status);
186 static int acknowledge_pt_interrupt(unsigned local_status);
187 static int acknowledge_dma_interrupt(unsigned dma_status);
188 static int acknowledge_pr_interrupt(unsigned remote_status);
189 static int acknowledge_vme_interrupt(unsigned interrupt_status);
190 static int register_interrupt_notification(struct dev_prop_t* dev_prop, struct task_struct* task, int irq, int vector, int signal_id);
191 static int unregister_interrupt_notification(struct dev_prop_t* dev_prop, struct task_struct* task, int irq, int vector);
192 static int wait_for_interrupt_notification(struct task_struct* task, int irq, int vector, int timeout);
193 static int check_interrupt_notification(int irq, int vector);
194 static int clear_interrupt_notification(int irq, int vector);
195 static int set_interrupt_autodisable(int irq, int vector);
196 static int set_vector_mask(int irq, int vector, int vector_mask);
197 static int reset_adapter(void);
198 
/* --- DMA transfer engine --- */
199 static int dma_read(struct dev_prop_t* dev_prop, char* buf, unsigned long vme_adress, int count);
200 static int dma_write(struct dev_prop_t* dev_prop, const char* buf, unsigned long vme_adress, int count);
201 static int prepare_dma(struct dev_prop_t* dev_prop);
202 static int start_dma(struct dev_prop_t* dev_prop, unsigned long pci_address, unsigned long vme_address, unsigned long size, int direction);
203 static void* allocate_dma_buffer(unsigned long size);
204 static void release_dma_buffer(void);
205 static unsigned map_dma_windows(unsigned pci_address, unsigned size, unsigned dma_flags);
206 static int initiate_dma(struct dev_prop_t* dev_prop, unsigned mapped_pci_address, unsigned vme_address, unsigned size, int direction);
207 static int release_dma(void);
213 
214 
/* Character-device entry points exported to the VFS.  Pre-2.6.36
 * kernels use the BKL-protected .ioctl slot instead of .unlocked_ioctl. */
215 static struct file_operations vmedrv_fops = {
216  .owner = THIS_MODULE,
217  .llseek = vmedrv_lseek,
218  .read = vmedrv_read,
219  .write = vmedrv_write,
220 #ifdef USE_OLD_IOCTL
221  .ioctl = vmedrv_ioctl,
222 #else
223  .unlocked_ioctl = vmedrv_unlocked_ioctl,
224 #endif
225  .mmap = vmedrv_mmap,
226  .poll = vmedrv_poll,
227  .open = vmedrv_open,
228  .release = vmedrv_release,
229 };
230 
/* PCI hotplug hooks; vmedrv_pci_probe() records the matched adapter
 * in the module-wide 'bit3' descriptor below. */
231 static struct pci_driver vmedrv_pci_driver = {
232  .id_table = vmedrv_device_id_table,
233  .probe = vmedrv_pci_probe,
234  .remove = vmedrv_pci_remove,
235 };
236 
/* Module-wide state: wait queues for DMA completion and VME bus
 * interrupts, usage counters, char-device identity, and the single
 * supported Bit3 adapter.  NOTE(review): lines elided here in this
 * listing (interrupt-client bookkeeping, most likely). */
237 static wait_queue_head_t vmedrv_dmadone_wait_queue;
238 static wait_queue_head_t vmedrv_vmebusirq_wait_queue;
239 static int is_dma_running;
240 static int module_open_count;
241 static int bit3_card_count = 0;
244 
245 static dev_t dev;
246 static struct cdev cdev;
247 
248 static struct bit3_config_t bit3;
250 
/* Module init: register with the PCI core (which runs vmedrv_pci_probe
 * and fills bit3.pci_dev), obtain a char-device major (dynamic when
 * vmedrv_major == 0), and register the cdev.  Unwinds via goto labels
 * in reverse order on failure.
 * NOTE(review): several call-argument lines are elided in this listing
 * (alloc_chrdev_region/register_chrdev_region arguments, the
 * io_node_io_base assignment, and the final printk arguments). */
251 static int __init vmedrv_initialize_module(void)
252 {
253  int result = -ENODEV;
254 
255  bit3.pci_dev = NULL;
257  result = pci_register_driver(&vmedrv_pci_driver);
258  if (result < 0) {
259  printk(KERN_WARNING "%s: unable to register PCI driver\n", vmedrv_name);
260  goto FAIL_PCI_DRIVER_REGISTRATION;
261  }
/* probe() ran synchronously during registration; no device means no card */
262  if (bit3.pci_dev == NULL) {
263  printk(KERN_WARNING "%s: unable to find VME-PCI Bus Adapter.\n", vmedrv_name);
264  result = -ENODEV;
265  goto FAIL_DEVICE_DETECTION;
266  }
267 
/* dynamic major when none was given; otherwise claim the requested one */
268  if (vmedrv_major == 0) {
269  result = alloc_chrdev_region(
271  );
272  }
273  else {
274  dev = MKDEV(vmedrv_major, 0);
275  result = register_chrdev_region(
277  );
278  }
279  if (result < 0) {
280  printk(KERN_WARNING "%s: unable to get major %d\n", vmedrv_name, vmedrv_major);
281  goto FAIL_DRIVER_REGISTRATION;
282  }
283  if (vmedrv_major == 0) {
284  vmedrv_major = MAJOR(dev);
285  }
286 
288  bit3.dma_buffer_size = 0;
289 
294  module_open_count = 0;
297 
299  bit3.pci_dev->resource[bit3_IO_NODE_IO_BASE_INDEX].start
300  );
301  bit3.irq = bit3.pci_dev->irq;
302 
303  init_waitqueue_head(&vmedrv_dmadone_wait_queue);
304  init_waitqueue_head(&vmedrv_vmebusirq_wait_queue);
305 
/* make the device nodes live; must be last, as open() can race init */
306  cdev_init(&cdev, &vmedrv_fops);
307  cdev.owner = THIS_MODULE;
308  cdev.ops = &vmedrv_fops;
309  result = cdev_add(&cdev, dev, vmedrv_NUMBER_OF_MINOR_IDS);
310  if (result < 0) {
311  printk(KERN_WARNING "%s: unable to register driver.\n", vmedrv_name);
312  goto FAIL_CDEV_REGISTRATION;
313  }
314 
315  printk(KERN_INFO "%s: successfully installed at 0x%04x on irq %d (major = %d).\n",
317  );
318 
319  return 0;
320 
/* error unwinding, innermost acquisition first */
321  FAIL_CDEV_REGISTRATION:
322  unregister_chrdev_region(dev, vmedrv_NUMBER_OF_MINOR_IDS);
323  FAIL_DRIVER_REGISTRATION:
324  ;
325  FAIL_DEVICE_DETECTION:
326  pci_unregister_driver(&vmedrv_pci_driver);
327  FAIL_PCI_DRIVER_REGISTRATION:
328  ;
329 
330  return result;
331 }
332 
333 
/* Module exit: tear down in reverse order of init -- remove the cdev,
 * release the device-number region, then detach from the PCI core.
 * (One line is elided from this listing at the top of the body.) */
334 static void __exit vmedrv_cleanup_module(void)
335 {
337 
338  cdev_del(&cdev);
339  unregister_chrdev_region(dev, vmedrv_NUMBER_OF_MINOR_IDS);
340 
341  pci_unregister_driver(&vmedrv_pci_driver);
342 
343  printk(KERN_INFO "%s: removed.\n", vmedrv_name);
344 }
345 
346 
349 
350 
/* open() handler: allocates per-descriptor state and, on the first open
 * of the module, claims the adapter's I/O region, initializes the remote
 * card and installs the shared IRQ handler.  The minor number selects
 * the access mode and transfer method via the minor_to_* tables.
 * NOTE(review): argument lines for request_region()/request_irq() and
 * the open-count increment are elided in this listing.
 * NOTE(review): on the first-open failure paths the kmalloc'ed
 * filep->private_data is not freed -- looks like a leak; confirm
 * against the full source. */
351 static int vmedrv_open(struct inode* inode, struct file* filep)
352 {
353  struct dev_prop_t* dev_prop;
354  int minor_id, mode, method;
355 
356  minor_id = MINOR(inode->i_rdev);
357  if (minor_id >= vmedrv_NUMBER_OF_MINOR_IDS) {
358  printk(KERN_WARNING "%s: invalid minor ID.\n", vmedrv_name);
359  return -ENODEV;
360  }
361 
362  filep->private_data = kmalloc(sizeof(struct dev_prop_t), GFP_KERNEL);
363  if (filep->private_data == 0) {
364  printk(KERN_WARNING "%s: unable to allocate memory.", vmedrv_name);
365  return -ENOMEM;
366  }
367  dev_prop = filep->private_data;
368 
/* 'while' used as a breakable one-shot block: body runs only on first open */
369  while (module_open_count == 0) {
370  struct resource* resource;
371  int result;
372 
373  resource = request_region(
375  );
376  if (resource == NULL) {
377  printk(KERN_WARNING "%s: I/O region request fault.\n", vmedrv_name);
378  result = -EIO;
379  goto FAIL_REGION_REQUEST;
380  }
381 
382  result = initialize();
383  if (result < 0) {
384  printk(KERN_WARNING "%s: initialization fault.\n", vmedrv_name);
385  goto FAIL_INITIALIZATION;
386  }
387 
/* pre-2.6.18 kernels spell the shared-IRQ flags differently */
388 #ifdef USE_OLD_IRQ
389  result = request_irq(
391  SA_INTERRUPT | SA_SHIRQ, vmedrv_name, &bit3.dev_id
392  );
393 #else
394  result = request_irq(
396  IRQF_SHARED, vmedrv_name, &bit3.dev_id
397  );
398 #endif
399  if (result < 0) {
400  printk(KERN_WARNING "%s: IRQ request fault.\n", vmedrv_name);
401  goto FAIL_IRQ_REQUEST;
402  }
403 
404  break;
405 
/* first-open failure unwinding */
406  FAIL_IRQ_REQUEST:
407  ;
408  FAIL_INITIALIZATION:
409  if (bit3.window_region_base != 0) {
410  iounmap(bit3.mapped_node_io_base);
411  iounmap(bit3.mapping_registers_base);
412  iounmap(bit3.window_region_base);
414  }
415  release_region(bit3.io_node_io_base, bit3_IO_NODE_IO_SIZE);
416  FAIL_REGION_REQUEST:
417  ;
418 
419  return result;
420  }
421 
/* fresh descriptor state */
422  dev_prop->pio_buffer = 0;
423  dev_prop->number_of_pio_windows = 0;
424  dev_prop->number_of_mmap_windows = 0;
425  dev_prop->local_interrupt_client = 0;
426 
427  mode = minor_to_access_mode[minor_id];
428  method = minor_to_transfer_method[minor_id];
429  set_access_mode(dev_prop, mode);
430  set_transfer_method(dev_prop, method);
431 
432  try_module_get(THIS_MODULE);
434 
435  return 0;
436 }
437 
438 
/* release() handler: drops this descriptor's interrupt registrations,
 * window allocations and buffers; on the last close it also unmaps the
 * adapter, frees the IRQ and releases the I/O region.
 * NOTE(review): the open-count decrement and the free_windows() calls
 * are elided in this listing. */
439 static int vmedrv_release(struct inode* inode, struct file* filep)
440 {
441  struct dev_prop_t* dev_prop;
442  dev_prop = filep->private_data;
443 
445  if (module_open_count == 0) {
448  }
449 
450  /* unregister all registered interrupts */
451  unregister_interrupt_notification(dev_prop, current, 0, 0);
452 
453  if (dev_prop->number_of_pio_windows > 0) {
455  }
456  if (dev_prop->number_of_mmap_windows > 0) {
458  }
459 
460  if (dev_prop->pio_buffer > 0) {
461  kfree(dev_prop->pio_buffer);
462  }
463  kfree(dev_prop);
464 
/* last close: tear down the adapter mappings and shared resources */
465  if (module_open_count == 0) {
466  if (bit3.window_region_base != 0) {
467  iounmap(bit3.mapped_node_io_base);
468  iounmap(bit3.mapping_registers_base);
469  iounmap(bit3.window_region_base);
471  }
472  free_irq(bit3.irq, &bit3.dev_id);
473  release_region(bit3.io_node_io_base, bit3_IO_NODE_IO_SIZE);
474  }
475 
476  module_put(THIS_MODULE);
477 
478  return 0;
479 }
480 
481 
482 static ssize_t vmedrv_read(struct file* filep, char* buf, size_t count, loff_t* f_pos)
483 {
484  struct dev_prop_t* dev_prop;
485  unsigned long vme_address;
486  int total_read_size, read_size, remainder_size;
487 
488  dev_prop = filep->private_data;
489  vme_address = *f_pos;
490  read_size = 0;
491  total_read_size = 0;
492  remainder_size = count;
493 
494  if ((count % dev_prop->data_width) != 0) {
495  return -EINVAL;
496  }
497 
498  while (remainder_size > 0) {
499  if (
500  (dev_prop->transfer_method == tmDMA) ||
501  (dev_prop->transfer_method == tmNBDMA)
502  ){
503  read_size = dma_read(dev_prop, buf, vme_address, remainder_size);
504  }
505  else if (dev_prop->transfer_method == tmPIO) {
506  read_size = pio_read(dev_prop, buf, vme_address, remainder_size);
507  }
508  else {
509  return -EINVAL;
510  }
511 
512  if (read_size < 0) {
513  return read_size;
514  }
515  else if (read_size == 0) {
516  break;
517  }
518 
519  remainder_size -= read_size;
520  total_read_size += read_size;
521  vme_address += read_size;
522  buf += read_size;
523  }
524 
525  *f_pos += total_read_size;
526 
527  return total_read_size;
528 }
529 
530 
531 static ssize_t vmedrv_write(struct file* filep, const char* buf, size_t count, loff_t* f_pos)
532 {
533  struct dev_prop_t* dev_prop;
534  unsigned long vme_address;
535  int total_written_size, written_size, remainder_size;
536 
537  dev_prop = filep->private_data;
538  vme_address = *f_pos;
539  written_size = 0;
540  total_written_size = 0;
541  remainder_size = count;
542 
543  if ((count % dev_prop->data_width) != 0) {
544  return -EINVAL;
545  }
546 
547  while (remainder_size > 0) {
548  if (
549  (dev_prop->transfer_method == tmDMA) ||
550  (dev_prop->transfer_method == tmNBDMA)
551  ){
552  written_size = dma_write(dev_prop, buf, vme_address, remainder_size);
553  }
554  else if (dev_prop->transfer_method == tmPIO) {
555  written_size = pio_write(dev_prop, buf, vme_address, remainder_size);
556  }
557  else {
558  return -EINVAL;
559  }
560 
561  if (written_size < 0) {
562  return written_size;
563  }
564  else if (written_size == 0) {
565  break;
566  }
567 
568  remainder_size -= written_size;
569  total_written_size += written_size;
570  vme_address += written_size;
571  buf += written_size;
572  }
573 
574  *f_pos += total_written_size;
575 
576  return total_written_size;
577 }
578 
579 
580 static loff_t vmedrv_lseek(struct file* filep, loff_t offset, int whence)
581 {
582  switch (whence) {
583  case 0: /* SEEK_SET */
584  filep->f_pos = offset;
585  break;
586  case 1: /* SEEK_CUR */
587  filep->f_pos += offset;
588  break;
589  case 2: /* SEEK_END */
590  return -EINVAL;
591  default:
592  return -EINVAL;
593  };
594 
595  return filep->f_pos;
596 }
597 
598 
#ifdef USE_OLD_IOCTL
/* Legacy ioctl entry point for pre-2.6.36 kernels: forwards to the
 * unlocked handler; the BKL-era 'inode' argument is unused.  The long
 * result is narrowed to int, matching the old .ioctl signature. */
static int vmedrv_ioctl(struct inode* inode, struct file *filep, unsigned int cmd, unsigned long arg)
{
    return (int) vmedrv_unlocked_ioctl(filep, cmd, arg);
}
#endif
605 
/* ioctl dispatcher: validates the command magic, copies the argument
 * (a plain int for mode/method commands, a vmedrv_interrupt_property_t
 * for interrupt commands) from user space, then dispatches.
 * NOTE(review): most 'case VMEDRV_IOC_...' label lines are elided in
 * this listing; the handler calls below indicate the intended commands. */
606 static long vmedrv_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
607 {
608  struct dev_prop_t* dev_prop;
609  int argument_size;
610  int result = -EINVAL;
611  int value = 0;
612  unsigned long uncopied_length;
613  struct vmedrv_interrupt_property_t interrupt_property;
614  int irq = 0, vector = 0, signal_id = 0, timeout = 0;
615  int vector_mask = 0xffff;
616 
617  if (_IOC_TYPE(cmd) != VMEDRV_IOC_MAGIC) {
618  return -EINVAL;
619  }
620 
621  /* read arguments from user area */
622  if (
623  (cmd == VMEDRV_IOC_SET_ACCESS_MODE) ||
625  ){
626  if (get_user(value, (int*) arg) < 0) {
627  return -EFAULT;
628  }
629  }
630  else if (
634  (cmd == VMEDRV_IOC_CHECK_INTERRUPT) ||
635  (cmd == VMEDRV_IOC_CLEAR_INTERRUPT) ||
638  ){
639  argument_size = sizeof(struct vmedrv_interrupt_property_t);
640  uncopied_length = copy_from_user(
641  &interrupt_property, (const __user void *) arg, argument_size
642  );
643  if (uncopied_length != 0) {
644  return -EFAULT;
645  }
646 
647  irq = interrupt_property.irq;
648  vector = interrupt_property.vector;
649  signal_id = interrupt_property.signal_id;
650  timeout = interrupt_property.timeout;
651  vector_mask = interrupt_property.vector_mask;
652  }
653 
654  dev_prop = filep->private_data;
655 
/* dispatch (case labels elided in this listing) */
656  switch (cmd) {
658  result = set_access_mode(dev_prop, value);
659  break;
661  result = set_transfer_method(dev_prop, value);
662  break;
663  case VMEDRV_IOC_PROBE:
664  result = probe(dev_prop, (__user vmedrv_word_access_t*) arg);
665  break;
667  result = register_interrupt_notification(dev_prop, current, irq, vector, signal_id);
668  break;
670  result = unregister_interrupt_notification(dev_prop, current, irq, vector);
671  break;
673  result = wait_for_interrupt_notification(current, irq, vector, timeout);
674  break;
676  result = check_interrupt_notification(irq, vector);
677  break;
679  result = clear_interrupt_notification(irq, vector);
680  break;
682  result = set_interrupt_autodisable(irq, vector);
683  break;
685  result = enable_normal_interrupt();
686  break;
688  result = disable_normal_interrupt();
689  break;
691  result = set_vector_mask(irq, vector, vector_mask);
692  break;
694  result = enable_error_interrupt();
695  break;
697  result = disable_error_interrupt();
698  break;
700  result = reset_adapter();
701  break;
702  default:
703  return -EINVAL;
704  }
705 
706  return result;
707 }
708 
709 
710 
/* mmap handler: maps a span of VME address space (vm_pgoff selects the
 * starting bus address in pages) straight into the caller's address
 * space through the adapter's mapping windows.
 * NOTE(review): the allocate_windows() call and part of the physical
 * address computation are elided in this listing. */
711 static int vmedrv_mmap(struct file* filep, struct vm_area_struct* vma)
712 {
713  unsigned long vme_address, size;
714  unsigned long physical_address, page_frame_number;
715  struct dev_prop_t* dev_prop;
716 
717  dev_prop = filep->private_data;
718  size = vma->vm_end - vma->vm_start;
719  vme_address = vma->vm_pgoff * PAGE_SIZE;
720 
721  DEBUG_MAP(printk(KERN_DEBUG "mapping vme memory...\n"));
722  DEBUG_MAP(printk(KERN_DEBUG " vme address: 0x%lx\n", vme_address));
723  DEBUG_MAP(printk(KERN_DEBUG " mapping size: 0x%lx\n", size));
724 
725  if (vme_address & (PAGE_SIZE - 1)) {
726  /* offset address must be aligned with the MMU page */
727  return -ENXIO;
728  }
729 
730  if (dev_prop->number_of_mmap_windows > 0) {
731  /* FIXME: mmap() can be called only once every open() */
732  return -ENXIO;
733  }
734 
/* ceiling division: number of adapter windows spanned by 'size' */
735  dev_prop->number_of_mmap_windows = ((unsigned long) size - 1) / (unsigned long) bit3_WINDOW_SIZE + 1;
737  if (dev_prop->mmap_window_index < 0) {
738  dev_prop->number_of_mmap_windows = 0;
739  return dev_prop->mmap_window_index;
740  }
741 
742  DEBUG_MAP(printk(KERN_DEBUG " map pages: %d\n", dev_prop->number_of_mmap_windows));
743  DEBUG_MAP(printk(KERN_DEBUG " window index: %d\n", dev_prop->mmap_window_index));
744 
745  map_windows(vme_address, size, dev_prop->mmap_window_index, dev_prop->mapping_flags);
746  physical_address = (
748  dev_prop->mmap_window_index * bit3_WINDOW_SIZE +
749  (vme_address & bit3_PAGE_OFFSET_MASK)
750  );
751  page_frame_number = physical_address >> PAGE_SHIFT;
752 
753  DEBUG_MAP(printk(KERN_DEBUG " physical address: 0x%lx\n", physical_address));
754 
755  if (remap_pfn_range(vma, vma->vm_start, page_frame_number, size, vma->vm_page_prot) < 0) {
756  return -EAGAIN;
757  }
758 
759  DEBUG_MAP(printk(KERN_DEBUG " mapped address: 0x%lx\n", vma->vm_start));
760 
761  return 0;
762 }
763 
764 
765 static unsigned int vmedrv_poll(struct file* filep, poll_table* table)
766 {
767  struct dev_prop_t* dev_prop = filep->private_data;
768  struct local_interrupt_client_t* local_interrupt_client;
769  unsigned int mask = 0;
770 
771  poll_wait(filep, &vmedrv_vmebusirq_wait_queue, table);
772 
773  for (
774  local_interrupt_client = dev_prop->local_interrupt_client;
775  local_interrupt_client != 0;
776  local_interrupt_client = local_interrupt_client->next
777  ){
778  if (local_interrupt_client->interrupt_client->interrupt_count > 0) {
779  mask |= POLLIN | POLLRDNORM;
780  }
781  }
782 
783  return mask;
784 }
785 
786 
/* Shared IRQ handler: identifies which adapter condition raised the
 * interrupt (error, PR, DMA-done, PT, or VME backplane) and dispatches
 * to the matching acknowledge routine.
 * NOTE(review): the status-register read lines and several acknowledge
 * calls are elided in this listing. */
788 static irqreturn_t vmedrv_interrupt(int irq, void* dev_id, struct pt_regs* regs)
789 #else
790 static irqreturn_t vmedrv_interrupt(int irq, void* dev_id)
791 #endif
792 {
793  unsigned status;
794 
795  DEBUG_INT(printk(KERN_DEBUG "interrupt handled.\n"));
796 
797  if (module_open_count <= 0) {
798  /* note that access to the VME card unavailable */
799  return IRQ_NONE;
800  }
801 
802  /* check whether the PCI card is generating an interrupt */
804  if (! (status & icINTERRUPT_ACTIVE)) {
805  DEBUG_INT(printk(KERN_DEBUG "the card is not generating an interrupt.\n"));
806  return IRQ_NONE;
807  }
808 
809  /* checek for a error interrupt */
811  if (status & (lsERROR_BITS & ~lsRECEIVING_PR_INTERRUPT)) {
813  return IRQ_HANDLED;
814  }
815 
816  /* check for a PR interrupt */
817  if (status & lsRECEIVING_PR_INTERRUPT) {
818  acknowledge_pr_interrupt(status);
819  return IRQ_HANDLED;
820  }
821 
822  /* check for a DMA DONE interrupt */
823  if (bit3.is_dma_available) {
825  if ((status & dcDMA_DONE_FLAG) && (status & dcENABLE_DMA_DONE_INTERRUPT)) {
827  return IRQ_HANDLED;
828  }
829  }
830 
831  /* check for a PT interrupt */
833  if (status & rsRECEIVING_PT_INTERRUPT) {
834  acknowledge_pt_interrupt(status);
835  return IRQ_HANDLED;
836  }
837 
838  /* check for a VME backplane interrupt */
840  if (status) {
842  return IRQ_HANDLED;
843  }
844 
845  printk(KERN_WARNING "%s: Unknown interrupt handled...\n", vmedrv_name);
846  return IRQ_HANDLED;
847 }
848 
849 
/* PCI probe callback: logs the adapter's BARs, records the device in
 * the module-wide 'bit3' descriptor, enables it, and restricts DMA to
 * the 32-bit address range.  Model 616 has no DMA controller.
 * NOTE(review): the guard condition before the first bit3_card_count++
 * (rejecting cards beyond the first) is elided in this listing. */
850 static int vmedrv_pci_probe(struct pci_dev* pci_dev, const struct pci_device_id* id)
851 {
852  int result;
853 
854  printk(
855  KERN_INFO "%s: %s is detected at ioport 0x%04lx on irq %d.\n",
856  vmedrv_name,
857  vmedrv_model_name_table[id->driver_data],
858  (unsigned long) pci_dev->resource[bit3_IO_NODE_IO_BASE_INDEX].start,
859  pci_dev->irq
860  );
861  printk(
862  KERN_INFO " I/O Mapped Node at 0x%04lx.\n",
863  (unsigned long) pci_dev->resource[bit3_IO_NODE_IO_BASE_INDEX].start
864  );
865  printk(
866  KERN_INFO " Memory Mapped Node at 0x%04lx.\n",
867  (unsigned long) pci_dev->resource[bit3_MAPPED_NODE_IO_BASE_INDEX].start
868  );
869  printk(
870  KERN_INFO " Mapping Register at 0x%04lx.\n",
871  (unsigned long) pci_dev->resource[bit3_MAPPING_REGISTERS_BASE_INDEX].start
872  );
873  printk(
874  KERN_INFO " Remote Memory at 0x%04lx.\n",
875  (unsigned long) pci_dev->resource[bit3_WINDOW_REGION_BASE_INDEX].start
876  );
877 
879  bit3_card_count++;
880  printk(KERN_INFO "%s: this is not our target card...\n", vmedrv_name);
881  return -ENODEV;
882  }
883  bit3_card_count++;
884 
885  bit3.pci_dev = pci_dev;
886  bit3.is_dma_available = (id->device == di616) ? 0 : 1;
887 
888  result = pci_enable_device(pci_dev);
889  if (result < 0) {
890  printk(KERN_WARNING "%s: enabling device failed.\n", vmedrv_name);
891  }
892 
893  /* force the DMA buffer be allocated in 32bit accessible range */
894 #ifdef USE_OLD_DMABITMASK
895  pci_set_dma_mask(pci_dev, DMA_32BIT_MASK);
896 #else
897  pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
898 #endif
899 
900 
901  return result;
902 }
903 
904 
905 static void vmedrv_pci_remove(struct pci_dev* pci_dev)
906 {
907  if (pci_dev == bit3.pci_dev) {
908  printk(KERN_WARNING "%s: device removed.\n", vmedrv_name);
909  }
910 }
911 
912 
/* First-open adapter bring-up: ioremaps the memory-mapped node, mapping
 * registers and remote-window region, verifies that the remote VME crate
 * is powered, and clears/validates the local status error bits.
 * Returns 0 on success, -EIO when the crate is off or errors persist.
 * NOTE(review): several lines are elided in this listing (ioremap
 * arguments, io_node_io_base setup, and the memory-mapped status
 * register reads/writes between the #else/#endif pairs). */
913 static int initialize(void)
914 {
915  unsigned status;
916 
917  /* read PCI configurations */
918  bit3.mapped_node_io_base = ioremap_nocache(
921  );
922  bit3.mapping_registers_base = ioremap_nocache(
925  );
926  bit3.window_region_base = ioremap_nocache(
927  bit3.pci_dev->resource[bit3_WINDOW_REGION_BASE_INDEX].start,
929  );
930 
933  );
936  );
937 
938  DEBUG_CONFIG(printk(KERN_DEBUG "remapping pci memories...\n"));
939  DEBUG_CONFIG(printk(KERN_DEBUG " memory mapped node: --> 0x%08lx\n", bit3.mapped_node_io_base));
940  DEBUG_CONFIG(printk(KERN_DEBUG " mapping regs: --> 0x%08lx\n", bit3.mapping_registers_base));
941  DEBUG_CONFIG(printk(KERN_DEBUG " window base: --> 0x%08lx\n", bit3.window_region_base));
942 
943  /* check whether remote power is on */
944 #if 0
945  status = inb(bit3.io_node_io_base + regLOCAL_STATUS);
946 #else
948 #endif
949  if (status & lsREMOTE_BUS_POWER_OFF) {
950  printk(KERN_WARNING "%s: ERROR: VME chassis power is off.\n", vmedrv_name);
951  printk(KERN_WARNING " (or I/O cable is not connected, or SYSRESET is active.)\n");
952  printk(KERN_WARNING " Local Status Register: 0x%02x\n", status);
953  return -EIO;
954  }
955 
956  /* clear error caused by the power on transition */
957 #if 0
958  status = inb(bit3.io_node_io_base + regREMOTE_STATUS);
959  rmb();
961  wmb();
962  status = inb(bit3.io_node_io_base + regLOCAL_STATUS);
963 #else
965  rmb();
967  wmb();
969 #endif
970 
971  /* make sure no error bits are set */
972  if (status & lsERROR_BITS) {
973  if (status & lsINTERFACE_PARITY_ERROR) {
974  printk(KERN_WARNING "%s: ERROR: interface parity error.\n", vmedrv_name);
975  }
976  if (status & lsREMOTE_BUS_ERROR) {
977  printk(KERN_WARNING "%s: ERROR: remote bus error.\n", vmedrv_name);
978  }
979  if (status & lsRECEIVING_PR_INTERRUPT) {
980  printk(KERN_WARNING "%s: ERROR: receiving PR interrupt.\n", vmedrv_name);
981  }
982  if (status & lsINTERFACE_TIMEOUT) {
983  printk(KERN_WARNING "%s: ERROR: interface timed out.\n", vmedrv_name);
984  }
985  if (status & lsLRC_ERROR) {
986  printk(KERN_WARNING "%s: ERROR: LRC(Longitudinal Redundancy Check) error.\n", vmedrv_name);
987  }
988  if (status & lsREMOTE_BUS_POWER_OFF) {
989  printk(KERN_WARNING "%s: ERROR: remote bus power off or I/O cable is off.\n", vmedrv_name);
990  }
991 
992  printk(KERN_WARNING " Local Status Register: 0x%02x\n", status);
993 
994  status = inb(bit3.io_node_io_base + regADAPTER_ID);
995  printk(KERN_WARNING " Adapter ID (I/O): 0x%02x\n", status);
997  printk(KERN_WARNING " Adapter ID (mem): 0x%02x\n", status);
998 
999  return -EIO;
1000  }
1001 
1002  DEBUG_CONFIG(printk("%s: remote VME card successfully initialized\n", vmedrv_name));
1003 
1004  return 0;
1005 }
1006 
1007 
/* Configure the descriptor's VME access mode from the config table:
 * data width, mapping flags (address modifier, function code, byte
 * swapping) and the DMA mapping flags.  Returns 0, or -EINVAL for an
 * out-of-range mode index.
 * NOTE(review): the table lookups that assign address_modifier /
 * function_code / byte_swapping and the dma_mapping_flags expression
 * are elided in this listing. */
1008 static int set_access_mode(struct dev_prop_t* dev_prop, int mode)
1009 {
1010  if (mode >= vmedrv_NUMBER_OF_ACCESS_MODES) {
1011  return -EINVAL;
1012  }
1013 
1016  dev_prop->data_width = vmedrv_config_table[mode].data_width;
1019 
1020  dev_prop->mapping_flags = (
1021  (dev_prop->address_modifier << bit3_AM_SHIFT) |
1022  (dev_prop->function_code << bit3_FUNCTION_SHIFT) |
1023  (dev_prop->byte_swapping << bit3_BYTESWAP_SHIFT)
1024  );
1025 
1026  dev_prop->dma_mapping_flags = (
1028  );
1029 
1030  /*DEBUG_CONFIG(printk(KERN_DEBUG "setting access modes...\n"));*/
1031  /*DEBUG_CONFIG(printk(KERN_DEBUG " address modifier: 0x%02x\n", dev_prop->address_modifier));*/
1032  /*DEBUG_CONFIG(printk(KERN_DEBUG " data width: %d\n", dev_prop->data_width));*/
1033  /*DEBUG_CONFIG(printk(KERN_DEBUG " function code: 0x%02x\n", dev_prop->function_code));*/
1034  /*DEBUG_CONFIG(printk(KERN_DEBUG " byte swapping: 0x%02x\n", dev_prop->byte_swapping));*/
1035 
1036  return 0;
1037 }
1038 
1039 
1040 static int set_transfer_method(struct dev_prop_t* dev_prop, int method)
1041 {
1042  if (method == VMEDRV_PIO) {
1043  dev_prop->transfer_method = tmPIO;
1044  DEBUG_CONFIG(printk(KERN_DEBUG "transfer mode is set to PIO.\n"));
1045  }
1046  else if (method == VMEDRV_DMA) {
1047  if (dev_prop->dma_address_modifier == amINVALID) {
1048  return -EINVAL;
1049  }
1050  if (bit3.is_dma_available) {
1051  dev_prop->transfer_method = tmDMA;
1052  DEBUG_CONFIG(printk(KERN_DEBUG "transfer mode is set to DMA.\n"));
1053  }
1054  else {
1055  dev_prop->transfer_method = tmPIO;
1056  DEBUG_CONFIG(printk(KERN_DEBUG "DMA transfer mode is requested, but not available.\n"));
1057  DEBUG_CONFIG(printk(KERN_DEBUG "transfer mode is set to PIO.\n"));
1058  }
1059  }
1060  else if (method == VMEDRV_NBDMA) {
1061  if (dev_prop->address_modifier == amINVALID) {
1062  return -EINVAL;
1063  }
1064  dev_prop->transfer_method = tmNBDMA;
1065  DEBUG_CONFIG(printk(KERN_DEBUG "transfer mode is set to Non-Blocking DMA.\n"));
1066  }
1067  else {
1068  return -EINVAL;
1069  }
1070 
1071  return 0;
1072 }
1073 
1074 
1075 static int probe(struct dev_prop_t* dev_prop, __user vmedrv_word_access_t* word_access)
1076 {
1077  int result;
1078  unsigned long address = 0, data = 0;
1079 
1080  if (get_user(address, &word_access->address) < 0) {
1081  return -EFAULT;
1082  }
1083  if (put_user(data, &word_access->data) < 0) {
1084  return -EFAULT;
1085  }
1086 
1087  result = pio_read(dev_prop, (__user char*) &word_access->data, address, 1);
1088  if (result >= 0) {
1089  result = 0;
1090  }
1091 
1092  return result;
1093 }
1094 
1095 
/* Programmed-I/O read: maps as much of the request as fits in this
 * descriptor's mapping windows, streams 16- or 32-bit reads through a
 * bounce buffer to user space, then checks the adapter's status register
 * for bus errors.  Returns the number of bytes read or a negative errno.
 * NOTE(review): the status-register read lines before the error check
 * are elided in this listing. */
1096 static int pio_read(struct dev_prop_t* dev_prop, char* buf, unsigned long vme_address, int count)
1097 {
1098  unsigned long offset_address;
1099  unsigned long size, read_size;
1100  unsigned long uncopied_length;
1101  void* window_address;
1102  void* pio_buf;
1103  unsigned long pio_buf_data_size, pio_buf_index;
1104  unsigned status;
1105  int result;
1106 
1107  /* allocate PIO buffer and mapping windows, if it has not been yet */
1108  if ((result = prepare_pio(dev_prop)) < 0) {
1109  return result;
1110  }
1111 
1112  /* map windows */
1113  offset_address = vme_address & bit3_PAGE_OFFSET_MASK;
/* clip the request to what the available windows can cover in one pass */
1114  if (offset_address + count <= bit3_WINDOW_SIZE * dev_prop->number_of_pio_windows) {
1115  size = count;
1116  }
1117  else {
1118  size = bit3_WINDOW_SIZE * dev_prop->number_of_pio_windows - offset_address;
1119  }
1120  window_address = map_windows(vme_address, size, dev_prop->pio_window_index, dev_prop->mapping_flags);
1121 
1122  /* read from mapped windows */
1123  pio_buf = dev_prop->pio_buffer;
1124  pio_buf_data_size = 0;
1125  pio_buf_index = 0;
1126  for (read_size = 0; read_size < size; read_size += dev_prop->data_width) {
1127  if (dev_prop->data_width == dwWORD) {
1128  ((unsigned short *) pio_buf)[pio_buf_index] = ioread16(window_address);
1129  }
1130  else if (dev_prop->data_width == dwLONG) {
1131  ((unsigned int *) pio_buf)[pio_buf_index] = ioread32(window_address);
1132  }
1133 
1134  window_address += dev_prop->data_width;
1135  pio_buf_data_size += dev_prop->data_width;
1136  pio_buf_index++;
1137 
/* flush the bounce buffer to user space before it can overflow */
1138  if (pio_buf_data_size + dwLONG >= PIO_BUFFER_SIZE) {
1139  uncopied_length = copy_to_user(buf, pio_buf, pio_buf_data_size);
1140  if (uncopied_length > 0) {
1141  return -EFAULT;
1142  }
1143  buf += pio_buf_data_size;
1144  pio_buf_data_size = 0;
1145  pio_buf_index = 0;
1146  }
1147  }
1148 
1149  uncopied_length = copy_to_user(buf, pio_buf, pio_buf_data_size);
1150  if (uncopied_length > 0) {
1151  return -EFAULT;
1152  }
1153 
1154  /* error check */
1156  rmb();
1158  if (status & lsINTERFACE_TIMEOUT) {
1159  return -ETIMEDOUT;
1160  }
1161  if (status & lsERROR_BITS) {
1162  printk(KERN_WARNING "%s: VME I/O failed.\n", vmedrv_name);
1163  printk(KERN_WARNING " Local Status Register: 0x%02x\n", status);
1164  return -EIO;
1165  }
1166 
1167  return read_size;
1168 }
1169 
1170 
/*
 * pio_write(): copy 'count' bytes from the user buffer 'buf' into VME space
 * at 'vme_address' using programmed I/O through the bit3 mapping windows.
 * Data is staged through dev_prop->pio_buffer in PIO_BUFFER_SIZE chunks and
 * written out one data_width (16- or 32-bit) element at a time.
 * Returns the number of bytes written (possibly truncated to the mapped
 * window span), or a negative errno on fault / bus error / timeout.
 * NOTE(review): this listing is missing original lines 1234 and 1236, where
 * the local status register was read into 'status' — confirm against the
 * original vmedrv.c before modifying the error-check path.
 */
1171 static int pio_write(struct dev_prop_t* dev_prop, const char* buf, unsigned long vme_address, int count)
1172 {
1173  unsigned long offset_address;
1174  unsigned long size, remain_size;
1175  unsigned long uncopied_length;
1176  void* window_address;
1177  void* pio_buf;
1178  unsigned long pio_buf_data_size, pio_buf_index;
1179  unsigned status;
1180  int result;
1181 
1182  /* allocate PIO buffer and mapping windows, if it has not been yet */
1183  if ((result = prepare_pio(dev_prop)) < 0) {
1184  return result;
1185  }
1186 
1187  /* map windows; truncate the request if it overruns the window span */
1188  offset_address = vme_address & bit3_PAGE_OFFSET_MASK;
1189  if (offset_address + count <= bit3_WINDOW_SIZE * dev_prop->number_of_pio_windows) {
1190  size = count;
1191  }
1192  else {
1193  size = bit3_WINDOW_SIZE * dev_prop->number_of_pio_windows - offset_address;
1194  }
1195  window_address = map_windows(vme_address, size, dev_prop->pio_window_index, dev_prop->mapping_flags);
1196 
1197  /* write to mapped windows */
1198  pio_buf = dev_prop->pio_buffer;
1199  pio_buf_data_size = 0;
1200  pio_buf_index = 0;
1201  for (remain_size = size; remain_size > 0; remain_size -= dev_prop->data_width) {
     /* refill the staging buffer from user space when it runs dry */
1202  if (pio_buf_data_size <= 0) {
1203  if (remain_size < PIO_BUFFER_SIZE) {
1204  pio_buf_data_size = remain_size;
1205  }
1206  else {
1207  pio_buf_data_size = PIO_BUFFER_SIZE;
1208  }
1209  uncopied_length = copy_from_user(pio_buf, buf, pio_buf_data_size);
1210  if (uncopied_length > 0) {
1211  return -EFAULT;
1212  }
1213  buf += pio_buf_data_size;
1214  pio_buf_index = 0;
1215  }
1216 
     /* one bus transaction per element, width selected by the device mode */
1217  if (dev_prop->data_width == dwWORD) {
1218  iowrite16(
1219  ((unsigned short *) pio_buf)[pio_buf_index], window_address
1220  );
1221  }
1222  else if (dev_prop->data_width == dwLONG) {
1223  iowrite32(
1224  ((unsigned int *) pio_buf)[pio_buf_index], window_address
1225  );
1226  }
1227 
1228  window_address += dev_prop->data_width;
1229  pio_buf_data_size -= dev_prop->data_width;
1230  pio_buf_index++;
1231  }
1232 
1233  /* error check */
     /* NOTE(review): missing listing lines here read the local status
      * register into 'status' (see original lines 1234/1236). */
1235  rmb();
1237  if (status & lsINTERFACE_TIMEOUT) {
1238  return -ETIMEDOUT;
1239  }
1240  if (status & lsERROR_BITS) {
1241  printk(KERN_WARNING "%s: VME I/O failed.\n", vmedrv_name);
1242  printk(KERN_WARNING " Local Status Register: 0x%02x\n", status);
1243  return -EIO;
1244  }
1245 
1246  return size;
1247 }
1248 
1249 
/*
 * prepare_pio(): lazily allocate the per-device PIO staging buffer and
 * reserve the PIO mapping windows on first use. Idempotent: both steps are
 * skipped when already done. Returns 0 on success or a negative errno.
 * NOTE(review): listing lines 1269 and 1274 are missing — presumably the
 * allocate_windows() call that fills dev_prop->pio_window_index and the
 * assignment of dev_prop->number_of_pio_windows. Confirm against the
 * original vmedrv.c.
 */
1250 static int prepare_pio(struct dev_prop_t* dev_prop)
1251 {
1252  /* allocate PIO buffer, if it has not been allocated. */
1253  DEBUG_PIO(printk(KERN_DEBUG "allocating PIO buffer\n"));
1254  DEBUG_PIO(printk(KERN_DEBUG " size=%d\n", PIO_BUFFER_SIZE));
1255  if (dev_prop->pio_buffer == 0) {
1256  dev_prop->pio_buffer = kmalloc(PIO_BUFFER_SIZE, GFP_KERNEL);
1257  if (dev_prop->pio_buffer == 0) {
1258  printk(KERN_WARNING "%s: unable to allocate PIO buffer.", vmedrv_name);
1259  printk(KERN_WARNING " requested size: %d\n", PIO_BUFFER_SIZE);
1260  return -ENOMEM;
1261  }
1262  }
1263  DEBUG_PIO(printk(KERN_DEBUG " ok, address=%0x\n", dev_prop->pio_buffer));
1264 
1265  /* allocate PIO windows, if it has not been allocated. */
1266  if (dev_prop->number_of_pio_windows == 0) {
1267  DEBUG_PIO(printk(KERN_DEBUG "allocating PIO windows\n"));
1268  DEBUG_PIO(printk(KERN_DEBUG " size=%d\n", PIO_WINDOW_PAGES));
     /* NOTE(review): missing listing line 1269 — window allocation call. */
1270  if (dev_prop->pio_window_index < 0) {
     /* a negative index is the errno propagated from the allocator */
1271  return dev_prop->pio_window_index;
1272  }
1273  DEBUG_PIO(printk(KERN_DEBUG " ok, start_index=%d\n", dev_prop->pio_window_index));
     /* NOTE(review): missing listing line 1274. */
1275  }
1276 
1277  return 0;
1278 }
1279 
1280 static int allocate_windows(int number_of_windows)
1281 {
1282  int number_of_free_windows;
1283  int window_index = -ENOMEM;
1284  int i;
1285 
1286  number_of_free_windows = 0;
1287  for (i = 0; i < bit3_NUMBER_OF_WINDOWS; i++) {
1288  if (bit3.window_status_table[i] != 0) {
1289  number_of_free_windows = 0;
1290  continue;
1291  }
1292 
1293  if (number_of_free_windows == 0) {
1294  window_index = i;
1295  }
1296 
1297  number_of_free_windows++;
1298  if (number_of_free_windows == number_of_windows) {
1299  break;
1300  }
1301  }
1302 
1303  if (i == bit3_NUMBER_OF_WINDOWS) {
1304  return -ENOMEM;
1305  }
1306 
1307  for (i = 0; i < number_of_windows; i++) {
1308  bit3.window_status_table[window_index + i] = 1;
1309  }
1310 
1311  return window_index;
1312 }
1313 
1314 
1315 static void free_windows(int window_index, int number_of_windows)
1316 {
1317  int i;
1318  for (i = 0; i < number_of_windows; i++) {
1319  bit3.window_status_table[window_index + i] = 0;
1320  }
1321 }
1322 
1323 
/*
 * map_windows(): program the bit3 mapping registers so that 'size' bytes of
 * VME space starting at 'vme_address' are visible through consecutive PCI
 * windows beginning at 'window_index'. Returns the kernel-virtual address
 * within the window region that corresponds to 'vme_address'.
 * NOTE(review): listing line 1339 (the destination mapping-register address
 * argument of iowrite32) is missing from this extraction — confirm against
 * the original vmedrv.c.
 */
1324 static void* map_windows(unsigned vme_address, unsigned size, unsigned window_index, unsigned flags)
1325 {
1326  unsigned base_address, offset_address;
1327  unsigned number_of_windows;
1328  void* window_address;
1329  int i;
1330 
1331  base_address = vme_address & bit3_PAGE_BASE_MASK;
1332  offset_address = vme_address & bit3_PAGE_OFFSET_MASK;
     /* ceiling division: windows needed to cover 'size' bytes */
1333  number_of_windows = ((unsigned long) size - 1) / (unsigned long) bit3_WINDOW_SIZE + 1;
1334  window_address = bit3.window_region_base + bit3_WINDOW_SIZE * window_index + offset_address;
1335 
1336  for (i = 0; i < number_of_windows; i++) {
1337  iowrite32(
1338  base_address | flags,
     /* NOTE(review): missing listing line 1339 — mapping register address. */
1340  );
1341  base_address += bit3_WINDOW_SIZE;
1342  window_index++;
1343  }
1344 
1345  return window_address;
1346 }
1347 
1348 
/*
 * enable_normal_interrupt(): enable normal (non-error) interrupt delivery on
 * the bridge via an 8-bit register write. Always returns 0.
 * NOTE(review): listing lines 1351 and 1353-1354 (the value and register
 * address arguments of iowrite8) are missing from this extraction.
 */
1349 static int enable_normal_interrupt(void)
1350 {
1352  iowrite8(
1355  );
1356 
1357  DEBUG_INT(printk(KERN_DEBUG "normal interrupt enabled.\n"));
1358 
1359  return 0;
1360 }
1361 
1362 
/*
 * NOTE(review): the function signature (listing line 1363) is missing from
 * this extraction; from the debug message this is presumably
 * disable_normal_interrupt(void). It disables normal interrupt delivery on
 * the bridge via an 8-bit register write (value/address lines 1365, 1367-1368
 * also missing) and always returns 0. Confirm against the original vmedrv.c.
 */
1364 {
1366  iowrite8(
1369  );
1370 
1371  DEBUG_INT(printk(KERN_DEBUG "normal interrupt disabled.\n"));
1372 
1373  return 0;
1374 }
1375 
1376 
/*
 * enable_error_interrupt(): enable error-interrupt delivery on the bridge via
 * an 8-bit register write. Always returns 0.
 * NOTE(review): listing lines 1379 and 1381-1382 (iowrite8 value and register
 * address) are missing from this extraction.
 */
1377 static int enable_error_interrupt(void)
1378 {
1380  iowrite8(
1383  );
1384 
1385  return 0;
1386 }
1387 
1388 
/*
 * disable_error_interrupt(): disable error-interrupt delivery on the bridge
 * via an 8-bit register write. Always returns 0.
 * NOTE(review): listing lines 1391 and 1393-1394 (iowrite8 value and register
 * address) are missing from this extraction.
 */
1389 static int disable_error_interrupt(void)
1390 {
1392  iowrite8(
1395  );
1396 
1397  return 0;
1398 }
1399 
1400 
/*
 * save_interrupt_flags(): snapshot the bridge's interrupt-enable flags
 * (presumably into a driver-global) so they can be restored later by
 * restore_interrupt_flags().
 * NOTE(review): listing lines 1403 and 1406 — the actual register read/save
 * and the printk value argument — are missing from this extraction.
 */
1401 static void save_interrupt_flags(void)
1402 {
1404  DEBUG_INT(printk(
1405  KERN_DEBUG "interrupt flags are saved: 0x%02x\n",
1407  ));
1408 }
1409 
1410 
/*
 * restore_interrupt_flags(): write the previously saved interrupt-enable
 * flags back to the bridge.
 * NOTE(review): listing lines 1413-1414 (apparently a guarding `if` whose
 * closing brace survives at line 1419), 1416-1417 (iowrite8 value and
 * register address) and 1423 (printk value argument) are missing from this
 * extraction — confirm against the original vmedrv.c.
 */
1411 static void restore_interrupt_flags(void)
1412 {
1415  iowrite8(
1418  );
1419  }
1420 
1421  DEBUG_INT(printk(
1422  KERN_DEBUG "interrupt flags are restored: 0x%02x\n",
1424  ));
1425 }
1426 
1427 
/*
 * acknowledge_error_interrupt(): handle an error interrupt from the bridge.
 * Records the local status register value in local_status_on_error, logs a
 * human-readable description of each error bit, and (for interface timeouts)
 * flushes the interface error condition. Always returns 0.
 * NOTE(review): listing lines 1430, 1448 and 1451 are missing from this
 * extraction — apparently the error_interrupt_count increment (or similar)
 * and the register write that flushes the timeout; confirm against the
 * original vmedrv.c.
 */
1428 static int acknowledge_error_interrupt(unsigned local_status)
1429 {
1431  local_status_on_error = local_status;
1432 
1433  printk(KERN_WARNING "%s: error interrupt handled.\n", vmedrv_name);
1434  printk(KERN_WARNING " Local Status Register: 0x%02x\n", local_status);
1435  if (local_status & lsINTERFACE_PARITY_ERROR) {
1436  printk(KERN_WARNING " Fiber-Optic Interface Data Error\n");
1437  }
1438  if (local_status & lsREMOTE_BUS_ERROR) {
1439  printk(KERN_WARNING " Remote Bus Error\n");
1440  }
1441  if (local_status & lsINTERFACE_TIMEOUT) {
1442  printk(KERN_WARNING " Interface Timeout\n");
1443  }
1444  if (local_status & lsREMOTE_BUS_POWER_OFF) {
1445  printk(KERN_WARNING " Remote Bus Power Off or I/O Cable Is Off\n");
1446  }
1447 
1449  if (local_status & lsINTERFACE_TIMEOUT) {
1450  /* flush the interface error */
     /* NOTE(review): missing listing line 1451 — the flushing register access. */
1452  }
1453 
1454  return 0;
1455 }
1456 
1457 
/*
 * acknowledge_pt_interrupt(): handle a PT (presumably "PCI target") interrupt.
 * Always returns 0.
 * NOTE(review): listing line 1462 — the actual acknowledge action — is
 * missing from this extraction; only the debug trace remains visible.
 */
1458 static int acknowledge_pt_interrupt(unsigned local_status)
1459 {
1460  DEBUG_INT(printk(KERN_DEBUG "pt interrupt handled...\n"));
1461 
1463 
1464  return 0;
1465 }
1466 
1467 
/*
 * acknowledge_dma_interrupt(): handle the DMA DONE interrupt. Clears the DMA
 * command register, marks the transfer finished (is_dma_running = 0) and
 * wakes the process sleeping in start_dma() on vmedrv_dmadone_wait_queue.
 * Always returns 0.
 * NOTE(review): listing line 1474 — the register write that clears the DMA
 * Command Register — is missing from this extraction.
 */
1468 static int acknowledge_dma_interrupt(unsigned dma_status)
1469 {
1470  DEBUG_INT(printk(KERN_DEBUG "DEBUG_INT: dma interrupt handled...\n"));
1471  DEBUG_DMA(printk(KERN_DEBUG "DEBUG_DMA: dma interrupt handled...\n"));
1472 
1473  /* clear the DMA Command Register */
1475 
1476  /* wake up the process */
1477  is_dma_running = 0;
1478  wake_up(&vmedrv_dmadone_wait_queue);
1479 
1480  return 0;
1481 }
1482 
1483 
/*
 * acknowledge_pr_interrupt(): handle a PR (presumably "PCI remote") interrupt
 * by writing an 8-bit acknowledge to the bridge. Always returns 0.
 * NOTE(review): listing lines 1489-1490 (iowrite8 value and register address)
 * are missing from this extraction.
 */
1484 static int acknowledge_pr_interrupt(unsigned remote_status)
1485 {
1486  DEBUG_INT(printk(KERN_DEBUG "pr interrupt handled...\n"));
1487 
1488  iowrite8(
1491  );
1492 
1493  return 0;
1494 }
1495 
1496 
/*
 * acknowledge_vme_interrupt(): dispatch a VME bus interrupt. For every IRQ
 * line asserted in 'interrupt_status', performs the IACK cycle to fetch the
 * interrupt vector, then walks the clients registered for that line: each
 * client whose masked vector matches is either sent its requested signal or
 * has its interrupt_count bumped and the waiters woken. Always returns 0.
 * NOTE(review): listing lines 1515 and 1517 — the IACK register access and
 * the read that fills 'vector' — are missing from this extraction, as is
 * line 1525 (the auto-disable action inside the autodisable_flag branch).
 * Confirm against the original vmedrv.c.
 */
1497 static int acknowledge_vme_interrupt(unsigned interrupt_status)
1498 {
1499  unsigned irq, vector;
1500  struct interrupt_client_t* interrupt_client;
1501  struct task_struct* task;
1502  int signal_id;
1503  int priv;
1504 
1505  DEBUG_INT(printk(KERN_DEBUG "vme interrupt handled...\n"));
1506 
1507  for (irq = 1; irq < vmeNUMBER_OF_IRQ_LINES; irq++) {
1508  /* check whether this IRQ is asserted */
1509  if (! (interrupt_status & (0x0001 << irq))) {
1510  continue;
1511  }
1512  DEBUG_INT(printk(KERN_DEBUG " irq = %d\n", irq));
1513 
1514  /* acknowledge IRQ request (send IACK) */
     /* NOTE(review): missing listing lines 1515/1517 — IACK access and
      * the read that assigns 'vector'. */
1516  wmb();
1518  DEBUG_INT(printk(KERN_DEBUG " vector = 0x%04x\n", vector));
1519 
1520  /* send signal to registered processes */
1521  interrupt_client = bit3.interrupt_client_list[irq];
1522  while (interrupt_client != 0) {
     /* match on the client's masked vector */
1523  if ((vector & interrupt_client->vector_mask) == (interrupt_client->vector & interrupt_client->vector_mask)) {
1524  if (interrupt_client->autodisable_flag != 0) {
     /* NOTE(review): missing listing line 1525 — the disable action. */
1526  DEBUG_INT(printk(KERN_DEBUG " auto-disabled.\n"));
1527  }
1528 
1529  task = interrupt_client->task;
1530  signal_id = interrupt_client->signal_id;
1531  if (signal_id > 0) {
     /* signal-based notification */
1532  send_sig(signal_id, task, priv = 1);
1533  DEBUG_INT(printk(KERN_DEBUG " send signal.\n"));
1534  }
1535  else {
     /* wait-queue-based notification */
1536  interrupt_client->interrupt_count++;
1537  wake_up_interruptible(&vmedrv_vmebusirq_wait_queue);
1538  DEBUG_INT(printk(KERN_DEBUG " wake up.\n"));
1539  }
1540  }
1541  interrupt_client = interrupt_client->next;
1542  }
1543  }
1544 
1545  DEBUG_INT(printk(KERN_DEBUG "now exit vme interrupt handling routine.\n"));
1546 
1547  return 0;
1548 }
1549 
1550 
/*
 * register_interrupt_notification(): register the calling task to be notified
 * of VME interrupts on 'irq' with the given 'vector'. If 'signal_id' > 0 the
 * task is notified by signal; otherwise it must poll/wait and the interrupt
 * is counted. The client is linked into both the global per-IRQ list and the
 * per-device list (for cleanup on close). Returns 0, -EINVAL for a bad irq,
 * or -ENOMEM on allocation failure.
 * NOTE(review): listing line 1576 — presumably the assignment that links the
 * new client into bit3.interrupt_client_list[irq] — is missing from this
 * extraction. Also note: if only one of the two kmalloc() calls fails, the
 * successful allocation is leaked; worth fixing in the original source.
 */
1551 static int register_interrupt_notification(struct dev_prop_t* dev_prop, struct task_struct* task, int irq, int vector, int signal_id)
1552 {
1553  struct interrupt_client_t* interrupt_client;
1554  struct local_interrupt_client_t* local_interrupt_client;
1555 
1556  if ((irq < 1) || (irq >= vmeNUMBER_OF_IRQ_LINES)) {
1557  return -EINVAL;
1558  }
1559 
1560  interrupt_client = kmalloc(sizeof(struct interrupt_client_t), GFP_KERNEL);
1561  local_interrupt_client = kmalloc(sizeof(struct local_interrupt_client_t), GFP_KERNEL);
1562  if ((interrupt_client == 0) || (local_interrupt_client == 0)) {
1563  printk(KERN_WARNING "%s: unable to allocate memory for interrupt client entry.\n", vmedrv_name);
1564  return -ENOMEM;
1565  }
1566 
1567  interrupt_client->task = task;
1568  interrupt_client->irq = irq;
1569  interrupt_client->vector = vector;
1570  interrupt_client->signal_id = signal_id;
1571  interrupt_client->interrupt_count = 0;
1572  interrupt_client->autodisable_flag = 0;
1573  interrupt_client->vector_mask = 0xffff;
1574 
     /* push onto the global per-IRQ client list */
1575  interrupt_client->next = bit3.interrupt_client_list[irq];
     /* NOTE(review): missing listing line 1576 — list-head assignment. */
1577 
     /* also track in the per-device list so close() can unregister */
1578  local_interrupt_client->interrupt_client = interrupt_client;
1579  local_interrupt_client->next = dev_prop->local_interrupt_client;
1580  dev_prop->local_interrupt_client = local_interrupt_client;
1581 
1582  DEBUG_INT(printk(KERN_DEBUG
1583  "vme interrupt is registered, "
1584  "irq: %d, vector: 0x%04x, pid: %d, signal: %d.\n",
1585  irq, vector, task->pid, signal_id
1586  ));
1587 
1588  return 0;
1589 }
1590 
1591 
/*
 * unregister_interrupt_notification(): remove interrupt registrations owned
 * by the calling task. irq == 0 or vector == 0 act as wildcards; otherwise
 * only exact matches are removed. Each removed client is unlinked from the
 * global per-IRQ list and from the per-device bookkeeping list, then freed.
 * Always returns 0.
 * NOTE(review): listing line 1626 — presumably the assignment of
 * bit3.interrupt_client_list[irq_index] when removing the list head — is
 * missing from this extraction. Confirm against the original vmedrv.c.
 */
1592 static int unregister_interrupt_notification(struct dev_prop_t* dev_prop, struct task_struct* task, int irq, int vector)
1593 {
1594  struct interrupt_client_t *interrupt_client, *prev_interrupt_client;
1595  struct interrupt_client_t *unregistered_interrupt_client;
1596  struct local_interrupt_client_t *local_interrupt_client, *prev_local_interrupt_client;
1597  struct local_interrupt_client_t *unregistered_local_interrupt_client;
1598  int process_id;
1599  int irq_index;
1600 
1601  process_id = task->pid;
1602  for (irq_index = 1; irq_index < vmeNUMBER_OF_IRQ_LINES; irq_index++) {
1603 
1604  interrupt_client = bit3.interrupt_client_list[irq_index];
1605  prev_interrupt_client = 0;
1606  while (interrupt_client != 0) {
     /* skip entries that do not match pid / irq / vector filters */
1607  if (
1608  (process_id != interrupt_client->task->pid) ||
1609  ((irq != 0) && (irq != interrupt_client->irq)) ||
1610  ((vector != 0) && (vector != interrupt_client->vector))
1611  ){
1612  prev_interrupt_client = interrupt_client;
1613  interrupt_client = interrupt_client->next;
1614  continue;
1615  }
1616 
1617  DEBUG_INT(printk(KERN_DEBUG
1618  "vme interrupt is unregistered, "
1619  "irq: %d, vector: 0x%04x, pid: %d.\n",
1620  interrupt_client->irq, interrupt_client->vector, process_id
1621  ));
1622 
     /* unlink from the global per-IRQ list */
1623  unregistered_interrupt_client = interrupt_client;
1624  interrupt_client = interrupt_client->next;
1625  if (prev_interrupt_client == 0) {
     /* NOTE(review): missing listing line 1626 — head re-assignment. */
1627  }
1628  else {
1629  prev_interrupt_client->next = interrupt_client;
1630  }
1631 
     /* unlink the matching entry from the per-device list and free it */
1632  local_interrupt_client = dev_prop->local_interrupt_client;
1633  prev_local_interrupt_client = 0;
1634  while (local_interrupt_client != 0) {
1635  if (local_interrupt_client->interrupt_client != unregistered_interrupt_client) {
1636  prev_local_interrupt_client = local_interrupt_client;
1637  local_interrupt_client = local_interrupt_client->next;
1638  continue;
1639  }
1640 
1641  unregistered_local_interrupt_client = local_interrupt_client;
1642  local_interrupt_client = local_interrupt_client->next;
1643  if (prev_local_interrupt_client == 0) {
1644  dev_prop->local_interrupt_client = local_interrupt_client;
1645  }
1646  else {
1647  prev_local_interrupt_client->next = local_interrupt_client;
1648  }
1649  kfree(unregistered_local_interrupt_client);
1650  }
1651 
1652  kfree(unregistered_interrupt_client);
1653  }
1654  }
1655 
1656  return 0;
1657 }
1658 
1659 
/*
 * wait_for_interrupt_notification(): block the calling task until a VME
 * interrupt matching (irq, vector) is delivered, or until 'timeout' seconds
 * elapse. Only wait-queue-style registrations (signal_id <= 0) made by this
 * task qualify. Returns 1 on interrupt, -ETIMEDOUT on timeout, -ERESTARTSYS
 * if interrupted by a signal, -ENODEV if nothing matching is registered.
 * NOTE(review): listing line 1689 — the wait-queue argument of
 * wait_event_interruptible_timeout (presumably vmedrv_vmebusirq_wait_queue)
 * — is missing from this extraction.
 */
1660 static int wait_for_interrupt_notification(struct task_struct* task, int irq, int vector, int timeout)
1661 {
1662  long remaining_time;
1663  int process_id;
1664  struct interrupt_client_t *interrupt_client;
1665 
     /* convert seconds to jiffies */
1666  timeout *= HZ;
1667 
1668  /* find matching registered interrupt signature */
1669  process_id = task->pid;
1670  interrupt_client = bit3.interrupt_client_list[irq];
1671  while (interrupt_client != 0) {
1672  if (
1673  (process_id == interrupt_client->task->pid) &&
1674  (vector == interrupt_client->vector) &&
1675  (interrupt_client->signal_id <= 0)
1676  ){
1677  break;
1678  }
1679  interrupt_client = interrupt_client->next;
1680  }
1681  if (interrupt_client == 0)
1682  {
1683  printk(KERN_WARNING "%s: no interrupt is registered to wait for\n", vmedrv_name);
1684  return -ENODEV;
1685  }
1686 
1687  /* now process or wait for interrupt */
1688  remaining_time = wait_event_interruptible_timeout(
     /* NOTE(review): missing listing line 1689 — the wait queue argument. */
1690  interrupt_client->interrupt_count > 0,
1691  timeout
1692  );
1693  if (remaining_time == 0) {
1694  return -ETIMEDOUT;
1695  }
     /* woke up without a pending interrupt: a signal interrupted the sleep */
1696  if (interrupt_client->interrupt_count == 0) {
1697  return -ERESTARTSYS;
1698  }
1699 
     /* consume the pending interrupt(s) */
1700  interrupt_client->interrupt_count = 0;
1701 
1702  return 1;
1703 }
1704 
1705 
1706 static int check_interrupt_notification(int irq, int vector)
1707 {
1708  int result = 0;
1709  struct interrupt_client_t *interrupt_client;
1710 
1711  interrupt_client = bit3.interrupt_client_list[irq];
1712  while (interrupt_client != 0) {
1713  if (vector == interrupt_client->vector) {
1714  result += interrupt_client->interrupt_count;
1715 
1716  }
1717  interrupt_client = interrupt_client->next;
1718  }
1719 
1720  return result;
1721 }
1722 
1723 
1724 static int clear_interrupt_notification(int irq, int vector)
1725 {
1726  struct interrupt_client_t *interrupt_client;
1727 
1728  interrupt_client = bit3.interrupt_client_list[irq];
1729  while (interrupt_client != 0) {
1730  if (vector == interrupt_client->vector) {
1731  interrupt_client->interrupt_count = 0;
1732  }
1733  interrupt_client = interrupt_client->next;
1734  }
1735 
1736  return 0;
1737 }
1738 
1739 
1740 static int set_interrupt_autodisable(int irq, int vector)
1741 {
1742  struct interrupt_client_t *interrupt_client;
1743 
1744  interrupt_client = bit3.interrupt_client_list[irq];
1745  while (interrupt_client != 0) {
1746  if (vector == interrupt_client->vector) {
1747  interrupt_client->autodisable_flag = 1;
1748  }
1749  interrupt_client = interrupt_client->next;
1750  }
1751 
1752  return 0;
1753 }
1754 
1755 
1756 static int set_vector_mask(int irq, int vector, int vector_mask)
1757 {
1758  struct interrupt_client_t *interrupt_client;
1759 
1760  interrupt_client = bit3.interrupt_client_list[irq];
1761  while (interrupt_client != 0) {
1762  if (vector == interrupt_client->vector) {
1763  interrupt_client->vector_mask = vector_mask;
1764  }
1765  interrupt_client = interrupt_client->next;
1766  }
1767 
1768  return 0;
1769 }
1770 
/*
 * reset_adapter(): issue a full adapter reset by writing rcRESET_ADAPTER to a
 * remote command register. Always returns 0.
 * NOTE(review): listing line 1775 — the destination register address argument
 * of iowrite8 — is missing from this extraction.
 */
1771 static int reset_adapter(void)
1772 {
1773  iowrite8(
1774  rcRESET_ADAPTER,
1776  );
1777 
1778  return 0;
1779 }
1780 
1781 
1782 static int dma_read(struct dev_prop_t* dev_prop, char* buf, unsigned long vme_address, int count)
1783 {
1784  unsigned long pci_address, size;
1785  unsigned long uncopied_length;
1786  int direction;
1787  int result;
1788 
1789  /* allocate DMA buffer, map them (only first time) */
1790  if ((result = prepare_dma(dev_prop)) < 0) {
1791  return result;
1792  }
1793 
1794  /* set transfer size and direction */
1795  if (count > bit3.dma_buffer_size) {
1796  size = bit3.dma_buffer_size;
1797  }
1798  else {
1799  size = count;
1800  }
1801  direction = tdREAD;
1802 
1803  pci_address = bit3.dma_buffer_mapped_pci_address;
1804 
1805  result = start_dma(dev_prop, pci_address, vme_address, size, direction);
1806  if (result < 0) {
1807  return result;
1808  }
1809 
1810  dma_cache_sync(&bit3.pci_dev->dev, bit3.dma_buffer, size, DMA_FROM_DEVICE);
1811 
1812  uncopied_length = copy_to_user(buf, bit3.dma_buffer, size);
1813  if (uncopied_length > 0) {
1814  return -EFAULT;
1815  }
1816 
1817  return size;
1818 }
1819 
1820 
1821 static int dma_write(struct dev_prop_t* dev_prop, const char* buf, unsigned long vme_address, int count)
1822 {
1823  unsigned long pci_address, size;
1824  unsigned long uncopied_length;
1825  int direction;
1826  int result;
1827 
1828  /* allocate DMA buffer, map them (only first time) */
1829  if ((result = prepare_dma(dev_prop)) < 0) {
1830  return result;
1831  }
1832 
1833  /* set transfer size and direction */
1834  if (count > bit3.dma_buffer_size) {
1835  size = bit3.dma_buffer_size;
1836  }
1837  else {
1838  size = count;
1839  }
1840  direction = tdWRITE;
1841 
1842  uncopied_length = copy_from_user(bit3.dma_buffer, buf, size);
1843  if (uncopied_length > 0) {
1844  return -EFAULT;
1845  }
1846 
1847  dma_cache_sync(&bit3.pci_dev->dev, bit3.dma_buffer, size, DMA_TO_DEVICE);
1848 
1849  pci_address = bit3.dma_buffer_mapped_pci_address;
1850 
1851  result = start_dma(dev_prop, pci_address, vme_address, size, direction);
1852 
1853  return (result < 0) ? result : size;
1854 }
1855 
1856 
/*
 * prepare_dma(): lazily allocate the contiguous DMA bounce buffer and program
 * the DMA mapping windows on first use (bit3.dma_buffer_size == 0). Returns 0
 * on success or -ENOMEM if the buffer cannot be allocated.
 * NOTE(review): listing lines 1861 and 1866-1868 are missing from this
 * extraction — presumably the allocate_dma_buffer() call and the
 * map_dma_windows() call whose trailing "byte swapping" argument survives at
 * line 1869. Confirm against the original vmedrv.c.
 */
1857 static int prepare_dma(struct dev_prop_t* dev_prop)
1858 {
1859  /* allocate and map DMA buffer, if it has not been allocated yet.*/
1860  if (bit3.dma_buffer_size == 0) {
     /* NOTE(review): missing listing line 1861 — buffer allocation call. */
1862  printk(KERN_WARNING "%s: unable to allocate dma buffer.\n", vmedrv_name);
1863  printk(KERN_WARNING " requested size: %d\n", DMA_BUFFER_SIZE);
1864  return -ENOMEM;
1865  }
     /* NOTE(review): missing listing lines 1866-1868 — window mapping call. */
1869  /* byte swapping = */ 0
1870  );
1871  }
1872 
1873  return 0;
1874 }
1875 
/*
 * start_dma(): program and run one DMA transfer of 'size' bytes between the
 * mapped PCI address and the VME address, then sleep until the DMA DONE
 * interrupt fires or a conservative timeout (modeled as 100 kB/s + 100 ms)
 * expires. Returns 0 on success, -ETIMEDOUT on timeout, -EIO when the local
 * status register reports an error or an error interrupt occurred during the
 * transfer.
 * NOTE(review): listing lines 1891 (the wait-queue argument of
 * wait_event_timeout, presumably vmedrv_dmadone_wait_queue) and 1898 (the
 * register write clearing the DMA Command Register on timeout) are missing
 * from this extraction.
 */
1876 static int start_dma(struct dev_prop_t* dev_prop, unsigned long pci_address, unsigned long vme_address, unsigned long size, int direction)
1877 {
1878  unsigned status;
1879  long timeout, remaining_time;
1880  int error_interrupt_count_before_dma;
1881 
1882  /* set DMA registers, and start */
1883  initiate_dma(dev_prop, pci_address, vme_address, size, direction);
1884  DEBUG_DMA(printk(KERN_DEBUG "now start dma transfer...\n"));
1885 
     /* snapshot so we can detect error interrupts raised during the DMA */
1886  error_interrupt_count_before_dma = error_interrupt_count;
1887 
1888  /* wait for DMA DONE interrupt */
1889  timeout = ((size*HZ)/100000 + HZ/10); /* 100kB/sec+100msec; slow enough */
1890  remaining_time = wait_event_timeout(
     /* NOTE(review): missing listing line 1891 — the wait queue argument. */
1892  (! is_dma_running),
1893  timeout
1894  );
1895  if (is_dma_running) {
1896  /* clear the DMA Command Register */
1897  /* this is done in the interrupt handler, but not for timeout */
     /* NOTE(review): missing listing line 1898 — the clearing write. */
1899 
1900  printk(KERN_WARNING "%s: dma transfer timed out.\n", vmedrv_name);
1901  release_dma();
1902  return -ETIMEDOUT;
1903  }
1904 
1905  /* release DMA settings */
1906  DEBUG_DMA(printk(KERN_DEBUG "dma transfer completed.\n"));
1907  status = release_dma();
1908 
1909  /* release DMA settings */
1910  if (status & lsERROR_BITS) {
1911  printk(KERN_WARNING "%s: dma transfer failed.\n", vmedrv_name);
1912  printk(KERN_WARNING " Local Status Register: 0x%02x\n", status);
1913  return -EIO;
1914  }
     /* an error interrupt during the DMA also fails the transfer */
1915  if (error_interrupt_count != error_interrupt_count_before_dma) {
1916  status = local_status_on_error;
1917  printk(KERN_WARNING "%s: error during dma transfer.\n", vmedrv_name);
1918  printk(KERN_WARNING " Local Status Register: 0x%02x\n", status);
1919  return -EIO;
1920  }
1921 
1922  return 0;
1923 }
1924 
1925 
/*
 * allocate_dma_buffer(): allocate a physically contiguous, coherent DMA
 * buffer of at most bit3_DMA_MAPPING_SIZE bytes via pci_alloc_consistent(),
 * recording its bus address in the bit3 state. Returns the kernel-virtual
 * buffer address (bit3.dma_buffer), which is NULL on failure.
 * NOTE(review): listing line 1939 — presumably the assignment of
 * bit3.dma_buffer_size — is missing from this extraction. Also note the
 * pointer comparison `bit3.dma_buffer > 0` would more conventionally be a
 * NULL check; confirm against the original vmedrv.c before changing.
 */
1926 static void* allocate_dma_buffer(unsigned long size)
1927 {
1928  dma_addr_t dma_handle;
1929 
     /* never map more than the DMA mapping registers can cover */
1930  if (size > bit3_DMA_MAPPING_SIZE) {
1931  size = bit3_DMA_MAPPING_SIZE;
1932  }
1933 
1934  bit3.dma_buffer = pci_alloc_consistent(
1935  bit3.pci_dev, size, &dma_handle
1936  );
1937 
1938  if (bit3.dma_buffer > 0) {
     /* NOTE(review): missing listing line 1939 — size bookkeeping. */
1940  bit3.dma_buffer_bus_address = dma_handle;
1941 
1942  DEBUG_DMA(printk(KERN_DEBUG "dma buffer is allocated.\n"));
1943  DEBUG_DMA(printk(KERN_DEBUG " size: 0x%lx.\n", bit3.dma_buffer_size));
1944  DEBUG_DMA(printk(KERN_DEBUG " virtual address: 0x%08lx.\n", (long) bit3.dma_buffer));
1945  DEBUG_DMA(printk(KERN_DEBUG " bus address: 0x%08lx.\n", (long) bit3.dma_buffer_bus_address));
1946  }
1947 
1948  return bit3.dma_buffer;
1949 }
1950 
1951 
/*
 * release_dma_buffer(): free the coherent DMA buffer allocated by
 * allocate_dma_buffer(), if any, and reset the recorded size to 0 so a later
 * prepare_dma() re-allocates. No-op when no buffer is held.
 * NOTE(review): listing lines 1956-1957 — the pci_free_consistent() argument
 * list (device, size, virtual address, bus address) — are missing from this
 * extraction.
 */
1952 static void release_dma_buffer(void)
1953 {
1954  if (bit3.dma_buffer_size > 0) {
1955  pci_free_consistent(
1958  );
1959  bit3.dma_buffer_size = 0;
1960  DEBUG_DMA(printk(KERN_DEBUG "dma buffer is released.\n"));
1961  }
1962 }
1963 
1964 
/*
 * map_dma_windows(): program the bridge's DMA mapping registers so that the
 * host-physical range [pci_address, pci_address+size) is reachable by the
 * DMA engine, and return the "mapped" PCI address (window index shifted into
 * place, OR'ed with the page offset) to load into the Local DMA Address
 * Register. An extra window is added when the range is not page-aligned.
 * NOTE(review): listing line 1983 — the base of the DMA mapping register
 * region used to compute mapping_register_address — is missing from this
 * extraction.
 */
1965 static unsigned map_dma_windows(unsigned pci_address, unsigned size, unsigned flags)
1966 {
1967  unsigned base_address, offset_address;
1968  unsigned number_of_windows;
1969  unsigned window_index;
1970  void* mapping_register_address;
1971  unsigned mapped_pci_address;
1972  int i;
1973 
1974  base_address = pci_address & bit3_DMA_PAGE_BASE_MASK;
1975  offset_address = pci_address & bit3_DMA_PAGE_OFFSET_MASK;
     /* ceiling division: windows needed to cover 'size' bytes */
1976  number_of_windows = ((unsigned long) (size - 1)) / (unsigned long) bit3_DMA_WINDOW_SIZE + 1;
1977  if (offset_address > 0) {
     /* unaligned start spills into one extra window */
1978  number_of_windows += 1;
1979  }
1980 
1981  window_index = 0;
1982  mapping_register_address = (
     /* NOTE(review): missing listing line 1983 — register region base. */
1984  bit3_DMA_MAPPING_REGISTER_WIDTH * window_index
1985  );
1986 
1987  DEBUG_DMA(printk(KERN_DEBUG "writing dma mapping registers...\n"));
1988  for (i = 0; i < number_of_windows; i++) {
1989  iowrite32(
1990  base_address | flags,
1991  mapping_register_address
1992  );
1993 
1994  DEBUG_DMA(printk(
1995  KERN_DEBUG " reg: 0x%08x, value: 0x%08x\n",
1996  (unsigned) mapping_register_address,
1997  base_address | flags
1998  ));
1999 
2000  base_address += bit3_DMA_WINDOW_SIZE;
2001  mapping_register_address += bit3_DMA_MAPPING_REGISTER_WIDTH;
2002  }
2003 
     /* fold the window index and page offset into the mapped address */
2004  mapped_pci_address = window_index << bit3_DMA_MAPPING_REGISTER_INDEX_SHIFT;
2005  mapped_pci_address |= offset_address;
2006 
2007  DEBUG_DMA(printk(KERN_DEBUG " mapped pci address: 0x%08x\n", mapped_pci_address));
2008 
2009  return mapped_pci_address;
2010 }
2011 
2012 
/*
 * initiate_dma(): program all DMA-related registers for one transfer and
 * kick it off. Sequence: build the Local DMA Command Register value
 * (direction + element width + DONE-interrupt enable), load the local
 * (mapped PCI) and remote (VME) address registers byte by byte, load the
 * remainder/packet-count registers, select the address modifier and remote
 * command 2 value (block-mode DMA vs. non-blocking), enable the interrupt,
 * set is_dma_running, and finally write dcSTART_DMA. Always returns 0; the
 * caller (start_dma) waits for completion.
 * NOTE(review): this extraction is missing the destination register address
 * argument of nearly every iowrite8/writeb call (listing lines 2036, 2043,
 * 2047, 2051, 2058, 2062, 2066, 2070, 2080, 2084, 2088, 2092, 2108, 2112,
 * 2126) as well as lines 2118-2119 (presumably the enable_normal_interrupt()
 * call). Confirm all register targets against the original vmedrv.c.
 */
2013 static int initiate_dma(struct dev_prop_t* dev_prop, unsigned mapped_pci_address, unsigned vme_address, unsigned size, int direction)
2014 {
2015  unsigned remainder_count, packet_count;
2016  unsigned dma_register_value, address_modifier, remote_command2_value;
2017 
2018  DEBUG_DMA(printk(KERN_DEBUG "setting dma parameters...\n"));
2019 
2020  /* program the Local DMA Command Register */
2021  dma_register_value = dcENABLE_DMA_DONE_INTERRUPT;
2022  if (direction == tdREAD) {
2023  dma_register_value |= dcDMA_TRANSFER_DIRECTION_READ;
2024  }
2025  else {
2026  dma_register_value |= dcDMA_TRANSFER_DIRECTION_WRITE;
2027  }
2028  if (dev_prop->data_width == dwWORD) {
2029  dma_register_value |= dcDMA_WORD_LONGWORD_SELECT_WORD;
2030  }
2031  else {
2032  dma_register_value |= dcDMA_WORD_LONGWORD_SELECT_LONGWORD;
2033  }
2034  iowrite8(
2035  dma_register_value,
2037  );
2038  DEBUG_DMA(printk(KERN_DEBUG " dma reg value: 0x%02x\n", dma_register_value));
2039 
2040  /* program the Local DMA Address Register (24 bits, one byte at a time) */
2041  iowrite8(
2042  (mapped_pci_address >> 0) & 0x000000ff,
2044  );
2045  iowrite8(
2046  (mapped_pci_address >> 8) & 0x000000ff,
2048  );
2049  iowrite8(
2050  (mapped_pci_address >> 16) & 0x000000ff,
2052  );
2053  DEBUG_DMA(printk(KERN_DEBUG " mapped pci address: 0x%08x\n", mapped_pci_address));
2054 
2055  /* load the Remote DMA Address Register (32 bits, one byte at a time) */
2056  iowrite8(
2057  (vme_address >> 0) & 0x000000ff,
2059  );
2060  iowrite8(
2061  (vme_address >> 8) & 0x000000ff,
2063  );
2064  iowrite8(
2065  (vme_address >> 16) & 0x000000ff,
2067  );
2068  iowrite8(
2069  (vme_address >> 24) & 0x000000ff,
2071  );
2072 
2073  DEBUG_DMA(printk(KERN_DEBUG " vme address: 0x%08x\n", vme_address));
2074 
2075  /* load the Remainder/Packet Count Register */
2076  remainder_count = size % bit3_DMA_PACKET_SIZE;
2077  packet_count = size / bit3_DMA_PACKET_SIZE;
2078  iowrite8(
2079  remainder_count,
2081  );
2082  iowrite8(
2083  remainder_count,
2085  );
2086  iowrite8(
2087  (packet_count >> 0) & 0x00ff,
2089  );
2090  iowrite8(
2091  (packet_count >> 8) & 0x00ff,
2093  );
2094  DEBUG_DMA(printk(KERN_DEBUG " remainder count: 0x%02x\n", remainder_count));
2095  DEBUG_DMA(printk(KERN_DEBUG " packet count: 0x%04x\n", packet_count));
2096 
2097  /* program the other CSRs */
2098  if (dev_prop->transfer_method == tmDMA) {
2099  address_modifier = dev_prop->dma_address_modifier;
2100  remote_command2_value = rcBLOCK_MODE_DMA | rcDISABLE_INTERRUPT_PASSING;
2101  }
2102  else /* Non-Blocking DMA */ {
2103  address_modifier = dev_prop->address_modifier;
2104  remote_command2_value = rcDISABLE_INTERRUPT_PASSING;
2105  }
2106  writeb(
2107  address_modifier,
2109  );
2110  writeb(
2111  remote_command2_value,
2113  );
2114  DEBUG_DMA(printk(KERN_DEBUG " dma am code: 0x%02x\n", dev_prop->dma_address_modifier));
2115  DEBUG_DMA(printk(KERN_DEBUG " remote command 2: 0x%02x\n", remote_command2_value));
2116 
2117  /* enable normal interrupt*/
     /* NOTE(review): missing listing lines 2118-2119 — interrupt enable. */
2120 
2121  /* now, start the DMA transfer */
2122  is_dma_running = 1;
     /* ensure the flag is globally visible before the device can complete */
2123  wmb();
2124  iowrite8(
2125  dma_register_value | dcSTART_DMA,
2127  );
2128 
2129  return 0;
2130 }
2131 
2132 
/*
 * release_dma(): tear down after a DMA transfer. Reads the local status
 * register (returned to the caller for error checks), restores the VME
 * interrupt flags, and clears a command register. Returns the sampled
 * status value.
 * NOTE(review): listing lines 2137 (the read that fills 'status'), 2141,
 * 2144 (presumably restore_interrupt_flags() or equivalent) and 2147 (the
 * destination register address of the final iowrite8) are missing from this
 * extraction — confirm against the original vmedrv.c.
 */
2133 static int release_dma(void)
2134 {
2135  unsigned status;
2136 
     /* NOTE(review): missing listing line 2137 — status register read. */
2138  DEBUG_DMA(printk(KERN_DEBUG "Local Status Register: 0x%02x\n", status));
2139 
2140  rmb();
2142 
2143  /* restore VME interrupt flags */
2145  iowrite8(
2146  0,
2148  );
2149 
2150  return status;
2151 }