/* umem.c */
  1. /**
  2. *
  3. * @file umem.c
  4. * @brief This file contains the functions handling user space memory.
  5. * @author Guillermo Marcus
  6. * @date 2009-04-05
  7. *
  8. */
  9. #include <linux/version.h>
  10. #include <linux/string.h>
  11. #include <linux/types.h>
  12. #include <linux/list.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/pci.h>
  15. #include <linux/cdev.h>
  16. #include <linux/wait.h>
  17. #include <linux/mm.h>
  18. #include <linux/pagemap.h>
  19. #include <linux/sched.h>
  20. #include "config.h" /* compile-time configuration */
  21. #include "compat.h" /* compatibility definitions for older linux */
  22. #include "pciDriver.h" /* external interface for the driver */
  23. #include "common.h" /* internal definitions for all parts */
  24. #include "umem.h" /* prototypes for kernel memory */
  25. #include "sysfs.h" /* prototypes for sysfs */
  26. /**
  27. *
  28. * Reserve a new scatter/gather list and map it from memory to PCI bus addresses.
  29. *
  30. */
  31. int pcidriver_umem_sgmap(pcidriver_privdata_t *privdata, umem_handle_t *umem_handle)
  32. {
  33. int i, res, nr_pages;
  34. struct page **pages;
  35. struct scatterlist *sg = NULL;
  36. pcidriver_umem_entry_t *umem_entry;
  37. unsigned int nents;
  38. unsigned long count,offset,length;
  39. /*
  40. * We do some checks first. Then, the following is necessary to create a
  41. * Scatter/Gather list from a user memory area:
  42. * - Determine the number of pages
  43. * - Get the pages for the memory area
  44. * - Lock them.
  45. * - Create a scatter/gather list of the pages
  46. * - Map the list from memory to PCI bus addresses
  47. *
  48. * Then, we:
  49. * - Create an entry on the umem list of the device, to cache the mapping.
  50. * - Create a sysfs attribute that gives easy access to the SG list
  51. */
  52. /* zero-size?? */
  53. if (umem_handle->size == 0)
  54. return -EINVAL;
  55. /* Direction is better ignoring during mapping. */
  56. /* We assume bidirectional buffers always, except when sync'ing */
  57. /* calculate the number of pages */
  58. nr_pages = ((umem_handle->vma & ~PAGE_MASK) + umem_handle->size + ~PAGE_MASK) >> PAGE_SHIFT;
  59. mod_info_dbg("nr_pages computed: %u\n", nr_pages);
  60. /* Allocate space for the page information */
  61. /* This can be very big, so we use vmalloc */
  62. if ((pages = vmalloc(nr_pages * sizeof(*pages))) == NULL)
  63. return -ENOMEM;
  64. mod_info_dbg("allocated space for the pages.\n");
  65. /* Allocate space for the scatterlist */
  66. /* We do not know how many entries will be, but the maximum is nr_pages. */
  67. /* This can be very big, so we use vmalloc */
  68. if ((sg = vmalloc(nr_pages * sizeof(*sg))) == NULL)
  69. goto umem_sgmap_pages;
  70. sg_init_table(sg, nr_pages);
  71. mod_info_dbg("allocated space for the SG list.\n");
  72. /* Get the page information */
  73. down_read(&current->mm->mmap_sem);
  74. res = get_user_pages(
  75. current,
  76. current->mm,
  77. umem_handle->vma,
  78. nr_pages,
  79. 1,
  80. 0, /* do not force, FIXME: shall I? */
  81. pages,
  82. NULL );
  83. up_read(&current->mm->mmap_sem);
  84. /* Error, not all pages mapped */
  85. if (res < (int)nr_pages) {
  86. mod_info("Could not map all user pages (%d of %d)\n", res, nr_pages);
  87. /* If only some pages could be mapped, we release those. If a real
  88. * error occured, we set nr_pages to 0 */
  89. nr_pages = (res > 0 ? res : 0);
  90. goto umem_sgmap_unmap;
  91. }
  92. mod_info_dbg("Got the pages (%d).\n", res);
  93. /* Lock the pages, then populate the SG list with the pages */
  94. /* page0 is different */
  95. if ( !PageReserved(pages[0]) )
  96. compat_lock_page(pages[0]);
  97. offset = (umem_handle->vma & ~PAGE_MASK);
  98. length = (umem_handle->size > (PAGE_SIZE-offset) ? (PAGE_SIZE-offset) : umem_handle->size);
  99. sg_set_page(&sg[0], pages[0], length, offset);
  100. count = umem_handle->size - length;
  101. for(i=1;i<nr_pages;i++) {
  102. /* Lock page first */
  103. if ( !PageReserved(pages[i]) )
  104. compat_lock_page(pages[i]);
  105. /* Populate the list */
  106. sg_set_page(&sg[i], pages[i], ((count > PAGE_SIZE) ? PAGE_SIZE : count), 0);
  107. count -= sg[i].length;
  108. }
  109. /* Use the page list to populate the SG list */
  110. /* SG entries may be merged, res is the number of used entries */
  111. /* We have originally nr_pages entries in the sg list */
  112. if ((nents = pci_map_sg(privdata->pdev, sg, nr_pages, PCI_DMA_BIDIRECTIONAL)) == 0)
  113. goto umem_sgmap_unmap;
  114. mod_info_dbg("Mapped SG list (%d entries).\n", nents);
  115. /* Add an entry to the umem_list of the device, and update the handle with the id */
  116. /* Allocate space for the new umem entry */
  117. if ((umem_entry = kmalloc(sizeof(*umem_entry), GFP_KERNEL)) == NULL)
  118. goto umem_sgmap_entry;
  119. /* Fill entry to be added to the umem list */
  120. umem_entry->id = atomic_inc_return(&privdata->umem_count) - 1;
  121. umem_entry->nr_pages = nr_pages; /* Will be needed when unmapping */
  122. umem_entry->pages = pages;
  123. umem_entry->nents = nents;
  124. umem_entry->sg = sg;
  125. if (pcidriver_sysfs_initialize_umem(privdata, umem_entry->id, &(umem_entry->sysfs_attr)) != 0)
  126. goto umem_sgmap_name_fail;
  127. /* Add entry to the umem list */
  128. spin_lock( &(privdata->umemlist_lock) );
  129. list_add_tail( &(umem_entry->list), &(privdata->umem_list) );
  130. spin_unlock( &(privdata->umemlist_lock) );
  131. /* Update the Handle with the Handle ID of the entry */
  132. umem_handle->handle_id = umem_entry->id;
  133. return 0;
  134. umem_sgmap_name_fail:
  135. kfree(umem_entry);
  136. umem_sgmap_entry:
  137. pci_unmap_sg( privdata->pdev, sg, nr_pages, PCI_DMA_BIDIRECTIONAL );
  138. umem_sgmap_unmap:
  139. /* release pages */
  140. if (nr_pages > 0) {
  141. for(i=0;i<nr_pages;i++) {
  142. if (PageLocked(pages[i]))
  143. compat_unlock_page(pages[i]);
  144. if (!PageReserved(pages[i]))
  145. set_page_dirty(pages[i]);
  146. page_cache_release(pages[i]);
  147. }
  148. }
  149. vfree(sg);
  150. umem_sgmap_pages:
  151. vfree(pages);
  152. return -ENOMEM;
  153. }
  154. /**
  155. *
  156. * Unmap a scatter/gather list
  157. *
  158. */
  159. int pcidriver_umem_sgunmap(pcidriver_privdata_t *privdata, pcidriver_umem_entry_t *umem_entry)
  160. {
  161. int i;
  162. pcidriver_sysfs_remove(privdata, &(umem_entry->sysfs_attr));
  163. /* Unmap user memory */
  164. pci_unmap_sg( privdata->pdev, umem_entry->sg, umem_entry->nr_pages, PCI_DMA_BIDIRECTIONAL );
  165. /* Release the pages */
  166. if (umem_entry->nr_pages > 0) {
  167. for(i=0;i<(umem_entry->nr_pages);i++) {
  168. /* Mark pages as Dirty and unlock it */
  169. if ( !PageReserved( umem_entry->pages[i] )) {
  170. SetPageDirty( umem_entry->pages[i] );
  171. compat_unlock_page(umem_entry->pages[i]);
  172. }
  173. /* and release it from the cache */
  174. page_cache_release( umem_entry->pages[i] );
  175. }
  176. }
  177. /* Remove the umem list entry */
  178. spin_lock( &(privdata->umemlist_lock) );
  179. list_del( &(umem_entry->list) );
  180. spin_unlock( &(privdata->umemlist_lock) );
  181. /* Release SG list and page list memory */
  182. /* These two are in the vm area of the kernel */
  183. vfree(umem_entry->pages);
  184. vfree(umem_entry->sg);
  185. /* Release umem_entry memory */
  186. kfree(umem_entry);
  187. return 0;
  188. }
  189. /**
  190. *
  191. * Unmap all scatter/gather lists.
  192. *
  193. */
  194. int pcidriver_umem_sgunmap_all(pcidriver_privdata_t *privdata)
  195. {
  196. struct list_head *ptr, *next;
  197. pcidriver_umem_entry_t *umem_entry;
  198. /* iterate safely over the entries and delete them */
  199. list_for_each_safe( ptr, next, &(privdata->umem_list) ) {
  200. umem_entry = list_entry(ptr, pcidriver_umem_entry_t, list );
  201. pcidriver_umem_sgunmap( privdata, umem_entry ); /* spin lock inside! */
  202. }
  203. return 0;
  204. }
  205. /**
  206. *
  207. * Copies the scatter/gather list from kernelspace to userspace.
  208. *
  209. */
  210. int pcidriver_umem_sgget(pcidriver_privdata_t *privdata, umem_sglist_t *umem_sglist)
  211. {
  212. int i;
  213. pcidriver_umem_entry_t *umem_entry;
  214. struct scatterlist *sg;
  215. int idx = 0;
  216. dma_addr_t cur_addr;
  217. unsigned int cur_size;
  218. /* Find the associated umem_entry for this buffer */
  219. umem_entry = pcidriver_umem_find_entry_id( privdata, umem_sglist->handle_id );
  220. if (umem_entry == NULL)
  221. return -EINVAL; /* umem_handle is not valid */
  222. /* Check if passed SG list is enough */
  223. if (umem_sglist->nents < umem_entry->nents)
  224. return -EINVAL; /* sg has not enough entries */
  225. /* Copy the SG list to the user format */
  226. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
  227. if (umem_sglist->type == PCIDRIVER_SG_MERGED) {
  228. for_each_sg(umem_entry->sg, sg, umem_entry->nents, i ) {
  229. if (i==0) {
  230. umem_sglist->sg[0].addr = sg_dma_address( sg );
  231. umem_sglist->sg[0].size = sg_dma_len( sg );
  232. idx = 0;
  233. }
  234. else {
  235. cur_addr = sg_dma_address( sg );
  236. cur_size = sg_dma_len( sg );
  237. /* Check if entry fits after current entry */
  238. if (cur_addr == (umem_sglist->sg[idx].addr + umem_sglist->sg[idx].size)) {
  239. umem_sglist->sg[idx].size += cur_size;
  240. continue;
  241. }
  242. /* Skip if the entry is zero-length (yes, it can happen.... at the end of the list) */
  243. if (cur_size == 0)
  244. continue;
  245. /* None of the above, add new entry */
  246. idx++;
  247. umem_sglist->sg[idx].addr = cur_addr;
  248. umem_sglist->sg[idx].size = cur_size;
  249. }
  250. }
  251. /* Set the used size of the SG list */
  252. umem_sglist->nents = idx+1;
  253. } else {
  254. for_each_sg(umem_entry->sg, sg, umem_entry->nents, i ) {
  255. mod_info("entry: %d\n",i);
  256. umem_sglist->sg[i].addr = sg_dma_address( sg );
  257. umem_sglist->sg[i].size = sg_dma_len( sg );
  258. }
  259. /* Set the used size of the SG list */
  260. /* Check if the last one is zero-length */
  261. if ( umem_sglist->sg[ umem_entry->nents - 1].size == 0)
  262. umem_sglist->nents = umem_entry->nents -1;
  263. else
  264. umem_sglist->nents = umem_entry->nents;
  265. }
  266. #else
  267. if (umem_sglist->type == PCIDRIVER_SG_MERGED) {
  268. /* Merge entries that are contiguous into a single entry */
  269. /* Non-optimal but fast for most cases */
  270. /* First one always true */
  271. sg=umem_entry->sg;
  272. umem_sglist->sg[0].addr = sg_dma_address( sg );
  273. umem_sglist->sg[0].size = sg_dma_len( sg );
  274. sg++;
  275. idx = 0;
  276. /* Iterate over the SG entries */
  277. for(i=1; i< umem_entry->nents; i++, sg++ ) {
  278. cur_addr = sg_dma_address( sg );
  279. cur_size = sg_dma_len( sg );
  280. /* Check if entry fits after current entry */
  281. if (cur_addr == (umem_sglist->sg[idx].addr + umem_sglist->sg[idx].size)) {
  282. umem_sglist->sg[idx].size += cur_size;
  283. continue;
  284. }
  285. /* Skip if the entry is zero-length (yes, it can happen.... at the end of the list) */
  286. if (cur_size == 0)
  287. continue;
  288. /* None of the above, add new entry */
  289. idx++;
  290. umem_sglist->sg[idx].addr = cur_addr;
  291. umem_sglist->sg[idx].size = cur_size;
  292. }
  293. /* Set the used size of the SG list */
  294. umem_sglist->nents = idx+1;
  295. } else {
  296. /* Assume pci_map_sg made a good job (ehem..) and just copy it.
  297. * actually, now I assume it just gives them plainly to me. */
  298. for(i=0, sg=umem_entry->sg ; i< umem_entry->nents; i++, sg++ ) {
  299. umem_sglist->sg[i].addr = sg_dma_address( sg );
  300. umem_sglist->sg[i].size = sg_dma_len( sg );
  301. }
  302. /* Set the used size of the SG list */
  303. /* Check if the last one is zero-length */
  304. if ( umem_sglist->sg[ umem_entry->nents - 1].size == 0)
  305. umem_sglist->nents = umem_entry->nents -1;
  306. else
  307. umem_sglist->nents = umem_entry->nents;
  308. }
  309. #endif
  310. return 0;
  311. }
  312. /**
  313. *
  314. * Sync user space memory from/to device
  315. *
  316. */
/**
 * Sync user space memory from/to device.
 *
 * Looks up the cached umem entry by handle_id and issues the appropriate
 * PCI DMA sync call(s) for the requested direction.  On kernels >= 2.6.11
 * the split for_device/for_cpu API is used; older kernels get the single
 * pci_dma_sync_sg() call.
 *
 * NOTE(review): the SG list was mapped with PCI_DMA_BIDIRECTIONAL in
 * pcidriver_umem_sgmap(), but TODEVICE/FROMDEVICE syncs here pass a
 * narrower direction.  The DMA-API documentation says the sync direction
 * should match the mapping direction — confirm this is acceptable on the
 * targeted architectures before changing anything.
 *
 * @param privdata driver private data owning the umem list
 * @param umem_handle identifies the buffer (handle_id) and direction (dir)
 * @return 0 on success, -EINVAL for an unknown handle or direction
 */
int pcidriver_umem_sync( pcidriver_privdata_t *privdata, umem_handle_t *umem_handle )
{
	pcidriver_umem_entry_t *umem_entry;
	/* Find the associated umem_entry for this buffer */
	umem_entry = pcidriver_umem_find_entry_id( privdata, umem_handle->handle_id );
	if (umem_entry == NULL)
		return -EINVAL; /* umem_handle is not valid */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
	switch (umem_handle->dir) {
	case PCIDRIVER_DMA_TODEVICE:
		pci_dma_sync_sg_for_device( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_TODEVICE );
		break;
	case PCIDRIVER_DMA_FROMDEVICE:
		pci_dma_sync_sg_for_cpu( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_FROMDEVICE );
		break;
	case PCIDRIVER_DMA_BIDIRECTIONAL:
		/* both directions: hand the buffer to the device, then back to the CPU */
		pci_dma_sync_sg_for_device( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_BIDIRECTIONAL );
		pci_dma_sync_sg_for_cpu( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_BIDIRECTIONAL );
		break;
	default:
		return -EINVAL; /* wrong direction parameter */
	}
#else
	/* pre-2.6.11: only the combined sync call exists */
	switch (umem_handle->dir) {
	case PCIDRIVER_DMA_TODEVICE:
		pci_dma_sync_sg( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_TODEVICE );
		break;
	case PCIDRIVER_DMA_FROMDEVICE:
		pci_dma_sync_sg( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_FROMDEVICE );
		break;
	case PCIDRIVER_DMA_BIDIRECTIONAL:
		pci_dma_sync_sg( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_BIDIRECTIONAL );
		break;
	default:
		return -EINVAL; /* wrong direction parameter */
	}
#endif
	return 0;
}
  356. /*
  357. *
  358. * Get the pcidriver_umem_entry_t structure for the given id.
  359. *
  360. * @param id ID of the umem entry to search for
  361. *
  362. */
  363. pcidriver_umem_entry_t *pcidriver_umem_find_entry_id(pcidriver_privdata_t *privdata, int id)
  364. {
  365. struct list_head *ptr;
  366. pcidriver_umem_entry_t *entry;
  367. spin_lock(&(privdata->umemlist_lock));
  368. list_for_each(ptr, &(privdata->umem_list)) {
  369. entry = list_entry(ptr, pcidriver_umem_entry_t, list );
  370. if (entry->id == id) {
  371. spin_unlock( &(privdata->umemlist_lock) );
  372. return entry;
  373. }
  374. }
  375. spin_unlock(&(privdata->umemlist_lock));
  376. return NULL;
  377. }