/**
 *
 * @file umem.c
 * @brief This file contains the functions handling user space memory.
 * @author Guillermo Marcus
 * @date 2009-04-05
 *
 */
#include <linux/version.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include "base.h"
/**
 *
 * Reserve a new scatter/gather list and map it from memory to PCI bus addresses.
 *
 */
int pcidriver_umem_sgmap(pcidriver_privdata_t *privdata, umem_handle_t *umem_handle)
{
    int i, res, nr_pages;
    struct page **pages;
    struct scatterlist *sg = NULL;
    pcidriver_umem_entry_t *umem_entry;
    unsigned int nents;
    unsigned long count, offset, length;
    /*
     * We do some checks first. Then, the following is necessary to create a
     * scatter/gather list from a user memory area:
     *  - Determine the number of pages
     *  - Get the pages for the memory area
     *  - Lock them
     *  - Create a scatter/gather list of the pages
     *  - Map the list from memory to PCI bus addresses
     *
     * Then, we:
     *  - Create an entry on the umem list of the device, to cache the mapping
     *  - Create a sysfs attribute that gives easy access to the SG list
     */
    /* zero-size? */
    if (umem_handle->size == 0)
        return -EINVAL;

    /* The direction is better ignored while mapping: we always assume
     * bidirectional buffers, except when syncing. */

    /* Calculate the number of pages */
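    /*
     * Note: PAGE_MASK masks off the in-page offset bits, so ~PAGE_MASK equals
     * PAGE_SIZE - 1. The expression below is therefore a ceiling division:
     * nr_pages = ceil((offset_in_page + size) / PAGE_SIZE).
     * Illustrative example with 4 KiB pages: a buffer starting at page offset
     * 0x100 with size 0x2000 needs (0x100 + 0x2000 + 0xFFF) >> 12 = 3 pages.
     */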
    nr_pages = ((umem_handle->vma & ~PAGE_MASK) + umem_handle->size + ~PAGE_MASK) >> PAGE_SHIFT;
    mod_info_dbg("nr_pages computed: %d\n", nr_pages);
    /* Allocate space for the page information */
    /* This can be very big, so we use vmalloc */
    if ((pages = vmalloc(nr_pages * sizeof(*pages))) == NULL)
        return -ENOMEM;

    mod_info_dbg("allocated space for the pages.\n");

    /* Allocate space for the scatterlist */
    /* We do not know how many entries there will be, but the maximum is nr_pages. */
    /* This can be very big, so we use vmalloc */
    if ((sg = vmalloc(nr_pages * sizeof(*sg))) == NULL)
        goto umem_sgmap_pages;

    sg_init_table(sg, nr_pages);
    mod_info_dbg("allocated space for the SG list.\n");

    /* Get the page information */
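    /*
     * This uses the older get_user_pages() form that takes the task and mm
     * explicitly and must be called with mmap_sem held for read. write = 1
     * requests pages the driver (and the device, via DMA) may write to;
     * force = 0 does not override the VMA protections. On success it returns
     * the number of pages actually pinned, which may be fewer than requested.
     */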
    down_read(&current->mm->mmap_sem);
    res = get_user_pages(
            current,
            current->mm,
            umem_handle->vma,
            nr_pages,
            1,
            0,              /* do not force, FIXME: shall I? */
            pages,
            NULL);
    up_read(&current->mm->mmap_sem);

    /* Error, not all pages mapped */
    if (res < (int)nr_pages) {
        mod_info("Could not map all user pages (%d of %d)\n", res, nr_pages);
        /* If only some pages could be mapped, we release those. If a real
         * error occurred, we set nr_pages to 0 */
        nr_pages = (res > 0 ? res : 0);
        goto umem_sgmap_unmap;
    }

    mod_info_dbg("Got the pages (%d).\n", res);
    /* Lock the pages, then populate the SG list with the pages */
    /* page 0 is different */
    if (!PageReserved(pages[0]))
        __set_page_locked(pages[0]);

    offset = (umem_handle->vma & ~PAGE_MASK);
    length = (umem_handle->size > (PAGE_SIZE - offset) ? (PAGE_SIZE - offset) : umem_handle->size);
    sg_set_page(&sg[0], pages[0], length, offset);

    count = umem_handle->size - length;
    for (i = 1; i < nr_pages; i++) {
        /* Lock page first */
        if (!PageReserved(pages[i]))
            __set_page_locked(pages[i]);
        /* Populate the list */
        sg_set_page(&sg[i], pages[i], ((count > PAGE_SIZE) ? PAGE_SIZE : count), 0);
        count -= sg[i].length;
    }
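    /*
     * The first entry carries the in-page offset and is truncated at the end
     * of its page; every following entry starts at offset 0 and takes a full
     * page (or whatever remains). Continuing the example above (offset 0x100,
     * size 0x2000): sg[0] = 0xF00 bytes at offset 0x100, sg[1] = 0x1000 bytes,
     * sg[2] = 0x100 bytes, adding up to the 0x2000 bytes requested.
     */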
    /* Use the page list to populate the SG list */
    /* SG entries may be merged; nents is the number of used entries */
    /* We originally have nr_pages entries in the SG list */
    if ((nents = pci_map_sg(privdata->pdev, sg, nr_pages, PCI_DMA_BIDIRECTIONAL)) == 0)
        goto umem_sgmap_unmap;

    mod_info_dbg("Mapped SG list (%d entries).\n", nents);

    /* Add an entry to the umem_list of the device, and update the handle with the id */
    /* Allocate space for the new umem entry */
    if ((umem_entry = kmalloc(sizeof(*umem_entry), GFP_KERNEL)) == NULL)
        goto umem_sgmap_entry;

    /* Fill entry to be added to the umem list */
    umem_entry->id = atomic_inc_return(&privdata->umem_count) - 1;
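    /*
     * atomic_inc_return() hands out a unique, monotonically increasing id per
     * mapping, so concurrent callers cannot end up with the same handle_id;
     * the -1 makes the ids start at 0.
     */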
    umem_entry->nr_pages = nr_pages;    /* Will be needed when unmapping */
    umem_entry->pages = pages;
    umem_entry->nents = nents;
    umem_entry->sg = sg;

    if (pcidriver_sysfs_initialize_umem(privdata, umem_entry->id, &(umem_entry->sysfs_attr)) != 0)
        goto umem_sgmap_name_fail;

    /* Add entry to the umem list */
    spin_lock(&(privdata->umemlist_lock));
    list_add_tail(&(umem_entry->list), &(privdata->umem_list));
    spin_unlock(&(privdata->umemlist_lock));

    /* Update the handle with the handle id of the entry */
    umem_handle->handle_id = umem_entry->id;

    return 0;
umem_sgmap_name_fail:
    kfree(umem_entry);
umem_sgmap_entry:
    pci_unmap_sg(privdata->pdev, sg, nr_pages, PCI_DMA_BIDIRECTIONAL);
umem_sgmap_unmap:
    /* Release pages */
    if (nr_pages > 0) {
        for (i = 0; i < nr_pages; i++) {
            if (PageLocked(pages[i]))
                __clear_page_locked(pages[i]);
            if (!PageReserved(pages[i]))
                set_page_dirty(pages[i]);
            page_cache_release(pages[i]);
        }
    }
    vfree(sg);
umem_sgmap_pages:
    vfree(pages);
    return -ENOMEM;
}
/**
 *
 * Unmap a scatter/gather list.
 *
 */
int pcidriver_umem_sgunmap(pcidriver_privdata_t *privdata, pcidriver_umem_entry_t *umem_entry)
{
    int i;

    pcidriver_sysfs_remove(privdata, &(umem_entry->sysfs_attr));

    /* Unmap user memory */
    pci_unmap_sg(privdata->pdev, umem_entry->sg, umem_entry->nr_pages, PCI_DMA_BIDIRECTIONAL);

    /* Release the pages */
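    /*
     * The device may have written into these pages via DMA behind the kernel's
     * back, so they are marked dirty before being released; otherwise the new
     * data could be silently dropped instead of written back for file-backed
     * or swapped pages. Reserved pages are skipped, as in the mapping path.
     */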
    if (umem_entry->nr_pages > 0) {
        for (i = 0; i < (umem_entry->nr_pages); i++) {
            /* Mark the page as dirty and unlock it */
            if (!PageReserved(umem_entry->pages[i])) {
                SetPageDirty(umem_entry->pages[i]);
                __clear_page_locked(umem_entry->pages[i]);
            }
            /* and release it from the cache */
            page_cache_release(umem_entry->pages[i]);
        }
    }

    /* Remove the umem list entry */
    spin_lock(&(privdata->umemlist_lock));
    list_del(&(umem_entry->list));
    spin_unlock(&(privdata->umemlist_lock));

    /* Release SG list and page list memory */
    /* These two live in the vmalloc area of the kernel */
    vfree(umem_entry->pages);
    vfree(umem_entry->sg);

    /* Release umem_entry memory */
    kfree(umem_entry);

    return 0;
}
/**
 *
 * Unmap all scatter/gather lists.
 *
 */
int pcidriver_umem_sgunmap_all(pcidriver_privdata_t *privdata)
{
    struct list_head *ptr, *next;
    pcidriver_umem_entry_t *umem_entry;

    /* Iterate safely over the entries and delete them */
    list_for_each_safe(ptr, next, &(privdata->umem_list)) {
        umem_entry = list_entry(ptr, pcidriver_umem_entry_t, list);
        pcidriver_umem_sgunmap(privdata, umem_entry);   /* spin lock inside! */
    }

    return 0;
}
/**
 *
 * Copies the scatter/gather list from kernel space to user space.
 *
 */
int pcidriver_umem_sgget(pcidriver_privdata_t *privdata, umem_sglist_t *umem_sglist)
{
    int i;
    pcidriver_umem_entry_t *umem_entry;
    struct scatterlist *sg;
    int idx = 0;
    dma_addr_t cur_addr;
    unsigned int cur_size;

    /* Find the associated umem_entry for this buffer */
    umem_entry = pcidriver_umem_find_entry_id(privdata, umem_sglist->handle_id);
    if (umem_entry == NULL)
        return -EINVAL;         /* umem_handle is not valid */

    /* Check if the passed SG list is big enough */
    if (umem_sglist->nents < umem_entry->nents)
        return -EINVAL;         /* sg does not have enough entries */

    /* Copy the SG list to the user format */
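    /*
     * Two output formats are supported: PCIDRIVER_SG_MERGED coalesces entries
     * whose bus address ranges are contiguous into a single larger entry while
     * copying (e.g. 0x1000 bytes at 0xd0000000 followed by 0x1000 bytes at
     * 0xd0001000 become one 0x2000-byte entry); any other type copies the
     * mapped list one-to-one.
     */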
    if (umem_sglist->type == PCIDRIVER_SG_MERGED) {
        for_each_sg(umem_entry->sg, sg, umem_entry->nents, i) {
            if (i == 0) {
                umem_sglist->sg[0].addr = sg_dma_address(sg);
                umem_sglist->sg[0].size = sg_dma_len(sg);
                idx = 0;
            } else {
                cur_addr = sg_dma_address(sg);
                cur_size = sg_dma_len(sg);

                /* Check if the entry fits right after the current entry */
                if (cur_addr == (umem_sglist->sg[idx].addr + umem_sglist->sg[idx].size)) {
                    umem_sglist->sg[idx].size += cur_size;
                    continue;
                }

                /* Skip if the entry is zero-length (yes, it can happen... at the end of the list) */
                if (cur_size == 0)
                    continue;

                /* None of the above, add a new entry */
                idx++;
                umem_sglist->sg[idx].addr = cur_addr;
                umem_sglist->sg[idx].size = cur_size;
            }
        }
        /* Set the used size of the SG list */
        umem_sglist->nents = idx + 1;
    } else {
        for_each_sg(umem_entry->sg, sg, umem_entry->nents, i) {
            mod_info("entry: %d\n", i);
            umem_sglist->sg[i].addr = sg_dma_address(sg);
            umem_sglist->sg[i].size = sg_dma_len(sg);
        }

        /* Set the used size of the SG list */
        /* Check if the last one is zero-length */
        if (umem_sglist->sg[umem_entry->nents - 1].size == 0)
            umem_sglist->nents = umem_entry->nents - 1;
        else
            umem_sglist->nents = umem_entry->nents;
    }

    return 0;
}
/**
 *
 * Sync user space memory from/to device.
 *
 */
int pcidriver_umem_sync(pcidriver_privdata_t *privdata, umem_handle_t *umem_handle)
{
    pcidriver_umem_entry_t *umem_entry;

    /* Find the associated umem_entry for this buffer */
    umem_entry = pcidriver_umem_find_entry_id(privdata, umem_handle->handle_id);
    if (umem_entry == NULL)
        return -EINVAL;         /* umem_handle is not valid */
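    /*
     * The buffers were mapped PCI_DMA_BIDIRECTIONAL, so the sync direction is
     * chosen per transfer here: sync_for_device hands ownership of the buffer
     * to the device before it reads from it, and sync_for_cpu hands it back to
     * the CPU after the device has written to it. The bidirectional case
     * simply performs both.
     */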
    switch (umem_handle->dir) {
    case PCIDRIVER_DMA_TODEVICE:
        pci_dma_sync_sg_for_device(privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_TODEVICE);
        break;
    case PCIDRIVER_DMA_FROMDEVICE:
        pci_dma_sync_sg_for_cpu(privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_FROMDEVICE);
        break;
    case PCIDRIVER_DMA_BIDIRECTIONAL:
        pci_dma_sync_sg_for_device(privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_BIDIRECTIONAL);
        pci_dma_sync_sg_for_cpu(privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_BIDIRECTIONAL);
        break;
    default:
        return -EINVAL;         /* wrong direction parameter */
    }

    return 0;
}
/**
 *
 * Get the pcidriver_umem_entry_t structure for the given id.
 *
 * @param id ID of the umem entry to search for
 *
 */
pcidriver_umem_entry_t *pcidriver_umem_find_entry_id(pcidriver_privdata_t *privdata, int id)
{
    struct list_head *ptr;
    pcidriver_umem_entry_t *entry;

    spin_lock(&(privdata->umemlist_lock));
    list_for_each(ptr, &(privdata->umem_list)) {
        entry = list_entry(ptr, pcidriver_umem_entry_t, list);
        if (entry->id == id) {
            spin_unlock(&(privdata->umemlist_lock));
            return entry;
        }
    }
    spin_unlock(&(privdata->umemlist_lock));

    return NULL;
}