/**
 *
 * @file umem.c
 * @brief This file contains the functions handling user space memory.
 * @author Guillermo Marcus
 * @date 2009-04-05
 *
 */
#include <linux/version.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include "base.h"
  22. /**
  23. *
  24. * Reserve a new scatter/gather list and map it from memory to PCI bus addresses.
  25. *
  26. */
  27. int pcidriver_umem_sgmap(pcidriver_privdata_t *privdata, umem_handle_t *umem_handle)
  28. {
  29. int i, res, nr_pages;
  30. struct page **pages;
  31. struct scatterlist *sg = NULL;
  32. pcidriver_umem_entry_t *umem_entry;
  33. unsigned int nents;
  34. unsigned long count,offset,length;
  35. /*
  36. * We do some checks first. Then, the following is necessary to create a
  37. * Scatter/Gather list from a user memory area:
  38. * - Determine the number of pages
  39. * - Get the pages for the memory area
  40. * - Lock them.
  41. * - Create a scatter/gather list of the pages
  42. * - Map the list from memory to PCI bus addresses
  43. *
  44. * Then, we:
  45. * - Create an entry on the umem list of the device, to cache the mapping.
  46. * - Create a sysfs attribute that gives easy access to the SG list
  47. */
  48. /* zero-size?? */
  49. if (umem_handle->size == 0)
  50. return -EINVAL;
  51. /* Direction is better ignoring during mapping. */
  52. /* We assume bidirectional buffers always, except when sync'ing */
  53. /* calculate the number of pages */
  54. nr_pages = ((umem_handle->vma & ~PAGE_MASK) + umem_handle->size + ~PAGE_MASK) >> PAGE_SHIFT;
  55. mod_info_dbg("nr_pages computed: %u\n", nr_pages);
  56. /* Allocate space for the page information */
  57. /* This can be very big, so we use vmalloc */
  58. if ((pages = vmalloc(nr_pages * sizeof(*pages))) == NULL)
  59. return -ENOMEM;
  60. mod_info_dbg("allocated space for the pages.\n");
  61. /* Allocate space for the scatterlist */
  62. /* We do not know how many entries will be, but the maximum is nr_pages. */
  63. /* This can be very big, so we use vmalloc */
  64. if ((sg = vmalloc(nr_pages * sizeof(*sg))) == NULL)
  65. goto umem_sgmap_pages;
  66. sg_init_table(sg, nr_pages);
  67. mod_info_dbg("allocated space for the SG list.\n");
  68. /* Get the page information */
  69. down_read(&current->mm->mmap_sem);
  70. res = get_user_pages_compat(umem_handle->vma, nr_pages, pages);
  71. up_read(&current->mm->mmap_sem);
  72. /* Error, not all pages mapped */
  73. if (res < (int)nr_pages) {
  74. mod_info("Could not map all user pages (%d of %d)\n", res, nr_pages);
  75. /* If only some pages could be mapped, we release those. If a real
  76. * error occured, we set nr_pages to 0 */
  77. nr_pages = (res > 0 ? res : 0);
  78. goto umem_sgmap_unmap;
  79. }
  80. mod_info_dbg("Got the pages (%d).\n", res);
  81. /* Lock the pages, then populate the SG list with the pages */
  82. /* page0 is different */
  83. if ( !PageReserved(pages[0]) )
  84. lock_page(pages[0]);
  85. offset = (umem_handle->vma & ~PAGE_MASK);
  86. length = (umem_handle->size > (PAGE_SIZE-offset) ? (PAGE_SIZE-offset) : umem_handle->size);
  87. sg_set_page(&sg[0], pages[0], length, offset);
  88. count = umem_handle->size - length;
  89. for(i=1; i<nr_pages; i++) {
  90. /* Lock page first */
  91. if ( !PageReserved(pages[i]) )
  92. lock_page(pages[i]);
  93. /* Populate the list */
  94. sg_set_page(&sg[i], pages[i], ((count > PAGE_SIZE) ? PAGE_SIZE : count), 0);
  95. count -= sg[i].length;
  96. }
  97. /* Use the page list to populate the SG list */
  98. /* SG entries may be merged, res is the number of used entries */
  99. /* We have originally nr_pages entries in the sg list */
  100. if ((nents = pci_map_sg(privdata->pdev, sg, nr_pages, PCI_DMA_BIDIRECTIONAL)) == 0)
  101. goto umem_sgmap_unmap;
  102. mod_info_dbg("Mapped SG list (%d entries).\n", nents);
  103. /* Add an entry to the umem_list of the device, and update the handle with the id */
  104. /* Allocate space for the new umem entry */
  105. if ((umem_entry = kmalloc(sizeof(*umem_entry), GFP_KERNEL)) == NULL)
  106. goto umem_sgmap_entry;
  107. /* Fill entry to be added to the umem list */
  108. umem_entry->id = atomic_inc_return(&privdata->umem_count) - 1;
  109. umem_entry->nr_pages = nr_pages; /* Will be needed when unmapping */
  110. umem_entry->pages = pages;
  111. umem_entry->nents = nents;
  112. umem_entry->sg = sg;
  113. if (pcidriver_sysfs_initialize_umem(privdata, umem_entry->id, &(umem_entry->sysfs_attr)) != 0)
  114. goto umem_sgmap_name_fail;
  115. /* Add entry to the umem list */
  116. spin_lock( &(privdata->umemlist_lock) );
  117. list_add_tail( &(umem_entry->list), &(privdata->umem_list) );
  118. spin_unlock( &(privdata->umemlist_lock) );
  119. /* Update the Handle with the Handle ID of the entry */
  120. umem_handle->handle_id = umem_entry->id;
  121. return 0;
  122. umem_sgmap_name_fail:
  123. kfree(umem_entry);
  124. umem_sgmap_entry:
  125. pci_unmap_sg( privdata->pdev, sg, nr_pages, PCI_DMA_BIDIRECTIONAL );
  126. umem_sgmap_unmap:
  127. /* release pages */
  128. if (nr_pages > 0) {
  129. for(i=0; i<nr_pages; i++) {
  130. if (PageLocked(pages[i]))
  131. unlock_page(pages[i]);
  132. if (!PageReserved(pages[i]))
  133. set_page_dirty(pages[i]);
  134. put_page(pages[i]);
  135. }
  136. }
  137. vfree(sg);
  138. umem_sgmap_pages:
  139. vfree(pages);
  140. return -ENOMEM;
  141. }
  142. /**
  143. *
  144. * Unmap a scatter/gather list
  145. *
  146. */
  147. int pcidriver_umem_sgunmap(pcidriver_privdata_t *privdata, pcidriver_umem_entry_t *umem_entry)
  148. {
  149. int i;
  150. pcidriver_sysfs_remove(privdata, &(umem_entry->sysfs_attr));
  151. /* Unmap user memory */
  152. pci_unmap_sg( privdata->pdev, umem_entry->sg, umem_entry->nr_pages, PCI_DMA_BIDIRECTIONAL );
  153. /* Release the pages */
  154. if (umem_entry->nr_pages > 0) {
  155. for(i=0; i<(umem_entry->nr_pages); i++) {
  156. /* Mark pages as Dirty and unlock it */
  157. if ( !PageReserved( umem_entry->pages[i] )) {
  158. SetPageDirty( umem_entry->pages[i] );
  159. unlock_page(umem_entry->pages[i]);
  160. }
  161. /* and release it from the cache */
  162. put_page( umem_entry->pages[i] );
  163. }
  164. }
  165. /* Remove the umem list entry */
  166. spin_lock( &(privdata->umemlist_lock) );
  167. list_del( &(umem_entry->list) );
  168. spin_unlock( &(privdata->umemlist_lock) );
  169. /* Release SG list and page list memory */
  170. /* These two are in the vm area of the kernel */
  171. vfree(umem_entry->pages);
  172. vfree(umem_entry->sg);
  173. /* Release umem_entry memory */
  174. kfree(umem_entry);
  175. return 0;
  176. }
  177. /**
  178. *
  179. * Unmap all scatter/gather lists.
  180. *
  181. */
  182. int pcidriver_umem_sgunmap_all(pcidriver_privdata_t *privdata)
  183. {
  184. struct list_head *ptr, *next;
  185. pcidriver_umem_entry_t *umem_entry;
  186. /* iterate safely over the entries and delete them */
  187. list_for_each_safe( ptr, next, &(privdata->umem_list) ) {
  188. umem_entry = list_entry(ptr, pcidriver_umem_entry_t, list );
  189. pcidriver_umem_sgunmap( privdata, umem_entry ); /* spin lock inside! */
  190. }
  191. return 0;
  192. }
  193. /**
  194. *
  195. * Copies the scatter/gather list from kernelspace to userspace.
  196. *
  197. */
  198. int pcidriver_umem_sgget(pcidriver_privdata_t *privdata, umem_sglist_t *umem_sglist)
  199. {
  200. int i;
  201. pcidriver_umem_entry_t *umem_entry;
  202. struct scatterlist *sg;
  203. int idx = 0;
  204. dma_addr_t cur_addr;
  205. unsigned int cur_size;
  206. /* Find the associated umem_entry for this buffer */
  207. umem_entry = pcidriver_umem_find_entry_id( privdata, umem_sglist->handle_id );
  208. if (umem_entry == NULL)
  209. return -EINVAL; /* umem_handle is not valid */
  210. /* Check if passed SG list is enough */
  211. if (umem_sglist->nents < umem_entry->nents)
  212. return -EINVAL; /* sg has not enough entries */
  213. /* Copy the SG list to the user format */
  214. if (umem_sglist->type == PCIDRIVER_SG_MERGED) {
  215. for_each_sg(umem_entry->sg, sg, umem_entry->nents, i ) {
  216. if (i==0) {
  217. umem_sglist->sg[0].addr = sg_dma_address( sg );
  218. umem_sglist->sg[0].size = sg_dma_len( sg );
  219. idx = 0;
  220. }
  221. else {
  222. cur_addr = sg_dma_address( sg );
  223. cur_size = sg_dma_len( sg );
  224. /* Check if entry fits after current entry */
  225. if (cur_addr == (umem_sglist->sg[idx].addr + umem_sglist->sg[idx].size)) {
  226. umem_sglist->sg[idx].size += cur_size;
  227. continue;
  228. }
  229. /* Skip if the entry is zero-length (yes, it can happen.... at the end of the list) */
  230. if (cur_size == 0)
  231. continue;
  232. /* None of the above, add new entry */
  233. idx++;
  234. umem_sglist->sg[idx].addr = cur_addr;
  235. umem_sglist->sg[idx].size = cur_size;
  236. }
  237. }
  238. /* Set the used size of the SG list */
  239. umem_sglist->nents = idx+1;
  240. } else {
  241. for_each_sg(umem_entry->sg, sg, umem_entry->nents, i ) {
  242. mod_info("entry: %d\n",i);
  243. umem_sglist->sg[i].addr = sg_dma_address( sg );
  244. umem_sglist->sg[i].size = sg_dma_len( sg );
  245. }
  246. /* Set the used size of the SG list */
  247. /* Check if the last one is zero-length */
  248. if ( umem_sglist->sg[ umem_entry->nents - 1].size == 0)
  249. umem_sglist->nents = umem_entry->nents -1;
  250. else
  251. umem_sglist->nents = umem_entry->nents;
  252. }
  253. return 0;
  254. }
  255. /**
  256. *
  257. * Sync user space memory from/to device
  258. *
  259. */
  260. int pcidriver_umem_sync( pcidriver_privdata_t *privdata, umem_handle_t *umem_handle )
  261. {
  262. pcidriver_umem_entry_t *umem_entry;
  263. /* Find the associated umem_entry for this buffer */
  264. umem_entry = pcidriver_umem_find_entry_id( privdata, umem_handle->handle_id );
  265. if (umem_entry == NULL)
  266. return -EINVAL; /* umem_handle is not valid */
  267. switch (umem_handle->dir) {
  268. case PCIDRIVER_DMA_TODEVICE:
  269. pci_dma_sync_sg_for_device( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_TODEVICE );
  270. break;
  271. case PCIDRIVER_DMA_FROMDEVICE:
  272. pci_dma_sync_sg_for_cpu( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_FROMDEVICE );
  273. break;
  274. case PCIDRIVER_DMA_BIDIRECTIONAL:
  275. pci_dma_sync_sg_for_device( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_BIDIRECTIONAL );
  276. pci_dma_sync_sg_for_cpu( privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_BIDIRECTIONAL );
  277. break;
  278. default:
  279. return -EINVAL; /* wrong direction parameter */
  280. }
  281. return 0;
  282. }
  283. /*
  284. *
  285. * Get the pcidriver_umem_entry_t structure for the given id.
  286. *
  287. * @param id ID of the umem entry to search for
  288. *
  289. */
  290. pcidriver_umem_entry_t *pcidriver_umem_find_entry_id(pcidriver_privdata_t *privdata, int id)
  291. {
  292. struct list_head *ptr;
  293. pcidriver_umem_entry_t *entry;
  294. spin_lock(&(privdata->umemlist_lock));
  295. list_for_each(ptr, &(privdata->umem_list)) {
  296. entry = list_entry(ptr, pcidriver_umem_entry_t, list );
  297. if (entry->id == id) {
  298. spin_unlock( &(privdata->umemlist_lock) );
  299. return entry;
  300. }
  301. }
  302. spin_unlock(&(privdata->umemlist_lock));
  303. return NULL;
  304. }