/**
 *
 * @file umem.c
 * @brief This file contains the functions handling user space memory.
 * @author Guillermo Marcus
 * @date 2009-04-05
 *
 */
#include <linux/version.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include "base.h"
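
/*
 * NOTE: pcidriver_privdata_t, umem_handle_t, the mod_info*() logging macros
 * and get_user_pages_compat() are not defined in this file; presumably they
 * come from "base.h" (or the headers it pulls in).
 */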

/**
 *
 * Reserve a new scatter/gather list and map it from memory to PCI bus addresses.
 *
 */
int pcidriver_umem_sgmap(pcidriver_privdata_t *privdata, umem_handle_t *umem_handle)
{
    int i, res, nr_pages;
    struct page **pages;
    struct scatterlist *sg = NULL;
    pcidriver_umem_entry_t *umem_entry;
    unsigned int nents;
    unsigned long count, offset, length;

    /*
     * We do some checks first. Then, the following is necessary to create a
     * scatter/gather list from a user memory area:
     *  - Determine the number of pages
     *  - Get the pages for the memory area
     *  - Lock them
     *  - Create a scatter/gather list of the pages
     *  - Map the list from memory to PCI bus addresses
     *
     * Then, we:
     *  - Create an entry on the umem list of the device, to cache the mapping
     *  - Create a sysfs attribute that gives easy access to the SG list
     */

    /* zero-size? */
    if (umem_handle->size == 0)
        return -EINVAL;

    /* Direction is better ignored during mapping. */
    /* We assume bidirectional buffers always, except when syncing. */

    /* Calculate the number of pages */
    nr_pages = ((umem_handle->vma & ~PAGE_MASK) + umem_handle->size + ~PAGE_MASK) >> PAGE_SHIFT;
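    /* Note: (vma & ~PAGE_MASK) is the offset into the first page, and
     * ~PAGE_MASK equals PAGE_SIZE - 1, so the expression above rounds
     * (offset + size) up to a whole number of pages. */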
    mod_info_dbg("nr_pages computed: %d\n", nr_pages);

    /* Allocate space for the page information; this can be very big, so we
     * use vmalloc. */
    if ((pages = vmalloc(nr_pages * sizeof(*pages))) == NULL)
        return -ENOMEM;
    mod_info_dbg("allocated space for the pages.\n");

    /* Allocate space for the scatterlist. We do not know how many entries
     * there will be, but the maximum is nr_pages. This can be very big, so
     * we use vmalloc. */
    if ((sg = vmalloc(nr_pages * sizeof(*sg))) == NULL)
        goto umem_sgmap_pages;
    sg_init_table(sg, nr_pages);
    mod_info_dbg("allocated space for the SG list.\n");

    /* Get the page information */
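    /* get_user_pages_compat() is presumably a kernel-version compatibility
     * wrapper around get_user_pages(); it pins the pages and takes a
     * reference on each one, dropped again with put_page() on unmap and in
     * the error path below. */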
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)
    /* As of kernel 5.8.0, the mmap_sem member of the mm struct has been
     * renamed to mmap_lock. See:
     * https://github.com/torvalds/linux/commit/da1c55f1b272f4bd54671d459b39ea7b54944ef9
     */
    down_read(&current->mm->mmap_lock);
    res = get_user_pages_compat(umem_handle->vma, nr_pages, pages);
    up_read(&current->mm->mmap_lock);
#else
    down_read(&current->mm->mmap_sem);
    res = get_user_pages_compat(umem_handle->vma, nr_pages, pages);
    up_read(&current->mm->mmap_sem);
#endif

    /* Error, not all pages mapped */
    if (res < nr_pages) {
        mod_info("Could not map all user pages (%d of %d)\n", res, nr_pages);
        /* If only some pages could be mapped, we release those. If a real
         * error occurred, we set nr_pages to 0. */
        nr_pages = (res > 0 ? res : 0);
        goto umem_sgmap_unmap;
    }
    mod_info_dbg("Got the pages (%d).\n", res);

    /* Lock the pages, then populate the SG list with the pages */
    /* page0 is different */
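    /* The first page may start at a nonzero offset within the page, and the
     * buffer may even end inside that same page, so its SG entry gets an
     * explicit offset and a clipped length. */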
    if (!PageReserved(pages[0]))
        lock_page(pages[0]);

    offset = (umem_handle->vma & ~PAGE_MASK);
    length = (umem_handle->size > (PAGE_SIZE - offset) ? (PAGE_SIZE - offset) : umem_handle->size);
    sg_set_page(&sg[0], pages[0], length, offset);
    count = umem_handle->size - length;
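
    /* count holds the bytes still to be covered by the remaining SG
     * entries; every page after the first starts at offset 0. */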
    for (i = 1; i < nr_pages; i++) {
        /* Lock the page first */
        if (!PageReserved(pages[i]))
            lock_page(pages[i]);
        /* Populate the list */
        sg_set_page(&sg[i], pages[i], ((count > PAGE_SIZE) ? PAGE_SIZE : count), 0);
        count -= sg[i].length;
    }

    /* Use the page list to populate the SG list */
    /* SG entries may be merged; nents is the number of used entries. */
    /* We originally have nr_pages entries in the SG list. */
    if ((nents = pci_map_sg(privdata->pdev, sg, nr_pages, PCI_DMA_BIDIRECTIONAL)) == 0)
        goto umem_sgmap_unmap;
    mod_info_dbg("Mapped SG list (%d entries).\n", nents);

    /* Add an entry to the umem_list of the device, and update the handle with the id */
    /* Allocate space for the new umem entry */
    if ((umem_entry = kmalloc(sizeof(*umem_entry), GFP_KERNEL)) == NULL)
        goto umem_sgmap_entry;

    /* Fill the entry to be added to the umem list */
    umem_entry->id = atomic_inc_return(&privdata->umem_count) - 1;
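    /* atomic_inc_return() yields the post-increment value, hence the -1:
     * ids are handed out starting from 0. */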
    umem_entry->nr_pages = nr_pages;    /* Will be needed when unmapping */
    umem_entry->pages = pages;
    umem_entry->nents = nents;
    umem_entry->sg = sg;

    if (pcidriver_sysfs_initialize_umem(privdata, umem_entry->id, &(umem_entry->sysfs_attr)) != 0)
        goto umem_sgmap_name_fail;

    /* Add the entry to the umem list */
    spin_lock(&(privdata->umemlist_lock));
    list_add_tail(&(umem_entry->list), &(privdata->umem_list));
    spin_unlock(&(privdata->umemlist_lock));

    /* Update the handle with the handle ID of the entry */
    umem_handle->handle_id = umem_entry->id;

    return 0;
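
    /* Error unwind: each label below releases what was acquired before the
     * corresponding failure, in reverse order of acquisition. */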
umem_sgmap_name_fail:
    kfree(umem_entry);
umem_sgmap_entry:
    pci_unmap_sg(privdata->pdev, sg, nr_pages, PCI_DMA_BIDIRECTIONAL);
umem_sgmap_unmap:
    /* Release the pages */
    if (nr_pages > 0) {
        for (i = 0; i < nr_pages; i++) {
            if (PageLocked(pages[i]))
                unlock_page(pages[i]);
            if (!PageReserved(pages[i]))
                set_page_dirty(pages[i]);
            put_page(pages[i]);
        }
    }
    vfree(sg);
umem_sgmap_pages:
    vfree(pages);
    return -ENOMEM;
}
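
/*
 * Typical life cycle of a mapping, as a sketch (the actual entry points are
 * the driver's ioctl/sysfs handlers, which live outside this file):
 *
 *   pcidriver_umem_sgmap(privdata, &handle);     pin + map, fills handle_id
 *   pcidriver_umem_sgget(privdata, &sglist);     export the SG list
 *   pcidriver_umem_sync(privdata, &handle);      sync around DMA transfers
 *   pcidriver_umem_sgunmap(privdata, entry);     unmap + unpin
 *
 * where 'entry' is looked up via pcidriver_umem_find_entry_id(privdata,
 * handle.handle_id).
 */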

/**
 *
 * Unmap a scatter/gather list.
 *
 */
int pcidriver_umem_sgunmap(pcidriver_privdata_t *privdata, pcidriver_umem_entry_t *umem_entry)
{
    int i;

    pcidriver_sysfs_remove(privdata, &(umem_entry->sysfs_attr));

    /* Unmap user memory */
    pci_unmap_sg(privdata->pdev, umem_entry->sg, umem_entry->nr_pages, PCI_DMA_BIDIRECTIONAL);

    /* Release the pages */
    if (umem_entry->nr_pages > 0) {
        for (i = 0; i < (umem_entry->nr_pages); i++) {
            /* Mark the page dirty and unlock it */
            if (!PageReserved(umem_entry->pages[i])) {
                SetPageDirty(umem_entry->pages[i]);
                unlock_page(umem_entry->pages[i]);
            }
            /* ... and release it from the cache */
            put_page(umem_entry->pages[i]);
        }
    }

    /* Remove the umem list entry */
    spin_lock(&(privdata->umemlist_lock));
    list_del(&(umem_entry->list));
    spin_unlock(&(privdata->umemlist_lock));

    /* Release the SG list and page list memory */
    /* These two live in the vmalloc area of the kernel */
    vfree(umem_entry->pages);
    vfree(umem_entry->sg);

    /* Release the umem_entry memory */
    kfree(umem_entry);

    return 0;
}

/**
 *
 * Unmap all scatter/gather lists.
 *
 */
int pcidriver_umem_sgunmap_all(pcidriver_privdata_t *privdata)
{
    struct list_head *ptr, *next;
    pcidriver_umem_entry_t *umem_entry;

    /* Iterate safely over the entries and delete them */
    list_for_each_safe(ptr, next, &(privdata->umem_list)) {
        umem_entry = list_entry(ptr, pcidriver_umem_entry_t, list);
        pcidriver_umem_sgunmap(privdata, umem_entry);    /* spin lock inside! */
    }

    return 0;
}

/**
 *
 * Copies the scatter/gather list from kernel space to user space.
 *
 */
int pcidriver_umem_sgget(pcidriver_privdata_t *privdata, umem_sglist_t *umem_sglist)
{
    int i;
    pcidriver_umem_entry_t *umem_entry;
    struct scatterlist *sg;
    int idx = 0;
    dma_addr_t cur_addr;
    unsigned int cur_size;

    /* Find the associated umem_entry for this buffer */
    umem_entry = pcidriver_umem_find_entry_id(privdata, umem_sglist->handle_id);
    if (umem_entry == NULL)
        return -EINVAL;    /* umem_handle is not valid */

    /* Check if the passed SG list is large enough */
    if (umem_sglist->nents < umem_entry->nents)
        return -EINVAL;    /* sg does not have enough entries */

    /* Copy the SG list to the user format */
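    /* Two export formats: PCIDRIVER_SG_MERGED coalesces entries that are
     * contiguous in bus-address space into one; otherwise entries are
     * copied out 1:1. */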
    if (umem_sglist->type == PCIDRIVER_SG_MERGED) {
        for_each_sg(umem_entry->sg, sg, umem_entry->nents, i) {
            if (i == 0) {
                umem_sglist->sg[0].addr = sg_dma_address(sg);
                umem_sglist->sg[0].size = sg_dma_len(sg);
                idx = 0;
            } else {
                cur_addr = sg_dma_address(sg);
                cur_size = sg_dma_len(sg);

                /* Check if this entry fits right after the current entry */
                if (cur_addr == (umem_sglist->sg[idx].addr + umem_sglist->sg[idx].size)) {
                    umem_sglist->sg[idx].size += cur_size;
                    continue;
                }

                /* Skip zero-length entries (yes, they can happen... at the end of the list) */
                if (cur_size == 0)
                    continue;

                /* None of the above, add a new entry */
                idx++;
                umem_sglist->sg[idx].addr = cur_addr;
                umem_sglist->sg[idx].size = cur_size;
            }
        }
        /* Set the used size of the SG list */
        umem_sglist->nents = idx + 1;
    } else {
        for_each_sg(umem_entry->sg, sg, umem_entry->nents, i) {
            mod_info_dbg("entry: %d\n", i);
            umem_sglist->sg[i].addr = sg_dma_address(sg);
            umem_sglist->sg[i].size = sg_dma_len(sg);
        }
        /* Set the used size of the SG list */
        /* Check if the last one is zero-length */
        if (umem_sglist->sg[umem_entry->nents - 1].size == 0)
            umem_sglist->nents = umem_entry->nents - 1;
        else
            umem_sglist->nents = umem_entry->nents;
    }

    return 0;
}

/**
 *
 * Sync user space memory from/to the device.
 *
 */
int pcidriver_umem_sync(pcidriver_privdata_t *privdata, umem_handle_t *umem_handle)
{
    pcidriver_umem_entry_t *umem_entry;

    /* Find the associated umem_entry for this buffer */
    umem_entry = pcidriver_umem_find_entry_id(privdata, umem_handle->handle_id);
    if (umem_entry == NULL)
        return -EINVAL;    /* umem_handle is not valid */
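
    /* Map the driver's direction constants onto the corresponding
     * pci_dma_sync_sg_* calls; a bidirectional buffer is synced both for
     * the device and for the CPU. */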
    switch (umem_handle->dir) {
    case PCIDRIVER_DMA_TODEVICE:
        pci_dma_sync_sg_for_device(privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_TODEVICE);
        break;
    case PCIDRIVER_DMA_FROMDEVICE:
        pci_dma_sync_sg_for_cpu(privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_FROMDEVICE);
        break;
    case PCIDRIVER_DMA_BIDIRECTIONAL:
        pci_dma_sync_sg_for_device(privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_BIDIRECTIONAL);
        pci_dma_sync_sg_for_cpu(privdata->pdev, umem_entry->sg, umem_entry->nents, PCI_DMA_BIDIRECTIONAL);
        break;
    default:
        return -EINVAL;    /* wrong direction parameter */
    }

    return 0;
}

/**
 *
 * Get the pcidriver_umem_entry_t structure for the given id.
 *
 * @param id ID of the umem entry to search for
 * @return Pointer to the matching entry, or NULL if no entry has that id.
 *
 */
pcidriver_umem_entry_t *pcidriver_umem_find_entry_id(pcidriver_privdata_t *privdata, int id)
{
    struct list_head *ptr;
    pcidriver_umem_entry_t *entry;

    spin_lock(&(privdata->umemlist_lock));
    list_for_each(ptr, &(privdata->umem_list)) {
        entry = list_entry(ptr, pcidriver_umem_entry_t, list);
        if (entry->id == id) {
            spin_unlock(&(privdata->umemlist_lock));
            return entry;
        }
    }
    spin_unlock(&(privdata->umemlist_lock));

    return NULL;
}