From: Samuel Thibault
Subject: [gnumach] 05/06: patches/{71, 72}_dde.patch: DDE fixes from Richard Braun
Date: Tue, 15 Nov 2016 01:01:20 +0000

This is an automated email from the git hooks/post-receive script.

sthibault pushed a commit to branch master
in repository gnumach.

commit 9fde2511af8357a74013ab753ad8166468575c96
Author: Samuel Thibault <address@hidden>
Date:   Tue Nov 15 00:30:30 2016 +0000

    patches/{71,72}_dde.patch: DDE fixes from Richard Braun
---
 debian/changelog            |   1 +
 debian/patches/71_dde.patch | 157 ++++++++++++++++++++++++++++++++++++++++++++
 debian/patches/72_dde.patch |  18 +++++
 debian/patches/series       |   2 +
 4 files changed, 178 insertions(+)

diff --git a/debian/changelog b/debian/changelog
index 7546247..9a86fa9 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -4,6 +4,7 @@ gnumach (2:1.7+git20161115-1) UNRELEASED; urgency=medium
   * copyright: Add missing licence terms. Closes: Bug#792622.
   * patches/{git-LDFLAGS,git-unregister-boot-data.patch}: Drop, merged
     upstream.
+  * patches/{71,72}_dde.patch: DDE fixes from Richard Braun.
 
  -- Samuel Thibault <address@hidden>  Tue, 15 Nov 2016 00:23:19 +0000
 
diff --git a/debian/patches/71_dde.patch b/debian/patches/71_dde.patch
new file mode 100644
index 0000000..10ae036
--- /dev/null
+++ b/debian/patches/71_dde.patch
@@ -0,0 +1,157 @@
+commit d3b5f56297204bf8b9b7df94ddf72fe81cb4786d
+Author: Richard Braun <address@hidden>
+Date:   Tue Nov 8 17:28:14 2016 +0100
+
+    Make experimental_vm_allocate_contiguous cleaner
+
+diff --git a/vm/vm_user.c b/vm/vm_user.c
+index 1f68297..597d7a3 100644
+--- a/vm/vm_user.c
++++ b/vm/vm_user.c
+@@ -457,15 +457,14 @@ kern_return_t experimental_vm_allocate_contiguous(host_priv, map, result_vaddr,
+       vm_address_t            *result_paddr;
+       vm_size_t               size;
+ {
++      vm_size_t               alloc_size;
+       unsigned int            npages;
+       unsigned int            i;
+       unsigned int            order;
+       vm_page_t               pages;
+       vm_object_t             object;
+-      vm_map_entry_t          entry;
+       kern_return_t           kr;
+       vm_address_t            vaddr;
+-      vm_offset_t             offset = 0;
+ 
+       if (host_priv == HOST_NULL)
+               return KERN_INVALID_HOST;
+@@ -473,82 +472,72 @@ kern_return_t experimental_vm_allocate_contiguous(host_priv, map, result_vaddr,
+       if (map == VM_MAP_NULL)
+               return KERN_INVALID_TASK;
+ 
++      size = vm_page_round(size);
++
++      if (size == 0)
++              return KERN_INVALID_ARGUMENT;
++
++      object = vm_object_allocate(size);
++
++      if (object == NULL)
++              return KERN_RESOURCE_SHORTAGE;
++
+       /*
+        * XXX The page allocator returns blocks with a power-of-two size.
+-       * The requested size may not be a power-of-two, causing the pages
+-       * at the end of a block to be unused. In order to keep track of
+-       * those pages, they must all be inserted in the VM object created
+-       * by this function.
++       * The requested size may not be a power-of-two, requiring some
++       * work to release back the pages that aren't needed.
+        */
+       order = vm_page_order(size);
+-      size = (1 << (order + PAGE_SHIFT));
++      alloc_size = (1 << (order + PAGE_SHIFT));
++      npages = vm_page_atop(alloc_size);
+ 
+-      /* We allocate the contiguous physical pages for the buffer. */
++      pages = vm_page_grab_contig(alloc_size, VM_PAGE_SEL_DIRECTMAP);
+ 
+-      npages = size / PAGE_SIZE;
+-      pages = vm_page_grab_contig(size, VM_PAGE_SEL_DIRECTMAP);
+-      if (pages == NULL)
+-      {
++      if (pages == NULL) {
++              vm_object_deallocate(object);
+               return KERN_RESOURCE_SHORTAGE;
+       }
+-      
+-#if 0
+-      kr = vm_page_grab_contig(npages, pages, NULL, TRUE);
+-      if (kr)
+-      {
+-              kfree (pages, npages * sizeof (vm_page_t));
+-              return kr;
++
++      vm_object_lock(object);
++      vm_page_lock_queues();
++
++      for (i = 0; i < vm_page_atop(size); i++) {
++              /*
++               * XXX We can safely handle contiguous pages as an array,
++               * but this relies on knowing the implementation of the
++               * page allocator.
++               */
++              pages[i].busy = FALSE;
++              vm_page_insert(&pages[i], object, vm_page_ptoa(i));
+       }
+-#endif
+ 
+-      /* Allocate the object 
+-       * and find the virtual address for the DMA buffer */
++      vm_page_unlock_queues();
++      vm_object_unlock(object);
+ 
+-      object = vm_object_allocate(size);
+-      vm_map_lock(map);
+-      /* TODO user_wired_count might need to be set as 1 */
+-      kr = vm_map_find_entry(map, &vaddr, size, (vm_offset_t) 0,
+-                             VM_OBJECT_NULL, &entry);
+-      if (kr != KERN_SUCCESS) 
+-      {
+-              vm_map_unlock(map);
++      for (i = vm_page_atop(size); i < npages; i++) {
++              vm_page_release(&pages[i], FALSE, FALSE);
++      }
++
++      vaddr = 0;
++      kr = vm_map_enter(map, &vaddr, size, 0, TRUE, object, 0, FALSE,
++                        VM_PROT_READ | VM_PROT_WRITE,
++                        VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_DEFAULT);
++
++      if (kr != KERN_SUCCESS) {
+               vm_object_deallocate(object);
+-              vm_page_free_contig(pages, size);
+               return kr;
+       }
+-              
+-      entry->object.vm_object = object;
+-      entry->offset = 0;
+ 
+-      /* We can unlock map now.  */
+-      vm_map_unlock(map);
++      kr = vm_map_pageable(map, vaddr, vaddr + size,
++                           VM_PROT_READ | VM_PROT_WRITE);
+ 
+-      /* We have physical pages we need and now we need to do the mapping. */
+-
+-      pmap_pageable (map->pmap, vaddr, vaddr + size, FALSE);
++      if (kr != KERN_SUCCESS) {
++              vm_map_remove(map, vaddr, vaddr + size);
++              return kr;
++      }
+ 
+       *result_vaddr = vaddr;
+       *result_paddr = pages->phys_addr;
+ 
+-      for (i = 0; i < npages; i++)
+-      {
+-              vm_object_lock(object);
+-              vm_page_lock_queues();
+-              vm_page_insert(&pages[i], object, offset);
+-              vm_page_wire(&pages[i]);
+-              vm_page_unlock_queues();
+-              vm_object_unlock(object);
+-
+-              /* Enter it in the kernel pmap */
+-              PMAP_ENTER(map->pmap, vaddr, &pages[i], VM_PROT_DEFAULT, TRUE);
+-
+-              vm_object_lock(object);
+-              PAGE_WAKEUP_DONE(&pages[i]);
+-              vm_object_unlock(object);
+-
+-              vaddr += PAGE_SIZE;
+-              offset += PAGE_SIZE;
+-      }
+-
+       return KERN_SUCCESS;
+ }
diff --git a/debian/patches/72_dde.patch b/debian/patches/72_dde.patch
new file mode 100644
index 0000000..e1553b6
--- /dev/null
+++ b/debian/patches/72_dde.patch
@@ -0,0 +1,18 @@
+commit 89d914869bb0802d6148fdb4c2bac83ec1fe15c0
+Author: Richard Braun <address@hidden>
+Date:   Tue Nov 8 17:36:27 2016 +0100
+
+    Fix experimental_vm_allocate_contiguous to wire down pages early
+
+diff --git a/vm/vm_user.c b/vm/vm_user.c
+index 597d7a3..403c7ee 100644
+--- a/vm/vm_user.c
++++ b/vm/vm_user.c
+@@ -509,6 +509,7 @@ kern_return_t experimental_vm_allocate_contiguous(host_priv, map, result_vaddr,
+                */
+               pages[i].busy = FALSE;
+               vm_page_insert(&pages[i], object, vm_page_ptoa(i));
++              vm_page_wire(&pages[i]);
+       }
+ 
+       vm_page_unlock_queues();
diff --git a/debian/patches/series b/debian/patches/series
index 418767b..ae9523c 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -3,3 +3,5 @@
 20_FP_NO.patch
 50_initrd.patch
 70_dde.patch
+71_dde.patch
+72_dde.patch
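
Background note (not part of the commits): the cleanup in 71_dde.patch hinges
on the page allocator handing out power-of-two blocks, so a request that is
not a power of two leaves trailing pages that must be released back. Below is
a minimal standalone C sketch of that rounding arithmetic. It assumes 4 KiB
pages (PAGE_SHIFT = 12) and uses a simplified stand-in for vm_page_order();
it is illustration only, not kernel code.

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Stand-in for vm_page_order(): smallest order such that
     * (1 << order) pages cover size. */
    static unsigned int page_order(unsigned long size)
    {
        unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned int order = 0;

        while ((1UL << order) < npages)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long size = 5 * PAGE_SIZE;   /* caller asks for 5 pages */
        unsigned int order = page_order(size);                   /* -> 3 */
        unsigned long alloc_size = 1UL << (order + PAGE_SHIFT);  /* 8 pages */
        unsigned long used  = size >> PAGE_SHIFT;
        unsigned long total = alloc_size >> PAGE_SHIFT;

        /* The loop over vm_page_release() in the patch frees exactly
         * these total - used trailing pages. */
        printf("requested %lu pages, allocator grants %lu, release %lu\n",
               used, total, total - used);
        return 0;
    }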
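
For context, a hypothetical user-space caller (e.g. a DDE driver wanting a
DMA buffer) might look as follows. The prototype is transcribed from the
kernel-side parameter list in 71_dde.patch and declared locally as an
assumption; the actual MIG-generated declaration in the installed headers
may differ. get_privileged_ports() is the usual Hurd way to obtain the
privileged host port, so this only works as root.

    #include <hurd.h>
    #include <mach.h>
    #include <stdio.h>

    /* Assumed prototype, mirroring the kernel routine in vm/vm_user.c. */
    extern kern_return_t
    experimental_vm_allocate_contiguous(mach_port_t host_priv,
                                        mach_port_t target_task,
                                        vm_address_t *result_vaddr,
                                        vm_address_t *result_paddr,
                                        vm_size_t size);

    int main(void)
    {
        mach_port_t host_priv, device_master;
        vm_address_t vaddr, paddr;
        kern_return_t kr;

        /* The RPC requires the privileged host port. */
        if (get_privileged_ports(&host_priv, &device_master))
            return 1;

        /* Ask for 5 pages of physically contiguous, wired memory. */
        kr = experimental_vm_allocate_contiguous(host_priv,
                                                 mach_task_self(),
                                                 &vaddr, &paddr,
                                                 5 * vm_page_size);
        if (kr != KERN_SUCCESS)
            return 1;

        printf("DMA buffer at vaddr 0x%lx, paddr 0x%lx\n",
               (unsigned long) vaddr, (unsigned long) paddr);
        return 0;
    }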

-- 
Alioth's /usr/local/bin/git-commit-notice on 
/srv/git.debian.org/git/pkg-hurd/gnumach.git


