memory/virtual.c: ROUND_SIZE() bugfix + .SYS loading

Hi,

I was getting incorrect "Section %.8s too large (%lx+%lx/%lx)" warnings while loading
some low-level system libraries. This is fixed by the attached
"wine-memory-virtual.c-too_large_section.diff", which changes (I believe fixes) the
ROUND_SIZE() behaviour. It also adds comments for ROUND_ADDR() and ROUND_SIZE(); I hope
I guessed their intended semantics correctly from the way they are used elsewhere in the code.
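
To make the fix concrete, here is a small standalone illustration (not part of the
patch) of the usage pattern from the "too large" check; the address/size values and
the 0xfff page_mask are just assumptions for the example:

#include <stdio.h>

#define PAGE_MASK 0xfffUL

#define ROUND_SIZE_OLD(addr,size) \
   (((unsigned long)(size) + ((unsigned long)(addr) & PAGE_MASK) + PAGE_MASK) & ~PAGE_MASK)
#define ROUND_SIZE_NEW(addr,size) \
   ((((unsigned long)(addr) + (size) + PAGE_MASK) & ~PAGE_MASK) - (unsigned long)(addr))

int main(void)
{
    unsigned long vaddr = 0x1234;   /* hypothetical unaligned sec->VirtualAddress */
    unsigned long vsize = 0x0100;   /* hypothetical sec->Misc.VirtualSize */

    /* the check computes: size = sec->VirtualAddress + ROUND_SIZE(...) */
    printf("old: end = 0x%lx\n", vaddr + ROUND_SIZE_OLD(vaddr, vsize)); /* 0x2234, overshoots the rounded end */
    printf("new: end = 0x%lx\n", vaddr + ROUND_SIZE_NEW(vaddr, vsize)); /* 0x2000, the correct page-rounded end */
    return 0;
}

With the old macro the sum can spuriously exceed total_size for an unaligned
VirtualAddress, which is exactly what triggered the warning.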

Attached "wine-memory-virtual.c-mmap_cache-preliminary.diff" is just a RFC as
I found out that I was unable to load some .SYS files as they were being
attempted to be loaded with page-overlapping sections, unaligned offsets and
incompatible protections. Therefore I had to implement my own mapping cache and
handle the overlapping/merging myself. It would be more simple/clean to use
Linux /proc/$$/maps mmap map but it would be probably no-no as it is
Linux-dependent.
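
As an illustration of the overlap problem, here is a small standalone sketch (the
section layout is invented, not taken from any real driver) of how two sections
aligned to 0x200 instead of the page size end up needing the same memory page:

#include <stdio.h>

#define PAGE_MASK 0xfffUL
#define ROUND_DOWN(x) ((x) & ~PAGE_MASK)
#define ROUND_UP(x)   (((x) + PAGE_MASK) & ~PAGE_MASK)

int main(void)
{
    /* invented sections aligned to 0x200 rather than to the page size */
    struct { const char *name; unsigned long va, size; } sec[] = {
        { ".text", 0x0400, 0x0a00 },   /* rounds out to pages 0x0000-0x0fff */
        { ".data", 0x0e00, 0x0600 },   /* rounds out to pages 0x0000-0x1fff */
    };
    unsigned i;

    for (i = 0; i < sizeof(sec)/sizeof(sec[0]); i++)
        printf("%-6s needs pages 0x%04lx-0x%04lx\n", sec[i].name,
               ROUND_DOWN(sec[i].va), ROUND_UP(sec[i].va + sec[i].size) - 1);
    /* both ranges cover page 0x0000-0x0fff, so a second plain mmap() there
     * hits an already mapped page; the cache instead reuses that mapping
     * and merges the protections */
    return 0;
}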

The cache is not fully finished yet - it is still missing unregistering of munmap()ed
pages and re-registering of mprotect()ed pages, and it should probably be unified with
'FILE_VIEW'. But I did not want to spend more effort on it if it gets rejected anyway.
Both attached patches were required to successfully load relocated .SYS files (although
they do not run yet, of course).


Commit requested if approved, as I have no CVS commit access.

I am missing a general utility library such as GLib, Qt, NSPR or APR that would provide
common data structures such as hash tables, trees etc. Maybe Win32 provides something
suitable? Hence the /* FIXME: Some AVL tree, currently unsorted! */ below.
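
As a possible direction only (a sketch, not part of the patch), the cache array could at
least be kept sorted by base address and searched with bsearch(), replacing the linear
scan in unaligned_mmap_cached(); the cache_elem/cache_cmp/cache_find names are invented
for this example:

#include <stdlib.h>

struct cache_elem
{
    void   *addr;
    size_t  length;
};

/* compare a lookup address against a cached range; "equal" means the
 * address falls inside the range */
static int cache_cmp( const void *key, const void *member )
{
    const char *addr = key;
    const struct cache_elem *elem = member;

    if (addr < (const char *)elem->addr) return -1;
    if (addr >= (const char *)elem->addr + elem->length) return 1;
    return 0;
}

/* find the cached element containing 'addr', or NULL; the array must be
 * kept sorted by addr and contain non-overlapping ranges */
static struct cache_elem *cache_find( struct cache_elem *cache, unsigned used, void *addr )
{
    return bsearch( addr, cache, used, sizeof(*cache), cache_cmp );
}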


Jan Kratochvil
Index: memory/virtual.c
===================================================================
RCS file: /home/wine/wine/memory/virtual.c,v
retrieving revision 1.84
diff -u -r1.84 virtual.c
--- memory/virtual.c	17 Aug 2002 00:43:19 -0000	1.84
+++ memory/virtual.c	15 Sep 2002 22:27:49 -0000
@@ -114,11 +114,15 @@
 
 #define ADDRESS_SPACE_LIMIT  ((void *)0xc0000000)  /* top of the user address space */
 
+/* Use ROUND_ADDR(<some_address>, page_mask) to align a base address down to a page boundary */
 #define ROUND_ADDR(addr,mask) \
    ((void *)((UINT_PTR)(addr) & ~(mask)))
 
+/* Use with both params possibly unaligned; returns the size rounded up and adjusted
+ * so that adding it to the unaligned 'addr' yields the page-aligned upper end.
+ */
 #define ROUND_SIZE(addr,size) \
-   (((UINT)(size) + ((UINT_PTR)(addr) & page_mask) + page_mask) & ~page_mask)
+   ((((UINT_PTR)(addr) + (size) + page_mask) & ~page_mask) - (UINT_PTR)(addr))
 
 #define VIRTUAL_DEBUG_DUMP_VIEW(view) \
    if (!TRACE_ON(virtual)); else VIRTUAL_DumpView(view)
@@ -648,8 +652,8 @@
         size = sec->VirtualAddress + ROUND_SIZE( sec->VirtualAddress, sec->Misc.VirtualSize );
         if (sec->VirtualAddress > total_size || size > total_size || size < sec->VirtualAddress)
         {
-            ERR_(module)( "Section %.8s too large (%lx+%lx/%lx)\n",
-                          sec->Name, sec->VirtualAddress, sec->Misc.VirtualSize, total_size );
+            ERR_(module)( "Section %.8s too large (%lx+%lx=%lx/%lx)\n",
+                          sec->Name, sec->VirtualAddress, sec->Misc.VirtualSize, size, total_size );
             goto error;
         }
 
Index: memory/virtual.c
===================================================================
RCS file: /home/wine/wine/memory/virtual.c,v
retrieving revision 1.84
diff -u -r1.84 virtual.c
--- memory/virtual.c	17 Aug 2002 00:43:19 -0000	1.84
+++ memory/virtual.c	15 Sep 2002 22:27:49 -0000
@@ -865,9 +869,11 @@
  * these reasons we do a direct system call here.
  */
 static void *unaligned_mmap( void *addr, size_t length, unsigned int prot,
-                             unsigned int flags, int fd, unsigned int offset_low,
-                             unsigned int offset_high )
+                             unsigned int flags, int fd, off_t offset )
 {
+    unsigned int offset_low  = offset;
+    unsigned int offset_high = offset >> 32U;
+
 #if defined(linux) && defined(__i386__) && defined(__GNUC__)
     if (!offset_high && (offset_low & page_mask))
     {
@@ -910,6 +916,219 @@
 
 
 /***********************************************************************
+ *           unaligned_mmap_cached
+ *
+ * One memory page may need to be mapped several times. The second and
+ * later mappings are already covered by the first one, but the mmap()
+ * kernel call for them would fail. We must of course check that the
+ * file offset and protection flags match.
+ */
+
+/* FIXME: Some AVL tree, currently unsorted! */
+struct unaligned_mmap_cache_elem
+{
+    void *addr;
+    size_t length;
+    unsigned int prot;
+    unsigned int flags;
+    dev_t fd_st_dev;
+    ino_t fd_st_ino;
+    time_t fd_st_mtime;
+    off_t offset;
+};
+static struct unaligned_mmap_cache_elem *unaligned_mmap_cache;
+static unsigned unaligned_mmap_cache_used = 0, unaligned_mmap_cache_alloc = 0;
+static CRITICAL_SECTION unaligned_mmap_cache_cs
+       = CRITICAL_SECTION_INIT("unaligned_mmap_cache_cs");
+
+static void unaligned_mmap_cache_add( void *addr, size_t length,
+                                      unsigned int prot, unsigned int flags,
+                                      const struct stat *fd_stat, off_t offset )
+{
+    struct unaligned_mmap_cache_elem *elem;
+
+    EnterCriticalSection(&unaligned_mmap_cache_cs);
+    if (unaligned_mmap_cache_used == unaligned_mmap_cache_alloc)
+    {
+        unsigned alloc_new = max(0x100, unaligned_mmap_cache_alloc*2);
+        struct unaligned_mmap_cache_elem *cache_new =
+                realloc(unaligned_mmap_cache, alloc_new*sizeof(*unaligned_mmap_cache));
+
+        if (!cache_new)
+        {
+            LeaveCriticalSection(&unaligned_mmap_cache_cs);
+            return;
+        }
+        unaligned_mmap_cache = cache_new;
+        unaligned_mmap_cache_alloc = alloc_new;
+    }
+    assert(unaligned_mmap_cache_used < unaligned_mmap_cache_alloc);
+    /* FIXME: area coalescence */
+    elem = unaligned_mmap_cache + (unaligned_mmap_cache_used++);
+    LeaveCriticalSection(&unaligned_mmap_cache_cs);
+    elem->addr   = addr;
+    elem->length = length;
+    elem->prot   = prot;
+    elem->flags  = flags;
+    elem->fd_st_dev   = fd_stat->st_dev;
+    elem->fd_st_ino   = fd_stat->st_ino;
+    elem->fd_st_mtime = fd_stat->st_mtime;
+    elem->offset = offset;
+    TRACE("stored: addr=%p, length=0x%lx, prot=0x%x, flags=0x%x, offset=0x%lx\n",
+            addr, (unsigned long)length, prot, flags, (unsigned long)offset);
+}
+
+static void *unaligned_mmap_cached( void *addr, size_t length, unsigned int prot,
+                                    unsigned int flags, int fd, unsigned int offset_low,
+                                    unsigned int offset_high )
+{
+    void *addr_orig = addr;
+    UINT shift_down, shift_up;
+    off_t offset = ((off_t)offset_high << 32) | offset_low;
+    struct stat fd_stat;
+
+    if (fstat(fd, &fd_stat))
+    {
+        WARN("fstat() failed, mmap failure may occur: %s\n", strerror(errno));
+        goto failedpass;
+    }
+
+    if (addr)
+    {
+        shift_down = (UINT_PTR)addr & page_mask;
+        if (addr < (void *)shift_down || offset < shift_down)
+        {
+            WARN("Unalignable mmap request: addr=%p, file offset=0x%lx, mmap failure may occur\n",
+                    addr, (unsigned long)offset);
+            goto failedpass;
+        }
+        addr   -= shift_down;
+        length += shift_down;
+        offset -= shift_down;
+    }
+    
+    shift_up   = page_size - ((UINT_PTR)((!addr ? 0 : addr) + length - 1) & page_mask) - 1;
+    length += shift_up;
+
+    if (addr)
+        assert(ROUND_ADDR(addr  , page_mask) == addr  );
+    assert(length>=0);
+    assert(ROUND_SIZE(addr, length) == length);
+    
+    if (!length)
+        return addr_orig;
+
+    while (length)
+    {
+        /* found cache element containing current point ('addr') */
+        struct unaligned_mmap_cache_elem *found = NULL;
+        /* nearest upper cache->addr to limit current map when memory
+         * map is free at the current point ('addr')
+         */
+        void *addr_near = NULL;
+        size_t chunk_length;
+        struct unaligned_mmap_cache_elem *cachep;
+
+        EnterCriticalSection(&unaligned_mmap_cache_cs);
+        if (addr)
+            for (cachep = unaligned_mmap_cache;
+                 cachep < unaligned_mmap_cache + unaligned_mmap_cache_used;
+                 cachep++)
+            {
+                if (addr >= cachep->addr && addr < cachep->addr+cachep->length)
+                {
+                    found = cachep;
+                    if (!addr_near || found->addr + found->length < addr_near)
+                        addr_near = found->addr + found->length;
+                }
+                if (addr < cachep->addr && (!addr_near || addr_near > cachep->addr))
+                    addr_near = cachep->addr;
+            }
+        chunk_length = !addr_near || !addr ? length : min(length, addr_near - addr);
+        assert(ROUND_SIZE(addr, chunk_length) == chunk_length);
+        if (found)
+        {
+#           define CHECK_ATTR(reqd,mapped,name) do { \
+                        if ((reqd) != (mapped)) \
+                        { \
+                            LeaveCriticalSection(&unaligned_mmap_cache_cs); \
+                            WARN("mapping at %p-%p reqd " name "=0x%lx, mapped " name "=0x%lx: reuse refused\n", \
+                                    addr, addr+chunk_length, (unsigned long)(reqd), (unsigned long)(mapped)); \
+                            goto failedpass; \
+                        } \
+                    } while (0)
+            CHECK_ATTR(flags, found->flags, "flags");
+            CHECK_ATTR(fd_stat.st_dev  , found->fd_st_dev  , "file st_dev"  );
+            CHECK_ATTR(fd_stat.st_ino  , found->fd_st_ino  , "file st_ino"  );
+            CHECK_ATTR(fd_stat.st_mtime, found->fd_st_mtime, "file st_mtime");
+            CHECK_ATTR(offset, found->offset + (addr - found->addr), "offset");
+#           undef CHECK_ATTR
+            if (prot != found->prot) {
+                if (addr > found->addr)
+                {
+                    unaligned_mmap_cache_add(found->addr, (addr - found->addr) /* length */,
+                                             found->prot,
+                                             flags,    /* ==found->flags */
+                                             &fd_stat, /* ==found->fd_*  */
+                                             found->offset);
+                    found->length -= addr - found->addr;
+                    found->offset += addr - found->addr;
+                    found->addr = addr;
+                }
+                assert(addr   == found->addr  );
+                assert(offset == found->offset);
+                if (chunk_length < found->length)
+                {
+                    unaligned_mmap_cache_add(addr + chunk_length, (found->length - chunk_length),
+                                             found->prot,
+                                             flags,    /* ==found->flags  */
+                                             &fd_stat, /* ==found->fd_*   */
+                                             offset    /* ==found->offset */ + chunk_length);
+                    found->length = chunk_length;
+                }
+                assert(chunk_length == found->length);
+                WARN("mapping at %p-%p reqd prot=0x%x, mapped prot=0x%x: extending protections to 0x%x!\n",
+                        addr, addr+chunk_length, prot, found->prot, prot|found->prot);
+                prot = found->prot = prot|found->prot;
+                if (mprotect(addr, chunk_length, prot))
+                {
+                    LeaveCriticalSection(&unaligned_mmap_cache_cs);
+                    goto failedpass;
+                }
+            }
+            TRACE("cache used: addr=%p, chunk_length=0x%lx, prot=0x%x, flags=0x%x, offset=0x%lx\n",
+                    addr, (unsigned long)chunk_length, prot, flags, (unsigned long)offset);
+        }
+        else
+        {
+            void *addr_got = unaligned_mmap(addr, chunk_length, prot, flags, fd, offset);
+            if (!addr_got || (addr && addr != addr_got))
+            {
+                LeaveCriticalSection(&unaligned_mmap_cache_cs);
+                goto failedpass;
+            }
+            addr = addr_got;    /* if !addr */
+        }
+        if (!addr_orig)
+            addr_orig = addr;
+        if (!found)
+            unaligned_mmap_cache_add(addr, chunk_length, prot, flags, &fd_stat, offset);
+        LeaveCriticalSection(&unaligned_mmap_cache_cs);
+        addr   += chunk_length;
+        length -= chunk_length;
+        offset += chunk_length;
+        assert(length>=0);
+    }
+    return addr_orig;
+
+failedpass:
+    TRACE("cache failed: addr=%p, length=0x%lx, prot=0x%x, flags=0x%x, offset=0x%lx\n",
+            addr, (unsigned long)length, prot, flags, (unsigned long)offset);
+    return unaligned_mmap(addr, length, prot, flags, fd, offset);
+}
+
+
+/***********************************************************************
  *           VIRTUAL_mmap
  *
  * Wrapper for mmap() that handles anonymous mappings portably,
@@ -943,7 +1162,7 @@
         *removable = FALSE;
     }
 
-    if ((ret = unaligned_mmap( start, size, prot, flags, fd,
+    if ((ret = unaligned_mmap_cached( start, size, prot, flags, fd,
                                offset_low, offset_high )) != (LPVOID)-1) return ret;
 
     /* mmap() failed; if this is because the file offset is not    */
