Re: [PATCH 1/1] NFSD: fix WARN_ON_ONCE in __queue_delayed_work

On Wed, 2023-01-11 at 13:15 +0100, Mike Galbraith wrote:
> On Wed, 2023-01-11 at 12:19 +0100, Mike Galbraith wrote:
> > On Wed, 2023-01-11 at 05:55 -0500, Jeff Layton wrote:
> > > > 
> > > > 
> > > > 
> > > > It might be interesting to turn up KASAN if you're able.
> > 
> > I can try that.
> 
> KASAN did not squeak.
> 
> > > If you still have this vmcore, it might be interesting to do the pointer
> > > math and find the nfsd_net structure that contains the above
> > > delayed_work. Does the rest of it also seem to be corrupt?
> 
> Virgin source with workqueue.c WARN_ON_ONCE() landmine.
> 

Thanks. Mixed bag here...
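
(To spell out the pointer math from above: laundromat_work is embedded
in struct nfsd_net, so it's just container_of() -- in crash you can do
the same by subtracting offsetof(struct nfsd_net, laundromat_work)
from the work address by hand. Rough sketch, with "dwork" standing in
for the delayed_work pointer that tripped the WARN_ON_ONCE:)

	/* recover the enclosing nfsd_net from the embedded delayed_work */
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);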


> crash> nfsd_net -x 0xFFFF8881114E9800
> struct nfsd_net {
>   cld_net = 0x0,
>   svc_expkey_cache = 0xffff8881420f8a00,
>   svc_export_cache = 0xffff8881420f8800,
>   idtoname_cache = 0xffff8881420f9a00,
>   nametoid_cache = 0xffff8881420f9c00,
>   nfsd4_manager = {
>     list = {
>       next = 0x0,
>       prev = 0x0
>     },
>     block_opens = 0x0
>   },
>   grace_ended = 0x0,


>   boot_time = 0x0,
>   nfsd_client_dir = 0x0,
>   reclaim_str_hashtbl = 0x0,
>   reclaim_str_hashtbl_size = 0x0,
>   conf_id_hashtbl = 0x0,
>   conf_name_tree = {
>     rb_node = 0x0
>   },
>   unconf_id_hashtbl = 0x0,
>   unconf_name_tree = {
>     rb_node = 0x0
>   },
>   sessionid_hashtbl = 0x0,
>   client_lru = {
>     next = 0x0,
>     prev = 0x0
>   },
>   close_lru = {
>     next = 0x0,
>     prev = 0x0
>   },
>   del_recall_lru = {
>     next = 0x0,
>     prev = 0x0
>   },
>   blocked_locks_lru = {
>     next = 0x0,
>     prev = 0x0
>   },

All of the above list_heads are zeroed out and they shouldn't be.
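
(All of these get INIT_LIST_HEAD() in nfs4_state_create_net(), and an
empty list_head points at itself, so next/prev of 0x0 is never a
legitimate state:)

	/* set up at net init time, so these should never be NULL */
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	INIT_LIST_HEAD(&nn->blocked_locks_lru);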

>   laundromat_work = {
>     work = {
>       data = {
>         counter = 0x0
>       },
>       entry = {
>         next = 0x0,
>         prev = 0x0
>       },
>       func = 0x0
>     },
>     timer = {
>       entry = {
>         next = 0x0,
>         pprev = 0x0
>       },
>       expires = 0x0,
>       function = 0x0,
>       flags = 0x0
>     },
>     wq = 0x0,
>     cpu = 0x0
>   },
>   client_lock = {
>     {
>       rlock = {
>         raw_lock = {
>           {
>             val = {
>               counter = 0x0
>             },
>             {
>               locked = 0x0,
>               pending = 0x0
>             },
>             {
>               locked_pending = 0x0,
>               tail = 0x0
>             }
>           }
>         }
>       }
>     }
>   },
>   blocked_locks_lock = {
>     {
>       rlock = {
>         raw_lock = {
>           {
>             val = {
>               counter = 0x0
>             },
>             {
>               locked = 0x0,
>               pending = 0x0
>             },
>             {
>               locked_pending = 0x0,
>               tail = 0x0
>             }
>           }
>         }
>       }
>     }
>   },
>   rec_file = 0x0,
>   in_grace = 0x0,
>   client_tracking_ops = 0x0,
>   nfsd4_lease = 0x5a,
>   nfsd4_grace = 0x5a,

The grace and lease times look ok (0x5a is 90 seconds, the default),
oddly enough.

>   somebody_reclaimed = 0x0,
>   track_reclaim_completes = 0x0,
>   nr_reclaim_complete = {
>     counter = 0x0
>   },
>   nfsd_net_up = 0x0,

nfsd_net_up is false, which means that this server isn't running (or
that the memory here was scribbled over).
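
(For reference, that flag only flips in nfsd_startup_net() and
nfsd_shutdown_net(), if memory serves:)

	nn->nfsd_net_up = true;		/* at the end of nfsd_startup_net() */
	...
	nn->nfsd_net_up = false;	/* in nfsd_shutdown_net() */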

>   lockd_up = 0x0,
>   writeverf_lock = {
>     seqcount = {
>       seqcount = {
>         sequence = 0x0
>       }
>     },
>     lock = {
>       {
>         rlock = {
>           raw_lock = {
>             {
>               val = {
>                 counter = 0x0
>               },
>               {
>                 locked = 0x0,
>                 pending = 0x0
>               },
>               {
>                 locked_pending = 0x0,
>                 tail = 0x0
>               }
>             }
>           }
>         }
>       }
>     }
>   },
>   writeverf = "\000\000\000\000\000\000\000",
>   max_connections = 0x0,
>   clientid_base = 0x37b4ca7b,
>   clientid_counter = 0x37b4ca7d,
>   clverifier_counter = 0xa8ee910d,
>   nfsd_serv = 0x0,
>   keep_active = 0x0,
>   s2s_cp_cl_id = 0x37b4ca7c,
>   s2s_cp_stateids = {
>     idr_rt = {
>       xa_lock = {
>         {
>           rlock = {
>             raw_lock = {
>               {
>                 val = {
>                   counter = 0x0
>                 },
>                 {
>                   locked = 0x0,
>                   pending = 0x0
>                 },
>                 {
>                   locked_pending = 0x0,
>                   tail = 0x0
>                 }
>               }
>             }
>           }
>         }
>       },
>       xa_flags = 0x0,
>       xa_head = 0x0
>     },
>     idr_base = 0x0,
>     idr_next = 0x0
>   },
>   s2s_cp_lock = {
>     {
>       rlock = {
>         raw_lock = {
>           {
>             val = {
>               counter = 0x0
>             },
>             {
>               locked = 0x0,
>               pending = 0x0
>             },
>             {
>               locked_pending = 0x0,
>               tail = 0x0
>             }
>           }
>         }
>       }
>     }
>   },
>   nfsd_versions = 0x0,
>   nfsd4_minorversions = 0x0,
>   drc_hashtbl = 0xffff88810a2f0000,
>   max_drc_entries = 0x14740,
>   maskbits = 0xb,
>   drc_hashsize = 0x800,
>   num_drc_entries = {
>     counter = 0x0
>   },
>   counter = {{
>       lock = {
>         raw_lock = {
>           {
>             val = {
>               counter = 0x0
>             },
>             {
>               locked = 0x0,
>               pending = 0x0
>             },
>             {
>               locked_pending = 0x0,
>               tail = 0x0
>             }
>           }
>         }
>       },
>       count = 0x0,
>       list = {
>         next = 0xffff888103f98dd0,
>         prev = 0xffff8881114e9a18
>       },
>       counters = 0x607dc8402e10
>     }, {
>       lock = {
>         raw_lock = {
>           {
>             val = {
>               counter = 0x0
>             },
>             {
>               locked = 0x0,
>               pending = 0x0
>             },
>             {
>               locked_pending = 0x0,
>               tail = 0x0
>             }
>           }
>         }
>       },
>       count = 0x0,
>       list = {
>         next = 0xffff8881114e99f0,
>         prev = 0xffff88810b5743e0
>       },
>       counters = 0x607dc8402e14
>     }},
>   longest_chain = 0x0,
>   longest_chain_cachesize = 0x0,
>   nfsd_reply_cache_shrinker = {
>     count_objects = 0xffffffffa0e4e9b0 <nfsd_reply_cache_count>,
>     scan_objects = 0xffffffffa0e4f020 <nfsd_reply_cache_scan>,

Shrinker pointers look ok, as does its list_head.

>     batch = 0x0,
>     seeks = 0x1,
>     flags = 0x1,
>     list = {
>       next = 0xffff888111daf420,
>       prev = 0xffff8881114e9b30
>     },
>     nr_deferred = 0xffff88813a544a00
>   },
>   nfsd_ssc_lock = {
>     {
>       rlock = {
>         raw_lock = {
>           {
>             val = {
>               counter = 0x0
>             },
>             {
>               locked = 0x0,
>               pending = 0x0
>             },
>             {
>               locked_pending = 0x0,
>               tail = 0x0
>             }
>           }
>         }
>       }
>     }
>   },
>   nfsd_ssc_mount_list = {
>     next = 0x0,
>     prev = 0x0
>   },
>   nfsd_ssc_waitq = {
>     lock = {
>       {
>         rlock = {
>           raw_lock = {
>             {
>               val = {
>                 counter = 0x0
>               },
>               {
>                 locked = 0x0,
>                 pending = 0x0
>               },
>               {
>                 locked_pending = 0x0,
>                 tail = 0x0
>               }
>             }
>           }
>         }
>       }
>     },
>     head = {
>       next = 0x0,
>       prev = 0x0
>     }
>   },
>   nfsd_name = "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000",
> 
> 

nfsd_name is usually set to the utsname nodename, so an all-zeroes
buffer looks bogus.
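
(IIRC this is copied from utsname when the server starts, something
like the below, so zeroes shouldn't survive a server that was ever
started:)

	strscpy(nn->nfsd_name, utsname()->nodename,
		sizeof(nn->nfsd_name));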

>   fcache_disposal = 0x0,
>   siphash_key = {
>     key = {0x2a5ba10a35b36754, 0xd6b3a5a0e7696876}
>   },
>   nfs4_client_count = {
>     counter = 0x0
>   },
>   nfs4_max_clients = 0x1800,
>   nfsd_courtesy_clients = {
>     counter = 0x0
>   },
>   nfsd_client_shrinker = {
>     count_objects = 0xffffffffa0e742c0 <nfsd4_state_shrinker_count>,
>     scan_objects = 0xffffffffa0e73a90 <nfsd4_state_shrinker_scan>,
>     batch = 0x0,
>     seeks = 0x2,
>     flags = 0x1,
>     list = {
>       next = 0xffff8881114e9a58,
>       prev = 0xffffffffa131b280 <mmu_shrinker+32>
>     },
>     nr_deferred = 0xffff88813a5449d8
>   },
>   nfsd_shrinker_work = {
>     work = {
>       data = {
>         counter = 0x1
>       },
>       entry = {
>         next = 0x0,
>         prev = 0x0
>       },
>       func = 0x0
>     },
>     timer = {
>       entry = {
>         next = 0x0,
>         pprev = 0x0
>       },
>       expires = 0x0,
>       function = 0x0,
>       flags = 0x0
>     },
>     wq = 0x0,
>     cpu = 0x0
>   }
> }
> crash> kmem -s 0xFFFF8881114E9800
> CACHE             OBJSIZE  ALLOCATED     TOTAL  SLABS  SSIZE  NAME
> ffff888100042dc0     1024      18325     18352   1147    32k  kmalloc-1k
>   SLAB              MEMORY            NODE  TOTAL  ALLOCATED  FREE
>   ffffea0004453a00  ffff8881114e8000     0     16         16     0
>   FREE / [ALLOCATED]
>   [ffff8881114e9800]
> crash>
> 

Bit of a mixed bag here. A lot of these fields are corrupt, but not all
of them.

One thing that might be interesting, to help rule out a use-after-free
(UAF), would be to explicitly poison this struct in nfsd_exit_net.
Basically, do something like this at the end of exit_net:

	memset(nn, 0x7c, sizeof(*nn));

That might help trigger an oops sooner after the problem occurs.
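
(For clarity, that would sit at the tail of nfsd_exit_net(), where the
net_generic() pointer is already in scope -- untested sketch:)

	static __net_exit void nfsd_exit_net(struct net *net)
	{
		struct nfsd_net *nn = net_generic(net, nfsd_net_id);

		...
		/* poison the per-net struct so any UAF oopses quickly */
		memset(nn, 0x7c, sizeof(*nn));
	}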

If you're feeling ambitious, another thing you could do is track down
some of the running nfsd threads in the vmcore, find their rqstp
values, and see whether the sockets point at the same nfsd_net as the
one you found above (see the nfsd() function for how to get from one
to the other).
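
(The chain nfsd() uses is rqstp -> rq_server -> sv_permsocks ->
xpt_net -> net_generic(), roughly:)

	/* walk from an nfsd thread's rqstp to its per-net nfsd_net */
	struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next,
						struct svc_xprt, xpt_list);
	struct net *net = perm_sock->xpt_net;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);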

If they point at a different nfsd_net, then that would suggest we're
looking at a UAF. If it's the same nfsd_net, then I'd lean more toward
some sort of memory scribble.
-- 
Jeff Layton <jlayton@xxxxxxxxxx>



