New Defects reported by Coverity Scan for ceph

Hi,


Please find below the latest report on new defect(s) introduced to ceph, as found by Coverity Scan.

Defect(s) Reported-by: Coverity Scan
Showing 6 of 6 defect(s)


** CID 1019567:  Thread deadlock  (ORDER_REVERSAL)


** CID 1231681:  Thread deadlock  (ORDER_REVERSAL)


** CID 1231682:  Thread deadlock  (ORDER_REVERSAL)


** CID 1231683:  Thread deadlock  (ORDER_REVERSAL)


** CID 1231684:  Thread deadlock  (ORDER_REVERSAL)


** CID 1231685:  Use after free  (USE_AFTER_FREE)

________________________________________________________________________________________________________
*** CID 1019567:  Thread deadlock  (ORDER_REVERSAL)
/osd/OSD.cc: 3689 in OSD::handle_osd_ping(MOSDPing *)()
3683     		  << ", " << debug_heartbeat_drops_remaining[from]
3684     		  << " remaining to drop" << dendl;
3685     	  break;
3686     	}
3687           }
3688     
>>>     CID 1019567:  Thread deadlock  (ORDER_REVERSAL)
>>>     Calling "is_healthy" acquires lock "RWLock.L" while holding lock "Mutex._m" (count: 7 / 14).
3689           if (!cct->get_heartbeat_map()->is_healthy()) {
3690     	dout(10) << "internal heartbeat not healthy, dropping ping request" << dendl;
3691     	break;
3692           }
3693     
3694           Message *r = new MOSDPing(monc->get_fsid(),
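
For context on this defect class: an ORDER_REVERSAL means two locks are acquired in opposite orders on different code paths, so two threads can each end up holding one lock while waiting for the other. Below is a minimal standalone sketch of the pattern, using std::mutex and std::shared_mutex as stand-ins for Ceph's Mutex (_m) and RWLock (L); every name in it is illustrative, not taken from the OSD code:

    #include <mutex>
    #include <shared_mutex>

    std::mutex m;           // stand-in for Mutex._m
    std::shared_mutex rw;   // stand-in for RWLock.L

    void path_one() {
      std::lock_guard<std::mutex> g(m);           // Mutex first...
      std::unique_lock<std::shared_mutex> w(rw);  // ...then RWLock
    }

    void path_two() {
      std::unique_lock<std::shared_mutex> w(rw);  // RWLock first...
      std::lock_guard<std::mutex> g(m);           // ...then Mutex: if another
                                                  // thread sits in path_one(),
                                                  // both can block forever
    }

The usual cures are a single project-wide acquisition order, or releasing the mutex before taking the rwlock; the notes under the remaining CIDs sketch the latter.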

________________________________________________________________________________________________________
*** CID 1231681:  Thread deadlock  (ORDER_REVERSAL)
/librados/RadosClient.cc: 111 in librados::RadosClient::lookup_pool(const char *)()
105       int r = wait_for_osdmap();
106       if (r < 0) {
107         lock.Unlock();
108         return r;
109       }
110       int64_t ret = osdmap.lookup_pg_pool_name(name);
>>>     CID 1231681:  Thread deadlock  (ORDER_REVERSAL)
>>>     Calling "get_write" acquires lock "RWLock.L" while holding lock "Mutex._m" (count: 7 / 14).
111       pool_cache_rwl.get_write();
112       lock.Unlock();
113       if (ret < 0) {
114         pool_cache_rwl.unlock();
115         return -ENOENT;
116       }
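
One way to break the Mutex._m -> RWLock.L edge in lookup_pool() would be to finish the osdmap lookup, drop lock, and only then take the write lock. This is a sketch under the assumption that the pool-cache update needs pool_cache_rwl but not lock; it is not necessarily the fix applied upstream:

    int64_t ret = osdmap.lookup_pg_pool_name(name);
    lock.Unlock();                 // release Mutex._m first
    if (ret < 0)
      return -ENOENT;              // nothing to cache for a missing pool
    pool_cache_rwl.get_write();    // RWLock.L is now taken with no Mutex held
    // ... update the pool cache under pool_cache_rwl ...
    pool_cache_rwl.unlock();
    return ret;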

________________________________________________________________________________________________________
*** CID 1231682:  Thread deadlock  (ORDER_REVERSAL)
/osd/OSD.cc: 2369 in OSD::shutdown()()
2363       service.start_shutdown();
2364     
2365       clear_waiting_sessions();
2366     
2367       // Shutdown PGs
2368       {
>>>     CID 1231682:  Thread deadlock  (ORDER_REVERSAL)
>>>     Calling "RLocker" acquires lock "RWLock.L" while holding lock "Mutex._m" (count: 7 / 14).
2369         RWLock::RLocker l(pg_map_lock);
2370         for (ceph::unordered_map<spg_t, PG*>::iterator p = pg_map.begin();
2371             p != pg_map.end();
2372             ++p) {
2373           dout(20) << " kicking pg " << p->first << dendl;
2374           p->second->lock();
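
Here shutdown() takes pg_map_lock (the RWLock) for reading while osd_lock (the Mutex) is held, the reverse of the order seen elsewhere. As a general aside: when two locks genuinely must be held together, C++17 offers deadlock-free multi-lock acquisition. Ceph's wrappers predate it, so the sketch below is purely illustrative, with stand-in lock objects rather than the real osd_lock/pg_map_lock (note it also takes the rwlock exclusively, not shared):

    #include <mutex>
    #include <shared_mutex>

    std::mutex osd_mutex;       // stand-in for osd_lock
    std::shared_mutex pg_map;   // stand-in for pg_map_lock

    void shutdown_pgs() {
      // std::scoped_lock acquires both locks via a deadlock-avoidance
      // algorithm, so the textual order written here cannot deadlock.
      std::scoped_lock both(osd_mutex, pg_map);
      // ... kick each PG here ...
    }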

________________________________________________________________________________________________________
*** CID 1231683:  Thread deadlock  (ORDER_REVERSAL)
/client/Client.cc: 372 in Client::init()()
366       client_lock.Unlock();
367       objecter->init_unlocked();
368       client_lock.Lock();
369     
370       objecter->init_locked();
371     
>>>     CID 1231683:  Thread deadlock  (ORDER_REVERSAL)
>>>     Calling "set_want_keys" acquires lock "RWLock.L" while holding lock "Mutex._m" (count: 7 / 14).
372       monclient->set_want_keys(CEPH_ENTITY_TYPE_MDS | CEPH_ENTITY_TYPE_OSD);
373       monclient->sub_want("mdsmap", 0, 0);
374       monclient->sub_want("osdmap", 0, CEPH_SUBSCRIBE_ONETIME);
375       monclient->renew_subs();
376     
377       // logger
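
Client::init() already drops client_lock around objecter->init_unlocked() a few lines earlier; repeating that pattern here would remove the Mutex._m -> RWLock.L edge, assuming the MonClient subscription calls do not rely on client_lock being held. A sketch only, not necessarily the upstream fix:

    client_lock.Unlock();
    // the Coverity trace shows set_want_keys() taking an RWLock internally,
    // so make the MonClient calls with no Mutex held
    monclient->set_want_keys(CEPH_ENTITY_TYPE_MDS | CEPH_ENTITY_TYPE_OSD);
    monclient->sub_want("mdsmap", 0, 0);
    monclient->sub_want("osdmap", 0, CEPH_SUBSCRIBE_ONETIME);
    monclient->renew_subs();
    client_lock.Lock();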

________________________________________________________________________________________________________
*** CID 1231684:  Thread deadlock  (ORDER_REVERSAL)
/osd/OSD.h: 2237 in OSD::RepScrubWQ::_process(MOSDRepScrub *, ThreadPool::TPHandle &)()
2231           ThreadPool::TPHandle &handle) {
2232           osd->osd_lock.Lock();
2233           if (osd->is_stopping()) {
2234     	osd->osd_lock.Unlock();
2235     	return;
2236           }
>>>     CID 1231684:  Thread deadlock  (ORDER_REVERSAL)
>>>     Calling "_have_pg" acquires lock "RWLock.L" while holding lock "Mutex._m" (count: 7 / 14).
2237           if (osd->_have_pg(msg->pgid)) {
2238     	PG *pg = osd->_lookup_lock_pg(msg->pgid);
2239     	osd->osd_lock.Unlock();
2240     	pg->replica_scrub(msg, handle);
2241     	msg->put();
2242     	pg->unlock();
/osd/OSD.h: 2238 in OSD::RepScrubWQ::_process(MOSDRepScrub *, ThreadPool::TPHandle &)()
2232           osd->osd_lock.Lock();
2233           if (osd->is_stopping()) {
2234     	osd->osd_lock.Unlock();
2235     	return;
2236           }
2237           if (osd->_have_pg(msg->pgid)) {
>>>     CID 1231684:  Thread deadlock  (ORDER_REVERSAL)
>>>     Calling "_lookup_lock_pg" acquires lock "RWLock.L" while holding lock "Mutex._m" (count: 7 / 14).
2238     	PG *pg = osd->_lookup_lock_pg(msg->pgid);
2239     	osd->osd_lock.Unlock();
2240     	pg->replica_scrub(msg, handle);
2241     	msg->put();
2242     	pg->unlock();
2243           } else {
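
Both traces for this CID come from _have_pg() and _lookup_lock_pg(), each of which takes pg_map_lock (an RWLock) internally while osd_lock (a Mutex) is still held. One possible rearrangement, assuming _lookup_lock_pg() returns NULL for an absent PG and that nothing after the is_stopping() check actually needs osd_lock:

    osd->osd_lock.Lock();
    if (osd->is_stopping()) {
      osd->osd_lock.Unlock();
      return;
    }
    osd->osd_lock.Unlock();        // drop the Mutex before any pg_map_lock use
    PG *pg = osd->_lookup_lock_pg(msg->pgid);  // one pg_map_lock acquisition
                                               // replaces _have_pg() + lookup
    if (pg) {
      pg->replica_scrub(msg, handle);
      msg->put();
      pg->unlock();
    } else {
      msg->put();                  // PG no longer exists; drop the message
    }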

________________________________________________________________________________________________________
*** CID 1231685:  Use after free  (USE_AFTER_FREE)
/osd/OSD.cc: 6223 in OSD::handle_osd_map(MOSDMap *)()
6217     
6218           if (o->test_flag(CEPH_OSDMAP_FULL))
6219     	last_marked_full = e;
6220           pinned_maps.push_back(add_map(o));
6221     
6222           bufferlist fbl;
>>>     CID 1231685:  Use after free  (USE_AFTER_FREE)
>>>     Calling "encode" dereferences freed pointer "o".
6223           o->encode(fbl);
6224     
6225           hobject_t fulloid = get_osdmap_pobject_name(e);
6226           t.write(coll_t::META_COLL, fulloid, 0, fbl.length(), fbl);
6227           pin_map_bl(e, fbl);
6228           continue;
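
Coverity's premise is that add_map(o) can free o, for instance by returning an already-cached map for the same epoch and dropping the caller's copy. Under that assumption, encoding before ownership is handed over sidesteps the dangling pointer; a sketch, not necessarily the upstream fix:

    bufferlist fbl;
    o->encode(fbl);                     // use o while it is certainly alive
    pinned_maps.push_back(add_map(o));  // only now may add_map() free o

Alternatively, if add_map() hands back a still-valid OSDMapRef, the encode() could go through that returned reference rather than through o.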


________________________________________________________________________________________________________
To view the defects in Coverity Scan, visit http://scan.coverity.com/projects/25?tab=overview

To unsubscribe from email notifications for new defects, visit http://scan5.coverity.com/cgi-bin/unsubscribe.py



--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html


