The attached patch hooks up the new preferences options to enable/disable stats polling.

When polling is disabled, the performance graphs in the VM Details->Overview tab are labeled 'Disabled'. The graphs continue to update, but all the data passed to them is 0: http://fedorapeople.org/~crobinso/virt-manager/vmm-stats-disabled.png

Also, if the associated stats column is visible in the manager window when the user disables polling, the column disappears. The View-> menu entry for that stats type is disabled as well, with a tooltip informing the user that polling is disabled in the preferences dialog.

Thanks,
Cole
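For anyone who wants the gist of the domain.py change without reading the whole diff: each stat type now holds a sampler function that a preferences callback swaps between a real sampler and a zero-returning dummy, so tick() never branches on the preference. Below is a minimal standalone sketch of that pattern. FakeConfig and its methods are illustrative stand-ins for the real vmmConfig API, not part of the patch:

import random  # stands in for a real libvirt query in this sketch

class FakeConfig(object):
    # Hypothetical stand-in for vmmConfig; only mimics the two calls
    # this sketch needs.
    def __init__(self):
        self._cpu_poll = True
        self._callbacks = []
    def on_stats_enable_cpu_poll_changed(self, cb):
        self._callbacks.append(cb)
    def get_stats_enable_cpu_poll(self):
        return self._cpu_poll
    def set_cpu_poll(self, val):
        self._cpu_poll = val
        for cb in self._callbacks:
            cb()

class StatsSampler(object):
    def __init__(self, config):
        self.config = config
        # Re-pick the sampler whenever the preference flips, and once
        # at startup to install the initial choice
        config.on_stats_enable_cpu_poll_changed(self.toggle_sample_cpu_stats)
        self.toggle_sample_cpu_stats()

    def _sample_cpu_stats_dummy(self):
        return 0  # polling off: graphs keep ticking, fed with zeros

    def _sample_cpu_stats(self):
        return random.uniform(0, 100)  # polling on: a real measurement (faked)

    def toggle_sample_cpu_stats(self, *ignore):
        if self.config.get_stats_enable_cpu_poll():
            self._cpu_stats = self._sample_cpu_stats
        else:
            self._cpu_stats = self._sample_cpu_stats_dummy

    def tick(self):
        # No branching here: just call whichever sampler is installed
        return self._cpu_stats()

config = FakeConfig()
sampler = StatsSampler(config)
print(sampler.tick())   # some real (here: random) value
config.set_cpu_poll(False)
print(sampler.tick())   # 0

The nice property is that the hot path stays identical whether polling is on or off; only the bound function changes.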
# HG changeset patch
# User "Cole Robinson <crobinso@xxxxxxxxxx>"
# Date 1228749285 18000
# Node ID 31d00d8e89d2f073c727d4952b04650eb7c54d0c
# Parent c7cd0d65d7a67da18e6b7b44e5bbee98710569ce
Hook up new global stats enable/disable work.

diff -r c7cd0d65d7a6 -r 31d00d8e89d2 src/virtManager/details.py
--- a/src/virtManager/details.py	Mon Dec 08 10:11:25 2008 -0500
+++ b/src/virtManager/details.py	Mon Dec 08 10:14:45 2008 -0500
@@ -826,29 +826,40 @@
         def _rx_tx_text(rx, tx, unit):
             return '<span color="#82003B">%(rx)d %(unit)s in</span>\n<span color="#295C45">%(tx)d %(unit)s out</span>' % locals()
 
-        self.window.get_widget("overview-cpu-usage-text").set_text("%d %%" % self.vm.cpu_time_percentage())
-        vm_memory = self.vm.current_memory()
-        host_memory = self.vm.get_connection().host_memory_size()
-        self.window.get_widget("overview-memory-usage-text").set_text("%d MB of %d MB" % \
-                                                                      (int(round(vm_memory/1024.0)), \
-                                                                       int(round(host_memory/1024.0))))
+        cpu_txt = _("Disabled")
+        mem_txt = _("Disabled")
+        dsk_txt = _("Disabled")
+        net_txt = _("Disabled")
 
-        self.cpu_usage_graph.set_property("data_array", self.vm.cpu_time_vector())
-        self.memory_usage_graph.set_property("data_array", self.vm.current_memory_vector())
+        if self.config.get_stats_enable_cpu_poll():
+            cpu_txt = "%d %%" % self.vm.cpu_time_percentage()
 
-        if self.config.is_vmlist_network_traffic_visible():
-            text = _rx_tx_text(self.vm.network_rx_rate(), self.vm.network_tx_rate(), "KBytes/s")
-            self.network_traffic_graph.set_property("data_array", self.vm.network_traffic_vector())
-        else:
-            text = "sampling\ndisabled"
-        self.window.get_widget("overview-network-traffic-text").set_markup(text)
+        if self.config.get_stats_enable_mem_poll():
+            vm_memory = self.vm.current_memory()
+            host_memory = self.vm.get_connection().host_memory_size()
+            mem_txt = "%d MB of %d MB" % (int(round(vm_memory/1024.0)),
+                                          int(round(host_memory/1024.0)))
 
+        if self.config.get_stats_enable_disk_poll():
+            dsk_txt = _rx_tx_text(self.vm.disk_read_rate(),
+                                  self.vm.disk_write_rate(), "KBytes/s")
-        if self.config.is_vmlist_disk_io_visible():
-            text = _rx_tx_text(self.vm.disk_read_rate(), self.vm.disk_write_rate(), "KBytes/s")
-            self.disk_io_graph.set_property("data_array", self.vm.disk_io_vector())
-        else:
-            text = "sampling\ndisabled"
-        self.window.get_widget("overview-disk-usage-text").set_markup(text)
+        if self.config.get_stats_enable_net_poll():
+            net_txt = _rx_tx_text(self.vm.network_rx_rate(),
+                                  self.vm.network_tx_rate(), "KBytes/s")
+
+        self.window.get_widget("overview-cpu-usage-text").set_text(cpu_txt)
+        self.window.get_widget("overview-memory-usage-text").set_text(mem_txt)
+        self.window.get_widget("overview-network-traffic-text").set_markup(net_txt)
+        self.window.get_widget("overview-disk-usage-text").set_markup(dsk_txt)
+
+        self.cpu_usage_graph.set_property("data_array",
+                                          self.vm.cpu_time_vector())
+        self.memory_usage_graph.set_property("data_array",
+                                             self.vm.current_memory_vector())
+        self.disk_io_graph.set_property("data_array",
+                                        self.vm.disk_io_vector())
+        self.network_traffic_graph.set_property("data_array",
+                                                self.vm.network_traffic_vector())
 
     def refresh_config_cpu(self):
         self.window.get_widget("state-host-cpus").set_text("%d" % self.vm.get_connection().host_active_processor_count())
diff -r c7cd0d65d7a6 -r 31d00d8e89d2 src/virtManager/domain.py
--- a/src/virtManager/domain.py	Mon Dec 08 10:11:25 2008 -0500
+++ b/src/virtManager/domain.py	Mon Dec 08 10:14:45 2008 -0500
@@ -52,13 +52,21 @@
         self._update_status()
         self.xml = None
 
+        self._mem_stats = None
+        self._cpu_stats = None
         self._network_traffic = None
-        self.config.on_vmlist_network_traffic_visible_changed(self.toggle_sample_network_traffic)
+        self._disk_io = None
+
+        self.config.on_stats_enable_mem_poll_changed(self.toggle_sample_mem_stats)
+        self.config.on_stats_enable_cpu_poll_changed(self.toggle_sample_cpu_stats)
+        self.config.on_stats_enable_net_poll_changed(self.toggle_sample_network_traffic)
+        self.config.on_stats_enable_disk_poll_changed(self.toggle_sample_disk_io)
+
+        self.toggle_sample_mem_stats()
+        self.toggle_sample_cpu_stats()
         self.toggle_sample_network_traffic()
+        self.toggle_sample_disk_io()
 
-        self._disk_io = None
-        self.config.on_vmlist_disk_io_visible_changed(self.toggle_sample_disk_io)
-        self.toggle_sample_disk_io()
 
     def get_xml(self):
         if self.xml is None:
@@ -161,6 +169,47 @@
             self.lastStatus = status
             self.emit("status-changed", status)
 
+    def _sample_mem_stats_dummy(self, ignore):
+        return 0, 0
+
+    def _sample_mem_stats(self, info):
+        pcentCurrMem = info[2] * 100.0 / self.connection.host_memory_size()
+        pcentMaxMem = info[1] * 100.0 / self.connection.host_memory_size()
+        return pcentCurrMem, pcentMaxMem
+
+    def _sample_cpu_stats_dummy(self, ignore, ignore1):
+        return 0, 0, 0
+
+    def _sample_cpu_stats(self, info, now):
+        prevCpuTime = 0
+        prevTimestamp = 0
+        if len(self.record) > 0:
+            prevTimestamp = self.record[0]["timestamp"]
+            prevCpuTime = self.record[0]["cpuTimeAbs"]
+
+        cpuTime = 0
+        cpuTimeAbs = 0
+        pcentCpuTime = 0
+        if not (info[0] in [libvirt.VIR_DOMAIN_SHUTOFF,
+                            libvirt.VIR_DOMAIN_CRASHED]):
+            cpuTime = info[4] - prevCpuTime
+            cpuTimeAbs = info[4]
+
+            pcentCpuTime = ((cpuTime) * 100.0 /
+                            (((now - prevTimestamp)*1000.0*1000.0*1000.0) *
+                             self.connection.host_active_processor_count()))
+
+            # Due to timing diffs between getting wall time & getting
+            # the domain's time, its possible to go a tiny bit over
+            # 100% utilization. This freaks out users of the data, so
+            # we hard limit it.
+            if pcentCpuTime > 100.0:
+                pcentCpuTime = 100.0
+            # Enforce >= 0 just in case
+            if pcentCpuTime < 0.0:
+                pcentCpuTime = 0.0
+
+        return cpuTime, cpuTimeAbs, pcentCpuTime
+
     def _sample_network_traffic_dummy(self):
         return 0, 0
 
@@ -208,6 +257,7 @@
     def tick(self, now):
         if self.connection.get_state() != self.connection.STATE_ACTIVE:
             return
+        # Clear cached XML
        self.xml = None
        info = self.vm.info()
 
@@ -215,30 +265,6 @@
         current = len(self.record)
         if current > expected:
             del self.record[expected:current]
-
-        prevCpuTime = 0
-        prevTimestamp = 0
-        if len(self.record) > 0:
-            prevTimestamp = self.record[0]["timestamp"]
-            prevCpuTime = self.record[0]["cpuTimeAbs"]
-
-        cpuTime = 0
-        cpuTimeAbs = 0
-        pcentCpuTime = 0
-        if not(info[0] in [libvirt.VIR_DOMAIN_SHUTOFF, libvirt.VIR_DOMAIN_CRASHED]):
-            cpuTime = info[4] - prevCpuTime
-            cpuTimeAbs = info[4]
-
-            pcentCpuTime = (cpuTime) * 100.0 / ((now - prevTimestamp)*1000.0*1000.0*1000.0*self.connection.host_active_processor_count())
-            # Due to timing diffs between getting wall time & getting
-            # the domain's time, its possible to go a tiny bit over
-            # 100% utilization. This freaks out users of the data, so
-            # we hard limit it.
-            if pcentCpuTime > 100.0:
-                pcentCpuTime = 100.0
-            # Enforce >= 0 just in case
-            if pcentCpuTime < 0.0:
-                pcentCpuTime = 0.0
 
         # Xen reports complete crap for Dom0 max memory
         # (ie MAX_LONG) so lets clamp it to the actual
@@ -248,9 +274,8 @@
         if self.get_id() == 0:
             info[1] = self.connection.host_memory_size()
 
-        pcentCurrMem = info[2] * 100.0 / self.connection.host_memory_size()
-        pcentMaxMem = info[1] * 100.0 / self.connection.host_memory_size()
-
+        cpuTime, cpuTimeAbs, pcentCpuTime = self._cpu_stats(info, now)
+        pcentCurrMem, pcentMaxMem = self._mem_stats(info)
         rdBytes, wrBytes = self._disk_io()
         rxBytes, txBytes = self._network_traffic()
 
@@ -271,7 +296,6 @@
         self.record.insert(0, newStats)
 
         nSamples = 5
-        #nSamples = len(self.record)
         if nSamples > len(self.record):
             nSamples = len(self.record)
 
@@ -1073,8 +1097,23 @@
         # Invalidate cached xml
         self.xml = None
 
-    def toggle_sample_network_traffic(self, ignore1=None, ignore2=None, ignore3=None, ignore4=None):
-        if self.config.is_vmlist_network_traffic_visible():
+    def toggle_sample_cpu_stats(self, ignore1=None, ignore2=None,
+                                ignore3=None, ignore4=None):
+        if self.config.get_stats_enable_cpu_poll():
+            self._cpu_stats = self._sample_cpu_stats
+        else:
+            self._cpu_stats = self._sample_cpu_stats_dummy
+
+    def toggle_sample_mem_stats(self, ignore1=None, ignore2=None,
+                                ignore3=None, ignore4=None):
+        if self.config.get_stats_enable_mem_poll():
+            self._mem_stats = self._sample_mem_stats
+        else:
+            self._mem_stats = self._sample_mem_stats_dummy
+
+    def toggle_sample_network_traffic(self, ignore1=None, ignore2=None,
+                                      ignore3=None, ignore4=None):
+        if self.config.get_stats_enable_net_poll():
             if len(self.record) > 1:
                 # resample the current value before calculating the rate in
                 # self.tick() otherwise we'd get a huge spike when switching
@@ -1086,8 +1125,9 @@
         else:
             self._network_traffic = self._sample_network_traffic_dummy
 
-    def toggle_sample_disk_io(self, ignore1=None, ignore2=None, ignore3=None, ignore4=None):
-        if self.config.is_vmlist_disk_io_visible():
+    def toggle_sample_disk_io(self, ignore1=None, ignore2=None,
+                              ignore3=None, ignore4=None):
+        if self.config.get_stats_enable_disk_poll():
             if len(self.record) > 1:
                 # resample the current value before calculating the rate in
                 # self.tick() otherwise we'd get a huge spike when switching
diff -r c7cd0d65d7a6 -r 31d00d8e89d2 src/virtManager/manager.py
--- a/src/virtManager/manager.py	Mon Dec 08 10:11:25 2008 -0500
+++ b/src/virtManager/manager.py	Mon Dec 08 10:14:45 2008 -0500
@@ -136,6 +136,17 @@
         self.window.get_widget("menu_view_disk_io").set_active(self.config.is_vmlist_disk_io_visible())
         self.window.get_widget("menu_view_network_traffic").set_active(self.config.is_vmlist_network_traffic_visible())
 
+        # Register callbacks with the global stats enable/disable values
+        # that disable the associated vmlist widgets if reporting is disabled
+        self.config.on_stats_enable_disk_poll_changed(self.enable_polling,
+                                                      VMLIST_SORT_DISK_IO)
+        self.config.on_stats_enable_net_poll_changed(self.enable_polling,
+                                                     VMLIST_SORT_NETWORK_USAGE)
+        self.config.on_stats_enable_cpu_poll_changed(self.enable_polling,
+                                                     VMLIST_SORT_CPU_USAGE)
+        self.config.on_stats_enable_mem_poll_changed(self.enable_polling,
+                                                     VMLIST_SORT_MEMORY_USAGE)
+
         self.window.get_widget("vm-view").set_active(0)
 
         self.vmmenu_icons = {}
@@ -291,6 +302,19 @@
         self.vm_selected(None)
         self.window.get_widget("vm-list").get_selection().connect("changed", self.vm_selected)
 
+        # Initialize stat polling columns based on global polling
+        # preferences (we want signal handlers for this)
+        for typ, init_val in \
+            [ (VMLIST_SORT_DISK_IO,
+               self.config.get_stats_enable_disk_poll()),
+              (VMLIST_SORT_NETWORK_USAGE,
+               self.config.get_stats_enable_net_poll()),
+              (VMLIST_SORT_CPU_USAGE,
+               self.config.get_stats_enable_cpu_poll()),
+              (VMLIST_SORT_MEMORY_USAGE,
+               self.config.get_stats_enable_mem_poll())]:
+            self.enable_polling(None, None, init_val, typ)
+
         # store any error message from the restore-domain callback
         self.domain_restore_error = ""
@@ -981,6 +1005,27 @@
         col = vmlist.get_column(COL_CPU)
         col.set_visible(self.config.is_vmlist_cpu_usage_visible())
 
+    def enable_polling(self, ignore1, ignore2, conf_entry, userdata):
+        if userdata == VMLIST_SORT_CPU_USAGE:
+            widgn = "menu_view_cpu_usage"
+        elif userdata == VMLIST_SORT_MEMORY_USAGE:
+            widgn = "menu_view_memory_usage"
+        elif userdata == VMLIST_SORT_DISK_IO:
+            widgn = "menu_view_disk_io"
+        elif userdata == VMLIST_SORT_NETWORK_USAGE:
+            widgn = "menu_view_network_traffic"
+        widget = self.window.get_widget(widgn)
+
+        if conf_entry and (conf_entry == True or \
+                           conf_entry.get_value().get_bool()):
+            widget.set_sensitive(True)
+            widget.set_tooltip_text("")
+        else:
+            if widget.get_active():
+                widget.set_active(False)
+            widget.set_sensitive(False)
+            widget.set_tooltip_text(_("Disabled in preferences dialog."))
+
     def toggle_virtual_cpus_visible_conf(self, menu):
         self.config.set_vmlist_virtual_cpus_visible(menu.get_active())
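A subtlety in the manager.py hunk above: enable_polling() gets invoked two different ways, as a gconf notification callback (where conf_entry is a gconf entry object) and directly from the init loop (where conf_entry is a plain bool), which is what the conf_entry == True or conf_entry.get_value().get_bool() test accommodates. Here is a stripped-down illustration of just that check; FakeEntry is a hypothetical stand-in for the real gconf entry, not part of the patch:

class FakeEntry(object):
    # Hypothetical stand-in for the gconf entry passed to notify
    # callbacks; illustrative only.
    def __init__(self, val):
        self._val = val
    def get_value(self):
        return self
    def get_bool(self):
        return self._val

def polling_enabled(conf_entry):
    # Mirrors the check in enable_polling(): accept either a plain
    # bool (direct call at startup) or an entry object (gconf callback)
    return bool(conf_entry) and (conf_entry == True or
                                 conf_entry.get_value().get_bool())

print(polling_enabled(True))              # direct call at init: True
print(polling_enabled(False))             # direct call at init: False
print(polling_enabled(FakeEntry(True)))   # via gconf callback: True
print(polling_enabled(FakeEntry(False)))  # via gconf callback: False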