Kaydet (Commit) 566a1af6 authored tarafından Hakan Dündar's avatar Hakan Dündar

New: Video encoding/decoding engine load for AMD GPUs

üst a9d80fda
......@@ -599,6 +599,7 @@ def da_upper_lower_label(text, alignment):
label = Gtk.Label()
label.set_halign(alignment)
label.add_css_class("dim-label")
label.set_ellipsize(Pango.EllipsizeMode.END)
label.set_label(text)
return label
......
......@@ -151,8 +151,8 @@ class Gpu:
self.da_gpu_memory_encoder_decoder_load_grid.attach(grid, 1, 0, 1, 1)
# Label (drawingarea upper-left)
label = Common.da_upper_lower_label(_tr("Video Encoder"), Gtk.Align.START)
grid.attach(label, 0, 0, 1, 1)
self.label_video_encoder_or_encoder_decoder_load = Common.da_upper_lower_label(_tr("Video Encoder"), Gtk.Align.START)
grid.attach(self.label_video_encoder_or_encoder_decoder_load, 0, 0, 1, 1)
# Label (drawingarea upper-right)
label = Common.da_upper_lower_label("100%", Gtk.Align.END)
......@@ -173,26 +173,26 @@ class Gpu:
"""
# Grid (drawingarea)
grid = Gtk.Grid()
grid.set_hexpand(True)
grid.set_vexpand(True)
self.da_gpu_memory_encoder_decoder_load_grid.attach(grid, 2, 0, 1, 1)
self.grid_video_decoder_load = Gtk.Grid()
self.grid_video_decoder_load.set_hexpand(True)
self.grid_video_decoder_load.set_vexpand(True)
self.da_gpu_memory_encoder_decoder_load_grid.attach(self.grid_video_decoder_load, 2, 0, 1, 1)
# Label (drawingarea upper-left)
label = Common.da_upper_lower_label(_tr("Video Decoder"), Gtk.Align.START)
grid.attach(label, 0, 0, 1, 1)
self.grid_video_decoder_load.attach(label, 0, 0, 1, 1)
# Label (drawingarea upper-right)
label = Common.da_upper_lower_label("100%", Gtk.Align.END)
grid.attach(label, 1, 0, 1, 1)
self.grid_video_decoder_load.attach(label, 1, 0, 1, 1)
# DrawingArea
self.da_gpu_decoder_load = Common.drawingarea(Performance.performance_line_charts_draw, "da_gpu_decoder_load")
grid.attach(self.da_gpu_decoder_load, 0, 2, 2, 1)
self.grid_video_decoder_load.attach(self.da_gpu_decoder_load, 0, 2, 2, 1)
# Label (drawingarea lower-right)
label = Common.da_upper_lower_label("0", Gtk.Align.END)
grid.attach(label, 0, 3, 2, 1)
self.grid_video_decoder_load.attach(label, 0, 3, 2, 1)
def information_grid(self):
......@@ -661,6 +661,9 @@ class Gpu:
self.da_gpu_encoder_load.queue_draw()
self.da_gpu_decoder_load.queue_draw()
# Update video encoder and decoder load graphs GUI
self.update_video_encoder_decoder_load_graph_gui()
self.gpu_information_share_dict2 = dict(gpu_load_memory_frequency_power_dict)
# Run "main_gui_device_selection_list" if selected device list is changed since the last loop.
......@@ -684,5 +687,23 @@ class Gpu:
self.memory_frequency_label.set_label(f'{gpu_memory_current_frequency} / {gpu_memory_max_frequency}')
def update_video_encoder_decoder_load_graph_gui(self):
    """
    Show or hide the video decoder graph depending on whether the GPU
    reports a combined video engine load.

    A single video engine load is obtained for new AMD GPUs, because AMD
    GPUs have a single engine (VCN) for video encoding and decoding after
    2018. In this case, the video engine load value is tracked by the
    "gpu_encoder_load" variable and the "gpu_decoder_load" variable is set
    to "-9999" so that code using this function can recognize that there
    is a single video engine load value.
    """
    # "-9999" is the sentinel meaning "single combined encoder/decoder engine".
    if self.gpu_decoder_load_list[-1] == -9999:
        # Hide the decoder graph and relabel the encoder graph as the
        # combined engine graph (only when the state actually changes).
        if self.grid_video_decoder_load.get_visible():
            self.grid_video_decoder_load.set_visible(False)
            self.label_video_encoder_or_encoder_decoder_load.set_text(_tr("Video Encoder") + " + " + _tr("Video Decoder"))
    else:
        # Separate engines: make sure the decoder graph is shown again.
        if not self.grid_video_decoder_load.get_visible():
            self.grid_video_decoder_load.set_visible(True)
            self.label_video_encoder_or_encoder_decoder_load.set_text(_tr("Video Encoder"))
Gpu = Gpu()
......@@ -2830,6 +2830,18 @@ def get_gpu_load_memory_frequency_power(gpu_pci_address, device_vendor_id, selec
# Update the GPU load value, because it is not obtained in the "get_gpu_load_memory_frequency_power_amd" function.
gpu_load_memory_frequency_power_dict["gpu_load"] = gpu_load
# Get encoder/decoder engine load of AMD GPU by using "amdgpu_top" tool.
threading.Thread(target=gpu_encoder_decoder_load_amd_func, daemon=True).start()
global gpu_tool_output_amdgpu_top
try:
check_value = gpu_tool_output_amdgpu_top
except NameError:
gpu_tool_output_amdgpu_top = "-"
# Update encoder/decoder engine load values, because they are not obtained in the "get_gpu_load_memory_frequency_power_amd" function.
gpu_load_memory_frequency_power_dict = process_gpu_tool_output_amdgpu_top(gpu_pci_address, gpu_tool_output_amdgpu_top, gpu_load_memory_frequency_power_dict)
# If selected GPU vendor is Broadcom (for RB-Pi ARM devices).
elif device_vendor_id in ["Brcm"]:
gpu_load_memory_frequency_power_dict = get_gpu_load_memory_frequency_power_broadcom_arm()
......@@ -2840,14 +2852,14 @@ def get_gpu_load_memory_frequency_power(gpu_pci_address, device_vendor_id, selec
# the main thread and GUI for a very small time which stops the GUI for a very small time.
threading.Thread(target=gpu_load_nvidia_func, daemon=True).start()
global gpu_tool_output, nvidia_smi_encoder_decoder
global gpu_tool_output_nvidia_smi, nvidia_smi_encoder_decoder
try:
check_value = gpu_tool_output
check_value = gpu_tool_output_nvidia_smi
except NameError:
gpu_tool_output = "-"
gpu_tool_output_nvidia_smi = "-"
nvidia_smi_encoder_decoder = 1
gpu_load_memory_frequency_power_dict = process_gpu_tool_output_nvidia(gpu_pci_address, gpu_tool_output, nvidia_smi_encoder_decoder)
gpu_load_memory_frequency_power_dict = process_gpu_tool_output_nvidia(gpu_pci_address, gpu_tool_output_nvidia_smi, nvidia_smi_encoder_decoder)
# If selected GPU vendor is NVIDIA and selected GPU is used on an ARM system.
elif device_vendor_id in ["v000010DE", "Nvidia"] and gpu_device_path.startswith("/sys/devices/") == True:
......@@ -3275,6 +3287,66 @@ def gpu_load_amd_func(gpu_device_path, event):
time.sleep(amd_gpu_load_read_frequency)
def gpu_encoder_decoder_load_amd_func():
    """
    Get video encoder/decoder engine loads of AMD GPUs by using the
    "amdgpu_top" tool (a 3rd party tool).

    The parsed JSON output is stored in the module-level
    "gpu_tool_output_amdgpu_top" variable. On any failure the variable is
    left untouched, so downstream code never sees a half-processed value
    (previously a raw, unparsed string could be left in the global if
    "json.loads" failed, which would crash the consumer of this value).
    """
    import json

    command_list = ["amdgpu_top", "-J", "-n", "0"]
    # Run the command on the host when the application runs inside Flatpak.
    if get_environment_type() == "flatpak":
        command_list = ["flatpak-spawn", "--host"] + command_list

    global gpu_tool_output_amdgpu_top
    try:
        command_output = subprocess.check_output(command_list, shell=False).decode().strip()
        # Assign the global only after a successful parse.
        gpu_tool_output_amdgpu_top = json.loads(command_output)
    # Prevent errors because "amdgpu_top" may not be installed on systems.
    except Exception:
        pass
def process_gpu_tool_output_amdgpu_top(gpu_pci_address, gpu_tool_output_amdgpu_top, gpu_load_memory_frequency_power_dict):
    """
    Get encoder/decoder engine load values from "amdgpu_top" command output
    if there was no error when running the command.

    A single video engine load is obtained for new AMD GPUs, because AMD
    GPUs have a single engine (VCN) for video encoding and decoding after
    2018. In this case, the video engine load value is tracked by the
    "gpu_encoder_load" value and "gpu_decoder_load" is set to "-9999 %" so
    that code using this function can recognize that there is a single
    video engine load value.

    Fixes: previously "media_engine_load_value" was unbound (NameError)
    when the "MediaEngine" key was missing, and placeholder values
    (JSON null / "[N/A]"-style strings) reached float() and raised
    uncaught TypeError/ValueError; the "not supported" membership test
    also ran on an already-converted float and could never match.
    """
    # Define initial values which are used if values can not be obtained.
    gpu_encoder_load = "-"
    gpu_decoder_load = "-"
    not_supported_text = ["[Not Supported]", "[N/A]", "null", "Null", "NULL"]
    # "-" means the "amdgpu_top" command output could not be obtained.
    if gpu_tool_output_amdgpu_top != "-":
        all_gpus_information = gpu_tool_output_amdgpu_top["devices"]
        for gpu_information in all_gpus_information:
            # Skip GPUs other than the selected one (matched by PCI address).
            if gpu_pci_address != gpu_information["Info"]["PCI"]:
                continue
            try:
                media_engine_load_value = gpu_information["gpu_activity"]["MediaEngine"]["value"]
            # Prevent errors if the GPU does not report a media engine load.
            except KeyError:
                continue
            # Skip placeholder values before conversion (JSON null becomes None).
            if media_engine_load_value is None or media_engine_load_value in not_supported_text:
                continue
            try:
                media_engine_load_value = float(media_engine_load_value)
            # Prevent errors if the reported value is not numeric.
            except (TypeError, ValueError):
                continue
            # 65535 is treated as an invalid/unsupported reading and skipped.
            if media_engine_load_value != 65535:
                gpu_encoder_load = f'{media_engine_load_value:.0f} %'
                gpu_decoder_load = "-9999 %"
    gpu_load_memory_frequency_power_dict["gpu_encoder_load"] = gpu_encoder_load
    gpu_load_memory_frequency_power_dict["gpu_decoder_load"] = gpu_decoder_load
    return gpu_load_memory_frequency_power_dict
def gpu_load_nvidia_func():
"""
Get GPU load average for NVIDIA (PCI) GPUs.
......@@ -3284,10 +3356,10 @@ def gpu_load_nvidia_func():
if get_environment_type() == "flatpak":
command_list = ["flatpak-spawn", "--host"] + command_list
global gpu_tool_output, nvidia_smi_encoder_decoder
global gpu_tool_output_nvidia_smi, nvidia_smi_encoder_decoder
try:
nvidia_smi_encoder_decoder = 1
gpu_tool_output = (subprocess.check_output(command_list, shell=False)).decode().strip().split("\n")
gpu_tool_output_nvidia_smi = (subprocess.check_output(command_list, shell=False)).decode().strip().split("\n")
# Prevent errors because "nvidia-smi" may not be installed on some devices (such as N.Switch with NVIDIA Tegra GPU).
except Exception:
nvidia_smi_encoder_decoder = 0
......@@ -3295,13 +3367,13 @@ def gpu_load_nvidia_func():
if get_environment_type() == "flatpak":
command_list = ["flatpak-spawn", "--host"] + command_list
try:
gpu_tool_output = (subprocess.check_output(command_list, shell=False)).decode().strip().split("\n")
gpu_tool_output_nvidia_smi = (subprocess.check_output(command_list, shell=False)).decode().strip().split("\n")
# Prevent errors because "nvidia-smi" may not be installed on some devices (such as N.Switch with NVIDIA Tegra GPU).
except Exception:
pass
def process_gpu_tool_output_nvidia(gpu_pci_address, gpu_tool_output, nvidia_smi_encoder_decoder):
def process_gpu_tool_output_nvidia(gpu_pci_address, gpu_tool_output_nvidia_smi, nvidia_smi_encoder_decoder):
"""
Get values from command output if there was no error when running the command.
"""
......@@ -3325,15 +3397,15 @@ def process_gpu_tool_output_nvidia(gpu_pci_address, gpu_tool_output, nvidia_smi_
gpu_enforced_power_limit = "-"
if gpu_tool_output != "-":
if gpu_tool_output_nvidia_smi != "-":
# Get line number of the selected GPU by using its PCI address.
for i, line in enumerate(gpu_tool_output):
for i, line in enumerate(gpu_tool_output_nvidia_smi):
if gpu_pci_address in line or gpu_pci_address.upper() in line:
gpu_info_line_no = i
break
gpu_tool_output_for_selected_gpu = gpu_tool_output[gpu_info_line_no].split(",")
gpu_tool_output_for_selected_gpu = gpu_tool_output_nvidia_smi[gpu_info_line_no].split(",")
if len(gpu_tool_output_for_selected_gpu) == 18:
gpu_driver_version = gpu_tool_output_for_selected_gpu[2].strip()
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment