[gnome-remote-desktop] hwaccel-nvidia: Push and pop CUDA context to and from the EGL thread
- From: Jonas Ådahl <jadahl src gnome org>
- To: commits-list gnome org
- Cc:
- Subject: [gnome-remote-desktop] hwaccel-nvidia: Push and pop CUDA context to and from the EGL thread
- Date: Thu, 3 Mar 2022 14:23:08 +0000 (UTC)
commit 2de749d5817e06d7175b0310e6eafb05846e9ee4
Author: Pascal Nowack <Pascal Nowack gmx de>
Date: Sun Jan 2 10:05:55 2022 +0100
hwaccel-nvidia: Push and pop CUDA context to and from the EGL thread
Registering and mapping GL resources can only be done in the EGL thread.
In order to be able to do that, the CUDA context needs to be pushed to
the EGL thread.
So, add the handling to do that.
src/grd-hwaccel-nvidia.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 56 insertions(+)
---
diff --git a/src/grd-hwaccel-nvidia.c b/src/grd-hwaccel-nvidia.c
index 8aa4fd78..44862523 100644
--- a/src/grd-hwaccel-nvidia.c
+++ b/src/grd-hwaccel-nvidia.c
@@ -64,6 +64,8 @@ struct _GrdHwAccelNvidia
{
GObject parent;
+ GrdEglThread *egl_thread;
+
CudaFunctions *cuda_funcs;
NvencFunctions *nvenc_funcs;
NV_ENCODE_API_FUNCTION_LIST nvenc_api;
@@ -601,6 +603,54 @@ get_cuda_devices_from_gl_context (GrdHwAccelNvidia *hwaccel_nvidia,
return success;
}
+/*
+ * Task callback executed on the EGL thread.
+ *
+ * Makes the hwaccel's CUDA context current on the EGL thread (per the
+ * commit rationale: registering and mapping GL resources can only be
+ * done in the EGL thread, so the CUDA context must be pushed there).
+ *
+ * user_data: the GrdHwAccelNvidia instance.
+ * Returns: TRUE to report success to the task's completion callback.
+ */
+static gboolean
+push_cuda_context_in_egl_thread (gpointer user_data)
+{
+  GrdHwAccelNvidia *hwaccel_nvidia = user_data;
+
+  grd_hwaccel_nvidia_push_cuda_context (hwaccel_nvidia);
+
+  return TRUE;
+}
+
+/*
+ * Task callback executed on the EGL thread.
+ *
+ * Pops the hwaccel's CUDA context off the EGL thread again; counterpart
+ * to push_cuda_context_in_egl_thread(), invoked during dispose.
+ *
+ * user_data: the GrdHwAccelNvidia instance.
+ * Returns: TRUE to report success to the task's completion callback.
+ */
+static gboolean
+pop_cuda_context_in_egl_thread (gpointer user_data)
+{
+  GrdHwAccelNvidia *hwaccel_nvidia = user_data;
+
+  grd_hwaccel_nvidia_pop_cuda_context (hwaccel_nvidia);
+
+  return TRUE;
+}
+
+/*
+ * Completion callback for EGL-thread custom tasks.
+ *
+ * Marks the GrdSyncPoint passed as user_data as complete with the
+ * task's success value, waking the thread blocked in
+ * grd_sync_point_wait_for_completion().
+ */
+static void
+complete_sync (gboolean success,
+               gpointer user_data)
+{
+  GrdSyncPoint *sync_point = user_data;
+
+  grd_sync_point_complete (sync_point, success);
+}
+
+/*
+ * Runs @function synchronously on the EGL thread.
+ *
+ * Queues the function as a custom task on hwaccel_nvidia->egl_thread
+ * (with the hwaccel instance as its user data) and blocks the calling
+ * thread on a stack-allocated GrdSyncPoint until complete_sync() fires.
+ *
+ * NOTE(review): the task's success value is discarded here — presumably
+ * acceptable since both current callbacks always return TRUE.
+ */
+static void
+run_function_in_egl_thread (GrdHwAccelNvidia        *hwaccel_nvidia,
+                            GrdEglThreadCustomFunc   function)
+{
+  GrdSyncPoint sync_point = {};
+
+  grd_sync_point_init (&sync_point);
+
+  grd_egl_thread_run_custom_task (hwaccel_nvidia->egl_thread,
+                                  function,
+                                  hwaccel_nvidia,
+                                  complete_sync,
+                                  &sync_point,
+                                  NULL);
+
+  grd_sync_point_wait_for_completion (&sync_point);
+  grd_sync_point_clear (&sync_point);
+}
+
GrdHwAccelNvidia *
grd_hwaccel_nvidia_new (GrdEglThread *egl_thread)
{
@@ -617,6 +667,8 @@ grd_hwaccel_nvidia_new (GrdEglThread *egl_thread)
unsigned int i;
hwaccel_nvidia = g_object_new (GRD_TYPE_HWACCEL_NVIDIA, NULL);
+ hwaccel_nvidia->egl_thread = egl_thread;
+
cuda_load_functions (&hwaccel_nvidia->cuda_funcs, NULL);
nvenc_load_functions (&hwaccel_nvidia->nvenc_funcs, NULL);
@@ -693,6 +745,8 @@ grd_hwaccel_nvidia_new (GrdEglThread *egl_thread)
return NULL;
}
+ run_function_in_egl_thread (hwaccel_nvidia, push_cuda_context_in_egl_thread);
+
hwaccel_nvidia->initialized = TRUE;
avc_ptx_path = g_strdup_printf ("%s/grd-cuda-avc-utils_30.ptx", GRD_DATA_DIR);
@@ -724,6 +778,8 @@ grd_hwaccel_nvidia_dispose (GObject *object)
if (hwaccel_nvidia->initialized)
{
+ run_function_in_egl_thread (hwaccel_nvidia, pop_cuda_context_in_egl_thread);
+
hwaccel_nvidia->cuda_funcs->cuCtxPopCurrent (&hwaccel_nvidia->cu_context);
hwaccel_nvidia->cuda_funcs->cuDevicePrimaryCtxRelease (hwaccel_nvidia->cu_device);
[Date Prev][Date Next] [Thread Prev][Thread Next] [Thread Index] [Date Index] [Author Index]