[gnome-remote-desktop] Rename GrdRdpNvEnc to GrdHwAccelNvidia
- From: Jonas Ådahl <jadahl src gnome org>
- To: commits-list gnome org
- Cc:
- Subject: [gnome-remote-desktop] Rename GrdRdpNvEnc to GrdHwAccelNvidia
- Date: Wed, 16 Feb 2022 09:17:08 +0000 (UTC)
commit 01bf87b0b6d3c5a7bbcbfc8bf906c09b36d0689b
Author: Pascal Nowack <Pascal Nowack gmx de>
Date: Thu Dec 30 10:44:29 2021 +0100
Rename GrdRdpNvEnc to GrdHwAccelNvidia
Hardware acceleration on NVIDIA GPU based systems will later be used not
just for NVENC, but for plain CUDA too.
Also, this class itself does not depend on the RDP backend.
So, change the name accordingly.
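For context, a minimal caller sketch of the renamed API, built only from the entry points declared in grd-hwaccel-nvidia.h as introduced by this commit; the helper name encode_one_frame_sketch and the 60 Hz refresh rate are illustrative assumptions, not part of the change.

/* Sketch of a caller of the renamed GrdHwAccelNvidia API (assumed usage). */
#include <glib.h>
#include "grd-hwaccel-nvidia.h"

static void
encode_one_frame_sketch (uint8_t  *bgrx_data,
                         uint16_t  width,
                         uint16_t  height)
{
  g_autoptr (GrdHwAccelNvidia) hwaccel_nvidia = NULL;
  g_autofree uint8_t *bitstream = NULL;
  uint32_t bitstream_size = 0;
  uint32_t encode_session_id;
  uint16_t aligned_width, aligned_height;

  /* Returns NULL when no suitable CUDA device or NVENC library is found. */
  hwaccel_nvidia = grd_hwaccel_nvidia_new ();
  if (!hwaccel_nvidia)
    return;

  /* Make the CUDA context current on this thread, mirroring what
   * graphics_thread_func () does in grd-session-rdp.c. */
  grd_hwaccel_nvidia_push_cuda_context (hwaccel_nvidia);

  if (grd_hwaccel_nvidia_create_nvenc_session (hwaccel_nvidia,
                                               &encode_session_id,
                                               width, height,
                                               60 /* refresh rate, assumed */))
    {
      /* Same 16/64 alignment the implementation applies internally. */
      aligned_width = width + (width % 16 ? 16 - width % 16 : 0);
      aligned_height = height + (height % 64 ? 64 - height % 64 : 0);

      if (grd_hwaccel_nvidia_avc420_encode_bgrx_frame (hwaccel_nvidia,
                                                       encode_session_id,
                                                       bgrx_data,
                                                       width, height,
                                                       aligned_width,
                                                       aligned_height,
                                                       &bitstream,
                                                       &bitstream_size))
        g_debug ("Encoded AVC420 frame: %u bytes", bitstream_size);

      grd_hwaccel_nvidia_free_nvenc_session (hwaccel_nvidia,
                                             encode_session_id);
    }

  grd_hwaccel_nvidia_pop_cuda_context (hwaccel_nvidia);
}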
config.h.meson | 4 +-
data/meson.build | 2 +-
meson.build | 14 +-
meson_options.txt | 2 +-
src/{grd-rdp-nvenc.c => grd-hwaccel-nvidia.c} | 299 +++++++++++++-------------
src/grd-hwaccel-nvidia.h | 55 +++++
src/grd-rdp-graphics-pipeline.c | 78 +++----
src/grd-rdp-graphics-pipeline.h | 8 +-
src/grd-rdp-nvenc.h | 55 -----
src/grd-rdp-server.c | 31 +--
src/grd-session-rdp.c | 48 ++---
src/grd-session-rdp.h | 6 +-
src/grd-types.h | 2 +-
src/meson.build | 8 +-
14 files changed, 306 insertions(+), 306 deletions(-)
---
diff --git a/config.h.meson b/config.h.meson
index 98647d79..fb558762 100644
--- a/config.h.meson
+++ b/config.h.meson
@@ -12,8 +12,8 @@
/* Defined if VNC backend is enabled */
#mesondefine HAVE_VNC
-/* Defined if NVENC is available */
-#mesondefine HAVE_NVENC
+/* Defined if CUDA is available */
+#mesondefine HAVE_HWACCEL_NVIDIA
/* Path of the data dir */
#mesondefine GRD_DATA_DIR
diff --git a/data/meson.build b/data/meson.build
index a65423c1..faa51473 100644
--- a/data/meson.build
+++ b/data/meson.build
@@ -1,4 +1,4 @@
-if have_nvenc
+if have_hwaccel_nvidia
install_data(['grd-cuda-avc-utils_30.ptx'],
install_dir: grd_datadir,
)
diff --git a/meson.build b/meson.build
index df7cd195..36950656 100644
--- a/meson.build
+++ b/meson.build
@@ -4,9 +4,9 @@ project('gnome-remote-desktop', 'c',
default_options: ['warning_level=1',
'buildtype=debugoptimized'])
+cuda_req = '>= 11'
freerdp_req = '>= 2.4.1'
fuse_req = '>= 3.9.1'
-nvenc_req = '>= 11'
xkbcommon_req = '>= 1.0.0'
epoxy_req = '>= 1.4'
@@ -28,13 +28,13 @@ drm_dep = dependency('libdrm')
have_rdp = get_option('rdp')
have_vnc = get_option('vnc')
-have_nvenc = get_option('nvenc')
+have_hwaccel_nvidia = get_option('nvenc_and_cuda')
if not have_rdp and not have_vnc
error('Must enable at least one backend')
endif
-if have_nvenc and not have_rdp
+if have_hwaccel_nvidia and not have_rdp
error('Support for hardware acceleration using NVENC requires the RDP backend')
endif
@@ -48,9 +48,9 @@ if have_rdp
winpr_dep = dependency('winpr2', version: freerdp_req)
xkbcommon_dep = dependency('xkbcommon', version: xkbcommon_req)
- if have_nvenc
+ if have_hwaccel_nvidia
dl_dep = cc.find_library('dl', required: true)
- nvenc_dep = dependency('ffnvcodec', version: nvenc_req)
+ cuda_dep = dependency('ffnvcodec', version: cuda_req)
endif
endif
@@ -72,7 +72,7 @@ cdata.set_quoted('VERSION', meson.project_version())
cdata.set('HAVE_RDP', have_rdp)
cdata.set('HAVE_VNC', have_vnc)
-cdata.set('HAVE_NVENC', have_nvenc)
+cdata.set('HAVE_HWACCEL_NVIDIA', have_hwaccel_nvidia)
cdata.set_quoted('GRD_DATA_DIR', grd_datadir)
@@ -122,7 +122,7 @@ output = [
'',
' Options for the RDP backend:',
'',
- ' Support for hardware acceleration using NVENC and CUDA........' + have_nvenc.to_string(),
+ ' Support for hardware acceleration using NVENC and CUDA........' + have_hwaccel_nvidia.to_string(),
'',
' Now type \'ninja -C ' + meson.build_root() + '\' to build ' + meson.project_name(),
'',
diff --git a/meson_options.txt b/meson_options.txt
index b321070a..16d46d1b 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -8,7 +8,7 @@ option('vnc',
value: true,
description: 'Enable the VNC backend')
-option('nvenc',
+option('nvenc_and_cuda',
type: 'boolean',
value: true,
description: 'Build with support for hardware acceleration using NVENC and CUDA')
diff --git a/src/grd-rdp-nvenc.c b/src/grd-hwaccel-nvidia.c
similarity index 53%
rename from src/grd-rdp-nvenc.c
rename to src/grd-hwaccel-nvidia.c
index d9e916be..c68bf2c8 100644
--- a/src/grd-rdp-nvenc.c
+++ b/src/grd-hwaccel-nvidia.c
@@ -19,7 +19,7 @@
#include "config.h"
-#include "grd-rdp-nvenc.h"
+#include "grd-hwaccel-nvidia.h"
#include <ffnvcodec/dynlink_loader.h>
@@ -33,7 +33,7 @@ typedef struct _NvEncEncodeSession
NV_ENC_OUTPUT_PTR buffer_out;
} NvEncEncodeSession;
-struct _GrdRdpNvenc
+struct _GrdHwAccelNvidia
{
GObject parent;
@@ -53,43 +53,45 @@ struct _GrdRdpNvenc
uint32_t next_encode_session_id;
};
-G_DEFINE_TYPE (GrdRdpNvenc, grd_rdp_nvenc, G_TYPE_OBJECT)
+G_DEFINE_TYPE (GrdHwAccelNvidia, grd_hwaccel_nvidia, G_TYPE_OBJECT)
void
-grd_rdp_nvenc_push_cuda_context (GrdRdpNvenc *rdp_nvenc)
+grd_hwaccel_nvidia_push_cuda_context (GrdHwAccelNvidia *hwaccel_nvidia)
{
- if (rdp_nvenc->cuda_funcs->cuCtxPushCurrent (rdp_nvenc->cu_context) != CUDA_SUCCESS)
+ CudaFunctions *cuda_funcs = hwaccel_nvidia->cuda_funcs;
+
+ if (cuda_funcs->cuCtxPushCurrent (hwaccel_nvidia->cu_context) != CUDA_SUCCESS)
g_error ("[HWAccel.CUDA] Failed to push CUDA context");
}
void
-grd_rdp_nvenc_pop_cuda_context (GrdRdpNvenc *rdp_nvenc)
+grd_hwaccel_nvidia_pop_cuda_context (GrdHwAccelNvidia *hwaccel_nvidia)
{
CUcontext cu_context;
- rdp_nvenc->cuda_funcs->cuCtxPopCurrent (&cu_context);
+ hwaccel_nvidia->cuda_funcs->cuCtxPopCurrent (&cu_context);
}
static uint32_t
-get_next_free_encode_session_id (GrdRdpNvenc *rdp_nvenc)
+get_next_free_encode_session_id (GrdHwAccelNvidia *hwaccel_nvidia)
{
- uint32_t encode_session_id = rdp_nvenc->next_encode_session_id;
+ uint32_t encode_session_id = hwaccel_nvidia->next_encode_session_id;
- while (g_hash_table_contains (rdp_nvenc->encode_sessions,
+ while (g_hash_table_contains (hwaccel_nvidia->encode_sessions,
GUINT_TO_POINTER (encode_session_id)))
++encode_session_id;
- rdp_nvenc->next_encode_session_id = encode_session_id + 1;
+ hwaccel_nvidia->next_encode_session_id = encode_session_id + 1;
return encode_session_id;
}
gboolean
-grd_rdp_nvenc_create_encode_session (GrdRdpNvenc *rdp_nvenc,
- uint32_t *encode_session_id,
- uint16_t surface_width,
- uint16_t surface_height,
- uint16_t refresh_rate)
+grd_hwaccel_nvidia_create_nvenc_session (GrdHwAccelNvidia *hwaccel_nvidia,
+ uint32_t *encode_session_id,
+ uint16_t surface_width,
+ uint16_t surface_height,
+ uint16_t refresh_rate)
{
NvEncEncodeSession *encode_session;
NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS open_params = {0};
@@ -102,17 +104,17 @@ grd_rdp_nvenc_create_encode_session (GrdRdpNvenc *rdp_nvenc,
aligned_width = surface_width + (surface_width % 16 ? 16 - surface_width % 16 : 0);
aligned_height = surface_height + (surface_height % 64 ? 64 - surface_height % 64 : 0);
- *encode_session_id = get_next_free_encode_session_id (rdp_nvenc);
+ *encode_session_id = get_next_free_encode_session_id (hwaccel_nvidia);
encode_session = g_malloc0 (sizeof (NvEncEncodeSession));
encode_session->enc_width = aligned_width;
encode_session->enc_height = aligned_height;
open_params.version = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
open_params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
- open_params.device = rdp_nvenc->cu_context;
+ open_params.device = hwaccel_nvidia->cu_context;
open_params.apiVersion = NVENCAPI_VERSION;
- if (rdp_nvenc->nvenc_api.nvEncOpenEncodeSessionEx (
+ if (hwaccel_nvidia->nvenc_api.nvEncOpenEncodeSessionEx (
&open_params, &encode_session->encoder) != NV_ENC_SUCCESS)
{
g_debug ("[HWAccel.NVENC] Failed to open encode session");
@@ -144,39 +146,39 @@ grd_rdp_nvenc_create_encode_session (GrdRdpNvenc *rdp_nvenc,
init_params.frameRateDen = 1;
init_params.enablePTD = 1;
init_params.encodeConfig = &encode_config;
- if (rdp_nvenc->nvenc_api.nvEncInitializeEncoder (
+ if (hwaccel_nvidia->nvenc_api.nvEncInitializeEncoder (
encode_session->encoder, &init_params) != NV_ENC_SUCCESS)
{
NV_ENC_PIC_PARAMS pic_params = {0};
g_warning ("[HWAccel.NVENC] Failed to initialize encoder");
pic_params.encodePicFlags = NV_ENC_PIC_FLAG_EOS;
- rdp_nvenc->nvenc_api.nvEncEncodePicture (encode_session->encoder,
- &pic_params);
- rdp_nvenc->nvenc_api.nvEncDestroyEncoder (encode_session->encoder);
+ hwaccel_nvidia->nvenc_api.nvEncEncodePicture (encode_session->encoder,
+ &pic_params);
+ hwaccel_nvidia->nvenc_api.nvEncDestroyEncoder (encode_session->encoder);
g_free (encode_session);
return FALSE;
}
create_bitstream_buffer.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER;
- if (rdp_nvenc->nvenc_api.nvEncCreateBitstreamBuffer (
+ if (hwaccel_nvidia->nvenc_api.nvEncCreateBitstreamBuffer (
encode_session->encoder, &create_bitstream_buffer) != NV_ENC_SUCCESS)
{
NV_ENC_PIC_PARAMS pic_params = {0};
g_warning ("[HWAccel.NVENC] Failed to create bitstream buffer");
pic_params.encodePicFlags = NV_ENC_PIC_FLAG_EOS;
- rdp_nvenc->nvenc_api.nvEncEncodePicture (encode_session->encoder,
- &pic_params);
- rdp_nvenc->nvenc_api.nvEncDestroyEncoder (encode_session->encoder);
+ hwaccel_nvidia->nvenc_api.nvEncEncodePicture (encode_session->encoder,
+ &pic_params);
+ hwaccel_nvidia->nvenc_api.nvEncDestroyEncoder (encode_session->encoder);
g_free (encode_session);
return FALSE;
}
encode_session->buffer_out = create_bitstream_buffer.bitstreamBuffer;
- g_hash_table_insert (rdp_nvenc->encode_sessions,
+ g_hash_table_insert (hwaccel_nvidia->encode_sessions,
GUINT_TO_POINTER (*encode_session_id),
encode_session);
@@ -184,38 +186,38 @@ grd_rdp_nvenc_create_encode_session (GrdRdpNvenc *rdp_nvenc,
}
void
-grd_rdp_nvenc_free_encode_session (GrdRdpNvenc *rdp_nvenc,
- uint32_t encode_session_id)
+grd_hwaccel_nvidia_free_nvenc_session (GrdHwAccelNvidia *hwaccel_nvidia,
+ uint32_t encode_session_id)
{
NvEncEncodeSession *encode_session;
NV_ENC_PIC_PARAMS pic_params = {0};
- if (!g_hash_table_steal_extended (rdp_nvenc->encode_sessions,
+ if (!g_hash_table_steal_extended (hwaccel_nvidia->encode_sessions,
GUINT_TO_POINTER (encode_session_id),
NULL, (gpointer *) &encode_session))
return;
- rdp_nvenc->nvenc_api.nvEncDestroyBitstreamBuffer (encode_session->encoder,
- encode_session->buffer_out);
+ hwaccel_nvidia->nvenc_api.nvEncDestroyBitstreamBuffer (encode_session->encoder,
+ encode_session->buffer_out);
pic_params.encodePicFlags = NV_ENC_PIC_FLAG_EOS;
- rdp_nvenc->nvenc_api.nvEncEncodePicture (encode_session->encoder,
- &pic_params);
- rdp_nvenc->nvenc_api.nvEncDestroyEncoder (encode_session->encoder);
+ hwaccel_nvidia->nvenc_api.nvEncEncodePicture (encode_session->encoder,
+ &pic_params);
+ hwaccel_nvidia->nvenc_api.nvEncDestroyEncoder (encode_session->encoder);
g_free (encode_session);
}
gboolean
-grd_rdp_nvenc_avc420_encode_bgrx_frame (GrdRdpNvenc *rdp_nvenc,
- uint32_t encode_session_id,
- uint8_t *src_data,
- uint16_t src_width,
- uint16_t src_height,
- uint16_t aligned_width,
- uint16_t aligned_height,
- uint8_t **bitstream,
- uint32_t *bitstream_size)
+grd_hwaccel_nvidia_avc420_encode_bgrx_frame (GrdHwAccelNvidia *hwaccel_nvidia,
+ uint32_t encode_session_id,
+ uint8_t *src_data,
+ uint16_t src_width,
+ uint16_t src_height,
+ uint16_t aligned_width,
+ uint16_t aligned_height,
+ uint8_t **bitstream,
+ uint32_t *bitstream_size)
{
NvEncEncodeSession *encode_session;
CUDA_MEMCPY2D cu_memcpy_2d = {0};
@@ -230,7 +232,7 @@ grd_rdp_nvenc_avc420_encode_bgrx_frame (GrdRdpNvenc *rdp_nvenc,
unsigned int block_dim_x, block_dim_y, block_dim_z;
void *args[8];
- if (!g_hash_table_lookup_extended (rdp_nvenc->encode_sessions,
+ if (!g_hash_table_lookup_extended (hwaccel_nvidia->encode_sessions,
GUINT_TO_POINTER (encode_session_id),
NULL, (gpointer *) &encode_session))
return FALSE;
@@ -238,17 +240,17 @@ grd_rdp_nvenc_avc420_encode_bgrx_frame (GrdRdpNvenc *rdp_nvenc,
g_assert (encode_session->enc_width == aligned_width);
g_assert (encode_session->enc_height == aligned_height);
- if (rdp_nvenc->cuda_funcs->cuStreamCreate (&cu_stream, 0) != CUDA_SUCCESS)
+ if (hwaccel_nvidia->cuda_funcs->cuStreamCreate (&cu_stream, 0) != CUDA_SUCCESS)
{
g_warning ("[HWAccel.CUDA] Failed to create stream");
return FALSE;
}
- if (rdp_nvenc->cuda_funcs->cuMemAllocPitch (
+ if (hwaccel_nvidia->cuda_funcs->cuMemAllocPitch (
&bgrx_buffer, &bgrx_pitch, src_width * 4, src_height, 4) != CUDA_SUCCESS)
{
g_warning ("[HWAccel.CUDA] Failed to allocate BGRX buffer");
- rdp_nvenc->cuda_funcs->cuStreamDestroy (cu_stream);
+ hwaccel_nvidia->cuda_funcs->cuStreamDestroy (cu_stream);
return FALSE;
}
@@ -263,23 +265,23 @@ grd_rdp_nvenc_avc420_encode_bgrx_frame (GrdRdpNvenc *rdp_nvenc,
cu_memcpy_2d.WidthInBytes = src_width * 4;
cu_memcpy_2d.Height = src_height;
- if (rdp_nvenc->cuda_funcs->cuMemcpy2DAsync (
+ if (hwaccel_nvidia->cuda_funcs->cuMemcpy2DAsync (
&cu_memcpy_2d, cu_stream) != CUDA_SUCCESS)
{
g_warning ("[HWAccel.CUDA] Failed to initiate H2D copy");
- rdp_nvenc->cuda_funcs->cuMemFree (bgrx_buffer);
- rdp_nvenc->cuda_funcs->cuStreamDestroy (cu_stream);
+ hwaccel_nvidia->cuda_funcs->cuMemFree (bgrx_buffer);
+ hwaccel_nvidia->cuda_funcs->cuStreamDestroy (cu_stream);
return FALSE;
}
- if (rdp_nvenc->cuda_funcs->cuMemAllocPitch (
+ if (hwaccel_nvidia->cuda_funcs->cuMemAllocPitch (
&nv12_buffer, &nv12_pitch,
aligned_width, aligned_height + aligned_height / 2, 4) != CUDA_SUCCESS)
{
g_warning ("[HWAccel.CUDA] Failed to allocate NV12 buffer");
- rdp_nvenc->cuda_funcs->cuStreamSynchronize (cu_stream);
- rdp_nvenc->cuda_funcs->cuMemFree (bgrx_buffer);
- rdp_nvenc->cuda_funcs->cuStreamDestroy (cu_stream);
+ hwaccel_nvidia->cuda_funcs->cuStreamSynchronize (cu_stream);
+ hwaccel_nvidia->cuda_funcs->cuMemFree (bgrx_buffer);
+ hwaccel_nvidia->cuda_funcs->cuStreamDestroy (cu_stream);
return FALSE;
}
@@ -303,29 +305,29 @@ grd_rdp_nvenc_avc420_encode_bgrx_frame (GrdRdpNvenc *rdp_nvenc,
args[6] = &aligned_height;
args[7] = &aligned_width;
- if (rdp_nvenc->cuda_funcs->cuLaunchKernel (
- rdp_nvenc->cu_bgrx_to_yuv420, grid_dim_x, grid_dim_y, grid_dim_z,
+ if (hwaccel_nvidia->cuda_funcs->cuLaunchKernel (
+ hwaccel_nvidia->cu_bgrx_to_yuv420, grid_dim_x, grid_dim_y, grid_dim_z,
block_dim_x, block_dim_y, block_dim_z, 0, cu_stream, args, NULL) != CUDA_SUCCESS)
{
g_warning ("[HWAccel.CUDA] Failed to launch BGRX_TO_YUV420 kernel");
- rdp_nvenc->cuda_funcs->cuStreamSynchronize (cu_stream);
- rdp_nvenc->cuda_funcs->cuMemFree (nv12_buffer);
- rdp_nvenc->cuda_funcs->cuMemFree (bgrx_buffer);
- rdp_nvenc->cuda_funcs->cuStreamDestroy (cu_stream);
+ hwaccel_nvidia->cuda_funcs->cuStreamSynchronize (cu_stream);
+ hwaccel_nvidia->cuda_funcs->cuMemFree (nv12_buffer);
+ hwaccel_nvidia->cuda_funcs->cuMemFree (bgrx_buffer);
+ hwaccel_nvidia->cuda_funcs->cuStreamDestroy (cu_stream);
return FALSE;
}
- if (rdp_nvenc->cuda_funcs->cuStreamSynchronize (cu_stream) != CUDA_SUCCESS)
+ if (hwaccel_nvidia->cuda_funcs->cuStreamSynchronize (cu_stream) != CUDA_SUCCESS)
{
g_warning ("[HWAccel.CUDA] Failed to synchronize stream");
- rdp_nvenc->cuda_funcs->cuMemFree (nv12_buffer);
- rdp_nvenc->cuda_funcs->cuMemFree (bgrx_buffer);
- rdp_nvenc->cuda_funcs->cuStreamDestroy (cu_stream);
+ hwaccel_nvidia->cuda_funcs->cuMemFree (nv12_buffer);
+ hwaccel_nvidia->cuda_funcs->cuMemFree (bgrx_buffer);
+ hwaccel_nvidia->cuda_funcs->cuStreamDestroy (cu_stream);
return FALSE;
}
- rdp_nvenc->cuda_funcs->cuStreamDestroy (cu_stream);
- rdp_nvenc->cuda_funcs->cuMemFree (bgrx_buffer);
+ hwaccel_nvidia->cuda_funcs->cuStreamDestroy (cu_stream);
+ hwaccel_nvidia->cuda_funcs->cuMemFree (bgrx_buffer);
register_res.version = NV_ENC_REGISTER_RESOURCE_VER;
register_res.resourceType = NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR;
@@ -336,24 +338,24 @@ grd_rdp_nvenc_avc420_encode_bgrx_frame (GrdRdpNvenc *rdp_nvenc,
register_res.bufferFormat = NV_ENC_BUFFER_FORMAT_NV12;
register_res.bufferUsage = NV_ENC_INPUT_IMAGE;
- if (rdp_nvenc->nvenc_api.nvEncRegisterResource (
+ if (hwaccel_nvidia->nvenc_api.nvEncRegisterResource (
encode_session->encoder, &register_res) != NV_ENC_SUCCESS)
{
g_warning ("[HWAccel.NVENC] Failed to register resource");
- rdp_nvenc->cuda_funcs->cuMemFree (nv12_buffer);
+ hwaccel_nvidia->cuda_funcs->cuMemFree (nv12_buffer);
return FALSE;
}
map_input_res.version = NV_ENC_MAP_INPUT_RESOURCE_VER;
map_input_res.registeredResource = register_res.registeredResource;
- if (rdp_nvenc->nvenc_api.nvEncMapInputResource (
+ if (hwaccel_nvidia->nvenc_api.nvEncMapInputResource (
encode_session->encoder, &map_input_res) != NV_ENC_SUCCESS)
{
g_warning ("[HWAccel.NVENC] Failed to map input resource");
- rdp_nvenc->nvenc_api.nvEncUnregisterResource (encode_session->encoder,
- register_res.registeredResource);
- rdp_nvenc->cuda_funcs->cuMemFree (nv12_buffer);
+ hwaccel_nvidia->nvenc_api.nvEncUnregisterResource (encode_session->encoder,
+ register_res.registeredResource);
+ hwaccel_nvidia->cuda_funcs->cuMemFree (nv12_buffer);
return FALSE;
}
@@ -366,189 +368,184 @@ grd_rdp_nvenc_avc420_encode_bgrx_frame (GrdRdpNvenc *rdp_nvenc,
pic_params.bufferFmt = map_input_res.mappedBufferFmt;
pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FRAME;
- if (rdp_nvenc->nvenc_api.nvEncEncodePicture (
+ if (hwaccel_nvidia->nvenc_api.nvEncEncodePicture (
encode_session->encoder, &pic_params) != NV_ENC_SUCCESS)
{
g_warning ("[HWAccel.NVENC] Failed to encode frame");
- rdp_nvenc->nvenc_api.nvEncUnmapInputResource (encode_session->encoder,
- map_input_res.mappedResource);
- rdp_nvenc->nvenc_api.nvEncUnregisterResource (encode_session->encoder,
- register_res.registeredResource);
- rdp_nvenc->cuda_funcs->cuMemFree (nv12_buffer);
+ hwaccel_nvidia->nvenc_api.nvEncUnmapInputResource (encode_session->encoder,
+ map_input_res.mappedResource);
+ hwaccel_nvidia->nvenc_api.nvEncUnregisterResource (encode_session->encoder,
+ register_res.registeredResource);
+ hwaccel_nvidia->cuda_funcs->cuMemFree (nv12_buffer);
return FALSE;
}
lock_bitstream.version = NV_ENC_LOCK_BITSTREAM_VER;
lock_bitstream.outputBitstream = encode_session->buffer_out;
- if (rdp_nvenc->nvenc_api.nvEncLockBitstream (
+ if (hwaccel_nvidia->nvenc_api.nvEncLockBitstream (
encode_session->encoder, &lock_bitstream) != NV_ENC_SUCCESS)
{
g_warning ("[HWAccel.NVENC] Failed to lock bitstream");
- rdp_nvenc->nvenc_api.nvEncUnmapInputResource (encode_session->encoder,
- map_input_res.mappedResource);
- rdp_nvenc->nvenc_api.nvEncUnregisterResource (encode_session->encoder,
- register_res.registeredResource);
- rdp_nvenc->cuda_funcs->cuMemFree (nv12_buffer);
+ hwaccel_nvidia->nvenc_api.nvEncUnmapInputResource (encode_session->encoder,
+ map_input_res.mappedResource);
+ hwaccel_nvidia->nvenc_api.nvEncUnregisterResource (encode_session->encoder,
+ register_res.registeredResource);
+ hwaccel_nvidia->cuda_funcs->cuMemFree (nv12_buffer);
return FALSE;
}
*bitstream_size = lock_bitstream.bitstreamSizeInBytes;
*bitstream = g_memdup2 (lock_bitstream.bitstreamBufferPtr, *bitstream_size);
- rdp_nvenc->nvenc_api.nvEncUnlockBitstream (encode_session->encoder,
- lock_bitstream.outputBitstream);
+ hwaccel_nvidia->nvenc_api.nvEncUnlockBitstream (encode_session->encoder,
+ lock_bitstream.outputBitstream);
- rdp_nvenc->nvenc_api.nvEncUnmapInputResource (encode_session->encoder,
- map_input_res.mappedResource);
- rdp_nvenc->nvenc_api.nvEncUnregisterResource (encode_session->encoder,
- register_res.registeredResource);
- rdp_nvenc->cuda_funcs->cuMemFree (nv12_buffer);
+ hwaccel_nvidia->nvenc_api.nvEncUnmapInputResource (encode_session->encoder,
+ map_input_res.mappedResource);
+ hwaccel_nvidia->nvenc_api.nvEncUnregisterResource (encode_session->encoder,
+ register_res.registeredResource);
+ hwaccel_nvidia->cuda_funcs->cuMemFree (nv12_buffer);
return TRUE;
}
-GrdRdpNvenc *
-grd_rdp_nvenc_new (void)
+GrdHwAccelNvidia *
+grd_hwaccel_nvidia_new (void)
{
- GrdRdpNvenc *rdp_nvenc;
- gboolean nvenc_device_found = FALSE;
+ g_autoptr (GrdHwAccelNvidia) hwaccel_nvidia = NULL;
+ gboolean cuda_device_found = FALSE;
CUdevice cu_device = 0;
int cu_device_count = 0;
+ CudaFunctions *cuda_funcs;
+ NvencFunctions *nvenc_funcs;
g_autofree char *avc_ptx_path = NULL;
g_autofree char *avc_ptx_instructions = NULL;
g_autoptr (GError) error = NULL;
int i;
- rdp_nvenc = g_object_new (GRD_TYPE_RDP_NVENC, NULL);
- cuda_load_functions (&rdp_nvenc->cuda_funcs, NULL);
- nvenc_load_functions (&rdp_nvenc->nvenc_funcs, NULL);
+ hwaccel_nvidia = g_object_new (GRD_TYPE_HWACCEL_NVIDIA, NULL);
+ cuda_load_functions (&hwaccel_nvidia->cuda_funcs, NULL);
+ nvenc_load_functions (&hwaccel_nvidia->nvenc_funcs, NULL);
- if (!rdp_nvenc->cuda_funcs || !rdp_nvenc->nvenc_funcs)
+ if (!hwaccel_nvidia->cuda_funcs || !hwaccel_nvidia->nvenc_funcs)
{
g_debug ("[HWAccel.CUDA] Failed to load CUDA or NVENC library");
- g_clear_object (&rdp_nvenc);
return NULL;
}
- rdp_nvenc->cuda_funcs->cuInit (0);
- rdp_nvenc->cuda_funcs->cuDeviceGetCount (&cu_device_count);
+ cuda_funcs = hwaccel_nvidia->cuda_funcs;
+ nvenc_funcs = hwaccel_nvidia->nvenc_funcs;
+
+ cuda_funcs->cuInit (0);
+ cuda_funcs->cuDeviceGetCount (&cu_device_count);
g_debug ("[HWAccel.CUDA] Found %i CUDA devices", cu_device_count);
for (i = 0; i < cu_device_count; ++i)
{
int cc_major = 0, cc_minor = 0;
- rdp_nvenc->cuda_funcs->cuDeviceGet (&cu_device, i);
- rdp_nvenc->cuda_funcs->cuDeviceComputeCapability (&cc_major, &cc_minor,
- cu_device);
+ cuda_funcs->cuDeviceGet (&cu_device, i);
+ cuda_funcs->cuDeviceComputeCapability (&cc_major, &cc_minor, cu_device);
g_debug ("[HWAccel.CUDA] Device %i compute capability: [%i, %i]",
i, cc_major, cc_minor);
if (cc_major >= 3)
{
- g_debug ("[HWAccel.NVENC] Choosing CUDA device with id %i", i);
- nvenc_device_found = TRUE;
+ g_debug ("[HWAccel.CUDA] Choosing CUDA device with id %i", i);
+ cuda_device_found = TRUE;
break;
}
}
- if (!cu_device_count || !nvenc_device_found)
+ if (!cu_device_count || !cuda_device_found)
{
- g_debug ("[HWAccel.NVENC] No NVENC capable gpu found");
- g_clear_object (&rdp_nvenc);
+ g_debug ("[HWAccel.CUDA] No appropriate CUDA capable gpu found");
return NULL;
}
- rdp_nvenc->cu_device = cu_device;
- if (rdp_nvenc->cuda_funcs->cuDevicePrimaryCtxRetain (
- &rdp_nvenc->cu_context, rdp_nvenc->cu_device) != CUDA_SUCCESS)
+ hwaccel_nvidia->cu_device = cu_device;
+ if (cuda_funcs->cuDevicePrimaryCtxRetain (&hwaccel_nvidia->cu_context,
+ hwaccel_nvidia->cu_device) != CUDA_SUCCESS)
{
g_warning ("[HWAccel.CUDA] Failed to retain CUDA context");
- g_clear_object (&rdp_nvenc);
return NULL;
}
- rdp_nvenc->nvenc_api.version = NV_ENCODE_API_FUNCTION_LIST_VER;
- if (rdp_nvenc->nvenc_funcs->NvEncodeAPICreateInstance (&rdp_nvenc->nvenc_api) != NV_ENC_SUCCESS)
+ hwaccel_nvidia->nvenc_api.version = NV_ENCODE_API_FUNCTION_LIST_VER;
+ if (nvenc_funcs->NvEncodeAPICreateInstance (&hwaccel_nvidia->nvenc_api) != NV_ENC_SUCCESS)
{
g_warning ("[HWAccel.NVENC] Could not create NVENC API instance");
-
- rdp_nvenc->cuda_funcs->cuDevicePrimaryCtxRelease (rdp_nvenc->cu_device);
- g_clear_object (&rdp_nvenc);
-
+ cuda_funcs->cuDevicePrimaryCtxRelease (hwaccel_nvidia->cu_device);
return NULL;
}
- if (rdp_nvenc->cuda_funcs->cuCtxPushCurrent (rdp_nvenc->cu_context) != CUDA_SUCCESS)
+ if (cuda_funcs->cuCtxPushCurrent (hwaccel_nvidia->cu_context) != CUDA_SUCCESS)
{
g_warning ("[HWAccel.CUDA] Failed to push CUDA context");
- rdp_nvenc->cuda_funcs->cuDevicePrimaryCtxRelease (rdp_nvenc->cu_device);
- g_clear_object (&rdp_nvenc);
+ cuda_funcs->cuDevicePrimaryCtxRelease (hwaccel_nvidia->cu_device);
return NULL;
}
- rdp_nvenc->initialized = TRUE;
+ hwaccel_nvidia->initialized = TRUE;
avc_ptx_path = g_strdup_printf ("%s/grd-cuda-avc-utils_30.ptx", GRD_DATA_DIR);
if (!g_file_get_contents (avc_ptx_path, &avc_ptx_instructions, NULL, &error))
g_error ("[HWAccel.CUDA] Failed to read PTX instructions: %s", error->message);
- if (rdp_nvenc->cuda_funcs->cuModuleLoadData (
- &rdp_nvenc->cu_module_avc_utils, avc_ptx_instructions) != CUDA_SUCCESS)
+ if (cuda_funcs->cuModuleLoadData (&hwaccel_nvidia->cu_module_avc_utils,
+ avc_ptx_instructions) != CUDA_SUCCESS)
{
g_warning ("[HWAccel.CUDA] Failed to load CUDA module");
- g_clear_object (&rdp_nvenc);
return NULL;
}
- if (rdp_nvenc->cuda_funcs->cuModuleGetFunction (
- &rdp_nvenc->cu_bgrx_to_yuv420, rdp_nvenc->cu_module_avc_utils,
- "convert_2x2_bgrx_area_to_yuv420_nv12") != CUDA_SUCCESS)
+ if (cuda_funcs->cuModuleGetFunction (&hwaccel_nvidia->cu_bgrx_to_yuv420,
+ hwaccel_nvidia->cu_module_avc_utils,
+ "convert_2x2_bgrx_area_to_yuv420_nv12") != CUDA_SUCCESS)
{
g_warning ("[HWAccel.CUDA] Failed to get AVC CUDA kernel");
- g_clear_object (&rdp_nvenc);
return NULL;
}
- return rdp_nvenc;
+ return g_steal_pointer (&hwaccel_nvidia);
}
static void
-grd_rdp_nvenc_dispose (GObject *object)
+grd_hwaccel_nvidia_dispose (GObject *object)
{
- GrdRdpNvenc *rdp_nvenc = GRD_RDP_NVENC (object);
+ GrdHwAccelNvidia *hwaccel_nvidia = GRD_HWACCEL_NVIDIA (object);
- if (rdp_nvenc->initialized)
+ if (hwaccel_nvidia->initialized)
{
- rdp_nvenc->cuda_funcs->cuCtxPopCurrent (&rdp_nvenc->cu_context);
- rdp_nvenc->cuda_funcs->cuDevicePrimaryCtxRelease (rdp_nvenc->cu_device);
+ hwaccel_nvidia->cuda_funcs->cuCtxPopCurrent (&hwaccel_nvidia->cu_context);
+ hwaccel_nvidia->cuda_funcs->cuDevicePrimaryCtxRelease (hwaccel_nvidia->cu_device);
- rdp_nvenc->initialized = FALSE;
+ hwaccel_nvidia->initialized = FALSE;
}
- g_clear_pointer (&rdp_nvenc->cu_module_avc_utils,
- rdp_nvenc->cuda_funcs->cuModuleUnload);
+ g_clear_pointer (&hwaccel_nvidia->cu_module_avc_utils,
+ hwaccel_nvidia->cuda_funcs->cuModuleUnload);
- nvenc_free_functions (&rdp_nvenc->nvenc_funcs);
- cuda_free_functions (&rdp_nvenc->cuda_funcs);
+ nvenc_free_functions (&hwaccel_nvidia->nvenc_funcs);
+ cuda_free_functions (&hwaccel_nvidia->cuda_funcs);
- g_assert (g_hash_table_size (rdp_nvenc->encode_sessions) == 0);
- g_clear_pointer (&rdp_nvenc->encode_sessions, g_hash_table_destroy);
+ g_assert (g_hash_table_size (hwaccel_nvidia->encode_sessions) == 0);
+ g_clear_pointer (&hwaccel_nvidia->encode_sessions, g_hash_table_destroy);
- G_OBJECT_CLASS (grd_rdp_nvenc_parent_class)->dispose (object);
+ G_OBJECT_CLASS (grd_hwaccel_nvidia_parent_class)->dispose (object);
}
static void
-grd_rdp_nvenc_init (GrdRdpNvenc *rdp_nvenc)
+grd_hwaccel_nvidia_init (GrdHwAccelNvidia *hwaccel_nvidia)
{
- rdp_nvenc->encode_sessions = g_hash_table_new (NULL, NULL);
+ hwaccel_nvidia->encode_sessions = g_hash_table_new (NULL, NULL);
}
static void
-grd_rdp_nvenc_class_init (GrdRdpNvencClass *klass)
+grd_hwaccel_nvidia_class_init (GrdHwAccelNvidiaClass *klass)
{
GObjectClass *object_class = G_OBJECT_CLASS (klass);
- object_class->dispose = grd_rdp_nvenc_dispose;
+ object_class->dispose = grd_hwaccel_nvidia_dispose;
}
diff --git a/src/grd-hwaccel-nvidia.h b/src/grd-hwaccel-nvidia.h
new file mode 100644
index 00000000..99565486
--- /dev/null
+++ b/src/grd-hwaccel-nvidia.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2021 Pascal Nowack
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef GRD_HWACCEL_NVIDIA_H
+#define GRD_HWACCEL_NVIDIA_H
+
+#include <glib-object.h>
+#include <stdint.h>
+
+#define GRD_TYPE_HWACCEL_NVIDIA (grd_hwaccel_nvidia_get_type ())
+G_DECLARE_FINAL_TYPE (GrdHwAccelNvidia, grd_hwaccel_nvidia,
+ GRD, HWACCEL_NVIDIA, GObject)
+
+GrdHwAccelNvidia *grd_hwaccel_nvidia_new (void);
+
+void grd_hwaccel_nvidia_push_cuda_context (GrdHwAccelNvidia *hwaccel_nvidia);
+
+void grd_hwaccel_nvidia_pop_cuda_context (GrdHwAccelNvidia *hwaccel_nvidia);
+
+gboolean grd_hwaccel_nvidia_create_nvenc_session (GrdHwAccelNvidia *hwaccel_nvidia,
+ uint32_t *encode_session_id,
+ uint16_t surface_width,
+ uint16_t surface_height,
+ uint16_t refresh_rate);
+
+void grd_hwaccel_nvidia_free_nvenc_session (GrdHwAccelNvidia *hwaccel_nvidia,
+ uint32_t encode_session_id);
+
+gboolean grd_hwaccel_nvidia_avc420_encode_bgrx_frame (GrdHwAccelNvidia *hwaccel_nvidia,
+ uint32_t encode_session_id,
+ uint8_t *src_data,
+ uint16_t src_width,
+ uint16_t src_height,
+ uint16_t aligned_width,
+ uint16_t aligned_height,
+ uint8_t **bitstream,
+ uint32_t *bitstream_size);
+
+#endif /* GRD_HWACCEL_NVIDIA_H */
diff --git a/src/grd-rdp-graphics-pipeline.c b/src/grd-rdp-graphics-pipeline.c
index 8ef46a7d..1ea82630 100644
--- a/src/grd-rdp-graphics-pipeline.c
+++ b/src/grd-rdp-graphics-pipeline.c
@@ -29,9 +29,9 @@
#include "grd-rdp-surface.h"
#include "grd-session-rdp.h"
-#ifdef HAVE_NVENC
-#include "grd-rdp-nvenc.h"
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+#include "grd-hwaccel-nvidia.h"
+#endif /* HAVE_HWACCEL_NVIDIA */
#define ENC_TIMES_CHECK_INTERVAL_MS 1000
#define MAX_TRACKED_ENC_FRAMES 1000
@@ -99,9 +99,9 @@ struct _GrdRdpGraphicsPipeline
GQueue *enc_times;
GHashTable *surface_hwaccel_table;
-#ifdef HAVE_NVENC
- GrdRdpNvenc *rdp_nvenc;
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+ GrdHwAccelNvidia *hwaccel_nvidia;
+#endif /* HAVE_HWACCEL_NVIDIA */
uint32_t next_frame_id;
uint16_t next_surface_id;
@@ -110,14 +110,14 @@ struct _GrdRdpGraphicsPipeline
G_DEFINE_TYPE (GrdRdpGraphicsPipeline, grd_rdp_graphics_pipeline, G_TYPE_OBJECT)
-#ifdef HAVE_NVENC
+#ifdef HAVE_HWACCEL_NVIDIA
void
-grd_rdp_graphics_pipeline_set_nvenc (GrdRdpGraphicsPipeline *graphics_pipeline,
- GrdRdpNvenc *rdp_nvenc)
+grd_rdp_graphics_pipeline_set_hwaccel_nvidia (GrdRdpGraphicsPipeline *graphics_pipeline,
+ GrdHwAccelNvidia *hwaccel_nvidia)
{
- graphics_pipeline->rdp_nvenc = rdp_nvenc;
+ graphics_pipeline->hwaccel_nvidia = hwaccel_nvidia;
}
-#endif /* HAVE_NVENC */
+#endif /* HAVE_HWACCEL_NVIDIA */
void
grd_rdp_graphics_pipeline_create_surface (GrdRdpGraphicsPipeline *graphics_pipeline,
@@ -129,10 +129,10 @@ grd_rdp_graphics_pipeline_create_surface (GrdRdpGraphicsPipeline *graphics_pipel
uint16_t surface_id = grd_rdp_gfx_surface_get_surface_id (gfx_surface);
uint32_t surface_serial = grd_rdp_gfx_surface_get_serial (gfx_surface);
GfxSurfaceContext *surface_context;
-#ifdef HAVE_NVENC
+#ifdef HAVE_HWACCEL_NVIDIA
HWAccelContext *hwaccel_context;
uint32_t encode_session_id;
-#endif /* HAVE_NVENC */
+#endif /* HAVE_HWACCEL_NVIDIA */
surface_context = g_malloc0 (sizeof (GfxSurfaceContext));
@@ -144,16 +144,16 @@ grd_rdp_graphics_pipeline_create_surface (GrdRdpGraphicsPipeline *graphics_pipel
g_hash_table_insert (graphics_pipeline->serial_surface_table,
GUINT_TO_POINTER (surface_serial), surface_context);
-#ifdef HAVE_NVENC
+#ifdef HAVE_HWACCEL_NVIDIA
if ((rdpgfx_context->rdpcontext->settings->GfxAVC444v2 ||
rdpgfx_context->rdpcontext->settings->GfxAVC444 ||
rdpgfx_context->rdpcontext->settings->GfxH264) &&
- graphics_pipeline->rdp_nvenc &&
- grd_rdp_nvenc_create_encode_session (graphics_pipeline->rdp_nvenc,
- &encode_session_id,
- rdp_surface->width,
- rdp_surface->height,
- rdp_surface->refresh_rate))
+ graphics_pipeline->hwaccel_nvidia &&
+ grd_hwaccel_nvidia_create_nvenc_session (graphics_pipeline->hwaccel_nvidia,
+ &encode_session_id,
+ rdp_surface->width,
+ rdp_surface->height,
+ rdp_surface->refresh_rate))
{
g_debug ("[RDP.RDPGFX] Creating NVENC session for surface %u", surface_id);
@@ -164,7 +164,7 @@ grd_rdp_graphics_pipeline_create_surface (GrdRdpGraphicsPipeline *graphics_pipel
g_hash_table_insert (graphics_pipeline->surface_hwaccel_table,
GUINT_TO_POINTER (surface_id), hwaccel_context);
}
-#endif /* HAVE_NVENC */
+#endif /* HAVE_HWACCEL_NVIDIA */
g_mutex_unlock (&graphics_pipeline->gfx_mutex);
create_surface.surfaceId = surface_id;
@@ -184,9 +184,9 @@ grd_rdp_graphics_pipeline_delete_surface (GrdRdpGraphicsPipeline *graphics_pipel
RDPGFX_DELETE_SURFACE_PDU delete_surface = {0};
gboolean needs_encoding_context_deletion = FALSE;
GfxSurfaceContext *surface_context;
-#ifdef HAVE_NVENC
+#ifdef HAVE_HWACCEL_NVIDIA
HWAccelContext *hwaccel_context;
-#endif /* HAVE_NVENC */
+#endif /* HAVE_HWACCEL_NVIDIA */
uint16_t surface_id;
uint32_t codec_context_id;
uint32_t surface_serial;
@@ -211,7 +211,7 @@ grd_rdp_graphics_pipeline_delete_surface (GrdRdpGraphicsPipeline *graphics_pipel
GUINT_TO_POINTER (surface_serial));
}
-#ifdef HAVE_NVENC
+#ifdef HAVE_HWACCEL_NVIDIA
if (g_hash_table_steal_extended (graphics_pipeline->surface_hwaccel_table,
GUINT_TO_POINTER (surface_id),
NULL, (gpointer *) &hwaccel_context))
@@ -219,11 +219,11 @@ grd_rdp_graphics_pipeline_delete_surface (GrdRdpGraphicsPipeline *graphics_pipel
g_debug ("[RDP.RDPGFX] Destroying NVENC session for surface %u", surface_id);
g_assert (hwaccel_context->api == HW_ACCEL_API_NVENC);
- grd_rdp_nvenc_free_encode_session (graphics_pipeline->rdp_nvenc,
- hwaccel_context->encode_session_id);
+ grd_hwaccel_nvidia_free_nvenc_session (graphics_pipeline->hwaccel_nvidia,
+ hwaccel_context->encode_session_id);
g_free (hwaccel_context);
}
-#endif /* HAVE_NVENC */
+#endif /* HAVE_HWACCEL_NVIDIA */
if (g_hash_table_steal_extended (graphics_pipeline->codec_context_table,
GUINT_TO_POINTER (codec_context_id),
@@ -399,7 +399,7 @@ enqueue_tracked_frame_info (GrdRdpGraphicsPipeline *graphics_pipeline,
g_queue_push_tail (graphics_pipeline->encoded_frames, gfx_frame_info);
}
-#ifdef HAVE_NVENC
+#ifdef HAVE_HWACCEL_NVIDIA
static gboolean
refresh_gfx_surface_avc420 (GrdRdpGraphicsPipeline *graphics_pipeline,
HWAccelContext *hwaccel_context,
@@ -431,12 +431,12 @@ refresh_gfx_surface_avc420 (GrdRdpGraphicsPipeline *graphics_pipeline,
aligned_width = surface_width + (surface_width % 16 ? 16 - surface_width % 16 : 0);
aligned_height = surface_height + (surface_height % 64 ? 64 - surface_height % 64 : 0);
- if (!grd_rdp_nvenc_avc420_encode_bgrx_frame (graphics_pipeline->rdp_nvenc,
- hwaccel_context->encode_session_id,
- src_data,
- surface_width, surface_height,
- aligned_width, aligned_height,
- &avc420.data, &avc420.length))
+ if (!grd_hwaccel_nvidia_avc420_encode_bgrx_frame (graphics_pipeline->hwaccel_nvidia,
+ hwaccel_context->encode_session_id,
+ src_data,
+ surface_width, surface_height,
+ aligned_width, aligned_height,
+ &avc420.data, &avc420.length))
{
g_warning ("[RDP.RDPGFX] Failed to encode YUV420 frame");
return FALSE;
@@ -513,7 +513,7 @@ refresh_gfx_surface_avc420 (GrdRdpGraphicsPipeline *graphics_pipeline,
return TRUE;
}
-#endif /* HAVE_NVENC */
+#endif /* HAVE_HWACCEL_NVIDIA */
static gboolean
rfx_progressive_write_message (RFX_MESSAGE *rfx_message,
@@ -894,10 +894,10 @@ grd_rdp_graphics_pipeline_refresh_gfx (GrdRdpGraphicsPipeline *graphics_pipeline
RdpgfxServerContext *rdpgfx_context = graphics_pipeline->rdpgfx_context;
rdpSettings *rdp_settings = rdpgfx_context->rdpcontext->settings;
GrdSessionRdp *session_rdp = graphics_pipeline->session_rdp;
-#ifdef HAVE_NVENC
+#ifdef HAVE_HWACCEL_NVIDIA
HWAccelContext *hwaccel_context;
uint16_t surface_id;
-#endif /* HAVE_NVENC */
+#endif /* HAVE_HWACCEL_NVIDIA */
int64_t enc_time_us;
gboolean success;
@@ -919,7 +919,7 @@ grd_rdp_graphics_pipeline_refresh_gfx (GrdRdpGraphicsPipeline *graphics_pipeline
map_surface_to_output (graphics_pipeline, rdp_surface->gfx_surface);
}
-#ifdef HAVE_NVENC
+#ifdef HAVE_HWACCEL_NVIDIA
surface_id = grd_rdp_gfx_surface_get_surface_id (rdp_surface->gfx_surface);
if (rdp_settings->GfxH264 &&
g_hash_table_lookup_extended (graphics_pipeline->surface_hwaccel_table,
@@ -932,7 +932,7 @@ grd_rdp_graphics_pipeline_refresh_gfx (GrdRdpGraphicsPipeline *graphics_pipeline
&enc_time_us);
}
else
-#endif /* HAVE_NVENC */
+#endif /* HAVE_HWACCEL_NVIDIA */
{
success = refresh_gfx_surface_rfx_progressive (graphics_pipeline, rdp_surface,
region, src_data, &enc_time_us);
diff --git a/src/grd-rdp-graphics-pipeline.h b/src/grd-rdp-graphics-pipeline.h
index c4b0de0d..535022b2 100644
--- a/src/grd-rdp-graphics-pipeline.h
+++ b/src/grd-rdp-graphics-pipeline.h
@@ -41,10 +41,10 @@ GrdRdpGraphicsPipeline *grd_rdp_graphics_pipeline_new (GrdSessionRdp
void grd_rdp_graphics_pipeline_maybe_init (GrdRdpGraphicsPipeline *graphics_pipeline);
-#ifdef HAVE_NVENC
-void grd_rdp_graphics_pipeline_set_nvenc (GrdRdpGraphicsPipeline *graphics_pipeline,
- GrdRdpNvenc *rdp_nvenc);
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+void grd_rdp_graphics_pipeline_set_hwaccel_nvidia (GrdRdpGraphicsPipeline *graphics_pipeline,
+ GrdHwAccelNvidia *hwaccel_nvidia);
+#endif /* HAVE_HWACCEL_NVIDIA */
void grd_rdp_graphics_pipeline_create_surface (GrdRdpGraphicsPipeline *graphics_pipeline,
GrdRdpGfxSurface *gfx_surface);
diff --git a/src/grd-rdp-server.c b/src/grd-rdp-server.c
index 4b5d166f..bebe1032 100644
--- a/src/grd-rdp-server.c
+++ b/src/grd-rdp-server.c
@@ -28,9 +28,12 @@
#include <winpr/ssl.h>
#include "grd-context.h"
-#include "grd-rdp-nvenc.h"
#include "grd-session-rdp.h"
+#ifdef HAVE_HWACCEL_NVIDIA
+#include "grd-hwaccel-nvidia.h"
+#endif /* HAVE_HWACCEL_NVIDIA */
+
enum
{
PROP_0,
@@ -48,9 +51,9 @@ struct _GrdRdpServer
guint idle_task;
GrdContext *context;
-#ifdef HAVE_NVENC
- GrdRdpNvenc *rdp_nvenc;
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+ GrdHwAccelNvidia *hwaccel_nvidia;
+#endif /* HAVE_HWACCEL_NVIDIA */
};
G_DEFINE_TYPE (GrdRdpServer, grd_rdp_server, G_TYPE_SOCKET_SERVICE)
@@ -116,9 +119,9 @@ on_incoming (GSocketService *service,
g_debug ("New incoming RDP connection");
if (!(session_rdp = grd_session_rdp_new (rdp_server, connection,
-#ifdef HAVE_NVENC
- rdp_server->rdp_nvenc,
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+ rdp_server->hwaccel_nvidia,
+#endif /* HAVE_HWACCEL_NVIDIA */
0)))
return TRUE;
@@ -198,9 +201,9 @@ grd_rdp_server_dispose (GObject *object)
{
GrdRdpServer *rdp_server = GRD_RDP_SERVER (object);
-#ifdef HAVE_NVENC
- g_clear_object (&rdp_server->rdp_nvenc);
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+ g_clear_object (&rdp_server->hwaccel_nvidia);
+#endif /* HAVE_HWACCEL_NVIDIA */
if (rdp_server->idle_task)
{
@@ -239,9 +242,9 @@ grd_rdp_server_init (GrdRdpServer *rdp_server)
*/
primitives_get ();
-#ifdef HAVE_NVENC
- rdp_server->rdp_nvenc = grd_rdp_nvenc_new ();
- if (rdp_server->rdp_nvenc)
+#ifdef HAVE_HWACCEL_NVIDIA
+ rdp_server->hwaccel_nvidia = grd_hwaccel_nvidia_new ();
+ if (rdp_server->hwaccel_nvidia)
{
g_debug ("[RDP] Initialization of NVENC was successful");
}
@@ -253,7 +256,7 @@ grd_rdp_server_init (GrdRdpServer *rdp_server)
#else
g_message ("[RDP] RDP backend is built WITHOUT support for NVENC and CUDA. "
"No hardware acceleration available");
-#endif /* HAVE_NVENC */
+#endif /* HAVE_HWACCEL_NVIDIA */
}
static void
diff --git a/src/grd-session-rdp.c b/src/grd-session-rdp.c
index 809ce2d4..47620f64 100644
--- a/src/grd-session-rdp.c
+++ b/src/grd-session-rdp.c
@@ -42,9 +42,9 @@
#include "grd-settings.h"
#include "grd-stream.h"
-#ifdef HAVE_NVENC
-#include "grd-rdp-nvenc.h"
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+#include "grd-hwaccel-nvidia.h"
+#endif /* HAVE_HWACCEL_NVIDIA */
#define DISCRETE_SCROLL_STEP 10.0
@@ -148,9 +148,9 @@ struct _GrdSessionRdp
NSCThreadPoolContext nsc_thread_pool_context;
RawThreadPoolContext raw_thread_pool_context;
-#ifdef HAVE_NVENC
- GrdRdpNvenc *rdp_nvenc;
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+ GrdHwAccelNvidia *hwaccel_nvidia;
+#endif /* HAVE_HWACCEL_NVIDIA */
GSource *pending_encode_source;
@@ -1654,10 +1654,10 @@ rdp_peer_post_connect (freerdp_peer *peer)
rdp_peer_context->network_autodetection,
rdp_peer_context->encode_stream,
rdp_peer_context->rfx_context);
-#ifdef HAVE_NVENC
- grd_rdp_graphics_pipeline_set_nvenc (rdp_peer_context->graphics_pipeline,
- session_rdp->rdp_nvenc);
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+ grd_rdp_graphics_pipeline_set_hwaccel_nvidia (
+ rdp_peer_context->graphics_pipeline, session_rdp->hwaccel_nvidia);
+#endif /* HAVE_HWACCEL_NVIDIA */
}
grd_session_start (GRD_SESSION (session_rdp));
@@ -1927,18 +1927,18 @@ graphics_thread_func (gpointer data)
{
GrdSessionRdp *session_rdp = data;
-#ifdef HAVE_NVENC
- if (session_rdp->rdp_nvenc)
- grd_rdp_nvenc_push_cuda_context (session_rdp->rdp_nvenc);
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+ if (session_rdp->hwaccel_nvidia)
+ grd_hwaccel_nvidia_push_cuda_context (session_rdp->hwaccel_nvidia);
+#endif /* HAVE_HWACCEL_NVIDIA */
while (WaitForSingleObject (session_rdp->stop_event, 0) == WAIT_TIMEOUT)
g_main_context_iteration (session_rdp->graphics_context, TRUE);
-#ifdef HAVE_NVENC
- if (session_rdp->rdp_nvenc)
- grd_rdp_nvenc_pop_cuda_context (session_rdp->rdp_nvenc);
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+ if (session_rdp->hwaccel_nvidia)
+ grd_hwaccel_nvidia_pop_cuda_context (session_rdp->hwaccel_nvidia);
+#endif /* HAVE_HWACCEL_NVIDIA */
return NULL;
}
@@ -1946,9 +1946,9 @@ graphics_thread_func (gpointer data)
GrdSessionRdp *
grd_session_rdp_new (GrdRdpServer *rdp_server,
GSocketConnection *connection,
-#ifdef HAVE_NVENC
- GrdRdpNvenc *rdp_nvenc,
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+ GrdHwAccelNvidia *hwaccel_nvidia,
+#endif /* HAVE_HWACCEL_NVIDIA */
int reserved)
{
GrdSessionRdp *session_rdp;
@@ -1979,9 +1979,9 @@ grd_session_rdp_new (GrdRdpServer *rdp_server,
NULL);
session_rdp->connection = g_object_ref (connection);
-#ifdef HAVE_NVENC
- session_rdp->rdp_nvenc = rdp_nvenc;
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+ session_rdp->hwaccel_nvidia = hwaccel_nvidia;
+#endif /* HAVE_HWACCEL_NVIDIA */
session_rdp->socket_thread = g_thread_new ("RDP socket thread",
socket_thread_func,
diff --git a/src/grd-session-rdp.h b/src/grd-session-rdp.h
index 03de5b76..1dfe13dc 100644
--- a/src/grd-session-rdp.h
+++ b/src/grd-session-rdp.h
@@ -34,9 +34,9 @@ G_DECLARE_FINAL_TYPE (GrdSessionRdp,
GrdSessionRdp *grd_session_rdp_new (GrdRdpServer *rdp_server,
GSocketConnection *connection,
-#ifdef HAVE_NVENC
- GrdRdpNvenc *rdp_nvenc,
-#endif /* HAVE_NVENC */
+#ifdef HAVE_HWACCEL_NVIDIA
+ GrdHwAccelNvidia *hwaccel_nvidia,
+#endif /* HAVE_HWACCEL_NVIDIA */
int reserved);
void grd_session_rdp_notify_error (GrdSessionRdp *session_rdp,
diff --git a/src/grd-types.h b/src/grd-types.h
index f88efd01..96685c6b 100644
--- a/src/grd-types.h
+++ b/src/grd-types.h
@@ -28,12 +28,12 @@ typedef struct _GrdClipboard GrdClipboard;
typedef struct _GrdClipboardRdp GrdClipboardRdp;
typedef struct _GrdClipboardVnc GrdClipboardVnc;
typedef struct _GrdEglThread GrdEglThread;
+typedef struct _GrdHwAccelNvidia GrdHwAccelNvidia;
typedef struct _GrdRdpEventQueue GrdRdpEventQueue;
typedef struct _GrdRdpGfxFrameLog GrdRdpGfxFrameLog;
typedef struct _GrdRdpGfxSurface GrdRdpGfxSurface;
typedef struct _GrdRdpGraphicsPipeline GrdRdpGraphicsPipeline;
typedef struct _GrdRdpNetworkAutodetection GrdRdpNetworkAutodetection;
-typedef struct _GrdRdpNvenc GrdRdpNvenc;
typedef struct _GrdRdpSAMFile GrdRdpSAMFile;
typedef struct _GrdRdpServer GrdRdpServer;
typedef struct _GrdRdpSurface GrdRdpSurface;
diff --git a/src/meson.build b/src/meson.build
index fb98d22a..7016d41d 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -78,15 +78,15 @@ if have_rdp
xkbcommon_dep,
]
- if have_nvenc
+ if have_hwaccel_nvidia
daemon_sources += files([
- 'grd-rdp-nvenc.c',
- 'grd-rdp-nvenc.h',
+ 'grd-hwaccel-nvidia.c',
+ 'grd-hwaccel-nvidia.h',
])
deps += [
+ cuda_dep,
dl_dep,
- nvenc_dep,
]
endif
endif