format code

pigeatgarlic 2023-12-20 20:19:43 +07:00
parent 1b0f31c1e9
commit 4b229873f3
49 changed files with 11585 additions and 11426 deletions

@ -16,101 +16,102 @@ extern "C" {
using namespace std::literals;
namespace cbs {
void
close(CodedBitstreamContext *c) {
ff_cbs_close(&c);
}
void close(CodedBitstreamContext *c) { ff_cbs_close(&c); }
using ctx_t = util::safe_ptr<CodedBitstreamContext, close>;
using ctx_t = util::safe_ptr<CodedBitstreamContext, close>;
class frag_t: public CodedBitstreamFragment {
public:
class frag_t : public CodedBitstreamFragment {
public:
frag_t(frag_t &&o) {
std::copy((std::uint8_t *) &o, (std::uint8_t *) (&o + 1), (std::uint8_t *) this);
std::copy((std::uint8_t *)&o, (std::uint8_t *)(&o + 1),
(std::uint8_t *)this);
o.data = nullptr;
o.units = nullptr;
o.data = nullptr;
o.units = nullptr;
};
frag_t() {
std::fill_n((std::uint8_t *) this, sizeof(*this), 0);
}
frag_t() { std::fill_n((std::uint8_t *)this, sizeof(*this), 0); }
frag_t &
operator=(frag_t &&o) {
std::copy((std::uint8_t *) &o, (std::uint8_t *) (&o + 1), (std::uint8_t *) this);
frag_t &operator=(frag_t &&o) {
std::copy((std::uint8_t *)&o, (std::uint8_t *)(&o + 1),
(std::uint8_t *)this);
o.data = nullptr;
o.units = nullptr;
o.data = nullptr;
o.units = nullptr;
return *this;
return *this;
};
~frag_t() {
if (data || units) {
ff_cbs_fragment_free(this);
}
if (data || units) {
ff_cbs_fragment_free(this);
}
}
};
};
util::buffer_t<std::uint8_t>
write(cbs::ctx_t &cbs_ctx, std::uint8_t nal, void *uh, AVCodecID codec_id) {
util::buffer_t<std::uint8_t> write(cbs::ctx_t &cbs_ctx, std::uint8_t nal,
void *uh, AVCodecID codec_id) {
cbs::frag_t frag;
auto err = ff_cbs_insert_unit_content(&frag, -1, nal, uh, nullptr);
if (err < 0) {
char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
BOOST_LOG(error) << "Could not insert NAL unit SPS: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);
char err_str[AV_ERROR_MAX_STRING_SIZE]{0};
BOOST_LOG(error) << "Could not insert NAL unit SPS: "sv
<< av_make_error_string(err_str,
AV_ERROR_MAX_STRING_SIZE, err);
return {};
return {};
}
err = ff_cbs_write_fragment_data(cbs_ctx.get(), &frag);
if (err < 0) {
char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
BOOST_LOG(error) << "Could not write fragment data: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);
char err_str[AV_ERROR_MAX_STRING_SIZE]{0};
BOOST_LOG(error) << "Could not write fragment data: "sv
<< av_make_error_string(err_str,
AV_ERROR_MAX_STRING_SIZE, err);
return {};
return {};
}
// frag.data_size * 8 - frag.data_bit_padding == bits in fragment
util::buffer_t<std::uint8_t> data { frag.data_size };
util::buffer_t<std::uint8_t> data{frag.data_size};
std::copy_n(frag.data, frag.data_size, std::begin(data));
return data;
}
}
util::buffer_t<std::uint8_t>
write(std::uint8_t nal, void *uh, AVCodecID codec_id) {
util::buffer_t<std::uint8_t> write(std::uint8_t nal, void *uh,
AVCodecID codec_id) {
cbs::ctx_t cbs_ctx;
ff_cbs_init(&cbs_ctx, codec_id, nullptr);
return write(cbs_ctx, nal, uh, codec_id);
}
}
h264_t
make_sps_h264(const AVCodecContext *avctx, const AVPacket *packet) {
h264_t make_sps_h264(const AVCodecContext *avctx, const AVPacket *packet) {
cbs::ctx_t ctx;
if (ff_cbs_init(&ctx, AV_CODEC_ID_H264, nullptr)) {
return {};
return {};
}
cbs::frag_t frag;
int err = ff_cbs_read_packet(ctx.get(), &frag, packet);
if (err < 0) {
char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
BOOST_LOG(error) << "Couldn't read packet: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);
char err_str[AV_ERROR_MAX_STRING_SIZE]{0};
BOOST_LOG(error) << "Couldn't read packet: "sv
<< av_make_error_string(err_str,
AV_ERROR_MAX_STRING_SIZE, err);
return {};
return {};
}
auto sps_p = ((CodedBitstreamH264Context *) ctx->priv_data)->active_sps;
auto sps_p = ((CodedBitstreamH264Context *)ctx->priv_data)->active_sps;
// This is a very large struct that cannot safely be stored on the stack
auto sps = std::make_unique<H264RawSPS>(*sps_p);
if (avctx->refs > 0) {
sps->max_num_ref_frames = avctx->refs;
sps->max_num_ref_frames = avctx->refs;
}
sps->vui_parameters_present_flag = 1;
@ -138,31 +139,32 @@ namespace cbs {
cbs::ctx_t write_ctx;
ff_cbs_init(&write_ctx, AV_CODEC_ID_H264, nullptr);
return h264_t {
write(write_ctx, sps->nal_unit_header.nal_unit_type, (void *) &sps->nal_unit_header, AV_CODEC_ID_H264),
write(ctx, sps_p->nal_unit_header.nal_unit_type, (void *) &sps_p->nal_unit_header, AV_CODEC_ID_H264)
};
}
return h264_t{write(write_ctx, sps->nal_unit_header.nal_unit_type,
(void *)&sps->nal_unit_header, AV_CODEC_ID_H264),
write(ctx, sps_p->nal_unit_header.nal_unit_type,
(void *)&sps_p->nal_unit_header, AV_CODEC_ID_H264)};
}
hevc_t
make_sps_hevc(const AVCodecContext *avctx, const AVPacket *packet) {
hevc_t make_sps_hevc(const AVCodecContext *avctx, const AVPacket *packet) {
cbs::ctx_t ctx;
if (ff_cbs_init(&ctx, AV_CODEC_ID_H265, nullptr)) {
return {};
return {};
}
cbs::frag_t frag;
int err = ff_cbs_read_packet(ctx.get(), &frag, packet);
if (err < 0) {
char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
BOOST_LOG(error) << "Couldn't read packet: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);
char err_str[AV_ERROR_MAX_STRING_SIZE]{0};
BOOST_LOG(error) << "Couldn't read packet: "sv
<< av_make_error_string(err_str,
AV_ERROR_MAX_STRING_SIZE, err);
return {};
return {};
}
auto vps_p = ((CodedBitstreamH265Context *) ctx->priv_data)->active_vps;
auto sps_p = ((CodedBitstreamH265Context *) ctx->priv_data)->active_sps;
auto vps_p = ((CodedBitstreamH265Context *)ctx->priv_data)->active_vps;
auto sps_p = ((CodedBitstreamH265Context *)ctx->priv_data)->active_sps;
// These are very large structs that cannot safely be stored on the stack
auto sps = std::make_unique<H265RawSPS>(*sps_p);
@ -189,8 +191,10 @@ namespace cbs {
vui.vui_timing_info_present_flag = vps->vps_timing_info_present_flag;
vui.vui_num_units_in_tick = vps->vps_num_units_in_tick;
vui.vui_time_scale = vps->vps_time_scale;
vui.vui_poc_proportional_to_timing_flag = vps->vps_poc_proportional_to_timing_flag;
vui.vui_num_ticks_poc_diff_one_minus1 = vps->vps_num_ticks_poc_diff_one_minus1;
vui.vui_poc_proportional_to_timing_flag =
vps->vps_poc_proportional_to_timing_flag;
vui.vui_num_ticks_poc_diff_one_minus1 =
vps->vps_num_ticks_poc_diff_one_minus1;
vui.vui_hrd_parameters_present_flag = 0;
vui.bitstream_restriction_flag = 1;
@ -204,46 +208,52 @@ namespace cbs {
cbs::ctx_t write_ctx;
ff_cbs_init(&write_ctx, AV_CODEC_ID_H265, nullptr);
return hevc_t {
nal_t {
write(write_ctx, vps->nal_unit_header.nal_unit_type, (void *) &vps->nal_unit_header, AV_CODEC_ID_H265),
write(ctx, vps_p->nal_unit_header.nal_unit_type, (void *) &vps_p->nal_unit_header, AV_CODEC_ID_H265),
},
return hevc_t{
nal_t{
write(write_ctx, vps->nal_unit_header.nal_unit_type,
(void *)&vps->nal_unit_header, AV_CODEC_ID_H265),
write(ctx, vps_p->nal_unit_header.nal_unit_type,
(void *)&vps_p->nal_unit_header, AV_CODEC_ID_H265),
},
nal_t {
write(write_ctx, sps->nal_unit_header.nal_unit_type, (void *) &sps->nal_unit_header, AV_CODEC_ID_H265),
write(ctx, sps_p->nal_unit_header.nal_unit_type, (void *) &sps_p->nal_unit_header, AV_CODEC_ID_H265),
},
nal_t{
write(write_ctx, sps->nal_unit_header.nal_unit_type,
(void *)&sps->nal_unit_header, AV_CODEC_ID_H265),
write(ctx, sps_p->nal_unit_header.nal_unit_type,
(void *)&sps_p->nal_unit_header, AV_CODEC_ID_H265),
},
};
}
}
bool
validate_sps(const AVPacket *packet, int codec_id) {
bool validate_sps(const AVPacket *packet, int codec_id) {
cbs::ctx_t ctx;
if (ff_cbs_init(&ctx, (AVCodecID) codec_id, nullptr)) {
return false;
if (ff_cbs_init(&ctx, (AVCodecID)codec_id, nullptr)) {
return false;
}
cbs::frag_t frag;
int err = ff_cbs_read_packet(ctx.get(), &frag, packet);
if (err < 0) {
char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
BOOST_LOG(error) << "Couldn't read packet: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);
char err_str[AV_ERROR_MAX_STRING_SIZE]{0};
BOOST_LOG(error) << "Couldn't read packet: "sv
<< av_make_error_string(err_str,
AV_ERROR_MAX_STRING_SIZE, err);
return false;
return false;
}
if (codec_id == AV_CODEC_ID_H264) {
auto h264 = (CodedBitstreamH264Context *) ctx->priv_data;
auto h264 = (CodedBitstreamH264Context *)ctx->priv_data;
if (!h264->active_sps->vui_parameters_present_flag) {
return false;
}
if (!h264->active_sps->vui_parameters_present_flag) {
return false;
}
return true;
return true;
}
return ((CodedBitstreamH265Context *) ctx->priv_data)->active_sps->vui_parameters_present_flag;
}
return ((CodedBitstreamH265Context *)ctx->priv_data)
->active_sps->vui_parameters_present_flag;
}
} // namespace cbs

@ -11,28 +11,25 @@ struct AVCodecContext;
namespace cbs {
struct nal_t {
struct nal_t {
util::buffer_t<std::uint8_t> _new;
util::buffer_t<std::uint8_t> old;
};
};
struct hevc_t {
struct hevc_t {
nal_t vps;
nal_t sps;
};
};
struct h264_t {
struct h264_t {
nal_t sps;
};
};
hevc_t
make_sps_hevc(const AVCodecContext *ctx, const AVPacket *packet);
h264_t
make_sps_h264(const AVCodecContext *ctx, const AVPacket *packet);
hevc_t make_sps_hevc(const AVCodecContext *ctx, const AVPacket *packet);
h264_t make_sps_h264(const AVCodecContext *ctx, const AVPacket *packet);
/**
* Check if SPS->VUI is present
*/
bool
validate_sps(const AVPacket *packet, int codec_id);
/**
* Check if SPS->VUI is present
*/
bool validate_sps(const AVPacket *packet, int codec_id);
} // namespace cbs
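
As a usage sketch only (the wrapper function name is illustrative and the header's filename is not shown in this diff, so the includes are assumed), the declarations above would typically be driven from the packet path like this:

// Illustrative sketch, not part of this commit: ensure VUI is present in an H.264 stream.
extern "C" {
#include <libavcodec/avcodec.h>
}
// plus the cbs header declared above (filename not shown in this diff)

void ensure_vui(const AVCodecContext *avctx, const AVPacket *idr_packet) {
  // validate_sps() reports whether the in-band SPS already carries VUI data.
  if (cbs::validate_sps(idr_packet, AV_CODEC_ID_H264)) {
    return;  // nothing to patch
  }
  // make_sps_h264() returns the rewritten SPS NAL ("_new") alongside the
  // original one ("old"), so the caller can locate and replace it in the packet.
  cbs::h264_t headers = cbs::make_sps_h264(avctx, idr_packet);
  // ... find headers.sps.old in the packet data and splice in headers.sps._new ...
}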

File diff suppressed because it is too large

@ -14,7 +14,7 @@
#include "nvenc/nvenc_config.h"
namespace config {
struct video_t {
struct video_t {
// ffmpeg params
int qp; // higher == more compression and less quality
@ -23,60 +23,60 @@ namespace config {
int min_threads; // Minimum number of threads/slices for CPU encoding
struct {
std::string sw_preset;
std::string sw_tune;
std::optional<int> svtav1_preset;
std::string sw_preset;
std::string sw_tune;
std::optional<int> svtav1_preset;
} sw;
nvenc::nvenc_config nv;
bool nv_realtime_hags;
struct {
int preset;
int multipass;
int h264_coder;
int preset;
int multipass;
int h264_coder;
} nv_legacy;
struct {
std::optional<int> qsv_preset;
std::optional<int> qsv_cavlc;
std::optional<int> qsv_preset;
std::optional<int> qsv_cavlc;
} qsv;
struct {
std::optional<int> amd_quality_h264;
std::optional<int> amd_quality_hevc;
std::optional<int> amd_quality_av1;
std::optional<int> amd_rc_h264;
std::optional<int> amd_rc_hevc;
std::optional<int> amd_rc_av1;
std::optional<int> amd_usage_h264;
std::optional<int> amd_usage_hevc;
std::optional<int> amd_usage_av1;
std::optional<int> amd_preanalysis;
std::optional<int> amd_vbaq;
int amd_coder;
std::optional<int> amd_quality_h264;
std::optional<int> amd_quality_hevc;
std::optional<int> amd_quality_av1;
std::optional<int> amd_rc_h264;
std::optional<int> amd_rc_hevc;
std::optional<int> amd_rc_av1;
std::optional<int> amd_usage_h264;
std::optional<int> amd_usage_hevc;
std::optional<int> amd_usage_av1;
std::optional<int> amd_preanalysis;
std::optional<int> amd_vbaq;
int amd_coder;
} amd;
struct {
int vt_allow_sw;
int vt_require_sw;
int vt_realtime;
int vt_coder;
int vt_allow_sw;
int vt_require_sw;
int vt_realtime;
int vt_coder;
} vt;
std::string capture;
std::string encoder;
std::string adapter_name;
std::string output_name;
};
};
struct audio_t {
struct audio_t {
std::string sink;
std::string virtual_sink;
bool install_steam_drivers;
};
};
struct stream_t {
struct stream_t {
std::chrono::milliseconds ping_timeout;
std::string file_apps;
@ -85,9 +85,9 @@ namespace config {
// max unique instances of video and audio streams
int channels;
};
};
struct nvhttp_t {
struct nvhttp_t {
// Could be any of the following values:
// pc|lan|wan
std::string origin_web_ui_allowed;
@ -102,31 +102,31 @@ namespace config {
std::string external_ip;
std::vector<std::string> resolutions;
std::vector<int> fps;
};
};
namespace flag {
enum flag_e : std::size_t {
PIN_STDIN = 0, // Read PIN from stdin instead of http
FRESH_STATE, // Do not load or save state
FORCE_VIDEO_HEADER_REPLACE, // force replacing headers inside video data
UPNP, // Try Universal Plug 'n Play
CONST_PIN, // Use "universal" pin
FLAG_SIZE
};
}
namespace flag {
enum flag_e : std::size_t {
PIN_STDIN = 0, // Read PIN from stdin instead of http
FRESH_STATE, // Do not load or save state
FORCE_VIDEO_HEADER_REPLACE, // force replacing headers inside video data
UPNP, // Try Universal Plug 'n Play
CONST_PIN, // Use "universal" pin
FLAG_SIZE
};
}
struct prep_cmd_t {
prep_cmd_t(std::string &&do_cmd, std::string &&undo_cmd, bool &&elevated):
do_cmd(std::move(do_cmd)), undo_cmd(std::move(undo_cmd)), elevated(std::move(elevated)) {}
explicit prep_cmd_t(std::string &&do_cmd, bool &&elevated):
do_cmd(std::move(do_cmd)), elevated(std::move(elevated)) {}
struct prep_cmd_t {
prep_cmd_t(std::string &&do_cmd, std::string &&undo_cmd, bool &&elevated)
: do_cmd(std::move(do_cmd)),
undo_cmd(std::move(undo_cmd)),
elevated(std::move(elevated)) {}
explicit prep_cmd_t(std::string &&do_cmd, bool &&elevated)
: do_cmd(std::move(do_cmd)), elevated(std::move(elevated)) {}
std::string do_cmd;
std::string undo_cmd;
bool elevated;
};
struct sunshine_t {
};
struct sunshine_t {
int min_log_level;
std::bitset<flag::FLAG_SIZE> flags;
std::string credentials_file;
@ -138,9 +138,9 @@ namespace config {
std::string config_file;
struct cmd_t {
std::string name;
int argc;
char **argv;
std::string name;
int argc;
char **argv;
} cmd;
std::uint16_t port;
@ -149,11 +149,11 @@ namespace config {
std::string log_file;
std::vector<prep_cmd_t> prep_cmds;
};
};
extern video_t video;
extern audio_t audio;
extern stream_t stream;
extern nvhttp_t nvhttp;
extern sunshine_t sunshine;
extern video_t video;
extern audio_t audio;
extern stream_t stream;
extern nvhttp_t nvhttp;
extern sunshine_t sunshine;
} // namespace config
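
Purely as an illustration (the command strings and flag choices below are invented, and the helper function is hypothetical), the prep_cmd_t constructors and the flag bitset declared above are meant to be used along these lines:

// Hypothetical helper, not part of this commit: populate the globals declared above.
void apply_example_config() {
  config::sunshine.flags[config::flag::UPNP] = true;        // try UPnP port mapping
  config::sunshine.flags[config::flag::CONST_PIN] = false;  // keep per-session PINs

  // prep_cmd_t stores a do/undo command pair plus whether to run it elevated.
  config::sunshine.prep_cmds.emplace_back(
      std::string{"setup-display.bat"},    // do_cmd (invented value)
      std::string{"restore-display.bat"},  // undo_cmd (invented value)
      true);                               // elevated
}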

@ -12,161 +12,128 @@
// local includes
#include "config.h"
#include "main.h"
#include "dll.h"
#include "main.h"
#include "platform/common.h"
#include "video.h"
using namespace std::literals;
struct _VideoPipeline
{
std::chrono::steady_clock::time_point start;
video::config_t monitor;
safe::mail_t mail;
struct _VideoPipeline {
std::chrono::steady_clock::time_point start;
video::config_t monitor;
safe::mail_t mail;
};
extern VideoPipeline *__cdecl StartQueue(int video_codec,
char* display_name)
{
static bool init = false;
if (!init)
{
// If any of the following fail, we log an error and continue even though sunshine will not function correctly.
// This allows access to the UI to fix configuration problems or view the logs.
if (platf::init())
{
BOOST_LOG(error) << "Platform failed to initialize"sv;
return NULL;
}
else if (video::probe_encoders())
{
BOOST_LOG(error) << "Video failed to find working encoder"sv;
return NULL;
}
init = true;
}
extern VideoPipeline *__cdecl StartQueue(int video_codec, char *display_name) {
static bool init = false;
if (!init) {
// If any of the following fail, we log an error and continue even
// though sunshine will not function correctly. This allows access to
// the UI to fix configuration problems or view the logs.
if (platf::init()) {
BOOST_LOG(error) << "Platform failed to initialize"sv;
return NULL;
} else if (video::probe_encoders()) {
BOOST_LOG(error) << "Video failed to find working encoder"sv;
return NULL;
}
init = true;
}
static VideoPipeline pipeline = {};
pipeline.mail = std::make_shared<safe::mail_raw_t>();
pipeline.monitor = {1920, 1080, 120, 6000, 1, 0, 1, 0, 0};
pipeline.start = std::chrono::steady_clock::now();
static VideoPipeline pipeline = {};
pipeline.mail = std::make_shared<safe::mail_raw_t>();
pipeline.monitor = {1920, 1080, 120, 6000, 1, 0, 1, 0, 0};
pipeline.start = std::chrono::steady_clock::now();
switch (video_codec)
{
case H265: // h265
BOOST_LOG(info) << ("starting pipeline with h265 codec\n");
pipeline.monitor.videoFormat = 1;
config::video.hevc_mode = 1;
config::video.av1_mode = 0;
break;
case AV1: // av1
BOOST_LOG(info) << ("starting pipeline with av1 codec\n");
pipeline.monitor.videoFormat = 2;
config::video.hevc_mode = 0;
config::video.av1_mode = 1;
break;
default:
BOOST_LOG(info) << ("starting pipeline with h264 codec\n");
pipeline.monitor.videoFormat = 0;
config::video.hevc_mode = 0;
config::video.av1_mode = 0;
break;
}
switch (video_codec) {
case H265: // h265
BOOST_LOG(info) << ("starting pipeline with h265 codec\n");
pipeline.monitor.videoFormat = 1;
config::video.hevc_mode = 1;
config::video.av1_mode = 0;
break;
case AV1: // av1
BOOST_LOG(info) << ("starting pipeline with av1 codec\n");
pipeline.monitor.videoFormat = 2;
config::video.hevc_mode = 0;
config::video.av1_mode = 1;
break;
default:
BOOST_LOG(info) << ("starting pipeline with h264 codec\n");
pipeline.monitor.videoFormat = 0;
config::video.hevc_mode = 0;
config::video.av1_mode = 0;
break;
}
auto thread = std::thread{[&](){
video::capture(
pipeline.mail,
pipeline.monitor,
NULL);
}};
thread.detach();
auto thread = std::thread{
[&]() { video::capture(pipeline.mail, pipeline.monitor, NULL); }};
thread.detach();
RaiseEventS(&pipeline,CHANGE_DISPLAY,display_name);
return &pipeline;
RaiseEventS(&pipeline, CHANGE_DISPLAY, display_name);
return &pipeline;
}
long long
duration_to_latency(std::chrono::steady_clock::duration duration) {
const auto duration_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(duration).count();
return std::clamp<decltype(duration_ns)>((duration_ns + 50) / 100, 0, std::numeric_limits<int>::max());
long long duration_to_latency(std::chrono::steady_clock::duration duration) {
const auto duration_ns =
std::chrono::duration_cast<std::chrono::nanoseconds>(duration).count();
return std::clamp<decltype(duration_ns)>((duration_ns + 50) / 100, 0,
std::numeric_limits<int>::max());
};
int __cdecl
PopFromQueue(VideoPipeline *pipeline,
void *data,
int *duration)
{
auto packet = pipeline->mail->queue<video::packet_t>(mail::video_packets)->pop();
// if (packet->frame_timestamp) {
// *duration = duration_to_latency(*packet->frame_timestamp - pipeline->start);
// pipeline->start = *packet->frame_timestamp;
// }
int __cdecl PopFromQueue(VideoPipeline *pipeline, void *data, int *duration) {
auto packet =
pipeline->mail->queue<video::packet_t>(mail::video_packets)->pop();
// if (packet->frame_timestamp) {
// *duration = duration_to_latency(*packet->frame_timestamp -
// pipeline->start); pipeline->start = *packet->frame_timestamp;
// }
memcpy(data, packet->data(), packet->data_size());
int size = packet->data_size();
return size;
memcpy(data, packet->data(), packet->data_size());
int size = packet->data_size();
return size;
}
void __cdecl
RaiseEventS(VideoPipeline *pipeline,
EventType event,
char* value)
{
switch (event)
{
case CHANGE_DISPLAY: // switch capture display
pipeline->mail->event<std::string>(mail::switch_display)->raise(std::string(value));
break;
default:
break;
}
void __cdecl RaiseEventS(VideoPipeline *pipeline, EventType event,
char *value) {
switch (event) {
case CHANGE_DISPLAY: // switch capture display
pipeline->mail->event<std::string>(mail::switch_display)
->raise(std::string(value));
break;
default:
break;
}
}
void __cdecl
RaiseEvent(VideoPipeline *pipeline,
EventType event,
int value)
{
switch (event)
{
case IDR_FRAME: // IDR FRAME
pipeline->mail->event<bool>(mail::idr)->raise(true);
break;
case STOP: // shutdown
pipeline->mail->event<bool>(mail::shutdown)->raise(true);
break;
case POINTER_VISIBLE: // toggle cursor visibility
pipeline->mail->event<bool>(mail::toggle_cursor)->raise(value != 0);
break;
case CHANGE_BITRATE: // change bitrate
pipeline->mail->event<int>(mail::bitrate)->raise(value);
break;
default:
break;
}
void __cdecl RaiseEvent(VideoPipeline *pipeline, EventType event, int value) {
switch (event) {
case IDR_FRAME: // IDR FRAME
pipeline->mail->event<bool>(mail::idr)->raise(true);
break;
case STOP: // shutdown
pipeline->mail->event<bool>(mail::shutdown)->raise(true);
break;
case POINTER_VISIBLE: // toggle cursor visibility
pipeline->mail->event<bool>(mail::toggle_cursor)->raise(value != 0);
break;
case CHANGE_BITRATE: // change bitrate
pipeline->mail->event<int>(mail::bitrate)->raise(value);
break;
default:
break;
}
}
void __cdecl
WaitEvent(VideoPipeline* pipeline,
EventType event)
{
while(!PeekEvent(pipeline,event))
std::this_thread::sleep_for(10ms);
void __cdecl WaitEvent(VideoPipeline *pipeline, EventType event) {
while (!PeekEvent(pipeline, event)) std::this_thread::sleep_for(10ms);
}
int __cdecl
PeekEvent(VideoPipeline* pipeline,
EventType event)
{
switch (event)
{
case STOP: // shutdown
return pipeline->mail->event<bool>(mail::shutdown)->peek();
default:
return 0;
}
int __cdecl PeekEvent(VideoPipeline *pipeline, EventType event) {
switch (event) {
case STOP: // shutdown
return pipeline->mail->event<bool>(mail::shutdown)->peek();
default:
return 0;
}
}

@ -1,11 +1,9 @@
/*
*/
*/
extern "C" {
typedef struct _VideoPipeline VideoPipeline;
typedef struct _VideoPipeline VideoPipeline;
typedef enum _EventType {
POINTER_VISIBLE,
@ -14,54 +12,42 @@ typedef enum _EventType {
IDR_FRAME,
STOP
}EventType;
} EventType;
typedef enum _Codec {
H264 = 1,
H265,
AV1,
}Codec;
} Codec;
__declspec(dllexport) VideoPipeline* __cdecl StartQueue(int video_codec,
char* display_name);
__declspec(dllexport) int __cdecl PopFromQueue(VideoPipeline* pipeline,
void* data,
int* duration);
__declspec(dllexport) int __cdecl PopFromQueue(VideoPipeline* pipeline,
void* data, int* duration);
__declspec(dllexport) void __cdecl RaiseEvent(VideoPipeline* pipeline,
EventType event,
int value);
EventType event, int value);
__declspec(dllexport) void __cdecl RaiseEventS(VideoPipeline* pipeline,
EventType event,
char* value);
EventType event, char* value);
__declspec(dllexport) void __cdecl WaitEvent(VideoPipeline* pipeline,
EventType event);
__declspec(dllexport) void __cdecl WaitEvent(VideoPipeline* pipeline,
EventType event);
__declspec(dllexport) int __cdecl PeekEvent(VideoPipeline* pipeline,
EventType event);
__declspec(dllexport) int __cdecl PeekEvent(VideoPipeline* pipeline,
EventType event);
typedef VideoPipeline* (*STARTQUEUE)(int video_codec, char* display_name);
typedef VideoPipeline* (*STARTQUEUE) ( int video_codec,
char* display_name);
typedef int (*POPFROMQUEUE)(VideoPipeline* pipeline, void* data, int* duration);
typedef int (*POPFROMQUEUE) (VideoPipeline* pipeline,
void* data,
int* duration);
typedef void (*RAISEEVENT)(VideoPipeline* pipeline, EventType event, int value);
typedef void (*RAISEEVENT) (VideoPipeline* pipeline,
EventType event,
int value);
typedef void (*RAISEEVENTS)(VideoPipeline* pipeline, EventType event,
char* value);
typedef void (*RAISEEVENTS) (VideoPipeline* pipeline,
EventType event,
char* value);
typedef void (*WAITEVENT)(VideoPipeline* pipeline, EventType event);
typedef void (*WAITEVENT) (VideoPipeline* pipeline,
EventType event);
typedef int (*PEEKEVENT) (VideoPipeline* pipeline,
EventType event);
typedef int (*PEEKEVENT)(VideoPipeline* pipeline, EventType event);
}

@ -4,81 +4,76 @@
*/
// standard includes
#include <Windows.h>
#include <iostream>
#include <thread>
#include <Windows.h>
#include "dll.h"
static HMODULE hModule;
static STARTQUEUE callstart;
static POPFROMQUEUE callpop;
static WAITEVENT callwait;
static RAISEEVENT callraise;
static RAISEEVENTS callraiseS;
static PEEKEVENT callpeek;
int initlibrary() {
char szFullPath[MAX_PATH] = {};
GetCurrentDirectoryA(MAX_PATH, szFullPath);
strcat(szFullPath, "\\libsunshine.dll");
hModule = LoadLibraryA(szFullPath);
if (hModule == 0) return 1;
static HMODULE hModule;
static STARTQUEUE callstart;
static POPFROMQUEUE callpop;
static WAITEVENT callwait;
static RAISEEVENT callraise;
static RAISEEVENTS callraiseS;
static PEEKEVENT callpeek;
callstart = (STARTQUEUE)GetProcAddress(hModule, "StartQueue");
callpop = (POPFROMQUEUE)GetProcAddress(hModule, "PopFromQueue");
callraise = (RAISEEVENT)GetProcAddress(hModule, "RaiseEvent");
callraiseS = (RAISEEVENTS)GetProcAddress(hModule, "RaiseEventS");
callwait = (WAITEVENT)GetProcAddress(hModule, "WaitEvent");
callpeek = (PEEKEVENT)GetProcAddress(hModule, "PeekEvent");
int
initlibrary() {
char szFullPath[MAX_PATH] = {};
GetCurrentDirectoryA(MAX_PATH, szFullPath);
strcat(szFullPath, "\\libsunshine.dll");
hModule = LoadLibraryA(szFullPath);
if(hModule == 0)
return 1;
if (callpop == 0 || callstart == 0 || callraise == 0 || callwait == 0)
return 1;
callstart = (STARTQUEUE) GetProcAddress( hModule,"StartQueue");
callpop = (POPFROMQUEUE) GetProcAddress( hModule,"PopFromQueue");
callraise = (RAISEEVENT) GetProcAddress( hModule,"RaiseEvent");
callraiseS = (RAISEEVENTS) GetProcAddress( hModule,"RaiseEventS");
callwait = (WAITEVENT) GetProcAddress( hModule,"WaitEvent");
callpeek = (PEEKEVENT) GetProcAddress( hModule,"PeekEvent");
if(callpop ==0 || callstart == 0 || callraise == 0 || callwait == 0)
return 1;
return 0;
return 0;
}
int main(int argc, char *argv[])
{
if(initlibrary()) {
printf("failed to load libsunshine.dll");
return 1;
}
int main(int argc, char *argv[]) {
if (initlibrary()) {
printf("failed to load libsunshine.dll");
return 1;
}
// repeatedly start and stop encode sessions
for (int i =0; i < 30; i++)
{
VideoPipeline *pipeline = callstart(1,"\\\\.\\DISPLAY1");
auto video = std::thread{[&]() {
// Video traffic is sent on this thread
int duration = 0;
void *data = malloc(100 * 1000 * 1000);
// repeatedly start and stop encode sessions
for (int i = 0; i < 30; i++) {
VideoPipeline *pipeline = callstart(1, "\\\\.\\DISPLAY1");
auto video = std::thread{[&]() {
// Video traffic is sent on this thread
int duration = 0;
void *data = malloc(100 * 1000 * 1000);
int count = 0;
while (true) {
int size = callpop(pipeline, data, &duration);
if (callpeek(pipeline,STOP) || count == 1000) {
break;
} else if (count % 100 == 0) {
callraise(pipeline,CHANGE_BITRATE,2000);
} else if (count % 100 == 50) {
callraiseS(pipeline,CHANGE_DISPLAY,"\\\\.\\DISPLAY1");
}
int count = 0;
while (true) {
int size = callpop(pipeline, data, &duration);
if (callpeek(pipeline, STOP) || count == 1000) {
break;
} else if (count % 100 == 0) {
callraise(pipeline, CHANGE_BITRATE, 2000);
} else if (count % 100 == 50) {
callraiseS(pipeline, CHANGE_DISPLAY, "\\\\.\\DISPLAY1");
}
printf("received packet with size %d\n", size);
count++;
}
printf("received packet with size %d\n", size);
count++;
}
callraise(pipeline,STOP,0);
free(data);
}};
callraise(pipeline, STOP, 0);
free(data);
}};
callwait(pipeline,STOP);
video.join();
}
return 0;
callwait(pipeline, STOP);
video.join();
}
return 0;
}

@ -11,40 +11,45 @@
#include <boost/log/common.hpp>
// standard includes
#include <filesystem>
#include <string_view>
#include <assert.h> /* assert */
#include <stdio.h> /* printf */
#include <bitset>
#include <climits>
#include <cstring>
#include <filesystem>
#include <iostream>
#include <stdio.h> /* printf */
#include <assert.h> /* assert */
#include <string_view>
// local includes
#include "thread_safe.h"
extern boost::log::sources::severity_logger<int> verbose; // Dominating output
extern boost::log::sources::severity_logger<int> debug; // Follow what is happening
extern boost::log::sources::severity_logger<int> info; // Should be informed about
extern boost::log::sources::severity_logger<int>
debug; // Follow what is happening
extern boost::log::sources::severity_logger<int>
info; // Should be informed about
extern boost::log::sources::severity_logger<int> warning; // Strange events
extern boost::log::sources::severity_logger<int> error; // Recoverable errors
extern boost::log::sources::severity_logger<int> error; // Recoverable errors
extern boost::log::sources::severity_logger<int> fatal; // Unrecoverable errors
// namespaces
namespace mail {
#define MAIL(x) constexpr auto x = std::string_view { #x }
#define MAIL(x) \
constexpr auto x = std::string_view { \
#x \
}
//queue
MAIL(video_packets);
// queue
MAIL(video_packets);
//event
MAIL(shutdown);
MAIL(switch_display);
MAIL(toggle_cursor);
MAIL(idr);
MAIL(bitrate);
MAIL(hdr);
// event
MAIL(shutdown);
MAIL(switch_display);
MAIL(toggle_cursor);
MAIL(idr);
MAIL(bitrate);
MAIL(hdr);
#undef MAIL
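
For reference, each MAIL(x) line above expands to a constexpr std::string_view whose value is the stringized argument; the mail/queue machinery then uses that view as a lookup key. For example:

// MAIL(video_packets) expands (modulo whitespace) to:
constexpr auto video_packets = std::string_view{"video_packets"};

// which is what calls such as
//   mail->queue<video::packet_t>(mail::video_packets)->pop();
// pass as the mailbox key.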

@ -6,53 +6,45 @@
#include <utility>
namespace move_by_copy_util {
/**
* When a copy is made, it moves the object
* This allows you to move an object when a move can't be done.
*/
template <class T>
class MoveByCopy {
public:
/**
* When a copy is made, it moves the object
* This allows you to move an object when a move can't be done.
*/
template <class T>
class MoveByCopy {
public:
typedef T move_type;
private:
private:
move_type _to_move;
public:
explicit MoveByCopy(move_type &&to_move):
_to_move(std::move(to_move)) {}
public:
explicit MoveByCopy(move_type &&to_move) : _to_move(std::move(to_move)) {}
MoveByCopy(MoveByCopy &&other) = default;
MoveByCopy(const MoveByCopy &other) {
*this = other;
MoveByCopy(const MoveByCopy &other) { *this = other; }
MoveByCopy &operator=(MoveByCopy &&other) = default;
MoveByCopy &operator=(const MoveByCopy &other) {
this->_to_move = std::move(const_cast<MoveByCopy &>(other)._to_move);
return *this;
}
MoveByCopy &
operator=(MoveByCopy &&other) = default;
operator move_type() { return std::move(_to_move); }
};
MoveByCopy &
operator=(const MoveByCopy &other) {
this->_to_move = std::move(const_cast<MoveByCopy &>(other)._to_move);
return *this;
}
operator move_type() {
return std::move(_to_move);
}
};
template <class T>
MoveByCopy<T>
cmove(T &movable) {
template <class T>
MoveByCopy<T> cmove(T &movable) {
return MoveByCopy<T>(std::move(movable));
}
}
// Do NOT use this unless you are absolutely certain the object to be moved is no longer used by the caller
template <class T>
MoveByCopy<T>
const_cmove(const T &movable) {
// Do NOT use this unless you are absolutely certain the object to be moved is
// no longer used by the caller
template <class T>
MoveByCopy<T> const_cmove(const T &movable) {
return MoveByCopy<T>(std::move(const_cast<T &>(movable)));
}
}
} // namespace move_by_copy_util
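
A hypothetical example of what this utility is for (the function below is illustrative and assumes this header is included): std::function requires a copyable callable, so a move-only capture can be smuggled through with cmove().

#include <functional>
#include <memory>

// Illustrative only: wrap a move-only unique_ptr so a copyable lambda can own it.
std::function<void()> make_task(std::unique_ptr<int> value) {
  auto wrapped = move_by_copy_util::cmove(value);  // "copies" of wrapped actually move
  return [wrapped]() mutable {
    std::unique_ptr<int> owned = wrapped;  // the conversion operator moves it back out
    // ... use *owned ...
  };
}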

@ -5,81 +5,78 @@
namespace {
GUID
quality_preset_guid_from_number(unsigned number) {
GUID quality_preset_guid_from_number(unsigned number) {
if (number > 7) number = 7;
switch (number) {
case 1:
default:
return NV_ENC_PRESET_P1_GUID;
case 1:
default:
return NV_ENC_PRESET_P1_GUID;
case 2:
return NV_ENC_PRESET_P2_GUID;
case 2:
return NV_ENC_PRESET_P2_GUID;
case 3:
return NV_ENC_PRESET_P3_GUID;
case 3:
return NV_ENC_PRESET_P3_GUID;
case 4:
return NV_ENC_PRESET_P4_GUID;
case 4:
return NV_ENC_PRESET_P4_GUID;
case 5:
return NV_ENC_PRESET_P5_GUID;
case 5:
return NV_ENC_PRESET_P5_GUID;
case 6:
return NV_ENC_PRESET_P6_GUID;
case 6:
return NV_ENC_PRESET_P6_GUID;
case 7:
return NV_ENC_PRESET_P7_GUID;
case 7:
return NV_ENC_PRESET_P7_GUID;
}
};
};
bool
equal_guids(const GUID &guid1, const GUID &guid2) {
bool equal_guids(const GUID &guid1, const GUID &guid2) {
return std::memcmp(&guid1, &guid2, sizeof(GUID)) == 0;
}
}
auto
quality_preset_string_from_guid(const GUID &guid) {
auto quality_preset_string_from_guid(const GUID &guid) {
if (equal_guids(guid, NV_ENC_PRESET_P1_GUID)) {
return "P1";
return "P1";
}
if (equal_guids(guid, NV_ENC_PRESET_P2_GUID)) {
return "P2";
return "P2";
}
if (equal_guids(guid, NV_ENC_PRESET_P3_GUID)) {
return "P3";
return "P3";
}
if (equal_guids(guid, NV_ENC_PRESET_P4_GUID)) {
return "P4";
return "P4";
}
if (equal_guids(guid, NV_ENC_PRESET_P5_GUID)) {
return "P5";
return "P5";
}
if (equal_guids(guid, NV_ENC_PRESET_P6_GUID)) {
return "P6";
return "P6";
}
if (equal_guids(guid, NV_ENC_PRESET_P7_GUID)) {
return "P7";
return "P7";
}
return "Unknown";
}
}
} // namespace
namespace nvenc {
nvenc_base::nvenc_base(NV_ENC_DEVICE_TYPE device_type, void *device):
device_type(device_type),
device(device) {
}
nvenc_base::nvenc_base(NV_ENC_DEVICE_TYPE device_type, void *device)
: device_type(device_type), device(device) {}
nvenc_base::~nvenc_base() {
nvenc_base::~nvenc_base() {
// Use destroy_encoder() instead
}
}
bool
nvenc_base::create_encoder(const nvenc_config &config, const video::config_t &client_config, const nvenc_colorspace_t &colorspace, NV_ENC_BUFFER_FORMAT buffer_format) {
bool nvenc_base::create_encoder(const nvenc_config &config,
const video::config_t &client_config,
const nvenc_colorspace_t &colorspace,
NV_ENC_BUFFER_FORMAT buffer_format) {
if (!nvenc && !init_library()) return false;
if (encoder) destroy_encoder();
@ -90,106 +87,129 @@ namespace nvenc {
encoder_params.buffer_format = buffer_format;
encoder_params.rfi = true;
NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS session_params = { NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER };
NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS session_params = {
NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER};
session_params.device = device;
session_params.deviceType = device_type;
session_params.apiVersion = NVENCAPI_VERSION;
if (nvenc_failed(nvenc->nvEncOpenEncodeSessionEx(&session_params, &encoder))) {
BOOST_LOG(error) << "NvEncOpenEncodeSessionEx failed";
return false;
if (nvenc_failed(
nvenc->nvEncOpenEncodeSessionEx(&session_params, &encoder))) {
BOOST_LOG(error) << "NvEncOpenEncodeSessionEx failed";
return false;
}
uint32_t encode_guid_count = 0;
if (nvenc_failed(nvenc->nvEncGetEncodeGUIDCount(encoder, &encode_guid_count))) {
BOOST_LOG(error) << "NvEncGetEncodeGUIDCount failed: " << last_error_string;
return false;
if (nvenc_failed(
nvenc->nvEncGetEncodeGUIDCount(encoder, &encode_guid_count))) {
BOOST_LOG(error) << "NvEncGetEncodeGUIDCount failed: "
<< last_error_string;
return false;
};
std::vector<GUID> encode_guids(encode_guid_count);
if (nvenc_failed(nvenc->nvEncGetEncodeGUIDs(encoder, encode_guids.data(), encode_guids.size(), &encode_guid_count))) {
BOOST_LOG(error) << "NvEncGetEncodeGUIDs failed: " << last_error_string;
return false;
if (nvenc_failed(nvenc->nvEncGetEncodeGUIDs(encoder, encode_guids.data(),
encode_guids.size(),
&encode_guid_count))) {
BOOST_LOG(error) << "NvEncGetEncodeGUIDs failed: " << last_error_string;
return false;
}
init_params = { NV_ENC_INITIALIZE_PARAMS_VER };
init_params = {NV_ENC_INITIALIZE_PARAMS_VER};
switch (client_config.videoFormat) {
case 0:
// H.264
init_params.encodeGUID = NV_ENC_CODEC_H264_GUID;
break;
case 0:
// H.264
init_params.encodeGUID = NV_ENC_CODEC_H264_GUID;
break;
case 1:
// HEVC
init_params.encodeGUID = NV_ENC_CODEC_HEVC_GUID;
break;
case 1:
// HEVC
init_params.encodeGUID = NV_ENC_CODEC_HEVC_GUID;
break;
case 2:
// AV1
init_params.encodeGUID = NV_ENC_CODEC_AV1_GUID;
break;
case 2:
// AV1
init_params.encodeGUID = NV_ENC_CODEC_AV1_GUID;
break;
default:
BOOST_LOG(error) << "NvEnc: unknown video format " << client_config.videoFormat;
return false;
default:
BOOST_LOG(error)
<< "NvEnc: unknown video format " << client_config.videoFormat;
return false;
}
{
auto search_predicate = [&](const GUID &guid) {
return equal_guids(init_params.encodeGUID, guid);
};
if (std::find_if(encode_guids.begin(), encode_guids.end(), search_predicate) == encode_guids.end()) {
BOOST_LOG(error) << "NvEnc: encoding format is not supported by the gpu";
return false;
}
auto search_predicate = [&](const GUID &guid) {
return equal_guids(init_params.encodeGUID, guid);
};
if (std::find_if(encode_guids.begin(), encode_guids.end(),
search_predicate) == encode_guids.end()) {
BOOST_LOG(error)
<< "NvEnc: encoding format is not supported by the gpu";
return false;
}
}
auto get_encoder_cap = [&](NV_ENC_CAPS cap) {
NV_ENC_CAPS_PARAM param = { NV_ENC_CAPS_PARAM_VER, cap };
int value = 0;
nvenc->nvEncGetEncodeCaps(encoder, init_params.encodeGUID, &param, &value);
return value;
NV_ENC_CAPS_PARAM param = {NV_ENC_CAPS_PARAM_VER, cap};
int value = 0;
nvenc->nvEncGetEncodeCaps(encoder, init_params.encodeGUID, &param,
&value);
return value;
};
auto buffer_is_10bit = [&]() {
return buffer_format == NV_ENC_BUFFER_FORMAT_YUV420_10BIT || buffer_format == NV_ENC_BUFFER_FORMAT_YUV444_10BIT;
return buffer_format == NV_ENC_BUFFER_FORMAT_YUV420_10BIT ||
buffer_format == NV_ENC_BUFFER_FORMAT_YUV444_10BIT;
};
auto buffer_is_yuv444 = [&]() {
return buffer_format == NV_ENC_BUFFER_FORMAT_YUV444 || buffer_format == NV_ENC_BUFFER_FORMAT_YUV444_10BIT;
return buffer_format == NV_ENC_BUFFER_FORMAT_YUV444 ||
buffer_format == NV_ENC_BUFFER_FORMAT_YUV444_10BIT;
};
{
auto supported_width = get_encoder_cap(NV_ENC_CAPS_WIDTH_MAX);
auto supported_height = get_encoder_cap(NV_ENC_CAPS_HEIGHT_MAX);
if (encoder_params.width > supported_width || encoder_params.height > supported_height) {
BOOST_LOG(error) << "NvEnc: gpu max encode resolution " << supported_width << "x" << supported_height << ", requested " << encoder_params.width << "x" << encoder_params.height;
auto supported_width = get_encoder_cap(NV_ENC_CAPS_WIDTH_MAX);
auto supported_height = get_encoder_cap(NV_ENC_CAPS_HEIGHT_MAX);
if (encoder_params.width > supported_width ||
encoder_params.height > supported_height) {
BOOST_LOG(error)
<< "NvEnc: gpu max encode resolution " << supported_width << "x"
<< supported_height << ", requested " << encoder_params.width
<< "x" << encoder_params.height;
return false;
}
}
if (buffer_is_10bit() &&
!get_encoder_cap(NV_ENC_CAPS_SUPPORT_10BIT_ENCODE)) {
BOOST_LOG(error) << "NvEnc: gpu doesn't support 10-bit encode";
return false;
}
}
if (buffer_is_10bit() && !get_encoder_cap(NV_ENC_CAPS_SUPPORT_10BIT_ENCODE)) {
BOOST_LOG(error) << "NvEnc: gpu doesn't support 10-bit encode";
return false;
if (buffer_is_yuv444() &&
!get_encoder_cap(NV_ENC_CAPS_SUPPORT_YUV444_ENCODE)) {
BOOST_LOG(error) << "NvEnc: gpu doesn't support YUV444 encode";
return false;
}
if (buffer_is_yuv444() && !get_encoder_cap(NV_ENC_CAPS_SUPPORT_YUV444_ENCODE)) {
BOOST_LOG(error) << "NvEnc: gpu doesn't support YUV444 encode";
return false;
if (async_event_handle &&
!get_encoder_cap(NV_ENC_CAPS_ASYNC_ENCODE_SUPPORT)) {
BOOST_LOG(warning) << "NvEnc: gpu doesn't support async encode";
async_event_handle = nullptr;
}
if (async_event_handle && !get_encoder_cap(NV_ENC_CAPS_ASYNC_ENCODE_SUPPORT)) {
BOOST_LOG(warning) << "NvEnc: gpu doesn't support async encode";
async_event_handle = nullptr;
}
encoder_params.rfi =
get_encoder_cap(NV_ENC_CAPS_SUPPORT_REF_PIC_INVALIDATION);
encoder_params.rfi = get_encoder_cap(NV_ENC_CAPS_SUPPORT_REF_PIC_INVALIDATION);
init_params.presetGUID = quality_preset_guid_from_number(config.quality_preset);
init_params.presetGUID =
quality_preset_guid_from_number(config.quality_preset);
init_params.tuningInfo = NV_ENC_TUNING_INFO_ULTRA_LOW_LATENCY;
init_params.enablePTD = 1;
init_params.enableEncodeAsync = async_event_handle ? 1 : 0;
init_params.enableWeightedPrediction = config.weighted_prediction && get_encoder_cap(NV_ENC_CAPS_SUPPORT_WEIGHTED_PREDICTION);
init_params.enableWeightedPrediction =
config.weighted_prediction &&
get_encoder_cap(NV_ENC_CAPS_SUPPORT_WEIGHTED_PREDICTION);
init_params.encodeWidth = encoder_params.width;
init_params.darWidth = encoder_params.width;
@ -198,10 +218,14 @@ namespace nvenc {
init_params.frameRateNum = client_config.framerate;
init_params.frameRateDen = 1;
NV_ENC_PRESET_CONFIG preset_config = { NV_ENC_PRESET_CONFIG_VER, { NV_ENC_CONFIG_VER } };
if (nvenc_failed(nvenc->nvEncGetEncodePresetConfigEx(encoder, init_params.encodeGUID, init_params.presetGUID, init_params.tuningInfo, &preset_config))) {
BOOST_LOG(error) << "NvEncGetEncodePresetConfigEx failed: " << last_error_string;
return false;
NV_ENC_PRESET_CONFIG preset_config = {NV_ENC_PRESET_CONFIG_VER,
{NV_ENC_CONFIG_VER}};
if (nvenc_failed(nvenc->nvEncGetEncodePresetConfigEx(
encoder, init_params.encodeGUID, init_params.presetGUID,
init_params.tuningInfo, &preset_config))) {
BOOST_LOG(error) << "NvEncGetEncodePresetConfigEx failed: "
<< last_error_string;
return false;
}
NV_ENC_CONFIG enc_config = preset_config.presetCfg;
@ -213,220 +237,253 @@ namespace nvenc {
enc_config.rcParams.zeroReorderDelay = 1;
enc_config.rcParams.enableLookahead = 0;
enc_config.rcParams.lowDelayKeyFrameScale = 1;
enc_config.rcParams.multiPass = config.two_pass == nvenc_two_pass::quarter_resolution ? NV_ENC_TWO_PASS_QUARTER_RESOLUTION :
config.two_pass == nvenc_two_pass::full_resolution ? NV_ENC_TWO_PASS_FULL_RESOLUTION :
NV_ENC_MULTI_PASS_DISABLED;
enc_config.rcParams.multiPass =
config.two_pass == nvenc_two_pass::quarter_resolution
? NV_ENC_TWO_PASS_QUARTER_RESOLUTION
: config.two_pass == nvenc_two_pass::full_resolution
? NV_ENC_TWO_PASS_FULL_RESOLUTION
: NV_ENC_MULTI_PASS_DISABLED;
enc_config.rcParams.enableAQ = config.adaptive_quantization;
enc_config.rcParams.averageBitRate = client_config.bitrate * 1000;
if (get_encoder_cap(NV_ENC_CAPS_SUPPORT_CUSTOM_VBV_BUF_SIZE)) {
enc_config.rcParams.vbvBufferSize = client_config.bitrate * 1000 / client_config.framerate;
enc_config.rcParams.vbvBufferSize =
client_config.bitrate * 1000 / client_config.framerate;
}
auto set_h264_hevc_common_format_config = [&](auto &format_config) {
format_config.repeatSPSPPS = 1;
format_config.idrPeriod = NVENC_INFINITE_GOPLENGTH;
format_config.sliceMode = 3;
format_config.sliceModeData = client_config.slicesPerFrame;
if (buffer_is_yuv444()) {
format_config.chromaFormatIDC = 3;
}
format_config.enableFillerDataInsertion = config.insert_filler_data;
format_config.repeatSPSPPS = 1;
format_config.idrPeriod = NVENC_INFINITE_GOPLENGTH;
format_config.sliceMode = 3;
format_config.sliceModeData = client_config.slicesPerFrame;
if (buffer_is_yuv444()) {
format_config.chromaFormatIDC = 3;
}
format_config.enableFillerDataInsertion = config.insert_filler_data;
};
auto set_ref_frames = [&](uint32_t &ref_frames_option, NV_ENC_NUM_REF_FRAMES &L0_option, uint32_t ref_frames_default) {
if (client_config.numRefFrames > 0) {
ref_frames_option = client_config.numRefFrames;
}
else {
ref_frames_option = ref_frames_default;
}
if (ref_frames_option > 0 && !get_encoder_cap(NV_ENC_CAPS_SUPPORT_MULTIPLE_REF_FRAMES)) {
ref_frames_option = 1;
encoder_params.rfi = false;
}
encoder_params.ref_frames_in_dpb = ref_frames_option;
// This limits ref frames any frame can use to 1, but allows larger buffer size for fallback if some frames are invalidated through rfi
L0_option = NV_ENC_NUM_REF_FRAMES_1;
auto set_ref_frames = [&](uint32_t &ref_frames_option,
NV_ENC_NUM_REF_FRAMES &L0_option,
uint32_t ref_frames_default) {
if (client_config.numRefFrames > 0) {
ref_frames_option = client_config.numRefFrames;
} else {
ref_frames_option = ref_frames_default;
}
if (ref_frames_option > 0 &&
!get_encoder_cap(NV_ENC_CAPS_SUPPORT_MULTIPLE_REF_FRAMES)) {
ref_frames_option = 1;
encoder_params.rfi = false;
}
encoder_params.ref_frames_in_dpb = ref_frames_option;
// This limits ref frames any frame can use to 1, but allows larger
// buffer size for fallback if some frames are invalidated through rfi
L0_option = NV_ENC_NUM_REF_FRAMES_1;
};
auto set_minqp_if_enabled = [&](int value) {
if (config.enable_min_qp) {
enc_config.rcParams.enableMinQP = 1;
enc_config.rcParams.minQP.qpInterP = value;
enc_config.rcParams.minQP.qpIntra = value;
}
if (config.enable_min_qp) {
enc_config.rcParams.enableMinQP = 1;
enc_config.rcParams.minQP.qpInterP = value;
enc_config.rcParams.minQP.qpIntra = value;
}
};
auto fill_h264_hevc_vui = [&colorspace](auto &vui_config) {
vui_config.videoSignalTypePresentFlag = 1;
vui_config.videoFormat = NV_ENC_VUI_VIDEO_FORMAT_UNSPECIFIED;
vui_config.videoFullRangeFlag = colorspace.full_range;
vui_config.colourDescriptionPresentFlag = 1;
vui_config.colourPrimaries = colorspace.primaries;
vui_config.transferCharacteristics = colorspace.tranfer_function;
vui_config.colourMatrix = colorspace.matrix;
vui_config.chromaSampleLocationFlag = 1;
vui_config.chromaSampleLocationTop = 0;
vui_config.chromaSampleLocationBot = 0;
vui_config.videoSignalTypePresentFlag = 1;
vui_config.videoFormat = NV_ENC_VUI_VIDEO_FORMAT_UNSPECIFIED;
vui_config.videoFullRangeFlag = colorspace.full_range;
vui_config.colourDescriptionPresentFlag = 1;
vui_config.colourPrimaries = colorspace.primaries;
vui_config.transferCharacteristics = colorspace.tranfer_function;
vui_config.colourMatrix = colorspace.matrix;
vui_config.chromaSampleLocationFlag = 1;
vui_config.chromaSampleLocationTop = 0;
vui_config.chromaSampleLocationBot = 0;
};
switch (client_config.videoFormat) {
case 0: {
// H.264
enc_config.profileGUID = buffer_is_yuv444() ? NV_ENC_H264_PROFILE_HIGH_444_GUID : NV_ENC_H264_PROFILE_HIGH_GUID;
auto &format_config = enc_config.encodeCodecConfig.h264Config;
set_h264_hevc_common_format_config(format_config);
if (config.h264_cavlc || !get_encoder_cap(NV_ENC_CAPS_SUPPORT_CABAC)) {
format_config.entropyCodingMode = NV_ENC_H264_ENTROPY_CODING_MODE_CAVLC;
case 0: {
// H.264
enc_config.profileGUID = buffer_is_yuv444()
? NV_ENC_H264_PROFILE_HIGH_444_GUID
: NV_ENC_H264_PROFILE_HIGH_GUID;
auto &format_config = enc_config.encodeCodecConfig.h264Config;
set_h264_hevc_common_format_config(format_config);
if (config.h264_cavlc ||
!get_encoder_cap(NV_ENC_CAPS_SUPPORT_CABAC)) {
format_config.entropyCodingMode =
NV_ENC_H264_ENTROPY_CODING_MODE_CAVLC;
} else {
format_config.entropyCodingMode =
NV_ENC_H264_ENTROPY_CODING_MODE_CABAC;
}
set_ref_frames(format_config.maxNumRefFrames,
format_config.numRefL0, 5);
set_minqp_if_enabled(config.min_qp_h264);
fill_h264_hevc_vui(format_config.h264VUIParameters);
break;
}
else {
format_config.entropyCodingMode = NV_ENC_H264_ENTROPY_CODING_MODE_CABAC;
}
set_ref_frames(format_config.maxNumRefFrames, format_config.numRefL0, 5);
set_minqp_if_enabled(config.min_qp_h264);
fill_h264_hevc_vui(format_config.h264VUIParameters);
break;
}
case 1: {
// HEVC
auto &format_config = enc_config.encodeCodecConfig.hevcConfig;
set_h264_hevc_common_format_config(format_config);
if (buffer_is_10bit()) {
format_config.pixelBitDepthMinus8 = 2;
case 1: {
// HEVC
auto &format_config = enc_config.encodeCodecConfig.hevcConfig;
set_h264_hevc_common_format_config(format_config);
if (buffer_is_10bit()) {
format_config.pixelBitDepthMinus8 = 2;
}
set_ref_frames(format_config.maxNumRefFramesInDPB,
format_config.numRefL0, 5);
set_minqp_if_enabled(config.min_qp_hevc);
fill_h264_hevc_vui(format_config.hevcVUIParameters);
break;
}
set_ref_frames(format_config.maxNumRefFramesInDPB, format_config.numRefL0, 5);
set_minqp_if_enabled(config.min_qp_hevc);
fill_h264_hevc_vui(format_config.hevcVUIParameters);
break;
}
case 2: {
// AV1
auto &format_config = enc_config.encodeCodecConfig.av1Config;
format_config.repeatSeqHdr = 1;
format_config.idrPeriod = NVENC_INFINITE_GOPLENGTH;
format_config.chromaFormatIDC = 1; // YUV444 not supported by NVENC yet
format_config.enableBitstreamPadding = config.insert_filler_data;
if (buffer_is_10bit()) {
format_config.inputPixelBitDepthMinus8 = 2;
format_config.pixelBitDepthMinus8 = 2;
}
format_config.colorPrimaries = colorspace.primaries;
format_config.transferCharacteristics = colorspace.tranfer_function;
format_config.matrixCoefficients = colorspace.matrix;
format_config.colorRange = colorspace.full_range;
format_config.chromaSamplePosition = 1;
set_ref_frames(format_config.maxNumRefFramesInDPB, format_config.numFwdRefs, 8);
set_minqp_if_enabled(config.min_qp_av1);
case 2: {
// AV1
auto &format_config = enc_config.encodeCodecConfig.av1Config;
format_config.repeatSeqHdr = 1;
format_config.idrPeriod = NVENC_INFINITE_GOPLENGTH;
format_config.chromaFormatIDC =
1; // YUV444 not supported by NVENC yet
format_config.enableBitstreamPadding = config.insert_filler_data;
if (buffer_is_10bit()) {
format_config.inputPixelBitDepthMinus8 = 2;
format_config.pixelBitDepthMinus8 = 2;
}
format_config.colorPrimaries = colorspace.primaries;
format_config.transferCharacteristics = colorspace.tranfer_function;
format_config.matrixCoefficients = colorspace.matrix;
format_config.colorRange = colorspace.full_range;
format_config.chromaSamplePosition = 1;
set_ref_frames(format_config.maxNumRefFramesInDPB,
format_config.numFwdRefs, 8);
set_minqp_if_enabled(config.min_qp_av1);
if (client_config.slicesPerFrame > 1) {
// NVENC only supports slice counts that are powers of two, so we'll pick powers of two
// with a bias toward rows, since macroblocks are likely more similar within a row than within a column.
format_config.numTileRows = std::pow(2, std::ceil(std::log2(client_config.slicesPerFrame) / 2));
format_config.numTileColumns = std::pow(2, std::floor(std::log2(client_config.slicesPerFrame) / 2));
if (client_config.slicesPerFrame > 1) {
// NVENC only supports slice counts that are powers of two, so
// we'll pick powers of two with a bias toward rows, since
// macroblocks are likely more similar within a row than within a column.
format_config.numTileRows = std::pow(
2, std::ceil(std::log2(client_config.slicesPerFrame) / 2));
format_config.numTileColumns = std::pow(
2, std::floor(std::log2(client_config.slicesPerFrame) / 2));
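// Worked example (illustrative): slicesPerFrame = 8 gives log2(8) = 3, so
// numTileRows = 2^ceil(3/2) = 4 and numTileColumns = 2^floor(3/2) = 2,
// i.e. an 8-tile 4x2 grid with the extra factor of two going to rows.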
}
break;
}
break;
}
}
init_params.encodeConfig = &enc_config;
if (nvenc_failed(nvenc->nvEncInitializeEncoder(encoder, &init_params))) {
BOOST_LOG(error) << "NvEncInitializeEncoder failed: " << last_error_string;
return false;
BOOST_LOG(error) << "NvEncInitializeEncoder failed: "
<< last_error_string;
return false;
}
if (async_event_handle) {
NV_ENC_EVENT_PARAMS event_params = { NV_ENC_EVENT_PARAMS_VER };
event_params.completionEvent = async_event_handle;
if (nvenc_failed(nvenc->nvEncRegisterAsyncEvent(encoder, &event_params))) {
BOOST_LOG(error) << "NvEncRegisterAsyncEvent failed: " << last_error_string;
return false;
}
NV_ENC_EVENT_PARAMS event_params = {NV_ENC_EVENT_PARAMS_VER};
event_params.completionEvent = async_event_handle;
if (nvenc_failed(
nvenc->nvEncRegisterAsyncEvent(encoder, &event_params))) {
BOOST_LOG(error)
<< "NvEncRegisterAsyncEvent failed: " << last_error_string;
return false;
}
}
NV_ENC_CREATE_BITSTREAM_BUFFER create_bitstream_buffer = { NV_ENC_CREATE_BITSTREAM_BUFFER_VER };
if (nvenc_failed(nvenc->nvEncCreateBitstreamBuffer(encoder, &create_bitstream_buffer))) {
BOOST_LOG(error) << "NvEncCreateBitstreamBuffer failed: " << last_error_string;
return false;
NV_ENC_CREATE_BITSTREAM_BUFFER create_bitstream_buffer = {
NV_ENC_CREATE_BITSTREAM_BUFFER_VER};
if (nvenc_failed(nvenc->nvEncCreateBitstreamBuffer(
encoder, &create_bitstream_buffer))) {
BOOST_LOG(error) << "NvEncCreateBitstreamBuffer failed: "
<< last_error_string;
return false;
}
output_bitstream = create_bitstream_buffer.bitstreamBuffer;
if (!create_and_register_input_buffer()) {
return false;
return false;
}
{
// auto f = stat_trackers::one_digit_after_decimal();
// BOOST_LOG(debug) << "NvEnc: requested encoded frame size " << f % (client_config.bitrate / 8. / client_config.framerate) << " kB";
// auto f = stat_trackers::one_digit_after_decimal();
// BOOST_LOG(debug) << "NvEnc: requested encoded frame size " << f %
// (client_config.bitrate / 8. / client_config.framerate) << " kB";
}
{
std::string extra;
if (init_params.enableEncodeAsync) extra += " async";
if (buffer_is_10bit()) extra += " 10-bit";
if (enc_config.rcParams.multiPass != NV_ENC_MULTI_PASS_DISABLED) extra += " two-pass";
if (encoder_params.rfi) extra += " rfi";
if (init_params.enableWeightedPrediction) extra += " weighted-prediction";
if (enc_config.rcParams.enableAQ) extra += " adaptive-quantization";
if (enc_config.rcParams.enableMinQP) extra += " qpmin=" + std::to_string(enc_config.rcParams.minQP.qpInterP);
if (config.insert_filler_data) extra += " filler-data";
BOOST_LOG(info) << "NvEnc: created encoder " << quality_preset_string_from_guid(init_params.presetGUID) << extra;
std::string extra;
if (init_params.enableEncodeAsync) extra += " async";
if (buffer_is_10bit()) extra += " 10-bit";
if (enc_config.rcParams.multiPass != NV_ENC_MULTI_PASS_DISABLED)
extra += " two-pass";
if (encoder_params.rfi) extra += " rfi";
if (init_params.enableWeightedPrediction)
extra += " weighted-prediction";
if (enc_config.rcParams.enableAQ) extra += " adaptive-quantization";
if (enc_config.rcParams.enableMinQP)
extra +=
" qpmin=" + std::to_string(enc_config.rcParams.minQP.qpInterP);
if (config.insert_filler_data) extra += " filler-data";
BOOST_LOG(info) << "NvEnc: created encoder "
<< quality_preset_string_from_guid(
init_params.presetGUID)
<< extra;
}
encoder_state = {};
fail_guard.disable();
return true;
}
}
void
nvenc_base::destroy_encoder() {
void nvenc_base::destroy_encoder() {
if (output_bitstream) {
nvenc->nvEncDestroyBitstreamBuffer(encoder, output_bitstream);
output_bitstream = nullptr;
nvenc->nvEncDestroyBitstreamBuffer(encoder, output_bitstream);
output_bitstream = nullptr;
}
if (encoder && async_event_handle) {
NV_ENC_EVENT_PARAMS event_params = { NV_ENC_EVENT_PARAMS_VER };
event_params.completionEvent = async_event_handle;
nvenc->nvEncUnregisterAsyncEvent(encoder, &event_params);
NV_ENC_EVENT_PARAMS event_params = {NV_ENC_EVENT_PARAMS_VER};
event_params.completionEvent = async_event_handle;
nvenc->nvEncUnregisterAsyncEvent(encoder, &event_params);
}
if (registered_input_buffer) {
nvenc->nvEncUnregisterResource(encoder, registered_input_buffer);
registered_input_buffer = nullptr;
nvenc->nvEncUnregisterResource(encoder, registered_input_buffer);
registered_input_buffer = nullptr;
}
if (encoder) {
nvenc->nvEncDestroyEncoder(encoder);
encoder = nullptr;
nvenc->nvEncDestroyEncoder(encoder);
encoder = nullptr;
}
encoder_state = {};
encoder_params = {};
}
}
nvenc_encoded_frame
nvenc_base::encode_frame(uint64_t frame_index, bool force_idr) {
nvenc_encoded_frame nvenc_base::encode_frame(uint64_t frame_index,
bool force_idr) {
if (!encoder) {
return {};
return {};
}
assert(registered_input_buffer);
assert(output_bitstream);
NV_ENC_MAP_INPUT_RESOURCE mapped_input_buffer = { NV_ENC_MAP_INPUT_RESOURCE_VER };
NV_ENC_MAP_INPUT_RESOURCE mapped_input_buffer = {
NV_ENC_MAP_INPUT_RESOURCE_VER};
mapped_input_buffer.registeredResource = registered_input_buffer;
if (nvenc_failed(nvenc->nvEncMapInputResource(encoder, &mapped_input_buffer))) {
BOOST_LOG(error) << "NvEncMapInputResource failed: " << last_error_string;
return {};
if (nvenc_failed(
nvenc->nvEncMapInputResource(encoder, &mapped_input_buffer))) {
BOOST_LOG(error) << "NvEncMapInputResource failed: "
<< last_error_string;
return {};
}
auto unmap_guard = util::fail_guard([&] { nvenc->nvEncUnmapInputResource(encoder, &mapped_input_buffer); });
auto unmap_guard = util::fail_guard(
[&] { nvenc->nvEncUnmapInputResource(encoder, &mapped_input_buffer); });
NV_ENC_PIC_PARAMS pic_params = { NV_ENC_PIC_PARAMS_VER };
NV_ENC_PIC_PARAMS pic_params = {NV_ENC_PIC_PARAMS_VER};
pic_params.inputWidth = encoder_params.width;
pic_params.inputHeight = encoder_params.height;
pic_params.encodePicFlags = force_idr ? NV_ENC_PIC_FLAG_FORCEIDR : 0;
@ -438,112 +495,117 @@ namespace nvenc {
pic_params.completionEvent = async_event_handle;
if (nvenc_failed(nvenc->nvEncEncodePicture(encoder, &pic_params))) {
BOOST_LOG(error) << "NvEncEncodePicture failed: " << last_error_string;
return {};
BOOST_LOG(error) << "NvEncEncodePicture failed: " << last_error_string;
return {};
}
NV_ENC_LOCK_BITSTREAM lock_bitstream = { NV_ENC_LOCK_BITSTREAM_VER };
NV_ENC_LOCK_BITSTREAM lock_bitstream = {NV_ENC_LOCK_BITSTREAM_VER};
lock_bitstream.outputBitstream = output_bitstream;
lock_bitstream.doNotWait = 0;
if (async_event_handle && !wait_for_async_event(100)) {
BOOST_LOG(error) << "NvEnc: frame " << frame_index << " encode wait timeout";
return {};
BOOST_LOG(error) << "NvEnc: frame " << frame_index
<< " encode wait timeout";
return {};
}
if (nvenc_failed(nvenc->nvEncLockBitstream(encoder, &lock_bitstream))) {
BOOST_LOG(error) << "NvEncLockBitstream failed: " << last_error_string;
return {};
BOOST_LOG(error) << "NvEncLockBitstream failed: " << last_error_string;
return {};
}
auto data_pointer = (uint8_t *) lock_bitstream.bitstreamBufferPtr;
nvenc_encoded_frame encoded_frame {
{ data_pointer, data_pointer + lock_bitstream.bitstreamSizeInBytes },
lock_bitstream.outputTimeStamp,
lock_bitstream.pictureType == NV_ENC_PIC_TYPE_IDR,
encoder_state.rfi_needs_confirmation,
auto data_pointer = (uint8_t *)lock_bitstream.bitstreamBufferPtr;
nvenc_encoded_frame encoded_frame{
{data_pointer, data_pointer + lock_bitstream.bitstreamSizeInBytes},
lock_bitstream.outputTimeStamp,
lock_bitstream.pictureType == NV_ENC_PIC_TYPE_IDR,
encoder_state.rfi_needs_confirmation,
};
if (encoder_state.rfi_needs_confirmation) {
// Invalidation request has been fulfilled, and video network packet will be marked as such
encoder_state.rfi_needs_confirmation = false;
// Invalidation request has been fulfilled, and video network packet
// will be marked as such
encoder_state.rfi_needs_confirmation = false;
}
encoder_state.last_encoded_frame_index = frame_index;
if (encoded_frame.idr) {
BOOST_LOG(debug) << "NvEnc: idr frame " << encoded_frame.frame_index;
BOOST_LOG(debug) << "NvEnc: idr frame " << encoded_frame.frame_index;
}
if (nvenc_failed(nvenc->nvEncUnlockBitstream(encoder, lock_bitstream.outputBitstream))) {
BOOST_LOG(error) << "NvEncUnlockBitstream failed: " << last_error_string;
if (nvenc_failed(nvenc->nvEncUnlockBitstream(
encoder, lock_bitstream.outputBitstream))) {
BOOST_LOG(error) << "NvEncUnlockBitstream failed: "
<< last_error_string;
}
// if (config::sunshine.min_log_level <= 1) {
// // Print encoded frame size stats to debug log every 20 seconds
// auto callback = [&](float stat_min, float stat_max, double stat_avg) {
// auto f = stat_trackers::one_digit_after_decimal();
// BOOST_LOG(debug) << "NvEnc: encoded frame sizes (min max avg) " << f % stat_min << " " << f % stat_max << " " << f % stat_avg << " kB";
// BOOST_LOG(debug) << "NvEnc: encoded frame sizes (min max avg) " << f
// % stat_min << " " << f % stat_max << " " << f % stat_avg << " kB";
// };
// using namespace std::literals;
// encoder_state.frame_size_tracker.collect_and_callback_on_interval(encoded_frame.data.size() / 1000., callback, 20s);
// encoder_state.frame_size_tracker.collect_and_callback_on_interval(encoded_frame.data.size()
// / 1000., callback, 20s);
// }
return encoded_frame;
}
}
bool
nvenc_base::nvenc_failed(NVENCSTATUS status) {
bool nvenc_base::nvenc_failed(NVENCSTATUS status) {
auto status_string = [](NVENCSTATUS status) -> std::string {
switch (status) {
switch (status) {
#define nvenc_status_case(x) \
case x: \
return #x;
nvenc_status_case(NV_ENC_SUCCESS);
nvenc_status_case(NV_ENC_ERR_NO_ENCODE_DEVICE);
nvenc_status_case(NV_ENC_ERR_UNSUPPORTED_DEVICE);
nvenc_status_case(NV_ENC_ERR_INVALID_ENCODERDEVICE);
nvenc_status_case(NV_ENC_ERR_INVALID_DEVICE);
nvenc_status_case(NV_ENC_ERR_DEVICE_NOT_EXIST);
nvenc_status_case(NV_ENC_ERR_INVALID_PTR);
nvenc_status_case(NV_ENC_ERR_INVALID_EVENT);
nvenc_status_case(NV_ENC_ERR_INVALID_PARAM);
nvenc_status_case(NV_ENC_ERR_INVALID_CALL);
nvenc_status_case(NV_ENC_ERR_OUT_OF_MEMORY);
nvenc_status_case(NV_ENC_ERR_ENCODER_NOT_INITIALIZED);
nvenc_status_case(NV_ENC_ERR_UNSUPPORTED_PARAM);
nvenc_status_case(NV_ENC_ERR_LOCK_BUSY);
nvenc_status_case(NV_ENC_ERR_NOT_ENOUGH_BUFFER);
nvenc_status_case(NV_ENC_ERR_INVALID_VERSION);
nvenc_status_case(NV_ENC_ERR_MAP_FAILED);
nvenc_status_case(NV_ENC_ERR_NEED_MORE_INPUT);
nvenc_status_case(NV_ENC_ERR_ENCODER_BUSY);
nvenc_status_case(NV_ENC_ERR_EVENT_NOT_REGISTERD);
nvenc_status_case(NV_ENC_ERR_GENERIC);
nvenc_status_case(NV_ENC_ERR_INCOMPATIBLE_CLIENT_KEY);
nvenc_status_case(NV_ENC_ERR_UNIMPLEMENTED);
nvenc_status_case(NV_ENC_ERR_RESOURCE_REGISTER_FAILED);
nvenc_status_case(NV_ENC_ERR_RESOURCE_NOT_REGISTERED);
nvenc_status_case(NV_ENC_ERR_RESOURCE_NOT_MAPPED);
// Newer versions of the SDK may add more constants; look for them at the end of the NVENCSTATUS enum
case x: \
return #x;
nvenc_status_case(NV_ENC_SUCCESS);
nvenc_status_case(NV_ENC_ERR_NO_ENCODE_DEVICE);
nvenc_status_case(NV_ENC_ERR_UNSUPPORTED_DEVICE);
nvenc_status_case(NV_ENC_ERR_INVALID_ENCODERDEVICE);
nvenc_status_case(NV_ENC_ERR_INVALID_DEVICE);
nvenc_status_case(NV_ENC_ERR_DEVICE_NOT_EXIST);
nvenc_status_case(NV_ENC_ERR_INVALID_PTR);
nvenc_status_case(NV_ENC_ERR_INVALID_EVENT);
nvenc_status_case(NV_ENC_ERR_INVALID_PARAM);
nvenc_status_case(NV_ENC_ERR_INVALID_CALL);
nvenc_status_case(NV_ENC_ERR_OUT_OF_MEMORY);
nvenc_status_case(NV_ENC_ERR_ENCODER_NOT_INITIALIZED);
nvenc_status_case(NV_ENC_ERR_UNSUPPORTED_PARAM);
nvenc_status_case(NV_ENC_ERR_LOCK_BUSY);
nvenc_status_case(NV_ENC_ERR_NOT_ENOUGH_BUFFER);
nvenc_status_case(NV_ENC_ERR_INVALID_VERSION);
nvenc_status_case(NV_ENC_ERR_MAP_FAILED);
nvenc_status_case(NV_ENC_ERR_NEED_MORE_INPUT);
nvenc_status_case(NV_ENC_ERR_ENCODER_BUSY);
nvenc_status_case(NV_ENC_ERR_EVENT_NOT_REGISTERD);
nvenc_status_case(NV_ENC_ERR_GENERIC);
nvenc_status_case(NV_ENC_ERR_INCOMPATIBLE_CLIENT_KEY);
nvenc_status_case(NV_ENC_ERR_UNIMPLEMENTED);
nvenc_status_case(NV_ENC_ERR_RESOURCE_REGISTER_FAILED);
nvenc_status_case(NV_ENC_ERR_RESOURCE_NOT_REGISTERED);
nvenc_status_case(NV_ENC_ERR_RESOURCE_NOT_MAPPED);
// Newer versions of the SDK may add more constants; look for them at the
// end of the NVENCSTATUS enum
#undef nvenc_status_case
default:
return std::to_string(status);
}
default:
return std::to_string(status);
}
};
last_error_string.clear();
if (status != NV_ENC_SUCCESS) {
if (nvenc && encoder) {
last_error_string = nvenc->nvEncGetLastErrorString(encoder);
if (!last_error_string.empty()) last_error_string += " ";
}
last_error_string += status_string(status);
return true;
if (nvenc && encoder) {
last_error_string = nvenc->nvEncGetLastErrorString(encoder);
if (!last_error_string.empty()) last_error_string += " ";
}
last_error_string += status_string(status);
return true;
}
return false;
}
}
} // namespace nvenc

View File

@@ -1,46 +1,39 @@
#pragma once
#include <ffnvcodec/nvEncodeAPI.h>
#include "nvenc_colorspace.h"
#include "nvenc_config.h"
#include "nvenc_encoded_frame.h"
#include "src/video.h"
#include <ffnvcodec/nvEncodeAPI.h>
namespace nvenc {
class nvenc_base {
public:
class nvenc_base {
public:
nvenc_base(NV_ENC_DEVICE_TYPE device_type, void *device);
virtual ~nvenc_base();
nvenc_base(const nvenc_base &) = delete;
nvenc_base &
operator=(const nvenc_base &) = delete;
nvenc_base &operator=(const nvenc_base &) = delete;
bool
create_encoder(const nvenc_config &config, const video::config_t &client_config, const nvenc_colorspace_t &colorspace, NV_ENC_BUFFER_FORMAT buffer_format);
bool create_encoder(const nvenc_config &config,
const video::config_t &client_config,
const nvenc_colorspace_t &colorspace,
NV_ENC_BUFFER_FORMAT buffer_format);
void
destroy_encoder();
void destroy_encoder();
nvenc_encoded_frame encode_frame(uint64_t frame_index, bool force_idr);
nvenc_encoded_frame
encode_frame(uint64_t frame_index, bool force_idr);
protected:
virtual bool init_library() = 0;
protected:
virtual bool
init_library() = 0;
virtual bool create_and_register_input_buffer() = 0;
virtual bool
create_and_register_input_buffer() = 0;
virtual bool wait_for_async_event(uint32_t timeout_ms) { return false; }
virtual bool
wait_for_async_event(uint32_t timeout_ms) { return false; }
bool
nvenc_failed(NVENCSTATUS status);
bool nvenc_failed(NVENCSTATUS status);
const NV_ENC_DEVICE_TYPE device_type;
void *const device;
@@ -51,11 +44,11 @@ namespace nvenc {
NV_ENC_INITIALIZE_PARAMS init_params;
struct {
uint32_t width = 0;
uint32_t height = 0;
NV_ENC_BUFFER_FORMAT buffer_format = NV_ENC_BUFFER_FORMAT_UNDEFINED;
uint32_t ref_frames_in_dpb = 0;
bool rfi = false;
uint32_t width = 0;
uint32_t height = 0;
NV_ENC_BUFFER_FORMAT buffer_format = NV_ENC_BUFFER_FORMAT_UNDEFINED;
uint32_t ref_frames_in_dpb = 0;
bool rfi = false;
} encoder_params;
// Derived classes set these variables
@@ -64,15 +57,15 @@ namespace nvenc {
std::string last_error_string;
private:
private:
NV_ENC_OUTPUT_PTR output_bitstream = nullptr;
struct {
uint64_t last_encoded_frame_index = 0;
bool rfi_needs_confirmation = false;
std::pair<uint64_t, uint64_t> last_rfi_range;
// stat_trackers::min_max_avg_tracker<float> frame_size_tracker;
uint64_t last_encoded_frame_index = 0;
bool rfi_needs_confirmation = false;
std::pair<uint64_t, uint64_t> last_rfi_range;
// stat_trackers::min_max_avg_tracker<float> frame_size_tracker;
} encoder_state;
};
};
} // namespace nvenc
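
Since this header only declares the interface, a minimal usage sketch may help orient readers; it assumes a concrete subclass (such as the D3D11 backend below) and already-prepared config, client_config and colorspace values, none of which are specified by this commit:

#include <cstdint>

#include "nvenc_base.h"
#include "nvenc_config.h"

// Minimal sketch: drive the public nvenc_base interface declared above.
// The frame count and IDR policy are illustrative only.
bool run_example_session(nvenc::nvenc_base &encoder,
                         const nvenc::nvenc_config &config,
                         const video::config_t &client_config,
                         const nvenc::nvenc_colorspace_t &colorspace,
                         NV_ENC_BUFFER_FORMAT buffer_format) {
  if (!encoder.create_encoder(config, client_config, colorspace, buffer_format)) {
    return false;
  }
  for (uint64_t frame_index = 1; frame_index <= 3; ++frame_index) {
    // Request an IDR frame for the first frame of the session
    auto frame = encoder.encode_frame(frame_index, frame_index == 1);
    if (frame.data.empty()) {
      encoder.destroy_encoder();
      return false;  // encode_frame() returns an empty frame on failure
    }
  }
  encoder.destroy_encoder();
  return true;
}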

View File

@@ -3,10 +3,10 @@
#include <ffnvcodec/nvEncodeAPI.h>
namespace nvenc {
struct nvenc_colorspace_t {
struct nvenc_colorspace_t {
NV_ENC_VUI_COLOR_PRIMARIES primaries;
NV_ENC_VUI_TRANSFER_CHARACTERISTIC tranfer_function;
NV_ENC_VUI_MATRIX_COEFFS matrix;
bool full_range;
};
};
} // namespace nvenc

View File

@@ -2,7 +2,7 @@
namespace nvenc {
enum class nvenc_two_pass {
enum class nvenc_two_pass {
// Single pass, the fastest and no extra vram
disabled,
@@ -11,22 +11,25 @@ namespace nvenc {
// Better overall statistics, slower and uses more extra vram
full_resolution,
};
};
struct nvenc_config {
struct nvenc_config {
// Quality preset from 1 to 7, higher is slower
int quality_preset = 1;
// Use optional preliminary pass for better motion vectors, bitrate distribution and stricter VBV(HRD), uses CUDA cores
// Use optional preliminary pass for better motion vectors, bitrate
// distribution and stricter VBV(HRD), uses CUDA cores
nvenc_two_pass two_pass = nvenc_two_pass::quarter_resolution;
// Improves fades compression, uses CUDA cores
bool weighted_prediction = false;
// Allocate more bitrate to flat regions since they're visually more perceptible, uses CUDA cores
// Allocate more bitrate to flat regions since they're visually more
// perceptible, uses CUDA cores
bool adaptive_quantization = false;
// Don't use QP below certain value, limits peak image quality to save bitrate
// Don't use QP below certain value, limits peak image quality to save
// bitrate
bool enable_min_qp = false;
// Min QP value for H.264 when enable_min_qp is selected
@@ -38,11 +41,13 @@ namespace nvenc {
// Min QP value for AV1 when enable_min_qp is selected
unsigned min_qp_av1 = 23;
// Use CAVLC entropy coding in H.264 instead of CABAC, not relevant and here for historical reasons
// Use CAVLC entropy coding in H.264 instead of CABAC, not relevant and here
// for historical reasons
bool h264_cavlc = false;
// Add filler data to encoded frames to stay at target bitrate, mainly for testing
// Add filler data to encoded frames to stay at target bitrate, mainly for
// testing
bool insert_filler_data = false;
};
};
} // namespace nvenc
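
As a hedged illustration of how these options combine, one possible configuration; the particular values are chosen for the example and are not recommendations from this commit:

#include "nvenc_config.h"

// Illustrative: trade some speed for quality relative to the defaults above.
nvenc::nvenc_config make_example_config() {
  nvenc::nvenc_config config;
  config.quality_preset = 4;                                 // 1..7, higher is slower
  config.two_pass = nvenc::nvenc_two_pass::full_resolution;  // better statistics, more VRAM
  config.weighted_prediction = true;                         // improves fades, uses CUDA cores
  config.adaptive_quantization = true;                       // favor flat regions, uses CUDA cores
  config.enable_min_qp = true;
  config.min_qp_av1 = 23;                                    // AV1 QP floor when enable_min_qp is set
  return config;
}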

View File

@@ -1,104 +1,106 @@
#ifdef _WIN32
#include "nvenc_d3d11.h"
#include "nvenc_d3d11.h"
#include "nvenc_utils.h"
#include "nvenc_utils.h"
namespace nvenc {
nvenc_d3d11::nvenc_d3d11(ID3D11Device *d3d_device):
nvenc_base(NV_ENC_DEVICE_TYPE_DIRECTX, d3d_device),
d3d_device(d3d_device) {
}
nvenc_d3d11::nvenc_d3d11(ID3D11Device *d3d_device)
: nvenc_base(NV_ENC_DEVICE_TYPE_DIRECTX, d3d_device),
d3d_device(d3d_device) {}
nvenc_d3d11::~nvenc_d3d11() {
nvenc_d3d11::~nvenc_d3d11() {
if (encoder) destroy_encoder();
if (dll) {
FreeLibrary(dll);
dll = NULL;
FreeLibrary(dll);
dll = NULL;
}
}
}
ID3D11Texture2D *
nvenc_d3d11::get_input_texture() {
ID3D11Texture2D *nvenc_d3d11::get_input_texture() {
return d3d_input_texture.GetInterfacePtr();
}
}
bool
nvenc_d3d11::init_library() {
bool nvenc_d3d11::init_library() {
if (dll) return true;
#ifdef _WIN64
#ifdef _WIN64
auto dll_name = "nvEncodeAPI64.dll";
#else
#else
auto dll_name = "nvEncodeAPI.dll";
#endif
#endif
if ((dll = LoadLibraryEx(dll_name, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32))) {
if (auto create_instance = (decltype(NvEncodeAPICreateInstance) *) GetProcAddress(dll, "NvEncodeAPICreateInstance")) {
auto new_nvenc = std::make_unique<NV_ENCODE_API_FUNCTION_LIST>();
new_nvenc->version = NV_ENCODE_API_FUNCTION_LIST_VER;
if (nvenc_failed(create_instance(new_nvenc.get()))) {
BOOST_LOG(error) << "NvEncodeAPICreateInstance failed: " << last_error_string;
if (auto create_instance =
(decltype(NvEncodeAPICreateInstance) *)GetProcAddress(
dll, "NvEncodeAPICreateInstance")) {
auto new_nvenc = std::make_unique<NV_ENCODE_API_FUNCTION_LIST>();
new_nvenc->version = NV_ENCODE_API_FUNCTION_LIST_VER;
if (nvenc_failed(create_instance(new_nvenc.get()))) {
BOOST_LOG(error) << "NvEncodeAPICreateInstance failed: "
<< last_error_string;
} else {
nvenc = std::move(new_nvenc);
return true;
}
} else {
BOOST_LOG(error) << "No NvEncodeAPICreateInstance in " << dll_name;
}
else {
nvenc = std::move(new_nvenc);
return true;
}
}
else {
BOOST_LOG(error) << "No NvEncodeAPICreateInstance in " << dll_name;
}
}
else {
BOOST_LOG(debug) << "Couldn't load NvEnc library " << dll_name;
} else {
BOOST_LOG(debug) << "Couldn't load NvEnc library " << dll_name;
}
if (dll) {
FreeLibrary(dll);
dll = NULL;
FreeLibrary(dll);
dll = NULL;
}
return false;
}
}
bool
nvenc_d3d11::create_and_register_input_buffer() {
bool nvenc_d3d11::create_and_register_input_buffer() {
if (!d3d_input_texture) {
D3D11_TEXTURE2D_DESC desc = {};
desc.Width = encoder_params.width;
desc.Height = encoder_params.height;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = dxgi_format_from_nvenc_format(encoder_params.buffer_format);
desc.SampleDesc.Count = 1;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = D3D11_BIND_RENDER_TARGET;
if (d3d_device->CreateTexture2D(&desc, nullptr, &d3d_input_texture) != S_OK) {
BOOST_LOG(error) << "NvEnc: couldn't create input texture";
return false;
}
D3D11_TEXTURE2D_DESC desc = {};
desc.Width = encoder_params.width;
desc.Height = encoder_params.height;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format =
dxgi_format_from_nvenc_format(encoder_params.buffer_format);
desc.SampleDesc.Count = 1;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = D3D11_BIND_RENDER_TARGET;
if (d3d_device->CreateTexture2D(&desc, nullptr, &d3d_input_texture) !=
S_OK) {
BOOST_LOG(error) << "NvEnc: couldn't create input texture";
return false;
}
}
if (!registered_input_buffer) {
NV_ENC_REGISTER_RESOURCE register_resource = { NV_ENC_REGISTER_RESOURCE_VER };
register_resource.resourceType = NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX;
register_resource.width = encoder_params.width;
register_resource.height = encoder_params.height;
register_resource.resourceToRegister = d3d_input_texture.GetInterfacePtr();
register_resource.bufferFormat = encoder_params.buffer_format;
register_resource.bufferUsage = NV_ENC_INPUT_IMAGE;
NV_ENC_REGISTER_RESOURCE register_resource = {
NV_ENC_REGISTER_RESOURCE_VER};
register_resource.resourceType = NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX;
register_resource.width = encoder_params.width;
register_resource.height = encoder_params.height;
register_resource.resourceToRegister =
d3d_input_texture.GetInterfacePtr();
register_resource.bufferFormat = encoder_params.buffer_format;
register_resource.bufferUsage = NV_ENC_INPUT_IMAGE;
if (nvenc_failed(nvenc->nvEncRegisterResource(encoder, &register_resource))) {
BOOST_LOG(error) << "NvEncRegisterResource failed: " << last_error_string;
return false;
}
if (nvenc_failed(
nvenc->nvEncRegisterResource(encoder, &register_resource))) {
BOOST_LOG(error)
<< "NvEncRegisterResource failed: " << last_error_string;
return false;
}
registered_input_buffer = register_resource.registeredResource;
registered_input_buffer = register_resource.registeredResource;
}
return true;
}
}
} // namespace nvenc
#endif

View File

@@ -1,35 +1,32 @@
#pragma once
#ifdef _WIN32
#include <comdef.h>
#include <d3d11.h>
#include <comdef.h>
#include <d3d11.h>
#include "nvenc_base.h"
#include "nvenc_base.h"
namespace nvenc {
_COM_SMARTPTR_TYPEDEF(ID3D11Device, IID_ID3D11Device);
_COM_SMARTPTR_TYPEDEF(ID3D11Texture2D, IID_ID3D11Texture2D);
_COM_SMARTPTR_TYPEDEF(ID3D11Device, IID_ID3D11Device);
_COM_SMARTPTR_TYPEDEF(ID3D11Texture2D, IID_ID3D11Texture2D);
class nvenc_d3d11 final: public nvenc_base {
public:
class nvenc_d3d11 final : public nvenc_base {
public:
nvenc_d3d11(ID3D11Device *d3d_device);
~nvenc_d3d11();
ID3D11Texture2D *
get_input_texture();
ID3D11Texture2D *get_input_texture();
private:
bool
init_library() override;
private:
bool init_library() override;
bool
create_and_register_input_buffer() override;
bool create_and_register_input_buffer() override;
HMODULE dll = NULL;
const ID3D11DevicePtr d3d_device;
ID3D11Texture2DPtr d3d_input_texture;
};
};
} // namespace nvenc
#endif

View File

@@ -4,10 +4,10 @@
#include <vector>
namespace nvenc {
struct nvenc_encoded_frame {
struct nvenc_encoded_frame {
std::vector<uint8_t> data;
uint64_t frame_index = 0;
bool idr = false;
bool after_ref_frame_invalidation = false;
};
};
} // namespace nvenc

View File

@@ -3,74 +3,78 @@
namespace nvenc {
#ifdef _WIN32
DXGI_FORMAT
dxgi_format_from_nvenc_format(NV_ENC_BUFFER_FORMAT format) {
DXGI_FORMAT
dxgi_format_from_nvenc_format(NV_ENC_BUFFER_FORMAT format) {
switch (format) {
case NV_ENC_BUFFER_FORMAT_YUV420_10BIT:
return DXGI_FORMAT_P010;
case NV_ENC_BUFFER_FORMAT_YUV420_10BIT:
return DXGI_FORMAT_P010;
case NV_ENC_BUFFER_FORMAT_NV12:
return DXGI_FORMAT_NV12;
case NV_ENC_BUFFER_FORMAT_NV12:
return DXGI_FORMAT_NV12;
default:
return DXGI_FORMAT_UNKNOWN;
default:
return DXGI_FORMAT_UNKNOWN;
}
}
}
#endif
NV_ENC_BUFFER_FORMAT
nvenc_format_from_sunshine_format(platf::pix_fmt_e format) {
NV_ENC_BUFFER_FORMAT
nvenc_format_from_sunshine_format(platf::pix_fmt_e format) {
switch (format) {
case platf::pix_fmt_e::nv12:
return NV_ENC_BUFFER_FORMAT_NV12;
case platf::pix_fmt_e::nv12:
return NV_ENC_BUFFER_FORMAT_NV12;
case platf::pix_fmt_e::p010:
return NV_ENC_BUFFER_FORMAT_YUV420_10BIT;
case platf::pix_fmt_e::p010:
return NV_ENC_BUFFER_FORMAT_YUV420_10BIT;
default:
return NV_ENC_BUFFER_FORMAT_UNDEFINED;
default:
return NV_ENC_BUFFER_FORMAT_UNDEFINED;
}
}
}
nvenc_colorspace_t
nvenc_colorspace_from_sunshine_colorspace(const video::sunshine_colorspace_t &sunshine_colorspace) {
nvenc_colorspace_t nvenc_colorspace_from_sunshine_colorspace(
const video::sunshine_colorspace_t &sunshine_colorspace) {
nvenc_colorspace_t colorspace;
switch (sunshine_colorspace.colorspace) {
case video::colorspace_e::rec601:
// Rec. 601
colorspace.primaries = NV_ENC_VUI_COLOR_PRIMARIES_SMPTE170M;
colorspace.tranfer_function = NV_ENC_VUI_TRANSFER_CHARACTERISTIC_SMPTE170M;
colorspace.matrix = NV_ENC_VUI_MATRIX_COEFFS_SMPTE170M;
break;
case video::colorspace_e::rec601:
// Rec. 601
colorspace.primaries = NV_ENC_VUI_COLOR_PRIMARIES_SMPTE170M;
colorspace.tranfer_function =
NV_ENC_VUI_TRANSFER_CHARACTERISTIC_SMPTE170M;
colorspace.matrix = NV_ENC_VUI_MATRIX_COEFFS_SMPTE170M;
break;
case video::colorspace_e::rec709:
// Rec. 709
colorspace.primaries = NV_ENC_VUI_COLOR_PRIMARIES_BT709;
colorspace.tranfer_function = NV_ENC_VUI_TRANSFER_CHARACTERISTIC_BT709;
colorspace.matrix = NV_ENC_VUI_MATRIX_COEFFS_BT709;
break;
case video::colorspace_e::rec709:
// Rec. 709
colorspace.primaries = NV_ENC_VUI_COLOR_PRIMARIES_BT709;
colorspace.tranfer_function =
NV_ENC_VUI_TRANSFER_CHARACTERISTIC_BT709;
colorspace.matrix = NV_ENC_VUI_MATRIX_COEFFS_BT709;
break;
case video::colorspace_e::bt2020sdr:
// Rec. 2020
colorspace.primaries = NV_ENC_VUI_COLOR_PRIMARIES_BT2020;
assert(sunshine_colorspace.bit_depth == 10);
colorspace.tranfer_function = NV_ENC_VUI_TRANSFER_CHARACTERISTIC_BT2020_10;
colorspace.matrix = NV_ENC_VUI_MATRIX_COEFFS_BT2020_NCL;
break;
case video::colorspace_e::bt2020sdr:
// Rec. 2020
colorspace.primaries = NV_ENC_VUI_COLOR_PRIMARIES_BT2020;
assert(sunshine_colorspace.bit_depth == 10);
colorspace.tranfer_function =
NV_ENC_VUI_TRANSFER_CHARACTERISTIC_BT2020_10;
colorspace.matrix = NV_ENC_VUI_MATRIX_COEFFS_BT2020_NCL;
break;
case video::colorspace_e::bt2020:
// Rec. 2020 with ST 2084 perceptual quantizer
colorspace.primaries = NV_ENC_VUI_COLOR_PRIMARIES_BT2020;
assert(sunshine_colorspace.bit_depth == 10);
colorspace.tranfer_function = NV_ENC_VUI_TRANSFER_CHARACTERISTIC_SMPTE2084;
colorspace.matrix = NV_ENC_VUI_MATRIX_COEFFS_BT2020_NCL;
break;
case video::colorspace_e::bt2020:
// Rec. 2020 with ST 2084 perceptual quantizer
colorspace.primaries = NV_ENC_VUI_COLOR_PRIMARIES_BT2020;
assert(sunshine_colorspace.bit_depth == 10);
colorspace.tranfer_function =
NV_ENC_VUI_TRANSFER_CHARACTERISTIC_SMPTE2084;
colorspace.matrix = NV_ENC_VUI_MATRIX_COEFFS_BT2020_NCL;
break;
}
colorspace.full_range = sunshine_colorspace.full_range;
return colorspace;
}
}
} // namespace nvenc
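
A short, hedged usage sketch of the conversion helpers above; the colorspace values are made up for illustration:

#include "nvenc_utils.h"

// Illustrative: map a Sunshine pixel format and colorspace to their NVENC equivalents.
nvenc::nvenc_colorspace_t example_conversion() {
  video::sunshine_colorspace_t sunshine_colorspace{};
  sunshine_colorspace.colorspace = video::colorspace_e::rec709;
  sunshine_colorspace.bit_depth = 8;
  sunshine_colorspace.full_range = false;

  // nv12 maps to NV_ENC_BUFFER_FORMAT_NV12 per nvenc_format_from_sunshine_format()
  auto buffer_format = nvenc::nvenc_format_from_sunshine_format(platf::pix_fmt_e::nv12);
  (void)buffer_format;

  // Rec. 709 limited range maps to the BT709 VUI constants per the switch above
  return nvenc::nvenc_colorspace_from_sunshine_colorspace(sunshine_colorspace);
}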

View File

@@ -1,27 +1,26 @@
#pragma once
#ifdef _WIN32
#include <dxgiformat.h>
#include <dxgiformat.h>
#endif
#include "nvenc_colorspace.h"
#include <ffnvcodec/nvEncodeAPI.h>
#include "nvenc_colorspace.h"
#include "src/platform/common.h"
#include "src/video_colorspace.h"
#include <ffnvcodec/nvEncodeAPI.h>
namespace nvenc {
#ifdef _WIN32
DXGI_FORMAT
dxgi_format_from_nvenc_format(NV_ENC_BUFFER_FORMAT format);
DXGI_FORMAT
dxgi_format_from_nvenc_format(NV_ENC_BUFFER_FORMAT format);
#endif
NV_ENC_BUFFER_FORMAT
nvenc_format_from_sunshine_format(platf::pix_fmt_e format);
NV_ENC_BUFFER_FORMAT
nvenc_format_from_sunshine_format(platf::pix_fmt_e format);
nvenc_colorspace_t
nvenc_colorspace_from_sunshine_colorspace(const video::sunshine_colorspace_t &sunshine_colorspace);
nvenc_colorspace_t nvenc_colorspace_from_sunshine_colorspace(
const video::sunshine_colorspace_t &sunshine_colorspace);
} // namespace nvenc

View File

@@ -15,225 +15,200 @@
#include "src/utility.h"
#include "src/video_colorspace.h"
extern "C" {
}
extern "C" {}
typedef struct _SS_HDR_METADATA {
// RGB order
struct {
uint16_t x; // Normalized to 50,000
uint16_t y; // Normalized to 50,000
uint16_t x; // Normalized to 50,000
uint16_t y; // Normalized to 50,000
} displayPrimaries[3];
struct {
uint16_t x; // Normalized to 50,000
uint16_t y; // Normalized to 50,000
uint16_t x; // Normalized to 50,000
uint16_t y; // Normalized to 50,000
} whitePoint;
uint16_t maxDisplayLuminance; // Nits
uint16_t minDisplayLuminance; // 1/10000th of a nit
uint16_t maxDisplayLuminance; // Nits
uint16_t minDisplayLuminance; // 1/10000th of a nit
// These are content-specific values which may not be available for all hosts.
uint16_t maxContentLightLevel; // Nits
uint16_t maxFrameAverageLightLevel; // Nits
// These are content-specific values which may not be available for all
// hosts.
uint16_t maxContentLightLevel; // Nits
uint16_t maxFrameAverageLightLevel; // Nits
// These are display-specific values which may not be available for all hosts.
uint16_t maxFullFrameLuminance; // Nits
// These are display-specific values which may not be available for all
// hosts.
uint16_t maxFullFrameLuminance; // Nits
} SS_HDR_METADATA, *PSS_HDR_METADATA;
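
To make the normalization comments above concrete: a primary at x = 0.708 is stored as 0.708 * 50000 = 35400, and a 0.05-nit minimum luminance as 0.05 * 10000 = 500. A hedged example with BT.2020-style values (illustrative, not taken from this commit):

// Illustrative HDR10-style metadata; all numbers are example values.
SS_HDR_METADATA make_example_hdr_metadata() {
  SS_HDR_METADATA metadata = {};
  // RGB order, chromaticities normalized to 50,000:
  // red (0.708, 0.292), green (0.170, 0.797), blue (0.131, 0.046)
  metadata.displayPrimaries[0] = {35400, 14600};
  metadata.displayPrimaries[1] = {8500, 39850};
  metadata.displayPrimaries[2] = {6550, 2300};
  // D65 white point (0.3127, 0.3290)
  metadata.whitePoint = {15635, 16450};
  metadata.maxDisplayLuminance = 1000;   // nits
  metadata.minDisplayLuminance = 500;    // 0.05 nits in 1/10000th-of-a-nit units
  metadata.maxContentLightLevel = 1000;  // nits
  metadata.maxFrameAverageLightLevel = 400;
  return metadata;
}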
struct sockaddr;
struct AVFrame;
struct AVBufferRef;
struct AVHWFramesContext;
namespace video {
struct config_t;
struct config_t;
} // namespace video
namespace nvenc {
class nvenc_base;
class nvenc_base;
}
namespace platf {
// Limited by bits in activeGamepadMask
constexpr auto MAX_GAMEPADS = 16;
// Limited by bits in activeGamepadMask
constexpr auto MAX_GAMEPADS = 16;
constexpr std::uint32_t DPAD_UP = 0x0001;
constexpr std::uint32_t DPAD_DOWN = 0x0002;
constexpr std::uint32_t DPAD_LEFT = 0x0004;
constexpr std::uint32_t DPAD_RIGHT = 0x0008;
constexpr std::uint32_t START = 0x0010;
constexpr std::uint32_t BACK = 0x0020;
constexpr std::uint32_t LEFT_STICK = 0x0040;
constexpr std::uint32_t RIGHT_STICK = 0x0080;
constexpr std::uint32_t LEFT_BUTTON = 0x0100;
constexpr std::uint32_t RIGHT_BUTTON = 0x0200;
constexpr std::uint32_t HOME = 0x0400;
constexpr std::uint32_t A = 0x1000;
constexpr std::uint32_t B = 0x2000;
constexpr std::uint32_t X = 0x4000;
constexpr std::uint32_t Y = 0x8000;
constexpr std::uint32_t PADDLE1 = 0x010000;
constexpr std::uint32_t PADDLE2 = 0x020000;
constexpr std::uint32_t PADDLE3 = 0x040000;
constexpr std::uint32_t PADDLE4 = 0x080000;
constexpr std::uint32_t TOUCHPAD_BUTTON = 0x100000;
constexpr std::uint32_t MISC_BUTTON = 0x200000;
constexpr std::uint32_t DPAD_UP = 0x0001;
constexpr std::uint32_t DPAD_DOWN = 0x0002;
constexpr std::uint32_t DPAD_LEFT = 0x0004;
constexpr std::uint32_t DPAD_RIGHT = 0x0008;
constexpr std::uint32_t START = 0x0010;
constexpr std::uint32_t BACK = 0x0020;
constexpr std::uint32_t LEFT_STICK = 0x0040;
constexpr std::uint32_t RIGHT_STICK = 0x0080;
constexpr std::uint32_t LEFT_BUTTON = 0x0100;
constexpr std::uint32_t RIGHT_BUTTON = 0x0200;
constexpr std::uint32_t HOME = 0x0400;
constexpr std::uint32_t A = 0x1000;
constexpr std::uint32_t B = 0x2000;
constexpr std::uint32_t X = 0x4000;
constexpr std::uint32_t Y = 0x8000;
constexpr std::uint32_t PADDLE1 = 0x010000;
constexpr std::uint32_t PADDLE2 = 0x020000;
constexpr std::uint32_t PADDLE3 = 0x040000;
constexpr std::uint32_t PADDLE4 = 0x080000;
constexpr std::uint32_t TOUCHPAD_BUTTON = 0x100000;
constexpr std::uint32_t MISC_BUTTON = 0x200000;
enum class gamepad_feedback_e {
enum class gamepad_feedback_e {
rumble,
rumble_triggers,
set_motion_event_state,
set_rgb_led,
};
};
struct gamepad_feedback_msg_t {
static gamepad_feedback_msg_t
make_rumble(std::uint16_t id, std::uint16_t lowfreq, std::uint16_t highfreq) {
gamepad_feedback_msg_t msg;
msg.type = gamepad_feedback_e::rumble;
msg.id = id;
msg.data.rumble = { lowfreq, highfreq };
return msg;
struct gamepad_feedback_msg_t {
static gamepad_feedback_msg_t make_rumble(std::uint16_t id,
std::uint16_t lowfreq,
std::uint16_t highfreq) {
gamepad_feedback_msg_t msg;
msg.type = gamepad_feedback_e::rumble;
msg.id = id;
msg.data.rumble = {lowfreq, highfreq};
return msg;
}
static gamepad_feedback_msg_t
make_rumble_triggers(std::uint16_t id, std::uint16_t left, std::uint16_t right) {
gamepad_feedback_msg_t msg;
msg.type = gamepad_feedback_e::rumble_triggers;
msg.id = id;
msg.data.rumble_triggers = { left, right };
return msg;
static gamepad_feedback_msg_t make_rumble_triggers(std::uint16_t id,
std::uint16_t left,
std::uint16_t right) {
gamepad_feedback_msg_t msg;
msg.type = gamepad_feedback_e::rumble_triggers;
msg.id = id;
msg.data.rumble_triggers = {left, right};
return msg;
}
static gamepad_feedback_msg_t
make_motion_event_state(std::uint16_t id, std::uint8_t motion_type, std::uint16_t report_rate) {
gamepad_feedback_msg_t msg;
msg.type = gamepad_feedback_e::set_motion_event_state;
msg.id = id;
msg.data.motion_event_state.motion_type = motion_type;
msg.data.motion_event_state.report_rate = report_rate;
return msg;
static gamepad_feedback_msg_t make_motion_event_state(
std::uint16_t id, std::uint8_t motion_type, std::uint16_t report_rate) {
gamepad_feedback_msg_t msg;
msg.type = gamepad_feedback_e::set_motion_event_state;
msg.id = id;
msg.data.motion_event_state.motion_type = motion_type;
msg.data.motion_event_state.report_rate = report_rate;
return msg;
}
static gamepad_feedback_msg_t
make_rgb_led(std::uint16_t id, std::uint8_t r, std::uint8_t g, std::uint8_t b) {
gamepad_feedback_msg_t msg;
msg.type = gamepad_feedback_e::set_rgb_led;
msg.id = id;
msg.data.rgb_led = { r, g, b };
return msg;
static gamepad_feedback_msg_t make_rgb_led(std::uint16_t id, std::uint8_t r,
std::uint8_t g, std::uint8_t b) {
gamepad_feedback_msg_t msg;
msg.type = gamepad_feedback_e::set_rgb_led;
msg.id = id;
msg.data.rgb_led = {r, g, b};
return msg;
}
gamepad_feedback_e type;
std::uint16_t id;
union {
struct {
std::uint16_t lowfreq;
std::uint16_t highfreq;
} rumble;
struct {
std::uint16_t left_trigger;
std::uint16_t right_trigger;
} rumble_triggers;
struct {
std::uint16_t report_rate;
std::uint8_t motion_type;
} motion_event_state;
struct {
std::uint8_t r;
std::uint8_t g;
std::uint8_t b;
} rgb_led;
struct {
std::uint16_t lowfreq;
std::uint16_t highfreq;
} rumble;
struct {
std::uint16_t left_trigger;
std::uint16_t right_trigger;
} rumble_triggers;
struct {
std::uint16_t report_rate;
std::uint8_t motion_type;
} motion_event_state;
struct {
std::uint8_t r;
std::uint8_t g;
std::uint8_t b;
} rgb_led;
} data;
};
};
using feedback_queue_t = safe::mail_raw_t::queue_t<gamepad_feedback_msg_t>;
using feedback_queue_t = safe::mail_raw_t::queue_t<gamepad_feedback_msg_t>;
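
A hedged sketch of how the factory helpers above are intended to be used; the gamepad id and intensity values are arbitrary example numbers:

// Illustrative: build feedback messages with the static factories; the
// populated union member follows from the chosen type.
platf::gamepad_feedback_msg_t example_rumble() {
  // ~75% low-frequency and ~25% high-frequency rumble on gamepad 0
  return platf::gamepad_feedback_msg_t::make_rumble(0, 0xC000, 0x4000);
}

platf::gamepad_feedback_msg_t example_led() {
  // Light gamepad 0's LED in a blue-ish color
  return platf::gamepad_feedback_msg_t::make_rgb_led(0, 0x20, 0x80, 0xFF);
}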
namespace speaker {
enum speaker_e {
FRONT_LEFT,
FRONT_RIGHT,
FRONT_CENTER,
LOW_FREQUENCY,
BACK_LEFT,
BACK_RIGHT,
SIDE_LEFT,
SIDE_RIGHT,
MAX_SPEAKERS,
};
namespace speaker {
enum speaker_e {
FRONT_LEFT,
FRONT_RIGHT,
FRONT_CENTER,
LOW_FREQUENCY,
BACK_LEFT,
BACK_RIGHT,
SIDE_LEFT,
SIDE_RIGHT,
MAX_SPEAKERS,
};
constexpr std::uint8_t map_stereo[] {
FRONT_LEFT, FRONT_RIGHT
};
constexpr std::uint8_t map_surround51[] {
FRONT_LEFT,
FRONT_RIGHT,
FRONT_CENTER,
LOW_FREQUENCY,
BACK_LEFT,
BACK_RIGHT,
};
constexpr std::uint8_t map_surround71[] {
FRONT_LEFT,
FRONT_RIGHT,
FRONT_CENTER,
LOW_FREQUENCY,
BACK_LEFT,
BACK_RIGHT,
SIDE_LEFT,
SIDE_RIGHT,
};
} // namespace speaker
constexpr std::uint8_t map_stereo[]{FRONT_LEFT, FRONT_RIGHT};
constexpr std::uint8_t map_surround51[]{
FRONT_LEFT, FRONT_RIGHT, FRONT_CENTER, LOW_FREQUENCY, BACK_LEFT, BACK_RIGHT,
};
constexpr std::uint8_t map_surround71[]{
FRONT_LEFT, FRONT_RIGHT, FRONT_CENTER, LOW_FREQUENCY,
BACK_LEFT, BACK_RIGHT, SIDE_LEFT, SIDE_RIGHT,
};
} // namespace speaker
enum class mem_type_e {
system,
vaapi,
dxgi,
cuda,
videotoolbox,
unknown
};
enum class mem_type_e { system, vaapi, dxgi, cuda, videotoolbox, unknown };
enum class pix_fmt_e {
yuv420p,
yuv420p10,
nv12,
p010,
unknown
};
enum class pix_fmt_e { yuv420p, yuv420p10, nv12, p010, unknown };
inline std::string_view
from_pix_fmt(pix_fmt_e pix_fmt) {
inline std::string_view from_pix_fmt(pix_fmt_e pix_fmt) {
using namespace std::literals;
#define _CONVERT(x) \
case pix_fmt_e::x: \
return #x##sv
#define _CONVERT(x) \
case pix_fmt_e::x: \
return #x##sv
switch (pix_fmt) {
_CONVERT(yuv420p);
_CONVERT(yuv420p10);
_CONVERT(nv12);
_CONVERT(p010);
_CONVERT(unknown);
_CONVERT(yuv420p);
_CONVERT(yuv420p10);
_CONVERT(nv12);
_CONVERT(p010);
_CONVERT(unknown);
}
#undef _CONVERT
return "unknown"sv;
}
}
// Dimensions for touchscreen input
struct touch_port_t {
// Dimensions for touchscreen input
struct touch_port_t {
int offset_x, offset_y;
int width, height;
};
};
// These values must match Limelight-internal.h's SS_FF_* constants!
namespace platform_caps {
typedef uint32_t caps_t;
// These values must match Limelight-internal.h's SS_FF_* constants!
namespace platform_caps {
typedef uint32_t caps_t;
constexpr caps_t pen_touch = 0x01; // Pen and touch events
constexpr caps_t controller_touch = 0x02; // Controller touch events
}; // namespace platform_caps
constexpr caps_t pen_touch = 0x01; // Pen and touch events
constexpr caps_t controller_touch = 0x02; // Controller touch events
}; // namespace platform_caps
struct gamepad_state_t {
struct gamepad_state_t {
std::uint32_t buttonFlags;
std::uint8_t lt;
std::uint8_t rt;
@@ -241,9 +216,9 @@ namespace platf {
std::int16_t lsY;
std::int16_t rsX;
std::int16_t rsY;
};
};
struct gamepad_id_t {
struct gamepad_id_t {
// The global index is used when looking up gamepads in the platform's
// gamepad array. It identifies gamepads uniquely among all clients.
int globalIndex;
@@ -252,24 +227,24 @@ namespace platf {
// client. It must be used when communicating back to the client via
// the input feedback queue.
std::uint8_t clientRelativeIndex;
};
};
struct gamepad_arrival_t {
struct gamepad_arrival_t {
std::uint8_t type;
std::uint16_t capabilities;
std::uint32_t supportedButtons;
};
};
struct gamepad_touch_t {
struct gamepad_touch_t {
gamepad_id_t id;
std::uint8_t eventType;
std::uint32_t pointerId;
float x;
float y;
float pressure;
};
};
struct gamepad_motion_t {
struct gamepad_motion_t {
gamepad_id_t id;
std::uint8_t motionType;
@@ -278,15 +253,15 @@ namespace platf {
float x;
float y;
float z;
};
};
struct gamepad_battery_t {
struct gamepad_battery_t {
gamepad_id_t id;
std::uint8_t state;
std::uint8_t percentage;
};
};
struct touch_input_t {
struct touch_input_t {
std::uint8_t eventType;
std::uint16_t rotation; // Degrees (0..360) or LI_ROT_UNKNOWN
std::uint32_t pointerId;
@@ -295,128 +270,113 @@ namespace platf {
float pressureOrDistance; // Distance for hover and pressure for contact
float contactAreaMajor;
float contactAreaMinor;
};
};
struct pen_input_t {
struct pen_input_t {
std::uint8_t eventType;
std::uint8_t toolType;
std::uint8_t penButtons;
std::uint8_t tilt; // Degrees (0..90) or LI_TILT_UNKNOWN
std::uint8_t tilt; // Degrees (0..90) or LI_TILT_UNKNOWN
std::uint16_t rotation; // Degrees (0..360) or LI_ROT_UNKNOWN
float x;
float y;
float pressureOrDistance; // Distance for hover and pressure for contact
float contactAreaMajor;
float contactAreaMinor;
};
};
class deinit_t {
public:
class deinit_t {
public:
virtual ~deinit_t() = default;
};
};
struct img_t: std::enable_shared_from_this<img_t> {
public:
struct img_t : std::enable_shared_from_this<img_t> {
public:
img_t() = default;
img_t(img_t &&) = delete;
img_t(const img_t &) = delete;
img_t &
operator=(img_t &&) = delete;
img_t &
operator=(const img_t &) = delete;
img_t &operator=(img_t &&) = delete;
img_t &operator=(const img_t &) = delete;
std::uint8_t *data {};
std::int32_t width {};
std::int32_t height {};
std::int32_t pixel_pitch {};
std::int32_t row_pitch {};
std::uint8_t *data{};
std::int32_t width{};
std::int32_t height{};
std::int32_t pixel_pitch{};
std::int32_t row_pitch{};
std::optional<std::chrono::steady_clock::time_point> frame_timestamp;
virtual ~img_t() = default;
};
};
struct sink_t {
struct sink_t {
// Play on host PC
std::string host;
// On macOS and Windows, it is not possible to create a virtual sink
// Therefore, it is optional
struct null_t {
std::string stereo;
std::string surround51;
std::string surround71;
std::string stereo;
std::string surround51;
std::string surround71;
};
std::optional<null_t> null;
};
};
struct encode_device_t {
struct encode_device_t {
virtual ~encode_device_t() = default;
virtual int
convert(platf::img_t &img) = 0;
virtual int convert(platf::img_t &img) = 0;
video::sunshine_colorspace_t colorspace;
};
};
struct avcodec_encode_device_t: encode_device_t {
void *data {};
AVFrame *frame {};
struct avcodec_encode_device_t : encode_device_t {
void *data{};
AVFrame *frame{};
int
convert(platf::img_t &img) override {
return -1;
}
int convert(platf::img_t &img) override { return -1; }
virtual void
apply_colorspace() {
}
virtual void apply_colorspace() {}
/**
* implementations must take ownership of 'frame'
*/
virtual int
set_frame(AVFrame *frame, AVBufferRef *hw_frames_ctx) {
BOOST_LOG(error) << "Illegal call to hwdevice_t::set_frame(). Did you forget to override it?";
return -1;
virtual int set_frame(AVFrame *frame, AVBufferRef *hw_frames_ctx) {
BOOST_LOG(error) << "Illegal call to hwdevice_t::set_frame(). Did you "
"forget to override it?";
return -1;
};
/**
* Implementations may set parameters during initialization of the hwframes context
* Implementations may set parameters during initialization of the hwframes
* context
*/
virtual void
init_hwframes(AVHWFramesContext *frames) {};
virtual void init_hwframes(AVHWFramesContext *frames){};
/**
* Implementations may make modifications required before context derivation
*/
virtual int
prepare_to_derive_context(int hw_device_type) {
return 0;
};
};
virtual int prepare_to_derive_context(int hw_device_type) { return 0; };
};
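
The ownership rule above is stated only in words; here is a hedged sketch of an override that honors it (the derived class name is invented for illustration and is not part of this commit):

extern "C" {
#include <libavutil/frame.h>
}

// Hypothetical backend: keeps the frame it is handed and frees it itself,
// as required by the "implementations must take ownership" note above.
struct example_encode_device_t : platf::avcodec_encode_device_t {
  int set_frame(AVFrame *new_frame, AVBufferRef *hw_frames_ctx) override {
    if (frame) {
      av_frame_free(&frame);  // drop any previously owned frame
    }
    frame = new_frame;  // take ownership; released again in the destructor
    return 0;
  }

  ~example_encode_device_t() override {
    if (frame) {
      av_frame_free(&frame);
    }
  }
};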
struct nvenc_encode_device_t: encode_device_t {
virtual bool
init_encoder(const video::config_t &client_config, const video::sunshine_colorspace_t &colorspace) = 0;
struct nvenc_encode_device_t : encode_device_t {
virtual bool init_encoder(
const video::config_t &client_config,
const video::sunshine_colorspace_t &colorspace) = 0;
nvenc::nvenc_base *nvenc = nullptr;
};
};
enum class capture_e : int {
ok,
reinit,
timeout,
interrupted,
error
};
enum class capture_e : int { ok, reinit, timeout, interrupted, error };
class display_t {
public:
class display_t {
public:
/**
* When display has a new image ready or a timeout occurs, this callback will be called with the image.
* If a frame was captured, frame_captured will be true. If a timeout occurred, it will be false.
* When display has a new image ready or a timeout occurs, this callback
* will be called with the image. If a frame was captured, frame_captured
* will be true. If a timeout occurred, it will be false.
*
* On Break Request -->
* Returns false
@@ -424,7 +384,8 @@ namespace platf {
* On Success -->
* Returns true
*/
using push_captured_image_cb_t = std::function<bool(std::shared_ptr<img_t> &&img, bool frame_captured)>;
using push_captured_image_cb_t =
std::function<bool(std::shared_ptr<img_t> &&img, bool frame_captured)>;
/**
* Use to get free image from the pool. Calls must be synchronized.
@@ -434,53 +395,49 @@ namespace platf {
* 'true' on success, img_out contains free image
* 'false' when capture has been interrupted, img_out contains nullptr
*/
using pull_free_image_cb_t = std::function<bool(std::shared_ptr<img_t> &img_out)>;
using pull_free_image_cb_t =
std::function<bool(std::shared_ptr<img_t> &img_out)>;
display_t() noexcept:
offset_x { 0 }, offset_y { 0 } {}
display_t() noexcept : offset_x{0}, offset_y{0} {}
/**
* push_captured_image_cb --> The callback that is called with captured image,
* must be called from the same thread as capture()
* pull_free_image_cb --> Capture backends call this callback to get empty image
* from the pool. If backend uses multiple threads, calls to this
* callback must be synchronized. Calls to this callback and
* push_captured_image_cb must be synchronized as well.
* bool *cursor --> A pointer to the flag that indicates whether the cursor should be captured as well
* push_captured_image_cb --> The callback that is called with captured
* image, must be called from the same thread as capture()
* pull_free_image_cb --> Capture backends call this callback to get empty
* image from the pool. If backend uses multiple threads, calls to this
* callback must be synchronized. Calls to this callback and
* push_captured_image_cb must be synchronized as well.
* bool *cursor --> A pointer to the flag that indicates whether the cursor
* should be captured as well
*
* Returns either:
* capture_e::ok when stopping
* capture_e::error on error
* capture_e::reinit when need of reinitialization
*/
virtual capture_e
capture(const push_captured_image_cb_t &push_captured_image_cb, const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) = 0;
virtual capture_e capture(
const push_captured_image_cb_t &push_captured_image_cb,
const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) = 0;
virtual std::shared_ptr<img_t>
alloc_img() = 0;
virtual std::shared_ptr<img_t> alloc_img() = 0;
virtual int
dummy_img(img_t *img) = 0;
virtual int dummy_img(img_t *img) = 0;
virtual std::unique_ptr<avcodec_encode_device_t>
make_avcodec_encode_device(pix_fmt_e pix_fmt) {
return nullptr;
virtual std::unique_ptr<avcodec_encode_device_t> make_avcodec_encode_device(
pix_fmt_e pix_fmt) {
return nullptr;
}
virtual std::unique_ptr<nvenc_encode_device_t>
make_nvenc_encode_device(pix_fmt_e pix_fmt) {
return nullptr;
virtual std::unique_ptr<nvenc_encode_device_t> make_nvenc_encode_device(
pix_fmt_e pix_fmt) {
return nullptr;
}
virtual bool
is_hdr() {
return false;
}
virtual bool is_hdr() { return false; }
virtual bool
get_hdr_metadata(SS_HDR_METADATA &metadata) {
std::memset(&metadata, 0, sizeof(metadata));
return false;
virtual bool get_hdr_metadata(SS_HDR_METADATA &metadata) {
std::memset(&metadata, 0, sizeof(metadata));
return false;
}
/**
@@ -489,9 +446,9 @@ namespace platf {
* @param config The codec configuration.
* @return true if supported, false otherwise.
*/
virtual bool
is_codec_supported(std::string_view name, const ::video::config_t &config) {
return true;
virtual bool is_codec_supported(std::string_view name,
const ::video::config_t &config) {
return true;
}
virtual ~display_t() = default;
@@ -501,77 +458,59 @@ namespace platf {
int env_width, env_height;
int width, height;
};
};
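
To illustrate the callback contract documented above, a hedged sketch of the overall shape of a capture() implementation; capture_next_frame_into() is a made-up stand-in for the real platform capture call:

#include <memory>
#include <utility>

// Hypothetical platform call: fills `img`, honors `want_cursor`, and reports
// whether a new frame arrived before an internal timeout.
bool capture_next_frame_into(platf::img_t &img, bool want_cursor);

platf::capture_e example_capture_loop(
    const platf::display_t::push_captured_image_cb_t &push_captured_image_cb,
    const platf::display_t::pull_free_image_cb_t &pull_free_image_cb,
    bool *cursor) {
  while (true) {
    std::shared_ptr<platf::img_t> img;
    if (!pull_free_image_cb(img)) {
      return platf::capture_e::ok;  // capture interrupted, img is nullptr
    }

    bool frame_captured = capture_next_frame_into(*img, *cursor);

    // On timeout, push is still called, with frame_captured == false
    if (!push_captured_image_cb(std::move(img), frame_captured)) {
      return platf::capture_e::ok;  // break requested by the caller
    }
  }
}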
class mic_t {
public:
virtual capture_e
sample(std::vector<std::int16_t> &frame_buffer) = 0;
class mic_t {
public:
virtual capture_e sample(std::vector<std::int16_t> &frame_buffer) = 0;
virtual ~mic_t() = default;
};
};
class audio_control_t {
public:
virtual int
set_sink(const std::string &sink) = 0;
class audio_control_t {
public:
virtual int set_sink(const std::string &sink) = 0;
virtual std::unique_ptr<mic_t>
microphone(const std::uint8_t *mapping, int channels, std::uint32_t sample_rate, std::uint32_t frame_size) = 0;
virtual std::unique_ptr<mic_t> microphone(const std::uint8_t *mapping,
int channels,
std::uint32_t sample_rate,
std::uint32_t frame_size) = 0;
virtual std::optional<sink_t>
sink_info() = 0;
virtual std::optional<sink_t> sink_info() = 0;
virtual ~audio_control_t() = default;
};
};
std::filesystem::path appdata();
std::string from_sockaddr(const sockaddr *const);
std::pair<std::uint16_t, std::string> from_sockaddr_ex(const sockaddr *const);
std::unique_ptr<audio_control_t> audio_control();
std::filesystem::path
appdata();
/**
* display_name --> The name of the monitor that SHOULD be displayed
* If display_name is empty --> Use the first compatible monitor you can find.
* If you need to use this parameter in a separate thread --> make a copy of it.
*
* config --> Stream configuration
*
* Returns display_t based on hwdevice_type
*/
std::shared_ptr<display_t> display(mem_type_e hwdevice_type,
const std::string &display_name,
const video::config_t &config);
// A list of names of displays accepted as display_name with the mem_type_e
std::vector<std::string> display_names(mem_type_e hwdevice_type);
enum class thread_priority_e : int { low, normal, high, critical };
void adjust_thread_priority(thread_priority_e priority);
std::string
from_sockaddr(const sockaddr *const);
std::pair<std::uint16_t, std::string>
from_sockaddr_ex(const sockaddr *const);
enum class qos_data_type_e : int { audio, video };
std::unique_ptr<audio_control_t>
audio_control();
typedef deinit_t client_input_t;
/**
* display_name --> The name of the monitor that SHOULD be displayed
* If display_name is empty --> Use the first compatible monitor you can find.
* If you need to use this parameter in a separate thread --> make a copy of it.
*
* config --> Stream configuration
*
* Returns display_t based on hwdevice_type
*/
std::shared_ptr<display_t>
display(mem_type_e hwdevice_type, const std::string &display_name, const video::config_t &config);
// A list of names of displays accepted as display_name with the mem_type_e
std::vector<std::string>
display_names(mem_type_e hwdevice_type);
enum class thread_priority_e : int {
low,
normal,
high,
critical
};
void
adjust_thread_priority(thread_priority_e priority);
enum class qos_data_type_e : int {
audio,
video
};
typedef deinit_t client_input_t;
bool init();
bool init();
} // namespace platf
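
A hedged usage sketch of display_names() and display() as documented above; the dxgi memory type is an assumption for a Windows host, and config stands for an already-prepared stream configuration:

#include <memory>
#include <string>

#include "src/platform/common.h"
#include "src/video.h"

// Illustrative: open the first compatible monitor for a given memory type.
std::shared_ptr<platf::display_t> open_default_display(const video::config_t &config) {
  const auto hwdevice_type = platf::mem_type_e::dxgi;  // assumption: DXGI capture on Windows

  // display_names() lists the names accepted as display_name for this mem_type_e.
  auto names = platf::display_names(hwdevice_type);

  // An empty display_name asks for the first compatible monitor, per the comment above.
  return platf::display(hwdevice_type, names.empty() ? std::string{} : names.front(), config);
}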

File diff suppressed because it is too large

View File

@@ -6,115 +6,109 @@
#if defined(SUNSHINE_BUILD_CUDA)
#include "src/video_colorspace.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "src/video_colorspace.h"
namespace platf {
class avcodec_encode_device_t;
class img_t;
class avcodec_encode_device_t;
class img_t;
} // namespace platf
namespace cuda {
namespace nvfbc {
std::vector<std::string>
display_names();
}
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_encode_device(int width, int height, bool vram);
int
init();
namespace nvfbc {
std::vector<std::string> display_names();
}
std::unique_ptr<platf::avcodec_encode_device_t> make_avcodec_encode_device(
int width, int height, bool vram);
int init();
} // namespace cuda
typedef struct cudaArray *cudaArray_t;
#if !defined(__CUDACC__)
#if !defined(__CUDACC__)
typedef struct CUstream_st *cudaStream_t;
typedef unsigned long long cudaTextureObject_t;
#else /* defined(__CUDACC__) */
#else /* defined(__CUDACC__) */
typedef __location__(device_builtin) struct CUstream_st *cudaStream_t;
typedef __location__(device_builtin) unsigned long long cudaTextureObject_t;
#endif /* !defined(__CUDACC__) */
#endif /* !defined(__CUDACC__) */
namespace cuda {
class freeCudaPtr_t {
public:
void
operator()(void *ptr);
};
class freeCudaPtr_t {
public:
void operator()(void *ptr);
};
class freeCudaStream_t {
public:
void
operator()(cudaStream_t ptr);
};
class freeCudaStream_t {
public:
void operator()(cudaStream_t ptr);
};
using ptr_t = std::unique_ptr<void, freeCudaPtr_t>;
using stream_t = std::unique_ptr<CUstream_st, freeCudaStream_t>;
using ptr_t = std::unique_ptr<void, freeCudaPtr_t>;
using stream_t = std::unique_ptr<CUstream_st, freeCudaStream_t>;
stream_t
make_stream(int flags = 0);
stream_t make_stream(int flags = 0);
struct viewport_t {
struct viewport_t {
int width, height;
int offsetX, offsetY;
};
};
class tex_t {
public:
static std::optional<tex_t>
make(int height, int pitch);
class tex_t {
public:
static std::optional<tex_t> make(int height, int pitch);
tex_t();
tex_t(tex_t &&);
tex_t &
operator=(tex_t &&other);
tex_t &operator=(tex_t &&other);
~tex_t();
int
copy(std::uint8_t *src, int height, int pitch);
int copy(std::uint8_t *src, int height, int pitch);
cudaArray_t array;
struct texture {
cudaTextureObject_t point;
cudaTextureObject_t linear;
cudaTextureObject_t point;
cudaTextureObject_t linear;
} texture;
};
};
class sws_t {
public:
class sws_t {
public:
sws_t() = default;
sws_t(int in_width, int in_height, int out_width, int out_height, int pitch, int threadsPerBlock, ptr_t &&color_matrix);
sws_t(int in_width, int in_height, int out_width, int out_height, int pitch,
int threadsPerBlock, ptr_t &&color_matrix);
/**
* in_width, in_height -- The width and height of the captured image in pixels
* out_width, out_height -- the width and height of the NV12 image in pixels
* in_width, in_height -- The width and height of the captured image in pixels
* out_width, out_height -- the width and height of the NV12 image in pixels
*
* pitch -- The size of a single row of pixels in bytes
*/
static std::optional<sws_t>
make(int in_width, int in_height, int out_width, int out_height, int pitch);
static std::optional<sws_t> make(int in_width, int in_height, int out_width,
int out_height, int pitch);
// Converts loaded image into a CUDevicePtr
int
convert(std::uint8_t *Y, std::uint8_t *UV, std::uint32_t pitchY, std::uint32_t pitchUV, cudaTextureObject_t texture, stream_t::pointer stream);
int
convert(std::uint8_t *Y, std::uint8_t *UV, std::uint32_t pitchY, std::uint32_t pitchUV, cudaTextureObject_t texture, stream_t::pointer stream, const viewport_t &viewport);
int convert(std::uint8_t *Y, std::uint8_t *UV, std::uint32_t pitchY,
std::uint32_t pitchUV, cudaTextureObject_t texture,
stream_t::pointer stream);
int convert(std::uint8_t *Y, std::uint8_t *UV, std::uint32_t pitchY,
std::uint32_t pitchUV, cudaTextureObject_t texture,
stream_t::pointer stream, const viewport_t &viewport);
void
apply_colorspace(const video::sunshine_colorspace_t &colorspace);
void apply_colorspace(const video::sunshine_colorspace_t &colorspace);
int
load_ram(platf::img_t &img, cudaArray_t array);
int load_ram(platf::img_t &img, cudaArray_t array);
ptr_t color_matrix;
@@ -123,7 +117,7 @@ namespace cuda {
viewport_t viewport;
float scale;
};
};
} // namespace cuda
#endif
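
A hedged example of the make() parameters documented above, assuming a BGRA capture where a row is width * 4 bytes; all numbers are illustrative:

#include <optional>

// Illustrative: build a converter for a 1920x1080 capture scaled to a 1280x720 NV12 image.
std::optional<cuda::sws_t> make_example_sws() {
  const int in_width = 1920, in_height = 1080;   // captured image, in pixels
  const int out_width = 1280, out_height = 720;  // NV12 output, in pixels
  const int pitch = in_width * 4;                // bytes per row, assuming 4 bytes per pixel (BGRA)
  return cuda::sws_t::make(in_width, in_height, out_width, out_height, pitch);
}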

File diff suppressed because it is too large

View File

@@ -4,12 +4,12 @@
*/
#pragma once
#include <optional>
#include <string_view>
#include <glad/egl.h>
#include <glad/gl.h>
#include <optional>
#include <string_view>
#include "misc.h"
#include "src/main.h"
#include "src/platform/common.h"
@@ -19,135 +19,126 @@
#define SUNSHINE_STRINGIFY_HELPER(x) #x
#define SUNSHINE_STRINGIFY(x) SUNSHINE_STRINGIFY_HELPER(x)
#define gl_drain_errors_helper(x) gl::drain_errors(x)
#define gl_drain_errors gl_drain_errors_helper(__FILE__ ":" SUNSHINE_STRINGIFY(__LINE__))
#define gl_drain_errors \
gl_drain_errors_helper(__FILE__ ":" SUNSHINE_STRINGIFY(__LINE__))
extern "C" int
close(int __fd);
extern "C" int close(int __fd);
// X11 Display
extern "C" struct _XDisplay;
struct AVFrame;
void
free_frame(AVFrame *frame);
void free_frame(AVFrame *frame);
using frame_t = util::safe_ptr<AVFrame, free_frame>;
namespace gl {
extern GladGLContext ctx;
void
drain_errors(const std::string_view &prefix);
extern GladGLContext ctx;
void drain_errors(const std::string_view &prefix);
class tex_t: public util::buffer_t<GLuint> {
class tex_t : public util::buffer_t<GLuint> {
using util::buffer_t<GLuint>::buffer_t;
public:
public:
tex_t(tex_t &&) = default;
tex_t &
operator=(tex_t &&) = default;
tex_t &operator=(tex_t &&) = default;
~tex_t();
static tex_t
make(std::size_t count);
};
static tex_t make(std::size_t count);
};
class frame_buf_t: public util::buffer_t<GLuint> {
class frame_buf_t : public util::buffer_t<GLuint> {
using util::buffer_t<GLuint>::buffer_t;
public:
public:
frame_buf_t(frame_buf_t &&) = default;
frame_buf_t &
operator=(frame_buf_t &&) = default;
frame_buf_t &operator=(frame_buf_t &&) = default;
~frame_buf_t();
static frame_buf_t
make(std::size_t count);
static frame_buf_t make(std::size_t count);
inline void
bind(std::nullptr_t, std::nullptr_t) {
int x = 0;
for (auto fb : (*this)) {
ctx.BindFramebuffer(GL_FRAMEBUFFER, fb);
ctx.FramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + x, 0, 0);
inline void bind(std::nullptr_t, std::nullptr_t) {
int x = 0;
for (auto fb : (*this)) {
ctx.BindFramebuffer(GL_FRAMEBUFFER, fb);
ctx.FramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + x, 0,
0);
++x;
}
return;
++x;
}
return;
}
template <class It>
void
bind(It it_begin, It it_end) {
using namespace std::literals;
if (std::distance(it_begin, it_end) > size()) {
BOOST_LOG(warning) << "Too many elements to bind"sv;
return;
}
void bind(It it_begin, It it_end) {
using namespace std::literals;
if (std::distance(it_begin, it_end) > size()) {
BOOST_LOG(warning) << "Too many elements to bind"sv;
return;
}
int x = 0;
std::for_each(it_begin, it_end, [&](auto tex) {
ctx.BindFramebuffer(GL_FRAMEBUFFER, (*this)[x]);
ctx.BindTexture(GL_TEXTURE_2D, tex);
int x = 0;
std::for_each(it_begin, it_end, [&](auto tex) {
ctx.BindFramebuffer(GL_FRAMEBUFFER, (*this)[x]);
ctx.BindTexture(GL_TEXTURE_2D, tex);
ctx.FramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + x, tex, 0);
ctx.FramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + x,
tex, 0);
++x;
});
++x;
});
}
/**
* Copies a part of the framebuffer to texture
*/
void
copy(int id, int texture, int offset_x, int offset_y, int width, int height);
};
void copy(int id, int texture, int offset_x, int offset_y, int width,
int height);
};
class shader_t {
KITTY_USING_MOVE_T(shader_internal_t, GLuint, std::numeric_limits<GLuint>::max(), {
if (el != std::numeric_limits<GLuint>::max()) {
ctx.DeleteShader(el);
}
});
class shader_t {
KITTY_USING_MOVE_T(shader_internal_t, GLuint,
std::numeric_limits<GLuint>::max(), {
if (el != std::numeric_limits<GLuint>::max()) {
ctx.DeleteShader(el);
}
});
public:
std::string
err_str();
public:
std::string err_str();
static util::Either<shader_t, std::string>
compile(const std::string_view &source, GLenum type);
static util::Either<shader_t, std::string> compile(
const std::string_view &source, GLenum type);
GLuint
handle() const;
GLuint handle() const;
private:
private:
shader_internal_t _shader;
};
};
class buffer_t {
KITTY_USING_MOVE_T(buffer_internal_t, GLuint, std::numeric_limits<GLuint>::max(), {
if (el != std::numeric_limits<GLuint>::max()) {
ctx.DeleteBuffers(1, &el);
}
});
class buffer_t {
KITTY_USING_MOVE_T(buffer_internal_t, GLuint,
std::numeric_limits<GLuint>::max(), {
if (el != std::numeric_limits<GLuint>::max()) {
ctx.DeleteBuffers(1, &el);
}
});
public:
static buffer_t
make(util::buffer_t<GLint> &&offsets, const char *block, const std::string_view &data);
public:
static buffer_t make(util::buffer_t<GLint> &&offsets, const char *block,
const std::string_view &data);
GLuint
handle() const;
GLuint handle() const;
const char *
block() const;
const char *block() const;
void
update(const std::string_view &view, std::size_t offset = 0);
void
update(std::string_view *members, std::size_t count, std::size_t offset = 0);
void update(const std::string_view &view, std::size_t offset = 0);
void update(std::string_view *members, std::size_t count,
std::size_t offset = 0);
private:
private:
const char *_block;
std::size_t _size;
@@ -155,62 +146,60 @@ namespace gl {
util::buffer_t<GLint> _offsets;
buffer_internal_t _buffer;
};
};
class program_t {
KITTY_USING_MOVE_T(program_internal_t, GLuint, std::numeric_limits<GLuint>::max(), {
if (el != std::numeric_limits<GLuint>::max()) {
ctx.DeleteProgram(el);
}
});
class program_t {
KITTY_USING_MOVE_T(program_internal_t, GLuint,
std::numeric_limits<GLuint>::max(), {
if (el != std::numeric_limits<GLuint>::max()) {
ctx.DeleteProgram(el);
}
});
public:
std::string
err_str();
public:
std::string err_str();
static util::Either<program_t, std::string>
link(const shader_t &vert, const shader_t &frag);
static util::Either<program_t, std::string> link(const shader_t &vert,
const shader_t &frag);
void
bind(const buffer_t &buffer);
void bind(const buffer_t &buffer);
std::optional<buffer_t>
uniform(const char *block, std::pair<const char *, std::string_view> *members, std::size_t count);
std::optional<buffer_t> uniform(
const char *block, std::pair<const char *, std::string_view> *members,
std::size_t count);
GLuint
handle() const;
GLuint handle() const;
private:
private:
program_internal_t _program;
};
};
} // namespace gl
namespace gbm {
struct device;
typedef void (*device_destroy_fn)(device *gbm);
typedef device *(*create_device_fn)(int fd);
struct device;
typedef void (*device_destroy_fn)(device *gbm);
typedef device *(*create_device_fn)(int fd);
extern device_destroy_fn device_destroy;
extern create_device_fn create_device;
extern device_destroy_fn device_destroy;
extern create_device_fn create_device;
using gbm_t = util::dyn_safe_ptr<device, &device_destroy>;
using gbm_t = util::dyn_safe_ptr<device, &device_destroy>;
int
init();
int init();
} // namespace gbm
namespace egl {
using display_t = util::dyn_safe_ptr_v2<void, EGLBoolean, &eglTerminate>;
using display_t = util::dyn_safe_ptr_v2<void, EGLBoolean, &eglTerminate>;
struct rgb_img_t {
struct rgb_img_t {
display_t::pointer display;
EGLImage xrgb8;
gl::tex_t tex;
};
};
struct nv12_img_t {
struct nv12_img_t {
display_t::pointer display;
EGLImage r8;
EGLImage bg88;
@@ -218,37 +207,38 @@ namespace egl {
gl::tex_t tex;
gl::frame_buf_t buf;
// sizeof(va::DRMPRIMESurfaceDescriptor::objects) / sizeof(va::DRMPRIMESurfaceDescriptor::objects[0]);
// sizeof(va::DRMPRIMESurfaceDescriptor::objects) /
// sizeof(va::DRMPRIMESurfaceDescriptor::objects[0]);
static constexpr std::size_t num_fds = 4;
std::array<file_t, num_fds> fds;
};
};
KITTY_USING_MOVE_T(rgb_t, rgb_img_t, , {
KITTY_USING_MOVE_T(rgb_t, rgb_img_t, , {
if (el.xrgb8) {
eglDestroyImage(el.display, el.xrgb8);
eglDestroyImage(el.display, el.xrgb8);
}
});
});
KITTY_USING_MOVE_T(nv12_t, nv12_img_t, , {
KITTY_USING_MOVE_T(nv12_t, nv12_img_t, , {
if (el.r8) {
eglDestroyImage(el.display, el.r8);
eglDestroyImage(el.display, el.r8);
}
if (el.bg88) {
eglDestroyImage(el.display, el.bg88);
eglDestroyImage(el.display, el.bg88);
}
});
});
KITTY_USING_MOVE_T(ctx_t, (std::tuple<display_t::pointer, EGLContext>), , {
KITTY_USING_MOVE_T(ctx_t, (std::tuple<display_t::pointer, EGLContext>), , {
TUPLE_2D_REF(disp, ctx, el);
if (ctx) {
eglMakeCurrent(disp, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
eglDestroyContext(disp, ctx);
eglMakeCurrent(disp, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
eglDestroyContext(disp, ctx);
}
});
});
struct surface_descriptor_t {
struct surface_descriptor_t {
int width;
int height;
int fds[4];
@@ -256,79 +246,70 @@ namespace egl {
std::uint64_t modifier;
std::uint32_t pitches[4];
std::uint32_t offsets[4];
};
};
display_t
make_display(std::variant<gbm::gbm_t::pointer, wl_display *, _XDisplay *> native_display);
std::optional<ctx_t>
make_ctx(display_t::pointer display);
display_t make_display(
std::variant<gbm::gbm_t::pointer, wl_display *, _XDisplay *>
native_display);
std::optional<ctx_t> make_ctx(display_t::pointer display);
std::optional<rgb_t>
import_source(
display_t::pointer egl_display,
const surface_descriptor_t &xrgb);
std::optional<rgb_t> import_source(display_t::pointer egl_display,
const surface_descriptor_t &xrgb);
std::optional<nv12_t>
import_target(
std::optional<nv12_t> import_target(
display_t::pointer egl_display,
std::array<file_t, nv12_img_t::num_fds> &&fds,
const surface_descriptor_t &r8, const surface_descriptor_t &gr88);
class cursor_t: public platf::img_t {
public:
class cursor_t : public platf::img_t {
public:
int x, y;
unsigned long serial;
std::vector<std::uint8_t> buffer;
};
};
// Allow cursor and the underlying image to be kept together
class img_descriptor_t: public cursor_t {
public:
~img_descriptor_t() {
reset();
}
// Allow cursor and the underlying image to be kept together
class img_descriptor_t : public cursor_t {
public:
~img_descriptor_t() { reset(); }
void
reset() {
for (auto x = 0; x < 4; ++x) {
if (sd.fds[x] >= 0) {
close(sd.fds[x]);
void reset() {
for (auto x = 0; x < 4; ++x) {
if (sd.fds[x] >= 0) {
close(sd.fds[x]);
sd.fds[x] = -1;
sd.fds[x] = -1;
}
}
}
}
surface_descriptor_t sd;
// Increment sequence when new rgb_t needs to be created
std::uint64_t sequence;
};
};
class sws_t {
public:
static std::optional<sws_t>
make(int in_width, int in_height, int out_width, int out_height, gl::tex_t &&tex);
static std::optional<sws_t>
make(int in_width, int in_height, int out_width, int out_height);
class sws_t {
public:
static std::optional<sws_t> make(int in_width, int in_height, int out_width,
int out_height, gl::tex_t &&tex);
static std::optional<sws_t> make(int in_width, int in_height, int out_width,
int out_height);
// Convert the loaded image into the first two framebuffers
int
convert(gl::frame_buf_t &fb);
int convert(gl::frame_buf_t &fb);
// Make an area of the image black
int
blank(gl::frame_buf_t &fb, int offsetX, int offsetY, int width, int height);
int blank(gl::frame_buf_t &fb, int offsetX, int offsetY, int width,
int height);
void
load_ram(platf::img_t &img);
void
load_vram(img_descriptor_t &img, int offset_x, int offset_y, int texture);
void load_ram(platf::img_t &img);
void load_vram(img_descriptor_t &img, int offset_x, int offset_y,
int texture);
void
apply_colorspace(const video::sunshine_colorspace_t &colorspace);
void apply_colorspace(const video::sunshine_colorspace_t &colorspace);
// The first texture is the monitor image.
// The second texture is the cursor image
@@ -351,8 +332,7 @@ namespace egl {
// Store latest cursor for load_vram
std::uint64_t serial;
};
};
bool
fail();
bool fail();
} // namespace egl

File diff suppressed because it is too large

View File

@@ -5,7 +5,7 @@
// Required for in6_pktinfo with glibc headers
#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1
#define _GNU_SOURCE 1
#endif
// standard includes
@ -13,8 +13,6 @@
// lib includes
#include <arpa/inet.h>
#include <boost/asio/ip/address.hpp>
#include <boost/process.hpp>
#include <dlfcn.h>
#include <fcntl.h>
#include <ifaddrs.h>
@ -22,6 +20,9 @@
#include <pwd.h>
#include <unistd.h>
#include <boost/asio/ip/address.hpp>
#include <boost/process.hpp>
// local includes
#include "graphics.h"
#include "misc.h"
@ -31,9 +32,9 @@
#include "vaapi.h"
#ifdef __GNUC__
#define SUNSHINE_GNUC_EXTENSION __extension__
#define SUNSHINE_GNUC_EXTENSION __extension__
#else
#define SUNSHINE_GNUC_EXTENSION
#define SUNSHINE_GNUC_EXTENSION
#endif
using namespace std::literals;
@@ -43,152 +44,151 @@ namespace bp = boost::process;
window_system_e window_system;

namespace dyn {
void *handle(const std::vector<const char *> &libs) {
  void *handle;

  for (auto lib : libs) {
    handle = dlopen(lib, RTLD_LAZY | RTLD_LOCAL);
    if (handle) {
      return handle;
    }
  }

  std::stringstream ss;
  ss << "Couldn't find any of the following libraries: ["sv << libs.front();
  std::for_each(std::begin(libs) + 1, std::end(libs),
                [&](auto lib) { ss << ", "sv << lib; });

  ss << ']';

  BOOST_LOG(error) << ss.str();

  return nullptr;
}

int load(void *handle,
         const std::vector<std::tuple<apiproc *, const char *>> &funcs,
         bool strict) {
  int err = 0;
  for (auto &func : funcs) {
    TUPLE_2D_REF(fn, name, func);

    *fn = SUNSHINE_GNUC_EXTENSION(apiproc) dlsym(handle, name);

    if (!*fn && strict) {
      BOOST_LOG(error) << "Couldn't find function: "sv << name;

      err = -1;
    }
  }

  return err;
}

} // namespace dyn
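// A short sketch of how handle() and load() above are combined (illustration
// only; "libexample" and "example_init" are placeholder names):
static int dyn_usage_sketch() {
  // Try candidate sonames until one of them dlopen()s successfully
  auto lib = dyn::handle({ "libexample.so.1", "libexample.so" });
  if (!lib) {
    return -1;
  }

  // Resolve the required symbols; strict == true makes a missing symbol fatal
  static dyn::apiproc example_init;
  if (dyn::load(lib, { std::make_tuple(&example_init, "example_init") }, true)) {
    return -1;
  }

  return 0;
}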
namespace platf {
using ifaddr_t = util::safe_ptr<ifaddrs, freeifaddrs>;

ifaddr_t get_ifaddrs() {
  ifaddrs *p{nullptr};

  getifaddrs(&p);

  return ifaddr_t{p};
}

fs::path appdata() {
  const char *homedir;
  if ((homedir = getenv("HOME")) == nullptr) {
    homedir = getpwuid(geteuid())->pw_dir;
  }

  return fs::path{homedir} / ".config/sunshine"sv;
}

std::string from_sockaddr(const sockaddr *const ip_addr) {
  char data[INET6_ADDRSTRLEN] = {};

  auto family = ip_addr->sa_family;
  if (family == AF_INET6) {
    inet_ntop(AF_INET6, &((sockaddr_in6 *)ip_addr)->sin6_addr, data,
              INET6_ADDRSTRLEN);
  } else if (family == AF_INET) {
    inet_ntop(AF_INET, &((sockaddr_in *)ip_addr)->sin_addr, data,
              INET_ADDRSTRLEN);
  }

  return std::string{data};
}

std::pair<std::uint16_t, std::string> from_sockaddr_ex(
    const sockaddr *const ip_addr) {
  char data[INET6_ADDRSTRLEN] = {};

  auto family = ip_addr->sa_family;
  std::uint16_t port = 0;
  if (family == AF_INET6) {
    inet_ntop(AF_INET6, &((sockaddr_in6 *)ip_addr)->sin6_addr, data,
              INET6_ADDRSTRLEN);
    port = ((sockaddr_in6 *)ip_addr)->sin6_port;
  } else if (family == AF_INET) {
    inet_ntop(AF_INET, &((sockaddr_in *)ip_addr)->sin_addr, data,
              INET_ADDRSTRLEN);
    port = ((sockaddr_in *)ip_addr)->sin_port;
  }

  return {port, std::string{data}};
}

std::string get_mac_address(const std::string_view &address) {
  auto ifaddrs = get_ifaddrs();
  for (auto pos = ifaddrs.get(); pos != nullptr; pos = pos->ifa_next) {
    if (pos->ifa_addr && address == from_sockaddr(pos->ifa_addr)) {
      std::ifstream mac_file("/sys/class/net/"s + pos->ifa_name +
                             "/address");
      if (mac_file.good()) {
        std::string mac_address;
        std::getline(mac_file, mac_address);
        return mac_address;
      }
    }
  }

  BOOST_LOG(warning) << "Unable to find MAC address for "sv << address;
  return "00:00:00:00:00:00"s;
}

bp::child run_command(bool elevated, bool interactive, const std::string &cmd,
                      boost::filesystem::path &working_dir,
                      const bp::environment &env, FILE *file,
                      std::error_code &ec, bp::group *group) {
  if (!group) {
    if (!file) {
      return bp::child(cmd, env, bp::start_dir(working_dir),
                       bp::std_out > bp::null, bp::std_err > bp::null,
                       ec);
    } else {
      return bp::child(cmd, env, bp::start_dir(working_dir),
                       bp::std_out > file, bp::std_err > file, ec);
    }
  } else {
    if (!file) {
      return bp::child(cmd, env, bp::start_dir(working_dir),
                       bp::std_out > bp::null, bp::std_err > bp::null, ec,
                       *group);
    } else {
      return bp::child(cmd, env, bp::start_dir(working_dir),
                       bp::std_out > file, bp::std_err > file, ec,
                       *group);
    }
  }
}
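// An illustrative call of run_command() above (the command and working
// directory are placeholders; a real caller may also pass a bp::group so the
// child can be terminated as a unit):
static bp::child run_command_sketch(std::error_code &ec) {
  boost::filesystem::path working_dir = "/tmp";
  bp::environment env = boost::this_process::environment();

  // No log file and no process group: stdout/stderr are discarded
  return run_command(false, false, "sh -c 'true'", working_dir, env, nullptr,
                     ec, nullptr);
}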
void adjust_thread_priority(thread_priority_e priority) {
  // Unimplemented
}

struct sockaddr_in to_sockaddr(boost::asio::ip::address_v4 address,
                               uint16_t port) {
  struct sockaddr_in saddr_v4 = {};

  saddr_v4.sin_family = AF_INET;
@@ -198,10 +198,10 @@ namespace platf {
  memcpy(&saddr_v4.sin_addr, addr_bytes.data(), sizeof(saddr_v4.sin_addr));

  return saddr_v4;
}

struct sockaddr_in6 to_sockaddr(boost::asio::ip::address_v6 address,
                                uint16_t port) {
  struct sockaddr_in6 saddr_v6 = {};

  saddr_v6.sin6_family = AF_INET6;
@@ -212,33 +212,34 @@ namespace platf {
  memcpy(&saddr_v6.sin6_addr, addr_bytes.data(), sizeof(saddr_v6.sin6_addr));

  return saddr_v6;
}
bool send_batch(batched_send_info_t &send_info) {
  auto sockfd = (int)send_info.native_socket;
  struct msghdr msg = {};

  // Convert the target address into a sockaddr
  struct sockaddr_in taddr_v4 = {};
  struct sockaddr_in6 taddr_v6 = {};
  if (send_info.target_address.is_v6()) {
    taddr_v6 = to_sockaddr(send_info.target_address.to_v6(),
                           send_info.target_port);

    msg.msg_name = (struct sockaddr *)&taddr_v6;
    msg.msg_namelen = sizeof(taddr_v6);
  } else {
    taddr_v4 = to_sockaddr(send_info.target_address.to_v4(),
                           send_info.target_port);

    msg.msg_name = (struct sockaddr *)&taddr_v4;
    msg.msg_namelen = sizeof(taddr_v4);
  }

  union {
    char buf[CMSG_SPACE(sizeof(uint16_t)) +
             std::max(CMSG_SPACE(sizeof(struct in_pktinfo)),
                      CMSG_SPACE(sizeof(struct in6_pktinfo)))];
    struct cmsghdr alignment;
  } cmbuf = {}; // Must be zeroed for CMSG_NXTHDR()
  socklen_t cmbuflen = 0;

@@ -249,171 +250,180 @@ namespace platf {
  // append the UDP_SEGMENT option next if applicable.
  auto pktinfo_cm = CMSG_FIRSTHDR(&msg);
  if (send_info.source_address.is_v6()) {
    struct in6_pktinfo pktInfo;

    struct sockaddr_in6 saddr_v6 =
        to_sockaddr(send_info.source_address.to_v6(), 0);
    pktInfo.ipi6_addr = saddr_v6.sin6_addr;
    pktInfo.ipi6_ifindex = 0;

    cmbuflen += CMSG_SPACE(sizeof(pktInfo));

    pktinfo_cm->cmsg_level = IPPROTO_IPV6;
    pktinfo_cm->cmsg_type = IPV6_PKTINFO;
    pktinfo_cm->cmsg_len = CMSG_LEN(sizeof(pktInfo));
    memcpy(CMSG_DATA(pktinfo_cm), &pktInfo, sizeof(pktInfo));
  } else {
    struct in_pktinfo pktInfo;

    struct sockaddr_in saddr_v4 =
        to_sockaddr(send_info.source_address.to_v4(), 0);
    pktInfo.ipi_spec_dst = saddr_v4.sin_addr;
    pktInfo.ipi_ifindex = 0;

    cmbuflen += CMSG_SPACE(sizeof(pktInfo));

    pktinfo_cm->cmsg_level = IPPROTO_IP;
    pktinfo_cm->cmsg_type = IP_PKTINFO;
    pktinfo_cm->cmsg_len = CMSG_LEN(sizeof(pktInfo));
    memcpy(CMSG_DATA(pktinfo_cm), &pktInfo, sizeof(pktInfo));
  }

#ifdef UDP_SEGMENT
  {
    struct iovec iov = {};

    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    // UDP GSO on Linux currently only supports sending 64K or 64 segments
    // at a time
    size_t seg_index = 0;
    const size_t seg_max = 65536 / 1500;
    while (seg_index < send_info.block_count) {
      iov.iov_base =
          (void *)&send_info.buffer[seg_index * send_info.block_size];
      iov.iov_len = send_info.block_size *
                    std::min(send_info.block_count - seg_index, seg_max);

      // We should not use GSO if the data is <= one full block size
      if (iov.iov_len > send_info.block_size) {
        msg.msg_controllen = cmbuflen + CMSG_SPACE(sizeof(uint16_t));

        // Enable GSO to perform segmentation of our buffer for us
        auto cm = CMSG_NXTHDR(&msg, pktinfo_cm);
        cm->cmsg_level = SOL_UDP;
        cm->cmsg_type = UDP_SEGMENT;
        cm->cmsg_len = CMSG_LEN(sizeof(uint16_t));
        *((uint16_t *)CMSG_DATA(cm)) = send_info.block_size;
      } else {
        msg.msg_controllen = cmbuflen;
      }

      // This will fail if GSO is not available, so we will fall back to
      // non-GSO if it's the first sendmsg() call. On subsequent calls, we
      // will treat errors as actual failures and return to the caller.
      auto bytes_sent = sendmsg(sockfd, &msg, 0);
      if (bytes_sent < 0) {
        // If there's no send buffer space, wait for some to be
        // available
        if (errno == EAGAIN) {
          struct pollfd pfd;

          pfd.fd = sockfd;
          pfd.events = POLLOUT;

          if (poll(&pfd, 1, -1) != 1) {
            BOOST_LOG(warning) << "poll() failed: "sv << errno;
            break;
          }

          // Try to send again
          continue;
        }

        break;
      }

      seg_index += bytes_sent / send_info.block_size;
    }

    // If we sent something, return the status and don't fall back to the
    // non-GSO path.
    if (seg_index != 0) {
      return seg_index >= send_info.block_count;
    }
  }
#endif

  {
    // If GSO is not supported, use sendmmsg() instead.
    struct mmsghdr msgs[send_info.block_count];
    struct iovec iovs[send_info.block_count];
    for (size_t i = 0; i < send_info.block_count; i++) {
      iovs[i] = {};
      iovs[i].iov_base =
          (void *)&send_info.buffer[i * send_info.block_size];
      iovs[i].iov_len = send_info.block_size;

      msgs[i] = {};
      msgs[i].msg_hdr.msg_name = msg.msg_name;
      msgs[i].msg_hdr.msg_namelen = msg.msg_namelen;
      msgs[i].msg_hdr.msg_iov = &iovs[i];
      msgs[i].msg_hdr.msg_iovlen = 1;
      msgs[i].msg_hdr.msg_control = cmbuf.buf;
      msgs[i].msg_hdr.msg_controllen = cmbuflen;
    }

    // Call sendmmsg() until all messages are sent
    size_t blocks_sent = 0;
    while (blocks_sent < send_info.block_count) {
      int msgs_sent = sendmmsg(sockfd, &msgs[blocks_sent],
                               send_info.block_count - blocks_sent, 0);
      if (msgs_sent < 0) {
        // If there's no send buffer space, wait for some to be
        // available
        if (errno == EAGAIN) {
          struct pollfd pfd;

          pfd.fd = sockfd;
          pfd.events = POLLOUT;

          if (poll(&pfd, 1, -1) != 1) {
            BOOST_LOG(warning) << "poll() failed: "sv << errno;
            break;
          }

          // Try to send again
          continue;
        }

        BOOST_LOG(warning) << "sendmmsg() failed: "sv << errno;
        return false;
      }

      blocks_sent += msgs_sent;
    }

    return true;
  }
}
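// A stripped-down sketch of the UDP GSO path used by send_batch() above
// (illustration only): a single sendmsg() carries many payload blocks plus a
// UDP_SEGMENT control message holding the segment size, and the kernel splits
// the buffer into individual datagrams. Linux-specific; the function name is
// assumed.
#ifdef UDP_SEGMENT
static ssize_t send_gso_sketch(int sockfd, sockaddr_in dest, const char *buf,
                               size_t len, uint16_t seg_size) {
  struct iovec iov = {};
  iov.iov_base = const_cast<char *>(buf);
  iov.iov_len = len;

  union {
    char buf[CMSG_SPACE(sizeof(uint16_t))];
    struct cmsghdr alignment;
  } cmbuf = {};

  struct msghdr msg = {};
  msg.msg_name = (struct sockaddr *)&dest;
  msg.msg_namelen = sizeof(dest);
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = cmbuf.buf;
  msg.msg_controllen = sizeof(cmbuf.buf);

  // The kernel chops the buffer into seg_size-byte datagrams
  auto cm = CMSG_FIRSTHDR(&msg);
  cm->cmsg_level = SOL_UDP;
  cm->cmsg_type = UDP_SEGMENT;
  cm->cmsg_len = CMSG_LEN(sizeof(uint16_t));
  *((uint16_t *)CMSG_DATA(cm)) = seg_size;

  return sendmsg(sockfd, &msg, 0);
}
#endif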
bool send(send_info_t &send_info) {
  auto sockfd = (int)send_info.native_socket;
  struct msghdr msg = {};

  // Convert the target address into a sockaddr
  struct sockaddr_in taddr_v4 = {};
  struct sockaddr_in6 taddr_v6 = {};
  if (send_info.target_address.is_v6()) {
    taddr_v6 = to_sockaddr(send_info.target_address.to_v6(),
                           send_info.target_port);

    msg.msg_name = (struct sockaddr *)&taddr_v6;
    msg.msg_namelen = sizeof(taddr_v6);
  } else {
    taddr_v4 = to_sockaddr(send_info.target_address.to_v4(),
                           send_info.target_port);

    msg.msg_name = (struct sockaddr *)&taddr_v4;
    msg.msg_namelen = sizeof(taddr_v4);
  }

  union {
    char buf[std::max(CMSG_SPACE(sizeof(struct in_pktinfo)),
                      CMSG_SPACE(sizeof(struct in6_pktinfo)))];
    struct cmsghdr alignment;
  } cmbuf;
  socklen_t cmbuflen = 0;

@@ -422,36 +432,37 @@ namespace platf {
  auto pktinfo_cm = CMSG_FIRSTHDR(&msg);
  if (send_info.source_address.is_v6()) {
    struct in6_pktinfo pktInfo;

    struct sockaddr_in6 saddr_v6 =
        to_sockaddr(send_info.source_address.to_v6(), 0);
    pktInfo.ipi6_addr = saddr_v6.sin6_addr;
    pktInfo.ipi6_ifindex = 0;

    cmbuflen += CMSG_SPACE(sizeof(pktInfo));

    pktinfo_cm->cmsg_level = IPPROTO_IPV6;
    pktinfo_cm->cmsg_type = IPV6_PKTINFO;
    pktinfo_cm->cmsg_len = CMSG_LEN(sizeof(pktInfo));
    memcpy(CMSG_DATA(pktinfo_cm), &pktInfo, sizeof(pktInfo));
  } else {
    struct in_pktinfo pktInfo;

    struct sockaddr_in saddr_v4 =
        to_sockaddr(send_info.source_address.to_v4(), 0);
    pktInfo.ipi_spec_dst = saddr_v4.sin_addr;
    pktInfo.ipi_ifindex = 0;

    cmbuflen += CMSG_SPACE(sizeof(pktInfo));

    pktinfo_cm->cmsg_level = IPPROTO_IP;
    pktinfo_cm->cmsg_type = IP_PKTINFO;
    pktinfo_cm->cmsg_len = CMSG_LEN(sizeof(pktInfo));
    memcpy(CMSG_DATA(pktinfo_cm), &pktInfo, sizeof(pktInfo));
  }

  struct iovec iov = {};
  iov.iov_base = (void *)send_info.buffer;
  iov.iov_len = send_info.size;

  msg.msg_iov = &iov;
@@ -463,101 +474,95 @@ namespace platf {
  // If there's no send buffer space, wait for some to be available
  while (bytes_sent < 0 && errno == EAGAIN) {
    struct pollfd pfd;

    pfd.fd = sockfd;
    pfd.events = POLLOUT;

    if (poll(&pfd, 1, -1) != 1) {
      BOOST_LOG(warning) << "poll() failed: "sv << errno;
      break;
    }

    // Try to send again
    bytes_sent = sendmsg(sockfd, &msg, 0);
  }

  if (bytes_sent < 0) {
    BOOST_LOG(warning) << "sendmsg() failed: "sv << errno;
    return false;
  }

  return true;
}
namespace source {
enum source_e : std::size_t {
#ifdef SUNSHINE_BUILD_CUDA
  NVFBC,
#endif
#ifdef SUNSHINE_BUILD_WAYLAND
  WAYLAND,
#endif
#ifdef SUNSHINE_BUILD_DRM
  KMS,
#endif
#ifdef SUNSHINE_BUILD_X11
  X11,
#endif
  MAX_FLAGS
};
} // namespace source

static std::bitset<source::MAX_FLAGS> sources;

#ifdef SUNSHINE_BUILD_CUDA
std::vector<std::string> nvfbc_display_names();
std::shared_ptr<display_t> nvfbc_display(mem_type_e hwdevice_type,
                                         const std::string &display_name,
                                         const video::config_t &config);

bool verify_nvfbc() { return !nvfbc_display_names().empty(); }
#endif

#ifdef SUNSHINE_BUILD_WAYLAND
std::vector<std::string> wl_display_names();
std::shared_ptr<display_t> wl_display(mem_type_e hwdevice_type,
                                      const std::string &display_name,
                                      const video::config_t &config);

bool verify_wl() {
  return window_system == window_system_e::WAYLAND &&
         !wl_display_names().empty();
}
#endif

#ifdef SUNSHINE_BUILD_DRM
std::vector<std::string> kms_display_names();
std::shared_ptr<display_t> kms_display(mem_type_e hwdevice_type,
                                       const std::string &display_name,
                                       const video::config_t &config);

bool verify_kms() { return !kms_display_names().empty(); }
#endif

#ifdef SUNSHINE_BUILD_X11
std::vector<std::string> x11_display_names();
std::shared_ptr<display_t> x11_display(mem_type_e hwdevice_type,
                                       const std::string &display_name,
                                       const video::config_t &config);

bool verify_x11() {
  return window_system == window_system_e::X11 &&
         !x11_display_names().empty();
}
#endif
std::vector<std::string> display_names(mem_type_e hwdevice_type) {
#ifdef SUNSHINE_BUILD_CUDA
  // display using NvFBC only supports mem_type_e::cuda
  if (sources[source::NVFBC] && hwdevice_type == mem_type_e::cuda)
    return nvfbc_display_names();
#endif
#ifdef SUNSHINE_BUILD_WAYLAND
  if (sources[source::WAYLAND]) return wl_display_names();
@@ -569,40 +574,40 @@
  if (sources[source::X11]) return x11_display_names();
#endif
  return {};
}

std::shared_ptr<display_t> display(mem_type_e hwdevice_type,
                                   const std::string &display_name,
                                   const video::config_t &config) {
#ifdef SUNSHINE_BUILD_CUDA
  if (sources[source::NVFBC] && hwdevice_type == mem_type_e::cuda) {
    BOOST_LOG(info) << "Screencasting with NvFBC"sv;
    return nvfbc_display(hwdevice_type, display_name, config);
  }
#endif
#ifdef SUNSHINE_BUILD_WAYLAND
  if (sources[source::WAYLAND]) {
    BOOST_LOG(info) << "Screencasting with Wayland's protocol"sv;
    return wl_display(hwdevice_type, display_name, config);
  }
#endif
#ifdef SUNSHINE_BUILD_DRM
  if (sources[source::KMS]) {
    BOOST_LOG(info) << "Screencasting with KMS"sv;
    return kms_display(hwdevice_type, display_name, config);
  }
#endif
#ifdef SUNSHINE_BUILD_X11
  if (sources[source::X11]) {
    BOOST_LOG(info) << "Screencasting with X11"sv;
    return x11_display(hwdevice_type, display_name, config);
  }
#endif

  return nullptr;
}
std::unique_ptr<deinit_t> init() {
  // These are allowed to fail.
  gbm::init();
  va::init();
@@ -610,63 +615,64 @@
  window_system = window_system_e::NONE;
#ifdef SUNSHINE_BUILD_WAYLAND
  if (std::getenv("WAYLAND_DISPLAY")) {
    window_system = window_system_e::WAYLAND;
  }
#endif
#if defined(SUNSHINE_BUILD_X11) || defined(SUNSHINE_BUILD_CUDA)
  if (std::getenv("DISPLAY") && window_system != window_system_e::WAYLAND) {
    if (std::getenv("WAYLAND_DISPLAY")) {
      BOOST_LOG(warning)
          << "Wayland detected, yet sunshine will use X11 for screencasting, screencasting will only work on XWayland applications"sv;
    }

    window_system = window_system_e::X11;
  }
#endif
#ifdef SUNSHINE_BUILD_CUDA
  if (config::video.capture.empty() || config::video.capture == "nvfbc") {
    if (verify_nvfbc()) {
      sources[source::NVFBC] = true;
    }
  }
#endif
#ifdef SUNSHINE_BUILD_WAYLAND
  if (config::video.capture.empty() || config::video.capture == "wlr") {
    if (verify_wl()) {
      sources[source::WAYLAND] = true;
    }
  }
#endif
#ifdef SUNSHINE_BUILD_DRM
  if (config::video.capture.empty() || config::video.capture == "kms") {
    if (verify_kms()) {
      if (window_system == window_system_e::WAYLAND) {
        // On Wayland, using KMS, the cursor is unreliable.
        // Hide it by default
        display_cursor = false;
      }
      sources[source::KMS] = true;
    }
  }
#endif
#ifdef SUNSHINE_BUILD_X11
  if (config::video.capture.empty() || config::video.capture == "x11") {
    if (verify_x11()) {
      sources[source::X11] = true;
    }
  }
#endif

  if (sources.none()) {
    BOOST_LOG(error) << "Unable to initialize capture method"sv;
    return true;
  }

  if (!gladLoaderLoadEGL(EGL_NO_DISPLAY) || !eglGetPlatformDisplay) {
    BOOST_LOG(warning) << "Couldn't load EGL library"sv;
    return true;
  }

  return false;
}
} // namespace platf
@@ -5,30 +5,31 @@
#pragma once

#include <unistd.h>
#include <vector>

#include "src/utility.h"

KITTY_USING_MOVE_T(file_t, int, -1, {
  if (el >= 0) {
    close(el);
  }
});

enum class window_system_e {
  NONE,
  X11,
  WAYLAND,
};

extern window_system_e window_system;

namespace dyn {
typedef void (*apiproc)(void);

int load(void *handle,
         const std::vector<std::tuple<apiproc *, const char *>> &funcs,
         bool strict = true);
void *handle(const std::vector<const char *> &libs);

} // namespace dyn
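// A minimal usage sketch of the move-only file_t wrapper above (illustration
// only; it assumes the macro-generated value member is named `el`):
inline bool file_t_usage_sketch() {
  // Duplicate an existing descriptor purely for demonstration
  file_t fd{dup(STDIN_FILENO)};
  if (fd.el < 0) {
    return false;  // dup() failed; nothing will be closed
  }

  return true;  // the duplicated fd is close()d when `fd` goes out of scope
}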
@@ -8,27 +8,27 @@
#include "src/platform/common.h"

namespace egl {
struct surface_descriptor_t;
}

namespace va {
/**
 * Width --> Width of the image
 * Height --> Height of the image
 * offset_x --> Horizontal offset of the image in the texture
 * offset_y --> Vertical offset of the image in the texture
 * file_t card --> The file descriptor of the render device used for encoding
 */
std::unique_ptr<platf::avcodec_encode_device_t> make_avcodec_encode_device(
    int width, int height, bool vram);
std::unique_ptr<platf::avcodec_encode_device_t> make_avcodec_encode_device(
    int width, int height, int offset_x, int offset_y, bool vram);
std::unique_ptr<platf::avcodec_encode_device_t> make_avcodec_encode_device(
    int width, int height, file_t &&card, int offset_x, int offset_y,
    bool vram);

// Ensure the render device pointed to by fd is capable of encoding h264 with
// the hevc_mode configured
bool validate(int fd);

int init();
} // namespace va
@@ -2,6 +2,8 @@
 * @file src/platform/linux/wayland.cpp
 * @brief todo
 */
#include "wayland.h"

#include <wayland-client.h>
#include <wayland-util.h>
@@ -12,7 +14,6 @@
#include "src/platform/common.h"
#include "src/round_robin.h"
#include "src/utility.h"
extern const wl_interface wl_output_interface;
@@ -25,241 +26,218 @@ using namespace std::literals;
namespace wl {
// Helper to call C++ method from wayland C callback
template <class T, class Method, Method m, class... Params>
static auto classCall(void *data, Params... params)
    -> decltype(((*reinterpret_cast<T *>(data)).*m)(params...)) {
  return ((*reinterpret_cast<T *>(data)).*m)(params...);
}

#define CLASS_CALL(c, m) classCall<c, decltype(&c::m), &c::m>

int display_t::init(const char *display_name) {
  if (!display_name) {
    display_name = std::getenv("WAYLAND_DISPLAY");
  }

  if (!display_name) {
    BOOST_LOG(error)
        << "Environment variable WAYLAND_DISPLAY has not been defined"sv;
    return -1;
  }

  display_internal.reset(wl_display_connect(display_name));
  if (!display_internal) {
    BOOST_LOG(error) << "Couldn't connect to Wayland display: "sv
                     << display_name;
    return -1;
  }

  BOOST_LOG(info) << "Found display ["sv << display_name << ']';

  return 0;
}

void display_t::roundtrip() { wl_display_roundtrip(display_internal.get()); }

wl_registry *display_t::registry() {
  return wl_display_get_registry(display_internal.get());
}

inline monitor_t::monitor_t(wl_output *output)
    : output{output},
      listener{&CLASS_CALL(monitor_t, xdg_position),
               &CLASS_CALL(monitor_t, xdg_size),
               &CLASS_CALL(monitor_t, xdg_done),
               &CLASS_CALL(monitor_t, xdg_name),
               &CLASS_CALL(monitor_t, xdg_description)} {}

inline void monitor_t::xdg_name(zxdg_output_v1 *, const char *name) {
  this->name = name;

  BOOST_LOG(info) << "Name: "sv << this->name;
}

void monitor_t::xdg_description(zxdg_output_v1 *, const char *description) {
  this->description = description;

  BOOST_LOG(info) << "Found monitor: "sv << this->description;
}

void monitor_t::xdg_position(zxdg_output_v1 *, std::int32_t x, std::int32_t y) {
  viewport.offset_x = x;
  viewport.offset_y = y;

  BOOST_LOG(info) << "Offset: "sv << x << 'x' << y;
}

void monitor_t::xdg_size(zxdg_output_v1 *, std::int32_t width,
                         std::int32_t height) {
  viewport.width = width;
  viewport.height = height;

  BOOST_LOG(info) << "Resolution: "sv << width << 'x' << height;
}

void monitor_t::xdg_done(zxdg_output_v1 *) {
  BOOST_LOG(info) << "All info about monitor ["sv << name
                  << "] has been send"sv;
}

void monitor_t::listen(zxdg_output_manager_v1 *output_manager) {
  auto xdg_output =
      zxdg_output_manager_v1_get_xdg_output(output_manager, output);
  zxdg_output_v1_add_listener(xdg_output, &listener, this);
}
interface_t::interface_t() noexcept
    : output_manager{nullptr},
      listener{&CLASS_CALL(interface_t, add_interface),
               &CLASS_CALL(interface_t, del_interface)} {}

void interface_t::listen(wl_registry *registry) {
  wl_registry_add_listener(registry, &listener, this);
}

void interface_t::add_interface(wl_registry *registry, std::uint32_t id,
                                const char *interface, std::uint32_t version) {
  BOOST_LOG(debug) << "Available interface: "sv << interface << '(' << id
                   << ") version "sv << version;

  if (!std::strcmp(interface, wl_output_interface.name)) {
    BOOST_LOG(info) << "Found interface: "sv << interface << '(' << id
                    << ") version "sv << version;
    monitors.emplace_back(
        std::make_unique<monitor_t>((wl_output *)wl_registry_bind(
            registry, id, &wl_output_interface, version)));
  } else if (!std::strcmp(interface, zxdg_output_manager_v1_interface.name)) {
    BOOST_LOG(info) << "Found interface: "sv << interface << '(' << id
                    << ") version "sv << version;
    output_manager = (zxdg_output_manager_v1 *)wl_registry_bind(
        registry, id, &zxdg_output_manager_v1_interface, version);

    this->interface[XDG_OUTPUT] = true;
  } else if (!std::strcmp(interface,
                          zwlr_export_dmabuf_manager_v1_interface.name)) {
    BOOST_LOG(info) << "Found interface: "sv << interface << '(' << id
                    << ") version "sv << version;
    dmabuf_manager = (zwlr_export_dmabuf_manager_v1 *)wl_registry_bind(
        registry, id, &zwlr_export_dmabuf_manager_v1_interface, version);

    this->interface[WLR_EXPORT_DMABUF] = true;
  }
}

void interface_t::del_interface(wl_registry *registry, uint32_t id) {
  BOOST_LOG(info) << "Delete: "sv << id;
}

dmabuf_t::dmabuf_t()
    : status{READY},
      frames{},
      current_frame{&frames[0]},
      listener{&CLASS_CALL(dmabuf_t, frame), &CLASS_CALL(dmabuf_t, object),
               &CLASS_CALL(dmabuf_t, ready), &CLASS_CALL(dmabuf_t, cancel)} {}

void dmabuf_t::listen(zwlr_export_dmabuf_manager_v1 *dmabuf_manager,
                      wl_output *output, bool blend_cursor) {
  auto frame = zwlr_export_dmabuf_manager_v1_capture_output(
      dmabuf_manager, blend_cursor, output);
  zwlr_export_dmabuf_frame_v1_add_listener(frame, &listener, this);

  status = WAITING;
}

dmabuf_t::~dmabuf_t() {
  for (auto &frame : frames) {
    frame.destroy();
  }
}

void dmabuf_t::frame(zwlr_export_dmabuf_frame_v1 *frame, std::uint32_t width,
                     std::uint32_t height, std::uint32_t x, std::uint32_t y,
                     std::uint32_t buffer_flags, std::uint32_t flags,
                     std::uint32_t format, std::uint32_t high,
                     std::uint32_t low, std::uint32_t obj_count) {
  auto next_frame = get_next_frame();

  next_frame->sd.fourcc = format;
  next_frame->sd.width = width;
  next_frame->sd.height = height;
  next_frame->sd.modifier = (((std::uint64_t)high) << 32) | low;
}

void dmabuf_t::object(zwlr_export_dmabuf_frame_v1 *frame, std::uint32_t index,
                      std::int32_t fd, std::uint32_t size, std::uint32_t offset,
                      std::uint32_t stride, std::uint32_t plane_index) {
  auto next_frame = get_next_frame();

  next_frame->sd.fds[plane_index] = fd;
  next_frame->sd.pitches[plane_index] = stride;
  next_frame->sd.offsets[plane_index] = offset;
}

void dmabuf_t::ready(zwlr_export_dmabuf_frame_v1 *frame,
                     std::uint32_t tv_sec_hi, std::uint32_t tv_sec_lo,
                     std::uint32_t tv_nsec) {
  zwlr_export_dmabuf_frame_v1_destroy(frame);

  current_frame->destroy();
  current_frame = get_next_frame();

  status = READY;
}

void dmabuf_t::cancel(zwlr_export_dmabuf_frame_v1 *frame,
                      std::uint32_t reason) {
  zwlr_export_dmabuf_frame_v1_destroy(frame);

  auto next_frame = get_next_frame();
  next_frame->destroy();

  status = REINIT;
}
void frame_t::destroy() {
  for (auto x = 0; x < 4; ++x) {
    if (sd.fds[x] >= 0) {
      close(sd.fds[x]);

      sd.fds[x] = -1;
    }
  }
}

frame_t::frame_t() {
  // File descriptors aren't open
  std::fill_n(sd.fds, 4, -1);
};

std::vector<std::unique_ptr<monitor_t>> monitors(const char *display_name) {
  display_t display;

  if (display.init(display_name)) {
    return {};
  }

  interface_t interface;
@@ -268,32 +246,30 @@ namespace wl {
  display.roundtrip();

  if (!interface[interface_t::XDG_OUTPUT]) {
    BOOST_LOG(error) << "Missing Wayland wire XDG_OUTPUT"sv;

    return {};
  }

  for (auto &monitor : interface.monitors) {
    monitor->listen(interface.output_manager);
  }

  display.roundtrip();

  return std::move(interface.monitors);
}

static bool validate() {
  display_t display;

  return display.init() == 0;
}

int init() {
  static bool validated = validate();

  return !validated;
}
} // namespace wl
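// A small sketch of how wl::monitors() above is typically consumed
// (illustration only; the function name is assumed): enumerate the outputs
// advertised by the compositor and log their xdg-output geometry.
static void wl_monitors_sketch() {
  auto monitors = wl::monitors();  // nullptr -> $WAYLAND_DISPLAY

  for (auto &monitor : monitors) {
    BOOST_LOG(info) << monitor->name << ": "sv << monitor->viewport.width
                    << 'x' << monitor->viewport.height;
  }
}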
@@ -7,8 +7,8 @@
#include <bitset>

#ifdef SUNSHINE_BUILD_WAYLAND
#include <wlr-export-dmabuf-unstable-v1.h>
#include <xdg-output-unstable-v1.h>
#endif

#include "graphics.h"
@@ -20,73 +20,54 @@
#ifdef SUNSHINE_BUILD_WAYLAND

namespace wl {
using display_internal_t = util::safe_ptr<wl_display, wl_display_disconnect>;

class frame_t {
public:
  frame_t();
  egl::surface_descriptor_t sd;

  void destroy();
};

class dmabuf_t {
public:
  enum status_e {
    WAITING,
    READY,
    REINIT,
  };

  dmabuf_t(dmabuf_t &&) = delete;
  dmabuf_t(const dmabuf_t &) = delete;

  dmabuf_t &operator=(const dmabuf_t &) = delete;
  dmabuf_t &operator=(dmabuf_t &&) = delete;

  dmabuf_t();

  void listen(zwlr_export_dmabuf_manager_v1 *dmabuf_manager,
              wl_output *output, bool blend_cursor = false);

  ~dmabuf_t();

  void frame(zwlr_export_dmabuf_frame_v1 *frame, std::uint32_t width,
             std::uint32_t height, std::uint32_t x, std::uint32_t y,
             std::uint32_t buffer_flags, std::uint32_t flags,
             std::uint32_t format, std::uint32_t high, std::uint32_t low,
             std::uint32_t obj_count);

  void object(zwlr_export_dmabuf_frame_v1 *frame, std::uint32_t index,
              std::int32_t fd, std::uint32_t size, std::uint32_t offset,
              std::uint32_t stride, std::uint32_t plane_index);

  void ready(zwlr_export_dmabuf_frame_v1 *frame, std::uint32_t tv_sec_hi,
             std::uint32_t tv_sec_lo, std::uint32_t tv_nsec);

  void cancel(zwlr_export_dmabuf_frame_v1 *frame, std::uint32_t reason);

  inline frame_t *get_next_frame() {
    return current_frame == &frames[0] ? &frames[1] : &frames[0];
  }

  status_e status;
@@ -95,33 +76,25 @@ namespace wl {
  frame_t *current_frame;

  zwlr_export_dmabuf_frame_v1_listener listener;
};
class monitor_t {
public:
  monitor_t(monitor_t &&) = delete;
  monitor_t(const monitor_t &) = delete;

  monitor_t &operator=(const monitor_t &) = delete;
  monitor_t &operator=(monitor_t &&) = delete;

  monitor_t(wl_output *output);

  void xdg_name(zxdg_output_v1 *, const char *name);
  void xdg_description(zxdg_output_v1 *, const char *description);
  void xdg_position(zxdg_output_v1 *, std::int32_t x, std::int32_t y);
  void xdg_size(zxdg_output_v1 *, std::int32_t width, std::int32_t height);
  void xdg_done(zxdg_output_v1 *);

  void listen(zxdg_output_manager_v1 *output_manager);

  wl_output *output;
@@ -131,87 +104,74 @@ namespace wl {
  platf::touch_port_t viewport;

  zxdg_output_v1_listener listener;
};

class interface_t {
  struct bind_t {
    std::uint32_t id;
    std::uint32_t version;
  };

public:
  enum interface_e {
    XDG_OUTPUT,
    WLR_EXPORT_DMABUF,
    MAX_INTERFACES,
  };

  interface_t(interface_t &&) = delete;
  interface_t(const interface_t &) = delete;

  interface_t &operator=(const interface_t &) = delete;
  interface_t &operator=(interface_t &&) = delete;

  interface_t() noexcept;

  void listen(wl_registry *registry);

  std::vector<std::unique_ptr<monitor_t>> monitors;

  zwlr_export_dmabuf_manager_v1 *dmabuf_manager;
  zxdg_output_manager_v1 *output_manager;

  bool operator[](interface_e bit) const { return interface[bit]; }

private:
  void add_interface(wl_registry *registry, std::uint32_t id,
                     const char *interface, std::uint32_t version);
  void del_interface(wl_registry *registry, uint32_t id);

  std::bitset<MAX_INTERFACES> interface;

  wl_registry_listener listener;
};
class display_t {
public:
  /**
   * Initialize display with display_name
   * If display_name == nullptr -> display_name =
   * std::getenv("WAYLAND_DISPLAY")
   */
  int init(const char *display_name = nullptr);

  // Roundtrip with Wayland connection
  void roundtrip();

  // Get the registry associated with the display
  // No need to manually free the registry
  wl_registry *registry();

  inline display_internal_t::pointer get() { return display_internal.get(); }

private:
  display_internal_t display_internal;
};

std::vector<std::unique_ptr<monitor_t>> monitors(
    const char *display_name = nullptr);

int init();
} // namespace wl
#else
@@ -219,20 +179,17 @@ struct wl_output;
struct zxdg_output_manager_v1;

namespace wl {
class monitor_t {
public:
  monitor_t(monitor_t &&) = delete;
  monitor_t(const monitor_t &) = delete;

  monitor_t &operator=(const monitor_t &) = delete;
  monitor_t &operator=(monitor_t &&) = delete;

  monitor_t(wl_output *output);

  void listen(zxdg_output_manager_v1 *output_manager);

  wl_output *output;
@@ -240,12 +197,13 @@ namespace wl {
  std::string description;

  platf::touch_port_t viewport;
};

inline std::vector<std::unique_ptr<monitor_t>> monitors(
    const char *display_name = nullptr) {
  return {};
}

inline int init() { return -1; }
} // namespace wl
#endif
@@ -2,110 +2,110 @@
 * @file src/platform/linux/wlgrab.cpp
 * @brief todo
 */
#include "src/main.h"
#include "src/platform/common.h"
#include "src/video.h"

#include "vaapi.h"
#include "wayland.h"

using namespace std::literals;

namespace wl {
static int env_width;
static int env_height;

struct img_t : public platf::img_t {
  ~img_t() override {
    delete[] data;
    data = nullptr;
  }
};
class wlr_t : public platf::display_t {
public:
  int init(platf::mem_type_e hwdevice_type, const std::string &display_name,
           const ::video::config_t &config) {
    delay = std::chrono::nanoseconds{1s} / config.framerate;
    mem_type = hwdevice_type;

    if (display.init()) {
      return -1;
    }

    interface.listen(display.registry());
    display.roundtrip();

    if (!interface[wl::interface_t::XDG_OUTPUT]) {
      BOOST_LOG(error) << "Missing Wayland wire for xdg_output"sv;
      return -1;
    }

    if (!interface[wl::interface_t::WLR_EXPORT_DMABUF]) {
      BOOST_LOG(error) << "Missing Wayland wire for wlr-export-dmabuf"sv;
      return -1;
    }

    auto monitor = interface.monitors[0].get();

    if (!display_name.empty()) {
      auto streamedMonitor = util::from_view(display_name);

      if (streamedMonitor >= 0 &&
          streamedMonitor < interface.monitors.size()) {
        monitor = interface.monitors[streamedMonitor].get();
      }
    }

    monitor->listen(interface.output_manager);
    display.roundtrip();

    output = monitor->output;

    offset_x = monitor->viewport.offset_x;
    offset_y = monitor->viewport.offset_y;
    width = monitor->viewport.width;
    height = monitor->viewport.height;

    this->env_width = ::wl::env_width;
    this->env_height = ::wl::env_height;

    BOOST_LOG(info) << "Selected monitor ["sv << monitor->description
                    << "] for streaming"sv;
    BOOST_LOG(debug) << "Offset: "sv << offset_x << 'x' << offset_y;
    BOOST_LOG(debug) << "Resolution: "sv << width << 'x' << height;
    BOOST_LOG(debug) << "Desktop Resolution: "sv << env_width << 'x'
                     << env_height;

    return 0;
  }

  int dummy_img(platf::img_t *img) override { return 0; }

  inline platf::capture_e snapshot(
      const pull_free_image_cb_t &pull_free_image_cb,
      std::shared_ptr<platf::img_t> &img_out,
      std::chrono::milliseconds timeout, bool cursor) {
    auto to = std::chrono::steady_clock::now() + timeout;

    dmabuf.listen(interface.dmabuf_manager, output, cursor);
    do {
      display.roundtrip();

      if (to < std::chrono::steady_clock::now()) {
        return platf::capture_e::timeout;
      }
    } while (dmabuf.status == dmabuf_t::WAITING);

    auto current_frame = dmabuf.current_frame;

    if (dmabuf.status == dmabuf_t::REINIT ||
        current_frame->sd.width != width ||
        current_frame->sd.height != height) {
      return platf::capture_e::reinit;
    }

    return platf::capture_e::ok;
  }

  platf::mem_type_e mem_type;
@@ -117,272 +117,287 @@ namespace wl {
  dmabuf_t dmabuf;

  wl_output *output;
};
class wlr_ram_t: public wlr_t {
public:
platf::capture_e
capture(const push_captured_image_cb_t &push_captured_image_cb, const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) override {
auto next_frame = std::chrono::steady_clock::now();
class wlr_ram_t : public wlr_t {
public:
platf::capture_e capture(
const push_captured_image_cb_t &push_captured_image_cb,
const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) override {
auto next_frame = std::chrono::steady_clock::now();
while (true) {
auto now = std::chrono::steady_clock::now();
while (true) {
auto now = std::chrono::steady_clock::now();
if (next_frame > now) {
std::this_thread::sleep_for((next_frame - now) / 3 * 2);
}
while (next_frame > now) {
now = std::chrono::steady_clock::now();
}
next_frame = now + delay;
std::shared_ptr<platf::img_t> img_out;
auto status = snapshot(pull_free_image_cb, img_out, 1000ms, *cursor);
switch (status) {
case platf::capture_e::reinit:
case platf::capture_e::error:
case platf::capture_e::interrupted:
return status;
case platf::capture_e::timeout:
if (!push_captured_image_cb(std::move(img_out), false)) {
return platf::capture_e::ok;
if (next_frame > now) {
std::this_thread::sleep_for((next_frame - now) / 3 * 2);
}
break;
case platf::capture_e::ok:
if (!push_captured_image_cb(std::move(img_out), true)) {
return platf::capture_e::ok;
while (next_frame > now) {
now = std::chrono::steady_clock::now();
}
break;
default:
BOOST_LOG(error) << "Unrecognized capture status ["sv << (int) status << ']';
next_frame = now + delay;
std::shared_ptr<platf::img_t> img_out;
auto status =
snapshot(pull_free_image_cb, img_out, 1000ms, *cursor);
switch (status) {
case platf::capture_e::reinit:
case platf::capture_e::error:
case platf::capture_e::interrupted:
return status;
case platf::capture_e::timeout:
if (!push_captured_image_cb(std::move(img_out), false)) {
return platf::capture_e::ok;
}
break;
case platf::capture_e::ok:
if (!push_captured_image_cb(std::move(img_out), true)) {
return platf::capture_e::ok;
}
break;
default:
BOOST_LOG(error) << "Unrecognized capture status ["sv
<< (int)status << ']';
return status;
}
}
return platf::capture_e::ok;
}
platf::capture_e snapshot(const pull_free_image_cb_t &pull_free_image_cb,
std::shared_ptr<platf::img_t> &img_out,
std::chrono::milliseconds timeout, bool cursor) {
auto status =
wlr_t::snapshot(pull_free_image_cb, img_out, timeout, cursor);
if (status != platf::capture_e::ok) {
return status;
}
}
return platf::capture_e::ok;
auto current_frame = dmabuf.current_frame;
auto rgb_opt = egl::import_source(egl_display.get(), current_frame->sd);
if (!rgb_opt) {
return platf::capture_e::reinit;
}
if (!pull_free_image_cb(img_out)) {
return platf::capture_e::interrupted;
}
gl::ctx.BindTexture(GL_TEXTURE_2D, (*rgb_opt)->tex[0]);
int w, h;
gl::ctx.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &w);
gl::ctx.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &h);
BOOST_LOG(debug) << "width and height: w "sv << w << " h "sv << h;
gl::ctx.GetTextureSubImage((*rgb_opt)->tex[0], 0, 0, 0, 0, width,
height, 1, GL_BGRA, GL_UNSIGNED_BYTE,
img_out->height * img_out->row_pitch,
img_out->data);
gl::ctx.BindTexture(GL_TEXTURE_2D, 0);
return platf::capture_e::ok;
}
platf::capture_e
snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor) {
auto status = wlr_t::snapshot(pull_free_image_cb, img_out, timeout, cursor);
if (status != platf::capture_e::ok) {
return status;
}
int init(platf::mem_type_e hwdevice_type, const std::string &display_name,
const ::video::config_t &config) {
if (wlr_t::init(hwdevice_type, display_name, config)) {
return -1;
}
auto current_frame = dmabuf.current_frame;
egl_display = egl::make_display(display.get());
if (!egl_display) {
return -1;
}
auto rgb_opt = egl::import_source(egl_display.get(), current_frame->sd);
auto ctx_opt = egl::make_ctx(egl_display.get());
if (!ctx_opt) {
return -1;
}
if (!rgb_opt) {
return platf::capture_e::reinit;
}
ctx = std::move(*ctx_opt);
if (!pull_free_image_cb(img_out)) {
return platf::capture_e::interrupted;
}
gl::ctx.BindTexture(GL_TEXTURE_2D, (*rgb_opt)->tex[0]);
int w, h;
gl::ctx.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &w);
gl::ctx.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &h);
BOOST_LOG(debug) << "width and height: w "sv << w << " h "sv << h;
gl::ctx.GetTextureSubImage((*rgb_opt)->tex[0], 0, 0, 0, 0, width, height, 1, GL_BGRA, GL_UNSIGNED_BYTE, img_out->height * img_out->row_pitch, img_out->data);
gl::ctx.BindTexture(GL_TEXTURE_2D, 0);
return platf::capture_e::ok;
return 0;
}
int
init(platf::mem_type_e hwdevice_type, const std::string &display_name, const ::video::config_t &config) {
if (wlr_t::init(hwdevice_type, display_name, config)) {
return -1;
}
std::unique_ptr<platf::avcodec_encode_device_t> make_avcodec_encode_device(
platf::pix_fmt_e pix_fmt) override {
if (mem_type == platf::mem_type_e::vaapi) {
return va::make_avcodec_encode_device(width, height, false);
}
egl_display = egl::make_display(display.get());
if (!egl_display) {
return -1;
}
auto ctx_opt = egl::make_ctx(egl_display.get());
if (!ctx_opt) {
return -1;
}
ctx = std::move(*ctx_opt);
return 0;
return std::make_unique<platf::avcodec_encode_device_t>();
}
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_encode_device(platf::pix_fmt_e pix_fmt) override {
if (mem_type == platf::mem_type_e::vaapi) {
return va::make_avcodec_encode_device(width, height, false);
}
std::shared_ptr<platf::img_t> alloc_img() override {
auto img = std::make_shared<img_t>();
img->width = width;
img->height = height;
img->pixel_pitch = 4;
img->row_pitch = img->pixel_pitch * width;
img->data = new std::uint8_t[height * img->row_pitch];
return std::make_unique<platf::avcodec_encode_device_t>();
}
std::shared_ptr<platf::img_t>
alloc_img() override {
auto img = std::make_shared<img_t>();
img->width = width;
img->height = height;
img->pixel_pitch = 4;
img->row_pitch = img->pixel_pitch * width;
img->data = new std::uint8_t[height * img->row_pitch];
return img;
return img;
}
egl::display_t egl_display;
egl::ctx_t ctx;
};
};
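For reference, the CPU readback above is sized from the fields set in alloc_img(); with a hypothetical 2560x1440 output the arithmetic is:

  int pixel_pitch = 4;                   // BGRA, one byte per channel
  int row_pitch   = pixel_pitch * 2560;  // 10240 bytes per row
  int buf_size    = row_pitch * 1440;    // 14745600 bytes, the bufSize handed to GetTextureSubImage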
class wlr_vram_t: public wlr_t {
public:
platf::capture_e
capture(const push_captured_image_cb_t &push_captured_image_cb, const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) override {
auto next_frame = std::chrono::steady_clock::now();
class wlr_vram_t : public wlr_t {
public:
platf::capture_e capture(
const push_captured_image_cb_t &push_captured_image_cb,
const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) override {
auto next_frame = std::chrono::steady_clock::now();
while (true) {
auto now = std::chrono::steady_clock::now();
while (true) {
auto now = std::chrono::steady_clock::now();
if (next_frame > now) {
std::this_thread::sleep_for((next_frame - now) / 3 * 2);
if (next_frame > now) {
std::this_thread::sleep_for((next_frame - now) / 3 * 2);
}
while (next_frame > now) {
now = std::chrono::steady_clock::now();
}
next_frame = now + delay;
std::shared_ptr<platf::img_t> img_out;
auto status =
snapshot(pull_free_image_cb, img_out, 1000ms, *cursor);
switch (status) {
case platf::capture_e::reinit:
case platf::capture_e::error:
case platf::capture_e::interrupted:
return status;
case platf::capture_e::timeout:
if (!push_captured_image_cb(std::move(img_out), false)) {
return platf::capture_e::ok;
}
break;
case platf::capture_e::ok:
if (!push_captured_image_cb(std::move(img_out), true)) {
return platf::capture_e::ok;
}
break;
default:
BOOST_LOG(error) << "Unrecognized capture status ["sv
<< (int)status << ']';
return status;
}
}
while (next_frame > now) {
now = std::chrono::steady_clock::now();
}
next_frame = now + delay;
return platf::capture_e::ok;
}
platf::capture_e snapshot(const pull_free_image_cb_t &pull_free_image_cb,
std::shared_ptr<platf::img_t> &img_out,
std::chrono::milliseconds timeout, bool cursor) {
auto status =
wlr_t::snapshot(pull_free_image_cb, img_out, timeout, cursor);
if (status != platf::capture_e::ok) {
return status;
}
if (!pull_free_image_cb(img_out)) {
return platf::capture_e::interrupted;
}
auto img = (egl::img_descriptor_t *)img_out.get();
img->reset();
auto current_frame = dmabuf.current_frame;
++sequence;
img->sequence = sequence;
img->sd = current_frame->sd;
// Prevent dmabuf from closing the file descriptors.
std::fill_n(current_frame->sd.fds, 4, -1);
return platf::capture_e::ok;
}
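The snapshot above hands the dmabuf planes to the encoder by transferring ownership of the file descriptors: the descriptor struct is copied into the image and the source fds are set to -1, so any cleanup on the capture side that closes fds >= 0 no longer touches them. A minimal sketch of the idiom, using a hypothetical descriptor type:

  #include <algorithm>

  struct plane_descriptor_sketch_t {
    int fds[4];  // one per plane; -1 marks "not open / not owned"
    // ... widths, strides, modifiers ...
  };

  void move_planes(plane_descriptor_sketch_t &src, plane_descriptor_sketch_t &dst) {
    dst = src;                    // dst now owns the open descriptors
    std::fill_n(src.fds, 4, -1);  // src's close loop (close every fd >= 0) becomes a no-op
  }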
std::shared_ptr<platf::img_t> alloc_img() override {
auto img = std::make_shared<egl::img_descriptor_t>();
img->sequence = 0;
img->serial = std::numeric_limits<decltype(img->serial)>::max();
img->data = nullptr;
// File descriptors aren't open
std::fill_n(img->sd.fds, 4, -1);
return img;
}
std::unique_ptr<platf::avcodec_encode_device_t> make_avcodec_encode_device(
platf::pix_fmt_e pix_fmt) override {
if (mem_type == platf::mem_type_e::vaapi) {
return va::make_avcodec_encode_device(width, height, 0, 0, true);
}
return std::make_unique<platf::avcodec_encode_device_t>();
}
int dummy_img(platf::img_t *img) override {
// TODO: stop cheating and give black image
if (!img) {
return -1;
};
auto pull_dummy_img_callback =
[&img](std::shared_ptr<platf::img_t> &img_out) -> bool {
img_out = img->shared_from_this();
return true;
};
std::shared_ptr<platf::img_t> img_out;
auto status = snapshot(pull_free_image_cb, img_out, 1000ms, *cursor);
switch (status) {
case platf::capture_e::reinit:
case platf::capture_e::error:
case platf::capture_e::interrupted:
return status;
case platf::capture_e::timeout:
if (!push_captured_image_cb(std::move(img_out), false)) {
return platf::capture_e::ok;
}
break;
case platf::capture_e::ok:
if (!push_captured_image_cb(std::move(img_out), true)) {
return platf::capture_e::ok;
}
break;
default:
BOOST_LOG(error) << "Unrecognized capture status ["sv << (int) status << ']';
return status;
}
}
return platf::capture_e::ok;
return snapshot(pull_dummy_img_callback, img_out, 1000ms, false) !=
platf::capture_e::ok;
}
platf::capture_e
snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor) {
auto status = wlr_t::snapshot(pull_free_image_cb, img_out, timeout, cursor);
if (status != platf::capture_e::ok) {
return status;
}
if (!pull_free_image_cb(img_out)) {
return platf::capture_e::interrupted;
}
auto img = (egl::img_descriptor_t *) img_out.get();
img->reset();
auto current_frame = dmabuf.current_frame;
++sequence;
img->sequence = sequence;
img->sd = current_frame->sd;
// Prevent dmabuf from closing the file descriptors.
std::fill_n(current_frame->sd.fds, 4, -1);
return platf::capture_e::ok;
}
std::shared_ptr<platf::img_t>
alloc_img() override {
auto img = std::make_shared<egl::img_descriptor_t>();
img->sequence = 0;
img->serial = std::numeric_limits<decltype(img->serial)>::max();
img->data = nullptr;
// File descriptors aren't open
std::fill_n(img->sd.fds, 4, -1);
return img;
}
std::unique_ptr<platf::avcodec_encode_device_t>
make_avcodec_encode_device(platf::pix_fmt_e pix_fmt) override {
if (mem_type == platf::mem_type_e::vaapi) {
return va::make_avcodec_encode_device(width, height, 0, 0, true);
}
return std::make_unique<platf::avcodec_encode_device_t>();
}
int
dummy_img(platf::img_t *img) override {
// TODO: stop cheating and give black image
if (!img) {
return -1;
};
auto pull_dummy_img_callback = [&img](std::shared_ptr<platf::img_t> &img_out) -> bool {
img_out = img->shared_from_this();
return true;
};
std::shared_ptr<platf::img_t> img_out;
return snapshot(pull_dummy_img_callback, img_out, 1000ms, false) != platf::capture_e::ok;
}
std::uint64_t sequence {};
};
std::uint64_t sequence{};
};
} // namespace wl
namespace platf {
std::shared_ptr<display_t>
wl_display(mem_type_e hwdevice_type, const std::string &display_name, const video::config_t &config) {
if (hwdevice_type != platf::mem_type_e::system && hwdevice_type != platf::mem_type_e::vaapi && hwdevice_type != platf::mem_type_e::cuda) {
BOOST_LOG(error) << "Could not initialize display with the given hw device type."sv;
return nullptr;
std::shared_ptr<display_t> wl_display(mem_type_e hwdevice_type,
const std::string &display_name,
const video::config_t &config) {
if (hwdevice_type != platf::mem_type_e::system &&
hwdevice_type != platf::mem_type_e::vaapi &&
hwdevice_type != platf::mem_type_e::cuda) {
BOOST_LOG(error)
<< "Could not initialize display with the given hw device type."sv;
return nullptr;
}
if (hwdevice_type == platf::mem_type_e::vaapi) {
auto wlr = std::make_shared<wl::wlr_vram_t>();
if (wlr->init(hwdevice_type, display_name, config)) {
return nullptr;
}
auto wlr = std::make_shared<wl::wlr_vram_t>();
if (wlr->init(hwdevice_type, display_name, config)) {
return nullptr;
}
return wlr;
return wlr;
}
auto wlr = std::make_shared<wl::wlr_ram_t>();
if (wlr->init(hwdevice_type, display_name, config)) {
return nullptr;
return nullptr;
}
return wlr;
}
}
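A hypothetical call site for the factory above, assuming a populated ::video::config_t:

  // VAAPI -> wlr_vram_t (zero-copy dmabuf path), otherwise wlr_ram_t (CPU readback)
  auto display = platf::wl_display(platf::mem_type_e::vaapi, "0", config);
  if (!display) {
    // unsupported device type or init failure
  }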
std::vector<std::string>
wl_display_names() {
std::vector<std::string> wl_display_names() {
std::vector<std::string> display_names;
wl::display_t display;
if (display.init()) {
return {};
return {};
}
wl::interface_t interface;
@@ -391,34 +406,38 @@ namespace platf {
display.roundtrip();
if (!interface[wl::interface_t::XDG_OUTPUT]) {
BOOST_LOG(warning) << "Missing Wayland wire for xdg_output"sv;
return {};
BOOST_LOG(warning) << "Missing Wayland wire for xdg_output"sv;
return {};
}
if (!interface[wl::interface_t::WLR_EXPORT_DMABUF]) {
BOOST_LOG(warning) << "Missing Wayland wire for wlr-export-dmabuf"sv;
return {};
BOOST_LOG(warning) << "Missing Wayland wire for wlr-export-dmabuf"sv;
return {};
}
wl::env_width = 0;
wl::env_height = 0;
for (auto &monitor : interface.monitors) {
monitor->listen(interface.output_manager);
monitor->listen(interface.output_manager);
}
display.roundtrip();
for (int x = 0; x < interface.monitors.size(); ++x) {
auto monitor = interface.monitors[x].get();
auto monitor = interface.monitors[x].get();
wl::env_width = std::max(wl::env_width, (int) (monitor->viewport.offset_x + monitor->viewport.width));
wl::env_height = std::max(wl::env_height, (int) (monitor->viewport.offset_y + monitor->viewport.height));
wl::env_width = std::max(
wl::env_width,
(int)(monitor->viewport.offset_x + monitor->viewport.width));
wl::env_height = std::max(
wl::env_height,
(int)(monitor->viewport.offset_y + monitor->viewport.height));
display_names.emplace_back(std::to_string(x));
display_names.emplace_back(std::to_string(x));
}
return display_names;
}
}
} // namespace platf
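As a worked example of the extent calculation above: with output 0 at offset (0, 0) sized 1920x1080 and output 1 at offset (1920, 0) sized 2560x1440, wl::env_width becomes max(1920, 1920 + 2560) = 4480 and wl::env_height becomes max(1080, 1440) = 1440, i.e. the bounding box of the whole virtual desktop.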
File diff suppressed because it is too large
@@ -13,28 +13,24 @@
extern "C" struct _XDisplay;
namespace egl {
class cursor_t;
class cursor_t;
}
namespace platf::x11 {
#ifdef SUNSHINE_BUILD_X11
struct cursor_ctx_raw_t;
void
freeCursorCtx(cursor_ctx_raw_t *ctx);
void
freeDisplay(_XDisplay *xdisplay);
struct cursor_ctx_raw_t;
void freeCursorCtx(cursor_ctx_raw_t *ctx);
void freeDisplay(_XDisplay *xdisplay);
using cursor_ctx_t = util::safe_ptr<cursor_ctx_raw_t, freeCursorCtx>;
using xdisplay_t = util::safe_ptr<_XDisplay, freeDisplay>;
using cursor_ctx_t = util::safe_ptr<cursor_ctx_raw_t, freeCursorCtx>;
using xdisplay_t = util::safe_ptr<_XDisplay, freeDisplay>;
class cursor_t {
public:
static std::optional<cursor_t>
make();
class cursor_t {
public:
static std::optional<cursor_t> make();
void
capture(egl::cursor_t &img);
void capture(egl::cursor_t &img);
/**
* Capture and blend the cursor into the image
@@ -42,30 +38,24 @@ namespace platf::x11 {
* img <-- destination image
* offsetX, offsetY <--- Top left corner of the virtual screen
*/
void
blend(img_t &img, int offsetX, int offsetY);
void blend(img_t &img, int offsetX, int offsetY);
cursor_ctx_t ctx;
};
};
xdisplay_t
make_display();
xdisplay_t make_display();
#else
// It's never something different from nullptr
using xdisplay_t = util::safe_ptr<_XDisplay, std::default_delete<_XDisplay>>;
// It's never something different from nullptr
using xdisplay_t = util::safe_ptr<_XDisplay, std::default_delete<_XDisplay> >;
class cursor_t {
public:
static std::optional<cursor_t>
make() { return std::nullopt; }
class cursor_t {
public:
static std::optional<cursor_t> make() { return std::nullopt; }
void
capture(egl::cursor_t &) {}
void
blend(img_t &, int, int) {}
};
void capture(egl::cursor_t &) {}
void blend(img_t &, int, int) {}
};
xdisplay_t
make_display() { return nullptr; }
xdisplay_t make_display() { return nullptr; }
#endif
} // namespace platf::x11
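A hypothetical call site for the cursor helpers above (offsetX/offsetY are the top-left corner of the virtual screen, as documented):

  if (auto cursor = platf::x11::cursor_t::make()) {
    cursor->blend(img, offset_x, offset_y);
  }
  // With the stub build (no SUNSHINE_BUILD_X11), make() returns std::nullopt and the branch is skipped.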
@@ -11,15 +11,21 @@
#include <mmdeviceapi.h>
#ifdef __MINGW32__
#undef DEFINE_GUID
#ifdef __cplusplus
#define DEFINE_GUID(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) EXTERN_C const GUID DECLSPEC_SELECTANY name = { l, w1, w2, { b1, b2, b3, b4, b5, b6, b7, b8 } }
#else
#define DEFINE_GUID(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) const GUID DECLSPEC_SELECTANY name = { l, w1, w2, { b1, b2, b3, b4, b5, b6, b7, b8 } }
#endif
#undef DEFINE_GUID
#ifdef __cplusplus
#define DEFINE_GUID(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) \
EXTERN_C const GUID DECLSPEC_SELECTANY name = { \
l, w1, w2, {b1, b2, b3, b4, b5, b6, b7, b8}}
#else
#define DEFINE_GUID(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) \
const GUID DECLSPEC_SELECTANY name = { \
l, w1, w2, {b1, b2, b3, b4, b5, b6, b7, b8}}
#endif
DEFINE_GUID(IID_IPolicyConfig, 0xf8679f50, 0x850a, 0x41cf, 0x9c, 0x72, 0x43, 0x0f, 0x29, 0x02, 0x90, 0xc8);
DEFINE_GUID(CLSID_CPolicyConfigClient, 0x870af99c, 0x171d, 0x4f9e, 0xaf, 0x0d, 0xe6, 0x3d, 0xf4, 0x0c, 0x2b, 0xc9);
DEFINE_GUID(IID_IPolicyConfig, 0xf8679f50, 0x850a, 0x41cf, 0x9c, 0x72, 0x43,
0x0f, 0x29, 0x02, 0x90, 0xc8);
DEFINE_GUID(CLSID_CPolicyConfigClient, 0x870af99c, 0x171d, 0x4f9e, 0xaf, 0x0d,
0xe6, 0x3d, 0xf4, 0x0c, 0x2b, 0xc9);
#endif
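Under MinGW the redefined macro expands the first declaration above to roughly:

  EXTERN_C const GUID DECLSPEC_SELECTANY IID_IPolicyConfig = {
      0xf8679f50, 0x850a, 0x41cf, {0x9c, 0x72, 0x43, 0x0f, 0x29, 0x02, 0x90, 0xc8}};

i.e. a selectany definition that can safely live in a header included from multiple translation units.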
@@ -38,66 +44,39 @@ class DECLSPEC_UUID("870af99c-171d-4f9e-af0d-e63df40c2bc9") CPolicyConfigClient;
//
// @compatible: Windows 7 and Later
// ----------------------------------------------------------------------------
interface IPolicyConfig: public IUnknown {
public:
virtual HRESULT
GetMixFormat(
PCWSTR,
WAVEFORMATEX **);
interface IPolicyConfig : public IUnknown {
public:
virtual HRESULT GetMixFormat(PCWSTR, WAVEFORMATEX **);
virtual HRESULT STDMETHODCALLTYPE
GetDeviceFormat(
PCWSTR,
INT,
WAVEFORMATEX **);
virtual HRESULT STDMETHODCALLTYPE GetDeviceFormat(PCWSTR, INT,
WAVEFORMATEX **);
virtual HRESULT STDMETHODCALLTYPE ResetDeviceFormat(
PCWSTR);
virtual HRESULT STDMETHODCALLTYPE ResetDeviceFormat(PCWSTR);
virtual HRESULT STDMETHODCALLTYPE
SetDeviceFormat(
PCWSTR,
WAVEFORMATEX *,
WAVEFORMATEX *);
virtual HRESULT STDMETHODCALLTYPE SetDeviceFormat(PCWSTR, WAVEFORMATEX *,
WAVEFORMATEX *);
virtual HRESULT STDMETHODCALLTYPE GetProcessingPeriod(
PCWSTR,
INT,
PINT64,
PINT64);
virtual HRESULT STDMETHODCALLTYPE GetProcessingPeriod(PCWSTR, INT, PINT64,
PINT64);
virtual HRESULT STDMETHODCALLTYPE SetProcessingPeriod(
PCWSTR,
PINT64);
virtual HRESULT STDMETHODCALLTYPE SetProcessingPeriod(PCWSTR, PINT64);
virtual HRESULT STDMETHODCALLTYPE
GetShareMode(
PCWSTR,
struct DeviceShareMode *);
virtual HRESULT STDMETHODCALLTYPE GetShareMode(PCWSTR,
struct DeviceShareMode *);
virtual HRESULT STDMETHODCALLTYPE
SetShareMode(
PCWSTR,
struct DeviceShareMode *);
virtual HRESULT STDMETHODCALLTYPE SetShareMode(PCWSTR,
struct DeviceShareMode *);
virtual HRESULT STDMETHODCALLTYPE
GetPropertyValue(
PCWSTR,
const PROPERTYKEY &,
PROPVARIANT *);
virtual HRESULT STDMETHODCALLTYPE GetPropertyValue(PCWSTR,
const PROPERTYKEY &,
PROPVARIANT *);
virtual HRESULT STDMETHODCALLTYPE
SetPropertyValue(
PCWSTR,
const PROPERTYKEY &,
PROPVARIANT *);
virtual HRESULT STDMETHODCALLTYPE SetPropertyValue(PCWSTR,
const PROPERTYKEY &,
PROPVARIANT *);
virtual HRESULT STDMETHODCALLTYPE
SetDefaultEndpoint(
PCWSTR wszDeviceId,
ERole eRole);
virtual HRESULT STDMETHODCALLTYPE SetDefaultEndpoint(PCWSTR wszDeviceId,
ERole eRole);
virtual HRESULT STDMETHODCALLTYPE SetEndpointVisibility(
PCWSTR,
INT);
virtual HRESULT STDMETHODCALLTYPE SetEndpointVisibility(PCWSTR, INT);
};
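IPolicyConfig is an undocumented COM interface; a minimal sketch of how SetDefaultEndpoint would be driven, assuming COM is already initialized and using the MinGW GUIDs defined above (the endpoint ID string is a placeholder for a real ID from IMMDevice::GetId()):

  IPolicyConfig *policy = nullptr;
  HRESULT hr = CoCreateInstance(CLSID_CPolicyConfigClient, nullptr, CLSCTX_ALL,
                                IID_IPolicyConfig, (void **)&policy);
  if (SUCCEEDED(hr)) {
    policy->SetDefaultEndpoint(L"{0.0.0.00000000}.{placeholder-device-guid}", eConsole);
    policy->Release();
  }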
@@ -16,123 +16,140 @@
#include "src/video.h"
namespace platf::dxgi {
extern const char *format_str[];
extern const char *format_str[];
// Add D3D11_CREATE_DEVICE_DEBUG here to enable the D3D11 debug runtime.
// You should have a debugger like WinDbg attached to receive debug messages.
auto constexpr D3D11_CREATE_DEVICE_FLAGS = D3D11_CREATE_DEVICE_VIDEO_SUPPORT;
// Add D3D11_CREATE_DEVICE_DEBUG here to enable the D3D11 debug runtime.
// You should have a debugger like WinDbg attached to receive debug messages.
auto constexpr D3D11_CREATE_DEVICE_FLAGS = D3D11_CREATE_DEVICE_VIDEO_SUPPORT;
template <class T>
void
Release(T *dxgi) {
template <class T>
void Release(T *dxgi) {
dxgi->Release();
}
}
using factory1_t = util::safe_ptr<IDXGIFactory1, Release<IDXGIFactory1>>;
using dxgi_t = util::safe_ptr<IDXGIDevice, Release<IDXGIDevice>>;
using dxgi1_t = util::safe_ptr<IDXGIDevice1, Release<IDXGIDevice1>>;
using device_t = util::safe_ptr<ID3D11Device, Release<ID3D11Device>>;
using device1_t = util::safe_ptr<ID3D11Device1, Release<ID3D11Device1>>;
using device_ctx_t = util::safe_ptr<ID3D11DeviceContext, Release<ID3D11DeviceContext>>;
using adapter_t = util::safe_ptr<IDXGIAdapter1, Release<IDXGIAdapter1>>;
using output_t = util::safe_ptr<IDXGIOutput, Release<IDXGIOutput>>;
using output1_t = util::safe_ptr<IDXGIOutput1, Release<IDXGIOutput1>>;
using output5_t = util::safe_ptr<IDXGIOutput5, Release<IDXGIOutput5>>;
using output6_t = util::safe_ptr<IDXGIOutput6, Release<IDXGIOutput6>>;
using dup_t = util::safe_ptr<IDXGIOutputDuplication, Release<IDXGIOutputDuplication>>;
using texture2d_t = util::safe_ptr<ID3D11Texture2D, Release<ID3D11Texture2D>>;
using texture1d_t = util::safe_ptr<ID3D11Texture1D, Release<ID3D11Texture1D>>;
using resource_t = util::safe_ptr<IDXGIResource, Release<IDXGIResource>>;
using resource1_t = util::safe_ptr<IDXGIResource1, Release<IDXGIResource1>>;
using multithread_t = util::safe_ptr<ID3D11Multithread, Release<ID3D11Multithread>>;
using vs_t = util::safe_ptr<ID3D11VertexShader, Release<ID3D11VertexShader>>;
using ps_t = util::safe_ptr<ID3D11PixelShader, Release<ID3D11PixelShader>>;
using blend_t = util::safe_ptr<ID3D11BlendState, Release<ID3D11BlendState>>;
using input_layout_t = util::safe_ptr<ID3D11InputLayout, Release<ID3D11InputLayout>>;
using render_target_t = util::safe_ptr<ID3D11RenderTargetView, Release<ID3D11RenderTargetView>>;
using shader_res_t = util::safe_ptr<ID3D11ShaderResourceView, Release<ID3D11ShaderResourceView>>;
using buf_t = util::safe_ptr<ID3D11Buffer, Release<ID3D11Buffer>>;
using raster_state_t = util::safe_ptr<ID3D11RasterizerState, Release<ID3D11RasterizerState>>;
using sampler_state_t = util::safe_ptr<ID3D11SamplerState, Release<ID3D11SamplerState>>;
using blob_t = util::safe_ptr<ID3DBlob, Release<ID3DBlob>>;
using depth_stencil_state_t = util::safe_ptr<ID3D11DepthStencilState, Release<ID3D11DepthStencilState>>;
using depth_stencil_view_t = util::safe_ptr<ID3D11DepthStencilView, Release<ID3D11DepthStencilView>>;
using keyed_mutex_t = util::safe_ptr<IDXGIKeyedMutex, Release<IDXGIKeyedMutex>>;
using factory1_t = util::safe_ptr<IDXGIFactory1, Release<IDXGIFactory1>>;
using dxgi_t = util::safe_ptr<IDXGIDevice, Release<IDXGIDevice>>;
using dxgi1_t = util::safe_ptr<IDXGIDevice1, Release<IDXGIDevice1>>;
using device_t = util::safe_ptr<ID3D11Device, Release<ID3D11Device>>;
using device1_t = util::safe_ptr<ID3D11Device1, Release<ID3D11Device1>>;
using device_ctx_t =
util::safe_ptr<ID3D11DeviceContext, Release<ID3D11DeviceContext>>;
using adapter_t = util::safe_ptr<IDXGIAdapter1, Release<IDXGIAdapter1>>;
using output_t = util::safe_ptr<IDXGIOutput, Release<IDXGIOutput>>;
using output1_t = util::safe_ptr<IDXGIOutput1, Release<IDXGIOutput1>>;
using output5_t = util::safe_ptr<IDXGIOutput5, Release<IDXGIOutput5>>;
using output6_t = util::safe_ptr<IDXGIOutput6, Release<IDXGIOutput6>>;
using dup_t =
util::safe_ptr<IDXGIOutputDuplication, Release<IDXGIOutputDuplication>>;
using texture2d_t = util::safe_ptr<ID3D11Texture2D, Release<ID3D11Texture2D>>;
using texture1d_t = util::safe_ptr<ID3D11Texture1D, Release<ID3D11Texture1D>>;
using resource_t = util::safe_ptr<IDXGIResource, Release<IDXGIResource>>;
using resource1_t = util::safe_ptr<IDXGIResource1, Release<IDXGIResource1>>;
using multithread_t =
util::safe_ptr<ID3D11Multithread, Release<ID3D11Multithread>>;
using vs_t = util::safe_ptr<ID3D11VertexShader, Release<ID3D11VertexShader>>;
using ps_t = util::safe_ptr<ID3D11PixelShader, Release<ID3D11PixelShader>>;
using blend_t = util::safe_ptr<ID3D11BlendState, Release<ID3D11BlendState>>;
using input_layout_t =
util::safe_ptr<ID3D11InputLayout, Release<ID3D11InputLayout>>;
using render_target_t =
util::safe_ptr<ID3D11RenderTargetView, Release<ID3D11RenderTargetView>>;
using shader_res_t =
util::safe_ptr<ID3D11ShaderResourceView, Release<ID3D11ShaderResourceView>>;
using buf_t = util::safe_ptr<ID3D11Buffer, Release<ID3D11Buffer>>;
using raster_state_t =
util::safe_ptr<ID3D11RasterizerState, Release<ID3D11RasterizerState>>;
using sampler_state_t =
util::safe_ptr<ID3D11SamplerState, Release<ID3D11SamplerState>>;
using blob_t = util::safe_ptr<ID3DBlob, Release<ID3DBlob>>;
using depth_stencil_state_t =
util::safe_ptr<ID3D11DepthStencilState, Release<ID3D11DepthStencilState>>;
using depth_stencil_view_t =
util::safe_ptr<ID3D11DepthStencilView, Release<ID3D11DepthStencilView>>;
using keyed_mutex_t = util::safe_ptr<IDXGIKeyedMutex, Release<IDXGIKeyedMutex>>;
namespace video {
using device_t = util::safe_ptr<ID3D11VideoDevice, Release<ID3D11VideoDevice>>;
using ctx_t = util::safe_ptr<ID3D11VideoContext, Release<ID3D11VideoContext>>;
using processor_t = util::safe_ptr<ID3D11VideoProcessor, Release<ID3D11VideoProcessor>>;
using processor_out_t = util::safe_ptr<ID3D11VideoProcessorOutputView, Release<ID3D11VideoProcessorOutputView>>;
using processor_in_t = util::safe_ptr<ID3D11VideoProcessorInputView, Release<ID3D11VideoProcessorInputView>>;
using processor_enum_t = util::safe_ptr<ID3D11VideoProcessorEnumerator, Release<ID3D11VideoProcessorEnumerator>>;
} // namespace video
namespace video {
using device_t = util::safe_ptr<ID3D11VideoDevice, Release<ID3D11VideoDevice>>;
using ctx_t = util::safe_ptr<ID3D11VideoContext, Release<ID3D11VideoContext>>;
using processor_t =
util::safe_ptr<ID3D11VideoProcessor, Release<ID3D11VideoProcessor>>;
using processor_out_t = util::safe_ptr<ID3D11VideoProcessorOutputView,
Release<ID3D11VideoProcessorOutputView>>;
using processor_in_t = util::safe_ptr<ID3D11VideoProcessorInputView,
Release<ID3D11VideoProcessorInputView>>;
using processor_enum_t =
util::safe_ptr<ID3D11VideoProcessorEnumerator,
Release<ID3D11VideoProcessorEnumerator>>;
} // namespace video
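Each alias pairs an interface with Release<T> as its deleter, so Release() runs automatically when the wrapper goes out of scope. A minimal sketch, assuming util::safe_ptr exposes the address-of overload used elsewhere in this codebase:

  factory1_t factory;
  auto status = CreateDXGIFactory1(IID_IDXGIFactory1, (void **)&factory);
  if (FAILED(status)) {
    // handle the error; on success the factory is released automatically at scope exit
  }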
class hwdevice_t;
struct cursor_t {
class hwdevice_t;
struct cursor_t {
std::vector<std::uint8_t> img_data;
DXGI_OUTDUPL_POINTER_SHAPE_INFO shape_info;
int x, y;
bool visible;
};
};
class gpu_cursor_t {
public:
gpu_cursor_t():
cursor_view { 0, 0, 0, 0, 0.0f, 1.0f } {};
class gpu_cursor_t {
public:
gpu_cursor_t() : cursor_view{0, 0, 0, 0, 0.0f, 1.0f} {};
void
set_pos(LONG topleft_x, LONG topleft_y, LONG display_width, LONG display_height, DXGI_MODE_ROTATION display_rotation, bool visible) {
this->topleft_x = topleft_x;
this->topleft_y = topleft_y;
this->display_width = display_width;
this->display_height = display_height;
this->display_rotation = display_rotation;
this->visible = visible;
update_viewport();
void set_pos(LONG topleft_x, LONG topleft_y, LONG display_width,
LONG display_height, DXGI_MODE_ROTATION display_rotation,
bool visible) {
this->topleft_x = topleft_x;
this->topleft_y = topleft_y;
this->display_width = display_width;
this->display_height = display_height;
this->display_rotation = display_rotation;
this->visible = visible;
update_viewport();
}
void
set_texture(LONG texture_width, LONG texture_height, texture2d_t &&texture) {
this->texture = std::move(texture);
this->texture_width = texture_width;
this->texture_height = texture_height;
update_viewport();
void set_texture(LONG texture_width, LONG texture_height,
texture2d_t &&texture) {
this->texture = std::move(texture);
this->texture_width = texture_width;
this->texture_height = texture_height;
update_viewport();
}
void
update_viewport() {
switch (display_rotation) {
case DXGI_MODE_ROTATION_UNSPECIFIED:
case DXGI_MODE_ROTATION_IDENTITY:
cursor_view.TopLeftX = topleft_x;
cursor_view.TopLeftY = topleft_y;
cursor_view.Width = texture_width;
cursor_view.Height = texture_height;
break;
void update_viewport() {
switch (display_rotation) {
case DXGI_MODE_ROTATION_UNSPECIFIED:
case DXGI_MODE_ROTATION_IDENTITY:
cursor_view.TopLeftX = topleft_x;
cursor_view.TopLeftY = topleft_y;
cursor_view.Width = texture_width;
cursor_view.Height = texture_height;
break;
case DXGI_MODE_ROTATION_ROTATE90:
cursor_view.TopLeftX = topleft_y;
cursor_view.TopLeftY = display_width - texture_width - topleft_x;
cursor_view.Width = texture_height;
cursor_view.Height = texture_width;
break;
case DXGI_MODE_ROTATION_ROTATE90:
cursor_view.TopLeftX = topleft_y;
cursor_view.TopLeftY =
display_width - texture_width - topleft_x;
cursor_view.Width = texture_height;
cursor_view.Height = texture_width;
break;
case DXGI_MODE_ROTATION_ROTATE180:
cursor_view.TopLeftX = display_width - texture_width - topleft_x;
cursor_view.TopLeftY = display_height - texture_height - topleft_y;
cursor_view.Width = texture_width;
cursor_view.Height = texture_height;
break;
case DXGI_MODE_ROTATION_ROTATE180:
cursor_view.TopLeftX =
display_width - texture_width - topleft_x;
cursor_view.TopLeftY =
display_height - texture_height - topleft_y;
cursor_view.Width = texture_width;
cursor_view.Height = texture_height;
break;
case DXGI_MODE_ROTATION_ROTATE270:
cursor_view.TopLeftX = display_height - texture_height - topleft_y;
cursor_view.TopLeftY = topleft_x;
cursor_view.Width = texture_height;
cursor_view.Height = texture_width;
break;
}
case DXGI_MODE_ROTATION_ROTATE270:
cursor_view.TopLeftX =
display_height - texture_height - topleft_y;
cursor_view.TopLeftY = topleft_x;
cursor_view.Width = texture_height;
cursor_view.Height = texture_width;
break;
}
}
texture2d_t texture;
@@ -151,34 +168,32 @@ namespace platf::dxgi {
D3D11_VIEWPORT cursor_view;
bool visible;
};
};
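As a worked example of the ROTATE90 branch above: with an unrotated 1920x1080 display, a 32x48 cursor texture and an unrotated top-left of (100, 200), the viewport becomes TopLeftX = 200, TopLeftY = 1920 - 32 - 100 = 1788, Width = 48 and Height = 32; the cursor quad is repositioned and its extents swapped to match the rotated output.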
class duplication_t {
public:
class duplication_t {
public:
dup_t dup;
bool has_frame {};
std::chrono::steady_clock::time_point last_protected_content_warning_time {};
bool has_frame{};
std::chrono::steady_clock::time_point last_protected_content_warning_time{};
capture_e
next_frame(DXGI_OUTDUPL_FRAME_INFO &frame_info, std::chrono::milliseconds timeout, resource_t::pointer *res_p);
capture_e
reset(dup_t::pointer dup_p = dup_t::pointer());
capture_e
release_frame();
capture_e next_frame(DXGI_OUTDUPL_FRAME_INFO &frame_info,
std::chrono::milliseconds timeout,
resource_t::pointer *res_p);
capture_e reset(dup_t::pointer dup_p = dup_t::pointer());
capture_e release_frame();
~duplication_t();
};
};
class display_base_t: public display_t {
public:
int
init(const ::video::config_t &config, const std::string &display_name);
class display_base_t : public display_t {
public:
int init(const ::video::config_t &config, const std::string &display_name);
void
high_precision_sleep(std::chrono::nanoseconds duration);
void high_precision_sleep(std::chrono::nanoseconds duration);
capture_e
capture(const push_captured_image_cb_t &push_captured_image_cb, const pull_free_image_cb_t &pull_free_image_cb, bool *cursor) override;
capture_e capture(const push_captured_image_cb_t &push_captured_image_cb,
const pull_free_image_cb_t &pull_free_image_cb,
bool *cursor) override;
factory1_t factory;
adapter_t adapter;
@@ -201,127 +216,119 @@ namespace platf::dxgi {
util::safe_ptr_v2<std::remove_pointer_t<HANDLE>, BOOL, CloseHandle> timer;
typedef enum _D3DKMT_SCHEDULINGPRIORITYCLASS {
D3DKMT_SCHEDULINGPRIORITYCLASS_IDLE,
D3DKMT_SCHEDULINGPRIORITYCLASS_BELOW_NORMAL,
D3DKMT_SCHEDULINGPRIORITYCLASS_NORMAL,
D3DKMT_SCHEDULINGPRIORITYCLASS_ABOVE_NORMAL,
D3DKMT_SCHEDULINGPRIORITYCLASS_HIGH,
D3DKMT_SCHEDULINGPRIORITYCLASS_REALTIME
D3DKMT_SCHEDULINGPRIORITYCLASS_IDLE,
D3DKMT_SCHEDULINGPRIORITYCLASS_BELOW_NORMAL,
D3DKMT_SCHEDULINGPRIORITYCLASS_NORMAL,
D3DKMT_SCHEDULINGPRIORITYCLASS_ABOVE_NORMAL,
D3DKMT_SCHEDULINGPRIORITYCLASS_HIGH,
D3DKMT_SCHEDULINGPRIORITYCLASS_REALTIME
} D3DKMT_SCHEDULINGPRIORITYCLASS;
typedef UINT D3DKMT_HANDLE;
typedef struct _D3DKMT_OPENADAPTERFROMLUID {
LUID AdapterLuid;
D3DKMT_HANDLE hAdapter;
LUID AdapterLuid;
D3DKMT_HANDLE hAdapter;
} D3DKMT_OPENADAPTERFROMLUID;
typedef struct _D3DKMT_WDDM_2_7_CAPS {
union {
struct
{
UINT HwSchSupported : 1;
UINT HwSchEnabled : 1;
UINT HwSchEnabledByDefault : 1;
UINT IndependentVidPnVSyncControl : 1;
UINT Reserved : 28;
union {
struct {
UINT HwSchSupported : 1;
UINT HwSchEnabled : 1;
UINT HwSchEnabledByDefault : 1;
UINT IndependentVidPnVSyncControl : 1;
UINT Reserved : 28;
};
UINT Value;
};
UINT Value;
};
} D3DKMT_WDDM_2_7_CAPS;
typedef struct _D3DKMT_QUERYADAPTERINFO {
D3DKMT_HANDLE hAdapter;
UINT Type;
VOID *pPrivateDriverData;
UINT PrivateDriverDataSize;
D3DKMT_HANDLE hAdapter;
UINT Type;
VOID *pPrivateDriverData;
UINT PrivateDriverDataSize;
} D3DKMT_QUERYADAPTERINFO;
const UINT KMTQAITYPE_WDDM_2_7_CAPS = 70;
typedef struct _D3DKMT_CLOSEADAPTER {
D3DKMT_HANDLE hAdapter;
D3DKMT_HANDLE hAdapter;
} D3DKMT_CLOSEADAPTER;
typedef NTSTATUS(WINAPI *PD3DKMTSetProcessSchedulingPriorityClass)(HANDLE, D3DKMT_SCHEDULINGPRIORITYCLASS);
typedef NTSTATUS(WINAPI *PD3DKMTOpenAdapterFromLuid)(D3DKMT_OPENADAPTERFROMLUID *);
typedef NTSTATUS(WINAPI *PD3DKMTQueryAdapterInfo)(D3DKMT_QUERYADAPTERINFO *);
typedef NTSTATUS(WINAPI *PD3DKMTSetProcessSchedulingPriorityClass)(
HANDLE, D3DKMT_SCHEDULINGPRIORITYCLASS);
typedef NTSTATUS(WINAPI *PD3DKMTOpenAdapterFromLuid)(
D3DKMT_OPENADAPTERFROMLUID *);
typedef NTSTATUS(WINAPI *PD3DKMTQueryAdapterInfo)(
D3DKMT_QUERYADAPTERINFO *);
typedef NTSTATUS(WINAPI *PD3DKMTCloseAdapter)(D3DKMT_CLOSEADAPTER *);
virtual bool
is_hdr() override;
virtual bool
get_hdr_metadata(SS_HDR_METADATA &metadata) override;
virtual bool is_hdr() override;
virtual bool get_hdr_metadata(SS_HDR_METADATA &metadata) override;
protected:
int
get_pixel_pitch() {
return (capture_format == DXGI_FORMAT_R16G16B16A16_FLOAT) ? 8 : 4;
protected:
int get_pixel_pitch() {
return (capture_format == DXGI_FORMAT_R16G16B16A16_FLOAT) ? 8 : 4;
}
const char *
dxgi_format_to_string(DXGI_FORMAT format);
const char *
colorspace_to_string(DXGI_COLOR_SPACE_TYPE type);
const char *dxgi_format_to_string(DXGI_FORMAT format);
const char *colorspace_to_string(DXGI_COLOR_SPACE_TYPE type);
virtual capture_e
snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor_visible) = 0;
virtual int
complete_img(img_t *img, bool dummy) = 0;
virtual std::vector<DXGI_FORMAT>
get_supported_capture_formats() = 0;
};
virtual capture_e snapshot(const pull_free_image_cb_t &pull_free_image_cb,
std::shared_ptr<platf::img_t> &img_out,
std::chrono::milliseconds timeout,
bool cursor_visible) = 0;
virtual int complete_img(img_t *img, bool dummy) = 0;
virtual std::vector<DXGI_FORMAT> get_supported_capture_formats() = 0;
};
class display_ram_t: public display_base_t {
public:
virtual capture_e
snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor_visible) override;
class display_ram_t : public display_base_t {
public:
virtual capture_e snapshot(const pull_free_image_cb_t &pull_free_image_cb,
std::shared_ptr<platf::img_t> &img_out,
std::chrono::milliseconds timeout,
bool cursor_visible) override;
std::shared_ptr<img_t>
alloc_img() override;
int
dummy_img(img_t *img) override;
int
complete_img(img_t *img, bool dummy) override;
std::vector<DXGI_FORMAT>
get_supported_capture_formats() override;
std::shared_ptr<img_t> alloc_img() override;
int dummy_img(img_t *img) override;
int complete_img(img_t *img, bool dummy) override;
std::vector<DXGI_FORMAT> get_supported_capture_formats() override;
int
init(const ::video::config_t &config, const std::string &display_name);
int init(const ::video::config_t &config, const std::string &display_name);
std::unique_ptr<avcodec_encode_device_t>
make_avcodec_encode_device(pix_fmt_e pix_fmt) override;
std::unique_ptr<avcodec_encode_device_t> make_avcodec_encode_device(
pix_fmt_e pix_fmt) override;
cursor_t cursor;
D3D11_MAPPED_SUBRESOURCE img_info;
texture2d_t texture;
};
};
class display_vram_t: public display_base_t, public std::enable_shared_from_this<display_vram_t> {
public:
virtual capture_e
snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor_visible) override;
class display_vram_t : public display_base_t,
public std::enable_shared_from_this<display_vram_t> {
public:
virtual capture_e snapshot(const pull_free_image_cb_t &pull_free_image_cb,
std::shared_ptr<platf::img_t> &img_out,
std::chrono::milliseconds timeout,
bool cursor_visible) override;
std::shared_ptr<img_t>
alloc_img() override;
int
dummy_img(img_t *img_base) override;
int
complete_img(img_t *img_base, bool dummy) override;
std::vector<DXGI_FORMAT>
get_supported_capture_formats() override;
std::shared_ptr<img_t> alloc_img() override;
int dummy_img(img_t *img_base) override;
int complete_img(img_t *img_base, bool dummy) override;
std::vector<DXGI_FORMAT> get_supported_capture_formats() override;
int
init(const ::video::config_t &config, const std::string &display_name);
int init(const ::video::config_t &config, const std::string &display_name);
bool
is_codec_supported(std::string_view name, const ::video::config_t &config) override;
bool is_codec_supported(std::string_view name,
const ::video::config_t &config) override;
std::unique_ptr<avcodec_encode_device_t>
make_avcodec_encode_device(pix_fmt_e pix_fmt) override;
std::unique_ptr<avcodec_encode_device_t> make_avcodec_encode_device(
pix_fmt_e pix_fmt) override;
std::unique_ptr<nvenc_encode_device_t>
make_nvenc_encode_device(pix_fmt_e pix_fmt) override;
std::unique_ptr<nvenc_encode_device_t> make_nvenc_encode_device(
pix_fmt_e pix_fmt) override;
sampler_state_t sampler_linear;
@@ -337,8 +344,9 @@ namespace platf::dxgi {
texture2d_t old_surface_delayed_destruction;
std::chrono::steady_clock::time_point old_surface_timestamp;
std::variant<std::monostate, texture2d_t, std::shared_ptr<platf::img_t>> last_frame_variant;
std::variant<std::monostate, texture2d_t, std::shared_ptr<platf::img_t>>
last_frame_variant;
std::atomic<uint32_t> next_image_id;
};
};
} // namespace platf::dxgi
File diff suppressed because it is too large
@@ -3,24 +3,22 @@
* @brief todo
*/
#include "display.h"
#include "misc.h"
#include "src/main.h"
namespace platf {
using namespace std::literals;
using namespace std::literals;
}
namespace platf::dxgi {
struct img_t: public ::platf::img_t {
struct img_t : public ::platf::img_t {
~img_t() override {
delete[] data;
data = nullptr;
delete[] data;
data = nullptr;
}
};
};
void
blend_cursor_monochrome(const cursor_t &cursor, img_t &img) {
void blend_cursor_monochrome(const cursor_t &cursor, img_t &img) {
int height = cursor.shape_info.Height / 2;
int width = cursor.shape_info.Width;
int pitch = cursor.shape_info.Pitch;
@@ -37,7 +35,7 @@ namespace platf::dxgi {
auto cursor_height = height - cursor_skip_y - cursor_truncate_y;
if (cursor_height > height || cursor_width > width) {
return;
return;
}
auto img_skip_y = std::max(0, cursor.y);
@@ -45,74 +43,78 @@
auto cursor_img_data = cursor.img_data.data() + cursor_skip_y * pitch;
int delta_height = std::min(cursor_height - cursor_truncate_y, std::max(0, img.height - img_skip_y));
int delta_width = std::min(cursor_width - cursor_truncate_x, std::max(0, img.width - img_skip_x));
int delta_height = std::min(cursor_height - cursor_truncate_y,
std::max(0, img.height - img_skip_y));
int delta_width = std::min(cursor_width - cursor_truncate_x,
std::max(0, img.width - img_skip_x));
auto pixels_per_byte = width / pitch;
auto bytes_per_row = delta_width / pixels_per_byte;
auto img_data = (int *) img.data;
auto img_data = (int *)img.data;
for (int i = 0; i < delta_height; ++i) {
auto and_mask = &cursor_img_data[i * pitch];
auto xor_mask = &cursor_img_data[(i + height) * pitch];
auto and_mask = &cursor_img_data[i * pitch];
auto xor_mask = &cursor_img_data[(i + height) * pitch];
auto img_pixel_p = &img_data[(i + img_skip_y) * (img.row_pitch / img.pixel_pitch) + img_skip_x];
auto img_pixel_p =
&img_data[(i + img_skip_y) * (img.row_pitch / img.pixel_pitch) +
img_skip_x];
auto skip_x = cursor_skip_x;
for (int x = 0; x < bytes_per_row; ++x) {
for (auto bit = 0u; bit < 8; ++bit) {
if (skip_x > 0) {
--skip_x;
auto skip_x = cursor_skip_x;
for (int x = 0; x < bytes_per_row; ++x) {
for (auto bit = 0u; bit < 8; ++bit) {
if (skip_x > 0) {
--skip_x;
continue;
}
continue;
}
int and_ = *and_mask & (1 << (7 - bit)) ? -1 : 0;
int xor_ = *xor_mask & (1 << (7 - bit)) ? -1 : 0;
int and_ = *and_mask & (1 << (7 - bit)) ? -1 : 0;
int xor_ = *xor_mask & (1 << (7 - bit)) ? -1 : 0;
*img_pixel_p &= and_;
*img_pixel_p ^= xor_;
*img_pixel_p &= and_;
*img_pixel_p ^= xor_;
++img_pixel_p;
++img_pixel_p;
}
++and_mask;
++xor_mask;
}
++and_mask;
++xor_mask;
}
}
}
}
void
apply_color_alpha(int *img_pixel_p, int cursor_pixel) {
auto colors_out = (std::uint8_t *) &cursor_pixel;
auto colors_in = (std::uint8_t *) img_pixel_p;
void apply_color_alpha(int *img_pixel_p, int cursor_pixel) {
auto colors_out = (std::uint8_t *)&cursor_pixel;
auto colors_in = (std::uint8_t *)img_pixel_p;
// TODO: When use of IDXGIOutput5 is implemented, support different color formats
// TODO: When use of IDXGIOutput5 is implemented, support different color
// formats
auto alpha = colors_out[3];
if (alpha == 255) {
*img_pixel_p = cursor_pixel;
*img_pixel_p = cursor_pixel;
} else {
colors_in[0] =
colors_out[0] + (colors_in[0] * (255 - alpha) + 255 / 2) / 255;
colors_in[1] =
colors_out[1] + (colors_in[1] * (255 - alpha) + 255 / 2) / 255;
colors_in[2] =
colors_out[2] + (colors_in[2] * (255 - alpha) + 255 / 2) / 255;
}
else {
colors_in[0] = colors_out[0] + (colors_in[0] * (255 - alpha) + 255 / 2) / 255;
colors_in[1] = colors_out[1] + (colors_in[1] * (255 - alpha) + 255 / 2) / 255;
colors_in[2] = colors_out[2] + (colors_in[2] * (255 - alpha) + 255 / 2) / 255;
}
}
}
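The else branch above is an integer alpha blend with rounding, treating the cursor colour as premultiplied by alpha. As a worked example: with alpha = 128, a (premultiplied) cursor blue of 60 and a destination blue of 200, the result is 60 + (200 * 127 + 127) / 255 = 60 + 100 = 160.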
void
apply_color_masked(int *img_pixel_p, int cursor_pixel) {
// TODO: When use of IDXGIOutput5 is implemented, support different color formats
auto alpha = ((std::uint8_t *) &cursor_pixel)[3];
void apply_color_masked(int *img_pixel_p, int cursor_pixel) {
// TODO: When use of IDXGIOutput5 is implemented, support different color
// formats
auto alpha = ((std::uint8_t *)&cursor_pixel)[3];
if (alpha == 0xFF) {
*img_pixel_p ^= cursor_pixel;
*img_pixel_p ^= cursor_pixel;
} else {
*img_pixel_p = cursor_pixel;
}
else {
*img_pixel_p = cursor_pixel;
}
}
}
void
blend_cursor_color(const cursor_t &cursor, img_t &img, const bool masked) {
void blend_cursor_color(const cursor_t &cursor, img_t &img, const bool masked) {
int height = cursor.shape_info.Height;
int width = cursor.shape_info.Width;
int pitch = cursor.shape_info.Pitch;
@@ -132,203 +134,232 @@ namespace platf::dxgi {
auto cursor_height = height - cursor_skip_y - cursor_truncate_y;
if (cursor_height > height || cursor_width > width) {
return;
return;
}
auto cursor_img_data = (int *) &cursor.img_data[cursor_skip_y * pitch];
auto cursor_img_data = (int *)&cursor.img_data[cursor_skip_y * pitch];
int delta_height = std::min(cursor_height - cursor_truncate_y, std::max(0, img.height - img_skip_y));
int delta_width = std::min(cursor_width - cursor_truncate_x, std::max(0, img.width - img_skip_x));
int delta_height = std::min(cursor_height - cursor_truncate_y,
std::max(0, img.height - img_skip_y));
int delta_width = std::min(cursor_width - cursor_truncate_x,
std::max(0, img.width - img_skip_x));
auto img_data = (int *) img.data;
auto img_data = (int *)img.data;
for (int i = 0; i < delta_height; ++i) {
auto cursor_begin = &cursor_img_data[i * cursor.shape_info.Width + cursor_skip_x];
auto cursor_end = &cursor_begin[delta_width];
auto cursor_begin =
&cursor_img_data[i * cursor.shape_info.Width + cursor_skip_x];
auto cursor_end = &cursor_begin[delta_width];
auto img_pixel_p = &img_data[(i + img_skip_y) * (img.row_pitch / img.pixel_pitch) + img_skip_x];
std::for_each(cursor_begin, cursor_end, [&](int cursor_pixel) {
if (masked) {
apply_color_masked(img_pixel_p, cursor_pixel);
}
else {
apply_color_alpha(img_pixel_p, cursor_pixel);
}
++img_pixel_p;
});
auto img_pixel_p =
&img_data[(i + img_skip_y) * (img.row_pitch / img.pixel_pitch) +
img_skip_x];
std::for_each(cursor_begin, cursor_end, [&](int cursor_pixel) {
if (masked) {
apply_color_masked(img_pixel_p, cursor_pixel);
} else {
apply_color_alpha(img_pixel_p, cursor_pixel);
}
++img_pixel_p;
});
}
}
}
void
blend_cursor(const cursor_t &cursor, img_t &img) {
void blend_cursor(const cursor_t &cursor, img_t &img) {
switch (cursor.shape_info.Type) {
case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_COLOR:
blend_cursor_color(cursor, img, false);
break;
case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME:
blend_cursor_monochrome(cursor, img);
break;
case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MASKED_COLOR:
blend_cursor_color(cursor, img, true);
break;
default:
BOOST_LOG(warning) << "Unsupported cursor format ["sv << cursor.shape_info.Type << ']';
break;
case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_COLOR:
blend_cursor_color(cursor, img, false);
break;
case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME:
blend_cursor_monochrome(cursor, img);
break;
case DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MASKED_COLOR:
blend_cursor_color(cursor, img, true);
break;
default:
BOOST_LOG(warning) << "Unsupported cursor format ["sv
<< cursor.shape_info.Type << ']';
break;
}
}
}
capture_e
display_ram_t::snapshot(const pull_free_image_cb_t &pull_free_image_cb, std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout, bool cursor_visible) {
capture_e display_ram_t::snapshot(
const pull_free_image_cb_t &pull_free_image_cb,
std::shared_ptr<platf::img_t> &img_out, std::chrono::milliseconds timeout,
bool cursor_visible) {
HRESULT status;
DXGI_OUTDUPL_FRAME_INFO frame_info;
resource_t::pointer res_p {};
resource_t::pointer res_p{};
auto capture_status = dup.next_frame(frame_info, timeout, &res_p);
resource_t res { res_p };
resource_t res{res_p};
if (capture_status != capture_e::ok) {
return capture_status;
return capture_status;
}
const bool mouse_update_flag = frame_info.LastMouseUpdateTime.QuadPart != 0 || frame_info.PointerShapeBufferSize > 0;
const bool frame_update_flag = frame_info.AccumulatedFrames != 0 || frame_info.LastPresentTime.QuadPart != 0;
const bool mouse_update_flag =
frame_info.LastMouseUpdateTime.QuadPart != 0 ||
frame_info.PointerShapeBufferSize > 0;
const bool frame_update_flag = frame_info.AccumulatedFrames != 0 ||
frame_info.LastPresentTime.QuadPart != 0;
const bool update_flag = mouse_update_flag || frame_update_flag;
if (!update_flag) {
return capture_e::timeout;
return capture_e::timeout;
}
std::optional<std::chrono::steady_clock::time_point> frame_timestamp;
if (auto qpc_displayed = std::max(frame_info.LastPresentTime.QuadPart, frame_info.LastMouseUpdateTime.QuadPart)) {
// Translate QueryPerformanceCounter() value to steady_clock time point
frame_timestamp = std::chrono::steady_clock::now() - qpc_time_difference(qpc_counter(), qpc_displayed);
if (auto qpc_displayed =
std::max(frame_info.LastPresentTime.QuadPart,
frame_info.LastMouseUpdateTime.QuadPart)) {
// Translate QueryPerformanceCounter() value to steady_clock time point
frame_timestamp = std::chrono::steady_clock::now() -
qpc_time_difference(qpc_counter(), qpc_displayed);
}
if (frame_info.PointerShapeBufferSize > 0) {
auto &img_data = cursor.img_data;
auto &img_data = cursor.img_data;
img_data.resize(frame_info.PointerShapeBufferSize);
img_data.resize(frame_info.PointerShapeBufferSize);
UINT dummy;
status = dup.dup->GetFramePointerShape(img_data.size(), img_data.data(), &dummy, &cursor.shape_info);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to get new pointer shape [0x"sv << util::hex(status).to_string_view() << ']';
UINT dummy;
status = dup.dup->GetFramePointerShape(img_data.size(), img_data.data(),
&dummy, &cursor.shape_info);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to get new pointer shape [0x"sv
<< util::hex(status).to_string_view() << ']';
return capture_e::error;
}
return capture_e::error;
}
}
if (frame_info.LastMouseUpdateTime.QuadPart) {
cursor.x = frame_info.PointerPosition.Position.x;
cursor.y = frame_info.PointerPosition.Position.y;
cursor.visible = frame_info.PointerPosition.Visible;
cursor.x = frame_info.PointerPosition.Position.x;
cursor.y = frame_info.PointerPosition.Position.y;
cursor.visible = frame_info.PointerPosition.Visible;
}
if (frame_update_flag) {
{
texture2d_t src {};
status = res->QueryInterface(IID_ID3D11Texture2D, (void **) &src);
{
texture2d_t src{};
status = res->QueryInterface(IID_ID3D11Texture2D, (void **)&src);
if (FAILED(status)) {
BOOST_LOG(error) << "Couldn't query interface [0x"sv << util::hex(status).to_string_view() << ']';
return capture_e::error;
if (FAILED(status)) {
BOOST_LOG(error) << "Couldn't query interface [0x"sv
<< util::hex(status).to_string_view() << ']';
return capture_e::error;
}
D3D11_TEXTURE2D_DESC desc;
src->GetDesc(&desc);
// If we don't know the capture format yet, grab it from this
// texture and create the staging texture
if (capture_format == DXGI_FORMAT_UNKNOWN) {
capture_format = desc.Format;
BOOST_LOG(info) << "Capture format ["sv
<< dxgi_format_to_string(capture_format) << ']';
D3D11_TEXTURE2D_DESC t{};
t.Width = width;
t.Height = height;
t.MipLevels = 1;
t.ArraySize = 1;
t.SampleDesc.Count = 1;
t.Usage = D3D11_USAGE_STAGING;
t.Format = capture_format;
t.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
auto status = device->CreateTexture2D(&t, nullptr, &texture);
if (FAILED(status)) {
BOOST_LOG(error)
<< "Failed to create staging texture [0x"sv
<< util::hex(status).to_string_view() << ']';
return capture_e::error;
}
}
// It's possible for our display enumeration to race with mode
// changes and result in mismatched image pool and desktop texture
// sizes. If this happens, just reinit again.
if (desc.Width != width || desc.Height != height) {
BOOST_LOG(info)
<< "Capture size changed ["sv << width << 'x' << height
<< " -> "sv << desc.Width << 'x' << desc.Height << ']';
return capture_e::reinit;
}
// It's also possible for the capture format to change on the fly.
// If that happens, reinitialize capture to try format detection
// again and create new images.
if (capture_format != desc.Format) {
BOOST_LOG(info)
<< "Capture format changed ["sv
<< dxgi_format_to_string(capture_format) << " -> "sv
<< dxgi_format_to_string(desc.Format) << ']';
return capture_e::reinit;
}
// Copy from GPU to CPU
device_ctx->CopyResource(texture.get(), src.get());
}
D3D11_TEXTURE2D_DESC desc;
src->GetDesc(&desc);
// If we don't know the capture format yet, grab it from this texture and create the staging texture
if (capture_format == DXGI_FORMAT_UNKNOWN) {
capture_format = desc.Format;
BOOST_LOG(info) << "Capture format ["sv << dxgi_format_to_string(capture_format) << ']';
D3D11_TEXTURE2D_DESC t {};
t.Width = width;
t.Height = height;
t.MipLevels = 1;
t.ArraySize = 1;
t.SampleDesc.Count = 1;
t.Usage = D3D11_USAGE_STAGING;
t.Format = capture_format;
t.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
auto status = device->CreateTexture2D(&t, nullptr, &texture);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to create staging texture [0x"sv << util::hex(status).to_string_view() << ']';
return capture_e::error;
}
}
// It's possible for our display enumeration to race with mode changes and result in
// mismatched image pool and desktop texture sizes. If this happens, just reinit again.
if (desc.Width != width || desc.Height != height) {
BOOST_LOG(info) << "Capture size changed ["sv << width << 'x' << height << " -> "sv << desc.Width << 'x' << desc.Height << ']';
return capture_e::reinit;
}
// It's also possible for the capture format to change on the fly. If that happens,
// reinitialize capture to try format detection again and create new images.
if (capture_format != desc.Format) {
BOOST_LOG(info) << "Capture format changed ["sv << dxgi_format_to_string(capture_format) << " -> "sv << dxgi_format_to_string(desc.Format) << ']';
return capture_e::reinit;
}
// Copy from GPU to CPU
device_ctx->CopyResource(texture.get(), src.get());
}
}
if (!pull_free_image_cb(img_out)) {
return capture_e::interrupted;
return capture_e::interrupted;
}
auto img = (img_t *) img_out.get();
auto img = (img_t *)img_out.get();
// If we don't know the final capture format yet, encode a dummy image
if (capture_format == DXGI_FORMAT_UNKNOWN) {
BOOST_LOG(debug) << "Capture format is still unknown. Encoding a blank image"sv;
BOOST_LOG(debug)
<< "Capture format is still unknown. Encoding a blank image"sv;
if (dummy_img(img)) {
return capture_e::error;
}
}
else {
// Map the staging texture for CPU access (making it inaccessible for the GPU)
status = device_ctx->Map(texture.get(), 0, D3D11_MAP_READ, 0, &img_info);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to map texture [0x"sv << util::hex(status).to_string_view() << ']';
if (dummy_img(img)) {
return capture_e::error;
}
} else {
// Map the staging texture for CPU access (making it inaccessible for
// the GPU)
status =
device_ctx->Map(texture.get(), 0, D3D11_MAP_READ, 0, &img_info);
if (FAILED(status)) {
BOOST_LOG(error) << "Failed to map texture [0x"sv
<< util::hex(status).to_string_view() << ']';
return capture_e::error;
}
return capture_e::error;
}
// Now that we know the capture format, we can finish creating the image
if (complete_img(img, false)) {
// Now that we know the capture format, we can finish creating the image
if (complete_img(img, false)) {
device_ctx->Unmap(texture.get(), 0);
img_info.pData = nullptr;
return capture_e::error;
}
std::copy_n((std::uint8_t *)img_info.pData, height * img_info.RowPitch,
(std::uint8_t *)img->data);
// Unmap the staging texture to allow GPU access again
device_ctx->Unmap(texture.get(), 0);
img_info.pData = nullptr;
return capture_e::error;
}
std::copy_n((std::uint8_t *) img_info.pData, height * img_info.RowPitch, (std::uint8_t *) img->data);
// Unmap the staging texture to allow GPU access again
device_ctx->Unmap(texture.get(), 0);
img_info.pData = nullptr;
}
if (cursor_visible && cursor.visible) {
blend_cursor(cursor, *img);
blend_cursor(cursor, *img);
}
if (img) {
img->frame_timestamp = frame_timestamp;
img->frame_timestamp = frame_timestamp;
}
return capture_e::ok;
}
}
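The frame-timestamp translation above leans on the qpc_counter()/qpc_time_difference() helpers defined elsewhere in the codebase; a minimal sketch of what they are assumed to do (overflow handling omitted):

  #include <chrono>
  #include <cstdint>
  #include <windows.h>

  inline std::int64_t qpc_counter_sketch() {
    LARGE_INTEGER counter;
    QueryPerformanceCounter(&counter);
    return counter.QuadPart;
  }

  inline std::chrono::nanoseconds qpc_time_difference_sketch(std::int64_t now, std::int64_t then) {
    LARGE_INTEGER freq;
    QueryPerformanceFrequency(&freq);  // ticks per second
    return std::chrono::nanoseconds{(now - then) * 1'000'000'000LL / freq.QuadPart};
  }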
std::shared_ptr<platf::img_t>
display_ram_t::alloc_img() {
std::shared_ptr<platf::img_t> display_ram_t::alloc_img() {
auto img = std::make_shared<img_t>();
// Initialize fields that are format-independent
@@ -336,64 +367,62 @@ namespace platf::dxgi {
img->height = height;
return img;
}
}
int
display_ram_t::complete_img(platf::img_t *img, bool dummy) {
int display_ram_t::complete_img(platf::img_t *img, bool dummy) {
// If this is not a dummy image, we must know the format by now
if (!dummy && capture_format == DXGI_FORMAT_UNKNOWN) {
BOOST_LOG(error) << "display_ram_t::complete_img() called with unknown capture format!";
return -1;
BOOST_LOG(error) << "display_ram_t::complete_img() called with unknown "
"capture format!";
return -1;
}
img->pixel_pitch = get_pixel_pitch();
if (dummy && !img->row_pitch) {
// Assume our dummy image will have no padding
img->row_pitch = img->pixel_pitch * img->width;
// Assume our dummy image will have no padding
img->row_pitch = img->pixel_pitch * img->width;
}
// Reallocate the image buffer if the pitch changes
if (!dummy && img->row_pitch != img_info.RowPitch) {
img->row_pitch = img_info.RowPitch;
delete[] img->data;
img->data = nullptr;
img->row_pitch = img_info.RowPitch;
delete[] img->data;
img->data = nullptr;
}
if (!img->data) {
img->data = new std::uint8_t[img->row_pitch * height];
img->data = new std::uint8_t[img->row_pitch * height];
}
return 0;
}
}
int
display_ram_t::dummy_img(platf::img_t *img) {
int display_ram_t::dummy_img(platf::img_t *img) {
if (complete_img(img, true)) {
return -1;
return -1;
}
std::fill_n((std::uint8_t *) img->data, height * img->row_pitch, 0);
std::fill_n((std::uint8_t *)img->data, height * img->row_pitch, 0);
return 0;
}
}
std::vector<DXGI_FORMAT>
display_ram_t::get_supported_capture_formats() {
return { DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8X8_UNORM };
}
std::vector<DXGI_FORMAT> display_ram_t::get_supported_capture_formats() {
return {DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8X8_UNORM};
}
int
display_ram_t::init(const ::video::config_t &config, const std::string &display_name) {
int display_ram_t::init(const ::video::config_t &config,
const std::string &display_name) {
if (display_base_t::init(config, display_name)) {
return -1;
return -1;
}
return 0;
}
}
std::unique_ptr<avcodec_encode_device_t>
display_ram_t::make_avcodec_encode_device(pix_fmt_e pix_fmt) {
std::unique_ptr<avcodec_encode_device_t>
display_ram_t::make_avcodec_encode_device(pix_fmt_e pix_fmt) {
return std::make_unique<avcodec_encode_device_t>();
}
}
} // namespace platf::dxgi
File diff suppressed because it is too large
@@ -8,264 +8,265 @@
#include <cstdint>
namespace platf {
// Virtual Key to Scan Code mapping for the US English layout (00000409).
// GameStream uses this as the canonical key layout for scancode conversion.
constexpr std::array<std::uint8_t, std::numeric_limits<std::uint8_t>::max() + 1> VK_TO_SCANCODE_MAP {
0, /* 0x00 */
0, /* 0x01 */
0, /* 0x02 */
70, /* 0x03 */
0, /* 0x04 */
0, /* 0x05 */
0, /* 0x06 */
0, /* 0x07 */
14, /* 0x08 */
15, /* 0x09 */
0, /* 0x0a */
0, /* 0x0b */
76, /* 0x0c */
28, /* 0x0d */
0, /* 0x0e */
0, /* 0x0f */
42, /* 0x10 */
29, /* 0x11 */
56, /* 0x12 */
0, /* 0x13 */
58, /* 0x14 */
0, /* 0x15 */
0, /* 0x16 */
0, /* 0x17 */
0, /* 0x18 */
0, /* 0x19 */
0, /* 0x1a */
1, /* 0x1b */
0, /* 0x1c */
0, /* 0x1d */
0, /* 0x1e */
0, /* 0x1f */
57, /* 0x20 */
73, /* 0x21 */
81, /* 0x22 */
79, /* 0x23 */
71, /* 0x24 */
75, /* 0x25 */
72, /* 0x26 */
77, /* 0x27 */
80, /* 0x28 */
0, /* 0x29 */
0, /* 0x2a */
0, /* 0x2b */
84, /* 0x2c */
82, /* 0x2d */
83, /* 0x2e */
99, /* 0x2f */
11, /* 0x30 */
2, /* 0x31 */
3, /* 0x32 */
4, /* 0x33 */
5, /* 0x34 */
6, /* 0x35 */
7, /* 0x36 */
8, /* 0x37 */
9, /* 0x38 */
10, /* 0x39 */
0, /* 0x3a */
0, /* 0x3b */
0, /* 0x3c */
0, /* 0x3d */
0, /* 0x3e */
0, /* 0x3f */
0, /* 0x40 */
30, /* 0x41 */
48, /* 0x42 */
46, /* 0x43 */
32, /* 0x44 */
18, /* 0x45 */
33, /* 0x46 */
34, /* 0x47 */
35, /* 0x48 */
23, /* 0x49 */
36, /* 0x4a */
37, /* 0x4b */
38, /* 0x4c */
50, /* 0x4d */
49, /* 0x4e */
24, /* 0x4f */
25, /* 0x50 */
16, /* 0x51 */
19, /* 0x52 */
31, /* 0x53 */
20, /* 0x54 */
22, /* 0x55 */
47, /* 0x56 */
17, /* 0x57 */
45, /* 0x58 */
21, /* 0x59 */
44, /* 0x5a */
91, /* 0x5b */
92, /* 0x5c */
93, /* 0x5d */
0, /* 0x5e */
95, /* 0x5f */
82, /* 0x60 */
79, /* 0x61 */
80, /* 0x62 */
81, /* 0x63 */
75, /* 0x64 */
76, /* 0x65 */
77, /* 0x66 */
71, /* 0x67 */
72, /* 0x68 */
73, /* 0x69 */
55, /* 0x6a */
78, /* 0x6b */
0, /* 0x6c */
74, /* 0x6d */
83, /* 0x6e */
53, /* 0x6f */
59, /* 0x70 */
60, /* 0x71 */
61, /* 0x72 */
62, /* 0x73 */
63, /* 0x74 */
64, /* 0x75 */
65, /* 0x76 */
66, /* 0x77 */
67, /* 0x78 */
68, /* 0x79 */
87, /* 0x7a */
88, /* 0x7b */
100, /* 0x7c */
101, /* 0x7d */
102, /* 0x7e */
103, /* 0x7f */
104, /* 0x80 */
105, /* 0x81 */
106, /* 0x82 */
107, /* 0x83 */
108, /* 0x84 */
109, /* 0x85 */
110, /* 0x86 */
118, /* 0x87 */
0, /* 0x88 */
0, /* 0x89 */
0, /* 0x8a */
0, /* 0x8b */
0, /* 0x8c */
0, /* 0x8d */
0, /* 0x8e */
0, /* 0x8f */
69, /* 0x90 */
70, /* 0x91 */
0, /* 0x92 */
0, /* 0x93 */
0, /* 0x94 */
0, /* 0x95 */
0, /* 0x96 */
0, /* 0x97 */
0, /* 0x98 */
0, /* 0x99 */
0, /* 0x9a */
0, /* 0x9b */
0, /* 0x9c */
0, /* 0x9d */
0, /* 0x9e */
0, /* 0x9f */
42, /* 0xa0 */
54, /* 0xa1 */
29, /* 0xa2 */
29, /* 0xa3 */
56, /* 0xa4 */
56, /* 0xa5 */
106, /* 0xa6 */
105, /* 0xa7 */
103, /* 0xa8 */
104, /* 0xa9 */
101, /* 0xaa */
102, /* 0xab */
50, /* 0xac */
32, /* 0xad */
46, /* 0xae */
48, /* 0xaf */
25, /* 0xb0 */
16, /* 0xb1 */
36, /* 0xb2 */
34, /* 0xb3 */
108, /* 0xb4 */
109, /* 0xb5 */
107, /* 0xb6 */
33, /* 0xb7 */
0, /* 0xb8 */
0, /* 0xb9 */
39, /* 0xba */
13, /* 0xbb */
51, /* 0xbc */
12, /* 0xbd */
52, /* 0xbe */
53, /* 0xbf */
41, /* 0xc0 */
115, /* 0xc1 */
126, /* 0xc2 */
0, /* 0xc3 */
0, /* 0xc4 */
0, /* 0xc5 */
0, /* 0xc6 */
0, /* 0xc7 */
0, /* 0xc8 */
0, /* 0xc9 */
0, /* 0xca */
0, /* 0xcb */
0, /* 0xcc */
0, /* 0xcd */
0, /* 0xce */
0, /* 0xcf */
0, /* 0xd0 */
0, /* 0xd1 */
0, /* 0xd2 */
0, /* 0xd3 */
0, /* 0xd4 */
0, /* 0xd5 */
0, /* 0xd6 */
0, /* 0xd7 */
0, /* 0xd8 */
0, /* 0xd9 */
0, /* 0xda */
26, /* 0xdb */
43, /* 0xdc */
27, /* 0xdd */
40, /* 0xde */
0, /* 0xdf */
0, /* 0xe0 */
0, /* 0xe1 */
86, /* 0xe2 */
0, /* 0xe3 */
0, /* 0xe4 */
0, /* 0xe5 */
0, /* 0xe6 */
0, /* 0xe7 */
0, /* 0xe8 */
113, /* 0xe9 */
92, /* 0xea */
123, /* 0xeb */
0, /* 0xec */
111, /* 0xed */
90, /* 0xee */
0, /* 0xef */
0, /* 0xf0 */
91, /* 0xf1 */
0, /* 0xf2 */
95, /* 0xf3 */
0, /* 0xf4 */
94, /* 0xf5 */
0, /* 0xf6 */
0, /* 0xf7 */
0, /* 0xf8 */
93, /* 0xf9 */
0, /* 0xfa */
98, /* 0xfb */
0, /* 0xfc */
0, /* 0xfd */
0, /* 0xfe */
0, /* 0xff */
};
} // namespace platf
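A short sketch (annotation, not part of this diff) of how the table above is consulted; the wrapper name is hypothetical. For example, the virtual-key code for 'A' (0x41) maps to set-1 scancode 30.

#include <cstdint>

// Hypothetical helper: translate a Windows virtual-key code to the
// GameStream (US English) scancode; unmapped keys yield 0.
inline std::uint8_t vk_to_scancode(std::uint8_t vk) {
  return platf::VK_TO_SCANCODE_MAP[vk];
}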

View File

@@ -8,7 +8,6 @@
#include <iomanip>
#include <sstream>
// prevent clang format from "optimizing" the header include order
// clang-format off
#include <dwmapi.h>
@@ -25,20 +24,21 @@
#include <sddl.h>
// clang-format on
#include <iterator>
#include "src/main.h"
#include "src/platform/common.h"
#include "src/utility.h"
// UDP_SEND_MSG_SIZE was added in the Windows 10 20H1 SDK
#ifndef UDP_SEND_MSG_SIZE
#define UDP_SEND_MSG_SIZE 2
#endif
// PROC_THREAD_ATTRIBUTE_JOB_LIST is currently missing from MinGW headers
#ifndef PROC_THREAD_ATTRIBUTE_JOB_LIST
#define PROC_THREAD_ATTRIBUTE_JOB_LIST \
  ProcThreadAttributeValue(13, FALSE, TRUE, FALSE)
#endif
#ifndef HAS_QOS_FLOWID
@@ -51,137 +51,134 @@ typedef UINT32 *PQOS_FLOWID;
#include <qos2.h>
using namespace std::literals;
namespace platf {
static std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>, wchar_t>
    converter;

bool enabled_mouse_keys = false;
MOUSEKEYS previous_mouse_keys_state;

HANDLE qos_handle = nullptr;

decltype(QOSCreateHandle) *fn_QOSCreateHandle = nullptr;
decltype(QOSAddSocketToFlow) *fn_QOSAddSocketToFlow = nullptr;
decltype(QOSRemoveSocketFromFlow) *fn_QOSRemoveSocketFromFlow = nullptr;

HANDLE wlan_handle = nullptr;

decltype(WlanOpenHandle) *fn_WlanOpenHandle = nullptr;
decltype(WlanCloseHandle) *fn_WlanCloseHandle = nullptr;
decltype(WlanFreeMemory) *fn_WlanFreeMemory = nullptr;
decltype(WlanEnumInterfaces) *fn_WlanEnumInterfaces = nullptr;
decltype(WlanSetInterface) *fn_WlanSetInterface = nullptr;
std::filesystem::path appdata() {
  WCHAR sunshine_path[MAX_PATH];
  GetModuleFileNameW(NULL, sunshine_path, _countof(sunshine_path));
  return std::filesystem::path{sunshine_path}.remove_filename() / L"config"sv;
}
std::string from_sockaddr(const sockaddr *const socket_address) {
  char data[INET6_ADDRSTRLEN] = {};

  auto family = socket_address->sa_family;
  if (family == AF_INET6) {
    inet_ntop(AF_INET6, &((sockaddr_in6 *)socket_address)->sin6_addr, data,
              INET6_ADDRSTRLEN);
  } else if (family == AF_INET) {
    inet_ntop(AF_INET, &((sockaddr_in *)socket_address)->sin_addr, data,
              INET_ADDRSTRLEN);
  }

  return std::string{data};
}
std::pair<std::uint16_t, std::string> from_sockaddr_ex(
    const sockaddr *const ip_addr) {
  char data[INET6_ADDRSTRLEN] = {};

  auto family = ip_addr->sa_family;
  std::uint16_t port = 0;
  if (family == AF_INET6) {
    inet_ntop(AF_INET6, &((sockaddr_in6 *)ip_addr)->sin6_addr, data,
              INET6_ADDRSTRLEN);
    port = ((sockaddr_in6 *)ip_addr)->sin6_port;
  } else if (family == AF_INET) {
    inet_ntop(AF_INET, &((sockaddr_in *)ip_addr)->sin_addr, data,
              INET_ADDRSTRLEN);
    port = ((sockaddr_in *)ip_addr)->sin_port;
  }

  return {port, std::string{data}};
}
HDESK syncThreadDesktop() {
  auto hDesk = OpenInputDesktop(DF_ALLOWOTHERACCOUNTHOOK, FALSE, GENERIC_ALL);
  if (!hDesk) {
    auto err = GetLastError();
    BOOST_LOG(error) << "Failed to Open Input Desktop [0x"sv
                     << util::hex(err).to_string_view() << ']';

    return nullptr;
  }

  if (!SetThreadDesktop(hDesk)) {
    auto err = GetLastError();
    BOOST_LOG(error) << "Failed to sync desktop to thread [0x"sv
                     << util::hex(err).to_string_view() << ']';
  }

  CloseDesktop(hDesk);

  return hDesk;
}
void print_status(const std::string_view &prefix, HRESULT status) {
  char err_string[1024];

  DWORD bytes = FormatMessage(
      FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, nullptr,
      status, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), err_string,
      sizeof(err_string), nullptr);

  BOOST_LOG(error) << prefix << ": "sv << std::string_view{err_string, bytes};
}
bool IsUserAdmin(HANDLE user_token) {
  WINBOOL ret;
  SID_IDENTIFIER_AUTHORITY NtAuthority = SECURITY_NT_AUTHORITY;
  PSID AdministratorsGroup;
  ret = AllocateAndInitializeSid(&NtAuthority, 2, SECURITY_BUILTIN_DOMAIN_RID,
                                 DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0, 0,
                                 &AdministratorsGroup);
  if (ret) {
    if (!CheckTokenMembership(user_token, AdministratorsGroup, &ret)) {
      ret = false;
      BOOST_LOG(error) << "Failed to verify token membership for "
                          "administrative access: "
                       << GetLastError();
    }
    FreeSid(AdministratorsGroup);
  } else {
    BOOST_LOG(error)
        << "Unable to allocate SID to check administrative access: "
        << GetLastError();
  }

  return ret;
}
/**
 * @brief Obtain the current session user's primary token with elevated
 * privileges.
 * @return The user's token. If the user has admin capability it will be
 * elevated, otherwise it will be a limited token. On error, `nullptr`.
 */
HANDLE retrieve_users_token(bool elevated) {
  DWORD consoleSessionId;
  HANDLE userToken;
  TOKEN_ELEVATION_TYPE elevationType;
@@ -190,65 +187,82 @@ namespace platf {
  // Get the session ID of the active console session
  consoleSessionId = WTSGetActiveConsoleSessionId();
  if (0xFFFFFFFF == consoleSessionId) {
    // If there is no active console session, log a warning and return null
    BOOST_LOG(warning)
        << "There isn't an active user session, therefore it is not "
           "possible to execute commands under the users profile.";
    return nullptr;
  }

  // Get the user token for the active console session
  if (!WTSQueryUserToken(consoleSessionId, &userToken)) {
    BOOST_LOG(debug) << "QueryUserToken failed, this would prevent "
                        "commands from launching under the users profile.";
    return nullptr;
  }

  // We need to know if this is an elevated token or not.
  // Get the elevation type of the user token
  // Elevation - Default: User is not an admin, UAC enabled/disabled does not matter.
  // Elevation - Limited: User is an admin, has UAC enabled.
  // Elevation - Full: User is an admin, has UAC disabled.
  if (!GetTokenInformation(userToken, TokenElevationType, &elevationType,
                           sizeof(TOKEN_ELEVATION_TYPE), &dwSize)) {
    BOOST_LOG(debug) << "Retrieving token information failed: "
                     << GetLastError();
    CloseHandle(userToken);
    return nullptr;
  }

  // User is currently not an administrator
  // The documentation for this scenario is conflicting, so we'll double check
  // to see if the user is actually an admin.
  if (elevated && (elevationType == TokenElevationTypeDefault &&
                   !IsUserAdmin(userToken))) {
    // We don't have to strip the token or do anything here, but let's give
    // the user a warning so they're aware what is happening.
    BOOST_LOG(warning)
        << "This command requires elevation and the current user account "
           "logged in does not have administrator rights. "
        << "For security reasons Sunshine will retain the same access "
           "level as the current user and will not elevate it.";
  }

  // User has a limited token, this means they have UAC enabled and is an
  // Administrator
  if (elevated && elevationType == TokenElevationTypeLimited) {
    TOKEN_LINKED_TOKEN linkedToken;
    // Retrieve the administrator token that is linked to the limited token
    if (!GetTokenInformation(userToken, TokenLinkedToken,
                             reinterpret_cast<void *>(&linkedToken),
                             sizeof(TOKEN_LINKED_TOKEN), &dwSize)) {
      // If the retrieval failed, log an error message and return null
      BOOST_LOG(error) << "Retrieving linked token information failed: "
                       << GetLastError();
      CloseHandle(userToken);

      // There is no scenario where this should be hit, except for an
      // actual error.
      return nullptr;
    }

    // Since we need the elevated token, we'll replace it with their
    // administrative token.
    CloseHandle(userToken);
    userToken = linkedToken.LinkedToken;
  }

  // We don't need to do anything for TokenElevationTypeFull users here,
  // because they're already elevated.
  return userToken;
}
/**
 * @brief Check if the current process is running with system-level privileges.
 * @return `true` if the current process has system-level privileges, `false`
 * otherwise.
 */
bool is_running_as_system() {
  BOOL ret;
  PSID SystemSid;
  DWORD dwSize = SECURITY_MAX_SID_SIZE;
@@ -256,165 +270,161 @@ namespace platf {
  // Allocate memory for the SID structure
  SystemSid = LocalAlloc(LMEM_FIXED, dwSize);
  if (SystemSid == nullptr) {
    BOOST_LOG(error) << "Failed to allocate memory for the SID structure: "
                     << GetLastError();
    return false;
  }

  // Create a SID for the local system account
  ret = CreateWellKnownSid(WinLocalSystemSid, nullptr, SystemSid, &dwSize);
  if (ret) {
    // Check if the current process token contains this SID
    if (!CheckTokenMembership(nullptr, SystemSid, &ret)) {
      BOOST_LOG(error)
          << "Failed to check token membership: " << GetLastError();
      ret = false;
    }
  } else {
    BOOST_LOG(error) << "Failed to create a SID for the local system "
                        "account. This may happen if the system is out of "
                        "memory or if the SID buffer is too small: "
                     << GetLastError();
  }

  // Free the memory allocated for the SID structure
  LocalFree(SystemSid);

  return ret;
}
// Note: This does NOT append a null terminator
void append_string_to_environment_block(wchar_t *env_block, int &offset,
                                        const std::wstring &wstr) {
  std::memcpy(&env_block[offset], wstr.data(),
              wstr.length() * sizeof(wchar_t));
  offset += wstr.length();
}
LPPROC_THREAD_ATTRIBUTE_LIST
allocate_proc_thread_attr_list(DWORD attribute_count) {
  SIZE_T size;
  InitializeProcThreadAttributeList(NULL, attribute_count, 0, &size);

  auto list =
      (LPPROC_THREAD_ATTRIBUTE_LIST)HeapAlloc(GetProcessHeap(), 0, size);
  if (list == NULL) {
    return NULL;
  }

  if (!InitializeProcThreadAttributeList(list, attribute_count, 0, &size)) {
    HeapFree(GetProcessHeap(), 0, list);
    return NULL;
  }

  return list;
}
void free_proc_thread_attr_list(LPPROC_THREAD_ATTRIBUTE_LIST list) {
  DeleteProcThreadAttributeList(list);
  HeapFree(GetProcessHeap(), 0, list);
}
/**
 * @brief Impersonate the current user and invoke the callback function.
 * @param user_token A handle to the user's token that was obtained from the
 * shell.
 * @param callback A function that will be executed while impersonating the
 * user.
 * @return An `std::error_code` object that will store any error that occurred
 * during the impersonation.
 */
std::error_code impersonate_current_user(HANDLE user_token,
                                         std::function<void()> callback) {
  std::error_code ec;
  // Impersonate the user when launching the process. This will ensure that
  // appropriate access checks are done against the user token, not our SYSTEM
  // token. It will also allow network shares and mapped network drives to be
  // used as launch targets, since those credentials are stored per-user.
  if (!ImpersonateLoggedOnUser(user_token)) {
    auto winerror = GetLastError();
    // Log the failure of impersonating the user and its error code
    BOOST_LOG(error) << "Failed to impersonate user: "sv << winerror;
    ec = std::make_error_code(std::errc::permission_denied);
    return ec;
  }

  // Execute the callback function while impersonating the user
  callback();

  // End impersonation of the logged on user. If this fails (which is
  // extremely unlikely), we will be running with an unknown user token. The
  // only safe thing to do in that case is terminate ourselves.
  if (!RevertToSelf()) {
    auto winerror = GetLastError();
    // Log the failure of reverting to self and its error code
    BOOST_LOG(fatal) << "Failed to revert to self after impersonation: "sv
                     << winerror;
    std::abort();
  }

  return ec;
}
void adjust_thread_priority(thread_priority_e priority) {
  int win32_priority;

  switch (priority) {
  case thread_priority_e::low:
    win32_priority = THREAD_PRIORITY_BELOW_NORMAL;
    break;
  case thread_priority_e::normal:
    win32_priority = THREAD_PRIORITY_NORMAL;
    break;
  case thread_priority_e::high:
    win32_priority = THREAD_PRIORITY_ABOVE_NORMAL;
    break;
  case thread_priority_e::critical:
    win32_priority = THREAD_PRIORITY_HIGHEST;
    break;
  default:
    BOOST_LOG(error) << "Unknown thread priority: "sv << (int)priority;
    return;
  }

  if (!SetThreadPriority(GetCurrentThread(), win32_priority)) {
    auto winerr = GetLastError();
    BOOST_LOG(warning) << "Unable to set thread priority to "sv
                       << win32_priority << ": "sv << winerr;
  }
}
int64_t qpc_counter() {
  LARGE_INTEGER performance_counter;
  if (QueryPerformanceCounter(&performance_counter))
    return performance_counter.QuadPart;
  return 0;
}
std::chrono::nanoseconds qpc_time_difference(int64_t performance_counter1,
                                             int64_t performance_counter2) {
  auto get_frequency = []() {
    LARGE_INTEGER frequency;
    frequency.QuadPart = 0;
    QueryPerformanceFrequency(&frequency);
    return frequency.QuadPart;
  };

  static const double frequency = get_frequency();

  if (frequency) {
    return std::chrono::nanoseconds(
        (int64_t)((performance_counter1 - performance_counter2) *
                  frequency / std::nano::den));
  }

  return {};
}
// It's not big enough to justify its own source file :/
namespace dxgi {
int init();
}

bool init() { return dxgi::init() != 0; }
} // namespace platf
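A hedged usage sketch (annotation, not part of this diff) combining `retrieve_users_token()` and `impersonate_current_user()` as defined above; the surrounding function name and its minimal error handling are illustrative.

// Hypothetical example: run a callback with the console user's limited token.
void run_as_console_user_example() {
  HANDLE token = platf::retrieve_users_token(false);
  if (!token) {
    return; // no active user session
  }

  auto ec = platf::impersonate_current_user(token, []() {
    // Work done here is access-checked against the user's token.
  });
  (void)ec; // inspect ec in real code

  CloseHandle(token);
}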

View File

@@ -4,20 +4,19 @@
*/
#pragma once
#include <chrono>
#include <string_view>

#include <windows.h>
#include <winnt.h>
namespace platf {
void print_status(const std::string_view &prefix, HRESULT status);
HDESK syncThreadDesktop();

int64_t qpc_counter();

std::chrono::nanoseconds qpc_time_difference(int64_t performance_counter1,
                                             int64_t performance_counter2);
} // namespace platf
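A small sketch (annotation, not part of this diff) of the QPC helpers declared above; the measured work is a placeholder.

// Hypothetical example: time a block of work with the declarations above.
std::chrono::nanoseconds time_some_work() {
  auto begin = platf::qpc_counter();
  // ... work to be measured ...
  auto end = platf::qpc_counter();
  return platf::qpc_time_difference(end, begin);
}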

View File

@@ -10,92 +10,74 @@
namespace sync_util {
template <class T, class M = std::mutex>
class sync_t {
public:
  using value_t = T;
  using mutex_t = M;

  std::lock_guard<mutex_t> lock() { return std::lock_guard{_lock}; }

  template <class... Args>
  sync_t(Args &&...args) : raw{std::forward<Args>(args)...} {}

  sync_t &operator=(sync_t &&other) noexcept {
    std::lock(_lock, other._lock);

    raw = std::move(other.raw);

    _lock.unlock();
    other._lock.unlock();

    return *this;
  }

  sync_t &operator=(sync_t &other) noexcept {
    std::lock(_lock, other._lock);

    raw = other.raw;

    _lock.unlock();
    other._lock.unlock();

    return *this;
  }

  template <class V>
  sync_t &operator=(V &&val) {
    auto lg = lock();

    raw = val;

    return *this;
  }

  sync_t &operator=(const value_t &val) noexcept {
    auto lg = lock();

    raw = val;

    return *this;
  }

  sync_t &operator=(value_t &&val) noexcept {
    auto lg = lock();

    raw = std::move(val);

    return *this;
  }

  value_t *operator->() { return &raw; }

  value_t &operator*() { return raw; }

  const value_t &operator*() const { return raw; }

  value_t raw;

private:
  mutex_t _lock;
};
} // namespace sync_util
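A brief usage sketch (annotation, not part of this diff) of `sync_t`; the guarded vector and its contents are illustrative.

#include <string>
#include <vector>

// Hypothetical example: mutate the wrapped value only while holding its lock.
void sync_t_example() {
  sync_util::sync_t<std::vector<std::string>> names;
  {
    auto lg = names.lock();      // guards the internal mutex for this scope
    names->push_back("example"); // operator-> reaches the wrapped value
  }
}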

View File

@@ -4,486 +4,433 @@
*/
#pragma once
#include <array>
#include <atomic>
#include <condition_variable>
#include <functional>
#include <map>
#include <mutex>
#include <thread>
#include <vector>
#include "utility.h"
using namespace std::literals;
namespace safe {
template <class T>
class event_t {
public:
  using status_t = util::optional_t<T>;

  template <class... Args>
  void raise(Args &&...args) {
    std::lock_guard lg{_lock};
    if (!_continue) {
      return;
    }
    if constexpr (std::is_same_v<std::optional<T>, status_t>) {
      _status = std::make_optional<T>(std::forward<Args>(args)...);
    } else {
      _status = status_t{std::forward<Args>(args)...};
    }
    _cv.notify_all();
  }

  // pop and view should not be used interchangeably
  status_t pop() {
    std::unique_lock ul{_lock};
    if (!_continue) {
      return util::false_v<status_t>;
    }
    while (!_status) {
      _cv.wait(ul);
      if (!_continue) {
        return util::false_v<status_t>;
      }
    }
    auto val = std::move(_status);
    _status = util::false_v<status_t>;
    return val;
  }

  // pop and view should not be used interchangeably
  template <class Rep, class Period>
  status_t pop(std::chrono::duration<Rep, Period> delay) {
    std::unique_lock ul{_lock};
    if (!_continue) {
      return util::false_v<status_t>;
    }
    while (!_status) {
      if (!_continue ||
          _cv.wait_for(ul, delay) == std::cv_status::timeout) {
        return util::false_v<status_t>;
      }
    }
    auto val = std::move(_status);
    _status = util::false_v<status_t>;
    return val;
  }

  // pop and view should not be used interchangeably
  const status_t &view() {
    std::unique_lock ul{_lock};
    if (!_continue) {
      return util::false_v<status_t>;
    }
    while (!_status) {
      _cv.wait(ul);
      if (!_continue) {
        return util::false_v<status_t>;
      }
    }
    return _status;
  }

  // pop and view should not be used interchangeably
  template <class Rep, class Period>
  status_t view(std::chrono::duration<Rep, Period> delay) {
    std::unique_lock ul{_lock};
    if (!_continue) {
      return util::false_v<status_t>;
    }
    while (!_status) {
      if (!_continue ||
          _cv.wait_for(ul, delay) == std::cv_status::timeout) {
        return util::false_v<status_t>;
      }
    }
    return _status;
  }

  bool peek() { return _continue && (bool)_status; }

  void stop() {
    std::lock_guard lg{_lock};

    _continue = false;
    _cv.notify_all();
  }

  void reset() {
    std::lock_guard lg{_lock};

    _continue = true;
    _status = util::false_v<status_t>;
  }

  [[nodiscard]] bool running() const { return _continue; }

private:
  bool _continue{true};
  status_t _status{util::false_v<status_t>};

  std::condition_variable _cv;
  std::mutex _lock;
};
template <class T>
class alarm_raw_t {
public:
  using status_t = util::optional_t<T>;

  void ring(const status_t &status) {
    std::lock_guard lg(_lock);

    _status = status;
    _rang = true;
    _cv.notify_one();
  }

  void ring(status_t &&status) {
    std::lock_guard lg(_lock);

    _status = std::move(status);
    _rang = true;
    _cv.notify_one();
  }

  template <class Rep, class Period>
  auto wait_for(const std::chrono::duration<Rep, Period> &rel_time) {
    std::unique_lock ul(_lock);

    return _cv.wait_for(ul, rel_time, [this]() { return _rang; });
  }

  template <class Rep, class Period, class Pred>
  auto wait_for(const std::chrono::duration<Rep, Period> &rel_time,
                Pred &&pred) {
    std::unique_lock ul(_lock);

    return _cv.wait_for(ul, rel_time,
                        [this, &pred]() { return _rang || pred(); });
  }

  template <class Rep, class Period>
  auto wait_until(const std::chrono::duration<Rep, Period> &rel_time) {
    std::unique_lock ul(_lock);

    return _cv.wait_until(ul, rel_time, [this]() { return _rang; });
  }

  template <class Rep, class Period, class Pred>
  auto wait_until(const std::chrono::duration<Rep, Period> &rel_time,
                  Pred &&pred) {
    std::unique_lock ul(_lock);

    return _cv.wait_until(ul, rel_time,
                          [this, &pred]() { return _rang || pred(); });
  }

  auto wait() {
    std::unique_lock ul(_lock);
    _cv.wait(ul, [this]() { return _rang; });
  }

  template <class Pred>
  auto wait(Pred &&pred) {
    std::unique_lock ul(_lock);
    _cv.wait(ul, [this, &pred]() { return _rang || pred(); });
  }

  const status_t &status() const { return _status; }
  status_t &status() { return _status; }

  void reset() {
    _status = status_t{};
    _rang = false;
  }

private:
  std::mutex _lock;
  std::condition_variable _cv;

  status_t _status{util::false_v<status_t>};
  bool _rang{false};
};
template <class T>
using alarm_t = std::shared_ptr<alarm_raw_t<T>>;

template <class T>
alarm_t<T> make_alarm() {
  return std::make_shared<alarm_raw_t<T>>();
}
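// Illustrative usage sketch (annotation, not part of the original header):
// wait up to one second for another thread to ring the alarm with a value.
// Assumes <thread> and <chrono> are available, as included above.
inline void alarm_usage_example() {
  auto alarm = make_alarm<int>();

  std::thread worker([alarm]() { alarm->ring(42); });

  if (alarm->wait_for(std::chrono::seconds(1))) {
    auto &result = alarm->status(); // holds 42 when the alarm rang in time
    (void)result;
  }

  worker.join();
}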
template <class T>
class queue_t {
public:
  using status_t = util::optional_t<T>;

  queue_t(std::uint32_t max_elements = 32) : _max_elements{max_elements} {}

  template <class... Args>
  void raise(Args &&...args) {
    std::lock_guard ul{_lock};

    if (!_continue) {
      return;
    }

    if (_queue.size() == _max_elements) {
      _queue.clear();
    }

    _queue.emplace_back(std::forward<Args>(args)...);

    _cv.notify_all();
  }

  bool peek() { return _continue && !_queue.empty(); }

  int size() {
    if (!_continue) return -1;
    return _queue.size();
  }

  void wait(int max_size) {
    while (size() > max_size) {
      std::this_thread::sleep_for(1ms);
    }
    return;
  }

  template <class Rep, class Period>
  status_t pop(std::chrono::duration<Rep, Period> delay) {
    std::unique_lock ul{_lock};

    if (!_continue) {
      return util::false_v<status_t>;
    }

    while (_queue.empty()) {
      if (!_continue ||
          _cv.wait_for(ul, delay) == std::cv_status::timeout) {
        return util::false_v<status_t>;
      }
    }

    auto val = std::move(_queue.front());
    _queue.erase(std::begin(_queue));

    return val;
  }

  status_t pop() {
    std::unique_lock ul{_lock};

    if (!_continue) {
      return util::false_v<status_t>;
    }

    while (_queue.empty()) {
      _cv.wait(ul);
      if (!_continue) {
        return util::false_v<status_t>;
      }
    }

    auto val = std::move(_queue.front());
    _queue.erase(std::begin(_queue));

    return val;
  }

  std::vector<T> &unsafe() { return _queue; }

  void stop() {
    std::lock_guard lg{_lock};

    _continue = false;
    _cv.notify_all();
  }

  [[nodiscard]] bool running() const { return _continue; }

private:
  bool _continue{true};
  std::uint32_t _max_elements;

  std::mutex _lock;
  std::condition_variable _cv;

  std::vector<T> _queue;
};
template <class T>
class shared_t {
public:
  using element_type = T;

  using construct_f = std::function<int(element_type &)>;
  using destruct_f = std::function<void(element_type &)>;

  struct ptr_t {
    shared_t *owner;

    ptr_t() : owner{nullptr} {}
    explicit ptr_t(shared_t *owner) : owner{owner} {}

    ptr_t(ptr_t &&ptr) noexcept : owner{ptr.owner} { ptr.owner = nullptr; }

    ptr_t(const ptr_t &ptr) noexcept : owner{ptr.owner} {
      if (!owner) {
        return;
      }

      auto tmp = ptr.owner->ref();
      tmp.owner = nullptr;
    }

    ptr_t &operator=(const ptr_t &ptr) noexcept {
      if (!ptr.owner) {
        release();

        return *this;
      }

      return *this = std::move(*ptr.owner->ref());
    }

    ptr_t &operator=(ptr_t &&ptr) noexcept {
      if (owner) {
        release();
      }

      std::swap(owner, ptr.owner);

      return *this;
    }

    ~ptr_t() {
      if (owner) {
        release();
      }
    }

    operator bool() const { return owner != nullptr; }

    void release() {
      std::lock_guard lg{owner->_lock};

      if (!--owner->_count) {
        owner->_destruct(*get());
        (*this)->~element_type();
      }

      owner = nullptr;
    }

    element_type *get() const {
      return reinterpret_cast<element_type *>(owner->_object_buf.data());
    }

    element_type *operator->() {
      return reinterpret_cast<element_type *>(owner->_object_buf.data());
    }
  };

  template <class FC, class FD>
  shared_t(FC &&fc, FD &&fd)
      : _construct{std::forward<FC>(fc)}, _destruct{std::forward<FD>(fd)} {}

  [[nodiscard]] ptr_t ref() {
    std::lock_guard lg{_lock};

    if (!_count) {
      new (_object_buf.data()) element_type;
      if (_construct(
              *reinterpret_cast<element_type *>(_object_buf.data()))) {
        return ptr_t{nullptr};
      }
    }

    ++_count;

    return ptr_t{this};
  }

private:
  construct_f _construct;
  destruct_f _destruct;
@@ -491,45 +438,39 @@ namespace safe {
  std::uint32_t _count;
  std::mutex _lock;
};
template <class T, class F_Construct, class F_Destruct>
auto make_shared(F_Construct &&fc, F_Destruct &&fd) {
  return shared_t<T>{std::forward<F_Construct>(fc),
                     std::forward<F_Destruct>(fd)};
}
using signal_t = event_t<bool>;

class mail_raw_t;
using mail_t = std::shared_ptr<mail_raw_t>;

void cleanup(mail_raw_t *);

template <class T>
class post_t : public T {
public:
  template <class... Args>
  post_t(mail_t mail, Args &&...args)
      : T(std::forward<Args>(args)...), mail{std::move(mail)} {}

  mail_t mail;

  ~post_t() { cleanup(mail.get()); }
};
template <class T>
inline auto lock(const std::weak_ptr<void> &wp) {
  return std::reinterpret_pointer_cast<typename T::element_type>(wp.lock());
}
class mail_raw_t : public std::enable_shared_from_this<mail_raw_t> {
public:
  template <class T>
  using event_t = std::shared_ptr<post_t<event_t<T>>>;
@@ -537,59 +478,58 @@ namespace safe {
  using queue_t = std::shared_ptr<post_t<queue_t<T>>>;

  template <class T>
  event_t<T> event(const std::string_view &id) {
    std::lock_guard lg{mutex};

    auto it = id_to_post.find(id);
    if (it != std::end(id_to_post)) {
      return lock<event_t<T>>(it->second);
    }

    auto post = std::make_shared<typename event_t<T>::element_type>(
        shared_from_this());
    id_to_post.emplace(
        std::pair<std::string, std::weak_ptr<void>>{std::string{id}, post});

    return post;
  }

  template <class T>
  queue_t<T> queue(const std::string_view &id) {
    std::lock_guard lg{mutex};

    auto it = id_to_post.find(id);
    if (it != std::end(id_to_post)) {
      return lock<queue_t<T>>(it->second);
    }

    auto post = std::make_shared<typename queue_t<T>::element_type>(
        shared_from_this(), 32);
    id_to_post.emplace(
        std::pair<std::string, std::weak_ptr<void>>{std::string{id}, post});

    return post;
  }

  void cleanup() {
    std::lock_guard lg{mutex};

    for (auto it = std::begin(id_to_post); it != std::end(id_to_post);
         ++it) {
      auto &weak = it->second;
      if (weak.expired()) {
        id_to_post.erase(it);

        return;
      }
    }
  }

  std::mutex mutex;

  std::map<std::string, std::weak_ptr<void>, std::less<>> id_to_post;
};

inline void cleanup(mail_raw_t *mail) { mail->cleanup(); }
} // namespace safe
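A hedged sketch (annotation, not part of this diff) of the mailbox pattern above; the channel id string is hypothetical.

// Hypothetical example: share a typed event channel through mail_raw_t.
// mail_raw_t must be owned by a shared_ptr because event() calls
// shared_from_this().
void mail_example() {
  auto mail = std::make_shared<safe::mail_raw_t>();

  auto shutdown = mail->event<bool>("example_shutdown"sv);
  shutdown->raise(true);

  if (auto value = shutdown->pop()) {
    // the raised value is available here
  }
}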

File diff suppressed because it is too large

View File

@@ -7,77 +7,73 @@
#include <random>
namespace uuid_util {
union uuid_t {
  std::uint8_t b8[16];
  std::uint16_t b16[8];
  std::uint32_t b32[4];
  std::uint64_t b64[2];

  static uuid_t generate(std::default_random_engine &engine) {
    std::uniform_int_distribution<std::uint8_t> dist(
        0, std::numeric_limits<std::uint8_t>::max());

    uuid_t buf;
    for (auto &el : buf.b8) {
      el = dist(engine);
    }

    buf.b8[7] &= (std::uint8_t)0b00101111;
    buf.b8[9] &= (std::uint8_t)0b10011111;

    return buf;
  }

  static uuid_t generate() {
    std::random_device r;

    std::default_random_engine engine{r()};

    return generate(engine);
  }

  [[nodiscard]] std::string string() const {
    std::string result;

    result.reserve(sizeof(uuid_t) * 2 + 4);

    auto hex = util::hex(*this, true);
    auto hex_view = hex.to_string_view();

    std::string_view slices[] = {
        hex_view.substr(0, 8), hex_view.substr(8, 4),
        hex_view.substr(12, 4), hex_view.substr(16, 4)};
    auto last_slice = hex_view.substr(20, 12);

    for (auto &slice : slices) {
      std::copy(std::begin(slice), std::end(slice),
                std::back_inserter(result));

      result.push_back('-');
    }

    std::copy(std::begin(last_slice), std::end(last_slice),
              std::back_inserter(result));

    return result;
  }

  constexpr bool operator==(const uuid_t &other) const {
    return b64[0] == other.b64[0] && b64[1] == other.b64[1];
  }

  constexpr bool operator<(const uuid_t &other) const {
    return (b64[0] < other.b64[0] ||
            (b64[0] == other.b64[0] && b64[1] < other.b64[1]));
  }

  constexpr bool operator>(const uuid_t &other) const {
    return (b64[0] > other.b64[0] ||
            (b64[0] == other.b64[0] && b64[1] > other.b64[1]));
  }
};
} // namespace uuid_util
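A short sketch (annotation, not part of this diff) of `uuid_t` in use.

#include <string>

// Hypothetical example: mint a random UUID and render it as 8-4-4-4-12 hex.
std::string new_uuid_string() {
  auto id = uuid_util::uuid_t::generate();
  return id.string(); // e.g. "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
}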

File diff suppressed because it is too large

View File

@@ -15,150 +15,112 @@ extern "C" {
struct AVPacket;
namespace video {
struct packet_raw_t {
  virtual ~packet_raw_t() = default;

  virtual bool is_idr() = 0;

  virtual int64_t frame_index() = 0;

  virtual uint8_t *data() = 0;

  virtual size_t data_size() = 0;

  struct replace_t {
    std::string_view old;
    std::string_view _new;

    KITTY_DEFAULT_CONSTR_MOVE(replace_t)

    replace_t(std::string_view old, std::string_view _new) noexcept
        : old{std::move(old)}, _new{std::move(_new)} {}
  };

  std::vector<replace_t> *replacements = nullptr;

  void *channel_data = nullptr;

  bool after_ref_frame_invalidation = false;

  std::optional<std::chrono::steady_clock::time_point> frame_timestamp;
};
struct packet_raw_avcodec : packet_raw_t {
  packet_raw_avcodec() { av_packet = av_packet_alloc(); }

  ~packet_raw_avcodec() { av_packet_free(&this->av_packet); }

  bool is_idr() override { return av_packet->flags & AV_PKT_FLAG_KEY; }

  int64_t frame_index() override { return av_packet->pts; }

  uint8_t *data() override { return av_packet->data; }

  size_t data_size() override { return av_packet->size; }

  AVPacket *av_packet;
};
struct packet_raw_generic : packet_raw_t {
  packet_raw_generic(std::vector<uint8_t> &&frame_data, int64_t frame_index,
                     bool idr)
      : frame_data{std::move(frame_data)}, index{frame_index}, idr{idr} {}

  bool is_idr() override { return idr; }

  int64_t frame_index() override { return index; }

  uint8_t *data() override { return frame_data.data(); }

  size_t data_size() override { return frame_data.size(); }

  std::vector<uint8_t> frame_data;
  int64_t index;
  bool idr;
};
using packet_t = std::unique_ptr<packet_raw_t>;
struct hdr_info_raw_t {
  explicit hdr_info_raw_t(bool enabled) : enabled{enabled}, metadata{} {};
  explicit hdr_info_raw_t(bool enabled, const SS_HDR_METADATA &metadata)
      : enabled{enabled}, metadata{metadata} {};

  bool enabled;
  SS_HDR_METADATA metadata;
};

using hdr_info_t = std::unique_ptr<hdr_info_raw_t>;
/* Encoding configuration requested by remote client */
struct config_t {
  int width;     // Video width in pixels
  int height;    // Video height in pixels
  int framerate; // Requested framerate, used in individual frame bitrate
                 // budget calculation
  int bitrate;   // Video bitrate in kilobits (1000 bits) for requested
                 // framerate

  int slicesPerFrame; // Number of slices per frame
  int numRefFrames;   // Max number of reference frames

  /* Requested color range and SDR encoding colorspace, HDR encoding
     colorspace is always BT.2020+ST2084.
     Color range (encoderCscMode & 0x1): 0 - limited, 1 - full
     SDR encoding colorspace (encoderCscMode >> 1): 0 - BT.601, 1 - BT.709,
     2 - BT.2020 */
  int encoderCscMode;

  int videoFormat; // 0 - H.264, 1 - HEVC, 2 - AV1

  /* Encoding color depth (bit depth): 0 - 8-bit, 1 - 10-bit
     HDR encoding activates when color depth is higher than 8-bit and the
     display which is being captured is operating in HDR mode */
  int dynamicRange;
};
extern int active_hevc_mode;
extern int active_av1_mode;
extern bool last_encoder_probe_supported_ref_frames_invalidation;
void capture(safe::mail_t mail, config_t config, void *channel_data);

void update_resolution(config_t *config, const std::string &display_name);

int probe_encoders();
} // namespace video
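Hedged helpers (annotation, not part of this diff) that decode the packed `encoderCscMode` field exactly as documented in `config_t` above; the helper names are hypothetical.

// Hypothetical helpers: a client requesting full-range Rec. 709 sends
// encoderCscMode == 0x3 (bit 0 = full range, value >> 1 == 1 = BT.709).
inline bool is_full_range(const video::config_t &config) {
  return (config.encoderCscMode & 0x1) != 0;
}

inline int sdr_colorspace_selector(const video::config_t &config) {
  return config.encoderCscMode >> 1; // 0 - BT.601, 1 - BT.709, 2 - BT.2020
}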

View File

@@ -9,173 +9,184 @@ extern "C" {
namespace video {
bool colorspace_is_hdr(const sunshine_colorspace_t &colorspace) {
  return colorspace.colorspace == colorspace_e::bt2020;
}
sunshine_colorspace_t colorspace_from_client_config(const config_t &config,
                                                    bool hdr_display) {
  sunshine_colorspace_t colorspace;

  /* See video::config_t declaration for details */

  if (config.dynamicRange > 0 && hdr_display) {
    // Rec. 2020 with ST 2084 perceptual quantizer
    colorspace.colorspace = colorspace_e::bt2020;
  } else {
    switch (config.encoderCscMode >> 1) {
    case 0:
      // Rec. 601
      colorspace.colorspace = colorspace_e::rec601;
      break;

    case 1:
      // Rec. 709
      colorspace.colorspace = colorspace_e::rec709;
      break;

    case 2:
      // Rec. 2020
      colorspace.colorspace = colorspace_e::bt2020sdr;
      break;

    default:
      BOOST_LOG(error) << "Unknown video colorspace in csc, falling "
                          "back to Rec. 709";
      colorspace.colorspace = colorspace_e::rec709;
      break;
    }
  }

  colorspace.full_range = (config.encoderCscMode & 0x1);

  switch (config.dynamicRange) {
  case 0:
    colorspace.bit_depth = 8;
    break;

  case 1:
    colorspace.bit_depth = 10;
    break;

  default:
    BOOST_LOG(error) << "Unknown dynamicRange value, falling back to "
                        "10-bit color depth";
    colorspace.bit_depth = 10;
    break;
  }

  if (colorspace.colorspace == colorspace_e::bt2020sdr &&
      colorspace.bit_depth != 10) {
    BOOST_LOG(error) << "BT.2020 SDR colorspace expects 10-bit color "
                        "depth, falling back to Rec. 709";
    colorspace.colorspace = colorspace_e::rec709;
  }

  return colorspace;
}
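For reference, a minimal sketch of calling this mapping with hypothetical client values (the example_sdr_bt709 helper and the field values are illustrative, not taken from this commit):

// Illustrative only: a client requesting BT.709, full range, 8-bit SDR.
static video::sunshine_colorspace_t example_sdr_bt709() {
  video::config_t config{};
  config.encoderCscMode = (1 << 1) | 0x1;  // colorspace bits == 1 (BT.709), bit 0 set (full range)
  config.dynamicRange = 0;                 // 8-bit

  // On a non-HDR display this yields rec709, full_range == true, bit_depth == 8.
  return video::colorspace_from_client_config(config, /* hdr_display */ false);
}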
avcodec_colorspace_t avcodec_colorspace_from_sunshine_colorspace(
    const sunshine_colorspace_t &sunshine_colorspace) {
  avcodec_colorspace_t avcodec_colorspace;

  switch (sunshine_colorspace.colorspace) {
  case colorspace_e::rec601:
    // Rec. 601
    avcodec_colorspace.primaries = AVCOL_PRI_SMPTE170M;
    avcodec_colorspace.transfer_function = AVCOL_TRC_SMPTE170M;
    avcodec_colorspace.matrix = AVCOL_SPC_SMPTE170M;
    avcodec_colorspace.software_format = SWS_CS_SMPTE170M;
    break;

  case colorspace_e::rec709:
    // Rec. 709
    avcodec_colorspace.primaries = AVCOL_PRI_BT709;
    avcodec_colorspace.transfer_function = AVCOL_TRC_BT709;
    avcodec_colorspace.matrix = AVCOL_SPC_BT709;
    avcodec_colorspace.software_format = SWS_CS_ITU709;
    break;

  case colorspace_e::bt2020sdr:
    // Rec. 2020
    avcodec_colorspace.primaries = AVCOL_PRI_BT2020;
    assert(sunshine_colorspace.bit_depth == 10);
    avcodec_colorspace.transfer_function = AVCOL_TRC_BT2020_10;
    avcodec_colorspace.matrix = AVCOL_SPC_BT2020_NCL;
    avcodec_colorspace.software_format = SWS_CS_BT2020;
    break;

  case colorspace_e::bt2020:
    // Rec. 2020 with ST 2084 perceptual quantizer
    avcodec_colorspace.primaries = AVCOL_PRI_BT2020;
    assert(sunshine_colorspace.bit_depth == 10);
    avcodec_colorspace.transfer_function = AVCOL_TRC_SMPTE2084;
    avcodec_colorspace.matrix = AVCOL_SPC_BT2020_NCL;
    avcodec_colorspace.software_format = SWS_CS_BT2020;
    break;
  }

  avcodec_colorspace.range =
      sunshine_colorspace.full_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;

  return avcodec_colorspace;
}
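To show how the returned values line up with FFmpeg's encoder fields, a hedged sketch; the apply_colorspace helper is hypothetical and is not how this commit wires the values up:

// Hypothetical glue: copy a derived avcodec_colorspace_t into an encoder context.
static void apply_colorspace(AVCodecContext *avctx,
                             const video::avcodec_colorspace_t &cs) {
  avctx->color_primaries = cs.primaries;    // e.g. AVCOL_PRI_BT709
  avctx->color_trc = cs.transfer_function;  // e.g. AVCOL_TRC_BT709
  avctx->colorspace = cs.matrix;            // e.g. AVCOL_SPC_BT709
  avctx->color_range = cs.range;            // AVCOL_RANGE_MPEG or AVCOL_RANGE_JPEG
  // cs.software_format holds an SWS_CS_* constant aimed at swscale
  // (e.g. usable with sws_setColorspaceDetails()), not at the codec context.
}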
const color_t *color_vectors_from_colorspace(
    const sunshine_colorspace_t &colorspace) {
  return color_vectors_from_colorspace(colorspace.colorspace,
                                       colorspace.full_range);
}
const color_t *color_vectors_from_colorspace(colorspace_e colorspace,
                                             bool full_range) {
  using float2 = float[2];
  auto make_color_matrix = [](float Cr, float Cb, const float2 &range_Y,
                              const float2 &range_UV) -> color_t {
    float Cg = 1.0f - Cr - Cb;

    float Cr_i = 1.0f - Cr;
    float Cb_i = 1.0f - Cb;

    float shift_y = range_Y[0] / 255.0f;
    float shift_uv = range_UV[0] / 255.0f;

    float scale_y = (range_Y[1] - range_Y[0]) / 255.0f;
    float scale_uv = (range_UV[1] - range_UV[0]) / 255.0f;

    return {
        {Cr, Cg, Cb, 0.0f},
        {-(Cr * 0.5f / Cb_i), -(Cg * 0.5f / Cb_i), 0.5f, 0.5f},
        {0.5f, -(Cg * 0.5f / Cr_i), -(Cb * 0.5f / Cr_i), 0.5f},
        {scale_y, shift_y},
        {scale_uv, shift_uv},
    };
  };

  static const color_t colors[]{
      make_color_matrix(0.299f, 0.114f, {16.0f, 235.0f},
                        {16.0f, 240.0f}),  // BT601 MPEG
      make_color_matrix(0.299f, 0.114f, {0.0f, 255.0f},
                        {0.0f, 255.0f}),  // BT601 JPEG
      make_color_matrix(0.2126f, 0.0722f, {16.0f, 235.0f},
                        {16.0f, 240.0f}),  // BT709 MPEG
      make_color_matrix(0.2126f, 0.0722f, {0.0f, 255.0f},
                        {0.0f, 255.0f}),  // BT709 JPEG
      make_color_matrix(0.2627f, 0.0593f, {16.0f, 235.0f},
                        {16.0f, 240.0f}),  // BT2020 MPEG
      make_color_matrix(0.2627f, 0.0593f, {0.0f, 255.0f},
                        {0.0f, 255.0f}),  // BT2020 JPEG
  };

  const color_t *result = nullptr;

  switch (colorspace) {
  case colorspace_e::rec601:
  default:
    result = &colors[0];
    break;
  case colorspace_e::rec709:
    result = &colors[2];
    break;
  case colorspace_e::bt2020:
  case colorspace_e::bt2020sdr:
    result = &colors[4];
    break;
  };

  if (full_range) {
    result++;
  }

  return result;
}
} // namespace video
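For intuition about the constants above, a small self-contained sketch (not part of this commit) that evaluates the BT.709 limited-range case with the same formulas make_color_matrix uses:

#include <cstdio>

int main() {
  // BT.709 luma coefficients, as passed to make_color_matrix above.
  const float Cr = 0.2126f, Cb = 0.0722f;
  const float Cg = 1.0f - Cr - Cb;  // 0.7152

  // Limited ("MPEG") range: Y' spans 16..235 out of 0..255.
  const float scale_y = (235.0f - 16.0f) / 255.0f;
  const float shift_y = 16.0f / 255.0f;

  // Encoding a pure white pixel (R = G = B = 1.0): the weighted sum is 1.0,
  // so limited-range Y' lands at 235/255.
  const float y = (Cr + Cg + Cb) * scale_y + shift_y;
  std::printf("Cg = %.4f, Y'(white) = %.4f (= 235/255)\n", Cg, y);
  return 0;
}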


@ -6,51 +6,50 @@ extern "C" {
namespace video {

enum class colorspace_e {
  rec601,
  rec709,
  bt2020sdr,
  bt2020,
};

struct sunshine_colorspace_t {
  colorspace_e colorspace;
  bool full_range;
  unsigned bit_depth;
};

bool colorspace_is_hdr(const sunshine_colorspace_t &colorspace);

// Declared in video.h
struct config_t;

sunshine_colorspace_t colorspace_from_client_config(const config_t &config,
                                                    bool hdr_display);

struct avcodec_colorspace_t {
  AVColorPrimaries primaries;
  AVColorTransferCharacteristic transfer_function;
  AVColorSpace matrix;
  AVColorRange range;
  int software_format;
};

avcodec_colorspace_t avcodec_colorspace_from_sunshine_colorspace(
    const sunshine_colorspace_t &sunshine_colorspace);

struct alignas(16) color_t {
  float color_vec_y[4];
  float color_vec_u[4];
  float color_vec_v[4];
  float range_y[2];
  float range_uv[2];
};

const color_t *color_vectors_from_colorspace(
    const sunshine_colorspace_t &colorspace);

const color_t *color_vectors_from_colorspace(colorspace_e colorspace,
                                             bool full_range);
} // namespace video
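The header does not spell out how consumers apply color_t, so the following is a hedged sketch only: it assumes the row vectors are used as RGB dot products (with the fourth element as a bias) followed by the {scale, shift} range mapping, consistent with how the tables are built in colorspace.cpp; yuv_t, dot_bias and rgb_to_yuv are hypothetical names:

// Assumed usage of video::color_t: convert one normalized RGB sample (0..1)
// to Y'UV by dotting with the row vectors, then map into the selected range.
struct yuv_t {
  float y, u, v;
};

static inline float dot_bias(const float v[4], float r, float g, float b) {
  return v[0] * r + v[1] * g + v[2] * b + v[3];
}

static inline yuv_t rgb_to_yuv(const video::color_t &c, float r, float g,
                               float b) {
  yuv_t out;
  out.y = dot_bias(c.color_vec_y, r, g, b) * c.range_y[0] + c.range_y[1];
  out.u = dot_bias(c.color_vec_u, r, g, b) * c.range_uv[0] + c.range_uv[1];
  out.v = dot_bias(c.color_vec_v, r, g, b) * c.range_uv[0] + c.range_uv[1];
  return out;
}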