13 Commits

Author SHA1 Message Date
dijunkun
2d6cfb5c76 Support cursor control on MacOS 2023-12-06 15:24:01 +08:00
dijunkun
ed8b536ac0 Add signal connection status 'NoSuchTransmissionId' 2023-12-04 14:04:36 +08:00
dijunkun
5e2d27e9d2 Fix ffmpeg link error on MacOS 2023-12-04 11:20:30 +08:00
dijunkun
47a2dc85f9 Fix screen capture on MacOS 2023-12-01 17:16:38 +08:00
dijunkun
5febe99bc2 Support screen capture on MacOS 2023-12-01 16:54:17 +08:00
dijunkun
97bed80088 Fix link flag for MacOS 2023-12-01 10:12:06 +08:00
dijunkun
070f7bd21e Disable ffmpeg openh264 codec 2023-11-29 22:57:29 -08:00
dijunkun
4a65a59803 Enable audio transmission 2023-11-29 22:04:53 -08:00
dijunkun
733434f9b3 Switch rtc core to branch opus 2023-11-21 22:36:36 -08:00
dijunkun
7b7787f638 Add audio capture test 2023-11-21 22:32:06 -08:00
dijunkun
b64d399967 Capture full screen and resize to 1280x720 for Linux platform 2023-11-20 23:11:21 -08:00
dijunkun
65f9862929 Enable password checking when login 2023-11-20 22:48:18 -08:00
dijunkun
3065c15631 Save password in cache 2023-11-20 22:24:27 -08:00
19 changed files with 2310 additions and 377 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -2,6 +2,8 @@
#include <iostream>
#include "log.h"
#define NV12_BUFFER_SIZE 1280 * 720 * 3 / 2
unsigned char nv12_buffer_[NV12_BUFFER_SIZE];
@@ -19,6 +21,8 @@ int ScreenCaptureX11::Init(const RECORD_DESKTOP_RECT &rect, const int fps,
_on_data = cb;
}
av_log_set_level(AV_LOG_QUIET);
pFormatCtx_ = avformat_alloc_context();
avdevice_register_all();
@@ -26,9 +30,9 @@ int ScreenCaptureX11::Init(const RECORD_DESKTOP_RECT &rect, const int fps,
// grabbing frame rate
av_dict_set(&options_, "framerate", "30", 0);
// Make the grabbed area follow the mouse
av_dict_set(&options_, "follow_mouse", "centered", 0);
// av_dict_set(&options_, "follow_mouse", "centered", 0);
// Video frame size. The default is to capture the full screen
av_dict_set(&options_, "video_size", "1280x720", 0);
// av_dict_set(&options_, "video_size", "1280x720", 0);
ifmt_ = (AVInputFormat *)av_find_input_format("x11grab");
if (!ifmt_) {
printf("Couldn't find_input_format\n");
@@ -71,21 +75,22 @@ int ScreenCaptureX11::Init(const RECORD_DESKTOP_RECT &rect, const int fps,
return -1;
}
const int screen_w = pFormatCtx_->streams[videoindex_]->codecpar->width;
const int screen_h = pFormatCtx_->streams[videoindex_]->codecpar->height;
pFrame_ = av_frame_alloc();
pFrameNv12_ = av_frame_alloc();
const int pixel_w = 1280, pixel_h = 720;
int screen_w = 1280, screen_h = 720;
screen_w = 1280;
screen_h = 720;
pFrame_->width = screen_w;
pFrame_->height = screen_h;
pFrameNv12_->width = 1280;
pFrameNv12_->height = 720;
packet_ = (AVPacket *)av_malloc(sizeof(AVPacket));
img_convert_ctx_ =
sws_getContext(pCodecCtx_->width, pCodecCtx_->height, pCodecCtx_->pix_fmt,
pCodecCtx_->width, pCodecCtx_->height, AV_PIX_FMT_NV12,
SWS_BICUBIC, NULL, NULL, NULL);
img_convert_ctx_ = sws_getContext(
pFrame_->width, pFrame_->height, pCodecCtx_->pix_fmt, pFrameNv12_->width,
pFrameNv12_->height, AV_PIX_FMT_NV12, SWS_BICUBIC, NULL, NULL, NULL);
return 0;
}
@@ -101,16 +106,16 @@ int ScreenCaptureX11::Start() {
if (!got_picture_) {
av_image_fill_arrays(pFrameNv12_->data, pFrameNv12_->linesize,
nv12_buffer_, AV_PIX_FMT_NV12, pFrame_->width,
pFrame_->height, 1);
nv12_buffer_, AV_PIX_FMT_NV12,
pFrameNv12_->width, pFrameNv12_->height, 1);
sws_scale(img_convert_ctx_, pFrame_->data, pFrame_->linesize, 0,
pFrame_->height, pFrameNv12_->data,
pFrameNv12_->linesize);
_on_data((unsigned char *)nv12_buffer_,
pFrame_->width * pFrame_->height * 3 / 2, pFrame_->width,
pFrame_->height);
pFrameNv12_->width * pFrameNv12_->height * 3 / 2,
pFrameNv12_->width, pFrameNv12_->height);
}
}
}

View File

@@ -0,0 +1,140 @@
#include "screen_capture_avf.h"
#include <iostream>
#include "log.h"
// Size in bytes of one 1280x720 NV12 frame (full-res Y plane + half-res
// interleaved UV plane = w * h * 3 / 2). Parenthesized so the macro expands
// safely inside larger expressions.
#define NV12_BUFFER_SIZE (1280 * 720 * 3 / 2)
// Scratch buffer holding the most recently converted NV12 frame.
unsigned char nv12_buffer_[NV12_BUFFER_SIZE];
// Trivial constructor: members carry in-class initializers; real setup
// happens in Init().
ScreenCaptureAvf::ScreenCaptureAvf() {}

// Joins the capture thread if one was started. Fix: guard against a null
// thread pointer — Start() may never have been called before destruction,
// in which case dereferencing capture_thread_ is undefined behavior.
ScreenCaptureAvf::~ScreenCaptureAvf() {
  if (capture_thread_ && capture_thread_->joinable()) {
    capture_thread_->join();
  }
}
// Initializes AVFoundation screen grabbing and the NV12 conversion context.
//
// rect/fps are currently unused: the grab always opens "Capture screen 0"
// at 60 fps and scales the output to a fixed 1280x720 NV12 frame — TODO
// confirm whether they should be honored.
// cb, when non-null, is stored and later invoked from the capture thread
// with each converted frame.
// Returns 0 on success, -1 on any FFmpeg setup failure.
int ScreenCaptureAvf::Init(const RECORD_DESKTOP_RECT &rect, const int fps,
                           cb_desktop_data cb) {
  if (cb) {
    _on_data = cb;
  }
  av_log_set_level(AV_LOG_QUIET);
  pFormatCtx_ = avformat_alloc_context();
  avdevice_register_all();
  // Grabbing frame rate.
  av_dict_set(&options_, "framerate", "60", 0);
  // Ask AVFoundation for NV12 directly so sws only has to scale.
  av_dict_set(&options_, "pixel_format", "nv12", 0);
  // Show remote cursor.
  av_dict_set(&options_, "capture_cursor", "1", 0);
  // Make the grabbed area follow the mouse.
  // av_dict_set(&options_, "follow_mouse", "centered", 0);
  // Video frame size. The default is to capture the full screen.
  // av_dict_set(&options_, "video_size", "1280x720", 0);
  ifmt_ = (AVInputFormat *)av_find_input_format("avfoundation");
  if (!ifmt_) {
    printf("Couldn't find_input_format\n");
    // Fix: fail fast instead of calling avformat_open_input with a null
    // input format (the original logged and fell through).
    return -1;
  }
  if (avformat_open_input(&pFormatCtx_, "Capture screen 0", ifmt_, &options_) !=
      0) {
    printf("Couldn't open input stream.\n");
    return -1;
  }
  if (avformat_find_stream_info(pFormatCtx_, NULL) < 0) {
    printf("Couldn't find stream information.\n");
    return -1;
  }
  // Locate the first video stream.
  videoindex_ = -1;
  for (i_ = 0; i_ < pFormatCtx_->nb_streams; i_++)
    if (pFormatCtx_->streams[i_]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
      videoindex_ = i_;
      break;
    }
  if (videoindex_ == -1) {
    printf("Didn't find a video stream.\n");
    return -1;
  }
  // Open a decoder matching the stream's codec parameters.
  pCodecParam_ = pFormatCtx_->streams[videoindex_]->codecpar;
  pCodecCtx_ = avcodec_alloc_context3(NULL);
  avcodec_parameters_to_context(pCodecCtx_, pCodecParam_);
  pCodec_ = const_cast<AVCodec *>(avcodec_find_decoder(pCodecCtx_->codec_id));
  if (pCodec_ == NULL) {
    printf("Codec not found.\n");
    return -1;
  }
  if (avcodec_open2(pCodecCtx_, pCodec_, NULL) < 0) {
    printf("Could not open codec.\n");
    return -1;
  }
  const int screen_w = pFormatCtx_->streams[videoindex_]->codecpar->width;
  const int screen_h = pFormatCtx_->streams[videoindex_]->codecpar->height;
  pFrame_ = av_frame_alloc();
  pFrameNv12_ = av_frame_alloc();
  // Source frame carries the native screen size; output is fixed 1280x720.
  pFrame_->width = screen_w;
  pFrame_->height = screen_h;
  pFrameNv12_->width = 1280;
  pFrameNv12_->height = 720;
  packet_ = (AVPacket *)av_malloc(sizeof(AVPacket));
  // Scaler: native resolution / native pixel format -> 1280x720 NV12.
  img_convert_ctx_ = sws_getContext(
      pFrame_->width, pFrame_->height, pCodecCtx_->pix_fmt, pFrameNv12_->width,
      pFrameNv12_->height, AV_PIX_FMT_NV12, SWS_BICUBIC, NULL, NULL, NULL);
  return 0;
}
// Spawns the capture loop on a dedicated thread and returns immediately.
// The loop reads packets from the AVFoundation device, decodes them, scales
// each decoded frame to 1280x720 NV12 into the shared nv12_buffer_, and
// hands the buffer to the _on_data callback.
//
// NOTE(review): the while(1) loop has no exit condition and Stop() does not
// signal it, so the thread runs until process teardown — confirm whether a
// shutdown flag is handled elsewhere.
int ScreenCaptureAvf::Start() {
  capture_thread_.reset(new std::thread([this]() {
    while (1) {
      if (av_read_frame(pFormatCtx_, packet_) >= 0) {
        if (packet_->stream_index == videoindex_) {
          avcodec_send_packet(pCodecCtx_, packet_);
          av_packet_unref(packet_);
          // avcodec_receive_frame returns 0 on success, so a zero
          // got_picture_ means a decoded frame is available.
          got_picture_ = avcodec_receive_frame(pCodecCtx_, pFrame_);
          if (!got_picture_) {
            // Point pFrameNv12_'s plane pointers at the static NV12 buffer.
            av_image_fill_arrays(pFrameNv12_->data, pFrameNv12_->linesize,
                                 nv12_buffer_, AV_PIX_FMT_NV12,
                                 pFrameNv12_->width, pFrameNv12_->height, 1);
            // Convert/scale the native frame down to 1280x720 NV12.
            sws_scale(img_convert_ctx_, pFrame_->data, pFrame_->linesize, 0,
                      pFrame_->height, pFrameNv12_->data,
                      pFrameNv12_->linesize);
            // Deliver the converted frame (NV12 size = w * h * 3 / 2 bytes).
            _on_data((unsigned char *)nv12_buffer_,
                     pFrameNv12_->width * pFrameNv12_->height * 3 / 2,
                     pFrameNv12_->width, pFrameNv12_->height);
          }
        }
      }
    }
  }));
  return 0;
}
// The pause/resume/stop lifecycle is not implemented for the AVFoundation
// backend yet; these are no-op stubs that always report success.
int ScreenCaptureAvf::Pause() { return 0; }
int ScreenCaptureAvf::Resume() { return 0; }
int ScreenCaptureAvf::Stop() { return 0; }
void ScreenCaptureAvf::OnFrame() {}
void ScreenCaptureAvf::CleanUp() {}

View File

@@ -0,0 +1,91 @@
/*
 * @Author: DI JUNKUN
 * @Date: 2023-12-01
 * Copyright (c) 2023 by DI JUNKUN, All Rights Reserved.
 */

// Screen-capture backend for macOS based on FFmpeg's AVFoundation input
// device. Grabs the desktop, converts frames to 1280x720 NV12, and delivers
// them through a user-supplied callback.

#ifndef _SCREEN_CAPTURE_AVF_H_
#define _SCREEN_CAPTURE_AVF_H_

#include <atomic>
#include <functional>
#include <memory>  // fix: std::unique_ptr is used below but <memory> was never included
#include <string>
#include <thread>

#ifdef __cplusplus
extern "C" {
#endif
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
};
#endif

// Desktop region to capture, in virtual-screen coordinates.
typedef struct {
  int left;
  int top;
  int right;
  int bottom;
} RECORD_DESKTOP_RECT;

// Frame callback: (nv12_data, data_size_bytes, width, height).
typedef std::function<void(unsigned char *, int, int, int)> cb_desktop_data;
// Error callback: receives a backend-specific error code.
typedef std::function<void(int)> cb_desktop_error;

class ScreenCaptureAvf {
 public:
  ScreenCaptureAvf();
  ~ScreenCaptureAvf();

 public:
  // Prepares the AVFoundation grab and the NV12 scaler.
  // Returns 0 on success, -1 on failure.
  int Init(const RECORD_DESKTOP_RECT &rect, const int fps, cb_desktop_data cb);
  // Starts the capture thread; frames are delivered via the Init callback.
  int Start();
  int Pause();
  int Resume();
  int Stop();
  void OnFrame();

 protected:
  void CleanUp();

 private:
  std::atomic_bool _running;
  std::atomic_bool _paused;
  std::atomic_bool _inited;
  std::thread _thread;
  std::string _device_name;
  RECORD_DESKTOP_RECT _rect;
  int _fps;
  cb_desktop_data _on_data;    // invoked per converted frame
  cb_desktop_error _on_error;  // invoked on capture errors

 private:
  int i_ = 0;
  int videoindex_ = 0;   // index of the video stream in pFormatCtx_
  int got_picture_ = 0;  // last avcodec_receive_frame() return code
  // ffmpeg
  AVFormatContext *pFormatCtx_ = nullptr;
  AVCodecContext *pCodecCtx_ = nullptr;
  AVCodec *pCodec_ = nullptr;
  AVCodecParameters *pCodecParam_ = nullptr;
  AVDictionary *options_ = nullptr;
  AVInputFormat *ifmt_ = nullptr;
  AVFrame *pFrame_ = nullptr;      // decoded native-resolution frame
  AVFrame *pFrameNv12_ = nullptr;  // 1280x720 NV12 output frame
  AVPacket *packet_ = nullptr;
  struct SwsContext *img_convert_ctx_ = nullptr;
  // thread
  std::unique_ptr<std::thread> capture_thread_ = nullptr;
};

#endif

View File

@@ -1,26 +0,0 @@
#include "screen_capture_wgc.h"

#include <iostream>

// Placeholder Windows Graphics Capture backend: every operation is a no-op
// stub. IsWgcSupported() reports false so callers fall back to another
// capture implementation.

ScreenCaptureWgc::ScreenCaptureWgc() {}

ScreenCaptureWgc::~ScreenCaptureWgc() {}

// Always false: WGC support is not implemented in this build.
bool ScreenCaptureWgc::IsWgcSupported() { return false; }

int ScreenCaptureWgc::Init(const RECORD_DESKTOP_RECT &rect, const int fps,
                           cb_desktop_data cb) {
  return 0;
}

int ScreenCaptureWgc::Start() { return 0; }
int ScreenCaptureWgc::Pause() { return 0; }
int ScreenCaptureWgc::Resume() { return 0; }
int ScreenCaptureWgc::Stop() { return 0; }
void ScreenCaptureWgc::OnFrame() {}
void ScreenCaptureWgc::CleanUp() {}

View File

@@ -1,56 +0,0 @@
// Stub interface for a Windows Graphics Capture (WGC) screen grabber.
// Mirrors the other ScreenCapture* backends so callers can swap
// implementations per platform.
#ifndef _SCREEN_CAPTURE_WGC_H_
#define _SCREEN_CAPTURE_WGC_H_

#include <atomic>
#include <functional>
#include <string>
#include <thread>

// Desktop region to capture, in virtual-screen coordinates.
typedef struct {
  int left;
  int top;
  int right;
  int bottom;
} RECORD_DESKTOP_RECT;

// Frame callback: (frame_data, data_size_bytes, width, height).
typedef std::function<void(unsigned char *, int, int, int)> cb_desktop_data;
// Error callback: receives a backend-specific error code.
typedef std::function<void(int)> cb_desktop_error;

class ScreenCaptureWgc {
 public:
  ScreenCaptureWgc();
  ~ScreenCaptureWgc();

 public:
  // True when WGC is available on this system (always false in this stub).
  bool IsWgcSupported();
  int Init(const RECORD_DESKTOP_RECT &rect, const int fps, cb_desktop_data cb);
  int Start();
  int Pause();
  int Resume();
  int Stop();
  void OnFrame();

 protected:
  void CleanUp();

 private:
  std::atomic_bool _running;
  std::atomic_bool _paused;
  std::atomic_bool _inited;
  std::thread _thread;
  std::string _device_name;
  RECORD_DESKTOP_RECT _rect;
  int _fps;
  cb_desktop_data _on_data;
  cb_desktop_error _on_error;
};

#endif

View File

@@ -0,0 +1,232 @@
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
#include <libavformat/avformat.h>
#include <libavutil/channel_layout.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
};
/* Map an AVSampleFormat to the matching ffplay raw-format name for this
 * machine's endianness. On success *fmt receives the name and 0 is
 * returned; for unsupported formats *fmt stays NULL and AVERROR(EINVAL)
 * is returned after logging to stderr. */
static int get_format_from_sample_fmt(const char **fmt,
                                      enum AVSampleFormat sample_fmt) {
  struct sample_fmt_entry {
    enum AVSampleFormat sample_fmt;
    const char *fmt_be, *fmt_le;
  };
  // Big-endian / little-endian names for each supported raw sample format.
  static const struct sample_fmt_entry kEntries[] = {
      {AV_SAMPLE_FMT_U8, "u8", "u8"},
      {AV_SAMPLE_FMT_S16, "s16be", "s16le"},
      {AV_SAMPLE_FMT_S32, "s32be", "s32le"},
      {AV_SAMPLE_FMT_FLT, "f32be", "f32le"},
      {AV_SAMPLE_FMT_DBL, "f64be", "f64le"},
  };
  *fmt = NULL;
  for (const struct sample_fmt_entry &entry : kEntries) {
    if (entry.sample_fmt == sample_fmt) {
      // AV_NE picks the name matching the host byte order.
      *fmt = AV_NE(entry.fmt_be, entry.fmt_le);
      return 0;
    }
  }
  fprintf(stderr, "Sample format %s not supported as output format\n",
          av_get_sample_fmt_name(sample_fmt));
  return AVERROR(EINVAL);
}
/**
 * Fill dst with nb_samples frames of a 440 Hz sine tone starting at time
 * *t, duplicating each sample across nb_channels interleaved channels.
 * *t is advanced by nb_samples / sample_rate seconds.
 */
static void fill_samples(double *dst, int nb_samples, int nb_channels,
                         int sample_rate, double *t) {
  const double step = 1.0 / sample_rate;  // time advance per sample frame
  const double c = 2 * M_PI * 440.0;      // angular frequency of the tone
  double *out = dst;
  for (int n = 0; n < nb_samples; n++) {
    out[0] = sin(c * *t);
    // Duplicate the mono value into every remaining channel slot.
    for (int ch = 1; ch < nb_channels; ch++) {
      out[ch] = out[0];
    }
    out += nb_channels;
    *t += step;
  }
}
// Standalone swresample demo: synthesizes ~10 seconds of a 440 Hz mono sine
// at 44100 Hz (double samples), resamples it to 48000 Hz stereo s16, and
// writes the result to "res.pcm" for verification with ffplay.
int main(int argc, char **argv) {
  // Source (input) parameters.
  int64_t src_ch_layout = AV_CH_LAYOUT_MONO;
  int src_rate = 44100;
  enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL;
  int src_nb_channels = 0;
  uint8_t **src_data = NULL;  // per-channel plane pointers
  int src_linesize;
  int src_nb_samples = 1024;
  // Destination (output) parameters.
  int64_t dst_ch_layout = AV_CH_LAYOUT_STEREO;
  int dst_rate = 48000;
  enum AVSampleFormat dst_sample_fmt = AV_SAMPLE_FMT_S16;
  int dst_nb_channels = 0;
  uint8_t **dst_data = NULL;  // per-channel plane pointers
  int dst_linesize;
  int dst_nb_samples;
  int max_dst_nb_samples;
  // Output file: raw PCM dumped to disk, played back later to verify.
  const char *dst_filename = NULL;
  FILE *dst_file;
  int dst_bufsize;
  const char *fmt;
  // Resampler state.
  struct SwrContext *swr_ctx;
  double t;
  int ret;
  dst_filename = "res.pcm";
  dst_file = fopen(dst_filename, "wb");
  if (!dst_file) {
    fprintf(stderr, "Could not open destination file %s\n", dst_filename);
    exit(1);
  }
  /* create resampler context */
  swr_ctx = swr_alloc();
  if (!swr_ctx) {
    fprintf(stderr, "Could not allocate resampler context\n");
    ret = AVERROR(ENOMEM);
    goto end;
  }
  /* set options */
  // Input description.
  av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
  av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
  av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
  // Output description.
  av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
  av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
  av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
  /* initialize the resampling context */
  if ((ret = swr_init(swr_ctx)) < 0) {
    fprintf(stderr, "Failed to initialize the resampling context\n");
    goto end;
  }
  /* allocate source and destination samples buffers */
  // Channel count implied by the source layout.
  src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
  // Allocate the source sample buffer.
  ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize,
                                           src_nb_channels, src_nb_samples,
                                           src_sample_fmt, 0);
  if (ret < 0) {
    fprintf(stderr, "Could not allocate source samples\n");
    goto end;
  }
  /* compute the number of converted samples: buffering is avoided
   * ensuring that the output buffer will contain at least all the
   * converted input samples */
  max_dst_nb_samples = dst_nb_samples =
      av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
  /* buffer is going to be directly written to a rawaudio file, no alignment
   */
  dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
  // Allocate the destination sample buffer.
  ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize,
                                           dst_nb_channels, dst_nb_samples,
                                           dst_sample_fmt, 0);
  if (ret < 0) {
    fprintf(stderr, "Could not allocate destination samples\n");
    goto end;
  }
  t = 0;
  do {
    /* generate synthetic audio */
    // Produce the next block of the 440 Hz test tone.
    fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels,
                 src_rate, &t);
    /* compute destination number of samples, accounting for samples the
     * resampler is still buffering internally */
    int64_t delay = swr_get_delay(swr_ctx, src_rate);
    dst_nb_samples =
        av_rescale_rnd(delay + src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
    if (dst_nb_samples > max_dst_nb_samples) {
      // Destination buffer too small for this round: grow it.
      av_freep(&dst_data[0]);
      ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
                             dst_nb_samples, dst_sample_fmt, 1);
      if (ret < 0) break;
      max_dst_nb_samples = dst_nb_samples;
    }
    /* convert to destination format */
    ret = swr_convert(swr_ctx, dst_data, dst_nb_samples,
                      (const uint8_t **)src_data, src_nb_samples);
    if (ret < 0) {
      fprintf(stderr, "Error while converting\n");
      goto end;
    }
    // ret is the number of samples actually produced this round.
    dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
                                             ret, dst_sample_fmt, 1);
    if (dst_bufsize < 0) {
      fprintf(stderr, "Could not get sample buffer size\n");
      goto end;
    }
    printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
    fwrite(dst_data[0], 1, dst_bufsize, dst_file);
  } while (t < 10);
  // Flush any samples still buffered inside the resampler.
  ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, NULL, 0);
  if (ret < 0) {
    fprintf(stderr, "Error while converting\n");
    goto end;
  }
  dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels, ret,
                                           dst_sample_fmt, 1);
  if (dst_bufsize < 0) {
    fprintf(stderr, "Could not get sample buffer size\n");
    goto end;
  }
  printf("flush in:%d out:%d\n", 0, ret);
  fwrite(dst_data[0], 1, dst_bufsize, dst_file);
  if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0) goto end;
  fprintf(stderr,
          "Resampling succeeded. Play the output file with the command:\n"
          "ffplay -f %s -channel_layout %" PRId64 " -channels %d -ar %d %s\n",
          fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);
end:
  fclose(dst_file);
  if (src_data) av_freep(&src_data[0]);
  av_freep(&src_data);
  if (dst_data) av_freep(&dst_data[0]);
  av_freep(&dst_data);
  swr_free(&swr_ctx);
  return ret < 0;
}

View File

@@ -0,0 +1,53 @@
#include <SDL2/SDL.h>
int main(int argc, char *argv[]) {
int ret;
SDL_AudioSpec wanted_spec, obtained_spec;
// Initialize SDL
if (SDL_Init(SDL_INIT_AUDIO) < 0) {
SDL_LogError(SDL_LOG_CATEGORY_APPLICATION, "Failed to initialize SDL: %s",
SDL_GetError());
return -1;
}
// Set audio format
wanted_spec.freq = 44100; // Sample rate
wanted_spec.format =
AUDIO_F32SYS; // Sample format (32-bit float, system byte order)
wanted_spec.channels = 2; // Number of channels (stereo)
wanted_spec.samples = 1024; // Buffer size (in samples)
wanted_spec.callback = NULL; // Audio callback function (not used here)
// Open audio device
ret = SDL_OpenAudio(&wanted_spec, &obtained_spec);
if (ret < 0) {
SDL_LogError(SDL_LOG_CATEGORY_APPLICATION,
"Failed to open audio device: %s", SDL_GetError());
return -1;
}
// Start playing audio
SDL_PauseAudio(0);
// Write PCM data to audio buffer
float *pcm_data = ...; // PCM data buffer (float, interleaved)
int pcm_data_size = ...; // Size of PCM data buffer (in bytes)
int bytes_written = SDL_QueueAudio(0, pcm_data, pcm_data_size);
// Wait until audio buffer is empty
while (SDL_GetQueuedAudioSize(0) > 0) {
SDL_Delay(100);
}
// Stop playing audio
SDL_PauseAudio(1);
// Close audio device
SDL_CloseAudio();
// Quit SDL
SDL_Quit();
return 0;
}

View File

@@ -0,0 +1,89 @@
#include <SDL2/SDL.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main(int argc, char* argv[]) {
if (SDL_Init(SDL_INIT_AUDIO)) {
printf("SDL init error\n");
return -1;
}
// SDL_AudioSpec
SDL_AudioSpec wanted_spec;
SDL_zero(wanted_spec);
wanted_spec.freq = 48000;
wanted_spec.format = AUDIO_S16LSB;
wanted_spec.channels = 2;
wanted_spec.silence = 0;
wanted_spec.samples = 960;
wanted_spec.callback = NULL;
SDL_AudioDeviceID deviceID = 0;
// <20><><EFBFBD><EFBFBD><EFBFBD>
if ((deviceID = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, NULL,
SDL_AUDIO_ALLOW_FREQUENCY_CHANGE)) < 2) {
printf("could not open audio device: %s\n", SDL_GetError());
// <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>е<EFBFBD><D0B5><EFBFBD>ϵͳ
SDL_Quit();
return 0;
}
SDL_PauseAudioDevice(deviceID, 0);
FILE* fp = nullptr;
fopen_s(&fp, "ls.pcm", "rb+");
if (fp == NULL) {
printf("cannot open this file\n");
return -1;
}
if (fp == NULL) {
printf("error \n");
}
Uint32 buffer_size = 4096;
char* buffer = (char*)malloc(buffer_size);
while (true) {
if (fread(buffer, 1, buffer_size, fp) != buffer_size) {
printf("end of file\n");
break;
}
SDL_QueueAudio(deviceID, buffer, buffer_size);
}
printf("Play...\n");
SDL_Delay(10000);
// Uint32 residueAudioLen = 0;
// while (true) {
// residueAudioLen = SDL_GetQueuedAudioSize(deviceID);
// printf("%10d\n", residueAudioLen);
// if (residueAudioLen <= 0) break;
// SDL_Delay(1);
// }
// while (true) {
// printf("1 <20><>ͣ 2 <20><><EFBFBD><EFBFBD> 3 <20>˳<EFBFBD> \n");
// int flag = 0;
// scanf_s("%d", &flag);
// if (flag == 1)
// SDL_PauseAudioDevice(deviceID, 1);
// else if (flag == 2)
// SDL_PauseAudioDevice(deviceID, 0);
// else if (flag == 3)
// break;
// }
SDL_CloseAudio();
SDL_Quit();
fclose(fp);
return 0;
}

View File

@@ -0,0 +1,225 @@
#include <SDL2/SDL.h>
#include <stdio.h>
#include <stdlib.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
#include <libavformat/avformat.h>
#include <libavutil/channel_layout.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
};
// --- File-scope state for the SDL capture -> swresample -> SDL playback
// demo. Shared between the audio callbacks and main(). ---

static SDL_AudioDeviceID input_dev;   // capture device
static SDL_AudioDeviceID output_dev;  // playback device
static Uint8 *buffer = 0;             // scratch buffer allocated in main()
static int in_pos = 0;
static int out_pos = 0;

// Source (capture-side) format: 48 kHz mono s16, 480-sample (10 ms) blocks.
int64_t src_ch_layout = AV_CH_LAYOUT_MONO;
int src_rate = 48000;
enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_S16;
int src_nb_channels = 0;
uint8_t **src_data = NULL;  // per-channel plane pointers
int src_linesize;
int src_nb_samples = 480;

// Destination (playback-side) format: 48 kHz mono s16.
int64_t dst_ch_layout = AV_CH_LAYOUT_MONO;
int dst_rate = 48000;
enum AVSampleFormat dst_sample_fmt = AV_SAMPLE_FMT_S16;
int dst_nb_channels = 0;
uint8_t **dst_data = NULL;  // per-channel plane pointers
int dst_linesize;
int dst_nb_samples;
int max_dst_nb_samples;

// Hand-off buffer between the capture callback (producer) and the playback
// callback (consumer); 960 * 3 bytes.
static unsigned char audio_buffer[960 * 3];
static int audio_len = 0;

// Output file: resampled PCM dumped to disk for later verification.
const char *dst_filename = NULL;
FILE *dst_file;
int dst_bufsize;
const char *fmt;

// Resampler state.
struct SwrContext *swr_ctx;
double t;
int ret;

// Fix: string literals are const; binding one to a mutable `char *` is
// ill-formed in modern C++.
const char *out = "audio_old.pcm";
FILE *outfile = fopen(out, "wb+");
// SDL capture callback: receives `len` bytes of captured audio in `stream`,
// pushes them through swr_ctx, and copies the converted data into
// audio_buffer for cb_out to mix into the playback stream.
void cb_in(void *userdata, Uint8 *stream, int len) {
  // Grow the destination buffer if the resampler (input block + internal
  // delay) may produce more samples than currently fit.
  int64_t delay = swr_get_delay(swr_ctx, src_rate);
  dst_nb_samples =
      av_rescale_rnd(delay + src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
  if (dst_nb_samples > max_dst_nb_samples) {
    av_freep(&dst_data[0]);
    ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
                           dst_nb_samples, dst_sample_fmt, 1);
    if (ret < 0) return;
    max_dst_nb_samples = dst_nb_samples;
  }
  // `stream` is interleaved capture data; pass it as a single plane.
  ret = swr_convert(swr_ctx, dst_data, dst_nb_samples,
                    (const uint8_t **)&stream, src_nb_samples);
  if (ret < 0) {
    fprintf(stderr, "Error while converting\n");
    return;
  }
  dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels, ret,
                                           dst_sample_fmt, 1);
  if (dst_bufsize < 0) {
    fprintf(stderr, "Could not get sample buffer size\n");
    return;
  }
  printf("t:%f in:%d out:%d %d\n", t, src_nb_samples, ret, len);
  // NOTE(review): copies `len` bytes into the fixed 960*3-byte audio_buffer
  // with no bounds check — confirm len can never exceed sizeof(audio_buffer).
  memcpy(audio_buffer, dst_data[0], len);
  audio_len = len;
}
// SDL playback callback: fills `stream` with up to `len` bytes of the most
// recently converted capture block, or silence when nothing is pending.
void cb_out(void *userdata, Uint8 *stream, int len) {
  printf("cb_out len = %d\n", len);
  SDL_memset(stream, 0, len);  // pre-fill with silence
  if (audio_len == 0) return;
  // Mix no more than what cb_in last produced.
  len = (len > audio_len ? audio_len : len);
  SDL_MixAudioFormat(stream, audio_buffer, AUDIO_S16LSB, len,
                     SDL_MIX_MAXVOLUME);
}
// One-time setup for the capture->playback path: opens the PCM dump file
// and configures/initializes the swresample context plus the source and
// destination sample buffers. Returns 0 on success, -1 on failure (exits
// the process if the dump file cannot be opened).
//
// Fix: the original fell off the end of this non-void function on the
// success path — undefined behavior; it now returns 0 explicitly.
int init() {
  dst_filename = "res.pcm";
  dst_file = fopen(dst_filename, "wb");
  if (!dst_file) {
    fprintf(stderr, "Could not open destination file %s\n", dst_filename);
    exit(1);
  }
  /* create resampler context */
  swr_ctx = swr_alloc();
  if (!swr_ctx) {
    fprintf(stderr, "Could not allocate resampler context\n");
    ret = AVERROR(ENOMEM);
    return -1;
  }
  /* set options */
  // Input description.
  av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
  av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
  av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
  // Output description.
  av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
  av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
  av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
  /* initialize the resampling context */
  if ((ret = swr_init(swr_ctx)) < 0) {
    fprintf(stderr, "Failed to initialize the resampling context\n");
    return -1;
  }
  /* allocate source and destination samples buffers */
  // Channel count implied by the source layout.
  src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
  // Allocate the source sample buffer.
  ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize,
                                           src_nb_channels, src_nb_samples,
                                           src_sample_fmt, 0);
  if (ret < 0) {
    fprintf(stderr, "Could not allocate source samples\n");
    return -1;
  }
  /* compute the number of converted samples: buffering is avoided
   * ensuring that the output buffer will contain at least all the
   * converted input samples */
  max_dst_nb_samples = dst_nb_samples =
      av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
  /* buffer is going to be directly written to a rawaudio file, no alignment */
  dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
  // Allocate the destination sample buffer.
  ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize,
                                           dst_nb_channels, dst_nb_samples,
                                           dst_sample_fmt, 0);
  if (ret < 0) {
    fprintf(stderr, "Could not allocate destination samples\n");
    return -1;
  }
  return 0;
}
// Wires up the loopback: opens a 48 kHz mono s16 capture device and a
// matching playback device with 480-sample (10 ms) buffers, then spins
// while the SDL callbacks shuttle audio from input to output.
int main() {
  init();
  SDL_Init(SDL_INIT_AUDIO);
  // 16Mb should be enough; the test lasts 5 seconds
  buffer = (Uint8 *)malloc(16777215);
  SDL_AudioSpec want_in, want_out, have_in, have_out;
  SDL_zero(want_in);
  want_in.freq = 48000;
  want_in.format = AUDIO_S16LSB;
  want_in.channels = 1;
  want_in.samples = 480;
  want_in.callback = cb_in;
  // iscapture=1: open the default recording device.
  input_dev = SDL_OpenAudioDevice(NULL, 1, &want_in, &have_in, 0);
  printf("%d %d %d %d\n", have_in.freq, have_in.format, have_in.channels,
         have_in.samples);
  if (input_dev == 0) {
    SDL_Log("Failed to open input: %s", SDL_GetError());
    return 1;
  }
  SDL_zero(want_out);
  want_out.freq = 48000;
  want_out.format = AUDIO_S16LSB;
  want_out.channels = 1;
  want_out.samples = 480;
  want_out.callback = cb_out;
  // iscapture=0: open the default playback device.
  output_dev = SDL_OpenAudioDevice(NULL, 0, &want_out, &have_out, 0);
  printf("%d %d %d %d\n", have_out.freq, have_out.format, have_out.channels,
         have_out.samples);
  if (output_dev == 0) {
    SDL_Log("Failed to open input: %s", SDL_GetError());
    return 1;
  }
  // Unpause both devices; the callbacks now drive the audio path.
  SDL_PauseAudioDevice(input_dev, 0);
  SDL_PauseAudioDevice(output_dev, 0);
  // NOTE(review): busy-wait with no exit condition — the cleanup below is
  // unreachable as written.
  while (1) {
  }
  SDL_CloseAudioDevice(output_dev);
  SDL_CloseAudioDevice(input_dev);
  free(buffer);
  fclose(outfile);
}

View File

@@ -0,0 +1,95 @@
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
// swresample demo: converts one frame of 44.1 kHz stereo float-planar audio
// to 48 kHz stereo s16. Returns 0 on success, -1 on error.
int main(int argc, char *argv[]) {
  int ret;
  AVFrame *frame = NULL;
  AVFrame *resampled_frame = NULL;
  SwrContext *swr_ctx = NULL;

  // Initialize FFmpeg (av_register_all is required by the older FFmpeg API
  // this example targets).
  av_log_set_level(AV_LOG_INFO);
  av_register_all();

  // Allocate input frame
  frame = av_frame_alloc();
  if (!frame) {
    av_log(NULL, AV_LOG_ERROR, "Failed to allocate input frame\n");
    return -1;
  }
  // Allocate output frame for resampled data
  resampled_frame = av_frame_alloc();
  if (!resampled_frame) {
    av_log(NULL, AV_LOG_ERROR, "Failed to allocate output frame\n");
    return -1;
  }

  // Input: float planar, stereo, 44100 Hz, 1024 samples.
  frame->format = AV_SAMPLE_FMT_FLTP;
  frame->channel_layout = AV_CH_LAYOUT_STEREO;
  frame->sample_rate = 44100;
  frame->nb_samples = 1024;

  // Fix: allocate (and silence) the input frame's sample buffers — the
  // original passed frame->data to swr_convert while the pointers were
  // still NULL.
  ret = av_frame_get_buffer(frame, 0);
  if (ret < 0) {
    av_log(NULL, AV_LOG_ERROR, "Failed to allocate input frame buffer: %s\n",
           av_err2str(ret));
    return -1;
  }
  av_samples_set_silence(
      frame->data, 0, frame->nb_samples,
      av_get_channel_layout_nb_channels(frame->channel_layout),
      (enum AVSampleFormat)frame->format);

  // Output: signed 16-bit interleaved, stereo, 48000 Hz; sample count
  // scaled up from the input rate.
  resampled_frame->format = AV_SAMPLE_FMT_S16;
  resampled_frame->channel_layout = AV_CH_LAYOUT_STEREO;
  resampled_frame->sample_rate = 48000;
  resampled_frame->nb_samples = av_rescale_rnd(
      frame->nb_samples, resampled_frame->sample_rate, frame->sample_rate,
      AV_ROUND_UP);

  // Initialize resampler context.
  // Fix: the original called av_get_default_channel_layout() on a channel
  // *layout mask* (it expects a channel count) and a nonexistent
  // av_get_default_sample_fmt(); pass the layouts and formats directly.
  swr_ctx = swr_alloc_set_opts(
      NULL, resampled_frame->channel_layout,
      (enum AVSampleFormat)resampled_frame->format,
      resampled_frame->sample_rate, frame->channel_layout,
      (enum AVSampleFormat)frame->format, frame->sample_rate, 0, NULL);
  if (!swr_ctx) {
    av_log(NULL, AV_LOG_ERROR, "Failed to allocate resampler context\n");
    return -1;
  }
  // Initialize and configure the resampler
  if ((ret = swr_init(swr_ctx)) < 0) {
    av_log(NULL, AV_LOG_ERROR, "Failed to initialize resampler context: %s\n",
           av_err2str(ret));
    return -1;
  }

  // Allocate buffer for output samples.
  // Fix: resampled_frame->channels was never set (it was 0); derive the
  // channel count from the layout instead.
  ret = av_samples_alloc(
      resampled_frame->data, resampled_frame->linesize,
      av_get_channel_layout_nb_channels(resampled_frame->channel_layout),
      resampled_frame->nb_samples, (enum AVSampleFormat)resampled_frame->format,
      0);
  if (ret < 0) {
    av_log(NULL, AV_LOG_ERROR, "Failed to allocate output samples buffer: %s\n",
           av_err2str(ret));
    return -1;
  }

  // Resample the input data
  ret = swr_convert(swr_ctx, resampled_frame->data, resampled_frame->nb_samples,
                    (const uint8_t **)frame->data, frame->nb_samples);
  if (ret < 0) {
    av_log(NULL, AV_LOG_ERROR, "Failed to resample input data: %s\n",
           av_err2str(ret));
    return -1;
  }

  // Cleanup. av_frame_free does not release the buffer av_samples_alloc
  // attached to resampled_frame->data, so free it explicitly first.
  av_freep(&resampled_frame->data[0]);
  swr_free(&swr_ctx);
  av_frame_free(&frame);
  av_frame_free(&resampled_frame);
  return 0;
}

View File

@@ -0,0 +1,205 @@
#include <SDL2/SDL.h>
#include <stdio.h>
#include <stdlib.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
#include <libavformat/avformat.h>
#include <libavutil/channel_layout.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
};
// --- File-scope state for the SDL capture + resample-to-file demo.
// Shared between the audio callbacks, init() and main(). ---

static SDL_AudioDeviceID input_dev;   // capture device
static SDL_AudioDeviceID output_dev;  // playback device
static Uint8 *buffer = 0;             // sink buffer filled by cb_out
static int in_pos = 0;
static int out_pos = 0;

// Source (capture-side) format: 48 kHz mono float, 480-sample (10 ms) blocks.
int64_t src_ch_layout = AV_CH_LAYOUT_MONO;
int src_rate = 48000;
enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_FLT;
int src_nb_channels = 0;
uint8_t **src_data = NULL;  // per-channel plane pointers
int src_linesize;
int src_nb_samples = 480;

// Destination format: 48 kHz stereo s16.
int64_t dst_ch_layout = AV_CH_LAYOUT_STEREO;
int dst_rate = 48000;
enum AVSampleFormat dst_sample_fmt = AV_SAMPLE_FMT_S16;
int dst_nb_channels = 0;
uint8_t **dst_data = NULL;  // per-channel plane pointers
int dst_linesize;
int dst_nb_samples;
int max_dst_nb_samples;

// Output file: resampled PCM dumped to disk for later verification.
const char *dst_filename = NULL;
FILE *dst_file;
int dst_bufsize;
const char *fmt;

// Resampler state.
struct SwrContext *swr_ctx;
double t;
int ret;

// Fix: string literals are const; binding one to a mutable `char *` is
// ill-formed in modern C++.
const char *out = "audio_old.pcm";
FILE *outfile = fopen(out, "wb+");
// SDL capture callback: dumps the raw captured bytes to `outfile`, then
// pushes the chunk through `swr_ctx` and appends the converted PCM to
// `dst_file`.
// NOTE(review): main() opens the device as 2-channel AUDIO_F32LSB @ 48 kHz,
// while the resampler input is configured as mono FLT in the globals, and
// src_nb_samples is fixed at 480 regardless of `len` — confirm these match
// the format SDL actually delivers.
void cb_in(void *userdata, Uint8 *stream, int len) {
  // If len < 4, the printf below will probably segfault
  {
    // Raw (pre-resample) dump, for A/B comparison with the converted file.
    fwrite(stream, 1, len, outfile);
    fflush(outfile);
  }
  {
    // Include samples still buffered inside the resampler when sizing the
    // output buffer.
    int64_t delay = swr_get_delay(swr_ctx, src_rate);
    dst_nb_samples =
        av_rescale_rnd(delay + src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
    // Grow the destination buffer only when the required size increases.
    if (dst_nb_samples > max_dst_nb_samples) {
      av_freep(&dst_data[0]);
      ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
                             dst_nb_samples, dst_sample_fmt, 1);
      if (ret < 0) return;
      max_dst_nb_samples = dst_nb_samples;
    }
    // Convert straight from the SDL-provided buffer.
    ret = swr_convert(swr_ctx, dst_data, dst_nb_samples,
                      (const uint8_t **)&stream, src_nb_samples);
    if (ret < 0) {
      fprintf(stderr, "Error while converting\n");
      return;
    }
    // `ret` now holds the number of samples actually produced per channel.
    dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
                                             ret, dst_sample_fmt, 1);
    if (dst_bufsize < 0) {
      fprintf(stderr, "Could not get sample buffer size\n");
      return;
    }
    printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
    fwrite(dst_data[0], 1, dst_bufsize, dst_file);
  }
}
void cb_out(void *userdata, Uint8 *stream, int len) {
// If len < 4, the printf below will probably segfault
SDL_memcpy(buffer + out_pos, stream, len);
out_pos += len;
}
// One-time setup: open the destination PCM file ("res.pcm") and configure an
// SwrContext (mono FLT @ 48 kHz -> stereo S16 @ 48 kHz per the globals),
// then allocate the source/destination sample buffers used by cb_in.
// Returns 0 on success, -1 on any FFmpeg failure; exits the process if the
// output file cannot be opened.
int init() {
  dst_filename = "res.pcm";
  dst_file = fopen(dst_filename, "wb");
  if (!dst_file) {
    fprintf(stderr, "Could not open destination file %s\n", dst_filename);
    exit(1);
  }
  /* create resampler context */
  swr_ctx = swr_alloc();
  if (!swr_ctx) {
    fprintf(stderr, "Could not allocate resampler context\n");
    ret = AVERROR(ENOMEM);
    return -1;
  }
  /* set options: input side */
  av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
  av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
  av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
  /* output side */
  av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
  av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
  av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
  /* initialize the resampling context */
  if ((ret = swr_init(swr_ctx)) < 0) {
    fprintf(stderr, "Failed to initialize the resampling context\n");
    return -1;
  }
  /* allocate source and destination samples buffers */
  src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
  ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize,
                                           src_nb_channels, src_nb_samples,
                                           src_sample_fmt, 0);
  if (ret < 0) {
    fprintf(stderr, "Could not allocate source samples\n");
    return -1;
  }
  /* compute the number of converted samples: buffering is avoided
   * ensuring that the output buffer will contain at least all the
   * converted input samples */
  max_dst_nb_samples = dst_nb_samples =
      av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
  /* buffer is going to be directly written to a rawaudio file, no alignment */
  dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
  ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize,
                                           dst_nb_channels, dst_nb_samples,
                                           dst_sample_fmt, 0);
  if (ret < 0) {
    fprintf(stderr, "Could not allocate destination samples\n");
    return -1;
  }
  // Fix: the function is declared to return int but previously fell off the
  // end without a return statement (undefined behavior in C++).
  return 0;
}
int main() {
init();
SDL_Init(SDL_INIT_AUDIO);
// 16Mb should be enough; the test lasts 5 seconds
buffer = (Uint8 *)malloc(16777215);
SDL_AudioSpec want_in, want_out, have_in, have_out;
SDL_zero(want_in);
want_in.freq = 48000;
want_in.format = AUDIO_F32LSB;
want_in.channels = 2;
want_in.samples = 960;
want_in.callback = cb_in;
input_dev = SDL_OpenAudioDevice(NULL, 1, &want_in, &have_in,
SDL_AUDIO_ALLOW_ANY_CHANGE);
printf("%d %d %d %d\n", have_in.freq, have_in.format, have_in.channels,
have_in.samples);
if (input_dev == 0) {
SDL_Log("Failed to open input: %s", SDL_GetError());
return 1;
}
SDL_PauseAudioDevice(input_dev, 0);
SDL_PauseAudioDevice(output_dev, 0);
SDL_Delay(5000);
SDL_CloseAudioDevice(output_dev);
SDL_CloseAudioDevice(input_dev);
free(buffer);
fclose(outfile);
}

View File

@@ -0,0 +1,123 @@
#define __STDC_CONSTANT_MACROS
extern "C" {
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/log.h>
#include <libswresample/swresample.h>
}
#include <windows.h>
#include <memory>
#include <string>
#include <vector>
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "Winmm.lib")
using std::shared_ptr;
using std::string;
using std::vector;
void capture_audio() {
// windows api <20><>ȡ<EFBFBD><C8A1>Ƶ<EFBFBD><EFBFBD>б<EFBFBD><D0B1><EFBFBD>ffmpeg <20><><EFBFBD><EFBFBD>û<EFBFBD><C3BB><EFBFBD><EFBFBD><E1B9A9>ȡ<EFBFBD><C8A1><EFBFBD><EFBFBD>Ƶ<EFBFBD><EFBFBD><E8B1B8>api<70><69>
int nDeviceNum = waveInGetNumDevs();
vector<string> vecDeviceName;
for (int i = 0; i < nDeviceNum; ++i) {
WAVEINCAPS wic;
waveInGetDevCaps(i, &wic, sizeof(wic));
// ת<><D7AA>utf-8
int nSize = WideCharToMultiByte(CP_UTF8, 0, wic.szPname,
wcslen(wic.szPname), NULL, 0, NULL, NULL);
shared_ptr<char> spDeviceName(new char[nSize + 1]);
memset(spDeviceName.get(), 0, nSize + 1);
WideCharToMultiByte(CP_UTF8, 0, wic.szPname, wcslen(wic.szPname),
spDeviceName.get(), nSize, NULL, NULL);
vecDeviceName.push_back(spDeviceName.get());
av_log(NULL, AV_LOG_DEBUG, "audio input device : %s \n",
spDeviceName.get());
}
if (vecDeviceName.size() <= 0) {
av_log(NULL, AV_LOG_ERROR, "not find audio input device.\n");
return;
}
string sDeviceName = "audio=" + vecDeviceName[0]; // ʹ<>õ<EFBFBD>һ<EFBFBD><D2BB><EFBFBD><EFBFBD>Ƶ<EFBFBD>
// ffmpeg
avdevice_register_all(); // ע<><D7A2><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
AVInputFormat* ifmt =
(AVInputFormat*)av_find_input_format("dshow"); // <20><><EFBFBD>òɼ<C3B2><C9BC><EFBFBD>ʽ dshow
if (ifmt == NULL) {
av_log(NULL, AV_LOG_ERROR, "av_find_input_format for dshow fail.\n");
return;
}
AVFormatContext* fmt_ctx = NULL; // format <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
int ret = avformat_open_input(&fmt_ctx, sDeviceName.c_str(), ifmt,
NULL); // <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD>Ƶ<EFBFBD>
if (ret != 0) {
av_log(NULL, AV_LOG_ERROR, "avformat_open_input fail. return %d.\n", ret);
return;
}
AVPacket pkt;
int64_t src_rate = 44100;
int64_t dst_rate = 48000;
SwrContext* swr_ctx = swr_alloc();
uint8_t** dst_data = NULL;
int dst_linesize = 0;
// <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
av_opt_set_int(swr_ctx, "in_channel_layout", AV_CH_LAYOUT_MONO, 0);
av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
// <20><><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
av_opt_set_int(swr_ctx, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
// <20><>ʼ<EFBFBD><CABC>SwrContext
swr_init(swr_ctx);
FILE* fp = fopen("dst.pcm", "wb");
int count = 0;
while (count++ < 10) {
ret = av_read_frame(fmt_ctx, &pkt);
if (ret != 0) {
av_log(NULL, AV_LOG_ERROR, "av_read_frame fail, return %d .\n", ret);
break;
}
int out_samples_per_channel =
(int)av_rescale_rnd(1024, dst_rate, src_rate, AV_ROUND_UP);
int out_buffer_size = av_samples_get_buffer_size(
NULL, 1, out_samples_per_channel, AV_SAMPLE_FMT_S16, 0);
// uint8_t* out_buffer = (uint8_t*)av_malloc(out_buffer_size);
ret = av_samples_alloc_array_and_samples(
&dst_data, &dst_linesize, 2, out_buffer_size, AV_SAMPLE_FMT_S16, 0);
// <20><><EFBFBD><EFBFBD><EFBFBD>ز<EFBFBD><D8B2><EFBFBD>
swr_convert(swr_ctx, dst_data, out_samples_per_channel,
(const uint8_t**)&pkt.data, 1024);
fwrite(dst_data[1], 1, out_buffer_size, fp);
av_packet_unref(&pkt); // <20><><EFBFBD><EFBFBD><EFBFBD>ͷ<EFBFBD>pkt<6B><74><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ڴ棬<DAB4><E6A3AC><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ڴ<EFBFBD>й¶
}
fflush(fp); // ˢ<><CBA2><EFBFBD>ļ<EFBFBD>io<69><6F><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
fclose(fp);
avformat_close_input(&fmt_ctx);
}
// Entry point: raise FFmpeg log verbosity, run the dshow capture demo, then
// pause briefly before exiting.
int main(int argc, char** argv) {
  av_log_set_level(AV_LOG_DEBUG);  // verbose FFmpeg logging for the demo
  capture_audio();
  Sleep(1);
  return 0;
}

View File

@@ -0,0 +1,286 @@
#include <stdio.h>
#define __STDC_CONSTANT_MACROS
#ifdef _WIN32
// Windows
extern "C" {
#include "SDL/SDL.h"
#include "libavcodec/avcodec.h"
#include "libavdevice/avdevice.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
};
#else
// Linux...
#ifdef __cplusplus
extern "C" {
#endif
#include <SDL2/SDL.h>
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
};
#endif
#endif
// Output YUV420P
#define OUTPUT_YUV420P 0
//'1' Use Dshow
//'0' Use GDIgrab
#define USE_DSHOW 0
// Refresh Event
#define SFM_REFRESH_EVENT (SDL_USEREVENT + 1)
#define SFM_BREAK_EVENT (SDL_USEREVENT + 2)
int thread_exit = 0;
int sfp_refresh_thread(void *opaque) {
thread_exit = 0;
while (!thread_exit) {
SDL_Event event;
event.type = SFM_REFRESH_EVENT;
SDL_PushEvent(&event);
SDL_Delay(40);
}
thread_exit = 0;
// Break
SDL_Event event;
event.type = SFM_BREAK_EVENT;
SDL_PushEvent(&event);
return 0;
}
// Show AVFoundation Device
void show_avfoundation_device() {
AVFormatContext *pFormatCtx = avformat_alloc_context();
AVDictionary *options = NULL;
av_dict_set(&options, "list_devices", "true", 0);
AVInputFormat *iformat =
(AVInputFormat *)av_find_input_format("avfoundation");
printf("==AVFoundation Device Info===\n");
avformat_open_input(&pFormatCtx, "", iformat, &options);
printf("=============================\n");
}
// Grab the desktop with the platform-appropriate FFmpeg input device
// (dshow/gdigrab on Windows, x11grab on Linux, avfoundation otherwise),
// decode each frame, convert it to YUV420P with swscale and present it in an
// SDL1 YUV overlay. A helper thread posts SFM_REFRESH_EVENT every 40 ms to
// drive the read/decode/display loop.
// NOTE(review): uses deprecated FFmpeg APIs (AVStream::codec,
// avcodec_decode_video2, av_free_packet) and the SDL1 overlay API; pFrame
// and packet are never freed on exit.
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx;
  int i, videoindex;
  AVCodecContext *pCodecCtx;
  AVCodec *pCodec;
  avformat_network_init();
  pFormatCtx = avformat_alloc_context();
  // Open File
  // char filepath[]="src01_480x272_22.h265";
  // avformat_open_input(&pFormatCtx,filepath,NULL,NULL)
  // Register Device
  avdevice_register_all();
// Windows
#ifdef _WIN32
#if USE_DSHOW
  // Use dshow
  //
  // Need to Install screen-capture-recorder
  // screen-capture-recorder
  // Website: http://sourceforge.net/projects/screencapturer/
  //
  AVInputFormat *ifmt = av_find_input_format("dshow");
  if (avformat_open_input(&pFormatCtx, "video=screen-capture-recorder", ifmt,
                          NULL) != 0) {
    printf("Couldn't open input stream.\n");
    return -1;
  }
#else
  // Use gdigrab
  AVDictionary *options = NULL;
  // Set some options
  // grabbing frame rate
  // av_dict_set(&options,"framerate","5",0);
  // The distance from the left edge of the screen or desktop
  // av_dict_set(&options,"offset_x","20",0);
  // The distance from the top edge of the screen or desktop
  // av_dict_set(&options,"offset_y","40",0);
  // Video frame size. The default is to capture the full screen
  // av_dict_set(&options,"video_size","640x480",0);
  AVInputFormat *ifmt = av_find_input_format("gdigrab");
  if (avformat_open_input(&pFormatCtx, "desktop", ifmt, &options) != 0) {
    printf("Couldn't open input stream.\n");
    return -1;
  }
#endif
#elif defined linux
  // Linux
  AVDictionary *options = NULL;
  // Set some options
  // grabbing frame rate
  // av_dict_set(&options,"framerate","5",0);
  // Make the grabbed area follow the mouse
  // av_dict_set(&options,"follow_mouse","centered",0);
  // Video frame size. The default is to capture the full screen
  // av_dict_set(&options,"video_size","640x480",0);
  AVInputFormat *ifmt = av_find_input_format("x11grab");
  // Grab at position 10,20
  if (avformat_open_input(&pFormatCtx, ":0.0+10,20", ifmt, &options) != 0) {
    printf("Couldn't open input stream.\n");
    return -1;
  }
#else
  show_avfoundation_device();
  // Mac
  AVInputFormat *ifmt = (AVInputFormat *)av_find_input_format("avfoundation");
  // Avfoundation
  //[video]:[audio]
  if (avformat_open_input(&pFormatCtx, "1", ifmt, NULL) != 0) {
    printf("Couldn't open input stream.\n");
    return -1;
  }
#endif
  if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
    printf("Couldn't find stream information.\n");
    return -1;
  }
  // Locate the first video stream in the opened device.
  videoindex = -1;
  for (i = 0; i < pFormatCtx->nb_streams; i++)
    if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
      videoindex = i;
      break;
    }
  if (videoindex == -1) {
    printf("Didn't find a video stream.\n");
    return -1;
  }
  pCodecCtx = pFormatCtx->streams[videoindex]->codec;
  pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
  if (pCodec == NULL) {
    printf("Codec not found.\n");
    return -1;
  }
  if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
    printf("Could not open codec.\n");
    return -1;
  }
  AVFrame *pFrame, *pFrameYUV;
  pFrame = av_frame_alloc();
  pFrameYUV = av_frame_alloc();
  // unsigned char *out_buffer=(unsigned char
  // *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width,
  // pCodecCtx->height)); avpicture_fill((AVPicture *)pFrameYUV, out_buffer,
  // AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
  // SDL----------------------------
  if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    printf("Could not initialize SDL - %s\n", SDL_GetError());
    return -1;
  }
  int screen_w = 640, screen_h = 360;
  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
  // Half of the Desktop's width and height.
  screen_w = vi->current_w / 2;
  screen_h = vi->current_h / 2;
  SDL_Surface *screen;
  screen = SDL_SetVideoMode(screen_w, screen_h, 0, 0);
  if (!screen) {
    printf("SDL: could not set video mode - exiting:%s\n", SDL_GetError());
    return -1;
  }
  SDL_Overlay *bmp;
  bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,
                             SDL_YV12_OVERLAY, screen);
  SDL_Rect rect;
  rect.x = 0;
  rect.y = 0;
  rect.w = screen_w;
  rect.h = screen_h;
  // SDL End------------------------
  int ret, got_picture;
  AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));
#if OUTPUT_YUV420P
  FILE *fp_yuv = fopen("output.yuv", "wb+");
#endif
  // Converter from the device's native pixel format to YUV420P at the same
  // resolution (scaling to the window is done by the overlay blit).
  struct SwsContext *img_convert_ctx;
  img_convert_ctx = sws_getContext(
      pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width,
      pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
  //------------------------------
  SDL_Thread *video_tid = SDL_CreateThread(sfp_refresh_thread, NULL);
  //
  SDL_WM_SetCaption("Simplest FFmpeg Grab Desktop", NULL);
  // Event Loop
  SDL_Event event;
  for (;;) {
    // Wait
    SDL_WaitEvent(&event);
    if (event.type == SFM_REFRESH_EVENT) {
      //------------------------------
      if (av_read_frame(pFormatCtx, packet) >= 0) {
        if (packet->stream_index == videoindex) {
          ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
          if (ret < 0) {
            printf("Decode Error.\n");
            return -1;
          }
          if (got_picture) {
            SDL_LockYUVOverlay(bmp);
            // SDL_YV12 plane order is Y,V,U while FFmpeg YUV420P is Y,U,V,
            // so the U and V planes (and pitches) are swapped here.
            pFrameYUV->data[0] = bmp->pixels[0];
            pFrameYUV->data[1] = bmp->pixels[2];
            pFrameYUV->data[2] = bmp->pixels[1];
            pFrameYUV->linesize[0] = bmp->pitches[0];
            pFrameYUV->linesize[1] = bmp->pitches[2];
            pFrameYUV->linesize[2] = bmp->pitches[1];
            sws_scale(img_convert_ctx,
                      (const unsigned char *const *)pFrame->data,
                      pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data,
                      pFrameYUV->linesize);
#if OUTPUT_YUV420P
            int y_size = pCodecCtx->width * pCodecCtx->height;
            fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);      // Y
            fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  // U
            fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  // V
#endif
            SDL_UnlockYUVOverlay(bmp);
            SDL_DisplayYUVOverlay(bmp, &rect);
          }
        }
        av_free_packet(packet);
      } else {
        // Exit Thread
        thread_exit = 1;
      }
    } else if (event.type == SDL_QUIT) {
      thread_exit = 1;
    } else if (event.type == SFM_BREAK_EVENT) {
      break;
    }
  }
  sws_freeContext(img_convert_ctx);
#if OUTPUT_YUV420P
  fclose(fp_yuv);
#endif
  SDL_Quit();
  // av_free(out_buffer);
  av_free(pFrameYUV);
  avcodec_close(pCodecCtx);
  avformat_close_input(&pFormatCtx);
  return 0;
}

View File

@@ -2,7 +2,7 @@ package("ffmpeg")
set_homepage("https://www.ffmpeg.org")
set_description("A collection of libraries to process multimedia content such as audio, video, subtitles and related metadata.")
set_license("GPL-3.0")
set_license("LGPL-3.0")
if is_plat("windows", "mingw") then
add_urls("https://www.gyan.dev/ffmpeg/builds/packages/ffmpeg-$(version)-full_build-shared.7z")
@@ -42,14 +42,15 @@ package("ffmpeg")
add_configs("vdpau", {description = "Enable vdpau library.", default = false, type = "boolean"})
add_configs("hardcoded-tables", {description = "Enable hardcoded tables.", default = true, type = "boolean"})
add_configs("asm", {description = "Enable asm", default = false, type = "boolean"})
add_configs("libopenh264", {description = "Enable libopenh264", default = true, type = "boolean"})
add_configs("libopenh264", {description = "Enable libopenh264", default = false, type = "boolean"})
end
add_links("avfilter", "avdevice", "avformat", "avcodec", "swscale", "swresample", "avutil")
add_links("avfilter", "avdevice", "avformat", "avcodec", "swscale", "swresample", "avutil", "postproc")
if is_plat("macosx") then
add_frameworks("CoreFoundation", "Foundation", "CoreVideo", "CoreMedia", "AudioToolbox", "VideoToolbox", "Security")
elseif is_plat("linux") then
add_syslinks("pthread", "openh264")
-- add_syslinks("pthread", "openh264")
add_syslinks("pthread")
end
if is_plat("linux", "macosx") then

View File

@@ -2,4 +2,6 @@ includes("sdl2", "projectx")
if is_plat("windows") then
elseif is_plat("linux") then
includes("ffmpeg")
elseif is_plat("macosx") then
includes("ffmpeg")
end

137
xmake.lua
View File

@@ -5,46 +5,59 @@ set_license("LGPL-3.0")
add_rules("mode.release", "mode.debug")
set_languages("c++17")
add_requires("spdlog 1.11.0", {system = false})
add_requires("imgui 1.89.9", {configs = {sdl2 = true, sdl2_renderer = true}})
add_defines("UNICODE")
add_requires("sdl2", {system = false})
add_requires("projectx")
if is_os("windows") then
add_links("Shell32", "windowsapp", "dwmapi", "User32", "kernel32")
add_requires("vcpkg::ffmpeg 5.1.2", {configs = {shared = false}})
elseif is_os("linux") then
add_requires("ffmpeg 5.1.2", {system = false})
add_syslinks("pthread", "dl")
elseif is_os("macosx") then
add_requires("ffmpeg 5.1.2", {system = false})
end
if is_mode("debug") then
add_defines("REMOTE_DESK_DEBUG")
end
add_packages("spdlog")
add_requires("sdl2", {system = false})
add_requires("spdlog 1.11.0", {system = false})
add_requires("imgui 1.89.9", {configs = {sdl2 = true, sdl2_renderer = true}})
if is_os("windows") then
add_links("Shell32", "windowsapp", "dwmapi", "User32", "kernel32",
"SDL2-static", "SDL2main", "gdi32", "winmm", "setupapi", "version",
"Imm32", "iphlpapi")
add_requires("vcpkg::ffmpeg 5.1.2", {configs = {shared = false}})
add_packages("vcpkg::ffmpeg")
elseif is_os("linux") then
add_requires("ffmpeg 5.1.2", {system = false})
add_packages("ffmpeg")
add_syslinks("pthread", "dl")
add_links("SDL2")
add_ldflags("-lavformat", "-lavdevice", "-lavfilter", "-lavcodec",
"-lswscale", "-lavutil", "-lswresample",
"-lasound", "-lxcb-shape", "-lxcb-xfixes", "-lsndio", "-lxcb",
"-lxcb-shm", "-lXext", "-lX11", "-lXv", "-ldl", "-lpthread",
{force = true})
elseif is_os("macosx") then
add_requires("ffmpeg 5.1.2", {system = false})
add_requires("libxcb", {system = false})
add_packages("ffmpeg", "libxcb")
add_links("SDL2", "SDL2main")
add_ldflags("-Wl,-ld_classic")
add_frameworks("OpenGL")
end
add_packages("spdlog", "sdl2", "imgui")
includes("thirdparty")
target("log")
set_kind("headeronly")
set_kind("object")
add_packages("spdlog")
add_headerfiles("src/log/log.h")
add_includedirs("src/log", {public = true})
target("screen_capture")
set_kind("static")
add_packages("log", "ffmpeg")
set_kind("object")
add_deps("log")
if is_os("windows") then
add_files("src/screen_capture/windows/*.cpp")
add_includedirs("src/screen_capture/windows", {public = true})
elseif is_os("macosx") then
add_files("src/screen_capture/macosx/*.cpp")
add_includedirs("ssrc/creen_capture/macosx", {public = true})
add_includedirs("src/screen_capture/macosx", {public = true})
elseif is_os("linux") then
add_files("src/screen_capture/linux/*.cpp")
add_includedirs("src/screen_capture/linux", {public = true})
@@ -52,36 +65,68 @@ target("screen_capture")
target("remote_desk")
set_kind("binary")
add_deps("log", "screen_capture")
add_packages("sdl2", "imgui", "ffmpeg", "projectx")
add_deps("log", "screen_capture", "projectx")
add_files("src/gui/main.cpp")
if is_os("windows") then
add_links("SDL2-static", "SDL2main", "gdi32", "winmm",
"setupapi", "version", "Imm32", "iphlpapi")
elseif is_os("macosx") then
add_links("SDL2")
elseif is_os("linux") then
add_links("SDL2")
add_ldflags("-lavformat", "-lavdevice", "-lavfilter", "-lavcodec",
"-lswscale", "-lavutil", "-lswresample",
"-lasound", "-lxcb-shape", "-lxcb-xfixes", "-lsndio", "-lxcb",
"-lxcb-shm", "-lXext", "-lX11", "-lXv", "-ldl", "-lpthread",
{force = true})
end
after_install(function (target)
os.cp("$(projectdir)/thirdparty/nvcodec/Lib/x64/*.so", "$(projectdir)/out/bin")
os.cp("$(projectdir)/thirdparty/nvcodec/Lib/x64/*.so.1", "$(projectdir)/out/bin")
os.cp("$(projectdir)/out/lib/*.so", "$(projectdir)/out/bin")
os.rm("$(projectdir)/out/include")
os.rm("$(projectdir)/out/lib")
end)
-- target("linux_capture")
-- after_install(function (target)
-- os.cp("$(projectdir)/thirdparty/nvcodec/Lib/x64/*.so", "$(projectdir)/out/bin")
-- os.cp("$(projectdir)/thirdparty/nvcodec/Lib/x64/*.so.1", "$(projectdir)/out/bin")
-- os.cp("$(projectdir)/out/lib/*.so", "$(projectdir)/out/bin")
-- os.rm("$(projectdir)/out/include")
-- os.rm("$(projectdir)/out/lib")
-- end)
-- target("screen_capture")
-- set_kind("binary")
-- add_packages("sdl2", "imgui", "ffmpeg", "openh264")
-- add_files("test/linux_capture.cpp")
-- add_files("test/screen_capture/linux_capture.cpp")
-- add_ldflags("-lavformat", "-lavdevice", "-lavfilter", "-lavcodec",
-- "-lswscale", "-lavutil", "-lswresample",
-- "-lasound", "-lxcb-shape", "-lxcb-xfixes", "-lsndio", "-lxcb",
-- "-lxcb-shm", "-lXext", "-lX11", "-lXv", "-lpthread", "-lSDL2", "-lopenh264",
-- "-ldl" ,{force = true})
-- "-ldl", {force = true})
-- target("screen_capture")
-- set_kind("binary")
-- add_packages("sdl2", "imgui", "ffmpeg", "openh264")
-- add_files("test/screen_capture/mac_capture.cpp")
-- add_ldflags("-lavformat", "-lavdevice", "-lavfilter", "-lavcodec",
-- "-lswscale", "-lavutil", "-lswresample",
-- "-lasound", "-lxcb-shape", "-lxcb-xfixes", "-lsndio", "-lxcb",
-- "-lxcb-shm", "-lXext", "-lX11", "-lXv", "-lpthread", "-lSDL2", "-lopenh264",
-- "-ldl", {force = true})
-- target("audio_capture")
-- set_kind("binary")
-- add_packages("ffmpeg")
-- add_files("test/audio_capture/sdl2_audio_capture.cpp")
-- add_includedirs("test/audio_capture")
-- add_ldflags("-lavformat", "-lavdevice", "-lavfilter", "-lavcodec",
-- "-lswscale", "-lavutil", "-lswresample",
-- "-lasound", "-lxcb-shape", "-lxcb-xfixes", "-lsndio", "-lxcb",
-- "-lxcb-shm", "-lXext", "-lX11", "-lXv", "-lpthread", "-lSDL2", "-lopenh264",
-- "-ldl", {force = true})
-- add_links("Shlwapi", "Strmiids", "Vfw32", "Secur32", "Mfuuid")
-- target("play_audio")
-- set_kind("binary")
-- add_packages("ffmpeg")
-- add_files("test/audio_capture/play_loopback.cpp")
-- add_includedirs("test/audio_capture")
-- add_ldflags("-lavformat", "-lavdevice", "-lavfilter", "-lavcodec",
-- "-lswscale", "-lavutil", "-lswresample",
-- "-lasound", "-lxcb-shape", "-lxcb-xfixes", "-lsndio", "-lxcb",
-- "-lxcb-shm", "-lXext", "-lX11", "-lXv", "-lpthread", "-lSDL2", "-lopenh264",
-- "-ldl", {force = true})
-- add_links("Shlwapi", "Strmiids", "Vfw32", "Secur32", "Mfuuid")
-- target("audio_capture")
-- set_kind("binary")
-- add_packages("libopus")
-- add_files("test/audio_capture/sdl2_audio_capture.cpp")
-- add_includedirs("test/audio_capture")
-- add_ldflags("-lavformat", "-lavdevice", "-lavfilter", "-lavcodec",
-- "-lswscale", "-lavutil", "-lswresample",
-- "-lasound", "-lxcb-shape", "-lxcb-xfixes", "-lsndio", "-lxcb",
-- "-lxcb-shm", "-lXext", "-lX11", "-lXv", "-lpthread", "-lSDL2", "-lopenh264",
-- "-ldl", {force = true})