
Implementing an RTSP server with Live555 to push a live H264 stream


This post implements a unicast RTSP server that pushes a live H264 stream. It is based on the testOnDemandRTSPServer example in the official testProgs directory and on DeviceSource.cpp in the liveMedia directory. In my setup, the H264 stream captured from the camera is pushed into a ring-buffer queue, and the server simply pulls frames back out of that queue (a sketch of the assumed queue interface follows).
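The original post does not include ringQueue.h. For reference, here is a minimal sketch of the interface the code below appears to assume; only the names ringQueue, rQueue_data, rQueue_de and the global rQueue actually appear in the post, so the struct fields and the rQueue_create/rQueue_en helpers are guesses:

// ringQueue.h -- hypothetical sketch; not part of the original post.
// Only ringQueue, rQueue_data and rQueue_de() are referenced in rtsp.cpp;
// the field layout and the create/enqueue helpers are assumptions.
#ifndef _RING_QUEUE_H_
#define _RING_QUEUE_H_

#include <stdint.h>
#include <stddef.h>

typedef struct
{
    uint8_t *buffer; // caller-provided buffer the frame is copied into
    size_t len;      // in: buffer capacity; out: actual frame length
} rQueue_data;

typedef struct ringQueue ringQueue; // opaque; holds queued encoded frames

ringQueue *rQueue_create(size_t depth);            // assumed constructor
int rQueue_en(ringQueue *q, const rQueue_data *e); // producer side (capture thread)
int rQueue_de(ringQueue *q, rQueue_data *e);       // consumer side; returns -1 if empty

#endif // _RING_QUEUE_H_

Any thread-safe bounded queue with these semantics (dequeue copies one whole encoded frame into the caller's buffer and fails when empty) would work in its place.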

rtsp.h:

#ifndef _RTSP_H_
#define _RTSP_H_

#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

void create_rtsp_server(void);

class H264LiveServerMediaSession : public OnDemandServerMediaSubsession
{
public:
    static H264LiveServerMediaSession *createNew(UsageEnvironment &env, Boolean reuseFirstSource);
    void checkForAuxSDPLine1();
    void afterPlayingDummy1();

protected:
    H264LiveServerMediaSession(UsageEnvironment &env, Boolean reuseFirstSource);
    virtual ~H264LiveServerMediaSession(void);
    void setDoneFlag() { fDoneFlag = ~0; }

protected:
    virtual char const *getAuxSDPLine(RTPSink *rtpSink, FramedSource *inputSource);
    virtual FramedSource *createNewStreamSource(unsigned clientSessionId, unsigned &estBitrate);
    virtual RTPSink *createNewRTPSink(Groupsock *rtpGroupsock,
                                      unsigned char rtpPayloadTypeIfDynamic,
                                      FramedSource *inputSource);

private:
    char *fAuxSDPLine;
    char fDoneFlag;
    RTPSink *fDummyRTPSink;
};

// A custom live-stream source class (modeled on liveMedia/DeviceSource.cpp)
class H264VideoStreamSource : public FramedSource
{
public:
    static H264VideoStreamSource *createNew(UsageEnvironment &env);
    virtual unsigned maxFrameSize() const;

protected:
    H264VideoStreamSource(UsageEnvironment &env);
    virtual ~H264VideoStreamSource();

private:
    virtual void doGetNextFrame();
    virtual void doStopGettingFrames();
    static void getNextFrame0(void *clientData); // trampoline so the scheduler can re-invoke doGetNextFrame()
};

#endif // _RTSP_H_

rtsp.cpp:

#include <iostream>
#include <cstring>
#include <cstdint>
#include <sys/time.h>

#include "rtsp.h"
#include "ringQueue.h"

extern ringQueue *rQueue;

void create_rtsp_server(void)
{
    TaskScheduler *scheduler;
    UsageEnvironment *env;
    RTSPServer *rtspServer;

    scheduler = BasicTaskScheduler::createNew();
    env = BasicUsageEnvironment::createNew(*scheduler);
    rtspServer = RTSPServer::createNew(*env, 8554);
    if (rtspServer == NULL)
    {
        *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
        return;
    }

    ServerMediaSession *sms = ServerMediaSession::createNew(*env);
    sms->addSubsession(H264LiveServerMediaSession::createNew(*env, true));
    rtspServer->addServerMediaSession(sms);

    char *url = rtspServer->rtspURL(sms);
    *env << "Play the stream using url " << url << "\n";
    delete[] url;

    env->taskScheduler().doEventLoop(); // enter the event loop (never returns)
}

// H264LiveServerMediaSession implementation:
H264LiveServerMediaSession *H264LiveServerMediaSession::createNew(UsageEnvironment &env, Boolean reuseFirstSource)
{
    return new H264LiveServerMediaSession(env, reuseFirstSource);
}

H264LiveServerMediaSession::H264LiveServerMediaSession(UsageEnvironment &env, Boolean reuseFirstSource)
    : OnDemandServerMediaSubsession(env, reuseFirstSource)
{
    fAuxSDPLine = NULL;
    fDoneFlag = 0;
    fDummyRTPSink = NULL;
}

H264LiveServerMediaSession::~H264LiveServerMediaSession()
{
    delete[] fAuxSDPLine;
}

static void afterPlayingDummy(void *clientData)
{
    H264LiveServerMediaSession *subsess = (H264LiveServerMediaSession *)clientData;
    subsess->afterPlayingDummy1();
}

void H264LiveServerMediaSession::afterPlayingDummy1()
{
    envir().taskScheduler().unscheduleDelayedTask(nextTask());
    setDoneFlag();
}

static void checkForAuxSDPLine(void *clientData)
{
    H264LiveServerMediaSession *subsess = (H264LiveServerMediaSession *)clientData;
    subsess->checkForAuxSDPLine1();
}

void H264LiveServerMediaSession::checkForAuxSDPLine1()
{
    nextTask() = NULL;

    char const *dasl;
    if (fAuxSDPLine != NULL)
    {
        setDoneFlag();
    }
    else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL)
    {
        fAuxSDPLine = strDup(dasl);
        fDummyRTPSink = NULL;
        setDoneFlag();
    }
    else if (!fDoneFlag)
    {
        // Try again after a brief delay:
        int uSecsToDelay = 100000; // 100 ms
        nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay,
                                                                 (TaskFunc *)checkForAuxSDPLine, this);
    }
}

char const *H264LiveServerMediaSession::getAuxSDPLine(RTPSink *rtpSink, FramedSource *inputSource)
{
    if (fAuxSDPLine != NULL)
    {
        return fAuxSDPLine;
    }

    // For a live source the SPS/PPS are not known up front, so start a dummy
    // sink and poll until the aux SDP line becomes available:
    if (fDummyRTPSink == NULL)
    {
        fDummyRTPSink = rtpSink;
        fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this);
        checkForAuxSDPLine(this);
    }

    envir().taskScheduler().doEventLoop(&fDoneFlag);
    return fAuxSDPLine;
}

FramedSource *H264LiveServerMediaSession::createNewStreamSource(unsigned clientSessionId, unsigned &estBitrate)
{
    estBitrate = 5000; // kbps, estimate

    H264VideoStreamSource *videoSource = H264VideoStreamSource::createNew(envir());
    if (videoSource == NULL)
    {
        return NULL;
    }
    // The framer parses the raw byte stream into discrete NAL units:
    return H264VideoStreamFramer::createNew(envir(), videoSource);
}

RTPSink *H264LiveServerMediaSession::createNewRTPSink(Groupsock *rtpGroupsock,
                                                      unsigned char rtpPayloadTypeIfDynamic,
                                                      FramedSource *inputSource)
{
    // OutPacketBuffer::maxSize = 2000000;
    return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
}

// H264VideoStreamSource implementation:
H264VideoStreamSource *H264VideoStreamSource::createNew(UsageEnvironment &env)
{
    return new H264VideoStreamSource(env);
}

H264VideoStreamSource::H264VideoStreamSource(UsageEnvironment &env) : FramedSource(env)
{
}

H264VideoStreamSource::~H264VideoStreamSource()
{
}

unsigned H264VideoStreamSource::maxFrameSize() const
{
    return 100000; // sets fMaxSize (the largest frame we expect to deliver)
}

void H264VideoStreamSource::getNextFrame0(void *clientData)
{
    ((H264VideoStreamSource *)clientData)->doGetNextFrame();
}

void H264VideoStreamSource::doGetNextFrame()
{
    rQueue_data e;
    static uint8_t buffer_data[1024 * 512] = {0};

    // The sink has not asked for data yet:
    if (!isCurrentlyAwaitingData())
    {
        std::cout << "isCurrentlyAwaitingData" << std::endl;
        return;
    }

    // Take one encoded frame out of the queue:
    e.buffer = buffer_data;
    e.len = sizeof(buffer_data);
    if (rQueue_de(rQueue, &e) == -1)
    {
        // Queue is empty: poll again in 10 ms instead of calling
        // afterGetting() with no new data.
        envir().taskScheduler().scheduleDelayedTask(10000, getNextFrame0, this);
        return;
    }

    // Truncate if the frame is larger than the sink's buffer:
    if (e.len > fMaxSize)
    {
        fFrameSize = fMaxSize;
        fNumTruncatedBytes = e.len - fMaxSize;
    }
    else
    {
        fFrameSize = e.len;
    }

    gettimeofday(&fPresentationTime, NULL);
    memcpy(fTo, buffer_data, fFrameSize);
    FramedSource::afterGetting(this);
}

void H264VideoStreamSource::doStopGettingFrames()
{
    std::cout << "doStopGettingFrames" << std::endl;
}
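To tie everything together, a minimal, hypothetical main() might look like the following: one thread captures, encodes, and enqueues frames, while create_rtsp_server() blocks in the Live555 event loop. capture_one_frame() is a stand-in for the camera/encoder code, which the original post does not show, and rQueue_create/rQueue_en come from the interface sketch above:

#include <cstdint>
#include <cstddef>
#include <thread>

#include "rtsp.h"
#include "ringQueue.h"

ringQueue *rQueue; // consumed by H264VideoStreamSource::doGetNextFrame()

// Stand-in for the real camera capture + H264 encode step (not shown in the
// original post). It should fill 'buf' with one encoded frame and return its
// length in bytes.
extern size_t capture_one_frame(uint8_t *buf, size_t cap);

int main()
{
    rQueue = rQueue_create(32); // assumed constructor from the sketch above

    std::thread producer([]() {
        static uint8_t frame[1024 * 512];
        for (;;)
        {
            rQueue_data e;
            e.buffer = frame;
            e.len = capture_one_frame(frame, sizeof(frame));
            rQueue_en(rQueue, &e); // hand the frame to the RTSP side
        }
    });

    create_rtsp_server(); // blocks in doEventLoop()
    producer.join();
    return 0;
}

Because ServerMediaSession::createNew() is called without a stream name, rtspURL() prints a URL with an empty path; the stream can then be opened with, e.g., ffplay rtsp://<server-ip>:8554/ or VLC.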

Source: https://blog.csdn.net/qq_42161913/article/details/131881574

