1
0
mirror of https://github.com/qTox/qTox.git synced 2024-03-22 14:00:36 +08:00

Merge branch 'ffmpeg'

This commit is contained in:
tux3 2015-06-03 11:54:53 +02:00
commit e9e41a302c
No known key found for this signature in database
GPG Key ID: 7E086DD661263264
40 changed files with 1984 additions and 1275 deletions

View File

@ -36,6 +36,8 @@ FORMS += \
CONFIG += c++11
QMAKE_CXXFLAGS += -fno-exceptions
# Rules for creating/updating {ts|qm}-files
include(translations/i18n.pri)
# Build all the qm files now, to make RCC happy
@ -130,9 +132,10 @@ contains(DEFINES, QTOX_PLATFORM_EXT) {
win32 {
RC_FILE = windows/qtox.rc
LIBS += -L$$PWD/libs/lib -ltoxav -ltoxcore -ltoxencryptsave -ltoxdns -lsodium -lvpx -lpthread
LIBS += -L$$PWD/libs/lib -lopencv_core249 -lopencv_highgui249 -lopencv_imgproc249 -lOpenAL32 -lopus
LIBS += -lopengl32 -lole32 -loleaut32 -luuid -lvfw32 -lws2_32 -liphlpapi -lz
LIBS += -L$$PWD/libs/lib -lavformat -lavdevice -lavcodec -lavutil -lswscale -lOpenAL32 -lopus
LIBS += -lopengl32 -lole32 -loleaut32 -lvfw32 -lws2_32 -liphlpapi -lz -luuid
LIBS += -lqrencode
LIBS += -lstrmiids # For DirectShow
contains(DEFINES, QTOX_FILTER_AUDIO) {
contains(STATICPKG, YES) {
LIBS += -Wl,-Bstatic -lfilteraudio
@ -146,27 +149,26 @@ win32 {
ICON = img/icons/qtox.icns
QMAKE_INFO_PLIST = osx/info.plist
QMAKE_MACOSX_DEPLOYMENT_TARGET = 10.7
LIBS += -L$$PWD/libs/lib/ -ltoxcore -ltoxav -ltoxencryptsave -ltoxdns -lsodium -lvpx -lopus -framework OpenAL -lopencv_core -lopencv_highgui -mmacosx-version-min=10.7
LIBS += -L$$PWD/libs/lib/ -ltoxcore -ltoxav -ltoxencryptsave -ltoxdns -lsodium -lvpx -lopus -framework OpenAL -lavformat -lavdevice -lavcodec -lavutil -lswscale -mmacosx-version-min=10.7
LIBS += -lqrencode
contains(DEFINES, QTOX_PLATFORM_EXT) { LIBS += -framework IOKit -framework CoreFoundation }
contains(DEFINES, QTOX_FILTER_AUDIO) { LIBS += -lfilteraudio }
} else {
android {
LIBS += -ltoxcore -ltoxav -ltoxencryptsave -ltoxdns
LIBS += -lopencv_videoio -lopencv_imgcodecs -lopencv_highgui -lopencv_imgproc -lopencv_androidcamera
LIBS += -llibjpeg -llibwebp -llibpng -llibtiff -llibjasper -lIlmImf -lopencv_core
LIBS += -llibjpeg -llibwebp -llibpng -llibtiff -llibjasper -lIlmImf
LIBS += -lopus -lvpx -lsodium -lopenal
} else {
# If we're building a package, static link libtox[core,av] and libsodium, since they are not provided by any package
contains(STATICPKG, YES) {
target.path = /usr/bin
INSTALLS += target
LIBS += -L$$PWD/libs/lib/ -lopus -lvpx -lopenal -Wl,-Bstatic -ltoxcore -ltoxav -ltoxencryptsave -ltoxdns -lsodium -lopencv_highgui -lopencv_imgproc -lopencv_core -lz -Wl,-Bdynamic
LIBS += -L$$PWD/libs/lib/ -lopus -lvpx -lopenal -Wl,-Bstatic -ltoxcore -ltoxav -ltoxencryptsave -ltoxdns -lsodium -lavformat -lavdevice -lavcodec -lavutil -lswscale -lz -Wl,-Bdynamic
LIBS += -Wl,-Bstatic -ljpeg -ltiff -lpng -ljasper -lIlmImf -lIlmThread -lIex -ldc1394 -lraw1394 -lHalf -lz -llzma -ljbig
LIBS += -Wl,-Bdynamic -lv4l1 -lv4l2 -lavformat -lavcodec -lavutil -lswscale -lusb-1.0
LIBS += -lqrencode
} else {
LIBS += -L$$PWD/libs/lib/ -ltoxcore -ltoxav -ltoxencryptsave -ltoxdns -lvpx -lsodium -lopenal -lopencv_core -lopencv_highgui -lopencv_imgproc
LIBS += -L$$PWD/libs/lib/ -ltoxcore -ltoxav -ltoxencryptsave -ltoxdns -lvpx -lsodium -lopenal -lavformat -lavdevice -lavcodec -lavutil -lswscale
LIBS += -lqrencode
}
@ -183,7 +185,7 @@ win32 {
}
contains(JENKINS, YES) {
LIBS = ./libs/lib/libtoxav.a ./libs/lib/libvpx.a ./libs/lib/libopus.a ./libs/lib/libtoxdns.a ./libs/lib/libtoxencryptsave.a ./libs/lib/libtoxcore.a ./libs/lib/libopenal.a ./libs/lib/libsodium.a ./libs/lib/libfilteraudio.a /usr/lib/libopencv_core.so /usr/lib/libopencv_highgui.so /usr/lib/libopencv_imgproc.so -lX11 -lXss -lqrencode
LIBS = ./libs/lib/libtoxav.a ./libs/lib/libvpx.a ./libs/lib/libopus.a ./libs/lib/libtoxdns.a ./libs/lib/libtoxencryptsave.a ./libs/lib/libtoxcore.a ./libs/lib/libopenal.a ./libs/lib/libsodium.a ./libs/lib/libfilteraudio.a -lX11 -lXss -lqrencode
contains(ENABLE_SYSTRAY_UNITY_BACKEND, YES) {
LIBS += -lgobject-2.0 -lappindicator -lgtk-x11-2.0
}
@ -358,7 +360,7 @@ contains(ENABLE_SYSTRAY_GTK_BACKEND, NO) {
src/misc/qrwidget.h \
src/widget/systemtrayicon_private.h
SOURCES += \
SOURCES += \
src/widget/form/addfriendform.cpp \
src/widget/form/settingswidget.cpp \
src/widget/form/settings/generalform.cpp \
@ -419,6 +421,22 @@ contains(ENABLE_SYSTRAY_GTK_BACKEND, NO) {
src/widget/genericchatroomwidget.cpp
}
win32 {
HEADERS += \
src/platform/camera/directshow.h
SOURCES += \
src/platform/camera/directshow.cpp
}
unix {
HEADERS += \
src/platform/camera/v4l2.h
SOURCES += \
src/platform/camera/v4l2.cpp
}
SOURCES += \
src/audio.cpp \
src/historykeeper.cpp \
@ -430,9 +448,6 @@ SOURCES += \
src/misc/db/genericddinterface.cpp \
src/misc/db/plaindb.cpp \
src/misc/db/encrypteddb.cpp \
src/video/camera.cpp \
src/video/cameraworker.cpp \
src/video/netvideosource.cpp \
src/video/videoframe.cpp \
src/widget/gui.cpp \
src/toxme.cpp \
@ -449,9 +464,11 @@ SOURCES += \
src/widget/tool/toolboxgraphicsitem.cpp \
src/widget/tool/flyoutoverlaywidget.cpp \
src/widget/form/settings/verticalonlyscroller.cpp \
src/video/cameradevice.cpp \
src/video/camerasource.cpp \
src/video/corevideosource.cpp \
src/core/toxid.cpp
HEADERS += \
src/audio.h \
src/core/core.h \
@ -467,11 +484,8 @@ HEADERS += \
src/misc/db/genericddinterface.h \
src/misc/db/plaindb.h \
src/misc/db/encrypteddb.h \
src/video/camera.h \
src/video/cameraworker.h \
src/video/videoframe.h \
src/video/videosource.h \
src/video/netvideosource.h \
src/widget/gui.h \
src/toxme.h \
src/profilelocker.h \
@ -482,4 +496,8 @@ HEADERS += \
src/widget/tool/toolboxgraphicsitem.h \
src/widget/tool/flyoutoverlaywidget.h \
src/widget/form/settings/verticalonlyscroller.h \
src/video/cameradevice.h \
src/video/camerasource.h \
src/video/corevideosource.h \
src/video/videomode.h \
src/core/toxid.h

View File

@ -52,8 +52,8 @@ QThread* Core::coreThread{nullptr};
#define MAX_GROUP_MESSAGE_LEN 1024
Core::Core(Camera* cam, QThread *CoreThread, QString loadPath) :
tox(nullptr), toxav(nullptr), camera(cam), loadPath(loadPath), ready{false}
Core::Core(QThread *CoreThread, QString loadPath) :
tox(nullptr), toxav(nullptr), loadPath(loadPath), ready{false}
{
qDebug() << "loading Tox from" << loadPath;
@ -76,10 +76,7 @@ Core::Core(Camera* cam, QThread *CoreThread, QString loadPath) :
calls[i].active = false;
calls[i].alSource = 0;
calls[i].sendAudioTimer = new QTimer();
calls[i].sendVideoTimer = new QTimer();
calls[i].sendAudioTimer->moveToThread(coreThread);
calls[i].sendVideoTimer->moveToThread(coreThread);
connect(calls[i].sendVideoTimer, &QTimer::timeout, [this,i](){sendCallVideo(i);});
}
// OpenAL init

View File

@ -30,11 +30,11 @@
#include "toxid.h"
template <typename T> class QList;
class Camera;
class QTimer;
class QString;
class CString;
class VideoSource;
class VideoFrame;
#ifdef QTOX_FILTER_AUDIO
class AudioFilterer;
#endif
@ -45,7 +45,7 @@ class Core : public QObject
public:
enum PasswordType {ptMain = 0, ptHistory, ptCounter};
explicit Core(Camera* cam, QThread* coreThread, QString initialLoadPath);
explicit Core(QThread* coreThread, QString initialLoadPath);
static Core* getInstance(); ///< Returns the global widget's Core instance
~Core();
@ -274,7 +274,7 @@ private:
static void playAudioBuffer(ALuint alSource, const int16_t *data, int samples,
unsigned channels, int sampleRate);
static void playCallVideo(void *toxav, int32_t callId, const vpx_image_t* img, void *user_data);
void sendCallVideo(int callId);
static void sendCallVideo(int callId, ToxAv* toxav, std::shared_ptr<VideoFrame> frame);
bool checkConnection();
@ -292,7 +292,6 @@ private:
Tox* tox;
ToxAv* toxav;
QTimer *toxTimer, *fileTimer; //, *saveTimer;
Camera* camera;
QString loadPath; // meaningless after start() is called
int dhtServerId;
static ToxCall calls[TOXAV_MAX_CALLS];

View File

@ -15,7 +15,9 @@
*/
#include "core.h"
#include "src/video/camera.h"
#include "src/video/camerasource.h"
#include "src/video/corevideosource.h"
#include "src/video/videoframe.h"
#include "src/audio.h"
#ifdef QTOX_FILTER_AUDIO
#include "src/audiofilterer.h"
@ -71,12 +73,13 @@ void Core::prepareCall(uint32_t friendId, int32_t callId, ToxAv* toxav, bool vid
calls[callId].sendAudioTimer->setSingleShot(true);
connect(calls[callId].sendAudioTimer, &QTimer::timeout, [=](){sendCallAudio(callId,toxav);});
calls[callId].sendAudioTimer->start();
calls[callId].sendVideoTimer->setInterval(50);
calls[callId].sendVideoTimer->setSingleShot(true);
if (calls[callId].videoEnabled)
{
calls[callId].sendVideoTimer->start();
Camera::getInstance()->subscribe();
calls[callId].videoSource = new CoreVideoSource;
calls[callId].camera = new CameraSource;
calls[callId].camera->subscribe();
connect(calls[callId].camera, &VideoSource::frameAvailable,
[=](std::shared_ptr<VideoFrame> frame){sendCallVideo(callId,toxav,frame);});
}
#ifdef QTOX_FILTER_AUDIO
@ -109,17 +112,20 @@ void Core::onAvMediaChange(void* toxav, int32_t callId, void* core)
if (cap == (av_VideoEncoding|av_VideoDecoding)) // Video call
{
Camera::getInstance()->subscribe();
calls[callId].videoEnabled = true;
calls[callId].sendVideoTimer->start();
emit static_cast<Core*>(core)->avMediaChange(friendId, callId, true);
calls[callId].videoSource = new CoreVideoSource;
calls[callId].camera = new CameraSource;
calls[callId].camera->subscribe();
calls[callId].videoEnabled = true;
}
else // Audio call
{
calls[callId].videoEnabled = false;
calls[callId].sendVideoTimer->stop();
Camera::getInstance()->unsubscribe();
emit static_cast<Core*>(core)->avMediaChange(friendId, callId, false);
calls[callId].videoEnabled = false;
delete calls[callId].camera;
calls[callId].camera = nullptr;
calls[callId].videoSource->setDeleteOnClose(true);
calls[callId].videoSource = nullptr;
}
return;
@ -226,9 +232,13 @@ void Core::cleanupCall(int32_t callId)
calls[callId].active = false;
disconnect(calls[callId].sendAudioTimer,0,0,0);
calls[callId].sendAudioTimer->stop();
calls[callId].sendVideoTimer->stop();
if (calls[callId].videoEnabled)
Camera::getInstance()->unsubscribe();
{
delete calls[callId].camera;
calls[callId].camera = nullptr;
calls[callId].videoSource->setDeleteOnClose(true);
calls[callId].videoSource = nullptr;
}
Audio::unsuscribeInput();
toxav_kill_transmission(Core::getInstance()->toxav, callId);
@ -314,37 +324,35 @@ void Core::playCallVideo(void*, int32_t callId, const vpx_image_t* img, void *us
if (!calls[callId].active || !calls[callId].videoEnabled)
return;
calls[callId].videoSource.pushVPXFrame(img);
calls[callId].videoSource->pushFrame(img);
}
void Core::sendCallVideo(int32_t callId)
void Core::sendCallVideo(int32_t callId, ToxAv* toxav, std::shared_ptr<VideoFrame> vframe)
{
if (!calls[callId].active || !calls[callId].videoEnabled)
return;
vpx_image frame = camera->getLastFrame().createVpxImage();
if (frame.w && frame.h)
// This frame shares vframe's buffers, we don't call vpx_img_free but just delete it
vpx_image* frame = vframe->toVpxImage();
if (frame->fmt == VPX_IMG_FMT_NONE)
{
int result;
if ((result = toxav_prepare_video_frame(toxav, callId, videobuf, videobufsize, &frame)) < 0)
{
qDebug() << QString("toxav_prepare_video_frame: error %1").arg(result);
vpx_img_free(&frame);
calls[callId].sendVideoTimer->start();
return;
}
if ((result = toxav_send_video(toxav, callId, (uint8_t*)videobuf, result)) < 0)
qDebug() << QString("toxav_send_video error: %1").arg(result);
vpx_img_free(&frame);
}
else
{
qDebug("sendCallVideo: Invalid frame (bad camera ?)");
qWarning() << "Invalid frame";
delete frame;
return;
}
calls[callId].sendVideoTimer->start();
int result;
if ((result = toxav_prepare_video_frame(toxav, callId, videobuf, videobufsize, frame)) < 0)
{
qDebug() << QString("toxav_prepare_video_frame: error %1").arg(result);
delete frame;
return;
}
if ((result = toxav_send_video(toxav, callId, (uint8_t*)videobuf, result)) < 0)
qDebug() << QString("toxav_send_video error: %1").arg(result);
delete frame;
}
void Core::micMuteToggle(int32_t callId)
@ -412,9 +420,9 @@ void Core::onAvEnd(void* _toxav, int32_t call_index, void* core)
}
qDebug() << QString("AV end from %1").arg(friendId);
cleanupCall(call_index);
emit static_cast<Core*>(core)->avEnd(friendId, call_index);
cleanupCall(call_index);
}
void Core::onAvRinging(void* _toxav, int32_t call_index, void* core)
@ -452,9 +460,9 @@ void Core::onAvRequestTimeout(void* _toxav, int32_t call_index, void* core)
}
qDebug() << QString("AV request timeout with %1").arg(friendId);
cleanupCall(call_index);
emit static_cast<Core*>(core)->avRequestTimeout(friendId, call_index);
cleanupCall(call_index);
}
void Core::onAvPeerTimeout(void* _toxav, int32_t call_index, void* core)
@ -469,9 +477,9 @@ void Core::onAvPeerTimeout(void* _toxav, int32_t call_index, void* core)
}
qDebug() << QString("AV peer timeout with %1").arg(friendId);
cleanupCall(call_index);
emit static_cast<Core*>(core)->avPeerTimeout(friendId, call_index);
cleanupCall(call_index);
}
@ -593,7 +601,7 @@ void Core::playAudioBuffer(ALuint alSource, const int16_t *data, int samples, un
VideoSource *Core::getVideoSourceFromCall(int callNumber)
{
return &calls[callNumber].videoSource;
return calls[callNumber].videoSource;
}
void Core::joinGroupCall(int groupId)

View File

@ -3,7 +3,6 @@
#include <QHash>
#include <tox/toxav.h>
#include "src/video/netvideosource.h"
#if defined(__APPLE__) && defined(__MACH__)
#include <OpenAL/al.h>
@ -14,11 +13,13 @@
#endif
class QTimer;
class CoreVideoSource;
class CameraSource;
struct ToxCall
{
ToxAvCSettings codecSettings;
QTimer *sendAudioTimer, *sendVideoTimer;
QTimer *sendAudioTimer;
int32_t callId;
uint32_t friendId;
bool videoEnabled;
@ -26,7 +27,8 @@ struct ToxCall
bool muteMic;
bool muteVol;
ALuint alSource;
NetVideoSource videoSource;
CoreVideoSource* videoSource;
CameraSource* camera;
};
struct ToxGroupCall

View File

@ -55,7 +55,6 @@ private:
private:
static QMutex fileSendMutex;
static QHash<uint64_t, ToxFile> fileMap;
/// TODO: Replace the two queues by a hash map uint64_t -> unique_ptr<ToxFile>
};
#endif // COREFILE_H

View File

@ -29,7 +29,6 @@
#include <QFontDatabase>
#include <QMutexLocker>
#include <QProcess>
#include <opencv2/core/core_c.h>
#include <sodium.h>

View File

@ -314,6 +314,7 @@ void Settings::load()
s.endGroup();
s.beginGroup("Video");
videoDev = s.value("videoDev", "").toString();
camVideoRes = s.value("camVideoRes",QSize()).toSize();
s.endGroup();
@ -479,6 +480,7 @@ void Settings::saveGlobal(QString path)
s.endGroup();
s.beginGroup("Video");
s.setValue("videoDev", videoDev);
s.setValue("camVideoRes",camVideoRes);
s.endGroup();
}
@ -1124,6 +1126,16 @@ void Settings::setInDev(const QString& deviceSpecifier)
inDev = deviceSpecifier;
}
QString Settings::getVideoDev() const
{
return videoDev;
}
void Settings::setVideoDev(const QString& deviceSpecifier)
{
videoDev = deviceSpecifier;
}
QString Settings::getOutDev() const
{
return outDev;

View File

@ -149,6 +149,9 @@ public:
bool getFilterAudio() const;
void setFilterAudio(bool newValue);
QString getVideoDev() const;
void setVideoDev(const QString& deviceSpecifier);
QSize getCamVideoRes() const;
void setCamVideoRes(QSize newValue);
@ -347,6 +350,7 @@ private:
bool filterAudio;
// Video
QString videoDev;
QSize camVideoRes;
struct friendProp

View File

@ -1,7 +1,7 @@
#include "nexus.h"
#include "src/core/core.h"
#include "misc/settings.h"
#include "video/camera.h"
#include "video/camerasource.h"
#include "widget/gui.h"
#include <QThread>
#include <QDebug>
@ -57,6 +57,7 @@ void Nexus::start()
qRegisterMetaType<ToxFile>("ToxFile");
qRegisterMetaType<ToxFile::FileDirection>("ToxFile::FileDirection");
qRegisterMetaType<Core::PasswordType>("Core::PasswordType");
qRegisterMetaType<std::shared_ptr<VideoFrame>>("std::shared_ptr<VideoFrame>");
// Create GUI
#ifndef Q_OS_ANDROID
@ -67,7 +68,7 @@ void Nexus::start()
QString profilePath = Settings::getInstance().detectProfile();
coreThread = new QThread(this);
coreThread->setObjectName("qTox Core");
core = new Core(Camera::getInstance(), coreThread, profilePath);
core = new Core(coreThread, profilePath);
core->moveToThread(coreThread);
connect(coreThread, &QThread::started, core, &Core::start);

View File

@ -0,0 +1,232 @@
#include "directshow.h"
#include <cstdint>
#include <objbase.h>
#include <strmif.h>
#include <amvideo.h>
#include <dvdmedia.h>
#include <uuids.h>
#include <cassert>
#include <QDebug>
/**
* Most of this file is adapted from libavdevice's dshow.c,
* which retrieves useful information but only exposes it to
* stdout and is not part of the public API for some reason.
*/
/**
 * Converts a NUL-terminated UTF-16 wide string to a heap-allocated UTF-8 string.
 * @param w Wide string to convert.
 * @return Newly-allocated NUL-terminated UTF-8 buffer owned by the caller
 *         (release with delete[]), or nullptr if the conversion fails.
 */
static char *wcharToUtf8(wchar_t *w)
{
    // First call with a null buffer computes the required size, NUL included.
    int len = WideCharToMultiByte(CP_UTF8, 0, w, -1, nullptr, 0, nullptr, nullptr);
    if (len <= 0)
        return nullptr; // conversion failed; new char[0] would not be NUL-terminated
    // new[] throws on allocation failure, so no null check is needed here.
    char *s = new char[len];
    WideCharToMultiByte(CP_UTF8, 0, w, -1, s, len, nullptr, nullptr);
    return s;
}
/**
 * Enumerates DirectShow video capture devices.
 * @return Pairs of {FFmpeg device id ("video=<uuid>"), human-readable name}.
 *         Empty on enumeration failure.
 */
QVector<QPair<QString,QString>> DirectShow::getDeviceList()
{
    IMoniker* m = nullptr;
    QVector<QPair<QString,QString>> devices;

    ICreateDevEnum* devenum = nullptr;
    if (CoCreateInstance(CLSID_SystemDeviceEnum, nullptr, CLSCTX_INPROC_SERVER,
                         IID_ICreateDevEnum, (void**) &devenum) != S_OK)
        return devices;

    IEnumMoniker* classenum = nullptr;
    if (devenum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
                                       (IEnumMoniker**)&classenum, 0) != S_OK)
    {
        devenum->Release(); // was leaked on this path
        return devices;
    }

    while (classenum->Next(1, &m, nullptr) == S_OK)
    {
        VARIANT var;
        IPropertyBag* bag = nullptr;
        LPMALLOC coMalloc = nullptr;
        IBindCtx* bindCtx = nullptr;
        LPOLESTR olestr = nullptr;
        char *devIdString=nullptr, *devHumanName=nullptr;

        if (CoGetMalloc(1, &coMalloc) != S_OK)
            goto fail;
        if (CreateBindCtx(0, &bindCtx) != S_OK)
            goto fail;

        // Get an uuid for the device that we can pass to ffmpeg directly
        if (m->GetDisplayName(bindCtx, nullptr, &olestr) != S_OK)
            goto fail;
        devIdString = wcharToUtf8(olestr);

        // replace ':' with '_' since FFmpeg uses : to delimitate sources
        for (unsigned i = 0; i < strlen(devIdString); i++)
            if (devIdString[i] == ':')
                devIdString[i] = '_';

        // Get a human friendly name/description
        if (m->BindToStorage(nullptr, nullptr, IID_IPropertyBag, (void**)&bag) != S_OK)
            goto fail;

        var.vt = VT_BSTR;
        if (bag->Read(L"FriendlyName", &var, nullptr) != S_OK)
            goto fail;
        devHumanName = wcharToUtf8(var.bstrVal);
        SysFreeString(var.bstrVal); // Read allocated the BSTR; was leaked before

        devices += {QString("video=")+devIdString, devHumanName};

fail:
        if (olestr && coMalloc)
            coMalloc->Free(olestr);
        if (bindCtx)
            bindCtx->Release();
        delete[] devIdString;
        delete[] devHumanName;
        if (bag)
            bag->Release();
        m->Release();
    }
    classenum->Release();
    devenum->Release(); // was never released

    return devices;
}
// Used (by getDeviceModes) to select a device
// so we can list its properties.
// Returns the device's base filter (caller must Release), or nullptr if not found.
static IBaseFilter* getDevFilter(QString devName)
{
    IBaseFilter* devFilter = nullptr;
    devName = devName.mid(6); // Remove the "video="
    IMoniker* m = nullptr;

    ICreateDevEnum* devenum = nullptr;
    if (CoCreateInstance(CLSID_SystemDeviceEnum, nullptr, CLSCTX_INPROC_SERVER,
                         IID_ICreateDevEnum, (void**) &devenum) != S_OK)
        return devFilter;

    IEnumMoniker* classenum = nullptr;
    if (devenum->CreateClassEnumerator(CLSID_VideoInputDeviceCategory,
                                       (IEnumMoniker**)&classenum, 0) != S_OK)
    {
        devenum->Release(); // was leaked on this path
        return devFilter;
    }

    while (classenum->Next(1, &m, nullptr) == S_OK)
    {
        LPMALLOC coMalloc = nullptr;
        IBindCtx* bindCtx = nullptr;
        LPOLESTR olestr = nullptr;
        // Must be initialized: the gotos below jump to fail, which delete[]s it.
        // An uninitialized pointer here was undefined behavior.
        char* devIdString = nullptr;

        if (CoGetMalloc(1, &coMalloc) != S_OK)
            goto fail;
        if (CreateBindCtx(0, &bindCtx) != S_OK)
            goto fail;

        if (m->GetDisplayName(bindCtx, nullptr, &olestr) != S_OK)
            goto fail;
        devIdString = wcharToUtf8(olestr);

        // replace ':' with '_' since FFmpeg uses : to delimitate sources
        for (unsigned i = 0; i < strlen(devIdString); i++)
            if (devIdString[i] == ':')
                devIdString[i] = '_';

        if (devName != devIdString)
            goto fail;

        if (m->BindToObject(0, 0, IID_IBaseFilter, (void**)&devFilter) != S_OK)
            goto fail;

fail:
        if (olestr && coMalloc)
            coMalloc->Free(olestr);
        if (bindCtx)
            bindCtx->Release();
        delete[] devIdString;
        m->Release();
    }
    classenum->Release();
    devenum->Release(); // was never released

    if (!devFilter)
        qWarning() << "Couldn't find the device "<<devName;

    return devFilter;
}
/**
 * Lists the capture modes (max resolution + FPS per capability) exposed by a
 * DirectShow device's capture output pin.
 * @param devName FFmpeg-style device id ("video=...").
 * @return Deduplicated list of modes; empty if the device or pin is not found.
 */
QVector<VideoMode> DirectShow::getDeviceModes(QString devName)
{
    QVector<VideoMode> modes;

    IBaseFilter* devFilter = getDevFilter(devName);
    if (!devFilter)
        return modes;

    // The outer loop tries to find a valid output pin
    GUID category;
    DWORD r2;
    IEnumPins *pins = nullptr;
    IPin *pin;
    if (devFilter->EnumPins(&pins) != S_OK)
    {
        devFilter->Release();
        return modes;
    }
    while (pins->Next(1, &pin, nullptr) == S_OK)
    {
        IKsPropertySet *p = nullptr;
        PIN_INFO info;

        pin->QueryPinInfo(&info);
        info.pFilter->Release();
        if (info.dir != PINDIR_OUTPUT)
            goto next;
        if (pin->QueryInterface(IID_IKsPropertySet, (void**)&p) != S_OK)
            goto next;
        if (p->Get(AMPROPSETID_Pin, AMPROPERTY_PIN_CATEGORY,
                   nullptr, 0, &category, sizeof(GUID), &r2) != S_OK)
            goto next;
        if (!IsEqualGUID(category, PIN_CATEGORY_CAPTURE))
            goto next;

        // Now we can list the video modes for the current pin
        // Prepare for another wall of spaghetti DIRECT SHOW QUALITY code
        {
            IAMStreamConfig *config = nullptr;
            VIDEO_STREAM_CONFIG_CAPS *vcaps = nullptr;
            int size, n;
            if (pin->QueryInterface(IID_IAMStreamConfig, (void**)&config) != S_OK)
                goto next;
            if (config->GetNumberOfCapabilities(&n, &size) != S_OK)
                goto pinend;

            assert(size == sizeof(VIDEO_STREAM_CONFIG_CAPS));
            vcaps = new VIDEO_STREAM_CONFIG_CAPS;

            for (int i=0; i<n; ++i)
            {
                AM_MEDIA_TYPE* type = nullptr;
                if (config->GetStreamCaps(i, &type, (BYTE*)vcaps) != S_OK)
                    goto nextformat;

                if (!IsEqualGUID(type->formattype, FORMAT_VideoInfo)
                 && !IsEqualGUID(type->formattype, FORMAT_VideoInfo2))
                    goto nextformat;

                VideoMode mode;
                mode.width = vcaps->MaxOutputSize.cx;
                mode.height = vcaps->MaxOutputSize.cy;
                mode.FPS = 1e7 / vcaps->MinFrameInterval;
                if (!modes.contains(mode))
                    modes.append(std::move(mode));

nextformat:
                // type may still be null if GetStreamCaps failed above;
                // dereferencing it unconditionally was a null-pointer crash.
                if (type)
                {
                    if (type->pbFormat)
                        CoTaskMemFree(type->pbFormat);
                    CoTaskMemFree(type);
                }
            }
pinend:
            config->Release();
            delete vcaps;
        }
next:
        if (p)
            p->Release();
        pin->Release();
    }
    pins->Release();      // was never released
    devFilter->Release(); // was never released

    return modes;
}

View File

@ -0,0 +1,19 @@
#ifndef DIRECTSHOW_H
#define DIRECTSHOW_H
#include <QString>
#include <QVector>
#include <QPair>
#include "src/video/videomode.h"
#ifndef Q_OS_WIN
#error "This file is only meant to be compiled for Windows targets"
#endif
namespace DirectShow
{
QVector<QPair<QString,QString>> getDeviceList();
QVector<VideoMode> getDeviceModes(QString devName);
}
#endif // DIRECTSHOW_H

View File

@ -0,0 +1,114 @@
#include "v4l2.h"
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
/**
* Most of this file is adapted from libavdevice's v4l2.c,
* which retrieves useful information but only exposes it to
* stdout and is not part of the public API for some reason.
*/
/**
 * Opens a V4L2 device and checks it supports streaming video capture.
 * @param devName Path of the device node, e.g. "/dev/video0".
 * @return A non-negative file descriptor on success, or a NEGATIVE errno-style
 *         code on failure. Callers test `fd < 0`; the previous version returned
 *         positive errno values, which were indistinguishable from valid fds.
 */
static int deviceOpen(QString devName)
{
    struct v4l2_capability cap;
    int fd;
    int err;

    fd = open(devName.toStdString().c_str(), O_RDWR, 0);
    if (fd < 0)
        return -errno;

    if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
        err = errno;
        goto fail;
    }

    // Reject devices that can't capture video or don't support streaming I/O.
    if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
        err = ENODEV;
        goto fail;
    }

    if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
        err = ENOSYS;
        goto fail;
    }

    return fd;

fail:
    close(fd);
    return -err;
}
/**
 * Enumerates the frame rates a device supports for a given size and pixel format.
 * @param fd Open V4L2 device file descriptor.
 * @param w,h Frame size to query.
 * @param pixelFormat V4L2 fourcc pixel format.
 * @return Deduplicated list of integer frame rates (frames per second).
 */
static QVector<unsigned short> getDeviceModeFramerates(int fd, unsigned w, unsigned h, uint32_t pixelFormat)
{
    QVector<unsigned short> rates;
    v4l2_frmivalenum vfve{};
    vfve.pixel_format = pixelFormat;
    vfve.height = h;
    vfve.width = w;

    while(!ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &vfve)) {
        int rate;
        // vfve.type is a frame-INTERVAL type; the previous code compared it
        // against the V4L2_FRMSIZE_TYPE_* (frame-size) constants, which only
        // worked because the two enums happen to share values.
        switch (vfve.type) {
        case V4L2_FRMIVAL_TYPE_DISCRETE:
            // Interval is a fraction of a second; FPS is its reciprocal.
            rate = vfve.discrete.denominator / vfve.discrete.numerator;
            if (!rates.contains(rate))
                rates.append(rate);
            break;
        case V4L2_FRMIVAL_TYPE_CONTINUOUS:
        case V4L2_FRMIVAL_TYPE_STEPWISE:
            // Report only the fastest rate (smallest interval) of the range.
            rate = vfve.stepwise.min.denominator / vfve.stepwise.min.numerator;
            if (!rates.contains(rate))
                rates.append(rate);
        }
        vfve.index++;
    }

    return rates;
}
/**
 * Lists the capture modes (size + FPS combinations) a V4L2 device supports.
 * @param devName Path of the device node, e.g. "/dev/video0".
 * @return Deduplicated list of modes; empty if the device can't be opened.
 */
QVector<VideoMode> v4l2::getDeviceModes(QString devName)
{
    QVector<VideoMode> modes;

    int fd = deviceOpen(devName);
    if (fd < 0)
        return modes;
    v4l2_fmtdesc vfd{};
    vfd.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

    // Outer loop: every supported pixel format...
    while(!ioctl(fd, VIDIOC_ENUM_FMT, &vfd)) {
        vfd.index++;

        v4l2_frmsizeenum vfse{};
        vfse.pixel_format = vfd.pixelformat;

        // ...inner loop: every supported frame size for that format.
        while(!ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &vfse)) {
            VideoMode mode;
            switch (vfse.type) {
            case V4L2_FRMSIZE_TYPE_DISCRETE:
                mode.width = vfse.discrete.width;
                mode.height = vfse.discrete.height;
                break;
            case V4L2_FRMSIZE_TYPE_CONTINUOUS:
            case V4L2_FRMSIZE_TYPE_STEPWISE:
                // For a range of sizes, report only the maximum.
                mode.width = vfse.stepwise.max_width;
                mode.height = vfse.stepwise.max_height;
            }
            QVector<unsigned short> rates = getDeviceModeFramerates(fd, mode.width, mode.height, vfd.pixelformat);
            // Produce one mode entry per supported framerate at this size.
            for (unsigned short rate : rates)
            {
                mode.FPS = rate;
                if (!modes.contains(mode))
                    modes.append(std::move(mode));
            }
            vfse.index++;
        }
    }

    close(fd); // was leaked: deviceOpen's fd was never closed

    return modes;
}

View File

@ -0,0 +1,19 @@
#ifndef V4L2_H
#define V4L2_H
#include <QString>
#include <QVector>
#include <QPair>
#include "src/video/videomode.h"
#ifndef Q_OS_LINUX
#error "This file is only meant to be compiled for Linux targets"
#endif
namespace v4l2
{
QVector<VideoMode> getDeviceModes(QString devName);
}
#endif // V4L2_H

View File

@ -1,113 +0,0 @@
/*
This file is part of qTox, a Qt-based graphical interface for Tox.
This program is libre software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the COPYING file for more details.
*/
#include "camera.h"
#include "src/video/cameraworker.h"
#include <QDebug>
#include <QThread>
#include <QMutexLocker>
Camera* Camera::getInstance()
{
static Camera instance;
return &instance;
}
Camera::Camera()
: refcount(0)
, workerThread(nullptr)
, worker(nullptr)
{
worker = new CameraWorker(0);
workerThread = new QThread();
worker->moveToThread(workerThread);
connect(workerThread, &QThread::started, worker, &CameraWorker::onStart);
connect(workerThread, &QThread::finished, worker, &CameraWorker::deleteLater);
connect(worker, &CameraWorker::newFrameAvailable, this, &Camera::onNewFrameAvailable);
connect(worker, &CameraWorker::resProbingFinished, this, &Camera::resolutionProbingFinished);
connect(worker, &CameraWorker::propProbingFinished, this, [=](int prop, double val) { emit propProbingFinished(Prop(prop), val); } );
workerThread->start();
}
Camera::~Camera()
{
workerThread->exit();
workerThread->deleteLater();
}
void Camera::subscribe()
{
if (refcount++ <= 0)
worker->resume();
}
void Camera::unsubscribe()
{
if (--refcount <= 0)
{
worker->suspend();
refcount = 0;
}
}
void Camera::probeProp(Camera::Prop prop)
{
worker->probeProp(int(prop));
}
void Camera::probeResolutions()
{
worker->probeResolutions();
}
void Camera::setResolution(QSize res)
{
worker->setProp(CV_CAP_PROP_FRAME_WIDTH, res.width());
worker->setProp(CV_CAP_PROP_FRAME_HEIGHT, res.height());
}
QSize Camera::getCurrentResolution()
{
return QSize(worker->getProp(CV_CAP_PROP_FRAME_WIDTH), worker->getProp(CV_CAP_PROP_FRAME_HEIGHT));
}
void Camera::setProp(Camera::Prop prop, double val)
{
worker->setProp(int(prop), val);
}
double Camera::getProp(Camera::Prop prop)
{
return worker->getProp(int(prop));
}
void Camera::onNewFrameAvailable(const VideoFrame &frame)
{
emit frameAvailable(frame);
mutex.lock();
currFrame = frame;
mutex.unlock();
}
VideoFrame Camera::getLastFrame()
{
QMutexLocker lock(&mutex);
return currFrame;
}

View File

@ -1,84 +0,0 @@
/*
This file is part of qTox, a Qt-based graphical interface for Tox.
This program is libre software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the COPYING file for more details.
*/
#ifndef CAMERA_H
#define CAMERA_H
#include <QImage>
#include <QList>
#include <QMutex>
#include "vpx/vpx_image.h"
#include "opencv2/highgui/highgui.hpp"
#include "src/video/videosource.h"
class CameraWorker;
/**
* This class is a wrapper to share a camera's captured video frames
* It allows objects to suscribe and unsuscribe to the stream, starting
* the camera only when needed, and giving access to the last frames
**/
class Camera : public VideoSource
{
Q_OBJECT
public:
enum Prop : int {
BRIGHTNESS = CV_CAP_PROP_BRIGHTNESS,
SATURATION = CV_CAP_PROP_SATURATION,
CONTRAST = CV_CAP_PROP_CONTRAST,
HUE = CV_CAP_PROP_HUE,
WIDTH = CV_CAP_PROP_FRAME_WIDTH,
HEIGHT = CV_CAP_PROP_FRAME_HEIGHT,
};
~Camera();
static Camera* getInstance(); ///< Returns the global widget's Camera instance
VideoFrame getLastFrame();
void setResolution(QSize res);
QSize getCurrentResolution();
void setProp(Prop prop, double val);
double getProp(Prop prop);
void probeProp(Prop prop);
void probeResolutions();
// VideoSource interface
virtual void subscribe();
virtual void unsubscribe();
signals:
void resolutionProbingFinished(QList<QSize> res);
void propProbingFinished(Prop prop, double val);
protected:
Camera();
private:
int refcount; ///< Number of users suscribed to the camera
VideoFrame currFrame;
QMutex mutex;
QThread* workerThread;
CameraWorker* worker;
private slots:
void onNewFrameAvailable(const VideoFrame& frame);
};
#endif // CAMERA_H

309
src/video/cameradevice.cpp Normal file
View File

@ -0,0 +1,309 @@
#include <QDebug>
#include <QApplication>
#include <QDesktopWidget>
extern "C" {
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
}
#include "cameradevice.h"
#include "src/misc/settings.h"
#ifdef Q_OS_WIN
#include "src/platform/camera/directshow.h"
#endif
#ifdef Q_OS_LINUX
#include "src/platform/camera/v4l2.h"
#endif
QHash<QString, CameraDevice*> CameraDevice::openDevices;
QMutex CameraDevice::openDeviceLock, CameraDevice::iformatLock;
AVInputFormat* CameraDevice::iformat{nullptr};
AVInputFormat* CameraDevice::idesktopFormat{nullptr};
// Private constructor: instances are created only through CameraDevice::open,
// which registers them in the openDevices map. A new device starts with a
// refcount of 1, owned by the caller of open().
CameraDevice::CameraDevice(const QString devName, AVFormatContext *context)
    : devName{devName}, context{context}, refcount{1}
{
}
// Opens a device (or returns the already-open instance from the cache) with
// raw avformat options. Returns nullptr on failure.
// NOTE: the whole body runs under openDeviceLock via the goto-out pattern;
// avformat_open_input is therefore serialized across threads.
CameraDevice* CameraDevice::open(QString devName, AVDictionary** options)
{
    openDeviceLock.lock();
    AVFormatContext* fctx = nullptr;
    // Reuse the cached instance if this device is already open.
    CameraDevice* dev = openDevices.value(devName);
    if (dev)
        goto out;
    // Screen-grab pseudo-devices ("x11grab#..."/"gdigrab#...") use the desktop
    // input format; everything else uses the default camera input format.
    AVInputFormat* format;
    if (devName.startsWith("x11grab#"))
    {
        devName = devName.mid(8);
        format = idesktopFormat;
    }
    else if (devName.startsWith("gdigrab#"))
    {
        devName = devName.mid(8);
        format = idesktopFormat;
    }
    else
    {
        format = iformat;
    }
    if (avformat_open_input(&fctx, devName.toStdString().c_str(), format, options)<0)
        goto out;
    // Without stream info the caller couldn't find the video stream.
    if (avformat_find_stream_info(fctx, NULL) < 0)
    {
        avformat_close_input(&fctx);
        goto out;
    }
    dev = new CameraDevice{devName, fctx};
    openDevices[devName] = dev;
out:
    openDeviceLock.unlock();
    return dev;
}
// Convenience overload: opens the device without requesting any particular
// video mode (a zeroed VideoMode means "use the device defaults").
CameraDevice* CameraDevice::open(QString devName)
{
    return open(devName, VideoMode{0,0,0});
}
// Opens a device with a requested video mode, translating the mode into the
// per-platform avformat option dictionary. A falsy mode (all zeroes) means
// "no preference". Returns nullptr on failure.
CameraDevice* CameraDevice::open(QString devName, VideoMode mode)
{
    if (!getDefaultInputFormat())
        return nullptr;

    AVDictionary* options = nullptr;
    // `if (false);` lets every platform branch below be a uniform `else if`,
    // so any combination of the #ifdef blocks compiles.
    if (false);
#ifdef Q_OS_LINUX
    else if (devName.startsWith("x11grab#"))
    {
        QSize screen;
        if (mode)
        {
            screen.setWidth(mode.width);
            screen.setHeight(mode.height);
        }
        else
        {
            screen = QApplication::desktop()->screenGeometry().size();
            // Workaround https://trac.ffmpeg.org/ticket/4574 by chopping 1 px bottom and right
            screen.setWidth(screen.width()-1);
            screen.setHeight(screen.height()-1);
        }
        av_dict_set(&options, "video_size", QString("%1x%2").arg(screen.width()).arg(screen.height()).toStdString().c_str(), 0);
    }
#endif
#ifdef Q_OS_WIN
    else if (iformat->name == QString("dshow") && mode)
    {
        av_dict_set(&options, "video_size", QString("%1x%2").arg(mode.width).arg(mode.height).toStdString().c_str(), 0);
        av_dict_set(&options, "framerate", QString().setNum(mode.FPS).toStdString().c_str(), 0);
    }
#endif
#ifdef Q_OS_LINUX
    else if (iformat->name == QString("video4linux2,v4l2") && mode)
    {
        av_dict_set(&options, "video_size", QString("%1x%2").arg(mode.width).arg(mode.height).toStdString().c_str(), 0);
        av_dict_set(&options, "framerate", QString().setNum(mode.FPS).toStdString().c_str(), 0);
    }
#endif
    else if (mode)
    {
        // A mode was requested but this input format has no mode-setting code.
        qWarning() << "Video mode-setting not implemented for input "<<iformat->name;
        (void)mode;
    }

    CameraDevice* dev = open(devName, &options);
    if (options)
        av_dict_free(&options);
    return dev;
}
/// Re-opens an already-open device by bumping its refcount. Never fails.
void CameraDevice::open()
{
    refcount.fetch_add(1);
}
/// Drops one reference to the device; closes and deletes it when the last
/// reference is gone. Returns true iff "this" was destroyed (callers must
/// not touch the object afterwards).
/// NOTE(review): refcount is decremented outside openDeviceLock — assumes
/// open()/close() calls for one device are serialized by the caller; verify.
bool CameraDevice::close()
{
    if (--refcount <= 0)
    {
        openDeviceLock.lock();
        openDevices.remove(devName);
        openDeviceLock.unlock();
        avformat_close_input(&context);
        delete this; // "this" is invalid from here on
        return true;
    }
    else
    {
        return false;
    }
}
/// Generic device enumeration through avdevice_list_devices.
/// Builds a throwaway AVFormatContext for the default input format, asks
/// libavdevice for its devices, and converts them to (name, description) pairs.
/// Returns an empty vector when the format can't enumerate devices or on error.
QVector<QPair<QString, QString>> CameraDevice::getRawDeviceListGeneric()
{
    QVector<QPair<QString, QString>> devices;

    if (!getDefaultInputFormat())
        return devices;

    // Alloc an input device context
    AVFormatContext* s;
    if (!(s = avformat_alloc_context()))
        return devices;
    if (!iformat->priv_class || !AV_IS_INPUT_DEVICE(iformat->priv_class->category))
    {
        avformat_free_context(s);
        return devices;
    }
    s->iformat = iformat;
    if (s->iformat->priv_data_size > 0)
    {
        s->priv_data = av_mallocz(s->iformat->priv_data_size);
        if (!s->priv_data)
        {
            avformat_free_context(s);
            return devices;
        }
        if (s->iformat->priv_class)
        {
            *(const AVClass**)s->priv_data = s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
        }
    }
    else
    {
        s->priv_data = NULL;
    }

    // List the devices for this context
    AVDeviceInfoList* devlist = nullptr;
    AVDictionary* tmp = nullptr;
    av_dict_copy(&tmp, nullptr, 0);
    if (av_opt_set_dict2(s, &tmp, AV_OPT_SEARCH_CHILDREN) < 0)
    {
        av_dict_free(&tmp);
        avformat_free_context(s);
        return devices; // BUGFIX: used to fall through and use the freed context
    }
    av_dict_free(&tmp);

    avdevice_list_devices(s, &devlist);
    if (!devlist)
    {
        qWarning() << "avdevice_list_devices failed";
        avformat_free_context(s); // BUGFIX: context was leaked here
        return devices;           // BUGFIX: used to dereference the null devlist
    }

    // Convert the list to a QVector
    devices.resize(devlist->nb_devices);
    for (int i=0; i<devlist->nb_devices; i++)
    {
        AVDeviceInfo* dev = devlist->devices[i];
        devices[i].first = dev->device_name;
        devices[i].second = dev->device_description;
    }
    avdevice_free_list_devices(&devlist);
    avformat_free_context(s); // BUGFIX: context was leaked on the success path
    return devices;
}
/// Returns (name, description) pairs for all capture devices:
/// the platform's webcams plus a desktop-capture pseudo-device when
/// a desktop input format is available.
QVector<QPair<QString, QString>> CameraDevice::getDeviceList()
{
    QVector<QPair<QString, QString>> devices;

    if (!getDefaultInputFormat())
        return devices;

    // Empty first branch so the platform cases below can all be "else if"
    if (false);
#ifdef Q_OS_WIN
    else if (iformat->name == QString("dshow"))
        devices = DirectShow::getDeviceList();
#endif
    else
        devices = getRawDeviceListGeneric();

    // Desktop capture names use the "<format>#<target>" convention
    // recognized by open(QString, AVDictionary**)
    if (idesktopFormat)
    {
        if (idesktopFormat->name == QString("x11grab"))
            devices.push_back(QPair<QString,QString>{"x11grab#:0", "Desktop"});
        if (idesktopFormat->name == QString("gdigrab"))
            devices.push_back(QPair<QString,QString>{"gdigrab#desktop", "Desktop"});
    }

    return devices;
}
/// Returns the short name of the default device: the device from the
/// settings when it is still present, otherwise the first enumerated device
/// (or the settings value again if nothing could be enumerated).
QString CameraDevice::getDefaultDeviceName()
{
    QString defaultdev = Settings::getInstance().getVideoDev();

    if (!getDefaultInputFormat())
        return defaultdev;

    const QVector<QPair<QString, QString>> devlist = getDeviceList();

    bool stillAvailable = false;
    for (int i = 0; i < devlist.size() && !stillAvailable; ++i)
        stillAvailable = (devlist[i].first == defaultdev);

    if (stillAvailable || devlist.isEmpty())
        return defaultdev;

    return devlist[0].first;
}
/// Returns the video modes supported by the given device.
/// Only implemented for backends that can enumerate modes (dshow, v4l2);
/// other backends log a warning and return an empty list.
QVector<VideoMode> CameraDevice::getVideoModes(QString devName)
{
    if (false);
#ifdef Q_OS_WIN
    else if (iformat->name == QString("dshow"))
        return DirectShow::getDeviceModes(devName);
#endif
#ifdef Q_OS_LINUX
    else if (iformat->name == QString("video4linux2,v4l2"))
        return v4l2::getDeviceModes(devName);
#endif
    else
        qWarning() << "Video mode listing not implemented for input "<<iformat->name;

    (void)devName; // silences unused-parameter warnings on platforms with no backend
    return {};
}
/// Lazily resolves the platform's webcam input format into CameraDevice::iformat
/// (and the desktop-capture format into idesktopFormat when available).
/// Thread-safe; returns false when no usable input format exists.
bool CameraDevice::getDefaultInputFormat()
{
    QMutexLocker locker(&iformatLock);
    if (iformat)
        return true;

    avdevice_register_all();

    // Desktop capture input formats
#ifdef Q_OS_LINUX
    idesktopFormat = av_find_input_format("x11grab");
#endif
#ifdef Q_OS_WIN
    idesktopFormat = av_find_input_format("gdigrab");
#endif

    // Webcam input formats
#ifdef Q_OS_LINUX
    if ((iformat = av_find_input_format("v4l2")))
        return true;
#endif
#ifdef Q_OS_WIN
    if ((iformat = av_find_input_format("dshow")))
        return true;
    if ((iformat = av_find_input_format("vfwcap")))
        return true; // BUGFIX: missing return made a found vfwcap report failure
#endif
#ifdef Q_OS_OSX
    if ((iformat = av_find_input_format("avfoundation")))
        return true;
    if ((iformat = av_find_input_format("qtkit")))
        return true;
#endif

    qWarning() << "No valid input format found";
    return false;
}

65
src/video/cameradevice.h Normal file
View File

@ -0,0 +1,65 @@
#ifndef CAMERADEVICE_H
#define CAMERADEVICE_H
#include <QHash>
#include <QString>
#include <QMutex>
#include <QVector>
#include <atomic>
#include "videomode.h"
struct AVFormatContext;
struct AVInputFormat;
struct AVDeviceInfoList;
struct AVDictionary;
/// Maintains an FFmpeg context for open camera devices,
/// takes care of sharing the context accross users
/// and closing the camera device when not in use.
/// The device can be opened recursively,
/// and must then be closed recursively
/// Maintains an FFmpeg context for open camera devices,
/// takes care of sharing the context across users
/// and closing the camera device when not in use.
/// The device can be opened recursively,
/// and must then be closed recursively.
class CameraDevice
{
public:
    /// Opens a device, creating a new one if needed
    /// Returns a nullptr if the device couldn't be opened
    static CameraDevice* open(QString devName);
    /// Opens a device, creating a new one if needed
    /// If the device is already open in another mode, the mode
    /// will be ignored and the existing device is used
    /// If the mode does not exist, a new device can't be opened
    /// Returns a nullptr if the device couldn't be opened
    static CameraDevice* open(QString devName, VideoMode mode);
    void open(); ///< Opens the device again. Never fails
    bool close(); ///< Closes the device. Never fails. If returns true, "this" becomes invalid

    /// Returns a list of device names and descriptions
    /// The names are the first part of the pair and can be passed to open(QString)
    static QVector<QPair<QString, QString>> getDeviceList();

    /// Get the list of video modes for a device
    static QVector<VideoMode> getVideoModes(QString devName);

    /// Returns the short name of the default device
    /// This is either the device in the settings
    /// or the system default.
    static QString getDefaultDeviceName();

private:
    CameraDevice(const QString devName, AVFormatContext *context);
    static CameraDevice* open(QString devName, AVDictionary** options);
    static bool getDefaultInputFormat(); ///< Sets CameraDevice::iformat, returns success/failure
    static QVector<QPair<QString, QString> > getRawDeviceListGeneric(); ///< Uses avdevice_list_devices

public:
    const QString devName; ///< Short name of the device
    AVFormatContext* context; ///< Context of the open device, must always be valid

private:
    std::atomic_int refcount; ///< Number of times the device was opened
    static QHash<QString, CameraDevice*> openDevices;
    static QMutex openDeviceLock, iformatLock;
    static AVInputFormat* iformat, *idesktopFormat;
};
#endif // CAMERADEVICE_H

294
src/video/camerasource.cpp Normal file
View File

@ -0,0 +1,294 @@
/*
This file is part of qTox, a Qt-based graphical interface for Tox.
This program is libre software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the COPYING file for more details.
*/
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}
#include <QMutexLocker>
#include <QDebug>
#include <QtConcurrent/QtConcurrentRun>
#include <memory>
#include <functional>
#include "camerasource.h"
#include "cameradevice.h"
#include "videoframe.h"
/// Opens the camera device chosen in the settings, or the system default.
CameraSource::CameraSource()
    : CameraSource{CameraDevice::getDefaultDeviceName()}
{
}
/// Opens the named device in its default mode (an all-zero VideoMode).
CameraSource::CameraSource(const QString deviceName)
    : CameraSource{deviceName, VideoMode{0,0,0}}
{
}
/// Prepares a source for the named device in the given mode; the device
/// itself is only opened on the first subscribe().
CameraSource::CameraSource(const QString deviceName, VideoMode mode)
    : deviceName{deviceName}, device{nullptr}, mode(mode),
      // BUGFIX: cctxOrig was left uninitialized, so the destructor's
      // "if (cctxOrig) avcodec_close(cctxOrig)" read garbage when the source
      // was destroyed without ever being subscribed.
      cctx{nullptr}, cctxOrig{nullptr}, videoStreamIndex{-1},
      biglock{false}, freelistLock{false}, subscriptions{0}
{
    av_register_all();
    avdevice_register_all();
}
/// Releases all outstanding frames, closes the device once per remaining
/// subscription, and waits for the stream thread to exit.
CameraSource::~CameraSource()
{
    // Fast lock, in case our stream thread is running
    {
        bool expected = false;
        while (!biglock.compare_exchange_weak(expected, true))
            expected = false;
    }

    // Free all remaining VideoFrame
    // Locking must be done precisely this way to avoid races
    for (int i=0; i<freelist.size(); i++)
    {
        std::shared_ptr<VideoFrame> vframe = freelist[i].lock();
        if (!vframe)
            continue;
        vframe->releaseFrame();
    }

    if (cctx)
        avcodec_free_context(&cctx);
    if (cctxOrig)
        avcodec_close(cctxOrig);

    // Undo every outstanding subscription's reference on the device
    for (int i=subscriptions; i; --i)
        device->close();
    device = nullptr;
    // Releasing the lock with device==nullptr makes the stream thread exit
    biglock=false;

    // Synchronize with our stream thread
    while (streamFuture.isRunning())
        QThread::yieldCurrentThread();
}
/// Adds a subscriber. Opens the device and starts the decoding thread on the
/// first subscription; later calls just bump the refcounts.
/// Returns false if the device or its codec couldn't be opened.
bool CameraSource::subscribe()
{
    // Fast lock
    {
        bool expected = false;
        while (!biglock.compare_exchange_weak(expected, true))
            expected = false;
    }

    if (device)
    {
        // Device is already open, just add another reference
        device->open();
        ++subscriptions;
        biglock = false;
        return true;
    }

    // We need to create a new CameraDevice
    AVCodec* codec;
    if (mode)
        device = CameraDevice::open(deviceName, mode);
    else
        device = CameraDevice::open(deviceName);

    if (!device)
    {
        biglock = false;
        return false;
    }

    // Find the first video stream
    for (unsigned i=0; i<device->context->nb_streams; i++)
    {
        if(device->context->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
        {
            videoStreamIndex=i;
            break;
        }
    }
    if (videoStreamIndex == -1)
        goto fail;

    // Get a pointer to the codec context for the video stream
    cctxOrig=device->context->streams[videoStreamIndex]->codec;
    codec=avcodec_find_decoder(cctxOrig->codec_id);
    if(!codec)
        goto fail;

    // Copy context, since we apparently aren't allowed to use the original
    cctx = avcodec_alloc_context3(codec);
    if(avcodec_copy_context(cctx, cctxOrig) != 0)
    {
        avcodec_free_context(&cctx); // BUGFIX: the copied context was leaked here
        goto fail;
    }
    cctx->refcounted_frames = 1;

    // Open codec
    if(avcodec_open2(cctx, codec, nullptr)<0)
    {
        avcodec_free_context(&cctx);
        goto fail;
    }

    if (streamFuture.isRunning())
        qCritical() << "The stream thread is already running! Keeping the current one open.";
    else
        streamFuture = QtConcurrent::run(std::bind(&CameraSource::stream, this));

    // Synchronize with our stream thread
    while (!streamFuture.isRunning())
        QThread::yieldCurrentThread();

    ++subscriptions;
    biglock = false;
    return true;

fail:
    while (!device->close()) {}
    // BUGFIX: device was left pointing at the object close() just deleted,
    // so a second subscribe() attempt dereferenced freed memory. Reset our
    // state so the next attempt starts from scratch.
    device = nullptr;
    cctxOrig = nullptr;
    videoStreamIndex = -1;
    biglock = false;
    return false;
}
/// Removes one subscriber; when the last one leaves, frees all outstanding
/// frames, tears down the codec contexts, closes the device, and waits for
/// the stream thread to exit.
void CameraSource::unsubscribe()
{
    // Fast lock
    {
        bool expected = false;
        while (!biglock.compare_exchange_weak(expected, true))
            expected = false;
    }

    if (!device)
    {
        qWarning() << "Unsubscribing with zero subscriber";
        biglock = false;
        return;
    }

    if (--subscriptions == 0)
    {
        // Free all remaining VideoFrame
        // Locking must be done precisely this way to avoid races
        for (int i=0; i<freelist.size(); i++)
        {
            std::shared_ptr<VideoFrame> vframe = freelist[i].lock();
            if (!vframe)
                continue;
            vframe->releaseFrame();
        }

        // Free our resources and close the device
        videoStreamIndex = -1;
        avcodec_free_context(&cctx);
        avcodec_close(cctxOrig); // cctxOrig belongs to the device's context; only close, don't free
        cctxOrig = nullptr;
        device->close();
        device = nullptr;
        // Releasing the lock with device==nullptr makes the stream thread exit
        biglock = false;

        // Synchronize with our stream thread
        while (streamFuture.isRunning())
            QThread::yieldCurrentThread();
    }
    else
    {
        device->close();
        biglock = false;
    }
}
void CameraSource::stream()
{
auto streamLoop = [=]()
{
AVFrame* frame = av_frame_alloc();
if (!frame)
return;
frame->opaque = nullptr;
AVPacket packet;
if (av_read_frame(device->context, &packet)<0)
return;
// Only keep packets from the right stream;
if (packet.stream_index==videoStreamIndex)
{
// Decode video frame
int frameFinished;
avcodec_decode_video2(cctx, frame, &frameFinished, &packet);
if (!frameFinished)
return;
// Broadcast a new VideoFrame, it takes ownership of the AVFrame
{
bool expected = false;
while (!freelistLock.compare_exchange_weak(expected, true))
expected = false;
}
int freeFreelistSlot = getFreelistSlotLockless();
auto frameFreeCb = std::bind(&CameraSource::freelistCallback, this, freeFreelistSlot);
std::shared_ptr<VideoFrame> vframe = std::make_shared<VideoFrame>(frame, frameFreeCb);
freelist.append(vframe);
freelistLock = false;
emit frameAvailable(vframe);
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
};
forever {
// Fast lock
{
bool expected = false;
while (!biglock.compare_exchange_weak(expected, true))
expected = false;
}
if (!device)
{
biglock = false;
return;
}
streamLoop();
// Give a chance to other functions to pick up the lock if needed
biglock = false;
QThread::yieldCurrentThread();
}
}
/// Called by a dying VideoFrame so we forget its freelist slot and never
/// try to release it again.
void CameraSource::freelistCallback(int freelistIndex)
{
    // Fast lock on the freelist
    for (bool expected = false;
         !freelistLock.compare_exchange_weak(expected, true);
         expected = false) {}

    freelist[freelistIndex].reset();

    freelistLock = false;
}
/// Returns the index of a free (expired) freelist slot, growing the list
/// when none is available. Callers must hold freelistLock.
int CameraSource::getFreelistSlotLockless()
{
    const int oldSize = freelist.size();

    for (int slot = 0; slot < oldSize; ++slot)
    {
        if (freelist[slot].expired())
            return slot;
    }

    // No expired slot: grow by roughly 1.5x (+4) and hand out the first new cell
    freelist.resize(oldSize + (oldSize >> 1) + 4);
    return oldSize;
}

75
src/video/camerasource.h Normal file
View File

@ -0,0 +1,75 @@
/*
This file is part of qTox, a Qt-based graphical interface for Tox.
This program is libre software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the COPYING file for more details.
*/
#ifndef CAMERA_H
#define CAMERA_H
#include <QHash>
#include <QString>
#include <QFuture>
#include <QVector>
#include <atomic>
#include "src/video/videosource.h"
#include "src/video/videomode.h"
class CameraDevice;
struct AVCodecContext;
/**
* This class is a wrapper to share a camera's captured video frames
* It allows objects to suscribe and unsuscribe to the stream, starting
* the camera and streaming new video frames only when needed.
**/
/**
 * This class is a wrapper to share a camera's captured video frames.
 * It allows objects to subscribe and unsubscribe to the stream, starting
 * the camera and streaming new video frames only when needed.
 **/
class CameraSource : public VideoSource
{
    Q_OBJECT
public:
    CameraSource(); ///< Opens the camera device in the settings, or the system default
    CameraSource(const QString deviceName);
    CameraSource(const QString deviceName, VideoMode mode);
    ~CameraSource();

    // VideoSource interface
    virtual bool subscribe() override;
    virtual void unsubscribe() override;

private:
    /// Blocking. Decodes video stream and emits new frames.
    /// Designed to run in its own thread.
    void stream();
    /// All VideoFrames must be deleted or released before we can close the device
    /// or the device will forcibly free them, and then ~VideoFrame() will double free.
    /// In theory very careful coding from our users could ensure all VideoFrames
    /// die before unsubscribing, even the ones currently in flight in the metatype system.
    /// But that's just asking for trouble and mysterious crashes, so we'll just
    /// maintain a freelist and have all VideoFrames tell us when they die so we can forget them.
    void freelistCallback(int freelistIndex);
    /// Get the index of a free slot in the freelist
    /// Callers must hold the freelistLock
    int getFreelistSlotLockless();

private:
    QVector<std::weak_ptr<VideoFrame>> freelist; ///< Frames that need freeing before we can safely close the device
    QFuture<void> streamFuture; ///< Future of the streaming thread
    const QString deviceName; ///< Short name of the device for CameraDevice's open(QString)
    CameraDevice* device; ///< Non-owning pointer to an open CameraDevice, or nullptr
    VideoMode mode; ///< What mode we tried to open the device in, all zeros means default mode
    AVCodecContext* cctx, *cctxOrig; ///< Codec context of the camera's selected video stream
    int videoStreamIndex; ///< A camera can have multiple streams, this is the one we're decoding
    std::atomic_bool biglock, freelistLock; ///< True when locked. Faster than mutexes for video decoding.
    std::atomic_int subscriptions; ///< Remember how many times we subscribed for RAII
};
#endif // CAMERA_H

View File

@ -1,240 +0,0 @@
/*
This file is part of qTox, a Qt-based graphical interface for Tox.
This program is libre software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the COPYING file for more details.
*/
#include "cameraworker.h"
#include <QTimer>
#include <QDebug>
#include <QThread>
/// Worker for the OpenCV capture device at the given index.
/// Registers the metatypes emitted through its queued signals.
CameraWorker::CameraWorker(int index)
    : clock(nullptr)
    , camIndex(index)
    , refCount(0)
{
    qRegisterMetaType<VideoFrame>();
    qRegisterMetaType<QList<QSize>>();
}
CameraWorker::~CameraWorker()
{
    // delete on a null pointer is a no-op, so no guard is needed
    delete clock;
}
/// Slot run once the worker lives in its own thread: lazily creates the
/// ~60 FPS capture timer and signals readiness.
void CameraWorker::onStart()
{
    if (!clock)
    {
        clock = new QTimer(this);
        clock->setSingleShot(false);
        clock->setInterval(1000/60); // ~60 fps polling
        connect(clock, &QTimer::timeout, this, &CameraWorker::doWork);
    }
    emit started();
}
/// Worker-thread implementation of suspend(): stops capturing and drops our reference.
void CameraWorker::_suspend()
{
    qDebug() << "Suspend";
    clock->stop();
    unsubscribe();
}
/// Worker-thread implementation of resume(): reacquires the camera and restarts capture.
void CameraWorker::_resume()
{
    qDebug() << "Resume";
    subscribe();
    clock->start();
}
/// Worker-thread implementation of setProp(): remembers the value so it can be
/// re-applied after reopening, and applies it immediately if the camera is open.
void CameraWorker::_setProp(int prop, double val)
{
    props[prop] = val;

    if (cam.isOpened())
        cam.set(prop, val);
}
/// Worker-thread implementation of getProp(): probes the camera once per
/// property and caches the result (briefly opening the camera if needed).
double CameraWorker::_getProp(int prop)
{
    if (!props.contains(prop))
    {
        subscribe();
        props[prop] = cam.get(prop);
        emit propProbingFinished(prop, props[prop]);
        unsubscribe();
    }

    return props.value(prop);
}
/// Worker-thread implementation of probeResolutions(): tries a list of common
/// resolutions against the camera and records the distinct sizes it accepts.
/// Results are cached; the (possibly cached) list is always emitted.
void CameraWorker::_probeResolutions()
{
    if (resolutions.isEmpty())
    {
        subscribe();

        // probe resolutions
        QList<QSize> propbeRes = {
            QSize( 160, 120), // QQVGA
            QSize( 320, 240), // HVGA
            QSize( 432, 240), // WQVGA
            QSize( 640, 360), // nHD
            QSize( 640, 480),
            QSize( 800, 600),
            QSize( 960, 640),
            QSize(1024, 768), // XGA
            QSize(1280, 720),
            QSize(1280, 1024),
            QSize(1360, 768),
            QSize(1366, 768),
            QSize(1400, 1050),
            QSize(1440, 900),
            QSize(1600, 1200),
            QSize(1680, 1050),
            QSize(1920, 1200),
        };

        for (QSize res : propbeRes)
        {
            cam.set(CV_CAP_PROP_FRAME_WIDTH, res.width());
            cam.set(CV_CAP_PROP_FRAME_HEIGHT, res.height());

            // The driver reports back the resolution it actually selected
            double w = cam.get(CV_CAP_PROP_FRAME_WIDTH);
            double h = cam.get(CV_CAP_PROP_FRAME_HEIGHT);

            //qDebug() << "PROBING:" << res << " got " << w << h;

            if (w>0 && h>0 && !resolutions.contains(QSize(w,h)))
                resolutions.append(QSize(w,h));
        }

        unsubscribe();

        qDebug() << "Resolutions" <<resolutions;
    }

    emit resProbingFinished(resolutions);
}
void CameraWorker::applyProps()
{
if (!cam.isOpened())
return;
for (int prop : props.keys())
cam.set(prop, props.value(prop));
}
/// Adds a reference to the camera, opening it (and restoring cached
/// properties) on the first reference.
void CameraWorker::subscribe()
{
    if (refCount++ == 0)
    {
        if (!cam.isOpened())
        {
            queue.clear();
            bool bSuccess = false;

            try
            {
                bSuccess = cam.open(camIndex);
            }
            catch( cv::Exception& e )
            {
                qDebug() << "OpenCV exception caught: " << e.what();
            }

            if (!bSuccess)
            {
                qDebug() << "Could not open camera";
            }
            applyProps(); // restore props
        }
    }
}
/// Drops a reference to the camera; releases it and clears buffered frames
/// when the last reference is gone (the count is clamped at zero).
void CameraWorker::unsubscribe()
{
    if (--refCount <= 0)
    {
        cam.release();
        frame = cv::Mat3b();
        queue.clear();
        refCount = 0;
    }
}
/// Timer slot: grabs one frame from the camera and broadcasts it.
/// Stops the capture clock if OpenCV throws, to avoid spamming the log.
void CameraWorker::doWork()
{
    if (!cam.isOpened())
        return;

    bool bSuccess = false;

    try
    {
        bSuccess = cam.read(frame);
    }
    catch( cv::Exception& e )
    {
        qDebug() << "OpenCV exception caught: " << e.what();;
        this->clock->stop(); // prevent log spamming
        qDebug() << "stopped clock";
    }

    if (!bSuccess)
    {
        qDebug() << "Cannot read frame";
        return;
    }

    // NOTE(review): fromRawData does not copy; the VideoFrame aliases cv::Mat
    // storage that the next cam.read() overwrites — confirm receivers copy it.
    QByteArray frameData = QByteArray::fromRawData(reinterpret_cast<char*>(frame.data), frame.total() * frame.channels());

    emit newFrameAvailable(VideoFrame{frameData, QSize(frame.cols, frame.rows), VideoFrame::BGR});
}
/// Thread-safe: queues _suspend() onto the worker's thread.
void CameraWorker::suspend()
{
    QMetaObject::invokeMethod(this, "_suspend");
}
/// Thread-safe: queues _resume() onto the worker's thread.
void CameraWorker::resume()
{
    QMetaObject::invokeMethod(this, "_resume");
}
/// Thread-safe: queues _setProp() onto the worker's thread.
void CameraWorker::setProp(int prop, double val)
{
    QMetaObject::invokeMethod(this, "_setProp", Q_ARG(int, prop), Q_ARG(double, val));
}
/// Thread-safe: queues a property probe; the result arrives via propProbingFinished().
void CameraWorker::probeProp(int prop)
{
    QMetaObject::invokeMethod(this, "_getProp", Q_ARG(int, prop));
}
/// Thread-safe: queues a resolution probe; results arrive via resProbingFinished().
void CameraWorker::probeResolutions()
{
    QMetaObject::invokeMethod(this, "_probeResolutions");
}
/// Thread-safe and blocking: synchronously fetches a property value from the
/// worker's thread. Must not be called from the worker thread itself
/// (BlockingQueuedConnection would deadlock).
double CameraWorker::getProp(int prop)
{
    double ret = 0.0;
    QMetaObject::invokeMethod(this, "_getProp", Qt::BlockingQueuedConnection, Q_RETURN_ARG(double, ret), Q_ARG(int, prop));

    return ret;
}

View File

@ -1,78 +0,0 @@
/*
This file is part of qTox, a Qt-based graphical interface for Tox.
This program is libre software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the COPYING file for more details.
*/
#ifndef CAMERAWORKER_H
#define CAMERAWORKER_H
#include <QObject>
#include <QList>
#include <QMap>
#include <QMutex>
#include <QQueue>
#include <QSize>
#include "opencv2/highgui/highgui.hpp"
#include "videosource.h"
class QTimer;
/// Owns an OpenCV capture device and polls it on a timer from its own thread.
/// Public methods are thread-safe wrappers that queue the matching private
/// slot onto the worker's thread.
class CameraWorker : public QObject
{
    Q_OBJECT
public:
    CameraWorker(int index);
    ~CameraWorker();
    void doWork(); ///< Timer slot: grab and broadcast one frame

    void suspend();  ///< Thread-safe
    void resume();   ///< Thread-safe
    void setProp(int prop, double val); ///< Thread-safe
    double getProp(int prop); // blocking call!

public slots:
    void onStart();
    void probeProp(int prop);
    void probeResolutions();

signals:
    void started();
    void newFrameAvailable(const VideoFrame& frame);
    void resProbingFinished(QList<QSize> res);
    void propProbingFinished(int prop, double val);

private slots:
    void _suspend();
    void _resume();
    void _setProp(int prop, double val);
    double _getProp(int prop);
    void _probeResolutions();

private:
    void applyProps();
    void subscribe();   ///< Refcounted open of the OpenCV device
    void unsubscribe(); ///< Refcounted close of the OpenCV device

private:
    QMutex mutex;
    QQueue<cv::Mat3b> queue;
    QTimer* clock;          ///< Capture timer, owned (created in onStart)
    cv::VideoCapture cam;
    cv::Mat3b frame;        ///< Reused frame buffer for cam.read()
    int camIndex;
    QMap<int, double> props;      ///< Cached property values, re-applied on reopen
    QList<QSize> resolutions;     ///< Cached probe results
    int refCount;
};
#endif // CAMERAWORKER_H

View File

@ -0,0 +1,108 @@
extern "C" {
#include <libavcodec/avcodec.h>
}
#include "corevideosource.h"
#include "videoframe.h"
/// Private: only Core may create a CoreVideoSource, since only Core can
/// push frames into it.
CoreVideoSource::CoreVideoSource()
    : subscribers{0}, deleteOnClose{false},
      biglock{false}
{
}
/// Deep-copies the given vpx image into a YUV420P AVFrame and broadcasts it
/// as a VideoFrame. Does nothing when there are no subscribers.
void CoreVideoSource::pushFrame(const vpx_image_t* vpxframe)
{
    // Fast lock
    {
        bool expected = false;
        while (!biglock.compare_exchange_weak(expected, true))
            expected = false;
    }

    // All locals are declared up front so the "goto end" cleanup path
    // never jumps over an initialization
    std::shared_ptr<VideoFrame> vframe;
    AVFrame* avframe;
    uint8_t* buf;
    int width = vpxframe->d_w, height = vpxframe->d_h;
    int dstStride, srcStride, minStride;

    if (subscribers <= 0)
        goto end;

    avframe = av_frame_alloc();
    if (!avframe)
        goto end;
    avframe->width = width;
    avframe->height = height;
    avframe->format = AV_PIX_FMT_YUV420P;

    buf = (uint8_t*)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, width, height));
    if (!buf)
    {
        av_frame_free(&avframe);
        goto end;
    }
    // The VideoFrame frees the buffer through avframe->opaque when released
    avframe->opaque = buf;

    avpicture_fill((AVPicture*)avframe, buf, AV_PIX_FMT_YUV420P, width, height);

    // Copy each plane row by row; strides may differ between the two layouts,
    // so only the common prefix of each row is copied. Chroma planes are half height.
    dstStride=avframe->linesize[0], srcStride=vpxframe->stride[0], minStride=std::min(dstStride, srcStride);
    for (int i=0; i<height; i++)
        memcpy(avframe->data[0]+dstStride*i, vpxframe->planes[0]+srcStride*i, minStride);
    dstStride=avframe->linesize[1], srcStride=vpxframe->stride[1], minStride=std::min(dstStride, srcStride);
    for (int i=0; i<height/2; i++)
        memcpy(avframe->data[1]+dstStride*i, vpxframe->planes[1]+srcStride*i, minStride);
    dstStride=avframe->linesize[2], srcStride=vpxframe->stride[2], minStride=std::min(dstStride, srcStride);
    for (int i=0; i<height/2; i++)
        memcpy(avframe->data[2]+dstStride*i, vpxframe->planes[2]+srcStride*i, minStride);

    vframe = std::make_shared<VideoFrame>(avframe);
    emit frameAvailable(vframe);

end:
    biglock = false;
}
/// Registers one more subscriber. Always succeeds.
bool CoreVideoSource::subscribe()
{
    // Fast lock
    for (bool expected = false;
         !biglock.compare_exchange_weak(expected, true);
         expected = false) {}

    subscribers++;

    biglock = false;
    return true;
}
/// Removes one subscriber. When the last subscriber leaves and
/// setDeleteOnClose(true) was requested, the source deletes itself.
void CoreVideoSource::unsubscribe()
{
    // Fast lock
    {
        bool expected = false;
        while (!biglock.compare_exchange_weak(expected, true))
            expected = false;
    }

    if (--subscribers == 0)
    {
        if (deleteOnClose)
        {
            biglock = false;
            // "this" is gone after this; don't touch members again
            delete this;
            return;
        }
    }
    biglock = false;
}
/// When enabled, the source deletes itself after its last subscriber leaves.
void CoreVideoSource::setDeleteOnClose(bool newstate)
{
    // Fast lock
    for (bool expected = false;
         !biglock.compare_exchange_weak(expected, true);
         expected = false) {}

    deleteOnClose = newstate;

    biglock = false;
}

View File

@ -0,0 +1,35 @@
#ifndef COREVIDEOSOURCE_H
#define COREVIDEOSOURCE_H
#include <vpx/vpx_image.h>
#include <atomic>
#include "videosource.h"
/// A VideoSource that emits frames received by Core
/// A VideoSource that emits frames received by Core
class CoreVideoSource : public VideoSource
{
    Q_OBJECT
public:
    // VideoSource interface
    virtual bool subscribe() override;
    virtual void unsubscribe() override;

private:
    // Only Core should create a CoreVideoSource since
    // only Core can push images to it
    CoreVideoSource();

    /// Makes a copy of the vpx_image_t and emits it as a new VideoFrame
    void pushFrame(const vpx_image_t *frame);
    /// If true, self-delete after the last subscriber is gone
    void setDeleteOnClose(bool newstate);

private:
    std::atomic_int subscribers; ///< Number of subscribers
    std::atomic_bool deleteOnClose; ///< If true, self-delete after the last subscriber is gone
    std::atomic_bool biglock; ///< Fast lock

    friend class Core;
};
#endif // COREVIDEOSOURCE_H

View File

@ -1,62 +0,0 @@
/*
This file is part of qTox, a Qt-based graphical interface for Tox.
This program is libre software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the COPYING file for more details.
*/
#include "netvideosource.h"
#include <QDebug>
#include <vpx/vpx_image.h>
/// Trivial constructor; the source only relays frames pushed into it.
NetVideoSource::NetVideoSource()
{
}
/// Broadcasts an already-packed frame to all receivers.
void NetVideoSource::pushFrame(VideoFrame frame)
{
    emit frameAvailable(frame);
}
/// Converts a planar vpx image to a packed 24-bit YUV VideoFrame and
/// broadcasts it. Chroma samples are replicated per the image's chroma shifts.
void NetVideoSource::pushVPXFrame(const vpx_image *image)
{
    const int dw = image->d_w;
    const int dh = image->d_h;
    const int bpl = image->stride[VPX_PLANE_Y];
    const int cxbpl = image->stride[VPX_PLANE_V];

    VideoFrame frame;
    frame.frameData.resize(dw * dh * 3); //YUV 24bit
    frame.resolution = QSize(dw, dh);
    frame.format = VideoFrame::YUV;

    const uint8_t* yData = image->planes[VPX_PLANE_Y];
    const uint8_t* uData = image->planes[VPX_PLANE_U];
    const uint8_t* vData = image->planes[VPX_PLANE_V];

    // convert from planar to packed
    for (int y = 0; y < dh; ++y)
    {
        for (int x = 0; x < dw; ++x)
        {
            // Chroma planes are subsampled; map the luma coordinate down
            uint8_t Y = yData[x + y * bpl];
            uint8_t U = uData[x/(1 << image->x_chroma_shift) + y/(1 << image->y_chroma_shift)*cxbpl];
            uint8_t V = vData[x/(1 << image->x_chroma_shift) + y/(1 << image->y_chroma_shift)*cxbpl];

            frame.frameData.data()[dw * 3 * y + x * 3 + 0] = Y;
            frame.frameData.data()[dw * 3 * y + x * 3 + 1] = U;
            frame.frameData.data()[dw * 3 * y + x * 3 + 2] = V;
        }
    }

    pushFrame(frame);
}

View File

@ -1,34 +0,0 @@
/*
This file is part of qTox, a Qt-based graphical interface for Tox.
This program is libre software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the COPYING file for more details.
*/
#ifndef NETVIDEOSOURCE_H
#define NETVIDEOSOURCE_H
#include "videosource.h"
struct vpx_image;
/// A VideoSource fed by frames arriving from the network (e.g. decoded call video).
class NetVideoSource : public VideoSource
{
public:
    NetVideoSource();

    void pushFrame(VideoFrame frame); ///< Broadcast a packed frame as-is
    void pushVPXFrame(const vpx_image *image); ///< Convert a planar vpx image, then broadcast

    virtual void subscribe() {}   ///< No resources to manage; subscribing is a no-op
    virtual void unsubscribe() {}
};
#endif // NETVIDEOSOURCE_H

View File

@ -12,43 +12,230 @@
See the COPYING file for more details.
*/
#include <QMutexLocker>
#include <QDebug>
#include <vpx/vpx_image.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}
#include "videoframe.h"
vpx_image_t VideoFrame::createVpxImage() const
VideoFrame::VideoFrame(AVFrame* frame, int w, int h, int fmt, std::function<void()> freelistCallback)
: freelistCallback{freelistCallback},
frameOther{nullptr}, frameYUV420{nullptr}, frameRGB24{nullptr},
width{w}, height{h}, pixFmt{fmt}
{
vpx_image img;
img.w = img.h = img.d_w = img.d_h = 0;
if (pixFmt == AV_PIX_FMT_YUV420P)
frameYUV420 = frame;
else if (pixFmt == AV_PIX_FMT_RGB24)
frameRGB24 = frame;
else
frameOther = frame;
}
if (!isValid())
/// Takes ownership of the frame, deriving size/format from the frame itself.
/// The callback is invoked when this VideoFrame dies, so the owner can forget it.
VideoFrame::VideoFrame(AVFrame* frame, std::function<void()> freelistCallback)
    : VideoFrame{frame, frame->width, frame->height, frame->format, freelistCallback}
{
}
/// Takes ownership of the frame with no death-notification callback.
VideoFrame::VideoFrame(AVFrame* frame)
    : VideoFrame{frame, frame->width, frame->height, frame->format, nullptr}
{
}
/// Notifies the owner (if any) that this frame is dying, then frees every
/// format variant of the frame.
VideoFrame::~VideoFrame()
{
    if (freelistCallback)
        freelistCallback();

    releaseFrameLockless();
}
/// Converts/rescales the frame to RGB24 and wraps it in a QImage.
/// Returns a null QImage on conversion failure.
/// NOTE(review): the QImage aliases frameRGB24's buffer without copying;
/// it dangles if this VideoFrame is released first — confirm callers'
/// lifetimes or copy the image.
QImage VideoFrame::toQImage(QSize size)
{
    if (!convertToRGB24(size))
        return QImage();

    QMutexLocker locker(&biglock);

    return QImage(*frameRGB24->data, frameRGB24->width, frameRGB24->height, *frameRGB24->linesize, QImage::Format_RGB888);
}
vpx_image *VideoFrame::toVpxImage()
{
// libvpx doesn't provide a clean way to reuse an existing external buffer
// so we'll manually fill-in the vpx_image fields and hope for the best.
vpx_image* img = new vpx_image;
memset(img, 0, sizeof(vpx_image));
if (!convertToYUV420())
return img;
const int w = resolution.width();
const int h = resolution.height();
// I420 "It comprises an NxM Y plane followed by (N/2)x(M/2) V and U planes."
// http://fourcc.org/yuv.php#IYUV
vpx_img_alloc(&img, VPX_IMG_FMT_VPXI420, w, h, 1);
for (int y = 0; y < h; ++y)
{
for (int x = 0; x < w; ++x)
{
uint8_t b = frameData.data()[(x + y * w) * 3 + 0];
uint8_t g = frameData.data()[(x + y * w) * 3 + 1];
uint8_t r = frameData.data()[(x + y * w) * 3 + 2];
img.planes[VPX_PLANE_Y][x + y * img.stride[VPX_PLANE_Y]] = ((66 * r + 129 * g + 25 * b) >> 8) + 16;
if (!(x % (1 << img.x_chroma_shift)) && !(y % (1 << img.y_chroma_shift)))
{
const int i = x / (1 << img.x_chroma_shift);
const int j = y / (1 << img.y_chroma_shift);
img.planes[VPX_PLANE_V][i + j * img.stride[VPX_PLANE_V]] = ((112 * r + -94 * g + -18 * b) >> 8) + 128;
img.planes[VPX_PLANE_U][i + j * img.stride[VPX_PLANE_U]] = ((-38 * r + -74 * g + 112 * b) >> 8) + 128;
}
}
}
img->w = img->d_w = width;
img->h = img->d_h = height;
img->fmt = VPX_IMG_FMT_I420;
img->planes[0] = frameYUV420->data[0];
img->planes[1] = frameYUV420->data[1];
img->planes[2] = frameYUV420->data[2];
img->planes[3] = nullptr;
img->stride[0] = frameYUV420->linesize[0];
img->stride[1] = frameYUV420->linesize[1];
img->stride[2] = frameYUV420->linesize[2];
img->stride[3] = frameYUV420->linesize[3];
return img;
}
/// Ensures frameRGB24 holds an RGB24 copy of the frame at the requested size
/// (source size when `size` is empty). Reuses a matching cached conversion.
/// Returns false on allocation or conversion failure.
bool VideoFrame::convertToRGB24(QSize size)
{
    QMutexLocker locker(&biglock);

    // Pick the best available source representation
    AVFrame* sourceFrame;
    if (frameOther)
    {
        sourceFrame = frameOther;
    }
    else if (frameYUV420)
    {
        sourceFrame = frameYUV420;
    }
    else
    {
        qCritical() << "None of the frames are valid! Did someone release us?";
        return false;
    }

    if (size.isEmpty())
    {
        size.setWidth(sourceFrame->width);
        size.setHeight(sourceFrame->height);
    }

    if (frameRGB24)
    {
        // Cached conversion at the right size: nothing to do
        if (frameRGB24->width == size.width() && frameRGB24->height == size.height())
            return true;

        av_free(frameRGB24->opaque);
        av_frame_unref(frameRGB24);
        av_frame_free(&frameRGB24);
    }

    frameRGB24=av_frame_alloc();
    if (!frameRGB24)
    {
        qCritical() << "av_frame_alloc failed";
        return false;
    }

    uint8_t* buf = (uint8_t*)av_malloc(avpicture_get_size(AV_PIX_FMT_RGB24, size.width(), size.height()));
    if (!buf)
    {
        qCritical() << "av_malloc failed";
        av_frame_free(&frameRGB24);
        return false;
    }
    frameRGB24->opaque = buf; // buffer is freed through opaque on release

    avpicture_fill((AVPicture*)frameRGB24, buf, AV_PIX_FMT_RGB24, size.width(), size.height());
    frameRGB24->width = size.width();
    frameRGB24->height = size.height();

    // Bilinear is better for shrinking, bicubic better for upscaling
    int resizeAlgo = size.width()<=width ? SWS_BILINEAR : SWS_BICUBIC;

    SwsContext *swsCtx = sws_getContext(width, height, (AVPixelFormat)pixFmt,
                                        size.width(), size.height(), AV_PIX_FMT_RGB24,
                                        resizeAlgo, nullptr, nullptr, nullptr);
    if (!swsCtx)
    {
        // BUGFIX: sws_getContext can return null for unsupported formats;
        // the result was previously passed to sws_scale unchecked
        qCritical() << "sws_getContext failed";
        av_free(buf);
        av_frame_free(&frameRGB24);
        return false;
    }
    sws_scale(swsCtx, (uint8_t const * const *)sourceFrame->data,
              sourceFrame->linesize, 0, height,
              frameRGB24->data, frameRGB24->linesize);
    sws_freeContext(swsCtx);

    return true;
}
bool VideoFrame::convertToYUV420()
{
QMutexLocker locker(&biglock);
if (frameYUV420)
return true;
AVFrame* sourceFrame;
if (frameOther)
{
sourceFrame = frameOther;
}
else if (frameRGB24)
{
sourceFrame = frameRGB24;
}
else
{
qCritical() << "None of the frames are valid! Did someone release us?";
return false;
}
frameYUV420=av_frame_alloc();
if (!frameYUV420)
{
qCritical() << "av_frame_alloc failed";
return false;
}
uint8_t* buf = (uint8_t*)av_malloc(avpicture_get_size(AV_PIX_FMT_RGB24, width, height));
if (!buf)
{
qCritical() << "av_malloc failed";
av_frame_free(&frameYUV420);
return false;
}
frameYUV420->opaque = buf;
avpicture_fill((AVPicture*)frameYUV420, buf, AV_PIX_FMT_YUV420P, width, height);
SwsContext *swsCtx = sws_getContext(width, height, (AVPixelFormat)pixFmt,
width, height, AV_PIX_FMT_YUV420P,
SWS_BILINEAR, nullptr, nullptr, nullptr);
sws_scale(swsCtx, (uint8_t const * const *)sourceFrame->data,
sourceFrame->linesize, 0, height,
frameYUV420->data, frameYUV420->linesize);
sws_freeContext(swsCtx);
return true;
}
/// Frees all internal buffers and clears the freelistCallback.
/// Clearing the callback first ensures the destructor won't invoke it
/// after an explicit release. Thread-safe (takes biglock).
void VideoFrame::releaseFrame()
{
    QMutexLocker locker(&biglock);
    freelistCallback = nullptr;
    releaseFrameLockless();
}
void VideoFrame::releaseFrameLockless()
{
if (frameOther)
{
av_free(frameOther->opaque);
av_frame_unref(frameOther);
av_frame_free(&frameOther);
}
if (frameYUV420)
{
av_free(frameYUV420->opaque);
av_frame_unref(frameYUV420);
av_frame_free(&frameYUV420);
}
if (frameRGB24)
{
av_free(frameRGB24->opaque);
av_frame_unref(frameRGB24);
av_frame_free(&frameRGB24);
}
}
/// Returns the resolution of the original frame
QSize VideoFrame::getSize()
{
    return QSize(width, height);
}

View File

@ -15,42 +15,57 @@
#ifndef VIDEOFRAME_H
#define VIDEOFRAME_H
#include <QMetaType>
#include <QByteArray>
#include <QSize>
#include <QMutex>
#include <QImage>
#include <functional>
#include "vpx/vpx_image.h"
struct AVFrame;
struct AVCodecContext;
struct vpx_image;
struct VideoFrame
/// VideoFrame takes ownership of an AVFrame* and allows fast conversions to other formats
/// Ownership of all video frame buffers is kept by the VideoFrame, even after conversion
/// All references to the frame data become invalid when the VideoFrame is deleted
/// We try to avoid pixel format conversions as much as possible, at the cost of some memory
/// All methods are thread-safe. If provided freelistCallback will be called by the destructor,
/// unless releaseFrame was called in between.
class VideoFrame
{
enum ColorFormat
{
NONE,
BGR,
YUV,
};
public:
VideoFrame(AVFrame* frame);
VideoFrame(AVFrame* frame, std::function<void()> freelistCallback);
VideoFrame(AVFrame* frame, int w, int h, int fmt, std::function<void()> freelistCallback);
~VideoFrame();
QByteArray frameData;
QSize resolution;
ColorFormat format;
/// Return the size of the original frame
QSize getSize();
VideoFrame() : format(NONE) {}
VideoFrame(QByteArray d, QSize r, ColorFormat f) : frameData(d), resolution(r), format(f) {}
/// Frees all internal buffers and frame data, removes the freelistCallback
/// This makes all converted objects that shares our internal buffers invalid
void releaseFrame();
void invalidate()
{
frameData = QByteArray();
resolution = QSize(-1,-1);
}
/// Converts the VideoFrame to a QImage that shares our internal video buffer
QImage toQImage(QSize size = QSize());
/// Converts the VideoFrame to a vpx_image_t that shares our internal video buffer
/// Free it with operator delete, NOT vpx_img_free
vpx_image* toVpxImage();
bool isValid() const
{
return !frameData.isEmpty() && resolution.isValid() && format != NONE;
}
protected:
bool convertToRGB24(QSize size = QSize());
bool convertToYUV420();
void releaseFrameLockless();
vpx_image_t createVpxImage() const;
private:
// Disable copy. Use a shared_ptr if you need copies.
VideoFrame(const VideoFrame& other)=delete;
VideoFrame& operator=(const VideoFrame& other)=delete;
private:
std::function<void()> freelistCallback;
QMutex biglock;
AVFrame* frameOther, *frameYUV420, *frameRGB24;
int width, height;
int pixFmt;
};
Q_DECLARE_METATYPE(VideoFrame)
#endif // VIDEOFRAME_H

25
src/video/videomode.h Normal file
View File

@ -0,0 +1,25 @@
#ifndef VIDEOMODE_H
#define VIDEOMODE_H
/// Describes a video mode supported by a device
struct VideoMode
{
    unsigned short width, height; ///< Displayed video resolution (NOT frame resolution)
    unsigned short FPS; ///< Max frames per second supported by the device at this resolution

    /// All zeros means a default/unspecified mode
    /// (const-qualified so it can be used on const VideoMode objects)
    operator bool() const
    {
        return width || height || FPS;
    }

    /// Two modes are equal when resolution and frame rate all match
    bool operator==(const VideoMode& other) const
    {
        return width == other.width
            && height == other.height
            && FPS == other.FPS;
    }
};
#endif // VIDEOMODE_H

View File

@ -16,22 +16,25 @@
#define VIDEOSOURCE_H
#include <QObject>
#include <QSize>
#include <QRgb>
#include <memory>
#include "videoframe.h"
class VideoFrame;
/// An abstract source of video frames
/// When it has at least one subscriber the source will emit new video frames
/// Subscribing is recursive, multiple users can subscribe to the same VideoSource
class VideoSource : public QObject
{
Q_OBJECT
public:
virtual void subscribe() = 0;
/// If subscribe successfully opens the source, it will start emitting frameAvailable signals
virtual bool subscribe() = 0;
/// Stop emitting frameAvailable signals, and free associated resources if necessary
virtual void unsubscribe() = 0;
signals:
void frameAvailable(const VideoFrame& frame);
void frameAvailable(std::shared_ptr<VideoFrame> frame);
};
#endif // VIDEOSOURCE_H

View File

@ -401,13 +401,15 @@ void ChatForm::onAvRinging(uint32_t FriendId, int CallId, bool video)
addSystemInfoMessage(tr("Calling to %1").arg(f->getDisplayedName()), ChatMessage::INFO, QDateTime::currentDateTime());
}
void ChatForm::onAvStarting(uint32_t FriendId, int, bool video)
void ChatForm::onAvStarting(uint32_t FriendId, int CallId, bool video)
{
if (FriendId != f->getFriendID())
return;
qDebug() << "onAvStarting";
callId = CallId;
callButton->disconnect();
videoButton->disconnect();
if (video)

View File

@ -16,6 +16,9 @@
#include "ui_avsettings.h"
#include "src/misc/settings.h"
#include "src/audio.h"
#include "src/video/camerasource.h"
#include "src/video/cameradevice.h"
#include "src/widget/videosurface.h"
#if defined(__APPLE__) && defined(__MACH__)
#include <OpenAL/al.h>
@ -25,13 +28,15 @@
#include <AL/al.h>
#endif
#include <QDebug>
#ifndef ALC_ALL_DEVICES_SPECIFIER
#define ALC_ALL_DEVICES_SPECIFIER ALC_DEVICE_SPECIFIER
#endif
AVForm::AVForm() :
GenericForm(tr("Audio/Video"), QPixmap(":/img/settings/av.png")),
CamVideoSurface{nullptr}
camVideoSurface{nullptr}, camera{nullptr}
{
bodyUI = new Ui::AVSettings;
bodyUI->setupUi(this);
@ -42,12 +47,11 @@ AVForm::AVForm() :
bodyUI->filterAudio->setDisabled(true);
#endif
connect(Camera::getInstance(), &Camera::propProbingFinished, this, &AVForm::onPropProbingFinished);
connect(Camera::getInstance(), &Camera::resolutionProbingFinished, this, &AVForm::onResProbingFinished);
auto qcomboboxIndexChanged = (void(QComboBox::*)(const QString&)) &QComboBox::currentIndexChanged;
connect(bodyUI->inDevCombobox, qcomboboxIndexChanged, this, &AVForm::onInDevChanged);
connect(bodyUI->outDevCombobox, qcomboboxIndexChanged, this, &AVForm::onOutDevChanged);
auto qcbxIndexChangedStr = (void(QComboBox::*)(const QString&)) &QComboBox::currentIndexChanged;
auto qcbxIndexChangedInt = (void(QComboBox::*)(int)) &QComboBox::currentIndexChanged;
connect(bodyUI->inDevCombobox, qcbxIndexChangedStr, this, &AVForm::onInDevChanged);
connect(bodyUI->outDevCombobox, qcbxIndexChangedStr, this, &AVForm::onOutDevChanged);
connect(bodyUI->videoDevCombobox, qcbxIndexChangedInt, this, &AVForm::onVideoDevChanged);
connect(bodyUI->filterAudio, &QCheckBox::toggled, this, &AVForm::onFilterAudioToggled);
connect(bodyUI->rescanButton, &QPushButton::clicked, this, [=](){getAudioInDevices(); getAudioOutDevices();});
bodyUI->playbackSlider->setValue(100);
@ -63,73 +67,132 @@ AVForm::AVForm() :
AVForm::~AVForm()
{
delete bodyUI;
if (camera)
{
delete camera;
camera = nullptr;
}
}
void AVForm::present()
{
getAudioOutDevices();
getAudioInDevices();
createVideoSurface();
CamVideoSurface->setSource(Camera::getInstance());
Camera::getInstance()->probeProp(Camera::SATURATION);
Camera::getInstance()->probeProp(Camera::CONTRAST);
Camera::getInstance()->probeProp(Camera::BRIGHTNESS);
Camera::getInstance()->probeProp(Camera::HUE);
Camera::getInstance()->probeResolutions();
bodyUI->videoModescomboBox->blockSignals(true);
bodyUI->videoModescomboBox->addItem(tr("Initializing Camera..."));
bodyUI->videoModescomboBox->blockSignals(false);
}
void AVForm::on_ContrastSlider_sliderMoved(int position)
{
Camera::getInstance()->setProp(Camera::CONTRAST, position / 100.0);
}
void AVForm::on_SaturationSlider_sliderMoved(int position)
{
Camera::getInstance()->setProp(Camera::SATURATION, position / 100.0);
}
void AVForm::on_BrightnessSlider_sliderMoved(int position)
{
Camera::getInstance()->setProp(Camera::BRIGHTNESS, position / 100.0);
}
void AVForm::on_HueSlider_sliderMoved(int position)
{
Camera::getInstance()->setProp(Camera::HUE, position / 100.0);
getVideoDevices();
}
void AVForm::on_videoModescomboBox_currentIndexChanged(int index)
{
QSize res = bodyUI->videoModescomboBox->itemData(index).toSize();
Settings::getInstance().setCamVideoRes(res);
Camera::getInstance()->setResolution(res);
if (index<0 || index>=videoModes.size())
{
qWarning() << "Invalid mode index";
return;
}
int devIndex = bodyUI->videoDevCombobox->currentIndex();
if (devIndex<0 || devIndex>=videoModes.size())
{
qWarning() << "Invalid device index";
return;
}
QString devName = videoDeviceList[devIndex].first;
VideoMode mode = videoModes[index];
Settings::getInstance().setCamVideoRes(QSize(mode.width, mode.height));
camVideoSurface->setSource(nullptr);
if (camera)
delete camera;
camera = new CameraSource(devName, mode);
camVideoSurface->setSource(camera);
}
void AVForm::onPropProbingFinished(Camera::Prop prop, double val)
void AVForm::updateVideoModes(int curIndex)
{
switch (prop)
if (curIndex<0 || curIndex>=videoDeviceList.size())
{
case Camera::BRIGHTNESS:
bodyUI->BrightnessSlider->setValue(val*100);
break;
case Camera::CONTRAST:
bodyUI->ContrastSlider->setValue(val*100);
break;
case Camera::SATURATION:
bodyUI->SaturationSlider->setValue(val*100);
break;
case Camera::HUE:
bodyUI->HueSlider->setValue(val*100);
break;
default:
break;
qWarning() << "Invalid index";
return;
}
QString devName = videoDeviceList[curIndex].first;
videoModes = CameraDevice::getVideoModes(devName);
std::sort(videoModes.begin(), videoModes.end(),
[](const VideoMode& a, const VideoMode& b)
{return a.width!=b.width ? a.width>b.width :
a.height!=b.height ? a.height>b.height :
a.FPS>b.FPS;});
bodyUI->videoModescomboBox->blockSignals(true);
bodyUI->videoModescomboBox->clear();
int prefResIndex = -1;
QSize prefRes = Settings::getInstance().getCamVideoRes();
for (int i=0; i<videoModes.size(); ++i)
{
VideoMode mode = videoModes[i];
if (mode.width==prefRes.width() && mode.height==prefRes.height() && prefResIndex==-1)
prefResIndex = i;
QString str = tr("%1x%2 at %3 FPS").arg(mode.width).arg(mode.height).arg(mode.FPS);
bodyUI->videoModescomboBox->addItem(str);
}
if (videoModes.isEmpty())
bodyUI->videoModescomboBox->addItem(tr("Default resolution"));
bodyUI->videoModescomboBox->blockSignals(false);
if (prefResIndex != -1)
{
bodyUI->videoModescomboBox->setCurrentIndex(prefResIndex);
}
else
{
// If the user hasn't set a preferred resolution yet,
// we'll pick the resolution in the middle of the list,
// and the best FPS for that resolution.
// If we picked the lowest resolution, the quality would be awful
// but if we picked the largest, FPS would be bad and thus quality bad too.
int numRes=0;
QSize lastSize;
for (int i=0; i<videoModes.size(); i++)
{
if (lastSize != QSize{videoModes[i].width, videoModes[i].height})
{
numRes++;
lastSize = {videoModes[i].width, videoModes[i].height};
}
}
int target = numRes/2;
numRes=0;
for (int i=0; i<videoModes.size(); i++)
{
if (lastSize != QSize{videoModes[i].width, videoModes[i].height})
{
numRes++;
lastSize = {videoModes[i].width, videoModes[i].height};
}
if (numRes==target)
{
bodyUI->videoModescomboBox->setCurrentIndex(i);
break;
}
}
}
}
void AVForm::onVideoDevChanged(int index)
{
if (index<0 || index>=videoDeviceList.size())
{
qWarning() << "Invalid index";
return;
}
camVideoSurface->setSource(nullptr);
if (camera)
{
delete camera;
camera = nullptr;
}
QString dev = videoDeviceList[index].first;
Settings::getInstance().setVideoDev(dev);
updateVideoModes(index);
camera = new CameraSource(dev);
camVideoSurface->setSource(camera);
}
void AVForm::onResProbingFinished(QList<QSize> res)
@ -157,17 +220,38 @@ void AVForm::onResProbingFinished(QList<QSize> res)
void AVForm::hideEvent(QHideEvent *)
{
if (CamVideoSurface)
if (camVideoSurface)
{
CamVideoSurface->setSource(nullptr);
camVideoSurface->setSource(nullptr);
killVideoSurface();
}
if (camera)
{
delete camera;
camera = nullptr;
}
videoDeviceList.clear();
}
void AVForm::showEvent(QShowEvent *)
void AVForm::getVideoDevices()
{
createVideoSurface();
CamVideoSurface->setSource(Camera::getInstance());
QString settingsInDev = Settings::getInstance().getVideoDev();
int videoDevIndex = 0;
videoDeviceList = CameraDevice::getDeviceList();
//prevent currentIndexChanged to be fired while adding items
bodyUI->videoDevCombobox->blockSignals(true);
bodyUI->videoDevCombobox->clear();
for (QPair<QString, QString> device : videoDeviceList)
{
bodyUI->videoDevCombobox->addItem(device.second);
if (device.first == settingsInDev)
videoDevIndex = bodyUI->videoDevCombobox->count()-1;
}
//addItem changes currentIndex -> reset
bodyUI->videoDevCombobox->setCurrentIndex(-1);
bodyUI->videoDevCombobox->blockSignals(false);
bodyUI->videoDevCombobox->setCurrentIndex(videoDevIndex);
updateVideoModes(videoDevIndex);
}
void AVForm::getAudioInDevices()
@ -190,9 +274,7 @@ void AVForm::getAudioInDevices()
#endif
bodyUI->inDevCombobox->addItem(inDev);
if (settingsInDev == inDev)
{
inDevIndex = bodyUI->inDevCombobox->count()-1;
}
pDeviceList += len+1;
}
//addItem changes currentIndex -> reset
@ -255,30 +337,6 @@ void AVForm::onFilterAudioToggled(bool filterAudio)
Settings::getInstance().setFilterAudio(filterAudio);
}
void AVForm::on_HueSlider_valueChanged(int value)
{
Camera::getInstance()->setProp(Camera::HUE, value / 100.0);
bodyUI->hueMax->setText(QString::number(value));
}
void AVForm::on_BrightnessSlider_valueChanged(int value)
{
Camera::getInstance()->setProp(Camera::BRIGHTNESS, value / 100.0);
bodyUI->brightnessMax->setText(QString::number(value));
}
void AVForm::on_SaturationSlider_valueChanged(int value)
{
Camera::getInstance()->setProp(Camera::SATURATION, value / 100.0);
bodyUI->saturationMax->setText(QString::number(value));
}
void AVForm::on_ContrastSlider_valueChanged(int value)
{
Camera::getInstance()->setProp(Camera::CONTRAST, value / 100.0);
bodyUI->contrastMax->setText(QString::number(value));
}
void AVForm::on_playbackSlider_valueChanged(int value)
{
Audio::setOutputVolume(value / 100.0);
@ -304,22 +362,22 @@ bool AVForm::eventFilter(QObject *o, QEvent *e)
void AVForm::createVideoSurface()
{
if (CamVideoSurface)
if (camVideoSurface)
return;
CamVideoSurface = new VideoSurface(bodyUI->CamFrame);
CamVideoSurface->setObjectName(QStringLiteral("CamVideoSurface"));
CamVideoSurface->setMinimumSize(QSize(160, 120));
bodyUI->gridLayout->addWidget(CamVideoSurface, 0, 0, 1, 1);
camVideoSurface = new VideoSurface(bodyUI->CamFrame);
camVideoSurface->setObjectName(QStringLiteral("CamVideoSurface"));
camVideoSurface->setMinimumSize(QSize(160, 120));
bodyUI->gridLayout->addWidget(camVideoSurface, 0, 0, 1, 1);
}
void AVForm::killVideoSurface()
{
if (!CamVideoSurface)
if (!camVideoSurface)
return;
QLayoutItem *child;
while ((child = bodyUI->gridLayout->takeAt(0)) != 0)
delete child;
delete CamVideoSurface;
CamVideoSurface = nullptr;
delete camVideoSurface;
camVideoSurface = nullptr;
}

View File

@ -15,18 +15,17 @@
#ifndef AVFORM_H
#define AVFORM_H
#include <QObject>
#include <QList>
#include "genericsettings.h"
#include "src/widget/videosurface.h"
#include "src/video/camera.h"
#include <QGroupBox>
#include <QVBoxLayout>
#include <QPushButton>
#include "src/video/videomode.h"
namespace Ui {
class AVSettings;
}
class Camera;
class CameraSource;
class VideoSurface;
class AVForm : public GenericForm
{
@ -39,15 +38,12 @@ public:
private:
void getAudioInDevices();
void getAudioOutDevices();
void getVideoDevices();
void createVideoSurface();
void killVideoSurface();
private slots:
void on_ContrastSlider_sliderMoved(int position);
void on_SaturationSlider_sliderMoved(int position);
void on_BrightnessSlider_sliderMoved(int position);
void on_HueSlider_sliderMoved(int position);
void on_videoModescomboBox_currentIndexChanged(int index);
// audio
@ -58,23 +54,21 @@ private slots:
void on_microphoneSlider_valueChanged(int value);
// camera
void onPropProbingFinished(Camera::Prop prop, double val);
void onVideoDevChanged(int index);
void onResProbingFinished(QList<QSize> res);
virtual void hideEvent(QHideEvent*);
virtual void showEvent(QShowEvent*);
void on_HueSlider_valueChanged(int value);
void on_BrightnessSlider_valueChanged(int value);
void on_SaturationSlider_valueChanged(int value);
void on_ContrastSlider_valueChanged(int value);
protected:
bool eventFilter(QObject *o, QEvent *e);
void updateVideoModes(int curIndex);
private:
Ui::AVSettings *bodyUI;
VideoSurface* CamVideoSurface;
VideoSurface* camVideoSurface;
CameraSource* camera;
QVector<QPair<QString, QString>> videoDeviceList;
QVector<VideoMode> videoModes;
};
#endif

View File

@ -41,10 +41,36 @@
<string>Audio Settings</string>
</property>
<layout class="QGridLayout" name="gridLayout_3">
<item row="2" column="3">
<widget class="QLabel" name="playbackMax">
<item row="2" column="1">
<widget class="QLabel" name="playbackMin">
<property name="text">
<string>100</string>
<string>0</string>
</property>
</widget>
</item>
<item row="5" column="0">
<widget class="QCheckBox" name="filterAudio">
<property name="toolTip">
<string>Filter sound from your microphone, so that people hearing you would get better sound.</string>
</property>
<property name="text">
<string>Filter audio</string>
</property>
</widget>
</item>
<item row="1" column="2">
<widget class="QComboBox" name="outDevCombobox"/>
</item>
<item row="2" column="2">
<widget class="QSlider" name="playbackSlider">
<property name="toolTip">
<string>Use slider to set volume of your speakers.</string>
</property>
<property name="maximum">
<number>100</number>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
</widget>
</item>
@ -55,17 +81,24 @@
</property>
</widget>
</item>
<item row="2" column="0">
<widget class="QLabel" name="playbackLabel">
<item row="2" column="3">
<widget class="QLabel" name="playbackMax">
<property name="text">
<string>Playback</string>
<string>100</string>
</property>
</widget>
</item>
<item row="3" column="0">
<widget class="QLabel" name="inDevLabel">
<item row="0" column="2">
<widget class="QPushButton" name="rescanButton">
<property name="text">
<string>Capture device</string>
<string>Rescan audio devices</string>
</property>
</widget>
</item>
<item row="4" column="3">
<widget class="QLabel" name="microphoneMax">
<property name="text">
<string>100</string>
</property>
</widget>
</item>
@ -83,47 +116,10 @@ WARNING: slider is not supposed to work yet.</string>
</property>
</widget>
</item>
<item row="2" column="2">
<widget class="QSlider" name="playbackSlider">
<property name="toolTip">
<string>Use slider to set volume of your speakers.</string>
</property>
<property name="maximum">
<number>100</number>
</property>
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
</widget>
</item>
<item row="5" column="0">
<widget class="QCheckBox" name="filterAudio">
<property name="toolTip">
<string>Filter sound from your microphone, so that people hearing you would get better sound.</string>
</property>
<item row="3" column="0">
<widget class="QLabel" name="inDevLabel">
<property name="text">
<string>Filter audio</string>
</property>
</widget>
</item>
<item row="1" column="0">
<widget class="QLabel" name="outDevLabel">
<property name="text">
<string>Playback device</string>
</property>
</widget>
</item>
<item row="2" column="1">
<widget class="QLabel" name="playbackMin">
<property name="text">
<string>0</string>
</property>
</widget>
</item>
<item row="0" column="2">
<widget class="QPushButton" name="rescanButton">
<property name="text">
<string>Rescan audio devices</string>
<string>Capture device</string>
</property>
</widget>
</item>
@ -134,18 +130,22 @@ WARNING: slider is not supposed to work yet.</string>
</property>
</widget>
</item>
<item row="4" column="3">
<widget class="QLabel" name="microphoneMax">
<item row="2" column="0">
<widget class="QLabel" name="playbackLabel">
<property name="text">
<string>100</string>
<string>Playback</string>
</property>
</widget>
</item>
<item row="3" column="2">
<widget class="QComboBox" name="inDevCombobox"/>
</item>
<item row="1" column="2">
<widget class="QComboBox" name="outDevCombobox"/>
<item row="1" column="0">
<widget class="QLabel" name="outDevLabel">
<property name="text">
<string>Playback device</string>
</property>
</widget>
</item>
</layout>
</widget>
@ -158,21 +158,28 @@ WARNING: slider is not supposed to work yet.</string>
<layout class="QVBoxLayout" name="verticalLayout">
<item>
<layout class="QGridLayout" name="gridLayout_2">
<item row="4" column="2">
<widget class="QSlider" name="ContrastSlider">
<property name="orientation">
<enum>Qt::Horizontal</enum>
<item row="0" column="0">
<widget class="QLabel" name="videoDevLabel">
<property name="text">
<string>Video device</string>
</property>
</widget>
</item>
<item row="1" column="2">
<widget class="QSlider" name="HueSlider">
<property name="orientation">
<enum>Qt::Horizontal</enum>
<item row="1" column="0">
<widget class="QLabel" name="resolutionLabel">
<property name="toolTip">
<string>Set resolution of your camera.
The higher values, the better video quality your friends may get.
Note though that with better video quality there is needed better internet connection.
Sometimes your connection may not be good enough to handle higher video quality,
which may lead to problems with video calls.</string>
</property>
<property name="text">
<string>Resolution</string>
</property>
</widget>
</item>
<item row="0" column="2">
<item row="1" column="1">
<widget class="QComboBox" name="videoModescomboBox">
<property name="sizePolicy">
<sizepolicy hsizetype="Expanding" vsizetype="Fixed">
@ -189,117 +196,8 @@ which may lead to problems with video calls.</string>
</property>
</widget>
</item>
<item row="4" column="1">
<widget class="QLabel" name="contrastMin">
<property name="text">
<string>0</string>
</property>
</widget>
</item>
<item row="4" column="0">
<widget class="QLabel" name="contrastLabel">
<property name="text">
<string>Contrast</string>
</property>
</widget>
</item>
<item row="1" column="1">
<widget class="QLabel" name="hueMin">
<property name="text">
<string>0</string>
</property>
</widget>
</item>
<item row="3" column="2">
<widget class="QSlider" name="SaturationSlider">
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
</widget>
</item>
<item row="1" column="0">
<widget class="QLabel" name="hueLabel">
<property name="text">
<string>Hue</string>
</property>
</widget>
</item>
<item row="0" column="0">
<widget class="QLabel" name="resolutionLabel">
<property name="toolTip">
<string>Set resolution of your camera.
The higher values, the better video quality your friends may get.
Note though that with better video quality there is needed better internet connection.
Sometimes your connection may not be good enough to handle higher video quality,
which may lead to problems with video calls.</string>
</property>
<property name="text">
<string>Resolution</string>
</property>
</widget>
</item>
<item row="3" column="1">
<widget class="QLabel" name="saturationMin">
<property name="text">
<string>0</string>
</property>
</widget>
</item>
<item row="2" column="2">
<widget class="QSlider" name="BrightnessSlider">
<property name="orientation">
<enum>Qt::Horizontal</enum>
</property>
</widget>
</item>
<item row="2" column="1">
<widget class="QLabel" name="brightnessMin">
<property name="text">
<string>0</string>
</property>
</widget>
</item>
<item row="3" column="0">
<widget class="QLabel" name="saturationLabel">
<property name="text">
<string>Saturation</string>
</property>
</widget>
</item>
<item row="2" column="0">
<widget class="QLabel" name="brightnessLabel">
<property name="text">
<string>Brightness</string>
</property>
</widget>
</item>
<item row="1" column="3">
<widget class="QLabel" name="hueMax">
<property name="text">
<string>100</string>
</property>
</widget>
</item>
<item row="2" column="3">
<widget class="QLabel" name="brightnessMax">
<property name="text">
<string>100</string>
</property>
</widget>
</item>
<item row="3" column="3">
<widget class="QLabel" name="saturationMax">
<property name="text">
<string>100</string>
</property>
</widget>
</item>
<item row="4" column="3">
<widget class="QLabel" name="contrastMax">
<property name="text">
<string>100</string>
</property>
</widget>
<item row="0" column="1">
<widget class="QComboBox" name="videoDevCombobox"/>
</item>
</layout>
</item>
@ -308,7 +206,7 @@ which may lead to problems with video calls.</string>
<property name="sizePolicy">
<sizepolicy hsizetype="MinimumExpanding" vsizetype="MinimumExpanding">
<horstretch>1</horstretch>
<verstretch>99</verstretch>
<verstretch>150</verstretch>
</sizepolicy>
</property>
<property name="frameShape">

View File

@ -21,6 +21,8 @@ namespace Ui {
class GeneralSettings;
}
class SettingsWidget;
class GeneralForm : public GenericForm
{
Q_OBJECT

View File

@ -16,7 +16,6 @@
#define GENERICFORM_H
#include <QWidget>
#include "src/widget/form/settingswidget.h"
class GenericForm : public QWidget
{

View File

@ -15,7 +15,7 @@
#include "settingswidget.h"
#include "src/widget/widget.h"
#include "ui_mainwindow.h"
#include "src/video/camera.h"
#include "src/video/camerasource.h"
#include "src/widget/form/settings/generalform.h"
#include "src/widget/form/settings/privacyform.h"
#include "src/widget/form/settings/avform.h"

View File

@ -13,22 +13,15 @@
*/
#include "videosurface.h"
#include "src/video/camera.h"
#include <QTimer>
#include <QOpenGLBuffer>
#include <QOpenGLShaderProgram>
#include <QDebug>
#include "src/video/videoframe.h"
#include <QPainter>
#include <QLabel>
VideoSurface::VideoSurface(QWidget* parent)
: QGLWidget(QGLFormat(QGL::SingleBuffer), parent)
: QWidget{parent}
, source{nullptr}
, pbo{nullptr, nullptr}
, bgrProgramm{nullptr}
, yuvProgramm{nullptr}
, textureId{0}
, pboAllocSize{0}
, frameLock{false}
, hasSubscribed{false}
, pboIndex{0}
{
}
@ -41,18 +34,6 @@ VideoSurface::VideoSurface(VideoSource *source, QWidget* parent)
VideoSurface::~VideoSurface()
{
if (pbo[0])
{
delete pbo[0];
delete pbo[1];
}
if (textureId != 0)
glDeleteTextures(1, &textureId);
delete bgrProgramm;
delete yuvProgramm;
unsubscribe();
}
@ -66,182 +47,6 @@ void VideoSurface::setSource(VideoSource *src)
subscribe();
}
void VideoSurface::initializeGL()
{
QGLWidget::initializeGL();
qDebug() << "Init";
// pbo
pbo[0] = new QOpenGLBuffer(QOpenGLBuffer::PixelUnpackBuffer);
pbo[0]->setUsagePattern(QOpenGLBuffer::StreamDraw);
pbo[0]->create();
pbo[1] = new QOpenGLBuffer(QOpenGLBuffer::PixelUnpackBuffer);
pbo[1]->setUsagePattern(QOpenGLBuffer::StreamDraw);
pbo[1]->create();
// shaders
bgrProgramm = new QOpenGLShaderProgram;
bgrProgramm->addShaderFromSourceCode(QOpenGLShader::Vertex,
"attribute vec4 vertices;"
"varying vec2 coords;"
"void main() {"
" gl_Position = vec4(vertices.xy, 0.0, 1.0);"
" coords = vertices.xy*vec2(0.5, 0.5) + vec2(0.5, 0.5);"
"}");
// brg frag-shader
bgrProgramm->addShaderFromSourceCode(QOpenGLShader::Fragment,
"uniform sampler2D texture0;"
"varying vec2 coords;"
"void main() {"
" vec4 color = texture2D(texture0,coords*vec2(1.0, -1.0));"
" gl_FragColor = vec4(color.bgr, 1.0);"
"}");
bgrProgramm->bindAttributeLocation("vertices", 0);
bgrProgramm->link();
// shaders
yuvProgramm = new QOpenGLShaderProgram;
yuvProgramm->addShaderFromSourceCode(QOpenGLShader::Vertex,
"attribute vec4 vertices;"
"varying vec2 coords;"
"void main() {"
" gl_Position = vec4(vertices.xy, 0.0, 1.0);"
" coords = vertices.xy*vec2(0.5, 0.5) + vec2(0.5, 0.5);"
"}");
// yuv frag-shader
yuvProgramm->addShaderFromSourceCode(QOpenGLShader::Fragment,
"uniform sampler2D texture0;"
"varying vec2 coords;"
"void main() {"
" vec3 yuv = texture2D(texture0,coords*vec2(1.0, -1.0)).rgb - vec3(0.0, 0.5, 0.5);"
" vec3 rgb = mat3(1.0, 1.0, 1.0, 0.0, -0.21482, 2.12798, 1.28033, -0.38059, 0.0)*yuv;"
" gl_FragColor = vec4(rgb, 1.0);"
"}");
yuvProgramm->bindAttributeLocation("vertices", 0);
yuvProgramm->link();
}
void VideoSurface::paintGL()
{
mutex.lock();
VideoFrame currFrame = frame;
frame.invalidate();
mutex.unlock();
if (currFrame.isValid() && res != currFrame.resolution)
{
res = currFrame.resolution;
// delete old texture
if (textureId != 0)
glDeleteTextures(1, &textureId);
// a texture used to render the pbo (has the match the pixelformat of the source)
glGenTextures(1,&textureId);
glBindTexture(GL_TEXTURE_2D, textureId);
glTexImage2D(GL_TEXTURE_2D,0, GL_RGB, res.width(), res.height(), 0, GL_RGB, GL_UNSIGNED_BYTE, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}
if (currFrame.isValid())
{
pboIndex = (pboIndex + 1) % 2;
int nextPboIndex = (pboIndex + 1) % 2;
if (pboAllocSize != currFrame.frameData.size())
{
qDebug() << "Resize pbo " << currFrame.frameData.size() << "(" << currFrame.resolution << ")" << "bytes (before" << pboAllocSize << ")";
pbo[0]->bind();
pbo[0]->allocate(currFrame.frameData.size());
pbo[0]->release();
pbo[1]->bind();
pbo[1]->allocate(currFrame.frameData.size());
pbo[1]->release();
pboAllocSize = currFrame.frameData.size();
}
pbo[pboIndex]->bind();
glBindTexture(GL_TEXTURE_2D, textureId);
glTexSubImage2D(GL_TEXTURE_2D,0,0,0, res.width(), res.height(), GL_RGB, GL_UNSIGNED_BYTE, 0);
pbo[pboIndex]->unmap();
pbo[pboIndex]->release();
// transfer data
pbo[nextPboIndex]->bind();
void* ptr = pbo[nextPboIndex]->map(QOpenGLBuffer::WriteOnly);
if (ptr)
memcpy(ptr, currFrame.frameData.data(), currFrame.frameData.size());
pbo[nextPboIndex]->unmap();
pbo[nextPboIndex]->release();
}
// background
glClearColor(0, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT);
// keep aspect ratio
float aspectRatio = float(res.width()) / float(res.height());
if (width() < float(height()) * aspectRatio)
{
float h = float(width()) / aspectRatio;
glViewport(0, (height() - h)*0.5f, width(), h);
}
else
{
float w = float(height()) * float(aspectRatio);
glViewport((width() - w)*0.5f, 0, w, height());
}
QOpenGLShaderProgram* programm = nullptr;
switch (frame.format)
{
case VideoFrame::YUV:
programm = yuvProgramm;
break;
case VideoFrame::BGR:
programm = bgrProgramm;
break;
default:
break;
}
if (programm)
{
// render pbo
static float values[] = {
-1, -1,
1, -1,
-1, 1,
1, 1
};
programm->bind();
programm->setAttributeArray(0, GL_FLOAT, values, 2);
programm->enableAttributeArray(0);
}
glBindTexture(GL_TEXTURE_2D, textureId);
//draw fullscreen quad
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindTexture(GL_TEXTURE_2D, 0);
if (programm)
{
programm->disableAttributeArray(0);
programm->release();
}
}
void VideoSurface::subscribe()
{
if (source && !hasSubscribed)
@ -254,22 +59,58 @@ void VideoSurface::subscribe()
void VideoSurface::unsubscribe()
{
if (source && hasSubscribed)
if (!source || !hasSubscribed)
return;
// Fast lock
{
source->unsubscribe();
hasSubscribed = false;
disconnect(source, &VideoSource::frameAvailable, this, &VideoSurface::onNewFrameAvailable);
bool expected = false;
while (!frameLock.compare_exchange_weak(expected, true))
expected = false;
}
lastFrame.reset();
frameLock = false;
source->unsubscribe();
hasSubscribed = false;
disconnect(source, &VideoSource::frameAvailable, this, &VideoSurface::onNewFrameAvailable);
}
void VideoSurface::onNewFrameAvailable(const VideoFrame& newFrame)
void VideoSurface::onNewFrameAvailable(std::shared_ptr<VideoFrame> newFrame)
{
mutex.lock();
frame = newFrame;
mutex.unlock();
// Fast lock
{
bool expected = false;
while (!frameLock.compare_exchange_weak(expected, true))
expected = false;
}
updateGL();
lastFrame = newFrame;
frameLock = false;
update();
}
/**
 * Qt paint handler: draws the most recent video frame, scaled to the
 * widget height and centered horizontally (letterboxed) on a black
 * background. When no frame has arrived yet, only the background is drawn.
 *
 * Uses the same atomic fast-lock protocol as onNewFrameAvailable() and
 * unsubscribe() to guard lastFrame against concurrent replacement while
 * the frame is being converted and blitted.
 */
void VideoSurface::paintEvent(QPaintEvent*)
{
    // Fast lock: spin until we atomically flip frameLock false -> true.
    {
        bool expected = false;
        while (!frameLock.compare_exchange_weak(expected, true))
            expected = false;
    }

    QPainter painter(this);
    painter.fillRect(painter.viewport(), Qt::black);
    if (lastFrame)
    {
        QSize frameSize = lastFrame->getSize();
        // Guard against a degenerate (zero-height) frame: the integer
        // division below would otherwise crash the paint handler.
        if (frameSize.height() > 0)
        {
            QRect rect = painter.viewport();
            // Width that preserves the frame's aspect ratio at viewport
            // height. 'scaledWidth' avoids shadowing QWidget::width().
            int scaledWidth = frameSize.width() * rect.height() / frameSize.height();
            rect.setLeft((rect.width() - scaledWidth) / 2);
            rect.setWidth(scaledWidth);
            QImage frame = lastFrame->toQImage(rect.size());
            painter.drawImage(rect, frame, frame.rect(), Qt::NoFormatConversion);
        }
    }

    frameLock = false; // release the fast lock
}

View File

@ -15,14 +15,12 @@
#ifndef SELFCAMVIEW_H
#define SELFCAMVIEW_H
#include <QGLWidget>
#include <QMutex>
#include <QWidget>
#include <memory>
#include <atomic>
#include "src/video/videosource.h"
class QOpenGLBuffer;
class QOpenGLShaderProgram;
class VideoSurface : public QGLWidget
class VideoSurface : public QWidget
{
Q_OBJECT
@ -33,30 +31,20 @@ public:
void setSource(VideoSource* src); //NULL is a valid option
// QGLWidget interface
protected:
virtual void initializeGL();
virtual void paintGL();
void subscribe();
void unsubscribe();
virtual void paintEvent(QPaintEvent * event) override;
private slots:
void onNewFrameAvailable(const VideoFrame &newFrame);
void onNewFrameAvailable(std::shared_ptr<VideoFrame> newFrame);
private:
VideoSource* source;
QOpenGLBuffer* pbo[2];
QOpenGLShaderProgram* bgrProgramm;
QOpenGLShaderProgram* yuvProgramm;
GLuint textureId;
int pboAllocSize;
QSize res;
std::shared_ptr<VideoFrame> lastFrame;
std::atomic_bool frameLock; ///< Fast lock for lastFrame
bool hasSubscribed;
QMutex mutex;
VideoFrame frame;
int pboIndex;
};
#endif // SELFCAMVIEW_H

View File

@ -26,7 +26,6 @@
#include "form/groupchatform.h"
#include "src/misc/style.h"
#include "friendlistwidget.h"
#include "src/video/camera.h"
#include "form/chatform.h"
#include "maskablepixmapwidget.h"
#include "src/historykeeper.h"