Mirror of https://github.com/kotatogram/kotatogram-desktop (synced 2025-08-31 14:45:14 +00:00)

Fix build on Linux 64 bit.

This commit is contained in:
John Preston
2020-08-11 13:59:48 +04:00
parent bd16708781
commit eda22b925f
9 changed files with 71 additions and 55 deletions

View File

@@ -87,7 +87,7 @@ void AppendServer(
if (host.isEmpty()) {
return;
}
list.push_back({
list.push_back(tgcalls::RtcServer{
.host = host.toStdString(),
.port = port,
.isTurn = false
@@ -100,7 +100,7 @@ void AppendServer(
const auto password = qs(data.vpassword());
if (data.is_turn() && !username.isEmpty() && !password.isEmpty()) {
const auto pushTurn = [&](const QString &host) {
list.push_back({
list.push_back(tgcalls::RtcServer{
.host = host.toStdString(),
.port = port,
.login = username.toStdString(),
@@ -143,8 +143,8 @@ uint64 ComputeFingerprint(bytes::const_span authKey) {
return WrapVersions(tgcalls::Meta::Versions() | ranges::action::reverse);
}
[[nodiscard]] webrtc::VideoState StartVideoState(bool enabled) {
using State = webrtc::VideoState;
[[nodiscard]] Webrtc::VideoState StartVideoState(bool enabled) {
using State = Webrtc::VideoState;
return enabled ? State::Active : State::Inactive;
}
@@ -159,8 +159,8 @@ Call::Call(
, _user(user)
, _api(&_user->session().mtp())
, _type(type)
, _videoIncoming(std::make_unique<webrtc::VideoTrack>(StartVideoState(video)))
, _videoOutgoing(std::make_unique<webrtc::VideoTrack>(StartVideoState(video))) {
, _videoIncoming(std::make_unique<Webrtc::VideoTrack>(StartVideoState(video)))
, _videoOutgoing(std::make_unique<Webrtc::VideoTrack>(StartVideoState(video))) {
_discardByTimeoutTimer.setCallback([=] { hangup(); });
if (_type == Type::Outgoing) {
@@ -345,14 +345,14 @@ void Call::setMuted(bool mute) {
void Call::setupOutgoingVideo() {
const auto started = _videoOutgoing->state();
_videoOutgoing->stateValue(
) | rpl::start_with_next([=](webrtc::VideoState state) {
) | rpl::start_with_next([=](Webrtc::VideoState state) {
if (_state.current() != State::Established
&& state != started
&& !_videoCapture) {
_videoOutgoing->setState(started);
} else if (state != webrtc::VideoState::Inactive) {
} else if (state != Webrtc::VideoState::Inactive) {
// Paused not supported right now.
Assert(state == webrtc::VideoState::Active);
Assert(state == Webrtc::VideoState::Active);
if (!_videoCapture) {
_videoCapture = tgcalls::VideoCaptureInterface::Create();
_videoCapture->setOutput(_videoOutgoing->sink());
@@ -367,11 +367,11 @@ void Call::setupOutgoingVideo() {
}, _lifetime);
}
not_null<webrtc::VideoTrack*> Call::videoIncoming() const {
not_null<Webrtc::VideoTrack*> Call::videoIncoming() const {
return _videoIncoming.get();
}
not_null<webrtc::VideoTrack*> Call::videoOutgoing() const {
not_null<Webrtc::VideoTrack*> Call::videoOutgoing() const {
return _videoOutgoing.get();
}
@@ -589,7 +589,7 @@ void Call::updateRemoteMediaState(
}();
_videoIncoming->setState([&] {
using From = tgcalls::VideoState;
using To = webrtc::VideoState;
using To = Webrtc::VideoState;
switch (video) {
case From::Inactive: return To::Inactive;
case From::Paused: return To::Paused;
@@ -992,8 +992,8 @@ void Call::finish(FinishType type, const MTPPhoneCallDiscardReason &reason) {
auto duration = getDurationMs() / 1000;
auto connectionId = _instance ? _instance->getPreferredRelayId() : 0;
_finishByTimeoutTimer.call(kHangupTimeoutMs, [this, finalState] { setState(finalState); });
const auto flags = ((_videoIncoming->state() != webrtc::VideoState::Inactive)
|| (_videoOutgoing->state() != webrtc::VideoState::Inactive))
const auto flags = ((_videoIncoming->state() != Webrtc::VideoState::Inactive)
|| (_videoOutgoing->state() != Webrtc::VideoState::Inactive))
? MTPphone_DiscardCall::Flag::f_video
: MTPphone_DiscardCall::Flag(0);
_api.request(MTPphone_DiscardCall(

View File

@@ -27,10 +27,10 @@ enum class VideoState;
enum class AudioState;
} // namespace tgcalls
namespace webrtc {
namespace Webrtc {
enum class VideoState;
class VideoTrack;
} // namespace webrtc
} // namespace Webrtc
namespace Calls {
@@ -117,11 +117,11 @@ public:
return _remoteAudioState.value();
}
[[nodiscard]] webrtc::VideoState remoteVideoState() const {
[[nodiscard]] Webrtc::VideoState remoteVideoState() const {
return _remoteVideoState.current();
}
[[nodiscard]] auto remoteVideoStateValue() const
-> rpl::producer<webrtc::VideoState> {
-> rpl::producer<Webrtc::VideoState> {
return _remoteVideoState.value();
}
@@ -140,8 +140,8 @@ public:
return _muted.value();
}
[[nodiscard]] not_null<webrtc::VideoTrack*> videoIncoming() const;
[[nodiscard]] not_null<webrtc::VideoTrack*> videoOutgoing() const;
[[nodiscard]] not_null<Webrtc::VideoTrack*> videoIncoming() const;
[[nodiscard]] not_null<Webrtc::VideoTrack*> videoOutgoing() const;
crl::time getDurationMs() const;
float64 getWaitingSoundPeakValue() const;
@@ -212,7 +212,7 @@ private:
Type _type = Type::Outgoing;
rpl::variable<State> _state = State::Starting;
rpl::variable<RemoteAudioState> _remoteAudioState = RemoteAudioState::Active;
rpl::variable<webrtc::VideoState> _remoteVideoState;
rpl::variable<Webrtc::VideoState> _remoteVideoState;
FinishType _finishAfterRequestingCall = FinishType::None;
bool _answerAfterDhConfigReceived = false;
rpl::variable<int> _signalBarCount = kSignalBarStarting;
@@ -236,8 +236,8 @@ private:
std::unique_ptr<tgcalls::Instance> _instance;
std::shared_ptr<tgcalls::VideoCaptureInterface> _videoCapture;
const std::unique_ptr<webrtc::VideoTrack> _videoIncoming;
const std::unique_ptr<webrtc::VideoTrack> _videoOutgoing;
const std::unique_ptr<Webrtc::VideoTrack> _videoIncoming;
const std::unique_ptr<Webrtc::VideoTrack> _videoOutgoing;
std::unique_ptr<Media::Audio::Track> _waitingTrack;

View File

@@ -312,9 +312,9 @@ void Panel::initControls() {
_camera->setClickedCallback([=] {
if (_call) {
_call->videoOutgoing()->setState(
(_call->videoOutgoing()->state() == webrtc::VideoState::Active)
? webrtc::VideoState::Inactive
: webrtc::VideoState::Active);
(_call->videoOutgoing()->state() == Webrtc::VideoState::Active)
? Webrtc::VideoState::Inactive
: Webrtc::VideoState::Active);
}
});
@@ -399,8 +399,8 @@ void Panel::reinitWithCall(Call *call) {
}, _callLifetime);
_call->videoOutgoing()->stateValue(
) | rpl::start_with_next([=](webrtc::VideoState state) {
_camera->setIconOverride((state == webrtc::VideoState::Active)
) | rpl::start_with_next([=](Webrtc::VideoState state) {
_camera->setIconOverride((state == Webrtc::VideoState::Active)
? nullptr
: &st::callNoCameraIcon);
}, _callLifetime);
@@ -547,7 +547,7 @@ void Panel::initGeometry() {
void Panel::refreshOutgoingPreviewInBody(State state) {
const auto inBody = (state != State::Established)
&& (_call->videoOutgoing()->state() != webrtc::VideoState::Inactive)
&& (_call->videoOutgoing()->state() != Webrtc::VideoState::Inactive)
&& !_call->videoOutgoing()->frameSize().isEmpty();
if (_outgoingPreviewInBody == inBody) {
return;
@@ -722,7 +722,7 @@ void Panel::paintEvent(QPaintEvent *e) {
}
const auto incomingFrame = _call
? _call->videoIncoming()->frame(webrtc::FrameRequest())
? _call->videoIncoming()->frame(Webrtc::FrameRequest())
: QImage();
if (!incomingFrame.isNull()) {
const auto to = rect().marginsRemoved(_padding);
@@ -877,7 +877,7 @@ void Panel::stateChanged(State state) {
}
bool Panel::hasActiveVideo() const {
const auto inactive = webrtc::VideoState::Inactive;
const auto inactive = Webrtc::VideoState::Inactive;
return (_call->videoIncoming()->state() != inactive)
|| (_call->videoOutgoing()->state() != inactive);
}

View File

@@ -18,10 +18,10 @@ namespace Calls {
VideoBubble::VideoBubble(
not_null<QWidget*> parent,
not_null<webrtc::VideoTrack*> track)
not_null<Webrtc::VideoTrack*> track)
: _content(parent)
, _track(track)
, _state(webrtc::VideoState::Inactive) {
, _state(Webrtc::VideoState::Inactive) {
setup();
}
@@ -35,7 +35,7 @@ void VideoBubble::setup() {
}, lifetime());
_track->stateValue(
) | rpl::start_with_next([=](webrtc::VideoState state) {
) | rpl::start_with_next([=](Webrtc::VideoState state) {
setState(state);
}, lifetime());
@@ -137,7 +137,7 @@ void VideoBubble::prepareFrame() {
* cIntRetinaFactor();
// Should we check 'original' and 'size' aspect ratios?..
const auto request = webrtc::FrameRequest{
const auto request = Webrtc::FrameRequest{
.resize = size,
.outer = size,
};
@@ -165,13 +165,13 @@ void VideoBubble::prepareFrame() {
QRect(QPoint(), size));
}
void VideoBubble::setState(webrtc::VideoState state) {
if (state == webrtc::VideoState::Paused) {
void VideoBubble::setState(Webrtc::VideoState state) {
if (state == Webrtc::VideoState::Paused) {
using namespace Images;
static constexpr auto kRadius = 24;
_pausedFrame = Images::BlurLargeImage(_track->frame({}), kRadius);
if (_pausedFrame.isNull()) {
state = webrtc::VideoState::Inactive;
state = Webrtc::VideoState::Inactive;
}
}
_state = state;
@@ -240,7 +240,7 @@ void VideoBubble::setInnerSize(QSize size) {
void VideoBubble::updateVisibility() {
const auto size = _track->frameSize();
const auto visible = (_state != webrtc::VideoState::Inactive)
const auto visible = (_state != Webrtc::VideoState::Inactive)
&& !size.isEmpty();
if (visible) {
updateSizeToFrame(size);

View File

@@ -9,10 +9,10 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "ui/rp_widget.h"
namespace webrtc {
namespace Webrtc {
class VideoTrack;
enum class VideoState;
} // namespace webrtc
} // namespace Webrtc
namespace Calls {
@@ -20,7 +20,7 @@ class VideoBubble final {
public:
VideoBubble(
not_null<QWidget*> parent,
not_null<webrtc::VideoTrack*> track);
not_null<Webrtc::VideoTrack*> track);
enum class DragMode {
None,
@@ -39,7 +39,7 @@ public:
private:
void setup();
void paint();
void setState(webrtc::VideoState state);
void setState(Webrtc::VideoState state);
void applyDragMode(DragMode mode);
void applyBoundingRect(QRect rect);
void applySizeConstraints(QSize min, QSize max);
@@ -49,8 +49,8 @@ private:
void prepareFrame();
Ui::RpWidget _content;
const not_null<webrtc::VideoTrack*> _track;
webrtc::VideoState _state = webrtc::VideoState();
const not_null<Webrtc::VideoTrack*> _track;
Webrtc::VideoState _state = Webrtc::VideoState();
QImage _frame, _pausedFrame;
QSize _min, _max, _size, _lastDraggableSize, _lastFrameSize;
QRect _boundingRect;