I have implemented ReplayKit successfully.
SampleHandler.swift
import ReplayKit

class SampleHandler: RPBroadcastSampleHandler {
    override func broadcastStarted(withSetupInfo setupInfo: [String: NSObject]?) {
    }

    override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType: RPSampleBufferType) {
        switch sampleBufferType {
        case RPSampleBufferType.video:
            break
        case RPSampleBufferType.audioApp:
            break
        case RPSampleBufferType.audioMic:
            break
        @unknown default:
            return
        }
    }
}
Question:
I am using the WebRTC SDK for mobile in Swift. This is my WebRTCClient file:
import Foundation
import AVFoundation
import WebRTC

final class WebRTCClient: NSObject {
    // The `RTCPeerConnectionFactory` is in charge of creating new RTCPeerConnection instances.
    // A new RTCPeerConnection should be created for every new call, but the factory is shared.
    static let factory: RTCPeerConnectionFactory = {
        RTCInitializeSSL()
        let videoEncoderFactory = RTCDefaultVideoEncoderFactory()
        let videoDecoderFactory = RTCDefaultVideoDecoderFactory()
        return RTCPeerConnectionFactory(encoderFactory: videoEncoderFactory, decoderFactory: videoDecoderFactory)
    }()

    weak var delegate: WebRTCClientDelegate?
    let peerConnection: RTCPeerConnection
    private let rtcAudioSession = RTCAudioSession.sharedInstance()
    private let audioQueue = DispatchQueue(label: "audio")
    private let mediaConstrains = [kRTCMediaConstraintsOfferToReceiveAudio: kRTCMediaConstraintsValueTrue,
                                   kRTCMediaConstraintsOfferToReceiveVideo: kRTCMediaConstraintsValueTrue]
    private var videoCapturer: RTCVideoCapturer?
    private var localVideoTrack: RTCVideoTrack?
    private var remoteVideoTrack: RTCVideoTrack?
    private var localDataChannel: RTCDataChannel?
    private var remoteDataChannel: RTCDataChannel?

    @available(*, unavailable)
    override init() {
        fatalError("WebRTCClient:init is unavailable")
    }

    required init(iceServers: [String]) {
        let config = RTCConfiguration()
        // config.iceServers = [RTCIceServer(urlStrings: iceServers)]
        config.iceServers = [RTCIceServer(urlStrings: ["//Turn server URL"],
                                          username: "username",
                                          credential: "password")]
        // Unified Plan is preferred over the older Plan B.
        config.sdpSemantics = .unifiedPlan
        // gatherContinually lets WebRTC listen for network changes and send any new candidates to the other client.
        config.continualGatheringPolicy = .gatherContinually
        let constraints = RTCMediaConstraints(mandatoryConstraints: nil,
                                              optionalConstraints: ["DtlsSrtpKeyAgreement": kRTCMediaConstraintsValueTrue])
        self.peerConnection = WebRTCClient.factory.peerConnection(with: config, constraints: constraints, delegate: nil)
        super.init()
        self.createMediaSenders()
        self.configureAudioSession()
        self.peerConnection.delegate = self
    }

    // MARK: Signaling
    func offer(completion: @escaping (_ sdp: RTCSessionDescription) -> Void) {
        let constrains = RTCMediaConstraints(mandatoryConstraints: self.mediaConstrains,
                                             optionalConstraints: nil)
        self.peerConnection.offer(for: constrains) { (sdp, error) in
            guard let sdp = sdp else {
                return
            }
            self.peerConnection.setLocalDescription(sdp, completionHandler: { (error) in
                completion(sdp)
            })
        }
    }

    func answer(completion: @escaping (_ sdp: RTCSessionDescription) -> Void) {
        let constrains = RTCMediaConstraints(mandatoryConstraints: self.mediaConstrains,
                                             optionalConstraints: nil)
        self.peerConnection.answer(for: constrains) { (sdp, error) in
            guard let sdp = sdp else {
                return
            }
            self.peerConnection.setLocalDescription(sdp, completionHandler: { (error) in
                completion(sdp)
            })
        }
    }

    func set(remoteSdp: RTCSessionDescription, completion: @escaping (Error?) -> ()) {
        self.peerConnection.setRemoteDescription(remoteSdp, completionHandler: completion)
    }

    func set(remoteCandidate: RTCIceCandidate) {
        self.peerConnection.add(remoteCandidate)
    }

    // MARK: Media
    func startCaptureLocalVideo(renderer: RTCVideoRenderer, position: AVCaptureDevice.Position) {
        guard let capturer = self.videoCapturer as? RTCCameraVideoCapturer else {
            return
        }
        guard
            let frontCamera = (RTCCameraVideoCapturer.captureDevices().first { $0.position == position }),
            // choose highest res
            let format = (RTCCameraVideoCapturer.supportedFormats(for: frontCamera).sorted { (f1, f2) -> Bool in
                let width1 = CMVideoFormatDescriptionGetDimensions(f1.formatDescription).width
                let width2 = CMVideoFormatDescriptionGetDimensions(f2.formatDescription).width
                return width1 < width2
            }).last,
            // choose highest fps
            let fps = (format.videoSupportedFrameRateRanges.sorted { return $0.maxFrameRate < $1.maxFrameRate }.last) else {
            return
        }
        capturer.startCapture(with: frontCamera,
                              format: format,
                              fps: Int(fps.maxFrameRate))
        self.localVideoTrack?.add(renderer)
    }

    func startCaptureLocalTestVideo(renderer: RTCVideoRenderer, position: AVCaptureDevice.Position) {
        guard let capturer = self.videoCapturer as? RTCCameraVideoCapturer else {
            return
        }
        guard
            let frontCamera = (RTCCameraVideoCapturer.captureDevices().first { $0.position == position }),
            // choose highest res
            let format = (RTCCameraVideoCapturer.supportedFormats(for: frontCamera).sorted { (f1, f2) -> Bool in
                let width1 = CMVideoFormatDescriptionGetDimensions(f1.formatDescription).width
                let width2 = CMVideoFormatDescriptionGetDimensions(f2.formatDescription).width
                return width1 < width2
            }).last,
            // choose highest fps
            let fps = (format.videoSupportedFrameRateRanges.sorted { return $0.maxFrameRate < $1.maxFrameRate }.last) else {
            return
        }
        capturer.startCapture(with: frontCamera,
                              format: format,
                              fps: Int(fps.maxFrameRate))
        self.localVideoTrack?.add(renderer)
    }

    func renderRemoteVideo(to renderer: RTCVideoRenderer) {
        self.remoteVideoTrack?.add(renderer)
    }

    private func configureAudioSession() {
        self.rtcAudioSession.lockForConfiguration()
        do {
            try self.rtcAudioSession.setCategory(AVAudioSession.Category.playAndRecord.rawValue)
            try self.rtcAudioSession.setMode(AVAudioSession.Mode.voiceChat.rawValue)
        } catch let error {
            debugPrint("Error changing AVAudioSession category: \(error)")
        }
        self.rtcAudioSession.unlockForConfiguration()
    }

    private func createMediaSenders() {
        let streamId = "stream"
        // Audio
        let audioTrack = self.createAudioTrack()
        self.peerConnection.add(audioTrack, streamIds: [streamId])
        // Video
        let videoTrack = self.createVideoTrack()
        self.localVideoTrack = videoTrack
        self.peerConnection.add(videoTrack, streamIds: [streamId])
        self.remoteVideoTrack = self.peerConnection.transceivers.first { $0.mediaType == .video }?.receiver.track as? RTCVideoTrack
        // Data
        if let dataChannel = createDataChannel() {
            dataChannel.delegate = self
            self.localDataChannel = dataChannel
        }
    }

    private func createAudioTrack() -> RTCAudioTrack {
        let audioConstrains = RTCMediaConstraints(mandatoryConstraints: nil, optionalConstraints: nil)
        let audioSource = WebRTCClient.factory.audioSource(with: audioConstrains)
        let audioTrack = WebRTCClient.factory.audioTrack(with: audioSource, trackId: "audio0")
        return audioTrack
    }

    private func createVideoTrack() -> RTCVideoTrack {
        let videoSource = WebRTCClient.factory.videoSource()
        #if targetEnvironment(simulator)
        self.videoCapturer = RTCFileVideoCapturer(delegate: videoSource)
        #else
        self.videoCapturer = RTCCameraVideoCapturer(delegate: videoSource)
        #endif
        let videoTrack = WebRTCClient.factory.videoTrack(with: videoSource, trackId: "video0")
        return videoTrack
    }

    // MARK: Data Channels
    private func createDataChannel() -> RTCDataChannel? {
        let config = RTCDataChannelConfiguration()
        guard let dataChannel = self.peerConnection.dataChannel(forLabel: "WebRTCData", configuration: config) else {
            debugPrint("Warning: Couldn't create data channel.")
            return nil
        }
        return dataChannel
    }

    func sendData(_ data: Data) {
        let buffer = RTCDataBuffer(data: data, isBinary: true)
        self.remoteDataChannel?.sendData(buffer)
    }
}

extension WebRTCClient: RTCPeerConnectionDelegate {
    func peerConnection(_ peerConnection: RTCPeerConnection, didChange stateChanged: RTCSignalingState) {
        debugPrint("peerConnection new signaling state: \(stateChanged)")
    }

    func peerConnection(_ peerConnection: RTCPeerConnection, didAdd stream: RTCMediaStream) {
        debugPrint("peerConnection did add stream")
    }

    func peerConnection(_ peerConnection: RTCPeerConnection, didRemove stream: RTCMediaStream) {
        debugPrint("peerConnection did remove stream")
    }

    func peerConnectionShouldNegotiate(_ peerConnection: RTCPeerConnection) {
        debugPrint("peerConnection should negotiate")
    }

    func peerConnection(_ peerConnection: RTCPeerConnection, didChange newState: RTCIceConnectionState) {
        debugPrint("peerConnection new connection state: \(newState)")
        self.delegate?.webRTCClient(self, didChangeConnectionState: newState)
    }

    func peerConnection(_ peerConnection: RTCPeerConnection, didChange newState: RTCIceGatheringState) {
        debugPrint("peerConnection new gathering state: \(newState)")
    }

    func peerConnection(_ peerConnection: RTCPeerConnection, didGenerate candidate: RTCIceCandidate) {
        self.delegate?.webRTCClient(self, didDiscoverLocalCandidate: candidate)
    }

    func peerConnection(_ peerConnection: RTCPeerConnection, didRemove candidates: [RTCIceCandidate]) {
        debugPrint("peerConnection did remove candidate(s)")
    }

    func peerConnection(_ peerConnection: RTCPeerConnection, didOpen dataChannel: RTCDataChannel) {
        debugPrint("peerConnection did open data channel")
        self.remoteDataChannel = dataChannel
    }
}

// MARK: - Audio control
extension WebRTCClient {
    func muteAudio() {
        self.setAudioEnabled(false)
    }

    func unmuteAudio() {
        self.setAudioEnabled(true)
    }

    func muteVideo() {
        self.setVideoEnabled(false)
    }

    func unmuteVideo() {
        self.setVideoEnabled(true)
    }

    // Fall back to the default playout device: headphones/bluetooth/ear speaker
    func speakerOff() {
        self.audioQueue.async { [weak self] in
            guard let self = self else {
                return
            }
            self.rtcAudioSession.lockForConfiguration()
            do {
                try self.rtcAudioSession.setCategory(AVAudioSession.Category.playAndRecord.rawValue)
                try self.rtcAudioSession.overrideOutputAudioPort(.none)
            } catch let error {
                debugPrint("Error setting AVAudioSession category: \(error)")
            }
            self.rtcAudioSession.unlockForConfiguration()
        }
    }

    // Force speaker
    func speakerOn() {
        self.audioQueue.async { [weak self] in
            guard let self = self else {
                return
            }
            self.rtcAudioSession.lockForConfiguration()
            do {
                try self.rtcAudioSession.setCategory(AVAudioSession.Category.playAndRecord.rawValue)
                try self.rtcAudioSession.overrideOutputAudioPort(.speaker)
                try self.rtcAudioSession.setActive(true)
            } catch let error {
                debugPrint("Couldn't force audio to speaker: \(error)")
            }
            self.rtcAudioSession.unlockForConfiguration()
        }
    }

    private func setAudioEnabled(_ isEnabled: Bool) {
        let audioTracks = self.peerConnection.transceivers.compactMap { return $0.sender.track as? RTCAudioTrack }
        audioTracks.forEach { $0.isEnabled = isEnabled }
    }

    private func setVideoEnabled(_ isEnabled: Bool) {
        let videoTracks = self.peerConnection.transceivers.compactMap { return $0.sender.track as? RTCVideoTrack }
        videoTracks.forEach { $0.isEnabled = isEnabled }
    }

    func swapCameraToFront() {
        guard let localStream = peerConnection.localStreams.first,
              let currentTrack = localStream.videoTracks.first else {
            return
        }
        localStream.removeVideoTrack(currentTrack)
        let localVideoTrack = self.createVideoTrack()
        localStream.addVideoTrack(localVideoTrack)
        // delegate?.appClient(self, didReceiveLocalVideoTrack: localVideoTrack)
        peerConnection.remove(localStream)
        peerConnection.add(localStream)
    }

    func swapCameraToBack() {
        guard let localStream = peerConnection.localStreams.first,
              let currentTrack = localStream.videoTracks.first else {
            return
        }
        localStream.removeVideoTrack(currentTrack)
        let localVideoTrack = self.createVideoTrack()
        localStream.addVideoTrack(localVideoTrack)
        peerConnection.remove(localStream)
        peerConnection.add(localStream)
    }
}

extension WebRTCClient: RTCDataChannelDelegate {
    func dataChannelDidChangeState(_ dataChannel: RTCDataChannel) {
        debugPrint("dataChannel did change state: \(dataChannel.readyState)")
    }

    func dataChannel(_ dataChannel: RTCDataChannel, didReceiveMessageWith buffer: RTCDataBuffer) {
        self.delegate?.webRTCClient(self, didReceiveData: buffer.data)
    }
}
ViewController.swift:
override func viewDidLoad() {
    super.viewDidLoad()
    let localRenderer = RTCMTLVideoView(frame: self.localVideoView?.frame ?? CGRect.zero)
    let remoteRenderer = RTCMTLVideoView(frame: self.view.frame)
    self.view.insertSubview(localRenderer, at: 0)
    self.view.insertSubview(remoteRenderer, at: -1)
    self.webRTCClient.startCaptureLocalVideo(renderer: localRenderer, position: .front)
    self.webRTCClient.renderRemoteVideo(to: remoteRenderer)
}
I am building on the WebRTC project at https://github.com/stasel/WebRTC-iOS, a more detailed demo app for WebRTC.
Best answer
You should send each sample buffer's bytes as an RTCVideoFrame in the appropriate format, then push the frame into WebRTC like this:
videoSource.capturer(videoCapturer, didCapture: videoFrame)
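For context, here is a minimal sketch of what that push entry point could look like inside the WebRTCClient above, assuming you keep a reference to the RTCVideoSource created in createVideoTrack(); the stored videoSource property and the push(videoFrame:) method are illustrative additions, not part of the original client:
```
// Inside WebRTCClient (sketch): retain the source created in createVideoTrack().
private var videoSource: RTCVideoSource?

// Illustrative entry point for frames produced outside the camera capturer,
// e.g. ReplayKit sample buffers converted to RTCVideoFrame.
func push(videoFrame: RTCVideoFrame) {
    guard let videoSource = self.videoSource,
          let videoCapturer = self.videoCapturer else { return }
    // RTCVideoSource adopts RTCVideoCapturerDelegate, so it can consume pushed frames.
    videoSource.capturer(videoCapturer, didCapture: videoFrame)
}
```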
Implementation steps:
// Requires `import VideoToolbox`.
var cgImage: CGImage?
guard let sourceImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
VTCreateCGImageFromCVPixelBuffer(sourceImageBuffer, options: nil, imageOut: &cgImage)
guard let cgImage = cgImage else { return }
let image = UIImage(cgImage: cgImage)
// Throttle to ~6 fps; `lastVideoTimestamp` is assumed to be a stored CMTime property (initially .zero).
let kDesiredFrameRate = 6.0
let currentTimestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
let delta = CMTimeSubtract(currentTimestamp, lastVideoTimestamp).seconds
let threshold = Double(1.0 / kDesiredFrameRate)
guard delta > threshold else { return }
lastVideoTimestamp = currentTimestamp
enum VideoRotation: Int {
    case _0 = 0
    case _90 = 90
    case _180 = 180
    case _270 = 270
}

var videoOrientation = VideoRotation._0
// ReplayKit attaches the device orientation to each sample buffer under RPVideoSampleOrientationKey.
let orientationAttachment = CMGetAttachment(sampleBuffer, key: RPVideoSampleOrientationKey as CFString, attachmentModeOut: nil) as? NSNumber
let orientation = CGImagePropertyOrientation(rawValue: orientationAttachment?.uint32Value ?? CGImagePropertyOrientation.up.rawValue) ?? .up
switch orientation {
case .up, .upMirrored, .down, .downMirrored:
    videoOrientation = ._0
case .leftMirrored, .left:
    videoOrientation = ._90
case .rightMirrored, .right:
    videoOrientation = ._270
@unknown default:
    break
}
The next two steps are handled in the main app.
Convert the image into a CVPixelBuffer frame:
// `imageData` is the encoded image received from the broadcast extension.
guard let imageData = imageData else { return }
guard let videoImage = UIImage(data: imageData) else { return }
guard let cgImage = videoImage.cgImage else { return }
guard let imageBuffer = videoImage.pixelBuffer(forImage: cgImage) else { return }
Helper method (written as a UIImage extension so it matches the videoImage.pixelBuffer(forImage:) call above):
```
extension UIImage {
    func pixelBuffer(forImage image: CGImage) -> CVPixelBuffer? {
        let frameSize = CGSize(width: image.width, height: image.height)
        var pixelBuffer: CVPixelBuffer? = nil
        let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(frameSize.width), Int(frameSize.height), kCVPixelFormatType_32BGRA, nil, &pixelBuffer)
        guard status == kCVReturnSuccess, let buffer = pixelBuffer else { return nil }
        CVPixelBufferLockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: 0))
        let data = CVPixelBufferGetBaseAddress(buffer)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let bitmapInfo = CGBitmapInfo(rawValue: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue)
        let context = CGContext(data: data, width: Int(frameSize.width), height: Int(frameSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(buffer), space: rgbColorSpace, bitmapInfo: bitmapInfo.rawValue)
        context?.draw(image, in: CGRect(x: 0, y: 0, width: image.width, height: image.height))
        CVPixelBufferUnlockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: 0))
        return buffer
    }
}
```
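A note on the pixel-format choice: kCVPixelFormatType_32BGRA combined with byteOrder32Little and premultipliedFirst means the CGContext writes BGRA bytes directly into the pixel buffer's memory, which is a layout RTCCVPixelBuffer accepts without a conversion pass.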
Final step: converting the CVPixelBuffer to an RTCVideoFrame:
// `videoRotation` is the raw value from the orientation step above;
// `timeStampNs` is the presentation timestamp in nanoseconds, e.g.
// Int64(CMTimeGetSeconds(currentTimestamp) * Double(NSEC_PER_SEC)).
let rotation = RTCVideoRotation(rawValue: videoRotation)
let rtcPixelBuffer = RTCCVPixelBuffer(pixelBuffer: imageBuffer)
let rtcVideoFrame = RTCVideoFrame(buffer: rtcPixelBuffer,
                                  rotation: rotation ?? ._0,
                                  timeStampNs: Int64(timeStampNs))
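With the frame built, the last move is the push from the start of this answer; a sketch assuming the videoSource and videoCapturer pair created in createVideoTrack():
```
// Hand the assembled frame to WebRTC; it flows to the remote peer
// through the RTCVideoTrack created from this source.
videoSource.capturer(videoCapturer, didCapture: rtcVideoFrame)
```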
Regarding "swift - iOS screen sharing with WebRTC in Swift (using ReplayKit)", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/62664114/