Reputation: 69
I'm practicing some code by making a WebRTC app which also recognizes users within the call. Almost everything is working, except that when someone joins the call, an extra div is always created. What I was planning to do was create a single div and add the video into it only once; below is the code:
// Socket.io connection to the signalling server (same origin).
const socket = io('/')
// Grid container that receives one .videoBox tile per participant.
const videoGrid = document.getElementById('video-grid');
// Fixed pixel dimensions shared by every video tile and its status bar.
const vidHeight = 500
const vidWidth = 500
const checkerBoxHeight = 53
//Flags used to check workers gone or not and for how long
var absentTime = 0; // ticks since a face was last seen; incremented ~10x/sec by detectFace
var present = true; // true while at least one face is currently detected
var time=0; // NOTE(review): appears unused in this file — confirm before removing
//Load facial recognition models
// Load the face-api.js models needed for detection and landmarks.
// Bug fix: the original passed console.log("Models Loaded!") directly to
// .then(), which logs immediately (before loading finishes) and hands
// `undefined` to .then() as the callback. Wrap it in a function, and
// report load failures instead of leaving the rejection unhandled.
Promise.all([
faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
]).then(() => console.log("Models Loaded!"))
  .catch(err => console.error("Failed to load face-api models:", err));
// PeerJS client: `undefined` lets the broker assign our peer id;
// the broker is reached on the same host at port 3001.
const myPeer = new Peer(undefined,{
host : '/',
port : '3001'
})
/**************************************************/
//Initial Self Video Creation Starts Here
/*************************************************/
//Create Video
// <video> element for the local camera preview.
const myVideo = document.createElement('video');
myVideo.setAttribute("id","myVideo");
myVideo.setAttribute("class","vidNum");
// Shrink the video so the status bar fits inside the fixed-height tile.
myVideo.style.height = vidHeight - checkerBoxHeight + 'px'
myVideo.style.width = vidWidth + 'px'
// Mute local playback so our own microphone isn't echoed back to us.
myVideo.muted = true
/**************************************************/
//Initial Self Video Creation Ends Here
/*************************************************/
/**************************************************/
//Peers and Streaming begins here
/*************************************************/
// Active calls keyed by remote peer id, so each can be closed when
// that user disconnects.
const peers = {}
navigator.mediaDevices.getUserMedia({
video : true,
audio : true,
}).then(stream =>{
//add own video to the stream
addVideoStream(myVideo,stream)
//When someone calls Answer and create new video element WITH set height and width for facial recognition canvas(new)
//answer their call then send them our stream
myPeer.on('call', call =>{
call.answer(stream);
const video = document.createElement('video')
video.style.height = vidHeight - checkerBoxHeight + 'px'
video.style.width = vidWidth + 'px'
//On call, add new video stream to newly created video
// NOTE(review): 'stream' can fire more than once for the same call,
// so addVideoStream may run repeatedly with this same element —
// a likely source of the duplicated tile divs; guard against it
// in addVideoStream or only handle the first event.
call.on('stream', userVideoStream =>{
addVideoStream(video, userVideoStream)
})
})
//When user is connected, connect them
//then add them in to own screen
socket.on('user-connected', userId =>{
// Delay presumably gives the newcomer time to wire up its own
// 'call' handler before we dial — TODO confirm this is still needed.
setTimeout(connectToNewUser,500,userId,stream)
console.log(userId + ' Joined the call!');
// connectToNewUser(userId, stream)
})
socket.on('user-disconnected', userId =>{
console.log(userId + ' Left the call!');
if(peers[userId]) peers[userId].close()
})
})
// Once the broker assigns our peer id, announce ourselves to the room
// (ROOM_ID is injected by the page template).
myPeer.on('open', id =>{
socket.emit('join-room', ROOM_ID, id)
})
/**************************************************/
//Peers and Streaming ends here
/*************************************************/
/*************************************************************/
//FUNCTIONS BEGIN HERE
/************************************************************/
//Connect to new user function
//Connect to new user function
/**
 * Call a newly-joined peer with our stream and render their video in the grid.
 *
 * Bug fixes:
 * - 'stream' can fire more than once for a single call; the original added
 *   a fresh tile for every event, producing the extra div when someone
 *   joins. Only the first event mounts the element now.
 * - 'close' can arrive before 'stream' has mounted the element, in which
 *   case video.parentElement is null and .remove() would throw.
 *
 * @param {string} userId - remote peer id to call
 * @param {MediaStream} stream - our local audio/video stream
 */
function connectToNewUser(userId,stream){
const call = myPeer.call(userId, stream)
//to show existing users
const video = document.createElement('video')
video.style.height = vidHeight - checkerBoxHeight + 'px'
video.style.width = vidWidth + 'px'
let videoAdded = false
call.on('stream', userVideoStream =>{
// Guard: mount the tile only once per call.
if (videoAdded) return
videoAdded = true
addVideoStream(video, userVideoStream)
})
call.on('close', ()=>{
// Remove the whole tile if it was mounted; otherwise just the element.
if (video.parentElement) video.parentElement.remove()
else video.remove()
delete peers[userId]
})
peers[userId] = call
}
// Unused stub — presumably intended to hold the tile-creation logic that is
// currently inlined in addVideoStream; TODO implement or remove.
function createBox(){
}
//Add video stream function
//Add video stream function
/**
 * Attach a MediaStream to a <video> element and mount it in the grid inside
 * a .videoBox wrapper topped by a status ("checker") bar.
 *
 * Bug fix (the "extra div" problem): peer 'stream' events can fire more than
 * once per call, so this function may be invoked repeatedly with the SAME
 * <video> element. The original built a brand-new wrapper div (and attached
 * another 'loadedmetadata' listener) on every invocation. If the element is
 * already mounted we now just refresh its stream and return.
 *
 * @param {HTMLVideoElement} video - element to render the stream in
 * @param {MediaStream} stream - the stream to play
 */
function addVideoStream(video,stream){
// Already mounted: update the source only — do not build another tile.
if (video.parentElement) {
video.srcObject = stream
return
}
video.srcObject = stream
video.addEventListener('loadedmetadata', () =>{
video.play()
})
//ADD VIDEO BOX TO VIDEO GRID
const videoBox = document.createElement("div");
videoBox.setAttribute("class",'videoBox');
videoGrid.append(videoBox);
//THEN, ADD CHECKER BOX
const checkerBox = document.createElement("div");
checkerBox.style.width = vidWidth + 'px'
checkerBox.style.height = checkerBoxHeight - 6 + 'px'
videoBox.append(checkerBox);
// NOTE(review): ids must be unique; with several participants, multiple
// bars share id "checkerBox" and getElementById only finds the first.
// Kept as-is because detectFace looks it up by this id.
checkerBox.setAttribute("id","checkerBox");
videoBox.append(video)
//detectFace(video,videoBox);
//checkIfPresent();
}
//Facial Detection
/**
 * Run face-api.js detection on a playing <video>, draw boxes and landmarks
 * on an overlay canvas, and drive the present/absent flags and status bar.
 * @param {HTMLVideoElement} faceCam - video element to analyse
 * @param {HTMLElement} vidBox - tile wrapper that receives the overlay canvas
 */
function detectFace(faceCam,vidBox){
faceCam.addEventListener('play', () =>{
const canvas = faceapi.createCanvasFromMedia(faceCam)
vidBox.append(canvas);
// Poll ~10x per second; absentTime therefore counts tenths of a second.
setInterval(async()=>{
//finding detections
const detections = await faceapi.detectAllFaces(faceCam,
new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks()
// Match the overlay to the visible video area (status bar excluded).
const displaySize = {
width: vidWidth ,
height: vidHeight - checkerBoxHeight
}
faceapi.matchDimensions(canvas,displaySize)
const resizedDetections = faceapi.resizeResults(detections,displaySize)
//clear the previous frame's drawings before redrawing
canvas.getContext('2d').clearRect(0,0, canvas.width,canvas.height)
faceapi.draw.drawDetections(canvas, resizedDetections)
faceapi.draw.drawFaceLandmarks(canvas,resizedDetections)
//Create green or red box depending on if the person is present
// NOTE(review): several tiles share id "checkerBox", so this always
// targets the first bar in the document — verify for multi-user calls.
var checkerBox = document.getElementById("checkerBox");
if(detections.length>=1){
present = true;
absentTime = 0;
checkerBoxTrue(checkerBox)
}else{
present = false;
absentTime++;
checkerBoxFalse(checkerBox)
}
}, 100)
})
}
/**
 * Once a second, log how long the worker has been away.
 * absentTime is incremented ~10x/sec by the detection loop, so dividing
 * by 10 converts ticks to (rounded-up) seconds.
 */
function checkIfPresent(){
setInterval(() => {
if (present) return;
const secondsGone = Math.ceil(absentTime / 10);
console.log(`Worker is not present for : ${secondsGone} seconds`);
}, 1000);
}
/**
 * Mark the worker as present: show the "Present" headline on a green bar.
 * @param {HTMLElement} checkerBox - the status bar element to update
 */
function checkerBoxTrue(checkerBox){
checkerBox.innerHTML = "<h1 class='workerStatus'>Worker is Present</h1>";
checkerBox.style.backgroundColor = "green";
}
/**
 * Mark the worker as absent: show the "NOT Present" headline on a red bar.
 * @param {HTMLElement} checkerBox - the status bar element to update
 */
function checkerBoxFalse(checkerBox){
checkerBox.innerHTML = "<h1 class='workerStatus'>Worker is NOT Present</h1>";
checkerBox.style.backgroundColor = "red";
}
The HTML is just a basic page with 2 divs:
<body>
<div class="grid-container">
<div id="video-grid"> </div>
</div>
</body>
Upvotes: 0
Views: 44
Reputation: 196
While I haven't built exactly what you're trying to build before, when I've built video apps on top of third-party WebRTC libraries like Daily (full disclosure, I work there), I've seen an extra video "tile" div show up when I forget to loop over everyone but the local camera stream. Are you accounting for that already?
For example, here's the getTiles
function in a React video chat app:
// Partition every participant in callState.callItems into large and small
// tiles. A tile is "large" when it's a screen share, or when it's a remote
// participant and nobody in the call is screen-sharing — so the local
// camera stream is rendered as a small tile, not duplicated as a big one.
function getTiles() {
let largeTiles = [];
let smallTiles = [];
Object.entries(callState.callItems).forEach(([id, callItem]) => {
const isLarge =
isScreenShare(id) ||
(!isLocal(id) && !containsScreenShare(callState.callItems));
const tile = (
<Tile
key={id}
videoTrackState={callItem.videoTrackState}
audioTrackState={callItem.audioTrackState}
isLocalPerson={isLocal(id)}
isLarge={isLarge}
disableCornerMessage={isScreenShare(id)}
onClick={
isLocal(id)
? null
: () => {
sendHello(id);
}
}
/>
);
if (isLarge) {
largeTiles.push(tile);
} else {
smallTiles.push(tile);
}
});
return [largeTiles, smallTiles];
}
Where isLocal
is imported from separate call state. I hope that could help somewhat, and good luck!
Upvotes: 1