Commit fcb4755d by Christoph

Merge branch 'videoinput'

parents fa8e1b58 641b1d97
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title></title>
<script src="./bundle/apps.js"></script>
</head>
<body>
<div id="videoinputapp1">
<h1>Callapp1:</h1>
URL to connect:
<p class="callapp_url">
</p>
<input type="checkbox" name="audio" class="callapp_send_audio" checked autocomplete="off"> Audio
<input type="checkbox" name="video" class="callapp_send_video" checked autocomplete="off"> Video
<input type= "text" class="callapp_address" autocomplete="off">
<button class="callapp_button"> Join </button>
<div class="callapp_local_video">local video</div>
<div class="callapp_remote_video">remote video</div>
</div>
<canvas id="canvas1"> </canvas>
<script>
var rgbToHex = function (rgb) {
var hex = Number(rgb).toString(16);
if (hex.length < 2) {
hex = "0" + hex;
}
return hex;
};
const canvas = document.querySelector("#canvas1");
const ctx = canvas.getContext("2d");
let counter = 0;
setInterval(()=>{
const color = "#FFFF" + rgbToHex(counter%255);
ctx.fillStyle = color;
ctx.fillRect(0, 0, canvas.width, canvas.height);
counter++;
}, 50);
apps.videoinputapp(document.querySelector("#videoinputapp1"), canvas);
//apps.callapp(document.querySelector("#callapp2"));
</script>
</body>
</html>
\ No newline at end of file
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>WebGL Demo</title>
<link rel="stylesheet" href="../webgl.css" type="text/css">
</head>
<body>
<canvas id="glcanvas" width="640" height="480"></canvas>
</body>
<script src="../bundle/awrtc.js"></script>
<script src="gl-matrix.js"></script>
<script src="webgl-demo_changed.js"></script>
<script>
let canvas = document.querySelector("#glcanvas");
let nconfig = new awrtc.NetworkConfig();
let call = new awrtc.BrowserWebRtcCall(nconfig);
call.addEventListener((sender, args) => {
if(args.Type === awrtc.CallEventType.FrameUpdate)
{
let gl = canvas.getContext("webgl");
if(args.Frame.Width != globalTextureWidth || args.Frame.Height != globalTextureHeight)
{
const pixel = new Uint8Array(args.Frame.Width * args.Frame.Height * 3 );
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGB, args.Frame.Width, args.Frame.Height, 0, gl.RGB, gl.UNSIGNED_BYTE, pixel);
globalTextureWidth = args.Frame.Width ;
globalTextureHeight = args.Frame.Height ;
}
args.Frame.ToTexture(gl, globalTextureId);
}
});
//As the system is designed for realtime graphics we have to call the Update method. Events are only
//triggered during this Update call!
let intervalId = setInterval(() => {
call.Update();
}, 50);
let config = new awrtc.MediaConfig();
config.Audio = false;
config.Video = true;
config.FrameUpdates = true;
config.IdealWidth = 640;
config.IdealHeight = 480;
config.IdealFps = 30;
console.log("requested config:" + JSON.stringify(config));
call.Configure(config);
</script>
</html>
\ No newline at end of file
Slightly changed WebGL example from Mozilla to test the frame copy from WebRTC -> VideoElement -> WebGL Texture.
Source:
https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API/Tutorial/Using_textures_in_WebGL
https://github.com/mdn/webgl-examples/tree/gh-pages/tutorial/sample6
\ No newline at end of file
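For context, a minimal standalone sketch of that copy path (a hedged illustration only: it assumes a plain WebGL1 context and an HTMLVideoElement that is already playing; the demo files below do the same thing through awrtc's FrameUpdate events and LazyFrame.ToTexture):

// Sketch only: upload the current video frame into a WebGL texture each animation frame.
const glCanvas = document.querySelector("#glcanvas") as HTMLCanvasElement;
const gl = glCanvas.getContext("webgl") as WebGLRenderingContext;
const video = document.createElement("video"); // in the demo this element is owned by awrtc

const texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
// Non power-of-two video sizes require clamping and no mipmaps in WebGL1.
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);

function uploadFrame() {
    if (video.readyState >= 2) { // HAVE_CURRENT_DATA: a decoded frame is available
        gl.bindTexture(gl.TEXTURE_2D, texture);
        // The browser copies the decoded frame straight into the texture.
        gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, video);
    }
    requestAnimationFrame(uploadFrame);
}
requestAnimationFrame(uploadFrame);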
var globalTextureId = -1;
var globalTextureWidth = -1;
var globalTextureHeight = -1;
var cubeRotation = 0.0;
main();
//
// Start here
//
function main() {
const canvas = document.querySelector('#glcanvas');
const gl = canvas.getContext('webgl');
// If we don't have a GL context, give up now
if (!gl) {
alert('Unable to initialize WebGL. Your browser or machine may not support it.');
return;
}
// Vertex shader program
const vsSource = `
attribute vec4 aVertexPosition;
attribute vec2 aTextureCoord;
uniform mat4 uModelViewMatrix;
uniform mat4 uProjectionMatrix;
varying highp vec2 vTextureCoord;
void main(void) {
gl_Position = uProjectionMatrix * uModelViewMatrix * aVertexPosition;
vTextureCoord = aTextureCoord;
}
`;
// Fragment shader program
const fsSource = `
varying highp vec2 vTextureCoord;
uniform sampler2D uSampler;
void main(void) {
gl_FragColor = texture2D(uSampler, vTextureCoord);
}
`;
// Initialize a shader program; this is where all the lighting
// for the vertices and so forth is established.
const shaderProgram = initShaderProgram(gl, vsSource, fsSource);
// Collect all the info needed to use the shader program.
// Look up which attributes our shader program is using
// for aVertexPosition, aTextureCoord and also
// look up uniform locations.
const programInfo = {
program: shaderProgram,
attribLocations: {
vertexPosition: gl.getAttribLocation(shaderProgram, 'aVertexPosition'),
textureCoord: gl.getAttribLocation(shaderProgram, 'aTextureCoord'),
},
uniformLocations: {
projectionMatrix: gl.getUniformLocation(shaderProgram, 'uProjectionMatrix'),
modelViewMatrix: gl.getUniformLocation(shaderProgram, 'uModelViewMatrix'),
uSampler: gl.getUniformLocation(shaderProgram, 'uSampler'),
},
};
// Here's where we call the routine that builds all the
// objects we'll be drawing.
const buffers = initBuffers(gl);
loadTexture(gl, 'cubetexture.png');
var then = 0;
// Draw the scene repeatedly
function render(now) {
now *= 0.001; // convert to seconds
const deltaTime = now - then;
then = now;
drawScene(gl, programInfo, buffers, globalTextureId, deltaTime);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
//
// initBuffers
//
// Initialize the buffers we'll need. For this demo, we just
// have one object -- a simple three-dimensional cube.
//
function initBuffers(gl) {
// Create a buffer for the cube's vertex positions.
const positionBuffer = gl.createBuffer();
// Select the positionBuffer as the one to apply buffer
// operations to from here out.
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
// Now create an array of positions for the cube.
const positions = [
// Front face
-1.0, -1.0, 1.0,
1.0, -1.0, 1.0,
1.0, 1.0, 1.0,
-1.0, 1.0, 1.0,
// Back face
-1.0, -1.0, -1.0,
-1.0, 1.0, -1.0,
1.0, 1.0, -1.0,
1.0, -1.0, -1.0,
// Top face
-1.0, 1.0, -1.0,
-1.0, 1.0, 1.0,
1.0, 1.0, 1.0,
1.0, 1.0, -1.0,
// Bottom face
-1.0, -1.0, -1.0,
1.0, -1.0, -1.0,
1.0, -1.0, 1.0,
-1.0, -1.0, 1.0,
// Right face
1.0, -1.0, -1.0,
1.0, 1.0, -1.0,
1.0, 1.0, 1.0,
1.0, -1.0, 1.0,
// Left face
-1.0, -1.0, -1.0,
-1.0, -1.0, 1.0,
-1.0, 1.0, 1.0,
-1.0, 1.0, -1.0,
];
// Now pass the list of positions into WebGL to build the
// shape. We do this by creating a Float32Array from the
// JavaScript array, then use it to fill the current buffer.
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
// Now set up the texture coordinates for the faces.
const textureCoordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, textureCoordBuffer);
const textureCoordinates = [
// Front
0.0, 0.0,
1.0, 0.0,
1.0, 1.0,
0.0, 1.0,
// Back
0.0, 0.0,
1.0, 0.0,
1.0, 1.0,
0.0, 1.0,
// Top
0.0, 0.0,
1.0, 0.0,
1.0, 1.0,
0.0, 1.0,
// Bottom
0.0, 0.0,
1.0, 0.0,
1.0, 1.0,
0.0, 1.0,
// Right
0.0, 0.0,
1.0, 0.0,
1.0, 1.0,
0.0, 1.0,
// Left
0.0, 0.0,
1.0, 0.0,
1.0, 1.0,
0.0, 1.0,
];
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(textureCoordinates),
gl.STATIC_DRAW);
// Build the element array buffer; this specifies the indices
// into the vertex arrays for each face's vertices.
const indexBuffer = gl.createBuffer();
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indexBuffer);
// This array defines each face as two triangles, using the
// indices into the vertex array to specify each triangle's
// position.
const indices = [
0, 1, 2, 0, 2, 3, // front
4, 5, 6, 4, 6, 7, // back
8, 9, 10, 8, 10, 11, // top
12, 13, 14, 12, 14, 15, // bottom
16, 17, 18, 16, 18, 19, // right
20, 21, 22, 20, 22, 23, // left
];
// Now send the element array to GL
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER,
new Uint16Array(indices), gl.STATIC_DRAW);
return {
position: positionBuffer,
textureCoord: textureCoordBuffer,
indices: indexBuffer,
};
}
//
// Initialize a texture and load an image.
// When the image has finished loading, copy it into the texture.
//
function loadTexture(gl, url) {
const texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
// Because images have to be downloaded over the internet
// they might take a moment until they are ready.
// Until then put a single pixel in the texture so we can
// use it immediately. When the image has finished downloading
// we'll update the texture with the contents of the image.
const level = 0;
const internalFormat = gl.RGBA;
const width = 1;
const height = 1;
const border = 0;
const srcFormat = gl.RGBA;
const srcType = gl.UNSIGNED_BYTE;
const pixel = new Uint8Array([0, 0, 255, 255]); // opaque blue
gl.texImage2D(gl.TEXTURE_2D, level, internalFormat,
width, height, border, srcFormat, srcType,
pixel);
const image = new Image();
image.onload = function() {
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texImage2D(gl.TEXTURE_2D, level, internalFormat,
srcFormat, srcType, image);
// WebGL1 has different requirements for power of 2 images
// vs non power of 2 images so check if the image is a
// power of 2 in both dimensions.
//if (isPowerOf2(image.width) && isPowerOf2(image.height)) {
// // Yes, it's a power of 2. Generate mips.
// gl.generateMipmap(gl.TEXTURE_2D);
//} else {
// No, it's not a power of 2. Turn off mips and set
// wrapping to clamp to edge
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
//}
globalTextureWidth = image.width;
globalTextureHeight = image.height;
};
image.src = url;
globalTextureId = texture;
return texture;
}
function isPowerOf2(value) {
return (value & (value - 1)) == 0;
}
//
// Draw the scene.
//
function drawScene(gl, programInfo, buffers, texture, deltaTime) {
gl.clearColor(0.0, 0.0, 0.0, 1.0); // Clear to black, fully opaque
gl.clearDepth(1.0); // Clear everything
gl.enable(gl.DEPTH_TEST); // Enable depth testing
gl.depthFunc(gl.LEQUAL); // Near things obscure far things
// Clear the canvas before we start drawing on it.
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
// Create a perspective matrix, a special matrix that is
// used to simulate the distortion of perspective in a camera.
// Our field of view is 45 degrees, with a width/height
// ratio that matches the display size of the canvas
// and we only want to see objects between 0.1 units
// and 100 units away from the camera.
const fieldOfView = 45 * Math.PI / 180; // in radians
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.1;
const zFar = 100.0;
const projectionMatrix = mat4.create();
// note: glmatrix.js always has the first argument
// as the destination to receive the result.
mat4.perspective(projectionMatrix,
fieldOfView,
aspect,
zNear,
zFar);
// Set the drawing position to the "identity" point, which is
// the center of the scene.
const modelViewMatrix = mat4.create();
// Now move the drawing position a bit to where we want to
// start drawing the square.
mat4.translate(modelViewMatrix, // destination matrix
modelViewMatrix, // matrix to translate
[-0.0, 0.0, -6.0]); // amount to translate
mat4.rotate(modelViewMatrix, // destination matrix
modelViewMatrix, // matrix to rotate
cubeRotation, // amount to rotate in radians
[0, 0, 1]); // axis to rotate around (Z)
mat4.rotate(modelViewMatrix, // destination matrix
modelViewMatrix, // matrix to rotate
cubeRotation * .7,// amount to rotate in radians
[0, 1, 0]); // axis to rotate around (Y)
// Tell WebGL how to pull out the positions from the position
// buffer into the vertexPosition attribute
{
const numComponents = 3;
const type = gl.FLOAT;
const normalize = false;
const stride = 0;
const offset = 0;
gl.bindBuffer(gl.ARRAY_BUFFER, buffers.position);
gl.vertexAttribPointer(
programInfo.attribLocations.vertexPosition,
numComponents,
type,
normalize,
stride,
offset);
gl.enableVertexAttribArray(
programInfo.attribLocations.vertexPosition);
}
// Tell WebGL how to pull out the texture coordinates from
// the texture coordinate buffer into the textureCoord attribute.
{
const numComponents = 2;
const type = gl.FLOAT;
const normalize = false;
const stride = 0;
const offset = 0;
gl.bindBuffer(gl.ARRAY_BUFFER, buffers.textureCoord);
gl.vertexAttribPointer(
programInfo.attribLocations.textureCoord,
numComponents,
type,
normalize,
stride,
offset);
gl.enableVertexAttribArray(
programInfo.attribLocations.textureCoord);
}
// Tell WebGL which indices to use to index the vertices
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, buffers.indices);
// Tell WebGL to use our program when drawing
gl.useProgram(programInfo.program);
// Set the shader uniforms
gl.uniformMatrix4fv(
programInfo.uniformLocations.projectionMatrix,
false,
projectionMatrix);
gl.uniformMatrix4fv(
programInfo.uniformLocations.modelViewMatrix,
false,
modelViewMatrix);
// Specify the texture to map onto the faces.
// Tell WebGL we want to affect texture unit 0
gl.activeTexture(gl.TEXTURE0);
// Bind the texture to texture unit 0
gl.bindTexture(gl.TEXTURE_2D, texture);
// Tell the shader we bound the texture to texture unit 0
gl.uniform1i(programInfo.uniformLocations.uSampler, 0);
{
const vertexCount = 36;
const type = gl.UNSIGNED_SHORT;
const offset = 0;
gl.drawElements(gl.TRIANGLES, vertexCount, type, offset);
}
// Update the rotation for the next draw
cubeRotation += deltaTime / 4;
}
//
// Initialize a shader program, so WebGL knows how to draw our data
//
function initShaderProgram(gl, vsSource, fsSource) {
const vertexShader = loadShader(gl, gl.VERTEX_SHADER, vsSource);
const fragmentShader = loadShader(gl, gl.FRAGMENT_SHADER, fsSource);
// Create the shader program
const shaderProgram = gl.createProgram();
gl.attachShader(shaderProgram, vertexShader);
gl.attachShader(shaderProgram, fragmentShader);
gl.linkProgram(shaderProgram);
// If creating the shader program failed, alert
if (!gl.getProgramParameter(shaderProgram, gl.LINK_STATUS)) {
alert('Unable to initialize the shader program: ' + gl.getProgramInfoLog(shaderProgram));
return null;
}
return shaderProgram;
}
//
// creates a shader of the given type, uploads the source and
// compiles it.
//
function loadShader(gl, type, source) {
const shader = gl.createShader(type);
// Send the source to the shader object
gl.shaderSource(shader, source);
// Compile the shader program
gl.compileShader(shader);
// See if it compiled successfully
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
alert('An error occurred compiling the shaders: ' + gl.getShaderInfoLog(shader));
gl.deleteShader(shader);
return null;
}
return shader;
}
// Karma configuration
// Generated on Mon Jun 24 2019 19:59:32 GMT+1200 (New Zealand Standard Time)
module.exports = function(config) {
config.set({
// base path that will be used to resolve all patterns (eg. files, exclude)
basePath: '',
// frameworks to use
// available frameworks: https://npmjs.org/browse/keyword/karma-adapter
frameworks: ['jasmine'],
// list of files / patterns to load in the browser
files: [
'build/bundle/*.js'
],
// list of files / patterns to exclude
exclude: [
],
// preprocess matching files before serving them to the browser
// available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor
preprocessors: {
},
// test results reporter to use
// possible values: 'dots', 'progress'
// available reporters: https://npmjs.org/browse/keyword/karma-reporter
reporters: ['progress'],
// web server port
port: 9876,
// enable / disable colors in the output (reporters and logs)
colors: true,
// level of logging
// possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG
logLevel: config.LOG_INFO,
// enable / disable watching file and executing tests whenever any file changes
autoWatch: true,
// start these browsers
// available browser launchers: https://npmjs.org/browse/keyword/karma-launcher
browsers: ['FirefoxCustom'],
customLaunchers: {
FirefoxCustom: {
base: 'Firefox',
prefs: {
'media.navigator.permission.disabled': true,
'media.navigator.streams.fake' : true
}
}
},
// Continuous Integration mode
// if true, Karma captures browsers, runs the tests and exits
singleRun: false,
// Concurrency level
// how many browsers should be started simultaneously
concurrency: Infinity
})
}
{
"name": "awrtc_browser",
"version": "1.984.5",
"version": "0.985.0",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
......
{
"name": "awrtc_browser",
"version": "1.984.5",
"version": "1.985.0",
"description": "Compatible browser implementation to the Unity asset WebRTC Video Chat. Try examples in build folder",
"author": "because-why-not.com Limited",
"license": "BSD-3-Clause",
......
......@@ -29,7 +29,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import * as awrtc from "../awrtc/index"
import { ConnectionId } from "../awrtc/index";
/**
* Main (and most complicated) example for using BrowserWebRtcCall.
......@@ -357,7 +356,7 @@ export class CallApp
this.mUiLocalVideoParent.appendChild(video);
}
private Ui_OnRemoteVideo(video : HTMLVideoElement, id: ConnectionId){
private Ui_OnRemoteVideo(video : HTMLVideoElement, id: awrtc.ConnectionId){
this.mUiRemoteVideoParent.appendChild( document.createElement("br"));
this.mUiRemoteVideoParent.appendChild(new Text("connection " + id.id));
......
......@@ -256,7 +256,6 @@ class MinimalCall
//other.
export function BrowserWebRtcCall_minimal() {
awrtc.BrowserMediaStream.sUseLazyFrames = true;
let netConfig = new awrtc.NetworkConfig();
netConfig.IsConference = false;
netConfig.SignalingUrl = DefaultValues.Signaling;
......
......@@ -30,4 +30,5 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
export * from "./apphelpers"
export * from "./testapps"
export * from "./examples"
export * from "./callapp"
\ No newline at end of file
export * from "./callapp"
export * from "./videoinputapp"
\ No newline at end of file
......@@ -29,7 +29,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import * as awrtc from "../awrtc/index"
import {DefaultValues, GetRandomKey} from "./apphelpers"
import { DeviceApi, DeviceInfo } from "../awrtc/index";
import { DeviceApi, DeviceInfo, BrowserMediaStream } from "../awrtc/index";
//This file only contains badly maintained
//test apps. Use only for experimentation.
......@@ -387,26 +387,34 @@ class FpsCounter
lastRefresh = 0;
fps = 0;
counter = 0;
isNew = false;
public get Fps()
{
return Math.round(this.fps);
}
public get Counter()
public get IsNew() : boolean
{
return this.counter;
if(this.isNew){
this.isNew = false;
return true;
}
return false;
}
Update():void
{
this.counter++;
let diff = new Date().getTime() - this.lastRefresh;
if(diff > 1000)
let refresh_time = 2000;
if(diff > refresh_time)
{
this.fps = this.counter / (diff / 1000);
this.counter = 0;
this.lastRefresh = new Date().getTime();
this.isNew = true;
}
}
}
......@@ -415,7 +423,7 @@ class FpsCounter
//and accesses the resulting frame data directly
export function BrowserMediaNetwork_frameaccess() {
//BrowserMediaStream.DEFAULT_FRAMERATE = 60;
//awrtc.BrowserMediaStream.DEBUG_SHOW_ELEMENTS = true;
let address = GetRandomKey();
......@@ -427,8 +435,15 @@ export function BrowserMediaNetwork_frameaccess() {
let network2 = new awrtc.BrowserMediaNetwork(networkConfig);
let mediaConfig1 = new awrtc.MediaConfig();
mediaConfig1.Audio = true;
mediaConfig1.Audio = false;
mediaConfig1.Video = true;
/*
mediaConfig1.IdealWidth = 320;
mediaConfig1.IdealHeight = 240;
//fps seems to be ignored by browsers even if
//the camera specifically supports that setting
mediaConfig1.IdealFps = 15;
*/
let mediaConfig2 = new awrtc.MediaConfig();
mediaConfig2.Audio = false;
mediaConfig2.Video = false;
......@@ -436,6 +451,7 @@ export function BrowserMediaNetwork_frameaccess() {
let localFps = new FpsCounter();
let remoteFps = new FpsCounter();
let loopRate = new FpsCounter();
......@@ -466,15 +482,17 @@ export function BrowserMediaNetwork_frameaccess() {
setInterval(() => {
network1.Update();
loopRate.Update();
if(loopRate.IsNew)
console.log("Loop rate: " + loopRate.Fps);
let frame1: awrtc.IFrameData = null;
let frame2: awrtc.IFrameData = null;
frame1 = network1.TryGetFrame(awrtc.ConnectionId.INVALID);
if (frame1 != null)
{
localFps.Update();
if(localFps.Counter % 30 == 0)
if(localFps.IsNew)
console.log("local1 width" + frame1.Width + " height:" + frame1.Height + "fps: " + localFps.Fps + " data:" + frame1.Buffer[0]);
}
......@@ -515,7 +533,7 @@ export function BrowserMediaNetwork_frameaccess() {
if (frame2 != null)
{
remoteFps.Update();
if(remoteFps.Counter % 30 == 0)
if(remoteFps.IsNew)
console.log("remote2 width" + frame2.Width + " height:" + frame2.Height + "fps: " + remoteFps.Fps + " data:" + frame2.Buffer[0]);
}
}
......
import * as awrtc from "../awrtc/index"
/**
* Copy of the CallApp to test custom video input
*/
export class VideoInputApp {
public static sVideoDevice = null;
private mAddress;
private mNetConfig = new awrtc.NetworkConfig();
private mCall: awrtc.BrowserWebRtcCall = null;
//update loop
private mIntervalId: any = -1;
private mLocalVideo: HTMLVideoElement = null;
private mRemoteVideo = {};
private mIsRunning = false;
public constructor() {
this.mNetConfig.IceServers = [
{ urls: "stun:stun.because-why-not.com:443" },
{ urls: "stun:stun.l.google.com:19302" }
];
//use for testing conferences
//this.mNetConfig.IsConference = true;
//this.mNetConfig.SignalingUrl = "wss://signaling.because-why-not.com/testshared";
this.mNetConfig.IsConference = false;
this.mNetConfig.SignalingUrl = "wss://signaling.because-why-not.com/callapp";
}
private GetParameterByName(name) {
var url = window.location.href;
name = name.replace(/[\[\]]/g, "\\$&");
var regex = new RegExp("[?&]" + name + "(=([^&#]*)|&|#|$)"), results = regex.exec(url);
if (!results)
return null;
if (!results[2])
return '';
return decodeURIComponent(results[2].replace(/\+/g, " "));
}
private tobool(value, defaultval) {
if (value === true || value === "true")
return true;
if (value === false || value === "false")
return false;
return defaultval;
}
public Start(address, audio, video): void {
if (this.mCall != null)
this.Stop();
this.mIsRunning = true;
this.Ui_OnStart()
console.log("start");
console.log("Using signaling server url: " + this.mNetConfig.SignalingUrl);
//create media configuration
var config = new awrtc.MediaConfig();
config.Audio = audio;
config.Video = video;
config.IdealWidth = 640;
config.IdealHeight = 480;
config.IdealFps = 30;
if (VideoInputApp.sVideoDevice !== null) {
config.VideoDeviceName = VideoInputApp.sVideoDevice;
}
//For usage in HTML set FrameUpdates to false and wait for MediaUpdate to
//get the VideoElement. By default awrtc would deliver frames individually
//for use in Unity WebGL
console.log("requested config:" + JSON.stringify(config));
//setup our high level call class.
this.mCall = new awrtc.BrowserWebRtcCall(this.mNetConfig);
//handle events (get triggered after Configure / Listen call)
//+ugly lambda to avoid losing "this" reference
this.mCall.addEventListener((sender, args) => {
this.OnNetworkEvent(sender, args);
});
//As the system is designed for realtime graphics we have to call the Update method. Events are only
//triggered during this Update call!
this.mIntervalId = setInterval(() => {
this.Update();
}, 50);
//configure media. This will request access to media and can fail if the user doesn't have a proper device or
//blocks access
this.mCall.Configure(config);
//Try to listen to the address
//Conference mode = everyone listening will connect to each other
//Call mode -> If the address is free it will wait for someone else to connect
// -> If the address is used then it will fail to listen and then try to connect via Call(address);
this.mCall.Listen(address);
}
public Stop(): void {
this.Cleanup();
}
private Cleanup(): void {
if (this.mCall != null) {
this.mCall.Dispose();
this.mCall = null;
clearInterval(this.mIntervalId);
this.mIntervalId = -1;
this.mIsRunning = false;
this.mLocalVideo = null;
this.mRemoteVideo = {};
}
this.Ui_OnCleanup();
}
private Update(): void {
if (this.mCall != null)
this.mCall.Update();
}
private OnNetworkEvent(sender: any, args: awrtc.CallEventArgs): void {
//User gave access to requested camera/ microphone
if (args.Type == awrtc.CallEventType.ConfigurationComplete) {
console.log("configuration complete");
}
else if (args.Type == awrtc.CallEventType.MediaUpdate) {
let margs = args as awrtc.MediaUpdatedEventArgs;
if (this.mLocalVideo == null && margs.ConnectionId == awrtc.ConnectionId.INVALID) {
var videoElement = margs.VideoElement;
this.mLocalVideo = videoElement;
this.Ui_OnLocalVideo(videoElement);
console.log("local video added resolution:" + videoElement.videoWidth + videoElement.videoHeight + " fps: ??");
}
else if (margs.ConnectionId != awrtc.ConnectionId.INVALID && this.mRemoteVideo[margs.ConnectionId.id] == null) {
var videoElement = margs.VideoElement;
this.mRemoteVideo[margs.ConnectionId.id] = videoElement;
this.Ui_OnRemoteVideo(videoElement, margs.ConnectionId);
console.log("remote video added resolution:" + videoElement.videoWidth + videoElement.videoHeight + " fps: ??");
}
}
else if (args.Type == awrtc.CallEventType.ListeningFailed) {
//First attempt of this example is to try to listen on a certain address
//for conference calls this should always work (unless the internet is dead)
if (this.mNetConfig.IsConference == false) {
//no conference call and listening failed? someone might have claimed the address.
//Try to connect to existing call
this.mCall.Call(this.mAddress);
}
else {
let errorMsg = "Listening failed. Offline? Server dead?";
console.error(errorMsg);
this.Ui_OnError(errorMsg);
this.Cleanup();
return;
}
}
else if (args.Type == awrtc.CallEventType.ConnectionFailed) {
//Outgoing call failed entirely. This can mean there is no address to connect to,
//server is offline, internet is dead, firewall blocked access, ...
let errorMsg = "Connection failed. Offline? Server dead? ";
console.error(errorMsg);
this.Ui_OnError(errorMsg);
this.Cleanup();
return;
}
else if (args.Type == awrtc.CallEventType.CallEnded) {
//call ended or was disconnected
var callEndedEvent = args as awrtc.CallEndedEventArgs;
console.log("call ended with id " + callEndedEvent.ConnectionId.id);
delete this.mRemoteVideo[callEndedEvent.ConnectionId.id];
this.Ui_OnLog("Disconnected from user with id " + callEndedEvent.ConnectionId.id);
//check if this was the last user
if (this.mNetConfig.IsConference == false && Object.keys(this.mRemoteVideo).length == 0) {
//1 to 1 call and only user left -> quit
this.Cleanup();
return;
}
}
else if (args.Type == awrtc.CallEventType.Message) {
//no ui for this yet. simply echo messages for testing
let messageArgs = args as awrtc.MessageEventArgs;
this.mCall.Send(messageArgs.Content, messageArgs.Reliable, messageArgs.ConnectionId);
}
else if (args.Type == awrtc.CallEventType.DataMessage) {
//no ui for this yet. simply echo messages for testing
let messageArgs = args as awrtc.DataMessageEventArgs;
this.mCall.SendData(messageArgs.Content, messageArgs.Reliable, messageArgs.ConnectionId);
}
else if (args.Type == awrtc.CallEventType.CallAccepted) {
let arg = args as awrtc.CallAcceptedEventArgs;
console.log("New call accepted id: " + arg.ConnectionId.id);
}
else if (args.Type == awrtc.CallEventType.WaitForIncomingCall) {
console.log("Waiting for incoming call ...");
}
else {
console.log("Unhandled event: " + args.Type);
}
}
//UI calls. should be moved out into its own class later
private mAudio;
private mVideo;
private mAutostart;
private mUiAddress: HTMLInputElement;
private mUiAudio: HTMLInputElement;
private mUiVideo: HTMLInputElement;
private mUiButton: HTMLButtonElement;
private mUiUrl: HTMLElement;
private mUiLocalVideoParent: HTMLElement;
private mUiRemoteVideoParent: HTMLElement;
public setupUi(parent: HTMLElement) {
this.mUiAddress = parent.querySelector<HTMLInputElement>(".callapp_address");
this.mUiAudio = parent.querySelector<HTMLInputElement>(".callapp_send_audio");
this.mUiVideo = parent.querySelector<HTMLInputElement>(".callapp_send_video");
this.mUiUrl = parent.querySelector<HTMLParagraphElement>(".callapp_url");
this.mUiButton = parent.querySelector<HTMLButtonElement>(".callapp_button");
this.mUiLocalVideoParent = parent.querySelector<HTMLParagraphElement>(".callapp_local_video");
this.mUiRemoteVideoParent = parent.querySelector<HTMLParagraphElement>(".callapp_remote_video");
this.mUiAudio.onclick = this.Ui_OnUpdate;
this.mUiVideo.onclick = this.Ui_OnUpdate;
this.mUiAddress.onkeyup = this.Ui_OnUpdate;
this.mUiButton.onclick = this.Ui_OnStartStopButtonClicked;
//set default value + make string "true"/"false" to proper booleans
this.mAudio = this.GetParameterByName("audio");
this.mAudio = this.tobool(this.mAudio, true)
this.mVideo = this.GetParameterByName("video");
this.mVideo = this.tobool(this.mVideo, true);
this.mAutostart = this.GetParameterByName("autostart");
this.mAutostart = this.tobool(this.mAutostart, false);
this.mAddress = this.GetParameterByName("a");
//if autostart is set but no address is given -> create one and reopen the page
if (this.mAddress === null && this.mAutostart == true) {
this.mAddress = this.GenerateRandomKey();
window.location.href = this.GetUrlParams();
}
else {
if (this.mAddress === null)
this.mAddress = this.GenerateRandomKey();
this.Ui_Update();
}
//used for interacting with the Unity CallApp
//current hack to get the html element delivered. By default
//just the image is copied and given as an array.
//Lazy frames will be the default soon though
if (this.mAutostart) {
console.log("Starting automatically ... ")
this.Start(this.mAddress, this.mAudio, this.mVideo);
}
console.log("address: " + this.mAddress + " audio: " + this.mAudio + " video: " + this.mVideo + " autostart: " + this.mAutostart);
}
private Ui_OnStart() {
this.mUiButton.textContent = "Stop";
}
private Ui_OnCleanup() {
this.mUiButton.textContent = "Join";
while (this.mUiLocalVideoParent.hasChildNodes()) {
this.mUiLocalVideoParent.removeChild(this.mUiLocalVideoParent.firstChild);
}
while (this.mUiRemoteVideoParent.hasChildNodes()) {
this.mUiRemoteVideoParent.removeChild(this.mUiRemoteVideoParent.firstChild);
}
}
private Ui_OnLog(msg: string) {
}
private Ui_OnError(msg: string) {
}
private Ui_OnLocalVideo(video: HTMLVideoElement) {
this.mUiLocalVideoParent.appendChild(document.createElement("br"));
this.mUiLocalVideoParent.appendChild(video);
}
private Ui_OnRemoteVideo(video: HTMLVideoElement, id: awrtc.ConnectionId) {
this.mUiRemoteVideoParent.appendChild(document.createElement("br"));
this.mUiRemoteVideoParent.appendChild(new Text("connection " + id.id));
this.mUiRemoteVideoParent.appendChild(document.createElement("br"));
this.mUiRemoteVideoParent.appendChild(video);
}
public Ui_OnStartStopButtonClicked = () => {
if (this.mIsRunning) {
this.Stop();
} else {
this.Start(this.mAddress, this.mAudio, this.mVideo);
}
}
public Ui_OnUpdate = () => {
console.debug("OnUiUpdate");
this.mAddress = this.mUiAddress.value;
this.mAudio = this.mUiAudio.checked;
this.mVideo = this.mUiVideo.checked;
this.mUiUrl.innerHTML = this.GetUrl();
}
public Ui_Update(): void {
console.log("UpdateUi");
this.mUiAddress.value = this.mAddress;
this.mUiAudio.checked = this.mAudio;
this.mUiVideo.checked = this.mVideo;
this.mUiUrl.innerHTML = this.GetUrl();
}
private GenerateRandomKey() {
var result = "";
for (var i = 0; i < 7; i++) {
result += String.fromCharCode(65 + Math.round(Math.random() * 25));
}
return result;
}
private GetUrlParams() {
return "?a=" + this.mAddress + "&audio=" + this.mAudio + "&video=" + this.mVideo + "&" + "autostart=" + true;
}
private GetUrl() {
return location.protocol + '//' + location.host + location.pathname + this.GetUrlParams();
}
}
export function videoinputapp(parent: HTMLElement, canvas: HTMLCanvasElement) {
let callApp: VideoInputApp;
console.log("init callapp");
if (parent == null) {
console.log("parent was null");
parent = document.body;
}
awrtc.SLog.SetLogLevel(awrtc.SLogLevel.Info);
callApp = new VideoInputApp();
const media = new awrtc.Media();
const devname = "canvas";
awrtc.Media.SharedInstance.VideoInput.AddCanvasDevice(canvas, devname, canvas.width / 2, canvas.height / 2, 30);
setInterval(() => {
awrtc.Media.SharedInstance.VideoInput.UpdateFrame(devname);
}, 50);
VideoInputApp.sVideoDevice = devname;
callApp.setupUi(parent);
}
/*
Copyright (c) 2019, because-why-not.com Limited
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//obsolete. not needed for unity build anymore
//special entry point only needed for backwards compatibility
//it will merge awrtc namespace into window so old code still works
//that accesses objects directly instead using the global awrtc object
//the index will include all external modules
import * as awrtc from "./index"
//we merge awrtc into the global namespace
Object.assign(window, awrtc);
//for less risky global access
(window as any).awrtc = awrtc;
......@@ -42,4 +42,5 @@ export * from "./media/index"
//it could as well be built and deployed separately
export * from "./media_browser/index"
export * from "./unity/index"
console.debug("loading awrtc modules completed");
\ No newline at end of file
console.debug("loading awrtc modules completed!");
......@@ -59,6 +59,15 @@ export class IFrameData {
}
public constructor() { }
public ToTexture(gl: WebGL2RenderingContext, texture: WebGLTexture) : boolean{
return false;
}
/*
public ToTexture2(gl: WebGL2RenderingContext) : WebGLTexture{
return null;
}
*/
}
//Container for the raw bytes of the current frame + height and width.
......@@ -96,6 +105,10 @@ export class RawFrame extends IFrameData{
* only create a lazy frame which will delay the creation of the RawFrame until the user actually tries
* to access any data.
* Thus if the game slows down or the user doesn't access any data the expensive copy is avoided.
*
* This comes with the downside of risking a change in Width / Height in the meantime. In theory the video could
* change its resolution, causing the values of Width / Height to change over time before Buffer is accessed to create
* a copy that is safe to use. This should be fine as long as the frame is used at the time it is received.
*/
export class LazyFrame extends IFrameData{
......@@ -113,20 +126,42 @@ export class LazyFrame extends IFrameData{
return this.mRawFrame.Buffer;
}
/**Returns the expected width of the frame.
* Watch out: this might change in between frames!
*
*/
public get Width(): number {
if (this.mRawFrame == null)
{
return this.mFrameGenerator.VideoElement.videoWidth;
}else{
return this.mRawFrame.Width;
}
/*
this.GenerateFrame();
if (this.mRawFrame == null)
return -1;
return this.mRawFrame.Width;
*/
}
/**Returns the expected height of the frame.
* Watch out: this might change in between frames!
*
*/
public get Height(): number {
if (this.mRawFrame == null)
{
return this.mFrameGenerator.VideoElement.videoHeight;
}else{
return this.mRawFrame.Height;
}
/*
this.GenerateFrame();
if (this.mRawFrame == null)
return -1;
return this.mRawFrame.Height;
*/
}
......@@ -135,6 +170,37 @@ export class LazyFrame extends IFrameData{
this.mFrameGenerator = frameGenerator;
}
/**Intended for use via the Unity plugin.
* Will copy the image directly into a texture to avoid the overhead of a CPU-side copy.
*
* The given texture should have the correct size before calling this method.
*
* @param gl
* @param texture
*/
public ToTexture(gl: WebGL2RenderingContext, texture: WebGLTexture) : boolean{
gl.bindTexture(gl.TEXTURE_2D, texture);
/*
const level = 0;
const internalFormat = gl.RGBA;
const srcFormat = gl.RGBA;
const srcType = gl.UNSIGNED_BYTE;
gl.texImage2D(gl.TEXTURE_2D, level, internalFormat, srcFormat, srcType, this.mFrameGenerator.VideoElement);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
*/
gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, 0, gl.RGB, gl.UNSIGNED_BYTE, this.mFrameGenerator.VideoElement);
return true;
}
/*
public ToTexture2(gl: WebGL2RenderingContext) : WebGLTexture{
let tex = gl.createTexture()
this.ToTexture(gl, tex)
return;
}
*/
//Called before access of any frame data triggering the creation of the raw frame data
private GenerateFrame() {
......
......@@ -36,6 +36,7 @@ import { IFrameData } from "../media/RawFrame";
import { MediaPeer } from "./MediaPeer";
import { BrowserMediaStream } from "./BrowserMediaStream";
import { DeviceApi } from "./DeviceApi";
import { Media } from "./Media";
/**Avoid using this class directly whenever possible. Use BrowserWebRtcCall instead.
......@@ -60,6 +61,7 @@ import { DeviceApi } from "./DeviceApi";
*/
export class BrowserMediaNetwork extends WebRtcNetwork implements IMediaNetwork {
//media configuration set by the user
private mMediaConfig: MediaConfig = null;
//keeps track of audio / video tracks based on local devices
......@@ -69,6 +71,7 @@ export class BrowserMediaNetwork extends WebRtcNetwork implements IMediaNetwork
private mConfigurationError: string = null;
private mMediaEvents: Queue<MediaEvent> = new Queue<MediaEvent>();
constructor(config: NetworkConfig) {
super(BrowserMediaNetwork.BuildSignalingConfig(config.SignalingUrl),
......@@ -91,103 +94,12 @@ export class BrowserMediaNetwork extends WebRtcNetwork implements IMediaNetwork
if (config.Audio || config.Video) {
//ugly part starts -> call get user media data (no typescript support)
//different browsers have different calls...
//check getSupportedConstraints()???
//see https://w3c.github.io/mediacapture-main/getusermedia.html#constrainable-interface
//set default ideal to very common low 320x240 to avoid overloading weak computers
var constraints = {
audio: config.Audio
} as any;
let width = {} as any;
let height = {} as any;
let video = {} as any;
let fps = {} as any;
if (config.MinWidth != -1)
width.min = config.MinWidth;
if (config.MaxWidth != -1)
width.max = config.MaxWidth;
if (config.IdealWidth != -1)
width.ideal = config.IdealWidth;
if (config.MinHeight != -1)
height.min = config.MinHeight;
if (config.MaxHeight != -1)
height.max = config.MaxHeight;
if (config.IdealHeight != -1)
height.ideal = config.IdealHeight;
if (config.MinFps != -1)
fps.min = config.MinFps;
if (config.MaxFps != -1)
fps.max = config.MaxFps;
if (config.IdealFps != -1)
fps.ideal = config.IdealFps;
//user requested specific device? get it now to properly add it to the
//constraints later
let deviceId:string = null;
if(config.Video && config.VideoDeviceName && config.VideoDeviceName !== "")
{
deviceId = DeviceApi.GetDeviceId(config.VideoDeviceName);
SLog.L("using device " + config.VideoDeviceName);
if(deviceId !== null && deviceId !== "")
{
//SLog.L("using device id " + deviceId);
}
else{
deviceId = null;
SLog.LW("Failed to find deviceId for label " + config.VideoDeviceName + "! Using default device instead");
}
}
//watch out: unity changed behaviour and will now
//give 0 / 1 instead of false/true
//using === won't work
if(config.Video == false)
SLog.L("calling GetUserMedia. Media config: " + JSON.stringify(config));
if(DeviceApi.IsUserMediaAvailable())
{
//video is off
video = false;
}else {
if(Object.keys(width).length > 0){
video.width = width;
}
if(Object.keys(height).length > 0){
video.height = height;
}
if(Object.keys(fps).length > 0){
video.frameRate = fps;
}
if(deviceId !== null){
video.deviceId = {"exact":deviceId};
}
let promise : Promise<MediaStream> = null;
promise = Media.SharedInstance.getUserMedia(config);
//if we didn't add anything we need to set it to true
//at least (I assume?)
if(Object.keys(video).length == 0){
video = true;
}
}
constraints.video = video;
SLog.L("calling GetUserMedia. Media constraints: " + JSON.stringify(constraints));
if(navigator && navigator.mediaDevices)
{
let promise = navigator.mediaDevices.getUserMedia(constraints);
promise.then((stream) => { //user gave permission
//totally unrelated -> user gave access to devices. use this
......@@ -196,6 +108,7 @@ export class BrowserMediaNetwork extends WebRtcNetwork implements IMediaNetwork
//call worked -> setup a frame buffer that deals with the rest
this.mLocalStream = new BrowserMediaStream(stream as MediaStream);
//console.debug("Local tracks: ", stream.getTracks());
this.mLocalStream.InternalStreamAdded = (stream)=>{
this.EnqueueMediaEvent(MediaEventType.StreamAdded, ConnectionId.INVALID, this.mLocalStream.VideoElement);
};
......
......@@ -31,6 +31,32 @@ import { IFrameData, RawFrame, LazyFrame } from "../media/RawFrame";
import { SLog } from "../network/Helper";
/**
* Mostly used for debugging at the moment. Browser API doesn't seem to have a standard way to
* determine if a frame was updated. This class currently uses several different methods based
* on availability
*
*/
enum FrameEventMethod{
/**We use a set default framerate. FPS is unknown and we can't recognize if a frame was updated.
* Used for remote video tracks on firefox as the "framerate" property will not be set.
*/
DEFAULT_FALLBACK = "DEFAULT_FALLBACK",
/**
* Using the track's metadata to determine the framerate. We might drop frames or deliver them twice
* because we can't tell when exactly they are updated.
* Some video devices also claim 30 FPS but generate fewer, causing us to waste performance copying the same image
* multiple times.
*
* This system works with local video in firefox
*/
TRACK = "TRACK",
/**
* Uses frame numbers returned by the browser. This works for webkit-based browsers only so far.
* Firefox is either missing the needed properties or they always return 0.
*/
EXACT = "EXACT"
}
/**Internal use only.
* Bundles all functionality related to MediaStream, Tracks and video processing.
......@@ -46,15 +72,14 @@ export class BrowserMediaStream {
//for debugging. Will attach the HTMLVideoElement used to play the local and remote
//video streams to the document.
public static DEBUG_SHOW_ELEMENTS = false;
//TODO: remove this flag. it is now always using lazy frames
public static sUseLazyFrames = true;
//Gives each FrameBuffer and its HTMLVideoElement a fixed id for debugging purposes.
public static sNextInstanceId = 1;
public static VERBOSE = false;
private mStream: MediaStream;
......@@ -74,12 +99,15 @@ export class BrowserMediaStream {
//Framerate used as a workaround if
//the actual framerate is unknown due to browser restrictions
public static DEFAULT_FRAMERATE = 25;
public static DEFAULT_FRAMERATE = 30;
private mMsPerFrame = 1.0 / BrowserMediaStream.DEFAULT_FRAMERATE * 1000;
private mFrameRateKnown = false;
private mFrameEventMethod = FrameEventMethod.DEFAULT_FALLBACK;
//Time the last frame was generated
private mLastFrameTime = 0;
private mNextFrameTime = 0;
/** Number of the last frame (not yet supported in all browsers)
* if it remains at <= 0 then we just generate frames based on
......@@ -98,37 +126,56 @@ export class BrowserMediaStream {
this.mInstanceId = BrowserMediaStream.sNextInstanceId;
BrowserMediaStream.sNextInstanceId++;
if (this.mStream.getVideoTracks().length > 0)
{
this.mHasVideo = true;
let vtrack = this.mStream.getVideoTracks()[0];
let settings = vtrack.getSettings();
let fps = settings.frameRate;
if(fps)
{
this.mMsPerFrame = 1.0 / fps * 1000;
this.mFrameRateKnown = true;
}
}
this.mMsPerFrame = 1.0 / BrowserMediaStream.DEFAULT_FRAMERATE * 1000;
this.mFrameEventMethod = FrameEventMethod.DEFAULT_FALLBACK;
this.SetupElements();
}
private CheckFrameRate():void
{
//in chrome the track itself might miss the framerate but
//we still know when it updates trough webkitDecodedFrameCount
if(this.mVideoElement && typeof (this.mVideoElement as any).webkitDecodedFrameCount !== "undefined")
{
this.mFrameRateKnown = true;
}
if(this.mFrameRateKnown === false)
if(this.mVideoElement)
{
//firefox and co won't tell us the FPS for remote stream
SLog.LW("Framerate unknown. Using default framerate of " + BrowserMediaStream.DEFAULT_FRAMERATE);
if (this.mStream.getVideoTracks().length > 0)
{
this.mHasVideo = true;
let vtrack = this.mStream.getVideoTracks()[0];
let settings = vtrack.getSettings();
let fps = settings.frameRate;
if(fps)
{
if(BrowserMediaStream.VERBOSE)
{
console.log("Track FPS: " + fps);
}
this.mMsPerFrame = 1.0 / fps * 1000;
this.mFrameEventMethod = FrameEventMethod.TRACK;
}
}
//try to get the video fps via the track
//fails on firefox if the track comes from a remote source
if(this.GetFrameNumber() != -1)
{
if(BrowserMediaStream.VERBOSE)
{
console.log("Get frame available.");
}
//browser returns exact frame information
this.mFrameEventMethod = FrameEventMethod.EXACT;
}
//failed to determine any frame rate. This happens on firefox with
//remote tracks
if(this.mFrameEventMethod === FrameEventMethod.DEFAULT_FALLBACK)
{
//firefox and co won't tell us the FPS for remote stream
SLog.LW("Framerate unknown for stream " + this.mInstanceId + ". Using default framerate of " + BrowserMediaStream.DEFAULT_FRAMERATE);
}
}
}
public SetupElements() {
private SetupElements() {
this.mVideoElement = this.SetupVideoElement();
//TODO: investigate bug here
......@@ -138,7 +185,7 @@ export class BrowserMediaStream {
//with 720p. (video device "BisonCam, NB Pro" on MSI laptop)
SLog.L("video element created. video tracks: " + this.mStream.getVideoTracks().length);
this.mVideoElement.onloadedmetadata = (e) => {
//console.log("onloadedmetadata");
//we might have shutdown everything by now already
if(this.mVideoElement == null)
return;
......@@ -162,7 +209,12 @@ export class BrowserMediaStream {
this.CheckFrameRate();
SLog.L("Resolution: " + this.mVideoElement.videoWidth + "x" + this.mVideoElement.videoHeight);
let video_log = "Resolution: " + this.mVideoElement.videoWidth + "x" + this.mVideoElement.videoHeight
+ " fps method: " + this.mFrameEventMethod + " " + Math.round(1000/(this.mMsPerFrame));
SLog.L(video_log);
if(BrowserMediaStream.VERBOSE){
console.log(video_log)
}
//now create canvas after the meta data of the video are known
if (this.mHasVideo) {
this.mCanvasElement = this.SetupCanvas();
......@@ -199,26 +251,42 @@ export class BrowserMediaStream {
let frameNumber;
if(this.mVideoElement)
{
//to find out if we got a new frame
//chrome has webkitDecodedFrameCount
//firefox mozDecodedFrames, mozParsedFrames, mozPresentedFrames seems to be always 0 so far
//mozPaintedFrames turned out useless as it only updates if the tag is visible
//no idea about all others
//
frameNumber = (this.mVideoElement as any).webkitDecodedFrameCount
//|| this.mVideoElement.currentTime can't be used updates every call
|| -1;
if((this.mVideoElement as any).webkitDecodedFrameCount)
{
frameNumber = (this.mVideoElement as any).webkitDecodedFrameCount;
}
/*
None of these work and future versions might return numbers that are only
updated once a second or so. For now it is best to ignore these.
TODO: Check if any of these will work in the future. this.mVideoElement.getVideoPlaybackQuality().totalVideoFrames;
might also help in the future (so far always 0)
this.mVideoElement.currentTime also won't work because this is updated faster than the framerate (would result in >100+ framerate)
else if((this.mVideoElement as any).mozParsedFrames)
{
frameNumber = (this.mVideoElement as any).mozParsedFrames;
}else if((this.mVideoElement as any).mozDecodedFrames)
{
frameNumber = (this.mVideoElement as any).mozDecodedFrames;
}else if((this.mVideoElement as any).decodedFrameCount)
{
frameNumber = (this.mVideoElement as any).decodedFrameCount;
}
*/
else
{
frameNumber = -1;
}
}else{
frameNumber = -1;
}
return frameNumber;
}
//TODO: Buffering
public TryGetFrame(): IFrameData
{
//make sure we get the newest frame
this.EnsureLatestFrame();
//this.EnsureLatestFrame();
//remove the buffered frame if any
var result = this.mBufferedFrame;
......@@ -230,7 +298,7 @@ export class BrowserMediaStream {
this.mVideoElement.muted = mute;
}
public PeekFrame(): IFrameData {
this.EnsureLatestFrame();
//this.EnsureLatestFrame();
return this.mBufferedFrame;
}
......@@ -240,7 +308,7 @@ export class BrowserMediaStream {
private EnsureLatestFrame():boolean
{
if (this.HasNewerFrame()) {
this.FrameToBuffer();
this.GenerateFrame();
return true;
}
return false;
......@@ -258,6 +326,7 @@ export class BrowserMediaStream {
{
if(this.mLastFrameNumber > 0)
{
this.mFrameEventMethod = FrameEventMethod.EXACT;
//we are getting frame numbers. use those to
//check if we have a new one
if(this.GetFrameNumber() > this.mLastFrameNumber)
......@@ -268,10 +337,8 @@ export class BrowserMediaStream {
else
{
//many browsers do not share the frame info
//so far we just generate 30 FPS as a work around
let now = new Date().getTime();
let div = now - this.mLastFrameTime;
if (div >= this.mMsPerFrame) {
if (this.mNextFrameTime <= now) {
{
return true;
}
......@@ -284,8 +351,7 @@ export class BrowserMediaStream {
public Update(): void {
//moved to avoid creating buffered frames if not needed
//this.EnsureLatestFrame();
this.EnsureLatestFrame();
}
public DestroyCanvas(): void {
......@@ -319,11 +385,12 @@ export class BrowserMediaStream {
this.mCanvasElement.width = this.mVideoElement.videoWidth;
this.mCanvasElement.height = this.mVideoElement.videoHeight;
let ctx = this.mCanvasElement.getContext("2d");
/*
var fillBackgroundFirst = true;
if (fillBackgroundFirst) {
ctx.clearRect(0, 0, this.mCanvasElement.width, this.mCanvasElement.height);
}
*/
ctx.drawImage(this.mVideoElement, 0, 0);
try {
......@@ -359,10 +426,21 @@ export class BrowserMediaStream {
}
}
private FrameToBuffer(): void
//The old buffered frame was replaced with a wrapper that avoids buffering internally.
//The only point of GenerateFrame now is to ensure a consistent framerate.
private GenerateFrame(): void
{
this.mLastFrameTime = new Date().getTime();
this.mLastFrameNumber = this.GetFrameNumber();
let now = new Date().getTime();
//js timing is very inaccurate. reduce time until next frame if we are
//late with this one.
let diff = now - this.mNextFrameTime;
let delta = (this.mMsPerFrame - diff);
delta = Math.min(this.mMsPerFrame, Math.max(1, delta))
this.mLastFrameTime = now;
this.mNextFrameTime = now + delta;
//console.log("last frame , new frame", this.mLastFrameTime, this.mNextFrameTime, delta);
this.mBufferedFrame = new LazyFrame(this);
}
......
......@@ -28,6 +28,8 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import { SLog } from "../network/index";
import { MediaConfig } from "media/MediaConfig";
import { VideoInput } from "./VideoInput";
export class DeviceInfo
{
......@@ -79,7 +81,11 @@ export class DeviceApi
{
let index = DeviceApi.sUpdateEvents.indexOf(evt);
if(index >= 0)
{
DeviceApi.sUpdateEvents.splice(index, 1);
}else{
SLog.LW("Tried to remove an unknown event handler in DeviceApi.RemOnChangedHandler");
}
}
private static TriggerChangedEvent()
......@@ -166,6 +172,15 @@ export class DeviceApi
{
return DeviceApi.sDeviceInfo;
}
public static GetVideoDevices(): string[]{
const devices = DeviceApi.Devices;
const keys = Object.keys(devices);
const labels = keys.map((x)=>{return devices[x].label});
return labels;
}
public static Reset()
{
DeviceApi.sUpdateEvents = [];
......@@ -196,7 +211,7 @@ export class DeviceApi
DeviceApi.Update();
}
static ENUM_FAILED = "Can't access mediaDevices or enumerateDevices";
/**Updates the device list based on the current
* access. Gives the devices numbers if the name isn't known.
*/
......@@ -210,9 +225,28 @@ export class DeviceApi
.then(DeviceApi.InternalOnEnum)
.catch(DeviceApi.InternalOnErrorCatch);
}else{
DeviceApi.InternalOnErrorString("Can't access mediaDevices or enumerateDevices");
DeviceApi.InternalOnErrorString(DeviceApi.ENUM_FAILED);
}
}
public static async UpdateAsync():Promise<void>
{
return new Promise((resolve, fail)=>{
DeviceApi.sLastError = null;
if(DeviceApi.IsApiAvailable() == false)
{
DeviceApi.InternalOnErrorString(DeviceApi.ENUM_FAILED);
fail(DeviceApi.ENUM_FAILED);
}
resolve();
}).then(()=>{
DeviceApi.sIsPending = true;
return navigator.mediaDevices.enumerateDevices()
.then(DeviceApi.InternalOnEnum)
.catch(DeviceApi.InternalOnErrorCatch);
});
}
/**Checks if the API is available in the browser.
* false - browser doesn't support this API
* true - browser supports the API (might still refuse to give
......@@ -255,4 +289,127 @@ export class DeviceApi
}
return null;
}
public static IsUserMediaAvailable()
{
if(navigator && navigator.mediaDevices)
return true;
return false;
}
public static ToConstraints(config: MediaConfig): MediaStreamConstraints
{
//ugly part starts -> call get user media data (no typescript support)
//different browsers have different calls...
//check getSupportedConstraints()???
//see https://w3c.github.io/mediacapture-main/getusermedia.html#constrainable-interface
//set default ideal to very common low 320x240 to avoid overloading weak computers
var constraints = {
audio: config.Audio
} as any;
let width = {} as any;
let height = {} as any;
let video = {} as any;
let fps = {} as any;
if (config.MinWidth != -1)
width.min = config.MinWidth;
if (config.MaxWidth != -1)
width.max = config.MaxWidth;
if (config.IdealWidth != -1)
width.ideal = config.IdealWidth;
if (config.MinHeight != -1)
height.min = config.MinHeight;
if (config.MaxHeight != -1)
height.max = config.MaxHeight;
if (config.IdealHeight != -1)
height.ideal = config.IdealHeight;
if (config.MinFps != -1)
fps.min = config.MinFps;
if (config.MaxFps != -1)
fps.max = config.MaxFps;
if (config.IdealFps != -1)
fps.ideal = config.IdealFps;
//user requested specific device? get it now to properly add it to the
//constraints later
let deviceId:string = null;
if(config.Video && config.VideoDeviceName && config.VideoDeviceName !== "")
{
deviceId = DeviceApi.GetDeviceId(config.VideoDeviceName);
SLog.L("using device " + config.VideoDeviceName);
if(deviceId === "")
{
//Workaround for Chrome 81: If no camera access is allowed chrome returns the deviceId ""
//thus we can only request any video device. We can't select a specific one
deviceId = null;
}else if(deviceId !== null)
{
//all good
}
else{
SLog.LE("Failed to find deviceId for label " + config.VideoDeviceName);
throw new Error("Unknown device " + config.VideoDeviceName);
}
}
//watch out: unity changed behaviour and will now
//give 0 / 1 instead of false/true
//using === won't work
if(config.Video == false)
{
//video is off
video = false;
}else {
if(Object.keys(width).length > 0){
video.width = width;
}
if(Object.keys(height).length > 0){
video.height = height;
}
if(Object.keys(fps).length > 0){
video.frameRate = fps;
}
if(deviceId !== null){
video.deviceId = {"exact":deviceId};
}
//if we didn't add anything we need to set it to true
//at least (I assume?)
if(Object.keys(video).length == 0){
video = true;
}
}
constraints.video = video;
return constraints;
}
public static getBrowserUserMedia(constraints?: MediaStreamConstraints): Promise<MediaStream>{
return navigator.mediaDevices.getUserMedia(constraints);
}
public static getAssetUserMedia(config: MediaConfig): Promise<MediaStream>{
return new Promise((resolve)=>{
const res = DeviceApi.ToConstraints(config);
resolve(res);
}).then((constraints)=>{
return DeviceApi.getBrowserUserMedia(constraints as MediaStreamConstraints);
});
}
}
\ No newline at end of file
import { DeviceApi } from "./DeviceApi";
import { VideoInput } from "./VideoInput";
import { MediaConfig } from "media/MediaConfig";
export class Media{
//experimental. Will be used instead of the device api to create streams
private static sSharedInstance :Media = new Media();
/**
* Singleton used for now because the browser version doesn't have a proper factory yet.
* Might be removed later.
*/
public static get SharedInstance(){
return this.sSharedInstance;
}
public static ResetSharedInstance(){
this.sSharedInstance = new Media();
}
private videoInput: VideoInput = null;
public get VideoInput() : VideoInput{
if(this.videoInput === null)
this.videoInput = new VideoInput();
return this.videoInput;
}
public constructor(){
}
public GetVideoDevices(): string[] {
const real_devices = DeviceApi.GetVideoDevices();
const virtual_devices : string[] = this.VideoInput.GetDeviceNames();
return real_devices.concat(virtual_devices);
}
public static IsNameSet(videoDeviceName: string) : boolean{
if(videoDeviceName !== null && videoDeviceName !== "" )
{
return true;
}
return false;
}
public getUserMedia(config: MediaConfig): Promise<MediaStream>{
if(config.Video && Media.IsNameSet(config.VideoDeviceName)
&& this.videoInput != null
&& this.videoInput.HasDevice(config.VideoDeviceName))
{
let res = Promise.resolve().then(async ()=>{
let stream = this.videoInput.GetStream(config.VideoDeviceName);
if(config.Audio)
{
let constraints = {} as MediaStreamConstraints
constraints.audio = true;
let audio_stream = await DeviceApi.getBrowserUserMedia(constraints);
stream.addTrack(audio_stream.getTracks()[0])
}
return stream;
})
return res;
}
return DeviceApi.getAssetUserMedia(config);
}
}
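/* Usage sketch (the "background" device name and the canvas are illustrative, not part of the API):
*
*   const media = Media.SharedInstance;
*   const canvas = document.createElement("canvas");
*   canvas.getContext("2d"); //a context must exist before the canvas can be captured
*   media.VideoInput.AddCanvasDevice(canvas, "background", canvas.width, canvas.height, 30);
*
*   const config = new MediaConfig();
*   config.Audio = false;
*   config.Video = true;
*   config.VideoDeviceName = "background"; //virtual devices take priority over real cameras
*   media.getUserMedia(config).then((stream) => {
*       //stream now carries the canvas content as its video track
*   });
*/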
\ No newline at end of file
/**TS version of the C++ / C# side Native VideoInput API.


* In addition it supports adding an HTMLCanvasElement as a video device. This can be
* a lot faster in the browser than the C / C++ style UpdateFrame methods that use raw byte arrays
* or pointers to deliver an image.

* Note there are currently three distinct ways this is used:
* 1. Using AddCanvasDevice without scaling (width = 0, height = 0 or the same size as the canvas).
* In this mode the MediaStream is captured directly from the canvas. Drawing calls to the canvas
* turn into video frames without any manual UpdateFrame calls.

* 2. Using AddCanvasDevice with scaling by setting a width / height different from the canvas.
* In this mode the user draws to the canvas and every time UpdateFrame is called a scaled frame
* is created that turns into a video frame. Fewer UpdateFrame calls reduce the framerate
* even if the original canvas is drawn at a higher framerate.
* This mode should result in lower data usage.

* 3. Using AddDevice and UpdateFrame to deliver raw byte array frames. This is a compatibility mode
* that works similarly to the C / C++ and C# API. An internal canvas is created and updated based on
* the data the user delivers. This mode makes sense if you generate custom data that doesn't have
* a canvas as its source.
* This mode can be quite slow and inefficient.

* TODO:
* - Using AddDevice with one resolution & UpdateFrame with another might not support scaling yet, and
* activating the 2nd canvas for scaling would
* reduce the performance even more. Check if there is a better solution and if scaling is even needed.
* It could easily be added by calling initScaling but it must be known whether scaling is required before
* the device is selected by the user. Given that scaling can reduce the performance, doing so by default
* might cause problems for some users.

* - UpdateFrame rotation and firstRowIsBottom aren't supported yet. They don't seem to be needed for
* WebGL anyway: frames here always start with the top line and rotation is automatically
* handled by the browser.

*/
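/* Usage sketch for the three modes above (the device names, sizes and the
* "#mycanvas" element are illustrative, not part of the API):
*
*   const input = new VideoInput();
*   const canvas = document.querySelector("#mycanvas") as HTMLCanvasElement;
*   canvas.getContext("2d"); //make sure a context exists before streaming
*
*   //1. stream the canvas directly; the browser generates frames from draw calls
*   input.AddCanvasDevice(canvas, "canvas_raw", canvas.width, canvas.height, 30);
*
*   //2. stream a scaled copy; call UpdateFrame(name) once per frame to refresh it
*   input.AddCanvasDevice(canvas, "canvas_scaled", 320, 240, 30);
*   input.UpdateFrame("canvas_scaled");
*
*   //3. compatibility mode: push raw RGBA bytes into an internally managed canvas
*   input.AddDevice("raw_device", 2, 2, 30);
*   const pixels = new Uint8ClampedArray(2 * 2 * 4).fill(255);
*   input.UpdateFrame("raw_device", pixels, 2, 2, VideoInputType.ARGB, 0, false);
*/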
export class VideoInput {
private canvasDevices: CanvasMap = {};
constructor() {
}
/**Adds a canvas to use as video source for streaming.
*
* Make sure canvas.getContext is at least called once before calling this method.
*
* @param canvas
* @param deviceName
* @param width
* @param height
* @param fps
*/
public AddCanvasDevice(canvas: HTMLCanvasElement, deviceName: string, width: number, height: number, fps: number) {
let cdev = CanvasDevice.CreateExternal(canvas, fps);
if (width != canvas.width || height != canvas.height) {
//console.warn("testing scaling");
cdev.initScaling(width, height);
}
this.canvasDevices[deviceName] = cdev;
}
/**For internal use.
* Checks whether the device already exists.
*
* @param dev
*/
public HasDevice(dev: string): boolean {
return dev in this.canvasDevices;
}
/**For internal use.
* Lists all registered devices.
*
*/
public GetDeviceNames(): Array<string> {
return Object.keys(this.canvasDevices);
}
/**For internal use.
* Returns a MediaStream for the given device.
*
* @param dev
*/
public GetStream(dev: string): MediaStream | null {
if (this.HasDevice(dev)) {
let device = this.canvasDevices[dev];
//watch out: This can trigger an exception if getContext has never been called before.
//There doesn't seem to be a way to detect this beforehand though
let stream = device.captureStream();
return stream;
}
return null;
}
/**C# API: public void AddDevice(string name, int width, int height, int fps);
*
* Adds a device that will be accessible via the given name. Width / Height determines
* the size of the canvas that is used to stream the video.
*
*
* @param name unique name for the canvas
* @param width width of the canvas used for the stream
* @param height height of the canvas used for the stream
* @param fps Expected FPS used by the stream. 0 or undefined to let the browser decide (likely based on actual draw calls)
*/
public AddDevice(name: string, width: number, height: number, fps?: number): void {
let cdev = CanvasDevice.CreateInternal(width, height, fps);
this.canvasDevices[name] = cdev;
}
private RemCanvasDevice(deviceName: string) {
let cdev = this.canvasDevices[deviceName];
if (cdev) {
delete this.canvasDevices[deviceName];
}
}
//C# API: public void RemoveDevice(string name);
public RemoveDevice(name: string): void {
this.RemCanvasDevice(name);
}
public UpdateFrame(name: string): boolean;
public UpdateFrame(name: string, dataPtr: Uint8ClampedArray, width: number, height: number, type: VideoInputType): boolean
public UpdateFrame(name: string, dataPtr: Uint8ClampedArray, width: number, height: number, type: VideoInputType, rotation: number, firstRowIsBottom: boolean): boolean
/**
* Use UpdateFrame with name only to trigger a new frame without changing the content (e.g. if AddCanvasDevice was used to add the device and it needs scaling).
* Use UpdateFrame with image data if you added the device via AddDevice and want to update its content.
*
*
*
* @param name name of the device
* @param dataPtr array to the image data
* @param width must be the exact width of the image in dataPtr
* @param height must be the exact height of the image in dataPtr
* @param type must be ARGB at the moment
* @param rotation not yet supported
* @param firstRowIsBottom not yet supported
*/
public UpdateFrame(name: string, dataPtr?: Uint8ClampedArray, width?: number, height?: number, type: VideoInputType = VideoInputType.ARGB, rotation: number = 0, firstRowIsBottom: boolean = true): boolean {
if (this.HasDevice(name)) {
let device = this.canvasDevices[name];
if (device.IsExternal() || dataPtr == null) {
//can't change external images / no data available. just generate a new frame without new data
device.UpdateFrame();
} else {
var data = new ImageData(dataPtr, width, height);
device.UpdateFrame(data);
}
return true;
}
return false;
}
}
interface FancyHTMLCanvasElement extends HTMLCanvasElement {
captureStream(fps?: number): MediaStream;
}
/**Wraps around a canvas object to use as a source for MediaStream.
* It supports streaming via a second canvas that is used to scale the image
* before streaming. For scaling UpdateFrame needs to be called once per frame.
* Without scaling the browser will detect changes in the original canvas
* and automatically update the stream.
*
*/
class CanvasDevice {
/**Main canvas. This is actively drawn onto by the user (external)
* or by this class.
*
*/
private canvas: FancyHTMLCanvasElement;
/**false = we own the canvas and can change its settings e.g. via VideoInput
* true = externally used canvas. Can't change width / height or any other settings
*/
private external_canvas = false;
/** FPS used for the canvas captureStream.
* 0 or undefined to let the browser handle it automatically via captureStream()
*/
private fps?: number;
/**Canvas element to handle scaling.
* Remains null if initScaling is never called and width / height is expected to
* fit the canvas.
*
*/
private scaling_canvas: FancyHTMLCanvasElement = null;
//private scaling_interval = -1;
private is_capturing = false;
public getStreamingCanvas() {
if (this.scaling_canvas == null)
return this.canvas;
return this.scaling_canvas;
}
public captureStream() {
if (this.is_capturing == false && this.scaling_canvas) {
//scaling is active.
this.startScaling();
}
this.is_capturing = true;
if (this.fps && this.fps > 0) {
return this.getStreamingCanvas().captureStream(this.fps);
}
return this.getStreamingCanvas().captureStream();
}
private constructor(c: HTMLCanvasElement, external_canvas: boolean, fps?: number) {
this.canvas = c as FancyHTMLCanvasElement;
this.external_canvas = external_canvas;
this.fps = fps;
}
public static CreateInternal(width: number, height: number, fps?: number) {
const c = CanvasDevice.MakeCanvas(width, height);
return new CanvasDevice(c, false, fps);
}
public static CreateExternal(c: HTMLCanvasElement, fps?: number) {
return new CanvasDevice(c, true, fps);
}
/**Adds scaling support to this canvas device.
*
* @param width
* @param height
*/
public initScaling(width: number, height: number) {
this.scaling_canvas = document.createElement("canvas") as FancyHTMLCanvasElement;
this.scaling_canvas.width = width;
this.scaling_canvas.height = height;
this.scaling_canvas.getContext("2d");
}
/**Used to update the frame data if the canvas is managed internally.
* Use without image data to just trigger the scaling / generation of a new frame if the canvas is drawn to externally.
*
* If the canvas is managed externally and scaling is not required this method won't do anything. A new frame is instead
* generated automatically based on the browser & canvas drawing operations.
*/
public UpdateFrame(data?: ImageData): void {
if (data) {
let ctx = this.canvas.getContext("2d");
//TODO: This doesn't seem to support scaling out of the box.
//We might need to combine this with the scaling system as well
//in case users deliver different resolutions than the device is set up for.
ctx.putImageData(data, 0, 0);
}
this.scaleNow();
}
/**Called the first time we need the scaled image to ensure
* the buffers are all filled.
*/
private startScaling() {
this.scaleNow();
}
private scaleNow() {
if (this.scaling_canvas != null) {
let ctx = this.scaling_canvas.getContext("2d");
//ctx.fillStyle = "#FF0000";
//ctx.fillRect(0, 0, this.scaling_canvas.width, this.scaling_canvas.height);
//ctx.clearRect(0, 0, this.scaling_canvas.width, this.scaling_canvas.height)
ctx.drawImage(this.canvas, 0, 0, this.scaling_canvas.width, this.scaling_canvas.height);
}
}
public IsExternal(): boolean {
return this.external_canvas;
}
private static MakeCanvas(width: number, height: number): FancyHTMLCanvasElement {
let canvas = document.createElement("canvas");
canvas.width = width;
canvas.height = height;
let ctx = canvas.getContext("2d");
//make red for debugging purposes
ctx.fillStyle = "red";
ctx.fillRect(0, 0, canvas.width, canvas.height);
return canvas as FancyHTMLCanvasElement;
}
}
interface CanvasMap {
[key: string]: CanvasDevice;
}
/** Only one format supported by browsers so far.
* Maybe more can be added in the future.
*/
export enum VideoInputType {
ARGB
}
......@@ -32,3 +32,5 @@ export * from './BrowserWebRtcCall'
export * from './BrowserMediaStream'
export * from './MediaPeer'
export * from './DeviceApi'
export * from './VideoInput'
export * from './Media'
......@@ -213,22 +213,42 @@ export class SLog {
SLog.LogError(msg, tag);
}
public static Log(msg: any, tag?:string): void {
if(SLog.sLogLevel >= SLogLevel.Info)
{
if(tag)
{
console.log(msg, tag);
}else{
console.log(msg);
}
}
}
public static LogWarning(msg: any, tag?:string): void {
if(SLog.sLogLevel >= SLogLevel.Warnings)
{
if(tag)
{
console.warn(msg, tag);
}else{
console.warn(msg);
}
}
}
public static LogError(msg: any, tag?:string) {
if(SLog.sLogLevel >= SLogLevel.Errors)
{
if(tag)
{
console.error(msg, tag);
}else{
console.error(msg);
}
}
}
}
\ No newline at end of file
......@@ -31,7 +31,10 @@
"./media_browser/BrowserMediaNetwork.ts",
"./media_browser/BrowserWebRtcCall.ts",
"./media_browser/BrowserMediaStream.ts",
"./media_browser/DeviceApi.ts",
"./media_browser/MediaPeer.ts",
"./media_browser/VideoInput.ts",
"./media_browser/Media.ts",
"./media_browser/index.ts",
"./unity/CAPI.ts",
"./unity/index.ts",
......
......@@ -32,7 +32,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import {SLog, WebRtcNetwork, SignalingConfig, NetworkEvent, ConnectionId, LocalNetwork, WebsocketNetwork} from "../network/index"
import { MediaConfigurationState, NetworkConfig, MediaConfig } from "../media/index";
import { BrowserMediaStream, BrowserMediaNetwork, DeviceApi, BrowserWebRtcCall, Media, VideoInputType } from "../media_browser/index";
var CAPI_InitMode = {
......@@ -425,9 +425,6 @@ export function CAPI_MediaNetwork_TryGetFrame(lIndex: number, lConnectionId: num
if (frame == null || frame.Buffer == null) {
return false;
} else {
//TODO: copy frame over
lWidthInt32Array[lWidthIntArrayIndex] = frame.Width;
lHeightInt32Array[lHeightIntArrayIndex] = frame.Height;
......@@ -438,6 +435,61 @@ export function CAPI_MediaNetwork_TryGetFrame(lIndex: number, lConnectionId: num
}
}
export function CAPI_MediaNetwork_TryGetFrame_ToTexture(lIndex: number, lConnectionId: number,
lWidth: number,
lHeight: number,
gl:WebGL2RenderingContext, texture:WebGLTexture): boolean
{
//console.log("CAPI_MediaNetwork_TryGetFrame_ToTexture");
let mediaNetwork = gCAPI_WebRtcNetwork_Instances[lIndex] as BrowserMediaNetwork;
let frame = mediaNetwork.TryGetFrame(new ConnectionId(lConnectionId));
if (frame == null ) {
return false;
} else if (frame.Width != lWidth || frame.Height != lHeight) {
SLog.LW("CAPI_MediaNetwork_TryGetFrame_ToTexture failed. Width height expected: " + frame.Width + "x" + frame.Height + " but received " + lWidth + "x" + lHeight);
return false;
}else {
frame.ToTexture(gl, texture);
return true;
}
}
/*
export function CAPI_MediaNetwork_TryGetFrame_ToTexture2(lIndex: number, lConnectionId: number,
lWidthInt32Array: Int32Array, lWidthIntArrayIndex: number,
lHeightInt32Array: Int32Array, lHeightIntArrayIndex: number,
gl:WebGL2RenderingContext): WebGLTexture
{
//console.log("CAPI_MediaNetwork_TryGetFrame_ToTexture");
let mediaNetwork = gCAPI_WebRtcNetwork_Instances[lIndex] as BrowserMediaNetwork;
let frame = mediaNetwork.TryGetFrame(new ConnectionId(lConnectionId));
if (frame == null) {
return false;
} else {
lWidthInt32Array[lWidthIntArrayIndex] = frame.Width;
lHeightInt32Array[lHeightIntArrayIndex] = frame.Height;
let texture = frame.ToTexture2(gl);
return texture;
}
}
*/
export function CAPI_MediaNetwork_TryGetFrame_Resolution(lIndex: number, lConnectionId: number,
lWidthInt32Array: Int32Array, lWidthIntArrayIndex: number,
lHeightInt32Array: Int32Array, lHeightIntArrayIndex: number): boolean
{
let mediaNetwork = gCAPI_WebRtcNetwork_Instances[lIndex] as BrowserMediaNetwork;
let frame = mediaNetwork.PeekFrame(new ConnectionId(lConnectionId));
if (frame == null) {
return false;
} else {
lWidthInt32Array[lWidthIntArrayIndex] = frame.Width;
lHeightInt32Array[lHeightIntArrayIndex] = frame.Height;
return true;
}
}
//Returns the frame buffer size or -1 if no frame is available
export function CAPI_MediaNetwork_TryGetFrameDataLength(lIndex: number, connectionId: number) : number {
let mediaNetwork = gCAPI_WebRtcNetwork_Instances[lIndex] as BrowserMediaNetwork;
......@@ -497,19 +549,63 @@ export function CAPI_DeviceApi_LastUpdate():number
return DeviceApi.LastUpdate;
}
export function CAPI_Media_GetVideoDevices_Length():number{
return Media.SharedInstance.GetVideoDevices().length;
}
export function CAPI_Media_GetVideoDevices(index:number):string{
const devs = Media.SharedInstance.GetVideoDevices();
if(devs.length > index)
{
return devs[index];
}
else
{
SLog.LE("Requested device with index " + index + " does not exist.");
//it needs to be "" to behave the same as the C++ API. std::string can't be null
return "";
}
}
export function CAPI_VideoInput_AddCanvasDevice(query:string, name:string, width: number, height: number, fps: number): boolean{
let canvas = document.querySelector(query) as HTMLCanvasElement;
if(canvas){
console.debug("CAPI_VideoInput_AddCanvasDevice", {query, name, width, height, fps});
if(width <= 0 || height <= 0){
width = canvas.width;
height = canvas.height;
}
Media.SharedInstance.VideoInput.AddCanvasDevice(canvas, name, width, height, fps);
return true;
}
return false;
}
export function CAPI_VideoInput_AddDevice(name:string, width: number, height: number, fps: number){
Media.SharedInstance.VideoInput.AddDevice(name, width, height, fps);
}
export function CAPI_VideoInput_RemoveDevice(name:string){
Media.SharedInstance.VideoInput.RemoveDevice(name);
}
export function CAPI_VideoInput_UpdateFrame(name:string,
lBufferUint8Array: Uint8Array, lBufferUint8ArrayOffset: number, lBufferUint8ArrayLength: number,
width: number, height: number,
rotation: number, firstRowIsBottom: boolean) : boolean
{
let dataPtrClamped : Uint8ClampedArray = null;
if(lBufferUint8Array && lBufferUint8ArrayLength > 0){
dataPtrClamped = new Uint8ClampedArray(lBufferUint8Array.buffer, lBufferUint8ArrayOffset, lBufferUint8ArrayLength);
}
return Media.SharedInstance.VideoInput.UpdateFrame(name, dataPtrClamped, width, height, VideoInputType.ARGB, rotation, firstRowIsBottom);
}
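/* Usage sketch for the CAPI video input functions (the "#mycanvas" selector and the
* device names are illustrative; in the Unity WebGL build these calls are presumably
* issued from the C# side through the jslib bridge):
*
*   //register an existing canvas as a virtual camera, keeping its own size (width/height <= 0)
*   CAPI_VideoInput_AddCanvasDevice("#mycanvas", "canvas_device", 0, 0, 30);
*
*   //or create an internally managed 640x480 device and push raw RGBA frames into it
*   CAPI_VideoInput_AddDevice("raw_device", 640, 480, 30);
*   const buffer = new Uint8Array(640 * 480 * 4);
*   CAPI_VideoInput_UpdateFrame("raw_device", buffer, 0, buffer.length, 640, 480, 0, false);
*/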
//TODO: This needs a proper implementation.
//So far it only works if the Unity canvas is the only canvas on the page and it uses WebGL2.
export function GetUnityCanvas() : HTMLCanvasElement
{
return document.querySelector("canvas");
}
export function GetUnityContext() : WebGL2RenderingContext
{
return GetUnityCanvas().getContext("webgl2");
}
......@@ -27,4 +27,7 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import { Media } from "../media_browser/Media";
import { GetUnityCanvas } from "./CAPI";
export * from "./CAPI"
......@@ -29,8 +29,10 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//current setup needs to load everything as a module
import {DeviceApi, CAPI_DeviceApi_Update,
CAPI_DeviceApi_RequestUpdate, CAPI_Media_GetVideoDevices_Length,
CAPI_Media_GetVideoDevices,
MediaConfig,
Media} from "../awrtc/index"
export function DeviceApiTest_export()
{
......@@ -132,11 +134,11 @@ describe("DeviceApiTest", () => {
let update2complete = false;
let deviceCount = 0;
const devices_length_uninitialized = CAPI_Media_GetVideoDevices_Length();
expect(devices_length_uninitialized).toBe(0);
DeviceApi.AddOnChangedHandler(()=>{
let dev_length = CAPI_Media_GetVideoDevices_Length();
expect(dev_length).not.toBe(0);
expect(dev_length).toBe(Object.keys(DeviceApi.Devices).length);
......@@ -145,7 +147,7 @@ describe("DeviceApiTest", () => {
for(let k of keys)
{
let expectedVal = DeviceApi.Devices[k].label;
let actual = CAPI_Media_GetVideoDevices(counter);
expect(actual).toBe(expectedVal);
counter++;
......@@ -153,8 +155,114 @@ describe("DeviceApiTest", () => {
done();
});
CAPI_DeviceApi_Update();
});
it("isMediaAvailable", () => {
const res = DeviceApi.IsUserMediaAvailable();
expect(res).toBe(true);
});
it("getUserMedia", async () => {
let stream = await DeviceApi.getBrowserUserMedia({audio:true});
expect(stream).not.toBeNull();
expect(stream.getVideoTracks().length).toBe(0);
expect(stream.getAudioTracks().length).toBe(1);
stream = await DeviceApi.getBrowserUserMedia({video:true});
expect(stream).not.toBeNull();
expect(stream.getAudioTracks().length).toBe(0);
expect(stream.getVideoTracks().length).toBe(1);
});
it("getAssetMedia", async () => {
let config = new MediaConfig();
config.Audio = true;
config.Video = false;
let stream = await DeviceApi.getAssetUserMedia(config);
expect(stream).not.toBeNull();
expect(stream.getVideoTracks().length).toBe(0);
expect(stream.getAudioTracks().length).toBe(1);
config = new MediaConfig();
config.Audio = false;
config.Video = true;
stream = await DeviceApi.getAssetUserMedia(config);
expect(stream).not.toBeNull();
expect(stream.getAudioTracks().length).toBe(0);
expect(stream.getVideoTracks().length).toBe(1);
});
it("getAssetMedia_invalid", async () => {
let config = new MediaConfig();
config.Audio = false;
config.Video = true;
config.VideoDeviceName = "invalid name"
let error = null;
let stream :MediaStream = null;
console.log("Expecting error message: Failed to find deviceId for label invalid name");
try
{
stream = await DeviceApi.getAssetUserMedia(config);
}catch(err){
error = err;
}
expect(stream).toBeNull();
expect(error).toBeTruthy();
});
//check for a specific bug causing promise catch not to trigger correctly
//due to error in ToConstraints
it("getAssetMedia_invalid_promise", (done) => {
let config = new MediaConfig();
config.Audio = false;
config.Video = true;
config.VideoDeviceName = "invalid name"
let result: Promise<MediaStream> = null;
result = DeviceApi.getAssetUserMedia(config);
result.then(()=>{
fail("getAssetUserMedia returned but was expected to fail");
}).catch((error)=>{
expect(error).toBeTruthy();
done();
})
});
it("UpdateAsync", async (done) => {
expect(DeviceApi.GetVideoDevices().length).toBe(0);
await DeviceApi.UpdateAsync();
expect(DeviceApi.GetVideoDevices().length).toBeGreaterThan(0);
expect(DeviceApi.GetVideoDevices().length).toBe(CAPI_Media_GetVideoDevices_Length());
done();
});
/*
it("Devices", async () => {
DeviceApi.RequestUpdate
let config = new MediaConfig();
config.Audio = false;
config.Video = true;
config.VideoDeviceName = "invalid name"
let error = null;
let stream :MediaStream = null;
console.log("Expecting error message: Failed to find deviceId for label invalid name");
try
{
stream = await DeviceApi.getAssetUserMedia(config);
}catch(err){
error = err;
}
expect(stream).toBeNull();
expect(error).toBeTruthy();
});
*/
});
import { VideoInput, Media, DeviceApi, MediaConfig, CAPI_Media_GetVideoDevices_Length, CAPI_Media_GetVideoDevices, BrowserMediaStream, WaitForIncomingCallEventArgs } from "../awrtc/index";
import { MakeTestCanvas } from "VideoInputTest";
export function MediaTest_export()
{
}
describe("MediaTest", () => {
beforeEach((done)=>{
let handler = ()=>{
DeviceApi.RemOnChangedHandler(handler);
done();
};
DeviceApi.AddOnChangedHandler(handler);
DeviceApi.Update();
Media.ResetSharedInstance();
});
it("SharedInstance", () => {
expect(Media.SharedInstance).toBeTruthy();
let instance1 = Media.SharedInstance;
Media.ResetSharedInstance();
expect(Media.SharedInstance).not.toBe(instance1);
});
it("GetVideoDevices", () => {
const media = new Media();
let devs = media.GetVideoDevices();
expect(devs).toBeTruthy();
expect(devs.length).toBeGreaterThan(0);
});
it("GetUserMedia", async () => {
const media = new Media();
let config = new MediaConfig();
config.Audio = false;
let stream = await media.getUserMedia(config);
expect(stream).not.toBeNull();
expect(stream.getAudioTracks().length).toBe(0);
expect(stream.getVideoTracks().length).toBe(1);
stream = null;
let err = null;
config.VideoDeviceName = "invalid name"
console.log("Expecting error message: Failed to find deviceId for label invalid name");
try{
stream = await media.getUserMedia(config);
}catch(error){
err = error;
}
expect(err).not.toBeNull();
expect(stream).toBeNull();
});
it("GetUserMedia_videoinput", async (done) => {
const name = "test_canvas";
const media = new Media();
const config = new MediaConfig();
config.Audio = false;
config.Video = true;
const canvas = MakeTestCanvas();
media.VideoInput.AddCanvasDevice(canvas, name, canvas.width, canvas.height, 30);
const streamCamera = await media.getUserMedia(config);
expect(streamCamera).not.toBeNull();
expect(streamCamera.getAudioTracks().length).toBe(0);
expect(streamCamera.getVideoTracks().length).toBe(1);
config.VideoDeviceName = name;
const streamCanvas = await media.getUserMedia(config);
expect(streamCanvas).not.toBeNull();
expect(streamCanvas.getAudioTracks().length).toBe(0);
expect(streamCanvas.getVideoTracks().length).toBe(1);
const streamCanvas2 = await media.getUserMedia(config);
expect(streamCanvas2).not.toBeNull();
expect(streamCanvas2.getAudioTracks().length).toBe(0);
expect(streamCanvas2.getVideoTracks().length).toBe(1);
done();
});
it("GetUserMedia_videoinput_and_audio", async () => {
const name = "test_canvas";
const media = new Media();
const config = new MediaConfig();
config.Audio = true;
config.Video = true;
const canvas = MakeTestCanvas();
media.VideoInput.AddCanvasDevice(canvas, name, canvas.width, canvas.height, 30);
config.VideoDeviceName = name;
let stream : MediaStream = null;
try{
stream = await media.getUserMedia(config);
}catch(err){
console.error(err);
fail(err);
}
expect(stream).not.toBeNull();
expect(stream.getAudioTracks().length).toBe(1);
expect(stream.getVideoTracks().length).toBe(1);
config.VideoDeviceName = "invalid name";
stream = null;
let error_result : string = null
try{
stream = await media.getUserMedia(config);
}catch(err){
error_result = err;
}
expect(error_result).not.toBeNull();
expect(stream).toBeNull();
}, 15000);
//CAPI needs to be changed to use Media only instead of the device API
it("MediaCapiVideoInput", async (done) => {
//empty normal device api
DeviceApi.Reset();
expect(CAPI_Media_GetVideoDevices_Length()).toBe(0);
const name = "test_canvas";
const canvas = MakeTestCanvas();
Media.SharedInstance.VideoInput.AddCanvasDevice(canvas, name, canvas.width, canvas.height, 30);
expect(CAPI_Media_GetVideoDevices_Length()).toBe(1);
expect(CAPI_Media_GetVideoDevices(0)).toBe(name);
done();
});
});
describe("MediaStreamTest", () => {
beforeEach((done)=>{
let handler = ()=>{
DeviceApi.RemOnChangedHandler(handler);
done();
};
DeviceApi.AddOnChangedHandler(handler);
DeviceApi.Update();
Media.ResetSharedInstance();
});
class TestStreamContainer
{
public canvas: HTMLCanvasElement;
public stream : MediaStream;
public constructor()
{
let canvas = document.createElement("canvas");
canvas.width = 4;
canvas.height = 4;
let ctx = canvas.getContext("2d");
//make blue for debugging purposes
ctx.fillStyle = "blue";
ctx.fillRect(0, 0, canvas.width, canvas.height);
this.canvas = canvas;
this.stream = (canvas as any).captureStream() as MediaStream;
}
public MakeFrame(color : string){
let ctx = this.canvas.getContext("2d");
ctx.clearRect(0, 0, this.canvas.width, this.canvas.height)
//fill with the given color so frame changes can be detected
ctx.fillStyle = color;
ctx.fillRect(0, 0, this.canvas.width, this.canvas.height);
}
}
function MakeTestStreamContainer()
{
return new TestStreamContainer();
}
//TODO: need proper way to wait and check with async/ await
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
async function WaitFor(){
}
it("buffer_and_trygetframe", async(done) => {
const testcontainer = MakeTestStreamContainer();
const stream = new BrowserMediaStream(testcontainer.stream);
//frames are not available at the start until fully loaded
let frame = stream.TryGetFrame();
expect(frame).toBeNull();
await sleep(100);
stream.Update();
//waited for the internals to get initialized. We should have a frame now
frame = stream.TryGetFrame();
expect(frame).not.toBeNull();
//and a buffer
let buffer = frame.Buffer;
expect(buffer).not.toBeNull();
//expected to be blue
let r = buffer[0];
let g = buffer[1];
let b = buffer[2];
let a = buffer[3];
expect(r).toBe(0);
expect(g).toBe(0);
expect(b).toBe(255);
expect(a).toBe(255);
//we removed the frame now. this should be null
frame = stream.TryGetFrame();
expect(frame).toBeNull();
//make a new frame with different color
testcontainer.MakeFrame("#FFFF00");
await sleep(100);
stream.Update();
//get new frame
frame = stream.TryGetFrame();
expect(frame).not.toBeNull();
buffer = frame.Buffer;
expect(buffer).not.toBeNull();
//should be different color now
r = buffer[0];
g = buffer[1];
b = buffer[2];
a = buffer[3];
expect(r).toBe(255);
expect(g).toBe(255);
expect(b).toBe(0);
expect(a).toBe(255);
//done
done();
});
function createTexture(gl: WebGL2RenderingContext) : WebGLTexture
{
const texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
// The video frame isn't copied in yet, so put a single
// opaque blue pixel into the texture to make it usable
// immediately. ToTexture overwrites it with the frame below.
const level = 0;
const internalFormat = gl.RGBA;
const width = 1;
const height = 1;
const border = 0;
const srcFormat = gl.RGBA;
const srcType = gl.UNSIGNED_BYTE;
const pixel = new Uint8Array([0, 0, 255, 255]); // opaque blue
gl.texImage2D(gl.TEXTURE_2D, level, internalFormat,
width, height, border, srcFormat, srcType,
pixel);
return texture;
}
it("texture", async(done) => {
//blue test container to stream from
const testcontainer = MakeTestStreamContainer();
const stream = new BrowserMediaStream(testcontainer.stream);
//document.body.appendChild(testcontainer.canvas);
//waited for the internals to get initialized. We should have a frame now
await sleep(100);
stream.Update();
let frame = stream.PeekFrame()
expect(frame).not.toBeNull();
//create another canvas but with WebGL context
//this is where we copy the texture to
let canvas = document.createElement("canvas");
canvas.width = testcontainer.canvas.width;
canvas.height = testcontainer.canvas.height;
//document.body.appendChild(canvas);
let gl = canvas.getContext("webgl2");
//testing only. draw this one red
gl.clearColor(1,0,0,1);
gl.clear(gl.COLOR_BUFFER_BIT);
//create new texture and copy the image into it
let texture = createTexture(gl);
let res = frame.ToTexture(gl, texture);
expect(res).toBe(true);
//we attach our test texture to a frame buffer, then read from it to copy the data back from the GPU
//into an array dst_buffer
const dst_buffer = new Uint8Array(testcontainer.canvas.width * testcontainer.canvas.height * 4);
const fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0);
gl.readPixels(0, 0, testcontainer.canvas.width, testcontainer.canvas.height, gl.RGBA, gl.UNSIGNED_BYTE, dst_buffer);
//check if we have the expected blue color we use to setup the testcontainer canvas
let r = dst_buffer[0];
let g = dst_buffer[1];
let b = dst_buffer[2];
let a = dst_buffer[3];
expect(r).toBe(0);
expect(g).toBe(0);
expect(b).toBe(255);
expect(a).toBe(255);
//TODO: could compare whole src / dst buffer to check if something is cut off
//const compare_buffer = frame.Buffer;
done();
});
});
\ No newline at end of file
import { VideoInput, VideoInputType } from "../awrtc/index";
export function VideoInputTest_export() {
}
export function MakeTestCanvas(w?: number, h?: number): HTMLCanvasElement {
if (w == null)
w = 4;
if (h == null)
h = 4;
let canvas = document.createElement("canvas");
canvas.width = w;
canvas.height = h;
let ctx = canvas.getContext("2d");
//make blue for debugging purposes
ctx.fillStyle = "blue";
ctx.fillRect(0, 0, canvas.width, canvas.height);
return canvas;
}
export function MakeBrokenTestCanvas(): HTMLCanvasElement {
let canvas = document.createElement("canvas");
return canvas;
}
/**Create test image with pattern
* Black White
* White Black
*
* So each corner can be tested for correct results.
*
* @param src_width
* @param src_height
*/
export function MakeTestImage(src_width: number, src_height: number): ImageData {
let src_size = src_width * src_height * 4;
let src_data = new Uint8ClampedArray(src_size);
for (let y = 0; y < src_height; y++) {
for (let x = 0; x < src_width; x++) {
let pos = y * src_width + x;
let xp = x >= src_width / 2;
let yp = y >= src_height / 2;
let val = 0;
if (xp || yp)
val = 255;
if (xp && yp)
val = 0;
src_data[pos * 4 + 0] = val;
src_data[pos * 4 + 1] = val;
src_data[pos * 4 + 2] = val;
src_data[pos * 4 + 3] = 255;
}
}
var src_img = new ImageData(src_data, src_width, src_height);
return src_img;
}
export function ExtractData(video: HTMLVideoElement): ImageData {
var canvas = document.createElement("canvas");
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
let dst_context = canvas.getContext('2d')
dst_context.drawImage(video, 0, 0, canvas.width, canvas.height);
let dst_img = dst_context.getImageData(0, 0, canvas.width, canvas.height);
return dst_img
}
describe("VideoInputTest", () => {
beforeEach(() => {
});
it("AddRem", () => {
let name = "test_canvas";
let vi = new VideoInput();
let canvas = document.createElement("canvas")
expect(vi.HasDevice(name)).toBe(false);
vi.AddCanvasDevice(canvas, name, canvas.width, canvas.height, 30);
expect(vi.HasDevice(name)).toBe(true);
vi.RemoveDevice(name);
expect(vi.HasDevice(name)).toBe(false);
});
it("GetDeviceNames", () => {
let name = "test_canvas";
let name2 = "test_canvas2";
let vi = new VideoInput();
let canvas = document.createElement("canvas")
let names = vi.GetDeviceNames();
expect(names).toBeTruthy();
expect(names.length).toBe(0);
vi.AddCanvasDevice(canvas, name, canvas.width, canvas.height, 30);
names = vi.GetDeviceNames();
expect(names).toBeTruthy();
expect(names.length).toBe(1);
expect(names[0]).toBe(name);
vi.AddCanvasDevice(canvas, name, canvas.width, canvas.height, 30);
names = vi.GetDeviceNames();
expect(names).toBeTruthy();
expect(names.length).toBe(1);
expect(names[0]).toBe(name);
vi.AddCanvasDevice(canvas, name2, canvas.width, canvas.height, 30);
names = vi.GetDeviceNames();
expect(names).toBeTruthy();
expect(names.length).toBe(2);
expect(names.sort()).toEqual([name, name2].sort());
});
it("GetStream", () => {
let name = "test_canvas";
let vi = new VideoInput();
let canvas = MakeTestCanvas();
let stream = vi.GetStream(name);
expect(stream).toBeNull();
vi.AddCanvasDevice(canvas, name, canvas.width, canvas.height, 30);
stream = vi.GetStream(name);
expect(stream).toBeTruthy();
});
it("AddCanvasDevice_no_scaling", (done) => {
let name = "test_canvas";
let vi = new VideoInput();
const src_width = 40;
const src_height = 30;
let canvas = MakeTestCanvas(src_width, src_height);
vi.AddCanvasDevice(canvas, name, canvas.width, canvas.height, 30);
let stream = vi.GetStream(name);
expect(stream).toBeTruthy();
let videoOutput = document.createElement("video")
videoOutput.onloadedmetadata = () => {
expect(videoOutput.videoWidth).toBe(src_width)
expect(videoOutput.videoHeight).toBe(src_height)
done()
}
videoOutput.srcObject = stream;
}, 1000);
it("AddCanvasDevice_scaling", (done) => {
let debug = false;
let name = "test_canvas";
let vi = new VideoInput();
const src_width = 64;
const src_height = 64;
const dst_width = 32;
const dst_height = 32;
let canvas = MakeTestCanvas(src_width, src_height);
let srcContext = canvas.getContext("2d");
var src_img = MakeTestImage(src_width, src_height);
srcContext.putImageData(src_img, 0, 0)
if (debug)
document.body.appendChild(canvas);
vi.AddCanvasDevice(canvas, name, dst_width, dst_height, 30);
let stream = vi.GetStream(name);
expect(stream).toBeTruthy();
let videoOutput = document.createElement("video")
if (debug)
document.body.appendChild(videoOutput);
videoOutput.onloadedmetadata = () => {
expect(videoOutput.videoWidth).toBe(dst_width)
expect(videoOutput.videoHeight).toBe(dst_height)
let dst_img_data = ExtractData(videoOutput)
//upper left
expect(dst_img_data.data[0]).toBe(0);
//upper right
expect(dst_img_data.data[((dst_width - 1) * 4)]).toBe(255);
//lower left
expect(dst_img_data.data[((dst_height - 1) * dst_width) * 4]).toBe(255);
//lower right
expect(dst_img_data.data[(dst_height * dst_width - 1) * 4]).toBe(0);
vi.RemoveDevice(name);
done()
}
videoOutput.srcObject = stream;
}, 1000);
//not yet clear how this can be handled
//this test will trigger an error in firefox
xit("GetStream_no_context", () => {
let name = "test_canvas";
let vi = new VideoInput();
let canvas = MakeBrokenTestCanvas();
//if we try to record from a canvas before
//a context was accessed it will fail.
//uncommenting this line fixes the bug
//but this is out of our control / within user code
//let ctx = canvas.getContext("2d");
let stream = vi.GetStream(name);
expect(stream).toBeNull();
vi.AddCanvasDevice(canvas, name, canvas.width, canvas.height, 30);
stream = vi.GetStream(name);
expect(stream).toBeTruthy();
});
it("AddRemDevice", () => {
let name = "test_canvas";
const w = 640;
const h = 480;
const fps = 30;
let vi = new VideoInput();
let stream = vi.GetStream(name);
expect(stream).toBeNull();
vi.AddDevice(name, w, h, fps);
let res = vi.GetDeviceNames().indexOf(name);
expect(res).toBe(0);
vi.RemoveDevice(name);
let res2 = vi.GetDeviceNames().indexOf(name);
expect(res2).toBe(-1);
});
it("Device_int_array", () => {
let name = "test_canvas";
const w = 2;
const h = 2;
const fps = 30;
let arr = new Uint8ClampedArray([
1, 2, 3, 255,
4, 5, 6, 255,
7, 8, 9, 255,
10, 11, 12, 255,
13, 14, 15, 255
]);
let vi = new VideoInput();
vi.AddDevice(name, w, h, fps);
let stream = vi.GetStream(name);
expect(stream).toBeTruthy();
const clamped = new Uint8ClampedArray(arr.buffer, 4, 4 * 4);
const res = vi.UpdateFrame(name, clamped, w, h, VideoInputType.ARGB, 0, false);
expect(res).toBe(true);
let result_canvas = (vi as any).canvasDevices[name].canvas as HTMLCanvasElement;
expect(result_canvas.width).toBe(w);
expect(result_canvas.height).toBe(h);
let result_img = result_canvas.getContext("2d").getImageData(0, 0, result_canvas.width, result_canvas.height);
const result_arr = new Uint8Array(result_img.data.buffer);
const base_arr = new Uint8Array(arr.buffer, 4, 4 * 4);
expect(base_arr).toEqual(result_arr);
});
it("Device_full", () => {
let src_canvas = MakeTestCanvas();
let src_ctx = src_canvas.getContext("2d");
src_ctx.fillStyle = "yellow";
src_ctx.fillRect(0, 0, src_canvas.width, src_canvas.height);
let name = "test_canvas";
const w = 2;
const h = 2;
const fps = 30;
src_canvas.width = w;
src_canvas.height = h;
let vi = new VideoInput();
let src_img = src_ctx.getImageData(0, 0, src_canvas.width, src_canvas.height);
vi.AddDevice(name, w, h, fps);
let stream = vi.GetStream(name);
expect(stream).toBeTruthy();
const res = vi.UpdateFrame(name, src_img.data, src_img.width, src_img.height, VideoInputType.ARGB, 0, false);
expect(res).toBe(true);
//test if the internal array was set correctly
let result_canvas = (vi as any).canvasDevices[name].canvas as HTMLCanvasElement;
expect(result_canvas.width).toBe(src_canvas.width);
expect(result_canvas.height).toBe(src_canvas.height);
let result_img = result_canvas.getContext("2d").getImageData(0, 0, result_canvas.width, result_canvas.height);
expect(result_img.width).toBe(src_img.width);
expect(result_img.height).toBe(src_img.height);
expect(result_img.data).toEqual(src_img.data);
});
});
\ No newline at end of file
......@@ -34,3 +34,6 @@ export * from "./CallTest"
export * from "./MediaNetworkTest"
export * from "./BrowserApiTest"
export * from "./DeviceApiTest"
export * from "./VideoInputTest"
export * from "./MediaTest"
......@@ -21,6 +21,8 @@
"LocalNetworkTest.ts",
"MediaNetworkTest.ts",
"DeviceApiTest.ts",
"BrowserApiTest.ts"
"VideoInputTest.ts",
"BrowserApiTest.ts",
"MediaTest.ts"
]
}