- From: guest271314 via GitHub <sysbot+gh@w3.org>
- Date: Mon, 29 Apr 2019 00:53:35 +0000
- To: public-webrtc-logs@w3.org
@martinthomson The feature request would be compatible and consistent with user selection of content. The "Take a Screenshot" feature of the Firefox Developer Tools provides a basic template for how to implement the feature: the user selects the content that should be captured, and that selection is translated into the appropriate corresponding `MediaStreamTrack` constraints.
`resizeMode: "crop-and-scale"` roughly provides such functionality now, if the user takes the time to test and determine what the resulting output will be.
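For instance, a minimal sketch along these lines (not part of the demo below; assuming a browser that implements `resizeMode`, e.g. Chromium) requests a fixed crop and scale and then logs what the browser actually produced:
```
// Minimal sketch, not from the original demo: request a 320x240 crop-and-scale
// of the captured track, then inspect the settings the browser actually applied.
(async () => {
  const stream = await navigator.mediaDevices.getDisplayMedia({ video: true });
  const [track] = stream.getVideoTracks();
  await track.applyConstraints({
    width: 320,
    height: 240,
    resizeMode: "crop-and-scale" // crop/scale rather than letterbox
  });
  console.log(track.getSettings());
})();
```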
One example use case is `getDisplayMedia()` being executed for a `window` opened with `window.open()`, where it is not possible to hide the scrollbars, location bar and title bar, though the expected output is a webm video without any of them. After a day of testing I was finally able to use CSS to achieve the requirement (at Chromium 73) just moments ago; see https://github.com/guest271314/MediaFragmentRecorder/tree/getdisplaymedia-webaudio-windowopen
```
<!DOCTYPE html>
<html>
<head>
<title>Record media fragments to single webm video using getDisplayMedia(), AudioContext(), window.open(), MediaRecorder()</title>
</head>
<body>
<h1 id="click">open window</h1>
<script>
const click = document.getElementById("click");
const go = ({
width = 320, height = 240
} = {}) => (async() => {
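// Document for the popup window: the CSS hides scrollbars, the cursor and the
// default media controls so they do not appear in the captured output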
const html = `<!DOCTYPE html>
<html>
<head>
<style>
* {padding:0; margin:0;overflow:hidden;}
#video {cursor:none; object-fit:cover;object-position: 50% 50%;}
video::-webkit-media-controls,audio::-webkit-media-controls {display:none !important;}
</style>
</head>
<body>
<!-- add 30 for title and location bars -->
<video id="video" width="${width}" height="${height}"></video>
</body>
</html>`;
const blob_url = URL.createObjectURL(new Blob([html], {
type: "text/html"
}));
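// Deferred promise; `done` is resolved with the recorded Blob URL in the
// "dataavailable" handler below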
let done;
const promise = new Promise(resolve => done = resolve);
const mediaWindow = window.open(blob_url, "getDisplayMedia", `width=${width},height=${height + 30},alwaysOnTop`);
mediaWindow.addEventListener("load", async e => {
console.log(e);
const mediaDocument = mediaWindow.document;
const video = mediaDocument.getElementById("video");
const displayStream = await navigator.mediaDevices.getDisplayMedia({
video: {
cursor: "never", // this has little/no effect https://github.com/web-platform-tests/wpt/issues/16206
displaySurface: "browser"
}
});
console.log(displayStream, displayStream.getTracks());
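// Fetch each source and store it as a Blob; the playback range comes either
// from explicit from/to properties or from a #t=from,to media fragment on the URL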
let urls = await Promise.all([{
src: "https://upload.wikimedia.org/wikipedia/commons/a/a4/Xacti-AC8EX-Sample_video-001.ogv",
from: 0,
to: 4
}, {
src: "https://mirrors.creativecommons.org/movingimages/webm/ScienceCommonsJesseDylan_240p.webm#t=10,20"
}, {
from: 55,
to: 60,
src: "https://nickdesaulniers.github.io/netfix/demo/frag_bunny.mp4"
}, {
from: 0,
to: 5,
src: "https://raw.githubusercontent.com/w3c/web-platform-tests/master/media-source/mp4/test.mp4"
}, {
from: 0,
to: 5,
src: "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerBlazes.mp4"
}, {
from: 0,
to: 5,
src: "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerJoyrides.mp4"
}, {
src: "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerMeltdowns.mp4#t=0,6"
}].map(async ({ ...props }) => {
const { src } = props;
const blob = (await (await fetch(src)).blob());
return {
blob,
...props
}
}));
click.textContent = "click popup window to start recording";
const canvas = document.createElement("canvas");
canvas.width = width;
canvas.height = height;
const ctx = canvas.getContext("2d");
ctx.font = "20px Monospace";
ctx.fillText("click to start recording", 0, height / 2);
video.poster = canvas.toDataURL();
mediaWindow.focus();
mediaWindow.addEventListener("click", async e => {
video.poster = "";
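// Build the stream to record: route the <video> element's audio into a
// MediaStreamAudioDestinationNode and add the captured display video track
// to the same stream so MediaRecorder receives both audio and video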
const context = new AudioContext();
const mediaStream = context.createMediaStreamDestination();
const [audioTrack] = mediaStream.stream.getAudioTracks();
const [videoTrack] = displayStream.getVideoTracks();
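// Request a 320x240 crop-and-scale of the captured track (the approach used
// here to crop out the window chrome); note applyConstraints() returns a
// Promise that is not awaited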
videoTrack.applyConstraints({
cursor: "never",
width: 320,
height: 240,
aspectRatio: 1.33,
resizeMode: "crop-and-scale"
});
mediaStream.stream.addTrack(videoTrack);
console.log(videoTrack.getSettings());
const source = context.createMediaElementSource(video);
source.connect(context.destination);
source.connect(mediaStream);
[videoTrack, audioTrack].forEach(track => {
track.onended = e => console.log(e);
});
const recorder = new MediaRecorder(mediaStream.stream, {
mimeType: "video/webm;codecs=vp8,opus",
audioBitsPerSecond: 128000,
videoBitsPerSecond: 2500000
});
recorder.addEventListener("error", e => {
console.error(e)
});
recorder.addEventListener("dataavailable", e => {
console.log(e.data);
done(URL.createObjectURL(e.data));
});
recorder.addEventListener("stop", e => {
console.log(e);
[videoTrack, audioTrack].forEach(track => track.stop());
});
video.addEventListener("loadedmetadata", async e => {
console.log(e);
try {
await video.play();
} catch (e) {
console.error(e);
}
});
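// Play the fragments sequentially: each blob URL carries a #t=from,to media
// fragment; the recorder starts on the first "play" event, pauses on each
// "pause" (fired when the fragment end is reached) and resumes on the next
// "play"; recorder.stop() is called once all fragments have been played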
try {
for (let { from, to, src, blob } of urls) {
await new Promise(resolve => {
const url = new URL(src);
if (url.hash.length) {
[from, to] = url.hash.match(/\d+\.\d+|\d+/g).map(Number); // match decimal times before integers
}
const blobURL = URL.createObjectURL(blob);
video.addEventListener("play", e => {
if (recorder.state === "inactive") {
recorder.start()
} else {
if (recorder.state === "paused") {
recorder.resume();
}
}
}, {
once: true
});
video.addEventListener("pause", e => {
if (recorder.state === "recording") {
recorder.pause();
}
resolve();
}, {
once: true
});
video.src = `${blobURL}#t=${from},${to}`;
})
}
recorder.stop();
} catch (e) {
throw e;
}
}, {
once: true
});
});
return await promise;
})()
.then(blobURL => {
console.log(blobURL);
const video = document.createElement("video");
video.controls = true;
document.body.appendChild(video);
video.src = blobURL;
}, console.error);
click.addEventListener("click", e => {
go();
}, {
once: true
});
</script>
</body>
</html>
```
The Media Capture Screen Share API should provide a means to achieve such a requirement, either by fine-tuning the constraints for such selection, or by allowing the user to select the content to be captured in a similar fashion to the Firefox "Take a Screenshot" developer tool.
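To illustrate the constraints approach, a hypothetical shape could look something like the sketch below; `captureArea` and its fields are invented names used purely for illustration and are not defined in any specification (today a browser would simply ignore an unknown constraint):
```
// HYPOTHETICAL sketch only: "captureArea" is an invented constraint name used
// to illustrate the feature request; it does not exist in any specification.
navigator.mediaDevices.getDisplayMedia({
  video: {
    displaySurface: "browser",
    // region selected by the user, e.g. via a "Take a Screenshot"-style overlay
    captureArea: { x: 0, y: 30, width: 320, height: 240 }
  }
}).then(stream => {
  // the resulting video track would contain only the selected region
  console.log(stream.getVideoTracks()[0].getSettings());
});
```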
What is the reason such functionality should not be provided by this API?
--
GitHub Notification of comment by guest271314
Please view or discuss this issue at https://github.com/w3c/mediacapture-screen-share/issues/105#issuecomment-487429994 using your GitHub account
Received on Monday, 29 April 2019 00:53:37 UTC