我们用手机的摄像头自拍,很容易实现简单的自拍效果,如复古、黑白等等。其实使用 Web 端的 JavaScript 也可以实现同样的效果。接下来就带领小伙伴实现一个魔法摄像头,并且提供截图下载功能。
<!DOCTYPE html>
<html>
<head>
<title>Canvas Demo</title>
</head>
<body>
<video id="videoElement" autoplay></video>
<canvas id="canvasElement"></canvas>
<script>
// Grab the <video> element that will display the live camera stream.
const video = document.getElementById('videoElement');
// Feature-detect the getUserMedia API before requesting the camera.
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    // Ask the user for permission to use the camera.
    navigator.mediaDevices.getUserMedia({ video: true })
        .then(function (stream) {
            // Bind the live stream to the <video> element.
            // NOTE: the original snippet also called requestAnimationFrame(drawFrame)
            // here, but drawFrame is not defined in this first example; the
            // ReferenceError was silently swallowed by .catch below and logged
            // as a misleading "无法访问摄像头" error. The call is removed — the
            // canvas drawing loop is introduced in the next example.
            video.srcObject = stream;
        })
        .catch(function (error) {
            console.error('无法访问摄像头:', error);
        });
} else {
    console.error('浏览器不支持 getUserMedia API');
}
</script>
</body>
</html>
<!DOCTYPE html>
<html>
<head>
<title>Canvas Demo</title>
</head>
<body>
<video id="videoElement" autoplay></video>
<canvas id="canvasElement"></canvas>
<script>
// Look up the video element and the canvas (plus its 2D context) we paint onto.
const video = document.getElementById('videoElement');
const canvas = document.getElementById('canvasElement');
const ctx = canvas.getContext('2d');

// Once the stream's metadata is known, size the canvas to match the video.
video.addEventListener('loadedmetadata', function () {
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
});

// Copy the current video frame onto the canvas, then schedule the next copy.
// requestAnimationFrame fires roughly 60 times per second.
function drawFrame() {
    ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
    requestAnimationFrame(drawFrame);
}

// Called when camera access is granted: attach the stream and start drawing.
function handleStream(stream) {
    video.srcObject = stream;
    requestAnimationFrame(drawFrame);
}

// Called when camera access fails or is denied.
function handleError(error) {
    console.error('无法访问摄像头:', error);
}

// Feature-detect the getUserMedia API, then request the camera.
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices.getUserMedia({ video: true })
        .then(handleStream)
        .catch(handleError);
} else {
    console.error('浏览器不支持 getUserMedia API');
}
</script>
</body>
</html>
以下代码没有进行过多封装,后续会出一篇使用面向对象和设计模式的续集来优化代码
本次案例实现的滤镜效果主要有 反转 黑白 亮度 复古 红色 绿色 蓝色 透明 马赛克 渐变
在canvas中,可以通过 getImageData 获取到当前画布上所有的像素点,它以4个点为一组,表示画布上当前坐标点的 R G B A (红、绿、蓝、透明度)。我们要实现的滤镜效果,几乎都是直接对该像素点进行操作。如 黑白效果 将每个像素的RGB值转换为灰度值(R、G、B三个分量取平均值)
<!DOCTYPE html>
<html>
<head>
<title>Canvas Demo</title>
</head>
<body>
<video id="videoElement" autoplay></video>
<canvas id="canvasElement"></canvas>
<script>
// Video source, destination canvas, and its 2D drawing context.
// (The stray buttons/takePhoto/drawType declarations from the original were
// dead code copied from a later example — #takePhoto does not even exist on
// this page, so querySelector returned null. They are removed.)
const video = document.getElementById('videoElement');
const canvas = document.getElementById('canvasElement');
const ctx = canvas.getContext('2d');
// Match the canvas size to the video once its dimensions are known.
video.addEventListener('loadedmetadata', function () {
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
});
// Draw the current frame, apply a grayscale filter to the raw RGBA bytes,
// write them back, then schedule the next frame (~60 fps).
function drawFrame() {
    ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
    let imageObj = ctx.getImageData(0, 0, canvas.width, canvas.height);
    // Grayscale: replace R, G and B with their average. Only the three color
    // channels take part — the original wrongly added the alpha byte
    // (data[i + 3], usually 255) into the sum, washing the picture out.
    for (let i = 0; i < imageObj.data.length; i += 4) {
        const average = (imageObj.data[i + 0] + imageObj.data[i + 1] + imageObj.data[i + 2]) / 3;
        imageObj.data[i + 0] = average; // red
        imageObj.data[i + 1] = average; // green
        imageObj.data[i + 2] = average; // blue
    }
    ctx.putImageData(imageObj, 0, 0);
    requestAnimationFrame(drawFrame);
}
// Feature-detect getUserMedia, request the camera, and start the render loop.
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices.getUserMedia({ video: true })
        .then(function (stream) {
            // Show the live stream and start painting frames.
            video.srcObject = stream;
            requestAnimationFrame(drawFrame);
        })
        .catch(function (error) {
            console.error('无法访问摄像头:', error);
        });
} else {
    console.error('浏览器不支持 getUserMedia API');
}
</script>
</body>
</html>
各滤镜的实现思路如下:
- 反转:用 getImageData 获取图像数据,遍历每个像素,将其 RGB 值取反,再用 putImageData 将修改后的数据绘制回 Canvas。
- 黑白:遍历每个像素,将 RGB 值转换为灰度值(R、G、B 三个分量取平均值)后写回。
- 亮度:遍历每个像素,调整其亮度值后写回。
- 复古:遍历每个像素,调整其色调、饱和度和亮度后写回。
- 红色/绿色/蓝色:遍历每个像素,增加或减少其红、绿、蓝分量的值后写回。
- 透明:遍历每个像素,调整其透明度值后写回。
- 马赛克:将图像分割为小块,计算每个小块内像素的平均值,并将该小块内所有像素设置为该平均值,最后写回。
- 渐变:使用 createLinearGradient 或 createRadialGradient 创建渐变对象,并以渐变对象作为填充样式将图像绘制到 Canvas 上。
<!DOCTYPE html>
<html>
<head>
<title>Canvas Demo</title>
<style>
button {
    border-radius: 10px;
    display: inline-flex;
    align-items: center;
    justify-content: center;
    cursor: pointer;
    overflow: hidden;
    user-select: none;
    outline: none;
    border: none;
    padding: 16px;
    background-color: #1d93ab;
    color: #fff;
}
button:focus {
    background-color: #e88f21
}
</style>
</head>
<body>
<div>
<!-- data-type="invert" (was "gray") now matches what the filter does:
     the "反转" button inverts colors; "blackwhite" is the grayscale one. -->
<button data-type="invert">反转</button>
<button data-type="blackwhite">黑白</button>
<button data-type="brightness">亮度</button>
<button data-type="sepia">复古</button>
<button data-type="redMask">红色</button>
<button data-type="greenMask">绿色</button>
<button data-type="blueMask">蓝色</button>
<button data-type="opacity">透明</button>
<button data-type="mosaic">马赛克</button>
<button data-type="linearGradient">渐变</button>
</div>
<video id="videoElement" autoplay></video>
<canvas id="canvasElement"></canvas>
<script>
// Video source, destination canvas, 2D context, and the filter buttons.
const video = document.getElementById('videoElement');
const canvas = document.getElementById('canvasElement');
const ctx = canvas.getContext('2d');
const buttons = document.querySelectorAll("button[data-type]");
let drawType = ""; // currently selected filter ("" = no filter)
// Match the canvas size to the video once its dimensions are known.
video.addEventListener('loadedmetadata', function () {
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
});
// Apply the currently selected per-pixel filter to imageObj.data in place.
function applyPixelFilter(imageObj) {
    const data = imageObj.data; // flat RGBA bytes, 4 per pixel
    if (drawType === "invert") {
        // Invert: replace each color channel with its complement.
        for (let i = 0; i < data.length; i += 4) {
            data[i + 0] = 255 - data[i + 0];
            data[i + 1] = 255 - data[i + 1];
            data[i + 2] = 255 - data[i + 2];
        }
    } else if (drawType === "blackwhite") {
        // Grayscale: average the R, G and B channels. Only the three color
        // channels take part — the original wrongly included the alpha byte
        // (usually 255) in the sum, washing the picture out.
        for (let i = 0; i < data.length; i += 4) {
            const average = (data[i + 0] + data[i + 1] + data[i + 2]) / 3;
            data[i + 0] = average; // red
            data[i + 1] = average; // green
            data[i + 2] = average; // blue
        }
    } else if (drawType === "brightness") {
        // Brightness: add a constant to each color channel.
        // Uint8ClampedArray clamps the result to 0..255 automatically.
        const boost = 50;
        for (let i = 0; i < data.length; i += 4) {
            data[i + 0] += boost;
            data[i + 1] += boost;
            data[i + 2] += boost;
        }
    } else if (drawType === "sepia") {
        // Sepia: classic weighted mix of the original R/G/B values.
        for (let i = 0; i < data.length; i += 4) {
            const r = data[i + 0];
            const g = data[i + 1];
            const b = data[i + 2];
            data[i + 0] = r * 0.39 + g * 0.76 + b * 0.18;
            data[i + 1] = r * 0.35 + g * 0.68 + b * 0.16;
            data[i + 2] = r * 0.27 + g * 0.53 + b * 0.13;
        }
    } else if (drawType === "redMask" || drawType === "greenMask" || drawType === "blueMask") {
        // Single-color mask: keep the gray level in one channel, zero the rest.
        const channel = drawType === "redMask" ? 0 : drawType === "greenMask" ? 1 : 2;
        for (let i = 0; i < data.length; i += 4) {
            const average = (data[i + 0] + data[i + 1] + data[i + 2]) / 3;
            data[i + 0] = 0;
            data[i + 1] = 0;
            data[i + 2] = 0;
            data[i + channel] = average;
        }
    } else if (drawType === "opacity") {
        // Translucency: scale the alpha channel down.
        for (let i = 0; i < data.length; i += 4) {
            data[i + 3] = data[i + 3] * 0.3;
        }
    } else if (drawType === "linearGradient") {
        // Gradient: replace the frame with a position-based color ramp.
        for (let i = 0; i < data.length; i += 4) {
            const x = (i / 4) % canvas.width;             // pixel x coordinate
            const y = Math.floor(i / (4 * canvas.width)); // pixel y coordinate
            data[i] = x / canvas.width * 255;       // red follows x
            data[i + 1] = y / canvas.height * 255;  // green follows y
            data[i + 2] = 128;                      // constant blue
            data[i + 3] = 100;                      // semi-transparent
        }
    }
}
// Draw the current video frame, apply the selected filter, and schedule
// the next frame via requestAnimationFrame (~60 fps).
function drawFrame() {
    ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
    if (drawType === "mosaic") {
        // Mosaic works by rescaling rather than editing pixel data:
        // shrink the frame, then blow it back up with smoothing disabled.
        ctx.imageSmoothingEnabled = false;
        const tileSize = 10; // edge length of one mosaic tile, in pixels
        ctx.drawImage(canvas, 0, 0, canvas.width, canvas.height, 0, 0, canvas.width / tileSize, canvas.height / tileSize);
        ctx.drawImage(canvas, 0, 0, canvas.width / tileSize, canvas.height / tileSize, 0, 0, canvas.width, canvas.height);
    } else if (drawType !== "") {
        // Pixel filters: fetch the raw RGBA bytes, transform them in place,
        // then write them back. Skipped entirely when no filter is active,
        // avoiding a needless getImageData copy on every frame.
        const imageObj = ctx.getImageData(0, 0, canvas.width, canvas.height);
        applyPixelFilter(imageObj);
        ctx.putImageData(imageObj, 0, 0);
    }
    requestAnimationFrame(drawFrame);
}
// Feature-detect getUserMedia, request the camera, and start the render loop.
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices.getUserMedia({ video: true })
        .then(function (stream) {
            // Show the live stream and start painting frames.
            video.srcObject = stream;
            requestAnimationFrame(drawFrame);
        })
        .catch(function (error) {
            console.error('无法访问摄像头:', error);
        });
} else {
    console.error('浏览器不支持 getUserMedia API');
}
// Each filter button records which filter the render loop should apply.
buttons.forEach(button => {
    button.addEventListener("click", function () {
        drawType = button.dataset.type;
    });
});
</script>
</body>
</html>
截图下载的实现:使用 toDataURL 方法将 Canvas 内容转换为数据 URL;创建一个 <a> 元素,并将数据 URL 赋值给其 href 属性;设置 <a> 元素的 download 属性为要保存的文件名;最后模拟点击 <a> 元素来触发下载。
const takePhoto = document.querySelector("#takePhoto")// 截图 按钮
// "拍摄" button: export the current canvas content as a PNG download.
takePhoto.addEventListener('click', function () {
    // The render loop has already painted the latest (filtered) frame onto the
    // canvas. The original ctx.drawImage(canvas, 0, 0, ...) call here drew the
    // canvas onto itself at identical geometry — a no-op whose comment falsely
    // claimed it copied to "a new canvas" — so it is removed.
    // Convert the canvas content to a data URL.
    const dataURL = canvas.toDataURL();
    // Build a temporary <a> element pointing at the data URL.
    const link = document.createElement('a');
    link.href = dataURL;
    link.download = 'screenshot.png'; // file name for the saved screenshot
    // Simulate a click on the <a> element to trigger the download.
    link.click();
})
<!DOCTYPE html>
<html>
<head>
<title>Canvas Demo</title>
<style>
button {
    border-radius: 10px;
    display: inline-flex;
    align-items: center;
    justify-content: center;
    cursor: pointer;
    overflow: hidden;
    user-select: none;
    outline: none;
    border: none;
    padding: 16px;
    background-color: #1d93ab;
    color: #fff;
}
button:focus {
    background-color: #e88f21
}
</style>
</head>
<body>
<div>
<!-- data-type="invert" (was "gray") now matches what the filter does:
     the "反转" button inverts colors; "blackwhite" is the grayscale one. -->
<button data-type="invert">反转</button>
<button data-type="blackwhite">黑白</button>
<button data-type="brightness">亮度</button>
<button data-type="sepia">复古</button>
<button data-type="redMask">红色</button>
<button data-type="greenMask">绿色</button>
<button data-type="blueMask">蓝色</button>
<button data-type="opacity">透明</button>
<button data-type="mosaic">马赛克</button>
<button data-type="linearGradient">渐变</button>
<button id="takePhoto">拍摄</button>
</div>
<video id="videoElement" autoplay></video>
<canvas id="canvasElement"></canvas>
<script>
// Video source, destination canvas, 2D context, filter buttons, and the
// screenshot button.
const video = document.getElementById('videoElement');
const canvas = document.getElementById('canvasElement');
const ctx = canvas.getContext('2d');
const buttons = document.querySelectorAll("button[data-type]");
const takePhoto = document.querySelector("#takePhoto"); // screenshot button
let drawType = ""; // currently selected filter ("" = no filter)
// Match the canvas size to the video once its dimensions are known.
video.addEventListener('loadedmetadata', function () {
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
});
// Apply the currently selected per-pixel filter to imageObj.data in place.
function applyPixelFilter(imageObj) {
    const data = imageObj.data; // flat RGBA bytes, 4 per pixel
    if (drawType === "invert") {
        // Invert: replace each color channel with its complement.
        for (let i = 0; i < data.length; i += 4) {
            data[i + 0] = 255 - data[i + 0];
            data[i + 1] = 255 - data[i + 1];
            data[i + 2] = 255 - data[i + 2];
        }
    } else if (drawType === "blackwhite") {
        // Grayscale: average the R, G and B channels. Only the three color
        // channels take part — the original wrongly included the alpha byte
        // (usually 255) in the sum, washing the picture out.
        for (let i = 0; i < data.length; i += 4) {
            const average = (data[i + 0] + data[i + 1] + data[i + 2]) / 3;
            data[i + 0] = average; // red
            data[i + 1] = average; // green
            data[i + 2] = average; // blue
        }
    } else if (drawType === "brightness") {
        // Brightness: add a constant to each color channel.
        // Uint8ClampedArray clamps the result to 0..255 automatically.
        const boost = 50;
        for (let i = 0; i < data.length; i += 4) {
            data[i + 0] += boost;
            data[i + 1] += boost;
            data[i + 2] += boost;
        }
    } else if (drawType === "sepia") {
        // Sepia: classic weighted mix of the original R/G/B values.
        for (let i = 0; i < data.length; i += 4) {
            const r = data[i + 0];
            const g = data[i + 1];
            const b = data[i + 2];
            data[i + 0] = r * 0.39 + g * 0.76 + b * 0.18;
            data[i + 1] = r * 0.35 + g * 0.68 + b * 0.16;
            data[i + 2] = r * 0.27 + g * 0.53 + b * 0.13;
        }
    } else if (drawType === "redMask" || drawType === "greenMask" || drawType === "blueMask") {
        // Single-color mask: keep the gray level in one channel, zero the rest.
        const channel = drawType === "redMask" ? 0 : drawType === "greenMask" ? 1 : 2;
        for (let i = 0; i < data.length; i += 4) {
            const average = (data[i + 0] + data[i + 1] + data[i + 2]) / 3;
            data[i + 0] = 0;
            data[i + 1] = 0;
            data[i + 2] = 0;
            data[i + channel] = average;
        }
    } else if (drawType === "opacity") {
        // Translucency: scale the alpha channel down.
        for (let i = 0; i < data.length; i += 4) {
            data[i + 3] = data[i + 3] * 0.3;
        }
    } else if (drawType === "linearGradient") {
        // Gradient: replace the frame with a position-based color ramp.
        for (let i = 0; i < data.length; i += 4) {
            const x = (i / 4) % canvas.width;             // pixel x coordinate
            const y = Math.floor(i / (4 * canvas.width)); // pixel y coordinate
            data[i] = x / canvas.width * 255;       // red follows x
            data[i + 1] = y / canvas.height * 255;  // green follows y
            data[i + 2] = 128;                      // constant blue
            data[i + 3] = 100;                      // semi-transparent
        }
    }
}
// Draw the current video frame, apply the selected filter, and schedule
// the next frame via requestAnimationFrame (~60 fps).
function drawFrame() {
    ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
    if (drawType === "mosaic") {
        // Mosaic works by rescaling rather than editing pixel data:
        // shrink the frame, then blow it back up with smoothing disabled.
        ctx.imageSmoothingEnabled = false;
        const tileSize = 10; // edge length of one mosaic tile, in pixels
        ctx.drawImage(canvas, 0, 0, canvas.width, canvas.height, 0, 0, canvas.width / tileSize, canvas.height / tileSize);
        ctx.drawImage(canvas, 0, 0, canvas.width / tileSize, canvas.height / tileSize, 0, 0, canvas.width, canvas.height);
    } else if (drawType !== "") {
        // Pixel filters: fetch the raw RGBA bytes, transform them in place,
        // then write them back. Skipped entirely when no filter is active,
        // avoiding a needless getImageData copy on every frame.
        const imageObj = ctx.getImageData(0, 0, canvas.width, canvas.height);
        applyPixelFilter(imageObj);
        ctx.putImageData(imageObj, 0, 0);
    }
    requestAnimationFrame(drawFrame);
}
// Feature-detect getUserMedia, request the camera, and start the render loop.
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices.getUserMedia({ video: true })
        .then(function (stream) {
            // Show the live stream and start painting frames.
            video.srcObject = stream;
            requestAnimationFrame(drawFrame);
        })
        .catch(function (error) {
            console.error('无法访问摄像头:', error);
        });
} else {
    console.error('浏览器不支持 getUserMedia API');
}
// Each filter button records which filter the render loop should apply.
buttons.forEach(button => {
    button.addEventListener("click", function () {
        drawType = button.dataset.type;
    });
});
// "拍摄" button: export the current canvas content as a PNG download.
takePhoto.addEventListener('click', function () {
    // The render loop has already painted the latest (filtered) frame onto
    // the canvas; the original self-drawImage call here was a no-op and has
    // been removed. Convert the canvas content to a data URL, then download
    // it via a temporary <a> element.
    const dataURL = canvas.toDataURL();
    const link = document.createElement('a');
    link.href = dataURL;
    link.download = 'screenshot.png'; // file name for the saved screenshot
    link.click();
});
</script>
</body>
</html>