
HarmonyOS Auto-Mosaic Screenshot Tool Development Guide
1. System Architecture Design
This auto-mosaic screenshot tool is built on HarmonyOS and uses on-device AI plus distributed technology to provide the following features (a minimal end-to-end sketch of the flow follows below):
Smart recognition: detect faces and sensitive text through the @ohos.ai modules
Automatic masking: blur or mosaic the detected regions
Cross-device sync: multiple devices collaborate on screenshot-processing tasks
Privacy protection: all processing is done on-device, so the data stays secure
Architecture diagram: https://example.com/harmony-auto-mosaic-arch.png
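The sketch below shows how these pieces fit together at a glance. It assumes the aiService and imageService singletons implemented in the next section; the helper name captureAndMask is illustrative and not part of the services themselves.

// Pipeline sketch: capture -> detect -> mask -> save (all on-device)
import { aiService } from './AIService';
import { imageService } from './ImageService';

async function captureAndMask(): Promise<string | null> {
  const screenshot = await imageService.captureScreen();               // 1. take the screenshot
  if (!screenshot) return null;
  const detection = await aiService.detectSensitiveAreas(screenshot);  // 2. find faces and text
  const masked = await imageService.applyMosaic(                       // 3. mosaic every sensitive area
    screenshot,
    [...detection.faces, ...detection.texts]
  );
  return imageService.saveToGallery(masked);                           // 4. save the result locally
}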
2. Core Code Implementation
AI Recognition Service Wrapper
// AIService.ets
import face from '@ohos.ai.face';
import text from '@ohos.ai.text';
import image from '@ohos.multimedia.image';
import common from '@ohos.app.ability.common';

class AIService {
  private static instance: AIService = null;
  private faceDetector: face.FaceDetector;
  private textDetector: text.TextDetector;

  private constructor() {
    this.initDetectors();
  }

  public static getInstance(): AIService {
    if (!AIService.instance) {
      AIService.instance = new AIService();
    }
    return AIService.instance;
  }

  private async initDetectors(): Promise<void> {
    try {
      const context = getContext() as common.Context;
      // Initialize the face detector
      this.faceDetector = await face.createFaceDetector(context);
      await this.faceDetector.setConfig({
        processMode: face.ProcessMode.PROCESS_MODE_VIDEO,
        performanceMode: face.PerformanceMode.PERFORMANCE_MODE_FAST
      });
      // Initialize the text detector
      this.textDetector = await text.createTextDetector(context);
      await this.textDetector.setConfig({
        detectMode: text.DetectMode.DETECT_MODE_ACCURATE,
        language: text.Language.LANGUAGE_ZH
      });
    } catch (err) {
      console.error('Failed to initialize AI detectors:', JSON.stringify(err));
    }
  }

  public async detectSensitiveAreas(pixelMap: image.PixelMap): Promise<DetectionResult> {
    const result: DetectionResult = {
      faces: [],
      texts: []
    };
    try {
      // Face detection
      if (this.faceDetector) {
        const faceResults = await this.faceDetector.detect(pixelMap);
        result.faces = faceResults.faceList.map((f) => ({
          x: f.rect.left,
          y: f.rect.top,
          width: f.rect.right - f.rect.left,
          height: f.rect.bottom - f.rect.top,
          confidence: f.confidence
        }));
      }
      // Text detection
      if (this.textDetector) {
        const textResults = await this.textDetector.detect(pixelMap);
        result.texts = textResults.textBlocks.flatMap(block =>
          block.textLines.map(line => ({
            x: line.rect.left,
            y: line.rect.top,
            width: line.rect.right - line.rect.left,
            height: line.rect.bottom - line.rect.top,
            content: line.textContent,
            confidence: line.confidence
          }))
        );
      }
    } catch (err) {
      console.error('Failed to detect sensitive areas:', JSON.stringify(err));
    }
    return result;
  }

  public async release(): Promise<void> {
    if (this.faceDetector) {
      await this.faceDetector.release();
      this.faceDetector = null;
    }
    if (this.textDetector) {
      await this.textDetector.release();
      this.textDetector = null;
    }
  }
}

export interface DetectionResult {
  faces: FaceArea[];
  texts: TextArea[];
}

export interface FaceArea {
  x: number;
  y: number;
  width: number;
  height: number;
  confidence: number;
}

export interface TextArea {
  x: number;
  y: number;
  width: number;
  height: number;
  content: string;
  confidence: number;
}

export const aiService = AIService.getInstance();
Image Processing Service
// ImageService.ets
import image from '@ohos.multimedia.image';
import effect from '@ohos.effectKit';
import window from '@ohos.window';
import mediaLibrary from '@ohos.multimedia.mediaLibrary';
import fs from '@ohos.file.fs';

class ImageService {
  private static instance: ImageService = null;

  private constructor() {}

  public static getInstance(): ImageService {
    if (!ImageService.instance) {
      ImageService.instance = new ImageService();
    }
    return ImageService.instance;
  }

  public async applyMosaic(
    pixelMap: image.PixelMap,
    areas: { x: number, y: number, width: number, height: number }[],
    mosaicSize: number = 15
  ): Promise<image.PixelMap> {
    if (areas.length === 0) return pixelMap;
    try {
      // Create the effect processor
      const effectProcessor = new effect.EffectProcessor();
      // Apply a mosaic to every detected area
      let processedPixelMap = pixelMap;
      for (const area of areas) {
        if (area.width <= 0 || area.height <= 0) continue;
        // Mosaic effect configuration for this area
        const mosaicEffect = new effect.MosaicEffect({
          x: area.x,
          y: area.y,
          width: area.width,
          height: area.height,
          mosaicSize: mosaicSize
        });
        // Apply the effect
        processedPixelMap = await effectProcessor.applyEffect(
          processedPixelMap,
          [mosaicEffect]
        );
      }
      return processedPixelMap;
    } catch (err) {
      console.error('Failed to apply mosaic:', JSON.stringify(err));
      return pixelMap;
    }
  }

  public async applyBlur(
    pixelMap: image.PixelMap,
    areas: { x: number, y: number, width: number, height: number }[],
    blurRadius: number = 25
  ): Promise<image.PixelMap> {
    if (areas.length === 0) return pixelMap;
    try {
      // Create the effect processor
      const effectProcessor = new effect.EffectProcessor();
      // Apply a blur to every detected area
      let processedPixelMap = pixelMap;
      for (const area of areas) {
        if (area.width <= 0 || area.height <= 0) continue;
        // Blur effect configuration for this area
        const blurEffect = new effect.BlurEffect({
          x: area.x,
          y: area.y,
          width: area.width,
          height: area.height,
          radius: blurRadius
        });
        // Apply the effect
        processedPixelMap = await effectProcessor.applyEffect(
          processedPixelMap,
          [blurEffect]
        );
      }
      return processedPixelMap;
    } catch (err) {
      console.error('Failed to apply blur:', JSON.stringify(err));
      return pixelMap;
    }
  }

  public async captureScreen(): Promise<image.PixelMap | null> {
    try {
      const windowClass = await window.getTopWindow(getContext(this));
      const screenshot = await windowClass.screenshot();
      return screenshot;
    } catch (err) {
      console.error('Screen capture failed:', JSON.stringify(err));
      return null;
    }
  }

  public async saveToGallery(pixelMap: image.PixelMap): Promise<string | null> {
    try {
      const mediaLib = await mediaLibrary.getMediaLibrary(getContext(this));
      const file = await mediaLib.createAsset(
        mediaLibrary.MediaType.IMAGE,
        'screenshot_' + Date.now() + '.jpg',
        mediaLibrary.StorageFlag.STORAGE_FLAG_DEFAULT
      );
      const fd = await file.open('w');
      // Encode the pixel map as JPEG and write it through the asset's file descriptor
      const arrayBuffer = await image.createImagePacker().packing(pixelMap, {
        format: 'image/jpeg',
        quality: 90
      });
      fs.writeSync(fd, arrayBuffer);
      await file.close(fd);
      return file.uri;
    } catch (err) {
      console.error('Failed to save to gallery:', JSON.stringify(err));
      return null;
    }
  }
}

export const imageService = ImageService.getInstance();
Distributed Sync Service
// SyncService.ets
import distributedData from '@ohos.distributedData';
import { DetectionResult } from './AIService';

class SyncService {
  private static instance: SyncService = null;
  private dataManager: distributedData.DataManager;
  private syncListeners: SyncListener[] = [];

  private constructor() {
    this.initDataManager();
  }

  public static getInstance(): SyncService {
    if (!SyncService.instance) {
      SyncService.instance = new SyncService();
    }
    return SyncService.instance;
  }

  private initDataManager(): void {
    this.dataManager = distributedData.createDataManager({
      bundleName: 'com.example.automosaic',
      area: distributedData.Area.GLOBAL,
      isEncrypted: true
    });
    // Listen for data synced from other devices
    this.dataManager.registerDataListener('mosaic_sync', (data) => {
      this.handleSyncData(data);
    });
  }

  public async syncScreenshotRequest(deviceId: string): Promise<void> {
    this.dataManager.syncData('mosaic_sync', {
      type: 'screenshot_request',
      fromDevice: deviceId,
      timestamp: Date.now()
    });
  }

  public async syncDetectionResult(
    deviceId: string,
    result: DetectionResult
  ): Promise<void> {
    this.dataManager.syncData('mosaic_sync', {
      type: 'detection_result',
      fromDevice: deviceId,
      result: result,
      timestamp: Date.now()
    });
  }

  public async syncProcessedImage(
    deviceId: string,
    imageUri: string
  ): Promise<void> {
    this.dataManager.syncData('mosaic_sync', {
      type: 'processed_image',
      fromDevice: deviceId,
      imageUri: imageUri,
      timestamp: Date.now()
    });
  }

  private handleSyncData(data: any): void {
    if (!data) return;
    switch (data.type) {
      case 'screenshot_request':
        this.syncListeners.forEach(cb => cb.onScreenshotRequest(data.fromDevice));
        break;
      case 'detection_result':
        this.syncListeners.forEach(cb => cb.onDetectionResult(data.result));
        break;
      case 'processed_image':
        this.syncListeners.forEach(cb => cb.onProcessedImage(data.imageUri));
        break;
    }
  }

  public addSyncListener(listener: SyncListener): void {
    if (!this.syncListeners.includes(listener)) {
      this.syncListeners.push(listener);
    }
  }

  public removeSyncListener(listener: SyncListener): void {
    this.syncListeners = this.syncListeners.filter(cb => cb !== listener);
  }
}

export interface SyncListener {
  onScreenshotRequest(fromDevice: string): void;
  onDetectionResult(result: DetectionResult): void;
  onProcessedImage(imageUri: string): void;
}

export const syncService = SyncService.getInstance();
Main UI Implementation
// MainScreen.ets
import image from '@ohos.multimedia.image';
import mediaLibrary from '@ohos.multimedia.mediaLibrary';
import prompt from '@ohos.promptAction';
import { aiService, DetectionResult } from './AIService';
import { imageService } from './ImageService';
import { syncService, SyncListener } from './SyncService';

@Component
export struct MainScreen {
  @State originalImage: image.PixelMap | null = null;
  @State processedImage: image.PixelMap | null = null;
  @State detectionResult: DetectionResult = { faces: [], texts: [] };
  @State processing: boolean = false;
  @State mosaicSize: number = 15;
  @State blurRadius: number = 25;
  @State processingMode: 'mosaic' | 'blur' = 'mosaic';
  @State connectedDevices: string[] = [];
  // Keep one listener instance so the same object can be removed again in onDisappear
  private syncListener: SyncListener = {
    onScreenshotRequest: (fromDevice) => this.handleScreenshotRequest(fromDevice),
    onDetectionResult: (result) => this.detectionResult = result,
    onProcessedImage: (imageUri) => this.loadProcessedImage(imageUri)
  };
  build() {
    Column() {
      // Title bar
      Row() {
        Text('Auto-Mosaic Screenshot Tool')
          .fontSize(24)
          .fontWeight(FontWeight.Bold)
          .layoutWeight(1)
        Button('Collaborate')
          .width(80)
          .onClick(() => {
            this.showDeviceSelector();
          })
      }
      .padding(10)
      .width('100%')

      // Image display area
      Row() {
        if (this.originalImage) {
          Column() {
            Text('Original screenshot')
              .fontSize(16)
              .margin({ bottom: 8 })
            Image(this.originalImage)
              .width('90%')
              .height(200)
              .objectFit(ImageFit.Contain)
          }
          .width('50%')
        }
        if (this.processedImage) {
          Column() {
            Text('Processed')
              .fontSize(16)
              .margin({ bottom: 8 })
            Image(this.processedImage)
              .width('90%')
              .height(200)
              .objectFit(ImageFit.Contain)
          }
          .width('50%')
        }
      }
      .height(250)
      .margin({ bottom: 20 })

      // Detection results
      if (this.detectionResult.faces.length > 0 || this.detectionResult.texts.length > 0) {
        Column() {
          Text(`Detected ${this.detectionResult.faces.length} face(s) and ${this.detectionResult.texts.length} text region(s)`)
            .fontSize(16)
            .margin({ bottom: 8 })
          // Details of the detected areas
          if (this.detectionResult.faces.length > 0) {
            Text('Face areas:')
              .fontSize(14)
              .margin({ bottom: 4 })
            ForEach(this.detectionResult.faces, (face, index) => {
              Text(`${index + 1}. Position: (${face.x}, ${face.y})  Size: ${face.width}x${face.height}  Confidence: ${face.confidence.toFixed(2)}`)
                .fontSize(12)
                .margin({ bottom: 2 })
            })
          }
          if (this.detectionResult.texts.length > 0) {
            Text('Text areas:')
              .fontSize(14)
              .margin({ top: 8, bottom: 4 })
            ForEach(this.detectionResult.texts, (text, index) => {
              Text(`${index + 1}. Content: ${text.content}  Position: (${text.x}, ${text.y})  Size: ${text.width}x${text.height}`)
                .fontSize(12)
                .margin({ bottom: 2 })
            })
          }
        }
        .width('90%')
        .padding(10)
        .backgroundColor('#F5F5F5')
        .borderRadius(8)
        .margin({ bottom: 20 })
      }

      // Processing options
      Column() {
        Row() {
          Text('Mode:')
            .fontSize(16)
            .margin({ right: 10 })
          Radio({ value: 'mosaic', group: 'processMode' })
            .checked(this.processingMode === 'mosaic')
            .onChange((checked) => {
              if (checked) this.processingMode = 'mosaic';
            })
          Text('Mosaic')
            .fontSize(14)
            .margin({ right: 15 })
          Radio({ value: 'blur', group: 'processMode' })
            .checked(this.processingMode === 'blur')
            .onChange((checked) => {
              if (checked) this.processingMode = 'blur';
            })
          Text('Blur')
            .fontSize(14)
        }
        .margin({ bottom: 10 })

        if (this.processingMode === 'mosaic') {
          Row() {
            Text('Mosaic size:')
              .fontSize(16)
              .margin({ right: 10 })
            Slider({
              value: this.mosaicSize,
              min: 5,
              max: 30,
              step: 1,
              style: SliderStyle.OutSet
            })
              .onChange((value) => {
                this.mosaicSize = value;
              })
            Text(this.mosaicSize.toString())
              .fontSize(14)
              .width(30)
          }
        } else {
          Row() {
            Text('Blur strength:')
              .fontSize(16)
              .margin({ right: 10 })
            Slider({
              value: this.blurRadius,
              min: 10,
              max: 50,
              step: 1,
              style: SliderStyle.OutSet
            })
              .onChange((value) => {
                this.blurRadius = value;
              })
            Text(this.blurRadius.toString())
              .fontSize(14)
              .width(30)
          }
        }
      }
      .width('90%')
      .padding(10)
      .backgroundColor(Color.White)
      .borderRadius(8)
      .margin({ bottom: 20 })

      // Control buttons
      Column() {
        Button('Capture and process')
          .width('80%')
          .height(50)
          .margin({ bottom: 10 })
          .onClick(() => {
            this.captureAndProcess();
          })
        Button('Save to gallery')
          .width('80%')
          .height(50)
          .enabled(!!this.processedImage)
          .margin({ bottom: 10 })
          .onClick(() => {
            this.saveToGallery();
          })
        Button('Collaborative processing')
          .width('80%')
          .height(50)
          .onClick(() => {
            this.showDeviceSelector();
          })
      }
      .margin({ bottom: 20 })

      // Processing state
      if (this.processing) {
        Progress()
          .width('80%')
          .color(Color.Blue)
      }
    }
    .width('100%')
    .height('100%')
    .padding(20)
    .onAppear(() => {
      syncService.addSyncListener(this.syncListener);
    })
    .onDisappear(() => {
      syncService.removeSyncListener(this.syncListener);
    })
  }
  private async captureAndProcess(): Promise<void> {
    this.processing = true;
    try {
      // 1. Take the screenshot
      this.originalImage = await imageService.captureScreen();
      if (!this.originalImage) {
        throw new Error('Failed to capture the screen');
      }
      // 2. Detect sensitive areas
      this.detectionResult = await aiService.detectSensitiveAreas(this.originalImage);
      // 3. Apply masking
      if (this.processingMode === 'mosaic') {
        this.processedImage = await imageService.applyMosaic(
          this.originalImage,
          [...this.detectionResult.faces, ...this.detectionResult.texts],
          this.mosaicSize
        );
      } else {
        this.processedImage = await imageService.applyBlur(
          this.originalImage,
          [...this.detectionResult.faces, ...this.detectionResult.texts],
          this.blurRadius
        );
      }
    } catch (err) {
      console.error('Processing failed:', JSON.stringify(err));
      prompt.showToast({ message: 'Processing failed: ' + err.message });
    } finally {
      this.processing = false;
    }
  }

  private async saveToGallery(): Promise<void> {
    if (!this.processedImage) return;
    this.processing = true;
    try {
      const uri = await imageService.saveToGallery(this.processedImage);
      if (uri) {
        prompt.showToast({ message: 'Saved to gallery' });
      }
    } catch (err) {
      console.error('Save failed:', JSON.stringify(err));
      prompt.showToast({ message: 'Save failed' });
    } finally {
      this.processing = false;
    }
  }

  private showDeviceSelector(): void {
    // Show a device picker dialog
    const devices = ['Device 1', 'Device 2', 'Device 3']; // In practice, query the device manager service
    const dialog = new AlertDialog({
      title: 'Select a collaborating device',
      items: devices,
      onSelect: (index) => {
        this.startCollaborativeProcessing(devices[index]);
      }
    });
    dialog.show();
  }

  private async startCollaborativeProcessing(deviceId: string): Promise<void> {
    this.processing = true;
    try {
      // 1. Ask the target device to take a screenshot
      syncService.syncScreenshotRequest(deviceId);
      prompt.showToast({ message: 'Processing request sent to ' + deviceId });
    } catch (err) {
      console.error('Collaborative processing failed:', JSON.stringify(err));
      prompt.showToast({ message: 'Collaborative processing failed' });
    } finally {
      this.processing = false;
    }
  }

  private async handleScreenshotRequest(fromDevice: string): Promise<void> {
    // Ask the user to confirm the processing request
    const dialog = new AlertDialog({
      title: 'Processing request',
      message: `Device ${fromDevice} requests screenshot processing`,
      buttons: [
        {
          text: 'Allow',
          action: async () => {
            await this.processForDevice(fromDevice);
          }
        },
        {
          text: 'Deny',
          action: () => {
            prompt.showToast({ message: 'Request denied' });
          }
        }
      ]
    });
    dialog.show();
  }

  private async processForDevice(deviceId: string): Promise<void> {
    this.processing = true;
    try {
      // 1. Take the screenshot
      const screenshot = await imageService.captureScreen();
      if (!screenshot) {
        throw new Error('Failed to capture the screen');
      }
      // 2. Detect sensitive areas
      const result = await aiService.detectSensitiveAreas(screenshot);
      // 3. Send the detection result back to the requesting device
      syncService.syncDetectionResult(deviceId, result);
      // 4. Apply the mosaic
      const processedImage = await imageService.applyMosaic(
        screenshot,
        [...result.faces, ...result.texts],
        15
      );
      // 5. Save and share the processed image
      const uri = await imageService.saveToGallery(processedImage);
      if (uri) {
        syncService.syncProcessedImage(deviceId, uri);
      }
    } catch (err) {
      console.error('Processing for the remote device failed:', JSON.stringify(err));
    } finally {
      this.processing = false;
    }
  }

  private async loadProcessedImage(uri: string): Promise<void> {
    try {
      const mediaLib = await mediaLibrary.getMediaLibrary(getContext(this));
      const files = await mediaLib.getFileAssets({
        selections: mediaLibrary.FileKey.URI + '=?',
        selectionArgs: [uri]
      });
      if (files.getCount() > 0) {
        const asset = await files.getFirstObject();
        const fd = await asset.open('r');
        // Decode the image directly from the file descriptor
        const imageSource = image.createImageSource(fd);
        this.processedImage = await imageSource.createPixelMap();
        await asset.close(fd);
        prompt.showToast({ message: 'Received the processed image' });
      }
    } catch (err) {
      console.error('Failed to load the processed image:', JSON.stringify(err));
    }
  }
}
3. Project Configuration and Permissions
Permission Configuration
// module.json5
{
  "module": {
    "requestPermissions": [
      {
        "name": "ohos.permission.CAPTURE_SCREEN",
        "reason": "Capture the screen content"
      },
      {
        "name": "ohos.permission.READ_MEDIA",
        "reason": "Read media files"
      },
      {
        "name": "ohos.permission.WRITE_MEDIA",
        "reason": "Save the processed image"
      },
      {
        "name": "ohos.permission.DISTRIBUTED_DATASYNC",
        "reason": "Sync processing data across devices"
      },
      {
        "name": "ohos.permission.INTERNET",
        "reason": "Load AI models"
      },
      {
        "name": "ohos.permission.ACCESS_DISTRIBUTED_DEVICE_MANAGER",
        "reason": "Discover and connect to other devices"
      },
      {
        "name": "ohos.permission.MEDIA_LOCATION",
        "reason": "Access media location information"
      }
    ],
    "abilities": [
      {
        "name": "MainAbility",
        "type": "page",
        "visible": true
      },
      {
        "name": "DeviceAbility",
        "type": "page",
        "visible": true
      },
      {
        "name": "AIDetectionAbility",
        "type": "service",
        "backgroundModes": ["dataTransfer"]
      }
    ]
  }
}
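Permissions of the user_grant type (for example READ_MEDIA, WRITE_MEDIA and DISTRIBUTED_DATASYNC) also have to be requested from the user at runtime in addition to being declared above. The following is a minimal sketch, assuming API 9+ and @ohos.abilityAccessCtrl; the file name PermissionHelper.ets and the helper name requestUserGrantPermissions are illustrative and not part of the original project.

// PermissionHelper.ets (hypothetical helper, not part of the article's code)
import abilityAccessCtrl, { Permissions } from '@ohos.abilityAccessCtrl';
import common from '@ohos.app.ability.common';

export async function requestUserGrantPermissions(context: common.UIAbilityContext): Promise<boolean> {
  // These permissions must also appear in module.json5 under requestPermissions
  const permissions: Array<Permissions> = [
    'ohos.permission.READ_MEDIA',
    'ohos.permission.WRITE_MEDIA',
    'ohos.permission.DISTRIBUTED_DATASYNC'
  ];
  try {
    const atManager = abilityAccessCtrl.createAtManager();
    const result = await atManager.requestPermissionsFromUser(context, permissions);
    // authResults: 0 means granted, -1 means denied
    return result.authResults.every(status => status === 0);
  } catch (err) {
    console.error('Permission request failed:', JSON.stringify(err));
    return false;
  }
}

A typical place to call this is the onWindowStageCreate callback of the UIAbility, before the main page is loaded.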
4. Summary and Extensions
This auto-mosaic screenshot tool implements the following core features:
Smart recognition: accurately locates faces and sensitive text regions
Automatic masking: offers both mosaic and blur modes
Multi-device collaboration: devices synchronize the processing state in real time
Privacy protection: all processing is done on-device, keeping the data secure
Possible extensions:
Custom mask areas: let users adjust the masked regions manually
OCR filtering: mask only specific kinds of sensitive text, such as ID numbers and phone numbers (see the sketch after this list)
Batch processing: mosaic multiple screenshots in one pass
Cloud sync: upload processed screenshots to the cloud securely
History: keep processing records for later review
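For the OCR-filtering extension, one possible starting point is to post-filter the TextArea results from AIService so that only text matching sensitive patterns gets masked. The sketch below assumes the exported TextArea interface from AIService.ets; the regular expressions are illustrative and would need tuning for real data.

// Sketch: keep only text regions whose content matches a sensitive pattern
import { TextArea } from './AIService';

const ID_CARD_PATTERN = /\d{17}[\dXx]/;  // 18-digit mainland ID number (illustrative)
const PHONE_PATTERN = /1[3-9]\d{9}/;     // 11-digit mobile number (illustrative)

function filterSensitiveTexts(texts: TextArea[]): TextArea[] {
  return texts.filter(t =>
    ID_CARD_PATTERN.test(t.content) || PHONE_PATTERN.test(t.content)
  );
}

// Usage: mask only the matching text regions instead of every detected line
// const areas = [...result.faces, ...filterSensitiveTexts(result.texts)];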
By combining HarmonyOS's distributed capabilities with its AI framework, we have built a smart, convenient privacy-protection tool that helps users handle sensitive information in screenshots with ease.
