
As a developer who has spent years in the trenches of HarmonyOS distributed systems, I once used the Actor model to build a crawler platform averaging tens of millions of requests per day. This post shares hands-on experience from architecture design to fault-tolerance tuning, so you can build an efficient, stable distributed crawler on the Actor model. Let's start with the core crawler node:
```cj
actor CrawlerNode {
    private var taskQueue: [String] = []
    private let aggregator: ActorRef<ResultAggregator>

    init(aggregator: ActorRef<ResultAggregator>) {
        this.aggregator = aggregator
    }

    receiver func addTask(url: String) {
        taskQueue.append(url)
        processTasks()
    }

    private func processTasks() {
        while !taskQueue.isEmpty {
            let url = taskQueue.removeFirst()
            if let content = fetchPage(url) {
                let data = parsePage(content)
                aggregator.send(StoreData(data))
            } else {
                // Re-enqueue the failed task, then stop draining so we
                // don't spin forever on a permanently dead URL; the next
                // incoming message triggers another pass
                taskQueue.append(url)
                break
            }
        }
    }

    private func fetchPage(_ url: String) -> String? {
        // Network request with up to 3 retries
        for _ in 0..3 {
            do {
                return Http.get(url).content
            } catch {
                sleep(1s) // pause between retries
            }
        }
        return nil
    }
}
```
```cj
actor TaskScheduler {
    private var nodes: [ActorRef<CrawlerNode>] = []
    private var taskQueue: [String] = []

    receiver func register(node: ActorRef<CrawlerNode>) {
        nodes.append(node)
        dispatchTasks()
    }

    receiver func addTask(url: String) {
        taskQueue.append(url)
        dispatchTasks()
    }

    private func dispatchTasks() {
        if nodes.isEmpty { return } // no workers registered yet
        while !taskQueue.isEmpty {
            // Pick the least-loaded node; `load` is assumed to be a
            // gauge each node reports back to the scheduler
            let node = nodes.min(by: { $0.load < $1.load })!
            node.send(addTask(taskQueue.removeFirst()))
        }
    }
}
```
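A minimal wiring sketch under the same pseudocode assumptions (a `spawn` primitive returning `ActorRef`s; the seed URL is just an example; `ResultAggregator` is defined just below):

```cj
// Hypothetical bootstrap: spawn the actors and feed in a seed URL
let aggregator = spawn(ResultAggregator())
let scheduler = spawn(TaskScheduler())
for _ in 0..3 {
    scheduler.send(register(spawn(CrawlerNode(aggregator))))
}
scheduler.send(addTask("https://example.com"))
```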
```cj
actor ResultAggregator {
    private var dataStore: DataStore = DataStore()

    receiver func StoreData(data: [String]) {
        dataStore.save(data)
        // Flush in batches of 100 to cut write amplification
        if dataStore.count % 100 == 0 {
            flushToDB()
        }
    }

    private func flushToDB() {
        // Batch write to the database
    }
}
```
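The flush itself is left as a comment above; here is a hedged sketch of an idempotent batch write, assuming a `db: Database` field like the one in `CheckpointManager` below and hypothetical `drainAll`/`upsert` methods:

```cj
private func flushToDB() {
    // Hypothetical API: take and clear everything buffered so far
    let batch = dataStore.drainAll()
    // Upsert keyed by URL hash, so a replayed batch cannot create
    // duplicate rows -- this is what "idempotent storage" buys us
    db.upsert("crawl_results", batch)
}
```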
```cj
actor CheckpointManager {
    private let db: Database

    init(db: Database) {
        this.db = db
    }

    func saveState(nodes: [ActorRef<CrawlerNode>]) {
        // NOTE: reading another actor's queue directly breaks actor
        // isolation; in practice each node should report its pending
        // tasks via a message. Shown inline here for brevity.
        var tasks = [String]()
        for node in nodes {
            tasks.append(contentsOf: node.taskQueue)
        }
        db.save("crawler_tasks", tasks)
    }

    func restoreState() -> [String] {
        return db.load("crawler_tasks") ?? []
    }
}
```
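How the checkpoint might be wired in, assuming `spawn`, a `scheduler` ref, the `db` handle, and a hypothetical `every` timer helper:

```cj
// On boot: re-feed any tasks that survived the last crash
let checkpoint = spawn(CheckpointManager(db))
for url in checkpoint.restoreState() {
    scheduler.send(addTask(url))
}

// Snapshot in-flight work every 30 seconds (timer helper assumed)
every(30s) {
    checkpoint.saveState(nodes)
}
```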
```cj
extension CrawlerNode {
    private func fetchWithRetry(url: String, retries: Int = 3) -> String? {
        if retries == 0 {
            log("Failed: \(url)")
            return nil
        }
        do {
            return Http.get(url, timeout: 5s).content
        } catch {
            // Exponential backoff: 1s, 2s, 4s as retries run down
            // (assuming the default of 3 attempts)
            sleep(1s * (1 << (3 - retries)))
            return fetchWithRetry(url, retries - 1)
        }
    }
}
```
1. **Connection pool reuse** (combined usage sketch after this list):
```cj
actor HttpPool {
    private let pool: ConnectionPool

    func getConnection() -> HttpConnection {
        return pool.borrow()
    }

    func release(connection: HttpConnection) {
        pool.release(connection)
    }
}
```
2. **Concurrency control**:
```cj
actor CrawlerNode {
    private let semaphore = Semaphore(5) // cap concurrent requests at 5

    private func fetchPage(_ url: String) -> String? {
        semaphore.acquire()
        defer { semaphore.release() }
        // request handling...
    }
}
```
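Putting both optimizations together: a sketch of a fetch path that holds a semaphore slot and always returns its pooled connection (the `httpPool` reference and `conn.get` call are assumptions):

```cj
private func fetchPage(_ url: String) -> String? {
    semaphore.acquire()
    defer { semaphore.release() }                 // free the slot on every exit path
    let conn = httpPool.getConnection()
    defer { httpPool.release(connection: conn) }  // connection always goes back to the pool
    return conn.get(url).content
}
```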
Monitoring topology:
```mermaid
graph TD
    A[Prometheus] -->|crawl metrics| B(CrawlerNode)
    A -->|scheduling metrics| C(TaskScheduler)
    A -->|storage metrics| D(ResultAggregator)
    E[Grafana] --> A
```
Key metrics to monitor: crawl metrics from each CrawlerNode, scheduling metrics from the TaskScheduler, and storage metrics from the ResultAggregator, all scraped by Prometheus and visualized in Grafana.
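The original doesn't name a Cangjie Prometheus client, so as a sketch, the simplest pattern is a metrics actor the others report into, rendered in the text exposition format for scraping:

```cj
actor Metrics {
    private var counters: [String: Int] = [:]

    receiver func inc(name: String) {
        counters[name] = (counters[name] ?? 0) + 1
    }

    // Dump counters in Prometheus text format for the scrape endpoint
    func render() -> String {
        return counters.map { "\($0.key) \($0.value)" }.joined(separator: "\n")
    }
}
// e.g. metrics.send(inc("pages_fetched_total")) after each successful fetch
```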
Deployment topology:
```
┌─────────────┐    ┌─────────────┐    ┌─────────────┐
│   Node 1    │    │   Node 2    │    │   Node 3    │
│  (Crawler)  │    │  (Crawler)  │    │ (Scheduler) │
└─────────────┘    └─────────────┘    └─────────────┘
       ↑   message bus    ↑   message bus    ↑
       └──────────────────┼──────────────────┘
          ┌────────────────────────────────┐
          │ Distributed message middleware │
          └────────────────────────────────┘
```
1. **Elastic scaling**:
```cj
func scaleOut() {
    // Bring three more crawler nodes online under heavy backlog
    for _ in 0..3 {
        spawn(CrawlerNode(aggregator))
    }
}

func scaleIn() {
    // Sketch (stop() is an assumed API): retire the newest node,
    // then rebalance whatever it still had queued
    let node = nodes.removeLast()
    node.stop()
    rebalanceTasks()
}
```
2. **Failover**:
```cj
actor Scheduler {
    receiver func nodeFailed(node: ActorRef<CrawlerNode>) {
        nodes.remove(node)
        rebalanceTasks()
    }
}
```
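`rebalanceTasks` is not defined in the original; one hedged way to implement it, assuming the scheduler holds a `checkpoint` ref to the CheckpointManager, is to fall back on the checkpointed task list and re-run the normal dispatch:

```cj
private func rebalanceTasks() {
    // Recover the failed node's pending URLs from the last checkpoint
    // and push them through the regular dispatch path
    for url in checkpoint.restoreState() {
        taskQueue.append(url)
    }
    dispatchTasks()
}
```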
## 5. Pitfall Guide: What Makes or Breaks a Distributed Crawler
1. **Duplicate fetching**: deduplicate URLs with a Bloom filter so the same task is never enqueued twice (see the sketch after this list).
2. **Network congestion**: enforce a global request rate limit, bucketed per domain.
3. **Data consistency**: make the result aggregator's writes idempotent so replays never produce duplicate records.
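For pitfall 1, a minimal Bloom-filter sketch in the same actor pseudocode (the two hash positions and the filter size are illustrative, not tuned):

```cj
actor UrlDeduper {
    private var bits: [Bool] = Array(repeating: false, count: 1 << 20)

    // Two illustrative hash positions; a real filter tunes the number
    // of hashes and bits to URL volume and false-positive budget
    private func positions(_ url: String) -> [Int] {
        let h = url.hashValue
        return [abs(h) % bits.count, abs(h &* 31 &+ 17) % bits.count]
    }

    // Returns true if the URL was (probably) already scheduled;
    // otherwise marks it and returns false
    receiver func seenBefore(url: String) -> Bool {
        let pos = positions(url)
        if pos.allSatisfy({ bits[$0] }) { return true }
        for p in pos { bits[p] = true }
        return false
    }
}
// Scheduler side: only enqueue when !deduper.seenBefore(url)
```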