Exercise source: https://tour.golang.org/concurrency/10
Description:
In this exercise you'll use Go's concurrency features to parallelize a web crawler. Modify the Crawl function to fetch URLs in parallel without fetching the same URL twice. Hint: you can keep a cache of the URLs that have been fetched on a map, but maps alone are not safe for concurrent use!
Here is my solution:
package main

import (
    "fmt"
    "sync"
)

type Fetcher interface {
    // Fetch returns the body of URL and
    // a slice of URLs found on that page.
    Fetch(url string) (body string, urls []string, err error)
}

var crawledURLs = make(map[string]bool)
var mux sync.Mutex

func CrawlURL(url string, depth int, fetcher Fetcher, quit chan bool) {
    defer func() { quit <- true }()
    if depth <= 0 {
        return
    }
    mux.Lock()
    _, isCrawled := crawledURLs[url]
    if isCrawled {
        return
    }
    crawledURLs[url] = true
    mux.Unlock()
    body, urls, err := fetcher.Fetch(url)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("found: %s %q\n", url, body)
    quitThis := make(chan bool)
    for _, u := range urls {
        go CrawlURL(u, depth-1, fetcher, quitThis)
    }
    for range urls {
        <-quitThis
    }
    return
}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher) {
    CrawlURL(url, depth, fetcher, make(chan bool))
    return
}

func main() {
    Crawl("https://golang.org/", 4, fetcher)
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
    body string
    urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
    if res, ok := f[url]; ok {
        return res.body, res.urls, nil
    }
    return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
    "https://golang.org/": &fakeResult{
        "The Go Programming Language",
        []string{
            "https://golang.org/pkg/",
            "https://golang.org/cmd/",
        },
    },
    "https://golang.org/pkg/": &fakeResult{
        "Packages",
        []string{
            "https://golang.org/",
            "https://golang.org/cmd/",
            "https://golang.org/pkg/fmt/",
            "https://golang.org/pkg/os/",
        },
    },
    "https://golang.org/pkg/fmt/": &fakeResult{
        "Package fmt",
        []string{
            "https://golang.org/",
            "https://golang.org/pkg/",
        },
    },
    "https://golang.org/pkg/os/": &fakeResult{
        "Package os",
        []string{
            "https://golang.org/",
            "https://golang.org/pkg/",
        },
    },
}
And the output:
found: https://golang.org/ "The Go Programming Language"
not found: https://golang.org/cmd/
found: https://golang.org/pkg/ "Packages"
found: https://golang.org/pkg/os/ "Package os"
fatal error: all goroutines are asleep - deadlock!
I'd like to know why this deadlock occurs. Is it because I'm using channels the wrong way?
Then I noticed that I had forgotten to release the mutex in the if isCrawled {} branch, so I edited the code as follows:
...
if isCrawled {
    mux.Unlock() // added this line
    return
}
...
But the deadlock is still there, and the output is different:
found: https://golang.org/ "The Go Programming Language"
not found: https://golang.org/cmd/
found: https://golang.org/pkg/ "Packages"
found: https://golang.org/pkg/os/ "Package os"
found: https://golang.org/pkg/fmt/ "Package fmt"
fatal error: all goroutines are asleep - deadlock!
Answered on 2019-07-04 07:58:08
The main problem was that you forgot to release the mutex before returning from the if isCrawled {} branch.
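As an aside, this class of bug is easy to rule out by pairing every Lock with a deferred Unlock. Here is a minimal sketch of that pattern (my own illustration, not taken from the linked solutions), using the same crawledURLs map and mux as in the question:

// checkAndMark reports whether url has already been crawled and,
// if not, records it as crawled. The deferred Unlock runs on every
// return path, so the mutex can never be left locked by mistake.
func checkAndMark(url string) bool {
    mux.Lock()
    defer mux.Unlock()
    if crawledURLs[url] {
        return true
    }
    crawledURLs[url] = true
    return false
}

CrawlURL could then begin with if checkAndMark(url) { return } instead of locking and unlocking inline.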
Moreover, I would suggest using the sync APIs if you actually need to synchronize goroutines. Channels are better used to communicate and share data.
Here is the solution using sync.WaitGroup: https://play.golang.org/p/slrnmr3sPrs
And here is the solution using only channels: https://play.golang.org/p/FbPXxPSXvFL
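For reference, a minimal sketch of the sync.WaitGroup approach (my own illustration of the idea, not the code behind the playground link; it assumes the same Fetcher, fetcher, crawledURLs, and mux declarations as above, plus the hypothetical checkAndMark helper sketched earlier):

func CrawlURL(url string, depth int, fetcher Fetcher, wg *sync.WaitGroup) {
    defer wg.Done() // runs on every return path, so Wait cannot hang
    if depth <= 0 {
        return
    }
    if checkAndMark(url) { // the defer-unlock helper sketched above
        return
    }
    body, urls, err := fetcher.Fetch(url)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("found: %s %q\n", url, body)
    wg.Add(len(urls)) // register the children before spawning them
    for _, u := range urls {
        go CrawlURL(u, depth-1, fetcher, wg)
    }
}

func Crawl(url string, depth int, fetcher Fetcher) {
    var wg sync.WaitGroup
    wg.Add(1)
    go CrawlURL(url, depth, fetcher, &wg)
    wg.Wait() // blocks until every crawler goroutine has called Done
}

With a WaitGroup the parent does not need to collect one signal per child; the single Wait in Crawl observes the whole tree of goroutines.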
The deadlock itself has a different cause: the first time you call CrawlURL(), nothing is reading from the channel you pass as an argument. Hence, as soon as that function tries to send something into it via defer func() { quit <- true }(), it blocks forever and never returns.
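You can reproduce that blocking send in isolation. A send on an unbuffered channel completes only when another goroutine is receiving from it, so this minimal sketch (my own illustration, not part of the original answer) dies with the same fatal error:

package main

func main() {
    quit := make(chan bool)
    // Nobody ever receives from quit, so this send blocks forever:
    // fatal error: all goroutines are asleep - deadlock!
    quit <- true
}

The corrected program below therefore runs the first CrawlURL in its own goroutine and receives from the channel inside Crawl: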
package main

import (
    "fmt"
    "sync"
)

type Fetcher interface {
    // Fetch returns the body of URL and
    // a slice of URLs found on that page.
    Fetch(url string) (body string, urls []string, err error)
}

var crawledURLs = make(map[string]bool)
var mux sync.Mutex

func CrawlURL(url string, depth int, fetcher Fetcher, quit chan bool) {
    // For the very first instance of this function, this send would block
    // forever if nobody were receiving from the other end of the channel.
    defer func() { quit <- true }()
    if depth <= 0 {
        return
    }
    mux.Lock()
    _, isCrawled := crawledURLs[url]
    if isCrawled {
        mux.Unlock()
        return
    }
    crawledURLs[url] = true
    mux.Unlock()
    body, urls, err := fetcher.Fetch(url)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("found: %s %q\n", url, body)
    quitThis := make(chan bool)
    for _, u := range urls {
        go CrawlURL(u, depth-1, fetcher, quitThis)
    }
    for range urls {
        <-quitThis
    }
    return
}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher) {
    lastQuit := make(chan bool)
    go CrawlURL(url, depth, fetcher, lastQuit)
    // You need to receive from this channel in order to
    // unblock the called function.
    <-lastQuit
    return
}

func main() {
    Crawl("https://golang.org/", 10, fetcher)
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
    body string
    urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
    if res, ok := f[url]; ok {
        return res.body, res.urls, nil
    }
    return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
    "https://golang.org/": &fakeResult{
        "The Go Programming Language",
        []string{
            "https://golang.org/pkg/",
            "https://golang.org/cmd/",
        },
    },
    "https://golang.org/pkg/": &fakeResult{
        "Packages",
        []string{
            "https://golang.org/",
            "https://golang.org/cmd/",
            "https://golang.org/pkg/fmt/",
            "https://golang.org/pkg/os/",
        },
    },
    "https://golang.org/pkg/fmt/": &fakeResult{
        "Package fmt",
        []string{
            "https://golang.org/",
            "https://golang.org/pkg/",
        },
    },
    "https://golang.org/pkg/os/": &fakeResult{
        "Package os",
        []string{
            "https://golang.org/",
            "https://golang.org/pkg/",
        },
    },
}
https://stackoverflow.com/questions/56882761