我已经写了一些代码来解析 yell.com 上不同商店的名称、地址和电话号码。只要分页中有链接,我的抓取工具就会解析全部内容,而不管内容分布在多少页面上。但我发现一个问题:它总是会跳过第一页的内容——比如共有 10 页时,我的爬虫只会抓取后 9 页。任何小的调整建议都可能帮助我找到解决办法。下面是完整的代码,提前谢谢。
' Scrapes business name, address parts and phone number for every
' "pizza in United Kingdom" result on yell.com, across all result pages,
' writing one row per business starting at row 2 of the active sheet.
'
' Fix: the pagination block contains anchors only for pages 2..N — the
' current page (page 1) is rendered as a selected <span> with no <a>.
' Looping over the anchors alone therefore skipped page 1. We now parse
' the first response directly (i = -1), then follow the anchors.
Sub YellUK()
    Const mlink = "https://www.yell.com"
    Dim http As New MSXML2.XMLHTTP60, html As New HTMLDocument, htm As New HTMLDocument
    Dim post As HTMLHtmlElement, page As Object, newlink As String
    Dim spans As Object, i As Long, c As Long, x As Long

    ' Download the first results page (synchronous request).
    With http
        .Open "GET", "https://www.yell.com/ucs/UcsSearchAction.do?keywords=pizza&location=United+Kingdom&scrambleSeed=1426936001", False
        .send
        html.body.innerHTML = .responseText
    End With

    ' Anchors for pages 2..N plus a trailing "Next" link (hence "- 2" below).
    Set page = html.getElementsByClassName("row pagination")(0).getElementsByTagName("a")

    ' i = -1 processes page 1 from the response already in hand;
    ' i = 0 .. Length - 2 fetch the remaining pages via their anchors.
    For i = -1 To page.Length - 2
        If i = -1 Then
            htm.body.innerHTML = html.body.innerHTML
        Else
            newlink = mlink & Replace(page(i).href, "about:", "")
            With http
                .Open "GET", newlink, False
                .send
                htm.body.innerHTML = .responseText
            End With
        End If

        For Each post In htm.getElementsByClassName("js-LocalBusiness")
            x = x + 1
            ' Column 1: business name.
            With post.getElementsByClassName("row businessCapsule--title")(0).getElementsByTagName("a")
                If .Length Then Cells(x + 1, 1) = .Item(0).innerText
            End With
            ' Columns 2-4: address spans 1..3 — query the collection once
            ' instead of three identical DOM lookups.
            Set spans = post.getElementsByClassName("col-sm-10 col-md-11 col-lg-12 businessCapsule--address")(0).getElementsByTagName("span")
            For c = 1 To 3
                If spans.Length > c Then Cells(x + 1, c + 1) = spans(c).innerText
            Next c
            ' Column 5: second telephone element holds the number.
            With post.getElementsByClassName("businessCapsule--tel")
                If .Length > 1 Then Cells(x + 1, 5) = .Item(1).innerText
            End With
        Next post
    Next i
End Sub
以下是存储下一页的页码的元素:
<div class="row pagination">
<div class="col-sm-24">
<span class="pagination--page is-selected">1</span>
<a class="pagination--page" rel="nofollow" href="/ucs/UcsSearchAction.do?location=United+Kingdom&keywords=pizza&scrambleSeed=721890588&pageNum=2" data-tracking="DISPLAY:PAGINATION:NUMBER">2</a>
<a class="pagination--page" rel="nofollow" href="/ucs/UcsSearchAction.do?location=United+Kingdom&keywords=pizza&scrambleSeed=721890588&pageNum=3" data-tracking="DISPLAY:PAGINATION:NUMBER">3</a>
<a class="pagination--page" rel="nofollow" href="/ucs/UcsSearchAction.do?location=United+Kingdom&keywords=pizza&scrambleSeed=721890588&pageNum=4" data-tracking="DISPLAY:PAGINATION:NUMBER">4</a>
<a class="pagination--page" rel="nofollow" href="/ucs/UcsSearchAction.do?location=United+Kingdom&keywords=pizza&scrambleSeed=721890588&pageNum=5" data-tracking="DISPLAY:PAGINATION:NUMBER">5</a>
<a class="pagination--page" rel="nofollow" href="/ucs/UcsSearchAction.do?location=United+Kingdom&keywords=pizza&scrambleSeed=721890588&pageNum=6" data-tracking="DISPLAY:PAGINATION:NUMBER">6</a>
<a class="pagination--page" rel="nofollow" href="/ucs/UcsSearchAction.do?location=United+Kingdom&keywords=pizza&scrambleSeed=721890588&pageNum=7" data-tracking="DISPLAY:PAGINATION:NUMBER">7</a>
<a class="pagination--page" rel="nofollow" href="/ucs/UcsSearchAction.do?location=United+Kingdom&keywords=pizza&scrambleSeed=721890588&pageNum=8" data-tracking="DISPLAY:PAGINATION:NUMBER">8</a>
<a class="pagination--page" rel="nofollow" href="/ucs/UcsSearchAction.do?location=United+Kingdom&keywords=pizza&scrambleSeed=721890588&pageNum=9" data-tracking="DISPLAY:PAGINATION:NUMBER">9</a>
<a class="pagination--page" rel="nofollow" href="/ucs/UcsSearchAction.do?location=United+Kingdom&keywords=pizza&scrambleSeed=721890588&pageNum=10" data-tracking="DISPLAY:PAGINATION:NUMBER">10</a>
<a rel="nofollow" class="pagination--next" href="/ucs/UcsSearchAction.do?location=United+Kingdom&keywords=pizza&scrambleSeed=721890588&pageNum=2" data-tracking="DISPLAY:PAGINATION:NEXT">Next</a>
</div>
</div>
答案 0(得分:1)
这里的问题是已经选择了第一页,因此它在分页中没有锚。解决方案是先处理第一页,然后使用分页处理剩余的页面。 HTH
Option Explicit
' Scrapes yell.com "pizza in United Kingdom" results across all pages.
' Page 1 has no anchor in the pagination block (it is the selected
' <span>), so it is parsed straight from the first response; pages 2..N
' are then fetched via the pagination anchors.
Sub YellUK()
Const mlink = "https://www.yell.com"
Dim http As New MSXML2.XMLHTTP60
Dim html As New HTMLDocument
Dim page As Object, newlink As String
' Synchronous GET of the first results page.
With http
.Open "GET", "https://www.yell.com/ucs/UcsSearchAction.do?keywords=pizza&location=United+Kingdom&scrambleSeed=1426936001", False
.send
html.body.innerHTML = .responseText
End With
' Anchors cover pages 2..N plus a trailing "Next" link (hence "- 2" in the loop).
Set page = html.getElementsByClassName("row pagination")(0).getElementsByTagName("a")
Dim i, x
' First page first, is selected already, 'row pagination' doesn't have 'a' for it
GetPageData x, html
' Next pages then
Dim html2 As New HTMLDocument
For i = 0 To page.Length - 2
' Anchor hrefs are site-relative; strip the "about:" prefix MSHTML adds.
newlink = mlink & Replace(page(i).href, "about:", "")
With http
.Open "GET", newlink, False
.send
html2.body.innerHTML = .responseText
End With
GetPageData x, html2
Next i
End Sub
' Writes name (col 1), address parts (cols 2-4) and phone (col 5) of
' every business capsule in html to the active sheet, advancing the
' shared row counter x (output starts at row x + 1, i.e. row 2 first).
Private Sub GetPageData(ByRef x, ByRef html As HTMLDocument)
    Dim post As HTMLHtmlElement
    Dim spans As Object, c As Long
    For Each post In html.getElementsByClassName("js-LocalBusiness")
        x = x + 1
        ' Column 1: business name.
        With post.getElementsByClassName("row businessCapsule--title")(0).getElementsByTagName("a")
            If .Length Then Cells(x + 1, 1) = .Item(0).innerText
        End With
        ' Columns 2-4: address spans 1..3 — query the span collection
        ' once instead of three identical DOM lookups.
        Set spans = post.getElementsByClassName("col-sm-10 col-md-11 col-lg-12 businessCapsule--address")(0).getElementsByTagName("span")
        For c = 1 To 3
            If spans.Length > c Then Cells(x + 1, c + 1) = spans(c).innerText
        Next c
        ' Column 5: the second telephone element holds the number.
        With post.getElementsByClassName("businessCapsule--tel")
            If .Length > 1 Then Cells(x + 1, 5) = .Item(1).innerText
        End With
    Next post
End Sub
编辑:
可能是这样的。第一页的链接是在 i = -1 时构造出来的,其余页面则照常处理。
' Alternative: build the page-1 URL from the page-2 anchor instead of
' reusing the first response. i = -1 handles page 1; 0..Length-2 follow
' the numbered anchors (the last anchor is the "Next" link).
For i = -1 To page.Length - 2
If i = -1 Then
' Take the page-2 link and rewrite its trailing page number to "1".
' NOTE(review): assumes pageNum is the last query parameter and a
' single digit here — confirm before relying on this trick.
newlink = mlink & Replace(page(i + 1).href, "about:", "")
newlink = Left(newlink, Len(newlink) - 1) & "1"
Else
newlink = mlink & Replace(page(i).href, "about:", "")
End If
Debug.Print i & ", " & newlink ' Prints the links for all the pages
' Synchronous GET of the page, parsed into htm.
With http
.Open "GET", newlink, False
.send
htm.body.innerHTML = .responseText
End With
' Get page data here ...
Next i