Parsing the contents of an HTML table - but the iframe is the source of the problem
Question:
Last question of the day. I am trying to find a way to parse the contents of the table on this page, http://www7.pearsonvue.com/Dispatcher?application=VTCLocator&action=actStartApp&v=W2L&cid=445, in order to put it into an Excel file.
Putting the data into Excel after parsing it with BeautifulSoup is not a problem.
But (there is always a "but") the page source is strange: it contains an iframe.
#!/usr/bin/python
# -*- coding: utf-8 -*-
import xlwt
import urllib2
import sys
import re
from bs4 import BeautifulSoup as soup
import urllib

print("TEST FOR PTE TESTS CENTERS")

url = 'http://www6.pearsonvue.com/Dispatcher?application=VTCLocator&action=actStartApp&v=W2L&cid=445'
values = {
    'sortColumn' : 2,
    'sortDirection' : 1,
    'distanceUnits' : 0,
    'proximitySearchLimit' : 20,
    'countryCode' : 'GBR', # WE TRY FOR NOW WITH A SPECIFIC COUNTRY
}
user_agent = 'Mozilla/5 (Solaris 10) Gecko'
headers = { 'User-Agent' : user_agent }

# POST the form values and read the response
data = urllib.urlencode(values)
req = urllib2.Request(url, data, headers)
response = urllib2.urlopen(req)
thePage = response.read()
the_page = soup(thePage)

# Locate the frame that actually holds the test center table
result = the_page.find('frame', attrs={'name' : 'VTCLocatorPageFrame'})
print result # We now have the frame element (with its link) in the result var
So above is the source of the script I am working on.
After the script runs, the result var contains the frame element.
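For reference, the natural next step would be to follow that frame rather than stop at printing it. A minimal sketch continuing from the script above, assuming the frame tag carries a src attribute and that the frame document can be fetched with a plain GET; the 'apptable' id is the one searched for in the follow-up script below:

import urlparse  # Python 2 standard library

# result, url, headers and soup come from the script above
frame_src = result['src']                       # may be a relative URL
frame_url = urlparse.urljoin(url, frame_src)    # resolve it against the page URL
inner_html = urllib2.urlopen(urllib2.Request(frame_url, None, headers)).read()
inner_page = soup(inner_html)
print inner_page.find('table', attrs={'id' : 'apptable'})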
If you have any ideas, they would be very helpful :)
Thanks in advance, and happy Pythoning!
Answer
Sorry, the question was not very clear. I tried to find a solution myself; here is the script I am using:
#!/usr/bin/python
# -*- coding: utf-8 -*-
import xlwt
import urllib2
import sys
import re
from bs4 import BeautifulSoup as soup
import urllib
liste_countries = ['USA','AFG','ALA','ALB','DZA','ASM','AND','AGO','AIA','ATA','ATG','ARG','ARM','ABW','AUS','AUT','AZE','BHS','BHR','BGD','BRB','BLR','BEL','BLZ','BEN','BMU','BTN','BOL','BES','BIH','BWA','BVT','BRA','IOT','BRN','BGR','BFA','BDI','KHM','CMR','CAN','CPV','CYM','CAF','TCD','CHL','CHN','CXR','CCK','COL','COM','COG','COD','COK','CRI','CIV','HRV','CUW','CYP','CZE','DNK','DJI','DMA','DOM','ECU','EGY','SLV','GNQ','ERI','EST','ETH','FLK','FRO','FJI','FIN','FRA','GUF','PYF','ATF','GAB','GMB','GEO','DEU','GHA','GIB','GRC','GRL','GRD','GLP','GUM','GTM','GGY','GIN','GNB','GUY','HTI','HMD','HND','HKG','HUN','ISL','IND','IDN','IRN','IRQ','IRL','IMN','ISR','ITA','JAM','JPN','JEY','JOR','KAZ','KEN','KIR','PRK','KOR','KWT','KGZ','LAO','LVA','LBN','LSO','LBR','LBY','LIE','LTU','LUX','MAC','MKD','MDG','MWI','MYS','MDV','MLI','MLT','MHL','MTQ','MRT','MUS','MYT','MEX','FSM','MDA','MCO','MNG','MNE','MSR','MAR','MOZ','MMR','NAM','NRU','NPL','NLD','NCL','NZL','NIC','NER','NGA','NIU','NFK','MNP','NOR','OMN','PAK','PLW','PSE','PAN','PNG','PRY','PER','PHL','PCN','POL','PRT','PRI','QAT','REU','ROU','RUS','RWA','BLM','KNA','LCA','MAF','WSM','SMR','STP','SAU','SEN','SRB','SYC','SLE','SGP','SXM','SVK','SVN','SLB','SOM','ZAF','SGS','SSD','ESP','LKA','SHN','SPM','VCT','SDN','SUR','SJM','SWZ','SWE','CHE','TWN','TJK','TZA','THA','TLS','TKL','TON','TTO','TUN','TUR','TKM','TCA','TUV','UGA','UKR','ARE','GBR','URY','UMI','UZB','VUT','VAT','VEN','VNM','VGB','VIR','WLF','ESH','YEM','ZMB','ZWE']
name_doc_out = raw_input("What do you want for name for the Excel output document ? >>> ")
wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet("PTE_TC")
x = 0
y = 0
numero = 0
total = len(liste_countries)
# total_city = len(villes_us)  # leftover from an earlier version: villes_us is never defined, so this would raise a NameError
# number_city = 0
for liste in liste_countries:
    if 0 == 1:
        print("THIS IF IS JUST FOR TEST")
    else:
        print("Fetching country number %s of %s" % (numero, total))
        numero = numero + 1
        url = 'http://www6.pearsonvue.com/Dispatcher?v=W2L&application=VTCLocator&HasXSes=Y&layerPath=ROOT.VTCLocator.SelTestCenterPage&wscid=199372577&layer=SelTestCenterPage&action=actDisplay&bfp=top.VTCLocatorPageFrame&bfpapp=top&wsid=1334887910891'
        values = {
            'sortColumn' : 2,
            'sortDirection' : 1,
            'distanceUnits' : 0,
            'proximitySearchLimit' : 20,
            'countryCode' : liste,
        }
        user_agent = 'Mozilla/5 (Solaris 10) Gecko'
        headers = { 'User-Agent' : user_agent }
        data = urllib.urlencode(values)
        req = urllib2.Request(url, data, headers)
        response = urllib2.urlopen(req)
        thePage = response.read()
        the_page = soup(thePage)
        #print the_page
        tableau = the_page.find('table', attrs={'id' : 'apptable'})
        print tableau
        try:
            rows = tableau.findAll('tr')
            for tr in rows:
                cols = tr.findAll('td')
                # skip the td elements we don't want
                y = 0
                x = x + 1
                for td in cols:
                    print td.text
                    ws.write(x, y, td.text.strip())
                    wb.save("%s.xls" % name_doc_out)
                    y = y + 1
        except (IndexError, AttributeError):
            # tableau is None when the page has no apptable (e.g. the session expired)
            pass
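One side note on the write loop above: wb.save is called for every single cell, which rewrites the whole .xls file each time. xlwt keeps the workbook in memory, so a single save after all writes produces the same file with far fewer disk writes. A minimal self-contained sketch with hypothetical stand-in data:

import xlwt

wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet("PTE_TC")
for x in range(3):            # stand-in for the scraped table rows
    for y in range(4):        # stand-in for the td cells in each row
        ws.write(x, y, "cell %d,%d" % (x, y))
wb.save("example.xls")        # save once, after all the writes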
I think the problem comes from the URL I am using. I guess the IDs change from one request to another... http://www6.pearsonvue.com/Dispatcher?v=W2L&application=VTCLocator&HasXSes=Y&layerPath=ROOT.VTCLocator.SelTestCenterPage&wscid=199372577&layer=SelTestCenterPage&action=actDisplay&bfp=top.VTCLocatorPageFrame&bfpapp=top&wsid=1334887910891
It worked fine for an hour, and now it does not anymore! :-)
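Since the wscid/wsid query parameters look session-scoped, one plausible fix is to stop hardcoding the frame URL and instead extract a fresh one from the dispatcher entry page on every run. A sketch under that assumption (the entry URL and frame name are taken from the first script; whether the extracted src really carries fresh session IDs is unverified):

import urllib2
import urlparse
from bs4 import BeautifulSoup as soup

headers = { 'User-Agent' : 'Mozilla/5 (Solaris 10) Gecko' }
entry = 'http://www6.pearsonvue.com/Dispatcher?application=VTCLocator&action=actStartApp&v=W2L&cid=445'

# Load the entry page and pull the current frame src out of it
page = soup(urllib2.urlopen(urllib2.Request(entry, None, headers)).read())
frame = page.find('frame', attrs={'name' : 'VTCLocatorPageFrame'})
if frame is not None:
    # urljoin makes the (possibly relative) src absolute, keeping the fresh IDs
    fresh_url = urlparse.urljoin(entry, frame['src'])
    print fresh_url  # POST the per-country form values here instead of the hardcoded URL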
If you have any ideas for filtering/parsing the iframe, I am interested.
Answer
The code is here...
from bs4 import BeautifulSoup
import urllib2

page = urllib2.urlopen("put_ur_url")
soup = BeautifulSoup(page)

for link in soup.findAll('iframe'):
    # .get avoids a KeyError on iframes that have no src attribute
    if link.get('src', '').startswith('start_of_path'):
        print(link)
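To go from the matched iframe to the table data itself, its document still has to be downloaded and parsed. A minimal continuation of the snippet above; the 'apptable' id is the one used in the question's script, and the base_url placeholder stands in for the real page URL:

import urlparse

base_url = "put_ur_url"  # same placeholder as above
for link in soup.findAll('iframe'):
    src = link.get('src')
    if not src:
        continue
    inner_url = urlparse.urljoin(base_url, src)       # handle relative src values
    inner = BeautifulSoup(urllib2.urlopen(inner_url))
    table = inner.find('table', attrs={'id' : 'apptable'})
    if table is not None:
        print(table)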
It is not entirely clear what you are asking... – 2012-04-19 08:30:22
Sorry about that. If you go to the page and pick options from the lists, you get to a table containing the test center names, countries and regions. I am trying to find a way to parse that data :-) – 2012-04-19 08:34:59