```python
# pip install requests        # scraping module, used to fetch a page's HTML source
# pip install beautifulsoup4  # HTML parsing module
# pip install prettytable     # PrettyTable module, prints output neatly as a table

import requests                      # scraping module
from bs4 import BeautifulSoup        # HTML parser
from prettytable import PrettyTable  # neat tabular output

url = "http://www.weather.com.cn/weather/101210101.shtml"
get_html = requests.get(url)                      # fetch the HTML source
soure = BeautifulSoup(get_html.content, "html.parser")
body = soure.find("body")                         # locate the <body> tag
div = body.find("div", id="7d")                   # locate the <div> whose id attribute is "7d"
ul = div.find("ul")                               # within that <div>, locate the <ul> tag
li = ul.find_all("li")                            # collect all the <li> tags

list = []                                  # note: this name shadows the built-in list
for shuju in li:                           # iterate over the <li> tags
    dic = {}
    riqi = shuju.find("h1").string         # find the <h1> tag and extract the date
    dic["date"] = riqi                     # store the extracted value in the dict
    tianqi = shuju.find_all("p")           # collect all the <p> tags
    dic["tq"] = tianqi[0].string           # weather description
    wendu = tianqi[1].find("span")
    dic["maxwd"] = wendu.string + "℃"
    wendu2 = tianqi[1].find("i")
    dic["minwd"] = wendu2.string
    fengxiang = tianqi[2].find("span")["title"]
    dic["fx"] = fengxiang
    fengli = tianqi[2].find("i")
    dic["fl"] = fengli.string
    list.append(dic)

table = PrettyTable(["日期", "天气", "最高温度", "最低温度", "风向", "风力"])
# for i in range(0, 7):
#     a = list[i]["date"], list[i]["tq"], list[i]["maxwd"], list[i]["minwd"], list[i]["fx"], list[i]["fl"]
#     print(a)
for i in range(0, 7):
    table.add_row([list[i]["date"], list[i]["tq"], list[i]["maxwd"], list[i]["minwd"], list[i]["fx"], list[i]["fl"]])
print(table)
```
Question: why does this raise `RecursionError: maximum recursion depth exceeded while calling a Python object`? The commented-out block (the plain `print` loop) runs fine.
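A likely cause, for reference: `.string` on a bs4 element returns a `NavigableString`, not a plain `str`, and a `NavigableString` keeps references back into the entire parse tree. The commented-out loop only prints the values, but PrettyTable deep-copies its stored rows when rendering the table (older versions call `copy.deepcopy` on them inside `get_string`), and deep-copying a `NavigableString` tries to copy the whole soup tree, which blows past Python's recursion limit. A minimal sketch of the usual workaround, assuming that diagnosis and reusing the variables from the script above: coerce every cell to a plain `str` before it reaches PrettyTable.

```python
# Sketch, not a verified fix: str() detaches each value from the bs4 parse
# tree, so PrettyTable's internal copying only ever sees plain strings.
for i in range(0, 7):
    table.add_row([str(list[i][k]) for k in ("date", "tq", "maxwd", "minwd", "fx", "fl")])
print(table)
```

Equivalently, wrapping each `.string` result in `str(...)` inside the parsing loop (or calling `.get_text()` on the tag, which already returns a plain `str`) keeps `NavigableString` objects out of the data entirely.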