Web scraping with bs4




Usage notes
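
BeautifulSoup parses an HTML document into a tree and exposes it through find/find_all and CSS selectors via select. Below is a minimal, self-contained sketch of the calls exercised in the scripts that follow; the inline HTML is my own stand-in, shaped like the pages scraped later (.article-title, .content > p), so it runs with no network access.

from bs4 import BeautifulSoup  # pip install beautifulsoup4 lxml

html = """
<div class="article-title"><h1>Sample title</h1></div>
<div class="content"><p>first paragraph</p><p>second paragraph</p></div>
"""
soup = BeautifulSoup(html, "lxml")

print(soup.find('div', class_='article-title').text)  # first matching tag
print(soup.find_all('p'))                             # every matching tag, as a list
for p in soup.select('.content > p'):                 # CSS child selector
    print(p.text)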

Usage verification test

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    # Spoof the User-Agent so the request looks like an ordinary browser
    header = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"}
    # Target URL (stripped in the original post)
    url = ".html"
    # Fetch the page; the site serves GB2312, so set the encoding explicitly
    response = requests.get(url=url, headers=header)
    response.encoding = "gb2312"
    page_text = response.text
    # print(page_text)
    soup = BeautifulSoup(page_text, "lxml")

    print(soup.find('div', class_='article-title').text)
    for p in soup.select('.content > p'):
        print(p.text)

    # print(soup.a)                              # soup.tagName returns the first tagName in the HTML
    # print(soup.div)
    # print(soup.find('div'))                    # equivalent to soup.div
    # print(soup.find('div', class_='content'))  # filter by attribute
    # print(soup.find_all('p'))                  # every matching tag, returned as a list
    # print(soup.select('.content'))             # CSS selectors: class, id, tag
    # print(soup.select('.content > p'))         # child selector, returns a list

    print(soup.select('.content > p')[0].text)
    print(soup.select('.content > p')[0].string)
    print(soup.select('.content > p')[0].get_text())
    print(soup.select('.content > p')[1].text)
    print(soup.select('.content > p')[1].string)
    print(soup.select('.content > p')[1].get_text())
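
The six prints at the end compare the three ways bs4 exposes a tag's text: .text and .get_text() concatenate the text of all descendants, while .string only has a value when the tag contains exactly one child string and is None otherwise. A standalone sketch:

from bs4 import BeautifulSoup

soup = BeautifulSoup("<p>plain</p><p>has <b>nested</b> tags</p>", "lxml")
plain, nested = soup.find_all('p')

print(plain.text, plain.string, plain.get_text())  # all three give 'plain'
print(nested.text)        # 'has nested tags': descendant text concatenated
print(nested.string)      # None: the tag has more than one child node
print(nested.get_text())  # 'has nested tags'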

0919_英语语法网.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from docx import Document

if __name__ == "__main__":
    header = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"}
    # Home-page URL (stripped in the original post)
    url = "/"
    response = requests.get(url=url, headers=header)
    response.encoding = "gb2312"
    soup = BeautifulSoup(response.text, "lxml")

    # Collect every sub-navigation entry as {title, link}.
    # list_subnav ends up holding the div(id=subnav) topics: parts of speech
    # (noun, verb, adjective, pronoun, article, numeral, preposition, conjunction),
    # non-finite / modal / linking verbs, sentence types (questions, imperatives,
    # exclamations, negation, inversion, emphasis, existential, ellipsis),
    # sentence elements, adverbial / attributive / noun clauses, the tenses,
    # active and passive voice, subjunctive mood, comparison, absolute
    # constructions, subject-verb agreement, word usage, and the primary /
    # middle-school / high-school / college grammar sections.
    list_subnav = []
    for li in soup.select('#subnav > ul > li'):
        list_subnav.append({"title_subnav": li.text, "url_subnav": li.a["href"]})

    for entry in list_subnav:
        url_subnav = entry["url_subnav"]
        title_subnav = entry["title_subnav"]
        response = requests.get(url=url_subnav, headers=header)
        response.encoding = "gb2312"
        soup = BeautifulSoup(response.text, "lxml")

        # Articles on the first listing page of this section
        list_url_title = []
        for li in soup.select('.list-article > ul > li'):
            link = li.find_all("a")[1]  # the second <a> carries the article title
            list_url_title.append({'title_name': link.text, 'url_name': "" + link.get("href")})

        # Walk the numbered page links; skip "下一页" (next) and "末页" (last)
        # so no listing page is fetched twice
        for li in soup.select('.pages > ul > li'):
            if li.a and li.a.text != "下一页" and li.a.text != "末页":
                url_pages = url_subnav + li.a.get("href")
                response = requests.get(url=url_pages, headers=header)
                response.encoding = "gb2312"
                page_soup = BeautifulSoup(response.text, "lxml")
                for li_article in page_soup.select('.list-article > ul > li'):
                    link = li_article.find_all("a")[1]
                    list_url_title.append({'title_name': link.text, 'url_name': "" + link.get("href")})

        print(title_subnav)
        print(url_subnav)

        # One Word document per navigation section
        document = Document()
        document.add_heading(title_subnav, level=1)
        document.add_heading(url_subnav, level=1)
        for article in list_url_title:
            url = article["url_name"]
            response = requests.get(url=url, headers=header)
            response.encoding = "gb2312"
            soup = BeautifulSoup(response.text, "lxml")
            # Take the <h1> inside .article-title so the date/author line is dropped
            article_title = soup.select('.article-title')[0].h1.text
            print(article_title)
            document.add_heading(article_title, level=2)
            document.add_paragraph("网址是:" + url)  # "网址是" = "the URL is"
            for p in soup.select('.content > p'):
                document.add_paragraph(p.text)
            document.add_paragraph('\n')
        document.save(title_subnav + '.docx')  # python-docx writes .docx, not legacy .doc
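
The script leans on four python-docx calls: Document() creates an empty document, add_heading(text, level=n) and add_paragraph(text) append content, and save(path) writes the file. A minimal standalone sketch of just that API:

from docx import Document  # pip install python-docx

document = Document()
document.add_heading('Section title', level=1)   # level maps to Word's Heading 1
document.add_heading('Article title', level=2)
document.add_paragraph('Body text collected from one article.')
document.save('demo.docx')                       # python-docx emits .docx archives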

0919_yingyuyufa_all_docx_单词用法.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from docx import Document

if __name__ == "__main__":
    header = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"}

    # Listing pages 2..24 follow a numbered naming scheme
    # (the base URL was stripped in the original post)
    url_list = []
    for i in range(2, 25):
        url_list.append("" + str(i) + ".html")

    # Collect {title, url} for every article on every listing page
    list_url_title = []
    for url_pages in url_list:
        response = requests.get(url=url_pages, headers=header)
        response.encoding = "gb2312"
        soup = BeautifulSoup(response.text, "lxml")
        for li in soup.select('.list-article > ul > li'):
            link = li.find_all("a")[1]  # the second <a> carries the article title
            list_url_title.append({'title_name': link.text, 'url_name': "" + link.get("href")})
    print(list_url_title)

    document = Document()
    for article in list_url_title:
        url = article["url_name"]
        response = requests.get(url=url, headers=header)
        response.encoding = "gb2312"
        soup = BeautifulSoup(response.text, "lxml")
        # Take the <h1> inside .article-title so the date/author line is dropped
        article_title = soup.select('.article-title')[0].h1.text
        document.add_heading(article_title, level=1)
        document.add_paragraph("网址是:" + url)
        for p in soup.select('.content > p'):
            document.add_paragraph(p.text)
        document.add_paragraph('\n')
    document.save('单词用法_.docx')  # python-docx writes .docx, not legacy .doc
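
The fetch-and-collect step over '.list-article > ul > li' is repeated verbatim in every script on this page. A hedged refactor sketch, pulling it into a helper; the name collect_articles and the structure are my own, not part of the original scripts:

import requests
from bs4 import BeautifulSoup

HEADER = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                        "AppleWebKit/537.36 (KHTML, like Gecko) "
                        "Chrome/89.0.4389.90 Safari/537.36"}

def collect_articles(listing_url):
    """Fetch one listing page and return its articles as
    [{'title_name': ..., 'url_name': ...}, ...]."""
    response = requests.get(url=listing_url, headers=HEADER)
    response.encoding = "gb2312"
    soup = BeautifulSoup(response.text, "lxml")
    results = []
    for li in soup.select('.list-article > ul > li'):
        link = li.find_all("a")[1]  # the second <a> carries the article title
        results.append({'title_name': link.text, 'url_name': link.get("href")})
    return results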

0919_yingyuyufa_all_docx_数词.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from docx import Document

if __name__ == "__main__":
    header = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"}
    # Section URL (stripped in the original post)
    url = "/"
    response = requests.get(url=url, headers=header)
    response.encoding = "gb2312"
    soup = BeautifulSoup(response.text, "lxml")

    # Articles on the first listing page
    list_url_title = []
    for li in soup.select('.list-article > ul > li'):
        link = li.find_all("a")[1]  # the second <a> carries the article title
        list_url_title.append({'title_name': link.text, 'url_name': "" + link.get("href")})

    # Walk the numbered page links; skip "下一页" (next) and "末页" (last)
    # so no listing page is fetched twice
    for li in soup.select('.pages > ul > li'):
        if li.a and li.a.text != "下一页" and li.a.text != "末页":
            url_pages = url + li.a.get("href")
            response = requests.get(url=url_pages, headers=header)
            response.encoding = "gb2312"
            page_soup = BeautifulSoup(response.text, "lxml")
            for li_article in page_soup.select('.list-article > ul > li'):
                link = li_article.find_all("a")[1]
                list_url_title.append({'title_name': link.text, 'url_name': "" + link.get("href")})

    document = Document()
    for article in list_url_title:
        url = article["url_name"]
        response = requests.get(url=url, headers=header)
        response.encoding = "gb2312"
        soup = BeautifulSoup(response.text, "lxml")
        # Take the <h1> inside .article-title so the date/author line is dropped
        article_title = soup.select('.article-title')[0].h1.text
        print(article_title)
        document.add_heading(article_title, level=1)
        document.add_paragraph("网址是:" + url)
        for p in soup.select('.content > p'):
            document.add_paragraph(p.text)
        document.add_paragraph('\n')
    document.save('主页_词性_数词.docx')  # python-docx writes .docx, not legacy .doc
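
The page-bar filter used here and in 0919_英语语法网.py can be written more compactly with "not in". A self-contained sketch; the inline HTML and base_url are my own illustrations, since the real URLs are stripped from this post:

from bs4 import BeautifulSoup

html = """
<div class="pages"><ul>
  <li><a href="list_2.html">2</a></li>
  <li><a href="list_3.html">3</a></li>
  <li><a href="list_2.html">下一页</a></li>
  <li><a href="list_11.html">末页</a></li>
</ul></div>
"""
soup = BeautifulSoup(html, "lxml")
base_url = "https://example.com/"  # hypothetical base URL
for li in soup.select('.pages > ul > li'):
    # li.a is None for <li> entries without a link, so check it first;
    # the "next" and "last" shortcuts would revisit pages the numbered links cover
    if li.a and li.a.text not in ("下一页", "末页"):
        print(base_url + li.a.get("href"))  # only the numbered links survive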

0919_yingyuyufa_all_docx_动词.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from docx import Document

if __name__ == "__main__":
    header = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36"}
    # Section URL (stripped in the original post)
    url = "/"
    response = requests.get(url=url, headers=header)
    response.encoding = "gb2312"
    soup = BeautifulSoup(response.text, "lxml")

    # Articles on the first listing page
    list_url_title = []
    for li in soup.select('.list-article > ul > li'):
        link = li.find_all("a")[1]  # the second <a> carries the article title
        list_url_title.append({'title_name': link.text, 'url_name': "" + link.get("href")})

    # Remaining listing pages, hard-coded (page URLs stripped in the original post)
    url_list = ['.html', '.html', '.html', '.html', '.html', '.html',
                '.html', '.html', '.html', '.html', '.html']
    for url_pages in url_list:
        response = requests.get(url=url_pages, headers=header)
        response.encoding = "gb2312"
        soup = BeautifulSoup(response.text, "lxml")
        for li in soup.select('.list-article > ul > li'):
            link = li.find_all("a")[1]
            list_url_title.append({'title_name': link.text, 'url_name': "" + link.get("href")})

    document = Document()
    for article in list_url_title:
        url = article["url_name"]
        response = requests.get(url=url, headers=header)
        response.encoding = "gb2312"
        soup = BeautifulSoup(response.text, "lxml")
        # print(response.text)
        # Take the <h1> inside .article-title so the date/author line is dropped
        article_title = soup.select('.article-title')[0].h1.text
        document.add_heading(article_title, level=1)
        document.add_paragraph("网址是:" + url)
        for p in soup.select('.content > p'):
            document.add_paragraph(p.text)
        document.add_paragraph('\n')
    document.save('主页_词性_动词.docx')  # python-docx writes .docx, not legacy .doc
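
Every request in these scripts pins encoding = "gb2312" because the target site declares that charset; left alone, requests falls back to ISO-8859-1 for text/html responses that carry no charset parameter, and the Chinese text decodes as mojibake. When the charset is not known in advance, apparent_encoding asks requests' bundled detector to guess from the body bytes. A sketch; example.com is a placeholder URL:

import requests

response = requests.get("https://example.com",
                        headers={"user-agent": "Mozilla/5.0"})
response.encoding = response.apparent_encoding  # detector's best guess
print(response.encoding, response.text[:120])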
