BeautifulSoup
Author: Internet
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc, 'html.parser')
print(soup.prettify())
<html>
 <head>
  <title>
   The Dormouse's story
  </title>
 </head>
 <body>
  <p class="title">
   <b>
    The Dormouse's story
   </b>
  </p>
  <p class="story">
   Once upon a time there were three little sisters; and their names were
   <a class="sister" href="http://example.com/elsie" id="link1">
    Elsie
   </a>
   ,
   <a class="sister" href="http://example.com/lacie" id="link2">
    Lacie
   </a>
   and
   <a class="sister" href="http://example.com/tillie" id="link3">
    Tillie
   </a>
   ;
   and they lived at the bottom of a well.
  </p>
  <p class="story">
   ...
  </p>
 </body>
</html>
soup.a
<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>
soup.find_all("a")
[<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>,
<a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>]
soup.findAll("a")
[<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>,
<a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>]
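findAll() is the old, pre-4.0 camelCase name; in Beautiful Soup 4 it is simply an alias for find_all(). When only the first match is needed, find() can be used instead of find_all(); a short sketch:
# find() returns the first matching tag (or None) instead of a list
first_link = soup.find("a")
print(first_link["id"])        # link1
print(first_link.get_text())   # Elsie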
# Find the URLs of all the <a> tags in the document
for link in soup.find_all('a'):
    print(link.get('href'))
http://example.com/elsie
http://example.com/lacie
http://example.com/tillie
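Each result of find_all() is a Tag object, so the text and attributes can be read together; a minimal sketch:
# Collect (text, href) pairs for every link in the document
links = [(a.get_text(), a.get("href")) for a in soup.find_all("a")]
print(links)
# [('Elsie', 'http://example.com/elsie'), ('Lacie', 'http://example.com/lacie'), ('Tillie', 'http://example.com/tillie')]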
# Get all of the text from the document:
print(soup.get_text())
The Dormouse's story
The Dormouse's story
Once upon a time there were three little sisters; and their names were
Elsie,
Lacie and
Tillie;
and they lived at the bottom of a well.
...
# Pass a regular expression; Beautiful Soup filters tag names with its search() method
import re
for tag in soup.find_all(re.compile("^b")):
    print(tag.name)
body
b
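The regular expression is applied to tag names, so a different pattern selects a different set of tags; for example, a pattern that matches any name containing the letter "t":
# Tags whose names contain "t": html and title
for tag in soup.find_all(re.compile("t")):
    print(tag.name)
# html
# title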
# If a list is passed in, Beautiful Soup returns everything that matches any item in the list
soup.find_all(["a", "b"])
[<b>The Dormouse's story</b>,
<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>,
<a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>]
def has_class_but_no_id(tag):
    return tag.has_attr('class') and not tag.has_attr('id')

# Pass this function into find_all() to get all the <p> tags
soup.find_all(has_class_but_no_id)
[<p class="title"><b>The Dormouse's story</b></p>,
<p class="story">Once upon a time there were three little sisters; and their names were
<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a> and
<a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>,
<p class="story">...</p>]
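A filter function can also be applied to a single attribute rather than the whole tag; a minimal sketch of filtering on href (the function receives the attribute value):
def not_lacie(href):
    return href and not re.compile("lacie").search(href)

soup.find_all(href=not_lacie)
# [<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
#  <a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>]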
soup.find_all("p")
[<p class="title"><b>The Dormouse's story</b></p>,
<p class="story">Once upon a time there were three little sisters; and their names were
<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a> and
<a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>,
<p class="story">...</p>]
soup.find_all("p","title")
#find_all(name,attrs,recursive,string,**kwargs)
[<p class="title"><b>The Dormouse's story</b></p>]
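Unrecognized keyword arguments are treated as attribute filters, so attributes such as id and href (including regular expressions) can be searched directly:
soup.find_all(id="link2")
# [<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>]
soup.find_all(href=re.compile("elsie"))
# [<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>]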
Searching by CSS class
soup.find_all("a",class_="sister")
[<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>,
<a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>]
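Two other find_all() parameters worth knowing: limit stops the search after a given number of matches, and string searches the text of the document instead of tags:
soup.find_all("a", limit=2)
# [<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
#  <a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>]
soup.find_all(string=re.compile("Dormouse"))
# ["The Dormouse's story", "The Dormouse's story"]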
CSS selectors
soup.select("title")
[<title>The Dormouse's story</title>]
soup.select("body a")
[<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>,
<a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>]
soup.select("head>title")
[<title>The Dormouse's story</title>]
soup.select("p>a")
[<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>,
<a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>]
soup.select("p>a:nth-of-type(2)")
[<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>]
soup.select("p> #link1")
[<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>]
# Find by CSS class name
soup.select(".sister")
[<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>,
<a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>]
# Find by tag id
soup.select("a#link2")
[<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>]
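select() also understands attribute selectors, and select_one() returns only the first match instead of a list:
soup.select('a[href^="http://example.com/"]')
# all three sister links
soup.select_one(".sister")
# <a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>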
#get_text()
markup = '<a href="http://example.com/">\nI linked to <i>example.com</i>\n</a>'
soup = BeautifulSoup(markup, 'html.parser')
print(soup.get_text("|", strip=True))
I linked to|example.com
print(soup.i.get_text())
example.com
# Or use the .stripped_strings generator and process the resulting list of strings by hand:
[text for text in soup.stripped_strings]
['I linked to', 'example.com']
Differences between parsers
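A minimal sketch of how the common parsers handle the same broken markup (assuming lxml and html5lib are installed in addition to the built-in html.parser):
broken = "<a></p>"
print(BeautifulSoup(broken, "html.parser"))
# <a></a>
print(BeautifulSoup(broken, "lxml"))
# <html><body><a></a></body></html>
print(BeautifulSoup(broken, "html5lib"))
# <html><head></head><body><a><p></p></a></body></html>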
from lxml import etree
root = etree.Element("root")
print(root.tag)
root
root.append(etree.Element("child1"))
child2=etree.SubElement(root,"child2")
child3=etree.SubElement(root,"child3")
print(etree.tostring(root, pretty_print=True))
b'<root>\n  <child1/>\n  <child2/>\n  <child3/>\n</root>\n'
#Elements are lists
child = root[0]
print(child.tag)
print(len(root))
child1
3
for child in root:
    print(child.tag)
child1
child2
child3
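lxml elements can also be built from a markup string and searched with XPath; a minimal sketch:
root2 = etree.fromstring("<root><child1/><child2/><child3/></root>")
print([el.tag for el in root2])
# ['child1', 'child2', 'child3']
print(root2.xpath("//child2")[0].tag)
# child2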