Example: scraping the content of a Tianya forum post with Python multithreading

This example uses re, urllib, and threading to fetch the content of a Tianya forum post with multiple threads. Set url to the first page of the post you want to scrape, and set file_name to the name of the output file.
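Every page of a Tianya post shares the same URL prefix; only the trailing page number changes. The short sketch below, using the same example URL as the script, shows how the loop in main() derives each page's URL from the first-page URL.

# How main() builds per-page URLs from the first-page URL.
url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
for num in range(1, 4):
    # Drop the trailing '1.shtml' (7 characters) and append the page number.
    print '%s%s.shtml' % (url[:-7], num)
# Prints:
# http://bbs.tianya.cn/post-16-996521-1.shtml
# http://bbs.tianya.cn/post-16-996521-2.shtml
# http://bbs.tianya.cn/post-16-996521-3.shtml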

The full script, down_tianya.py, is as follows:

#coding:utf-8

import urllib
import re
import threading
import os


class Down_Tianya(threading.Thread):
    """Worker thread that downloads one page of the post."""

    def __init__(self, url, num, dt):
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        print 'downloading from %s' % self.url
        self.down_text()

    def down_text(self):
        """Fetch the posts on self.url and store them in the shared dict, keyed by page number."""
        html_content = urllib.urlopen(self.url).read()
        # Each post is captured as a (time, body) pair. The pattern assumes the Tianya
        # markup of the time: a "时间:" span followed by a div whose class starts with
        # "bbs-content"; adjust it if the page structure has changed.
        text_pattern = re.compile(r'<span>时间:(.*?)</span>.*?<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
        text = text_pattern.findall(html_content)
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join


def page(url):
    """Fetch the total page count from the first-page URL."""
    html_page = urllib.urlopen(url).read()
    # The page count is the number in the link right before the "下页" (next page) link.
    page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?">下页</a>')
    page_result = page_pattern.search(html_page)
    if page_result:
        page_num = int(page_result.group(1))
        return page_num
    # Single-page posts have no "下页" link.
    return 1


def write_text(txt_dict, fn):
    """Write the dict to a text file in page order; each value is the list of posts on that page."""
    tx_file = open(fn, 'w+')
    pn = len(txt_dict)
    for i in range(1, pn + 1):
        tx_list = txt_dict[i]
        for tx in tx_list:
            tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n')
            tx_file.write(tx.strip() + '\r\n' * 4)
    tx_file.close()


def main():
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    file_name = 'abc.txt'
    my_page = page(url)
    my_dict = {}
    print 'page num is : %s' % my_page

    threads = []
    # Build the URL of every page and download them in parallel.
    for num in range(1, my_page + 1):
        myurl = '%s%s.shtml' % (url[:-7], num)
        downlist = Down_Tianya(myurl, num, my_dict)
        downlist.start()
        threads.append(downlist)

    # Wait until every download has finished before writing the file.
    for t in threads:
        t.join()

    write_text(my_dict, file_name)
    print 'All download finished. Save file at directory: %s' % os.getcwd()


if __name__ == '__main__':
    main()
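The script targets Python 2: it relies on urllib.urlopen and the print statement, so save it as down_tianya.py and run it with a Python 2 interpreter (python down_tianya.py). If you would rather run it on Python 3, the fetch step needs urllib.request and explicit decoding; a minimal sketch is shown below, assuming the pages are served as UTF-8 (the print statements elsewhere in the script would also have to become print() calls).

# Python 3 sketch of the fetch step used in down_text() above.
# Assumption: Tianya pages are served as UTF-8; check the response headers if unsure.
from urllib.request import urlopen

def fetch_page(url):
    """Download one page and return it as decoded text."""
    raw = urlopen(url).read()
    return raw.decode('utf-8', errors='ignore')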

