
A Python script for multi-threaded, paginated collection of paragraph content from websites

江西居道科技有限公司's main lines of business are website construction, APP development, mini program development, online promotion and SEO. Our web editors inevitably end up doing site maintenance for clients, and because every industry is different, each job calls for a large amount of source material. Images are easy enough: you can find them through Baidu image search. Usable text content is much harder to come by.

In the past our editors collected material from the websites of a client's competitors and tidied it up by hand, which consumed a lot of labour and involved no real technique, just manual work. So our developers wrote a Python script that collects paragraph content from websites, multi-threaded and page by page: following the configured parameters it crawls the target site, extracts the paragraphs, and saves them locally. The code is published below for anyone to use; please credit the source when reposting!
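The core of the approach fits in a few lines: build one URL per page from a template containing a [pageindex] placeholder, then hand each URL to its own thread. A stripped-down sketch (the template URL is a made-up example; the full script below adds link filtering, content extraction and throttling):

import threading
import requests

def fetch_page(url):
    # each worker fetches one page of the paginated listing
    html = requests.get(url, timeout=3).text
    print(url, len(html))

template = 'http://example.com/list_[pageindex].html'  # hypothetical paginated URL
threads = []
for i in range(1, 4):  # pages 1..3
    t = threading.Thread(target=fetch_page, args=(template.replace('[pageindex]', str(i)),))
    threads.append(t)
    t.start()

for t in threads:
    t.join()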


#!/usr/bin/python
import json
import os
import requests
import threading
import re
import time
import colorama

colorama.init(autoreset=True)	# enable coloured console output on Windows

# Load the configuration file
with open('config.json', 'r', encoding='utf-8') as f:
	data = json.load(f)


def toInt(num):
	# Convert a config value to an int, treating the empty string as 0
	if num != '':
		return int(num)
	else:
		return 0

thread_count = 0	# number of worker threads still to finish
start_ = toInt(data['start'])	# first page number
end_ = toInt(data['end'])	# last page number
url_ = data['url']	# entry URL (may contain the [pageindex] placeholder)
urlinclude = data['urlinclude']	# strings a URL must contain
urlunclude = data['urlunclude']	# strings a URL must not contain
textinclude = data['textinclude']	# string a page must contain to be processed
textunclude = data['textunclude']	# strings a paragraph must not contain
textreplace = data['textreplace']	# strings to strip out of paragraphs
textminsize = toInt(data['textminsize'])	# minimum length of a valid paragraph
textmaxsize = toInt(data['textmaxsize'])	# maximum length of a valid paragraph
encoding_ = data['encoding']	# page encoding
starttag = data['starttag']	# marker where content extraction starts
endtag = data['endtag']	# marker where content extraction ends
sleepTime = toInt(data['sleep'])	# delay between requests, in seconds
jsonkey = data['jsonkey']	# field to read when the response is JSON
headers_ = data['headers']	# request headers
todayStr = time.strftime("%Y%m%d", time.localtime())
total = 0

if encoding_ == '':
	encoding_ = 'utf-8'
    
# Append one line to today's log file
def doLog(vstr):
	with open(todayStr + ".log", 'a', encoding='utf-8') as fo:
		if vstr != "":
			fo.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + "\t" + vstr + "\n")
		else:
			fo.write("\n")

def saveText(vstr):
	# Filter, clean and append one paragraph to the results file
	global total
	if vstr != '':
		#doLog('Length of content to save: ' + str(len(vstr)))
		# Reject paragraphs containing any forbidden string
		if len(textunclude) > 0:
			for tu_ in textunclude:
				if tu_ != '':
					if vstr.find(tu_) != -1:
						#doLog(vstr + " contains a forbidden string: " + tu_)
						return ""

		# Strip the configured replacement strings
		if len(textreplace) > 0:
			for vi in textreplace:
				if vi != '':
					vstr = vstr.replace(vi, '')

		print("\033[0;32;40m\t Collected paragraph length: " + str(len(vstr)) + "\t\033[0m ")
		total = total + 1
		with open("results.txt", 'a', encoding='utf-8') as fo:
			fo.write(vstr + "\n")

def getFromUrl(vurl):
	global thread_count, start_time
	if vurl != '':
		print('Fetching candidate links from ' + vurl)
		# Work out the site root, for resolving relative links later
		domain = ""
		if vurl.find('://') != -1:
			pos_ = vurl.find('/', vurl.find('://') + 3)
			domain = vurl[0:pos_] if pos_ != -1 else vurl
		else:
			pos_ = vurl.find('/')
			domain = vurl[0:pos_] if pos_ != -1 else vurl

		res = requests.get(vurl, headers=headers_ if len(headers_) > 0 else {}, timeout=3)
		res.encoding = encoding_	# apply the configured page encoding

		htmlStr = ''
		if jsonkey != '':
			# The listing is returned as JSON; read the configured field
			_json = res.json()
			if _json.get(jsonkey, '') != '':
				htmlStr = _json[jsonkey]
			else:
				print("================== ERROR ===================")

		else:
			htmlStr = res.text

		#print(htmlStr)
		a_href = re.findall('<a.*?href="(.*?)"', htmlStr, re.I)	# extract every <a> href with a regex
		a_href = set(a_href)	# de-duplicate the links
		for i in a_href:
			urlFlag = 1
			# Validate the link: first check the forbidden strings
			if len(urlunclude) > 0:
				for u1 in urlunclude:
					if i.find(u1) != -1:
						urlFlag = 0
						print("\033[0;31;40m\t" + i + "\tinvalid\033[0m ")
						break

			# Then check the required strings
			if urlFlag > 0 and len(urlinclude) > 0:
				inFlag = 0
				for u2 in urlinclude:
					if i.find(u2) != -1:
						inFlag = 1
						break

				if inFlag < 1:
					urlFlag = 0	# none of the required strings found, treat as invalid

			if urlFlag:
				# The URL is valid
				if i[0:1] == '/':
					i = domain + i	# relative path, prepend the site root

				# Fetch the article page
				if sleepTime > 0:
					print('Waiting ' + str(sleepTime) + 's before the next request')
					time.sleep(sleepTime)

				doLog('Fetching: ' + i)
				res2 = requests.get(i, headers=headers_ if len(headers_) > 0 else {}, timeout=3)
				res2.encoding = encoding_
				html_ = res2.text
				if html_ != '':
					htmlFlag = 1

					# Check whether the page contains the required marker
					if len(textinclude) > 0:
						if html_.find(textinclude) != -1:
							htmlFlag = 1
						else:
							htmlFlag = 0

					if htmlFlag < 1:
						print(i + "\tdoes not contain the required marker, skipped!")
					else:
						if starttag != "" or endtag != "":
							_startpos = 0
							_endpos = len(html_)
							if starttag != "":
								_startpos = html_.find(starttag)
								if _startpos == -1:
									_startpos = 0	# start marker not found, keep from the top

							if endtag != "":
								_endpos = html_.find(endtag, _startpos)

							if _startpos >= _endpos:
								_endpos = len(html_)

							# Cut the page down to the part between the two markers
							html_ = html_[_startpos:_endpos]

						# Strip the HTML tags, keeping only the text
						html_ = html_.replace('</p>', "</p>\r\n")	# force a line break per paragraph in case the whole block is on one line
						html_ = re.sub(r'</?\w+[^>]*>', '', html_)
						#doLog(i + ':' + html_)
						# Split the text into candidate paragraphs
						tmpArr = html_.split("\r\n")
						for ti in tmpArr:
							ti2 = ti.strip().replace("  ", " ")
							if len(ti2) > textminsize and len(ti2) < textmaxsize:
								#doLog(i + ':' + ti)
								saveText(ti2)
							else:
								if len(ti2) > textmaxsize:
									print('Content length for ' + i + ': ' + str(len(ti2)))
									# Too long, try splitting again on single line breaks
									arr2 = ti2.replace("\r", "\n").split("\n")
									for tj in arr2:
										tj2 = tj.strip().replace("  ", " ")
										print('Current paragraph length: ' + str(len(tj2)))
										if len(tj2) > textminsize and len(tj2) < textmaxsize:
											saveText(tj2)
										#else:
										#	if len(tj2) > textmaxsize:
										#		doLog(i + '-->' + tj2)
								else:
									print('Paragraph outside the configured length range: ' + str(len(ti2)))

				print(i)

		print(vurl + " 采集完成,退出线程\n")
		if thead_count==1:
			print('任务已完成,共用时:'+str(formatFloat(time.time()-start_time)) + 's')
			print('共计:' + str(total))
			#退出整个程序
			sys.exit()
		else:
			if thead_count>0:
				thead_count -= 1

	else:
		if thead_count>0:
			thead_count -= 1

print("程序成功启动")

if start_<1:
	start_ = 1

if end_<start_:
	end_=start_

thread_list = []
start_time = time.time()
print('江西居道科技有限公司为您提供技术服务,www.juguw.net,转载请注明出处')

if url_.find('[pageindex]') != -1:
	# Start one worker thread per page number
	for ui in range(start_, end_ + 1):
		_url_ = url_.replace('[pageindex]', str(ui))
		myThread = threading.Thread(target=getFromUrl, args=(_url_,))
		thread_count += 1
		thread_list.append(myThread)

	for tl in thread_list:
		tl.start()

		if sleepTime > 0:
			print('Waiting ' + str(sleepTime) + 's before starting the next thread')
			time.sleep(sleepTime)

		#doLog("Started a thread")

else:
	getFromUrl(url_)
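To run the tool, save the script next to config.json (collect.py is an assumed name; any name works) and start it with Python 3 after installing the two third-party dependencies:

pip install requests colorama
python collect.py

If the url in config.json contains the [pageindex] placeholder, one worker thread is started per page number from start to end; otherwise only the single url is fetched. Collected paragraphs are appended to results.txt, and every fetched page is recorded in a dated .log file.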


In addition, the script needs a config.json configuration file holding the parameter settings; its contents are as follows:

{"start":1,"end":2,"url":"http://www.juguw.net/articleslist.html","urlinclude":["jsruixi/vip_doc"],"urlunclude":[],"textinclude":"</h1>","textunclude":["___","www.","://"],"textreplace":["南京","1、","2、","3、","4、","5、","6、","7、","8、","9、","①、","①.","②、","②.","③、","③.","④、","④.","⑤、","⑤.","⑥、","⑥.","⑦、","⑦.","⑧、","⑧.","⑨、","⑨.","⑩、","⑩.","⑴、","⑴.","⑵、","⑵.","⑶、","⑶.","⑷、","⑷.","⑸、","⑸.","⑹、","⑹.","⑺、","⑺.","⑻、","⑻.","⑼、","⑼.","⑽、","⑽.","一、","一.","二、","二.","三、","三.","四、","四.","五、","五.","六、","六.","七、","七.","八、","八.","九、","九.","十、","十.","1)、","1).","2)、","2).","3)、","3).","4)、","4).","5)、","5).","6)、","6).","7)、","7).","8)、","8).","①","⑴","1)","②","⑵","2)","③","⑶","3)","④","⑷","4)","⑤","⑸","5)","⑥","⑹","6)","⑦","⑺","7)","⑧","⑻","8)","⑨","⑼","⑩","⑽","(1)","(2)","(3)","(4)","(5)","(6)","(7)","(8)","(9)","(10)"],"textminsize":100,"textmaxsize":300,"encoding":"utf-8","starttag":"</h1>","endtag":"<div class=\"p-details-pre-nex\" id=\"pDetailsPreNext\">","sleep":3,"jsonkey":"","headers":{"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3","Accept - Encoding":"gzip, deflate, br","Accept-Language":"zh-CN,zh;q=0.9","Connection":"Keep-Alive","Host":"www.juguw.net","User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36"}}


The script is also available for download.

A sample run is shown below:
[Figure: the multi-threaded paragraph-scraping script in action]


Original content; please credit the source when reposting. For website construction, APP development, and mini program development, contact 江西居道科技有限公司, http://www.juguw.net
