
dfcfguba's People

Contributors

alisagou

Stargazers

3 stargazers

Watchers

1 watcher

Forkers

lattic

dfcfguba's Issues

gubatiezi_17.12.27

#!/usr/bin/env python
# coding = utf-8
# Author = Alisa

import requests
from lxml import etree
import pandas as pd
import xlrd
import traceback
import time
from fake_useragent import UserAgent
from pymongo import MongoClient
import socket

def request_get(url, *args, **kwargs):
"""Wrap requests.get in one place so exceptions can be handled uniformly, e.g., to apply a proxy for unexpected situations."""
r = requests.get(url, *args, **kwargs)
headers = {
'Cookie': '_free_proxy_session=BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJTNhYmYzZmE2OTNmNGYzY2U0ZjM3MWJkMTRkYW'
'M3OTY5BjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMW1JT0hSVTFOV013WUk4TTI0ZU1QMkx2KzFweDN0MlF4N3F4WW51TWJWbTg9B'
'jsARg%3D%3D--28352b3dbf5c190ec3e77677bfe659707af53a62; Hm_lvt_0cf76c77469e965d2957f0553e6ecf59=1514'
'345289; Hm_lpvt_0cf76c77469e965d2957f0553e6ecf59=1514345289',
'If-None-Match': 'W/"046d24145ae6941a3c8238ae60e6f63e"',
'Upgrade-Insecure-Requests': '1',
}
ua = UserAgent()  # set up fake-useragent
headers['User-Agent'] = ua.random  # add a randomly generated User-Agent to the headers
url = 'http://www.xicidaili.com/nn/'
r = requests.get(url, headers=headers).text
s = etree.HTML(r)
proxy_ip = s.xpath('//*[@id="ip_list"]/tr/td[2]/text()')
proxy_port = s.xpath('//*[@id="ip_list"]/tr/td[3]/text()')

proxies = []
for i in range(min(len(proxy_port), len(proxy_ip))):
    proxy = 'http://' + proxy_ip[i] + ':' + proxy_port[i]
    ipproxy = {'http': proxy}
    proxies.append(ipproxy)

socket.setdefaulttimeout(3)
url = "http://ip.chinaz.com/getip.aspx"
for proxy in proxies:
    try:
        r = requests.get(url, proxies=proxy).text  # .text is a property, not a method
        print(r)
    except Exception as e:
        print(proxy)
        print(e)
        continue
return r

class DongFangCaiFuSpider(object):

def get_guba(self):
    """Collect the links of all guba (stock forum) boards."""
    headers = {
        'Cookie': 'st_pvi=07289511176421; emstat_bc_emcount=8269995271467659399; em_hq_fls=js; qgqp_b_id=16bc6c0db9'
                  'e5834747e5244ce183dbf2; HAList=a-sz-300059-%u4E1C%u65B9%u8D22%u5BCC%2Ca-sz-000063-%u4E2D%u5174%u901A%u8BA'
                  'F; ct=EQOdBZdiQKgjLDjjLKpPt2J1wu1bik6OjrvrXcEarEDdVz7TOcEJPll5zcUT8vg0_r9jzABole2ATQ69EUop408CQIuiOeCOtqJC'
                  '-Ib4sD3zhLhyJt9DCT1xKLicsPAjKjo7MXiN4QCZqdwQYMokh7zZdcT9qKan947y8JFfzTs; ut=FobyicMgeV52Ad4fCxim_G3WfDRv5X'
                  'tGWFKS1QF9xEiueMg253yd2gIURXeK3Y0Fk7K2V7nUh5pyUMPty8ZcRCwLQr_CZDw6OOoPJo5mDqz7M15gbGPixJXsKgqLOIjEGatLngak'
                  '9iJZy_G3jW5hCH6rQWkrudbvNx2_tRSG6fIowc-Fz8JTgTiTSyARy5VmH1aoD_voTq5S162ZlvfvSlvLQ-1tXqjzMANMEGndJZ5m9W_d6p'
                  'sQ5uMJSv4A2dKYnOzozp3D9ueJj2qKf1C8YInqydKDCf2A; pi=8116065114287126%3bu8116065114287126%3b%e8%82%a1%e5%8f%'
                  '8bKxq4nV%3bEC%2bz9e14Sjom05tMAtFVCoE3cxK0KDhZ6mR8T9dDRyP%2flZvkdobkvQNUset7aIAWYnAfD0qVfNGFMHZHkckIqVNxZRO'
                  'KEecJdddhlqCKsuN3b0FbQOfPiqeGS7Mi3gZlX66vzUzJFi%2bSVlZGTBA85oFp%2bOTu%2f6fLBBpKdjyz4GqDN1ZQq1I1hBnAS5cJ2Aa'
                  '9gdePRe%2br%3biN%2f8t64k98erR5LNCwM2XK%2fsakeQf2bmHbnMnLRTzuS99XbJJR4LtXHCFS7hijcfwVpPV3u0l1LDXsnb5OaVFNFT'
                  '0e0Vefcepetj4m%2bNw20ddTJLWPndGm1l9J%2bwdW7IkZrNusQOn%2f5QGxreCvpZX47rEcrVrg%3d%3d; uidal=8116065114287126'
                  '%e8%82%a1%e5%8f%8bKxq4nV; sid=114124340; vtpst=|; emstat_ss_emcount=5_1513351193_2439751672',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.'
                      '3239.84 Safari/537.36',
        'Upgrade-Insecure-Requests': '1'
    }  # corresponding request headers

    url = 'http://guba.eastmoney.com/remenba.aspx?type=1'
    r = requests.get(url, headers=headers).text
    s = etree.HTML(r)
    stock_num = s.xpath('/html/body/div[5]/div[2]/div[1]/div/ul/li/a/@href')
    client = MongoClient()
    db = client.dongfangcaifu  # use (or create) the dongfangcaifu database
    my_set = db.guba_url
    guba_urls = []
    for url in stock_num:
        guba_url = 'http://guba.eastmoney.com/list,' + url[6:17]
        d = {'url': guba_url}
        print(d)
        my_set.insert(d)  # insert the record
        guba_urls.append(guba_url)
    return guba_urls  # return the collected guba list-page urls for the main loop

def get_page_num(self):
    """Get the number of pages for a guba (placeholder implementation)."""
    for url in urls:  # iterate over each guba url (urls must be supplied by the caller)
        r = requests.get(url).text  # fetch the list page
        s = etree.HTML(r)  # parse the page
        page = s.xpath('//*[@id="articlelistnew"]/div[87]/span/span/span')  # pager element that holds the page count
        _ = self
    return 5  # hard-coded placeholder page count

def list_pag(self, url_format):
    """Crawl the post links on each list page and fetch the detail pages."""
    _ = self
    page = self.get_page_num()

    for i in range(page):  # iterate over the guba pages
        ua = UserAgent()  # set up fake-useragent
        self.headers['User-Agent'] = ua.random  # add a randomly generated User-Agent to the headers
        r = request_get(url_format, headers=self.headers).text

        try:
            s = etree.HTML(r)
            urls = s.xpath('//*[@id="articlelistnew"]/div/span[3]/a/@href')

            for url in urls:
                self.detail_pag(url)  # fetch the detail page

            print('Crawling page %s' % str(i + 1))  # progress message
            time.sleep(1)  # wait 1 second between pages
        except Exception as e:
            _ = e
            print(url_format, r)
            traceback.print_exc()

headers = {
    'Cookie': 'st_pvi=07289511176421; emstat_bc_emcount=8269995271467659399; em_hq_fls=js; qgqp_b_id=16bc6'
              'c0db9e5834747e5244ce183dbf2; HAList=a-sz-300059-%u4E1C%u65B9%u8D22%u5BCC%2Ca-sz-000063-%u4E2'
              'D%u5174%u901A%u8BAF; st_si=62317377700088; EmPaVCodeCo=47ebe00218714631853a7a63f9928ab7; sid=1'
              '14124340; vtpst=|; ct=ugJ70hWNaio7gPXQTxtTmpLDLh6sdrL8j2rybQIxbDSghfWwbEd_BKHXJriuASTodXFak296K'
              'Orlfkxzrbu-NT-U8hpjvtKu3mXqgcyCqO5MJltGLp76DVw_L9ucdTq3hYzozM9aRJ0auXb4A70zvYuCRYtRVO8mQImUy0s'
              'hj-E; ut=FobyicMgeV7MP9QJfNEgf8-r9TbKvL5aHhpxUzw7ocvpUgxAb33dnUCv07IaWIFM2GVZHu0fHIdMv8s1jhp0lS'
              'WWXjKnSHcQ_Y09rQC1zYUppyOE9wWA5GWXcNRkN0DFEHtiM8VksgjrIAhnWVqslFpiiMOSTgKw9zOeY2sx2OxuZ077q0SGy4'
              'BCLG9jY9Vr4SsRaLXwAbpavWMsSCGITBDXDv516Bf9hMNgvbVb-BL-8KgHQWxGjuw2-77DqJyNrYop94tGwlCVSCacv2rUUjB'
              'o8HyR9qCI; pi=8116065114287126%3bu8116065114287126%3bAlisaGou%3bTIVcVA9AyoQiZGOIe5yV36bl5oaXoirEJ'
              'hVeWh7QdvNf3OVN2%2bVR4FrL3hKFB8V2LzXVwJjzYErPHtx27DTCigSc5OaPon3OvqD%2fMUANcQ2IPRINC7tXrbFotgjh8i'
              'oEsYHrZCw6qmNB65r2%2bfNM7jjpsAW%2fZvB6g8Au5h1F6UiYyzixg65mZz5rUb2MAHKTbbt%2fRVFW%3bTS8P9EuPHU6wSEH'
              'TJNZBTrv%2fy0vhBPzopUd783nknn2rMvcMyjUu7btn2dCve6yKiPMf9c97%2b7LwtYeQcZC%2ffU6iRriJy8ojLxn%2bfejWb'
              'jPNYF4mw06Xmqgp3QYg8WxaFkU7TnozQWGRBkknMHUIM2sHwoyZNQ%3d%3d; uidal=8116065114287126AlisaGou; emstat'
              '_ss_emcount=24_1513377935_3906866301',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.323'
                  '9.84 Safari/537.36'
}


def detail_pag(self, url):
    """
    Fetch a post detail page.
    :param url: detail page url
    :return:
    """

    r = requests.get(url)
    s = etree.HTML(r.text)
    read_vol = s.xpath('//*[@id="zwmbtilr"]/span[1]/text()')
    comment_vol = s.xpath('//*[@id="zwmbtilr"]/span[2]/text()')
    refer_vol = s.xpath('//*[@id="zwmbtilr"]/span[3]/a/span/text()')
    publisher = s.xpath('//*[@id="zwconttbn"]/strong/a/text()')
    age = s.xpath('//*[@id="zwconttbn"]/span/span[2]/text()')
    time_ = s.xpath('//*[@id="zwconttb"]/div[2]/text()')
    title = s.xpath('//*[@id="zwconttbt"]/text()')
    content = s.xpath('//*[@id="zwconbody"]/div/text()')

    file = (read_vol, comment_vol, refer_vol, publisher, age, time_, title, content)

    self.save_data(file)

def save_data(self, data):
    """Persist one record."""
    _ = self
    # df = pd.DataFrame.from_dict(readVol, contentsVol, referVol,publisher,age, time, title, content)
    # df.to_excel('tiezis.xlsx')
    client = MongoClient()
    db = client.dongfangcaifu  # use (or create) the dongfangcaifu database
    my_set = db.guba_i  # collection for the i-th guba
    my_set.insert(data)  # insert the record (was `d`, which is undefined here)

if __name__ == '__main__':
    """Entry point."""
    spider = DongFangCaiFuSpider()  # create a spider object
    guba_url_format_list = spider.get_guba()  # collect the sample urls of every guba

    for url_format_ in guba_url_format_list:  # iterate
        spider.list_pag(url_format_)  # crawl each list page; the list page fetches its detail pages
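The commented-out pandas lines in save_data above do not match the pandas API; as a hedged alternative to the MongoDB insert, a minimal Excel-export sketch is shown below (the helper name, file path, and column names are illustrative assumptions, not part of the original code):

import pandas as pd

def save_post_to_excel(data, path='tiezis.xlsx'):
    """Write one post record (the tuple built in detail_pag) to an Excel file."""
    columns = ['read_vol', 'comment_vol', 'refer_vol', 'publisher', 'age', 'time', 'title', 'content']
    # each xpath() call returns a list of text nodes, so join every field into a single string first
    row = [''.join(field) if isinstance(field, (list, tuple)) else field for field in data]
    pd.DataFrame([row], columns=columns).to_excel(path, index=False)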

guba

import requests
from lxml import etree
import pandas as pd
import xlrd
import time
import traceback
from fake_useragent import UserAgent

headers = {
'Cookie': 'st_pvi=07289511176421; emstat_bc_emcount=8269995271467659399; em_hq_fls=js; st_si=45842755888941; qgqp_b_id=16bc6c0db9e5834747e5244ce183dbf2; HAList=a-sz-300059-%u4E1C%u65B9%u8D22%u5BCC%2Ca-sz-000063-%u4E2D%u5174%u901A%u8BAF; _adsame_fullscreen_14062=1; emstat_ss_emcount=6_1513105790_765519222',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
'X-Requested-With': 'XMLHttpRequest',
}
HTML = []  # list that stores the post links collected from multiple pages
def get_data(page):  # collect post links from `page` list pages
    for i in range(page):  # iterate over the guba pages
        url = 'http://guba.eastmoney.com/list,600000_{}.html'.format(i * 1)  # page-by-page url
        ua = UserAgent()  # set up fake-useragent
        headers['User-Agent'] = ua.random  # add a randomly generated User-Agent to the headers
        r = requests.get(url, headers=headers).text
        try:
            s = etree.HTML(r)
            urls = s.xpath('//*[@id="articlelistnew"]/div/span[3]/a/@href')
            HTML.extend(urls)  # append the newly found links
            print('Crawling page %s' % str(i + 1))  # progress message
            time.sleep(1)  # wait 1 second between pages
        except Exception as e:
            _ = e
            print(url, r)
            traceback.print_exc()

if __name__ == '__main__':
    get_data(1736)  # crawl 1736 pages
    df = pd.Series(HTML)
    df.to_excel('html.xlsx')
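Since html.xlsx only stores relative post links, here is a minimal sketch of reading them back for the detail-page stage (the load_post_links helper is an illustrative assumption, not part of the original script):

import pandas as pd

def load_post_links(path='html.xlsx'):
    """Read the links saved by get_data() and turn them into absolute post urls."""
    df = pd.read_excel(path)
    links = [str(link) for link in df.iloc[:, -1]]             # last column holds the saved hrefs
    links = [link for link in links if link.startswith('/')]   # drop any header/index artifacts
    return ['http://guba.eastmoney.com' + link for link in links]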

Crawling Eastmoney (东方财富) guba via proxies

#!/usr/bin/env python
# coding = utf-8
# Author = Alisa
# Crawl the per-stock guba posts on Eastmoney

import requests
from lxml import etree
import traceback
import time
from fake_useragent import UserAgent
from pymongo import MongoClient
import pymysql.cursors
import pandas as pd

ip_proxy = list()  # list that holds the proxy IPs

def get_proxy_ip_from_mysql():
    """Fetch proxy IPs from the MySQL database."""
    connection = pymysql.connect(host='192.3.244.150', user='ip_proxy', password='l4771822',
                                 db='ip_proxy', charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)  # connect to the database
    try:
        with connection.cursor() as cursor:
            sql = "SELECT * from lagou WHERE speed > 0 and speed < 4 and vali_count > 0 ORDER BY speed"
            cursor.execute(sql)
            result = cursor.fetchall()  # fetch all matching rows
            return result
    finally:
        connection.close()
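request_get below formats each fetched row as 'http://{ip}:{port}', so every record is expected to look roughly like the dict sketched here (field names are inferred from the SQL query and the format call; the exact lagou table schema is an assumption):

# example of one row as returned by pymysql's DictCursor (illustrative values)
example_proxy = {'ip': '1.2.3.4', 'port': '8080', 'speed': 1.2, 'vali_count': 3}
print('http://{ip}:{port}'.format(**example_proxy))  # -> http://1.2.3.4:8080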

def request_get(url, *args, **kwargs):  # *args collects extra positional arguments and **kwargs extra keyword arguments; both are forwarded to requests.get
"""Wrap requests.get in one place so exceptions are handled uniformly and a proxy is always applied."""
global ip_proxy  # ip_proxy is a module-level list shared across calls

retrytime = 0
while True:
    try:
        if not ip_proxy:  # refill the pool when it is empty
            ip_proxy = get_proxy_ip_from_mysql()  # reload proxies from MySQL
        proxy = ip_proxy.pop()  # each proxy is used once; pop() removes the last list element

        proxies = {
            'http': 'http://{ip}:{port}'.format(**proxy),
            'https': 'http://{ip}:{port}'.format(**proxy),
        }

        r = requests.get(url, proxies=proxies, timeout=5, *args, **kwargs)

        if r.status_code == 200:
            return r  # return the successful response
        else:
            retrytime += 1
            print('url:{} retry_time:{}'.format(url, retrytime))

            if retrytime < 10:
                continue
            elif retrytime > 10:
                break

    except Exception as e:
        _ = e  # placeholder; variable intentionally unused
        # traceback.print_exc()
        retrytime += 1
        print('url:{} retry_time:{}'.format(url, retrytime))

        if retrytime < 10:
            continue
        elif retrytime > 10:
            break
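Because the extra arguments are forwarded to requests.get, any of its keyword arguments can be passed through the wrapper; a minimal usage sketch (the url is the guba index page used elsewhere in this script, and the call assumes the MySQL proxy pool is reachable):

resp = request_get('http://guba.eastmoney.com/remenba.aspx?type=1',
                   headers={'User-Agent': UserAgent().random})
if resp is not None:  # the wrapper falls through and returns None after too many failed retries
    print(resp.status_code, len(resp.text))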

class DongFangCaiFuSpider(object):

headers = dict()  # initialise the shared headers dict

def get_guba(self):
    """Collect the links of all guba (stock forum) boards."""
    headers = {
        'Cookie': 'st_pvi=07289511176421; emstat_bc_emcount=8269995271467659399; em_hq_fls=js; qgqp_b_id=16bc6c0db9'
                  'e5834747e5244ce183dbf2; HAList=a-sz-300059-%u4E1C%u65B9%u8D22%u5BCC%2Ca-sz-000063-%u4E2D%u5174%u901A%u8BA'
                  'F; ct=EQOdBZdiQKgjLDjjLKpPt2J1wu1bik6OjrvrXcEarEDdVz7TOcEJPll5zcUT8vg0_r9jzABole2ATQ69EUop408CQIuiOeCOtqJC'
                  '-Ib4sD3zhLhyJt9DCT1xKLicsPAjKjo7MXiN4QCZqdwQYMokh7zZdcT9qKan947y8JFfzTs; ut=FobyicMgeV52Ad4fCxim_G3WfDRv5X'
                  'tGWFKS1QF9xEiueMg253yd2gIURXeK3Y0Fk7K2V7nUh5pyUMPty8ZcRCwLQr_CZDw6OOoPJo5mDqz7M15gbGPixJXsKgqLOIjEGatLngak'
                  '9iJZy_G3jW5hCH6rQWkrudbvNx2_tRSG6fIowc-Fz8JTgTiTSyARy5VmH1aoD_voTq5S162ZlvfvSlvLQ-1tXqjzMANMEGndJZ5m9W_d6p'
                  'sQ5uMJSv4A2dKYnOzozp3D9ueJj2qKf1C8YInqydKDCf2A; pi=8116065114287126%3bu8116065114287126%3b%e8%82%a1%e5%8f%'
                  '8bKxq4nV%3bEC%2bz9e14Sjom05tMAtFVCoE3cxK0KDhZ6mR8T9dDRyP%2flZvkdobkvQNUset7aIAWYnAfD0qVfNGFMHZHkckIqVNxZRO'
                  'KEecJdddhlqCKsuN3b0FbQOfPiqeGS7Mi3gZlX66vzUzJFi%2bSVlZGTBA85oFp%2bOTu%2f6fLBBpKdjyz4GqDN1ZQq1I1hBnAS5cJ2Aa'
                  '9gdePRe%2br%3biN%2f8t64k98erR5LNCwM2XK%2fsakeQf2bmHbnMnLRTzuS99XbJJR4LtXHCFS7hijcfwVpPV3u0l1LDXsnb5OaVFNFT'
                  '0e0Vefcepetj4m%2bNw20ddTJLWPndGm1l9J%2bwdW7IkZrNusQOn%2f5QGxreCvpZX47rEcrVrg%3d%3d; uidal=8116065114287126'
                  '%e8%82%a1%e5%8f%8bKxq4nV; sid=114124340; vtpst=|; emstat_ss_emcount=5_1513351193_2439751672',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.'
                      '3239.84 Safari/537.36',
        'Upgrade-Insecure-Requests': '1'
    }  # corresponding request headers

    url = 'http://guba.eastmoney.com/remenba.aspx?type=1'  # the Eastmoney guba index page
    r = requests.get(url, headers=headers).text
    s = etree.HTML(r)
    stock_num = s.xpath('/html/body/div[5]/div[2]/div[1]/div/ul/li/a/@href')

    urls = list()
    client = MongoClient()
    db = client.dongfangcaifu  # use (or create) the dongfangcaifu database
    my_set = db.guba_url

    for url in stock_num:  # collect the guba link (stock code) for every listed stock
        urls_ = url[6:12]  # the 6-digit stock code sliced from the href
        urls.append(urls_)
        d = {'stock_num': urls_}
        print(d)

        my_set.insert(d)  # store the code, e.g. for http://guba.eastmoney.com/list,600000.html
        _ = self  # placeholder use of self
    return urls  # return the list of stock codes

def list_pag(self, url_format: str, stock_code):
    """Crawl the post links on each list page and fetch the detail pages."""
    ua = UserAgent()
    self.headers['User-Agent'] = ua.random  # add a randomly generated User-Agent to the headers

    page_num = 1

    while True:
        url = url_format.format(page_num)

        r = request_get(url, headers=self.headers)

        if not r:
            print('Bad response for {}'.format(url))
            page_num += 1
            continue
        r = r.text

        try:
            s = etree.HTML(r)
            stock_name = s.xpath('//*[@id="stockname"]/a/text()')  # stock name
            stock_code = stock_code
            tiezi_url = s.xpath('//*[@id="articlelistnew"]/div/span[3]/a/@href')  # post links

            if not tiezi_url:
                break

            read_vol = s.xpath('//*[@id="articlelistnew"]/div/span[1]/text()')  # read counts
            comment_vol = s.xpath('//*[@id="articlelistnew"]/div/span[2]/text()')  # comment counts

            for index, url_ in enumerate(tiezi_url, 1):  # iterate over post links with a 1-based index

                if not url_.startswith('/'):
                    continue  # skip this link (break would abort the whole page)

                urls = 'http://guba.eastmoney.com{}'.format(str(url_))  # absolute post url

                self.detail_pag(urls, stock_name[0], stock_code, read_vol[index], comment_vol[index])  # fetch the detail page
                # time.sleep(1)  # optional delay between requests

        except Exception as e:
            _ = e  # placeholder; variable intentionally unused
            print(url_format, r)  # print the failing url and response for debugging
            traceback.print_exc()
        page_num += 1



def detail_pag(self, urls, stock_name, stock_code, read_vol, comment_vol):
    """
    Fetch a post detail page.
    :param urls: detail page url
    :return:
    """
    headers = {
        'Cookie': 'st_pvi=07289511176421; emstat_bc_emcount=8269995271467659399; em_hq_fls=js; qgqp_b_id=16bc6'
                  'c0db9e5834747e5244ce183dbf2; HAList=a-sz-300059-%u4E1C%u65B9%u8D22%u5BCC%2Ca-sz-000063-%u4E2'
                  'D%u5174%u901A%u8BAF; st_si=62317377700088; EmPaVCodeCo=47ebe00218714631853a7a63f9928ab7; sid=1'
                  '14124340; vtpst=|; ct=ugJ70hWNaio7gPXQTxtTmpLDLh6sdrL8j2rybQIxbDSghfWwbEd_BKHXJriuASTodXFak296K'
                  'Orlfkxzrbu-NT-U8hpjvtKu3mXqgcyCqO5MJltGLp76DVw_L9ucdTq3hYzozM9aRJ0auXb4A70zvYuCRYtRVO8mQImUy0s'
                  'hj-E; ut=FobyicMgeV7MP9QJfNEgf8-r9TbKvL5aHhpxUzw7ocvpUgxAb33dnUCv07IaWIFM2GVZHu0fHIdMv8s1jhp0lS'
                  'WWXjKnSHcQ_Y09rQC1zYUppyOE9wWA5GWXcNRkN0DFEHtiM8VksgjrIAhnWVqslFpiiMOSTgKw9zOeY2sx2OxuZ077q0SGy4'
                  'BCLG9jY9Vr4SsRaLXwAbpavWMsSCGITBDXDv516Bf9hMNgvbVb-BL-8KgHQWxGjuw2-77DqJyNrYop94tGwlCVSCacv2rUUjB'
                  'o8HyR9qCI; pi=8116065114287126%3bu8116065114287126%3bAlisaGou%3bTIVcVA9AyoQiZGOIe5yV36bl5oaXoirEJ'
                  'hVeWh7QdvNf3OVN2%2bVR4FrL3hKFB8V2LzXVwJjzYErPHtx27DTCigSc5OaPon3OvqD%2fMUANcQ2IPRINC7tXrbFotgjh8i'
                  'oEsYHrZCw6qmNB65r2%2bfNM7jjpsAW%2fZvB6g8Au5h1F6UiYyzixg65mZz5rUb2MAHKTbbt%2fRVFW%3bTS8P9EuPHU6wSEH'
                  'TJNZBTrv%2fy0vhBPzopUd783nknn2rMvcMyjUu7btn2dCve6yKiPMf9c97%2b7LwtYeQcZC%2ffU6iRriJy8ojLxn%2bfejWb'
                  'jPNYF4mw06Xmqgp3QYg8WxaFkU7TnozQWGRBkknMHUIM2sHwoyZNQ%3d%3d; uidal=8116065114287126AlisaGou; emstat'
                  '_ss_emcount=24_1513377935_3906866301',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.323'
                      '9.84 Safari/537.36'
    }
    r = request_get(url=urls, headers=headers)  # fetch the post url through request_get, which applies a proxy and the headers above

    if r is None:
        print('Bad response for {}'.format(urls))
        return
    r = r.text
    s = etree.HTML(r)

    publisher = ''.join(s.xpath('//*[@id="zwconttbn"]/strong/a/text()'))  # author
    age = ''.join(s.xpath('//*[@id="zwconttbn"]/span/span[2]/text()'))  # forum age
    time_ = ''.join(s.xpath('//*[@id="zwconttb"]/div[2]/text()'))  # post time
    title = ''.join(s.xpath('//*[@id="zwconttbt"]/text()'))  # title
    content = ''.join(s.xpath('//*[@id="zwconbody"]/div/text()'))  # body text
    print(stock_code, urls)

    origin_data = {'stock_name': stock_name, 'stock_code': stock_code, 'read_vol': read_vol,
                   'comment_vol': comment_vol, 'publisher': publisher, 'age': age, 'time_': time_,
                   'title': title, 'content': content}

    #origin_data = [stock_name, stock_code, read_vol,comment_vol, publisher, age, time_, title, content]
    _ = self

    self.save_data(origin_data)

def save_data(self, data):
    """Persist one post record."""
    client = MongoClient()
    db = client.dongfangcaifu  # use (or create) the dongfangcaifu database
    my_set = db.guba_2018  # collection for the guba posts
    my_set.insert(data)  # insert the post detail record
    # df = pd.DataFrame(origin_data, columns=['stock_name', 'stock_code', 'read_vol', 'comment_vol', 'publisher', 'age', 'time', 'title', 'content'])
    # df.to_excel('tiezi.xlsx', sheet_name='Sheet1')
    _ = self  # placeholder
    _ = data  # placeholder

if __name__ == '__main__':
    """Entry point."""
    # request_get('http://guba.eastmoney.com/remenba.aspx?type=1')
    spider = DongFangCaiFuSpider()  # create a spider object
    guba_url_format_list = spider.get_guba()  # collect every guba's stock code

    # e.g. http://guba.eastmoney.com/list,603186_{}.html
    for stock_code in guba_url_format_list:  # iterate
        url_format = 'http://guba.eastmoney.com/list,{}_'.format(stock_code) + '{}.html'
        spider.list_pag(url_format, stock_code)  # crawl each list page; the list page fetches its detail pages

gubatiezi

# coding = utf-8
# Author = Alisa

import requests
from lxml import etree
import pandas as pd
import xlrd
import traceback
import time
from fake_useragent import UserAgent

def request_get(url, *args, **kwargs):
    """Wrap requests.get in one place so exceptions can be handled uniformly, e.g., to apply a proxy."""
    r = requests.get(url, *args, **kwargs)
    return r

class DongFangCaiFuSpider(object):

headers = {
    'Cookie': 'st_pvi=07289511176421; emstat_bc_emcount=8269995271467659399; em_hq_fls=js; qgqp_b_id=16bc6'
              'c0db9e5834747e5244ce183dbf2; HAList=a-sz-300059-%u4E1C%u65B9%u8D22%u5BCC%2Ca-sz-000063-%u4E2'
              'D%u5174%u901A%u8BAF; st_si=62317377700088; EmPaVCodeCo=47ebe00218714631853a7a63f9928ab7; sid=1'
              '14124340; vtpst=|; ct=ugJ70hWNaio7gPXQTxtTmpLDLh6sdrL8j2rybQIxbDSghfWwbEd_BKHXJriuASTodXFak296K'
              'Orlfkxzrbu-NT-U8hpjvtKu3mXqgcyCqO5MJltGLp76DVw_L9ucdTq3hYzozM9aRJ0auXb4A70zvYuCRYtRVO8mQImUy0s'
              'hj-E; ut=FobyicMgeV7MP9QJfNEgf8-r9TbKvL5aHhpxUzw7ocvpUgxAb33dnUCv07IaWIFM2GVZHu0fHIdMv8s1jhp0lS'
              'WWXjKnSHcQ_Y09rQC1zYUppyOE9wWA5GWXcNRkN0DFEHtiM8VksgjrIAhnWVqslFpiiMOSTgKw9zOeY2sx2OxuZ077q0SGy4'
              'BCLG9jY9Vr4SsRaLXwAbpavWMsSCGITBDXDv516Bf9hMNgvbVb-BL-8KgHQWxGjuw2-77DqJyNrYop94tGwlCVSCacv2rUUjB'
              'o8HyR9qCI; pi=8116065114287126%3bu8116065114287126%3bAlisaGou%3bTIVcVA9AyoQiZGOIe5yV36bl5oaXoirEJ'
              'hVeWh7QdvNf3OVN2%2bVR4FrL3hKFB8V2LzXVwJjzYErPHtx27DTCigSc5OaPon3OvqD%2fMUANcQ2IPRINC7tXrbFotgjh8i'
              'oEsYHrZCw6qmNB65r2%2bfNM7jjpsAW%2fZvB6g8Au5h1F6UiYyzixg65mZz5rUb2MAHKTbbt%2fRVFW%3bTS8P9EuPHU6wSEH'
              'TJNZBTrv%2fy0vhBPzopUd783nknn2rMvcMyjUu7btn2dCve6yKiPMf9c97%2b7LwtYeQcZC%2ffU6iRriJy8ojLxn%2bfejWb'
              'jPNYF4mw06Xmqgp3QYg8WxaFkU7TnozQWGRBkknMHUIM2sHwoyZNQ%3d%3d; uidal=8116065114287126AlisaGou; emstat'
              '_ss_emcount=24_1513377935_3906866301',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.323'
                  '9.84 Safari/537.36'
}

def detail_pag(self, url):
    """
    Fetch a post detail page.
    :param url: detail page url
    :return:
    """

    r = requests.get(url)
    s = etree.HTML(r.text)
    read_vol = s.xpath('//*[@id="zwmbtilr"]/span[1]/text()')
    comment_vol = s.xpath('//*[@id="zwmbtilr"]/span[2]/text()')
    refer_vol = s.xpath('//*[@id="zwmbtilr"]/span[3]/a/span/text()')
    publisher = s.xpath('//*[@id="zwconttbn"]/strong/a/text()')
    age = s.xpath('//*[@id="zwconttbn"]/span/span[2]/text()')
    time_ = s.xpath('//*[@id="zwconttb"]/div[2]/text()')
    title = s.xpath('//*[@id="zwconttbt"]/text()')
    content = s.xpath('//*[@id="zwconbody"]/div/text()')

    file = (read_vol, comment_vol, refer_vol, publisher, age, time_, title, content)

    self.save_data(file)

def save_data(self, data):
    """Persist one record; left for the user to implement."""
    _ = self
    _ = data
    # df = pd.DataFrame.from_dict(readVol, contentsVol, referVol,publisher,age, time, title, content)
    # df.to_excel('tiezis.xlsx')

def list_pag(self, url_format):
    """Crawl the post links on each list page and fetch the detail pages."""
    _ = self
    page = self.get_page_num()

    for i in range(page):  # iterate over the guba pages
        ua = UserAgent()  # set up fake-useragent
        self.headers['User-Agent'] = ua.random  # add a randomly generated User-Agent to the headers
        r = request_get(url_format, headers=self.headers).text

        try:
            s = etree.HTML(r)
            urls = s.xpath('//*[@id="articlelistnew"]/div/span[3]/a/@href')

            for url in urls:
                self.detail_pag(url)  # fetch the detail page

            print('Crawling page %s' % str(i + 1))  # progress message
            time.sleep(1)  # wait 1 second between pages
        except Exception as e:
            _ = e
            print(url_format, r)
            traceback.print_exc()

def get_page_num(self):
    """Get the number of pages; left for the user to implement."""
    for url in urls:  # iterate over each guba url (urls must be supplied by the caller)
        r = requests.get(url).text  # fetch the list page
    s = etree.HTML(r)  # parse the last fetched page
    page = s.xpath('//*[@id="articlelistnew"]/div[87]/span/span/span')  # pager element that holds the page count
    _ = self
    return 5  # hard-coded placeholder page count

def get_guba(self):
    """Collect all guba links; left for the user to implement."""
    _ = self
    return []

if __name__ == '__main__':
    """Entry point."""
    spider = DongFangCaiFuSpider()  # create a spider object
    guba_url_format_list = spider.get_guba()  # collect the sample urls of every guba

    for url_format_ in guba_url_format_list:  # iterate
        spider.list_pag(url_format_)  # crawl each list page; the list page fetches its detail pages
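The get_page_num stub above returns a hard-coded 5; below is a minimal sketch of deriving the page count from the pager element instead, reusing the xpath already present in the stub (the helper name and the fallback default are assumptions):

import re
import requests
from lxml import etree

def get_page_num_from_pager(list_url, headers=None, default=5):
    """Parse the pager of a guba list page and return the total page count, falling back to `default`."""
    r = requests.get(list_url, headers=headers).text
    s = etree.HTML(r)
    texts = s.xpath('//*[@id="articlelistnew"]/div[87]/span/span/span//text()')  # same pager node as the stub
    numbers = [int(n) for t in texts for n in re.findall(r'\d+', t)]
    return max(numbers) if numbers else default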
