
Computing a novel's average sentence length, part-of-speech ratios, keywords, punctuation, and word-shape statistics in Python


The requirements are as follows (requirements screenshot omitted).

Code:
Sentiment analysis
import re

import jieba
import numpy as np
import matplotlib.pyplot as plt

def read_file_gbk(filename):
    with open(filename, 'r', encoding='GBK') as f:
        s = f.read()
        s = re.sub('/C', '', s)   # remove '/C' corpus annotation tags
        s = re.sub(r'\s', '', s)  # remove all whitespace, including \r and \n
    return s
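If a nominally GBK file contains a few undecodable bytes, reading it strictly will raise an error; a lenient variant (an assumption, not part of the original post) substitutes replacement characters instead:

def read_file_gbk_lenient(filename):
    # hypothetical helper: tolerate stray non-GBK bytes instead of raising
    with open(filename, 'r', encoding='GBK', errors='replace') as f:
        return re.sub(r'\s|/C', '', f.read())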


# Open a dictionary file and return its entries as a list
def open_dict(Dict='hahah', path=r''):
    path = path + '%s.txt' % Dict
    words = []
    with open(path, 'r', encoding='utf-8') as dictionary:
        for word in dictionary:
            words.append(word.strip(' ,\n'))
    return words



def judgeodd(num):
    if (num % 2) == 0:
        return 'even'
    else:
        return 'odd'


# Note: set `path` to the directory that holds the dictionary files.
deny_word = open_dict(Dict='否定词', path=r'')
posdict = open_dict(Dict='positive', path=r'')
negdict = open_dict(Dict='negative', path=r'')
degree_word = open_dict(Dict='程度级别词语', path=r'')
mostdict = degree_word[degree_word.index('extreme')+1: degree_word.index('very')]  # weight 4: multiply the sentiment word's score by 4
verydict = degree_word[degree_word.index('very')+1: degree_word.index('more')]     # weight 3
moredict = degree_word[degree_word.index('more')+1: degree_word.index('ish')]      # weight 2
ishdict = degree_word[degree_word.index('ish')+1: degree_word.index('last')]       # weight 0.5
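The slicing above assumes 程度级别词语.txt is a flat word list in which the marker lines extreme, very, more, ish, and last separate the weight bands. A plausible layout (an illustration only; the real file ships with the sentiment dictionary, not with this post):

extreme
极其
极度
very
非常
十分
more
更加
比较
ish
稍稍
有点
last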



def sentiment_score_list(dataset):
    seg_sentence = re.split('[。!?]', dataset)  # split into sentences (str.split cannot take a pattern)
    count1 = []
    count2 = []
    for sen in seg_sentence:  # iterate over every sentence
        if not sen:           # skip empty fragments produced by the split
            continue
        segtmp = jieba.lcut(sen, cut_all=False, HMM=False)  # tokenize the sentence into a list of words
        i = 0  # position of the word currently being scanned
        a = 0  # position just past the most recent sentiment word
        poscount = 0   # first-pass score of the current positive word
        poscount2 = 0  # positive score after negation flips
        poscount3 = 0  # accumulated positive score (including the exclamation bonus)
        negcount = 0
        negcount2 = 0
        negcount3 = 0
        for word in segtmp:
            if word in posdict:  # is this word a positive sentiment word?
                poscount += 1
                c = 0
                for w in segtmp[a:i]:  # scan the degree adverbs before the sentiment word
                    if w in mostdict:
                        poscount *= 4.0
                    elif w in verydict:
                        poscount *= 3.0
                    elif w in moredict:
                        poscount *= 2.0
                    elif w in ishdict:
                        poscount *= 0.5
                    elif w in deny_word:
                        c += 1
                if judgeodd(c) == 'odd':  # an odd number of negation words flips the polarity
                    poscount *= -1.0
                    poscount2 += poscount
                    poscount = 0
                    poscount3 = poscount + poscount2 + poscount3
                    poscount2 = 0
                else:  # an even number of negation words keeps the polarity
                    poscount3 = poscount + poscount2 + poscount3
                    poscount = 0
                a = i + 1  # move the window past this sentiment word

            elif word in negdict:  # negative sentiment, mirroring the positive branch above
                negcount += 1
                d = 0
                for w in segtmp[a:i]:
                    if w in mostdict:
                        negcount *= 4.0
                    elif w in verydict:
                        negcount *= 3.0
                    elif w in moredict:
                        negcount *= 2.0
                    elif w in ishdict:
                        negcount *= 0.5
                    elif w in deny_word:
                        d += 1
                if judgeodd(d) == 'odd':
                    negcount *= -1.0
                    negcount2 += negcount
                    negcount = 0
                    negcount3 = negcount + negcount2 + negcount3
                    negcount2 = 0
                else:
                    negcount3 = negcount + negcount2 + negcount3
                    negcount = 0
                a = i + 1
            elif word == '!' or word == '!':  # the sentence carries an exclamation mark
                for w2 in segtmp[::-1]:  # scan backwards for a sentiment word; add 2 and stop
                    if w2 in posdict or w2 in negdict:
                        poscount3 += 2
                        negcount3 += 2
                        break
            i += 1  # advance the scan position


            # the following prevents negative totals
            pos_count = 0
            neg_count = 0
            if poscount3 < 0 and negcount3 > 0:
                neg_count += negcount3 - poscount3
                pos_count = 0
            elif negcount3 < 0 and poscount3 > 0:
                pos_count = poscount3 - negcount3
                neg_count = 0
            elif poscount3 < 0 and negcount3 < 0:
                neg_count = -poscount3
                pos_count = -negcount3
            else:
                pos_count = poscount3
                neg_count = negcount3
            count1.append([pos_count, neg_count])
        count2.append(count1)
        count1 = []
    return count2

def sentiment_score(senti_score_list):
    score = []
    for review in senti_score_list:
        score_array = np.array(review)
        Pos = np.sum(score_array[:, 0])
        Neg = np.sum(score_array[:, 1])
        AvgPos = np.mean(score_array[:, 0])
        AvgPos = float('%.1f'%AvgPos)
        AvgNeg = np.mean(score_array[:, 1])
        AvgNeg = float('%.1f'%AvgNeg)
        StdPos = np.std(score_array[:, 0])
        StdPos = float('%.1f'%StdPos)
        StdNeg = np.std(score_array[:, 1])
        StdNeg = float('%.1f'%StdNeg)
        score.append([Pos, Neg, AvgPos, AvgNeg, StdPos, StdNeg])
    return score


def sentiment_sen(data):
    score = sentiment_score(sentiment_score_list(data))[0]
    return score[4] - score[5]  # StdPos - StdNeg
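A quick sanity check of the whole pipeline on one sentence (illustrative; the actual numbers depend on the dictionaries you load):

# score a single sentence end to end
demo = '这本书非常好看!'
print(sentiment_score(sentiment_score_list(demo)))  # [[Pos, Neg, AvgPos, AvgNeg, StdPos, StdNeg]]
print(sentiment_sen(demo))                          # the post treats > 0 as an overall positive tone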

# Sentiment analysis: classify every sentence as positive, negative, or neutral
def calculate_motion(text):
    print("emotion analyse start")
    pos = 0
    neg = 0
    neutral = 0
    s = read_file_gbk(text)
    sen_list = re.split(r' *[.。][\'"\)\]]* *', s)  # split the text into sentences
    print(len(sen_list))  # sentence count (len, not __sizeof__, which is the memory footprint)

    for x in sen_list:
        if len(x) > 0:
            score = sentiment_sen(x)  # compute once per sentence
            if score > 0:
                pos = pos + 1
            elif score == 0:
                neutral = neutral + 1
            else:
                neg = neg + 1
    print("positive, negative and neutral sentence counts: {}, {}, {}".format(pos, neg, neutral))

    x_data = ["positive", "negative", "neutral"]
    y_data = [pos,neg,neutral]

    bar_width = 0.3

    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
    plt.rcParams['axes.unicode_minus'] = False    # render minus signs correctly
    plt.bar(x=x_data, height=y_data,
            color='steelblue', alpha=0.8, width=bar_width)

    # print the value above each bar; ha/va control horizontal/vertical alignment
    for x, y in enumerate(y_data):
        plt.text(x, y, '%s' % y, ha='center', va='bottom')

    plt.title("情感计算")  # "Sentiment computation"
    plt.xlabel("类型")     # "Category"
    plt.ylabel("数量")     # "Count"
    plt.show()


calculate_motion('XX.txt')
calculate_motion('YQ.txt')

Output charts (screenshots omitted).
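The title also promises part-of-speech ratios; a minimal sketch of that statistic with jieba's posseg tagger (this helper is not in the original post) could look like:

import jieba.posseg as pseg
from collections import Counter

def pos_ratio(filename):
    # share of each POS tag across all tokens in the novel
    tags = Counter(flag for _, flag in pseg.cut(read_file_gbk(filename)))
    total = sum(tags.values())
    for flag, n in tags.most_common(10):
        print('%s: %.2f%%' % (flag, 100.0 * n / total))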

Average sentence length
import re

import matplotlib.pyplot as plt


def read_file_gbk(filename):
    with open(filename, 'r', encoding='GBK') as f:
        s = f.read()
        s = re.sub('/C', '', s)   # remove '/C' corpus annotation tags
        s = re.sub(r'\s', '', s)  # remove all whitespace
    return s


# Compute the average sentence length (in characters)
def calculate_avg_length(text):
    size = 0
    num = 0
    s = read_file_gbk(text)
    sentences = re.split(r' *[.?!。,][\'"\)\]]* *', s)  # split on sentence-ending punctuation
    for stuff in sentences:
        if len(stuff) > 0:            # skip empty fragments produced by the split
            size = size + len(stuff)  # character count (len, not __sizeof__)
            num = num + 1
    print("avg_length_num is " + str(size / num))

    # build the plot data: total characters, sentence count, average length
    x_data = ["总字数", "总句数", "平均句长"]
    y_data = [size, num, size / num]

    bar_width = 0.3

    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
    plt.rcParams['axes.unicode_minus'] = False    # render minus signs correctly
    plt.bar(x=x_data, height=y_data,
            color='steelblue', alpha=0.8, width=bar_width)

    # print the value above each bar
    for x, y in enumerate(y_data):
        plt.text(x, y, '%s' % y, ha='center', va='bottom')

    plt.title("平均句长计算")  # "Average sentence length"
    plt.xlabel("类型")         # "Category"
    plt.ylabel("数量")         # "Count"
    plt.show()



# average sentence length of the xianxia novel
calculate_avg_length('XX.txt')
# average sentence length of the romance novel
calculate_avg_length('YQ.txt')
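Equivalently, the statistic fits in two lines (a compact sketch of the same idea):

lens = [len(t) for t in re.split(r'[。!?,]', read_file_gbk('XX.txt')) if t]
print(sum(lens) / len(lens))  # average sentence length in characters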

Output charts (screenshots omitted).

Keyword word cloud
import jieba
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from wordcloud import WordCloud


# Generate a word cloud for one novel
def generate_wordcloud(text):
    stopword_list = []
    with open(text, 'r', encoding='GBK') as f:
        text = f.read()
    with open('stopword.txt', 'r', encoding='utf-8') as f:
        for line in f:
            stopword_list.append(line.strip('\n'))

    cut_text = jieba.cut(text)
    result = " ".join(cut_text)                # WordCloud expects space-separated tokens
    image = np.array(Image.open('star.jpg'))   # mask image: the cloud fills its shape

    stopwords = set(stopword_list)

    # generate the word cloud
    wc = WordCloud(
        font_path='simhei.ttf',   # font path (required for Chinese glyphs)
        background_color='white',
        width=1000,
        height=600,
        max_font_size=100,
        min_font_size=20,
        max_words=20,
        font_step=2,
        stopwords=stopwords,      # stop words to drop from the cloud
        mask=image
    )
    wc.generate(result)
    wc.to_file('result.png')      # save the rendered image

    # display the image
    plt.figure('result')          # window title
    plt.imshow(wc)
    plt.axis('off')               # hide the axes
    plt.show()


# word cloud for the xianxia novel
generate_wordcloud('XX.txt')
# word cloud for the romance novel
generate_wordcloud('YQ.txt')
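Beyond the cloud image, the keywords themselves can be listed with jieba's built-in TF-IDF extractor (a sketch; this function is not in the original post):

import jieba.analyse

def print_keywords(filename, topk=20):
    # top-k keywords ranked by TF-IDF weight
    text = open(filename, 'r', encoding='GBK').read()
    for word, weight in jieba.analyse.extract_tags(text, topK=topk, withWeight=True):
        print('%s\t%.4f' % (word, weight))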



Output charts (screenshots omitted).

Punctuation marks
import re

import matplotlib.pyplot as plt


def read_file_gbk(filename):
    with open(filename, 'r', encoding='GBK') as f:
        s = f.read()
        s = re.sub('/C', '', s)   # remove '/C' corpus annotation tags
        s = re.sub(r'\s', '', s)  # remove all whitespace
    return s


def calculate_sign(text):

    s = read_file_gbk(text)
    n_excl = len(re.findall('[!!]', s))  # full- and half-width exclamation marks
    print('! num is ' + str(n_excl))

    n_ques = len(re.findall('[??]', s))  # full- and half-width question marks
    print('? num is ' + str(n_ques))
    x_data = ["感叹号数量", "问号数量"]    # exclamation-mark count, question-mark count
    y_data = [n_excl, n_ques]

    bar_width = 0.3

    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
    plt.rcParams['axes.unicode_minus'] = False    # render minus signs correctly
    plt.bar(x=x_data, height=y_data,
            color='steelblue', alpha=0.8, width=bar_width)

    # print the value above each bar
    for x, y in enumerate(y_data):
        plt.text(x, y, '%s' % y, ha='center', va='bottom')

    plt.title("标点符号计算")  # "Punctuation counts"
    plt.xlabel("类型")         # "Category"
    plt.ylabel("数量")         # "Count"
    plt.show()


calculate_sign('XX.txt')
calculate_sign('YQ.txt')
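To tally more punctuation classes at once, a Counter over a chosen mark set generalizes the idea (a sketch, not part of the original post):

from collections import Counter

def punctuation_counts(filename, marks='。,、!?;:……'):
    # count every punctuation mark of interest in one pass
    s = read_file_gbk(filename)
    counts = Counter(ch for ch in s if ch in marks)
    for mark, n in counts.most_common():
        print('%s: %d' % (mark, n))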

Output charts (screenshots omitted).

Word shapes (reduplication patterns)
import re

import matplotlib.pyplot as plt


def read_file_gbk(filename):
    with open(filename, 'r', encoding='GBK') as f:
        s = f.read()
        s = re.sub('/C', '', s)   # remove '/C' corpus annotation tags
        s = re.sub(r'\s', '', s)  # remove all whitespace
    return s



# Count reduplicated word shapes (AA, AABB, ABB, ABAB)
def calculate_shape(text):
    print("start calculate_shape")
    # read the text as one long string
    s = read_file_gbk(text)
    # split on common Chinese and ASCII sentence punctuation
    sentences = re.split(r'[.?!,,。!?……——]', s)

    SIZE_AA = 0
    SIZE_AABB = 0
    SIZE_ABB = 0
    SIZE_ABAB = 0
    for stuff in sentences:
        # Regex notes:
        # . matches any character except a newline
        # \1 re-matches what the first group captured (add {4}, i.e. (.)\1{4}, to repeat 4 times)
        # \2 re-matches what the second group captured
        SIZE_AA += sum(1 for _ in re.finditer(r'(.)\1', stuff))         # AA
        SIZE_AABB += sum(1 for _ in re.finditer(r'(.)\1(.)\2', stuff))  # AABB
        SIZE_ABB += sum(1 for _ in re.finditer(r'(.)(.)\2', stuff))     # ABB
        SIZE_ABAB += sum(1 for _ in re.finditer(r'(..)\1', stuff))      # ABAB
    print("AA shape num is " + str(SIZE_AA))
    print("AABB shape num is " + str(SIZE_AABB))
    print("ABB shape num is " + str(SIZE_ABB))
    print("ABAB shape num is " + str(SIZE_ABAB))


    # start drawing: build the plot data
    x_data = ['AA', 'AABB', 'ABB', 'ABAB']
    y_data = [SIZE_AA, SIZE_AABB, SIZE_ABB, SIZE_ABAB]

    bar_width = 0.3

    plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
    plt.rcParams['axes.unicode_minus'] = False    # render minus signs correctly
    plt.bar(x=x_data, height=y_data,
            color='steelblue', alpha=0.8, width=bar_width)

    # print the value above each bar
    for x, y in enumerate(y_data):
        plt.text(x, y, '%s' % y, ha='center', va='bottom')

    plt.title("词形分析")  # "Word-shape analysis"
    plt.xlabel("类型")     # "Category"
    plt.ylabel("数量")     # "Count"
    plt.show()
    print("end calculate_shape")
    print("end calculate_shape")



# word shapes of the xianxia novel
calculate_shape('XX.txt')
# word shapes of the romance novel
calculate_shape('YQ.txt')
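A quick check of the four patterns on a hand-picked string (illustrative only):

import re

demo = '高高兴兴地看看那漂漂亮亮的星星,顺便研究研究'
print(sum(1 for _ in re.finditer(r'(.)\1', demo)))        # AA pairs such as 高高, 看看, 星星
print(sum(1 for _ in re.finditer(r'(.)\1(.)\2', demo)))   # AABB such as 高高兴兴, 漂漂亮亮
print(sum(1 for _ in re.finditer(r'(.)(.)\2', demo)))     # ABB-style XYY runs such as 地看看
print(sum(1 for _ in re.finditer(r'(..)\1', demo)))       # ABAB such as 研究研究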




Output charts (screenshots omitted).

Source: https://blog.csdn.net/qq_42338771/article/details/113060547