#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: toddler
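# Segments a Chinese text with jieba, filters out stop words and
# non-Chinese characters, counts word frequencies, and renders the
# result as a word cloud.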
import jieba
import re
import os
from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt


def cut_analyze(input_file):
    """
    :param input_file: path of the text file to segment and analyze
    :return: (list1, list2) where list1 is the filtered segmentation result
             and list2 is the segmentation result sorted by word frequency,
             as a list of (word, count) tuples
    """
    cpc_dict_path = 'user_dict/cpc_dictionary.txt'
    stop_words_path = 'user_dict/stopword.txt'

    with open(input_file, encoding='utf-8') as f:
        content = f.read()

    with open(stop_words_path, encoding='utf-8') as sf:
        st_content = sf.readlines()

    jieba.load_userdict(cpc_dict_path)  # load the custom segmentation dictionary for National People's Congress terms
    stop_words = {line.strip() for line in st_content}  # a set gives O(1) stop-word lookups

    seg_list = jieba.cut(content, cut_all=False)  # accurate mode
    filter_seg_list = list()

    for seg in seg_list:
        goal_word = ''.join(re.findall('[\u4e00-\u9fa5]+', seg)).strip()  # strip all non-Chinese characters
        if goal_word and goal_word not in stop_words:  # drop stop words from the segmentation result
            filter_seg_list.append(goal_word)

    seg_counter_all = Counter(filter_seg_list).most_common()  # sort segmented words by frequency
    # for word, count in seg_counter_all:
    #     print('word: {0} - count: {1}'.format(word, count))

    return filter_seg_list, seg_counter_all


def main():
    input_file_path = 'input_file/nighteen-cpc.txt'
    cut_data, sort_data = cut_analyze(input_file=input_file_path)

    # A CJK-capable font is required, otherwise Chinese words render as boxes.
    font = os.path.abspath('assets/msyh.ttf')
    wc = WordCloud(collocations=False, font_path=font, width=3600, height=3600, margin=2)
    wc.generate_from_frequencies(dict(sort_data))

    plt.figure()
    plt.imshow(wc)
    plt.axis('off')
    plt.show()


if __name__ == '__main__':
    main()
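

# --- Usage sketch (not part of the original script) ---
# A minimal example of reusing cut_analyze and printing the top words,
# assuming the script is run from the repo root so the relative paths
# resolve; 'top_n' is an illustrative variable, not from the original.
#
# words, freqs = cut_analyze(input_file='input_file/nighteen-cpc.txt')
# top_n = 10
# for word, count in freqs[:top_n]:
#     print('{0}: {1}'.format(word, count))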