Notes on Learning jieba (结巴) Chinese Word Segmentation



Reposted from [1], but with many modifications and simplifications.

# -*- coding: utf-8 -*-
# Python 2.7 treats character streams as ASCII by default; anything outside
# the ASCII range raises an exception, so switch the default encoding to utf-8.
import sys
reload(sys)
print "Old default encoding:", sys.getdefaultencoding()
sys.setdefaultencoding("utf-8")   # set the default encoding to utf-8
print "New default encoding:", sys.getdefaultencoding()

import jieba

# Segmentation modes
seg_list = jieba.cut("我来到北京航空航天大学", cut_all=True)
print("Full Mode: " + "/ ".join(seg_list))          # full mode

seg_list = jieba.cut("我来到北京航空航天大学", cut_all=False)
print("Precise Mode: " + "/".join(seg_list))        # precise mode, also the default

seg_list = jieba.cut("他来到知春路大运村。")
print("Default Mode: " + "/".join(seg_list))

seg_list = jieba.cut_for_search("小明硕士毕业于北航计算机学院,后到美国斯坦福大学深造。")  # search-engine mode
print("Search Mode: " + "/".join(seg_list))

# User dictionaries
seg_list = jieba.cut("李小福是创新办主任也是云计算方面的专家。")
print("Origin: " + "/".join(seg_list))
# "lixiaofu" is a plain-text dictionary file, one word per line,
# three lines in total: 创新办 / 云计算 / 也是
jieba.load_userdict("lixiaofu")
seg_list = jieba.cut("李小福是创新办主任也是云计算方面的专家。")
print("Revise: " + "/".join(seg_list))

print("/".join(jieba.cut("如果放到post中将出错。", HMM=False)))
# tune the word frequencies so that "中" and "将" are both segmented out
jieba.suggest_freq(("中", "将"), tune=True)
print("/".join(jieba.cut("如果放到post中将出错。", HMM=False)))

Original = "/".join(jieba.cut("江州市长江大桥参加了长江大桥的通车仪式。", HMM=False))
print "Original: " + Original
jieba.add_word("江大桥", freq=20000, tag=None)
print "/".join(jieba.cut("江州市长江大桥参加了长江大桥的通车仪式。"))
# "shizhang" is a plain-text dictionary file, one word per line,
# two lines in total: 市长 / 江大桥
jieba.load_userdict("shizhang")
print "Revise: " + "/".join(jieba.cut("江州市长江大桥参加了长江大桥的通车仪式。", HMM=False))
print "--------------------"

# Part-of-speech tagging
import jieba.posseg as pseg
words = pseg.cut("我爱北京天安门。")
for w in words:
    print("%s %s" % (w.word, w.flag))
print "--------------------"

# Keyword extraction: TF-IDF (extract_tags) and TextRank
import jieba.analyse as anl
f = "此外,公司拟对全资子公司吉林欧亚置业有限公司增资4.3亿元,增资后,吉林欧亚置业注册资本由7000万元增加到5亿元。吉林欧亚置业主要经营范围为房地产开发及百货零售等业务。目前在建吉林欧亚城市商业综合体项目。2013年,实现营业收入0万元,实现净利润-139.13万元。"
seg = anl.extract_tags(f, topK=20, withWeight=True)
for tag, weight in seg:
    print "%s %s" % (tag, weight)
print "--------------------"

for x, w in jieba.analyse.textrank(f, topK=5, withWeight=True):
    print("%s %s" % (x, w))
print "--------------------"

# ## Part 6. tokenize: return each word's start and end position in the original text
result = jieba.tokenize(u"永和服装饰品有限公司")
for tk in result:
    print("%s \t start at: %d \t end at: %d" % (tk[0], tk[1], tk[2]))
print "--------------------"

# ### Search mode: scan out every word that can be formed from the sentence and locate it
result = jieba.tokenize(u"永和服装饰品有限公司", mode="search")
for tk in result:
    print("%s \t start at: %d \t end at: %d" % (tk[0], tk[1], tk[2]))
print "--------------------"

# ## Part 1. Word frequency statistics, sorted in descending order
# "panjiwen" is a plain-text file made from a news article found online
article = open("panjiwen", "r").read()
words = jieba.cut(article, cut_all=False)
word_freq = {}
for word in words:
    if word in word_freq:
        word_freq[word] += 1
    else:
        word_freq[word] = 1
freq_word = []
for word, freq in word_freq.items():
    freq_word.append((word, freq))
freq_word.sort(key=lambda x: x[1], reverse=True)
max_number = 5      # how many top-frequency words to print
for word, freq in freq_word[:max_number]:
    print word, freq
print "--------------------"

# Filtering stop words
# Punctuation, function words and conjunctions should not be counted.
# "stopwords" is a plain-text file, one word per line; common stop-word lists
# can be found online. Words on the list are dropped after segmentation.
stopwords = open("stopwords", "rb").read().splitlines()
article = open("panjiwen", "r").read()
words = jieba.cut(article, cut_all=False)
stayed_line = ""
for word in words:
    if word.encode("utf-8") not in stopwords:
        stayed_line += word + " "
print stayed_line
print "--------------------"
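Before moving on, a side note on the frequency count in Part 1: it builds the dictionary and sorts it by hand, which the standard library can already do. Below is a minimal sketch of the same top-5 statistic using collections.Counter, assuming the same "panjiwen" text file is in the working directory; it is an alternative, not part of the original notes.

# A sketch, not from the original notes: the same top-5 word list as Part 1,
# using collections.Counter instead of a hand-rolled dict and manual sort.
from collections import Counter
import jieba

article = open("panjiwen", "r").read()          # same news text as above
counter = Counter(jieba.cut(article, cut_all=False))
for word, freq in counter.most_common(5):       # five most frequent tokens
    print word, freq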
# ## Part 3. Merging synonyms
# Synonym groups are listed in a file, one group per line, separated by tabs;
# the first word on a line is the one to display, the following words are the
# synonyms it replaces.
# Here "北京", "首都", "京城", "北平城" and "故都" are treated as synonyms.
combine_dict = {}
for line in open("tongyici", "r"):
    separate_word = line.strip().split("\t")
    num = len(separate_word)
    for i in range(1, num):
        combine_dict[separate_word[i]] = separate_word[0]

jieba.suggest_freq("北平城", tune=True)  # raise the frequency of "北平城" so it is segmented as one word
seg_list = jieba.cut("北京是中国的首都,京城的景色非常优美,就像当年的北平城,我爱这故都的一草一木。", cut_all=False)
f = "/".join(seg_list).encode("utf-8")  # without utf-8 encoding the words cannot be matched against the tongyici file
print f
final_sentence = ""
for word in f.split("/"):
    if word in combine_dict:
        word = combine_dict[word]
    final_sentence += word
print final_sentence
print "--------------------"

# ## Part 4. Word mention rate
# Main steps: segment -> filter stop words (omitted here) -> replace synonyms ->
# compute how often each word appears in the text.
jieba.suggest_freq("晨妈妈", tune=True)
jieba.suggest_freq("大黑牛", tune=True)
jieba.suggest_freq("能力者", tune=True)
seg_list = jieba.cut("李晨好帅,又能力超强,是“大黑牛”,也是一个能力者,还是队里贴心的晨妈妈。", cut_all=False)
f = "/".join(seg_list).encode("utf-8")
print f

# Rebuild the synonym dictionary; a new line "李晨 晨妈妈 大黑牛 能力者"
# (tab-separated) has been added to the tongyici file.
combine_dict = {}
for w in open("tongyici", "r"):
    w_1 = w.strip().split("\t")
    num = len(w_1)
    for i in range(0, num):
        combine_dict[w_1[i]] = w_1[0]
seg_list_2 = ""
for word in f.split("/"):
    if word in combine_dict:
        word = combine_dict[word]
    seg_list_2 += word
print seg_list_2
print "--------------------"

# Count word frequencies after the synonym replacement.
freq_word = {}
seg_list_3 = jieba.cut(seg_list_2, cut_all=False)
for word in seg_list_3:
    if word in freq_word:
        freq_word[word] += 1
    else:
        freq_word[word] = 1
freq_word_1 = []
for word, freq in freq_word.items():
    freq_word_1.append((word, freq))
freq_word_1.sort(key=lambda x: x[1], reverse=True)
for word, freq in freq_word_1:
    print word, freq
print "--------------------"

total_freq = 0
for i in freq_word_1:
    total_freq += i[1]
# mention rate of each word: its count divided by the total number of tokens
for word, freq in freq_word.items():
    freq = float(freq) / float(total_freq)
    print word, freq
print "--------------------"

# ## Part 5. Extracting words by part of speech
import jieba.posseg as pseg
word = pseg.cut("李晨好帅,又能力超强,是“大黑牛”,也是一个能力者,还是队里贴心的晨妈妈。")
for w in word:
    if w.flag in ["n", "v", "x"]:   # keep only words tagged n (noun), v (verb) or x
        print w.word, w.flag
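Parts 3 and 4 both read a tab-separated synonym file named "tongyici" whose contents are only described in the comments, not shown. A minimal sketch of what that file would look like, written out by a few lines of Python (the second group is the line the Part 4 comment says was added later):

# -*- coding: utf-8 -*-
# A sketch, not from the original notes: recreate the assumed "tongyici" file.
# One synonym group per line, tab-separated, with the word to keep first.
groups = [
    u"北京\t首都\t京城\t北平城\t故都",
    u"李晨\t晨妈妈\t大黑牛\t能力者",
]
with open("tongyici", "w") as fout:
    for line in groups:
        fout.write(line.encode("utf-8") + "\n")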

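Likewise, the user dictionaries loaded with jieba.load_userdict() near the top of the script are plain-text files with one word per line, and their contents are spelled out in the comments. A sketch that recreates them follows (the "stopwords" file is not listed in the notes; any common stop-word list found online will do):

# -*- coding: utf-8 -*-
# A sketch, not from the original notes: recreate the two user dictionaries
# described in the comments, one word per line.
with open("lixiaofu", "w") as fout:
    fout.write("创新办\n云计算\n也是\n")
with open("shizhang", "w") as fout:
    fout.write("市长\n江大桥\n")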