Innovation Seminar Course: Finding Song Lyric Keywords with TF-IDF

import csv
import re

from matplotlib import pyplot as plt
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from wordcloud import WordCloud

songlistsFile = 'songlyrics.pic'
tf_idfFile = 'tf_idf.pic'
DATAPATH = './songlyreics'
DATANAME = 'songdata.csv'
filename = 'F://songdata.csv'  # path to the songdata.csv lyrics file read by Reader()

# Hard-coded English stopword list used by fenci() below to filter tokens
STOPWORDS=['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't"];

# NLTK's English stopword list; passed to extract() later as the TF-IDF stop word list
words = stopwords.words('english')
print(words)

# Sample text defined for testing; not used later in the script
string = 'to be and not to be be and that'
class lyrics():
    # Simple container for one CSV row: artist, song title, link, and lyric text
    def __init__(self, artist, song, link, text):
        self.artist = artist
        self.song = song
        self.link = link
        self.text = text
def Reader():
    """Read every row of the CSV (including the header row) into lyrics objects."""
    Tot_List = []
    All = []
    Song_Num = -1
    with open(filename, 'r') as csv_file:
        reader = csv.reader(csv_file)
        for data in reader:
            All.append(data)
    for song in All:
        Song_Num = Song_Num + 1
        Tot_List.append(lyrics(song[0], song[1], song[2], song[3]))
    # Song_Num starts at -1, so the header row is not counted as a song
    print("%d songs in total." % (Song_Num))
    return Tot_List
def fenci(targ):
    """Tokenise a lyric string (fenci = word segmentation): keep alphabetic words,
    lowercase them, lemmatise them as verbs, and drop stopwords and single letters."""
    lemmatizer = WordNetLemmatizer()
    pattern = r'[a-zA-Z]+'
    targ = re.findall(pattern, targ)
    i = 0
    while i < len(targ):
        targ[i] = targ[i].lower()
        targ[i] = lemmatizer.lemmatize(targ[i], pos='v')
        if targ[i] in STOPWORDS or len(targ[i]) == 1:
            targ.pop(i)  # drop the token; the next token shifts into position i and is handled next pass
        else:
            i = i + 1
    return targ
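
# Quick sanity check of fenci() (an illustrative addition, not part of the original
# script): run it on a made-up lyric line and look at the surviving tokens.
sample = 'She was dancing in the moonlight and singing all night long'
print(fenci(sample))
# should print something like: ['dance', 'moonlight', 'sing', 'night', 'long']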
def extract(text, stoplist):
    """Fit a TF-IDF model on a list of documents and return (word, weight) pairs
    sorted by descending weight."""
    tfidf_vec = TfidfVectorizer(stop_words=stoplist)
    tfidf_matrix = tfidf_vec.fit_transform(text)  # expects a list of documents (strings)
    weight = tfidf_matrix.toarray()               # dense TF-IDF matrix: weight[i][j] is the weight of word j in document i
    word = tfidf_vec.get_feature_names_out()      # vocabulary of the bag-of-words model
                                                  # (use get_feature_names() on scikit-learn < 1.0)
    words = []
    weights = []
    # Walk over every document (outer loop) and every vocabulary word (inner loop);
    # keep the first non-zero weight seen for each word
    for i in range(len(weight)):
        for j in range(len(word)):
            if weight[i][j] != 0:
                if word[j] not in words:
                    words.append(word[j])
                    weights.append(weight[i][j])
    sorting = list(zip(words, weights))
    # print(sorting)
    sorting = sorted(sorting, key=lambda x: x[1], reverse=True)
    return sorting
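
# Small illustrative demo of extract() (an addition, not in the original script):
# three toy "documents" are ranked by TF-IDF weight. The result is a list of
# (word, weight) pairs, each word keeping the first non-zero weight encountered.
toy_docs = ['love love love you tonight',
            'love the night',
            'dance all night']
for w, score in extract(toy_docs, stopwords.words('english')):
    print(w, round(score, 3))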

SongList = Reader()
for entry in SongList:
    song = entry.song
    link = entry.link
    text = entry.text
    artist = entry.artist

    # Skip the CSV header row, whose artist field is the literal string 'artist'
    if artist != 'artist':
        text = fenci(text)
        # print(text)
        # Note: extract() receives the token list, so each token is treated as one document here.
        sorting = extract(text, words)
        # `sorting` now holds (word, weight) pairs for this song, sorted by weight;
        # see the word-cloud sketch below for one way to display it.
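
# Minimal sketch (an assumption, not part of the original script) of how the imported
# WordCloud and matplotlib modules could be used: turn the (word, weight) list of the
# last processed song, still held in `sorting`, into a word cloud. The cut-off of
# 50 words is arbitrary.
top_keywords = dict(sorting[:50])
wc = WordCloud(width=800, height=400, background_color='white')
wc.generate_from_frequencies(top_keywords)
plt.imshow(wc, interpolation='bilinear')
plt.axis('off')
plt.show()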

