I have some code that gives me a list of words with the frequencies at which they occur in a text. I'm looking to make the code automatically convert the top 10 words into an ARFF file with
@RELATION wordfrequencies
@ATTRIBUTE word string
@ATTRIBUTE frequency numeric
and the top 10 as data with their frequency.
I'm struggling with how to do this with my current code
import re
import nltk
# Quran subset
filename = 'subsetQuran.txt'
# create l开发者_StackOverflow中文版ist of lower case words
word_list = re.split('\s+', file(filename).read().lower())
print 'Words in text:', len(word_list)
word_list2 = [w.strip() for w in word_list if w.strip() not in nltk.corpus.stopwords.words('english')]
# create dictionary of word:frequency pairs
freq_dic = {}
# punctuation and numbers to be removed
punctuation = re.compile(r'[-.?!,":;()|0-9]')
for word in word_list2:
# remove punctuation marks
word = punctuation.sub("", word)
# form dictionary
try:
freq_dic[word] += 1
except:
freq_dic[word] = 1
print '-'*30
print "sorted by highest frequency first:"
# create list of (val, key) tuple pairs
freq_list2 = [(val, key) for key, val in freq_dic.items()]
# sort by val or frequency
freq_list2.sort(reverse=True)
freq_list3 = list(freq_list2)
# display result
for freq, word in freq_list2:
print word, freq
f = open("wordfreq.txt", "w")
f.write( str(freq_list3) )
f.close()
Any help with this is appreciated — figuring out how to do this is really racking my brain!
I hope you don't mind the slight rewrite:
import re
import nltk
from collections import defaultdict
# Quran subset
filename = 'subsetQuran.txt'
# create list of lower case words
word_list = open(filename).read().lower().split()
print 'Words in text:', len(word_list)
# remove stopwords
word_list = [w for w in word_list if w not in nltk.corpus.stopwords.words('english')]
# create dictionary of word:frequency pairs
freq_dic = defaultdict(int)
# punctuation and numbers to be removed
punctuation = re.compile(r'[-.?!,":;()|0-9]')
for word in word_list:
# remove punctuation marks
word = punctuation.sub("", word)
# increment count for word
freq_dic[word] += 1
print '-' * 30
print "sorted by highest frequency first:"
# create list of (frequency, word) tuple pairs
freq_list = [(freq, word) for word, freq in freq_dic.items()]
# sort by descending frequency
freq_list.sort(reverse=True)
# display result
for freq, word in freq_list:
print word, freq
# write ARFF file for 10 most common words
f = open("wordfreq.txt", "w")
f.write("@RELATION wordfrequencies\n")
f.write("@ATTRIBUTE word string\n")
f.write("@ATTRIBUTE frequency numeric\n")
f.write("@DATA\n")
for freq, word in freq_list[ : 10]:
f.write("'%s',%d\n" % (word, freq))
f.close()
Featured comments