官网案例 doc2vec方法运行出错
案例原址
https://pypi.org/project/word2vec/
https://nbviewer.jupyter.org/github/danielfrg/word2vec/blob/master/examples/doc2vec.ipynb
出错代码-1
# Build the doc2vec training file: one line per review, prefixed with a
# unique "_*<id>" document label (the format the word2vec doc2vec tool
# expects). NOTE(review): relies on `directories`, `input_file`, `os` and
# `nltk` being defined earlier in the notebook.
id_ = 0
for directory in directories:
    rootdir = os.path.join('./aclImdb', directory)
    for subdir, dirs, files in os.walk(rootdir):
        for file_ in files:
            # Be explicit about the encoding: without it, Windows opens the
            # file with the locale codec (e.g. cp936) and UTF-8 reviews can
            # raise UnicodeDecodeError.
            with open(os.path.join(subdir, file_), 'r', encoding='utf-8') as f:
                doc_id = '_*%i' % id_
                id_ += 1
                text = f.read()  # removed the no-op `text = text` line
                tokens = nltk.word_tokenize(text)
                doc = " ".join(tokens).lower()
                # Bug fix: str.encode() returns bytes in Python 3, so
                # "%s" % doc would write the literal "b'...'" repr into the
                # training file. Strip non-ASCII chars, then decode back to
                # str before formatting.
                doc = doc.encode('ascii', 'ignore').decode('ascii')
                input_file.write("%s %s\n" % (doc_id, doc))
LookupError Traceback (most recent call last)
<ipython-input-6-d487135ac3c4> in <module>()
10 text = f.read()
11 text = text
---> 12 tokens = nltk.word_tokenize(text)
13 doc = " ".join(tokens).lower()
14 doc = doc.encode('ascii', 'ignore')
D:\Anaconda\lib\site-packages\nltk\tokenize\__init__.py in word_tokenize(text, language, preserve_line)
128 :type preserver_line: bool
129 """
--> 130 sentences = [text] if preserve_line else sent_tokenize(text, language)
131 return [token for sent in sentences
132 for token in _treebank_word_tokenizer.tokenize(sent)]
D:\Anaconda\lib\site-packages\nltk\tokenize\__init__.py in sent_tokenize(text, language)
94 :param language: the model name in the Punkt corpus
95 """
---> 96 tokenizer = load('tokenizers/punkt/{0}.pickle'.format(language))
97 return tokenizer.tokenize(text)
98
D:\Anaconda\lib\site-packages\nltk\data.py in load(resource_url, format, cache, verbose, logic_parser, fstruct_reader, encoding)
812
813 # Load the resource.
--> 814 opened_resource = _open(resource_url)
815
816 if format == 'raw':
D:\Anaconda\lib\site-packages\nltk\data.py in _open(resource_url)
930
931 if protocol is None or protocol.lower() == 'nltk':
--> 932 return find(path_, path + ['']).open()
933 elif protocol.lower() == 'file':
934 # urllib might not use mode='rb', so handle this one ourselves:
D:\Anaconda\lib\site-packages\nltk\data.py in find(resource_name, paths)
651 sep = '*' * 70
652 resource_not_found = '\n%s\n%s\n%s' % (sep, msg, sep)
--> 653 raise LookupError(resource_not_found)
654
655
LookupError:
**********************************************************************
Resource 'tokenizers/punkt/english.pickle' not found. Please
use the NLTK Downloader to obtain the resource: >>>
nltk.download()
Searched in:
- 'C:\\Users\\v_dilldi/nltk_data'
- 'C:\\nltk_data'
- 'D:\\nltk_data'
- 'E:\\nltk_data'
- 'D:\\Anaconda\\nltk_data'
- 'D:\\Anaconda\\lib\\nltk_data'
- 'C:\\Users\\v_dilldi\\AppData\\Roaming\\nltk_data'
- ''
**********************************************************************
解决问题-1
解决问题网址
https://www.douban.com/note/534906136/
nltk.download('punkt')
[nltk_data] Downloading package punkt to
[nltk_data] C:\Users\v_dilldi\AppData\Roaming\nltk_data...
[nltk_data] Unzipping tokenizers\punkt.zip.
True
出错代码-2
word2vec.doc2vec('/Users/drodriguez/Downloads/alldata.txt', '/Users/drodriguez/Downloads/vectors.bin', cbow=0, size=100, window=10, negative=5, hs=0, sample='1e-4', threads=12, iter_=20, min_count=1, verbose=True)
FileNotFoundError Traceback (most recent call last)
<ipython-input-18-842ffe598963> in <module>()
----> 1 word2vec.doc2vec('alldata.txt', 'vectors.bin', cbow=0, size=100, window=10, negative=5, hs=0, sample='1e-4', threads=12, iter_=20, min_count=1, verbose=True)
D:\Anaconda\lib\site-packages\word2vec\scripts_interface.py in doc2vec(train, output, size, window, sample, hs, negative, threads, iter_, min_count, alpha, debug, binary, cbow, save_vocab, read_vocab, verbose)
262 command.append("1")
263
--> 264 run_cmd(command, verbose=verbose)
265
266
D:\Anaconda\lib\site-packages\word2vec\scripts_interface.py in run_cmd(command, verbose)
266
267 def run_cmd(command, verbose=False):
--> 268 proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
269
270 if verbose:
D:\Anaconda\lib\subprocess.py in __init__(self, args, bufsize, executable, stdin, stdout, stderr, preexec_fn, close_fds, shell, cwd, env, universal_newlines, startupinfo, creationflags, restore_signals, start_new_session, pass_fds, encoding, errors)
705 c2pread, c2pwrite,
706 errread, errwrite,
--> 707 restore_signals, start_new_session)
708 except:
709 # Cleanup if the child failed starting.
D:\Anaconda\lib\subprocess.py in _execute_child(self, args, executable, preexec_fn, close_fds, pass_fds, cwd, env, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite, unused_restore_signals, unused_start_new_session)
990 env,
991 os.fspath(cwd) if cwd is not None else None,
--> 992 startupinfo)
993 finally:
994 # Child is launched. Close the parent's copy of those pipe
FileNotFoundError: [WinError 2] 系统找不到指定的文件。
解决问题-2
安装less.js https://fdream.net/blog/article/783.aspx 未解决问题
解决网址
https://blog.csdn.net/u014094184/article/details/80085336 在lib文件夹中找到subprocess.py
1、搜索 class Popen(object):
2、将 __init__ 的默认参数 shell=False 修改为 shell=True
再运行,FileNotFoundError 消失。注意:直接修改标准库 subprocess.py 会影响本机所有 Python 程序,属于有风险的全局改动;更稳妥的做法是确认 word2vec 的可执行文件(如 word2vec-doc2vec)已随包正确安装并且在系统 PATH 中。
出现了新的问题 未解决。可能原因分析:shell=True 只是掩盖了"找不到可执行文件"的根本问题——命令改由 cmd 启动后,可执行文件仍然缺失,表现为子进程非零返回码,于是抛出 "The training could not be completed"。另外注意下面调用中的路径 'D:\practice\Word2Vec\alldata.txt' 不是原始字符串,其中 '\a' 会被解释为转义字符(响铃符 \x07),应写成 r'D:\...' 原始字符串或改用正斜杠 '/'。
Exception Traceback (most recent call last)
<ipython-input-12-0ba485e5bf89> in <module>()
----> 1 word2vec.doc2vec('D:\practice\Word2Vec\alldata.txt', 'D:\practice\Word2Vec\vectors.bin', cbow=0, size=100, window=10, negative=5, hs=0, sample='1e-4', threads=1, iter_=20, min_count=1, verbose=True)
D:\Anaconda\lib\site-packages\word2vec\scripts_interface.py in doc2vec(train, output, size, window, sample, hs, negative, threads, iter_, min_count, alpha, debug, binary, cbow, save_vocab, read_vocab, verbose)
262 command.append("1")
263
--> 264 run_cmd(command, verbose=verbose)
265
266
D:\Anaconda\lib\site-packages\word2vec\scripts_interface.py in run_cmd(command, verbose)
278
279 if proc.returncode != 0:
--> 280 raise Exception("The training could not be completed.")
281
282 out, err = proc.communicate()
Exception: The training could not be completed.