# Plot cos(x) over [-1.5*pi, 2*pi] with LaTeX pi-multiple tick labels,
# and annotate the point x = 2*pi/3 with a drop line, marker and arrow.
import numpy as np
import matplotlib.pyplot as plt

# 256 evenly spaced samples, endpoint included.
X = np.linspace(-1.5 * np.pi, 2 * np.pi, 256, endpoint=True)
C = np.cos(X)

fig = plt.figure(figsize=(10, 4), dpi=100, tight_layout=True)
ax = fig.add_subplot(111)

# `label=` is required so ax.legend() below has an entry to show.
ax.plot(X, C, color="blue", linewidth=2.5, linestyle="-", label="cosine")

xAxis = ax.get_xaxis()
yAxis = ax.get_yaxis()

# Pad the data interval by 10% so the curve does not touch the frame.
xAxis.set_data_interval(X.min() * 1.1, X.max() * 1.1)
xAxis.set_ticks([-1.5 * np.pi, -np.pi, -np.pi / 2, 0, np.pi / 2, np.pi,
                 1.5 * np.pi, 2 * np.pi])
xAxis.set_ticklabels([r'$-3/2\pi$', r'$-\pi$', r'$-\pi/2$', r'$0$',
                      r'$+\pi/2$', r'$+\pi$', r'$+3/2\pi$', r'$+2\pi$'])

yAxis.set_data_interval(C.min() * 1.1, C.max() * 1.1)
yAxis.set_ticks([-1, +1])
yAxis.set_ticklabels([r'$-1$', r'$+1$'])

# BUG FIX: calling fig.add_subplot(111) a second time re-used the same
# axes and raised MatplotlibDeprecationWarning ("Adding an axes using the
# same arguments as a previous axes ..."). Draw on the existing `ax`.
t = 2 * np.pi / 3  # x position being annotated

# Dashed drop line from y=0 up to the curve, plus a marker on the curve.
ax.plot([t, t], [0, np.cos(t)], color='red', linewidth=1.5, linestyle='--')
ax.scatter([t], [np.cos(t)], 50, color='blue')

# BUG FIX: textcoords must be 'offset points' (with a space) —
# 'offsetpoints' raised "ValueError: offsetpoints is not a recognized
# coordinate" at draw time.
# NOTE(review): the annotation text was lost in the paste; this LaTeX
# string is a reconstruction of the book's example — confirm against it.
ax.annotate(r'$\cos(\frac{2\pi}{3})=-\frac{1}{2}$',
            xy=(t, np.cos(t)), xycoords='data',
            xytext=(-90, -50), textcoords='offset points', fontsize=16,
            arrowprops=dict(arrowstyle="->", connectionstyle="arc3, rad=.2"))

# BUG FIX: the valid location string is 'upper left', not 'upperleft'
# (falls back to 'best' with a warning before mpl 3.3, raises afterwards).
ax.legend(loc='upper left', frameon=False)

plt.show()



runfile('/Users/keny/PythonProjects/flaskdemo2/', wdir='/Users/keny/PythonProjects/flaskdemo2')
/Users/keny/PythonProjects/flaskdemo2/ MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance.  In a future version, a new instance will always be created and returned.  Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
  ax2  = fig.add_subplot(111)
/Users/keny/PythonProjects/flaskdemo2/ MatplotlibDeprecationWarning: Unrecognized location 'upperleft'. Falling back on 'best'; valid locations are
    upper right
    upper left
    lower left
    lower right
    center left
    center right
    lower center
    upper center
This will raise an exception in 3.3.
  ax.legend(loc='upperleft', frameon=False)
Traceback (most recent call last):
  File "<input>", line 1, in <module>
  File "/Applications/", line 197, in runfile
    pydev_imports.execfile(filename, global_vars, local_vars)  # execute the script
  File "/Applications/", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
  File "/Users/keny/PythonProjects/flaskdemo2/", line 47, in <module>
  File "/Users/keny/PythonProjects/flaskdemo2/venv/lib/python3.7/site-packages/matplotlib/", line 443, in show
  File "/Applications/", line 99, in show
  File "/Applications/", line 64, in show
  File "/Users/keny/PythonProjects/flaskdemo2/venv/lib/python3.7/site-packages/matplotlib/cbook/", line 358, in wrapper
    return func(*args, **kwargs)

    raise ValueError("%s is not a recognized coordinate" % s)
ValueError: offsetpoints is not a recognized coordinate

发布了282 篇原创文章 · 获赞 16 · 访问量 3万+



# 这是深度学习入门这本书里的一段代码,请问这个问题是什么意思以及怎样解决? 报错如下:(下面有源代码)Python 3.7.3 (default, Mar 27 2019, 17:13:21) [MSC v.1915 64 bit (AMD64)] on win32 runfile('E:/PycharmProjects/deep-learning-from-scratch-master/ch03/', wdir='E:/PycharmProjects/deep-learning-from-scratch-master/ch03') Converting train-images-idx3-ubyte.gz to NumPy Array ... Traceback (most recent call last): File "D:\Anaconda3\lib\site-packages\IPython\core\", line 3296, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-2-eab209ee1d7f>", line 1, in <module> runfile('E:/PycharmProjects/deep-learning-from-scratch-master/ch03/', wdir='E:/PycharmProjects/deep-learning-from-scratch-master/ch03') File "D:\Program Files\JetBrains\PyCharm 2019.1.1\helpers\pydev\_pydev_bundle\", line 197, in runfile pydev_imports.execfile(filename, global_vars, local_vars) # execute the script File "D:\Program Files\JetBrains\PyCharm 2019.1.1\helpers\pydev\_pydev_imps\", line 18, in execfile exec(compile(contents+"\n", file, 'exec'), glob, loc) File "E:/PycharmProjects/deep-learning-from-scratch-master/ch03/", line 13, in <module> (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False) File "E:\PycharmProjects\deep-learning-from-scratch-master\dataset\", line 106, in load_mnist init_mnist() File "E:\PycharmProjects\deep-learning-from-scratch-master\dataset\", line 76, in init_mnist dataset = _convert_numpy() 源代码为:# coding: utf-8 import sys, os sys.path.append(os.pardir) # 为了导入父目录的文件而进行的设定 import numpy as np from dataset.mnist import load_mnist from PIL import Image def img_show(img): pil_img = Image.fromarray(np.uint8(img)) (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False) img = x_train[0] label = t_train[0] print(label) # 5 print(img.shape) # (784,) img = img.reshape(28, 28) # 把图像的形状变为原来的尺寸 print(img.shape) # (28, 28) img_show(img) # coding: utf-8 try: import urllib.request except ImportError: raise ImportError('You should use Python 3.x') import 
os.path import gzip import pickle import os import numpy as np url_base = '' key_file = { 'train_img':'train-images-idx3-ubyte.gz', 'train_label':'train-labels-idx1-ubyte.gz', 'test_img':'t10k-images-idx3-ubyte.gz', 'test_label':'t10k-labels-idx1-ubyte.gz' } dataset_dir = os.path.dirname(os.path.abspath(__file__)) save_file = dataset_dir + "/mnist.pkl" train_num = 60000 test_num = 10000 img_dim = (1, 28, 28) img_size = 784 def _download(file_name): file_path = dataset_dir + "/" + file_name if os.path.exists(file_path): return print("Downloading " + file_name + " ... ") urllib.request.urlretrieve(url_base + file_name, file_path) print("Done") def download_mnist(): for v in key_file.values(): _download(v) def _load_label(file_name): file_path = dataset_dir + "/" + file_name print("Converting " + file_name + " to NumPy Array ...") with, 'rb') as f: labels = np.frombuffer(, np.uint8, offset=8) print("Done") return labels def _load_img(file_name): file_path = dataset_dir + "/" + file_name print("Converting " + file_name + " to NumPy Array ...") with, 'rb') as f: data = np.frombuffer(, np.uint8, offset=16) data = data.reshape(-1, img_size) print("Done") return data def _convert_numpy(): dataset = {} dataset['train_img'] = _load_img(key_file['train_img']) dataset['train_label'] = _load_label(key_file['train_label']) dataset['test_img'] = _load_img(key_file['test_img']) dataset['test_label'] = _load_label(key_file['test_label']) return dataset def init_mnist(): download_mnist() dataset = _convert_numpy() print("Creating pickle file ...") with open(save_file, 'wb') as f: pickle.dump(dataset, f, -1) print("Done!") def _change_one_hot_label(X): T = np.zeros((X.size, 10)) for idx, row in enumerate(T): row[X[idx]] = 1 return T def load_mnist(normalize=True, flatten=True, one_hot_label=False): """读入MNIST数据集 Parameters ---------- normalize : 将图像的像素值正规化为0.0~1.0 one_hot_label : one_hot_label为True的情况下,标签作为one-hot数组返回 one-hot数组是指[0,0,1,0,0,0,0,0,0,0]这样的数组 flatten : 是否将图像展开为一维数组 
Returns ------- (训练图像, 训练标签), (测试图像, 测试标签) """ if not os.path.exists(save_file): init_mnist() with open(save_file, 'rb') as f: dataset = pickle.load(f) if normalize: for key in ('train_img', 'test_img'): dataset[key] = dataset[key].astype(np.float32) dataset[key] /= 255.0 if one_hot_label: dataset['train_label'] = _change_one_hot_label(dataset['train_label']) dataset['test_label'] = _change_one_hot_label(dataset['test_label']) if not flatten: for key in ('train_img', 'test_img'): dataset[key] = dataset[key].reshape(-1, 1, 28, 28) return (dataset['train_img'], dataset['train_label']), (dataset['test_img'], dataset['test_label']) if __name__ == '__main__': init_mnist() 问答

python爬取豆瓣电影 一直报错 求解决


==== RESTART: C:\Users\123\AppData\Local\Programs\Python\Python36\类的学习.py ==== Traceback (most recent call last): File "C:\Users\123\AppData\Local\Programs\Python\Python36\类的学习.py", line 29, in <module> movies_list=get_review(getHtmlText(url)) File "C:\Users\123\AppData\Local\Programs\Python\Python36\类的学习.py", line 20, in get_review dict['name']=tag_li.find('span','titlt')[0].string TypeError: 'NoneType' object is not subscriptable ——代码如下————————————————————————————— import requests from bs4 import BeautifulSoup import bs4 def getHtmlText(url): try: r = requests.get(url, timeout = 30); r.raise_for_status(); r.encoding = r.apparent_encoding; return r.text; except: return "" def get_review(html): movies_list=[] soup=BeautifulSoup(html,"html.parser") soup=soup.find('ol','grid_view') for tag_li in soup.find_all('li'): dict={} dict['rank']=tag_li.find('em').string dict['name']=tag_li.find('span','titlt')[0].string dict['score']=tag_li.find('span','rating_num').string if(tag_li.find('span','inq')): dict['desc']=tag_li.find('span','inq').string movies_list.append(dict) return movies_list if __name__=='__main__': for i in range(10): url='' %(i*25) movies_list=get_review(getHtmlText(url)) for movie_dict in movies_list: print('电影排名:'+movie_dict['rank']) print('电影名称:'+movie_dict.get('name')) print('电影评分:'+movie_dict.get('score')) print('电影评词:'+movie_dict.get('desc','无评词')) print('------------------------------------------------------') 问答


©️2019 CSDN 皮肤主题: 1024 设计师: 上身试试