Analyzing Kobe Bryant's career data with Python libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
# load the raw shot log
filename = "data.csv"
raw = pd.read_csv(filename)
print(raw.shape)
raw.head()
# keep only the rows where the shot outcome is known (shot_made_flag is not NaN)
kobe = raw[pd.notnull(raw['shot_made_flag'])]
print(kobe.shape)
(25697, 25)
alpha = 0.02
plt.figure(figsize=(10,10))
# the same shots in two coordinate systems: court coordinates vs. latitude/longitude
plt.subplot(121)
plt.scatter(kobe.loc_x, kobe.loc_y, color='r', alpha=alpha)
plt.title('loc_x and loc_y')
plt.subplot(122)
plt.scatter(kobe.lon, kobe.lat, color='b', alpha=alpha)
plt.title('lat and lon')
# engineer distance, shot-angle and remaining-time features
raw['dist'] = np.sqrt(raw['loc_x']**2 + raw['loc_y']**2)
loc_x_zero = raw['loc_x'] == 0
raw['angle'] = np.zeros(len(raw))
# write through .loc to avoid chained-assignment problems
raw.loc[~loc_x_zero, 'angle'] = np.arctan(raw['loc_y'][~loc_x_zero] / raw['loc_x'][~loc_x_zero])
raw.loc[loc_x_zero, 'angle'] = np.pi / 2
raw['remaining_time'] = raw['minutes_remaining'] * 60 + raw['seconds_remaining']
print(kobe.action_type.unique())
print(kobe.combined_shot_type.unique())
print(kobe.shot_type.unique())
print(kobe.shot_type.value_counts())
kobe['season'].unique()
array(['2000-01', '2001-02', '2002-03', '2003-04', '2004-05', '2005-06',
'2006-07', '2007-08', '2008-09', '2009-10', '2010-11', '2011-12',
'2012-13', '2013-14', '2014-15', '2015-16', '1996-97', '1997-98',
'1998-99', '1999-00'], dtype=object)
# keep only the second half of the season label, e.g. '2000-01' -> 1
raw['season'] = raw['season'].apply(lambda x: int(x.split('-')[1]))
raw['season'].unique()
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 97,
98, 99, 0], dtype=int64)
# team_id and team_name never vary (Kobe only played for the Lakers), so they carry no signal
print(kobe['team_id'].unique())
print(kobe['team_name'].unique())
[1610612747]
['Los Angeles Lakers']
pd.DataFrame({'matchup':kobe.matchup, 'opponent':kobe.opponent})
plt.figure(figsize=(5,5))
plt.scatter(raw.dist, raw.shot_distance, color='blue')
plt.title('dist and shot_distance')
gs = kobe.groupby('shot_zone_area')
print (kobe['shot_zone_area'].value_counts())
print (len(gs))
Center(C)                11289
Right Side Center(RC)     3981
Right Side(R)             3859
Left Side Center(LC)      3364
Left Side(L)              3132
Back Court(BC)              72
Name: shot_zone_area, dtype: int64
6
import matplotlib.cm as cm
plt.figure(figsize=(20,10))
def scatter_plot_by_category(feat):
    # draw every shot, coloured by the category it falls into for the given feature
    alpha = 0.1
    gs = kobe.groupby(feat)
    cs = cm.rainbow(np.linspace(0, 1, len(gs)))
    for g, c in zip(gs, cs):
        plt.scatter(g[1].loc_x, g[1].loc_y, color=c, alpha=alpha)
plt.subplot(131)
scatter_plot_by_category('shot_zone_area')
plt.title('shot_zone_area')
plt.subplot(132)
scatter_plot_by_category('shot_zone_basic')
plt.title('shot_zone_basic')
plt.subplot(133)
scatter_plot_by_category('shot_zone_range')
plt.title('shot_zone_range')
# drop columns that are identifiers, constant, or already captured by the engineered features
drops = ['shot_id', 'team_id', 'team_name', 'shot_zone_area', 'shot_zone_range', 'shot_zone_basic',
         'matchup', 'lon', 'lat', 'seconds_remaining', 'minutes_remaining',
         'shot_distance', 'loc_x', 'loc_y', 'game_event_id', 'game_id', 'game_date']
for drop in drops:
    raw = raw.drop(drop, axis=1)
print (raw['combined_shot_type'].value_counts())
pd.get_dummies(raw['combined_shot_type'], prefix='combined_shot_type')[0:2]
# one-hot encode the categorical variables and drop the original columns
categorical_vars = ['action_type', 'combined_shot_type', 'shot_type', 'opponent', 'period', 'season']
for var in categorical_vars:
    raw = pd.concat([raw, pd.get_dummies(raw[var], prefix=var)], axis=1)
    raw = raw.drop(var, axis=1)
# rows with a known outcome form the training set; rows with a missing outcome are held out as the test set
train_kobe = raw[pd.notnull(raw['shot_made_flag'])]
train_label = train_kobe['shot_made_flag']
train_kobe = train_kobe.drop('shot_made_flag', axis=1)
test_kobe = raw[pd.isnull(raw['shot_made_flag'])]
test_kobe = test_kobe.drop('shot_made_flag', axis=1)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import confusion_matrix,log_loss
import time
import numpy as np
range_m = np.logspace(0,2,num=5).astype(int)
range_m
array([ 1, 3, 10, 31, 100])
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
print('Finding best n_estimators for RandomForestClassifier...')
min_score = 100000
best_n = 0
scores_n = []
range_n = np.logspace(0, 2, num=3).astype(int)
for n in range_n:
    print("the number of trees : {0}".format(n))
    t1 = time.time()
    rfc_score = 0.
    rfc = RandomForestClassifier(n_estimators=n)
    # 10-fold cross-validation: average the log loss over the folds
    for train_k, test_k in KFold(10, shuffle=True).split(train_kobe):
        rfc.fit(train_kobe.iloc[train_k], train_label.iloc[train_k])
        pred = rfc.predict(train_kobe.iloc[test_k])
        rfc_score += log_loss(train_label.iloc[test_k], pred) / 10
    scores_n.append(rfc_score)
    if rfc_score < min_score:
        min_score = rfc_score
        best_n = n
    t2 = time.time()
    print('Done processing {0} trees ({1:.3f}sec)'.format(n, t2 - t1))
print(best_n, min_score)
print('Finding best max_depth for RandomForestClassifier...')
min_score = 100000
best_m = 0
scores_m = []
range_m = np.logspace(0, 2, num=3).astype(int)
for m in range_m:
    print("the max depth : {0}".format(m))
    t1 = time.time()
    rfc_score = 0.
    # reuse the best number of trees found above while searching over max_depth
    rfc = RandomForestClassifier(max_depth=m, n_estimators=best_n)
    for train_k, test_k in KFold(10, shuffle=True).split(train_kobe):
        rfc.fit(train_kobe.iloc[train_k], train_label.iloc[train_k])
        pred = rfc.predict(train_kobe.iloc[test_k])
        rfc_score += log_loss(train_label.iloc[test_k], pred) / 10
    scores_m.append(rfc_score)
    if rfc_score < min_score:
        min_score = rfc_score
        best_m = m
    t2 = time.time()
    print('Done processing {0} trees ({1:.3f}sec)'.format(m, t2 - t1))
print(best_m, min_score)
Finding best n_estimators for RandomForestClassifier...
the number of trees : 1
Done processing 1 trees (0.973sec)
the number of trees : 10
Done processing 10 trees (5.755sec)
the number of trees : 100
Done processing 100 trees (51.947sec)
100 11.914000011353393
Finding best max_depth for RandomForestClassifier...
the max depth : 1
Done processing 1 trees (4.065sec)
the max depth : 10
Done processing 10 trees (16.296sec)
the max depth : 100
Done processing 100 trees (49.635sec)
10 11.040351953558947
plt.figure(figsize=(10,5))
plt.subplot(121)
plt.plot(range_n, scores_n)
plt.ylabel('score')
plt.xlabel('number of trees')
plt.subplot(122)
plt.plot(range_m, scores_m)
plt.ylabel('score')
plt.xlabel('max depth')
# retrain on the full training set with the best hyperparameters found above
model = RandomForestClassifier(n_estimators=best_n, max_depth=best_m)
model.fit(train_kobe, train_label)
Fixing the error
for train_k, test_k in KFold(len(train_kobe), n_folds=10, shuffle=True): raises a long string of errors,
because that is the old sklearn.cross_validation-style KFold signature; it has to be changed to
for train_k, test_k in KFold(10, shuffle=True).split(train_kobe):
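A minimal sketch of the updated usage, assuming the sklearn.model_selection import used earlier in the notebook (the old signature came from the long-removed sklearn.cross_validation module):
from sklearn.model_selection import KFold

# Old (removed) API: KFold(n_samples, n_folds=10, shuffle=True) was iterated over directly.
# Current API: build the splitter with the number of folds, then split a concrete dataset.
kf = KFold(n_splits=10, shuffle=True)
for train_idx, test_idx in kf.split(train_kobe):
    X_tr, X_te = train_kobe.iloc[train_idx], train_kobe.iloc[test_idx]
    y_tr, y_te = train_label.iloc[train_idx], train_label.iloc[test_idx]
    # ...fit and score one fold here, e.g. as in the loops above...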
Original: https://blog.csdn.net/qq_43966129/article/details/122799128
Author: 最白の白菜
Title: Hands-on project: analyzing Kobe Bryant's career data with Python libraries
Related reading
Title: Switching Anaconda sources on Ubuntu (up-to-date, comprehensive, personally tested)
Table of contents
- Preface
- 1. Switching Anaconda sources (works on Linux and Windows)
- 2. Removing Anaconda sources
- 3. Restoring the default Anaconda sources
- Summary
Preface
Other popular tutorials on this topic are quite old, and some of their links no longer work.
Below is the complete procedure for switching sources. This article was written on 2021-07-15, so it is current and has been personally tested.
Note: what follows is the main body of the article; the examples below are for reference.
1. Switching Anaconda sources (works on Linux and Windows)
Why switch sources: Anaconda's default channels download Python packages very slowly, so it is worth switching to a domestic (Chinese) mirror.
Method 1: switching sources from the command line
Enter the following commands in a terminal and press Enter (the Tsinghua mirror is used as the example):
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/r
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/msys2
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/msys2
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/bioconda
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/menpo
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/simpleitk
To check whether the switch succeeded, enter the following commands in the terminal:
conda config --set show_channel_urls yes
conda info
Note: you should now see that the channel URLs have all been replaced by the Tsinghua mirrors.
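For reference, these channel settings are persisted in the user-level ~/.condarc file; an abridged sketch of what it might contain after the commands above (the exact entries and their order depend on what was added):
channels:
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
  - defaults
show_channel_urls: true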
If you prefer the USTC mirror instead, use the following commands:
conda config --add channels https://mirrors.ustc.edu.cn/anaconda/pkgs/main/
conda config --add channels https://mirrors.ustc.edu.cn/anaconda/pkgs/free/
conda config --add channels https://mirrors.ustc.edu.cn/anaconda/cloud/conda-forge/
conda config --add channels https://mirrors.ustc.edu.cn/anaconda/cloud/msys2/
conda config --add channels https://mirrors.ustc.edu.cn/anaconda/cloud/bioconda/
conda config --add channels https://mirrors.ustc.edu.cn/anaconda/cloud/menpo/
conda config --add channels https://mirrors.ustc.edu.cn/anaconda/cloud/pytorch/
The check is the same as above.
Finally, it is recommended to delete the default ("defaults") source; the removal method is covered below. If you keep it, downloading Python packages may still be slow at times.
Note: conda info will show that Anaconda's default source still exists, but it is now ranked below the mirrors.
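A hedged aside: to drop the built-in defaults entry itself (rather than a mirror URL), the following should work, assuming "defaults" appears in the output of conda config --show channels:
conda config --remove channels defaults
Afterwards, conda config --show channels should list only the mirror URLs.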
Method 2: switching sources from the GUI (recommended for beginners)
Enter anaconda-navigator in the terminal
to start the graphical interface, where channels can easily be added, removed and inspected. To switch to a domestic mirror, enter the URLs one by one and then click update.
2. Removing Anaconda sources
Method 1: removing from the command line
Enter the following command in the terminal (deleting the Tsinghua source is used as the example); append the corresponding URL after the command and delete the channels one by one:
conda config --remove channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/
Method 2: removing from the GUI (recommended for beginners)
This is straightforward: as mentioned above, simply delete the corresponding URL in the interface.
3. Restoring the default Anaconda sources
Note: this is typically used to reset the download sources in one go, saving the time of deleting the URLs one by one.
Enter the following in the terminal: conda config --remove-key channels
Summary
That is the whole mirror-switching procedure. This article was written on 2021-07-15, so it is current and has been personally tested.
Common Anaconda commands
Setting up a PyTorch environment with Anaconda
Setting up a TensorFlow environment with Anaconda
If this walkthrough helped you, please consider giving it a (free) like n(≧▽≦)n. If you have other questions, leave a comment and the author will reply. O(∩_∩)O~
To install and configure special environments such as PyTorch or TensorFlow, or to learn more about using Anaconda, see the author's other posts.
Original: https://blog.csdn.net/KIK9973/article/details/118776314
Author: 向日葵骑士Faraday
Title: Switching Anaconda sources on Ubuntu (up-to-date, comprehensive, personally tested)
