Studying the source code
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

def true_fun(X):
    return 1.5 * X + 0.2

np.random.seed(0)
n_samples = 30
# Generate random data as the training set and add some noise
X_train = np.sort(np.random.rand(n_samples))
y_train = (true_fun(X_train) + np.random.randn(n_samples) * 0.05).reshape(n_samples, 1)

model = LinearRegression()
model.fit(X_train[:, np.newaxis], y_train)
print("Parameter w:", model.coef_)
print("Parameter b:", model.intercept_)

X_test = np.linspace(0, 1, 100)
plt.plot(X_test, model.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X_train, y_train)
plt.legend(loc="best")
plt.show()
```
np.random.seed()
```python
import numpy as np

def abc():
    for i in range(5):
        np.random.seed(1)          # reseed with the same value on every iteration
        print(np.random.rand(2))

abc()
```

Output:

```
[0.417022 0.72032449]
[0.417022 0.72032449]
[0.417022 0.72032449]
[0.417022 0.72032449]
[0.417022 0.72032449]
```
```python
import numpy as np

def abc_n():
    np.random.seed(1)              # seed set only once, before the loop
    for i in range(5):
        print(np.random.rand(2))

abc_n()
```

Output:

```
[0.417022 0.72032449]
[1.14374817e-04 3.02332573e-01]
[0.14675589 0.09233859]
[0.18626021 0.34556073]
[0.39676747 0.53881673]
```
Setting the same np.random.seed() produces the same random numbers. In abc_n, by the second pass through the loop the generator is no longer in the state set by the single np.random.seed(1) call at the start, so the later draws differ from the first.
np.sort
sort(a, axis=-1, kind=None, order=None): a is the array to be sorted; axis=-1 or 1 sorts within each row (along the last axis), and -1 is the default.
```python
import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 6],
              [9, 8, 7]])
a = np.sort(a)
print(a)
```

Output:

```
[[1 2 3]
 [4 5 6]
 [7 8 9]]
```
With axis=0 the array is sorted along the columns:
```python
import numpy as np

a = np.array([[2, 1, 3],
              [4, 5, 6],
              [0, 8, 7]])
a = np.sort(a, axis=0)
print(a)
```

Output:

```
[[0 1 3]
 [2 5 6]
 [4 8 7]]
```
LinearRegression.fit()
```python
def fit(self, X, y, sample_weight=None):
    """
    Fit linear model.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data
    y : array-like of shape (n_samples,) or (n_samples, n_targets)
        Target values. Will be cast to X's dtype if necessary
    sample_weight : array-like of shape (n_samples,), default=None
        Individual weights for each sample
    """
```
plt.scatter()
```python
def scatter(
        x, y, s=None, c=None, marker=None, cmap=None, norm=None,
        vmin=None, vmax=None, alpha=None, linewidths=None,
        verts=cbook.deprecation._deprecated_parameter,
        edgecolors=None, *, plotnonfinite=False, data=None, **kwargs):
```
The scatter function draws a scatter plot.
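For instance, a minimal sketch (the data and parameter values below are made up for illustration; they are not taken from the code above):

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.random.rand(50)
y = np.random.rand(50)
# s sets the marker size, c the color, alpha the transparency
plt.scatter(x, y, s=30, c="red", marker="o", alpha=0.6)
plt.show()
```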
II. Polynomial Regression
Source code
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score

def true_fun(X):
    return np.cos(1.5 * np.pi * X)

np.random.seed(0)
n_samples = 30
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1

degrees = [1, 4, 15]
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
    ax = plt.subplot(1, len(degrees), i + 1)
    plt.setp(ax, xticks=(), yticks=())

    polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False)
    linear_regression = LinearRegression()
    pipeline = Pipeline([("polynomial_features", polynomial_features),
                         ("linear_regression", linear_regression)])
    pipeline.fit(X[:, np.newaxis], y)
    scores = cross_val_score(pipeline, X[:, np.newaxis], y,
                             scoring="neg_mean_squared_error", cv=10)

    X_test = np.linspace(0, 1, 100)
    plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
    plt.plot(X_test, true_fun(X_test), label="True function")
    plt.scatter(X, y, edgecolor='b', s=20, label="Samples")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.xlim((0, 1))
    plt.ylim((-2, 2))
    plt.legend(loc="best")
    plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
        degrees[i], -scores.mean(), scores.std()))
plt.show()
```
np.random.randn(d0, d1, ..., dn)
1) With no argument, it returns a single float.
2) With one argument, it returns a rank-1 array, which cannot represent a vector or a matrix.
3) With two or more arguments, it returns an array of the corresponding shape, which can represent a vector or a matrix.
4) np.random.standard_normal() is similar to np.random.randn(), except that the shape is passed as a single tuple.
5) The arguments to np.random.randn() are normally integers; float arguments are truncated to integers.
(Source: https://blog.csdn.net/qq_40130759/article/details/79535575)
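A quick sketch of the different call forms (the shapes chosen here are arbitrary):

```python
import numpy as np

print(np.random.randn())                   # no argument: a single float
print(np.random.randn(3))                  # one argument: a 1-D array of length 3
print(np.random.randn(2, 3))               # two arguments: a 2x3 array
print(np.random.standard_normal((2, 3)))   # same shape, but the size is passed as a tuple
```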
plt.subplot()
plt.subplot(nrows, ncols, index, **kwargs). The positional arguments are nrows (number of rows), ncols (number of columns) and index (the position of the subplot). The keyword argument projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', 'polar', 'rectilinear', str}, optional, selects the projection type of the subplot (Axes); str is the name of a custom projection (see projections), and the default None results in a 'rectilinear' projection. For example, passing projection='polar' gives a polar plot, while the default None gives an ordinary rectilinear plot.
```python
import matplotlib.pyplot as plt

plt.subplot(3, 2, 1)   # first cell of a 3x2 grid of subplots
```
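A small sketch contrasting the default projection with a polar one (the data is made up for illustration):

```python
import numpy as np
import matplotlib.pyplot as plt

theta = np.linspace(0, 2 * np.pi, 100)

ax1 = plt.subplot(1, 2, 1)                       # default rectilinear axes
ax1.plot(theta, np.sin(theta))

ax2 = plt.subplot(1, 2, 2, projection="polar")   # polar axes
ax2.plot(theta, np.abs(np.sin(theta)))

plt.show()
```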
plt.setp() sets the properties of an object, or queries the allowed values of a property.
setp() lets you create a plot first and then modify its properties afterwards:
```python
import matplotlib.pyplot as plt

line, = plt.plot([1, 2, 3])
plt.setp(line, linestyle='--')   # switch the line to a dashed style
plt.show()
```
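setp can also act on several artists at once, and calling it with just a property name prints that property's valid values; a short sketch under those assumptions:

```python
import matplotlib.pyplot as plt

lines = plt.plot([1, 2, 3], [1, 4, 9], [1, 2, 3], [3, 2, 1])
plt.setp(lines, color='r', linewidth=2.0)   # apply properties to all lines at once
plt.setp(lines[0], 'linestyle')             # print the allowed values of 'linestyle'
plt.show()
```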
The xticks() function
Sets the x-axis tick properties. Signature:
xticks(ticks, [labels], **kwargs)
- ticks: array-like, the positions at which x-axis ticks are placed
- [labels]: array-like, the label displayed at each tick
- **kwargs: appearance properties such as the labels' rotation and color
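A short sketch of the call (the tick positions and label strings here are arbitrary):

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(5)
plt.plot(x, x ** 2)
# One tick per integer position, custom labels, rotated 45 degrees and colored blue
plt.xticks(ticks=x, labels=["a", "b", "c", "d", "e"], rotation=45, color="blue")
plt.show()
```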
PolynomialFeatures()
sklearn.preprocessing.PolynomialFeatures(degree=2, *, interaction_only=False, include_bias=True, order='C')
Generates polynomial and interaction features. If an input sample is two-dimensional and of the form [a, b], its degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
degree: controls the degree of the polynomial.
interaction_only: defaults to False; if set to True, no feature is combined with itself, so the quadratic features above would not include a^2 and b^2.
include_bias: defaults to True; if True, the constant 1 term above is included.
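A short sketch of both settings on a single sample [a, b] = [2, 3]:

```python
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X = np.array([[2, 3]])          # one sample of the form [a, b]

poly = PolynomialFeatures(degree=2)
print(poly.fit_transform(X))    # [[1. 2. 3. 4. 6. 9.]] i.e. [1, a, b, a^2, ab, b^2]

poly = PolynomialFeatures(degree=2, interaction_only=True, include_bias=False)
print(poly.fit_transform(X))    # [[2. 3. 6.]] i.e. [a, b, ab]
```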
cross_val_score
```python
cross_val_score(estimator, X, y=None, *, groups=None, scoring=None,
                cv=None, n_jobs=None, verbose=0, fit_params=None,
                pre_dispatch='2*n_jobs', error_score=nan)
```
With cv=10 the data set is split into 10 parts; each part serves as the test set once while the remaining parts are used for training. Advantages of cross-validation:
1. It evaluates the model's predictive performance, in particular how a trained model behaves on new data, and can reduce overfitting to some extent.
2. It extracts as much useful information as possible from a limited amount of data.
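A minimal sketch of 10-fold cross-validation on synthetic data (the data and model here are made up; the scoring mirrors the polynomial-regression example above):

```python
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score

rng = np.random.RandomState(0)
X = rng.rand(50, 1)
y = 3 * X.ravel() + rng.randn(50) * 0.1

# cv=10: ten folds, each used once as the held-out test set
scores = cross_val_score(LinearRegression(), X, y,
                         scoring="neg_mean_squared_error", cv=10)
print("MSE: {:.4f} (+/- {:.4f})".format(-scores.mean(), scores.std()))
```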
plt.legend()
A quick summary of the commonly used options of plt.legend:
1. Setting the legend position: plt.legend(loc='xxx')

| location string | location code |
| --- | --- |
| 'best' | 0 |
| 'upper right' | 1 |
| 'upper left' | 2 |
| 'lower left' | 3 |
| 'lower right' | 4 |
| 'right' | 5 |
| 'center left' | 6 |
| 'center right' | 7 |
| 'lower center' | 8 |
| 'upper center' | 9 |
| 'center' | 10 |

2. Setting the legend font size: fontsize: int or float or {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'}. If numeric, the size is an absolute font size in points; if a string, it is relative to the current default font size. This argument is only used if prop is not specified.
3. Setting the legend frame and background: plt.legend(loc='best', frameon=False) removes the frame; plt.legend(loc='best', edgecolor='blue') sets the frame color; plt.legend(loc='best', facecolor='blue') sets the background color (this has no effect if there is no frame).
4. Setting the legend title: plt.legend(title='xxx'), e.g. plt.legend(title=('sinx', 'cosx')).
(Source: https://blog.csdn.net/humingzhu_97/article/details/104899572)
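A small sketch combining several of these options (the curves and the title string are illustrative only):

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 100)
plt.plot(x, np.sin(x), label="sinx")
plt.plot(x, np.cos(x), label="cosx")
# Position, font size, frame color and title in one call
plt.legend(loc="upper right", fontsize="small",
           edgecolor="blue", title="functions")
plt.show()
```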
Logistic Regression
Source code
```python
import sys
from pathlib import Path

curr_path = str(Path().absolute())
parent_path = str(Path().absolute().parent)
p_parent_path = str(Path().absolute().parent.parent)
sys.path.append(p_parent_path)
print(f"Main directory: {p_parent_path}")

from torch.utils.data import DataLoader
from torchvision import datasets
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
import numpy as np

train_dataset = datasets.MNIST(root=p_parent_path + '/datasets/', train=True,
                               transform=transforms.ToTensor(), download=False)
test_dataset = datasets.MNIST(root=p_parent_path + '/datasets/', train=False,
                              transform=transforms.ToTensor(), download=False)

batch_size = len(train_dataset)   # not used below; the loaders use a fixed batch size of 100
train_loader = DataLoader(dataset=train_dataset, batch_size=100, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=100, shuffle=True)

X_train, y_train = next(iter(train_loader))   # one batch of 100 training images
X_test, y_test = next(iter(test_loader))      # one batch of 100 test images

# Display the 100 training images of the batch as a 10x10 grid
images, labels = X_train[:100], y_train[:100]
img = torchvision.utils.make_grid(images, nrow=10)
img = img.numpy().transpose(1, 2, 0)          # CHW -> HWC for plt.imshow
print(images.shape)
print(labels.reshape(10, 10))
print(img.shape)
plt.imshow(img)
plt.show()

# Flatten each 1x28x28 image into a 784-dimensional vector
X_train, y_train = X_train.cpu().numpy(), y_train.cpu().numpy()
X_test, y_test = X_test.cpu().numpy(), y_test.cpu().numpy()
X_train = X_train.reshape(X_train.shape[0], 784)
X_test = X_test.reshape(X_test.shape[0], 784)

# Multiclass logistic regression on the raw pixels
model = LogisticRegression(solver='lbfgs', max_iter=400)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))

# Append a bias column of ones and recast the labels as binary:
# digit 1 vs. all other digits
ones_col = [[1] for i in range(len(X_train))]
X_train = np.append(X_train, ones_col, axis=1)
x_train = np.mat(X_train)
X_test = np.append(X_test, ones_col, axis=1)
x_test = np.mat(X_test)
y_train = np.array([1 if y_train[i] == 1 else 0 for i in range(len(y_train))])
y_test = np.array([1 if y_test[i] == 1 else 0 for i in range(len(y_test))])

model = LogisticRegression(solver='lbfgs', max_iter=100)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))
```
Reading data with Dataset, DataLoader, and DataLoaderIter
The article I read on this topic was very easy to follow.
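The rough division of labor, as I understand it: a Dataset defines how to fetch one sample, a DataLoader batches and shuffles samples, and iterating over the DataLoader creates the iterator (DataLoaderIter) internally. A minimal sketch with a made-up in-memory dataset (ToyDataset is hypothetical, not from the code above):

```python
import torch
from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):
    """A Dataset only needs __len__ and __getitem__."""
    def __init__(self, n=10):
        self.x = torch.arange(n, dtype=torch.float32).unsqueeze(1)
        self.y = (self.x > 4).long().squeeze(1)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]

loader = DataLoader(ToyDataset(), batch_size=4, shuffle=True)
for batch_x, batch_y in loader:   # each iteration yields one batch
    print(batch_x.shape, batch_y.shape)
```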
torchvision.utils.make_grid
Builds a grid of images, combining multiple images into a single image.
```python
torchvision.utils.make_grid(tensor: Union[torch.Tensor, List[torch.Tensor]],
                            nrow: int = 8, padding: int = 2,
                            normalize: bool = False,
                            value_range: Optional[Tuple[int, int]] = None,
                            scale_each: bool = False, pad_value: int = 0,
                            **kwargs)
```
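A small usage sketch with random tensors standing in for real images:

```python
import torch
import torchvision
import matplotlib.pyplot as plt

images = torch.rand(16, 1, 28, 28)           # 16 fake single-channel 28x28 images
grid = torchvision.utils.make_grid(images, nrow=4, padding=2, pad_value=1.0)
print(grid.shape)                            # 3 x H x W: make_grid returns a 3-channel image
plt.imshow(grid.numpy().transpose(1, 2, 0))  # CHW -> HWC for imshow
plt.show()
```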