Measuring Linear Relationships, Machine Learning
Category: Computer Programming

Continuing the algorithm summaries. Nothing especially fancy this time, and nothing too deep yet; it's all still fairly basic:
1. Variance (sample)
2. Covariance (i.e. standard deviation) (sample)
3. Coefficient of variation
4. Correlation coefficient

This post adds the functions listed below.

Original article link:

The BP (backpropagation) neural network is a key foundation of deep learning and one of its most important underlying algorithms, so understanding how a BP network works, and how to implement one, is well worth the effort. Below, we go through the principle and an implementation.

1. Principle

I'll fill this section in properly when I have time. For now, see this excellent step-by-step article: A Step by Step Backpropagation Example

Activation function reference: Common activation functions in deep learning — Sigmoid & ReLU & Softmax

An accessible take on initialization: CS231n course notes translation: Neural Networks notes 2

Useful tricks: Tricks in neural network training — efficient BP (backpropagation)

A simple walkthrough of a BPNN's computation: Understanding backpropagation in neural networks in one read — BackPropagation
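Until that section is written, here is a compact statement of what those references derive, in my own shorthand and in the notation of the implementation below (least-squares loss, batch of size m):

output layer:   delta_L = (z - t) * f'(a_L)
hidden layer l: delta_l = (W_{l+1} . delta_{l+1}) * f'(a_l)
weight update:  W_l <- W_l - eta * (x_l . delta_l^T) / m

where f' is the activation derivative, * is element-wise multiplication, . is matrix multiplication, and eta is the learning rate; this is what Layer.bp in the code computes.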

2. Implementation — mini-batch stochastic gradient descent

This implements a BP network with a configurable number of layers. The network structure is defined through the net_struct parameter. For example, a structure with only an output layer and no hidden layers, "sigmoid" activation, and a given learning rate can be defined as follows:

net_struct = [[10, "sigmoid", 0.01]]  # network structure

To define one hidden layer of 100 neurons, followed by another hidden layer of 50 neurons and an output layer of 10 neurons:

net_struct = [[100, "sigmoid", 0.01], [50, "sigmoid", 0.01], [10, "sigmoid", 0.01]]  # network structure

The implementation — the part coders love most — follows:

# encoding=utf8
'''
Created on 2017-7-3

@author: Administrator
'''
import random
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split as ttsplit

class LossFun:
    def __init__(self, lf_type="least_square"):
        self.name = "loss function"
        self.type = lf_type

    def cal(self, t, z):
        loss = 0
        if self.type == "least_square":
            loss = self.least_square(t, z)
        return loss

    def cal_deriv(self, t, z):
        delta = 0
        if self.type == "least_square":
            delta = self.least_square_deriv(t, z)
        return delta

    def least_square(self, t, z):
        zsize = z.shape
        sample_num = zsize[1]
        # mean squared error over the batch (matches the z - t derivative below)
        return np.sum(0.5 * (t - z) * (t - z)) / sample_num

    def least_square_deriv(self, t, z):
        return z - t

class ActivationFun:
    '''
    Activation functions
    '''
    def __init__(self, atype="sigmoid"):
        self.name = "activation function library"
        self.type = atype

    def cal(self, a):
        z = 0
        if self.type == "sigmoid":
            z = self.sigmoid(a)
        elif self.type == "relu":
            z = self.relu(a)
        return z

    def cal_deriv(self, a):
        z = 0
        if self.type == "sigmoid":
            z = self.sigmoid_deriv(a)
        elif self.type == "relu":
            z = self.relu_deriv(a)
        return z

    def sigmoid(self, a):
        return 1.0 / (1 + np.exp(-a))

    def sigmoid_deriv(self, a):
        fa = self.sigmoid(a)
        return fa * (1 - fa)

    def relu(self, a):
        # leaky variant: negative inputs are scaled by 0.1 rather than zeroed
        idx = a <= 0
        a[idx] = 0.1 * a[idx]
        return a  # np.maximum(a, 0.0)

    def relu_deriv(self, a):
        a[a > 0] = 1.0
        a[a <= 0] = 0.1
        return a

class Layer:
    '''
    Neural network layer
    '''
    def __init__(self, num_neural, af_type="sigmoid", learn_rate=0.5):
        self.af_type = af_type  # activation function type
        self.learn_rate = learn_rate
        self.num_neural = num_neural
        self.dim = None
        self.W = None
        self.a = None
        self.X = None
        self.z = None
        self.delta = None
        self.theta = None
        self.act_fun = ActivationFun(self.af_type)

    def fp(self, X):
        '''
        Forward propagation
        '''
        self.X = X
        xsize = X.shape
        self.dim = xsize[0]
        self.num = xsize[1]
        if self.W is None:
            # self.W = np.random.random((self.dim, self.num_neural)) - 0.5
            # self.W = np.random.uniform(-1, 1, size=(self.dim, self.num_neural))
            if self.af_type == "sigmoid":
                self.W = np.random.normal(0, 1, size=(self.dim, self.num_neural)) / np.sqrt(self.num)
            elif self.af_type == "relu":
                self.W = np.random.normal(0, 1, size=(self.dim, self.num_neural)) * np.sqrt(2.0 / self.num)
        if self.theta is None:
            # self.theta = np.random.random((self.num_neural, 1)) - 0.5
            # self.theta = np.random.uniform(-1, 1, size=(self.num_neural, 1))
            if self.af_type == "sigmoid":
                self.theta = np.random.normal(0, 1, size=(self.num_neural, 1)) / np.sqrt(self.num)
            elif self.af_type == "relu":
                self.theta = np.random.normal(0, 1, size=(self.num_neural, 1)) * np.sqrt(2.0 / self.num)
        # calculate the forward activation a
        # (note: theta is updated in bp but not applied here, as in the source)
        self.a = (self.W.T).dot(self.X)
        # calculate the forward output z
        self.z = self.act_fun.cal(self.a)
        return self.z

    def bp(self, delta):
        '''
        Back propagation
        '''
        self.delta = delta * self.act_fun.cal_deriv(self.a)
        self.theta = np.array([np.mean(self.theta - self.learn_rate * self.delta, 1)]).T  # average theta over all samples
        dW = self.X.dot(self.delta.T) / self.num
        self.W = self.W - self.learn_rate * dW
        delta_out = self.W.dot(self.delta)
        return delta_out

class BpNet:
    '''
    BP neural network
    '''
    def __init__(self, net_struct, stop_crit, max_iter, batch_size=10):
        self.name = "net work"
        self.net_struct = net_struct
        if len(self.net_struct) == 0:
            print("no layer is specified!")
            return
        self.stop_crit = stop_crit
        self.max_iter = max_iter
        self.batch_size = batch_size
        self.layers = []
        self.num_layers = 0
        # build the network
        self.create_net(net_struct)
        self.loss_fun = LossFun("least_square")

    def create_net(self, net_struct):
        '''
        Create the network
        '''
        self.num_layers = len(net_struct)
        for i in range(self.num_layers):
            self.layers.append(Layer(net_struct[i][0], net_struct[i][1], net_struct[i][2]))

    def train(self, X, t, Xtest=None, ttest=None):
        '''
        Train the network
        '''
        eva_acc_list = []
        eva_loss_list = []
        xshape = X.shape
        num = xshape[0]
        dim = xshape[1]
        for k in range(self.max_iter):
            # sample a mini-batch
            idxs = random.sample(range(num), self.batch_size)
            xi = np.array([X[idxs, :]]).T[:, :, 0]
            ti = np.array([t[idxs, :]]).T[:, :, 0]
            # forward pass
            zi = self.fp(xi)
            # loss derivative
            delta_i = self.loss_fun.cal_deriv(ti, zi)
            # backward pass
            self.bp(delta_i)
            # evaluate accuracy
            if Xtest is not None:
                if k % 100 == 0:
                    [eva_acc, eva_loss] = self.test(Xtest, ttest)
                    eva_acc_list.append(eva_acc)
                    eva_loss_list.append(eva_loss)
                    print("iter %d, acc %f, loss %f" % (k, eva_acc, eva_loss))
            else:
                print("iter %d" % k)
        return [eva_acc_list, eva_loss_list]

    def test(self, X, t):
        '''
        Evaluate model accuracy
        '''
        xshape = X.shape
        num = xshape[0]
        z = self.fp_eval(X.T)
        t = t.T
        est_pos = np.argmax(z, 0)
        real_pos = np.argmax(t, 0)
        corrct_count = np.sum(est_pos == real_pos)
        acc = 1.0 * corrct_count / num
        loss = self.loss_fun.cal(t, z)
        # print("loss: %f" % loss)
        return [acc, loss]

    def fp(self, X):
        '''
        Forward pass through all layers
        '''
        z = X
        for i in range(self.num_layers):
            z = self.layers[i].fp(z)
        return z

    def bp(self, delta):
        '''
        Backward pass through all layers
        '''
        z = delta
        for i in range(self.num_layers - 1, -1, -1):
            z = self.layers[i].bp(z)
        return z

    def fp_eval(self, X):
        '''
        Forward pass for evaluation
        '''
        layers = self.layers
        z = X
        for i in range(self.num_layers):
            z = layers[i].fp(z)
        return z

def z_score_normalization(x):
    mu = np.mean(x)
    sigma = np.std(x)
    x = (x - mu) / sigma
    return x

def sigmoid(X, useStatus):
    if useStatus:
        return 1.0 / (1 + np.exp(-float(X)))
    else:
        return float(X)

def plot_curve(data, title, lege, xlabel, ylabel):
    num = len(data)
    idx = range(num)
    plt.plot(idx, data, color="r", linewidth=1)
    plt.xlabel(xlabel, fontsize="xx-large")
    plt.ylabel(ylabel, fontsize="xx-large")
    plt.title(title, fontsize="xx-large")
    plt.legend([lege], fontsize="xx-large", loc='upper left')
    plt.show()

if __name__ == "__main__":
    print('This is main of module "bp_nn.py"')
    print("Import data")
    raw_data = pd.read_csv('./train.csv', header=0)
    data = raw_data.values
    imgs = data[0::, 1::]
    labels = data[::, 0]
    train_features, test_features, train_labels, test_labels = ttsplit(
        imgs, labels, test_size=0.33, random_state=23323)
    train_features = z_score_normalization(train_features)
    test_features = z_score_normalization(test_features)
    sample_num = train_labels.shape[0]
    tr_labels = np.zeros([sample_num, 10])
    for i in range(sample_num):
        tr_labels[i][train_labels[i]] = 1
    sample_num = test_labels.shape[0]
    te_labels = np.zeros([sample_num, 10])
    for i in range(sample_num):
        te_labels[i][test_labels[i]] = 1
    print(train_features.shape)
    print(tr_labels.shape)
    print(test_features.shape)
    print(te_labels.shape)

    stop_crit = 100  # stopping criterion
    max_iter = 10000  # maximum number of iterations
    batch_size = 100  # number of samples per training step
    net_struct = [[100, "relu", 0.01], [10, "sigmoid", 0.1]]  # network structure: [[num_neurons, activation, learning rate]]
    # net_struct = [[200, "sigmoid", 0.5], [100, "sigmoid", 0.5], [10, "sigmoid", 0.5]]

    bpNNCls = BpNet(net_struct, stop_crit, max_iter, batch_size)
    # train model
    [acc, loss] = bpNNCls.train(train_features, tr_labels, test_features, te_labels)
    # [acc, loss] = bpNNCls.train(train_features, tr_labels)
    print("training model finished")
    # plot training curves
    plot_curve(acc, "Bp Network Accuracy", "accuracy", "iter", "Accuracy")
    plot_curve(loss, "Bp Network Loss", "loss", "iter", "Loss")
    # test model
    [acc, loss] = bpNNCls.test(test_features, te_labels)
    print("test accuracy: %f" % acc)

The experiments use the MNIST dataset, which can be downloaded from: https://github.com/WenDesi/lihang_book_algorithm/blob/master/data/train.csv

a. Using sigmoid activation and the structure net_struct = [[10, "sigmoid", 0.1]] (no hidden layer; this can be viewed as a softmax regression), the validation accuracy and loss evolve as shown below:

[Figure 1: validation accuracy]

[Figure 2: validation loss]

The accuracy reaches 0.916017, which is decent. Stochastic gradient descent does depend on parameter initialization, though: a poor initialization slows convergence and can even produce poor results.

b. Using sigmoid activation and net_struct = [[200, "sigmoid", 0.5], [100, "sigmoid", 0.5], [10, "sigmoid", 0.5]] (a 200-neuron hidden layer, a 100-neuron hidden layer, and a 10-neuron output layer), the validation accuracy and loss evolve as shown below:

[Figure 3: validation accuracy]

[Figure 4: validation loss]

The accuracy reaches 0.963636, quite a bit better than softmax regression. The loss curves also show that with hidden layers added, convergence is steadier than without them.

Make Change - Focus on Computer Vision and Pattern Recognition


As before, start by generating a list. This time that's wrapped in a function for easy reuse later; functions written in the previous post will also be reused here.
def create_rand_list(min_num,max_num,count_list):
  case_list = []
  while len(case_list) < count_list:
    rand_float = random.uniform(min_num,max_num)
    if rand_float in case_list:
      continue
    case_list.append(rand_float)
  case_list = [round(case,2) for case in case_list]
  return case_list
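For example (output will vary, since the list is random):

print(create_rand_list(5, 30, 10))  # 10 distinct floats in [5, 30], rounded to 2 decimal places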

The functions from earlier posts:

sum_fun() # cumulative sum
len_fun() # element count
multiply_fun() # cumulative product
sum_mean_fun() # arithmetic mean
sum_mean_rate() # arithmetic-mean rate of return
median_fun() # median
modes_fun() # mode
ext_minus_fun() # range (max minus min)
geom_mean_fun() # geometric mean
geom_mean_rate() # geometric-mean rate of return

The new function code:

import random

# First generate a random list; the function already exists, so not repeated here
rand_list = [15.79, 6.83, 12.83, 22.32, 17.92, 6.29, 10.19, 10.13, 24.23, 25.56]

# 1. Sample variance S^2: sum the squared deviations of each element from the list mean,
#    then divide by count - 1 (for a population variance, don't subtract 1)
def var_fun(rand_list):
  mean_num = sum_mean_fun(rand_list) # arithmetic mean
  len_num = len_fun(rand_list) # element count
  var_list = [(x-mean_num)**2 for x in rand_list]
  var_sum = sum_fun(var_list)
  var_num = var_sum/(len_num - 1)
  return var_num

# 2. Sample "covariance" (standard deviation) S: easy, just take the square root of the variance
def covar_fun(rand_list):
  var_num = var_fun(rand_list)
  covar_num = var_num ** 0.5
  return covar_num

# 3. Coefficient of variation CV, a measure of relative dispersion: standard deviation / arithmetic mean * 100%
# Note (Baidu Baike): in statistical analysis, a CV above 15% suggests the data may be abnormal and should be considered for removal
def trans_coef_fun(rand_list):
  covar_num = covar_fun(rand_list)
  mean_num = sum_mean_fun(rand_list)
  trans_coef_num = covar_num / mean_num
  return trans_coef_num
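For example, with the rand_list above (my own quick check; the exact value depends on the list):

print(trans_coef_fun(rand_list))  # roughly 0.47 here, far above the 15% rule of thumb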

# 4. Sample correlation coefficient r: measures the linear relation between two dimensions,
#    -1 < r < 1; the closer to 1, the stronger the relation between the dimensions.
#    Since there are two dimensions, two lists must be passed in; the algorithm is a bit more involved
'''
r = ((x1-mean(x))(y1-mean(y)) + (x2-mean(x))(y2-mean(y)) + ... + (xn-mean(x))(yn-mean(y)))
    / (((x1-mean(x))^2 + (x2-mean(x))^2 + ... + (xn-mean(x))^2)^0.5 * ((y1-mean(y))^2 + (y2-mean(y))^2 + ... + (yn-mean(y))^2)^0.5)
'''
x_list = rand_list
y_list = [4.39, 13.84, 9.21, 9.91, 15.69, 14.92, 25.77, 23.99, 8.15, 25.07]
def pearson_fun(x_list,y_list):
  x_mean = sum_mean_fun(x_list)
  y_mean = sum_mean_fun(y_list)
  len_num = len_fun(x_list)
  if len_num == len_fun(y_list):
    xy_multiply_list = [(x_list[i]-x_mean)*(y_list[i]-y_mean) for i in range(len_num)]
    xy_multiply_num = sum_fun(xy_multiply_list)
  else:
    print('input lists differ in length, try again')
    return None
  x_covar_son_list = [(x-x_mean)**2 for x in x_list]
  y_covar_son_list = [(y-y_mean)**2 for y in y_list]
  x_covar_son_num = sum_fun(x_covar_son_list)
  y_covar_son_num = sum_fun(y_covar_son_list)
  xy_covar_son_multiply_num = (x_covar_son_num ** 0.5) * (y_covar_son_num ** 0.5)
  pearson_num = xy_multiply_num / xy_covar_son_multiply_num
  return pearson_num
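For example, with the two lists above:

print(pearson_fun(x_list, y_list))  # a value in (-1, 1); the sign gives the direction of the linear relation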


 

The historical functions so far:

create_rand_list() # build a list with a given number of random elements
sum_fun() # cumulative sum
len_fun() # element count
multiply_fun() # cumulative product
sum_mean_fun() # arithmetic mean
sum_mean_rate() # arithmetic-mean rate of return
median_fun() # median
modes_fun() # mode
ext_minus_fun() # range (max minus min)
geom_mean_fun() # geometric mean
geom_mean_rate() # geometric-mean rate of return
var_fun() # sample variance S^2
covar_fun() # sample standard deviation S
trans_coef_fun() # coefficient of variation CV
pearson_fun() # sample correlation coefficient r
unite_rate_fun # joint probability
condition_rate_fun # conditional probability
e_x # random-variable expected value
var_rand_fun # random-variable variance
covar_rand_fun # random-variable standard deviation
covar_rand_xy_fun # joint covariance
e_p # portfolio expected return
var_p_fun # portfolio risk
bayes # Bayes

--------------- the above is old ------------------------------------------------------------------------
--------------- the below is new ------------------------------------------------------------------------

Continuing with probability: this time the binomial and Poisson distributions. These two are actually quite interesting and can serve as prediction functions. Since there are only a few functions this time, there are no full worked examples, but each function is explained one by one.

 

1. Factorial n!
Multiply down by 1 each time until you reach 1, e.g. 5! = 5 * 4 * 3 * 2 * 1 = 120. That's the usual direction, but written that way the function is a bit less efficient, so I flip it around to 1*2*3..., which gives this function:

 

def fact_fun(n):
  if n == 0:
    return 1
  n += 1  # so that range(1, n) covers 1..n
  fact_list = [i for i in range(1,n)]
  fact_num = multiply_fun(fact_list)
  return fact_num
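A quick check against the example above:

print(fact_fun(5))  # 120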

 

2. Computing the number of combinations C
C = n! / (x! * (n-x)!)
This is the number of ways to draw x sample units from n, i.e. the number of possible outcome combinations. For example, drawing 3 items out of 5 gives 10 combinations.

 

def c_n_x(case_count,real_count):
  fact_n = fact_fun(case_count)
  fact_x = fact_fun(real_count)
  fact_n_x = fact_fun(case_count - real_count)
  c_n_x_num = fact_n / (fact_x * fact_n_x)
  return c_n_x_num
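Checking the example from the text:

print(c_n_x(5, 3))  # 10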


3. Binomial probability distribution
Run n Bernoulli trials. A Bernoulli trial is a single trial with only two possible, mutually exclusive outcomes, like flipping a coin. The probability of succeeding k times out of n trials is
P = C(n,k) * p^k * (1-p)^(n-k)
For example, with n=5 and k=3: P = C(5,3) * p^3 * (1-p)^2
where p is the success probability of a single event, and the failure probability is 1 - p.

def binomial_fun(case_count,real_count,p):
  c_n_k_num = c_n_x(case_count,real_count)
  pi = (p ** real_count) * ((1 - p) ** (case_count - real_count))
  binomial_num = c_n_k_num * pi
  return binomial_num
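For example, the probability of exactly 3 heads in 5 fair coin flips:

print(binomial_fun(5, 3, 0.5))  # 0.3125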

4. Poisson distribution
Given an opportunity window, which can be a range or a stretch of time, this is the probability that some counted event occurs in that window. For example: a shop averages 10 customers per hour; the probability that 13 customers show up in one hour is a Poisson probability, and the 13 customer arrivals are the counted event.
P(X) = (e^(-λ) * λ^X) / X! = (2.7182818^(-10) * 10^13) / 13! = 0.0729
Here λ is the mean, which can be obtained with the arithmetic mean, and e is the natural constant ≈ 2.7182818. The function:

def poisson_fun(chance_x, case_list = [0], mean_num = 0):
  chance_x_fact = fact_fun(chance_x)
  e = 2.7182818
  if len_fun(case_list) == 1 and case_list[0] == 0:
    poisson_num = ((e ** (0-mean_num)) * mean_num ** chance_x) / chance_x_fact
  else:
    mean_num = sum_mean_fun(case_list)
    poisson_num = ((e ** (0-mean_num)) * mean_num ** chance_x) / chance_x_fact
  return poisson_num

This function needs a note: it really wants two inputs, a mean and the count whose probability you want. Three parameters are declared because the input isn't necessarily a single number — it may also be a list, which gives two calculation paths, as the if branch shows. There are two ways to call it, for example:

if __name__ == '__main__':
  # first way: pass the mean directly
  poisson_rate = poisson_fun(mean_num = 10, chance_x = 13)
  print(poisson_rate)
  # second way: pass a list and let the function compute the mean
  case_list = [8, 9, 10, 11, 12]
  poisson_rate = poisson_fun(case_list = case_list, chance_x = 13)
  print(poisson_rate)

The probability material left me thoroughly dumbfounded, so the code from here on is written the way I understand it; if anything is wrong, corrections are welcome.
One more note: probability is delicate work, so there will be lots of floating-point numbers with very precise decimals; except in special cases I round to 4 decimal places.
A simple event is an event with just one feature, and the set of all possible events is the sample space. An example: there are two bags of peanuts. Bag A holds 32 peanuts, 3 of them bad; bag B holds 17 peanuts, 5 of them bad. The sample space for this example is the table below. (I'll just say: if I picked bag B, I would definitely curse the peanut seller to eat instant noodles with no seasoning.)

Bag | Bad? | Count
A   | 0    | 3
A   | 1    | 29
B   | 0    | 5
B   | 1    | 12

For convenience, True (bad) is written as 0 and False as 1.

1. Simple marginal probability, written P(A)
This one is easy to understand: for example, the overall rate of bad peanuts. It's simple enough that no separate code is needed:
P(A) = bad peanuts / total = 8/49 = 0.1633

2. Joint probability
Since it's joint, two events are needed, written P(A∩B) — the ∩ means "and".
It's the probability of events A and B combined into a single event: eating one bad peanut out of bag A is a joint probability, where event A is "bad peanut" and event B is "bag A".
This one is somewhat contested. The more widely used version is
P(A∩B) = 3/49 = 0.0612
The other is
P(A∩B) = 3/32 * 0.5 = 0.0469
I personally lean toward the first, but it is heavily affected by the other events: consider that if bag B held 10,000 peanuts with the same number of bad ones, the two results would differ a great deal.
So the function is:

def unite_rate_fun(condition_count,all_count):
  p_a_with_b = float(condition_count) / all_count
  return p_a_with_b
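Checking the first version with the peanut numbers:

print(unite_rate_fun(3, 49))  # ~0.0612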

3. Conditional probability
The probability that one event occurs given that another event has already occurred. The more formal phrasing: given event B, the probability that event A occurs — and of course it also works the other way around.
P(A|B) = P(A∩B)/P(B)
and conversely
P(B|A) = P(A∩B)/P(A)
Using the same example: it is now known that event B is drawing from bag A, so P(B) = 32/49, and
P(A|B) = (3/49)/(32/49) = 3/32 = 0.0937
The function is:
def condition_rate_fun(p_a_with_b,p_b):
  p_a_from_b = p_a_with_b / p_b
  return p_a_from_b
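Checking with the numbers above:

print(condition_rate_fun(3.0/49, 32.0/49))  # 0.09375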

 

The peanut example doesn't suit what comes next, so let's switch to something school-related: the share of each score on a class's English exam.

Score | Share
20    | 0.1
40    | 0.1
60    | 0.3
80    | 0.4
100   | 0.1

4. Expected value of a random variable
Much like the arithmetic mean: actual outcomes shouldn't stray too far from this number.
μ = E(X) = Σ Xi*P(Xi), summing over the N outcomes
E(X) = 20*0.1 + 40*0.1 + 60*0.3 + 80*0.4 + 100*0.1 = 66

def e_x(count_list,rate_list):
  e_len = len_fun(count_list)
  if e_len == len_fun(rate_list):
    e_list = [count_list[i] * rate_list[i] for i in range(e_len)]
    e_num = sum_fun(e_list)
  else: return None
  return e_num
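Checked against the score table:

scores = [20, 40, 60, 80, 100]
rates = [0.1, 0.1, 0.3, 0.4, 0.1]
print(e_x(scores, rates))  # 66.0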

5. Variance of a random variable
Serves the same purpose as the sample variance, so not much more to say.
σ^2 = Σ [Xi - E(X)]^2 * P(Xi), summing over the N outcomes

def var_rand_fun(count_list,rate_list):
  e_num = e_x(count_list,rate_list)
  var_len = len_fun(count_list)
  if var_len == len_fun(rate_list):
    var_list = [((count_list[i] - e_num) ** 2) * rate_list[i] for i in range(var_len)]
    var_num = sum_fun(var_list)
  else: return None
  return var_num

6. "Covariance" (standard deviation) of a random variable
The function is simple: just follow the covariance function, taking the square root of the variance.

def covar_rand_fun(count_list,rate_list):
  var_rand_num = var_rand_fun(count_list,rate_list)
  covar_num = var_rand_num ** 0.5
  return covar_num
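Again with the score table; the variance works out to 484, so the square root is 22:

scores = [20, 40, 60, 80, 100]
rates = [0.1, 0.1, 0.3, 0.4, 0.1]
print(var_rand_fun(scores, rates))    # 484.0
print(covar_rand_fun(scores, rates))  # 22.0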

7. Joint covariance
σxy = Σ [Xi - E(X)]*[Yi - E(Y)]*P(XiYi), summing over the N outcomes

def covar_rand_xy_fun(x_count_list,y_count_list,xy_rate_list):
  e_x_num = e_x(x_count_list,xy_rate_list)
  e_y_num = e_x(y_count_list,xy_rate_list)
  covar_len = len_fun(x_count_list)
  if covar_len == len_fun(y_count_list) and covar_len == len_fun(xy_rate_list):
    covar_rand_xy_list = [(x_count_list[i] - e_x_num) * (y_count_list[i] - e_y_num) * xy_rate_list[i] for i in range(covar_len)]
    covar_rand_xy_num = sum_fun(covar_rand_xy_list)
  else: return None
  return covar_rand_xy_num

8. Portfolio expected return
The largest return obtainable at fairly small risk.
E(P) = w*E(X) + (1 - w)*E(Y)
where w is the proportion of the funds invested in asset X.

def e_p(x_count_list,y_count_list,xy_rate_list):
  e_x_num = e_x(x_count_list,xy_rate_list)
  e_y_num = e_x(y_count_list,xy_rate_list)
  w = sum_fun(x_count_list) / (sum_fun(x_count_list) + sum_fun(y_count_list))
  e_p_num = w * e_x_num + (1 - w) * e_y_num
  return e_p_num

9. Portfolio risk
I haven't fully figured out what this one is for; it should be the deviation of the expected return.
σ(p) = [w^2*σ(x)^2 + (1 - w)^2*σ(y)^2 + 2w(1 - w)*σ(xy)]^0.5

def var_p_fun(x_count_list,y_count_list,xy_rate_list):
  w = sum_fun(x_count_list) / (sum_fun(x_count_list) + sum_fun(y_count_list))
  var_rand_x_num = var_rand_fun(x_count_list,xy_rate_list)
  var_rand_y_num = var_rand_fun(y_count_list,xy_rate_list)
  covar_rand_xy_num = covar_rand_xy_fun(x_count_list,y_count_list,xy_rate_list)
  # the w^2 term uses X's variance, matching the formula above
  var_p_num = (w * w * var_rand_x_num + (1 - w) * (1 - w) * var_rand_y_num + 2 * w * (1 - w) * covar_rand_xy_num) ** 0.5
  return var_p_num
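A small made-up example to exercise both functions (the returns and the shared probability table here are hypothetical):

x_returns = [5.0, 10.0]
y_returns = [8.0, 4.0]
xy_rates = [0.5, 0.5]
print(e_p(x_returns, y_returns, xy_rates))        # ~6.8333
print(var_p_fun(x_returns, y_returns, xy_rates))  # 0.5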

other: Bayes
This one honestly left me the most confused; I don't feel my version is exact, so treat it as a reference only.

def bayes(true_coef,event_rate,event_bool,manage_num):
  'True = 0, False = 1'
  manage_num = manage_num - 1
  false_coef = 1 - true_coef
  event_count = len_fun(event_rate)
  if event_bool[manage_num] == 0:
    main_rate = event_rate[manage_num] * true_coef
  else:
    main_rate = event_rate[manage_num] * false_coef
  event_true_list = [event_rate[n] * true_coef for n in range(event_count) if event_bool[n] == 0]
  # the false branch weights by false_coef, mirroring main_rate above
  event_false_list = [event_rate[n] * false_coef for n in range(event_count) if event_bool[n] == 1]
  event_sum = sum_fun(event_true_list) + sum_fun(event_false_list)
  event_succe_rate = main_rate/event_sum
  return event_succe_rate
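Reading the function as written, one way to exercise it is a hypothetical diagnostic-test setup (prior P(T) = 0.3, P(positive|T) = 0.9, P(positive|not T) = 0.2; event_bool marks which rates condition on T, and manage_num picks the event, 1-indexed):

print(bayes(0.3, [0.9, 0.2], [0, 1], 1))  # ~0.6585, i.e. P(T|positive)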

 
