python 非线性规划
程序员文章站
2022-06-13 16:45:37
...
python 提供了和matlab中fmincon类似的非线性规划函数,即scipy.optimize.minimize。
下面是一些参考:
https://blog.csdn.net/sinat_17697111/article/details/81534935
https://blog.csdn.net/weixin_45508265/article/details/112978943
https://www.jianshu.com/p/94817f7cc89b
下面是我使用的代码,目的是求一个矩阵里面的系数,使得一个函数表达式的值最小:
from process_data import process_data,BetPm,cal_accu
import sklearn
from sklearn import svm
from keras.utils.np_utils import to_categorical
import numpy as np
from scipy.optimize import minimize
..........
..........
def mapping(x, bba_val_2, i):
    """Map the i-th BBA row of bba_val_2 through a 2x5 transfer matrix.

    x: sequence of 6 coefficients filling the non-zero slots of the matrix.
    bba_val_2: indexable collection whose i-th entry is a length-2 vector.
    Returns the length-5 result of bba_val_2[i] @ matrix.
    """
    transfer = np.array([
        [x[0], x[1], 0, x[2], x[3]],
        [0, 0, x[4], 0, x[5]],
    ])
    source_row = np.array(bba_val_2[i])
    return np.dot(source_row, transfer)
def fusion_this(m1, m2):
    """Dempster-style combination of two basic belief assignments.

    m1: [t0, t1, t2] -- singleton masses only.
    m2: [t0, t1, t2, m({t0,t1}), m({t0,t1,t2})] -- singletons plus the
        {t0,t1} pair and the full frame.
    Returns the normalized combined singleton masses as a numpy array.
    """
    # Conflict mass: products of focal elements whose intersection is empty.
    conflict = (m1[0] * (m2[1] + m2[2])
                + m1[1] * (m2[0] + m2[2])
                + m1[2] * (m2[0] + m2[1] + m2[3]))
    norm = 1 - conflict
    combined = [
        m1[0] * (m2[0] + m2[3] + m2[4]) / norm,
        m1[1] * (m2[1] + m2[3] + m2[4]) / norm,
        m1[2] * (m2[2] + m2[4]) / norm,
    ]
    return np.array(combined)
def fusion(x, bba_val_2, bba_val_1, i):
    """Transform the i-th row of bba_val_2 with coefficients x, then
    combine it with the matching row of bba_val_1 via fusion_this."""
    mapped = mapping(x, bba_val_2, i)
    return fusion_this(bba_val_1[i], mapped)
def con(arg=0, n_vars=6, upper=100):
    """Build box constraints 0 <= x[i] <= upper for i in range(n_vars).

    SLSQP expects inequality constraints of the form fun(x) >= 0, so each
    bound becomes a pair of dicts: x[i] >= 0 and upper - x[i] >= 0.

    arg is kept for backward compatibility with existing callers; the
    original implementation ignored it and so does this one.
    Returns a tuple of constraint dicts for scipy.optimize.minimize.
    """
    cons = []
    for i in range(n_vars):
        # Bind i as a default argument: a plain closure over the loop
        # variable would make every lambda see only the final value of i.
        cons.append({'type': 'ineq', 'fun': lambda x, i=i: x[i]})
        cons.append({'type': 'ineq', 'fun': lambda x, i=i: upper - x[i]})
    return tuple(cons)
# Build the 0 <= x[i] <= 100 box constraints for SLSQP (con ignores its argument).
cons=con(1)
# NOTE(review): train_label1 / test_label1 come from the elided code above;
# presumably integer class labels in {0, 1, 2}, since they are expanded to
# 3-class one-hot rows here — confirm against the data-loading code.
train_label1_onehot=to_categorical(train_label1.ravel(), 3) #label to one-hot encode
test_label1_onehot=to_categorical(test_label1.ravel(), 3)
def fun(args):
    """Build the SLSQP objective for minimize().

    args: (bba_val_2, bba_val_1) tuple unpacked into the fusion inputs.
    Returns a callable of x giving the total Euclidean distance between
    each fused BBA and its one-hot training label over samples 0..104.
    """
    bba_val_2, bba_val_1 = args

    def objective(x):
        residuals = (
            np.linalg.norm(fusion(x, bba_val_2, bba_val_1, i) - train_label1_onehot[i])
            for i in range(105)
        )
        return sum(residuals)

    return objective
# NOTE(review): bba_val_1 / bba_val_2 are produced by the elided code above
# (process_data etc.) — their exact shapes must be confirmed there.
args=(bba_val_2,bba_val_1)
# Initial guess: all six coefficients start at 1.
# NOTE(review): x0 has shape (6, 1); scipy.optimize.minimize flattens the
# guess internally, but a flat np.ones(6) would be clearer.
x0=np.ones((6,1))
# SLSQP handles the 'ineq' constraint dicts built by con(); the two
# commented-out calls are unconstrained variants kept for comparison.
res = minimize(fun(args), x0,method='SLSQP',constraints=cons)
# res = minimize(fun(args), x0,method='SLSQP')
# res = minimize(fun(args), x0)
print(np.round(res.fun,2))  # minimized objective value
print(res.success)  # whether the optimizer reports convergence
print(np.round(res.x,2))  # fitted coefficients x[0..5]