版本:python 3.7.6,numpy 1.19.2,scipy 1.5.2
import numpy as np
import pandas as pd
from scipy.optimize import minimize
###################### importing the excel file ######################
df = pd.read_csv('data2.csv')
###################### Setting up variables and arrays ######################
a = df.loc[:,'C(ADD)'].values #measured added customers
l = df.loc[:,'C(Loss)'].values #measured lost customers
m = df.loc[:,'m'].values #number of months
mkt = df.loc[:,'Marketing Expense'].values #maketing dollars in each month
e = 5596 #end measured value, Calculated from the cac/total marketing spend over the time period
n = len(df) #creates a variable of the length of the dataframe
###################### Defining equations ######################
g0 = np.zeros(n) #guess values
g0[0] = 0.0001
g0[1] = 0.006
g0[2] = 96755.00
g0[3] = 1.7
g0[4] = 0.6
g0[5] = 0.1
g0[6] = 0.006
g0[7] = 1.7
g0[8] = 0.6
def addhat(g): #Add predict values
pNT = g[0]
r = g[1]
alpha = g[2]
c = g[3]
Bm = g[4]
ah = np.empty(len(df)) #an empty array for the add hat values
b = np.empty(n) #an empty array for the B(m,m') values
b[0] = np.exp(np.log(mkt[0])*Bm)
ah[0] = 400000*((1-pNT) * (1 - (alpha/(alpha + b[0]))**r))
for i in range(1, n):
b[i] = b[i-1] + (m[i]**c - m[i-1]**c)*np.exp(np.log(mkt[i])*Bm)
ah[i] = 400000*((1-pNT) * (1 - (alpha/(alpha + (b[i])))**r))
return ah
print('add pred values: ' + str(addhat(g0)))
def rethat(g): #Retention percentage
rr = g[5]
alphar = g[6]
cr = g[7]
Bmr = g[8]
k = np.empty(n) #an empty array for exponent section of the formula
w = np.empty(n) #an empty array for the retention values
#The value of b(t)r when i = 0
k[0] = np.exp(np.log(mkt[0])*Bmr)
w[0] = 1 - (alphar/(alphar + k[0]))**rr
# the value of B(t) for all other values of q
for i in range(1, n):
k[i] = k[i-1] + (m[i]**cr - m[i-1]**cr)*np.exp(np.log(mkt[i])*Bmr)
w[i] = 1 - (alphar/(alphar + (k[i])))**rr
return w
def endpred(g): #predicting the end hat values
eh = np.empty(n) #an empty array for the end hat values
eh[0] = 213
for i in range(1, n):
eh[i] = (eh[i-1] * rethat(g)[i]) + addhat(g)[i]
return eh
endhat = sum(endpred(g0))
def losshat(g):
lh = np.empty(n) #an empty array for the loss hat values
lh[0] = 0
for i in range(1, n):
lh[i] = endpred(g)[i-1] - (endpred(g)[i] - addhat(g)[i])
return lh
###################### Sum of square errors ######################
def objective(g):
sse = sum((addhat(g)-a)**2 + (losshat(g)-l)**2) + (endhat-e)**2
return sse
print("SSE Initial: " + str(objective(g0)))
###################### Constraints ######################
def constraint1(g): #c is greater than 1
return g[3] - 1
def constraint2(g): #cr is greater than 1
return g[7] - 1
def constraint3(g): #pNT is greater than 0
return g[0]
con1 = {'type': 'ineq', 'fun': constraint1}
con2 = {'type': 'ineq', 'fun': constraint2}
con3 = {'type': 'ineq', 'fun': constraint3}
cons = [con1, con2, con3]
###################### Optimize ######################
s = minimize(objective, g0, method='SLSQP', constraints = cons)
g = s.x
print(g)
print("SSE Final: " + str(objective(g)))
所得的SSE值为4,281,096.9,其值为:
3.48133574e+02, 6.84452015e+02, 9.67550032e+04, 2.22008198e+00, -3.28153006e+03, -1.91454144e+02, 2.20947909e+02, 1.70207912e+00, -1.24649708e+01
我使用的初始猜测值与实际结果值非常接近(我正在检查代码并知道结果的问题)。结果应为0.0001001361, 0.006035783, 96,755.64542, 1.78204741, 0.636357403, 0.152, 0.0065432195, 1.73490796, 0.62625507
,其SSE为912,278。
Link至data2.csv
。
再次感谢您的帮助
答案 0 :(得分:0)
您似乎将相同的变量l
用于2个不同的目的。
首先使用CSV文件中的固定值对其进行初始化,但也将其用作函数rethat
中的内部变量。并且还用于目标函数-这意味着每次优化时,您也会更改目标函数。
看起来不太好...