Why does the training loss increase with every epoch?
To read the loss I call:
model.get_latest_training_loss()
Output (loss from epoch 1 to 20):
The code used for training:
import os
import multiprocessing

from gensim.models import Word2Vec
from gensim.models.callbacks import CallbackAny2Vec


class EpochSaver(CallbackAny2Vec):
    '''Callback to save the model after each epoch and show training parameters.'''

    def __init__(self, savedir):
        self.savedir = savedir
        self.epoch = 0
        os.makedirs(self.savedir, exist_ok=True)

    def on_epoch_end(self, model):
        savepath = os.path.join(self.savedir, "model_neg{}_epoch.gz".format(self.epoch))
        model.save(savepath)
        print(
            "Epoch saved: {}".format(self.epoch + 1),
            "Start next epoch ... ", sep="\n"
        )
        if os.path.isfile(os.path.join(self.savedir, "model_neg{}_epoch.gz".format(self.epoch - 1))):
            print("Previous model deleted ")
            os.remove(os.path.join(self.savedir, "model_neg{}_epoch.gz".format(self.epoch - 1)))
        self.epoch += 1
        print("Model loss:", model.get_latest_training_loss())
def train():
    ### Initialize model ###
    print("Start training Word2Vec model")
    workers = multiprocessing.cpu_count() // 2   # integer division: workers must be an int

    model = Word2Vec(
        DocIter(),
        size=300, alpha=0.03, min_alpha=0.00025, iter=20,
        min_count=10, hs=0, negative=10, workers=workers,
        window=10, callbacks=[EpochSaver("./checkpoints")],
        compute_loss=True
    )
What am I doing wrong?
The language is Arabic. The input to DocIter is lists of tokens.
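For reference, a corpus iterator of that shape is typically a restartable iterable that yields one token list per document; a minimal hypothetical sketch (file name and tokenisation are placeholders, not from the original post):

class DocIter:
    '''Hypothetical example: yields one list of tokens per line of a corpus file.'''
    def __init__(self, path='corpus.txt'):   # placeholder path
        self.path = path
    def __iter__(self):
        with open(self.path, encoding='utf-8') as fh:
            for line in fh:
                yield line.split()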
Answer 0 (score: 2)
Prior to gensim 3.6.0, the reported loss value may not be very sensible: the tally is only reset on each call to train(), not for each internal epoch. There are some fixes in this PR:
https://github.com/RaRe-Technologies/gensim/pull/2135
In the meantime, the difference between the previous and the latest value may be more meaningful. In that case, your data suggest the first epoch had a total loss of 745896, while the last had (9676936 - 9280568 =) 396368 – which may indicate the progress you were hoping for.
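In code, that amounts to subtracting consecutive cumulative readings (a minimal sketch using the two figures quoted above):

previous_total = 9280568.0   # cumulative loss reported before the final epoch
latest_total = 9676936.0     # cumulative loss reported after the final epoch
print(latest_total - previous_total)   # 396368.0 -> loss of the final epoch alone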
Answer 1 (score: 0)
Following gojomo's suggestion, you can compute the loss difference inside the callback:
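A sketch of such a callback (class and attribute names are illustrative; it only relies on CallbackAny2Vec and get_latest_training_loss, which the question already uses):

from gensim.models.callbacks import CallbackAny2Vec

class callback(CallbackAny2Vec):
    '''Callback that prints the per-epoch loss as the difference of cumulative tallies.'''
    def __init__(self):
        self.epoch = 0
        self.loss_previous_step = 0

    def on_epoch_end(self, model):
        cumulative = model.get_latest_training_loss()
        # get_latest_training_loss() is cumulative, so subtract the previous reading
        print('Loss after epoch {}: {}'.format(self.epoch, cumulative - self.loss_previous_step))
        self.loss_previous_step = cumulative
        self.epoch += 1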
To train the model, add compute_loss=True and callbacks=[callback()] to the Word2Vec training call:
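A minimal sketch of that call (parameter values echo the question; `sentences` stands in for your own corpus iterable and `callback` is the class defined above):

model = Word2Vec(
    sentences,                      # placeholder: an iterable of token lists
    size=300, window=10, min_count=10, negative=10, iter=20,
    compute_loss=True,              # make get_latest_training_loss() meaningful
    callbacks=[callback()]          # print the per-epoch loss difference
)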
This will output something like:
Loss after epoch 0: 4448638.5
Loss after epoch 1: 3283735.5
Loss after epoch 2: 2826198.0
Loss after epoch 3: 2680974.0
Loss after epoch 4: 2601113.0
Loss after epoch 5: 2271333.0
Loss after epoch 6: 2052050.0
Loss after epoch 7: 2011768.0
Loss after epoch 8: 1927454.0
Loss after epoch 9: 1887798.0