I have a persiandate column in a SQL table and I want to update the year part of persiandate. persiandate is a string like '1396/12/07'.
How can I update persiandate from '1396/12/07' to '1398/12/07', i.e. add 2 years by default?
Answer 0 (score: 0)
Try this:
DECLARE @DataSource TABLE
(
[PersianDate] VARCHAR(12)
);
INSERT INTO @DataSource ([PersianDate])
VALUES ('1396/12/07');
UPDATE @DataSource
SET [PersianDate] = CAST(LEFT([PersianDate], 4) + 2 AS VARCHAR(4)) + RIGHT([PersianDate], 6);
SELECT [PersianDate]
FROM @DataSource;
For SQL Server 2012+ you can use:
CONCAT(LEFT([PersianDate], 4) + 2, RIGHT([PersianDate], 6))
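To apply this to the question's actual table rather than the table variable above, a sketch along these lines should work (dbo.YourTable is a placeholder for the real table name, and the column is assumed to always hold a yyyy/mm/dd string):
UPDATE dbo.YourTable -- placeholder table name
SET [persiandate] = CONCAT(LEFT([persiandate], 4) + 2, RIGHT([persiandate], 6));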
Answer 1 (score: 0)
Hi, this code may help you. It adds @monthCount months to the date string and carries any month overflow into the year:
-- Add @monthCount months to the Persian date string, carrying month overflow into the year
DECLARE @monthCount INT = 24;                    -- 24 months = +2 years
DECLARE @persianDate VARCHAR(10) = '1396/12/07';
SELECT CAST((CAST(SUBSTRING(@persianDate, 6, 2) AS INT) + ISNULL(@monthCount, 0) - 1) / 12
            + CAST(LEFT(@persianDate, 4) AS INT) AS VARCHAR(4))
       + '/'
       + RIGHT('00' + CAST((CAST(SUBSTRING(@persianDate, 6, 2) AS INT) + ISNULL(@monthCount, 0) - 1) % 12 + 1 AS VARCHAR(2)), 2)
       + '/'
       + RIGHT(@persianDate, 2);                 -- '1398/12/07' for the values above
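As a quick sanity check of the carry logic (the values below are just an illustration), adding 3 months to month 11 rolls over into the next year:
DECLARE @m INT = 11, @add INT = 3;                 -- hypothetical month and month offset
SELECT (@m + @add - 1) / 12     AS years_to_add,   -- 1
       (@m + @add - 1) % 12 + 1 AS resulting_month -- 2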
Answer 2 (score: 0)
You can try the following query.
SELECT CONVERT(VARCHAR(10),DATEADD(YEAR,2,CONVERT(DATE, '1396/12/07', 120)),111)
This converts the string to a DATE, adds 2 years with DATEADD, and formats the result back as yyyy/mm/dd (style 111); the same expression can be used in an update query.
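As a sketch of that update query (dbo.YourTable is a placeholder; adjust the table and column names to your schema), with the caveat that this treats the Persian date as a Gregorian one, so it fails for values such as '1396/02/31' that are valid in the Persian calendar but not as Gregorian dates:
UPDATE dbo.YourTable -- placeholder table name
SET [persiandate] = CONVERT(VARCHAR(10), DATEADD(YEAR, 2, CONVERT(DATE, [persiandate], 120)), 111);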