Embedding a JavaScript map in a VueJS component

Date: 2018-03-22 03:27:24

Tags: javascript vue.js vuejs2 vue-component

  1. I am trying to embed a map, written in JavaScript, into a Vue component.
  2. I downloaded the map from this website: Sri Lanka Map

  3. In the documentation that comes with the map files (screenshot not reproduced here):

  4. They say to include some JS and CSS files in the head section.

    For example: <link href="jsmaps/jsmaps.css" rel="stylesheet" type="text/css" />

Where is the head section in my template, and how do I inject the JS/CSS files into it?

1 Answer:

Answer 0 (score: 1)

There are several ways to manipulate the head section of a single-page application. Currently you can use a Vue plugin called Vue Meta; check out this repository: https://github.com/declandewet/vue-meta
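For instance, here is a minimal sketch of what that could look like with vue-meta; the plugin registration follows vue-meta's standard setup, and the /jsmaps/jsmaps.js path is a hypothetical placeholder for whatever script files the map package actually ships:

    // main.js -- register the vue-meta plugin once (standard vue-meta setup)
    import Vue from 'vue'
    import VueMeta from 'vue-meta'

    Vue.use(VueMeta)

    // MapComponent.vue <script> block -- metaInfo is vue-meta's per-component
    // option; the link and script entries below are rendered into the
    // document head. '/jsmaps/jsmaps.js' is a hypothetical filename.
    export default {
      name: 'SriLankaMap',
      metaInfo: {
        link: [
          { rel: 'stylesheet', type: 'text/css', href: '/jsmaps/jsmaps.css' }
        ],
        script: [
          { src: '/jsmaps/jsmaps.js', type: 'text/javascript' }
        ]
      }
    }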

But if you don't want to get your hands dirty, you can use Nuxt.js; take a look at https://nuxtjs.org/. Nuxt.js provides an easy way to create a project with Vue.js, and you only need to edit the nuxt.config.js file as described here: https://nuxtjs.org/api/configuration-head
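As a sketch, the global includes for the map assets could look like this in nuxt.config.js (the /jsmaps/jsmaps.js path is again a hypothetical placeholder mirroring the stylesheet above):

    // nuxt.config.js -- minimal sketch of Nuxt's global head configuration;
    // link and script entries here are rendered into every page's head.
    // '/jsmaps/jsmaps.js' is a hypothetical path.
    module.exports = {
      head: {
        link: [
          { rel: 'stylesheet', type: 'text/css', href: '/jsmaps/jsmaps.css' }
        ],
        script: [
          { src: '/jsmaps/jsmaps.js' }
        ]
      }
    }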

You can even define the head section for a specific page, as mentioned in the comments above.
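A sketch of that per-page variant, using the head() method a Nuxt page component can define (paths hypothetical as above):

    // pages/map.vue <script> block -- Nuxt merges this head() result into
    // the global configuration, so the jsmaps assets load only on this page.
    // The '/jsmaps/*' paths are hypothetical placeholders.
    export default {
      head () {
        return {
          link: [
            { rel: 'stylesheet', href: '/jsmaps/jsmaps.css' }
          ],
          script: [
            { src: '/jsmaps/jsmaps.js' }
          ]
        }
      }
    }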