Cannot get clientWidth when testing a React component

Date: 2018-06-03 11:51:35

Tags: javascript reactjs jestjs enzyme

I am trying to write unit tests for a React component with Jest and Enzyme. Basically, my component has a window resize listener, and when the window is resized the component updates its state. But I cannot get the component's clientWidth. Below is some code from my component.

import React, { Component } from "react";
import moment from "moment";
// import PropTypes from "prop-types";

import Table from "./Table";
import Grid from "./Grid";
import ActionBlock from "../ActionBlock";
import ConfirmDialog from './ConfirmDialog';
import ReactTooltip from 'react-tooltip'
import { debounce } from '../../utils';
import styles from './styles.scss';

export default class Pagination extends Component {

  constructor(props) {
    super(props);
    this.state = {
      index: props.index,
      type: props.type,
      config: props.config,
      data: props.data,
      currentPage: 1,
      dataPerPage: 20,
      enableActionBlock: props.enableActionBlock,
      confirmDialogIndex: null,
      confirmDialogActionName: null,
      confirmDialogData: null,
      width: 0
    };

    this.handleWindowResize = debounce(this.handleWindowResize.bind(this), 100); // debounce the resize handler (100 ms delay)
  }

  componentDidMount() {
    this.setState({ width: this.refs.pagination_wrapper.clientWidth })
    window.addEventListener('resize', this.handleWindowResize)
  }

  componentWillUnmount() {
    window.removeEventListener('resize', this.handleWindowResize);
  }

  handleWindowResize = () => {
    this.setState({ width: this.refs.pagination_wrapper.clientWidth })
  }

  render() {
    return (
      <div ref="pagination_wrapper" className={styles.pagination_wrapper}>
        <ReactTooltip />
        {this.renderViewType()}
        {this.renderConfirmDialog()}
      </div>
    )
  }
}

Pagination.defaultProps = {
  enableActionBlock: true,
  dataPerPage: 20
};

Below is my test code.

import React from 'react'
import { shallow, mount, render } from 'enzyme';
import Pagination from '../index';
let img = 'https://www.jqueryscript.net/images/Simplest-Responsive-jQuery-Image-Lightbox-Plugin-simple-lightbox.jpg';
let imageStream = 'http://192.168.100.125:8080/';
let imgQuoteError = `http://192.168.100.71/target-data/fr/target-person-images/1111112222233333@Rizkifika-Asanuli'nam/qTD8vYa.jpeg`;

describe('Testing Pagination', () => {

  let action = (actionName, indexData) => {
    console.log('action APP', actionName, indexData);
  }

  let dataListProps = {
    index: 'id',
    type: 'grid',
    config: [
      { text: 'Image', type: 'image', textPath: 'image', textColor: 'red', valuePath: 'image' },
      { text: 'Fullname', type: 'string', textPath: 'fullname', valuePath: 'fullname' },
      { text: 'Role', type: 'string', textPath: 'role', valuePath: 'role' },
      { text: 'Datetime', type: 'date', textPath: 'datetime', valuePath: 'datetime' },
      { text: 'Json', type: 'json', textPath: 'json', valuePath: 'json' },
    ],
    data: [
      { id: 305, created_at: '2018-02-23T09:43:08.928Z', rule_detail: { id: 1 }, cam_detail: { id: 2, name: 'kamera huawei' }, vas_detail: { id: 3, name: 'VAS 3' }, image: img },
      { id: 306, created_at: '2018-02-23T09:43:08.928Z', rule_detail: { id: 2, name: '' }, cam_detail: { id: 3, name: 'kamera avigilon' }, vas_detail: { id: 4, name: 'VAS 4' }, image: imageStream },
      { id: 306, created_at: '2018-02-23T09:43:08.928Z', rule_detail: { id: 2, name: null }, cam_detail: { id: 3, name: 'kamera avigilon' }, vas_detail: { id: 4, name: 'VAS 4' }, image: imgQuoteError },
      { id: 306, created_at: '2018-02-23T09:43:08.928Z', rule_detail: { id: 2, name: 'Crowd Behaviour' }, cam_detail: { id: 3, name: 'kamera avigilon' }, vas_detail: { id: 4, name: 'VAS 4' }, image: imageStream },
    ],
    onAction: action,
    enableActionBlock: false
  }

  it('snapshot', () => {
    const wrapper = shallow(<Pagination {...dataListProps}/>)
    expect(wrapper).toMatchSnapshot();
  })
})

I need help solving this problem.

2 Answers:

Answer 0 (score: 0)

You can access the window object from within your component, so you could read the window.innerWidth field instead; I think that is what you are looking for.
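A minimal sketch of that idea in a Jest/jsdom test, assuming the Pagination component and dataListProps from the question, and assuming the component were changed to read window.innerWidth; the value 500 is illustrative, and fake timers are used because the handler is debounced by 100 ms:

import { mount } from 'enzyme';

jest.useFakeTimers();

it('updates width on window resize', () => {
  const wrapper = mount(<Pagination {...dataListProps} />);

  // jsdom performs no real layout, but window.innerWidth is writable,
  // so the test can fake a viewport change and fire the listener.
  window.innerWidth = 500;
  window.dispatchEvent(new Event('resize'));

  // The component debounces its handler by 100 ms; flush that timer.
  jest.advanceTimersByTime(100);

  // This assertion would hold only if the component read
  // window.innerWidth instead of this.refs.pagination_wrapper.clientWidth.
  expect(wrapper.state('width')).toBe(500);
});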

Answer 1 (score: 0)

As Xarvalus pointed out, if you want to access refs, you have to mount the component first, using mount from:

import { shallow, mount, render } from 'enzyme';
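A short sketch of why mount matters here, assuming Enzyme 3 and a jsdom test environment: with shallow rendering the string ref is never attached, so this.refs.pagination_wrapper is undefined and reading .clientWidth in componentDidMount throws.

import { mount } from 'enzyme';

it('attaches the ref when mounted', () => {
  // shallow(<Pagination {...dataListProps} />) would fail in
  // componentDidMount: string refs only exist against a real DOM.
  const wrapper = mount(<Pagination {...dataListProps} />);
  const node = wrapper.instance().refs.pagination_wrapper;

  // The ref is now a real DOM node, but jsdom computes no layout,
  // so clientWidth is 0 rather than an actual pixel width.
  expect(node).toBeDefined();
  expect(wrapper.state('width')).toBe(0);
});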

But it will hit a bug (RangeError: Invalid string length) when snapshotting the mounted wrapper. To overcome this, we have to convert the enzyme wrapper to JSON using enzyme-to-json.

Full working code:

import toJson from 'enzyme-to-json';
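Only the import survives in this copy of the answer; a plausible reconstruction of the full test, assuming the Pagination component and dataListProps from the question, might look like:

import React from 'react';
import { mount } from 'enzyme';
import toJson from 'enzyme-to-json';
import Pagination from '../index';

it('snapshot', () => {
  const wrapper = mount(<Pagination {...dataListProps} />);
  // Serialising the wrapper through toJson avoids the
  // "RangeError: Invalid string length" seen when snapshotting
  // a mounted enzyme wrapper directly.
  expect(toJson(wrapper)).toMatchSnapshot();
});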

reference issue