Keyboard cannot control the game display

Time: 2017-11-02 06:00:03

Tags: python pygame cursor pycharm

I am trying to build a game with pygame in PyCharm. After I create the game display and try to control it through pygame.event.get(), the cursor still stays in PyCharm and never moves to my game window. When I type, the cursor moves down inside PyCharm, but my game object does not move down.
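For reference, a minimal pygame event loop that moves an object with the keyboard looks roughly like the sketch below (the caption, object, and step size are placeholders, not taken from the question's code); pygame only receives keyboard events while its own window has focus, so the game window has to be clicked once after it opens:

import pygame

pygame.init()
screen = pygame.display.set_mode((640, 480))
pygame.display.set_caption("My Game")  # placeholder caption

x, y = 320, 240            # placeholder object position
clock = pygame.time.Clock()
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
            y += 10        # move the object down one step per key press
    screen.fill((0, 0, 0))
    pygame.draw.rect(screen, (255, 255, 255), (x, y, 20, 20))
    pygame.display.flip()
    clock.tick(60)

pygame.quit()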

import argparse
import os
import time
from queue import Queue
from threading import Thread

import cv2
import numpy as np
import tensorflow as tf

# label_map_util ships with the TensorFlow Object Detection API; the other
# helpers (draw_boxes_and_labels, WebcamVideoStream, FPS) come from the helper
# modules of the demo this snippet is based on, so adjust these import paths
# to your own project layout.
from object_detection.utils import label_map_util
from utils import draw_boxes_and_labels, WebcamVideoStream, FPS  # assumed helper module

# Which model to use.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'

# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'

# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')

NUM_CLASSES = 90

# ## Loading label map

label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)


def detect_objects(image_np, sess, detection_graph):
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

    # Each box represents a part of the image where a particular object was detected.
    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

    # Each score represents the level of confidence for each of the objects.
    # Score is shown on the result image, together with the class label.
    scores = detection_graph.get_tensor_by_name('detection_scores:0')
    classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # Actual detection.
    (boxes, scores, classes, num_detections) = sess.run(
        [boxes, scores, classes, num_detections],
        feed_dict={image_tensor: image_np_expanded})

    # Visualization of the results of a detection.
    rect_points, class_names, class_colors = draw_boxes_and_labels(
        boxes=np.squeeze(boxes),
        classes=np.squeeze(classes).astype(np.int32),
        scores=np.squeeze(scores),
        category_index=category_index,
        min_score_thresh=.5
    )
    return dict(rect_points=rect_points, class_names=class_names, class_colors=class_colors)

# Detection worker: loads the (frozen) TensorFlow model into memory and runs
# detection on every frame pulled from the input queue.

def initiateModel(input_queue, output_queue):
    # Load a (frozen) Tensorflow model into memory.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=.8)
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=True), graph=detection_graph)

    while True:
        frame = input_queue.get()
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        output_queue.put(detect_objects(frame_rgb, sess, detection_graph))

    sess.close()

if __name__ == '__main__':

    # The original snippet uses `args` and `fps` without defining them; a minimal
    # argument parser is assumed here, and the FPS counter is started further below.
    parser = argparse.ArgumentParser()
    parser.add_argument('--video_source', type=int, default=0, help='Device index of the camera.')
    parser.add_argument('--width', type=int, default=640, help='Width of the frames in the video stream.')
    parser.add_argument('--height', type=int, default=480, help='Height of the frames in the video stream.')
    args = parser.parse_args()

    input_queue = Queue(5)   # bounded queue of frames waiting for detection
    output_queue = Queue()   # detection results
    for i in range(1):
        t = Thread(target=initiateModel, args=(input_queue, output_queue ))
        t.daemon = True
        t.start()

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()   # FPS counter from the demo's helper utilities (assumed import)
    frameRate = 0
    start_time = time.time()

    while True:
        frame = video_capture.read()

        frame = cv2.resize(frame, (640,480))
        height, width, channels = frame.shape
        #print("height,width : ", height, width)

        input_queue.put(frame)

        if output_queue.empty():
            pass  # fill up queue
        else:
            font = cv2.FONT_HERSHEY_SIMPLEX
            data = output_queue.get()
            rec_points = data['rect_points']
            class_names = data['class_names']
            class_colors = data['class_colors']

            for point, name, color in zip(rec_points, class_names, class_colors):
                cv2.rectangle(frame, (int(point['xmin'] * args.width), int(point['ymin'] * args.height)),
                              (int(point['xmax'] * args.width), int(point['ymax'] * args.height)), color, 3)
                cv2.rectangle(frame, (int(point['xmin'] * args.width), int(point['ymin'] * args.height)),
                              (int(point['xmin'] * args.width) + len(name[0]) * 6,
                               int(point['ymin'] * args.height) - 10), color, -1, cv2.LINE_AA)
                cv2.putText(frame, name[0], (int(point['xmin'] * args.width), int(point['ymin'] * args.height)), font,
                            0.3, (0, 0, 0), 1)
            cv2.namedWindow("Video",  cv2.WINDOW_NORMAL)
            cv2.imshow('Video', frame)

            if (time.time() >= start_time+1):
                print ("Frame Rate : ", frameRate)
                start_time = time.time()
                frameRate=0
            else:
                frameRate=frameRate+1   

        fps.update()

        #print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()

    video_capture.stop()
    cv2.destroyAllWindows()

How can I lock the cursor onto the game window?

1 answer:

Answer 0 (score: 0)

I think Mouse.position needs to be updated after the setMousePosition call.

Try:

if Mouse.position[0] > width - 1 or Mouse.position[0] < 1 or Mouse.position[1] > height - 1 or Mouse.position[1] < 1:
    render.setMousePosition(own["lastpos"][0], own["lastpos"][1])
else:
    own["lastpos"] = Mouse.position
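As a pygame-side alternative to clamping the mouse position by hand (the Mouse / render calls above are from the answer's own code, not pygame), the cursor can also be confined to the game window with pygame's input grab. A minimal sketch, assuming a plain pygame window:

import pygame

pygame.init()
screen = pygame.display.set_mode((640, 480))

# Confine the mouse to the pygame window and hide the OS cursor.
pygame.event.set_grab(True)
pygame.mouse.set_visible(False)

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
            pygame.event.set_grab(False)   # release the mouse before quitting
            running = False
    screen.fill((0, 0, 0))
    pygame.display.flip()

pygame.quit()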