我在服务器和用Python编写的客户端之间建立了ZeroMQ PUB/SUB
连接。服务器发送消息,客户端将其打印出来。
这些程序在同一台计算机上运行得非常好(Ubuntu 16.04或Windows 7;两者都有效)。
当服务器在Windows 7计算机上且客户端在Ubuntu 16.04计算机上时,它们也可以工作。
但是,如果服务器位于Ubuntu 16.04计算机上,则Windows 7计算机上的客户端无法连接到该计算机。
此外,当我在两台独立的Windows 7计算机之间移动客户端和服务器程序时,没有通信问题。
有谁知道问题可能是什么?
以下是客户端代码:
#Based on code found on this Stack Overflow post:
#https://stackoverflow.com/questions/43817161/how-to-send-opencv-video-footage-over-zeromq-sockets
#
# SUB client: connects to the PUB server, receives base64-encoded PNG
# frames and displays them with OpenCV.
import cv2
import zmq
import base64
import numpy as np

context = zmq.Context()
footage_socket = context.socket(zmq.SUB)
address = 'tcp://192.168.128.229:5555'
footage_socket.connect(address)
# Empty topic filter -> subscribe to every message.
# (Python 3: `unicode` no longer exists; a plain str literal is correct.)
footage_socket.setsockopt_string(zmq.SUBSCRIBE, '')
print("start")
print("connecting to ", address)
while True:
    try:
        frame = footage_socket.recv_string()
        img = base64.b64decode(frame)
        # np.frombuffer replaces the deprecated np.fromstring.
        npimg = np.frombuffer(img, dtype=np.uint8)
        source = cv2.imdecode(npimg, 1)  # 1 = decode as 3-channel BGR
        cv2.imshow("image", source)
        cv2.waitKey(1)
    except KeyboardInterrupt:
        cv2.destroyAllWindows()
        print("\n\nBye bye\n")
        break
这是服务器代码:
#Based on code found on this Stack Overflow post:
#https://stackoverflow.com/questions/43817161/how-to-send-opencv-video-footage-over-zeromq-sockets
#
# PUB server: grabs webcam frames, PNG-encodes them and publishes them
# as base64 strings on port 5555.
import cv2
import zmq
import base64

context = zmq.Context()
footage_socket = context.socket(zmq.PUB)
# CONFLATE ("keep only the newest message") must be set BEFORE bind()
# or it has no effect on the connection.
footage_socket.setsockopt(zmq.CONFLATE, 1)
footage_socket.bind('tcp://*:5555')
camera = cv2.VideoCapture(0)  # device 0 = default webcam
while True:
    try:
        (grabbed, frame) = camera.read()
        frame = cv2.resize(frame, (640, 480))
        encoded, buffer = cv2.imencode('.png', frame)
        # Python 3: b64encode returns bytes; decode so send_string gets str.
        footage_socket.send_string(base64.b64encode(buffer).decode('ascii'))
    except KeyboardInterrupt:
        camera.release()
        cv2.destroyAllWindows()
        print("\n\nBye bye\n")
        break
答案 0(得分:0):
首先,注意 Python 2 与 Python 3 的差异:原来用 string 封装的内容,在 Python 3.0+ 中变成了 bytes,
这会在消息发布之后引出很多问题。
其次,PUB/SUB 的主题过滤在较新的 ZeroMQ 版本中发生在 PUB 一侧,
而在早期版本中则是在 SUB 一侧进行的,因此版本很重要。
性能方面:尽可能避免在核心数据流"之外"产生过多的开销。
使用 base64.{b64encode()|b64decode()} 就是这种不利于性能的"舒适做法"的一个例子,
它并没有带来任何足够的好处。
对任何基于 cv2 的帧处理,建议改用 struct.pack( aPayloadMASK, .data, OptionA, OptionB, OptionC )
之类的二进制打包方式,并实际测量和调整其成本——例如色彩空间转换在 Raspberry Pi 上
可能需要约 +170 [ms](见左下方窗口)。
之后还可以用非常低的代价把更多细节加入 aMessagePAYLOAD
(例如 aSeqNumUINT8、aUsecTimeSTAMP 这样的序号与时间戳——无论是轮换的 PUB,
还是其他任何想要附加的内容)。下面是 PUB 一侧的示例:
# PUB side: publish raw BGR frame bytes (no base64 detour).
import numpy as np
import zmq
import cv2

print( "INF: this process uses {0: >10s} v.{1:}".format( "ZMQ",   zmq.__version__ ) )
print( "INF: this process uses {0: >10s} v.{1:}".format( "NUMPY", np.__version__ ) )

# ... context creation / aPubSOCKET = context.socket( zmq.PUB ) / .bind() elided ...

aPubSOCKET.setsockopt( zmq.LINGER,    0 )
aPubSOCKET.setsockopt( zmq.IMMEDIATE, 1 )
aPubSOCKET.setsockopt( zmq.TOS,       aTypeOfSERVICE_PREFERENCE_ID )
aPubSOCKET.setsockopt( zmq.SNDHWM,    4 )
aPubSOCKET.setsockopt( zmq.SNDBUF,    4 * 640 * 480 * 3 )
aPubSOCKET.setsockopt( zmq.CONFLATE,  1 )

HiResCAM = cv2.VideoCapture( 0 )

while True:
    try:
        # Acquire a FRAME and ship its raw bytes. OpenCV 2.2+ natively uses
        # numpy arrays H * W * D of uint8 in BGR ordering (BGRA if alpha used),
        # so .tobytes() is already the wire format. Do NOT funnel the buffer
        # through struct.pack( ">...B", buf ): a "B"-per-pixel format string
        # expects 640*480*3 separate integer arguments and raises TypeError
        # when handed a single buffer.
        ( _ , cv2HiResFRAME ) = HiResCAM.read()
        aPubSOCKET.send( cv2.resize( cv2HiResFRAME, ( 640, 480 ) ).tobytes(),
                         zmq.NOBLOCK )
    except KeyboardInterrupt:
        # break belongs here, NOT in a finally-clause: a `finally: break`
        # runs every iteration and would exit after the very first frame.
        break

# gracefully terminate all OpenCV resources
HiResCAM.release()
# gracefully terminate all ZeroMQ resources
aPubSOCKET.close()
SUB 一侧的示例:
# SUB side: receive raw BGR frame bytes and rebuild the image for display.
import numpy as np
import zmq
import cv2

print( "INF: this process uses {0: >10s} v.{1:}".format( "ZMQ",   zmq.__version__ ) )
print( "INF: this process uses {0: >10s} v.{1:}".format( "NUMPY", np.__version__ ) )

# ... context creation / aSubSOCKET = context.socket( zmq.SUB ) / address elided ...

aSubSOCKET.setsockopt( zmq.LINGER,     0 )
aSubSOCKET.setsockopt( zmq.CONFLATE,   1 )
aSubSOCKET.setsockopt( zmq.MAXMSGSIZE, 100 + 640 * 480 * 3 )
aSubSOCKET.setsockopt( zmq.RCVHWM,     4 )
aSubSOCKET.setsockopt( zmq.RCVBUF,     4 * 640 * 480 * 3 )
aSubSOCKET.connect( address )
# Python 3: topic filters are bytes, not str.
aSubSOCKET.setsockopt( zmq.SUBSCRIBE, b"" )

while True:
    try:
        # struct.decode() does not exist, and cv2.imshow() needs an image
        # array anyway — rebuild the 480x640 BGR frame straight from the
        # raw byte payload.
        raw   = aSubSOCKET.recv()
        frame = np.frombuffer( raw, dtype = np.uint8 ).reshape( 480, 640, 3 )
        cv2.imshow( "LAST IMAGERY AVAILABLE:", frame )
        cv2.waitKey( 1 )
    except KeyboardInterrupt:
        break

# gracefully terminate all OpenCV resources
cv2.destroyAllWindows()
# gracefully terminate all ZeroMQ resources
aSubSOCKET.close()
- 侧:function releaseSecond() {
// After you do your code, then go and focus the second element
// NOTE(review): this JavaScript snippet appears to be unrelated scraping
// residue from another page; it only moves keyboard focus to the element
// with id="second".
document.getElementById('second').focus();
}