双向流转换器的设计模式

时间:2014-06-27 21:23:38

标签: javascript node.js design-patterns bidirectional

我想将网络协议实现对象设计为完全与套接字无关,并且纯粹作为双向转换器。因此,协议对象应该从“控制”侧提供对象或命令,并从“网络”侧发出字节,并从“网络”侧接受字节以转换为对象/响应并从“控制”侧发出。

我无法在Node.js中选择优雅的设计模式。我希望它完全Stream兼容,到目前为止我最终采用了这种方法:

// Example wiring: the protocol object is socket-agnostic; its aux side is
// piped to/from the network socket in both directions.
socket = getSocketSomehow();
proto = new Protocol();

// Network bytes flow into proto.aux; composed bytes flow back out to the socket.
socket.pipe(proto.aux);
proto.aux.pipe(socket);

// The ctl side accepts command objects and emits response objects.
proto.write({ foo: 'this', bar: ['is', 'command'] });
proto.once('data', function(response) {
    console.log('this is response: ' + response.quux);
});

proto 是由两个交叉连接的双工流组成的组合:proto 本身是一个 stream.Duplex,另一个是 proto.aux。传入的网络数据进入 proto.aux,被解析后以对象形式从 proto 发出;传入的对象写入 proto,被组装成字节后从 proto.aux 发出。

有更好的方法吗?

1 个答案:

答案 0 :(得分:0)

我以下面的方法结束了。代码示例使用CoffeeScript以提高可读性。


Bond类实现了Duplex流接口,但将两个不相关的流绑定在一起,因此读写代理到不同的流。

'use strict'

{ EventEmitter } = require 'events'

# Bond presents a Duplex-style stream interface over two otherwise
# unrelated streams: Readable methods and events are proxied to one
# stream, Writable methods and events to the other.
class Bond extends EventEmitter
    # Class-body helper (`@` is the Bond class itself here): install a
    # prototype method that forwards the call, with all its arguments, to
    # the bound readable stream.
    proxyReadableMethod = (method) =>
        @::[method] = -> @_bondState.readable[method] arguments...

    # Same, but forwarding to the bound writable stream.
    proxyWritableMethod = (method) =>
        @::[method] = -> @_bondState.writable[method] arguments...

    # Readable-side interface.
    proxyReadableMethod 'read'
    proxyReadableMethod 'setEncoding'
    proxyReadableMethod 'resume'
    proxyReadableMethod 'pause'
    proxyReadableMethod 'pipe'
    proxyReadableMethod 'unpipe'
    proxyReadableMethod 'unshift'
    proxyReadableMethod 'wrap'

    # Writable-side interface.
    proxyWritableMethod 'write'
    proxyWritableMethod 'end'

    # readable - stream that backs the Readable side of this Bond.
    # writable - stream that backs the Writable side of this Bond.
    constructor: (readable, writable) ->
        super

        @_bondState = {}
        @_bondState.readable = readable
        @_bondState.writable = writable

        # Re-emit `event` from `obj` on this Bond, preserving the original
        # listener arguments (`arguments` belongs to the inner listener).
        proxyEvent = (obj, event) =>
            obj.on event, => @emit event, arguments...

        proxyEvent readable, 'readable'
        proxyEvent readable, 'data'
        proxyEvent readable, 'end'
        proxyEvent readable, 'close'
        # NOTE(review): 'error' proxying is deliberately disabled here —
        # presumably the owner handles errors itself (Protocol re-emits its
        # transforms' errors); confirm before enabling.
        # proxyEvent readable, 'error'

        proxyEvent writable, 'drain'
        proxyEvent writable, 'finish'
        proxyEvent writable, 'pipe'
        proxyEvent writable, 'unpipe'
        # proxyEvent writable, 'error'

module.exports = Bond

Protocol 汇总了两个内部 Transform 流——Parser 和 Composer。Parser 获取来自 aux 一侧的数据并将其转换为 ctl 一侧的内容,而 Composer 执行相反的操作。aux 和 ctl 都是由 Parser 与 Composer 组成的 Bond,只是方向相反——所以 aux 只负责“编组”数据的进出,而 ctl 一侧发出并接受“解析”后的数据。我的设计决定是让 Protocol 本身充当 ctl 一侧,并将 aux 作为实例变量公开。

Protocol曝光:

  • _parse 和 _compose——类似 _transform 的方法
  • _parseEnd 和 _composeEnd——类似 _flush 的方法
  • parsed 和 composed——类似 push 的方法
  • unparse 和 uncompose——类似 unshift 的方法
'use strict'

Bond = require './bond'
BacklogTransform = require './backlog-transform'

# Protocol aggregates two internal Transform streams:
#   parser   - aux-side input  -> ctl-side output (_parse/_parseEnd)
#   composer - ctl-side input  -> aux-side output (_compose/_composeEnd)
# The instance itself is the ctl-side Bond (write command objects, read
# parsed results); `@aux` is the crosswired Bond for the network side.
class Protocol extends Bond
    # options - stored and read by the transform classes (including their
    #           auxObjectMode / mainObjectMode fields).
    constructor: (options) ->
        # Instance state is set up before `super` — valid for CoffeeScript
        # 1.x prototype-based classes.
        @_protocolState = {}
        @_protocolState.options = options
        parser = @_protocolState.parser = new ParserTransform @
        composer = @_protocolState.composer = new ComposerTransform @

        # Debug-friendly labels for the two transforms.
        parser.__name = 'parser'
        composer.__name = 'composer'

        # Re-emit `event` from `source` on this Protocol, preserving the
        # original listener arguments.
        proxyEvent = (source, event) =>
            source.on event, =>
                @emit event, arguments...

        proxyParserEvent = (event) =>
            proxyEvent @_protocolState.parser, event

        proxyComposerEvent = (event) =>
            proxyEvent @_protocolState.composer, event

        # Surface transform errors on the Protocol itself (Bond leaves
        # 'error' un-proxied).
        proxyParserEvent 'error'
        proxyComposerEvent 'error'

        # ctl side: read from the parser, write into the composer.
        super @_protocolState.parser, @_protocolState.composer
        # aux side: crosswired — read from the composer, write into the parser.
        @aux = @_protocolState.aux = new Bond @_protocolState.composer, @_protocolState.parser
        # @_protocolState.main = @main = new Bond @_protocolState.parser, @_protocolState.composer

    # push-like: emit a parsed chunk/object on the ctl side.
    parsed: (chunk, encoding) ->
        @_protocolState.parser.push chunk, encoding

    # push-like: emit a composed chunk on the aux side.
    composed: (chunk, encoding) ->
        @_protocolState.composer.push chunk, encoding

    # unshift-like: put unconsumed aux-side input back into the parser.
    unparse: (chunk, encoding) ->
        @_protocolState.parser.unshift chunk, encoding

    # unshift-like: put unconsumed ctl-side input back into the composer.
    uncompose: (chunk, encoding) ->
        @_protocolState.composer.unshift chunk, encoding

    # Abstract hooks — subclasses must implement these two.
    _parse: (chunk, encoding, callback) ->
        throw new TypeError 'not implemented'

    _compose: (chunk, encoding, callback) ->
        throw new TypeError 'not implemented'

    # Optional end-of-stream hooks; default to immediate success.
    _parseEnd: (callback) ->
        callback()

    _composeEnd: (callback) ->
        callback()

# Aux -> ctl direction: delegates chunk handling to the owning protocol's
# _parse/_parseEnd hooks.
class ParserTransform extends BacklogTransform
    constructor: (protocol) ->
        @protocol = protocol
        { options } = @protocol._protocolState
        super options, options.auxObjectMode, options.mainObjectMode

    __transform: (chunk, encoding, callback) ->
        @protocol._parse chunk, encoding, callback

    __flush: (callback) ->
        @protocol._parseEnd callback

# Ctl -> aux direction: delegates chunk handling to the owning protocol's
# _compose/_composeEnd hooks.
class ComposerTransform extends BacklogTransform
    constructor: (protocol) ->
        @protocol = protocol
        { options } = @protocol._protocolState
        super options, options.mainObjectMode, options.auxObjectMode

    __transform: (chunk, encoding, callback) ->
        @protocol._compose chunk, encoding, callback

    __flush: (callback) ->
        @protocol._composeEnd callback

module.exports = Protocol

BacklogTransform 是一个实用类,它扩展了 Transform 流:在 _transform 执行期间可以通过调用 unshift 方法把未消费的块放回队列,这样被放回的数据会在下一次 _transform 时预先拼接在新块之前重新出现。不幸的是,实现并不像我希望的那样理想……

'use strict'

async = require 'async'
stream = require 'stream'

# BacklogTransform is a Transform stream whose subclasses may, during
# `__transform`, push unconsumed input back onto an internal backlog via
# `unshift`; backlogged chunks are re-delivered ahead of the next incoming
# chunk. Subclasses override `__transform` (required) and `__flush`
# (optional) instead of `_transform`/`_flush`.
class BacklogTransform extends stream.Transform
    # options            - stream.Transform options (may be null/undefined).
    # writableObjectMode - overrides options.writableObjectMode when given.
    # readableObjectMode - overrides options.readableObjectMode when given.
    constructor: (options, writableObjectMode, readableObjectMode) ->
        options ?= {}

        super options
        # NOTE(review): reaches into stream internals to set per-side
        # object modes — presumably because Transform had no split
        # writable/readableObjectMode options when this was written.
        @_writableState.objectMode = writableObjectMode ? options.writableObjectMode
        @_readableState.objectMode = readableObjectMode ? options.readableObjectMode
        @_backlogTransformState = {}
        @_backlogTransformState.backlog = []

    # Put a chunk back at the front of the backlog so it is re-transformed
    # before any newer data.
    unshift: (chunk, encoding = null) ->
        if @_writableState.decodeStrings
            # Mirror the writable side's decodeStrings behaviour.
            # NOTE(review): `new Buffer` is deprecated in modern Node —
            # prefer Buffer.from when upgrading.
            chunk = new Buffer chunk, encoding ? @_writableState.defaultEncoding

        @_backlogTransformState.backlog.unshift { chunk, encoding }

    # Drain the backlog through `__transform`, then invoke `callback`
    # (with an error if any transform step failed).
    _flushBacklog: (callback) ->
        backlog = @_backlogTransformState.backlog

        if backlog.length
            if @_writableState.objectMode
                # Object mode: deliver backlogged items one at a time.
                # `next {}` passes a truthy sentinel "error" purely to stop
                # async.forever once the backlog empties; the final handler
                # then finds `err` undefined and reports success.
                async.forever(
                    (next) =>
                        return next {} if not backlog.length

                        { chunk, encoding } = backlog.shift()
                        @__transform chunk, encoding, (err) ->
                            return next { err } if err?

                            next null

                    ({ err }) ->
                        return callback err if err?

                        callback()
                )
            else
                # Byte/string mode: coalesce the whole backlog into one
                # chunk and transform it in a single call.
                chunks = (chunk for { chunk, encoding } in backlog)

                if @_writableState.decodeStrings
                    encoding = 'buffer'
                    chunk = Buffer.concat chunks
                else
                    # Keep a common encoding only if every item agrees,
                    # otherwise fall back to null.
                    encoding = backlog[0].encoding
                    for item in backlog[1..]
                        if encoding != item.encoding
                            encoding = null
                            break

                    chunk = chunks.join ''

                @_backlogTransformState.backlog = []
                @__transform chunk, encoding, callback
        else
            callback()

    # Incoming data first queues behind any existing backlog so delivery
    # order is preserved; with an empty backlog it goes straight through.
    _transform: (chunk, encoding, callback) ->
        backlog = @_backlogTransformState.backlog

        if backlog.length
            backlog.push { chunk, encoding }

            @_flushBacklog callback
        else
            @__transform chunk, encoding, callback

    # Drain any remaining backlog, then let the subclass finish.
    _flush: (callback) ->
        @_flushBacklog (err) =>
            # FIX: the original ignored errors from the backlog flush and
            # called __flush unconditionally; propagate them instead.
            return callback err if err?

            @__flush callback

    # Subclass hook: transform one chunk (required).
    __transform: (chunk, encoding, callback) ->
        throw new TypeError 'not implemented'

    # Subclass hook: finalize the stream (optional; defaults to success).
    __flush: (callback) ->
        callback()

module.exports = BacklogTransform