How do I go from an OutputStream to a Flux<DataBuffer> in Spring WebFlux?

Asked: 2018-10-18 20:29:45

Tags: spring-boot spring-webflux

I'm building a tarball on the fly and want to stream it straight back to the client, which should be 100% doable with a .tar.gz.

The code below is the closest I've gotten to a DataBuffer after a lot of Googling. Basically, I need something that implements OutputStream and supplies or publishes to a Flux<DataBuffer>, so that I can return it from my method and get streaming output instead of buffering the entire tarball in RAM (which I'm pretty sure is what's happening here). I'm using Apache Commons Compress, which has an excellent API, but it's all based on OutputStream.

I think another way to do this would be to write to the response directly, but I assume that wouldn't be reactive? I'm also not sure how to get an OutputStream out of any kind of response object.
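One bridge I can picture (a rough, untested sketch; it assumes Spring 5's DataBufferUtils.readInputStream plus a java.io piped-stream pair, and streamTarball is just a placeholder name) would look something like this:

import org.springframework.core.io.buffer.DataBuffer
import org.springframework.core.io.buffer.DataBufferUtils
import org.springframework.core.io.buffer.DefaultDataBufferFactory
import reactor.core.publisher.Flux
import reactor.core.scheduler.Schedulers
import java.io.OutputStream
import java.io.PipedInputStream
import java.io.PipedOutputStream

fun streamTarball(writeTo: (OutputStream) -> Unit): Flux<DataBuffer> {
    val input = PipedInputStream()
    val output = PipedOutputStream(input)
    //Run the blocking writer on its own thread; the pipe hands bytes to the
    //reactive reader as they are produced, and the writer blocks when the
    //pipe is full, which gives a crude form of backpressure.
    Schedulers.elastic().schedule {
        output.use { writeTo(it) }
    }
    //Read the pipe in 4096-byte chunks, each chunk becoming one DataBuffer
    return DataBufferUtils.readInputStream({ input }, DefaultDataBufferFactory(), 4096)
}

But I'm not sure piped streams are the idiomatic WebFlux approach, hence the question.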

This is Kotlin on Spring Boot 2.0, btw.

@GetMapping("/cookbook.tar.gz", "/cookbook")
fun getCookbook(): Mono<DefaultDataBuffer> {
    log.info("Creating tarball of cookbooks: ${soloConfig.cookbookPaths}")

    val transformation = Mono.just(soloConfig.cookbookPaths.stream()
            .toList()
            .flatMap {
                Files.walk(Paths.get(it)).map(Path::toFile).toList()
            })
            .map { files ->

                //Will make one giant databuffer... but oh well? TODO: maybe use some kind of chunking.
                val buffer = DefaultDataBufferFactory().allocateBuffer()
                val outputBufferStream = buffer.asOutputStream()

                //Transform my list of stuff into an archiveOutputStream
                TarArchiveOutputStream(GzipCompressorOutputStream(outputBufferStream)).use { taos ->
                    taos.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU)

                    log.info("files to compress: ${files}")

                    for (file in files) {
                        if (file.isFile) {
                            val entry = "cookbooks/" + file.name
                            log.info("Adding ${entry} to tarball")
                            taos.putArchiveEntry(TarArchiveEntry(file, entry))
                            FileInputStream(file).use { fis ->
                                fis.copyTo(taos) //Copy that stuff!
                            }
                            taos.closeArchiveEntry()
                        }
                    }
                }
                buffer
            }

    return transformation
}

1 Answer:

Answer 0 (score: 0)

I puzzled through this and found a working solution. You implement OutputStream, take the bytes written to it, and publish them to a stream. Be sure to override close and send an onComplete. Works great!

//Imports this needs to compile (Spring Boot 2.0 / Reactor 3.1 era):
import mu.KotlinLogging
import org.apache.commons.compress.archivers.tar.TarArchiveEntry
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream
import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream
import org.springframework.core.io.buffer.DataBuffer
import org.springframework.core.io.buffer.DataBufferFactory
import org.springframework.http.server.reactive.ServerHttpResponse
import org.springframework.web.bind.annotation.GetMapping
import org.springframework.web.bind.annotation.RestController
import reactor.core.publisher.Flux
import reactor.core.publisher.UnicastProcessor
import reactor.core.scheduler.Schedulers
import reactor.util.concurrent.Queues
import java.io.FileInputStream
import java.io.OutputStream
import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.Paths
import java.time.Duration
import kotlin.streams.toList

@RestController
class SoloController(
        val soloConfig: SoloConfig
) {
    val log = KotlinLogging.logger { }

    @GetMapping("/cookbooks.tar.gz", "/cookbooks")
    fun streamCookbook(serverHttpResponse: ServerHttpResponse): Flux<DataBuffer> {
        log.info("Creating tarball of cookbooks: ${soloConfig.cookbookPaths}")

        val publishingOutputStream = PublishingOutputStream(serverHttpResponse.bufferFactory())

        //Needs to set up cookbook path as a parent directory, and then do `cookbooks/$cookbook_path/<all files>` for each cookbook path given
        Flux.just(soloConfig.cookbookPaths.stream().toList())
                .doOnNext { paths ->
                    //Transform my list of stuff into an archiveOutputStream
                    TarArchiveOutputStream(GzipCompressorOutputStream(publishingOutputStream)).use { taos ->
                        taos.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU)

                        paths.forEach { cookbookDir ->
                            if (Paths.get(cookbookDir).toFile().isDirectory) {

                                val cookbookDirFile = Paths.get(cookbookDir).toFile()
                                val directoryName = cookbookDirFile.name
                                val entryStart = "cookbooks/${directoryName}"

                                val files = Files.walk(cookbookDirFile.toPath()).map(Path::toFile).toList()

                                log.info("${files.size} files to compress")

                                for (file in files) {
                                    if (file.isFile) {
                                        val relativePath = file.toRelativeString(cookbookDirFile)
                                        val entry = "$entryStart/$relativePath"
                                        taos.putArchiveEntry(TarArchiveEntry(file, entry))
                                        FileInputStream(file).use { fis ->
                                            fis.copyTo(taos) //Copy that stuff!
                                        }
                                        taos.closeArchiveEntry()
                                    }
                                }
                            }
                        }
                    }
                }
                .subscribeOn(Schedulers.parallel()) //run the blocking archive work on another scheduler, off the request thread
                .doOnComplete {
                    publishingOutputStream.close()
                }
                .subscribe()

        return publishingOutputStream.publisher
    }

    class PublishingOutputStream(bufferFactory: DataBufferFactory) : OutputStream() {

        //What the controller returns: emits one DataBuffer per batch of bytes
        val publisher: UnicastProcessor<DataBuffer> = UnicastProcessor.create(Queues.unbounded<DataBuffer>().get())
        //Receives every byte written to this OutputStream, one at a time
        private val bufferPublisher: UnicastProcessor<Byte> = UnicastProcessor.create(Queues.unbounded<Byte>().get())

        init {
            bufferPublisher
                    //Batch individual bytes into chunks of up to 4096 bytes,
                    //flushing whatever has accumulated every 100ms
                    .bufferTimeout(4096, Duration.ofMillis(100))
                    .doOnNext { byteList ->
                        val buffer = bufferFactory.allocateBuffer(byteList.size)
                        buffer.write(byteList.toByteArray())
                        publisher.onNext(buffer)
                    }
                    .doOnComplete {
                        publisher.onComplete()
                    }
                    .subscribeOn(Schedulers.newSingle("publisherThread"))
                    .subscribe()
        }

        override fun write(b: Int) {
            bufferPublisher.onNext(b.toByte())
        }

        override fun close() {
            bufferPublisher.onComplete() //which should trigger the clean up of the whole thing
        }
    }
}
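One possible refinement of this idea (my sketch, not part of the answer above): OutputStream's default write(b, off, len) just calls the single-byte write in a loop, so every compressed byte travels through the unbounded queue individually. Publishing one DataBuffer per bulk write would skip that per-byte hop. ChunkingOutputStream is a hypothetical name, and this assumes the wrapping GzipCompressorOutputStream writes in reasonably sized chunks:

import org.springframework.core.io.buffer.DataBuffer
import org.springframework.core.io.buffer.DataBufferFactory
import reactor.core.publisher.UnicastProcessor
import java.io.OutputStream

class ChunkingOutputStream(private val bufferFactory: DataBufferFactory) : OutputStream() {

    val publisher: UnicastProcessor<DataBuffer> = UnicastProcessor.create()

    override fun write(b: Int) {
        write(byteArrayOf(b.toByte()), 0, 1)
    }

    override fun write(b: ByteArray, off: Int, len: Int) {
        val buffer = bufferFactory.allocateBuffer(len)
        buffer.write(b, off, len) //copy the chunk; the caller may reuse its array
        publisher.onNext(buffer)
    }

    override fun close() {
        publisher.onComplete() //completes the downstream Flux, as in the answer
    }
}

The controller usage stays the same: do the archive work on another scheduler, close the stream when done, and return publisher.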