我想使用带有背压的akka流,使用play-framework 2.5流式传输一个动态创建的zip(不将其完全放在内存中)。这里是我的代码,带有一个小的zip文件(16KB)。当客户端下载与该操作相关的URL时,下载不会启动。
import java.util.zip.{ ZipEntry, ZipOutputStream, GZIPOutputStream }
import akka.stream.scaladsl._
import akka.util.ByteString
import play.api.mvc._
import scala.concurrent.duration._
import java.io.{ BufferedOutputStream, ByteArrayOutputStream }
import scala.concurrent.{ Promise, Future }
import akka.stream.OverflowStrategy
class ZipController extends Controller {

  /** Streams a dynamically generated zip archive to the client with back-pressure,
    * without building the whole archive in memory.
    *
    * `StreamConverters.asOutputStream()` materializes an `OutputStream` whose
    * writes BLOCK until downstream demand arrives. Writing to it synchronously
    * inside `mapMaterializedValue` therefore blocks the materializer thread
    * before the response has even started — which is why the original download
    * never began. The fix is to produce the zip content asynchronously, on a
    * separate thread, via a `Future`.
    */
  def getStreamedZip = Action {
    val source: Source[ByteString, java.io.OutputStream] = StreamConverters.asOutputStream()
    val result = source.mapMaterializedValue { os =>
      // Run the blocking zip writing off the materializer thread; the writes
      // will be throttled by downstream demand (back-pressure).
      Future {
        val zip = new ZipOutputStream(os)
        try {
          (0 to 100).foreach { i =>
            zip.putNextEntry(new ZipEntry("test-zip/README-" + i + ".txt"))
            zip.write("This is the line:\n".getBytes)
            zip.closeEntry()
          }
        } finally {
          zip.close() // flushes and writes the zip central directory
        }
      }(scala.concurrent.ExecutionContext.Implicits.global)
      os
    }
    Ok.chunked(result).withHeaders(
      "Content-Type" -> "application/zip",
      "Content-Disposition" -> "attachment; filename=test.zip"
    )
  }
}
基本上我想在1 GB内存服务器上流式传输2GB的zip文件。此zip文件将由大约15MB的文件组成。是否可以在不完全加载内存中的每个文件的情况下编写zip文件?如果让3个客户以1MB /秒的速度下载zip。大概这些下载需要多少内存?提前谢谢。
答案 0（得分：1）
以下是 https://gist.github.com/kirked/412b5156f94419e71ce4a84ec1d54761 的实现方案：
/* License: MIT */
import com.typesafe.scalalogging.slf4j.StrictLogging
import java.io.{ByteArrayOutputStream, InputStream, IOException}
import java.util.zip.{ZipEntry, ZipOutputStream}
import play.api.libs.iteratee.{Enumeratee, Enumerator}
import scala.concurrent.{Future, ExecutionContext}
/**
* Play iteratee-based reactive zip-file generation.
*/
// Play iteratee-based reactive zip-file generation.
// NOTE(review): the emission protocol below depends on exact side-effect
// ordering (ZipBuffer.bytes drains AND resets the buffer) — read carefully
// before refactoring.
object ZipEnumerator extends StrictLogging {
/**
* A source to zip.
*
* @param filepath The zip-file path at which to store the data.
* @param stream The data stream provider.
*/
case class Source(filepath: String, stream: () => Future[Option[InputStream]])
/**
* Given sources, returns an Enumerator that feeds a zip-file of the source contents.
*/
def apply(sources: Iterable[Source])(implicit ec: ExecutionContext): Enumerator[Array[Byte]] = {
// Lazily resolve each Source's InputStream one at a time; a source whose
// provider yields None terminates the whole enumeration early.
val resolveSources: Enumerator[ResolvedSource] = Enumerator.unfoldM(sources) { sources =>
sources.headOption match {
case None => Future(None)
case Some(Source(filepath, futureStream)) =>
futureStream().map { _.map(stream => (sources.tail, ResolvedSource(filepath, stream)) ) }
}
}
val buffer = new ZipBuffer(8192)
// After all entries are zipped, emit one final chunk: closing the
// ZipOutputStream writes the zip central directory into the buffer.
val writeCentralDirectory = Enumerator.generateM(Future {
if (buffer.isClosed) None
else {
buffer.close
Some(buffer.bytes)
}
})
resolveSources &> zipeach(buffer) andThen writeCentralDirectory
}
// For each resolved source, copy its InputStream into the shared ZipBuffer
// and emit the buffer contents chunk by chunk as a lazy Stream.
private def zipeach(buffer: ZipBuffer)(implicit ec: ExecutionContext): Enumeratee[ResolvedSource, Array[Byte]] = {
Enumeratee.mapConcat[ResolvedSource] { source =>
buffer.zipStream.putNextEntry(new ZipEntry(source.filepath))
var done = false
// Finish the current entry and release its underlying stream.
def entryDone: Unit = {
done = true
buffer.zipStream.closeEntry
source.stream.close
}
// Lazily produce chunks: fill the buffer until it reaches capacity (or the
// entry ends), then emit its bytes (which also resets it) and recurse.
def restOfStream: Stream[Array[Byte]] = {
if (done) Stream.empty
else {
while (!done && !buffer.full) {
try {
// NOTE(review): byte-at-a-time read/write — correct but slow; a
// bulk read into an Array[Byte] would reduce per-byte overhead.
val byte = source.stream.read
if (byte == -1) entryDone
else buffer.zipStream.write(byte)
}
catch {
case e: IOException =>
// An unreadable source truncates that entry but the archive continues.
logger.error(s"reading/zipping stream [${source.filepath}]", e)
entryDone
}
}
buffer.bytes #:: restOfStream
}
}
restOfStream
}
}
// A Source whose InputStream has been opened.
private case class ResolvedSource(filepath: String, stream: InputStream)
// Fixed-capacity staging buffer wrapping a ZipOutputStream; `bytes` drains
// and resets it so zipped output can be emitted incrementally.
private class ZipBuffer(capacity: Int) {
private val buf = new ByteArrayOutputStream(capacity)
private var closed = false
val zipStream = new ZipOutputStream(buf)
// Close once: reset discards any stale bytes so the final chunk contains
// only the central directory written by zipStream.close.
def close(): Unit = {
if (!closed) {
closed = true
reset
zipStream.close // writes central directory
}
}
def isClosed = closed
def reset: Unit = buf.reset
// Buffer is "full" once it holds at least `capacity` bytes; it may briefly
// exceed capacity because ByteArrayOutputStream grows as needed.
def full: Boolean = buf.size >= capacity
// Drain the buffered zip output and reset for the next chunk.
def bytes: Array[Byte] = {
val result = buf.toByteArray
reset
result
}
}
}
用法如下所示:
val s3 = ...
val sources = items.map(item => ZipEnumerator.Source(item.filename, { () => s3.getInputStream(item.storagePath) }))
Ok.chunked(ZipEnumerator(sources))(play.api.http.Writeable.wBytes).withHeaders(
CONTENT_TYPE -> "application/zip",
CONTENT_DISPOSITION -> s"attachment; filename=MyFiles.zip; filename*=UTF-8''My%20Files.zip"
)
答案 1（得分：0）
不知何故上述方法对我没有用。这是我的代码,适用于即时压缩文件并通过播放框架下载。
import java.io.{BufferedOutputStream, ByteArrayInputStream, ByteArrayOutputStream}
import java.util.zip.{ZipEntry, ZipOutputStream}
import akka.stream.scaladsl.{StreamConverters}
import org.apache.commons.io.FileUtils
import play.api.mvc.{Action, Controller}
class HomeController extends Controller {

  /** Serves a single static file as an attachment download. */
  def single() = Action {
    Ok.sendFile(
      content = new java.io.File("C:\\Users\\a.csv"),
      fileName = _ => "a.csv"
    )
  }

  /** Zips a fixed list of files and sends the archive as a chunked response.
    *
    * WARNING: `fileByteData` builds the ENTIRE archive in memory
    * (ByteArrayOutputStream) before streaming begins, so this approach does
    * not scale to archives larger than the available heap — only the HTTP
    * transfer is streamed, not the zipping.
    */
  def zip() = Action {
    Ok.chunked(StreamConverters.fromInputStream(fileByteData)).withHeaders(
      CONTENT_TYPE -> "application/zip",
      // RFC 6266: no spaces around '=' in the filename parameter.
      CONTENT_DISPOSITION -> s"attachment; filename=test.zip"
    )
  }

  /** Zips the hard-coded file list into an in-memory buffer and returns a
    * stream over the resulting bytes.
    *
    * @return an InputStream over the complete zip archive held in memory
    */
  def fileByteData(): ByteArrayInputStream = {
    val fileList = List(
      new java.io.File("C:\\Users\\a.csv"),
      new java.io.File("C:\\Users\\b.csv")
    )
    val baos = new ByteArrayOutputStream()
    val zos = new ZipOutputStream(new BufferedOutputStream(baos))
    try {
      // foreach, not map: this loop is executed purely for its side effects.
      fileList.foreach { file =>
        zos.putNextEntry(new ZipEntry(file.toPath.getFileName.toString))
        zos.write(FileUtils.readFileToByteArray(file))
        zos.closeEntry()
      }
    } finally {
      zos.close() // flushes the buffered stream and writes the central directory
    }
    new ByteArrayInputStream(baos.toByteArray)
  }
}
zip() 的基本思想是先将文件压缩并转换为 ByteArrayInputStream，然后使用 StreamConverters 将其作为分块数据发送。