akka-http: how to use MergeHub to throttle requests from the client

Posted: 2017-09-22 11:01:55

Tags: akka-stream akka-http

I am using Source.queue to queue HttpRequests and throttle them on the client side in order to download files from a remote server. I understand that Source.queue is not thread-safe and that we need to use MergeHub to make it thread-safe. Below is the snippet that uses Source.queue together with cachedHostConnectionPool.

import java.io.File

import akka.actor.Actor
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding
import akka.http.scaladsl.model.{HttpResponse, HttpRequest, Uri}
import akka.stream._
import akka.stream.scaladsl._
import akka.util.ByteString
import com.typesafe.config.ConfigFactory

import scala.concurrent.{Promise, Future}
import scala.concurrent.duration._
import scala.util.{Failure, Success}

class HttpClient extends Actor with RequestBuilding {

    implicit val system = context.system
    val logger = Logging(system, this)
    implicit lazy val materializer = ActorMaterializer()

    val config = ConfigFactory.load()
    val remoteHost = config.getString("pool.connection.host")
    val remoteHostPort = config.getInt("pool.connection.port")
    val queueSize = config.getInt("pool.queueSize")
    val throttleSize = config.getInt("pool.throttle.numberOfRequests")
    val throttleDuration = config.getInt("pool.throttle.duration")

    import scala.concurrent.ExecutionContext.Implicits.global

    val connectionPool = Http().cachedHostConnectionPool[Promise[HttpResponse]](host = remoteHost, port = remoteHostPort)

    // Construct a Queue
    val requestQueue =
        Source.queue[(HttpRequest, Promise[HttpResponse])](queueSize, OverflowStrategy.backpressure)
            .throttle(throttleSize, throttleDuration.seconds, 1, ThrottleMode.shaping)
            .via(connectionPool)
            .toMat(Sink.foreach({
                case (Success(resp), p)  => p.success(resp)
                case (Failure(error), p) => p.failure(error)
            }))(Keep.left)
            .run()

    // Convert Promise[HttpResponse] to Future[HttpResponse]
    def queueRequest(request: HttpRequest): Future[HttpResponse] = {
        val responsePromise = Promise[HttpResponse]()
        requestQueue.offer(request -> responsePromise).flatMap {
            case QueueOfferResult.Enqueued    => responsePromise.future
            case QueueOfferResult.Dropped     => Future.failed(new RuntimeException("Queue overflowed. Try again later."))
            case QueueOfferResult.Failure(ex) => Future.failed(ex)
            case QueueOfferResult.QueueClosed => Future.failed(new RuntimeException("Queue was closed (pool shut down) while running the request. Try again later."))
        }
    }

    def receive = {
        case "download" =>
            val uri = Uri("http://localhost:8080/file_csv.csv")
            downloadFile(uri, new File("/tmp/compass_audience.csv"))
    }

    def downloadFile(uri: Uri, destinationFilePath: File) = {

        def fileSink: Sink[ByteString, Future[IOResult]] =
             Flow[ByteString].buffer(512, OverflowStrategy.backpressure)
             .toMat(FileIO.toPath(destinationFilePath.toPath)) (Keep.right)

        // Submit to queue and execute HttpRequest and write HttpResponse to file
        Source.fromFuture(queueRequest(Get(uri)))
            .flatMapConcat(_.entity.dataBytes)
            .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 10000, allowTruncation = true))
            .map(_.utf8String)
            .map(d => s"$d\n")
            .map(ByteString(_))
            .runWith(fileSink)

    }
}

However, when I use MergeHub, it returns a Sink[(HttpRequest, Promise[HttpResponse]), NotUsed]. I need to extract response.entity.dataBytes and write the response to a file with the fileSink, and I can't figure out how to achieve that with MergeHub. Any help would be appreciated.

val hub: Sink[(HttpRequest, Promise[HttpResponse]), NotUsed] =
    MergeHub.source[(HttpRequest, Promise[HttpResponse])](perProducerBufferSize = queueSize)
        .throttle(throttleSize, throttleDuration.seconds, 1, ThrottleMode.shaping)
        .via(connectionPool)
        .toMat(Sink.foreach({
            case (Success(resp), p)  => p.success(resp)
            case (Failure(error), p) => p.failure(error)
        }))(Keep.left)
        .run()

1 Answer:

Answer 0 (score: 1)

Source.queue is actually thread-safe now. If you want to use MergeHub:

  private lazy val poolFlow: Flow[(HttpRequest, Promise[HttpResponse]), (Try[HttpResponse], Promise[HttpResponse]), Http.HostConnectionPool] =
    Http().cachedHostConnectionPool[Promise[HttpResponse]](host, port, connectionPoolSettings)


  val ServerSink =
    poolFlow.toMat(Sink.foreach({
      case ((Success(resp), p)) => p.success(resp)
      case ((Failure(e), p)) => p.failure(e)
    }))(Keep.left)

  // Attach a MergeHub Source to the consumer. This will materialize to a
  // corresponding Sink.
  val runnableGraph: RunnableGraph[Sink[(HttpRequest, Promise[HttpResponse]), NotUsed]] =
  MergeHub.source[(HttpRequest, Promise[HttpResponse])](perProducerBufferSize = 16).to(ServerSink)


  val toConsumer: Sink[(HttpRequest, Promise[HttpResponse]), NotUsed] = runnableGraph.run()



  protected[akkahttp] def executeRequest[T](httpRequest: HttpRequest, unmarshal: HttpResponse => Future[T]): Future[T] = {
    val responsePromise = Promise[HttpResponse]()
    // Run the request into the shared MergeHub sink; the pool's Sink.foreach completes the promise
    Source.single(httpRequest -> responsePromise).runWith(toConsumer)
    responsePromise.future.flatMap(unmarshal)
  }

}
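
For the original file-download use case, the materialized toConsumer sink can back a queueRequest-style helper just as the Source.queue version did, and the response entity can then be streamed to disk as before. The following is a minimal sketch under that assumption; it reuses the queueRequest and downloadFile names, the imports, and the RequestBuilding mixin (for Get) from the question's snippet, and assumes toConsumer plus an implicit materializer and execution context are in scope. It is an illustration, not part of the original answer.

// Minimal sketch (assumption): feed requests into the MergeHub-backed `toConsumer` sink
// and stream the response entity to a file, reusing the names from the question above.
def queueRequest(request: HttpRequest): Future[HttpResponse] = {
  val responsePromise = Promise[HttpResponse]()
  // Each caller runs a small single-element stream into the shared hub sink;
  // MergeHub makes these concurrent materializations safe.
  Source.single(request -> responsePromise).runWith(toConsumer)
  responsePromise.future
}

def downloadFile(uri: Uri, destinationFilePath: File): Future[IOResult] =
  Source.fromFuture(queueRequest(Get(uri)))
    .flatMapConcat(_.entity.dataBytes)                    // stream the response body
    .runWith(FileIO.toPath(destinationFilePath.toPath))   // write the bytes to the target file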