Node + Express vs. Apache

Posted: 2015-06-02 20:22:34

Tags: node.js apache video express

I am using a nodejs + express server to deploy a website. On that website I will have hundreds of videos (mp4) that I want my users to be able to load and watch.

Right now I deliver the videos by putting them in node's public directory, so the data is served through node and express. I would like to know whether this approach is reasonable, or whether I should set up a separate Apache web server to deliver the videos. How does the performance compare? Is there anything else to consider, such as caching?
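
Roughly, the current setup looks something like this (a minimal sketch; the directory name and port are just placeholders):


const express = require("express");
const app = express();

// express.static (serve-static) answers HTTP Range requests,
// so browsers can seek within the mp4 files without downloading everything first
app.use("/videos", express.static("public/videos"));

app.listen(3000, () => console.log("listening on port 3000"));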

I tried to find relevant data on this but had no success. I have seen that some people do stream video with node (for example here), but I found no performance comparison. I am hoping there will not be much of a difference, since the server only has to read the file contents and write them out, and the I/O should happen at a similar speed either way. Am I forgetting something?

Thanks a lot!

3 answers:

Answer 0 (score: 1):

That sounds like quite a big video service. If the videos will be watched from many different locations and you care about the user experience, you will probably want to use some kind of CDN service.

If you are not familiar with them, these effectively cache copies of your content near the "edge", so users in locations far from your server do not see extra delay. They also tend to adjust dynamically as videos become more or less popular.

You will still need an origin server, which is the one you described above - but now, once a user in a given region has accessed a video, it should be cached in that region, so the next visitor does not need to hit your server.

There are many CDN networks available, and there are even some node.js-specific modules to help with using them (although you can also roll your own) - for example:
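
As a rough illustration, the origin mostly just needs to send cacheable responses so that the edge servers are allowed to keep copies. A minimal sketch with Express (the one-day max-age and the directory are assumptions):


const express = require("express");
const app = express();

// serve-static emits "Cache-Control: public, max-age=86400" for these files,
// which tells a CDN edge it may keep a copy for up to a day
// (the one-day lifetime and the "public/videos" directory are placeholders)
app.use("/videos", express.static("public/videos", { maxAge: "1d" }));

app.listen(3000);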

Answer 1 (score: 0):

Mick's answer is right. To expand on it: at scale, caching should be outsourced.

But if you need to benchmark single-server caching, take a look at the following code, which uses only nodejs core modules. Caching matters, but only when network bandwidth is on the order of gigabytes per second and the hard drive is too slow. The code below streams at close to 2 GB/s from cache hits, while still overlapping all cache misses as asynchronous loads on the event/message queue.


const cache = require("./simplefastvideostreamcache.js").generateVideoCache; 
const chunkSize = 1024*1024; // size (in bytes) of each video stream chunk
const numCachedChunks = 100; // total chunks cached (shared for all video files accessed)
const chunkExpireSeconds = 100; // when a chunk not accessed for 100 seconds, it is marked as removable
const perfCountObj={}; // just to see performance of cache (total hits and misses where each miss resolves into a hit later so hits = miss + cache hit)
setInterval(function(){console.log(perfCountObj);},1000);

const video = cache(chunkSize,numCachedChunks,chunkExpireSeconds, perfCountObj)

const http = require('http'); 
const options = {};
options.agent = new http.Agent({ keepAlive: true });

const server = http.createServer(options,async (req, res) => {                  
    video.stream(req,res);
});

server.listen(8000, "0.0.0.0", () => {
  console.log("Server running");
});

simplefastvideostreamcache.js:


const urlParse = require("url");
const fs = require("fs");
const path = require("path");
const Lru = require("./lrucache").Lru;
const stream = require('stream').Readable;
function generateVideoCache(chunkSize,numCachedChunks,chunkExpireSeconds, perfCountObj)
{
    perfCountObj.videoCacheMiss=0;
    perfCountObj.videoCacheHit=0;
    let videoCache={chunkSize:chunkSize};
    videoCache.cache= new Lru(numCachedChunks, function(key,callbackPrm){
                        perfCountObj.videoCacheMiss++;
                        let callback = callbackPrm;
                        
                        let data=[];
                        let keyArr = key.split("##@@##");
                        let url2 = keyArr[0];
                        let startByte = parseInt(keyArr[1],10);
                        let stopByte = startByte+videoCache.chunkSize;

                        fs.stat(path.join(__dirname,url2),async function(err,stat){
                            if(err)
                            {
                                callback({data:[], maxSize:-1, startByte:-1, stopByte:-1});
                                return;
                            }
                            
                            if(stopByte > stat.size)
                            {
                                stopByte = parseInt(stat.size,10);
                            }

                            if(startByte >= stopByte)
                            {
                                callback({data:[], maxSize:-1, startByte:-1, stopByte:-1});
                                return;
                            }
                            
                                                            
                            let readStream=fs.createReadStream(path.join(__dirname,url2),{start:startByte, end:stopByte});
                            readStream.on("readable",function(){
                                let dataChunk =""; 
                                while(data.length<(stopByte-startByte))
                                {
                                    let dataChunk = readStream.read((stopByte-startByte) - data.length);
                                    if(dataChunk !== null)
                                    {
                                        data.push(dataChunk);
                                    }
                                    else
                                    {
                                        break;
                                    }
                                }

                            });
                            readStream.on("error",function(err){ 
                                callback({data:[], maxSize:-1, startByte:-1, stopByte:-1});
                                return; 
                            });
                            readStream.on("end",function(){  
                                callback({data:Buffer.concat(data), maxSize:stat.size, startByte:startByte, stopByte:stopByte});
                            }); 
                        });         
    },chunkExpireSeconds*1000);

    videoCache.get = function(filePath, offsetByte,callback){
        filePath = decodeURI(urlParse.parse(filePath).pathname);
        let rangeStart = offsetByte;
        let rangeStop = videoCache.chunkSize; 
        // default to the start of the file / one chunk length when not provided
        if(!rangeStart)
        {
            rangeStart=0;
        }

        if(!rangeStop)
        {
            rangeStop = rangeStart + videoCache.chunkSize;
        }
                            
        let dataVideo = [];
        let cacheStart = rangeStart - (rangeStart%videoCache.chunkSize);
        videoCache.cache.get(filePath+"##@@##"+cacheStart,function(video){
            perfCountObj.videoCacheHit++;
            if(video.startByte>=0)
            {
                let offs = rangeStart%videoCache.chunkSize;
                let remain = videoCache.chunkSize - offs;
                if(remain>video.maxSize)
                    remain = video.maxSize;
                if(remain>video.data.length)
                    remain=video.data.length;
                let vidChunk = video.data.slice(offs,offs+remain);
                if(remain>vidChunk.length)
                    remain=vidChunk.length;
                let result={ data:vidChunk, offs:rangeStart, remain:remain, maxSize:video.maxSize};
                callback(result);
                return;
            }
            else
            {
                callback(false);
                return;
            }                               
        });
    };
    videoCache.stream = function(req,res){
        let url2 = decodeURI(urlParse.parse(req.url).pathname);
        let rangeStart = 0;
        let rangeStop = videoCache.chunkSize; 
        if(req.headers.range)
        {
            let spRange = req.headers.range.split("=");
            if(spRange.length>1)
            {
                let spRange2 = spRange[1].split("-");
                if(spRange2.length>1)
                {
                    rangeStart = parseInt(spRange2[0],10);
                    rangeStop = parseInt(spRange2[1],10);
                }
                else if(spRange2.length==1)
                {
                    rangeStart = parseInt(spRange2[0],10);
                    rangeStop = rangeStart + videoCache.chunkSize;
                }
            }
                    
        }
                                                    
        // default to byte 0 / one chunk length when the Range header did not provide them
        if(!rangeStart)
        {
            rangeStart=0;
        }

        if(!rangeStop)
        {
            rangeStop = rangeStart + videoCache.chunkSize;
        }
                            
        let dataVideo = [];
        let cacheStart = rangeStart - (rangeStart%videoCache.chunkSize);
        /* {data:[], maxSize:stat.size, startByte:-1, stopByte:-1} */
                            
        videoCache.cache.get(url2+"##@@##"+cacheStart,function(video){
            if(video.startByte>=0)
            {
                let offs = rangeStart%videoCache.chunkSize;
                let remain = videoCache.chunkSize - offs;
                if(remain>video.maxSize)
                    remain = video.maxSize;
                if(remain>video.data.length)
                    remain=video.data.length;
                let vidChunk = video.data.slice(offs,offs+remain);
                if(remain>vidChunk.length)
                    remain=vidChunk.length;
                            
                res.writeHead(206,{
                    "Content-Range": "bytes " + rangeStart + "-" + (rangeStart+remain-1) + "/" + video.maxSize,
                    "Accept-Ranges": "bytes",
                    "Content-Length": remain,
                    "Content-Type": ("video/"+(url2.indexOf(".mp4")!== -1 ? "mp4" : "ogg"))
                });
                                        

                perfCountObj.videoCacheHit++;
                stream.from(vidChunk).pipe(res);
                return;
            }
            else
            {
                res.writeHead(404);
                perfCountObj.videoCacheHit++;                                   
                res.end("404: mp4/ogg video file not found.");
                return;
            }                               
        });
    }
    return videoCache;
}

exports.generateVideoCache = generateVideoCache;


lrucache.js:


'use strict';

/* 
cacheSize: number of elements in cache, constant, must be greater than or equal to number of asynchronous accessors / cache misses
callbackBackingStoreLoad: user-given cache-miss function to load data from datastore
elementLifeTimeMs: maximum milliseconds before an element is invalidated; it is only invalidated at the next get() call with its key
*/

let Lru = function(cacheSize,callbackBackingStoreLoad,elementLifeTimeMs=1000){
    const me = this;
    
    const maxWait = elementLifeTimeMs;
    const size = parseInt(cacheSize,10);
    const mapping = {};
    const mappingInFlightMiss = {};
    const bufData = new Array(size);
    const bufVisited = new Uint8Array(size);
    const bufKey = new Array(size);
    const bufTime = new Float64Array(size);
    const bufLocked = new Uint8Array(size);
    for(let i=0;i<size;i++)
    {
        let rnd = Math.random();
        mapping[rnd] = i;
        
        bufData[i]="";
        bufVisited[i]=0;
        bufKey[i]=rnd;
        bufTime[i]=0;
        bufLocked[i]=0;
    }
    let ctr = 0;
    let ctrEvict = parseInt(cacheSize/2,10);
    const loadData = callbackBackingStoreLoad;
    let inFlightMissCtr = 0;
    this.reload=function(){
        for(let i=0;i<size;i++)
        {
            bufTime[i]=0;
        }
    };
    this.get = function(keyPrm,callbackPrm){
        const key = keyPrm;
        const callback = callbackPrm;
        
        // stop dead-lock when many async get calls are made
        if(inFlightMissCtr>=size)
        {
            setTimeout(function(){
                me.get(key,function(newData){
                    callback(newData);
                });
            },0);
            return;
        }
        
        // delay the request towards end of the cache-miss completion
        if(key in mappingInFlightMiss)
        {

            setTimeout(function(){
                me.get(key,function(newData){
                    callback(newData);
                });
            },0);
            return;
        }

        if(key in mapping)
        {
            let slot = mapping[key];
            // RAM speed data
            if((Date.now() - bufTime[slot]) > maxWait)
            {
                
                if(bufLocked[slot])
                {                                       
                    setTimeout(function(){
                        me.get(key,function(newData){
                            callback(newData);
                        });
                    },0);
                    
                }
                else
                {
                    delete mapping[key];
                    
                    me.get(key,function(newData){
                        callback(newData);
                    });
                    
                }
                
            }
            else
            {
                bufVisited[slot]=1;
                bufTime[slot] = Date.now();
                callback(bufData[slot]);
            }
        }
        else
        {
            // datastore loading + cache eviction
            let ctrFound = -1;
            while(ctrFound===-1)
            {
                // give slot a second chance before eviction
                if(!bufLocked[ctr] && bufVisited[ctr])
                {
                    bufVisited[ctr]=0;
                }
                ctr++;
                if(ctr >= size)
                {
                    ctr=0;
                }

                // eviction conditions
                if(!bufLocked[ctrEvict] && !bufVisited[ctrEvict])
                {
                    // evict
                    bufLocked[ctrEvict] = 1;
                    inFlightMissCtr++;
                    ctrFound = ctrEvict;
                }

                ctrEvict++;
                if(ctrEvict >= size)
                {
                    ctrEvict=0;
                }
            }
            
            mappingInFlightMiss[key]=1;
            let f = function(res){
                delete mapping[bufKey[ctrFound]];

                bufData[ctrFound]=res;
                bufVisited[ctrFound]=0;
                bufKey[ctrFound]=key;
                bufTime[ctrFound]=Date.now();
                bufLocked[ctrFound]=0;

                mapping[key] = ctrFound;
                callback(bufData[ctrFound]);
                inFlightMissCtr--;
                delete mappingInFlightMiss[key];        
            };
            loadData(key,f);

        }
    };
};
exports.Lru = Lru;
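
For completeness, a minimal usage sketch of the Lru class on its own (the key and the fake backing-store load below are made up):


const Lru = require("./lrucache").Lru;

// 100 slots, 5-second element lifetime; the second argument runs on every cache miss
const cache = new Lru(100, function(key, done){
    // pretend backing-store load; done() hands the loaded value back to the cache
    setTimeout(function(){ done("value-for-" + key); }, 10);
}, 5000);

cache.get("somekey", function(value){
    console.log(value); // first call loads via the miss callback, later calls are served from RAM
});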


Answer 2 (score: 0):

There are a couple of answers here already, but one thing that should be pointed out is that you really shouldn't store the videos on the same server.

The reason is, as I'm sure you know, that Node is single-threaded (yes, it uses multiple threads for some things, child processes and so on), but the event loop is a single thread, which is why clustering is typically used to improve response times and the like.

If you plan to cluster your setup, or even put it in containers, then local storage is not the best place for the files, because there is no guarantee a given file will be there (unless it is copied to every server). Instead, you may want to look at object storage such as S3 (there are plenty of options here: Linode, Digital Ocean, etc.).

Doing that lets you serve the videos from a dedicated media URL such as media.domain.com/video, so you no longer have to worry about any I/O on the app server, and then, as others have said, putting a CDN in front of it helps keep egress costs down.
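
As a rough sketch of that idea (the host name and route are placeholders, not a specific provider's API), the app can simply hand the browser a URL on the media host instead of proxying the bytes itself:


const express = require("express");
const app = express();

// redirect video requests to the dedicated media host / object storage;
// the browser (or the CDN edge in front of it) fetches the bytes from there
app.get("/video/:name", (req, res) => {
    res.redirect(302, "https://media.domain.com/video/" + encodeURIComponent(req.params.name));
});

app.listen(3000);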

This also raises another question about how you plan to send the data. If it is just an mp4, browsers request it in chunks by default, so it starts playing before the download completes, but the user still ends up requesting the full payload, which can get expensive at scale. So if you intend to "stream", you may want to look at a media server that can stream the file, versus downloading the whole thing at once.