Passing arguments to a daemon runner in Python

Date: 2017-03-17 10:58:09

Tags: python daemon

I have a daemon script like this:

#!/usr/bin/python
import time
from daemon import runner

class App():
    def __init__(self):
        self.stdin_path = '/dev/null'
        self.stdout_path = '/dev/tty'
        self.stderr_path = '/dev/tty'
        self.pidfile_path = '/tmp/foo.pid'
        self.pidfile_timeout = 5

    def run(self):
        while True:
            self.manage_process()
            time.sleep(5)

    def manage_process(self):
        initial_list = self.get_process_list()
        for x in initial_list:
            print x.get_process_name()

    def get_process_list(self):
        return process_list()  # gives the list of processes

def main():
    opts = getopts()  # module to parse cmdline arguments
    # opts.action will be 'start' if the command run is "sample_prog.py --action start"
    app = App()
    daemon_runner = runner.DaemonRunner(app)
    daemon_runner.do_action()

if __name__ == '__main__':
    main()

The problem is that I have a separate module to parse the command-line arguments. Using it, I can pass arguments like this: sample_prog.py --action start

I can't figure out how to pass the received value (start|stop|restart) through to daemon_runner. Can anyone tell me if there is a way to do this?

2 answers:

Answer 0 (score: 0)

Do the following:

import sys

def main(arguments):
    # use arguments here
    opts = getopts()  # module to parse cmdline arguments
    # opts.action will be 'start' if the command run is "sample_prog.py --action start"
    app = App()
    daemon_runner = runner.DaemonRunner(app)
    daemon_runner.do_action()

if __name__ == '__main__':
    main(sys.argv)

Defining your main this way gives it the full list of command-line arguments that were passed in.
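Note, though, that runner.DaemonRunner never looks at your parsed options: it reads its action directly from sys.argv[1]. A minimal sketch of a workaround, assuming python-daemon's DaemonRunner behaves that way (true for the releases current when this was asked) and assuming a hypothetical getopts() that returns an object with an .action attribute, is to rewrite sys.argv before constructing the runner:

import sys
from daemon import runner

def main():
    opts = getopts()  # hypothetical: "sample_prog.py --action start" -> opts.action == 'start'
    # DaemonRunner reads its action from sys.argv[1], so hand it the
    # parsed value as if it had been the first positional argument.
    sys.argv = [sys.argv[0], opts.action]
    app = App()
    daemon_runner = runner.DaemonRunner(app)
    daemon_runner.do_action()

if __name__ == '__main__':
    main()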

Answer 1 (score: 0)

It takes a fair amount of bookkeeping to keep a daemon running well: preventing it from consuming all available resources, keeping zombie processes from haunting your system, and so on.

Below is a simplified version of the simplest daemon we run (it keeps a number of worker processes running and recycles them). It calls commandq.run_command() to do the actual work (not included).

If you can get away with a much simpler cron-job instead, do that (you will need a cron-job or something similar anyway to verify that the daemon is still running).

import os, sys, time, argparse, random, signal
import multiprocessing
import psutil

# originally from http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
# now at https://gist.github.com/dcai/1075904/6f7be00f7f411d5c2e7cd1691dcbb68efacb789c
import daemon

# local module that pulls and executes units of work (not included here)
import commandq

def _ensure_dir(*pth):
    path = os.path.join(*pth)
    if os.path.exists(path):
        if not os.path.isdir(path):
            raise RuntimeError("%r is not a directory!" % path)
    else:
        os.makedirs(path, 0775)
    return path

PID_DIRECTORY = _ensure_dir('/var/run/commandq/')
PID_FNAME = 'commandq-worker.pid'
PID_FILE = os.path.join(PID_DIRECTORY, PID_FNAME)

def worker(args, parentpid):
    """Command Queue worker process.
    """
    # number of tasks to process before dying (we can't just keep looping
    # in case the client code has resource leaks..)
    recycle = args.recycle  

    def sleep_or_die(n=0):
        """If our parent died (or got killed), we commit suicide.
           (this is much easier than trying to kill sub-treads from the
           parent).
        """
        # os.getppid() only exists on Linux..
        if os.getppid() != parentpid:  # i.e. parent died
            sys.exit()

        # back off if the system is busy (i.e. don't cause a death spiral..)
        if psutil.cpu_percent() > 70.0:
            time.sleep(25)
            if os.getppid() != parentpid:  # check that parent didn't die
                sys.exit()

        if n > 0:
            time.sleep(n)

    while recycle:
        sleep_or_die()  # don't take all cpu-resources

        try:
            # WORK: pulls a unit of work and executes it
            #       - raises NoWork if work queue is empty
            #       - raises LockException if too much lock
            #         contention (i.e. timeout waiting for lock)
            commandq.run_command()  

        except commandq.NoWork as e:
            # introduce randomness to prevent "harmonics"
            sleeptime = random.randrange(1, 5)
            sleep_or_die(sleeptime)

        except commandq.LockException as e:
            # too much lock contention... back off a little.
            sleep_or_die(random.randrange(3, 10))

        recycle -= 1

def start_workers(count, args):
    "Start ``count`` number of worker processes."
    procs = [multiprocessing.Process(target=worker, args=(args, os.getpid()))
             for _i in range(count)]
    for t in procs:
        t.start()
    return procs

def main(daemon):
    "Daemon entry point."
    args = daemon.args
    procs = start_workers(args.count, args)  # start args.count workers

    while 1:
        # active_children() joins finished processes
        procs = multiprocessing.active_children()
        missing = args.count - len(procs)
        if missing:
            # if any of our workers died, start replacement processes
            procs += start_workers(missing, args)
        time.sleep(5)

        if os.getpid() != daemon.getpid():
            # a second copy has started, i.e. we should stop running
            return

    #[t.join() for t in procs]  # subprocesses will die when they discover that we died.

class WorkerDaemon(daemon.Daemon):
    "Worker daemon."
    args = None

    def run(self):
        "main() does all the work."
        def sigint_handler(signum, frame):
            self.delpid()
            signal.signal(signum, signal.SIG_DFL)
            # re-throw signal (this time without catching it).
            os.kill(os.getpid(), signum)

        # make sure we don't leave sub-processes as zombies when someone
        # kills the daemon process.
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        main(self)

if __name__ == "__main__":
    cpucount = multiprocessing.cpu_count()
    parser = argparse.ArgumentParser(description='Command Queue worker.')
    parser.add_argument(
        '-n', dest='count', type=int, default=cpucount,
        help='number of worker processes (defaults to number of processors).')
    parser.add_argument('action', nargs='?',
                        help='start|stop|restart|status of the worker daemon.')

    # when we move to 2.7 we can use the maxtasksperchild argument to
    # multiprocessing.Pool
    parser.add_argument(
        '--recycle', dest='recycle', type=int, default=400,
        help='number of iterations before recycling the worker thread.')

    _args = parser.parse_args()
    daemon = WorkerDaemon(PID_FILE)
    daemon.args = _args
    if _args.action == 'status':
        if daemon.status():
            print "commandq worker is running."
        else:
            print "commandq worker is NOT running."
        sys.exit(0)
    elif _args.action == 'stop':
        daemon.stop()
    elif _args.action == 'restart':
        daemon.restart()
    else:
        daemon.start()
    sys.exit(0)
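The commandq module is the part that does the actual work and is not included in the answer. For completeness, a minimal stand-in with the interface the worker loop relies on might look like this; it is purely illustrative, so the example can run end-to-end:

# commandq.py -- illustrative stub; the real module executes queued work.

class NoWork(Exception):
    "Raised when the work queue is empty."

class LockException(Exception):
    "Raised on timeout waiting for a queue lock."

def run_command():
    "Pull one unit of work off the queue and execute it."
    raise NoWork()  # stub behaviour: pretend the queue is always empty

With the Daemon base class from the gist on the import path, the script is driven the usual way: python worker.py start (add -n 4 for four workers), python worker.py status, python worker.py stop, or python worker.py restart (the filename worker.py is an assumption).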