Python multiprocessing StreamHandler

Time: 2015-04-23 21:08:59

Tags: python logging multiprocessing python-3.4

I have the following code (it is only a part of it, cut down for SO; I know it is still a lot).

Note: operating system in use - Windows 7

from multiprocessing import Pool, cpu_count, freeze_support
import os, sys, logging, math, argparse
from filechunkio import FileChunkIO


def _upload_for_multipart(keyname, offset, multipart, part_num, bytes):
    exec_file = os.path.abspath( sys.argv[0] )
    exec_dir = os.path.dirname( exec_file )
    default_logger = dict( pdir=exec_dir, ldir='logs', lname='s3_cli.log',
                           level='INFO', fmt='%(asctime)s %(levelname)s: %(message)s' )
    logger = _get_logger( **default_logger )
    try:
        if multipart.id:
            with FileChunkIO(keyname, 'r', offset=offset, bytes=bytes) as fp:
                logger.info('Start uploading part #%d ...' % part_num )
                #multipart.upload_part_from_file( fp=fp, part_num=part_num )
                logger.info('Uploading part')
                logger.info( 'UPLOADED part #%d' % part_num )
    except Exception as e:
        logger.error( 'FAILED uploading part #%d' % part_num )
        raise


def multipart_upload(bucketname, keyname, **kwargs):
    file_path = keyname
    file_name = os.path.basename(keyname)
    source_size = os.stat(file_name).st_size
    bytes_per_chunk = 10485760
    mp = ''
    chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
    parallel_processes = 1
    #mp = bucket.initiate_multipart_upload( keyname )
    logger.info('Starting multi upload')
    chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
    logger.info( 'File Upload begininning with {0} cores'.format( parallel_processes ) )
    pool = Pool(processes=parallel_processes)
    for i in range( chunk_amount ):
        offset = i * bytes_per_chunk
        remaining_bytes = source_size - offset
        bytes = min( [bytes_per_chunk, remaining_bytes] )
        part_num = i + 1
        pool.apply_async( _upload_for_multipart, [keyname, offset, mp, part_num, bytes] )
    pool.close()
    pool.join()


def _get_logger( pdir, ldir, lname, level, fmt ):
    try:
        logs_dir = os.path.join( pdir, ldir )
        if not os.path.exists( logs_dir ):
            os.makedirs( logs_dir )
    except Exception as e:
        print ('{}'.format(e))
        exit(1)
    logging.basicConfig( filename=os.path.join(logs_dir, lname), level=level, format=fmt )
    return logging.getLogger( lname )


if __name__ == "__main__":
    freeze_support()
    default_bucket = ''
    exec_file = os.path.abspath( sys.argv[0] )
    exec_dir = os.path.dirname( exec_file )
    default_logger = dict( pdir=exec_dir, ldir='logs', lname='s3_cli.log',
                           level='INFO', fmt='%(asctime)s %(levelname)s: %(message)s' )
    logger = _get_logger(**default_logger)
    parser = argparse.ArgumentParser( description="CLI." )
    group = parser.add_mutually_exclusive_group()
    group.add_argument( "-v", "--verbose", action='store_const', const=True,
                        help="Output process messages to stdout channel" )
    args = parser.parse_args()
    logger = _get_logger(**default_logger)
    if args.verbose:
        try:
            print_handler = logging.StreamHandler( sys.stdout )
            print_handler.setLevel( logging.DEBUG )
            formatter = '%(asctime)s %(levelname)s: %(message)s'
            print_handler.setFormatter( formatter )
            logger.addHandler( print_handler )
        except Exception as e:
            print (e)
    multipart_upload(default_bucket, 'large_testfile.log', cores = 6 )

What I am trying to do is log the child-process statements to a log file. I have been successful in that: the log file contains the logs I am looking for. But this is part of a CLI, and I have a -v argument that basically streams the log file to the terminal. Even when I run with verbose, there is no output on the terminal. I am quite new to the logging module, so I do not understand the whole module completely. What am I doing wrong here? Any help would be appreciated.
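
For reference, this is the standalone console-logging pattern the -v branch is meant to reproduce, as a minimal self-contained sketch. The logger name 'my_cli' is only a placeholder, and note that setFormatter is given a logging.Formatter instance here, whereas the script above passes the raw format string directly.

import logging
import sys

# Placeholder logger name for illustration only.
logger = logging.getLogger('my_cli')
logger.setLevel(logging.DEBUG)

# Console handler that writes to stdout.
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.DEBUG)

# setFormatter expects a logging.Formatter object, not a bare format string.
console.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))

logger.addHandler(console)
logger.info('this line should appear on the terminal')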

0 answers:

No answers