I have this script that I was able to hack together and get working.
However, I know it is very inefficient, and this is definitely a chance to learn from others how to approach the problem effectively.
Here is the code (brace yourself):
#!/usr/bin/env python
from __future__ import print_function
from functools import wraps
from pprint import pprint
import sys
import requests
import datetime
import acos_client as acos
import json
import influxdb
from influxdb import client as influxdb
# Define InfluxDB Client Information
db = influxdb.InfluxDBClient(host='127.0.0.1', port=8086, username='root', password='root', database='metrics')
# A10 ACOS Client single connection to LB01
# Look into a DICT/LIST of LB's that we could iterate through?
# Define details of LB01 Connection
c = acos.Client('10.10.10.1', acos.AXAPI_21, 'username', 'password')
# Define details of LB02 Connection
d = acos.Client('10.10.10.2', acos.AXAPI_21, 'username', 'password')
# Define a DICT/LIST of ServiceGroup names that we will pull stats for each LoadBalancer?
name = 'SG_ACCOUNT.BUSINESS.COM_443'
name2 = 'SG_ACCOUNT.BUSINESS.COM_80'
name3 = 'SG_ACCOUNT_MESSENGER_80'
name4 = 'SG_ACCOUNT_MESSENGER_81'
# These will poll LB01 with different ServiceGroup Names:
# Has to be a way to maybe iterate through a list of names?
data = c.slb.service_group.stats(name)
data2 = c.slb.service_group.stats(name2)
# These will poll LB02 with different ServiceGroup Names:
# Has to be a way to maybe iterate through a list of names?
data3 = d.slb.service_group.stats(name3)
data4 = d.slb.service_group.stats(name4)
# Take the data for LB01 and ServiceGroup tied to (name) and 'package' it up and send to InfluxDB
for server in data['service_group_stat']['member_stat_list']:
    metricslist = []
    metricsentry = {}
    metricsentry['measurement'] = "LB01"
    metricsentry['tags'] = {}
    metricsentry['fields'] = {}
    metricsentry['tags']['SGNAME'] = name
    metricsentry['tags']['SRVNAME'] = server['server']
    metricsentry['fields']['CURCONNS'] = server['cur_conns']
    metricsentry['fields']['TOTCONNS'] = server['tot_conns']
    metricsentry['fields']['REQBYTES'] = server['req_bytes']
    metricsentry['fields']['REQPKTS'] = server['req_pkts']
    metricsentry['fields']['RESPBYTES'] = server['resp_bytes']
    metricsentry['fields']['RESPPKTS'] = server['resp_pkts']
    metricslist.append(metricsentry)
    # Write the list to InfluxDB
    db.write_points(metricslist)
# Take the data for LB01 and ServiceGroup tied to (name2) and 'package' it up and send to InfluxDB
for server in data2['service_group_stat']['member_stat_list']:
    metricslist2 = []
    metricsentry = {}
    metricsentry['measurement'] = "LB01"
    metricsentry['tags'] = {}
    metricsentry['fields'] = {}
    metricsentry['tags']['SGNAME'] = name2
    metricsentry['tags']['SRVNAME'] = server['server']
    metricsentry['fields']['CURCONNS'] = server['cur_conns']
    metricsentry['fields']['TOTCONNS'] = server['tot_conns']
    metricsentry['fields']['REQBYTES'] = server['req_bytes']
    metricsentry['fields']['REQPKTS'] = server['req_pkts']
    metricsentry['fields']['RESPBYTES'] = server['resp_bytes']
    metricsentry['fields']['RESPPKTS'] = server['resp_pkts']
    metricslist2.append(metricsentry)
    # Write the list to InfluxDB
    db.write_points(metricslist2)
# Take the data for LB02 and ServiceGroup tied to (name3) and 'package' it up and send to InfluxDB
for server in data3['service_group_stat']['member_stat_list']:
    metricslist3 = []
    metricsentry = {}
    metricsentry['measurement'] = "LB02"
    metricsentry['tags'] = {}
    metricsentry['fields'] = {}
    metricsentry['tags']['SGNAME'] = name3
    metricsentry['tags']['SRVNAME'] = server['server']
    metricsentry['fields']['CURCONNS'] = server['cur_conns']
    metricsentry['fields']['TOTCONNS'] = server['tot_conns']
    metricsentry['fields']['REQBYTES'] = server['req_bytes']
    metricsentry['fields']['REQPKTS'] = server['req_pkts']
    metricsentry['fields']['RESPBYTES'] = server['resp_bytes']
    metricsentry['fields']['RESPPKTS'] = server['resp_pkts']
    metricslist3.append(metricsentry)
    # Write the list to InfluxDB
    db.write_points(metricslist3)
# Take the data for LB02 and ServiceGroup tied to (name4) and 'package' it up and send to InfluxDB
for server in data4['service_group_stat']['member_stat_list']:
    metricslist4 = []
    metricsentry = {}
    metricsentry['measurement'] = "LB02"
    metricsentry['tags'] = {}
    metricsentry['fields'] = {}
    metricsentry['tags']['SGNAME'] = name4
    metricsentry['tags']['SRVNAME'] = server['server']
    metricsentry['fields']['CURCONNS'] = server['cur_conns']
    metricsentry['fields']['TOTCONNS'] = server['tot_conns']
    metricsentry['fields']['REQBYTES'] = server['req_bytes']
    metricsentry['fields']['REQPKTS'] = server['req_pkts']
    metricsentry['fields']['RESPBYTES'] = server['resp_bytes']
    metricsentry['fields']['RESPPKTS'] = server['resp_pkts']
    metricslist4.append(metricsentry)
    # Write the list to InfluxDB
    db.write_points(metricslist4)
Ideally, I would like to be able to iterate through a list of "load balancer connections", i.e. the c and d (acos.Client) lines.
Then I imagine I would have multiple "ServiceGroup" names, each associated with the load balancer it lives on.
I am thinking you would end up with something like this:
LB01
    SG1
    SG2
LB02
    SG3
    SG4
Connect to LB01, pull down the data for SG1, format it, and send it to InfluxDB. Connect to LB01, pull down the data for SG2, format it, and send it to InfluxDB. Keep looping through whatever SGs are associated with LB01(?).
Then do the same thing for the next load balancer, LB02.
There has to be a way to use some lists or dicts to iterate through all of this and update InfluxDB without recreating so much code every time.
Each load balancer has a lot of service groups, so this code will not scale to load balancers with many service groups.
Really looking forward to learning from this, since it will definitely come in handy for future projects.
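To make the idea concrete, this rough sketch is roughly what I am picturing (the dict layout and the 'client'/'service_groups' keys are just made up for illustration; it reuses the c, d, and name variables from the script above):

# Sketch only: the dict layout and key names are placeholders, not working code.
load_balancers = {
    'LB01': {'client': c, 'service_groups': [name, name2]},
    'LB02': {'client': d, 'service_groups': [name3, name4]},
}

for lb_label, lb in load_balancers.items():
    for sg_name in lb['service_groups']:
        stats = lb['client'].slb.service_group.stats(sg_name)
        # ...package stats into points tagged with lb_label/sg_name
        # and write them to InfluxDB here...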
Answer (score: 1):
It looks like there is a lot of repetition that could be abstracted into a couple of functions. You can also build the lists/dicts inline and save yourself some typing.
def db_write_metrics(db, measurement, name, server):
    metricslist = [
        {
            'measurement': measurement,
            'tags': {
                'SGNAME': name,
                'SRVNAME': server['server']},
            'fields': {
                'CURCONNS': server['cur_conns'],
                'TOTCONNS': server['tot_conns'],
                'REQBYTES': server['req_bytes'],
                'REQPKTS': server['req_pkts'],
                'RESPBYTES': server['resp_bytes'],
                'RESPPKTS': server['resp_pkts'],
            },
        }]
    db.write_points(metricslist)

def db_write_metrics_list(db, data, measurement, name):
    for server in data['service_group_stat']['member_stat_list']:
        db_write_metrics(db, measurement, name, server)
Now those for loops become:
db_write_metrics_list(db, data, "LB01", name)
db_write_metrics_list(db, data2, "LB01", name2)
db_write_metrics_list(db, data3, "LB02", name3)
db_write_metrics_list(db, data4, "LB02", name4)
And, assuming there are no interdependencies that require these to run one after another, you could put them in a thread pool.
import multiprocessing.pool

pool = multiprocessing.pool.ThreadPool(4)
# starmap unpacks each argument tuple into the four parameters of db_write_metrics_list
pool.starmap(db_write_metrics_list,
             ((db, data, "LB01", name),
              (db, data2, "LB01", name2),
              (db, data3, "LB02", name3),
              (db, data4, "LB02", name4)))
pool.close()
pool.join()
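To also fold in the repeated stats() calls and the numbered data/name variables, the same helper can be driven from a small mapping of each acos.Client to its service group names. This is only a sketch under that assumption, reusing the c and d clients from the question and the db_write_metrics_list helper above (the lb_config name and layout are made up):

# Sketch: pair each client with the service groups that live on it.
lb_config = [
    ('LB01', c, ['SG_ACCOUNT.BUSINESS.COM_443', 'SG_ACCOUNT.BUSINESS.COM_80']),
    ('LB02', d, ['SG_ACCOUNT_MESSENGER_80', 'SG_ACCOUNT_MESSENGER_81']),
]

for measurement, client, service_groups in lb_config:
    for sg_name in service_groups:
        # Pull the stats for this service group and write them to InfluxDB
        stats = client.slb.service_group.stats(sg_name)
        db_write_metrics_list(db, stats, measurement, sg_name)

Adding another load balancer or service group then only means adding an entry to lb_config rather than copying another block of code.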