由于SQLite 3.7.13不支持在JOIN上更新列的限制,我编写了以下Python脚本来解决问题。但由于我处理的数据量很大,我遇到了系统资源问题,而且这种更新方法耗时太长。
我有一个包含7,159,587条记录的SQLite3表,其中包含以下架构:
dist_trpwrenc (id integer primary key autoincrement, IP TEXT, VNE_INTERNAL TEXT, VNE_ENTERPRISE TEXT, VNE_EXTERNAL TEXT)
我有一个包含9,224,812条重复记录的CSV文件。以下是CSV文件中的数据示例:
"IP","VNE"
"192.168.1.1","internal"
"192.168.1.1","enterprise"
"192.168.1.1","external"
"192.168.2.1","internal"
"192.168.2.1","external"
Python脚本正在获取CSV文件并根据以下示例更新“dist_trpwrenc”表:
--------------------------------------------------------------
| IP | VNE_INTERNAL | VNE_ENTERPRISE | VNE_EXTERNAL |
| 192.168.1.1 | x | x | x |
| 192.168.2.1 | x | | x |
--------------------------------------------------------------
我正在寻找一种更快的方法来处理更新,SQLite3 / Python可以实现吗?
#!/usr/bin/python
from openpyxl.reader.excel import load_workbook
import sys, csv, sqlite3, logging, time, os, errno
# Timestamp captured once at import time; used to name this run's log file.
s = time.strftime('%Y%m%d%H%M%S')
# Create exception file from standard output
class Logger(object):
    """Tee object that duplicates everything written to stdout into a log file.

    Assign an instance to ``sys.stdout`` to capture all print output while
    still echoing it to the real terminal.
    """

    def __init__(self):
        # Keep a handle on the real stdout so output still reaches the screen.
        self.terminal = sys.stdout
        # Log file named from the module-level timestamp `s`, opened in
        # append mode so repeated runs do not clobber earlier output.
        self.log = open((s)+"_log", "a")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # File-like objects substituted for sys.stdout must support flush();
        # without this, any sys.stdout.flush() call raises AttributeError.
        self.terminal.flush()
        self.log.flush()
def dist_trpwrenc_update():
    """Flag VNE_* columns of dist_trpwrenc from a CSV of VNE-type/IP pairs.

    Reads the SQLite database path from ``sys.argv[1]`` and the CSV path
    from ``sys.argv[2]``.  For each CSV row, sets the VNE column matching
    the row's VNE label to "x" for the row's IP.  All prints are tee'd to
    a timestamped log file via Logger.

    Performance note: the whole run now executes inside a single
    transaction committed once at the end -- the original per-row commit
    forced an fsync for every record, which is what made 7M+ updates crawl.
    """
    sys.stdout = Logger()  # start tee-ing stdout into the log file
    con = sqlite3.connect(sys.argv[1])
    cur = con.cursor()
    # Dispatch table: CSV "VNE" label -> column to flag.  Replaces the
    # repeated if/elif branches.  Column names come only from this fixed
    # dict, so interpolating them into the SQL is injection-safe; the
    # data values stay parameterized.
    column_for = {
        'internal': 'VNE_INTERNAL',
        'enterprise': 'VNE_ENTERPRISE',
        'external': 'VNE_EXTERNAL',
    }
    try:
        # newline='' is the documented way to open CSV files in Python 3
        # (the original used the Python 2 "rb" convention).
        with open(sys.argv[2], "r", newline='') as f:
            reader = csv.reader(f, delimiter=',')
            for row in reader:
                # NOTE(review): indices 3/4 imply extra leading CSV columns;
                # the sample data in the question shows IP at row[0] and VNE
                # at row[1] -- verify against the real file before running.
                vne = row[3]
                ip = row[4]
                column = column_for.get(vne)
                if column is None:
                    print(row[0], row[4], 'did not update:', row[3], ' successfully.')
                    continue
                # 'x' is passed as a bound parameter; the original's
                # double-quoted "x" relied on a non-standard SQLite quirk.
                cur.execute(
                    'UPDATE dist_trpwrenc SET {0}=? WHERE IP=?;'.format(column),
                    ('x', ip))
                print(row[0], row[4], 'updated:', row[3], ' successfully!')
        con.commit()  # single commit for the whole file
    except (KeyboardInterrupt, SystemExit):
        con.rollback()
        raise
    finally:
        # Always release the connection and restore the real stdout, even
        # when the loop raises (the original leaked both on error).
        con.close()
        sys.stdout = sys.__stdout__
def main():
    """Script entry point: perform the one-off dist_trpwrenc update."""
    dist_trpwrenc_update()


if __name__ == '__main__':
    main()
感谢所有提示。我改用了另一种基于 SQL CASE 语句的方法:
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, csv, sqlite3, logging, time, os, errno
# Functions
# Timestamp captured once at import time; prefixes the export file name.
s = time.strftime('%Y%m%d%H%M%S')
# Create file from standard output for database import
class Logger(object):
    """Tee object that duplicates everything written to stdout into a file.

    Assign an instance to ``sys.stdout`` so that query output is both shown
    on the terminal and captured into a timestamped export file.
    """

    def __init__(self):
        # Keep a handle on the real stdout so output still reaches the screen.
        self.terminal = sys.stdout
        # Export file named "<timestamp>_<database name>", append mode.
        self.log = open((s) + "_" + sys.argv[1], "a")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # File-like objects substituted for sys.stdout must support flush();
        # without this, any sys.stdout.flush() call raises AttributeError.
        self.terminal.flush()
        self.log.flush()
# Function to create CSV from a SQL query.
def sqlExport():
    """Export per-network VNE counts from data_table as quoted CSV lines.

    Connects to the SQLite database named in ``sys.argv[1]``, pivots the
    VNE column into three per-network counts with SQL CASE, and prints
    each result row as a quoted CSV record.  Output is tee'd to a
    timestamped export file via Logger.
    """
    sys.stdout = Logger()  # start capturing stdout into the export file
    con = sqlite3.connect(sys.argv[1])
    cur = con.cursor()
    try:
        # Single-quoted 'V1'/'V2'/'V3' are standard SQL string literals;
        # the original's double quotes only worked through a SQLite quirk
        # (double-quoted strings fall back to literals when no matching
        # column exists).
        cur.execute(
            "SELECT network, "
            "SUM(case when VNE = 'V1' then 1 end) as VNECH1, "
            "SUM(case when VNE = 'V2' then 1 end) as VNECH2, "
            "SUM(case when VNE = 'V3' then 1 end) as VNECH3 "
            "from data_table GROUP by network ORDER BY network;")
        # Iterate the cursor directly instead of fetchall(): avoids
        # materializing the entire result set in memory at once.
        for row in cur:
            print('"{0}","{1}","{2}","{3}"'.format(row[0], row[1], row[2], row[3]))
    except (KeyboardInterrupt, SystemExit):
        raise
    finally:
        # Always close the connection and restore stdout (the original
        # skipped both whenever an exception escaped the query loop).
        con.close()
        sys.stdout = sys.__stdout__
# Primary function to execute
def main():
    """Script entry point: run the CSV export query."""
    sqlExport()


if __name__ == '__main__':
    main()
答案 0(得分:1):
如果这样仍然不够快,并且适合您的应用场景(例如,CSV 文件包含 dist_trpwrenc 表中的所有行),那么删除现有记录(或整张表),再用 INSERT 查询从 CSV 重新填充,可能比执行大量 UPDATE 更快。