I'm running into memory problems while parsing a large XML file.
The file looks like this (first few lines only):
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE raml SYSTEM 'raml20.dtd'>
<raml version="2.0" xmlns="raml20.xsd">
<cmData type="actual">
<header>
<log dateTime="2019-02-05T19:00:18" action="created" appInfo="ActualExporter">InternalValues are used</log>
</header>
<managedObject class="MRBTS" version="MRBTS17A_1701_003" distName="PL/M-1" id="366">
<p name="linkedMrsiteDN">PL/TE-2/p>
<p name="name">Name of street</p>
<list name="PiOptions">
<p>0</p>
<p>5</p>
<p>2</p>
<p>6</p>
<p>7</p>
<p>3</p>
<p>9</p>
<p>10</p>
</list>
<p name="btsName">4251</p>
<p name="spareInUse">1</p>
</managedObject>
<managedObject class="MRBTS" version="MRBTS17A_1701_003" distName="PL/M10" id="958078">
<p name="linkedMrsiteDN">PLMN-PLMN/MRSITE-138</p>
<p name="name">Street 2</p>
<p name="btsName">748</p>
<p name="spareInUse">3</p>
</managedObject>
<managedObject class="MRBTS" version="MRBTS17A_1701_003" distName="PL/M21" id="1482118">
<p name="name">Stree 3</p>
<p name="btsName">529</p>
<p name="spareInUse">4</p>
</managedObject>
</cmData>
</raml>
I'm parsing with xml.etree.ElementTree, but with files larger than 4 GB I run out of memory even on a machine with 32 GB of RAM. The code I'm using:
def parse_xml(data, string_in, string_out):
    """
    :param data: raw XML tree that needs to be processed and parsed
    :param string_in: string that should exist in the distinguished name
    :param string_out: string that should not exist in the distinguished name
    string_in and string_out represent the way to filter the level of parsing (site or cell)
    :return: dictionary with all unnecessary objects for the selected technology
    """
    version_dict = {}
    for child in data:
        for grandchild in child:
            if isinstance(grandchild.get('distName'), str) and string_in in grandchild.get('distName') and string_out not in grandchild.get('distName'):
                inner_dict = {}
                inner_dict.update({'class': grandchild.get('class')})
                inner_dict.update({'version': grandchild.get('version')})
                for grandgrandchild in grandchild:
                    if grandgrandchild.tag == '{raml20.xsd}p':
                        inner_dict.update({grandgrandchild.get('name'): grandgrandchild.text})
                    elif grandgrandchild.tag == '{raml20.xsd}list':
                        p_lista = []
                        for gggchild in grandgrandchild:
                            if gggchild.tag == '{raml20.xsd}p':
                                p_lista.append(gggchild.text)
                            if gggchild.tag == '{raml20.xsd}item':
                                for gdchild in gggchild:
                                    inner_dict.update({gdchild.get('name'): gdchild.text})
                        inner_dict.update({grandgrandchild.get('name'): p_lista})
                version_dict.update({grandchild.get('distName'): inner_dict})
    return version_dict
I have already tried iterparse with root.clear(), but nothing helped. I've heard that DOM parsers are slower, but SAX gives me an error:
ValueError: unknown url type: '/development/data/raml20.dtd'
I have no idea why. If anyone has suggestions on how to improve the approach and the performance, I would really appreciate it. If a larger XML sample is needed, I'm happy to provide one.
Thanks in advance.
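For reference, the iterparse attempt looked roughly like this (a sketch of the pattern, not the exact code I ran):

import xml.etree.ElementTree as ET

context = ET.iterparse('/development/file.xml', events=('start', 'end'))
event, root = next(context)  # the first event delivers the root element
for event, elem in context:
    if event == 'end' and elem.tag == '{raml20.xsd}managedObject':
        # ... pull the needed values out of elem here ...
        root.clear()  # drop already-processed children of the root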
Edit:
The code I tried after the first answer:
import xml.etree.ElementTree as ET

def parse_item(d):
    # print(d)
    # print('---')
    a = '<root>' + d + '</root>'
    tree = ET.fromstring(a)
    outer_dict_yield = {}
    for elem in tree:
        inner_dict_yield = {}
        for el in elem:
            if isinstance(el.get('name'), str):
                inner_dict_yield.update({el.get('name'): el.text})
        inner_dict_yield.update({'version': elem.get('version')})
        # print(inner_dict_yield)
        outer_dict_yield.update({elem.get('distName'): inner_dict_yield})
    # print(outer_dict_yield)
    return outer_dict_yield

def read_a_line(file_object):
    while True:
        data = file_object.readline()
        if not data:
            break
        yield data

min_data = ""
inside = False
f = open('/development/file.xml')
outer_main = {}
counter = 1
for line in read_a_line(f):
    if line.find('<managedObject') != -1:
        inside = True
    if inside:
        min_data += line
    if line.find('</managedObject') != -1:
        inside = False
        a = parse_item(min_data)
        counter = counter + 1
        outer_main.update({counter: a})
        min_data = ''
Answer 0 (score: 1)
May I ask a blunt question: is the file flat? It looks like there are a few parent tags and then everything else is managedObject items; perhaps you could write a custom parser that picks up each of those tags, treats it as its own XML document, and then discards it. Streaming through the file would let you alternate between reading, parsing, and discarding items, effectively keeping your memory usage low.
Below is some sample code that streams the file and lets you process it chunk by chunk. Replace parse_item with whatever is useful to you.
def parse_item(d):
    print('---')
    print(d)
    print('---')

def read_a_line(file_object):
    while True:
        data = file_object.readline()
        if not data:
            break
        yield data

min_data = ""
inside = False
f = open('bigfile.xml')
for line in read_a_line(f):
    if line.find('<managedObject') != -1:
        inside = True
    if inside:
        min_data += line
    if line.find('</managedObject') != -1:
        inside = False
        parse_item(min_data)
        min_data = ''
I should also mention that I was lazy and used (with some modifications) the generator for reading the file described here: Lazy Method for Reading Big File in Python?
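As a side note, a Python file object is itself a lazy line iterator, so the read_a_line helper isn't strictly needed; the loop could read the file directly:

with open('bigfile.xml') as f:
    for line in f:  # yields one line at a time without loading the whole file
        ...         # same chunk-accumulating logic as above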
Answer 1 (score: 1)
If you only need to extract data from the XML file and don't have to perform any XML-specific operations (XSL transformations, etc.), an approach with a very low memory footprint is to define your own TreeBuilder. Example:
import pathlib
from pprint import pprint
from xml.etree import ElementTree as ET

class ManagedObjectsCollector:
    def __init__(self):
        self.item_count = 0
        self.items = []
        self.curr_item = None
        self.attr_name = None
        self.list_name = None
        self.list_entry = False

    def start(self, tag, attr):
        if tag == '{raml20.xsd}managedObject':
            self.curr_item = dict()
            self.curr_item.update(**attr)
        elif tag == '{raml20.xsd}p':
            if self.list_name is None:
                self.attr_name = attr.get('name', None)
            self.list_entry = self.list_name is not None
        elif tag == '{raml20.xsd}list':
            self.list_name = attr.get('name', None)
            if self.list_name is not None:
                self.curr_item[self.list_name] = []

    def end(self, tag):
        if tag == '{raml20.xsd}managedObject':
            self.items.append(self.curr_item)
            self.curr_item = None
        elif tag == '{raml20.xsd}p':
            self.attr_name = None
            self.list_entry = False
        elif tag == '{raml20.xsd}list':
            self.list_name = None

    def data(self, data):
        if self.curr_item is None:
            return
        if self.attr_name is not None:
            self.curr_item[self.attr_name] = data
        elif self.list_entry:
            self.curr_item[self.list_name].append(data)

    def close(self):
        return self.items

if __name__ == '__main__':
    file = pathlib.Path('data.xml')
    with file.open(encoding='utf-8') as stream:
        collector = ManagedObjectsCollector()
        parser = ET.XMLParser(target=collector)
        ET.parse(stream, parser=parser)
        items = collector.items
    print('total:', len(items))
    pprint(items)
Running the above code with the sample data outputs:
total: 3
[{'PiOptions': ['0', '5', '2', '6', '7', '3', '9', '10'],
  'btsName': '4251',
  'class': 'MRBTS',
  'distName': 'PL/M-1',
  'id': '366',
  'linkedMrsiteDN': 'PL/TE-2',
  'name': 'Name of street',
  'spareInUse': '1',
  'version': 'MRBTS17A_1701_003'},
 {'btsName': '748',
  'class': 'MRBTS',
  'distName': 'PL/M10',
  'id': '958078',
  'linkedMrsiteDN': 'PLMN-PLMN/MRSITE-138',
  'name': 'Street 2',
  'spareInUse': '3',
  'version': 'MRBTS17A_1701_003'},
 {'btsName': '529',
  'class': 'MRBTS',
  'distName': 'PL/M21',
  'id': '1482118',
  'name': 'Stree 3',
  'spareInUse': '4',
  'version': 'MRBTS17A_1701_003'}]
Since we don't build an XML tree in ManagedObjectsCollector and never keep more than the current file line in memory at a time, the parser's memory allocation is tiny, and memory usage is dominated by the collector.items list. The example above parses all the data in every managedObject item, so that list can grow very large. You can verify this by commenting out the self.items.append(self.curr_item) line: once the list stops growing, memory usage stays constant (around 20-30 MiB, depending on your Python version).
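If even that list would not fit in memory, the same idea can be pushed further by handing each item to a callback as soon as its closing tag is seen instead of accumulating it. A sketch (StreamingCollector and on_item are illustrative names, not part of the measured code):

class StreamingCollector(ManagedObjectsCollector):
    def __init__(self, on_item):
        super().__init__()
        self.on_item = on_item  # callback, e.g. write the dict to a database or CSV

    def end(self, tag):
        if tag == '{raml20.xsd}managedObject':
            self.on_item(self.curr_item)  # hand the finished item over...
            self.curr_item = None         # ...and drop the reference to it
        else:
            super().end(tag)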
If you only need part of the data, a simpler TreeBuilder implementation will serve you better. For example, the TreeBuilder below collects only the version attributes and ignores the rest of the tags:
class VersionCollector:
    def __init__(self):
        self.items = []

    def start(self, tag, attr):
        if tag == '{raml20.xsd}managedObject':
            self.items.append(attr['version'])

    def close(self):
        return self.items
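It plugs into the parser the same way; a minimal usage sketch, assuming the same data.xml as in the first example:

collector = VersionCollector()
parser = ET.XMLParser(target=collector)
with open('data.xml', encoding='utf-8') as stream:
    ET.parse(stream, parser=parser)
print(collector.items)  # ['MRBTS17A_1701_003', 'MRBTS17A_1701_003', 'MRBTS17A_1701_003']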
Here is a self-contained script, extended with memory usage measurement. You'll need to install a few extra packages:
$ pip install humanize psutil tqdm
Optional: use lxml for faster parsing:
$ pip install lxml
Run the script with the file name as an argument. Sample output for a 40 MiB XML file:
$ python parse.py data_39M.xml
mem usage: 1%|▏ | 174641152/16483663872 [00:01<03:05, 87764892.80it/s, mem=174.6 MB]
total items memory size: 145.9 MB
total items count: 150603
[{'PiOptions': ['0', '5', '2', '6', '7', '3', '9', '10'],
  'btsName': '4251',
  'class': 'MRBTS',
  'distName': 'PL/M-1',
  'id': '366',
  'linkedMrsiteDN': 'PL/TE-2',
  'name': 'Name of street',
  'spareInUse': '1',
  'version': 'MRBTS17A_1701_003'},
 ...
Note that for a 40 MB XML file the peak memory usage is about 174 MB, of which about 146 MB is allocated by the items list; the rest is Python overhead and stays constant regardless of the file size. This gives a rough way to estimate how much memory reading a larger file will require.
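For example, extrapolating linearly (an assumption that holds only if the item density is similar), a 4 GB file would need on the order of 4096/40 × 146 MB ≈ 15 GB for the items list, which would still fit in 32 GB of RAM.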
Source code:
from collections import deque
import itertools
import pathlib
from pprint import pprint
import os
import sys

import humanize
import psutil
import tqdm

try:
    from lxml import etree as ET
except ImportError:
    from xml.etree import ElementTree as ET

def total_size(o, handlers={}, verbose=False):
    """https://code.activestate.com/recipes/577504/"""
    dict_handler = lambda d: itertools.chain.from_iterable(d.items())
    all_handlers = {
        tuple: iter,
        list: iter,
        deque: iter,
        dict: dict_handler,
        set: iter,
        frozenset: iter,
    }
    all_handlers.update(handlers)
    seen = set()
    default_size = sys.getsizeof(0)

    def sizeof(o):
        if id(o) in seen:
            return 0
        seen.add(id(o))
        s = sys.getsizeof(o, default_size)
        if verbose:
            print(s, type(o), repr(o), file=sys.stderr)
        for typ, handler in all_handlers.items():
            if isinstance(o, typ):
                s += sum(map(sizeof, handler(o)))
                break
        return s

    return sizeof(o)

class ManagedObjectsCollector:
    def __init__(self, mem_pbar):
        self.item_count = 0
        self.items = []
        self.curr_item = None
        self.attr_name = None
        self.list_name = None
        self.list_entry = False
        self.mem_pbar = mem_pbar
        self.mem_pbar.set_description('mem usage')

    def update_mem_usage(self):
        proc_mem = psutil.Process(os.getpid()).memory_info().rss
        self.mem_pbar.n = 0
        self.mem_pbar.update(proc_mem)
        self.mem_pbar.set_postfix(mem=humanize.naturalsize(proc_mem))

    def start(self, tag, attr):
        if tag == '{raml20.xsd}managedObject':
            self.curr_item = dict()
            self.curr_item.update(**attr)
        elif tag == '{raml20.xsd}p':
            if self.list_name is None:
                self.attr_name = attr.get('name', None)
            self.list_entry = self.list_name is not None
        elif tag == '{raml20.xsd}list':
            self.list_name = attr.get('name', None)
            if self.list_name is not None:
                self.curr_item[self.list_name] = []

    def end(self, tag):
        if tag == '{raml20.xsd}managedObject':
            self.items.append(self.curr_item)
            self.curr_item = None
        elif tag == '{raml20.xsd}p':
            self.attr_name = None
            self.list_entry = False
        elif tag == '{raml20.xsd}list':
            self.list_name = None
        # Updating the progress bar costs resources, don't do it
        # on each item parsed or it will slow down the parsing
        self.item_count += 1
        if self.item_count % 10000 == 0:
            self.update_mem_usage()

    def data(self, data):
        if self.curr_item is None:
            return
        if self.attr_name is not None:
            self.curr_item[self.attr_name] = data
        elif self.list_entry:
            self.curr_item[self.list_name].append(data)

    def close(self):
        return self.items

if __name__ == '__main__':
    file = pathlib.Path(sys.argv[1])
    total_mem = psutil.virtual_memory().total
    with file.open(encoding='utf-8') as stream, tqdm.tqdm(total=total_mem, position=0) as pbar_total_mem:
        collector = ManagedObjectsCollector(pbar_total_mem)
        parser = ET.XMLParser(target=collector)
        ET.parse(stream, parser=parser)
        items = collector.items
    print('total items memory size:', humanize.naturalsize(total_size(items)))
    print('total items count:', len(items))
    pprint(items)