How Can I Make This Python Script Work With Python 3?
I am trying to make this script work with Python 3. I have managed to sort out a few details, but it still doesn't work; it now seems to throw an error while decompressing the chunk data.
This is the error I am currently getting: Error -5 while decompressing data: incomplete or truncated stream
I am using Python 3.6.2.
Here is the script, already migrated to Python 3. It basically normalizes PNGs that are stored in the custom iPhone format.
import pdb
from struct import *
from zlib import *
import stat
import sys
import os
import zlib

def getNormalizedPNG(filename):
    pngheader = b"\x89PNG\r\n\x1a\n"
    pdb.set_trace()
    file = open(filename, "rb")
    oldPNG = file.read()
    file.close()

    if oldPNG[:8] != pngheader:
        return None

    newPNG = oldPNG[:8]
    chunkPos = len(newPNG)

    # For each chunk in the PNG file
    while chunkPos < len(oldPNG):
        # Reading chunk
        chunkLength = oldPNG[chunkPos:chunkPos+4]
        chunkLength = unpack(">L", chunkLength)[0]
        chunkType = oldPNG[chunkPos+4 : chunkPos+8]
        chunkData = oldPNG[chunkPos+8:chunkPos+8+chunkLength]
        chunkCRC = oldPNG[chunkPos+chunkLength+8:chunkPos+chunkLength+12]
        chunkCRC = unpack(">L", chunkCRC)[0]
        chunkPos += chunkLength + 12

        # Parsing the header chunk
        if chunkType == b"IHDR":
            width = unpack(">L", chunkData[0:4])[0]
            height = unpack(">L", chunkData[4:8])[0]

        # Parsing the image chunk
        if chunkType == b"IDAT":
            try:
                pdb.set_trace()
                # Uncompressing the image chunk
                bufSize = width * height * 4 + height
                chunkData = decompress(chunkData, -8, bufSize)
            except Exception as e:
                print("Already normalized")
                print(e)
                # The PNG image is normalized
                return None

            # Swapping red & blue bytes for each pixel
            newdata = b""
            for y in range(height):
                i = len(newdata)
                newdata += chunkData[i]
                for x in range(width):
                    i = len(newdata)
                    newdata += chunkData[i+2]
                    newdata += chunkData[i+1]
                    newdata += chunkData[i+0]
                    newdata += chunkData[i+3]

            # Compressing the image chunk
            chunkData = newdata
            chunkData = compress( chunkData )
            chunkLength = len( chunkData )
            chunkCRC = crc32(chunkType)
            chunkCRC = crc32(chunkData, chunkCRC)
            chunkCRC = (chunkCRC + 0x100000000) % 0x100000000

        # Removing CgBI chunk
        if chunkType != b"CgBI":
            newPNG += pack(">L", chunkLength)
            newPNG += chunkType
            if chunkLength > 0:
                newPNG += chunkData
            newPNG += pack(">L", chunkCRC)

        # Stopping the PNG file parsing
        if chunkType == b"IEND":
            break

    return newPNG

def updatePNG(filename):
    data = getNormalizedPNG(filename)
    if data != None:
        file = open(filename, "wb")
        file.write(data)
        file.close()
        return True
    return data
Any clues would be greatly appreciated. Thanks! :)
Answer 0 (score: 1)
The original code does not process multiple IDAT chunks right away; it does The Right Thing™ and only concatenates them into one large object before decompressing it as a whole. IDAT chunks are not compressed separately, but your code assumes they are, and so it fails as soon as there is more than one.
There may be multiple IDAT chunks; if so, they shall appear consecutively with no other intervening chunks. The compressed datastream is then the concatenation of the contents of the data fields of all the IDAT chunks.
— 11.2.4 IDAT Image data
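To see that requirement in isolation (this is only a minimal sketch with made-up data, not part of the normalization script): a deflate stream split across several pieces is still one continuous stream, and decompressing only the first piece reproduces the exact error from the question.

import zlib

payload = bytes(range(256)) * 64          # stand-in for the raw image data
stream = zlib.compress(payload)           # one continuous compressed stream...
part1, part2 = stream[:20], stream[20:]   # ...split as if stored in two IDAT chunks

try:
    zlib.decompress(part1)                # decompressing a single piece on its own
except zlib.error as e:
    print(e)                              # Error -5 ... incomplete or truncated stream

# Concatenate first, then decompress once: works.
print(zlib.decompress(part1 + part2) == payload)   # True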
Rewiring the loop to gather all IDAT chunks first solves the problem. Only when the IEND chunk is found is this data decompressed, the bytes are swapped, and a new IDAT chunk is created. The final step, appending IEND, closes the file.
from struct import *
from zlib import *
import stat
import sys
import os
import zlib

def getNormalizedPNG(filename):
    pngheader = b"\x89PNG\r\n\x1a\n"
    file = open(filename, "rb")
    oldPNG = file.read()
    file.close()

    if oldPNG[:8] != pngheader:
        return None

    newPNG = oldPNG[:8]
    chunkPos = len(newPNG)
    chunkD = bytearray()
    foundCGBi = False

    # For each chunk in the PNG file
    while chunkPos < len(oldPNG):
        # Reading chunk
        chunkLength = oldPNG[chunkPos:chunkPos+4]
        chunkLength = unpack(">L", chunkLength)[0]
        chunkType = oldPNG[chunkPos+4 : chunkPos+8]
        chunkData = oldPNG[chunkPos+8:chunkPos+8+chunkLength]
        chunkCRC = oldPNG[chunkPos+chunkLength+8:chunkPos+chunkLength+12]
        chunkCRC = unpack(">L", chunkCRC)[0]
        chunkPos += chunkLength + 12

        # Parsing the header chunk
        if chunkType == b"IHDR":
            width = unpack(">L", chunkData[0:4])[0]
            height = unpack(">L", chunkData[4:8])[0]

        # Parsing the image chunk
        if chunkType == b"IDAT":
            # Concatenate all image data chunks
            chunkD += chunkData
            continue

        # Stopping the PNG file parsing
        if chunkType == b"IEND":
            if not foundCGBi:
                print('Already normalized')
                return None

            bufSize = width * height * 4 + height
            chunkData = decompress(chunkD, -8, bufSize)

            # Swapping red & blue bytes for each pixel
            chunkData = bytearray(chunkData)
            offset = 1
            for y in range(height):
                for x in range(width):
                    chunkData[offset+4*x], chunkData[offset+4*x+2] = chunkData[offset+4*x+2], chunkData[offset+4*x]
                offset += 1+4*width

            # Compressing the image chunk
            chunkData = compress( chunkData )
            chunkLength = len( chunkData )
            chunkCRC = crc32(b'IDAT')
            chunkCRC = crc32(chunkData, chunkCRC)
            chunkCRC = (chunkCRC + 0x100000000) % 0x100000000

            newPNG += pack(">L", chunkLength)
            newPNG += b'IDAT'
            newPNG += chunkData
            newPNG += pack(">L", chunkCRC)

            chunkCRC = crc32(chunkType)
            newPNG += pack(">L", 0)
            newPNG += b'IEND'
            newPNG += pack(">L", chunkCRC)
            break

        # Removing CgBI chunk
        if chunkType == b"CgBI":
            foundCGBi = True
        else:
            newPNG += pack(">L", chunkLength)
            newPNG += chunkType
            if chunkLength > 0:
                newPNG += chunkData
            newPNG += pack(">L", chunkCRC)

    return newPNG

def updatePNG(filename):
    data = getNormalizedPNG(filename)
    if data != None:
        file = open(filename + '_fixed.png', "wb")
        file.write(data)
        file.close()
        return True
    return data

updatePNG("broken_image.png")
This produces a valid, fixed file.
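If you want to convince yourself the output is sane, a minimal check (assuming the broken_image.png_fixed.png name produced by updatePNG above) is to verify the PNG signature and that the Apple-specific chunk is gone:

# Minimal sanity check of the rewritten file.
with open("broken_image.png_fixed.png", "rb") as f:
    fixed = f.read()

print(fixed[:8] == b"\x89PNG\r\n\x1a\n")   # PNG signature intact
print(b"CgBI" not in fixed)                # CgBI chunk removed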
Note that this code does not restore the broken CgBI alpha channel! If you need proper alpha transparency, you have to apply the row filters to get the plain RGB values, undo the alpha premultiplication, and then apply the inverse of the row filters before compressing again.
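As a rough illustration of the per-pixel part of that step only (assuming 8-bit premultiplied RGBA after the row filters have already been undone; the unpremultiply helper below is hypothetical and not a substitute for doing the filtering correctly):

def unpremultiply(r, g, b, a):
    # Hypothetical per-pixel helper: scale premultiplied channels back up by 255/alpha.
    if a == 0:
        return 0, 0, 0, 0
    return (min(255, r * 255 // a),
            min(255, g * 255 // a),
            min(255, b * 255 // a),
            a)

print(unpremultiply(64, 32, 16, 128))   # -> (127, 63, 31, 128)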
You can use the Python wrapper for PNGDefry, a C program that does perform these missing steps.
Disclaimer: I am the author of PNGdefry.