Cannot read a file in the same script after redirecting print output to it via sys.stdout

Time: 2018-06-28 20:42:22

Tags: python stdout

I'm using an audio analysis script that produces a bunch of print statements. After all the print statements run and their output is redirected through sys.stdout to an external text file within the script, I want to read that text file back in the same script. However, from within the script the text file appears empty. I've tried f.flush() and f.seek(), to no avail. When I open the text file after the script has finished, all of the print output is there as expected. It seems the script has to finish before sys.stdout writes its contents to the external file. Is there a way to make sure this write happens before the f.read() at the end of the script?

import sys
sys.path.append(".")
#print(sys.path)

import time

import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)

start_time = time.time()

import numba
import numpy as np
import librosa

def print_color(msg, color=32):
    if sys.stdout.isatty():
        print("\033[{color}m {msg} \033[0m".format(msg=msg, color=color))
    else:
        print(" *** {msg} ***".format(msg=msg))

# TODO: configure this via cmdline
SAMPLE_RATE = 44100 # Hz
ELF_THRESHOLD_DB = -22 #dB
#OLDBUCKET N_FFT = 16384
N_FFT = 13000
FIRST_BASS_BUCKET = 0
LAST_BASS_BUCKET = 11
LAST_ANALYSIS_BUCKET = 64
DEBUG_ENABLED = False

if len(sys.argv) < 2:
    print("ElfTag: Extremely Low Frequency Audio Tagger")
    print("Usage: %(cmd)s </path/to/directory>" % {"cmd": sys.argv[0]})
    sys.exit(0)

#filename = sys.argv[1]
directory = sys.argv[1]
print(directory)
#filename = librosa.util.example_audio_file()
#filename = "/Volumes/SDXC128GB/ElfTag/sp/02. Luma.mp3"

def debug(msg):
    if DEBUG_ENABLED:
        print(msg)


files = librosa.util.find_files(directory)
print("Total Tracks: ",len(files))
queue = (len(files))

class Tee:
    def write(self, *args, **kwargs):
        self.out1.write(*args, **kwargs)
        self.out2.write(*args, **kwargs)
    def __init__(self, out1, out2):
        self.out1 = out1
        self.out2 = out2
    def flush(self):
        pass

import sys
logfile = input ("Enter Filename for Log File: ")
sys.stdout = Tee(open(logfile, "w"), sys.stdout)

for filename in files:
    try:
        queue = queue - 1
        print(queue, "Songs Remaining")
        print("Loading %(filename)s" % {"filename": filename})
        y, sr = librosa.load(filename, sr=None)
        duration = librosa.core.get_duration(y=y, sr=sr)
        print("Detected sample rate: %(sr)d Hz, duration: %(duration)f seconds." % {"sr": sr, "duration": duration})

        bin_size_hz = float(sr) / N_FFT
        num_bins = N_FFT / 2 + 1
        print("Using transform length of %(n_fft)d for FFT, which gives us %(num_bins)d bins at %(bin_size_hz)f Hz per bin." % {"n_fft": N_FFT, "num_bins": num_bins, "bin_size_hz": bin_size_hz})

        start_hz = bin_size_hz * FIRST_BASS_BUCKET
        end_hz = bin_size_hz * (LAST_BASS_BUCKET + 1)
        anal_hz = bin_size_hz * (LAST_ANALYSIS_BUCKET + 1)
        print("Detecting deep bass as peaks between %(start)f Hz and %(end)f Hz above %(db)d dB chosen from frequency range below %(anal)f Hz." % { "start" : start_hz, "end" : end_hz, "db" : ELF_THRESHOLD_DB, "anal" : anal_hz })

        #y = librosa.core.to_mono(y)
        D = librosa.stft(y, n_fft = N_FFT)

        tempo, beats = librosa.beat.beat_track(y=y, sr=sr, units='frames', hop_length=512)
        numBeats = beats.shape[0]
        print("Estimated tempo: %(tempo)f." % {"tempo" : tempo})
        print("Number of beats detected: %(beats)d." % {"beats" : numBeats})

        # Split into Harmonic and Percussive layers to aid with beat detection
        #H, P = librosa.decompose.hpss(D)
        P = D
        P = librosa.amplitude_to_db(P, ref=np.max)

        totalFrames = P.shape[1]
        print("Total frames: %(frames)d, about %(secPerFrame)f seconds per frame" % {"frames": totalFrames, "secPerFrame": (duration / totalFrames)})

        # Select significant bass frame rows
        Pbass = P[FIRST_BASS_BUCKET:(LAST_ANALYSIS_BUCKET + 1)]

        firstFrame = np.argmax(Pbass.max(axis=0) > -80)
        debug("firstFrame")
        debug(firstFrame)

        Pbass = Pbass[:, firstFrame:]
        debug("Pbass")
        debug(Pbass)

        localmaxBass = librosa.util.localmax(Pbass)
        debug("localmaxBass")
        debug(localmaxBass)

        maskBass = localmaxBass[FIRST_BASS_BUCKET:(LAST_BASS_BUCKET + 1)]
        debug("maskBass")
        debug(maskBass)

        ourBass = Pbass[FIRST_BASS_BUCKET:(LAST_BASS_BUCKET + 1)]
        debug("ourBass")
        debug(ourBass)

        filteredBass = (ourBass > ELF_THRESHOLD_DB)
        debug("filteredBass")
        debug(filteredBass)

        peakFilteredBass = np.multiply(filteredBass, maskBass)
        debug("peakFilteredBass")
        debug(peakFilteredBass)

        vertBassFrames = np.sum(filteredBass, axis=0)
        debug("vertBassFrames")
        debug(vertBassFrames)

        horizBassFrames = (vertBassFrames > 0)
        debug("horizBassFrames")
        debug(horizBassFrames)

        deepBassFrames = np.nonzero(horizBassFrames)[0]
        debug("deepBassFrames")
        debug(deepBassFrames.shape)
        debug(deepBassFrames)

        # Adjacent Deep Bass detector

        shiftedHorizBassFrames = np.append(horizBassFrames[1:], [False])
        andedShiftedHorizBassFrames = np.logical_and(horizBassFrames, shiftedHorizBassFrames)
        adjacentHorizBassFrames = np.logical_and(andedShiftedHorizBassFrames, np.append(andedShiftedHorizBassFrames[1:], [False]))

        debug("adjacentHorizBassFrames")
        debug(adjacentHorizBassFrames)

        # /End Adjacent Deep Bass detector

        debug("beats")
        debug(beats.shape)
        debug(beats)

        deepBassBeats = np.intersect1d(deepBassFrames, beats, assume_unique=True)
        debug("deepBassBeats")
        debug(deepBassBeats.shape)
        debug(deepBassBeats)

        numDeepBeats = deepBassBeats.shape[0]
        print("Number of deep beats: %(numDeepBeats)d" % {"numDeepBeats": numDeepBeats})
        deepBeatsPercentage = float(numDeepBeats) / numBeats
        print("Percentage of deep beats: %(deepBeatsPercentage)f" % {"deepBeatsPercentage": deepBeatsPercentage})
        numBassFrames = horizBassFrames.sum()
        print("Number of frames with deep bass: %(frames)d." % {"frames": numBassFrames})
        numAdjacentBassFrames = adjacentHorizBassFrames.sum()
        print("Number of adjacent frames with deep bass: %(frames)d." % {"frames": numAdjacentBassFrames})
        bassFramesPerBeat = float(numBassFrames) / numBeats
        print("Number of deep bass frames per beat: %(bassFramesPerBeat)f" % {"bassFramesPerBeat": bassFramesPerBeat})
        bassFramesPercentage = float(numBassFrames) / totalFrames
        print("Percentage of deep bass frames: %(bassFramesPercentage)f" % {"bassFramesPercentage": bassFramesPercentage})
        adjacentBassFramesPercentage = float(numAdjacentBassFrames) / totalFrames
        print("Percentage of adjacent deep bass frames: %(bassFramesPercentage)f" % {"bassFramesPercentage": adjacentBassFramesPercentage})
        #if %(bassFramesPercentage)f" % {"bassFramesPercentage": adjacentBassFramesPercentage} >= "0.30":
        #    print("DEEP BASS TRACK NEEDS TAGGING")
        print(("--- %s seconds ---" % (time.time() - start_time)))
        #sys.exit(0)

    except:
        continue

f= open(logfile, 'r+')
f.flush()
f.seek(0)
fh = f.read()
print(fh.rstrip())

1 Answer:

Answer 0 (score: 0)

You are not flushing the output file you are writing to. Implement the flush method in your Tee class:

def flush(self):
    self.out1.flush()
    self.out2.flush()

Then call sys.stdout.flush() at the end of the loop, before reading the log file back.
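
For reference, here is a minimal sketch of how those two changes fit into the posted script (same names as in the question; the analysis loop body is elided): the Tee now forwards flush to both underlying streams, and sys.stdout.flush() is called after the loop so the log file is fully written before it is read back.

import sys

class Tee:
    def __init__(self, out1, out2):
        self.out1 = out1
        self.out2 = out2

    def write(self, *args, **kwargs):
        self.out1.write(*args, **kwargs)
        self.out2.write(*args, **kwargs)

    def flush(self):
        # Forward the flush to both underlying streams instead of discarding it.
        self.out1.flush()
        self.out2.flush()

logfile = input("Enter Filename for Log File: ")
sys.stdout = Tee(open(logfile, "w"), sys.stdout)

for filename in files:  # 'files' as built earlier in the posted script
    ...                 # analysis and print() calls unchanged

# Push any buffered output into the log file before reading it back.
sys.stdout.flush()

with open(logfile, "r") as f:
    print(f.read().rstrip())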