This is a follow-up to my previous question about patterns to follow for recurring ETL processes.
Today, all of my machine-learning work is done by hand: I download the required input files, learn and predict, output a .csv file, and copy it into a database.
However, since this is going into production, I need to automate the whole process. The required input files will arrive from the provider into an S3 bucket every month (and eventually more frequently).
As an example, I tried to implement it in Luigi, but replaced S3 with a local directory so that things would be simpler. The program should have an Extract task, a Transform task that does the processing (using an algorithm function), and a Load task.

import glob
import os
import sqlite3

import luigi
from luigi.contrib import postgres
import pandas as pd
class ReadFile(luigi.ExternalTask):
    # The new file dropped in the input directory; as an ExternalTask it has no run()
    filename = luigi.Parameter()

    def output(self):
        return luigi.LocalTarget('input/' + self.filename)
class Extract(luigi.Task):
    # Copy the file from the input directory into the data directory
    filename = luigi.Parameter()

    def requires(self):
        return ReadFile(self.filename)

    def output(self):
        return luigi.LocalTarget('data/' + self.filename)

    def run(self):
        with self.input().open('r') as input_file:
            data = input_file.read()
        with self.output().open('w') as output_file:
            output_file.write(data)
class Transform(luigi.Task):
    # Transform the file from the data directory using the transform function
    filename = luigi.Parameter()

    def requires(self):
        return Extract(self.filename)

    def output(self):
        return luigi.LocalTarget('results/' + self.filename)

    def run(self):
        with self.input().open('r') as input_file:
            data = input_file.read()
        result = transform(data)
        with self.output().open('w') as output_file:
            result.to_csv(output_file)
        mark_as_done(self.filename)
class Load(luigi.Task):
    # Find new files, require their Transform tasks, and load the results into the PostgreSQL DB
    date = luigi.DateParameter()

    def requires(self):
        return [Transform(filename) for filename in new_files('input/')]

    def output(self):
        return postgres.PostgresTarget(host='db', database='luigi', user='luigi',
                                       password='luigi', table='test',
                                       update_id=str(self.date))

    def run(self):
        connection = self.output().connect()
        for input_target in self.input():
            with input_target.open('r') as input_file:
                result = pd.read_csv(input_file)
            cursor = connection.cursor()
            # psycopg2 uses %s placeholders; skip the index so only the two columns are inserted
            for row in result.itertuples(index=False):
                cursor.execute('INSERT INTO test VALUES (%s, %s)', tuple(row))
        # Record the update_id in the marker table so this target counts as complete
        self.output().touch(connection)
        connection.commit()
# Get connection to the SQLite DB, which will store the files that were already processed
SQLITE_CONNECTION = None

def get_connection():
    global SQLITE_CONNECTION
    if SQLITE_CONNECTION is None:
        SQLITE_CONNECTION = sqlite3.connect('processed.db')
    return SQLITE_CONNECTION
# Mark filename as done in the SQLite DB
def mark_as_done(filename):
    connection = get_connection()
    cursor = connection.cursor()
    cursor.execute('INSERT INTO processed_files VALUES (?)', (filename,))
    connection.commit()
# Check whether the file was already processed
def new_file(filename):
    connection = get_connection()
    cursor = connection.cursor()
    # sqlite3 uses ? placeholders, and rowcount is unreliable for SELECTs
    cursor.execute('SELECT 1 FROM processed_files WHERE file = ?', (filename,))
    return cursor.fetchone() is None
# Yields filenames of files that were not processed yet; yield bare names so the
# tasks don't prepend 'input/' twice and the name matches what mark_as_done stores
def new_files(path):
    for filepath in glob.glob(path + '*.csv'):
        filename = os.path.basename(filepath)
        if new_file(filename):
            yield filename
# Mock of the transform process
def transform(data):
    return pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
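For completeness, here is a minimal sketch of how the pipeline could be kicked off locally, assuming the code above is saved as etl.py (the module name and the date value are just placeholders):

import datetime
import luigi
from etl import Load

# Run the Load task for a given month with the in-process scheduler
luigi.build([Load(date=datetime.date(2017, 1, 1))], local_scheduler=True)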
Question:
Given the update_id parameter, will Load be triggered every time?
Answer (score: 0):
Load only creates Transform tasks when there are files in the result/ directory. Shouldn't it be looking for new files in the input directory?
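Regarding the update_id question: as I understand it, PostgresTarget writes a row with the given update_id into its marker table (table_updates by default) when touch() is called, and exists() checks for that row. So for the same date parameter, Load runs once and is then considered complete; a new date (hence a new update_id) triggers it again. A rough sketch of that completeness check, reusing the connection settings from the code above:

from luigi.contrib import postgres

# Same target as Load.output(); exists() consults the table_updates marker table
target = postgres.PostgresTarget(host='db', database='luigi', user='luigi',
                                 password='luigi', table='test',
                                 update_id='2017-01-01')

if target.exists():
    print('Load is already complete for this update_id')
else:
    print('Load would run for this update_id')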