Need help improving the performance of my Python code

Asked: 2015-03-04 16:50:20

Tags: python amazon-s3 psycopg2 amazon-redshift

Calling all enthusiastic programmers: my code needs your help.

My goal: move data from Amazon S3 to Amazon Redshift efficiently.

Basically, I use the code below to move all the CSV files on S3 into Redshift. I parse part of each file to build a table structure, and then load the data into Redshift with the COPY command.

'''
Created on Feb 25, 2015
@author: Siddartha.Reddy
'''

import sys
from boto.s3 import connect_to_region
from boto.s3.connection import Location
import csv
import itertools
import psycopg2

''' ARGUMENTS TO PASS '''
AWS_KEY = sys.argv[1]
AWS_SECRET_KEY = sys.argv[2]
S3_DOWNLOAD_PATH = sys.argv[3]
REDSHIFT_SCHEMA = sys.argv[4]
TABLE_NAME = sys.argv[5]

UTILS = S3_DOWNLOAD_PATH.split('/')

class UTIL():

    global UTILS

    def bucket_name(self):
        self.BUCKET_NAME = UTILS[0]
        return self.BUCKET_NAME

    def path(self):
        # everything after the bucket name, rejoined into a key prefix (no trailing '/')
        self.PATH = '/'.join(UTILS[1:])
        return self.PATH

def GETDATAINMEMORY():
    conn = connect_to_region(Location.USWest2,aws_access_key_id = AWS_KEY,
        aws_secret_access_key = AWS_SECRET_KEY,
        is_secure=False,host='s3-us-west-2.amazonaws.com'
        )
    ut = UTIL()
    BUCKET_NAME = ut.bucket_name()
    PATH = ut.path()
    filelist = conn.lookup(BUCKET_NAME)

    ''' Fetch part of the data from S3: only the first ~100 MB of each matching key,
        and DATA ends up holding just the last matching file '''
    for path in filelist:
        if PATH in path.name:
            DATA = path.get_contents_as_string(headers={'Range': 'bytes=%s-%s' % (0, 100000000)})

    return DATA

def TRAVERSEDATA():
    DATA = GETDATAINMEMORY()
    CREATE_TABLE_QUERY = 'CREATE TABLE ' + REDSHIFT_SCHEMA + '.' + TABLE_NAME + '( '
    JUNKED_OUT = DATA[3:]
    PROCESSED_DATA = JUNKED_OUT.split('\n')
    CSV_DATA = csv.reader(PROCESSED_DATA,delimiter=',')
    COUNTER,STRING,NUMBER = 0,0,0
    COLUMN_TYPE = []

    ''' GET COLUMN NAMES AND COUNT '''
    for line in CSV_DATA:
        NUMBER_OF_COLUMNS = len(line)
        COLUMN_NAMES = line
        break

    ''' PROCESS COLUMN NAMES: strip spaces so they form valid identifiers '''
    COLUMN_NAMES = [NAME.replace(' ', '') for NAME in COLUMN_NAMES]

    ''' GET COLUMN DATA TYPES (from a 500-row sample) '''
    # Materialise the sample once: re-slicing the csv reader for every column
    # would keep consuming the iterator, so each column would see different rows.
    SAMPLE_ROWS = list(itertools.islice(CSV_DATA, 0, 501))
    while COUNTER < NUMBER_OF_COLUMNS:
        NUMBER, STRING = 0, 0
        for COLUMN in SAMPLE_ROWS:
            if len(COLUMN) <= COUNTER:
                continue  # skip short/truncated rows (the byte-range fetch can cut the last row)
            if COLUMN[COUNTER].isdigit():
                NUMBER = NUMBER + 1
            else:
                STRING = STRING + 1
        if STRING == 0 and NUMBER > 0:
            COLUMN_TYPE.append('INTEGER')
        else:
            COLUMN_TYPE.append('VARCHAR(2500)')
        COUNTER = COUNTER + 1

    COUNTER = 0
    ''' BUILD SCHEMA '''
    while COUNTER < NUMBER_OF_COLUMNS:
        if COUNTER == 0:
            CREATE_TABLE_QUERY = CREATE_TABLE_QUERY + COLUMN_NAMES[COUNTER] + ' ' + COLUMN_TYPE[COUNTER] + ' NOT NULL,'
        else:
            CREATE_TABLE_QUERY = CREATE_TABLE_QUERY + COLUMN_NAMES[COUNTER] + ' ' + COLUMN_TYPE[COUNTER] + ' ,'
        COUNTER += 1
    CREATE_TABLE_QUERY = CREATE_TABLE_QUERY[:-2]+ ')'

    return CREATE_TABLE_QUERY

def COPY_COMMAND():
    S3_PATH = 's3://' + S3_DOWNLOAD_PATH
    COPY_COMMAND = "COPY "+REDSHIFT_SCHEMA+"."+TABLE_NAME+" from '"+S3_PATH+"' credentials 'aws_access_key_id="+AWS_KEY+";aws_secret_access_key="+AWS_SECRET_KEY+"' REGION 'us-west-2' csv delimiter ',' ignoreheader as 1 TRIMBLANKS maxerror as 500"
    return COPY_COMMAND

def S3TOREDSHIFT():
    conn = psycopg2.connect("dbname='xxx' port='5439' user='xxx' host='xxxxxx' password='xxxxx'")
    cursor = conn.cursor()
    cursor.execute('DROP TABLE IF EXISTS '+ REDSHIFT_SCHEMA + "." + TABLE_NAME)
    SCHEMA = TRAVERSEDATA()
    print(SCHEMA)
    cursor.execute(SCHEMA)
    COPY = COPY_COMMAND()
    print(COPY)
    cursor.execute(COPY)
    conn.commit()

S3TOREDSHIFT()
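
For context, the script takes five positional arguments: the AWS key, the AWS secret key, the S3 path in bucket/prefix form, the Redshift schema, and the target table name. A hypothetical invocation (the script name and the argument values are placeholders, not my real setup):

python s3_to_redshift.py AWS_KEY AWS_SECRET_KEY mybucket/incoming/csv analytics_schema target_table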

Current challenges:

Challenges in creating the table structure:

  1. Field lengths: right now I simply hard-code the VARCHAR fields to 2500. All of my files are > 30 GB, and parsing a whole file to compute the actual field lengths takes a lot of processing time (see the sketch after this list).
  2. Determining whether a column can be NULL: I just hard-code the first column as NOT NULL via the COUNTER variable (all of my files have an ID as the first column). I'd like to know whether there is a better way.
  3. Are there any data structures I could use? I'm always interested in learning new ways to improve performance, so if you have any suggestions, please feel free to comment.
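
To make challenges 1 and 2 concrete: one direction would be to reuse the same 500-row sample that the type-detection loop already reads and derive the VARCHAR length and the nullability from it, instead of parsing the whole 30 GB file. A rough sketch of the idea (the helper name infer_column_metadata and the sample size are placeholders for illustration, not part of the script above):

import csv
import itertools

def infer_column_metadata(csv_lines, sample_size=500):
    ''' Guess a Redshift type, length and nullability for each column from a small row sample. '''
    reader = csv.reader(csv_lines, delimiter=',')
    header = next(reader)
    sample = list(itertools.islice(reader, sample_size))

    column_types = []
    for index in range(len(header)):
        values = [row[index] for row in sample if index < len(row)]
        max_len = max(len(v) for v in values) if values else 0
        has_empty = any(v == '' for v in values)
        if values and all(v.isdigit() for v in values):
            col_type = 'INTEGER'
        else:
            # leave headroom beyond the observed maximum for rows outside the sample
            col_type = 'VARCHAR(%d)' % max(max_len * 2, 256)
        if not has_empty:
            col_type += ' NOT NULL'
        column_types.append(col_type)
    return header, column_types

The header/type pairs returned here could then feed the CREATE TABLE string the same way the existing BUILD SCHEMA loop does.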

0 Answers:
