import pandas as pd
# Sample origin points, several rows per region.
data = {'Origin Region': [1.0, 2.0, 3.0, 4.0, 5.0, 1.0, 2.0, 5.0],
        'Origin Latitude': [-36.45875, -36.24879, -36.789456, -38.14789, -36.15963, -36.159455, -36.2345, -36.12745],
        'Origin Longitude': [145.14563, 145.15987, 145.87456, 146.75314, 145.75483, 145.78458, 145.123654, 145.11111]}
df = pd.DataFrame(data)

# One centre coordinate per region.
centres_data = {'Origin Region': [1.0, 2.0, 3.0, 4.0, 5.0],
                'Origin Latitude': [-36.25361, -36.78541, -36.74859, -38.74123, -36.14538],
                'Origin Longitude': [145.12345, 145.36241, 145.12365, 146.75314, 145.75483]}
centres_df = pd.DataFrame(centres_data)

grouped_region = df.groupby('Origin Region')
for region, region_group in grouped_region:
    # Keep only the "outlier" rows of this group: latitude below the threshold.
    outliers = region_group[['Origin Latitude', 'Origin Longitude']].where(
        region_group['Origin Latitude'] < -36.15)
    outliers.dropna(inplace=True)
    print(outliers)
    # BUG FIX: the original tested `~outliers.empty`. `~` is bitwise NOT, so
    # both True (-2) and False (-1) are truthy and the guard never skipped
    # anything. `not` is the correct boolean negation.
    if not outliers.empty:
        for index, outlier_value in outliers.iterrows():
            for another_index, centre_value in centres_df.iterrows():
                a = outlier_value['Origin Longitude']
                b = outlier_value['Origin Latitude']
                # BUG FIX: the original read the whole centres_df columns here
                # (a Series), not the current centre row; use `centre_value`
                # so a/b/c/d are all scalars for the distance computation.
                c = centre_value['Origin Longitude']
                d = centre_value['Origin Latitude']
                # find distance using the above and then find minimum distance
我正在尝试遍历数据帧的每个组(df),然后根据某种条件过滤每个组中的值,并在每个过滤后的值(异常值)与另一个数据帧中的所有值之间进行距离计算( centres_df)。
我在数据帧中有数据,我应该将它们转换为数组,然后使用scipy cdist计算距离吗?或者只是使用循环并使用我自己的距离计算功能?我不确定什么是最好的方法。还是可以使用apply并调用我自己的距离函数?
答案 0(得分:1)
无需嵌套循环。只需在分组循环内,把每组的离群值 join 到中心数据帧,然后按列计算距离;最后把所有离群值数据帧(保存在字典中)一次性拼接成单个数据帧。
不过,为了向量化整个过程,需要把基于内置 math 库的 Python Haversine 公式改写为 numpy 版本。
Numpy 版本的 haversine 公式(接收数组/系列而不是标量作为输入)
def haversine_np(lon1, lat1, lon2, lat2):
    """Return the great-circle distance in kilometres between two points
    given in decimal degrees.

    Vectorized: accepts scalars or numpy arrays / pandas Series.
    """
    # Work in radians for the trigonometric formula.
    lon1, lat1, lon2, lat2 = (np.radians(v) for v in (lon1, lat1, lon2, lat2))

    # Haversine formula.
    half_dlat = (lat2 - lat1) / 2.0
    half_dlon = (lon2 - lon1) / 2.0
    h = np.sin(half_dlat) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(half_dlon) ** 2

    earth_radius_km = 6371  # Radius of earth in kilometers. Use 3956 for miles
    return earth_radius_km * (2 * np.arcsin(np.sqrt(h)))
熊猫过程
# Index centres by region so each per-region join below lines up on it.
centres_df = centres_df.set_index('Origin Region')

df_dict = {}
grouped_region = df.sort_values('Origin Region').groupby('Origin Region')

for region, region_group in grouped_region:
    # Keep only this group's outlier rows (latitude below threshold),
    # re-indexed by region so the join matches the centres frame.
    coords = region_group[['Origin Latitude', 'Origin Longitude']]
    outliers = coords.where(region_group['Origin Latitude'] < -36.15).dropna()
    outliers = outliers.assign(Origin_Region=region).set_index('Origin_Region')

    # Attach the region's centre coordinates; '_' suffix marks centre columns.
    outliers = outliers.join(centres_df, how='inner', lsuffix='', rsuffix='_')

    # Vectorized great-circle distance between each outlier and its centre.
    outliers['Distance_km'] = haversine_np(
        outliers['Origin Longitude'], outliers['Origin Latitude'],
        outliers['Origin Longitude_'], outliers['Origin Latitude_'])
    outliers['Origin Region'] = region

    # Reorder columns by name (same order the positional [5,0,1,2,3,4] gave).
    ordered_cols = ['Origin Region', 'Origin Latitude', 'Origin Longitude',
                    'Origin Latitude_', 'Origin Longitude_', 'Distance_km']
    df_dict[region] = outliers.reindex(ordered_cols, axis='columns')

# Concatenate once, outside the loop, into a single flat-indexed frame.
final_df = pd.concat(df_dict, ignore_index=True)
输出
# Display the concatenated result: one row per outlier, with its region's
# centre coordinates (columns suffixed '_') and the haversine distance in km.
print(final_df)
#     Origin Region    Origin Latitude    Origin Longitude    Origin Latitude_    Origin Longitude_    Distance_km
# 0             1.0         -36.458750          145.145630           -36.25361            145.12345      22.896839
# 1             1.0         -36.159455          145.784580           -36.25361            145.12345      60.234887
# 2             2.0         -36.248790          145.159870           -36.78541            145.36241      62.354177
# 3             2.0         -36.234500          145.123654           -36.78541            145.36241      64.868402
# 4             3.0         -36.789456          145.874560           -36.74859            145.12365      67.040011
# 5             4.0         -38.147890          146.753140           -38.74123            146.75314      65.976398
# 6             5.0         -36.159630          145.754830           -36.14538            145.75483       1.584528