My pyspark console tells me that the line after my for loop has invalid syntax. The console doesn't execute the for loop; it gets to the schema = StructType(fields) line and raises a SyntaxError, yet the for loop itself looks fine...
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
sqlContext = SQLContext(sc)
lines = sc.textFile('file:///home/w205/hospital_compare/surveys_responses.csv')
parts = lines.map(lambda l: l.split(','))
surveys_responses = parts.map(lambda p: (p[0:33]))
schemaString = 'Provider Number, Hospital Name, Address, City, State, ZIP Code, County Name, Communication with Nurses Achievement Points, Communication with Nurses Improvement Points, Communication with Nurses Dimension Score, Communication with Doctors Achievement Points, Communication with Doctors Improvement Points, Communication with Doctors Dimension Score, Responsiveness of Hospital Staff Achievement Points, Responsiveness of Hospital Staff Improvement Points, Responsiveness of Hospital Staff Dimension Score, Pain Management Achievement Points, Pain Management Improvement Points, Pain Management Dimension Score, Communication about Medicines Achievement Points, Communication about Medicines Improvement Points, Communication about Medicines Dimension Score, Cleanliness and Quietness of Hospital Environment Achievement Points, Cleanliness and Quietness of Hospital Environment Improvement Points, Cleanliness and Quietness of Hospital Environment Dimension Score, Discharge Information Achievement Points, Discharge Information Improvement Points, Discharge Information Dimension Score, Overall Rating of Hospital Achievement Points, Overall Rating of Hospital Improvement Points, Overall Rating of Hospital Dimension Score, HCAHPS Base Score, HCAHPS Consistency Score'
fields = []
for field_name in schemaString.split(", "):
    if field_name != ("HCAHPS Base Score" | "HCAHPS Consistency Score"):
        fields.append(StructField(field_name, StringType(), True))
    else:
        fields.append(StructField(field_name, IntegerType(), True))
schema = StructType(fields)
Answer (score: 1)
The condition combining | with != is wrong; use this instead:
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
sqlContext = SQLContext(sc)
lines = sc.textFile('file:///home/w205/hospital_compare/surveys_responses.csv')
parts = lines.map(lambda l: l.split(','))
surveys_responses = parts.map(lambda p: (p[0:33]))
schemaString = 'Provider Number, Hospital Name, Address, City, State, ZIP Code, County Name, Communication with Nurses Achievement Points, Communication with Nurses Improvement Points, Communication with Nurses Dimension Score, Communication with Doctors Achievement Points, Communication with Doctors Improvement Points, Communication with Doctors Dimension Score, Responsiveness of Hospital Staff Achievement Points, Responsiveness of Hospital Staff Improvement Points, Responsiveness of Hospital Staff Dimension Score, Pain Management Achievement Points, Pain Management Improvement Points, Pain Management Dimension Score, Communication about Medicines Achievement Points, Communication about Medicines Improvement Points, Communication about Medicines Dimension Score, Cleanliness and Quietness of Hospital Environment Achievement Points, Cleanliness and Quietness of Hospital Environment Improvement Points, Cleanliness and Quietness of Hospital Environment Dimension Score, Discharge Information Achievement Points, Discharge Information Improvement Points, Discharge Information Dimension Score, Overall Rating of Hospital Achievement Points, Overall Rating of Hospital Improvement Points, Overall Rating of Hospital Dimension Score, HCAHPS Base Score, HCAHPS Consistency Score'
fields = []
for field_name in schemaString.split(", "):
    if field_name not in ("HCAHPS Base Score", "HCAHPS Consistency Score"):
        fields.append(StructField(field_name, StringType(), True))
    else:
        fields.append(StructField(field_name, IntegerType(), True))
schema = StructType(fields)
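For context on the fix: | is Python's bitwise-or operator and is not defined for two strings, so the original expression cannot even be evaluated, whereas not in is a membership test against the tuple of the two score columns. A minimal sketch outside Spark (the sample field_name here is just for illustration):

# Original expression: `|` between two str operands raises
# TypeError: unsupported operand type(s) for |: 'str' and 'str'
# field_name != ("HCAHPS Base Score" | "HCAHPS Consistency Score")

# Corrected expression: membership test, True for every column that is
# not one of the two HCAHPS score columns.
field_name = "City"
print(field_name not in ("HCAHPS Base Score", "HCAHPS Consistency Score"))            # True
print("HCAHPS Base Score" not in ("HCAHPS Base Score", "HCAHPS Consistency Score"))   # False

Once the loop completes, the resulting schema can be passed to something like sqlContext.createDataFrame(surveys_responses, schema); note that the values produced by split(',') are still strings, so the two IntegerType fields would need an explicit cast before that call succeeds.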