Scala: Add a string column to a DataFrame based on a condition

Date: 2018-08-08 12:11:19

Tags: scala apache-spark dataframe

I have a dataframe "df" which contains a column named ("A") with 3 million rows, and it looks like this:

+--------------------+
|             A      |
+--------------------+
|down::string... ... |
|down::string... ... |
|up::string......... |
|up::string......... |
|right::string...... |
+--------------------+

What I am looking for is to append a new column (C) to the dataframe that contains the first string of col(A), e.g.:

+--------------------+
|         C          |
+--------------------+
|down                |
|down                |
|up                  |
|up                  |
|right               |
+--------------------+

I used .withColumn as follows:

.withColumn

but this does not work.

1 Answer:

Answer 0 (score: -2):

One way to achieve this is to use a Spark UDF, as shown below:

import org.apache.spark.sql.functions._
// assumes a SparkSession named sparkSession is already in scope
import sparkSession.sqlContext.implicits._

val df = Seq("down::string... ...", "up::string.........", "right::string......").toDF("A")
df.show

// UDF that splits the input string on "::" and returns the first token
val myUDF = udf {
  (input: String) => {
    input.split("::")(0)
  }
}

val newDF = df.withColumn("C", myUDF(df("A")))
newDF.show()

The output will be:

+-------------------+
|                  A|
+-------------------+
|down::string... ...|
|up::string.........|
|right::string......|
+-------------------+

+-------------------+-----+
|                  A|    C|
+-------------------+-----+
|down::string... ...| down|
|up::string.........|   up|
|right::string......|right|
+-------------------+-----+
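
For reference, the same first token can also be extracted without a UDF by using the built-in split function from org.apache.spark.sql.functions, which Spark can optimize natively and which avoids UDF serialization overhead on a 3-million-row column. A minimal sketch, assuming the same df as above (the name newDF2 is just for illustration):

import org.apache.spark.sql.functions.split

// split on "::" yields an array column; getItem(0) selects its first element
val newDF2 = df.withColumn("C", split(df("A"), "::").getItem(0))
newDF2.show()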