SQLite connection string that gets the database path dynamically

Time: 2018-01-30 05:37:25

Tags: sqlite relative

Hi all, I want to make the connection string dynamic so that it resolves the database path relative to the application on the server side; the data source should use a relative path rather than the hard-coded absolute one below.

Please help me.

 <connectionStrings>
    <add name="mainEntities" connectionString="metadata=res://*/Model1.csdl|res://*/Model1.ssdl|res://*/Model1.msl;provider=System.Data.SQLite.EF6;provider connection string='data source=&quot;C:\Users\Stech\Documents\Visual Studio 2017\Projects\SqLite Demo EntityFramework\Content\Login.db&quot;'" providerName="System.Data.EntityClient" />
  </connectionStrings>
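
One common approach (a sketch, not taken from the answer below) is the ADO.NET |DataDirectory| substitution string, which System.Data.SQLite expands in the data source. In an ASP.NET application it resolves to the App_Data folder at runtime; in a desktop application you can point it at any folder by calling AppDomain.CurrentDomain.SetData("DataDirectory", path) before the first connection is opened. Assuming Login.db is deployed to App_Data, the connection string above could become:

 <connectionStrings>
    <add name="mainEntities" connectionString="metadata=res://*/Model1.csdl|res://*/Model1.ssdl|res://*/Model1.msl;provider=System.Data.SQLite.EF6;provider connection string='data source=&quot;|DataDirectory|\Login.db&quot;'" providerName="System.Data.EntityClient" />
  </connectionStrings>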

1 answer:

Answer 0: (score: 0)
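
The script below is Python 2 (mechanize and cookielib): it reads license numbers from license.csv, submits each one to the Utah license lookup site, scrapes the result page, and appends the extracted fields to license_check.csv.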

import mechanize
import cookielib
from bs4 import BeautifulSoup
import csv

i = 0  # index of the current row in license.csv
while True:
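    # Read the list of licenses to check. Assumed layout of license.csv
    # (not stated in the original post): column 0 holds the license
    # number core, column 1 the four-digit suffix.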
    ifile = open('license.csv', "rb")
    reader = csv.reader(ifile)
    sl1 = []
    sl2 = []
    for row in reader:
        sl1.append((row[0]))
        sl2.append((row[1]))

    ifile.close()

    # Browser
    br = mechanize.Browser()

    # Cookie Jar
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)

    # Browser options
    br.set_handle_equiv(True)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)

    # Follows refresh 0 but not hangs on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)

    # Want debugging messages?
    #br.set_debug_http(True)
    #br.set_debug_redirects(True)
    #br.set_debug_responses(True)

    # User-Agent (this is cheating, ok?)
    br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]

    # Open the Utah license lookup search page:
    r = br.open('https://secure.utah.gov/llv/search/index.html')
    html = r.read()

    # Show the source
    print html
    # or
    print br.response().read()

    # Show the html title
    print br.title()

    # Show the response headers
    print r.info()
    # or
    print br.response().info()

    # Show the available forms
    for f in br.forms():
        print f

    # Select the second form (nr=1), which is the license search form
    br.select_form(nr=1)

    # Let's search
    br.form['licenseNumberCore'] = sl1[i]
    br.form['licenseNumberFourDigit'] = sl2[i]
    br.submit()
    print br.response().read()

    # Looking at some results in link format
    for l in br.links(url_regex=sl1[i]):
        print l

    # Testing presence of link (if the link is not found you would have to
    # handle a LinkNotFoundError exception)
    br.find_link(text_regex=sl1[i])
    print br.find_link()

    # Actually clicking the link
    req = br.click_link(text_regex=sl1[i])
    br.open(req)
    print br.response().read()
    print br.geturl()

    soup = BeautifulSoup(br.response().read(), 'html.parser')

    # The detail page lays each field out as a label <td> ("Name:",
    # "License Number:", ...) followed by a value <td>.
    pname = soup.find("td", text="Name:").find_next_sibling("td").text
    print(pname)

    plic = soup.find("td", text="License Number:").find_next_sibling("td").text
    print(plic)

    pstat = soup.find("td", text="License Status:").find_next_sibling("td").text
    print(pstat)

    pexp = soup.find("td", text="Expiration Date:").find_next_sibling("td").text
    print(pexp)

    # open a csv file with append, so old data will not be erased
    with open('license_check.csv', 'a') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow([pname, plic, pstat, pexp])

    # Move on to the next license number; stop once the expected 23
    # rows have been processed.
    i += 1
    if i == 23:
        break

I did it this way and it works well, although I have gotten stuck on some other points. Happy coding.