Container keeps crashing when creating a deployment from a Docker image in minikube

Date: 2019-07-12 11:06:18

Tags: deployment containers kubectl minikube

I have a Docker image containing Python files that are supposed to download satellite imagery from the SciHub website. The Docker image itself works fine. Now, when I try to create a deployment with kubectl so that I can expose it as a service, its container keeps crashing. This is what the pod description shows when I look at it with kubectl describe pod (see "Pod events" at the bottom).

This is how I tried to create the deployment: sudo kubectl run back --image=back:latest --port=8080 --image-pull-policy Never. I also tried changing the port, but without success. These are the files inside the Docker image.
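ingestion.py (shown below) reads its SciHub credentials and the CDINRW_* settings from environment variables, so those also need to exist inside the pod. A minimal sketch, with placeholder values, of how they could be passed on the kubectl run command line:

# placeholder values for the environment variables that ingestion.py expects
sudo kubectl run back --image=back:latest --port=8080 --image-pull-policy Never \
  --env="scihub_username=<user>" \
  --env="scihub_password=<password>" \
  --env="CDINRW_BASE_URL=<host>:<port>" \
  --env="CDINRW_JOB_ID=<job-id>"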

Dockerfile

FROM python:3.7-stretch

COPY . /code

WORKDIR /code

RUN pip install -r requirements.txt

ENTRYPOINT ["python", "ingestion.py"]

**ingestion.py**

import os
import shutil
import logging

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s')
logger = logging.getLogger("ingestion")

import requests

import datahub

scihub_username = os.environ["scihub_username"]
scihub_password = os.environ["scihub_password"]
result_url = "http://" + os.environ["CDINRW_BASE_URL"] + "/jobs/" + os.environ["CDINRW_JOB_ID"] + "/results"

logger.info("Searching the Copernicus Open Access Hub")
scenes = datahub.search(username=scihub_username,
                        password=scihub_password,
                        producttype=os.getenv("producttype"),
                        platformname=os.getenv("platformname"),
                        days_back=os.getenv("days_back", 2),
                        footprint=os.getenv("footprint"),
                        max_cloud_cover_percentage=os.getenv("max_cloud_cover_percentage"),
                        start_date = os.getenv("start_date"),
                        end_date = os.getenv("end_date"))

logger.info("Found {} relevant scenes".format(len(scenes)))

job_results = []
for scene in scenes:
    # do not download a scene that has already been ingested
    if os.path.exists(os.path.join("/out_data", scene["title"]+".SAFE")):
        logger.info("The scene {} already exists in /out_data and will not be downloaded again.".format(scene["title"]))
        filename = scene["title"]+".SAFE"
    else:
        logger.info("Starting the download of scene {}".format(scene["title"]))
        filename = datahub.download(scene, "/tmp", scihub_username, scihub_password, unpack=True)
        logger.info("The download was successful.")
        shutil.move(filename, "/out_data")
    result_message = {"description": "test",
                      "type": "Raster",
                      "format": "SAFE",
                      "filename": os.path.basename(filename)}
    job_results.append(result_message)

res = requests.put(result_url, json=job_results, timeout=60)
res.raise_for_status()

**datahub.py**

import logging
import os
import urllib.parse
import zipfile

import requests

# constructing URLs for querying the data hub
_BASE_URL = "https://scihub.copernicus.eu/dhus/"
SITE = {}
SITE["SEARCH"] = _BASE_URL + "search?format=xml&sortedby=beginposition&order=desc&rows=100&start={offset}&q="
_PRODUCT_URL = _BASE_URL + "odata/v1/Products('{uuid}')/"
SITE["CHECKSUM"] = _PRODUCT_URL + "Checksum/Value/$value"
SITE["SAFEZIP"] = _PRODUCT_URL + "$value"

logger = logging.getLogger(__name__)

def _build_search_url(producttype=None, platformname=None, days_back=2, footprint=None, max_cloud_cover_percentage=None, start_date=None, end_date=None):
    search_terms = []
    if producttype:
        search_terms.append("producttype:{}".format(producttype))
    if platformname:
        search_terms.append("platformname:{}".format(platformname))
    if start_date and end_date:
        search_terms.append(
            "beginPosition:[{}+TO+{}]".format(start_date, end_date))
    elif days_back:
        search_terms.append(
            "beginPosition:[NOW-{}DAYS+TO+NOW]".format(days_back))
    if footprint:
        search_terms.append("footprint:%22Intersects({})%22".format(
            footprint.replace(" ", "+")))
    if max_cloud_cover_percentage:
        search_terms.append("cloudcoverpercentage:[0+TO+{}]".format(max_cloud_cover_percentage))
    url = SITE["SEARCH"] + "+AND+".join(search_terms)
    return url


def _unpack(zip_file, directory, remove_after=False):
    with zipfile.ZipFile(zip_file) as zf:
        # This assumes that the zipfile only contains the .SAFE directory at root level
        safe_path = zf.namelist()[0]
        zf.extractall(path=directory)
    if remove_after:
        os.remove(zip_file)
    return os.path.normpath(os.path.join(directory, safe_path))


def search(username, password, producttype=None, platformname=None ,days_back=2, footprint=None, max_cloud_cover_percentage=None, start_date=None, end_date=None):
    """ Search the Copernicus SciHub

    Parameters
    ----------
    username : str
      user name for the Copernicus SciHub
    password : str
      password for the Copernicus SciHub
    producttype : str, optional
      product type to filter for in the query (see https://scihub.copernicus.eu/userguide/FullTextSearch#Search_Keywords for allowed values)
    platformname : str, optional 
      platform name to filter for in the query (see https://scihub.copernicus.eu/userguide/FullTextSearch#Search_Keywords for allowed values)
    days_back : int, optional
      number of days before today that will be searched. Defaults to the last 2 days. If start_date and end_date are set, the days_back parameter is ignored
    footprint : str, optional
      well-known-text representation of the footprint
    max_cloud_cover_percentage: str, optional
      percentage of cloud cover per scene. Can only be used in combination with Sentinel-2 imagery. 
      (see https://scihub.copernicus.eu/userguide/FullTextSearch#Search_Keywords for allowed values)
    start_date: str, optional
      start point of the search extent; has to be used in combination with end_date
    end_date: str, optional
      end point of the search extent; has to be used in combination with start_date

    Returns
    -------
    list
      a list of scenes that match the search parameters
    """

    import xml.etree.cElementTree as ET
    scenes = []
    search_url = _build_search_url(producttype, platformname, days_back, footprint, max_cloud_cover_percentage, start_date, end_date)
    logger.info("Search URL: {}".format(search_url))
    offset = 0
    rowsBreak = 5000
    name_space = {"atom": "http://www.w3.org/2005/Atom",
                  "opensearch": "http://a9.com/-/spec/opensearch/1.1/"}
    while offset < rowsBreak:  # Next pagination page:
        response = requests.get(search_url.format(offset=offset), auth=(username, password))
        root = ET.fromstring(response.content)
        if offset == 0:
            rowsBreak = int(
                root.find("opensearch:totalResults", name_space).text)
        for e in root.iterfind("atom:entry", name_space):
            uuid = e.find("atom:id", name_space).text
            title = e.find("atom:title", name_space).text
            begin_position = e.find(
                "atom:date[@name='beginposition']", name_space).text
            end_position = e.find(
                "atom:date[@name='endposition']", name_space).text
            footprint = e.find("atom:str[@name='footprint']", name_space).text
            scenes.append({
                "id": uuid,
                "title": title,
                "begin_position": begin_position,
                "end_position": end_position,
                "footprint": footprint})
        # Ultimate DHuS pagination page size limit (rows per page).
        offset += 100
    return scenes


def download(scene, directory, username, password, unpack=True):
    """ Download a Sentinel scene based on its uuid

    Parameters
    ----------
    scene : dict
        the scene to be downloaded
    directory : str
        the path where the file will be downloaded to
    username : str
        username for the Copernicus SciHub
    password : str
        password for the Copernicus SciHub
    unpack: boolean, optional
        flag that defines whether the downloaded product should be unpacked after download. Defaults to True

    Raises
    ------
    ValueError
        if the size of the downloaded file does not match the Content-Length header
    ValueError
        if the checksum of the downloaded file does not match the checksum provided by the Copernicus SciHub

    Returns
    -------
    str
        path to the downloaded file
    """

    import hashlib
    md5hash = hashlib.md5()
    md5sum = requests.get(SITE["CHECKSUM"].format(
        uuid=scene["id"]), auth=(username, password)).text

    download_path = os.path.join(directory, scene["title"] + ".zip")
    # overwrite if path already exists
    if os.path.exists(download_path):
        os.remove(download_path)
    url = SITE["SAFEZIP"].format(uuid=scene["id"])
    rsp = requests.get(url, auth=(username, password), stream=True)
    cl = rsp.headers.get("Content-Length")
    size = int(cl) if cl else -1
    # Actually fetch now:
    with open(download_path, "wb") as f:  # Do not read as a whole into memory:
        written = 0
        for block in rsp.iter_content(8192):
            f.write(block)
            written += len(block)
            md5hash.update(block)
    written = os.path.getsize(download_path)
    if size > -1 and written != size:
        raise ValueError("{}: size mismatch, {} bytes written but expected {} bytes to write!".format(
            download_path, written, size))
    elif md5sum:
        calculated = md5hash.hexdigest()
        expected = md5sum.lower()
        if calculated != expected:
            raise ValueError("{}: checksum mismatch, calculated {} but expected {}!".format(
                download_path, calculated, expected))
    if unpack:
        # return the path of the extracted .SAFE directory instead of the zip archive
        return _unpack(download_path, directory)
    return download_path

Pod events

Events:
  Type     Reason   Age                        From               Message
  ----     ------   ----                       ----               -------
  Warning  BackOff  2m39s (x18636 over 2d19h)  kubelet, minikube  Back-off restarting failed container
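The events above only show the restart back-off, not why the container exits. The container's own output, which should contain the actual Python error, can be read with kubectl logs; a sketch, with the pod name as a placeholder:

# output of the last, crashed run of the container (pod name is a placeholder)
sudo kubectl logs <back-pod-name> --previous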

The system that is going to use this service already runs another main frontend service on port 8081 (it only runs the application), so maybe I need to expose this service on that same port. How can I get the deployment to run?
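For completeness, a sketch of how the deployment could be exposed once it runs; the service port 8081 and the container port 8080 are taken from the description above and may need to be adjusted:

# expose the deployment as a service; --port is the service port, --target-port the container port
sudo kubectl expose deployment back --port=8081 --target-port=8080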

0 Answers:

No answers yet.