我正在尝试使用Databricks CLI创建Databricks作业。该作业运行一个python笔记本。我可以使用“ databricks工作区导入...”在Databricks中创建笔记本。我尝试使用以下方法创建作业: databricks作业创建--json文件C:\ Users \ myid \ Documents \ db_cli \ Hello.json
Hello.json文件包含的位置:
{
"name": "SimpleJob",
"settings": {
"notebook_task": {
"language": "PYTHON",
"object_type": "NOTEBOOK",
"notebook_path": "/Users/Appid@blah.com/SimpleNotebook"
}
},
"new_cluster": {
"custom_tags": [{"key": "component", "value": "aeops"}],
"node_type_id": "i3.xlarge",
"spark_version": "5.5.x-scala2.11",
"aws_attributes": {
"ebs_volume_type": "GENERAL_PURPOSE_SSD",
"ebs_volume_count": 3,
"ebs_volume_size": 100,
"spot_bid_price_percent": 120,
"availability": "SPOT_WITH_FALLBACK",
"first_on_demand": 1,
"instance_profile_arn": "arn:aws:iam::<actual acct id goes here>:instance-profile/db-ec2-role",
"num_workers": 8,
"spark_env_vars": {
"PYSPARK_PYTHON": "/databricks/python3/bin/python3"
}
},
"autoscale": {
"min_workers": 1,
"max_workers": 100
},
"email_notifications": {
"on_start": [],
"on_success": [],
"on_failure": ["johnDoe@blah.com"]
},
"timeout_seconds": 0,
"max_retries": 3,
"max_concurrent_runs": 1
}
}
当我跑步时: databricks作业创建--json文件C:\ Users \ user1 \ Documents \ db_cli \ Hello.json
它创建了作业,但是作业中的“任务”没有引用Python笔记本/Users/Appid@blah.com/SimpleNotebook
需要对JSON进行哪些更改,以便我可以创建作业并在作业中引用指定的Python笔记本?