我正在学习Scala并寻找一个优雅的解决方案来解决一个问题,这个问题很容易通过使用协同例程来解决。
由于Scala中默认情况下未启用协同例程,因此我认为它们至少不是广泛接受的最佳实践,因此希望在不使用它们的情况下编写代码。
如果有一个令人信服的论点能说明协程(coroutines)/延续(continuations)就是最佳实践,那也是一种可接受的答案。
我想编写一个在基本目录中搜索文件的函数。 匹配和降序标准应由具有“PathMatcher”特征的类的实例提供。 (至少我认为这是Scala的方法)
PathMatcher既用于判断fs_item_path是否匹配,也用于决定搜索是否应深入到目录中(如果fs_item_path是目录的路径)。
我现在采用的Python实现方法仅用于说明我的功能。
我想把这段代码写成“Scala方式”。
我的目标是找到具有这些特征的解决方案:
我认为解决方案将涉及延迟评估流,但我无法以工作方式组装流。
我还读过,如果使用不当,延迟流可以保留“旧值”的副本。我所追求的解决方案不会这样做。
开始搜索的目录的绝对路径
目录名列表,表明我们已经进入base_abs_path子目录的程度
具有PathMatcher特征的类的实例。
在下面的示例中,我使用了正则表达式实现,但我不想将使用限制为正则表达式。
这是一个完整的Python程序(使用Python 3.4测试),其中包含Python版本的“generate_all_matching_paths”。
程序将搜索“d:\Projects”以查找以“json”结尾的文件系统路径,分析文件以查找它们使用的缩进,然后打印出结果。
如果路径包含子串“python_portable”,则搜索不会进入该目录。
import os
import re
import codecs
#
# this is the bespoke function I want to port to Scala
#
def generate_all_matching_paths(
    base_dir_abs_path,
    rel_ancestor_dir_list,
    rel_path_matcher
):
    """Lazily yield absolute paths under *base_dir_abs_path* that match.

    Parameters:
        base_dir_abs_path     -- absolute path of the directory the search
                                 started from.
        rel_ancestor_dir_list -- list of directory names describing how far
                                 below base_dir_abs_path we currently are
                                 ([] for the top-level call).
        rel_path_matcher      -- object whose ``match(rel_path)`` returns a
                                 result with ``is_match`` (report this path)
                                 and ``do_descend`` (recurse into it if it
                                 is a directory) attributes.

    Yields:
        Absolute paths of matching file-system items, depth-first in
        ``os.listdir`` order.

    Raises:
        OSError -- if a directory cannot be listed.
    """
    current_dir_abs_path = os.path.join(base_dir_abs_path, *rel_ancestor_dir_list)
    for fs_item_name in os.listdir(current_dir_abs_path):
        fs_item_abs_path = os.path.join(current_dir_abs_path, fs_item_name)
        # One relative ancestor list serves both the match test and the
        # recursive call (the original built the identical list twice).
        fs_item_rel_ancestor_list = rel_ancestor_dir_list + [fs_item_name]
        fs_item_rel_path = os.path.join(*fs_item_rel_ancestor_list)
        result = rel_path_matcher.match(fs_item_rel_path)
        if result.is_match:
            yield fs_item_abs_path
        if result.do_descend and os.path.isdir(fs_item_abs_path):
            # Delegate to the recursive generator (Python 3.3+) instead of
            # the manual "for r in ...: yield r" loop.
            yield from generate_all_matching_paths(
                base_dir_abs_path,
                fs_item_rel_ancestor_list,
                rel_path_matcher
            )
#
# all following code is only a context giving example of how generate_all_matching_paths might be used
#
class MyMatchResult:
    """Outcome of a single PathMatcher test on one relative path."""

    def __init__(self, is_match, do_descend):
        # True when the examined relative path satisfied the matcher.
        self.is_match = is_match
        # True when the search may descend into the path, should it
        # turn out to be a directory.
        self.do_descend = do_descend
# in Scala this should implement the PathMatcher trait
class MyMatcher:
    """Regex-driven PathMatcher: one pattern selects paths, a list of
    patterns vetoes descent into directories."""

    def __init__(
        self,
        rel_path_regex,
        abort_dir_descend_regex_list
    ):
        # Compiled pattern a relative path must match to be reported.
        self.rel_path_regex = rel_path_regex
        # Compiled patterns; any hit stops descent into that directory.
        self.abort_dir_descend_regex_list = abort_dir_descend_regex_list

    def match(self, path):
        """Return a MyMatchResult for the relative *path*.

        ``is_match`` is True when ``rel_path_regex`` matches; ``do_descend``
        is False as soon as any abort pattern matches.
        """
        is_match = self.rel_path_regex.match(path) is not None
        # any() with a generator replaces the manual scan-and-break loop
        # and short-circuits on the first vetoing pattern just the same.
        do_descend = not any(
            regex.match(path)
            for regex in self.abort_dir_descend_regex_list
        )
        return MyMatchResult(is_match, do_descend)
def leading_whitespace(file_path):
    """Report which kinds of indentation a UTF-8 text file uses.

    Returns a ``(b_leading_spaces, b_leading_tabs)`` pair: whether any
    line's leading whitespace contains a space, and whether any line's
    leading whitespace contains a tab.  Reading stops early once both
    kinds have been observed.
    """
    b_leading_spaces = False
    b_leading_tabs = False
    with codecs.open(file_path, "r", "utf-8") as f:
        for line in f:
            # The run of spaces/tabs at the very start of the line.
            indent = line[:len(line) - len(line.lstrip(" \t"))]
            if " " in indent:
                b_leading_spaces = True
            if "\t" in indent:
                b_leading_tabs = True
            if b_leading_spaces and b_leading_tabs:
                break  # both kinds seen; nothing more to learn
    return b_leading_spaces, b_leading_tabs
def print_paths(path_list):
    """Print each path in *path_list* on its own line."""
    for p in path_list:
        print(p)
def main():
    """Search d:\\Projects for *.json paths, classify each file by the
    kind of leading whitespace it uses, and print the classification."""
    # One bucket of file paths per indentation style.
    buckets = {
        'space': [],
        'tab': [],
        'mixed': [],
        'none': [],
    }
    base_dir_abs_path = r'd:\Projects'
    rel_path_regex = re.compile('.*json$')
    abort_dir_descend_regex_list = [
        re.compile('^.*python_portable.*$')
    ]
    rel_patch_matcher = MyMatcher(rel_path_regex, abort_dir_descend_regex_list)
    for fs_item_path in generate_all_matching_paths(
        base_dir_abs_path,
        [],
        rel_patch_matcher
    ):
        if not os.path.isfile(fs_item_path):
            continue  # directories can match too; only analyze files
        b_spaces, b_tabs = leading_whitespace(fs_item_path)
        if b_spaces and b_tabs:
            key = 'mixed'
        elif b_spaces:
            key = 'space'
        elif b_tabs:
            key = 'tab'
        else:
            key = 'none'
        buckets[key].append(fs_item_path)
    print('space indentation:')
    print_paths(buckets['space'])
    print('tab indentation:')
    print_paths(buckets['tab'])
    print('mixed indentation:')
    print_paths(buckets['mixed'])
    print('no indentation:')
    print_paths(buckets['none'])
    print('space: {}'.format(len(buckets['space'])))
    print('tab: {}'.format(len(buckets['tab'])))
    print('mixed: {}'.format(len(buckets['mixed'])))
    print('none: {}'.format(len(buckets['none'])))


if __name__ == '__main__':
    main()
答案 0(得分:3):
你是对的,你通常会用某种懒惰的评估替换python yield
。这是一个概念证明,它使用案例类来表示目录,以避免为此示例执行文件IO内容。
/** Immutable in-memory stand-in for a file-system directory (used so the
  * example needs no real file IO). Case-class parameters are already vals,
  * so the explicit `val` modifier is redundant and dropped. */
case class Directory(name: String, files: List[String], subDirectories: List[Directory])
/** Descend into every directory except ones named "tmp". */
def descendFilter(directory: Directory): Boolean = !(directory.name == "tmp")
/** A path is reported iff it mentions "important" anywhere. */
def matchFilter(path: String): Boolean = path.contains("important")
/** Lazily enumerate the paths of `directory` and everything beneath it.
  *
  * Emits the directory's own path first, then its files, then the
  * contents of each non-filtered subdirectory, depth-first.
  *
  * Fix over the original: the subdirectories were mapped as a strict
  * `List`, which recursed into *every* subtree eagerly and defeated the
  * point of returning a lazy Stream. Converting to a Stream before
  * filter/flatMap keeps the traversal demand-driven; the emitted order
  * is unchanged.
  */
def traverse(directory: Directory, path: String = ""): Stream[String] = {
  val newPath = path + directory.name + "/"
  val files = (directory.files map (newPath + _)).toStream
  val subdirPaths =
    directory.subDirectories.toStream filter descendFilter flatMap (traverse(_, newPath))
  (path + directory.name) #:: files ++ subdirPaths
}
// Sample in-memory tree for exercising traverse: descendFilter should prune
// the "tmp" branch, and matchFilter should keep only paths containing
// "important" — presumably printing just /home/karl/important stuff.
val directory = Directory("", List(), List(
Directory("var", List("pid"), List()),
Directory("opt", List("java"), List()),
Directory("tmp", List("lots", "of", "temp", "files"), List()),
Directory("home", List(), List(
Directory("karl", List("important stuff"), List())
))
))
// Walk the tree lazily, keep matching paths, print them.
traverse(directory) filter matchFilter foreach println
您基本上可以使用流,就好像它包含整个文件系统一样,但在内部,它只会根据需要获取它们,并且只是快速丢弃它们,除非您在其他地方保留对它们的引用。
答案 1(得分:1):
这是另一种在Scala中执行此操作的方法(再次使用Streams):
/** Lazily enumerate all files under `f` (recursively) that satisfy `matcher`.
  *
  * Directories are descended into but never reported themselves. A null
  * from File.listFiles (f is not a directory, or an I/O error) is treated
  * as an empty directory.
  *
  * Fix over the original: the null check re-invoked `f.listFiles` instead
  * of testing the stored result, reading the directory twice and racing
  * against concurrent file-system changes.
  */
def recursiveListFiles(f: File, matcher: (File) => Boolean): Stream[File] = {
  val listed = f.listFiles()
  val files = (if (listed == null) Array[File]() else listed).toStream
  val (allDirs, allFiles) = files.partition(_.isDirectory)
  allFiles.filter(matcher(_)) ++
    allDirs.flatMap { d =>
      recursiveListFiles(d, matcher)
    }
}
def main(args: Array[String]): Unit = {
  // Print every .png found under /usr/share. `foreach` returns Unit, so
  // the original's `val allFiles = ...` bound nothing useful — dropped.
  recursiveListFiles(
    new File("/usr/share"),
    (f: File) => f.getName.endsWith(".png")) foreach println
}