Dataset schema: id (int32, 0-252k); repo (string, 7-55 chars); path (string, 4-127 chars); func_name (string, 1-88 chars); original_string (string, 75-19.8k chars); language (1 class); code (string, 75-19.8k chars); code_tokens (list); docstring (string, 3-17.3k chars); docstring_tokens (list); sha (string, 40 chars); url (string, 87-242 chars)

| id | repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url |
|---|---|---|---|---|---|---|---|---|---|---|---|
13,800
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
Max_P_Classifier._ss
|
def _ss(self, class_def):
"""calculates sum of squares for a class"""
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
return sum(css)
|
python
|
def _ss(self, class_def):
"""calculates sum of squares for a class"""
yc = self.y[class_def]
css = yc - yc.mean()
css *= css
return sum(css)
|
[
"def",
"_ss",
"(",
"self",
",",
"class_def",
")",
":",
"yc",
"=",
"self",
".",
"y",
"[",
"class_def",
"]",
"css",
"=",
"yc",
"-",
"yc",
".",
"mean",
"(",
")",
"css",
"*=",
"css",
"return",
"sum",
"(",
"css",
")"
] |
calculates sum of squares for a class
|
[
"calculates",
"sum",
"of",
"squares",
"for",
"a",
"class"
] |
5b22ec33f5802becf40557614d90cd38efa1676e
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L2178-L2183
|
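The `_ss` helper above computes the within-class sum of squared deviations; it assumes `self.y` is a NumPy array that can be fancy-indexed by a list of member indices. A minimal standalone sketch of the same computation (the `sum_of_squares` name is hypothetical, not part of mapclassify):

```python
import numpy as np

def sum_of_squares(y, class_def):
    """Within-class sum of squared deviations, mirroring Max_P_Classifier._ss."""
    yc = np.asarray(y)[class_def]      # values belonging to the class
    dev = yc - yc.mean()               # deviations from the class mean
    return float(np.sum(dev * dev))    # sum of squared deviations

# Example: a class holding indices 0, 2, 3 of y.
y = [3.0, 9.0, 4.0, 5.0]
print(sum_of_squares(y, [0, 2, 3]))  # values [3, 4, 5], mean 4 -> 2.0
```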
13,801
|
pysal/mapclassify
|
mapclassify/classifiers.py
|
Max_P_Classifier._swap
|
def _swap(self, class1, class2, a):
"""evaluate cost of moving a from class1 to class2"""
ss1 = self._ss(class1)
ss2 = self._ss(class2)
tss1 = ss1 + ss2
class1c = copy.copy(class1)
class2c = copy.copy(class2)
class1c.remove(a)
class2c.append(a)
ss1 = self._ss(class1c)
ss2 = self._ss(class2c)
tss2 = ss1 + ss2
if tss1 < tss2:
return False
else:
return True
|
python
|
def _swap(self, class1, class2, a):
"""evaluate cost of moving a from class1 to class2"""
ss1 = self._ss(class1)
ss2 = self._ss(class2)
tss1 = ss1 + ss2
class1c = copy.copy(class1)
class2c = copy.copy(class2)
class1c.remove(a)
class2c.append(a)
ss1 = self._ss(class1c)
ss2 = self._ss(class2c)
tss2 = ss1 + ss2
if tss1 < tss2:
return False
else:
return True
|
[
"def",
"_swap",
"(",
"self",
",",
"class1",
",",
"class2",
",",
"a",
")",
":",
"ss1",
"=",
"self",
".",
"_ss",
"(",
"class1",
")",
"ss2",
"=",
"self",
".",
"_ss",
"(",
"class2",
")",
"tss1",
"=",
"ss1",
"+",
"ss2",
"class1c",
"=",
"copy",
".",
"copy",
"(",
"class1",
")",
"class2c",
"=",
"copy",
".",
"copy",
"(",
"class2",
")",
"class1c",
".",
"remove",
"(",
"a",
")",
"class2c",
".",
"append",
"(",
"a",
")",
"ss1",
"=",
"self",
".",
"_ss",
"(",
"class1c",
")",
"ss2",
"=",
"self",
".",
"_ss",
"(",
"class2c",
")",
"tss2",
"=",
"ss1",
"+",
"ss2",
"if",
"tss1",
"<",
"tss2",
":",
"return",
"False",
"else",
":",
"return",
"True"
] |
evaluate cost of moving a from class1 to class2
|
[
"evaluate",
"cost",
"of",
"moving",
"a",
"from",
"class1",
"to",
"class2"
] |
5b22ec33f5802becf40557614d90cd38efa1676e
|
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L2185-L2200
|
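`_swap` compares the total sum of squares before and after moving element `a` (an index into `y`) from one class to the other, returning True when the move does not increase it. A standalone sketch of that decision rule, reusing the `_ss` logic above (names are illustrative):

```python
import copy
import numpy as np

def ss(y, class_def):
    """Within-class sum of squared deviations (see _ss above)."""
    yc = np.asarray(y)[class_def]
    return float(((yc - yc.mean()) ** 2).sum())

def swap_improves(y, class1, class2, a):
    """True when moving element a from class1 to class2 does not raise the
    total sum of squares, mirroring the decision made by _swap."""
    before = ss(y, class1) + ss(y, class2)
    class1c = copy.copy(class1)
    class2c = copy.copy(class2)
    class1c.remove(a)   # a is removed by value, as in the original
    class2c.append(a)
    after = ss(y, class1c) + ss(y, class2c)
    return after <= before

y = [1.0, 2.0, 3.0, 10.0, 11.0]
# Moving index 4 (value 11.0) out of the low class clearly helps:
print(swap_improves(y, [0, 1, 2, 4], [3], 4))  # True
```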
13,802
|
abarker/pdfCropMargins
|
src/pdfCropMargins/calculate_bounding_boxes.py
|
get_bounding_box_list_render_image
|
def get_bounding_box_list_render_image(pdf_file_name, input_doc):
"""Calculate the bounding box list by directly rendering each page of the PDF as
an image file. The MediaBox and CropBox values in input_doc should have
already been set to the chosen page size before the rendering."""
program_to_use = "pdftoppm" # default to pdftoppm
if args.gsRender: program_to_use = "Ghostscript"
# Threshold value set in range 0-255, where 0 is black, with 191 default.
if not args.threshold: args.threshold = 191
threshold = args.threshold
if not args.numSmooths: args.numSmooths = 0
if not args.numBlurs: args.numBlurs = 0
temp_dir = ex.program_temp_directory # use the program default; don't delete dir!
temp_image_file_root = os.path.join(temp_dir, ex.temp_file_prefix + "PageImage")
if args.verbose:
print("\nRendering the PDF to images using the " + program_to_use + " program,"
"\nthis may take a while...")
# Do the rendering of all the files.
render_pdf_file_to_image_files(pdf_file_name, temp_image_file_root, program_to_use)
# Currently assuming that sorting the output will always put them in correct order.
outfiles = sorted(glob.glob(temp_image_file_root + "*"))
if args.verbose:
print("\nAnalyzing the page images with PIL to find bounding boxes,"
"\nusing the threshold " + str(args.threshold) + "."
" Finding the bounding box for page:\n")
bounding_box_list = []
for page_num, tmp_image_file_name in enumerate(outfiles):
curr_page = input_doc.getPage(page_num)
# Open the image in PIL. Retry a few times on failure, in case of race conditions.
max_num_tries = 3
time_between_tries = 1
curr_num_tries = 0
while True:
try:
# PIL for some reason fails in Python 3.4 if you open the image
# from a file you opened yourself. Works in Python 2 and earlier
# Python 3. So original code is commented out, and path passed.
#
# tmpImageFile = open(tmpImageFileName)
# im = Image.open(tmpImageFile)
im = Image.open(tmp_image_file_name)
break
except (IOError, UnicodeDecodeError) as e:
curr_num_tries += 1
if args.verbose:
print("Warning: Exception opening image", tmp_image_file_name,
"on try", curr_num_tries, "\nError is", e, file=sys.stderr)
# tmpImageFile.close() # see above comment
if curr_num_tries > max_num_tries: raise # re-raise exception
time.sleep(time_between_tries)
# Apply any blur or smooth operations specified by the user.
for i in range(args.numBlurs):
im = im.filter(ImageFilter.BLUR)
for i in range(args.numSmooths):
im = im.filter(ImageFilter.SMOOTH_MORE)
# Convert the image to black and white, according to a threshold.
# Make a negative image, because that works with the PIL getbbox routine.
if args.verbose:
print(page_num+1, end=" ") # page num numbering from 1
# Note that the point method calls the function on each pixel, replacing it.
#im = im.point(lambda p: p > threshold and 255) # create a positive image
#im = im.point(lambda p: p < threshold and 255) # create a negative image
# Below code is easier to understand than tricky use of "and" in evaluation.
im = im.point(lambda p: 255 if p < threshold else 0) # create a negative image
if args.showImages:
im.show() # usually for debugging or param-setting
# Calculate the bounding box of the negative image, and append to list.
bounding_box = calculate_bounding_box_from_image(im, curr_page)
bounding_box_list.append(bounding_box)
# Clean up the image files after they are no longer needed.
# tmpImageFile.close() # see above comment
os.remove(tmp_image_file_name)
if args.verbose:
print()
return bounding_box_list
|
python
|
def get_bounding_box_list_render_image(pdf_file_name, input_doc):
"""Calculate the bounding box list by directly rendering each page of the PDF as
an image file. The MediaBox and CropBox values in input_doc should have
already been set to the chosen page size before the rendering."""
program_to_use = "pdftoppm" # default to pdftoppm
if args.gsRender: program_to_use = "Ghostscript"
# Threshold value set in range 0-255, where 0 is black, with 191 default.
if not args.threshold: args.threshold = 191
threshold = args.threshold
if not args.numSmooths: args.numSmooths = 0
if not args.numBlurs: args.numBlurs = 0
temp_dir = ex.program_temp_directory # use the program default; don't delete dir!
temp_image_file_root = os.path.join(temp_dir, ex.temp_file_prefix + "PageImage")
if args.verbose:
print("\nRendering the PDF to images using the " + program_to_use + " program,"
"\nthis may take a while...")
# Do the rendering of all the files.
render_pdf_file_to_image_files(pdf_file_name, temp_image_file_root, program_to_use)
# Currently assuming that sorting the output will always put them in correct order.
outfiles = sorted(glob.glob(temp_image_file_root + "*"))
if args.verbose:
print("\nAnalyzing the page images with PIL to find bounding boxes,"
"\nusing the threshold " + str(args.threshold) + "."
" Finding the bounding box for page:\n")
bounding_box_list = []
for page_num, tmp_image_file_name in enumerate(outfiles):
curr_page = input_doc.getPage(page_num)
# Open the image in PIL. Retry a few times on failure, in case of race conditions.
max_num_tries = 3
time_between_tries = 1
curr_num_tries = 0
while True:
try:
# PIL for some reason fails in Python 3.4 if you open the image
# from a file you opened yourself. Works in Python 2 and earlier
# Python 3. So original code is commented out, and path passed.
#
# tmpImageFile = open(tmpImageFileName)
# im = Image.open(tmpImageFile)
im = Image.open(tmp_image_file_name)
break
except (IOError, UnicodeDecodeError) as e:
curr_num_tries += 1
if args.verbose:
print("Warning: Exception opening image", tmp_image_file_name,
"on try", curr_num_tries, "\nError is", e, file=sys.stderr)
# tmpImageFile.close() # see above comment
if curr_num_tries > max_num_tries: raise # re-raise exception
time.sleep(time_between_tries)
# Apply any blur or smooth operations specified by the user.
for i in range(args.numBlurs):
im = im.filter(ImageFilter.BLUR)
for i in range(args.numSmooths):
im = im.filter(ImageFilter.SMOOTH_MORE)
# Convert the image to black and white, according to a threshold.
# Make a negative image, because that works with the PIL getbbox routine.
if args.verbose:
print(page_num+1, end=" ") # page num numbering from 1
# Note that the point method calls the function on each pixel, replacing it.
#im = im.point(lambda p: p > threshold and 255) # create a positive image
#im = im.point(lambda p: p < threshold and 255) # create a negative image
# Below code is easier to understand than tricky use of "and" in evaluation.
im = im.point(lambda p: 255 if p < threshold else 0) # create a negative image
if args.showImages:
im.show() # usually for debugging or param-setting
# Calculate the bounding box of the negative image, and append to list.
bounding_box = calculate_bounding_box_from_image(im, curr_page)
bounding_box_list.append(bounding_box)
# Clean up the image files after they are no longer needed.
# tmpImageFile.close() # see above comment
os.remove(tmp_image_file_name)
if args.verbose:
print()
return bounding_box_list
|
[
"def",
"get_bounding_box_list_render_image",
"(",
"pdf_file_name",
",",
"input_doc",
")",
":",
"program_to_use",
"=",
"\"pdftoppm\"",
"# default to pdftoppm",
"if",
"args",
".",
"gsRender",
":",
"program_to_use",
"=",
"\"Ghostscript\"",
"# Threshold value set in range 0-255, where 0 is black, with 191 default.",
"if",
"not",
"args",
".",
"threshold",
":",
"args",
".",
"threshold",
"=",
"191",
"threshold",
"=",
"args",
".",
"threshold",
"if",
"not",
"args",
".",
"numSmooths",
":",
"args",
".",
"numSmooths",
"=",
"0",
"if",
"not",
"args",
".",
"numBlurs",
":",
"args",
".",
"numBlurs",
"=",
"0",
"temp_dir",
"=",
"ex",
".",
"program_temp_directory",
"# use the program default; don't delete dir!",
"temp_image_file_root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"temp_dir",
",",
"ex",
".",
"temp_file_prefix",
"+",
"\"PageImage\"",
")",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"\"\\nRendering the PDF to images using the \"",
"+",
"program_to_use",
"+",
"\" program,\"",
"\"\\nthis may take a while...\"",
")",
"# Do the rendering of all the files.",
"render_pdf_file_to_image_files",
"(",
"pdf_file_name",
",",
"temp_image_file_root",
",",
"program_to_use",
")",
"# Currently assuming that sorting the output will always put them in correct order.",
"outfiles",
"=",
"sorted",
"(",
"glob",
".",
"glob",
"(",
"temp_image_file_root",
"+",
"\"*\"",
")",
")",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"\"\\nAnalyzing the page images with PIL to find bounding boxes,\"",
"\"\\nusing the threshold \"",
"+",
"str",
"(",
"args",
".",
"threshold",
")",
"+",
"\".\"",
"\" Finding the bounding box for page:\\n\"",
")",
"bounding_box_list",
"=",
"[",
"]",
"for",
"page_num",
",",
"tmp_image_file_name",
"in",
"enumerate",
"(",
"outfiles",
")",
":",
"curr_page",
"=",
"input_doc",
".",
"getPage",
"(",
"page_num",
")",
"# Open the image in PIL. Retry a few times on fail in case race conditions.",
"max_num_tries",
"=",
"3",
"time_between_tries",
"=",
"1",
"curr_num_tries",
"=",
"0",
"while",
"True",
":",
"try",
":",
"# PIL for some reason fails in Python 3.4 if you open the image",
"# from a file you opened yourself. Works in Python 2 and earlier",
"# Python 3. So original code is commented out, and path passed.",
"#",
"# tmpImageFile = open(tmpImageFileName)",
"# im = Image.open(tmpImageFile)",
"im",
"=",
"Image",
".",
"open",
"(",
"tmp_image_file_name",
")",
"break",
"except",
"(",
"IOError",
",",
"UnicodeDecodeError",
")",
"as",
"e",
":",
"curr_num_tries",
"+=",
"1",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"\"Warning: Exception opening image\"",
",",
"tmp_image_file_name",
",",
"\"on try\"",
",",
"curr_num_tries",
",",
"\"\\nError is\"",
",",
"e",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"# tmpImageFile.close() # see above comment",
"if",
"curr_num_tries",
">",
"max_num_tries",
":",
"raise",
"# re-raise exception",
"time",
".",
"sleep",
"(",
"time_between_tries",
")",
"# Apply any blur or smooth operations specified by the user.",
"for",
"i",
"in",
"range",
"(",
"args",
".",
"numBlurs",
")",
":",
"im",
"=",
"im",
".",
"filter",
"(",
"ImageFilter",
".",
"BLUR",
")",
"for",
"i",
"in",
"range",
"(",
"args",
".",
"numSmooths",
")",
":",
"im",
"=",
"im",
".",
"filter",
"(",
"ImageFilter",
".",
"SMOOTH_MORE",
")",
"# Convert the image to black and white, according to a threshold.",
"# Make a negative image, because that works with the PIL getbbox routine.",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"page_num",
"+",
"1",
",",
"end",
"=",
"\" \"",
")",
"# page num numbering from 1",
"# Note that the point method calls the function on each pixel, replacing it.",
"#im = im.point(lambda p: p > threshold and 255) # create a positive image",
"#im = im.point(lambda p: p < threshold and 255) # create a negative image",
"# Below code is easier to understand than tricky use of \"and\" in evaluation.",
"im",
"=",
"im",
".",
"point",
"(",
"lambda",
"p",
":",
"255",
"if",
"p",
"<",
"threshold",
"else",
"0",
")",
"# create a negative image",
"if",
"args",
".",
"showImages",
":",
"im",
".",
"show",
"(",
")",
"# usually for debugging or param-setting",
"# Calculate the bounding box of the negative image, and append to list.",
"bounding_box",
"=",
"calculate_bounding_box_from_image",
"(",
"im",
",",
"curr_page",
")",
"bounding_box_list",
".",
"append",
"(",
"bounding_box",
")",
"# Clean up the image files after they are no longer needed.",
"# tmpImageFile.close() # see above comment",
"os",
".",
"remove",
"(",
"tmp_image_file_name",
")",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
")",
"return",
"bounding_box_list"
] |
Calculate the bounding box list by directly rendering each page of the PDF as
an image file. The MediaBox and CropBox values in input_doc should have
already been set to the chosen page size before the rendering.
|
[
"Calculate",
"the",
"bounding",
"box",
"list",
"by",
"directly",
"rendering",
"each",
"page",
"of",
"the",
"PDF",
"as",
"an",
"image",
"file",
".",
"The",
"MediaBox",
"and",
"CropBox",
"values",
"in",
"input_doc",
"should",
"have",
"already",
"been",
"set",
"to",
"the",
"chosen",
"page",
"size",
"before",
"the",
"rendering",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/calculate_bounding_boxes.py#L116-L206
|
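The core trick in this pipeline is the threshold-to-negative step: pixels darker than the threshold become white (255) and everything else black (0), so PIL's `getbbox`, which bounds the nonzero region, returns exactly the content box. A self-contained sketch on a synthetic page image:

```python
from PIL import Image, ImageDraw

# Build a synthetic grayscale "page": white background, dark box of content.
page = Image.new("L", (200, 100), 255)
ImageDraw.Draw(page).rectangle([40, 20, 160, 80], fill=0)

threshold = 191
# Negative image: content pixels (darker than threshold) become 255,
# background becomes 0, so getbbox() sees only the content.
neg = page.point(lambda p: 255 if p < threshold else 0)
print(neg.getbbox())  # (40, 20, 161, 81) in (left, upper, right, lower) pixels
```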
13,803
|
abarker/pdfCropMargins
|
src/pdfCropMargins/calculate_bounding_boxes.py
|
render_pdf_file_to_image_files
|
def render_pdf_file_to_image_files(pdf_file_name, output_filename_root, program_to_use):
"""Render all the pages of the PDF file at pdf_file_name to image files with
path and filename prefix given by output_filename_root. Any directories must
have already been created, and the calling program is responsible for
deleting any directories or image files. The program program_to_use,
currently either the string "pdftoppm" or the string "Ghostscript", will be
called externally. The image type that the PDF is converted into must be
directly openable by PIL."""
res_x = str(args.resX)
res_y = str(args.resY)
if program_to_use == "Ghostscript":
if ex.system_os == "Windows": # Windows PIL is more likely to know BMP
ex.render_pdf_file_to_image_files__ghostscript_bmp(
pdf_file_name, output_filename_root, res_x, res_y)
else: # Linux and Cygwin should be fine with PNG
ex.render_pdf_file_to_image_files__ghostscript_png(
pdf_file_name, output_filename_root, res_x, res_y)
elif program_to_use == "pdftoppm":
use_gray = False # this is currently hardcoded, but can be changed to use pgm
if use_gray:
ex.render_pdf_file_to_image_files_pdftoppm_pgm(
pdf_file_name, output_filename_root, res_x, res_y)
else:
ex.render_pdf_file_to_image_files_pdftoppm_ppm(
pdf_file_name, output_filename_root, res_x, res_y)
else:
print("Error in renderPdfFileToImageFile: Unrecognized external program.",
file=sys.stderr)
ex.cleanup_and_exit(1)
|
python
|
def render_pdf_file_to_image_files(pdf_file_name, output_filename_root, program_to_use):
"""Render all the pages of the PDF file at pdf_file_name to image files with
path and filename prefix given by output_filename_root. Any directories must
have already been created, and the calling program is responsible for
deleting any directories or image files. The program program_to_use,
currently either the string "pdftoppm" or the string "Ghostscript", will be
called externally. The image type that the PDF is converted into must be
directly openable by PIL."""
res_x = str(args.resX)
res_y = str(args.resY)
if program_to_use == "Ghostscript":
if ex.system_os == "Windows": # Windows PIL is more likely to know BMP
ex.render_pdf_file_to_image_files__ghostscript_bmp(
pdf_file_name, output_filename_root, res_x, res_y)
else: # Linux and Cygwin should be fine with PNG
ex.render_pdf_file_to_image_files__ghostscript_png(
pdf_file_name, output_filename_root, res_x, res_y)
elif program_to_use == "pdftoppm":
use_gray = False # this is currently hardcoded, but can be changed to use pgm
if use_gray:
ex.render_pdf_file_to_image_files_pdftoppm_pgm(
pdf_file_name, output_filename_root, res_x, res_y)
else:
ex.render_pdf_file_to_image_files_pdftoppm_ppm(
pdf_file_name, output_filename_root, res_x, res_y)
else:
print("Error in renderPdfFileToImageFile: Unrecognized external program.",
file=sys.stderr)
ex.cleanup_and_exit(1)
|
[
"def",
"render_pdf_file_to_image_files",
"(",
"pdf_file_name",
",",
"output_filename_root",
",",
"program_to_use",
")",
":",
"res_x",
"=",
"str",
"(",
"args",
".",
"resX",
")",
"res_y",
"=",
"str",
"(",
"args",
".",
"resY",
")",
"if",
"program_to_use",
"==",
"\"Ghostscript\"",
":",
"if",
"ex",
".",
"system_os",
"==",
"\"Windows\"",
":",
"# Windows PIL is more likely to know BMP",
"ex",
".",
"render_pdf_file_to_image_files__ghostscript_bmp",
"(",
"pdf_file_name",
",",
"output_filename_root",
",",
"res_x",
",",
"res_y",
")",
"else",
":",
"# Linux and Cygwin should be fine with PNG",
"ex",
".",
"render_pdf_file_to_image_files__ghostscript_png",
"(",
"pdf_file_name",
",",
"output_filename_root",
",",
"res_x",
",",
"res_y",
")",
"elif",
"program_to_use",
"==",
"\"pdftoppm\"",
":",
"use_gray",
"=",
"False",
"# this is currently hardcoded, but can be changed to use pgm",
"if",
"use_gray",
":",
"ex",
".",
"render_pdf_file_to_image_files_pdftoppm_pgm",
"(",
"pdf_file_name",
",",
"output_filename_root",
",",
"res_x",
",",
"res_y",
")",
"else",
":",
"ex",
".",
"render_pdf_file_to_image_files_pdftoppm_ppm",
"(",
"pdf_file_name",
",",
"output_filename_root",
",",
"res_x",
",",
"res_y",
")",
"else",
":",
"print",
"(",
"\"Error in renderPdfFileToImageFile: Unrecognized external program.\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"ex",
".",
"cleanup_and_exit",
"(",
"1",
")"
] |
Render all the pages of the PDF file at pdf_file_name to image files with
path and filename prefix given by output_filename_root. Any directories must
have already been created, and the calling program is responsible for
deleting any directories or image files. The program program_to_use,
currently either the string "pdftoppm" or the string "Ghostscript", will be
called externally. The image type that the PDF is converted into must be
directly openable by PIL.
|
[
"Render",
"all",
"the",
"pages",
"of",
"the",
"PDF",
"file",
"at",
"pdf_file_name",
"to",
"image",
"files",
"with",
"path",
"and",
"filename",
"prefix",
"given",
"by",
"output_filename_root",
".",
"Any",
"directories",
"must",
"have",
"already",
"been",
"created",
"and",
"the",
"calling",
"program",
"is",
"responsible",
"for",
"deleting",
"any",
"directories",
"or",
"image",
"files",
".",
"The",
"program",
"program_to_use",
"currently",
"either",
"the",
"string",
"pdftoppm",
"or",
"the",
"string",
"Ghostscript",
"will",
"be",
"called",
"externally",
".",
"The",
"image",
"type",
"that",
"the",
"PDF",
"is",
"converted",
"into",
"must",
"to",
"be",
"directly",
"openable",
"by",
"PIL",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/calculate_bounding_boxes.py#L208-L237
|
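The `ex.render_pdf_file_to_image_files_*` helpers dispatched to here are defined elsewhere in the module; at bottom each one shells out to the renderer. A minimal sketch of the pdftoppm case (assumes the `pdftoppm` executable is on the PATH; pdftoppm itself appends page numbers and extensions to the root name):

```python
import subprocess

def render_with_pdftoppm(pdf_path, out_root, dpi=150):
    """Render every page of pdf_path to images named out_root-<n>.ppm
    (sketch; pdftoppm's -r flag sets the resolution in DPI)."""
    subprocess.check_call(["pdftoppm", "-r", str(dpi), pdf_path, out_root])

# render_with_pdftoppm("input.pdf", "/tmp/PageImage")
```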
13,804
|
abarker/pdfCropMargins
|
src/pdfCropMargins/calculate_bounding_boxes.py
|
calculate_bounding_box_from_image
|
def calculate_bounding_box_from_image(im, curr_page):
"""This function uses a PIL routine to get the bounding box of the rendered
image."""
xMax, y_max = im.size
bounding_box = im.getbbox() # note this uses ltrb convention
if not bounding_box:
#print("\nWarning: could not calculate a bounding box for this page."
# "\nAn empty page is assumed.", file=sys.stderr)
bounding_box = (xMax/2, y_max/2, xMax/2, y_max/2)
bounding_box = list(bounding_box) # make temporarily mutable
# Compensate for reversal of the image y convention versus PDF.
bounding_box[1] = y_max - bounding_box[1]
bounding_box[3] = y_max - bounding_box[3]
full_page_box = curr_page.mediaBox # should have been set already to chosen box
# Convert pixel units to PDF's bp units.
convert_x = float(full_page_box.getUpperRight_x()
- full_page_box.getLowerLeft_x()) / xMax
convert_y = float(full_page_box.getUpperRight_y()
- full_page_box.getLowerLeft_y()) / y_max
# Get final box; note conversion to lower-left point, upper-right point format.
final_box = [
bounding_box[0] * convert_x,
bounding_box[3] * convert_y,
bounding_box[2] * convert_x,
bounding_box[1] * convert_y]
return final_box
|
python
|
def calculate_bounding_box_from_image(im, curr_page):
"""This function uses a PIL routine to get the bounding box of the rendered
image."""
xMax, y_max = im.size
bounding_box = im.getbbox() # note this uses ltrb convention
if not bounding_box:
#print("\nWarning: could not calculate a bounding box for this page."
# "\nAn empty page is assumed.", file=sys.stderr)
bounding_box = (xMax/2, y_max/2, xMax/2, y_max/2)
bounding_box = list(bounding_box) # make temporarily mutable
# Compensate for reversal of the image y convention versus PDF.
bounding_box[1] = y_max - bounding_box[1]
bounding_box[3] = y_max - bounding_box[3]
full_page_box = curr_page.mediaBox # should have been set already to chosen box
# Convert pixel units to PDF's bp units.
convert_x = float(full_page_box.getUpperRight_x()
- full_page_box.getLowerLeft_x()) / xMax
convert_y = float(full_page_box.getUpperRight_y()
- full_page_box.getLowerLeft_y()) / y_max
# Get final box; note conversion to lower-left point, upper-right point format.
final_box = [
bounding_box[0] * convert_x,
bounding_box[3] * convert_y,
bounding_box[2] * convert_x,
bounding_box[1] * convert_y]
return final_box
|
[
"def",
"calculate_bounding_box_from_image",
"(",
"im",
",",
"curr_page",
")",
":",
"xMax",
",",
"y_max",
"=",
"im",
".",
"size",
"bounding_box",
"=",
"im",
".",
"getbbox",
"(",
")",
"# note this uses ltrb convention",
"if",
"not",
"bounding_box",
":",
"#print(\"\\nWarning: could not calculate a bounding box for this page.\"",
"# \"\\nAn empty page is assumed.\", file=sys.stderr)",
"bounding_box",
"=",
"(",
"xMax",
"/",
"2",
",",
"y_max",
"/",
"2",
",",
"xMax",
"/",
"2",
",",
"y_max",
"/",
"2",
")",
"bounding_box",
"=",
"list",
"(",
"bounding_box",
")",
"# make temporarily mutable",
"# Compensate for reversal of the image y convention versus PDF.",
"bounding_box",
"[",
"1",
"]",
"=",
"y_max",
"-",
"bounding_box",
"[",
"1",
"]",
"bounding_box",
"[",
"3",
"]",
"=",
"y_max",
"-",
"bounding_box",
"[",
"3",
"]",
"full_page_box",
"=",
"curr_page",
".",
"mediaBox",
"# should have been set already to chosen box",
"# Convert pixel units to PDF's bp units.",
"convert_x",
"=",
"float",
"(",
"full_page_box",
".",
"getUpperRight_x",
"(",
")",
"-",
"full_page_box",
".",
"getLowerLeft_x",
"(",
")",
")",
"/",
"xMax",
"convert_y",
"=",
"float",
"(",
"full_page_box",
".",
"getUpperRight_y",
"(",
")",
"-",
"full_page_box",
".",
"getLowerLeft_y",
"(",
")",
")",
"/",
"y_max",
"# Get final box; note conversion to lower-left point, upper-right point format.",
"final_box",
"=",
"[",
"bounding_box",
"[",
"0",
"]",
"*",
"convert_x",
",",
"bounding_box",
"[",
"3",
"]",
"*",
"convert_y",
",",
"bounding_box",
"[",
"2",
"]",
"*",
"convert_x",
",",
"bounding_box",
"[",
"1",
"]",
"*",
"convert_y",
"]",
"return",
"final_box"
] |
This function uses a PIL routine to get the bounding box of the rendered
image.
|
[
"This",
"function",
"uses",
"a",
"PIL",
"routine",
"to",
"get",
"the",
"bounding",
"box",
"of",
"the",
"rendered",
"image",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/calculate_bounding_boxes.py#L239-L270
|
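The coordinate handling here is the subtle part: PIL boxes are (left, upper, right, lower) with y growing downward, while PDF boxes are (llx, lly, urx, ury) with y growing upward. A standalone sketch of the conversion (hypothetical function name; like the original, it only scales by the page size and assumes the page's lower-left corner maps to the image origin):

```python
def pixel_bbox_to_pdf_box(bbox, img_size, page_box):
    """Convert a PIL (left, upper, right, lower) pixel bbox into a PDF
    (llx, lly, urx, ury) box in bp, mirroring calculate_bounding_box_from_image.
    page_box is the page's (llx, lly, urx, ury) in bp."""
    x_max, y_max = img_size
    left, upper, right, lower = bbox
    # Flip the y axis: image rows grow downward, PDF y grows upward.
    upper_pdf = y_max - upper   # becomes the top edge in PDF coordinates
    lower_pdf = y_max - lower   # becomes the bottom edge in PDF coordinates
    convert_x = (page_box[2] - page_box[0]) / x_max
    convert_y = (page_box[3] - page_box[1]) / y_max
    return [left * convert_x, lower_pdf * convert_y,
            right * convert_x, upper_pdf * convert_y]

# A 200x100 px render of a 200x100 bp page: identity scaling.
print(pixel_bbox_to_pdf_box((40, 20, 161, 81), (200, 100), (0, 0, 200, 100)))
# [40.0, 19.0, 161.0, 80.0]
```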
13,805
|
abarker/pdfCropMargins
|
src/pdfCropMargins/external_program_calls.py
|
samefile
|
def samefile(path1, path2):
"""Test if paths refer to the same file or directory."""
if system_os == "Linux" or system_os == "Cygwin":
return os.path.samefile(path1, path2)
return (get_canonical_absolute_expanded_path(path1) ==
get_canonical_absolute_expanded_path(path2))
|
python
|
def samefile(path1, path2):
"""Test if paths refer to the same file or directory."""
if system_os == "Linux" or system_os == "Cygwin":
return os.path.samefile(path1, path2)
return (get_canonical_absolute_expanded_path(path1) ==
get_canonical_absolute_expanded_path(path2))
|
[
"def",
"samefile",
"(",
"path1",
",",
"path2",
")",
":",
"if",
"system_os",
"==",
"\"Linux\"",
"or",
"system_os",
"==",
"\"Cygwin\"",
":",
"return",
"os",
".",
"path",
".",
"samefile",
"(",
"path1",
",",
"path2",
")",
"return",
"(",
"get_canonical_absolute_expanded_path",
"(",
"path1",
")",
"==",
"get_canonical_absolute_expanded_path",
"(",
"path2",
")",
")"
] |
Test if paths refer to the same file or directory.
|
[
"Test",
"if",
"paths",
"refer",
"to",
"the",
"same",
"file",
"or",
"directory",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L135-L140
|
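`get_canonical_absolute_expanded_path` is not shown in this excerpt; a plausible sketch of such a normalization helper, built only from `os.path` calls (an assumption about what the real helper does):

```python
import os

def canonical(path):
    """Hypothetical stand-in for get_canonical_absolute_expanded_path:
    expand ~ and environment variables, then resolve to a real absolute path."""
    return os.path.realpath(os.path.abspath(
        os.path.expandvars(os.path.expanduser(path))))

print(canonical("~/docs/../docs") == canonical("~/docs"))  # True on most systems
```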
13,806
|
abarker/pdfCropMargins
|
src/pdfCropMargins/external_program_calls.py
|
convert_windows_path_to_cygwin
|
def convert_windows_path_to_cygwin(path):
"""Convert a Windows path to a Cygwin path. Just handles the basic case."""
if len(path) > 2 and path[1] == ":" and path[2] == "\\":
newpath = cygwin_full_path_prefix + "/" + path[0]
if len(path) > 3: newpath += "/" + path[3:]
path = newpath
path = path.replace("\\", "/")
return path
|
python
|
def convert_windows_path_to_cygwin(path):
"""Convert a Windows path to a Cygwin path. Just handles the basic case."""
if len(path) > 2 and path[1] == ":" and path[2] == "\\":
newpath = cygwin_full_path_prefix + "/" + path[0]
if len(path) > 3: newpath += "/" + path[3:]
path = newpath
path = path.replace("\\", "/")
return path
|
[
"def",
"convert_windows_path_to_cygwin",
"(",
"path",
")",
":",
"if",
"len",
"(",
"path",
")",
">",
"2",
"and",
"path",
"[",
"1",
"]",
"==",
"\":\"",
"and",
"path",
"[",
"2",
"]",
"==",
"\"\\\\\"",
":",
"newpath",
"=",
"cygwin_full_path_prefix",
"+",
"\"/\"",
"+",
"path",
"[",
"0",
"]",
"if",
"len",
"(",
"path",
")",
">",
"3",
":",
"newpath",
"+=",
"\"/\"",
"+",
"path",
"[",
"3",
":",
"]",
"path",
"=",
"newpath",
"path",
"=",
"path",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"/\"",
")",
"return",
"path"
] |
Convert a Windows path to a Cygwin path. Just handles the basic case.
|
[
"Convert",
"a",
"Windows",
"path",
"to",
"a",
"Cygwin",
"path",
".",
"Just",
"handles",
"the",
"basic",
"case",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L167-L174
|
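The function relies on the module global `cygwin_full_path_prefix`; Cygwin conventionally mounts drives under `/cygdrive`, so a self-contained version might look like this (the prefix value is an assumption, and only the basic `C:\...` case is handled, as the docstring says):

```python
cygwin_full_path_prefix = "/cygdrive"  # assumed value of the module global

def convert_windows_path_to_cygwin(path):
    """Convert a basic Windows path like C:\\Users\\me to /cygdrive/C/Users/me."""
    if len(path) > 2 and path[1] == ":" and path[2] == "\\":
        newpath = cygwin_full_path_prefix + "/" + path[0]  # drive letter
        if len(path) > 3:
            newpath += "/" + path[3:]
        path = newpath
    return path.replace("\\", "/")

print(convert_windows_path_to_cygwin(r"C:\Users\me\doc.pdf"))
# /cygdrive/C/Users/me/doc.pdf
```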
13,807
|
abarker/pdfCropMargins
|
src/pdfCropMargins/external_program_calls.py
|
remove_program_temp_directory
|
def remove_program_temp_directory():
"""Remove the global temp directory and all its contents."""
if os.path.exists(program_temp_directory):
max_retries = 3
curr_retries = 0
time_between_retries = 1
while True:
try:
shutil.rmtree(program_temp_directory)
break
except IOError:
curr_retries += 1
if curr_retries > max_retries:
raise # re-raise the exception
time.sleep(time_between_retries)
except:
print("Cleaning up temp dir...", file=sys.stderr)
raise
|
python
|
def remove_program_temp_directory():
"""Remove the global temp directory and all its contents."""
if os.path.exists(program_temp_directory):
max_retries = 3
curr_retries = 0
time_between_retries = 1
while True:
try:
shutil.rmtree(program_temp_directory)
break
except IOError:
curr_retries += 1
if curr_retries > max_retries:
raise # re-raise the exception
time.sleep(time_between_retries)
except:
print("Cleaning up temp dir...", file=sys.stderr)
raise
|
[
"def",
"remove_program_temp_directory",
"(",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"program_temp_directory",
")",
":",
"max_retries",
"=",
"3",
"curr_retries",
"=",
"0",
"time_between_retries",
"=",
"1",
"while",
"True",
":",
"try",
":",
"shutil",
".",
"rmtree",
"(",
"program_temp_directory",
")",
"break",
"except",
"IOError",
":",
"curr_retries",
"+=",
"1",
"if",
"curr_retries",
">",
"max_retries",
":",
"raise",
"# re-raise the exception",
"time",
".",
"sleep",
"(",
"time_between_retries",
")",
"except",
":",
"print",
"(",
"\"Cleaning up temp dir...\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"raise"
] |
Remove the global temp directory and all its contents.
|
[
"Remove",
"the",
"global",
"temp",
"directory",
"and",
"all",
"its",
"contents",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L191-L208
|
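The retry loop guards against transient failures such as a file in the temp tree still being held open. A generalized sketch of the same pattern (using `OSError`, which subsumes `IOError` on Python 3):

```python
import shutil
import time

def rmtree_with_retries(path, max_retries=3, delay=1):
    """Remove a directory tree, retrying on transient OSError
    (e.g. a file still held open by a viewer on Windows)."""
    retries = 0
    while True:
        try:
            shutil.rmtree(path)
            return
        except OSError:
            retries += 1
            if retries > max_retries:
                raise  # give up and re-raise the last error
            time.sleep(delay)
```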
13,808
|
abarker/pdfCropMargins
|
src/pdfCropMargins/external_program_calls.py
|
call_external_subprocess
|
def call_external_subprocess(command_list,
stdin_filename=None, stdout_filename=None, stderr_filename=None,
env=None):
"""Run the command and arguments in the command_list. Will search the system
PATH for commands to execute, but no shell is started. Redirects any selected
outputs to the given filename. Waits for command completion."""
if stdin_filename: stdin = open(stdin_filename, "r")
else: stdin = None
if stdout_filename: stdout = open(stdout_filename, "w")
else: stdout = None
if stderr_filename: stderr = open(stderr_filename, "w")
else: stderr = None
subprocess.check_call(command_list, stdin=stdin, stdout=stdout, stderr=stderr,
env=env)
if stdin_filename: stdin.close()
if stdout_filename: stdout.close()
if stderr_filename: stderr.close()
# The older way to do the above with os.system is below, just for reference.
# command = " ".join(command_list)
# if stdin_filename: command += " < " + stdin_filename
# if stdout_filename: command += " > " + stdout_filename
# if stderr_filename: command += " 2> " + stderr_filename
# os.system(command)
return
|
python
|
def call_external_subprocess(command_list,
stdin_filename=None, stdout_filename=None, stderr_filename=None,
env=None):
"""Run the command and arguments in the command_list. Will search the system
PATH for commands to execute, but no shell is started. Redirects any selected
outputs to the given filename. Waits for command completion."""
if stdin_filename: stdin = open(stdin_filename, "r")
else: stdin = None
if stdout_filename: stdout = open(stdout_filename, "w")
else: stdout = None
if stderr_filename: stderr = open(stderr_filename, "w")
else: stderr = None
subprocess.check_call(command_list, stdin=stdin, stdout=stdout, stderr=stderr,
env=env)
if stdin_filename: stdin.close()
if stdout_filename: stdout.close()
if stderr_filename: stderr.close()
# The older way to do the above with os.system is below, just for reference.
# command = " ".join(command_list)
# if stdin_filename: command += " < " + stdin_filename
# if stdout_filename: command += " > " + stdout_filename
# if stderr_filename: command += " 2> " + stderr_filename
# os.system(command)
return
|
[
"def",
"call_external_subprocess",
"(",
"command_list",
",",
"stdin_filename",
"=",
"None",
",",
"stdout_filename",
"=",
"None",
",",
"stderr_filename",
"=",
"None",
",",
"env",
"=",
"None",
")",
":",
"if",
"stdin_filename",
":",
"stdin",
"=",
"open",
"(",
"stdin_filename",
",",
"\"r\"",
")",
"else",
":",
"stdin",
"=",
"None",
"if",
"stdout_filename",
":",
"stdout",
"=",
"open",
"(",
"stdout_filename",
",",
"\"w\"",
")",
"else",
":",
"stdout",
"=",
"None",
"if",
"stderr_filename",
":",
"stderr",
"=",
"open",
"(",
"stderr_filename",
",",
"\"w\"",
")",
"else",
":",
"stderr",
"=",
"None",
"subprocess",
".",
"check_call",
"(",
"command_list",
",",
"stdin",
"=",
"stdin",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
",",
"env",
"=",
"env",
")",
"if",
"stdin_filename",
":",
"stdin",
".",
"close",
"(",
")",
"if",
"stdout_filename",
":",
"stdout",
".",
"close",
"(",
")",
"if",
"stderr_filename",
":",
"stderr",
".",
"close",
"(",
")",
"# The older way to do the above with os.system is below, just for reference.",
"# command = \" \".join(command_list)",
"# if stdin_filename: command += \" < \" + stdin_filename",
"# if stdout_filename: command += \" > \" + stdout_filename",
"# if stderr_filename: command += \" 2> \" + stderr_filename",
"# os.system(command)",
"return"
] |
Run the command and arguments in the command_list. Will search the system
PATH for commands to execute, but no shell is started. Redirects any selected
outputs to the given filename. Waits for command completion.
|
[
"Run",
"the",
"command",
"and",
"arguments",
"in",
"the",
"command_list",
".",
"Will",
"search",
"the",
"system",
"PATH",
"for",
"commands",
"to",
"execute",
"but",
"no",
"shell",
"is",
"started",
".",
"Redirects",
"any",
"selected",
"outputs",
"to",
"the",
"given",
"filename",
".",
"Waits",
"for",
"command",
"completion",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L279-L306
|
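One caveat in the original is that the redirect files are not closed if `check_call` raises. A sketch of the same behavior with `contextlib.ExitStack`, which closes whatever was opened even on error (illustrative, not the project's code):

```python
import subprocess
from contextlib import ExitStack

def call_external(command_list, stdin_filename=None, stdout_filename=None,
                  stderr_filename=None, env=None):
    """Run command_list with optional file redirections; files are closed
    even if the subprocess fails."""
    with ExitStack() as stack:
        stdin = stack.enter_context(open(stdin_filename)) if stdin_filename else None
        stdout = stack.enter_context(open(stdout_filename, "w")) if stdout_filename else None
        stderr = stack.enter_context(open(stderr_filename, "w")) if stderr_filename else None
        subprocess.check_call(command_list, stdin=stdin, stdout=stdout,
                              stderr=stderr, env=env)

# call_external(["sort", "names.txt"], stdout_filename="sorted.txt")
```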
13,809
|
abarker/pdfCropMargins
|
src/pdfCropMargins/external_program_calls.py
|
run_external_subprocess_in_background
|
def run_external_subprocess_in_background(command_list, env=None):
"""Runs the command and arguments in the list as a background process."""
if system_os == "Windows":
DETACHED_PROCESS = 0x00000008
p = subprocess.Popen(command_list, shell=False, stdin=None, stdout=None,
stderr=None, close_fds=True, creationflags=DETACHED_PROCESS, env=env)
else:
p = subprocess.Popen(command_list, shell=False, stdin=None, stdout=None,
stderr=None, close_fds=True, env=env)
return p
|
python
|
def run_external_subprocess_in_background(command_list, env=None):
"""Runs the command and arguments in the list as a background process."""
if system_os == "Windows":
DETACHED_PROCESS = 0x00000008
p = subprocess.Popen(command_list, shell=False, stdin=None, stdout=None,
stderr=None, close_fds=True, creationflags=DETACHED_PROCESS, env=env)
else:
p = subprocess.Popen(command_list, shell=False, stdin=None, stdout=None,
stderr=None, close_fds=True, env=env)
return p
|
[
"def",
"run_external_subprocess_in_background",
"(",
"command_list",
",",
"env",
"=",
"None",
")",
":",
"if",
"system_os",
"==",
"\"Windows\"",
":",
"DETACHED_PROCESS",
"=",
"0x00000008",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"command_list",
",",
"shell",
"=",
"False",
",",
"stdin",
"=",
"None",
",",
"stdout",
"=",
"None",
",",
"stderr",
"=",
"None",
",",
"close_fds",
"=",
"True",
",",
"creationflags",
"=",
"DETACHED_PROCESS",
",",
"env",
"=",
"env",
")",
"else",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"command_list",
",",
"shell",
"=",
"False",
",",
"stdin",
"=",
"None",
",",
"stdout",
"=",
"None",
",",
"stderr",
"=",
"None",
",",
"close_fds",
"=",
"True",
",",
"env",
"=",
"env",
")",
"return",
"p"
] |
Runs the command and arguments in the list as a background process.
|
[
"Runs",
"the",
"command",
"and",
"arguments",
"in",
"the",
"list",
"as",
"a",
"background",
"process",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L308-L317
|
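On Windows builds of Python 3.7+ the creation flag used above is exposed as `subprocess.DETACHED_PROCESS` (the same 0x00000008 value), so the two branches can collapse into one; a sketch:

```python
import subprocess
import sys

def spawn_detached(command_list, env=None):
    """Launch command_list detached from the current process (sketch).
    The flag is Windows-only; POSIX accepts creationflags=0."""
    flags = subprocess.DETACHED_PROCESS if sys.platform == "win32" else 0
    return subprocess.Popen(command_list, close_fds=True,
                            creationflags=flags, env=env)
```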
13,810
|
abarker/pdfCropMargins
|
src/pdfCropMargins/external_program_calls.py
|
function_call_with_timeout
|
def function_call_with_timeout(fun_name, fun_args, secs=5):
"""Run a Python function with a timeout. No interprocess communication or
return values are handled. Setting secs to 0 gives infinite timeout."""
from multiprocessing import Process, Queue
p = Process(target=fun_name, args=tuple(fun_args))
p.start()
curr_secs = 0
no_timeout = False
if secs == 0: no_timeout = True
else: timeout = secs
while p.is_alive() and not no_timeout:
if curr_secs > timeout:
print("Process time has exceeded timeout, terminating it.")
p.terminate()
return False
time.sleep(0.1)
curr_secs += 0.1
p.join() # Blocks if process hasn't terminated.
return True
|
python
|
def function_call_with_timeout(fun_name, fun_args, secs=5):
"""Run a Python function with a timeout. No interprocess communication or
return values are handled. Setting secs to 0 gives infinite timeout."""
from multiprocessing import Process, Queue
p = Process(target=fun_name, args=tuple(fun_args))
p.start()
curr_secs = 0
no_timeout = False
if secs == 0: no_timeout = True
else: timeout = secs
while p.is_alive() and not no_timeout:
if curr_secs > timeout:
print("Process time has exceeded timeout, terminating it.")
p.terminate()
return False
time.sleep(0.1)
curr_secs += 0.1
p.join() # Blocks if process hasn't terminated.
return True
|
[
"def",
"function_call_with_timeout",
"(",
"fun_name",
",",
"fun_args",
",",
"secs",
"=",
"5",
")",
":",
"from",
"multiprocessing",
"import",
"Process",
",",
"Queue",
"p",
"=",
"Process",
"(",
"target",
"=",
"fun_name",
",",
"args",
"=",
"tuple",
"(",
"fun_args",
")",
")",
"p",
".",
"start",
"(",
")",
"curr_secs",
"=",
"0",
"no_timeout",
"=",
"False",
"if",
"secs",
"==",
"0",
":",
"no_timeout",
"=",
"True",
"else",
":",
"timeout",
"=",
"secs",
"while",
"p",
".",
"is_alive",
"(",
")",
"and",
"not",
"no_timeout",
":",
"if",
"curr_secs",
">",
"timeout",
":",
"print",
"(",
"\"Process time has exceeded timeout, terminating it.\"",
")",
"p",
".",
"terminate",
"(",
")",
"return",
"False",
"time",
".",
"sleep",
"(",
"0.1",
")",
"curr_secs",
"+=",
"0.1",
"p",
".",
"join",
"(",
")",
"# Blocks if process hasn't terminated.",
"return",
"True"
] |
Run a Python function with a timeout. No interprocess communication or
return values are handled. Setting secs to 0 gives infinite timeout.
|
[
"Run",
"a",
"Python",
"function",
"with",
"a",
"timeout",
".",
"No",
"interprocess",
"communication",
"or",
"return",
"values",
"are",
"handled",
".",
"Setting",
"secs",
"to",
"0",
"gives",
"infinite",
"timeout",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L331-L349
|
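`Process.join` accepts its own timeout, so the 0.1-second polling loop can be replaced by a single blocking call; a sketch preserving the original's convention that `secs=0` means no timeout:

```python
import time
from multiprocessing import Process

def call_with_timeout(fun, fun_args, secs=5):
    """Sketch of function_call_with_timeout using Process.join's timeout."""
    p = Process(target=fun, args=tuple(fun_args))
    p.start()
    p.join(None if secs == 0 else secs)  # join(None) blocks until exit
    if p.is_alive():                     # still running: the timeout expired
        p.terminate()
        p.join()
        return False
    return True

if __name__ == "__main__":
    print(call_with_timeout(time.sleep, [0.1]))  # True: finishes well under 5s
```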
13,811
|
abarker/pdfCropMargins
|
src/pdfCropMargins/external_program_calls.py
|
fix_pdf_with_ghostscript_to_tmp_file
|
def fix_pdf_with_ghostscript_to_tmp_file(input_doc_fname):
"""Attempt to fix a bad PDF file with a Ghostscript command, writing the output
PDF to a temporary file and returning the filename. Caller is responsible for
deleting the file."""
if not gs_executable: init_and_test_gs_executable(exit_on_fail=True)
temp_file_name = get_temporary_filename(extension=".pdf")
gs_run_command = [gs_executable, "-dSAFER", "-o", temp_file_name,
"-dPDFSETTINGS=/prepress", "-sDEVICE=pdfwrite", input_doc_fname]
try:
gs_output = get_external_subprocess_output(gs_run_command, print_output=True,
indent_string=" ", env=gs_environment)
except subprocess.CalledProcessError:
print("\nError in pdfCropMargins: Ghostscript returned a non-zero exit"
"\nstatus when attempting to fix the file:\n ", input_doc_fname,
file=sys.stderr)
cleanup_and_exit(1)
except UnicodeDecodeError:
print("\nWarning in pdfCropMargins: In attempting to repair the PDF file"
"\nGhostscript produced a message containing characters which cannot"
"\nbe decoded by the 'utf-8' codec. Ignoring and hoping for the best.",
file=sys.stderr)
return temp_file_name
|
python
|
def fix_pdf_with_ghostscript_to_tmp_file(input_doc_fname):
"""Attempt to fix a bad PDF file with a Ghostscript command, writing the output
PDF to a temporary file and returning the filename. Caller is responsible for
deleting the file."""
if not gs_executable: init_and_test_gs_executable(exit_on_fail=True)
temp_file_name = get_temporary_filename(extension=".pdf")
gs_run_command = [gs_executable, "-dSAFER", "-o", temp_file_name,
"-dPDFSETTINGS=/prepress", "-sDEVICE=pdfwrite", input_doc_fname]
try:
gs_output = get_external_subprocess_output(gs_run_command, print_output=True,
indent_string=" ", env=gs_environment)
except subprocess.CalledProcessError:
print("\nError in pdfCropMargins: Ghostscript returned a non-zero exit"
"\nstatus when attempting to fix the file:\n ", input_doc_fname,
file=sys.stderr)
cleanup_and_exit(1)
except UnicodeDecodeError:
print("\nWarning in pdfCropMargins: In attempting to repair the PDF file"
"\nGhostscript produced a message containing characters which cannot"
"\nbe decoded by the 'utf-8' codec. Ignoring and hoping for the best.",
file=sys.stderr)
return temp_file_name
|
[
"def",
"fix_pdf_with_ghostscript_to_tmp_file",
"(",
"input_doc_fname",
")",
":",
"if",
"not",
"gs_executable",
":",
"init_and_test_gs_executable",
"(",
"exit_on_fail",
"=",
"True",
")",
"temp_file_name",
"=",
"get_temporary_filename",
"(",
"extension",
"=",
"\".pdf\"",
")",
"gs_run_command",
"=",
"[",
"gs_executable",
",",
"\"-dSAFER\"",
",",
"\"-o\"",
",",
"temp_file_name",
",",
"\"-dPDFSETTINGS=/prepress\"",
",",
"\"-sDEVICE=pdfwrite\"",
",",
"input_doc_fname",
"]",
"try",
":",
"gs_output",
"=",
"get_external_subprocess_output",
"(",
"gs_run_command",
",",
"print_output",
"=",
"True",
",",
"indent_string",
"=",
"\" \"",
",",
"env",
"=",
"gs_environment",
")",
"except",
"subprocess",
".",
"CalledProcessError",
":",
"print",
"(",
"\"\\nError in pdfCropMargins: Ghostscript returned a non-zero exit\"",
"\"\\nstatus when attempting to fix the file:\\n \"",
",",
"input_doc_fname",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"cleanup_and_exit",
"(",
"1",
")",
"except",
"UnicodeDecodeError",
":",
"print",
"(",
"\"\\nWarning in pdfCropMargins: In attempting to repair the PDF file\"",
"\"\\nGhostscript produced a message containing characters which cannot\"",
"\"\\nbe decoded by the 'utf-8' codec. Ignoring and hoping for the best.\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"temp_file_name"
] |
Attempt to fix a bad PDF file with a Ghostscript command, writing the output
PDF to a temporary file and returning the filename. Caller is responsible for
deleting the file.
|
[
"Attempt",
"to",
"fix",
"a",
"bad",
"PDF",
"file",
"with",
"a",
"Ghostscript",
"command",
"writing",
"the",
"output",
"PDF",
"to",
"a",
"temporary",
"file",
"and",
"returning",
"the",
"filename",
".",
"Caller",
"is",
"responsible",
"for",
"deleting",
"the",
"file",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L532-L554
|
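A trimmed-down sketch of the same repair step, with `tempfile` standing in for the module's `get_temporary_filename` helper (assumes a `gs` executable on the PATH; the flags match the command above):

```python
import subprocess
import tempfile

def gs_rewrite_pdf(input_pdf, gs_executable="gs"):
    """Rewrite input_pdf through Ghostscript's pdfwrite device and return the
    temp file name; the caller is responsible for deleting it."""
    tmp = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
    tmp.close()  # gs writes to the name, not the open handle
    subprocess.check_call([gs_executable, "-dSAFER", "-o", tmp.name,
                           "-dPDFSETTINGS=/prepress", "-sDEVICE=pdfwrite",
                           input_pdf])
    return tmp.name
```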
13,812
|
abarker/pdfCropMargins
|
src/pdfCropMargins/external_program_calls.py
|
get_bounding_box_list_ghostscript
|
def get_bounding_box_list_ghostscript(input_doc_fname, res_x, res_y, full_page_box):
"""Call Ghostscript to get the bounding box list. Cannot set a threshold
with this method."""
if not gs_executable: init_and_test_gs_executable(exit_on_fail=True)
res = str(res_x) + "x" + str(res_y)
box_arg = "-dUseMediaBox" # should be default, but set anyway
if "c" in full_page_box: box_arg = "-dUseCropBox"
if "t" in full_page_box: box_arg = "-dUseTrimBox"
if "a" in full_page_box: box_arg = "-dUseArtBox"
if "b" in full_page_box: box_arg = "-dUseBleedBox" # may not be defined in gs
gs_run_command = [gs_executable, "-dSAFER", "-dNOPAUSE", "-dBATCH", "-sDEVICE=bbox",
box_arg, "-r"+res, input_doc_fname]
# Set printOutput to True for debugging or extra-verbose with Ghostscript's output.
# Note Ghostscript writes the data to stderr, so the command below must capture it.
try:
gs_output = get_external_subprocess_output(gs_run_command,
print_output=False, indent_string=" ", env=gs_environment)
except UnicodeDecodeError:
print("\nError in pdfCropMargins: In attempting to get the bounding boxes"
"\nGhostscript encountered characters which cannot be decoded by the"
"\n'utf-8' codec.",
file=sys.stderr)
cleanup_and_exit(1)
bounding_box_list = []
for line in gs_output:
split_line = line.split()
if split_line and split_line[0] == r"%%HiResBoundingBox:":
del split_line[0]
if len(split_line) != 4:
print("\nWarning from pdfCropMargins: Ignoring this unparsable line"
"\nwhen finding the bounding boxes with Ghostscript:",
line, "\n", file=sys.stderr)
continue
# Note gs reports values in order left, bottom, right, top,
# i.e., lower left point followed by top right point.
bounding_box_list.append([float(split_line[0]),
float(split_line[1]),
float(split_line[2]),
float(split_line[3])])
if not bounding_box_list:
print("\nError in pdfCropMargins: Ghostscript failed to find any bounding"
"\nboxes in the document.", file=sys.stderr)
cleanup_and_exit(1)
return bounding_box_list
|
python
|
def get_bounding_box_list_ghostscript(input_doc_fname, res_x, res_y, full_page_box):
"""Call Ghostscript to get the bounding box list. Cannot set a threshold
with this method."""
if not gs_executable: init_and_test_gs_executable(exit_on_fail=True)
res = str(res_x) + "x" + str(res_y)
box_arg = "-dUseMediaBox" # should be default, but set anyway
if "c" in full_page_box: box_arg = "-dUseCropBox"
if "t" in full_page_box: box_arg = "-dUseTrimBox"
if "a" in full_page_box: box_arg = "-dUseArtBox"
if "b" in full_page_box: box_arg = "-dUseBleedBox" # may not be defined in gs
gs_run_command = [gs_executable, "-dSAFER", "-dNOPAUSE", "-dBATCH", "-sDEVICE=bbox",
box_arg, "-r"+res, input_doc_fname]
# Set printOutput to True for debugging or extra-verbose with Ghostscript's output.
# Note Ghostscript writes the data to stderr, so the command below must capture it.
try:
gs_output = get_external_subprocess_output(gs_run_command,
print_output=False, indent_string=" ", env=gs_environment)
except UnicodeDecodeError:
print("\nError in pdfCropMargins: In attempting to get the bounding boxes"
"\nGhostscript encountered characters which cannot be decoded by the"
"\n'utf-8' codec.",
file=sys.stderr)
cleanup_and_exit(1)
bounding_box_list = []
for line in gs_output:
split_line = line.split()
if split_line and split_line[0] == r"%%HiResBoundingBox:":
del split_line[0]
if len(split_line) != 4:
print("\nWarning from pdfCropMargins: Ignoring this unparsable line"
"\nwhen finding the bounding boxes with Ghostscript:",
line, "\n", file=sys.stderr)
continue
# Note gs reports values in order left, bottom, right, top,
# i.e., lower left point followed by top right point.
bounding_box_list.append([float(split_line[0]),
float(split_line[1]),
float(split_line[2]),
float(split_line[3])])
if not bounding_box_list:
print("\nError in pdfCropMargins: Ghostscript failed to find any bounding"
"\nboxes in the document.", file=sys.stderr)
cleanup_and_exit(1)
return bounding_box_list
|
[
"def",
"get_bounding_box_list_ghostscript",
"(",
"input_doc_fname",
",",
"res_x",
",",
"res_y",
",",
"full_page_box",
")",
":",
"if",
"not",
"gs_executable",
":",
"init_and_test_gs_executable",
"(",
"exit_on_fail",
"=",
"True",
")",
"res",
"=",
"str",
"(",
"res_x",
")",
"+",
"\"x\"",
"+",
"str",
"(",
"res_y",
")",
"box_arg",
"=",
"\"-dUseMediaBox\"",
"# should be default, but set anyway",
"if",
"\"c\"",
"in",
"full_page_box",
":",
"box_arg",
"=",
"\"-dUseCropBox\"",
"if",
"\"t\"",
"in",
"full_page_box",
":",
"box_arg",
"=",
"\"-dUseTrimBox\"",
"if",
"\"a\"",
"in",
"full_page_box",
":",
"box_arg",
"=",
"\"-dUseArtBox\"",
"if",
"\"b\"",
"in",
"full_page_box",
":",
"box_arg",
"=",
"\"-dUseBleedBox\"",
"# may not be defined in gs",
"gs_run_command",
"=",
"[",
"gs_executable",
",",
"\"-dSAFER\"",
",",
"\"-dNOPAUSE\"",
",",
"\"-dBATCH\"",
",",
"\"-sDEVICE=bbox\"",
",",
"box_arg",
",",
"\"-r\"",
"+",
"res",
",",
"input_doc_fname",
"]",
"# Set printOutput to True for debugging or extra-verbose with Ghostscript's output.",
"# Note Ghostscript writes the data to stderr, so the command below must capture it.",
"try",
":",
"gs_output",
"=",
"get_external_subprocess_output",
"(",
"gs_run_command",
",",
"print_output",
"=",
"False",
",",
"indent_string",
"=",
"\" \"",
",",
"env",
"=",
"gs_environment",
")",
"except",
"UnicodeDecodeError",
":",
"print",
"(",
"\"\\nError in pdfCropMargins: In attempting to get the bounding boxes\"",
"\"\\nGhostscript encountered characters which cannot be decoded by the\"",
"\"\\n'utf-8' codec.\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"cleanup_and_exit",
"(",
"1",
")",
"bounding_box_list",
"=",
"[",
"]",
"for",
"line",
"in",
"gs_output",
":",
"split_line",
"=",
"line",
".",
"split",
"(",
")",
"if",
"split_line",
"and",
"split_line",
"[",
"0",
"]",
"==",
"r\"%%HiResBoundingBox:\"",
":",
"del",
"split_line",
"[",
"0",
"]",
"if",
"len",
"(",
"split_line",
")",
"!=",
"4",
":",
"print",
"(",
"\"\\nWarning from pdfCropMargins: Ignoring this unparsable line\"",
"\"\\nwhen finding the bounding boxes with Ghostscript:\"",
",",
"line",
",",
"\"\\n\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"continue",
"# Note gs reports values in order left, bottom, right, top,",
"# i.e., lower left point followed by top right point.",
"bounding_box_list",
".",
"append",
"(",
"[",
"float",
"(",
"split_line",
"[",
"0",
"]",
")",
",",
"float",
"(",
"split_line",
"[",
"1",
"]",
")",
",",
"float",
"(",
"split_line",
"[",
"2",
"]",
")",
",",
"float",
"(",
"split_line",
"[",
"3",
"]",
")",
"]",
")",
"if",
"not",
"bounding_box_list",
":",
"print",
"(",
"\"\\nError in pdfCropMargins: Ghostscript failed to find any bounding\"",
"\"\\nboxes in the document.\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"cleanup_and_exit",
"(",
"1",
")",
"return",
"bounding_box_list"
] |
Call Ghostscript to get the bounding box list. Cannot set a threshold
with this method.
|
[
"Call",
"Ghostscript",
"to",
"get",
"the",
"bounding",
"box",
"list",
".",
"Cannot",
"set",
"a",
"threshold",
"with",
"this",
"method",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L556-L602
|
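The bbox device writes one `%%HiResBoundingBox:` line per page (to stderr, as the comment notes); the parsing loop just splits those lines and converts the four numbers. A standalone sketch of that parse with a sample of gs output:

```python
def parse_hires_bounding_boxes(gs_output_lines):
    """Pull [llx, lly, urx, ury] lists out of Ghostscript bbox-device output,
    mirroring the parsing loop above (keyword plus exactly four numbers)."""
    boxes = []
    for line in gs_output_lines:
        parts = line.split()
        if parts and parts[0] == "%%HiResBoundingBox:" and len(parts) == 5:
            boxes.append([float(v) for v in parts[1:]])
    return boxes

sample = ["%%BoundingBox: 54 72 558 720",
          "%%HiResBoundingBox: 54.000000 72.000000 557.500000 719.500000"]
print(parse_hires_bounding_boxes(sample))
# [[54.0, 72.0, 557.5, 719.5]]
```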
13,813
|
abarker/pdfCropMargins
|
src/pdfCropMargins/external_program_calls.py
|
render_pdf_file_to_image_files_pdftoppm_ppm
|
def render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name, root_output_file_path,
res_x=150, res_y=150, extra_args=None):
"""Use the pdftoppm program to render a PDF file to .png images. The
root_output_file_path is prepended to all the output files, which have numbers
and extensions added. Extra arguments can be passed as a list in extra_args.
Return the command output."""
if extra_args is None: extra_args = []
if not pdftoppm_executable:
init_and_test_pdftoppm_executable(prefer_local=False, exit_on_fail=True)
if old_pdftoppm_version:
# We only have -r, not -rx and -ry.
command = [pdftoppm_executable] + extra_args + ["-r", res_x, pdf_file_name,
root_output_file_path]
else:
command = [pdftoppm_executable] + extra_args + ["-rx", res_x, "-ry", res_y,
pdf_file_name, root_output_file_path]
comm_output = get_external_subprocess_output(command)
return comm_output
|
python
|
def render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name, root_output_file_path,
res_x=150, res_y=150, extra_args=None):
"""Use the pdftoppm program to render a PDF file to .png images. The
root_output_file_path is prepended to all the output files, which have numbers
and extensions added. Extra arguments can be passed as a list in extra_args.
Return the command output."""
if extra_args is None: extra_args = []
if not pdftoppm_executable:
init_and_test_pdftoppm_executable(prefer_local=False, exit_on_fail=True)
if old_pdftoppm_version:
# We only have -r, not -rx and -ry.
command = [pdftoppm_executable] + extra_args + ["-r", res_x, pdf_file_name,
root_output_file_path]
else:
command = [pdftoppm_executable] + extra_args + ["-rx", res_x, "-ry", res_y,
pdf_file_name, root_output_file_path]
comm_output = get_external_subprocess_output(command)
return comm_output
|
[
"def",
"render_pdf_file_to_image_files_pdftoppm_ppm",
"(",
"pdf_file_name",
",",
"root_output_file_path",
",",
"res_x",
"=",
"150",
",",
"res_y",
"=",
"150",
",",
"extra_args",
"=",
"None",
")",
":",
"if",
"extra_args",
"is",
"None",
":",
"extra_args",
"=",
"[",
"]",
"if",
"not",
"pdftoppm_executable",
":",
"init_and_test_pdftoppm_executable",
"(",
"prefer_local",
"=",
"False",
",",
"exit_on_fail",
"=",
"True",
")",
"if",
"old_pdftoppm_version",
":",
"# We only have -r, not -rx and -ry.",
"command",
"=",
"[",
"pdftoppm_executable",
"]",
"+",
"extra_args",
"+",
"[",
"\"-r\"",
",",
"res_x",
",",
"pdf_file_name",
",",
"root_output_file_path",
"]",
"else",
":",
"command",
"=",
"[",
"pdftoppm_executable",
"]",
"+",
"extra_args",
"+",
"[",
"\"-rx\"",
",",
"res_x",
",",
"\"-ry\"",
",",
"res_y",
",",
"pdf_file_name",
",",
"root_output_file_path",
"]",
"comm_output",
"=",
"get_external_subprocess_output",
"(",
"command",
")",
"return",
"comm_output"
] |
Use the pdftoppm program to render a PDF file to .ppm images. The
root_output_file_path is prepended to all the output files, which have numbers
and extensions added. Extra arguments can be passed as a list in extra_args.
Return the command output.
|
[
"Use",
"the",
"pdftoppm",
"program",
"to",
"render",
"a",
"PDF",
"file",
"to",
".",
"png",
"images",
".",
"The",
"root_output_file_path",
"is",
"prepended",
"to",
"all",
"the",
"output",
"files",
"which",
"have",
"numbers",
"and",
"extensions",
"added",
".",
"Extra",
"arguments",
"can",
"be",
"passed",
"as",
"a",
"list",
"in",
"extra_args",
".",
"Return",
"the",
"command",
"output",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L604-L624
|
13,814
|
abarker/pdfCropMargins
|
src/pdfCropMargins/external_program_calls.py
|
render_pdf_file_to_image_files_pdftoppm_pgm
|
def render_pdf_file_to_image_files_pdftoppm_pgm(pdf_file_name, root_output_file_path,
res_x=150, res_y=150):
"""Same as renderPdfFileToImageFile_pdftoppm_ppm but with -gray option for pgm."""
comm_output = render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name,
root_output_file_path, res_x, res_y, ["-gray"])
return comm_output
|
python
|
def render_pdf_file_to_image_files_pdftoppm_pgm(pdf_file_name, root_output_file_path,
res_x=150, res_y=150):
"""Same as renderPdfFileToImageFile_pdftoppm_ppm but with -gray option for pgm."""
comm_output = render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name,
root_output_file_path, res_x, res_y, ["-gray"])
return comm_output
|
[
"def",
"render_pdf_file_to_image_files_pdftoppm_pgm",
"(",
"pdf_file_name",
",",
"root_output_file_path",
",",
"res_x",
"=",
"150",
",",
"res_y",
"=",
"150",
")",
":",
"comm_output",
"=",
"render_pdf_file_to_image_files_pdftoppm_ppm",
"(",
"pdf_file_name",
",",
"root_output_file_path",
",",
"res_x",
",",
"res_y",
",",
"[",
"\"-gray\"",
"]",
")",
"return",
"comm_output"
] |
Same as renderPdfFileToImageFile_pdftoppm_ppm but with -gray option for pgm.
|
[
"Same",
"as",
"renderPdfFileToImageFile_pdftoppm_ppm",
"but",
"with",
"-",
"gray",
"option",
"for",
"pgm",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L626-L632
|
13,815
|
abarker/pdfCropMargins
|
src/pdfCropMargins/external_program_calls.py
|
render_pdf_file_to_image_files__ghostscript_png
|
def render_pdf_file_to_image_files__ghostscript_png(pdf_file_name,
root_output_file_path,
res_x=150, res_y=150):
"""Use Ghostscript to render a PDF file to .png images. The root_output_file_path
is prepended to all the output files, which have numbers and extensions added.
Return the command output."""
# For gs commands see
# http://ghostscript.com/doc/current/Devices.htm#File_formats
# http://ghostscript.com/doc/current/Devices.htm#PNG
if not gs_executable: init_and_test_gs_executable(exit_on_fail=True)
command = [gs_executable, "-dBATCH", "-dNOPAUSE", "-sDEVICE=pnggray",
"-r"+res_x+"x"+res_y, "-sOutputFile="+root_output_file_path+"-%06d.png",
pdf_file_name]
comm_output = get_external_subprocess_output(command, env=gs_environment)
return comm_output
|
python
|
def render_pdf_file_to_image_files__ghostscript_png(pdf_file_name,
root_output_file_path,
res_x=150, res_y=150):
"""Use Ghostscript to render a PDF file to .png images. The root_output_file_path
is prepended to all the output files, which have numbers and extensions added.
Return the command output."""
# For gs commands see
# http://ghostscript.com/doc/current/Devices.htm#File_formats
# http://ghostscript.com/doc/current/Devices.htm#PNG
if not gs_executable: init_and_test_gs_executable(exit_on_fail=True)
command = [gs_executable, "-dBATCH", "-dNOPAUSE", "-sDEVICE=pnggray",
"-r"+res_x+"x"+res_y, "-sOutputFile="+root_output_file_path+"-%06d.png",
pdf_file_name]
comm_output = get_external_subprocess_output(command, env=gs_environment)
return comm_output
|
[
"def",
"render_pdf_file_to_image_files__ghostscript_png",
"(",
"pdf_file_name",
",",
"root_output_file_path",
",",
"res_x",
"=",
"150",
",",
"res_y",
"=",
"150",
")",
":",
"# For gs commands see",
"# http://ghostscript.com/doc/current/Devices.htm#File_formats",
"# http://ghostscript.com/doc/current/Devices.htm#PNG",
"if",
"not",
"gs_executable",
":",
"init_and_test_gs_executable",
"(",
"exit_on_fail",
"=",
"True",
")",
"command",
"=",
"[",
"gs_executable",
",",
"\"-dBATCH\"",
",",
"\"-dNOPAUSE\"",
",",
"\"-sDEVICE=pnggray\"",
",",
"\"-r\"",
"+",
"res_x",
"+",
"\"x\"",
"+",
"res_y",
",",
"\"-sOutputFile=\"",
"+",
"root_output_file_path",
"+",
"\"-%06d.png\"",
",",
"pdf_file_name",
"]",
"comm_output",
"=",
"get_external_subprocess_output",
"(",
"command",
",",
"env",
"=",
"gs_environment",
")",
"return",
"comm_output"
] |
Use Ghostscript to render a PDF file to .png images. The root_output_file_path
is prepended to all the output files, which have numbers and extensions added.
Return the command output.
|
[
"Use",
"Ghostscript",
"to",
"render",
"a",
"PDF",
"file",
"to",
".",
"png",
"images",
".",
"The",
"root_output_file_path",
"is",
"prepended",
"to",
"all",
"the",
"output",
"files",
"which",
"have",
"numbers",
"and",
"extensions",
"added",
".",
"Return",
"the",
"command",
"output",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L634-L648
|
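The command assembled above can also be reproduced directly with the standard library; a sketch, assuming a gs executable on the PATH and hypothetical file names. Note that res_x and res_y are concatenated into the -r flag, so callers must pass them as strings:

    import subprocess

    # Same flags as above: batch mode, no pause between pages, grayscale PNG device.
    subprocess.check_output(["gs", "-dBATCH", "-dNOPAUSE", "-sDEVICE=pnggray",
                             "-r150x150", "-sOutputFile=page-%06d.png", "doc.pdf"])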
13,816
|
abarker/pdfCropMargins
|
src/pdfCropMargins/external_program_calls.py
|
show_preview
|
def show_preview(viewer_path, pdf_file_name):
"""Run the PDF viewer at the path viewer_path on the file pdf_file_name."""
try:
cmd = [viewer_path, pdf_file_name]
run_external_subprocess_in_background(cmd)
except (subprocess.CalledProcessError, OSError, IOError) as e:
print("\nWarning from pdfCropMargins: The argument to the '--viewer' option:"
"\n ", viewer_path, "\nwas not found or failed to execute correctly.\n",
file=sys.stderr)
return
|
python
|
def show_preview(viewer_path, pdf_file_name):
"""Run the PDF viewer at the path viewer_path on the file pdf_file_name."""
try:
cmd = [viewer_path, pdf_file_name]
run_external_subprocess_in_background(cmd)
except (subprocess.CalledProcessError, OSError, IOError) as e:
print("\nWarning from pdfCropMargins: The argument to the '--viewer' option:"
"\n ", viewer_path, "\nwas not found or failed to execute correctly.\n",
file=sys.stderr)
return
|
[
"def",
"show_preview",
"(",
"viewer_path",
",",
"pdf_file_name",
")",
":",
"try",
":",
"cmd",
"=",
"[",
"viewer_path",
",",
"pdf_file_name",
"]",
"run_external_subprocess_in_background",
"(",
"cmd",
")",
"except",
"(",
"subprocess",
".",
"CalledProcessError",
",",
"OSError",
",",
"IOError",
")",
"as",
"e",
":",
"print",
"(",
"\"\\nWarning from pdfCropMargins: The argument to the '--viewer' option:\"",
"\"\\n \"",
",",
"viewer_path",
",",
"\"\\nwas not found or failed to execute correctly.\\n\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return"
] |
Run the PDF viewer at the path viewer_path on the file pdf_file_name.
|
[
"Run",
"the",
"PDF",
"viewer",
"at",
"the",
"path",
"viewer_path",
"on",
"the",
"file",
"pdf_file_name",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L673-L682
|
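A usage sketch (the viewer path and PDF name are hypothetical):

    show_preview("/usr/bin/evince", "doc_cropped.pdf")

The helper returns immediately because the viewer is launched in the background; a plain standard-library equivalent would be subprocess.Popen([viewer_path, pdf_file_name]).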
13,817
|
abarker/pdfCropMargins
|
src/pdfCropMargins/pdfCropMargins.py
|
main
|
def main():
"""Run main, catching any exceptions and cleaning up the temp directories."""
cleanup_and_exit = sys.exit # Function to do cleanup and exit before the import.
exit_code = 0
# Imports are done here inside the try block so some ugly (and useless)
# traceback info is avoided on user's ^C (KeyboardInterrupt, EOFError on Windows).
try:
from . import external_program_calls as ex # Creates tmp dir as side effect.
cleanup_and_exit = ex.cleanup_and_exit # Switch to the real one, deletes temp dir.
from . import main_pdfCropMargins # Imports external_program_calls, don't do first.
main_pdfCropMargins.main_crop() # Run the actual program.
except (KeyboardInterrupt, EOFError): # Windows raises EOFError on ^C.
print("\nGot a KeyboardInterrupt, cleaning up and exiting...\n",
file=sys.stderr)
except SystemExit:
exit_code = sys.exc_info()[1]
print()
except:
# Echo back the unexpected error so the user can see it.
print("\nCaught an unexpected exception in the pdfCropMargins program.",
file=sys.stderr)
print("Unexpected error: ", sys.exc_info()[0], file=sys.stderr)
print("Error message : ", sys.exc_info()[1], file=sys.stderr)
print()
exit_code = 1
import traceback
max_traceback_length = 30
traceback.print_tb(sys.exc_info()[2], limit=max_traceback_length)
# raise # Re-raise the error.
finally:
# Some people like to hit multiple ^C chars, which kills cleanup.
# Call cleanup again each time.
for i in range(30): # Give up after 30 tries.
try:
cleanup_and_exit(exit_code)
except (KeyboardInterrupt, EOFError):
continue
|
python
|
def main():
"""Run main, catching any exceptions and cleaning up the temp directories."""
cleanup_and_exit = sys.exit # Function to do cleanup and exit before the import.
exit_code = 0
# Imports are done here inside the try block so some ugly (and useless)
# traceback info is avoided on user's ^C (KeyboardInterrupt, EOFError on Windows).
try:
from . import external_program_calls as ex # Creates tmp dir as side effect.
cleanup_and_exit = ex.cleanup_and_exit # Switch to the real one, deletes temp dir.
from . import main_pdfCropMargins # Imports external_program_calls, don't do first.
main_pdfCropMargins.main_crop() # Run the actual program.
except (KeyboardInterrupt, EOFError): # Windows raises EOFError on ^C.
print("\nGot a KeyboardInterrupt, cleaning up and exiting...\n",
file=sys.stderr)
except SystemExit:
exit_code = sys.exc_info()[1]
print()
except:
# Echo back the unexpected error so the user can see it.
print("\nCaught an unexpected exception in the pdfCropMargins program.",
file=sys.stderr)
print("Unexpected error: ", sys.exc_info()[0], file=sys.stderr)
print("Error message : ", sys.exc_info()[1], file=sys.stderr)
print()
exit_code = 1
import traceback
max_traceback_length = 30
traceback.print_tb(sys.exc_info()[2], limit=max_traceback_length)
# raise # Re-raise the error.
finally:
# Some people like to hit multiple ^C chars, which kills cleanup.
# Call cleanup again each time.
for i in range(30): # Give up after 30 tries.
try:
cleanup_and_exit(exit_code)
except (KeyboardInterrupt, EOFError):
continue
|
[
"def",
"main",
"(",
")",
":",
"cleanup_and_exit",
"=",
"sys",
".",
"exit",
"# Function to do cleanup and exit before the import.",
"exit_code",
"=",
"0",
"# Imports are done here inside the try block so some ugly (and useless)",
"# traceback info is avoided on user's ^C (KeyboardInterrupt, EOFError on Windows).",
"try",
":",
"from",
".",
"import",
"external_program_calls",
"as",
"ex",
"# Creates tmp dir as side effect.",
"cleanup_and_exit",
"=",
"ex",
".",
"cleanup_and_exit",
"# Switch to the real one, deletes temp dir.",
"from",
".",
"import",
"main_pdfCropMargins",
"# Imports external_program_calls, don't do first.",
"main_pdfCropMargins",
".",
"main_crop",
"(",
")",
"# Run the actual program.",
"except",
"(",
"KeyboardInterrupt",
",",
"EOFError",
")",
":",
"# Windows raises EOFError on ^C.",
"print",
"(",
"\"\\nGot a KeyboardInterrupt, cleaning up and exiting...\\n\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"except",
"SystemExit",
":",
"exit_code",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"print",
"(",
")",
"except",
":",
"# Echo back the unexpected error so the user can see it.",
"print",
"(",
"\"\\nCaught an unexpected exception in the pdfCropMargins program.\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"print",
"(",
"\"Unexpected error: \"",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"print",
"(",
"\"Error message : \"",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"print",
"(",
")",
"exit_code",
"=",
"1",
"import",
"traceback",
"max_traceback_length",
"=",
"30",
"traceback",
".",
"print_tb",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
",",
"limit",
"=",
"max_traceback_length",
")",
"# raise # Re-raise the error.",
"finally",
":",
"# Some people like to hit multiple ^C chars, which kills cleanup.",
"# Call cleanup again each time.",
"for",
"i",
"in",
"range",
"(",
"30",
")",
":",
"# Give up after 30 tries.",
"try",
":",
"cleanup_and_exit",
"(",
"exit_code",
")",
"except",
"(",
"KeyboardInterrupt",
",",
"EOFError",
")",
":",
"continue"
] |
Run main, catching any exceptions and cleaning up the temp directories.
|
[
"Run",
"main",
"catching",
"any",
"exceptions",
"and",
"cleaning",
"up",
"the",
"temp",
"directories",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/pdfCropMargins.py#L70-L113
|
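The finally block above retries cleanup because every additional ^C raises a fresh KeyboardInterrupt that can abort the cleanup already in progress. The same pattern in isolation (a sketch; cleanup is any callable that might be interrupted):

    def retry_cleanup(cleanup, exit_code, max_tries=30):
        # Each new ^C can kill the cleanup call itself, so keep retrying it.
        for _ in range(max_tries):
            try:
                cleanup(exit_code)
                break  # unreachable when cleanup exits the process, as above
            except (KeyboardInterrupt, EOFError):
                continue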
13,818
|
abarker/pdfCropMargins
|
src/pdfCropMargins/main_pdfCropMargins.py
|
get_full_page_box_list_assigning_media_and_crop
|
def get_full_page_box_list_assigning_media_and_crop(input_doc, quiet=False):
"""Get a list of all the full-page box values for each page. The argument
input_doc should be a PdfFileReader object. The boxes on the list are in the
simple 4-float list format used by this program, not RectangleObject format."""
full_page_box_list = []
rotation_list = []
if args.verbose and not quiet:
print("\nOriginal full page sizes, in PDF format (lbrt):")
for page_num in range(input_doc.getNumPages()):
# Get the current page and find the full-page box.
curr_page = input_doc.getPage(page_num)
full_page_box = get_full_page_box_assigning_media_and_crop(curr_page)
if args.verbose and not quiet:
# want to display page num numbering from 1, so add one
print("\t"+str(page_num+1), " rot =",
curr_page.rotationAngle, "\t", full_page_box)
# Convert the RectangleObject to floats in an ordinary list and append.
ordinary_box = [float(b) for b in full_page_box]
full_page_box_list.append(ordinary_box)
# Append the rotation value to the rotation_list.
rotation_list.append(curr_page.rotationAngle)
return full_page_box_list, rotation_list
|
python
|
def get_full_page_box_list_assigning_media_and_crop(input_doc, quiet=False):
"""Get a list of all the full-page box values for each page. The argument
input_doc should be a PdfFileReader object. The boxes on the list are in the
simple 4-float list format used by this program, not RectangleObject format."""
full_page_box_list = []
rotation_list = []
if args.verbose and not quiet:
print("\nOriginal full page sizes, in PDF format (lbrt):")
for page_num in range(input_doc.getNumPages()):
# Get the current page and find the full-page box.
curr_page = input_doc.getPage(page_num)
full_page_box = get_full_page_box_assigning_media_and_crop(curr_page)
if args.verbose and not quiet:
# want to display page num numbering from 1, so add one
print("\t"+str(page_num+1), " rot =",
curr_page.rotationAngle, "\t", full_page_box)
# Convert the RectangleObject to floats in an ordinary list and append.
ordinary_box = [float(b) for b in full_page_box]
full_page_box_list.append(ordinary_box)
# Append the rotation value to the rotation_list.
rotation_list.append(curr_page.rotationAngle)
return full_page_box_list, rotation_list
|
[
"def",
"get_full_page_box_list_assigning_media_and_crop",
"(",
"input_doc",
",",
"quiet",
"=",
"False",
")",
":",
"full_page_box_list",
"=",
"[",
"]",
"rotation_list",
"=",
"[",
"]",
"if",
"args",
".",
"verbose",
"and",
"not",
"quiet",
":",
"print",
"(",
"\"\\nOriginal full page sizes, in PDF format (lbrt):\"",
")",
"for",
"page_num",
"in",
"range",
"(",
"input_doc",
".",
"getNumPages",
"(",
")",
")",
":",
"# Get the current page and find the full-page box.",
"curr_page",
"=",
"input_doc",
".",
"getPage",
"(",
"page_num",
")",
"full_page_box",
"=",
"get_full_page_box_assigning_media_and_crop",
"(",
"curr_page",
")",
"if",
"args",
".",
"verbose",
"and",
"not",
"quiet",
":",
"# want to display page num numbering from 1, so add one",
"print",
"(",
"\"\\t\"",
"+",
"str",
"(",
"page_num",
"+",
"1",
")",
",",
"\" rot =\"",
",",
"curr_page",
".",
"rotationAngle",
",",
"\"\\t\"",
",",
"full_page_box",
")",
"# Convert the RectangleObject to floats in an ordinary list and append.",
"ordinary_box",
"=",
"[",
"float",
"(",
"b",
")",
"for",
"b",
"in",
"full_page_box",
"]",
"full_page_box_list",
".",
"append",
"(",
"ordinary_box",
")",
"# Append the rotation value to the rotation_list.",
"rotation_list",
".",
"append",
"(",
"curr_page",
".",
"rotationAngle",
")",
"return",
"full_page_box_list",
",",
"rotation_list"
] |
Get a list of all the full-page box values for each page. The argument
input_doc should be a PdfFileReader object. The boxes on the list are in the
simple 4-float list format used by this program, not RectangleObject format.
|
[
"Get",
"a",
"list",
"of",
"all",
"the",
"full",
"-",
"page",
"box",
"values",
"for",
"each",
"page",
".",
"The",
"argument",
"input_doc",
"should",
"be",
"a",
"PdfFileReader",
"object",
".",
"The",
"boxes",
"on",
"the",
"list",
"are",
"in",
"the",
"simple",
"4",
"-",
"float",
"list",
"format",
"used",
"by",
"this",
"program",
"not",
"RectangleObject",
"format",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/main_pdfCropMargins.py#L207-L236
|
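A condensed sketch of the same extraction with the old PyPDF2 API targeted above, using mediaBox in place of the program's computed full-page box ("doc.pdf" is hypothetical). A RectangleObject iterates over its four lbrt coordinates, which is why the element-wise float() conversion works:

    from PyPDF2 import PdfFileReader

    reader = PdfFileReader("doc.pdf")
    box_list = [[float(v) for v in reader.getPage(i).mediaBox]
                for i in range(reader.getNumPages())]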
13,819
|
abarker/pdfCropMargins
|
src/pdfCropMargins/main_pdfCropMargins.py
|
set_cropped_metadata
|
def set_cropped_metadata(input_doc, output_doc, metadata_info):
"""Set the metadata for the output document. Mostly just copied over, but
"Producer" has a string appended to indicate that this program modified the
file. That allows for the undo operation to make sure that this
program cropped the file in the first place."""
# Setting metadata with pyPdf requires low-level pyPdf operations, see
# http://stackoverflow.com/questions/2574676/change-metadata-of-pdf-file-with-pypdf
if not metadata_info:
# In case it's null, just set values to empty strings. This class just holds
# data temporarily in the same format; this is not sent into PyPDF2.
class MetadataInfo(object):
author = ""
creator = ""
producer = ""
subject = ""
title = ""
metadata_info = MetadataInfo()
output_info_dict = output_doc._info.getObject()
# Check Producer metadata attribute to see if this program cropped document before.
producer_mod = PRODUCER_MODIFIER
already_cropped_by_this_program = False
old_producer_string = metadata_info.producer
if old_producer_string and old_producer_string.endswith(producer_mod):
if args.verbose:
print("\nThe document was already cropped at least once by this program.")
already_cropped_by_this_program = True
producer_mod = "" # No need to pile up suffixes each time on Producer.
# Note that all None metadata attributes are currently set to the empty string
# when passing along the metadata information.
def st(item):
if item is None: return ""
else: return item
output_info_dict.update({
NameObject("/Author"): createStringObject(st(metadata_info.author)),
NameObject("/Creator"): createStringObject(st(metadata_info.creator)),
NameObject("/Producer"): createStringObject(st(metadata_info.producer)
+ producer_mod),
NameObject("/Subject"): createStringObject(st(metadata_info.subject)),
NameObject("/Title"): createStringObject(st(metadata_info.title))
})
return already_cropped_by_this_program
|
python
|
def set_cropped_metadata(input_doc, output_doc, metadata_info):
"""Set the metadata for the output document. Mostly just copied over, but
"Producer" has a string appended to indicate that this program modified the
file. That allows for the undo operation to make sure that this
program cropped the file in the first place."""
# Setting metadata with pyPdf requires low-level pyPdf operations, see
# http://stackoverflow.com/questions/2574676/change-metadata-of-pdf-file-with-pypdf
if not metadata_info:
# In case it's null, just set values to empty strings. This class just holds
# data temporarily in the same format; this is not sent into PyPDF2.
class MetadataInfo(object):
author = ""
creator = ""
producer = ""
subject = ""
title = ""
metadata_info = MetadataInfo()
output_info_dict = output_doc._info.getObject()
# Check Producer metadata attribute to see if this program cropped document before.
producer_mod = PRODUCER_MODIFIER
already_cropped_by_this_program = False
old_producer_string = metadata_info.producer
if old_producer_string and old_producer_string.endswith(producer_mod):
if args.verbose:
print("\nThe document was already cropped at least once by this program.")
already_cropped_by_this_program = True
producer_mod = "" # No need to pile up suffixes each time on Producer.
# Note that all None metadata attributes are currently set to the empty string
# when passing along the metadata information.
def st(item):
if item is None: return ""
else: return item
output_info_dict.update({
NameObject("/Author"): createStringObject(st(metadata_info.author)),
NameObject("/Creator"): createStringObject(st(metadata_info.creator)),
NameObject("/Producer"): createStringObject(st(metadata_info.producer)
+ producer_mod),
NameObject("/Subject"): createStringObject(st(metadata_info.subject)),
NameObject("/Title"): createStringObject(st(metadata_info.title))
})
return already_cropped_by_this_program
|
[
"def",
"set_cropped_metadata",
"(",
"input_doc",
",",
"output_doc",
",",
"metadata_info",
")",
":",
"# Setting metadata with pyPdf requires low-level pyPdf operations, see",
"# http://stackoverflow.com/questions/2574676/change-metadata-of-pdf-file-with-pypdf",
"if",
"not",
"metadata_info",
":",
"# In case it's null, just set values to empty strings. This class just holds",
"# data temporary in the same format; this is not sent into PyPDF2.",
"class",
"MetadataInfo",
"(",
"object",
")",
":",
"author",
"=",
"\"\"",
"creator",
"=",
"\"\"",
"producer",
"=",
"\"\"",
"subject",
"=",
"\"\"",
"title",
"=",
"\"\"",
"metadata_info",
"=",
"MetadataInfo",
"(",
")",
"output_info_dict",
"=",
"output_doc",
".",
"_info",
".",
"getObject",
"(",
")",
"# Check Producer metadata attribute to see if this program cropped document before.",
"producer_mod",
"=",
"PRODUCER_MODIFIER",
"already_cropped_by_this_program",
"=",
"False",
"old_producer_string",
"=",
"metadata_info",
".",
"producer",
"if",
"old_producer_string",
"and",
"old_producer_string",
".",
"endswith",
"(",
"producer_mod",
")",
":",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"\"\\nThe document was already cropped at least once by this program.\"",
")",
"already_cropped_by_this_program",
"=",
"True",
"producer_mod",
"=",
"\"\"",
"# No need to pile up suffixes each time on Producer.",
"# Note that all None metadata attributes are currently set to the empty string",
"# when passing along the metadata information.",
"def",
"st",
"(",
"item",
")",
":",
"if",
"item",
"is",
"None",
":",
"return",
"\"\"",
"else",
":",
"return",
"item",
"output_info_dict",
".",
"update",
"(",
"{",
"NameObject",
"(",
"\"/Author\"",
")",
":",
"createStringObject",
"(",
"st",
"(",
"metadata_info",
".",
"author",
")",
")",
",",
"NameObject",
"(",
"\"/Creator\"",
")",
":",
"createStringObject",
"(",
"st",
"(",
"metadata_info",
".",
"creator",
")",
")",
",",
"NameObject",
"(",
"\"/Producer\"",
")",
":",
"createStringObject",
"(",
"st",
"(",
"metadata_info",
".",
"producer",
")",
"+",
"producer_mod",
")",
",",
"NameObject",
"(",
"\"/Subject\"",
")",
":",
"createStringObject",
"(",
"st",
"(",
"metadata_info",
".",
"subject",
")",
")",
",",
"NameObject",
"(",
"\"/Title\"",
")",
":",
"createStringObject",
"(",
"st",
"(",
"metadata_info",
".",
"title",
")",
")",
"}",
")",
"return",
"already_cropped_by_this_program"
] |
Set the metadata for the output document. Mostly just copied over, but
"Producer" has a string appended to indicate that this program modified the
file. That allows for the undo operation to make sure that this
program cropped the file in the first place.
|
[
"Set",
"the",
"metadata",
"for",
"the",
"output",
"document",
".",
"Mostly",
"just",
"copied",
"over",
"but",
"Producer",
"has",
"a",
"string",
"appended",
"to",
"indicate",
"that",
"this",
"program",
"modified",
"the",
"file",
".",
"That",
"allows",
"for",
"the",
"undo",
"operation",
"to",
"make",
"sure",
"that",
"this",
"program",
"cropped",
"the",
"file",
"in",
"the",
"first",
"place",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/main_pdfCropMargins.py#L448-L494
|
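The Producer check above reduces to a small predicate; a standalone sketch (the suffix string is a hypothetical stand-in for the module's actual PRODUCER_MODIFIER constant):

    PRODUCER_MODIFIER = " (Cropped by pdfCropMargins.)"  # hypothetical value

    def was_cropped_by_this_program(producer):
        # True only if the Producer metadata ends with this program's marker.
        return bool(producer) and producer.endswith(PRODUCER_MODIFIER)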
13,820
|
abarker/pdfCropMargins
|
src/pdfCropMargins/main_pdfCropMargins.py
|
apply_crop_list
|
def apply_crop_list(crop_list, input_doc, page_nums_to_crop,
already_cropped_by_this_program):
"""Apply the crop list to the pages of the input PdfFileReader object."""
if args.restore and not already_cropped_by_this_program:
print("\nWarning from pdfCropMargins: The Producer string indicates that"
"\neither this document was not previously cropped by pdfCropMargins"
"\nor else it was modified by another program after that. Trying the"
"\nundo anyway...", file=sys.stderr)
if args.restore and args.verbose:
print("\nRestoring the document to margins saved for each page in the ArtBox.")
if args.verbose and not args.restore:
print("\nNew full page sizes after cropping, in PDF format (lbrt):")
# Copy over each page, after modifying the appropriate PDF boxes.
for page_num in range(input_doc.getNumPages()):
curr_page = input_doc.getPage(page_num)
# Restore any rotation which was originally on the page.
curr_page.rotateClockwise(curr_page.rotationAngle)
# Only do the restore from ArtBox if '--restore' option was selected.
if args.restore:
if not curr_page.artBox:
print("\nWarning from pdfCropMargins: Attempting to restore pages from"
"\nthe ArtBox in each page, but page", page_num, "has no readable"
"\nArtBox. Leaving that page unchanged.", file=sys.stderr)
continue
curr_page.mediaBox = curr_page.artBox
curr_page.cropBox = curr_page.artBox
continue
# Do the save to ArtBox if that option is chosen and Producer is set.
if not args.noundosave and not already_cropped_by_this_program:
curr_page.artBox = intersect_boxes(curr_page.mediaBox, curr_page.cropBox)
# Reset the CropBox and MediaBox to their saved original values
# (which were set in getFullPageBox, in the curr_page object's namespace).
curr_page.mediaBox = curr_page.originalMediaBox
curr_page.cropBox = curr_page.originalCropBox
# Copy the original page without further mods if it wasn't in the range
# selected for cropping.
if page_num not in page_nums_to_crop:
continue
# Convert the computed "box to crop to" into a RectangleObject (for pyPdf).
new_cropped_box = RectangleObject(crop_list[page_num])
if args.verbose:
print("\t"+str(page_num+1)+"\t", new_cropped_box) # page numbering from 1
if not args.boxesToSet:
args.boxesToSet = ["m", "c"]
# Now set any boxes which were selected to be set via the --boxesToSet option.
if "m" in args.boxesToSet: curr_page.mediaBox = new_cropped_box
if "c" in args.boxesToSet: curr_page.cropBox = new_cropped_box
if "t" in args.boxesToSet: curr_page.trimBox = new_cropped_box
if "a" in args.boxesToSet: curr_page.artBox = new_cropped_box
if "b" in args.boxesToSet: curr_page.bleedBox = new_cropped_box
return
|
python
|
def apply_crop_list(crop_list, input_doc, page_nums_to_crop,
already_cropped_by_this_program):
"""Apply the crop list to the pages of the input PdfFileReader object."""
if args.restore and not already_cropped_by_this_program:
print("\nWarning from pdfCropMargins: The Producer string indicates that"
"\neither this document was not previously cropped by pdfCropMargins"
"\nor else it was modified by another program after that. Trying the"
"\nundo anyway...", file=sys.stderr)
if args.restore and args.verbose:
print("\nRestoring the document to margins saved for each page in the ArtBox.")
if args.verbose and not args.restore:
print("\nNew full page sizes after cropping, in PDF format (lbrt):")
# Copy over each page, after modifying the appropriate PDF boxes.
for page_num in range(input_doc.getNumPages()):
curr_page = input_doc.getPage(page_num)
# Restore any rotation which was originally on the page.
curr_page.rotateClockwise(curr_page.rotationAngle)
# Only do the restore from ArtBox if '--restore' option was selected.
if args.restore:
if not curr_page.artBox:
print("\nWarning from pdfCropMargins: Attempting to restore pages from"
"\nthe ArtBox in each page, but page", page_num, "has no readable"
"\nArtBox. Leaving that page unchanged.", file=sys.stderr)
continue
curr_page.mediaBox = curr_page.artBox
curr_page.cropBox = curr_page.artBox
continue
# Do the save to ArtBox if that option is chosen and Producer is set.
if not args.noundosave and not already_cropped_by_this_program:
curr_page.artBox = intersect_boxes(curr_page.mediaBox, curr_page.cropBox)
# Reset the CropBox and MediaBox to their saved original values
# (which were set in getFullPageBox, in the curr_page object's namespace).
curr_page.mediaBox = curr_page.originalMediaBox
curr_page.cropBox = curr_page.originalCropBox
# Copy the original page without further mods if it wasn't in the range
# selected for cropping.
if page_num not in page_nums_to_crop:
continue
# Convert the computed "box to crop to" into a RectangleObject (for pyPdf).
new_cropped_box = RectangleObject(crop_list[page_num])
if args.verbose:
print("\t"+str(page_num+1)+"\t", new_cropped_box) # page numbering from 1
if not args.boxesToSet:
args.boxesToSet = ["m", "c"]
# Now set any boxes which were selected to be set via the --boxesToSet option.
if "m" in args.boxesToSet: curr_page.mediaBox = new_cropped_box
if "c" in args.boxesToSet: curr_page.cropBox = new_cropped_box
if "t" in args.boxesToSet: curr_page.trimBox = new_cropped_box
if "a" in args.boxesToSet: curr_page.artBox = new_cropped_box
if "b" in args.boxesToSet: curr_page.bleedBox = new_cropped_box
return
|
[
"def",
"apply_crop_list",
"(",
"crop_list",
",",
"input_doc",
",",
"page_nums_to_crop",
",",
"already_cropped_by_this_program",
")",
":",
"if",
"args",
".",
"restore",
"and",
"not",
"already_cropped_by_this_program",
":",
"print",
"(",
"\"\\nWarning from pdfCropMargins: The Producer string indicates that\"",
"\"\\neither this document was not previously cropped by pdfCropMargins\"",
"\"\\nor else it was modified by another program after that. Trying the\"",
"\"\\nundo anyway...\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"if",
"args",
".",
"restore",
"and",
"args",
".",
"verbose",
":",
"print",
"(",
"\"\\nRestoring the document to margins saved for each page in the ArtBox.\"",
")",
"if",
"args",
".",
"verbose",
"and",
"not",
"args",
".",
"restore",
":",
"print",
"(",
"\"\\nNew full page sizes after cropping, in PDF format (lbrt):\"",
")",
"# Copy over each page, after modifying the appropriate PDF boxes.",
"for",
"page_num",
"in",
"range",
"(",
"input_doc",
".",
"getNumPages",
"(",
")",
")",
":",
"curr_page",
"=",
"input_doc",
".",
"getPage",
"(",
"page_num",
")",
"# Restore any rotation which was originally on the page.",
"curr_page",
".",
"rotateClockwise",
"(",
"curr_page",
".",
"rotationAngle",
")",
"# Only do the restore from ArtBox if '--restore' option was selected.",
"if",
"args",
".",
"restore",
":",
"if",
"not",
"curr_page",
".",
"artBox",
":",
"print",
"(",
"\"\\nWarning from pdfCropMargins: Attempting to restore pages from\"",
"\"\\nthe ArtBox in each page, but page\"",
",",
"page_num",
",",
"\"has no readable\"",
"\"\\nArtBox. Leaving that page unchanged.\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"continue",
"curr_page",
".",
"mediaBox",
"=",
"curr_page",
".",
"artBox",
"curr_page",
".",
"cropBox",
"=",
"curr_page",
".",
"artBox",
"continue",
"# Do the save to ArtBox if that option is chosen and Producer is set.",
"if",
"not",
"args",
".",
"noundosave",
"and",
"not",
"already_cropped_by_this_program",
":",
"curr_page",
".",
"artBox",
"=",
"intersect_boxes",
"(",
"curr_page",
".",
"mediaBox",
",",
"curr_page",
".",
"cropBox",
")",
"# Reset the CropBox and MediaBox to their saved original values",
"# (which were set in getFullPageBox, in the curr_page object's namespace).",
"curr_page",
".",
"mediaBox",
"=",
"curr_page",
".",
"originalMediaBox",
"curr_page",
".",
"cropBox",
"=",
"curr_page",
".",
"originalCropBox",
"# Copy the original page without further mods if it wasn't in the range",
"# selected for cropping.",
"if",
"page_num",
"not",
"in",
"page_nums_to_crop",
":",
"continue",
"# Convert the computed \"box to crop to\" into a RectangleObject (for pyPdf).",
"new_cropped_box",
"=",
"RectangleObject",
"(",
"crop_list",
"[",
"page_num",
"]",
")",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"\"\\t\"",
"+",
"str",
"(",
"page_num",
"+",
"1",
")",
"+",
"\"\\t\"",
",",
"new_cropped_box",
")",
"# page numbering from 1",
"if",
"not",
"args",
".",
"boxesToSet",
":",
"args",
".",
"boxesToSet",
"=",
"[",
"\"m\"",
",",
"\"c\"",
"]",
"# Now set any boxes which were selected to be set via the --boxesToSet option.",
"if",
"\"m\"",
"in",
"args",
".",
"boxesToSet",
":",
"curr_page",
".",
"mediaBox",
"=",
"new_cropped_box",
"if",
"\"c\"",
"in",
"args",
".",
"boxesToSet",
":",
"curr_page",
".",
"cropBox",
"=",
"new_cropped_box",
"if",
"\"t\"",
"in",
"args",
".",
"boxesToSet",
":",
"curr_page",
".",
"trimBox",
"=",
"new_cropped_box",
"if",
"\"a\"",
"in",
"args",
".",
"boxesToSet",
":",
"curr_page",
".",
"artBox",
"=",
"new_cropped_box",
"if",
"\"b\"",
"in",
"args",
".",
"boxesToSet",
":",
"curr_page",
".",
"bleedBox",
"=",
"new_cropped_box",
"return"
] |
Apply the crop list to the pages of the input PdfFileReader object.
|
[
"Apply",
"the",
"crop",
"list",
"to",
"the",
"pages",
"of",
"the",
"input",
"PdfFileReader",
"object",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/main_pdfCropMargins.py#L497-L562
|
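A minimal sketch of the box-setting step with the old PyPDF2 API used above. RectangleObject takes [left, bottom, right, top] in PDF points; the file name and crop values are hypothetical:

    from PyPDF2 import PdfFileReader
    from PyPDF2.generic import RectangleObject

    page = PdfFileReader("doc.pdf").getPage(0)
    new_box = RectangleObject([72, 72, 540, 720])  # one-inch left/bottom margins
    page.mediaBox = new_box   # the page's physical extent
    page.cropBox = new_box    # the region viewers display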
13,821
|
abarker/pdfCropMargins
|
src/pdfCropMargins/main_pdfCropMargins.py
|
setup_output_document
|
def setup_output_document(input_doc, tmp_input_doc, metadata_info,
copy_document_catalog=True):
"""Create the output `PdfFileWriter` objects and copy over the relevant info."""
# NOTE: Inserting pages from a PdfFileReader into multiple PdfFileWriters
# seems to cause problems (writer can hang on write), so only one is used.
# This is why the tmp_input_doc file was created earlier, to get copies of
# the page objects which are independent of those in input_doc. An ugly
# hack for a nasty bug to track down.
# NOTE: You can get the _root_object attribute (dict for the document
# catalog) from the output document after calling cloneReaderDocumentRoot
# or else you can just directly get it from the input_doc.trailer dict, as
# below (which is from the code for cloneReaderDocumentRoot), but you
# CANNOT set the full _root_object to be the _root_object attribute for the
# actual output_doc or else only blank pages show up in acroread (whether
# or not there is any attempt to explicitly copy the pages over). The same
# is true for using cloneDocumentFromReader (which just calls
# cloneReaderDocumentRoot followed by appendPagesFromReader). At least the
# '/Pages' key and value in _root_object cause problems, so they are
# skipped in the partial copy. Probably a bug in PyPDF2. See the original
# code for the routines on the github pages below.
#
# https://github.com/mstamy2/PyPDF2/blob/master/PyPDF2/pdf.py
# https://github.com/mstamy2/PyPDF2/blob/master/PyPDF2/generic.py
#
# Files still can change zoom mode on clicking outline links, but that is
# an Adobe implementation problem, and happens even in the uncropped files:
# https://superuser.com/questions/278302/
output_doc = PdfFileWriter()
def root_objects_not_indirect(input_doc, root_object):
"""This can expand some of the `IndirectObject` objects in a root object to
see the actual values. Currently only used for debugging. May mess up the
input doc and require a temporary one."""
if isinstance(root_object, dict):
return {root_objects_not_indirect(input_doc, key):
root_objects_not_indirect(input_doc, value) for
key, value in root_object.items()}
elif isinstance(root_object, list):
return [root_objects_not_indirect(input_doc, item) for item in root_object]
elif isinstance(root_object, IndirectObject):
return input_doc.getObject(root_object)
else:
return root_object
doc_cat_whitelist = args.docCatWhitelist.split()
if "ALL" in doc_cat_whitelist:
doc_cat_whitelist = ["ALL"]
doc_cat_blacklist = args.docCatBlacklist.split()
if "ALL" in doc_cat_blacklist:
doc_cat_blacklist = ["ALL"]
# Partially copy over document catalog data from input_doc to output_doc.
if not copy_document_catalog or (
not doc_cat_whitelist and doc_cat_blacklist == ["ALL"]):
# Check this first, to completely skip the possibly problematic code getting
# document catalog items when possible. Does not print a skipped list, though.
if args.verbose:
print("\nNot copying any document catalog items to the cropped document.")
else:
try:
root_object = input_doc.trailer["/Root"]
copied_items = []
skipped_items = []
for key, value in root_object.items():
# Some possible keys can be:
#
# /Type -- required, must have value /Catalog
# /Pages -- required, indirect ref to page tree; skip, will change
# /PageMode -- set to /UseNone, /UseOutlines, /UseThumbs, /Fullscreen,
# /UseOC, or /UseAttachments, with /UseNone default.
# /OpenAction -- action to take when document is opened, like zooming
# /PageLayout -- set to /SinglePage, /OneColumn, /TwoColumnLeft,
# /TwoColumnRight, /TwoPageLeft, /TwoPageRight
# /Names -- a name dictionary to avoid having to use object numbers
# /Outlines -- indirect ref to document outline, i.e., bookmarks
# /Dests -- a dict of destinations in the PDF
# /ViewerPreferences -- a viewer preferences dict
# /Metadata -- XMP metadata, as opposed to other metadata
# /PageLabels -- alternate numbering for pages, only affects PDF viewers
if key == "/Pages":
skipped_items.append(key)
continue
if doc_cat_whitelist != ["ALL"] and key not in doc_cat_whitelist:
if doc_cat_blacklist == ["ALL"] or key in doc_cat_blacklist:
skipped_items.append(key)
continue
copied_items.append(key)
output_doc._root_object[NameObject(key)] = value
if args.verbose:
print("\nCopied these items from the document catalog:\n ", end="")
print(*copied_items)
print("Skipped copy of these items from the document catalog:\n ", end="")
print(*skipped_items)
except (KeyboardInterrupt, EOFError):
raise
except: # Just catch any errors here; don't know which might be raised.
# On exception just warn and get a new PdfFileWriter object, to be safe.
print("\nWarning: The document catalog data could not be copied to the"
"\nnew, cropped document. Try fixing the PDF document using"
"\n'--gsFix' if you have Ghostscript installed.", file=sys.stderr)
output_doc = PdfFileWriter()
#output_doc.appendPagesFromReader(input_doc) # Works, but wait and test more.
for page in [input_doc.getPage(i) for i in range(input_doc.getNumPages())]:
output_doc.addPage(page)
tmp_output_doc = PdfFileWriter()
#tmp_output_doc.appendPagesFromReader(tmp_input_doc) # Works, but test more.
for page in [tmp_input_doc.getPage(i) for i in range(tmp_input_doc.getNumPages())]:
tmp_output_doc.addPage(page)
##
## Copy the metadata from input_doc to output_doc, modifying the Producer string
## if this program didn't already set it. Get bool for whether this program
## cropped the document already.
##
already_cropped_by_this_program = set_cropped_metadata(input_doc, output_doc,
metadata_info)
return output_doc, tmp_output_doc, already_cropped_by_this_program
|
python
|
def setup_output_document(input_doc, tmp_input_doc, metadata_info,
copy_document_catalog=True):
"""Create the output `PdfFileWriter` objects and copy over the relevant info."""
# NOTE: Inserting pages from a PdfFileReader into multiple PdfFileWriters
# seems to cause problems (writer can hang on write), so only one is used.
# This is why the tmp_input_doc file was created earlier, to get copies of
# the page objects which are independent of those in input_doc. An ugly
# hack for a nasty bug to track down.
# NOTE: You can get the _root_object attribute (dict for the document
# catalog) from the output document after calling cloneReaderDocumentRoot
# or else you can just directly get it from the input_doc.trailer dict, as
# below (which is from the code for cloneReaderDocumentRoot), but you
# CANNOT set the full _root_object to be the _root_object attribute for the
# actual output_doc or else only blank pages show up in acroread (whether
# or not there is any attempt to explicitly copy the pages over). The same
# is true for using cloneDocumentFromReader (which just calls
# cloneReaderDocumentRoot followed by appendPagesFromReader). At least the
# '/Pages' key and value in _root_object cause problems, so they are
# skipped in the partial copy. Probably a bug in PyPDF2. See the original
# code for the routines on the github pages below.
#
# https://github.com/mstamy2/PyPDF2/blob/master/PyPDF2/pdf.py
# https://github.com/mstamy2/PyPDF2/blob/master/PyPDF2/generic.py
#
# Files still can change zoom mode on clicking outline links, but that is
# an Adobe implementation problem, and happens even in the uncropped files:
# https://superuser.com/questions/278302/
output_doc = PdfFileWriter()
def root_objects_not_indirect(input_doc, root_object):
"""This can expand some of the `IndirectObject` objects in a root object to
see the actual values. Currently only used for debugging. May mess up the
input doc and require a temporary one."""
if isinstance(root_object, dict):
return {root_objects_not_indirect(input_doc, key):
root_objects_not_indirect(input_doc, value) for
key, value in root_object.items()}
elif isinstance(root_object, list):
return [root_objects_not_indirect(input_doc, item) for item in root_object]
elif isinstance(root_object, IndirectObject):
return input_doc.getObject(root_object)
else:
return root_object
doc_cat_whitelist = args.docCatWhitelist.split()
if "ALL" in doc_cat_whitelist:
doc_cat_whitelist = ["ALL"]
doc_cat_blacklist = args.docCatBlacklist.split()
if "ALL" in doc_cat_blacklist:
doc_cat_blacklist = ["ALL"]
# Partially copy over document catalog data from input_doc to output_doc.
if not copy_document_catalog or (
not doc_cat_whitelist and doc_cat_blacklist == ["ALL"]):
# Check this first, to completely skip the possibly problematic code getting
# document catalog items when possible. Does not print a skipped list, though.
if args.verbose:
print("\nNot copying any document catalog items to the cropped document.")
else:
try:
root_object = input_doc.trailer["/Root"]
copied_items = []
skipped_items = []
for key, value in root_object.items():
# Some possible keys can be:
#
# /Type -- required, must have value /Catalog
# /Pages -- required, indirect ref to page tree; skip, will change
# /PageMode -- set to /UseNone, /UseOutlines, /UseThumbs, /Fullscreen,
# /UseOC, or /UseAttachments, with /UseNone default.
# /OpenAction -- action to take when document is opened, like zooming
# /PageLayout -- set to /SinglePage, /OneColumn, /TwoColumnLeft,
# /TwoColumnRight, /TwoPageLeft, /TwoPageRight
# /Names -- a name dictionary to avoid having to use object numbers
# /Outlines -- indirect ref to document outline, i.e., bookmarks
# /Dests -- a dict of destinations in the PDF
# /ViewerPreferences -- a viewer preferences dict
# /Metadata -- XMP metadata, as opposed to other metadata
# /PageLabels -- alternate numbering for pages, only affects PDF viewers
if key == "/Pages":
skipped_items.append(key)
continue
if doc_cat_whitelist != ["ALL"] and key not in doc_cat_whitelist:
if doc_cat_blacklist == ["ALL"] or key in doc_cat_blacklist:
skipped_items.append(key)
continue
copied_items.append(key)
output_doc._root_object[NameObject(key)] = value
if args.verbose:
print("\nCopied these items from the document catalog:\n ", end="")
print(*copied_items)
print("Skipped copy of these items from the document catalog:\n ", end="")
print(*skipped_items)
except (KeyboardInterrupt, EOFError):
raise
except: # Just catch any errors here; don't know which might be raised.
# On exception just warn and get a new PdfFileWriter object, to be safe.
print("\nWarning: The document catalog data could not be copied to the"
"\nnew, cropped document. Try fixing the PDF document using"
"\n'--gsFix' if you have Ghostscript installed.", file=sys.stderr)
output_doc = PdfFileWriter()
#output_doc.appendPagesFromReader(input_doc) # Works, but wait and test more.
for page in [input_doc.getPage(i) for i in range(input_doc.getNumPages())]:
output_doc.addPage(page)
tmp_output_doc = PdfFileWriter()
#tmp_output_doc.appendPagesFromReader(tmp_input_doc) # Works, but test more.
for page in [tmp_input_doc.getPage(i) for i in range(tmp_input_doc.getNumPages())]:
tmp_output_doc.addPage(page)
##
## Copy the metadata from input_doc to output_doc, modifying the Producer string
## if this program didn't already set it. Get bool for whether this program
## cropped the document already.
##
already_cropped_by_this_program = set_cropped_metadata(input_doc, output_doc,
metadata_info)
return output_doc, tmp_output_doc, already_cropped_by_this_program
|
[
"def",
"setup_output_document",
"(",
"input_doc",
",",
"tmp_input_doc",
",",
"metadata_info",
",",
"copy_document_catalog",
"=",
"True",
")",
":",
"# NOTE: Inserting pages from a PdfFileReader into multiple PdfFileWriters",
"# seems to cause problems (writer can hang on write), so only one is used.",
"# This is why the tmp_input_doc file was created earlier, to get copies of",
"# the page objects which are independent of those in input_doc. An ugly",
"# hack for a nasty bug to track down.",
"# NOTE: You can get the _root_object attribute (dict for the document",
"# catalog) from the output document after calling cloneReaderDocumentRoot",
"# or else you can just directly get it from the input_doc.trailer dict, as",
"# below (which is from the code for cloneReaderDocumentRoot), but you",
"# CANNOT set the full _root_object to be the _root_object attribute for the",
"# actual output_doc or else only blank pages show up in acroread (whether",
"# or not there is any attempt to explicitly copy the pages over). The same",
"# is true for using cloneDocumentFromReader (which just calls",
"# cloneReaderDocumentRoot followed by appendPagesFromReader). At least the",
"# '/Pages' key and value in _root_object cause problems, so they are",
"# skipped in the partial copy. Probably a bug in PyPDF2. See the original",
"# code for the routines on the github pages below.",
"#",
"# https://github.com/mstamy2/PyPDF2/blob/master/PyPDF2/pdf.py",
"# https://github.com/mstamy2/PyPDF2/blob/master/PyPDF2/generic.py",
"#",
"# Files still can change zoom mode on clicking outline links, but that is",
"# an Adobe implementation problem, and happens even in the uncropped files:",
"# https://superuser.com/questions/278302/",
"output_doc",
"=",
"PdfFileWriter",
"(",
")",
"def",
"root_objects_not_indirect",
"(",
"input_doc",
",",
"root_object",
")",
":",
"\"\"\"This can expand some of the `IndirectObject` objects in a root object to\n see the actual values. Currently only used for debugging. May mess up the\n input doc and require a temporary one.\"\"\"",
"if",
"isinstance",
"(",
"root_object",
",",
"dict",
")",
":",
"return",
"{",
"root_objects_not_indirect",
"(",
"input_doc",
",",
"key",
")",
":",
"root_objects_not_indirect",
"(",
"input_doc",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"root_object",
".",
"items",
"(",
")",
"}",
"elif",
"isinstance",
"(",
"root_object",
",",
"list",
")",
":",
"return",
"[",
"root_objects_not_indirect",
"(",
"input_doc",
",",
"item",
")",
"for",
"item",
"in",
"root_object",
"]",
"elif",
"isinstance",
"(",
"root_object",
",",
"IndirectObject",
")",
":",
"return",
"input_doc",
".",
"getObject",
"(",
"root_object",
")",
"else",
":",
"return",
"root_object",
"doc_cat_whitelist",
"=",
"args",
".",
"docCatWhitelist",
".",
"split",
"(",
")",
"if",
"\"ALL\"",
"in",
"doc_cat_whitelist",
":",
"doc_cat_whitelist",
"=",
"[",
"\"ALL\"",
"]",
"doc_cat_blacklist",
"=",
"args",
".",
"docCatBlacklist",
".",
"split",
"(",
")",
"if",
"\"ALL\"",
"in",
"doc_cat_blacklist",
":",
"doc_cat_blacklist",
"=",
"[",
"\"ALL\"",
"]",
"# Partially copy over document catalog data from input_doc to output_doc.",
"if",
"not",
"copy_document_catalog",
"or",
"(",
"not",
"doc_cat_whitelist",
"and",
"doc_cat_blacklist",
"==",
"[",
"\"ALL\"",
"]",
")",
":",
"# Check this first, to completely skip the possibly problematic code getting",
"# document catalog items when possible. Does not print a skipped list, though.",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"\"\\nNot copying any document catalog items to the cropped document.\"",
")",
"else",
":",
"try",
":",
"root_object",
"=",
"input_doc",
".",
"trailer",
"[",
"\"/Root\"",
"]",
"copied_items",
"=",
"[",
"]",
"skipped_items",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"root_object",
".",
"items",
"(",
")",
":",
"# Some possible keys can be:",
"#",
"# /Type -- required, must have value /Catalog",
"# /Pages -- required, indirect ref to page tree; skip, will change",
"# /PageMode -- set to /UseNone, /UseOutlines, /UseThumbs, /Fullscreen,",
"# /UseOC, or /UseAttachments, with /UseNone default.",
"# /OpenAction -- action to take when document is opened, like zooming",
"# /PageLayout -- set to /SinglePage, /OneColumn, /TwoColumnLeft,",
"# /TwoColumnRight, /TwoPageLeft, /TwoPageRight",
"# /Names -- a name dictionary to avoid having to use object numbers",
"# /Outlines -- indirect ref to document outline, i.e., bookmarks",
"# /Dests -- a dict of destinations in the PDF",
"# /ViewerPreferences -- a viewer preferences dict",
"# /MetaData -- XMP metadata, as opposed to other metadata",
"# /PageLabels -- alternate numbering for pages, only affect PDF viewers",
"if",
"key",
"==",
"\"/Pages\"",
":",
"skipped_items",
".",
"append",
"(",
"key",
")",
"continue",
"if",
"doc_cat_whitelist",
"!=",
"[",
"\"ALL\"",
"]",
"and",
"key",
"not",
"in",
"doc_cat_whitelist",
":",
"if",
"doc_cat_blacklist",
"==",
"[",
"\"ALL\"",
"]",
"or",
"key",
"in",
"doc_cat_blacklist",
":",
"skipped_items",
".",
"append",
"(",
"key",
")",
"continue",
"copied_items",
".",
"append",
"(",
"key",
")",
"output_doc",
".",
"_root_object",
"[",
"NameObject",
"(",
"key",
")",
"]",
"=",
"value",
"if",
"args",
".",
"verbose",
":",
"print",
"(",
"\"\\nCopied these items from the document catalog:\\n \"",
",",
"end",
"=",
"\"\"",
")",
"print",
"(",
"*",
"copied_items",
")",
"print",
"(",
"\"Skipped copy of these items from the document catalog:\\n \"",
",",
"end",
"=",
"\"\"",
")",
"print",
"(",
"*",
"skipped_items",
")",
"except",
"(",
"KeyboardInterrupt",
",",
"EOFError",
")",
":",
"raise",
"except",
":",
"# Just catch any errors here; don't know which might be raised.",
"# On exception just warn and get a new PdfFileWriter object, to be safe.",
"print",
"(",
"\"\\nWarning: The document catalog data could not be copied to the\"",
"\"\\nnew, cropped document. Try fixing the PDF document using\"",
"\"\\n'--gsFix' if you have Ghostscript installed.\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"output_doc",
"=",
"PdfFileWriter",
"(",
")",
"#output_doc.appendPagesFromReader(input_doc) # Works, but wait and test more.",
"for",
"page",
"in",
"[",
"input_doc",
".",
"getPage",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"input_doc",
".",
"getNumPages",
"(",
")",
")",
"]",
":",
"output_doc",
".",
"addPage",
"(",
"page",
")",
"tmp_output_doc",
"=",
"PdfFileWriter",
"(",
")",
"#tmp_output_doc.appendPagesFromReader(tmp_input_doc) # Works, but test more.",
"for",
"page",
"in",
"[",
"tmp_input_doc",
".",
"getPage",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"tmp_input_doc",
".",
"getNumPages",
"(",
")",
")",
"]",
":",
"tmp_output_doc",
".",
"addPage",
"(",
"page",
")",
"##",
"## Copy the metadata from input_doc to output_doc, modifying the Producer string",
"## if this program didn't already set it. Get bool for whether this program",
"## cropped the document already.",
"##",
"already_cropped_by_this_program",
"=",
"set_cropped_metadata",
"(",
"input_doc",
",",
"output_doc",
",",
"metadata_info",
")",
"return",
"output_doc",
",",
"tmp_output_doc",
",",
"already_cropped_by_this_program"
] |
Create the output `PdfFileWriter` objects and copy over the relevant info.
|
[
"Create",
"the",
"output",
"PdfFileWriter",
"objects",
"and",
"copy",
"over",
"the",
"relevant",
"info",
"."
] |
55aca874613750ebf4ae69fd8851bdbb7696d6ac
|
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/main_pdfCropMargins.py#L564-L689
|
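The whitelist/blacklist filtering inside the catalog-copy loop above can be read as a single predicate; a sketch with the same semantics:

    def should_copy_catalog_key(key, whitelist, blacklist):
        # The page tree is always rebuilt, so it is never copied.
        if key == "/Pages":
            return False
        # Whitelisted keys always pass; otherwise the blacklist applies.
        if whitelist != ["ALL"] and key not in whitelist:
            if blacklist == ["ALL"] or key in blacklist:
                return False
        return True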
13,822
|
miracle2k/flask-assets
|
src/flask_assets.py
|
FlaskConfigStorage.setdefault
|
def setdefault(self, key, value):
"""We may not always be connected to an app, but we still need
to provide a way for the base environment to set its defaults.
"""
try:
super(FlaskConfigStorage, self).setdefault(key, value)
except RuntimeError:
self._defaults.__setitem__(key, value)
|
python
|
def setdefault(self, key, value):
"""We may not always be connected to an app, but we still need
to provide a way for the base environment to set its defaults.
"""
try:
super(FlaskConfigStorage, self).setdefault(key, value)
except RuntimeError:
self._defaults.__setitem__(key, value)
|
[
"def",
"setdefault",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"try",
":",
"super",
"(",
"FlaskConfigStorage",
",",
"self",
")",
".",
"setdefault",
"(",
"key",
",",
"value",
")",
"except",
"RuntimeError",
":",
"self",
".",
"_defaults",
".",
"__setitem__",
"(",
"key",
",",
"value",
")"
] |
We may not always be connected to an app, but we still need
to provide a way for the base environment to set its defaults.
|
[
"We",
"may",
"not",
"always",
"be",
"connected",
"to",
"an",
"app",
"but",
"we",
"still",
"need",
"to",
"provide",
"a",
"way",
"to",
"the",
"base",
"environment",
"to",
"set",
"it",
"s",
"defaults",
"."
] |
ea9ff985bc96b79edb12ad4bed69403173f75562
|
https://github.com/miracle2k/flask-assets/blob/ea9ff985bc96b79edb12ad4bed69403173f75562/src/flask_assets.py#L75-L82
|
13,823
|
miracle2k/flask-assets
|
src/flask_assets.py
|
Environment._app
|
def _app(self):
"""The application object to work with; this is either the app
that we have been bound to, or the current application.
"""
if self.app is not None:
return self.app
ctx = _request_ctx_stack.top
if ctx is not None:
return ctx.app
try:
from flask import _app_ctx_stack
app_ctx = _app_ctx_stack.top
if app_ctx is not None:
return app_ctx.app
except ImportError:
pass
raise RuntimeError('assets instance not bound to an application, '+
'and no application in current context')
|
python
|
def _app(self):
"""The application object to work with; this is either the app
that we have been bound to, or the current application.
"""
if self.app is not None:
return self.app
ctx = _request_ctx_stack.top
if ctx is not None:
return ctx.app
try:
from flask import _app_ctx_stack
app_ctx = _app_ctx_stack.top
if app_ctx is not None:
return app_ctx.app
except ImportError:
pass
raise RuntimeError('assets instance not bound to an application, '+
'and no application in current context')
|
[
"def",
"_app",
"(",
"self",
")",
":",
"if",
"self",
".",
"app",
"is",
"not",
"None",
":",
"return",
"self",
".",
"app",
"ctx",
"=",
"_request_ctx_stack",
".",
"top",
"if",
"ctx",
"is",
"not",
"None",
":",
"return",
"ctx",
".",
"app",
"try",
":",
"from",
"flask",
"import",
"_app_ctx_stack",
"app_ctx",
"=",
"_app_ctx_stack",
".",
"top",
"if",
"app_ctx",
"is",
"not",
"None",
":",
"return",
"app_ctx",
".",
"app",
"except",
"ImportError",
":",
"pass",
"raise",
"RuntimeError",
"(",
"'assets instance not bound to an application, '",
"+",
"'and no application in current context'",
")"
] |
The application object to work with; this is either the app
that we have been bound to, or the current application.
|
[
"The",
"application",
"object",
"to",
"work",
"with",
";",
"this",
"is",
"either",
"the",
"app",
"that",
"we",
"have",
"been",
"bound",
"to",
"or",
"the",
"current",
"application",
"."
] |
ea9ff985bc96b79edb12ad4bed69403173f75562
|
https://github.com/miracle2k/flask-assets/blob/ea9ff985bc96b79edb12ad4bed69403173f75562/src/flask_assets.py#L310-L330
|
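In practice the lookup order above means a bound environment never consults the Flask context stacks; a usage sketch:

    from flask import Flask
    from flask_assets import Environment

    app = Flask(__name__)
    assets = Environment(app)   # bound: _app returns this app directly
    # An unbound Environment() would instead resolve the app from the active
    # request or application context, raising RuntimeError outside of both.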
13,824
|
miracle2k/flask-assets
|
src/flask_assets.py
|
Environment.from_yaml
|
def from_yaml(self, path):
"""Register bundles from a YAML configuration file"""
bundles = YAMLLoader(path).load_bundles()
for name in bundles:
self.register(name, bundles[name])
|
python
|
def from_yaml(self, path):
"""Register bundles from a YAML configuration file"""
bundles = YAMLLoader(path).load_bundles()
for name in bundles:
self.register(name, bundles[name])
|
[
"def",
"from_yaml",
"(",
"self",
",",
"path",
")",
":",
"bundles",
"=",
"YAMLLoader",
"(",
"path",
")",
".",
"load_bundles",
"(",
")",
"for",
"name",
"in",
"bundles",
":",
"self",
".",
"register",
"(",
"name",
",",
"bundles",
"[",
"name",
"]",
")"
] |
Register bundles from a YAML configuration file
|
[
"Register",
"bundles",
"from",
"a",
"YAML",
"configuration",
"file"
] |
ea9ff985bc96b79edb12ad4bed69403173f75562
|
https://github.com/miracle2k/flask-assets/blob/ea9ff985bc96b79edb12ad4bed69403173f75562/src/flask_assets.py#L361-L365
|
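A usage sketch (bundle name and file paths are hypothetical; the YAML layout follows the webassets loader format):

    from flask import Flask
    from flask_assets import Environment

    app = Flask(__name__)
    assets = Environment(app)
    # assets.yaml might contain:
    #   css_all:
    #     contents: [css/style.css, css/extra.css]
    #     output: gen/packed.css
    assets.from_yaml("assets.yaml")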
13,825
|
miracle2k/flask-assets
|
src/flask_assets.py
|
Environment.from_module
|
def from_module(self, path):
"""Register bundles from a Python module"""
bundles = PythonLoader(path).load_bundles()
for name in bundles:
self.register(name, bundles[name])
|
python
|
def from_module(self, path):
"""Register bundles from a Python module"""
bundles = PythonLoader(path).load_bundles()
for name in bundles:
self.register(name, bundles[name])
|
[
"def",
"from_module",
"(",
"self",
",",
"path",
")",
":",
"bundles",
"=",
"PythonLoader",
"(",
"path",
")",
".",
"load_bundles",
"(",
")",
"for",
"name",
"in",
"bundles",
":",
"self",
".",
"register",
"(",
"name",
",",
"bundles",
"[",
"name",
"]",
")"
] |
Register bundles from a Python module
|
[
"Register",
"bundles",
"from",
"a",
"Python",
"module"
] |
ea9ff985bc96b79edb12ad4bed69403173f75562
|
https://github.com/miracle2k/flask-assets/blob/ea9ff985bc96b79edb12ad4bed69403173f75562/src/flask_assets.py#L367-L371
|
13,826
|
persephone-tools/persephone
|
persephone/__init__.py
|
handle_unhandled_exception
|
def handle_unhandled_exception(exc_type, exc_value, exc_traceback):
"""Handler for unhandled exceptions that will write to the logs"""
if issubclass(exc_type, KeyboardInterrupt):
# call the default excepthook saved at __excepthook__
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger = logging.getLogger(__name__) # type: ignore
logger.critical("Unhandled exception", exc_info=(exc_type, exc_value, exc_traceback))
|
python
|
def handle_unhandled_exception(exc_type, exc_value, exc_traceback):
"""Handler for unhandled exceptions that will write to the logs"""
if issubclass(exc_type, KeyboardInterrupt):
# call the default excepthook saved at __excepthook__
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger = logging.getLogger(__name__) # type: ignore
logger.critical("Unhandled exception", exc_info=(exc_type, exc_value, exc_traceback))
|
[
"def",
"handle_unhandled_exception",
"(",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
")",
":",
"if",
"issubclass",
"(",
"exc_type",
",",
"KeyboardInterrupt",
")",
":",
"# call the default excepthook saved at __excepthook__",
"sys",
".",
"__excepthook__",
"(",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
")",
"return",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"# type: ignore",
"logger",
".",
"critical",
"(",
"\"Unhandled exception\"",
",",
"exc_info",
"=",
"(",
"exc_type",
",",
"exc_value",
",",
"exc_traceback",
")",
")"
] |
Handler for unhandled exceptions that will write to the logs
|
[
"Handler",
"for",
"unhandled",
"exceptions",
"that",
"will",
"write",
"to",
"the",
"logs"
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/__init__.py#L6-L13
|
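A handler like this only takes effect once installed as the process-wide excepthook; a minimal sketch (the logging setup and file name are illustrative, not from the source):

import logging
import sys

logging.basicConfig(filename="unhandled.log", level=logging.INFO)

# Route every uncaught exception through the handler defined above;
# KeyboardInterrupt still falls through to the default hook.
sys.excepthook = handle_unhandled_exception

raise RuntimeError("this is logged as CRITICAL before the process exits")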
13,827
|
persephone-tools/persephone
|
persephone/utterance.py
|
write_transcriptions
|
def write_transcriptions(utterances: List[Utterance],
tgt_dir: Path, ext: str, lazy: bool) -> None:
""" Write the utterance transcriptions to files in the tgt_dir. Is lazy and
checks if the file already exists.
Args:
utterances: A list of Utterance objects to be written.
tgt_dir: The directory in which to write the text of the utterances,
one file per utterance.
ext: The file extension for the utterances. Typically something like
"phonemes", or "phonemes_and_tones".
"""
tgt_dir.mkdir(parents=True, exist_ok=True)
for utter in utterances:
out_path = tgt_dir / "{}.{}".format(utter.prefix, ext)
if lazy and out_path.is_file():
continue
with out_path.open("w") as f:
print(utter.text, file=f)
|
python
|
def write_transcriptions(utterances: List[Utterance],
tgt_dir: Path, ext: str, lazy: bool) -> None:
""" Write the utterance transcriptions to files in the tgt_dir. Is lazy and
checks if the file already exists.
Args:
utterances: A list of Utterance objects to be written.
tgt_dir: The directory in which to write the text of the utterances,
one file per utterance.
ext: The file extension for the utterances. Typically something like
"phonemes", or "phonemes_and_tones".
"""
tgt_dir.mkdir(parents=True, exist_ok=True)
for utter in utterances:
out_path = tgt_dir / "{}.{}".format(utter.prefix, ext)
if lazy and out_path.is_file():
continue
with out_path.open("w") as f:
print(utter.text, file=f)
|
[
"def",
"write_transcriptions",
"(",
"utterances",
":",
"List",
"[",
"Utterance",
"]",
",",
"tgt_dir",
":",
"Path",
",",
"ext",
":",
"str",
",",
"lazy",
":",
"bool",
")",
"->",
"None",
":",
"tgt_dir",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"for",
"utter",
"in",
"utterances",
":",
"out_path",
"=",
"tgt_dir",
"/",
"\"{}.{}\"",
".",
"format",
"(",
"utter",
".",
"prefix",
",",
"ext",
")",
"if",
"lazy",
"and",
"out_path",
".",
"is_file",
"(",
")",
":",
"continue",
"with",
"out_path",
".",
"open",
"(",
"\"w\"",
")",
"as",
"f",
":",
"print",
"(",
"utter",
".",
"text",
",",
"file",
"=",
"f",
")"
] |
Write the utterance transcriptions to files in the tgt_dir. Is lazy and
checks if the file already exists.
Args:
utterances: A list of Utterance objects to be written.
tgt_dir: The directory in which to write the text of the utterances,
one file per utterance.
ext: The file extension for the utterances. Typically something like
"phonemes", or "phonemes_and_tones".
|
[
"Write",
"the",
"utterance",
"transcriptions",
"to",
"files",
"in",
"the",
"tgt_dir",
".",
"Is",
"lazy",
"and",
"checks",
"if",
"the",
"file",
"already",
"exists",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utterance.py#L45-L65
|
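A usage sketch: write_transcriptions only touches the prefix and text attributes, so a simple stand-in object works for illustration (the sample values are invented):

from pathlib import Path
from types import SimpleNamespace

utters = [
    SimpleNamespace(prefix="story01.0", text="o l a"),
    SimpleNamespace(prefix="story01.1", text="m a n o"),
]
# Writes label/story01.0.phonemes and label/story01.1.phonemes;
# with lazy=True, files that already exist are left untouched.
write_transcriptions(utters, Path("label"), "phonemes", lazy=True)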
13,828
|
persephone-tools/persephone
|
persephone/utterance.py
|
remove_duplicates
|
def remove_duplicates(utterances: List[Utterance]) -> List[Utterance]:
""" Removes utterances with the same start_time, end_time and text. Other
metadata isn't considered.
"""
filtered_utters = []
utter_set = set() # type: Set[Tuple[int, int, str]]
for utter in utterances:
if (utter.start_time, utter.end_time, utter.text) in utter_set:
continue
filtered_utters.append(utter)
utter_set.add((utter.start_time, utter.end_time, utter.text))
return filtered_utters
|
python
|
def remove_duplicates(utterances: List[Utterance]) -> List[Utterance]:
""" Removes utterances with the same start_time, end_time and text. Other
metadata isn't considered.
"""
filtered_utters = []
utter_set = set() # type: Set[Tuple[int, int, str]]
for utter in utterances:
if (utter.start_time, utter.end_time, utter.text) in utter_set:
continue
filtered_utters.append(utter)
utter_set.add((utter.start_time, utter.end_time, utter.text))
return filtered_utters
|
[
"def",
"remove_duplicates",
"(",
"utterances",
":",
"List",
"[",
"Utterance",
"]",
")",
"->",
"List",
"[",
"Utterance",
"]",
":",
"filtered_utters",
"=",
"[",
"]",
"utter_set",
"=",
"set",
"(",
")",
"# type: Set[Tuple[int, int, str]]",
"for",
"utter",
"in",
"utterances",
":",
"if",
"(",
"utter",
".",
"start_time",
",",
"utter",
".",
"end_time",
",",
"utter",
".",
"text",
")",
"in",
"utter_set",
":",
"continue",
"filtered_utters",
".",
"append",
"(",
"utter",
")",
"utter_set",
".",
"add",
"(",
"(",
"utter",
".",
"start_time",
",",
"utter",
".",
"end_time",
",",
"utter",
".",
"text",
")",
")",
"return",
"filtered_utters"
] |
Removes utterances with the same start_time, end_time and text. Other
metadata isn't considered.
|
[
"Removes",
"utterances",
"with",
"the",
"same",
"start_time",
"end_time",
"and",
"text",
".",
"Other",
"metadata",
"isn",
"t",
"considered",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utterance.py#L67-L80
|
13,829
|
persephone-tools/persephone
|
persephone/utterance.py
|
make_speaker_utters
|
def make_speaker_utters(utterances: List[Utterance]) -> Dict[str, List[Utterance]]:
""" Creates a dictionary mapping from speakers to their utterances. """
speaker_utters = defaultdict(list) # type: DefaultDict[str, List[Utterance]]
for utter in utterances:
speaker_utters[utter.speaker].append(utter)
return speaker_utters
|
python
|
def make_speaker_utters(utterances: List[Utterance]) -> Dict[str, List[Utterance]]:
""" Creates a dictionary mapping from speakers to their utterances. """
speaker_utters = defaultdict(list) # type: DefaultDict[str, List[Utterance]]
for utter in utterances:
speaker_utters[utter.speaker].append(utter)
return speaker_utters
|
[
"def",
"make_speaker_utters",
"(",
"utterances",
":",
"List",
"[",
"Utterance",
"]",
")",
"->",
"Dict",
"[",
"str",
",",
"List",
"[",
"Utterance",
"]",
"]",
":",
"speaker_utters",
"=",
"defaultdict",
"(",
"list",
")",
"# type: DefaultDict[str, List[Utterance]]",
"for",
"utter",
"in",
"utterances",
":",
"speaker_utters",
"[",
"utter",
".",
"speaker",
"]",
".",
"append",
"(",
"utter",
")",
"return",
"speaker_utters"
] |
Creates a dictionary mapping from speakers to their utterances.
|
[
"Creates",
"a",
"dictionary",
"mapping",
"from",
"speakers",
"to",
"their",
"utterances",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utterance.py#L106-L113
|
13,830
|
persephone-tools/persephone
|
persephone/utterance.py
|
remove_too_short
|
def remove_too_short(utterances: List[Utterance],
_winlen=25, winstep=10) -> List[Utterance]:
""" Removes utterances that will probably have issues with CTC because of
the number of frames being less than the number of tokens in the
transcription. Assuming char tokenization to minimize false negatives.
"""
def is_too_short(utterance: Utterance) -> bool:
charlen = len(utterance.text)
if (duration(utterance) / winstep) < charlen:
return True
else:
return False
return [utter for utter in utterances if not is_too_short(utter)]
|
python
|
def remove_too_short(utterances: List[Utterance],
_winlen=25, winstep=10) -> List[Utterance]:
""" Removes utterances that will probably have issues with CTC because of
the number of frames being less than the number of tokens in the
transcription. Assuming char tokenization to minimize false negatives.
"""
def is_too_short(utterance: Utterance) -> bool:
charlen = len(utterance.text)
if (duration(utterance) / winstep) < charlen:
return True
else:
return False
return [utter for utter in utterances if not is_too_short(utter)]
|
[
"def",
"remove_too_short",
"(",
"utterances",
":",
"List",
"[",
"Utterance",
"]",
",",
"_winlen",
"=",
"25",
",",
"winstep",
"=",
"10",
")",
"->",
"List",
"[",
"Utterance",
"]",
":",
"def",
"is_too_short",
"(",
"utterance",
":",
"Utterance",
")",
"->",
"bool",
":",
"charlen",
"=",
"len",
"(",
"utterance",
".",
"text",
")",
"if",
"(",
"duration",
"(",
"utterance",
")",
"/",
"winstep",
")",
"<",
"charlen",
":",
"return",
"True",
"else",
":",
"return",
"False",
"return",
"[",
"utter",
"for",
"utter",
"in",
"utterances",
"if",
"not",
"is_too_short",
"(",
"utter",
")",
"]"
] |
Removes utterances that will probably have issues with CTC because of
the number of frames being less than the number of tokens in the
transcription. Assuming char tokenization to minimize false negatives.
|
[
"Removes",
"utterances",
"that",
"will",
"probably",
"have",
"issues",
"with",
"CTC",
"because",
"of",
"the",
"number",
"of",
"frames",
"being",
"less",
"than",
"the",
"number",
"of",
"tokens",
"in",
"the",
"transcription",
".",
"Assuming",
"char",
"tokenization",
"to",
"minimize",
"false",
"negatives",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utterance.py#L128-L141
|
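The arithmetic behind the filter, assuming duration() returns milliseconds (consistent with the 10 ms default winstep):

# CTC emits at most one label per frame, so an utterance needs at
# least len(text) frames for a transcription to be reachable.
duration_ms = 300
winstep_ms = 10
frames = duration_ms / winstep_ms   # ~30 frames available
charlen = 35                        # labels required
print(frames < charlen)             # True -> utterance is dropped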
13,831
|
persephone-tools/persephone
|
persephone/distance.py
|
min_edit_distance
|
def min_edit_distance(
source: Sequence[T], target: Sequence[T],
ins_cost: Callable[..., int] = lambda _x: 1,
del_cost: Callable[..., int] = lambda _x: 1,
sub_cost: Callable[..., int] = lambda x, y: 0 if x == y else 1) -> int:
"""Calculates the minimum edit distance between two sequences.
Uses the Levenshtein weighting as a default, but offers keyword arguments
to supply functions to measure the costs for editing with different
elements.
Args:
ins_cost: A function describing the cost of inserting a given char
del_cost: A function describing the cost of deleting a given char
        sub_cost: A function describing the cost of substituting one char for another
Returns:
The edit distance between the two input sequences.
"""
# Initialize an m+1 by n+1 array. Note that the strings start from index 1,
# with index 0 being used to denote the empty string.
n = len(target)
m = len(source)
distance = np.zeros((m+1, n+1), dtype=np.int16)
# Initialize the zeroth row and column to be the distance from the empty
# string.
for i in range(1, m+1):
distance[i, 0] = distance[i-1, 0] + ins_cost(source[i-1])
for j in range(1, n+1):
distance[0, j] = distance[0, j-1] + ins_cost(target[j-1])
# Do the dynamic programming to fill in the matrix with the edit distances.
for j in range(1, n+1):
for i in range(1, m+1):
distance[i, j] = min(
distance[i-1, j] + ins_cost(source[i-1]),
distance[i-1, j-1] + sub_cost(source[i-1],target[j-1]),
distance[i, j-1] + del_cost(target[j-1]))
return int(distance[len(source), len(target)])
|
python
|
def min_edit_distance(
source: Sequence[T], target: Sequence[T],
ins_cost: Callable[..., int] = lambda _x: 1,
del_cost: Callable[..., int] = lambda _x: 1,
sub_cost: Callable[..., int] = lambda x, y: 0 if x == y else 1) -> int:
"""Calculates the minimum edit distance between two sequences.
Uses the Levenshtein weighting as a default, but offers keyword arguments
to supply functions to measure the costs for editing with different
elements.
Args:
ins_cost: A function describing the cost of inserting a given char
del_cost: A function describing the cost of deleting a given char
        sub_cost: A function describing the cost of substituting one char for another
Returns:
The edit distance between the two input sequences.
"""
# Initialize an m+1 by n+1 array. Note that the strings start from index 1,
# with index 0 being used to denote the empty string.
n = len(target)
m = len(source)
distance = np.zeros((m+1, n+1), dtype=np.int16)
# Initialize the zeroth row and column to be the distance from the empty
# string.
for i in range(1, m+1):
distance[i, 0] = distance[i-1, 0] + ins_cost(source[i-1])
for j in range(1, n+1):
distance[0, j] = distance[0, j-1] + ins_cost(target[j-1])
# Do the dynamic programming to fill in the matrix with the edit distances.
for j in range(1, n+1):
for i in range(1, m+1):
distance[i, j] = min(
distance[i-1, j] + ins_cost(source[i-1]),
distance[i-1, j-1] + sub_cost(source[i-1],target[j-1]),
distance[i, j-1] + del_cost(target[j-1]))
return int(distance[len(source), len(target)])
|
[
"def",
"min_edit_distance",
"(",
"source",
":",
"Sequence",
"[",
"T",
"]",
",",
"target",
":",
"Sequence",
"[",
"T",
"]",
",",
"ins_cost",
":",
"Callable",
"[",
"...",
",",
"int",
"]",
"=",
"lambda",
"_x",
":",
"1",
",",
"del_cost",
":",
"Callable",
"[",
"...",
",",
"int",
"]",
"=",
"lambda",
"_x",
":",
"1",
",",
"sub_cost",
":",
"Callable",
"[",
"...",
",",
"int",
"]",
"=",
"lambda",
"x",
",",
"y",
":",
"0",
"if",
"x",
"==",
"y",
"else",
"1",
")",
"->",
"int",
":",
"# Initialize an m+1 by n+1 array. Note that the strings start from index 1,",
"# with index 0 being used to denote the empty string.",
"n",
"=",
"len",
"(",
"target",
")",
"m",
"=",
"len",
"(",
"source",
")",
"distance",
"=",
"np",
".",
"zeros",
"(",
"(",
"m",
"+",
"1",
",",
"n",
"+",
"1",
")",
",",
"dtype",
"=",
"np",
".",
"int16",
")",
"# Initialize the zeroth row and column to be the distance from the empty",
"# string.",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"m",
"+",
"1",
")",
":",
"distance",
"[",
"i",
",",
"0",
"]",
"=",
"distance",
"[",
"i",
"-",
"1",
",",
"0",
"]",
"+",
"ins_cost",
"(",
"source",
"[",
"i",
"-",
"1",
"]",
")",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
":",
"distance",
"[",
"0",
",",
"j",
"]",
"=",
"distance",
"[",
"0",
",",
"j",
"-",
"1",
"]",
"+",
"ins_cost",
"(",
"target",
"[",
"j",
"-",
"1",
"]",
")",
"# Do the dynamic programming to fill in the matrix with the edit distances.",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
":",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"m",
"+",
"1",
")",
":",
"distance",
"[",
"i",
",",
"j",
"]",
"=",
"min",
"(",
"distance",
"[",
"i",
"-",
"1",
",",
"j",
"]",
"+",
"ins_cost",
"(",
"source",
"[",
"i",
"-",
"1",
"]",
")",
",",
"distance",
"[",
"i",
"-",
"1",
",",
"j",
"-",
"1",
"]",
"+",
"sub_cost",
"(",
"source",
"[",
"i",
"-",
"1",
"]",
",",
"target",
"[",
"j",
"-",
"1",
"]",
")",
",",
"distance",
"[",
"i",
",",
"j",
"-",
"1",
"]",
"+",
"del_cost",
"(",
"target",
"[",
"j",
"-",
"1",
"]",
")",
")",
"return",
"int",
"(",
"distance",
"[",
"len",
"(",
"source",
")",
",",
"len",
"(",
"target",
")",
"]",
")"
] |
Calculates the minimum edit distance between two sequences.
Uses the Levenshtein weighting as a default, but offers keyword arguments
to supply functions to measure the costs for editing with different
elements.
Args:
ins_cost: A function describing the cost of inserting a given char
del_cost: A function describing the cost of deleting a given char
sub_cost: A function describing the cost of substituting one char for another
Returns:
The edit distance between the two input sequences.
|
[
"Calculates",
"the",
"minimum",
"edit",
"distance",
"between",
"two",
"sequences",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/distance.py#L9-L51
|
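A quick sanity check of min_edit_distance above; the case-insensitive substitution cost is an invented illustration of the keyword arguments:

# Default unit costs reproduce plain Levenshtein distance.
assert min_edit_distance("kitten", "sitting") == 3

# Treat substitutions that only differ in letter case as free.
def case_free(x, y):
    return 0 if x.lower() == y.lower() else 1

assert min_edit_distance("Hello", "hello", sub_cost=case_free) == 0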
13,832
|
persephone-tools/persephone
|
persephone/distance.py
|
word_error_rate
|
def word_error_rate(ref: Sequence[T], hyp: Sequence[T]) -> float:
""" Calculate the word error rate of a sequence against a reference.
Args:
ref: The gold-standard reference sequence
hyp: The hypothesis to be evaluated against the reference.
Returns:
The word error rate of the supplied hypothesis with respect to the
reference string.
Raises:
persephone.exceptions.EmptyReferenceException: If the length of the reference sequence is 0.
"""
if len(ref) == 0:
raise EmptyReferenceException(
"Cannot calculating word error rate against a length 0 "\
"reference sequence.")
distance = min_edit_distance(ref, hyp)
return 100 * float(distance) / len(ref)
|
python
|
def word_error_rate(ref: Sequence[T], hyp: Sequence[T]) -> float:
""" Calculate the word error rate of a sequence against a reference.
Args:
ref: The gold-standard reference sequence
hyp: The hypothesis to be evaluated against the reference.
Returns:
The word error rate of the supplied hypothesis with respect to the
reference string.
Raises:
persephone.exceptions.EmptyReferenceException: If the length of the reference sequence is 0.
"""
if len(ref) == 0:
raise EmptyReferenceException(
"Cannot calculating word error rate against a length 0 "\
"reference sequence.")
distance = min_edit_distance(ref, hyp)
return 100 * float(distance) / len(ref)
|
[
"def",
"word_error_rate",
"(",
"ref",
":",
"Sequence",
"[",
"T",
"]",
",",
"hyp",
":",
"Sequence",
"[",
"T",
"]",
")",
"->",
"float",
":",
"if",
"len",
"(",
"ref",
")",
"==",
"0",
":",
"raise",
"EmptyReferenceException",
"(",
"\"Cannot calculating word error rate against a length 0 \"",
"\"reference sequence.\"",
")",
"distance",
"=",
"min_edit_distance",
"(",
"ref",
",",
"hyp",
")",
"return",
"100",
"*",
"float",
"(",
"distance",
")",
"/",
"len",
"(",
"ref",
")"
] |
Calculate the word error rate of a sequence against a reference.
Args:
ref: The gold-standard reference sequence
hyp: The hypothesis to be evaluated against the reference.
Returns:
The word error rate of the supplied hypothesis with respect to the
reference string.
Raises:
persephone.exceptions.EmptyReferenceException: If the length of the reference sequence is 0.
|
[
"Calculate",
"the",
"word",
"error",
"rate",
"of",
"a",
"sequence",
"against",
"a",
"reference",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/distance.py#L178-L200
|
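A worked example using the two functions above (note the result is a percentage, not a fraction):

ref = "the cat sat on the mat".split()
hyp = "the cat sat on mat".split()
# One deletion against six reference words: 100 * 1 / 6.
print(word_error_rate(ref, hyp))   # -> 16.666...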
13,833
|
persephone-tools/persephone
|
persephone/model.py
|
dense_to_human_readable
|
def dense_to_human_readable(dense_repr: Sequence[Sequence[int]], index_to_label: Dict[int, str]) -> List[List[str]]:
""" Converts a dense representation of model decoded output into human
readable, using a mapping from indices to labels. """
transcripts = []
for dense_r in dense_repr:
non_empty_phonemes = [phn_i for phn_i in dense_r if phn_i != 0]
transcript = [index_to_label[index] for index in non_empty_phonemes]
transcripts.append(transcript)
return transcripts
|
python
|
def dense_to_human_readable(dense_repr: Sequence[Sequence[int]], index_to_label: Dict[int, str]) -> List[List[str]]:
""" Converts a dense representation of model decoded output into human
readable, using a mapping from indices to labels. """
transcripts = []
for dense_r in dense_repr:
non_empty_phonemes = [phn_i for phn_i in dense_r if phn_i != 0]
transcript = [index_to_label[index] for index in non_empty_phonemes]
transcripts.append(transcript)
return transcripts
|
[
"def",
"dense_to_human_readable",
"(",
"dense_repr",
":",
"Sequence",
"[",
"Sequence",
"[",
"int",
"]",
"]",
",",
"index_to_label",
":",
"Dict",
"[",
"int",
",",
"str",
"]",
")",
"->",
"List",
"[",
"List",
"[",
"str",
"]",
"]",
":",
"transcripts",
"=",
"[",
"]",
"for",
"dense_r",
"in",
"dense_repr",
":",
"non_empty_phonemes",
"=",
"[",
"phn_i",
"for",
"phn_i",
"in",
"dense_r",
"if",
"phn_i",
"!=",
"0",
"]",
"transcript",
"=",
"[",
"index_to_label",
"[",
"index",
"]",
"for",
"index",
"in",
"non_empty_phonemes",
"]",
"transcripts",
".",
"append",
"(",
"transcript",
")",
"return",
"transcripts"
] |
Converts a dense representation of model decoded output into human
readable, using a mapping from indices to labels.
|
[
"Converts",
"a",
"dense",
"representation",
"of",
"model",
"decoded",
"output",
"into",
"human",
"readable",
"using",
"a",
"mapping",
"from",
"indices",
"to",
"labels",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/model.py#L36-L46
|
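A small illustration of the mapping above; index 0 is treated as empty padding and stripped:

dense = [[3, 1, 0, 0], [2, 2, 1, 0]]
index_to_label = {1: "a", 2: "b", 3: "c"}
print(dense_to_human_readable(dense, index_to_label))
# -> [['c', 'a'], ['b', 'b', 'a']]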
13,834
|
persephone-tools/persephone
|
persephone/model.py
|
decode
|
def decode(model_path_prefix: Union[str, Path],
input_paths: Sequence[Path],
label_set: Set[str],
*,
feature_type: str = "fbank", #TODO Make this None and infer feature_type from dimension of NN input layer.
batch_size: int = 64,
feat_dir: Optional[Path]=None,
batch_x_name: str="batch_x:0",
batch_x_lens_name: str="batch_x_lens:0",
output_name: str="hyp_dense_decoded:0") -> List[List[str]]:
"""Use an existing tensorflow model that exists on disk to decode
WAV files.
Args:
model_path_prefix: The path to the saved tensorflow model.
This is the full prefix to the ".ckpt" file.
input_paths: A sequence of `pathlib.Path`s to WAV files to put through
the model provided.
label_set: The set of all the labels this model uses.
feature_type: The type of features this model uses.
Note that this MUST match the type of features that the
model was trained on initially.
feat_dir: Any files that require preprocessing will be
saved to the path specified by this.
batch_x_name: The name of the tensorflow input for batch_x
batch_x_lens_name: The name of the tensorflow input for batch_x_lens
output_name: The name of the tensorflow output
"""
if not input_paths:
raise PersephoneException("No untranscribed WAVs to transcribe.")
model_path_prefix = str(model_path_prefix)
for p in input_paths:
if not p.exists():
raise PersephoneException(
"The WAV file path {} does not exist".format(p)
)
preprocessed_file_paths = []
for p in input_paths:
prefix = p.stem
# Check the "feat" directory as per the filesystem conventions of a Corpus
feature_file_ext = ".{}.npy".format(feature_type)
conventional_npy_location = p.parent.parent / "feat" / (Path(prefix + feature_file_ext))
if conventional_npy_location.exists():
# don't need to preprocess it
preprocessed_file_paths.append(conventional_npy_location)
else:
if not feat_dir:
feat_dir = p.parent.parent / "feat"
if not feat_dir.is_dir():
os.makedirs(str(feat_dir))
mono16k_wav_path = feat_dir / "{}.wav".format(prefix)
feat_path = feat_dir / "{}.{}.npy".format(prefix, feature_type)
feat_extract.convert_wav(p, mono16k_wav_path)
preprocessed_file_paths.append(feat_path)
    # preprocess the files that weren't found in the features directory
# as per the filesystem conventions
if feat_dir:
feat_extract.from_dir(feat_dir, feature_type)
fn_batches = utils.make_batches(preprocessed_file_paths, batch_size)
# Load the model and perform decoding.
metagraph = load_metagraph(model_path_prefix)
with tf.Session() as sess:
metagraph.restore(sess, model_path_prefix)
for fn_batch in fn_batches:
batch_x, batch_x_lens = utils.load_batch_x(fn_batch)
# TODO These placeholder names should be a backup if names from a newer
# naming scheme aren't present. Otherwise this won't generalize to
# different architectures.
feed_dict = {batch_x_name: batch_x,
batch_x_lens_name: batch_x_lens}
dense_decoded = sess.run(output_name, feed_dict=feed_dict)
# Create a human-readable representation of the decoded.
indices_to_labels = labels.make_indices_to_labels(label_set)
human_readable = dense_to_human_readable(dense_decoded, indices_to_labels)
return human_readable
|
python
|
def decode(model_path_prefix: Union[str, Path],
input_paths: Sequence[Path],
label_set: Set[str],
*,
feature_type: str = "fbank", #TODO Make this None and infer feature_type from dimension of NN input layer.
batch_size: int = 64,
feat_dir: Optional[Path]=None,
batch_x_name: str="batch_x:0",
batch_x_lens_name: str="batch_x_lens:0",
output_name: str="hyp_dense_decoded:0") -> List[List[str]]:
"""Use an existing tensorflow model that exists on disk to decode
WAV files.
Args:
model_path_prefix: The path to the saved tensorflow model.
This is the full prefix to the ".ckpt" file.
input_paths: A sequence of `pathlib.Path`s to WAV files to put through
the model provided.
label_set: The set of all the labels this model uses.
feature_type: The type of features this model uses.
Note that this MUST match the type of features that the
model was trained on initially.
feat_dir: Any files that require preprocessing will be
saved to the path specified by this.
batch_x_name: The name of the tensorflow input for batch_x
batch_x_lens_name: The name of the tensorflow input for batch_x_lens
output_name: The name of the tensorflow output
"""
if not input_paths:
raise PersephoneException("No untranscribed WAVs to transcribe.")
model_path_prefix = str(model_path_prefix)
for p in input_paths:
if not p.exists():
raise PersephoneException(
"The WAV file path {} does not exist".format(p)
)
preprocessed_file_paths = []
for p in input_paths:
prefix = p.stem
# Check the "feat" directory as per the filesystem conventions of a Corpus
feature_file_ext = ".{}.npy".format(feature_type)
conventional_npy_location = p.parent.parent / "feat" / (Path(prefix + feature_file_ext))
if conventional_npy_location.exists():
# don't need to preprocess it
preprocessed_file_paths.append(conventional_npy_location)
else:
if not feat_dir:
feat_dir = p.parent.parent / "feat"
if not feat_dir.is_dir():
os.makedirs(str(feat_dir))
mono16k_wav_path = feat_dir / "{}.wav".format(prefix)
feat_path = feat_dir / "{}.{}.npy".format(prefix, feature_type)
feat_extract.convert_wav(p, mono16k_wav_path)
preprocessed_file_paths.append(feat_path)
    # preprocess the files that weren't found in the features directory
# as per the filesystem conventions
if feat_dir:
feat_extract.from_dir(feat_dir, feature_type)
fn_batches = utils.make_batches(preprocessed_file_paths, batch_size)
# Load the model and perform decoding.
metagraph = load_metagraph(model_path_prefix)
with tf.Session() as sess:
metagraph.restore(sess, model_path_prefix)
for fn_batch in fn_batches:
batch_x, batch_x_lens = utils.load_batch_x(fn_batch)
# TODO These placeholder names should be a backup if names from a newer
# naming scheme aren't present. Otherwise this won't generalize to
# different architectures.
feed_dict = {batch_x_name: batch_x,
batch_x_lens_name: batch_x_lens}
dense_decoded = sess.run(output_name, feed_dict=feed_dict)
# Create a human-readable representation of the decoded.
indices_to_labels = labels.make_indices_to_labels(label_set)
human_readable = dense_to_human_readable(dense_decoded, indices_to_labels)
return human_readable
|
[
"def",
"decode",
"(",
"model_path_prefix",
":",
"Union",
"[",
"str",
",",
"Path",
"]",
",",
"input_paths",
":",
"Sequence",
"[",
"Path",
"]",
",",
"label_set",
":",
"Set",
"[",
"str",
"]",
",",
"*",
",",
"feature_type",
":",
"str",
"=",
"\"fbank\"",
",",
"#TODO Make this None and infer feature_type from dimension of NN input layer.",
"batch_size",
":",
"int",
"=",
"64",
",",
"feat_dir",
":",
"Optional",
"[",
"Path",
"]",
"=",
"None",
",",
"batch_x_name",
":",
"str",
"=",
"\"batch_x:0\"",
",",
"batch_x_lens_name",
":",
"str",
"=",
"\"batch_x_lens:0\"",
",",
"output_name",
":",
"str",
"=",
"\"hyp_dense_decoded:0\"",
")",
"->",
"List",
"[",
"List",
"[",
"str",
"]",
"]",
":",
"if",
"not",
"input_paths",
":",
"raise",
"PersephoneException",
"(",
"\"No untranscribed WAVs to transcribe.\"",
")",
"model_path_prefix",
"=",
"str",
"(",
"model_path_prefix",
")",
"for",
"p",
"in",
"input_paths",
":",
"if",
"not",
"p",
".",
"exists",
"(",
")",
":",
"raise",
"PersephoneException",
"(",
"\"The WAV file path {} does not exist\"",
".",
"format",
"(",
"p",
")",
")",
"preprocessed_file_paths",
"=",
"[",
"]",
"for",
"p",
"in",
"input_paths",
":",
"prefix",
"=",
"p",
".",
"stem",
"# Check the \"feat\" directory as per the filesystem conventions of a Corpus",
"feature_file_ext",
"=",
"\".{}.npy\"",
".",
"format",
"(",
"feature_type",
")",
"conventional_npy_location",
"=",
"p",
".",
"parent",
".",
"parent",
"/",
"\"feat\"",
"/",
"(",
"Path",
"(",
"prefix",
"+",
"feature_file_ext",
")",
")",
"if",
"conventional_npy_location",
".",
"exists",
"(",
")",
":",
"# don't need to preprocess it",
"preprocessed_file_paths",
".",
"append",
"(",
"conventional_npy_location",
")",
"else",
":",
"if",
"not",
"feat_dir",
":",
"feat_dir",
"=",
"p",
".",
"parent",
".",
"parent",
"/",
"\"feat\"",
"if",
"not",
"feat_dir",
".",
"is_dir",
"(",
")",
":",
"os",
".",
"makedirs",
"(",
"str",
"(",
"feat_dir",
")",
")",
"mono16k_wav_path",
"=",
"feat_dir",
"/",
"\"{}.wav\"",
".",
"format",
"(",
"prefix",
")",
"feat_path",
"=",
"feat_dir",
"/",
"\"{}.{}.npy\"",
".",
"format",
"(",
"prefix",
",",
"feature_type",
")",
"feat_extract",
".",
"convert_wav",
"(",
"p",
",",
"mono16k_wav_path",
")",
"preprocessed_file_paths",
".",
"append",
"(",
"feat_path",
")",
"# preprocess the file that weren't found in the features directory",
"# as per the filesystem conventions",
"if",
"feat_dir",
":",
"feat_extract",
".",
"from_dir",
"(",
"feat_dir",
",",
"feature_type",
")",
"fn_batches",
"=",
"utils",
".",
"make_batches",
"(",
"preprocessed_file_paths",
",",
"batch_size",
")",
"# Load the model and perform decoding.",
"metagraph",
"=",
"load_metagraph",
"(",
"model_path_prefix",
")",
"with",
"tf",
".",
"Session",
"(",
")",
"as",
"sess",
":",
"metagraph",
".",
"restore",
"(",
"sess",
",",
"model_path_prefix",
")",
"for",
"fn_batch",
"in",
"fn_batches",
":",
"batch_x",
",",
"batch_x_lens",
"=",
"utils",
".",
"load_batch_x",
"(",
"fn_batch",
")",
"# TODO These placeholder names should be a backup if names from a newer",
"# naming scheme aren't present. Otherwise this won't generalize to",
"# different architectures.",
"feed_dict",
"=",
"{",
"batch_x_name",
":",
"batch_x",
",",
"batch_x_lens_name",
":",
"batch_x_lens",
"}",
"dense_decoded",
"=",
"sess",
".",
"run",
"(",
"output_name",
",",
"feed_dict",
"=",
"feed_dict",
")",
"# Create a human-readable representation of the decoded.",
"indices_to_labels",
"=",
"labels",
".",
"make_indices_to_labels",
"(",
"label_set",
")",
"human_readable",
"=",
"dense_to_human_readable",
"(",
"dense_decoded",
",",
"indices_to_labels",
")",
"return",
"human_readable"
] |
Use an existing tensorflow model that exists on disk to decode
WAV files.
Args:
model_path_prefix: The path to the saved tensorflow model.
This is the full prefix to the ".ckpt" file.
input_paths: A sequence of `pathlib.Path`s to WAV files to put through
the model provided.
label_set: The set of all the labels this model uses.
feature_type: The type of features this model uses.
Note that this MUST match the type of features that the
model was trained on initially.
feat_dir: Any files that require preprocessing will be
saved to the path specified by this.
batch_x_name: The name of the tensorflow input for batch_x
batch_x_lens_name: The name of the tensorflow input for batch_x_lens
output_name: The name of the tensorflow output
|
[
"Use",
"an",
"existing",
"tensorflow",
"model",
"that",
"exists",
"on",
"disk",
"to",
"decode",
"WAV",
"files",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/model.py#L68-L153
|
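A hedged usage sketch for decode above; the checkpoint prefix, WAV paths and label set are invented and must match whatever the model was actually trained with:

from pathlib import Path

wavs = [Path("untranscribed/utt1.wav"), Path("untranscribed/utt2.wav")]
label_set = {"a", "e", "i", "o", "u", "k", "t"}
# model_path_prefix is the ".ckpt" prefix, not a concrete file.
hyps = decode("exp/1/model/model_best.ckpt", wavs, label_set)
for wav, hyp in zip(wavs, hyps):
    print(wav.name, " ".join(hyp))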
13,835
|
persephone-tools/persephone
|
persephone/model.py
|
Model.eval
|
def eval(self, restore_model_path: Optional[str]=None) -> None:
""" Evaluates the model on a test set."""
saver = tf.train.Saver()
with tf.Session(config=allow_growth_config) as sess:
if restore_model_path:
logger.info("restoring model from %s", restore_model_path)
saver.restore(sess, restore_model_path)
else:
assert self.saved_model_path, "{}".format(self.saved_model_path)
logger.info("restoring model from %s", self.saved_model_path)
saver.restore(sess, self.saved_model_path)
test_x, test_x_lens, test_y = self.corpus_reader.test_batch()
feed_dict = {self.batch_x: test_x,
self.batch_x_lens: test_x_lens,
self.batch_y: test_y}
test_ler, dense_decoded, dense_ref = sess.run(
[self.ler, self.dense_decoded, self.dense_ref],
feed_dict=feed_dict)
hyps, refs = self.corpus_reader.human_readable_hyp_ref(
dense_decoded, dense_ref)
# Log hypotheses
hyps_dir = os.path.join(self.exp_dir, "test")
if not os.path.isdir(hyps_dir):
os.mkdir(hyps_dir)
with open(os.path.join(hyps_dir, "hyps"), "w",
encoding=ENCODING) as hyps_f:
for hyp in hyps:
print(" ".join(hyp), file=hyps_f)
with open(os.path.join(hyps_dir, "refs"), "w",
encoding=ENCODING) as refs_f:
for ref in refs:
print(" ".join(ref), file=refs_f)
test_per = utils.batch_per(hyps, refs)
assert test_per == test_ler
with open(os.path.join(hyps_dir, "test_per"), "w",
encoding=ENCODING) as per_f:
print("LER: %f" % (test_ler), file=per_f)
|
python
|
def eval(self, restore_model_path: Optional[str]=None) -> None:
""" Evaluates the model on a test set."""
saver = tf.train.Saver()
with tf.Session(config=allow_growth_config) as sess:
if restore_model_path:
logger.info("restoring model from %s", restore_model_path)
saver.restore(sess, restore_model_path)
else:
assert self.saved_model_path, "{}".format(self.saved_model_path)
logger.info("restoring model from %s", self.saved_model_path)
saver.restore(sess, self.saved_model_path)
test_x, test_x_lens, test_y = self.corpus_reader.test_batch()
feed_dict = {self.batch_x: test_x,
self.batch_x_lens: test_x_lens,
self.batch_y: test_y}
test_ler, dense_decoded, dense_ref = sess.run(
[self.ler, self.dense_decoded, self.dense_ref],
feed_dict=feed_dict)
hyps, refs = self.corpus_reader.human_readable_hyp_ref(
dense_decoded, dense_ref)
# Log hypotheses
hyps_dir = os.path.join(self.exp_dir, "test")
if not os.path.isdir(hyps_dir):
os.mkdir(hyps_dir)
with open(os.path.join(hyps_dir, "hyps"), "w",
encoding=ENCODING) as hyps_f:
for hyp in hyps:
print(" ".join(hyp), file=hyps_f)
with open(os.path.join(hyps_dir, "refs"), "w",
encoding=ENCODING) as refs_f:
for ref in refs:
print(" ".join(ref), file=refs_f)
test_per = utils.batch_per(hyps, refs)
assert test_per == test_ler
with open(os.path.join(hyps_dir, "test_per"), "w",
encoding=ENCODING) as per_f:
print("LER: %f" % (test_ler), file=per_f)
|
[
"def",
"eval",
"(",
"self",
",",
"restore_model_path",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"None",
":",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
")",
"with",
"tf",
".",
"Session",
"(",
"config",
"=",
"allow_growth_config",
")",
"as",
"sess",
":",
"if",
"restore_model_path",
":",
"logger",
".",
"info",
"(",
"\"restoring model from %s\"",
",",
"restore_model_path",
")",
"saver",
".",
"restore",
"(",
"sess",
",",
"restore_model_path",
")",
"else",
":",
"assert",
"self",
".",
"saved_model_path",
",",
"\"{}\"",
".",
"format",
"(",
"self",
".",
"saved_model_path",
")",
"logger",
".",
"info",
"(",
"\"restoring model from %s\"",
",",
"self",
".",
"saved_model_path",
")",
"saver",
".",
"restore",
"(",
"sess",
",",
"self",
".",
"saved_model_path",
")",
"test_x",
",",
"test_x_lens",
",",
"test_y",
"=",
"self",
".",
"corpus_reader",
".",
"test_batch",
"(",
")",
"feed_dict",
"=",
"{",
"self",
".",
"batch_x",
":",
"test_x",
",",
"self",
".",
"batch_x_lens",
":",
"test_x_lens",
",",
"self",
".",
"batch_y",
":",
"test_y",
"}",
"test_ler",
",",
"dense_decoded",
",",
"dense_ref",
"=",
"sess",
".",
"run",
"(",
"[",
"self",
".",
"ler",
",",
"self",
".",
"dense_decoded",
",",
"self",
".",
"dense_ref",
"]",
",",
"feed_dict",
"=",
"feed_dict",
")",
"hyps",
",",
"refs",
"=",
"self",
".",
"corpus_reader",
".",
"human_readable_hyp_ref",
"(",
"dense_decoded",
",",
"dense_ref",
")",
"# Log hypotheses",
"hyps_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"exp_dir",
",",
"\"test\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"hyps_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"hyps_dir",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"hyps_dir",
",",
"\"hyps\"",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"ENCODING",
")",
"as",
"hyps_f",
":",
"for",
"hyp",
"in",
"hyps",
":",
"print",
"(",
"\" \"",
".",
"join",
"(",
"hyp",
")",
",",
"file",
"=",
"hyps_f",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"hyps_dir",
",",
"\"refs\"",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"ENCODING",
")",
"as",
"refs_f",
":",
"for",
"ref",
"in",
"refs",
":",
"print",
"(",
"\" \"",
".",
"join",
"(",
"ref",
")",
",",
"file",
"=",
"refs_f",
")",
"test_per",
"=",
"utils",
".",
"batch_per",
"(",
"hyps",
",",
"refs",
")",
"assert",
"test_per",
"==",
"test_ler",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"hyps_dir",
",",
"\"test_per\"",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"ENCODING",
")",
"as",
"per_f",
":",
"print",
"(",
"\"LER: %f\"",
"%",
"(",
"test_ler",
")",
",",
"file",
"=",
"per_f",
")"
] |
Evaluates the model on a test set.
|
[
"Evaluates",
"the",
"model",
"on",
"a",
"test",
"set",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/model.py#L258-L299
|
13,836
|
persephone-tools/persephone
|
persephone/model.py
|
Model.output_best_scores
|
def output_best_scores(self, best_epoch_str: str) -> None:
"""Output best scores to the filesystem"""
BEST_SCORES_FILENAME = "best_scores.txt"
with open(os.path.join(self.exp_dir, BEST_SCORES_FILENAME),
"w", encoding=ENCODING) as best_f:
print(best_epoch_str, file=best_f, flush=True)
|
python
|
def output_best_scores(self, best_epoch_str: str) -> None:
"""Output best scores to the filesystem"""
BEST_SCORES_FILENAME = "best_scores.txt"
with open(os.path.join(self.exp_dir, BEST_SCORES_FILENAME),
"w", encoding=ENCODING) as best_f:
print(best_epoch_str, file=best_f, flush=True)
|
[
"def",
"output_best_scores",
"(",
"self",
",",
"best_epoch_str",
":",
"str",
")",
"->",
"None",
":",
"BEST_SCORES_FILENAME",
"=",
"\"best_scores.txt\"",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"exp_dir",
",",
"BEST_SCORES_FILENAME",
")",
",",
"\"w\"",
",",
"encoding",
"=",
"ENCODING",
")",
"as",
"best_f",
":",
"print",
"(",
"best_epoch_str",
",",
"file",
"=",
"best_f",
",",
"flush",
"=",
"True",
")"
] |
Output best scores to the filesystem
|
[
"Output",
"best",
"scores",
"to",
"the",
"filesystem"
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/model.py#L301-L306
|
13,837
|
persephone-tools/persephone
|
persephone/corpus.py
|
ensure_no_set_overlap
|
def ensure_no_set_overlap(train: Sequence[str], valid: Sequence[str], test: Sequence[str]) -> None:
""" Ensures no test set data has creeped into the training set."""
logger.debug("Ensuring that the training, validation and test data sets have no overlap")
train_s = set(train)
valid_s = set(valid)
test_s = set(test)
if train_s & valid_s:
logger.warning("train and valid have overlapping items: {}".format(train_s & valid_s))
raise PersephoneException("train and valid have overlapping items: {}".format(train_s & valid_s))
if train_s & test_s:
logger.warning("train and test have overlapping items: {}".format(train_s & test_s))
raise PersephoneException("train and test have overlapping items: {}".format(train_s & test_s))
if valid_s & test_s:
logger.warning("valid and test have overlapping items: {}".format(valid_s & test_s))
raise PersephoneException("valid and test have overlapping items: {}".format(valid_s & test_s))
|
python
|
def ensure_no_set_overlap(train: Sequence[str], valid: Sequence[str], test: Sequence[str]) -> None:
""" Ensures no test set data has creeped into the training set."""
logger.debug("Ensuring that the training, validation and test data sets have no overlap")
train_s = set(train)
valid_s = set(valid)
test_s = set(test)
if train_s & valid_s:
logger.warning("train and valid have overlapping items: {}".format(train_s & valid_s))
raise PersephoneException("train and valid have overlapping items: {}".format(train_s & valid_s))
if train_s & test_s:
logger.warning("train and test have overlapping items: {}".format(train_s & test_s))
raise PersephoneException("train and test have overlapping items: {}".format(train_s & test_s))
if valid_s & test_s:
logger.warning("valid and test have overlapping items: {}".format(valid_s & test_s))
raise PersephoneException("valid and test have overlapping items: {}".format(valid_s & test_s))
|
[
"def",
"ensure_no_set_overlap",
"(",
"train",
":",
"Sequence",
"[",
"str",
"]",
",",
"valid",
":",
"Sequence",
"[",
"str",
"]",
",",
"test",
":",
"Sequence",
"[",
"str",
"]",
")",
"->",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Ensuring that the training, validation and test data sets have no overlap\"",
")",
"train_s",
"=",
"set",
"(",
"train",
")",
"valid_s",
"=",
"set",
"(",
"valid",
")",
"test_s",
"=",
"set",
"(",
"test",
")",
"if",
"train_s",
"&",
"valid_s",
":",
"logger",
".",
"warning",
"(",
"\"train and valid have overlapping items: {}\"",
".",
"format",
"(",
"train_s",
"&",
"valid_s",
")",
")",
"raise",
"PersephoneException",
"(",
"\"train and valid have overlapping items: {}\"",
".",
"format",
"(",
"train_s",
"&",
"valid_s",
")",
")",
"if",
"train_s",
"&",
"test_s",
":",
"logger",
".",
"warning",
"(",
"\"train and test have overlapping items: {}\"",
".",
"format",
"(",
"train_s",
"&",
"test_s",
")",
")",
"raise",
"PersephoneException",
"(",
"\"train and test have overlapping items: {}\"",
".",
"format",
"(",
"train_s",
"&",
"test_s",
")",
")",
"if",
"valid_s",
"&",
"test_s",
":",
"logger",
".",
"warning",
"(",
"\"valid and test have overlapping items: {}\"",
".",
"format",
"(",
"valid_s",
"&",
"test_s",
")",
")",
"raise",
"PersephoneException",
"(",
"\"valid and test have overlapping items: {}\"",
".",
"format",
"(",
"valid_s",
"&",
"test_s",
")",
")"
] |
Ensures no test set data has crept into the training set.
|
[
"Ensures",
"no",
"test",
"set",
"data",
"has",
"creeped",
"into",
"the",
"training",
"set",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L31-L47
|
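A small check of the guard above (the prefix names are invented):

train = ["utt1", "utt2"]
valid = ["utt3"]
test = ["utt4"]
ensure_no_set_overlap(train, valid, test)       # passes silently

# Any shared prefix raises PersephoneException:
ensure_no_set_overlap(train, valid, ["utt2"])   # train/test overlap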
13,838
|
persephone-tools/persephone
|
persephone/corpus.py
|
get_untranscribed_prefixes_from_file
|
def get_untranscribed_prefixes_from_file(target_directory: Path) -> List[str]:
"""
The file "untranscribed_prefixes.txt" will specify prefixes which
do not have an associated transcription file if placed in the target directory.
This will fetch those prefixes from that file and will return an empty
list if that file does not exist.
See find_untranscribed_wavs function for finding untranscribed prefixes in an
experiment directory.
Returns:
A list of all untranscribed prefixes as specified in the file
"""
untranscribed_prefix_fn = target_directory / "untranscribed_prefixes.txt"
if untranscribed_prefix_fn.exists():
with untranscribed_prefix_fn.open() as f:
prefixes = f.readlines()
return [prefix.strip() for prefix in prefixes]
else:
#logger.warning("Attempting to get untranscribed prefixes but the file ({})"
# " that should specify these does not exist".format(untranscribed_prefix_fn))
pass
return []
|
python
|
def get_untranscribed_prefixes_from_file(target_directory: Path) -> List[str]:
"""
The file "untranscribed_prefixes.txt" will specify prefixes which
do not have an associated transcription file if placed in the target directory.
This will fetch those prefixes from that file and will return an empty
list if that file does not exist.
See find_untranscribed_wavs function for finding untranscribed prefixes in an
experiment directory.
Returns:
A list of all untranscribed prefixes as specified in the file
"""
untranscribed_prefix_fn = target_directory / "untranscribed_prefixes.txt"
if untranscribed_prefix_fn.exists():
with untranscribed_prefix_fn.open() as f:
prefixes = f.readlines()
return [prefix.strip() for prefix in prefixes]
else:
#logger.warning("Attempting to get untranscribed prefixes but the file ({})"
# " that should specify these does not exist".format(untranscribed_prefix_fn))
pass
return []
|
[
"def",
"get_untranscribed_prefixes_from_file",
"(",
"target_directory",
":",
"Path",
")",
"->",
"List",
"[",
"str",
"]",
":",
"untranscribed_prefix_fn",
"=",
"target_directory",
"/",
"\"untranscribed_prefixes.txt\"",
"if",
"untranscribed_prefix_fn",
".",
"exists",
"(",
")",
":",
"with",
"untranscribed_prefix_fn",
".",
"open",
"(",
")",
"as",
"f",
":",
"prefixes",
"=",
"f",
".",
"readlines",
"(",
")",
"return",
"[",
"prefix",
".",
"strip",
"(",
")",
"for",
"prefix",
"in",
"prefixes",
"]",
"else",
":",
"#logger.warning(\"Attempting to get untranscribed prefixes but the file ({})\"",
"# \" that should specify these does not exist\".format(untranscribed_prefix_fn))",
"pass",
"return",
"[",
"]"
] |
The file "untranscribed_prefixes.txt" will specify prefixes which
do not have an associated transcription file if placed in the target directory.
This will fetch those prefixes from that file and will return an empty
list if that file does not exist.
See find_untranscribed_wavs function for finding untranscribed prefixes in an
experiment directory.
Returns:
A list of all untranscribed prefixes as specified in the file
|
[
"The",
"file",
"untranscribed_prefixes",
".",
"txt",
"will",
"specify",
"prefixes",
"which",
"do",
"not",
"have",
"an",
"associated",
"transcription",
"file",
"if",
"placed",
"in",
"the",
"target",
"directory",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L69-L94
|
13,839
|
persephone-tools/persephone
|
persephone/corpus.py
|
determine_labels
|
def determine_labels(target_dir: Path, label_type: str) -> Set[str]:
""" Returns a set of all phonemes found in the corpus. Assumes that WAV files and
label files are split into utterances and segregated in a directory which contains a
"wav" subdirectory and "label" subdirectory.
Arguments:
target_dir: A `Path` to the directory where the corpus data is found
label_type: The type of label we are creating the label set from. For example
"phonemes" would only search for labels for that type.
"""
logger.info("Finding phonemes of type %s in directory %s", label_type, target_dir)
label_dir = target_dir / "label/"
if not label_dir.is_dir():
raise FileNotFoundError(
"The directory {} does not exist.".format(target_dir))
phonemes = set() # type: Set[str]
for fn in os.listdir(str(label_dir)):
if fn.endswith(str(label_type)):
with (label_dir / fn).open("r", encoding=ENCODING) as f:
try:
line_phonemes = set(f.readline().split())
except UnicodeDecodeError:
logger.error("Unicode decode error on file %s", fn)
print("Unicode decode error on file {}".format(fn))
raise
phonemes = phonemes.union(line_phonemes)
return phonemes
|
python
|
def determine_labels(target_dir: Path, label_type: str) -> Set[str]:
""" Returns a set of all phonemes found in the corpus. Assumes that WAV files and
label files are split into utterances and segregated in a directory which contains a
"wav" subdirectory and "label" subdirectory.
Arguments:
target_dir: A `Path` to the directory where the corpus data is found
label_type: The type of label we are creating the label set from. For example
"phonemes" would only search for labels for that type.
"""
logger.info("Finding phonemes of type %s in directory %s", label_type, target_dir)
label_dir = target_dir / "label/"
if not label_dir.is_dir():
raise FileNotFoundError(
"The directory {} does not exist.".format(target_dir))
phonemes = set() # type: Set[str]
for fn in os.listdir(str(label_dir)):
if fn.endswith(str(label_type)):
with (label_dir / fn).open("r", encoding=ENCODING) as f:
try:
line_phonemes = set(f.readline().split())
except UnicodeDecodeError:
logger.error("Unicode decode error on file %s", fn)
print("Unicode decode error on file {}".format(fn))
raise
phonemes = phonemes.union(line_phonemes)
return phonemes
|
[
"def",
"determine_labels",
"(",
"target_dir",
":",
"Path",
",",
"label_type",
":",
"str",
")",
"->",
"Set",
"[",
"str",
"]",
":",
"logger",
".",
"info",
"(",
"\"Finding phonemes of type %s in directory %s\"",
",",
"label_type",
",",
"target_dir",
")",
"label_dir",
"=",
"target_dir",
"/",
"\"label/\"",
"if",
"not",
"label_dir",
".",
"is_dir",
"(",
")",
":",
"raise",
"FileNotFoundError",
"(",
"\"The directory {} does not exist.\"",
".",
"format",
"(",
"target_dir",
")",
")",
"phonemes",
"=",
"set",
"(",
")",
"# type: Set[str]",
"for",
"fn",
"in",
"os",
".",
"listdir",
"(",
"str",
"(",
"label_dir",
")",
")",
":",
"if",
"fn",
".",
"endswith",
"(",
"str",
"(",
"label_type",
")",
")",
":",
"with",
"(",
"label_dir",
"/",
"fn",
")",
".",
"open",
"(",
"\"r\"",
",",
"encoding",
"=",
"ENCODING",
")",
"as",
"f",
":",
"try",
":",
"line_phonemes",
"=",
"set",
"(",
"f",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
")",
"except",
"UnicodeDecodeError",
":",
"logger",
".",
"error",
"(",
"\"Unicode decode error on file %s\"",
",",
"fn",
")",
"print",
"(",
"\"Unicode decode error on file {}\"",
".",
"format",
"(",
"fn",
")",
")",
"raise",
"phonemes",
"=",
"phonemes",
".",
"union",
"(",
"line_phonemes",
")",
"return",
"phonemes"
] |
Returns a set of all phonemes found in the corpus. Assumes that WAV files and
label files are split into utterances and segregated in a directory which contains a
"wav" subdirectory and "label" subdirectory.
Arguments:
target_dir: A `Path` to the directory where the corpus data is found
label_type: The type of label we are creating the label set from. For example
"phonemes" would only search for labels for that type.
|
[
"Returns",
"a",
"set",
"of",
"all",
"phonemes",
"found",
"in",
"the",
"corpus",
".",
"Assumes",
"that",
"WAV",
"files",
"and",
"label",
"files",
"are",
"split",
"into",
"utterances",
"and",
"segregated",
"in",
"a",
"directory",
"which",
"contains",
"a",
"wav",
"subdirectory",
"and",
"label",
"subdirectory",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L617-L645
|
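A usage sketch, assuming the corpus layout described in the docstring (file names and contents invented):

from pathlib import Path

# corpus/
#     wav/    story01.0.wav ...
#     label/  story01.0.phonemes   containing e.g. "o l a"
phoneme_set = determine_labels(Path("corpus"), "phonemes")
print(sorted(phoneme_set))   # -> ['a', 'l', 'o']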
13,840
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.from_elan
|
def from_elan(cls: Type[CorpusT], org_dir: Path, tgt_dir: Path,
feat_type: str = "fbank", label_type: str = "phonemes",
utterance_filter: Callable[[Utterance], bool] = None,
label_segmenter: Optional[LabelSegmenter] = None,
speakers: List[str] = None, lazy: bool = True,
tier_prefixes: Tuple[str, ...] = ("xv", "rf")) -> CorpusT:
""" Construct a `Corpus` from ELAN files.
Args:
org_dir: A path to the directory containing the unpreprocessed
data.
tgt_dir: A path to the directory where the preprocessed data will
be stored.
feat_type: A string describing the input speech features. For
example, "fbank" for log Mel filterbank features.
label_type: A string describing the transcription labels. For example,
"phonemes" or "tones".
utterance_filter: A function that returns False if an utterance
should not be included in the corpus and True otherwise. This
can be used to remove undesirable utterances for training, such as
codeswitched utterances.
label_segmenter: An object that has an attribute `segment_labels`,
which is creates new `Utterance` instances from old ones,
by segmenting the tokens in their `text` attribute. Note,
`LabelSegmenter` might be better as a function, the only issue
is it needs to carry with it a list of labels. This could
potentially be a function attribute.
speakers: A list of speakers to filter for. If `None`, utterances
from all speakers are included.
tier_prefixes: A collection of strings that prefix ELAN tiers to
filter for. For example, if this is `("xv", "rf")`, then tiers
named "xv", "xv@Mark", "rf@Rose" would be extracted if they
existed.
"""
# This currently bails out if label_segmenter is not provided
if not label_segmenter:
raise ValueError("A label segmenter must be provided via label_segmenter")
# In case path is supplied as a string, make it a Path
if isinstance(tgt_dir, str):
tgt_dir = Path(tgt_dir)
# Read utterances from org_dir.
utterances = elan.utterances_from_dir(org_dir,
tier_prefixes=tier_prefixes)
# Filter utterances based on some criteria (such as codeswitching).
if utterance_filter:
utterances = [utter for utter in utterances if utterance_filter(utter)]
utterances = utterance.remove_duplicates(utterances)
# Segment the labels in the utterances appropriately
if label_segmenter:
utterances = [label_segmenter.segment_labels(utter) for utter in utterances]
# Remove utterances without transcriptions.
utterances = utterance.remove_empty_text(utterances)
# Remove utterances with exceptionally short wav_files that are too
# short for CTC to work.
utterances = utterance.remove_too_short(utterances)
tgt_dir.mkdir(parents=True, exist_ok=True)
# TODO A lot of these methods aren't ELAN-specific. preprocess.elan was
# only used to get the utterances. There could be another Corpus
# factory method that takes Utterance objects. the fromElan and
# fromPangloss constructors could call this.
# Writes the transcriptions to the tgt_dir/label/ dir
utterance.write_transcriptions(utterances, (tgt_dir / "label"),
label_type, lazy=lazy)
# Extracts utterance level WAV information from the input file.
wav.extract_wavs(utterances, (tgt_dir / "wav"), lazy=lazy)
corpus = cls(feat_type, label_type, tgt_dir,
labels=label_segmenter.labels, speakers=speakers)
corpus.utterances = utterances
return corpus
|
python
|
def from_elan(cls: Type[CorpusT], org_dir: Path, tgt_dir: Path,
feat_type: str = "fbank", label_type: str = "phonemes",
utterance_filter: Callable[[Utterance], bool] = None,
label_segmenter: Optional[LabelSegmenter] = None,
speakers: List[str] = None, lazy: bool = True,
tier_prefixes: Tuple[str, ...] = ("xv", "rf")) -> CorpusT:
""" Construct a `Corpus` from ELAN files.
Args:
org_dir: A path to the directory containing the unpreprocessed
data.
tgt_dir: A path to the directory where the preprocessed data will
be stored.
feat_type: A string describing the input speech features. For
example, "fbank" for log Mel filterbank features.
label_type: A string describing the transcription labels. For example,
"phonemes" or "tones".
utterance_filter: A function that returns False if an utterance
should not be included in the corpus and True otherwise. This
can be used to remove undesirable utterances for training, such as
codeswitched utterances.
label_segmenter: An object that has an attribute `segment_labels`,
                which creates new `Utterance` instances from old ones,
by segmenting the tokens in their `text` attribute. Note,
`LabelSegmenter` might be better as a function, the only issue
is it needs to carry with it a list of labels. This could
potentially be a function attribute.
speakers: A list of speakers to filter for. If `None`, utterances
from all speakers are included.
tier_prefixes: A collection of strings that prefix ELAN tiers to
filter for. For example, if this is `("xv", "rf")`, then tiers
named "xv", "xv@Mark", "rf@Rose" would be extracted if they
existed.
"""
# This currently bails out if label_segmenter is not provided
if not label_segmenter:
raise ValueError("A label segmenter must be provided via label_segmenter")
# In case path is supplied as a string, make it a Path
if isinstance(tgt_dir, str):
tgt_dir = Path(tgt_dir)
# Read utterances from org_dir.
utterances = elan.utterances_from_dir(org_dir,
tier_prefixes=tier_prefixes)
# Filter utterances based on some criteria (such as codeswitching).
if utterance_filter:
utterances = [utter for utter in utterances if utterance_filter(utter)]
utterances = utterance.remove_duplicates(utterances)
# Segment the labels in the utterances appropriately
if label_segmenter:
utterances = [label_segmenter.segment_labels(utter) for utter in utterances]
# Remove utterances without transcriptions.
utterances = utterance.remove_empty_text(utterances)
# Remove utterances with exceptionally short wav_files that are too
# short for CTC to work.
utterances = utterance.remove_too_short(utterances)
tgt_dir.mkdir(parents=True, exist_ok=True)
# TODO A lot of these methods aren't ELAN-specific. preprocess.elan was
# only used to get the utterances. There could be another Corpus
# factory method that takes Utterance objects. the fromElan and
# fromPangloss constructors could call this.
# Writes the transcriptions to the tgt_dir/label/ dir
utterance.write_transcriptions(utterances, (tgt_dir / "label"),
label_type, lazy=lazy)
# Extracts utterance level WAV information from the input file.
wav.extract_wavs(utterances, (tgt_dir / "wav"), lazy=lazy)
corpus = cls(feat_type, label_type, tgt_dir,
labels=label_segmenter.labels, speakers=speakers)
corpus.utterances = utterances
return corpus
|
[
"def",
"from_elan",
"(",
"cls",
":",
"Type",
"[",
"CorpusT",
"]",
",",
"org_dir",
":",
"Path",
",",
"tgt_dir",
":",
"Path",
",",
"feat_type",
":",
"str",
"=",
"\"fbank\"",
",",
"label_type",
":",
"str",
"=",
"\"phonemes\"",
",",
"utterance_filter",
":",
"Callable",
"[",
"[",
"Utterance",
"]",
",",
"bool",
"]",
"=",
"None",
",",
"label_segmenter",
":",
"Optional",
"[",
"LabelSegmenter",
"]",
"=",
"None",
",",
"speakers",
":",
"List",
"[",
"str",
"]",
"=",
"None",
",",
"lazy",
":",
"bool",
"=",
"True",
",",
"tier_prefixes",
":",
"Tuple",
"[",
"str",
",",
"...",
"]",
"=",
"(",
"\"xv\"",
",",
"\"rf\"",
")",
")",
"->",
"CorpusT",
":",
"# This currently bails out if label_segmenter is not provided",
"if",
"not",
"label_segmenter",
":",
"raise",
"ValueError",
"(",
"\"A label segmenter must be provided via label_segmenter\"",
")",
"# In case path is supplied as a string, make it a Path",
"if",
"isinstance",
"(",
"tgt_dir",
",",
"str",
")",
":",
"tgt_dir",
"=",
"Path",
"(",
"tgt_dir",
")",
"# Read utterances from org_dir.",
"utterances",
"=",
"elan",
".",
"utterances_from_dir",
"(",
"org_dir",
",",
"tier_prefixes",
"=",
"tier_prefixes",
")",
"# Filter utterances based on some criteria (such as codeswitching).",
"if",
"utterance_filter",
":",
"utterances",
"=",
"[",
"utter",
"for",
"utter",
"in",
"utterances",
"if",
"utterance_filter",
"(",
"utter",
")",
"]",
"utterances",
"=",
"utterance",
".",
"remove_duplicates",
"(",
"utterances",
")",
"# Segment the labels in the utterances appropriately",
"if",
"label_segmenter",
":",
"utterances",
"=",
"[",
"label_segmenter",
".",
"segment_labels",
"(",
"utter",
")",
"for",
"utter",
"in",
"utterances",
"]",
"# Remove utterances without transcriptions.",
"utterances",
"=",
"utterance",
".",
"remove_empty_text",
"(",
"utterances",
")",
"# Remove utterances with exceptionally short wav_files that are too",
"# short for CTC to work.",
"utterances",
"=",
"utterance",
".",
"remove_too_short",
"(",
"utterances",
")",
"tgt_dir",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"# TODO A lot of these methods aren't ELAN-specific. preprocess.elan was",
"# only used to get the utterances. There could be another Corpus",
"# factory method that takes Utterance objects. the fromElan and",
"# fromPangloss constructors could call this.",
"# Writes the transcriptions to the tgt_dir/label/ dir",
"utterance",
".",
"write_transcriptions",
"(",
"utterances",
",",
"(",
"tgt_dir",
"/",
"\"label\"",
")",
",",
"label_type",
",",
"lazy",
"=",
"lazy",
")",
"# Extracts utterance level WAV information from the input file.",
"wav",
".",
"extract_wavs",
"(",
"utterances",
",",
"(",
"tgt_dir",
"/",
"\"wav\"",
")",
",",
"lazy",
"=",
"lazy",
")",
"corpus",
"=",
"cls",
"(",
"feat_type",
",",
"label_type",
",",
"tgt_dir",
",",
"labels",
"=",
"label_segmenter",
".",
"labels",
",",
"speakers",
"=",
"speakers",
")",
"corpus",
".",
"utterances",
"=",
"utterances",
"return",
"corpus"
] |
Construct a `Corpus` from ELAN files.
Args:
org_dir: A path to the directory containing the unpreprocessed
data.
tgt_dir: A path to the directory where the preprocessed data will
be stored.
feat_type: A string describing the input speech features. For
example, "fbank" for log Mel filterbank features.
label_type: A string describing the transcription labels. For example,
"phonemes" or "tones".
utterance_filter: A function that returns False if an utterance
should not be included in the corpus and True otherwise. This
can be used to remove undesirable utterances for training, such as
codeswitched utterances.
label_segmenter: An object that has an attribute `segment_labels`,
which creates new `Utterance` instances from old ones
by segmenting the tokens in their `text` attribute. Note
that `LabelSegmenter` might be better as a function; the only
issue is that it needs to carry a list of labels with it. This could
potentially be a function attribute.
speakers: A list of speakers to filter for. If `None`, utterances
from all speakers are included.
tier_prefixes: A collection of strings that prefix ELAN tiers to
filter for. For example, if this is `("xv", "rf")`, then tiers
named "xv", "xv@Mark", "rf@Rose" would be extracted if they
existed.
|
[
"Construct",
"a",
"Corpus",
"from",
"ELAN",
"files",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L236-L315
|
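A minimal usage sketch for `Corpus.from_elan`, assuming hypothetical directory paths and a hypothetical stand-in for the label segmenter interface (any object exposing a `labels` attribute and a `segment_labels` callable should satisfy the constructor above):

from pathlib import Path

from persephone.corpus import Corpus

class NoopSegmenter:
    """Hypothetical stand-in for a LabelSegmenter: a `labels`
    attribute plus a `segment_labels` callable."""
    labels = {"a", "b", "c"}  # placeholder label inventory
    @staticmethod
    def segment_labels(utter):
        return utter  # assumes text is already space-delimited labels

corpus = Corpus.from_elan(Path("recordings/"),    # assumed org_dir
                          Path("preprocessed/"),  # assumed tgt_dir
                          label_segmenter=NoopSegmenter(),
                          tier_prefixes=("xv",))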
13,841
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.set_and_check_directories
|
def set_and_check_directories(self, tgt_dir: Path) -> None:
"""
Make sure that the required directories exist in the target directory.
Set variables accordingly.
"""
logger.info("Setting up directories for corpus in %s", tgt_dir)
# Check directories exist.
if not tgt_dir.is_dir():
raise FileNotFoundError(
"The directory {} does not exist.".format(tgt_dir))
if not self.wav_dir.is_dir():
raise PersephoneException(
"The supplied path requires a 'wav' subdirectory.")
self.feat_dir.mkdir(parents=True, exist_ok=True)
if not self.label_dir.is_dir():
raise PersephoneException(
"The supplied path requires a 'label' subdirectory.")
|
python
|
def set_and_check_directories(self, tgt_dir: Path) -> None:
"""
Make sure that the required directories exist in the target directory.
Set variables accordingly.
"""
logger.info("Setting up directories for corpus in %s", tgt_dir)
# Check directories exist.
if not tgt_dir.is_dir():
raise FileNotFoundError(
"The directory {} does not exist.".format(tgt_dir))
if not self.wav_dir.is_dir():
raise PersephoneException(
"The supplied path requires a 'wav' subdirectory.")
self.feat_dir.mkdir(parents=True, exist_ok=True)
if not self.label_dir.is_dir():
raise PersephoneException(
"The supplied path requires a 'label' subdirectory.")
|
[
"def",
"set_and_check_directories",
"(",
"self",
",",
"tgt_dir",
":",
"Path",
")",
"->",
"None",
":",
"logger",
".",
"info",
"(",
"\"Setting up directories for corpus in %s\"",
",",
"tgt_dir",
")",
"# Check directories exist.",
"if",
"not",
"tgt_dir",
".",
"is_dir",
"(",
")",
":",
"raise",
"FileNotFoundError",
"(",
"\"The directory {} does not exist.\"",
".",
"format",
"(",
"tgt_dir",
")",
")",
"if",
"not",
"self",
".",
"wav_dir",
".",
"is_dir",
"(",
")",
":",
"raise",
"PersephoneException",
"(",
"\"The supplied path requires a 'wav' subdirectory.\"",
")",
"self",
".",
"feat_dir",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"if",
"not",
"self",
".",
"label_dir",
".",
"is_dir",
"(",
")",
":",
"raise",
"PersephoneException",
"(",
"\"The supplied path requires a 'label' subdirectory.\"",
")"
] |
Make sure that the required directories exist in the target directory.
Set variables accordingly.
|
[
"Make",
"sure",
"that",
"the",
"required",
"directories",
"exist",
"in",
"the",
"target",
"directory",
".",
"set",
"variables",
"accordingly",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L338-L355
|
13,842
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.initialize_labels
|
def initialize_labels(self, labels: Set[str]) -> Tuple[dict, dict]:
"""Create mappings from label to index and index to label"""
logger.debug("Creating mappings for labels")
label_to_index = {label: index for index, label in enumerate(
["pad"] + sorted(list(labels)))}
index_to_label = {index: phn for index, phn in enumerate(
["pad"] + sorted(list(labels)))}
return label_to_index, index_to_label
|
python
|
def initialize_labels(self, labels: Set[str]) -> Tuple[dict, dict]:
"""Create mappings from label to index and index to label"""
logger.debug("Creating mappings for labels")
label_to_index = {label: index for index, label in enumerate(
["pad"] + sorted(list(labels)))}
index_to_label = {index: phn for index, phn in enumerate(
["pad"] + sorted(list(labels)))}
return label_to_index, index_to_label
|
[
"def",
"initialize_labels",
"(",
"self",
",",
"labels",
":",
"Set",
"[",
"str",
"]",
")",
"->",
"Tuple",
"[",
"dict",
",",
"dict",
"]",
":",
"logger",
".",
"debug",
"(",
"\"Creating mappings for labels\"",
")",
"label_to_index",
"=",
"{",
"label",
":",
"index",
"for",
"index",
",",
"label",
"in",
"enumerate",
"(",
"[",
"\"pad\"",
"]",
"+",
"sorted",
"(",
"list",
"(",
"labels",
")",
")",
")",
"}",
"index_to_label",
"=",
"{",
"index",
":",
"phn",
"for",
"index",
",",
"phn",
"in",
"enumerate",
"(",
"[",
"\"pad\"",
"]",
"+",
"sorted",
"(",
"list",
"(",
"labels",
")",
")",
")",
"}",
"return",
"label_to_index",
",",
"index_to_label"
] |
Create mappings from label to index and index to label
|
[
"Create",
"mappings",
"from",
"label",
"to",
"index",
"and",
"index",
"to",
"label"
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L357-L366
|
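The mapping built by `initialize_labels` reserves index 0 for a "pad" symbol and sorts the remaining labels; a standalone sketch of the same comprehension on a toy label set:

labels = {"ng", "a", "b"}
symbols = ["pad"] + sorted(labels)
label_to_index = {label: index for index, label in enumerate(symbols)}
index_to_label = {index: label for index, label in enumerate(symbols)}
assert label_to_index["pad"] == 0
assert index_to_label[1] == "a"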
13,843
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.prepare_feats
|
def prepare_feats(self) -> None:
""" Prepares input features"""
logger.debug("Preparing input features")
self.feat_dir.mkdir(parents=True, exist_ok=True)
should_extract_feats = False
for path in self.wav_dir.iterdir():
if not path.suffix == ".wav":
logger.info("Non wav file found in wav directory: %s", path)
continue
prefix = os.path.basename(os.path.splitext(str(path))[0])
mono16k_wav_path = self.feat_dir / "{}.wav".format(prefix)
feat_path = self.feat_dir / "{}.{}.npy".format(prefix, self.feat_type)
if not feat_path.is_file():
# Then we should extract feats
should_extract_feats = True
if not mono16k_wav_path.is_file():
feat_extract.convert_wav(path, mono16k_wav_path)
# TODO Should be extracting feats on a per-file basis. Right now we
# check if any feats files don't exist and then do all the feature
# extraction.
if should_extract_feats:
feat_extract.from_dir(self.feat_dir, self.feat_type)
|
python
|
def prepare_feats(self) -> None:
""" Prepares input features"""
logger.debug("Preparing input features")
self.feat_dir.mkdir(parents=True, exist_ok=True)
should_extract_feats = False
for path in self.wav_dir.iterdir():
if not path.suffix == ".wav":
logger.info("Non wav file found in wav directory: %s", path)
continue
prefix = os.path.basename(os.path.splitext(str(path))[0])
mono16k_wav_path = self.feat_dir / "{}.wav".format(prefix)
feat_path = self.feat_dir / "{}.{}.npy".format(prefix, self.feat_type)
if not feat_path.is_file():
# Then we should extract feats
should_extract_feats = True
if not mono16k_wav_path.is_file():
feat_extract.convert_wav(path, mono16k_wav_path)
# TODO Should be extracting feats on a per-file basis. Right now we
# check if any feats files don't exist and then do all the feature
# extraction.
if should_extract_feats:
feat_extract.from_dir(self.feat_dir, self.feat_type)
|
[
"def",
"prepare_feats",
"(",
"self",
")",
"->",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Preparing input features\"",
")",
"self",
".",
"feat_dir",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"should_extract_feats",
"=",
"False",
"for",
"path",
"in",
"self",
".",
"wav_dir",
".",
"iterdir",
"(",
")",
":",
"if",
"not",
"path",
".",
"suffix",
"==",
"\".wav\"",
":",
"logger",
".",
"info",
"(",
"\"Non wav file found in wav directory: %s\"",
",",
"path",
")",
"continue",
"prefix",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"str",
"(",
"path",
")",
")",
"[",
"0",
"]",
")",
"mono16k_wav_path",
"=",
"self",
".",
"feat_dir",
"/",
"\"{}.wav\"",
".",
"format",
"(",
"prefix",
")",
"feat_path",
"=",
"self",
".",
"feat_dir",
"/",
"\"{}.{}.npy\"",
".",
"format",
"(",
"prefix",
",",
"self",
".",
"feat_type",
")",
"if",
"not",
"feat_path",
".",
"is_file",
"(",
")",
":",
"# Then we should extract feats",
"should_extract_feats",
"=",
"True",
"if",
"not",
"mono16k_wav_path",
".",
"is_file",
"(",
")",
":",
"feat_extract",
".",
"convert_wav",
"(",
"path",
",",
"mono16k_wav_path",
")",
"# TODO Should be extracting feats on a per-file basis. Right now we",
"# check if any feats files don't exist and then do all the feature",
"# extraction.",
"if",
"should_extract_feats",
":",
"feat_extract",
".",
"from_dir",
"(",
"self",
".",
"feat_dir",
",",
"self",
".",
"feat_type",
")"
] |
Prepares input features
|
[
"Prepares",
"input",
"features"
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L368-L392
|
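The `prefix` computed inside `prepare_feats` is just the filename stem; a quick standalone check of that exact expression, on a hypothetical path:

import os
from pathlib import Path

path = "wav/mark_on_rock.wav"  # hypothetical file name
prefix = os.path.basename(os.path.splitext(path)[0])
assert prefix == "mark_on_rock"
assert prefix == Path(path).stem  # pathlib spells the same thing directly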
13,844
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.make_data_splits
|
def make_data_splits(self, max_samples: int) -> None:
""" Splits the utterances into training, validation and test sets."""
train_f_exists = self.train_prefix_fn.is_file()
valid_f_exists = self.valid_prefix_fn.is_file()
test_f_exists = self.test_prefix_fn.is_file()
if train_f_exists and valid_f_exists and test_f_exists:
logger.debug("Split for training, validation and tests specified by files")
self.train_prefixes = self.read_prefixes(self.train_prefix_fn)
self.valid_prefixes = self.read_prefixes(self.valid_prefix_fn)
self.test_prefixes = self.read_prefixes(self.test_prefix_fn)
return
# Otherwise we now need to load prefixes for other cases addressed
# below
prefixes = self.determine_prefixes()
prefixes = utils.filter_by_size(
self.feat_dir, prefixes, self.feat_type, max_samples)
if not train_f_exists and not valid_f_exists and not test_f_exists:
logger.debug("No files supplied to define the split for training, validation"
" and tests. Using default.")
train_prefixes, valid_prefixes, test_prefixes = self.divide_prefixes(prefixes)
self.train_prefixes = train_prefixes
self.valid_prefixes = valid_prefixes
self.test_prefixes = test_prefixes
self.write_prefixes(train_prefixes, self.train_prefix_fn)
self.write_prefixes(valid_prefixes, self.valid_prefix_fn)
self.write_prefixes(test_prefixes, self.test_prefix_fn)
elif not train_f_exists and valid_f_exists and test_f_exists:
# Then we just make all other prefixes training prefixes.
self.valid_prefixes = self.read_prefixes(self.valid_prefix_fn)
self.test_prefixes = self.read_prefixes(self.test_prefix_fn)
train_prefixes = list(
set(prefixes) - set(self.valid_prefixes))
self.train_prefixes = list(
set(train_prefixes) - set(self.test_prefixes))
self.write_prefixes(self.train_prefixes, self.train_prefix_fn)
else:
raise NotImplementedError(
"The following case has not been implemented:" +
"{} exists - {}\n".format(self.train_prefix_fn, train_f_exists) +
"{} exists - {}\n".format(self.valid_prefix_fn, valid_f_exists) +
"{} exists - {}\n".format(self.test_prefix_fn, test_f_exists))
|
python
|
def make_data_splits(self, max_samples: int) -> None:
""" Splits the utterances into training, validation and test sets."""
train_f_exists = self.train_prefix_fn.is_file()
valid_f_exists = self.valid_prefix_fn.is_file()
test_f_exists = self.test_prefix_fn.is_file()
if train_f_exists and valid_f_exists and test_f_exists:
logger.debug("Split for training, validation and tests specified by files")
self.train_prefixes = self.read_prefixes(self.train_prefix_fn)
self.valid_prefixes = self.read_prefixes(self.valid_prefix_fn)
self.test_prefixes = self.read_prefixes(self.test_prefix_fn)
return
# Otherwise we now need to load prefixes for other cases addressed
# below
prefixes = self.determine_prefixes()
prefixes = utils.filter_by_size(
self.feat_dir, prefixes, self.feat_type, max_samples)
if not train_f_exists and not valid_f_exists and not test_f_exists:
logger.debug("No files supplied to define the split for training, validation"
" and tests. Using default.")
train_prefixes, valid_prefixes, test_prefixes = self.divide_prefixes(prefixes)
self.train_prefixes = train_prefixes
self.valid_prefixes = valid_prefixes
self.test_prefixes = test_prefixes
self.write_prefixes(train_prefixes, self.train_prefix_fn)
self.write_prefixes(valid_prefixes, self.valid_prefix_fn)
self.write_prefixes(test_prefixes, self.test_prefix_fn)
elif not train_f_exists and valid_f_exists and test_f_exists:
# Then we just make all other prefixes training prefixes.
self.valid_prefixes = self.read_prefixes(self.valid_prefix_fn)
self.test_prefixes = self.read_prefixes(self.test_prefix_fn)
train_prefixes = list(
set(prefixes) - set(self.valid_prefixes))
self.train_prefixes = list(
set(train_prefixes) - set(self.test_prefixes))
self.write_prefixes(self.train_prefixes, self.train_prefix_fn)
else:
raise NotImplementedError(
"The following case has not been implemented:" +
"{} exists - {}\n".format(self.train_prefix_fn, train_f_exists) +
"{} exists - {}\n".format(self.valid_prefix_fn, valid_f_exists) +
"{} exists - {}\n".format(self.test_prefix_fn, test_f_exists))
|
[
"def",
"make_data_splits",
"(",
"self",
",",
"max_samples",
":",
"int",
")",
"->",
"None",
":",
"train_f_exists",
"=",
"self",
".",
"train_prefix_fn",
".",
"is_file",
"(",
")",
"valid_f_exists",
"=",
"self",
".",
"valid_prefix_fn",
".",
"is_file",
"(",
")",
"test_f_exists",
"=",
"self",
".",
"test_prefix_fn",
".",
"is_file",
"(",
")",
"if",
"train_f_exists",
"and",
"valid_f_exists",
"and",
"test_f_exists",
":",
"logger",
".",
"debug",
"(",
"\"Split for training, validation and tests specified by files\"",
")",
"self",
".",
"train_prefixes",
"=",
"self",
".",
"read_prefixes",
"(",
"self",
".",
"train_prefix_fn",
")",
"self",
".",
"valid_prefixes",
"=",
"self",
".",
"read_prefixes",
"(",
"self",
".",
"valid_prefix_fn",
")",
"self",
".",
"test_prefixes",
"=",
"self",
".",
"read_prefixes",
"(",
"self",
".",
"test_prefix_fn",
")",
"return",
"# Otherwise we now need to load prefixes for other cases addressed",
"# below",
"prefixes",
"=",
"self",
".",
"determine_prefixes",
"(",
")",
"prefixes",
"=",
"utils",
".",
"filter_by_size",
"(",
"self",
".",
"feat_dir",
",",
"prefixes",
",",
"self",
".",
"feat_type",
",",
"max_samples",
")",
"if",
"not",
"train_f_exists",
"and",
"not",
"valid_f_exists",
"and",
"not",
"test_f_exists",
":",
"logger",
".",
"debug",
"(",
"\"No files supplied to define the split for training, validation\"",
"\" and tests. Using default.\"",
")",
"train_prefixes",
",",
"valid_prefixes",
",",
"test_prefixes",
"=",
"self",
".",
"divide_prefixes",
"(",
"prefixes",
")",
"self",
".",
"train_prefixes",
"=",
"train_prefixes",
"self",
".",
"valid_prefixes",
"=",
"valid_prefixes",
"self",
".",
"test_prefixes",
"=",
"test_prefixes",
"self",
".",
"write_prefixes",
"(",
"train_prefixes",
",",
"self",
".",
"train_prefix_fn",
")",
"self",
".",
"write_prefixes",
"(",
"valid_prefixes",
",",
"self",
".",
"valid_prefix_fn",
")",
"self",
".",
"write_prefixes",
"(",
"test_prefixes",
",",
"self",
".",
"test_prefix_fn",
")",
"elif",
"not",
"train_f_exists",
"and",
"valid_f_exists",
"and",
"test_f_exists",
":",
"# Then we just make all other prefixes training prefixes.",
"self",
".",
"valid_prefixes",
"=",
"self",
".",
"read_prefixes",
"(",
"self",
".",
"valid_prefix_fn",
")",
"self",
".",
"test_prefixes",
"=",
"self",
".",
"read_prefixes",
"(",
"self",
".",
"test_prefix_fn",
")",
"train_prefixes",
"=",
"list",
"(",
"set",
"(",
"prefixes",
")",
"-",
"set",
"(",
"self",
".",
"valid_prefixes",
")",
")",
"self",
".",
"train_prefixes",
"=",
"list",
"(",
"set",
"(",
"train_prefixes",
")",
"-",
"set",
"(",
"self",
".",
"test_prefixes",
")",
")",
"self",
".",
"write_prefixes",
"(",
"self",
".",
"train_prefixes",
",",
"self",
".",
"train_prefix_fn",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"The following case has not been implemented:\"",
"+",
"\"{} exists - {}\\n\"",
".",
"format",
"(",
"self",
".",
"train_prefix_fn",
",",
"train_f_exists",
")",
"+",
"\"{} exists - {}\\n\"",
".",
"format",
"(",
"self",
".",
"valid_prefix_fn",
",",
"valid_f_exists",
")",
"+",
"\"{} exists - {}\\n\"",
".",
"format",
"(",
"self",
".",
"test_prefix_fn",
",",
"test_f_exists",
")",
")"
] |
Splits the utterances into training, validation and test sets.
|
[
"Splits",
"the",
"utterances",
"into",
"training",
"validation",
"and",
"test",
"sets",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L394-L438
|
13,845
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.divide_prefixes
|
def divide_prefixes(prefixes: List[str], seed: int = 0) -> Tuple[List[str], List[str], List[str]]:
"""Divide data into training, validation and test subsets"""
if len(prefixes) < 3:
raise PersephoneException(
"{} cannot be split into 3 groups as it only has {} items".format(prefixes, len(prefixes))
)
Ratios = namedtuple("Ratios", ["train", "valid", "test"])
ratios = Ratios(.90, .05, .05)
train_end = int(ratios.train*len(prefixes))
valid_end = int(train_end + ratios.valid*len(prefixes))
# We must make sure that at least one element exists in test
if valid_end == len(prefixes):
valid_end -= 1
# If train_end and valid_end are the same we end up with no valid_prefixes
# so we must ensure at least one prefix is placed in this category
if train_end == valid_end:
train_end -= 1
random.seed(seed)
random.shuffle(prefixes)
train_prefixes = prefixes[:train_end]
valid_prefixes = prefixes[train_end:valid_end]
test_prefixes = prefixes[valid_end:]
assert train_prefixes, "Got empty set for training data"
assert valid_prefixes, "Got empty set for validation data"
assert test_prefixes, "Got empty set for testing data"
return train_prefixes, valid_prefixes, test_prefixes
|
python
|
def divide_prefixes(prefixes: List[str], seed: int = 0) -> Tuple[List[str], List[str], List[str]]:
"""Divide data into training, validation and test subsets"""
if len(prefixes) < 3:
raise PersephoneException(
"{} cannot be split into 3 groups as it only has {} items".format(prefixes, len(prefixes))
)
Ratios = namedtuple("Ratios", ["train", "valid", "test"])
ratios = Ratios(.90, .05, .05)
train_end = int(ratios.train*len(prefixes))
valid_end = int(train_end + ratios.valid*len(prefixes))
# We must make sure that at least one element exists in test
if valid_end == len(prefixes):
valid_end -= 1
# If train_end and valid_end are the same we end up with no valid_prefixes
# so we must ensure at least one prefix is placed in this category
if train_end == valid_end:
train_end -= 1
random.seed(seed)
random.shuffle(prefixes)
train_prefixes = prefixes[:train_end]
valid_prefixes = prefixes[train_end:valid_end]
test_prefixes = prefixes[valid_end:]
assert train_prefixes, "Got empty set for training data"
assert valid_prefixes, "Got empty set for validation data"
assert test_prefixes, "Got empty set for testing data"
return train_prefixes, valid_prefixes, test_prefixes
|
[
"def",
"divide_prefixes",
"(",
"prefixes",
":",
"List",
"[",
"str",
"]",
",",
"seed",
":",
"int",
"=",
"0",
")",
"->",
"Tuple",
"[",
"List",
"[",
"str",
"]",
",",
"List",
"[",
"str",
"]",
",",
"List",
"[",
"str",
"]",
"]",
":",
"if",
"len",
"(",
"prefixes",
")",
"<",
"3",
":",
"raise",
"PersephoneException",
"(",
"\"{} cannot be split into 3 groups as it only has {} items\"",
".",
"format",
"(",
"prefixes",
",",
"len",
"(",
"prefixes",
")",
")",
")",
"Ratios",
"=",
"namedtuple",
"(",
"\"Ratios\"",
",",
"[",
"\"train\"",
",",
"\"valid\"",
",",
"\"test\"",
"]",
")",
"ratios",
"=",
"Ratios",
"(",
".90",
",",
".05",
",",
".05",
")",
"train_end",
"=",
"int",
"(",
"ratios",
".",
"train",
"*",
"len",
"(",
"prefixes",
")",
")",
"valid_end",
"=",
"int",
"(",
"train_end",
"+",
"ratios",
".",
"valid",
"*",
"len",
"(",
"prefixes",
")",
")",
"# We must make sure that at least one element exists in test",
"if",
"valid_end",
"==",
"len",
"(",
"prefixes",
")",
":",
"valid_end",
"-=",
"1",
"# If train_end and valid_end are the same we end up with no valid_prefixes",
"# so we must ensure at least one prefix is placed in this category",
"if",
"train_end",
"==",
"valid_end",
":",
"train_end",
"-=",
"1",
"random",
".",
"seed",
"(",
"seed",
")",
"random",
".",
"shuffle",
"(",
"prefixes",
")",
"train_prefixes",
"=",
"prefixes",
"[",
":",
"train_end",
"]",
"valid_prefixes",
"=",
"prefixes",
"[",
"train_end",
":",
"valid_end",
"]",
"test_prefixes",
"=",
"prefixes",
"[",
"valid_end",
":",
"]",
"assert",
"train_prefixes",
",",
"\"Got empty set for training data\"",
"assert",
"valid_prefixes",
",",
"\"Got empty set for validation data\"",
"assert",
"test_prefixes",
",",
"\"Got empty set for testing data\"",
"return",
"train_prefixes",
",",
"valid_prefixes",
",",
"test_prefixes"
] |
Divide data into training, validation and test subsets
|
[
"Divide",
"data",
"into",
"training",
"validation",
"and",
"test",
"subsets"
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L464-L495
|
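For a concrete feel of the 90/5/5 split, here is the same arithmetic on a toy list of 20 prefixes (the seeded shuffle makes it deterministic; the end-point adjustments above do not trigger at this size):

import random

prefixes = ["utt{:02d}".format(i) for i in range(20)]
train_end = int(0.90 * len(prefixes))              # 18
valid_end = int(train_end + 0.05 * len(prefixes))  # 19
random.seed(0)
random.shuffle(prefixes)
train, valid, test = (prefixes[:train_end],
                      prefixes[train_end:valid_end],
                      prefixes[valid_end:])
assert (len(train), len(valid), len(test)) == (18, 1, 1)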
13,846
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.indices_to_labels
|
def indices_to_labels(self, indices: Sequence[int]) -> List[str]:
""" Converts a sequence of indices into their corresponding labels."""
return [(self.INDEX_TO_LABEL[index]) for index in indices]
|
python
|
def indices_to_labels(self, indices: Sequence[int]) -> List[str]:
""" Converts a sequence of indices into their corresponding labels."""
return [(self.INDEX_TO_LABEL[index]) for index in indices]
|
[
"def",
"indices_to_labels",
"(",
"self",
",",
"indices",
":",
"Sequence",
"[",
"int",
"]",
")",
"->",
"List",
"[",
"str",
"]",
":",
"return",
"[",
"(",
"self",
".",
"INDEX_TO_LABEL",
"[",
"index",
"]",
")",
"for",
"index",
"in",
"indices",
"]"
] |
Converts a sequence of indices into their corresponding labels.
|
[
"Converts",
"a",
"sequence",
"of",
"indices",
"into",
"their",
"corresponding",
"labels",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L497-L500
|
13,847
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.labels_to_indices
|
def labels_to_indices(self, labels: Sequence[str]) -> List[int]:
""" Converts a sequence of labels into their corresponding indices."""
return [self.LABEL_TO_INDEX[label] for label in labels]
|
python
|
def labels_to_indices(self, labels: Sequence[str]) -> List[int]:
""" Converts a sequence of labels into their corresponding indices."""
return [self.LABEL_TO_INDEX[label] for label in labels]
|
[
"def",
"labels_to_indices",
"(",
"self",
",",
"labels",
":",
"Sequence",
"[",
"str",
"]",
")",
"->",
"List",
"[",
"int",
"]",
":",
"return",
"[",
"self",
".",
"LABEL_TO_INDEX",
"[",
"label",
"]",
"for",
"label",
"in",
"labels",
"]"
] |
Converts a sequence of labels into their corresponding indices.
|
[
"Converts",
"a",
"sequence",
"of",
"labels",
"into",
"their",
"corresponding",
"indices",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L502-L505
|
13,848
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.num_feats
|
def num_feats(self):
""" The number of features per time step in the corpus. """
if not self._num_feats:
filename = self.get_train_fns()[0][0]
feats = np.load(filename)
# pylint: disable=maybe-no-member
if len(feats.shape) == 3:
# Then there are multiple channels of multiple feats
self._num_feats = feats.shape[1] * feats.shape[2]
elif len(feats.shape) == 2:
# Otherwise it is just of shape time x feats
self._num_feats = feats.shape[1]
else:
raise ValueError(
"Feature matrix of shape %s unexpected" % str(feats.shape))
return self._num_feats
|
python
|
def num_feats(self):
""" The number of features per time step in the corpus. """
if not self._num_feats:
filename = self.get_train_fns()[0][0]
feats = np.load(filename)
# pylint: disable=maybe-no-member
if len(feats.shape) == 3:
# Then there are multiple channels of multiple feats
self._num_feats = feats.shape[1] * feats.shape[2]
elif len(feats.shape) == 2:
# Otherwise it is just of shape time x feats
self._num_feats = feats.shape[1]
else:
raise ValueError(
"Feature matrix of shape %s unexpected" % str(feats.shape))
return self._num_feats
|
[
"def",
"num_feats",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_num_feats",
":",
"filename",
"=",
"self",
".",
"get_train_fns",
"(",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"feats",
"=",
"np",
".",
"load",
"(",
"filename",
")",
"# pylint: disable=maybe-no-member",
"if",
"len",
"(",
"feats",
".",
"shape",
")",
"==",
"3",
":",
"# Then there are multiple channels of multiple feats",
"self",
".",
"_num_feats",
"=",
"feats",
".",
"shape",
"[",
"1",
"]",
"*",
"feats",
".",
"shape",
"[",
"2",
"]",
"elif",
"len",
"(",
"feats",
".",
"shape",
")",
"==",
"2",
":",
"# Otherwise it is just of shape time x feats",
"self",
".",
"_num_feats",
"=",
"feats",
".",
"shape",
"[",
"1",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Feature matrix of shape %s unexpected\"",
"%",
"str",
"(",
"feats",
".",
"shape",
")",
")",
"return",
"self",
".",
"_num_feats"
] |
The number of features per time step in the corpus.
|
[
"The",
"number",
"of",
"features",
"per",
"time",
"step",
"in",
"the",
"corpus",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L508-L523
|
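The branch on `feats.shape` above flattens (time, channels, coefficients) arrays into a single per-frame feature count; a standalone sketch with toy shapes:

import numpy as np

multi = np.zeros((100, 3, 41))  # time x channels x feats
mono = np.zeros((100, 41))      # time x feats
num_feats = (multi.shape[1] * multi.shape[2] if len(multi.shape) == 3
             else multi.shape[1])
assert num_feats == 123
assert mono.shape[1] == 41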
13,849
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.prefixes_to_fns
|
def prefixes_to_fns(self, prefixes: List[str]) -> Tuple[List[str], List[str]]:
""" Fetches the file paths to the features files and labels files
corresponding to the provided list of features"""
# TODO Return pathlib.Paths
feat_fns = [str(self.feat_dir / ("%s.%s.npy" % (prefix, self.feat_type)))
for prefix in prefixes]
label_fns = [str(self.label_dir / ("%s.%s" % (prefix, self.label_type)))
for prefix in prefixes]
return feat_fns, label_fns
|
python
|
def prefixes_to_fns(self, prefixes: List[str]) -> Tuple[List[str], List[str]]:
""" Fetches the file paths to the features files and labels files
corresponding to the provided list of features"""
# TODO Return pathlib.Paths
feat_fns = [str(self.feat_dir / ("%s.%s.npy" % (prefix, self.feat_type)))
for prefix in prefixes]
label_fns = [str(self.label_dir / ("%s.%s" % (prefix, self.label_type)))
for prefix in prefixes]
return feat_fns, label_fns
|
[
"def",
"prefixes_to_fns",
"(",
"self",
",",
"prefixes",
":",
"List",
"[",
"str",
"]",
")",
"->",
"Tuple",
"[",
"List",
"[",
"str",
"]",
",",
"List",
"[",
"str",
"]",
"]",
":",
"# TODO Return pathlib.Paths",
"feat_fns",
"=",
"[",
"str",
"(",
"self",
".",
"feat_dir",
"/",
"(",
"\"%s.%s.npy\"",
"%",
"(",
"prefix",
",",
"self",
".",
"feat_type",
")",
")",
")",
"for",
"prefix",
"in",
"prefixes",
"]",
"label_fns",
"=",
"[",
"str",
"(",
"self",
".",
"label_dir",
"/",
"(",
"\"%s.%s\"",
"%",
"(",
"prefix",
",",
"self",
".",
"label_type",
")",
")",
")",
"for",
"prefix",
"in",
"prefixes",
"]",
"return",
"feat_fns",
",",
"label_fns"
] |
Fetches the file paths to the feature files and label files
corresponding to the provided list of prefixes.
|
[
"Fetches",
"the",
"file",
"paths",
"to",
"the",
"features",
"files",
"and",
"labels",
"files",
"corresponding",
"to",
"the",
"provided",
"list",
"of",
"features"
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L525-L533
|
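The filename patterns built by `prefixes_to_fns` follow the `<prefix>.<feat_type>.npy` and `<prefix>.<label_type>` conventions; reproduced standalone with pathlib and hypothetical values:

from pathlib import Path

feat_dir, label_dir = Path("tgt/feat"), Path("tgt/label")
prefix, feat_type, label_type = "utt01", "fbank", "phonemes"
feat_fn = str(feat_dir / "{}.{}.npy".format(prefix, feat_type))
label_fn = str(label_dir / "{}.{}".format(prefix, label_type))
assert feat_fn.endswith("utt01.fbank.npy")
assert label_fn.endswith("utt01.phonemes")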
13,850
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.get_train_fns
|
def get_train_fns(self) -> Tuple[List[str], List[str]]:
""" Fetches the training set of the corpus.
Outputs a Tuple of size 2, where the first element is a list of paths
to input feature files, one per utterance. The second element is a list
of paths to the transcriptions.
"""
return self.prefixes_to_fns(self.train_prefixes)
|
python
|
def get_train_fns(self) -> Tuple[List[str], List[str]]:
""" Fetches the training set of the corpus.
Outputs a Tuple of size 2, where the first element is a list of paths
to input feature files, one per utterance. The second element is a list
of paths to the transcriptions.
"""
return self.prefixes_to_fns(self.train_prefixes)
|
[
"def",
"get_train_fns",
"(",
"self",
")",
"->",
"Tuple",
"[",
"List",
"[",
"str",
"]",
",",
"List",
"[",
"str",
"]",
"]",
":",
"return",
"self",
".",
"prefixes_to_fns",
"(",
"self",
".",
"train_prefixes",
")"
] |
Fetches the training set of the corpus.
Outputs a Tuple of size 2, where the first element is a list of paths
to input feature files, one per utterance. The second element is a list
of paths to the transcriptions.
|
[
"Fetches",
"the",
"training",
"set",
"of",
"the",
"corpus",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L535-L542
|
13,851
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.get_valid_fns
|
def get_valid_fns(self) -> Tuple[List[str], List[str]]:
""" Fetches the validation set of the corpus."""
return self.prefixes_to_fns(self.valid_prefixes)
|
python
|
def get_valid_fns(self) -> Tuple[List[str], List[str]]:
""" Fetches the validation set of the corpus."""
return self.prefixes_to_fns(self.valid_prefixes)
|
[
"def",
"get_valid_fns",
"(",
"self",
")",
"->",
"Tuple",
"[",
"List",
"[",
"str",
"]",
",",
"List",
"[",
"str",
"]",
"]",
":",
"return",
"self",
".",
"prefixes_to_fns",
"(",
"self",
".",
"valid_prefixes",
")"
] |
Fetches the validation set of the corpus.
|
[
"Fetches",
"the",
"validation",
"set",
"of",
"the",
"corpus",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L544-L546
|
13,852
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.review
|
def review(self) -> None:
""" Used to play the WAV files and compare with the transcription. """
for prefix in self.determine_prefixes():
print("Utterance: {}".format(prefix))
wav_fn = self.feat_dir / "{}.wav".format(prefix)
label_fn = self.label_dir / "{}.{}".format(prefix, self.label_type)
with label_fn.open() as f:
transcript = f.read().strip()
print("Transcription: {}".format(transcript))
subprocess.run(["play", str(wav_fn)])
|
python
|
def review(self) -> None:
""" Used to play the WAV files and compare with the transcription. """
for prefix in self.determine_prefixes():
print("Utterance: {}".format(prefix))
wav_fn = self.feat_dir / "{}.wav".format(prefix)
label_fn = self.label_dir / "{}.{}".format(prefix, self.label_type)
with label_fn.open() as f:
transcript = f.read().strip()
print("Transcription: {}".format(transcript))
subprocess.run(["play", str(wav_fn)])
|
[
"def",
"review",
"(",
"self",
")",
"->",
"None",
":",
"for",
"prefix",
"in",
"self",
".",
"determine_prefixes",
"(",
")",
":",
"print",
"(",
"\"Utterance: {}\"",
".",
"format",
"(",
"prefix",
")",
")",
"wav_fn",
"=",
"self",
".",
"feat_dir",
"/",
"\"{}.wav\"",
".",
"format",
"(",
"prefix",
")",
"label_fn",
"=",
"self",
".",
"label_dir",
"/",
"\"{}.{}\"",
".",
"format",
"(",
"prefix",
",",
"self",
".",
"label_type",
")",
"with",
"label_fn",
".",
"open",
"(",
")",
"as",
"f",
":",
"transcript",
"=",
"f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"print",
"(",
"\"Transcription: {}\"",
".",
"format",
"(",
"transcript",
")",
")",
"subprocess",
".",
"run",
"(",
"[",
"\"play\"",
",",
"str",
"(",
"wav_fn",
")",
"]",
")"
] |
Used to play the WAV files and compare with the transcription.
|
[
"Used",
"to",
"play",
"the",
"WAV",
"files",
"and",
"compare",
"with",
"the",
"transcription",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L589-L599
|
13,853
|
persephone-tools/persephone
|
persephone/corpus.py
|
Corpus.pickle
|
def pickle(self) -> None:
""" Pickles the Corpus object in a file in tgt_dir. """
pickle_path = self.tgt_dir / "corpus.p"
logger.debug("pickling %r object and saving it to path %s", self, pickle_path)
with pickle_path.open("wb") as f:
pickle.dump(self, f)
|
python
|
def pickle(self) -> None:
""" Pickles the Corpus object in a file in tgt_dir. """
pickle_path = self.tgt_dir / "corpus.p"
logger.debug("pickling %r object and saving it to path %s", self, pickle_path)
with pickle_path.open("wb") as f:
pickle.dump(self, f)
|
[
"def",
"pickle",
"(",
"self",
")",
"->",
"None",
":",
"pickle_path",
"=",
"self",
".",
"tgt_dir",
"/",
"\"corpus.p\"",
"logger",
".",
"debug",
"(",
"\"pickling %r object and saving it to path %s\"",
",",
"self",
",",
"pickle_path",
")",
"with",
"pickle_path",
".",
"open",
"(",
"\"wb\"",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"self",
",",
"f",
")"
] |
Pickles the Corpus object in a file in tgt_dir.
|
[
"Pickles",
"the",
"Corpus",
"object",
"in",
"a",
"file",
"in",
"tgt_dir",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus.py#L601-L607
|
13,854
|
persephone-tools/persephone
|
persephone/utils.py
|
zero_pad
|
def zero_pad(matrix, to_length):
""" Zero pads along the 0th dimension to make sure the utterance array
matrix is of length to_length."""
if not matrix.shape[0] <= to_length:
logger.error("zero_pad cannot be performed on matrix with shape {}"
" to length {}".format(matrix.shape[0], to_length))
raise ValueError
result = np.zeros((to_length,) + matrix.shape[1:])
result[:matrix.shape[0]] = matrix
return result
|
python
|
def zero_pad(matrix, to_length):
""" Zero pads along the 0th dimension to make sure the utterance array
matrix is of length to_length."""
if not matrix.shape[0] <= to_length:
logger.error("zero_pad cannot be performed on matrix with shape {}"
" to length {}".format(matrix.shape[0], to_length))
raise ValueError
result = np.zeros((to_length,) + matrix.shape[1:])
result[:matrix.shape[0]] = matrix
return result
|
[
"def",
"zero_pad",
"(",
"matrix",
",",
"to_length",
")",
":",
"assert",
"matrix",
".",
"shape",
"[",
"0",
"]",
"<=",
"to_length",
"if",
"not",
"matrix",
".",
"shape",
"[",
"0",
"]",
"<=",
"to_length",
":",
"logger",
".",
"error",
"(",
"\"zero_pad cannot be performed on matrix with shape {}\"",
"\" to length {}\"",
".",
"format",
"(",
"matrix",
".",
"shape",
"[",
"0",
"]",
",",
"to_length",
")",
")",
"raise",
"ValueError",
"result",
"=",
"np",
".",
"zeros",
"(",
"(",
"to_length",
",",
")",
"+",
"matrix",
".",
"shape",
"[",
"1",
":",
"]",
")",
"result",
"[",
":",
"matrix",
".",
"shape",
"[",
"0",
"]",
"]",
"=",
"matrix",
"return",
"result"
] |
Zero pads along the 0th dimension to make sure the utterance array
matrix is of length to_length.
|
[
"Zero",
"pads",
"along",
"the",
"0th",
"dimension",
"to",
"make",
"sure",
"the",
"utterance",
"array",
"x",
"is",
"of",
"length",
"to_length",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utils.py#L58-L69
|
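The padding arithmetic in `zero_pad` can be checked standalone on a small matrix (new rows of zeros are appended along axis 0):

import numpy as np

matrix = np.ones((3, 4))
to_length = 5
result = np.zeros((to_length,) + matrix.shape[1:])
result[:matrix.shape[0]] = matrix
assert result.shape == (5, 4)
assert result[3:].sum() == 0  # the two padded rows are all zeros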
13,855
|
persephone-tools/persephone
|
persephone/utils.py
|
load_batch_x
|
def load_batch_x(path_batch,
flatten=False,
time_major=False):
""" Loads a batch of input features given a list of paths to numpy
arrays in that batch."""
utterances = [np.load(str(path)) for path in path_batch]
utter_lens = [utterance.shape[0] for utterance in utterances]
max_len = max(utter_lens)
batch_size = len(path_batch)
shape = (batch_size, max_len) + tuple(utterances[0].shape[1:])
batch = np.zeros(shape)
for i, utt in enumerate(utterances):
batch[i] = zero_pad(utt, max_len)
if flatten:
batch = collapse(batch, time_major=time_major)
return batch, np.array(utter_lens)
|
python
|
def load_batch_x(path_batch,
flatten=False,
time_major=False):
""" Loads a batch of input features given a list of paths to numpy
arrays in that batch."""
utterances = [np.load(str(path)) for path in path_batch]
utter_lens = [utterance.shape[0] for utterance in utterances]
max_len = max(utter_lens)
batch_size = len(path_batch)
shape = (batch_size, max_len) + tuple(utterances[0].shape[1:])
batch = np.zeros(shape)
for i, utt in enumerate(utterances):
batch[i] = zero_pad(utt, max_len)
if flatten:
batch = collapse(batch, time_major=time_major)
return batch, np.array(utter_lens)
|
[
"def",
"load_batch_x",
"(",
"path_batch",
",",
"flatten",
"=",
"False",
",",
"time_major",
"=",
"False",
")",
":",
"utterances",
"=",
"[",
"np",
".",
"load",
"(",
"str",
"(",
"path",
")",
")",
"for",
"path",
"in",
"path_batch",
"]",
"utter_lens",
"=",
"[",
"utterance",
".",
"shape",
"[",
"0",
"]",
"for",
"utterance",
"in",
"utterances",
"]",
"max_len",
"=",
"max",
"(",
"utter_lens",
")",
"batch_size",
"=",
"len",
"(",
"path_batch",
")",
"shape",
"=",
"(",
"batch_size",
",",
"max_len",
")",
"+",
"tuple",
"(",
"utterances",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
":",
"]",
")",
"batch",
"=",
"np",
".",
"zeros",
"(",
"shape",
")",
"for",
"i",
",",
"utt",
"in",
"enumerate",
"(",
"utterances",
")",
":",
"batch",
"[",
"i",
"]",
"=",
"zero_pad",
"(",
"utt",
",",
"max_len",
")",
"if",
"flatten",
":",
"batch",
"=",
"collapse",
"(",
"batch",
",",
"time_major",
"=",
"time_major",
")",
"return",
"batch",
",",
"np",
".",
"array",
"(",
"utter_lens",
")"
] |
Loads a batch of input features given a list of paths to numpy
arrays in that batch.
|
[
"Loads",
"a",
"batch",
"of",
"input",
"features",
"given",
"a",
"list",
"of",
"paths",
"to",
"numpy",
"arrays",
"in",
"that",
"batch",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utils.py#L88-L104
|
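The pad-to-max-length step in `load_batch_x` can be sketched without the file I/O, using two in-memory utterances of different lengths (the slice assignment has the same effect as the `zero_pad` call above):

import numpy as np

utterances = [np.ones((3, 2)), np.ones((5, 2))]  # (time, feats) arrays
max_len = max(u.shape[0] for u in utterances)    # 5
batch = np.zeros((len(utterances), max_len) + utterances[0].shape[1:])
for i, utt in enumerate(utterances):
    batch[i, :utt.shape[0]] = utt
assert batch.shape == (2, 5, 2)
assert batch[0, 3:].sum() == 0  # shorter utterance is zero-padded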
13,856
|
persephone-tools/persephone
|
persephone/utils.py
|
batch_per
|
def batch_per(hyps: Sequence[Sequence[T]],
refs: Sequence[Sequence[T]]) -> float:
""" Calculates the phoneme error rate of a batch."""
macro_per = 0.0
for i in range(len(hyps)):
ref = [phn_i for phn_i in refs[i] if phn_i != 0]
hyp = [phn_i for phn_i in hyps[i] if phn_i != 0]
macro_per += distance.edit_distance(ref, hyp)/len(ref)
return macro_per/len(hyps)
|
python
|
def batch_per(hyps: Sequence[Sequence[T]],
refs: Sequence[Sequence[T]]) -> float:
""" Calculates the phoneme error rate of a batch."""
macro_per = 0.0
for i in range(len(hyps)):
ref = [phn_i for phn_i in refs[i] if phn_i != 0]
hyp = [phn_i for phn_i in hyps[i] if phn_i != 0]
macro_per += distance.edit_distance(ref, hyp)/len(ref)
return macro_per/len(hyps)
|
[
"def",
"batch_per",
"(",
"hyps",
":",
"Sequence",
"[",
"Sequence",
"[",
"T",
"]",
"]",
",",
"refs",
":",
"Sequence",
"[",
"Sequence",
"[",
"T",
"]",
"]",
")",
"->",
"float",
":",
"macro_per",
"=",
"0.0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"hyps",
")",
")",
":",
"ref",
"=",
"[",
"phn_i",
"for",
"phn_i",
"in",
"refs",
"[",
"i",
"]",
"if",
"phn_i",
"!=",
"0",
"]",
"hyp",
"=",
"[",
"phn_i",
"for",
"phn_i",
"in",
"hyps",
"[",
"i",
"]",
"if",
"phn_i",
"!=",
"0",
"]",
"macro_per",
"+=",
"distance",
".",
"edit_distance",
"(",
"ref",
",",
"hyp",
")",
"/",
"len",
"(",
"ref",
")",
"return",
"macro_per",
"/",
"len",
"(",
"hyps",
")"
] |
Calculates the phoneme error rate of a batch.
|
[
"Calculates",
"the",
"phoneme",
"error",
"rate",
"of",
"a",
"batch",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utils.py#L106-L115
|
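`batch_per` averages the per-utterance normalized edit distance after stripping the pad index 0. A self-contained sketch, with a small Levenshtein helper standing in for the `distance.edit_distance` call used above (hypothetical stand-in, not the library function):

def edit_distance(ref, hyp):
    # Plain dynamic-programming Levenshtein distance.
    d = [[i + j if i * j == 0 else 0 for j in range(len(hyp) + 1)]
         for i in range(len(ref) + 1)]
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1,
                          d[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1]))
    return d[len(ref)][len(hyp)]

refs = [[1, 2, 3, 0, 0], [4, 5, 0]]  # 0 is the pad index
hyps = [[1, 2, 4, 0, 0], [4, 5, 0]]
macro_per = 0.0
for ref, hyp in zip(refs, hyps):
    ref = [p for p in ref if p != 0]
    hyp = [p for p in hyp if p != 0]
    macro_per += edit_distance(ref, hyp) / len(ref)
print(macro_per / len(refs))  # 0.1666...: one error in three phonemes, averaged over two utterances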
13,857
|
persephone-tools/persephone
|
persephone/utils.py
|
filter_by_size
|
def filter_by_size(feat_dir: Path, prefixes: List[str], feat_type: str,
max_samples: int) -> List[str]:
""" Sorts the files by their length and returns those with less
than or equal to max_samples length. Returns the filename prefixes of
those files. The main job of the method is to filter, but the sorting
may give better efficiency when doing dynamic batching unless it gets
shuffled downstream.
"""
# TODO Tell the user what utterances we are removing.
prefix_lens = get_prefix_lens(Path(feat_dir), prefixes, feat_type)
prefixes = [prefix for prefix, length in prefix_lens
if length <= max_samples]
return prefixes
|
python
|
def filter_by_size(feat_dir: Path, prefixes: List[str], feat_type: str,
max_samples: int) -> List[str]:
""" Sorts the files by their length and returns those with less
than or equal to max_samples length. Returns the filename prefixes of
those files. The main job of the method is to filter, but the sorting
may give better efficiency when doing dynamic batching unless it gets
shuffled downstream.
"""
# TODO Tell the user what utterances we are removing.
prefix_lens = get_prefix_lens(Path(feat_dir), prefixes, feat_type)
prefixes = [prefix for prefix, length in prefix_lens
if length <= max_samples]
return prefixes
|
[
"def",
"filter_by_size",
"(",
"feat_dir",
":",
"Path",
",",
"prefixes",
":",
"List",
"[",
"str",
"]",
",",
"feat_type",
":",
"str",
",",
"max_samples",
":",
"int",
")",
"->",
"List",
"[",
"str",
"]",
":",
"# TODO Tell the user what utterances we are removing.",
"prefix_lens",
"=",
"get_prefix_lens",
"(",
"Path",
"(",
"feat_dir",
")",
",",
"prefixes",
",",
"feat_type",
")",
"prefixes",
"=",
"[",
"prefix",
"for",
"prefix",
",",
"length",
"in",
"prefix_lens",
"if",
"length",
"<=",
"max_samples",
"]",
"return",
"prefixes"
] |
Sorts the files by their length and returns those whose length
is at most max_samples. Returns the filename prefixes of
those files. The main job of the method is to filter, but the sorting
may give better efficiency when doing dynamic batching unless it gets
shuffled downstream.
|
[
"Sorts",
"the",
"files",
"by",
"their",
"length",
"and",
"returns",
"those",
"with",
"less",
"than",
"or",
"equal",
"to",
"max_samples",
"length",
".",
"Returns",
"the",
"filename",
"prefixes",
"of",
"those",
"files",
".",
"The",
"main",
"job",
"of",
"the",
"method",
"is",
"to",
"filter",
"but",
"the",
"sorting",
"may",
"give",
"better",
"efficiency",
"when",
"doing",
"dynamic",
"batching",
"unless",
"it",
"gets",
"shuffled",
"downstream",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utils.py#L141-L154
|
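The filtering itself reduces to a comprehension over (prefix, length) pairs; assuming `get_prefix_lens` yields such pairs sorted by length, the core logic looks like this standalone:

prefix_lens = [("utt03", 120), ("utt01", 480), ("utt02", 1500)]
max_samples = 1000
kept = [prefix for prefix, length in prefix_lens if length <= max_samples]
assert kept == ["utt03", "utt01"]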
13,858
|
persephone-tools/persephone
|
persephone/utils.py
|
wav_length
|
def wav_length(fn: str) -> float:
""" Returns the length of the WAV file in seconds."""
args = [config.SOX_PATH, fn, "-n", "stat"]
p = subprocess.Popen(
args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
length_line = str(p.communicate()[1]).split("\\n")[1].split()
print(length_line)
assert length_line[0] == "Length"
return float(length_line[-1])
|
python
|
def wav_length(fn: str) -> float:
""" Returns the length of the WAV file in seconds."""
args = [config.SOX_PATH, fn, "-n", "stat"]
p = subprocess.Popen(
args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
length_line = str(p.communicate()[1]).split("\\n")[1].split()
print(length_line)
assert length_line[0] == "Length"
return float(length_line[-1])
|
[
"def",
"wav_length",
"(",
"fn",
":",
"str",
")",
"->",
"float",
":",
"args",
"=",
"[",
"config",
".",
"SOX_PATH",
",",
"fn",
",",
"\"-n\"",
",",
"\"stat\"",
"]",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"stdin",
"=",
"PIPE",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
"length_line",
"=",
"str",
"(",
"p",
".",
"communicate",
"(",
")",
"[",
"1",
"]",
")",
".",
"split",
"(",
"\"\\\\n\"",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"print",
"(",
"length_line",
")",
"assert",
"length_line",
"[",
"0",
"]",
"==",
"\"Length\"",
"return",
"float",
"(",
"length_line",
"[",
"-",
"1",
"]",
")"
] |
Returns the length of the WAV file in seconds.
|
[
"Returns",
"the",
"length",
"of",
"the",
"WAV",
"file",
"in",
"seconds",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/utils.py#L170-L179
|
13,859
|
persephone-tools/persephone
|
persephone/datasets/bkw.py
|
pull_en_words
|
def pull_en_words() -> None:
""" Fetches a repository containing English words. """
ENGLISH_WORDS_URL = "https://github.com/dwyl/english-words.git"
en_words_path = Path(config.EN_WORDS_PATH)
if not en_words_path.is_file():
subprocess.run(["git", "clone",
ENGLISH_WORDS_URL, str(en_words_path.parent)])
|
python
|
def pull_en_words() -> None:
""" Fetches a repository containing English words. """
ENGLISH_WORDS_URL = "https://github.com/dwyl/english-words.git"
en_words_path = Path(config.EN_WORDS_PATH)
if not en_words_path.is_file():
subprocess.run(["git", "clone",
ENGLISH_WORDS_URL, str(en_words_path.parent)])
|
[
"def",
"pull_en_words",
"(",
")",
"->",
"None",
":",
"ENGLISH_WORDS_URL",
"=",
"\"https://github.com/dwyl/english-words.git\"",
"en_words_path",
"=",
"Path",
"(",
"config",
".",
"EN_WORDS_PATH",
")",
"if",
"not",
"en_words_path",
".",
"is_file",
"(",
")",
":",
"subprocess",
".",
"run",
"(",
"[",
"\"git\"",
",",
"\"clone\"",
",",
"ENGLISH_WORDS_URL",
",",
"str",
"(",
"en_words_path",
".",
"parent",
")",
"]",
")"
] |
Fetches a repository containing English words.
|
[
"Fetches",
"a",
"repository",
"containing",
"English",
"words",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/bkw.py#L27-L34
|
13,860
|
persephone-tools/persephone
|
persephone/datasets/bkw.py
|
get_en_words
|
def get_en_words() -> Set[str]:
"""
Returns a set of English words which can be used to filter out
code-switched sentences.
"""
pull_en_words()
with open(config.EN_WORDS_PATH) as words_f:
raw_words = words_f.readlines()
en_words = set([word.strip().lower() for word in raw_words])
NA_WORDS_IN_EN_DICT = set(["kore", "nani", "karri", "imi", "o", "yaw", "i",
"bi", "aye", "imi", "ane", "kubba", "kab", "a-",
"ad", "a", "mak", "selim", "ngai", "en", "yo",
"wud", "mani", "yak", "manu", "ka-", "mong",
"manga", "ka-", "mane", "kala", "name", "kayo",
"kare", "laik", "bale", "ni", "rey", "bu",
"re", "iman", "bom", "wam",
"alu", "nan", "kure", "kuri", "wam", "ka", "ng",
"yi", "na", "m", "arri", "e", "kele", "arri", "nga",
"kakan", "ai", "ning", "mala", "ti", "wolk",
"bo", "andi", "ken", "ba", "aa", "kun", "bini",
"wo", "bim", "man", "bord", "al", "mah", "won",
"ku", "ay", "belen", "wen", "yah", "muni",
"bah", "di", "mm", "anu", "nane", "ma", "kum",
"birri", "ray", "h", "kane", "mumu", "bi", "ah",
"i-", "n", "mi", "bedman", "rud", "le", "babu",
"da", "kakkak", "yun", "ande", "naw", "kam", "bolk",
"woy", "u", "bi-",
])
EN_WORDS_NOT_IN_EN_DICT = set(["screenprinting"])
en_words = en_words.difference(NA_WORDS_IN_EN_DICT)
en_words = en_words | EN_WORDS_NOT_IN_EN_DICT
return en_words
|
python
|
def get_en_words() -> Set[str]:
"""
Returns a set of English words which can be used to filter out
code-switched sentences.
"""
pull_en_words()
with open(config.EN_WORDS_PATH) as words_f:
raw_words = words_f.readlines()
en_words = set([word.strip().lower() for word in raw_words])
NA_WORDS_IN_EN_DICT = set(["kore", "nani", "karri", "imi", "o", "yaw", "i",
"bi", "aye", "imi", "ane", "kubba", "kab", "a-",
"ad", "a", "mak", "selim", "ngai", "en", "yo",
"wud", "mani", "yak", "manu", "ka-", "mong",
"manga", "ka-", "mane", "kala", "name", "kayo",
"kare", "laik", "bale", "ni", "rey", "bu",
"re", "iman", "bom", "wam",
"alu", "nan", "kure", "kuri", "wam", "ka", "ng",
"yi", "na", "m", "arri", "e", "kele", "arri", "nga",
"kakan", "ai", "ning", "mala", "ti", "wolk",
"bo", "andi", "ken", "ba", "aa", "kun", "bini",
"wo", "bim", "man", "bord", "al", "mah", "won",
"ku", "ay", "belen", "wen", "yah", "muni",
"bah", "di", "mm", "anu", "nane", "ma", "kum",
"birri", "ray", "h", "kane", "mumu", "bi", "ah",
"i-", "n", "mi", "bedman", "rud", "le", "babu",
"da", "kakkak", "yun", "ande", "naw", "kam", "bolk",
"woy", "u", "bi-",
])
EN_WORDS_NOT_IN_EN_DICT = set(["screenprinting"])
en_words = en_words.difference(NA_WORDS_IN_EN_DICT)
en_words = en_words | EN_WORDS_NOT_IN_EN_DICT
return en_words
|
[
"def",
"get_en_words",
"(",
")",
"->",
"Set",
"[",
"str",
"]",
":",
"pull_en_words",
"(",
")",
"with",
"open",
"(",
"config",
".",
"EN_WORDS_PATH",
")",
"as",
"words_f",
":",
"raw_words",
"=",
"words_f",
".",
"readlines",
"(",
")",
"en_words",
"=",
"set",
"(",
"[",
"word",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"for",
"word",
"in",
"raw_words",
"]",
")",
"NA_WORDS_IN_EN_DICT",
"=",
"set",
"(",
"[",
"\"kore\"",
",",
"\"nani\"",
",",
"\"karri\"",
",",
"\"imi\"",
",",
"\"o\"",
",",
"\"yaw\"",
",",
"\"i\"",
",",
"\"bi\"",
",",
"\"aye\"",
",",
"\"imi\"",
",",
"\"ane\"",
",",
"\"kubba\"",
",",
"\"kab\"",
",",
"\"a-\"",
",",
"\"ad\"",
",",
"\"a\"",
",",
"\"mak\"",
",",
"\"selim\"",
",",
"\"ngai\"",
",",
"\"en\"",
",",
"\"yo\"",
",",
"\"wud\"",
",",
"\"mani\"",
",",
"\"yak\"",
",",
"\"manu\"",
",",
"\"ka-\"",
",",
"\"mong\"",
",",
"\"manga\"",
",",
"\"ka-\"",
",",
"\"mane\"",
",",
"\"kala\"",
",",
"\"name\"",
",",
"\"kayo\"",
",",
"\"kare\"",
",",
"\"laik\"",
",",
"\"bale\"",
",",
"\"ni\"",
",",
"\"rey\"",
",",
"\"bu\"",
",",
"\"re\"",
",",
"\"iman\"",
",",
"\"bom\"",
",",
"\"wam\"",
",",
"\"alu\"",
",",
"\"nan\"",
",",
"\"kure\"",
",",
"\"kuri\"",
",",
"\"wam\"",
",",
"\"ka\"",
",",
"\"ng\"",
",",
"\"yi\"",
",",
"\"na\"",
",",
"\"m\"",
",",
"\"arri\"",
",",
"\"e\"",
",",
"\"kele\"",
",",
"\"arri\"",
",",
"\"nga\"",
",",
"\"kakan\"",
",",
"\"ai\"",
",",
"\"ning\"",
",",
"\"mala\"",
",",
"\"ti\"",
",",
"\"wolk\"",
",",
"\"bo\"",
",",
"\"andi\"",
",",
"\"ken\"",
",",
"\"ba\"",
",",
"\"aa\"",
",",
"\"kun\"",
",",
"\"bini\"",
",",
"\"wo\"",
",",
"\"bim\"",
",",
"\"man\"",
",",
"\"bord\"",
",",
"\"al\"",
",",
"\"mah\"",
",",
"\"won\"",
",",
"\"ku\"",
",",
"\"ay\"",
",",
"\"belen\"",
",",
"\"wen\"",
",",
"\"yah\"",
",",
"\"muni\"",
",",
"\"bah\"",
",",
"\"di\"",
",",
"\"mm\"",
",",
"\"anu\"",
",",
"\"nane\"",
",",
"\"ma\"",
",",
"\"kum\"",
",",
"\"birri\"",
",",
"\"ray\"",
",",
"\"h\"",
",",
"\"kane\"",
",",
"\"mumu\"",
",",
"\"bi\"",
",",
"\"ah\"",
",",
"\"i-\"",
",",
"\"n\"",
",",
"\"mi\"",
",",
"\"bedman\"",
",",
"\"rud\"",
",",
"\"le\"",
",",
"\"babu\"",
",",
"\"da\"",
",",
"\"kakkak\"",
",",
"\"yun\"",
",",
"\"ande\"",
",",
"\"naw\"",
",",
"\"kam\"",
",",
"\"bolk\"",
",",
"\"woy\"",
",",
"\"u\"",
",",
"\"bi-\"",
",",
"]",
")",
"EN_WORDS_NOT_IN_EN_DICT",
"=",
"set",
"(",
"[",
"\"screenprinting\"",
"]",
")",
"en_words",
"=",
"en_words",
".",
"difference",
"(",
"NA_WORDS_IN_EN_DICT",
")",
"en_words",
"=",
"en_words",
"|",
"EN_WORDS_NOT_IN_EN_DICT",
"return",
"en_words"
] |
Returns a set of English words which can be used to filter out
code-switched sentences.
|
[
"Returns",
"a",
"list",
"of",
"English",
"words",
"which",
"can",
"be",
"used",
"to",
"filter",
"out",
"code",
"-",
"switched",
"sentences",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/bkw.py#L36-L69
|
13,861
|
persephone-tools/persephone
|
persephone/datasets/bkw.py
|
explore_elan_files
|
def explore_elan_files(elan_paths):
"""
A function to explore the tiers of ELAN files.
"""
for elan_path in elan_paths:
print(elan_path)
eafob = Eaf(elan_path)
tier_names = eafob.get_tier_names()
for tier in tier_names:
print("\t", tier)
try:
for annotation in eafob.get_annotation_data_for_tier(tier):
print("\t\t", annotation)
except KeyError:
continue
input()
|
python
|
def explore_elan_files(elan_paths):
"""
A function to explore the tiers of ELAN files.
"""
for elan_path in elan_paths:
print(elan_path)
eafob = Eaf(elan_path)
tier_names = eafob.get_tier_names()
for tier in tier_names:
print("\t", tier)
try:
for annotation in eafob.get_annotation_data_for_tier(tier):
print("\t\t", annotation)
except KeyError:
continue
input()
|
[
"def",
"explore_elan_files",
"(",
"elan_paths",
")",
":",
"for",
"elan_path",
"in",
"elan_paths",
":",
"print",
"(",
"elan_path",
")",
"eafob",
"=",
"Eaf",
"(",
"elan_path",
")",
"tier_names",
"=",
"eafob",
".",
"get_tier_names",
"(",
")",
"for",
"tier",
"in",
"tier_names",
":",
"print",
"(",
"\"\\t\"",
",",
"tier",
")",
"try",
":",
"for",
"annotation",
"in",
"eafob",
".",
"get_annotation_data_for_tier",
"(",
"tier",
")",
":",
"print",
"(",
"\"\\t\\t\"",
",",
"annotation",
")",
"except",
"KeyError",
":",
"continue",
"input",
"(",
")"
] |
A function to explore the tiers of ELAN files.
|
[
"A",
"function",
"to",
"explore",
"the",
"tiers",
"of",
"ELAN",
"files",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/bkw.py#L73-L90
|
13,862
|
persephone-tools/persephone
|
persephone/preprocess/elan.py
|
sort_annotations
|
def sort_annotations(annotations: List[Tuple[int, int, str]]
) -> List[Tuple[int, int, str]]:
""" Sorts the annotations by their start_time. """
return sorted(annotations, key=lambda x: x[0])
|
python
|
def sort_annotations(annotations: List[Tuple[int, int, str]]
) -> List[Tuple[int, int, str]]:
""" Sorts the annotations by their start_time. """
return sorted(annotations, key=lambda x: x[0])
|
[
"def",
"sort_annotations",
"(",
"annotations",
":",
"List",
"[",
"Tuple",
"[",
"int",
",",
"int",
",",
"str",
"]",
"]",
")",
"->",
"List",
"[",
"Tuple",
"[",
"int",
",",
"int",
",",
"str",
"]",
"]",
":",
"return",
"sorted",
"(",
"annotations",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")"
] |
Sorts the annotations by their start_time.
|
[
"Sorts",
"the",
"annotations",
"by",
"their",
"start_time",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/elan.py#L62-L65
|
13,863
|
persephone-tools/persephone
|
persephone/preprocess/elan.py
|
utterances_from_tier
|
def utterances_from_tier(eafob: Eaf, tier_name: str) -> List[Utterance]:
""" Returns utterances found in the given Eaf object in the given tier."""
try:
speaker = eafob.tiers[tier_name][2]["PARTICIPANT"]
except KeyError:
speaker = None # We don't know the name of the speaker.
tier_utterances = []
annotations = sort_annotations(
list(eafob.get_annotation_data_for_tier(tier_name)))
for i, annotation in enumerate(annotations):
eaf_stem = eafob.eaf_path.stem
utter_id = "{}.{}.{}".format(eaf_stem, tier_name, i)
start_time = eafob.time_origin + annotation[0]
end_time = eafob.time_origin + annotation[1]
text = annotation[2]
utterance = Utterance(eafob.media_path, eafob.eaf_path, utter_id,
start_time, end_time, text, speaker)
tier_utterances.append(utterance)
return tier_utterances
|
python
|
def utterances_from_tier(eafob: Eaf, tier_name: str) -> List[Utterance]:
""" Returns utterances found in the given Eaf object in the given tier."""
try:
speaker = eafob.tiers[tier_name][2]["PARTICIPANT"]
except KeyError:
speaker = None # We don't know the name of the speaker.
tier_utterances = []
annotations = sort_annotations(
list(eafob.get_annotation_data_for_tier(tier_name)))
for i, annotation in enumerate(annotations):
eaf_stem = eafob.eaf_path.stem
utter_id = "{}.{}.{}".format(eaf_stem, tier_name, i)
start_time = eafob.time_origin + annotation[0]
end_time = eafob.time_origin + annotation[1]
text = annotation[2]
utterance = Utterance(eafob.media_path, eafob.eaf_path, utter_id,
start_time, end_time, text, speaker)
tier_utterances.append(utterance)
return tier_utterances
|
[
"def",
"utterances_from_tier",
"(",
"eafob",
":",
"Eaf",
",",
"tier_name",
":",
"str",
")",
"->",
"List",
"[",
"Utterance",
"]",
":",
"try",
":",
"speaker",
"=",
"eafob",
".",
"tiers",
"[",
"tier_name",
"]",
"[",
"2",
"]",
"[",
"\"PARTICIPANT\"",
"]",
"except",
"KeyError",
":",
"speaker",
"=",
"None",
"# We don't know the name of the speaker.",
"tier_utterances",
"=",
"[",
"]",
"annotations",
"=",
"sort_annotations",
"(",
"list",
"(",
"eafob",
".",
"get_annotation_data_for_tier",
"(",
"tier_name",
")",
")",
")",
"for",
"i",
",",
"annotation",
"in",
"enumerate",
"(",
"annotations",
")",
":",
"eaf_stem",
"=",
"eafob",
".",
"eaf_path",
".",
"stem",
"utter_id",
"=",
"\"{}.{}.{}\"",
".",
"format",
"(",
"eaf_stem",
",",
"tier_name",
",",
"i",
")",
"start_time",
"=",
"eafob",
".",
"time_origin",
"+",
"annotation",
"[",
"0",
"]",
"end_time",
"=",
"eafob",
".",
"time_origin",
"+",
"annotation",
"[",
"1",
"]",
"text",
"=",
"annotation",
"[",
"2",
"]",
"utterance",
"=",
"Utterance",
"(",
"eafob",
".",
"media_path",
",",
"eafob",
".",
"eaf_path",
",",
"utter_id",
",",
"start_time",
",",
"end_time",
",",
"text",
",",
"speaker",
")",
"tier_utterances",
".",
"append",
"(",
"utterance",
")",
"return",
"tier_utterances"
] |
Returns utterances found in the given Eaf object in the given tier.
|
[
"Returns",
"utterances",
"found",
"in",
"the",
"given",
"Eaf",
"object",
"in",
"the",
"given",
"tier",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/elan.py#L68-L91
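A usage sketch under stated assumptions: Eaf here is persephone's wrapper (which, as the code above shows, carries eaf_path, media_path and time_origin), and the file path and tier name are placeholders.

eaf = Eaf(Path("recordings/session1.eaf"))
for utter in utterances_from_tier(eaf, "xv"):
    # Each Utterance carries timing (in ms), text and speaker metadata.
    print(utter.start_time, utter.end_time, utter.text, utter.speaker)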
|
13,864
|
persephone-tools/persephone
|
persephone/preprocess/elan.py
|
utterances_from_eaf
|
def utterances_from_eaf(eaf_path: Path, tier_prefixes: Tuple[str, ...]) -> List[Utterance]:
"""
Extracts utterances in tiers that start with tier_prefixes found in the ELAN .eaf XML file
at eaf_path.
For example, if xv@Mark is a tier in the eaf file, and
tier_prefixes = ["xv"], then utterances from that tier will be gathered.
"""
if not eaf_path.is_file():
raise FileNotFoundError("Cannot find {}".format(eaf_path))
eaf = Eaf(eaf_path)
utterances = []
for tier_name in sorted(list(eaf.tiers)): # Sorting for determinism
for tier_prefix in tier_prefixes:
if tier_name.startswith(tier_prefix):
utterances.extend(utterances_from_tier(eaf, tier_name))
break
return utterances
|
python
|
def utterances_from_eaf(eaf_path: Path, tier_prefixes: Tuple[str, ...]) -> List[Utterance]:
"""
Extracts utterances in tiers that start with tier_prefixes found in the ELAN .eaf XML file
at eaf_path.
For example, if xv@Mark is a tier in the eaf file, and
tier_prefixes = ["xv"], then utterances from that tier will be gathered.
"""
if not eaf_path.is_file():
raise FileNotFoundError("Cannot find {}".format(eaf_path))
eaf = Eaf(eaf_path)
utterances = []
for tier_name in sorted(list(eaf.tiers)): # Sorting for determinism
for tier_prefix in tier_prefixes:
if tier_name.startswith(tier_prefix):
utterances.extend(utterances_from_tier(eaf, tier_name))
break
return utterances
|
[
"def",
"utterances_from_eaf",
"(",
"eaf_path",
":",
"Path",
",",
"tier_prefixes",
":",
"Tuple",
"[",
"str",
",",
"...",
"]",
")",
"->",
"List",
"[",
"Utterance",
"]",
":",
"if",
"not",
"eaf_path",
".",
"is_file",
"(",
")",
":",
"raise",
"FileNotFoundError",
"(",
"\"Cannot find {}\"",
".",
"format",
"(",
"eaf_path",
")",
")",
"eaf",
"=",
"Eaf",
"(",
"eaf_path",
")",
"utterances",
"=",
"[",
"]",
"for",
"tier_name",
"in",
"sorted",
"(",
"list",
"(",
"eaf",
".",
"tiers",
")",
")",
":",
"# Sorting for determinism",
"for",
"tier_prefix",
"in",
"tier_prefixes",
":",
"if",
"tier_name",
".",
"startswith",
"(",
"tier_prefix",
")",
":",
"utterances",
".",
"extend",
"(",
"utterances_from_tier",
"(",
"eaf",
",",
"tier_name",
")",
")",
"break",
"return",
"utterances"
] |
Extracts utterances in tiers that start with tier_prefixes found in the ELAN .eaf XML file
at eaf_path.
For example, if xv@Mark is a tier in the eaf file, and
tier_prefixes = ["xv"], then utterances from that tier will be gathered.
|
[
"Extracts",
"utterances",
"in",
"tiers",
"that",
"start",
"with",
"tier_prefixes",
"found",
"in",
"the",
"ELAN",
".",
"eaf",
"XML",
"file",
"at",
"eaf_path",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/elan.py#L94-L113
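Note that the break ensures a tier is extracted at most once even when its name matches several prefixes. A minimal call sketch, with a placeholder path:

utterances = utterances_from_eaf(Path("recordings/session1.eaf"), ("xv", "rf"))
print(len(utterances), "utterances extracted")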
|
13,865
|
persephone-tools/persephone
|
persephone/preprocess/elan.py
|
utterances_from_dir
|
def utterances_from_dir(eaf_dir: Path,
tier_prefixes: Tuple[str, ...]) -> List[Utterance]:
""" Returns the utterances found in ELAN files in a directory.
Recursively explores the directory, gathering ELAN files and extracting
utterances from them for tiers that start with the specified prefixes.
Args:
eaf_dir: A path to the directory to be searched
tier_prefixes: Strings matching the start of ELAN tier names that are to
be extracted. For example, if you want to extract from tiers "xv-Jane"
and "xv-Mark", then tier_prefixes = ["xv"] would do the job.
Returns:
A list of Utterance objects.
"""
logger.info(
"EAF from directory: {}, searching with tier_prefixes {}".format(
eaf_dir, tier_prefixes))
utterances = []
for eaf_path in eaf_dir.glob("**/*.eaf"):
eaf_utterances = utterances_from_eaf(eaf_path, tier_prefixes)
utterances.extend(eaf_utterances)
return utterances
|
python
|
def utterances_from_dir(eaf_dir: Path,
tier_prefixes: Tuple[str, ...]) -> List[Utterance]:
""" Returns the utterances found in ELAN files in a directory.
Recursively explores the directory, gathering ELAN files and extracting
utterances from them for tiers that start with the specified prefixes.
Args:
eaf_dir: A path to the directory to be searched
tier_prefixes: Strings matching the start of ELAN tier names that are to
be extracted. For example, if you want to extract from tiers "xv-Jane"
and "xv-Mark", then tier_prefixes = ["xv"] would do the job.
Returns:
A list of Utterance objects.
"""
logger.info(
"EAF from directory: {}, searching with tier_prefixes {}".format(
eaf_dir, tier_prefixes))
utterances = []
for eaf_path in eaf_dir.glob("**/*.eaf"):
eaf_utterances = utterances_from_eaf(eaf_path, tier_prefixes)
utterances.extend(eaf_utterances)
return utterances
|
[
"def",
"utterances_from_dir",
"(",
"eaf_dir",
":",
"Path",
",",
"tier_prefixes",
":",
"Tuple",
"[",
"str",
",",
"...",
"]",
")",
"->",
"List",
"[",
"Utterance",
"]",
":",
"logger",
".",
"info",
"(",
"\"EAF from directory: {}, searching with tier_prefixes {}\"",
".",
"format",
"(",
"eaf_dir",
",",
"tier_prefixes",
")",
")",
"utterances",
"=",
"[",
"]",
"for",
"eaf_path",
"in",
"eaf_dir",
".",
"glob",
"(",
"\"**/*.eaf\"",
")",
":",
"eaf_utterances",
"=",
"utterances_from_eaf",
"(",
"eaf_path",
",",
"tier_prefixes",
")",
"utterances",
".",
"extend",
"(",
"eaf_utterances",
")",
"return",
"utterances"
] |
Returns the utterances found in ELAN files in a directory.
Recursively explores the directory, gathering ELAN files and extracting
utterances from them for tiers that start with the specified prefixes.
Args:
eaf_dir: A path to the directory to be searched
tier_prefixes: Strings matching the start of ELAN tier names that are to
be extracted. For example, if you want to extract from tiers "xv-Jane"
and "xv-Mark", then tier_prefixes = ["xv"] would do the job.
Returns:
A list of Utterance objects.
|
[
"Returns",
"the",
"utterances",
"found",
"in",
"ELAN",
"files",
"in",
"a",
"directory",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/elan.py#L116-L142
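A call sketch with a placeholder directory; because of the "**/*.eaf" glob, nested session folders are picked up automatically.

utterances = utterances_from_dir(Path("elan_corpus"), tier_prefixes=("xv",))
speakers = {u.speaker for u in utterances}
print(len(utterances), "utterances from", len(speakers), "speakers")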
|
13,866
|
persephone-tools/persephone
|
persephone/corpus_reader.py
|
CorpusReader.load_batch
|
def load_batch(self, fn_batch):
""" Loads a batch with the given prefixes. The prefixes is the full path to the
training example minus the extension.
"""
# TODO Assumes targets are available, which is how its distinct from
# utils.load_batch_x(). These functions need to change names to be
# clearer.
inverse = list(zip(*fn_batch))
feat_fn_batch = inverse[0]
target_fn_batch = inverse[1]
batch_inputs, batch_inputs_lens = utils.load_batch_x(feat_fn_batch,
flatten=False)
batch_targets_list = []
for targets_path in target_fn_batch:
with open(targets_path, encoding=ENCODING) as targets_f:
target_indices = self.corpus.labels_to_indices(targets_f.readline().split())
batch_targets_list.append(target_indices)
batch_targets = utils.target_list_to_sparse_tensor(batch_targets_list)
return batch_inputs, batch_inputs_lens, batch_targets
|
python
|
def load_batch(self, fn_batch):
""" Loads a batch with the given prefixes. The prefixes is the full path to the
training example minus the extension.
"""
# TODO Assumes targets are available, which is how its distinct from
# utils.load_batch_x(). These functions need to change names to be
# clearer.
inverse = list(zip(*fn_batch))
feat_fn_batch = inverse[0]
target_fn_batch = inverse[1]
batch_inputs, batch_inputs_lens = utils.load_batch_x(feat_fn_batch,
flatten=False)
batch_targets_list = []
for targets_path in target_fn_batch:
with open(targets_path, encoding=ENCODING) as targets_f:
target_indices = self.corpus.labels_to_indices(targets_f.readline().split())
batch_targets_list.append(target_indices)
batch_targets = utils.target_list_to_sparse_tensor(batch_targets_list)
return batch_inputs, batch_inputs_lens, batch_targets
|
[
"def",
"load_batch",
"(",
"self",
",",
"fn_batch",
")",
":",
"# TODO Assumes targets are available, which is how its distinct from",
"# utils.load_batch_x(). These functions need to change names to be",
"# clearer.",
"inverse",
"=",
"list",
"(",
"zip",
"(",
"*",
"fn_batch",
")",
")",
"feat_fn_batch",
"=",
"inverse",
"[",
"0",
"]",
"target_fn_batch",
"=",
"inverse",
"[",
"1",
"]",
"batch_inputs",
",",
"batch_inputs_lens",
"=",
"utils",
".",
"load_batch_x",
"(",
"feat_fn_batch",
",",
"flatten",
"=",
"False",
")",
"batch_targets_list",
"=",
"[",
"]",
"for",
"targets_path",
"in",
"target_fn_batch",
":",
"with",
"open",
"(",
"targets_path",
",",
"encoding",
"=",
"ENCODING",
")",
"as",
"targets_f",
":",
"target_indices",
"=",
"self",
".",
"corpus",
".",
"labels_to_indices",
"(",
"targets_f",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
")",
"batch_targets_list",
".",
"append",
"(",
"target_indices",
")",
"batch_targets",
"=",
"utils",
".",
"target_list_to_sparse_tensor",
"(",
"batch_targets_list",
")",
"return",
"batch_inputs",
",",
"batch_inputs_lens",
",",
"batch_targets"
] |
Loads a batch with the given prefixes. The prefixes are the full paths to the
training examples minus the extension.
|
[
"Loads",
"a",
"batch",
"with",
"the",
"given",
"prefixes",
".",
"The",
"prefixes",
"is",
"the",
"full",
"path",
"to",
"the",
"training",
"example",
"minus",
"the",
"extension",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L95-L117
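A sketch of the expected argument shape, assuming reader is an already-constructed CorpusReader and the file paths are placeholders: fn_batch is a list of (feature_file, target_file) pairs, which the zip(*...) call transposes into parallel tuples.

fn_batch = [
    ("feat/utt1.fbank.npy", "label/utt1.phonemes"),
    ("feat/utt2.fbank.npy", "label/utt2.phonemes"),
]
inputs, input_lens, sparse_targets = reader.load_batch(fn_batch)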
|
13,867
|
persephone-tools/persephone
|
persephone/corpus_reader.py
|
CorpusReader.train_batch_gen
|
def train_batch_gen(self) -> Iterator:
""" Returns a generator that outputs batches in the training data."""
if len(self.train_fns) == 0:
raise PersephoneException("""No training data available; cannot
generate training batches.""")
# Create batches of batch_size and shuffle them.
fn_batches = self.make_batches(self.train_fns)
if self.rand:
random.shuffle(fn_batches)
for fn_batch in fn_batches:
logger.debug("Batch of training filenames: %s",
pprint.pformat(fn_batch))
yield self.load_batch(fn_batch)
|
python
|
def train_batch_gen(self) -> Iterator:
""" Returns a generator that outputs batches in the training data."""
if len(self.train_fns) == 0:
raise PersephoneException("""No training data available; cannot
generate training batches.""")
# Create batches of batch_size and shuffle them.
fn_batches = self.make_batches(self.train_fns)
if self.rand:
random.shuffle(fn_batches)
for fn_batch in fn_batches:
logger.debug("Batch of training filenames: %s",
pprint.pformat(fn_batch))
yield self.load_batch(fn_batch)
|
[
"def",
"train_batch_gen",
"(",
"self",
")",
"->",
"Iterator",
":",
"if",
"len",
"(",
"self",
".",
"train_fns",
")",
"==",
"0",
":",
"raise",
"PersephoneException",
"(",
"\"\"\"No training data available; cannot\n generate training batches.\"\"\"",
")",
"# Create batches of batch_size and shuffle them.",
"fn_batches",
"=",
"self",
".",
"make_batches",
"(",
"self",
".",
"train_fns",
")",
"if",
"self",
".",
"rand",
":",
"random",
".",
"shuffle",
"(",
"fn_batches",
")",
"for",
"fn_batch",
"in",
"fn_batches",
":",
"logger",
".",
"debug",
"(",
"\"Batch of training filenames: %s\"",
",",
"pprint",
".",
"pformat",
"(",
"fn_batch",
")",
")",
"yield",
"self",
".",
"load_batch",
"(",
"fn_batch",
")",
"else",
":",
"raise",
"StopIteration"
] |
Returns a generator that outputs batches in the training data.
|
[
"Returns",
"a",
"generator",
"that",
"outputs",
"batches",
"in",
"the",
"training",
"data",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L125-L144
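A training-loop sketch; reader and run_training_step are assumed names, not part of the module. Under PEP 479 a generator should not raise StopIteration explicitly (it would surface as a RuntimeError on Python 3.7+), so the trailing for/else clause has been dropped above and the generator simply ends once every shuffled batch has been yielded.

for batch_inputs, batch_input_lens, batch_targets in reader.train_batch_gen():
    # Feed one shuffled batch to a (hypothetical) training step.
    run_training_step(batch_inputs, batch_input_lens, batch_targets)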
|
13,868
|
persephone-tools/persephone
|
persephone/corpus_reader.py
|
CorpusReader.valid_batch
|
def valid_batch(self):
""" Returns a single batch with all the validation cases."""
valid_fns = list(zip(*self.corpus.get_valid_fns()))
return self.load_batch(valid_fns)
|
python
|
def valid_batch(self):
""" Returns a single batch with all the validation cases."""
valid_fns = list(zip(*self.corpus.get_valid_fns()))
return self.load_batch(valid_fns)
|
[
"def",
"valid_batch",
"(",
"self",
")",
":",
"valid_fns",
"=",
"list",
"(",
"zip",
"(",
"*",
"self",
".",
"corpus",
".",
"get_valid_fns",
"(",
")",
")",
")",
"return",
"self",
".",
"load_batch",
"(",
"valid_fns",
")"
] |
Returns a single batch with all the validation cases.
|
[
"Returns",
"a",
"single",
"batch",
"with",
"all",
"the",
"validation",
"cases",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L146-L150
|
13,869
|
persephone-tools/persephone
|
persephone/corpus_reader.py
|
CorpusReader.untranscribed_batch_gen
|
def untranscribed_batch_gen(self):
""" A batch generator for all the untranscribed data. """
feat_fns = self.corpus.get_untranscribed_fns()
fn_batches = self.make_batches(feat_fns)
for fn_batch in fn_batches:
batch_inputs, batch_inputs_lens = utils.load_batch_x(fn_batch,
flatten=False)
yield batch_inputs, batch_inputs_lens, fn_batch
|
python
|
def untranscribed_batch_gen(self):
""" A batch generator for all the untranscribed data. """
feat_fns = self.corpus.get_untranscribed_fns()
fn_batches = self.make_batches(feat_fns)
for fn_batch in fn_batches:
batch_inputs, batch_inputs_lens = utils.load_batch_x(fn_batch,
flatten=False)
yield batch_inputs, batch_inputs_lens, fn_batch
|
[
"def",
"untranscribed_batch_gen",
"(",
"self",
")",
":",
"feat_fns",
"=",
"self",
".",
"corpus",
".",
"get_untranscribed_fns",
"(",
")",
"fn_batches",
"=",
"self",
".",
"make_batches",
"(",
"feat_fns",
")",
"for",
"fn_batch",
"in",
"fn_batches",
":",
"batch_inputs",
",",
"batch_inputs_lens",
"=",
"utils",
".",
"load_batch_x",
"(",
"fn_batch",
",",
"flatten",
"=",
"False",
")",
"yield",
"batch_inputs",
",",
"batch_inputs_lens",
",",
"fn_batch"
] |
A batch generator for all the untranscribed data.
|
[
"A",
"batch",
"generator",
"for",
"all",
"the",
"untranscribed",
"data",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L158-L167
|
13,870
|
persephone-tools/persephone
|
persephone/corpus_reader.py
|
CorpusReader.human_readable_hyp_ref
|
def human_readable_hyp_ref(self, dense_decoded, dense_y):
""" Returns a human readable version of the hypothesis for manual
inspection, along with the reference.
"""
hyps = []
refs = []
for i in range(len(dense_decoded)):
ref = [phn_i for phn_i in dense_y[i] if phn_i != 0]
hyp = [phn_i for phn_i in dense_decoded[i] if phn_i != 0]
ref = self.corpus.indices_to_labels(ref)
hyp = self.corpus.indices_to_labels(hyp)
refs.append(ref)
hyps.append(hyp)
return hyps, refs
|
python
|
def human_readable_hyp_ref(self, dense_decoded, dense_y):
""" Returns a human readable version of the hypothesis for manual
inspection, along with the reference.
"""
hyps = []
refs = []
for i in range(len(dense_decoded)):
ref = [phn_i for phn_i in dense_y[i] if phn_i != 0]
hyp = [phn_i for phn_i in dense_decoded[i] if phn_i != 0]
ref = self.corpus.indices_to_labels(ref)
hyp = self.corpus.indices_to_labels(hyp)
refs.append(ref)
hyps.append(hyp)
return hyps, refs
|
[
"def",
"human_readable_hyp_ref",
"(",
"self",
",",
"dense_decoded",
",",
"dense_y",
")",
":",
"hyps",
"=",
"[",
"]",
"refs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"dense_decoded",
")",
")",
":",
"ref",
"=",
"[",
"phn_i",
"for",
"phn_i",
"in",
"dense_y",
"[",
"i",
"]",
"if",
"phn_i",
"!=",
"0",
"]",
"hyp",
"=",
"[",
"phn_i",
"for",
"phn_i",
"in",
"dense_decoded",
"[",
"i",
"]",
"if",
"phn_i",
"!=",
"0",
"]",
"ref",
"=",
"self",
".",
"corpus",
".",
"indices_to_labels",
"(",
"ref",
")",
"hyp",
"=",
"self",
".",
"corpus",
".",
"indices_to_labels",
"(",
"hyp",
")",
"refs",
".",
"append",
"(",
"ref",
")",
"hyps",
".",
"append",
"(",
"hyp",
")",
"return",
"hyps",
",",
"refs"
] |
Returns a human readable version of the hypothesis for manual
inspection, along with the reference.
|
[
"Returns",
"a",
"human",
"readable",
"version",
"of",
"the",
"hypothesis",
"for",
"manual",
"inspection",
"along",
"with",
"the",
"reference",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L169-L184
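A small worked sketch of the padding convention: the dense matrices are zero-padded index arrays (for example the output of tf.sparse.to_dense), and index 0 is stripped before mapping back to labels. reader is an assumed CorpusReader instance.

dense_decoded = [[1, 2, 0, 0]]   # hypothesis indices, 0 = padding
dense_y = [[1, 0, 0, 0]]         # reference indices
# With an index-to-label map like {1: "a", 2: "b"} this would give
# hyps == [["a", "b"]] and refs == [["a"]].
hyps, refs = reader.human_readable_hyp_ref(dense_decoded, dense_y)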
|
13,871
|
persephone-tools/persephone
|
persephone/corpus_reader.py
|
CorpusReader.human_readable
|
def human_readable(self, dense_repr: Sequence[Sequence[int]]) -> List[List[str]]:
""" Returns a human readable version of a dense representation of
either a hypothesis or a reference to facilitate simple manual inspection.
"""
transcripts = []
for dense_r in dense_repr:
non_empty_phonemes = [phn_i for phn_i in dense_r if phn_i != 0]
transcript = self.corpus.indices_to_labels(non_empty_phonemes)
transcripts.append(transcript)
return transcripts
|
python
|
def human_readable(self, dense_repr: Sequence[Sequence[int]]) -> List[List[str]]:
""" Returns a human readable version of a dense representation of
either a hypothesis or a reference to facilitate simple manual inspection.
"""
transcripts = []
for dense_r in dense_repr:
non_empty_phonemes = [phn_i for phn_i in dense_r if phn_i != 0]
transcript = self.corpus.indices_to_labels(non_empty_phonemes)
transcripts.append(transcript)
return transcripts
|
[
"def",
"human_readable",
"(",
"self",
",",
"dense_repr",
":",
"Sequence",
"[",
"Sequence",
"[",
"int",
"]",
"]",
")",
"->",
"List",
"[",
"List",
"[",
"str",
"]",
"]",
":",
"transcripts",
"=",
"[",
"]",
"for",
"dense_r",
"in",
"dense_repr",
":",
"non_empty_phonemes",
"=",
"[",
"phn_i",
"for",
"phn_i",
"in",
"dense_r",
"if",
"phn_i",
"!=",
"0",
"]",
"transcript",
"=",
"self",
".",
"corpus",
".",
"indices_to_labels",
"(",
"non_empty_phonemes",
")",
"transcripts",
".",
"append",
"(",
"transcript",
")",
"return",
"transcripts"
] |
Returns a human readable version of a dense representation of
either a hypothesis or a reference to facilitate simple manual inspection.
|
[
"Returns",
"a",
"human",
"readable",
"version",
"of",
"a",
"dense",
"representation",
"of",
"either",
"or",
"reference",
"to",
"facilitate",
"simple",
"manual",
"inspection",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L186-L197
|
13,872
|
persephone-tools/persephone
|
persephone/corpus_reader.py
|
CorpusReader.calc_time
|
def calc_time(self) -> None:
"""
Prints statistics about the total duration of recordings in the
corpus.
"""
def get_number_of_frames(feat_fns):
""" fns: A list of numpy files which contain a number of feature
frames. """
total = 0
for feat_fn in feat_fns:
num_frames = len(np.load(feat_fn))
total += num_frames
return total
def numframes_to_minutes(num_frames):
# TODO Assumes 10ms strides for the frames. This should generalize to
# different frame stride widths, as should feature preparation.
minutes = ((num_frames*10)/1000)/60
return minutes
total_frames = 0
train_fns = [train_fn[0] for train_fn in self.train_fns]
num_train_frames = get_number_of_frames(train_fns)
total_frames += num_train_frames
num_valid_frames = get_number_of_frames(self.corpus.get_valid_fns()[0])
total_frames += num_valid_frames
num_test_frames = get_number_of_frames(self.corpus.get_test_fns()[0])
total_frames += num_test_frames
print("Train duration: %0.3f" % numframes_to_minutes(num_train_frames))
print("Validation duration: %0.3f" % numframes_to_minutes(num_valid_frames))
print("Test duration: %0.3f" % numframes_to_minutes(num_test_frames))
print("Total duration: %0.3f" % numframes_to_minutes(total_frames))
|
python
|
def calc_time(self) -> None:
"""
Prints statistics about the total duration of recordings in the
corpus.
"""
def get_number_of_frames(feat_fns):
""" fns: A list of numpy files which contain a number of feature
frames. """
total = 0
for feat_fn in feat_fns:
num_frames = len(np.load(feat_fn))
total += num_frames
return total
def numframes_to_minutes(num_frames):
# TODO Assumes 10ms strides for the frames. This should generalize to
# different frame stride widths, as should feature preparation.
minutes = ((num_frames*10)/1000)/60
return minutes
total_frames = 0
train_fns = [train_fn[0] for train_fn in self.train_fns]
num_train_frames = get_number_of_frames(train_fns)
total_frames += num_train_frames
num_valid_frames = get_number_of_frames(self.corpus.get_valid_fns()[0])
total_frames += num_valid_frames
num_test_frames = get_number_of_frames(self.corpus.get_test_fns()[0])
total_frames += num_test_frames
print("Train duration: %0.3f" % numframes_to_minutes(num_train_frames))
print("Validation duration: %0.3f" % numframes_to_minutes(num_valid_frames))
print("Test duration: %0.3f" % numframes_to_minutes(num_test_frames))
print("Total duration: %0.3f" % numframes_to_minutes(total_frames))
|
[
"def",
"calc_time",
"(",
"self",
")",
"->",
"None",
":",
"def",
"get_number_of_frames",
"(",
"feat_fns",
")",
":",
"\"\"\" fns: A list of numpy files which contain a number of feature\n frames. \"\"\"",
"total",
"=",
"0",
"for",
"feat_fn",
"in",
"feat_fns",
":",
"num_frames",
"=",
"len",
"(",
"np",
".",
"load",
"(",
"feat_fn",
")",
")",
"total",
"+=",
"num_frames",
"return",
"total",
"def",
"numframes_to_minutes",
"(",
"num_frames",
")",
":",
"# TODO Assumes 10ms strides for the frames. This should generalize to",
"# different frame stride widths, as should feature preparation.",
"minutes",
"=",
"(",
"(",
"num_frames",
"*",
"10",
")",
"/",
"1000",
")",
"/",
"60",
"return",
"minutes",
"total_frames",
"=",
"0",
"train_fns",
"=",
"[",
"train_fn",
"[",
"0",
"]",
"for",
"train_fn",
"in",
"self",
".",
"train_fns",
"]",
"num_train_frames",
"=",
"get_number_of_frames",
"(",
"train_fns",
")",
"total_frames",
"+=",
"num_train_frames",
"num_valid_frames",
"=",
"get_number_of_frames",
"(",
"self",
".",
"corpus",
".",
"get_valid_fns",
"(",
")",
"[",
"0",
"]",
")",
"total_frames",
"+=",
"num_valid_frames",
"num_test_frames",
"=",
"get_number_of_frames",
"(",
"self",
".",
"corpus",
".",
"get_test_fns",
"(",
")",
"[",
"0",
"]",
")",
"total_frames",
"+=",
"num_test_frames",
"print",
"(",
"\"Train duration: %0.3f\"",
"%",
"numframes_to_minutes",
"(",
"num_train_frames",
")",
")",
"print",
"(",
"\"Validation duration: %0.3f\"",
"%",
"numframes_to_minutes",
"(",
"num_valid_frames",
")",
")",
"print",
"(",
"\"Test duration: %0.3f\"",
"%",
"numframes_to_minutes",
"(",
"num_test_frames",
")",
")",
"print",
"(",
"\"Total duration: %0.3f\"",
"%",
"numframes_to_minutes",
"(",
"total_frames",
")",
")"
] |
Prints statistics about the total duration of recordings in the
corpus.
|
[
"Prints",
"statistics",
"about",
"the",
"the",
"total",
"duration",
"of",
"recordings",
"in",
"the",
"corpus",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/corpus_reader.py#L205-L241
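The frame arithmetic in numframes_to_minutes, worked through once: with the assumed 10 ms stride, 90,000 frames correspond to 900 seconds.

num_frames = 90_000
minutes = ((num_frames * 10) / 1000) / 60   # 10 ms per frame -> minutes
assert minutes == 15.0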
|
13,873
|
persephone-tools/persephone
|
persephone/rnn_ctc.py
|
lstm_cell
|
def lstm_cell(hidden_size):
""" Wrapper function to create an LSTM cell. """
return tf.contrib.rnn.LSTMCell(
hidden_size, use_peepholes=True, state_is_tuple=True)
|
python
|
def lstm_cell(hidden_size):
""" Wrapper function to create an LSTM cell. """
return tf.contrib.rnn.LSTMCell(
hidden_size, use_peepholes=True, state_is_tuple=True)
|
[
"def",
"lstm_cell",
"(",
"hidden_size",
")",
":",
"return",
"tf",
".",
"contrib",
".",
"rnn",
".",
"LSTMCell",
"(",
"hidden_size",
",",
"use_peepholes",
"=",
"True",
",",
"state_is_tuple",
"=",
"True",
")"
] |
Wrapper function to create an LSTM cell.
|
[
"Wrapper",
"function",
"to",
"create",
"an",
"LSTM",
"cell",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/rnn_ctc.py#L12-L16
|
13,874
|
persephone-tools/persephone
|
persephone/rnn_ctc.py
|
Model.write_desc
|
def write_desc(self) -> None:
""" Writes a description of the model to the exp_dir. """
path = os.path.join(self.exp_dir, "model_description.txt")
with open(path, "w") as desc_f:
for key, val in self.__dict__.items():
print("%s=%s" % (key, val), file=desc_f)
import json
json_path = os.path.join(self.exp_dir, "model_description.json")
desc = { } #type: Dict[str, Any]
# For use in decoding from a saved model
desc["topology"] = {
"batch_x_name" : self.batch_x.name, #type: ignore
"batch_x_lens_name" : self.batch_x_lens.name, #type: ignore
"dense_decoded_name" : self.dense_decoded.name #type: ignore
}
desc["model_type"] = str(self.__class__)
for key, val in self.__dict__.items():
if isinstance(val, int):
desc[str(key)] = val
elif isinstance(val, tf.Tensor):
desc[key] = {
"type": "tf.Tensor",
"name": val.name, #type: ignore
"shape": str(val.shape), #type: ignore
"dtype" : str(val.dtype), #type: ignore
"value" : str(val),
}
elif isinstance(val, tf.SparseTensor): #type: ignore
desc[key] = {
"type": "tf.SparseTensor",
"value": str(val), #type: ignore
}
else:
desc[str(key)] = str(val)
with open(json_path, "w") as json_desc_f:
json.dump(desc, json_desc_f, skipkeys=True)
|
python
|
def write_desc(self) -> None:
""" Writes a description of the model to the exp_dir. """
path = os.path.join(self.exp_dir, "model_description.txt")
with open(path, "w") as desc_f:
for key, val in self.__dict__.items():
print("%s=%s" % (key, val), file=desc_f)
import json
json_path = os.path.join(self.exp_dir, "model_description.json")
desc = { } #type: Dict[str, Any]
# For use in decoding from a saved model
desc["topology"] = {
"batch_x_name" : self.batch_x.name, #type: ignore
"batch_x_lens_name" : self.batch_x_lens.name, #type: ignore
"dense_decoded_name" : self.dense_decoded.name #type: ignore
}
desc["model_type"] = str(self.__class__)
for key, val in self.__dict__.items():
if isinstance(val, int):
desc[str(key)] = val
elif isinstance(val, tf.Tensor):
desc[key] = {
"type": "tf.Tensor",
"name": val.name, #type: ignore
"shape": str(val.shape), #type: ignore
"dtype" : str(val.dtype), #type: ignore
"value" : str(val),
}
elif isinstance(val, tf.SparseTensor): #type: ignore
desc[key] = {
"type": "tf.SparseTensor",
"value": str(val), #type: ignore
}
else:
desc[str(key)] = str(val)
with open(json_path, "w") as json_desc_f:
json.dump(desc, json_desc_f, skipkeys=True)
|
[
"def",
"write_desc",
"(",
"self",
")",
"->",
"None",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"exp_dir",
",",
"\"model_description.txt\"",
")",
"with",
"open",
"(",
"path",
",",
"\"w\"",
")",
"as",
"desc_f",
":",
"for",
"key",
",",
"val",
"in",
"self",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"print",
"(",
"\"%s=%s\"",
"%",
"(",
"key",
",",
"val",
")",
",",
"file",
"=",
"desc_f",
")",
"import",
"json",
"json_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"exp_dir",
",",
"\"model_description.json\"",
")",
"desc",
"=",
"{",
"}",
"#type: Dict[str, Any]",
"# For use in decoding from a saved model",
"desc",
"[",
"\"topology\"",
"]",
"=",
"{",
"\"batch_x_name\"",
":",
"self",
".",
"batch_x",
".",
"name",
",",
"#type: ignore",
"\"batch_x_lens_name\"",
":",
"self",
".",
"batch_x_lens",
".",
"name",
",",
"#type: ignore",
"\"dense_decoded_name\"",
":",
"self",
".",
"dense_decoded",
".",
"name",
"#type: ignore",
"}",
"desc",
"[",
"\"model_type\"",
"]",
"=",
"str",
"(",
"self",
".",
"__class__",
")",
"for",
"key",
",",
"val",
"in",
"self",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"int",
")",
":",
"desc",
"[",
"str",
"(",
"key",
")",
"]",
"=",
"val",
"elif",
"isinstance",
"(",
"val",
",",
"tf",
".",
"Tensor",
")",
":",
"desc",
"[",
"key",
"]",
"=",
"{",
"\"type\"",
":",
"\"tf.Tensor\"",
",",
"\"name\"",
":",
"val",
".",
"name",
",",
"#type: ignore",
"\"shape\"",
":",
"str",
"(",
"val",
".",
"shape",
")",
",",
"#type: ignore",
"\"dtype\"",
":",
"str",
"(",
"val",
".",
"dtype",
")",
",",
"#type: ignore",
"\"value\"",
":",
"str",
"(",
"val",
")",
",",
"}",
"elif",
"isinstance",
"(",
"val",
",",
"tf",
".",
"SparseTensor",
")",
":",
"#type: ignore",
"desc",
"[",
"key",
"]",
"=",
"{",
"\"type\"",
":",
"\"tf.SparseTensor\"",
",",
"\"value\"",
":",
"str",
"(",
"val",
")",
",",
"#type: ignore",
"}",
"else",
":",
"desc",
"[",
"str",
"(",
"key",
")",
"]",
"=",
"str",
"(",
"val",
")",
"with",
"open",
"(",
"json_path",
",",
"\"w\"",
")",
"as",
"json_desc_f",
":",
"json",
".",
"dump",
"(",
"desc",
",",
"json_desc_f",
",",
"skipkeys",
"=",
"True",
")"
] |
Writes a description of the model to the exp_dir.
|
[
"Writes",
"a",
"description",
"of",
"the",
"model",
"to",
"the",
"exp_dir",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/rnn_ctc.py#L21-L58
|
13,875
|
persephone-tools/persephone
|
persephone/preprocess/feat_extract.py
|
empty_wav
|
def empty_wav(wav_path: Union[Path, str]) -> bool:
"""Check if a wav contains data"""
with wave.open(str(wav_path), 'rb') as wav_f:
return wav_f.getnframes() == 0
|
python
|
def empty_wav(wav_path: Union[Path, str]) -> bool:
"""Check if a wav contains data"""
with wave.open(str(wav_path), 'rb') as wav_f:
return wav_f.getnframes() == 0
|
[
"def",
"empty_wav",
"(",
"wav_path",
":",
"Union",
"[",
"Path",
",",
"str",
"]",
")",
"->",
"bool",
":",
"with",
"wave",
".",
"open",
"(",
"str",
"(",
"wav_path",
")",
",",
"'rb'",
")",
"as",
"wav_f",
":",
"return",
"wav_f",
".",
"getnframes",
"(",
")",
"==",
"0"
] |
Check if a wav is empty
|
[
"Check",
"if",
"a",
"wav",
"contains",
"data"
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L19-L22
|
13,876
|
persephone-tools/persephone
|
persephone/preprocess/feat_extract.py
|
extract_energy
|
def extract_energy(rate, sig):
""" Extracts the energy of frames. """
mfcc = python_speech_features.mfcc(sig, rate, appendEnergy=True)
energy_row_vec = mfcc[:, 0]
energy_col_vec = energy_row_vec[:, np.newaxis]
return energy_col_vec
|
python
|
def extract_energy(rate, sig):
""" Extracts the energy of frames. """
mfcc = python_speech_features.mfcc(sig, rate, appendEnergy=True)
energy_row_vec = mfcc[:, 0]
energy_col_vec = energy_row_vec[:, np.newaxis]
return energy_col_vec
|
[
"def",
"extract_energy",
"(",
"rate",
",",
"sig",
")",
":",
"mfcc",
"=",
"python_speech_features",
".",
"mfcc",
"(",
"sig",
",",
"rate",
",",
"appendEnergy",
"=",
"True",
")",
"energy_row_vec",
"=",
"mfcc",
"[",
":",
",",
"0",
"]",
"energy_col_vec",
"=",
"energy_row_vec",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"return",
"energy_col_vec"
] |
Extracts the energy of frames.
|
[
"Extracts",
"the",
"energy",
"of",
"frames",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L25-L31
|
13,877
|
persephone-tools/persephone
|
persephone/preprocess/feat_extract.py
|
fbank
|
def fbank(wav_path, flat=True):
""" Currently grabs log Mel filterbank, deltas and double deltas."""
(rate, sig) = wav.read(wav_path)
if len(sig) == 0:
logger.warning("Empty wav: {}".format(wav_path))
fbank_feat = python_speech_features.logfbank(sig, rate, nfilt=40)
energy = extract_energy(rate, sig)
feat = np.hstack([energy, fbank_feat])
delta_feat = python_speech_features.delta(feat, 2)
delta_delta_feat = python_speech_features.delta(delta_feat, 2)
all_feats = [feat, delta_feat, delta_delta_feat]
if not flat:
all_feats = np.array(all_feats)
# Make time the first dimension for easy length normalization padding
# later.
all_feats = np.swapaxes(all_feats, 0, 1)
all_feats = np.swapaxes(all_feats, 1, 2)
else:
all_feats = np.concatenate(all_feats, axis=1)
# Log Mel Filterbank, with delta, and double delta
feat_fn = wav_path[:-3] + "fbank.npy"
np.save(feat_fn, all_feats)
|
python
|
def fbank(wav_path, flat=True):
""" Currently grabs log Mel filterbank, deltas and double deltas."""
(rate, sig) = wav.read(wav_path)
if len(sig) == 0:
logger.warning("Empty wav: {}".format(wav_path))
fbank_feat = python_speech_features.logfbank(sig, rate, nfilt=40)
energy = extract_energy(rate, sig)
feat = np.hstack([energy, fbank_feat])
delta_feat = python_speech_features.delta(feat, 2)
delta_delta_feat = python_speech_features.delta(delta_feat, 2)
all_feats = [feat, delta_feat, delta_delta_feat]
if not flat:
all_feats = np.array(all_feats)
# Make time the first dimension for easy length normalization padding
# later.
all_feats = np.swapaxes(all_feats, 0, 1)
all_feats = np.swapaxes(all_feats, 1, 2)
else:
all_feats = np.concatenate(all_feats, axis=1)
# Log Mel Filterbank, with delta, and double delta
feat_fn = wav_path[:-3] + "fbank.npy"
np.save(feat_fn, all_feats)
|
[
"def",
"fbank",
"(",
"wav_path",
",",
"flat",
"=",
"True",
")",
":",
"(",
"rate",
",",
"sig",
")",
"=",
"wav",
".",
"read",
"(",
"wav_path",
")",
"if",
"len",
"(",
"sig",
")",
"==",
"0",
":",
"logger",
".",
"warning",
"(",
"\"Empty wav: {}\"",
".",
"format",
"(",
"wav_path",
")",
")",
"fbank_feat",
"=",
"python_speech_features",
".",
"logfbank",
"(",
"sig",
",",
"rate",
",",
"nfilt",
"=",
"40",
")",
"energy",
"=",
"extract_energy",
"(",
"rate",
",",
"sig",
")",
"feat",
"=",
"np",
".",
"hstack",
"(",
"[",
"energy",
",",
"fbank_feat",
"]",
")",
"delta_feat",
"=",
"python_speech_features",
".",
"delta",
"(",
"feat",
",",
"2",
")",
"delta_delta_feat",
"=",
"python_speech_features",
".",
"delta",
"(",
"delta_feat",
",",
"2",
")",
"all_feats",
"=",
"[",
"feat",
",",
"delta_feat",
",",
"delta_delta_feat",
"]",
"if",
"not",
"flat",
":",
"all_feats",
"=",
"np",
".",
"array",
"(",
"all_feats",
")",
"# Make time the first dimension for easy length normalization padding",
"# later.",
"all_feats",
"=",
"np",
".",
"swapaxes",
"(",
"all_feats",
",",
"0",
",",
"1",
")",
"all_feats",
"=",
"np",
".",
"swapaxes",
"(",
"all_feats",
",",
"1",
",",
"2",
")",
"else",
":",
"all_feats",
"=",
"np",
".",
"concatenate",
"(",
"all_feats",
",",
"axis",
"=",
"1",
")",
"# Log Mel Filterbank, with delta, and double delta",
"feat_fn",
"=",
"wav_path",
"[",
":",
"-",
"3",
"]",
"+",
"\"fbank.npy\"",
"np",
".",
"save",
"(",
"feat_fn",
",",
"all_feats",
")"
] |
Currently grabs log Mel filterbank, deltas and double deltas.
|
[
"Currently",
"grabs",
"log",
"Mel",
"filterbank",
"deltas",
"and",
"double",
"deltas",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L33-L56
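A note on the output layout, with a placeholder path: each of the three blocks (static, delta, double delta) has 41 columns, i.e. one energy column plus 40 log Mel filterbanks, so the flat array saved next to the wav has shape (num_frames, 123).

import numpy as np

fbank("recordings/utt1.wav")                 # writes recordings/utt1.fbank.npy
feats = np.load("recordings/utt1.fbank.npy")
print(feats.shape)                           # (num_frames, 123) when flat=True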
|
13,878
|
persephone-tools/persephone
|
persephone/preprocess/feat_extract.py
|
mfcc
|
def mfcc(wav_path):
""" Grabs MFCC features with energy and derivates. """
(rate, sig) = wav.read(wav_path)
feat = python_speech_features.mfcc(sig, rate, appendEnergy=True)
delta_feat = python_speech_features.delta(feat, 2)
all_feats = [feat, delta_feat]
all_feats = np.array(all_feats)
# Make time the first dimension for easy length normalization padding later.
all_feats = np.swapaxes(all_feats, 0, 1)
all_feats = np.swapaxes(all_feats, 1, 2)
feat_fn = wav_path[:-3] + "mfcc13_d.npy"
np.save(feat_fn, all_feats)
|
python
|
def mfcc(wav_path):
""" Grabs MFCC features with energy and derivates. """
(rate, sig) = wav.read(wav_path)
feat = python_speech_features.mfcc(sig, rate, appendEnergy=True)
delta_feat = python_speech_features.delta(feat, 2)
all_feats = [feat, delta_feat]
all_feats = np.array(all_feats)
# Make time the first dimension for easy length normalization padding later.
all_feats = np.swapaxes(all_feats, 0, 1)
all_feats = np.swapaxes(all_feats, 1, 2)
feat_fn = wav_path[:-3] + "mfcc13_d.npy"
np.save(feat_fn, all_feats)
|
[
"def",
"mfcc",
"(",
"wav_path",
")",
":",
"(",
"rate",
",",
"sig",
")",
"=",
"wav",
".",
"read",
"(",
"wav_path",
")",
"feat",
"=",
"python_speech_features",
".",
"mfcc",
"(",
"sig",
",",
"rate",
",",
"appendEnergy",
"=",
"True",
")",
"delta_feat",
"=",
"python_speech_features",
".",
"delta",
"(",
"feat",
",",
"2",
")",
"all_feats",
"=",
"[",
"feat",
",",
"delta_feat",
"]",
"all_feats",
"=",
"np",
".",
"array",
"(",
"all_feats",
")",
"# Make time the first dimension for easy length normalization padding later.",
"all_feats",
"=",
"np",
".",
"swapaxes",
"(",
"all_feats",
",",
"0",
",",
"1",
")",
"all_feats",
"=",
"np",
".",
"swapaxes",
"(",
"all_feats",
",",
"1",
",",
"2",
")",
"feat_fn",
"=",
"wav_path",
"[",
":",
"-",
"3",
"]",
"+",
"\"mfcc13_d.npy\"",
"np",
".",
"save",
"(",
"feat_fn",
",",
"all_feats",
")"
] |
Grabs MFCC features with energy and derivatives.
|
[
"Grabs",
"MFCC",
"features",
"with",
"energy",
"and",
"derivates",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L58-L71
|
13,879
|
persephone-tools/persephone
|
persephone/preprocess/feat_extract.py
|
from_dir
|
def from_dir(dirpath: Path, feat_type: str) -> None:
""" Performs feature extraction from the WAV files in a directory.
Args:
dirpath: A `Path` to the directory where the WAV files reside.
feat_type: The type of features that are being used.
"""
logger.info("Extracting features from directory {}".format(dirpath))
dirname = str(dirpath)
def all_wavs_processed() -> bool:
"""
True if all wavs in the directory have corresponding numpy feature
file; False otherwise.
"""
for fn in os.listdir(dirname):
prefix, ext = os.path.splitext(fn)
if ext == ".wav":
if not os.path.exists(
os.path.join(dirname, "%s.%s.npy" % (prefix, feat_type))):
return False
return True
if all_wavs_processed():
# Then nothing needs to be done here
logger.info("All WAV files already preprocessed")
return
# Otherwise, go on and process everything...
# If pitch features are needed as part of this, extract them
if feat_type == "pitch" or feat_type == "fbank_and_pitch":
kaldi_pitch(dirname, dirname)
# Then apply file-wise feature extraction
for filename in os.listdir(dirname):
logger.info("Preparing %s features for %s", feat_type, filename)
path = os.path.join(dirname, filename)
if path.endswith(".wav"):
if empty_wav(path):
raise PersephoneException("Can't extract features for {} since it is an empty WAV file. Remove it from the corpus.".format(path))
if feat_type == "fbank":
fbank(path)
elif feat_type == "fbank_and_pitch":
fbank(path)
prefix = os.path.splitext(filename)[0]
combine_fbank_and_pitch(dirname, prefix)
elif feat_type == "pitch":
# Already extracted pitch at the start of this function.
pass
elif feat_type == "mfcc13_d":
mfcc(path)
else:
logger.warning("Feature type not found: %s", feat_type)
raise PersephoneException("Feature type not found: %s" % feat_type)
|
python
|
def from_dir(dirpath: Path, feat_type: str) -> None:
""" Performs feature extraction from the WAV files in a directory.
Args:
dirpath: A `Path` to the directory where the WAV files reside.
feat_type: The type of features that are being used.
"""
logger.info("Extracting features from directory {}".format(dirpath))
dirname = str(dirpath)
def all_wavs_processed() -> bool:
"""
True if all wavs in the directory have corresponding numpy feature
file; False otherwise.
"""
for fn in os.listdir(dirname):
prefix, ext = os.path.splitext(fn)
if ext == ".wav":
if not os.path.exists(
os.path.join(dirname, "%s.%s.npy" % (prefix, feat_type))):
return False
return True
if all_wavs_processed():
# Then nothing needs to be done here
logger.info("All WAV files already preprocessed")
return
# Otherwise, go on and process everything...
# If pitch features are needed as part of this, extract them
if feat_type == "pitch" or feat_type == "fbank_and_pitch":
kaldi_pitch(dirname, dirname)
# Then apply file-wise feature extraction
for filename in os.listdir(dirname):
logger.info("Preparing %s features for %s", feat_type, filename)
path = os.path.join(dirname, filename)
if path.endswith(".wav"):
if empty_wav(path):
raise PersephoneException("Can't extract features for {} since it is an empty WAV file. Remove it from the corpus.".format(path))
if feat_type == "fbank":
fbank(path)
elif feat_type == "fbank_and_pitch":
fbank(path)
prefix = os.path.splitext(filename)[0]
combine_fbank_and_pitch(dirname, prefix)
elif feat_type == "pitch":
# Already extracted pitch at the start of this function.
pass
elif feat_type == "mfcc13_d":
mfcc(path)
else:
logger.warning("Feature type not found: %s", feat_type)
raise PersephoneException("Feature type not found: %s" % feat_type)
|
[
"def",
"from_dir",
"(",
"dirpath",
":",
"Path",
",",
"feat_type",
":",
"str",
")",
"->",
"None",
":",
"logger",
".",
"info",
"(",
"\"Extracting features from directory {}\"",
".",
"format",
"(",
"dirpath",
")",
")",
"dirname",
"=",
"str",
"(",
"dirpath",
")",
"def",
"all_wavs_processed",
"(",
")",
"->",
"bool",
":",
"\"\"\"\n True if all wavs in the directory have corresponding numpy feature\n file; False otherwise.\n \"\"\"",
"for",
"fn",
"in",
"os",
".",
"listdir",
"(",
"dirname",
")",
":",
"prefix",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fn",
")",
"if",
"ext",
"==",
"\".wav\"",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"\"%s.%s.npy\"",
"%",
"(",
"prefix",
",",
"feat_type",
")",
")",
")",
":",
"return",
"False",
"return",
"True",
"if",
"all_wavs_processed",
"(",
")",
":",
"# Then nothing needs to be done here",
"logger",
".",
"info",
"(",
"\"All WAV files already preprocessed\"",
")",
"return",
"# Otherwise, go on and process everything...",
"# If pitch features are needed as part of this, extract them",
"if",
"feat_type",
"==",
"\"pitch\"",
"or",
"feat_type",
"==",
"\"fbank_and_pitch\"",
":",
"kaldi_pitch",
"(",
"dirname",
",",
"dirname",
")",
"# Then apply file-wise feature extraction",
"for",
"filename",
"in",
"os",
".",
"listdir",
"(",
"dirname",
")",
":",
"logger",
".",
"info",
"(",
"\"Preparing %s features for %s\"",
",",
"feat_type",
",",
"filename",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"filename",
")",
"if",
"path",
".",
"endswith",
"(",
"\".wav\"",
")",
":",
"if",
"empty_wav",
"(",
"path",
")",
":",
"raise",
"PersephoneException",
"(",
"\"Can't extract features for {} since it is an empty WAV file. Remove it from the corpus.\"",
".",
"format",
"(",
"path",
")",
")",
"if",
"feat_type",
"==",
"\"fbank\"",
":",
"fbank",
"(",
"path",
")",
"elif",
"feat_type",
"==",
"\"fbank_and_pitch\"",
":",
"fbank",
"(",
"path",
")",
"prefix",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"0",
"]",
"combine_fbank_and_pitch",
"(",
"dirname",
",",
"prefix",
")",
"elif",
"feat_type",
"==",
"\"pitch\"",
":",
"# Already extracted pitch at the start of this function.",
"pass",
"elif",
"feat_type",
"==",
"\"mfcc13_d\"",
":",
"mfcc",
"(",
"path",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Feature type not found: %s\"",
",",
"feat_type",
")",
"raise",
"PersephoneException",
"(",
"\"Feature type not found: %s\"",
"%",
"feat_type",
")"
] |
Performs feature extraction from the WAV files in a directory.
Args:
dirpath: A `Path` to the directory where the WAV files reside.
feat_type: The type of features that are being used.
|
[
"Performs",
"feature",
"extraction",
"from",
"the",
"WAV",
"files",
"in",
"a",
"directory",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L117-L173
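A call sketch with a placeholder path; the function is idempotent in the sense that it returns early when every foo.wav already has a matching foo.<feat_type>.npy sibling.

from pathlib import Path

from_dir(Path("corpus/wav"), feat_type="fbank")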
|
13,880
|
persephone-tools/persephone
|
persephone/preprocess/feat_extract.py
|
convert_wav
|
def convert_wav(org_wav_fn: Path, tgt_wav_fn: Path) -> None:
""" Converts the wav into a 16bit mono 16000Hz wav.
Args:
org_wav_fn: A `Path` to the original wave file
tgt_wav_fn: The `Path` to output the processed wave file
"""
if not org_wav_fn.exists():
raise FileNotFoundError
args = [config.FFMPEG_PATH,
"-i", str(org_wav_fn), "-ac", "1", "-ar", "16000", str(tgt_wav_fn)]
subprocess.run(args)
|
python
|
def convert_wav(org_wav_fn: Path, tgt_wav_fn: Path) -> None:
""" Converts the wav into a 16bit mono 16000Hz wav.
Args:
org_wav_fn: A `Path` to the original wave file
tgt_wav_fn: The `Path` to output the processed wave file
"""
if not org_wav_fn.exists():
raise FileNotFoundError
args = [config.FFMPEG_PATH,
"-i", str(org_wav_fn), "-ac", "1", "-ar", "16000", str(tgt_wav_fn)]
subprocess.run(args)
|
[
"def",
"convert_wav",
"(",
"org_wav_fn",
":",
"Path",
",",
"tgt_wav_fn",
":",
"Path",
")",
"->",
"None",
":",
"if",
"not",
"org_wav_fn",
".",
"exists",
"(",
")",
":",
"raise",
"FileNotFoundError",
"args",
"=",
"[",
"config",
".",
"FFMPEG_PATH",
",",
"\"-i\"",
",",
"str",
"(",
"org_wav_fn",
")",
",",
"\"-ac\"",
",",
"\"1\"",
",",
"\"-ar\"",
",",
"\"16000\"",
",",
"str",
"(",
"tgt_wav_fn",
")",
"]",
"subprocess",
".",
"run",
"(",
"args",
")"
] |
Converts the wav into a 16bit mono 16000Hz wav.
Args:
org_wav_fn: A `Path` to the original wave file
tgt_wav_fn: The `Path` to output the processed wave file
|
[
"Converts",
"the",
"wav",
"into",
"a",
"16bit",
"mono",
"16000Hz",
"wav",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L175-L186
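The subprocess call is roughly equivalent to the shell command below (paths are placeholders). Note that the arguments only force one channel and a 16 kHz rate; the 16-bit depth mentioned in the docstring relies on ffmpeg's default WAV encoder (pcm_s16le).

#   ffmpeg -i original.wav -ac 1 -ar 16000 target.wav
convert_wav(Path("original.wav"), Path("target.wav"))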
|
13,881
|
persephone-tools/persephone
|
persephone/preprocess/feat_extract.py
|
kaldi_pitch
|
def kaldi_pitch(wav_dir: str, feat_dir: str) -> None:
""" Extract Kaldi pitch features. Assumes 16k mono wav files."""
logger.debug("Make wav.scp and pitch.scp files")
# Make wav.scp and pitch.scp files
prefixes = []
for fn in os.listdir(wav_dir):
prefix, ext = os.path.splitext(fn)
if ext == ".wav":
prefixes.append(prefix)
wav_scp_path = os.path.join(feat_dir, "wavs.scp")
with open(wav_scp_path, "w") as wav_scp:
for prefix in prefixes:
logger.info("Writing wav file: %s", os.path.join(wav_dir, prefix + ".wav"))
print(prefix, os.path.join(wav_dir, prefix + ".wav"), file=wav_scp)
pitch_scp_path = os.path.join(feat_dir, "pitch_feats.scp")
with open(pitch_scp_path, "w") as pitch_scp:
for prefix in prefixes:
logger.info("Writing scp file: %s", os.path.join(feat_dir, prefix + ".pitch.txt"))
print(prefix, os.path.join(feat_dir, prefix + ".pitch.txt"), file=pitch_scp)
# Call Kaldi pitch feat extraction
args = [os.path.join(config.KALDI_ROOT, "src/featbin/compute-kaldi-pitch-feats"),
"scp:%s" % (wav_scp_path), "scp,t:%s" % pitch_scp_path]
logger.info("Extracting pitch features from wavs listed in {}".format(
wav_scp_path))
subprocess.run(args)
# Convert the Kaldi pitch *.txt files to numpy arrays.
for fn in os.listdir(feat_dir):
if fn.endswith(".pitch.txt"):
pitch_feats = []
with open(os.path.join(feat_dir, fn)) as f:
for line in f:
sp = line.split()
if len(sp) > 1:
pitch_feats.append([float(sp[0]), float(sp[1])])
prefix, _ = os.path.splitext(fn)
out_fn = prefix + ".npy"
a = np.array(pitch_feats)
np.save(os.path.join(feat_dir, out_fn), a)
|
python
|
def kaldi_pitch(wav_dir: str, feat_dir: str) -> None:
""" Extract Kaldi pitch features. Assumes 16k mono wav files."""
logger.debug("Make wav.scp and pitch.scp files")
# Make wav.scp and pitch.scp files
prefixes = []
for fn in os.listdir(wav_dir):
prefix, ext = os.path.splitext(fn)
if ext == ".wav":
prefixes.append(prefix)
wav_scp_path = os.path.join(feat_dir, "wavs.scp")
with open(wav_scp_path, "w") as wav_scp:
for prefix in prefixes:
logger.info("Writing wav file: %s", os.path.join(wav_dir, prefix + ".wav"))
print(prefix, os.path.join(wav_dir, prefix + ".wav"), file=wav_scp)
pitch_scp_path = os.path.join(feat_dir, "pitch_feats.scp")
with open(pitch_scp_path, "w") as pitch_scp:
for prefix in prefixes:
logger.info("Writing scp file: %s", os.path.join(feat_dir, prefix + ".pitch.txt"))
print(prefix, os.path.join(feat_dir, prefix + ".pitch.txt"), file=pitch_scp)
# Call Kaldi pitch feat extraction
args = [os.path.join(config.KALDI_ROOT, "src/featbin/compute-kaldi-pitch-feats"),
"scp:%s" % (wav_scp_path), "scp,t:%s" % pitch_scp_path]
logger.info("Extracting pitch features from wavs listed in {}".format(
wav_scp_path))
subprocess.run(args)
# Convert the Kaldi pitch *.txt files to numpy arrays.
for fn in os.listdir(feat_dir):
if fn.endswith(".pitch.txt"):
pitch_feats = []
with open(os.path.join(feat_dir, fn)) as f:
for line in f:
sp = line.split()
if len(sp) > 1:
pitch_feats.append([float(sp[0]), float(sp[1])])
prefix, _ = os.path.splitext(fn)
out_fn = prefix + ".npy"
a = np.array(pitch_feats)
np.save(os.path.join(feat_dir, out_fn), a)
|
[
"def",
"kaldi_pitch",
"(",
"wav_dir",
":",
"str",
",",
"feat_dir",
":",
"str",
")",
"->",
"None",
":",
"logger",
".",
"debug",
"(",
"\"Make wav.scp and pitch.scp files\"",
")",
"# Make wav.scp and pitch.scp files",
"prefixes",
"=",
"[",
"]",
"for",
"fn",
"in",
"os",
".",
"listdir",
"(",
"wav_dir",
")",
":",
"prefix",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fn",
")",
"if",
"ext",
"==",
"\".wav\"",
":",
"prefixes",
".",
"append",
"(",
"prefix",
")",
"wav_scp_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"\"wavs.scp\"",
")",
"with",
"open",
"(",
"wav_scp_path",
",",
"\"w\"",
")",
"as",
"wav_scp",
":",
"for",
"prefix",
"in",
"prefixes",
":",
"logger",
".",
"info",
"(",
"\"Writing wav file: %s\"",
",",
"os",
".",
"path",
".",
"join",
"(",
"wav_dir",
",",
"prefix",
"+",
"\".wav\"",
")",
")",
"print",
"(",
"prefix",
",",
"os",
".",
"path",
".",
"join",
"(",
"wav_dir",
",",
"prefix",
"+",
"\".wav\"",
")",
",",
"file",
"=",
"wav_scp",
")",
"pitch_scp_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"\"pitch_feats.scp\"",
")",
"with",
"open",
"(",
"pitch_scp_path",
",",
"\"w\"",
")",
"as",
"pitch_scp",
":",
"for",
"prefix",
"in",
"prefixes",
":",
"logger",
".",
"info",
"(",
"\"Writing scp file: %s\"",
",",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"prefix",
"+",
"\".pitch.txt\"",
")",
")",
"print",
"(",
"prefix",
",",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"prefix",
"+",
"\".pitch.txt\"",
")",
",",
"file",
"=",
"pitch_scp",
")",
"# Call Kaldi pitch feat extraction",
"args",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"KALDI_ROOT",
",",
"\"src/featbin/compute-kaldi-pitch-feats\"",
")",
",",
"\"scp:%s\"",
"%",
"(",
"wav_scp_path",
")",
",",
"\"scp,t:%s\"",
"%",
"pitch_scp_path",
"]",
"logger",
".",
"info",
"(",
"\"Extracting pitch features from wavs listed in {}\"",
".",
"format",
"(",
"wav_scp_path",
")",
")",
"subprocess",
".",
"run",
"(",
"args",
")",
"# Convert the Kaldi pitch *.txt files to numpy arrays.",
"for",
"fn",
"in",
"os",
".",
"listdir",
"(",
"feat_dir",
")",
":",
"if",
"fn",
".",
"endswith",
"(",
"\".pitch.txt\"",
")",
":",
"pitch_feats",
"=",
"[",
"]",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"fn",
")",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"sp",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"sp",
")",
">",
"1",
":",
"pitch_feats",
".",
"append",
"(",
"[",
"float",
"(",
"sp",
"[",
"0",
"]",
")",
",",
"float",
"(",
"sp",
"[",
"1",
"]",
")",
"]",
")",
"prefix",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fn",
")",
"out_fn",
"=",
"prefix",
"+",
"\".npy\"",
"a",
"=",
"np",
".",
"array",
"(",
"pitch_feats",
")",
"np",
".",
"save",
"(",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"out_fn",
")",
",",
"a",
")"
] |
Extract Kaldi pitch features. Assumes 16k mono wav files.
|
[
"Extract",
"Kaldi",
"pitch",
"features",
".",
"Assumes",
"16k",
"mono",
"wav",
"files",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/feat_extract.py#L188-L230
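A sketch of the intermediate wavs.scp file the function writes for Kaldi, one "<utterance-id> <path>" pair per line (ids and paths are placeholders); the compiled compute-kaldi-pitch-feats binary must exist under config.KALDI_ROOT for the call to succeed.

# wavs.scp contents:
#   utt1 /corpus/wav/utt1.wav
#   utt2 /corpus/wav/utt2.wav
kaldi_pitch("corpus/wav", "corpus/feat")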
|
13,882
|
persephone-tools/persephone
|
persephone/experiment.py
|
get_exp_dir_num
|
def get_exp_dir_num(parent_dir: str) -> int:
""" Gets the number of the current experiment directory."""
return max([int(fn.split(".")[0])
for fn in os.listdir(parent_dir) if fn.split(".")[0].isdigit()]
+ [-1])
|
python
|
def get_exp_dir_num(parent_dir: str) -> int:
""" Gets the number of the current experiment directory."""
return max([int(fn.split(".")[0])
for fn in os.listdir(parent_dir) if fn.split(".")[0].isdigit()]
+ [-1])
|
[
"def",
"get_exp_dir_num",
"(",
"parent_dir",
":",
"str",
")",
"->",
"int",
":",
"return",
"max",
"(",
"[",
"int",
"(",
"fn",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
")",
"for",
"fn",
"in",
"os",
".",
"listdir",
"(",
"parent_dir",
")",
"if",
"fn",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
"]",
"+",
"[",
"-",
"1",
"]",
")"
] |
Gets the number of the current experiment directory.
|
[
"Gets",
"the",
"number",
"of",
"the",
"current",
"experiment",
"directory",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/experiment.py#L18-L22
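A worked sketch of the numbering scheme, assuming get_exp_dir_num is importable: names are split on the first dot, non-numeric entries are ignored, and -1 is returned when no numbered directory exists (so the next experiment becomes 0).

import os
import tempfile

with tempfile.TemporaryDirectory() as parent:
    for name in ("0.first-run", "1"):
        os.mkdir(os.path.join(parent, name))
    open(os.path.join(parent, "notes.txt"), "w").close()
    assert get_exp_dir_num(parent) == 1   # next experiment dir would be "2"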
|
13,883
|
persephone-tools/persephone
|
persephone/experiment.py
|
transcribe
|
def transcribe(model_path, corpus):
""" Applies a trained model to untranscribed data in a Corpus. """
exp_dir = prep_exp_dir()
model = get_simple_model(exp_dir, corpus)
model.transcribe(model_path)
|
python
|
Applies a trained model to untranscribed data in a Corpus.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/experiment.py#L106-L111
|
13,884
|
persephone-tools/persephone
|
persephone/preprocess/wav.py
|
trim_wav_ms
|
def trim_wav_ms(in_path: Path, out_path: Path,
start_time: int, end_time: int) -> None:
""" Extracts part of a WAV File.
First attempts to call sox. If sox is unavailable, it backs off to
pydub+ffmpeg.
Args:
in_path: A path to the source file to extract a portion of
out_path: A path describing the to-be-created WAV file.
start_time: The point in the source WAV file at which to begin
extraction.
end_time: The point in the source WAV file at which to end extraction.
"""
try:
trim_wav_sox(in_path, out_path, start_time, end_time)
except FileNotFoundError:
# Then sox isn't installed, so use pydub/ffmpeg
trim_wav_pydub(in_path, out_path, start_time, end_time)
except subprocess.CalledProcessError:
# Then there is an issue calling sox. Perhaps the input file is an mp4
# or some other filetype not supported out-of-the-box by sox. So we try
# using pydub/ffmpeg.
trim_wav_pydub(in_path, out_path, start_time, end_time)
|
python
|
Extracts part of a WAV file.
First attempts to call sox. If sox is unavailable, it backs off to
pydub+ffmpeg.
Args:
in_path: A path to the source file to extract a portion of
out_path: A path describing the to-be-created WAV file.
start_time: The point in the source WAV file at which to begin
extraction.
end_time: The point in the source WAV file at which to end extraction.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/wav.py#L18-L43
|
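A usage sketch for trim_wav_ms above; the paths are hypothetical and the times are in milliseconds. FileNotFoundError fires when the sox binary is missing entirely, while CalledProcessError covers sox being installed but failing on the input, so both cases fall back to pydub/ffmpeg.

from pathlib import Path

trim_wav_ms(Path("recording.wav"), Path("utterance.wav"),
            start_time=1000, end_time=2500)   # keep 1.0 s to 2.5 s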
13,885
|
persephone-tools/persephone
|
persephone/preprocess/wav.py
|
trim_wav_pydub
|
def trim_wav_pydub(in_path: Path, out_path: Path,
start_time: int, end_time: int) -> None:
""" Crops the wav file. """
logger.info(
"Using pydub/ffmpeg to create {} from {}".format(out_path, in_path) +
" using a start_time of {} and an end_time of {}".format(start_time,
end_time))
if out_path.is_file():
return
# TODO add logging here
#print("in_fn: {}".format(in_fn))
#print("out_fn: {}".format(out_fn))
in_ext = in_path.suffix[1:]
out_ext = out_path.suffix[1:]
audio = AudioSegment.from_file(str(in_path), in_ext)
trimmed = audio[start_time:end_time]
# pydub evidently doesn't actually use the parameters when outputting wavs,
    # since it doesn't use FFMPEG to deal with outputting WAVs. This is a bit
    # of a leaky abstraction. No warning is given, so normalization to 16 kHz
# mono wavs has to happen later. Leaving the parameters here in case it
# changes
trimmed.export(str(out_path), format=out_ext,
parameters=["-ac", "1", "-ar", "16000"])
|
python
|
Crops the wav file.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/wav.py#L45-L70
|
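The pydub behavior the function above relies on, in isolation: AudioSegment slicing is indexed in milliseconds. A minimal sketch, assuming pydub and ffmpeg are installed; the file names are hypothetical.

from pydub import AudioSegment

audio = AudioSegment.from_file("in.wav", "wav")
clip = audio[1000:2500]                  # milliseconds 1000 through 2500
clip.export("out.wav", format="wav")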
13,886
|
persephone-tools/persephone
|
persephone/preprocess/wav.py
|
trim_wav_sox
|
def trim_wav_sox(in_path: Path, out_path: Path,
start_time: int, end_time: int) -> None:
""" Crops the wav file at in_fn so that the audio between start_time and
end_time is output to out_fn. Measured in milliseconds.
"""
if out_path.is_file():
logger.info("Output path %s already exists, not trimming file", out_path)
return
start_time_secs = millisecs_to_secs(start_time)
end_time_secs = millisecs_to_secs(end_time)
args = [config.SOX_PATH, str(in_path), str(out_path),
"trim", str(start_time_secs), "=" + str(end_time_secs)]
logger.info("Cropping file %s, from start time %d (seconds) to end time %d (seconds), outputting to %s",
in_path, start_time_secs, end_time_secs, out_path)
subprocess.run(args, check=True)
|
python
|
Crops the wav file at in_path so that the audio between start_time and
end_time is output to out_path. Times are measured in milliseconds.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/wav.py#L72-L88
|
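The sox invocation that trim_wav_sox above builds, in isolation. The "=" prefix on the second trim argument makes it an absolute end position rather than a duration. This sketch assumes a sox binary on PATH instead of config.SOX_PATH, and hypothetical file names.

start_ms, end_ms = 1500, 3200
args = ["sox", "in.wav", "out.wav",
        "trim", str(start_ms / 1000), "=" + str(end_ms / 1000)]
# equivalent shell command: sox in.wav out.wav trim 1.5 =3.2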
13,887
|
persephone-tools/persephone
|
persephone/preprocess/wav.py
|
extract_wavs
|
def extract_wavs(utterances: List[Utterance], tgt_dir: Path,
lazy: bool) -> None:
""" Extracts WAVs from the media files associated with a list of Utterance
    objects and stores them in a target directory.
Args:
utterances: A list of Utterance objects, which include information
about the source media file, and the offset of the utterance in the
media_file.
tgt_dir: The directory in which to write the output WAVs.
lazy: If True, then existing WAVs will not be overwritten if they have
the same name
"""
tgt_dir.mkdir(parents=True, exist_ok=True)
for utter in utterances:
wav_fn = "{}.{}".format(utter.prefix, "wav")
out_wav_path = tgt_dir / wav_fn
if lazy and out_wav_path.is_file():
logger.info("File {} already exists and lazy == {}; not " \
"writing.".format(out_wav_path, lazy))
continue
logger.info("File {} does not exist and lazy == {}; creating " \
"it.".format(out_wav_path, lazy))
trim_wav_ms(utter.org_media_path, out_wav_path,
utter.start_time, utter.end_time)
|
python
|
Extracts WAVs from the media files associated with a list of Utterance
objects and stores them in a target directory.
Args:
utterances: A list of Utterance objects, which include information
about the source media file, and the offset of the utterance in the
media_file.
tgt_dir: The directory in which to write the output WAVs.
lazy: If True, then existing WAVs will not be overwritten if they have
the same name
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/wav.py#L90-L114
|
13,888
|
persephone-tools/persephone
|
persephone/results.py
|
filter_labels
|
def filter_labels(sent: Sequence[str], labels: Set[str] = None) -> List[str]:
""" Returns only the tokens present in the sentence that are in labels."""
if labels:
return [tok for tok in sent if tok in labels]
return list(sent)
|
python
|
Returns only the tokens present in the sentence that are in labels.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/results.py#L11-L16
|
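Quick examples of filter_labels above: input order and duplicates are preserved, and labels=None keeps every token.

>>> filter_labels(["a", "b", "c", "b"], labels={"b", "c"})
['b', 'c', 'b']
>>> filter_labels(["a", "b"])
['a', 'b']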
13,889
|
persephone-tools/persephone
|
persephone/results.py
|
filtered_error_rate
|
def filtered_error_rate(hyps_path: Union[str, Path], refs_path: Union[str, Path], labels: Set[str]) -> float:
""" Returns the error rate of hypotheses in hyps_path against references in refs_path after filtering only for labels in labels.
"""
if isinstance(hyps_path, Path):
hyps_path = str(hyps_path)
if isinstance(refs_path, Path):
refs_path = str(refs_path)
with open(hyps_path) as hyps_f:
lines = hyps_f.readlines()
hyps = [filter_labels(line.split(), labels) for line in lines]
with open(refs_path) as refs_f:
lines = refs_f.readlines()
refs = [filter_labels(line.split(), labels) for line in lines]
    # For the case where there are no tokens left after filtering.
    # (The original test `entry is not []` was always true, because `is`
    # compares object identity, never list contents.)
    only_empty = True
    for entry in hyps:
        if entry:  # truthiness is the idiomatic emptiness test for a list
            only_empty = False
            break # found something so can move on immediately
    if only_empty:
        return -1
    return utils.batch_per(hyps, refs)
|
python
|
Returns the error rate of hypotheses in hyps_path against references in refs_path after filtering only for labels in labels.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/results.py#L18-L42
|
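Why the emptiness test in filtered_error_rate needed the fix above: "is" compares object identity, so a freshly built list is never the same object as the literal [], and the old condition was always true.

>>> [] is not []          # two distinct list objects
True
>>> [] == []              # value equality is what was intended
True
>>> bool([])              # truthiness is the idiomatic emptiness test
False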
13,890
|
persephone-tools/persephone
|
persephone/results.py
|
fmt_latex_output
|
def fmt_latex_output(hyps: Sequence[Sequence[str]],
refs: Sequence[Sequence[str]],
prefixes: Sequence[str],
out_fn: Path,
) -> None:
""" Output the hypotheses and references to a LaTeX source file for
pretty printing.
"""
alignments_ = [min_edit_distance_align(ref, hyp)
for hyp, ref in zip(hyps, refs)]
with out_fn.open("w") as out_f:
print(latex_header(), file=out_f)
print("\\begin{document}\n"
"\\begin{longtable}{ll}", file=out_f)
print(r"\toprule", file=out_f)
for sent in zip(prefixes, alignments_):
prefix = sent[0]
alignments = sent[1:]
print("Utterance ID: &", prefix.strip().replace(r"_", r"\_"), r"\\", file=out_f)
for i, alignment in enumerate(alignments):
ref_list = []
hyp_list = []
for arrow in alignment:
if arrow[0] == arrow[1]:
# Then don't highlight it; it's correct.
ref_list.append(arrow[0])
hyp_list.append(arrow[1])
else:
# Then highlight the errors.
ref_list.append("\\hl{%s}" % arrow[0])
hyp_list.append("\\hl{%s}" % arrow[1])
print("Ref: &", "".join(ref_list), r"\\", file=out_f)
print("Hyp: &", "".join(hyp_list), r"\\", file=out_f)
print(r"\midrule", file=out_f)
print(r"\end{longtable}", file=out_f)
print(r"\end{document}", file=out_f)
|
python
|
Output the hypotheses and references to a LaTeX source file for
pretty printing.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/results.py#L57-L96
|
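The per-arrow highlighting in fmt_latex_output above, in isolation. The alignment here is a hypothetical list of (ref, hyp) pairs of the kind min_edit_distance_align is used to produce.

alignment = [("a", "a"), ("b", "p"), ("c", "c")]
ref = "".join(r if r == h else "\\hl{%s}" % r for r, h in alignment)
hyp = "".join(h if r == h else "\\hl{%s}" % h for r, h in alignment)
# ref == "a\\hl{b}c" and hyp == "a\\hl{p}c" as Python literals,
# i.e. the LaTeX source a\hl{b}c and a\hl{p}c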
13,891
|
persephone-tools/persephone
|
persephone/results.py
|
fmt_confusion_matrix
|
def fmt_confusion_matrix(hyps: Sequence[Sequence[str]],
refs: Sequence[Sequence[str]],
label_set: Set[str] = None,
max_width: int = 25) -> str:
""" Formats a confusion matrix over substitutions, ignoring insertions
and deletions. """
if not label_set:
# Then determine the label set by reading
raise NotImplementedError()
alignments = [min_edit_distance_align(ref, hyp)
for hyp, ref in zip(hyps, refs)]
arrow_counter = Counter() # type: Dict[Tuple[str, str], int]
for alignment in alignments:
arrow_counter.update(alignment)
ref_total = Counter() # type: Dict[str, int]
for alignment in alignments:
ref_total.update([arrow[0] for arrow in alignment])
labels = [label for label, count
in sorted(ref_total.items(), key=lambda x: x[1], reverse=True)
if label != ""][:max_width]
format_pieces = []
fmt = "{:3} "*(len(labels)+1)
format_pieces.append(fmt.format(" ", *labels))
fmt = "{:3} " + ("{:<3} " * (len(labels)))
for ref in labels:
# TODO
ref_results = [arrow_counter[(ref, hyp)] for hyp in labels]
format_pieces.append(fmt.format(ref, *ref_results))
return "\n".join(format_pieces)
|
python
|
Formats a confusion matrix over substitutions, ignoring insertions
and deletions.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/results.py#L132-L167
|
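The substitution counting at the heart of fmt_confusion_matrix above, in isolation, with two hypothetical alignments.

from collections import Counter

alignments = [[("a", "a"), ("b", "p")],
              [("b", "b"), ("b", "p")]]
arrow_counter = Counter()
for alignment in alignments:
    arrow_counter.update(alignment)
assert arrow_counter[("b", "p")] == 2   # "b" misrecognized as "p" twice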
13,892
|
persephone-tools/persephone
|
persephone/results.py
|
fmt_latex_untranscribed
|
def fmt_latex_untranscribed(hyps: Sequence[Sequence[str]],
prefixes: Sequence[str],
out_fn: Path) -> None:
""" Formats automatic hypotheses that have not previously been
transcribed in LaTeX. """
hyps_prefixes = list(zip(hyps, prefixes))
def utter_id_key(hyp_prefix):
hyp, prefix = hyp_prefix
prefix_split = prefix.split(".")
return (prefix_split[0], int(prefix_split[1]))
hyps_prefixes.sort(key=utter_id_key)
with out_fn.open("w") as out_f:
print(latex_header(), file=out_f)
print("\\begin{document}\n"
"\\begin{longtable}{ll}", file=out_f)
print(r"\toprule", file=out_f)
for hyp, prefix in hyps_prefixes:
print("Utterance ID: &", prefix.strip().replace(r"_", r"\_"), "\\\\", file=out_f)
print("Hypothesis: &", hyp, r"\\", file=out_f)
print("\\midrule", file=out_f)
print(r"\end{longtable}", file=out_f)
print(r"\end{document}", file=out_f)
|
python
|
Formats, as a LaTeX document, automatic hypotheses for utterances
that have no existing transcription.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/results.py#L169-L192
|
13,893
|
persephone-tools/persephone
|
persephone/preprocess/labels.py
|
segment_into_chars
|
def segment_into_chars(utterance: str) -> str:
""" Segments an utterance into space delimited characters. """
if not isinstance(utterance, str):
raise TypeError("Input type must be a string. Got {}.".format(type(utterance)))
    utterance = utterance.strip()  # str.strip() returns a new string; it must be reassigned
utterance = utterance.replace(" ", "")
return " ".join(utterance)
|
python
|
Segments an utterance into space delimited characters.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/labels.py#L28-L36
|
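An example of segment_into_chars above: existing spaces are removed before the character-level re-spacing, so word boundaries are not preserved.

>>> segment_into_chars("hai sa")
'h a i s a'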
13,894
|
persephone-tools/persephone
|
persephone/preprocess/labels.py
|
make_indices_to_labels
|
def make_indices_to_labels(labels: Set[str]) -> Dict[int, str]:
""" Creates a mapping from indices to labels. """
return {index: label for index, label in
enumerate(["pad"] + sorted(list(labels)))}
|
python
|
Creates a mapping from indices to labels.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/preprocess/labels.py#L81-L85
|
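An example of make_indices_to_labels above: index 0 is reserved for the "pad" symbol and the remaining labels get indices in sorted order.

>>> make_indices_to_labels({"b", "a"})
{0: 'pad', 1: 'a', 2: 'b'}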
13,895
|
persephone-tools/persephone
|
persephone/datasets/na.py
|
preprocess_french
|
def preprocess_french(trans, fr_nlp, remove_brackets_content=True):
""" Takes a list of sentences in french and preprocesses them."""
if remove_brackets_content:
trans = pangloss.remove_content_in_brackets(trans, "[]")
# Not sure why I have to split and rejoin, but that fixes a Spacy token
# error.
trans = fr_nlp(" ".join(trans.split()[:]))
#trans = fr_nlp(trans)
trans = " ".join([token.lower_ for token in trans if not token.is_punct])
return trans
|
python
|
Takes a French transcription string and preprocesses it.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L209-L220
|
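The lowercasing and punctuation filter used in preprocess_french above, in isolation, with a blank spaCy pipeline. This assumes the spacy package is installed; the real function additionally strips bracketed content via pangloss.remove_content_in_brackets.

import spacy

fr_nlp = spacy.blank("fr")               # tokenizer-only French pipeline
doc = fr_nlp("Bonjour, le monde !")
cleaned = " ".join(tok.lower_ for tok in doc if not tok.is_punct)
# cleaned == "bonjour le monde"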
13,896
|
persephone-tools/persephone
|
persephone/datasets/na.py
|
trim_wavs
|
def trim_wavs(org_wav_dir=ORG_WAV_DIR,
tgt_wav_dir=TGT_WAV_DIR,
org_xml_dir=ORG_XML_DIR):
""" Extracts sentence-level transcriptions, translations and wavs from the
Na Pangloss XML and WAV files. But otherwise doesn't preprocess them."""
logging.info("Trimming wavs...")
if not os.path.exists(os.path.join(tgt_wav_dir, "TEXT")):
os.makedirs(os.path.join(tgt_wav_dir, "TEXT"))
if not os.path.exists(os.path.join(tgt_wav_dir, "WORDLIST")):
os.makedirs(os.path.join(tgt_wav_dir, "WORDLIST"))
for fn in os.listdir(org_xml_dir):
path = os.path.join(org_xml_dir, fn)
prefix, _ = os.path.splitext(fn)
if os.path.isdir(path):
continue
if not path.endswith(".xml"):
continue
logging.info("Trimming wavs from {}".format(fn))
rec_type, _, times, _ = pangloss.get_sents_times_and_translations(path)
# Extract the wavs given the times.
for i, (start_time, end_time) in enumerate(times):
if prefix.endswith("PLUSEGG"):
in_wav_path = os.path.join(org_wav_dir, prefix.upper()[:-len("PLUSEGG")]) + ".wav"
else:
in_wav_path = os.path.join(org_wav_dir, prefix.upper()) + ".wav"
headmic_path = os.path.join(org_wav_dir, prefix.upper()) + "_HEADMIC.wav"
if os.path.isfile(headmic_path):
in_wav_path = headmic_path
out_wav_path = os.path.join(tgt_wav_dir, rec_type, "%s.%d.wav" % (prefix, i))
if not os.path.isfile(in_wav_path):
raise PersephoneException("{} not a file.".format(in_wav_path))
start_time = start_time * ureg.seconds
end_time = end_time * ureg.seconds
wav.trim_wav_ms(Path(in_wav_path), Path(out_wav_path),
start_time.to(ureg.milliseconds).magnitude,
end_time.to(ureg.milliseconds).magnitude)
|
python
|
Extracts sentence-level transcriptions, translations and wavs from the
Na Pangloss XML and WAV files, but otherwise doesn't preprocess them.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L222-L265
|
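The pint unit conversion used above, in isolation; this assumes the pint package, with the module-level ureg being a pint.UnitRegistry.

import pint

ureg = pint.UnitRegistry()
start_time = 1.5 * ureg.seconds
ms = start_time.to(ureg.milliseconds).magnitude
# ms == 1500.0, which is the unit wav.trim_wav_ms expects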
13,897
|
persephone-tools/persephone
|
persephone/datasets/na.py
|
prepare_labels
|
def prepare_labels(label_type, org_xml_dir=ORG_XML_DIR, label_dir=LABEL_DIR):
""" Prepare the neural network output targets."""
if not os.path.exists(os.path.join(label_dir, "TEXT")):
os.makedirs(os.path.join(label_dir, "TEXT"))
if not os.path.exists(os.path.join(label_dir, "WORDLIST")):
os.makedirs(os.path.join(label_dir, "WORDLIST"))
for path in Path(org_xml_dir).glob("*.xml"):
fn = path.name
prefix, _ = os.path.splitext(fn)
rec_type, sents, _, _ = pangloss.get_sents_times_and_translations(str(path))
# Write the sentence transcriptions to file
sents = [preprocess_na(sent, label_type) for sent in sents]
for i, sent in enumerate(sents):
if sent.strip() == "":
# Then there's no transcription, so ignore this.
continue
out_fn = "%s.%d.%s" % (prefix, i, label_type)
sent_path = os.path.join(label_dir, rec_type, out_fn)
with open(sent_path, "w") as sent_f:
print(sent, file=sent_f)
|
python
|
Prepare the neural network output targets.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L267-L289
|
13,898
|
persephone-tools/persephone
|
persephone/datasets/na.py
|
prepare_untran
|
def prepare_untran(feat_type, tgt_dir, untran_dir):
""" Preprocesses untranscribed audio."""
org_dir = str(untran_dir)
wav_dir = os.path.join(str(tgt_dir), "wav", "untranscribed")
feat_dir = os.path.join(str(tgt_dir), "feat", "untranscribed")
if not os.path.isdir(wav_dir):
os.makedirs(wav_dir)
if not os.path.isdir(feat_dir):
os.makedirs(feat_dir)
# Standardize into wav files
for fn in os.listdir(org_dir):
in_path = os.path.join(org_dir, fn)
prefix, _ = os.path.splitext(fn)
mono16k_wav_path = os.path.join(wav_dir, "%s.wav" % prefix)
if not os.path.isfile(mono16k_wav_path):
feat_extract.convert_wav(Path(in_path), Path(mono16k_wav_path))
# Split up the wavs and write prefixes to prefix file.
wav_fns = os.listdir(wav_dir)
with (tgt_dir / "untranscribed_prefixes.txt").open("w") as prefix_f:
for fn in wav_fns:
in_fn = os.path.join(wav_dir, fn)
prefix, _ = os.path.splitext(fn)
# Split into sub-wavs and perform feat extraction.
split_id = 0
start, end = 0, 10 #in seconds
length = utils.wav_length(in_fn)
while True:
sub_wav_prefix = "{}.{}".format(prefix, split_id)
print(sub_wav_prefix, file=prefix_f)
out_fn = os.path.join(feat_dir, "{}.wav".format(sub_wav_prefix))
start_time = start * ureg.seconds
end_time = end * ureg.seconds
if not Path(out_fn).is_file():
wav.trim_wav_ms(Path(in_fn), Path(out_fn),
start_time.to(ureg.milliseconds).magnitude,
end_time.to(ureg.milliseconds).magnitude)
if end > length:
break
start += 10
end += 10
split_id += 1
# Do feat extraction.
feat_extract.from_dir(Path(os.path.join(feat_dir)), feat_type=feat_type)
|
python
|
Preprocesses untranscribed audio.
|
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L292-L337
|
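The 10-second windowing loop in prepare_untran above, in isolation; note that the final window deliberately overshoots the wav length so the tail of the recording is still emitted. ten_second_windows is a hypothetical helper.

def ten_second_windows(length_secs):
    # Yields (start, end) offsets in seconds, exactly as the loop above does.
    start, end = 0, 10
    while True:
        yield start, end
        if end > length_secs:
            break
        start += 10
        end += 10

# list(ten_second_windows(25)) == [(0, 10), (10, 20), (20, 30)]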
13,899
|
persephone-tools/persephone
|
persephone/datasets/na.py
|
prepare_feats
|
def prepare_feats(feat_type, org_wav_dir=ORG_WAV_DIR, feat_dir=FEAT_DIR, tgt_wav_dir=TGT_WAV_DIR,
org_xml_dir=ORG_XML_DIR, label_dir=LABEL_DIR):
""" Prepare the input features."""
if not os.path.isdir(TGT_DIR):
os.makedirs(TGT_DIR)
if not os.path.isdir(FEAT_DIR):
os.makedirs(FEAT_DIR)
if not os.path.isdir(os.path.join(feat_dir, "WORDLIST")):
os.makedirs(os.path.join(feat_dir, "WORDLIST"))
if not os.path.isdir(os.path.join(feat_dir, "TEXT")):
os.makedirs(os.path.join(feat_dir, "TEXT"))
# Extract utterances from WAVS.
trim_wavs(org_wav_dir=org_wav_dir,
tgt_wav_dir=tgt_wav_dir,
org_xml_dir=org_xml_dir)
# TODO Currently assumes that the wav trimming from XML has already been
# done.
prefixes = []
for fn in os.listdir(os.path.join(tgt_wav_dir, "WORDLIST")):
if fn.endswith(".wav"):
pre, _ = os.path.splitext(fn)
prefixes.append(os.path.join("WORDLIST", pre))
for fn in os.listdir(os.path.join(tgt_wav_dir, "TEXT")):
if fn.endswith(".wav"):
pre, _ = os.path.splitext(fn)
prefixes.append(os.path.join("TEXT", pre))
if feat_type=="phonemes_onehot":
import numpy as np
#prepare_labels("phonemes")
for prefix in prefixes:
label_fn = os.path.join(label_dir, "%s.phonemes" % prefix)
out_fn = os.path.join(feat_dir, "%s.phonemes_onehot" % prefix)
try:
with open(label_fn) as label_f:
labels = label_f.readlines()[0].split()
except FileNotFoundError:
continue
indices = [PHONEMES_TO_INDICES[label] for label in labels]
one_hots = [[0]*len(PHONEMES) for _ in labels]
for i, index in enumerate(indices):
one_hots[i][index] = 1
one_hots = np.array(one_hots)
np.save(out_fn, one_hots)
else:
        # Otherwise, normalize each wav to 16 kHz mono and extract acoustic features.
for prefix in prefixes:
# Convert the wave to 16k mono.
wav_fn = os.path.join(tgt_wav_dir, "%s.wav" % prefix)
mono16k_wav_fn = os.path.join(feat_dir, "%s.wav" % prefix)
if not os.path.isfile(mono16k_wav_fn):
logging.info("Normalizing wav {} to a 16k 16KHz mono {}".format(
wav_fn, mono16k_wav_fn))
feat_extract.convert_wav(wav_fn, mono16k_wav_fn)
# Extract features from the wavs.
feat_extract.from_dir(Path(os.path.join(feat_dir, "WORDLIST")), feat_type=feat_type)
feat_extract.from_dir(Path(os.path.join(feat_dir, "TEXT")), feat_type=feat_type)
|
python
|
[
"def",
"prepare_feats",
"(",
"feat_type",
",",
"org_wav_dir",
"=",
"ORG_WAV_DIR",
",",
"feat_dir",
"=",
"FEAT_DIR",
",",
"tgt_wav_dir",
"=",
"TGT_WAV_DIR",
",",
"org_xml_dir",
"=",
"ORG_XML_DIR",
",",
"label_dir",
"=",
"LABEL_DIR",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"TGT_DIR",
")",
":",
"os",
".",
"makedirs",
"(",
"TGT_DIR",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"FEAT_DIR",
")",
":",
"os",
".",
"makedirs",
"(",
"FEAT_DIR",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"\"WORDLIST\"",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"\"WORDLIST\"",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"\"TEXT\"",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"\"TEXT\"",
")",
")",
"# Extract utterances from WAVS.",
"trim_wavs",
"(",
"org_wav_dir",
"=",
"org_wav_dir",
",",
"tgt_wav_dir",
"=",
"tgt_wav_dir",
",",
"org_xml_dir",
"=",
"org_xml_dir",
")",
"# TODO Currently assumes that the wav trimming from XML has already been",
"# done.",
"prefixes",
"=",
"[",
"]",
"for",
"fn",
"in",
"os",
".",
"listdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tgt_wav_dir",
",",
"\"WORDLIST\"",
")",
")",
":",
"if",
"fn",
".",
"endswith",
"(",
"\".wav\"",
")",
":",
"pre",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fn",
")",
"prefixes",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"WORDLIST\"",
",",
"pre",
")",
")",
"for",
"fn",
"in",
"os",
".",
"listdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"tgt_wav_dir",
",",
"\"TEXT\"",
")",
")",
":",
"if",
"fn",
".",
"endswith",
"(",
"\".wav\"",
")",
":",
"pre",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fn",
")",
"prefixes",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"\"TEXT\"",
",",
"pre",
")",
")",
"if",
"feat_type",
"==",
"\"phonemes_onehot\"",
":",
"import",
"numpy",
"as",
"np",
"#prepare_labels(\"phonemes\")",
"for",
"prefix",
"in",
"prefixes",
":",
"label_fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"label_dir",
",",
"\"%s.phonemes\"",
"%",
"prefix",
")",
"out_fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"\"%s.phonemes_onehot\"",
"%",
"prefix",
")",
"try",
":",
"with",
"open",
"(",
"label_fn",
")",
"as",
"label_f",
":",
"labels",
"=",
"label_f",
".",
"readlines",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
")",
"except",
"FileNotFoundError",
":",
"continue",
"indices",
"=",
"[",
"PHONEMES_TO_INDICES",
"[",
"label",
"]",
"for",
"label",
"in",
"labels",
"]",
"one_hots",
"=",
"[",
"[",
"0",
"]",
"*",
"len",
"(",
"PHONEMES",
")",
"for",
"_",
"in",
"labels",
"]",
"for",
"i",
",",
"index",
"in",
"enumerate",
"(",
"indices",
")",
":",
"one_hots",
"[",
"i",
"]",
"[",
"index",
"]",
"=",
"1",
"one_hots",
"=",
"np",
".",
"array",
"(",
"one_hots",
")",
"np",
".",
"save",
"(",
"out_fn",
",",
"one_hots",
")",
"else",
":",
"# Otherwise, ",
"for",
"prefix",
"in",
"prefixes",
":",
"# Convert the wave to 16k mono.",
"wav_fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tgt_wav_dir",
",",
"\"%s.wav\"",
"%",
"prefix",
")",
"mono16k_wav_fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"\"%s.wav\"",
"%",
"prefix",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"mono16k_wav_fn",
")",
":",
"logging",
".",
"info",
"(",
"\"Normalizing wav {} to a 16k 16KHz mono {}\"",
".",
"format",
"(",
"wav_fn",
",",
"mono16k_wav_fn",
")",
")",
"feat_extract",
".",
"convert_wav",
"(",
"wav_fn",
",",
"mono16k_wav_fn",
")",
"# Extract features from the wavs.",
"feat_extract",
".",
"from_dir",
"(",
"Path",
"(",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"\"WORDLIST\"",
")",
")",
",",
"feat_type",
"=",
"feat_type",
")",
"feat_extract",
".",
"from_dir",
"(",
"Path",
"(",
"os",
".",
"path",
".",
"join",
"(",
"feat_dir",
",",
"\"TEXT\"",
")",
")",
",",
"feat_type",
"=",
"feat_type",
")"
] |
Prepare the input features.
|
[
"Prepare",
"the",
"input",
"features",
"."
] |
f94c63e4d5fe719fb1deba449b177bb299d225fb
|
https://github.com/persephone-tools/persephone/blob/f94c63e4d5fe719fb1deba449b177bb299d225fb/persephone/datasets/na.py#L340-L402
|
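A minimal, self-contained sketch of the one-hot labeling branch of prepare_feats above. The tiny PHONEMES inventory and the phonemes_to_onehot helper here are illustrative stand-ins (the real inventory and index mapping live in persephone/datasets/na.py at the sha linked above); only the build-indices-then-set-1 logic mirrors the function.

import numpy as np

# Illustrative inventory; the real PHONEMES list is defined in persephone/datasets/na.py.
PHONEMES = ["a", "b", "m", "o"]
PHONEMES_TO_INDICES = {p: i for i, p in enumerate(PHONEMES)}

def phonemes_to_onehot(labels):
    """Map a sequence of phoneme labels to a (len(labels), len(PHONEMES)) one-hot array."""
    indices = [PHONEMES_TO_INDICES[label] for label in labels]
    one_hots = np.zeros((len(labels), len(PHONEMES)), dtype=int)
    for i, index in enumerate(indices):
        one_hots[i][index] = 1
    return one_hots

# Example: the label sequence "m a o" yields three one-hot rows.
print(phonemes_to_onehot("m a o".split()))
# [[0 0 1 0]
#  [1 0 0 0]
#  [0 0 0 1]]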