| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q14800
|
workspace_backup_add
|
train
|
def workspace_backup_add(ctx):
"""
Create a new backup
"""
backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver, directory=ctx.directory, mets_basename=ctx.mets_basename, automatic_backup=ctx.automatic_backup))
backup_manager.add()
|
python
|
{
"resource": ""
}
|
q14801
|
workspace_backup_restore
|
train
|
def workspace_backup_restore(ctx, choose_first, bak):
"""
Restore backup BAK
"""
backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver, directory=ctx.directory, mets_basename=ctx.mets_basename, automatic_backup=ctx.automatic_backup))
backup_manager.restore(bak, choose_first)
|
python
|
{
"resource": ""
}
|
q14802
|
workspace_backup_undo
|
train
|
def workspace_backup_undo(ctx):
"""
Restore the last backup
"""
backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver, directory=ctx.directory, mets_basename=ctx.mets_basename, automatic_backup=ctx.automatic_backup))
backup_manager.undo()
|
python
|
{
"resource": ""
}
|
q14803
|
extend_with_default
|
train
|
def extend_with_default(validator_class):
"""
Add a default-setting mechanism to a ``jsonschema`` validation class.
"""
validate_properties = validator_class.VALIDATORS["properties"]
def set_defaults(validator, properties, instance, schema):
"""
Set defaults in subschemas
"""
for prop, subschema in properties.items():
if "default" in subschema:
instance.setdefault(prop, subschema["default"])
for error in validate_properties(validator, properties, instance, schema):
yield error
return validators.extend(validator_class, {"properties": set_defaults})
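# Illustrative usage sketch (added; not part of the original snippet). It assumes
# that `validators` above is jsonschema.validators and that Draft4Validator is available:
from jsonschema import Draft4Validator
DefaultSettingValidator = extend_with_default(Draft4Validator)
obj = {}
DefaultSettingValidator({"properties": {"n": {"default": 1}}}).validate(obj)
assert obj == {"n": 1}  # the missing property was filled in with its schema default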
|
python
|
{
"resource": ""
}
|
q14804
|
JsonValidator.validate
|
train
|
def validate(obj, schema):
"""
Validate an object against a schema
Args:
obj (dict):
schema (dict):
"""
if isinstance(obj, str):
obj = json.loads(obj)
return JsonValidator(schema)._validate(obj)
|
python
|
{
"resource": ""
}
|
q14805
|
run_processor
|
train
|
def run_processor(
processorClass,
ocrd_tool=None,
mets_url=None,
resolver=None,
workspace=None,
page_id=None,
log_level=None,
input_file_grp=None,
output_file_grp=None,
parameter=None,
working_dir=None,
): # pylint: disable=too-many-locals
"""
Create a workspace for mets_url and run processor through it
Args:
parameter (string): URL to the parameter
"""
workspace = _get_workspace(
workspace,
resolver,
mets_url,
working_dir
)
if parameter is not None:
if not '://' in parameter:
fname = os.path.abspath(parameter)
else:
fname = workspace.download_url(parameter)
with open(fname, 'r') as param_json_file:
parameter = json.load(param_json_file)
else:
parameter = {}
log.debug("Running processor %s", processorClass)
processor = processorClass(
workspace,
ocrd_tool=ocrd_tool,
page_id=page_id,
input_file_grp=input_file_grp,
output_file_grp=output_file_grp,
parameter=parameter
)
ocrd_tool = processor.ocrd_tool
name = '%s v%s' % (ocrd_tool['executable'], processor.version)
otherrole = ocrd_tool['steps'][0]
log.debug("Processor instance %s (%s doing %s)", processor, name, otherrole)
processor.process()
workspace.mets.add_agent(
name=name,
_type='OTHER',
othertype='SOFTWARE',
role='OTHER',
otherrole=otherrole
)
workspace.save_mets()
return processor
|
python
|
{
"resource": ""
}
|
q14806
|
run_cli
|
train
|
def run_cli(
executable,
mets_url=None,
resolver=None,
workspace=None,
page_id=None,
log_level=None,
input_file_grp=None,
output_file_grp=None,
parameter=None,
working_dir=None,
):
"""
Create a workspace for mets_url and run MP CLI through it
"""
workspace = _get_workspace(workspace, resolver, mets_url, working_dir)
args = [executable, '--working-dir', workspace.directory]
args += ['--mets', mets_url]
if log_level:
args += ['--log-level', log_level]
if page_id:
args += ['--page-id', page_id]
if input_file_grp:
args += ['--input-file-grp', input_file_grp]
if output_file_grp:
args += ['--output-file-grp', output_file_grp]
if parameter:
args += ['--parameter', parameter]
log.debug("Running subprocess '%s'", ' '.join(args))
return subprocess.call(args)
|
python
|
{
"resource": ""
}
|
q14807
|
Processor.input_files
|
train
|
def input_files(self):
"""
List the input files
"""
return self.workspace.mets.find_files(fileGrp=self.input_file_grp, pageId=self.page_id)
|
python
|
{
"resource": ""
}
|
q14808
|
page_from_file
|
train
|
def page_from_file(input_file):
"""
Create a new PAGE-XML from a METS file representing a PAGE-XML or an image.
Arguments:
* input_file (OcrdFile):
"""
# print("PARSING PARSING '%s'" % input_file)
if input_file.mimetype.startswith('image'):
return page_from_image(input_file)
if input_file.mimetype == MIMETYPE_PAGE:
return parse(input_file.local_filename, silence=True)
raise Exception("Unsupported mimetype '%s'" % input_file.mimetype)
|
python
|
{
"resource": ""
}
|
q14809
|
concat_padded
|
train
|
def concat_padded(base, *args):
"""
Concatenate a base string with zero-padded 4-digit numbers (numeric arguments are incremented by 1); string arguments are appended as-is.
"""
ret = base
for n in args:
if is_string(n):
ret = "%s_%s" % (ret, n)
else:
ret = "%s_%04i" % (ret, n + 1)
return ret
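# Illustrative usage sketch (added; not part of the original snippet; assumes
# is_string() recognizes plain str arguments):
assert concat_padded("FILE", 0) == "FILE_0001"             # numbers are 0-based and zero-padded
assert concat_padded("FILE", 0, "MAX") == "FILE_0001_MAX"  # strings are appended verbatim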
|
python
|
{
"resource": ""
}
|
q14810
|
points_from_xywh
|
train
|
def points_from_xywh(box):
"""
Constructs a polygon representation from a rectangle described as a dict with keys x, y, w, h.
"""
x, y, w, h = box['x'], box['y'], box['w'], box['h']
# tesseract uses a different region representation format
return "%i,%i %i,%i %i,%i %i,%i" % (
x, y,
x + w, y,
x + w, y + h,
x, y + h
)
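# Illustrative usage sketch (added; not part of the original snippet):
assert points_from_xywh({'x': 100, 'y': 50, 'w': 40, 'h': 20}) == "100,50 140,50 140,70 100,70"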
|
python
|
{
"resource": ""
}
|
q14811
|
polygon_from_points
|
train
|
def polygon_from_points(points):
"""
Constructs a numpy-compatible polygon from a page representation.
"""
polygon = []
for pair in points.split(" "):
x_y = pair.split(",")
polygon.append([float(x_y[0]), float(x_y[1])])
return polygon
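# Illustrative usage sketch (added; not part of the original snippet):
assert polygon_from_points("100,50 140,50 140,70 100,70") == [
    [100.0, 50.0], [140.0, 50.0], [140.0, 70.0], [100.0, 70.0]]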
|
python
|
{
"resource": ""
}
|
q14812
|
unzip_file_to_dir
|
train
|
def unzip_file_to_dir(path_to_zip, output_directory):
"""
Extract a ZIP archive to a directory
"""
with ZipFile(path_to_zip, 'r') as z:
    z.extractall(output_directory)
|
python
|
{
"resource": ""
}
|
q14813
|
xywh_from_points
|
train
|
def xywh_from_points(points):
"""
Constructs a dict representing a rectangle with keys x, y, w, h
"""
xys = [[int(p) for p in pair.split(',')] for pair in points.split(' ')]
minx = sys.maxsize
miny = sys.maxsize
maxx = 0
maxy = 0
for xy in xys:
if xy[0] < minx:
minx = xy[0]
if xy[0] > maxx:
maxx = xy[0]
if xy[1] < miny:
miny = xy[1]
if xy[1] > maxy:
maxy = xy[1]
return {
'x': minx,
'y': miny,
'w': maxx - minx,
'h': maxy - miny,
}
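# Illustrative usage sketch (added; not part of the original snippet); this is the
# inverse of points_from_xywh above:
assert xywh_from_points("100,50 140,50 140,70 100,70") == {'x': 100, 'y': 50, 'w': 40, 'h': 20}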
|
python
|
{
"resource": ""
}
|
q14814
|
OcrdZipValidator.validate
|
train
|
def validate(self, skip_checksums=False, skip_bag=False, skip_unzip=False, skip_delete=False, processes=2):
"""
Validate an OCRD-ZIP file for profile, bag and workspace conformance
Arguments:
skip_bag (boolean): Whether to skip all checks of manifests and files
skip_checksums (boolean): Whether to omit checksum checks but still check basic BagIt conformance
skip_unzip (boolean): Whether the OCRD-ZIP is unzipped, i.e. a directory
skip_delete (boolean): Whether to skip deleting the unpacked OCRD-ZIP dir after validation
processes (integer): Number of processes used for checksum validation
"""
if skip_unzip:
bagdir = self.path_to_zip
skip_delete = True
else:
# try:
self.profile_validator.validate_serialization(self.path_to_zip)
# except IOError as err:
# raise err
# except ProfileValidationError as err:
# self.report.add_error(err.value)
bagdir = mkdtemp(prefix=TMP_BAGIT_PREFIX)
unzip_file_to_dir(self.path_to_zip, bagdir)
try:
bag = Bag(bagdir)
self._validate_profile(bag)
if not skip_bag:
self._validate_bag(bag, fast=skip_checksums, processes=processes)
finally:
if not skip_delete:
# remove tempdir
rmtree(bagdir)
return self.report
|
python
|
{
"resource": ""
}
|
q14815
|
quote_xml
|
train
|
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
|
python
|
{
"resource": ""
}
|
q14816
|
parseString
|
train
|
def parseString(inString, silence=False):
'''Parse a string, create the object tree, and export it.
Arguments:
- inString -- A string. This XML fragment should not start
with an XML declaration containing an encoding.
- silence -- A boolean. If False, export the object.
Returns -- The root object in the tree.
'''
parser = None
rootNode= parsexmlstring_(inString, parser)
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'PcGts'
rootClass = PcGts
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2018-07-15"')
return rootObj
|
python
|
{
"resource": ""
}
|
q14817
|
ocrd_tool_tool_parse_params
|
train
|
def ocrd_tool_tool_parse_params(ctx, parameters, json):
"""
Parse parameters with fallback to defaults and output as shell-eval'able assignments to params var.
"""
if parameters is None or parameters == "":
parameters = {}
else:
with open(parameters, 'r') as f:
parameters = loads(f.read())
parameterValidator = ParameterValidator(ctx.json['tools'][ctx.tool_name])
report = parameterValidator.validate(parameters)
if not report.is_valid:
print(report.to_xml())
sys.exit(1)
if json:
print(dumps(parameters))
else:
for k in parameters:
print('params["%s"]="%s"' % (k, parameters[k]))
|
python
|
{
"resource": ""
}
|
q14818
|
OcrdAgent.othertype
|
train
|
def othertype(self, othertype):
"""
Set the ``OTHERTYPE`` attribute value.
"""
if othertype is not None:
self._el.set('TYPE', 'OTHER')
self._el.set('OTHERTYPE', othertype)
|
python
|
{
"resource": ""
}
|
q14819
|
OcrdAgent.otherrole
|
train
|
def otherrole(self, otherrole):
"""
Set the ``OTHERROLE`` attribute value.
"""
if otherrole is not None:
self._el.set('ROLE', 'OTHER')
self._el.set('OTHERROLE', otherrole)
|
python
|
{
"resource": ""
}
|
q14820
|
ParameterValidator.validate
|
train
|
def validate(self, *args, **kwargs): # pylint: disable=arguments-differ
"""
Validate a parameter dict against a parameter schema from an ocrd-tool.json
Args:
obj (dict):
schema (dict):
"""
return super(ParameterValidator, self)._validate(*args, **kwargs)
|
python
|
{
"resource": ""
}
|
q14821
|
handle_inconsistencies
|
train
|
def handle_inconsistencies(node, strictness, strategy, report):
"""
Check whether the text results on an element are consistent with its child elements' text results.
"""
if isinstance(node, PcGtsType):
node = node.get_Page()
elif isinstance(node, GlyphType):
return report
_, tag, getter, concatenate_with = [x for x in _HIERARCHY if isinstance(node, x[0])][0]
children_are_consistent = True
children = getattr(node, getter)()
for child in children:
errors_before = len(report.errors)
handle_inconsistencies(child, strictness, strategy, report)
if len(report.errors) > errors_before:
children_are_consistent = False
if concatenate_with is not None:
concatenated_children = concatenate_children(node, concatenate_with, strategy)
text_results = get_text(node, strategy)
if concatenated_children and text_results and concatenated_children != text_results:
if strictness == 'fix':
set_text(node, concatenated_children, strategy)
# if children_are_consistent:
# else:
# # TODO fix text results recursively
# report.add_warning("Fixing inconsistencies recursively not implemented")
elif strictness == 'lax':
if not compare_without_whitespace(concatenated_children, text_results):
report.add_error(ConsistencyError(tag, node.id, text_results, concatenated_children))
else:
report.add_error(ConsistencyError(tag, node.id, text_results, concatenated_children))
return report
|
python
|
{
"resource": ""
}
|
q14822
|
get_text
|
train
|
def get_text(node, strategy):
"""
Get the most confident text result: the one with @index = 1 if present, otherwise the first text result, or an empty string if there are none.
"""
textEquivs = node.get_TextEquiv()
if not textEquivs:
log.debug("No text results on %s %s", node, node.id)
return ''
# elif strategy == 'index1':
else:
if len(textEquivs) > 1:
index1 = [x for x in textEquivs if x.index == 1]
if index1:
return index1[0].get_Unicode().strip()
return textEquivs[0].get_Unicode().strip()
|
python
|
{
"resource": ""
}
|
q14823
|
set_text
|
train
|
def set_text(node, text, strategy):
"""
Set the most confident text result: the one with @index = 1 if present, otherwise the first text result, or add a new one if none exists.
"""
text = text.strip()
textEquivs = node.get_TextEquiv()
if not textEquivs:
node.add_TextEquiv(TextEquivType(Unicode=text))
# elif strategy == 'index1':
else:
if len(textEquivs) > 1:
index1 = [x for x in textEquivs if x.index == 1]
if index1:
index1[0].set_Unicode(text)
return
textEquivs[0].set_Unicode(text)
|
python
|
{
"resource": ""
}
|
q14824
|
PageValidator.validate
|
train
|
def validate(filename=None, ocrd_page=None, ocrd_file=None, strictness='strict', strategy='index1'):
"""
Validates a PAGE file for consistency by filename, OcrdFile or passing OcrdPage directly.
Arguments:
filename (string): Path to PAGE
ocrd_page (OcrdPage): OcrdPage instance
ocrd_file (OcrdFile): OcrdFile instance wrapping OcrdPage
strictness (string): 'strict', 'lax', 'fix' or 'off'
strategy (string): Currently only 'index1'
Returns:
report (:class:`ValidationReport`) Report on the validity
"""
if ocrd_page:
validator = PageValidator(ocrd_page, strictness, strategy)
elif ocrd_file:
validator = PageValidator(page_from_file(ocrd_file), strictness, strategy)
elif filename:
validator = PageValidator(parse(filename, silence=True), strictness, strategy)
else:
raise Exception("At least one of ocrd_page, ocrd_file or filename must be set")
return validator._validate()
|
python
|
{
"resource": ""
}
|
q14825
|
ocrd_cli_options
|
train
|
def ocrd_cli_options(f):
"""
Implement MP CLI.
Usage::
from ocrd.utils import ocrd_click_cli
@click.command()
@ocrd_click_cli
def cli(mets_url):
print(mets_url)
"""
params = [
click.option('-m', '--mets', help="METS URL to validate"),
click.option('-w', '--working-dir', help="Working Directory"),
click.option('-I', '--input-file-grp', help='File group(s) used as input.', default='INPUT'),
click.option('-O', '--output-file-grp', help='File group(s) used as output.', default='OUTPUT'),
click.option('-g', '--page-id', help="ID(s) of the pages to process"),
click.option('-p', '--parameter', type=click.Path()),
click.option('-J', '--dump-json', help="Dump tool description as JSON and exit", is_flag=True, default=False),
loglevel_option,
click.option('-V', '--version', help="Show version", is_flag=True, default=False)
]
for param in params:
param(f)
return f
|
python
|
{
"resource": ""
}
|
q14826
|
ValidationReport.merge_report
|
train
|
def merge_report(self, otherself):
"""
Merge another report into this one.
"""
self.notices += otherself.notices
self.warnings += otherself.warnings
self.errors += otherself.errors
|
python
|
{
"resource": ""
}
|
q14827
|
process_cli
|
train
|
def process_cli(log_level, mets, page_id, tasks):
"""
Process a series of tasks
"""
log = getLogger('ocrd.cli.process')
run_tasks(mets, log_level, page_id, tasks)
log.info("Finished")
|
python
|
{
"resource": ""
}
|
q14828
|
bag
|
train
|
def bag(directory, mets_basename, dest, identifier, in_place, manifestation_depth, mets, base_version_checksum, tag_file, skip_zip, processes):
"""
Bag workspace as OCRD-ZIP at DEST
"""
resolver = Resolver()
workspace = Workspace(resolver, directory=directory, mets_basename=mets_basename)
workspace_bagger = WorkspaceBagger(resolver)
workspace_bagger.bag(
workspace,
dest=dest,
ocrd_identifier=identifier,
ocrd_manifestation_depth=manifestation_depth,
ocrd_mets=mets,
ocrd_base_version_checksum=base_version_checksum,
processes=processes,
tag_files=tag_file,
skip_zip=skip_zip,
in_place=in_place
)
|
python
|
{
"resource": ""
}
|
q14829
|
validate
|
train
|
def validate(src, **kwargs):
"""
Validate OCRD-ZIP
SRC must exist and be an OCRD-ZIP, either a ZIP file or a directory.
"""
resolver = Resolver()
validator = OcrdZipValidator(resolver, src)
report = validator.validate(**kwargs)
print(report)
if not report.is_valid:
sys.exit(1)
|
python
|
{
"resource": ""
}
|
q14830
|
WorkspaceBackupManager.restore
|
train
|
def restore(self, chksum, choose_first=False):
"""
Restore mets.xml to previous state
"""
log = getLogger('ocrd.workspace_backup.restore')
bak = None
candidates = glob(join(self.backup_directory, '%s*' % chksum))
if not candidates:
log.error("No backup found: %s" % chksum)
return
if len(candidates) > 1 and not choose_first:
raise Exception("Not unique, could be\n%s" % '\n'.join(candidates))
bak = candidates[0]
self.add()
log.info("Restoring from %s/mets.xml" % bak)
src = join(bak, 'mets.xml')
dest = self.workspace.mets_target
log.debug('cp "%s" "%s"', src, dest)
copy(src, dest)
self.workspace.reload_mets()
|
python
|
{
"resource": ""
}
|
q14831
|
WorkspaceBackupManager.list
|
train
|
def list(self):
"""
List all backups as WorkspaceBackup objects, sorted descending by lastmod.
"""
backups = []
for d in glob(join(self.backup_directory, '*')):
backups.append(WorkspaceBackup.from_path(d))
backups.sort(key=lambda b: b.lastmod, reverse=True)
return backups
|
python
|
{
"resource": ""
}
|
q14832
|
WorkspaceBackupManager.undo
|
train
|
def undo(self):
"""
Restore to last version
"""
log = getLogger('ocrd.workspace_backup.undo')
backups = self.list()
if backups:
last_backup = backups[0]
self.restore(last_backup.chksum, choose_first=True)
else:
log.info("No backups, nothing to undo.")
|
python
|
{
"resource": ""
}
|
q14833
|
setOverrideLogLevel
|
train
|
def setOverrideLogLevel(lvl):
"""
Override all logger filter levels to include lvl and above.
- Sets the root logger level.
- Iterates all existing loggers and sets their log level to ``NOTSET``.
Args:
lvl (string): Log level name.
"""
if lvl is None:
return
logging.info('Overriding log level globally to %s', lvl)
lvl = getLevelName(lvl)
global _overrideLogLevel # pylint: disable=global-statement
_overrideLogLevel = lvl
logging.getLogger('').setLevel(lvl)
for loggerName in logging.Logger.manager.loggerDict:
logger = logging.Logger.manager.loggerDict[loggerName]
if isinstance(logger, logging.PlaceHolder):
continue
logger.setLevel(logging.NOTSET)
|
python
|
{
"resource": ""
}
|
q14834
|
initLogging
|
train
|
def initLogging():
"""
Sets logging defaults
"""
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s %(name)s - %(message)s',
datefmt='%H:%M:%S')
logging.getLogger('').setLevel(logging.INFO)
# logging.getLogger('ocrd.resolver').setLevel(logging.INFO)
# logging.getLogger('ocrd.resolver.download_to_directory').setLevel(logging.INFO)
# logging.getLogger('ocrd.resolver.add_files_to_mets').setLevel(logging.INFO)
logging.getLogger('PIL').setLevel(logging.INFO)
# Allow overriding
CONFIG_PATHS = [
os.path.curdir,
os.path.join(os.path.expanduser('~')),
'/etc',
]
for p in CONFIG_PATHS:
config_file = os.path.join(p, 'ocrd_logging.py')
if os.path.exists(config_file):
logging.info("Loading logging configuration from '%s'", config_file)
with open(config_file) as f:
code = compile(f.read(), config_file, 'exec')
exec(code, globals(), locals())
|
python
|
{
"resource": ""
}
|
q14835
|
WorkspaceBagger.bag
|
train
|
def bag(self,
workspace,
ocrd_identifier,
dest=None,
ocrd_mets='mets.xml',
ocrd_manifestation_depth='full',
ocrd_base_version_checksum=None,
processes=1,
skip_zip=False,
in_place=False,
tag_files=None
):
"""
Bag a workspace
See https://ocr-d.github.com/ocrd_zip#packing-a-workspace-as-ocrd-zip
Arguments:
workspace (ocrd.Workspace): workspace to bag
ocrd_identifier (string): Ocrd-Identifier in bag-info.txt
dest (string): Path of the generated OCRD-ZIP.
ocrd_mets (string): Ocrd-Mets in bag-info.txt
ocrd_manifestation_depth (string): Ocrd-Manifestation-Depth in bag-info.txt
ocrd_base_version_checksum (string): Ocrd-Base-Version-Checksum in bag-info.txt
processes (integer): Number of parallel processes checksumming
skip_zip (boolean): Whether to leave directory unzipped
in_place (boolean): Whether to **replace** the workspace with its BagIt variant
tag_files (list<string>): Path names of additional tag files to be bagged at the root of the bag
"""
if ocrd_manifestation_depth not in ('full', 'partial'):
raise Exception("manifestation_depth must be 'full' or 'partial'")
if in_place and (dest is not None):
raise Exception("Setting 'dest' and 'in_place' is a contradiction")
if in_place and not skip_zip:
raise Exception("Setting 'skip_zip' and not 'in_place' is a contradiction")
if tag_files is None:
tag_files = []
# create bagdir
bagdir = mkdtemp(prefix=TMP_BAGIT_PREFIX)
if dest is None:
if in_place:
dest = workspace.directory
elif not skip_zip:
dest = '%s.ocrd.zip' % workspace.directory
else:
dest = '%s.ocrd' % workspace.directory
log.info("Bagging %s to %s (temp dir %s)", workspace.directory, '(in-place)' if in_place else dest, bagdir)
# create data dir
makedirs(join(bagdir, 'data'))
# create bagit.txt
with open(join(bagdir, 'bagit.txt'), 'wb') as f:
f.write(BAGIT_TXT.encode('utf-8'))
# create manifests
total_bytes, total_files = self._bag_mets_files(workspace, bagdir, ocrd_manifestation_depth, ocrd_mets, processes)
# create bag-info.txt
bag = Bag(bagdir)
self._set_bag_info(bag, total_bytes, total_files, ocrd_identifier, ocrd_manifestation_depth, ocrd_base_version_checksum)
for tag_file in tag_files:
copyfile(tag_file, join(bagdir, basename(tag_file)))
# save bag
bag.save()
# ZIP it
self._serialize_bag(workspace, bagdir, dest, in_place, skip_zip)
log.info('Created bag at %s', dest)
return dest
|
python
|
{
"resource": ""
}
|
q14836
|
WorkspaceBagger.spill
|
train
|
def spill(self, src, dest):
"""
Spill a bagged workspace, i.e. unpack the OCRD-ZIP and turn it into a regular workspace.
See https://ocr-d.github.com/ocrd_zip#unpacking-ocrd-zip-to-a-workspace
Arguments:
src (string): Path to OCRD-ZIP
dest (string): Path to directory to unpack data folder to
"""
# print(dest)
if exists(dest) and not isdir(dest):
raise Exception("Not a directory: %s" % dest)
# If dest is an existing directory, try to derive its name from src
if isdir(dest):
workspace_name = re.sub(r'(\.ocrd)?\.zip$', '', basename(src))
new_dest = join(dest, workspace_name)
if exists(new_dest):
raise Exception("Directory exists: %s" % new_dest)
dest = new_dest
log.info("Spilling %s to %s", src, dest)
bagdir = mkdtemp(prefix=TMP_BAGIT_PREFIX)
unzip_file_to_dir(src, bagdir)
datadir = join(bagdir, 'data')
for root, _, files in walk(datadir):
for f in files:
srcfile = join(root, f)
destdir = join(dest, relpath(root, datadir))
destfile = join(destdir, f)
if not exists(destdir):
makedirs(destdir)
log.debug("Copy %s -> %s", srcfile, destfile)
copyfile(srcfile, destfile)
# TODO copy allowed tag files if present
# TODO validate bagit
# Drop tempdir
rmtree(bagdir)
# Create workspace
workspace = Workspace(self.resolver, directory=dest)
# TODO validate workspace
return workspace
|
python
|
{
"resource": ""
}
|
q14837
|
Workspace.download_url
|
train
|
def download_url(self, url, **kwargs):
"""
Download a URL to the workspace.
Args:
url (string): URL to download to directory
**kwargs : See :py:mod:`ocrd.resolver.Resolver`
Returns:
The local filename of the downloaded file
"""
if self.baseurl and '://' not in url:
url = join(self.baseurl, url)
return self.resolver.download_to_directory(self.directory, url, **kwargs)
|
python
|
{
"resource": ""
}
|
q14838
|
Workspace.save_mets
|
train
|
def save_mets(self):
"""
Write out the current state of the METS file.
"""
log.info("Saving mets '%s'" % self.mets_target)
if self.automatic_backup:
WorkspaceBackupManager(self).add()
with open(self.mets_target, 'wb') as f:
f.write(self.mets.to_xml(xmllint=True))
|
python
|
{
"resource": ""
}
|
q14839
|
Workspace.resolve_image_as_pil
|
train
|
def resolve_image_as_pil(self, image_url, coords=None):
"""
Resolve an image URL to a PIL image.
Args:
image_url (string) : URL of the image to resolve
coords (list) : Coordinates of the bounding box to cut from the image
Returns:
Image or region in image as PIL.Image
"""
files = self.mets.find_files(url=image_url)
if files:
image_filename = self.download_file(files[0]).local_filename
else:
image_filename = self.download_url(image_url)
if image_url not in self.image_cache['pil']:
self.image_cache['pil'][image_url] = Image.open(image_filename)
pil_image = self.image_cache['pil'][image_url]
if coords is None:
return pil_image
if image_url not in self.image_cache['cv2']:
log.debug("Converting PIL to OpenCV: %s", image_url)
color_conversion = cv2.COLOR_GRAY2BGR if pil_image.mode in ('1', 'L') else cv2.COLOR_RGB2BGR
pil_as_np_array = np.array(pil_image).astype('uint8') if pil_image.mode == '1' else np.array(pil_image)
self.image_cache['cv2'][image_url] = cv2.cvtColor(pil_as_np_array, color_conversion)
cv2_image = self.image_cache['cv2'][image_url]
poly = np.array(coords, np.int32)
log.debug("Cutting region %s from %s", coords, image_url)
region_cut = cv2_image[
np.min(poly[:, 1]):np.max(poly[:, 1]),
np.min(poly[:, 0]):np.max(poly[:, 0])
]
return Image.fromarray(region_cut)
|
python
|
{
"resource": ""
}
|
q14840
|
OcrdExif.to_xml
|
train
|
def to_xml(self):
"""
Serialize all properties as XML
"""
ret = '<exif>'
for k in self.__dict__:
ret += '<%s>%s</%s>' % (k, self.__dict__[k], k)
ret += '</exif>'
return ret
|
python
|
{
"resource": ""
}
|
q14841
|
OcrdFile.basename_without_extension
|
train
|
def basename_without_extension(self):
"""
Get the ``os.path.basename`` of the local file, if any, with extension removed.
"""
ret = self.basename.rsplit('.', 1)[0]
if ret.endswith('.tar'):
ret = ret[0:len(ret)-4]
return ret
|
python
|
{
"resource": ""
}
|
q14842
|
OcrdFile.pageId
|
train
|
def pageId(self):
"""
Get the ID of the physical page this file manifests.
"""
if self.mets is None:
raise Exception("OcrdFile %s has no member 'mets' pointing to parent OcrdMets" % self)
return self.mets.get_physical_page_for_file(self)
|
python
|
{
"resource": ""
}
|
q14843
|
OcrdFile.pageId
|
train
|
def pageId(self, pageId):
"""
Set the ID of the physical page this file manifests.
"""
if pageId is None:
return
if self.mets is None:
raise Exception("OcrdFile %s has no member 'mets' pointing to parent OcrdMets" % self)
self.mets.set_physical_page_for_file(pageId, self)
|
python
|
{
"resource": ""
}
|
q14844
|
OcrdMets.empty_mets
|
train
|
def empty_mets():
"""
Create an empty METS file from bundled template.
"""
tpl = METS_XML_EMPTY.decode('utf-8')
tpl = tpl.replace('{{ VERSION }}', VERSION)
tpl = tpl.replace('{{ NOW }}', '%s' % datetime.now())
return OcrdMets(content=tpl.encode('utf-8'))
|
python
|
{
"resource": ""
}
|
q14845
|
OcrdMets.set_physical_page_for_file
|
train
|
def set_physical_page_for_file(self, pageId, ocrd_file, order=None, orderlabel=None):
"""
Link a file to a physical page, creating the structMap/physSequence/page divs as necessary
"""
# print(pageId, ocrd_file)
# delete any page mapping for this file.ID
for el_fptr in self._tree.getroot().findall(
'mets:structMap[@TYPE="PHYSICAL"]/mets:div[@TYPE="physSequence"]/mets:div[@TYPE="page"]/mets:fptr[@FILEID="%s"]' %
ocrd_file.ID, namespaces=NS):
el_fptr.getparent().remove(el_fptr)
# find/construct as necessary
el_structmap = self._tree.getroot().find('mets:structMap[@TYPE="PHYSICAL"]', NS)
if el_structmap is None:
el_structmap = ET.SubElement(self._tree.getroot(), TAG_METS_STRUCTMAP)
el_structmap.set('TYPE', 'PHYSICAL')
el_seqdiv = el_structmap.find('mets:div[@TYPE="physSequence"]', NS)
if el_seqdiv is None:
el_seqdiv = ET.SubElement(el_structmap, TAG_METS_DIV)
el_seqdiv.set('TYPE', 'physSequence')
el_pagediv = el_seqdiv.find('mets:div[@ID="%s"]' % pageId, NS)
if el_pagediv is None:
el_pagediv = ET.SubElement(el_seqdiv, TAG_METS_DIV)
el_pagediv.set('TYPE', 'page')
el_pagediv.set('ID', pageId)
if order:
el_pagediv.set('ORDER', order)
if orderlabel:
el_pagediv.set('ORDERLABEL', orderlabel)
el_fptr = ET.SubElement(el_pagediv, TAG_METS_FPTR)
el_fptr.set('FILEID', ocrd_file.ID)
|
python
|
{
"resource": ""
}
|
q14846
|
OcrdMets.get_physical_page_for_file
|
train
|
def get_physical_page_for_file(self, ocrd_file):
"""
Get the pageId for an ocrd_file
"""
ret = self._tree.getroot().xpath(
'/mets:mets/mets:structMap[@TYPE="PHYSICAL"]/mets:div[@TYPE="physSequence"]/mets:div[@TYPE="page"][./mets:fptr[@FILEID="%s"]]/@ID' %
ocrd_file.ID, namespaces=NS)
if ret:
return ret[0]
|
python
|
{
"resource": ""
}
|
q14847
|
WorkspaceValidator._validate
|
train
|
def _validate(self):
"""
Actual validation.
"""
try:
self._resolve_workspace()
if 'mets_unique_identifier' not in self.skip:
self._validate_mets_unique_identifier()
if 'mets_file_group_names' not in self.skip:
self._validate_mets_file_group_names()
if 'mets_files' not in self.skip:
self._validate_mets_files()
if 'pixel_density' not in self.skip:
self._validate_pixel_density()
if 'page' not in self.skip:
self._validate_page()
except Exception as e: # pylint: disable=broad-except
self.report.add_error("Failed to instantiate workspace: %s" % e)
# raise e
return self.report
|
python
|
{
"resource": ""
}
|
q14848
|
WorkspaceValidator._resolve_workspace
|
train
|
def _resolve_workspace(self):
"""
Clone workspace from mets_url unless workspace was provided.
"""
if self.workspace is None:
self.workspace = self.resolver.workspace_from_url(self.mets_url, baseurl=self.src_dir, download=self.download)
self.mets = self.workspace.mets
|
python
|
{
"resource": ""
}
|
q14849
|
WorkspaceValidator._validate_pixel_density
|
train
|
def _validate_pixel_density(self):
"""
Validate image pixel density
See `spec <https://ocr-d.github.io/mets#pixel-density-of-images-must-be-explicit-and-high-enough>`_.
"""
for f in [f for f in self.mets.find_files() if f.mimetype.startswith('image/')]:
if not f.local_filename and not self.download:
self.report.add_notice("Won't download remote image <%s>" % f.url)
continue
exif = self.workspace.resolve_image_exif(f.url)
for k in ['xResolution', 'yResolution']:
v = exif.__dict__.get(k)
if v is None or v <= 72:
self.report.add_error("Image %s: %s (%s pixels per %s) is too low" % (f.ID, k, v, exif.resolutionUnit))
|
python
|
{
"resource": ""
}
|
q14850
|
WorkspaceValidator._validate_page
|
train
|
def _validate_page(self):
"""
Run PageValidator on the PAGE-XML documents referenced in the METS.
"""
for ocrd_file in self.mets.find_files(mimetype=MIMETYPE_PAGE, local_only=True):
self.workspace.download_file(ocrd_file)
page_report = PageValidator.validate(ocrd_file=ocrd_file, strictness=self.page_strictness)
self.report.merge_report(page_report)
|
python
|
{
"resource": ""
}
|
q14851
|
ActionslogModelRegistry.register
|
train
|
def register(self, model, include_fields=[], exclude_fields=[]):
"""
Register a model with actionslog. Actionslog will then track mutations on this model's instances.
:param model: The model to register.
:type model: Model
:param include_fields: The fields to include. Implicitly excludes all other fields.
:type include_fields: list
:param exclude_fields: The fields to exclude. Overrides the fields to include.
:type exclude_fields: list
"""
if issubclass(model, Model):
self._registry[model] = {
'include_fields': include_fields,
'exclude_fields': exclude_fields,
}
self._connect_signals(model)
else:
raise TypeError("Supplied model is not a valid model.")
|
python
|
{
"resource": ""
}
|
q14852
|
track_field
|
train
|
def track_field(field):
"""
Returns whether the given field should be tracked by Actionslog.
Untracked fields are many-to-many relations and relations to the Actionslog LogAction model.
:param field: The field to check.
:type field: Field
:return: Whether the given field should be tracked.
:rtype: bool
"""
from actionslog.models import LogAction
# Do not track many to many relations
if field.many_to_many:
return False
# Do not track relations to LogAction
if getattr(field, 'rel', None) is not None and field.rel.to == LogAction:
return False
return True
|
python
|
{
"resource": ""
}
|
q14853
|
HttpClient.request
|
train
|
def request(self, method, api_url, params={}, **kwargs):
"""Generate the API call to the device."""
LOG.debug("axapi_http: full url = %s", self.url_base + api_url)
LOG.debug("axapi_http: %s url = %s", method, api_url)
LOG.debug("axapi_http: params = %s", json.dumps(logutils.clean(params), indent=4))
# Set "data" variable for the request
if params:
extra_params = kwargs.get('axapi_args', {})
params_copy = merge_dicts(params, extra_params)
LOG.debug("axapi_http: params_all = %s", logutils.clean(params_copy))
payload = json.dumps(params_copy)
else:
try:
payload = kwargs.pop('payload', None)
self.headers = dict(self.HEADERS, **kwargs.pop('headers', {}))
LOG.debug("axapi_http: headers_all = %s", logutils.clean(self.headers))
except KeyError:
payload = None
max_retries = kwargs.get('max_retries', self.max_retries)
timeout = kwargs.get('timeout', self.timeout)
# Create session to set HTTPAdapter or SSLAdapter
session = Session()
if self.port == 443:
# Add adapter for any https session to force TLS1_0 connection for v21 of AXAPI
session.mount('https://', SSLAdapter(max_retries=max_retries))
else:
session.mount('http://', HTTPAdapter(max_retries=max_retries))
session_request = getattr(session, method.lower())
# Make actual request and handle any errors
try:
device_response = session_request(
self.url_base + api_url, verify=False, data=payload, headers=self.HEADERS, timeout=timeout
)
except (Exception) as e:
LOG.error("acos_client failing with error %s after %s retries", e.__class__.__name__, max_retries)
raise e
finally:
session.close()
# Log if the response is one of the known broken responses
if device_response in broken_replies:
device_response = broken_replies[device_response]
LOG.debug("axapi_http: broken reply, new response: %s", logutils.clean(device_response))
# Validate json response
try:
json_response = device_response.json()
LOG.debug("axapi_http: data = %s", json.dumps(logutils.clean(json_response), indent=4))
except ValueError as e:
# The response is not JSON but it still succeeded.
LOG.debug("axapi_http: json = %s", e)
return device_response
# Handle "fail" responses returned by AXAPI
if 'response' in json_response and 'status' in json_response['response']:
if json_response['response']['status'] == 'fail':
acos_responses.raise_axapi_ex(json_response, action=extract_method(api_url))
# Return json portion of response
return json_response
|
python
|
{
"resource": ""
}
|
q14854
|
LicenseManager.create
|
train
|
def create(self, host_list=[], serial=None, instance_name=None, use_mgmt_port=False,
interval=None, bandwidth_base=None, bandwidth_unrestricted=None):
"""Creates a license manager entry
Keyword arguments:
instance_name -- license manager instance name
host_list -- list(dict) a list of dictionaries of the format:
{'ip': '127.0.0.1', 'port': 443}
serial - (str) appliance serial number
use_mgmt_port - (bool) use management for license interactions
interval - (int) 1=Monthly, 2=Daily, 3=Hourly
bandwidth_base - (int) Configure feature bandwidth base (Mb)
Valid range - 10-102400
bandwidth_unrestricted - (bool) Set the bandwidth to maximum
"""
payload = self._build_payload(host_list=host_list, serial=serial,
instance_name=instance_name,
use_mgmt_port=use_mgmt_port,
interval=interval, bandwidth_base=bandwidth_base,
bandwidth_unrestricted=bandwidth_unrestricted)
return self._post(self.url_base, payload)
|
python
|
{
"resource": ""
}
|
q14855
|
LicenseManager.update
|
train
|
def update(self, host_list=[], serial=None, instance_name=None, use_mgmt_port=False,
interval=None, bandwidth_base=None, bandwidth_unrestricted=None):
"""Update a license manager entry
Keyword arguments:
instance_name -- license manager instance name
host_list -- list(dict) a list of dictionaries of the format:
{'ip': '127.0.0.1', 'port': 443}
serial - (str) appliance serial number
use_mgmt_port - (bool) use management for license interactions
interval - (int) 1=Monthly, 2=Daily, 3=Hourly
bandwidth_base - (int) Configure feature bandwidth base (Mb)
Valid range - 10-102400
bandwidth_unrestricted - (bool) Set the bandwidth to maximum
"""
return self.create(host_list=host_list, serial=serial, instance_name=instance_name,
use_mgmt_port=use_mgmt_port,
interval=interval, bandwidth_base=bandwidth_base,
bandwidth_unrestricted=bandwidth_unrestricted)
|
python
|
{
"resource": ""
}
|
q14856
|
DeviceContext.switch
|
train
|
def switch(self, device_id, obj_slot_id):
"""Switching of device-context"""
payload = {
"device-context": self._build_payload(device_id, obj_slot_id)
}
return self._post(self.url_prefix, payload)
|
python
|
{
"resource": ""
}
|
q14857
|
contains_vasp_input
|
train
|
def contains_vasp_input(dir_name):
"""
Checks if a directory contains valid VASP input.
Args:
dir_name:
Directory name to check.
Returns:
True if directory contains all four VASP input files (INCAR, POSCAR,
KPOINTS and POTCAR).
"""
for f in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]:
if not os.path.exists(os.path.join(dir_name, f)) and \
not os.path.exists(os.path.join(dir_name, f + ".orig")):
return False
return True
|
python
|
{
"resource": ""
}
|
q14858
|
get_coordination_numbers
|
train
|
def get_coordination_numbers(d):
"""
Helper method to get the coordination number of all sites in the final
structure from a run.
Args:
d:
Run dict generated by VaspToDbTaskDrone.
Returns:
Coordination numbers as a list of dict of [{"site": site_dict,
"coordination": number}, ...].
"""
structure = Structure.from_dict(d["output"]["crystal"])
f = VoronoiNN()
cn = []
for i, s in enumerate(structure.sites):
try:
n = f.get_cn(structure, i)
number = int(round(n))
cn.append({"site": s.as_dict(), "coordination": number})
except Exception:
logger.error("Unable to parse coordination errors")
return cn
|
python
|
{
"resource": ""
}
|
q14859
|
get_uri
|
train
|
def get_uri(dir_name):
"""
Returns the URI path for a directory. This allows files hosted on
different file servers to have distinct locations.
Args:
dir_name:
A directory name.
Returns:
Full URI path, e.g., fileserver.host.com:/full/path/of/dir_name.
"""
fullpath = os.path.abspath(dir_name)
try:
hostname = socket.gethostbyaddr(socket.gethostname())[0]
except:
hostname = socket.gethostname()
return "{}:{}".format(hostname, fullpath)
|
python
|
{
"resource": ""
}
|
q14860
|
VaspToDbTaskDrone.assimilate
|
train
|
def assimilate(self, path):
"""
Parses vasp runs, then inserts the result into the db and returns the
task_id or doc of the insertion.
Returns:
If in simulate_mode, the entire doc is returned for debugging
purposes. Else, only the task_id of the inserted doc is returned.
"""
try:
d = self.get_task_doc(path)
if self.mapi_key is not None and d["state"] == "successful":
self.calculate_stability(d)
tid = self._insert_doc(d)
return tid
except Exception as ex:
import traceback
logger.error(traceback.format_exc())
return False
|
python
|
{
"resource": ""
}
|
q14861
|
VaspToDbTaskDrone.get_task_doc
|
train
|
def get_task_doc(self, path):
"""
Get the entire task doc for a path, including any post-processing.
"""
logger.info("Getting task doc for base dir :{}".format(path))
files = os.listdir(path)
vasprun_files = OrderedDict()
if "STOPCAR" in files:
#Stopped runs. Try to parse as much as possible.
logger.info(path + " contains stopped run")
for r in self.runs:
if r in files: #try subfolder schema
for f in os.listdir(os.path.join(path, r)):
if fnmatch(f, "vasprun.xml*"):
vasprun_files[r] = os.path.join(r, f)
else: #try extension schema
for f in files:
if fnmatch(f, "vasprun.xml.{}*".format(r)):
vasprun_files[r] = f
if len(vasprun_files) == 0:
for f in files: #get any vasprun from the folder
if fnmatch(f, "vasprun.xml*") and \
f not in vasprun_files.values():
vasprun_files['standard'] = f
if len(vasprun_files) > 0:
d = self.generate_doc(path, vasprun_files)
if not d:
d = self.process_killed_run(path)
self.post_process(path, d)
elif (not (path.endswith("relax1") or
path.endswith("relax2"))) and contains_vasp_input(path):
#If not Materials Project style, process as a killed run.
logger.warning(path + " contains killed run")
d = self.process_killed_run(path)
self.post_process(path, d)
else:
raise ValueError("No VASP files found!")
return d
|
python
|
{
"resource": ""
}
|
q14862
|
VaspToDbTaskDrone.post_process
|
train
|
def post_process(self, dir_name, d):
"""
Simple post-processing for various files other than the vasprun.xml.
Called by generate_task_doc. Modify this if your runs have other
kinds of processing requirements.
Args:
dir_name:
The dir_name.
d:
Current doc generated.
"""
logger.info("Post-processing dir:{}".format(dir_name))
fullpath = os.path.abspath(dir_name)
# VASP input generated by pymatgen's alchemy has a
# transformations.json file that keeps track of the origin of a
# particular structure. This is extremely useful for tracing back a
# result. If such a file is found, it is inserted into the task doc
# as d["transformations"]
transformations = {}
filenames = glob.glob(os.path.join(fullpath, "transformations.json*"))
if len(filenames) >= 1:
with zopen(filenames[0], "rt") as f:
transformations = json.load(f)
try:
m = re.match(r"(\d+)-ICSD",
transformations["history"][0]["source"])
if m:
d["icsd_id"] = int(m.group(1))
except Exception as ex:
logger.warning("Cannot parse ICSD from transformations "
"file.")
pass
else:
logger.warning("Transformations file does not exist.")
other_parameters = transformations.get("other_parameters")
new_tags = None
if other_parameters:
# We don't want to leave tags or authors in the
# transformations file because they'd be copied into
# every structure generated after this one.
new_tags = other_parameters.pop("tags", None)
new_author = other_parameters.pop("author", None)
if new_author:
d["author"] = new_author
if not other_parameters: # if dict is now empty remove it
transformations.pop("other_parameters")
d["transformations"] = transformations
# Calculations done using custodian has a custodian.json,
# which tracks the jobs performed and any errors detected and fixed.
# This is useful for tracking what has actually be done to get a
# result. If such a file is found, it is inserted into the task doc
# as d["custodian"]
filenames = glob.glob(os.path.join(fullpath, "custodian.json*"))
if len(filenames) >= 1:
with zopen(filenames[0], "rt") as f:
d["custodian"] = json.load(f)
# Parse OUTCAR for additional information and run stats that are
# generally not in vasprun.xml.
try:
run_stats = {}
for filename in glob.glob(os.path.join(fullpath, "OUTCAR*")):
outcar = Outcar(filename)
i = 1 if re.search("relax2", filename) else 0
taskname = "relax2" if re.search("relax2", filename) else \
"relax1"
d["calculations"][i]["output"]["outcar"] = outcar.as_dict()
run_stats[taskname] = outcar.run_stats
except:
logger.error("Bad OUTCAR for {}.".format(fullpath))
try:
overall_run_stats = {}
for key in ["Total CPU time used (sec)", "User time (sec)",
"System time (sec)", "Elapsed time (sec)"]:
overall_run_stats[key] = sum([v[key]
for v in run_stats.values()])
run_stats["overall"] = overall_run_stats
except:
logger.error("Bad run stats for {}.".format(fullpath))
d["run_stats"] = run_stats
#Convert to full uri path.
if self.use_full_uri:
d["dir_name"] = get_uri(dir_name)
if new_tags:
d["tags"] = new_tags
logger.info("Post-processed " + fullpath)
|
python
|
{
"resource": ""
}
|
q14863
|
VaspToDbTaskDrone.process_vasprun
|
train
|
def process_vasprun(self, dir_name, taskname, filename):
"""
Process a vasprun.xml file.
"""
vasprun_file = os.path.join(dir_name, filename)
if self.parse_projected_eigen and (self.parse_projected_eigen != 'final' or \
taskname == self.runs[-1]):
parse_projected_eigen = True
else:
parse_projected_eigen = False
r = Vasprun(vasprun_file,parse_projected_eigen=parse_projected_eigen)
d = r.as_dict()
d["dir_name"] = os.path.abspath(dir_name)
d["completed_at"] = \
str(datetime.datetime.fromtimestamp(os.path.getmtime(
vasprun_file)))
d["cif"] = str(CifWriter(r.final_structure))
d["density"] = r.final_structure.density
if self.parse_dos and (self.parse_dos != 'final' \
or taskname == self.runs[-1]):
try:
d["dos"] = r.complete_dos.as_dict()
except Exception:
logger.warning("No valid dos data exist in {}.\n Skipping dos"
.format(dir_name))
if taskname == "relax1" or taskname == "relax2":
d["task"] = {"type": "aflow", "name": taskname}
else:
d["task"] = {"type": taskname, "name": taskname}
d["oxide_type"] = oxide_type(r.final_structure)
return d
|
python
|
{
"resource": ""
}
|
q14864
|
total_size
|
train
|
def total_size(o, handlers={}, verbose=False, count=False):
"""Returns the approximate memory footprint an object and all of its contents.
Automatically finds the contents of the following builtin containers and
their subclasses: tuple, list, deque, dict, set and frozenset.
To search other containers, add handlers to iterate over their contents:
handlers = {SomeContainerClass: iter,
OtherContainerClass: OtherContainerClass.get_elements}
Source: http://code.activestate.com/recipes/577504/ (r3)
"""
# How to make different types of objects iterable
dict_handler = lambda d: chain.from_iterable(d.items())
all_handlers = {tuple: iter,
list: iter,
deque: iter,
dict: dict_handler,
set: iter,
frozenset: iter}
all_handlers.update(handlers) # user handlers take precedence
seen = set() # track which object id's have already been seen
default_size = getsizeof(0) # estimate sizeof object without __sizeof__
def sizeof(o):
"Calculate size of `o` and all its children"
if id(o) in seen: # do not double count the same object
return 0
seen.add(id(o))
if count:
s = 1
else:
s = getsizeof(o, default_size)
# If `o` is iterable, add size of its members
for typ, handler in all_handlers.items():
if isinstance(o, typ):
s += sum(map(sizeof, handler(o)))
break
return s
return sizeof(o)
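# Illustrative usage sketch (added; not part of the original snippet; assumes the
# module-level imports of deque, chain and getsizeof that the function relies on):
assert total_size([1, 2, 3], count=True) == 4   # one list plus three distinct ints
print(total_size({"a": [1, 2]}))                # approximate byte size, platform-dependent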
|
python
|
{
"resource": ""
}
|
q14865
|
args_kvp_nodup
|
train
|
def args_kvp_nodup(s):
"""Parse argument string as key=value pairs separated by commas.
:param s: Argument string
:return: Parsed value
:rtype: dict
:raises: ValueError for format violations or a duplicated key.
"""
if s is None:
return {}
d = {}
for item in [e.strip() for e in s.split(",")]:
try:
key, value = item.split("=", 1)
except ValueError:
msg = "argument item '{}' not in form key=value".format(item)
if _argparse_is_dumb:
_alog.warn(msg)
raise ValueError(msg)
if key in d:
msg = "Duplicate key for '{}' not allowed".format(key)
if _argparse_is_dumb:
_alog.warn(msg)
raise ValueError(msg)
d[key] = value
return d
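# Illustrative usage sketch (added; not part of the original snippet):
assert args_kvp_nodup("alpha=1, beta=two") == {"alpha": "1", "beta": "two"}
assert args_kvp_nodup(None) == {}
# args_kvp_nodup("alpha=1, alpha=2") raises ValueError (duplicate key)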
|
python
|
{
"resource": ""
}
|
q14866
|
JsonWalker.walk
|
train
|
def walk(self, o):
"""Walk a dict & transform.
"""
if isinstance(o, dict):
d = o if self._dx is None else self._dx(o)
return {k: self.walk(v) for k, v in d.items()}
elif isinstance(o, list):
return [self.walk(v) for v in o]
else:
return o if self._vx is None else self._vx(o)
|
python
|
{
"resource": ""
}
|
q14867
|
Mark.update
|
train
|
def update(self):
"""Update the position of the mark in the collection.
:return: this object, for chaining
:rtype: Mark
"""
rec = self._c.find_one({}, {self._fld: 1}, sort=[(self._fld, -1)], limit=1)
if rec is None:
self._pos = self._empty_pos()
elif not self._fld in rec:
_log.error("Tracking field not found. field={} collection={}"
.format(self._fld, self._c.name))
_log.warn("Continuing without tracking")
self._pos = self._empty_pos()
else:
self._pos = {self._fld: rec[self._fld]}
return self
|
python
|
{
"resource": ""
}
|
q14868
|
Mark.as_dict
|
train
|
def as_dict(self):
"""Representation as a dict for JSON serialization.
"""
return {self.FLD_OP: self._op.name,
self.FLD_MARK: self._pos,
self.FLD_FLD: self._fld}
|
python
|
{
"resource": ""
}
|
q14869
|
Mark.from_dict
|
train
|
def from_dict(cls, coll, d):
"""Construct from dict
:param coll: Collection for the mark
:param d: Input
:type d: dict
:return: new instance
:rtype: Mark
"""
return Mark(collection=coll, operation=Operation[d[cls.FLD_OP]],
pos=d[cls.FLD_MARK], field=d[cls.FLD_FLD])
|
python
|
{
"resource": ""
}
|
q14870
|
Mark.query
|
train
|
def query(self):
A mongodb query expression to find all records with higher values
for this mark's fields in the collection.
:rtype: dict
"""
q = {}
for field, value in self._pos.items():
if value is None:
q.update({field: {'$exists': True}})
else:
q.update({field: {'$gt': value}})
return q
|
python
|
{
"resource": ""
}
|
q14871
|
CollectionTracker.create
|
train
|
def create(self):
"""Create tracking collection.
Does nothing if tracking collection already exists.
"""
if self._track is None:
self._track = self.db[self.tracking_collection_name]
|
python
|
{
"resource": ""
}
|
q14872
|
CollectionTracker.save
|
train
|
def save(self, mark):
"""Save a position in this collection.
:param mark: The position to save
:type mark: Mark
:raises: DBError, NoTrackingCollection
"""
self._check_exists()
obj = mark.as_dict()
try:
# Make a 'filter' to find/update existing record, which uses
# the field name and operation (but not the position).
filt = {k: obj[k] for k in (mark.FLD_FLD, mark.FLD_OP)}
_log.debug("save: upsert-spec={} upsert-obj={}".format(filt, obj))
self._track.update(filt, obj, upsert=True)
except pymongo.errors.PyMongoError as err:
raise DBError("{}".format(err))
|
python
|
{
"resource": ""
}
|
q14873
|
CollectionTracker.retrieve
|
train
|
def retrieve(self, operation, field=None):
"""Retrieve a position in this collection.
:param operation: Name of an operation
:type operation: :class:`Operation`
:param field: Name of field for sort order
:type field: str
:return: The position for this operation
:rtype: Mark
:raises: NoTrackingCollection
"""
obj = self._get(operation, field)
if obj is None:
# empty Mark instance
return Mark(collection=self.collection, operation=operation, field=field)
return Mark.from_dict(self.collection, obj)
|
python
|
{
"resource": ""
}
|
q14874
|
CollectionTracker._get
|
train
|
def _get(self, operation, field):
"""Get tracked position for a given operation and field."""
self._check_exists()
query = {Mark.FLD_OP: operation.name,
Mark.FLD_MARK + "." + field: {"$exists": True}}
return self._track.find_one(query)
|
python
|
{
"resource": ""
}
|
q14875
|
QueryEngine.set_aliases_and_defaults
|
train
|
def set_aliases_and_defaults(self, aliases_config=None,
default_properties=None):
"""
Set the alias config and defaults to use. Typically used when
switching to a collection with a different schema.
Args:
aliases_config:
An alias dict to use. Defaults to None, which means the default
aliases defined in "aliases.json" is used. See constructor
for format.
default_properties:
List of property names (strings) to use by default, if no
properties are given to the 'properties' argument of
query().
"""
if aliases_config is None:
with open(os.path.join(os.path.dirname(__file__),
"aliases.json")) as f:
d = json.load(f)
self.aliases = d.get("aliases", {})
self.default_criteria = d.get("defaults", {})
else:
self.aliases = aliases_config.get("aliases", {})
self.default_criteria = aliases_config.get("defaults", {})
# set default properties
if default_properties is None:
self._default_props, self._default_prop_dict = None, None
else:
self._default_props, self._default_prop_dict = \
self._parse_properties(default_properties)
|
python
|
{
"resource": ""
}
|
q14876
|
QueryEngine.get_entries
|
train
|
def get_entries(self, criteria, inc_structure=False, optional_data=None):
"""
Get ComputedEntries satisfying a particular criteria.
.. note::
The get_entries_in_system and get_entries methods should be used
with care. In essence, all entries, GGA, GGA+U or otherwise,
are returned. The dataset is very heterogeneous and not
directly comparable. It is highly recommended that you perform
post-processing using pymatgen.entries.compatibility.
Args:
criteria:
Criteria obeying the same syntax as query.
inc_structure:
Optional parameter as to whether to include a structure with
the ComputedEntry. Defaults to False. Use with care - including
structures with a large number of entries can potentially slow
down your code to a crawl.
optional_data:
Optional data to include with the entry. This allows the data
to be access via entry.data[key].
Returns:
List of pymatgen.entries.ComputedEntries satisfying criteria.
"""
all_entries = list()
optional_data = [] if not optional_data else list(optional_data)
optional_data.append("oxide_type")
fields = [k for k in optional_data]
fields.extend(["task_id", "unit_cell_formula", "energy", "is_hubbard",
"hubbards", "pseudo_potential.labels",
"pseudo_potential.functional", "run_type",
"input.is_lasph", "input.xc_override",
"input.potcar_spec"])
if inc_structure:
fields.append("output.crystal")
for c in self.query(fields, criteria):
func = c["pseudo_potential.functional"]
labels = c["pseudo_potential.labels"]
symbols = ["{} {}".format(func, label) for label in labels]
parameters = {"run_type": c["run_type"],
"is_hubbard": c["is_hubbard"],
"hubbards": c["hubbards"],
"potcar_symbols": symbols,
"is_lasph": c.get("input.is_lasph") or False,
"potcar_spec": c.get("input.potcar_spec"),
"xc_override": c.get("input.xc_override")}
optional_data = {k: c[k] for k in optional_data}
if inc_structure:
struct = Structure.from_dict(c["output.crystal"])
entry = ComputedStructureEntry(struct, c["energy"],
0.0, parameters=parameters,
data=optional_data,
entry_id=c["task_id"])
else:
entry = ComputedEntry(Composition(c["unit_cell_formula"]),
c["energy"], 0.0, parameters=parameters,
data=optional_data,
entry_id=c["task_id"])
all_entries.append(entry)
return all_entries
|
python
|
{
"resource": ""
}
|
q14877
|
QueryEngine.ensure_index
|
train
|
def ensure_index(self, key, unique=False):
"""Wrapper for pymongo.Collection.ensure_index
"""
return self.collection.ensure_index(key, unique=unique)
|
python
|
{
"resource": ""
}
|
q14878
|
QueryEngine.query
|
train
|
def query(self, properties=None, criteria=None, distinct_key=None,
**kwargs):
"""
Convenience method for database access. All properties and criteria
can be specified using simplified names defined in Aliases. You can
use the supported_properties property to get the list of supported
properties.
Results are returned as an iterator of dicts to ensure memory and cpu
efficiency.
Note that the dict returned have keys also in the simplified names
form, not in the mongo format. For example, if you query for
"analysis.e_above_hull", the returned result must be accessed as
r['analysis.e_above_hull'] instead of mongo's
r['analysis']['e_above_hull']. This is a *feature* of the query engine
to allow simple access to deeply nested docs without having to resort
to some recursion to go deep into the result.
However, if you query for 'analysis', the entire 'analysis' key is
returned as r['analysis'] and then the subkeys can be accessed in the
usual form, i.e., r['analysis']['e_above_hull']
:param properties: Properties to query for. Defaults to None which means all supported properties.
:param criteria: Criteria to query for as a dict.
:param distinct_key: If not None, the key for which to get distinct results
:param \*\*kwargs: Other kwargs supported by pymongo.collection.find.
Useful examples are limit, skip, sort, etc.
:return: A QueryResults Iterable, which is somewhat like pymongo's
cursor except that it performs mapping. In general, the dev does
not need to concern himself with the form. It is sufficient to know
that the results are in the form of an iterable of dicts.
"""
if properties is not None:
props, prop_dict = self._parse_properties(properties)
else:
props, prop_dict = None, None
crit = self._parse_criteria(criteria)
if self.query_post:
for func in self.query_post:
func(crit, props)
cur = self.collection.find(filter=crit, projection=props, **kwargs)
if distinct_key is not None:
cur = cur.distinct(distinct_key)
return QueryListResults(prop_dict, cur, postprocess=self.result_post)
else:
return QueryResults(prop_dict, cur, postprocess=self.result_post)
|
python
|
{
"resource": ""
}
|
q14879
|
QueryEngine.get_structure_from_id
|
train
|
def get_structure_from_id(self, task_id, final_structure=True):
"""
Returns a structure from the database given the task id.
Args:
task_id:
The task_id to query for.
final_structure:
Whether to obtain the final or initial structure. Defaults to
True.
"""
args = {'task_id': task_id}
field = 'output.crystal' if final_structure else 'input.crystal'
results = tuple(self.query([field], args))
if len(results) > 1:
raise QueryError("More than one result found for task_id {}!".format(task_id))
elif len(results) == 0:
raise QueryError("No structure found for task_id {}!".format(task_id))
c = results[0]
return Structure.from_dict(c[field])
|
python
|
{
"resource": ""
}
|
q14880
|
QueryEngine.from_config
|
train
|
def from_config(config_file, use_admin=False):
"""
Initialize a QueryEngine from a JSON config file generated using mgdb
init.
Args:
config_file:
Filename of config file.
use_admin:
If True, the admin user and password in the config file is
used. Otherwise, the readonly_user and password is used.
Defaults to False.
Returns:
QueryEngine
"""
with open(config_file) as f:
d = json.load(f)
user = d["admin_user"] if use_admin else d["readonly_user"]
password = d["admin_password"] if use_admin \
else d["readonly_password"]
return QueryEngine(
host=d["host"], port=d["port"], database=d["database"],
user=user, password=password, collection=d["collection"],
aliases_config=d.get("aliases_config", None))
|
python
|
{
"resource": ""
}
|
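
from_config only needs the keys it reads above; a sketch of such a file and both ways of connecting, with placeholder credentials (the import path is an assumption).

# db.json (placeholder contents):
# {
#   "host": "127.0.0.1", "port": 27017,
#   "database": "vasp", "collection": "tasks",
#   "admin_user": "admin", "admin_password": "secret",
#   "readonly_user": "reader", "readonly_password": "secret"
# }
from matgendb.query_engine import QueryEngine

qe_ro = QueryEngine.from_config("db.json")                   # read-only user
qe_rw = QueryEngine.from_config("db.json", use_admin=True)   # admin user
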
q14881
|
QueryEngine.get_dos_from_id
|
train
|
def get_dos_from_id(self, task_id):
"""
Overrides the get_dos_from_id for the MIT gridfs format.
"""
args = {'task_id': task_id}
fields = ['calculations']
structure = self.get_structure_from_id(task_id)
dosid = None
for r in self.query(fields, args):
dosid = r['calculations'][-1]['dos_fs_id']
if dosid is not None:
self._fs = gridfs.GridFS(self.db, 'dos_fs')
with self._fs.get(dosid) as dosfile:
s = dosfile.read()
try:
d = json.loads(s)
                except Exception:
                    # not plain JSON; the stored payload is zlib-compressed
s = zlib.decompress(s)
d = json.loads(s.decode("utf-8"))
tdos = Dos.from_dict(d)
pdoss = {}
for i in range(len(d['pdos'])):
ados = d['pdos'][i]
all_ados = {}
for j in range(len(ados)):
orb = Orbital(j)
odos = ados[str(orb)]
all_ados[orb] = {Spin(int(k)): v
for k, v
in odos['densities'].items()}
pdoss[structure[i]] = all_ados
return CompleteDos(structure, tdos, pdoss)
return None
|
python
|
{
"resource": ""
}
|
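
A usage sketch for get_dos_from_id, again with a placeholder task id and the qe instance from above; get_gap() is a standard pymatgen Dos method.

dos = qe.get_dos_from_id("mp-1234")   # hypothetical task id
if dos is not None:
    print("band gap (eV):", dos.get_gap())
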
q14882
|
add_schemas
|
train
|
def add_schemas(path, ext="json"):
"""Add schemas from files in 'path'.
:param path: Path with schema files. Schemas are named by their file,
with the extension stripped. e.g., if path is "/tmp/foo",
then the schema in "/tmp/foo/bar.json" will be named "bar".
:type path: str
:param ext: File extension that identifies schema files
:type ext: str
:return: None
:raise: SchemaPathError, if no such path. SchemaParseError, if a schema
is not valid JSON.
"""
if not os.path.exists(path):
raise SchemaPathError()
filepat = "*." + ext if ext else "*"
for f in glob.glob(os.path.join(path, filepat)):
with open(f, 'r') as fp:
try:
schema = json.load(fp)
except ValueError:
raise SchemaParseError("error parsing '{}'".format(f))
name = os.path.splitext(os.path.basename(f))[0]
schemata[name] = Schema(schema)
|
python
|
{
"resource": ""
}
|
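
A sketch of the naming convention add_schemas applies, using a placeholder directory; schemata is the module-level registry populated above.

# "materials.json" in the directory becomes schemata["materials"], etc.
add_schemas("/path/to/schemata")          # placeholder path
print(sorted(schemata.keys()))
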
q14883
|
load_schema
|
train
|
def load_schema(file_or_fp):
"""Load schema from file.
:param file_or_fp: File name or file object
:type file_or_fp: str, file
:raise: IOError if file cannot be opened or read, ValueError if
file is not valid JSON or JSON is not a valid schema.
"""
fp = open(file_or_fp, 'r') if isinstance(file_or_fp, str) else file_or_fp
obj = json.load(fp)
schema = Schema(obj)
return schema
|
python
|
{
"resource": ""
}
|
q14884
|
Schema.json_schema
|
train
|
def json_schema(self, **add_keys):
"""Convert our compact schema representation to the standard, but more verbose,
JSON Schema standard.
Example JSON schema: http://json-schema.org/examples.html
Core standard: http://json-schema.org/latest/json-schema-core.html
:param add_keys: Key, default value pairs to add in,
e.g. description=""
"""
self._json_schema_keys = add_keys
if self._json_schema is None:
self._json_schema = self._build_schema(self._schema)
return self._json_schema
|
python
|
{
"resource": ""
}
|
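
load_schema and json_schema combine as below; the file name is a placeholder and the printed keys depend on the schema's top-level shape.

with open("mg_core.materials.json") as f:   # placeholder schema file
    schema = load_schema(f)

# Expand the compact form to standard JSON Schema, adding a default description.
js = schema.json_schema(description="")
print(js["type"], sorted(js.get("properties", {})))
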
q14885
|
Schema._build_schema
|
train
|
def _build_schema(self, s):
"""Recursive schema builder, called by `json_schema`.
"""
w = self._whatis(s)
if w == self.IS_LIST:
w0 = self._whatis(s[0])
js = {"type": "array",
"items": {"type": self._jstype(w0, s[0])}}
elif w == self.IS_DICT:
js = {"type": "object",
"properties": {key: self._build_schema(val) for key, val in s.items()}}
req = [key for key, val in s.items() if not val.is_optional]
if req:
js["required"] = req
else:
js = {"type": self._jstype(w, s)}
for k, v in self._json_schema_keys.items():
if k not in js:
js[k] = v
return js
|
python
|
{
"resource": ""
}
|
q14886
|
Schema._jstype
|
train
|
def _jstype(self, stype, sval):
"""Get JavaScript name for given data type, called by `_build_schema`.
"""
if stype == self.IS_LIST:
return "array"
if stype == self.IS_DICT:
return "object"
if isinstance(sval, Scalar):
return sval.jstype
# it is a Schema, so return type of contents
v = sval._schema
return self._jstype(self._whatis(v), v)
|
python
|
{
"resource": ""
}
|
q14887
|
get_schema_dir
|
train
|
def get_schema_dir(db_version=1):
"""Get path to directory with schemata.
:param db_version: Version of the database
:type db_version: int
:return: Path
:rtype: str
"""
v = str(db_version)
return os.path.join(_top_dir, '..', 'schemata', 'versions', v)
|
python
|
{
"resource": ""
}
|
q14888
|
get_schema_file
|
train
|
def get_schema_file(db_version=1, db="mg_core", collection="materials"):
"""Get file with appropriate schema.
:param db_version: Version of the database
:type db_version: int
:param db: Name of database, e.g. 'mg_core'
:type db: str
:param collection: Name of collection, e.g. 'materials'
:type collection: str
:return: File with schema
:rtype: file
:raise: IOError, if file is not found or not accessible
"""
d = get_schema_dir(db_version=db_version)
schemafile = "{}.{}.json".format(db, collection)
f = open(os.path.join(d, schemafile), "r")
return f
|
python
|
{
"resource": ""
}
|
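
get_schema_file composes naturally with load_schema; this sketch uses the default database and collection names from the signature above.

with get_schema_file(db_version=1, db="mg_core", collection="materials") as f:
    materials_schema = load_schema(f)
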
q14889
|
get_settings
|
train
|
def get_settings(infile):
"""Read settings from input file.
    :param infile: Input file with YAML (or JSON) settings.
:type infile: file or str path
:return: Settings parsed from file
:rtype: dict
"""
    # safe_load avoids constructing arbitrary objects and works without an
    # explicit Loader argument on newer PyYAML releases
    settings = yaml.safe_load(_as_file(infile))
if not hasattr(settings, 'keys'):
raise ValueError("Settings not found in {}".format(infile))
# Processing of namespaced parameters in .pmgrc.yaml.
processed_settings = {}
for k, v in settings.items():
if k.startswith("PMG_DB_"):
processed_settings[k[7:].lower()] = v
else:
processed_settings[k] = v
auth_aliases(processed_settings)
return processed_settings
|
python
|
{
"resource": ""
}
|
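
A sketch of the PMG_DB_ prefix handling: namespaced keys from .pmgrc.yaml are lowercased and stripped before auth_aliases is applied (whose further renaming is not shown here). The file contents are placeholders.

# settings.yaml (placeholder contents):
#   PMG_DB_HOST: 127.0.0.1
#   PMG_DB_PORT: 27017
#   PMG_DB_DATABASE: vasp
cfg = get_settings("settings.yaml")
print(cfg["host"], cfg["port"], cfg["database"])   # keys arrive un-prefixed
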
q14890
|
DiffFormatter.result_subsets
|
train
|
def result_subsets(self, rs):
"""Break a result set into subsets with the same keys.
:param rs: Result set, rows of a result as a list of dicts
:type rs: list of dict
    :return: A pair (keyset, maxwid): the set of distinct key tuples, and a
        dict keyed by those tuples giving the maximum rendered width of each column
"""
keyset, maxwid = set(), {}
for r in rs:
key = tuple(sorted(r.keys()))
keyset.add(key)
if key not in maxwid:
maxwid[key] = [len(k) for k in key]
for i, k in enumerate(key):
strlen = len("{}".format(r[k]))
maxwid[key][i] = max(maxwid[key][i], strlen)
return keyset, maxwid
|
python
|
{
"resource": ""
}
|
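
A worked sketch of result_subsets with invented rows; how a DiffFormatter instance is constructed is not shown above, so the existing 'formatter' object is an assumption.

rows = [
    {"mp_id": "mp-1", "delta": 0.12},
    {"mp_id": "mp-22", "delta": -1.5},
    {"mp_id": "mp-3", "old": "P1", "new": "Fm-3m"},
]
keyset, maxwid = formatter.result_subsets(rows)   # 'formatter' assumed to exist
# keyset == {("delta", "mp_id"), ("mp_id", "new", "old")}
# maxwid[("delta", "mp_id")] == [5, 5]   # widest of header and values per column
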
q14891
|
DiffFormatter.ordered_cols
|
train
|
def ordered_cols(self, columns, section):
"""Return ordered list of columns, from given columns and the name of the section
"""
columns = list(columns) # might be a tuple
fixed_cols = [self.key]
if section.lower() == "different":
fixed_cols.extend([Differ.CHANGED_MATCH_KEY, Differ.CHANGED_OLD, Differ.CHANGED_NEW])
    # remove fixed columns eagerly (a bare map() call is lazy in Python 3)
    for col in fixed_cols:
        if col in columns:
            columns.remove(col)
columns.sort()
return fixed_cols + columns
|
python
|
{
"resource": ""
}
|
q14892
|
DiffFormatter.sort_rows
|
train
|
def sort_rows(self, rows, section):
"""Sort the rows, as appropriate for the section.
:param rows: List of tuples (all same length, same values in each position)
:param section: Name of section, should match const in Differ class
:return: None; rows are sorted in-place
"""
#print("@@ SORT ROWS:\n{}".format(rows))
# Section-specific determination of sort key
if section.lower() == Differ.CHANGED.lower():
sort_key = Differ.CHANGED_DELTA
else:
sort_key = None
if sort_key is not None:
rows.sort(key=itemgetter(sort_key))
|
python
|
{
"resource": ""
}
|
q14893
|
DiffJsonFormatter.document
|
train
|
def document(self, result):
"""Build dict for MongoDB, expanding result keys as we go.
"""
self._add_meta(result)
walker = JsonWalker(JsonWalker.value_json, JsonWalker.dict_expand)
r = walker.walk(result)
return r
|
python
|
{
"resource": ""
}
|
q14894
|
DiffTextFormatter.format
|
train
|
def format(self, result):
"""Generate plain text report.
:return: Report body
:rtype: str
"""
m = self.meta
lines = ['-' * len(self.TITLE),
self.TITLE,
'-' * len(self.TITLE),
"Compared: {db1} <-> {db2}".format(**m),
"Filter: {filter}".format(**m),
"Run time: {start_time} -- {end_time} ({elapsed:.1f} sec)".format(**m),
""]
for section in result.keys():
lines.append("* " + section.title())
indent = " " * 4
if len(result[section]) == 0:
lines.append("{}EMPTY".format(indent))
else:
keyset, maxwid = self.result_subsets(result[section])
for columns in keyset:
ocol = self.ordered_cols(columns, section)
mw = maxwid[columns]
mw_i = [columns.index(c) for c in ocol] # reorder indexes
fmt = ' '.join(["{{:{:d}s}}".format(mw[i]) for i in mw_i])
lines.append("")
lines.append(indent + fmt.format(*ocol))
                    # integer division keeps the repeat count an int for the separator row
                    lines.append(indent + '-_' * (sum(mw) // 2 + len(columns)))
rows = result[section]
self.sort_rows(rows, section)
for r in rows:
key = tuple(sorted(r.keys()))
if key == columns:
values = [str(r[k]) for k in ocol]
lines.append(indent + fmt.format(*values))
return '\n'.join(lines)
|
python
|
{
"resource": ""
}
|
q14895
|
create_query_engine
|
train
|
def create_query_engine(config, clazz):
"""Create and return new query engine object from the
given `DBConfig` object.
:param config: Database configuration
:type config: dbconfig.DBConfig
:param clazz: Class to use for creating query engine. Should
act like query_engine.QueryEngine.
:type clazz: class
:return: New query engine
"""
try:
qe = clazz(**config.settings)
except Exception as err:
raise CreateQueryEngineError(clazz, config.settings, err)
return qe
|
python
|
{
"resource": ""
}
|
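
A sketch of wiring a configuration into a query engine; DBConfig is assumed to live in the dbconfig module named in the docstring, and the config file name is a placeholder.

from matgendb.dbconfig import DBConfig           # import path is an assumption
from matgendb.query_engine import QueryEngine    # likewise

config = DBConfig("db.json")                     # placeholder config file
try:
    qe = create_query_engine(config, QueryEngine)
except CreateQueryEngineError as err:
    print("could not create query engine:", err)
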
q14896
|
ConfigGroup.add
|
train
|
def add(self, name, cfg, expand=False):
"""Add a configuration object.
:param name: Name for later retrieval
:param cfg: Configuration object
:param expand: Flag for adding sub-configs for each sub-collection.
See discussion in method doc.
:return: self, for chaining
:raises: CreateQueryEngineError (only if expand=True)
"""
self._d[name] = cfg
if expand:
self.expand(name)
return self
|
python
|
{
"resource": ""
}
|
q14897
|
ConfigGroup._get_qe
|
train
|
def _get_qe(self, key, obj):
"""Instantiate a query engine, or retrieve a cached one.
"""
if key in self._cached:
return self._cached[key]
qe = create_query_engine(obj, self._class)
self._cached[key] = qe
return qe
|
python
|
{
"resource": ""
}
|
q14898
|
RegexDict.re_keys
|
train
|
def re_keys(self, pattern):
"""Find keys matching `pattern`.
:param pattern: Regular expression
:return: Matching keys or empty list
:rtype: list
"""
if not pattern.endswith("$"):
pattern += "$"
expr = re.compile(pattern)
return list(filter(expr.match, self.keys()))
|
python
|
{
"resource": ""
}
|
q14899
|
RegexDict.re_get
|
train
|
def re_get(self, pattern):
"""Return values whose key matches `pattern`
:param pattern: Regular expression
:return: Found values, as a dict.
"""
return {k: self[k] for k in self.re_keys(pattern)}
|
python
|
{
"resource": ""
}
|
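
A small sketch of re_keys and re_get together, assuming RegexDict behaves as a dict subclass; the contents are invented.

d = RegexDict({"energy": -1.0, "energy_per_atom": -0.5, "volume": 10.0})
print(d.re_keys("energy.*"))   # ['energy', 'energy_per_atom'] (insertion order)
print(d.re_get("energy"))      # {'energy': -1.0} -- '$' is appended, so no prefix match
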