code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
|---|---|---|---|---|---|
yield self
if self.has_arguments():
for arg in self.args_definition:
if not arg["name"] in self.arguments:
continue
value = self.arguments[arg["name"]]
if type(value) == list:
if self.__get_arg_type(arg["name"]) == ["testlist"]:
for t in value:
for node in t.walk():
yield node
if isinstance(value, Command):
for node in value.walk():
yield node
for ch in self.children:
for node in ch.walk():
yield node
|
def walk(self)
|
Walk through commands.
| 3.885441
| 3.710742
| 1.047079
|
if not self.accept_children:
return False
self.children += [child]
return True
|
def addchild(self, child)
|
Add a new child to the command
A child corresponds to a command located into a block (this
command's block). It can be either an action or a control.
:param child: the new child
:return: True on success, False otherwise
| 6.934842
| 8.434278
| 0.822221
|
# A command accepting a variable number of arguments is never complete.
if self.variable_args_nb:
    return False
# Lazily count required arguments on first call (-1 means "not counted").
if self.required_args == -1:
    self.required_args = 0
    for arg in self.args_definition:
        if arg.get("required", False):
            self.required_args += 1
# Complete when all required arguments were seen AND the current
# argument is not still expecting an extra argument — unless the
# supplied atype/avalue pair shows the extra argument does not apply.
return (
    (not self.curarg or
     "extra_arg" not in self.curarg or
     ("valid_for" in self.curarg["extra_arg"] and
      atype and atype in self.curarg["extra_arg"]["type"] and
      avalue not in self.curarg["extra_arg"]["valid_for"])) and
    (self.rargs_cnt == self.required_args)
)
|
def iscomplete(self, atype=None, avalue=None)
|
Check if the command is complete
Check if all required arguments have been encountered. For
commands that allow an undefined number of arguments, this
method always returns False.
:return: True if command is complete, False otherwise
| 4.141622
| 4.045175
| 1.023843
|
# Arguments that declare no value constraint accept any value.
if "values" not in arg and "extension_values" not in arg:
    return True
# Allowed values are matched case-insensitively (stored lowercase).
if "values" in arg and value.lower() in arg["values"]:
    return True
if "extension_values" in arg:
    # Some values are only legal when a specific extension is loaded.
    extension = arg["extension_values"].get(value.lower())
    if extension:
        condition = (
            check_extension and
            extension not in RequireCommand.loaded_extensions
        )
        if condition:
            raise ExtensionNotLoaded(extension)
        return True
return False
|
def __is_valid_value_for_arg(self, arg, value, check_extension=True)
|
Check if value is allowed for arg
Some commands only allow a limited set of values. The method
always returns True for methods that do not provide such a
set.
:param arg: the argument's name
:param value: the value to check
:param check_extension: check if value requires an extension
:return: True on success, False otherwise
| 3.813631
| 4.026711
| 0.947083
|
typ_is_str = typ == "string"
str_list_in_typlist = "stringlist" in typlist
return typ in typlist or (typ_is_str and str_list_in_typlist)
|
def __is_valid_type(self, typ, typlist)
|
Check if type is valid based on input type list
"string" is special because it can be used for stringlist
:param typ: the type to check
:param typlist: the list of type to check
:return: True on success, False otherwise
| 4.852075
| 4.057314
| 1.195884
|
# Validate the next argument produced by the parser and, when add is
# True, record it. Returns False when the command takes no (more)
# arguments; raises BadValue / BadArgument / ExtensionNotLoaded on
# invalid input.
if not self.has_arguments():
    return False
if self.iscomplete(atype, avalue):
    return False
# The previously matched argument still expects an extra argument
# (e.g. ":comparator" expects a string): validate that first.
if self.curarg is not None and "extra_arg" in self.curarg:
    condition = (
        atype in self.curarg["extra_arg"]["type"] and
        ("values" not in self.curarg["extra_arg"] or
         avalue in self.curarg["extra_arg"]["values"])
    )
    if condition:
        if add:
            self.extra_arguments[self.curarg["name"]] = avalue
        self.curarg = None
        return True
    raise BadValue(self.curarg["name"], avalue)
failed = False
pos = self.nextargpos
# Scan remaining definitions: required arguments must come in order,
# optional (tagged) ones may appear unordered.
while pos < len(self.args_definition):
    curarg = self.args_definition[pos]
    if curarg.get("required", False):
        if curarg["type"] == ["testlist"]:
            # testlists take an unknown number of "test" arguments,
            # accumulated in a list.
            if atype != "test":
                failed = True
            elif add:
                if not curarg["name"] in self.arguments:
                    self.arguments[curarg["name"]] = []
                self.arguments[curarg["name"]] += [avalue]
        elif not self.__is_valid_type(atype, curarg["type"]) or \
                not self.__is_valid_value_for_arg(
                    curarg, avalue, check_extension):
            failed = True
        else:
            # Required argument matched: advance the required cursor.
            self.curarg = curarg
            self.rargs_cnt += 1
            self.nextargpos = pos + 1
            if add:
                self.arguments[curarg["name"]] = avalue
        # Required slots always stop the scan (success or failure).
        break
    # Optional argument: both type and value must match.
    condition = (
        atype in curarg["type"] and
        self.__is_valid_value_for_arg(curarg, avalue, check_extension)
    )
    if condition:
        ext = curarg.get("extension")
        condition = (
            check_extension and ext and
            ext not in RequireCommand.loaded_extensions)
        if condition:
            raise ExtensionNotLoaded(ext)
        # Remember this argument when it expects an extra argument
        # applicable to the given value.
        condition = (
            "extra_arg" in curarg and
            ("valid_for" not in curarg["extra_arg"] or
             avalue in curarg["extra_arg"]["valid_for"])
        )
        if condition:
            self.curarg = curarg
        if add:
            self.arguments[curarg["name"]] = avalue
        break
    pos += 1
if failed:
    raise BadArgument(self.name, avalue,
                      self.args_definition[pos]["type"])
return True
|
def check_next_arg(self, atype, avalue, add=True, check_extension=True)
|
Argument validity checking
This method is usually used by the parser to check if detected
argument is allowed for this command.
We make a distinction between required and optional
arguments. Optional (or tagged) arguments can be provided
unordered but not the required ones.
A special handling is also done for arguments that require an
argument (example: the :comparator argument expects a string
argument).
The "testlist" type is checked separately as we can't know in
advance how many arguments will be provided.
If the argument is incorrect, the method raises the
appropriate exception, or return False to let the parser
handle the exception.
:param atype: the argument's type
:param avalue: the argument's value
:param add: indicates if this argument should be recorded on success
:param check_extension: raise ExtensionNotLoaded if extension not
loaded
:return: True on success, False otherwise
| 2.73167
| 2.599664
| 1.050778
|
value = self.arguments["header-names"]
if isinstance(value, list):
value = "[{}]".format(
",".join('"{}"'.format(item) for item in value))
if not value.startswith("["):
return ('exists', value.strip('"'))
return ("exists", ) + tuple(tools.to_list(value))
|
def args_as_tuple(self)
|
FIXME: en fonction de la manière dont la commande a été générée
(factory ou parser), le type des arguments est différent :
string quand ça vient de la factory ou type normal depuis le
parser. Il faut uniformiser tout ça !!
| 7.323376
| 7.060195
| 1.037277
|
if "," in self.arguments["header-names"]:
result = tuple(tools.to_list(self.arguments["header-names"]))
else:
result = (self.arguments["header-names"].strip('"'),)
result = result + (self.arguments["match-type"],)
if "," in self.arguments["key-list"]:
result = result + tuple(
tools.to_list(self.arguments["key-list"], unquote=False))
else:
result = result + (self.arguments["key-list"].strip('"'),)
return result
|
def args_as_tuple(self)
|
Return arguments as a list.
| 3.249405
| 3.172936
| 1.0241
|
result = ("body", )
result = result + (
self.arguments["body-transform"], self.arguments["match-type"])
if self.arguments["key-list"].startswith("["):
result = result + tuple(
tools.to_list(self.arguments["key-list"]))
else:
result = result + (self.arguments["key-list"].strip('"'),)
return result
|
def args_as_tuple(self)
|
Return arguments as a list.
| 6.461025
| 6.074958
| 1.063551
|
# When the optional "variable-list" stringlist was consumed but the
# required "list-of-flags" was not, the parser mis-assigned the value:
# move it to the required slot and fix the required-argument counter.
if ("variable-list" in self.arguments and
        "list-of-flags" not in self.arguments):
    self.arguments["list-of-flags"] = (
        self.arguments.pop("variable-list"))
    self.rargs_cnt = 1
|
def reassign_arguments(self)
|
Deal with optional stringlist before a required one.
| 6.083042
| 5.380401
| 1.130593
|
# Rebuild the currentdate test's arguments as a flat tuple, starting
# with the fixed ":zone" tag and its unquoted value.
result = ("currentdate", )
result += (
    ":zone",
    self.extra_arguments["zone"].strip('"'),
    self.arguments["match-type"],
)
# ":count" and ":value" match-types carry an extra argument of their own.
if self.arguments["match-type"] in [":count", ":value"]:
    result += (self.extra_arguments["match-type"].strip('"'), )
result += (self.arguments["date-part"].strip('"'), )
value = self.arguments["key-list"]
if isinstance(value, list):
    # FIXME: factory-built commands store a real list here; normalize
    # to the parser's bracketed string form for uniform handling.
    value = "[{}]".format(
        ",".join('"{}"'.format(item) for item in value))
# A leading "[" marks a sieve string list; otherwise a single value.
if value.startswith("["):
    result = result + tuple(tools.to_list(value))
else:
    result = result + (value.strip('"'),)
return result
|
def args_as_tuple(self)
|
Return arguments as a list.
| 5.556232
| 5.32367
| 1.043684
|
if isinstance(s, six.binary_type):
return s
if six.PY3:
return bytes(s, encoding)
return s.encode(encoding)
|
def to_bytes(s, encoding="utf-8")
|
Convert a string to bytes.
| 2.533985
| 2.135871
| 1.186394
|
stringlist = stringlist[1:-1]
return [
string.strip('"') if unquote else string
for string in stringlist.split(",")
]
|
def to_list(stringlist, unquote=True)
|
Convert a string representing a list to real list.
| 3.344395
| 3.055671
| 1.094488
|
# Replace Jinja2's stock i18n extension with Puente's subclass so that
# babel_extract's "is InternationalizationExtension present?" check
# finds ours and trans blocks keep their whitespace collapsing.
import jinja2.ext
from puente.ext import PuenteI18nExtension
jinja2.ext.InternationalizationExtension = PuenteI18nExtension
jinja2.ext.i18n = PuenteI18nExtension
|
def monkeypatch_i18n()
|
Alleviates problems with extraction for trans blocks
Jinja2 has a ``babel_extract`` function which sets up a Jinja2
environment to parse Jinja2 templates to extract strings for
translation. That's awesome! Yay! However, when it goes to
set up the environment, it checks to see if the environment
has InternationalizationExtension in it and if not, adds it.
https://github.com/mitsuhiko/jinja2/blob/2.8/jinja2/ext.py#L587
That stomps on our PuenteI18nExtension so trans blocks don't get
whitespace collapsed and we end up with msgids that are different
between extraction and rendering. Argh!
Two possible ways to deal with this:
1. Rename our block from "trans" to something else like
"blocktrans" or "transam".
This means everyone has to make sweeping changes to their
templates plus we adjust gettext, too, so now we're talking
about two different extensions.
2. Have people include both InternationalizationExtension
before PuenteI18nExtension even though it gets stomped on.
This will look wrong in settings and someone will want to
"fix" it thus breaking extraction subtly, so I'm loath to
force everyone to do this.
3. Stomp on the InternationalizationExtension variable in
``jinja2.ext`` just before message extraction.
This is easy and hopefully the underlying issue will go away
soon.
For now, we're going to do number 3. Why? Because I'm hoping
Jinja2 will fix the trans tag so it collapses whitespace if
you tell it to. Then we don't have to do what we're doing and
all these problems go away.
We can remove this monkeypatch when one of the following is true:
1. we remove our whitespace collapsing code because Jinja2 trans
tag supports whitespace collapsing
2. Jinja2's ``babel_extract`` stops adding
InternationalizationExtension to the environment if it's
not there
| 3.449758
| 2.623266
| 1.315062
|
# Shallow copy
keywords = dict(BABEL_KEYWORDS)
keywords.update({
'_lazy': None,
'gettext_lazy': None,
'ugettext_lazy': None,
'gettext_noop': None,
'ugettext_noop': None,
'ngettext_lazy': (1, 2),
'ungettext_lazy': (1, 2),
'npgettext': ((1, 'c'), 2, 3),
'pgettext_lazy': ((1, 'c'), 2),
'npgettext_lazy': ((1, 'c'), 2, 3),
})
# Add specified keywords
if additional_keywords:
for key, val in additional_keywords.items():
keywords[key] = val
return keywords
|
def generate_keywords(additional_keywords=None)
|
Generates gettext keywords list
:arg additional_keywords: dict of keyword -> value
:returns: dict of keyword -> values for Babel extraction
Here's what Babel has for DEFAULT_KEYWORDS::
DEFAULT_KEYWORDS = {
'_': None,
'gettext': None,
'ngettext': (1, 2),
'ugettext': None,
'ungettext': (1, 2),
'dgettext': (2,),
'dngettext': (2, 3),
'N_': None,
'pgettext': ((1, 'c'), 2)
}
If you wanted to add a new one ``_frank`` that was like
gettext, then you'd do this::
generate_keywords({'_frank': None})
If you wanted to add a new one ``upgettext`` that was like
gettext, then you'd do this::
generate_keywords({'upgettext': ((1, 'c'), 2)})
| 3.002708
| 2.365307
| 1.269479
|
return u' '.join(map(lambda s: s.strip(),
filter(None, message.strip().splitlines())))
|
def collapse_whitespace(message)
|
Collapses consecutive whitespace into a single space
| 5.215734
| 5.470129
| 0.953494
|
try:
return settings.PUENTE['JINJA2_CONFIG']
except KeyError:
pass
# If using Django 1.8+, we can skim the TEMPLATES for a backend that we
# know about and extract the settings from that.
for tmpl_config in getattr(settings, 'TEMPLATES', []):
try:
backend = tmpl_config['BACKEND']
except KeyError:
continue
if backend == 'django_jinja.backend.Jinja2':
extensions = tmpl_config.get('OPTIONS', {}).get('extensions', [])
return {
'**.*': {
'extensions': ','.join(extensions),
'silent': 'False',
}
}
# If this is Django 1.7 and Jingo, try to grab extensions from
# JINJA_CONFIG.
if getattr(settings, 'JINJA_CONFIG'):
jinja_config = settings.JINJA_CONFIG
if callable(jinja_config):
jinja_config = jinja_config()
return {
'**.*': {
'extensions': ','.join(jinja_config['extensions']),
'silent': 'False',
}
}
raise CommandError(
'No valid jinja2 config found in settings. See configuration '
'documentation.'
)
|
def generate_options_map()
|
Generate an ``options_map`` to pass to ``extract_from_dir``
This is the options_map that's used to generate a Jinja2 environment. We
want to generate an environment for extraction that's the same as the
environment we use for rendering.
This allows developers to explicitly set a ``JINJA2_CONFIG`` in settings.
If that's not there, then this will pull the relevant bits from the first
Jinja2 backend listed in ``TEMPLATES``.
| 4.100248
| 3.578027
| 1.145952
|
# Must monkeypatch first to fix i18n extensions stomping issues!
monkeypatch_i18n()
# Create the outputdir if it doesn't exist
outputdir = os.path.abspath(outputdir)
if not os.path.isdir(outputdir):
    print('Creating output dir %s ...' % outputdir)
    os.makedirs(outputdir)
domains = domain_methods.keys()
# NOTE(review): the text_domain parameter is not used here — confirm
# whether it is still needed by callers.
def callback(filename, method, options):
    # Progress reporting hook for extract_from_dir: one line per file.
    if method != 'ignore':
        print('  %s' % filename)
# Extract string for each domain
for domain in domains:
    print('Extracting all strings in domain %s...' % domain)
    methods = domain_methods[domain]
    # Fresh catalog per domain; all strings end up in <domain>.pot.
    catalog = Catalog(
        header_comment='',
        project=project,
        version=version,
        msgid_bugs_address=msgid_bugs_address,
        charset='utf-8',
    )
    extracted = extract_from_dir(
        base_dir,
        method_map=methods,
        options_map=generate_options_map(),
        keywords=keywords,
        comment_tags=comment_tags,
        callback=callback,
    )
    for filename, lineno, msg, cmts, ctxt in extracted:
        catalog.add(msg, None, [(filename, lineno)], auto_comments=cmts,
                    context=ctxt)
    # Binary mode: write_po handles the encoding itself.
    with open(os.path.join(outputdir, '%s.pot' % domain), 'wb') as fp:
        write_po(fp, catalog, width=80)
print('Done')
|
def extract_command(outputdir, domain_methods, text_domain, keywords,
comment_tags, base_dir, project, version,
msgid_bugs_address)
|
Extracts strings into .pot files
:arg domain: domains to generate strings for or 'all' for all domains
:arg outputdir: output dir for .pot files; usually
locale/templates/LC_MESSAGES/
:arg domain_methods: DOMAIN_METHODS setting
:arg text_domain: TEXT_DOMAIN settings
:arg keywords: KEYWORDS setting
:arg comment_tags: COMMENT_TAGS setting
:arg base_dir: BASE_DIR setting
:arg project: PROJECT setting
:arg version: VERSION setting
:arg msgid_bugs_address: MSGID_BUGS_ADDRESS setting
| 3.718605
| 3.638354
| 1.022057
|
# Merge freshly extracted .pot templates into every locale's .po files.
locale_dir = os.path.join(base_dir, 'locale')
# Verify existence of msginit and msgmerge before doing any work.
if not call(['which', 'msginit'], stdout=PIPE) == 0:
    raise CommandError('You do not have gettext installed.')
if not call(['which', 'msgmerge'], stdout=PIPE) == 0:
    raise CommandError('You do not have gettext installed.')
if languages and isinstance(languages[0], (tuple, list)):
    # Django's LANGUAGES setting takes a value like:
    #
    #    LANGUAGES = (
    #        ('de', _('German')),
    #        ('en', _('English')),
    #    )
    #
    # but we only want the language codes, so we pull the first
    # part from all the tuples.
    languages = [lang[0] for lang in languages]
if create:
    # Create locale/<lang>/LC_MESSAGES dirs for any missing language.
    for lang in languages:
        d = os.path.join(locale_dir, lang.replace('-', '_'),
                         'LC_MESSAGES')
        if not os.path.exists(d):
            os.makedirs(d)
domains = domain_methods.keys()
for domain in domains:
    print('Merging %s strings to each locale...' % domain)
    domain_pot = os.path.join(locale_dir, 'templates', 'LC_MESSAGES',
                              '%s.pot' % domain)
    if not os.path.isfile(domain_pot):
        raise CommandError('Can not find %s.pot' % domain)
    for locale in os.listdir(locale_dir):
        # Skip non-locale entries (dot dirs and the templates dir).
        if ((not os.path.isdir(os.path.join(locale_dir, locale)) or
             locale.startswith('.') or
             locale == 'templates')):
            continue
        domain_po = os.path.join(locale_dir, locale, 'LC_MESSAGES',
                                 '%s.po' % domain)
        if not os.path.isfile(domain_po):
            # First run for this locale: msginit creates the .po.
            print('  Can not find (%s).  Creating...' % domain_po)
            p1 = Popen([
                'msginit',
                '--no-translator',
                '--locale=%s' % locale,
                '--input=%s' % domain_pot,
                '--output-file=%s' % domain_po,
                '--width=200'
            ])
            p1.communicate()
        print('Merging %s.po for %s' % (domain, locale))
        with open(domain_pot) as domain_pot_file:
            if locale == 'en_US':
                # Create an English translation catalog, then merge
                with TemporaryFile('w+t') as enmerged:
                    p2 = Popen(['msgen', '-'], stdin=domain_pot_file,
                               stdout=enmerged)
                    p2.communicate()
                    _msgmerge(domain_po, enmerged, backup)
            else:
                _msgmerge(domain_po, domain_pot_file, backup)
    print('Domain %s finished' % domain)
print('All finished')
|
def merge_command(create, backup, base_dir, domain_methods, languages)
|
:arg create: whether or not to create directories if they don't
exist
:arg backup: whether or not to create backup .po files
:arg base_dir: BASE_DIR setting
:arg domain_methods: DOMAIN_METHODS setting
:arg languages: LANGUAGES setting
| 2.879156
| 2.882777
| 0.998744
|
# msgmerge reads the POT catalog from stdin ("-"), so rewind the
# file-like object first; --update rewrites po_path in place.
pot_file.seek(0)
backup_mode = 'simple' if backup else 'off'
cmd = [
    'msgmerge',
    '--update',
    '--width=200',
    '--backup=%s' % backup_mode,
    po_path,
    '-',
]
proc = Popen(cmd, stdin=pot_file)
proc.communicate()
|
def _msgmerge(po_path, pot_file, backup)
|
Merge an existing .po file with new translations.
:arg po_path: path to the .po file
:arg pot_file: a file-like object for the related templates
:arg backup: whether or not to create backup .po files
| 4.041432
| 4.36112
| 0.926696
|
# Fragment of the process main loop: run one processing cycle, then
# handle any pending control signals; postRun_() executes once the
# enclosing while-loop exits.
# NOTE(review): indentation appears flattened in this record — the
# first two calls presumably belong inside the loop body; confirm
# against the original source.
self.cycle_()
self.handleSignal_()
self.postRun_()
|
def run(self): # No "_" in the name, but nevertheless, running in the backend
self.preRun_()
self.running=True
while(self.running)
|
After the fork. Now the process starts running
| 16.867935
| 13.033646
| 1.294184
|
# Report entry, run the base-class hook, then attach a client to the
# shared-memory ring buffer created by the server side of the fork.
self.report("preRun_")
super().preRun_()
client = ShmemRGBClient(
    name=self.shmem_name,
    n_ringbuffer=self.n_buffer,  # size of the ring buffer
    width=self.image_dimensions[0],
    height=self.image_dimensions[1],
    mstimeout=1000,  # time out if nothing arrives within 1000 ms
    verbose=False
)
self.client = client
|
def preRun_(self)
|
Create the shared memory client immediately after fork
| 7.265279
| 6.483433
| 1.120591
|
# Fragment of the process main loop: when a shared-memory client has
# been reserved ("active"), process one cycle; otherwise idle briefly.
# Signals are handled every iteration; postRun_() runs after the loop.
if (self.active): # activated: shared mem client has been reserved
    self.cycle_()
else:
    if (self.verbose): print(self.pre, "sleep")
    time.sleep(0.2)
self.handleSignal_()
self.postRun_()
|
def run(self): # No "_" in the name, but nevertheless, running in the backend
self.preRun_()
self.running=True
while(self.running)
|
After the fork. Now the process starts running
| 14.509193
| 13.592285
| 1.067458
|
# Mark the analyzer active, remember the frame geometry, and attach a
# shared-memory client to the server's ring buffer; then let the
# subclass finish its setup.
self.active = True
self.image_dimensions = image_dimensions
self.client = ShmemRGBClient(
    name=shmem_name,
    n_ringbuffer=n_buffer,  # size of the ring buffer
    width=image_dimensions[0],
    height=image_dimensions[1],
    mstimeout=1000,  # time out if nothing arrives within 1000 ms
    verbose=False
)
self.postActivate_()
|
def activate_(self, n_buffer, image_dimensions, shmem_name)
|
Shared mem info is given. Now we can create the shmem client
| 5.618418
| 5.409785
| 1.038566
|
# Release the shared-memory client: let the subclass clean up first
# (preDeactivate_), then reset all shmem-related state to inactive.
self.preDeactivate_()
self.active = False
self.image_dimensions = None
self.client = None
|
def deactivate_(self)
|
Init shmem variables to None
| 12.51295
| 11.372619
| 1.10027
|
import subprocess
import fcntl
width = str(self.image_dimensions[0])
height = str(self.image_dimensions[1])
comlist = self.executable.split() + [width, height, self.tmpfile] # e.g. "python3", "example_process1.py", etc.
try:
self.p = subprocess.Popen(comlist, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
except Exception as e:
print(self.pre, "Could not open external process. Failed with '"+str(e)+"'")
return
self.reset()
|
def init(self)
|
Start the process
| 5.582175
| 5.406366
| 1.032519
|
# Ask the external analyzer to reset itself by writing the one-letter
# "T" command to its stdin; a broken pipe is reported, not raised.
self.report("sending reset")
try:
    stdin = self.p.stdin
    stdin.write(bytes("T\n","utf-8"))
    stdin.flush()
except IOError:
    self.report("could not send reset command")
|
def reset(self)
|
Tell the external analyzer to reset itself
| 4.697653
| 4.119764
| 1.140272
|
# Politely ask the external process to exit ("X" command), wait for it
# to terminate, then remove the temporary exchange file if present.
try:
    stdin = self.p.stdin
    stdin.write(bytes("X\n","utf-8"))
    stdin.flush()
except IOError:
    self.report("could not send exit command")
self.p.wait()  # wait until the process is closed
try:
    os.remove(self.tmpfile)  # clean up the temporary file
except FileNotFoundError:
    pass
|
def close(self)
|
Tell the process to exit
| 3.798833
| 3.469614
| 1.094886
|
btt = bytes()
while True:
bt = self.p.stdout.read(1)
if bt:
btt += bt
else:
# print("!")
break
return btt[0:-1].decode("utf-8")
|
def readStdout(self)
|
Not used
| 5.160334
| 4.816752
| 1.071331
|
# Images are exchanged with the external analyzer through a
# per-process temporary file, e.g. "/tmp/valkka-10968".
self.tmpfile = os.path.join(
    constant.tmpdir, "valkka-" + str(os.getpid()))
self.analyzer = ExternalDetector(
    executable=self.executable,
    image_dimensions=self.image_dimensions,
    tmpfile=self.tmpfile
)
|
def postActivate_(self)
|
Create temporary file for image dumps and the analyzer itself
| 7.928311
| 6.105315
| 1.298592
|
# Walk the instance's "pages" directory and deploy every custom page:
# recognized source formats are rendered via the test client to HTML,
# everything else is copied over verbatim.
from veripress import app
from veripress.model import storage
from veripress.model.parsers import get_standard_format_name
from veripress.helpers import traverse_directory
deploy_dir = get_deploy_dir()
def copy_file(src, dst):
    # Copy src to dst, creating the destination directories as needed.
    makedirs(os.path.dirname(dst), mode=0o755, exist_ok=True)
    shutil.copyfile(src, dst)
with app.app_context(), app.test_client() as client:
    root_path = os.path.join(app.instance_path, 'pages')
    for path in traverse_directory(root_path):
        # e.g. 'a/b/c/index.md'
        rel_path = os.path.relpath(path, root_path)
        # e.g. ('a/b/c/index', '.md')
        filename, ext = os.path.splitext(rel_path)
        if get_standard_format_name(ext[1:]) is not None:
            # is source of custom page
            rel_url = filename.replace(os.path.sep, '/') + '.html'
            page = storage.get_page(rel_url, include_draft=False)
            if page is not None:
                # it's not a draft, so render it via the app and write
                # the resulting html next to the deployed tree
                makedirs(os.path.join(deploy_dir,
                                      os.path.dirname(rel_path)),
                         mode=0o755, exist_ok=True)
                with open(os.path.join(deploy_dir, filename + '.html'),
                          'wb') as f:
                    f.write(client.get('/' + rel_url).data)
                if app.config['PAGE_SOURCE_ACCESSIBLE']:
                    # also expose the raw source when configured to
                    copy_file(path, os.path.join(deploy_dir, rel_path))
        else:
            # is other direct files
            copy_file(path, os.path.join(deploy_dir, rel_path))
|
def generate_pages_by_file()
|
Generates custom pages of 'file' storage type.
| 2.808297
| 2.742465
| 1.024005
|
# Rebuild the camera collection from scratch: clear it, create RTSP
# camera rows for consecutive addresses base_address.nstart..., pad
# the remaining slots with EmptyRow, and persist the result.
self.camera_collection.clear()
self.camera_collection.save()
cc = nstart
# Never create more rows than there are device slots available.
for i in range(1, min((n + 1, constant.max_devices + 1))):
    print(i)
    self.camera_collection.new(
        self.RTSPCameraRow,
        {
            "slot": i,
            "address": base_address + "." + str(cc),
            "username": username,
            "password": password,
            "port": port,
            "tail": tail,
            "subaddress_main": "",
            "live_main": True,
            "rec_main": False,
            "subaddress_sub": "",
            "live_sub": False,
            "rec_sub": False
        })
    cc += 1
# Fixed typo in the user-facing message ("addesses" -> "addresses").
print("Camera addresses now:")
for c, device in enumerate(self.camera_collection.get()):
    print(c + 1, self.RTSPCameraRow.getMainAddressFromDict(device))
# Fill the remaining slots with empty rows.
for i in range(n + 1, constant.max_devices + 1):
    self.camera_collection.new(self.EmptyRow, {"slot": i})
self.camera_collection.save()
print("Camera collection now:")
for c, device in enumerate(self.camera_collection.get()):
    print(c + 1, device)
|
def autoGenerateCameraCollection(self, base_address, nstart, n, port, tail, username, password)
|
:param: base_address str, e.g. "192.168.1"
:param: nstart int, e.g. 24
:param: n int, how many ips generated
| 3.742169
| 3.828423
| 0.97747
|
# Declare the persistent collections used by the application:
# devices.dat holds camera rows, config.dat holds configuration rows.
self.collections = []
self.camera_collection = \
    SimpleCollection(filename=os.path.join(self.directory, "devices.dat"),
                     row_classes=[
                         DataModel.EmptyRow,
                         DataModel.RTSPCameraRow,
                         DataModel.USBCameraRow
                     ]
                     )
self.collections.append(self.camera_collection)
self.config_collection = \
    SimpleCollection(filename=os.path.join(self.directory, "config.dat"),
                     row_classes=[  # we could dump here all kinds of info related to different kind of configuration forms
                         DataModel.MemoryConfigRow
                     ]
                     )
self.collections.append(self.config_collection)
|
def define(self)
|
Define column patterns and collections
| 5.831081
| 5.641775
| 1.033554
|
rows = self.camera_collection.get()
devices_by_id = {}
for row in rows:
classname=row.pop("classname")
if (classname == "RTSPCameraRow"):
device = DataModel.RTSPCameraDevice(**row)
elif (classname == "USBCameraRow"):
device = DataModel.USBCameraDevice(**row)
else:
device = None
if (device):
devices_by_id[device._id] = device
return devices_by_id
|
def getDevicesById(self): # , query)
|
rows = self.camera_collection.get(query)
devices_by_id = {}
for row in rows:
row.pop("classname")
device = DataModel.RTSPCameraDevice(**row)
devices_by_id[device._id] = device
return devices_by_id
| 3.975412
| 2.155477
| 1.844331
|
# Some modules must be imported "on the other side of the fork", i.e.
# inside the spawned multiprocess, so the heavy ALPR library is loaded
# here rather than at module import time.
#
# from openalpr import Alpr
from valkka.mvision.alpr.openalpr_fix import Alpr
self.movement = MovementDetector()
self.alpr = Alpr(self.country, self.conf_file, self.runtime_data)
if not self.alpr.is_loaded():
    # Engine failed to load: disable plate detection entirely.
    self.alpr = None
    return
self.alpr.set_top_n(self.top_n)
self.reset()
|
def init(self)
|
Init alpr
The LicensePlateDetector object gets instantiated in the multiprocess, so the library is imported in the multiprocess (i.e. "other side of the fork") as well
| 10.415555
| 8.197488
| 1.270579
|
# Instantiate the detector here, i.e. after the fork, so the heavy
# ALPR machinery lives entirely in the spawned multiprocess.
analyzer = LicensePlateDetector(**self.analyzer_pars)
self.analyzer = analyzer
self.report("analyzer object=", self.analyzer)
|
def postActivate_(self)
|
Whatever you need to do after creating the shmem client
| 25.008776
| 24.115158
| 1.037056
|
if self.is_injected():
return False
with open(self, mode="a+") as fp:
fp.seek(0)
fp.write("\n".join([
"",
"try:",
" import pout",
"except ImportError:",
" pass",
"else:",
" pout.inject()",
"",
]))
return True
|
def inject(self)
|
inject code into sitecustomize.py that will inject pout into the builtins
so it will be available globally
| 4.281891
| 3.32376
| 1.288267
|
if publish_type == 'post':
return self.fix_post_relative_url(rel_url), False
elif publish_type == 'page':
return self.fix_page_relative_url(rel_url)
else:
raise ValueError(
'Publish type "{}" is not supported'.format(publish_type))
|
def fix_relative_url(self, publish_type, rel_url)
|
Fix post or page relative url to a standard, uniform format.
:param publish_type: publish type ('post' or 'page')
:param rel_url: relative url to fix
:return: tuple(fixed relative url or file path if exists else None,
file exists or not)
:raise ValueError: unknown publish type
| 2.39288
| 2.203641
| 1.085876
|
# Parse "YYYY/M/D/post-name" with optional ".html", trailing "/",
# or "/index[.htm(l)]" suffix.
m = re.match(
    r'^(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/'
    r'(?P<post_name>[^/]+?)'
    r'(?:(?:\.html)|(?:/(?P<index>index(?:\.html?)?)?))?$',
    rel_url
)
if not m:
    return None
year, month, day, post_name = m.groups()[:4]
try:
    # Round-tripping through date() rejects impossible dates and
    # strftime zero-pads month/day into the canonical YYYY/MM/DD form.
    d = date(year=int(year), month=int(month), day=int(day))
    return '/'.join((d.strftime('%Y/%m/%d'), post_name,
                     'index.html' if m.group('index') else ''))
except (TypeError, ValueError):
    # the date is invalid
    return None
|
def fix_post_relative_url(rel_url)
|
Fix post relative url to a standard, uniform format.
Possible input:
- 2016/7/8/my-post
- 2016/07/08/my-post.html
- 2016/8/09/my-post/
- 2016/8/09/my-post/index
- 2016/8/09/my-post/index.htm
- 2016/8/09/my-post/index.html
:param rel_url: relative url to fix
:return: fixed relative url, or None if cannot recognize
| 2.455581
| 2.220161
| 1.106037
|
if filter_functions is not None:
for filter_func in filter_functions:
result = filter(filter_func, result)
return result
|
def _filter_result(result, filter_functions=None)
|
Filter result with given filter functions.
:param result: an iterable object
:param filter_functions: some filter functions
:return: a filter object (filtered result)
| 2.599206
| 3.232043
| 0.804199
|
# Translate the keyword limits into predicate functions, then delegate
# the actual retrieval and filtering to get_posts().
filter_funcs = []
# Attribute limits: match when the post's values intersect the limit set.
for attr in ('title', 'layout', 'author',
             'email', 'tags', 'categories'):
    if limits.get(attr):
        filter_set = set(to_list(limits.get(attr)))
        # Factory avoids the late-binding closure pitfall: filter_set
        # and attr are bound per iteration, not at call time.
        def get_filter_func(filter_set_, attr_):
            return lambda p: filter_set_.intersection(
                to_list(getattr(p, attr_)))
        filter_funcs.append(get_filter_func(filter_set, attr))
# Date limits: a [start, end] pair of date/datetime objects.
for attr in ('created', 'updated'):
    interval = limits.get(attr)
    if isinstance(interval, (list, tuple)) and len(interval) == 2 \
            and isinstance(interval[0], date) and isinstance(
                interval[1], date):
        # [start date(time), end date(time)]
        start, end = interval
        start = to_datetime(start)
        if not isinstance(end, datetime):
            # 'end' is a date,
            # we should convert it to 00:00:00 of the next day,
            # so that posts of that day will be included
            end = datetime.strptime(
                '%04d-%02d-%02d' % (end.year, end.month, end.day),
                '%Y-%m-%d')
            end += timedelta(days=1)
        def get_filter_func(attr_, start_dt, end_dt):
            return lambda p: start_dt <= getattr(p, attr_) < end_dt
        filter_funcs.append(get_filter_func(attr, start, end))
return self.get_posts(include_draft=include_draft,
                      filter_functions=filter_funcs)
|
def get_posts_with_limits(self, include_draft=False, **limits)
|
Get all posts and filter them as needed.
:param include_draft: return draft posts or not
:param limits: other limits to the attrs of the result,
should be a dict with string or list values
:return: an iterable of Post objects
| 2.720347
| 2.72487
| 0.99834
|
# Case-insensitive substring search over titles and rendered content.
query = query.lower()
if not query:
    return []
def contains_query_keyword(post_or_page):
    # Match in the title, or in the parsed content with markup
    # stripped so HTML tags never produce false positives.
    contains = query in post_or_page.title.lower() \
               or query in Markup(
                   get_parser(post_or_page.format).parse_whole(
                       post_or_page.raw_content)
               ).striptags().lower()
    return contains
# Pages are only searched when ALLOW_SEARCH_PAGES is enabled.
return filter(contains_query_keyword,
              chain(self.get_posts(include_draft=include_draft),
                    self.get_pages(include_draft=include_draft)
                    if current_app.config[
                        'ALLOW_SEARCH_PAGES'] else []))
|
def search_for(self, query, include_draft=False)
|
Search for a query text.
:param query: keyword to query
:param include_draft: return draft posts/pages or not
:return: an iterable object of posts and pages (if allowed).
| 3.829016
| 3.713185
| 1.031195
|
# Normalize a page relative url and resolve it against the instance's
# "pages" directory; returns (url-or-file-path-or-None, exists_flag).
rel_url = rel_url.lstrip('/')  # trim all heading '/'
endswith_slash = rel_url.endswith('/')
rel_url = rel_url.rstrip('/') + (
    '/' if endswith_slash else '')  # preserve only one trailing '/'
if not rel_url or rel_url == '/':
    return None, False
file_path = os.path.join(current_app.instance_path, 'pages',
                         rel_url.replace('/', os.path.sep))
if rel_url.endswith('/'):
    # Directory-style url: serve its index.html when one exists.
    index_html_file_path = os.path.join(file_path, 'index.html')
    if os.path.isfile(index_html_file_path):
        # index.html exists
        return index_html_file_path, True
    return rel_url, False
elif os.path.isfile(file_path):
    ext = os.path.splitext(file_path)[1][1:]
    if get_standard_format_name(ext) is not None:
        # is source of custom page: expose only when configured to
        if current_app.config['PAGE_SOURCE_ACCESSIBLE']:
            return file_path, True
    else:
        # is other direct files
        return file_path, True
elif os.path.isdir(file_path):
    # Existing directory without trailing slash: canonicalize the url.
    return rel_url + '/', False
# No matching file: canonicalize the last segment to a ".html" url.
sp = rel_url.rsplit('/', 1)
m = re.match(r'(.+)\.html?', sp[-1])
if m:
    sp[-1] = m.group(1) + '.html'
else:
    sp[-1] += '.html'
return '/'.join(sp), False
|
def fix_page_relative_url(rel_url)
|
Fix page relative url to a standard, uniform format.
Possible input:
- my-page
- my-page/
- my-page/index
- my-page/index.htm
- my-page/index.html
- my-page/specific.file
:param rel_url: relative url to fix
:return: tuple(fixed relative url or FILE PATH if exists else None,
file exists or not)
| 2.946685
| 2.884215
| 1.021659
|
if instance_relative_root:
search_root = os.path.join(current_app.instance_path, search_root)
file_path = None
file_ext = None
for file in os.listdir(search_root):
filename, ext = os.path.splitext(file)
if filename == search_filename and ext and ext != '.':
file_path = os.path.join(search_root, filename + ext)
file_ext = ext[1:] # remove heading '.' (dot)
break
return file_path, file_ext
|
def search_file(search_root, search_filename,
instance_relative_root=False)
|
Search for a filename in a specific search root dir.
:param search_root: root dir to search
:param search_filename: filename to search (no extension)
:param instance_relative_root: search root is relative to instance path
:return: tuple(full_file_path, extension without heading dot)
| 2.26538
| 2.117103
| 1.070038
|
with open(file_path, 'r', encoding='utf-8') as f:
whole = f.read().strip()
if whole.startswith('---'):
# may has yaml meta info, so we try to split it out
sp = re.split(r'-{3,}', whole.lstrip('-'), maxsplit=1)
if len(sp) == 2:
# do have yaml meta info, so we read it
return yaml.load(sp[0]), sp[1].lstrip()
return {}, whole
|
def read_file(file_path)
|
Read yaml head and raw body content from a file.
:param file_path: file path
:return: tuple(meta, raw_content)
| 4.507467
| 4.186708
| 1.076614
|
def posts_generator(path):
if os.path.isdir(path):
for file in os.listdir(path):
filename, ext = os.path.splitext(file)
format_name = get_standard_format_name(ext[1:])
if format_name is not None and re.match(
r'\d{4}-\d{2}-\d{2}-.+', filename):
# the format is supported and the filename is valid,
# so load this post
post = Post()
post.format = format_name
post.meta, post.raw_content = FileStorage.read_file(
os.path.join(path, file))
post.rel_url = filename.replace('-', '/', 3) + '/'
post.unique_key = '/post/' + post.rel_url
yield post
posts_path = os.path.join(current_app.instance_path, 'posts')
result = filter(lambda p: include_draft or not p.is_draft,
posts_generator(posts_path))
result = self._filter_result(result, filter_functions)
return sorted(result, key=lambda p: p.created, reverse=True)
|
def get_posts(self, include_draft=False, filter_functions=None)
|
Get all posts from filesystem.
:param include_draft: return draft posts or not
:param filter_functions: filter to apply BEFORE result being sorted
:return: an iterable of Post objects (the first is the latest post)
| 3.403641
| 3.238667
| 1.050939
|
raw_rel_url = str(rel_url)
if rel_url.endswith('/index.html'):
rel_url = rel_url.rsplit('/', 1)[
0] + '/' # remove the trailing 'index.html'
post_filename = rel_url[:-1].replace('/', '-')
post_file_path, post_file_ext = FileStorage.search_instance_file(
'posts', post_filename)
if post_file_path is None or post_file_ext is None or \
get_standard_format_name(post_file_ext) is None:
# no such post
return None
# construct the post object
post = Post()
post.rel_url = raw_rel_url
# 'rel_url' contains no trailing 'index.html'
post.unique_key = '/post/' + rel_url
post.format = get_standard_format_name(post_file_ext)
post.meta, post.raw_content = FileStorage.read_file(post_file_path)
return post if include_draft or not post.is_draft else None
|
def get_post(self, rel_url, include_draft=False)
|
Get post for given relative url from filesystem.
Possible input:
- 2017/01/01/my-post/
- 2017/01/01/my-post/index.html
:param rel_url: relative url
:param include_draft: return draft post or not
:return: a Post object
| 3.916612
| 3.900178
| 1.004214
|
posts = self.get_posts(include_draft=True)
result = {}
for post in posts:
for tag_name in set(post.tags):
result[tag_name] = result.setdefault(
tag_name, Pair(0, 0)) + Pair(1, 0 if post.is_draft else 1)
return list(result.items())
|
def get_tags(self)
|
Get all tags and post count of each tag.
:return: dict_item(tag_name, Pair(count_all, count_published))
| 3.654232
| 2.75845
| 1.324741
|
posts = self.get_posts(include_draft=True)
result = {}
for post in posts:
for category_name in set(post.categories):
result[category_name] = result.setdefault(
category_name,
Pair(0, 0)) + Pair(1, 0 if post.is_draft else 1)
return list(result.items())
|
def get_categories(self)
|
Get all categories and post count of each category.
:return dict_item(category_name, Pair(count_all, count_published))
| 3.78506
| 2.83635
| 1.334482
|
def pages_generator(pages_root_path):
for file_path in traverse_directory(pages_root_path,
yield_dir=False):
rel_path = os.path.relpath(file_path, pages_root_path)
rel_path, ext = os.path.splitext(rel_path)
if not ext or ext == '.' or get_standard_format_name(
ext[1:]) is None:
continue # pragma: no cover
if rel_path.endswith(os.path.sep + 'index'):
rel_path = rel_path[:-len('index')]
else:
rel_path += '.html'
page = self.get_page(rel_path.replace(os.path.sep, '/'),
include_draft=include_draft)
if page is not None:
yield page
pages_path = os.path.join(current_app.instance_path, 'pages')
return list(pages_generator(pages_path))
|
def get_pages(self, include_draft=False)
|
Get all custom pages
(supported formats, excluding other files like '.js', '.css', '.html').
:param include_draft: return draft page or not
:return: an iterable of Page objects
| 2.827359
| 2.774041
| 1.01922
|
page_dir = os.path.dirname(rel_url.replace('/', os.path.sep))
page_path = os.path.join(current_app.instance_path, 'pages', page_dir)
if not os.path.isdir(page_path):
# no such directory
return None
page_filename = rel_url[len(page_dir):].lstrip('/')
if not page_filename:
page_filename = 'index'
else:
page_filename = os.path.splitext(page_filename)[0]
page_file_path, page_file_ext = FileStorage.search_file(page_path,
page_filename)
if page_file_path is None or page_file_ext is None or \
get_standard_format_name(page_file_ext) is None:
# no such page
return None
page = Page()
page.rel_url = rel_url
page.unique_key = '/' + (
rel_url.rsplit('/', 1)[0] + '/' if rel_url.endswith(
'/index.html') else rel_url)
page.format = get_standard_format_name(page_file_ext)
page.meta, page.raw_content = FileStorage.read_file(page_file_path)
return page if include_draft or not page.is_draft else None
|
def get_page(self, rel_url, include_draft=False)
|
Get custom page for given relative url from filesystem.
Possible input:
- my-page/
- my-page/index.html
- my-another-page.html
- a/b/c/
- a/b/c/d.html
:param rel_url: relative url
:param include_draft: return draft page or not
:return: a Page object
| 2.667837
| 2.695288
| 0.989815
|
def widgets_generator(path):
if os.path.isdir(path):
for file in os.listdir(path):
_, ext = os.path.splitext(file)
format_name = get_standard_format_name(ext[1:])
if format_name is not None:
# the format is supported, so load it
widget = Widget()
widget.format = format_name
widget.meta, widget.raw_content = \
FileStorage.read_file(os.path.join(path, file))
yield widget
widgets_path = os.path.join(current_app.instance_path, 'widgets')
positions = to_list(position) if position is not None else position
result = filter(
lambda w: (w.position in positions
if positions is not None else True) and
(include_draft or not w.is_draft),
widgets_generator(widgets_path))
return sorted(result, key=lambda w: (w.position, w.order))
|
def get_widgets(self, position=None, include_draft=False)
|
Get widgets for given position from filesystem.
:param position: position or position list
:param include_draft: return draft widgets or not
:return: an iterable of Widget objects
| 3.514792
| 3.339145
| 1.052603
|
response_str = render_template(
functools.reduce(lambda x, y: x + [os.path.join('custom', y), y],
to_list(template_name_or_list), []),
**context
)
if hasattr(g, 'status_code'):
status_code = g.status_code
else:
status_code = 200
return response_str, status_code
|
def custom_render_template(template_name_or_list, **context)
|
Try to render templates in the custom folder first,
if no custom templates, try the theme's default ones.
| 3.430486
| 3.390662
| 1.011745
|
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
template_ = template
if template_ is None:
template_ = request.endpoint.split('.', 1)[1].replace(
'.', '/') + '.html'
context = func(*args, **kwargs)
if context is None:
context = {}
elif not isinstance(context, dict):
return context
return custom_render_template(
list(chain(to_list(template_), templates)), **context)
return wrapper
return decorator
|
def templated(template=None, *templates)
|
Decorate a view function with one or more default template name.
This will try templates in the custom folder first,
the theme's original ones second.
:param template: template name or template name list
| 3.051715
| 3.200909
| 0.95339
|
for file in os.listdir(src):
file_path = os.path.join(src, file)
dst_file_path = os.path.join(dst, file)
if os.path.isdir(file_path):
shutil.copytree(file_path, dst_file_path)
else:
shutil.copyfile(file_path, dst_file_path)
|
def copy_folder_content(src, dst)
|
Copy all content in src directory to dst directory.
The src and dst must exist.
| 1.387366
| 1.441295
| 0.962583
|
for file in os.listdir(path):
if ignore_hidden_file and file.startswith('.'):
continue
file_path = os.path.join(path, file)
if os.path.isdir(file_path):
shutil.rmtree(file_path)
else:
os.remove(file_path)
|
def remove_folder_content(path, ignore_hidden_file=False)
|
Remove all content in the given folder.
| 1.535068
| 1.507769
| 1.018105
|
os.makedirs(path, mode, exist_ok)
|
def makedirs(path, mode=0o777, exist_ok=False)
|
A wrapper of os.makedirs().
| 3.986309
| 3.419115
| 1.165889
|
if self.loaded:
self.loaded = False
self._openalprpy_lib.dispose(self.alpr_pointer)
|
def unload(self)
|
Unloads OpenALPR from memory.
:return: None
| 18.667919
| 8.935513
| 2.089183
|
file_path = _convert_to_charp(file_path)
ptr = self._recognize_file_func(self.alpr_pointer, file_path)
json_data = ctypes.cast(ptr, ctypes.c_char_p).value
json_data = _convert_from_charp(json_data)
response_obj = json.loads(json_data)
self._free_json_mem_func(ctypes.c_void_p(ptr))
return response_obj
|
def recognize_file(self, file_path)
|
This causes OpenALPR to attempt to recognize an image by opening a file on
disk.
:param file_path: The path to the image that will be analyzed
:return: An OpenALPR analysis in the form of a response dictionary
| 3.647922
| 4.016752
| 0.908177
|
if type(byte_array) != bytes:
raise TypeError("Expected a byte array (string in Python 2, bytes in Python 3)")
pb = ctypes.cast(byte_array, ctypes.POINTER(ctypes.c_ubyte))
ptr = self._recognize_array_func(self.alpr_pointer, pb, len(byte_array))
json_data = ctypes.cast(ptr, ctypes.c_char_p).value
json_data = _convert_from_charp(json_data)
response_obj = json.loads(json_data)
self._free_json_mem_func(ctypes.c_void_p(ptr))
return response_obj
|
def recognize_array(self, byte_array)
|
This causes OpenALPR to attempt to recognize an image passed in as a byte array.
:param byte_array: This should be a string (Python 2) or a bytes object (Python 3)
:return: An OpenALPR analysis in the form of a response dictionary
| 3.294272
| 3.234007
| 1.018635
|
if self._recognize_raw_image_func is None:
raise RuntimeError('NumPy missing')
height, width = ndarray.shape[:2]
bpp = ndarray.shape[2] if len(ndarray.shape) > 2 else 1
ptr = self._recognize_raw_image_func(self.alpr_pointer, ndarray.flatten(), bpp, width, height)
json_data = ctypes.cast(ptr, ctypes.c_char_p).value
json_data = _convert_from_charp(json_data)
# there is a bug in the openalpr python bindings
# sometimes there are real numbers with "," as the decimal point..!
# print("openalpr_lib : recognize_ndarray : json_data =", json_data)
p = re.compile('\d(\,)\d')
json_data = p.subn(".", json_data)[0]
response_obj = json.loads(json_data)
self._free_json_mem_func(ctypes.c_void_p(ptr))
return response_obj
|
def recognize_ndarray(self, ndarray)
|
This causes OpenALPR to attempt to recognize an image passed in as a numpy array.
:param ndarray: numpy.array as used in cv2 module
:return: An OpenALPR analysis in the form of a response dictionary
| 4.962846
| 4.846981
| 1.023905
|
ptr = self._get_version_func(self.alpr_pointer)
version_number = ctypes.cast(ptr, ctypes.c_char_p).value
version_number = _convert_from_charp(version_number)
self._free_json_mem_func(ctypes.c_void_p(ptr))
return version_number
|
def get_version(self)
|
This gets the version of OpenALPR
:return: Version information
| 6.125754
| 4.872794
| 1.257134
|
country = _convert_to_charp(country)
self._set_country_func(self.alpr_pointer, country)
|
def set_country(self, country)
|
This sets the country for detecting license plates. For example,
setting country to "us" for United States or "eu" for Europe.
:param country: A unicode/ascii string (Python 2/3) or bytes array (Python 3)
:return: None
| 15.072521
| 19.742531
| 0.763454
|
prewarp = _convert_to_charp(prewarp)
self._set_prewarp_func(self.alpr_pointer, prewarp)
|
def set_prewarp(self, prewarp)
|
Updates the prewarp configuration used to skew images in OpenALPR before
processing.
:param prewarp: A unicode/ascii string (Python 2/3) or bytes array (Python 3)
:return: None
| 8.372163
| 8.828382
| 0.948324
|
region = _convert_to_charp(region)
self._set_default_region_func(self.alpr_pointer, region)
|
def set_default_region(self, region)
|
This sets the default region for detecting license plates. For example,
setting region to "md" for Maryland or "fr" for France.
:param region: A unicode/ascii string (Python 2/3) or bytes array (Python 3)
:return: None
| 12.677249
| 15.808479
| 0.801927
|
try:
[t for t in self.tokens]
ret = True
logger.debug('CallString [{}] is complete'.format(self.strip()))
except tokenize.TokenError:
logger.debug('CallString [{}] is NOT complete'.format(self.strip()))
ret = False
return ret
|
def is_complete(self)
|
Return True if this call string is complete, meaning it has a function
name and balanced parens
| 5.68775
| 4.495348
| 1.265253
|
'''
scan the abstract source tree looking for possible ways to call the called_module
and called_func
since -- 7-2-12 -- Jay
example --
# import the module a couple ways:
import pout
from pout import v
from pout import v as voom
import pout as poom
# this function would return: ['pout.v', 'v', 'voom', 'poom.v']
module finder might be useful someday
link -- http://docs.python.org/library/modulefinder.html
link -- http://stackoverflow.com/questions/2572582/return-a-list-of-imported-python-modules-used-in-a-script
ast_tree -- _ast.* instance -- the internal ast object that is being checked, returned from compile()
with ast.PyCF_ONLY_AST flag
called_module -- string -- we are checking the ast for imports of this module
called_func -- string -- we are checking the ast for aliases of this function
return -- set -- the list of possible calls the ast_tree could make to call the called_func
'''
s = set()
# always add the default call, the set will make sure there are no dupes...
s.add("{}.{}".format(called_module, called_func))
if hasattr(ast_tree, 'name'):
if ast_tree.name == called_func:
# the function is defined in this module
s.add(called_func)
if hasattr(ast_tree, 'body'):
# further down the rabbit hole we go
if isinstance(ast_tree.body, Iterable):
for ast_body in ast_tree.body:
s.update(self._find_calls(ast_body, called_module, called_func))
elif hasattr(ast_tree, 'names'):
# base case
if hasattr(ast_tree, 'module'):
# we are in a from ... import ... statement
if ast_tree.module == called_module:
for ast_name in ast_tree.names:
if ast_name.name == called_func:
s.add(unicode(ast_name.asname if ast_name.asname is not None else ast_name.name))
else:
# we are in a import ... statement
for ast_name in ast_tree.names:
if hasattr(ast_name, 'name') and (ast_name.name == called_module):
call = "{}.{}".format(
ast_name.asname if ast_name.asname is not None else ast_name.name,
called_func
)
s.add(call)
return s
|
def _find_calls(self, ast_tree, called_module, called_func)
|
scan the abstract source tree looking for possible ways to call the called_module
and called_func
since -- 7-2-12 -- Jay
example --
# import the module a couple ways:
import pout
from pout import v
from pout import v as voom
import pout as poom
# this function would return: ['pout.v', 'v', 'voom', 'poom.v']
module finder might be useful someday
link -- http://docs.python.org/library/modulefinder.html
link -- http://stackoverflow.com/questions/2572582/return-a-list-of-imported-python-modules-used-in-a-script
ast_tree -- _ast.* instance -- the internal ast object that is being checked, returned from compile()
with ast.PyCF_ONLY_AST flag
called_module -- string -- we are checking the ast for imports of this module
called_func -- string -- we are checking the ast for aliases of this function
return -- set -- the list of possible calls the ast_tree could make to call the called_func
| 4.371201
| 1.803647
| 2.423534
|
'''
get all the info of a method call
this will find what arg names you passed into the method and tie them to their passed in values,
it will also find file and line number
return -- dict -- a bunch of info on the call
'''
ret_dict = {
'args': [],
#'frame': None,
'line': 'Unknown',
'file': 'Unknown',
'arg_names': []
}
arg_vals = self.arg_vals
#modname = self.modname
c = self.call
ret_dict.update(c.info)
if len(arg_vals) > 0:
args = []
if len(ret_dict['arg_names']) > 0:
# match the found arg names to their respective values
for i, arg_name in enumerate(ret_dict['arg_names']):
args.append({'name': arg_name, 'val': arg_vals[i]})
else:
# we can't autodiscover the names, in an interactive shell session?
for i, arg_val in enumerate(arg_vals):
args.append({'name': 'Unknown {}'.format(i), 'val': arg_val})
ret_dict['args'] = args
return ret_dict
|
def _get_arg_info(self)
|
get all the info of a method call
this will find what arg names you passed into the method and tie them to their passed in values,
it will also find file and line number
return -- dict -- a bunch of info on the call
| 4.901841
| 2.936851
| 1.66908
|
back_i = 0
pout_path = self._get_src_file(self.modname)
for frame_i, frame in enumerate(frames):
if frame[1] == pout_path:
back_i = frame_i
return Call(frames[back_i])
|
def _find_entry_call(self, frames)
|
attempts to auto-discover the correct frame
| 6.40275
| 6.227787
| 1.028094
|
if (self.requiredGPU_MB(self.required_mb)):
self.analyzer = YoloV2Analyzer(verbose = self.verbose)
else:
self.warning_message = "WARNING: not enough GPU memory!"
self.analyzer = None
|
def postActivate_(self)
|
Whatever you need to do after creating the shmem client
| 13.90676
| 13.356384
| 1.041207
|
for rule in to_list(rules):
blueprint_or_app.add_url_rule(rule,
endpoint=endpoint,
view_func=view_func,
**options)
|
def url_rule(blueprint_or_app, rules,
endpoint=None, view_func=None, **options)
|
Add one or more url rules to the given Flask blueprint or app.
:param blueprint_or_app: Flask blueprint or app
:param rules: a single rule string or a list of rules
:param endpoint: endpoint
:param view_func: view function
:param options: other options
| 2.235141
| 2.996585
| 0.745896
|
if isinstance(item_or_list, list):
return item_or_list
elif isinstance(item_or_list, (str, bytes)):
return [item_or_list]
elif isinstance(item_or_list, Iterable):
return list(item_or_list)
else:
return [item_or_list]
|
def to_list(item_or_list)
|
Convert a single item, a tuple, a generator or anything else to a list.
:param item_or_list: single item or iterable to convert
:return: a list
| 1.618186
| 1.780043
| 0.909072
|
if isinstance(date_or_datetime, date) and \
not isinstance(date_or_datetime, datetime):
d = date_or_datetime
return datetime.strptime(
'%04d-%02d-%02d' % (d.year, d.month, d.day), '%Y-%m-%d')
return date_or_datetime
|
def to_datetime(date_or_datetime)
|
Convert a date object to a datetime object,
or return as it is if it's not a date object.
:param date_or_datetime: date or datetime object
:return: a datetime object
| 1.992575
| 2.1971
| 0.906912
|
m = re.match(r'UTC([+|-]\d{1,2}):(\d{2})', tz_str)
if m:
# in format 'UTC±[hh]:[mm]'
delta_h = int(m.group(1))
delta_m = int(m.group(2)) if delta_h >= 0 else -int(m.group(2))
return timezone(timedelta(hours=delta_h, minutes=delta_m))
# in format 'Asia/Shanghai'
try:
return pytz.timezone(tz_str)
except pytz.exceptions.UnknownTimeZoneError:
return None
|
def timezone_from_str(tz_str)
|
Convert a timezone string to a timezone object.
:param tz_str: string with format 'Asia/Shanghai' or 'UTC±[hh]:[mm]'
:return: a timezone object (tzinfo)
| 2.397334
| 2.080374
| 1.152357
|
if not os.path.isdir(dir_path):
return
for item in os.listdir(dir_path):
new_path = os.path.join(dir_path, item)
if os.path.isdir(new_path):
if yield_dir:
yield new_path + os.path.sep
yield from traverse_directory(new_path, yield_dir)
else:
yield new_path
|
def traverse_directory(dir_path, yield_dir=False)
|
Traverse through a directory recursively.
:param dir_path: directory path
:param yield_dir: yield subdirectory or not
:return: a generator
| 1.686873
| 1.943797
| 0.867824
|
from flask import current_app
from veripress.model.toc import HtmlTocParser
if current_app.config['SHOW_TOC']:
toc_parser = HtmlTocParser()
toc_parser.feed(html_content)
toc_html = toc_parser.toc_html(
depth=current_app.config['TOC_DEPTH'],
lowest_level=current_app.config['TOC_LOWEST_LEVEL'])
toc = toc_parser.toc(
depth=current_app.config['TOC_DEPTH'],
lowest_level=current_app.config['TOC_LOWEST_LEVEL'])
return toc_parser.html, toc, toc_html
else:
return html_content, None, None
|
def parse_toc(html_content)
|
Parse TOC of HTML content if the SHOW_TOC config is true.
:param html_content: raw HTML content
:return: tuple(processed HTML, toc list, toc HTML unordered list)
| 2.633199
| 2.389356
| 1.102054
|
'''
add whitespace to the beginning of each line of val
link -- http://code.activestate.com/recipes/66055-changing-the-indentation-of-a-multi-line-string/
val -- string
indent -- integer -- how much whitespace we want in front of each line of val
return -- string -- val with more whitespace
'''
if indent_count < 1: return self
s = [("\t" * indent_count) + line for line in self.splitlines(False)]
s = "\n".join(s)
return type(self)(s)
|
def indent(self, indent_count)
|
add whitespace to the beginning of each line of val
link -- http://code.activestate.com/recipes/66055-changing-the-indentation-of-a-multi-line-string/
val -- string
indent -- integer -- how much whitespace we want in front of each line of val
return -- string -- val with more whitespace
| 5.192105
| 1.974983
| 2.628937
|
x264opts="-x264opts keyint=10:min-keyint=10:bframes=0"
h264_dummystream_flavor="-pix_fmt yuv420p -vprofile main"
opts="-vcodec h264 "+h264_dummystream_flavor+" "+x264opts+" -fflags +genpts -r 25 -t "+str(T)
com="ffmpeg -y -loop 1 -fflags +genpts -r 25 -i "+infile+" "+opts+" "+outfile
print(com)
os.system(com)
|
def genH264(infile, outfile, T)
|
Generate H264 stream
Input image, output video file, time in seconds
Example:
::
genH264("/home/sampsa/python3/tests/lprtest/RealImages/IMG_20170308_093511.jpg","testi.mkv", 10)
| 4.314191
| 4.528606
| 0.952653
|
if not self.hasIndex(row, column, parent):
return QtCore.QModelIndex()
if not parent.isValid():
parentItem = self.root
else:
# So, here we go from QModelIndex to the actual object .. ?
parentItem = parent.internalPointer()
# the only place where a child item is queried
childItem = parentItem.getChild(row)
if childItem:
# return self.createIndex(row, column)
return self.createIndex(row, column, childItem)
else:
return QtCore.QModelIndex()
|
def index(self, row, column, parent)
|
Returns the index of the item in the model specified by the given row, column and parent index.
row, column == int, parent == QModelIndex
| 3.681246
| 3.631164
| 1.013792
|
if not index.isValid():
return QtCore.QModelIndex()
childItem = index.internalPointer()
# the only place where the parent item is queried
parentItem = childItem.getParent()
if parentItem == self.root:
return QtCore.QModelIndex()
return self.createIndex(parentItem.row(), 0, parentItem)
|
def parent(self, index)
|
Returns the parent of the model item with the given index. If the item has no parent, an invalid QModelIndex is returned.
| 2.924631
| 2.916966
| 1.002628
|
# print("columnCount:",self)
if parent.isValid():
return parent.internalPointer().columnCount()
else:
return self.root.columnCount()
|
def columnCount(self, parent)
|
Returns the number of columns for the children of the given parent.
| 4.478968
| 4.255436
| 1.052529
|
if parent.column() > 0:
return 0
if not parent.isValid():
parentItem = self.root
else:
parentItem = parent.internalPointer()
return parentItem.childCount()
|
def rowCount(self, parent)
|
Returns the number of rows under the given parent. When the parent is valid it means that rowCount is returning the number of children of parent.
| 3.347691
| 3.476699
| 0.962894
|
if not index.isValid():
return QtCore.Qt.NoItemFlags
item = index.internalPointer()
# return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable |
# QtCore.Qt.ItemIsDragEnabled
return item.getFlags()
|
def flags(self, index)
|
Returns the item flags for the given index.
| 2.882758
| 2.397538
| 1.202382
|
s = self.name_value()
s += self.path_value()
s += "\n\n"
return s
|
def full_value(self)
|
Returns the full value with the path also (ie, name = value (path))
:returns: String
| 7.097992
| 6.413721
| 1.106689
|
# unicode sandwich, everything printed should be a byte string
s = "\n"
for arg in args:
#s += arg.encode('utf-8', 'pout.replace')
s += arg
if call_info:
s += "({}:{})\n\n".format(self._get_path(call_info['file']), call_info['line'])
return s
|
def _printstr(self, args, call_info=None)
|
this gets all the args ready to be printed, see self._print()
| 8.279005
| 7.638511
| 1.083851
|
'''
return a string version of name = val that can be printed
example --
_str('foo', 'bar') # foo = bar
name -- string -- the variable name that was passed into one of the public methods
val -- mixed -- the variable at name's value
return -- string
'''
s = ''
v = Value(val)
if name:
logger.debug("{} is type {}".format(name, v.typename))
try:
count = len(val)
s = "{} ({}) = {}".format(name, count, v.string_value())
except (TypeError, KeyError, AttributeError) as e:
logger.info(e, exc_info=True)
s = "{} = {}".format(name, v.string_value())
else:
s = v.string_value()
return s
|
def _str(self, name, val)
|
return a string version of name = val that can be printed
example --
_str('foo', 'bar') # foo = bar
name -- string -- the variable name that was passed into one of the public methods
val -- mixed -- the variable at name's value
return -- string
| 5.949273
| 2.763509
| 2.152797
|
ret = []
if self.logger_name:
if isinstance(self.logger_name, logging.Logger):
ret.append((self.logger_name.name, self.logger_name))
else:
ret.append((self.logger_name, logging.getLogger(self.logger_name)))
else:
ret = list(logging.Logger.manager.loggerDict.items())
ret.append(("root", logging.getLogger()))
return ret
|
def loggers(self)
|
Return all the loggers that should be activated
| 2.330406
| 2.306333
| 1.010438
|
'''
get a nicely formatted backtrace
since -- 7-6-12
frames -- list -- the frame_tuple frames to format
inpsect_packages -- boolean -- by default, this only prints code of packages that are not
in the pythonN directories, that cuts out a lot of the noise, set this to True if you
want a full stacktrace
depth -- integer -- how deep you want the stack trace to print (ie, if you only care about
the last three calls, pass in depth=3 so you only get the last 3 rows of the stack)
return -- list -- each line will be a nicely formatted entry of the backtrace
'''
calls = []
#count = 1
#for count, f in enumerate(frames[1:], 1):
for count, f in enumerate(frames, 1):
#prev_f = frames[i]
#called_module = inspect.getmodule(prev_f[0]).__name__
#called_func = prev_f[3]
call = self.call_class(f)
s = self._get_call_summary(call, inspect_packages=inspect_packages, index=count)
calls.append(s)
#count += 1
if depth and (count > depth):
break
# reverse the order on return so most recent is on the bottom
return calls[::-1]
|
def _get_backtrace(self, frames, inspect_packages=False, depth=0)
|
get a nicely formatted backtrace
since -- 7-6-12
frames -- list -- the frame_tuple frames to format
inpsect_packages -- boolean -- by default, this only prints code of packages that are not
in the pythonN directories, that cuts out a lot of the noise, set this to True if you
want a full stacktrace
depth -- integer -- how deep you want the stack trace to print (ie, if you only care about
the last three calls, pass in depth=3 so you only get the last 3 rows of the stack)
return -- list -- each line will be a nicely formatted entry of the backtrace
| 7.373097
| 2.32583
| 3.170092
|
'''
get a call summary
a call summary is a nicely formatted string synopsis of the call
handy for backtraces
since -- 7-6-12
call_info -- dict -- the dict returned from _get_call_info()
index -- integer -- set to something above 0 if you would like the summary to be numbered
inspect_packages -- boolean -- set to True to get the full format even for system frames
return -- string
'''
call_info = call.info
inspect_regex = re.compile(r'[\\\\/]python\d(?:\.\d+)?', re.I)
# truncate the filepath if it is super long
f = call_info['file']
if len(f) > 75:
f = "{}...{}".format(f[0:30], f[-45:])
if inspect_packages or not inspect_regex.search(call_info['file']):
s = "{}:{}\n\n{}\n\n".format(
f,
call_info['line'],
String(call_info['call']).indent(1)
)
else:
s = "{}:{}\n".format(
f,
call_info['line']
)
if index > 0:
s = "{:02d} - {}".format(index, s)
return s
|
def _get_call_summary(self, call, index=0, inspect_packages=True)
|
get a call summary
a call summary is a nicely formatted string synopsis of the call
handy for backtraces
since -- 7-6-12
call_info -- dict -- the dict returned from _get_call_info()
index -- integer -- set to something above 0 if you would like the summary to be numbered
inspect_packages -- boolean -- set to True to get the full format even for system frames
return -- string
| 5.538405
| 2.627692
| 2.107707
|
return
if (not device and not self.device): # None can be passed as an argument when the device has not been set yet
return
if (self.device):
if self.device == device:
self.report("setDevice : same device")
return
if self.filterchain: # there's video already
self.clearDevice()
self.device = device
self.video.setDevice(self.device) # inform the video widget so it can start drags
# ManagedFilterChain.addViewPort accepts ViewPort instance
self.filterchain = self.filterchain_group.get(_id = self.device._id)
if self.filterchain:
self.viewport.setXScreenNum(self.n_xscreen)
self.viewport.setWindowId (int(self.video.winId()))
self.filterchain.addViewPort(self.viewport)
# now the shared mem / semaphore part :
self.shmem_name = self.filterchain.getShmem()
print(self.pre, "setDevice : got shmem name", self.shmem_name)
self.mvision_widget = self.mvision_process.getWidget()
self.mvision_widget.setParent(self.main_widget)
self.main_layout.addWidget(self.mvision_widget)
self.mvision_process.activate(
n_buffer = constant.shmem_n_buffer,
image_dimensions = constant.shmem_image_dimensions,
shmem_name = self.shmem_name
)
self.thread.addProcess(self.mvision_process)
# is there a signal giving the bounding boxes..? let's connect it
if hasattr(self.mvision_process.signals,"bboxes"):
print(self.pre, "setDevice : connecting bboxes signal")
self.mvision_process.signals.bboxes.connect(self.set_bounding_boxes_slot)
|
def setDevice(self, device):
self.report("setDevice :", device)
if (self.mvision_process == None)
|
Sets the video stream
:param device: A rather generic device class. In this case DataModel.RTSPCameraDevice.
| 6.608441
| 6.633753
| 0.996184
|
print(self.pre, "clearDevice: ")
self.report("clearDevice")
if not self.device:
return
if (self.mvision_process==None):
return
self.filterchain.delViewPort(self.viewport)
self.filterchain.releaseShmem(self.shmem_name)
self.mvision_process.deactivate() # put process back to sleep ..
self.main_layout.removeWidget(self.mvision_widget)
self.filterchain = None
self.device = None
self.video.update()
|
def clearDevice(self)
|
Remove the current stream
| 9.875304
| 9.673068
| 1.020907
|
def decorator(cls):
format_name_lower = format_name.lower()
if ext_names is None:
_ext_format_mapping[format_name_lower] = format_name_lower
else:
for ext in to_list(ext_names):
_ext_format_mapping[ext.lower()] = format_name_lower
_format_parser_mapping[format_name_lower] = cls()
return cls
return decorator
|
def parser(format_name, ext_names=None)
|
Decorate a parser class to register it.
:param format_name: standard format name
:param ext_names: supported extension name
| 2.561611
| 2.574873
| 0.994849
|
if self._read_more_exp is None:
return self.parse_whole(raw_content), False
sp = self._read_more_exp.split(raw_content, maxsplit=1)
if len(sp) == 2 and sp[0]:
has_more_content = True
result = sp[0].rstrip()
else:
has_more_content = False
result = raw_content
# since the preview part contains no read_more_sep,
# we can safely use the parse_whole method
return self.parse_whole(result), has_more_content
|
def parse_preview(self, raw_content)
|
Parse the preview part of the content,
and return the parsed string and whether there is more content or not.
If the preview part is equal to the whole part,
the second element of the returned tuple will be False, else True.
:param raw_content: raw content
:return: tuple(parsed string, whether there is more content or not)
| 4.31745
| 3.690859
| 1.169768
|
if self._read_more_exp is None:
return raw_content
sp = self._read_more_exp.split(raw_content, maxsplit=1)
if len(sp) == 2 and sp[0]:
result = '\n\n'.join((sp[0].rstrip(), sp[1].lstrip()))
else:
result = raw_content
return result
|
def remove_read_more_sep(self, raw_content)
|
Removes the first read_more_sep that occurs in raw_content.
Subclasses should call this method to preprocess raw_content.
| 2.693332
| 2.515972
| 1.070494
|
# Instantiate the analyzer only when the GPU has enough free memory;
# otherwise record a warning and leave the analyzer unset.
if not self.requiredGPU_MB(self.required_mb):
    self.warning_message = "WARNING: not enough GPU memory!"
    self.analyzer = None
else:
    self.analyzer = YoloV3TinyAnalyzer(verbose=self.verbose)
|
def postActivate_(self)
|
Whatever you need to do after creating the shmem client
| 14.988739
| 14.544573
| 1.030538
|
if not path:
path = os.path.join(os.getcwd(), "{}.txt".format(__name__))
global stream
orig_stream = stream
try:
stream = FileStream(path)
yield stream
finally:
stream = orig_stream
|
def tofile(path="")
|
Instead of printing to a screen print to a file
:Example:
with pout.tofile("/path/to/file.txt"):
# all pout calls in this with block will print to file.txt
pout.v("a string")
pout.b()
pout.h()
:param path: str, a path to the file you want to write to
| 4.753081
| 5.29271
| 0.898043
|
'''
Pretty print the name = value of every passed-in variable.

Along with each value this prints the file:line where v() was called,
so the debugging call is easy to locate and remove later.

example --
    foo = 1
    bar = [1, 2, 3]
    out.v(foo, bar)

*args -- list -- the variables you want pretty printed for humans
'''
if not args:
    raise ValueError("you didn't pass any arguments to print out")
# Reflect.context captures the caller's frame so the variable names
# and call site can be recovered.
with Reflect.context(args, **kwargs) as reflect:
    printer = V_CLASS(reflect, stream, **kwargs)
    printer()
|
def v(*args, **kwargs)
|
print the name = values of any passed in variables
this prints out the passed in name, the value, and the file:line where the v()
method was called so you can easily find it and remove it later
example --
foo = 1
bar = [1, 2, 3]
out.v(foo, bar)
""" prints out:
foo = 1
bar =
[
0: 1,
1: 2,
2: 3
]
(/file:line)
"""
*args -- list -- the variables you want to see pretty printed for humans
| 15.026464
| 3.160418
| 4.754581
|
# Like v(), but writes only the raw values — no variable names or
# file positions.
if not args:
    raise ValueError("you didn't pass any arguments to print out")
with Reflect.context(args, **kwargs) as reflect:
    printer = V_CLASS(reflect, stream, **kwargs)
    printer.writeline(printer.value())
|
def vs(*args, **kwargs)
|
exactly like v, but doesn't print variable names or file positions
.. seealso:: ss()
| 17.925957
| 17.435566
| 1.028126
|
if not args:
raise ValueError("you didn't pass any arguments to print out")
with Reflect.context(args, **kwargs) as r:
instance = V_CLASS(r, stream, **kwargs)
return instance.full_value().strip()
|
def s(*args, **kwargs)
|
exactly like v() but returns the string instead of printing it out
since -- 10-15-2015
return -- str
| 18.163975
| 15.004075
| 1.210603
|
if not args:
raise ValueError("you didn't pass any arguments to print out")
with Reflect.context(args, **kwargs) as r:
instance = V_CLASS(r, stream, **kwargs)
return instance.value().strip()
|
def ss(*args, **kwargs)
|
exactly like s, but doesn't return variable names or file positions (useful for logging)
since -- 10-15-2015
return -- str
| 18.356279
| 17.252439
| 1.063982
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.